Initial Contribution

msm-2.6.38: tag AU_LINUX_ANDROID_GINGERBREAD.02.03.04.00.142

Signed-off-by: Bryan Huntsman <bryanh@codeaurora.org>
diff --git a/drivers/Kconfig b/drivers/Kconfig
index d0258eb..851189d 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -52,6 +52,8 @@
 
 source "drivers/spi/Kconfig"
 
+source "drivers/slimbus/Kconfig"
+
 source "drivers/pps/Kconfig"
 
 source "drivers/ptp/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 4ea4ac9..e18822cd 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -51,6 +51,7 @@
 obj-$(CONFIG_TARGET_CORE)	+= target/
 obj-$(CONFIG_MTD)		+= mtd/
 obj-$(CONFIG_SPI)		+= spi/
+obj-$(CONFIG_SLIMBUS)		+= slimbus/
 obj-y				+= net/
 obj-$(CONFIG_ATM)		+= atm/
 obj-$(CONFIG_FUSION)		+= message/
@@ -71,6 +72,7 @@
 obj-$(CONFIG_USB)		+= usb/
 obj-$(CONFIG_PCI)		+= usb/
 obj-$(CONFIG_USB_GADGET)	+= usb/
+obj-$(CONFIG_DIAG_CHAR)	+= char/diag/
 obj-$(CONFIG_SERIO)		+= input/serio/
 obj-$(CONFIG_GAMEPORT)		+= input/gameport/
 obj-$(CONFIG_INPUT)		+= input/
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 45d7c8f..42befd1 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -62,6 +62,9 @@
 
 static BLOCKING_NOTIFIER_HEAD(memory_chain);
 
+unsigned long movable_reserved_start, movable_reserved_size;
+unsigned long low_power_memory_start, low_power_memory_size;
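+/* Physical start and size of the movable-reserved and low-power memory
+ * regions, exported read-only via the sysfs class attributes defined below. */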
+
 int register_memory_notifier(struct notifier_block *nb)
 {
         return blocking_notifier_chain_register(&memory_chain, nb);
@@ -366,6 +369,64 @@
 				&attr_block_size_bytes.attr);
 }
 
+static ssize_t
+print_movable_size(struct class *class, struct class_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%lx\n", movable_reserved_size);
+}
+
+static CLASS_ATTR(movable_size_bytes, 0444, print_movable_size, NULL);
+
+static int movable_size_init(void)
+{
+	return sysfs_create_file(&memory_sysdev_class.kset.kobj,
+				&class_attr_movable_size_bytes.attr);
+}
+
+static ssize_t
+print_movable_start(struct class *class, struct class_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%lx\n", movable_reserved_start);
+}
+
+static CLASS_ATTR(movable_start_bytes, 0444, print_movable_start, NULL);
+
+static int movable_start_init(void)
+{
+	return sysfs_create_file(&memory_sysdev_class.kset.kobj,
+				&class_attr_movable_start_bytes.attr);
+}
+
+static ssize_t
+print_low_power_memory_size(struct class *class, struct class_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%lx\n", low_power_memory_size);
+}
+
+static CLASS_ATTR(low_power_memory_size_bytes, 0444,
+	print_low_power_memory_size, NULL);
+
+static int low_power_memory_size_init(void)
+{
+	return sysfs_create_file(&memory_sysdev_class.kset.kobj,
+				&class_attr_low_power_memory_size_bytes.attr);
+}
+
+static ssize_t
+print_low_power_memory_start(struct class *class, struct class_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%lx\n", low_power_memory_start);
+}
+
+static CLASS_ATTR(low_power_memory_start_bytes, 0444,
+	print_low_power_memory_start, NULL);
+
+static int low_power_memory_start_init(void)
+{
+	return sysfs_create_file(&memory_sysdev_class.kset.kobj,
+				&class_attr_low_power_memory_start_bytes.attr);
+}
+
 /*
  * Some architectures will have custom drivers to do this, and
  * will not need to do it from userspace.  The fake hot-add code
@@ -473,6 +534,96 @@
 }
 #endif
 
+#ifdef CONFIG_ARCH_MEMORY_REMOVE
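+/* Write-only class attributes: each store parses a physical address from the
+ * written string and operates on one memory section
+ * (PAGES_PER_SECTION << PAGE_SHIFT bytes) via the corresponding
+ * physical_*_memory() helper. */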
+static ssize_t
+memory_remove_store(struct class *class, struct class_attribute *attr,
+		    const char *buf, size_t count)
+{
+	u64 phys_addr;
+	int ret;
+
+	phys_addr = simple_strtoull(buf, NULL, 0);
+
+	ret = physical_remove_memory(phys_addr,
+		PAGES_PER_SECTION << PAGE_SHIFT);
+
+	if (ret)
+		count = ret;
+
+	return count;
+}
+static CLASS_ATTR(remove, S_IWUSR, NULL, memory_remove_store);
+
+static int memory_remove_init(void)
+{
+	return sysfs_create_file(&memory_sysdev_class.kset.kobj,
+				&class_attr_remove.attr);
+}
+
+static ssize_t
+memory_active_store(struct class *class, struct class_attribute *attr,
+		    const char *buf, size_t count)
+{
+	u64 phys_addr;
+	int ret;
+
+	phys_addr = simple_strtoull(buf, NULL, 0);
+
+	ret = physical_active_memory(phys_addr,
+		PAGES_PER_SECTION << PAGE_SHIFT);
+
+	if (ret)
+		count = ret;
+
+	return count;
+}
+static CLASS_ATTR(active, S_IWUSR, NULL, memory_active_store);
+
+static int memory_active_init(void)
+{
+	return sysfs_create_file(&memory_sysdev_class.kset.kobj,
+				&class_attr_active.attr);
+}
+
+static ssize_t
+memory_low_power_store(struct class *class, struct class_attribute *attr,
+		       const char *buf, size_t count)
+{
+	u64 phys_addr;
+	int ret;
+
+	phys_addr = simple_strtoull(buf, NULL, 0);
+
+	ret = physical_low_power_memory(phys_addr,
+		PAGES_PER_SECTION << PAGE_SHIFT);
+
+	if (ret)
+		count = ret;
+
+	return count;
+}
+static CLASS_ATTR(low_power, S_IWUSR, NULL, memory_low_power_store);
+
+static int memory_low_power_init(void)
+{
+	return sysfs_create_file(&memory_sysdev_class.kset.kobj,
+				&class_attr_low_power.attr);
+}
+#else
+static inline int memory_remove_init(void)
+{
+	return 0;
+}
+static inline int memory_active_init(void)
+{
+	return 0;
+}
+static inline int memory_low_power_init(void)
+{
+	return 0;
+}
+#endif
+
 /*
  * Note that phys_device is optional.  It is here to allow for
  * differentiation between which *physical* devices each
@@ -665,9 +816,30 @@
 	err = memory_fail_init();
 	if (!ret)
 		ret = err;
+	err = memory_remove_init();
+	if (!ret)
+		ret = err;
+	err = memory_active_init();
+	if (!ret)
+		ret = err;
+	err = memory_low_power_init();
+	if (!ret)
+		ret = err;
 	err = block_size_init();
 	if (!ret)
 		ret = err;
+	err = movable_size_init();
+	if (!ret)
+		ret = err;
+	err = movable_start_init();
+	if (!ret)
+		ret = err;
+	err = low_power_memory_size_init();
+	if (!ret)
+		ret = err;
+	err = low_power_memory_start_init();
+	if (!ret)
+		ret = err;
 out:
 	if (ret)
 		printk(KERN_ERR "%s() failed: %d\n", __func__, ret);
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 11b41fd..ea1c27a 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -2,6 +2,16 @@
 menu "Bluetooth device drivers"
 	depends on BT
 
+config BT_HCISMD
+	tristate "HCI SMD driver"
+	help
+	  Bluetooth HCI SMD driver.
+	  This driver is required if you want to use a Bluetooth device with
+	  an SMD interface.
+
+	  Say Y here to compile support for the Bluetooth SMD interface into
+	  the kernel or say M to compile it as a module (hci_smd).
+
 config BT_HCIBTUSB
 	tristate "HCI USB driver"
 	depends on USB
@@ -81,6 +91,17 @@
 
 	  Say Y here to compile support for HCILL protocol.
 
+config BT_HCIUART_IBS
+	bool "HCI_IBS protocol support"
+	depends on BT_HCIUART
+	default n
+	help
+	  HCI_IBS (HCI In-Band Sleep) is a serial protocol for communication
+	  between the Bluetooth device and the host. This protocol is
+	  required for UART clock control on some Qualcomm Bluetooth devices.
+
+	  Say Y here to compile support for HCI_IBS protocol.
+
 config BT_HCIBCM203X
 	tristate "HCI BCM203x USB driver"
 	depends on USB
@@ -104,6 +125,14 @@
 	  Say Y here to compile support for HCI BPA10x devices into the
 	  kernel or say M to compile it as module (bpa10x).
 
+config BT_MSM_SLEEP
+	tristate "MSM Bluesleep driver"
+	depends on BT && SERIAL_MSM_HS
+	default n
+	help
+	  Bluetooth MSM bluesleep driver.
+	  This driver provides support for Bluetooth sleep (low-power) mode.
+
 config BT_HCIBFUSB
 	tristate "HCI BlueFRITZ! USB driver"
 	depends on USB
@@ -207,6 +236,14 @@
 	  Say Y here to compile support for Marvell BT-over-SDIO driver
 	  into the kernel or say M to compile it as module.
 
+config MSM_BT_POWER
+	tristate "MSM Bluetooth Power Control"
+	depends on ARCH_MSM && RFKILL
+	default m
+	help
+	  Provides a parameter to switch on/off power from the PMIC
+	  to the Bluetooth device.
+
 config BT_ATH3K
 	tristate "Atheros firmware download driver"
 	depends on BT_HCIBTUSB
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index f4460f4..a20a056 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -2,6 +2,7 @@
 # Makefile for the Linux Bluetooth HCI device drivers.
 #
 
+obj-$(CONFIG_BT_HCISMD)		+= hci_smd.o
 obj-$(CONFIG_BT_HCIVHCI)	+= hci_vhci.o
 obj-$(CONFIG_BT_HCIUART)	+= hci_uart.o
 obj-$(CONFIG_BT_HCIBCM203X)	+= bcm203x.o
@@ -28,4 +29,8 @@
 hci_uart-$(CONFIG_BT_HCIUART_BCSP)	+= hci_bcsp.o
 hci_uart-$(CONFIG_BT_HCIUART_LL)	+= hci_ll.o
 hci_uart-$(CONFIG_BT_HCIUART_ATH3K)	+= hci_ath.o
+hci_uart-$(CONFIG_BT_HCIUART_IBS)	+= hci_ibs.o
 hci_uart-objs				:= $(hci_uart-y)
+obj-$(CONFIG_BT_MSM_SLEEP)	+= msm_bt_sleep.o
+msm_bt_sleep-objs		:= bluesleep.o
+obj-$(CONFIG_MSM_BT_POWER)		+= bluetooth-power.o
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index a585473..695d441 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -62,7 +62,6 @@
 
 	/* Atheros AR3011 with sflash firmware*/
 	{ USB_DEVICE(0x0CF3, 0x3002) },
-	{ USB_DEVICE(0x13d3, 0x3304) },
 
 	/* Atheros AR9285 Malbec with sflash firmware */
 	{ USB_DEVICE(0x03F0, 0x311D) },
@@ -375,11 +374,6 @@
 
 	/* load patch and sysconfig files for AR3012 */
 	if (id->driver_info & BTUSB_ATH3012) {
-
-		/* New firmware with patch and sysconfig files already loaded */
-		if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x0001)
-			return -ENODEV;
-
 		ret = ath3k_load_patch(udev);
 		if (ret < 0) {
 			BT_ERR("Loading patch file failed");
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index aed1904..4104b7f 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -930,7 +930,7 @@
 	pcmcia_disable_device(link);
 }
 
-static const struct pcmcia_device_id bluecard_ids[] = {
+static struct pcmcia_device_id bluecard_ids[] = {
 	PCMCIA_DEVICE_PROD_ID12("BlueCard", "LSE041", 0xbaf16fbf, 0x657cc15e),
 	PCMCIA_DEVICE_PROD_ID12("BTCFCARD", "LSE139", 0xe3987764, 0x2524b59c),
 	PCMCIA_DEVICE_PROD_ID12("WSS", "LSE039", 0x0a0736ec, 0x24e6dfab),
diff --git a/drivers/bluetooth/bluesleep.c b/drivers/bluetooth/bluesleep.c
new file mode 100644
index 0000000..0d11141
--- /dev/null
+++ b/drivers/bluetooth/bluesleep.c
@@ -0,0 +1,757 @@
+/*
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License version 2 as
+   published by the Free Software Foundation.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+   for more details.
+
+
+   Copyright (C) 2006-2007 - Motorola
+   Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+
+   Date         Author           Comment
+   -----------  --------------   --------------------------------
+   2006-Apr-28	Motorola	 The kernel module for running the Bluetooth(R)
+				 Sleep-Mode Protocol from the Host side
+   2006-Sep-08  Motorola         Added workqueue for handling sleep work.
+   2007-Jan-24  Motorola         Added mbm_handle_ioi() call to ISR.
+
+*/
+
+#include <linux/module.h>	/* kernel module definitions */
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/uaccess.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+
+#include <linux/irq.h>
+#include <linux/param.h>
+#include <linux/bitops.h>
+#include <linux/termios.h>
+#include <mach/gpio.h>
+#include <mach/msm_serial_hs.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h> /* event notifications */
+#include "hci_uart.h"
+
+#define BT_SLEEP_DBG
+#ifndef BT_SLEEP_DBG
+#define BT_DBG(fmt, arg...)
+#endif
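+/* BT_SLEEP_DBG is defined above, so BT_DBG() output is kept; remove the
+ * define to compile the debug prints out. */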
+/*
+ * Defines
+ */
+
+#define VERSION		"1.1"
+#define PROC_DIR	"bluetooth/sleep"
+
+struct bluesleep_info {
+	unsigned host_wake;
+	unsigned ext_wake;
+	unsigned host_wake_irq;
+	struct uart_port *uport;
+};
+
+/* work function */
+static void bluesleep_sleep_work(struct work_struct *work);
+
+/* work queue */
+DECLARE_DELAYED_WORK(sleep_workqueue, bluesleep_sleep_work);
+
+/* Macros for handling sleep work */
+#define bluesleep_rx_busy()     schedule_delayed_work(&sleep_workqueue, 0)
+#define bluesleep_tx_busy()     schedule_delayed_work(&sleep_workqueue, 0)
+#define bluesleep_rx_idle()     schedule_delayed_work(&sleep_workqueue, 0)
+#define bluesleep_tx_idle()     schedule_delayed_work(&sleep_workqueue, 0)
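+/* All four events schedule the same work item; bluesleep_sleep_work() then
+ * decides whether to sleep or wake based on the GPIO and UART state. */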
+
+/* 1 second timeout */
+#define TX_TIMER_INTERVAL	1
+
+/* state variable names and bit positions */
+#define BT_PROTO	0x01
+#define BT_TXDATA	0x02
+#define BT_ASLEEP	0x04
+
+/* global pointer to a single hci device. */
+static struct hci_dev *bluesleep_hdev;
+
+static struct bluesleep_info *bsi;
+
+/* module usage */
+static atomic_t open_count = ATOMIC_INIT(1);
+
+/*
+ * Local function prototypes
+ */
+
+static int bluesleep_hci_event(struct notifier_block *this,
+			    unsigned long event, void *data);
+
+/*
+ * Global variables
+ */
+
+/** Global state flags */
+static unsigned long flags;
+
+/** Tasklet to respond to change in hostwake line */
+static struct tasklet_struct hostwake_task;
+
+/** Transmission timer */
+static struct timer_list tx_timer;
+
+/** Lock for state transitions */
+static spinlock_t rw_lock;
+
+/** Notifier block for HCI events */
+struct notifier_block hci_event_nblock = {
+	.notifier_call = bluesleep_hci_event,
+};
+
+struct proc_dir_entry *bluetooth_dir, *sleep_dir;
+
+/*
+ * Local functions
+ */
+
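+/* Vote the MSM high-speed UART clock on and assert RTS, or deassert RTS and
+ * release the clock vote. */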
+static void hsuart_power(int on)
+{
+	if (on) {
+		msm_hs_request_clock_on(bsi->uport);
+		msm_hs_set_mctrl(bsi->uport, TIOCM_RTS);
+	} else {
+		msm_hs_set_mctrl(bsi->uport, 0);
+		msm_hs_request_clock_off(bsi->uport);
+	}
+}
+
+
+/**
+ * @return 1 if the Host can go to sleep, 0 otherwise.
+ */
+static inline int bluesleep_can_sleep(void)
+{
+	/* check if MSM_WAKE_BT_GPIO and BT_WAKE_MSM_GPIO are both deasserted */
+	return gpio_get_value(bsi->ext_wake) &&
+		gpio_get_value(bsi->host_wake) &&
+		(bsi->uport != NULL);
+}
+
+void bluesleep_sleep_wakeup(void)
+{
+	if (test_bit(BT_ASLEEP, &flags)) {
+		BT_DBG("waking up...");
+		/* Start the timer */
+		mod_timer(&tx_timer, jiffies + (TX_TIMER_INTERVAL * HZ));
+		gpio_set_value(bsi->ext_wake, 0);
+		clear_bit(BT_ASLEEP, &flags);
+		/* Activating UART */
+		hsuart_power(1);
+	}
+}
+
+/**
+ * @brief Main sleep work handling function: updates the flags,
+ * activates or deactivates the UART and checks the TX FIFO.
+ */
+static void bluesleep_sleep_work(struct work_struct *work)
+{
+	if (bluesleep_can_sleep()) {
+		/* already asleep, this is an error case */
+		if (test_bit(BT_ASLEEP, &flags)) {
+			BT_DBG("already asleep");
+			return;
+		}
+
+		if (msm_hs_tx_empty(bsi->uport)) {
+			BT_DBG("going to sleep...");
+			set_bit(BT_ASLEEP, &flags);
+			/* Deactivating UART */
+			hsuart_power(0);
+		} else {
+
+			mod_timer(&tx_timer, jiffies + (TX_TIMER_INTERVAL * HZ));
+			return;
+		}
+	} else {
+		bluesleep_sleep_wakeup();
+	}
+}
+
+/**
+ * A tasklet function that runs in tasklet context, reads the value
+ * of the HOST_WAKE GPIO pin and defers further work.
+ * @param data Not used.
+ */
+static void bluesleep_hostwake_task(unsigned long data)
+{
+	BT_DBG("hostwake line change");
+
+	spin_lock(&rw_lock);
+
+	if (gpio_get_value(bsi->host_wake))
+		bluesleep_rx_busy();
+	else
+		bluesleep_rx_idle();
+
+	spin_unlock(&rw_lock);
+}
+
+/**
+ * Handles proper timer action when outgoing data is delivered to the
+ * HCI line discipline. Sets BT_TXDATA.
+ */
+static void bluesleep_outgoing_data(void)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&rw_lock, irq_flags);
+
+	/* log data passing by */
+	set_bit(BT_TXDATA, &flags);
+
+	/* if the tx side is sleeping... */
+	if (gpio_get_value(bsi->ext_wake)) {
+
+		BT_DBG("tx was sleeping");
+		bluesleep_sleep_wakeup();
+	}
+
+	spin_unlock_irqrestore(&rw_lock, irq_flags);
+}
+
+/**
+ * Handles HCI device events.
+ * @param this Not used.
+ * @param event The event that occurred.
+ * @param data The HCI device associated with the event.
+ * @return <code>NOTIFY_DONE</code>.
+ */
+static int bluesleep_hci_event(struct notifier_block *this,
+				unsigned long event, void *data)
+{
+	struct hci_dev *hdev = (struct hci_dev *) data;
+	struct hci_uart *hu;
+	struct uart_state *state;
+
+	if (!hdev)
+		return NOTIFY_DONE;
+
+	switch (event) {
+	case HCI_DEV_REG:
+		if (!bluesleep_hdev) {
+			bluesleep_hdev = hdev;
+			hu  = (struct hci_uart *) hdev->driver_data;
+			state = (struct uart_state *) hu->tty->driver_data;
+			bsi->uport = state->uart_port;
+		}
+		break;
+	case HCI_DEV_UNREG:
+		bluesleep_hdev = NULL;
+		bsi->uport = NULL;
+		break;
+	case HCI_DEV_WRITE:
+		bluesleep_outgoing_data();
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+/**
+ * Handles transmission timer expiration.
+ * @param data Not used.
+ */
+static void bluesleep_tx_timer_expire(unsigned long data)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&rw_lock, irq_flags);
+
+	BT_DBG("Tx timer expired");
+
+	/* were we silent during the last timeout? */
+	if (!test_bit(BT_TXDATA, &flags)) {
+		BT_DBG("Tx has been idle");
+		gpio_set_value(bsi->ext_wake, 1);
+		bluesleep_tx_idle();
+	} else {
+		BT_DBG("Tx data during last period");
+		mod_timer(&tx_timer, jiffies + (TX_TIMER_INTERVAL*HZ));
+	}
+
+	/* clear the incoming data flag */
+	clear_bit(BT_TXDATA, &flags);
+
+	spin_unlock_irqrestore(&rw_lock, irq_flags);
+}
+
+/**
+ * Schedules a tasklet to run when receiving an interrupt on the
+ * <code>HOST_WAKE</code> GPIO pin.
+ * @param irq Not used.
+ * @param dev_id Not used.
+ */
+static irqreturn_t bluesleep_hostwake_isr(int irq, void *dev_id)
+{
+	/* schedule a tasklet to handle the change in the host wake line */
+	tasklet_schedule(&hostwake_task);
+	return IRQ_HANDLED;
+}
+
+/**
+ * Starts the Sleep-Mode Protocol on the Host.
+ * @return 0 on success, or a negative errno value on error.
+ */
+static int bluesleep_start(void)
+{
+	int retval;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&rw_lock, irq_flags);
+
+	if (test_bit(BT_PROTO, &flags)) {
+		spin_unlock_irqrestore(&rw_lock, irq_flags);
+		return 0;
+	}
+
+	spin_unlock_irqrestore(&rw_lock, irq_flags);
+
+	if (!atomic_dec_and_test(&open_count)) {
+		atomic_inc(&open_count);
+		return -EBUSY;
+	}
+
+	/* start the timer */
+
+	mod_timer(&tx_timer, jiffies + (TX_TIMER_INTERVAL*HZ));
+
+	/* assert BT_WAKE */
+	gpio_set_value(bsi->ext_wake, 0);
+	retval = request_irq(bsi->host_wake_irq, bluesleep_hostwake_isr,
+				IRQF_DISABLED | IRQF_TRIGGER_FALLING,
+				"bluetooth hostwake", NULL);
+	if (retval  < 0) {
+		BT_ERR("Couldn't acquire BT_HOST_WAKE IRQ");
+		goto fail;
+	}
+
+	retval = enable_irq_wake(bsi->host_wake_irq);
+	if (retval < 0) {
+		BT_ERR("Couldn't enable BT_HOST_WAKE as wakeup interrupt");
+		free_irq(bsi->host_wake_irq, NULL);
+		goto fail;
+	}
+
+	set_bit(BT_PROTO, &flags);
+	return 0;
+fail:
+	del_timer(&tx_timer);
+	atomic_inc(&open_count);
+
+	return retval;
+}
+
+/**
+ * Stops the Sleep-Mode Protocol on the Host.
+ */
+static void bluesleep_stop(void)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&rw_lock, irq_flags);
+
+	if (!test_bit(BT_PROTO, &flags)) {
+		spin_unlock_irqrestore(&rw_lock, irq_flags);
+		return;
+	}
+
+	/* assert BT_WAKE */
+	gpio_set_value(bsi->ext_wake, 0);
+	del_timer(&tx_timer);
+	clear_bit(BT_PROTO, &flags);
+
+	if (test_bit(BT_ASLEEP, &flags)) {
+		clear_bit(BT_ASLEEP, &flags);
+		hsuart_power(1);
+	}
+
+	atomic_inc(&open_count);
+
+	spin_unlock_irqrestore(&rw_lock, irq_flags);
+	if (disable_irq_wake(bsi->host_wake_irq))
+		BT_ERR("Couldn't disable hostwake IRQ wakeup mode\n");
+	free_irq(bsi->host_wake_irq, NULL);
+}
+/**
+ * Read the <code>BT_WAKE</code> GPIO pin value via the proc interface.
+ * When this function returns, <code>page</code> will contain a 1 if the
+ * pin is high, 0 otherwise.
+ * @param page Buffer for writing data.
+ * @param start Not used.
+ * @param offset Not used.
+ * @param count Not used.
+ * @param eof Whether or not there is more data to be read.
+ * @param data Not used.
+ * @return The number of bytes written.
+ */
+static int bluepower_read_proc_btwake(char *page, char **start, off_t offset,
+					int count, int *eof, void *data)
+{
+	*eof = 1;
+	return sprintf(page, "btwake:%u\n", gpio_get_value(bsi->ext_wake));
+}
+
+/**
+ * Write the <code>BT_WAKE</code> GPIO pin value via the proc interface.
+ * @param file Not used.
+ * @param buffer The buffer to read from.
+ * @param count The number of bytes to be written.
+ * @param data Not used.
+ * @return The number of bytes written on success, or a negative errno value
+ * on error.
+ */
+static int bluepower_write_proc_btwake(struct file *file, const char *buffer,
+					unsigned long count, void *data)
+{
+	char *buf;
+
+	if (count < 1)
+		return -EINVAL;
+
+	buf = kmalloc(count, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (copy_from_user(buf, buffer, count)) {
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	if (buf[0] == '0') {
+		gpio_set_value(bsi->ext_wake, 0);
+	} else if (buf[0] == '1') {
+		gpio_set_value(bsi->ext_wake, 1);
+	} else {
+		kfree(buf);
+		return -EINVAL;
+	}
+
+	kfree(buf);
+	return count;
+}
+
+/**
+ * Read the <code>BT_HOST_WAKE</code> GPIO pin value via the proc interface.
+ * When this function returns, <code>page</code> will contain a 1 if the pin
+ * is high, 0 otherwise.
+ * @param page Buffer for writing data.
+ * @param start Not used.
+ * @param offset Not used.
+ * @param count Not used.
+ * @param eof Whether or not there is more data to be read.
+ * @param data Not used.
+ * @return The number of bytes written.
+ */
+static int bluepower_read_proc_hostwake(char *page, char **start, off_t offset,
+					int count, int *eof, void *data)
+{
+	*eof = 1;
+	return sprintf(page, "hostwake: %u\n", gpio_get_value(bsi->host_wake));
+}
+
+
+/**
+ * Read the low-power status of the Host via the proc interface.
+ * When this function returns, <code>page</code> contains a 1 if the Host
+ * is asleep, 0 otherwise.
+ * @param page Buffer for writing data.
+ * @param start Not used.
+ * @param offset Not used.
+ * @param count Not used.
+ * @param eof Whether or not there is more data to be read.
+ * @param data Not used.
+ * @return The number of bytes written.
+ */
+static int bluesleep_read_proc_asleep(char *page, char **start, off_t offset,
+					int count, int *eof, void *data)
+{
+	unsigned int asleep;
+
+	asleep = test_bit(BT_ASLEEP, &flags) ? 1 : 0;
+	*eof = 1;
+	return sprintf(page, "asleep: %u\n", asleep);
+}
+
+/**
+ * Read the low-power protocol being used by the Host via the proc interface.
+ * When this function returns, <code>page</code> will contain a 1 if the Host
+ * is using the Sleep Mode Protocol, 0 otherwise.
+ * @param page Buffer for writing data.
+ * @param start Not used.
+ * @param offset Not used.
+ * @param count Not used.
+ * @param eof Whether or not there is more data to be read.
+ * @param data Not used.
+ * @return The number of bytes written.
+ */
+static int bluesleep_read_proc_proto(char *page, char **start, off_t offset,
+					int count, int *eof, void *data)
+{
+	unsigned int proto;
+
+	proto = test_bit(BT_PROTO, &flags) ? 1 : 0;
+	*eof = 1;
+	return sprintf(page, "proto: %u\n", proto);
+}
+
+/**
+ * Modify the low-power protocol used by the Host via the proc interface.
+ * @param file Not used.
+ * @param buffer The buffer to read from.
+ * @param count The number of bytes to be written.
+ * @param data Not used.
+ * @return The number of bytes written on success, or a negative errno value
+ * on error.
+ */
+static int bluesleep_write_proc_proto(struct file *file, const char *buffer,
+					unsigned long count, void *data)
+{
+	char proto;
+
+	if (count < 1)
+		return -EINVAL;
+
+	if (copy_from_user(&proto, buffer, 1))
+		return -EFAULT;
+
+	if (proto == '0')
+		bluesleep_stop();
+	else
+		bluesleep_start();
+
+	/* claim that we wrote everything */
+	return count;
+}
+
+static int __init bluesleep_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct resource *res;
+
+	bsi = kzalloc(sizeof(struct bluesleep_info), GFP_KERNEL);
+	if (!bsi)
+		return -ENOMEM;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_IO,
+				"gpio_host_wake");
+	if (!res) {
+		BT_ERR("couldn't find host_wake gpio\n");
+		ret = -ENODEV;
+		goto free_bsi;
+	}
+	bsi->host_wake = res->start;
+
+	ret = gpio_request(bsi->host_wake, "bt_host_wake");
+	if (ret)
+		goto free_bsi;
+	ret = gpio_direction_input(bsi->host_wake);
+	if (ret)
+		goto free_bt_host_wake;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_IO,
+				"gpio_ext_wake");
+	if (!res) {
+		BT_ERR("couldn't find ext_wake gpio\n");
+		ret = -ENODEV;
+		goto free_bt_host_wake;
+	}
+	bsi->ext_wake = res->start;
+
+	ret = gpio_request(bsi->ext_wake, "bt_ext_wake");
+	if (ret)
+		goto free_bt_host_wake;
+	/* assert bt wake */
+	ret = gpio_direction_output(bsi->ext_wake, 0);
+	if (ret)
+		goto free_bt_ext_wake;
+
+	bsi->host_wake_irq = platform_get_irq_byname(pdev, "host_wake");
+	if (bsi->host_wake_irq < 0) {
+		BT_ERR("couldn't find host_wake irq\n");
+		ret = -ENODEV;
+		goto free_bt_ext_wake;
+	}
+
+
+	return 0;
+
+free_bt_ext_wake:
+	gpio_free(bsi->ext_wake);
+free_bt_host_wake:
+	gpio_free(bsi->host_wake);
+free_bsi:
+	kfree(bsi);
+	return ret;
+}
+
+static int bluesleep_remove(struct platform_device *pdev)
+{
+	/* assert bt wake */
+	gpio_set_value(bsi->ext_wake, 0);
+	if (test_bit(BT_PROTO, &flags)) {
+		if (disable_irq_wake(bsi->host_wake_irq))
+			BT_ERR("Couldn't disable hostwake IRQ wakeup mode\n");
+		free_irq(bsi->host_wake_irq, NULL);
+		del_timer(&tx_timer);
+		if (test_bit(BT_ASLEEP, &flags))
+			hsuart_power(1);
+	}
+
+	gpio_free(bsi->host_wake);
+	gpio_free(bsi->ext_wake);
+	kfree(bsi);
+	return 0;
+}
+
+static struct platform_driver bluesleep_driver = {
+	.remove = bluesleep_remove,
+	.driver = {
+		.name = "bluesleep",
+		.owner = THIS_MODULE,
+	},
+};
+/**
+ * Initializes the module.
+ * @return 0 on success, or a negative errno value on error.
+ */
+static int __init bluesleep_init(void)
+{
+	int retval;
+	struct proc_dir_entry *ent;
+
+	BT_INFO("MSM Sleep Mode Driver Ver %s", VERSION);
+
+	retval = platform_driver_probe(&bluesleep_driver, bluesleep_probe);
+	if (retval)
+		return retval;
+
+	bluesleep_hdev = NULL;
+
+	bluetooth_dir = proc_mkdir("bluetooth", NULL);
+	if (bluetooth_dir == NULL) {
+		BT_ERR("Unable to create /proc/bluetooth directory");
+		return -ENOMEM;
+	}
+
+	sleep_dir = proc_mkdir("sleep", bluetooth_dir);
+	if (sleep_dir == NULL) {
+		BT_ERR("Unable to create /proc/%s directory", PROC_DIR);
+		return -ENOMEM;
+	}
+
+	/* Creating read/write "btwake" entry */
+	ent = create_proc_entry("btwake", 0, sleep_dir);
+	if (ent == NULL) {
+		BT_ERR("Unable to create /proc/%s/btwake entry", PROC_DIR);
+		retval = -ENOMEM;
+		goto fail;
+	}
+	ent->read_proc = bluepower_read_proc_btwake;
+	ent->write_proc = bluepower_write_proc_btwake;
+
+	/* read only proc entries */
+	if (create_proc_read_entry("hostwake", 0, sleep_dir,
+				bluepower_read_proc_hostwake, NULL) == NULL) {
+		BT_ERR("Unable to create /proc/%s/hostwake entry", PROC_DIR);
+		retval = -ENOMEM;
+		goto fail;
+	}
+
+	/* read/write proc entries */
+	ent = create_proc_entry("proto", 0, sleep_dir);
+	if (ent == NULL) {
+		BT_ERR("Unable to create /proc/%s/proto entry", PROC_DIR);
+		retval = -ENOMEM;
+		goto fail;
+	}
+	ent->read_proc = bluesleep_read_proc_proto;
+	ent->write_proc = bluesleep_write_proc_proto;
+
+	/* read only proc entries */
+	if (create_proc_read_entry("asleep", 0,
+			sleep_dir, bluesleep_read_proc_asleep, NULL) == NULL) {
+		BT_ERR("Unable to create /proc/%s/asleep entry", PROC_DIR);
+		retval = -ENOMEM;
+		goto fail;
+	}
+
+	flags = 0; /* clear all status bits */
+
+	/* Initialize spinlock. */
+	spin_lock_init(&rw_lock);
+
+	/* Initialize timer */
+	init_timer(&tx_timer);
+	tx_timer.function = bluesleep_tx_timer_expire;
+	tx_timer.data = 0;
+
+	/* initialize host wake tasklet */
+	tasklet_init(&hostwake_task, bluesleep_hostwake_task, 0);
+
+	hci_register_notifier(&hci_event_nblock);
+
+	return 0;
+
+fail:
+	remove_proc_entry("asleep", sleep_dir);
+	remove_proc_entry("proto", sleep_dir);
+	remove_proc_entry("hostwake", sleep_dir);
+	remove_proc_entry("btwake", sleep_dir);
+	remove_proc_entry("sleep", bluetooth_dir);
+	remove_proc_entry("bluetooth", NULL);
+	return retval;
+}
+
+/**
+ * Cleans up the module.
+ */
+static void __exit bluesleep_exit(void)
+{
+	hci_unregister_notifier(&hci_event_nblock);
+	platform_driver_unregister(&bluesleep_driver);
+
+	remove_proc_entry("asleep", sleep_dir);
+	remove_proc_entry("proto", sleep_dir);
+	remove_proc_entry("hostwake", sleep_dir);
+	remove_proc_entry("btwake", sleep_dir);
+	remove_proc_entry("sleep", bluetooth_dir);
+	remove_proc_entry("bluetooth", NULL);
+}
+
+module_init(bluesleep_init);
+module_exit(bluesleep_exit);
+
+MODULE_DESCRIPTION("Bluetooth Sleep Mode Driver ver " VERSION);
+#ifdef MODULE_LICENSE
+MODULE_LICENSE("GPL");
+#endif
diff --git a/drivers/bluetooth/bluetooth-power.c b/drivers/bluetooth/bluetooth-power.c
new file mode 100644
index 0000000..3bf49d1
--- /dev/null
+++ b/drivers/bluetooth/bluetooth-power.c
@@ -0,0 +1,138 @@
+/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Bluetooth Power Switch Module
+ * controls power to external Bluetooth device
+ * with interface to power management device
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/rfkill.h>
+
+static bool previous;
+
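+/* rfkill set_block hook: 'data' is the platform-supplied power-control
+ * callback. The rfkill 'blocked' state is inverted to get the enable value,
+ * and the callback is only invoked when the state actually changes. */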
+static int bluetooth_toggle_radio(void *data, bool blocked)
+{
+	int ret = 0;
+	int (*power_control)(int enable);
+
+	power_control = data;
+	if (previous != blocked)
+		ret = (*power_control)(!blocked);
+	if (!ret)
+		previous = blocked;
+	return ret;
+}
+
+static const struct rfkill_ops bluetooth_power_rfkill_ops = {
+	.set_block = bluetooth_toggle_radio,
+};
+
+static int bluetooth_power_rfkill_probe(struct platform_device *pdev)
+{
+	struct rfkill *rfkill;
+	int ret;
+
+	rfkill = rfkill_alloc("bt_power", &pdev->dev, RFKILL_TYPE_BLUETOOTH,
+			      &bluetooth_power_rfkill_ops,
+			      pdev->dev.platform_data);
+
+	if (!rfkill) {
+		dev_err(&pdev->dev, "rfkill allocate failed\n");
+		return -ENOMEM;
+	}
+
+	/* force Bluetooth off during init to allow for user control */
+	rfkill_init_sw_state(rfkill, 1);
+	previous = 1;
+
+	ret = rfkill_register(rfkill);
+	if (ret) {
+		dev_err(&pdev->dev, "rfkill register failed=%d\n", ret);
+		rfkill_destroy(rfkill);
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, rfkill);
+
+	return 0;
+}
+
+static void bluetooth_power_rfkill_remove(struct platform_device *pdev)
+{
+	struct rfkill *rfkill;
+
+	dev_dbg(&pdev->dev, "%s\n", __func__);
+
+	rfkill = platform_get_drvdata(pdev);
+	if (rfkill)
+		rfkill_unregister(rfkill);
+	rfkill_destroy(rfkill);
+	platform_set_drvdata(pdev, NULL);
+}
+
+static int __devinit bt_power_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	dev_dbg(&pdev->dev, "%s\n", __func__);
+
+	if (!pdev->dev.platform_data) {
+		dev_err(&pdev->dev, "platform data not initialized\n");
+		return -ENOSYS;
+	}
+
+	ret = bluetooth_power_rfkill_probe(pdev);
+
+	return ret;
+}
+
+static int __devexit bt_power_remove(struct platform_device *pdev)
+{
+	dev_dbg(&pdev->dev, "%s\n", __func__);
+
+	bluetooth_power_rfkill_remove(pdev);
+
+	return 0;
+}
+
+static struct platform_driver bt_power_driver = {
+	.probe = bt_power_probe,
+	.remove = __devexit_p(bt_power_remove),
+	.driver = {
+		.name = "bt_power",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init bluetooth_power_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&bt_power_driver);
+	return ret;
+}
+
+static void __exit bluetooth_power_exit(void)
+{
+	platform_driver_unregister(&bt_power_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM Bluetooth power control driver");
+MODULE_VERSION("1.40");
+
+module_init(bluetooth_power_init);
+module_exit(bluetooth_power_exit);
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 4fc0194..0c8a655 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -761,7 +761,7 @@
 }
 
 
-static const struct pcmcia_device_id bt3c_ids[] = {
+static struct pcmcia_device_id bt3c_ids[] = {
 	PCMCIA_DEVICE_PROD_ID13("3COM", "Bluetooth PC Card", 0xefce0a31, 0xd4ce9b02),
 	PCMCIA_DEVICE_NULL
 };
diff --git a/drivers/bluetooth/btmrvl_debugfs.c b/drivers/bluetooth/btmrvl_debugfs.c
index 8ecf4c6..fd6305b 100644
--- a/drivers/bluetooth/btmrvl_debugfs.c
+++ b/drivers/bluetooth/btmrvl_debugfs.c
@@ -64,8 +64,6 @@
 		return -EFAULT;
 
 	ret = strict_strtol(buf, 10, &result);
-	if (ret)
-		return ret;
 
 	priv->btmrvl_dev.hscfgcmd = result;
 
@@ -110,8 +108,6 @@
 		return -EFAULT;
 
 	ret = strict_strtol(buf, 10, &result);
-	if (ret)
-		return ret;
 
 	priv->btmrvl_dev.psmode = result;
 
@@ -151,8 +147,6 @@
 		return -EFAULT;
 
 	ret = strict_strtol(buf, 10, &result);
-	if (ret)
-		return ret;
 
 	priv->btmrvl_dev.pscmd = result;
 
@@ -197,8 +191,6 @@
 		return -EFAULT;
 
 	ret = strict_strtol(buf, 16, &result);
-	if (ret)
-		return ret;
 
 	priv->btmrvl_dev.gpio_gap = result;
 
@@ -238,8 +230,6 @@
 		return -EFAULT;
 
 	ret = strict_strtol(buf, 10, &result);
-	if (ret)
-		return ret;
 
 	priv->btmrvl_dev.hscmd = result;
 	if (priv->btmrvl_dev.hscmd) {
@@ -282,8 +272,6 @@
 		return -EFAULT;
 
 	ret = strict_strtol(buf, 10, &result);
-	if (ret)
-		return ret;
 
 	priv->btmrvl_dev.hsmode = result;
 
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index 526b618..f8a0708 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -689,7 +689,7 @@
 	pcmcia_disable_device(link);
 }
 
-static const struct pcmcia_device_id btuart_ids[] = {
+static struct pcmcia_device_id btuart_ids[] = {
 	/* don't use this driver. Use serial_cs + hci_uart instead */
 	PCMCIA_DEVICE_NULL
 };
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 91d13a9..14776e5 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -54,7 +54,6 @@
 #define BTUSB_BCM92035		0x10
 #define BTUSB_BROKEN_ISOC	0x20
 #define BTUSB_WRONG_SCO_MTU	0x40
-#define BTUSB_ATH3012		0x80
 
 static struct usb_device_id btusb_table[] = {
 	/* Generic Bluetooth USB device */
@@ -105,13 +104,12 @@
 
 	/* Atheros 3011 with sflash firmware */
 	{ USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
-	{ USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
 
 	/* Atheros AR9285 Malbec with sflash firmware */
 	{ USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
 
 	/* Atheros 3012 with sflash firmware */
-	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_IGNORE },
 
 	/* Atheros AR5BBU12 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
@@ -438,7 +436,7 @@
 	}
 }
 
-static inline void __fill_isoc_descriptor(struct urb *urb, int len, int mtu)
+static void inline __fill_isoc_descriptor(struct urb *urb, int len, int mtu)
 {
 	int i, offset = 0;
 
@@ -786,7 +784,7 @@
 	}
 }
 
-static inline int __set_isoc_interface(struct hci_dev *hdev, int altsetting)
+static int inline __set_isoc_interface(struct hci_dev *hdev, int altsetting)
 {
 	struct btusb_data *data = hdev->driver_data;
 	struct usb_interface *intf = data->isoc;
@@ -915,15 +913,6 @@
 	if (ignore_sniffer && id->driver_info & BTUSB_SNIFFER)
 		return -ENODEV;
 
-	if (id->driver_info & BTUSB_ATH3012) {
-		struct usb_device *udev = interface_to_usbdev(intf);
-
-		/* Old firmware would otherwise let ath3k driver load
-		 * patch and sysconfig files */
-		if (le16_to_cpu(udev->descriptor.bcdDevice) <= 0x0001)
-			return -ENODEV;
-	}
-
 	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 5e4c2de..26ee0cf 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -636,7 +636,7 @@
 }
 
 
-static const struct pcmcia_device_id dtl1_ids[] = {
+static struct pcmcia_device_id dtl1_ids[] = {
 	PCMCIA_DEVICE_PROD_ID12("Nokia Mobile Phones", "DTL-1", 0xe1bfdd64, 0xe168480d),
 	PCMCIA_DEVICE_PROD_ID12("Nokia Mobile Phones", "DTL-4", 0xe1bfdd64, 0x9102bc82),
 	PCMCIA_DEVICE_PROD_ID12("Socket", "CF", 0xb38bcc2e, 0x44ebf863),
diff --git a/drivers/bluetooth/hci_ibs.c b/drivers/bluetooth/hci_ibs.c
new file mode 100644
index 0000000..2a6f3f8
--- /dev/null
+++ b/drivers/bluetooth/hci_ibs.c
@@ -0,0 +1,820 @@
+/*
+ *  Qualcomm's Bluetooth Software In-Band Sleep UART protocol
+ *
+ *  HCI_IBS (HCI In-Band Sleep) is Qualcomm's power management
+ *  protocol extension to H4.
+ *
+ *  Copyright (C) 2007 Texas Instruments, Inc.
+ *  Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ *  Acknowledgements:
+ *  This file is based on hci_ll.c, which was...
+ *  Written by Ohad Ben-Cohen <ohad@bencohen.org>
+ *  which was in turn based on hci_h4.c, which was written
+ *  by Maxim Krasnyansky and Marcel Holtmann.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2
+ *  as published by the Free Software Foundation
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/poll.h>
+
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/signal.h>
+#include <linux/ioctl.h>
+#include <linux/timer.h>
+#include <linux/skbuff.h>
+#include <linux/serial_core.h>
+
+#ifdef CONFIG_SERIAL_MSM_HS
+#include <mach/msm_serial_hs.h>
+#endif
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "hci_uart.h"
+
+/* HCI_IBS protocol messages */
+#define HCI_IBS_SLEEP_IND	0xFE
+#define HCI_IBS_WAKE_IND	0xFD
+#define HCI_IBS_WAKE_ACK	0xFC
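+/* Each HCI_IBS message is a single byte exchanged in-band on the UART,
+ * alongside the normal H4 packet types handled in ibs_recv(). */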
+
+/* HCI_IBS receiver States */
+#define HCI_IBS_W4_PACKET_TYPE	0
+#define HCI_IBS_W4_EVENT_HDR	1
+#define HCI_IBS_W4_ACL_HDR	2
+#define HCI_IBS_W4_SCO_HDR	3
+#define HCI_IBS_W4_DATA		4
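+/* Receive parser: wait for the packet-type byte, then the matching
+ * event/ACL/SCO header, then the payload. */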
+
+/* HCI_IBS transmit side sleep protocol states */
+enum tx_ibs_states_e {
+	HCI_IBS_TX_ASLEEP,
+	HCI_IBS_TX_WAKING,
+	HCI_IBS_TX_AWAKE,
+};
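+/* TX side transitions: ASLEEP -> WAKING when a frame is queued (WAKE_IND
+ * sent), WAKING -> AWAKE on WAKE_ACK, AWAKE -> ASLEEP when the tx idle
+ * timer expires (SLEEP_IND sent). */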
+
+/* HCI_IBS receive side sleep protocol states */
+enum rx_states_e {
+	HCI_IBS_RX_ASLEEP,
+	HCI_IBS_RX_AWAKE,
+};
+
+/* HCI_IBS transmit and receive side clock state vote */
+enum hci_ibs_clock_state_vote_e {
+	HCI_IBS_VOTE_STATS_UPDATE,
+	HCI_IBS_TX_VOTE_CLOCK_ON,
+	HCI_IBS_TX_VOTE_CLOCK_OFF,
+	HCI_IBS_RX_VOTE_CLOCK_ON,
+	HCI_IBS_RX_VOTE_CLOCK_OFF,
+};
+
+static unsigned long wake_retrans = 1;
+static unsigned long tx_idle_delay = (HZ * 2);
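+/* Both delays are in jiffies and are tunable via the module parameters
+ * declared at the end of this file. */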
+
+struct hci_ibs_cmd {
+	u8 cmd;
+} __attribute__((packed));
+
+struct ibs_struct {
+	unsigned long rx_state;
+	unsigned long rx_count;
+	struct sk_buff *rx_skb;
+	struct sk_buff_head txq;
+	struct sk_buff_head tx_wait_q;	/* HCI_IBS wait queue	*/
+	spinlock_t hci_ibs_lock;	/* HCI_IBS state lock	*/
+	unsigned long tx_ibs_state;	/* HCI_IBS transmit side power state */
+	unsigned long rx_ibs_state;	/* HCI_IBS receive side power state */
+	unsigned long tx_vote;		/* clock must be on for TX */
+	unsigned long rx_vote;		/* clock must be on for RX */
+	struct	timer_list tx_idle_timer;
+	struct	timer_list wake_retrans_timer;
+	/* debug */
+	unsigned long ibs_sent_wacks;
+	unsigned long ibs_sent_slps;
+	unsigned long ibs_sent_wakes;
+	unsigned long ibs_recv_wacks;
+	unsigned long ibs_recv_slps;
+	unsigned long ibs_recv_wakes;
+	unsigned long vote_last_jif;
+	unsigned long vote_on_ticks;
+	unsigned long vote_off_ticks;
+	unsigned long tx_votes_on;
+	unsigned long rx_votes_on;
+	unsigned long tx_votes_off;
+	unsigned long rx_votes_off;
+	unsigned long votes_on;
+	unsigned long votes_off;
+};
+
+#ifdef CONFIG_SERIAL_MSM_HS
+static void __ibs_msm_serial_clock_on(struct tty_struct *tty)
+{
+	struct uart_state *state = tty->driver_data;
+	struct uart_port *port = state->uart_port;
+
+	msm_hs_request_clock_on(port);
+}
+
+static void __ibs_msm_serial_clock_request_off(struct tty_struct *tty)
+{
+	struct uart_state *state = tty->driver_data;
+	struct uart_port *port = state->uart_port;
+
+	msm_hs_request_clock_off(port);
+}
+#else
+static inline void __ibs_msm_serial_clock_on(struct tty_struct *tty) {}
+static inline void __ibs_msm_serial_clock_request_off(struct tty_struct *tty) {}
+#endif
+
+/* clock_vote needs to be called with the ibs lock held */
+static void ibs_msm_serial_clock_vote(unsigned long vote, struct hci_uart *hu)
+{
+	struct ibs_struct *ibs = hu->priv;
+
+	unsigned long old_vote = (ibs->tx_vote | ibs->rx_vote);
+	unsigned long new_vote;
+
+	switch (vote) {
+	default: /* error */
+		BT_ERR("voting irregularity");
+		return;
+	case HCI_IBS_VOTE_STATS_UPDATE:
+		if (old_vote)
+			ibs->vote_off_ticks += (jiffies - ibs->vote_last_jif);
+		else
+			ibs->vote_on_ticks += (jiffies - ibs->vote_last_jif);
+		return;
+	case HCI_IBS_TX_VOTE_CLOCK_ON:
+		ibs->tx_vote = 1;
+		ibs->tx_votes_on++;
+		new_vote = 1;
+		break;
+	case HCI_IBS_RX_VOTE_CLOCK_ON:
+		ibs->rx_vote = 1;
+		ibs->rx_votes_on++;
+		new_vote = 1;
+		break;
+	case HCI_IBS_TX_VOTE_CLOCK_OFF:
+		ibs->tx_vote = 0;
+		ibs->tx_votes_off++;
+		new_vote = ibs->rx_vote | ibs->tx_vote;
+		break;
+	case HCI_IBS_RX_VOTE_CLOCK_OFF:
+		ibs->rx_vote = 0;
+		ibs->rx_votes_off++;
+		new_vote = ibs->rx_vote | ibs->tx_vote;
+		break;
+	}
+	if (new_vote != old_vote) {
+		if (new_vote)
+			__ibs_msm_serial_clock_on(hu->tty);
+		else
+			__ibs_msm_serial_clock_request_off(hu->tty);
+
+		BT_DBG("HCIUART_IBS: vote msm_serial_hs clock %lu(%lu)",
+			new_vote, vote);
+		/* debug */
+		if (new_vote) {
+			ibs->votes_on++;
+			ibs->vote_off_ticks += (jiffies - ibs->vote_last_jif);
+		} else {
+			ibs->votes_off++;
+			ibs->vote_on_ticks += (jiffies - ibs->vote_last_jif);
+		}
+		ibs->vote_last_jif = jiffies;
+	}
+}
+
+/*
+ * Builds and sends an HCI_IBS command packet.
+ * These are very simple packets with only 1 cmd byte
+ */
+static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu)
+{
+	int err = 0;
+	struct sk_buff *skb = NULL;
+	struct ibs_struct *ibs = hu->priv;
+	struct hci_ibs_cmd *hci_ibs_packet;
+
+	BT_DBG("hu %p cmd 0x%x", hu, cmd);
+
+	/* allocate packet */
+	skb = bt_skb_alloc(1, GFP_ATOMIC);
+	if (!skb) {
+		BT_ERR("cannot allocate memory for HCI_IBS packet");
+		err = -ENOMEM;
+		goto out;
+	}
+
+	/* prepare packet */
+	hci_ibs_packet = (struct hci_ibs_cmd *) skb_put(skb, 1);
+	hci_ibs_packet->cmd = cmd;
+	skb->dev = (void *) hu->hdev;
+
+	/* send packet */
+	skb_queue_tail(&ibs->txq, skb);
+out:
+	return err;
+}
+
+static void hci_ibs_tx_idle_timeout(unsigned long arg)
+{
+	struct hci_uart *hu = (struct hci_uart *) arg;
+	struct ibs_struct *ibs = hu->priv;
+	unsigned long flags;
+	unsigned long vote_tx_sleep = 0;
+
+	BT_DBG("hu %p idle timeout in %lu state", hu, ibs->tx_ibs_state);
+
+	spin_lock_irqsave_nested(&ibs->hci_ibs_lock,
+					flags, SINGLE_DEPTH_NESTING);
+
+	switch (ibs->tx_ibs_state) {
+	default:
+	case HCI_IBS_TX_ASLEEP:
+	case HCI_IBS_TX_WAKING:
+		BT_ERR("spurious timeout in tx state %ld", ibs->tx_ibs_state);
+		goto out;
+	case HCI_IBS_TX_AWAKE: /* TX_IDLE, go to SLEEP */
+		if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) {
+			BT_ERR("cannot send SLEEP to device");
+			goto out;
+		}
+		ibs->tx_ibs_state = HCI_IBS_TX_ASLEEP;
+		ibs->ibs_sent_slps++; /* debug */
+		vote_tx_sleep = 1;
+		break;
+	}
+
+	spin_unlock_irqrestore(&ibs->hci_ibs_lock, flags);
+
+	hci_uart_tx_wakeup(hu);  /* run HCI tx handling unlocked */
+
+	if (!vote_tx_sleep)
+		return;
+	/* now that message queued to tty driver, vote for tty clocks off */
+	/* It is up to the tty driver to pend the clocks off until tx done. */
+
+	spin_lock_irqsave_nested(&ibs->hci_ibs_lock,
+					flags, SINGLE_DEPTH_NESTING);
+	ibs_msm_serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
+out:
+	spin_unlock_irqrestore(&ibs->hci_ibs_lock, flags);
+}
+
+static void hci_ibs_wake_retrans_timeout(unsigned long arg)
+{
+	struct hci_uart *hu = (struct hci_uart *) arg;
+	struct ibs_struct *ibs = hu->priv;
+	unsigned long flags;
+	unsigned long retransmit = 0;
+
+	BT_DBG("hu %p wake retransmit timeout in %lu state",
+		hu, ibs->tx_ibs_state);
+
+	spin_lock_irqsave_nested(&ibs->hci_ibs_lock,
+					flags, SINGLE_DEPTH_NESTING);
+
+	switch (ibs->tx_ibs_state) {
+	default:
+	case HCI_IBS_TX_ASLEEP:
+	case HCI_IBS_TX_AWAKE:
+		BT_ERR("spurious timeout in tx state %ld", ibs->tx_ibs_state);
+		goto out;
+	case HCI_IBS_TX_WAKING: /* No WAKE_ACK, retransmit WAKE */
+		retransmit = 1;
+		if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
+			BT_ERR("cannot acknowledge device wake up");
+			goto out;
+		}
+		ibs->ibs_sent_wakes++; /* debug */
+		mod_timer(&ibs->wake_retrans_timer, jiffies + wake_retrans);
+		break;
+	}
+out:
+	spin_unlock_irqrestore(&ibs->hci_ibs_lock, flags);
+	if (retransmit)
+		hci_uart_tx_wakeup(hu);
+}
+
+/* Initialize protocol */
+static int ibs_open(struct hci_uart *hu)
+{
+	struct ibs_struct *ibs;
+
+	BT_DBG("hu %p", hu);
+
+	ibs = kzalloc(sizeof(*ibs), GFP_ATOMIC);
+	if (!ibs)
+		return -ENOMEM;
+
+	skb_queue_head_init(&ibs->txq);
+	skb_queue_head_init(&ibs->tx_wait_q);
+	spin_lock_init(&ibs->hci_ibs_lock);
+
+	/* Assume we start with both sides asleep -- extra wakes OK */
+	ibs->tx_ibs_state = HCI_IBS_TX_ASLEEP;
+	ibs->rx_ibs_state = HCI_IBS_RX_ASLEEP;
+	/* clocks actually on, but we start votes off */
+	ibs->tx_vote = 0;
+	ibs->rx_vote = 0;
+
+	/* debug */
+	ibs->ibs_sent_wacks = 0;
+	ibs->ibs_sent_slps = 0;
+	ibs->ibs_sent_wakes = 0;
+	ibs->ibs_recv_wacks = 0;
+	ibs->ibs_recv_slps = 0;
+	ibs->ibs_recv_wakes = 0;
+	ibs->vote_last_jif = jiffies;
+	ibs->vote_on_ticks = 0;
+	ibs->vote_off_ticks = 0;
+	ibs->votes_on = 0;
+	ibs->votes_off = 0;
+	ibs->tx_votes_on = 0;
+	ibs->tx_votes_off = 0;
+	ibs->rx_votes_on = 0;
+	ibs->rx_votes_off = 0;
+
+	hu->priv = ibs;
+
+	init_timer(&ibs->wake_retrans_timer);
+	ibs->wake_retrans_timer.function = hci_ibs_wake_retrans_timeout;
+	ibs->wake_retrans_timer.data     = (u_long) hu;
+
+	init_timer(&ibs->tx_idle_timer);
+	ibs->tx_idle_timer.function = hci_ibs_tx_idle_timeout;
+	ibs->tx_idle_timer.data     = (u_long) hu;
+
+	BT_INFO("HCI_IBS open, tx_idle_delay=%lu, wake_retrans=%lu",
+		tx_idle_delay, wake_retrans);
+
+	return 0;
+}
+
+void ibs_log_local_stats(struct ibs_struct *ibs)
+{
+	BT_INFO("HCI_IBS stats: tx_idle_delay=%lu, wake_retrans=%lu",
+		tx_idle_delay, wake_retrans);
+
+	BT_INFO("HCI_IBS stats: tx_ibs_state=%lu, rx_ibs_state=%lu",
+		ibs->tx_ibs_state, ibs->rx_ibs_state);
+	BT_INFO("HCI_IBS stats: sent: sleep=%lu, wake=%lu, wake_ack=%lu",
+		ibs->ibs_sent_slps, ibs->ibs_sent_wakes, ibs->ibs_sent_wacks);
+	BT_INFO("HCI_IBS stats: recv: sleep=%lu, wake=%lu, wake_ack=%lu",
+		ibs->ibs_recv_slps, ibs->ibs_recv_wakes, ibs->ibs_recv_wacks);
+
+	BT_INFO("HCI_IBS stats: queues: txq=%s, txwaitq=%s",
+		skb_queue_empty(&(ibs->txq)) ? "empty" : "full",
+		skb_queue_empty(&(ibs->tx_wait_q)) ? "empty" : "full");
+
+	BT_INFO("HCI_IBS stats: vote state: tx=%lu, rx=%lu",
+		ibs->tx_vote, ibs->rx_vote);
+	BT_INFO("HCI_IBS stats: tx votes cast: on=%lu, off=%lu",
+		ibs->tx_votes_on, ibs->tx_votes_off);
+	BT_INFO("HCI_IBS stats: rx votes cast: on=%lu, off=%lu",
+		ibs->rx_votes_on, ibs->rx_votes_off);
+	BT_INFO("HCI_IBS stats: msm_clock votes cast: on=%lu, off=%lu",
+		ibs->votes_on, ibs->votes_off);
+	BT_INFO("HCI_IBS stats: vote ticks: on=%lu, off=%lu",
+		ibs->vote_on_ticks, ibs->vote_off_ticks);
+}
+
+/* Flush protocol data */
+static int ibs_flush(struct hci_uart *hu)
+{
+	struct ibs_struct *ibs = hu->priv;
+
+	BT_DBG("hu %p", hu);
+
+	skb_queue_purge(&ibs->tx_wait_q);
+	skb_queue_purge(&ibs->txq);
+
+	return 0;
+}
+
+/* Close protocol */
+static int ibs_close(struct hci_uart *hu)
+{
+	struct ibs_struct *ibs = hu->priv;
+
+	BT_DBG("hu %p", hu);
+
+	ibs_msm_serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE, hu);
+	ibs_log_local_stats(ibs);
+
+	skb_queue_purge(&ibs->tx_wait_q);
+	skb_queue_purge(&ibs->txq);
+	del_timer(&ibs->tx_idle_timer);
+	del_timer(&ibs->wake_retrans_timer);
+
+	kfree_skb(ibs->rx_skb);
+
+	hu->priv = NULL;
+
+	kfree(ibs);
+
+	return 0;
+}
+
+/*
+ * Called upon a wake-up-indication from the device
+ */
+static void ibs_device_want_to_wakeup(struct hci_uart *hu)
+{
+	unsigned long flags;
+	struct ibs_struct *ibs = hu->priv;
+
+	BT_DBG("hu %p", hu);
+
+	/* lock hci_ibs state */
+	spin_lock_irqsave(&ibs->hci_ibs_lock, flags);
+
+	/* debug */
+	ibs->ibs_recv_wakes++;
+
+	switch (ibs->rx_ibs_state) {
+	case HCI_IBS_RX_ASLEEP:
+		/* Make sure clock is on - we may have turned clock off since
+		 * receiving the wake up indicator
+		 */
+		ibs_msm_serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);
+		ibs->rx_ibs_state = HCI_IBS_RX_AWAKE;
+		/* deliberate fall-through */
+	case HCI_IBS_RX_AWAKE:
+		/* Always acknowledge device wake up,
+		 * sending IBS message doesn't count as TX ON.
+		 */
+		if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) {
+			BT_ERR("cannot acknowledge device wake up");
+			goto out;
+		}
+		ibs->ibs_sent_wacks++; /* debug */
+		break;
+	default:
+		/* any other state is illegal */
+		BT_ERR("received HCI_IBS_WAKE_IND in rx state %ld",
+			ibs->rx_ibs_state);
+		break;
+	}
+
+out:
+	spin_unlock_irqrestore(&ibs->hci_ibs_lock, flags);
+
+	/* actually send the packets */
+	hci_uart_tx_wakeup(hu);
+}
+
+/*
+ * Called upon a sleep-indication from the device
+ */
+static void ibs_device_want_to_sleep(struct hci_uart *hu)
+{
+	unsigned long flags;
+	struct ibs_struct *ibs = hu->priv;
+
+	BT_DBG("hu %p", hu);
+
+	/* lock hci_ibs state */
+	spin_lock_irqsave(&ibs->hci_ibs_lock, flags);
+
+	/* debug */
+	ibs->ibs_recv_slps++;
+
+	switch (ibs->rx_ibs_state) {
+	case HCI_IBS_RX_AWAKE:
+		/* update state */
+		ibs->rx_ibs_state = HCI_IBS_RX_ASLEEP;
+		ibs_msm_serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
+		break;
+	case HCI_IBS_RX_ASLEEP:
+		/* deliberate fall-through */
+	default:
+		/* any other state is illegal */
+		BT_ERR("received HCI_IBS_SLEEP_IND in rx state %ld",
+			ibs->rx_ibs_state);
+		break;
+	}
+
+	spin_unlock_irqrestore(&ibs->hci_ibs_lock, flags);
+}
+
+/*
+ * Called upon wake-up-acknowledgement from the device
+ */
+static void ibs_device_woke_up(struct hci_uart *hu)
+{
+	unsigned long flags;
+	struct ibs_struct *ibs = hu->priv;
+	struct sk_buff *skb = NULL;
+
+	BT_DBG("hu %p", hu);
+
+	/* lock hci_ibs state */
+	spin_lock_irqsave(&ibs->hci_ibs_lock, flags);
+
+	/* debug */
+	ibs->ibs_recv_wacks++;
+
+	switch (ibs->tx_ibs_state) {
+	case HCI_IBS_TX_ASLEEP:
+		/* This could be a spurious RX wake on the BT chip.
+		 * Send it another SLEEP, otherwise it will stay awake. */
+	default:
+		BT_ERR("received HCI_IBS_WAKE_ACK in tx state %ld",
+			ibs->tx_ibs_state);
+		break;
+	case HCI_IBS_TX_AWAKE:
+		/* expect one if we send 2 WAKEs */
+		BT_DBG("received HCI_IBS_WAKE_ACK in tx state %ld",
+			ibs->tx_ibs_state);
+		break;
+	case HCI_IBS_TX_WAKING:
+		/* send pending packets */
+		while ((skb = skb_dequeue(&ibs->tx_wait_q)))
+			skb_queue_tail(&ibs->txq, skb);
+		/* switch timers and change state to HCI_IBS_TX_AWAKE */
+		del_timer(&ibs->wake_retrans_timer);
+		mod_timer(&ibs->tx_idle_timer, jiffies + tx_idle_delay);
+		ibs->tx_ibs_state = HCI_IBS_TX_AWAKE;
+	}
+
+	spin_unlock_irqrestore(&ibs->hci_ibs_lock, flags);
+
+	/* actually send the packets */
+	hci_uart_tx_wakeup(hu);
+}
+
+/* Enqueue frame for transmission (padding, crc, etc.) */
+/* may be called from two simultaneous tasklets */
+static int ibs_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+{
+	unsigned long flags = 0;
+	struct ibs_struct *ibs = hu->priv;
+
+	BT_DBG("hu %p skb %p", hu, skb);
+
+	/* Prepend skb with frame type */
+	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+
+	/* lock hci_ibs state */
+	spin_lock_irqsave(&ibs->hci_ibs_lock, flags);
+
+	/* act according to current state */
+	switch (ibs->tx_ibs_state) {
+	case HCI_IBS_TX_AWAKE:
+		BT_DBG("device awake, sending normally");
+		skb_queue_tail(&ibs->txq, skb);
+		mod_timer(&ibs->tx_idle_timer, jiffies + tx_idle_delay);
+		break;
+
+	case HCI_IBS_TX_ASLEEP:
+		BT_DBG("device asleep, waking up and queueing packet");
+		ibs_msm_serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);
+		/* save packet for later */
+		skb_queue_tail(&ibs->tx_wait_q, skb);
+		/* awake device */
+		if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
+			BT_ERR("cannot send WAKE to device");
+			break;
+		}
+		ibs->ibs_sent_wakes++; /* debug */
+
+		/* start retransmit timer */
+		mod_timer(&ibs->wake_retrans_timer, jiffies + wake_retrans);
+
+		ibs->tx_ibs_state = HCI_IBS_TX_WAKING;
+		break;
+
+	case HCI_IBS_TX_WAKING:
+		BT_DBG("device waking up, queueing packet");
+		/* transient state; just keep packet for later */
+		skb_queue_tail(&ibs->tx_wait_q, skb);
+		break;
+
+	default:
+		BT_ERR("illegal tx state: %ld (losing packet)",
+			ibs->tx_ibs_state);
+		kfree_skb(skb);
+		break;
+	}
+
+	spin_unlock_irqrestore(&ibs->hci_ibs_lock, flags);
+
+	return 0;
+}
+
+static inline int ibs_check_data_len(struct ibs_struct *ibs, int len)
+{
+	register int room = skb_tailroom(ibs->rx_skb);
+
+	BT_DBG("len %d room %d", len, room);
+
+	if (!len) {
+		hci_recv_frame(ibs->rx_skb);
+	} else if (len > room) {
+		BT_ERR("Data length is too large");
+		kfree_skb(ibs->rx_skb);
+	} else {
+		ibs->rx_state = HCI_IBS_W4_DATA;
+		ibs->rx_count = len;
+		return len;
+	}
+
+	ibs->rx_state = HCI_IBS_W4_PACKET_TYPE;
+	ibs->rx_skb   = NULL;
+	ibs->rx_count = 0;
+
+	return 0;
+}
+
+/* Recv data */
+static int ibs_recv(struct hci_uart *hu, void *data, int count)
+{
+	struct ibs_struct *ibs = hu->priv;
+	register char *ptr;
+	struct hci_event_hdr *eh;
+	struct hci_acl_hdr   *ah;
+	struct hci_sco_hdr   *sh;
+	register int len, type, dlen;
+
+	BT_DBG("hu %p count %d rx_state %ld rx_count %ld",
+			hu, count, ibs->rx_state, ibs->rx_count);
+
+	ptr = data;
+	while (count) {
+		if (ibs->rx_count) {
+			len = min_t(unsigned int, ibs->rx_count, count);
+			memcpy(skb_put(ibs->rx_skb, len), ptr, len);
+			ibs->rx_count -= len; count -= len; ptr += len;
+
+			if (ibs->rx_count)
+				continue;
+
+			switch (ibs->rx_state) {
+			case HCI_IBS_W4_DATA:
+				BT_DBG("Complete data");
+				hci_recv_frame(ibs->rx_skb);
+
+				ibs->rx_state = HCI_IBS_W4_PACKET_TYPE;
+				ibs->rx_skb = NULL;
+				continue;
+
+			case HCI_IBS_W4_EVENT_HDR:
+				eh = (struct hci_event_hdr *) ibs->rx_skb->data;
+
+				BT_DBG("Event header: evt 0x%2.2x plen %d",
+					eh->evt, eh->plen);
+
+				ibs_check_data_len(ibs, eh->plen);
+				continue;
+
+			case HCI_IBS_W4_ACL_HDR:
+				ah = (struct hci_acl_hdr *) ibs->rx_skb->data;
+				dlen = __le16_to_cpu(ah->dlen);
+
+				BT_DBG("ACL header: dlen %d", dlen);
+
+				ibs_check_data_len(ibs, dlen);
+				continue;
+
+			case HCI_IBS_W4_SCO_HDR:
+				sh = (struct hci_sco_hdr *) ibs->rx_skb->data;
+
+				BT_DBG("SCO header: dlen %d", sh->dlen);
+
+				ibs_check_data_len(ibs, sh->dlen);
+				continue;
+			}
+		}
+
+		/* HCI_IBS_W4_PACKET_TYPE */
+		switch (*ptr) {
+		case HCI_EVENT_PKT:
+			BT_DBG("Event packet");
+			ibs->rx_state = HCI_IBS_W4_EVENT_HDR;
+			ibs->rx_count = HCI_EVENT_HDR_SIZE;
+			type = HCI_EVENT_PKT;
+			break;
+
+		case HCI_ACLDATA_PKT:
+			BT_DBG("ACL packet");
+			ibs->rx_state = HCI_IBS_W4_ACL_HDR;
+			ibs->rx_count = HCI_ACL_HDR_SIZE;
+			type = HCI_ACLDATA_PKT;
+			break;
+
+		case HCI_SCODATA_PKT:
+			BT_DBG("SCO packet");
+			ibs->rx_state = HCI_IBS_W4_SCO_HDR;
+			ibs->rx_count = HCI_SCO_HDR_SIZE;
+			type = HCI_SCODATA_PKT;
+			break;
+
+		/* HCI_IBS signals */
+		case HCI_IBS_SLEEP_IND:
+			BT_DBG("HCI_IBS_SLEEP_IND packet");
+			ibs_device_want_to_sleep(hu);
+			ptr++; count--;
+			continue;
+
+		case HCI_IBS_WAKE_IND:
+			BT_DBG("HCI_IBS_WAKE_IND packet");
+			ibs_device_want_to_wakeup(hu);
+			ptr++; count--;
+			continue;
+
+		case HCI_IBS_WAKE_ACK:
+			BT_DBG("HCI_IBS_WAKE_ACK packet");
+			ibs_device_woke_up(hu);
+			ptr++; count--;
+			continue;
+
+		default:
+			BT_ERR("Unknown HCI packet type %2.2x", (__u8)*ptr);
+			hu->hdev->stat.err_rx++;
+			ptr++; count--;
+			continue;
+		}
+
+		ptr++; count--;
+
+		/* Allocate packet */
+		ibs->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
+		if (!ibs->rx_skb) {
+			BT_ERR("Can't allocate mem for new packet");
+			ibs->rx_state = HCI_IBS_W4_PACKET_TYPE;
+			ibs->rx_count = 0;
+			return 0;
+		}
+
+		ibs->rx_skb->dev = (void *) hu->hdev;
+		bt_cb(ibs->rx_skb)->pkt_type = type;
+	}
+
+	return count;
+}
+
+static struct sk_buff *ibs_dequeue(struct hci_uart *hu)
+{
+	struct ibs_struct *ibs = hu->priv;
+	return skb_dequeue(&ibs->txq);
+}
+
+static struct hci_uart_proto ibs_p = {
+	.id		= HCI_UART_IBS,
+	.open		= ibs_open,
+	.close		= ibs_close,
+	.recv		= ibs_recv,
+	.enqueue	= ibs_enqueue,
+	.dequeue	= ibs_dequeue,
+	.flush		= ibs_flush,
+};
+
+int ibs_init(void)
+{
+	int err = hci_uart_register_proto(&ibs_p);
+
+	if (!err)
+		BT_INFO("HCI_IBS protocol initialized");
+	else
+		BT_ERR("HCI_IBS protocol registration failed");
+
+	return err;
+}
+
+int ibs_deinit(void)
+{
+	return hci_uart_unregister_proto(&ibs_p);
+}
+
+module_param(wake_retrans, ulong, 0644);
+MODULE_PARM_DESC(wake_retrans, "Delay (1/HZ) to retransmit WAKE_IND");
+
+module_param(tx_idle_delay, ulong, 0644);
+MODULE_PARM_DESC(tx_idle_delay, "Delay (1/HZ) since last tx for SLEEP_IND");
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 48ad2a7..b07af02 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -2,9 +2,9 @@
  *
  *  Bluetooth HCI UART driver
  *
- *  Copyright (C) 2000-2001  Qualcomm Incorporated
  *  Copyright (C) 2002-2003  Maxim Krasnyansky <maxk@qualcomm.com>
  *  Copyright (C) 2004-2005  Marcel Holtmann <marcel@holtmann.org>
+ *  Copyright (c) 2000-2001, 2010-2011, Code Aurora Forum. All rights reserved.
  *
  *
  *  This program is free software; you can redistribute it and/or modify
@@ -359,6 +359,7 @@
  */
 static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *flags, int count)
 {
+	int ret;
 	struct hci_uart *hu = (void *)tty->disc_data;
 
 	if (!hu || tty != hu->tty)
@@ -368,8 +369,9 @@
 		return;
 
 	spin_lock(&hu->rx_lock);
-	hu->proto->recv(hu, (void *) data, count);
-	hu->hdev->stat.byte_rx += count;
+	ret = hu->proto->recv(hu, (void *) data, count);
+	if (ret > 0)
+		hu->hdev->stat.byte_rx += count;
 	spin_unlock(&hu->rx_lock);
 
 	tty_unthrottle(tty);
@@ -468,11 +470,18 @@
 
 	switch (cmd) {
 	case HCIUARTSETPROTO:
-		if (!test_and_set_bit(HCI_UART_PROTO_SET, &hu->flags)) {
+		if (!test_and_set_bit(HCI_UART_PROTO_SET_IN_PROGRESS,
+			&hu->flags) && !test_bit(HCI_UART_PROTO_SET,
+				&hu->flags)) {
 			err = hci_uart_set_proto(hu, arg);
 			if (err) {
-				clear_bit(HCI_UART_PROTO_SET, &hu->flags);
+				clear_bit(HCI_UART_PROTO_SET_IN_PROGRESS,
+						&hu->flags);
 				return err;
+			} else {
+				set_bit(HCI_UART_PROTO_SET, &hu->flags);
+				clear_bit(HCI_UART_PROTO_SET_IN_PROGRESS,
+						&hu->flags);
 			}
 		} else
 			return -EBUSY;
@@ -565,6 +574,9 @@
 #ifdef CONFIG_BT_HCIUART_ATH3K
 	ath_init();
 #endif
+#ifdef CONFIG_BT_HCIUART_IBS
+	ibs_init();
+#endif
 
 	return 0;
 }
@@ -585,6 +597,9 @@
 #ifdef CONFIG_BT_HCIUART_ATH3K
 	ath_deinit();
 #endif
+#ifdef CONFIG_BT_HCIUART_IBS
+	ibs_deinit();
+#endif
 
 	/* Release tty registration of line discipline */
 	if ((err = tty_unregister_ldisc(N_HCI)))
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index 7e4b435..38595e7 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -207,7 +207,7 @@
 		/*
 		 * This state means that both the host and the BRF chip
 		 * have simultaneously sent a wake-up-indication packet.
-		 * Traditionally, in this case, receiving a wake-up-indication
+		 * Traditionally, in this case, receiving a wake-up-indication
 		 * was enough and an additional wake-up-ack wasn't needed.
 		 * This has changed with the BRF6350, which does require an
 		 * explicit wake-up-ack. Other BRF versions, which do not
diff --git a/drivers/bluetooth/hci_smd.c b/drivers/bluetooth/hci_smd.c
new file mode 100644
index 0000000..7132c7c
--- /dev/null
+++ b/drivers/bluetooth/hci_smd.c
@@ -0,0 +1,305 @@
+/*
+ *  HCI_SMD (HCI Shared Memory Driver) is Qualcomm's shared memory driver
+ *  for the Bluetooth HCI protocol.
+ *
+ *  Copyright (c) 2000-2001, 2011 Code Aurora Forum. All rights reserved.
+ *  Copyright (C) 2002-2003  Maxim Krasnyansky <maxk@qualcomm.com>
+ *  Copyright (C) 2004-2006  Marcel Holtmann <marcel@holtmann.org>
+ *
+ *  This file is based on drivers/bluetooth/hci_vhci.c
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2
+ *  as published by the Free Software Foundation
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/hci.h>
+#include <mach/msm_smd.h>
+
+#define EVENT_CHANNEL "APPS_RIVA_BT_CMD"
+#define DATA_CHANNEL "APPS_RIVA_BT_ACL"
+
+struct hci_smd_data {
+	struct hci_dev *hdev;
+
+	struct smd_channel *event_channel;
+	struct smd_channel *data_channel;
+};
+struct hci_smd_data hs;
+
+static int hci_smd_open(struct hci_dev *hdev)
+{
+	set_bit(HCI_RUNNING, &hdev->flags);
+	return 0;
+}
+
+
+static int hci_smd_close(struct hci_dev *hdev)
+{
+	if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
+		return 0;
+	else
+		return -EPERM;
+}
+
+
+static void hci_smd_destruct(struct hci_dev *hdev)
+{
+	kfree(hdev->driver_data);
+}
+
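+/*
+ * Pull every pending chunk of ACL data out of the SMD data channel, copy
+ * it into a freshly allocated skb and hand it to the HCI core.
+ */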
+static void hci_smd_recv_data(unsigned long arg)
+{
+	int len;
+	int rc;
+	struct sk_buff *skb;
+	unsigned  char *buf;
+	struct hci_smd_data *hsmd = &hs;
+
+	len = smd_read_avail(hsmd->data_channel);
+
+	while (len > 0) {
+		skb = bt_skb_alloc(len, GFP_KERNEL);
+		if (!skb) {
+			BT_ERR("Error in allocating socket buffer");
+			return;
+		}
+
+		buf = kmalloc(len, GFP_KERNEL);
+		if (!buf) {
+			BT_ERR("Error in allocating buffer");
+			kfree_skb(skb);
+			return;
+		}
+
+		rc = smd_read_from_cb(hsmd->data_channel, (void *)buf, len);
+		if (rc < len) {
+			BT_ERR("Error in reading from the channel");
+			kfree(buf);
+			kfree_skb(skb);
+			return;
+		}
+
+		memcpy(skb_put(skb, len), buf, len);
+		skb->dev = (void *)hsmd->hdev;
+		bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
+
+		skb_orphan(skb);
+
+		rc = hci_recv_frame(skb);
+		if (rc < 0) {
+			BT_ERR("Error in passing the packet to HCI Layer");
+			kfree(buf);
+			return;
+		}
+
+		kfree(buf);
+		len = smd_read_avail(hsmd->data_channel);
+	}
+}
+
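+/*
+ * Drain pending HCI events from the SMD event channel and deliver them
+ * to the HCI core, bailing out if the advertised length exceeds
+ * HCI_MAX_FRAME_SIZE.
+ */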
+static void hci_smd_recv_event(unsigned long arg)
+{
+	int len;
+	int rc;
+	struct sk_buff *skb;
+	unsigned  char *buf;
+	struct hci_smd_data *hsmd = &hs;
+
+	len = smd_read_avail(hsmd->event_channel);
+	if (len > HCI_MAX_FRAME_SIZE) {
+		BT_ERR("Frame larger than the allowed size");
+		return;
+	}
+
+	while (len > 0) {
+		skb = bt_skb_alloc(len, GFP_KERNEL);
+		if (!skb)
+			return;
+
+		buf = kmalloc(len, GFP_KERNEL);
+		if (!buf) {
+			kfree_skb(skb);
+			return;
+		}
+
+		rc = smd_read_from_cb(hsmd->event_channel, (void *)buf, len);
+
+		memcpy(skb_put(skb, len), buf, len);
+		skb->dev = (void *)hsmd->hdev;
+		bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
+
+		skb_orphan(skb);
+
+		rc = hci_recv_frame(skb);
+		if (rc < 0) {
+			BT_ERR("Error in passing the packet to HCI Layer");
+			kfree(buf);
+			return;
+		}
+
+		kfree(buf);
+		len = smd_read_avail(hsmd->event_channel);
+	}
+}
+
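+/*
+ * Route an outgoing frame to the matching SMD channel: HCI commands go
+ * over the command/event channel, ACL and SCO data over the data channel.
+ */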
+static int hci_smd_send_frame(struct sk_buff *skb)
+{
+	int len;
+
+	switch (bt_cb(skb)->pkt_type) {
+	case HCI_COMMAND_PKT:
+		len = smd_write(hs.event_channel, skb->data, skb->len);
+		if (len < skb->len) {
+			BT_ERR("Failed to write Command %d", len);
+			return -ENODEV;
+		}
+		break;
+	case HCI_ACLDATA_PKT:
+	case HCI_SCODATA_PKT:
+		len = smd_write(hs.data_channel, skb->data, skb->len);
+		if (len < skb->len) {
+			BT_ERR("Failed to write Data %d", len);
+			return -ENODEV;
+		}
+		break;
+	default:
+		BT_ERR("Unknown packet type");
+		return -ENODEV;
+		break;
+	}
+	return 0;
+}
+
+
+static void hci_smd_notify_event(void *data, unsigned int event)
+{
+	struct hci_dev *hdev = hs.hdev;
+
+	if (!hdev) {
+		BT_ERR("Frame for unknown HCI device (hdev=NULL)");
+		return;
+	}
+
+	switch (event) {
+	case SMD_EVENT_DATA:
+		hci_smd_recv_event(event);
+		break;
+	case SMD_EVENT_OPEN:
+		hci_smd_open(hdev);
+		break;
+	case SMD_EVENT_CLOSE:
+		hci_smd_close(hdev);
+		break;
+	default:
+		break;
+	}
+}
+
+static void hci_smd_notify_data(void *data, unsigned int event)
+{
+	struct hci_dev *hdev = hs.hdev;
+	if (!hdev) {
+		BT_ERR("HCI device (hdev=NULL)");
+		return;
+	}
+
+	switch (event) {
+	case SMD_EVENT_DATA:
+		hci_smd_recv_data(event);
+		break;
+	case SMD_EVENT_OPEN:
+		hci_smd_open(hdev);
+		break;
+	case SMD_EVENT_CLOSE:
+		hci_smd_close(hdev);
+		break;
+	default:
+		break;
+	}
+
+}
+
+static int hci_smd_register_dev(struct hci_smd_data *hsmd)
+{
+	struct hci_dev *hdev;
+	int rc;
+
+	/* Initialize and register HCI device */
+	hdev = hci_alloc_dev();
+	if (!hdev) {
+		BT_ERR("Can't allocate HCI device");
+		return -ENOMEM;
+	}
+
+	hsmd->hdev = hdev;
+	hdev->bus = HCI_SMD;
+	hdev->driver_data = hsmd;
+	hdev->open  = hci_smd_open;
+	hdev->close = hci_smd_close;
+	hdev->send  = hci_smd_send_frame;
+	hdev->destruct = hci_smd_destruct;
+	hdev->owner = THIS_MODULE;
+
+	/* Open the SMD Channel and device and register the callback function */
+	rc = smd_named_open_on_edge(EVENT_CHANNEL, SMD_APPS_WCNSS,
+			&hsmd->event_channel, hdev, hci_smd_notify_event);
+	if (rc < 0) {
+		BT_ERR("Cannot open the command channel");
+		hci_free_dev(hdev);
+		return -ENODEV;
+	}
+
+	rc = smd_named_open_on_edge(DATA_CHANNEL, SMD_APPS_WCNSS,
+			&hsmd->data_channel, hdev, hci_smd_notify_data);
+	if (rc < 0) {
+		BT_ERR("Failed to open the Data channel\n");
+		hci_free_dev(hdev);
+		return -ENODEV;
+	}
+
+	/* Disable the read interrupts on the channel */
+	smd_disable_read_intr(hsmd->event_channel);
+	smd_disable_read_intr(hsmd->data_channel);
+
+	if (hci_register_dev(hdev) < 0) {
+		BT_ERR("Can't register HCI device");
+		hci_free_dev(hdev);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void hci_smd_deregister(void)
+{
+	smd_close(hs.event_channel);
+	hs.event_channel = 0;
+	smd_close(hs.data_channel);
+	hs.data_channel = 0;
+}
+
+static int hci_smd_init(void)
+{
+	return hci_smd_register_dev(&hs);
+}
+module_init(hci_smd_init);
+
+static void __exit hci_smd_exit(void)
+{
+	hci_smd_deregister();
+}
+module_exit(hci_smd_exit);
+
+MODULE_AUTHOR("Ankur Nandwani <ankurn@codeaurora.org>");
+MODULE_DESCRIPTION("Bluetooth SMD driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 99fb352..dc48239 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -2,9 +2,9 @@
  *
  *  Bluetooth HCI UART driver
  *
- *  Copyright (C) 2000-2001  Qualcomm Incorporated
  *  Copyright (C) 2002-2003  Maxim Krasnyansky <maxk@qualcomm.com>
  *  Copyright (C) 2004-2005  Marcel Holtmann <marcel@holtmann.org>
+ *  Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
  *
  *
  *  This program is free software; you can redistribute it and/or modify
@@ -35,14 +35,15 @@
 #define HCIUARTGETFLAGS		_IOR('U', 204, int)
 
 /* UART protocols */
-#define HCI_UART_MAX_PROTO	6
+#define HCI_UART_MAX_PROTO	7
 
 #define HCI_UART_H4	0
 #define HCI_UART_BCSP	1
 #define HCI_UART_3WIRE	2
 #define HCI_UART_H4DS	3
 #define HCI_UART_LL	4
-#define HCI_UART_ATH3K	5
+#define HCI_UART_IBS	5
+#define HCI_UART_ATH3K	6
 
 #define HCI_UART_RAW_DEVICE	0
 
@@ -73,7 +74,8 @@
 };
 
 /* HCI_UART proto flag bits */
-#define HCI_UART_PROTO_SET	0
+#define HCI_UART_PROTO_SET			0
+#define HCI_UART_PROTO_SET_IN_PROGRESS		1
 
 /* TX states  */
 #define HCI_UART_SENDING	1
@@ -102,3 +104,8 @@
 int ath_init(void);
 int ath_deinit(void);
 #endif
+
+#ifdef CONFIG_BT_HCIUART_IBS
+int ibs_init(void);
+int ibs_deinit(void);
+#endif
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 7d10ae3..b55746c 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -64,6 +64,8 @@
 
 source "drivers/tty/serial/Kconfig"
 
+source "drivers/char/diag/Kconfig"
+
 config TTY_PRINTK
 	bool "TTY driver to output user messages via printk"
 	depends on EXPERT
@@ -633,5 +635,46 @@
 	  Enables userspace clients to read and write to some packet SMD
 	  ports via device interface for MSM chipset.
 
+config MSM_ROTATOR
+        tristate "MSM Offline Image Rotator Driver"
+        depends on (ARCH_MSM7X30 || ARCH_MSM8X60 || ARCH_MSM8960) && ANDROID_PMEM
+        default y
+        help
+          This driver provides support for the image rotator HW block found
+          in MSM SoCs such as the 7x30, 8x60 and 8960.
+
+config MSM_ROTATOR_USE_IMEM
+        bool "Enable rotator driver to use iMem"
+        depends on ARCH_MSM7X30 && MSM_ROTATOR
+        default y
+        help
+          This option enables the msm_rotator driver to use the more efficient
+          iMem.  Some MSM platforms may not have iMem available for the rotator
+          block.  Or some systems may want the iMem to be dedicated to a
+          different function.
+
+config MMC_GENERIC_CSDIO
+	tristate "Generic sdio driver"
+	default n
+	help
+	  SDIO function driver that exposes an SDIO card as a character
+	  device in user space.
+
+config CSDIO_VENDOR_ID
+	hex "Card VendorId"
+	depends on MMC_GENERIC_CSDIO
+	default "0"
+	help
+	  Enter the vendor id of the targeted SDIO device; this may be
+	  overridden by module parameters.
+
+config CSDIO_DEVICE_ID
+	hex "CardDeviceId"
+	depends on MMC_GENERIC_CSDIO
+	default "0"
+	help
+	  Enter the device id of the targeted SDIO device; this may be
+	  overridden by module parameters.
 endmenu
 
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 3f63254..1a295b8 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -9,7 +9,6 @@
 obj-$(CONFIG_VIRTIO_CONSOLE)	+= virtio_console.o
 obj-$(CONFIG_RAW_DRIVER)	+= raw.o
 obj-$(CONFIG_SGI_SNSC)		+= snsc.o snsc_event.o
-obj-$(CONFIG_MSM_SMD_PKT)	+= msm_smd_pkt.o
 obj-$(CONFIG_MSPEC)		+= mspec.o
 obj-$(CONFIG_MMTIMER)		+= mmtimer.o
 obj-$(CONFIG_UV_MMTIMER)	+= uv_mmtimer.o
@@ -64,3 +63,6 @@
 
 obj-$(CONFIG_JS_RTC)		+= js-rtc.o
 js-rtc-y = rtc.o
+
+obj-$(CONFIG_MSM_ROTATOR)	+= msm_rotator.o
+obj-$(CONFIG_MMC_GENERIC_CSDIO)	+= csdio.o
diff --git a/drivers/char/csdio.c b/drivers/char/csdio.c
new file mode 100644
index 0000000..ca7e986
--- /dev/null
+++ b/drivers/char/csdio.c
@@ -0,0 +1,1074 @@
+/*
+ * Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/serial_reg.h>
+#include <linux/circ_buf.h>
+#include <linux/gfp.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+
+/* Char device */
+#include <linux/cdev.h>
+#include <linux/fs.h>
+
+/* Sdio device */
+#include <linux/mmc/core.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+
+#include <linux/csdio.h>
+
+#define FALSE   0
+#define TRUE    1
+
+#define VERSION                     "0.5"
+#define CSDIO_NUM_OF_SDIO_FUNCTIONS 7
+#define CSDIO_DEV_NAME              "csdio"
+#define TP_DEV_NAME                 CSDIO_DEV_NAME"f"
+#define CSDIO_DEV_PERMISSIONS       0666
+
+#define CSDIO_SDIO_BUFFER_SIZE      (64*512)
+
+int csdio_major;
+int csdio_minor;
+int csdio_transport_nr_devs = CSDIO_NUM_OF_SDIO_FUNCTIONS;
+static uint csdio_vendor_id;
+static uint csdio_device_id;
+static char *host_name;
+
+static struct csdio_func_t {
+	struct sdio_func   *m_func;
+	int                 m_enabled;
+	struct cdev         m_cdev;      /* char device structure */
+	struct device      *m_device;
+	u32                 m_block_size;
+} *g_csdio_func_table[CSDIO_NUM_OF_SDIO_FUNCTIONS] = {0};
+
+struct csdio_t {
+	struct cdev             m_cdev;
+	struct device          *m_device;
+	struct class           *m_driver_class;
+	struct fasync_struct   *m_async_queue;
+	unsigned char           m_current_irq_mask; /* currently enabled irqs */
+	struct mmc_host        *m_host;
+	unsigned int            m_num_of_func;
+} g_csdio;
+
+struct csdio_file_descriptor {
+	struct csdio_func_t    *m_port;
+	u32                     m_block_mode; /* data transfer: byte (0) or block (1) */
+	u32                     m_op_code;   /* address auto increment flag */
+	u32                     m_address;
+};
+
+static void *g_sdio_buffer;
+
+/*
+ * Open and release
+ */
+static int csdio_transport_open(struct inode *inode, struct file *filp)
+{
+	int ret = 0;
+	struct csdio_func_t *port = NULL; /*  device information */
+	struct sdio_func *func = NULL;
+	struct csdio_file_descriptor *descriptor = NULL;
+
+	port = container_of(inode->i_cdev, struct csdio_func_t, m_cdev);
+	func = port->m_func;
+	descriptor = kzalloc(sizeof(struct csdio_file_descriptor), GFP_KERNEL);
+	if (!descriptor) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	pr_info(TP_DEV_NAME"%d: open: func=%p, port=%p\n",
+			func->num, func, port);
+	sdio_claim_host(func);
+	ret = sdio_enable_func(func);
+	if (ret) {
+		pr_err(TP_DEV_NAME"%d:Enable func failed (%d)\n",
+				func->num, ret);
+		ret = -EIO;
+		goto free_descriptor;
+	}
+	descriptor->m_port = port;
+	filp->private_data = descriptor;
+	goto release_host;
+
+free_descriptor:
+	kfree(descriptor);
+release_host:
+	sdio_release_host(func);
+exit:
+	return ret;
+}
+
+static int csdio_transport_release(struct inode *inode, struct file *filp)
+{
+	int ret = 0;
+	struct csdio_file_descriptor *descriptor = filp->private_data;
+	struct csdio_func_t *port = descriptor->m_port;
+	struct sdio_func *func = port->m_func;
+
+	pr_info(TP_DEV_NAME"%d: release\n", func->num);
+	sdio_claim_host(func);
+	ret = sdio_disable_func(func);
+	if (ret) {
+		pr_err(TP_DEV_NAME"%d:Disable func failed(%d)\n",
+				func->num, ret);
+		ret = -EIO;
+	}
+	sdio_release_host(func);
+	kfree(descriptor);
+	return ret;
+}
+
+/*
+ * Data management: read and write
+ */
+static ssize_t csdio_transport_read(struct file *filp,
+		char __user *buf,
+		size_t count,
+		loff_t *f_pos)
+{
+	ssize_t ret = 0;
+	struct csdio_file_descriptor *descriptor = filp->private_data;
+	struct csdio_func_t *port = descriptor->m_port;
+	struct sdio_func *func = port->m_func;
+	size_t t_count = count;
+
+	if (descriptor->m_block_mode) {
+		pr_info(TP_DEV_NAME "%d: CMD53 read, Md:%d, Addr:0x%04X,"
+				" Un:%d (Bl:%d, BlSz:%d)\n", func->num,
+				descriptor->m_block_mode,
+				descriptor->m_address,
+				count*port->m_block_size,
+				count, port->m_block_size);
+		/* recalculate size */
+		count *= port->m_block_size;
+	}
+	sdio_claim_host(func);
+	if (descriptor->m_op_code) {
+		/* auto increment */
+		ret = sdio_memcpy_fromio(func, g_sdio_buffer,
+				descriptor->m_address, count);
+	} else { /* FIFO */
+		ret = sdio_readsb(func, g_sdio_buffer,
+				descriptor->m_address, count);
+	}
+	sdio_release_host(func);
+	if (!ret) {
+		if (copy_to_user(buf, g_sdio_buffer, count))
+			ret = -EFAULT;
+		else
+			ret = t_count;
+	}
+	if (ret < 0) {
+		pr_err(TP_DEV_NAME "%d: CMD53 read failed (%d)"
+				"(Md:%d, Addr:0x%04X, Sz:%d)\n",
+				func->num, ret,
+				descriptor->m_block_mode,
+				descriptor->m_address, count);
+	}
+	return ret;
+}
+
+static ssize_t csdio_transport_write(struct file *filp,
+		const char __user *buf,
+		size_t count,
+		loff_t *f_pos)
+{
+	ssize_t ret = 0;
+	struct csdio_file_descriptor *descriptor = filp->private_data;
+	struct csdio_func_t *port = descriptor->m_port;
+	struct sdio_func *func = port->m_func;
+	size_t t_count = count;
+
+	if (descriptor->m_block_mode)
+		count *= port->m_block_size;
+
+	if (copy_from_user(g_sdio_buffer, buf, count)) {
+		pr_err(TP_DEV_NAME"%d:copy_from_user failed\n", func->num);
+		ret = -EFAULT;
+	} else {
+		sdio_claim_host(func);
+		if (descriptor->m_op_code) {
+			/* auto increment */
+			ret = sdio_memcpy_toio(func, descriptor->m_address,
+					g_sdio_buffer, count);
+		} else {
+			/* FIFO */
+			ret = sdio_writesb(func, descriptor->m_address,
+					g_sdio_buffer, count);
+		}
+		sdio_release_host(func);
+		if (!ret) {
+			ret = t_count;
+		} else {
+			pr_err(TP_DEV_NAME "%d: CMD53 write failed (%d)"
+				"(Md:%d, Addr:0x%04X, Sz:%d)\n",
+				func->num, ret, descriptor->m_block_mode,
+				descriptor->m_address, count);
+		}
+	}
+	return ret;
+}
+
+/* disable interrupt for sdio client */
+static int disable_sdio_client_isr(struct sdio_func *func)
+{
+	int ret;
+
+	/* disable for all functions, to restore interrupts
+	 * use g_csdio.m_current_irq_mask */
+	sdio_f0_writeb(func, 0, SDIO_CCCR_IENx, &ret);
+	if (ret)
+		pr_err(CSDIO_DEV_NAME" Can't sdio_f0_writeb (%d)\n", ret);
+
+	return ret;
+}
+
+/*
+ * This handles the interrupt from SDIO.
+ */
+static void csdio_sdio_irq(struct sdio_func *func)
+{
+	int ret;
+
+	pr_info(CSDIO_DEV_NAME" csdio_sdio_irq: func=%d\n", func->num);
+	ret = disable_sdio_client_isr(func);
+	if (ret) {
+		pr_err(CSDIO_DEV_NAME" Can't disable client isr(%d)\n", ret);
+		return;
+	}
+	/*  signal asynchronous readers */
+	if (g_csdio.m_async_queue)
+		kill_fasync(&g_csdio.m_async_queue, SIGIO, POLL_IN);
+}
+
+/*
+ * The ioctl() implementation
+ */
+static int csdio_transport_ioctl(struct inode *inode,
+		struct file *filp,
+		unsigned int cmd,
+		unsigned long arg)
+{
+	int err = 0;
+	int ret = 0;
+	struct csdio_file_descriptor *descriptor = filp->private_data;
+	struct csdio_func_t *port = descriptor->m_port;
+	struct sdio_func *func = port->m_func;
+
+	/*  extract the type and number bitfields
+	    sanity check: return ENOTTY (inappropriate ioctl) before
+	    access_ok()
+	*/
+	if ((_IOC_TYPE(cmd) != CSDIO_IOC_MAGIC) ||
+			(_IOC_NR(cmd) > CSDIO_IOC_MAXNR)) {
+		pr_err(TP_DEV_NAME "Wrong ioctl command parameters\n");
+		ret = -ENOTTY;
+		goto exit;
+	}
+
+	/*  the direction is a bitmask, and VERIFY_WRITE catches R/W
+	 *  transfers. `Type' is user-oriented, while access_ok is
+	    kernel-oriented, so the concept of "read" and "write" is reversed
+	*/
+	if (_IOC_DIR(cmd) & _IOC_READ) {
+		err = !access_ok(VERIFY_WRITE, (void __user *)arg,
+				_IOC_SIZE(cmd));
+	} else {
+		if (_IOC_DIR(cmd) & _IOC_WRITE) {
+			err =  !access_ok(VERIFY_READ, (void __user *)arg,
+					_IOC_SIZE(cmd));
+		}
+	}
+	if (err) {
+		pr_err(TP_DEV_NAME "Wrong ioctl access direction\n");
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	switch (cmd) {
+	case CSDIO_IOC_SET_OP_CODE:
+		{
+			pr_info(TP_DEV_NAME"%d:SET_OP_CODE=%d\n",
+					func->num, descriptor->m_op_code);
+			ret = get_user(descriptor->m_op_code,
+					(unsigned char __user *)arg);
+			if (ret) {
+				pr_err(TP_DEV_NAME"%d:SET_OP_CODE get data"
+						" from user space failed(%d)\n",
+						func->num, ret);
+				ret = -ENOTTY;
+				break;
+			}
+		}
+		break;
+	case CSDIO_IOC_FUNCTION_SET_BLOCK_SIZE:
+		{
+			unsigned block_size;
+
+			ret = get_user(block_size, (unsigned __user *)arg);
+			if (ret) {
+				pr_err(TP_DEV_NAME"%d:SET_BLOCK_SIZE get data"
+						" from user space failed(%d)\n",
+						func->num, ret);
+				ret = -ENOTTY;
+				break;
+			}
+			pr_info(TP_DEV_NAME"%d:SET_BLOCK_SIZE=%d\n",
+					func->num, block_size);
+			sdio_claim_host(func);
+			ret = sdio_set_block_size(func, block_size);
+			if (!ret) {
+				port->m_block_size = block_size;
+			} else {
+				pr_err(TP_DEV_NAME"%d:SET_BLOCK_SIZE set block"
+						" size to %d failed (%d)\n",
+						func->num, block_size, ret);
+				ret = -ENOTTY;
+				break;
+			}
+			sdio_release_host(func);
+		}
+		break;
+	case CSDIO_IOC_SET_BLOCK_MODE:
+		{
+			pr_info(TP_DEV_NAME"%d:SET_BLOCK_MODE=%d\n",
+					func->num, descriptor->m_block_mode);
+			ret = get_user(descriptor->m_block_mode,
+					(unsigned char __user *)arg);
+			if (ret) {
+				pr_err(TP_DEV_NAME"%d:SET_BLOCK_MODE get data"
+						" from user space failed\n",
+						func->num);
+				ret = -ENOTTY;
+				break;
+			}
+		}
+		break;
+	case CSDIO_IOC_CMD52:
+		{
+			struct csdio_cmd52_ctrl_t cmd52ctrl;
+			int cmd52ret;
+
+			if (copy_from_user(&cmd52ctrl,
+					(const unsigned char __user *)arg,
+					sizeof(cmd52ctrl))) {
+				pr_err(TP_DEV_NAME"%d:IOC_CMD52 get data"
+						" from user space failed\n",
+						func->num);
+				ret = -ENOTTY;
+				break;
+			}
+			sdio_claim_host(func);
+			if (cmd52ctrl.m_write)
+				sdio_writeb(func, cmd52ctrl.m_data,
+						cmd52ctrl.m_address, &cmd52ret);
+			else
+				cmd52ctrl.m_data = sdio_readb(func,
+						cmd52ctrl.m_address, &cmd52ret);
+
+			cmd52ctrl.m_ret = cmd52ret;
+			sdio_release_host(func);
+			if (cmd52ctrl.m_ret)
+				pr_err(TP_DEV_NAME"%d:IOC_CMD52 failed (%d)\n",
+						func->num, cmd52ctrl.m_ret);
+
+			if (copy_to_user((unsigned char __user *)arg,
+						&cmd52ctrl,
+						sizeof(cmd52ctrl))) {
+				pr_err(TP_DEV_NAME"%d:IOC_CMD52 put data"
+						" to user space failed\n",
+						func->num);
+				ret = -ENOTTY;
+				break;
+			}
+		}
+		break;
+	case CSDIO_IOC_CMD53:
+		{
+			struct csdio_cmd53_ctrl_t csdio_cmd53_ctrl;
+
+			if (copy_from_user(&csdio_cmd53_ctrl,
+						(const char __user *)arg,
+						sizeof(csdio_cmd53_ctrl))) {
+				ret = -EPERM;
+				pr_err(TP_DEV_NAME"%d:"
+					"Get data from user space failed\n",
+					func->num);
+				break;
+			}
+			descriptor->m_block_mode =
+				csdio_cmd53_ctrl.m_block_mode;
+			descriptor->m_op_code = csdio_cmd53_ctrl.m_op_code;
+			descriptor->m_address = csdio_cmd53_ctrl.m_address;
+		}
+		break;
+	case CSDIO_IOC_CONNECT_ISR:
+		{
+			pr_info(CSDIO_DEV_NAME" SDIO_CONNECT_ISR"
+				" func=%d, csdio_sdio_irq=%x\n",
+				func->num, (unsigned int)csdio_sdio_irq);
+			sdio_claim_host(func);
+			ret = sdio_claim_irq(func, csdio_sdio_irq);
+			sdio_release_host(func);
+			if (ret) {
+				pr_err(CSDIO_DEV_NAME" SDIO_CONNECT_ISR"
+						" claim irq failed(%d)\n", ret);
+			} else {
+				/* update current irq mask for disable/enable */
+				g_csdio.m_current_irq_mask |= (1 << func->num);
+			}
+		}
+		break;
+	case CSDIO_IOC_DISCONNECT_ISR:
+		{
+			pr_info(CSDIO_DEV_NAME " SDIO_DISCONNECT_ISR func=%d\n",
+					func->num);
+			sdio_claim_host(func);
+			sdio_release_irq(func);
+			sdio_release_host(func);
+			/* update current irq mask for disable/enable */
+			g_csdio.m_current_irq_mask &= ~(1 << func->num);
+		}
+		break;
+	default:  /*  redundant, as cmd was checked against MAXNR */
+		pr_warning(TP_DEV_NAME"%d: Redundant IOCTL\n",
+				func->num);
+		ret = -ENOTTY;
+	}
+exit:
+	return ret;
+}
+
+static const struct file_operations csdio_transport_fops = {
+	.owner =    THIS_MODULE,
+	.read =     csdio_transport_read,
+	.write =    csdio_transport_write,
+	.ioctl =    csdio_transport_ioctl,
+	.open =     csdio_transport_open,
+	.release =  csdio_transport_release,
+};
+
+static void csdio_transport_cleanup(struct csdio_func_t *port)
+{
+	int devno = MKDEV(csdio_major, csdio_minor + port->m_func->num);
+	device_destroy(g_csdio.m_driver_class, devno);
+	port->m_device = NULL;
+	cdev_del(&port->m_cdev);
+}
+
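+/*
+ * When devtmpfs creates the node, the mode supplied by csdio_devnode()
+ * is applied automatically, so the manual permission update below is
+ * only needed for the legacy static /dev case.
+ */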
+#if defined(CONFIG_DEVTMPFS)
+static inline int csdio_cdev_update_permissions(
+    const char *devname, int dev_minor)
+{
+	return 0;
+}
+#else
+static int csdio_cdev_update_permissions(
+    const char *devname, int dev_minor)
+{
+	int ret = 0;
+	mm_segment_t fs;
+	struct file *file;
+	struct inode *inode;
+	struct iattr newattrs;
+	int mode = CSDIO_DEV_PERMISSIONS;
+	char dev_file[64];
+
+	fs = get_fs();
+	set_fs(get_ds());
+
+	snprintf(dev_file, sizeof(dev_file), "/dev/%s%d",
+		devname, dev_minor);
+	file = filp_open(dev_file, O_RDWR, 0);
+	if (IS_ERR(file)) {
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	inode = file->f_path.dentry->d_inode;
+
+	mutex_lock(&inode->i_mutex);
+	newattrs.ia_mode =
+		(mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
+	newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
+	ret = notify_change(file->f_path.dentry, &newattrs);
+	mutex_unlock(&inode->i_mutex);
+
+	filp_close(file, NULL);
+
+exit:
+	set_fs(fs);
+	return ret;
+}
+#endif
+
+static struct device *csdio_cdev_init(struct cdev *char_dev,
+		const struct file_operations *file_op, int dev_minor,
+		const char *devname, struct device *parent)
+{
+	int ret = 0;
+	struct device *new_device = NULL;
+	dev_t devno = MKDEV(csdio_major, dev_minor);
+
+	/*  Initialize transport device */
+	cdev_init(char_dev, file_op);
+	char_dev->owner = THIS_MODULE;
+	char_dev->ops = file_op;
+	ret = cdev_add(char_dev, devno, 1);
+
+	/*  Fail gracefully if need be */
+	if (ret) {
+		pr_warning("Error %d adding CSDIO char device '%s%d'",
+				ret, devname, dev_minor);
+		goto exit;
+	}
+	pr_info("'%s%d' char driver registered\n", devname, dev_minor);
+
+	/*  create a /dev entry for transport drivers */
+	new_device = device_create(g_csdio.m_driver_class, parent, devno, NULL,
+			"%s%d", devname, dev_minor);
+	if (!new_device) {
+		pr_err("Can't create device node '/dev/%s%d'\n",
+				devname, dev_minor);
+		goto cleanup;
+	}
+	/* no irq attached */
+	g_csdio.m_current_irq_mask = 0;
+
+	if (csdio_cdev_update_permissions(devname, dev_minor)) {
+		pr_warning("%s%d: Unable to update access permissions of the"
+			" '/dev/%s%d'\n",
+			devname, dev_minor, devname, dev_minor);
+	}
+
+	pr_info("%s%d: Device node '/dev/%s%d' created successfully\n",
+			devname, dev_minor, devname, dev_minor);
+	goto exit;
+cleanup:
+	cdev_del(char_dev);
+exit:
+	return new_device;
+}
+
+/* Look for the first registered function; return NULL if none is found */
+static struct sdio_func *get_active_func(void)
+{
+	int i;
+
+	for (i = 0; i < CSDIO_NUM_OF_SDIO_FUNCTIONS; i++) {
+		if (g_csdio_func_table[i])
+			return g_csdio_func_table[i]->m_func;
+	}
+	return NULL;
+}
+
+static ssize_t
+show_vdd(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	if (NULL == g_csdio.m_host)
+		return snprintf(buf, PAGE_SIZE, "N/A\n");
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+		g_csdio.m_host->ios.vdd);
+}
+
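+/*
+ * Apply a new VDD setting to the MMC host by updating its ios and
+ * invoking the host controller's set_ios() operation.
+ */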
+static int
+set_vdd_helper(int value)
+{
+	struct mmc_ios *ios = NULL;
+
+	if (NULL == g_csdio.m_host) {
+		pr_err("%s0: Set VDD, no MMC host assigned\n", CSDIO_DEV_NAME);
+		return -ENXIO;
+	}
+
+	mmc_claim_host(g_csdio.m_host);
+	ios = &g_csdio.m_host->ios;
+	ios->vdd = value;
+	g_csdio.m_host->ops->set_ios(g_csdio.m_host, ios);
+	mmc_release_host(g_csdio.m_host);
+	return 0;
+}
+
+static ssize_t
+set_vdd(struct device *dev, struct device_attribute *att,
+	const char *buf, size_t count)
+{
+	int value = 0;
+
+	sscanf(buf, "%d", &value);
+	if (set_vdd_helper(value))
+		return -ENXIO;
+	return count;
+}
+
+static DEVICE_ATTR(vdd, S_IRUGO | S_IWUSR,
+	show_vdd, set_vdd);
+
+static struct attribute *dev_attrs[] = {
+	&dev_attr_vdd.attr,
+	NULL,
+};
+
+static struct attribute_group dev_attr_grp = {
+	.attrs = dev_attrs,
+};
+
+/*
+ * The ioctl() implementation for control device
+ */
+static int csdio_ctrl_ioctl(struct inode *inode, struct file *filp,
+		unsigned int cmd, unsigned long arg)
+{
+	int err = 0;
+	int ret = 0;
+
+	pr_info("CSDIO ctrl ioctl.\n");
+
+	/*  extract the type and number bitfields
+	    sanity check: return ENOTTY (inappropriate ioctl) before
+	    access_ok()
+	*/
+	if ((_IOC_TYPE(cmd) != CSDIO_IOC_MAGIC) ||
+			(_IOC_NR(cmd) > CSDIO_IOC_MAXNR)) {
+		pr_err(CSDIO_DEV_NAME "Wrong ioctl command parameters\n");
+		ret = -ENOTTY;
+		goto exit;
+	}
+
+	/*  the direction is a bitmask, and VERIFY_WRITE catches R/W
+	  transfers. `Type' is user-oriented, while access_ok is
+	  kernel-oriented, so the concept of "read" and "write" is reversed
+	  */
+	if (_IOC_DIR(cmd) & _IOC_READ) {
+		err = !access_ok(VERIFY_WRITE, (void __user *)arg,
+				_IOC_SIZE(cmd));
+	} else {
+		if (_IOC_DIR(cmd) & _IOC_WRITE)
+			err =  !access_ok(VERIFY_READ, (void __user *)arg,
+					_IOC_SIZE(cmd));
+	}
+	if (err) {
+		pr_err(CSDIO_DEV_NAME "Wrong ioctl access direction\n");
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	switch (cmd) {
+	case CSDIO_IOC_ENABLE_HIGHSPEED_MODE:
+		pr_info(CSDIO_DEV_NAME" ENABLE_HIGHSPEED_MODE\n");
+		break;
+	case CSDIO_IOC_SET_DATA_TRANSFER_CLOCKS:
+		{
+			struct mmc_host *host = g_csdio.m_host;
+			struct mmc_ios *ios = NULL;
+
+			if (NULL == host) {
+				pr_err("%s0: "
+					"CSDIO_IOC_SET_DATA_TRANSFER_CLOCKS,"
+					" no MMC host assigned\n",
+					CSDIO_DEV_NAME);
+				ret = -EFAULT;
+				goto exit;
+			}
+			ios = &host->ios;
+
+			mmc_claim_host(host);
+			ret = get_user(host->ios.clock,
+					(unsigned int __user *)arg);
+			if (ret) {
+				pr_err(CSDIO_DEV_NAME
+					" get data from user space failed\n");
+			} else {
+				pr_err(CSDIO_DEV_NAME
+					"SET_DATA_TRANSFER_CLOCKS(%d-%d)(%d)\n",
+					host->f_min, host->f_max,
+					host->ios.clock);
+				host->ops->set_ios(host, ios);
+			}
+			mmc_release_host(host);
+		}
+		break;
+	case CSDIO_IOC_ENABLE_ISR:
+		{
+			int ret;
+			unsigned char reg;
+			struct sdio_func *func = get_active_func();
+
+			if (!func) {
+				pr_err(CSDIO_DEV_NAME " CSDIO_IOC_ENABLE_ISR"
+						" no active sdio function\n");
+				ret = -EFAULT;
+				goto exit;
+			}
+			pr_info(CSDIO_DEV_NAME
+					" CSDIO_IOC_ENABLE_ISR func=%d\n",
+					func->num);
+			reg = g_csdio.m_current_irq_mask | 1;
+
+			sdio_claim_host(func);
+			sdio_f0_writeb(func, reg, SDIO_CCCR_IENx, &ret);
+			sdio_release_host(func);
+			if (ret) {
+				pr_err(CSDIO_DEV_NAME
+						" Can't sdio_f0_writeb (%d)\n",
+						ret);
+				goto exit;
+			}
+		}
+		break;
+	case CSDIO_IOC_DISABLE_ISR:
+		{
+			int ret;
+			struct sdio_func *func = get_active_func();
+			if (!func) {
+				pr_err(CSDIO_DEV_NAME " CSDIO_IOC_ENABLE_ISR"
+						" no active sdio function\n");
+				ret = -EFAULT;
+				goto exit;
+			}
+			pr_info(CSDIO_DEV_NAME
+					" CSDIO_IOC_DISABLE_ISR func=%p\n",
+					func);
+
+			sdio_claim_host(func);
+			ret = disable_sdio_client_isr(func);
+			sdio_release_host(func);
+			if (ret) {
+				pr_err("%s0: Can't disable client isr (%d)\n",
+					CSDIO_DEV_NAME, ret);
+				goto exit;
+			}
+		}
+	break;
+	case CSDIO_IOC_SET_VDD:
+		{
+			unsigned int vdd = 0;
+
+			ret = get_user(vdd, (unsigned int __user *)arg);
+			if (ret) {
+				pr_err("%s0: CSDIO_IOC_SET_VDD,"
+					" get data from user space failed\n",
+					CSDIO_DEV_NAME);
+				goto exit;
+			}
+			pr_info(CSDIO_DEV_NAME" CSDIO_IOC_SET_VDD - %d\n", vdd);
+
+			ret = set_vdd_helper(vdd);
+			if (ret)
+				goto exit;
+		}
+	break;
+	case CSDIO_IOC_GET_VDD:
+		{
+			if (NULL == g_csdio.m_host) {
+				pr_err("%s0: CSDIO_IOC_GET_VDD,"
+					" no MMC host assigned\n",
+					CSDIO_DEV_NAME);
+				ret = -EFAULT;
+				goto exit;
+			}
+			ret = put_user(g_csdio.m_host->ios.vdd,
+				(unsigned short __user *)arg);
+			if (ret) {
+				pr_err("%s0: CSDIO_IOC_GET_VDD, put data"
+					" to user space failed\n",
+					CSDIO_DEV_NAME);
+				goto exit;
+			}
+		}
+	break;
+	default:  /*  redundant, as cmd was checked against MAXNR */
+		pr_warning(CSDIO_DEV_NAME" Redundant IOCTL\n");
+		ret = -ENOTTY;
+	}
+exit:
+	return ret;
+}
+
+static int csdio_ctrl_fasync(int fd, struct file *filp, int mode)
+{
+	pr_info(CSDIO_DEV_NAME
+			" csdio_ctrl_fasync: fd=%d, filp=%p, mode=%d\n",
+			fd, filp, mode);
+	return fasync_helper(fd, filp, mode, &g_csdio.m_async_queue);
+}
+
+/*
+ * Open and close
+ */
+static int csdio_ctrl_open(struct inode *inode, struct file *filp)
+{
+	int ret = 0;
+	struct csdio_t *csdio_ctrl_drv = NULL; /*  device information */
+
+	pr_info("CSDIO ctrl open.\n");
+	csdio_ctrl_drv = container_of(inode->i_cdev, struct csdio_t, m_cdev);
+	filp->private_data = csdio_ctrl_drv; /*  for other methods */
+	return ret;
+}
+
+static int csdio_ctrl_release(struct inode *inode, struct file *filp)
+{
+	pr_info("CSDIO ctrl release.\n");
+	/*  remove this filp from the asynchronously notified filp's */
+	csdio_ctrl_fasync(-1, filp, 0);
+	return 0;
+}
+
+static const struct file_operations csdio_ctrl_fops = {
+	.owner =	THIS_MODULE,
+	.ioctl =	csdio_ctrl_ioctl,
+	.open  =	csdio_ctrl_open,
+	.release =	csdio_ctrl_release,
+	.fasync =	csdio_ctrl_fasync,
+};
+
+static int csdio_probe(struct sdio_func *func,
+		const struct sdio_device_id *id)
+{
+	struct csdio_func_t *port;
+	int ret = 0;
+	struct mmc_host *host = func->card->host;
+
+	if (NULL != g_csdio.m_host && g_csdio.m_host != host) {
+		pr_info("%s: Device is on unexpected host\n",
+			CSDIO_DEV_NAME);
+		ret = -ENODEV;
+		goto exit;
+	}
+
+	/* enforce single instance policy */
+	if (g_csdio_func_table[func->num-1]) {
+		pr_err("%s - only single SDIO device supported",
+				sdio_func_id(func));
+		ret = -EEXIST;
+		goto exit;
+	}
+
+	port = kzalloc(sizeof(struct csdio_func_t), GFP_KERNEL);
+	if (!port) {
+		pr_err("Can't allocate memory\n");
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	/* initialize SDIO side */
+	port->m_func = func;
+	sdio_set_drvdata(func, port);
+
+	pr_info("%s - SDIO device found. Function %d\n",
+			sdio_func_id(func), func->num);
+
+	port->m_device = csdio_cdev_init(&port->m_cdev, &csdio_transport_fops,
+			csdio_minor + port->m_func->num,
+			TP_DEV_NAME, &port->m_func->dev);
+
+	/* create appropriate char device */
+	if (!port->m_device)
+		goto free;
+
+	if (0 == g_csdio.m_num_of_func && NULL == host_name)
+		g_csdio.m_host = host;
+	g_csdio.m_num_of_func++;
+	g_csdio_func_table[func->num-1] = port;
+	port->m_enabled = TRUE;
+	goto exit;
+free:
+	kfree(port);
+exit:
+	return ret;
+}
+
+static void csdio_remove(struct sdio_func *func)
+{
+	struct csdio_func_t *port = sdio_get_drvdata(func);
+
+	csdio_transport_cleanup(port);
+	sdio_claim_host(func);
+	sdio_release_irq(func);
+	sdio_disable_func(func);
+	sdio_release_host(func);
+	kfree(port);
+	g_csdio_func_table[func->num-1] = NULL;
+	g_csdio.m_num_of_func--;
+	if (0 == g_csdio.m_num_of_func && NULL == host_name)
+		g_csdio.m_host = NULL;
+	pr_info("%s%d: Device removed (%s). Function %d\n",
+		CSDIO_DEV_NAME, func->num, sdio_func_id(func), func->num);
+}
+
+/* CONFIG_CSDIO_VENDOR_ID and CONFIG_CSDIO_DEVICE_ID are defined in Kconfig.
+ * Use the kernel configuration to change the values, or override them
+ * through module parameters. */
+static struct sdio_device_id csdio_ids[] = {
+	{ SDIO_DEVICE(CONFIG_CSDIO_VENDOR_ID, CONFIG_CSDIO_DEVICE_ID) },
+	{ /* end: all zeroes */},
+};
+
+MODULE_DEVICE_TABLE(sdio, csdio_ids);
+
+static struct sdio_driver csdio_driver = {
+	.probe      = csdio_probe,
+	.remove     = csdio_remove,
+	.name       = "csdio",
+	.id_table   = csdio_ids,
+};
+
+static void __exit csdio_exit(void)
+{
+	dev_t devno = MKDEV(csdio_major, csdio_minor);
+
+	sdio_unregister_driver(&csdio_driver);
+	sysfs_remove_group(&g_csdio.m_device->kobj, &dev_attr_grp);
+	kfree(g_sdio_buffer);
+	device_destroy(g_csdio.m_driver_class, devno);
+	cdev_del(&g_csdio.m_cdev);
+	class_destroy(g_csdio.m_driver_class);
+	unregister_chrdev_region(devno, csdio_transport_nr_devs);
+	pr_info("%s: Exit driver module\n", CSDIO_DEV_NAME);
+}
+
+static char *csdio_devnode(struct device *dev, mode_t *mode)
+{
+	*mode = CSDIO_DEV_PERMISSIONS;
+	return NULL;
+}
+
+static int __init csdio_init(void)
+{
+	int ret = 0;
+	dev_t devno = 0;
+
+	pr_info("Init CSDIO driver module.\n");
+
+	/*  Get a range of minor numbers to work with, asking for a dynamic */
+	/*  major unless directed otherwise at load time. */
+	if (csdio_major) {
+		devno = MKDEV(csdio_major, csdio_minor);
+		ret = register_chrdev_region(devno, csdio_transport_nr_devs,
+				CSDIO_DEV_NAME);
+	} else {
+		ret = alloc_chrdev_region(&devno, csdio_minor,
+				csdio_transport_nr_devs, CSDIO_DEV_NAME);
+		csdio_major = MAJOR(devno);
+	}
+	if (ret < 0) {
+		pr_err("CSDIO: can't get major %d\n", csdio_major);
+		goto exit;
+	}
+	pr_info("CSDIO char driver major number is %d\n", csdio_major);
+
+	/* kernel module got parameters: overwrite vendor and device id's */
+	if ((csdio_vendor_id != 0) && (csdio_device_id != 0)) {
+		csdio_ids[0].vendor = (u16)csdio_vendor_id;
+		csdio_ids[0].device = (u16)csdio_device_id;
+	}
+
+	/*  prepare create /dev/... instance */
+	g_csdio.m_driver_class = class_create(THIS_MODULE, CSDIO_DEV_NAME);
+	if (IS_ERR(g_csdio.m_driver_class)) {
+		ret = -ENOMEM;
+		pr_err(CSDIO_DEV_NAME " class_create failed\n");
+		goto unregister_region;
+	}
+	g_csdio.m_driver_class->devnode = csdio_devnode;
+
+	/*  create CSDIO ctrl driver */
+	g_csdio.m_device = csdio_cdev_init(&g_csdio.m_cdev,
+		&csdio_ctrl_fops, csdio_minor, CSDIO_DEV_NAME, NULL);
+	if (!g_csdio.m_device) {
+		pr_err("%s: Unable to create ctrl driver\n",
+			CSDIO_DEV_NAME);
+		goto destroy_class;
+	}
+
+	g_sdio_buffer = kmalloc(CSDIO_SDIO_BUFFER_SIZE, GFP_KERNEL);
+	if (!g_sdio_buffer) {
+		pr_err("Unable to allocate %d bytes\n", CSDIO_SDIO_BUFFER_SIZE);
+		ret = -ENOMEM;
+		goto destroy_cdev;
+	}
+
+	ret = sysfs_create_group(&g_csdio.m_device->kobj, &dev_attr_grp);
+	if (ret) {
+		pr_err("%s: Unable to create device attribute\n",
+			CSDIO_DEV_NAME);
+		goto free_sdio_buff;
+	}
+
+	g_csdio.m_num_of_func = 0;
+	g_csdio.m_host = NULL;
+
+	if (NULL != host_name) {
+		struct device *dev = bus_find_device_by_name(&platform_bus_type,
+			NULL, host_name);
+		if (NULL != dev) {
+			g_csdio.m_host = dev_get_drvdata(dev);
+		} else {
+			pr_err("%s: Host '%s' doesn't exist!\n", CSDIO_DEV_NAME,
+				host_name);
+		}
+	}
+
+	pr_info("%s: Match with VendorId=0x%X, DeviceId=0x%X, Host = %s\n",
+		CSDIO_DEV_NAME, csdio_device_id, csdio_vendor_id,
+		(NULL == host_name) ? "Any" : host_name);
+
+	/* register sdio driver */
+	ret = sdio_register_driver(&csdio_driver);
+	if (ret) {
+		pr_err("%s: Unable to register as SDIO driver\n",
+			CSDIO_DEV_NAME);
+		goto remove_group;
+	}
+
+	goto exit;
+
+remove_group:
+	sysfs_remove_group(&g_csdio.m_device->kobj, &dev_attr_grp);
+free_sdio_buff:
+	kfree(g_sdio_buffer);
+destroy_cdev:
+	cdev_del(&g_csdio.m_cdev);
+destroy_class:
+	class_destroy(g_csdio.m_driver_class);
+unregister_region:
+	unregister_chrdev_region(devno, csdio_transport_nr_devs);
+exit:
+	return ret;
+}
+module_param(csdio_vendor_id, uint, S_IRUGO);
+module_param(csdio_device_id, uint, S_IRUGO);
+module_param(host_name, charp, S_IRUGO);
+
+module_init(csdio_init);
+module_exit(csdio_exit);
+
+MODULE_AUTHOR("Code Aurora Forum");
+MODULE_DESCRIPTION("CSDIO device driver version " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/dcc_tty.c b/drivers/char/dcc_tty.c
index a787acc..7643f50 100644
--- a/drivers/char/dcc_tty.c
+++ b/drivers/char/dcc_tty.c
@@ -21,12 +21,13 @@
 #include <linux/tty.h>
 #include <linux/tty_driver.h>
 #include <linux/tty_flip.h>
+#include <linux/spinlock.h>
 
 MODULE_DESCRIPTION("DCC TTY Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION("1.0");
 
-static spinlock_t g_dcc_tty_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t g_dcc_tty_lock = __SPIN_LOCK_UNLOCKED(g_dcc_tty_lock);
 static struct hrtimer g_dcc_timer;
 static char g_dcc_buffer[16];
 static int g_dcc_buffer_head;
diff --git a/drivers/char/diag/Kconfig b/drivers/char/diag/Kconfig
new file mode 100644
index 0000000..eb0b21e
--- /dev/null
+++ b/drivers/char/diag/Kconfig
@@ -0,0 +1,31 @@
+menu "Diag Support"
+
+config DIAG_CHAR
+	tristate "char driver interface and diag forwarding to/from modem"
+	default m
+	depends on USB_G_ANDROID || USB_FUNCTION_DIAG || USB_QCOM_MAEMO
+	depends on ARCH_MSM
+	help
+	 Char driver interface for diag user space clients and diag forwarding to and from the modem ARM.
+	 This enables diagchar for the Maemo or Android USB gadget, depending on the selected config.
+endmenu
+
+menu "DIAG traffic over USB"
+
+config DIAG_OVER_USB
+	bool "Enable DIAG traffic to go over USB"
+        depends on ARCH_MSM
+	default y
+	help
+	 This feature helps segregate code required for DIAG traffic to go over USB.
+endmenu
+
+menu "SDIO support for DIAG"
+
+config DIAG_SDIO_PIPE
+	depends on MSM_SDIO_AL
+	default y
+	bool "Enable 9K DIAG traffic over SDIO"
+	help
+	 SDIO Transport Layer for DIAG Router
+endmenu
diff --git a/drivers/char/diag/Makefile b/drivers/char/diag/Makefile
new file mode 100644
index 0000000..52ab2b9
--- /dev/null
+++ b/drivers/char/diag/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_DIAG_CHAR) := diagchar.o
+obj-$(CONFIG_DIAG_SDIO_PIPE) += diagfwd_sdio.o
+diagchar-objs := diagchar_core.o diagchar_hdlc.o diagfwd.o diagmem.o diagfwd_cntl.o
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
new file mode 100644
index 0000000..6041954
--- /dev/null
+++ b/drivers/char/diag/diagchar.h
@@ -0,0 +1,222 @@
+/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGCHAR_H
+#define DIAGCHAR_H
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <mach/msm_smd.h>
+#include <asm/atomic.h>
+#include <asm/mach-types.h>
+/* Size of the USB buffers used for read and write */
+#define USB_MAX_OUT_BUF 4096
+#define IN_BUF_SIZE		16384
+#define MAX_IN_BUF_SIZE	32768
+#define MAX_SYNC_OBJ_NAME_SIZE	32
+/* Size of the buffer used for deframing a packet
+   received from the PC tool */
+#define HDLC_MAX 4096
+#define HDLC_OUT_BUF_SIZE	8192
+#define POOL_TYPE_COPY		1
+#define POOL_TYPE_HDLC		2
+#define POOL_TYPE_WRITE_STRUCT	4
+#define POOL_TYPE_ALL		7
+#define MODEM_DATA 		1
+#define QDSP_DATA  		2
+#define APPS_DATA  		3
+#define SDIO_DATA		4
+#define WCNSS_DATA		5
+#define MODEM_PROC		0
+#define APPS_PROC		1
+#define QDSP_PROC		2
+#define WCNSS_PROC		3
+#define MSG_MASK_SIZE 8000
+#define LOG_MASK_SIZE 8000
+#define EVENT_MASK_SIZE 1000
+#define PKT_SIZE 4096
+#define MAX_EQUIP_ID 12
+
+/* Maximum number of pkt reg supported at initialization*/
+extern unsigned int diag_max_registration;
+extern unsigned int diag_threshold_registration;
+
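+/* Append one character to the 1 KB circular debug trace buffer, wrapping
+   diag_debug_buf_idx back to zero after the last slot. */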
+#define APPEND_DEBUG(ch) \
+do {							\
+	diag_debug_buf[diag_debug_buf_idx] = ch; \
+	(diag_debug_buf_idx < 1023) ? \
+	(diag_debug_buf_idx++) : (diag_debug_buf_idx = 0); \
+} while (0)
+
+struct diag_master_table {
+	uint16_t cmd_code;
+	uint16_t subsys_id;
+	uint32_t client_id;
+	uint16_t cmd_code_lo;
+	uint16_t cmd_code_hi;
+	int process_id;
+};
+
+struct bindpkt_params_per_process {
+	/* Name of the synchronization object associated with this proc */
+	char sync_obj_name[MAX_SYNC_OBJ_NAME_SIZE];
+	uint32_t count;	/* Number of entries in this bind */
+	struct bindpkt_params *params; /* first bind params */
+};
+
+struct bindpkt_params {
+	uint16_t cmd_code;
+	uint16_t subsys_id;
+	uint16_t cmd_code_lo;
+	uint16_t cmd_code_hi;
+	/* For Central Routing, used to store Processor number */
+	uint16_t proc_id;
+	uint32_t event_id;
+	uint32_t log_code;
+	/* For Central Routing, used to store SMD channel pointer */
+	uint32_t client_id;
+};
+
+struct diag_write_device {
+	void *buf;
+	int length;
+};
+
+struct diag_client_map {
+	char name[20];
+	int pid;
+};
+
+/* This structure is defined in USB header file */
+#ifndef CONFIG_DIAG_OVER_USB
+struct diag_request {
+	char *buf;
+	int length;
+	int actual;
+	int status;
+	void *context;
+};
+#endif
+
+struct diagchar_dev {
+
+	/* State for the char driver */
+	unsigned int major;
+	unsigned int minor_start;
+	int num;
+	struct cdev *cdev;
+	char *name;
+	int dropped_count;
+	struct class *diagchar_class;
+	int ref_count;
+	struct mutex diagchar_mutex;
+	wait_queue_head_t wait_q;
+	struct diag_client_map *client_map;
+	int *data_ready;
+	int num_clients;
+	struct diag_write_device *buf_tbl;
+
+	/* Memory pool parameters */
+	unsigned int itemsize;
+	unsigned int poolsize;
+	unsigned int itemsize_hdlc;
+	unsigned int poolsize_hdlc;
+	unsigned int itemsize_write_struct;
+	unsigned int poolsize_write_struct;
+	unsigned int debug_flag;
+	/* State for the mempool for the char driver */
+	mempool_t *diagpool;
+	mempool_t *diag_hdlc_pool;
+	mempool_t *diag_write_struct_pool;
+	struct mutex diagmem_mutex;
+	int count;
+	int count_hdlc_pool;
+	int count_write_struct_pool;
+	int used;
+
+	/* State for diag forwarding */
+	unsigned char *buf_in_1;
+	unsigned char *buf_in_2;
+	unsigned char *buf_in_cntl;
+	unsigned char *buf_in_qdsp_1;
+	unsigned char *buf_in_qdsp_2;
+	unsigned char *buf_in_qdsp_cntl;
+	unsigned char *buf_in_wcnss;
+	unsigned char *buf_in_wcnss_cntl;
+	unsigned char *usb_buf_out;
+	unsigned char *apps_rsp_buf;
+	smd_channel_t *ch;
+	smd_channel_t *ch_cntl;
+	smd_channel_t *chqdsp;
+	smd_channel_t *chqdsp_cntl;
+	smd_channel_t *ch_wcnss;
+	smd_channel_t *ch_wcnss_cntl;
+	int in_busy_1;
+	int in_busy_2;
+	int in_busy_qdsp_1;
+	int in_busy_qdsp_2;
+	int in_busy_wcnss;
+	int read_len_legacy;
+	unsigned char *hdlc_buf;
+	unsigned hdlc_count;
+	unsigned hdlc_escape;
+#ifdef CONFIG_DIAG_OVER_USB
+	int usb_connected;
+	struct usb_diag_ch *legacy_ch;
+	struct work_struct diag_proc_hdlc_work;
+	struct work_struct diag_read_work;
+#endif
+	struct workqueue_struct *diag_wq;
+	struct work_struct diag_drain_work;
+	struct work_struct diag_read_smd_work;
+	struct work_struct diag_read_smd_cntl_work;
+	struct work_struct diag_read_smd_qdsp_work;
+	struct work_struct diag_read_smd_qdsp_cntl_work;
+	struct work_struct diag_read_smd_wcnss_work;
+	struct work_struct diag_read_smd_wcnss_cntl_work;
+	uint8_t *msg_masks;
+	uint8_t *log_masks;
+	int log_masks_length;
+	uint8_t *event_masks;
+	struct diag_master_table *table;
+	uint8_t *pkt_buf;
+	int pkt_length;
+	struct diag_request *write_ptr_1;
+	struct diag_request *write_ptr_2;
+	struct diag_request *usb_read_ptr;
+	struct diag_request *write_ptr_svc;
+	struct diag_request *write_ptr_qdsp_1;
+	struct diag_request *write_ptr_qdsp_2;
+	struct diag_request *write_ptr_wcnss;
+	int logging_mode;
+	int logging_process_id;
+#ifdef CONFIG_DIAG_SDIO_PIPE
+	unsigned char *buf_in_sdio;
+	unsigned char *usb_buf_mdm_out;
+	struct sdio_channel *sdio_ch;
+	int read_len_mdm;
+	int in_busy_sdio;
+	struct usb_diag_ch *mdm_ch;
+	struct work_struct diag_read_mdm_work;
+	struct workqueue_struct *diag_sdio_wq;
+	struct work_struct diag_read_sdio_work;
+	struct work_struct diag_remove_sdio_work;
+	struct diag_request *usb_read_mdm_ptr;
+	struct diag_request *write_ptr_mdm;
+#endif
+};
+
+extern struct diagchar_dev *driver;
+#endif
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
new file mode 100644
index 0000000..c9a9d57
--- /dev/null
+++ b/drivers/char/diag/diagchar_core.c
@@ -0,0 +1,982 @@
+/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <linux/diagchar.h>
+#include <linux/sched.h>
+#ifdef CONFIG_DIAG_OVER_USB
+#include <mach/usbdiag.h>
+#endif
+#include <asm/current.h>
+#include "diagchar_hdlc.h"
+#include "diagmem.h"
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_cntl.h"
+#ifdef CONFIG_DIAG_SDIO_PIPE
+#include "diagfwd_sdio.h"
+#endif
+#include <linux/timer.h>
+
+MODULE_DESCRIPTION("Diag Char Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("1.0");
+
+#define INIT	1
+#define EXIT	-1
+struct diagchar_dev *driver;
+struct diagchar_priv {
+	int pid;
+};
+/* The following variables can be specified by module options */
+/* for copy buffer */
+static unsigned int itemsize = 2048; /* Size of item in the mempool */
+static unsigned int poolsize = 10; /* Number of items in the mempool */
+/* for hdlc buffer */
+static unsigned int itemsize_hdlc = 8192; /* Size of item in the mempool */
+static unsigned int poolsize_hdlc = 8; /* Number of items in the mempool */
+/* for write structure buffer */
+static unsigned int itemsize_write_struct = 20; /* Size of item in the mempool */
+static unsigned int poolsize_write_struct = 8; /* Num of items in the mempool */
+/* This is the max number of user-space clients supported at initialization*/
+static unsigned int max_clients = 15;
+static unsigned int threshold_client_limit = 30;
+/* This is the maximum number of pkt registrations supported at initialization*/
+unsigned int diag_max_registration = 500;
+unsigned int diag_threshold_registration = 650;
+
+/* Timer variables */
+static struct timer_list drain_timer;
+static int timer_in_progress;
+void *buf_hdlc;
+module_param(itemsize, uint, 0);
+module_param(poolsize, uint, 0);
+module_param(max_clients, uint, 0);
+
+/* delayed_rsp_id 0 represents no delay in the response. Any other number
+    means that the diag packet has a delayed response. */
+static uint16_t delayed_rsp_id = 1;
+#define DIAGPKT_MAX_DELAYED_RSP 0xFFFF
+/* This macro gets the next delayed response id. Once it reaches
+   DIAGPKT_MAX_DELAYED_RSP, it stays at DIAGPKT_MAX_DELAYED_RSP */
+
+#define DIAGPKT_NEXT_DELAYED_RSP_ID(x) 				\
+((x < DIAGPKT_MAX_DELAYED_RSP) ? x++ : DIAGPKT_MAX_DELAYED_RSP)
+
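+/* Copy 'length' bytes from 'data' into the user buffer and advance the
+   running byte count 'ret'; jump to the exit label if the destination
+   would overflow or copy_to_user() fails. */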
+#define COPY_USER_SPACE_OR_EXIT(buf, data, length)		\
+do {								\
+	if ((count < ret+length) || (copy_to_user(buf,		\
+			(void *)&data, length))) {		\
+		ret = -EFAULT;					\
+		goto exit;					\
+	}							\
+	ret += length;						\
+} while (0)
+
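+/* Timer callback: defer draining of the partially filled HDLC buffer to
+   the driver workqueue so it runs in process context. */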
+static void drain_timer_func(unsigned long data)
+{
+	queue_work(driver->diag_wq , &(driver->diag_drain_work));
+}
+
+void diag_drain_work_fn(struct work_struct *work)
+{
+	int err = 0;
+	timer_in_progress = 0;
+
+	mutex_lock(&driver->diagchar_mutex);
+	if (buf_hdlc) {
+		err = diag_device_write(buf_hdlc, APPS_DATA, NULL);
+		if (err) {
+			/*Free the buffer right away if write failed */
+			diagmem_free(driver, buf_hdlc, POOL_TYPE_HDLC);
+			diagmem_free(driver, (unsigned char *)driver->
+				 write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
+		}
+		buf_hdlc = NULL;
+#ifdef DIAG_DEBUG
+		pr_debug("diag: Number of bytes written "
+				 "from timer is %d ", driver->used);
+#endif
+		driver->used = 0;
+	}
+	mutex_unlock(&driver->diagchar_mutex);
+}
+
+void diag_read_smd_work_fn(struct work_struct *work)
+{
+	__diag_smd_send_req();
+}
+
+void diag_read_smd_qdsp_work_fn(struct work_struct *work)
+{
+	__diag_smd_qdsp_send_req();
+}
+
+void diag_read_smd_wcnss_work_fn(struct work_struct *work)
+{
+	__diag_smd_wcnss_send_req();
+}
+
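+/* Record the calling process in client map slot 'i' and stash its PID in
+   the file's private data for lookup at close time. */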
+void diag_add_client(int i, struct file *file)
+{
+	struct diagchar_priv *diagpriv_data;
+
+	driver->client_map[i].pid = current->tgid;
+	diagpriv_data = kmalloc(sizeof(struct diagchar_priv),
+							GFP_KERNEL);
+	if (diagpriv_data)
+		diagpriv_data->pid = current->tgid;
+	file->private_data = diagpriv_data;
+	strncpy(driver->client_map[i].name, current->comm, 20);
+	driver->client_map[i].name[19] = '\0';
+}
+
+static int diagchar_open(struct inode *inode, struct file *file)
+{
+	int i = 0;
+	void *temp;
+
+	if (driver) {
+		mutex_lock(&driver->diagchar_mutex);
+
+		for (i = 0; i < driver->num_clients; i++)
+			if (driver->client_map[i].pid == 0)
+				break;
+
+		if (i < driver->num_clients) {
+			diag_add_client(i, file);
+		} else {
+			if (i < threshold_client_limit) {
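+				/*
+				 * No free slot left: grow client_map and
+				 * data_ready by one entry, up to a total of
+				 * threshold_client_limit clients.
+				 */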
+				driver->num_clients++;
+				temp = krealloc(driver->client_map
+					, (driver->num_clients) * sizeof(struct
+						 diag_client_map), GFP_KERNEL);
+				if (!temp)
+					goto fail;
+				else
+					driver->client_map = temp;
+				temp = krealloc(driver->data_ready
+					, (driver->num_clients) * sizeof(int),
+							GFP_KERNEL);
+				if (!temp)
+					goto fail;
+				else
+					driver->data_ready = temp;
+				diag_add_client(i, file);
+			} else {
+				mutex_unlock(&driver->diagchar_mutex);
+				pr_alert("Max client limit for DIAG reached\n");
+				pr_info("Cannot open handle %s"
+					   " %d", current->comm, current->tgid);
+				for (i = 0; i < driver->num_clients; i++)
+					pr_debug("%d) %s PID=%d", i, driver->
+						client_map[i].name,
+						driver->client_map[i].pid);
+				return -ENOMEM;
+			}
+		}
+		driver->data_ready[i] |= MSG_MASKS_TYPE;
+		driver->data_ready[i] |= EVENT_MASKS_TYPE;
+		driver->data_ready[i] |= LOG_MASKS_TYPE;
+
+		if (driver->ref_count == 0)
+			diagmem_init(driver);
+		driver->ref_count++;
+		mutex_unlock(&driver->diagchar_mutex);
+		return 0;
+	}
+	return -ENOMEM;
+
+fail:
+	mutex_unlock(&driver->diagchar_mutex);
+	driver->num_clients--;
+	pr_alert("diag: Insufficient memory for new client");
+	return -ENOMEM;
+}
+
+static int diagchar_close(struct inode *inode, struct file *file)
+{
+	int i = 0;
+	struct diagchar_priv *diagpriv_data = file->private_data;
+
+	if (!(file->private_data)) {
+		pr_alert("diag: Invalid file pointer");
+		return -ENOMEM;
+	}
+
+#ifdef CONFIG_DIAG_OVER_USB
+	/* If the SD logging process exits, change logging to USB mode */
+	if (driver->logging_process_id == current->tgid) {
+		driver->logging_mode = USB_MODE;
+		diagfwd_connect();
+	}
+#endif /* DIAG over USB */
+	/* Delete the pkt response table entry for the exiting process */
+	for (i = 0; i < diag_max_registration; i++)
+		if (driver->table[i].process_id == current->tgid)
+			driver->table[i].process_id = 0;
+
+	if (driver) {
+		mutex_lock(&driver->diagchar_mutex);
+		driver->ref_count--;
+		/* On Client exit, try to destroy all 3 pools */
+		diagmem_exit(driver, POOL_TYPE_COPY);
+		diagmem_exit(driver, POOL_TYPE_HDLC);
+		diagmem_exit(driver, POOL_TYPE_WRITE_STRUCT);
+		for (i = 0; i < driver->num_clients; i++) {
+			if (NULL != diagpriv_data && diagpriv_data->pid ==
+				 driver->client_map[i].pid) {
+				driver->client_map[i].pid = 0;
+				kfree(diagpriv_data);
+				diagpriv_data = NULL;
+				break;
+			}
+		}
+		mutex_unlock(&driver->diagchar_mutex);
+		return 0;
+	}
+	return -ENOMEM;
+}
+
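+/*
+ * Fill one entry of the command registration table. Commands registered by an
+ * apps process are keyed by the caller's tgid; entries registered on behalf
+ * of another processor keep the client_id supplied by user space.
+ */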
+void diag_fill_reg_table(int j, struct bindpkt_params *params,
+					  int *success, int *count_entries)
+{
+	*success = 1;
+	driver->table[j].cmd_code = params->cmd_code;
+	driver->table[j].subsys_id = params->subsys_id;
+	driver->table[j].cmd_code_lo = params->cmd_code_lo;
+	driver->table[j].cmd_code_hi = params->cmd_code_hi;
+	if (params->proc_id == APPS_PROC) {
+		driver->table[j].process_id = current->tgid;
+		driver->table[j].client_id = APPS_PROC;
+	} else {
+		driver->table[j].process_id = NON_APPS_PROC;
+		driver->table[j].client_id = params->client_id;
+	}
+	(*count_entries)++;
+}
+
+long diagchar_ioctl(struct file *filp,
+			   unsigned int iocmd, unsigned long ioarg)
+{
+	int i, j, count_entries = 0, temp;
+	int success = -1;
+	void *temp_buf;
+
+	if (iocmd == DIAG_IOCTL_COMMAND_REG) {
+		struct bindpkt_params_per_process *pkt_params =
+			 (struct bindpkt_params_per_process *) ioarg;
+		mutex_lock(&driver->diagchar_mutex);
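+		/*
+		 * Pass 1: reuse free slots (process_id == 0) in the existing
+		 * table; if entries are still left over, the table is grown
+		 * below, capped at diag_threshold_registration.
+		 */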
+		for (i = 0; i < diag_max_registration; i++) {
+			if (driver->table[i].process_id == 0) {
+				diag_fill_reg_table(i, pkt_params->params,
+						&success, &count_entries);
+				if (pkt_params->count > count_entries) {
+					pkt_params->params++;
+				} else {
+					mutex_unlock(&driver->diagchar_mutex);
+					return success;
+				}
+			}
+		}
+		if (i < diag_threshold_registration) {
+			/* Increase table size by amount required */
+			diag_max_registration += pkt_params->count -
+							 count_entries;
+			/* Make sure size doesn't go beyond threshold */
+			if (diag_max_registration > diag_threshold_registration)
+				diag_max_registration =
+						 diag_threshold_registration;
+			temp_buf = krealloc(driver->table,
+					 diag_max_registration*sizeof(struct
+					 diag_master_table), GFP_KERNEL);
+			if (!temp_buf) {
+				diag_max_registration -= pkt_params->count -
+							 count_entries;
+				pr_alert("diag: Insufficient memory for reg.");
+				mutex_unlock(&driver->diagchar_mutex);
+				return 0;
+			} else {
+				driver->table = temp_buf;
+			}
+			for (j = i; j < diag_max_registration; j++) {
+				diag_fill_reg_table(j, pkt_params->params,
+						&success, &count_entries);
+				if (pkt_params->count > count_entries) {
+					pkt_params->params++;
+				} else {
+					mutex_unlock(&driver->diagchar_mutex);
+					return success;
+				}
+			}
+		} else {
+			mutex_unlock(&driver->diagchar_mutex);
+			pr_err("Max size reached, Pkt Registration failed for"
+						" Process %d", current->tgid);
+		}
+		success = 0;
+	} else if (iocmd == DIAG_IOCTL_GET_DELAYED_RSP_ID) {
+		struct diagpkt_delay_params *delay_params =
+					(struct diagpkt_delay_params *) ioarg;
+
+		if ((delay_params->rsp_ptr) &&
+		 (delay_params->size == sizeof(delayed_rsp_id)) &&
+				 (delay_params->num_bytes_ptr)) {
+			*((uint16_t *)delay_params->rsp_ptr) =
+				DIAGPKT_NEXT_DELAYED_RSP_ID(delayed_rsp_id);
+			*(delay_params->num_bytes_ptr) = sizeof(delayed_rsp_id);
+			success = 0;
+		}
+	} else if (iocmd == DIAG_IOCTL_LSM_DEINIT) {
+		for (i = 0; i < driver->num_clients; i++)
+			if (driver->client_map[i].pid == current->tgid)
+				break;
+		if (i == driver->num_clients)
+			return -EINVAL;
+		driver->data_ready[i] |= DEINIT_TYPE;
+		wake_up_interruptible(&driver->wait_q);
+		success = 1;
+	} else if (iocmd == DIAG_IOCTL_SWITCH_LOGGING) {
+		mutex_lock(&driver->diagchar_mutex);
+		temp = driver->logging_mode;
+		driver->logging_mode = (int)ioarg;
+		driver->logging_process_id = current->tgid;
+		mutex_unlock(&driver->diagchar_mutex);
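+		/*
+		 * The in_busy_* flags gate the SMD read paths: setting them
+		 * parks peripheral reads while no one is draining the data,
+		 * clearing them (and re-queueing the read work) resumes
+		 * collection for memory-device logging.
+		 */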
+		if (temp == MEMORY_DEVICE_MODE && driver->logging_mode
+							== NO_LOGGING_MODE) {
+			driver->in_busy_1 = 1;
+			driver->in_busy_2 = 1;
+			driver->in_busy_qdsp_1 = 1;
+			driver->in_busy_qdsp_2 = 1;
+		} else if (temp == NO_LOGGING_MODE && driver->logging_mode
+							== MEMORY_DEVICE_MODE) {
+			driver->in_busy_1 = 0;
+			driver->in_busy_2 = 0;
+			driver->in_busy_qdsp_1 = 0;
+			driver->in_busy_qdsp_2 = 0;
+			/* Poll SMD channels to check for data*/
+			if (driver->ch)
+				queue_work(driver->diag_wq,
+					&(driver->diag_read_smd_work));
+			if (driver->chqdsp)
+				queue_work(driver->diag_wq,
+					&(driver->diag_read_smd_qdsp_work));
+		}
+#ifdef CONFIG_DIAG_OVER_USB
+		else if (temp == USB_MODE && driver->logging_mode
+							 == NO_LOGGING_MODE)
+			diagfwd_disconnect();
+		else if (temp == NO_LOGGING_MODE && driver->logging_mode
+								== USB_MODE)
+			diagfwd_connect();
+		else if (temp == USB_MODE && driver->logging_mode
+							== MEMORY_DEVICE_MODE) {
+			diagfwd_disconnect();
+			driver->in_busy_1 = 0;
+			driver->in_busy_2 = 0;
+			driver->in_busy_qdsp_1 = 0;
+			driver->in_busy_qdsp_2 = 0;
+			/* Poll SMD channels to check for data*/
+			if (driver->ch)
+				queue_work(driver->diag_wq,
+					 &(driver->diag_read_smd_work));
+			if (driver->chqdsp)
+				queue_work(driver->diag_wq,
+					&(driver->diag_read_smd_qdsp_work));
+		} else if (temp == MEMORY_DEVICE_MODE && driver->logging_mode
+								== USB_MODE)
+			diagfwd_connect();
+#endif /* DIAG over USB */
+		success = 1;
+	}
+
+	return success;
+}
+
+static int diagchar_read(struct file *file, char __user *buf, size_t count,
+			  loff_t *ppos)
+{
+	int index = -1, i = 0, ret = 0;
+	int num_data = 0, data_type;
+	for (i = 0; i < driver->num_clients; i++)
+		if (driver->client_map[i].pid == current->tgid)
+			index = i;
+
+	if (index == -1) {
+		pr_err("diag: Client PID not found in table");
+		return -EINVAL;
+	}
+
+	wait_event_interruptible(driver->wait_q,
+				  driver->data_ready[index]);
+	mutex_lock(&driver->diagchar_mutex);
+
+	if ((driver->data_ready[index] & MEMORY_DEVICE_LOG_TYPE) && (driver->
+					logging_mode == MEMORY_DEVICE_MODE)) {
+		/*Copy the type of data being passed*/
+		data_type = driver->data_ready[index] & MEMORY_DEVICE_LOG_TYPE;
+		COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
+		/* place holder for number of data field */
+		ret += 4;
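+		/*
+		 * Layout handed to user space in this mode:
+		 * [data_type][num_data][len0][payload0][len1][payload1]...
+		 * num_data is patched in at offset 4 once the apps buffer
+		 * table, modem and QDSP sources below have been walked.
+		 */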
+
+		for (i = 0; i < driver->poolsize_write_struct; i++) {
+			if (driver->buf_tbl[i].length > 0) {
+#ifdef DIAG_DEBUG
+				pr_debug("diag: WRITING the buf address "
+				       "and length is %x , %d\n", (unsigned int)
+					(driver->buf_tbl[i].buf),
+					driver->buf_tbl[i].length);
+#endif
+				num_data++;
+				/* Copy the length of data being passed */
+				if (copy_to_user(buf+ret, (void *)&(driver->
+						buf_tbl[i].length), 4)) {
+						num_data--;
+						goto drop;
+				}
+				ret += 4;
+
+				/* Copy the actual data being passed */
+				if (copy_to_user(buf+ret, (void *)driver->
+				buf_tbl[i].buf, driver->buf_tbl[i].length)) {
+					ret -= 4;
+					num_data--;
+					goto drop;
+				}
+				ret += driver->buf_tbl[i].length;
+drop:
+#ifdef DIAG_DEBUG
+				pr_debug("diag: DEQUEUE buf address and"
+				       " length is %x,%d\n", (unsigned int)
+				       (driver->buf_tbl[i].buf), driver->
+				       buf_tbl[i].length);
+#endif
+				diagmem_free(driver, (unsigned char *)
+				(driver->buf_tbl[i].buf), POOL_TYPE_HDLC);
+				driver->buf_tbl[i].length = 0;
+				driver->buf_tbl[i].buf = 0;
+			}
+		}
+
+		/* copy modem data */
+		if (driver->in_busy_1 == 1) {
+			num_data++;
+			/*Copy the length of data being passed*/
+			COPY_USER_SPACE_OR_EXIT(buf+ret,
+					 (driver->write_ptr_1->length), 4);
+			/*Copy the actual data being passed*/
+			COPY_USER_SPACE_OR_EXIT(buf+ret,
+					*(driver->buf_in_1),
+					 driver->write_ptr_1->length);
+			driver->in_busy_1 = 0;
+		}
+		if (driver->in_busy_2 == 1) {
+			num_data++;
+			/*Copy the length of data being passed*/
+			COPY_USER_SPACE_OR_EXIT(buf+ret,
+					 (driver->write_ptr_2->length), 4);
+			/*Copy the actual data being passed*/
+			COPY_USER_SPACE_OR_EXIT(buf+ret,
+					 *(driver->buf_in_2),
+					 driver->write_ptr_2->length);
+			driver->in_busy_2 = 0;
+		}
+
+		/* copy q6 data */
+		if (driver->in_busy_qdsp_1 == 1) {
+			num_data++;
+			/*Copy the length of data being passed*/
+			COPY_USER_SPACE_OR_EXIT(buf+ret,
+				 (driver->write_ptr_qdsp_1->length), 4);
+			/*Copy the actual data being passed*/
+			COPY_USER_SPACE_OR_EXIT(buf+ret, *(driver->
+							buf_in_qdsp_1),
+					 driver->write_ptr_qdsp_1->length);
+			driver->in_busy_qdsp_1 = 0;
+		}
+		if (driver->in_busy_qdsp_2 == 1) {
+			num_data++;
+			/*Copy the length of data being passed*/
+			COPY_USER_SPACE_OR_EXIT(buf+ret,
+				 (driver->write_ptr_qdsp_2->length), 4);
+			/*Copy the actual data being passed*/
+			COPY_USER_SPACE_OR_EXIT(buf+ret, *(driver->
+				buf_in_qdsp_2), driver->
+					write_ptr_qdsp_2->length);
+			driver->in_busy_qdsp_2 = 0;
+		}
+
+		/* copy number of data fields */
+		COPY_USER_SPACE_OR_EXIT(buf+4, num_data, 4);
+		ret -= 4;
+		driver->data_ready[index] ^= MEMORY_DEVICE_LOG_TYPE;
+		if (driver->ch)
+			queue_work(driver->diag_wq,
+					 &(driver->diag_read_smd_work));
+		if (driver->chqdsp)
+			queue_work(driver->diag_wq,
+					 &(driver->diag_read_smd_qdsp_work));
+		APPEND_DEBUG('n');
+		goto exit;
+	} else if (driver->data_ready[index] & MEMORY_DEVICE_LOG_TYPE) {
+		/* If the thread woke up but the logging mode is no longer
+		   memory device, this condition needs to be cleared */
+		driver->data_ready[index] ^= MEMORY_DEVICE_LOG_TYPE;
+	}
+
+	if (driver->data_ready[index] & DEINIT_TYPE) {
+		/*Copy the type of data being passed*/
+		data_type = driver->data_ready[index] & DEINIT_TYPE;
+		COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
+		driver->data_ready[index] ^= DEINIT_TYPE;
+		goto exit;
+	}
+
+	if (driver->data_ready[index] & MSG_MASKS_TYPE) {
+		/*Copy the type of data being passed*/
+		data_type = driver->data_ready[index] & MSG_MASKS_TYPE;
+		COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
+		COPY_USER_SPACE_OR_EXIT(buf+4, *(driver->msg_masks),
+							 MSG_MASK_SIZE);
+		driver->data_ready[index] ^= MSG_MASKS_TYPE;
+		goto exit;
+	}
+
+	if (driver->data_ready[index] & EVENT_MASKS_TYPE) {
+		/*Copy the type of data being passed*/
+		data_type = driver->data_ready[index] & EVENT_MASKS_TYPE;
+		COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
+		COPY_USER_SPACE_OR_EXIT(buf+4, *(driver->event_masks),
+							 EVENT_MASK_SIZE);
+		driver->data_ready[index] ^= EVENT_MASKS_TYPE;
+		goto exit;
+	}
+
+	if (driver->data_ready[index] & LOG_MASKS_TYPE) {
+		/*Copy the type of data being passed*/
+		data_type = driver->data_ready[index] & LOG_MASKS_TYPE;
+		COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
+		COPY_USER_SPACE_OR_EXIT(buf+4, *(driver->log_masks),
+							 LOG_MASK_SIZE);
+		driver->data_ready[index] ^= LOG_MASKS_TYPE;
+		goto exit;
+	}
+
+	if (driver->data_ready[index] & PKT_TYPE) {
+		/*Copy the type of data being passed*/
+		data_type = driver->data_ready[index] & PKT_TYPE;
+		COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
+		COPY_USER_SPACE_OR_EXIT(buf+4, *(driver->pkt_buf),
+							 driver->pkt_length);
+		driver->data_ready[index] ^= PKT_TYPE;
+		goto exit;
+	}
+
+exit:
+	mutex_unlock(&driver->diagchar_mutex);
+	return ret;
+}
+
+static int diagchar_write(struct file *file, const char __user *buf,
+			      size_t count, loff_t *ppos)
+{
+	int err, ret = 0, pkt_type;
+#ifdef DIAG_DEBUG
+	int length = 0, i;
+#endif
+	struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
+	struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
+	void *buf_copy = NULL;
+	int payload_size;
+#ifdef CONFIG_DIAG_OVER_USB
+	if (((driver->logging_mode == USB_MODE) && (!driver->usb_connected)) ||
+				(driver->logging_mode == NO_LOGGING_MODE)) {
+		/*Drop the diag payload */
+		return -EIO;
+	}
+#endif /* DIAG over USB */
+	/* Get the packet type F3/log/event/Pkt response */
+	err = copy_from_user((&pkt_type), buf, 4);
+	/*First 4 bytes indicate the type of payload - ignore these */
+	payload_size = count - 4;
+
+	if (pkt_type == MEMORY_DEVICE_LOG_TYPE) {
+		if (!mask_request_validate((unsigned char *)buf)) {
+			printk(KERN_ALERT "diag: invalid mask request, cannot send to modem\n");
+			return -EFAULT;
+		}
+		buf = buf + 4;
+#ifdef DIAG_DEBUG
+		pr_debug("diag: masks: %d\n", payload_size);
+		for (i = 0; i < payload_size; i++)
+			printk(KERN_DEBUG "\t %x", *(((unsigned char *)buf)+i));
+#endif
+		diag_process_hdlc((void *)buf, payload_size);
+		return 0;
+	}
+
+	buf_copy = diagmem_alloc(driver, payload_size, POOL_TYPE_COPY);
+	if (!buf_copy) {
+		driver->dropped_count++;
+		return -ENOMEM;
+	}
+
+	err = copy_from_user(buf_copy, buf + 4, payload_size);
+	if (err) {
+		printk(KERN_INFO "diagchar : copy_from_user failed\n");
+		ret = -EFAULT;
+		goto fail_free_copy;
+	}
+#ifdef DIAG_DEBUG
+	printk(KERN_DEBUG "data is -->\n");
+	for (i = 0; i < payload_size; i++)
+		printk(KERN_DEBUG "\t %x \t", *(((unsigned char *)buf_copy)+i));
+#endif
+	send.state = DIAG_STATE_START;
+	send.pkt = buf_copy;
+	send.last = (void *)(buf_copy + payload_size - 1);
+	send.terminate = 1;
+#ifdef DIAG_DEBUG
+	pr_debug("diag: Already used bytes in buffer %d, and"
+	" incoming payload size is %d\n", driver->used, payload_size);
+	printk(KERN_DEBUG "hdlc encoded data is -->\n");
+	for (i = 0; i < payload_size + 8; i++) {
+		printk(KERN_DEBUG "\t %x \t", *(((unsigned char *)buf_hdlc)+i));
+		if (*(((unsigned char *)buf_hdlc)+i) != 0x7e)
+			length++;
+	}
+#endif
+	mutex_lock(&driver->diagchar_mutex);
+	if (!buf_hdlc)
+		buf_hdlc = diagmem_alloc(driver, HDLC_OUT_BUF_SIZE,
+						 POOL_TYPE_HDLC);
+	if (!buf_hdlc) {
+		ret = -ENOMEM;
+		goto fail_free_hdlc;
+	}
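+	/*
+	 * The 2*payload_size + 3 margin allows for every payload byte being
+	 * escaped (doubling it) plus the CRC bytes and the trailing 0x7E
+	 * flag; if the aggregation buffer cannot hold that, flush it first
+	 * and start a fresh one.
+	 */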
+	if (HDLC_OUT_BUF_SIZE - driver->used <= (2*payload_size) + 3) {
+		err = diag_device_write(buf_hdlc, APPS_DATA, NULL);
+		if (err) {
+			/*Free the buffer right away if write failed */
+			diagmem_free(driver, buf_hdlc, POOL_TYPE_HDLC);
+			diagmem_free(driver, (unsigned char *)driver->
+				 write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
+			ret = -EIO;
+			goto fail_free_hdlc;
+		}
+		buf_hdlc = NULL;
+		driver->used = 0;
+		buf_hdlc = diagmem_alloc(driver, HDLC_OUT_BUF_SIZE,
+							 POOL_TYPE_HDLC);
+		if (!buf_hdlc) {
+			ret = -ENOMEM;
+			goto fail_free_hdlc;
+		}
+	}
+
+	enc.dest = buf_hdlc + driver->used;
+	enc.dest_last = (void *)(buf_hdlc + driver->used + 2*payload_size + 3);
+	diag_hdlc_encode(&send, &enc);
+
+	/* This is to check if after HDLC encoding, we are still within the
+	 limits of aggregation buffer. If not, we write out the current buffer
+	and start aggregation in a newly allocated buffer */
+	if ((unsigned int) enc.dest >=
+		 (unsigned int)(buf_hdlc + HDLC_OUT_BUF_SIZE)) {
+		err = diag_device_write(buf_hdlc, APPS_DATA, NULL);
+		if (err) {
+			/*Free the buffer right away if write failed */
+			diagmem_free(driver, buf_hdlc, POOL_TYPE_HDLC);
+			diagmem_free(driver, (unsigned char *)driver->
+				 write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
+			ret = -EIO;
+			goto fail_free_hdlc;
+		}
+		buf_hdlc = NULL;
+		driver->used = 0;
+		buf_hdlc = diagmem_alloc(driver, HDLC_OUT_BUF_SIZE,
+							 POOL_TYPE_HDLC);
+		if (!buf_hdlc) {
+			ret = -ENOMEM;
+			goto fail_free_hdlc;
+		}
+		enc.dest = buf_hdlc + driver->used;
+		enc.dest_last = (void *)(buf_hdlc + driver->used +
+							 (2*payload_size) + 3);
+		diag_hdlc_encode(&send, &enc);
+	}
+
+	driver->used = (uint32_t) enc.dest - (uint32_t) buf_hdlc;
+	if (pkt_type == DATA_TYPE_RESPONSE) {
+		err = diag_device_write(buf_hdlc, APPS_DATA, NULL);
+		if (err) {
+			/*Free the buffer right away if write failed */
+			diagmem_free(driver, buf_hdlc, POOL_TYPE_HDLC);
+			diagmem_free(driver, (unsigned char *)driver->
+				 write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
+			ret = -EIO;
+			goto fail_free_hdlc;
+		}
+		buf_hdlc = NULL;
+		driver->used = 0;
+	}
+
+	mutex_unlock(&driver->diagchar_mutex);
+	diagmem_free(driver, buf_copy, POOL_TYPE_COPY);
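+	/*
+	 * Arm a one-shot 500 ms drain timer so a partially filled aggregation
+	 * buffer is still flushed even if no further writes arrive to fill it.
+	 */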
+	if (!timer_in_progress)	{
+		timer_in_progress = 1;
+		ret = mod_timer(&drain_timer, jiffies + msecs_to_jiffies(500));
+	}
+	return 0;
+
+fail_free_hdlc:
+	buf_hdlc = NULL;
+	driver->used = 0;
+	diagmem_free(driver, buf_copy, POOL_TYPE_COPY);
+	mutex_unlock(&driver->diagchar_mutex);
+	return ret;
+
+fail_free_copy:
+	diagmem_free(driver, buf_copy, POOL_TYPE_COPY);
+	return ret;
+}
+
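+/*
+ * Whitelist check for requests injected by a user-space logging client: only
+ * version/status queries and mask configuration commands are allowed through
+ * to the peripherals; everything else is rejected.
+ */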
+int mask_request_validate(unsigned char mask_buf[])
+{
+	uint8_t packet_id;
+	uint8_t subsys_id;
+	uint16_t ss_cmd;
+
+	packet_id = mask_buf[4];
+
+	if (packet_id == 0x4B) {
+		subsys_id = mask_buf[5];
+		ss_cmd = *(uint16_t *)(mask_buf + 6);
+		/* Packets with SSID which are allowed */
+		switch (subsys_id) {
+		case 0x04: /* DIAG_SUBSYS_WCDMA */
+			if ((ss_cmd == 0) || (ss_cmd == 0xF))
+				return 1;
+			break;
+		case 0x08: /* DIAG_SUBSYS_GSM */
+			if ((ss_cmd == 0) || (ss_cmd == 0x1))
+				return 1;
+			break;
+		case 0x09: /* DIAG_SUBSYS_UMTS */
+		case 0x0F: /* DIAG_SUBSYS_CM */
+			if (ss_cmd == 0)
+				return 1;
+			break;
+		case 0x0C: /* DIAG_SUBSYS_OS */
+			if ((ss_cmd == 2) || (ss_cmd == 0x100))
+				return 1; /* MPU and APU */
+			break;
+		case 0x12: /* DIAG_SUBSYS_DIAG_SERV */
+			if ((ss_cmd == 0) || (ss_cmd == 0x6) || (ss_cmd == 0x7))
+				return 1;
+			break;
+		case 0x13: /* DIAG_SUBSYS_FS */
+			if ((ss_cmd == 0) || (ss_cmd == 0x1))
+				return 1;
+			break;
+		default:
+			return 0;
+			break;
+		}
+	} else {
+		switch (packet_id) {
+		case 0x00:    /* Version Number */
+		case 0x0C:    /* CDMA status packet */
+		case 0x1C:    /* Diag Version */
+		case 0x1D:    /* Time Stamp */
+		case 0x60:    /* Event Report Control */
+		case 0x63:    /* Status snapshot */
+		case 0x73:    /* Logging Configuration */
+		case 0x7C:    /* Extended build ID */
+		case 0x7D:    /* Extended Message configuration */
+		case 0x81:    /* Event get mask */
+		case 0x82:    /* Set the event mask */
+			return 1;
+			break;
+		default:
+			return 0;
+			break;
+		}
+	}
+	return 0;
+}
+
+static const struct file_operations diagcharfops = {
+	.owner = THIS_MODULE,
+	.read = diagchar_read,
+	.write = diagchar_write,
+	.unlocked_ioctl = diagchar_ioctl,
+	.open = diagchar_open,
+	.release = diagchar_close
+};
+
+static int diagchar_setup_cdev(dev_t devno)
+{
+
+	int err;
+
+	cdev_init(driver->cdev, &diagcharfops);
+
+	driver->cdev->owner = THIS_MODULE;
+	driver->cdev->ops = &diagcharfops;
+
+	err = cdev_add(driver->cdev, devno, 1);
+
+	if (err) {
+		printk(KERN_INFO "diagchar cdev registration failed!\n");
+		return -1;
+	}
+
+	driver->diagchar_class = class_create(THIS_MODULE, "diag");
+
+	if (IS_ERR(driver->diagchar_class)) {
+		printk(KERN_ERR "Error creating diagchar class.\n");
+		return -1;
+	}
+
+	device_create(driver->diagchar_class, NULL, devno,
+				  (void *)driver, "diag");
+
+	return 0;
+
+}
+
+static int diagchar_cleanup(void)
+{
+	if (driver) {
+		if (driver->cdev) {
+			/* TODO - Check if device exists before deleting */
+			device_destroy(driver->diagchar_class,
+				       MKDEV(driver->major,
+					     driver->minor_start));
+			cdev_del(driver->cdev);
+		}
+		if (!IS_ERR(driver->diagchar_class))
+			class_destroy(driver->diagchar_class);
+		kfree(driver);
+	}
+	return 0;
+}
+
+#ifdef CONFIG_DIAG_SDIO_PIPE
+void diag_sdio_fn(int type)
+{
+	if (machine_is_msm8x60_fusion() || machine_is_msm8x60_fusn_ffa()) {
+		if (type == INIT)
+			diagfwd_sdio_init();
+		else if (type == EXIT)
+			diagfwd_sdio_exit();
+	}
+}
+#else
+inline void diag_sdio_fn(int type) {}
+#endif
+
+static int __init diagchar_init(void)
+{
+	dev_t dev;
+	int error;
+
+	pr_debug("diagfwd initializing ..\n");
+	driver = kzalloc(sizeof(struct diagchar_dev) + 5, GFP_KERNEL);
+
+	if (driver) {
+		driver->used = 0;
+		timer_in_progress = 0;
+		driver->debug_flag = 1;
+		setup_timer(&drain_timer, drain_timer_func, 1234);
+		driver->itemsize = itemsize;
+		driver->poolsize = poolsize;
+		driver->itemsize_hdlc = itemsize_hdlc;
+		driver->poolsize_hdlc = poolsize_hdlc;
+		driver->itemsize_write_struct = itemsize_write_struct;
+		driver->poolsize_write_struct = poolsize_write_struct;
+		driver->num_clients = max_clients;
+		driver->logging_mode = USB_MODE;
+		mutex_init(&driver->diagchar_mutex);
+		init_waitqueue_head(&driver->wait_q);
+		INIT_WORK(&(driver->diag_drain_work), diag_drain_work_fn);
+		INIT_WORK(&(driver->diag_read_smd_work), diag_read_smd_work_fn);
+		INIT_WORK(&(driver->diag_read_smd_cntl_work),
+						 diag_read_smd_cntl_work_fn);
+		INIT_WORK(&(driver->diag_read_smd_qdsp_work),
+			   diag_read_smd_qdsp_work_fn);
+		INIT_WORK(&(driver->diag_read_smd_qdsp_cntl_work),
+			   diag_read_smd_qdsp_cntl_work_fn);
+		INIT_WORK(&(driver->diag_read_smd_wcnss_work),
+			diag_read_smd_wcnss_work_fn);
+		INIT_WORK(&(driver->diag_read_smd_wcnss_cntl_work),
+			diag_read_smd_wcnss_cntl_work_fn);
+		diagfwd_init();
+		diagfwd_cntl_init();
+		diag_sdio_fn(INIT);
+		pr_debug("diagchar initializing ..\n");
+		driver->num = 1;
+		driver->name = ((void *)driver) + sizeof(struct diagchar_dev);
+		strlcpy(driver->name, "diag", 5);
+
+		/* Get major number from kernel and initialize */
+		error = alloc_chrdev_region(&dev, driver->minor_start,
+					    driver->num, driver->name);
+		if (!error) {
+			driver->major = MAJOR(dev);
+			driver->minor_start = MINOR(dev);
+		} else {
+			printk(KERN_INFO "Major number not allocated\n");
+			goto fail;
+		}
+		driver->cdev = cdev_alloc();
+		error = diagchar_setup_cdev(dev);
+		if (error)
+			goto fail;
+	} else {
+		printk(KERN_INFO "kzalloc failed\n");
+		goto fail;
+	}
+
+	pr_info("diagchar initialized now");
+	return 0;
+
+fail:
+	diagchar_cleanup();
+	diagfwd_exit();
+	diagfwd_cntl_exit();
+	diag_sdio_fn(EXIT);
+	return -1;
+}
+
+static void __exit diagchar_exit(void)
+{
+	printk(KERN_INFO "diagchar exiting ..\n");
+	/* On Driver exit, send special pool type to
+	 ensure no memory leaks */
+	diagmem_exit(driver, POOL_TYPE_ALL);
+	diagfwd_exit();
+	diagfwd_cntl_exit();
+	diag_sdio_fn(EXIT);
+	diagchar_cleanup();
+	printk(KERN_INFO "done diagchar exit\n");
+}
+
+module_init(diagchar_init);
+module_exit(diagchar_exit);
diff --git a/drivers/char/diag/diagchar_hdlc.c b/drivers/char/diag/diagchar_hdlc.c
new file mode 100644
index 0000000..ef57d52
--- /dev/null
+++ b/drivers/char/diag/diagchar_hdlc.c
@@ -0,0 +1,223 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <linux/crc-ccitt.h>
+#include "diagchar_hdlc.h"
+
+
+MODULE_LICENSE("GPL v2");
+
+#define CRC_16_L_SEED           0xFFFF
+
+#define CRC_16_L_STEP(xx_crc, xx_c) \
+	crc_ccitt_byte(xx_crc, xx_c)
+
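+/*
+ * Encoded frame layout: payload bytes with 0x7E/0x7D escaped as
+ * (0x7D, byte ^ 0x20), followed by the bitwise-inverted CRC-CCITT
+ * (two bytes, least significant byte first, also escaped), terminated
+ * by a 0x7E flag.
+ */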
+void diag_hdlc_encode(struct diag_send_desc_type *src_desc,
+		      struct diag_hdlc_dest_type *enc)
+{
+	uint8_t *dest;
+	uint8_t *dest_last;
+	const uint8_t *src;
+	const uint8_t *src_last;
+	uint16_t crc;
+	unsigned char src_byte = 0;
+	enum diag_send_state_enum_type state;
+	unsigned int used = 0;
+
+	if (src_desc && enc) {
+
+		/* Copy parts to local variables. */
+		src = src_desc->pkt;
+		src_last = src_desc->last;
+		state = src_desc->state;
+		dest = enc->dest;
+		dest_last = enc->dest_last;
+
+		if (state == DIAG_STATE_START) {
+			crc = CRC_16_L_SEED;
+			state++;
+		} else {
+			/* Get a local copy of the CRC */
+			crc = enc->crc;
+		}
+
+		/* dest or dest_last may be NULL to trigger a
+		   state transition only */
+		if (dest && dest_last) {
+			/* This condition needs to include the possibility
+			   of 2 dest bytes for an escaped byte */
+			while (src <= src_last && dest <= dest_last) {
+
+				src_byte = *src++;
+
+				if ((src_byte == CONTROL_CHAR) ||
+				    (src_byte == ESC_CHAR)) {
+
+					/* If the escape character is not the
+					   last byte */
+					if (dest != dest_last) {
+						crc = CRC_16_L_STEP(crc,
+								    src_byte);
+
+						*dest++ = ESC_CHAR;
+						used++;
+
+						*dest++ = src_byte
+							  ^ ESC_MASK;
+						used++;
+					} else {
+
+						src--;
+						break;
+					}
+
+				} else {
+					crc = CRC_16_L_STEP(crc, src_byte);
+					*dest++ = src_byte;
+					used++;
+				}
+			}
+
+			if (src > src_last) {
+
+				if (state == DIAG_STATE_BUSY) {
+					if (src_desc->terminate) {
+						crc = ~crc;
+						state++;
+					} else {
+						/* Done with fragment */
+						state = DIAG_STATE_COMPLETE;
+					}
+				}
+
+				while (dest <= dest_last &&
+				       state >= DIAG_STATE_CRC1 &&
+				       state < DIAG_STATE_TERM) {
+					/* Encode a byte of the CRC next */
+					src_byte = crc & 0xFF;
+
+					if ((src_byte == CONTROL_CHAR)
+					    || (src_byte == ESC_CHAR)) {
+
+						if (dest != dest_last) {
+
+							*dest++ = ESC_CHAR;
+							used++;
+							*dest++ = src_byte ^
+								  ESC_MASK;
+							used++;
+
+							crc >>= 8;
+						} else {
+
+							break;
+						}
+					} else {
+
+						crc >>= 8;
+						*dest++ = src_byte;
+						used++;
+					}
+
+					state++;
+				}
+
+				if (state == DIAG_STATE_TERM) {
+					if (dest_last >= dest) {
+						*dest++ = CONTROL_CHAR;
+						used++;
+						state++;	/* Complete */
+					}
+				}
+			}
+		}
+		/* Copy local variables back into the encode structure. */
+
+		enc->dest = dest;
+		enc->dest_last = dest_last;
+		enc->crc = crc;
+		src_desc->pkt = src;
+		src_desc->last = src_last;
+		src_desc->state = state;
+	}
+
+	return;
+}
+
+
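+/*
+ * Undo HDLC escaping from src into dest. Returns 1 when a 0x7E packet
+ * boundary was consumed (a complete frame, including the flag byte, is now
+ * in dest), 0 otherwise; src_idx and dest_idx are advanced so callers can
+ * resume with the next chunk.
+ */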
+int diag_hdlc_decode(struct diag_hdlc_decode_type *hdlc)
+{
+	uint8_t *src_ptr = NULL, *dest_ptr = NULL;
+	unsigned int src_length = 0, dest_length = 0;
+
+	unsigned int len = 0;
+	unsigned int i;
+	uint8_t src_byte;
+
+	int pkt_bnd = 0;
+
+	if (hdlc && hdlc->src_ptr && hdlc->dest_ptr &&
+	    (hdlc->src_size - hdlc->src_idx > 0) &&
+	    (hdlc->dest_size - hdlc->dest_idx > 0)) {
+
+		src_ptr = hdlc->src_ptr;
+		src_ptr = &src_ptr[hdlc->src_idx];
+		src_length = hdlc->src_size - hdlc->src_idx;
+
+		dest_ptr = hdlc->dest_ptr;
+		dest_ptr = &dest_ptr[hdlc->dest_idx];
+		dest_length = hdlc->dest_size - hdlc->dest_idx;
+
+		for (i = 0; i < src_length; i++) {
+
+			src_byte = src_ptr[i];
+
+			if (hdlc->escaping) {
+				dest_ptr[len++] = src_byte ^ ESC_MASK;
+				hdlc->escaping = 0;
+			} else if (src_byte == ESC_CHAR) {
+				if (i == (src_length - 1)) {
+					hdlc->escaping = 1;
+					i++;
+					break;
+				} else {
+					dest_ptr[len++] = src_ptr[++i]
+							  ^ ESC_MASK;
+				}
+			} else if (src_byte == CONTROL_CHAR) {
+				dest_ptr[len++] = src_byte;
+				pkt_bnd = 1;
+				i++;
+				break;
+			} else {
+				dest_ptr[len++] = src_byte;
+			}
+
+			if (len >= dest_length) {
+				i++;
+				break;
+			}
+		}
+
+		hdlc->src_idx += i;
+		hdlc->dest_idx += len;
+	}
+
+	return pkt_bnd;
+}
diff --git a/drivers/char/diag/diagchar_hdlc.h b/drivers/char/diag/diagchar_hdlc.h
new file mode 100644
index 0000000..2df81de
--- /dev/null
+++ b/drivers/char/diag/diagchar_hdlc.h
@@ -0,0 +1,60 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGCHAR_HDLC
+#define DIAGCHAR_HDLC
+
+enum diag_send_state_enum_type {
+	DIAG_STATE_START,
+	DIAG_STATE_BUSY,
+	DIAG_STATE_CRC1,
+	DIAG_STATE_CRC2,
+	DIAG_STATE_TERM,
+	DIAG_STATE_COMPLETE
+};
+
+struct diag_send_desc_type {
+	const void *pkt;
+	const void *last;	/* Address of last byte to send. */
+	enum diag_send_state_enum_type state;
+	unsigned char terminate;	/* True if this fragment
+					   terminates the packet */
+};
+
+struct diag_hdlc_dest_type {
+	void *dest;
+	void *dest_last;
+	/* Below: internal use only */
+	uint16_t crc;
+};
+
+struct diag_hdlc_decode_type {
+	uint8_t *src_ptr;
+	unsigned int src_idx;
+	unsigned int src_size;
+	uint8_t *dest_ptr;
+	unsigned int dest_idx;
+	unsigned int dest_size;
+	int escaping;
+
+};
+
+void diag_hdlc_encode(struct diag_send_desc_type *src_desc,
+		      struct diag_hdlc_dest_type *enc);
+
+int diag_hdlc_decode(struct diag_hdlc_decode_type *hdlc);
+
+#define ESC_CHAR     0x7D
+#define CONTROL_CHAR 0x7E
+#define ESC_MASK     0x20
+
+#endif
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
new file mode 100644
index 0000000..433f09a
--- /dev/null
+++ b/drivers/char/diag/diagfwd.c
@@ -0,0 +1,1384 @@
+/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/diagchar.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#ifdef CONFIG_DIAG_OVER_USB
+#include <mach/usbdiag.h>
+#endif
+#include <mach/msm_smd.h>
+#include <mach/socinfo.h>
+#include <mach/restart.h>
+#include "diagmem.h"
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_cntl.h"
+#include "diagchar_hdlc.h"
+#ifdef CONFIG_DIAG_SDIO_PIPE
+#include "diagfwd_sdio.h"
+#endif
+#define MODE_CMD	41
+#define RESET_ID	2
+
+int diag_debug_buf_idx;
+unsigned char diag_debug_buf[1024];
+static unsigned int buf_tbl_size = 8; /*Number of entries in table of buffers */
+struct diag_master_table entry;
+
+struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
+struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
+
+#define ENCODE_RSP_AND_SEND(buf_length)				\
+do {									\
+	send.state = DIAG_STATE_START;					\
+	send.pkt = driver->apps_rsp_buf;				\
+	send.last = (void *)(driver->apps_rsp_buf + buf_length);	\
+	send.terminate = 1;						\
+	if (!driver->in_busy_1) {					\
+		enc.dest = driver->buf_in_1;				\
+		enc.dest_last = (void *)(driver->buf_in_1 + 499);	\
+		diag_hdlc_encode(&send, &enc);				\
+		driver->write_ptr_1->buf = driver->buf_in_1;		\
+		driver->write_ptr_1->length = (int)(enc.dest - \
+						(void *)(driver->buf_in_1)); \
+		usb_diag_write(driver->legacy_ch, driver->write_ptr_1);	\
+		memset(driver->apps_rsp_buf, '\0', 500);		\
+	}								\
+} while (0)
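+
+/*
+ * ENCODE_RSP_AND_SEND frames an apps-generated response held in apps_rsp_buf
+ * (buf_length is the index of the last payload byte), HDLC-encodes it into
+ * buf_in_1 when that buffer is free, and pushes it to the host over the
+ * legacy USB diag channel.
+ */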
+
+#define CHK_OVERFLOW(bufStart, start, end, length) \
+((bufStart <= start) && (end - start >= length)) ? 1 : 0
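+
+/*
+ * CHK_OVERFLOW(bufStart, start, end, length) evaluates to 1 when 'start' is
+ * not before 'bufStart' and at least 'length' bytes fit between 'start' and
+ * 'end', 0 otherwise.
+ */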
+
+int chk_config_get_id(void)
+{
+	switch (socinfo_get_id()) {
+	case APQ8060_MACHINE_ID:
+	case MSM8660_MACHINE_ID:
+		return APQ8060_TOOLS_ID;
+	case AO8960_MACHINE_ID:
+		return AO8960_TOOLS_ID;
+	default:
+		return 0;
+	}
+}
+
+void __diag_smd_send_req(void)
+{
+	void *buf = NULL;
+	int *in_busy_ptr = NULL;
+	struct diag_request *write_ptr_modem = NULL;
+
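+	/*
+	 * Two buffers are ping-ponged per peripheral: pick whichever one is
+	 * not currently queued for a consumer, read the pending SMD data into
+	 * it and mark it busy until the write path releases it.
+	 */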
+	if (!driver->in_busy_1) {
+		buf = driver->buf_in_1;
+		write_ptr_modem = driver->write_ptr_1;
+		in_busy_ptr = &(driver->in_busy_1);
+	} else if (!driver->in_busy_2) {
+		buf = driver->buf_in_2;
+		write_ptr_modem = driver->write_ptr_2;
+		in_busy_ptr = &(driver->in_busy_2);
+	}
+
+	if (driver->ch && buf) {
+		int r = smd_read_avail(driver->ch);
+
+		if (r > IN_BUF_SIZE) {
+			if (r < MAX_IN_BUF_SIZE) {
+				pr_err("diag: SMD sending in "
+						   "packets up to %d bytes", r);
+				buf = krealloc(buf, r, GFP_KERNEL);
+			} else {
+				pr_err("diag: SMD sending in "
+				"packets more than %d bytes", MAX_IN_BUF_SIZE);
+				return;
+			}
+		}
+		if (r > 0) {
+			if (!buf)
+				pr_info("Out of diagmem for Modem\n");
+			else {
+				APPEND_DEBUG('i');
+				smd_read(driver->ch, buf, r);
+				APPEND_DEBUG('j');
+				write_ptr_modem->length = r;
+				*in_busy_ptr = 1;
+				diag_device_write(buf, MODEM_DATA,
+							 write_ptr_modem);
+			}
+		}
+	}
+}
+
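+/*
+ * Route a filled buffer according to the current logging mode: queue it for
+ * a memory-device-mode reader, push it out over USB, or (in NO_LOGGING_MODE)
+ * drop it and immediately re-arm the SMD read for that peripheral.
+ */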
+int diag_device_write(void *buf, int proc_num, struct diag_request *write_ptr)
+{
+	int i, err = 0;
+
+	if (driver->logging_mode == MEMORY_DEVICE_MODE) {
+		if (proc_num == APPS_DATA) {
+			for (i = 0; i < driver->poolsize_write_struct; i++)
+				if (driver->buf_tbl[i].length == 0) {
+					driver->buf_tbl[i].buf = buf;
+					driver->buf_tbl[i].length =
+								 driver->used;
+#ifdef DIAG_DEBUG
+					pr_debug("diag: ENQUEUE buf ptr"
+						   " and length is %x , %d\n",
+						   (unsigned int)(driver->buf_
+				tbl[i].buf), driver->buf_tbl[i].length);
+#endif
+					break;
+				}
+		}
+		for (i = 0; i < driver->num_clients; i++)
+			if (driver->client_map[i].pid ==
+						 driver->logging_process_id)
+				break;
+		if (i < driver->num_clients) {
+			driver->data_ready[i] |= MEMORY_DEVICE_LOG_TYPE;
+			wake_up_interruptible(&driver->wait_q);
+		} else
+			return -EINVAL;
+	} else if (driver->logging_mode == NO_LOGGING_MODE) {
+		if (proc_num == MODEM_DATA) {
+			driver->in_busy_1 = 0;
+			driver->in_busy_2 = 0;
+			queue_work(driver->diag_wq, &(driver->
+							diag_read_smd_work));
+		} else if (proc_num == QDSP_DATA) {
+			driver->in_busy_qdsp_1 = 0;
+			driver->in_busy_qdsp_2 = 0;
+			queue_work(driver->diag_wq, &(driver->
+						diag_read_smd_qdsp_work));
+		}  else if (proc_num == WCNSS_DATA) {
+			driver->in_busy_wcnss = 0;
+			queue_work(driver->diag_wq, &(driver->
+				diag_read_smd_wcnss_work));
+		}
+		err = -1;
+	}
+#ifdef CONFIG_DIAG_OVER_USB
+	else if (driver->logging_mode == USB_MODE) {
+		if (proc_num == APPS_DATA) {
+			driver->write_ptr_svc = (struct diag_request *)
+			(diagmem_alloc(driver, sizeof(struct diag_request),
+				 POOL_TYPE_WRITE_STRUCT));
+			if (driver->write_ptr_svc) {
+				driver->write_ptr_svc->length = driver->used;
+				driver->write_ptr_svc->buf = buf;
+				err = usb_diag_write(driver->legacy_ch,
+						driver->write_ptr_svc);
+			} else
+				err = -1;
+		} else if (proc_num == MODEM_DATA) {
+			write_ptr->buf = buf;
+#ifdef DIAG_DEBUG
+			printk(KERN_INFO "writing data to USB,"
+				"pkt length %d\n", write_ptr->length);
+			print_hex_dump(KERN_DEBUG, "Written Packet Data to"
+					   " USB: ", 16, 1, DUMP_PREFIX_ADDRESS,
+					    buf, write_ptr->length, 1);
+#endif /* DIAG DEBUG */
+			err = usb_diag_write(driver->legacy_ch, write_ptr);
+		} else if (proc_num == QDSP_DATA) {
+			write_ptr->buf = buf;
+			err = usb_diag_write(driver->legacy_ch, write_ptr);
+		} else if (proc_num == WCNSS_DATA) {
+			write_ptr->buf = buf;
+			err = usb_diag_write(driver->legacy_ch, write_ptr);
+		}
+#ifdef CONFIG_DIAG_SDIO_PIPE
+		else if (proc_num == SDIO_DATA) {
+			if (machine_is_msm8x60_fusion() ||
+					machine_is_msm8x60_fusn_ffa()) {
+				write_ptr->buf = buf;
+				err = usb_diag_write(driver->mdm_ch, write_ptr);
+			} else
+				pr_err("diag: Incorrect data while USB write");
+		}
+#endif
+		APPEND_DEBUG('d');
+	}
+#endif /* DIAG OVER USB */
+	return err;
+}
+
+void __diag_smd_wcnss_send_req(void)
+{
+	void *buf = driver->buf_in_wcnss;
+	int *in_busy_wcnss_ptr = &(driver->in_busy_wcnss);
+	struct diag_request *write_ptr_wcnss = driver->write_ptr_wcnss;
+
+	if (driver->ch_wcnss && buf) {
+		int r = smd_read_avail(driver->ch_wcnss);
+		if (r > IN_BUF_SIZE) {
+			if (r < MAX_IN_BUF_SIZE) {
+				pr_err("diag: wcnss packets > %d bytes", r);
+				buf = krealloc(buf, r, GFP_KERNEL);
+			} else {
+				pr_err("diag: wcnss pkt > %d", MAX_IN_BUF_SIZE);
+				return;
+			}
+		}
+		if (r > 0) {
+			if (!buf) {
+				pr_err("Out of diagmem for wcnss\n");
+			} else {
+				APPEND_DEBUG('i');
+				smd_read(driver->ch_wcnss, buf, r);
+				APPEND_DEBUG('j');
+				write_ptr_wcnss->length = r;
+				*in_busy_wcnss_ptr = 1;
+				diag_device_write(buf, WCNSS_DATA,
+					 write_ptr_wcnss);
+			}
+		}
+	}
+}
+
+void __diag_smd_qdsp_send_req(void)
+{
+	void *buf = NULL;
+	int *in_busy_qdsp_ptr = NULL;
+	struct diag_request *write_ptr_qdsp = NULL;
+
+	if (!driver->in_busy_qdsp_1) {
+		buf = driver->buf_in_qdsp_1;
+		write_ptr_qdsp = driver->write_ptr_qdsp_1;
+		in_busy_qdsp_ptr = &(driver->in_busy_qdsp_1);
+	} else if (!driver->in_busy_qdsp_2) {
+		buf = driver->buf_in_qdsp_2;
+		write_ptr_qdsp = driver->write_ptr_qdsp_2;
+		in_busy_qdsp_ptr = &(driver->in_busy_qdsp_2);
+	}
+
+	if (driver->chqdsp && buf) {
+		int r = smd_read_avail(driver->chqdsp);
+
+		if (r > IN_BUF_SIZE) {
+			if (r < MAX_IN_BUF_SIZE) {
+				pr_err("diag: SMD sending in "
+						   "packets up to %d bytes", r);
+				buf = krealloc(buf, r, GFP_KERNEL);
+			} else {
+				pr_err("diag: SMD sending in "
+				"packets more than %d bytes", MAX_IN_BUF_SIZE);
+				return;
+			}
+		}
+		if (r > 0) {
+			if (!buf)
+				printk(KERN_INFO "Out of diagmem for QDSP\n");
+			else {
+				APPEND_DEBUG('i');
+				smd_read(driver->chqdsp, buf, r);
+				APPEND_DEBUG('j');
+				write_ptr_qdsp->length = r;
+				*in_busy_qdsp_ptr = 1;
+				diag_device_write(buf, QDSP_DATA,
+							 write_ptr_qdsp);
+			}
+		}
+	}
+}
+
+static void diag_print_mask_table(void)
+{
+/* Enable this to print mask table when updated */
+#ifdef MASK_DEBUG
+	int first;
+	int last;
+	uint8_t *ptr = driver->msg_masks;
+	int i = 0;
+
+	while (*(uint32_t *)(ptr + 4)) {
+		first = *(uint32_t *)ptr;
+		ptr += 4;
+		last = *(uint32_t *)ptr;
+		ptr += 4;
+		printk(KERN_INFO "SSID %d - %d\n", first, last);
+		for (i = 0 ; i <= last - first ; i++)
+			printk(KERN_INFO "MASK:%x\n", *((uint32_t *)ptr + i));
+		ptr += ((last - first) + 1)*4;
+
+	}
+#endif
+}
+
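+/*
+ * msg_masks is a sequence of variable-length records:
+ * [first_ssid (4 bytes)][last_ssid (4 bytes)][one 32-bit mask per SSID].
+ * Update the record covering [start, end] in place, or append a new record
+ * when no existing range contains it.
+ */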
+static void diag_update_msg_mask(int start, int end , uint8_t *buf)
+{
+	int found = 0;
+	int first;
+	int last;
+	uint8_t *ptr = driver->msg_masks;
+	uint8_t *ptr_buffer_start = &(*(driver->msg_masks));
+	uint8_t *ptr_buffer_end = &(*(driver->msg_masks)) + MSG_MASK_SIZE;
+
+	mutex_lock(&driver->diagchar_mutex);
+	/* First SSID can be zero : So check that last is non-zero */
+
+	while (*(uint32_t *)(ptr + 4)) {
+		first = *(uint32_t *)ptr;
+		ptr += 4;
+		last = *(uint32_t *)ptr;
+		ptr += 4;
+		if (start >= first && start <= last) {
+			ptr += (start - first)*4;
+			if (end <= last)
+				if (CHK_OVERFLOW(ptr_buffer_start, ptr,
+						  ptr_buffer_end,
+						  (((end - start)+1)*4)))
+					memcpy(ptr, buf , ((end - start)+1)*4);
+				else
+					printk(KERN_CRIT "Not enough"
+							 " buffer space for"
+							 " MSG_MASK\n");
+			else
+				printk(KERN_INFO "Unable to copy"
+						 " mask change\n");
+
+			found = 1;
+			break;
+		} else {
+			ptr += ((last - first) + 1)*4;
+		}
+	}
+	/* Entry was not found - add new table */
+	if (!found) {
+		if (CHK_OVERFLOW(ptr_buffer_start, ptr, ptr_buffer_end,
+				  8 + ((end - start) + 1)*4)) {
+			memcpy(ptr, &(start) , 4);
+			ptr += 4;
+			memcpy(ptr, &(end), 4);
+			ptr += 4;
+			memcpy(ptr, buf , ((end - start) + 1)*4);
+		} else
+			printk(KERN_CRIT " Not enough buffer"
+					 " space for MSG_MASK\n");
+	}
+	mutex_unlock(&driver->diagchar_mutex);
+	diag_print_mask_table();
+
+}
+
+static void diag_update_event_mask(uint8_t *buf, int toggle, int num_bits)
+{
+	uint8_t *ptr = driver->event_masks;
+	uint8_t *temp = buf + 2;
+
+	mutex_lock(&driver->diagchar_mutex);
+	if (!toggle)
+		memset(ptr, 0 , EVENT_MASK_SIZE);
+	else
+		if (CHK_OVERFLOW(ptr, ptr,
+				 ptr+EVENT_MASK_SIZE,
+				  num_bits/8 + 1))
+			memcpy(ptr, temp , num_bits/8 + 1);
+		else
+			printk(KERN_CRIT "Not enough buffer space "
+					 "for EVENT_MASK\n");
+	mutex_unlock(&driver->diagchar_mutex);
+}
+
+static void diag_update_log_mask(int equip_id, uint8_t *buf, int num_items)
+{
+	uint8_t *temp = buf;
+	struct mask_info {
+		int equip_id;
+		int index;
+	};
+	int i = 0;
+	unsigned char *ptr_data;
+	int offset = 8*MAX_EQUIP_ID;
+	struct mask_info *ptr = (struct mask_info *)driver->log_masks;
+
+	mutex_lock(&driver->diagchar_mutex);
+	/* Check if we already know index of this equipment ID */
+	for (i = 0; i < MAX_EQUIP_ID; i++) {
+		if ((ptr->equip_id == equip_id) && (ptr->index != 0)) {
+			offset = ptr->index;
+			break;
+		}
+		if ((ptr->equip_id == 0) && (ptr->index == 0)) {
+			/*Reached a null entry */
+			ptr->equip_id = equip_id;
+			ptr->index = driver->log_masks_length;
+			offset = driver->log_masks_length;
+			driver->log_masks_length += ((num_items+7)/8);
+			break;
+		}
+		ptr++;
+	}
+	ptr_data = driver->log_masks + offset;
+	if (CHK_OVERFLOW(driver->log_masks, ptr_data, driver->log_masks
+					 + LOG_MASK_SIZE, (num_items+7)/8))
+		memcpy(ptr_data, temp , (num_items+7)/8);
+	else
+		printk(KERN_CRIT " Not enough buffer space for LOG_MASK\n");
+	mutex_unlock(&driver->diagchar_mutex);
+}
+
+static void diag_update_pkt_buffer(unsigned char *buf)
+{
+	unsigned char *ptr = driver->pkt_buf;
+	unsigned char *temp = buf;
+
+	mutex_lock(&driver->diagchar_mutex);
+	if (CHK_OVERFLOW(ptr, ptr, ptr + PKT_SIZE, driver->pkt_length))
+		memcpy(ptr, temp , driver->pkt_length);
+	else
+		printk(KERN_CRIT " Not enough buffer space for PKT_RESP\n");
+	mutex_unlock(&driver->diagchar_mutex);
+}
+
+void diag_update_userspace_clients(unsigned int type)
+{
+	int i;
+
+	mutex_lock(&driver->diagchar_mutex);
+	for (i = 0; i < driver->num_clients; i++)
+		if (driver->client_map[i].pid != 0)
+			driver->data_ready[i] |= type;
+	wake_up_interruptible(&driver->wait_q);
+	mutex_unlock(&driver->diagchar_mutex);
+}
+
+void diag_update_sleeping_process(int process_id)
+{
+	int i;
+
+	mutex_lock(&driver->diagchar_mutex);
+	for (i = 0; i < driver->num_clients; i++)
+		if (driver->client_map[i].pid == process_id) {
+			driver->data_ready[i] |= PKT_TYPE;
+			break;
+		}
+	wake_up_interruptible(&driver->wait_q);
+	mutex_unlock(&driver->diagchar_mutex);
+}
+
+void diag_send_data(struct diag_master_table entry, unsigned char *buf,
+					 int len, int type)
+{
+	driver->pkt_length = len;
+	if (entry.process_id != NON_APPS_PROC && type != MODEM_DATA) {
+		diag_update_pkt_buffer(buf);
+		diag_update_sleeping_process(entry.process_id);
+	} else {
+		if (len > 0) {
+			if (entry.client_id == MODEM_PROC && driver->ch)
+				smd_write(driver->ch, buf, len);
+			else if (entry.client_id == QDSP_PROC &&
+							 driver->chqdsp)
+				smd_write(driver->chqdsp, buf, len);
+			else if (entry.client_id == WCNSS_PROC &&
+							 driver->ch_wcnss)
+				smd_write(driver->ch_wcnss, buf, len);
+			else
+				pr_alert("diag: incorrect channel");
+		}
+	}
+}
+
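+/*
+ * Dispatch a decoded packet: forward it to whichever client registered for
+ * the (cmd_code, subsys_id, subsys_cmd_code) tuple and handle the mask and
+ * polling commands the apps processor can answer locally. Returns 0 when the
+ * packet was handled here, non-zero when the caller should still pass it on
+ * to the modem.
+ */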
+static int diag_process_apps_pkt(unsigned char *buf, int len)
+{
+	uint16_t subsys_cmd_code;
+	int subsys_id, ssid_first, ssid_last, ssid_range;
+	int packet_type = 1, i, cmd_code;
+	unsigned char *temp = buf;
+	int data_type;
+#if defined(CONFIG_DIAG_OVER_USB)
+	int payload_length;
+	unsigned char *ptr;
+#endif
+
+	/* Check for registered clients and forward packet to appropriate proc */
+	cmd_code = (int)(*(char *)buf);
+	temp++;
+	subsys_id = (int)(*(char *)temp);
+	temp++;
+	subsys_cmd_code = *(uint16_t *)temp;
+	temp += 2;
+	data_type = APPS_DATA;
+	/* Don't send any command other than mode reset */
+	if (cpu_is_msm8960() && cmd_code == MODE_CMD) {
+		if (subsys_id != RESET_ID)
+			data_type = MODEM_DATA;
+	}
+
+	pr_debug("diag: %d %d %d", cmd_code, subsys_id, subsys_cmd_code);
+	for (i = 0; i < diag_max_registration; i++) {
+		entry = driver->table[i];
+		if (entry.process_id != NO_PROCESS) {
+			if (entry.cmd_code == cmd_code && entry.subsys_id ==
+				 subsys_id && entry.cmd_code_lo <=
+							 subsys_cmd_code &&
+				  entry.cmd_code_hi >= subsys_cmd_code) {
+				diag_send_data(entry, buf, len, data_type);
+				packet_type = 0;
+			} else if (entry.cmd_code == 255
+				  && cmd_code == 75) {
+				if (entry.subsys_id ==
+					subsys_id &&
+				   entry.cmd_code_lo <=
+					subsys_cmd_code &&
+					 entry.cmd_code_hi >=
+					subsys_cmd_code) {
+					diag_send_data(entry, buf, len,
+								 data_type);
+					packet_type = 0;
+				}
+			} else if (entry.cmd_code == 255 &&
+				  entry.subsys_id == 255) {
+				if (entry.cmd_code_lo <= cmd_code &&
+					entry.cmd_code_hi >= cmd_code) {
+					diag_send_data(entry, buf, len,
+								 data_type);
+					packet_type = 0;
+				}
+			}
+		}
+	}
+	/* set event mask */
+	if (*buf == 0x82) {
+		buf += 4;
+		diag_update_event_mask(buf, 1, *(uint16_t *)buf);
+		diag_update_userspace_clients(EVENT_MASKS_TYPE);
+	}
+	/* event mask change */
+	else if ((*buf == 0x60) && (*(buf+1) == 0x0)) {
+		diag_update_event_mask(buf+1, 0, 0);
+		diag_update_userspace_clients(EVENT_MASKS_TYPE);
+#if defined(CONFIG_DIAG_OVER_USB)
+		/* Check for Apps Only 8960 */
+		if (!(driver->ch) && (chk_config_get_id() == AO8960_TOOLS_ID)) {
+			/* echo response back for apps only DIAG */
+			driver->apps_rsp_buf[0] = 0x60;
+			driver->apps_rsp_buf[1] = 0x0;
+			driver->apps_rsp_buf[2] = 0x0;
+			ENCODE_RSP_AND_SEND(2);
+			return 0;
+		}
+#endif
+	}
+	/* Set log masks */
+	else if (*buf == 0x73 && *(int *)(buf+4) == 3) {
+		buf += 8;
+		/* Read Equip ID and pass as first param below*/
+		diag_update_log_mask(*(int *)buf, buf+8, *(int *)(buf+4));
+		diag_update_userspace_clients(LOG_MASKS_TYPE);
+#if defined(CONFIG_DIAG_OVER_USB)
+		/* Check for Apps Only 8960 */
+		if (!(driver->ch) && (chk_config_get_id() == AO8960_TOOLS_ID)) {
+			/* echo response back for Apps only DIAG */
+			driver->apps_rsp_buf[0] = 0x73;
+			*(int *)(driver->apps_rsp_buf + 4) = 0x3; /* op. ID */
+			*(int *)(driver->apps_rsp_buf + 8) = 0x0; /* success */
+			payload_length = 8 + ((*(int *)(buf + 4)) + 7)/8;
+			for (i = 0; i < payload_length; i++)
+				*(int *)(driver->apps_rsp_buf+12+i) =
+								 *(buf+8+i);
+			ENCODE_RSP_AND_SEND(12 + payload_length - 1);
+			return 0;
+		}
+#endif
+	}
+	/* Check for set message mask  */
+	else if ((*buf == 0x7d) && (*(buf+1) == 0x4)) {
+		ssid_first = *(uint16_t *)(buf + 2);
+		ssid_last = *(uint16_t *)(buf + 4);
+		ssid_range = 4 * (ssid_last - ssid_first + 1);
+		diag_update_msg_mask(ssid_first, ssid_last , buf + 8);
+		diag_update_userspace_clients(MSG_MASKS_TYPE);
+#if defined(CONFIG_DIAG_OVER_USB)
+		if (!(driver->ch) && (chk_config_get_id() == AO8960_TOOLS_ID)) {
+			/* echo response back for apps only DIAG */
+			for (i = 0; i < 8 + ssid_range; i++)
+				*(driver->apps_rsp_buf + i) = *(buf+i);
+			ENCODE_RSP_AND_SEND(8 + ssid_range - 1);
+			return 0;
+		}
+#endif
+	}
+#if defined(CONFIG_DIAG_OVER_USB)
+	/* Check for Apps Only 8960 & get event mask request */
+	else if (!(driver->ch) && (chk_config_get_id() == AO8960_TOOLS_ID)
+			  && *buf == 0x81) {
+		driver->apps_rsp_buf[0] = 0x81;
+		driver->apps_rsp_buf[1] = 0x0;
+		*(uint16_t *)(driver->apps_rsp_buf + 2) = 0x0;
+		*(uint16_t *)(driver->apps_rsp_buf + 4) = EVENT_LAST_ID + 1;
+		for (i = 0; i < EVENT_LAST_ID/8 + 1; i++)
+			*(unsigned char *)(driver->apps_rsp_buf + 6 + i) = 0x0;
+		ENCODE_RSP_AND_SEND(6 + EVENT_LAST_ID/8);
+		return 0;
+	}
+	/* Get log ID range & Check for Apps Only 8960 */
+	else if (!(driver->ch) && (chk_config_get_id() == AO8960_TOOLS_ID)
+			  && (*buf == 0x73) && *(int *)(buf+4) == 1) {
+		driver->apps_rsp_buf[0] = 0x73;
+		*(int *)(driver->apps_rsp_buf + 4) = 0x1; /* operation ID */
+		*(int *)(driver->apps_rsp_buf + 8) = 0x0; /* success code */
+		*(int *)(driver->apps_rsp_buf + 12) = LOG_GET_ITEM_NUM(LOG_0);
+		*(int *)(driver->apps_rsp_buf + 16) = LOG_GET_ITEM_NUM(LOG_1);
+		*(int *)(driver->apps_rsp_buf + 20) = LOG_GET_ITEM_NUM(LOG_2);
+		*(int *)(driver->apps_rsp_buf + 24) = LOG_GET_ITEM_NUM(LOG_3);
+		*(int *)(driver->apps_rsp_buf + 28) = LOG_GET_ITEM_NUM(LOG_4);
+		*(int *)(driver->apps_rsp_buf + 32) = LOG_GET_ITEM_NUM(LOG_5);
+		*(int *)(driver->apps_rsp_buf + 36) = LOG_GET_ITEM_NUM(LOG_6);
+		*(int *)(driver->apps_rsp_buf + 40) = LOG_GET_ITEM_NUM(LOG_7);
+		*(int *)(driver->apps_rsp_buf + 44) = LOG_GET_ITEM_NUM(LOG_8);
+		*(int *)(driver->apps_rsp_buf + 48) = LOG_GET_ITEM_NUM(LOG_9);
+		*(int *)(driver->apps_rsp_buf + 52) = LOG_GET_ITEM_NUM(LOG_10);
+		*(int *)(driver->apps_rsp_buf + 56) = LOG_GET_ITEM_NUM(LOG_11);
+		*(int *)(driver->apps_rsp_buf + 60) = LOG_GET_ITEM_NUM(LOG_12);
+		*(int *)(driver->apps_rsp_buf + 64) = LOG_GET_ITEM_NUM(LOG_13);
+		*(int *)(driver->apps_rsp_buf + 68) = LOG_GET_ITEM_NUM(LOG_14);
+		*(int *)(driver->apps_rsp_buf + 72) = LOG_GET_ITEM_NUM(LOG_15);
+		ENCODE_RSP_AND_SEND(75);
+		return 0;
+	}
+	/* Respond to Get SSID Range request message */
+	else if (!(driver->ch) && (chk_config_get_id() == AO8960_TOOLS_ID)
+			 && (*buf == 0x7d) && (*(buf+1) == 0x1)) {
+		driver->apps_rsp_buf[0] = 0x7d;
+		driver->apps_rsp_buf[1] = 0x1;
+		driver->apps_rsp_buf[2] = 0x1;
+		driver->apps_rsp_buf[3] = 0x0;
+		*(int *)(driver->apps_rsp_buf + 4) = MSG_MASK_TBL_CNT;
+		*(uint16_t *)(driver->apps_rsp_buf + 8) = MSG_SSID_0;
+		*(uint16_t *)(driver->apps_rsp_buf + 10) = MSG_SSID_0_LAST;
+		*(uint16_t *)(driver->apps_rsp_buf + 12) = MSG_SSID_1;
+		*(uint16_t *)(driver->apps_rsp_buf + 14) = MSG_SSID_1_LAST;
+		*(uint16_t *)(driver->apps_rsp_buf + 16) = MSG_SSID_2;
+		*(uint16_t *)(driver->apps_rsp_buf + 18) = MSG_SSID_2_LAST;
+		*(uint16_t *)(driver->apps_rsp_buf + 20) = MSG_SSID_3;
+		*(uint16_t *)(driver->apps_rsp_buf + 22) = MSG_SSID_3_LAST;
+		*(uint16_t *)(driver->apps_rsp_buf + 24) = MSG_SSID_4;
+		*(uint16_t *)(driver->apps_rsp_buf + 26) = MSG_SSID_4_LAST;
+		*(uint16_t *)(driver->apps_rsp_buf + 28) = MSG_SSID_5;
+		*(uint16_t *)(driver->apps_rsp_buf + 30) = MSG_SSID_5_LAST;
+		*(uint16_t *)(driver->apps_rsp_buf + 32) = MSG_SSID_6;
+		*(uint16_t *)(driver->apps_rsp_buf + 34) = MSG_SSID_6_LAST;
+		*(uint16_t *)(driver->apps_rsp_buf + 36) = MSG_SSID_7;
+		*(uint16_t *)(driver->apps_rsp_buf + 38) = MSG_SSID_7_LAST;
+		*(uint16_t *)(driver->apps_rsp_buf + 40) = MSG_SSID_8;
+		*(uint16_t *)(driver->apps_rsp_buf + 42) = MSG_SSID_8_LAST;
+		*(uint16_t *)(driver->apps_rsp_buf + 44) = MSG_SSID_9;
+		*(uint16_t *)(driver->apps_rsp_buf + 46) = MSG_SSID_9_LAST;
+		*(uint16_t *)(driver->apps_rsp_buf + 48) = MSG_SSID_10;
+		*(uint16_t *)(driver->apps_rsp_buf + 50) = MSG_SSID_10_LAST;
+		*(uint16_t *)(driver->apps_rsp_buf + 52) = MSG_SSID_11;
+		*(uint16_t *)(driver->apps_rsp_buf + 54) = MSG_SSID_11_LAST;
+		*(uint16_t *)(driver->apps_rsp_buf + 56) = MSG_SSID_12;
+		*(uint16_t *)(driver->apps_rsp_buf + 58) = MSG_SSID_12_LAST;
+		*(uint16_t *)(driver->apps_rsp_buf + 60) = MSG_SSID_13;
+		*(uint16_t *)(driver->apps_rsp_buf + 62) = MSG_SSID_13_LAST;
+		*(uint16_t *)(driver->apps_rsp_buf + 64) = MSG_SSID_14;
+		*(uint16_t *)(driver->apps_rsp_buf + 66) = MSG_SSID_14_LAST;
+		*(uint16_t *)(driver->apps_rsp_buf + 68) = MSG_SSID_15;
+		*(uint16_t *)(driver->apps_rsp_buf + 70) = MSG_SSID_15_LAST;
+		*(uint16_t *)(driver->apps_rsp_buf + 72) = MSG_SSID_16;
+		*(uint16_t *)(driver->apps_rsp_buf + 74) = MSG_SSID_16_LAST;
+		*(uint16_t *)(driver->apps_rsp_buf + 76) = MSG_SSID_17;
+		*(uint16_t *)(driver->apps_rsp_buf + 78) = MSG_SSID_17_LAST;
+		*(uint16_t *)(driver->apps_rsp_buf + 80) = MSG_SSID_18;
+		*(uint16_t *)(driver->apps_rsp_buf + 82) = MSG_SSID_18_LAST;
+		ENCODE_RSP_AND_SEND(83);
+		return 0;
+	}
+	/* Check for AO8960 Respond to Get Subsys Build mask */
+	else if (!(driver->ch) && (chk_config_get_id() == AO8960_TOOLS_ID)
+			 && (*buf == 0x7d) && (*(buf+1) == 0x2)) {
+		ssid_first = *(uint16_t *)(buf + 2);
+		ssid_last = *(uint16_t *)(buf + 4);
+		ssid_range = 4 * (ssid_last - ssid_first + 1);
+		/* frame response */
+		driver->apps_rsp_buf[0] = 0x7d;
+		driver->apps_rsp_buf[1] = 0x2;
+		*(uint16_t *)(driver->apps_rsp_buf + 2) = ssid_first;
+		*(uint16_t *)(driver->apps_rsp_buf + 4) = ssid_last;
+		driver->apps_rsp_buf[6] = 0x1;
+		driver->apps_rsp_buf[7] = 0x0;
+		ptr = driver->apps_rsp_buf + 8;
+		/* bld time masks */
+		switch (ssid_first) {
+		case MSG_SSID_0:
+			for (i = 0; i < ssid_range; i += 4)
+				*(int *)(ptr + i) = msg_bld_masks_0[i/4];
+			break;
+		case MSG_SSID_1:
+			for (i = 0; i < ssid_range; i += 4)
+				*(int *)(ptr + i) = msg_bld_masks_1[i/4];
+			break;
+		case MSG_SSID_2:
+			for (i = 0; i < ssid_range; i += 4)
+				*(int *)(ptr + i) = msg_bld_masks_2[i/4];
+			break;
+		case MSG_SSID_3:
+			for (i = 0; i < ssid_range; i += 4)
+				*(int *)(ptr + i) = msg_bld_masks_3[i/4];
+			break;
+		case MSG_SSID_4:
+			for (i = 0; i < ssid_range; i += 4)
+				*(int *)(ptr + i) = msg_bld_masks_4[i/4];
+			break;
+		case MSG_SSID_5:
+			for (i = 0; i < ssid_range; i += 4)
+				*(int *)(ptr + i) = msg_bld_masks_5[i/4];
+			break;
+		case MSG_SSID_6:
+			for (i = 0; i < ssid_range; i += 4)
+				*(int *)(ptr + i) = msg_bld_masks_6[i/4];
+			break;
+		case MSG_SSID_7:
+			for (i = 0; i < ssid_range; i += 4)
+				*(int *)(ptr + i) = msg_bld_masks_7[i/4];
+			break;
+		case MSG_SSID_8:
+			for (i = 0; i < ssid_range; i += 4)
+				*(int *)(ptr + i) = msg_bld_masks_8[i/4];
+			break;
+		case MSG_SSID_9:
+			for (i = 0; i < ssid_range; i += 4)
+				*(int *)(ptr + i) = msg_bld_masks_9[i/4];
+			break;
+		case MSG_SSID_10:
+			for (i = 0; i < ssid_range; i += 4)
+				*(int *)(ptr + i) = msg_bld_masks_10[i/4];
+			break;
+		case MSG_SSID_11:
+			for (i = 0; i < ssid_range; i += 4)
+				*(int *)(ptr + i) = msg_bld_masks_11[i/4];
+			break;
+		case MSG_SSID_12:
+			for (i = 0; i < ssid_range; i += 4)
+				*(int *)(ptr + i) = msg_bld_masks_12[i/4];
+			break;
+		case MSG_SSID_13:
+			for (i = 0; i < ssid_range; i += 4)
+				*(int *)(ptr + i) = msg_bld_masks_13[i/4];
+			break;
+		case MSG_SSID_14:
+			for (i = 0; i < ssid_range; i += 4)
+				*(int *)(ptr + i) = msg_bld_masks_14[i/4];
+			break;
+		case MSG_SSID_15:
+			for (i = 0; i < ssid_range; i += 4)
+				*(int *)(ptr + i) = msg_bld_masks_15[i/4];
+			break;
+		case MSG_SSID_16:
+			for (i = 0; i < ssid_range; i += 4)
+				*(int *)(ptr + i) = msg_bld_masks_16[i/4];
+			break;
+		case MSG_SSID_17:
+			for (i = 0; i < ssid_range; i += 4)
+				*(int *)(ptr + i) = msg_bld_masks_17[i/4];
+			break;
+		case MSG_SSID_18:
+			for (i = 0; i < ssid_range; i += 4)
+				*(int *)(ptr + i) = msg_bld_masks_18[i/4];
+			break;
+		}
+		ENCODE_RSP_AND_SEND(8 + ssid_range - 1);
+		return 0;
+	}
+	/* Check for download command */
+	else if ((cpu_is_msm8x60() || cpu_is_msm8960()) && (*buf == 0x3A)) {
+		/* send response back */
+		driver->apps_rsp_buf[0] = *buf;
+		ENCODE_RSP_AND_SEND(0);
+		msleep(5000);
+		/* call download API */
+		msm_set_restart_mode(RESTART_DLOAD);
+		printk(KERN_CRIT "diag: download mode set, Rebooting SoC..\n");
+		kernel_restart(NULL);
+		/* Not required; indicates the command is not sent to the modem */
+		return 0;
+	}
+	/* Handle commands locally when no modem channel is present */
+	else if (!(driver->ch)) {
+		/* Respond to polling for Apps only DIAG */
+		if ((*buf == 0x4b) && (*(buf+1) == 0x32) &&
+							 (*(buf+2) == 0x03)) {
+			for (i = 0; i < 3; i++)
+				driver->apps_rsp_buf[i] = *(buf+i);
+			for (i = 0; i < 13; i++)
+				driver->apps_rsp_buf[i+3] = 0;
+
+			ENCODE_RSP_AND_SEND(15);
+			return 0;
+		}
+		/* respond to 0x0 command */
+		else if (*buf == 0x00) {
+			for (i = 0; i < 55; i++)
+				driver->apps_rsp_buf[i] = 0;
+
+			ENCODE_RSP_AND_SEND(54);
+			return 0;
+		}
+		/* respond to 0x7c command */
+		else if (*buf == 0x7c) {
+			driver->apps_rsp_buf[0] = 0x7c;
+			for (i = 1; i < 8; i++)
+				driver->apps_rsp_buf[i] = 0;
+			/* Tools ID for APQ 8060 */
+			*(int *)(driver->apps_rsp_buf + 8) =
+							 chk_config_get_id();
+			*(unsigned char *)(driver->apps_rsp_buf + 12) = '\0';
+			*(unsigned char *)(driver->apps_rsp_buf + 13) = '\0';
+			ENCODE_RSP_AND_SEND(13);
+			return 0;
+		}
+	}
+#endif
+		return packet_type;
+}
+
+#ifdef CONFIG_DIAG_OVER_USB
+void diag_send_error_rsp(int index)
+{
+	int i;
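+	/* DIAG "bad command" response: 0x13 followed by a copy of the
+	 * offending request from the HDLC buffer */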
+	driver->apps_rsp_buf[0] = 0x13; /* error code 13 */
+	for (i = 0; i < index; i++)
+		driver->apps_rsp_buf[i+1] = *(driver->hdlc_buf+i);
+	ENCODE_RSP_AND_SEND(index - 3);
+}
+#else
+static inline void diag_send_error_rsp(int index) {}
+#endif
+
+void diag_process_hdlc(void *data, unsigned len)
+{
+	struct diag_hdlc_decode_type hdlc;
+	int ret, type = 0;
+#ifdef DIAG_DEBUG
+	int i;
+#endif
+	pr_debug("diag: HDLC decode fn, len of data %d\n", len);
+	hdlc.dest_ptr = driver->hdlc_buf;
+	hdlc.dest_size = USB_MAX_OUT_BUF;
+	hdlc.src_ptr = data;
+	hdlc.src_size = len;
+	hdlc.src_idx = 0;
+	hdlc.dest_idx = 0;
+	hdlc.escaping = 0;
+
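+	/* decode the HDLC-framed request from USB into hdlc_buf; on success
+	 * dest_idx covers the payload plus 2 CRC bytes and the trailing 0x7E */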
+	ret = diag_hdlc_decode(&hdlc);
+
+	if (ret)
+		type = diag_process_apps_pkt(driver->hdlc_buf,
+							  hdlc.dest_idx - 3);
+	else if (driver->debug_flag) {
+		printk(KERN_ERR "Packet dropped due to bad HDLC coding/CRC"
+				" errors or partial packet received, packet"
+				" length = %d\n", len);
+		print_hex_dump(KERN_DEBUG, "Dropped Packet Data: ", 16, 1,
+					   DUMP_PREFIX_ADDRESS, data, len, 1);
+		driver->debug_flag = 0;
+	}
+	/* send error responses from APPS for Central Routing */
+	if (type == 1 && chk_config_get_id() == AO8960_TOOLS_ID) {
+		diag_send_error_rsp(hdlc.dest_idx);
+		type = 0;
+	}
+	/* implies this packet is NOT meant for apps */
+	if (!(driver->ch) && type == 1) {
+		if (chk_config_get_id() == AO8960_TOOLS_ID) {
+			diag_send_error_rsp(hdlc.dest_idx);
+		} else { /* APQ 8060, Let Q6 respond */
+			if (driver->chqdsp)
+				smd_write(driver->chqdsp, driver->hdlc_buf,
+						  hdlc.dest_idx - 3);
+		}
+		type = 0;
+	}
+
+#ifdef DIAG_DEBUG
+	pr_debug("diag: hdlc.dest_idx = %d", hdlc.dest_idx);
+	for (i = 0; i < hdlc.dest_idx; i++)
+		printk(KERN_DEBUG "\t%x", *(((unsigned char *)
+							driver->hdlc_buf)+i));
+#endif /* DIAG DEBUG */
+	/* ignore 2 bytes for CRC, one for 7E and send */
+	if ((driver->ch) && (ret) && (type) && (hdlc.dest_idx > 3)) {
+		APPEND_DEBUG('g');
+		smd_write(driver->ch, driver->hdlc_buf, hdlc.dest_idx - 3);
+		APPEND_DEBUG('h');
+#ifdef DIAG_DEBUG
+		printk(KERN_INFO "writing data to SMD, pkt length %d\n", len);
+		print_hex_dump(KERN_DEBUG, "Written Packet Data to SMD: ", 16,
+			       1, DUMP_PREFIX_ADDRESS, data, len, 1);
+#endif /* DIAG DEBUG */
+	}
+}
+
+#ifdef CONFIG_DIAG_OVER_USB
+#define N_LEGACY_WRITE	(driver->poolsize + 5) /* 2+1 for modem ; 2 for q6 */
+#define N_LEGACY_READ	1
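+/* only one USB read request is kept in flight at a time
+ * (see diag_read_work_fn), so a single read buffer is enough */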
+
+int diagfwd_connect(void)
+{
+	int err;
+
+	printk(KERN_DEBUG "diag: USB connected\n");
+	err = usb_diag_alloc_req(driver->legacy_ch, N_LEGACY_WRITE,
+			N_LEGACY_READ);
+	if (err)
+		printk(KERN_ERR "diag: unable to alloc USB req on legacy ch");
+
+	driver->usb_connected = 1;
+	driver->in_busy_1 = 0;
+	driver->in_busy_2 = 0;
+	driver->in_busy_qdsp_1 = 0;
+	driver->in_busy_qdsp_2 = 0;
+	driver->in_busy_wcnss = 0;
+
+	/* Poll SMD channels to check for data*/
+	queue_work(driver->diag_wq, &(driver->diag_read_smd_work));
+	queue_work(driver->diag_wq, &(driver->diag_read_smd_qdsp_work));
+	queue_work(driver->diag_wq, &(driver->diag_read_smd_wcnss_work));
+	/* Poll USB channel to check for data*/
+	queue_work(driver->diag_wq, &(driver->diag_read_work));
+#ifdef CONFIG_DIAG_SDIO_PIPE
+	if (machine_is_msm8x60_fusion() || machine_is_msm8x60_fusn_ffa()) {
+		if (driver->mdm_ch && !IS_ERR(driver->mdm_ch))
+			diagfwd_connect_sdio();
+		else
+			printk(KERN_INFO "diag: No USB MDM ch");
+	}
+#endif
+	return 0;
+}
+
+int diagfwd_disconnect(void)
+{
+	printk(KERN_DEBUG "diag: USB disconnected\n");
+	driver->usb_connected = 0;
+	driver->in_busy_1 = 1;
+	driver->in_busy_2 = 1;
+	driver->in_busy_qdsp_1 = 1;
+	driver->in_busy_qdsp_2 = 1;
+	driver->in_busy_wcnss = 1;
+	driver->debug_flag = 1;
+	usb_diag_free_req(driver->legacy_ch);
+#ifdef CONFIG_DIAG_SDIO_PIPE
+	if (machine_is_msm8x60_fusion() || machine_is_msm8x60_fusn_ffa())
+		if (driver->mdm_ch && !IS_ERR(driver->mdm_ch))
+			diagfwd_disconnect_sdio();
+#endif
+	/* TBD - notify and flow control SMD */
+	return 0;
+}
+
+int diagfwd_write_complete(struct diag_request *diag_write_ptr)
+{
+	unsigned char *buf = diag_write_ptr->buf;
+	/* Determine if the write complete is for data from modem/apps/q6 */
+	/* Need a context variable here instead */
+	if (buf == (void *)driver->buf_in_1) {
+		driver->in_busy_1 = 0;
+		APPEND_DEBUG('o');
+		queue_work(driver->diag_wq, &(driver->diag_read_smd_work));
+	} else if (buf == (void *)driver->buf_in_2) {
+		driver->in_busy_2 = 0;
+		APPEND_DEBUG('O');
+		queue_work(driver->diag_wq, &(driver->diag_read_smd_work));
+	} else if (buf == (void *)driver->buf_in_qdsp_1) {
+		driver->in_busy_qdsp_1 = 0;
+		APPEND_DEBUG('p');
+		queue_work(driver->diag_wq, &(driver->diag_read_smd_qdsp_work));
+	} else if (buf == (void *)driver->buf_in_qdsp_2) {
+		driver->in_busy_qdsp_2 = 0;
+		APPEND_DEBUG('P');
+		queue_work(driver->diag_wq, &(driver->diag_read_smd_qdsp_work));
+	} else if (buf == (void *)driver->buf_in_wcnss) {
+		driver->in_busy_wcnss = 0;
+		APPEND_DEBUG('R');
+		queue_work(driver->diag_wq,
+			 &(driver->diag_read_smd_wcnss_work));
+	}
+#ifdef CONFIG_DIAG_SDIO_PIPE
+	else if (buf == (void *)driver->buf_in_sdio) {
+		if (machine_is_msm8x60_fusion() ||
+					 machine_is_msm8x60_fusn_ffa())
+			diagfwd_write_complete_sdio();
+		else
+			pr_err("diag: Incorrect buffer pointer while WRITE");
+	}
+#endif
+	else {
+		diagmem_free(driver, (unsigned char *)buf, POOL_TYPE_HDLC);
+		diagmem_free(driver, (unsigned char *)diag_write_ptr,
+						 POOL_TYPE_WRITE_STRUCT);
+		APPEND_DEBUG('q');
+	}
+	return 0;
+}
+
+int diagfwd_read_complete(struct diag_request *diag_read_ptr)
+{
+	int status = diag_read_ptr->status;
+	unsigned char *buf = diag_read_ptr->buf;
+
+	/* Determine if the read complete is for data on legacy/mdm ch */
+	if (buf == (void *)driver->usb_buf_out) {
+		driver->read_len_legacy = diag_read_ptr->actual;
+		APPEND_DEBUG('s');
+#ifdef DIAG_DEBUG
+		printk(KERN_INFO "read data from USB, pkt length %d",
+		    diag_read_ptr->actual);
+		print_hex_dump(KERN_DEBUG, "Read Packet Data from USB: ", 16, 1,
+		       DUMP_PREFIX_ADDRESS, diag_read_ptr->buf,
+		       diag_read_ptr->actual, 1);
+#endif /* DIAG DEBUG */
+		if (driver->logging_mode == USB_MODE) {
+			if (status != -ECONNRESET && status != -ESHUTDOWN)
+				queue_work(driver->diag_wq,
+					&(driver->diag_proc_hdlc_work));
+			else
+				queue_work(driver->diag_wq,
+						 &(driver->diag_read_work));
+		}
+	}
+#ifdef CONFIG_DIAG_SDIO_PIPE
+	else if (buf == (void *)driver->usb_buf_mdm_out) {
+		if (machine_is_msm8x60_fusion() ||
+					 machine_is_msm8x60_fusn_ffa()) {
+			driver->read_len_mdm = diag_read_ptr->actual;
+			diagfwd_read_complete_sdio();
+		} else
+			pr_err("diag: Incorrect buffer pointer while READ");
+	}
+#endif
+	else
+		printk(KERN_ERR "diag: Unknown buffer ptr from USB");
+
+	return 0;
+}
+
+void diag_read_work_fn(struct work_struct *work)
+{
+	APPEND_DEBUG('d');
+	driver->usb_read_ptr->buf = driver->usb_buf_out;
+	driver->usb_read_ptr->length = USB_MAX_OUT_BUF;
+	usb_diag_read(driver->legacy_ch, driver->usb_read_ptr);
+	APPEND_DEBUG('e');
+}
+
+void diag_process_hdlc_fn(struct work_struct *work)
+{
+	APPEND_DEBUG('D');
+	diag_process_hdlc(driver->usb_buf_out, driver->read_len_legacy);
+	diag_read_work_fn(work);
+	APPEND_DEBUG('E');
+}
+
+void diag_usb_legacy_notifier(void *priv, unsigned event,
+			struct diag_request *d_req)
+{
+	switch (event) {
+	case USB_DIAG_CONNECT:
+		diagfwd_connect();
+		break;
+	case USB_DIAG_DISCONNECT:
+		diagfwd_disconnect();
+		break;
+	case USB_DIAG_READ_DONE:
+		diagfwd_read_complete(d_req);
+		break;
+	case USB_DIAG_WRITE_DONE:
+		diagfwd_write_complete(d_req);
+		break;
+	default:
+		printk(KERN_ERR "Unknown event from USB diag\n");
+		break;
+	}
+}
+
+#endif /* DIAG OVER USB */
+
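+/* SMD notify callbacks may run in interrupt context, so channel reads are
+ * deferred to the diag workqueue */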
+static void diag_smd_notify(void *ctxt, unsigned event)
+{
+	queue_work(driver->diag_wq, &(driver->diag_read_smd_work));
+}
+
+#if defined(CONFIG_MSM_N_WAY_SMD)
+static void diag_smd_qdsp_notify(void *ctxt, unsigned event)
+{
+	queue_work(driver->diag_wq, &(driver->diag_read_smd_qdsp_work));
+}
+#endif
+
+static void diag_smd_wcnss_notify(void *ctxt, unsigned event)
+{
+	queue_work(driver->diag_wq, &(driver->diag_read_smd_wcnss_work));
+}
+
+static int diag_smd_probe(struct platform_device *pdev)
+{
+	int r = 0;
+
+	if (pdev->id == SMD_APPS_MODEM)
+		r = smd_open("DIAG", &driver->ch, driver, diag_smd_notify);
+#if defined(CONFIG_MSM_N_WAY_SMD)
+	if (pdev->id == SMD_APPS_QDSP)
+		r = smd_named_open_on_edge("DIAG", SMD_APPS_QDSP,
+			&driver->chqdsp, driver, diag_smd_qdsp_notify);
+#endif
+	if (pdev->id == SMD_APPS_WCNSS)
+		r = smd_named_open_on_edge("APPS_RIVA_DATA", SMD_APPS_WCNSS,
+			&driver->ch_wcnss, driver, diag_smd_wcnss_notify);
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	pr_debug("diag: open SMD port, Id = %d, r = %d\n", pdev->id, r);
+
+	return 0;
+}
+
+static int diagfwd_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: suspending...\n");
+	return 0;
+}
+
+static int diagfwd_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: resuming...\n");
+	return 0;
+}
+
+static const struct dev_pm_ops diagfwd_dev_pm_ops = {
+	.runtime_suspend = diagfwd_runtime_suspend,
+	.runtime_resume = diagfwd_runtime_resume,
+};
+
+static struct platform_driver msm_smd_ch1_driver = {
+	.probe = diag_smd_probe,
+	.driver = {
+		   .name = "DIAG",
+		   .owner = THIS_MODULE,
+		   .pm   = &diagfwd_dev_pm_ops,
+		   },
+};
+
+static struct platform_driver diag_smd_lite_driver = {
+	.probe = diag_smd_probe,
+	.driver = {
+		   .name = "APPS_RIVA_DATA",
+		   .owner = THIS_MODULE,
+		   .pm   = &diagfwd_dev_pm_ops,
+		   },
+};
+
+void diagfwd_init(void)
+{
+	diag_debug_buf_idx = 0;
+	driver->read_len_legacy = 0;
+	if (driver->buf_in_1 == NULL) {
+		driver->buf_in_1 = kzalloc(IN_BUF_SIZE, GFP_KERNEL);
+		if (driver->buf_in_1 == NULL)
+			goto err;
+	}
+	if (driver->buf_in_2 == NULL) {
+		driver->buf_in_2 = kzalloc(IN_BUF_SIZE, GFP_KERNEL);
+		if (driver->buf_in_2 == NULL)
+			goto err;
+	}
+	if (driver->buf_in_qdsp_1 == NULL) {
+		driver->buf_in_qdsp_1 = kzalloc(IN_BUF_SIZE, GFP_KERNEL);
+		if (driver->buf_in_qdsp_1 == NULL)
+			goto err;
+	}
+	if (driver->buf_in_qdsp_2 == NULL) {
+		driver->buf_in_qdsp_2 = kzalloc(IN_BUF_SIZE, GFP_KERNEL);
+		if (driver->buf_in_qdsp_2 == NULL)
+			goto err;
+	}
+	if (driver->buf_in_wcnss == NULL) {
+		driver->buf_in_wcnss = kzalloc(IN_BUF_SIZE, GFP_KERNEL);
+		if (driver->buf_in_wcnss == NULL)
+			goto err;
+	}
+	if (driver->usb_buf_out  == NULL &&
+	     (driver->usb_buf_out = kzalloc(USB_MAX_OUT_BUF,
+					 GFP_KERNEL)) == NULL)
+		goto err;
+	if (driver->hdlc_buf == NULL
+	    && (driver->hdlc_buf = kzalloc(HDLC_MAX, GFP_KERNEL)) == NULL)
+		goto err;
+	if (driver->msg_masks == NULL
+	    && (driver->msg_masks = kzalloc(MSG_MASK_SIZE,
+					     GFP_KERNEL)) == NULL)
+		goto err;
+	if (driver->log_masks == NULL &&
+	    (driver->log_masks = kzalloc(LOG_MASK_SIZE, GFP_KERNEL)) == NULL)
+		goto err;
+	driver->log_masks_length = 8*MAX_EQUIP_ID;
+	if (driver->event_masks == NULL &&
+	    (driver->event_masks = kzalloc(EVENT_MASK_SIZE,
+					    GFP_KERNEL)) == NULL)
+		goto err;
+	if (driver->client_map == NULL &&
+	    (driver->client_map = kzalloc
+	     ((driver->num_clients) * sizeof(struct diag_client_map),
+		   GFP_KERNEL)) == NULL)
+		goto err;
+	if (driver->buf_tbl == NULL)
+		driver->buf_tbl = kzalloc(buf_tbl_size *
+			sizeof(struct diag_write_device), GFP_KERNEL);
+	if (driver->buf_tbl == NULL)
+		goto err;
+	if (driver->data_ready == NULL &&
+	     (driver->data_ready = kzalloc(driver->num_clients * sizeof(int)
+							, GFP_KERNEL)) == NULL)
+		goto err;
+	if (driver->table == NULL &&
+	     (driver->table = kzalloc(diag_max_registration*
+		      sizeof(struct diag_master_table),
+		       GFP_KERNEL)) == NULL)
+		goto err;
+	if (driver->write_ptr_1 == NULL) {
+		driver->write_ptr_1 = kzalloc(
+			sizeof(struct diag_request), GFP_KERNEL);
+		if (driver->write_ptr_1 == NULL)
+			goto err;
+	}
+	if (driver->write_ptr_2 == NULL) {
+		driver->write_ptr_2 = kzalloc(
+			sizeof(struct diag_request), GFP_KERNEL);
+		if (driver->write_ptr_2 == NULL)
+			goto err;
+	}
+	if (driver->write_ptr_qdsp_1 == NULL) {
+		driver->write_ptr_qdsp_1 = kzalloc(
+			sizeof(struct diag_request), GFP_KERNEL);
+		if (driver->write_ptr_qdsp_1 == NULL)
+			goto err;
+	}
+	if (driver->write_ptr_qdsp_2 == NULL) {
+		driver->write_ptr_qdsp_2 = kzalloc(
+			sizeof(struct diag_request), GFP_KERNEL);
+		if (driver->write_ptr_qdsp_2 == NULL)
+			goto err;
+	}
+	if (driver->write_ptr_wcnss == NULL) {
+		driver->write_ptr_wcnss = kzalloc(
+			sizeof(struct diag_request), GFP_KERNEL);
+		if (driver->write_ptr_wcnss == NULL)
+			goto err;
+	}
+	if (driver->usb_read_ptr == NULL) {
+		driver->usb_read_ptr = kzalloc(
+			sizeof(struct diag_request), GFP_KERNEL);
+		if (driver->usb_read_ptr == NULL)
+			goto err;
+	}
+	if (driver->pkt_buf == NULL &&
+	     (driver->pkt_buf = kzalloc(PKT_SIZE,
+			 GFP_KERNEL)) == NULL)
+		goto err;
+	if (driver->apps_rsp_buf == NULL) {
+		driver->apps_rsp_buf = kzalloc(500, GFP_KERNEL);
+		if (driver->apps_rsp_buf == NULL)
+			goto err;
+	}
+	driver->diag_wq = create_singlethread_workqueue("diag_wq");
+#ifdef CONFIG_DIAG_OVER_USB
+	INIT_WORK(&(driver->diag_proc_hdlc_work), diag_process_hdlc_fn);
+	INIT_WORK(&(driver->diag_read_work), diag_read_work_fn);
+	driver->legacy_ch = usb_diag_open(DIAG_LEGACY, driver,
+			diag_usb_legacy_notifier);
+	if (IS_ERR(driver->legacy_ch)) {
+		printk(KERN_ERR "Unable to open USB diag legacy channel\n");
+		goto err;
+	}
+#endif
+	platform_driver_register(&msm_smd_ch1_driver);
+	platform_driver_register(&diag_smd_lite_driver);
+
+	return;
+err:
+		pr_err("diag: Could not initialize diag buffers");
+		kfree(driver->buf_in_1);
+		kfree(driver->buf_in_2);
+		kfree(driver->buf_in_qdsp_1);
+		kfree(driver->buf_in_qdsp_2);
+		kfree(driver->buf_in_wcnss);
+		kfree(driver->usb_buf_out);
+		kfree(driver->hdlc_buf);
+		kfree(driver->msg_masks);
+		kfree(driver->log_masks);
+		kfree(driver->event_masks);
+		kfree(driver->client_map);
+		kfree(driver->buf_tbl);
+		kfree(driver->data_ready);
+		kfree(driver->table);
+		kfree(driver->pkt_buf);
+		kfree(driver->write_ptr_1);
+		kfree(driver->write_ptr_2);
+		kfree(driver->write_ptr_qdsp_1);
+		kfree(driver->write_ptr_qdsp_2);
+		kfree(driver->write_ptr_wcnss);
+		kfree(driver->usb_read_ptr);
+		kfree(driver->apps_rsp_buf);
+		if (driver->diag_wq)
+			destroy_workqueue(driver->diag_wq);
+}
+
+void diagfwd_exit(void)
+{
+	smd_close(driver->ch);
+	smd_close(driver->chqdsp);
+	smd_close(driver->ch_wcnss);
+	driver->ch = 0;		/* SMD can make this NULL */
+	driver->chqdsp = 0;
+	driver->ch_wcnss = 0;
+#ifdef CONFIG_DIAG_OVER_USB
+	if (driver->usb_connected)
+		usb_diag_free_req(driver->legacy_ch);
+	usb_diag_close(driver->legacy_ch);
+#endif
+	platform_driver_unregister(&msm_smd_ch1_driver);
+	platform_driver_unregister(&diag_smd_lite_driver);
+	kfree(driver->buf_in_1);
+	kfree(driver->buf_in_2);
+	kfree(driver->buf_in_qdsp_1);
+	kfree(driver->buf_in_qdsp_2);
+	kfree(driver->buf_in_wcnss);
+	kfree(driver->usb_buf_out);
+	kfree(driver->hdlc_buf);
+	kfree(driver->msg_masks);
+	kfree(driver->log_masks);
+	kfree(driver->event_masks);
+	kfree(driver->client_map);
+	kfree(driver->buf_tbl);
+	kfree(driver->data_ready);
+	kfree(driver->table);
+	kfree(driver->pkt_buf);
+	kfree(driver->write_ptr_1);
+	kfree(driver->write_ptr_2);
+	kfree(driver->write_ptr_qdsp_1);
+	kfree(driver->write_ptr_qdsp_2);
+	kfree(driver->write_ptr_wcnss);
+	kfree(driver->usb_read_ptr);
+	kfree(driver->apps_rsp_buf);
+	destroy_workqueue(driver->diag_wq);
+}
diff --git a/drivers/char/diag/diagfwd.h b/drivers/char/diag/diagfwd.h
new file mode 100644
index 0000000..cc24cbc
--- /dev/null
+++ b/drivers/char/diag/diagfwd.h
@@ -0,0 +1,38 @@
+/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_H
+#define DIAGFWD_H
+
+#define NO_PROCESS	0
+#define NON_APPS_PROC	-1
+
+void diagfwd_init(void);
+void diagfwd_exit(void);
+void diag_process_hdlc(void *data, unsigned len);
+void __diag_smd_send_req(void);
+void __diag_smd_qdsp_send_req(void);
+void __diag_smd_wcnss_send_req(void);
+void diag_usb_legacy_notifier(void *, unsigned, struct diag_request *);
+long diagchar_ioctl(struct file *, unsigned int, unsigned long);
+int diag_device_write(void *, int, struct diag_request *);
+int mask_request_validate(unsigned char mask_buf[]);
+int chk_config_get_id(void);
+/* State for diag forwarding */
+#ifdef CONFIG_DIAG_OVER_USB
+int diagfwd_connect(void);
+int diagfwd_disconnect(void);
+#endif
+extern int diag_debug_buf_idx;
+extern unsigned char diag_debug_buf[1024];
+
+#endif
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
new file mode 100644
index 0000000..45226ba
--- /dev/null
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -0,0 +1,226 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/diagchar.h>
+#include <linux/platform_device.h>
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_cntl.h"
+
+#define HDR_SIZ 8
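+/* control packets start with a 4-byte type field and a 4-byte length field */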
+
+static void diag_smd_cntl_send_req(int proc_num)
+{
+	int data_len = 0, type = -1, count_bytes = 0, j, r;
+	struct bindpkt_params_per_process *pkt_params =
+		 kzalloc(sizeof(struct bindpkt_params_per_process), GFP_KERNEL);
+	struct diag_ctrl_msg *msg;
+	struct cmd_code_range *range;
+	struct bindpkt_params *temp;
+	void *buf = NULL;
+	smd_channel_t *smd_ch = NULL;
+
+	if (proc_num == MODEM_PROC) {
+		buf = driver->buf_in_cntl;
+		smd_ch = driver->ch_cntl;
+	} else if (proc_num == QDSP_PROC) {
+		buf = driver->buf_in_qdsp_cntl;
+		smd_ch = driver->chqdsp_cntl;
+	} else if (proc_num == WCNSS_PROC) {
+		buf = driver->buf_in_wcnss_cntl;
+		smd_ch = driver->ch_wcnss_cntl;
+	}
+
+	if (!pkt_params || !smd_ch || !buf) {
+		kfree(pkt_params);
+		return;
+	}
+
+	r = smd_read_avail(smd_ch);
+	if (r > IN_BUF_SIZE) {
+		if (r < MAX_IN_BUF_SIZE) {
+			pr_err("diag: SMD CNTL sending pkt upto %d bytes", r);
+			buf = krealloc(buf, r, GFP_KERNEL);
+		} else {
+			pr_err("diag: CNTL pkt > %d bytes", MAX_IN_BUF_SIZE);
+			kfree(pkt_params);
+			return;
+		}
+	}
+	if (buf && r > 0) {
+		smd_read(smd_ch, buf, r);
+		while (count_bytes + HDR_SIZ <= r) {
+			type = *(uint32_t *)(buf);
+			data_len = *(uint32_t *)(buf + 4);
+			count_bytes = count_bytes+HDR_SIZ+data_len;
+			if (type == DIAG_CTRL_MSG_REG && r >= count_bytes) {
+				msg = buf+HDR_SIZ;
+				range = buf+HDR_SIZ+
+						sizeof(struct diag_ctrl_msg);
+				pkt_params->count = msg->count_entries;
+				temp = kzalloc(pkt_params->count * sizeof(struct
+						 bindpkt_params), GFP_KERNEL);
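+				/* turn each advertised cmd code range into a
+				 * bindpkt_params entry and register it with
+				 * the diag cmd table via the ioctl below */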
+				for (j = 0; j < pkt_params->count; j++) {
+					temp->cmd_code = msg->cmd_code;
+					temp->subsys_id = msg->subsysid;
+					temp->client_id = proc_num;
+					temp->proc_id = proc_num;
+					temp->cmd_code_lo = range->cmd_code_lo;
+					temp->cmd_code_hi = range->cmd_code_hi;
+					range++;
+					temp++;
+				}
+				temp -= pkt_params->count;
+				pkt_params->params = temp;
+				diagchar_ioctl(NULL, DIAG_IOCTL_COMMAND_REG,
+						 (unsigned long)pkt_params);
+				kfree(temp);
+				buf = buf + HDR_SIZ + data_len;
+			}
+		}
+	}
+	kfree(pkt_params);
+}
+
+void diag_read_smd_cntl_work_fn(struct work_struct *work)
+{
+	diag_smd_cntl_send_req(MODEM_PROC);
+}
+
+void diag_read_smd_qdsp_cntl_work_fn(struct work_struct *work)
+{
+	diag_smd_cntl_send_req(QDSP_PROC);
+}
+
+void diag_read_smd_wcnss_cntl_work_fn(struct work_struct *work)
+{
+	diag_smd_cntl_send_req(WCNSS_PROC);
+}
+
+static void diag_smd_cntl_notify(void *ctxt, unsigned event)
+{
+	queue_work(driver->diag_wq, &(driver->diag_read_smd_cntl_work));
+}
+
+#if defined(CONFIG_MSM_N_WAY_SMD)
+static void diag_smd_qdsp_cntl_notify(void *ctxt, unsigned event)
+{
+	queue_work(driver->diag_wq, &(driver->diag_read_smd_qdsp_cntl_work));
+}
+#endif
+
+static void diag_smd_wcnss_cntl_notify(void *ctxt, unsigned event)
+{
+	queue_work(driver->diag_wq, &(driver->diag_read_smd_wcnss_cntl_work));
+}
+
+static int diag_smd_cntl_probe(struct platform_device *pdev)
+{
+	int r = 0;
+
+	/* open control ports only on 8960 */
+	if (chk_config_get_id() == AO8960_TOOLS_ID) {
+		if (pdev->id == SMD_APPS_MODEM)
+			r = smd_open("DIAG_CNTL", &driver->ch_cntl, driver,
+							diag_smd_cntl_notify);
+		if (pdev->id == SMD_APPS_QDSP)
+			r = smd_named_open_on_edge("DIAG_CNTL", SMD_APPS_QDSP,
+				&driver->chqdsp_cntl, driver,
+				diag_smd_qdsp_cntl_notify);
+		if (pdev->id == SMD_APPS_WCNSS)
+			r = smd_named_open_on_edge("APPS_RIVA_CTRL",
+				SMD_APPS_WCNSS, &driver->ch_wcnss_cntl,
+					driver, diag_smd_wcnss_cntl_notify);
+		pr_debug("diag: open CNTL port, ID = %d,r = %d\n", pdev->id, r);
+	}
+	return 0;
+}
+
+static int diagfwd_cntl_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: suspending...\n");
+	return 0;
+}
+
+static int diagfwd_cntl_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: resuming...\n");
+	return 0;
+}
+
+static const struct dev_pm_ops diagfwd_cntl_dev_pm_ops = {
+	.runtime_suspend = diagfwd_cntl_runtime_suspend,
+	.runtime_resume = diagfwd_cntl_runtime_resume,
+};
+
+static struct platform_driver msm_smd_ch1_cntl_driver = {
+	.probe = diag_smd_cntl_probe,
+	.driver = {
+			.name = "DIAG_CNTL",
+			.owner = THIS_MODULE,
+			.pm   = &diagfwd_cntl_dev_pm_ops,
+		   },
+};
+
+static struct platform_driver diag_smd_lite_cntl_driver = {
+	.probe = diag_smd_cntl_probe,
+	.driver = {
+			.name = "APPS_RIVA_CTRL",
+			.owner = THIS_MODULE,
+			.pm   = &diagfwd_cntl_dev_pm_ops,
+		   },
+};
+
+void diagfwd_cntl_init(void)
+{
+	if (driver->buf_in_cntl == NULL) {
+		driver->buf_in_cntl = kzalloc(IN_BUF_SIZE, GFP_KERNEL);
+		if (driver->buf_in_cntl == NULL)
+			goto err;
+	}
+	if (driver->buf_in_qdsp_cntl == NULL) {
+		driver->buf_in_qdsp_cntl = kzalloc(IN_BUF_SIZE, GFP_KERNEL);
+		if (driver->buf_in_qdsp_cntl == NULL)
+			goto err;
+	}
+	if (driver->buf_in_wcnss_cntl == NULL) {
+		driver->buf_in_wcnss_cntl = kzalloc(IN_BUF_SIZE, GFP_KERNEL);
+		if (driver->buf_in_wcnss_cntl == NULL)
+			goto err;
+	}
+	platform_driver_register(&msm_smd_ch1_cntl_driver);
+	platform_driver_register(&diag_smd_lite_cntl_driver);
+
+	return;
+err:
+		pr_err("diag: Could not initialize diag buffers");
+		kfree(driver->buf_in_cntl);
+		kfree(driver->buf_in_qdsp_cntl);
+		kfree(driver->buf_in_wcnss_cntl);
+}
+
+void diagfwd_cntl_exit(void)
+{
+	smd_close(driver->ch_cntl);
+	smd_close(driver->chqdsp_cntl);
+	smd_close(driver->ch_wcnss_cntl);
+	driver->ch_cntl = 0;
+	driver->chqdsp_cntl = 0;
+	driver->ch_wcnss_cntl = 0;
+	platform_driver_unregister(&msm_smd_ch1_cntl_driver);
+	platform_driver_unregister(&diag_smd_lite_cntl_driver);
+
+	kfree(driver->buf_in_cntl);
+	kfree(driver->buf_in_qdsp_cntl);
+	kfree(driver->buf_in_wcnss_cntl);
+}
diff --git a/drivers/char/diag/diagfwd_cntl.h b/drivers/char/diag/diagfwd_cntl.h
new file mode 100644
index 0000000..542138d
--- /dev/null
+++ b/drivers/char/diag/diagfwd_cntl.h
@@ -0,0 +1,38 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_CNTL_H
+#define DIAGFWD_CNTL_H
+
+#define DIAG_CTRL_MSG_REG 1	/* Message registration commands */
+
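+/* a DIAG_CTRL_MSG_REG packet carries a diag_ctrl_msg header followed by
+ * count_entries cmd_code_range entries */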
+struct cmd_code_range {
+	uint16_t cmd_code_lo;
+	uint16_t cmd_code_hi;
+	uint32_t data;
+};
+
+struct diag_ctrl_msg {
+	uint32_t version;
+	uint16_t cmd_code;
+	uint16_t subsysid;
+	uint16_t count_entries;
+	uint16_t port;
+};
+
+void diagfwd_cntl_init(void);
+void diagfwd_cntl_exit(void);
+void diag_read_smd_cntl_work_fn(struct work_struct *);
+void diag_read_smd_qdsp_cntl_work_fn(struct work_struct *);
+void diag_read_smd_wcnss_cntl_work_fn(struct work_struct *);
+
+#endif
diff --git a/drivers/char/diag/diagfwd_sdio.c b/drivers/char/diag/diagfwd_sdio.c
new file mode 100644
index 0000000..8d43286
--- /dev/null
+++ b/drivers/char/diag/diagfwd_sdio.c
@@ -0,0 +1,261 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/diagchar.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <asm/current.h>
+#ifdef CONFIG_DIAG_OVER_USB
+#include <mach/usbdiag.h>
+#endif
+#include "diagchar_hdlc.h"
+#include "diagmem.h"
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_sdio.h"
+
+void __diag_sdio_send_req(void)
+{
+	int r = 0;
+	void *buf = driver->buf_in_sdio;
+
+	if (driver->sdio_ch && (!driver->in_busy_sdio)) {
+		r = sdio_read_avail(driver->sdio_ch);
+
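+		/* r is the number of bytes pending on SDIO; grow the local
+		 * receive buffer if a packet exceeds the default size */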
+		if (r > IN_BUF_SIZE) {
+			if (r < MAX_IN_BUF_SIZE) {
+				pr_err("diag: SDIO sending"
+					  " in packets more than %d bytes", r);
+				buf = krealloc(buf, r, GFP_KERNEL);
+			} else {
+				pr_err("diag: SDIO sending"
+			  " in packets more than %d bytes", MAX_IN_BUF_SIZE);
+				return;
+			}
+		}
+		if (r > 0) {
+			if (!buf)
+				printk(KERN_INFO "Out of diagmem for SDIO\n");
+			else {
+				APPEND_DEBUG('i');
+				sdio_read(driver->sdio_ch, buf, r);
+				APPEND_DEBUG('j');
+				driver->write_ptr_mdm->length = r;
+				driver->in_busy_sdio = 1;
+				diag_device_write(buf, SDIO_DATA,
+						 driver->write_ptr_mdm);
+			}
+		}
+	}
+}
+
+static void diag_read_sdio_work_fn(struct work_struct *work)
+{
+	__diag_sdio_send_req();
+}
+
+int diagfwd_connect_sdio(void)
+{
+	int err;
+
+	err = usb_diag_alloc_req(driver->mdm_ch, N_MDM_WRITE,
+							 N_MDM_READ);
+	if (err)
+		printk(KERN_ERR "diag: unable to alloc USB req on mdm ch");
+
+	driver->in_busy_sdio = 0;
+
+	/* Poll USB channel to check for data*/
+	queue_work(driver->diag_sdio_wq, &(driver->diag_read_mdm_work));
+	/* Poll SDIO channel to check for data*/
+	queue_work(driver->diag_sdio_wq, &(driver->diag_read_sdio_work));
+	return 0;
+}
+
+int diagfwd_disconnect_sdio(void)
+{
+	driver->in_busy_sdio = 1;
+	usb_diag_free_req(driver->mdm_ch);
+	return 0;
+}
+
+int diagfwd_write_complete_sdio(void)
+{
+	driver->in_busy_sdio = 0;
+	APPEND_DEBUG('q');
+	queue_work(driver->diag_sdio_wq, &(driver->diag_read_sdio_work));
+	return 0;
+}
+
+int diagfwd_read_complete_sdio(void)
+{
+	queue_work(driver->diag_sdio_wq, &(driver->diag_read_mdm_work));
+	return 0;
+}
+
+void diag_read_mdm_work_fn(struct work_struct *work)
+{
+	if (driver->sdio_ch) {
+		wait_event_interruptible(driver->wait_q, (sdio_write_avail
+				(driver->sdio_ch) >= driver->read_len_mdm));
+		if (driver->sdio_ch && driver->usb_buf_mdm_out &&
+						 (driver->read_len_mdm > 0))
+			sdio_write(driver->sdio_ch, driver->usb_buf_mdm_out,
+							 driver->read_len_mdm);
+		APPEND_DEBUG('x');
+		driver->usb_read_mdm_ptr->buf = driver->usb_buf_mdm_out;
+		driver->usb_read_mdm_ptr->length = USB_MAX_OUT_BUF;
+		usb_diag_read(driver->mdm_ch, driver->usb_read_mdm_ptr);
+		APPEND_DEBUG('y');
+	}
+}
+
+static void diag_sdio_notify(void *ctxt, unsigned event)
+{
+	if (event == SDIO_EVENT_DATA_READ_AVAIL)
+		queue_work(driver->diag_sdio_wq,
+				 &(driver->diag_read_sdio_work));
+
+	if (event == SDIO_EVENT_DATA_WRITE_AVAIL)
+		wake_up_interruptible(&driver->wait_q);
+}
+
+static int diag_sdio_probe(struct platform_device *pdev)
+{
+	int err;
+
+	err = sdio_open("SDIO_DIAG", &driver->sdio_ch, driver,
+							 diag_sdio_notify);
+	if (err)
+		printk(KERN_INFO "DIAG could not open SDIO channel");
+	else {
+		printk(KERN_INFO "DIAG opened SDIO channel");
+		queue_work(driver->diag_sdio_wq, &(driver->diag_read_mdm_work));
+	}
+
+	return err;
+}
+
+static int diag_sdio_remove(struct platform_device *pdev)
+{
+	queue_work(driver->diag_sdio_wq, &(driver->diag_remove_sdio_work));
+	return 0;
+}
+
+static void diag_remove_sdio_work_fn(struct work_struct *work)
+{
+	pr_debug("\n diag: sdio remove called");
+	/*Disable SDIO channel to prevent further read/write */
+	driver->sdio_ch = NULL;
+}
+
+static int diagfwd_sdio_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: suspending...\n");
+	return 0;
+}
+
+static int diagfwd_sdio_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: resuming...\n");
+	return 0;
+}
+
+static const struct dev_pm_ops diagfwd_sdio_dev_pm_ops = {
+	.runtime_suspend = diagfwd_sdio_runtime_suspend,
+	.runtime_resume = diagfwd_sdio_runtime_resume,
+};
+
+static struct platform_driver msm_sdio_ch_driver = {
+	.probe = diag_sdio_probe,
+	.remove = diag_sdio_remove,
+	.driver = {
+		   .name = "SDIO_DIAG",
+		   .owner = THIS_MODULE,
+		   .pm   = &diagfwd_sdio_dev_pm_ops,
+		   },
+};
+
+void diagfwd_sdio_init(void)
+{
+	int ret;
+
+	driver->read_len_mdm = 0;
+	if (driver->buf_in_sdio == NULL) {
+		driver->buf_in_sdio = kzalloc(IN_BUF_SIZE, GFP_KERNEL);
+		if (driver->buf_in_sdio == NULL)
+			goto err;
+	}
+	if (driver->usb_buf_mdm_out == NULL) {
+		driver->usb_buf_mdm_out = kzalloc(USB_MAX_OUT_BUF, GFP_KERNEL);
+		if (driver->usb_buf_mdm_out == NULL)
+			goto err;
+	}
+	if (driver->write_ptr_mdm == NULL) {
+		driver->write_ptr_mdm = kzalloc(
+			sizeof(struct diag_request), GFP_KERNEL);
+		if (driver->write_ptr_mdm == NULL)
+			goto err;
+	}
+	if (driver->usb_read_mdm_ptr == NULL) {
+		driver->usb_read_mdm_ptr = kzalloc(
+			sizeof(struct diag_request), GFP_KERNEL);
+		if (driver->usb_read_mdm_ptr == NULL)
+			goto err;
+	}
+	driver->diag_sdio_wq = create_singlethread_workqueue("diag_sdio_wq");
+#ifdef CONFIG_DIAG_OVER_USB
+	driver->mdm_ch = usb_diag_open(DIAG_MDM, driver,
+			diag_usb_legacy_notifier);
+	if (IS_ERR(driver->mdm_ch)) {
+		printk(KERN_ERR "Unable to open USB diag MDM channel\n");
+		goto err;
+	}
+	INIT_WORK(&(driver->diag_read_mdm_work), diag_read_mdm_work_fn);
+#endif
+	INIT_WORK(&(driver->diag_read_sdio_work), diag_read_sdio_work_fn);
+	INIT_WORK(&(driver->diag_remove_sdio_work), diag_remove_sdio_work_fn);
+	ret = platform_driver_register(&msm_sdio_ch_driver);
+	if (ret)
+		printk(KERN_INFO "DIAG could not register SDIO device");
+	else
+		printk(KERN_INFO "DIAG registered SDIO device");
+
+	return;
+err:
+		printk(KERN_INFO "\n Could not initialize diag buf for SDIO");
+		kfree(driver->buf_in_sdio);
+		kfree(driver->usb_buf_mdm_out);
+		kfree(driver->write_ptr_mdm);
+		kfree(driver->usb_read_mdm_ptr);
+		if (driver->diag_sdio_wq)
+			destroy_workqueue(driver->diag_sdio_wq);
+}
+
+void diagfwd_sdio_exit(void)
+{
+#ifdef CONFIG_DIAG_OVER_USB
+	if (driver->usb_connected)
+		usb_diag_free_req(driver->mdm_ch);
+#endif
+	platform_driver_unregister(&msm_sdio_ch_driver);
+#ifdef CONFIG_DIAG_OVER_USB
+	usb_diag_close(driver->mdm_ch);
+#endif
+	kfree(driver->buf_in_sdio);
+	kfree(driver->usb_buf_mdm_out);
+	kfree(driver->write_ptr_mdm);
+	kfree(driver->usb_read_mdm_ptr);
+	destroy_workqueue(driver->diag_sdio_wq);
+}
diff --git a/drivers/char/diag/diagfwd_sdio.h b/drivers/char/diag/diagfwd_sdio.h
new file mode 100644
index 0000000..40982c3
--- /dev/null
+++ b/drivers/char/diag/diagfwd_sdio.h
@@ -0,0 +1,27 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_SDIO_H
+#define DIAGFWD_SDIO_H
+
+#include <mach/sdio_al.h>
+#define N_MDM_WRITE	1 /* Upgrade to 2 with ping pong buffer */
+#define N_MDM_READ	1
+
+void diagfwd_sdio_init(void);
+void diagfwd_sdio_exit(void);
+int diagfwd_connect_sdio(void);
+int diagfwd_disconnect_sdio(void);
+int diagfwd_read_complete_sdio(void);
+int diagfwd_write_complete_sdio(void);
+
+#endif
diff --git a/drivers/char/diag/diagmem.c b/drivers/char/diag/diagmem.c
new file mode 100644
index 0000000..0b5c27a
--- /dev/null
+++ b/drivers/char/diag/diagmem.c
@@ -0,0 +1,145 @@
+/* Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/mutex.h>
+#include <asm/atomic.h>
+#include "diagchar.h"
+
+void *diagmem_alloc(struct diagchar_dev *driver, int size, int pool_type)
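+/* the diag driver keeps three mempools: a copy pool, an HDLC encode pool and
+ * a pool of USB write structures; each has its own usage counter so a pool is
+ * only destroyed once all of its buffers have been returned */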
+{
+	void *buf = NULL;
+
+	if (pool_type == POOL_TYPE_COPY) {
+		if (driver->diagpool) {
+			mutex_lock(&driver->diagmem_mutex);
+			if (driver->count < driver->poolsize) {
+				atomic_add(1, (atomic_t *)&driver->count);
+				buf = mempool_alloc(driver->diagpool,
+								 GFP_ATOMIC);
+			}
+			mutex_unlock(&driver->diagmem_mutex);
+		}
+	} else if (pool_type == POOL_TYPE_HDLC) {
+		if (driver->diag_hdlc_pool) {
+			if (driver->count_hdlc_pool < driver->poolsize_hdlc) {
+				atomic_add(1,
+					 (atomic_t *)&driver->count_hdlc_pool);
+				buf = mempool_alloc(driver->diag_hdlc_pool,
+								 GFP_ATOMIC);
+			}
+		}
+	} else if (pool_type == POOL_TYPE_WRITE_STRUCT) {
+		if (driver->diag_write_struct_pool) {
+			if (driver->count_write_struct_pool <
+					 driver->poolsize_write_struct) {
+				atomic_add(1,
+				 (atomic_t *)&driver->count_write_struct_pool);
+				buf = mempool_alloc(
+				driver->diag_write_struct_pool, GFP_ATOMIC);
+			}
+		}
+	}
+	return buf;
+}
+
+void diagmem_exit(struct diagchar_dev *driver, int pool_type)
+{
+	if (driver->diagpool) {
+		if (driver->count == 0 && driver->ref_count == 0) {
+			mempool_destroy(driver->diagpool);
+			driver->diagpool = NULL;
+		} else if (driver->ref_count == 0 && pool_type == POOL_TYPE_ALL)
+			printk(KERN_ALERT "Unable to destroy COPY mempool");
+	}
+
+	if (driver->diag_hdlc_pool) {
+		if (driver->count_hdlc_pool == 0 && driver->ref_count == 0) {
+			mempool_destroy(driver->diag_hdlc_pool);
+			driver->diag_hdlc_pool = NULL;
+		} else if (driver->ref_count == 0 && pool_type == POOL_TYPE_ALL)
+			printk(KERN_ALERT "Unable to destroy HDLC mempool");
+	}
+
+	if (driver->diag_write_struct_pool) {
+		/* Free up struct pool ONLY if there are no outstanding
+		 * transactions (aggregation buffer) with USB */
+		if (driver->count_write_struct_pool == 0 &&
+		 driver->count_hdlc_pool == 0 && driver->ref_count == 0) {
+			mempool_destroy(driver->diag_write_struct_pool);
+			driver->diag_write_struct_pool = NULL;
+		} else if (driver->ref_count == 0 && pool_type == POOL_TYPE_ALL)
+			printk(KERN_ALERT "Unable to destroy STRUCT mempool");
+	}
+}
+
+void diagmem_free(struct diagchar_dev *driver, void *buf, int pool_type)
+{
+	if (pool_type == POOL_TYPE_COPY) {
+		if (driver->diagpool != NULL && driver->count > 0) {
+			mempool_free(buf, driver->diagpool);
+			atomic_add(-1, (atomic_t *)&driver->count);
+		} else
+			pr_err("diag: Attempt to free up DIAG driver "
+	       "mempool memory which is already free %d", driver->count);
+	} else if (pool_type == POOL_TYPE_HDLC) {
+		if (driver->diag_hdlc_pool != NULL &&
+			 driver->count_hdlc_pool > 0) {
+			mempool_free(buf, driver->diag_hdlc_pool);
+			atomic_add(-1, (atomic_t *)&driver->count_hdlc_pool);
+		} else
+			pr_err("diag: Attempt to free up DIAG driver "
+	"HDLC mempool which is already free %d ", driver->count_hdlc_pool);
+	} else if (pool_type == POOL_TYPE_WRITE_STRUCT) {
+		if (driver->diag_write_struct_pool != NULL &&
+			 driver->count_write_struct_pool > 0) {
+			mempool_free(buf, driver->diag_write_struct_pool);
+			atomic_add(-1,
+				 (atomic_t *)&driver->count_write_struct_pool);
+		} else
+			pr_err("diag: Attempt to free up DIAG driver "
+			   "USB structure mempool which is already free %d ",
+				    driver->count_write_struct_pool);
+	}
+
+	diagmem_exit(driver, pool_type);
+}
+
+void diagmem_init(struct diagchar_dev *driver)
+{
+	mutex_init(&driver->diagmem_mutex);
+
+	if (driver->count == 0)
+		driver->diagpool = mempool_create_kmalloc_pool(
+					driver->poolsize, driver->itemsize);
+
+	if (driver->count_hdlc_pool == 0)
+		driver->diag_hdlc_pool = mempool_create_kmalloc_pool(
+				driver->poolsize_hdlc, driver->itemsize_hdlc);
+
+	if (driver->count_write_struct_pool == 0)
+		driver->diag_write_struct_pool = mempool_create_kmalloc_pool(
+		driver->poolsize_write_struct, driver->itemsize_write_struct);
+
+	if (!driver->diagpool)
+		printk(KERN_INFO "Cannot allocate diag mempool\n");
+
+	if (!driver->diag_hdlc_pool)
+		printk(KERN_INFO "Cannot allocate diag HDLC mempool\n");
+
+	if (!driver->diag_write_struct_pool)
+		printk(KERN_INFO "Cannot allocate diag USB struct mempool\n");
+}
+
diff --git a/drivers/char/diag/diagmem.h b/drivers/char/diag/diagmem.h
new file mode 100644
index 0000000..43829ae
--- /dev/null
+++ b/drivers/char/diag/diagmem.h
@@ -0,0 +1,22 @@
+/* Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGMEM_H
+#define DIAGMEM_H
+#include "diagchar.h"
+
+void *diagmem_alloc(struct diagchar_dev *driver, int size, int pool_type);
+void diagmem_free(struct diagchar_dev *driver, void *buf, int pool_type);
+void diagmem_init(struct diagchar_dev *driver);
+void diagmem_exit(struct diagchar_dev *driver, int pool_type);
+
+#endif
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index a60043b..127bdc6 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -210,3 +210,16 @@
 	  module will be called picoxcell-rng.
 
 	  If unsure, say Y.
+
+config HW_RANDOM_MSM
+	tristate "Qualcomm MSM Random Number Generator support"
+	depends on HW_RANDOM && ARCH_MSM
+	default n
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on Qualcomm MSM SoCs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called msm_rng.
+
+	  If unsure, say Y.
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 3db4eb8..d0c065d 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -20,3 +20,4 @@
 obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
 obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
 obj-$(CONFIG_HW_RANDOM_PICOXCELL) += picoxcell-rng.o
+obj-$(CONFIG_HW_RANDOM_MSM) += msm_rng.o
diff --git a/drivers/char/hw_random/msm_rng.c b/drivers/char/hw_random/msm_rng.c
new file mode 100644
index 0000000..7051bf9
--- /dev/null
+++ b/drivers/char/hw_random/msm_rng.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/types.h>
+
+#define DRIVER_NAME "msm_rng"
+
+/* Device specific register offsets */
+#define PRNG_DATA_OUT_OFFSET    0x0000
+#define PRNG_STATUS_OFFSET	0x0004
+#define PRNG_LFSR_CFG_OFFSET	0x0100
+#define PRNG_CONFIG_OFFSET	0x0104
+
+/* Device specific register masks and config values */
+#define PRNG_LFSR_CFG_MASK	0xFFFF0000
+#define PRNG_LFSR_CFG_CLOCKS	0x0000DDDD
+#define PRNG_CONFIG_MASK	0xFFFFFFFD
+#define PRNG_CONFIG_ENABLE	0x00000002
+
+#define MAX_HW_FIFO_DEPTH 16                     /* FIFO is 16 words deep */
+#define MAX_HW_FIFO_SIZE (MAX_HW_FIFO_DEPTH * 4) /* FIFO is 32 bits wide  */
+
+struct msm_rng_device {
+	struct platform_device *pdev;
+	void __iomem *base;
+	struct clk *prng_clk;
+};
+
+static int msm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+	struct msm_rng_device *msm_rng_dev;
+	struct platform_device *pdev;
+	void __iomem *base;
+	size_t maxsize;
+	size_t currsize = 0;
+	unsigned long val;
+	unsigned long *retdata = data;
+	int ret;
+
+	msm_rng_dev = (struct msm_rng_device *)rng->priv;
+	pdev = msm_rng_dev->pdev;
+	base = msm_rng_dev->base;
+
+	/* calculate the maximum number of bytes to transfer back to caller */
+	maxsize = min_t(size_t, MAX_HW_FIFO_SIZE, max);
+
+	/* no room for word data */
+	if (maxsize < 4)
+		return 0;
+
+	/* enable PRNG clock */
+	ret = clk_enable(msm_rng_dev->prng_clk);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to enable clock in callback\n");
+		return 0;
+	}
+
+	/* read random data from h/w */
+	do {
+		/* check status bit if data is available */
+		if (!(readl(base + PRNG_STATUS_OFFSET) & 0x00000001))
+			break;	/* no data to read so just bail */
+
+		/* read FIFO */
+		val = readl(base + PRNG_DATA_OUT_OFFSET);
+		if (!val)
+			break;	/* no data to read so just bail */
+
+		/* write data back to caller's pointer */
+		*(retdata++) = val;
+		currsize += 4;
+
+		/* make sure we stay on 32bit boundary */
+		if ((maxsize - currsize) < 4)
+			break;
+	} while (currsize < maxsize);
+
+	/* vote to turn off clock */
+	clk_disable(msm_rng_dev->prng_clk);
+
+	return currsize;
+}
+
+static struct hwrng msm_rng = {
+	.name = DRIVER_NAME,
+	.read = msm_rng_read,
+};
+
+static int __devinit msm_rng_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct msm_rng_device *msm_rng_dev = NULL;
+	void __iomem *base = NULL;
+	int error = 0;
+	unsigned long val;
+	int ret;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "invalid address\n");
+		error = -EFAULT;
+		goto err_exit;
+	}
+
+	msm_rng_dev = kzalloc(sizeof(*msm_rng_dev), GFP_KERNEL);
+	if (!msm_rng_dev) {
+		dev_err(&pdev->dev, "cannot allocate memory\n");
+		error = -ENOMEM;
+		goto err_exit;
+	}
+
+	base = ioremap(res->start, resource_size(res));
+	if (!base) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		error = -ENOMEM;
+		goto err_iomap;
+	}
+	msm_rng_dev->base = base;
+
+	/* create a handle for clock control */
+	msm_rng_dev->prng_clk = clk_get(NULL, "prng_clk");
+	if (IS_ERR(msm_rng_dev->prng_clk)) {
+		dev_err(&pdev->dev, "failed to register clock source\n");
+		error = -EPERM;
+		goto err_clk_get;
+	}
+
+	/* save away pdev and register driver data */
+	msm_rng_dev->pdev = pdev;
+	platform_set_drvdata(pdev, msm_rng_dev);
+
+	ret = clk_enable(msm_rng_dev->prng_clk);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to enable clock in probe\n");
+		error = -EPERM;
+		goto err_clk_enable;
+	}
+
+	/* Enable the PRNG hardware. This may have no effect if XPU protection
+	 * is enabled elsewhere, in which case the hardware should already
+	 * have been set up.
+	 */
+	val = readl(base + PRNG_LFSR_CFG_OFFSET) & PRNG_LFSR_CFG_MASK;
+	val |= PRNG_LFSR_CFG_CLOCKS;
+	writel(val, base + PRNG_LFSR_CFG_OFFSET);
+
+	val = readl(base + PRNG_CONFIG_OFFSET) & PRNG_CONFIG_MASK;
+	val |= PRNG_CONFIG_ENABLE;
+	writel(val, base + PRNG_CONFIG_OFFSET);
+
+	clk_disable(msm_rng_dev->prng_clk);
+
+	/* register with hwrng framework */
+	msm_rng.priv = (unsigned long) msm_rng_dev;
+	error = hwrng_register(&msm_rng);
+	if (error) {
+		dev_err(&pdev->dev, "failed to register hwrng\n");
+		error = -EPERM;
+		goto err_hw_register;
+	}
+
+	return 0;
+
+err_hw_register:
+err_clk_enable:
+	clk_put(msm_rng_dev->prng_clk);
+err_clk_get:
+	iounmap(msm_rng_dev->base);
+err_iomap:
+	kfree(msm_rng_dev);
+err_exit:
+	return error;
+}
+
+static int __devexit msm_rng_remove(struct platform_device *pdev)
+{
+	struct msm_rng_device *msm_rng_dev = platform_get_drvdata(pdev);
+
+	hwrng_unregister(&msm_rng);
+	clk_put(msm_rng_dev->prng_clk);
+	iounmap(msm_rng_dev->base);
+	platform_set_drvdata(pdev, NULL);
+	kfree(msm_rng_dev);
+	return 0;
+}
+
+static struct platform_driver rng_driver = {
+	.probe      = msm_rng_probe,
+	.remove     = __devexit_p(msm_rng_remove),
+	.driver     = {
+		.name   = DRIVER_NAME,
+		.owner  = THIS_MODULE,
+	}
+};
+
+static int __init msm_rng_init(void)
+{
+	return platform_driver_register(&rng_driver);
+}
+
+module_init(msm_rng_init);
+
+static void __exit msm_rng_exit(void)
+{
+	platform_driver_unregister(&rng_driver);
+}
+
+module_exit(msm_rng_exit);
+
+MODULE_AUTHOR("Code Aurora Forum");
+MODULE_DESCRIPTION("Qualcomm MSM Random Number Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/msm_rotator.c b/drivers/char/msm_rotator.c
new file mode 100644
index 0000000..e7c790d
--- /dev/null
+++ b/drivers/char/msm_rotator.c
@@ -0,0 +1,1523 @@
+/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/platform_device.h>
+#include <linux/cdev.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/clk.h>
+#include <mach/clk.h>
+#include <linux/android_pmem.h>
+#include <linux/msm_rotator.h>
+#include <linux/io.h>
+#include <mach/msm_rotator_imem.h>
+#include <linux/ktime.h>
+#include <linux/workqueue.h>
+#include <linux/file.h>
+#include <linux/major.h>
+#include <linux/regulator/consumer.h>
+
+#define DRIVER_NAME "msm_rotator"
+
+#define MSM_ROTATOR_BASE (msm_rotator_dev->io_base)
+#define MSM_ROTATOR_INTR_ENABLE			(MSM_ROTATOR_BASE+0x0020)
+#define MSM_ROTATOR_INTR_STATUS			(MSM_ROTATOR_BASE+0x0024)
+#define MSM_ROTATOR_INTR_CLEAR			(MSM_ROTATOR_BASE+0x0028)
+#define MSM_ROTATOR_START			(MSM_ROTATOR_BASE+0x0030)
+#define MSM_ROTATOR_MAX_BURST_SIZE		(MSM_ROTATOR_BASE+0x0050)
+#define MSM_ROTATOR_HW_VERSION			(MSM_ROTATOR_BASE+0x0070)
+#define MSM_ROTATOR_SRC_SIZE			(MSM_ROTATOR_BASE+0x1108)
+#define MSM_ROTATOR_SRCP0_ADDR			(MSM_ROTATOR_BASE+0x110c)
+#define MSM_ROTATOR_SRCP1_ADDR			(MSM_ROTATOR_BASE+0x1110)
+#define MSM_ROTATOR_SRC_YSTRIDE1		(MSM_ROTATOR_BASE+0x111c)
+#define MSM_ROTATOR_SRC_YSTRIDE2		(MSM_ROTATOR_BASE+0x1120)
+#define MSM_ROTATOR_SRC_FORMAT			(MSM_ROTATOR_BASE+0x1124)
+#define MSM_ROTATOR_SRC_UNPACK_PATTERN1		(MSM_ROTATOR_BASE+0x1128)
+#define MSM_ROTATOR_SUB_BLOCK_CFG		(MSM_ROTATOR_BASE+0x1138)
+#define MSM_ROTATOR_OUT_PACK_PATTERN1		(MSM_ROTATOR_BASE+0x1154)
+#define MSM_ROTATOR_OUTP0_ADDR			(MSM_ROTATOR_BASE+0x1168)
+#define MSM_ROTATOR_OUTP1_ADDR			(MSM_ROTATOR_BASE+0x116c)
+#define MSM_ROTATOR_OUT_YSTRIDE1		(MSM_ROTATOR_BASE+0x1178)
+#define MSM_ROTATOR_OUT_YSTRIDE2		(MSM_ROTATOR_BASE+0x117c)
+#define MSM_ROTATOR_SRC_XY			(MSM_ROTATOR_BASE+0x1200)
+#define MSM_ROTATOR_SRC_IMAGE_SIZE		(MSM_ROTATOR_BASE+0x1208)
+
+#define MSM_ROTATOR_MAX_ROT	0x07
+#define MSM_ROTATOR_MAX_H	0x1fff
+#define MSM_ROTATOR_MAX_W	0x1fff
+
+/* from lsb to msb */
+#define GET_PACK_PATTERN(a, x, y, z, bit) \
+			(((a)<<((bit)*3))|((x)<<((bit)*2))|((y)<<(bit))|(z))
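+/* e.g. GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8) describes 8-bit components
+ * ordered Cr, Cb from the lsb upwards */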
+#define CLR_G 0x0
+#define CLR_B 0x1
+#define CLR_R 0x2
+#define CLR_ALPHA 0x3
+
+#define CLR_Y  CLR_G
+#define CLR_CB CLR_B
+#define CLR_CR CLR_R
+
+#define ROTATIONS_TO_BITMASK(r) ((((r) & MDP_ROT_90) ? 1 : 0)  | \
+				 (((r) & MDP_FLIP_LR) ? 2 : 0) | \
+				 (((r) & MDP_FLIP_UD) ? 4 : 0))
+
+#define IMEM_NO_OWNER (-1)
+
+#define MAX_SESSIONS 16
+#define INVALID_SESSION -1
+#define VERSION_KEY_MASK 0xFFFFFF00
+
+struct tile_parm {
+	unsigned int width;  /* tile's width */
+	unsigned int height; /* tile's height */
+	unsigned int row_tile_w; /* tiles per row's width */
+	unsigned int row_tile_h; /* tiles per row's height */
+};
+
+struct msm_rotator_dev {
+	void __iomem *io_base;
+	int irq;
+	struct msm_rotator_img_info *img_info[MAX_SESSIONS];
+	struct clk *core_clk;
+	int pid_list[MAX_SESSIONS];
+	struct clk *pclk;
+	struct clk *axi_clk;
+	int rot_clk_state;
+	struct regulator *regulator;
+	struct delayed_work rot_clk_work;
+	struct clk *imem_clk;
+	int imem_clk_state;
+	struct delayed_work imem_clk_work;
+	struct platform_device *pdev;
+	struct cdev cdev;
+	struct device *device;
+	struct class *class;
+	dev_t dev_num;
+	int processing;
+	int last_session_idx;
+	struct mutex rotator_lock;
+	struct mutex imem_lock;
+	int imem_owner;
+	wait_queue_head_t wq;
+};
+
+#define chroma_addr(start, w, h, bpp) ((start) + ((h) * (w) * (bpp)))
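+/* the chroma plane starts immediately after the luma plane:
+ * base + width * height * bytes-per-pixel */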
+
+#define COMPONENT_5BITS 1
+#define COMPONENT_6BITS 2
+#define COMPONENT_8BITS 3
+
+static struct msm_rotator_dev *msm_rotator_dev;
+
+enum {
+	CLK_EN,
+	CLK_DIS,
+	CLK_SUSPEND,
+};
+
+int msm_rotator_imem_allocate(int requestor)
+{
+	int rc = 0;
+
+#ifdef CONFIG_MSM_ROTATOR_USE_IMEM
+	switch (requestor) {
+	case ROTATOR_REQUEST:
+		if (mutex_trylock(&msm_rotator_dev->imem_lock)) {
+			msm_rotator_dev->imem_owner = ROTATOR_REQUEST;
+			rc = 1;
+		} else
+			rc = 0;
+		break;
+	case JPEG_REQUEST:
+		mutex_lock(&msm_rotator_dev->imem_lock);
+		msm_rotator_dev->imem_owner = JPEG_REQUEST;
+		rc = 1;
+		break;
+	default:
+		rc = 0;
+	}
+#else
+	if (requestor == JPEG_REQUEST)
+		rc = 1;
+#endif
+	if (rc == 1) {
+		cancel_delayed_work(&msm_rotator_dev->imem_clk_work);
+		if (msm_rotator_dev->imem_clk_state != CLK_EN
+			&& msm_rotator_dev->imem_clk) {
+			clk_enable(msm_rotator_dev->imem_clk);
+			msm_rotator_dev->imem_clk_state = CLK_EN;
+		}
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(msm_rotator_imem_allocate);
+
+void msm_rotator_imem_free(int requestor)
+{
+#ifdef CONFIG_MSM_ROTATOR_USE_IMEM
+	if (msm_rotator_dev->imem_owner == requestor) {
+		schedule_delayed_work(&msm_rotator_dev->imem_clk_work, HZ);
+		mutex_unlock(&msm_rotator_dev->imem_lock);
+	}
+#else
+	if (requestor == JPEG_REQUEST)
+		schedule_delayed_work(&msm_rotator_dev->imem_clk_work, HZ);
+#endif
+}
+EXPORT_SYMBOL(msm_rotator_imem_free);
+
+static void msm_rotator_imem_clk_work_f(struct work_struct *work)
+{
+#ifdef CONFIG_MSM_ROTATOR_USE_IMEM
+	if (mutex_trylock(&msm_rotator_dev->imem_lock)) {
+		if (msm_rotator_dev->imem_clk_state == CLK_EN
+		     && msm_rotator_dev->imem_clk) {
+			clk_disable(msm_rotator_dev->imem_clk);
+			msm_rotator_dev->imem_clk_state = CLK_DIS;
+		} else if (msm_rotator_dev->imem_clk_state == CLK_SUSPEND)
+			msm_rotator_dev->imem_clk_state = CLK_DIS;
+		mutex_unlock(&msm_rotator_dev->imem_lock);
+	}
+#endif
+}
+
+/* enable clocks needed by rotator block */
+static void enable_rot_clks(void)
+{
+	if (msm_rotator_dev->regulator)
+		regulator_enable(msm_rotator_dev->regulator);
+	if (msm_rotator_dev->core_clk != NULL)
+		clk_enable(msm_rotator_dev->core_clk);
+	if (msm_rotator_dev->pclk != NULL)
+		clk_enable(msm_rotator_dev->pclk);
+	if (msm_rotator_dev->axi_clk != NULL)
+		clk_enable(msm_rotator_dev->axi_clk);
+}
+
+/* disable clocks needed by rotator block */
+static void disable_rot_clks(void)
+{
+	if (msm_rotator_dev->core_clk != NULL)
+		clk_disable(msm_rotator_dev->core_clk);
+	if (msm_rotator_dev->pclk != NULL)
+		clk_disable(msm_rotator_dev->pclk);
+	if (msm_rotator_dev->axi_clk != NULL)
+		clk_disable(msm_rotator_dev->axi_clk);
+	if (msm_rotator_dev->regulator)
+		regulator_disable(msm_rotator_dev->regulator);
+}
+
+static void msm_rotator_rot_clk_work_f(struct work_struct *work)
+{
+	if (mutex_trylock(&msm_rotator_dev->rotator_lock)) {
+		if (msm_rotator_dev->rot_clk_state == CLK_EN) {
+			disable_rot_clks();
+			msm_rotator_dev->rot_clk_state = CLK_DIS;
+		} else if (msm_rotator_dev->rot_clk_state == CLK_SUSPEND)
+			msm_rotator_dev->rot_clk_state = CLK_DIS;
+		mutex_unlock(&msm_rotator_dev->rotator_lock);
+	}
+}
+
+static irqreturn_t msm_rotator_isr(int irq, void *dev_id)
+{
+	if (msm_rotator_dev->processing) {
+		msm_rotator_dev->processing = 0;
+		wake_up(&msm_rotator_dev->wq);
+	} else
+		printk(KERN_WARNING "%s: unexpected interrupt\n", DRIVER_NAME);
+
+	return IRQ_HANDLED;
+}
+
+static int get_bpp(int format)
+{
+	switch (format) {
+	case MDP_RGB_565:
+	case MDP_BGR_565:
+		return 2;
+
+	case MDP_XRGB_8888:
+	case MDP_ARGB_8888:
+	case MDP_RGBA_8888:
+	case MDP_BGRA_8888:
+	case MDP_RGBX_8888:
+		return 4;
+
+	case MDP_Y_CBCR_H2V2:
+	case MDP_Y_CRCB_H2V2:
+	case MDP_Y_CRCB_H2V2_TILE:
+	case MDP_Y_CBCR_H2V2_TILE:
+		return 1;
+
+	case MDP_RGB_888:
+		return 3;
+
+	case MDP_YCRYCB_H2V1:
+		return 2;/* YCrYCb interleave */
+
+	case MDP_Y_CRCB_H2V1:
+	case MDP_Y_CBCR_H2V1:
+		return 1;
+
+	default:
+		return -1;
+	}
+
+}
+
+static int msm_rotator_ycxcx_h2v1(struct msm_rotator_img_info *info,
+				  unsigned int in_paddr,
+				  unsigned int out_paddr,
+				  unsigned int use_imem,
+				  int new_session,
+				  unsigned int in_chroma_paddr,
+				  unsigned int out_chroma_paddr)
+{
+	int bpp;
+	unsigned int in_chr_addr, out_chr_addr;
+
+	if (info->src.format != info->dst.format)
+		return -EINVAL;
+
+	bpp = get_bpp(info->src.format);
+	if (bpp < 0)
+		return -ENOTTY;
+
+	if (!in_chroma_paddr) {
+		in_chr_addr = chroma_addr(in_paddr, info->src.width,
+				info->src.height,
+				bpp);
+	} else
+		in_chr_addr = in_chroma_paddr;
+
+	if (!out_chroma_paddr) {
+		out_chr_addr = chroma_addr(out_paddr, info->dst.width,
+				info->dst.height,
+				bpp);
+	} else
+		out_chr_addr = out_chroma_paddr;
+
+	iowrite32(in_paddr, MSM_ROTATOR_SRCP0_ADDR);
+	iowrite32(in_chr_addr, MSM_ROTATOR_SRCP1_ADDR);
+	iowrite32(out_paddr +
+			((info->dst_y * info->dst.width) + info->dst_x),
+		  MSM_ROTATOR_OUTP0_ADDR);
+	iowrite32(out_chr_addr +
+			((info->dst_y * info->dst.width) + info->dst_x),
+		  MSM_ROTATOR_OUTP1_ADDR);
+
+	if (new_session) {
+		iowrite32(info->src.width |
+			  info->src.width << 16,
+			  MSM_ROTATOR_SRC_YSTRIDE1);
+		if (info->rotations & MDP_ROT_90)
+			iowrite32(info->dst.width |
+				  info->dst.width*2 << 16,
+				  MSM_ROTATOR_OUT_YSTRIDE1);
+		else
+			iowrite32(info->dst.width |
+				  info->dst.width << 16,
+				  MSM_ROTATOR_OUT_YSTRIDE1);
+		if (info->src.format == MDP_Y_CBCR_H2V1) {
+			iowrite32(GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
+				  MSM_ROTATOR_SRC_UNPACK_PATTERN1);
+			iowrite32(GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
+				  MSM_ROTATOR_OUT_PACK_PATTERN1);
+		} else {
+			iowrite32(GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8),
+				  MSM_ROTATOR_SRC_UNPACK_PATTERN1);
+			iowrite32(GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8),
+				  MSM_ROTATOR_OUT_PACK_PATTERN1);
+		}
+		iowrite32((1  << 18) | 		/* chroma sampling 1=H2V1 */
+			  (ROTATIONS_TO_BITMASK(info->rotations) << 9) |
+			  1 << 8,      		/* ROT_EN */
+			  MSM_ROTATOR_SUB_BLOCK_CFG);
+		iowrite32(0 << 29 | 		/* frame format 0 = linear */
+			  (use_imem ? 0 : 1) << 22 | /* tile size */
+			  2 << 19 | 		/* fetch planes 2 = pseudo */
+			  0 << 18 | 		/* unpack align */
+			  1 << 17 | 		/* unpack tight */
+			  1 << 13 | 		/* unpack count 0=1 component */
+			  (bpp-1) << 9 |	/* src Bpp 0=1 byte ... */
+			  0 << 8  | 		/* has alpha */
+			  0 << 6  | 		/* alpha bits 3=8bits */
+			  3 << 4  | 		/* R/Cr bits 1=5 2=6 3=8 */
+			  3 << 2  | 		/* B/Cb bits 1=5 2=6 3=8 */
+			  3 << 0,   		/* G/Y  bits 1=5 2=6 3=8 */
+			  MSM_ROTATOR_SRC_FORMAT);
+	}
+
+	return 0;
+}
+
+static int msm_rotator_ycxcx_h2v2(struct msm_rotator_img_info *info,
+				  unsigned int in_paddr,
+				  unsigned int out_paddr,
+				  unsigned int use_imem,
+				  int new_session,
+				  unsigned int in_chroma_paddr,
+				  unsigned int out_chroma_paddr)
+{
+	int bpp;
+	unsigned int in_chr_addr, out_chr_addr;
+
+	if (info->src.format != info->dst.format)
+		return -EINVAL;
+
+	bpp = get_bpp(info->src.format);
+	if (bpp < 0)
+		return -ENOTTY;
+
+	if (!in_chroma_paddr) {
+		in_chr_addr = chroma_addr(in_paddr, info->src.width,
+				info->src.height,
+				bpp);
+	} else
+		in_chr_addr = in_chroma_paddr;
+
+	if (!out_chroma_paddr) {
+		out_chr_addr = chroma_addr(out_paddr, info->dst.width,
+				info->dst.height,
+				bpp);
+	} else
+		out_chr_addr = out_chroma_paddr;
+
+	iowrite32(in_paddr, MSM_ROTATOR_SRCP0_ADDR);
+	iowrite32(in_chr_addr,
+		  MSM_ROTATOR_SRCP1_ADDR);
+	iowrite32(out_paddr +
+			((info->dst_y * info->dst.width) + info->dst_x),
+		  MSM_ROTATOR_OUTP0_ADDR);
+	iowrite32(out_chr_addr +
+			((info->dst_y * info->dst.width)/2 + info->dst_x),
+		  MSM_ROTATOR_OUTP1_ADDR);
+
+	if (new_session) {
+		iowrite32(info->src.width |
+			  info->src.width << 16,
+			  MSM_ROTATOR_SRC_YSTRIDE1);
+		iowrite32(info->dst.width |
+			  info->dst.width << 16,
+			  MSM_ROTATOR_OUT_YSTRIDE1);
+		if (info->src.format == MDP_Y_CBCR_H2V2) {
+			iowrite32(GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
+				  MSM_ROTATOR_SRC_UNPACK_PATTERN1);
+			iowrite32(GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
+				  MSM_ROTATOR_OUT_PACK_PATTERN1);
+		} else {
+			iowrite32(GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8),
+				  MSM_ROTATOR_SRC_UNPACK_PATTERN1);
+			iowrite32(GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8),
+				  MSM_ROTATOR_OUT_PACK_PATTERN1);
+		}
+		iowrite32((3  << 18) | 		/* chroma sampling 3=4:2:0 */
+			  (ROTATIONS_TO_BITMASK(info->rotations) << 9) |
+			  1 << 8,      		/* ROT_EN */
+			  MSM_ROTATOR_SUB_BLOCK_CFG);
+		iowrite32(0 << 29 | 		/* frame format 0 = linear */
+			  (use_imem ? 0 : 1) << 22 | /* tile size */
+			  2 << 19 | 		/* fetch planes 2 = pseudo */
+			  0 << 18 | 		/* unpack align */
+			  1 << 17 | 		/* unpack tight */
+			  1 << 13 | 		/* unpack count 0=1 component */
+			  (bpp-1) << 9  |	/* src Bpp 0=1 byte ... */
+			  0 << 8  | 		/* has alpha */
+			  0 << 6  | 		/* alpha bits 3=8bits */
+			  3 << 4  | 		/* R/Cr bits 1=5 2=6 3=8 */
+			  3 << 2  | 		/* B/Cb bits 1=5 2=6 3=8 */
+			  3 << 0,   		/* G/Y  bits 1=5 2=6 3=8 */
+			  MSM_ROTATOR_SRC_FORMAT);
+	}
+	return 0;
+}
+
+static unsigned int tile_size(unsigned int src_width,
+		unsigned int src_height,
+		const struct tile_parm *tp)
+{
+	unsigned int tile_w, tile_h;
+	unsigned int row_num_w, row_num_h;
+	tile_w = tp->width * tp->row_tile_w;
+	tile_h = tp->height * tp->row_tile_h;
+	row_num_w = (src_width + tile_w - 1) / tile_w;
+	row_num_h = (src_height + tile_h - 1) / tile_h;
+	return ((row_num_w * row_num_h * tile_w * tile_h) + 8191) & ~8191;
+}
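+
+/*
+ * Example: with the 64x32 tile and 2x1 per-row grouping used below,
+ * a 1280x720 tiled source gives tile_w = 128 and tile_h = 32, i.e.
+ * 10 x 23 tile groups, so the luma plane occupies
+ * 10 * 23 * 128 * 32 = 942080 bytes (already 8 KB aligned); the chroma
+ * plane starts at that offset when no separate chroma address is given.
+ */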
+
+static int msm_rotator_ycxcx_h2v2_tile(struct msm_rotator_img_info *info,
+				  unsigned int in_paddr,
+				  unsigned int out_paddr,
+				  unsigned int use_imem,
+				  int new_session,
+				  unsigned in_chroma_paddr,
+				  unsigned out_chroma_paddr)
+{
+	int bpp;
+	unsigned int offset = 0;
+	unsigned int in_chr_addr, out_chr_addr;
+	/*
+	 * Each row of a Samsung tile group consists of two tiles in height
+	 * and two tiles in width, which means the width should align to
+	 * 64 x 2 bytes and the height to 32 x 2 bytes.  The video decoder
+	 * generates two tiles in width and one tile in height, so the
+	 * height ends up aligned to 32 x 1 bytes.
+	 */
+	const struct tile_parm tile = {64, 32, 2, 1};
+	if ((info->src.format == MDP_Y_CRCB_H2V2_TILE &&
+		info->dst.format != MDP_Y_CRCB_H2V2) ||
+		(info->src.format == MDP_Y_CBCR_H2V2_TILE &&
+		info->dst.format != MDP_Y_CBCR_H2V2))
+		return -EINVAL;
+
+	bpp = get_bpp(info->src.format);
+	if (bpp < 0)
+		return -ENOTTY;
+
+	offset = tile_size(info->src.width, info->src.height, &tile);
+	if (!in_chroma_paddr)
+		in_chr_addr = in_paddr + offset;
+	else
+		in_chr_addr = in_chroma_paddr;
+
+	if (!out_chroma_paddr) {
+		out_chr_addr = chroma_addr(out_paddr, info->dst.width,
+				info->dst.height,
+				bpp);
+	} else
+		out_chr_addr = out_chroma_paddr;
+
+	iowrite32(in_paddr, MSM_ROTATOR_SRCP0_ADDR);
+	iowrite32(in_paddr + offset, MSM_ROTATOR_SRCP1_ADDR);
+	iowrite32(out_paddr +
+			((info->dst_y * info->dst.width) + info->dst_x),
+		  MSM_ROTATOR_OUTP0_ADDR);
+	iowrite32(out_chr_addr +
+			((info->dst_y * info->dst.width)/2 + info->dst_x),
+		  MSM_ROTATOR_OUTP1_ADDR);
+
+	if (new_session) {
+		iowrite32(info->src.width |
+			  info->src.width << 16,
+			  MSM_ROTATOR_SRC_YSTRIDE1);
+
+		iowrite32(info->dst.width |
+			  info->dst.width << 16,
+			  MSM_ROTATOR_OUT_YSTRIDE1);
+		if (info->src.format == MDP_Y_CBCR_H2V2_TILE) {
+			iowrite32(GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
+				  MSM_ROTATOR_SRC_UNPACK_PATTERN1);
+			iowrite32(GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
+				  MSM_ROTATOR_OUT_PACK_PATTERN1);
+		} else {
+			iowrite32(GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8),
+				  MSM_ROTATOR_SRC_UNPACK_PATTERN1);
+			iowrite32(GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8),
+				  MSM_ROTATOR_OUT_PACK_PATTERN1);
+		}
+		iowrite32((3  << 18) | 		/* chroma sampling 3=4:2:0 */
+			  (ROTATIONS_TO_BITMASK(info->rotations) << 9) |
+			  1 << 8,      		/* ROT_EN */
+			  MSM_ROTATOR_SUB_BLOCK_CFG);
+		iowrite32(2 << 29 | 		/* frame format 2 = supertile */
+			  (use_imem ? 0 : 1) << 22 | /* tile size */
+			  2 << 19 | 		/* fetch planes 2 = pseudo */
+			  0 << 18 | 		/* unpack align */
+			  1 << 17 | 		/* unpack tight */
+			  1 << 13 | 		/* unpack count 0=1 component */
+			  (bpp-1) << 9  |	/* src Bpp 0=1 byte ... */
+			  0 << 8  | 		/* has alpha */
+			  0 << 6  | 		/* alpha bits 3=8bits */
+			  3 << 4  | 		/* R/Cr bits 1=5 2=6 3=8 */
+			  3 << 2  | 		/* B/Cb bits 1=5 2=6 3=8 */
+			  3 << 0,   		/* G/Y  bits 1=5 2=6 3=8 */
+			  MSM_ROTATOR_SRC_FORMAT);
+	}
+	return 0;
+}
+
+static int msm_rotator_ycrycb(struct msm_rotator_img_info *info,
+			      unsigned int in_paddr,
+			      unsigned int out_paddr,
+			      unsigned int use_imem,
+			      int new_session)
+{
+	int bpp;
+
+	if (info->src.format != info->dst.format)
+		return -EINVAL;
+
+	bpp = get_bpp(info->src.format);
+	if (bpp < 0)
+		return -ENOTTY;
+
+	iowrite32(in_paddr, MSM_ROTATOR_SRCP0_ADDR);
+	iowrite32(out_paddr +
+			((info->dst_y * info->dst.width) + info->dst_x),
+		  MSM_ROTATOR_OUTP0_ADDR);
+
+	if (new_session) {
+		iowrite32(info->src.width,
+			  MSM_ROTATOR_SRC_YSTRIDE1);
+		iowrite32(info->dst.width,
+			  MSM_ROTATOR_OUT_YSTRIDE1);
+		iowrite32(GET_PACK_PATTERN(CLR_Y, CLR_CR, CLR_Y, CLR_CB, 8),
+			  MSM_ROTATOR_SRC_UNPACK_PATTERN1);
+		iowrite32(GET_PACK_PATTERN(CLR_Y, CLR_CR, CLR_Y, CLR_CB, 8),
+			  MSM_ROTATOR_OUT_PACK_PATTERN1);
+		iowrite32((1  << 18) | 		/* chroma sampling 1=H2V1 */
+			  (ROTATIONS_TO_BITMASK(info->rotations) << 9) |
+			  1 << 8,      		/* ROT_EN */
+			  MSM_ROTATOR_SUB_BLOCK_CFG);
+		iowrite32(0 << 29 | 		/* frame format 0 = linear */
+			  (use_imem ? 0 : 1) << 22 | /* tile size */
+			  0 << 19 | 		/* fetch planes 0=interleaved */
+			  0 << 18 | 		/* unpack align */
+			  1 << 17 | 		/* unpack tight */
+			  3 << 13 | 		/* unpack count 0=1 component */
+			  (bpp-1) << 9 |	/* src Bpp 0=1 byte ... */
+			  0 << 8  | 		/* has alpha */
+			  0 << 6  | 		/* alpha bits 3=8bits */
+			  3 << 4  | 		/* R/Cr bits 1=5 2=6 3=8 */
+			  3 << 2  | 		/* B/Cb bits 1=5 2=6 3=8 */
+			  3 << 0,   		/* G/Y  bits 1=5 2=6 3=8 */
+			  MSM_ROTATOR_SRC_FORMAT);
+	}
+
+	return 0;
+}
+
+static int msm_rotator_rgb_types(struct msm_rotator_img_info *info,
+				 unsigned int in_paddr,
+				 unsigned int out_paddr,
+				 unsigned int use_imem,
+				 int new_session)
+{
+	int bpp, abits, rbits, gbits, bbits;
+
+	if (info->src.format != info->dst.format)
+		return -EINVAL;
+
+	bpp = get_bpp(info->src.format);
+	if (bpp < 0)
+		return -ENOTTY;
+
+	iowrite32(in_paddr, MSM_ROTATOR_SRCP0_ADDR);
+	iowrite32(out_paddr +
+			((info->dst_y * info->dst.width) + info->dst_x) * bpp,
+		  MSM_ROTATOR_OUTP0_ADDR);
+
+	if (new_session) {
+		iowrite32(info->src.width * bpp, MSM_ROTATOR_SRC_YSTRIDE1);
+		iowrite32(info->dst.width * bpp, MSM_ROTATOR_OUT_YSTRIDE1);
+		iowrite32((0  << 18) | 		/* chroma sampling 0=rgb */
+			  (ROTATIONS_TO_BITMASK(info->rotations) << 9) |
+			  1 << 8,      		/* ROT_EN */
+			  MSM_ROTATOR_SUB_BLOCK_CFG);
+		switch (info->src.format) {
+		case MDP_RGB_565:
+			iowrite32(GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8),
+				  MSM_ROTATOR_SRC_UNPACK_PATTERN1);
+			iowrite32(GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8),
+				  MSM_ROTATOR_OUT_PACK_PATTERN1);
+			abits = 0;
+			rbits = COMPONENT_5BITS;
+			gbits = COMPONENT_6BITS;
+			bbits = COMPONENT_5BITS;
+			break;
+
+		case MDP_BGR_565:
+			iowrite32(GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8),
+				  MSM_ROTATOR_SRC_UNPACK_PATTERN1);
+			iowrite32(GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8),
+				  MSM_ROTATOR_OUT_PACK_PATTERN1);
+			abits = 0;
+			rbits = COMPONENT_5BITS;
+			gbits = COMPONENT_6BITS;
+			bbits = COMPONENT_5BITS;
+			break;
+
+		case MDP_RGB_888:
+			iowrite32(GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8),
+				  MSM_ROTATOR_SRC_UNPACK_PATTERN1);
+			iowrite32(GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8),
+				  MSM_ROTATOR_OUT_PACK_PATTERN1);
+			abits = 0;
+			rbits = COMPONENT_8BITS;
+			gbits = COMPONENT_8BITS;
+			bbits = COMPONENT_8BITS;
+			break;
+
+		case MDP_ARGB_8888:
+		case MDP_RGBA_8888:
+		case MDP_XRGB_8888:
+		case MDP_RGBX_8888:
+			iowrite32(GET_PACK_PATTERN(CLR_ALPHA, CLR_R, CLR_G,
+						   CLR_B, 8),
+				  MSM_ROTATOR_SRC_UNPACK_PATTERN1);
+			iowrite32(GET_PACK_PATTERN(CLR_ALPHA, CLR_R, CLR_G,
+						   CLR_B, 8),
+				  MSM_ROTATOR_OUT_PACK_PATTERN1);
+			abits = COMPONENT_8BITS;
+			rbits = COMPONENT_8BITS;
+			gbits = COMPONENT_8BITS;
+			bbits = COMPONENT_8BITS;
+			break;
+
+		case MDP_BGRA_8888:
+			iowrite32(GET_PACK_PATTERN(CLR_ALPHA, CLR_B, CLR_G,
+						   CLR_R, 8),
+				  MSM_ROTATOR_SRC_UNPACK_PATTERN1);
+			iowrite32(GET_PACK_PATTERN(CLR_ALPHA, CLR_B, CLR_G,
+						   CLR_R, 8),
+				  MSM_ROTATOR_OUT_PACK_PATTERN1);
+			abits = COMPONENT_8BITS;
+			rbits = COMPONENT_8BITS;
+			gbits = COMPONENT_8BITS;
+			bbits = COMPONENT_8BITS;
+			break;
+
+		default:
+			return -EINVAL;
+		}
+		iowrite32(0 << 29 | 		/* frame format 0 = linear */
+			  (use_imem ? 0 : 1) << 22 | /* tile size */
+			  0 << 19 | 		/* fetch planes 0=interleaved */
+			  0 << 18 | 		/* unpack align */
+			  1 << 17 | 		/* unpack tight */
+			  (abits ? 3 : 2) << 13 | /* unpack count 0=1 comp */
+			  (bpp-1) << 9 | 	/* src Bpp 0=1 byte ... */
+			  (abits ? 1 : 0) << 8  | /* has alpha */
+			  abits << 6  | 	/* alpha bits 3=8bits */
+			  rbits << 4  | 	/* R/Cr bits 1=5 2=6 3=8 */
+			  bbits << 2  | 	/* B/Cb bits 1=5 2=6 3=8 */
+			  gbits << 0,   	/* G/Y  bits 1=5 2=6 3=8 */
+			  MSM_ROTATOR_SRC_FORMAT);
+	}
+
+	return 0;
+}
+
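+/*
+ * Resolve a user-supplied memory_id (file descriptor) to a physical
+ * address range.  PMEM buffers are tried first via get_pmem_file();
+ * otherwise the fd must refer to a framebuffer node, whose physical range
+ * is looked up with get_fb_phys_info().  On success, *pp_file holds a
+ * reference to the backing file.
+ */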
+static int get_img(int memory_id, unsigned long *start, unsigned long *len,
+		struct file **pp_file)
+{
+	int ret = 0;
+#ifdef CONFIG_FB
+	struct file *file;
+	int put_needed, fb_num;
+#endif
+#ifdef CONFIG_ANDROID_PMEM
+	unsigned long vstart;
+#endif
+
+#ifdef CONFIG_ANDROID_PMEM
+	if (!get_pmem_file(memory_id, start, &vstart, len, pp_file))
+		return 0;
+#endif
+#ifdef CONFIG_FB
+	file = fget_light(memory_id, &put_needed);
+	if (file == NULL)
+		return -1;
+
+	if (MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
+		fb_num = MINOR(file->f_dentry->d_inode->i_rdev);
+		if (get_fb_phys_info(start, len, fb_num))
+			ret = -1;
+		else
+			*pp_file = file;
+	} else
+		ret = -1;
+	if (ret)
+		fput_light(file, put_needed);
+#endif
+	return ret;
+}
+
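+/*
+ * MSM_ROTATOR_IOCTL_ROTATE handler: resolve the source and destination
+ * buffers to physical addresses, program the rotator registers for the
+ * session's format (passing a new_session flag when the session differs
+ * from the last one programmed), start the hardware, and sleep until the
+ * interrupt handler signals completion; the status register is then
+ * checked for errors.
+ */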
+static int msm_rotator_do_rotate(unsigned long arg)
+{
+	int rc = 0;
+	unsigned int status;
+	struct msm_rotator_data_info info;
+	unsigned int in_paddr, out_paddr;
+	unsigned long len;
+	struct file *src_file = 0;
+	struct file *dst_file = 0;
+	int use_imem = 0;
+	int s;
+	struct file *src_chroma_file = 0;
+	struct file *dst_chroma_file = 0;
+	unsigned int in_chroma_paddr = 0, out_chroma_paddr = 0;
+	uint32_t format;
+
+	if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
+		return -EFAULT;
+
+	rc = get_img(info.src.memory_id, (unsigned long *)&in_paddr,
+			(unsigned long *)&len, &src_file);
+	if (rc) {
+		printk(KERN_ERR "%s: in get_img() failed id=0x%08x\n",
+		       DRIVER_NAME, info.src.memory_id);
+		return rc;
+	}
+	in_paddr += info.src.offset;
+
+	rc = get_img(info.dst.memory_id, (unsigned long *)&out_paddr,
+			(unsigned long *)&len, &dst_file);
+	if (rc) {
+		printk(KERN_ERR "%s: out get_img() failed id=0x%08x\n",
+		       DRIVER_NAME, info.dst.memory_id);
+		goto do_rotate_fail_dst_img;
+	}
+	out_paddr += info.dst.offset;
+
+	mutex_lock(&msm_rotator_dev->rotator_lock);
+	for (s = 0; s < MAX_SESSIONS; s++)
+		if ((msm_rotator_dev->img_info[s] != NULL) &&
+			(info.session_id ==
+			(unsigned int)msm_rotator_dev->img_info[s]
+			))
+			break;
+
+	if (s == MAX_SESSIONS) {
+		dev_dbg(msm_rotator_dev->device,
+			"%s() : Attempt to use invalid session_id %d\n",
+			__func__, s);
+		rc = -EINVAL;
+		goto do_rotate_unlock_mutex;
+	}
+
+	if (msm_rotator_dev->img_info[s]->enable == 0) {
+		dev_dbg(msm_rotator_dev->device,
+			"%s() : Session_id %d not enabled\n",
+			__func__, s);
+		rc = -EINVAL;
+		goto do_rotate_unlock_mutex;
+	}
+
+	format = msm_rotator_dev->img_info[s]->src.format;
+	if (((info.version_key & VERSION_KEY_MASK) == 0xA5B4C300) &&
+		((info.version_key & ~VERSION_KEY_MASK) > 0) &&
+		 (format == MDP_Y_CBCR_H2V2 ||
+		  format == MDP_Y_CRCB_H2V2 ||
+		  format == MDP_Y_CRCB_H2V2_TILE ||
+		  format == MDP_Y_CBCR_H2V2_TILE ||
+		  format == MDP_Y_CBCR_H2V1 ||
+		  format == MDP_Y_CRCB_H2V1)) {
+		rc = get_img(info.src_chroma.memory_id,
+				(unsigned long *)&in_chroma_paddr,
+				(unsigned long *)&len, &src_chroma_file);
+		if (rc) {
+			printk(KERN_ERR "%s: in chroma get_img() failed id=0x%08x\n",
+				DRIVER_NAME, info.src_chroma.memory_id);
+			goto do_rotate_unlock_mutex;
+		}
+		in_chroma_paddr += info.src_chroma.offset;
+
+		rc = get_img(info.dst_chroma.memory_id,
+				(unsigned long *)&out_chroma_paddr,
+				(unsigned long *)&len, &dst_chroma_file);
+		if (rc) {
+			printk(KERN_ERR "%s: out chroma get_img() failed id=0x%08x\n",
+				DRIVER_NAME, info.dst_chroma.memory_id);
+			goto do_rotate_fail_dst_chr_img;
+		}
+		out_chroma_paddr += info.dst_chroma.offset;
+	}
+
+	cancel_delayed_work(&msm_rotator_dev->rot_clk_work);
+	if (msm_rotator_dev->rot_clk_state != CLK_EN) {
+		enable_rot_clks();
+		msm_rotator_dev->rot_clk_state = CLK_EN;
+	}
+	enable_irq(msm_rotator_dev->irq);
+
+#ifdef CONFIG_MSM_ROTATOR_USE_IMEM
+	use_imem = msm_rotator_imem_allocate(ROTATOR_REQUEST);
+#else
+	use_imem = 0;
+#endif
+	/*
+	 * workaround for a hardware bug. rotator hardware hangs when we
+	 * use write burst beat size 16 on 128X128 tile fetch mode. As a
+	 * temporary fix use 0x42 for BURST_SIZE when imem used.
+	 */
+	if (use_imem)
+		iowrite32(0x42, MSM_ROTATOR_MAX_BURST_SIZE);
+
+	iowrite32(((msm_rotator_dev->img_info[s]->src_rect.h & 0x1fff)
+				<< 16) |
+		  (msm_rotator_dev->img_info[s]->src_rect.w & 0x1fff),
+		  MSM_ROTATOR_SRC_SIZE);
+	iowrite32(((msm_rotator_dev->img_info[s]->src_rect.y & 0x1fff)
+				<< 16) |
+		  (msm_rotator_dev->img_info[s]->src_rect.x & 0x1fff),
+		  MSM_ROTATOR_SRC_XY);
+	iowrite32(((msm_rotator_dev->img_info[s]->src.height & 0x1fff)
+				<< 16) |
+		  (msm_rotator_dev->img_info[s]->src.width & 0x1fff),
+		  MSM_ROTATOR_SRC_IMAGE_SIZE);
+
+	switch (format) {
+	case MDP_RGB_565:
+	case MDP_BGR_565:
+	case MDP_RGB_888:
+	case MDP_ARGB_8888:
+	case MDP_RGBA_8888:
+	case MDP_XRGB_8888:
+	case MDP_BGRA_8888:
+	case MDP_RGBX_8888:
+		rc = msm_rotator_rgb_types(msm_rotator_dev->img_info[s],
+					   in_paddr, out_paddr,
+					   use_imem,
+					   msm_rotator_dev->last_session_idx
+								!= s);
+		break;
+	case MDP_Y_CBCR_H2V2:
+	case MDP_Y_CRCB_H2V2:
+		rc = msm_rotator_ycxcx_h2v2(msm_rotator_dev->img_info[s],
+					    in_paddr, out_paddr, use_imem,
+					    msm_rotator_dev->last_session_idx
+								!= s,
+					    in_chroma_paddr,
+					    out_chroma_paddr);
+		break;
+	case MDP_Y_CRCB_H2V2_TILE:
+	case MDP_Y_CBCR_H2V2_TILE:
+		rc = msm_rotator_ycxcx_h2v2_tile(msm_rotator_dev->img_info[s],
+				in_paddr, out_paddr, use_imem,
+				msm_rotator_dev->last_session_idx
+				!= s,
+				in_chroma_paddr,
+				out_chroma_paddr);
+		break;
+
+	case MDP_Y_CBCR_H2V1:
+	case MDP_Y_CRCB_H2V1:
+		rc = msm_rotator_ycxcx_h2v1(msm_rotator_dev->img_info[s],
+					    in_paddr, out_paddr, use_imem,
+					    msm_rotator_dev->last_session_idx
+								!= s,
+					    in_chroma_paddr,
+					    out_chroma_paddr);
+		break;
+	case MDP_YCRYCB_H2V1:
+		rc = msm_rotator_ycrycb(msm_rotator_dev->img_info[s],
+				in_paddr, out_paddr, use_imem,
+				msm_rotator_dev->last_session_idx != s);
+		break;
+	default:
+		rc = -EINVAL;
+		goto do_rotate_exit;
+	}
+
+	if (rc != 0) {
+		msm_rotator_dev->last_session_idx = INVALID_SESSION;
+		goto do_rotate_exit;
+	}
+
+	iowrite32(3, MSM_ROTATOR_INTR_ENABLE);
+
+	msm_rotator_dev->processing = 1;
+	iowrite32(0x1, MSM_ROTATOR_START);
+
+	wait_event(msm_rotator_dev->wq,
+		   (msm_rotator_dev->processing == 0));
+	status = (unsigned char)ioread32(MSM_ROTATOR_INTR_STATUS);
+	if ((status & 0x03) != 0x01)
+		rc = -EFAULT;
+	iowrite32(0, MSM_ROTATOR_INTR_ENABLE);
+	iowrite32(3, MSM_ROTATOR_INTR_CLEAR);
+
+do_rotate_exit:
+	disable_irq(msm_rotator_dev->irq);
+#ifdef CONFIG_MSM_ROTATOR_USE_IMEM
+	msm_rotator_imem_free(ROTATOR_REQUEST);
+#endif
+	schedule_delayed_work(&msm_rotator_dev->rot_clk_work, HZ);
+	if (dst_chroma_file)
+		put_pmem_file(dst_chroma_file);
+do_rotate_fail_dst_chr_img:
+	if (src_chroma_file)
+		put_pmem_file(src_chroma_file);
+do_rotate_unlock_mutex:
+	mutex_unlock(&msm_rotator_dev->rotator_lock);
+	if (dst_file)
+		put_pmem_file(dst_file);
+do_rotate_fail_dst_img:
+	if (src_file)
+		put_pmem_file(src_file);
+	dev_dbg(msm_rotator_dev->device, "%s() returning rc = %d\n",
+		__func__, rc);
+	return rc;
+}
+
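+/*
+ * MSM_ROTATOR_IOCTL_START handler: validate the requested geometry and
+ * formats, then create or update a session.  The session id returned to
+ * user space is simply the kernel address of the kzalloc'd
+ * msm_rotator_img_info, which later ioctls match against the img_info
+ * table to locate the session.
+ */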
+static int msm_rotator_start(unsigned long arg, int pid)
+{
+	struct msm_rotator_img_info info;
+	int rc = 0;
+	int s;
+	int first_free_index = INVALID_SESSION;
+
+	if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
+		return -EFAULT;
+
+	if ((info.rotations > MSM_ROTATOR_MAX_ROT) ||
+	    (info.src.height > MSM_ROTATOR_MAX_H) ||
+	    (info.src.width > MSM_ROTATOR_MAX_W) ||
+	    (info.dst.height > MSM_ROTATOR_MAX_H) ||
+	    (info.dst.width > MSM_ROTATOR_MAX_W) ||
+	    ((info.src_rect.x + info.src_rect.w) > info.src.width) ||
+	    ((info.src_rect.y + info.src_rect.h) > info.src.height) ||
+	    ((info.rotations & MDP_ROT_90) &&
+		((info.dst_x + info.src_rect.h) > info.dst.width)) ||
+	    ((info.rotations & MDP_ROT_90) &&
+		((info.dst_y + info.src_rect.w) > info.dst.height)) ||
+	    (!(info.rotations & MDP_ROT_90) &&
+		((info.dst_x + info.src_rect.w) > info.dst.width)) ||
+	    (!(info.rotations & MDP_ROT_90) &&
+		((info.dst_y + info.src_rect.h) > info.dst.height)))
+		return -EINVAL;
+
+	switch (info.src.format) {
+	case MDP_RGB_565:
+	case MDP_BGR_565:
+	case MDP_RGB_888:
+	case MDP_ARGB_8888:
+	case MDP_RGBA_8888:
+	case MDP_XRGB_8888:
+	case MDP_RGBX_8888:
+	case MDP_BGRA_8888:
+	case MDP_Y_CBCR_H2V2:
+	case MDP_Y_CRCB_H2V2:
+	case MDP_Y_CBCR_H2V1:
+	case MDP_Y_CRCB_H2V1:
+	case MDP_YCRYCB_H2V1:
+	case MDP_Y_CRCB_H2V2_TILE:
+	case MDP_Y_CBCR_H2V2_TILE:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (info.dst.format) {
+	case MDP_RGB_565:
+	case MDP_BGR_565:
+	case MDP_RGB_888:
+	case MDP_ARGB_8888:
+	case MDP_RGBA_8888:
+	case MDP_XRGB_8888:
+	case MDP_RGBX_8888:
+	case MDP_BGRA_8888:
+	case MDP_Y_CBCR_H2V2:
+	case MDP_Y_CRCB_H2V2:
+	case MDP_Y_CBCR_H2V1:
+	case MDP_Y_CRCB_H2V1:
+	case MDP_YCRYCB_H2V1:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	mutex_lock(&msm_rotator_dev->rotator_lock);
+	for (s = 0; s < MAX_SESSIONS; s++) {
+		if ((msm_rotator_dev->img_info[s] != NULL) &&
+			(info.session_id ==
+			(unsigned int)msm_rotator_dev->img_info[s]
+			)) {
+			*(msm_rotator_dev->img_info[s]) = info;
+			msm_rotator_dev->pid_list[s] = pid;
+
+			if (msm_rotator_dev->last_session_idx == s)
+				msm_rotator_dev->last_session_idx =
+				INVALID_SESSION;
+			break;
+		}
+
+		if ((msm_rotator_dev->img_info[s] == NULL) &&
+			(first_free_index ==
+			INVALID_SESSION))
+			first_free_index = s;
+	}
+
+	if ((s == MAX_SESSIONS) && (first_free_index != INVALID_SESSION)) {
+		/* allocate a session id */
+		msm_rotator_dev->img_info[first_free_index] =
+			kzalloc(sizeof(struct msm_rotator_img_info),
+					GFP_KERNEL);
+		if (!msm_rotator_dev->img_info[first_free_index]) {
+			printk(KERN_ERR "%s : unable to alloc mem\n",
+					__func__);
+			rc = -ENOMEM;
+			goto rotator_start_exit;
+		}
+		info.session_id = (unsigned int)
+			msm_rotator_dev->img_info[first_free_index];
+		*(msm_rotator_dev->img_info[first_free_index]) = info;
+		msm_rotator_dev->pid_list[first_free_index] = pid;
+
+		if (copy_to_user((void __user *)arg, &info, sizeof(info)))
+			rc = -EFAULT;
+	} else if (s == MAX_SESSIONS) {
+		dev_dbg(msm_rotator_dev->device, "%s: all sessions in use\n",
+			__func__);
+		rc = -EBUSY;
+	}
+
+rotator_start_exit:
+	mutex_unlock(&msm_rotator_dev->rotator_lock);
+
+	return rc;
+}
+
+static int msm_rotator_finish(unsigned long arg)
+{
+	int rc = 0;
+	int s;
+	unsigned int session_id;
+
+	if (copy_from_user(&session_id, (void __user *)arg,
+				sizeof(session_id)))
+		return -EFAULT;
+
+	mutex_lock(&msm_rotator_dev->rotator_lock);
+	for (s = 0; s < MAX_SESSIONS; s++) {
+		if ((msm_rotator_dev->img_info[s] != NULL) &&
+			(session_id ==
+			(unsigned int)msm_rotator_dev->img_info[s])) {
+			if (msm_rotator_dev->last_session_idx == s)
+				msm_rotator_dev->last_session_idx =
+					INVALID_SESSION;
+			kfree(msm_rotator_dev->img_info[s]);
+			msm_rotator_dev->img_info[s] = NULL;
+			msm_rotator_dev->pid_list[s] = 0;
+			break;
+		}
+	}
+
+	if (s == MAX_SESSIONS)
+		rc = -EINVAL;
+	mutex_unlock(&msm_rotator_dev->rotator_lock);
+	return rc;
+}
+
+static int
+msm_rotator_open(struct inode *inode, struct file *filp)
+{
+	int *id;
+	int i;
+
+	if (filp->private_data)
+		return -EBUSY;
+
+	mutex_lock(&msm_rotator_dev->rotator_lock);
+	id = &msm_rotator_dev->pid_list[0];
+	for (i = 0; i < MAX_SESSIONS; i++, id++) {
+		if (*id == 0)
+			break;
+	}
+	mutex_unlock(&msm_rotator_dev->rotator_lock);
+
+	if (i == MAX_SESSIONS)
+		return -EBUSY;
+
+	filp->private_data = (void *)task_tgid_nr(current);
+
+	return 0;
+}
+
+static int
+msm_rotator_close(struct inode *inode, struct file *filp)
+{
+	int s;
+	int pid;
+
+	pid = (int)filp->private_data;
+	mutex_lock(&msm_rotator_dev->rotator_lock);
+	for (s = 0; s < MAX_SESSIONS; s++) {
+		if (msm_rotator_dev->img_info[s] != NULL &&
+			msm_rotator_dev->pid_list[s] == pid) {
+			kfree(msm_rotator_dev->img_info[s]);
+			msm_rotator_dev->img_info[s] = NULL;
+			if (msm_rotator_dev->last_session_idx == s)
+				msm_rotator_dev->last_session_idx =
+					INVALID_SESSION;
+		}
+	}
+	mutex_unlock(&msm_rotator_dev->rotator_lock);
+
+	return 0;
+}
+
+static long msm_rotator_ioctl(struct file *file, unsigned cmd,
+						 unsigned long arg)
+{
+	int pid;
+
+	if (_IOC_TYPE(cmd) != MSM_ROTATOR_IOCTL_MAGIC)
+		return -ENOTTY;
+
+	pid = (int)file->private_data;
+
+	switch (cmd) {
+	case MSM_ROTATOR_IOCTL_START:
+		return msm_rotator_start(arg, pid);
+	case MSM_ROTATOR_IOCTL_ROTATE:
+		return msm_rotator_do_rotate(arg);
+	case MSM_ROTATOR_IOCTL_FINISH:
+		return msm_rotator_finish(arg);
+
+	default:
+		dev_dbg(msm_rotator_dev->device,
+			"unexpected IOCTL %d\n", cmd);
+		return -ENOTTY;
+	}
+}
+
+static const struct file_operations msm_rotator_fops = {
+	.owner = THIS_MODULE,
+	.open = msm_rotator_open,
+	.release = msm_rotator_close,
+	.unlocked_ioctl = msm_rotator_ioctl,
+};
+
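+/*
+ * Probe: allocate the driver state, look up the imem/pclk/core/axi clocks
+ * and the regulator named in the platform data, map the register
+ * resource, verify the hardware version against the platform data,
+ * request the rotator IRQ (left disabled until a rotation is in flight),
+ * and finally register the character device.
+ */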
+static int __devinit msm_rotator_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct resource *res;
+	struct msm_rotator_platform_data *pdata = NULL;
+	int i, number_of_clks;
+	uint32_t ver;
+
+	msm_rotator_dev = kzalloc(sizeof(struct msm_rotator_dev), GFP_KERNEL);
+	if (!msm_rotator_dev) {
+		printk(KERN_ERR "%s Unable to allocate memory for struct\n",
+		       __func__);
+		return -ENOMEM;
+	}
+	for (i = 0; i < MAX_SESSIONS; i++)
+		msm_rotator_dev->img_info[i] = NULL;
+	msm_rotator_dev->last_session_idx = INVALID_SESSION;
+
+	pdata = pdev->dev.platform_data;
+	number_of_clks = pdata->number_of_clocks;
+
+	msm_rotator_dev->imem_owner = IMEM_NO_OWNER;
+	mutex_init(&msm_rotator_dev->imem_lock);
+	msm_rotator_dev->imem_clk_state = CLK_DIS;
+	INIT_DELAYED_WORK(&msm_rotator_dev->imem_clk_work,
+			  msm_rotator_imem_clk_work_f);
+	msm_rotator_dev->imem_clk = NULL;
+	msm_rotator_dev->pdev = pdev;
+
+	msm_rotator_dev->core_clk = NULL;
+	msm_rotator_dev->pclk = NULL;
+	msm_rotator_dev->axi_clk = NULL;
+
+	for (i = 0; i < number_of_clks; i++) {
+		if (pdata->rotator_clks[i].clk_type == ROTATOR_IMEM_CLK) {
+			msm_rotator_dev->imem_clk =
+			clk_get(&msm_rotator_dev->pdev->dev,
+				pdata->rotator_clks[i].clk_name);
+			if (IS_ERR(msm_rotator_dev->imem_clk)) {
+				rc = PTR_ERR(msm_rotator_dev->imem_clk);
+				msm_rotator_dev->imem_clk = NULL;
+				printk(KERN_ERR "%s: cannot get imem_clk "
+					"rc=%d\n", DRIVER_NAME, rc);
+				goto error_imem_clk;
+			}
+			if (pdata->rotator_clks[i].clk_rate)
+				clk_set_min_rate(msm_rotator_dev->imem_clk,
+					pdata->rotator_clks[i].clk_rate);
+		}
+		if (pdata->rotator_clks[i].clk_type == ROTATOR_PCLK) {
+			msm_rotator_dev->pclk =
+			clk_get(&msm_rotator_dev->pdev->dev,
+				pdata->rotator_clks[i].clk_name);
+			if (IS_ERR(msm_rotator_dev->pclk)) {
+				rc = PTR_ERR(msm_rotator_dev->pclk);
+				msm_rotator_dev->pclk = NULL;
+				printk(KERN_ERR "%s: cannot get pclk rc=%d\n",
+					DRIVER_NAME, rc);
+				goto error_pclk;
+			}
+
+			if (pdata->rotator_clks[i].clk_rate)
+				clk_set_min_rate(msm_rotator_dev->pclk,
+					pdata->rotator_clks[i].clk_rate);
+		}
+
+		if (pdata->rotator_clks[i].clk_type == ROTATOR_CORE_CLK) {
+			msm_rotator_dev->core_clk =
+			clk_get(&msm_rotator_dev->pdev->dev,
+				pdata->rotator_clks[i].clk_name);
+			if (IS_ERR(msm_rotator_dev->core_clk)) {
+				rc = PTR_ERR(msm_rotator_dev->core_clk);
+				msm_rotator_dev->core_clk = NULL;
+				printk(KERN_ERR "%s: cannot get core clk "
+					"rc=%d\n", DRIVER_NAME, rc);
+				goto error_core_clk;
+			}
+
+			if (pdata->rotator_clks[i].clk_rate)
+				clk_set_min_rate(msm_rotator_dev->core_clk,
+					pdata->rotator_clks[i].clk_rate);
+		}
+
+		if (pdata->rotator_clks[i].clk_type == ROTATOR_AXI_CLK) {
+			msm_rotator_dev->axi_clk =
+			clk_get(&msm_rotator_dev->pdev->dev,
+				pdata->rotator_clks[i].clk_name);
+			if (IS_ERR(msm_rotator_dev->axi_clk)) {
+				rc = PTR_ERR(msm_rotator_dev->axi_clk);
+				msm_rotator_dev->axi_clk = NULL;
+				printk(KERN_ERR "%s: cannot get axi clk "
+					"rc=%d\n", DRIVER_NAME, rc);
+				goto error_axi_clk;
+			}
+
+			if (pdata->rotator_clks[i].clk_rate)
+				clk_set_min_rate(msm_rotator_dev->axi_clk,
+					pdata->rotator_clks[i].clk_rate);
+		}
+	}
+
+	msm_rotator_dev->regulator = regulator_get(NULL, pdata->regulator_name);
+	if (IS_ERR(msm_rotator_dev->regulator))
+		msm_rotator_dev->regulator = NULL;
+
+	msm_rotator_dev->rot_clk_state = CLK_DIS;
+	INIT_DELAYED_WORK(&msm_rotator_dev->rot_clk_work,
+			  msm_rotator_rot_clk_work_f);
+
+	mutex_init(&msm_rotator_dev->rotator_lock);
+
+	platform_set_drvdata(pdev, msm_rotator_dev);
+
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		printk(KERN_ALERT
+		       "%s: could not get IORESOURCE_MEM\n", DRIVER_NAME);
+		rc = -ENODEV;
+		goto error_get_resource;
+	}
+	msm_rotator_dev->io_base = ioremap(res->start,
+					   resource_size(res));
+
+#ifdef CONFIG_MSM_ROTATOR_USE_IMEM
+	if (msm_rotator_dev->imem_clk)
+		clk_enable(msm_rotator_dev->imem_clk);
+#endif
+	enable_rot_clks();
+	ver = ioread32(MSM_ROTATOR_HW_VERSION);
+	disable_rot_clks();
+
+#ifdef CONFIG_MSM_ROTATOR_USE_IMEM
+	if (msm_rotator_dev->imem_clk)
+		clk_disable(msm_rotator_dev->imem_clk);
+#endif
+	if (ver != pdata->hardware_version_number) {
+		printk(KERN_ALERT "%s: invalid HW version\n", DRIVER_NAME);
+		rc = -ENODEV;
+		goto error_get_resource;
+	}
+	msm_rotator_dev->irq = platform_get_irq(pdev, 0);
+	if (msm_rotator_dev->irq < 0) {
+		printk(KERN_ALERT "%s: could not get IORESOURCE_IRQ\n",
+		       DRIVER_NAME);
+		rc = -ENODEV;
+		goto error_get_irq;
+	}
+	rc = request_irq(msm_rotator_dev->irq, msm_rotator_isr,
+			 IRQF_TRIGGER_RISING, DRIVER_NAME, NULL);
+	if (rc) {
+		printk(KERN_ERR "%s: request_irq() failed\n", DRIVER_NAME);
+		goto error_get_irq;
+	}
+	/* we enable the IRQ when we need it in the ioctl */
+	disable_irq(msm_rotator_dev->irq);
+
+	rc = alloc_chrdev_region(&msm_rotator_dev->dev_num, 0, 1, DRIVER_NAME);
+	if (rc < 0) {
+		printk(KERN_ERR "%s: alloc_chrdev_region Failed rc = %d\n",
+		       __func__, rc);
+		goto error_get_irq;
+	}
+
+	msm_rotator_dev->class = class_create(THIS_MODULE, DRIVER_NAME);
+	if (IS_ERR(msm_rotator_dev->class)) {
+		rc = PTR_ERR(msm_rotator_dev->class);
+		printk(KERN_ERR "%s: couldn't create class rc = %d\n",
+		       DRIVER_NAME, rc);
+		goto error_class_create;
+	}
+
+	msm_rotator_dev->device = device_create(msm_rotator_dev->class, NULL,
+						msm_rotator_dev->dev_num, NULL,
+						DRIVER_NAME);
+	if (IS_ERR(msm_rotator_dev->device)) {
+		rc = PTR_ERR(msm_rotator_dev->device);
+		printk(KERN_ERR "%s: device_create failed %d\n",
+		       DRIVER_NAME, rc);
+		goto error_class_device_create;
+	}
+
+	cdev_init(&msm_rotator_dev->cdev, &msm_rotator_fops);
+	rc = cdev_add(&msm_rotator_dev->cdev,
+		      MKDEV(MAJOR(msm_rotator_dev->dev_num), 0),
+		      1);
+	if (rc < 0) {
+		printk(KERN_ERR "%s: cdev_add failed %d\n", __func__, rc);
+		goto error_cdev_add;
+	}
+
+	init_waitqueue_head(&msm_rotator_dev->wq);
+
+	dev_dbg(msm_rotator_dev->device, "probe successful\n");
+	return rc;
+
+error_cdev_add:
+	device_destroy(msm_rotator_dev->class, msm_rotator_dev->dev_num);
+error_class_device_create:
+	class_destroy(msm_rotator_dev->class);
+error_class_create:
+	unregister_chrdev_region(msm_rotator_dev->dev_num, 1);
+error_get_irq:
+	iounmap(msm_rotator_dev->io_base);
+error_get_resource:
+	mutex_destroy(&msm_rotator_dev->rotator_lock);
+	if (msm_rotator_dev->regulator)
+		regulator_put(msm_rotator_dev->regulator);
+	clk_put(msm_rotator_dev->axi_clk);
+error_axi_clk:
+	clk_put(msm_rotator_dev->core_clk);
+error_core_clk:
+	clk_put(msm_rotator_dev->pclk);
+error_pclk:
+	if (msm_rotator_dev->imem_clk)
+		clk_put(msm_rotator_dev->imem_clk);
+error_imem_clk:
+	mutex_destroy(&msm_rotator_dev->imem_lock);
+	kfree(msm_rotator_dev);
+	return rc;
+}
+
+static int __devexit msm_rotator_remove(struct platform_device *plat_dev)
+{
+	int i;
+
+	free_irq(msm_rotator_dev->irq, NULL);
+	mutex_destroy(&msm_rotator_dev->rotator_lock);
+	cdev_del(&msm_rotator_dev->cdev);
+	device_destroy(msm_rotator_dev->class, msm_rotator_dev->dev_num);
+	class_destroy(msm_rotator_dev->class);
+	unregister_chrdev_region(msm_rotator_dev->dev_num, 1);
+	iounmap(msm_rotator_dev->io_base);
+	if (msm_rotator_dev->imem_clk) {
+		if (msm_rotator_dev->imem_clk_state == CLK_EN)
+			clk_disable(msm_rotator_dev->imem_clk);
+		clk_put(msm_rotator_dev->imem_clk);
+		msm_rotator_dev->imem_clk = NULL;
+	}
+	if (msm_rotator_dev->rot_clk_state == CLK_EN)
+		disable_rot_clks();
+	clk_put(msm_rotator_dev->core_clk);
+	clk_put(msm_rotator_dev->pclk);
+	clk_put(msm_rotator_dev->axi_clk);
+	if (msm_rotator_dev->regulator)
+		regulator_put(msm_rotator_dev->regulator);
+	msm_rotator_dev->core_clk = NULL;
+	msm_rotator_dev->pclk = NULL;
+	msm_rotator_dev->axi_clk = NULL;
+	mutex_destroy(&msm_rotator_dev->imem_lock);
+	for (i = 0; i < MAX_SESSIONS; i++)
+		if (msm_rotator_dev->img_info[i] != NULL)
+			kfree(msm_rotator_dev->img_info[i]);
+	kfree(msm_rotator_dev);
+	return 0;
+}
+
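+/*
+ * Power management: suspend turns off any clock that is currently enabled
+ * and marks it CLK_SUSPEND rather than CLK_DIS so that resume knows to
+ * re-enable it; the delayed clock-gating work treats CLK_SUSPEND as
+ * "already off" and simply downgrades the state to CLK_DIS.
+ */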
+#ifdef CONFIG_PM
+static int msm_rotator_suspend(struct platform_device *dev, pm_message_t state)
+{
+	mutex_lock(&msm_rotator_dev->imem_lock);
+	if (msm_rotator_dev->imem_clk_state == CLK_EN
+		&& msm_rotator_dev->imem_clk) {
+		clk_disable(msm_rotator_dev->imem_clk);
+		msm_rotator_dev->imem_clk_state = CLK_SUSPEND;
+	}
+	mutex_unlock(&msm_rotator_dev->imem_lock);
+	mutex_lock(&msm_rotator_dev->rotator_lock);
+	if (msm_rotator_dev->rot_clk_state == CLK_EN) {
+		disable_rot_clks();
+		msm_rotator_dev->rot_clk_state = CLK_SUSPEND;
+	}
+	mutex_unlock(&msm_rotator_dev->rotator_lock);
+	return 0;
+}
+
+static int msm_rotator_resume(struct platform_device *dev)
+{
+	mutex_lock(&msm_rotator_dev->imem_lock);
+	if (msm_rotator_dev->imem_clk_state == CLK_SUSPEND
+		&& msm_rotator_dev->imem_clk) {
+		clk_enable(msm_rotator_dev->imem_clk);
+		msm_rotator_dev->imem_clk_state = CLK_EN;
+	}
+	mutex_unlock(&msm_rotator_dev->imem_lock);
+	mutex_lock(&msm_rotator_dev->rotator_lock);
+	if (msm_rotator_dev->rot_clk_state == CLK_SUSPEND) {
+		enable_rot_clks();
+		msm_rotator_dev->rot_clk_state = CLK_EN;
+	}
+	mutex_unlock(&msm_rotator_dev->rotator_lock);
+	return 0;
+}
+#endif
+
+static struct platform_driver msm_rotator_platform_driver = {
+	.probe = msm_rotator_probe,
+	.remove = __devexit_p(msm_rotator_remove),
+#ifdef CONFIG_PM
+	.suspend = msm_rotator_suspend,
+	.resume = msm_rotator_resume,
+#endif
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = DRIVER_NAME
+	}
+};
+
+static int __init msm_rotator_init(void)
+{
+	return platform_driver_register(&msm_rotator_platform_driver);
+}
+
+static void __exit msm_rotator_exit(void)
+{
+	return platform_driver_unregister(&msm_rotator_platform_driver);
+}
+
+module_init(msm_rotator_init);
+module_exit(msm_rotator_exit);
+
+MODULE_DESCRIPTION("MSM Offline Image Rotator driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/msm_smd_pkt.c b/drivers/char/msm_smd_pkt.c
index b6f8a65..a8e28d3 100644
--- a/drivers/char/msm_smd_pkt.c
+++ b/drivers/char/msm_smd_pkt.c
@@ -9,11 +9,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
  */
 /*
  * SMD Packet Driver -- Provides userspace interface to SMD packet ports.
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index f6595ab..c720e9a 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -60,4 +60,18 @@
 	  Further information on this driver and the supported hardware
 	  can be found at http://www.trust.rub.de/projects/linux-device-driver-infineon-tpm/ 
 
+config TCG_ST_I2C
+	tristate "ST Micro ST19NP18-TPM-I2C TPM interface"
+	depends on I2C
+	default n
+	---help---
+	  If you have an ST19NP18-TPM-I2C TPM security chip from ST Micro,
+	  say Yes and it will be accessible from Linux.
+
+config TCG_TPMD_DEV
+	tristate "tpmd_dev TPM Emulator driver"
+	default n
+	---help---
+	  Enables the tpmd_dev driver, which forwards TPM commands to a
+	  user-space TPM emulator daemon.
+
 endif # TCG_TPM
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index ea3a1e0..c113cf1 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -6,6 +6,8 @@
 	obj-$(CONFIG_TCG_TPM) += tpm_bios.o
 endif
 obj-$(CONFIG_TCG_TIS) += tpm_tis.o
+obj-$(CONFIG_TCG_ST_I2C) += tpm_st_i2c.o
 obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
 obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
 obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
+obj-$(CONFIG_TCG_TPMD_DEV) += tpmd_dev/
diff --git a/drivers/char/tpm/tpm_st_i2c.c b/drivers/char/tpm/tpm_st_i2c.c
new file mode 100644
index 0000000..3a6e8c4f
--- /dev/null
+++ b/drivers/char/tpm/tpm_st_i2c.c
@@ -0,0 +1,361 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <mach/gpio.h>
+#include <mach/tpm_st_i2c.h>
+#include <mach/msm_iomap.h>
+#include "tpm.h"
+
+#define DEVICE_NAME "tpm_st_i2c"
+
+#define TPM_HEADER_LEN sizeof(struct tpm_input_header)
+#define TPM_ST_I2C_BLOCK_MAX 40
+
+struct tpm_st_i2c_dev {
+	struct i2c_client *client;
+	struct tpm_st_i2c_platform_data *pd;
+	struct completion com[2];
+};
+
+/* for completion array */
+#define ACCEPT_CMD_INDEX 0
+#define DATA_AVAIL_INDEX 1
+
+static struct tpm_st_i2c_dev *tpm_st_i2c_dev;
+
+#define TPM_ST_I2C_REQ_COMPLETE_MASK 1
+
+static u8 tpm_st_i2c_status(struct tpm_chip *chip)
+{
+	int gpio = tpm_st_i2c_dev->pd->data_avail_gpio;
+	return gpio_get_value(gpio);
+}
+
+static void tpm_st_i2c_cancel(struct tpm_chip *chip)
+{
+	/* not supported */
+	return;
+}
+
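+/*
+ * Transfer a TPM command or response over I2C.  The fixed-size header
+ * (TPM_HEADER_LEN bytes) always moves first; on a receive, the big-endian
+ * 32-bit length field at byte offset 2 of that header gives the total
+ * size, and the remainder is then transferred in chunks of at most
+ * TPM_ST_I2C_BLOCK_MAX bytes.  Before each transfer the driver waits on
+ * the data-avail or accept-cmd GPIO (via its interrupt) for the chip to
+ * signal readiness.
+ */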
+static int tpm_st_i2c_transfer_buf(struct tpm_chip *chip, u8 *buf, size_t count,
+				   int recv)
+{
+	struct i2c_msg msg = {
+		.addr = tpm_st_i2c_dev->client->addr,
+		.flags = 0,
+		.buf = buf,
+		.len = TPM_HEADER_LEN, /* must read/write header first */
+	};
+	int gpio;
+	int irq;
+	struct completion *com;
+	__be32 *native_size;
+	int read_header = 0;
+	int rc = 0;
+	int len = count;
+	uint32_t size = count;
+	int tmp;
+
+	if (recv) {
+		msg.flags |= I2C_M_RD;
+		read_header = 1;
+		gpio = tpm_st_i2c_dev->pd->data_avail_gpio;
+		irq = tpm_st_i2c_dev->pd->data_avail_irq;
+		com = &tpm_st_i2c_dev->com[DATA_AVAIL_INDEX];
+	} else {
+		gpio = tpm_st_i2c_dev->pd->accept_cmd_gpio;
+		irq = tpm_st_i2c_dev->pd->accept_cmd_irq;
+		com = &tpm_st_i2c_dev->com[ACCEPT_CMD_INDEX];
+	}
+
+	if (len < TPM_HEADER_LEN) {
+		dev_dbg(chip->dev, "%s: invalid len\n", __func__);
+		return -EINVAL;
+	}
+
+	do {
+		if (!gpio_get_value(gpio)) {
+			/* reset the completion in case the irq fired
+			 * during the probe
+			 */
+			init_completion(com);
+			enable_irq(irq);
+			tmp = wait_for_completion_interruptible_timeout(
+				com, HZ/2);
+			if (!tmp) {
+				dev_dbg(chip->dev, "%s timeout\n",
+					__func__);
+				return -EBUSY;
+			}
+		}
+		rc = i2c_transfer(tpm_st_i2c_dev->client->adapter,
+				  &msg, 1);
+		if (rc < 0) {
+			dev_dbg(chip->dev, "Error in I2C transfer\n");
+			return rc;
+		}
+		if (read_header) {
+			read_header = 0;
+			native_size = (__force __be32 *) (buf + 2);
+			size = be32_to_cpu(*native_size);
+			if (count < size) {
+				dev_dbg(chip->dev,
+					"%s: invalid count\n",
+					__func__);
+				return -EIO;
+			}
+			len = size;
+		}
+		len -= msg.len;
+		if (len) {
+			buf += msg.len;
+			msg.buf = buf;
+			if (len > TPM_ST_I2C_BLOCK_MAX)
+				msg.len = TPM_ST_I2C_BLOCK_MAX;
+			else
+				msg.len = len;
+		}
+	} while (len > 0);
+
+	if (rc >= 0)
+		return size;
+	else
+		return rc;
+}
+
+static int tpm_st_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+	return tpm_st_i2c_transfer_buf(chip, buf, count, 1);
+}
+
+static int tpm_st_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+	return tpm_st_i2c_transfer_buf(chip, buf, len, 0);
+}
+
+#ifdef CONFIG_PM
+static int tpm_st_i2c_suspend(struct i2c_client *client, pm_message_t msg)
+{
+	return tpm_pm_suspend(&client->dev, msg);
+}
+
+static int tpm_st_i2c_resume(struct i2c_client *client)
+{
+	return tpm_pm_resume(&client->dev);
+}
+#endif
+
+static const struct file_operations tpm_st_i2c_fs_ops = {
+	.owner = THIS_MODULE,
+	.llseek = no_llseek,
+	.open = tpm_open,
+	.read = tpm_read,
+	.write = tpm_write,
+	.release = tpm_release,
+};
+
+static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
+static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
+static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
+static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
+static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
+static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
+		   NULL);
+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
+
+static struct attribute *tpm_st_i2c_attrs[] = {
+	&dev_attr_pubek.attr,
+	&dev_attr_pcrs.attr,
+	&dev_attr_enabled.attr,
+	&dev_attr_active.attr,
+	&dev_attr_owned.attr,
+	&dev_attr_temp_deactivated.attr,
+	&dev_attr_caps.attr,
+	NULL,
+};
+
+static struct attribute_group tpm_st_i2c_attr_grp = {
+	.attrs = tpm_st_i2c_attrs
+};
+
+static struct tpm_vendor_specific tpm_st_i2c_vendor = {
+	.status = tpm_st_i2c_status,
+	.recv = tpm_st_i2c_recv,
+	.send = tpm_st_i2c_send,
+	.cancel = tpm_st_i2c_cancel,
+	.req_complete_mask = TPM_ST_I2C_REQ_COMPLETE_MASK,
+	.req_complete_val = TPM_ST_I2C_REQ_COMPLETE_MASK,
+	.req_canceled = 0xff,  /* not supported */
+	.attr_group = &tpm_st_i2c_attr_grp,
+	.miscdev = {
+		    .fops = &tpm_st_i2c_fs_ops,},
+};
+
+static irqreturn_t tpm_st_i2c_isr(int irq, void *dev_id)
+{
+	disable_irq_nosync(irq);
+	if (irq == tpm_st_i2c_dev->pd->accept_cmd_irq)
+		complete(&tpm_st_i2c_dev->com[ACCEPT_CMD_INDEX]);
+	else
+		complete(&tpm_st_i2c_dev->com[DATA_AVAIL_INDEX]);
+	return IRQ_HANDLED;
+}
+
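+/*
+ * Probe: check that the adapter supports the required I2C functionality,
+ * validate the platform data and run its gpio_setup hook, allocate the
+ * device state, request the data-avail and accept-cmd IRQs (immediately
+ * disabling any whose GPIO line is not already asserted), and register
+ * the chip with the TPM core via tpm_register_hardware().
+ */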
+static int tpm_st_i2c_probe(struct i2c_client *client,
+			    const struct i2c_device_id *id)
+{
+	int rc = 0;
+	struct tpm_st_i2c_platform_data *pd;
+	struct  tpm_chip *chip;
+	int high;
+
+	dev_dbg(&client->dev, "%s()\n", __func__);
+
+	if (!i2c_check_functionality(client->adapter,
+				     I2C_FUNC_SMBUS_BYTE |
+				     I2C_FUNC_SMBUS_I2C_BLOCK |
+				     I2C_FUNC_I2C)) {
+		dev_err(&client->dev, "incompatible adapter\n");
+		return -ENODEV;
+	}
+
+	pd = client->dev.platform_data;
+	if (!pd || !pd->gpio_setup || !pd->gpio_release) {
+		dev_err(&client->dev, "platform data not setup\n");
+		rc = -EFAULT;
+		goto no_platform_data;
+	}
+	rc = pd->gpio_setup();
+	if (rc) {
+		dev_err(&client->dev, "gpio_setup failed\n");
+		goto gpio_setup_fail;
+	}
+
+	gpio_direction_input(pd->accept_cmd_gpio);
+	gpio_direction_input(pd->data_avail_gpio);
+
+	tpm_st_i2c_dev = kzalloc(sizeof(struct tpm_st_i2c_dev), GFP_KERNEL);
+	if (!tpm_st_i2c_dev) {
+		printk(KERN_ERR "%s Unable to allocate memory for struct\n",
+		       __func__);
+		rc = -ENOMEM;
+		goto kzalloc_fail;
+	}
+
+	tpm_st_i2c_dev->client = client;
+	tpm_st_i2c_dev->pd = pd;
+
+	init_completion(&tpm_st_i2c_dev->com[ACCEPT_CMD_INDEX]);
+	init_completion(&tpm_st_i2c_dev->com[DATA_AVAIL_INDEX]);
+	/* This logic allows us to set up the irq but not have it enabled,
+	 * in case the lines are already active
+	 */
+	high = gpio_get_value(pd->data_avail_gpio);
+	rc = request_irq(pd->data_avail_irq, tpm_st_i2c_isr, IRQF_TRIGGER_HIGH,
+			 DEVICE_NAME "-data", NULL);
+	if (rc) {
+		dev_err(&client->dev, "request for data irq failed\n");
+		goto data_irq_fail;
+	}
+	if (!high)
+		disable_irq(pd->data_avail_irq);
+	high = gpio_get_value(pd->accept_cmd_gpio);
+	rc = request_irq(pd->accept_cmd_irq, tpm_st_i2c_isr, IRQF_TRIGGER_HIGH,
+			 DEVICE_NAME "-cmd", NULL);
+	if (rc) {
+		dev_err(&client->dev, "request for cmd irq failed\n");
+		goto cmd_irq_fail;
+	}
+	if (!high)
+		disable_irq(pd->accept_cmd_irq);
+
+	tpm_st_i2c_vendor.irq = pd->data_avail_irq;
+
+	chip = tpm_register_hardware(&client->dev, &tpm_st_i2c_vendor);
+	if (!chip) {
+		dev_err(&client->dev, "Could not register tpm hardware\n");
+		rc = -ENODEV;
+		goto tpm_reg_fail;
+	}
+
+	dev_info(&client->dev, "added\n");
+
+	return 0;
+
+tpm_reg_fail:
+	free_irq(pd->accept_cmd_irq, NULL);
+cmd_irq_fail:
+	free_irq(pd->data_avail_irq, NULL);
+data_irq_fail:
+kzalloc_fail:
+	pd->gpio_release();
+gpio_setup_fail:
+no_platform_data:
+
+	return rc;
+}
+
+static int __exit tpm_st_i2c_remove(struct i2c_client *client)
+{
+	free_irq(tpm_st_i2c_dev->pd->accept_cmd_irq, NULL);
+	free_irq(tpm_st_i2c_dev->pd->data_avail_irq, NULL);
+	tpm_remove_hardware(&client->dev);
+	tpm_st_i2c_dev->pd->gpio_release();
+	kfree(tpm_st_i2c_dev);
+
+	return 0;
+}
+
+static const struct i2c_device_id tpm_st_i2c_id[] = {
+	{ DEVICE_NAME, 0 },
+	{ }
+};
+
+static struct i2c_driver tpm_st_i2c_driver = {
+	.driver = {
+		.name = DEVICE_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = tpm_st_i2c_probe,
+	.remove =  __exit_p(tpm_st_i2c_remove),
+#ifdef CONFIG_PM
+	.suspend = tpm_st_i2c_suspend,
+	.resume = tpm_st_i2c_resume,
+#endif
+	.id_table = tpm_st_i2c_id,
+};
+
+static int __init tpm_st_i2c_init(void)
+{
+	int ret;
+
+	ret = i2c_add_driver(&tpm_st_i2c_driver);
+	if (ret)
+		printk(KERN_ERR "%s: failed to add i2c driver\n", __func__);
+
+	return ret;
+}
+
+static void __exit tpm_st_i2c_exit(void)
+{
+	i2c_del_driver(&tpm_st_i2c_driver);
+}
+
+module_init(tpm_st_i2c_init);
+module_exit(tpm_st_i2c_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("1.0");
+MODULE_AUTHOR("Qualcomm Innovation Center, Inc.");
+MODULE_DESCRIPTION("ST19NP18-TPM-I2C driver");
diff --git a/drivers/char/tpm/tpmd_dev/Makefile b/drivers/char/tpm/tpmd_dev/Makefile
new file mode 100644
index 0000000..7d62de4
--- /dev/null
+++ b/drivers/char/tpm/tpmd_dev/Makefile
@@ -0,0 +1,4 @@
+#
+# Makefile for the kernel tpm emulator device driver.
+#
+obj-$(CONFIG_TCG_TPM) += tpmd_dev.o
diff --git a/drivers/char/tpm/tpmd_dev/config.h b/drivers/char/tpm/tpmd_dev/config.h
new file mode 100644
index 0000000..ec8d93e
--- /dev/null
+++ b/drivers/char/tpm/tpmd_dev/config.h
@@ -0,0 +1,32 @@
+/* Software-based Trusted Platform Module (TPM) Emulator
+ * Copyright (C) 2004-2010 Mario Strasser <mast@gmx.net>
+ *
+ * This module is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This module is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * $Id: config.h.in 426 2010-02-22 17:11:58Z mast $
+ */
+
+#ifndef _CONFIG_H_
+#define _CONFIG_H_
+
+/* project and build version */
+#define VERSION_MAJOR 0
+#define VERSION_MINOR 7
+#define VERSION_BUILD 424
+
+/* TDDL and LKM configuration */
+#define TPM_SOCKET_NAME  "/var/run/tpm/tpmd_socket:0"
+#define TPM_STORAGE_NAME "/var/lib/tpm/tpm_emulator-1_2_0_7"
+#define TPM_DEVICE_NAME  "/dev/tpm"
+#define TPM_LOG_FILE     ""
+#define TPM_CMD_BUF_SIZE 4096
+
+#endif /* _CONFIG_H_ */
diff --git a/drivers/char/tpm/tpmd_dev/tpmd_dev.c b/drivers/char/tpm/tpmd_dev/tpmd_dev.c
new file mode 100644
index 0000000..cbfcbd8
--- /dev/null
+++ b/drivers/char/tpm/tpmd_dev/tpmd_dev.c
@@ -0,0 +1,272 @@
+/* Software-based Trusted Platform Module (TPM) Emulator
+ * Copyright (C) 2004-2010 Mario Strasser <mast@gmx.net>
+ *
+ * This module is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This module is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * $Id: tpmd_dev.c 426 2010-02-22 17:11:58Z mast $
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+
+#include <linux/socket.h>
+#include <linux/net.h>
+#include <linux/un.h>
+
+#include "config.h"
+
+#define TPM_DEVICE_MINOR  224
+#define TPM_DEVICE_ID     "tpm"
+#define TPM_MODULE_NAME   "tpmd_dev"
+
+#define TPM_STATE_IS_OPEN 0
+
+#ifdef DEBUG
+#define debug(fmt, ...) printk(KERN_DEBUG "%s %s:%d: Debug: " fmt "\n", \
+                        TPM_MODULE_NAME, __FILE__, __LINE__, ## __VA_ARGS__)
+#else
+#define debug(fmt, ...)
+#endif
+#define info(fmt, ...)  printk(KERN_INFO "%s %s:%d: Info: " fmt "\n", \
+                        TPM_MODULE_NAME, __FILE__, __LINE__, ## __VA_ARGS__)
+#define error(fmt, ...) printk(KERN_ERR "%s %s:%d: Error: " fmt "\n", \
+                        TPM_MODULE_NAME, __FILE__, __LINE__, ## __VA_ARGS__)
+#define alert(fmt, ...) printk(KERN_ALERT "%s %s:%d: Alert: " fmt "\n", \
+                        TPM_MODULE_NAME, __FILE__, __LINE__, ## __VA_ARGS__)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mario Strasser <mast@gmx.net>");
+MODULE_DESCRIPTION("Trusted Platform Module (TPM) Emulator");
+MODULE_SUPPORTED_DEVICE(TPM_DEVICE_ID);
+
+/* module parameters */
+char *tpmd_socket_name = TPM_SOCKET_NAME;
+module_param(tpmd_socket_name, charp, 0444);
+MODULE_PARM_DESC(tpmd_socket_name, " Sets the name of the TPM daemon socket.");
+
+/* TPM lock */
+static struct semaphore tpm_mutex;
+
+/* TPM command response */
+static struct {
+  uint8_t *data;
+  uint32_t size;
+} tpm_response;
+
+/* module state */
+static uint32_t module_state;
+static struct socket *tpmd_sock;
+static struct sockaddr_un addr;
+
+static int tpmd_connect(char *socket_name)
+{
+  int res;
+  res = sock_create(PF_UNIX, SOCK_STREAM, 0, &tpmd_sock);
+  if (res != 0) {
+    error("sock_create() failed: %d\n", res);
+    tpmd_sock = NULL;
+    return res;
+  }
+  addr.sun_family = AF_UNIX;
+  strncpy(addr.sun_path, socket_name, sizeof(addr.sun_path));
+  res = tpmd_sock->ops->connect(tpmd_sock, 
+    (struct sockaddr*)&addr, sizeof(struct sockaddr_un), 0);
+  if (res != 0) {
+    error("sock_connect() failed: %d\n", res);
+    tpmd_sock->ops->release(tpmd_sock);
+    tpmd_sock = NULL;
+    return res;
+  }
+  return 0;
+}
+
+static void tpmd_disconnect(void)
+{
+  if (tpmd_sock != NULL) tpmd_sock->ops->release(tpmd_sock);
+  tpmd_sock = NULL;
+}
+
+static int tpmd_handle_command(const uint8_t *in, uint32_t in_size)
+{
+  int res;
+  mm_segment_t oldmm;
+  struct msghdr msg;
+  struct iovec iov;
+  /* send command to tpmd */
+  memset(&msg, 0, sizeof(msg));
+  iov.iov_base = (void*)in;
+  iov.iov_len = in_size;
+  msg.msg_iov = &iov;
+  msg.msg_iovlen = 1;
+  res = sock_sendmsg(tpmd_sock, &msg, in_size);
+  if (res < 0) {
+    error("sock_sendmsg() failed: %d\n", res);
+    return res;
+  }
+  /* receive response from tpmd */
+  tpm_response.size = TPM_CMD_BUF_SIZE;
+  tpm_response.data = kmalloc(tpm_response.size, GFP_KERNEL);
+  if (tpm_response.data == NULL) return -1;
+  memset(&msg, 0, sizeof(msg));
+  iov.iov_base = (void*)tpm_response.data;
+  iov.iov_len = tpm_response.size;
+  msg.msg_iov = &iov;
+  msg.msg_iovlen = 1;
+  oldmm = get_fs();
+  set_fs(KERNEL_DS);
+  res = sock_recvmsg(tpmd_sock, &msg, tpm_response.size, 0);
+  set_fs(oldmm);
+  if (res < 0) {
+    error("sock_recvmsg() failed: %d\n", res);
+    tpm_response.data = NULL;
+    return res;
+  }
+  tpm_response.size = res;
+  return 0;
+}
+
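+/*
+ * /dev/tpm is a thin proxy here: open() connects to the user-space tpmd
+ * daemon over the unix socket named by tpmd_socket_name, write() forwards
+ * the raw TPM command to the daemon via tpmd_handle_command(), and the
+ * daemon's reply stays buffered in tpm_response until a later read()
+ * consumes it.
+ */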
+static int tpm_open(struct inode *inode, struct file *file)
+{
+  int res;
+  debug("%s()", __FUNCTION__);
+  if (test_and_set_bit(TPM_STATE_IS_OPEN, (void*)&module_state)) return -EBUSY;
+  down(&tpm_mutex);
+  res = tpmd_connect(tpmd_socket_name);
+  up(&tpm_mutex);
+  if (res != 0) {
+    clear_bit(TPM_STATE_IS_OPEN, (void*)&module_state);
+    return -EIO;
+  }
+  return 0;
+}
+
+static int tpm_release(struct inode *inode, struct file *file)
+{
+  debug("%s()", __FUNCTION__);
+  down(&tpm_mutex);
+  if (tpm_response.data != NULL) {
+    kfree(tpm_response.data);
+    tpm_response.data = NULL;
+  }
+  tpmd_disconnect();
+  up(&tpm_mutex);
+  clear_bit(TPM_STATE_IS_OPEN, (void*)&module_state);
+  return 0;
+}
+
+static ssize_t tpm_read(struct file *file, char *buf, size_t count, loff_t *ppos)
+{
+  debug("%s(%zd)", __FUNCTION__, count);
+  down(&tpm_mutex);
+  if (tpm_response.data != NULL) {
+    count = min(count, (size_t)tpm_response.size - (size_t)*ppos);
+    count -= copy_to_user(buf, &tpm_response.data[*ppos], count);
+    *ppos += count;
+    if ((size_t)tpm_response.size == (size_t)*ppos) {
+      kfree(tpm_response.data);
+      tpm_response.data = NULL;
+    }
+  } else {
+    count = 0;
+  }
+  up(&tpm_mutex);
+  return count;
+}
+
+static ssize_t tpm_write(struct file *file, const char *buf, size_t count, loff_t *ppos)
+{
+  debug("%s(%zd)", __FUNCTION__, count);
+  down(&tpm_mutex);
+  *ppos = 0;
+  if (tpm_response.data != NULL) {
+    kfree(tpm_response.data);
+    tpm_response.data = NULL;
+  }
+  if (tpmd_handle_command(buf, count) != 0) { 
+    count = -EILSEQ;
+    tpm_response.data = NULL;
+  }
+  up(&tpm_mutex);
+  return count;
+}
+
+#define TPMIOC_CANCEL   _IO('T', 0x00)
+#define TPMIOC_TRANSMIT _IO('T', 0x01)
+
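+/*
+ * TPMIOC_TRANSMIT expects a complete TPM command at 'arg'; the command
+ * length is taken from the big-endian 32-bit size field starting at byte
+ * offset 2 of the command header, the response is copied back over the
+ * same buffer, and the response size is returned.
+ */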
+static int tpm_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+{
+  debug("%s(%d, %p)", __FUNCTION__, cmd, (char*)arg);
+  if (cmd == TPMIOC_TRANSMIT) {
+    uint32_t count = ntohl(*(uint32_t*)(arg + 2));
+    down(&tpm_mutex);
+    if (tpm_response.data != NULL) {
+      kfree(tpm_response.data);
+      tpm_response.data = NULL;
+    }
+    if (tpmd_handle_command((char*)arg, count) == 0) {
+      tpm_response.size -= copy_to_user((char*)arg, tpm_response.data, tpm_response.size);
+      kfree(tpm_response.data);
+      tpm_response.data = NULL;
+    } else {
+      tpm_response.size = 0;
+      tpm_response.data = NULL;
+    }
+    up(&tpm_mutex);
+    return tpm_response.size;
+  }
+  return -1;
+}
+
+struct file_operations fops = {
+  .owner   = THIS_MODULE,
+  .open    = tpm_open,
+  .release = tpm_release,
+  .read    = tpm_read,
+  .write   = tpm_write,
+  .ioctl   = tpm_ioctl,
+};
+
+static struct miscdevice tpm_dev = {
+  .minor      = TPM_DEVICE_MINOR, 
+  .name       = TPM_DEVICE_ID, 
+  .fops       = &fops,
+};
+
+int __init init_tpm_module(void)
+{
+  int res = misc_register(&tpm_dev);
+  if (res != 0) {
+    error("misc_register() failed for minor %d\n", TPM_DEVICE_MINOR);
+    return res;
+  }
+  /* initialize variables */
+  sema_init(&tpm_mutex, 1);
+  module_state = 0;
+  tpm_response.data = NULL;
+  tpm_response.size = 0;
+  tpmd_sock = NULL;
+  return 0;
+}
+
+void __exit cleanup_tpm_module(void)
+{
+  misc_deregister(&tpm_dev);
+  tpmd_disconnect();
+  if (tpm_response.data != NULL) kfree(tpm_response.data);
+}
+
+module_init(init_tpm_module);
+module_exit(cleanup_tpm_module);
+
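+/*
+ * Usage sketch (user space, illustrative only -- not part of the driver):
+ * a client opens the misc device registered under TPM_DEVICE_ID and either
+ * write()s a raw TPM command blob and read()s back the buffered response,
+ * or uses TPMIOC_TRANSMIT on a single buffer.  The device node name below
+ * is an assumption for the example.
+ *
+ *   int fd = open("/dev/tpm", O_RDWR);              /* name depends on TPM_DEVICE_ID */
+ *   write(fd, cmd_buf, cmd_len);                    /* forwarded to tpmd over the socket */
+ *   ssize_t n = read(fd, rsp_buf, sizeof(rsp_buf)); /* response produced by tpmd */
+ *   close(fd);
+ */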
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
new file mode 100644
index 0000000..94bb440
--- /dev/null
+++ b/drivers/char/tty_io.c
@@ -0,0 +1,3154 @@
+/*
+ *  linux/drivers/char/tty_io.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
+
+/*
+ * 'tty_io.c' gives an orthogonal feeling to tty's, be they consoles
+ * or rs-channels. It also implements echoing, cooked mode etc.
+ *
+ * Kill-line thanks to John T Kohl, who also corrected VMIN = VTIME = 0.
+ *
+ * Modified by Theodore Ts'o, 9/14/92, to dynamically allocate the
+ * tty_struct and tty_queue structures.  Previously there was an array
+ * of 256 tty_struct's which was statically allocated, and the
+ * tty_queue structures were allocated at boot time.  Both are now
+ * dynamically allocated only when the tty is open.
+ *
+ * Also restructured routines so that there is more of a separation
+ * between the high-level tty routines (tty_io.c and tty_ioctl.c) and
+ * the low-level tty routines (serial.c, pty.c, console.c).  This
+ * makes for cleaner and more compact code.  -TYT, 9/17/92
+ *
+ * Modified by Fred N. van Kempen, 01/29/93, to add line disciplines
+ * which can be dynamically activated and de-activated by the line
+ * discipline handling modules (like SLIP).
+ *
+ * NOTE: pay no attention to the line discipline code (yet); its
+ * interface is still subject to change in this version...
+ * -- TYT, 1/31/92
+ *
+ * Added functionality to the OPOST tty handling.  No delays, but all
+ * other bits should be there.
+ *	-- Nick Holloway <alfie@dcs.warwick.ac.uk>, 27th May 1993.
+ *
+ * Rewrote canonical mode and added more termios flags.
+ * 	-- julian@uhunix.uhcc.hawaii.edu (J. Cowley), 13Jan94
+ *
+ * Reorganized FASYNC support so mouse code can share it.
+ *	-- ctm@ardi.com, 9Sep95
+ *
+ * New TIOCLINUX variants added.
+ *	-- mj@k332.feld.cvut.cz, 19-Nov-95
+ *
+ * Restrict vt switching via ioctl()
+ *      -- grif@cs.ucr.edu, 5-Dec-95
+ *
+ * Move console and virtual terminal code to more appropriate files,
+ * implement CONFIG_VT and generalize console device interface.
+ *	-- Marko Kohtala <Marko.Kohtala@hut.fi>, March 97
+ *
+ * Rewrote tty_init_dev and tty_release_dev to eliminate races.
+ *	-- Bill Hawes <whawes@star.net>, June 97
+ *
+ * Added devfs support.
+ *      -- C. Scott Ananian <cananian@alumni.princeton.edu>, 13-Jan-1998
+ *
+ * Added support for a Unix98-style ptmx device.
+ *      -- C. Scott Ananian <cananian@alumni.princeton.edu>, 14-Jan-1998
+ *
+ * Reduced memory usage for older ARM systems
+ *      -- Russell King <rmk@arm.linux.org.uk>
+ *
+ * Move do_SAK() into process context.  Less stack use in devfs functions.
+ * alloc_tty_struct() always uses kmalloc()
+ *			 -- Andrew Morton <andrewm@uow.edu.au> 17Mar01
+ */
+
+#include <linux/types.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/fcntl.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/devpts_fs.h>
+#include <linux/file.h>
+#include <linux/fdtable.h>
+#include <linux/console.h>
+#include <linux/timer.h>
+#include <linux/ctype.h>
+#include <linux/kd.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/smp_lock.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+
+#include <linux/uaccess.h>
+#include <asm/system.h>
+
+#include <linux/kbd_kern.h>
+#include <linux/vt_kern.h>
+#include <linux/selection.h>
+
+#include <linux/kmod.h>
+#include <linux/nsproxy.h>
+
+#undef TTY_DEBUG_HANGUP
+
+#define TTY_PARANOIA_CHECK 1
+#define CHECK_TTY_COUNT 1
+
+struct ktermios tty_std_termios = {	/* for the benefit of tty drivers  */
+	.c_iflag = ICRNL | IXON,
+	.c_oflag = OPOST | ONLCR,
+	.c_cflag = B38400 | CS8 | CREAD | HUPCL,
+	.c_lflag = ISIG | ICANON | ECHO | ECHOE | ECHOK |
+		   ECHOCTL | ECHOKE | IEXTEN,
+	.c_cc = INIT_C_CC,
+	.c_ispeed = 38400,
+	.c_ospeed = 38400
+};
+
+EXPORT_SYMBOL(tty_std_termios);
+
+/* This list gets poked at by procfs and various bits of boot up code. This
+   could do with some rationalisation such as pulling the tty proc function
+   into this file */
+
+LIST_HEAD(tty_drivers);			/* linked list of tty drivers */
+
+/* Mutex to protect creating and releasing a tty. This is shared with
+   vt.c for deeply disgusting hack reasons */
+DEFINE_MUTEX(tty_mutex);
+EXPORT_SYMBOL(tty_mutex);
+
+static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
+static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
+ssize_t redirected_tty_write(struct file *, const char __user *,
+							size_t, loff_t *);
+static unsigned int tty_poll(struct file *, poll_table *);
+static int tty_open(struct inode *, struct file *);
+long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+#ifdef CONFIG_COMPAT
+static long tty_compat_ioctl(struct file *file, unsigned int cmd,
+				unsigned long arg);
+#else
+#define tty_compat_ioctl NULL
+#endif
+static int tty_fasync(int fd, struct file *filp, int on);
+static void release_tty(struct tty_struct *tty, int idx);
+static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
+static void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
+
+/**
+ *	alloc_tty_struct	-	allocate a tty object
+ *
+ *	Return a new empty tty structure. The data fields have not
+ *	been initialized in any way but have been zeroed
+ *
+ *	Locking: none
+ */
+
+struct tty_struct *alloc_tty_struct(void)
+{
+	return kzalloc(sizeof(struct tty_struct), GFP_KERNEL);
+}
+
+/**
+ *	free_tty_struct		-	free a disused tty
+ *	@tty: tty struct to free
+ *
+ *	Free the write buffers, tty queue and tty memory itself.
+ *
+ *	Locking: none. Must be called after tty is definitely unused
+ */
+
+void free_tty_struct(struct tty_struct *tty)
+{
+	kfree(tty->write_buf);
+	tty_buffer_free_all(tty);
+	kfree(tty);
+}
+
+#define TTY_NUMBER(tty) ((tty)->index + (tty)->driver->name_base)
+
+/**
+ *	tty_name	-	return tty naming
+ *	@tty: tty structure
+ *	@buf: buffer for output
+ *
+ *	Convert a tty structure into a name. The name reflects the kernel
+ *	naming policy and if udev is in use may not reflect user space
+ *
+ *	Locking: none
+ */
+
+char *tty_name(struct tty_struct *tty, char *buf)
+{
+	if (!tty) /* Hmm.  NULL pointer.  That's fun. */
+		strcpy(buf, "NULL tty");
+	else
+		strcpy(buf, tty->name);
+	return buf;
+}
+
+EXPORT_SYMBOL(tty_name);
+
+int tty_paranoia_check(struct tty_struct *tty, struct inode *inode,
+			      const char *routine)
+{
+#ifdef TTY_PARANOIA_CHECK
+	if (!tty) {
+		printk(KERN_WARNING
+			"null TTY for (%d:%d) in %s\n",
+			imajor(inode), iminor(inode), routine);
+		return 1;
+	}
+	if (tty->magic != TTY_MAGIC) {
+		printk(KERN_WARNING
+			"bad magic number for tty struct (%d:%d) in %s\n",
+			imajor(inode), iminor(inode), routine);
+		return 1;
+	}
+#endif
+	return 0;
+}
+
+static int check_tty_count(struct tty_struct *tty, const char *routine)
+{
+#ifdef CHECK_TTY_COUNT
+	struct list_head *p;
+	int count = 0;
+
+	file_list_lock();
+	list_for_each(p, &tty->tty_files) {
+		count++;
+	}
+	file_list_unlock();
+	if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+	    tty->driver->subtype == PTY_TYPE_SLAVE &&
+	    tty->link && tty->link->count)
+		count++;
+	if (tty->count != count) {
+		printk(KERN_WARNING "Warning: dev (%s) tty->count(%d) "
+				    "!= #fd's(%d) in %s\n",
+		       tty->name, tty->count, count, routine);
+		return count;
+	}
+#endif
+	return 0;
+}
+
+/**
+ *	get_tty_driver		-	find device of a tty
+ *	@dev_t: device identifier
+ *	@device: device identifier
+ *
+ *	This routine returns a tty driver structure, given a device number
+ *	and also passes back the index number.
+ *
+ *	Locking: caller must hold tty_mutex
+ */
+
+static struct tty_driver *get_tty_driver(dev_t device, int *index)
+{
+	struct tty_driver *p;
+
+	list_for_each_entry(p, &tty_drivers, tty_drivers) {
+		dev_t base = MKDEV(p->major, p->minor_start);
+		if (device < base || device >= base + p->num)
+			continue;
+		*index = device - base;
+		return tty_driver_kref_get(p);
+	}
+	return NULL;
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+
+/**
+ *	tty_find_polling_driver	-	find device of a polled tty
+ *	@name: name string to match
+ *	@line: pointer to resulting tty line nr
+ *
+ *	This routine returns a tty driver structure, given a name
+ *	and the condition that the tty driver is capable of polled
+ *	operation.
+ */
+struct tty_driver *tty_find_polling_driver(char *name, int *line)
+{
+	struct tty_driver *p, *res = NULL;
+	int tty_line = 0;
+	int len;
+	char *str, *stp;
+
+	for (str = name; *str; str++)
+		if ((*str >= '0' && *str <= '9') || *str == ',')
+			break;
+	if (!*str)
+		return NULL;
+
+	len = str - name;
+	tty_line = simple_strtoul(str, &str, 10);
+
+	mutex_lock(&tty_mutex);
+	/* Search through the tty devices to look for a match */
+	list_for_each_entry(p, &tty_drivers, tty_drivers) {
+		if (strncmp(name, p->name, len) != 0)
+			continue;
+		stp = str;
+		if (*stp == ',')
+			stp++;
+		if (*stp == '\0')
+			stp = NULL;
+
+		if (tty_line >= 0 && tty_line <= p->num && p->ops &&
+		    p->ops->poll_init && !p->ops->poll_init(p, tty_line, stp)) {
+			res = tty_driver_kref_get(p);
+			*line = tty_line;
+			break;
+		}
+	}
+	mutex_unlock(&tty_mutex);
+
+	return res;
+}
+EXPORT_SYMBOL_GPL(tty_find_polling_driver);
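+
+/*
+ * Example (illustrative): a polling client such as a kgdb console passes a
+ * string like "ttyS0,115200"; the code above resolves it to the "ttyS"
+ * driver, line 0, and hands "115200" to the driver's poll_init() method.
+ *
+ *   int line;
+ *   struct tty_driver *drv = tty_find_polling_driver("ttyS0,115200", &line);
+ */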
+#endif
+
+/**
+ *	tty_check_change	-	check for POSIX terminal changes
+ *	@tty: tty to check
+ *
+ *	If we try to write to, or set the state of, a terminal and we're
+ *	not in the foreground, send a SIGTTOU.  If the signal is blocked or
+ *	ignored, go ahead and perform the operation.  (POSIX 7.2)
+ *
+ *	Locking: ctrl_lock
+ */
+
+int tty_check_change(struct tty_struct *tty)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	if (current->signal->tty != tty)
+		return 0;
+
+	spin_lock_irqsave(&tty->ctrl_lock, flags);
+
+	if (!tty->pgrp) {
+		printk(KERN_WARNING "tty_check_change: tty->pgrp == NULL!\n");
+		goto out_unlock;
+	}
+	if (task_pgrp(current) == tty->pgrp)
+		goto out_unlock;
+	spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+	if (is_ignored(SIGTTOU))
+		goto out;
+	if (is_current_pgrp_orphaned()) {
+		ret = -EIO;
+		goto out;
+	}
+	kill_pgrp(task_pgrp(current), SIGTTOU, 1);
+	set_thread_flag(TIF_SIGPENDING);
+	ret = -ERESTARTSYS;
+out:
+	return ret;
+out_unlock:
+	spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+	return ret;
+}
+
+EXPORT_SYMBOL(tty_check_change);
+
+static ssize_t hung_up_tty_read(struct file *file, char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	return 0;
+}
+
+static ssize_t hung_up_tty_write(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	return -EIO;
+}
+
+/* No kernel lock held - none needed ;) */
+static unsigned int hung_up_tty_poll(struct file *filp, poll_table *wait)
+{
+	return POLLIN | POLLOUT | POLLERR | POLLHUP | POLLRDNORM | POLLWRNORM;
+}
+
+static long hung_up_tty_ioctl(struct file *file, unsigned int cmd,
+		unsigned long arg)
+{
+	return cmd == TIOCSPGRP ? -ENOTTY : -EIO;
+}
+
+static long hung_up_tty_compat_ioctl(struct file *file,
+				     unsigned int cmd, unsigned long arg)
+{
+	return cmd == TIOCSPGRP ? -ENOTTY : -EIO;
+}
+
+static const struct file_operations tty_fops = {
+	.llseek		= no_llseek,
+	.read		= tty_read,
+	.write		= tty_write,
+	.poll		= tty_poll,
+	.unlocked_ioctl	= tty_ioctl,
+	.compat_ioctl	= tty_compat_ioctl,
+	.open		= tty_open,
+	.release	= tty_release,
+	.fasync		= tty_fasync,
+};
+
+static const struct file_operations console_fops = {
+	.llseek		= no_llseek,
+	.read		= tty_read,
+	.write		= redirected_tty_write,
+	.poll		= tty_poll,
+	.unlocked_ioctl	= tty_ioctl,
+	.compat_ioctl	= tty_compat_ioctl,
+	.open		= tty_open,
+	.release	= tty_release,
+	.fasync		= tty_fasync,
+};
+
+static const struct file_operations hung_up_tty_fops = {
+	.llseek		= no_llseek,
+	.read		= hung_up_tty_read,
+	.write		= hung_up_tty_write,
+	.poll		= hung_up_tty_poll,
+	.unlocked_ioctl	= hung_up_tty_ioctl,
+	.compat_ioctl	= hung_up_tty_compat_ioctl,
+	.release	= tty_release,
+};
+
+static DEFINE_SPINLOCK(redirect_lock);
+static struct file *redirect;
+
+/**
+ *	tty_wakeup	-	request more data
+ *	@tty: terminal
+ *
+ *	Internal and external helper for wakeups of tty. This function
+ *	informs the line discipline if present that the driver is ready
+ *	to receive more output data.
+ */
+
+void tty_wakeup(struct tty_struct *tty)
+{
+	struct tty_ldisc *ld;
+
+	if (test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) {
+		ld = tty_ldisc_ref(tty);
+		if (ld) {
+			if (ld->ops->write_wakeup)
+				ld->ops->write_wakeup(tty);
+			tty_ldisc_deref(ld);
+		}
+	}
+	wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
+}
+
+EXPORT_SYMBOL_GPL(tty_wakeup);
+
+/**
+ *	do_tty_hangup		-	actual handler for hangup events
+ *	@work: tty device
+ *
+ *	This can be called by the "eventd" kernel thread.  That is process
+ *	synchronous but doesn't hold any locks, so we need to make sure we
+ *	have the appropriate locks for what we're doing.
+ *
+ *	The hangup event clears any pending redirections onto the hung up
+ *	device. It ensures future writes will error and it does the needed
+ *	line discipline hangup and signal delivery. The tty object itself
+ *	remains intact.
+ *
+ *	Locking:
+ *		BKL
+ *		  redirect lock for undoing redirection
+ *		  file list lock for manipulating list of ttys
+ *		  tty_ldisc_lock from called functions
+ *		  termios_mutex resetting termios data
+ *		  tasklist_lock to walk task list for hangup event
+ *		    ->siglock to protect ->signal/->sighand
+ */
+static void do_tty_hangup(struct work_struct *work)
+{
+	struct tty_struct *tty =
+		container_of(work, struct tty_struct, hangup_work);
+	struct file *cons_filp = NULL;
+	struct file *filp, *f = NULL;
+	struct task_struct *p;
+	int    closecount = 0, n;
+	unsigned long flags;
+	int refs = 0;
+
+	if (!tty)
+		return;
+
+
+	spin_lock(&redirect_lock);
+	if (redirect && redirect->private_data == tty) {
+		f = redirect;
+		redirect = NULL;
+	}
+	spin_unlock(&redirect_lock);
+
+	/* inuse_filps is protected by the single kernel lock */
+	lock_kernel();
+	check_tty_count(tty, "do_tty_hangup");
+
+	file_list_lock();
+	/* This breaks for file handles being sent over AF_UNIX sockets ? */
+	list_for_each_entry(filp, &tty->tty_files, f_u.fu_list) {
+		if (filp->f_op->write == redirected_tty_write)
+			cons_filp = filp;
+		if (filp->f_op->write != tty_write)
+			continue;
+		closecount++;
+		tty_fasync(-1, filp, 0);	/* can't block */
+		filp->f_op = &hung_up_tty_fops;
+	}
+	file_list_unlock();
+
+	tty_ldisc_hangup(tty);
+
+	read_lock(&tasklist_lock);
+	if (tty->session) {
+		do_each_pid_task(tty->session, PIDTYPE_SID, p) {
+			spin_lock_irq(&p->sighand->siglock);
+			if (p->signal->tty == tty) {
+				p->signal->tty = NULL;
+				/* We defer the dereferences outside of
+				   the tasklist lock */
+				refs++;
+			}
+			if (!p->signal->leader) {
+				spin_unlock_irq(&p->sighand->siglock);
+				continue;
+			}
+			__group_send_sig_info(SIGHUP, SEND_SIG_PRIV, p);
+			__group_send_sig_info(SIGCONT, SEND_SIG_PRIV, p);
+			put_pid(p->signal->tty_old_pgrp);  /* A noop */
+			spin_lock_irqsave(&tty->ctrl_lock, flags);
+			if (tty->pgrp)
+				p->signal->tty_old_pgrp = get_pid(tty->pgrp);
+			spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+			spin_unlock_irq(&p->sighand->siglock);
+		} while_each_pid_task(tty->session, PIDTYPE_SID, p);
+	}
+	read_unlock(&tasklist_lock);
+
+	spin_lock_irqsave(&tty->ctrl_lock, flags);
+	clear_bit(TTY_THROTTLED, &tty->flags);
+	clear_bit(TTY_PUSH, &tty->flags);
+	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+	put_pid(tty->session);
+	put_pid(tty->pgrp);
+	tty->session = NULL;
+	tty->pgrp = NULL;
+	tty->ctrl_status = 0;
+	set_bit(TTY_HUPPED, &tty->flags);
+	spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+
+	/* Account for the p->signal references we killed */
+	while (refs--)
+		tty_kref_put(tty);
+
+	/*
+	 * If one of the devices matches a console pointer, we
+	 * cannot just call hangup() because that will cause
+	 * tty->count and state->count to go out of sync.
+	 * So we just call close() the right number of times.
+	 */
+	if (cons_filp) {
+		if (tty->ops->close)
+			for (n = 0; n < closecount; n++)
+				tty->ops->close(tty, cons_filp);
+	} else if (tty->ops->hangup)
+		(tty->ops->hangup)(tty);
+	/*
+	 * We don't want to have driver/ldisc interactions beyond
+	 * the ones we did here. The driver layer expects no
+	 * calls after ->hangup() from the ldisc side. However we
+	 * can't yet guarantee all that.
+	 */
+	set_bit(TTY_HUPPED, &tty->flags);
+	tty_ldisc_enable(tty);
+	unlock_kernel();
+	if (f)
+		fput(f);
+}
+
+/**
+ *	tty_hangup		-	trigger a hangup event
+ *	@tty: tty to hangup
+ *
+ *	A carrier loss (virtual or otherwise) has occurred on this tty;
+ *	schedule a hangup sequence to run after this event.
+ */
+
+void tty_hangup(struct tty_struct *tty)
+{
+#ifdef TTY_DEBUG_HANGUP
+	char	buf[64];
+	printk(KERN_DEBUG "%s hangup...\n", tty_name(tty, buf));
+#endif
+	schedule_work(&tty->hangup_work);
+}
+
+EXPORT_SYMBOL(tty_hangup);
+
+/**
+ *	tty_vhangup		-	process vhangup
+ *	@tty: tty to hangup
+ *
+ *	The user has asked via system call for the terminal to be hung up.
+ *	We do this synchronously so that when the syscall returns the process
+ *	is complete. That guarantee is necessary for security reasons.
+ */
+
+void tty_vhangup(struct tty_struct *tty)
+{
+#ifdef TTY_DEBUG_HANGUP
+	char	buf[64];
+
+	printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty, buf));
+#endif
+	do_tty_hangup(&tty->hangup_work);
+}
+
+EXPORT_SYMBOL(tty_vhangup);
+
+/**
+ *	tty_vhangup_self	-	process vhangup for own ctty
+ *
+ *	Perform a vhangup on the current controlling tty
+ */
+
+void tty_vhangup_self(void)
+{
+	struct tty_struct *tty;
+
+	tty = get_current_tty();
+	if (tty) {
+		tty_vhangup(tty);
+		tty_kref_put(tty);
+	}
+}
+
+/**
+ *	tty_hung_up_p		-	was tty hung up
+ *	@filp: file pointer of tty
+ *
+ *	Return true if the tty has been subject to a vhangup or a carrier
+ *	loss
+ */
+
+int tty_hung_up_p(struct file *filp)
+{
+	return (filp->f_op == &hung_up_tty_fops);
+}
+
+EXPORT_SYMBOL(tty_hung_up_p);
+
+static void session_clear_tty(struct pid *session)
+{
+	struct task_struct *p;
+	do_each_pid_task(session, PIDTYPE_SID, p) {
+		proc_clear_tty(p);
+	} while_each_pid_task(session, PIDTYPE_SID, p);
+}
+
+/**
+ *	disassociate_ctty	-	disconnect controlling tty
+ *	@on_exit: true if exiting so need to "hang up" the session
+ *
+ *	This function is typically called only by the session leader, when
+ *	it wants to disassociate itself from its controlling tty.
+ *
+ *	It performs the following functions:
+ * 	(1)  Sends a SIGHUP and SIGCONT to the foreground process group
+ * 	(2)  Clears the tty from being controlling the session
+ * 	(3)  Clears the controlling tty for all processes in the
+ * 		session group.
+ *
+ *	The argument on_exit is set to 1 if called when a process is
+ *	exiting; it is 0 if called by the ioctl TIOCNOTTY.
+ *
+ *	Locking:
+ *		BKL is taken for hysterical raisins
+ *		  tty_mutex is taken to protect tty
+ *		  ->siglock is taken to protect ->signal/->sighand
+ *		  tasklist_lock is taken to walk process list for sessions
+ *		    ->siglock is taken to protect ->signal/->sighand
+ */
+
+void disassociate_ctty(int on_exit)
+{
+	struct tty_struct *tty;
+	struct pid *tty_pgrp = NULL;
+
+	if (!current->signal->leader)
+		return;
+
+	tty = get_current_tty();
+	if (tty) {
+		tty_pgrp = get_pid(tty->pgrp);
+		lock_kernel();
+		if (on_exit && tty->driver->type != TTY_DRIVER_TYPE_PTY)
+			tty_vhangup(tty);
+		unlock_kernel();
+		tty_kref_put(tty);
+	} else if (on_exit) {
+		struct pid *old_pgrp;
+		spin_lock_irq(&current->sighand->siglock);
+		old_pgrp = current->signal->tty_old_pgrp;
+		current->signal->tty_old_pgrp = NULL;
+		spin_unlock_irq(&current->sighand->siglock);
+		if (old_pgrp) {
+			kill_pgrp(old_pgrp, SIGHUP, on_exit);
+			kill_pgrp(old_pgrp, SIGCONT, on_exit);
+			put_pid(old_pgrp);
+		}
+		return;
+	}
+	if (tty_pgrp) {
+		kill_pgrp(tty_pgrp, SIGHUP, on_exit);
+		if (!on_exit)
+			kill_pgrp(tty_pgrp, SIGCONT, on_exit);
+		put_pid(tty_pgrp);
+	}
+
+	spin_lock_irq(&current->sighand->siglock);
+	put_pid(current->signal->tty_old_pgrp);
+	current->signal->tty_old_pgrp = NULL;
+	spin_unlock_irq(&current->sighand->siglock);
+
+	tty = get_current_tty();
+	if (tty) {
+		unsigned long flags;
+		spin_lock_irqsave(&tty->ctrl_lock, flags);
+		put_pid(tty->session);
+		put_pid(tty->pgrp);
+		tty->session = NULL;
+		tty->pgrp = NULL;
+		spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+		tty_kref_put(tty);
+	} else {
+#ifdef TTY_DEBUG_HANGUP
+		printk(KERN_DEBUG "error attempted to write to tty [0x%p]"
+		       " = NULL", tty);
+#endif
+	}
+
+	/* Now clear signal->tty under the lock */
+	read_lock(&tasklist_lock);
+	session_clear_tty(task_session(current));
+	read_unlock(&tasklist_lock);
+}
+
+/**
+ *	no_tty	- Ensure the current process does not have a controlling tty
+ */
+void no_tty(void)
+{
+	struct task_struct *tsk = current;
+	lock_kernel();
+	disassociate_ctty(0);
+	unlock_kernel();
+	proc_clear_tty(tsk);
+}
+
+
+/**
+ *	stop_tty	-	propagate flow control
+ *	@tty: tty to stop
+ *
+ *	Perform flow control to the driver. For PTY/TTY pairs we
+ *	must also propagate the TIOCPKT status. May be called
+ *	on an already stopped device and will not re-call the driver
+ *	method.
+ *
+ *	This functionality is used by both the line disciplines for
+ *	halting incoming flow and by the driver. It may therefore be
+ *	called from any context, may be under the tty atomic_write_lock
+ *	but not always.
+ *
+ *	Locking:
+ *		Uses the tty control lock internally
+ */
+
+void stop_tty(struct tty_struct *tty)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&tty->ctrl_lock, flags);
+	if (tty->stopped) {
+		spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+		return;
+	}
+	tty->stopped = 1;
+	if (tty->link && tty->link->packet) {
+		tty->ctrl_status &= ~TIOCPKT_START;
+		tty->ctrl_status |= TIOCPKT_STOP;
+		wake_up_interruptible_poll(&tty->link->read_wait, POLLIN);
+	}
+	spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+	if (tty->ops->stop)
+		(tty->ops->stop)(tty);
+}
+
+EXPORT_SYMBOL(stop_tty);
+
+/**
+ *	start_tty	-	propagate flow control
+ *	@tty: tty to start
+ *
+ *	Start a tty that has been stopped if at all possible. Perform
+ *	any necessary wakeups and propagate the TIOCPKT status. If the
+ *	tty was previously stopped and is now being started then the
+ *	driver start method is invoked and the line discipline woken.
+ *
+ *	Locking:
+ *		ctrl_lock
+ */
+
+void start_tty(struct tty_struct *tty)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&tty->ctrl_lock, flags);
+	if (!tty->stopped || tty->flow_stopped) {
+		spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+		return;
+	}
+	tty->stopped = 0;
+	if (tty->link && tty->link->packet) {
+		tty->ctrl_status &= ~TIOCPKT_STOP;
+		tty->ctrl_status |= TIOCPKT_START;
+		wake_up_interruptible_poll(&tty->link->read_wait, POLLIN);
+	}
+	spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+	if (tty->ops->start)
+		(tty->ops->start)(tty);
+	/* If we have a running line discipline it may need kicking */
+	tty_wakeup(tty);
+}
+
+EXPORT_SYMBOL(start_tty);
+
+/**
+ *	tty_read	-	read method for tty device files
+ *	@file: pointer to tty file
+ *	@buf: user buffer
+ *	@count: size of user buffer
+ *	@ppos: unused
+ *
+ *	Perform the read system call function on this terminal device. Checks
+ *	for hung up devices before calling the line discipline method.
+ *
+ *	Locking:
+ *		Locks the line discipline internally while needed. Multiple
+ *	read calls may be outstanding in parallel.
+ */
+
+static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
+			loff_t *ppos)
+{
+	int i;
+	struct tty_struct *tty;
+	struct inode *inode;
+	struct tty_ldisc *ld;
+
+	tty = (struct tty_struct *)file->private_data;
+	inode = file->f_path.dentry->d_inode;
+	if (tty_paranoia_check(tty, inode, "tty_read"))
+		return -EIO;
+	if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags)))
+		return -EIO;
+
+	/* We want to wait for the line discipline to sort out in this
+	   situation */
+	ld = tty_ldisc_ref_wait(tty);
+	if (ld->ops->read)
+		i = (ld->ops->read)(tty, file, buf, count);
+	else
+		i = -EIO;
+	tty_ldisc_deref(ld);
+	if (i > 0)
+		inode->i_atime = current_fs_time(inode->i_sb);
+	return i;
+}
+
+void tty_write_unlock(struct tty_struct *tty)
+{
+	mutex_unlock(&tty->atomic_write_lock);
+	wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
+}
+
+int tty_write_lock(struct tty_struct *tty, int ndelay)
+{
+	if (!mutex_trylock(&tty->atomic_write_lock)) {
+		if (ndelay)
+			return -EAGAIN;
+		if (mutex_lock_interruptible(&tty->atomic_write_lock))
+			return -ERESTARTSYS;
+	}
+	return 0;
+}
+
+/*
+ * Split writes up in sane blocksizes to avoid
+ * denial-of-service type attacks
+ */
+static inline ssize_t do_tty_write(
+	ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t),
+	struct tty_struct *tty,
+	struct file *file,
+	const char __user *buf,
+	size_t count)
+{
+	ssize_t ret, written = 0;
+	unsigned int chunk;
+
+	ret = tty_write_lock(tty, file->f_flags & O_NDELAY);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * We chunk up writes into a temporary buffer. This
+	 * simplifies low-level drivers immensely, since they
+	 * don't have locking issues and user mode accesses.
+	 *
+	 * But if TTY_NO_WRITE_SPLIT is set, we should use a
+	 * big chunk-size..
+	 *
+	 * The default chunk-size is 2kB, because the N_TTY
+	 * layer has problems with bigger chunks. It will
+	 * claim to be able to handle more characters than
+	 * it actually does.
+	 *
+	 * FIXME: This can probably go away now except that 64K chunks
+	 * are too likely to fail unless switched to vmalloc...
+	 */
+	chunk = 2048;
+	if (test_bit(TTY_NO_WRITE_SPLIT, &tty->flags))
+		chunk = 65536;
+	if (count < chunk)
+		chunk = count;
+
+	/* write_buf/write_cnt is protected by the atomic_write_lock mutex */
+	if (tty->write_cnt < chunk) {
+		unsigned char *buf_chunk;
+
+		if (chunk < 1024)
+			chunk = 1024;
+
+		buf_chunk = kmalloc(chunk, GFP_KERNEL);
+		if (!buf_chunk) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		kfree(tty->write_buf);
+		tty->write_cnt = chunk;
+		tty->write_buf = buf_chunk;
+	}
+
+	/* Do the write .. */
+	for (;;) {
+		size_t size = count;
+		if (size > chunk)
+			size = chunk;
+		ret = -EFAULT;
+		if (copy_from_user(tty->write_buf, buf, size))
+			break;
+		ret = write(tty, file, tty->write_buf, size);
+		if (ret <= 0)
+			break;
+		written += ret;
+		buf += ret;
+		count -= ret;
+		if (!count)
+			break;
+		ret = -ERESTARTSYS;
+		if (signal_pending(current))
+			break;
+		cond_resched();
+	}
+	if (written) {
+		struct inode *inode = file->f_path.dentry->d_inode;
+		inode->i_mtime = current_fs_time(inode->i_sb);
+		ret = written;
+	}
+out:
+	tty_write_unlock(tty);
+	return ret;
+}
+
+/**
+ * tty_write_message - write a message to a certain tty, not just the console.
+ * @tty: the destination tty_struct
+ * @msg: the message to write
+ *
+ * This is used for messages that need to be redirected to a specific tty.
+ * We don't put it into the syslog queue right now; maybe in the future if
+ * really needed.
+ *
+ * We must still hold the BKL and test the CLOSING flag for the moment.
+ */
+
+void tty_write_message(struct tty_struct *tty, char *msg)
+{
+	if (tty) {
+		mutex_lock(&tty->atomic_write_lock);
+		lock_kernel();
+		if (tty->ops->write && !test_bit(TTY_CLOSING, &tty->flags)) {
+			unlock_kernel();
+			tty->ops->write(tty, msg, strlen(msg));
+		} else
+			unlock_kernel();
+		tty_write_unlock(tty);
+	}
+	return;
+}
+
+
+/**
+ *	tty_write		-	write method for tty device file
+ *	@file: tty file pointer
+ *	@buf: user data to write
+ *	@count: bytes to write
+ *	@ppos: unused
+ *
+ *	Write data to a tty device via the line discipline.
+ *
+ *	Locking:
+ *		Locks the line discipline as required
+ *		Writes to the tty driver are serialized by the atomic_write_lock
+ *	and are then processed in chunks to the device. The line discipline
+ *	write method will not be invoked in parallel for each device.
+ */
+
+static ssize_t tty_write(struct file *file, const char __user *buf,
+						size_t count, loff_t *ppos)
+{
+	struct tty_struct *tty;
+	struct inode *inode = file->f_path.dentry->d_inode;
+	ssize_t ret;
+	struct tty_ldisc *ld;
+
+	tty = (struct tty_struct *)file->private_data;
+	if (tty_paranoia_check(tty, inode, "tty_write"))
+		return -EIO;
+	if (!tty || !tty->ops->write ||
+		(test_bit(TTY_IO_ERROR, &tty->flags)))
+			return -EIO;
+	/* Short term debug to catch buggy drivers */
+	if (tty->ops->write_room == NULL)
+		printk(KERN_ERR "tty driver %s lacks a write_room method.\n",
+			tty->driver->name);
+	ld = tty_ldisc_ref_wait(tty);
+	if (!ld->ops->write)
+		ret = -EIO;
+	else
+		ret = do_tty_write(ld->ops->write, tty, file, buf, count);
+	tty_ldisc_deref(ld);
+	return ret;
+}
+
+ssize_t redirected_tty_write(struct file *file, const char __user *buf,
+						size_t count, loff_t *ppos)
+{
+	struct file *p = NULL;
+
+	spin_lock(&redirect_lock);
+	if (redirect) {
+		get_file(redirect);
+		p = redirect;
+	}
+	spin_unlock(&redirect_lock);
+
+	if (p) {
+		ssize_t res;
+		res = vfs_write(p, buf, count, &p->f_pos);
+		fput(p);
+		return res;
+	}
+	return tty_write(file, buf, count, ppos);
+}
+
+static char ptychar[] = "pqrstuvwxyzabcde";
+
+/**
+ *	pty_line_name	-	generate name for a pty
+ *	@driver: the tty driver in use
+ *	@index: the minor number
+ *	@p: output buffer of at least 6 bytes
+ *
+ *	Generate a name from a driver reference and write it to the output
+ *	buffer.
+ *
+ *	Locking: None
+ */
+static void pty_line_name(struct tty_driver *driver, int index, char *p)
+{
+	int i = index + driver->name_base;
+	/* ->name is initialized to "ttyp", but "tty" is expected */
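+	/* e.g. index 0 on the slave side yields "ttyp0", index 16 yields "ttyq0" */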
+	sprintf(p, "%s%c%x",
+		driver->subtype == PTY_TYPE_SLAVE ? "tty" : driver->name,
+		ptychar[i >> 4 & 0xf], i & 0xf);
+}
+
+/**
+ *	tty_line_name	-	generate name for a tty
+ *	@driver: the tty driver in use
+ *	@index: the minor number
+ *	@p: output buffer of at least 7 bytes
+ *
+ *	Generate a name from a driver reference and write it to the output
+ *	buffer.
+ *
+ *	Locking: None
+ */
+static void tty_line_name(struct tty_driver *driver, int index, char *p)
+{
+	sprintf(p, "%s%d", driver->name, index + driver->name_base);
+}
+
+/**
+ *	tty_driver_lookup_tty() - find an existing tty, if any
+ *	@driver: the driver for the tty
+ *	@idx:	 the minor number
+ *
+ *	Return the tty, if found or ERR_PTR() otherwise.
+ *
+ *	Locking: tty_mutex must be held. If tty is found, the mutex must
+ *	be held until the 'fast-open' is also done. Will change once we
+ *	have refcounting in the driver and per driver locking
+ */
+static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver,
+		struct inode *inode, int idx)
+{
+	struct tty_struct *tty;
+
+	if (driver->ops->lookup)
+		return driver->ops->lookup(driver, inode, idx);
+
+	tty = driver->ttys[idx];
+	return tty;
+}
+
+/**
+ *	tty_init_termios	-  helper for termios setup
+ *	@tty: the tty to set up
+ *
+ *	Initialise the termios structures for this tty. This runs under
+ *	the tty_mutex currently so we can be relaxed about ordering.
+ */
+
+int tty_init_termios(struct tty_struct *tty)
+{
+	struct ktermios *tp;
+	int idx = tty->index;
+
+	tp = tty->driver->termios[idx];
+	if (tp == NULL) {
+		tp = kzalloc(sizeof(struct ktermios[2]), GFP_KERNEL);
+		if (tp == NULL)
+			return -ENOMEM;
+		memcpy(tp, &tty->driver->init_termios,
+						sizeof(struct ktermios));
+		tty->driver->termios[idx] = tp;
+	}
+	tty->termios = tp;
+	tty->termios_locked = tp + 1;
+
+	/* Compatibility until drivers always set this */
+	tty->termios->c_ispeed = tty_termios_input_baud_rate(tty->termios);
+	tty->termios->c_ospeed = tty_termios_baud_rate(tty->termios);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tty_init_termios);
+
+/**
+ *	tty_driver_install_tty() - install a tty entry in the driver
+ *	@driver: the driver for the tty
+ *	@tty: the tty
+ *
+ *	Install a tty object into the driver tables. The tty->index field
+ *	will be set by the time this is called. This method is responsible
+ *	for ensuring any needed additional structures are allocated and
+ *	configured.
+ *
+ *	Locking: tty_mutex for now
+ */
+static int tty_driver_install_tty(struct tty_driver *driver,
+						struct tty_struct *tty)
+{
+	int idx = tty->index;
+	int ret;
+
+	if (driver->ops->install) {
+		lock_kernel();
+		ret = driver->ops->install(driver, tty);
+		unlock_kernel();
+		return ret;
+	}
+
+	if (tty_init_termios(tty) == 0) {
+		lock_kernel();
+		tty_driver_kref_get(driver);
+		tty->count++;
+		driver->ttys[idx] = tty;
+		unlock_kernel();
+		return 0;
+	}
+	return -ENOMEM;
+}
+
+/**
+ *	tty_driver_remove_tty() - remove a tty from the driver tables
+ *	@driver: the driver for the tty
+ *	@idx:	 the minor number
+ *
+ *	Remove a tty object from the driver tables. The tty->index field
+ *	will be set by the time this is called.
+ *
+ *	Locking: tty_mutex for now
+ */
+static void tty_driver_remove_tty(struct tty_driver *driver,
+						struct tty_struct *tty)
+{
+	if (driver->ops->remove)
+		driver->ops->remove(driver, tty);
+	else
+		driver->ttys[tty->index] = NULL;
+}
+
+/*
+ * 	tty_reopen()	- fast re-open of an open tty
+ * 	@tty	- the tty to open
+ *
+ *	Return 0 on success, -errno on error.
+ *
+ *	Locking: tty_mutex must be held from the time the tty was found
+ *		 till this open completes.
+ */
+static int tty_reopen(struct tty_struct *tty)
+{
+	struct tty_driver *driver = tty->driver;
+
+	if (test_bit(TTY_CLOSING, &tty->flags))
+		return -EIO;
+
+	if (driver->type == TTY_DRIVER_TYPE_PTY &&
+	    driver->subtype == PTY_TYPE_MASTER) {
+		/*
+		 * special case for PTY masters: only one open permitted,
+		 * and the slave side open count is incremented as well.
+		 */
+		if (tty->count)
+			return -EIO;
+
+		tty->link->count++;
+	}
+	tty->count++;
+	tty->driver = driver; /* N.B. why do this every time?? */
+
+	mutex_lock(&tty->ldisc_mutex);
+	WARN_ON(!test_bit(TTY_LDISC, &tty->flags));
+	mutex_unlock(&tty->ldisc_mutex);
+
+	return 0;
+}
+
+/**
+ *	tty_init_dev		-	initialise a tty device
+ *	@driver: tty driver we are opening a device on
+ *	@idx: device index
+ *	@ret_tty: returned tty structure
+ *	@first_ok: ok to open a new device (used by ptmx)
+ *
+ *	Prepare a tty device. This may not be a "new" clean device but
+ *	could also be an active device. The pty drivers require special
+ *	handling because of this.
+ *
+ *	Locking:
+ *		The function is called under the tty_mutex, which
+ *	protects us from the tty struct or driver itself going away.
+ *
+ *	On exit the tty device has the line discipline attached and
+ *	a reference count of 1. If a pair was created for pty/tty use
+ *	and the other was a pty master then it too has a reference count of 1.
+ *
+ * WSH 06/09/97: Rewritten to remove races and properly clean up after a
+ * failed open.  The new code protects the open with a mutex, so it's
+ * really quite straightforward.  The mutex locking can probably be
+ * relaxed for the (most common) case of reopening a tty.
+ */
+
+struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx,
+								int first_ok)
+{
+	struct tty_struct *tty;
+	int retval;
+
+	lock_kernel();
+	/* Check if pty master is being opened multiple times */
+	if (driver->subtype == PTY_TYPE_MASTER &&
+		(driver->flags & TTY_DRIVER_DEVPTS_MEM) && !first_ok) {
+		unlock_kernel();
+		return ERR_PTR(-EIO);
+	}
+	unlock_kernel();
+
+	/*
+	 * First time open is complex, especially for PTY devices.
+	 * This code guarantees that either everything succeeds and the
+	 * TTY is ready for operation, or else the table slots are vacated
+	 * and the allocated memory released.  (Except that the termios
+	 * and locked termios may be retained.)
+	 */
+
+	if (!try_module_get(driver->owner))
+		return ERR_PTR(-ENODEV);
+
+	tty = alloc_tty_struct();
+	if (!tty)
+		goto fail_no_mem;
+	initialize_tty_struct(tty, driver, idx);
+
+	retval = tty_driver_install_tty(driver, tty);
+	if (retval < 0) {
+		free_tty_struct(tty);
+		module_put(driver->owner);
+		return ERR_PTR(retval);
+	}
+
+	/*
+	 * Structures all installed ... call the ldisc open routines.
+	 * If we fail here just call release_tty to clean up.  No need
+	 * to decrement the use counts, as release_tty doesn't care.
+	 */
+	retval = tty_ldisc_setup(tty, tty->link);
+	if (retval)
+		goto release_mem_out;
+	return tty;
+
+fail_no_mem:
+	module_put(driver->owner);
+	return ERR_PTR(-ENOMEM);
+
+	/* call the tty release_tty routine to clean out this slot */
+release_mem_out:
+	if (printk_ratelimit())
+		printk(KERN_INFO "tty_init_dev: ldisc open failed, "
+				 "clearing slot %d\n", idx);
+	lock_kernel();
+	release_tty(tty, idx);
+	unlock_kernel();
+	return ERR_PTR(retval);
+}
+
+void tty_free_termios(struct tty_struct *tty)
+{
+	struct ktermios *tp;
+	int idx = tty->index;
+	/* Kill this flag and push into drivers for locking etc */
+	if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) {
+		/* FIXME: Locking on ->termios array */
+		tp = tty->termios;
+		tty->driver->termios[idx] = NULL;
+		kfree(tp);
+	}
+}
+EXPORT_SYMBOL(tty_free_termios);
+
+void tty_shutdown(struct tty_struct *tty)
+{
+	tty_driver_remove_tty(tty->driver, tty);
+	tty_free_termios(tty);
+}
+EXPORT_SYMBOL(tty_shutdown);
+
+/**
+ *	release_one_tty		-	release tty structure memory
+ *	@kref: kref of tty we are obliterating
+ *
+ *	Releases memory associated with a tty structure, and clears out the
+ *	driver table slots. This function is called when a device is no longer
+ *	in use. It also gets called when setup of a device fails.
+ *
+ *	Locking:
+ *		tty_mutex - sometimes only
+ *		takes the file list lock internally when working on the list
+ *	of ttys that the driver keeps.
+ *
+ *	This method gets called from a work queue so that the driver private
+ *	cleanup ops can sleep (needed for USB at least)
+ */
+static void release_one_tty(struct work_struct *work)
+{
+	struct tty_struct *tty =
+		container_of(work, struct tty_struct, hangup_work);
+	struct tty_driver *driver = tty->driver;
+
+	if (tty->ops->cleanup)
+		tty->ops->cleanup(tty);
+
+	tty->magic = 0;
+	tty_driver_kref_put(driver);
+	module_put(driver->owner);
+
+	file_list_lock();
+	list_del_init(&tty->tty_files);
+	file_list_unlock();
+
+	put_pid(tty->pgrp);
+	put_pid(tty->session);
+	free_tty_struct(tty);
+}
+
+static void queue_release_one_tty(struct kref *kref)
+{
+	struct tty_struct *tty = container_of(kref, struct tty_struct, kref);
+
+	if (tty->ops->shutdown)
+		tty->ops->shutdown(tty);
+	else
+		tty_shutdown(tty);
+
+	/* The hangup queue is now free so we can reuse it rather than
+	   waste a chunk of memory for each port */
+	INIT_WORK(&tty->hangup_work, release_one_tty);
+	schedule_work(&tty->hangup_work);
+}
+
+/**
+ *	tty_kref_put		-	release a tty kref
+ *	@tty: tty device
+ *
+ *	Release a reference to a tty device and if need be let the kref
+ *	layer destruct the object for us
+ */
+
+void tty_kref_put(struct tty_struct *tty)
+{
+	if (tty)
+		kref_put(&tty->kref, queue_release_one_tty);
+}
+EXPORT_SYMBOL(tty_kref_put);
+
+/**
+ *	release_tty		-	release tty structure memory
+ *
+ *	Release both @tty and a possible linked partner (think pty pair),
+ *	and decrement the refcount of the backing module.
+ *
+ *	Locking:
+ *		tty_mutex - sometimes only
+ *		takes the file list lock internally when working on the list
+ *	of ttys that the driver keeps.
+ *		FIXME: should we require tty_mutex is held here ??
+ *
+ */
+static void release_tty(struct tty_struct *tty, int idx)
+{
+	/* This should always be true but check for the moment */
+	WARN_ON(tty->index != idx);
+
+	if (tty->link)
+		tty_kref_put(tty->link);
+	tty_kref_put(tty);
+}
+
+/**
+ *	tty_release		-	vfs callback for close
+ *	@inode: inode of tty
+ *	@filp: file pointer for handle to tty
+ *
+ *	Called the last time each file handle is closed that references
+ *	this tty. There may however be several such references.
+ *
+ *	Locking:
+ *		Takes bkl. See tty_release_dev
+ *
+ * Even releasing the tty structures is a tricky business.. We have
+ * to be very careful that the structures are all released at the
+ * same time, as interrupts might otherwise get the wrong pointers.
+ *
+ * WSH 09/09/97: rewritten to avoid some nasty race conditions that could
+ * lead to double frees or releasing memory still in use.
+ */
+
+int tty_release(struct inode *inode, struct file *filp)
+{
+	struct tty_struct *tty, *o_tty;
+	int	pty_master, tty_closing, o_tty_closing, do_sleep;
+	int	devpts;
+	int	idx;
+	char	buf[64];
+
+	tty = (struct tty_struct *)filp->private_data;
+	if (tty_paranoia_check(tty, inode, "tty_release_dev"))
+		return 0;
+
+	lock_kernel();
+	check_tty_count(tty, "tty_release_dev");
+
+	tty_fasync(-1, filp, 0);
+
+	idx = tty->index;
+	pty_master = (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+		      tty->driver->subtype == PTY_TYPE_MASTER);
+	devpts = (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM) != 0;
+	o_tty = tty->link;
+
+#ifdef TTY_PARANOIA_CHECK
+	if (idx < 0 || idx >= tty->driver->num) {
+		printk(KERN_DEBUG "tty_release_dev: bad idx when trying to "
+				  "free (%s)\n", tty->name);
+		unlock_kernel();
+		return 0;
+	}
+	if (!devpts) {
+		if (tty != tty->driver->ttys[idx]) {
+			unlock_kernel();
+			printk(KERN_DEBUG "tty_release_dev: driver.table[%d] not tty "
+			       "for (%s)\n", idx, tty->name);
+			return 0;
+		}
+		if (tty->termios != tty->driver->termios[idx]) {
+			unlock_kernel();
+			printk(KERN_DEBUG "tty_release_dev: driver.termios[%d] not termios "
+			       "for (%s)\n",
+			       idx, tty->name);
+			return 0;
+		}
+	}
+#endif
+
+#ifdef TTY_DEBUG_HANGUP
+	printk(KERN_DEBUG "tty_release_dev of %s (tty count=%d)...",
+	       tty_name(tty, buf), tty->count);
+#endif
+
+#ifdef TTY_PARANOIA_CHECK
+	if (tty->driver->other &&
+	     !(tty->driver->flags & TTY_DRIVER_DEVPTS_MEM)) {
+		if (o_tty != tty->driver->other->ttys[idx]) {
+			unlock_kernel();
+			printk(KERN_DEBUG "tty_release_dev: other->table[%d] "
+					  "not o_tty for (%s)\n",
+			       idx, tty->name);
+			return 0 ;
+		}
+		if (o_tty->termios != tty->driver->other->termios[idx]) {
+			unlock_kernel();
+			printk(KERN_DEBUG "tty_release_dev: other->termios[%d] "
+					  "not o_termios for (%s)\n",
+			       idx, tty->name);
+			return 0;
+		}
+		if (o_tty->link != tty) {
+			unlock_kernel();
+			printk(KERN_DEBUG "tty_release_dev: bad pty pointers\n");
+			return 0;
+		}
+	}
+#endif
+	if (tty->ops->close)
+		tty->ops->close(tty, filp);
+
+	unlock_kernel();
+	/*
+	 * Sanity check: if tty->count is going to zero, there shouldn't be
+	 * any waiters on tty->read_wait or tty->write_wait.  We test the
+	 * wait queues and kick everyone out _before_ actually starting to
+	 * close.  This ensures that we won't block while releasing the tty
+	 * structure.
+	 *
+	 * The test for the o_tty closing is necessary, since the master and
+	 * slave sides may close in any order.  If the slave side closes out
+	 * first, its count will be one, since the master side holds an open.
+	 * Thus this test wouldn't be triggered at the time the slave closes,
+	 * so we do it now.
+	 *
+	 * Note that it's possible for the tty to be opened again while we're
+	 * flushing out waiters.  By recalculating the closing flags before
+	 * each iteration we avoid any problems.
+	 */
+	while (1) {
+		/* Guard against races with tty->count changes elsewhere and
+		   opens on /dev/tty */
+
+		mutex_lock(&tty_mutex);
+		lock_kernel();
+		tty_closing = tty->count <= 1;
+		o_tty_closing = o_tty &&
+			(o_tty->count <= (pty_master ? 1 : 0));
+		do_sleep = 0;
+
+		if (tty_closing) {
+			if (waitqueue_active(&tty->read_wait)) {
+				wake_up_poll(&tty->read_wait, POLLIN);
+				do_sleep++;
+			}
+			if (waitqueue_active(&tty->write_wait)) {
+				wake_up_poll(&tty->write_wait, POLLOUT);
+				do_sleep++;
+			}
+		}
+		if (o_tty_closing) {
+			if (waitqueue_active(&o_tty->read_wait)) {
+				wake_up_poll(&o_tty->read_wait, POLLIN);
+				do_sleep++;
+			}
+			if (waitqueue_active(&o_tty->write_wait)) {
+				wake_up_poll(&o_tty->write_wait, POLLOUT);
+				do_sleep++;
+			}
+		}
+		if (!do_sleep)
+			break;
+
+		printk(KERN_WARNING "tty_release_dev: %s: read/write wait queue "
+				    "active!\n", tty_name(tty, buf));
+		unlock_kernel();
+		mutex_unlock(&tty_mutex);
+		schedule();
+	}
+
+	/*
+	 * The closing flags are now consistent with the open counts on
+	 * both sides, and we've completed the last operation that could
+	 * block, so it's safe to proceed with closing.
+	 */
+	if (pty_master) {
+		if (--o_tty->count < 0) {
+			printk(KERN_WARNING "tty_release_dev: bad pty slave count "
+					    "(%d) for %s\n",
+			       o_tty->count, tty_name(o_tty, buf));
+			o_tty->count = 0;
+		}
+	}
+	if (--tty->count < 0) {
+		printk(KERN_WARNING "tty_release_dev: bad tty->count (%d) for %s\n",
+		       tty->count, tty_name(tty, buf));
+		tty->count = 0;
+	}
+
+	/*
+	 * We've decremented tty->count, so we need to remove this file
+	 * descriptor off the tty->tty_files list; this serves two
+	 * purposes:
+	 *  - check_tty_count sees the correct number of file descriptors
+	 *    associated with this tty.
+	 *  - do_tty_hangup no longer sees this file descriptor as
+	 *    something that needs to be handled for hangups.
+	 */
+	file_kill(filp);
+	filp->private_data = NULL;
+
+	/*
+	 * Perform some housekeeping before deciding whether to return.
+	 *
+	 * Set the TTY_CLOSING flag if this was the last open.  In the
+	 * case of a pty we may have to wait around for the other side
+	 * to close, and TTY_CLOSING makes sure we can't be reopened.
+	 */
+	if (tty_closing)
+		set_bit(TTY_CLOSING, &tty->flags);
+	if (o_tty_closing)
+		set_bit(TTY_CLOSING, &o_tty->flags);
+
+	/*
+	 * If _either_ side is closing, make sure there aren't any
+	 * processes that still think tty or o_tty is their controlling
+	 * tty.
+	 */
+	if (tty_closing || o_tty_closing) {
+		read_lock(&tasklist_lock);
+		session_clear_tty(tty->session);
+		if (o_tty)
+			session_clear_tty(o_tty->session);
+		read_unlock(&tasklist_lock);
+	}
+
+	mutex_unlock(&tty_mutex);
+
+	/* check whether both sides are closing ... */
+	if (!tty_closing || (o_tty && !o_tty_closing)) {
+		unlock_kernel();
+		return 0;
+	}
+
+#ifdef TTY_DEBUG_HANGUP
+	printk(KERN_DEBUG "freeing tty structure...");
+#endif
+	/*
+	 * Ask the line discipline code to release its structures
+	 */
+	tty_ldisc_release(tty, o_tty);
+	/*
+	 * The release_tty function takes care of the details of clearing
+	 * the slots and preserving the termios structure.
+	 */
+	release_tty(tty, idx);
+
+	/* Make this pty number available for reallocation */
+	if (devpts)
+		devpts_kill_index(inode, idx);
+	unlock_kernel();
+	return 0;
+}
+
+/**
+ *	tty_open		-	open a tty device
+ *	@inode: inode of device file
+ *	@filp: file pointer to tty
+ *
+ *	tty_open and tty_release keep up the tty count that contains the
+ *	number of opens done on a tty. We cannot use the inode-count, as
+ *	different inodes might point to the same tty.
+ *
+ *	Open-counting is needed for pty masters, as well as for keeping
+ *	track of serial lines: DTR is dropped when the last close happens.
+ *	(This is not done solely through tty->count, now.  - Ted 1/27/92)
+ *
+ *	The termios state of a pty is reset on first open so that
+ *	settings don't persist across reuse.
+ *
+ *	Locking: tty_mutex protects tty, get_tty_driver and tty_init_dev work.
+ *		 tty->count should protect the rest.
+ *		 ->siglock protects ->signal/->sighand
+ */
+
+static int tty_open(struct inode *inode, struct file *filp)
+{
+	struct tty_struct *tty = NULL;
+	int noctty, retval;
+	struct tty_driver *driver;
+	int index;
+	dev_t device = inode->i_rdev;
+	unsigned saved_flags = filp->f_flags;
+
+	nonseekable_open(inode, filp);
+
+retry_open:
+	noctty = filp->f_flags & O_NOCTTY;
+	index  = -1;
+	retval = 0;
+
+	mutex_lock(&tty_mutex);
+	lock_kernel();
+
+	if (device == MKDEV(TTYAUX_MAJOR, 0)) {
+		tty = get_current_tty();
+		if (!tty) {
+			unlock_kernel();
+			mutex_unlock(&tty_mutex);
+			return -ENXIO;
+		}
+		driver = tty_driver_kref_get(tty->driver);
+		index = tty->index;
+		filp->f_flags |= O_NONBLOCK; /* Don't let /dev/tty block */
+		/* noctty = 1; */
+		/* FIXME: Should we take a driver reference ? */
+		tty_kref_put(tty);
+		goto got_driver;
+	}
+#ifdef CONFIG_VT
+	if (device == MKDEV(TTY_MAJOR, 0)) {
+		extern struct tty_driver *console_driver;
+		driver = tty_driver_kref_get(console_driver);
+		index = fg_console;
+		noctty = 1;
+		goto got_driver;
+	}
+#endif
+	if (device == MKDEV(TTYAUX_MAJOR, 1)) {
+		struct tty_driver *console_driver = console_device(&index);
+		if (console_driver) {
+			driver = tty_driver_kref_get(console_driver);
+			if (driver) {
+				/* Don't let /dev/console block */
+				filp->f_flags |= O_NONBLOCK;
+				noctty = 1;
+				goto got_driver;
+			}
+		}
+		unlock_kernel();
+		mutex_unlock(&tty_mutex);
+		return -ENODEV;
+	}
+
+	driver = get_tty_driver(device, &index);
+	if (!driver) {
+		unlock_kernel();
+		mutex_unlock(&tty_mutex);
+		return -ENODEV;
+	}
+got_driver:
+	if (!tty) {
+		/* check whether we're reopening an existing tty */
+		tty = tty_driver_lookup_tty(driver, inode, index);
+
+		if (IS_ERR(tty)) {
+			unlock_kernel();
+			mutex_unlock(&tty_mutex);
+			return PTR_ERR(tty);
+		}
+	}
+
+	if (tty) {
+		retval = tty_reopen(tty);
+		if (retval)
+			tty = ERR_PTR(retval);
+	} else
+		tty = tty_init_dev(driver, index, 0);
+
+	mutex_unlock(&tty_mutex);
+	tty_driver_kref_put(driver);
+	if (IS_ERR(tty)) {
+		unlock_kernel();
+		return PTR_ERR(tty);
+	}
+
+	filp->private_data = tty;
+	file_move(filp, &tty->tty_files);
+	check_tty_count(tty, "tty_open");
+	if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+	    tty->driver->subtype == PTY_TYPE_MASTER)
+		noctty = 1;
+#ifdef TTY_DEBUG_HANGUP
+	printk(KERN_DEBUG "opening %s...", tty->name);
+#endif
+	if (!retval) {
+		if (tty->ops->open)
+			retval = tty->ops->open(tty, filp);
+		else
+			retval = -ENODEV;
+	}
+	filp->f_flags = saved_flags;
+
+	if (!retval && test_bit(TTY_EXCLUSIVE, &tty->flags) &&
+						!capable(CAP_SYS_ADMIN))
+		retval = -EBUSY;
+
+	if (retval) {
+#ifdef TTY_DEBUG_HANGUP
+		printk(KERN_DEBUG "error %d in opening %s...", retval,
+		       tty->name);
+#endif
+		tty_release(inode, filp);
+		if (retval != -ERESTARTSYS) {
+			unlock_kernel();
+			return retval;
+		}
+		if (signal_pending(current)) {
+			unlock_kernel();
+			return retval;
+		}
+		schedule();
+		/*
+		 * Need to reset f_op in case a hangup happened.
+		 */
+		if (filp->f_op == &hung_up_tty_fops)
+			filp->f_op = &tty_fops;
+		unlock_kernel();
+		goto retry_open;
+	}
+	unlock_kernel();
+
+
+	mutex_lock(&tty_mutex);
+	lock_kernel();
+	spin_lock_irq(&current->sighand->siglock);
+	if (!noctty &&
+	    current->signal->leader &&
+	    !current->signal->tty &&
+	    tty->session == NULL)
+		__proc_set_tty(current, tty);
+	spin_unlock_irq(&current->sighand->siglock);
+	unlock_kernel();
+	mutex_unlock(&tty_mutex);
+	return 0;
+}
+
+
+
+/**
+ *	tty_poll	-	check tty status
+ *	@filp: file being polled
+ *	@wait: poll wait structures to update
+ *
+ *	Call the line discipline polling method to obtain the poll
+ *	status of the device.
+ *
+ *	Locking: locks the called line discipline, but the ldisc poll method
+ *	may be re-entered freely by other callers.
+ */
+
+static unsigned int tty_poll(struct file *filp, poll_table *wait)
+{
+	struct tty_struct *tty;
+	struct tty_ldisc *ld;
+	int ret = 0;
+
+	tty = (struct tty_struct *)filp->private_data;
+	if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_poll"))
+		return 0;
+
+	ld = tty_ldisc_ref_wait(tty);
+	if (ld->ops->poll)
+		ret = (ld->ops->poll)(tty, filp, wait);
+	tty_ldisc_deref(ld);
+	return ret;
+}
+
+static int tty_fasync(int fd, struct file *filp, int on)
+{
+	struct tty_struct *tty;
+	unsigned long flags;
+	int retval = 0;
+
+	lock_kernel();
+	tty = (struct tty_struct *)filp->private_data;
+	if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_fasync"))
+		goto out;
+
+	retval = fasync_helper(fd, filp, on, &tty->fasync);
+	if (retval <= 0)
+		goto out;
+
+	if (on) {
+		enum pid_type type;
+		struct pid *pid;
+		if (!waitqueue_active(&tty->read_wait))
+			tty->minimum_to_wake = 1;
+		spin_lock_irqsave(&tty->ctrl_lock, flags);
+		if (tty->pgrp) {
+			pid = tty->pgrp;
+			type = PIDTYPE_PGID;
+		} else {
+			pid = task_pid(current);
+			type = PIDTYPE_PID;
+		}
+		get_pid(pid);
+		spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+		retval = __f_setown(filp, pid, type, 0);
+		put_pid(pid);
+		if (retval)
+			goto out;
+	} else {
+		if (!tty->fasync && !waitqueue_active(&tty->read_wait))
+			tty->minimum_to_wake = N_TTY_BUF_SIZE;
+	}
+	retval = 0;
+out:
+	unlock_kernel();
+	return retval;
+}
+
+/**
+ *	tiocsti			-	fake input character
+ *	@tty: tty to fake input into
+ *	@p: pointer to character
+ *
+ *	Fake input to a tty device. Does the necessary locking and
+ *	input management.
+ *
+ *	FIXME: does not honour flow control ??
+ *
+ *	Locking:
+ *		Called functions take tty_ldisc_lock
+ *		current->signal->tty check is safe without locks
+ *
+ *	FIXME: may race normal receive processing
+ */
+
+static int tiocsti(struct tty_struct *tty, char __user *p)
+{
+	char ch, mbz = 0;
+	struct tty_ldisc *ld;
+
+	if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	if (get_user(ch, p))
+		return -EFAULT;
+	tty_audit_tiocsti(tty, ch);
+	ld = tty_ldisc_ref_wait(tty);
+	ld->ops->receive_buf(tty, &ch, &mbz, 1);
+	tty_ldisc_deref(ld);
+	return 0;
+}
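+
+/*
+ * User-space sketch (illustrative): push a character into a terminal's input
+ * queue as if it had been typed.  The target must be the caller's controlling
+ * tty, or the caller needs CAP_SYS_ADMIN.
+ *
+ *   char c = 'x';
+ *   ioctl(tty_fd, TIOCSTI, &c);
+ */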
+
+/**
+ *	tiocgwinsz		-	implement window query ioctl
+ *	@tty: tty
+ *	@arg: user buffer for result
+ *
+ *	Copies the kernel idea of the window size into the user buffer.
+ *
+ *	Locking: tty->termios_mutex is taken to ensure the winsize data
+ *		is consistent.
+ */
+
+static int tiocgwinsz(struct tty_struct *tty, struct winsize __user *arg)
+{
+	int err;
+
+	mutex_lock(&tty->termios_mutex);
+	err = copy_to_user(arg, &tty->winsize, sizeof(*arg));
+	mutex_unlock(&tty->termios_mutex);
+
+	return err ? -EFAULT: 0;
+}
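+
+/*
+ * User-space sketch (illustrative): query the window size of a terminal.
+ *
+ *   struct winsize ws;
+ *   if (ioctl(tty_fd, TIOCGWINSZ, &ws) == 0)
+ *           printf("%hu rows x %hu cols\n", ws.ws_row, ws.ws_col);
+ */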
+
+/**
+ *	tty_do_resize		-	resize event
+ *	@tty: tty being resized
+ *	@ws: the requested window size to apply
+ *
+ *	Update the termios variables and send the necessary signals to
+ *	perform a terminal resize correctly
+ */
+
+int tty_do_resize(struct tty_struct *tty, struct winsize *ws)
+{
+	struct pid *pgrp;
+	unsigned long flags;
+
+	/* Lock the tty */
+	mutex_lock(&tty->termios_mutex);
+	if (!memcmp(ws, &tty->winsize, sizeof(*ws)))
+		goto done;
+	/* Get the PID values and reference them so we can
+	   avoid holding the tty ctrl lock while sending signals */
+	spin_lock_irqsave(&tty->ctrl_lock, flags);
+	pgrp = get_pid(tty->pgrp);
+	spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+
+	if (pgrp)
+		kill_pgrp(pgrp, SIGWINCH, 1);
+	put_pid(pgrp);
+
+	tty->winsize = *ws;
+done:
+	mutex_unlock(&tty->termios_mutex);
+	return 0;
+}
+
+/**
+ *	tiocswinsz		-	implement window size set ioctl
+ *	@tty: tty side of tty
+ *	@arg: user buffer for result
+ *
+ *	Copies the user idea of the window size to the kernel. Traditionally
+ *	this is just advisory information but for the Linux console it
+ *	actually has driver level meaning and triggers a VC resize.
+ *
+ *	Locking:
+ *		Driver dependent. The default do_resize method takes the
+ *	tty termios mutex and ctrl_lock. The console takes its own lock
+ *	then calls into the default method.
+ */
+
+static int tiocswinsz(struct tty_struct *tty, struct winsize __user *arg)
+{
+	struct winsize tmp_ws;
+	if (copy_from_user(&tmp_ws, arg, sizeof(*arg)))
+		return -EFAULT;
+
+	if (tty->ops->resize)
+		return tty->ops->resize(tty, &tmp_ws);
+	else
+		return tty_do_resize(tty, &tmp_ws);
+}
+
+/**
+ *	tioccons	-	allow admin to move logical console
+ *	@file: the file to become console
+ *
+ *	Allow the administrator to move the redirected console device
+ *
+ *	Locking: uses redirect_lock to guard the redirect information
+ */
+
+static int tioccons(struct file *file)
+{
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	if (file->f_op->write == redirected_tty_write) {
+		struct file *f;
+		spin_lock(&redirect_lock);
+		f = redirect;
+		redirect = NULL;
+		spin_unlock(&redirect_lock);
+		if (f)
+			fput(f);
+		return 0;
+	}
+	spin_lock(&redirect_lock);
+	if (redirect) {
+		spin_unlock(&redirect_lock);
+		return -EBUSY;
+	}
+	get_file(file);
+	redirect = file;
+	spin_unlock(&redirect_lock);
+	return 0;
+}
+
+/**
+ *	fionbio		-	non blocking ioctl
+ *	@file: file to set blocking value
+ *	@p: user parameter
+ *
+ *	Historical tty interfaces had a blocking control ioctl before
+ *	the generic functionality existed. This piece of history is preserved
+ *	in the expected tty API of POSIX operating systems.
+ *
+ *	Locking: none, the open file handle ensures it won't go away.
+ */
+
+static int fionbio(struct file *file, int __user *p)
+{
+	int nonblock;
+
+	if (get_user(nonblock, p))
+		return -EFAULT;
+
+	spin_lock(&file->f_lock);
+	if (nonblock)
+		file->f_flags |= O_NONBLOCK;
+	else
+		file->f_flags &= ~O_NONBLOCK;
+	spin_unlock(&file->f_lock);
+	return 0;
+}
+
+/**
+ *	tiocsctty	-	set controlling tty
+ *	@tty: tty structure
+ *	@arg: user argument
+ *
+ *	This ioctl is used to manage job control. It permits a session
+ *	leader to set this tty as the controlling tty for the session.
+ *
+ *	Locking:
+ *		Takes tty_mutex() to protect tty instance
+ *		Takes tasklist_lock internally to walk sessions
+ *		Takes ->siglock() when updating signal->tty
+ */
+
+static int tiocsctty(struct tty_struct *tty, int arg)
+{
+	int ret = 0;
+	if (current->signal->leader && (task_session(current) == tty->session))
+		return ret;
+
+	mutex_lock(&tty_mutex);
+	/*
+	 * The process must be a session leader and
+	 * not have a controlling tty already.
+	 */
+	if (!current->signal->leader || current->signal->tty) {
+		ret = -EPERM;
+		goto unlock;
+	}
+
+	if (tty->session) {
+		/*
+		 * This tty is already the controlling
+		 * tty for another session group!
+		 */
+		if (arg == 1 && capable(CAP_SYS_ADMIN)) {
+			/*
+			 * Steal it away
+			 */
+			read_lock(&tasklist_lock);
+			session_clear_tty(tty->session);
+			read_unlock(&tasklist_lock);
+		} else {
+			ret = -EPERM;
+			goto unlock;
+		}
+	}
+	proc_set_tty(current, tty);
+unlock:
+	mutex_unlock(&tty_mutex);
+	return ret;
+}
+
+/**
+ *	tty_get_pgrp	-	return a ref counted pgrp pid
+ *	@tty: tty to read
+ *
+ *	Returns a refcounted instance of the pid struct for the process
+ *	group controlling the tty.
+ */
+
+struct pid *tty_get_pgrp(struct tty_struct *tty)
+{
+	unsigned long flags;
+	struct pid *pgrp;
+
+	spin_lock_irqsave(&tty->ctrl_lock, flags);
+	pgrp = get_pid(tty->pgrp);
+	spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+
+	return pgrp;
+}
+EXPORT_SYMBOL_GPL(tty_get_pgrp);
+
+/**
+ *	tiocgpgrp		-	get process group
+ *	@tty: tty passed by user
+ *	@real_tty: tty side of the tty passed by the user if a pty else the tty
+ *	@p: returned pid
+ *
+ *	Obtain the process group of the tty. If there is no process group
+ *	return an error.
+ *
+ *	Locking: none. Reference to current->signal->tty is safe.
+ */
+
+static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
+{
+	struct pid *pid;
+	int ret;
+	/*
+	 * (tty == real_tty) is a cheap way of
+	 * testing if the tty is NOT a master pty.
+	 */
+	if (tty == real_tty && current->signal->tty != real_tty)
+		return -ENOTTY;
+	pid = tty_get_pgrp(real_tty);
+	ret =  put_user(pid_vnr(pid), p);
+	put_pid(pid);
+	return ret;
+}
+
+/**
+ *	tiocspgrp		-	attempt to set process group
+ *	@tty: tty passed by user
+ *	@real_tty: tty side device matching tty passed by user
+ *	@p: pid pointer
+ *
+ *	Set the process group of the tty to the session passed. Only
+ *	permitted where the tty session is our session.
+ *
+ *	Locking: RCU, ctrl lock
+ */
+
+static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
+{
+	struct pid *pgrp;
+	pid_t pgrp_nr;
+	int retval = tty_check_change(real_tty);
+	unsigned long flags;
+
+	if (retval == -EIO)
+		return -ENOTTY;
+	if (retval)
+		return retval;
+	if (!current->signal->tty ||
+	    (current->signal->tty != real_tty) ||
+	    (real_tty->session != task_session(current)))
+		return -ENOTTY;
+	if (get_user(pgrp_nr, p))
+		return -EFAULT;
+	if (pgrp_nr < 0)
+		return -EINVAL;
+	rcu_read_lock();
+	pgrp = find_vpid(pgrp_nr);
+	retval = -ESRCH;
+	if (!pgrp)
+		goto out_unlock;
+	retval = -EPERM;
+	if (session_of_pgrp(pgrp) != task_session(current))
+		goto out_unlock;
+	retval = 0;
+	spin_lock_irqsave(&tty->ctrl_lock, flags);
+	put_pid(real_tty->pgrp);
+	real_tty->pgrp = get_pid(pgrp);
+	spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+out_unlock:
+	rcu_read_unlock();
+	return retval;
+}
+
+/**
+ *	tiocgsid		-	get session id
+ *	@tty: tty passed by user
+ *	@real_tty: tty side of the tty passed by the user if a pty else the tty
+ *	@p: pointer to returned session id
+ *
+ *	Obtain the session id of the tty. If there is no session
+ *	return an error.
+ *
+ *	Locking: none. Reference to current->signal->tty is safe.
+ */
+
+static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
+{
+	/*
+	 * (tty == real_tty) is a cheap way of
+	 * testing if the tty is NOT a master pty.
+	*/
+	if (tty == real_tty && current->signal->tty != real_tty)
+		return -ENOTTY;
+	if (!real_tty->session)
+		return -ENOTTY;
+	return put_user(pid_vnr(real_tty->session), p);
+}
+
+/**
+ *	tiocsetd	-	set line discipline
+ *	@tty: tty device
+ *	@p: pointer to user data
+ *
+ *	Set the line discipline according to user request.
+ *
+ *	Locking: see tty_set_ldisc, this function is just a helper
+ */
+
+static int tiocsetd(struct tty_struct *tty, int __user *p)
+{
+	int ldisc;
+	int ret;
+
+	if (get_user(ldisc, p))
+		return -EFAULT;
+
+	ret = tty_set_ldisc(tty, ldisc);
+
+	return ret;
+}
+
+/**
+ *	send_break	-	perform a timed break
+ *	@tty: device to break on
+ *	@duration: timeout in ms
+ *
+ *	Perform a timed break on hardware that lacks its own driver level
+ *	timed break functionality.
+ *
+ *	Locking:
+ *		atomic_write_lock serializes
+ *
+ */
+
+static int send_break(struct tty_struct *tty, unsigned int duration)
+{
+	int retval;
+
+	if (tty->ops->break_ctl == NULL)
+		return 0;
+
+	if (tty->driver->flags & TTY_DRIVER_HARDWARE_BREAK)
+		retval = tty->ops->break_ctl(tty, duration);
+	else {
+		/* Do the work ourselves */
+		if (tty_write_lock(tty, 0) < 0)
+			return -EINTR;
+		retval = tty->ops->break_ctl(tty, -1);
+		if (retval)
+			goto out;
+		if (!signal_pending(current))
+			msleep_interruptible(duration);
+		retval = tty->ops->break_ctl(tty, 0);
+out:
+		tty_write_unlock(tty);
+		if (signal_pending(current))
+			retval = -EINTR;
+	}
+	return retval;
+}
+
+/**
+ *	tty_tiocmget		-	get modem status
+ *	@tty: tty device
+ *	@file: user file pointer
+ *	@p: pointer to result
+ *
+ *	Obtain the modem status bits from the tty driver if the feature
+ *	is supported. Return -EINVAL if it is not available.
+ *
+ *	Locking: none (up to the driver)
+ */
+
+static int tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p)
+{
+	int retval = -EINVAL;
+
+	if (tty->ops->tiocmget) {
+		retval = tty->ops->tiocmget(tty, file);
+
+		if (retval >= 0)
+			retval = put_user(retval, p);
+	}
+	return retval;
+}
+
+/**
+ *	tty_tiocmset		-	set modem status
+ *	@tty: tty device
+ *	@file: user file pointer
+ *	@cmd: command - clear bits, set bits or set all
+ *	@p: pointer to desired bits
+ *
+ *	Set the modem status bits from the tty driver if the feature
+ *	is supported. Return -EINVAL if it is not available.
+ *
+ *	Locking: none (up to the driver)
+ */
+
+static int tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int cmd,
+	     unsigned __user *p)
+{
+	int retval;
+	unsigned int set, clear, val;
+
+	if (tty->ops->tiocmset == NULL)
+		return -EINVAL;
+
+	retval = get_user(val, p);
+	if (retval)
+		return retval;
+	set = clear = 0;
+	switch (cmd) {
+	case TIOCMBIS:
+		set = val;
+		break;
+	case TIOCMBIC:
+		clear = val;
+		break;
+	case TIOCMSET:
+		set = val;
+		clear = ~val;
+		break;
+	}
+	set &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP|TIOCM_CD|
+		TIOCM_RI|TIOCM_DSR|TIOCM_CTS;
+	clear &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP|TIOCM_CD|
+		TIOCM_RI|TIOCM_DSR|TIOCM_CTS;
+	return tty->ops->tiocmset(tty, file, set, clear);
+}
+
+struct tty_struct *tty_pair_get_tty(struct tty_struct *tty)
+{
+	if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+	    tty->driver->subtype == PTY_TYPE_MASTER)
+		tty = tty->link;
+	return tty;
+}
+EXPORT_SYMBOL(tty_pair_get_tty);
+
+struct tty_struct *tty_pair_get_pty(struct tty_struct *tty)
+{
+	if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+	    tty->driver->subtype == PTY_TYPE_MASTER)
+	    return tty;
+	return tty->link;
+}
+EXPORT_SYMBOL(tty_pair_get_pty);
+
+/*
+ * Split this up, as gcc can choke on it otherwise..
+ */
+long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct tty_struct *tty, *real_tty;
+	void __user *p = (void __user *)arg;
+	int retval;
+	struct tty_ldisc *ld;
+	struct inode *inode = file->f_dentry->d_inode;
+
+	tty = (struct tty_struct *)file->private_data;
+	if (tty_paranoia_check(tty, inode, "tty_ioctl"))
+		return -EINVAL;
+
+	real_tty = tty_pair_get_tty(tty);
+
+	/*
+	 * Factor out some common prep work
+	 */
+	switch (cmd) {
+	case TIOCSETD:
+	case TIOCSBRK:
+	case TIOCCBRK:
+	case TCSBRK:
+	case TCSBRKP:
+		retval = tty_check_change(tty);
+		if (retval)
+			return retval;
+		if (cmd != TIOCCBRK) {
+			tty_wait_until_sent(tty, 0);
+			if (signal_pending(current))
+				return -EINTR;
+		}
+		break;
+	}
+
+	/*
+	 *	Now do the stuff.
+	 */
+	switch (cmd) {
+	case TIOCSTI:
+		return tiocsti(tty, p);
+	case TIOCGWINSZ:
+		return tiocgwinsz(real_tty, p);
+	case TIOCSWINSZ:
+		return tiocswinsz(real_tty, p);
+	case TIOCCONS:
+		return real_tty != tty ? -EINVAL : tioccons(file);
+	case FIONBIO:
+		return fionbio(file, p);
+	case TIOCEXCL:
+		set_bit(TTY_EXCLUSIVE, &tty->flags);
+		return 0;
+	case TIOCNXCL:
+		clear_bit(TTY_EXCLUSIVE, &tty->flags);
+		return 0;
+	case TIOCNOTTY:
+		if (current->signal->tty != tty)
+			return -ENOTTY;
+		no_tty();
+		return 0;
+	case TIOCSCTTY:
+		return tiocsctty(tty, arg);
+	case TIOCGPGRP:
+		return tiocgpgrp(tty, real_tty, p);
+	case TIOCSPGRP:
+		return tiocspgrp(tty, real_tty, p);
+	case TIOCGSID:
+		return tiocgsid(tty, real_tty, p);
+	case TIOCGETD:
+		return put_user(tty->ldisc->ops->num, (int __user *)p);
+	case TIOCSETD:
+		return tiocsetd(tty, p);
+	/*
+	 * Break handling
+	 */
+	case TIOCSBRK:	/* Turn break on, unconditionally */
+		if (tty->ops->break_ctl)
+			return tty->ops->break_ctl(tty, -1);
+		return 0;
+	case TIOCCBRK:	/* Turn break off, unconditionally */
+		if (tty->ops->break_ctl)
+			return tty->ops->break_ctl(tty, 0);
+		return 0;
+	case TCSBRK:   /* SVID version: non-zero arg --> no break */
+		/* non-zero arg means wait for all output data
+		 * to be sent (performed above) but don't send break.
+		 * This is used by the tcdrain() termios function.
+		 */
+		if (!arg)
+			return send_break(tty, 250);
+		return 0;
+	case TCSBRKP:	/* support for POSIX tcsendbreak() */
+		return send_break(tty, arg ? arg*100 : 250);
+
+	case TIOCMGET:
+		return tty_tiocmget(tty, file, p);
+	case TIOCMSET:
+	case TIOCMBIC:
+	case TIOCMBIS:
+		return tty_tiocmset(tty, file, cmd, p);
+	case TCFLSH:
+		switch (arg) {
+		case TCIFLUSH:
+		case TCIOFLUSH:
+		/* flush tty buffer and allow ldisc to process ioctl */
+			tty_buffer_flush(tty);
+			break;
+		}
+		break;
+	}
+	if (tty->ops->ioctl) {
+		retval = (tty->ops->ioctl)(tty, file, cmd, arg);
+		if (retval != -ENOIOCTLCMD)
+			return retval;
+	}
+	ld = tty_ldisc_ref_wait(tty);
+	retval = -EINVAL;
+	if (ld->ops->ioctl) {
+		retval = ld->ops->ioctl(tty, file, cmd, arg);
+		if (retval == -ENOIOCTLCMD)
+			retval = -EINVAL;
+	}
+	tty_ldisc_deref(ld);
+	return retval;
+}
+
+#ifdef CONFIG_COMPAT
+static long tty_compat_ioctl(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	struct inode *inode = file->f_dentry->d_inode;
+	struct tty_struct *tty = file->private_data;
+	struct tty_ldisc *ld;
+	int retval = -ENOIOCTLCMD;
+
+	if (tty_paranoia_check(tty, inode, "tty_ioctl"))
+		return -EINVAL;
+
+	if (tty->ops->compat_ioctl) {
+		retval = (tty->ops->compat_ioctl)(tty, file, cmd, arg);
+		if (retval != -ENOIOCTLCMD)
+			return retval;
+	}
+
+	ld = tty_ldisc_ref_wait(tty);
+	if (ld->ops->compat_ioctl)
+		retval = ld->ops->compat_ioctl(tty, file, cmd, arg);
+	tty_ldisc_deref(ld);
+
+	return retval;
+}
+#endif
+
+/*
+ * This implements the "Secure Attention Key" ---  the idea is to
+ * prevent trojan horses by killing all processes associated with this
+ * tty when the user hits the "Secure Attention Key".  Required for
+ * super-paranoid applications --- see the Orange Book for more details.
+ *
+ * This code could be nicer; ideally it should send a HUP, wait a few
+ * seconds, then send a INT, and then a KILL signal.  But you then
+ * have to coordinate with the init process, since all processes associated
+ * with the current tty must be dead before the new getty is allowed
+ * to spawn.
+ *
+ * Now, if it would be correct ;-/ The current code has a nasty hole -
+ * it doesn't catch files in flight. We may send the descriptor to ourselves
+ * via AF_UNIX socket, close it and later fetch from socket. FIXME.
+ *
+ * Nasty bug: do_SAK is being called in interrupt context.  This can
+ * deadlock.  We punt it up to process context.  AKPM - 16Mar2001
+ */
+void __do_SAK(struct tty_struct *tty)
+{
+#ifdef TTY_SOFT_SAK
+	tty_hangup(tty);
+#else
+	struct task_struct *g, *p;
+	struct pid *session;
+	int		i;
+	struct file	*filp;
+	struct fdtable *fdt;
+
+	if (!tty)
+		return;
+	session = tty->session;
+
+	tty_ldisc_flush(tty);
+
+	tty_driver_flush_buffer(tty);
+
+	read_lock(&tasklist_lock);
+	/* Kill the entire session */
+	do_each_pid_task(session, PIDTYPE_SID, p) {
+		printk(KERN_NOTICE "SAK: killed process %d"
+			" (%s): task_session(p)==tty->session\n",
+			task_pid_nr(p), p->comm);
+		send_sig(SIGKILL, p, 1);
+	} while_each_pid_task(session, PIDTYPE_SID, p);
+	/* Now kill any processes that happen to have the
+	 * tty open.
+	 */
+	do_each_thread(g, p) {
+		if (p->signal->tty == tty) {
+			printk(KERN_NOTICE "SAK: killed process %d"
+			    " (%s): p->signal->tty==tty\n",
+			    task_pid_nr(p), p->comm);
+			send_sig(SIGKILL, p, 1);
+			continue;
+		}
+		task_lock(p);
+		if (p->files) {
+			/*
+			 * We don't take a ref to the file, so we must
+			 * hold ->file_lock instead.
+			 */
+			spin_lock(&p->files->file_lock);
+			fdt = files_fdtable(p->files);
+			for (i = 0; i < fdt->max_fds; i++) {
+				filp = fcheck_files(p->files, i);
+				if (!filp)
+					continue;
+				if (filp->f_op->read == tty_read &&
+				    filp->private_data == tty) {
+					printk(KERN_NOTICE "SAK: killed process %d"
+					    " (%s): fd#%d opened to the tty\n",
+					    task_pid_nr(p), p->comm, i);
+					force_sig(SIGKILL, p);
+					break;
+				}
+			}
+			spin_unlock(&p->files->file_lock);
+		}
+		task_unlock(p);
+	} while_each_thread(g, p);
+	read_unlock(&tasklist_lock);
+#endif
+}
+
+static void do_SAK_work(struct work_struct *work)
+{
+	struct tty_struct *tty =
+		container_of(work, struct tty_struct, SAK_work);
+	__do_SAK(tty);
+}
+
+/*
+ * The tq handling here is a little racy - tty->SAK_work may already be queued.
+ * Fortunately we don't need to worry, because if ->SAK_work is already queued,
+ * the values which we write to it will be identical to the values which it
+ * already has. --akpm
+ */
+void do_SAK(struct tty_struct *tty)
+{
+	if (!tty)
+		return;
+	schedule_work(&tty->SAK_work);
+}
+
+EXPORT_SYMBOL(do_SAK);
+
+/**
+ *	initialize_tty_struct
+ *	@tty: tty to initialize
+ *	@driver: tty driver owning this tty
+ *	@idx: line index of the tty within the driver
+ *
+ *	This subroutine initializes a tty structure that has been newly
+ *	allocated.
+ *
+ *	Locking: none - tty in question must not be exposed at this point
+ */
+
+void initialize_tty_struct(struct tty_struct *tty,
+		struct tty_driver *driver, int idx)
+{
+	memset(tty, 0, sizeof(struct tty_struct));
+	kref_init(&tty->kref);
+	tty->magic = TTY_MAGIC;
+	tty_ldisc_init(tty);
+	tty->session = NULL;
+	tty->pgrp = NULL;
+	tty->overrun_time = jiffies;
+	tty->buf.head = tty->buf.tail = NULL;
+	tty_buffer_init(tty);
+	mutex_init(&tty->termios_mutex);
+	mutex_init(&tty->ldisc_mutex);
+	init_waitqueue_head(&tty->write_wait);
+	init_waitqueue_head(&tty->read_wait);
+	INIT_WORK(&tty->hangup_work, do_tty_hangup);
+	mutex_init(&tty->atomic_read_lock);
+	mutex_init(&tty->atomic_write_lock);
+	mutex_init(&tty->output_lock);
+	mutex_init(&tty->echo_lock);
+	spin_lock_init(&tty->read_lock);
+	spin_lock_init(&tty->ctrl_lock);
+	INIT_LIST_HEAD(&tty->tty_files);
+	INIT_WORK(&tty->SAK_work, do_SAK_work);
+
+	tty->driver = driver;
+	tty->ops = driver->ops;
+	tty->index = idx;
+	tty_line_name(driver, idx, tty->name);
+}
+
+/**
+ *	tty_put_char	-	write one character to a tty
+ *	@tty: tty
+ *	@ch: character
+ *
+ *	Write one byte to the tty using the provided put_char method
+ *	if present. Returns the number of characters successfully output.
+ *
+ *	Note: the specific put_char operation in the driver layer may go
+ *	away soon. Don't call it directly, use this method.
+ */
+
+int tty_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	if (tty->ops->put_char)
+		return tty->ops->put_char(tty, ch);
+	return tty->ops->write(tty, &ch, 1);
+}
+EXPORT_SYMBOL_GPL(tty_put_char);
+
+struct class *tty_class;
+
+/**
+ *	tty_register_device - register a tty device
+ *	@driver: the tty driver that describes the tty device
+ *	@index: the index in the tty driver for this tty device
+ *	@device: a struct device that is associated with this tty device.
+ *		This field is optional, if there is no known struct device
+ *		for this tty device it can be set to NULL safely.
+ *
+ *	Returns a pointer to the struct device for this tty device
+ *	(or ERR_PTR(-EFOO) on error).
+ *
+ *	This call is required to be made to register an individual tty device
+ *	if the tty driver's flags have the TTY_DRIVER_DYNAMIC_DEV bit set.  If
+ *	that bit is not set, this function should not be called by a tty
+ *	driver.
+ *
+ *	Locking: ??
+ */
+
+struct device *tty_register_device(struct tty_driver *driver, unsigned index,
+				   struct device *device)
+{
+	char name[64];
+	dev_t dev = MKDEV(driver->major, driver->minor_start) + index;
+
+	if (index >= driver->num) {
+		printk(KERN_ERR "Attempt to register invalid tty line number "
+		       "(%d).\n", index);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (driver->type == TTY_DRIVER_TYPE_PTY)
+		pty_line_name(driver, index, name);
+	else
+		tty_line_name(driver, index, name);
+
+	return device_create(tty_class, device, dev, NULL, name);
+}
+EXPORT_SYMBOL(tty_register_device);
+
+/**
+ * 	tty_unregister_device - unregister a tty device
+ * 	@driver: the tty driver that describes the tty device
+ * 	@index: the index in the tty driver for this tty device
+ *
+ * 	If a tty device is registered with a call to tty_register_device() then
+ *	this function must be called when the tty device is gone.
+ *
+ *	Locking: ??
+ */
+
+void tty_unregister_device(struct tty_driver *driver, unsigned index)
+{
+	device_destroy(tty_class,
+		MKDEV(driver->major, driver->minor_start) + index);
+}
+EXPORT_SYMBOL(tty_unregister_device);
+
+struct tty_driver *alloc_tty_driver(int lines)
+{
+	struct tty_driver *driver;
+
+	driver = kzalloc(sizeof(struct tty_driver), GFP_KERNEL);
+	if (driver) {
+		kref_init(&driver->kref);
+		driver->magic = TTY_DRIVER_MAGIC;
+		driver->num = lines;
+		/* later we'll move allocation of tables here */
+	}
+	return driver;
+}
+EXPORT_SYMBOL(alloc_tty_driver);
+
+static void destruct_tty_driver(struct kref *kref)
+{
+	struct tty_driver *driver = container_of(kref, struct tty_driver, kref);
+	int i;
+	struct ktermios *tp;
+	void *p;
+
+	if (driver->flags & TTY_DRIVER_INSTALLED) {
+		/*
+		 * Free the termios and termios_locked structures because
+		 * we don't want to get memory leaks when modular tty
+		 * drivers are removed from the kernel.
+		 */
+		for (i = 0; i < driver->num; i++) {
+			tp = driver->termios[i];
+			if (tp) {
+				driver->termios[i] = NULL;
+				kfree(tp);
+			}
+			if (!(driver->flags & TTY_DRIVER_DYNAMIC_DEV))
+				tty_unregister_device(driver, i);
+		}
+		p = driver->ttys;
+		proc_tty_unregister_driver(driver);
+		driver->ttys = NULL;
+		driver->termios = NULL;
+		kfree(p);
+		cdev_del(&driver->cdev);
+	}
+	kfree(driver);
+}
+
+void tty_driver_kref_put(struct tty_driver *driver)
+{
+	kref_put(&driver->kref, destruct_tty_driver);
+}
+EXPORT_SYMBOL(tty_driver_kref_put);
+
+void tty_set_operations(struct tty_driver *driver,
+			const struct tty_operations *op)
+{
+	driver->ops = op;
+}
+EXPORT_SYMBOL(tty_set_operations);
+
+void put_tty_driver(struct tty_driver *d)
+{
+	tty_driver_kref_put(d);
+}
+EXPORT_SYMBOL(put_tty_driver);
+
+/*
+ * Called by a tty driver to register itself.
+ */
+int tty_register_driver(struct tty_driver *driver)
+{
+	int error;
+	int i;
+	dev_t dev;
+	void **p = NULL;
+
+	if (!(driver->flags & TTY_DRIVER_DEVPTS_MEM) && driver->num) {
+		p = kzalloc(driver->num * 2 * sizeof(void *), GFP_KERNEL);
+		if (!p)
+			return -ENOMEM;
+	}
+
+	if (!driver->major) {
+		error = alloc_chrdev_region(&dev, driver->minor_start,
+						driver->num, driver->name);
+		if (!error) {
+			driver->major = MAJOR(dev);
+			driver->minor_start = MINOR(dev);
+		}
+	} else {
+		dev = MKDEV(driver->major, driver->minor_start);
+		error = register_chrdev_region(dev, driver->num, driver->name);
+	}
+	if (error < 0) {
+		kfree(p);
+		return error;
+	}
+
+	if (p) {
+		driver->ttys = (struct tty_struct **)p;
+		driver->termios = (struct ktermios **)(p + driver->num);
+	} else {
+		driver->ttys = NULL;
+		driver->termios = NULL;
+	}
+
+	cdev_init(&driver->cdev, &tty_fops);
+	driver->cdev.owner = driver->owner;
+	error = cdev_add(&driver->cdev, dev, driver->num);
+	if (error) {
+		unregister_chrdev_region(dev, driver->num);
+		driver->ttys = NULL;
+		driver->termios = NULL;
+		kfree(p);
+		return error;
+	}
+
+	mutex_lock(&tty_mutex);
+	list_add(&driver->tty_drivers, &tty_drivers);
+	mutex_unlock(&tty_mutex);
+
+	if (!(driver->flags & TTY_DRIVER_DYNAMIC_DEV)) {
+		for (i = 0; i < driver->num; i++)
+		    tty_register_device(driver, i, NULL);
+	}
+	proc_tty_register_driver(driver);
+	driver->flags |= TTY_DRIVER_INSTALLED;
+	return 0;
+}
+
+EXPORT_SYMBOL(tty_register_driver);
+
+/*
+ * Called by a tty driver to unregister itself.
+ */
+int tty_unregister_driver(struct tty_driver *driver)
+{
+#if 0
+	/* FIXME */
+	if (driver->refcount)
+		return -EBUSY;
+#endif
+	unregister_chrdev_region(MKDEV(driver->major, driver->minor_start),
+				driver->num);
+	mutex_lock(&tty_mutex);
+	list_del(&driver->tty_drivers);
+	mutex_unlock(&tty_mutex);
+	return 0;
+}
+
+EXPORT_SYMBOL(tty_unregister_driver);
+
+dev_t tty_devnum(struct tty_struct *tty)
+{
+	return MKDEV(tty->driver->major, tty->driver->minor_start) + tty->index;
+}
+EXPORT_SYMBOL(tty_devnum);
+
+void proc_clear_tty(struct task_struct *p)
+{
+	unsigned long flags;
+	struct tty_struct *tty;
+	spin_lock_irqsave(&p->sighand->siglock, flags);
+	tty = p->signal->tty;
+	p->signal->tty = NULL;
+	spin_unlock_irqrestore(&p->sighand->siglock, flags);
+	tty_kref_put(tty);
+}
+
+/* Called under the sighand lock */
+
+static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty)
+{
+	if (tty) {
+		unsigned long flags;
+		/* We should not have a session or pgrp to put here but.... */
+		spin_lock_irqsave(&tty->ctrl_lock, flags);
+		put_pid(tty->session);
+		put_pid(tty->pgrp);
+		tty->pgrp = get_pid(task_pgrp(tsk));
+		spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+		tty->session = get_pid(task_session(tsk));
+		if (tsk->signal->tty) {
+			printk(KERN_DEBUG "tty not NULL!!\n");
+			tty_kref_put(tsk->signal->tty);
+		}
+	}
+	put_pid(tsk->signal->tty_old_pgrp);
+	tsk->signal->tty = tty_kref_get(tty);
+	tsk->signal->tty_old_pgrp = NULL;
+}
+
+static void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty)
+{
+	spin_lock_irq(&tsk->sighand->siglock);
+	__proc_set_tty(tsk, tty);
+	spin_unlock_irq(&tsk->sighand->siglock);
+}
+
+struct tty_struct *get_current_tty(void)
+{
+	struct tty_struct *tty;
+	unsigned long flags;
+
+	spin_lock_irqsave(&current->sighand->siglock, flags);
+	tty = tty_kref_get(current->signal->tty);
+	spin_unlock_irqrestore(&current->sighand->siglock, flags);
+	return tty;
+}
+EXPORT_SYMBOL_GPL(get_current_tty);
+
+void tty_default_fops(struct file_operations *fops)
+{
+	*fops = tty_fops;
+}
+
+/*
+ * Initialize the console device. This is called *early*, so
+ * we can't necessarily depend on lots of kernel help here.
+ * Just do some early initializations, and do the complex setup
+ * later.
+ */
+void __init console_init(void)
+{
+	initcall_t *call;
+
+	/* Setup the default TTY line discipline. */
+	tty_ldisc_begin();
+
+	/*
+	 * set up the console device so that later boot sequences can
+	 * inform about problems etc..
+	 */
+	call = __con_initcall_start;
+	while (call < __con_initcall_end) {
+		(*call)();
+		call++;
+	}
+}
+
+static char *tty_devnode(struct device *dev, mode_t *mode)
+{
+	if (!mode)
+		return NULL;
+	if (dev->devt == MKDEV(TTYAUX_MAJOR, 0) ||
+	    dev->devt == MKDEV(TTYAUX_MAJOR, 2))
+		*mode = 0666;
+	return NULL;
+}
+
+static int __init tty_class_init(void)
+{
+	tty_class = class_create(THIS_MODULE, "tty");
+	if (IS_ERR(tty_class))
+		return PTR_ERR(tty_class);
+	tty_class->devnode = tty_devnode;
+	return 0;
+}
+
+postcore_initcall(tty_class_init);
+
+/* 3/2004 jmc: why do these devices exist? */
+
+static struct cdev tty_cdev, console_cdev;
+
+/*
+ * Ok, now we can initialize the rest of the tty devices and can count
+ * on memory allocations, interrupts etc..
+ */
+int __init tty_init(void)
+{
+	cdev_init(&tty_cdev, &tty_fops);
+	if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
+	    register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0)
+		panic("Couldn't register /dev/tty driver\n");
+	device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 0), NULL,
+			      "tty");
+
+	cdev_init(&console_cdev, &console_fops);
+	if (cdev_add(&console_cdev, MKDEV(TTYAUX_MAJOR, 1), 1) ||
+	    register_chrdev_region(MKDEV(TTYAUX_MAJOR, 1), 1, "/dev/console") < 0)
+		panic("Couldn't register /dev/console driver\n");
+	device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 1), NULL,
+			      "console");
+
+#ifdef CONFIG_VT
+	vty_init(&console_fops);
+#endif
+	return 0;
+}
+
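Illustrative only, not part of this change: a minimal sketch of how a serial driver might register itself against the tty core API added above. The driver name, the "ttyEX" device prefix and the example_ops tty_operations table are assumptions, and the usual includes (linux/module.h, linux/tty.h, linux/tty_driver.h) are omitted for brevity.

/* Hypothetical registration sketch for the tty core API above (not part of
 * this patch); example_ops is assumed to be defined by the driver. */
static struct tty_driver *example_driver;

static int __init example_tty_init(void)
{
	int ret;

	example_driver = alloc_tty_driver(4);		/* four lines */
	if (!example_driver)
		return -ENOMEM;

	example_driver->owner = THIS_MODULE;
	example_driver->driver_name = "example_serial";
	example_driver->name = "ttyEX";
	example_driver->major = 0;			/* let the core allocate one */
	example_driver->type = TTY_DRIVER_TYPE_SERIAL;
	example_driver->subtype = SERIAL_TYPE_NORMAL;
	example_driver->init_termios = tty_std_termios;
	example_driver->flags = TTY_DRIVER_REAL_RAW;
	tty_set_operations(example_driver, &example_ops);

	ret = tty_register_driver(example_driver);
	if (ret)
		put_tty_driver(example_driver);
	return ret;
}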
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 0a5bea9..ff15497 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -41,7 +41,11 @@
 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
 #ifdef CONFIG_HOTPLUG_CPU
 /* This one keeps track of the previously set governor of a removed CPU */
-static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
+struct cpufreq_cpu_save_data {
+	char gov[CPUFREQ_NAME_LEN];
+	unsigned int max, min;
+};
+static DEFINE_PER_CPU(struct cpufreq_cpu_save_data, cpufreq_policy_save);
 #endif
 static DEFINE_SPINLOCK(cpufreq_driver_lock);
 
@@ -68,7 +72,7 @@
 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
 
 #define lock_policy_rwsem(mode, cpu)					\
-static int lock_policy_rwsem_##mode					\
+int lock_policy_rwsem_##mode						\
 (int cpu)								\
 {									\
 	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
@@ -93,7 +97,7 @@
 	up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
 }
 
-static void unlock_policy_rwsem_write(int cpu)
+void unlock_policy_rwsem_write(int cpu)
 {
 	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
 	BUG_ON(policy_cpu == -1);
@@ -688,12 +692,22 @@
 #ifdef CONFIG_HOTPLUG_CPU
 	struct cpufreq_governor *gov;
 
-	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
+	gov = __find_governor(per_cpu(cpufreq_policy_save, cpu).gov);
 	if (gov) {
 		policy->governor = gov;
 		pr_debug("Restoring governor %s for cpu %d\n",
 		       policy->governor->name, cpu);
 	}
+	if (per_cpu(cpufreq_policy_save, cpu).min) {
+		policy->min = per_cpu(cpufreq_policy_save, cpu).min;
+		policy->user_policy.min = policy->min;
+	}
+	if (per_cpu(cpufreq_policy_save, cpu).max) {
+		policy->max = per_cpu(cpufreq_policy_save, cpu).max;
+		policy->user_policy.max = policy->max;
+	}
+	pr_debug("Restoring CPU%d min %d and max %d\n",
+		cpu, policy->min, policy->max);
 #endif
 
 	for_each_cpu(j, policy->cpus) {
@@ -1043,8 +1057,12 @@
 #ifdef CONFIG_SMP
 
 #ifdef CONFIG_HOTPLUG_CPU
-	strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
+	strncpy(per_cpu(cpufreq_policy_save, cpu).gov, data->governor->name,
 			CPUFREQ_NAME_LEN);
+	per_cpu(cpufreq_policy_save, cpu).min = data->min;
+	per_cpu(cpufreq_policy_save, cpu).max = data->max;
+	pr_debug("Saving CPU%d policy min %d and max %d\n",
+			cpu, data->min, data->max);
 #endif
 
 	/* if we have other CPUs still registered, we need to unlink them,
@@ -1068,8 +1086,12 @@
 				continue;
 			pr_debug("removing link for cpu %u\n", j);
 #ifdef CONFIG_HOTPLUG_CPU
-			strncpy(per_cpu(cpufreq_cpu_governor, j),
+			strncpy(per_cpu(cpufreq_policy_save, j).gov,
 				data->governor->name, CPUFREQ_NAME_LEN);
+			per_cpu(cpufreq_policy_save, j).min = data->min;
+			per_cpu(cpufreq_policy_save, j).max = data->max;
+			pr_debug("Saving CPU%d policy min %d and max %d\n",
+					j, data->min, data->max);
 #endif
 			cpu_sys_dev = get_cpu_sysdev(j);
 			kobj = &cpu_sys_dev->kobj;
@@ -1555,8 +1577,11 @@
 	for_each_present_cpu(cpu) {
 		if (cpu_online(cpu))
 			continue;
-		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
-			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
+		if (!strcmp(per_cpu(cpufreq_policy_save, cpu).gov,
+					governor->name))
+			strcpy(per_cpu(cpufreq_policy_save, cpu).gov, "\0");
+		per_cpu(cpufreq_policy_save, cpu).min = 0;
+		per_cpu(cpufreq_policy_save, cpu).max = 0;
 	}
 #endif
 
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 891360e..a175ae7 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -22,6 +22,9 @@
 #include <linux/tick.h>
 #include <linux/ktime.h>
 #include <linux/sched.h>
+#include <linux/input.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
 
 /*
  * dbs is used in this file as a shortform for demandbased switching
@@ -37,6 +40,7 @@
 #define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
 #define MIN_FREQUENCY_UP_THRESHOLD		(11)
 #define MAX_FREQUENCY_UP_THRESHOLD		(100)
+#define MIN_FREQUENCY_DOWN_DIFFERENTIAL		(1)
 
 /*
  * The polling frequency of this governor depends on the capability of
@@ -103,6 +107,10 @@
  */
 static DEFINE_MUTEX(dbs_mutex);
 
+static struct workqueue_struct *input_wq;
+
+static DEFINE_PER_CPU(struct work_struct, dbs_refresh_work);
+
 static struct dbs_tuners {
 	unsigned int sampling_rate;
 	unsigned int up_threshold;
@@ -252,6 +260,7 @@
 show_one(sampling_rate, sampling_rate);
 show_one(io_is_busy, io_is_busy);
 show_one(up_threshold, up_threshold);
+show_one(down_differential, down_differential);
 show_one(sampling_down_factor, sampling_down_factor);
 show_one(ignore_nice_load, ignore_nice);
 show_one(powersave_bias, powersave_bias);
@@ -296,6 +305,23 @@
 	return count;
 }
 
+static ssize_t store_down_differential(struct kobject *a, struct attribute *b,
+		const char *buf, size_t count)
+{
+	unsigned int input;
+	int ret;
+	ret = sscanf(buf, "%u", &input);
+
+	if (ret != 1 || input >= dbs_tuners_ins.up_threshold ||
+			input < MIN_FREQUENCY_DOWN_DIFFERENTIAL) {
+		return -EINVAL;
+	}
+
+	dbs_tuners_ins.down_differential = input;
+
+	return count;
+}
+
 static ssize_t store_sampling_down_factor(struct kobject *a,
 			struct attribute *b, const char *buf, size_t count)
 {
@@ -370,6 +396,7 @@
 define_one_global_rw(sampling_rate);
 define_one_global_rw(io_is_busy);
 define_one_global_rw(up_threshold);
+define_one_global_rw(down_differential);
 define_one_global_rw(sampling_down_factor);
 define_one_global_rw(ignore_nice_load);
 define_one_global_rw(powersave_bias);
@@ -378,6 +405,7 @@
 	&sampling_rate_min.attr,
 	&sampling_rate.attr,
 	&up_threshold.attr,
+	&down_differential.attr,
 	&sampling_down_factor.attr,
 	&ignore_nice_load.attr,
 	&powersave_bias.attr,
@@ -619,6 +647,89 @@
 	return 0;
 }
 
+static void dbs_refresh_callback(struct work_struct *unused)
+{
+	struct cpufreq_policy *policy;
+	struct cpu_dbs_info_s *this_dbs_info;
+	unsigned int cpu = smp_processor_id();
+
+	if (lock_policy_rwsem_write(cpu) < 0)
+		return;
+
+	this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
+	policy = this_dbs_info->cur_policy;
+
+	if (policy->cur < policy->max) {
+		policy->cur = policy->max;
+
+		__cpufreq_driver_target(policy, policy->max,
+					CPUFREQ_RELATION_L);
+		this_dbs_info->prev_cpu_idle = get_cpu_idle_time(cpu,
+				&this_dbs_info->prev_cpu_wall);
+	}
+	unlock_policy_rwsem_write(cpu);
+}
+
+static void dbs_input_event(struct input_handle *handle, unsigned int type,
+		unsigned int code, int value)
+{
+	int i;
+
+	for_each_online_cpu(i) {
+		queue_work_on(i, input_wq, &per_cpu(dbs_refresh_work, i));
+	}
+}
+
+static int dbs_input_connect(struct input_handler *handler,
+		struct input_dev *dev, const struct input_device_id *id)
+{
+	struct input_handle *handle;
+	int error;
+
+	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	handle->dev = dev;
+	handle->handler = handler;
+	handle->name = "cpufreq";
+
+	error = input_register_handle(handle);
+	if (error)
+		goto err2;
+
+	error = input_open_device(handle);
+	if (error)
+		goto err1;
+
+	return 0;
+err1:
+	input_unregister_handle(handle);
+err2:
+	kfree(handle);
+	return error;
+}
+
+static void dbs_input_disconnect(struct input_handle *handle)
+{
+	input_close_device(handle);
+	input_unregister_handle(handle);
+	kfree(handle);
+}
+
+static const struct input_device_id dbs_ids[] = {
+	{ .driver_info = 1 },
+	{ },
+};
+
+static struct input_handler dbs_input_handler = {
+	.event		= dbs_input_event,
+	.connect	= dbs_input_connect,
+	.disconnect	= dbs_input_disconnect,
+	.name		= "cpufreq_ond",
+	.id_table	= dbs_ids,
+};
+
 static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 				   unsigned int event)
 {
@@ -678,6 +789,8 @@
 				    latency * LATENCY_MULTIPLIER);
 			dbs_tuners_ins.io_is_busy = should_io_be_busy();
 		}
+		if (!cpu)
+			rc = input_register_handler(&dbs_input_handler);
 		mutex_unlock(&dbs_mutex);
 
 		mutex_init(&this_dbs_info->timer_mutex);
@@ -690,6 +803,8 @@
 		mutex_lock(&dbs_mutex);
 		mutex_destroy(&this_dbs_info->timer_mutex);
 		dbs_enable--;
+		if (!cpu)
+			input_unregister_handler(&dbs_input_handler);
 		mutex_unlock(&dbs_mutex);
 		if (!dbs_enable)
 			sysfs_remove_group(cpufreq_global_kobject,
@@ -715,6 +830,7 @@
 {
 	cputime64_t wall;
 	u64 idle_time;
+	unsigned int i;
 	int cpu = get_cpu();
 
 	idle_time = get_cpu_idle_time_us(cpu, &wall);
@@ -736,12 +852,22 @@
 			MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
 	}
 
+	input_wq = create_workqueue("iewq");
+	if (!input_wq) {
+		printk(KERN_ERR "Failed to create iewq workqueue\n");
+		return -ENOMEM;
+	}
+	for_each_possible_cpu(i) {
+		INIT_WORK(&per_cpu(dbs_refresh_work, i), dbs_refresh_callback);
+	}
+
 	return cpufreq_register_governor(&cpufreq_gov_ondemand);
 }
 
 static void __exit cpufreq_gov_dbs_exit(void)
 {
 	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
+	destroy_workqueue(input_wq);
 }
 
 
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index c315ec9..f6fba49 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -350,6 +350,7 @@
 		cpufreq_update_policy(cpu);
 		break;
 	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
 		cpufreq_stats_free_sysfs(cpu);
 		break;
 	case CPU_DEAD:
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index e0b25de..8850516 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -292,4 +292,46 @@
 	  Select this to offload Samsung S5PV210 or S5PC110 from AES
 	  algorithms execution.
 
+config CRYPTO_DEV_QCE40
+	bool
+
+config CRYPTO_DEV_QCRYPTO
+	tristate "Qualcomm Crypto accelerator"
+	select CRYPTO_DES
+	select CRYPTO_ALGAPI
+	select CRYPTO_AUTHENC
+	select CRYPTO_BLKCIPHER
+	default n
+	help
+          This driver supports Qualcomm crypto acceleration.
+          To compile this driver as a module, choose M here: the
+          module will be called qcrypto.
+
+config CRYPTO_DEV_QCE
+	tristate "Qualcomm Crypto Engine (QCE) module"
+	select  CRYPTO_DEV_QCE40 if ARCH_MSM8960
+	default n
+	help
+	  This driver supports the Qualcomm Crypto Engine in MSM7x30, MSM8660,
+	  MSM8x55 and MSM8960.
+	  To compile this driver as a module, choose M here: for MSM7x30,
+	  MSM8660 and MSM8x55 the module will be called qce; for MSM8960
+	  it will be called qce40.
+
+config CRYPTO_DEV_QCEDEV
+	tristate "QCEDEV Interface to CE module"
+	default n
+	help
+	  This driver supports Qualcomm QCEDEV Crypto in MSM7x30, MSM8660 and
+	  MSM8960. It exposes the interface to the QCE hardware accelerator
+	  via IOCTLs.
+	  To compile this driver as a module, choose M here: the
+	  module will be called qcedev.
+
+config CRYPTO_DEV_OTA_CRYPTO
+	tristate "OTA Crypto module"
+	help
+          This driver supports Qualcomm OTA Crypto in the FSM9xxx.
+	  To compile this driver as a module, choose M here: the
+	  module will be called ota_crypto.
+
 endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 53ea501..549a7b2 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -9,6 +9,7 @@
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
 obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
+obj-$(CONFIG_CRYPTO_DEV_QCE) += msm/
 obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
 obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
diff --git a/drivers/crypto/msm/Makefile b/drivers/crypto/msm/Makefile
new file mode 100644
index 0000000..61406b9
--- /dev/null
+++ b/drivers/crypto/msm/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qcedev.o
+ifeq ($(CONFIG_CRYPTO_DEV_QCE40), y)
+	obj-$(CONFIG_CRYPTO_DEV_QCE) += qce40.o
+else
+	obj-$(CONFIG_CRYPTO_DEV_QCE) += qce.o
+endif
+obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcrypto.o
+obj-$(CONFIG_CRYPTO_DEV_OTA_CRYPTO) += ota_crypto.o
diff --git a/drivers/crypto/msm/inc/qce.h b/drivers/crypto/msm/inc/qce.h
new file mode 100644
index 0000000..7230036
--- /dev/null
+++ b/drivers/crypto/msm/inc/qce.h
@@ -0,0 +1,160 @@
+/* Qualcomm Crypto Engine driver API
+ *
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+#ifndef __CRYPTO_MSM_QCE_H
+#define __CRYPTO_MSM_QCE_H
+
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/crypto.h>
+
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/sha.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+
+/* SHA digest size  in bytes */
+#define SHA256_DIGESTSIZE		32
+#define SHA1_DIGESTSIZE			20
+
+/* key size in bytes */
+#define HMAC_KEY_SIZE			(SHA1_DIGESTSIZE)    /* hmac-sha1 */
+#define SHA_HMAC_KEY_SIZE		64
+#define DES_KEY_SIZE			8
+#define TRIPLE_DES_KEY_SIZE		24
+#define AES128_KEY_SIZE			16
+#define AES192_KEY_SIZE			24
+#define AES256_KEY_SIZE			32
+#define MAX_CIPHER_KEY_SIZE		AES256_KEY_SIZE
+
+/* iv length in bytes */
+#define AES_IV_LENGTH			16
+#define DES_IV_LENGTH                   8
+#define MAX_IV_LENGTH			AES_IV_LENGTH
+
+/* Maximum number of bytes per transfer */
+#define QCE_MAX_OPER_DATA		0x8000
+
+/* Maximum Nonce bytes  */
+#define MAX_NONCE  16
+
+typedef void (*qce_comp_func_ptr_t)(void *areq,
+		unsigned char *icv, unsigned char *iv, int ret);
+
+/* Cipher algorithms supported */
+enum qce_cipher_alg_enum {
+	CIPHER_ALG_DES = 0,
+	CIPHER_ALG_3DES = 1,
+	CIPHER_ALG_AES = 2,
+	CIPHER_ALG_LAST
+};
+
+/* Hash and hmac algorithms supported */
+enum qce_hash_alg_enum {
+	QCE_HASH_SHA1   = 0,
+	QCE_HASH_SHA256 = 1,
+	QCE_HASH_SHA1_HMAC   = 2,
+	QCE_HASH_SHA256_HMAC = 3,
+	QCE_HASH_AES_CMAC = 4,
+	QCE_HASH_LAST
+};
+
+/* Cipher encryption/decryption operations */
+enum qce_cipher_dir_enum {
+	QCE_ENCRYPT = 0,
+	QCE_DECRYPT = 1,
+	QCE_CIPHER_DIR_LAST
+};
+
+/* Cipher algorithms modes */
+enum qce_cipher_mode_enum {
+	QCE_MODE_CBC = 0,
+	QCE_MODE_ECB = 1,
+	QCE_MODE_CTR = 2,
+	QCE_MODE_XTS = 3,
+	QCE_MODE_CCM = 4,
+	QCE_CIPHER_MODE_LAST
+};
+
+/* Cipher operation type */
+enum qce_req_op_enum {
+	QCE_REQ_ABLK_CIPHER = 0,
+	QCE_REQ_ABLK_CIPHER_NO_KEY = 1,
+	QCE_REQ_AEAD = 2,
+	QCE_REQ_LAST
+};
+
+/* Algorithms/features supported in CE HW engine */
+struct ce_hw_support {
+	bool sha1_hmac_20; /* Supports 20 bytes of HMAC key*/
+	bool sha1_hmac; /* supports max HMAC key of 64 bytes*/
+	bool sha256_hmac; /* supports max HMAC key of 64 bytes*/
+	bool sha_hmac; /* supports SHA1 and SHA256 MAX HMAC key of 64 bytes*/
+	bool cmac;
+	bool aes_key_192;
+	bool aes_xts;
+	bool aes_ccm;
+	bool ota;
+};
+
+/* Sha operation parameters */
+struct qce_sha_req {
+	qce_comp_func_ptr_t qce_cb;	/* call back */
+	enum qce_hash_alg_enum alg;	/* sha algorithm */
+	unsigned char *digest;		/* sha digest  */
+	struct scatterlist *src;	/* pointer to scatter list entry */
+	uint32_t  auth_data[4];		/* byte count */
+	unsigned char *authkey;		/* auth key */
+	unsigned int  authklen;		/* auth key length */
+	bool first_blk;			/* first block indicator */
+	bool last_blk;			/* last block indicator */
+	unsigned int size;		/* data length in bytes */
+	void *areq;
+};
+
+struct qce_req {
+	enum qce_req_op_enum op;	/* operation type */
+	qce_comp_func_ptr_t qce_cb;	/* call back */
+	void *areq;
+	enum qce_cipher_alg_enum   alg;	/* cipher algorithms*/
+	enum qce_cipher_dir_enum dir;	/* encryption? decryption? */
+	enum qce_cipher_mode_enum mode;	/* algorithm mode */
+	unsigned char *authkey;		/* authentication key  */
+	unsigned int authklen;		/* authentication key length */
+	unsigned int authsize;		/* authentication tag (ICV) size */
+	unsigned char  nonce[MAX_NONCE];/* nonce for ccm mode */
+	unsigned char *assoc;		/* Ptr to formatted associated data */
+	unsigned int assoclen;		/* Formatted associated data length  */
+	struct scatterlist *asg;	/* Formatted associated data sg  */
+	unsigned char *enckey;		/* cipher key  */
+	unsigned int encklen;		/* cipher key length */
+	unsigned char *iv;		/* initialization vector */
+	unsigned int ivsize;		/* initialization vector size*/
+	unsigned int cryptlen;		/* data length */
+	unsigned int use_pmem;		/* is source of data PMEM allocated? */
+	struct qcedev_pmem_info *pmem;	/* pointer to pmem_info structure*/
+};
+
+void *qce_open(struct platform_device *pdev, int *rc);
+int qce_close(void *handle);
+int qce_aead_req(void *handle, struct qce_req *req);
+int qce_ablk_cipher_req(void *handle, struct qce_req *req);
+int qce_hw_support(void *handle, struct ce_hw_support *support);
+int qce_process_sha_req(void *handle, struct qce_sha_req *s_req);
+
+#endif /* __CRYPTO_MSM_QCE_H */
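Illustrative only, not part of this patch: a rough sketch of a kernel client driving the qce interface declared above to hash one scatterlist with SHA-1. The callback name and the assumption that the caller already holds a bound platform_device are hypothetical; a real client would wait for the completion callback before tearing the handle down.

/* Hypothetical qce usage sketch (not part of this patch). */
static void example_qce_done(void *areq, unsigned char *icv,
			     unsigned char *iv, int ret)
{
	/* Completion callback; areq is the cookie set in qce_sha_req.areq. */
}

static int example_sha1(struct platform_device *pdev, struct scatterlist *sg,
			unsigned int len, unsigned char *digest)
{
	struct qce_sha_req sreq;
	void *handle;
	int rc;

	handle = qce_open(pdev, &rc);		/* claim the crypto engine */
	if (!handle)
		return rc;

	memset(&sreq, 0, sizeof(sreq));
	sreq.qce_cb = example_qce_done;
	sreq.alg = QCE_HASH_SHA1;
	sreq.digest = digest;			/* SHA1_DIGESTSIZE bytes */
	sreq.src = sg;
	sreq.first_blk = true;
	sreq.last_blk = true;
	sreq.size = len;
	sreq.areq = NULL;			/* caller cookie for the callback */

	rc = qce_process_sha_req(handle, &sreq);
	/* A real client would wait for example_qce_done() before closing. */
	qce_close(handle);
	return rc;
}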
diff --git a/drivers/crypto/msm/inc/qce_ota.h b/drivers/crypto/msm/inc/qce_ota.h
new file mode 100644
index 0000000..f21bd0b
--- /dev/null
+++ b/drivers/crypto/msm/inc/qce_ota.h
@@ -0,0 +1,31 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* Qualcomm Crypto Engine driver OTA API */
+
+#ifndef __CRYPTO_MSM_QCE_OTA_H
+#define __CRYPTO_MSM_QCE_OTA_H
+
+#include <linux/platform_device.h>
+#include <linux/qcota.h>
+#include <inc/qce.h>
+
+
+int qce_f8_req(void *handle, struct qce_f8_req *req,
+		void *cookie, qce_comp_func_ptr_t qce_cb);
+int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *req,
+		void *cookie, qce_comp_func_ptr_t qce_cb);
+int qce_f9_req(void *handle, struct qce_f9_req *req,
+		void *cookie, qce_comp_func_ptr_t qce_cb);
+
+#endif /* __CRYPTO_MSM_QCE_OTA_H */
diff --git a/drivers/crypto/msm/inc/qcedev.h b/drivers/crypto/msm/inc/qcedev.h
new file mode 100644
index 0000000..893251f
--- /dev/null
+++ b/drivers/crypto/msm/inc/qcedev.h
@@ -0,0 +1,267 @@
+/* Qualcomm Crypto Engine driver QCEDEV API
+ *
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __QCEDEV__H
+#define __QCEDEV__H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define QCEDEV_MAX_SHA_BLOCK_SIZE	64
+#define QCEDEV_MAX_BEARER	31
+#define QCEDEV_MAX_KEY_SIZE	64
+#define QCEDEV_MAX_IV_SIZE	32
+
+#define QCEDEV_MAX_BUFFERS      16
+#define QCEDEV_MAX_SHA_DIGEST	32
+
+#define QCEDEV_USE_PMEM		1
+#define QCEDEV_NO_PMEM		0
+
+#define QCEDEV_AES_KEY_128	16
+#define QCEDEV_AES_KEY_192	24
+#define QCEDEV_AES_KEY_256	32
+/**
+*qcedev_oper_enum: Operation types
+* @QCEDEV_OPER_ENC:		Encrypt
+* @QCEDEV_OPER_DEC:		Decrypt
+* @QCEDEV_OPER_ENC_NO_KEY:	Encrypt. Do not need key to be specified by
+*				user. Key already set by an external processor.
+* @QCEDEV_OPER_DEC_NO_KEY:	Decrypt. Do not need the key to be specified by
+*				user. Key already set by an external processor.
+*/
+enum qcedev_oper_enum {
+  QCEDEV_OPER_DEC		= 0,
+  QCEDEV_OPER_ENC		= 1,
+  QCEDEV_OPER_DEC_NO_KEY	= 2,
+  QCEDEV_OPER_ENC_NO_KEY	= 3,
+  QCEDEV_OPER_LAST
+};
+
+/**
+*qcedev_cipher_alg_enum: Cipher algorithm types
+* @QCEDEV_ALG_DES:		DES
+* @QCEDEV_ALG_3DES:		3DES
+* @QCEDEV_ALG_AES:		AES
+*/
+enum qcedev_cipher_alg_enum {
+	QCEDEV_ALG_DES		= 0,
+	QCEDEV_ALG_3DES		= 1,
+	QCEDEV_ALG_AES		= 2,
+	QCEDEV_ALG_LAST
+};
+
+/**
+*qcedev_cipher_mode_enum: AES/DES mode
+* @QCEDEV_AES_MODE_CBC:		CBC
+* @QCEDEV_AES_MODE_ECB:		ECB
+* @QCEDEV_AES_MODE_CTR:		CTR
+* @QCEDEV_AES_MODE_XTS:		XTS
+* @QCEDEV_AES_MODE_CCM:		CCM
+* @QCEDEV_DES_MODE_CBC:		CBC
+* @QCEDEV_DES_MODE_ECB:		ECB
+*/
+enum qcedev_cipher_mode_enum {
+	QCEDEV_AES_MODE_CBC	= 0,
+	QCEDEV_AES_MODE_ECB	= 1,
+	QCEDEV_AES_MODE_CTR	= 2,
+	QCEDEV_AES_MODE_XTS	= 3,
+	QCEDEV_AES_MODE_CCM	= 4,
+	QCEDEV_DES_MODE_CBC	= 5,
+	QCEDEV_DES_MODE_ECB	= 6,
+	QCEDEV_AES_DES_MODE_LAST
+};
+
+/**
+*enum qcedev_sha_alg_enum : Secure Hashing Algorithm
+* @QCEDEV_ALG_SHA1:		Digest returned: 20 bytes (160 bits)
+* @QCEDEV_ALG_SHA256:		Digest returned: 32 bytes (256 bit)
+* @QCEDEV_ALG_SHA1_HMAC:	HMAC returned 20 bytes (160 bits)
+* @QCEDEV_ALG_SHA256_HMAC:	HMAC returned 32 bytes (256 bit)
+* @QCEDEV_ALG_AES_CMAC:		Configurable MAC size
+*/
+enum qcedev_sha_alg_enum {
+	QCEDEV_ALG_SHA1		= 0,
+	QCEDEV_ALG_SHA256	= 1,
+	QCEDEV_ALG_SHA1_HMAC	= 2,
+	QCEDEV_ALG_SHA256_HMAC	= 3,
+	QCEDEV_ALG_AES_CMAC	= 4,
+	QCEDEV_ALG_SHA_ALG_LAST
+};
+
+/**
+* struct buf_info - Buffer information
+* @offset:			Offset from the base address of the buffer
+*				(Used when buffer is allocated using PMEM)
+* @vaddr:			Virtual buffer address pointer
+* @len:				Size of the buffer
+*/
+struct	buf_info {
+	union{
+		uint32_t	offset;
+		uint8_t		*vaddr;
+	};
+	uint32_t	len;
+};
+
+/**
+* struct qcedev_vbuf_info - Source and destination Buffer information
+* @src:				Array of buf_info for input/source
+* @dst:				Array of buf_info for output/destination
+*/
+struct	qcedev_vbuf_info {
+	struct buf_info	src[QCEDEV_MAX_BUFFERS];
+	struct buf_info	dst[QCEDEV_MAX_BUFFERS];
+};
+
+struct	qcedev_sha_ctxt{
+	uint32_t		auth_data[4];
+	uint8_t			digest[QCEDEV_MAX_SHA_DIGEST];
+	uint32_t		diglen;
+	uint8_t			trailing_buf[64];
+	uint32_t		trailing_buf_len;
+	uint8_t			first_blk;
+	uint8_t			last_blk;
+	uint8_t			authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];
+};
+
+/**
+* struct qcedev_pmem_info - Stores PMEM buffer information
+* @fd_src:			Handle to /dev/adsp_pmem used to allocate
+*				memory for input/src buffer
+* @src:				Array of buf_info for input/source
+* @fd_dst:			Handle to /dev/adsp_pmem used to allocate
+*				memory for output/dst buffer
+* @dst:				Array of buf_info for output/destination
+* @pmem_src_offset:		The offset from input/src buffer
+*				(allocated by PMEM)
+*/
+struct	qcedev_pmem_info{
+	int		fd_src;
+	struct buf_info	src[QCEDEV_MAX_BUFFERS];
+	int		fd_dst;
+	struct buf_info	dst[QCEDEV_MAX_BUFFERS];
+};
+
+/**
+* struct qcedev_cipher_op_req - Holds the ciphering request information
+* @use_pmem (IN):	Flag to indicate if buffer source is PMEM
+*			QCEDEV_USE_PMEM/QCEDEV_NO_PMEM
+* @pmem (IN):		Stores PMEM buffer information.
+*			Refer struct qcedev_pmem_info
+* @vbuf (IN/OUT):	Stores Source and destination Buffer information
+*			Refer to struct qcedev_vbuf_info
+* @data_len (IN):	Total Length of input/src and output/dst in bytes
+* @in_place_op (IN):	Indicates whether the operation is inplace where
+*			source == destination
+*			When using PMEM allocated memory, this must be set to 1
+* @enckey (IN):		128 bits of confidentiality key
+*			enckey[0] bit 127-120, enckey[1] bit 119-112,..
+*			enckey[15] bit 7-0
+* @encklen (IN):	Length of the encryption key (set to 128 bits/16
+*			bytes in the driver)
+* @iv (IN/OUT):		Initialisation vector data
+*			This is updated by the driver, incremented by
+*			number of blocks encrypted/decrypted.
+* @ivlen (IN):		Length of the IV
+* @byteoffset (IN):	Offset in the Cipher BLOCK (applicable and to be set
+*			for AES-128 CTR mode only)
+* @alg (IN):		Type of ciphering algorithm: AES/DES/3DES
+* @mode (IN):		Mode to use when using the AES algorithm: ECB/CBC/CTR
+*			Applicable when using the AES algorithm only
+* @op (IN):		Type of operation: QCEDEV_OPER_DEC/QCEDEV_OPER_ENC or
+*			QCEDEV_OPER_ENC_NO_KEY/QCEDEV_OPER_DEC_NO_KEY
+*
+* If use_pmem is set to 0, the driver assumes that memory was not allocated
+* via PMEM; the kernel will allocate memory, copy data from the user
+* space buffers (data_src/data_dst), process it, and copy the data back
+* to the user space buffer.
+*
+* If use_pmem is set to 1, the driver assumes that memory was allocated via
+* PMEM.
+* The kernel driver will use the fd_src to determine the kernel virtual address
+* base that maps to the user space virtual address base for the  buffer
+* allocated in user space.
+* The final input/src and output/dst buffer pointer will be determined
+* by adding the offsets to the kernel virtual addr.
+*
+* If use of a hardware key is supported on the target, the user can configure
+* the key parameters (encklen, enckey) to use the hardware key.
+* In order to use the hardware key, set encklen to 0 and set the enckey
+* data array to 0.
+*/
+struct	qcedev_cipher_op_req {
+	uint8_t				use_pmem;
+	union{
+		struct qcedev_pmem_info	pmem;
+		struct qcedev_vbuf_info	vbuf;
+	};
+	uint32_t			entries;
+	uint32_t			data_len;
+	uint8_t				in_place_op;
+	uint8_t				enckey[QCEDEV_MAX_KEY_SIZE];
+	uint32_t			encklen;
+	uint8_t				iv[QCEDEV_MAX_IV_SIZE];
+	uint32_t			ivlen;
+	uint32_t			byteoffset;
+	enum qcedev_cipher_alg_enum	alg;
+	enum qcedev_cipher_mode_enum	mode;
+	enum qcedev_oper_enum		op;
+};
+
+/**
+* struct qcedev_sha_op_req - Holds the hashing request information
+* @data (IN):			Array of pointers to the data to be hashed
+* @entries (IN):		Number of buf_info entries in the data array
+* @data_len (IN):		Length of data to be hashed
+* @digest (IN/OUT):		Returns the hashed data information
+* @diglen (OUT):		Size of the hashed/digest data
+* @authkey (IN):		Pointer to authentication key for HMAC
+* @authklen (IN):		Size of the authentication key
+* @alg (IN):			Secure Hash algorithm
+* @ctxt (Reserved):		RESERVED: User should not modify this data.
+*/
+struct	qcedev_sha_op_req {
+	struct buf_info			data[QCEDEV_MAX_BUFFERS];
+	uint32_t			entries;
+	uint32_t			data_len;
+	uint8_t				digest[QCEDEV_MAX_SHA_DIGEST];
+	uint32_t			diglen;
+	uint8_t				*authkey;
+	uint32_t			authklen;
+	enum qcedev_sha_alg_enum	alg;
+	struct qcedev_sha_ctxt		ctxt;
+};
+
+
+#define QCEDEV_IOC_MAGIC	0x87
+
+#define QCEDEV_IOCTL_ENC_REQ		\
+	_IOWR(QCEDEV_IOC_MAGIC, 1, struct qcedev_cipher_op_req)
+#define QCEDEV_IOCTL_DEC_REQ		\
+	_IOWR(QCEDEV_IOC_MAGIC, 2, struct qcedev_cipher_op_req)
+#define QCEDEV_IOCTL_SHA_INIT_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 3, struct qcedev_sha_op_req)
+#define QCEDEV_IOCTL_SHA_UPDATE_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 4, struct qcedev_sha_op_req)
+#define QCEDEV_IOCTL_SHA_FINAL_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 5, struct qcedev_sha_op_req)
+#define QCEDEV_IOCTL_GET_SHA_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 6, struct qcedev_sha_op_req)
+#define QCEDEV_IOCTL_LOCK_CE	\
+	_IO(QCEDEV_IOC_MAGIC, 7)
+#define QCEDEV_IOCTL_UNLOCK_CE	\
+	_IO(QCEDEV_IOC_MAGIC, 8)
+#define QCEDEV_IOCTL_GET_CMAC_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 9, struct qcedev_cipher_op_req)
+#endif /* _QCEDEV__H */
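Illustrative only, not part of this patch: a user-space sketch of the ioctl interface defined above, performing an in-place AES-128-CBC encryption over plain (non-PMEM) buffers. The device node name /dev/qce is an assumption and may differ per target.

/* Hypothetical user-space QCEDEV sketch (not part of this patch). */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "qcedev.h"

static int example_aes_cbc_enc(uint8_t *buf, uint32_t len,
			       const uint8_t key[16], const uint8_t iv[16])
{
	struct qcedev_cipher_op_req req;
	int fd, rc;

	fd = open("/dev/qce", O_RDWR);		/* node name is an assumption */
	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.use_pmem = QCEDEV_NO_PMEM;		/* plain user buffers, no PMEM */
	req.vbuf.src[0].vaddr = buf;
	req.vbuf.src[0].len = len;
	req.vbuf.dst[0].vaddr = buf;		/* in-place operation */
	req.vbuf.dst[0].len = len;
	req.entries = 1;
	req.data_len = len;
	req.in_place_op = 1;
	memcpy(req.enckey, key, QCEDEV_AES_KEY_128);
	req.encklen = QCEDEV_AES_KEY_128;
	memcpy(req.iv, iv, 16);
	req.ivlen = 16;
	req.alg = QCEDEV_ALG_AES;
	req.mode = QCEDEV_AES_MODE_CBC;
	req.op = QCEDEV_OPER_ENC;

	rc = ioctl(fd, QCEDEV_IOCTL_ENC_REQ, &req);
	close(fd);
	return rc;
}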
diff --git a/drivers/crypto/msm/inc/qcryptohw_30.h b/drivers/crypto/msm/inc/qcryptohw_30.h
new file mode 100644
index 0000000..edbee71
--- /dev/null
+++ b/drivers/crypto/msm/inc/qcryptohw_30.h
@@ -0,0 +1,308 @@
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTOHW_30_H_
+#define _DRIVERS_CRYPTO_MSM_QCRYPTOHW_30_H_
+
+#define QCE_AUTH_REG_BYTE_COUNT 2
+#define CRYPTO_DATA_IN_REG			0x0
+#define CRYPTO_DATA_OUT_REG			0x10
+#define CRYPTO_STATUS_REG			0x20
+#define CRYPTO_CONFIG_REG			0x24
+#define CRYPTO_DEBUG_REG			0x28
+#define CRYPTO_REGISTER_LOCK_REG		0x2C
+#define CRYPTO_SEG_CFG_REG			0x30
+#define CRYPTO_ENCR_SEG_CFG_REG			0x34
+#define CRYPTO_AUTH_SEG_CFG_REG			0x38
+#define CRYPTO_SEG_SIZE_REG			0x3C
+#define CRYPTO_GOPROC_REG			0x40
+#define CRYPTO_ENGINES_AVAIL			0x44
+
+#define CRYPTO_DES_KEY0_REG			0x50
+#define CRYPTO_DES_KEY1_REG			0x54
+#define CRYPTO_DES_KEY2_REG			0x58
+#define CRYPTO_DES_KEY3_REG			0x5C
+#define CRYPTO_DES_KEY4_REG			0x60
+#define CRYPTO_DES_KEY5_REG			0x64
+
+#define CRYPTO_CNTR0_IV0_REG			0x70
+#define CRYPTO_CNTR1_IV1_REG			0x74
+#define CRYPTO_CNTR2_IV2_REG			0x78
+#define CRYPTO_CNTR3_IV3_REG			0x7C
+#define CRYPTO_CNTR_MASK_REG			0x80
+
+#define CRYPTO_AUTH_BYTECNT0_REG		0x90
+#define CRYPTO_AUTH_BYTECNT1_REG		0x94
+#define CRYPTO_AUTH_BYTECNT2_REG		0x98
+#define CRYPTO_AUTH_BYTECNT3_REG		0x9C
+
+#define CRYPTO_AUTH_IV0_REG			0x100
+#define CRYPTO_AUTH_IV1_REG			0x104
+#define CRYPTO_AUTH_IV2_REG			0x108
+#define CRYPTO_AUTH_IV3_REG			0x10C
+#define CRYPTO_AUTH_IV4_REG			0x110
+#define CRYPTO_AUTH_IV5_REG			0x114
+#define CRYPTO_AUTH_IV6_REG			0x118
+#define CRYPTO_AUTH_IV7_REG			0x11C
+#define CRYPTO_AUTH_IV8_REG			0x120
+#define CRYPTO_AUTH_IV9_REG			0x124
+#define CRYPTO_AUTH_IV10_REG			0x128
+#define CRYPTO_AUTH_IV11_REG			0x12C
+#define CRYPTO_AUTH_IV12_REG			0x130
+#define CRYPTO_AUTH_IV13_REG			0x134
+#define CRYPTO_AUTH_IV14_REG			0x138
+#define CRYPTO_AUTH_IV15_REG			0x13C
+
+#define CRYPTO_AES_RNDKEY0			0x200
+#define CRYPTO_AES_RNDKEY1			0x204
+#define CRYPTO_AES_RNDKEY2			0x208
+#define CRYPTO_AES_RNDKEY3			0x20C
+#define CRYPTO_AES_RNDKEY4			0x210
+#define CRYPTO_AES_RNDKEY5			0x214
+#define CRYPTO_AES_RNDKEY6			0x218
+#define CRYPTO_AES_RNDKEY7			0x21C
+#define CRYPTO_AES_RNDKEY8			0x220
+#define CRYPTO_AES_RNDKEY9			0x224
+#define CRYPTO_AES_RNDKEY10			0x228
+#define CRYPTO_AES_RNDKEY11			0x22c
+#define CRYPTO_AES_RNDKEY12			0x230
+#define CRYPTO_AES_RNDKEY13			0x234
+#define CRYPTO_AES_RNDKEY14			0x238
+#define CRYPTO_AES_RNDKEY15			0x23C
+#define CRYPTO_AES_RNDKEY16			0x240
+#define CRYPTO_AES_RNDKEY17			0x244
+#define CRYPTO_AES_RNDKEY18			0x248
+#define CRYPTO_AES_RNDKEY19			0x24C
+#define CRYPTO_AES_RNDKEY20			0x250
+#define CRYPTO_AES_RNDKEY21			0x254
+#define CRYPTO_AES_RNDKEY22			0x258
+#define CRYPTO_AES_RNDKEY23			0x25C
+#define CRYPTO_AES_RNDKEY24			0x260
+#define CRYPTO_AES_RNDKEY25			0x264
+#define CRYPTO_AES_RNDKEY26			0x268
+#define CRYPTO_AES_RNDKEY27			0x26C
+#define CRYPTO_AES_RNDKEY28			0x270
+#define CRYPTO_AES_RNDKEY29			0x274
+#define CRYPTO_AES_RNDKEY30			0x278
+#define CRYPTO_AES_RNDKEY31			0x27C
+#define CRYPTO_AES_RNDKEY32			0x280
+#define CRYPTO_AES_RNDKEY33			0x284
+#define CRYPTO_AES_RNDKEY34			0x288
+#define CRYPTO_AES_RNDKEY35			0x28c
+#define CRYPTO_AES_RNDKEY36			0x290
+#define CRYPTO_AES_RNDKEY37			0x294
+#define CRYPTO_AES_RNDKEY38			0x298
+#define CRYPTO_AES_RNDKEY39			0x29C
+#define CRYPTO_AES_RNDKEY40			0x2A0
+#define CRYPTO_AES_RNDKEY41			0x2A4
+#define CRYPTO_AES_RNDKEY42			0x2A8
+#define CRYPTO_AES_RNDKEY43			0x2AC
+#define CRYPTO_AES_RNDKEY44			0x2B0
+#define CRYPTO_AES_RNDKEY45			0x2B4
+#define CRYPTO_AES_RNDKEY46			0x2B8
+#define CRYPTO_AES_RNDKEY47			0x2BC
+#define CRYPTO_AES_RNDKEY48			0x2C0
+#define CRYPTO_AES_RNDKEY49			0x2C4
+#define CRYPTO_AES_RNDKEY50			0x2C8
+#define CRYPTO_AES_RNDKEY51			0x2CC
+#define CRYPTO_AES_RNDKEY52			0x2D0
+#define CRYPTO_AES_RNDKEY53			0x2D4
+#define CRYPTO_AES_RNDKEY54			0x2D8
+#define CRYPTO_AES_RNDKEY55			0x2DC
+#define CRYPTO_AES_RNDKEY56			0x2E0
+#define CRYPTO_AES_RNDKEY57			0x2E4
+#define CRYPTO_AES_RNDKEY58			0x2E8
+#define CRYPTO_AES_RNDKEY59			0x2EC
+
+#define CRYPTO_DATA_SHADOW0			0x8000
+#define CRYPTO_DATA_SHADOW8191			0x8FFC
+
+/* status reg  */
+#define CRYPTO_CORE_REV				28	/* bit 31-28 */
+#define CRYPTO_CORE_REV_MASK			(0xf << CRYPTO_CORE_REV)
+#define CRYPTO_DOUT_SIZE_AVAIL			22	/* bit 24-22 */
+#define CRYPTO_DOUT_SIZE_AVAIL_MASK		(0x7 << CRYPTO_DOUT_SIZE_AVAIL)
+#define CRYPTO_DIN_SIZE_AVAIL			19	/* bit 21-19 */
+#define CRYPTO_DIN_SIZE_AVAIL_MASK		(0x7 << CRYPTO_DIN_SIZE_AVAIL)
+#define CRYPTO_ACCESS_VIOL			18
+#define CRYPTO_SEG_CHNG_ERR			17
+#define CRYPTO_CFH_CHNG_ERR			16
+#define CRYPTO_DOUT_ERR				15
+#define CRYPTO_DIN_ERR				14
+#define CRYPTO_LOCKED				13
+#define CRYPTO_CRYPTO_STATE			10	/* bit 12-10 */
+#define CRYPTO_CRYPTO_STATE_MASK		(0x7 << CRYPTO_CRYPTO_STATE)
+#define CRYPTO_ENCR_BUSY			9
+#define CRYPTO_AUTH_BUSY			8
+#define CRYPTO_DOUT_INTR			7
+#define CRYPTO_DIN_INTR				6
+#define CRYPTO_AUTH_DONE_INTR			5
+#define CRYPTO_ERR_INTR				4
+#define CRYPTO_DOUT_RDY				3
+#define CRYPTO_DIN_RDY				2
+#define CRYPTO_AUTH_DONE			1
+#define CRYPTO_SW_ERR				0
+
+#define CRYPTO_CRYPTO_STATE_IDLE		0
+#define CRYPTO_CRYPTO_STATE_LOCKED		1
+#define CRYPTO_CRYPTO_STATE_GO			3
+#define CRYPTO_CRYPTO_STATE_PROCESSING		4
+#define CRYPTO_CRYPTO_STATE_FINAL_READ		5
+#define CRYPTO_CRYPTO_STATE_CTXT_CLEARING	6
+#define CRYPTO_CRYPTO_STATE_UNLOCKING		7
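+
+/*
+ * Illustrative use of the status-register fields above (a sketch, not part
+ * of the original header): the engine state can be read as
+ *   state = (readl_relaxed(base + CRYPTO_STATUS_REG) &
+ *            CRYPTO_CRYPTO_STATE_MASK) >> CRYPTO_CRYPTO_STATE;
+ */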
+
+/* config reg */
+#define CRYPTO_HIGH_SPD_HASH_EN_N		15
+#define CRYPTO_HIGH_SPD_OUT_EN_N		14
+#define CRYPTO_HIGH_SPD_IN_EN_N			13
+#define CRYPTO_DBG_EN				12
+#define CRYPTO_DBG_SEL				7	/* bit 11:7 */
+#define CRYPTO_DBG_SEL_MASK			(0x1F << CRYPTO_DBG_SEL)
+#define CRYPTO_MASK_DOUT_INTR			6
+#define CRYPTO_MASK_DIN_INTR			5
+#define CRYPTO_MASK_AUTH_DONE_INTR		4
+#define CRYPTO_MASK_ERR_INTR			3
+#define CRYPTO_AUTO_SHUTDOWN_EN			2
+#define CRYPTO_CLK_EN_N				1
+#define CRYPTO_SW_RST				0
+
+/* seg_cfg reg */
+#define CRYPTO_F8_KEYSTREAM_ENABLE		25
+#define CRYPTO_F9_DIRECTION			24
+#define CRYPTO_F8_DIRECTION			23
+#define CRYPTO_USE_HW_KEY			22
+
+#define CRYPTO_CNTR_ALG				20	/* bit 21-20 */
+#define CRYPTO_CNTR_ALG_MASK			(3 << CRYPTO_CNTR_ALG)
+
+#define CRYPTO_CLR_CNTXT			19
+#define CRYPTO_LAST				18
+#define CRYPTO_FIRST				17
+#define CRYPTO_ENCODE				16
+
+#define CRYPTO_AUTH_POS				14	/* bit 15-14 */
+#define CRYPTO_AUTH_POS_MASK			(3 << CRYPTO_AUTH_POS)
+
+#define CRYPTO_AUTH_SIZE			11	/* bit 13-11 */
+#define CRYPTO_AUTH_SIZE_MASK			(7 << CRYPTO_AUTH_SIZE)
+
+#define CRYPTO_AUTH_ALG				9	/* bit 10-9 */
+#define CRYPTO_AUTH_ALG_MASK			(3 << CRYPTO_AUTH_ALG)
+
+#define CRYPTO_ENCR_MODE			6	/* bit 8-6 */
+#define CRYPTO_ENCR_MODE_MASK			(7 << CRYPTO_ENCR_MODE)
+
+#define CRYPTO_ENCR_KEY_SZ			3	/* bit 5-3 */
+#define CRYPTO_ENCR_KEY_SZ_MASK			(7 << CRYPTO_ENCR_KEY_SZ)
+
+#define CRYPTO_ENCR_ALG				0	/* bit 2-0 */
+#define CRYPTO_ENCR_ALG_MASK			(7 << CRYPTO_ENCR_ALG)
+
+#define CRYPTO_CNTR_ALG_NIST			0
+#define CRYPTO_CNTR_ALG_UMB			1
+#define CRYPTO_CNTR_ALG_VAR2			2
+
+#define CRYPTO_AUTH_POS_BEFORE			0
+#define CRYPTO_AUTH_POS_AFTER			1
+
+#define CRYPTO_AUTH_SIZE_SHA1			0
+#define CRYPTO_AUTH_SIZE_SHA256			1
+#define CRYPTO_AUTH_SIZE_SHA384			2
+#define CRYPTO_AUTH_SIZE_SHA512			3
+#define CRYPTO_AUTH_SIZE_HMAC_SHA1		4
+
+#define CRYPTO_AUTH_SIZE_UIA1			0
+#define CRYPTO_AUTH_SIZE_UIA2			1
+
+#define CRYPTO_AUTH_ALG_NONE			0
+#define CRYPTO_AUTH_ALG_SHA			1
+#define CRYPTO_AUTH_ALG_F9			2
+#define CRYPTO_AUTH_ALG_RESERVED1		3
+
+#define CRYPTO_ENCR_MODE_ECB			0
+#define CRYPTO_ENCR_MODE_CBC			1
+/* only valid when AES */
+#define CRYPTO_ENCR_MODE_CTR			2
+
+
+#define CRYPTO_ENCR_KEY_SZ_DES			0
+#define CRYPTO_ENCR_KEY_SZ_3DES			1
+
+#define CRYPTO_ENCR_KEY_SZ_AES128		0
+#define CRYPTO_ENCR_KEY_SZ_AES192		1
+#define CRYPTO_ENCR_KEY_SZ_AES256		2
+
+#define CRYPTO_ENCR_KEY_SZ_UEA1			0
+#define CRYPTO_ENCR_KEY_SZ_UEA2			1
+
+#define CRYPTO_ENCR_ALG_NONE			0
+#define CRYPTO_ENCR_ALG_DES			1
+#define CRYPTO_ENCR_ALG_AES			2
+#define CRYPTO_ENCR_ALG_C2			3
+#define CRYPTO_ENCR_ALG_F8			4
+
+/* encr_seg_cfg reg */
+#define CRYPTO_ENCR_SEG_SIZE			16	/* bit 31-16  */
+#define CRYPTO_ENCR_SEG_SIZE_MASK		(0xffff << CRYPTO_ENCR_SEG_SIZE)
+
+#define CRYPTO_ENCR_START			0
+#define CRYPTO_ENCR_START_MASK			(0xffff << CRYPTO_ENCR_START)
+
+/* auth_seg_cfg reg */
+#define CRYPTO_AUTH_SEG_SIZE			16	/* bit 31-16  */
+#define CRYPTO_AUTH_SEG_SIZE_MASK		(0xffff << CRYPTO_AUTH_SEG_SIZE)
+
+#define CRYPTO_AUTH_START			0
+#define CRYPTO_AUTH_START_MASK			(0xffff << CRYPTO_AUTH_START)
+
+
+/* seg_size reg */
+#define CRYPTO_SEG_SIZE				0
+#define CRYPTO_SEG_SIZE_MASK			(0xffff << CRYPTO_SEG_SIZE)
+
+/* goproc reg */
+#define CRYPTO_GO				0
+
+/* engines_avail */
+#define CRYPTO_F9_SEL				8
+#define CRYPTO_F8_SEL				7
+#define CRYPTO_HMAC_SEL				6
+#define CRYPTO_SHA512_SEL			5
+#define CRYPTO_SHA_SEL				4
+#define CRYPTO_DES_SEL				3
+#define CRYPTO_C2_SEL				2
+
+#define CRYPTO_AES_SEL				0	/* bit 1-0 */
+#define CRYPTO_AES_SEL_MASK			(3 <<  CRYPTO_AES_SEL)
+#define CRYPTO_AES_SEL_NO			0
+#define CRYPTO_AES_SEL_SLOW			1
+#define CRYPTO_AES_SEL_FAST			2
+#define CRYPTO_AES_SEL_RESERVED			3
+
+/*  F8 definition of CRYPTO_CNTR1_IV1_REG  */
+#define CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT		16	/* bit 31 - 16 */
+#define CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT_MASK \
+		(0xffff << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT)
+
+#define CRYPTO_CNTR1_IV1_REG_F8_BEARER		0	/* bit 4 - 0 */
+#define CRYPTO_CNTR1_IV1_REG_F8_BEARER_MASK \
+		(0x1f << CRYPTO_CNTR1_IV1_REG_F8_BEARER)
+
+/* F9 definition of CRYPTO_AUTH_IV4_REG */
+#define CRYPTO_AUTH_IV4_REG_F9_VALID_BIS	0	/* bit 2 - 0 */
+#define CRYPTO_AUTH_IV4_REG_F9_VALID_BIS_MASK \
+		(0x7  << CRYPTO_AUTH_IV4_REG_F9_VALID_BIS)
+
+/* misc  */
+#define CRYPTO_AES_RNDKEYS			60
+
+#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTOHW_30_H_ */
diff --git a/drivers/crypto/msm/inc/qcryptohw_40.h b/drivers/crypto/msm/inc/qcryptohw_40.h
new file mode 100644
index 0000000..367bdaa
--- /dev/null
+++ b/drivers/crypto/msm/inc/qcryptohw_40.h
@@ -0,0 +1,316 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTOHW_40_H_
+#define _DRIVERS_CRYPTO_MSM_QCRYPTOHW_40_H_
+
+
+#define QCE_AUTH_REG_BYTE_COUNT 4
+#define CRYPTO_VERSION_REG			0x0
+#define CRYPTO_DATA_IN_REG			0x008
+#define CRYPTO_DATA_OUT_REG			0x010
+#define CRYPTO_STATUS_REG			0x100
+#define CRYPTO_ENGINES_AVAIL			0x104
+#define CRYPTO3_VERSION_REG			0x108
+#define CRYPTO_SEG_SIZE_REG			0x200
+#define CRYPTO_GOPROC_REG			0x204
+#define CRYPTO_ENCR_SEG_CFG_REG			0x300
+
+#define CRYPTO_ENCR_SEG_SIZE_REG		0x304
+#define CRYPTO_ENCR_SEG_START_REG		0x308
+
+#define CRYPTO_ENCR_KEY0_REG			0x310
+#define CRYPTO_ENCR_KEY1_REG			0x314
+#define CRYPTO_ENCR_KEY2_REG			0x318
+#define CRYPTO_ENCR_KEY3_REG			0x31C
+#define CRYPTO_ENCR_KEY4_REG			0x320
+#define CRYPTO_ENCR_KEY5_REG			0x324
+#define CRYPTO_ENCR_KEY6_REG			0x328
+#define CRYPTO_ENCR_KEY7_REG			0x32C
+
+#define CRYPTO_ENCR_XTS_KEY0_REG		0x330
+#define CRYPTO_ENCR_XTS_KEY1_REG		0x334
+#define CRYPTO_ENCR_XTS_KEY2_REG		0x338
+#define CRYPTO_ENCR_XTS_KEY3_REG		0x33C
+#define CRYPTO_ENCR_XTS_KEY4_REG		0x340
+#define CRYPTO_ENCR_XTS_KEY5_REG		0x344
+#define CRYPTO_ENCR_XTS_KEY6_REG		0x348
+#define CRYPTO_ENCR_XTS_KEY7_REG		0x34C
+
+#define CRYPTO_CNTR0_IV0_REG			0x350
+#define CRYPTO_CNTR1_IV1_REG			0x354
+#define CRYPTO_CNTR2_IV2_REG			0x358
+#define CRYPTO_CNTR3_IV3_REG			0x35C
+
+#define CRYPTO_CNTR_MASK_REG			0x360
+
+#define CRYPTO_ENCR_XTS_DU_SIZE_REG		0x364
+
+#define CRYPTO_AUTH_SEG_CFG_REG			0x400
+#define CRYPTO_AUTH_SEG_SIZE_REG		0x404
+#define CRYPTO_AUTH_SEG_START_REG		0x408
+
+#define CRYPTO_AUTH_KEY0_REG			0x410
+#define CRYPTO_AUTH_KEY1_REG			0x414
+#define CRYPTO_AUTH_KEY2_REG			0x418
+#define CRYPTO_AUTH_KEY3_REG			0x41C
+#define CRYPTO_AUTH_KEY4_REG			0x420
+#define CRYPTO_AUTH_KEY5_REG			0x424
+#define CRYPTO_AUTH_KEY6_REG			0x428
+#define CRYPTO_AUTH_KEY7_REG			0x42C
+#define CRYPTO_AUTH_KEY8_REG			0x430
+#define CRYPTO_AUTH_KEY9_REG			0x434
+#define CRYPTO_AUTH_KEY10_REG			0x438
+#define CRYPTO_AUTH_KEY11_REG			0x43C
+#define CRYPTO_AUTH_KEY12_REG			0x440
+#define CRYPTO_AUTH_KEY13_REG			0x444
+#define CRYPTO_AUTH_KEY14_REG			0x448
+#define CRYPTO_AUTH_KEY15_REG			0x44C
+
+#define CRYPTO_AUTH_IV0_REG			0x450
+#define CRYPTO_AUTH_IV1_REG			0x454
+#define CRYPTO_AUTH_IV2_REG			0x458
+#define CRYPTO_AUTH_IV3_REG			0x45C
+#define CRYPTO_AUTH_IV4_REG			0x460
+#define CRYPTO_AUTH_IV5_REG			0x464
+#define CRYPTO_AUTH_IV6_REG			0x468
+#define CRYPTO_AUTH_IV7_REG			0x46C
+#define CRYPTO_AUTH_IV8_REG			0x470
+#define CRYPTO_AUTH_IV9_REG			0x474
+#define CRYPTO_AUTH_IV10_REG			0x478
+#define CRYPTO_AUTH_IV11_REG			0x47C
+#define CRYPTO_AUTH_IV12_REG			0x480
+#define CRYPTO_AUTH_IV13_REG			0x484
+#define CRYPTO_AUTH_IV14_REG			0x488
+#define CRYPTO_AUTH_IV15_REG			0x48C
+
+#define CRYPTO_AUTH_INFO_NONCE0_REG		0x490
+#define CRYPTO_AUTH_INFO_NONCE1_REG		0x494
+#define CRYPTO_AUTH_INFO_NONCE2_REG		0x498
+#define CRYPTO_AUTH_INFO_NONCE3_REG		0x49C
+
+#define CRYPTO_AUTH_BYTECNT0_REG		0x4A0
+#define CRYPTO_AUTH_BYTECNT1_REG		0x4A4
+#define CRYPTO_AUTH_BYTECNT2_REG		0x4A8
+#define CRYPTO_AUTH_BYTECNT3_REG		0x4AC
+
+#define CRYPTO_AUTH_EXP_MAC0_REG		0x4B0
+#define CRYPTO_AUTH_EXP_MAC1_REG		0x4B4
+#define CRYPTO_AUTH_EXP_MAC2_REG		0x4B8
+#define CRYPTO_AUTH_EXP_MAC3_REG		0x4BC
+#define CRYPTO_AUTH_EXP_MAC4_REG		0x4C0
+#define CRYPTO_AUTH_EXP_MAC5_REG		0x4C4
+#define CRYPTO_AUTH_EXP_MAC6_REG		0x4C8
+#define CRYPTO_AUTH_EXP_MAC7_REG		0x4CC
+
+#define CRYPTO_CONFIG_REG			0x500
+#define CRYPTO_SACR_REG				0x504
+#define CRYPTO_DEBUG_REG			0x508
+
+#define CRYPTO_DATA_SHADOW0			0x8000
+#define CRYPTO_DATA_SHADOW8191			0x8FFC
+
+
+/* Register bits */
+
+#define CRYPTO_CORE_MAJOR_REV			4 /* bit 7-4 */
+#define CRYPTO_CORE_MAJOR_REV_MASK		(0xF << CRYPTO_CORE_MAJOR_REV)
+#define CRYPTO_CORE_MINOR_REV			0 /* bit 3-0 */
+#define CRYPTO_CORE_MINOR_REV_MASK		(0xF << CRYPTO_CORE_MINOR_REV)
+#define CRYPTO_CORE_REV_MASK			0xFF
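+
+/*
+ * Illustrative decode of CRYPTO_VERSION_REG using the masks above (a sketch):
+ *   major = (ver & CRYPTO_CORE_MAJOR_REV_MASK) >> CRYPTO_CORE_MAJOR_REV;
+ *   minor = (ver & CRYPTO_CORE_MINOR_REV_MASK) >> CRYPTO_CORE_MINOR_REV;
+ */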
+
+/* status reg  */
+#define CRYPTO_MAC_FAILED			25
+#define CRYPTO_DOUT_SIZE_AVAIL			22 /* bit 24-22 */
+#define CRYPTO_DOUT_SIZE_AVAIL_MASK		(0x7 << CRYPTO_DOUT_SIZE_AVAIL)
+#define CRYPTO_DIN_SIZE_AVAIL			19 /* bit 21-19 */
+#define CRYPTO_DIN_SIZE_AVAIL_MASK		(0x7 << CRYPTO_DIN_SIZE_AVAIL)
+#define CRYPTO_ACCESS_VIOL			18
+#define CRYPTO_SEG_CHNG_ERR			17
+#define CRYPTO_CFH_CHNG_ERR			16
+#define CRYPTO_DOUT_ERR				15
+#define CRYPTO_DIN_ERR				14
+#define CRYPTO_LOCKED				13
+#define CRYPTO_CRYPTO_STATE			10 /* bit 12-10 */
+#define CRYPTO_CRYPTO_STATE_MASK		(0x7 << CRYPTO_CRYPTO_STATE)
+#define CRYPTO_ENCR_BUSY			9
+#define CRYPTO_AUTH_BUSY			8
+#define CRYPTO_DOUT_INTR			7
+#define CRYPTO_DIN_INTR				6
+#define CRYPTO_OP_DONE_INTR			5
+#define CRYPTO_ERR_INTR				4
+#define CRYPTO_DOUT_RDY				3
+#define CRYPTO_DIN_RDY				2
+#define CRYPTO_OPERATION_DONE			1
+#define CRYPTO_SW_ERR				0
+
+/* config reg */
+#define CRYPTO_REQ_SIZE				30 /* bit 31-30 */
+#define CRYPTO_REQ_SIZE_MASK			(0x3 << CRYPTO_REQ_SIZE)
+#define CRYPTO_REQ_SIZE_ENUM_16_BYTES	0
+#define CRYPTO_REQ_SIZE_ENUM_32_BYTES	1
+#define CRYPTO_REQ_SIZE_ENUM_64_BYTES	2
+
+#define CRYPTO_MAX_QUEUED_REQ			27 /* bit 29-27 */
+#define CRYPTO_MAX_QUEUED_REQ_MASK		(0x7 << CRYPTO_MAX_QUEUED_REQ)
+#define CRYPTO_ENUM1_QUEUED_REQS		0
+#define CRYPTO_ENUM2_QUEUED_REQS		1
+#define CRYPTO_ENUM3_QUEUED_REQS		2
+#define CRYPTO_ENUM4_QUEUED_REQS		3
+
+#define CRYPTO_FIFO_THRESHOLD			24 /* bit 26-24 */
+#define CRYPTO_FIFO_THRESHOLD_MASK		(0x7 << CRYPTO_FIFO_THRESHOLD)
+#define CRYPTO_FIFO_ENUM_16_BYTES		0
+#define CRYPTO_FIFO_ENUM_32_BYTES		1
+#define CRYPTO_FIFO_ENUM_48_BYTES		2
+#define CRYPTO_FIFO_ENUM_64_BYTES		3
+
+#define CRYPTO_IRQ_ENABLES			20	/* bit 23-20 */
+#define CRYPTO_IRQ_ENABLES_MASK			(0xF << CRYPTO_IRQ_ENABLES)
+
+#define CRYPTO_ACR_EN				18
+#define CRYPTO_BAM_MODE				17
+#define CRYPTO_LITTLE_ENDIAN_MODE		16
+#define CRYPTO_HIGH_SPD_OUT_EN_N		14
+#define CRYPTO_HIGH_SPD_IN_EN_N			13
+#define CRYPTO_DBG_EN				12
+
+#define CRYPTO_DBG_SEL				7 /* bit 11:7 */
+#define CRYPTO_DBG_SEL_MASK			(0x1F << CRYPTO_DBG_SEL)
+
+#define CRYPTO_MASK_DOUT_INTR			6
+#define CRYPTO_MASK_DIN_INTR			5
+#define CRYPTO_MASK_OP_DONE_INTR		4
+#define CRYPTO_MASK_ERR_INTR			3
+#define CRYPTO_AUTO_SHUTDOWN_EN			2
+#define CRYPTO_CLK_EN_N				1
+
+/* auth_seg_cfg reg */
+#define CRYPTO_COMP_EXP_MAC			20
+#define CRYPTO_COMP_EXP_MAC_DISABLED		0
+#define CRYPTO_COMP_EXP_MAC_ENABLED		1
+
+#define CRYPTO_F9_DIRECTION			19
+#define CRYPTO_F9_DIRECTION_UPLINK		0
+#define CRYPTO_F9_DIRECTION_DOWNLINK		1
+
+#define CRYPTO_AUTH_NONCE_NUM_WORDS		16
+#define CRYPTO_AUTH_NONCE_NUM_WORDS_MASK \
+					(0x7 << CRYPTO_AUTH_NONCE_NUM_WORDS)
+
+#define CRYPTO_USE_HW_KEY_AUTH			15
+
+#define CRYPTO_LAST				14
+
+#define CRYPTO_AUTH_POS				12 /* bit 13 .. 12*/
+#define CRYPTO_AUTH_POS_MASK			(0x3 << CRYPTO_AUTH_POS)
+#define CRYPTO_AUTH_POS_BEFORE			0
+#define CRYPTO_AUTH_POS_AFTER			1
+
+#define CRYPTO_AUTH_SIZE			9 /* bits 11 .. 9*/
+#define CRYPTO_AUTH_SIZE_MASK			(0x7 << CRYPTO_AUTH_SIZE)
+#define CRYPTO_AUTH_SIZE_SHA1			0
+#define CRYPTO_AUTH_SIZE_SHA256			1
+#define CRYPTO_AUTH_SIZE_ENUM_4_BYTES		0
+#define CRYPTO_AUTH_SIZE_ENUM_6_BYTES		1
+#define CRYPTO_AUTH_SIZE_ENUM_8_BYTES		2
+#define CRYPTO_AUTH_SIZE_ENUM_10_BYTES		3
+#define CRYPTO_AUTH_SIZE_ENUM_12_BYTES		4
+#define CRYPTO_AUTH_SIZE_ENUM_14_BYTES		5
+#define CRYPTO_AUTH_SIZE_ENUM_16_BYTES		6
+
+#define CRYPTO_AUTH_MODE			6 /* bit 8 .. 6*/
+#define CRYPTO_AUTH_MODE_MASK			(0x7 << CRYPTO_AUTH_MODE)
+#define CRYPTO_AUTH_MODE_HASH			0
+#define CRYPTO_AUTH_MODE_HMAC			1
+#define CRYPTO_AUTH_MODE_CCM			0
+#define CRYPTO_AUTH_MODE_CMAC			1
+
+#define CRYPTO_AUTH_KEY_SIZE			3
+#define CRYPTO_AUTH_KEY_SIZE_MASK		(0x7 << CRYPTO_AUTH_KEY_SIZE)
+#define CRYPTO_AUTH_KEY_SZ_AES128		0
+#define CRYPTO_AUTH_KEY_SZ_AES256		2
+
+#define CRYPTO_AUTH_ALG				0 /* bit 2 .. 0*/
+#define CRYPTO_AUTH_ALG_MASK			7
+#define CRYPTO_AUTH_ALG_NONE			0
+#define CRYPTO_AUTH_ALG_SHA			1
+#define CRYPTO_AUTH_ALG_AES			2
+#define CRYPTO_AUTH_ALG_KASUMI			3
+#define CRYPTO_AUTH_ALG_SNOW3G			4
+
+/* encr_xts_du_size reg */
+#define CRYPTO_ENCR_XTS_DU_SIZE			0 /* bit 19-0  */
+#define CRYPTO_ENCR_XTS_DU_SIZE_MASK		0xfffff
+
+/* encr_seg_cfg reg */
+#define CRYPTO_F8_KEYSTREAM_ENABLE		15
+#define CRYPTO_F8_KEYSTREAM_DISABLED		0
+#define CRYPTO_F8_KEYSTREAM_ENABLED		1
+
+#define CRYPTO_F8_DIRECTION			14
+#define CRYPTO_F8_DIRECTION_UPLINK		0
+#define CRYPTO_F8_DIRECTION_DOWNLINK		1
+
+#define CRYPTO_USE_HW_KEY_ENCR			13
+#define CRYPTO_USE_HW_KEY_REG			0
+#define CRYPTO_USE_HW_KEY			1
+
+#define CRYPTO_CNTR_ALG				11 /* bit 12-11 */
+#define CRYPTO_CNTR_ALG_MASK			(3 << CRYPTO_CNTR_ALG)
+#define CRYPTO_CNTR_ALG_NIST			0
+
+#define CRYPTO_ENCODE				10
+
+#define CRYPTO_ENCR_MODE			6 /* bit 9-6 */
+#define CRYPTO_ENCR_MODE_MASK			(0xF << CRYPTO_ENCR_MODE)
+/* only valid when AES */
+#define CRYPTO_ENCR_MODE_ECB			0
+#define CRYPTO_ENCR_MODE_CBC			1
+#define CRYPTO_ENCR_MODE_CTR			2
+#define CRYPTO_ENCR_MODE_XTS			3
+#define CRYPTO_ENCR_MODE_CCM			4
+
+#define CRYPTO_ENCR_KEY_SZ			3 /* bit 5-3 */
+#define CRYPTO_ENCR_KEY_SZ_MASK			(7 << CRYPTO_ENCR_KEY_SZ)
+#define CRYPTO_ENCR_KEY_SZ_DES			0
+#define CRYPTO_ENCR_KEY_SZ_3DES			1
+#define CRYPTO_ENCR_KEY_SZ_AES128		0
+#define CRYPTO_ENCR_KEY_SZ_AES256		2
+#define CRYPTO_ENCR_KEY_SZ_UEA1			0
+#define CRYPTO_ENCR_KEY_SZ_UEA2			1
+
+#define CRYPTO_ENCR_ALG				0 /* bit 2-0 */
+#define CRYPTO_ENCR_ALG_MASK			(7 << CRYPTO_ENCR_ALG)
+#define CRYPTO_ENCR_ALG_NONE			0
+#define CRYPTO_ENCR_ALG_DES			1
+#define CRYPTO_ENCR_ALG_AES			2
+#define CRYPTO_ENCR_ALG_KASUMI			3
+#define CRYPTO_ENCR_ALG_SNOW_3G			5
+
+/* goproc reg */
+#define CRYPTO_GO				0
+#define CRYPTO_CLR_CNTXT			1
+
+/* engines_avail */
+#define CRYPTO_ENCR_AES_SEL			0
+#define CRYPTO_DES_SEL				3
+#define CRYPTO_ENCR_SNOW3G_SEL			4
+#define CRYPTO_ENCR_KASUMI_SEL			5
+#define CRYPTO_SHA_SEL				6
+#define CRYPTO_SHA512_SEL			7
+#define CRYPTO_AUTH_AES_SEL			8
+#define CRYPTO_AUTH_SNOW3G_SEL			9
+#define CRYPTO_AUTH_KASUMI_SEL			10
+#define CRYPTO_BAM_SEL				11
+
+#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTOHW_40_H_ */
diff --git a/drivers/crypto/msm/ota_crypto.c b/drivers/crypto/msm/ota_crypto.c
new file mode 100644
index 0000000..516253a
--- /dev/null
+++ b/drivers/crypto/msm/ota_crypto.c
@@ -0,0 +1,731 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* Qualcomm Over the Air (OTA) Crypto driver */
+
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+
+
+#include <linux/qcota.h>
+#include "inc/qce_ota.h"
+#include "inc/qce.h"
+
+enum qce_ota_oper_enum {
+	QCE_OTA_F8_OPER   = 0,
+	QCE_OTA_MPKT_F8_OPER = 1,
+	QCE_OTA_F9_OPER  = 2,
+	QCE_OTA_OPER_LAST
+};
+
+struct ota_dev_control;
+
+struct ota_async_req {
+	struct list_head list;
+	struct completion complete;
+	int err;
+	enum qce_ota_oper_enum op;
+	union {
+		struct qce_f9_req f9_req;
+		struct qce_f8_req f8_req;
+		struct qce_f8_multi_pkt_req f8_mp_req;
+	} req;
+
+	struct ota_dev_control  *podev;
+};
+
+/*
+ * Register ourselves as a misc device to be able to access the ota
+ * from userspace.
+ */
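+
+/*
+ * Illustrative userspace usage (a sketch, assuming the default misc device
+ * naming): open /dev/qcota0, fill a struct qce_f9_req or qce_f8_req (see the
+ * qcota/qce_ota headers), then issue e.g. ioctl(fd, QCOTA_F9_REQ, &req).
+ */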
+
+
+#define QCOTA_DEV	"qcota"
+
+
+struct ota_dev_control {
+
+	/* misc device */
+	struct miscdevice miscdevice;
+
+	/* qce handle */
+	void *qce;
+
+	/* platform device */
+	struct platform_device *pdev;
+
+	unsigned magic;
+
+	struct list_head ready_commands;
+	struct ota_async_req *active_command;
+	spinlock_t lock;
+	struct tasklet_struct done_tasklet;
+};
+
+#define OTA_MAGIC 0x4f544143
+
+static long qcota_ioctl(struct file *file,
+			  unsigned cmd, unsigned long arg);
+static int qcota_open(struct inode *inode, struct file *file);
+static int qcota_release(struct inode *inode, struct file *file);
+static int start_req(struct ota_dev_control *podev);
+
+static const struct file_operations qcota_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = qcota_ioctl,
+	.open = qcota_open,
+	.release = qcota_release,
+};
+
+static struct ota_dev_control qcota_dev[] = {
+	{
+		.miscdevice = {
+			.minor = MISC_DYNAMIC_MINOR,
+			.name = "qcota0",
+			.fops = &qcota_fops,
+		},
+		.magic = OTA_MAGIC,
+	},
+	{
+		.miscdevice = {
+			.minor = MISC_DYNAMIC_MINOR,
+			.name = "qcota1",
+			.fops = &qcota_fops,
+		},
+		.magic = OTA_MAGIC,
+	},
+	{
+		.miscdevice = {
+			.minor = MISC_DYNAMIC_MINOR,
+			.name = "qcota2",
+			.fops = &qcota_fops,
+		},
+		.magic = OTA_MAGIC,
+	}
+};
+
+#define MAX_OTA_DEVICE ARRAY_SIZE(qcota_dev)
+
+#define DEBUG_MAX_FNAME  16
+#define DEBUG_MAX_RW_BUF 1024
+
+struct qcota_stat {
+	u32 f8_req;
+	u32 f8_mp_req;
+	u32 f9_req;
+	u32 f8_op_success;
+	u32 f8_op_fail;
+	u32 f8_mp_op_success;
+	u32 f8_mp_op_fail;
+	u32 f9_op_success;
+	u32 f9_op_fail;
+};
+static struct qcota_stat _qcota_stat[MAX_OTA_DEVICE];
+static struct dentry *_debug_dent;
+static char _debug_read_buf[DEBUG_MAX_RW_BUF];
+static int _debug_qcota[MAX_OTA_DEVICE];
+
+static struct ota_dev_control *qcota_minor_to_control(unsigned n)
+{
+	int i;
+
+	for (i = 0; i < MAX_OTA_DEVICE; i++) {
+		if (qcota_dev[i].miscdevice.minor == n)
+			return &qcota_dev[i];
+	}
+	return NULL;
+}
+
+static int qcota_open(struct inode *inode, struct file *file)
+{
+	struct ota_dev_control *podev;
+
+	podev = qcota_minor_to_control(MINOR(inode->i_rdev));
+	if (podev == NULL) {
+		pr_err("%s: no such device %d\n", __func__,
+				MINOR(inode->i_rdev));
+		return -ENOENT;
+	}
+
+	file->private_data = podev;
+
+	return 0;
+}
+
+static int qcota_release(struct inode *inode, struct file *file)
+{
+	struct ota_dev_control *podev;
+
+	podev =  file->private_data;
+
+	if (podev != NULL && podev->magic != OTA_MAGIC) {
+		pr_err("%s: invalid handle %p\n",
+			__func__, podev);
+	}
+
+	file->private_data = NULL;
+
+	return 0;
+}
+
+static void req_done(unsigned long data)
+{
+	struct ota_dev_control *podev = (struct ota_dev_control *)data;
+	struct ota_async_req *areq;
+	unsigned long flags;
+	struct ota_async_req *new_req = NULL;
+	int ret = 0;
+
+	spin_lock_irqsave(&podev->lock, flags);
+	areq = podev->active_command;
+	podev->active_command = NULL;
+
+again:
+	if (!list_empty(&podev->ready_commands)) {
+		new_req = container_of(podev->ready_commands.next,
+						struct ota_async_req, list);
+		list_del(&new_req->list);
+		podev->active_command = new_req;
+		new_req->err = 0;
+		ret = start_req(podev);
+	}
+
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	if (areq)
+		complete(&areq->complete);
+
+	if (new_req && ret) {
+		complete(&new_req->complete);
+		spin_lock_irqsave(&podev->lock, flags);
+		podev->active_command = NULL;
+		areq = NULL;
+		ret = 0;
+		new_req = NULL;
+		goto again;
+	}
+
+	return;
+}
+
+static void f9_cb(void *cookie, unsigned char *icv, unsigned char *iv,
+	int ret)
+{
+	struct ota_async_req *areq = (struct ota_async_req *) cookie;
+	struct ota_dev_control *podev;
+	struct qcota_stat *pstat;
+
+	podev = areq->podev;
+	pstat = &_qcota_stat[podev->pdev->id];
+	areq->req.f9_req.mac_i  = (uint32_t) icv;
+
+	if (ret)
+		areq->err = -ENXIO;
+	else
+		areq->err = 0;
+
+	tasklet_schedule(&podev->done_tasklet);
+};
+
+static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv,
+	int ret)
+{
+	struct ota_async_req *areq = (struct ota_async_req *) cookie;
+	struct ota_dev_control *podev;
+	struct qcota_stat *pstat;
+
+	podev = areq->podev;
+	pstat = &_qcota_stat[podev->pdev->id];
+
+	if (ret)
+		areq->err = -ENXIO;
+	else
+		areq->err = 0;
+
+	tasklet_schedule(&podev->done_tasklet);
+};
+
+static int start_req(struct ota_dev_control *podev)
+{
+	struct ota_async_req *areq;
+	struct qce_f9_req *pf9;
+	struct qce_f8_multi_pkt_req *p_mp_f8;
+	struct qce_f8_req *pf8;
+	int ret = 0;
+
+	/* start the command on the podev->active_command */
+	areq = podev->active_command;
+	areq->podev = podev;
+
+	switch (areq->op) {
+	case QCE_OTA_F8_OPER:
+		pf8 = &areq->req.f8_req;
+		ret = qce_f8_req(podev->qce, pf8, areq, f8_cb);
+		break;
+	case QCE_OTA_MPKT_F8_OPER:
+		p_mp_f8 = &areq->req.f8_mp_req;
+		ret = qce_f8_multi_pkt_req(podev->qce, p_mp_f8, areq, f8_cb);
+		break;
+
+	case QCE_OTA_F9_OPER:
+		pf9 = &areq->req.f9_req;
+		ret =  qce_f9_req(podev->qce, pf9, areq, f9_cb);
+		break;
+
+	default:
+		ret = -ENOTSUPP;
+		break;
+	};
+	areq->err = ret;
+	return ret;
+};
+
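+/*
+ * Queueing model (descriptive note): requests are serialized per device.
+ * submit_req() starts a request immediately if the engine is idle, otherwise
+ * it queues it on ready_commands; req_done(), the tasklet, completes the
+ * active request and starts the next queued one.
+ */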
+static int submit_req(struct ota_async_req *areq, struct ota_dev_control *podev)
+{
+	unsigned long flags;
+	int ret = 0;
+	struct qcota_stat *pstat;
+
+	areq->err = 0;
+	spin_lock_irqsave(&podev->lock, flags);
+	if (podev->active_command == NULL) {
+		podev->active_command = areq;
+		ret = start_req(podev);
+	} else {
+		list_add_tail(&areq->list, &podev->ready_commands);
+	}
+
+	if (ret != 0)
+		podev->active_command = NULL;
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	if (ret == 0)
+		wait_for_completion(&areq->complete);
+
+	pstat = &_qcota_stat[podev->pdev->id];
+	switch (areq->op) {
+	case QCE_OTA_F8_OPER:
+		if (areq->err)
+			pstat->f8_op_fail++;
+		else
+			pstat->f8_op_success++;
+		break;
+
+	case QCE_OTA_MPKT_F8_OPER:
+
+		if (areq->err)
+			pstat->f8_mp_op_fail++;
+		else
+			pstat->f8_mp_op_success++;
+		break;
+
+	case QCE_OTA_F9_OPER:
+	default:
+		if (areq->err)
+			pstat->f9_op_fail++;
+		else
+			pstat->f9_op_success++;
+		break;
+	};
+
+	return areq->err;
+};
+
+static long qcota_ioctl(struct file *file,
+			  unsigned cmd, unsigned long arg)
+{
+	int err = 0;
+	struct ota_dev_control *podev;
+	uint8_t *user_src;
+	uint8_t *user_dst;
+	uint8_t *k_buf = NULL;
+	struct ota_async_req areq;
+	uint32_t total;
+	struct qcota_stat *pstat;
+
+	podev =  file->private_data;
+	if (podev == NULL || podev->magic != OTA_MAGIC) {
+		pr_err("%s: invalid handle %p\n",
+			__func__, podev);
+		return -ENOENT;
+	}
+
+	/* Verify user arguments. */
+	if (_IOC_TYPE(cmd) != QCOTA_IOC_MAGIC)
+		return -ENOTTY;
+
+	init_completion(&areq.complete);
+
+	pstat = &_qcota_stat[podev->pdev->id];
+
+	switch (cmd) {
+	case QCOTA_F9_REQ:
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+			       sizeof(struct qce_f9_req)))
+			return -EFAULT;
+		if (__copy_from_user(&areq.req.f9_req, (void __user *)arg,
+				     sizeof(struct qce_f9_req)))
+			return -EFAULT;
+
+		user_src = areq.req.f9_req.message;
+		if (!access_ok(VERIFY_READ, (void __user *)user_src,
+			       areq.req.f9_req.msize))
+			return -EFAULT;
+
+		k_buf = kmalloc(areq.req.f9_req.msize, GFP_KERNEL);
+		if (k_buf == NULL)
+			return -ENOMEM;
+
+		if (__copy_from_user(k_buf, (void __user *)user_src,
+				areq.req.f9_req.msize)) {
+			kfree(k_buf);
+			return -EFAULT;
+		}
+
+		areq.req.f9_req.message = k_buf;
+		areq.op = QCE_OTA_F9_OPER;
+
+		pstat->f9_req++;
+		err = submit_req(&areq, podev);
+
+		areq.req.f9_req.message = user_src;
+		if (err == 0 && __copy_to_user((void __user *)arg,
+				&areq.req.f9_req, sizeof(struct qce_f9_req))) {
+			err = -EFAULT;
+		}
+		kfree(k_buf);
+		break;
+
+	case QCOTA_F8_REQ:
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+			       sizeof(struct qce_f8_req)))
+			return -EFAULT;
+		if (__copy_from_user(&areq.req.f8_req, (void __user *)arg,
+				     sizeof(struct qce_f8_req)))
+			return -EFAULT;
+		total = areq.req.f8_req.data_len;
+		user_src = areq.req.f8_req.data_in;
+		if (user_src != NULL) {
+			if (!access_ok(VERIFY_READ, (void __user *)
+					user_src, total))
+				return -EFAULT;
+
+		};
+
+		user_dst = areq.req.f8_req.data_out;
+		if (!access_ok(VERIFY_WRITE, (void __user *)
+				user_dst, total))
+			return -EFAULT;
+
+		k_buf = kmalloc(total, GFP_KERNEL);
+		if (k_buf == NULL)
+			return -ENOMEM;
+
+		/* k_buf returned from kmalloc should be cache line aligned */
+		if (user_src && __copy_from_user(k_buf,
+				(void __user *)user_src, total)) {
+			kfree(k_buf);
+			return -EFAULT;
+		}
+
+		if (user_src)
+			areq.req.f8_req.data_in = k_buf;
+		else
+			areq.req.f8_req.data_in = NULL;
+		areq.req.f8_req.data_out = k_buf;
+
+		areq.op = QCE_OTA_F8_OPER;
+
+		pstat->f8_req++;
+		err = submit_req(&areq, podev);
+
+		if (err == 0 && __copy_to_user(user_dst, k_buf, total))
+			err = -EFAULT;
+		kfree(k_buf);
+
+		break;
+
+	case QCOTA_F8_MPKT_REQ:
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+			       sizeof(struct qce_f8_multi_pkt_req)))
+			return -EFAULT;
+		if (__copy_from_user(&areq.req.f8_mp_req, (void __user *)arg,
+				     sizeof(struct qce_f8_multi_pkt_req)))
+			return -EFAULT;
+
+		total = areq.req.f8_mp_req.num_pkt *
+				areq.req.f8_mp_req.qce_f8_req.data_len;
+
+		user_src = areq.req.f8_mp_req.qce_f8_req.data_in;
+		if (!access_ok(VERIFY_READ, (void __user *)
+				user_src, total))
+			return -EFAULT;
+
+		user_dst = areq.req.f8_mp_req.qce_f8_req.data_out;
+		if (!access_ok(VERIFY_WRITE, (void __user *)
+				user_dst, total))
+			return -EFAULT;
+
+		k_buf = kmalloc(total, GFP_KERNEL);
+		if (k_buf == NULL)
+			return -ENOMEM;
+		/* k_buf returned from kmalloc should be cache line aligned */
+		if (__copy_from_user(k_buf, (void __user *)user_src, total)) {
+			kfree(k_buf);
+
+			return -EFAULT;
+		}
+
+		areq.req.f8_mp_req.qce_f8_req.data_out = k_buf;
+		areq.req.f8_mp_req.qce_f8_req.data_in = k_buf;
+
+		areq.op = QCE_OTA_MPKT_F8_OPER;
+
+		pstat->f8_mp_req++;
+		err = submit_req(&areq, podev);
+
+		if (err == 0 && __copy_to_user(user_dst, k_buf, total))
+			err = -EFAULT;
+		kfree(k_buf);
+		break;
+
+	default:
+		return -ENOTTY;
+	}
+
+	return err;
+}
+
+static int qcota_probe(struct platform_device *pdev)
+{
+	void *handle = NULL;
+	int rc = 0;
+	struct ota_dev_control *podev;
+	struct ce_hw_support ce_support;
+
+	if (pdev->id >= MAX_OTA_DEVICE) {
+		pr_err("%s: device id %d  exceeds allowed %d\n",
+			__func__, pdev->id, MAX_OTA_DEVICE);
+		return -ENOENT;
+	}
+
+	podev = &qcota_dev[pdev->id];
+
+	INIT_LIST_HEAD(&podev->ready_commands);
+	podev->active_command = NULL;
+	spin_lock_init(&podev->lock);
+	tasklet_init(&podev->done_tasklet, req_done, (unsigned long)podev);
+
+	/* open qce */
+	handle = qce_open(pdev, &rc);
+	if (handle == NULL) {
+		pr_err("%s: device id %d, can not open qce\n",
+			__func__, pdev->id);
+		platform_set_drvdata(pdev, NULL);
+		return rc;
+	}
+	if (qce_hw_support(handle, &ce_support) < 0 ||
+					ce_support.ota == false) {
+		pr_err("%s: device id %d, qce does not support ota capability\n",
+			__func__, pdev->id);
+		rc = -ENODEV;
+		goto err;
+	}
+	podev->qce = handle;
+	podev->pdev = pdev;
+	platform_set_drvdata(pdev, podev);
+
+	rc = misc_register(&podev->miscdevice);
+	if (rc < 0)
+		goto err;
+
+	return 0;
+err:
+	if (handle)
+		qce_close(handle);
+	platform_set_drvdata(pdev, NULL);
+	podev->qce = NULL;
+	podev->pdev = NULL;
+	return rc;
+};
+
+static int qcota_remove(struct platform_device *pdev)
+{
+	struct ota_dev_control *podev;
+
+	podev = platform_get_drvdata(pdev);
+	if (!podev)
+		return 0;
+	if (podev->qce)
+		qce_close(podev->qce);
+
+	if (podev->miscdevice.minor != MISC_DYNAMIC_MINOR)
+		misc_deregister(&podev->miscdevice);
+	tasklet_kill(&podev->done_tasklet);
+	return 0;
+};
+
+static struct platform_driver qcota_plat_driver = {
+	.probe = qcota_probe,
+	.remove = qcota_remove,
+	.driver = {
+		.name = "qcota",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int _disp_stats(int id)
+{
+	struct qcota_stat *pstat;
+	int len = 0;
+
+	pstat = &_qcota_stat[id];
+	len = snprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
+			"\nQualcomm OTA crypto accelerator %d Statistics:\n",
+				id + 1);
+
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 request             : %d\n",
+					pstat->f8_req);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 operation success   : %d\n",
+					pstat->f8_op_success);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 operation fail      : %d\n",
+					pstat->f8_op_fail);
+
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 MP request          : %d\n",
+					pstat->f8_mp_req);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 MP operation success: %d\n",
+					pstat->f8_mp_op_success);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 MP operation fail   : %d\n",
+					pstat->f8_mp_op_fail);
+
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F9 request             : %d\n",
+					pstat->f9_req);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F9 operation success   : %d\n",
+					pstat->f9_op_success);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F9 operation fail      : %d\n",
+					pstat->f9_op_fail);
+
+	return len;
+}
+
+static int _debug_stats_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t _debug_stats_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	int rc = -EINVAL;
+	int qcota = *((int *) file->private_data);
+	int len;
+
+	len = _disp_stats(qcota);
+
+	rc = simple_read_from_buffer((void __user *) buf, count,
+			ppos, (void *) _debug_read_buf, len);
+
+	return rc;
+}
+
+static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+
+	int qcota = *((int *) file->private_data);
+
+	memset((char *)&_qcota_stat[qcota], 0, sizeof(struct qcota_stat));
+	return count;
+};
+
+static const struct file_operations _debug_stats_ops = {
+	.open =         _debug_stats_open,
+	.read =         _debug_stats_read,
+	.write =        _debug_stats_write,
+};
+
+static int _qcota_debug_init(void)
+{
+	int rc;
+	char name[DEBUG_MAX_FNAME];
+	int i;
+	struct dentry *dent;
+
+	_debug_dent = debugfs_create_dir("qcota", NULL);
+	if (IS_ERR(_debug_dent)) {
+		pr_err("qcota debugfs_create_dir fail, error %ld\n",
+				PTR_ERR(_debug_dent));
+		return PTR_ERR(_debug_dent);
+	}
+
+	for (i = 0; i < MAX_OTA_DEVICE; i++) {
+		snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", i+1);
+		_debug_qcota[i] = i;
+		dent = debugfs_create_file(name, 0644, _debug_dent,
+				&_debug_qcota[i], &_debug_stats_ops);
+		if (dent == NULL) {
+			pr_err("qcota debugfs_create_file fail, error %ld\n",
+					PTR_ERR(dent));
+			rc = PTR_ERR(dent);
+			goto err;
+		}
+	}
+	return 0;
+err:
+	debugfs_remove_recursive(_debug_dent);
+	return rc;
+}
+
+static int __init qcota_init(void)
+{
+	int rc;
+
+	rc = _qcota_debug_init();
+	if (rc)
+		return rc;
+	return platform_driver_register(&qcota_plat_driver);
+}
+static void __exit qcota_exit(void)
+{
+	debugfs_remove_recursive(_debug_dent);
+	platform_driver_unregister(&qcota_plat_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Rohit Vaswani <rvaswani@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm Ota Crypto driver");
+MODULE_VERSION("1.01");
+
+module_init(qcota_init);
+module_exit(qcota_exit);
diff --git a/drivers/crypto/msm/qce.c b/drivers/crypto/msm/qce.c
new file mode 100644
index 0000000..b945d24
--- /dev/null
+++ b/drivers/crypto/msm/qce.c
@@ -0,0 +1,2607 @@
+/* Qualcomm Crypto Engine driver.
+ *
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+
+#include <linux/qcota.h>
+#include <mach/dma.h>
+
+#include "inc/qce.h"
+#include "inc/qcedev.h"
+#include "inc/qcryptohw_30.h"
+#include "inc/qce_ota.h"
+
+/* ADM definitions */
+#define LI_SG_CMD  (1 << 31)    /* last index in the scatter gather cmd */
+#define SRC_INDEX_SG_CMD(index) ((index & 0x3fff) << 16)
+#define DST_INDEX_SG_CMD(index) (index & 0x3fff)
+#define ADM_DESC_LAST  (1 << 31)
+
+/* Data xfer between DM and CE in blocks of 16 bytes */
+#define ADM_CE_BLOCK_SIZE  16
+
+/* Data xfer between DM and CE in blocks of 64 bytes */
+#define ADM_SHA_BLOCK_SIZE  64
+
+#define ADM_DESC_LENGTH_MASK 0xffff
+#define ADM_DESC_LENGTH(x)  (x & ADM_DESC_LENGTH_MASK)
+
+struct dmov_desc {
+	uint32_t addr;
+	uint32_t len;
+};
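+
+/*
+ * Descriptive note: each dmov_desc carries a bus address and a length word;
+ * per the masks above, the transfer length lives in the low 16 bits and
+ * bit 31 (ADM_DESC_LAST) marks the final descriptor of a list.
+ */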
+
+#define ADM_STATUS_OK 0x80000002
+
+/* Misc definitions */
+
+/* QCE max number of descriptor in a descriptor list */
+#define QCE_MAX_NUM_DESC    128
+
+/* State of DM channel */
+enum qce_chan_st_enum {
+	QCE_CHAN_STATE_IDLE = 0,
+	QCE_CHAN_STATE_IN_PROG = 1,
+	QCE_CHAN_STATE_COMP = 2,
+	QCE_CHAN_STATE_LAST
+};
+
+/*
+ * CE HW device structure.
+ * Each engine has an instance of the structure.
+ * Each engine can only handle one crypto operation at one time. It is up to
+ * the sw above to ensure single threading of operation on an engine.
+ */
+struct qce_device {
+	struct device *pdev;        /* Handle to platform_device structure */
+	unsigned char *coh_vmem;    /* Allocated coherent virtual memory */
+	dma_addr_t coh_pmem;	    /* Allocated coherent physical memory */
+	void __iomem *iobase;	    /* Virtual io base of CE HW  */
+	unsigned int phy_iobase;    /* Physical io base of CE HW    */
+	struct clk *ce_clk;	    /* Handle to CE clk */
+	unsigned int crci_in;	      /* CRCI for CE DM IN Channel   */
+	unsigned int crci_out;	      /* CRCI for CE DM OUT Channel   */
+	unsigned int crci_hash;	      /* CRCI for CE HASH   */
+	unsigned int chan_ce_in;      /* ADM channel used for CE input
+					* and auth result if authentication
+					* only operation. */
+	unsigned int chan_ce_out;     /* ADM channel used for CE output,
+					and icv for esp */
+
+
+	unsigned int *cmd_pointer_list_ce_in;
+	dma_addr_t  phy_cmd_pointer_list_ce_in;
+
+	unsigned int *cmd_pointer_list_ce_out;
+	dma_addr_t  phy_cmd_pointer_list_ce_out;
+
+	unsigned char *cmd_list_ce_in;
+	dma_addr_t  phy_cmd_list_ce_in;
+
+	unsigned char *cmd_list_ce_out;
+	dma_addr_t  phy_cmd_list_ce_out;
+
+	struct dmov_desc *ce_out_src_desc;
+	dma_addr_t  phy_ce_out_src_desc;
+
+	struct dmov_desc *ce_out_dst_desc;
+	dma_addr_t  phy_ce_out_dst_desc;
+
+	struct dmov_desc *ce_in_src_desc;
+	dma_addr_t  phy_ce_in_src_desc;
+
+	struct dmov_desc *ce_in_dst_desc;
+	dma_addr_t  phy_ce_in_dst_desc;
+
+	unsigned char *ce_out_ignore;
+	dma_addr_t phy_ce_out_ignore;
+
+	unsigned char *ce_pad;
+	dma_addr_t phy_ce_pad;
+
+	struct msm_dmov_cmd  *chan_ce_in_cmd;
+	struct msm_dmov_cmd  *chan_ce_out_cmd;
+
+	uint32_t ce_out_ignore_size;
+
+	int ce_out_dst_desc_index;
+	int ce_in_src_desc_index;
+
+	enum qce_chan_st_enum chan_ce_in_state;		/* chan ce_in state */
+	enum qce_chan_st_enum chan_ce_out_state;	/* chan ce_out state */
+
+	int chan_ce_in_status;		/* chan ce_in status      */
+	int chan_ce_out_status;		/* chan ce_out status */
+
+
+	unsigned char *dig_result;
+	dma_addr_t phy_dig_result;
+
+	/* cached aes key */
+	uint32_t aeskey[AES256_KEY_SIZE/sizeof(uint32_t)];
+
+	uint32_t aes_key_size;		/* cached aes key size in bytes */
+	int fastaes;			/* ce supports fast aes */
+	int hmac;			/* ce support hmac-sha1 */
+	bool ota;			/* ce support ota */
+
+	qce_comp_func_ptr_t qce_cb;	/* qce callback function pointer */
+
+	int assoc_nents;
+	int src_nents;
+	int dst_nents;
+
+	void *areq;
+	enum qce_cipher_mode_enum mode;
+
+	dma_addr_t phy_iv_in;
+	dma_addr_t phy_ota_src;
+	dma_addr_t phy_ota_dst;
+	unsigned int ota_size;
+	int err;
+};
+
+/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
+static uint32_t  _std_init_vector_sha1[] =   {
+	0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
+};
+/* Standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint32_t _std_init_vector_sha256[] = {
+	0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
+	0x510E527F, 0x9B05688C,	0x1F83D9AB, 0x5BE0CD19
+};
+
+/* Source: FIPS 197, Figure 7. S-box: substitution values for the byte xy */
+static const uint32_t _s_box[256] = {
+	0x63, 0x7c, 0x77, 0x7b,   0xf2, 0x6b, 0x6f, 0xc5,
+	0x30, 0x01, 0x67, 0x2b,   0xfe, 0xd7, 0xab, 0x76,
+
+	0xca, 0x82, 0xc9, 0x7d,   0xfa, 0x59, 0x47, 0xf0,
+	0xad, 0xd4, 0xa2, 0xaf,   0x9c, 0xa4, 0x72, 0xc0,
+
+	0xb7, 0xfd, 0x93, 0x26,   0x36, 0x3f, 0xf7, 0xcc,
+	0x34, 0xa5, 0xe5, 0xf1,   0x71, 0xd8, 0x31, 0x15,
+
+	0x04, 0xc7, 0x23, 0xc3,   0x18, 0x96, 0x05, 0x9a,
+	0x07, 0x12, 0x80, 0xe2,   0xeb, 0x27, 0xb2, 0x75,
+
+	0x09, 0x83, 0x2c, 0x1a,   0x1b, 0x6e, 0x5a, 0xa0,
+	0x52, 0x3b, 0xd6, 0xb3,   0x29, 0xe3, 0x2f, 0x84,
+
+	0x53, 0xd1, 0x00, 0xed,   0x20, 0xfc, 0xb1, 0x5b,
+	0x6a, 0xcb, 0xbe, 0x39,   0x4a, 0x4c, 0x58, 0xcf,
+
+	0xd0, 0xef, 0xaa, 0xfb,   0x43, 0x4d, 0x33, 0x85,
+	0x45, 0xf9, 0x02, 0x7f,   0x50, 0x3c, 0x9f, 0xa8,
+
+	0x51, 0xa3, 0x40, 0x8f,   0x92, 0x9d, 0x38, 0xf5,
+	0xbc, 0xb6, 0xda, 0x21,   0x10, 0xff, 0xf3, 0xd2,
+
+	0xcd, 0x0c, 0x13, 0xec,   0x5f, 0x97, 0x44, 0x17,
+	0xc4, 0xa7, 0x7e, 0x3d,   0x64, 0x5d, 0x19, 0x73,
+
+	0x60, 0x81, 0x4f, 0xdc,   0x22, 0x2a, 0x90, 0x88,
+	0x46, 0xee, 0xb8, 0x14,   0xde, 0x5e, 0x0b, 0xdb,
+
+	0xe0, 0x32, 0x3a, 0x0a,   0x49, 0x06, 0x24, 0x5c,
+	0xc2, 0xd3, 0xac, 0x62,   0x91, 0x95, 0xe4, 0x79,
+
+	0xe7, 0xc8, 0x37, 0x6d,   0x8d, 0xd5, 0x4e, 0xa9,
+	0x6c, 0x56, 0xf4, 0xea,   0x65, 0x7a, 0xae, 0x08,
+
+	0xba, 0x78, 0x25, 0x2e,   0x1c, 0xa6, 0xb4, 0xc6,
+	0xe8, 0xdd, 0x74, 0x1f,   0x4b, 0xbd, 0x8b, 0x8a,
+
+	0x70, 0x3e, 0xb5, 0x66,   0x48, 0x03, 0xf6, 0x0e,
+	0x61, 0x35, 0x57, 0xb9,   0x86, 0xc1, 0x1d, 0x9e,
+
+	0xe1, 0xf8, 0x98, 0x11,   0x69, 0xd9, 0x8e, 0x94,
+	0x9b, 0x1e, 0x87, 0xe9,   0xce, 0x55, 0x28, 0xdf,
+
+	0x8c, 0xa1, 0x89, 0x0d,   0xbf, 0xe6, 0x42, 0x68,
+	0x41, 0x99, 0x2d, 0x0f,   0xb0, 0x54, 0xbb, 0x16 };
+
+
+/*
+ *	Source:	FIPS 197, Sec 5.2 Key Expansion, Figure 11. Pseudo Code for Key
+ *		Expansion.
+ */
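+/*
+ * Worked example (a sketch, using the well-known FIPS 197 Appendix A.1 test
+ * key, not part of the original source): for the 128-bit key
+ * 2b7e1516 28aed2a6 abf71588 09cf4f3c, the first derived word is
+ *   w[4] = SubWord(RotWord(w[3])) ^ Rcon ^ w[0] = 0xa0fafe17,
+ * which is what the loop below computes for i == Nk.
+ */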
+static void _aes_expand_key_schedule(uint32_t keysize, uint32_t *AES_KEY,
+		uint32_t *AES_RND_KEY)
+{
+	uint32_t i;
+	uint32_t Nk;
+	uint32_t Nr, rot_data;
+	uint32_t Rcon = 0x01000000;
+	uint32_t temp;
+	uint32_t data_in;
+	uint32_t MSB_store;
+	uint32_t byte_for_sub;
+	uint32_t word_sub[4];
+
+	switch (keysize) {
+	case 192:
+		Nk = 6;
+		Nr = 12;
+		break;
+
+	case 256:
+		Nk = 8;
+		Nr = 14;
+		break;
+
+	case 128:
+	default:  /* default to AES128 */
+		Nk = 4;
+		Nr = 10;
+		break;
+	}
+
+	/* key expansion */
+	i = 0;
+	while (i < Nk) {
+		AES_RND_KEY[i] = AES_KEY[i];
+		i = i + 1;
+	}
+
+	i = Nk;
+	while (i < (4 * (Nr + 1))) {
+		temp = AES_RND_KEY[i-1];
+		if (Nr == 14) {
+			switch (i) {
+			case 8:
+				Rcon = 0x01000000;
+				break;
+
+			case 16:
+				Rcon = 0x02000000;
+				break;
+
+			case 24:
+				Rcon = 0x04000000;
+				break;
+
+			case 32:
+				Rcon = 0x08000000;
+				break;
+
+			case 40:
+				Rcon = 0x10000000;
+				break;
+
+			case 48:
+				Rcon = 0x20000000;
+				break;
+
+			case 56:
+				Rcon = 0x40000000;
+				break;
+			}
+		} else if (Nr == 12) {
+			switch (i) {
+			case  6:
+				Rcon = 0x01000000;
+				break;
+
+			case 12:
+				Rcon = 0x02000000;
+				break;
+
+			case 18:
+				Rcon = 0x04000000;
+				break;
+
+			case 24:
+				Rcon = 0x08000000;
+				break;
+
+			case 30:
+				Rcon = 0x10000000;
+				break;
+
+			case 36:
+				Rcon = 0x20000000;
+				break;
+
+			case 42:
+				Rcon = 0x40000000;
+				break;
+
+			case 48:
+				Rcon = 0x80000000;
+				break;
+			}
+		} else if (Nr == 10) {
+			switch (i) {
+			case 4:
+				Rcon = 0x01000000;
+				break;
+
+			case 8:
+				Rcon = 0x02000000;
+				break;
+
+			case 12:
+				Rcon = 0x04000000;
+				break;
+
+			case 16:
+				Rcon = 0x08000000;
+				break;
+
+			case 20:
+				Rcon = 0x10000000;
+				break;
+
+			case 24:
+				Rcon = 0x20000000;
+				break;
+
+			case 28:
+				Rcon = 0x40000000;
+				break;
+
+			case 32:
+				Rcon = 0x80000000;
+				break;
+
+			case 36:
+				Rcon = 0x1b000000;
+				break;
+
+			case 40:
+				Rcon = 0x36000000;
+				break;
+			}
+		}
+
+		if ((i % Nk) == 0) {
+			data_in   = temp;
+			MSB_store = (data_in >> 24 & 0xff);
+			rot_data  = (data_in << 8) | MSB_store;
+			byte_for_sub = rot_data;
+			word_sub[0] = _s_box[(byte_for_sub & 0xff)];
+			word_sub[1] = (_s_box[((byte_for_sub & 0xff00) >> 8)]
+								<< 8);
+			word_sub[2] = (_s_box[((byte_for_sub & 0xff0000) >> 16)]
+								<< 16);
+			word_sub[3] = (_s_box[((byte_for_sub & 0xff000000)
+								>> 24)] << 24);
+			word_sub[0] =  word_sub[0] | word_sub[1] | word_sub[2] |
+							word_sub[3];
+			temp = word_sub[0] ^ Rcon;
+		} else if ((Nk > 6) && ((i % Nk) == 4)) {
+			byte_for_sub = temp;
+			word_sub[0] = _s_box[(byte_for_sub & 0xff)];
+			word_sub[1] = (_s_box[((byte_for_sub & 0xff00) >> 8)]
+								<< 8);
+			word_sub[2] = (_s_box[((byte_for_sub & 0xff0000) >> 16)]
+								<< 16);
+			word_sub[3] = (_s_box[((byte_for_sub & 0xff000000) >>
+								 24)] << 24);
+			word_sub[0] =  word_sub[0] | word_sub[1] | word_sub[2] |
+						word_sub[3];
+			temp = word_sub[0];
+		}
+
+		AES_RND_KEY[i] = AES_RND_KEY[i-Nk]^temp;
+		i = i+1;
+	}
+}
+
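+/*
+ * Descriptive note: the helper below packs a byte stream into 32-bit words
+ * in network (big-endian) order, e.g. bytes {0x12, 0x34, 0x56, 0x78} become
+ * the word 0x12345678; a trailing partial word is left-justified with the
+ * remaining low bytes set to zero.
+ */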
+static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned n;
+
+	n = len / sizeof(uint32_t);
+	for (; n > 0; n--) {
+		*iv =  ((*b << 24)      & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   |
+				(((*(b+2)) << 8) & 0xff00)     |
+				(*(b+3)          & 0xff);
+		b += sizeof(uint32_t);
+		iv++;
+	}
+
+	n = len % sizeof(uint32_t);
+	if (n == 3) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   |
+				(((*(b+2)) << 8) & 0xff00)     ;
+	} else if (n == 2) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   ;
+	} else if (n == 1) {
+		*iv = ((*b << 24) & 0xff000000) ;
+	}
+}
+
+static void _net_words_to_byte_stream(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned n = len  / sizeof(uint32_t);
+
+	for (; n > 0; n--) {
+		*b++ = (unsigned char) ((*iv >> 24)   & 0xff);
+		*b++ = (unsigned char) ((*iv >> 16)   & 0xff);
+		*b++ = (unsigned char) ((*iv >> 8)    & 0xff);
+		*b++ = (unsigned char) (*iv           & 0xff);
+		iv++;
+	}
+	n = len % sizeof(uint32_t);
+	if (n == 3) {
+		*b++ = (unsigned char) ((*iv >> 24)   & 0xff);
+		*b++ = (unsigned char) ((*iv >> 16)   & 0xff);
+		*b =   (unsigned char) ((*iv >> 8)    & 0xff);
+	} else if (n == 2) {
+		*b++ = (unsigned char) ((*iv >> 24)   & 0xff);
+		*b =   (unsigned char) ((*iv >> 16)   & 0xff);
+	} else if (n == 1) {
+		*b =   (unsigned char) ((*iv >> 24)   & 0xff);
+	}
+}
+
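+/*
+ * Descriptive note: count_sg() returns how many scatterlist entries are
+ * needed to cover nbytes of data, walking the list with sg_next().
+ */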
+static int count_sg(struct scatterlist *sg, int nbytes)
+{
+	int i;
+
+	for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
+		nbytes -= sg->length;
+	return i;
+}
+
+static int dma_map_pmem_sg(struct buf_info *pmem, unsigned entries,
+						struct scatterlist *sg)
+{
+	int i = 0;
+	for (i = 0; i < entries; i++) {
+
+		sg->dma_address = (dma_addr_t)pmem->offset;
+		sg++;
+		pmem++;
+	}
+	return 0;
+}
+
+static int _probe_ce_engine(struct qce_device *pce_dev)
+{
+	unsigned int val;
+	unsigned int rev;
+	unsigned int eng_availability;	/* engine available functions    */
+
+	val = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
+	if ((val & 0xfffffff) != 0x0200004) {
+		dev_err(pce_dev->pdev,
+				"unknown Qualcomm crypto device at 0x%x 0x%x\n",
+				pce_dev->phy_iobase, val);
+		return -EIO;
+	};
+	rev = (val & CRYPTO_CORE_REV_MASK) >> CRYPTO_CORE_REV;
+	if (rev == 0x2) {
+		dev_info(pce_dev->pdev,
+				"Qualcomm Crypto 3e device found at 0x%x\n",
+				pce_dev->phy_iobase);
+	} else if (rev == 0x1) {
+		dev_info(pce_dev->pdev,
+				"Qualcomm Crypto 3 device found at 0x%x\n",
+				pce_dev->phy_iobase);
+	} else if (rev == 0x0) {
+		dev_info(pce_dev->pdev,
+				"Qualcomm Crypto 2 device found at 0x%x\n",
+				pce_dev->phy_iobase);
+	} else {
+		dev_err(pce_dev->pdev,
+				"unknown Qualcomm crypto device at 0x%x\n",
+				pce_dev->phy_iobase);
+		return -EIO;
+	}
+
+	eng_availability = readl_relaxed(pce_dev->iobase +
+						CRYPTO_ENGINES_AVAIL);
+
+	if (((eng_availability & CRYPTO_AES_SEL_MASK) >> CRYPTO_AES_SEL)
+			== CRYPTO_AES_SEL_FAST)
+		pce_dev->fastaes = 1;
+	else
+		pce_dev->fastaes = 0;
+
+	if (eng_availability & (1 << CRYPTO_HMAC_SEL))
+		pce_dev->hmac = 1;
+	else
+		pce_dev->hmac = 0;
+
+	if ((eng_availability & (1 << CRYPTO_F9_SEL)) &&
+			(eng_availability & (1 << CRYPTO_F8_SEL)))
+		pce_dev->ota = true;
+	else
+		pce_dev->ota = false;
+
+	pce_dev->aes_key_size = 0;
+
+	return 0;
+};
+
+static int _init_ce_engine(struct qce_device *pce_dev)
+{
+	unsigned int val;
+
+	/* reset qce */
+	writel_relaxed(1 << CRYPTO_SW_RST, pce_dev->iobase + CRYPTO_CONFIG_REG);
+
+	/* Ensure previous instruction (write to reset bit)
+	 * was completed.
+	 */
+	mb();
+	/* configure ce */
+	val = (1 << CRYPTO_MASK_DOUT_INTR) | (1 << CRYPTO_MASK_DIN_INTR) |
+			(1 << CRYPTO_MASK_AUTH_DONE_INTR) |
+					(1 << CRYPTO_MASK_ERR_INTR);
+	writel_relaxed(val, pce_dev->iobase + CRYPTO_CONFIG_REG);
+
+	if (_probe_ce_engine(pce_dev) < 0)
+		return -EIO;
+	if (readl_relaxed(pce_dev->iobase + CRYPTO_CONFIG_REG) != val) {
+		dev_err(pce_dev->pdev,
+				"unknown Qualcomm crypto device at 0x%x\n",
+				pce_dev->phy_iobase);
+		return -EIO;
+	};
+	return 0;
+};
+
+static int _sha_ce_setup(struct qce_device *pce_dev, struct qce_sha_req *sreq)
+{
+	uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
+	uint32_t diglen;
+	int rc;
+	int i;
+	uint32_t cfg = 0;
+
+	/* if not the last, the size has to be on the block boundary */
+	if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
+		return -EIO;
+
+	switch (sreq->alg) {
+	case QCE_HASH_SHA1:
+		diglen = SHA1_DIGEST_SIZE;
+		break;
+	case QCE_HASH_SHA256:
+		diglen = SHA256_DIGEST_SIZE;
+		break;
+	default:
+		return -EINVAL;
+	}
+	/*
+	 * write 20/32 bytes, 5/8 words into auth_iv
+	 *  for SHA1/SHA256
+	 */
+
+	if (sreq->first_blk) {
+		if (sreq->alg == QCE_HASH_SHA1) {
+			for (i = 0; i < 5; i++)
+				auth32[i] = _std_init_vector_sha1[i];
+		} else {
+			for (i = 0; i < 8; i++)
+				auth32[i] = _std_init_vector_sha256[i];
+		}
+	} else
+		_byte_stream_to_net_words(auth32, sreq->digest, diglen);
+
+	rc = clk_enable(pce_dev->ce_clk);
+	if (rc)
+		return rc;
+
+	writel_relaxed(auth32[0], pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
+	writel_relaxed(auth32[1], pce_dev->iobase + CRYPTO_AUTH_IV1_REG);
+	writel_relaxed(auth32[2], pce_dev->iobase + CRYPTO_AUTH_IV2_REG);
+	writel_relaxed(auth32[3], pce_dev->iobase + CRYPTO_AUTH_IV3_REG);
+	writel_relaxed(auth32[4], pce_dev->iobase + CRYPTO_AUTH_IV4_REG);
+
+	if (sreq->alg == QCE_HASH_SHA256) {
+		writel_relaxed(auth32[5], pce_dev->iobase +
+							CRYPTO_AUTH_IV5_REG);
+		writel_relaxed(auth32[6], pce_dev->iobase +
+							CRYPTO_AUTH_IV6_REG);
+		writel_relaxed(auth32[7], pce_dev->iobase +
+							CRYPTO_AUTH_IV7_REG);
+	}
+	/* write auth_bytecnt 0/1, start with 0 */
+	writel_relaxed(sreq->auth_data[0], pce_dev->iobase +
+						CRYPTO_AUTH_BYTECNT0_REG);
+	writel_relaxed(sreq->auth_data[1], pce_dev->iobase +
+						CRYPTO_AUTH_BYTECNT1_REG);
+
+	/* write auth_seg_cfg */
+	writel_relaxed(sreq->size << CRYPTO_AUTH_SEG_SIZE,
+			pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+
+	/*
+	 * write seg_cfg
+	 */
+
+	if (sreq->alg == QCE_HASH_SHA1)
+		cfg |= (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE);
+	else
+		cfg |= (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE);
+
+	if (sreq->first_blk)
+		cfg |= 1 << CRYPTO_FIRST;
+	if (sreq->last_blk)
+		cfg |= 1 << CRYPTO_LAST;
+	cfg |= CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG;
+	writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);
+
+	/* write seg_size   */
+	writel_relaxed(sreq->size, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+	/* issue go to crypto   */
+	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
+	/* Ensure previous instructions (setting the GO register)
+	 * was completed before issuing a DMA transfer request
+	 */
+	mb();
+
+	return 0;
+}
+
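+/*
+ * Descriptive note: _ce_setup() programs the crypto engine registers for a
+ * cipher request (and, for QCE_REQ_AEAD, the HMAC-SHA1 authentication setup):
+ * keys, IVs, segment configuration and sizes, then sets the GO bit. The data
+ * itself is moved afterwards over the ADM DMA channels (chan_ce_in and
+ * chan_ce_out) configured elsewhere in this driver.
+ */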
+static int _ce_setup(struct qce_device *pce_dev, struct qce_req *q_req,
+		uint32_t totallen, uint32_t coffset)
+{
+	uint32_t hmackey[HMAC_KEY_SIZE/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0};
+	uint32_t enckey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0, 0, 0, 0};
+	uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
+			0, 0, 0, 0};
+	uint32_t enck_size_in_word = q_req->encklen / sizeof(uint32_t);
+	int aes_key_chg;
+	int i, rc;
+	uint32_t aes_round_key[CRYPTO_AES_RNDKEYS];
+	uint32_t cfg;
+	uint32_t ivsize = q_req->ivsize;
+
+	rc = clk_enable(pce_dev->ce_clk);
+	if (rc)
+		return rc;
+
+	cfg = (1 << CRYPTO_FIRST) | (1 << CRYPTO_LAST);
+	if (q_req->op == QCE_REQ_AEAD) {
+
+		/* do authentication setup */
+
+		cfg |= (CRYPTO_AUTH_SIZE_HMAC_SHA1 << CRYPTO_AUTH_SIZE)|
+				(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG);
+
+		/* write sha1 init vector */
+		writel_relaxed(_std_init_vector_sha1[0],
+				pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
+		writel_relaxed(_std_init_vector_sha1[1],
+				pce_dev->iobase + CRYPTO_AUTH_IV1_REG);
+		writel_relaxed(_std_init_vector_sha1[2],
+				pce_dev->iobase + CRYPTO_AUTH_IV2_REG);
+		writel_relaxed(_std_init_vector_sha1[3],
+				pce_dev->iobase + CRYPTO_AUTH_IV3_REG);
+		writel_relaxed(_std_init_vector_sha1[4],
+				pce_dev->iobase + CRYPTO_AUTH_IV4_REG);
+		/* write hmac key */
+		_byte_stream_to_net_words(hmackey, q_req->authkey,
+						q_req->authklen);
+		writel_relaxed(hmackey[0], pce_dev->iobase +
+							CRYPTO_AUTH_IV5_REG);
+		writel_relaxed(hmackey[1], pce_dev->iobase +
+							CRYPTO_AUTH_IV6_REG);
+		writel_relaxed(hmackey[2], pce_dev->iobase +
+							CRYPTO_AUTH_IV7_REG);
+		writel_relaxed(hmackey[3], pce_dev->iobase +
+							CRYPTO_AUTH_IV8_REG);
+		writel_relaxed(hmackey[4], pce_dev->iobase +
+							CRYPTO_AUTH_IV9_REG);
+		writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG);
+		writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT1_REG);
+
+		/* write auth_seg_cfg */
+		writel_relaxed((totallen << CRYPTO_AUTH_SEG_SIZE) & 0xffff0000,
+				pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+
+	}
+
+	_byte_stream_to_net_words(enckey32, q_req->enckey, q_req->encklen);
+
+	switch (q_req->mode) {
+	case QCE_MODE_ECB:
+		cfg |= (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+		break;
+
+	case QCE_MODE_CBC:
+		cfg |= (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+		break;
+
+	case QCE_MODE_CTR:
+	default:
+		cfg |= (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
+		break;
+	}
+	pce_dev->mode = q_req->mode;
+
+	switch (q_req->alg) {
+	case CIPHER_ALG_DES:
+		if (q_req->mode !=  QCE_MODE_ECB) {
+			_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
+			writel_relaxed(enciv32[0], pce_dev->iobase +
+						CRYPTO_CNTR0_IV0_REG);
+			writel_relaxed(enciv32[1], pce_dev->iobase +
+						CRYPTO_CNTR1_IV1_REG);
+		}
+		writel_relaxed(enckey32[0], pce_dev->iobase +
+							CRYPTO_DES_KEY0_REG);
+		writel_relaxed(enckey32[1], pce_dev->iobase +
+							CRYPTO_DES_KEY1_REG);
+		cfg |= ((CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ)  |
+				(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG));
+		break;
+
+	case CIPHER_ALG_3DES:
+		if (q_req->mode !=  QCE_MODE_ECB) {
+			_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
+			writel_relaxed(enciv32[0], pce_dev->iobase +
+						CRYPTO_CNTR0_IV0_REG);
+			writel_relaxed(enciv32[1], pce_dev->iobase +
+						CRYPTO_CNTR1_IV1_REG);
+		}
+		writel_relaxed(enckey32[0], pce_dev->iobase +
+							CRYPTO_DES_KEY0_REG);
+		writel_relaxed(enckey32[1], pce_dev->iobase +
+							CRYPTO_DES_KEY1_REG);
+		writel_relaxed(enckey32[2], pce_dev->iobase +
+							CRYPTO_DES_KEY2_REG);
+		writel_relaxed(enckey32[3], pce_dev->iobase +
+							CRYPTO_DES_KEY3_REG);
+		writel_relaxed(enckey32[4], pce_dev->iobase +
+							CRYPTO_DES_KEY4_REG);
+		writel_relaxed(enckey32[5], pce_dev->iobase +
+							CRYPTO_DES_KEY5_REG);
+		cfg |= ((CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ)  |
+				(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG));
+		break;
+
+	case CIPHER_ALG_AES:
+	default:
+		if (q_req->mode !=  QCE_MODE_ECB) {
+			_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
+			writel_relaxed(enciv32[0], pce_dev->iobase +
+						CRYPTO_CNTR0_IV0_REG);
+			writel_relaxed(enciv32[1], pce_dev->iobase +
+						CRYPTO_CNTR1_IV1_REG);
+			writel_relaxed(enciv32[2], pce_dev->iobase +
+						CRYPTO_CNTR2_IV2_REG);
+			writel_relaxed(enciv32[3], pce_dev->iobase +
+						CRYPTO_CNTR3_IV3_REG);
+		}
+		/* set number of counter bits */
+		writel_relaxed(0xffff, pce_dev->iobase + CRYPTO_CNTR_MASK_REG);
+
+		if (q_req->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
+			cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
+						CRYPTO_ENCR_KEY_SZ);
+			cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
+		} else {
+			switch (q_req->encklen) {
+			case AES128_KEY_SIZE:
+				cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
+							CRYPTO_ENCR_KEY_SZ);
+				break;
+			case AES192_KEY_SIZE:
+				cfg |= (CRYPTO_ENCR_KEY_SZ_AES192 <<
+							CRYPTO_ENCR_KEY_SZ);
+				break;
+			case AES256_KEY_SIZE:
+			default:
+				cfg |= (CRYPTO_ENCR_KEY_SZ_AES256 <<
+							CRYPTO_ENCR_KEY_SZ);
+
+				/* check for null key. If null, use hw key */
+				for (i = 0; i < enck_size_in_word; i++) {
+					if (enckey32[i] != 0)
+						break;
+				}
+				if (i == enck_size_in_word)
+					cfg |= 1 << CRYPTO_USE_HW_KEY;
+				break;
+			} /* end of switch (q_req->encklen) */
+
+			cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
+			if (pce_dev->aes_key_size !=  q_req->encklen)
+				aes_key_chg = 1;
+			else {
+				for (i = 0; i < enck_size_in_word; i++) {
+					if (enckey32[i] != pce_dev->aeskey[i])
+						break;
+				}
+				aes_key_chg = (i == enck_size_in_word) ? 0 : 1;
+			}
+
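+			/*
+			 * Reload the AES key only when it differs from the
+			 * cached key. A core with fast AES takes the raw key
+			 * words directly; otherwise the round keys are
+			 * expanded in software and written out.
+			 */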
+			if (aes_key_chg) {
+				if (pce_dev->fastaes) {
+					for (i = 0; i < enck_size_in_word;
+									i++) {
+						writel_relaxed(enckey32[i],
+							pce_dev->iobase +
+							CRYPTO_AES_RNDKEY0 +
+							(i * sizeof(uint32_t)));
+					}
+				} else {
+					/* key size in bits */
+					_aes_expand_key_schedule(
+						q_req->encklen * 8,
+						enckey32, aes_round_key);
+
+					for (i = 0; i < CRYPTO_AES_RNDKEYS;
+									i++) {
+						writel_relaxed(aes_round_key[i],
+							pce_dev->iobase +
+							CRYPTO_AES_RNDKEY0 +
+							(i * sizeof(uint32_t)));
+					}
+				}
+
+				pce_dev->aes_key_size = q_req->encklen;
+				for (i = 0; i < enck_size_in_word; i++)
+					pce_dev->aeskey[i] = enckey32[i];
+			} /*if (aes_key_chg) { */
+		} /* else of if (q_req->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
+		break;
+	} /* end of switch (q_req->alg) */
+
+	if (q_req->dir == QCE_ENCRYPT)
+		cfg |= (1 << CRYPTO_AUTH_POS);
+	cfg |= ((q_req->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
+
+	/* write encr seg cfg */
+	writel_relaxed((q_req->cryptlen << CRYPTO_ENCR_SEG_SIZE) |
+			(coffset & 0xffff),      /* cipher offset */
+			pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+	/* write seg cfg and size */
+	writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);
+	writel_relaxed(totallen, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+	/* issue go to crypto   */
+	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
+	/* Ensure the previous writes (including setting the GO register)
+	 * have completed before issuing a DMA transfer request.
+	 */
+	mb();
+	return 0;
+};
+
+static int _aead_complete(struct qce_device *pce_dev)
+{
+	struct aead_request *areq;
+	struct crypto_aead *aead;
+	uint32_t ivsize;
+	uint32_t iv_out[4];
+	unsigned char iv[4 * sizeof(uint32_t)];
+	uint32_t status;
+
+	areq = (struct aead_request *) pce_dev->areq;
+	aead = crypto_aead_reqtfm(areq);
+	ivsize = crypto_aead_ivsize(aead);
+
+	if (areq->src != areq->dst) {
+		dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+					DMA_FROM_DEVICE);
+	}
+	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+	dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
+			ivsize, DMA_TO_DEVICE);
+	dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
+			DMA_TO_DEVICE);
+
+	/* check ce error status */
+	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
+	if (status & (1 << CRYPTO_SW_ERR)) {
+		pce_dev->err++;
+		dev_err(pce_dev->pdev,
+			"Qualcomm Crypto Error at 0x%x, status %x\n",
+			pce_dev->phy_iobase, status);
+		_init_ce_engine(pce_dev);
+		clk_disable(pce_dev->ce_clk);
+		pce_dev->qce_cb(areq, pce_dev->dig_result, NULL, -ENXIO);
+		return 0;
+	};
+
+	/* get iv out */
+	if (pce_dev->mode == QCE_MODE_ECB) {
+		clk_disable(pce_dev->ce_clk);
+		pce_dev->qce_cb(areq, pce_dev->dig_result, NULL,
+				pce_dev->chan_ce_in_status |
+				pce_dev->chan_ce_out_status);
+	} else {
+
+		iv_out[0] = readl_relaxed(pce_dev->iobase +
+							CRYPTO_CNTR0_IV0_REG);
+		iv_out[1] = readl_relaxed(pce_dev->iobase +
+							CRYPTO_CNTR1_IV1_REG);
+		iv_out[2] = readl_relaxed(pce_dev->iobase +
+							CRYPTO_CNTR2_IV2_REG);
+		iv_out[3] = readl_relaxed(pce_dev->iobase +
+							CRYPTO_CNTR3_IV3_REG);
+
+		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
+		clk_disable(pce_dev->ce_clk);
+		pce_dev->qce_cb(areq, pce_dev->dig_result, iv,
+				pce_dev->chan_ce_in_status |
+				pce_dev->chan_ce_out_status);
+	};
+	return 0;
+};
+
+static void _sha_complete(struct qce_device *pce_dev)
+{
+
+	struct ahash_request *areq;
+	uint32_t auth_data[2];
+	uint32_t status;
+
+	areq = (struct ahash_request *) pce_dev->areq;
+	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+				DMA_TO_DEVICE);
+
+	/* check ce error status */
+	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
+	if (status & (1 << CRYPTO_SW_ERR)) {
+		pce_dev->err++;
+		dev_err(pce_dev->pdev,
+			"Qualcomm Crypto Error at 0x%x, status %x\n",
+			pce_dev->phy_iobase, status);
+		_init_ce_engine(pce_dev);
+		clk_disable(pce_dev->ce_clk);
+		pce_dev->qce_cb(areq, pce_dev->dig_result, NULL, -ENXIO);
+		return;
+	};
+
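+	/*
+	 * The running byte counts are returned to the caller so that a
+	 * partial hash operation can be resumed later.
+	 */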
+	auth_data[0] = readl_relaxed(pce_dev->iobase +
+						CRYPTO_AUTH_BYTECNT0_REG);
+	auth_data[1] = readl_relaxed(pce_dev->iobase +
+						CRYPTO_AUTH_BYTECNT1_REG);
+	/* Ensure the previous reads (retrieving the byte count
+	 * information) have completed before disabling the clock.
+	 */
+	mb();
+	clk_disable(pce_dev->ce_clk);
+	pce_dev->qce_cb(areq,  pce_dev->dig_result, (unsigned char *)auth_data,
+				pce_dev->chan_ce_in_status);
+};
+
+static int _ablk_cipher_complete(struct qce_device *pce_dev)
+{
+	struct ablkcipher_request *areq;
+	uint32_t iv_out[4];
+	unsigned char iv[4 * sizeof(uint32_t)];
+	uint32_t status;
+
+	areq = (struct ablkcipher_request *) pce_dev->areq;
+
+	if (areq->src != areq->dst) {
+		dma_unmap_sg(pce_dev->pdev, areq->dst,
+			pce_dev->dst_nents, DMA_FROM_DEVICE);
+	}
+	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+		(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+						DMA_TO_DEVICE);
+
+	/* check ce error status */
+	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
+	if (status & (1 << CRYPTO_SW_ERR)) {
+		pce_dev->err++;
+		dev_err(pce_dev->pdev,
+			"Qualcomm Crypto Error at 0x%x, status %x\n",
+			pce_dev->phy_iobase, status);
+		_init_ce_engine(pce_dev);
+		clk_disable(pce_dev->ce_clk);
+		pce_dev->qce_cb(areq, NULL, NULL, -ENXIO);
+		return 0;
+	};
+
+	/* get iv out */
+	if (pce_dev->mode == QCE_MODE_ECB) {
+		clk_disable(pce_dev->ce_clk);
+		pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status |
+					pce_dev->chan_ce_out_status);
+	} else {
+		iv_out[0] = readl_relaxed(pce_dev->iobase +
+							CRYPTO_CNTR0_IV0_REG);
+		iv_out[1] = readl_relaxed(pce_dev->iobase +
+							CRYPTO_CNTR1_IV1_REG);
+		iv_out[2] = readl_relaxed(pce_dev->iobase +
+							CRYPTO_CNTR2_IV2_REG);
+		iv_out[3] = readl_relaxed(pce_dev->iobase +
+							CRYPTO_CNTR3_IV3_REG);
+
+		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
+		clk_disable(pce_dev->ce_clk);
+		pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status |
+					pce_dev->chan_ce_out_status);
+	}
+
+	return 0;
+};
+
+static int _ablk_cipher_use_pmem_complete(struct qce_device *pce_dev)
+{
+	struct ablkcipher_request *areq;
+	uint32_t iv_out[4];
+	unsigned char iv[4 * sizeof(uint32_t)];
+	uint32_t status;
+
+	areq = (struct ablkcipher_request *) pce_dev->areq;
+
+	/* check ce error status */
+	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
+	if (status & (1 << CRYPTO_SW_ERR)) {
+		pce_dev->err++;
+		dev_err(pce_dev->pdev,
+			"Qualcomm Crypto Error at 0x%x, status %x\n",
+			pce_dev->phy_iobase, status);
+		_init_ce_engine(pce_dev);
+		clk_disable(pce_dev->ce_clk);
+		pce_dev->qce_cb(areq, NULL, NULL, -ENXIO);
+		return 0;
+	};
+
+	/* get iv out */
+	if (pce_dev->mode == QCE_MODE_ECB) {
+		clk_disable(pce_dev->ce_clk);
+		pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status |
+					pce_dev->chan_ce_out_status);
+	} else {
+		iv_out[0] = readl_relaxed(pce_dev->iobase +
+							CRYPTO_CNTR0_IV0_REG);
+		iv_out[1] = readl_relaxed(pce_dev->iobase +
+							CRYPTO_CNTR1_IV1_REG);
+		iv_out[2] = readl_relaxed(pce_dev->iobase +
+							CRYPTO_CNTR2_IV2_REG);
+		iv_out[3] = readl_relaxed(pce_dev->iobase +
+							CRYPTO_CNTR3_IV3_REG);
+
+		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
+		clk_disable(pce_dev->ce_clk);
+		pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status |
+					pce_dev->chan_ce_out_status);
+	}
+
+	return 0;
+};
+
+
+
+static int _chain_sg_buffer_in(struct qce_device *pce_dev,
+		struct scatterlist *sg, unsigned int nbytes)
+{
+	unsigned int len;
+	unsigned int dlen;
+	struct dmov_desc *pdesc;
+
+	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
+	/*
+	 * Two consecutive chunks that are physically contiguous are
+	 * merged into the current buffer descriptor.
+	 */
+	while (nbytes > 0) {
+		len = min(nbytes, sg_dma_len(sg));
+		dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
+		nbytes -= len;
+		if (dlen == 0) {
+			pdesc->addr  = sg_dma_address(sg);
+			pdesc->len = len;
+		} else if (sg_dma_address(sg) == (pdesc->addr + dlen)) {
+			pdesc->len = dlen + len;
+		} else {
+			pce_dev->ce_in_src_desc_index++;
+			if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC)
+				return -ENOMEM;
+			pdesc++;
+			pdesc->len = len;
+			pdesc->addr = sg_dma_address(sg);
+		}
+		if (nbytes > 0)
+			sg = sg_next(sg);
+	}
+	return 0;
+}
+
+static int _chain_pm_buffer_in(struct qce_device *pce_dev,
+		unsigned int pmem, unsigned int nbytes)
+{
+	unsigned int dlen;
+	struct dmov_desc *pdesc;
+
+	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
+	dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
+	if (dlen == 0) {
+		pdesc->addr  = pmem;
+		pdesc->len = nbytes;
+	} else if (pmem == (pdesc->addr + dlen)) {
+		pdesc->len  = dlen + nbytes;
+	} else {
+		pce_dev->ce_in_src_desc_index++;
+		if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC)
+			return -ENOMEM;
+		pdesc++;
+		pdesc->len = nbytes;
+		pdesc->addr = pmem;
+	}
+	return 0;
+}
+
+static void _chain_buffer_in_init(struct qce_device *pce_dev)
+{
+	struct dmov_desc *pdesc;
+
+	pce_dev->ce_in_src_desc_index = 0;
+	pdesc = pce_dev->ce_in_src_desc;
+	pdesc->len = 0;
+}
+
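+/*
+ * Terminate the ce_in descriptor and command lists. With ncmd == 1 the
+ * scatter-gather command is the last command; with ncmd == 2 the following
+ * single-mode command (the digest read) terminates the list instead.
+ */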
+static void _ce_in_final(struct qce_device *pce_dev, int ncmd, unsigned total)
+{
+	struct dmov_desc *pdesc;
+	dmov_sg *pcmd;
+
+	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
+	pdesc->len |= ADM_DESC_LAST;
+	pdesc = pce_dev->ce_in_dst_desc;
+	pdesc->len = ADM_DESC_LAST | total;
+
+	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in;
+	if (ncmd == 1)
+		pcmd->cmd |= CMD_LC;
+	else {
+		dmov_s  *pscmd;
+
+		pcmd->cmd &= ~CMD_LC;
+		pcmd++;
+		pscmd = (dmov_s *)pcmd;
+		pscmd->cmd |= CMD_LC;
+	}
+
+#ifdef QCE_DEBUG
+	dev_info(pce_dev->pdev, "_ce_in_final %d\n",
+					pce_dev->ce_in_src_desc_index);
+#endif
+}
+
+#ifdef QCE_DEBUG
+static void _ce_in_dump(struct qce_device *pce_dev)
+{
+	int i;
+	struct dmov_desc *pdesc;
+
+	dev_info(pce_dev->pdev, "_ce_in_dump\n");
+	for (i = 0; i <= pce_dev->ce_in_src_desc_index; i++) {
+		pdesc = pce_dev->ce_in_src_desc + i;
+		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
+				pdesc->len);
+	}
+	pdesc = pce_dev->ce_in_dst_desc;
+	dev_info(pce_dev->pdev, "dst - %x , %x\n", pdesc->addr,
+				pdesc->len);
+};
+
+static void _ce_out_dump(struct qce_device *pce_dev)
+{
+	int i;
+	struct dmov_desc *pdesc;
+
+	dev_info(pce_dev->pdev, "_ce_out_dump\n");
+	for (i = 0; i <= pce_dev->ce_out_dst_desc_index; i++) {
+		pdesc = pce_dev->ce_out_dst_desc + i;
+		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
+				pdesc->len);
+	}
+	pdesc = pce_dev->ce_out_src_desc;
+	dev_info(pce_dev->pdev, "src - %x , %x\n", pdesc->addr,
+				pdesc->len);
+};
+#endif
+
+static int _chain_sg_buffer_out(struct qce_device *pce_dev,
+		struct scatterlist *sg, unsigned int nbytes)
+{
+	unsigned int len;
+	unsigned int dlen;
+	struct dmov_desc *pdesc;
+
+	pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
+	/*
+	 * Two consecutive chunks that are physically contiguous are
+	 * merged into the current buffer descriptor.
+	 */
+	while (nbytes > 0) {
+		len = min(nbytes, sg_dma_len(sg));
+		dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
+		nbytes -= len;
+		if (dlen == 0) {
+			pdesc->addr  = sg_dma_address(sg);
+			pdesc->len = len;
+		} else if (sg_dma_address(sg) == (pdesc->addr + dlen)) {
+			pdesc->len  = dlen + len;
+		} else {
+			pce_dev->ce_out_dst_desc_index++;
+			if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC)
+				return -EIO;
+			pdesc++;
+			pdesc->len = len;
+			pdesc->addr = sg_dma_address(sg);
+		}
+		if (nbytes > 0)
+			sg = sg_next(sg);
+	}
+	return 0;
+}
+
+static int _chain_pm_buffer_out(struct qce_device *pce_dev,
+		unsigned int pmem, unsigned int nbytes)
+{
+	unsigned int dlen;
+	struct dmov_desc *pdesc;
+
+	pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
+	dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
+
+	if (dlen == 0) {
+		pdesc->addr  = pmem;
+		pdesc->len = nbytes;
+	} else if (pmem == (pdesc->addr + dlen)) {
+		pdesc->len  = dlen + nbytes;
+	} else {
+		pce_dev->ce_out_dst_desc_index++;
+		if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC)
+			return -EIO;
+		pdesc++;
+		pdesc->len = nbytes;
+		pdesc->addr = pmem;
+	}
+	return 0;
+};
+
+static void _chain_buffer_out_init(struct qce_device *pce_dev)
+{
+	struct dmov_desc *pdesc;
+
+	pce_dev->ce_out_dst_desc_index = 0;
+	pdesc = pce_dev->ce_out_dst_desc;
+	pdesc->len = 0;
+};
+
+static void _ce_out_final(struct qce_device *pce_dev, int ncmd, unsigned total)
+{
+	struct dmov_desc *pdesc;
+	dmov_sg *pcmd;
+
+	pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
+	pdesc->len |= ADM_DESC_LAST;
+	pdesc = pce_dev->ce_out_src_desc;
+	pdesc->len = ADM_DESC_LAST | total;
+	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out;
+	if (ncmd == 1)
+		pcmd->cmd |= CMD_LC;
+	else {
+		dmov_s  *pscmd;
+
+		pcmd->cmd &= ~CMD_LC;
+		pcmd++;
+		pscmd = (dmov_s *)pcmd;
+		pscmd->cmd |= CMD_LC;
+	}
+#ifdef QCE_DEBUG
+	dev_info(pce_dev->pdev, "_ce_out_final %d\n",
+			pce_dev->ce_out_dst_desc_index);
+#endif
+
+};
+
+static void _aead_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+							result);
+		pce_dev->chan_ce_in_status = -1;
+	} else
+		pce_dev->chan_ce_in_status = 0;
+
+	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
+	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+		/* done */
+		_aead_complete(pce_dev);
+	}
+};
+
+static void _aead_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+							result);
+		pce_dev->chan_ce_out_status = -1;
+	} else {
+		pce_dev->chan_ce_out_status = 0;
+	};
+
+	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
+	if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+		/* done */
+		_aead_complete(pce_dev);
+	}
+
+};
+
+static void _sha_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+						result);
+		pce_dev->chan_ce_in_status = -1;
+	} else
+		pce_dev->chan_ce_in_status = 0;
+	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+	_sha_complete(pce_dev);
+};
+
+static void _ablk_cipher_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+						result);
+		pce_dev->chan_ce_in_status = -1;
+	} else
+		pce_dev->chan_ce_in_status = 0;
+
+	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
+	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+		/* done */
+		_ablk_cipher_complete(pce_dev);
+	}
+};
+
+static void _ablk_cipher_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+						result);
+		pce_dev->chan_ce_out_status = -1;
+	} else {
+		pce_dev->chan_ce_out_status = 0;
+	};
+
+	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
+	if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+		/* done */
+		_ablk_cipher_complete(pce_dev);
+	}
+};
+
+
+static void _ablk_cipher_ce_in_call_back_pmem(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+						result);
+		pce_dev->chan_ce_in_status = -1;
+	} else
+		pce_dev->chan_ce_in_status = 0;
+
+	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
+	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+		/* done */
+		_ablk_cipher_use_pmem_complete(pce_dev);
+	}
+};
+
+static void _ablk_cipher_ce_out_call_back_pmem(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+						result);
+		pce_dev->chan_ce_out_status = -1;
+	} else {
+		pce_dev->chan_ce_out_status = 0;
+	};
+
+	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
+	if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+		/* done */
+		_ablk_cipher_use_pmem_complete(pce_dev);
+	}
+};
+
+static int _setup_cmd_template(struct qce_device *pce_dev)
+{
+	dmov_sg *pcmd;
+	dmov_s  *pscmd;
+	struct dmov_desc *pdesc;
+	unsigned char *vaddr;
+
+	/* Divide up the 4K coherent memory */
+	/* 1. ce_in channel 1st command src descriptors, 128 entries */
+	vaddr = pce_dev->coh_vmem;
+	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
+	pce_dev->ce_in_src_desc = (struct dmov_desc *) vaddr;
+	pce_dev->phy_ce_in_src_desc = pce_dev->coh_pmem +
+			 (vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);
+
+	/* 2. ce_in channel 1st command dst descriptor, 1 entry */
+	pce_dev->ce_in_dst_desc = (struct dmov_desc *) vaddr;
+	pce_dev->phy_ce_in_dst_desc = pce_dev->coh_pmem +
+			 (vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + sizeof(struct dmov_desc) ;
+
+	/*
+	 * 3. ce_in channel command list of one scatter gather command
+	 *    and one simple command.
+	 */
+	pce_dev->cmd_list_ce_in = vaddr;
+	pce_dev->phy_cmd_list_ce_in = pce_dev->coh_pmem
+			 + (vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + sizeof(dmov_s) + sizeof(dmov_sg);
+
+	/* 4. authentication result. */
+	pce_dev->dig_result = vaddr;
+	pce_dev->phy_dig_result = pce_dev->coh_pmem +
+			(vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + SHA256_DIGESTSIZE;
+
+	/*
+	 * 5. ce_out channel command list of one scatter gather command
+	 *    and one simple command.
+	 */
+	pce_dev->cmd_list_ce_out = vaddr;
+	pce_dev->phy_cmd_list_ce_out = pce_dev->coh_pmem
+			 + (vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + sizeof(dmov_s) + sizeof(dmov_sg);
+
+	/* 6. ce_out channel command src descriptors, 1 entry */
+	pce_dev->ce_out_src_desc = (struct dmov_desc *) vaddr;
+	pce_dev->phy_ce_out_src_desc = pce_dev->coh_pmem
+			 + (vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + sizeof(struct dmov_desc) ;
+
+	/* 7. ce_out channel command dst descriptors, 128 entries.  */
+	pce_dev->ce_out_dst_desc = (struct dmov_desc *) vaddr;
+	pce_dev->phy_ce_out_dst_desc = pce_dev->coh_pmem
+			 + (vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);
+
+	/* 8. pad area. */
+	pce_dev->ce_pad = vaddr;
+	pce_dev->phy_ce_pad = pce_dev->coh_pmem +
+			(vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + ADM_CE_BLOCK_SIZE;
+
+	/* 9. ce_in channel command pointer list.	 */
+	pce_dev->cmd_pointer_list_ce_in = (unsigned int *) vaddr;
+	pce_dev->phy_cmd_pointer_list_ce_in = pce_dev->coh_pmem +
+			(vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + sizeof(unsigned char *);
+	vaddr = (unsigned char *) ALIGN(((unsigned int) vaddr), 8);
+
+	/* 10. ce_out channel command pointer list. */
+	pce_dev->cmd_pointer_list_ce_out = (unsigned int *) vaddr;
+	pce_dev->phy_cmd_pointer_list_ce_out =  pce_dev->coh_pmem +
+			(vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + sizeof(unsigned char *);
+
+	/* 11. throw away area to store by-pass data from ce_out. */
+	pce_dev->ce_out_ignore = (unsigned char *) vaddr;
+	pce_dev->phy_ce_out_ignore  = pce_dev->coh_pmem
+			+ (vaddr - pce_dev->coh_vmem);
+	pce_dev->ce_out_ignore_size = PAGE_SIZE - (vaddr -
+			pce_dev->coh_vmem);  /* at least 1.5 K of space */
+	/*
+	 * The first command of command list ce_in is for the input of
+	 * concurrent operation of encrypt/decrypt or for the input
+	 * of authentication.
+	 */
+	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in;
+	/* swap byte and half word, dst crci, scatter gather */
+	pcmd->cmd = CMD_DST_SWAP_BYTES | CMD_DST_SWAP_SHORTS |
+			CMD_DST_CRCI(pce_dev->crci_in) | CMD_MODE_SG;
+	pdesc = pce_dev->ce_in_src_desc;
+	pdesc->addr = 0;	/* to be filled in each operation */
+	pdesc->len = 0;		/* to be filled in each operation */
+	pcmd->src_dscr = (unsigned) pce_dev->phy_ce_in_src_desc;
+	pdesc = pce_dev->ce_in_dst_desc;
+	pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase);
+	pdesc->len = 0 | ADM_DESC_LAST;	/* to be filled in each operation */
+	pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_in_dst_desc;
+	pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) |
+						DST_INDEX_SG_CMD(0);
+	pcmd++;
+	/*
+	 * The second command is for the digested data of
+	 * hashing operation only. For others, this command is not used.
+	 */
+	pscmd = (dmov_s *) pcmd;
+	/* last command, swap byte, half word, src crci, single   */
+	pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS |
+			CMD_SRC_CRCI(pce_dev->crci_hash) | CMD_MODE_SINGLE;
+	pscmd->src = (unsigned) (CRYPTO_AUTH_IV0_REG + pce_dev->phy_iobase);
+	pscmd->len = SHA256_DIGESTSIZE;	/* to be filled.  */
+	pscmd->dst = (unsigned) pce_dev->phy_dig_result;
+	/* setup command pointer list */
+	*(pce_dev->cmd_pointer_list_ce_in) = (CMD_PTR_LP | DMOV_CMD_LIST |
+			DMOV_CMD_ADDR((unsigned int)
+					pce_dev->phy_cmd_list_ce_in));
+	pce_dev->chan_ce_in_cmd->user = (void *) pce_dev;
+	pce_dev->chan_ce_in_cmd->exec_func = NULL;
+	pce_dev->chan_ce_in_cmd->cmdptr = DMOV_CMD_ADDR(
+			(unsigned int) pce_dev->phy_cmd_pointer_list_ce_in);
+	pce_dev->chan_ce_in_cmd->crci_mask = msm_dmov_build_crci_mask(2,
+			pce_dev->crci_in, pce_dev->crci_hash);
+	/*
+	 * The first command in the command list ce_out.
+	 * It is for encrypt/decrypt output.
+	 * If hashing only, ce_out is not used.
+	 */
+	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out;
+	/* swap byte, half word, source crci, scatter gather */
+	pcmd->cmd =   CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS |
+			CMD_SRC_CRCI(pce_dev->crci_out) | CMD_MODE_SG;
+	pdesc = pce_dev->ce_out_src_desc;
+	pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase);
+	pdesc->len = 0;  /* to be filled in each operation */
+	pcmd->src_dscr = (unsigned) pce_dev->phy_ce_out_src_desc;
+	pdesc = pce_dev->ce_out_dst_desc;
+	pdesc->addr = 0;  /* to be filled in each operation */
+	pdesc->len = 0;  /* to be filled in each operation */
+	pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_out_dst_desc;
+	pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) |
+						DST_INDEX_SG_CMD(0);
+	pcmd++;
+	/*
+	 * The second command is for digested data of esp operation.
+	 * For ciphering, this command is not used.
+	 */
+	pscmd = (dmov_s *) pcmd;
+	/* last command, swap byte, half word, src crci, single   */
+	pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS |
+			CMD_SRC_CRCI(pce_dev->crci_hash) | CMD_MODE_SINGLE;
+	pscmd->src = (CRYPTO_AUTH_IV0_REG + pce_dev->phy_iobase);
+	pscmd->len = SHA1_DIGESTSIZE;     /* we only support hmac(sha1) */
+	pscmd->dst = (unsigned) pce_dev->phy_dig_result;
+	/* setup command pointer list */
+	*(pce_dev->cmd_pointer_list_ce_out) = (CMD_PTR_LP | DMOV_CMD_LIST |
+			DMOV_CMD_ADDR((unsigned int)pce_dev->
+						phy_cmd_list_ce_out));
+
+	pce_dev->chan_ce_out_cmd->user = pce_dev;
+	pce_dev->chan_ce_out_cmd->exec_func = NULL;
+	pce_dev->chan_ce_out_cmd->cmdptr = DMOV_CMD_ADDR(
+			(unsigned int) pce_dev->phy_cmd_pointer_list_ce_out);
+	pce_dev->chan_ce_out_cmd->crci_mask = msm_dmov_build_crci_mask(2,
+			pce_dev->crci_out, pce_dev->crci_hash);
+
+
+	return 0;
+};
+
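+/*
+ * Kick off the ADM transfer(s). A channel that is not used by this request
+ * is marked completed up front, so that the completion callback of the
+ * active channel alone can finish the request.
+ */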
+static int _qce_start_dma(struct qce_device *pce_dev, bool ce_in, bool ce_out)
+{
+
+	if (ce_in)
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IN_PROG;
+	else
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
+
+	if (ce_out)
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IN_PROG;
+	else
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
+
+	if (ce_in)
+		msm_dmov_enqueue_cmd(pce_dev->chan_ce_in,
+					pce_dev->chan_ce_in_cmd);
+	if (ce_out)
+		msm_dmov_enqueue_cmd(pce_dev->chan_ce_out,
+					pce_dev->chan_ce_out_cmd);
+
+	return 0;
+};
+
+static void _f9_complete(struct qce_device *pce_dev)
+{
+	uint32_t mac_i;
+	uint32_t status;
+
+	dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
+				pce_dev->ota_size, DMA_TO_DEVICE);
+
+	/* check ce error status */
+	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
+	if (status & (1 << CRYPTO_SW_ERR)) {
+		pce_dev->err++;
+		dev_err(pce_dev->pdev,
+			"Qualcomm Crypto Error at 0x%x, status %x\n",
+			pce_dev->phy_iobase, status);
+		_init_ce_engine(pce_dev);
+		pce_dev->qce_cb(pce_dev->areq, NULL, NULL, -ENXIO);
+		return;
+	};
+
+	mac_i = readl_relaxed(pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
+	pce_dev->qce_cb(pce_dev->areq, (void *) mac_i, NULL,
+				pce_dev->chan_ce_in_status);
+};
+
+static void _f8_complete(struct qce_device *pce_dev)
+{
+	uint32_t status;
+
+	if (pce_dev->phy_ota_dst != 0)
+		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst,
+				pce_dev->ota_size, DMA_FROM_DEVICE);
+	if (pce_dev->phy_ota_src != 0)
+		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
+				pce_dev->ota_size, (pce_dev->phy_ota_dst) ?
+				DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
+
+	/* check ce error status */
+	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
+	if (status & (1 << CRYPTO_SW_ERR)) {
+		pce_dev->err++;
+		dev_err(pce_dev->pdev,
+			"Qualcomm Crypto Error at 0x%x, status %x\n",
+			pce_dev->phy_iobase, status);
+		_init_ce_engine(pce_dev);
+		pce_dev->qce_cb(pce_dev->areq, NULL, NULL, -ENXIO);
+		return;
+	};
+
+	pce_dev->qce_cb(pce_dev->areq, NULL, NULL,
+				pce_dev->chan_ce_in_status |
+					pce_dev->chan_ce_out_status);
+};
+
+
+static void _f9_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+						result);
+		pce_dev->chan_ce_in_status = -1;
+	} else
+		pce_dev->chan_ce_in_status = 0;
+	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+	_f9_complete(pce_dev);
+};
+
+static void _f8_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+						 result);
+		pce_dev->chan_ce_in_status = -1;
+	} else
+		pce_dev->chan_ce_in_status = 0;
+
+	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
+	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+		/* done */
+		_f8_complete(pce_dev);
+	}
+};
+
+static void _f8_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+						result);
+		pce_dev->chan_ce_out_status = -1;
+	} else {
+		pce_dev->chan_ce_out_status = 0;
+	};
+
+	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
+	if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+		/* done */
+		_f8_complete(pce_dev);
+	}
+};
+
+static int _ce_f9_setup(struct qce_device *pce_dev, struct qce_f9_req * req)
+{
+	uint32_t cfg;
+	uint32_t ikey[OTA_KEY_SIZE/sizeof(uint32_t)];
+
+	_byte_stream_to_net_words(ikey, &req->ikey[0], OTA_KEY_SIZE);
+	writel_relaxed(ikey[0], pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
+	writel_relaxed(ikey[1], pce_dev->iobase + CRYPTO_AUTH_IV1_REG);
+	writel_relaxed(ikey[2], pce_dev->iobase + CRYPTO_AUTH_IV2_REG);
+	writel_relaxed(ikey[3], pce_dev->iobase + CRYPTO_AUTH_IV3_REG);
+	writel_relaxed(req->last_bits, pce_dev->iobase + CRYPTO_AUTH_IV4_REG);
+
+	writel_relaxed(req->fresh, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG);
+	writel_relaxed(req->count_i, pce_dev->iobase +
+						CRYPTO_AUTH_BYTECNT1_REG);
+
+	/* write auth_seg_cfg */
+	writel_relaxed((uint32_t)req->msize << CRYPTO_AUTH_SEG_SIZE,
+			pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+
+	/* write seg_cfg */
+	cfg = (CRYPTO_AUTH_ALG_F9 << CRYPTO_AUTH_ALG) | (1 << CRYPTO_FIRST) |
+			(1 << CRYPTO_LAST);
+
+	if (req->algorithm == QCE_OTA_ALGO_KASUMI)
+		cfg |= (CRYPTO_AUTH_SIZE_UIA1 << CRYPTO_AUTH_SIZE);
+	else
+		cfg |= (CRYPTO_AUTH_SIZE_UIA2 << CRYPTO_AUTH_SIZE) ;
+
+	if (req->direction == QCE_OTA_DIR_DOWNLINK)
+		cfg |= 1 << CRYPTO_F9_DIRECTION;
+
+	writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);
+
+	/* write seg_size   */
+	writel_relaxed(req->msize, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+	/* issue go to crypto   */
+	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
+
+	/*
+	 * barrier to ensure the previous writes to the CE (including
+	 * GO) have completed before issuing the DMA transfer request
+	 */
+	mb();
+	return 0;
+};
+
+static int _ce_f8_setup(struct qce_device *pce_dev, struct qce_f8_req *req,
+		bool key_stream_mode, uint16_t npkts, uint16_t cipher_offset,
+		uint16_t cipher_size)
+{
+	uint32_t cfg;
+	uint32_t ckey[OTA_KEY_SIZE/sizeof(uint32_t)];
+
+	if ((key_stream_mode && (req->data_len & 0xf || npkts > 1)) ||
+				(req->bearer >= QCE_OTA_MAX_BEARER))
+		return -EINVAL;
+
+	/*  write seg_cfg */
+	cfg = (CRYPTO_ENCR_ALG_F8 << CRYPTO_ENCR_ALG) | (1 << CRYPTO_FIRST) |
+				(1 << CRYPTO_LAST);
+	if (req->algorithm == QCE_OTA_ALGO_KASUMI)
+		cfg |= (CRYPTO_ENCR_KEY_SZ_UEA1 << CRYPTO_ENCR_KEY_SZ);
+	else
+		cfg |= (CRYPTO_ENCR_KEY_SZ_UEA2 << CRYPTO_ENCR_KEY_SZ) ;
+	if (key_stream_mode)
+		cfg |= 1 << CRYPTO_F8_KEYSTREAM_ENABLE;
+	if (req->direction == QCE_OTA_DIR_DOWNLINK)
+		cfg |= 1 << CRYPTO_F8_DIRECTION;
+	writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);
+
+	/* write seg_size   */
+	writel_relaxed(req->data_len, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+	/* write 0 to auth_size, auth_offset */
+	writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+
+	/* write encr_seg_cfg seg_size, seg_offset */
+	writel_relaxed((((uint32_t) cipher_size) << CRYPTO_ENCR_SEG_SIZE) |
+			(cipher_offset & 0xffff),
+				pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+	/* write keys */
+	_byte_stream_to_net_words(ckey, &req->ckey[0], OTA_KEY_SIZE);
+	writel_relaxed(ckey[0], pce_dev->iobase + CRYPTO_DES_KEY0_REG);
+	writel_relaxed(ckey[1], pce_dev->iobase + CRYPTO_DES_KEY1_REG);
+	writel_relaxed(ckey[2], pce_dev->iobase + CRYPTO_DES_KEY2_REG);
+	writel_relaxed(ckey[3], pce_dev->iobase + CRYPTO_DES_KEY3_REG);
+
+	/* write cntr0_iv0 for countC */
+	writel_relaxed(req->count_c, pce_dev->iobase + CRYPTO_CNTR0_IV0_REG);
+
+	/* write cntr1_iv1 for nPkts and bearer; a single packet is
+	 * encoded as a count of 0
+	 */
+	if (npkts == 1)
+		npkts = 0;
+	writel_relaxed(req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
+			npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT,
+				pce_dev->iobase + CRYPTO_CNTR1_IV1_REG);
+
+	/* issue go to crypto   */
+	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
+
+	/*
+	 * barrier to ensure the previous writes to the CE (including
+	 * GO) have completed before issuing the DMA transfer request
+	 */
+	mb();
+	return 0;
+};
+
+int qce_aead_req(void *handle, struct qce_req *q_req)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	struct aead_request *areq = (struct aead_request *) q_req->areq;
+	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+	uint32_t ivsize = crypto_aead_ivsize(aead);
+	uint32_t totallen;
+	uint32_t pad_len;
+	uint32_t authsize = crypto_aead_authsize(aead);
+	int rc = 0;
+
+	q_req->ivsize = ivsize;
+	if (q_req->dir == QCE_ENCRYPT)
+		q_req->cryptlen = areq->cryptlen;
+	else
+		q_req->cryptlen = areq->cryptlen - authsize;
+
+	totallen = q_req->cryptlen + ivsize + areq->assoclen;
+	pad_len = ALIGN(totallen, ADM_CE_BLOCK_SIZE) - totallen;
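+	/* the ADM moves data to/from the CE in 16-byte blocks, so the
+	 * total transfer length is padded up to a block boundary
+	 */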
+
+	_chain_buffer_in_init(pce_dev);
+	_chain_buffer_out_init(pce_dev);
+
+	pce_dev->assoc_nents = 0;
+	pce_dev->phy_iv_in = 0;
+	pce_dev->src_nents = 0;
+	pce_dev->dst_nents = 0;
+
+	pce_dev->assoc_nents = count_sg(areq->assoc, areq->assoclen);
+	dma_map_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
+					 DMA_TO_DEVICE);
+	if (_chain_sg_buffer_in(pce_dev, areq->assoc, areq->assoclen) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+
+	/* cipher iv for input                                 */
+	pce_dev->phy_iv_in = dma_map_single(pce_dev->pdev, q_req->iv,
+			ivsize, DMA_TO_DEVICE);
+	if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_iv_in, ivsize) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+
+	/* for output, ignore associated data and cipher iv */
+	if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_out_ignore,
+						ivsize + areq->assoclen) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+
+	/* cipher input       */
+	pce_dev->src_nents = count_sg(areq->src, q_req->cryptlen);
+	dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+	if (_chain_sg_buffer_in(pce_dev, areq->src, q_req->cryptlen) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+
+	/* cipher output      */
+	if (areq->src != areq->dst) {
+		pce_dev->dst_nents = count_sg(areq->dst, q_req->cryptlen);
+		dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+				DMA_FROM_DEVICE);
+	};
+	if (_chain_sg_buffer_out(pce_dev, areq->dst, q_req->cryptlen) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+
+	/* pad data      */
+	if (pad_len) {
+		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
+						pad_len) < 0) {
+			rc = -ENOMEM;
+			goto bad;
+		}
+		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
+						pad_len) < 0) {
+			rc = -ENOMEM;
+			goto bad;
+		}
+	}
+
+	/* finalize the ce_in and ce_out channels command lists */
+	_ce_in_final(pce_dev, 1, ALIGN(totallen, ADM_CE_BLOCK_SIZE));
+	_ce_out_final(pce_dev, 2, ALIGN(totallen, ADM_CE_BLOCK_SIZE));
+
+	/* set up crypto device */
+	rc = _ce_setup(pce_dev, q_req, totallen, ivsize + areq->assoclen);
+	if (rc < 0)
+		goto bad;
+
+	/* setup for callback, and issue command to adm */
+	pce_dev->areq = q_req->areq;
+	pce_dev->qce_cb = q_req->qce_cb;
+
+	pce_dev->chan_ce_in_cmd->complete_func = _aead_ce_in_call_back;
+	pce_dev->chan_ce_out_cmd->complete_func = _aead_ce_out_call_back;
+
+	rc = _qce_start_dma(pce_dev, true, true);
+	if (rc == 0)
+		return 0;
+bad:
+	if (pce_dev->assoc_nents) {
+		dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
+				DMA_TO_DEVICE);
+	}
+	if (pce_dev->phy_iv_in) {
+		dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
+				ivsize, DMA_TO_DEVICE);
+	}
+	if (pce_dev->src_nents) {
+		dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+				(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+								DMA_TO_DEVICE);
+	}
+	if (pce_dev->dst_nents) {
+		dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+				DMA_FROM_DEVICE);
+	}
+	return rc;
+}
+EXPORT_SYMBOL(qce_aead_req);
+
+int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
+{
+	int rc = 0;
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	struct ablkcipher_request *areq = (struct ablkcipher_request *)
+						c_req->areq;
+
+	uint32_t pad_len = ALIGN(areq->nbytes, ADM_CE_BLOCK_SIZE)
+						- areq->nbytes;
+
+	_chain_buffer_in_init(pce_dev);
+	_chain_buffer_out_init(pce_dev);
+
+	pce_dev->src_nents = 0;
+	pce_dev->dst_nents = 0;
+	/* cipher input       */
+	pce_dev->src_nents = count_sg(areq->src, areq->nbytes);
+
+	if (c_req->use_pmem != 1)
+		dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+								DMA_TO_DEVICE);
+	else
+		dma_map_pmem_sg(&c_req->pmem->src[0], pce_dev->src_nents,
+								areq->src);
+
+	if (_chain_sg_buffer_in(pce_dev, areq->src, areq->nbytes) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+
+	/* cipher output      */
+	if (areq->src != areq->dst) {
+		pce_dev->dst_nents = count_sg(areq->dst, areq->nbytes);
+		if (c_req->use_pmem != 1)
+			dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+							DMA_FROM_DEVICE);
+		else
+			dma_map_pmem_sg(&c_req->pmem->dst[0],
+					pce_dev->dst_nents, areq->dst);
+	};
+	if (_chain_sg_buffer_out(pce_dev, areq->dst, areq->nbytes) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+
+	/* pad data      */
+	if (pad_len) {
+		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
+						pad_len) < 0) {
+			rc = -ENOMEM;
+			goto bad;
+		}
+		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
+						pad_len) < 0) {
+			rc = -ENOMEM;
+			goto bad;
+		}
+	}
+
+	/* finalize the ce_in and ce_out channels command lists */
+	_ce_in_final(pce_dev, 1, areq->nbytes + pad_len);
+	_ce_out_final(pce_dev, 1, areq->nbytes + pad_len);
+
+#ifdef QCE_DEBUG
+	_ce_in_dump(pce_dev);
+	_ce_out_dump(pce_dev);
+#endif
+	/* set up crypto device */
+	rc = _ce_setup(pce_dev, c_req, areq->nbytes, 0);
+	if (rc < 0)
+		goto bad;
+
+	/* setup for callback, and issue command to adm */
+	pce_dev->areq = areq;
+	pce_dev->qce_cb = c_req->qce_cb;
+	if (c_req->use_pmem == 1) {
+		pce_dev->chan_ce_in_cmd->complete_func =
+					_ablk_cipher_ce_in_call_back_pmem;
+		pce_dev->chan_ce_out_cmd->complete_func =
+					_ablk_cipher_ce_out_call_back_pmem;
+	} else {
+		pce_dev->chan_ce_in_cmd->complete_func =
+					_ablk_cipher_ce_in_call_back;
+		pce_dev->chan_ce_out_cmd->complete_func =
+					_ablk_cipher_ce_out_call_back;
+	}
+	rc = _qce_start_dma(pce_dev, true, true);
+
+	if (rc == 0)
+		return 0;
+bad:
+	if (c_req->use_pmem != 1) {
+		if (pce_dev->dst_nents) {
+			dma_unmap_sg(pce_dev->pdev, areq->dst,
+				pce_dev->dst_nents, DMA_FROM_DEVICE);
+		}
+		if (pce_dev->src_nents) {
+			dma_unmap_sg(pce_dev->pdev, areq->src,
+					pce_dev->src_nents,
+					(areq->src == areq->dst) ?
+						DMA_BIDIRECTIONAL :
+						DMA_TO_DEVICE);
+		}
+	}
+	return rc;
+}
+EXPORT_SYMBOL(qce_ablk_cipher_req);
+
+int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	int rc;
+	uint32_t pad_len = ALIGN(sreq->size, ADM_CE_BLOCK_SIZE) - sreq->size;
+	struct ahash_request *areq = (struct ahash_request *)sreq->areq;
+
+	_chain_buffer_in_init(pce_dev);
+	pce_dev->src_nents = count_sg(sreq->src, sreq->size);
+	dma_map_sg(pce_dev->pdev, sreq->src, pce_dev->src_nents,
+							DMA_TO_DEVICE);
+
+	if (_chain_sg_buffer_in(pce_dev, sreq->src, sreq->size) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+
+	if (pad_len) {
+		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
+						pad_len) < 0) {
+			rc = -ENOMEM;
+			goto bad;
+		}
+	}
+	_ce_in_final(pce_dev, 2, sreq->size + pad_len);
+
+#ifdef QCE_DEBUG
+	_ce_in_dump(pce_dev);
+#endif
+
+	rc =  _sha_ce_setup(pce_dev, sreq);
+
+	if (rc < 0)
+		goto bad;
+
+	pce_dev->areq = areq;
+	pce_dev->qce_cb = sreq->qce_cb;
+	pce_dev->chan_ce_in_cmd->complete_func = _sha_ce_in_call_back;
+
+	rc =  _qce_start_dma(pce_dev, true, false);
+
+	if (rc == 0)
+		return 0;
+bad:
+	if (pce_dev->src_nents) {
+		dma_unmap_sg(pce_dev->pdev, sreq->src,
+				pce_dev->src_nents, DMA_TO_DEVICE);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(qce_process_sha_req);
+
+/*
+ * crypto engine open function.
+ */
+void *qce_open(struct platform_device *pdev, int *rc)
+{
+	struct qce_device *pce_dev;
+	struct resource *resource;
+	struct clk *ce_clk;
+
+	pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
+	if (!pce_dev) {
+		*rc = -ENOMEM;
+		dev_err(&pdev->dev, "Can not allocate memory\n");
+		return NULL;
+	}
+	pce_dev->pdev = &pdev->dev;
+	ce_clk = clk_get(pce_dev->pdev, "ce_clk");
+	if (IS_ERR(ce_clk)) {
+		*rc = PTR_ERR(ce_clk);
+		kfree(pce_dev);
+		return NULL;
+	}
+	pce_dev->ce_clk = ce_clk;
+
+	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!resource) {
+		*rc = -ENXIO;
+		dev_err(pce_dev->pdev, "Missing MEM resource\n");
+		goto err;
+	};
+	pce_dev->phy_iobase = resource->start;
+	pce_dev->iobase = ioremap_nocache(resource->start,
+				resource->end - resource->start + 1);
+	if (!pce_dev->iobase) {
+		*rc = -ENOMEM;
+		dev_err(pce_dev->pdev, "Can not map io memory\n");
+		goto err;
+	}
+
+	pce_dev->chan_ce_in_cmd = kzalloc(sizeof(struct msm_dmov_cmd),
+			GFP_KERNEL);
+	pce_dev->chan_ce_out_cmd = kzalloc(sizeof(struct msm_dmov_cmd),
+			GFP_KERNEL);
+	if (pce_dev->chan_ce_in_cmd == NULL ||
+			pce_dev->chan_ce_out_cmd == NULL) {
+		dev_err(pce_dev->pdev, "Can not allocate memory\n");
+		*rc = -ENOMEM;
+		goto err;
+	}
+
+	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+					"crypto_channels");
+	if (!resource) {
+		*rc = -ENXIO;
+		dev_err(pce_dev->pdev, "Missing DMA channel resource\n");
+		goto err;
+	};
+	pce_dev->chan_ce_in = resource->start;
+	pce_dev->chan_ce_out = resource->end;
+	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+				"crypto_crci_in");
+	if (!resource) {
+		*rc = -ENXIO;
+		dev_err(pce_dev->pdev, "Missing DMA crci in resource\n");
+		goto err;
+	};
+	pce_dev->crci_in = resource->start;
+	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+				"crypto_crci_out");
+	if (!resource) {
+		*rc = -ENXIO;
+		dev_err(pce_dev->pdev, "Missing DMA crci out resource\n");
+		goto err;
+	};
+	pce_dev->crci_out = resource->start;
+	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+				"crypto_crci_hash");
+	if (!resource) {
+		*rc = -ENXIO;
+		dev_err(pce_dev->pdev, "Missing DMA crci hash resource\n");
+		goto err;
+	};
+	pce_dev->crci_hash = resource->start;
+	pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
+			PAGE_SIZE, &pce_dev->coh_pmem, GFP_KERNEL);
+
+	if (pce_dev->coh_vmem == NULL) {
+		*rc = -ENOMEM;
+		dev_err(pce_dev->pdev, "Can not allocate coherent memory.\n");
+		goto err;
+	}
+	_setup_cmd_template(pce_dev);
+
+	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+	*rc = clk_enable(pce_dev->ce_clk);
+	if (*rc)
+		goto err;
+
+	if (_init_ce_engine(pce_dev)) {
+		*rc = -ENXIO;
+		clk_disable(pce_dev->ce_clk);
+		goto err;
+	}
+	*rc = 0;
+	clk_disable(pce_dev->ce_clk);
+
+	pce_dev->err = 0;
+
+	return pce_dev;
+err:
+	if (pce_dev)
+		qce_close(pce_dev);
+	return NULL;
+}
+EXPORT_SYMBOL(qce_open);
+
+/*
+ * crypto engine close function.
+ */
+int qce_close(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+
+	if (handle == NULL)
+		return -ENODEV;
+	if (pce_dev->iobase)
+		iounmap(pce_dev->iobase);
+
+	if (pce_dev->coh_vmem)
+		dma_free_coherent(pce_dev->pdev, PAGE_SIZE, pce_dev->coh_vmem,
+				pce_dev->coh_pmem);
+	kfree(pce_dev->chan_ce_in_cmd);
+	kfree(pce_dev->chan_ce_out_cmd);
+
+	clk_put(pce_dev->ce_clk);
+	kfree(handle);
+	return 0;
+}
+EXPORT_SYMBOL(qce_close);
+
+int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+
+	if (ce_support == NULL)
+		return -EINVAL;
+
+	if (pce_dev->hmac == 1)
+		ce_support->sha1_hmac_20 = true;
+	else
+		ce_support->sha1_hmac_20 = false;
+	ce_support->sha1_hmac = false;
+	ce_support->sha256_hmac = false;
+	ce_support->sha_hmac = false;
+	ce_support->cmac  = false;
+	ce_support->aes_key_192 = true;
+	ce_support->aes_xts  = false;
+	ce_support->aes_ccm  = false;
+	ce_support->ota = pce_dev->ota;
+	return 0;
+}
+EXPORT_SYMBOL(qce_hw_support);
+
+int qce_f8_req(void *handle, struct qce_f8_req *req,
+			void *cookie, qce_comp_func_ptr_t qce_cb)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	bool key_stream_mode;
+	dma_addr_t dst;
+	int rc;
+	uint32_t pad_len = ALIGN(req->data_len, ADM_CE_BLOCK_SIZE) -
+						req->data_len;
+
+	_chain_buffer_in_init(pce_dev);
+	_chain_buffer_out_init(pce_dev);
+
+	key_stream_mode = (req->data_in == NULL);
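+	/* with no input data the engine only generates a key stream,
+	 * so the ce_in channel is not used
+	 */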
+
+	/* F8 cipher input       */
+	if (key_stream_mode)
+		pce_dev->phy_ota_src = 0;
+	else {
+		pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev,
+					req->data_in, req->data_len,
+					(req->data_in == req->data_out) ?
+					DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ota_src,
+				req->data_len) < 0) {
+			pce_dev->phy_ota_dst = 0;
+			rc =  -ENOMEM;
+			goto bad;
+		}
+	}
+
+	/* F8 cipher output     */
+	if (req->data_in != req->data_out) {
+		dst = dma_map_single(pce_dev->pdev, req->data_out,
+				req->data_len, DMA_FROM_DEVICE);
+		pce_dev->phy_ota_dst = dst;
+	} else {
+		dst = pce_dev->phy_ota_src;
+		pce_dev->phy_ota_dst = 0;
+	}
+	if (_chain_pm_buffer_out(pce_dev, dst, req->data_len) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+
+	pce_dev->ota_size = req->data_len;
+
+	/* pad data      */
+	if (pad_len) {
+		if (!key_stream_mode && _chain_pm_buffer_in(pce_dev,
+					pce_dev->phy_ce_pad, pad_len) < 0) {
+			rc =  -ENOMEM;
+			goto bad;
+		}
+		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
+						pad_len) < 0) {
+			rc =  -ENOMEM;
+			goto bad;
+		}
+	}
+
+	/* finalize the ce_in and ce_out channels command lists */
+	if (!key_stream_mode)
+		_ce_in_final(pce_dev, 1, req->data_len + pad_len);
+	_ce_out_final(pce_dev, 1, req->data_len + pad_len);
+
+	/* set up crypto device */
+	rc = _ce_f8_setup(pce_dev, req, key_stream_mode, 1, 0, req->data_len);
+	if (rc < 0)
+		goto bad;
+
+	/* setup for callback, and issue command to adm */
+	pce_dev->areq = cookie;
+	pce_dev->qce_cb = qce_cb;
+
+	if (!key_stream_mode)
+		pce_dev->chan_ce_in_cmd->complete_func = _f8_ce_in_call_back;
+
+	pce_dev->chan_ce_out_cmd->complete_func = _f8_ce_out_call_back;
+
+	rc =  _qce_start_dma(pce_dev, !(key_stream_mode), true);
+	if (rc == 0)
+		return 0;
+bad:
+	if (pce_dev->phy_ota_dst != 0)
+		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst,
+				req->data_len, DMA_FROM_DEVICE);
+	if (pce_dev->phy_ota_src != 0)
+		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
+				req->data_len,
+				(req->data_in == req->data_out) ?
+					DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+	return rc;
+}
+EXPORT_SYMBOL(qce_f8_req);
+
+int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq,
+			void *cookie, qce_comp_func_ptr_t qce_cb)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	uint16_t num_pkt = mreq->num_pkt;
+	uint16_t cipher_start = mreq->cipher_start;
+	uint16_t cipher_size = mreq->cipher_size;
+	struct qce_f8_req *req = &mreq->qce_f8_req;
+	uint32_t total;
+	uint32_t pad_len;
+	dma_addr_t dst = 0;
+	int rc = 0;
+
+	total = num_pkt * req->data_len;
+	pad_len = ALIGN(total, ADM_CE_BLOCK_SIZE) - total;
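+	/* all num_pkt packets are assumed contiguous in data_in and are
+	 * processed as one DMA transfer of total bytes
+	 */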
+
+	_chain_buffer_in_init(pce_dev);
+	_chain_buffer_out_init(pce_dev);
+
+	/* F8 cipher input       */
+	pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev,
+				req->data_in, total,
+				(req->data_in == req->data_out) ?
+				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+	if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ota_src,
+				total) < 0) {
+		pce_dev->phy_ota_dst = 0;
+		rc = -ENOMEM;
+		goto bad;
+	}
+	/* F8 cipher output      */
+	if (req->data_in != req->data_out) {
+		dst = dma_map_single(pce_dev->pdev, req->data_out, total,
+						DMA_FROM_DEVICE);
+		pce_dev->phy_ota_dst = dst;
+	} else {
+		dst = pce_dev->phy_ota_src;
+		pce_dev->phy_ota_dst = 0;
+	}
+	if (_chain_pm_buffer_out(pce_dev, dst, total) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+
+	pce_dev->ota_size = total;
+
+	/* pad data      */
+	if (pad_len) {
+		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
+					pad_len) < 0) {
+			rc = -ENOMEM;
+			goto bad;
+		}
+		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
+						pad_len) < 0) {
+			rc = -ENOMEM;
+			goto bad;
+		}
+	}
+
+	/* finalize the ce_in and ce_out channels command lists */
+	_ce_in_final(pce_dev, 1, total + pad_len);
+	_ce_out_final(pce_dev, 1, total + pad_len);
+
+
+	/* set up crypto device */
+	rc = _ce_f8_setup(pce_dev, req, false, num_pkt, cipher_start,
+			cipher_size);
+	if (rc)
+		goto bad;
+
+	/* setup for callback, and issue command to adm */
+	pce_dev->areq = cookie;
+	pce_dev->qce_cb = qce_cb;
+
+	pce_dev->chan_ce_in_cmd->complete_func = _f8_ce_in_call_back;
+	pce_dev->chan_ce_out_cmd->complete_func = _f8_ce_out_call_back;
+
+	rc = _qce_start_dma(pce_dev, true, true);
+	if (rc == 0)
+		return 0;
+bad:
+	if (pce_dev->phy_ota_dst)
+		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst, total,
+				DMA_FROM_DEVICE);
+	dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src, total,
+				(req->data_in == req->data_out) ?
+				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+	return rc;
+}
+EXPORT_SYMBOL(qce_f8_multi_pkt_req);
+
+int qce_f9_req(void *handle, struct qce_f9_req *req, void *cookie,
+			qce_comp_func_ptr_t qce_cb)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	int rc;
+	uint32_t pad_len = ALIGN(req->msize, ADM_CE_BLOCK_SIZE) - req->msize;
+
+	pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev, req->message,
+			req->msize, DMA_TO_DEVICE);
+
+	_chain_buffer_in_init(pce_dev);
+	rc = _chain_pm_buffer_in(pce_dev, pce_dev->phy_ota_src, req->msize);
+	if (rc < 0) {
+		rc =  -ENOMEM;
+		goto bad;
+	}
+
+	pce_dev->ota_size = req->msize;
+	if (pad_len) {
+		rc = _chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
+				pad_len);
+		if (rc < 0) {
+			rc = -ENOMEM;
+			goto bad;
+		}
+	}
+	_ce_in_final(pce_dev, 2, req->msize + pad_len);
+	rc = _ce_f9_setup(pce_dev, req);
+	if (rc < 0)
+		goto bad;
+
+	/* setup for callback, and issue command to adm */
+	pce_dev->areq = cookie;
+	pce_dev->qce_cb = qce_cb;
+
+	pce_dev->chan_ce_in_cmd->complete_func = _f9_ce_in_call_back;
+
+	rc =  _qce_start_dma(pce_dev, true, false);
+	if (rc == 0)
+		return 0;
+bad:
+	dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
+				req->msize, DMA_TO_DEVICE);
+	return rc;
+}
+EXPORT_SYMBOL(qce_f9_req);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
+MODULE_DESCRIPTION("Crypto Engine driver");
+MODULE_VERSION("1.11");
+
diff --git a/drivers/crypto/msm/qce40.c b/drivers/crypto/msm/qce40.c
new file mode 100644
index 0000000..7724d67
--- /dev/null
+++ b/drivers/crypto/msm/qce40.c
@@ -0,0 +1,2038 @@
+/* Qualcomm Crypto Engine driver.
+ *
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <mach/dma.h>
+#include <mach/clk.h>
+#include "inc/qce.h"
+#include "inc/qcedev.h"
+#include "inc/qcryptohw_40.h"
+
+/* ADM definitions */
+#define LI_SG_CMD  (1 << 31)    /* last index in the scatter gather cmd */
+#define SRC_INDEX_SG_CMD(index) ((index & 0x3fff) << 16)
+#define DST_INDEX_SG_CMD(index) (index & 0x3fff)
+#define ADM_DESC_LAST  (1 << 31)
+
+/* Data xfer between DM and CE in blocks of 16 bytes */
+#define ADM_CE_BLOCK_SIZE  16
+
+#define ADM_DESC_LENGTH_MASK 0xffff
+#define ADM_DESC_LENGTH(x)  (x & ADM_DESC_LENGTH_MASK)
+
+struct dmov_desc {
+	uint32_t addr;
+	uint32_t len;
+};
+
+#define ADM_STATUS_OK 0x80000002
+
+/* Misc definitions */
+
+/* QCE max number of descriptor in a descriptor list */
+#define QCE_MAX_NUM_DESC    128
+
+/* State of DM channel */
+enum qce_chan_st_enum {
+	QCE_CHAN_STATE_IDLE = 0,
+	QCE_CHAN_STATE_IN_PROG = 1,
+	QCE_CHAN_STATE_COMP = 2,
+	QCE_CHAN_STATE_LAST
+};
+
+/*
+ * CE HW device structure.
+ * Each engine has an instance of the structure.
+ * Each engine can only handle one crypto operation at one time. It is up to
+ * the sw above to ensure single threading of operation on an engine.
+ */
+struct qce_device {
+	struct device *pdev;        /* Handle to platform_device structure */
+	unsigned char *coh_vmem;    /* Allocated coherent virtual memory */
+	dma_addr_t coh_pmem;	    /* Allocated coherent physical memory */
+	void __iomem *iobase;	    /* Virtual io base of CE HW  */
+	unsigned int phy_iobase;    /* Physical io base of CE HW    */
+	struct clk *ce_core_clk;	    /* Handle to CE clk */
+	struct clk *ce_clk;	    /* Handle to CE clk */
+	unsigned int crci_in;	      /* CRCI for CE DM IN Channel   */
+	unsigned int crci_out;	      /* CRCI for CE DM OUT Channel   */
+	unsigned int chan_ce_in;      /* ADM channel used for CE input
+					* and auth result if authentication
+					* only operation. */
+	unsigned int chan_ce_out;     /* ADM channel used for CE output,
+					and icv for esp */
+	unsigned int *cmd_pointer_list_ce_in;
+	dma_addr_t  phy_cmd_pointer_list_ce_in;
+
+	unsigned int *cmd_pointer_list_ce_out;
+	dma_addr_t  phy_cmd_pointer_list_ce_out;
+
+	unsigned char *cmd_list_ce_in;
+	dma_addr_t  phy_cmd_list_ce_in;
+
+	unsigned char *cmd_list_ce_out;
+	dma_addr_t  phy_cmd_list_ce_out;
+
+	struct dmov_desc *ce_out_src_desc;
+	dma_addr_t  phy_ce_out_src_desc;
+
+	struct dmov_desc *ce_out_dst_desc;
+	dma_addr_t  phy_ce_out_dst_desc;
+
+	struct dmov_desc *ce_in_src_desc;
+	dma_addr_t  phy_ce_in_src_desc;
+
+	struct dmov_desc *ce_in_dst_desc;
+	dma_addr_t  phy_ce_in_dst_desc;
+
+	unsigned char *ce_out_ignore;
+	dma_addr_t phy_ce_out_ignore;
+
+	unsigned char *ce_pad;
+	dma_addr_t phy_ce_pad;
+
+	struct msm_dmov_cmd  *chan_ce_in_cmd;
+	struct msm_dmov_cmd  *chan_ce_out_cmd;
+
+	uint32_t ce_out_ignore_size;
+
+	int ce_out_dst_desc_index;
+	int ce_in_dst_desc_index;
+
+	int ce_out_src_desc_index;
+	int ce_in_src_desc_index;
+
+	enum qce_chan_st_enum chan_ce_in_state;		/* chan ce_in state */
+	enum qce_chan_st_enum chan_ce_out_state;	/* chan ce_out state */
+
+	int chan_ce_in_status;		/* chan ce_in status      */
+	int chan_ce_out_status;		/* chan ce_out status */
+
+	unsigned char *dig_result;
+	dma_addr_t phy_dig_result;
+
+	/* cached aes key */
+	uint32_t cipher_key[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)];
+
+	uint32_t cipher_key_size;	/* cached aes key size in bytes */
+	qce_comp_func_ptr_t qce_cb;	/* qce callback function pointer */
+
+	int assoc_nents;
+	int ivsize;
+	int authsize;
+	int src_nents;
+	int dst_nents;
+
+	void *areq;
+	enum qce_cipher_mode_enum mode;
+
+	dma_addr_t phy_iv_in;
+};
+
+/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
+static uint32_t  _std_init_vector_sha1[] =   {
+	0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
+};
+/* Standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint32_t _std_init_vector_sha256[] = {
+	0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
+	0x510E527F, 0x9B05688C,	0x1F83D9AB, 0x5BE0CD19
+};
+
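+/*
+ * Pack a byte stream into 32-bit words in network (big-endian) order.
+ * A trailing partial word is left-justified and zero-filled.
+ */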
+static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned n;
+
+	n = len / sizeof(uint32_t);
+	for (; n > 0; n--) {
+		*iv =  ((*b << 24)      & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   |
+				(((*(b+2)) << 8) & 0xff00)     |
+				(*(b+3)          & 0xff);
+		b += sizeof(uint32_t);
+		iv++;
+	}
+
+	n = len % sizeof(uint32_t);
+	if (n == 3) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   |
+				(((*(b+2)) << 8) & 0xff00);
+	} else if (n == 2) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000);
+	} else if (n == 1) {
+		*iv = ((*b << 24) & 0xff000000);
+	}
+}
+
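+/*
+ * Reverse the byte order of a (possibly short) IV, right-align it in an
+ * AES_IV_LENGTH buffer, and convert it to network-order words.
+ */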
+static void _byte_stream_swap_to_net_words(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned i, j;
+	unsigned char swap_iv[AES_IV_LENGTH];
+
+	memset(swap_iv, 0, AES_IV_LENGTH);
+	for (i = (AES_IV_LENGTH-len), j = len-1;  i < AES_IV_LENGTH; i++, j--)
+		swap_iv[i] = b[j];
+	_byte_stream_to_net_words(iv, swap_iv, AES_IV_LENGTH);
+}
+
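+/* Unpack network-order 32-bit words back into a byte stream of len bytes. */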
+static void _net_words_to_byte_stream(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned n = len  / sizeof(uint32_t);
+
+	for (; n > 0; n--) {
+		*b++ = (unsigned char) ((*iv >> 24)   & 0xff);
+		*b++ = (unsigned char) ((*iv >> 16)   & 0xff);
+		*b++ = (unsigned char) ((*iv >> 8)    & 0xff);
+		*b++ = (unsigned char) (*iv           & 0xff);
+		iv++;
+	}
+	n = len % sizeof(uint32_t);
+	if (n == 3) {
+		*b++ = (unsigned char) ((*iv >> 24)   & 0xff);
+		*b++ = (unsigned char) ((*iv >> 16)   & 0xff);
+		*b =   (unsigned char) ((*iv >> 8)    & 0xff);
+	} else if (n == 2) {
+		*b++ = (unsigned char) ((*iv >> 24)   & 0xff);
+		*b =   (unsigned char) ((*iv >> 16)   & 0xff);
+	} else if (n == 1) {
+		*b =   (unsigned char) ((*iv >> 24)   & 0xff);
+	}
+}
+
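+/* Count the scatterlist entries needed to cover nbytes of data. */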
+static int count_sg(struct scatterlist *sg, int nbytes)
+{
+	int i;
+
+	for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
+		nbytes -= sg->length;
+	return i;
+}
+
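+/*
+ * For pmem-backed requests the buffer offsets are used directly as the
+ * scatterlist DMA addresses (assumed to already be bus addresses), so
+ * dma_map_sg() is not called.
+ */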
+static int dma_map_pmem_sg(struct buf_info *pmem, unsigned entries,
+						struct scatterlist *sg)
+{
+	int i;
+	for (i = 0; i < entries; i++) {
+
+		sg->dma_address = (dma_addr_t)pmem->offset;
+		sg++;
+		pmem++;
+	}
+	return 0;
+}
+
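+/*
+ * Read the CE version register, verify that a supported core (rev 4.0
+ * or 4.2) is present, and log the channel/CRCI assignment.
+ */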
+static int _probe_ce_engine(struct qce_device *pce_dev)
+{
+	unsigned int val;
+	unsigned int rev;
+
+	val = readl_relaxed(pce_dev->iobase + CRYPTO_VERSION_REG);
+	if (((val & 0xfffffff) != 0x0000042) &&
+			((val & 0xfffffff) != 0x0000040)) {
+		dev_err(pce_dev->pdev,
+				"Unknown Qualcomm crypto device at 0x%x 0x%x\n",
+				pce_dev->phy_iobase, val);
+		return -EIO;
+	};
+	rev = (val & CRYPTO_CORE_REV_MASK);
+	if (rev == 0x42) {
+		dev_info(pce_dev->pdev,
+				"Qualcomm Crypto 4.2 device found at 0x%x\n",
+				pce_dev->phy_iobase);
+	} else if (rev == 0x40) {
+		dev_info(pce_dev->pdev,
+				"Qualcomm Crypto 4.0 device found at 0x%x\n",
+				pce_dev->phy_iobase);
+	}
+
+	dev_info(pce_dev->pdev,
+			"IO base 0x%x, ce_in channel %d, "
+			"ce_out channel %d, "
+			"crci_in %d, crci_out %d\n",
+			(unsigned int) pce_dev->iobase,
+			pce_dev->chan_ce_in, pce_dev->chan_ce_out,
+			pce_dev->crci_in, pce_dev->crci_out);
+
+	pce_dev->cipher_key_size = 0;
+
+	return 0;
+};
+
+static int _init_ce_engine(struct qce_device *pce_dev)
+{
+	unsigned int val;
+
+	/* Reset ce */
+	clk_reset(pce_dev->ce_core_clk, CLK_RESET_ASSERT);
+	clk_reset(pce_dev->ce_core_clk, CLK_RESET_DEASSERT);
+	/*
+	 * Ensure the previous writes to the CLK registers (toggling the
+	 * CLK reset lines) have completed.
+	 */
+	dsb();
+	/* configure ce */
+	val = (1 << CRYPTO_MASK_DOUT_INTR) | (1 << CRYPTO_MASK_DIN_INTR) |
+			(1 << CRYPTO_MASK_OP_DONE_INTR) |
+					(1 << CRYPTO_MASK_ERR_INTR);
+	writel_relaxed(val, pce_dev->iobase + CRYPTO_CONFIG_REG);
+	/*
+	 * Ensure the previous writel_relaxed() to the config register
+	 * has completed.
+	 */
+	dsb();
+	val = readl_relaxed(pce_dev->iobase + CRYPTO_CONFIG_REG);
+	if (!val) {
+		dev_err(pce_dev->pdev,
+				"unknown Qualcomm crypto device at 0x%x\n",
+				pce_dev->phy_iobase);
+		return -EIO;
+	};
+	if (_probe_ce_engine(pce_dev) < 0)
+		return -EIO;
+	return 0;
+};
+
+static int _ce_setup_hash(struct qce_device *pce_dev, struct qce_sha_req *sreq)
+{
+	uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
+	uint32_t diglen;
+	int i;
+	uint32_t auth_cfg = 0;
+	bool sha1 = false;
+
+	if (sreq->alg ==  QCE_HASH_AES_CMAC) {
+		uint32_t authkey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
+				0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+		uint32_t authklen32 = sreq->authklen/(sizeof(uint32_t));
+		/* Clear auth_ivn, auth_keyn registers  */
+		for (i = 0; i < 16; i++) {
+			writel_relaxed(0, (pce_dev->iobase +
+				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t))));
+			writel_relaxed(0, (pce_dev->iobase +
+				(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t))));
+		}
+		/* write auth_bytecnt 0/1/2/3, start with 0 */
+		for (i = 0; i < 4; i++)
+			writel_relaxed(0, pce_dev->iobase +
+						CRYPTO_AUTH_BYTECNT0_REG +
+						i * sizeof(uint32_t));
+
+		_byte_stream_to_net_words(authkey32, sreq->authkey,
+						sreq->authklen);
+		for (i = 0; i < authklen32; i++)
+			writel_relaxed(authkey32[i], pce_dev->iobase +
+				CRYPTO_AUTH_KEY0_REG + (i * sizeof(uint32_t)));
+		/*
+		 * write seg_cfg
+		 */
+		auth_cfg |= (1 << CRYPTO_LAST);
+		auth_cfg |= (CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE);
+		auth_cfg |= (CRYPTO_AUTH_SIZE_ENUM_16_BYTES <<
+							CRYPTO_AUTH_SIZE);
+		auth_cfg |= CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG;
+
+		switch (sreq->authklen) {
+		case AES128_KEY_SIZE:
+			auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES128 <<
+						CRYPTO_AUTH_KEY_SIZE);
+			break;
+		case AES256_KEY_SIZE:
+			auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES256 <<
+					CRYPTO_AUTH_KEY_SIZE);
+			break;
+		default:
+			break;
+		}
+
+		goto go_proc;
+	}
+
+	/* if not the last block, the size must be a multiple of the block size */
+	if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
+		return -EIO;
+
+	switch (sreq->alg) {
+	case QCE_HASH_SHA1:
+	case QCE_HASH_SHA1_HMAC:
+		diglen = SHA1_DIGEST_SIZE;
+		sha1 = true;
+		break;
+	case QCE_HASH_SHA256:
+	case QCE_HASH_SHA256_HMAC:
+		diglen = SHA256_DIGEST_SIZE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
+				(sreq->alg == QCE_HASH_SHA256_HMAC)) {
+		uint32_t hmackey[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };
+		uint32_t hmacklen = sreq->authklen/(sizeof(uint32_t));
+
+		_byte_stream_to_net_words(hmackey, sreq->authkey,
+						sreq->authklen);
+		/* write hmac key */
+		for (i = 0; i < hmacklen; i++)
+			writel_relaxed(hmackey[i], pce_dev->iobase +
+				CRYPTO_AUTH_KEY0_REG + (i * sizeof(uint32_t)));
+
+		auth_cfg |= (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE);
+	} else {
+		auth_cfg |= (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE);
+	}
+
+	/* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
+
+	if (sreq->first_blk) {
+		if (sha1) {
+			for (i = 0; i < 5; i++)
+				auth32[i] = _std_init_vector_sha1[i];
+		} else {
+			for (i = 0; i < 8; i++)
+				auth32[i] = _std_init_vector_sha256[i];
+		}
+	} else {
+		_byte_stream_to_net_words(auth32, sreq->digest, diglen);
+	}
+
+	for (i = 0; i < 5; i++)
+		writel_relaxed(auth32[i], (pce_dev->iobase +
+				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t))));
+
+	if ((sreq->alg == QCE_HASH_SHA256) ||
+			(sreq->alg == QCE_HASH_SHA256_HMAC)) {
+		writel_relaxed(auth32[5], pce_dev->iobase +
+							CRYPTO_AUTH_IV5_REG);
+		writel_relaxed(auth32[6], pce_dev->iobase +
+							CRYPTO_AUTH_IV6_REG);
+		writel_relaxed(auth32[7], pce_dev->iobase +
+							CRYPTO_AUTH_IV7_REG);
+	}
+
+	/* write auth_bytecnt 0/1/2/3 from the request's running byte count */
+	for (i = 0; i < 4; i++)
+		writel_relaxed(sreq->auth_data[i], (pce_dev->iobase +
+			(CRYPTO_AUTH_BYTECNT0_REG + i * sizeof(uint32_t))));
+
+	/* write seg_cfg */
+	if (sha1)
+		auth_cfg |= (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE);
+	else
+		auth_cfg |= (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE);
+
+	if (sreq->last_blk)
+		auth_cfg |= 1 << CRYPTO_LAST;
+
+	auth_cfg |= CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG;
+
+go_proc:
+	auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+	 /* write seg_cfg */
+	writel_relaxed(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+
+	/* write seg_size   */
+	writel_relaxed(sreq->size, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+	/* write auth_seg_size */
+	writel_relaxed(sreq->size, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+
+	/* write auth_seg_start   */
+	writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
+	/*
+	 * Ensure the previous writes to the AUTH registers have completed
+	 * before accessing a register that is not in the same 1K range.
+	 */
+	dsb();
+
+	writel_relaxed(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+	/*
+	 * Ensure the previous writes to the CE registers have completed
+	 * before writing to the GO register.
+	 */
+	dsb();
+	/* issue go to crypto   */
+	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
+	/*
+	 * Ensure the previous write to the GO register has completed
+	 * before issuing a DMA transfer request.
+	 */
+	dsb();
+
+	return 0;
+}
+
+static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
+		uint32_t totallen_in, uint32_t coffset)
+{
+	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
+			0, 0, 0, 0};
+	uint32_t enck_size_in_word = creq->encklen / sizeof(uint32_t);
+	int aes_key_chg;
+	int i;
+	uint32_t encr_cfg = 0;
+	uint32_t ivsize = creq->ivsize;
+
+	if (creq->mode ==  QCE_MODE_XTS)
+		_byte_stream_to_net_words(enckey32, creq->enckey,
+						creq->encklen/2);
+	else
+		_byte_stream_to_net_words(enckey32, creq->enckey,
+							creq->encklen);
+
+	if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
+		uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
+		uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
+		uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
+		uint32_t auth_cfg = 0;
+
+		/* Clear auth_ivn, auth_keyn registers  */
+		for (i = 0; i < 16; i++) {
+			writel_relaxed(0, (pce_dev->iobase +
+				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+			writel_relaxed(0, (pce_dev->iobase +
+				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t))));
+		}
+		/* write auth_bytecnt 0/1/2/3, start with 0 */
+		for (i = 0; i < 4; i++)
+			writel_relaxed(0, pce_dev->iobase +
+						CRYPTO_AUTH_BYTECNT0_REG +
+						i * sizeof(uint32_t));
+		/* write auth key */
+		for (i = 0; i < authklen32; i++)
+			writel_relaxed(enckey32[i], pce_dev->iobase +
+				CRYPTO_AUTH_KEY0_REG + (i*sizeof(uint32_t)));
+
+		/* write nonce */
+		_byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
+		for (i = 0; i < noncelen32; i++)
+			writel_relaxed(nonce32[i], pce_dev->iobase +
+				CRYPTO_AUTH_INFO_NONCE0_REG +
+					(i*sizeof(uint32_t)));
+
+		auth_cfg |= (noncelen32 << CRYPTO_AUTH_NONCE_NUM_WORDS);
+		auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+		auth_cfg |= (1 << CRYPTO_LAST);
+		if (creq->dir == QCE_ENCRYPT)
+			auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+		else
+			auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
+		auth_cfg |= (((creq->authsize >> 1) - 2) << CRYPTO_AUTH_SIZE);
+		auth_cfg |= (CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE);
+		if (creq->authklen ==  AES128_KEY_SIZE)
+			auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES128 <<
+						CRYPTO_AUTH_KEY_SIZE);
+		else {
+			if (creq->authklen ==  AES256_KEY_SIZE)
+				auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES256 <<
+							CRYPTO_AUTH_KEY_SIZE);
+		}
+		auth_cfg |= (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG);
+		writel_relaxed(auth_cfg, pce_dev->iobase +
+						CRYPTO_AUTH_SEG_CFG_REG);
+		if (creq->dir == QCE_ENCRYPT)
+			writel_relaxed(totallen_in, pce_dev->iobase +
+						CRYPTO_AUTH_SEG_SIZE_REG);
+		else
+			writel_relaxed((totallen_in - creq->authsize),
+				pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+		writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
+	} else {
+		writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+	}
+	/*
+	 * Ensure the previous writes to the AUTH registers have completed
+	 * before accessing a register that is not in the same 1K range.
+	 */
+	dsb();
+
+	switch (creq->mode) {
+	case QCE_MODE_ECB:
+		encr_cfg |= (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+		break;
+
+	case QCE_MODE_CBC:
+		encr_cfg |= (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+		break;
+
+	case QCE_MODE_XTS:
+		encr_cfg |= (CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
+		break;
+
+	case QCE_MODE_CCM:
+		encr_cfg |= (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE);
+		break;
+
+	case QCE_MODE_CTR:
+	default:
+		encr_cfg |= (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
+		break;
+	}
+	pce_dev->mode = creq->mode;
+
+	switch (creq->alg) {
+	case CIPHER_ALG_DES:
+		if (creq->mode !=  QCE_MODE_ECB) {
+			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+			writel_relaxed(enciv32[0], pce_dev->iobase +
+						CRYPTO_CNTR0_IV0_REG);
+			writel_relaxed(enciv32[1], pce_dev->iobase +
+						CRYPTO_CNTR1_IV1_REG);
+		}
+		writel_relaxed(enckey32[0], pce_dev->iobase +
+							CRYPTO_ENCR_KEY0_REG);
+		writel_relaxed(enckey32[1], pce_dev->iobase +
+							CRYPTO_ENCR_KEY1_REG);
+		encr_cfg |= ((CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ)  |
+				(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG));
+		break;
+
+	case CIPHER_ALG_3DES:
+		if (creq->mode !=  QCE_MODE_ECB) {
+			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+			writel_relaxed(enciv32[0], pce_dev->iobase +
+						CRYPTO_CNTR0_IV0_REG);
+			writel_relaxed(enciv32[1], pce_dev->iobase +
+						CRYPTO_CNTR1_IV1_REG);
+		}
+		for (i = 0; i < 6; i++)
+			writel_relaxed(enckey32[i], (pce_dev->iobase +
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t))));
+
+		encr_cfg |= ((CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ)  |
+				(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG));
+		break;
+
+	case CIPHER_ALG_AES:
+	default:
+		if (creq->mode ==  QCE_MODE_XTS) {
+			uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
+					= {0, 0, 0, 0, 0, 0, 0, 0};
+			uint32_t xtsklen =
+					creq->encklen/(2 * sizeof(uint32_t));
+
+			_byte_stream_to_net_words(xtskey32, (creq->enckey +
+					creq->encklen/2), creq->encklen/2);
+			for (i = 0; i < xtsklen; i++)
+				writel_relaxed(xtskey32[i], pce_dev->iobase +
+					CRYPTO_ENCR_XTS_KEY0_REG +
+					(i * sizeof(uint32_t)));
+
+			writel_relaxed(creq->cryptlen,
+					pce_dev->iobase +
+					CRYPTO_ENCR_XTS_DU_SIZE_REG);
+		}
+		if (creq->mode !=  QCE_MODE_ECB) {
+			if (creq->mode ==  QCE_MODE_XTS)
+				_byte_stream_swap_to_net_words(enciv32,
+							creq->iv, ivsize);
+			else
+				_byte_stream_to_net_words(enciv32, creq->iv,
+								ivsize);
+			for (i = 0; i <= 3; i++)
+				writel_relaxed(enciv32[i], pce_dev->iobase +
+							CRYPTO_CNTR0_IV0_REG +
+							(i * sizeof(uint32_t)));
+		}
+		/* set number of counter bits */
+		writel_relaxed(0xffffffff, pce_dev->iobase +
+							CRYPTO_CNTR_MASK_REG);
+
+		if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
+			encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
+					CRYPTO_ENCR_KEY_SZ);
+			encr_cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
+		} else {
+			uint32_t key_size;
+
+			if (creq->mode == QCE_MODE_XTS) {
+				key_size = creq->encklen/2;
+				enck_size_in_word = key_size/sizeof(uint32_t);
+			} else {
+				key_size = creq->encklen;
+			}
+
+			switch (key_size) {
+			case AES128_KEY_SIZE:
+				encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
+							CRYPTO_ENCR_KEY_SZ);
+				break;
+			case AES256_KEY_SIZE:
+			default:
+				encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES256 <<
+							CRYPTO_ENCR_KEY_SZ);
+
+				/* check for null key. If null, use hw key*/
+				for (i = 0; i < enck_size_in_word; i++) {
+					if (enckey32[i] != 0)
+						break;
+				}
+				if (i == enck_size_in_word)
+					encr_cfg |= 1 << CRYPTO_USE_HW_KEY;
+				break;
+			} /* end of switch (key_size) */
+
+			encr_cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
+			if (pce_dev->cipher_key_size !=  creq->encklen)
+				aes_key_chg = 1;
+			else {
+				for (i = 0; i < enck_size_in_word; i++) {
+					if (enckey32[i]
+						!= pce_dev->cipher_key[i])
+						break;
+				}
+				aes_key_chg = (i == enck_size_in_word) ? 0 : 1;
+			}
+
+			if (aes_key_chg) {
+				for (i = 0; i < enck_size_in_word; i++)
+					writel_relaxed(enckey32[i],
+							pce_dev->iobase +
+							CRYPTO_ENCR_KEY0_REG +
+							(i * sizeof(uint32_t)));
+				pce_dev->cipher_key_size = creq->encklen;
+				for (i = 0; i < enck_size_in_word; i++)
+					pce_dev->cipher_key[i] = enckey32[i];
+			} /*if (aes_key_chg) { */
+		} /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
+		break;
+	} /* end of switch (creq->alg)  */
+
+	/* set the encode/decode direction */
+	encr_cfg |= ((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
+
+	/* write encr seg cfg */
+	writel_relaxed(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+	/* write encr seg size */
+	if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT))
+		writel_relaxed((creq->cryptlen + creq->authsize),
+				pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
+	else
+		writel_relaxed(creq->cryptlen,
+				pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
+	/* write encr seg start */
+	writel_relaxed((coffset & 0xffff),
+			pce_dev->iobase + CRYPTO_ENCR_SEG_START_REG);
+	/* write seg size  */
+	writel_relaxed(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+	/*
+	 * Ensure the previous writes to the CE registers have completed
+	 * before writing to the GO register.
+	 */
+	dsb();
+	/* issue go to crypto   */
+	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
+	/*
+	 * Ensure the previous write to the GO register has completed
+	 * before issuing a DMA transfer request.
+	 */
+	dsb();
+	return 0;
+};
+
+static int _aead_complete(struct qce_device *pce_dev)
+{
+	struct aead_request *areq;
+	int i;
+	uint32_t ivsize;
+	uint32_t iv_out[4];
+	unsigned char iv[4 * sizeof(uint32_t)];
+
+	areq = (struct aead_request *) pce_dev->areq;
+	ivsize = pce_dev->ivsize;
+
+	if (areq->src != areq->dst) {
+		dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+					DMA_FROM_DEVICE);
+	}
+	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+
+	if (pce_dev->mode != QCE_MODE_CCM)
+		dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
+				ivsize, DMA_TO_DEVICE);
+	dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
+			DMA_TO_DEVICE);
+
+	/* get iv out */
+	if ((pce_dev->mode == QCE_MODE_ECB) ||
+					(pce_dev->mode == QCE_MODE_CCM)) {
+		if (pce_dev->mode == QCE_MODE_CCM) {
+			int result;
+			result = readl_relaxed(pce_dev->iobase +
+							CRYPTO_STATUS_REG);
+			result &= (1 << CRYPTO_MAC_FAILED);
+			result |= (pce_dev->chan_ce_in_status |
+						pce_dev->chan_ce_out_status);
+			dsb();
+			pce_dev->qce_cb(areq, pce_dev->dig_result, NULL,
+								result);
+		} else {
+			pce_dev->qce_cb(areq, pce_dev->dig_result, NULL,
+					pce_dev->chan_ce_in_status |
+					pce_dev->chan_ce_out_status);
+		}
+	} else {
+		for (i = 0; i < 4; i++)
+			iv_out[i] = readl_relaxed(pce_dev->iobase +
+				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)));
+
+		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
+		pce_dev->qce_cb(areq, pce_dev->dig_result, iv,
+				pce_dev->chan_ce_in_status |
+				pce_dev->chan_ce_out_status);
+	};
+	return 0;
+};
+
+static void _sha_complete(struct qce_device *pce_dev)
+{
+
+	struct ahash_request *areq;
+	uint32_t auth_data[4];
+	uint32_t digest[8];
+	int i;
+
+	areq = (struct ahash_request *) pce_dev->areq;
+	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+				DMA_TO_DEVICE);
+
+	for (i = 0; i < 4; i++)
+		auth_data[i] = readl_relaxed(pce_dev->iobase +
+				(CRYPTO_AUTH_BYTECNT0_REG +
+					i * sizeof(uint32_t)));
+
+	for (i = 0; i < 8; i++)
+		digest[i] = readl_relaxed(pce_dev->iobase +
+			CRYPTO_AUTH_IV0_REG + (i * sizeof(uint32_t)));
+
+	_net_words_to_byte_stream(digest, pce_dev->dig_result,
+						SHA256_DIGEST_SIZE);
+
+	pce_dev->qce_cb(areq,  pce_dev->dig_result, (unsigned char *)auth_data,
+				pce_dev->chan_ce_in_status);
+};
+
+static int _ablk_cipher_complete(struct qce_device *pce_dev)
+{
+	struct ablkcipher_request *areq;
+	uint32_t iv_out[4];
+	unsigned char iv[4 * sizeof(uint32_t)];
+
+	areq = (struct ablkcipher_request *) pce_dev->areq;
+
+	if (areq->src != areq->dst) {
+		dma_unmap_sg(pce_dev->pdev, areq->dst,
+			pce_dev->dst_nents, DMA_FROM_DEVICE);
+	}
+	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+		(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+						DMA_TO_DEVICE);
+	/* get iv out */
+	if (pce_dev->mode == QCE_MODE_ECB) {
+		pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status |
+					pce_dev->chan_ce_out_status);
+	} else {
+		int i;
+
+		for (i = 0; i < 4; i++)
+			iv_out[i] = readl_relaxed(pce_dev->iobase +
+				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)));
+
+		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
+		pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status |
+					pce_dev->chan_ce_out_status);
+	}
+
+	return 0;
+};
+
+static int _ablk_cipher_use_pmem_complete(struct qce_device *pce_dev)
+{
+	struct ablkcipher_request *areq;
+	uint32_t iv_out[4];
+	unsigned char iv[4 * sizeof(uint32_t)];
+
+	areq = (struct ablkcipher_request *) pce_dev->areq;
+
+	/* get iv out */
+	if (pce_dev->mode == QCE_MODE_ECB) {
+		pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status |
+					pce_dev->chan_ce_out_status);
+	} else {
+		int i;
+
+		for (i = 0; i < 4; i++)
+			iv_out[i] = readl_relaxed(pce_dev->iobase +
+				CRYPTO_CNTR0_IV0_REG + (i * sizeof(uint32_t)));
+
+		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
+		pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status |
+					pce_dev->chan_ce_out_status);
+	}
+
+	return 0;
+};
+
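+/*
+ * An ADM descriptor can carry at most 0x8000 bytes; split a larger
+ * transfer across consecutive descriptors, failing with -ENOMEM when
+ * the descriptor list (QCE_MAX_NUM_DESC entries) is exhausted.
+ */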
+static int qce_split_and_insert_dm_desc(struct dmov_desc *pdesc,
+			unsigned int plen, unsigned int paddr, int *index)
+{
+	while (plen > 0x8000) {
+		pdesc->len = 0x8000;
+		if (paddr > 0) {
+			pdesc->addr = paddr;
+			paddr += 0x8000;
+		}
+		plen -= pdesc->len;
+		if (plen > 0) {
+			*index = (*index) + 1;
+			if ((*index) >= QCE_MAX_NUM_DESC)
+				return -ENOMEM;
+			pdesc++;
+		}
+	}
+	if ((plen > 0) && (plen <= 0x8000)) {
+		pdesc->len = plen;
+		if (paddr > 0)
+			pdesc->addr = paddr;
+	}
+
+	return 0;
+}
+
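+/*
+ * Chain a scatterlist into the ce_in source descriptor list, merging
+ * physically contiguous entries and splitting runs larger than 0x8000
+ * bytes; the matching dst descriptor length is set to nbytes.
+ */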
+static int _chain_sg_buffer_in(struct qce_device *pce_dev,
+		struct scatterlist *sg, unsigned int nbytes)
+{
+	unsigned int len;
+	unsigned int dlen;
+	struct dmov_desc *pdesc;
+
+	pdesc = pce_dev->ce_in_dst_desc + pce_dev->ce_in_dst_desc_index;
+	if (nbytes > 0x8000)
+		qce_split_and_insert_dm_desc(pdesc, nbytes, 0,
+				&pce_dev->ce_in_dst_desc_index);
+	else
+		pdesc->len = nbytes;
+
+	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
+	/*
+	 * Two consecutive chunks may be merged into the previous
+	 * buffer descriptor.
+	 */
+	while (nbytes > 0) {
+		len = min(nbytes, sg_dma_len(sg));
+		dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
+		nbytes -= len;
+		if (dlen == 0) {
+			pdesc->addr  = sg_dma_address(sg);
+			pdesc->len = len;
+			if (pdesc->len > 0x8000)
+				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
+						sg_dma_address(sg),
+						&pce_dev->ce_in_src_desc_index);
+		} else if (sg_dma_address(sg) == (pdesc->addr + dlen)) {
+			pdesc->len  = dlen + len;
+			if (pdesc->len > 0x8000)
+				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
+						pdesc->addr,
+						&pce_dev->ce_in_src_desc_index);
+		} else {
+			pce_dev->ce_in_src_desc_index++;
+			if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC)
+				return -ENOMEM;
+			pdesc++;
+			pdesc->len = len;
+			pdesc->addr = sg_dma_address(sg);
+			if (pdesc->len > 0x8000)
+				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
+						sg_dma_address(sg),
+						&pce_dev->ce_in_src_desc_index);
+		}
+		if (nbytes > 0)
+			sg = sg_next(sg);
+	}
+	return 0;
+}
+
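+/*
+ * Chain a single physically contiguous buffer (e.g. the pad area) into
+ * the ce_in source descriptor list and grow the dst descriptor length
+ * by the same amount.
+ */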
+static int _chain_pm_buffer_in(struct qce_device *pce_dev,
+		unsigned int pmem, unsigned int nbytes)
+{
+	unsigned int dlen;
+	struct dmov_desc *pdesc;
+
+	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
+	dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
+	if (dlen == 0) {
+		pdesc->addr  = pmem;
+		pdesc->len = nbytes;
+	} else if (pmem == (pdesc->addr + dlen)) {
+		pdesc->len  = dlen + nbytes;
+	} else {
+		pce_dev->ce_in_src_desc_index++;
+		if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC)
+			return -ENOMEM;
+		pdesc++;
+		pdesc->len = nbytes;
+		pdesc->addr = pmem;
+	}
+	pdesc = pce_dev->ce_in_dst_desc + pce_dev->ce_in_dst_desc_index;
+	pdesc->len += nbytes;
+
+	return 0;
+}
+
+static void _chain_buffer_in_init(struct qce_device *pce_dev)
+{
+	struct dmov_desc *pdesc;
+
+	pce_dev->ce_in_src_desc_index = 0;
+	pce_dev->ce_in_dst_desc_index = 0;
+	pdesc = pce_dev->ce_in_src_desc;
+	pdesc->len = 0;
+}
+
+static void _ce_in_final(struct qce_device *pce_dev, unsigned total)
+{
+	struct dmov_desc *pdesc;
+	dmov_sg *pcmd;
+
+	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
+	pdesc->len |= ADM_DESC_LAST;
+	pdesc = pce_dev->ce_in_dst_desc + pce_dev->ce_in_dst_desc_index;
+	pdesc->len |= ADM_DESC_LAST;
+
+	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in;
+	pcmd->cmd |= CMD_LC;
+}
+
+#ifdef QCE_DEBUG
+static void _ce_in_dump(struct qce_device *pce_dev)
+{
+	int i;
+	struct dmov_desc *pdesc;
+
+	dev_info(pce_dev->pdev, "_ce_in_dump: src\n");
+	for (i = 0; i <= pce_dev->ce_in_src_desc_index; i++) {
+		pdesc = pce_dev->ce_in_src_desc + i;
+		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
+				pdesc->len);
+	}
+	dev_info(pce_dev->pdev, "_ce_in_dump: dst\n");
+	for (i = 0; i <= pce_dev->ce_in_dst_desc_index; i++) {
+		pdesc = pce_dev->ce_in_dst_desc + i;
+		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
+				pdesc->len);
+	}
+};
+
+static void _ce_out_dump(struct qce_device *pce_dev)
+{
+	int i;
+	struct dmov_desc *pdesc;
+
+	dev_info(pce_dev->pdev, "_ce_out_dump: src\n");
+	for (i = 0; i <= pce_dev->ce_out_src_desc_index; i++) {
+		pdesc = pce_dev->ce_out_src_desc + i;
+		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
+				pdesc->len);
+	}
+
+	dev_info(pce_dev->pdev, "_ce_out_dump: dst\n");
+	for (i = 0; i <= pce_dev->ce_out_dst_desc_index; i++) {
+		pdesc = pce_dev->ce_out_dst_desc + i;
+		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
+				pdesc->len);
+	}
+};
+
+#else
+
+static void _ce_in_dump(struct qce_device *pce_dev)
+{
+};
+
+static void _ce_out_dump(struct qce_device *pce_dev)
+{
+};
+
+#endif
+
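+/*
+ * Chain a scatterlist into the ce_out destination descriptor list,
+ * merging physically contiguous entries and splitting runs larger than
+ * 0x8000 bytes; the matching src descriptor length is set to nbytes.
+ */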
+static int _chain_sg_buffer_out(struct qce_device *pce_dev,
+		struct scatterlist *sg, unsigned int nbytes)
+{
+	unsigned int len;
+	unsigned int dlen;
+	struct dmov_desc *pdesc;
+
+	pdesc = pce_dev->ce_out_src_desc + pce_dev->ce_out_src_desc_index;
+	if (nbytes > 0x8000)
+		qce_split_and_insert_dm_desc(pdesc, nbytes, 0,
+				&pce_dev->ce_out_src_desc_index);
+	else
+		pdesc->len = nbytes;
+
+	pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
+	/*
+	 * Two consecutive chunks may be merged into the previous
+	 * buffer descriptor.
+	 */
+	while (nbytes > 0) {
+		len = min(nbytes, sg_dma_len(sg));
+		dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
+		nbytes -= len;
+		if (dlen == 0) {
+			pdesc->addr  = sg_dma_address(sg);
+			pdesc->len = len;
+			if (pdesc->len > 0x8000)
+				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
+					sg_dma_address(sg),
+					&pce_dev->ce_out_dst_desc_index);
+		} else if (sg_dma_address(sg) == (pdesc->addr + dlen)) {
+			pdesc->len  = dlen + len;
+			if (pdesc->len > 0x8000)
+				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
+					pdesc->addr,
+					&pce_dev->ce_out_dst_desc_index);
+
+		} else {
+			pce_dev->ce_out_dst_desc_index++;
+			if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC)
+				return -EIO;
+			pdesc++;
+			pdesc->len = len;
+			pdesc->addr = sg_dma_address(sg);
+			if (pdesc->len > 0x8000)
+				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
+					sg_dma_address(sg),
+					&pce_dev->ce_out_dst_desc_index);
+
+		}
+		if (nbytes > 0)
+			sg = sg_next(sg);
+	}
+	return 0;
+}
+
+static int _chain_pm_buffer_out(struct qce_device *pce_dev,
+		unsigned int pmem, unsigned int nbytes)
+{
+	unsigned int dlen;
+	struct dmov_desc *pdesc;
+
+	pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
+	dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
+
+	if (dlen == 0) {
+		pdesc->addr  = pmem;
+		pdesc->len = nbytes;
+	} else if (pmem == (pdesc->addr + dlen)) {
+		pdesc->len  = dlen + nbytes;
+	} else {
+		pce_dev->ce_out_dst_desc_index++;
+		if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC)
+			return -EIO;
+		pdesc++;
+		pdesc->len = nbytes;
+		pdesc->addr = pmem;
+	}
+	pdesc = pce_dev->ce_out_src_desc + pce_dev->ce_out_src_desc_index;
+	pdesc->len += nbytes;
+
+	return 0;
+};
+
+static void _chain_buffer_out_init(struct qce_device *pce_dev)
+{
+	struct dmov_desc *pdesc;
+
+	pce_dev->ce_out_dst_desc_index = 0;
+	pce_dev->ce_out_src_desc_index = 0;
+	pdesc = pce_dev->ce_out_dst_desc;
+	pdesc->len = 0;
+};
+
+static void _ce_out_final(struct qce_device *pce_dev, unsigned total)
+{
+	struct dmov_desc *pdesc;
+	dmov_sg *pcmd;
+
+	pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
+	pdesc->len |= ADM_DESC_LAST;
+	pdesc = pce_dev->ce_out_src_desc + pce_dev->ce_out_src_desc_index;
+	pdesc->len |= ADM_DESC_LAST;
+	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out;
+	pcmd->cmd |= CMD_LC;
+};
+
+static void _aead_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+							result);
+		pce_dev->chan_ce_in_status = -1;
+	} else {
+		pce_dev->chan_ce_in_status = 0;
+	}
+
+	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
+	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+		/* done */
+		_aead_complete(pce_dev);
+	}
+};
+
+static void _aead_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+							result);
+		pce_dev->chan_ce_out_status = -1;
+	} else {
+		pce_dev->chan_ce_out_status = 0;
+	};
+
+	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
+	if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+		/* done */
+		_aead_complete(pce_dev);
+	}
+
+};
+
+static void _sha_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+						result);
+		pce_dev->chan_ce_in_status = -1;
+	} else {
+		pce_dev->chan_ce_in_status = 0;
+	}
+	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+	_sha_complete(pce_dev);
+};
+
+static void _ablk_cipher_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+						result);
+		pce_dev->chan_ce_in_status = -1;
+	} else {
+		pce_dev->chan_ce_in_status = 0;
+	}
+
+	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
+	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+		/* done */
+		_ablk_cipher_complete(pce_dev);
+	}
+};
+
+static void _ablk_cipher_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+						result);
+		pce_dev->chan_ce_out_status = -1;
+	} else {
+		pce_dev->chan_ce_out_status = 0;
+	};
+
+	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
+	if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+		/* done */
+		_ablk_cipher_complete(pce_dev);
+	}
+};
+
+
+static void _ablk_cipher_ce_in_call_back_pmem(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+						result);
+		pce_dev->chan_ce_in_status = -1;
+	} else {
+		pce_dev->chan_ce_in_status = 0;
+	}
+
+	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
+	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+		/* done */
+		_ablk_cipher_use_pmem_complete(pce_dev);
+	}
+};
+
+static void _ablk_cipher_ce_out_call_back_pmem(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+						result);
+		pce_dev->chan_ce_out_status = -1;
+	} else {
+		pce_dev->chan_ce_out_status = 0;
+	};
+
+	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
+	if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+		/* done */
+		_ablk_cipher_use_pmem_complete(pce_dev);
+	}
+};
+
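+/*
+ * Carve the coherent memory allocated in qce_open() into the ADM
+ * descriptor lists, scatter-gather commands and command pointer lists
+ * for the ce_in and ce_out channels, and pre-fill the fields that do
+ * not change per operation.
+ */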
+static int _setup_cmd_template(struct qce_device *pce_dev)
+{
+	dmov_sg *pcmd;
+	struct dmov_desc *pdesc;
+	unsigned char *vaddr;
+	int i = 0;
+
+	/* Divide up the coherent memory (2 pages) allocated in qce_open() */
+
+	/* 1. ce_in channel 1st command src descriptors, 128 entries */
+	vaddr = pce_dev->coh_vmem;
+	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
+	pce_dev->ce_in_src_desc = (struct dmov_desc *) vaddr;
+	pce_dev->phy_ce_in_src_desc = pce_dev->coh_pmem +
+			 (vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);
+
+	/* 2. ce_in channel 1st command dst descriptors, 128 entries */
+	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
+	pce_dev->ce_in_dst_desc = (struct dmov_desc *) vaddr;
+	pce_dev->phy_ce_in_dst_desc = pce_dev->coh_pmem +
+			 (vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);
+
+	/* 3. ce_in channel command list of one scatter gather command */
+	pce_dev->cmd_list_ce_in = vaddr;
+	pce_dev->phy_cmd_list_ce_in = pce_dev->coh_pmem
+			 + (vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + sizeof(dmov_sg);
+
+	/* 4. authentication result. */
+	pce_dev->dig_result = vaddr;
+	pce_dev->phy_dig_result = pce_dev->coh_pmem +
+			(vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + SHA256_DIGESTSIZE;
+
+	/* 5. ce_out channel command list of one scatter gather command */
+	pce_dev->cmd_list_ce_out = vaddr;
+	pce_dev->phy_cmd_list_ce_out = pce_dev->coh_pmem
+			 + (vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + sizeof(dmov_sg);
+
+	/* 6. ce_out channel command src descriptors, 128 entries */
+	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
+	pce_dev->ce_out_src_desc = (struct dmov_desc *) vaddr;
+	pce_dev->phy_ce_out_src_desc = pce_dev->coh_pmem
+			 + (vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);
+
+	/* 7. ce_out channel command dst descriptors, 128 entries.  */
+	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
+	pce_dev->ce_out_dst_desc = (struct dmov_desc *) vaddr;
+	pce_dev->phy_ce_out_dst_desc = pce_dev->coh_pmem
+			 + (vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);
+
+	/* 8. pad area. */
+	pce_dev->ce_pad = vaddr;
+	pce_dev->phy_ce_pad = pce_dev->coh_pmem +
+			(vaddr - pce_dev->coh_vmem);
+
+	/* Pad area is two ADM CE blocks to cover the worst case (AES-CCM) */
+	vaddr = vaddr + 2 * ADM_CE_BLOCK_SIZE;
+
+	/* 9. ce_in channel command pointer list.	 */
+	vaddr = (unsigned char *) ALIGN(((unsigned int) vaddr), 8);
+	pce_dev->cmd_pointer_list_ce_in = (unsigned int *) vaddr;
+	pce_dev->phy_cmd_pointer_list_ce_in = pce_dev->coh_pmem +
+			(vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + sizeof(unsigned char *);
+
+	/* 10. ce_out channel command pointer list. */
+	vaddr = (unsigned char *) ALIGN(((unsigned int) vaddr), 8);
+	pce_dev->cmd_pointer_list_ce_out = (unsigned int *) vaddr;
+	pce_dev->phy_cmd_pointer_list_ce_out =  pce_dev->coh_pmem +
+			(vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + sizeof(unsigned char *);
+
+	/* 11. throw away area to store by-pass data from ce_out. */
+	pce_dev->ce_out_ignore = (unsigned char *) vaddr;
+	pce_dev->phy_ce_out_ignore  = pce_dev->coh_pmem
+			+ (vaddr - pce_dev->coh_vmem);
+	pce_dev->ce_out_ignore_size = PAGE_SIZE - (vaddr -
+			pce_dev->coh_vmem);  /* at least 1.5 K of space */
+	/*
+	 * The first command of command list ce_in is for the input of
+	 * concurrent operation of encrypt/decrypt or for the input
+	 * of authentication.
+	 */
+	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in;
+	/* swap byte and half word, dst crci, scatter gather */
+	pcmd->cmd = CMD_DST_SWAP_BYTES | CMD_DST_SWAP_SHORTS |
+			CMD_DST_CRCI(pce_dev->crci_in) | CMD_MODE_SG;
+	pdesc = pce_dev->ce_in_src_desc;
+	pdesc->addr = 0;	/* to be filled in each operation */
+	pdesc->len = 0;		/* to be filled in each operation */
+	pcmd->src_dscr = (unsigned) pce_dev->phy_ce_in_src_desc;
+
+	pdesc = pce_dev->ce_in_dst_desc;
+	for (i = 0; i < QCE_MAX_NUM_DESC; i++) {
+		pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase);
+		pdesc->len = 0; /* to be filled in each operation */
+		pdesc++;
+	}
+	pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_in_dst_desc;
+	pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) |
+						DST_INDEX_SG_CMD(0);
+	pcmd++;
+
+	/* setup command pointer list */
+	*(pce_dev->cmd_pointer_list_ce_in) = (CMD_PTR_LP | DMOV_CMD_LIST |
+			DMOV_CMD_ADDR((unsigned int)
+					pce_dev->phy_cmd_list_ce_in));
+	pce_dev->chan_ce_in_cmd->user = (void *) pce_dev;
+	pce_dev->chan_ce_in_cmd->exec_func = NULL;
+	pce_dev->chan_ce_in_cmd->cmdptr = DMOV_CMD_ADDR(
+			(unsigned int) pce_dev->phy_cmd_pointer_list_ce_in);
+	pce_dev->chan_ce_in_cmd->crci_mask = msm_dmov_build_crci_mask(1,
+			pce_dev->crci_in);
+
+
+	/*
+	 * The first command in the command list ce_out.
+	 * It is for encrypt/decrypt output.
+	 * If hashing only, ce_out is not used.
+	 */
+	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out;
+	/* swap byte, half word, source crci, scatter gather */
+	pcmd->cmd =   CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS |
+			CMD_SRC_CRCI(pce_dev->crci_out) | CMD_MODE_SG;
+
+	pdesc = pce_dev->ce_out_src_desc;
+	for (i = 0; i < QCE_MAX_NUM_DESC; i++) {
+		pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase);
+		pdesc->len = 0;  /* to be filled in each operation */
+		pdesc++;
+	}
+	pcmd->src_dscr = (unsigned) pce_dev->phy_ce_out_src_desc;
+
+	pdesc = pce_dev->ce_out_dst_desc;
+	pdesc->addr = 0;  /* to be filled in each operation */
+	pdesc->len = 0;   /* to be filled in each operation */
+	pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_out_dst_desc;
+	pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) |
+						DST_INDEX_SG_CMD(0);
+
+	pcmd++;
+
+	/* setup command pointer list */
+	*(pce_dev->cmd_pointer_list_ce_out) = (CMD_PTR_LP | DMOV_CMD_LIST |
+			DMOV_CMD_ADDR((unsigned int)pce_dev->
+						phy_cmd_list_ce_out));
+
+	pce_dev->chan_ce_out_cmd->user = pce_dev;
+	pce_dev->chan_ce_out_cmd->exec_func = NULL;
+	pce_dev->chan_ce_out_cmd->cmdptr = DMOV_CMD_ADDR(
+			(unsigned int) pce_dev->phy_cmd_pointer_list_ce_out);
+	pce_dev->chan_ce_out_cmd->crci_mask = msm_dmov_build_crci_mask(1,
+			pce_dev->crci_out);
+
+	return 0;
+};
+
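+/*
+ * Mark the requested channels as in progress and queue their command
+ * lists to the ADM data mover; completion is reported through the
+ * per-channel callbacks.
+ */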
+static int _qce_start_dma(struct qce_device *pce_dev, bool ce_in, bool ce_out)
+{
+
+	if (ce_in)
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IN_PROG;
+	else
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
+
+	if (ce_out)
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IN_PROG;
+	else
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
+
+	if (ce_in)
+		msm_dmov_enqueue_cmd(pce_dev->chan_ce_in,
+					pce_dev->chan_ce_in_cmd);
+	if (ce_out)
+		msm_dmov_enqueue_cmd(pce_dev->chan_ce_out,
+					pce_dev->chan_ce_out_cmd);
+
+	return 0;
+};
+
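+/*
+ * Queue an AEAD (cipher + authentication) request: map the associated
+ * data, source and destination buffers, chain them with any required
+ * padding on the ce_in/ce_out channels, program the crypto engine, and
+ * start the DMA.
+ */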
+int qce_aead_req(void *handle, struct qce_req *q_req)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	struct aead_request *areq = (struct aead_request *) q_req->areq;
+	uint32_t authsize = q_req->authsize;
+	uint32_t totallen_in, totallen_out, out_len;
+	uint32_t pad_len_in, pad_len_out;
+	uint32_t pad_mac_len_out, pad_ptx_len_out;
+	int rc = 0;
+
+	if (q_req->dir == QCE_ENCRYPT) {
+		q_req->cryptlen = areq->cryptlen;
+		totallen_in = q_req->cryptlen + areq->assoclen;
+		totallen_out = q_req->cryptlen + authsize + areq->assoclen;
+		out_len = areq->cryptlen + authsize;
+		pad_len_in = ALIGN(totallen_in, ADM_CE_BLOCK_SIZE) -
+								totallen_in;
+		pad_mac_len_out = ALIGN(authsize, ADM_CE_BLOCK_SIZE) -
+								authsize;
+		pad_ptx_len_out = ALIGN(q_req->cryptlen, ADM_CE_BLOCK_SIZE) -
+							q_req->cryptlen;
+		pad_len_out = pad_ptx_len_out + pad_mac_len_out;
+		totallen_out += pad_len_out;
+	} else {
+		q_req->cryptlen = areq->cryptlen - authsize;
+		totallen_in = areq->cryptlen + areq->assoclen;
+		totallen_out = q_req->cryptlen + areq->assoclen;
+		out_len = areq->cryptlen - authsize;
+		pad_len_in = ALIGN(areq->cryptlen, ADM_CE_BLOCK_SIZE) -
+							areq->cryptlen;
+		pad_len_out = pad_len_in + authsize;
+		totallen_out += pad_len_out;
+	}
+
+	_chain_buffer_in_init(pce_dev);
+	_chain_buffer_out_init(pce_dev);
+
+	pce_dev->assoc_nents = 0;
+	pce_dev->src_nents = 0;
+	pce_dev->dst_nents = 0;
+	pce_dev->ivsize = q_req->ivsize;
+	pce_dev->authsize = q_req->authsize;
+
+	/* associated data input */
+	pce_dev->assoc_nents = count_sg(areq->assoc, areq->assoclen);
+	dma_map_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
+					 DMA_TO_DEVICE);
+	if (_chain_sg_buffer_in(pce_dev, areq->assoc, areq->assoclen) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+	/* cipher input */
+	pce_dev->src_nents = count_sg(areq->src, areq->cryptlen);
+	dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+	if (_chain_sg_buffer_in(pce_dev, areq->src, areq->cryptlen) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+	/* pad data in */
+	if (pad_len_in) {
+		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
+						pad_len_in) < 0) {
+			rc = -ENOMEM;
+			goto bad;
+		}
+	}
+
+	/* ignore associated data */
+	if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_out_ignore,
+				areq->assoclen) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+	/* cipher + mac output for encryption */
+	if (areq->src != areq->dst) {
+		pce_dev->dst_nents = count_sg(areq->dst, out_len);
+		dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+				DMA_FROM_DEVICE);
+	};
+	if (_chain_sg_buffer_out(pce_dev, areq->dst, out_len) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+	/* pad data out */
+	if (pad_len_out) {
+		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
+						pad_len_out) < 0) {
+			rc = -ENOMEM;
+			goto bad;
+		}
+	}
+
+	/* finalize the ce_in and ce_out channels command lists */
+	_ce_in_final(pce_dev, ALIGN(totallen_in, ADM_CE_BLOCK_SIZE));
+	_ce_out_final(pce_dev, ALIGN(totallen_out, ADM_CE_BLOCK_SIZE));
+
+	/* set up crypto device */
+	rc = _ce_setup_cipher(pce_dev, q_req, totallen_in, areq->assoclen);
+	if (rc < 0)
+		goto bad;
+
+	/* setup for callback, and issue command to adm */
+	pce_dev->areq = q_req->areq;
+	pce_dev->qce_cb = q_req->qce_cb;
+
+	pce_dev->chan_ce_in_cmd->complete_func = _aead_ce_in_call_back;
+	pce_dev->chan_ce_out_cmd->complete_func = _aead_ce_out_call_back;
+
+	_ce_in_dump(pce_dev);
+	_ce_out_dump(pce_dev);
+
+	rc = _qce_start_dma(pce_dev, true, true);
+	if (rc == 0)
+		return 0;
+bad:
+	if (pce_dev->assoc_nents) {
+		dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
+				DMA_TO_DEVICE);
+	}
+
+	if (pce_dev->src_nents) {
+		dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+				(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+								DMA_TO_DEVICE);
+	}
+	if (pce_dev->dst_nents) {
+		dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+				DMA_FROM_DEVICE);
+	}
+	return rc;
+}
+EXPORT_SYMBOL(qce_aead_req);
+
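+/*
+ * Queue an ablkcipher request: map (or, for pmem, translate) the source
+ * and destination scatterlists, chain them with block-size padding on
+ * both channels, program the cipher, and start the DMA.
+ */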
+int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
+{
+	int rc = 0;
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	struct ablkcipher_request *areq = (struct ablkcipher_request *)
+						c_req->areq;
+
+	uint32_t pad_len = ALIGN(areq->nbytes, ADM_CE_BLOCK_SIZE)
+						- areq->nbytes;
+
+	_chain_buffer_in_init(pce_dev);
+	_chain_buffer_out_init(pce_dev);
+
+	pce_dev->src_nents = 0;
+	pce_dev->dst_nents = 0;
+
+	/* cipher input */
+	pce_dev->src_nents = count_sg(areq->src, areq->nbytes);
+
+	if (c_req->use_pmem != 1)
+		dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+								DMA_TO_DEVICE);
+	else
+		dma_map_pmem_sg(&c_req->pmem->src[0], pce_dev->src_nents,
+								areq->src);
+
+	if (_chain_sg_buffer_in(pce_dev, areq->src, areq->nbytes) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+
+	/* cipher output */
+	if (areq->src != areq->dst) {
+		pce_dev->dst_nents = count_sg(areq->dst, areq->nbytes);
+		if (c_req->use_pmem != 1)
+			dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+							DMA_FROM_DEVICE);
+		else
+			dma_map_pmem_sg(&c_req->pmem->dst[0],
+					pce_dev->dst_nents, areq->dst);
+	};
+	if (_chain_sg_buffer_out(pce_dev, areq->dst, areq->nbytes) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+
+	/* pad data */
+	if (pad_len) {
+		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
+						pad_len) < 0) {
+			rc = -ENOMEM;
+			goto bad;
+		}
+		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
+						pad_len) < 0) {
+			rc = -ENOMEM;
+			goto bad;
+		}
+	}
+
+	/* finalize the ce_in and ce_out channels command lists */
+	_ce_in_final(pce_dev, areq->nbytes + pad_len);
+	_ce_out_final(pce_dev, areq->nbytes + pad_len);
+
+	_ce_in_dump(pce_dev);
+	_ce_out_dump(pce_dev);
+
+	/* set up crypto device */
+	rc = _ce_setup_cipher(pce_dev, c_req, areq->nbytes, 0);
+	if (rc < 0)
+		goto bad;
+
+	/* setup for callback, and issue command to adm */
+	pce_dev->areq = areq;
+	pce_dev->qce_cb = c_req->qce_cb;
+	if (c_req->use_pmem == 1) {
+		pce_dev->chan_ce_in_cmd->complete_func =
+					_ablk_cipher_ce_in_call_back_pmem;
+		pce_dev->chan_ce_out_cmd->complete_func =
+					_ablk_cipher_ce_out_call_back_pmem;
+	} else {
+		pce_dev->chan_ce_in_cmd->complete_func =
+					_ablk_cipher_ce_in_call_back;
+		pce_dev->chan_ce_out_cmd->complete_func =
+					_ablk_cipher_ce_out_call_back;
+	}
+	rc = _qce_start_dma(pce_dev, true, true);
+
+	if (rc == 0)
+		return 0;
+bad:
+	if (c_req->use_pmem != 1) {
+		if (pce_dev->dst_nents) {
+			dma_unmap_sg(pce_dev->pdev, areq->dst,
+					pce_dev->dst_nents, DMA_FROM_DEVICE);
+		}
+		if (pce_dev->src_nents) {
+			dma_unmap_sg(pce_dev->pdev, areq->src,
+					pce_dev->src_nents,
+					(areq->src == areq->dst) ?
+						DMA_BIDIRECTIONAL :
+						DMA_TO_DEVICE);
+		}
+	}
+	return rc;
+}
+EXPORT_SYMBOL(qce_ablk_cipher_req);
+
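+/*
+ * Queue a hash request: map the source scatterlist, chain it with
+ * block-size padding on the ce_in channel only, program the auth
+ * block, and start the DMA. The digest is read back in _sha_complete().
+ */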
+int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	int rc;
+	uint32_t pad_len = ALIGN(sreq->size, ADM_CE_BLOCK_SIZE) - sreq->size;
+	struct ahash_request *areq = (struct ahash_request *)sreq->areq;
+
+	_chain_buffer_in_init(pce_dev);
+	pce_dev->src_nents = count_sg(sreq->src, sreq->size);
+	dma_map_sg(pce_dev->pdev, sreq->src, pce_dev->src_nents,
+							DMA_TO_DEVICE);
+
+	if (_chain_sg_buffer_in(pce_dev, sreq->src, sreq->size) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+
+	if (pad_len) {
+		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
+						pad_len) < 0) {
+			rc = -ENOMEM;
+			goto bad;
+		}
+	}
+	_ce_in_final(pce_dev, sreq->size + pad_len);
+
+	_ce_in_dump(pce_dev);
+
+	rc =  _ce_setup_hash(pce_dev, sreq);
+
+	if (rc < 0)
+		goto bad;
+
+	pce_dev->areq = areq;
+	pce_dev->qce_cb = sreq->qce_cb;
+	pce_dev->chan_ce_in_cmd->complete_func = _sha_ce_in_call_back;
+
+	rc =  _qce_start_dma(pce_dev, true, false);
+
+	if (rc == 0)
+		return 0;
+bad:
+	if (pce_dev->src_nents) {
+		dma_unmap_sg(pce_dev->pdev, sreq->src,
+				pce_dev->src_nents, DMA_TO_DEVICE);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(qce_process_sha_req);
+
+/* crypto engine open function. */
+void *qce_open(struct platform_device *pdev, int *rc)
+{
+	struct qce_device *pce_dev;
+	struct resource *resource;
+	struct clk *ce_core_clk;
+	struct clk *ce_clk;
+
+	pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
+	if (!pce_dev) {
+		*rc = -ENOMEM;
+		dev_err(&pdev->dev, "Can not allocate memory\n");
+		return NULL;
+	}
+	pce_dev->pdev = &pdev->dev;
+
+	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!resource) {
+		*rc = -ENXIO;
+		dev_err(pce_dev->pdev, "Missing MEM resource\n");
+		goto err_pce_dev;
+	};
+	pce_dev->phy_iobase = resource->start;
+	pce_dev->iobase = ioremap_nocache(resource->start,
+				resource->end - resource->start + 1);
+	if (!pce_dev->iobase) {
+		*rc = -ENOMEM;
+		dev_err(pce_dev->pdev, "Can not map io memory\n");
+		goto err_pce_dev;
+	}
+
+	pce_dev->chan_ce_in_cmd = kzalloc(sizeof(struct msm_dmov_cmd),
+			GFP_KERNEL);
+	pce_dev->chan_ce_out_cmd = kzalloc(sizeof(struct msm_dmov_cmd),
+			GFP_KERNEL);
+	if (pce_dev->chan_ce_in_cmd == NULL ||
+			pce_dev->chan_ce_out_cmd == NULL) {
+		dev_err(pce_dev->pdev, "Can not allocate memory\n");
+		*rc = -ENOMEM;
+		goto err_dm_chan_cmd;
+	}
+
+	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+					"crypto_channels");
+	if (!resource) {
+		*rc = -ENXIO;
+		dev_err(pce_dev->pdev, "Missing DMA channel resource\n");
+		goto err_dm_chan_cmd;
+	};
+	pce_dev->chan_ce_in = resource->start;
+	pce_dev->chan_ce_out = resource->end;
+	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+				"crypto_crci_in");
+	if (!resource) {
+		*rc = -ENXIO;
+		dev_err(pce_dev->pdev, "Missing DMA crci in resource\n");
+		goto err_dm_chan_cmd;
+	};
+	pce_dev->crci_in = resource->start;
+	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+				"crypto_crci_out");
+	if (!resource) {
+		*rc = -ENXIO;
+		dev_err(pce_dev->pdev, "Missing DMA crci out resource\n");
+		goto err_dm_chan_cmd;
+	};
+	pce_dev->crci_out = resource->start;
+
+	pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
+			2*PAGE_SIZE, &pce_dev->coh_pmem, GFP_KERNEL);
+
+	if (pce_dev->coh_vmem == NULL) {
+		*rc = -ENOMEM;
+		dev_err(pce_dev->pdev, "Can not allocate coherent memory.\n");
+		goto err;
+	}
+
+	/* Get CE core clk */
+	ce_core_clk = clk_get(pce_dev->pdev, "ce_clk");
+	if (IS_ERR(ce_core_clk)) {
+		*rc = PTR_ERR(ce_core_clk);
+		goto err;
+	}
+	pce_dev->ce_core_clk = ce_core_clk;
+	/* Get CE clk */
+	ce_clk = clk_get(pce_dev->pdev, "ce_pclk");
+	if (IS_ERR(ce_clk)) {
+		*rc = PTR_ERR(ce_clk);
+		clk_put(pce_dev->ce_core_clk);
+		goto err;
+	}
+	pce_dev->ce_clk = ce_clk;
+
+	/* Enable CE core clk */
+	*rc = clk_enable(pce_dev->ce_core_clk);
+	if (*rc) {
+		clk_put(pce_dev->ce_core_clk);
+		clk_put(pce_dev->ce_clk);
+		goto err;
+	} else {
+		/* Enable CE clk */
+		*rc = clk_enable(pce_dev->ce_clk);
+		if (*rc) {
+			clk_disable(pce_dev->ce_core_clk);
+			clk_put(pce_dev->ce_core_clk);
+			clk_put(pce_dev->ce_clk);
+			goto err;
+
+		}
+	}
+	_setup_cmd_template(pce_dev);
+
+	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+	if (_init_ce_engine(pce_dev)) {
+		*rc = -ENXIO;
+		goto err;
+	}
+	*rc = 0;
+	return pce_dev;
+
+err:
+	if (pce_dev->coh_vmem)
+		dma_free_coherent(pce_dev->pdev, 2*PAGE_SIZE,
+				pce_dev->coh_vmem, pce_dev->coh_pmem);
+err_dm_chan_cmd:
+	kfree(pce_dev->chan_ce_in_cmd);
+	kfree(pce_dev->chan_ce_out_cmd);
+	if (pce_dev->iobase)
+		iounmap(pce_dev->iobase);
+
+err_pce_dev:
+
+	kfree(pce_dev);
+
+	return NULL;
+}
+EXPORT_SYMBOL(qce_open);
+
+/* crypto engine close function. */
+int qce_close(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+
+	if (handle == NULL)
+		return -ENODEV;
+	if (pce_dev->iobase)
+		iounmap(pce_dev->iobase);
+
+	if (pce_dev->coh_vmem)
+		dma_free_coherent(pce_dev->pdev, 2*PAGE_SIZE, pce_dev->coh_vmem,
+						pce_dev->coh_pmem);
+	clk_disable(pce_dev->ce_clk);
+	clk_disable(pce_dev->ce_core_clk);
+
+	clk_put(pce_dev->ce_clk);
+	clk_put(pce_dev->ce_core_clk);
+
+	kfree(pce_dev->chan_ce_in_cmd);
+	kfree(pce_dev->chan_ce_out_cmd);
+	kfree(handle);
+
+	return 0;
+}
+EXPORT_SYMBOL(qce_close);
+
+int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
+{
+	if (ce_support == NULL)
+		return -EINVAL;
+
+	ce_support->sha1_hmac_20 = false;
+	ce_support->sha1_hmac = false;
+	ce_support->sha256_hmac = false;
+	ce_support->sha_hmac = false;
+	ce_support->cmac  = true;
+	ce_support->aes_key_192 = false;
+	ce_support->aes_xts = true;
+	ce_support->aes_ccm = true;
+	ce_support->ota = false;
+	return 0;
+}
+EXPORT_SYMBOL(qce_hw_support);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
+MODULE_DESCRIPTION("Crypto Engine driver");
+MODULE_VERSION("2.04");
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
new file mode 100644
index 0000000..18eff22
--- /dev/null
+++ b/drivers/crypto/msm/qcedev.c
@@ -0,0 +1,2095 @@
+/* Qualcomm CE device driver.
+ *
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/mman.h>
+#include <linux/android_pmem.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/hash.h>
+#include <linux/platform_data/qcom_crypto_device.h>
+#include <mach/scm.h>
+#include "inc/qcedev.h"
+#include "inc/qce.h"
+
+
+#define CACHE_LINE_SIZE 32
+#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
+
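+/* standard initialization vector for SHA-1, source: FIPS 180-2 */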
+static uint8_t  _std_init_vector_sha1_uint8[] =   {
+	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
+	0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
+	0xC3, 0xD2, 0xE1, 0xF0
+};
+/* standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint8_t _std_init_vector_sha256_uint8[] = {
+	0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
+	0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
+	0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
+	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
+};
+
+enum qcedev_crypto_oper_type {
+  QCEDEV_CRYPTO_OPER_CIPHER	= 0,
+  QCEDEV_CRYPTO_OPER_SHA	= 1,
+  QCEDEV_CRYPTO_OPER_LAST
+};
+
+struct qcedev_control;
+
+struct qcedev_cipher_req {
+	struct ablkcipher_request creq;
+	void *cookie;
+};
+
+struct qcedev_sha_req {
+	struct ahash_request sreq;
+	struct qcedev_sha_ctxt *sha_ctxt;
+	void *cookie;
+};
+
+struct qcedev_async_req {
+	struct list_head			list;
+	struct completion			complete;
+	enum qcedev_crypto_oper_type		op_type;
+	union {
+		struct qcedev_cipher_op_req	cipher_op_req;
+		struct qcedev_sha_op_req	sha_op_req;
+	};
+	union{
+		struct qcedev_cipher_req	cipher_req;
+		struct qcedev_sha_req		sha_req;
+	};
+	struct qcedev_control			*podev;
+	int					err;
+};
+
+/**********************************************************************
+ * Register ourselves as a misc device to be able to access the dev
+ * driver from userspace.
+ **********************************************************************/
+
+
+#define QCEDEV_DEV	"qcedev"
+
+struct qcedev_control{
+
+	/* CE features supported by platform */
+	struct msm_ce_hw_support platform_support;
+
+	bool ce_locked;
+
+	/* CE features/algorithms supported by HW engine */
+	struct ce_hw_support ce_support;
+
+	/* misc device */
+	struct miscdevice miscdevice;
+
+	/* qce handle */
+	void *qce;
+
+	/* platform device */
+	struct platform_device *pdev;
+
+	unsigned magic;
+
+	struct list_head ready_commands;
+	struct qcedev_async_req *active_command;
+	spinlock_t lock;
+	struct tasklet_struct done_tasklet;
+};
+
+/*-------------------------------------------------------------------------
+* Resource Locking Service
+* ------------------------------------------------------------------------*/
+#define QCEDEV_CMD_ID				1
+#define QCEDEV_CE_LOCK_CMD			1
+#define QCEDEV_CE_UNLOCK_CMD			0
+#define NUM_RETRY				1000
+#define CE_BUSY					55
+
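+/*
+ * Issue an SCM call into TrustZone to lock or unlock the crypto engine
+ * shared with the secure side.  Compiles to a no-op returning success
+ * when CONFIG_MSM_SCM is not enabled.
+ */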
+static int qcedev_scm_cmd(int resource, int cmd, int *response)
+{
+#ifdef CONFIG_MSM_SCM
+
+	struct {
+		int resource;
+		int cmd;
+	} cmd_buf;
+
+	cmd_buf.resource = resource;
+	cmd_buf.cmd = cmd;
+
+	return scm_call(SCM_SVC_TZ, QCEDEV_CMD_ID, &cmd_buf,
+		sizeof(cmd_buf), response, sizeof(*response));
+
+#else
+	return 0;
+#endif
+}
+
+static int qcedev_unlock_ce(struct qcedev_control *podev)
+{
+	if ((podev->platform_support.ce_shared) && (podev->ce_locked == true)) {
+		int response = 0;
+
+		if (qcedev_scm_cmd(podev->platform_support.shared_ce_resource,
+					QCEDEV_CE_UNLOCK_CMD, &response)) {
+			printk(KERN_ERR "%s: Failed to release CE lock\n",
+				__func__);
+			return -EUSERS;
+		}
+		podev->ce_locked = false;
+	}
+	return 0;
+}
+
+static int qcedev_lock_ce(struct qcedev_control *podev)
+{
+	if ((podev->platform_support.ce_shared) &&
+					(podev->ce_locked == false)) {
+		int response = -CE_BUSY;
+		int i = 0;
+
+		do {
+			if (qcedev_scm_cmd(
+				podev->platform_support.shared_ce_resource,
+				QCEDEV_CE_LOCK_CMD, &response)) {
+				response = -EINVAL;
+				break;
+			}
+		} while ((response == -CE_BUSY) && (i++ < NUM_RETRY));
+
+		if ((response == -CE_BUSY) && (i >= NUM_RETRY))
+			return -EUSERS;
+		if (response < 0)
+			return -EINVAL;
+
+		podev->ce_locked = true;
+	}
+
+	return 0;
+}
+
+#define QCEDEV_MAGIC 0x56434544 /* "qced" */
+
+static long qcedev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
+static int qcedev_open(struct inode *inode, struct file *file);
+static int qcedev_release(struct inode *inode, struct file *file);
+static int start_cipher_req(struct qcedev_control *podev);
+static int start_sha_req(struct qcedev_control *podev,
+			struct qcedev_sha_op_req *sha_op_req);
+
+static const struct file_operations qcedev_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = qcedev_ioctl,
+	.open = qcedev_open,
+	.release = qcedev_release,
+};
+
+static struct qcedev_control qce_dev[] = {
+	{
+		.miscdevice = {
+			.minor = MISC_DYNAMIC_MINOR,
+			.name = "qce",
+			.fops = &qcedev_fops,
+		},
+		.magic = QCEDEV_MAGIC,
+	},
+};
+
+
+#define MAX_QCE_DEVICE ARRAY_SIZE(qce_dev)
+#define DEBUG_MAX_FNAME  16
+#define DEBUG_MAX_RW_BUF 1024
+
+struct qcedev_stat {
+	u32 qcedev_dec_success;
+	u32 qcedev_dec_fail;
+	u32 qcedev_enc_success;
+	u32 qcedev_enc_fail;
+	u32 qcedev_sha_success;
+	u32 qcedev_sha_fail;
+};
+
+static struct qcedev_stat _qcedev_stat[MAX_QCE_DEVICE];
+static struct dentry *_debug_dent;
+static char _debug_read_buf[DEBUG_MAX_RW_BUF];
+static int _debug_qcedev[MAX_QCE_DEVICE];
+
+static struct qcedev_control *qcedev_minor_to_control(unsigned n)
+{
+	int i;
+
+	for (i = 0; i < MAX_QCE_DEVICE; i++) {
+		if (qce_dev[i].miscdevice.minor == n)
+			return &qce_dev[i];
+	}
+	return NULL;
+}
+
+static int qcedev_open(struct inode *inode, struct file *file)
+{
+	struct qcedev_control *podev;
+
+	podev = qcedev_minor_to_control(MINOR(inode->i_rdev));
+	if (podev == NULL) {
+		printk(KERN_ERR "%s: no such device %d\n", __func__,
+				MINOR(inode->i_rdev));
+		return -ENOENT;
+	}
+
+	file->private_data = podev;
+
+	return 0;
+}
+
+static int qcedev_release(struct inode *inode, struct file *file)
+{
+	struct qcedev_control *podev;
+
+	podev =  file->private_data;
+
+	if (podev != NULL && podev->magic != QCEDEV_MAGIC) {
+		printk(KERN_ERR "%s: invalid handle %p\n",
+			__func__, podev);
+	}
+
+	file->private_data = NULL;
+
+	return 0;
+}
+
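+/*
+ * Done tasklet: completes the request that just finished and, if more
+ * commands are waiting on the ready list, makes the next one active and
+ * starts it on the engine.  A command that fails to start is completed
+ * immediately and the next one is tried.
+ */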
+static void req_done(unsigned long data)
+{
+	struct qcedev_control *podev = (struct qcedev_control *)data;
+	struct qcedev_async_req *areq;
+	unsigned long flags = 0;
+	struct qcedev_async_req *new_req = NULL;
+	int ret = 0;
+
+	spin_lock_irqsave(&podev->lock, flags);
+	areq = podev->active_command;
+	podev->active_command = NULL;
+
+again:
+	if (!list_empty(&podev->ready_commands)) {
+		new_req = container_of(podev->ready_commands.next,
+						struct qcedev_async_req, list);
+		list_del(&new_req->list);
+		podev->active_command = new_req;
+		new_req->err = 0;
+		if (new_req->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
+			ret = start_cipher_req(podev);
+		else
+			ret = start_sha_req(podev, &new_req->sha_op_req);
+	}
+
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	if (areq)
+		complete(&areq->complete);
+
+	if (new_req && ret) {
+		complete(&new_req->complete);
+		spin_lock_irqsave(&podev->lock, flags);
+		podev->active_command = NULL;
+		areq = NULL;
+		ret = 0;
+		new_req = NULL;
+		goto again;
+	}
+
+	return;
+}
+
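+/*
+ * Hash completion callback: saves the returned digest and byte counts
+ * into the request's SHA context and schedules the done tasklet.
+ */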
+static void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
+	unsigned char *authdata, int ret)
+{
+	struct qcedev_sha_req *areq;
+	struct qcedev_control *pdev;
+	uint32_t *auth32 = (uint32_t *)authdata;
+
+	areq = (struct qcedev_sha_req *) cookie;
+	pdev = (struct qcedev_control *) areq->cookie;
+
+	if (digest)
+		memcpy(&areq->sha_ctxt->digest[0], digest, 32);
+
+	if (authdata) {
+		areq->sha_ctxt->auth_data[0] = auth32[0];
+		areq->sha_ctxt->auth_data[1] = auth32[1];
+		areq->sha_ctxt->auth_data[2] = auth32[2];
+		areq->sha_ctxt->auth_data[3] = auth32[3];
+	}
+
+	tasklet_schedule(&pdev->done_tasklet);
+};
+
+
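+/*
+ * Cipher completion callback: copies the updated IV back into the user
+ * request (for chained operations) and schedules the done tasklet.
+ */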
+static void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
+	unsigned char *iv, int ret)
+{
+	struct qcedev_cipher_req *areq;
+	struct qcedev_control *pdev;
+	struct qcedev_async_req *qcedev_areq;
+
+	areq = (struct qcedev_cipher_req *) cookie;
+	pdev = (struct qcedev_control *) areq->cookie;
+	qcedev_areq = pdev->active_command;
+
+	if (iv)
+		memcpy(&qcedev_areq->cipher_op_req.iv[0], iv,
+					qcedev_areq->cipher_op_req.ivlen);
+	tasklet_schedule(&pdev->done_tasklet);
+};
+
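+/*
+ * Translate the active user cipher request into a qce_req and hand it
+ * to the crypto engine.  Called with the device spinlock held.
+ */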
+static int start_cipher_req(struct qcedev_control *podev)
+{
+	struct qcedev_async_req *qcedev_areq;
+	struct qce_req creq;
+	int ret = 0;
+
+	/* start the command on the podev->active_command */
+	qcedev_areq = podev->active_command;
+	qcedev_areq->podev = podev;
+
+	qcedev_areq->cipher_req.cookie = qcedev_areq->podev;
+	creq.use_pmem = qcedev_areq->cipher_op_req.use_pmem;
+	if (qcedev_areq->cipher_op_req.use_pmem == QCEDEV_USE_PMEM)
+		creq.pmem = &qcedev_areq->cipher_op_req.pmem;
+	else
+		creq.pmem = NULL;
+
+	switch (qcedev_areq->cipher_op_req.alg) {
+	case QCEDEV_ALG_DES:
+		creq.alg = CIPHER_ALG_DES;
+		break;
+	case QCEDEV_ALG_3DES:
+		creq.alg = CIPHER_ALG_3DES;
+		break;
+	case QCEDEV_ALG_AES:
+		creq.alg = CIPHER_ALG_AES;
+		break;
+	default:
+		break;
+	};
+
+	switch (qcedev_areq->cipher_op_req.mode) {
+	case QCEDEV_AES_MODE_CBC:
+	case QCEDEV_DES_MODE_CBC:
+		creq.mode = QCE_MODE_CBC;
+		break;
+	case QCEDEV_AES_MODE_ECB:
+	case QCEDEV_DES_MODE_ECB:
+		creq.mode = QCE_MODE_ECB;
+		break;
+	case QCEDEV_AES_MODE_CTR:
+		creq.mode = QCE_MODE_CTR;
+		break;
+	case QCEDEV_AES_MODE_XTS:
+		creq.mode = QCE_MODE_XTS;
+		break;
+	default:
+		break;
+	};
+
+	if ((creq.alg == CIPHER_ALG_AES) &&
+		(creq.mode == QCE_MODE_CTR)) {
+		creq.dir = QCE_ENCRYPT;
+	} else {
+		if (QCEDEV_OPER_ENC == qcedev_areq->cipher_op_req.op)
+			creq.dir = QCE_ENCRYPT;
+		else
+			creq.dir = QCE_DECRYPT;
+	}
+
+	creq.iv = &qcedev_areq->cipher_op_req.iv[0];
+	creq.ivsize = qcedev_areq->cipher_op_req.ivlen;
+
+	creq.enckey =  &qcedev_areq->cipher_op_req.enckey[0];
+	creq.encklen = qcedev_areq->cipher_op_req.encklen;
+
+	creq.cryptlen = qcedev_areq->cipher_op_req.data_len;
+
+	if (qcedev_areq->cipher_op_req.encklen == 0) {
+		if ((qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC_NO_KEY)
+			|| (qcedev_areq->cipher_op_req.op ==
+				QCEDEV_OPER_DEC_NO_KEY))
+			creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY;
+		else {
+			int i;
+
+			for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
+				if (qcedev_areq->cipher_op_req.enckey[i] != 0)
+					break;
+			}
+
+			if ((podev->platform_support.hw_key_support == 1) &&
+						(i == QCEDEV_MAX_KEY_SIZE))
+				creq.op = QCE_REQ_ABLK_CIPHER;
+			else {
+				ret = -EINVAL;
+				goto unsupported;
+			}
+		}
+	} else {
+		creq.op = QCE_REQ_ABLK_CIPHER;
+	}
+
+	creq.qce_cb = qcedev_cipher_req_cb;
+	creq.areq = (void *)&qcedev_areq->cipher_req;
+
+	ret = qce_ablk_cipher_req(podev->qce, &creq);
+unsupported:
+	if (ret)
+		qcedev_areq->err = -ENXIO;
+	else
+		qcedev_areq->err = 0;
+	return ret;
+};
+
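+/*
+ * Translate the active user hash request into a qce_sha_req and hand it
+ * to the crypto engine.  Called with the device spinlock held.
+ */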
+static int start_sha_req(struct qcedev_control *podev,
+			struct qcedev_sha_op_req *sha_op_req)
+{
+	struct qcedev_async_req *qcedev_areq;
+	struct qce_sha_req sreq;
+	int ret = 0;
+
+	/* start the command on the podev->active_command */
+	qcedev_areq = podev->active_command;
+	qcedev_areq->podev = podev;
+
+	switch (qcedev_areq->sha_op_req.alg) {
+	case QCEDEV_ALG_SHA1:
+		sreq.alg = QCE_HASH_SHA1;
+		break;
+	case QCEDEV_ALG_SHA256:
+		sreq.alg = QCE_HASH_SHA256;
+		break;
+	case QCEDEV_ALG_SHA1_HMAC:
+		if (podev->ce_support.sha_hmac) {
+			sreq.alg = QCE_HASH_SHA1_HMAC;
+			sreq.authkey =
+				&qcedev_areq->sha_op_req.ctxt.authkey[0];
+
+		} else {
+			sreq.alg = QCE_HASH_SHA1;
+			sreq.authkey = NULL;
+		}
+		break;
+	case QCEDEV_ALG_SHA256_HMAC:
+		if (podev->ce_support.sha_hmac) {
+			sreq.alg = QCE_HASH_SHA256_HMAC;
+			sreq.authkey =
+				&qcedev_areq->sha_op_req.ctxt.authkey[0];
+
+		} else {
+			sreq.alg = QCE_HASH_SHA256;
+			sreq.authkey = NULL;
+		}
+		break;
+	case QCEDEV_ALG_AES_CMAC:
+		sreq.alg = QCE_HASH_AES_CMAC;
+		sreq.authkey = &qcedev_areq->sha_op_req.ctxt.authkey[0];
+		sreq.authklen = qcedev_areq->sha_op_req.authklen;
+		break;
+	default:
+		break;
+	};
+
+	qcedev_areq->sha_req.cookie = podev;
+
+	sreq.qce_cb = qcedev_sha_req_cb;
+	if (qcedev_areq->sha_op_req.alg != QCEDEV_ALG_AES_CMAC) {
+		sreq.auth_data[0] = sha_op_req->ctxt.auth_data[0];
+		sreq.auth_data[1] = sha_op_req->ctxt.auth_data[1];
+		sreq.auth_data[2] = sha_op_req->ctxt.auth_data[2];
+		sreq.auth_data[3] = sha_op_req->ctxt.auth_data[3];
+		sreq.digest = &sha_op_req->ctxt.digest[0];
+		sreq.first_blk = sha_op_req->ctxt.first_blk;
+		sreq.last_blk = sha_op_req->ctxt.last_blk;
+	}
+	sreq.size = qcedev_areq->sha_req.sreq.nbytes;
+	sreq.src = qcedev_areq->sha_req.sreq.src;
+	sreq.areq = (void *)&qcedev_areq->sha_req;
+	qcedev_areq->sha_req.sha_ctxt =
+		(struct qcedev_sha_ctxt *)(&sha_op_req->ctxt);
+
+	ret = qce_process_sha_req(podev->qce, &sreq);
+
+	if (ret)
+		qcedev_areq->err = -ENXIO;
+	else
+		qcedev_areq->err = 0;
+	return ret;
+};
+
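+/*
+ * Queue a request on the device, start it if the engine is idle, and
+ * wait for its completion callback.  Takes and releases the shared CE
+ * lock around the operation and updates the per-device statistics.
+ */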
+static int submit_req(struct qcedev_async_req *qcedev_areq,
+					struct qcedev_control *podev)
+{
+	unsigned long flags = 0;
+	int ret = 0;
+	struct qcedev_stat *pstat;
+
+	qcedev_areq->err = 0;
+
+	ret = qcedev_lock_ce(podev);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&podev->lock, flags);
+
+	if (podev->active_command == NULL) {
+		podev->active_command = qcedev_areq;
+		if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
+			ret = start_cipher_req(podev);
+		else
+			ret = start_sha_req(podev, &qcedev_areq->sha_op_req);
+	} else {
+		list_add_tail(&qcedev_areq->list, &podev->ready_commands);
+	}
+
+	if (ret != 0)
+		podev->active_command = NULL;
+
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	if (ret == 0)
+		wait_for_completion(&qcedev_areq->complete);
+
+	ret = qcedev_unlock_ce(podev);
+	if (ret)
+		qcedev_areq->err = -EIO;
+
+	pstat = &_qcedev_stat[podev->pdev->id];
+	if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) {
+		switch (qcedev_areq->cipher_op_req.op) {
+		case QCEDEV_OPER_DEC:
+			if (qcedev_areq->err)
+				pstat->qcedev_dec_fail++;
+			else
+				pstat->qcedev_dec_success++;
+			break;
+		case QCEDEV_OPER_ENC:
+			if (qcedev_areq->err)
+				pstat->qcedev_enc_fail++;
+			else
+				pstat->qcedev_enc_success++;
+			break;
+		default:
+			break;
+		};
+	} else {
+		if (qcedev_areq->err)
+			pstat->qcedev_sha_fail++;
+		else
+			pstat->qcedev_sha_success++;
+	}
+
+	return qcedev_areq->err;
+}
+
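+/* Reset the hash context and load the standard SHA-1/SHA-256 IV. */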
+static int qcedev_sha_init(struct qcedev_async_req *areq,
+				struct qcedev_control *podev)
+{
+	struct qcedev_sha_ctxt *sha_ctxt = &areq->sha_op_req.ctxt;
+
+	memset(sha_ctxt, 0, sizeof(struct qcedev_sha_ctxt));
+	sha_ctxt->first_blk = 1;
+
+	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
+			(areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)) {
+		memcpy(&sha_ctxt->digest[0],
+			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
+		sha_ctxt->diglen = SHA1_DIGEST_SIZE;
+	} else {
+		if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA256) ||
+			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)) {
+			memcpy(&sha_ctxt->digest[0],
+					&_std_init_vector_sha256_uint8[0],
+					SHA256_DIGEST_SIZE);
+			sha_ctxt->diglen = SHA256_DIGEST_SIZE;
+		}
+	}
+	return 0;
+}
+
+
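+/*
+ * Hash up to QCE_MAX_OPER_DATA bytes: the user fragments are appended
+ * to any trailing bytes from the previous update in a cache-aligned
+ * kernel buffer, the block-aligned portion is hashed, and the remainder
+ * becomes the new trailing buffer.  Data that still fits in a single
+ * block is only buffered, not submitted.
+ */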
+static int qcedev_sha_update_max_xfer(struct qcedev_async_req *qcedev_areq,
+				struct qcedev_control *podev)
+{
+	int err = 0;
+	int i = 0;
+	struct scatterlist sg_src[2];
+	uint32_t total;
+
+	uint8_t *user_src = NULL;
+	uint8_t *k_src = NULL;
+	uint8_t *k_buf_src = NULL;
+	uint8_t *k_align_src = NULL;
+
+	uint32_t sha_pad_len = 0;
+	uint32_t trailing_buf_len = 0;
+	uint32_t t_buf = qcedev_areq->sha_op_req.ctxt.trailing_buf_len;
+	uint32_t sha_block_size;
+
+	total = qcedev_areq->sha_op_req.data_len + t_buf;
+
+	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1)
+		sha_block_size = SHA1_BLOCK_SIZE;
+	else
+		sha_block_size = SHA256_BLOCK_SIZE;
+
+	if (total <= sha_block_size) {
+		uint32_t len =  qcedev_areq->sha_op_req.data_len;
+
+		i = 0;
+
+		k_src = &qcedev_areq->sha_op_req.ctxt.trailing_buf[t_buf];
+
+		/* Copy data from user src(s) */
+		while (len > 0) {
+			user_src =
+			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
+			if (user_src && __copy_from_user(k_src,
+				(void __user *)user_src,
+				qcedev_areq->sha_op_req.data[i].len))
+				return -EFAULT;
+
+			len -= qcedev_areq->sha_op_req.data[i].len;
+			k_src += qcedev_areq->sha_op_req.data[i].len;
+			i++;
+		}
+		qcedev_areq->sha_op_req.ctxt.trailing_buf_len = total;
+
+		return 0;
+	}
+
+
+	k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
+				GFP_KERNEL);
+	if (k_buf_src == NULL)
+		return -ENOMEM;
+
+	k_align_src = (uint8_t *) ALIGN(((unsigned int)k_buf_src),
+							CACHE_LINE_SIZE);
+	k_src = k_align_src;
+
+	/* check for trailing buffer from previous updates and append it */
+	if (t_buf > 0) {
+		memcpy(k_src, &qcedev_areq->sha_op_req.ctxt.trailing_buf[0],
+								t_buf);
+		k_src += t_buf;
+	}
+
+	/* Copy data from user src(s) */
+	user_src = (void __user *)qcedev_areq->sha_op_req.data[0].vaddr;
+	if (user_src && __copy_from_user(k_src,
+				(void __user *)user_src,
+				qcedev_areq->sha_op_req.data[0].len)) {
+		kfree(k_buf_src);
+		return -EFAULT;
+	}
+	k_src += qcedev_areq->sha_op_req.data[0].len;
+	for (i = 1; i < qcedev_areq->sha_op_req.entries; i++) {
+		user_src = (void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
+		if (user_src && __copy_from_user(k_src,
+					(void __user *)user_src,
+					qcedev_areq->sha_op_req.data[i].len)) {
+			kfree(k_buf_src);
+			return -EFAULT;
+		}
+		k_src += qcedev_areq->sha_op_req.data[i].len;
+	}
+
+	/*  get new trailing buffer */
+	sha_pad_len = ALIGN(total, CE_SHA_BLOCK_SIZE) - total;
+	trailing_buf_len =  CE_SHA_BLOCK_SIZE - sha_pad_len;
+
+	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src[0];
+	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src,
+						total-trailing_buf_len);
+	sg_mark_end(qcedev_areq->sha_req.sreq.src);
+
+	qcedev_areq->sha_req.sreq.nbytes = total - trailing_buf_len;
+
+	/*  update sha_ctxt trailing buf content to new trailing buf */
+	if (trailing_buf_len > 0) {
+		memset(&qcedev_areq->sha_op_req.ctxt.trailing_buf[0], 0, 64);
+		memcpy(&qcedev_areq->sha_op_req.ctxt.trailing_buf[0],
+			(k_src - trailing_buf_len),
+			trailing_buf_len);
+	}
+	qcedev_areq->sha_op_req.ctxt.trailing_buf_len = trailing_buf_len;
+
+	err = submit_req(qcedev_areq, podev);
+
+	qcedev_areq->sha_op_req.ctxt.last_blk = 0;
+	qcedev_areq->sha_op_req.ctxt.first_blk = 0;
+
+	kfree(k_buf_src);
+	return err;
+}
+
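+/*
+ * Hash update entry point: requests larger than QCE_MAX_OPER_DATA are
+ * split into QCE_MAX_OPER_DATA-sized pieces, each handled by
+ * qcedev_sha_update_max_xfer(), and the original request descriptors
+ * are restored afterwards.
+ */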
+static int qcedev_sha_update(struct qcedev_async_req *qcedev_areq,
+				struct qcedev_control *podev)
+{
+	int err = 0;
+	int i = 0;
+	int j = 0;
+	int k = 0;
+	int num_entries = 0;
+	uint32_t total = 0;
+
+	/* verify address src(s) */
+	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++)
+		if (!access_ok(VERIFY_READ,
+			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr,
+			qcedev_areq->sha_op_req.data[i].len))
+			return -EFAULT;
+
+	if (qcedev_areq->sha_op_req.data_len > QCE_MAX_OPER_DATA) {
+
+		struct	qcedev_sha_op_req *saved_req;
+		struct	qcedev_sha_op_req req;
+		struct	qcedev_sha_op_req *sreq = &qcedev_areq->sha_op_req;
+
+		/* save the original req structure */
+		saved_req =
+			kmalloc(sizeof(struct qcedev_sha_op_req), GFP_KERNEL);
+		if (saved_req == NULL) {
+			printk(KERN_ERR "%s: Can't allocate mem: saved_req 0x%x\n",
+			__func__, (uint32_t)saved_req);
+			return -ENOMEM;
+		}
+		memcpy(&req, sreq, sizeof(struct qcedev_sha_op_req));
+		memcpy(saved_req, sreq, sizeof(struct qcedev_sha_op_req));
+
+		i = 0;
+		/* Address 32 KB  at a time */
+		while ((i < req.entries) && (err == 0)) {
+			if (sreq->data[i].len > QCE_MAX_OPER_DATA) {
+				sreq->data[0].len = QCE_MAX_OPER_DATA;
+				if (i > 0) {
+					sreq->data[0].vaddr =
+							sreq->data[i].vaddr;
+				}
+
+				sreq->data_len = QCE_MAX_OPER_DATA;
+				sreq->entries = 1;
+
+				err = qcedev_sha_update_max_xfer(qcedev_areq,
+									podev);
+
+				sreq->data[i].len = req.data[i].len -
+							QCE_MAX_OPER_DATA;
+				sreq->data[i].vaddr = req.data[i].vaddr +
+							QCE_MAX_OPER_DATA;
+				req.data[i].vaddr = sreq->data[i].vaddr;
+				req.data[i].len = sreq->data[i].len;
+			} else {
+				total = 0;
+				for (j = i; j < req.entries; j++) {
+					num_entries++;
+					if ((total + sreq->data[j].len) >=
+							QCE_MAX_OPER_DATA) {
+						sreq->data[j].len =
+						(QCE_MAX_OPER_DATA - total);
+						total = QCE_MAX_OPER_DATA;
+						break;
+					}
+					total += sreq->data[j].len;
+				}
+
+				sreq->data_len = total;
+				if (i > 0)
+					for (k = 0; k < num_entries; k++) {
+						sreq->data[k].len =
+							sreq->data[i+k].len;
+						sreq->data[k].vaddr =
+							sreq->data[i+k].vaddr;
+					}
+				sreq->entries = num_entries;
+
+				i = j;
+				err = qcedev_sha_update_max_xfer(qcedev_areq,
+									podev);
+				num_entries = 0;
+
+				sreq->data[i].vaddr = req.data[i].vaddr +
+							sreq->data[i].len;
+				sreq->data[i].len = req.data[i].len -
+							sreq->data[i].len;
+				req.data[i].vaddr = sreq->data[i].vaddr;
+				req.data[i].len = sreq->data[i].len;
+
+				if (sreq->data[i].len == 0)
+					i++;
+			}
+		} /* end of while ((i < req.entries) && (err == 0)) */
+
+		/* Restore the original req structure */
+		for (i = 0; i < saved_req->entries; i++) {
+			sreq->data[i].len = saved_req->data[i].len;
+			sreq->data[i].vaddr = saved_req->data[i].vaddr;
+		}
+		sreq->entries = saved_req->entries;
+		sreq->data_len = saved_req->data_len;
+		kfree(saved_req);
+	} else
+		err = qcedev_sha_update_max_xfer(qcedev_areq, podev);
+
+	return err;
+}
+
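+/*
+ * Hash the remaining trailing bytes as the last block and clear the
+ * trailing buffer and block counters so the context can be reused.
+ */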
+static int qcedev_sha_final(struct qcedev_async_req *qcedev_areq,
+				struct qcedev_control *podev)
+{
+	int err = 0;
+	struct scatterlist sg_src;
+	uint32_t total;
+
+	uint8_t *k_buf_src = NULL;
+	uint8_t *k_align_src = NULL;
+
+	qcedev_areq->sha_op_req.ctxt.first_blk = 0;
+	qcedev_areq->sha_op_req.ctxt.last_blk = 1;
+
+	total = qcedev_areq->sha_op_req.ctxt.trailing_buf_len;
+
+	if (total) {
+		k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
+					GFP_KERNEL);
+		if (k_buf_src == NULL)
+			return -ENOMEM;
+
+		k_align_src = (uint8_t *) ALIGN(((unsigned int)k_buf_src),
+							CACHE_LINE_SIZE);
+		memcpy(k_align_src,
+				&qcedev_areq->sha_op_req.ctxt.trailing_buf[0],
+				total);
+	}
+	qcedev_areq->sha_op_req.ctxt.last_blk = 1;
+	qcedev_areq->sha_op_req.ctxt.first_blk = 0;
+
+	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
+	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src, total);
+	sg_mark_end(qcedev_areq->sha_req.sreq.src);
+
+	qcedev_areq->sha_req.sreq.nbytes = total;
+
+	err = submit_req(qcedev_areq, podev);
+
+	qcedev_areq->sha_op_req.ctxt.first_blk = 0;
+	qcedev_areq->sha_op_req.ctxt.last_blk = 0;
+	qcedev_areq->sha_op_req.ctxt.auth_data[0] = 0;
+	qcedev_areq->sha_op_req.ctxt.auth_data[1] = 0;
+	qcedev_areq->sha_op_req.ctxt.trailing_buf_len = 0;
+	memset(&qcedev_areq->sha_op_req.ctxt.trailing_buf[0], 0, 64);
+
+	kfree(k_buf_src);
+	return err;
+}
+
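+/*
+ * AES-CMAC: copy the user key into the context and all data fragments
+ * into one contiguous kernel buffer, then run a single CMAC pass.
+ */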
+static int qcedev_hash_cmac(struct qcedev_async_req *qcedev_areq,
+					struct qcedev_control *podev)
+{
+	int err = 0;
+	int i = 0;
+	struct scatterlist sg_src[2];
+	uint32_t total;
+
+	uint8_t *user_src = NULL;
+	uint8_t *k_src = NULL;
+	uint8_t *k_buf_src = NULL;
+
+	total = qcedev_areq->sha_op_req.data_len;
+
+	/* verify address src(s) */
+	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++)
+		if (!access_ok(VERIFY_READ,
+			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr,
+			qcedev_areq->sha_op_req.data[i].len))
+			return -EFAULT;
+
+	/* Verify authentication key address */
+	if (!access_ok(VERIFY_READ,
+				(void __user *)qcedev_areq->sha_op_req.authkey,
+				qcedev_areq->sha_op_req.authklen))
+		return -EFAULT;
+	if (__copy_from_user(&qcedev_areq->sha_op_req.ctxt.authkey[0],
+				(void __user *)qcedev_areq->sha_op_req.authkey,
+				qcedev_areq->sha_op_req.authklen))
+		return -EFAULT;
+
+
+	k_buf_src = kmalloc(total, GFP_KERNEL);
+	if (k_buf_src == NULL)
+		return -ENOMEM;
+
+	k_src = k_buf_src;
+
+	/* Copy data from user src(s) */
+	user_src = (void __user *)qcedev_areq->sha_op_req.data[0].vaddr;
+	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++) {
+		user_src =
+			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
+		if (user_src && __copy_from_user(k_src, (void __user *)user_src,
+				qcedev_areq->sha_op_req.data[i].len)) {
+			kfree(k_buf_src);
+			return -EFAULT;
+		}
+		k_src += qcedev_areq->sha_op_req.data[i].len;
+	}
+
+	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src[0];
+	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_buf_src, total);
+	sg_mark_end(qcedev_areq->sha_req.sreq.src);
+
+	qcedev_areq->sha_req.sreq.nbytes = total;
+	qcedev_areq->sha_op_req.ctxt.diglen = qcedev_areq->sha_op_req.diglen;
+	err = submit_req(qcedev_areq, podev);
+
+	kfree(k_buf_src);
+	return err;
+}
+
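+/*
+ * Load the HMAC key into the context.  As HMAC specifies for oversized
+ * keys, a key longer than QCEDEV_MAX_KEY_SIZE is first hashed and its
+ * digest is used as the key.
+ */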
+static int qcedev_set_hmac_auth_key(struct qcedev_async_req *areq,
+					struct qcedev_control *podev)
+{
+	int err = 0;
+
+	if (areq->sha_op_req.authklen <= QCEDEV_MAX_KEY_SIZE) {
+		/* Verify authentication key address */
+		if (!access_ok(VERIFY_READ,
+				(void __user *)areq->sha_op_req.authkey,
+				areq->sha_op_req.authklen))
+			return -EFAULT;
+		if (__copy_from_user(&areq->sha_op_req.ctxt.authkey[0],
+				(void __user *)areq->sha_op_req.authkey,
+				areq->sha_op_req.authklen))
+			return -EFAULT;
+	} else {
+		struct qcedev_async_req authkey_areq;
+
+		init_completion(&authkey_areq.complete);
+
+		authkey_areq.sha_op_req.entries = 1;
+		authkey_areq.sha_op_req.data[0].vaddr =
+						areq->sha_op_req.authkey;
+		authkey_areq.sha_op_req.data[0].len = areq->sha_op_req.authklen;
+		authkey_areq.sha_op_req.data_len = areq->sha_op_req.authklen;
+		authkey_areq.sha_op_req.diglen = 0;
+		memset(&authkey_areq.sha_op_req.digest[0], 0,
+						QCEDEV_MAX_SHA_DIGEST);
+		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
+				authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA1;
+		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)
+				authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA256;
+
+		authkey_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+
+		qcedev_sha_init(&authkey_areq, podev);
+		err = qcedev_sha_update(&authkey_areq, podev);
+		if (!err)
+			err = qcedev_sha_final(&authkey_areq, podev);
+		else
+			return err;
+		memcpy(&areq->sha_op_req.ctxt.authkey[0],
+				&authkey_areq.sha_op_req.ctxt.digest[0],
+				authkey_areq.sha_op_req.ctxt.diglen);
+	}
+	return err;
+}
+
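+/*
+ * Software HMAC outer hash (used when the engine lacks native HMAC):
+ * hash the opad-XORed key block staged in the trailing buffer, then
+ * leave the inner digest in the trailing buffer so the following final
+ * pass hashes it as the last block.
+ */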
+static int qcedev_hmac_get_ohash(struct qcedev_async_req *qcedev_areq,
+				struct qcedev_control *podev)
+{
+	int err = 0;
+	struct scatterlist sg_src;
+	uint8_t *k_src = NULL;
+	uint32_t sha_block_size = 0;
+	uint32_t sha_digest_size = 0;
+
+	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
+		sha_digest_size = SHA1_DIGEST_SIZE;
+		sha_block_size = SHA1_BLOCK_SIZE;
+	} else {
+		if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
+			sha_digest_size = SHA256_DIGEST_SIZE;
+			sha_block_size = SHA256_BLOCK_SIZE;
+		}
+	}
+	k_src = kmalloc(sha_block_size, GFP_KERNEL);
+	if (k_src == NULL)
+		return -ENOMEM;
+
+	/* check for trailing buffer from previous updates and append it */
+	memcpy(k_src, &qcedev_areq->sha_op_req.ctxt.trailing_buf[0],
+			qcedev_areq->sha_op_req.ctxt.trailing_buf_len);
+
+	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
+	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_src, sha_block_size);
+	sg_mark_end(qcedev_areq->sha_req.sreq.src);
+
+	qcedev_areq->sha_req.sreq.nbytes = sha_block_size;
+	memset(&qcedev_areq->sha_op_req.ctxt.trailing_buf[0], 0,
+							sha_block_size);
+	memcpy(&qcedev_areq->sha_op_req.ctxt.trailing_buf[0],
+					&qcedev_areq->sha_op_req.ctxt.digest[0],
+					sha_digest_size);
+	qcedev_areq->sha_op_req.ctxt.trailing_buf_len = sha_digest_size;
+
+	qcedev_areq->sha_op_req.ctxt.first_blk = 1;
+	qcedev_areq->sha_op_req.ctxt.last_blk = 0;
+	qcedev_areq->sha_op_req.ctxt.auth_data[0] = 0;
+	qcedev_areq->sha_op_req.ctxt.auth_data[1] = 0;
+
+	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
+		memcpy(&qcedev_areq->sha_op_req.ctxt.digest[0],
+			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
+		qcedev_areq->sha_op_req.ctxt.diglen = SHA1_DIGEST_SIZE;
+	}
+
+	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
+		memcpy(&qcedev_areq->sha_op_req.ctxt.digest[0],
+			&_std_init_vector_sha256_uint8[0], SHA256_DIGEST_SIZE);
+		qcedev_areq->sha_op_req.ctxt.diglen = SHA256_DIGEST_SIZE;
+	}
+	err = submit_req(qcedev_areq, podev);
+
+	qcedev_areq->sha_op_req.ctxt.last_blk = 0;
+	qcedev_areq->sha_op_req.ctxt.first_blk = 0;
+
+	kfree(k_src);
+	return err;
+}
+
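+/*
+ * Stage the HMAC inner (0x36) or outer (0x5c) key pad: XOR the stored
+ * key with the pad constant into the trailing buffer so it is hashed
+ * as the first block of the corresponding pass.
+ */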
+static int qcedev_hmac_update_iokey(struct qcedev_async_req *areq,
+				struct qcedev_control *podev, bool ikey)
+{
+	int i;
+	uint32_t constant;
+	uint32_t sha_block_size;
+
+	if (ikey)
+		constant = 0x36;
+	else
+		constant = 0x5c;
+
+	if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
+		sha_block_size = SHA1_BLOCK_SIZE;
+	else
+		sha_block_size = SHA256_BLOCK_SIZE;
+
+	memset(&areq->sha_op_req.ctxt.trailing_buf[0], 0, sha_block_size);
+	for (i = 0; i < sha_block_size; i++)
+		areq->sha_op_req.ctxt.trailing_buf[i] =
+				(areq->sha_op_req.ctxt.authkey[i] ^ constant);
+
+	areq->sha_op_req.ctxt.trailing_buf_len = sha_block_size;
+	return 0;
+}
+
+static int qcedev_hmac_init(struct qcedev_async_req *areq,
+				struct qcedev_control *podev)
+{
+	int err;
+
+	qcedev_sha_init(areq, podev);
+	err = qcedev_set_hmac_auth_key(areq, podev);
+	if (err)
+		return err;
+	if (!podev->ce_support.sha_hmac)
+		qcedev_hmac_update_iokey(areq, podev, true);
+	return 0;
+}
+
+static int qcedev_hmac_final(struct qcedev_async_req *areq,
+				struct qcedev_control *podev)
+{
+	int err;
+
+	err = qcedev_sha_final(areq, podev);
+	if (podev->ce_support.sha_hmac)
+		return err;
+
+	qcedev_hmac_update_iokey(areq, podev, false);
+	err = qcedev_hmac_get_ohash(areq, podev);
+	if (err)
+		return err;
+	err = qcedev_sha_final(areq, podev);
+
+	return err;
+}
+
+static int qcedev_hash_init(struct qcedev_async_req *areq,
+				struct qcedev_control *podev)
+{
+	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
+			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
+		return qcedev_sha_init(areq, podev);
+	else
+		return qcedev_hmac_init(areq, podev);
+}
+
+static int qcedev_hash_update(struct qcedev_async_req *qcedev_areq,
+				struct qcedev_control *podev)
+{
+	return qcedev_sha_update(qcedev_areq, podev);
+}
+
+static int qcedev_hash_final(struct qcedev_async_req *areq,
+				struct qcedev_control *podev)
+{
+	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
+			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
+		return qcedev_sha_final(areq, podev);
+	else
+		return qcedev_hmac_final(areq, podev);
+}
+
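+/*
+ * Cipher one chunk of a PMEM-backed request: build scatterlists over
+ * the source (and, for out-of-place operations, destination) PMEM
+ * regions, holding references on the PMEM files for the duration of
+ * the transfer.
+ */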
+static int qcedev_pmem_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
+						struct qcedev_control *podev)
+{
+	int i = 0;
+	int err = 0;
+	struct scatterlist *sg_src = NULL;
+	struct scatterlist *sg_dst = NULL;
+	struct scatterlist *sg_ndex = NULL;
+	struct file *file_src = NULL;
+	struct file *file_dst = NULL;
+	unsigned long paddr;
+	unsigned long kvaddr;
+	unsigned long len;
+
+	sg_src = kmalloc((sizeof(struct scatterlist) *
+				areq->cipher_op_req.entries),	GFP_KERNEL);
+	if (sg_src == NULL) {
+		printk(KERN_ERR "%s: Can't allocate memory: sg_src 0x%x\n",
+			__func__, (uint32_t)sg_src);
+		return -ENOMEM;
+
+	}
+	memset(sg_src, 0, (sizeof(struct scatterlist) *
+				areq->cipher_op_req.entries));
+	sg_ndex = sg_src;
+	areq->cipher_req.creq.src = sg_src;
+
+	/* address src */
+	get_pmem_file(areq->cipher_op_req.pmem.fd_src, &paddr,
+					&kvaddr, &len, &file_src);
+
+	for (i = 0; i < areq->cipher_op_req.entries; i++) {
+		sg_set_buf(sg_ndex,
+		((uint8_t *)(areq->cipher_op_req.pmem.src[i].offset) + kvaddr),
+		areq->cipher_op_req.pmem.src[i].len);
+		sg_ndex++;
+	}
+	sg_mark_end(--sg_ndex);
+
+	for (i = 0; i < areq->cipher_op_req.entries; i++)
+		areq->cipher_op_req.pmem.src[i].offset += (uint32_t)paddr;
+
+	/* address dst */
+	/* If not an in-place encryption/decryption operation */
+	if (areq->cipher_op_req.in_place_op != 1) {
+		sg_dst = kmalloc((sizeof(struct scatterlist) *
+				areq->cipher_op_req.entries), GFP_KERNEL);
+		if (sg_dst == NULL) {
+			kfree(sg_src);
+			if (file_src)
+				put_pmem_file(file_src);
+			return -ENOMEM;
+		}
+		memset(sg_dst, 0, (sizeof(struct scatterlist) *
+					areq->cipher_op_req.entries));
+		areq->cipher_req.creq.dst = sg_dst;
+		sg_ndex = sg_dst;
+
+		get_pmem_file(areq->cipher_op_req.pmem.fd_dst, &paddr,
+					&kvaddr, &len, &file_dst);
+		for (i = 0; i < areq->cipher_op_req.entries; i++)
+			sg_set_buf(sg_ndex++,
+			((uint8_t *)(areq->cipher_op_req.pmem.dst[i].offset)
+			+ kvaddr), areq->cipher_op_req.pmem.dst[i].len);
+		sg_mark_end(--sg_ndex);
+
+		for (i = 0; i < areq->cipher_op_req.entries; i++)
+			areq->cipher_op_req.pmem.dst[i].offset +=
+							(uint32_t)paddr;
+	} else {
+		areq->cipher_req.creq.dst = sg_src;
+		for (i = 0; i < areq->cipher_op_req.entries; i++) {
+			areq->cipher_op_req.pmem.dst[i].offset =
+				areq->cipher_op_req.pmem.src[i].offset;
+			areq->cipher_op_req.pmem.dst[i].len =
+				areq->cipher_op_req.pmem.src[i].len;
+		}
+	}
+
+	areq->cipher_req.creq.nbytes = areq->cipher_op_req.data_len;
+	areq->cipher_req.creq.info = areq->cipher_op_req.iv;
+
+	err = submit_req(areq, podev);
+
+	kfree(sg_src);
+	kfree(sg_dst);
+
+	if (file_dst)
+		put_pmem_file(file_dst);
+	if (file_src)
+		put_pmem_file(file_src);
+
+	return err;
+};
+
+
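+/*
+ * PMEM cipher entry point: requests larger than QCE_MAX_OPER_DATA are
+ * ciphered in QCE_MAX_OPER_DATA-sized pieces, and the original request
+ * descriptors are restored before returning.
+ */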
+static int qcedev_pmem_ablk_cipher(struct qcedev_async_req *qcedev_areq,
+						struct qcedev_control *podev)
+{
+	int err = 0;
+	int i = 0;
+	int j = 0;
+	int k = 0;
+	int num_entries = 0;
+	uint32_t total = 0;
+	struct qcedev_cipher_op_req *saved_req;
+	struct qcedev_cipher_op_req *creq = &qcedev_areq->cipher_op_req;
+
+	saved_req = kmalloc(sizeof(struct qcedev_cipher_op_req), GFP_KERNEL);
+	if (saved_req == NULL) {
+		printk(KERN_ERR "%s: Can't allocate mem: saved_req 0x%x\n",
+		__func__, (uint32_t)saved_req);
+		return -ENOMEM;
+	}
+	memcpy(saved_req, creq, sizeof(struct qcedev_cipher_op_req));
+
+	if (qcedev_areq->cipher_op_req.data_len > QCE_MAX_OPER_DATA) {
+
+		struct qcedev_cipher_op_req req;
+
+		/* save the original req structure */
+		memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));
+
+		i = 0;
+		/* Address 32 KB  at a time */
+		while ((i < req.entries) && (err == 0)) {
+			if (creq->pmem.src[i].len > QCE_MAX_OPER_DATA) {
+				creq->pmem.src[0].len =	QCE_MAX_OPER_DATA;
+				if (i > 0) {
+					creq->pmem.src[0].offset =
+						creq->pmem.src[i].offset;
+				}
+
+				creq->data_len = QCE_MAX_OPER_DATA;
+				creq->entries = 1;
+
+				err =
+				qcedev_pmem_ablk_cipher_max_xfer(qcedev_areq,
+								podev);
+
+				creq->pmem.src[i].len =	req.pmem.src[i].len -
+							QCE_MAX_OPER_DATA;
+				creq->pmem.src[i].offset =
+						req.pmem.src[i].offset +
+						QCE_MAX_OPER_DATA;
+				req.pmem.src[i].offset =
+						creq->pmem.src[i].offset;
+				req.pmem.src[i].len = creq->pmem.src[i].len;
+			} else {
+				total = 0;
+				for (j = i; j < req.entries; j++) {
+					num_entries++;
+					if ((total + creq->pmem.src[j].len)
+							>= QCE_MAX_OPER_DATA) {
+						creq->pmem.src[j].len =
+						QCE_MAX_OPER_DATA - total;
+						total = QCE_MAX_OPER_DATA;
+						break;
+					}
+					total += creq->pmem.src[j].len;
+				}
+
+				creq->data_len = total;
+				if (i > 0)
+					for (k = 0; k < num_entries; k++) {
+						creq->pmem.src[k].len =
+						creq->pmem.src[i+k].len;
+						creq->pmem.src[k].offset =
+						creq->pmem.src[i+k].offset;
+					}
+				creq->entries =  num_entries;
+
+				i = j;
+				err =
+				qcedev_pmem_ablk_cipher_max_xfer(qcedev_areq,
+								podev);
+				num_entries = 0;
+
+				creq->pmem.src[i].offset =
+						req.pmem.src[i].offset +
+						creq->pmem.src[i].len;
+				creq->pmem.src[i].len =
+						req.pmem.src[i].len -
+						creq->pmem.src[i].len;
+				req.pmem.src[i].offset =
+						creq->pmem.src[i].offset;
+				req.pmem.src[i].len = creq->pmem.src[i].len;
+
+				if (creq->pmem.src[i].len == 0)
+					i++;
+			}
+
+		} /* end of while ((i < req.entries) && (err == 0)) */
+
+	} else
+		err = qcedev_pmem_ablk_cipher_max_xfer(qcedev_areq, podev);
+
+	/* Restore the original req structure */
+	for (i = 0; i < saved_req->entries; i++) {
+		creq->pmem.src[i].len = saved_req->pmem.src[i].len;
+		creq->pmem.src[i].offset = saved_req->pmem.src[i].offset;
+	}
+	creq->entries = saved_req->entries;
+	creq->data_len = saved_req->data_len;
+	kfree(saved_req);
+
+	return err;
+
+}
+
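+/*
+ * Cipher one chunk of a virtual-address request: copy the user source
+ * fragments into the aligned kernel bounce buffer, run the operation
+ * in place on that buffer, and copy the result back to the user
+ * destination fragments, advancing *di across calls.
+ */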
+static int qcedev_vbuf_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
+				int *di, struct qcedev_control *podev,
+				uint8_t *k_align_src)
+{
+	int err = 0;
+	int i = 0;
+	int dst_i = *di;
+	struct scatterlist sg_src;
+	uint32_t byteoffset = 0;
+	uint8_t *user_src = NULL;
+	uint8_t *k_align_dst = k_align_src;
+	struct	qcedev_cipher_op_req *creq = &areq->cipher_op_req;
+
+
+	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
+		byteoffset = areq->cipher_op_req.byteoffset;
+
+	user_src = (void __user *)areq->cipher_op_req.vbuf.src[0].vaddr;
+	if (user_src && __copy_from_user((k_align_src + byteoffset),
+				(void __user *)user_src,
+				areq->cipher_op_req.vbuf.src[0].len))
+		return -EFAULT;
+
+	k_align_src += areq->cipher_op_req.vbuf.src[0].len;
+
+	for (i = 1; i < areq->cipher_op_req.entries; i++) {
+		user_src =
+			(void __user *)areq->cipher_op_req.vbuf.src[i].vaddr;
+		if (user_src && __copy_from_user(k_align_src,
+					(void __user *)user_src,
+					areq->cipher_op_req.vbuf.src[i].len)) {
+			return -EFAULT;
+		}
+		k_align_src += areq->cipher_op_req.vbuf.src[i].len;
+	}
+
+	/* restore src beginning */
+	k_align_src = k_align_dst;
+	areq->cipher_op_req.data_len += byteoffset;
+
+	areq->cipher_req.creq.src = (struct scatterlist *) &sg_src;
+	areq->cipher_req.creq.dst = (struct scatterlist *) &sg_src;
+
+	/* In place encryption/decryption */
+	sg_set_buf(areq->cipher_req.creq.src,
+					k_align_dst,
+					areq->cipher_op_req.data_len);
+	sg_mark_end(areq->cipher_req.creq.src);
+
+	areq->cipher_req.creq.nbytes = areq->cipher_op_req.data_len;
+	areq->cipher_req.creq.info = areq->cipher_op_req.iv;
+	areq->cipher_op_req.entries = 1;
+
+	err = submit_req(areq, podev);
+
+	/* copy data to destination buffer*/
+	creq->data_len -= byteoffset;
+
+	while (creq->data_len > 0) {
+		if (creq->vbuf.dst[dst_i].len <= creq->data_len) {
+			if (err == 0 && __copy_to_user(
+				(void __user *)creq->vbuf.dst[dst_i].vaddr,
+					(k_align_dst + byteoffset),
+					creq->vbuf.dst[dst_i].len))
+					return -EFAULT;
+
+			k_align_dst += creq->vbuf.dst[dst_i].len +
+						byteoffset;
+			creq->data_len -= creq->vbuf.dst[dst_i].len;
+			dst_i++;
+		} else {
+				if (err == 0 && __copy_to_user(
+				(void __user *)creq->vbuf.dst[dst_i].vaddr,
+				(k_align_dst + byteoffset),
+				creq->data_len))
+					return -EFAULT;
+
+			k_align_dst += creq->data_len;
+			creq->vbuf.dst[dst_i].len -= creq->data_len;
+			creq->vbuf.dst[dst_i].vaddr += creq->data_len;
+			creq->data_len = 0;
+		}
+	}
+	*di = dst_i;
+
+	return err;
+};
+
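+/*
+ * Virtual-address cipher entry point: validates the user buffers,
+ * allocates a QCE_MAX_OPER_DATA bounce buffer, and splits oversized
+ * requests into chunks handled by qcedev_vbuf_ablk_cipher_max_xfer().
+ */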
+static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq,
+						struct qcedev_control *podev)
+{
+	int err = 0;
+	int di = 0;
+	int i = 0;
+	int j = 0;
+	int k = 0;
+	uint32_t byteoffset = 0;
+	int num_entries = 0;
+	uint32_t total = 0;
+	uint32_t len;
+	uint8_t *k_buf_src = NULL;
+	uint8_t *k_align_src = NULL;
+	uint32_t max_data_xfer;
+	struct qcedev_cipher_op_req *saved_req;
+	struct	qcedev_cipher_op_req *creq = &areq->cipher_op_req;
+
+	/* Verify source addresses */
+	for (i = 0; i < areq->cipher_op_req.entries; i++)
+		if (!access_ok(VERIFY_READ,
+			(void __user *)areq->cipher_op_req.vbuf.src[i].vaddr,
+					areq->cipher_op_req.vbuf.src[i].len))
+			return -EFAULT;
+
+	/* Verify destination addresses */
+	if (areq->cipher_op_req.in_place_op != 1)
+		for (i = 0; i < areq->cipher_op_req.entries; i++)
+			if (!access_ok(VERIFY_READ,
+			(void __user *)areq->cipher_op_req.vbuf.dst[i].vaddr,
+					areq->cipher_op_req.vbuf.dst[i].len))
+				return -EFAULT;
+
+	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
+		byteoffset = areq->cipher_op_req.byteoffset;
+	k_buf_src = kmalloc(QCE_MAX_OPER_DATA + CACHE_LINE_SIZE * 2,
+				GFP_KERNEL);
+	if (k_buf_src == NULL) {
+		printk(KERN_ERR "%s: Can't allocate memory: k_buf_src 0x%x\n",
+			__func__, (uint32_t)k_buf_src);
+		return -ENOMEM;
+	}
+	k_align_src = (uint8_t *) ALIGN(((unsigned int)k_buf_src),
+							CACHE_LINE_SIZE);
+	max_data_xfer = QCE_MAX_OPER_DATA - byteoffset;
+
+	saved_req = kmalloc(sizeof(struct qcedev_cipher_op_req), GFP_KERNEL);
+	if (saved_req == NULL) {
+		printk(KERN_ERR "%s: Can't allocate memory: saved_req 0x%x\n",
+			__func__, (uint32_t)saved_req);
+		kfree(k_buf_src);
+		return -ENOMEM;
+
+	}
+	memcpy(saved_req, creq, sizeof(struct qcedev_cipher_op_req));
+
+	if (areq->cipher_op_req.data_len > max_data_xfer) {
+		struct qcedev_cipher_op_req req;
+
+		/* save the original req structure */
+		memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));
+
+		i = 0;
+		/* Address 32 KB  at a time */
+		while ((i < req.entries) && (err == 0)) {
+			if (creq->vbuf.src[i].len > max_data_xfer) {
+				creq->vbuf.src[0].len =	max_data_xfer;
+				if (i > 0) {
+					creq->vbuf.src[0].vaddr =
+						creq->vbuf.src[i].vaddr;
+				}
+
+				creq->data_len = max_data_xfer;
+				creq->entries = 1;
+
+				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
+						&di, podev, k_align_src);
+				if (err < 0) {
+					kfree(k_buf_src);
+					kfree(saved_req);
+					return err;
+				}
+
+				creq->vbuf.src[i].len =	req.vbuf.src[i].len -
+							max_data_xfer;
+				creq->vbuf.src[i].vaddr =
+						req.vbuf.src[i].vaddr +
+						max_data_xfer;
+				req.vbuf.src[i].vaddr =
+						creq->vbuf.src[i].vaddr;
+				req.vbuf.src[i].len = creq->vbuf.src[i].len;
+
+			} else {
+				total = areq->cipher_op_req.byteoffset;
+				for (j = i; j < req.entries; j++) {
+					num_entries++;
+					if ((total + creq->vbuf.src[j].len)
+							>= max_data_xfer) {
+						creq->vbuf.src[j].len =
+						max_data_xfer - total;
+						total = max_data_xfer;
+						break;
+					}
+					total += creq->vbuf.src[j].len;
+				}
+
+				creq->data_len = total;
+				if (i > 0)
+					for (k = 0; k < num_entries; k++) {
+						creq->vbuf.src[k].len =
+						creq->vbuf.src[i+k].len;
+						creq->vbuf.src[k].vaddr =
+						creq->vbuf.src[i+k].vaddr;
+					}
+				creq->entries =  num_entries;
+
+				i = j;
+				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
+						&di, podev, k_align_src);
+				if (err < 0) {
+					kfree(k_buf_src);
+					kfree(saved_req);
+					return err;
+				}
+
+				num_entries = 0;
+				areq->cipher_op_req.byteoffset = 0;
+
+				creq->vbuf.src[i].vaddr = req.vbuf.src[i].vaddr
+					+ creq->vbuf.src[i].len;
+				creq->vbuf.src[i].len =	req.vbuf.src[i].len -
+							creq->vbuf.src[i].len;
+
+				req.vbuf.src[i].vaddr =
+						creq->vbuf.src[i].vaddr;
+				req.vbuf.src[i].len = creq->vbuf.src[i].len;
+
+				if (creq->vbuf.src[i].len == 0)
+					i++;
+			}
+
+			areq->cipher_op_req.byteoffset = 0;
+			max_data_xfer = QCE_MAX_OPER_DATA;
+			byteoffset = 0;
+
+		} /* end of while ((i < req.entries) && (err == 0)) */
+	} else
+		err = qcedev_vbuf_ablk_cipher_max_xfer(areq, &di, podev,
+								k_align_src);
+
+	/* Restore the original req structure */
+	for (i = 0; i < saved_req->entries; i++) {
+		creq->vbuf.src[i].len = saved_req->vbuf.src[i].len;
+		creq->vbuf.src[i].vaddr = saved_req->vbuf.src[i].vaddr;
+	}
+	for (len = 0, i = 0; len < saved_req->data_len; i++) {
+		creq->vbuf.dst[i].len = saved_req->vbuf.dst[i].len;
+		creq->vbuf.dst[i].vaddr = saved_req->vbuf.dst[i].vaddr;
+		len += saved_req->vbuf.dst[i].len;
+	}
+	creq->entries = saved_req->entries;
+	creq->data_len = saved_req->data_len;
+	creq->byteoffset = saved_req->byteoffset;
+
+	kfree(saved_req);
+	kfree(k_buf_src);
+	return err;
+
+}
+
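+/* Sanity check a user cipher request against engine and platform support. */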
+static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
+						struct qcedev_control *podev)
+{
+	if ((req->entries == 0) || (req->data_len == 0))
+		goto error;
+	if ((req->alg >= QCEDEV_ALG_LAST) ||
+		(req->mode >= QCEDEV_AES_DES_MODE_LAST))
+		goto error;
+	if (req->alg == QCEDEV_ALG_AES) {
+		if ((req->mode == QCEDEV_AES_MODE_XTS) &&
+					(!podev->ce_support.aes_xts))
+			goto error;
+		/* if intending to use HW key make sure key fields are set
+		 * correctly and HW key is indeed supported in target
+		 */
+		if (req->encklen == 0) {
+			int i;
+			for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++)
+				if (req->enckey[i])
+					goto error;
+			if ((req->op != QCEDEV_OPER_ENC_NO_KEY) &&
+				(req->op != QCEDEV_OPER_DEC_NO_KEY))
+				if (!podev->platform_support.hw_key_support)
+					goto error;
+		} else {
+			if (req->encklen == QCEDEV_AES_KEY_192) {
+				if (!podev->ce_support.aes_key_192)
+					goto error;
+			} else {
+				/* if not using HW key make sure key
+				 * length is valid
+				 */
+				if (!((req->encklen == QCEDEV_AES_KEY_128) ||
+					(req->encklen == QCEDEV_AES_KEY_256)))
+					goto error;
+			}
+		}
+	}
+	/* if using a byteoffset, make sure it is CTR mode using vbuf */
+	if (req->byteoffset) {
+		if (req->mode != QCEDEV_AES_MODE_CTR)
+			goto error;
+		else { /* if using CTR mode make sure not using Pmem */
+			if (req->use_pmem)
+				goto error;
+		}
+	}
+	/* if using PMEM, the operation must be in place */
+	if (req->use_pmem) {
+		if (!req->in_place_op)
+			goto error;
+	}
+	/* Ensure zero ivlen for ECB mode */
+	if (req->ivlen != 0) {
+		if ((req->mode == QCEDEV_AES_MODE_ECB) ||
+				(req->mode == QCEDEV_DES_MODE_ECB))
+			goto error;
+	} else {
+		if ((req->mode != QCEDEV_AES_MODE_ECB) &&
+				(req->mode != QCEDEV_DES_MODE_ECB))
+			goto error;
+	}
+
+	return 0;
+error:
+	return -EINVAL;
+
+}
+
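+/* Sanity check a user hash request against engine support. */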
+static int qcedev_check_sha_params(struct qcedev_sha_op_req *req,
+						struct qcedev_control *podev)
+{
+	if ((req->alg == QCEDEV_ALG_AES_CMAC) &&
+				(!podev->ce_support.cmac))
+		goto sha_error;
+
+	if ((req->entries == 0) || (req->data_len == 0))
+		goto sha_error;
+
+	if (req->alg >= QCEDEV_ALG_SHA_ALG_LAST)
+		goto sha_error;
+
+	return 0;
+sha_error:
+	return -EINVAL;
+}
+
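+/*
+ * Main ioctl entry point: lock/unlock the shared engine, or copy an
+ * operation request in from userspace, validate it, run it
+ * synchronously, and copy the updated request (digest, IV, context)
+ * back to userspace.
+ */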
+static long qcedev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+{
+	int err = 0;
+	struct qcedev_control *podev;
+	struct qcedev_async_req qcedev_areq;
+	struct qcedev_stat *pstat;
+
+	podev =  file->private_data;
+	if (podev == NULL || podev->magic != QCEDEV_MAGIC) {
+		printk(KERN_ERR "%s: invalid handle %p\n",
+			__func__, podev);
+		return -ENOENT;
+	}
+
+	/* Verify user arguments. */
+	if (_IOC_TYPE(cmd) != QCEDEV_IOC_MAGIC)
+		return -ENOTTY;
+
+	init_completion(&qcedev_areq.complete);
+	pstat = &_qcedev_stat[podev->pdev->id];
+
+	switch (cmd) {
+	case QCEDEV_IOCTL_LOCK_CE:
+		err = qcedev_lock_ce(podev);
+		break;
+	case QCEDEV_IOCTL_UNLOCK_CE:
+		err = qcedev_unlock_ce(podev);
+		break;
+	case QCEDEV_IOCTL_ENC_REQ:
+	case QCEDEV_IOCTL_DEC_REQ:
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+				sizeof(struct qcedev_cipher_op_req)))
+			return -EFAULT;
+
+		if (__copy_from_user(&qcedev_areq.cipher_op_req,
+				(void __user *)arg,
+				sizeof(struct qcedev_cipher_op_req)))
+			return -EFAULT;
+		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_CIPHER;
+
+		if (qcedev_check_cipher_params(&qcedev_areq.cipher_op_req,
+				podev))
+			return -EINVAL;
+
+		if (qcedev_areq.cipher_op_req.use_pmem == QCEDEV_USE_PMEM)
+			err = qcedev_pmem_ablk_cipher(&qcedev_areq, podev);
+		else
+			err = qcedev_vbuf_ablk_cipher(&qcedev_areq, podev);
+		if (err)
+			return err;
+		if (__copy_to_user((void __user *)arg,
+					&qcedev_areq.cipher_op_req,
+					sizeof(struct qcedev_cipher_op_req)))
+				return -EFAULT;
+		break;
+
+	case QCEDEV_IOCTL_SHA_INIT_REQ:
+
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+				sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+
+		if (__copy_from_user(&qcedev_areq.sha_op_req,
+					(void __user *)arg,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
+			return -EINVAL;
+		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+		err = qcedev_hash_init(&qcedev_areq, podev);
+		if (err)
+			return err;
+		if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
+					sizeof(struct qcedev_sha_op_req)))
+				return -EFAULT;
+		break;
+	case QCEDEV_IOCTL_GET_CMAC_REQ:
+		if (!podev->ce_support.cmac)
+			return -ENOTTY;
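+		/* CMAC requests fall through to the update path */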
+	case QCEDEV_IOCTL_SHA_UPDATE_REQ:
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+				sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+
+		if (__copy_from_user(&qcedev_areq.sha_op_req,
+					(void __user *)arg,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
+			return -EINVAL;
+		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+
+		if (qcedev_areq.sha_op_req.alg == QCEDEV_ALG_AES_CMAC) {
+			err = qcedev_hash_cmac(&qcedev_areq, podev);
+			if (err)
+				return err;
+		} else {
+			err = qcedev_hash_update(&qcedev_areq, podev);
+			if (err)
+				return err;
+		}
+
+		memcpy(&qcedev_areq.sha_op_req.digest[0],
+				&qcedev_areq.sha_op_req.ctxt.digest[0],
+				qcedev_areq.sha_op_req.ctxt.diglen);
+		if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		break;
+
+	case QCEDEV_IOCTL_SHA_FINAL_REQ:
+
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+				sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+
+		if (__copy_from_user(&qcedev_areq.sha_op_req,
+					(void __user *)arg,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
+			return -EINVAL;
+		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+		err = qcedev_hash_final(&qcedev_areq, podev);
+		if (err)
+			return err;
+		qcedev_areq.sha_op_req.diglen =
+				qcedev_areq.sha_op_req.ctxt.diglen;
+		memcpy(&qcedev_areq.sha_op_req.digest[0],
+				&qcedev_areq.sha_op_req.ctxt.digest[0],
+				qcedev_areq.sha_op_req.ctxt.diglen);
+		if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		break;
+
+	case QCEDEV_IOCTL_GET_SHA_REQ:
+
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+				sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+
+		if (__copy_from_user(&qcedev_areq.sha_op_req,
+					(void __user *)arg,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
+			return -EINVAL;
+		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+		qcedev_hash_init(&qcedev_areq, podev);
+		err = qcedev_hash_update(&qcedev_areq, podev);
+		if (err)
+			return err;
+		err = qcedev_hash_final(&qcedev_areq, podev);
+		if (err)
+			return err;
+		qcedev_areq.sha_op_req.diglen =
+				qcedev_areq.sha_op_req.ctxt.diglen;
+		memcpy(&qcedev_areq.sha_op_req.digest[0],
+				&qcedev_areq.sha_op_req.ctxt.digest[0],
+				qcedev_areq.sha_op_req.ctxt.diglen);
+		if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		break;
+
+	default:
+		return -ENOTTY;
+	}
+
+	return err;
+}
+
+static int qcedev_probe(struct platform_device *pdev)
+{
+	void *handle = NULL;
+	int rc = 0;
+	struct qcedev_control *podev;
+	struct msm_ce_hw_support *platform_support;
+
+	if (pdev->id >= MAX_QCE_DEVICE) {
+		printk(KERN_ERR "%s: device id %d  exceeds allowed %d\n",
+			__func__, pdev->id, MAX_QCE_DEVICE);
+		return -ENOENT;
+	}
+	podev = &qce_dev[pdev->id];
+
+	platform_support = (struct msm_ce_hw_support *)pdev->dev.platform_data;
+	podev->platform_support.ce_shared = platform_support->ce_shared;
+	podev->platform_support.shared_ce_resource =
+				platform_support->shared_ce_resource;
+	podev->platform_support.hw_key_support =
+				platform_support->hw_key_support;
+	podev->ce_locked = false;
+
+	INIT_LIST_HEAD(&podev->ready_commands);
+	podev->active_command = NULL;
+
+	spin_lock_init(&podev->lock);
+
+	tasklet_init(&podev->done_tasklet, req_done, (unsigned long)podev);
+
+	/* open qce */
+	handle = qce_open(pdev, &rc);
+	if (handle == NULL) {
+		platform_set_drvdata(pdev, NULL);
+		return rc;
+	}
+
+	podev->qce = handle;
+	podev->pdev = pdev;
+	platform_set_drvdata(pdev, podev);
+	qce_hw_support(podev->qce, &podev->ce_support);
+	rc = misc_register(&podev->miscdevice);
+
+	if (rc >= 0)
+		return 0;
+
+	if (handle)
+		qce_close(handle);
+	platform_set_drvdata(pdev, NULL);
+	podev->qce = NULL;
+	podev->pdev = NULL;
+	return rc;
+};
+
+static int qcedev_remove(struct platform_device *pdev)
+{
+	struct qcedev_control *podev;
+
+	podev = platform_get_drvdata(pdev);
+	if (!podev)
+		return 0;
+	if (podev->qce)
+		qce_close(podev->qce);
+
+	if (podev->miscdevice.minor != MISC_DYNAMIC_MINOR)
+		misc_deregister(&podev->miscdevice);
+	tasklet_kill(&podev->done_tasklet);
+	return 0;
+};
+
+static struct platform_driver qcedev_plat_driver = {
+	.probe = qcedev_probe,
+	.remove = qcedev_remove,
+	.driver = {
+		.name = "qce",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int _disp_stats(int id)
+{
+	struct qcedev_stat *pstat;
+	int len = 0;
+
+	pstat = &_qcedev_stat[id];
+	len = snprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
+			"\nQualcomm QCE dev driver %d Statistics:\n",
+				id + 1);
+
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   Encryption operation success       : %d\n",
+					pstat->qcedev_enc_success);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   Encryption operation fail   : %d\n",
+					pstat->qcedev_enc_fail);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   Decryption operation success     : %d\n",
+					pstat->qcedev_dec_success);
+
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   Decryption operation fail          : %d\n",
+					pstat->qcedev_dec_fail);
+
+	return len;
+}
+
+static int _debug_stats_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t _debug_stats_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	int rc = -EINVAL;
+	int qcedev = *((int *) file->private_data);
+	int len;
+
+	len = _disp_stats(qcedev);
+
+	rc = simple_read_from_buffer((void __user *) buf, count,
+			ppos, (void *) _debug_read_buf, len);
+
+	return rc;
+}
+
+static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+
+	int qcedev = *((int *) file->private_data);
+
+	memset((char *)&_qcedev_stat[qcedev], 0, sizeof(struct qcedev_stat));
+	return count;
+};
+
+static const struct file_operations _debug_stats_ops = {
+	.open =         _debug_stats_open,
+	.read =         _debug_stats_read,
+	.write =        _debug_stats_write,
+};
+
+static int _qcedev_debug_init(void)
+{
+	int rc;
+	char name[DEBUG_MAX_FNAME];
+	int i;
+	struct dentry *dent;
+
+	_debug_dent = debugfs_create_dir("qcedev", NULL);
+	if (IS_ERR(_debug_dent)) {
+		pr_err("qcedev debugfs_create_dir fail, error %ld\n",
+				PTR_ERR(_debug_dent));
+		return PTR_ERR(_debug_dent);
+	}
+
+	for (i = 0; i < MAX_QCE_DEVICE; i++) {
+		snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", i+1);
+		_debug_qcedev[i] = i;
+		dent = debugfs_create_file(name, 0644, _debug_dent,
+				&_debug_qcedev[i], &_debug_stats_ops);
+		if (dent == NULL) {
+			pr_err("qcedev debugfs_create_file fail, error %ld\n",
+					PTR_ERR(dent));
+			rc = PTR_ERR(dent);
+			goto err;
+		}
+	}
+	return 0;
+err:
+	debugfs_remove_recursive(_debug_dent);
+	return rc;
+}
+
+static int qcedev_init(void)
+{
+	int rc;
+
+	rc = _qcedev_debug_init();
+	if (rc)
+		return rc;
+	return platform_driver_register(&qcedev_plat_driver);
+}
+
+static void qcedev_exit(void)
+{
+	debugfs_remove_recursive(_debug_dent);
+	platform_driver_unregister(&qcedev_plat_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm DEV Crypto driver");
+MODULE_VERSION("1.20");
+
+module_init(qcedev_init);
+module_exit(qcedev_exit);
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
new file mode 100644
index 0000000..c4fd64b
--- /dev/null
+++ b/drivers/crypto/msm/qcrypto.c
@@ -0,0 +1,3274 @@
+/* Qualcomm Crypto driver
+ *
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/rtnetlink.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/debugfs.h>
+
+#include <crypto/ctr.h>
+#include <crypto/des.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/algapi.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/hash.h>
+
+#include <mach/scm.h>
+#include <linux/platform_data/qcom_crypto_device.h>
+#include "inc/qce.h"
+
+
+#define MAX_CRYPTO_DEVICE 3
+#define DEBUG_MAX_FNAME  16
+#define DEBUG_MAX_RW_BUF 1024
+
+struct crypto_stat {
+	u32 aead_sha1_aes_enc;
+	u32 aead_sha1_aes_dec;
+	u32 aead_sha1_des_enc;
+	u32 aead_sha1_des_dec;
+	u32 aead_sha1_3des_enc;
+	u32 aead_sha1_3des_dec;
+	u32 aead_op_success;
+	u32 aead_op_fail;
+	u32 ablk_cipher_aes_enc;
+	u32 ablk_cipher_aes_dec;
+	u32 ablk_cipher_des_enc;
+	u32 ablk_cipher_des_dec;
+	u32 ablk_cipher_3des_enc;
+	u32 ablk_cipher_3des_dec;
+	u32 ablk_cipher_op_success;
+	u32 ablk_cipher_op_fail;
+	u32 sha1_digest;
+	u32 sha256_digest;
+	u32 sha_op_success;
+	u32 sha_op_fail;
+	u32 sha1_hmac_digest;
+	u32 sha256_hmac_digest;
+	u32 sha_hmac_op_success;
+	u32 sha_hmac_op_fail;
+};
+static struct crypto_stat _qcrypto_stat[MAX_CRYPTO_DEVICE];
+static struct dentry *_debug_dent;
+static char _debug_read_buf[DEBUG_MAX_RW_BUF];
+
+struct crypto_priv {
+	/* CE features supported by target device*/
+	struct msm_ce_hw_support platform_support;
+
+	/* CE features/algorithms supported by HW engine*/
+	struct ce_hw_support ce_support;
+	/* the lock protects queue and req*/
+	spinlock_t lock;
+
+	/* qce handle */
+	void *qce;
+
+	/* list of  registered algorithms */
+	struct list_head alg_list;
+
+	/* platform device */
+	struct platform_device *pdev;
+
+	/* current active request */
+	struct crypto_async_request *req;
+	int res;
+
+	/* request queue */
+	struct crypto_queue queue;
+
+	uint32_t ce_lock_count;
+
+	struct work_struct unlock_ce_ws;
+
+	struct tasklet_struct done_tasklet;
+};
+
+
+/*-------------------------------------------------------------------------
+* Resource Locking Service
+* ------------------------------------------------------------------------*/
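+/*
+ * On targets where the crypto engine is shared (platform_support.ce_shared),
+ * the engine is arbitrated through an SCM call into TZ: each request takes
+ * the lock in _qcrypto_queue_req before it is queued, and the completion
+ * callbacks schedule unlock_ce_ws, which issues the unlock command once the
+ * last outstanding request drops the lock count.
+ */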
+#define QCRYPTO_CMD_ID				1
+#define QCRYPTO_CE_LOCK_CMD			1
+#define QCRYPTO_CE_UNLOCK_CMD			0
+#define NUM_RETRY				1000
+#define CE_BUSY				        55
+
+static int qcrypto_scm_cmd(int resource, int cmd, int *response)
+{
+#ifdef CONFIG_MSM_SCM
+
+	struct {
+		int resource;
+		int cmd;
+	} cmd_buf;
+
+	cmd_buf.resource = resource;
+	cmd_buf.cmd = cmd;
+
+	return scm_call(SCM_SVC_TZ, QCRYPTO_CMD_ID, &cmd_buf,
+		sizeof(cmd_buf), response, sizeof(*response));
+
+#else
+	return 0;
+#endif
+}
+
+static void qcrypto_unlock_ce(struct work_struct *work)
+{
+	int response = 0;
+	unsigned long flags;
+	struct crypto_priv *cp = container_of(work, struct crypto_priv,
+							unlock_ce_ws);
+	if (cp->ce_lock_count == 1)
+		BUG_ON(qcrypto_scm_cmd(cp->platform_support.shared_ce_resource,
+				QCRYPTO_CE_UNLOCK_CMD, &response) != 0);
+	spin_lock_irqsave(&cp->lock, flags);
+	cp->ce_lock_count--;
+	spin_unlock_irqrestore(&cp->lock, flags);
+}
+
+static int qcrypto_lock_ce(struct crypto_priv *cp)
+{
+	unsigned long flags;
+	int response = -CE_BUSY;
+	int i = 0;
+
+	if (cp->ce_lock_count == 0) {
+		do {
+			if (qcrypto_scm_cmd(
+				cp->platform_support.shared_ce_resource,
+				QCRYPTO_CE_LOCK_CMD, &response)) {
+				response = -EINVAL;
+				break;
+			}
+		} while ((response == -CE_BUSY) && (i++ < NUM_RETRY));
+
+		if ((response == -CE_BUSY) && (i >= NUM_RETRY))
+			return -EUSERS;
+		if (response < 0)
+			return -EINVAL;
+	}
+	spin_lock_irqsave(&cp->lock, flags);
+	cp->ce_lock_count++;
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+
+	return 0;
+}
+
+enum qcrypto_alg_type {
+	QCRYPTO_ALG_CIPHER	= 0,
+	QCRYPTO_ALG_SHA	= 1,
+	QCRYPTO_ALG_LAST
+};
+
+struct qcrypto_alg {
+	struct list_head entry;
+	struct crypto_alg cipher_alg;
+	struct ahash_alg sha_alg;
+	enum qcrypto_alg_type alg_type;
+	struct crypto_priv *cp;
+};
+
+#define QCRYPTO_MAX_KEY_SIZE	64
+/* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
+#define QCRYPTO_MAX_IV_LENGTH	16
+
+struct qcrypto_cipher_ctx {
+	u8 auth_key[QCRYPTO_MAX_KEY_SIZE];
+	u8 iv[QCRYPTO_MAX_IV_LENGTH];
+
+	u8 enc_key[QCRYPTO_MAX_KEY_SIZE];
+	unsigned int enc_key_len;
+
+	unsigned int authsize;
+	unsigned int auth_key_len;
+
+	struct crypto_priv *cp;
+};
+
+struct qcrypto_cipher_req_ctx {
+	u8 *iv;
+	unsigned int ivsize;
+	int  aead;
+	struct scatterlist asg;		/* Formatted associated data sg  */
+	unsigned char *assoc;		/* Pointer to formatted assoc data */
+	unsigned int assoclen;		/* Save Unformatted assoc data length */
+	struct scatterlist *assoc_sg;	/* Save Unformatted assoc data sg */
+	enum qce_cipher_alg_enum alg;
+	enum qce_cipher_dir_enum dir;
+	enum qce_cipher_mode_enum mode;
+};
+
+#define SHA_MAX_BLOCK_SIZE      SHA256_BLOCK_SIZE
+#define SHA_MAX_STATE_SIZE	(SHA256_DIGEST_SIZE / sizeof(u32))
+#define SHA_MAX_DIGEST_SIZE	 SHA256_DIGEST_SIZE
+
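+/* standard initialization vector for SHA-1, source: FIPS 180-2 */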
+static uint8_t  _std_init_vector_sha1_uint8[] =   {
+	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
+	0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
+	0xC3, 0xD2, 0xE1, 0xF0
+};
+
+/* standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint8_t _std_init_vector_sha256_uint8[] = {
+	0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
+	0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
+	0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
+	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
+};
+
+struct qcrypto_sha_ctx {
+	enum qce_hash_alg_enum  alg;
+	uint32_t		byte_count[4];
+	uint8_t			digest[SHA_MAX_DIGEST_SIZE];
+	uint32_t		diglen;
+	uint8_t			*tmp_tbuf;
+	uint8_t			*trailing_buf;
+	uint8_t			*in_buf;
+	uint32_t		authkey_in_len;
+	uint32_t		trailing_buf_len;
+	uint8_t			first_blk;
+	uint8_t			last_blk;
+	uint8_t			authkey[SHA_MAX_BLOCK_SIZE];
+	struct ahash_request *ahash_req;
+	struct completion ahash_req_complete;
+	struct scatterlist *sg;
+	struct scatterlist tmp_sg;
+	struct crypto_priv *cp;
+};
+
+struct qcrypto_sha_req_ctx {
+	union {
+		struct sha1_state sha1_state_ctx;
+		struct sha256_state sha256_state_ctx;
+	};
+	struct scatterlist *src;
+	uint32_t nbytes;
+};
+
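+/*
+ * Pack a big-endian byte stream into 32-bit words, e.g. the bytes
+ * 01 02 03 04 become the word 0x01020304; a trailing partial word is
+ * left-justified with the unused low bytes zeroed.
+ */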
+static void _byte_stream_to_words(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned n;
+
+	n = len  / sizeof(uint32_t) ;
+	for (; n > 0; n--) {
+		*iv =  ((*b << 24)      & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   |
+				(((*(b+2)) << 8) & 0xff00)     |
+				(*(b+3)          & 0xff);
+		b += sizeof(uint32_t);
+		iv++;
+	}
+
+	n = len %  sizeof(uint32_t);
+	if (n == 3) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   |
+				(((*(b+2)) << 8) & 0xff00)     ;
+	} else if (n == 2) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   ;
+	} else if (n == 1) {
+		*iv = ((*b << 24) & 0xff000000) ;
+	}
+}
+
+static void _words_to_byte_stream(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned n = len  / sizeof(uint32_t);
+
+	for (; n > 0; n--) {
+		*b++ = (unsigned char) ((*iv >> 24)   & 0xff);
+		*b++ = (unsigned char) ((*iv >> 16)   & 0xff);
+		*b++ = (unsigned char) ((*iv >> 8)    & 0xff);
+		*b++ = (unsigned char) (*iv           & 0xff);
+		iv++;
+	}
+	n = len % sizeof(uint32_t);
+	if (n == 3) {
+		*b++ = (unsigned char) ((*iv >> 24)   & 0xff);
+		*b++ = (unsigned char) ((*iv >> 16)   & 0xff);
+		*b =   (unsigned char) ((*iv >> 8)    & 0xff);
+	} else if (n == 2) {
+		*b++ = (unsigned char) ((*iv >> 24)   & 0xff);
+		*b =   (unsigned char) ((*iv >> 16)   & 0xff);
+	} else if (n == 1) {
+		*b =   (unsigned char) ((*iv >> 24)   & 0xff);
+	}
+}
+
+static void _start_qcrypto_process(struct crypto_priv *cp);
+
+static struct qcrypto_alg *_qcrypto_sha_alg_alloc(struct crypto_priv *cp,
+		struct ahash_alg *template)
+{
+	struct qcrypto_alg *q_alg;
+	q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
+	if (!q_alg) {
+		pr_err("qcrypto Memory allocation of q_alg FAIL\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	q_alg->alg_type = QCRYPTO_ALG_SHA;
+	q_alg->sha_alg = *template;
+	q_alg->cp = cp;
+
+	return q_alg;
+};
+
+static struct qcrypto_alg *_qcrypto_cipher_alg_alloc(struct crypto_priv *cp,
+		struct crypto_alg *template)
+{
+	struct qcrypto_alg *q_alg;
+
+	q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
+	if (!q_alg) {
+		pr_err("qcrypto Memory allocation of q_alg FAIL\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	q_alg->alg_type = QCRYPTO_ALG_CIPHER;
+	q_alg->cipher_alg = *template;
+	q_alg->cp = cp;
+
+	return q_alg;
+};
+
+static int _qcrypto_cipher_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct qcrypto_alg *q_alg;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	q_alg = container_of(alg, struct qcrypto_alg, cipher_alg);
+
+	/* update context with ptr to cp */
+	ctx->cp = q_alg->cp;
+
+	/* random first IV */
+	get_random_bytes(ctx->iv, QCRYPTO_MAX_IV_LENGTH);
+
+	return 0;
+};
+
+static int _qcrypto_ahash_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
+	struct ahash_alg *alg =	container_of(crypto_hash_alg_common(ahash),
+						struct ahash_alg, halg);
+	struct qcrypto_alg *q_alg = container_of(alg, struct qcrypto_alg,
+								sha_alg);
+
+	crypto_ahash_set_reqsize(ahash, sizeof(struct qcrypto_sha_req_ctx));
+	/* update context with ptr to cp */
+	sha_ctx->cp = q_alg->cp;
+	sha_ctx->sg = NULL;
+	sha_ctx->tmp_tbuf = kzalloc(SHA_MAX_BLOCK_SIZE +
+					SHA_MAX_DIGEST_SIZE, GFP_KERNEL);
+	if (sha_ctx->tmp_tbuf == NULL) {
+		pr_err("qcrypto Can't Allocate mem: sha_ctx->tmp_tbuf\n");
+		return -ENOMEM;
+	}
+
+	sha_ctx->trailing_buf = kzalloc(SHA_MAX_BLOCK_SIZE, GFP_KERNEL);
+	sha_ctx->trailing_buf = kzalloc(SHA_MAX_BLOCK_SIZE, GFP_KERNEL);
+	if (sha_ctx->trailing_buf == NULL) {
+		kfree(sha_ctx->tmp_tbuf);
+		sha_ctx->tmp_tbuf = NULL;
+		pr_err("qcrypto Can't Allocate mem: sha_ctx->trailing_buf\n");
+		return -ENOMEM;
+	}
+
+	sha_ctx->ahash_req = NULL;
+	return 0;
+};
+
+static void _qcrypto_ahash_cra_exit(struct crypto_tfm *tfm)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
+
+	kfree(sha_ctx->tmp_tbuf);
+	sha_ctx->tmp_tbuf = NULL;
+	kfree(sha_ctx->trailing_buf);
+	sha_ctx->trailing_buf = NULL;
+	if (sha_ctx->sg != NULL) {
+		kfree(sha_ctx->sg);
+		sha_ctx->sg = NULL;
+	}
+	if (sha_ctx->ahash_req != NULL) {
+		ahash_request_free(sha_ctx->ahash_req);
+		sha_ctx->ahash_req = NULL;
+	}
+};
+
+
+static void _crypto_sha_hmac_ahash_req_complete(
+	struct crypto_async_request *req, int err);
+
+static int _qcrypto_ahash_hmac_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
+	int ret = 0;
+
+	ret = _qcrypto_ahash_cra_init(tfm);
+	if (ret)
+		return ret;
+	sha_ctx->ahash_req = ahash_request_alloc(ahash, GFP_KERNEL);
+
+	if (sha_ctx->ahash_req == NULL) {
+		_qcrypto_ahash_cra_exit(tfm);
+		return -ENOMEM;
+	}
+
+	init_completion(&sha_ctx->ahash_req_complete);
+	ahash_request_set_callback(sha_ctx->ahash_req,
+				CRYPTO_TFM_REQ_MAY_BACKLOG,
+				_crypto_sha_hmac_ahash_req_complete,
+				&sha_ctx->ahash_req_complete);
+	crypto_ahash_clear_flags(ahash, ~0);
+
+	return 0;
+};
+
+static int _qcrypto_cra_ablkcipher_init(struct crypto_tfm *tfm)
+{
+	tfm->crt_ablkcipher.reqsize = sizeof(struct qcrypto_cipher_req_ctx);
+	return _qcrypto_cipher_cra_init(tfm);
+};
+
+static int _qcrypto_cra_aead_init(struct crypto_tfm *tfm)
+{
+	tfm->crt_aead.reqsize = sizeof(struct qcrypto_cipher_req_ctx);
+	return _qcrypto_cipher_cra_init(tfm);
+};
+
+static int _disp_stats(int id)
+{
+	struct crypto_stat *pstat;
+	int len = 0;
+
+	pstat = &_qcrypto_stat[id];
+	len = snprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
+			"\nQualcomm crypto accelerator %d Statistics:\n",
+				id + 1);
+
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK AES CIPHER encryption   : %d\n",
+					pstat->ablk_cipher_aes_enc);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK AES CIPHER decryption   : %d\n",
+					pstat->ablk_cipher_aes_dec);
+
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK DES CIPHER encryption   : %d\n",
+					pstat->ablk_cipher_des_enc);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK DES CIPHER decryption   : %d\n",
+					pstat->ablk_cipher_des_dec);
+
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK 3DES CIPHER encryption  : %d\n",
+					pstat->ablk_cipher_3des_enc);
+
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK 3DES CIPHER decryption  : %d\n",
+					pstat->ablk_cipher_3des_dec);
+
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK CIPHER operation success: %d\n",
+					pstat->ablk_cipher_op_success);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK CIPHER operation fail   : %d\n",
+					pstat->ablk_cipher_op_fail);
+
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-AES encryption      : %d\n",
+					pstat->aead_sha1_aes_enc);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-AES decryption      : %d\n",
+					pstat->aead_sha1_aes_dec);
+
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-DES encryption      : %d\n",
+					pstat->aead_sha1_des_enc);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-DES decryption      : %d\n",
+					pstat->aead_sha1_des_dec);
+
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-3DES encryption     : %d\n",
+					pstat->aead_sha1_3des_enc);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-3DES decryption     : %d\n",
+					pstat->aead_sha1_3des_dec);
+
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD operation success       : %d\n",
+					pstat->aead_op_success);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD operation fail          : %d\n",
+					pstat->aead_op_fail);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   SHA1 digest			 : %d\n",
+					pstat->sha1_digest);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   SHA256 digest		 : %d\n",
+					pstat->sha256_digest);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   SHA  operation fail          : %d\n",
+					pstat->sha_op_fail);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   SHA  operation success          : %d\n",
+					pstat->sha_op_success);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   SHA1 HMAC digest			 : %d\n",
+					pstat->sha1_hmac_digest);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   SHA256 HMAC digest		 : %d\n",
+					pstat->sha256_hmac_digest);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   SHA HMAC operation fail          : %d\n",
+					pstat->sha_hmac_op_fail);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   SHA HMAC operation success          : %d\n",
+					pstat->sha_hmac_op_success);
+	return len;
+}
+
+static int _qcrypto_remove(struct platform_device *pdev)
+{
+	struct crypto_priv *cp;
+	struct qcrypto_alg *q_alg;
+	struct qcrypto_alg *n;
+
+	cp = platform_get_drvdata(pdev);
+
+	if (!cp)
+		return 0;
+
+	list_for_each_entry_safe(q_alg, n, &cp->alg_list, entry) {
+		if (q_alg->alg_type == QCRYPTO_ALG_CIPHER)
+			crypto_unregister_alg(&q_alg->cipher_alg);
+		if (q_alg->alg_type == QCRYPTO_ALG_SHA)
+			crypto_unregister_ahash(&q_alg->sha_alg);
+		list_del(&q_alg->entry);
+		kfree(q_alg);
+	}
+
+	if (cp->qce)
+		qce_close(cp->qce);
+	tasklet_kill(&cp->done_tasklet);
+	kfree(cp);
+	return 0;
+};
+
+static int _qcrypto_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
+		unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_priv *cp = ctx->cp;
+
+	switch (len) {
+	case AES_KEYSIZE_128:
+	case AES_KEYSIZE_256:
+		break;
+	case AES_KEYSIZE_192:
+		if (cp->ce_support.aes_key_192)
+			break;
+	default:
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	};
+	ctx->enc_key_len = len;
+	memcpy(ctx->enc_key, key, len);
+	return 0;
+};
+
+static int _qcrypto_setkey_des(struct crypto_ablkcipher *cipher, const u8 *key,
+		unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	u32 tmp[DES_EXPKEY_WORDS];
+	int ret = des_ekey(tmp, key);
+
+	if (len != DES_KEY_SIZE) {
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	};
+
+	if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+		return -EINVAL;
+	}
+
+	ctx->enc_key_len = len;
+	memcpy(ctx->enc_key, key, len);
+	return 0;
+};
+
+static int _qcrypto_setkey_3des(struct crypto_ablkcipher *cipher, const u8 *key,
+		unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if (len != DES3_EDE_KEY_SIZE) {
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	};
+	ctx->enc_key_len = len;
+	memcpy(ctx->enc_key, key, len);
+	return 0;
+};
+
+static void req_done(unsigned long data)
+{
+	struct crypto_async_request *areq;
+	struct crypto_priv *cp = (struct crypto_priv *)data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	areq = cp->req;
+	cp->req = NULL;
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+	if (areq)
+		areq->complete(areq, cp->res);
+	_start_qcrypto_process(cp);
+};
+
+static void _update_sha1_ctx(struct ahash_request  *req)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx;
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+
+	if (sha_ctx->last_blk == 1)
+		memset(sha_state_ctx, 0x00, sizeof(struct sha1_state));
+	else {
+		memset(sha_state_ctx->buffer, 0x00, SHA1_BLOCK_SIZE);
+		memcpy(sha_state_ctx->buffer, sha_ctx->trailing_buf,
+						sha_ctx->trailing_buf_len);
+		_byte_stream_to_words(sha_state_ctx->state , sha_ctx->digest,
+					SHA1_DIGEST_SIZE);
+	}
+	return;
+}
+
+static void _update_sha256_ctx(struct ahash_request  *req)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx;
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+
+	if (sha_ctx->last_blk == 1)
+		memset(sha_state_ctx, 0x00, sizeof(struct sha256_state));
+	else {
+		memset(sha_state_ctx->buf, 0x00, SHA256_BLOCK_SIZE);
+		memcpy(sha_state_ctx->buf, sha_ctx->trailing_buf,
+						sha_ctx->trailing_buf_len);
+		_byte_stream_to_words(sha_state_ctx->state, sha_ctx->digest,
+					SHA256_DIGEST_SIZE);
+	}
+	return;
+}
+
+static void _qce_ahash_complete(void *cookie, unsigned char *digest,
+		unsigned char *authdata, int ret)
+{
+	struct ahash_request *areq = (struct ahash_request *) cookie;
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(areq->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(areq);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct crypto_stat *pstat;
+	uint32_t diglen = crypto_ahash_digestsize(ahash);
+	uint32_t *auth32 = (uint32_t *)authdata;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+#ifdef QCRYPTO_DEBUG
+	dev_info(&cp->pdev->dev, "_qce_ahash_complete: %p ret %d\n",
+				areq, ret);
+#endif
+
+	if (digest) {
+		memcpy(sha_ctx->digest, digest, diglen);
+		memcpy(areq->result, digest, diglen);
+	}
+	if (authdata) {
+		sha_ctx->byte_count[0] = auth32[0];
+		sha_ctx->byte_count[1] = auth32[1];
+		sha_ctx->byte_count[2] = auth32[2];
+		sha_ctx->byte_count[3] = auth32[3];
+	}
+	areq->src = rctx->src;
+	areq->nbytes = rctx->nbytes;
+
+	if (sha_ctx->sg != NULL) {
+		kfree(sha_ctx->sg);
+		sha_ctx->sg = NULL;
+	}
+
+	if (sha_ctx->alg == QCE_HASH_SHA1)
+		_update_sha1_ctx(areq);
+	if (sha_ctx->alg == QCE_HASH_SHA256)
+		_update_sha256_ctx(areq);
+
+	sha_ctx->last_blk = 0;
+	sha_ctx->first_blk = 0;
+
+	if (ret) {
+		cp->res = -ENXIO;
+		pstat->sha_op_fail++;
+	} else {
+		cp->res = 0;
+		pstat->sha_op_success++;
+	}
+
+	if (cp->platform_support.ce_shared)
+		schedule_work(&cp->unlock_ce_ws);
+	tasklet_schedule(&cp->done_tasklet);
+};
+
+static void _qce_ablk_cipher_complete(void *cookie, unsigned char *icb,
+		unsigned char *iv, int ret)
+{
+	struct ablkcipher_request *areq = (struct ablkcipher_request *) cookie;
+	struct crypto_ablkcipher *ablk = crypto_ablkcipher_reqtfm(areq);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+#ifdef QCRYPTO_DEBUG
+	dev_info(&cp->pdev->dev, "_qce_ablk_cipher_complete: %p ret %d\n",
+				areq, ret);
+#endif
+	if (iv)
+		memcpy(ctx->iv, iv, crypto_ablkcipher_ivsize(ablk));
+
+	if (ret) {
+		cp->res = -ENXIO;
+		pstat->ablk_cipher_op_fail++;
+	} else {
+		cp->res = 0;
+		pstat->ablk_cipher_op_success++;
+	}
+	if (cp->platform_support.ce_shared)
+		schedule_work(&cp->unlock_ce_ws);
+	tasklet_schedule(&cp->done_tasklet);
+};
+
+
+static void _qce_aead_complete(void *cookie, unsigned char *icv,
+				unsigned char *iv, int ret)
+{
+	struct aead_request *areq = (struct aead_request *) cookie;
+	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	rctx = aead_request_ctx(areq);
+
+	if (rctx->mode == QCE_MODE_CCM) {
+		kzfree(rctx->assoc);
+		areq->assoc = rctx->assoc_sg;
+		areq->assoclen = rctx->assoclen;
+		if (ret) {
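+			/*
+			 * 0x2000000 appears to be the CE status code for an
+			 * ICV/MAC verification failure; map it to -EBADMSG.
+			 */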
+			if (ret == 0x2000000)
+				ret = -EBADMSG;
+			else
+				ret = -ENXIO;
+		}
+	} else {
+		if (ret == 0) {
+			if (rctx->dir  == QCE_ENCRYPT) {
+				/* copy the icv to dst */
+				scatterwalk_map_and_copy(icv, areq->dst,
+						areq->cryptlen,
+						ctx->authsize, 1);
+
+			} else {
+				unsigned char tmp[SHA256_DIGEST_SIZE];
+
+				/* compare icv from src */
+				scatterwalk_map_and_copy(tmp,
+					areq->src, areq->cryptlen -
+					ctx->authsize, ctx->authsize, 0);
+				ret = memcmp(icv, tmp, ctx->authsize);
+				if (ret != 0)
+					ret = -EBADMSG;
+
+			}
+		} else {
+			ret = -ENXIO;
+		}
+
+		if (iv)
+			memcpy(ctx->iv, iv, crypto_aead_ivsize(aead));
+	}
+
+	if (ret)
+		pstat->aead_op_fail++;
+	else
+		pstat->aead_op_success++;
+
+	if (cp->platform_support.ce_shared)
+		schedule_work(&cp->unlock_ce_ws);
+	tasklet_schedule(&cp->done_tasklet);
+}
+
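+/*
+ * Write the message length into the last csize octets of the CCM B0 block
+ * as a big-endian value, e.g. msglen 300 with a 2-octet field becomes
+ * 0x01 0x2C; lengths that do not fit in the field return -EOVERFLOW.
+ */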
+static int aead_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
+{
+	__be32 data;
+
+	memset(block, 0, csize);
+	block += csize;
+
+	if (csize >= 4)
+		csize = 4;
+	else if (msglen > (1 << (8 * csize)))
+		return -EOVERFLOW;
+
+	data = cpu_to_be32(msglen);
+	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
+
+	return 0;
+}
+
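+/*
+ * Build the CCM B0 flags octet on top of the caller-supplied IV: the low
+ * bits already carry L' (length-field size minus one), bits 3-5 get
+ * M' = (authsize - 2) / 2, and bit 6 is set when associated data is present.
+ * For example, authsize 8 with associated data and iv[0] = 3 gives
+ * 0x40 | 0x18 | 0x03 = 0x5B.
+ */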
+static int qccrypto_set_aead_ccm_nonce(struct qce_req *qreq)
+{
+	struct aead_request *areq = (struct aead_request *) qreq->areq;
+	unsigned int i = ((unsigned int)qreq->iv[0]) + 1;
+
+	memcpy(&qreq->nonce[0] , qreq->iv, qreq->ivsize);
+	/*
+	 * Format control info per RFC 3610 and
+	 * NIST Special Publication 800-38C
+	 */
+	qreq->nonce[0] |= (8 * ((qreq->authsize - 2) / 2));
+	if (areq->assoclen)
+		qreq->nonce[0] |= 64;
+
+	return aead_ccm_set_msg_len(qreq->nonce + 16 - i, qreq->cryptlen, i);
+}
+
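+/*
+ * Prefix the associated data with its length encoding from RFC 3610 /
+ * NIST SP 800-38C (a 2-octet big-endian value for lengths below 65280,
+ * otherwise 0xFFFE followed by a 4-octet value) and pad the result to a
+ * 16-byte multiple, e.g. alen 24 gives the header 0x00 0x18 and a
+ * formatted length of 32.
+ */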
+static int qcrypto_aead_ccm_format_adata(struct qce_req *qreq, uint32_t alen,
+						struct scatterlist *sg)
+{
+	unsigned char *adata;
+	uint32_t len, l;
+
+	qreq->assoc = kzalloc((alen + 0x64), (GFP_KERNEL | __GFP_DMA));
+	if (!qreq->assoc) {
+		pr_err("qcrypto Memory allocation of adata FAIL\n");
+		return -ENOMEM;
+	}
+	adata = qreq->assoc;
+	/*
+	 * Add control info for associated data
+	 * RFC 3610 and NIST Special Publication 800-38C
+	 */
+	if (alen < 65280) {
+		*(__be16 *)adata = cpu_to_be16(alen);
+		len = 2;
+	} else {
+		if ((alen >= 65280) && (alen <= 0xffffffff)) {
+			*(__be16 *)adata = cpu_to_be16(0xfffe);
+			*(__be32 *)&adata[2] = cpu_to_be32(alen);
+			len = 6;
+		} else {
+			*(__be16 *)adata = cpu_to_be16(0xffff);
+			*(__be32 *)&adata[6] = cpu_to_be32(alen);
+			len = 10;
+		}
+	}
+	adata += len;
+	qreq->assoclen = ALIGN((alen + len), 16);
+	for (l = alen; l > 0; sg = sg_next(sg)) {
+		memcpy(adata, sg_virt(sg), sg->length);
+		l -= sg->length;
+		adata += sg->length;
+	}
+	return 0;
+}
+
+static void _start_qcrypto_process(struct crypto_priv *cp)
+{
+	struct crypto_async_request *async_req = NULL;
+	struct crypto_async_request *backlog = NULL;
+	unsigned long flags;
+	u32 type;
+	struct qce_req qreq;
+	int ret;
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *cipher_ctx;
+	struct qcrypto_sha_ctx *sha_ctx;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+again:
+	spin_lock_irqsave(&cp->lock, flags);
+	if (cp->req == NULL) {
+		backlog = crypto_get_backlog(&cp->queue);
+		async_req = crypto_dequeue_request(&cp->queue);
+		cp->req = async_req;
+	}
+	spin_unlock_irqrestore(&cp->lock, flags);
+	if (!async_req)
+		return;
+	if (backlog)
+		backlog->complete(backlog, -EINPROGRESS);
+	type = crypto_tfm_alg_type(async_req->tfm);
+
+	if (type == CRYPTO_ALG_TYPE_ABLKCIPHER) {
+		struct ablkcipher_request *req;
+		struct crypto_ablkcipher *tfm;
+
+		req = container_of(async_req, struct ablkcipher_request, base);
+		cipher_ctx = crypto_tfm_ctx(async_req->tfm);
+		rctx = ablkcipher_request_ctx(req);
+		tfm = crypto_ablkcipher_reqtfm(req);
+
+		qreq.op = QCE_REQ_ABLK_CIPHER;
+		qreq.qce_cb = _qce_ablk_cipher_complete;
+		qreq.areq = req;
+		qreq.alg = rctx->alg;
+		qreq.dir = rctx->dir;
+		qreq.mode = rctx->mode;
+		qreq.enckey = cipher_ctx->enc_key;
+		qreq.encklen = cipher_ctx->enc_key_len;
+		qreq.iv = req->info;
+		qreq.ivsize = crypto_ablkcipher_ivsize(tfm);
+		qreq.cryptlen = req->nbytes;
+		qreq.use_pmem = 0;
+
+		if ((cipher_ctx->enc_key_len == 0) &&
+				(cp->platform_support.hw_key_support == 0))
+			ret = -EINVAL;
+		else
+			ret =  qce_ablk_cipher_req(cp->qce, &qreq);
+	} else {
+		if (type == CRYPTO_ALG_TYPE_AHASH) {
+
+			struct ahash_request *req;
+			struct qce_sha_req sreq;
+
+			req = container_of(async_req,
+						struct ahash_request, base);
+			sha_ctx = crypto_tfm_ctx(async_req->tfm);
+
+			sreq.qce_cb = _qce_ahash_complete;
+			sreq.digest =  &sha_ctx->digest[0];
+			sreq.src = req->src;
+			sreq.auth_data[0] = sha_ctx->byte_count[0];
+			sreq.auth_data[1] = sha_ctx->byte_count[1];
+			sreq.auth_data[2] = sha_ctx->byte_count[2];
+			sreq.auth_data[3] = sha_ctx->byte_count[3];
+			sreq.first_blk = sha_ctx->first_blk;
+			sreq.last_blk = sha_ctx->last_blk;
+			sreq.size = req->nbytes;
+			sreq.areq = req;
+
+			switch (sha_ctx->alg) {
+			case QCE_HASH_SHA1:
+				sreq.alg = QCE_HASH_SHA1;
+				sreq.authkey = NULL;
+				break;
+			case QCE_HASH_SHA256:
+				sreq.alg = QCE_HASH_SHA256;
+				sreq.authkey = NULL;
+				break;
+			case QCE_HASH_SHA1_HMAC:
+				sreq.alg = QCE_HASH_SHA1_HMAC;
+				sreq.authkey = &sha_ctx->authkey[0];
+				break;
+			case QCE_HASH_SHA256_HMAC:
+				sreq.alg = QCE_HASH_SHA256_HMAC;
+				sreq.authkey = &sha_ctx->authkey[0];
+				break;
+			default:
+				break;
+			};
+			ret =  qce_process_sha_req(cp->qce, &sreq);
+
+		} else {
+			struct aead_request *req = container_of(async_req,
+						struct aead_request, base);
+			struct crypto_aead *aead = crypto_aead_reqtfm(req);
+
+			rctx = aead_request_ctx(req);
+			cipher_ctx = crypto_tfm_ctx(async_req->tfm);
+
+			qreq.op = QCE_REQ_AEAD;
+			qreq.qce_cb = _qce_aead_complete;
+
+			qreq.areq = req;
+			qreq.alg = rctx->alg;
+			qreq.dir = rctx->dir;
+			qreq.mode = rctx->mode;
+			qreq.iv = rctx->iv;
+
+			qreq.enckey = cipher_ctx->enc_key;
+			qreq.encklen = cipher_ctx->enc_key_len;
+			qreq.authkey = cipher_ctx->auth_key;
+			qreq.authklen = cipher_ctx->auth_key_len;
+			qreq.authsize = crypto_aead_authsize(aead);
+			qreq.ivsize =  crypto_aead_ivsize(aead);
+			if (qreq.mode == QCE_MODE_CCM) {
+				if (qreq.dir == QCE_ENCRYPT)
+					qreq.cryptlen = req->cryptlen;
+				else
+					qreq.cryptlen = req->cryptlen -
+								qreq.authsize;
+				/* Get NONCE */
+				ret = qccrypto_set_aead_ccm_nonce(&qreq);
+				if (ret)
+					goto done;
+				/* Format Associated data    */
+				ret = qcrypto_aead_ccm_format_adata(&qreq,
+								req->assoclen,
+								req->assoc);
+				if (ret)
+					goto done;
+				/*
+				 * Save the original associated data
+				 * length and sg
+				 */
+				rctx->assoc_sg  = req->assoc;
+				rctx->assoclen  = req->assoclen;
+				rctx->assoc  = qreq.assoc;
+				/*
+				 * update req with new formatted associated
+				 * data info
+				 */
+				req->assoc = &rctx->asg;
+				req->assoclen = qreq.assoclen;
+				sg_set_buf(req->assoc, qreq.assoc,
+							req->assoclen);
+				sg_mark_end(req->assoc);
+			}
+			ret =  qce_aead_req(cp->qce, &qreq);
+		}
+	};
+done:
+	if (ret) {
+
+		spin_lock_irqsave(&cp->lock, flags);
+		cp->req = NULL;
+		spin_unlock_irqrestore(&cp->lock, flags);
+
+		if (type == CRYPTO_ALG_TYPE_ABLKCIPHER)
+			pstat->ablk_cipher_op_fail++;
+		else
+			if (type == CRYPTO_ALG_TYPE_AHASH)
+				pstat->sha_op_fail++;
+			else
+				pstat->aead_op_fail++;
+
+		async_req->complete(async_req, ret);
+		goto again;
+	};
+};
+
+static int _qcrypto_queue_req(struct crypto_priv *cp,
+				struct crypto_async_request *req)
+{
+	int ret;
+	unsigned long flags;
+
+	if (cp->platform_support.ce_shared) {
+		ret = qcrypto_lock_ce(cp);
+		if (ret)
+			return ret;
+	}
+
+	spin_lock_irqsave(&cp->lock, flags);
+	ret = crypto_enqueue_request(&cp->queue, req);
+	spin_unlock_irqrestore(&cp->lock, flags);
+	_start_qcrypto_process(cp);
+
+	return ret;
+}
+
+static int _qcrypto_enc_aes_ecb(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&cp->pdev->dev, "_qcrypto_enc_aes_ecb: %p\n", req);
+#endif
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->ablk_cipher_aes_enc++;
+	return _qcrypto_queue_req(cp, &req->base);
+};
+
+static int _qcrypto_enc_aes_cbc(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&cp->pdev->dev, "_qcrypto_enc_aes_cbc: %p\n", req);
+#endif
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->ablk_cipher_aes_enc++;
+	return _qcrypto_queue_req(cp, &req->base);
+};
+
+static int _qcrypto_enc_aes_ctr(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+				CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&cp->pdev->dev, "_qcrypto_enc_aes_ctr: %p\n", req);
+#endif
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CTR;
+
+	pstat->ablk_cipher_aes_enc++;
+	return _qcrypto_queue_req(cp, &req->base);
+};
+
+static int _qcrypto_enc_aes_xts(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_XTS;
+
+	pstat->ablk_cipher_aes_enc++;
+	return _qcrypto_queue_req(cp, &req->base);
+};
+
+static int _qcrypto_aead_encrypt_aes_ccm(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	if ((ctx->authsize > 16) || (ctx->authsize < 4) || (ctx->authsize & 1))
+		return  -EINVAL;
+	if ((ctx->auth_key_len != AES_KEYSIZE_128) &&
+		(ctx->auth_key_len != AES_KEYSIZE_256))
+		return  -EINVAL;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CCM;
+	rctx->iv = req->iv;
+
+	pstat->aead_sha1_aes_enc++;
+	return _qcrypto_queue_req(cp, &req->base);
+}
+
+static int _qcrypto_enc_des_ecb(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->ablk_cipher_des_enc++;
+	return _qcrypto_queue_req(cp, &req->base);
+};
+
+static int _qcrypto_enc_des_cbc(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->ablk_cipher_des_enc++;
+	return _qcrypto_queue_req(cp, &req->base);
+};
+
+static int _qcrypto_enc_3des_ecb(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->ablk_cipher_3des_enc++;
+	return _qcrypto_queue_req(cp, &req->base);
+};
+
+static int _qcrypto_enc_3des_cbc(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->ablk_cipher_3des_enc++;
+	return _qcrypto_queue_req(cp, &req->base);
+};
+
+static int _qcrypto_dec_aes_ecb(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+				CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&cp->pdev->dev, "_qcrypto_dec_aes_ecb: %p\n", req);
+#endif
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->ablk_cipher_aes_dec++;
+	return _qcrypto_queue_req(cp, &req->base);
+};
+
+static int _qcrypto_dec_aes_cbc(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+				CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&cp->pdev->dev, "_qcrypto_dec_aes_cbc: %p\n", req);
+#endif
+
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->ablk_cipher_aes_dec++;
+	return _qcrypto_queue_req(cp, &req->base);
+};
+
+static int _qcrypto_dec_aes_ctr(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&cp->pdev->dev, "_qcrypto_dec_aes_ctr: %p\n", req);
+#endif
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->mode = QCE_MODE_CTR;
+
+	/*
+	 * Note: AES counter mode has no separate decrypt operation;
+	 * decryption runs the engine in the encrypt direction.
+	 */
+	rctx->dir = QCE_ENCRYPT;
+
+	pstat->ablk_cipher_aes_dec++;
+	return _qcrypto_queue_req(cp, &req->base);
+};
+
+static int _qcrypto_dec_des_ecb(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->ablk_cipher_des_dec++;
+	return _qcrypto_queue_req(cp, &req->base);
+};
+
+static int _qcrypto_dec_des_cbc(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->ablk_cipher_des_dec++;
+	return _qcrypto_queue_req(cp, &req->base);
+};
+
+static int _qcrypto_dec_3des_ecb(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->ablk_cipher_3des_dec++;
+	return _qcrypto_queue_req(cp, &req->base);
+};
+
+static int _qcrypto_dec_3des_cbc(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->ablk_cipher_3des_dec++;
+	return _qcrypto_queue_req(cp, &req->base);
+};
+
+static int _qcrypto_dec_aes_xts(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->mode = QCE_MODE_XTS;
+	rctx->dir = QCE_DECRYPT;
+
+	pstat->ablk_cipher_aes_dec++;
+	return _qcrypto_queue_req(cp, &req->base);
+};
+
+
+static int _qcrypto_aead_decrypt_aes_ccm(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	if ((ctx->authsize > 16) || (ctx->authsize < 4) || (ctx->authsize & 1))
+		return  -EINVAL;
+	if ((ctx->auth_key_len != AES_KEYSIZE_128) &&
+		(ctx->auth_key_len != AES_KEYSIZE_256))
+		return  -EINVAL;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CCM;
+	rctx->iv = req->iv;
+
+	pstat->aead_sha1_aes_dec++;
+	return _qcrypto_queue_req(cp, &req->base);
+}
+
+static int _qcrypto_aead_setauthsize(struct crypto_aead *authenc,
+				unsigned int authsize)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);
+
+	ctx->authsize = authsize;
+	return 0;
+}
+
+static int _qcrypto_aead_ccm_setauthsize(struct crypto_aead *authenc,
+				  unsigned int authsize)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);
+
+	switch (authsize) {
+	case 4:
+	case 6:
+	case 8:
+	case 10:
+	case 12:
+	case 14:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+	ctx->authsize = authsize;
+	return 0;
+}
+
+static int _qcrypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+			unsigned int keylen)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+	struct rtattr *rta = (struct rtattr *)key;
+	struct crypto_authenc_key_param *param;
+
+	if (!RTA_OK(rta, keylen))
+		goto badkey;
+	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+		goto badkey;
+	if (RTA_PAYLOAD(rta) < sizeof(*param))
+		goto badkey;
+
+	param = RTA_DATA(rta);
+	ctx->enc_key_len = be32_to_cpu(param->enckeylen);
+
+	key += RTA_ALIGN(rta->rta_len);
+	keylen -= RTA_ALIGN(rta->rta_len);
+
+	if (keylen < ctx->enc_key_len)
+		goto badkey;
+
+	ctx->auth_key_len = keylen - ctx->enc_key_len;
+	if (ctx->enc_key_len >= QCRYPTO_MAX_KEY_SIZE ||
+				ctx->auth_key_len >= QCRYPTO_MAX_KEY_SIZE)
+		goto badkey;
+	memset(ctx->auth_key, 0, QCRYPTO_MAX_KEY_SIZE);
+	memcpy(ctx->enc_key, key + ctx->auth_key_len, ctx->enc_key_len);
+	memcpy(ctx->auth_key, key, ctx->auth_key_len);
+
+	return 0;
+badkey:
+	ctx->enc_key_len = 0;
+	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	return -EINVAL;
+}
+
+static int _qcrypto_aead_ccm_setkey(struct crypto_aead *aead, const u8 *key,
+			unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_priv *cp = ctx->cp;
+
+	switch (keylen) {
+	case AES_KEYSIZE_128:
+	case AES_KEYSIZE_256:
+		break;
+	case AES_KEYSIZE_192:
+		if (cp->ce_support.aes_key_192)
+			break;
+	default:
+		ctx->enc_key_len = 0;
+		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	};
+	ctx->enc_key_len = keylen;
+	memcpy(ctx->enc_key, key, keylen);
+	ctx->auth_key_len = keylen;
+	memcpy(ctx->auth_key, key, keylen);
+
+	return 0;
+}
+
+static int _qcrypto_aead_encrypt_aes_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+#ifdef QCRYPTO_DEBUG
+	dev_info(&cp->pdev->dev, "_qcrypto_aead_encrypt_aes_cbc: %p\n", req);
+#endif
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+
+	pstat->aead_sha1_aes_enc++;
+	return _qcrypto_queue_req(cp, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_aes_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+#ifdef QCRYPTO_DEBUG
+	dev_info(&cp->pdev->dev, "_qcrypto_aead_decrypt_aes_cbc: %p\n", req);
+#endif
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+
+	pstat->aead_sha1_aes_dec++;
+	return _qcrypto_queue_req(cp, &req->base);
+}
+
+static int _qcrypto_aead_givencrypt_aes_cbc(struct aead_givcrypt_request *req)
+{
+	struct aead_request *areq = &req->areq;
+	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	rctx = aead_request_ctx(areq);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->giv;	/* generated iv */
+
+	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
+	 /* avoid consecutive packets going out with same IV */
+	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);
+	pstat->aead_sha1_aes_enc++;
+	return _qcrypto_queue_req(cp, &areq->base);
+}
+
+#ifdef QCRYPTO_AEAD_AES_CTR
+static int _qcrypto_aead_encrypt_aes_ctr(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CTR;
+	rctx->iv = req->iv;
+
+	pstat->aead_sha1_aes_enc++;
+	return _qcrypto_queue_req(cp, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_aes_ctr(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+
+	/*
+	 * Note: AES counter mode has no separate decrypt operation;
+	 * decryption runs the engine in the encrypt direction.
+	 */
+	rctx->dir = QCE_ENCRYPT;
+
+	rctx->mode = QCE_MODE_CTR;
+	rctx->iv = req->iv;
+
+	pstat->aead_sha1_aes_dec++;
+	return _qcrypto_queue_req(cp, &req->base);
+}
+
+static int _qcrypto_aead_givencrypt_aes_ctr(struct aead_givcrypt_request *req)
+{
+	struct aead_request *areq = &req->areq;
+	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	rctx = aead_request_ctx(areq);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CTR;
+	rctx->iv = req->giv;	/* generated iv */
+
+	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
+	 /* avoid consecutive packets going out with same IV */
+	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);
+	pstat->aead_sha1_aes_enc++;
+	return _qcrypto_queue_req(cp, &areq->base);
+};
+#endif /* QCRYPTO_AEAD_AES_CTR */
+
+static int _qcrypto_aead_encrypt_des_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+
+	pstat->aead_sha1_des_enc++;
+	return _qcrypto_queue_req(cp, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_des_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+
+	pstat->aead_sha1_des_dec++;
+	return _qcrypto_queue_req(cp, &req->base);
+}
+
+static int _qcrypto_aead_givencrypt_des_cbc(struct aead_givcrypt_request *req)
+{
+	struct aead_request *areq = &req->areq;
+	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	rctx = aead_request_ctx(areq);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->giv;	/* generated iv */
+
+	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
+	 /* avoid consecutive packets going out with same IV */
+	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);
+	pstat->aead_sha1_des_enc++;
+	return _qcrypto_queue_req(cp, &areq->base);
+}
+
+static int _qcrypto_aead_encrypt_3des_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+
+	pstat->aead_sha1_3des_enc++;
+	return _qcrypto_queue_req(cp, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_3des_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+
+	pstat->aead_sha1_3des_dec++;
+	return _qcrypto_queue_req(cp, &req->base);
+}
+
+static int _qcrypto_aead_givencrypt_3des_cbc(struct aead_givcrypt_request *req)
+{
+	struct aead_request *areq = &req->areq;
+	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	rctx = aead_request_ctx(areq);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->giv;	/* generated iv */
+
+	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
+	 /* avoid consecutive packets going out with same IV */
+	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);
+	pstat->aead_sha1_3des_enc++;
+	return _qcrypto_queue_req(cp, &areq->base);
+}
+
+static int qcrypto_count_sg(struct scatterlist *sg, int nbytes)
+{
+	int i;
+
+	for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
+		nbytes -= sg->length;
+
+	return i;
+}
+
+static int _sha_init(struct qcrypto_sha_ctx *ctx)
+{
+	ctx->first_blk = 1;
+	ctx->last_blk = 0;
+	ctx->byte_count[0] = 0;
+	ctx->byte_count[1] = 0;
+	ctx->byte_count[2] = 0;
+	ctx->byte_count[3] = 0;
+	ctx->trailing_buf_len = 0;
+
+	return 0;
+};
+
+static int _sha1_init(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	_sha_init(sha_ctx);
+	sha_ctx->alg = QCE_HASH_SHA1;
+
+	memset(&sha_ctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE);
+	memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
+						SHA1_DIGEST_SIZE);
+	sha_ctx->diglen = SHA1_DIGEST_SIZE;
+	_update_sha1_ctx(req);
+
+	pstat->sha1_digest++;
+	return 0;
+};
+
+static int _sha256_init(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	_sha_init(sha_ctx);
+	sha_ctx->alg = QCE_HASH_SHA256;
+
+	memset(&sha_ctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE);
+	memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
+						SHA256_DIGEST_SIZE);
+	sha_ctx->diglen = SHA256_DIGEST_SIZE;
+	_update_sha256_ctx(req);
+
+	pstat->sha256_digest++;
+	return 0;
+};
+
+
+static int _sha1_export(struct ahash_request  *req, void *out)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx;
+	struct sha1_state *out_ctx = (struct sha1_state *)out;
+
+	out_ctx->count = sha_state_ctx->count;
+	memcpy(out_ctx->state, sha_state_ctx->state, sizeof(out_ctx->state));
+	memcpy(out_ctx->buffer, sha_state_ctx->buffer, SHA1_BLOCK_SIZE);
+
+	return 0;
+};
+
+static int _sha1_import(struct ahash_request  *req, const void *in)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx;
+	struct sha1_state *in_ctx = (struct sha1_state *)in;
+
+	sha_state_ctx->count = in_ctx->count;
+	memcpy(sha_state_ctx->state, in_ctx->state, sizeof(in_ctx->state));
+	memcpy(sha_state_ctx->buffer, in_ctx->buffer, SHA1_BLOCK_SIZE);
+	memcpy(sha_ctx->trailing_buf, in_ctx->buffer, SHA1_BLOCK_SIZE);
+
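+	/*
+	 * byte_count[] carries the 64-bit processed-byte count handed to the
+	 * CE, with the low word rounded down to a whole 64-byte block; the
+	 * block-size remainder is replayed from trailing_buf instead.
+	 */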
+	sha_ctx->byte_count[0] =  (uint32_t)(in_ctx->count & 0xFFFFFFC0);
+	sha_ctx->byte_count[1] =  (uint32_t)(in_ctx->count >> 32);
+	_words_to_byte_stream(in_ctx->state, sha_ctx->digest, sha_ctx->diglen);
+
+	sha_ctx->trailing_buf_len = (uint32_t)(in_ctx->count &
+						(SHA1_BLOCK_SIZE-1));
+
+	if (!(in_ctx->count))
+		sha_ctx->first_blk = 1;
+	else
+		sha_ctx->first_blk = 0;
+
+	return 0;
+}
+static int _sha256_export(struct ahash_request  *req, void *out)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx;
+	struct sha256_state *out_ctx = (struct sha256_state *)out;
+
+	out_ctx->count = sha_state_ctx->count;
+	memcpy(out_ctx->state, sha_state_ctx->state, sizeof(out_ctx->state));
+	memcpy(out_ctx->buf, sha_state_ctx->buf, SHA256_BLOCK_SIZE);
+
+	return 0;
+};
+
+static int _sha256_import(struct ahash_request  *req, const void *in)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx;
+	struct sha256_state *in_ctx = (struct sha256_state *)in;
+
+	sha_state_ctx->count = in_ctx->count;
+	memcpy(sha_state_ctx->state, in_ctx->state, sizeof(in_ctx->state));
+	memcpy(sha_state_ctx->buf, in_ctx->buf, SHA256_BLOCK_SIZE);
+	memcpy(sha_ctx->trailing_buf, in_ctx->buf, SHA256_BLOCK_SIZE);
+
+	sha_ctx->byte_count[0] =  (uint32_t)(in_ctx->count & 0xFFFFFFC0);
+	sha_ctx->byte_count[1] =  (uint32_t)(in_ctx->count >> 32);
+	_words_to_byte_stream(in_ctx->state, sha_ctx->digest, sha_ctx->diglen);
+
+	sha_ctx->trailing_buf_len = (uint32_t)(in_ctx->count &
+						(SHA256_BLOCK_SIZE-1));
+
+	if (!(in_ctx->count))
+		sha_ctx->first_blk = 1;
+	else
+		sha_ctx->first_blk = 0;
+
+	return 0;
+}
+
+
+static int _sha_update(struct ahash_request  *req, uint32_t sha_block_size)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	uint32_t total, len, i, num_sg;
+	uint8_t *k_src = NULL;
+	uint32_t sha_pad_len = 0;
+	uint32_t end_src = 0;
+	uint32_t trailing_buf_len = 0;
+	uint32_t nbytes, index = 0;
+	uint32_t saved_length = 0;
+	int ret = 0;
+
+	/* check for trailing buffer from previous updates and append it */
+	total = req->nbytes + sha_ctx->trailing_buf_len;
+	len = req->nbytes;
+
+	if (total <= sha_block_size) {
+		i = 0;
+
+		k_src = &sha_ctx->trailing_buf[sha_ctx->trailing_buf_len];
+		while (len > 0) {
+			memcpy(k_src, sg_virt(&req->src[i]),
+							req->src[i].length);
+			len -= req->src[i].length;
+			k_src += req->src[i].length;
+			i++;
+		}
+		sha_ctx->trailing_buf_len = total;
+		if (sha_ctx->alg == QCE_HASH_SHA1)
+			_update_sha1_ctx(req);
+		if (sha_ctx->alg == QCE_HASH_SHA256)
+			_update_sha256_ctx(req);
+		return 0;
+	}
+
+	/* save the original req structure fields*/
+	rctx->src = req->src;
+	rctx->nbytes = req->nbytes;
+
+	memcpy(sha_ctx->tmp_tbuf, sha_ctx->trailing_buf,
+					sha_ctx->trailing_buf_len);
+	k_src = &sha_ctx->trailing_buf[0];
+	/*
+	 * The crypto engine consumes whole blocks only: hash the largest
+	 * block-aligned prefix now and carry the remainder over as the
+	 * new trailing buffer.
+	 */
+	sha_pad_len = ALIGN(total, sha_block_size) - total;
+	trailing_buf_len = sha_block_size - sha_pad_len;
+	nbytes = total - trailing_buf_len;
+	num_sg = qcrypto_count_sg(req->src, req->nbytes);
+
+	len = sha_ctx->trailing_buf_len;
+	i = 0;
+
+	while (len < nbytes) {
+		if ((len + req->src[i].length) > nbytes)
+			break;
+		len += req->src[i].length;
+		i++;
+	}
+
+	end_src = i;
+	if (len < nbytes) {
+		uint32_t remnant = (nbytes - len);
+		memcpy(k_src, (sg_virt(&req->src[i]) + remnant),
+				(req->src[i].length - remnant));
+		k_src += (req->src[i].length - remnant);
+		saved_length = req->src[i].length;
+		index = i;
+		req->src[i].length = remnant;
+		i++;
+	}
+
+	while (i < num_sg) {
+		memcpy(k_src, sg_virt(&req->src[i]), req->src[i].length);
+		k_src += req->src[i].length;
+		i++;
+	}
+
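+	/*
+	 * If bytes were carried over from the previous update, build a new
+	 * scatterlist with the saved trailing bytes as the first entry,
+	 * followed by the caller's entries, so the engine sees a single
+	 * block-aligned stream.
+	 */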
+	if (sha_ctx->trailing_buf_len) {
+		num_sg = end_src + 2;
+		sha_ctx->sg = kzalloc(num_sg * (sizeof(struct scatterlist)),
+								GFP_KERNEL);
+		if (sha_ctx->sg == NULL) {
+			pr_err("qcrypto: failed to allocate sha_ctx->sg\n");
+			return -ENOMEM;
+		}
+
+		sg_set_buf(&sha_ctx->sg[0], sha_ctx->tmp_tbuf,
+						sha_ctx->trailing_buf_len);
+		for (i = 1; i < num_sg; i++)
+			sg_set_buf(&sha_ctx->sg[i], sg_virt(&req->src[i-1]),
+							req->src[i-1].length);
+
+		req->src = sha_ctx->sg;
+		sg_mark_end(&sha_ctx->sg[num_sg - 1]);
+	} else
+		sg_mark_end(&req->src[end_src]);
+
+	req->nbytes = nbytes;
+	if (saved_length > 0)
+		rctx->src[index].length = saved_length;
+	sha_ctx->trailing_buf_len = trailing_buf_len;
+
+	ret =  _qcrypto_queue_req(cp, &req->base);
+
+	return ret;
+}
+
+static int _sha1_update(struct ahash_request  *req)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx;
+
+	sha_state_ctx->count += req->nbytes;
+	return _sha_update(req, SHA1_BLOCK_SIZE);
+}
+
+static int _sha256_update(struct ahash_request  *req)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx;
+
+	sha_state_ctx->count += req->nbytes;
+	return _sha_update(req, SHA256_BLOCK_SIZE);
+}
+
+static int _sha_final(struct ahash_request *req, uint32_t sha_block_size)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	int ret = 0;
+
+	sha_ctx->last_blk = 1;
+
+	/* save the original req structure fields*/
+	rctx->src = req->src;
+	rctx->nbytes = req->nbytes;
+
+	sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->trailing_buf,
+					sha_ctx->trailing_buf_len);
+	sg_mark_end(&sha_ctx->tmp_sg);
+
+	req->src = &sha_ctx->tmp_sg;
+	req->nbytes = sha_ctx->trailing_buf_len;
+
+	ret =  _qcrypto_queue_req(cp, &req->base);
+
+	return ret;
+}
+
+static int _sha1_final(struct ahash_request  *req)
+{
+	return _sha_final(req, SHA1_BLOCK_SIZE);
+}
+
+static int _sha256_final(struct ahash_request  *req)
+{
+	return _sha_final(req, SHA256_BLOCK_SIZE);
+}
+
+static int _sha_digest(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_priv *cp = sha_ctx->cp;
+	int ret = 0;
+
+	/* save the original req structure fields*/
+	rctx->src = req->src;
+	rctx->nbytes = req->nbytes;
+
+	sha_ctx->last_blk = 1;
+	ret =  _qcrypto_queue_req(cp, &req->base);
+
+	return ret;
+}
+
+static int _sha1_digest(struct ahash_request *req)
+{
+	_sha1_init(req);
+	return _sha_digest(req);
+}
+
+static int _sha256_digest(struct ahash_request *req)
+{
+	_sha256_init(req);
+	return _sha_digest(req);
+}
+
+static void _crypto_sha_hmac_ahash_req_complete(
+	struct crypto_async_request *req, int err)
+{
+	struct completion *ahash_req_complete = req->data;
+
+	if (err == -EINPROGRESS)
+		return;
+	complete(ahash_req_complete);
+}
+
+static int _sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+		unsigned int len)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+	int ret = 0;
+
+	sha_ctx->in_buf = kzalloc(len, GFP_KERNEL);
+	if (sha_ctx->in_buf == NULL) {
+		pr_err("qcrypto: failed to allocate sha_ctx->in_buf\n");
+		return -ENOMEM;
+	}
+	memcpy(sha_ctx->in_buf, key, len);
+	sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->in_buf, len);
+	sg_mark_end(&sha_ctx->tmp_sg);
+
+	ahash_request_set_crypt(sha_ctx->ahash_req, &sha_ctx->tmp_sg,
+				&sha_ctx->authkey[0], len);
+
+	ret = _sha_digest(sha_ctx->ahash_req);
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		ret = wait_for_completion_interruptible(
+					&sha_ctx->ahash_req_complete);
+		INIT_COMPLETION(sha_ctx->ahash_req_complete);
+	}
+
+	sha_ctx->authkey_in_len = len;
+	kfree(sha_ctx->in_buf);
+	sha_ctx->in_buf = NULL;
+
+	return ret;
+}
+
+static int _sha1_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+							unsigned int len)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+
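+	/*
+	 * Per RFC 2104, a key longer than the block size is hashed first
+	 * and its digest is used as the HMAC key.
+	 */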
+	if (len <= SHA1_BLOCK_SIZE)
+		memcpy(&sha_ctx->authkey[0], key, len);
+	else {
+		_sha_init(sha_ctx);
+		sha_ctx->alg = QCE_HASH_SHA1;
+		memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
+						SHA1_DIGEST_SIZE);
+		sha_ctx->diglen = SHA1_DIGEST_SIZE;
+		_sha_hmac_setkey(tfm, key, len);
+	}
+	return 0;
+}
+
+static int _sha256_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+							unsigned int len)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+
+	if (len <= SHA256_BLOCK_SIZE)
+		memcpy(&sha_ctx->authkey[0], key, len);
+	else {
+		_sha_init(sha_ctx);
+		sha_ctx->alg = QCE_HASH_SHA256;
+		memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
+						SHA256_DIGEST_SIZE);
+		sha_ctx->diglen = SHA256_DIGEST_SIZE;
+		_sha_hmac_setkey(tfm, key, len);
+	}
+
+	return 0;
+}
+
+static int _sha_hmac_init_ihash(struct ahash_request *req,
+						uint32_t sha_block_size)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	int i;
+
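+	/*
+	 * Build the HMAC inner pad (key XOR 0x36, RFC 2104) and queue it
+	 * as the first block of the inner hash via the trailing buffer.
+	 */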
+	for (i = 0; i < sha_block_size; i++)
+		sha_ctx->trailing_buf[i] = sha_ctx->authkey[i] ^ 0x36;
+	sha_ctx->trailing_buf_len = sha_block_size;
+
+	return 0;
+}
+
+static int _sha1_hmac_init(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct crypto_stat *pstat;
+	int ret = 0;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+	pstat->sha1_hmac_digest++;
+
+	_sha_init(sha_ctx);
+	memset(&sha_ctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE);
+	memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
+						SHA1_DIGEST_SIZE);
+	sha_ctx->diglen = SHA1_DIGEST_SIZE;
+	_update_sha1_ctx(req);
+
+	if (cp->ce_support.sha_hmac) {
+		sha_ctx->alg = QCE_HASH_SHA1_HMAC;
+	} else {
+		sha_ctx->alg = QCE_HASH_SHA1;
+		ret = _sha_hmac_init_ihash(req, SHA1_BLOCK_SIZE);
+	}
+
+	return ret;
+}
+
+static int _sha256_hmac_init(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct crypto_stat *pstat;
+	int ret = 0;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+	pstat->sha256_hmac_digest++;
+
+	_sha_init(sha_ctx);
+	memset(&sha_ctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE);
+	memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
+						SHA256_DIGEST_SIZE);
+	sha_ctx->diglen = SHA256_DIGEST_SIZE;
+	_update_sha256_ctx(req);
+
+	if (cp->ce_support.sha_hmac) {
+		sha_ctx->alg = QCE_HASH_SHA256_HMAC;
+	} else {
+		sha_ctx->alg = QCE_HASH_SHA256;
+		ret = _sha_hmac_init_ihash(req, SHA256_BLOCK_SIZE);
+	}
+
+	return ret;
+}
+
+static int _sha1_hmac_update(struct ahash_request *req)
+{
+	return _sha1_update(req);
+}
+
+static int _sha256_hmac_update(struct ahash_request *req)
+{
+	return _sha256_update(req);
+}
+
+static int _sha_hmac_outer_hash(struct ahash_request *req,
+		uint32_t sha_digest_size, uint32_t sha_block_size)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_priv *cp = sha_ctx->cp;
+	int i;
+
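+	/*
+	 * Outer hash per RFC 2104: H((key XOR 0x5c) || inner digest).
+	 * The opad block and the inner digest are packed into tmp_tbuf
+	 * and hashed as a single request.
+	 */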
+	for (i = 0; i < sha_block_size; i++)
+		sha_ctx->tmp_tbuf[i] = sha_ctx->authkey[i] ^ 0x5c;
+
+	/* save the original req structure fields*/
+	rctx->src = req->src;
+	rctx->nbytes = req->nbytes;
+
+	memcpy(&sha_ctx->tmp_tbuf[sha_block_size], &sha_ctx->digest[0],
+						 sha_digest_size);
+
+	sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->tmp_tbuf, sha_block_size +
+							sha_digest_size);
+	sg_mark_end(&sha_ctx->tmp_sg);
+	req->src = &sha_ctx->tmp_sg;
+	req->nbytes = sha_block_size + sha_digest_size;
+
+	_sha_init(sha_ctx);
+	if (sha_ctx->alg == QCE_HASH_SHA1) {
+		memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
+							SHA1_DIGEST_SIZE);
+		sha_ctx->diglen = SHA1_DIGEST_SIZE;
+	} else {
+		memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
+							SHA256_DIGEST_SIZE);
+		sha_ctx->diglen = SHA256_DIGEST_SIZE;
+	}
+
+	sha_ctx->last_blk = 1;
+	return  _qcrypto_queue_req(cp, &req->base);
+}
+
+static int _sha_hmac_inner_hash(struct ahash_request *req,
+			uint32_t sha_digest_size, uint32_t sha_block_size)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct ahash_request *areq = sha_ctx->ahash_req;
+	struct crypto_priv *cp = sha_ctx->cp;
+	int ret = 0;
+
+	sha_ctx->last_blk = 1;
+
+	sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->trailing_buf,
+					sha_ctx->trailing_buf_len);
+	sg_mark_end(&sha_ctx->tmp_sg);
+
+	ahash_request_set_crypt(areq, &sha_ctx->tmp_sg, &sha_ctx->digest[0],
+						sha_ctx->trailing_buf_len);
+	sha_ctx->last_blk = 1;
+	ret =  _qcrypto_queue_req(cp, &areq->base);
+
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		ret = wait_for_completion_interruptible(
+					&sha_ctx->ahash_req_complete);
+		INIT_COMPLETION(sha_ctx->ahash_req_complete);
+	}
+
+	return ret;
+}
+
+static int _sha1_hmac_final(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	int ret = 0;
+
+	if (cp->ce_support.sha_hmac)
+		return _sha_final(req, SHA1_BLOCK_SIZE);
+	else {
+		ret = _sha_hmac_inner_hash(req, SHA1_DIGEST_SIZE,
+							SHA1_BLOCK_SIZE);
+		if (ret)
+			return ret;
+		return _sha_hmac_outer_hash(req, SHA1_DIGEST_SIZE,
+							SHA1_BLOCK_SIZE);
+	}
+}
+
+static int _sha256_hmac_final(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	int ret = 0;
+
+	if (cp->ce_support.sha_hmac)
+		return _sha_final(req, SHA256_BLOCK_SIZE);
+	else {
+		ret = _sha_hmac_inner_hash(req, SHA256_DIGEST_SIZE,
+							SHA256_BLOCK_SIZE);
+		if (ret)
+			return ret;
+		return _sha_hmac_outer_hash(req, SHA256_DIGEST_SIZE,
+							SHA256_BLOCK_SIZE);
+	}
+}
+
+static int _sha1_hmac_digest(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+	pstat->sha1_hmac_digest++;
+
+	_sha_init(sha_ctx);
+	memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
+							SHA1_DIGEST_SIZE);
+	sha_ctx->diglen = SHA1_DIGEST_SIZE;
+	sha_ctx->alg = QCE_HASH_SHA1_HMAC;
+
+	return _sha_digest(req);
+}
+
+static int _sha256_hmac_digest(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+	pstat->sha256_hmac_digest++;
+
+	_sha_init(sha_ctx);
+	memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
+						SHA256_DIGEST_SIZE);
+	sha_ctx->diglen = SHA256_DIGEST_SIZE;
+	sha_ctx->alg = QCE_HASH_SHA256_HMAC;
+
+	return _sha_digest(req);
+}
+
+static struct ahash_alg _qcrypto_ahash_algos[] = {
+	{
+		.init		=	_sha1_init,
+		.update		=	_sha1_update,
+		.final		=	_sha1_final,
+		.export		=	_sha1_export,
+		.import		=	_sha1_import,
+		.digest		=	_sha1_digest,
+		.halg		= {
+			.digestsize	= SHA1_DIGEST_SIZE,
+			.statesize	= sizeof(struct sha1_state),
+			.base	= {
+				.cra_name	 = "sha1",
+				.cra_driver_name = "qcrypto-sha1",
+				.cra_priority	 = 300,
+				.cra_flags	 = CRYPTO_ALG_TYPE_AHASH |
+							 CRYPTO_ALG_ASYNC,
+				.cra_blocksize	 = SHA1_BLOCK_SIZE,
+				.cra_ctxsize	 =
+						sizeof(struct qcrypto_sha_ctx),
+				.cra_alignmask	 = 0,
+				.cra_type	 = &crypto_ahash_type,
+				.cra_module	 = THIS_MODULE,
+				.cra_init	 = _qcrypto_ahash_cra_init,
+				.cra_exit	 = _qcrypto_ahash_cra_exit,
+			},
+		},
+	},
+	{
+		.init		=	_sha256_init,
+		.update		=	_sha256_update,
+		.final		=	_sha256_final,
+		.export		=	_sha256_export,
+		.import		=	_sha256_import,
+		.digest		=	_sha256_digest,
+		.halg		= {
+			.digestsize	= SHA256_DIGEST_SIZE,
+			.statesize	= sizeof(struct sha256_state),
+			.base		= {
+				.cra_name	 = "sha256",
+				.cra_driver_name = "qcrypto-sha256",
+				.cra_priority	 = 300,
+				.cra_flags	 = CRYPTO_ALG_TYPE_AHASH |
+							CRYPTO_ALG_ASYNC,
+				.cra_blocksize	 = SHA256_BLOCK_SIZE,
+				.cra_ctxsize	 =
+						sizeof(struct qcrypto_sha_ctx),
+				.cra_alignmask	 = 0,
+				.cra_type	 = &crypto_ahash_type,
+				.cra_module	 = THIS_MODULE,
+				.cra_init	 = _qcrypto_ahash_cra_init,
+				.cra_exit	 = _qcrypto_ahash_cra_exit,
+			},
+		},
+	},
+};
+
+static struct ahash_alg _qcrypto_sha_hmac_algos[] = {
+	{
+		.init		=	_sha1_hmac_init,
+		.update		=	_sha1_hmac_update,
+		.final		=	_sha1_hmac_final,
+		.export		=	_sha1_export,
+		.import		=	_sha1_import,
+		.digest		=	_sha1_hmac_digest,
+		.setkey		=	_sha1_hmac_setkey,
+		.halg		= {
+			.digestsize	= SHA1_DIGEST_SIZE,
+			.statesize	= sizeof(struct sha1_state),
+			.base	= {
+				.cra_name	 = "hmac(sha1)",
+				.cra_driver_name = "qcrypto-hmac-sha1",
+				.cra_priority	 = 300,
+				.cra_flags	 = CRYPTO_ALG_TYPE_AHASH |
+							 CRYPTO_ALG_ASYNC,
+				.cra_blocksize	 = SHA1_BLOCK_SIZE,
+				.cra_ctxsize	 =
+						sizeof(struct qcrypto_sha_ctx),
+				.cra_alignmask	 = 0,
+				.cra_type	 = &crypto_ahash_type,
+				.cra_module	 = THIS_MODULE,
+				.cra_init	 = _qcrypto_ahash_hmac_cra_init,
+				.cra_exit	 = _qcrypto_ahash_cra_exit,
+			},
+		},
+	},
+	{
+		.init		=	_sha256_hmac_init,
+		.update		=	_sha256_hmac_update,
+		.final		=	_sha256_hmac_final,
+		.export		=	_sha256_export,
+		.import		=	_sha256_import,
+		.digest		=	_sha256_hmac_digest,
+		.setkey		=	_sha256_hmac_setkey,
+		.halg		= {
+			.digestsize	= SHA256_DIGEST_SIZE,
+			.statesize	= sizeof(struct sha256_state),
+			.base		= {
+				.cra_name	 = "hmac(sha256)",
+				.cra_driver_name = "qcrypto-hmac-sha256",
+				.cra_priority	 = 300,
+				.cra_flags	 = CRYPTO_ALG_TYPE_AHASH |
+							CRYPTO_ALG_ASYNC,
+				.cra_blocksize	 = SHA256_BLOCK_SIZE,
+				.cra_ctxsize	 =
+						sizeof(struct qcrypto_sha_ctx),
+				.cra_alignmask	 = 0,
+				.cra_type	 = &crypto_ahash_type,
+				.cra_module	 = THIS_MODULE,
+				.cra_init	 = _qcrypto_ahash_hmac_cra_init,
+				.cra_exit	 = _qcrypto_ahash_cra_exit,
+			},
+		},
+	},
+};
+
+static struct crypto_alg _qcrypto_ablk_cipher_algos[] = {
+	{
+		.cra_name		= "ecb(aes)",
+		.cra_driver_name	= "qcrypto-ecb-aes",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= AES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_ablkcipher_init,
+		.cra_u		= {
+			.ablkcipher = {
+				.min_keysize	= AES_MIN_KEY_SIZE,
+				.max_keysize	= AES_MAX_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_aes,
+				.encrypt	= _qcrypto_enc_aes_ecb,
+				.decrypt	= _qcrypto_dec_aes_ecb,
+			},
+		},
+	},
+	{
+		.cra_name	= "cbc(aes)",
+		.cra_driver_name = "qcrypto-cbc-aes",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= AES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_ablkcipher_init,
+		.cra_u		= {
+			.ablkcipher = {
+				.ivsize		= AES_BLOCK_SIZE,
+				.min_keysize	= AES_MIN_KEY_SIZE,
+				.max_keysize	= AES_MAX_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_aes,
+				.encrypt	= _qcrypto_enc_aes_cbc,
+				.decrypt	= _qcrypto_dec_aes_cbc,
+			},
+		},
+	},
+	{
+		.cra_name	= "ctr(aes)",
+		.cra_driver_name = "qcrypto-ctr-aes",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= AES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_ablkcipher_init,
+		.cra_u		= {
+			.ablkcipher = {
+				.ivsize		= AES_BLOCK_SIZE,
+				.min_keysize	= AES_MIN_KEY_SIZE,
+				.max_keysize	= AES_MAX_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_aes,
+				.encrypt	= _qcrypto_enc_aes_ctr,
+				.decrypt	= _qcrypto_dec_aes_ctr,
+			},
+		},
+	},
+	{
+		.cra_name		= "ecb(des)",
+		.cra_driver_name	= "qcrypto-ecb-des",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= DES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_ablkcipher_init,
+		.cra_u		= {
+			.ablkcipher = {
+				.min_keysize	= DES_KEY_SIZE,
+				.max_keysize	= DES_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_des,
+				.encrypt	= _qcrypto_enc_des_ecb,
+				.decrypt	= _qcrypto_dec_des_ecb,
+			},
+		},
+	},
+	{
+		.cra_name	= "cbc(des)",
+		.cra_driver_name = "qcrypto-cbc-des",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= DES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_ablkcipher_init,
+		.cra_u		= {
+			.ablkcipher = {
+				.ivsize		= DES_BLOCK_SIZE,
+				.min_keysize	= DES_KEY_SIZE,
+				.max_keysize	= DES_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_des,
+				.encrypt	= _qcrypto_enc_des_cbc,
+				.decrypt	= _qcrypto_dec_des_cbc,
+			},
+		},
+	},
+	{
+		.cra_name		= "ecb(des3_ede)",
+		.cra_driver_name	= "qcrypto-ecb-3des",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_ablkcipher_init,
+		.cra_u		= {
+			.ablkcipher = {
+				.min_keysize	= DES3_EDE_KEY_SIZE,
+				.max_keysize	= DES3_EDE_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_3des,
+				.encrypt	= _qcrypto_enc_3des_ecb,
+				.decrypt	= _qcrypto_dec_3des_ecb,
+			},
+		},
+	},
+	{
+		.cra_name	= "cbc(des3_ede)",
+		.cra_driver_name = "qcrypto-cbc-3des",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_ablkcipher_init,
+		.cra_u		= {
+			.ablkcipher = {
+				.ivsize		= DES3_EDE_BLOCK_SIZE,
+				.min_keysize	= DES3_EDE_KEY_SIZE,
+				.max_keysize	= DES3_EDE_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_3des,
+				.encrypt	= _qcrypto_enc_3des_cbc,
+				.decrypt	= _qcrypto_dec_3des_cbc,
+			},
+		},
+	},
+};
+
+static struct crypto_alg _qcrypto_ablk_cipher_xts_algo = {
+	.cra_name	= "xts(aes)",
+	.cra_driver_name = "qcrypto-xts-aes",
+	.cra_priority	= 300,
+	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize	= AES_BLOCK_SIZE,
+	.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+	.cra_alignmask	= 0,
+	.cra_type	= &crypto_ablkcipher_type,
+	.cra_module	= THIS_MODULE,
+	.cra_init	= _qcrypto_cra_ablkcipher_init,
+	.cra_u		= {
+		.ablkcipher = {
+			.ivsize		= AES_BLOCK_SIZE,
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.setkey		= _qcrypto_setkey_aes,
+			.encrypt	= _qcrypto_enc_aes_xts,
+			.decrypt	= _qcrypto_dec_aes_xts,
+		},
+	},
+};
+
+static struct crypto_alg _qcrypto_aead_sha1_hmac_algos[] = {
+	{
+		.cra_name	= "authenc(hmac(sha1),cbc(aes))",
+		.cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-aes",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize  = AES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_aead_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_aead_init,
+		.cra_u		= {
+			.aead = {
+				.ivsize         = AES_BLOCK_SIZE,
+				.maxauthsize    = SHA1_DIGEST_SIZE,
+				.setkey = _qcrypto_aead_setkey,
+				.setauthsize = _qcrypto_aead_setauthsize,
+				.encrypt = _qcrypto_aead_encrypt_aes_cbc,
+				.decrypt = _qcrypto_aead_decrypt_aes_cbc,
+				.givencrypt = _qcrypto_aead_givencrypt_aes_cbc,
+				.geniv = "<built-in>",
+			}
+		}
+	},
+
+#ifdef QCRYPTO_AEAD_AES_CTR
+	{
+		.cra_name	= "authenc(hmac(sha1),ctr(aes))",
+		.cra_driver_name = "qcrypto-aead-hmac-sha1-ctr-aes",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize  = AES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_aead_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_aead_init,
+		.cra_u		= {
+			.aead = {
+				.ivsize         = AES_BLOCK_SIZE,
+				.maxauthsize    = SHA1_DIGEST_SIZE,
+				.setkey = _qcrypto_aead_setkey,
+				.setauthsize = _qcrypto_aead_setauthsize,
+				.encrypt = _qcrypto_aead_encrypt_aes_ctr,
+				.decrypt = _qcrypto_aead_decrypt_aes_ctr,
+				.givencrypt = _qcrypto_aead_givencrypt_aes_ctr,
+				.geniv = "<built-in>",
+			}
+		}
+	},
+#endif /* QCRYPTO_AEAD_AES_CTR */
+	{
+		.cra_name	= "authenc(hmac(sha1),cbc(des))",
+		.cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-des",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize  = DES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_aead_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_aead_init,
+		.cra_u		= {
+			.aead = {
+				.ivsize         = DES_BLOCK_SIZE,
+				.maxauthsize    = SHA1_DIGEST_SIZE,
+				.setkey = _qcrypto_aead_setkey,
+				.setauthsize = _qcrypto_aead_setauthsize,
+				.encrypt = _qcrypto_aead_encrypt_des_cbc,
+				.decrypt = _qcrypto_aead_decrypt_des_cbc,
+				.givencrypt = _qcrypto_aead_givencrypt_des_cbc,
+				.geniv = "<built-in>",
+			}
+		}
+	},
+	{
+		.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
+		.cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-3des",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize  = DES3_EDE_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_aead_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_aead_init,
+		.cra_u		= {
+			.aead = {
+				.ivsize         = DES3_EDE_BLOCK_SIZE,
+				.maxauthsize    = SHA1_DIGEST_SIZE,
+				.setkey = _qcrypto_aead_setkey,
+				.setauthsize = _qcrypto_aead_setauthsize,
+				.encrypt = _qcrypto_aead_encrypt_3des_cbc,
+				.decrypt = _qcrypto_aead_decrypt_3des_cbc,
+				.givencrypt = _qcrypto_aead_givencrypt_3des_cbc,
+				.geniv = "<built-in>",
+			}
+		}
+	},
+};
+
+static struct crypto_alg _qcrypto_aead_ccm_algo = {
+	.cra_name	= "ccm(aes)",
+	.cra_driver_name = "qcrypto-aes-ccm",
+	.cra_priority	= 300,
+	.cra_flags	= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+	.cra_blocksize  = AES_BLOCK_SIZE,
+	.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+	.cra_alignmask	= 0,
+	.cra_type	= &crypto_aead_type,
+	.cra_module	= THIS_MODULE,
+	.cra_init	= _qcrypto_cra_aead_init,
+	.cra_u		= {
+		.aead = {
+			.ivsize         = AES_BLOCK_SIZE,
+			.maxauthsize    = SHA1_DIGEST_SIZE,
+			.setkey = _qcrypto_aead_ccm_setkey,
+			.setauthsize = _qcrypto_aead_ccm_setauthsize,
+			.encrypt = _qcrypto_aead_encrypt_aes_ccm,
+			.decrypt = _qcrypto_aead_decrypt_aes_ccm,
+			.geniv = "<built-in>",
+		}
+	}
+};
+
+static int  _qcrypto_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	void *handle;
+	struct crypto_priv *cp;
+	int i;
+	struct msm_ce_hw_support *platform_support;
+
+	if (pdev->id >= MAX_CRYPTO_DEVICE) {
+		printk(KERN_ERR "%s: device id %d exceeds allowed %d\n",
+				__func__, pdev->id, MAX_CRYPTO_DEVICE);
+		return -ENOENT;
+	}
+
+	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+	if (!cp) {
+		pr_err("qcrypto: failed to allocate crypto_priv\n");
+		return -ENOMEM;
+	}
+
+	/* open qce */
+	handle = qce_open(pdev, &rc);
+	if (handle == NULL) {
+		kfree(cp);
+		platform_set_drvdata(pdev, NULL);
+		return rc;
+	}
+
+	INIT_LIST_HEAD(&cp->alg_list);
+	platform_set_drvdata(pdev, cp);
+	spin_lock_init(&cp->lock);
+	tasklet_init(&cp->done_tasklet, req_done, (unsigned long)cp);
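+	/* allow up to 50 pending requests before backlogging kicks in */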
+	crypto_init_queue(&cp->queue, 50);
+	cp->qce = handle;
+	cp->pdev = pdev;
+	qce_hw_support(cp->qce, &cp->ce_support);
+	platform_support = (struct msm_ce_hw_support *)pdev->dev.platform_data;
+	cp->platform_support.ce_shared = platform_support->ce_shared;
+	cp->platform_support.shared_ce_resource =
+				platform_support->shared_ce_resource;
+	cp->platform_support.hw_key_support =
+				platform_support->hw_key_support;
+	cp->ce_lock_count = 0;
+	cp->platform_support.sha_hmac = platform_support->sha_hmac;
+
+	if (cp->platform_support.ce_shared)
+		INIT_WORK(&cp->unlock_ce_ws, qcrypto_unlock_ce);
+
+	/* register crypto cipher algorithms the device supports */
+	for (i = 0; i < ARRAY_SIZE(_qcrypto_ablk_cipher_algos); i++) {
+		struct qcrypto_alg *q_alg;
+
+		q_alg = _qcrypto_cipher_alg_alloc(cp,
+					&_qcrypto_ablk_cipher_algos[i]);
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+		rc = crypto_register_alg(&q_alg->cipher_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+					q_alg->cipher_alg.cra_driver_name);
+			kfree(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+					q_alg->cipher_alg.cra_driver_name);
+		}
+	}
+
+	/* register crypto cipher algorithms the device supports */
+	if (cp->ce_support.aes_xts) {
+		struct qcrypto_alg *q_alg;
+
+		q_alg = _qcrypto_cipher_alg_alloc(cp,
+					&_qcrypto_ablk_cipher_xts_algo);
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+		rc = crypto_register_alg(&q_alg->cipher_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+					q_alg->cipher_alg.cra_driver_name);
+			kfree(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+					q_alg->cipher_alg.cra_driver_name);
+		}
+	}
+
+	/*
+	 * Register crypto hash (sha1 and sha256) algorithms the
+	 * device supports
+	 */
+	for (i = 0; i < ARRAY_SIZE(_qcrypto_ahash_algos); i++) {
+		struct qcrypto_alg *q_alg = NULL;
+
+		q_alg = _qcrypto_sha_alg_alloc(cp, &_qcrypto_ahash_algos[i]);
+
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+
+		rc = crypto_register_ahash(&q_alg->sha_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+				q_alg->sha_alg.halg.base.cra_driver_name);
+			kfree(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+				q_alg->sha_alg.halg.base.cra_driver_name);
+		}
+	}
+
+	/* register crypto aead (hmac-sha1) algorithms the device supports */
+	if (cp->ce_support.sha1_hmac_20 || cp->ce_support.sha1_hmac) {
+		for (i = 0; i < ARRAY_SIZE(_qcrypto_aead_sha1_hmac_algos);
+									i++) {
+			struct qcrypto_alg *q_alg;
+
+			q_alg = _qcrypto_cipher_alg_alloc(cp,
+					&_qcrypto_aead_sha1_hmac_algos[i]);
+			if (IS_ERR(q_alg)) {
+				rc = PTR_ERR(q_alg);
+				goto err;
+			}
+
+			rc = crypto_register_alg(&q_alg->cipher_alg);
+			if (rc) {
+				dev_err(&pdev->dev,
+					"%s alg registration failed\n",
+					q_alg->cipher_alg.cra_driver_name);
+				kfree(q_alg);
+			} else {
+				list_add_tail(&q_alg->entry, &cp->alg_list);
+				dev_info(&pdev->dev, "%s\n",
+					q_alg->cipher_alg.cra_driver_name);
+			}
+		}
+	}
+
+	if ((cp->ce_support.sha_hmac) || (cp->platform_support.sha_hmac)) {
+		/* register crypto hmac algorithms the device supports */
+		for (i = 0; i < ARRAY_SIZE(_qcrypto_sha_hmac_algos); i++) {
+			struct qcrypto_alg *q_alg = NULL;
+
+			q_alg = _qcrypto_sha_alg_alloc(cp,
+						&_qcrypto_sha_hmac_algos[i]);
+
+			if (IS_ERR(q_alg)) {
+				rc = PTR_ERR(q_alg);
+				goto err;
+			}
+
+			rc = crypto_register_ahash(&q_alg->sha_alg);
+			if (rc) {
+				dev_err(&pdev->dev,
+				"%s alg registration failed\n",
+				q_alg->sha_alg.halg.base.cra_driver_name);
+				kfree(q_alg);
+			} else {
+				list_add_tail(&q_alg->entry, &cp->alg_list);
+				dev_info(&pdev->dev, "%s\n",
+				q_alg->sha_alg.halg.base.cra_driver_name);
+			}
+		}
+	}
+	/*
+	 * Register crypto cipher (aes-ccm) algorithms the
+	 * device supports
+	 */
+	if (cp->ce_support.aes_ccm) {
+		struct qcrypto_alg *q_alg;
+
+		q_alg = _qcrypto_cipher_alg_alloc(cp, &_qcrypto_aead_ccm_algo);
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+		rc = crypto_register_alg(&q_alg->cipher_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+					q_alg->cipher_alg.cra_driver_name);
+			kfree(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+					q_alg->cipher_alg.cra_driver_name);
+		}
+	}
+
+	return 0;
+err:
+	_qcrypto_remove(pdev);
+	return rc;
+}
+
+static struct platform_driver _qualcomm_crypto = {
+	.probe          = _qcrypto_probe,
+	.remove         = _qcrypto_remove,
+	.driver         = {
+		.owner  = THIS_MODULE,
+		.name   = "qcrypto",
+	},
+};
+
+static int _debug_qcrypto[MAX_CRYPTO_DEVICE];
+
+static int _debug_stats_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t _debug_stats_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	int rc = -EINVAL;
+	int qcrypto = *((int *) file->private_data);
+	int len;
+
+	len = _disp_stats(qcrypto);
+
+	rc = simple_read_from_buffer((void __user *) buf, count,
+			ppos, (void *) _debug_read_buf, len);
+
+	return rc;
+}
+
+static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+
+	int qcrypto = *((int *) file->private_data);
+
+	memset((char *)&_qcrypto_stat[qcrypto], 0, sizeof(struct crypto_stat));
+	return count;
+}
+
+static const struct file_operations _debug_stats_ops = {
+	.open =         _debug_stats_open,
+	.read =         _debug_stats_read,
+	.write =        _debug_stats_write,
+};
+
+static int _qcrypto_debug_init(void)
+{
+	int rc;
+	char name[DEBUG_MAX_FNAME];
+	int i;
+	struct dentry *dent;
+
+	_debug_dent = debugfs_create_dir("qcrypto", NULL);
+	if (IS_ERR_OR_NULL(_debug_dent)) {
+		pr_err("qcrypto debugfs_create_dir fail\n");
+		return _debug_dent ? PTR_ERR(_debug_dent) : -ENOMEM;
+	}
+
+	for (i = 0; i < MAX_CRYPTO_DEVICE; i++) {
+		snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", i+1);
+		_debug_qcrypto[i] = i;
+		dent = debugfs_create_file(name, 0644, _debug_dent,
+				&_debug_qcrypto[i], &_debug_stats_ops);
+		if (dent == NULL) {
+			pr_err("qcrypto debugfs_create_file fail\n");
+			rc = -ENOMEM;
+			goto err;
+		}
+	}
+	return 0;
+err:
+	debugfs_remove_recursive(_debug_dent);
+	return rc;
+}
+
+static int __init _qcrypto_init(void)
+{
+	int rc;
+
+	rc = _qcrypto_debug_init();
+	if (rc)
+		return rc;
+
+	return platform_driver_register(&_qualcomm_crypto);
+}
+
+static void __exit _qcrypto_exit(void)
+{
+	printk(KERN_INFO "%s: unregister qcrypto\n", __func__);
+	debugfs_remove_recursive(_debug_dent);
+	platform_driver_unregister(&_qualcomm_crypto);
+}
+
+module_init(_qcrypto_init);
+module_exit(_qcrypto_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm Crypto driver");
+MODULE_VERSION("1.18");
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 2967002..385e9c7 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -453,4 +453,38 @@
 	help
 	  Select this option to enable GPIO driver for the TPS65910
 	  chip family.
+
+config MPP_PMIC8901
+	tristate "Qualcomm PMIC8901 MPP"
+	depends on GPIOLIB && PMIC8901
+	default y
+	help
+	  Say yes here to support GPIO functionality on Qualcomm's
+	  PM8901 chip for MPP (Multi-Purpose Pin) pins. These pins
+	  work like GPIO pins when configured as digital input and/or
+	  output signals.
+
+config GPIO_PM8XXX
+	tristate "Qualcomm PM8xxx GPIO support"
+	depends on MFD_PM8XXX
+	default y if MFD_PM8XXX
+	help
+	  This option enables support for on-chip GPIO found on Qualcomm PM8xxx
+	  PMICs.
+
+config GPIO_PM8XXX_MPP
+	tristate "Support for Qualcomm PM8xxx MPP features"
+	depends on MFD_PM8XXX
+	default y if MFD_PM8XXX
+	help
+	  This is the multi-purpose pin (MPP) driver for Qualcomm PM 8xxx PMIC
+	  chips.
+
+config GPIO_PM8XXX_RPC
+	tristate "Qualcomm PM8xxx RPC based GPIO support"
+	depends on MSM_SMD
+	help
+	  This option enables support for on-chip GPIO found on Qualcomm PM8xxx
+	  PMICs through RPC.
+
 endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index b605f8e..1b2c4b1 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -48,3 +48,7 @@
 obj-$(CONFIG_GPIO_ML_IOH)	+= ml_ioh_gpio.o
 obj-$(CONFIG_AB8500_GPIO)       += ab8500-gpio.o
 obj-$(CONFIG_GPIO_TPS65910)	+= tps65910-gpio.o
+obj-$(CONFIG_MPP_PMIC8901)	+= pmic8901-mpp.o
+obj-$(CONFIG_GPIO_PM8XXX)	+= pm8xxx-gpio.o
+obj-$(CONFIG_GPIO_PM8XXX_MPP)	+= pm8xxx-mpp.o
+obj-$(CONFIG_GPIO_PM8XXX_RPC)	+= gpio-pm8xxx-rpc.o
diff --git a/drivers/gpio/gpio-pm8xxx-rpc.c b/drivers/gpio/gpio-pm8xxx-rpc.c
new file mode 100644
index 0000000..1acc741
--- /dev/null
+++ b/drivers/gpio/gpio-pm8xxx-rpc.c
@@ -0,0 +1,241 @@
+/*
+ * Qualcomm PMIC8XXX GPIO driver based on RPC
+ *
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/gpio-pm8xxx-rpc.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <mach/pmic.h>
+
+struct pm8xxx_gpio_rpc_chip {
+	struct list_head	link;
+	struct gpio_chip	gpio_chip;
+};
+
+static LIST_HEAD(pm8xxx_gpio_rpc_chips);
+static DEFINE_MUTEX(pm8xxx_gpio_chips_lock);
+
+static int pm8xxx_gpio_rpc_get(struct pm8xxx_gpio_rpc_chip *pm8xxx_gpio_chip,
+								unsigned gpio)
+{
+	int rc;
+
+	if (pm8xxx_gpio_chip == NULL ||
+			gpio >= pm8xxx_gpio_chip->gpio_chip.ngpio)
+		return -EINVAL;
+
+	rc =  pmic_gpio_get_value(gpio);
+
+	return rc;
+}
+
+static int pm8xxx_gpio_rpc_set(struct pm8xxx_gpio_rpc_chip *pm8xxx_gpio_chip,
+						 unsigned gpio, int value)
+{
+	int rc;
+
+	if (pm8xxx_gpio_chip == NULL ||
+			gpio >= pm8xxx_gpio_chip->gpio_chip.ngpio)
+		return -EINVAL;
+
+	rc = pmic_gpio_set_value(gpio, value);
+
+	return rc;
+}
+
+static int pm8xxx_gpio_rpc_set_direction(struct pm8xxx_gpio_rpc_chip
+			*pm8xxx_gpio_chip, unsigned gpio, int direction)
+{
+	int rc = 0;
+
+	if (!direction || pm8xxx_gpio_chip == NULL)
+		return -EINVAL;
+
+	if (direction ==  PM_GPIO_DIR_IN)
+		rc = pmic_gpio_direction_input(gpio);
+	else if (direction == PM_GPIO_DIR_OUT)
+		rc = pmic_gpio_direction_output(gpio);
+
+	return rc;
+}
+
+static int pm8xxx_gpio_rpc_read(struct gpio_chip *gpio_chip, unsigned offset)
+{
+	struct pm8xxx_gpio_rpc_chip *pm8xxx_gpio_chip =
+					dev_get_drvdata(gpio_chip->dev);
+
+	return pm8xxx_gpio_rpc_get(pm8xxx_gpio_chip, offset);
+}
+
+static void pm8xxx_gpio_rpc_write(struct gpio_chip *gpio_chip,
+						unsigned offset, int val)
+{
+	struct pm8xxx_gpio_rpc_chip *pm8xxx_gpio_chip =
+					dev_get_drvdata(gpio_chip->dev);
+
+	pm8xxx_gpio_rpc_set(pm8xxx_gpio_chip, offset, !!val);
+}
+
+static int pm8xxx_gpio_rpc_direction_input(struct gpio_chip *gpio_chip,
+							unsigned offset)
+{
+	struct pm8xxx_gpio_rpc_chip *pm8xxx_gpio_chip =
+					dev_get_drvdata(gpio_chip->dev);
+
+	return pm8xxx_gpio_rpc_set_direction(pm8xxx_gpio_chip, offset,
+							PM_GPIO_DIR_IN);
+}
+
+static int pm8xxx_gpio_rpc_direction_output(struct gpio_chip *gpio_chip,
+						unsigned offset, int val)
+{
+	int ret = 0;
+
+	struct pm8xxx_gpio_rpc_chip *pm8xxx_gpio_chip =
+					dev_get_drvdata(gpio_chip->dev);
+
+	ret = pm8xxx_gpio_rpc_set_direction(pm8xxx_gpio_chip, offset,
+							PM_GPIO_DIR_OUT);
+	if (!ret)
+		ret = pm8xxx_gpio_rpc_set(pm8xxx_gpio_chip, offset, !!val);
+
+	return ret;
+}
+
+static void pm8xxx_gpio_rpc_dbg_show(struct seq_file *s, struct gpio_chip
+								*gpio_chip)
+{
+	struct pm8xxx_gpio_rpc_chip *pmxx_gpio_chip =
+					dev_get_drvdata(gpio_chip->dev);
+	u8 state, mode;
+	const char *label;
+	int i;
+
+	for (i = 0; i < gpio_chip->ngpio; i++) {
+		label = gpiochip_is_requested(gpio_chip, i);
+		state = pm8xxx_gpio_rpc_get(pmxx_gpio_chip, i);
+		mode =  pmic_gpio_get_direction(i);
+		seq_printf(s, "gpio-%-3d (%-12.12s) %s %s",
+				gpio_chip->base + i,
+				label ? label : " ", mode ? "out" : "in",
+				state ? "hi" : "lo");
+		seq_printf(s, "\n");
+	}
+}
+
+static int __devinit pm8xxx_gpio_rpc_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct pm8xxx_gpio_rpc_chip *pm8xxx_gpio_chip;
+	const struct pm8xxx_gpio_rpc_platform_data *pdata =
+					pdev->dev.platform_data;
+
+	if (!pdata) {
+		pr_err("missing platform data\n");
+		return -EINVAL;
+	}
+
+	pm8xxx_gpio_chip = kzalloc(sizeof(struct pm8xxx_gpio_rpc_chip),
+								GFP_KERNEL);
+	if (!pm8xxx_gpio_chip) {
+		pr_err("Cannot allocate pm8xxx_gpio_chip\n");
+		return -ENOMEM;
+	}
+
+	pm8xxx_gpio_chip->gpio_chip.label = "pm8xxx-gpio-rpc";
+	pm8xxx_gpio_chip->gpio_chip.direction_input	=
+					pm8xxx_gpio_rpc_direction_input;
+	pm8xxx_gpio_chip->gpio_chip.direction_output	=
+					pm8xxx_gpio_rpc_direction_output;
+	pm8xxx_gpio_chip->gpio_chip.get		= pm8xxx_gpio_rpc_read;
+	pm8xxx_gpio_chip->gpio_chip.set		= pm8xxx_gpio_rpc_write;
+	pm8xxx_gpio_chip->gpio_chip.dbg_show	= pm8xxx_gpio_rpc_dbg_show;
+	pm8xxx_gpio_chip->gpio_chip.ngpio	= pdata->ngpios;
+	pm8xxx_gpio_chip->gpio_chip.can_sleep	= 1;
+	pm8xxx_gpio_chip->gpio_chip.dev		= &pdev->dev;
+	pm8xxx_gpio_chip->gpio_chip.base	= pdata->gpio_base;
+
+	mutex_lock(&pm8xxx_gpio_chips_lock);
+	list_add(&pm8xxx_gpio_chip->link, &pm8xxx_gpio_rpc_chips);
+	mutex_unlock(&pm8xxx_gpio_chips_lock);
+	platform_set_drvdata(pdev, pm8xxx_gpio_chip);
+
+	ret = gpiochip_add(&pm8xxx_gpio_chip->gpio_chip);
+	if (ret) {
+		pr_err("gpiochip_add failed ret = %d\n", ret);
+		goto reset_drvdata;
+	}
+
+	pr_info("OK: base=%d, ngpio=%d\n", pm8xxx_gpio_chip->gpio_chip.base,
+		pm8xxx_gpio_chip->gpio_chip.ngpio);
+
+	return 0;
+
+reset_drvdata:
+	mutex_lock(&pm8xxx_gpio_chips_lock);
+	list_del(&pm8xxx_gpio_chip->link);
+	mutex_unlock(&pm8xxx_gpio_chips_lock);
+	platform_set_drvdata(pdev, NULL);
+	kfree(pm8xxx_gpio_chip);
+	return ret;
+}
+
+static int __devexit pm8xxx_gpio_rpc_remove(struct platform_device *pdev)
+{
+	struct pm8xxx_gpio_rpc_chip *pm8xxx_gpio_chip =
+						platform_get_drvdata(pdev);
+
+	mutex_lock(&pm8xxx_gpio_chips_lock);
+	list_del(&pm8xxx_gpio_chip->link);
+	mutex_unlock(&pm8xxx_gpio_chips_lock);
+	platform_set_drvdata(pdev, NULL);
+	if (gpiochip_remove(&pm8xxx_gpio_chip->gpio_chip))
+		pr_err("failed to remove gpio chip\n");
+	kfree(pm8xxx_gpio_chip);
+	return 0;
+}
+
+static struct platform_driver pm8xxx_gpio_rpc_driver = {
+	.probe		= pm8xxx_gpio_rpc_probe,
+	.remove		= __devexit_p(pm8xxx_gpio_rpc_remove),
+	.driver		= {
+		.name	= PM8XXX_GPIO_DEV_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init pm8xxx_gpio_rpc_init(void)
+{
+	return platform_driver_register(&pm8xxx_gpio_rpc_driver);
+}
+postcore_initcall(pm8xxx_gpio_rpc_init);
+
+static void __exit pm8xxx_gpio_rpc_exit(void)
+{
+	platform_driver_unregister(&pm8xxx_gpio_rpc_driver);
+}
+module_exit(pm8xxx_gpio_rpc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC GPIO driver based on RPC");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:" PM8XXX_GPIO_DEV_NAME);
diff --git a/drivers/gpio/pm8xxx-gpio.c b/drivers/gpio/pm8xxx-gpio.c
new file mode 100644
index 0000000..026fd05
--- /dev/null
+++ b/drivers/gpio/pm8xxx-gpio.c
@@ -0,0 +1,458 @@
+/*
+ * Qualcomm PMIC8XXX GPIO driver
+ *
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/mfd/pm8xxx/core.h>
+#include <linux/mfd/pm8xxx/gpio.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+/* GPIO registers */
+#define	SSBI_REG_ADDR_GPIO_BASE		0x150
+#define	SSBI_REG_ADDR_GPIO(n)		(SSBI_REG_ADDR_GPIO_BASE + n)
+
+/* GPIO */
+#define	PM_GPIO_BANK_MASK		0x70
+#define	PM_GPIO_BANK_SHIFT		4
+#define	PM_GPIO_WRITE			0x80
+
+/* Bank 0 */
+#define	PM_GPIO_VIN_MASK		0x0E
+#define	PM_GPIO_VIN_SHIFT		1
+#define	PM_GPIO_MODE_ENABLE		0x01
+
+/* Bank 1 */
+#define	PM_GPIO_MODE_MASK		0x0C
+#define	PM_GPIO_MODE_SHIFT		2
+#define	PM_GPIO_OUT_BUFFER		0x02
+#define	PM_GPIO_OUT_INVERT		0x01
+
+#define	PM_GPIO_MODE_OFF		3
+#define	PM_GPIO_MODE_OUTPUT		2
+#define	PM_GPIO_MODE_INPUT		0
+#define	PM_GPIO_MODE_BOTH		1
+
+/* Bank 2 */
+#define	PM_GPIO_PULL_MASK		0x0E
+#define	PM_GPIO_PULL_SHIFT		1
+
+/* Bank 3 */
+#define	PM_GPIO_OUT_STRENGTH_MASK	0x0C
+#define	PM_GPIO_OUT_STRENGTH_SHIFT	2
+#define PM_GPIO_PIN_ENABLE		0x00
+#define	PM_GPIO_PIN_DISABLE		0x01
+
+/* Bank 4 */
+#define	PM_GPIO_FUNC_MASK		0x0E
+#define	PM_GPIO_FUNC_SHIFT		1
+
+/* Bank 5 */
+#define	PM_GPIO_NON_INT_POL_INV	0x08
+#define PM_GPIO_BANKS		6
+
+struct pm_gpio_chip {
+	struct list_head	link;
+	struct gpio_chip	gpio_chip;
+	spinlock_t		pm_lock;
+	u8			*bank1;
+	int			irq_base;
+};
+
+static LIST_HEAD(pm_gpio_chips);
+static DEFINE_MUTEX(pm_gpio_chips_lock);
+
+static int pm_gpio_get(struct pm_gpio_chip *pm_gpio_chip, unsigned gpio)
+{
+	int	mode;
+
+	if (pm_gpio_chip == NULL || gpio >= pm_gpio_chip->gpio_chip.ngpio)
+		return -EINVAL;
+
+	/*
+	 * Read the gpio value from the cached config bank 1 if the gpio is
+	 * an output; otherwise read it from the IRQ real-time status
+	 * register.
+	 */
+	mode = (pm_gpio_chip->bank1[gpio] & PM_GPIO_MODE_MASK) >>
+		PM_GPIO_MODE_SHIFT;
+	if (mode == PM_GPIO_MODE_OUTPUT)
+		return pm_gpio_chip->bank1[gpio] & PM_GPIO_OUT_INVERT;
+	else
+		return pm8xxx_read_irq_stat(pm_gpio_chip->gpio_chip.dev->parent,
+				pm_gpio_chip->irq_base + gpio);
+}
+
+static int pm_gpio_set(struct pm_gpio_chip *pm_gpio_chip,
+		unsigned gpio, int value)
+{
+	int	rc;
+	u8	bank1;
+	unsigned long flags;
+
+	if (pm_gpio_chip == NULL || gpio >= pm_gpio_chip->gpio_chip.ngpio)
+		return -EINVAL;
+
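+	/*
+	 * The output level is driven by the OUT_INVERT bit in bank 1, so
+	 * setting the pin rewrites the cached bank 1 value with that bit
+	 * updated.
+	 */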
+	spin_lock_irqsave(&pm_gpio_chip->pm_lock, flags);
+	bank1 = PM_GPIO_WRITE
+			| (pm_gpio_chip->bank1[gpio] & ~PM_GPIO_OUT_INVERT);
+
+	if (value)
+		bank1 |= PM_GPIO_OUT_INVERT;
+
+	pm_gpio_chip->bank1[gpio] = bank1;
+	rc = pm8xxx_writeb(pm_gpio_chip->gpio_chip.dev->parent,
+				SSBI_REG_ADDR_GPIO(gpio), bank1);
+	spin_unlock_irqrestore(&pm_gpio_chip->pm_lock, flags);
+
+	if (rc)
+		pr_err("FAIL pm8xxx_writeb(): rc=%d. "
+		       "(gpio=%d, value=%d)\n",
+		       rc, gpio, value);
+
+	return rc;
+}
+
+static int dir_map[] = {
+	PM_GPIO_MODE_OFF,
+	PM_GPIO_MODE_OUTPUT,
+	PM_GPIO_MODE_INPUT,
+	PM_GPIO_MODE_BOTH,
+};
+
+static int pm_gpio_set_direction(struct pm_gpio_chip *pm_gpio_chip,
+			      unsigned gpio, int direction)
+{
+	int	rc;
+	u8	bank1;
+	unsigned long flags;
+
+	if (!direction || pm_gpio_chip == NULL)
+		return -EINVAL;
+
+	spin_lock_irqsave(&pm_gpio_chip->pm_lock, flags);
+	bank1 = PM_GPIO_WRITE
+			| (pm_gpio_chip->bank1[gpio] & ~PM_GPIO_MODE_MASK);
+
+	bank1 |= ((dir_map[direction] << PM_GPIO_MODE_SHIFT)
+		  & PM_GPIO_MODE_MASK);
+
+	pm_gpio_chip->bank1[gpio] = bank1;
+	rc = pm8xxx_writeb(pm_gpio_chip->gpio_chip.dev->parent,
+				SSBI_REG_ADDR_GPIO(gpio), bank1);
+	spin_unlock_irqrestore(&pm_gpio_chip->pm_lock, flags);
+
+	if (rc)
+		pr_err("Failed on pm8xxx_writeb(): rc=%d (GPIO config)\n",
+			rc);
+
+	return rc;
+}
+
+static int pm_gpio_init_bank1(struct pm_gpio_chip *pm_gpio_chip)
+{
+	int i, rc;
+	u8 bank;
+
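+	/*
+	 * A bank must be selected by writing its number before the same
+	 * register can be read back, so select bank 1 for each gpio and
+	 * cache its contents for later read-modify-write cycles.
+	 */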
+	for (i = 0; i < pm_gpio_chip->gpio_chip.ngpio; i++) {
+		bank = 1 << PM_GPIO_BANK_SHIFT;
+		rc = pm8xxx_writeb(pm_gpio_chip->gpio_chip.dev->parent,
+				SSBI_REG_ADDR_GPIO(i),
+				bank);
+		if (rc) {
+			pr_err("error setting bank rc=%d\n", rc);
+			return rc;
+		}
+
+		rc = pm8xxx_readb(pm_gpio_chip->gpio_chip.dev->parent,
+				SSBI_REG_ADDR_GPIO(i),
+				&pm_gpio_chip->bank1[i]);
+		if (rc) {
+			pr_err("error reading bank 1 rc=%d\n", rc);
+			return rc;
+		}
+	}
+	return 0;
+}
+
+static int pm_gpio_to_irq(struct gpio_chip *gpio_chip, unsigned offset)
+{
+	struct pm_gpio_chip *pm_gpio_chip = dev_get_drvdata(gpio_chip->dev);
+
+	return pm_gpio_chip->irq_base + offset;
+}
+
+static int pm_gpio_read(struct gpio_chip *gpio_chip, unsigned offset)
+{
+	struct pm_gpio_chip *pm_gpio_chip = dev_get_drvdata(gpio_chip->dev);
+
+	return pm_gpio_get(pm_gpio_chip, offset);
+}
+
+static void pm_gpio_write(struct gpio_chip *gpio_chip,
+		unsigned offset, int val)
+{
+	struct pm_gpio_chip *pm_gpio_chip = dev_get_drvdata(gpio_chip->dev);
+
+	pm_gpio_set(pm_gpio_chip, offset, val);
+}
+
+static int pm_gpio_direction_input(struct gpio_chip *gpio_chip,
+		unsigned offset)
+{
+	struct pm_gpio_chip *pm_gpio_chip = dev_get_drvdata(gpio_chip->dev);
+
+	return pm_gpio_set_direction(pm_gpio_chip, offset, PM_GPIO_DIR_IN);
+}
+
+static int pm_gpio_direction_output(struct gpio_chip *gpio_chip,
+		unsigned offset,
+		int val)
+{
+	int ret;
+	struct pm_gpio_chip *pm_gpio_chip = dev_get_drvdata(gpio_chip->dev);
+
+	ret = pm_gpio_set_direction(pm_gpio_chip, offset, PM_GPIO_DIR_OUT);
+	if (!ret)
+		ret = pm_gpio_set(pm_gpio_chip, offset, val);
+
+	return ret;
+}
+
+static void pm_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gpio_chip)
+{
+	static const char * const cmode[] = { "in", "in/out", "out", "off" };
+	struct pm_gpio_chip *pm_gpio_chip = dev_get_drvdata(gpio_chip->dev);
+	u8 mode, state, bank;
+	const char *label;
+	int i, j;
+
+	for (i = 0; i < gpio_chip->ngpio; i++) {
+		label = gpiochip_is_requested(gpio_chip, i);
+		mode = (pm_gpio_chip->bank1[i] & PM_GPIO_MODE_MASK) >>
+			PM_GPIO_MODE_SHIFT;
+		state = pm_gpio_get(pm_gpio_chip, i);
+		seq_printf(s, "gpio-%-3d (%-12.12s) %-10.10s"
+				" %s",
+				gpio_chip->base + i,
+				label ? label : "--",
+				cmode[mode],
+				state ? "hi" : "lo");
+		for (j = 0; j < PM_GPIO_BANKS; j++) {
+			bank = j << PM_GPIO_BANK_SHIFT;
+			pm8xxx_writeb(gpio_chip->dev->parent,
+					SSBI_REG_ADDR_GPIO(i),
+					bank);
+			pm8xxx_readb(gpio_chip->dev->parent,
+					SSBI_REG_ADDR_GPIO(i),
+					&bank);
+			seq_printf(s, " 0x%02x", bank);
+		}
+		seq_printf(s, "\n");
+	}
+}
+
+static int __devinit pm_gpio_probe(struct platform_device *pdev)
+{
+	int ret;
+	const struct pm8xxx_gpio_platform_data *pdata = pdev->dev.platform_data;
+	struct pm_gpio_chip *pm_gpio_chip;
+
+	if (!pdata) {
+		pr_err("missing platform data\n");
+		return -EINVAL;
+	}
+
+	pm_gpio_chip = kzalloc(sizeof(struct pm_gpio_chip), GFP_KERNEL);
+	if (!pm_gpio_chip) {
+		pr_err("Cannot allocate pm_gpio_chip\n");
+		return -ENOMEM;
+	}
+
+	pm_gpio_chip->bank1 = kzalloc(sizeof(u8) * pdata->gpio_cdata.ngpios,
+					GFP_KERNEL);
+	if (!pm_gpio_chip->bank1) {
+		pr_err("Cannot allocate pm_gpio_chip->bank1\n");
+		kfree(pm_gpio_chip);
+		return -ENOMEM;
+	}
+
+	spin_lock_init(&pm_gpio_chip->pm_lock);
+	pm_gpio_chip->gpio_chip.label = "pm-gpio";
+	pm_gpio_chip->gpio_chip.direction_input	= pm_gpio_direction_input;
+	pm_gpio_chip->gpio_chip.direction_output = pm_gpio_direction_output;
+	pm_gpio_chip->gpio_chip.to_irq = pm_gpio_to_irq;
+	pm_gpio_chip->gpio_chip.get = pm_gpio_read;
+	pm_gpio_chip->gpio_chip.set = pm_gpio_write;
+	pm_gpio_chip->gpio_chip.dbg_show = pm_gpio_dbg_show;
+	pm_gpio_chip->gpio_chip.ngpio = pdata->gpio_cdata.ngpios;
+	pm_gpio_chip->gpio_chip.can_sleep = 1;
+	pm_gpio_chip->gpio_chip.dev = &pdev->dev;
+	pm_gpio_chip->gpio_chip.base = pdata->gpio_base;
+	pm_gpio_chip->irq_base = platform_get_irq(pdev, 0);
+	mutex_lock(&pm_gpio_chips_lock);
+	list_add(&pm_gpio_chip->link, &pm_gpio_chips);
+	mutex_unlock(&pm_gpio_chips_lock);
+	platform_set_drvdata(pdev, pm_gpio_chip);
+
+	ret = gpiochip_add(&pm_gpio_chip->gpio_chip);
+	if (ret) {
+		pr_err("gpiochip_add failed ret = %d\n", ret);
+		goto reset_drvdata;
+	}
+
+	ret = pm_gpio_init_bank1(pm_gpio_chip);
+	if (ret) {
+		pr_err("gpio init bank failed ret = %d\n", ret);
+		goto remove_chip;
+	}
+
+	pr_info("OK: base=%d, ngpio=%d\n", pm_gpio_chip->gpio_chip.base,
+		pm_gpio_chip->gpio_chip.ngpio);
+
+	return 0;
+
+remove_chip:
+	if (gpiochip_remove(&pm_gpio_chip->gpio_chip))
+		pr_err("failed to remove gpio chip\n");
+reset_drvdata:
+	mutex_lock(&pm_gpio_chips_lock);
+	list_del(&pm_gpio_chip->link);
+	mutex_unlock(&pm_gpio_chips_lock);
+	platform_set_drvdata(pdev, NULL);
+	kfree(pm_gpio_chip->bank1);
+	kfree(pm_gpio_chip);
+	return ret;
+}
+
+static int __devexit pm_gpio_remove(struct platform_device *pdev)
+{
+	struct pm_gpio_chip *pm_gpio_chip
+		= platform_get_drvdata(pdev);
+
+	mutex_lock(&pm_gpio_chips_lock);
+	list_del(&pm_gpio_chip->link);
+	mutex_unlock(&pm_gpio_chips_lock);
+	platform_set_drvdata(pdev, NULL);
+	if (gpiochip_remove(&pm_gpio_chip->gpio_chip))
+		pr_err("failed to remove gpio chip\n");
+	kfree(pm_gpio_chip->bank1);
+	kfree(pm_gpio_chip);
+	return 0;
+}
+
+int pm8xxx_gpio_config(int gpio, struct pm_gpio *param)
+{
+	int	rc, pm_gpio = -EINVAL;
+	u8	bank[8];
+	unsigned long flags;
+	struct pm_gpio_chip *pm_gpio_chip;
+	struct gpio_chip *gpio_chip;
+
+	if (param == NULL)
+		return -EINVAL;
+
+	mutex_lock(&pm_gpio_chips_lock);
+	list_for_each_entry(pm_gpio_chip, &pm_gpio_chips, link) {
+		gpio_chip = &pm_gpio_chip->gpio_chip;
+		if (gpio >= gpio_chip->base
+			&& gpio < gpio_chip->base + gpio_chip->ngpio) {
+			pm_gpio = gpio - gpio_chip->base;
+			break;
+		}
+	}
+	mutex_unlock(&pm_gpio_chips_lock);
+	if (pm_gpio < 0) {
+		pr_err("called on gpio %d not handled by any pmic\n", gpio);
+		return -EINVAL;
+	}
+
+	/* Select banks and configure the gpio */
+	bank[0] = PM_GPIO_WRITE |
+		((param->vin_sel << PM_GPIO_VIN_SHIFT) &
+			PM_GPIO_VIN_MASK) |
+		PM_GPIO_MODE_ENABLE;
+	bank[1] = PM_GPIO_WRITE |
+		((1 << PM_GPIO_BANK_SHIFT) &
+			PM_GPIO_BANK_MASK) |
+		((dir_map[param->direction] <<
+			PM_GPIO_MODE_SHIFT) &
+			PM_GPIO_MODE_MASK) |
+		((param->direction & PM_GPIO_DIR_OUT) ?
+			((param->output_buffer & 1) ?
+			 PM_GPIO_OUT_BUFFER : 0) : 0) |
+		((param->direction & PM_GPIO_DIR_OUT) ?
+			param->output_value & 0x01 : 0);
+	bank[2] = PM_GPIO_WRITE |
+		((2 << PM_GPIO_BANK_SHIFT) &
+			PM_GPIO_BANK_MASK) |
+		((param->pull << PM_GPIO_PULL_SHIFT) &
+			PM_GPIO_PULL_MASK);
+	bank[3] = PM_GPIO_WRITE |
+		((3 << PM_GPIO_BANK_SHIFT) &
+			PM_GPIO_BANK_MASK) |
+		((param->out_strength <<
+			PM_GPIO_OUT_STRENGTH_SHIFT) &
+			PM_GPIO_OUT_STRENGTH_MASK) |
+		(param->disable_pin ?
+			PM_GPIO_PIN_DISABLE : PM_GPIO_PIN_ENABLE);
+	bank[4] = PM_GPIO_WRITE |
+		((4 << PM_GPIO_BANK_SHIFT) &
+			PM_GPIO_BANK_MASK) |
+		((param->function << PM_GPIO_FUNC_SHIFT) &
+			PM_GPIO_FUNC_MASK);
+	bank[5] = PM_GPIO_WRITE |
+		((5 << PM_GPIO_BANK_SHIFT) & PM_GPIO_BANK_MASK) |
+		(param->inv_int_pol ? 0 : PM_GPIO_NON_INT_POL_INV);
+
+	spin_lock_irqsave(&pm_gpio_chip->pm_lock, flags);
+	/* Remember bank1 for later use */
+	pm_gpio_chip->bank1[pm_gpio] = bank[1];
+	rc = pm8xxx_write_buf(pm_gpio_chip->gpio_chip.dev->parent,
+			SSBI_REG_ADDR_GPIO(pm_gpio), bank, 6);
+	spin_unlock_irqrestore(&pm_gpio_chip->pm_lock, flags);
+
+	if (rc)
+		pr_err("Failed on pm8xxx_write_buf() rc=%d (GPIO config)\n",
+			rc);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(pm8xxx_gpio_config);
+
+static struct platform_driver pm_gpio_driver = {
+	.probe		= pm_gpio_probe,
+	.remove		= __devexit_p(pm_gpio_remove),
+	.driver		= {
+		.name	= PM8XXX_GPIO_DEV_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init pm_gpio_init(void)
+{
+	return platform_driver_register(&pm_gpio_driver);
+}
+postcore_initcall(pm_gpio_init);
+
+static void __exit pm_gpio_exit(void)
+{
+	platform_driver_unregister(&pm_gpio_driver);
+}
+module_exit(pm_gpio_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC GPIO driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:" PM8XXX_GPIO_DEV_NAME);
diff --git a/drivers/gpio/pm8xxx-mpp.c b/drivers/gpio/pm8xxx-mpp.c
new file mode 100644
index 0000000..82a11a2
--- /dev/null
+++ b/drivers/gpio/pm8xxx-mpp.c
@@ -0,0 +1,334 @@
+/*
+ * Qualcomm PM8XXX Multi-Purpose Pin (MPP) driver
+ *
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/mfd/pm8xxx/core.h>
+#include <linux/mfd/pm8xxx/mpp.h>
+
+/* MPP Type */
+#define	PM8XXX_MPP_TYPE_MASK		0xE0
+#define	PM8XXX_MPP_TYPE_SHIFT		5
+
+/* MPP Config Level */
+#define	PM8XXX_MPP_CONFIG_LVL_MASK	0x1C
+#define	PM8XXX_MPP_CONFIG_LVL_SHIFT	2
+
+/* MPP Config Control */
+#define	PM8XXX_MPP_CONFIG_CTRL_MASK	0x03
+#define	PM8XXX_MPP_CONFIG_CTRL_SHIFT	0
+
+struct pm8xxx_mpp_chip {
+	struct list_head	link;
+	struct gpio_chip	gpio_chip;
+	spinlock_t		pm_lock;
+	u8			*ctrl_reg;
+	int			mpp_base;
+	int			irq_base;
+	int			nmpps;
+	u16			base_addr;
+};
+
+static LIST_HEAD(pm8xxx_mpp_chips);
+static DEFINE_MUTEX(pm8xxx_mpp_chips_lock);
+
+static int pm8xxx_mpp_write(struct pm8xxx_mpp_chip *mpp_chip, u16 offset,
+				u8 val, u8 mask)
+{
+	u8 reg;
+	int rc;
+	unsigned long flags;
+
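+	/*
+	 * Read-modify-write against the cached shadow of the control
+	 * register; the shadow is refreshed only when the SSBI write
+	 * succeeds.
+	 */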
+	spin_lock_irqsave(&mpp_chip->pm_lock, flags);
+
+	reg = (mpp_chip->ctrl_reg[offset] & ~mask) | (val & mask);
+	rc = pm8xxx_writeb(mpp_chip->gpio_chip.dev->parent,
+				mpp_chip->base_addr + offset, reg);
+	if (!rc)
+		mpp_chip->ctrl_reg[offset] = reg;
+
+	spin_unlock_irqrestore(&mpp_chip->pm_lock, flags);
+
+	return rc;
+}
+
+static int pm8xxx_mpp_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+	struct pm8xxx_mpp_chip *mpp_chip = dev_get_drvdata(chip->dev);
+
+	return mpp_chip->irq_base + offset;
+}
+
+static int pm8xxx_mpp_get(struct gpio_chip *chip, unsigned offset)
+{
+	struct pm8xxx_mpp_chip *mpp_chip = dev_get_drvdata(chip->dev);
+	int rc;
+
+	if ((mpp_chip->ctrl_reg[offset] & PM8XXX_MPP_TYPE_MASK) >>
+			PM8XXX_MPP_TYPE_SHIFT == PM8XXX_MPP_TYPE_D_OUTPUT)
+		rc = mpp_chip->ctrl_reg[offset] & PM8XXX_MPP_CONFIG_CTRL_MASK;
+	else
+		rc = pm8xxx_read_irq_stat(mpp_chip->gpio_chip.dev->parent,
+				mpp_chip->irq_base + offset);
+
+	return rc;
+}
+
+static void pm8xxx_mpp_set(struct gpio_chip *chip, unsigned offset, int val)
+{
+	struct pm8xxx_mpp_chip *mpp_chip = dev_get_drvdata(chip->dev);
+	u8 reg = val ? PM8XXX_MPP_DOUT_CTRL_HIGH : PM8XXX_MPP_DOUT_CTRL_LOW;
+	int rc;
+
+	rc = pm8xxx_mpp_write(mpp_chip, offset, reg,
+			PM8XXX_MPP_CONFIG_CTRL_MASK);
+	if (rc)
+		pr_err("pm8xxx_mpp_write(): rc=%d\n", rc);
+}
+
+static int pm8xxx_mpp_dir_input(struct gpio_chip *chip, unsigned offset)
+{
+	struct pm8xxx_mpp_chip *mpp_chip = dev_get_drvdata(chip->dev);
+	int rc = pm8xxx_mpp_write(mpp_chip, offset,
+			PM8XXX_MPP_TYPE_D_INPUT << PM8XXX_MPP_TYPE_SHIFT,
+			PM8XXX_MPP_TYPE_MASK);
+
+	if (rc)
+		pr_err("pm8xxx_mpp_write(): rc=%d\n", rc);
+	return rc;
+}
+
+static int pm8xxx_mpp_dir_output(struct gpio_chip *chip,
+		unsigned offset, int val)
+{
+	struct pm8xxx_mpp_chip *mpp_chip = dev_get_drvdata(chip->dev);
+	u8 reg = (PM8XXX_MPP_TYPE_D_OUTPUT << PM8XXX_MPP_TYPE_SHIFT) |
+		(val & PM8XXX_MPP_CONFIG_CTRL_MASK);
+	u8 mask = PM8XXX_MPP_TYPE_MASK | PM8XXX_MPP_CONFIG_CTRL_MASK;
+	int rc = pm8xxx_mpp_write(mpp_chip, offset, reg, mask);
+
+	if (rc)
+		pr_err("pm8xxx_mpp_write(): rc=%d\n", rc);
+	return rc;
+}
+
+static void pm8xxx_mpp_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+	static const char * const ctype[] = {	"d_in", "d_out", "bi_dir",
+						"a_in", "a_out", "sink",
+						"dtest_sink", "dtest_out"
+	};
+	struct pm8xxx_mpp_chip *mpp_chip = dev_get_drvdata(chip->dev);
+	u8 type, state;
+	const char *label;
+	int i;
+
+	for (i = 0; i < mpp_chip->nmpps; i++) {
+		label = gpiochip_is_requested(chip, i);
+		type = (mpp_chip->ctrl_reg[i] & PM8XXX_MPP_TYPE_MASK) >>
+			PM8XXX_MPP_TYPE_SHIFT;
+		state = pm8xxx_mpp_get(chip, i);
+		seq_printf(s, "gpio-%-3d (%-12.12s) %-10.10s"
+				" %s 0x%02x\n",
+				chip->base + i,
+				label ? label : "--",
+				ctype[type],
+				state ? "hi" : "lo",
+				mpp_chip->ctrl_reg[i]);
+	}
+}
+
+int pm8xxx_mpp_config(unsigned mpp, struct pm8xxx_mpp_config_data *config)
+{
+	struct pm8xxx_mpp_chip *mpp_chip;
+	int rc, found = 0;
+	u8 config_reg, mask;
+
+	if (!config) {
+		pr_err("config not specified for MPP %d\n", mpp);
+		return -EINVAL;
+	}
+
+	mutex_lock(&pm8xxx_mpp_chips_lock);
+	list_for_each_entry(mpp_chip, &pm8xxx_mpp_chips, link) {
+		if (mpp >= mpp_chip->mpp_base
+		    && mpp < mpp_chip->mpp_base + mpp_chip->nmpps) {
+			found = 1;
+			break;
+		}
+	}
+	mutex_unlock(&pm8xxx_mpp_chips_lock);
+	if (!found) {
+		pr_err("called on mpp %d not handled by any pmic\n", mpp);
+		return -EINVAL;
+	}
+
+	mask = PM8XXX_MPP_TYPE_MASK | PM8XXX_MPP_CONFIG_LVL_MASK |
+		PM8XXX_MPP_CONFIG_CTRL_MASK;
+	config_reg = (config->type << PM8XXX_MPP_TYPE_SHIFT)
+			& PM8XXX_MPP_TYPE_MASK;
+	config_reg |= (config->level << PM8XXX_MPP_CONFIG_LVL_SHIFT)
+			& PM8XXX_MPP_CONFIG_LVL_MASK;
+	config_reg |= config->control & PM8XXX_MPP_CONFIG_CTRL_MASK;
+
+	rc = pm8xxx_mpp_write(mpp_chip, mpp - mpp_chip->mpp_base, config_reg,
+			      mask);
+
+	if (rc)
+		pr_err("pm8xxx_mpp_write(): rc=%d\n", rc);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(pm8xxx_mpp_config);
+
+static int __devinit pm8xxx_mpp_reg_init(struct pm8xxx_mpp_chip *mpp_chip)
+{
+	int rc, i;
+
+	for (i = 0; i < mpp_chip->nmpps; i++) {
+		rc = pm8xxx_readb(mpp_chip->gpio_chip.dev->parent,
+					mpp_chip->base_addr + i,
+					&mpp_chip->ctrl_reg[i]);
+		if (rc) {
+			pr_err("failed to read register 0x%x rc=%d\n",
+						mpp_chip->base_addr + i, rc);
+			return rc;
+		}
+	}
+	return 0;
+}
+
+static int __devinit pm8xxx_mpp_probe(struct platform_device *pdev)
+{
+	int rc;
+	const struct pm8xxx_mpp_platform_data *pdata = pdev->dev.platform_data;
+	struct pm8xxx_mpp_chip *mpp_chip;
+
+	if (!pdata) {
+		pr_err("missing platform data\n");
+		return -EINVAL;
+	}
+
+	mpp_chip = kzalloc(sizeof(struct pm8xxx_mpp_chip), GFP_KERNEL);
+	if (!mpp_chip) {
+		pr_err("Cannot allocate %d bytes\n",
+			sizeof(struct pm8xxx_mpp_chip));
+		return -ENOMEM;
+	}
+
+	mpp_chip->ctrl_reg = kzalloc(pdata->core_data.nmpps, GFP_KERNEL);
+	if (!mpp_chip->ctrl_reg) {
+		pr_err("Cannot allocate %d bytes\n", pdata->core_data.nmpps);
+		rc = -ENOMEM;
+		goto free_mpp_chip;
+	}
+
+	spin_lock_init(&mpp_chip->pm_lock);
+
+	mpp_chip->gpio_chip.label = PM8XXX_MPP_DEV_NAME;
+	mpp_chip->gpio_chip.direction_input = pm8xxx_mpp_dir_input;
+	mpp_chip->gpio_chip.direction_output = pm8xxx_mpp_dir_output;
+	mpp_chip->gpio_chip.to_irq = pm8xxx_mpp_to_irq;
+	mpp_chip->gpio_chip.get = pm8xxx_mpp_get;
+	mpp_chip->gpio_chip.set = pm8xxx_mpp_set;
+	mpp_chip->gpio_chip.dbg_show = pm8xxx_mpp_dbg_show;
+	mpp_chip->gpio_chip.ngpio = pdata->core_data.nmpps;
+	mpp_chip->gpio_chip.can_sleep = 1;
+	mpp_chip->gpio_chip.dev = &pdev->dev;
+	mpp_chip->gpio_chip.base = pdata->mpp_base;
+	mpp_chip->irq_base = platform_get_irq(pdev, 0);
+	mpp_chip->mpp_base = pdata->mpp_base;
+	mpp_chip->base_addr = pdata->core_data.base_addr;
+	mpp_chip->nmpps = pdata->core_data.nmpps;
+
+	mutex_lock(&pm8xxx_mpp_chips_lock);
+	list_add(&mpp_chip->link, &pm8xxx_mpp_chips);
+	mutex_unlock(&pm8xxx_mpp_chips_lock);
+
+	platform_set_drvdata(pdev, mpp_chip);
+
+	rc = gpiochip_add(&mpp_chip->gpio_chip);
+	if (rc) {
+		pr_err("gpiochip_add failed, rc=%d\n", rc);
+		goto reset_drvdata;
+	}
+
+	rc = pm8xxx_mpp_reg_init(mpp_chip);
+	if (rc) {
+		pr_err("failed to read MPP ctrl registers, rc=%d\n", rc);
+		goto remove_chip;
+	}
+
+	pr_info("OK: base=%d, ngpio=%d\n", mpp_chip->gpio_chip.base,
+		mpp_chip->gpio_chip.ngpio);
+
+	return 0;
+
+remove_chip:
+	if (gpiochip_remove(&mpp_chip->gpio_chip))
+		pr_err("failed to remove gpio chip\n");
+reset_drvdata:
+	platform_set_drvdata(pdev, NULL);
+free_mpp_chip:
+	kfree(mpp_chip);
+	return rc;
+}
+
+static int __devexit pm8xxx_mpp_remove(struct platform_device *pdev)
+{
+	struct pm8xxx_mpp_chip *mpp_chip = platform_get_drvdata(pdev);
+
+	mutex_lock(&pm8xxx_mpp_chips_lock);
+	list_del(&mpp_chip->link);
+	mutex_unlock(&pm8xxx_mpp_chips_lock);
+	platform_set_drvdata(pdev, NULL);
+	if (gpiochip_remove(&mpp_chip->gpio_chip))
+		pr_err("failed to remove gpio chip\n");
+	kfree(mpp_chip->ctrl_reg);
+	kfree(mpp_chip);
+
+	return 0;
+}
+
+static struct platform_driver pm8xxx_mpp_driver = {
+	.probe		= pm8xxx_mpp_probe,
+	.remove		= __devexit_p(pm8xxx_mpp_remove),
+	.driver		= {
+		.name	= PM8XXX_MPP_DEV_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init pm8xxx_mpp_init(void)
+{
+	return platform_driver_register(&pm8xxx_mpp_driver);
+}
+postcore_initcall(pm8xxx_mpp_init);
+
+static void __exit pm8xxx_mpp_exit(void)
+{
+	platform_driver_unregister(&pm8xxx_mpp_driver);
+}
+module_exit(pm8xxx_mpp_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PM8XXX MPP driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:" PM8XXX_MPP_DEV_NAME);
diff --git a/drivers/gpio/pmic8901-mpp.c b/drivers/gpio/pmic8901-mpp.c
new file mode 100644
index 0000000..85e6539
--- /dev/null
+++ b/drivers/gpio/pmic8901-mpp.c
@@ -0,0 +1,231 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * Qualcomm PMIC8901 MPP driver
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/mfd/pmic8901.h>
+#include <mach/mpp.h>
+#include <linux/seq_file.h>
+
+/* MPP Control Registers */
+#define	SSBI_MPP_CNTRL_BASE		0x27
+#define	SSBI_MPP_CNTRL(n)		(SSBI_MPP_CNTRL_BASE + (n))
+
+/* MPP Type */
+#define	PM8901_MPP_TYPE_MASK		0xE0
+#define	PM8901_MPP_TYPE_SHIFT		5
+
+/* MPP Config Level */
+#define	PM8901_MPP_CONFIG_LVL_MASK	0x1C
+#define	PM8901_MPP_CONFIG_LVL_SHIFT	2
+
+/* MPP Config Control */
+#define	PM8901_MPP_CONFIG_CTL_MASK	0x03
+
+struct pm8901_mpp_chip {
+	struct gpio_chip	chip;
+	struct pm8901_chip	*pm_chip;
+	u8			ctrl[PM8901_MPPS];
+};
+
+static int pm8901_mpp_write(struct pm8901_chip *chip, u16 addr, u8 val,
+		u8 mask, u8 *bak)
+{
+	u8 reg = (*bak & ~mask) | (val & mask);
+	int rc = pm8901_write(chip, addr, &reg, 1);
+	if (!rc)
+		*bak = reg;
+	return rc;
+}
+
+static int pm8901_mpp_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+	struct pm8901_gpio_platform_data *pdata;
+	pdata = chip->dev->platform_data;
+	return pdata->irq_base + offset;
+}
+
+static int pm8901_mpp_get(struct gpio_chip *chip, unsigned offset)
+{
+	struct pm8901_mpp_chip *mpp_chip = dev_get_drvdata(chip->dev);
+	int ret;
+
+	if ((mpp_chip->ctrl[offset] & PM8901_MPP_TYPE_MASK) >>
+			PM8901_MPP_TYPE_SHIFT == PM_MPP_TYPE_D_OUTPUT)
+		ret = mpp_chip->ctrl[offset] & PM8901_MPP_CONFIG_CTL_MASK;
+	else
+		ret = pm8901_irq_get_rt_status(mpp_chip->pm_chip,
+				pm8901_mpp_to_irq(chip, offset));
+	return ret;
+}
+
+static void pm8901_mpp_set(struct gpio_chip *chip, unsigned offset, int val)
+{
+	struct pm8901_mpp_chip *mpp_chip = dev_get_drvdata(chip->dev);
+	u8 reg = val ? PM_MPP_DOUT_CTL_HIGH : PM_MPP_DOUT_CTL_LOW;
+	int rc;
+
+	rc = pm8901_mpp_write(mpp_chip->pm_chip, SSBI_MPP_CNTRL(offset),
+			reg, PM8901_MPP_CONFIG_CTL_MASK,
+			&mpp_chip->ctrl[offset]);
+	if (rc)
+		pr_err("%s: pm8901_mpp_write(): rc=%d\n", __func__, rc);
+}
+
+static int pm8901_mpp_dir_input(struct gpio_chip *chip, unsigned offset)
+{
+	struct pm8901_mpp_chip *mpp_chip = dev_get_drvdata(chip->dev);
+	int rc = pm8901_mpp_write(mpp_chip->pm_chip,
+			SSBI_MPP_CNTRL(offset),
+			PM_MPP_TYPE_D_INPUT << PM8901_MPP_TYPE_SHIFT,
+			PM8901_MPP_TYPE_MASK, &mpp_chip->ctrl[offset]);
+	if (rc)
+		pr_err("%s: pm8901_mpp_write(): rc=%d\n", __func__, rc);
+	return rc;
+}
+
+static int pm8901_mpp_dir_output(struct gpio_chip *chip,
+		unsigned offset, int val)
+{
+	struct pm8901_mpp_chip *mpp_chip = dev_get_drvdata(chip->dev);
+	u8 reg = (PM_MPP_TYPE_D_OUTPUT << PM8901_MPP_TYPE_SHIFT) |
+		(val & PM8901_MPP_CONFIG_CTL_MASK);
+	u8 mask = PM8901_MPP_TYPE_MASK | PM8901_MPP_CONFIG_CTL_MASK;
+	int rc = pm8901_mpp_write(mpp_chip->pm_chip,
+			SSBI_MPP_CNTRL(offset), reg, mask,
+			&mpp_chip->ctrl[offset]);
+	if (rc)
+		pr_err("%s: pm8901_mpp_write(): rc=%d\n", __func__, rc);
+	return rc;
+}
+
+static void pm8901_mpp_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+	static const char *ctype[] = { "d_in", "d_out", "bi_dir", "a_in",
+		"a_out", "sink", "dtest_sink", "dtest_out" };
+	struct pm8901_mpp_chip *mpp_chip = dev_get_drvdata(chip->dev);
+	u8 type, state;
+	const char *label;
+	int i;
+
+	for (i = 0; i < PM8901_MPPS; i++) {
+		label = gpiochip_is_requested(chip, i);
+		type = (mpp_chip->ctrl[i] & PM8901_MPP_TYPE_MASK) >>
+			PM8901_MPP_TYPE_SHIFT;
+		state = pm8901_mpp_get(chip, i);
+		seq_printf(s, "gpio-%-3d (%-12.12s) %-10.10s"
+				" %s 0x%02x\n",
+				chip->base + i,
+				label ? label : "--",
+				ctype[type],
+				state ? "hi" : "lo",
+				mpp_chip->ctrl[i]);
+	}
+}
+
+static struct pm8901_mpp_chip pm8901_mpp_chip = {
+	.chip = {
+		.label			= "pm8901-mpp",
+		.to_irq			= pm8901_mpp_to_irq,
+		.get			= pm8901_mpp_get,
+		.set			= pm8901_mpp_set,
+		.direction_input	= pm8901_mpp_dir_input,
+		.direction_output	= pm8901_mpp_dir_output,
+		.dbg_show		= pm8901_mpp_dbg_show,
+		.ngpio			= PM8901_MPPS,
+	},
+};
+
+int pm8901_mpp_config(unsigned mpp, unsigned type, unsigned level,
+		      unsigned control)
+{
+	u8	config, mask;
+	int	rc;
+
+	if (mpp >= PM8901_MPPS)
+		return -EINVAL;
+
+	mask = PM8901_MPP_TYPE_MASK | PM8901_MPP_CONFIG_LVL_MASK |
+		PM8901_MPP_CONFIG_CTL_MASK;
+	config = (type << PM8901_MPP_TYPE_SHIFT) & PM8901_MPP_TYPE_MASK;
+	config |= (level << PM8901_MPP_CONFIG_LVL_SHIFT) &
+			PM8901_MPP_CONFIG_LVL_MASK;
+	config |= control & PM8901_MPP_CONFIG_CTL_MASK;
+
+	rc = pm8901_mpp_write(pm8901_mpp_chip.pm_chip, SSBI_MPP_CNTRL(mpp),
+			config, mask, &pm8901_mpp_chip.ctrl[mpp]);
+	if (rc)
+		pr_err("%s: pm8901_mpp_write(): rc=%d\n", __func__, rc);
+
+	return rc;
+}
+EXPORT_SYMBOL(pm8901_mpp_config);
+
+static int __devinit pm8901_mpp_probe(struct platform_device *pdev)
+{
+	int ret, i;
+	struct pm8901_gpio_platform_data *pdata = pdev->dev.platform_data;
+
+	pm8901_mpp_chip.pm_chip = dev_get_drvdata(pdev->dev.parent);
+	for (i = 0; i < PM8901_MPPS; i++) {
+		ret = pm8901_read(pm8901_mpp_chip.pm_chip,
+				SSBI_MPP_CNTRL(i), &pm8901_mpp_chip.ctrl[i], 1);
+		if (ret)
+			goto bail;
+
+	}
+	platform_set_drvdata(pdev, &pm8901_mpp_chip);
+	pm8901_mpp_chip.chip.dev = &pdev->dev;
+	pm8901_mpp_chip.chip.base = pdata->gpio_base;
+	ret = gpiochip_add(&pm8901_mpp_chip.chip);
+
+bail:
+	pr_info("%s: gpiochip_add(): rc=%d\n", __func__, ret);
+	return ret;
+}
+
+static int __devexit pm8901_mpp_remove(struct platform_device *pdev)
+{
+	return gpiochip_remove(&pm8901_mpp_chip.chip);
+}
+
+static struct platform_driver pm8901_mpp_driver = {
+	.probe		= pm8901_mpp_probe,
+	.remove		= __devexit_p(pm8901_mpp_remove),
+	.driver		= {
+		.name = "pm8901-mpp",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init pm8901_mpp_init(void)
+{
+	return platform_driver_register(&pm8901_mpp_driver);
+}
+
+static void __exit pm8901_mpp_exit(void)
+{
+	platform_driver_unregister(&pm8901_mpp_driver);
+}
+
+subsys_initcall(pm8901_mpp_init);
+module_exit(pm8901_mpp_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC8901 MPP driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:pm8901-mpp");
diff --git a/drivers/gpio/sx150x.c b/drivers/gpio/sx150x.c
index a4f7353..93b94bd 100644
--- a/drivers/gpio/sx150x.c
+++ b/drivers/gpio/sx150x.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 #include <linux/gpio.h>
 #include <linux/i2c.h>
@@ -189,9 +184,9 @@
 	return err;
 }
 
-static void sx150x_set_oscio(struct sx150x_chip *chip, int val)
+static s32 sx150x_set_oscio(struct sx150x_chip *chip, int val)
 {
-	sx150x_i2c_write(chip->client,
+	return sx150x_i2c_write(chip->client,
 			chip->dev_cfg->reg_clock,
 			(val ? 0x1f : 0x10));
 }
@@ -286,11 +281,13 @@
 
 	chip = container_of(gc, struct sx150x_chip, gpio_chip);
 
-	if (!offset_is_oscio(chip, offset)) {
-		mutex_lock(&chip->lock);
+	mutex_lock(&chip->lock);
+	if (offset_is_oscio(chip, offset))
+		status = sx150x_set_oscio(chip, val);
+	else
 		status = sx150x_io_output(chip, offset, val);
-		mutex_unlock(&chip->lock);
-	}
+	mutex_unlock(&chip->lock);
+
 	return status;
 }
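The net effect of this hunk is that output requests (e.g. via gpio_direction_output()) on the OSCIO pin are no longer silently ignored: the value is routed to sx150x_set_oscio(), which programs the clock register (0x1f for high, 0x10 for low), and the return code is propagated to the caller. A minimal consumer-side sketch, with a hypothetical global GPIO number for OSCIO:

#include <linux/gpio.h>

#define EXAMPLE_OSCIO_GPIO	300	/* hypothetical; chip's gpio_base + OSCIO offset */

static int example_oscio_enable(void)
{
	int rc;

	rc = gpio_request(EXAMPLE_OSCIO_GPIO, "oscio");
	if (rc)
		return rc;

	/* previously a silent no-op for OSCIO; now drives the clock output */
	return gpio_direction_output(EXAMPLE_OSCIO_GPIO, 1);
}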
 
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index ca2d3b3..01cef64 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -1 +1,2 @@
 obj-y			+= drm/ vga/ stub/ ion/
+obj-$(CONFIG_MSM_KGSL)	+= msm/
diff --git a/drivers/gpu/msm/Kconfig b/drivers/gpu/msm/Kconfig
new file mode 100644
index 0000000..64cbc30
--- /dev/null
+++ b/drivers/gpu/msm/Kconfig
@@ -0,0 +1,105 @@
+config MSM_KGSL
+	tristate "MSM 3D Graphics driver"
+	default n
+	depends on ARCH_MSM && !ARCH_MSM7X00A && !ARCH_MSM7X25
+	select GENERIC_ALLOCATOR
+	select FW_LOADER
+	---help---
+	  3D graphics driver. Required to use hardware accelerated
+	  OpenGL ES 2.0 and 1.1.
+
+config MSM_KGSL_CFF_DUMP
+	bool "Enable KGSL Common File Format (CFF) Dump Feature [Use with caution]"
+	default n
+	depends on MSM_KGSL
+	select RELAY
+	---help---
+	  This is an analysis and diagnostic feature only, and should only be
+	  turned on during KGSL GPU diagnostics; it will slow down KGSL
+	  performance significantly, so *do not use in production builds*.
+	  When enabled, CFF Dump is on at boot. It can be turned off at runtime
+	  via 'echo 0 > /d/kgsl/cff_dump'.  The log can be captured via
+	  /d/kgsl-cff/cpu[0|1].
+
+config MSM_KGSL_CFF_DUMP_NO_CONTEXT_MEM_DUMP
+	bool "When selected will disable KGSL CFF Dump for context switches"
+	default n
+	depends on MSM_KGSL_CFF_DUMP
+	---help---
+	  Dumping all the memory for every context switch can produce very
+	  large log files; turn this feature on to reduce their size.
+
+config MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL
+	bool "Disable human readable CP_STAT fields in post-mortem dump"
+	default n
+	depends on MSM_KGSL
+	---help---
+	  For a more compact kernel log, the human-readable output of
+	  CP_STAT can be turned off with this option.
+
+config MSM_KGSL_PSTMRTMDMP_NO_IB_DUMP
+	bool "Disable dumping current IB1 and IB2 in post-mortem dump"
+	default n
+	depends on MSM_KGSL
+	---help---
+	  For a more compact kernel log, the IB1 and IB2 embedded dump
+	  can be turned off with this option.  Some IB dumps take up
+	  so much space that other vital information gets cut from the
+	  post-mortem dump.
+
+config MSM_KGSL_PSTMRTMDMP_RB_HEX
+	bool "Use hex version for ring-buffer in post-mortem dump"
+	default n
+	depends on MSM_KGSL
+	---help---
+	  Use hex version for the ring-buffer in the post-mortem dump, instead
+	  of the human readable version.
+
+config MSM_KGSL_2D
+	tristate "MSM 2D graphics driver. Required for OpenVG"
+	default y
+	depends on MSM_KGSL && !ARCH_MSM7X27 && !ARCH_MSM7X27A && !(ARCH_QSD8X50 && !MSM_SOC_REV_A)
+
+config MSM_KGSL_DRM
+	bool "Build a DRM interface for the MSM_KGSL driver"
+	depends on MSM_KGSL && DRM
+
+config MSM_KGSL_MMU
+	bool "Enable the GPU MMU in the MSM_KGSL driver"
+	depends on MSM_KGSL && MMU && !MSM_KGSL_CFF_DUMP
+	default y
+
+config KGSL_PER_PROCESS_PAGE_TABLE
+	bool "Enable Per Process page tables for the KGSL driver"
+	default n
+	depends on MSM_KGSL_MMU && !MSM_KGSL_DRM
+	---help---
+	  The MMU will use per process pagetables when enabled.
+
+config MSM_KGSL_PAGE_TABLE_SIZE
+	hex "Size of pagetables"
+	default 0xFFF0000
+	depends on MSM_KGSL_MMU
+	---help---
+	  Sets the pagetable size used by the MMU.  The max value
+	  is 0xFFF0000 or (256M - 64K).
+
+config MSM_KGSL_PAGE_TABLE_COUNT
+	int "Minimum of concurrent pagetables to support"
+	default 8
+	depends on KGSL_PER_PROCESS_PAGE_TABLE
+	---help---
+	  Specify the number of pagetables to allocate at init time.
+	  This is the number of concurrent processes that are guaranteed
+	  to run at any time.  Additional processes can be created dynamically,
+	  assuming there is enough contiguous memory to allocate the pagetable.
+
+config MSM_KGSL_MMU_PAGE_FAULT
+	bool "Force the GPU MMU to page fault for unmapped regions"
+	default y
+	depends on MSM_KGSL_MMU
+
+config MSM_KGSL_DISABLE_SHADOW_WRITES
+	bool "Disable register shadow writes for context switches"
+	default n
+	depends on MSM_KGSL
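For reference, these options surface in the sources as ordinary CONFIG_ macros generated by Kconfig; a minimal sketch of the typical consumption pattern (the macro uses below are illustrative, not the actual KGSL code, and assume MSM_KGSL_MMU=y so that the size/count symbols exist):

#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
/* one pagetable per process; pool sized by the Kconfig int symbol */
#define EXAMPLE_PT_COUNT	CONFIG_MSM_KGSL_PAGE_TABLE_COUNT
#else
/* a single global pagetable shared by every process */
#define EXAMPLE_PT_COUNT	1
#endif

/* hex symbols expand to plain integer literals, e.g. 0xFFF0000 */
#define EXAMPLE_PT_SIZE		CONFIG_MSM_KGSL_PAGE_TABLE_SIZE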
diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile
new file mode 100644
index 0000000..e31a3dd
--- /dev/null
+++ b/drivers/gpu/msm/Makefile
@@ -0,0 +1,31 @@
+ccflags-y := -Iinclude/drm
+
+msm_kgsl_core-y = \
+	kgsl.o \
+	kgsl_sharedmem.o \
+	kgsl_pwrctrl.o \
+	kgsl_pwrscale.o
+
+msm_kgsl_core-$(CONFIG_DEBUG_FS) += kgsl_debugfs.o
+msm_kgsl_core-$(CONFIG_MSM_KGSL_MMU) += kgsl_mmu.o
+msm_kgsl_core-$(CONFIG_MSM_KGSL_CFF_DUMP) += kgsl_cffdump.o
+msm_kgsl_core-$(CONFIG_MSM_KGSL_DRM) += kgsl_drm.o
+msm_kgsl_core-$(CONFIG_MSM_SCM) += kgsl_pwrscale_trustzone.o
+
+msm_adreno-y += \
+	adreno_ringbuffer.o \
+	adreno_drawctxt.o \
+	adreno_postmortem.o \
+	adreno.o
+
+msm_adreno-$(CONFIG_DEBUG_FS) += adreno_debugfs.o
+
+msm_z180-y += z180.o
+
+msm_kgsl_core-objs = $(msm_kgsl_core-y)
+msm_adreno-objs = $(msm_adreno-y)
+msm_z180-objs = $(msm_z180-y)
+
+obj-$(CONFIG_MSM_KGSL) += msm_kgsl_core.o
+obj-$(CONFIG_MSM_KGSL) += msm_adreno.o
+obj-$(CONFIG_MSM_KGSL_2D) += msm_z180.o
diff --git a/drivers/gpu/msm/a200_reg.h b/drivers/gpu/msm/a200_reg.h
new file mode 100644
index 0000000..e1681f9
--- /dev/null
+++ b/drivers/gpu/msm/a200_reg.h
@@ -0,0 +1,408 @@
+/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __A200_REG_H
+#define __A200_REG_H
+
+enum VGT_EVENT_TYPE {
+	VS_DEALLOC = 0,
+	PS_DEALLOC = 1,
+	VS_DONE_TS = 2,
+	PS_DONE_TS = 3,
+	CACHE_FLUSH_TS = 4,
+	CONTEXT_DONE = 5,
+	CACHE_FLUSH = 6,
+	VIZQUERY_START = 7,
+	VIZQUERY_END = 8,
+	SC_WAIT_WC = 9,
+	RST_PIX_CNT = 13,
+	RST_VTX_CNT = 14,
+	TILE_FLUSH = 15,
+	CACHE_FLUSH_AND_INV_TS_EVENT = 20,
+	ZPASS_DONE = 21,
+	CACHE_FLUSH_AND_INV_EVENT = 22,
+	PERFCOUNTER_START = 23,
+	PERFCOUNTER_STOP = 24,
+	VS_FETCH_DONE = 27,
+	FACENESS_FLUSH = 28,
+};
+
+enum COLORFORMATX {
+	COLORX_4_4_4_4 = 0,
+	COLORX_1_5_5_5 = 1,
+	COLORX_5_6_5 = 2,
+	COLORX_8 = 3,
+	COLORX_8_8 = 4,
+	COLORX_8_8_8_8 = 5,
+	COLORX_S8_8_8_8 = 6,
+	COLORX_16_FLOAT = 7,
+	COLORX_16_16_FLOAT = 8,
+	COLORX_16_16_16_16_FLOAT = 9,
+	COLORX_32_FLOAT = 10,
+	COLORX_32_32_FLOAT = 11,
+	COLORX_32_32_32_32_FLOAT = 12,
+	COLORX_2_3_3 = 13,
+	COLORX_8_8_8 = 14,
+};
+
+enum SURFACEFORMAT {
+	FMT_1_REVERSE                  = 0,
+	FMT_1                          = 1,
+	FMT_8                          = 2,
+	FMT_1_5_5_5                    = 3,
+	FMT_5_6_5                      = 4,
+	FMT_6_5_5                      = 5,
+	FMT_8_8_8_8                    = 6,
+	FMT_2_10_10_10                 = 7,
+	FMT_8_A                        = 8,
+	FMT_8_B                        = 9,
+	FMT_8_8                        = 10,
+	FMT_Cr_Y1_Cb_Y0                = 11,
+	FMT_Y1_Cr_Y0_Cb                = 12,
+	FMT_5_5_5_1                    = 13,
+	FMT_8_8_8_8_A                  = 14,
+	FMT_4_4_4_4                    = 15,
+	FMT_10_11_11                   = 16,
+	FMT_11_11_10                   = 17,
+	FMT_DXT1                       = 18,
+	FMT_DXT2_3                     = 19,
+	FMT_DXT4_5                     = 20,
+	FMT_24_8                       = 22,
+	FMT_24_8_FLOAT                 = 23,
+	FMT_16                         = 24,
+	FMT_16_16                      = 25,
+	FMT_16_16_16_16                = 26,
+	FMT_16_EXPAND                  = 27,
+	FMT_16_16_EXPAND               = 28,
+	FMT_16_16_16_16_EXPAND         = 29,
+	FMT_16_FLOAT                   = 30,
+	FMT_16_16_FLOAT                = 31,
+	FMT_16_16_16_16_FLOAT          = 32,
+	FMT_32                         = 33,
+	FMT_32_32                      = 34,
+	FMT_32_32_32_32                = 35,
+	FMT_32_FLOAT                   = 36,
+	FMT_32_32_FLOAT                = 37,
+	FMT_32_32_32_32_FLOAT          = 38,
+	FMT_32_AS_8                    = 39,
+	FMT_32_AS_8_8                  = 40,
+	FMT_16_MPEG                    = 41,
+	FMT_16_16_MPEG                 = 42,
+	FMT_8_INTERLACED               = 43,
+	FMT_32_AS_8_INTERLACED         = 44,
+	FMT_32_AS_8_8_INTERLACED       = 45,
+	FMT_16_INTERLACED              = 46,
+	FMT_16_MPEG_INTERLACED         = 47,
+	FMT_16_16_MPEG_INTERLACED      = 48,
+	FMT_DXN                        = 49,
+	FMT_8_8_8_8_AS_16_16_16_16     = 50,
+	FMT_DXT1_AS_16_16_16_16        = 51,
+	FMT_DXT2_3_AS_16_16_16_16      = 52,
+	FMT_DXT4_5_AS_16_16_16_16      = 53,
+	FMT_2_10_10_10_AS_16_16_16_16  = 54,
+	FMT_10_11_11_AS_16_16_16_16    = 55,
+	FMT_11_11_10_AS_16_16_16_16    = 56,
+	FMT_32_32_32_FLOAT             = 57,
+	FMT_DXT3A                      = 58,
+	FMT_DXT5A                      = 59,
+	FMT_CTX1                       = 60,
+	FMT_DXT3A_AS_1_1_1_1           = 61
+};
+
+#define REG_PERF_MODE_CNT	0x0
+#define REG_PERF_STATE_RESET	0x0
+#define REG_PERF_STATE_ENABLE	0x1
+#define REG_PERF_STATE_FREEZE	0x2
+
+#define RB_EDRAM_INFO_EDRAM_SIZE_SIZE                      4
+#define RB_EDRAM_INFO_EDRAM_MAPPING_MODE_SIZE              2
+#define RB_EDRAM_INFO_UNUSED0_SIZE                         8
+#define RB_EDRAM_INFO_EDRAM_RANGE_SIZE                     18
+
+struct rb_edram_info_t {
+	unsigned int edram_size:RB_EDRAM_INFO_EDRAM_SIZE_SIZE;
+	unsigned int edram_mapping_mode:RB_EDRAM_INFO_EDRAM_MAPPING_MODE_SIZE;
+	unsigned int unused0:RB_EDRAM_INFO_UNUSED0_SIZE;
+	unsigned int edram_range:RB_EDRAM_INFO_EDRAM_RANGE_SIZE;
+};
+
+union reg_rb_edram_info {
+	unsigned int val;
+	struct rb_edram_info_t f;
+};
+
+#define RBBM_READ_ERROR_UNUSED0_SIZE		2
+#define RBBM_READ_ERROR_READ_ADDRESS_SIZE	15
+#define RBBM_READ_ERROR_UNUSED1_SIZE		13
+#define RBBM_READ_ERROR_READ_REQUESTER_SIZE	1
+#define RBBM_READ_ERROR_READ_ERROR_SIZE		1
+
+struct rbbm_read_error_t {
+	unsigned int unused0:RBBM_READ_ERROR_UNUSED0_SIZE;
+	unsigned int read_address:RBBM_READ_ERROR_READ_ADDRESS_SIZE;
+	unsigned int unused1:RBBM_READ_ERROR_UNUSED1_SIZE;
+	unsigned int read_requester:RBBM_READ_ERROR_READ_REQUESTER_SIZE;
+	unsigned int read_error:RBBM_READ_ERROR_READ_ERROR_SIZE;
+};
+
+union rbbm_read_error_u {
+	unsigned int val:32;
+	struct rbbm_read_error_t f;
+};
+
+#define CP_RB_CNTL_RB_BUFSZ_SIZE                           6
+#define CP_RB_CNTL_UNUSED0_SIZE                            2
+#define CP_RB_CNTL_RB_BLKSZ_SIZE                           6
+#define CP_RB_CNTL_UNUSED1_SIZE                            2
+#define CP_RB_CNTL_BUF_SWAP_SIZE                           2
+#define CP_RB_CNTL_UNUSED2_SIZE                            2
+#define CP_RB_CNTL_RB_POLL_EN_SIZE                         1
+#define CP_RB_CNTL_UNUSED3_SIZE                            6
+#define CP_RB_CNTL_RB_NO_UPDATE_SIZE                       1
+#define CP_RB_CNTL_UNUSED4_SIZE                            3
+#define CP_RB_CNTL_RB_RPTR_WR_ENA_SIZE                     1
+
+struct cp_rb_cntl_t {
+	unsigned int rb_bufsz:CP_RB_CNTL_RB_BUFSZ_SIZE;
+	unsigned int unused0:CP_RB_CNTL_UNUSED0_SIZE;
+	unsigned int rb_blksz:CP_RB_CNTL_RB_BLKSZ_SIZE;
+	unsigned int unused1:CP_RB_CNTL_UNUSED1_SIZE;
+	unsigned int buf_swap:CP_RB_CNTL_BUF_SWAP_SIZE;
+	unsigned int unused2:CP_RB_CNTL_UNUSED2_SIZE;
+	unsigned int rb_poll_en:CP_RB_CNTL_RB_POLL_EN_SIZE;
+	unsigned int unused3:CP_RB_CNTL_UNUSED3_SIZE;
+	unsigned int rb_no_update:CP_RB_CNTL_RB_NO_UPDATE_SIZE;
+	unsigned int unused4:CP_RB_CNTL_UNUSED4_SIZE;
+	unsigned int rb_rptr_wr_ena:CP_RB_CNTL_RB_RPTR_WR_ENA_SIZE;
+};
+
+union reg_cp_rb_cntl {
+	unsigned int val:32;
+	struct cp_rb_cntl_t f;
+};
+
+#define RB_COLOR_INFO__COLOR_FORMAT_MASK                   0x0000000fL
+#define RB_COPY_DEST_INFO__COPY_DEST_FORMAT__SHIFT         0x00000004
+
+
+#define SQ_INT_CNTL__PS_WATCHDOG_MASK                      0x00000001L
+#define SQ_INT_CNTL__VS_WATCHDOG_MASK                      0x00000002L
+
+#define RBBM_INT_CNTL__RDERR_INT_MASK                      0x00000001L
+#define RBBM_INT_CNTL__DISPLAY_UPDATE_INT_MASK             0x00000002L
+#define RBBM_INT_CNTL__GUI_IDLE_INT_MASK                   0x00080000L
+
+#define RBBM_STATUS__CMDFIFO_AVAIL_MASK                    0x0000001fL
+#define RBBM_STATUS__TC_BUSY_MASK                          0x00000020L
+#define RBBM_STATUS__HIRQ_PENDING_MASK                     0x00000100L
+#define RBBM_STATUS__CPRQ_PENDING_MASK                     0x00000200L
+#define RBBM_STATUS__CFRQ_PENDING_MASK                     0x00000400L
+#define RBBM_STATUS__PFRQ_PENDING_MASK                     0x00000800L
+#define RBBM_STATUS__VGT_BUSY_NO_DMA_MASK                  0x00001000L
+#define RBBM_STATUS__RBBM_WU_BUSY_MASK                     0x00004000L
+#define RBBM_STATUS__CP_NRT_BUSY_MASK                      0x00010000L
+#define RBBM_STATUS__MH_BUSY_MASK                          0x00040000L
+#define RBBM_STATUS__MH_COHERENCY_BUSY_MASK                0x00080000L
+#define RBBM_STATUS__SX_BUSY_MASK                          0x00200000L
+#define RBBM_STATUS__TPC_BUSY_MASK                         0x00400000L
+#define RBBM_STATUS__SC_CNTX_BUSY_MASK                     0x01000000L
+#define RBBM_STATUS__PA_BUSY_MASK                          0x02000000L
+#define RBBM_STATUS__VGT_BUSY_MASK                         0x04000000L
+#define RBBM_STATUS__SQ_CNTX17_BUSY_MASK                   0x08000000L
+#define RBBM_STATUS__SQ_CNTX0_BUSY_MASK                    0x10000000L
+#define RBBM_STATUS__RB_CNTX_BUSY_MASK                     0x40000000L
+#define RBBM_STATUS__GUI_ACTIVE_MASK                       0x80000000L
+
+#define CP_INT_CNTL__SW_INT_MASK                           0x00080000L
+#define CP_INT_CNTL__T0_PACKET_IN_IB_MASK                  0x00800000L
+#define CP_INT_CNTL__OPCODE_ERROR_MASK                     0x01000000L
+#define CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK             0x02000000L
+#define CP_INT_CNTL__RESERVED_BIT_ERROR_MASK               0x04000000L
+#define CP_INT_CNTL__IB_ERROR_MASK                         0x08000000L
+#define CP_INT_CNTL__IB2_INT_MASK                          0x20000000L
+#define CP_INT_CNTL__IB1_INT_MASK                          0x40000000L
+#define CP_INT_CNTL__RB_INT_MASK                           0x80000000L
+
+#define MASTER_INT_SIGNAL__MH_INT_STAT                     0x00000020L
+#define MASTER_INT_SIGNAL__SQ_INT_STAT                     0x04000000L
+#define MASTER_INT_SIGNAL__CP_INT_STAT                     0x40000000L
+#define MASTER_INT_SIGNAL__RBBM_INT_STAT                   0x80000000L
+
+#define RB_EDRAM_INFO__EDRAM_SIZE_MASK                     0x0000000fL
+#define RB_EDRAM_INFO__EDRAM_RANGE_MASK                    0xffffc000L
+
+#define MH_ARBITER_CONFIG__SAME_PAGE_GRANULARITY__SHIFT    0x00000006
+#define MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT            0x00000007
+#define MH_ARBITER_CONFIG__L1_ARB_HOLD_ENABLE__SHIFT       0x00000008
+#define MH_ARBITER_CONFIG__L2_ARB_CONTROL__SHIFT           0x00000009
+#define MH_ARBITER_CONFIG__PAGE_SIZE__SHIFT                0x0000000a
+#define MH_ARBITER_CONFIG__TC_REORDER_ENABLE__SHIFT        0x0000000d
+#define MH_ARBITER_CONFIG__TC_ARB_HOLD_ENABLE__SHIFT       0x0000000e
+#define MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT   0x0000000f
+#define MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT          0x00000010
+#define MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT           0x00000016
+#define MH_ARBITER_CONFIG__VGT_CLNT_ENABLE__SHIFT          0x00000017
+#define MH_ARBITER_CONFIG__TC_CLNT_ENABLE__SHIFT           0x00000018
+#define MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT           0x00000019
+#define MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT           0x0000001a
+
+#define CP_RB_CNTL__RB_BUFSZ__SHIFT                        0x00000000
+#define CP_RB_CNTL__RB_BLKSZ__SHIFT                        0x00000008
+#define CP_RB_CNTL__RB_POLL_EN__SHIFT                      0x00000014
+#define CP_RB_CNTL__RB_NO_UPDATE__SHIFT                    0x0000001b
+
+#define RB_COLOR_INFO__COLOR_FORMAT__SHIFT                 0x00000000
+#define RB_EDRAM_INFO__EDRAM_MAPPING_MODE__SHIFT           0x00000004
+#define RB_EDRAM_INFO__EDRAM_RANGE__SHIFT                  0x0000000e
+
+#define REG_CP_CSQ_IB1_STAT              0x01FE
+#define REG_CP_CSQ_IB2_STAT              0x01FF
+#define REG_CP_CSQ_RB_STAT               0x01FD
+#define REG_CP_DEBUG                     0x01FC
+#define REG_CP_IB1_BASE                  0x0458
+#define REG_CP_IB1_BUFSZ                 0x0459
+#define REG_CP_IB2_BASE                  0x045A
+#define REG_CP_IB2_BUFSZ                 0x045B
+#define REG_CP_INT_ACK                   0x01F4
+#define REG_CP_INT_CNTL                  0x01F2
+#define REG_CP_INT_STATUS                0x01F3
+#define REG_CP_ME_CNTL                   0x01F6
+#define REG_CP_ME_RAM_DATA               0x01FA
+#define REG_CP_ME_RAM_WADDR              0x01F8
+#define REG_CP_ME_STATUS                 0x01F7
+#define REG_CP_PFP_UCODE_ADDR            0x00C0
+#define REG_CP_PFP_UCODE_DATA            0x00C1
+#define REG_CP_QUEUE_THRESHOLDS          0x01D5
+#define REG_CP_RB_BASE                   0x01C0
+#define REG_CP_RB_CNTL                   0x01C1
+#define REG_CP_RB_RPTR                   0x01C4
+#define REG_CP_RB_RPTR_ADDR              0x01C3
+#define REG_CP_RB_RPTR_WR                0x01C7
+#define REG_CP_RB_WPTR                   0x01C5
+#define REG_CP_RB_WPTR_BASE              0x01C8
+#define REG_CP_RB_WPTR_DELAY             0x01C6
+#define REG_CP_STAT                      0x047F
+#define REG_CP_STATE_DEBUG_DATA          0x01ED
+#define REG_CP_STATE_DEBUG_INDEX         0x01EC
+#define REG_CP_ST_BASE                   0x044D
+#define REG_CP_ST_BUFSZ                  0x044E
+
+#define REG_CP_PERFMON_CNTL              0x0444
+#define REG_CP_PERFCOUNTER_SELECT        0x0445
+#define REG_CP_PERFCOUNTER_LO            0x0446
+#define REG_CP_PERFCOUNTER_HI            0x0447
+
+#define REG_RBBM_PERFCOUNTER1_SELECT     0x0395
+#define REG_RBBM_PERFCOUNTER1_HI         0x0398
+#define REG_RBBM_PERFCOUNTER1_LO         0x0397
+
+#define REG_MASTER_INT_SIGNAL            0x03B7
+
+#define REG_MH_ARBITER_CONFIG            0x0A40
+#define REG_MH_CLNT_INTF_CTRL_CONFIG1    0x0A54
+#define REG_MH_CLNT_INTF_CTRL_CONFIG2    0x0A55
+
+#define REG_PA_CL_VPORT_XSCALE           0x210F
+#define REG_PA_CL_VPORT_ZOFFSET          0x2114
+#define REG_PA_CL_VPORT_ZSCALE           0x2113
+#define REG_PA_CL_CLIP_CNTL              0x2204
+#define REG_PA_CL_VTE_CNTL               0x2206
+#define REG_PA_SC_AA_MASK                0x2312
+#define REG_PA_SC_LINE_CNTL              0x2300
+#define REG_PA_SC_SCREEN_SCISSOR_BR      0x200F
+#define REG_PA_SC_SCREEN_SCISSOR_TL      0x200E
+#define REG_PA_SC_VIZ_QUERY              0x2293
+#define REG_PA_SC_VIZ_QUERY_STATUS       0x0C44
+#define REG_PA_SC_WINDOW_OFFSET          0x2080
+#define REG_PA_SC_WINDOW_SCISSOR_BR      0x2082
+#define REG_PA_SC_WINDOW_SCISSOR_TL      0x2081
+#define REG_PA_SU_FACE_DATA              0x0C86
+#define REG_PA_SU_POINT_SIZE             0x2280
+#define REG_PA_SU_LINE_CNTL              0x2282
+#define REG_PA_SU_POLY_OFFSET_BACK_OFFSET 0x2383
+#define REG_PA_SU_POLY_OFFSET_FRONT_SCALE 0x2380
+#define REG_PA_SU_SC_MODE_CNTL           0x2205
+
+#define REG_PC_INDEX_OFFSET              0x2102
+
+#define REG_RBBM_CNTL                    0x003B
+#define REG_RBBM_INT_ACK                 0x03B6
+#define REG_RBBM_INT_CNTL                0x03B4
+#define REG_RBBM_INT_STATUS              0x03B5
+#define REG_RBBM_PATCH_RELEASE           0x0001
+#define REG_RBBM_PERIPHID1               0x03F9
+#define REG_RBBM_PERIPHID2               0x03FA
+#define REG_RBBM_DEBUG                   0x039B
+#define REG_RBBM_DEBUG_OUT               0x03A0
+#define REG_RBBM_DEBUG_CNTL              0x03A1
+#define REG_RBBM_PM_OVERRIDE1            0x039C
+#define REG_RBBM_PM_OVERRIDE2            0x039D
+#define REG_RBBM_READ_ERROR              0x03B3
+#define REG_RBBM_SOFT_RESET              0x003C
+#define REG_RBBM_STATUS                  0x05D0
+
+#define REG_RB_COLORCONTROL              0x2202
+#define REG_RB_COLOR_DEST_MASK           0x2326
+#define REG_RB_COLOR_MASK                0x2104
+#define REG_RB_COPY_CONTROL              0x2318
+#define REG_RB_DEPTHCONTROL              0x2200
+#define REG_RB_EDRAM_INFO                0x0F02
+#define REG_RB_MODECONTROL               0x2208
+#define REG_RB_SURFACE_INFO              0x2000
+#define REG_RB_SAMPLE_POS                0x220a
+
+#define REG_SCRATCH_ADDR                 0x01DD
+#define REG_SCRATCH_REG0                 0x0578
+#define REG_SCRATCH_REG2                 0x057A
+#define REG_SCRATCH_UMSK                 0x01DC
+
+#define REG_SQ_CF_BOOLEANS               0x4900
+#define REG_SQ_CF_LOOP                   0x4908
+#define REG_SQ_GPR_MANAGEMENT            0x0D00
+#define REG_SQ_INST_STORE_MANAGMENT      0x0D02
+#define REG_SQ_INT_ACK                   0x0D36
+#define REG_SQ_INT_CNTL                  0x0D34
+#define REG_SQ_INT_STATUS                0x0D35
+#define REG_SQ_PROGRAM_CNTL              0x2180
+#define REG_SQ_PS_PROGRAM                0x21F6
+#define REG_SQ_VS_PROGRAM                0x21F7
+#define REG_SQ_WRAPPING_0                0x2183
+#define REG_SQ_WRAPPING_1                0x2184
+
+#define REG_VGT_ENHANCE                  0x2294
+#define REG_VGT_INDX_OFFSET              0x2102
+#define REG_VGT_MAX_VTX_INDX             0x2100
+#define REG_VGT_MIN_VTX_INDX             0x2101
+
+#define REG_TP0_CHICKEN                  0x0E1E
+#define REG_TC_CNTL_STATUS               0x0E00
+#define REG_PA_SC_AA_CONFIG              0x2301
+#define REG_VGT_VERTEX_REUSE_BLOCK_CNTL  0x2316
+#define REG_SQ_INTERPOLATOR_CNTL         0x2182
+#define REG_RB_DEPTH_INFO                0x2002
+#define REG_COHER_DEST_BASE_0            0x2006
+#define REG_RB_FOG_COLOR                 0x2109
+#define REG_RB_STENCILREFMASK_BF         0x210C
+#define REG_PA_SC_LINE_STIPPLE           0x2283
+#define REG_SQ_PS_CONST                  0x2308
+#define REG_RB_DEPTH_CLEAR               0x231D
+#define REG_RB_SAMPLE_COUNT_CTL          0x2324
+#define REG_SQ_CONSTANT_0                0x4000
+#define REG_SQ_FETCH_0                   0x4800
+
+#define REG_MH_DEBUG_CTRL                0xA4E
+#define REG_MH_DEBUG_DATA                0xA4F
+#define REG_COHER_BASE_PM4               0xA2A
+#define REG_COHER_STATUS_PM4             0xA2B
+#define REG_COHER_SIZE_PM4               0xA29
+
+#endif /* __A200_REG_H */
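The bitfield structs and unions above all follow one pattern: program the named fields, then write the aggregate .val to the corresponding register. An illustrative sketch (the field values are placeholders, not recommended settings):

static unsigned int example_build_cp_rb_cntl(void)
{
	union reg_cp_rb_cntl cntl;

	cntl.val = 0;
	cntl.f.rb_bufsz = 10;		/* placeholder ring buffer size encoding */
	cntl.f.rb_blksz = 9;		/* placeholder block size encoding */
	cntl.f.rb_no_update = 0;	/* let the CP update the read pointer */

	return cntl.val;		/* value to write to REG_CP_RB_CNTL */
}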
diff --git a/drivers/gpu/msm/a220_reg.h b/drivers/gpu/msm/a220_reg.h
new file mode 100644
index 0000000..7cfe705
--- /dev/null
+++ b/drivers/gpu/msm/a220_reg.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __A205_REG_H
+#define __A205_REG_H
+
+#define REG_LEIA_PC_INDX_OFFSET          REG_VGT_INDX_OFFSET
+#define REG_LEIA_PC_VERTEX_REUSE_BLOCK_CNTL REG_VGT_VERTEX_REUSE_BLOCK_CNTL
+#define REG_LEIA_PC_MAX_VTX_INDX         REG_VGT_MAX_VTX_INDX
+#define REG_LEIA_RB_LRZ_VSC_CONTROL	 0x2209
+#define REG_LEIA_GRAS_CONTROL            0x2210
+#define REG_LEIA_VSC_BIN_SIZE            0x0C01
+#define REG_LEIA_VSC_PIPE_DATA_LENGTH_7  0x0C1D
+
+#endif /*__A205_REG_H */
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
new file mode 100644
index 0000000..6b9adf8
--- /dev/null
+++ b/drivers/gpu/msm/adreno.c
@@ -0,0 +1,1329 @@
+/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/ioctl.h>
+#include <linux/sched.h>
+
+#include <mach/socinfo.h>
+
+#include "kgsl.h"
+#include "kgsl_pwrscale.h"
+#include "kgsl_cffdump.h"
+#include "kgsl_sharedmem.h"
+
+#include "adreno.h"
+#include "adreno_pm4types.h"
+#include "adreno_debugfs.h"
+#include "adreno_postmortem.h"
+
+#include "a200_reg.h"
+
+#define DRIVER_VERSION_MAJOR   3
+#define DRIVER_VERSION_MINOR   1
+
+#define KGSL_RBBM_INT_MASK \
+	 (RBBM_INT_CNTL__RDERR_INT_MASK |  \
+	  RBBM_INT_CNTL__DISPLAY_UPDATE_INT_MASK)
+
+/* Adreno MH arbiter config*/
+#define ADRENO_CFG_MHARB \
+	(0x10 \
+		| (0 << MH_ARBITER_CONFIG__SAME_PAGE_GRANULARITY__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__L1_ARB_HOLD_ENABLE__SHIFT) \
+		| (0 << MH_ARBITER_CONFIG__L2_ARB_CONTROL__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__PAGE_SIZE__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__TC_REORDER_ENABLE__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__TC_ARB_HOLD_ENABLE__SHIFT) \
+		| (0 << MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT) \
+		| (0x8 << MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__VGT_CLNT_ENABLE__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__TC_CLNT_ENABLE__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT))
+
+#define ADRENO_MMU_CONFIG						\
+	(0x01								\
+	 | (MMU_CONFIG << MH_MMU_CONFIG__RB_W_CLNT_BEHAVIOR__SHIFT)	\
+	 | (MMU_CONFIG << MH_MMU_CONFIG__CP_W_CLNT_BEHAVIOR__SHIFT)	\
+	 | (MMU_CONFIG << MH_MMU_CONFIG__CP_R0_CLNT_BEHAVIOR__SHIFT)	\
+	 | (MMU_CONFIG << MH_MMU_CONFIG__CP_R1_CLNT_BEHAVIOR__SHIFT)	\
+	 | (MMU_CONFIG << MH_MMU_CONFIG__CP_R2_CLNT_BEHAVIOR__SHIFT)	\
+	 | (MMU_CONFIG << MH_MMU_CONFIG__CP_R3_CLNT_BEHAVIOR__SHIFT)	\
+	 | (MMU_CONFIG << MH_MMU_CONFIG__CP_R4_CLNT_BEHAVIOR__SHIFT)	\
+	 | (MMU_CONFIG << MH_MMU_CONFIG__VGT_R0_CLNT_BEHAVIOR__SHIFT)	\
+	 | (MMU_CONFIG << MH_MMU_CONFIG__VGT_R1_CLNT_BEHAVIOR__SHIFT)	\
+	 | (MMU_CONFIG << MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT)	\
+	 | (MMU_CONFIG << MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT))
+
+/* max time (in jiffies) to wait for gpu to finish its operation(s) */
+#define MAX_WAITGPU_SECS (HZ + HZ/2)
+
+static const struct kgsl_functable adreno_functable;
+
+static struct adreno_device device_3d0 = {
+	.dev = {
+		.name = DEVICE_3D0_NAME,
+		.id = KGSL_DEVICE_3D0,
+		.ver_major = DRIVER_VERSION_MAJOR,
+		.ver_minor = DRIVER_VERSION_MINOR,
+		.mmu = {
+			.config = ADRENO_MMU_CONFIG,
+			/* turn off memory protection unit by setting
+			   acceptable physical address range to include
+			   all pages. */
+			.mpu_base = 0x00000000,
+			.mpu_range =  0xFFFFF000,
+		},
+		.pwrctrl = {
+			.regulator_name = "fs_gfx3d",
+			.irq_name = KGSL_3D0_IRQ,
+			.src_clk_name = "grp_src_clk",
+		},
+		.mutex = __MUTEX_INITIALIZER(device_3d0.dev.mutex),
+		.state = KGSL_STATE_INIT,
+		.active_cnt = 0,
+		.iomemname = KGSL_3D0_REG_MEMORY,
+		.ftbl = &adreno_functable,
+		.display_off = {
+#ifdef CONFIG_HAS_EARLYSUSPEND
+			.level = EARLY_SUSPEND_LEVEL_STOP_DRAWING,
+			.suspend = kgsl_early_suspend_driver,
+			.resume = kgsl_late_resume_driver,
+#endif
+		},
+	},
+	.gmemspace = {
+		.gpu_base = 0,
+		.sizebytes = SZ_256K,
+	},
+	.pfp_fw = NULL,
+	.pm4_fw = NULL,
+	.mharb  = ADRENO_CFG_MHARB,
+};
+
+static int adreno_gmeminit(struct adreno_device *adreno_dev)
+{
+	struct kgsl_device *device = &adreno_dev->dev;
+	union reg_rb_edram_info rb_edram_info;
+	unsigned int gmem_size;
+	unsigned int edram_value = 0;
+
+	/* make sure edram range is aligned to size */
+	BUG_ON(adreno_dev->gmemspace.gpu_base &
+				(adreno_dev->gmemspace.sizebytes - 1));
+
+	/* get edram_size value equivalent */
+	gmem_size = (adreno_dev->gmemspace.sizebytes >> 14);
+	while (gmem_size >>= 1)
+		edram_value++;
+
+	rb_edram_info.val = 0;
+
+	rb_edram_info.f.edram_size = edram_value;
+	if (!adreno_is_a220(adreno_dev))
+		rb_edram_info.f.edram_mapping_mode = 0; /* EDRAM_MAP_UPPER */
+
+	/* must be aligned to size */
+	rb_edram_info.f.edram_range = (adreno_dev->gmemspace.gpu_base >> 14);
+
+	adreno_regwrite(device, REG_RB_EDRAM_INFO, rb_edram_info.val);
+
+	return 0;
+}
+
+static int adreno_gmemclose(struct kgsl_device *device)
+{
+	adreno_regwrite(device, REG_RB_EDRAM_INFO, 0x00000000);
+
+	return 0;
+}
+
+static void adreno_rbbm_intrcallback(struct kgsl_device *device)
+{
+	unsigned int status = 0;
+	unsigned int rderr = 0;
+
+	adreno_regread(device, REG_RBBM_INT_STATUS, &status);
+
+	if (status & RBBM_INT_CNTL__RDERR_INT_MASK) {
+		union rbbm_read_error_u rerr;
+		adreno_regread(device, REG_RBBM_READ_ERROR, &rderr);
+		rerr.val = rderr;
+		if (rerr.f.read_address == REG_CP_INT_STATUS &&
+			rerr.f.read_error &&
+			rerr.f.read_requester)
+			KGSL_DRV_WARN(device,
+				"rbbm read error interrupt: %08x\n", rderr);
+		else
+			KGSL_DRV_CRIT(device,
+				"rbbm read error interrupt: %08x\n", rderr);
+	} else if (status & RBBM_INT_CNTL__DISPLAY_UPDATE_INT_MASK) {
+		KGSL_DRV_INFO(device, "rbbm display update interrupt\n");
+	} else if (status & RBBM_INT_CNTL__GUI_IDLE_INT_MASK) {
+		KGSL_DRV_INFO(device, "rbbm gui idle interrupt\n");
+	} else {
+		KGSL_CMD_WARN(device,
+			"bad bits in REG_CP_INT_STATUS %08x\n", status);
+	}
+
+	status &= KGSL_RBBM_INT_MASK;
+	adreno_regwrite(device, REG_RBBM_INT_ACK, status);
+}
+
+irqreturn_t adreno_isr(int irq, void *data)
+{
+	irqreturn_t result = IRQ_NONE;
+	struct kgsl_device *device;
+	unsigned int status;
+
+	device = (struct kgsl_device *) data;
+
+	BUG_ON(device == NULL);
+	BUG_ON(device->regspace.sizebytes == 0);
+	BUG_ON(device->regspace.mmio_virt_base == 0);
+
+	adreno_regread(device, REG_MASTER_INT_SIGNAL, &status);
+
+	if (status & MASTER_INT_SIGNAL__MH_INT_STAT) {
+		kgsl_mh_intrcallback(device);
+		result = IRQ_HANDLED;
+	}
+
+	if (status & MASTER_INT_SIGNAL__CP_INT_STAT) {
+		kgsl_cp_intrcallback(device);
+		result = IRQ_HANDLED;
+	}
+
+	if (status & MASTER_INT_SIGNAL__RBBM_INT_STAT) {
+		adreno_rbbm_intrcallback(device);
+		result = IRQ_HANDLED;
+	}
+
+	if (device->requested_state == KGSL_STATE_NONE) {
+		if (device->pwrctrl.nap_allowed == true) {
+			device->requested_state = KGSL_STATE_NAP;
+			queue_work(device->work_queue, &device->idle_check_ws);
+		} else if (device->pwrscale.policy != NULL) {
+			queue_work(device->work_queue, &device->idle_check_ws);
+		}
+	}
+
+	/* Reset the time-out in our idle timer */
+	mod_timer(&device->idle_timer,
+		jiffies + device->pwrctrl.interval_timeout);
+	return result;
+}
+
+static int adreno_cleanup_pt(struct kgsl_device *device,
+			struct kgsl_pagetable *pagetable)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+
+	kgsl_mmu_unmap(pagetable, &rb->buffer_desc);
+
+	kgsl_mmu_unmap(pagetable, &rb->memptrs_desc);
+
+	kgsl_mmu_unmap(pagetable, &device->memstore);
+
+	kgsl_mmu_unmap(pagetable, &device->mmu.dummyspace);
+
+	return 0;
+}
+
+static int adreno_setup_pt(struct kgsl_device *device,
+			struct kgsl_pagetable *pagetable)
+{
+	int result = 0;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+
+	BUG_ON(rb->buffer_desc.physaddr == 0);
+	BUG_ON(rb->memptrs_desc.physaddr == 0);
+	BUG_ON(device->memstore.physaddr == 0);
+#ifdef CONFIG_MSM_KGSL_MMU
+	BUG_ON(device->mmu.dummyspace.physaddr == 0);
+#endif
+	result = kgsl_mmu_map_global(pagetable, &rb->buffer_desc,
+				     GSL_PT_PAGE_RV);
+	if (result)
+		goto error;
+
+	result = kgsl_mmu_map_global(pagetable, &rb->memptrs_desc,
+				     GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+	if (result)
+		goto unmap_buffer_desc;
+
+	result = kgsl_mmu_map_global(pagetable, &device->memstore,
+				     GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+	if (result)
+		goto unmap_memptrs_desc;
+
+	result = kgsl_mmu_map_global(pagetable, &device->mmu.dummyspace,
+				     GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+	if (result)
+		goto unmap_memstore_desc;
+
+	return result;
+
+unmap_memstore_desc:
+	kgsl_mmu_unmap(pagetable, &device->memstore);
+
+unmap_memptrs_desc:
+	kgsl_mmu_unmap(pagetable, &rb->memptrs_desc);
+
+unmap_buffer_desc:
+	kgsl_mmu_unmap(pagetable, &rb->buffer_desc);
+
+error:
+	return result;
+}
+
+static void adreno_setstate(struct kgsl_device *device, uint32_t flags)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	unsigned int link[32];
+	unsigned int *cmds = &link[0];
+	int sizedwords = 0;
+	unsigned int mh_mmu_invalidate = 0x00000003; /* invalidate all and tc */
+
+	if (!kgsl_mmu_enabled() || !flags)
+		return;
+
+	/* If possible, set the state via the command stream to avoid
+	   a CPU idle.  Otherwise, use the default setstate, which uses
+	   register writes. */
+
+	if (adreno_dev->drawctxt_active) {
+		if (flags & KGSL_MMUFLAGS_PTUPDATE) {
+			/* wait for graphics pipe to be idle */
+			*cmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+			*cmds++ = 0x00000000;
+
+			/* set page table base */
+			*cmds++ = pm4_type0_packet(MH_MMU_PT_BASE, 1);
+			*cmds++ = device->mmu.hwpagetable->base.gpuaddr;
+			sizedwords += 4;
+		}
+
+		if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
+			if (!(flags & KGSL_MMUFLAGS_PTUPDATE)) {
+				*cmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE,
+								1);
+				*cmds++ = 0x00000000;
+				sizedwords += 2;
+			}
+			*cmds++ = pm4_type0_packet(MH_MMU_INVALIDATE, 1);
+			*cmds++ = mh_mmu_invalidate;
+			sizedwords += 2;
+		}
+
+		if (flags & KGSL_MMUFLAGS_PTUPDATE &&
+			!adreno_is_a220(adreno_dev)) {
+			/* HW workaround: to resolve MMU page fault interrupts
+			* caused by the VGT. It prevents the CP PFP from filling
+			* the VGT DMA request fifo too early, thereby ensuring
+			* that the VGT will not fetch vertex/bin data until
+			* after the page table base register has been updated.
+			*
+			* Two null DRAW_INDX_BIN packets are inserted right
+			* after the page table base update, followed by a
+			* wait for idle. The null packets will fill up the
+			* VGT DMA request fifo and prevent any further
+			* vertex/bin updates from occurring until the wait
+			* has finished. */
+			*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+			*cmds++ = (0x4 << 16) |
+				(REG_PA_SU_SC_MODE_CNTL - 0x2000);
+			*cmds++ = 0;	  /* disable faceness generation */
+			*cmds++ = pm4_type3_packet(PM4_SET_BIN_BASE_OFFSET, 1);
+			*cmds++ = device->mmu.dummyspace.gpuaddr;
+			*cmds++ = pm4_type3_packet(PM4_DRAW_INDX_BIN, 6);
+			*cmds++ = 0;	  /* viz query info */
+			*cmds++ = 0x0003C004; /* draw indicator */
+			*cmds++ = 0;	  /* bin base */
+			*cmds++ = 3;	  /* bin size */
+			*cmds++ = device->mmu.dummyspace.gpuaddr; /* dma base */
+			*cmds++ = 6;	  /* dma size */
+			*cmds++ = pm4_type3_packet(PM4_DRAW_INDX_BIN, 6);
+			*cmds++ = 0;	  /* viz query info */
+			*cmds++ = 0x0003C004; /* draw indicator */
+			*cmds++ = 0;	  /* bin base */
+			*cmds++ = 3;	  /* bin size */
+			/* dma base */
+			*cmds++ = device->mmu.dummyspace.gpuaddr;
+			*cmds++ = 6;	  /* dma size */
+			*cmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+			*cmds++ = 0x00000000;
+			sizedwords += 21;
+		}
+
+		if (flags & (KGSL_MMUFLAGS_PTUPDATE | KGSL_MMUFLAGS_TLBFLUSH)) {
+			*cmds++ = pm4_type3_packet(PM4_INVALIDATE_STATE, 1);
+			*cmds++ = 0x7fff; /* invalidate all base pointers */
+			sizedwords += 2;
+		}
+
+		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+					&link[0], sizedwords);
+	} else
+		kgsl_default_setstate(device, flags);
+}
+
+static unsigned int
+adreno_getchipid(struct kgsl_device *device)
+{
+	unsigned int chipid = 0;
+	unsigned int coreid, majorid, minorid, patchid, revid;
+
+	adreno_regread(device, REG_RBBM_PERIPHID1, &coreid);
+	adreno_regread(device, REG_RBBM_PERIPHID2, &majorid);
+	adreno_regread(device, REG_RBBM_PATCH_RELEASE, &revid);
+
+	/*
+	* adreno 22x gpus are indicated by coreid 2,
+	* but REG_RBBM_PERIPHID1 always contains 0 for this field
+	*/
+	if (cpu_is_msm8960() || cpu_is_msm8x60())
+		chipid = 2 << 24;
+	else
+		chipid = (coreid & 0xF) << 24;
+
+	chipid |= ((majorid >> 4) & 0xF) << 16;
+
+	minorid = ((revid >> 0)  & 0xFF);
+
+	patchid = ((revid >> 16) & 0xFF);
+
+	/* 8x50 returns 0 for patch release, but it should be 1 */
+	if (cpu_is_qsd8x50())
+		patchid = 1;
+	/* userspace isn't prepared to deal with patch id for these chips yet */
+	else if (cpu_is_msm8960() || cpu_is_msm8x60())
+		patchid = 0;
+
+	chipid |= (minorid << 8) | patchid;
+
+	return chipid;
+}
+
+/* all chipid fields are 8 bits wide so 256 won't occur in a real chipid */
+#define DONT_CARE 256
+static const struct {
+	unsigned int core;
+	unsigned int major;
+	unsigned int minor;
+	enum adreno_gpurev gpurev;
+} gpurev_table[] = {
+	/* major and minor may be DONT_CARE, but core must not be */
+	{0, 2, DONT_CARE, ADRENO_REV_A200},
+	{0, 1, 0, ADRENO_REV_A205},
+	{2, 1, DONT_CARE, ADRENO_REV_A220},
+	{2, 2, DONT_CARE, ADRENO_REV_A225},
+};
+
+static inline bool _rev_match(unsigned int id, unsigned int entry)
+{
+	return (entry == DONT_CARE || entry == id);
+}
+#undef DONT_CARE
+
+static void
+adreno_identify_gpu(struct adreno_device *adreno_dev)
+{
+	enum adreno_gpurev gpurev = ADRENO_REV_UNKNOWN;
+	unsigned int i, core, major, minor;
+
+	adreno_dev->chip_id = adreno_getchipid(&adreno_dev->dev);
+
+	core = (adreno_dev->chip_id >> 24) & 0xff;
+	major = (adreno_dev->chip_id >> 16) & 0xff;
+	minor = (adreno_dev->chip_id >> 8) & 0xff;
+
+	for (i = 0; i < ARRAY_SIZE(gpurev_table); i++) {
+		if (core == gpurev_table[i].core &&
+		    _rev_match(major, gpurev_table[i].major) &&
+		    _rev_match(minor, gpurev_table[i].minor)) {
+			gpurev = gpurev_table[i].gpurev;
+			break;
+		}
+	}
+
+	adreno_dev->gpurev = gpurev;
+}
+
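For clarity, the chip_id produced above packs its fields as core in bits 31..24, major in 23..16, minor in 15..8 and patch in 7..0; a small illustrative decoder (not part of the driver) mirroring that layout:

static inline void example_decode_chipid(unsigned int chipid,
		unsigned int *core, unsigned int *major,
		unsigned int *minor, unsigned int *patch)
{
	*core  = (chipid >> 24) & 0xff;
	*major = (chipid >> 16) & 0xff;
	*minor = (chipid >> 8) & 0xff;
	*patch = chipid & 0xff;
}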
+static int __devinit
+adreno_probe(struct platform_device *pdev)
+{
+	struct kgsl_device *device;
+	struct adreno_device *adreno_dev;
+	int status = -EINVAL;
+
+	device = (struct kgsl_device *)pdev->id_entry->driver_data;
+	adreno_dev = ADRENO_DEVICE(device);
+	device->parentdev = &pdev->dev;
+
+	init_completion(&device->recovery_gate);
+
+	status = adreno_ringbuffer_init(device);
+	if (status != 0)
+		goto error;
+
+	status = kgsl_device_platform_probe(device, adreno_isr);
+	if (status)
+		goto error_close_rb;
+
+	adreno_debugfs_init(device);
+
+	kgsl_pwrscale_init(device);
+	kgsl_pwrscale_attach_policy(device, ADRENO_DEFAULT_PWRSCALE_POLICY);
+
+	device->flags &= ~KGSL_FLAGS_SOFT_RESET;
+	return 0;
+
+error_close_rb:
+	adreno_ringbuffer_close(&adreno_dev->ringbuffer);
+error:
+	device->parentdev = NULL;
+	return status;
+}
+
+static int __devexit adreno_remove(struct platform_device *pdev)
+{
+	struct kgsl_device *device;
+	struct adreno_device *adreno_dev;
+
+	device = (struct kgsl_device *)pdev->id_entry->driver_data;
+	adreno_dev = ADRENO_DEVICE(device);
+
+	kgsl_pwrscale_detach_policy(device);
+	kgsl_pwrscale_close(device);
+
+	adreno_ringbuffer_close(&adreno_dev->ringbuffer);
+	kgsl_device_platform_remove(device);
+
+	return 0;
+}
+
+static int adreno_start(struct kgsl_device *device, unsigned int init_ram)
+{
+	int status = -EINVAL;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	int init_reftimestamp = 0x7fffffff;
+
+	device->state = KGSL_STATE_INIT;
+	device->requested_state = KGSL_STATE_NONE;
+
+	/* Power up the device */
+	kgsl_pwrctrl_enable(device);
+
+	/* Identify the specific GPU */
+	adreno_identify_gpu(adreno_dev);
+
+	if (kgsl_mmu_start(device))
+		goto error_clk_off;
+
+	/* We need to make sure all blocks are powered up and clocked before
+	 * issuing a soft reset.  The overrides will then be turned off
+	 * (set to 0). */
+	adreno_regwrite(device, REG_RBBM_PM_OVERRIDE1, 0xfffffffe);
+	adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0xffffffff);
+
+	/* Only reset CP block if all blocks have previously been reset */
+	if (!(device->flags & KGSL_FLAGS_SOFT_RESET) ||
+		!adreno_is_a220(adreno_dev)) {
+		adreno_regwrite(device, REG_RBBM_SOFT_RESET, 0xFFFFFFFF);
+		device->flags |= KGSL_FLAGS_SOFT_RESET;
+	} else
+		adreno_regwrite(device, REG_RBBM_SOFT_RESET, 0x00000001);
+
+	/* The core is in an indeterminate state until the reset completes
+	 * after 30ms.
+	 */
+	msleep(30);
+
+	adreno_regwrite(device, REG_RBBM_SOFT_RESET, 0x00000000);
+
+	adreno_regwrite(device, REG_RBBM_CNTL, 0x00004442);
+
+	adreno_regwrite(device, REG_MH_ARBITER_CONFIG,
+				adreno_dev->mharb);
+
+	/* Remove 1k boundary check in z470 to avoid GPU hang.
+	   Note that this solution won't work if both EBI and SMI are used. */
+	if (adreno_is_a220(adreno_dev)) {
+		adreno_regwrite(device, REG_MH_CLNT_INTF_CTRL_CONFIG1,
+				 0x00032f07);
+	}
+
+	adreno_regwrite(device, REG_SQ_VS_PROGRAM, 0x00000000);
+	adreno_regwrite(device, REG_SQ_PS_PROGRAM, 0x00000000);
+
+	adreno_regwrite(device, REG_RBBM_PM_OVERRIDE1, 0);
+	if (!adreno_is_a220(adreno_dev))
+		adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0);
+	else
+		adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0x80);
+
+	kgsl_sharedmem_writel(&device->memstore,
+			      KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts),
+			      init_reftimestamp);
+
+	adreno_regwrite(device, REG_RBBM_DEBUG, 0x00080000);
+
+	/* Make sure interrupts are disabled */
+
+	adreno_regwrite(device, REG_RBBM_INT_CNTL, 0);
+	adreno_regwrite(device, REG_CP_INT_CNTL, 0);
+	adreno_regwrite(device, REG_SQ_INT_CNTL, 0);
+
+	if (adreno_is_a220(adreno_dev))
+		adreno_dev->gmemspace.sizebytes = SZ_512K;
+	else
+		adreno_dev->gmemspace.sizebytes = SZ_256K;
+	adreno_gmeminit(adreno_dev);
+
+	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
+
+	status = adreno_ringbuffer_start(&adreno_dev->ringbuffer, init_ram);
+	if (status != 0)
+		goto error_irq_off;
+
+	mod_timer(&device->idle_timer, jiffies + FIRST_TIMEOUT);
+	return status;
+
+error_irq_off:
+	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+error_clk_off:
+	kgsl_pwrctrl_disable(device);
+	kgsl_mmu_stop(device);
+
+	return status;
+}
+
+static int adreno_stop(struct kgsl_device *device)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+	del_timer(&device->idle_timer);
+
+	adreno_dev->drawctxt_active = NULL;
+
+	adreno_ringbuffer_stop(&adreno_dev->ringbuffer);
+
+	adreno_gmemclose(device);
+
+	kgsl_mmu_stop(device);
+
+	/* Power down the device */
+	kgsl_pwrctrl_disable(device);
+
+	return 0;
+}
+
+static int
+adreno_recover_hang(struct kgsl_device *device)
+{
+	int ret;
+	unsigned int *rb_buffer;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+	unsigned int timestamp;
+	unsigned int num_rb_contents;
+	unsigned int bad_context;
+	unsigned int reftimestamp;
+	unsigned int enable_ts;
+	unsigned int soptimestamp;
+	unsigned int eoptimestamp;
+	struct adreno_context *drawctxt;
+
+	KGSL_DRV_ERR(device, "Starting recovery from 3D GPU hang....\n");
+	rb_buffer = vmalloc(rb->buffer_desc.size);
+	if (!rb_buffer) {
+		KGSL_MEM_ERR(device,
+			"Failed to allocate memory for recovery: %x\n",
+			rb->buffer_desc.size);
+		return -ENOMEM;
+	}
+	/* Extract valid contents from the rb which can still be executed
+	 * after the hang */
+	ret = adreno_ringbuffer_extract(rb, rb_buffer, &num_rb_contents);
+	if (ret)
+		goto done;
+	timestamp = rb->timestamp;
+	KGSL_DRV_ERR(device, "Last issued timestamp: %x\n", timestamp);
+	kgsl_sharedmem_readl(&device->memstore, &bad_context,
+				KGSL_DEVICE_MEMSTORE_OFFSET(current_context));
+	kgsl_sharedmem_readl(&device->memstore, &reftimestamp,
+				KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts));
+	kgsl_sharedmem_readl(&device->memstore, &enable_ts,
+				KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable));
+	kgsl_sharedmem_readl(&device->memstore, &soptimestamp,
+				KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp));
+	kgsl_sharedmem_readl(&device->memstore, &eoptimestamp,
+				KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp));
+	/* Make sure memory is synchronized before restarting the GPU */
+	mb();
+	KGSL_CTXT_ERR(device,
+		"Context that caused a GPU hang: %x\n", bad_context);
+	/* restart device */
+	ret = adreno_stop(device);
+	if (ret)
+		goto done;
+	ret = adreno_start(device, true);
+	if (ret)
+		goto done;
+	KGSL_DRV_ERR(device, "Device has been restarted after hang\n");
+	/* Restore timestamp states */
+	kgsl_sharedmem_writel(&device->memstore,
+			KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp),
+			soptimestamp);
+	kgsl_sharedmem_writel(&device->memstore,
+			KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp),
+			eoptimestamp);
+	kgsl_sharedmem_writel(&device->memstore,
+			KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp),
+			soptimestamp);
+	if (num_rb_contents) {
+		kgsl_sharedmem_writel(&device->memstore,
+			KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts),
+			reftimestamp);
+		kgsl_sharedmem_writel(&device->memstore,
+			KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable),
+			enable_ts);
+	}
+	/* Make sure all writes are posted before the GPU reads them */
+	wmb();
+	/* Mark the invalid context so no more commands are accepted from
+	 * that context */
+
+	drawctxt = (struct adreno_context *) bad_context;
+
+	KGSL_CTXT_ERR(device,
+		"Context that caused a GPU hang: %x\n", bad_context);
+
+	drawctxt->flags |= CTXT_FLAGS_GPU_HANG;
+
+	/* Restore valid commands in ringbuffer */
+	adreno_ringbuffer_restore(rb, rb_buffer, num_rb_contents);
+	rb->timestamp = timestamp;
+done:
+	vfree(rb_buffer);
+	return ret;
+}
+
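+/*
+ * Serialize hang recovery: the static 'recovery' flag marks a recovery in
+ * progress, and 'recovery_gate' lets any other thread that detects a hang
+ * drop the device mutex and wait for that recovery to finish instead of
+ * starting a second one.
+ */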
+static int
+adreno_dump_and_recover(struct kgsl_device *device)
+{
+	static int recovery;
+	int result = -ETIMEDOUT;
+
+	if (device->state == KGSL_STATE_HUNG)
+		goto done;
+	if (device->state == KGSL_STATE_DUMP_AND_RECOVER && !recovery) {
+		mutex_unlock(&device->mutex);
+		wait_for_completion(&device->recovery_gate);
+		mutex_lock(&device->mutex);
+		if (!(device->state & KGSL_STATE_HUNG))
+			/* recovery success */
+			result = 0;
+	} else {
+		INIT_COMPLETION(device->recovery_gate);
+		/* Detected a hang - trigger an automatic dump */
+		adreno_postmortem_dump(device, 0);
+		if (!recovery) {
+			recovery = 1;
+			result = adreno_recover_hang(device);
+			if (result)
+				device->state = KGSL_STATE_HUNG;
+			recovery = 0;
+			complete_all(&device->recovery_gate);
+		} else
+			KGSL_DRV_ERR(device,
+				"Cannot recover from another hang while "
+				"recovering from a hang\n");
+	}
+done:
+	return result;
+}
+
+static int adreno_getproperty(struct kgsl_device *device,
+				enum kgsl_property_type type,
+				void *value,
+				unsigned int sizebytes)
+{
+	int status = -EINVAL;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	switch (type) {
+	case KGSL_PROP_DEVICE_INFO:
+		{
+			struct kgsl_devinfo devinfo;
+
+			if (sizebytes != sizeof(devinfo)) {
+				status = -EINVAL;
+				break;
+			}
+
+			memset(&devinfo, 0, sizeof(devinfo));
+			devinfo.device_id = device->id+1;
+			devinfo.chip_id = adreno_dev->chip_id;
+			devinfo.mmu_enabled = kgsl_mmu_enabled();
+			devinfo.gpu_id = adreno_dev->gpurev;
+			devinfo.gmem_gpubaseaddr = adreno_dev->gmemspace.
+					gpu_base;
+			devinfo.gmem_sizebytes = adreno_dev->gmemspace.
+					sizebytes;
+
+			if (copy_to_user(value, &devinfo, sizeof(devinfo)) !=
+					0) {
+				status = -EFAULT;
+				break;
+			}
+			status = 0;
+		}
+		break;
+	case KGSL_PROP_DEVICE_SHADOW:
+		{
+			struct kgsl_shadowprop shadowprop;
+
+			if (sizebytes != sizeof(shadowprop)) {
+				status = -EINVAL;
+				break;
+			}
+			memset(&shadowprop, 0, sizeof(shadowprop));
+			if (device->memstore.hostptr) {
+				/* NOTE: with mmu enabled, gpuaddr doesn't
+				 * mean anything to mmap().
+				 */
+				shadowprop.gpuaddr = device->memstore.physaddr;
+				shadowprop.size = device->memstore.size;
+				/* GSL needs this to be set, even if it
+				   appears to be meaningless */
+				shadowprop.flags = KGSL_FLAGS_INITIALIZED;
+			}
+			if (copy_to_user(value, &shadowprop,
+				sizeof(shadowprop))) {
+				status = -EFAULT;
+				break;
+			}
+			status = 0;
+		}
+		break;
+	case KGSL_PROP_MMU_ENABLE:
+		{
+#ifdef CONFIG_MSM_KGSL_MMU
+			int mmuProp = 1;
+#else
+			int mmuProp = 0;
+#endif
+			if (sizebytes != sizeof(int)) {
+				status = -EINVAL;
+				break;
+			}
+			if (copy_to_user(value, &mmuProp, sizeof(mmuProp))) {
+				status = -EFAULT;
+				break;
+			}
+			status = 0;
+		}
+		break;
+	case KGSL_PROP_INTERRUPT_WAITS:
+		{
+			int int_waits = 1;
+			if (sizebytes != sizeof(int)) {
+				status = -EINVAL;
+				break;
+			}
+			if (copy_to_user(value, &int_waits, sizeof(int))) {
+				status = -EFAULT;
+				break;
+			}
+			status = 0;
+		}
+		break;
+	default:
+		status = -EINVAL;
+	}
+
+	return status;
+}
+
+/* Caller must hold the device mutex. */
+int adreno_idle(struct kgsl_device *device, unsigned int timeout)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+	unsigned int rbbm_status;
+	unsigned long wait_time = jiffies + MAX_WAITGPU_SECS;
+
+	kgsl_cffdump_regpoll(device->id, REG_RBBM_STATUS << 2,
+		0x00000000, 0x80000000);
+	/* first, wait until the CP has consumed all the commands in
+	 * the ring buffer
+	 */
+retry:
+	if (rb->flags & KGSL_FLAGS_STARTED) {
+		do {
+			GSL_RB_GET_READPTR(rb, &rb->rptr);
+			if (time_after(jiffies, wait_time)) {
+				KGSL_DRV_ERR(device, "rptr: %x, wptr: %x\n",
+					rb->rptr, rb->wptr);
+				goto err;
+			}
+		} while (rb->rptr != rb->wptr);
+	}
+
+	/* now, wait for the GPU to finish its operations */
+	wait_time = jiffies + MAX_WAITGPU_SECS;
+	while (time_before(jiffies, wait_time)) {
+		adreno_regread(device, REG_RBBM_STATUS, &rbbm_status);
+		if (rbbm_status == 0x110)
+			return 0;
+	}
+
+err:
+	KGSL_DRV_ERR(device, "spun too long waiting for RB to idle\n");
+	if (!adreno_dump_and_recover(device)) {
+		wait_time = jiffies + MAX_WAITGPU_SECS;
+		goto retry;
+	}
+	return -ETIMEDOUT;
+}
+
+static unsigned int adreno_isidle(struct kgsl_device *device)
+{
+	int status = false;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+	unsigned int rbbm_status;
+
+	if (rb->flags & KGSL_FLAGS_STARTED) {
+		/* Is the ring buffer empty? */
+		GSL_RB_GET_READPTR(rb, &rb->rptr);
+		if (!device->active_cnt && (rb->rptr == rb->wptr)) {
+			/* Is the core idle? */
+			adreno_regread(device, REG_RBBM_STATUS,
+					    &rbbm_status);
+			if (rbbm_status == 0x110)
+				status = true;
+		}
+	} else {
+		KGSL_DRV_ERR(device, "ringbuffer not started\n");
+		BUG();
+	}
+	return status;
+}
+
+/* Caller must hold the device mutex. */
+static int adreno_suspend_context(struct kgsl_device *device)
+{
+	int status = 0;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	/* switch to NULL ctxt */
+	if (adreno_dev->drawctxt_active != NULL) {
+		adreno_drawctxt_switch(adreno_dev, NULL, 0);
+		status = adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
+	}
+
+	return status;
+}
+
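+/*
+ * Translate a GPU address into a kernel virtual address for dump/debug use.
+ * The address is looked up, in order, in the ringbuffer, the ringbuffer
+ * memptrs, the device memstore, per-process allocations (optionally filtered
+ * by pagetable base), and finally the device memqueue of freed entries.
+ */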
+uint8_t *kgsl_sharedmem_convertaddr(struct kgsl_device *device,
+	unsigned int pt_base, unsigned int gpuaddr, unsigned int *size)
+{
+	uint8_t *result = NULL;
+	struct kgsl_mem_entry *entry;
+	struct kgsl_process_private *priv;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_ringbuffer *ringbuffer = &adreno_dev->ringbuffer;
+
+	if (kgsl_gpuaddr_in_memdesc(&ringbuffer->buffer_desc, gpuaddr)) {
+		return kgsl_gpuaddr_to_vaddr(&ringbuffer->buffer_desc,
+					gpuaddr, size);
+	}
+
+	if (kgsl_gpuaddr_in_memdesc(&ringbuffer->memptrs_desc, gpuaddr)) {
+		return kgsl_gpuaddr_to_vaddr(&ringbuffer->memptrs_desc,
+					gpuaddr, size);
+	}
+
+	if (kgsl_gpuaddr_in_memdesc(&device->memstore, gpuaddr)) {
+		return kgsl_gpuaddr_to_vaddr(&device->memstore,
+					gpuaddr, size);
+	}
+
+	mutex_lock(&kgsl_driver.process_mutex);
+	list_for_each_entry(priv, &kgsl_driver.process_list, list) {
+		if (pt_base != 0
+			&& priv->pagetable
+			&& priv->pagetable->base.gpuaddr != pt_base) {
+			continue;
+		}
+
+		spin_lock(&priv->mem_lock);
+		entry = kgsl_sharedmem_find_region(priv, gpuaddr,
+						sizeof(unsigned int));
+		if (entry) {
+			result = kgsl_gpuaddr_to_vaddr(&entry->memdesc,
+							gpuaddr, size);
+			spin_unlock(&priv->mem_lock);
+			mutex_unlock(&kgsl_driver.process_mutex);
+			return result;
+		}
+		spin_unlock(&priv->mem_lock);
+	}
+	mutex_unlock(&kgsl_driver.process_mutex);
+
+	BUG_ON(!mutex_is_locked(&device->mutex));
+	list_for_each_entry(entry, &device->memqueue, list) {
+		if (kgsl_gpuaddr_in_memdesc(&entry->memdesc, gpuaddr)) {
+			result = kgsl_gpuaddr_to_vaddr(&entry->memdesc,
+							gpuaddr, size);
+			break;
+		}
+
+	}
+	return result;
+}
+
+void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
+				unsigned int *value)
+{
+	unsigned int *reg;
+	BUG_ON(offsetwords*sizeof(uint32_t) >= device->regspace.sizebytes);
+	reg = (unsigned int *)(device->regspace.mmio_virt_base
+				+ (offsetwords << 2));
+
+	if (!in_interrupt())
+		kgsl_pre_hwaccess(device);
+
+	/* ensure this read finishes before the next one,
+	 * i.e. act like normal readl() */
+	*value = __raw_readl(reg);
+	rmb();
+}
+
+void adreno_regwrite(struct kgsl_device *device, unsigned int offsetwords,
+				unsigned int value)
+{
+	unsigned int *reg;
+
+	BUG_ON(offsetwords*sizeof(uint32_t) >= device->regspace.sizebytes);
+
+	if (!in_interrupt())
+		kgsl_pre_hwaccess(device);
+
+	kgsl_cffdump_regwrite(device->id, offsetwords << 2, value);
+	reg = (unsigned int *)(device->regspace.mmio_virt_base
+				+ (offsetwords << 2));
+
+	/* ensure previous writes post before this one,
+	 * i.e. act like normal writel() */
+	wmb();
+	__raw_writel(value, reg);
+}
+
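+/*
+ * Check whether 'timestamp' has been reached; if not, arm the timestamp
+ * compare interrupt by publishing the earliest awaited timestamp in
+ * ref_wait_ts and setting ts_cmp_enable in the memstore, then submit a
+ * dummy NOP packet so an interrupt still fires even if every command up to
+ * that timestamp has already executed.
+ */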
+static int kgsl_check_interrupt_timestamp(struct kgsl_device *device,
+					unsigned int timestamp)
+{
+	int status;
+	unsigned int ref_ts, enableflag;
+
+	status = kgsl_check_timestamp(device, timestamp);
+	if (!status) {
+		mutex_lock(&device->mutex);
+		kgsl_sharedmem_readl(&device->memstore, &enableflag,
+			KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable));
+		mb();
+
+		if (enableflag) {
+			kgsl_sharedmem_readl(&device->memstore, &ref_ts,
+				KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts));
+			mb();
+			if (timestamp_cmp(ref_ts, timestamp)) {
+				kgsl_sharedmem_writel(&device->memstore,
+				KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts),
+				timestamp);
+				wmb();
+			}
+		} else {
+			unsigned int cmds[2];
+			kgsl_sharedmem_writel(&device->memstore,
+				KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts),
+				timestamp);
+			enableflag = 1;
+			kgsl_sharedmem_writel(&device->memstore,
+				KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable),
+				enableflag);
+			wmb();
+			/* submit a dummy packet so that even if all
+			 * commands up to timestamp get executed we will
+			 * still get an interrupt */
+			cmds[0] = pm4_type3_packet(PM4_NOP, 1);
+			cmds[1] = 0;
+			adreno_ringbuffer_issuecmds(device, 0, &cmds[0], 2);
+		}
+		mutex_unlock(&device->mutex);
+	}
+
+	return status;
+}
+
+/*
+ * wait_io_event_interruptible_timeout checks for the exit condition before
+ * placing a process on the wait queue. For conditional interrupts we expect
+ * the process to already be on its wait queue when its exit-condition check
+ * is called.
+ */
+#define kgsl_wait_io_event_interruptible_timeout(wq, condition, timeout)\
+({									\
+	long __ret = timeout;						\
+	__wait_io_event_interruptible_timeout(wq, condition, __ret);	\
+	__ret;								\
+})
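+/*
+ * Note: __wait_io_event_interruptible_timeout() is expected to update __ret
+ * in place, so this macro evaluates to the remaining jiffies on success, 0
+ * on timeout, or a negative value if the wait was interrupted by a signal.
+ */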
+
+/* MUST be called with the device mutex held */
+static int adreno_waittimestamp(struct kgsl_device *device,
+				unsigned int timestamp,
+				unsigned int msecs)
+{
+	long status = 0;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	if (timestamp != adreno_dev->ringbuffer.timestamp &&
+		timestamp_cmp(timestamp,
+		adreno_dev->ringbuffer.timestamp)) {
+		KGSL_DRV_ERR(device, "Cannot wait for invalid ts: %x, "
+			"rb->timestamp: %x\n",
+			timestamp, adreno_dev->ringbuffer.timestamp);
+		status = -EINVAL;
+		goto done;
+	}
+	if (!kgsl_check_timestamp(device, timestamp)) {
+		mutex_unlock(&device->mutex);
+		/* We need to make sure that the process is placed on the
+		 * wait queue before its wait condition is evaluated */
+		status = kgsl_wait_io_event_interruptible_timeout(
+				device->wait_queue,
+				kgsl_check_interrupt_timestamp(device,
+					timestamp), msecs_to_jiffies(msecs));
+		mutex_lock(&device->mutex);
+
+		if (status > 0)
+			status = 0;
+		else if (status == 0) {
+			if (!kgsl_check_timestamp(device, timestamp)) {
+				status = -ETIMEDOUT;
+				KGSL_DRV_ERR(device,
+					"Device hang detected while waiting "
+					"for timestamp: %x, last "
+					"submitted(rb->timestamp): %x, wptr: "
+					"%x\n", timestamp,
+					adreno_dev->ringbuffer.timestamp,
+					adreno_dev->ringbuffer.wptr);
+				if (!adreno_dump_and_recover(device)) {
+					/* wait for idle after recovery as the
+					 * timestamp that this process wanted
+					 * to wait on may be invalid */
+					if (!adreno_idle(device,
+						KGSL_TIMEOUT_DEFAULT))
+						status = 0;
+				}
+			}
+		}
+	}
+
+done:
+	return (int)status;
+}
+
+static unsigned int adreno_readtimestamp(struct kgsl_device *device,
+			     enum kgsl_timestamp_type type)
+{
+	unsigned int timestamp = 0;
+
+	if (type == KGSL_TIMESTAMP_CONSUMED)
+		adreno_regread(device, REG_CP_TIMESTAMP, &timestamp);
+	else if (type == KGSL_TIMESTAMP_RETIRED)
+		kgsl_sharedmem_readl(&device->memstore, &timestamp,
+				 KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp));
+	rmb();
+
+	return timestamp;
+}
+
+static long adreno_ioctl(struct kgsl_device_private *dev_priv,
+			      unsigned int cmd, void *data)
+{
+	int result = 0;
+	struct kgsl_drawctxt_set_bin_base_offset *binbase;
+	struct kgsl_context *context;
+
+	switch (cmd) {
+	case IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET:
+		binbase = data;
+
+		context = kgsl_find_context(dev_priv, binbase->drawctxt_id);
+		if (context) {
+			adreno_drawctxt_set_bin_base_offset(
+				dev_priv->device, context, binbase->offset);
+		} else {
+			result = -EINVAL;
+			KGSL_DRV_ERR(dev_priv->device,
+				"invalid drawctxt drawctxt_id %d "
+				"device_id=%d\n",
+				binbase->drawctxt_id, dev_priv->device->id);
+		}
+		break;
+
+	default:
+		KGSL_DRV_INFO(dev_priv->device,
+			"invalid ioctl code %08x\n", cmd);
+		result = -EINVAL;
+		break;
+	}
+	return result;
+}
+
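+/*
+ * Convert GPU busy-cycle counts to microseconds. For example, at
+ * gpu_freq = 200000000 (200 MHz) the divisor becomes 200 ticks per
+ * microsecond, so 1,000,000 ticks works out to 5,000 us.
+ */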
+static inline s64 adreno_ticks_to_us(u32 ticks, u32 gpu_freq)
+{
+	gpu_freq /= 1000000;
+	return ticks / gpu_freq;
+}
+
+static void adreno_power_stats(struct kgsl_device *device,
+				struct kgsl_power_stats *stats)
+{
+	unsigned int reg;
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+
+	/* In order to calculate idle you have to have run the algorithm
+	 * at least once to get a start time. */
+	if (pwr->time != 0) {
+		s64 tmp;
+		/* Stop the performance monitor and read the current
+		 * busy cycles. */
+		adreno_regwrite(device,
+			REG_CP_PERFMON_CNTL,
+			REG_PERF_MODE_CNT |
+			REG_PERF_STATE_FREEZE);
+		adreno_regread(device, REG_RBBM_PERFCOUNTER1_LO, &reg);
+		tmp = ktime_to_us(ktime_get());
+		stats->total_time = tmp - pwr->time;
+		pwr->time = tmp;
+		stats->busy_time = adreno_ticks_to_us(reg, device->pwrctrl.
+				pwrlevels[device->pwrctrl.active_pwrlevel].
+				gpu_freq);
+
+		adreno_regwrite(device,
+			REG_CP_PERFMON_CNTL,
+			REG_PERF_MODE_CNT |
+			REG_PERF_STATE_RESET);
+	} else {
+		stats->total_time = 0;
+		stats->busy_time = 0;
+		pwr->time = ktime_to_us(ktime_get());
+	}
+
+	/* re-enable the performance monitors */
+	adreno_regread(device, REG_RBBM_PM_OVERRIDE2, &reg);
+	adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, (reg | 0x40));
+	adreno_regwrite(device, REG_RBBM_PERFCOUNTER1_SELECT, 0x1);
+	adreno_regwrite(device,
+		REG_CP_PERFMON_CNTL,
+		REG_PERF_MODE_CNT | REG_PERF_STATE_ENABLE);
+}
+
+void adreno_irqctrl(struct kgsl_device *device, int state)
+{
+	/* Enable GPU and GPUMMU interrupts */
+
+	if (state) {
+		adreno_regwrite(device, REG_RBBM_INT_CNTL, KGSL_RBBM_INT_MASK);
+		adreno_regwrite(device, REG_CP_INT_CNTL, KGSL_CP_INT_MASK);
+		adreno_regwrite(device, MH_INTERRUPT_MASK, KGSL_MMU_INT_MASK);
+	} else {
+		adreno_regwrite(device, REG_RBBM_INT_CNTL, 0);
+		adreno_regwrite(device, REG_CP_INT_CNTL, 0);
+		adreno_regwrite(device, MH_INTERRUPT_MASK, 0);
+	}
+}
+
+static const struct kgsl_functable adreno_functable = {
+	/* Mandatory functions */
+	.regread = adreno_regread,
+	.regwrite = adreno_regwrite,
+	.idle = adreno_idle,
+	.isidle = adreno_isidle,
+	.suspend_context = adreno_suspend_context,
+	.start = adreno_start,
+	.stop = adreno_stop,
+	.getproperty = adreno_getproperty,
+	.waittimestamp = adreno_waittimestamp,
+	.readtimestamp = adreno_readtimestamp,
+	.issueibcmds = adreno_ringbuffer_issueibcmds,
+	.ioctl = adreno_ioctl,
+	.setup_pt = adreno_setup_pt,
+	.cleanup_pt = adreno_cleanup_pt,
+	.power_stats = adreno_power_stats,
+	.irqctrl = adreno_irqctrl,
+	/* Optional functions */
+	.setstate = adreno_setstate,
+	.drawctxt_create = adreno_drawctxt_create,
+	.drawctxt_destroy = adreno_drawctxt_destroy,
+};
+
+static struct platform_device_id adreno_id_table[] = {
+	{ DEVICE_3D0_NAME, (kernel_ulong_t)&device_3d0.dev, },
+	{ },
+};
+MODULE_DEVICE_TABLE(platform, adreno_id_table);
+
+static struct platform_driver adreno_platform_driver = {
+	.probe = adreno_probe,
+	.remove = __devexit_p(adreno_remove),
+	.suspend = kgsl_suspend_driver,
+	.resume = kgsl_resume_driver,
+	.id_table = adreno_id_table,
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = DEVICE_3D_NAME,
+		.pm = &kgsl_pm_ops,
+	}
+};
+
+static int __init kgsl_3d_init(void)
+{
+	return platform_driver_register(&adreno_platform_driver);
+}
+
+static void __exit kgsl_3d_exit(void)
+{
+	platform_driver_unregister(&adreno_platform_driver);
+}
+
+module_init(kgsl_3d_init);
+module_exit(kgsl_3d_exit);
+
+MODULE_DESCRIPTION("3D Graphics driver");
+MODULE_VERSION("1.2");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:kgsl_3d");
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
new file mode 100644
index 0000000..597c6b8
--- /dev/null
+++ b/drivers/gpu/msm/adreno.h
@@ -0,0 +1,116 @@
+/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ADRENO_H
+#define __ADRENO_H
+
+#include "kgsl_device.h"
+#include "adreno_drawctxt.h"
+#include "adreno_ringbuffer.h"
+
+#define DEVICE_3D_NAME "kgsl-3d"
+#define DEVICE_3D0_NAME "kgsl-3d0"
+
+#define ADRENO_DEVICE(device) \
+		KGSL_CONTAINER_OF(device, struct adreno_device, dev)
+
+/* Flags to control command packet settings */
+#define KGSL_CMD_FLAGS_PMODE		0x00000001
+#define KGSL_CMD_FLAGS_NO_TS_CMP	0x00000002
+#define KGSL_CMD_FLAGS_NOT_KERNEL_CMD	0x00000004
+
+/* Command identifiers */
+#define KGSL_CONTEXT_TO_MEM_IDENTIFIER	0xDEADBEEF
+#define KGSL_CMD_IDENTIFIER		0xFEEDFACE
+
+#ifdef CONFIG_MSM_SCM
+#define ADRENO_DEFAULT_PWRSCALE_POLICY  (&kgsl_pwrscale_policy_tz)
+#else
+#define ADRENO_DEFAULT_PWRSCALE_POLICY  NULL
+#endif
+
+#define KGSL_CP_INT_MASK \
+	(CP_INT_CNTL__SW_INT_MASK | \
+	CP_INT_CNTL__T0_PACKET_IN_IB_MASK | \
+	CP_INT_CNTL__OPCODE_ERROR_MASK | \
+	CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK | \
+	CP_INT_CNTL__RESERVED_BIT_ERROR_MASK | \
+	CP_INT_CNTL__IB_ERROR_MASK | \
+	CP_INT_CNTL__IB2_INT_MASK | \
+	CP_INT_CNTL__IB1_INT_MASK | \
+	CP_INT_CNTL__RB_INT_MASK)
+
+enum adreno_gpurev {
+	ADRENO_REV_UNKNOWN = 0,
+	ADRENO_REV_A200 = 200,
+	ADRENO_REV_A205 = 205,
+	ADRENO_REV_A220 = 220,
+	ADRENO_REV_A225 = 225,
+};
+
+struct adreno_device {
+	struct kgsl_device dev;    /* Must be first field in this struct */
+	unsigned int chip_id;
+	enum adreno_gpurev gpurev;
+	struct kgsl_memregion gmemspace;
+	struct adreno_context *drawctxt_active;
+	wait_queue_head_t ib1_wq;
+	unsigned int *pfp_fw;
+	size_t pfp_fw_size;
+	unsigned int *pm4_fw;
+	size_t pm4_fw_size;
+	struct adreno_ringbuffer ringbuffer;
+	unsigned int mharb;
+};
+
+int adreno_idle(struct kgsl_device *device, unsigned int timeout);
+void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
+				unsigned int *value);
+void adreno_regwrite(struct kgsl_device *device, unsigned int offsetwords,
+				unsigned int value);
+
+uint8_t *kgsl_sharedmem_convertaddr(struct kgsl_device *device,
+	unsigned int pt_base, unsigned int gpuaddr, unsigned int *size);
+
+static inline int adreno_is_a200(struct adreno_device *adreno_dev)
+{
+	return (adreno_dev->gpurev == ADRENO_REV_A200);
+}
+
+static inline int adreno_is_a205(struct adreno_device *adreno_dev)
+{
+	return (adreno_dev->gpurev == ADRENO_REV_A205);
+}
+
+static inline int adreno_is_a20x(struct adreno_device *adreno_dev)
+{
+	return (adreno_dev->gpurev  == ADRENO_REV_A200 ||
+		adreno_dev->gpurev == ADRENO_REV_A205);
+}
+
+static inline int adreno_is_a220(struct adreno_device *adreno_dev)
+{
+	return (adreno_dev->gpurev == ADRENO_REV_A220);
+}
+
+static inline int adreno_is_a225(struct adreno_device *adreno_dev)
+{
+	return (adreno_dev->gpurev == ADRENO_REV_A225);
+}
+
+static inline int adreno_is_a22x(struct adreno_device *adreno_dev)
+{
+	return (adreno_dev->gpurev  == ADRENO_REV_A220 ||
+		adreno_dev->gpurev == ADRENO_REV_A225);
+}
+
+#endif /*__ADRENO_H */
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
new file mode 100644
index 0000000..b897e10
--- /dev/null
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -0,0 +1,451 @@
+/* Copyright (c) 2002,2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include "kgsl.h"
+#include "adreno_postmortem.h"
+#include "adreno.h"
+
+#include "a200_reg.h"
+
+unsigned int kgsl_cff_dump_enable;
+int kgsl_pm_regs_enabled;
+
+static uint32_t kgsl_ib_base;
+static uint32_t kgsl_ib_size;
+
+static struct dentry *pm_d_debugfs;
+
+static int pm_dump_set(void *data, u64 val)
+{
+	struct kgsl_device *device = data;
+
+	if (val) {
+		mutex_lock(&device->mutex);
+		adreno_postmortem_dump(device, 1);
+		mutex_unlock(&device->mutex);
+	}
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(pm_dump_fops,
+			NULL,
+			pm_dump_set, "%llu\n");
+
+static int pm_regs_enabled_set(void *data, u64 val)
+{
+	kgsl_pm_regs_enabled = val ? 1 : 0;
+	return 0;
+}
+
+static int pm_regs_enabled_get(void *data, u64 *val)
+{
+	*val = kgsl_pm_regs_enabled;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(pm_regs_enabled_fops,
+			pm_regs_enabled_get,
+			pm_regs_enabled_set, "%llu\n");
+
+
+static int kgsl_cff_dump_enable_set(void *data, u64 val)
+{
+#ifdef CONFIG_MSM_KGSL_CFF_DUMP
+	kgsl_cff_dump_enable = (val != 0);
+	return 0;
+#else
+	return -EINVAL;
+#endif
+}
+
+static int kgsl_cff_dump_enable_get(void *data, u64 *val)
+{
+	*val = kgsl_cff_dump_enable;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(kgsl_cff_dump_enable_fops, kgsl_cff_dump_enable_get,
+			kgsl_cff_dump_enable_set, "%llu\n");
+
+static int kgsl_dbgfs_open(struct inode *inode, struct file *file)
+{
+	file->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static int kgsl_dbgfs_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static int kgsl_hex_dump(const char *prefix, int c, uint8_t *data,
+	int rowc, int linec, char __user *buff)
+{
+	int ss;
+	/* Prefix of 20 chars max, 32 bytes per row, in groups of four - that's
+	 * 8 groups at 8 chars per group plus a space, plus new-line, plus
+	 * ending character */
+	char linebuf[20 + 64 + 1 + 1];
+
+	ss = snprintf(linebuf, sizeof(linebuf), prefix, c);
+	hex_dump_to_buffer(data, linec, rowc, 4, linebuf+ss,
+		sizeof(linebuf)-ss, 0);
+	strncat(linebuf, "\n", sizeof(linebuf));
+	linebuf[sizeof(linebuf)-1] = 0;
+	ss = strlen(linebuf);
+	if (copy_to_user(buff, linebuf, ss+1))
+		return -EFAULT;
+	return ss;
+}
+
+static ssize_t kgsl_ib_dump_read(
+	struct file *file,
+	char __user *buff,
+	size_t buff_count,
+	loff_t *ppos)
+{
+	int i, count = kgsl_ib_size, remaining, pos = 0, tot = 0, ss;
+	struct kgsl_device *device = file->private_data;
+	const int rowc = 32;
+	unsigned int pt_base, ib_memsize;
+	uint8_t *base_addr;
+	char linebuf[80];
+
+	if (!ppos || !device || !kgsl_ib_base)
+		return 0;
+
+	kgsl_regread(device, MH_MMU_PT_BASE, &pt_base);
+	base_addr = kgsl_sharedmem_convertaddr(device, pt_base, kgsl_ib_base,
+		&ib_memsize);
+
+	if (!base_addr)
+		return 0;
+
+	pr_info("%s ppos=%ld, buff_count=%zu, count=%d\n", __func__,
+		(long)*ppos, buff_count, count);
+	ss = snprintf(linebuf, sizeof(linebuf), "IB: base=%08x(%08x"
+		"), size=%d, memsize=%d\n", kgsl_ib_base,
+		(uint32_t)base_addr, kgsl_ib_size, ib_memsize);
+	if (*ppos == 0) {
+		if (copy_to_user(buff, linebuf, ss+1))
+			return -EFAULT;
+		tot += ss;
+		buff += ss;
+		*ppos += ss;
+	}
+	pos += ss;
+	remaining = count;
+	for (i = 0; i < count; i += rowc) {
+		int linec = min(remaining, rowc);
+
+		remaining -= rowc;
+		ss = kgsl_hex_dump("IB: %05x: ", i, base_addr, rowc, linec,
+			buff);
+		if (ss < 0)
+			return ss;
+
+		if (pos >= *ppos) {
+			if (tot+ss >= buff_count) {
+				ss = copy_to_user(buff, "", 1);
+				return tot;
+			}
+			tot += ss;
+			buff += ss;
+			*ppos += ss;
+		}
+		pos += ss;
+		base_addr += linec;
+	}
+
+	return tot;
+}
+
+static ssize_t kgsl_ib_dump_write(
+	struct file *file,
+	const char __user *buff,
+	size_t count,
+	loff_t *ppos)
+{
+	char local_buff[64];
+
+	if (count >= sizeof(local_buff))
+		return -EFAULT;
+
+	if (copy_from_user(local_buff, buff, count))
+		return -EFAULT;
+
+	local_buff[count] = 0;	/* end of string */
+	sscanf(local_buff, "%x %d", &kgsl_ib_base, &kgsl_ib_size);
+
+	pr_info("%s: base=%08X size=%d\n", __func__, kgsl_ib_base,
+		kgsl_ib_size);
+
+	return count;
+}
+
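+/*
+ * Usage sketch (illustrative values only): write the IB base address and a
+ * byte count to the "ib_dump" debugfs file, e.g. "echo 'c0001000 128' >
+ * ib_dump", then read the same file back to get a hex dump of that indirect
+ * buffer as resolved through kgsl_sharedmem_convertaddr().
+ */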
+static const struct file_operations kgsl_ib_dump_fops = {
+	.open = kgsl_dbgfs_open,
+	.release = kgsl_dbgfs_release,
+	.read = kgsl_ib_dump_read,
+	.write = kgsl_ib_dump_write,
+};
+
+static int kgsl_regread_nolock(struct kgsl_device *device,
+	unsigned int offsetwords, unsigned int *value)
+{
+	unsigned int *reg;
+
+	if (offsetwords*sizeof(uint32_t) >= device->regspace.sizebytes) {
+		KGSL_DRV_ERR(device, "invalid offset %d\n", offsetwords);
+		return -ERANGE;
+	}
+
+	reg = (unsigned int *)(device->regspace.mmio_virt_base
+				+ (offsetwords << 2));
+	*value = __raw_readl(reg);
+	return 0;
+}
+
+#define KGSL_ISTORE_START 0x5000
+#define KGSL_ISTORE_LENGTH 0x600
+static ssize_t kgsl_istore_read(
+	struct file *file,
+	char __user *buff,
+	size_t buff_count,
+	loff_t *ppos)
+{
+	int i, count = KGSL_ISTORE_LENGTH, remaining, pos = 0, tot = 0;
+	struct kgsl_device *device = file->private_data;
+	const int rowc = 8;
+
+	if (!ppos || !device)
+		return 0;
+
+	remaining = count;
+	for (i = 0; i < count; i += rowc) {
+		unsigned int vals[rowc];
+		int j, ss;
+		int linec = min(remaining, rowc);
+		remaining -= rowc;
+
+		if (pos >= *ppos) {
+			for (j = 0; j < linec; ++j)
+				kgsl_regread_nolock(device,
+					KGSL_ISTORE_START+i+j, vals+j);
+		} else
+			memset(vals, 0, sizeof(vals));
+
+		ss = kgsl_hex_dump("IS: %04x: ", i, (uint8_t *)vals, rowc*4,
+			linec*4, buff);
+		if (ss < 0)
+			return ss;
+
+		if (pos >= *ppos) {
+			if (tot+ss >= buff_count)
+				return tot;
+			tot += ss;
+			buff += ss;
+			*ppos += ss;
+		}
+		pos += ss;
+	}
+
+	return tot;
+}
+
+static const struct file_operations kgsl_istore_fops = {
+	.open = kgsl_dbgfs_open,
+	.release = kgsl_dbgfs_release,
+	.read = kgsl_istore_read,
+	.llseek = default_llseek,
+};
+
+typedef void (*reg_read_init_t)(struct kgsl_device *device);
+typedef void (*reg_read_fill_t)(struct kgsl_device *device, int i,
+	unsigned int *vals, int linec);
+static ssize_t kgsl_reg_read(struct kgsl_device *device, int count,
+	reg_read_init_t reg_read_init,
+	reg_read_fill_t reg_read_fill, const char *prefix, char __user *buff,
+	loff_t *ppos)
+{
+	int i, remaining;
+	const int rowc = 8;
+
+	if (!ppos || *ppos || !device)
+		return 0;
+
+	mutex_lock(&device->mutex);
+	reg_read_init(device);
+	remaining = count;
+	for (i = 0; i < count; i += rowc) {
+		unsigned int vals[rowc];
+		int ss;
+		int linec = min(remaining, rowc);
+		remaining -= rowc;
+
+		reg_read_fill(device, i, vals, linec);
+		ss = kgsl_hex_dump(prefix, i, (uint8_t *)vals, rowc*4, linec*4,
+			buff);
+		if (ss < 0) {
+			mutex_unlock(&device->mutex);
+			return ss;
+		}
+		buff += ss;
+		*ppos += ss;
+	}
+	mutex_unlock(&device->mutex);
+
+	return *ppos;
+}
+
+
+static void kgsl_sx_reg_read_init(struct kgsl_device *device)
+{
+	kgsl_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0xFF);
+	kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0);
+}
+
+static void kgsl_sx_reg_read_fill(struct kgsl_device *device, int i,
+	unsigned int *vals, int linec)
+{
+	int j;
+
+	for (j = 0; j < linec; ++j) {
+		kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0x1B00 | i);
+		kgsl_regread(device, REG_RBBM_DEBUG_OUT, vals+j);
+	}
+}
+
+static ssize_t kgsl_sx_debug_read(
+	struct file *file,
+	char __user *buff,
+	size_t buff_count,
+	loff_t *ppos)
+{
+	struct kgsl_device *device = file->private_data;
+	return kgsl_reg_read(device, 0x1B, kgsl_sx_reg_read_init,
+			     kgsl_sx_reg_read_fill, "SX: %02x: ", buff, ppos);
+}
+
+static const struct file_operations kgsl_sx_debug_fops = {
+	.open = kgsl_dbgfs_open,
+	.release = kgsl_dbgfs_release,
+	.read = kgsl_sx_debug_read,
+};
+
+static void kgsl_cp_reg_read_init(struct kgsl_device *device)
+{
+	kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0);
+}
+
+static void kgsl_cp_reg_read_fill(struct kgsl_device *device, int i,
+	unsigned int *vals, int linec)
+{
+	int j;
+
+	for (j = 0; j < linec; ++j) {
+		kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0x1628);
+		kgsl_regread(device, REG_RBBM_DEBUG_OUT, vals+j);
+		msleep(100);
+	}
+}
+
+static ssize_t kgsl_cp_debug_read(
+	struct file *file,
+	char __user *buff,
+	size_t buff_count,
+	loff_t *ppos)
+{
+	struct kgsl_device *device = file->private_data;
+	return kgsl_reg_read(device, 20, kgsl_cp_reg_read_init,
+		kgsl_cp_reg_read_fill,
+		"CP: %02x: ", buff, ppos);
+}
+
+static const struct file_operations kgsl_cp_debug_fops = {
+	.open = kgsl_dbgfs_open,
+	.release = kgsl_dbgfs_release,
+	.read = kgsl_cp_debug_read,
+};
+
+static void kgsl_mh_reg_read_init(struct kgsl_device *device)
+{
+	kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0);
+}
+
+static void kgsl_mh_reg_read_fill(struct kgsl_device *device, int i,
+	unsigned int *vals, int linec)
+{
+	int j;
+
+	for (j = 0; j < linec; ++j) {
+		kgsl_regwrite(device, REG_MH_DEBUG_CTRL, i+j);
+		kgsl_regread(device, REG_MH_DEBUG_DATA, vals+j);
+	}
+}
+
+static ssize_t kgsl_mh_debug_read(
+	struct file *file,
+	char __user *buff,
+	size_t buff_count,
+	loff_t *ppos)
+{
+	struct kgsl_device *device = file->private_data;
+	return kgsl_reg_read(device, 0x40, kgsl_mh_reg_read_init,
+		kgsl_mh_reg_read_fill,
+		"MH: %02x: ", buff, ppos);
+}
+
+static const struct file_operations kgsl_mh_debug_fops = {
+	.open = kgsl_dbgfs_open,
+	.release = kgsl_dbgfs_release,
+	.read = kgsl_mh_debug_read,
+};
+
+void adreno_debugfs_init(struct kgsl_device *device)
+{
+	if (!device->d_debugfs || IS_ERR(device->d_debugfs))
+		return;
+
+	debugfs_create_file("ib_dump",  0600, device->d_debugfs, device,
+			    &kgsl_ib_dump_fops);
+	debugfs_create_file("istore",   0400, device->d_debugfs, device,
+			    &kgsl_istore_fops);
+	debugfs_create_file("sx_debug", 0400, device->d_debugfs, device,
+			    &kgsl_sx_debug_fops);
+	debugfs_create_file("cp_debug", 0400, device->d_debugfs, device,
+			    &kgsl_cp_debug_fops);
+	debugfs_create_file("mh_debug", 0400, device->d_debugfs, device,
+			    &kgsl_mh_debug_fops);
+	debugfs_create_file("cff_dump", 0644, device->d_debugfs, device,
+			    &kgsl_cff_dump_enable_fops);
+
+	/* Create post mortem control files */
+
+	pm_d_debugfs = debugfs_create_dir("postmortem", device->d_debugfs);
+
+	if (IS_ERR(pm_d_debugfs))
+		return;
+
+	debugfs_create_file("dump",  0600, pm_d_debugfs, device,
+			    &pm_dump_fops);
+	debugfs_create_file("regs_enabled", 0644, pm_d_debugfs, device,
+			    &pm_regs_enabled_fops);
+}
diff --git a/drivers/gpu/msm/adreno_debugfs.h b/drivers/gpu/msm/adreno_debugfs.h
new file mode 100644
index 0000000..0356ac6
--- /dev/null
+++ b/drivers/gpu/msm/adreno_debugfs.h
@@ -0,0 +1,40 @@
+/* Copyright (c) 2002,2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ADRENO_DEBUGFS_H
+#define __ADRENO_DEBUGFS_H
+
+#ifdef CONFIG_DEBUG_FS
+
+void adreno_debugfs_init(struct kgsl_device *device);
+
+extern int kgsl_pm_regs_enabled;
+
+static inline int kgsl_pmregs_enabled(void)
+{
+	return kgsl_pm_regs_enabled;
+}
+
+#else
+static inline void adreno_debugfs_init(struct kgsl_device *device)
+{
+}
+
+static inline int kgsl_pmregs_enabled(void)
+{
+	/* If debugfs is turned off, then always print registers */
+	return 1;
+}
+#endif
+
+#endif /* __ADRENO_DEBUGFS_H */
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
new file mode 100644
index 0000000..4db3966
--- /dev/null
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -0,0 +1,1645 @@
+/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/slab.h>
+
+#include "kgsl.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_cffdump.h"
+
+#include "adreno.h"
+#include "adreno_pm4types.h"
+#include "adreno_drawctxt.h"
+
+/*
+ *
+ *  Memory Map for Register, Constant & Instruction Shadow, and Command Buffers
+ *  (34.5KB)
+ *
+ *  +---------------------+------------+-------------+---+---------------------+
+ *  | ALU Constant Shadow | Reg Shadow | C&V Buffers |Tex| Shader Instr Shadow |
+ *  +---------------------+------------+-------------+---+---------------------+
+ *    ________________________________/               \____________________
+ *   /                                                                     |
+ *  +--------------+-----------+------+-----------+------------------------+
+ *  | Restore Regs | Save Regs | Quad | Gmem Save | Gmem Restore | unused  |
+ *  +--------------+-----------+------+-----------+------------------------+
+ *
+ *              8K - ALU Constant Shadow (8K aligned)
+ *              4K - H/W Register Shadow (8K aligned)
+ *              4K - Command and Vertex Buffers
+ *                         - Indirect command buffer : Const/Reg restore
+ *                               - includes Loop & Bool const shadows
+ *                         - Indirect command buffer : Const/Reg save
+ *                         - Quad vertices & texture coordinates
+ *                         - Indirect command buffer : Gmem save
+ *                         - Indirect command buffer : Gmem restore
+ *                         - Unused (padding to 8KB boundary)
+ *             <1K - Texture Constant Shadow (768 bytes) (8K aligned)
+ *       18K - Shader Instruction Shadow
+ *               - 6K vertex (32 byte aligned)
+ *               - 6K pixel  (32 byte aligned)
+ *               - 6K shared (32 byte aligned)
+ *
+ *  Note: Reading constants into a shadow, one at a time using REG_TO_MEM, takes
+ *  3 DWORDS per DWORD transferred, plus 1 DWORD for the shadow, for a total of
+ *  16 bytes per constant.  If the texture constants were transferred this way,
+ *  the Command & Vertex Buffers section would extend past the 16K boundary.
+ *  By moving the texture constant shadow area to start at 16KB boundary, we
+ *  only require approximately 40 bytes more memory, but are able to use the
+ *  LOAD_CONSTANT_CONTEXT shadowing feature for the textures, speeding up
+ *  context switching.
+ *
+ *  [Using LOAD_CONSTANT_CONTEXT shadowing feature for the Loop and/or Bool
+ *  constants would require an additional 8KB each, for alignment.]
+ *
+ */
+
+/* Constants */
+
+#define ALU_CONSTANTS	2048	/* DWORDS */
+#define NUM_REGISTERS	1024	/* DWORDS */
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+#define CMD_BUFFER_LEN	9216	/* DWORDS */
+#else
+#define CMD_BUFFER_LEN	3072	/* DWORDS */
+#endif
+#define TEX_CONSTANTS		(32*6)	/* DWORDS */
+#define BOOL_CONSTANTS		8	/* DWORDS */
+#define LOOP_CONSTANTS		56	/* DWORDS */
+#define SHADER_INSTRUCT_LOG2	9U	/* 2^n == SHADER_INSTRUCTIONS */
+
+#if defined(PM4_IM_STORE)
+/* 96-bit instructions */
+#define SHADER_INSTRUCT		(1<<SHADER_INSTRUCT_LOG2)
+#else
+#define SHADER_INSTRUCT		0
+#endif
+
+/* LOAD_CONSTANT_CONTEXT shadow size */
+#define LCC_SHADOW_SIZE		0x2000	/* 8KB */
+
+#define ALU_SHADOW_SIZE		LCC_SHADOW_SIZE	/* 8KB */
+#define REG_SHADOW_SIZE		0x1000	/* 4KB */
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+#define CMD_BUFFER_SIZE		0x9000	/* 36KB */
+#else
+#define CMD_BUFFER_SIZE		0x3000	/* 12KB */
+#endif
+#define TEX_SHADOW_SIZE		(TEX_CONSTANTS*4)	/* 768 bytes */
+#define SHADER_SHADOW_SIZE	(SHADER_INSTRUCT*12)	/* 6KB */
+
+#define REG_OFFSET		LCC_SHADOW_SIZE
+#define CMD_OFFSET		(REG_OFFSET + REG_SHADOW_SIZE)
+#define TEX_OFFSET		(CMD_OFFSET + CMD_BUFFER_SIZE)
+#define SHADER_OFFSET		((TEX_OFFSET + TEX_SHADOW_SIZE + 32) & ~31)
+
+#define CONTEXT_SIZE		(SHADER_OFFSET + 3 * SHADER_SHADOW_SIZE)
+
+/* temporary work structure */
+struct tmp_ctx {
+	unsigned int *start;	/* Command & Vertex buffer start */
+	unsigned int *cmd;	/* Next available dword in C&V buffer */
+
+	/* address of buffers, needed when creating IB1 command buffers. */
+	uint32_t bool_shadow;	/* bool constants */
+	uint32_t loop_shadow;	/* loop constants */
+
+#if defined(PM4_IM_STORE)
+	uint32_t shader_shared;	/* shared shader instruction shadow */
+	uint32_t shader_vertex;	/* vertex shader instruction shadow */
+	uint32_t shader_pixel;	/* pixel shader instruction shadow */
+#endif
+
+	/* Addresses in command buffer where separately handled registers
+	 * are saved
+	 */
+	uint32_t reg_values[33];
+	uint32_t chicken_restore;
+
+	uint32_t gmem_base;	/* Base gpu address of GMEM */
+
+};
+
+/* Helper function to calculate IEEE754 single-precision float values
+ * without using the FPU.
+ */
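+/*
+ * Worked example (informational): uint2float(3) gives ilog2(3) = 1,
+ * frac = (3 & ~(1 << 1)) << 22 = 0x00400000 and exp = (1 + 127) << 23 =
+ * 0x40000000, so the result is 0x40400000, the IEEE-754 encoding of 3.0f.
+ */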
+unsigned int uint2float(unsigned int uintval)
+{
+	unsigned int exp, frac = 0;
+
+	if (uintval == 0)
+		return 0;
+
+	exp = ilog2(uintval);
+
+	/* Calculate fraction */
+	if (23 > exp)
+		frac = (uintval & (~(1 << exp))) << (23 - exp);
+
+	/* Exp is biased by 127 and shifted 23 bits */
+	exp = (exp + 127) << 23;
+
+	return exp | frac;
+}
+
+/* context save (gmem -> sys) */
+
+/* pre-compiled vertex shader program
+*
+*  attribute vec4  P;
+*  void main(void)
+*  {
+*    gl_Position = P;
+*  }
+*/
+#define GMEM2SYS_VTX_PGM_LEN	0x12
+
+static unsigned int gmem2sys_vtx_pgm[GMEM2SYS_VTX_PGM_LEN] = {
+	0x00011003, 0x00001000, 0xc2000000,
+	0x00001004, 0x00001000, 0xc4000000,
+	0x00001005, 0x00002000, 0x00000000,
+	0x1cb81000, 0x00398a88, 0x00000003,
+	0x140f803e, 0x00000000, 0xe2010100,
+	0x14000000, 0x00000000, 0xe2000000
+};
+
+/* pre-compiled fragment shader program
+*
+*  precision highp float;
+*  uniform   vec4  clear_color;
+*  void main(void)
+*  {
+*     gl_FragColor = clear_color;
+*  }
+*/
+
+#define GMEM2SYS_FRAG_PGM_LEN	0x0c
+
+static unsigned int gmem2sys_frag_pgm[GMEM2SYS_FRAG_PGM_LEN] = {
+	0x00000000, 0x1002c400, 0x10000000,
+	0x00001003, 0x00002000, 0x00000000,
+	0x140f8000, 0x00000000, 0x22000000,
+	0x14000000, 0x00000000, 0xe2000000
+};
+
+/* context restore (sys -> gmem) */
+/* pre-compiled vertex shader program
+*
+*  attribute vec4 position;
+*  attribute vec4 texcoord;
+*  varying   vec4 texcoord0;
+*  void main()
+*  {
+*     gl_Position = position;
+*     texcoord0 = texcoord;
+*  }
+*/
+
+#define SYS2GMEM_VTX_PGM_LEN	0x18
+
+static unsigned int sys2gmem_vtx_pgm[SYS2GMEM_VTX_PGM_LEN] = {
+	0x00052003, 0x00001000, 0xc2000000, 0x00001005,
+	0x00001000, 0xc4000000, 0x00001006, 0x10071000,
+	0x20000000, 0x18981000, 0x0039ba88, 0x00000003,
+	0x12982000, 0x40257b08, 0x00000002, 0x140f803e,
+	0x00000000, 0xe2010100, 0x140f8000, 0x00000000,
+	0xe2020200, 0x14000000, 0x00000000, 0xe2000000
+};
+
+/* pre-compiled fragment shader program
+*
+*  precision mediump   float;
+*  uniform   sampler2D tex0;
+*  varying   vec4      texcoord0;
+*  void main()
+*  {
+*     gl_FragColor = texture2D(tex0, texcoord0.xy);
+*  }
+*/
+
+#define SYS2GMEM_FRAG_PGM_LEN	0x0f
+
+static unsigned int sys2gmem_frag_pgm[SYS2GMEM_FRAG_PGM_LEN] = {
+	0x00011002, 0x00001000, 0xc4000000, 0x00001003,
+	0x10041000, 0x20000000, 0x10000001, 0x1ffff688,
+	0x00000002, 0x140f8000, 0x00000000, 0xe2000000,
+	0x14000000, 0x00000000, 0xe2000000
+};
+
+/* shader texture constants (sysmem -> gmem)  */
+#define SYS2GMEM_TEX_CONST_LEN	6
+
+static unsigned int sys2gmem_tex_const[SYS2GMEM_TEX_CONST_LEN] = {
+	/* Texture, FormatXYZW=Unsigned, ClampXYZ=Wrap/Repeat,
+	 * RFMode=ZeroClamp-1, Dim=1:2d
+	 */
+	0x00000002,		/* Pitch = TBD */
+
+	/* Format=6:8888_WZYX, EndianSwap=0:None, ReqSize=0:256bit, DimHi=0,
+	 * NearestClamp=1:OGL Mode
+	 */
+	0x00000800,		/* Address[31:12] = TBD */
+
+	/* Width, Height, EndianSwap=0:None */
+	0,			/* Width & Height = TBD */
+
+	/* NumFormat=0:RF, DstSelXYZW=XYZW, ExpAdj=0, MagFilt=MinFilt=0:Point,
+	 * Mip=2:BaseMap
+	 */
+	0 << 1 | 1 << 4 | 2 << 7 | 3 << 10 | 2 << 23,
+
+	/* VolMag=VolMin=0:Point, MinMipLvl=0, MaxMipLvl=1, LodBiasH=V=0,
+	 * Dim3d=0
+	 */
+	0,
+
+	/* BorderColor=0:ABGRBlack, ForceBC=0:disable, TriJuice=0, Aniso=0,
+	 * Dim=1:2d, MipPacking=0
+	 */
+	1 << 9			/* Mip Address[31:12] = TBD */
+};
+
+/* quad for copying GMEM to context shadow */
+#define QUAD_LEN				12
+
+static unsigned int gmem_copy_quad[QUAD_LEN] = {
+	0x00000000, 0x00000000, 0x3f800000,
+	0x00000000, 0x00000000, 0x3f800000,
+	0x00000000, 0x00000000, 0x3f800000,
+	0x00000000, 0x00000000, 0x3f800000
+};
+
+#define TEXCOORD_LEN			8
+
+static unsigned int gmem_copy_texcoord[TEXCOORD_LEN] = {
+	0x00000000, 0x3f800000,
+	0x3f800000, 0x3f800000,
+	0x00000000, 0x00000000,
+	0x3f800000, 0x00000000
+};
+
+#define NUM_COLOR_FORMATS   13
+
+static enum SURFACEFORMAT surface_format_table[NUM_COLOR_FORMATS] = {
+	FMT_4_4_4_4,		/* COLORX_4_4_4_4 */
+	FMT_1_5_5_5,		/* COLORX_1_5_5_5 */
+	FMT_5_6_5,		/* COLORX_5_6_5 */
+	FMT_8,			/* COLORX_8 */
+	FMT_8_8,		/* COLORX_8_8 */
+	FMT_8_8_8_8,		/* COLORX_8_8_8_8 */
+	FMT_8_8_8_8,		/* COLORX_S8_8_8_8 */
+	FMT_16_FLOAT,		/* COLORX_16_FLOAT */
+	FMT_16_16_FLOAT,	/* COLORX_16_16_FLOAT */
+	FMT_16_16_16_16_FLOAT,	/* COLORX_16_16_16_16_FLOAT */
+	FMT_32_FLOAT,		/* COLORX_32_FLOAT */
+	FMT_32_32_FLOAT,	/* COLORX_32_32_FLOAT */
+	FMT_32_32_32_32_FLOAT,	/* COLORX_32_32_32_32_FLOAT */
+};
+
+static unsigned int format2bytesperpixel[NUM_COLOR_FORMATS] = {
+	2,			/* COLORX_4_4_4_4 */
+	2,			/* COLORX_1_5_5_5 */
+	2,			/* COLORX_5_6_5 */
+	1,			/* COLORX_8 */
+	2,			/* COLORX_8_8 */
+	4,			/* COLORX_8_8_8_8 */
+	4,			/* COLORX_S8_8_8_8 */
+	2,			/* COLORX_16_FLOAT */
+	4,			/* COLORX_16_16_FLOAT */
+	8,			/* COLORX_16_16_16_16_FLOAT */
+	4,			/* COLORX_32_FLOAT */
+	8,			/* COLORX_32_32_FLOAT */
+	16,			/* COLORX_32_32_32_32_FLOAT */
+};
+
+/* shader linkage info */
+#define SHADER_CONST_ADDR	(11 * 6 + 3)
+
+/* gmem command buffer length */
+#define PM4_REG(reg)		((0x4 << 16) | (GSL_HAL_SUBBLOCK_OFFSET(reg)))
+
+/* functions */
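+/*
+ * Worked example (informational): for a 256KB GMEM, gmem_size becomes
+ * 65536 words; the loop below grows a 64x64 surface to 256x256, giving a
+ * 256-pixel pitch and a 256KB (256 * 256 * 4 byte) shadow size.
+ */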
+static void config_gmemsize(struct gmem_shadow_t *shadow, int gmem_size)
+{
+	int w = 64, h = 64;	/* 16KB surface, minimum */
+
+	shadow->format = COLORX_8_8_8_8;
+	/* convert from bytes to 32-bit words */
+	gmem_size = (gmem_size + 3) / 4;
+
+	/* find the right surface size, close to a square. */
+	while (w * h < gmem_size)
+		if (w < h)
+			w *= 2;
+		else
+			h *= 2;
+
+	shadow->width = w;
+	shadow->pitch = w;
+	shadow->height = h;
+	shadow->gmem_pitch = shadow->pitch;
+
+	shadow->size = shadow->pitch * shadow->height * 4;
+}
+
+static unsigned int gpuaddr(unsigned int *cmd, struct kgsl_memdesc *memdesc)
+{
+	return memdesc->gpuaddr + ((char *)cmd - (char *)memdesc->hostptr);
+}
+
+static void
+create_ib1(struct adreno_context *drawctxt, unsigned int *cmd,
+	   unsigned int *start, unsigned int *end)
+{
+	cmd[0] = PM4_HDR_INDIRECT_BUFFER_PFD;
+	cmd[1] = gpuaddr(start, &drawctxt->gpustate);
+	cmd[2] = end - start;
+}
+
+static unsigned int *program_shader(unsigned int *cmds, int vtxfrag,
+				    unsigned int *shader_pgm, int dwords)
+{
+	/* load the patched vertex shader stream */
+	*cmds++ = pm4_type3_packet(PM4_IM_LOAD_IMMEDIATE, 2 + dwords);
+	/* 0=vertex shader, 1=fragment shader */
+	*cmds++ = vtxfrag;
+	/* instruction start & size (in 32-bit words) */
+	*cmds++ = ((0 << 16) | dwords);
+
+	memcpy(cmds, shader_pgm, dwords << 2);
+	cmds += dwords;
+
+	return cmds;
+}
+
+static unsigned int *reg_to_mem(unsigned int *cmds, uint32_t dst,
+				uint32_t src, int dwords)
+{
+	while (dwords-- > 0) {
+		*cmds++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+		*cmds++ = src++;
+		*cmds++ = dst;
+		dst += 4;
+	}
+
+	return cmds;
+}
+
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+
+static void build_reg_to_mem_range(unsigned int start, unsigned int end,
+				   unsigned int **cmd,
+				   struct adreno_context *drawctxt)
+{
+	unsigned int i = start;
+
+	for (i = start; i <= end; i++) {
+		*(*cmd)++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+		*(*cmd)++ = i;
+		*(*cmd)++ =
+		    ((drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000) +
+		    (i - 0x2000) * 4;
+	}
+}
+
+#endif
+
+/* chicken restore */
+static unsigned int *build_chicken_restore_cmds(
+					struct adreno_context *drawctxt,
+					struct tmp_ctx *ctx)
+{
+	unsigned int *start = ctx->cmd;
+	unsigned int *cmds = start;
+
+	*cmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+	*cmds++ = 0;
+
+	*cmds++ = pm4_type0_packet(REG_TP0_CHICKEN, 1);
+	ctx->chicken_restore = gpuaddr(cmds, &drawctxt->gpustate);
+	*cmds++ = 0x00000000;
+
+	/* create indirect buffer command for above command sequence */
+	create_ib1(drawctxt, drawctxt->chicken_restore, start, cmds);
+
+	return cmds;
+}
+
+/****************************************************************************/
+/* context save                                                             */
+/****************************************************************************/
+
+static const unsigned int register_ranges_a20x[] = {
+	REG_RB_SURFACE_INFO, REG_RB_DEPTH_INFO,
+	REG_COHER_DEST_BASE_0, REG_PA_SC_SCREEN_SCISSOR_BR,
+	REG_PA_SC_WINDOW_OFFSET, REG_PA_SC_WINDOW_SCISSOR_BR,
+	REG_RB_STENCILREFMASK_BF, REG_PA_CL_VPORT_ZOFFSET,
+	REG_SQ_PROGRAM_CNTL, REG_SQ_WRAPPING_1,
+	REG_PA_SC_LINE_CNTL, REG_SQ_PS_CONST,
+	REG_PA_SC_AA_MASK, REG_PA_SC_AA_MASK,
+	REG_RB_SAMPLE_COUNT_CTL, REG_RB_COLOR_DEST_MASK,
+	REG_PA_SU_POLY_OFFSET_FRONT_SCALE, REG_PA_SU_POLY_OFFSET_BACK_OFFSET,
+	REG_VGT_MAX_VTX_INDX, REG_RB_FOG_COLOR,
+	REG_RB_DEPTHCONTROL, REG_RB_MODECONTROL,
+	REG_PA_SU_POINT_SIZE, REG_PA_SC_LINE_STIPPLE,
+	REG_PA_SC_VIZ_QUERY, REG_PA_SC_VIZ_QUERY,
+	REG_VGT_VERTEX_REUSE_BLOCK_CNTL, REG_RB_DEPTH_CLEAR
+};
+
+static const unsigned int register_ranges_a22x[] = {
+	REG_RB_SURFACE_INFO, REG_RB_DEPTH_INFO,
+	REG_COHER_DEST_BASE_0, REG_PA_SC_SCREEN_SCISSOR_BR,
+	REG_PA_SC_WINDOW_OFFSET, REG_PA_SC_WINDOW_SCISSOR_BR,
+	REG_RB_STENCILREFMASK_BF, REG_PA_CL_VPORT_ZOFFSET,
+	REG_SQ_PROGRAM_CNTL, REG_SQ_WRAPPING_1,
+	REG_PA_SC_LINE_CNTL, REG_SQ_PS_CONST,
+	REG_PA_SC_AA_MASK, REG_PA_SC_AA_MASK,
+	REG_RB_SAMPLE_COUNT_CTL, REG_RB_COLOR_DEST_MASK,
+	REG_PA_SU_POLY_OFFSET_FRONT_SCALE, REG_PA_SU_POLY_OFFSET_BACK_OFFSET,
+	/* all the below registers are specific to Leia */
+	REG_LEIA_PC_MAX_VTX_INDX, REG_LEIA_PC_INDX_OFFSET,
+	REG_RB_COLOR_MASK, REG_RB_FOG_COLOR,
+	REG_RB_DEPTHCONTROL, REG_RB_COLORCONTROL,
+	REG_PA_CL_CLIP_CNTL, REG_PA_CL_VTE_CNTL,
+	REG_RB_MODECONTROL, REG_RB_SAMPLE_POS,
+	REG_PA_SU_POINT_SIZE, REG_PA_SU_LINE_CNTL,
+	REG_LEIA_PC_VERTEX_REUSE_BLOCK_CNTL,
+	REG_LEIA_PC_VERTEX_REUSE_BLOCK_CNTL,
+	REG_RB_COPY_CONTROL, REG_RB_DEPTH_CLEAR
+};
+
+
+/* Save h/w regs, ALU constants, texture constants, etc.
+ * Requires: bool_shadow_gpuaddr, loop_shadow_gpuaddr
+ */
+static void build_regsave_cmds(struct adreno_device *adreno_dev,
+			       struct adreno_context *drawctxt,
+			       struct tmp_ctx *ctx)
+{
+	unsigned int *start = ctx->cmd;
+	unsigned int *cmd = start;
+
+	*cmd++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+	*cmd++ = 0;
+
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+	/* Make sure the HW context has the correct register values
+	 * before reading them. */
+	*cmd++ = pm4_type3_packet(PM4_CONTEXT_UPDATE, 1);
+	*cmd++ = 0;
+
+	{
+		unsigned int i = 0;
+		unsigned int reg_array_size = 0;
+		const unsigned int *ptr_register_ranges;
+
+		/* Based on chip id choose the register ranges */
+		if (adreno_is_a220(adreno_dev)) {
+			ptr_register_ranges = register_ranges_a22x;
+			reg_array_size = ARRAY_SIZE(register_ranges_a22x);
+		} else {
+			ptr_register_ranges = register_ranges_a20x;
+			reg_array_size = ARRAY_SIZE(register_ranges_a20x);
+		}
+
+
+		/* Write HW registers into shadow */
+		for (i = 0; i < (reg_array_size/2) ; i++) {
+			build_reg_to_mem_range(ptr_register_ranges[i*2],
+					ptr_register_ranges[i*2+1],
+					&cmd, drawctxt);
+		}
+	}
+
+	/* Copy ALU constants */
+	cmd =
+	    reg_to_mem(cmd, (drawctxt->gpustate.gpuaddr) & 0xFFFFE000,
+		       REG_SQ_CONSTANT_0, ALU_CONSTANTS);
+
+	/* Copy Tex constants */
+	cmd =
+	    reg_to_mem(cmd,
+		       (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000,
+		       REG_SQ_FETCH_0, TEX_CONSTANTS);
+#else
+
+	/* Insert a wait for idle packet before reading the registers.
+	 * This is to fix a hang/reset seen during stress testing.  In this
+	 * hang, CP encountered a timeout reading SQ's boolean constant
+	 * register. There is logic in the HW that blocks reading of this
+	 * register when the SQ block is not idle, which we believe is
+	 * contributing to the hang.*/
+	*cmd++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+	*cmd++ = 0;
+
+	/* H/w registers are already shadowed; just need to disable shadowing
+	 * to prevent corruption.
+	 */
+	*cmd++ = pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, 3);
+	*cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
+	*cmd++ = 4 << 16;	/* regs, start=0 */
+	*cmd++ = 0x0;		/* count = 0 */
+
+	/* ALU constants are already shadowed; just need to disable shadowing
+	 * to prevent corruption.
+	 */
+	*cmd++ = pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, 3);
+	*cmd++ = drawctxt->gpustate.gpuaddr & 0xFFFFE000;
+	*cmd++ = 0 << 16;	/* ALU, start=0 */
+	*cmd++ = 0x0;		/* count = 0 */
+
+	/* Tex constants are already shadowed; just need to disable shadowing
+	 *  to prevent corruption.
+	 */
+	*cmd++ = pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, 3);
+	*cmd++ = (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000;
+	*cmd++ = 1 << 16;	/* Tex, start=0 */
+	*cmd++ = 0x0;		/* count = 0 */
+#endif
+
+	/* Need to handle some of the registers separately */
+	*cmd++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+	*cmd++ = REG_SQ_GPR_MANAGEMENT;
+	*cmd++ = ctx->reg_values[0];
+
+	*cmd++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+	*cmd++ = REG_TP0_CHICKEN;
+	*cmd++ = ctx->reg_values[1];
+
+	if (adreno_is_a220(adreno_dev)) {
+		unsigned int i;
+		unsigned int j = 2;
+		for (i = REG_LEIA_VSC_BIN_SIZE; i <=
+				REG_LEIA_VSC_PIPE_DATA_LENGTH_7; i++) {
+			*cmd++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+			*cmd++ = i;
+			*cmd++ = ctx->reg_values[j];
+			j++;
+		}
+	}
+
+	/* Copy Boolean constants */
+	cmd = reg_to_mem(cmd, ctx->bool_shadow, REG_SQ_CF_BOOLEANS,
+			 BOOL_CONSTANTS);
+
+	/* Copy Loop constants */
+	cmd = reg_to_mem(cmd, ctx->loop_shadow, REG_SQ_CF_LOOP, LOOP_CONSTANTS);
+
+	/* create indirect buffer command for above command sequence */
+	create_ib1(drawctxt, drawctxt->reg_save, start, cmd);
+
+	ctx->cmd = cmd;
+}
+
+/* Copy colour, depth, & stencil buffers from graphics memory to system
+ * memory */
+static unsigned int *build_gmem2sys_cmds(struct adreno_device *adreno_dev,
+					 struct adreno_context *drawctxt,
+					 struct tmp_ctx *ctx,
+					 struct gmem_shadow_t *shadow)
+{
+	unsigned int *cmds = shadow->gmem_save_commands;
+	unsigned int *start = cmds;
+	/* Calculate the new offset based on the adjusted base */
+	unsigned int bytesperpixel = format2bytesperpixel[shadow->format];
+	unsigned int addr = shadow->gmemshadow.gpuaddr;
+	unsigned int offset = (addr - (addr & 0xfffff000)) / bytesperpixel;
+
+	/* Store TP0_CHICKEN register */
+	*cmds++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+	*cmds++ = REG_TP0_CHICKEN;
+	if (ctx)
+		*cmds++ = ctx->chicken_restore;
+	else
+		cmds++;
+
+	*cmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+	*cmds++ = 0;
+
+	/* Set TP0_CHICKEN to zero */
+	*cmds++ = pm4_type0_packet(REG_TP0_CHICKEN, 1);
+	*cmds++ = 0x00000000;
+
+	/* Set PA_SC_AA_CONFIG to 0 */
+	*cmds++ = pm4_type0_packet(REG_PA_SC_AA_CONFIG, 1);
+	*cmds++ = 0x00000000;
+
+	/* program shader */
+
+	/* load shader vtx constants ... 5 dwords */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 4);
+	*cmds++ = (0x1 << 16) | SHADER_CONST_ADDR;
+	*cmds++ = 0;
+	/* valid(?) vtx constant flag & addr */
+	*cmds++ = shadow->quad_vertices.gpuaddr | 0x3;
+	/* limit = 12 dwords */
+	*cmds++ = 0x00000030;
+
+	/* Invalidate L2 cache to make sure vertices are updated */
+	*cmds++ = pm4_type0_packet(REG_TC_CNTL_STATUS, 1);
+	*cmds++ = 0x1;
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 4);
+	*cmds++ = PM4_REG(REG_VGT_MAX_VTX_INDX);
+	*cmds++ = 0x00ffffff;	/* REG_VGT_MAX_VTX_INDX */
+	*cmds++ = 0x0;		/* REG_VGT_MIN_VTX_INDX */
+	*cmds++ = 0x00000000;	/* REG_VGT_INDX_OFFSET */
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_PA_SC_AA_MASK);
+	*cmds++ = 0x0000ffff;	/* REG_PA_SC_AA_MASK */
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_RB_COLORCONTROL);
+	*cmds++ = 0x00000c20;
+
+	/* load the patched vertex shader stream */
+	cmds = program_shader(cmds, 0, gmem2sys_vtx_pgm, GMEM2SYS_VTX_PGM_LEN);
+
+	/* Load the patched fragment shader stream */
+	cmds =
+	    program_shader(cmds, 1, gmem2sys_frag_pgm, GMEM2SYS_FRAG_PGM_LEN);
+
+	/* SQ_PROGRAM_CNTL / SQ_CONTEXT_MISC */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+	*cmds++ = PM4_REG(REG_SQ_PROGRAM_CNTL);
+	if (adreno_is_a220(adreno_dev))
+		*cmds++ = 0x10018001;
+	else
+		*cmds++ = 0x10010001;
+	*cmds++ = 0x00000008;
+
+	/* resolve */
+
+	/* PA_CL_VTE_CNTL */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_PA_CL_VTE_CNTL);
+	/* disable X/Y/Z transforms, X/Y/Z are premultiplied by W */
+	*cmds++ = 0x00000b00;
+
+	/* program surface info */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+	*cmds++ = PM4_REG(REG_RB_SURFACE_INFO);
+	*cmds++ = shadow->gmem_pitch;	/* pitch, MSAA = 1 */
+
+	/* RB_COLOR_INFO Endian=none, Linear, Format=RGBA8888, Swap=0,
+	 *                Base=gmem_base
+	 */
+	/* gmem base assumed 4K aligned. */
+	if (ctx) {
+		BUG_ON(ctx->gmem_base & 0xFFF);
+		*cmds++ =
+		    (shadow->
+		     format << RB_COLOR_INFO__COLOR_FORMAT__SHIFT) | ctx->
+		    gmem_base;
+	} else {
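+		/* No tmp_ctx: patch only the color format bits of the
+		 * dword already present in the command buffer.
+		 */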
+		unsigned int temp = *cmds;
+		*cmds++ = (temp & ~RB_COLOR_INFO__COLOR_FORMAT_MASK) |
+			(shadow->format << RB_COLOR_INFO__COLOR_FORMAT__SHIFT);
+	}
+
+	/* disable Z */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_RB_DEPTHCONTROL);
+	if (adreno_is_a220(adreno_dev))
+		*cmds++ = 0x08;
+	else
+		*cmds++ = 0;
+
+	/* set REG_PA_SU_SC_MODE_CNTL
+	 *              Front_ptype = draw triangles
+	 *              Back_ptype = draw triangles
+	 *              Provoking vertex = last
+	 */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_PA_SU_SC_MODE_CNTL);
+	*cmds++ = 0x00080240;
+
+	/* Use maximum scissor values -- quad vertices already have the
+	 * correct bounds */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+	*cmds++ = PM4_REG(REG_PA_SC_SCREEN_SCISSOR_TL);
+	*cmds++ = (0 << 16) | 0;
+	*cmds++ = (0x1fff << 16) | (0x1fff);
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+	*cmds++ = PM4_REG(REG_PA_SC_WINDOW_SCISSOR_TL);
+	*cmds++ = (unsigned int)((1U << 31) | (0 << 16) | 0);
+	*cmds++ = (0x1fff << 16) | (0x1fff);
+
+	/* load the viewport so that z scale = clear depth and
+	 *  z offset = 0.0f
+	 */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+	*cmds++ = PM4_REG(REG_PA_CL_VPORT_ZSCALE);
+	*cmds++ = 0xbf800000;	/* -1.0f */
+	*cmds++ = 0x0;
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_RB_COLOR_MASK);
+	*cmds++ = 0x0000000f;	/* R = G = B = 1:enabled */
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_RB_COLOR_DEST_MASK);
+	*cmds++ = 0xffffffff;
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+	*cmds++ = PM4_REG(REG_SQ_WRAPPING_0);
+	*cmds++ = 0x00000000;
+	*cmds++ = 0x00000000;
+
+	/* load the stencil ref value
+	 * $AAM - do this later
+	 */
+
+	/* load the COPY state */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 6);
+	*cmds++ = PM4_REG(REG_RB_COPY_CONTROL);
+	*cmds++ = 0;		/* RB_COPY_CONTROL */
+	*cmds++ = addr & 0xfffff000;	/* RB_COPY_DEST_BASE */
+	*cmds++ = shadow->pitch >> 5;	/* RB_COPY_DEST_PITCH */
+
+	/* Endian=none, Linear, Format=RGBA8888,Swap=0,!Dither,
+	 *  MaskWrite:R=G=B=A=1
+	 */
+	*cmds++ = 0x0003c008 |
+	    (shadow->format << RB_COPY_DEST_INFO__COPY_DEST_FORMAT__SHIFT);
+	/* Make sure we stay in offsetx field. */
+	BUG_ON(offset & 0xfffff000);
+	*cmds++ = offset;
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_RB_MODECONTROL);
+	*cmds++ = 0x6;		/* EDRAM copy */
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_PA_CL_CLIP_CNTL);
+	*cmds++ = 0x00010000;
+
+	if (adreno_is_a220(adreno_dev)) {
+		*cmds++ = pm4_type3_packet(PM4_SET_DRAW_INIT_FLAGS, 1);
+		*cmds++ = 0;
+
+		*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+		*cmds++ = PM4_REG(REG_LEIA_RB_LRZ_VSC_CONTROL);
+		*cmds++ = 0x0000000;
+
+		*cmds++ = pm4_type3_packet(PM4_DRAW_INDX, 3);
+		*cmds++ = 0;           /* viz query info. */
+		/* PrimType=RectList, SrcSel=AutoIndex, VisCullMode=Ignore */
+		*cmds++ = 0x00004088;
+		*cmds++ = 3;	       /* NumIndices=3 */
+	} else {
+		/* queue the draw packet */
+		*cmds++ = pm4_type3_packet(PM4_DRAW_INDX, 2);
+		*cmds++ = 0;		/* viz query info. */
+		/* PrimType=RectList, NumIndices=3, SrcSel=AutoIndex */
+		*cmds++ = 0x00030088;
+	}
+
+	/* create indirect buffer command for above command sequence */
+	create_ib1(drawctxt, shadow->gmem_save, start, cmds);
+
+	return cmds;
+}
+
+/* context restore */
+
+/*copy colour, depth, & stencil buffers from system memory to graphics memory*/
+static unsigned int *build_sys2gmem_cmds(struct adreno_device *adreno_dev,
+					 struct adreno_context *drawctxt,
+					 struct tmp_ctx *ctx,
+					 struct gmem_shadow_t *shadow)
+{
+	unsigned int *cmds = shadow->gmem_restore_commands;
+	unsigned int *start = cmds;
+
+	/* Store TP0_CHICKEN register */
+	*cmds++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+	*cmds++ = REG_TP0_CHICKEN;
+	if (ctx)
+		*cmds++ = ctx->chicken_restore;
+	else
+		cmds++;
+
+	*cmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+	*cmds++ = 0;
+
+	/* Set TP0_CHICKEN to zero */
+	*cmds++ = pm4_type0_packet(REG_TP0_CHICKEN, 1);
+	*cmds++ = 0x00000000;
+
+	/* Set PA_SC_AA_CONFIG to 0 */
+	*cmds++ = pm4_type0_packet(REG_PA_SC_AA_CONFIG, 1);
+	*cmds++ = 0x00000000;
+	/* shader constants */
+
+	/* vertex buffer constants */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 7);
+
+	*cmds++ = (0x1 << 16) | (9 * 6);
+	/* valid(?) vtx constant flag & addr */
+	*cmds++ = shadow->quad_vertices.gpuaddr | 0x3;
+	/* limit = 12 dwords */
+	*cmds++ = 0x00000030;
+	/* valid(?) vtx constant flag & addr */
+	*cmds++ = shadow->quad_texcoords.gpuaddr | 0x3;
+	/* limit = 8 dwords */
+	*cmds++ = 0x00000020;
+	*cmds++ = 0;
+	*cmds++ = 0;
+
+	/* Invalidate L2 cache to make sure vertices are updated */
+	*cmds++ = pm4_type0_packet(REG_TC_CNTL_STATUS, 1);
+	*cmds++ = 0x1;
+
+	cmds = program_shader(cmds, 0, sys2gmem_vtx_pgm, SYS2GMEM_VTX_PGM_LEN);
+
+	/* Load the patched fragment shader stream */
+	cmds =
+	    program_shader(cmds, 1, sys2gmem_frag_pgm, SYS2GMEM_FRAG_PGM_LEN);
+
+	/* SQ_PROGRAM_CNTL / SQ_CONTEXT_MISC */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+	*cmds++ = PM4_REG(REG_SQ_PROGRAM_CNTL);
+	*cmds++ = 0x10030002;
+	*cmds++ = 0x00000008;
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_PA_SC_AA_MASK);
+	*cmds++ = 0x0000ffff;	/* REG_PA_SC_AA_MASK */
+
+	if (!adreno_is_a220(adreno_dev)) {
+		/* PA_SC_VIZ_QUERY */
+		*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+		*cmds++ = PM4_REG(REG_PA_SC_VIZ_QUERY);
+		*cmds++ = 0x0;		/*REG_PA_SC_VIZ_QUERY */
+	}
+
+	/* RB_COLORCONTROL */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_RB_COLORCONTROL);
+	*cmds++ = 0x00000c20;
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 4);
+	*cmds++ = PM4_REG(REG_VGT_MAX_VTX_INDX);
+	*cmds++ = 0x00ffffff;	/* mmVGT_MAX_VTX_INDX */
+	*cmds++ = 0x0;		/* mmVGT_MIN_VTX_INDX */
+	*cmds++ = 0x00000000;	/* mmVGT_INDX_OFFSET */
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+	*cmds++ = PM4_REG(REG_VGT_VERTEX_REUSE_BLOCK_CNTL);
+	*cmds++ = 0x00000002;	/* mmVGT_VERTEX_REUSE_BLOCK_CNTL */
+	*cmds++ = 0x00000002;	/* mmVGT_OUT_DEALLOC_CNTL */
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_SQ_INTERPOLATOR_CNTL);
+	*cmds++ = 0xffffffff;	/* mmSQ_INTERPOLATOR_CNTL */
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_PA_SC_AA_CONFIG);
+	*cmds++ = 0x00000000;	/* REG_PA_SC_AA_CONFIG */
+
+	/* set REG_PA_SU_SC_MODE_CNTL
+	 * Front_ptype = draw triangles
+	 * Back_ptype = draw triangles
+	 * Provoking vertex = last
+	 */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_PA_SU_SC_MODE_CNTL);
+	*cmds++ = 0x00080240;
+
+	/* texture constants */
+	*cmds++ =
+	    pm4_type3_packet(PM4_SET_CONSTANT, (SYS2GMEM_TEX_CONST_LEN + 1));
+	*cmds++ = (0x1 << 16) | (0 * 6);
+	memcpy(cmds, sys2gmem_tex_const, SYS2GMEM_TEX_CONST_LEN << 2);
+	cmds[0] |= (shadow->pitch >> 5) << 22;
+	cmds[1] |=
+	    shadow->gmemshadow.gpuaddr | surface_format_table[shadow->format];
+	cmds[2] |= (shadow->width - 1) | (shadow->height - 1) << 13;
+	cmds += SYS2GMEM_TEX_CONST_LEN;
+
+	/* program surface info */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+	*cmds++ = PM4_REG(REG_RB_SURFACE_INFO);
+	*cmds++ = shadow->gmem_pitch;	/* pitch, MSAA = 1 */
+
+	/* RB_COLOR_INFO Endian=none, Linear, Format=RGBA8888, Swap=0,
+	 *                Base=gmem_base
+	 */
+	if (ctx)
+		*cmds++ =
+		    (shadow->
+		     format << RB_COLOR_INFO__COLOR_FORMAT__SHIFT) | ctx->
+		    gmem_base;
+	else {
+		unsigned int temp = *cmds;
+		*cmds++ = (temp & ~RB_COLOR_INFO__COLOR_FORMAT_MASK) |
+			(shadow->format << RB_COLOR_INFO__COLOR_FORMAT__SHIFT);
+	}
+
+	/* RB_DEPTHCONTROL */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_RB_DEPTHCONTROL);
+
+	if (adreno_is_a220(adreno_dev))
+		*cmds++ = 8;		/* disable Z */
+	else
+		*cmds++ = 0;		/* disable Z */
+
+	/* Use maximum scissor values -- quad vertices already
+	 * have the correct bounds */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+	*cmds++ = PM4_REG(REG_PA_SC_SCREEN_SCISSOR_TL);
+	*cmds++ = (0 << 16) | 0;
+	*cmds++ = ((0x1fff) << 16) | 0x1fff;
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+	*cmds++ = PM4_REG(REG_PA_SC_WINDOW_SCISSOR_TL);
+	*cmds++ = (unsigned int)((1U << 31) | (0 << 16) | 0);
+	*cmds++ = ((0x1fff) << 16) | 0x1fff;
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_PA_CL_VTE_CNTL);
+	/* disable X/Y/Z transforms, X/Y/Z are premultiplied by W */
+	*cmds++ = 0x00000b00;
+
+	/*load the viewport so that z scale = clear depth and z offset = 0.0f */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+	*cmds++ = PM4_REG(REG_PA_CL_VPORT_ZSCALE);
+	*cmds++ = 0xbf800000;
+	*cmds++ = 0x0;
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_RB_COLOR_MASK);
+	*cmds++ = 0x0000000f;	/* R = G = B = 1:enabled */
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_RB_COLOR_DEST_MASK);
+	*cmds++ = 0xffffffff;
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+	*cmds++ = PM4_REG(REG_SQ_WRAPPING_0);
+	*cmds++ = 0x00000000;
+	*cmds++ = 0x00000000;
+
+	/* load the stencil ref value
+	 *  $AAM - do this later
+	 */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_RB_MODECONTROL);
+	/* draw pixels with color and depth/stencil component */
+	*cmds++ = 0x4;
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_PA_CL_CLIP_CNTL);
+	*cmds++ = 0x00010000;
+
+	if (adreno_is_a220(adreno_dev)) {
+		*cmds++ = pm4_type3_packet(PM4_SET_DRAW_INIT_FLAGS, 1);
+		*cmds++ = 0;
+
+		*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+		*cmds++ = PM4_REG(REG_LEIA_RB_LRZ_VSC_CONTROL);
+		*cmds++ = 0x0000000;
+
+		*cmds++ = pm4_type3_packet(PM4_DRAW_INDX, 3);
+		*cmds++ = 0;           /* viz query info. */
+		/* PrimType=RectList, SrcSel=AutoIndex, VisCullMode=Ignore */
+		*cmds++ = 0x00004088;
+		*cmds++ = 3;	       /* NumIndices=3 */
+	} else {
+		/* queue the draw packet */
+		*cmds++ = pm4_type3_packet(PM4_DRAW_INDX, 2);
+		*cmds++ = 0;		/* viz query info. */
+		/* PrimType=RectList, NumIndices=3, SrcSel=AutoIndex */
+		*cmds++ = 0x00030088;
+	}
+
+	/* create indirect buffer command for above command sequence */
+	create_ib1(drawctxt, shadow->gmem_restore, start, cmds);
+
+	return cmds;
+}
+
+/* restore h/w regs, alu constants, texture constants, etc. ... */
+static unsigned *reg_range(unsigned int *cmd, unsigned int start,
+			   unsigned int end)
+{
+	*cmd++ = PM4_REG(start);	/* h/w regs, start addr */
+	*cmd++ = end - start + 1;	/* count */
+	return cmd;
+}
+
+static void build_regrestore_cmds(struct adreno_device *adreno_dev,
+				  struct adreno_context *drawctxt,
+				  struct tmp_ctx *ctx)
+{
+	unsigned int *start = ctx->cmd;
+	unsigned int *cmd = start;
+
+	unsigned int i = 0;
+	unsigned int reg_array_size = 0;
+	const unsigned int *ptr_register_ranges;
+
+	*cmd++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+	*cmd++ = 0;
+
+	/* H/W Registers */
+	/* deferred pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, ???); */
+	cmd++;
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+	/* Force mismatch */
+	*cmd++ = ((drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000) | 1;
+#else
+	*cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
+#endif
+
+	/* Based on chip id choose the registers ranges*/
+	if (adreno_is_a220(adreno_dev)) {
+		ptr_register_ranges = register_ranges_a22x;
+		reg_array_size = ARRAY_SIZE(register_ranges_a22x);
+	} else {
+		ptr_register_ranges = register_ranges_a20x;
+		reg_array_size = ARRAY_SIZE(register_ranges_a20x);
+	}
+
+
+	for (i = 0; i < (reg_array_size/2); i++) {
+		cmd = reg_range(cmd, ptr_register_ranges[i*2],
+				ptr_register_ranges[i*2+1]);
+	}
+
+	/* Now we know how many register blocks we have, we can compute command
+	 * length
+	 */
+	start[2] =
+	    pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, (cmd - start) - 3);
+	/* Enable shadowing for the entire register block. */
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+	start[4] |= (0 << 24) | (4 << 16);	/* Disable shadowing. */
+#else
+	start[4] |= (1 << 24) | (4 << 16);
+#endif
+
+	/* Need to handle some of the registers separately */
+	*cmd++ = pm4_type0_packet(REG_SQ_GPR_MANAGEMENT, 1);
+	ctx->reg_values[0] = gpuaddr(cmd, &drawctxt->gpustate);
+	*cmd++ = 0x00040400;
+
+	*cmd++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+	*cmd++ = 0;
+	*cmd++ = pm4_type0_packet(REG_TP0_CHICKEN, 1);
+	ctx->reg_values[1] = gpuaddr(cmd, &drawctxt->gpustate);
+	*cmd++ = 0x00000000;
+
+	if (adreno_is_a220(adreno_dev)) {
+		unsigned int i;
+		unsigned int j = 2;
+		for (i = REG_LEIA_VSC_BIN_SIZE; i <=
+				REG_LEIA_VSC_PIPE_DATA_LENGTH_7; i++) {
+			*cmd++ = pm4_type0_packet(i, 1);
+			ctx->reg_values[j] = gpuaddr(cmd, &drawctxt->gpustate);
+			*cmd++ = 0x00000000;
+			j++;
+		}
+	}
+
+	/* ALU Constants */
+	*cmd++ = pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, 3);
+	*cmd++ = drawctxt->gpustate.gpuaddr & 0xFFFFE000;
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+	*cmd++ = (0 << 24) | (0 << 16) | 0;	/* Disable shadowing */
+#else
+	*cmd++ = (1 << 24) | (0 << 16) | 0;
+#endif
+	*cmd++ = ALU_CONSTANTS;
+
+	/* Texture Constants */
+	*cmd++ = pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, 3);
+	*cmd++ = (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000;
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+	/* Disable shadowing */
+	*cmd++ = (0 << 24) | (1 << 16) | 0;
+#else
+	*cmd++ = (1 << 24) | (1 << 16) | 0;
+#endif
+	*cmd++ = TEX_CONSTANTS;
+
+	/* Boolean Constants */
+	*cmd++ = pm4_type3_packet(PM4_SET_CONSTANT, 1 + BOOL_CONSTANTS);
+	*cmd++ = (2 << 16) | 0;
+
+	/* the next BOOL_CONSTANTS dwords are the shadow area for
+	 * the boolean constants.
+	 */
+	ctx->bool_shadow = gpuaddr(cmd, &drawctxt->gpustate);
+	cmd += BOOL_CONSTANTS;
+
+	/* Loop Constants */
+	*cmd++ = pm4_type3_packet(PM4_SET_CONSTANT, 1 + LOOP_CONSTANTS);
+	*cmd++ = (3 << 16) | 0;
+
+	/* the next LOOP_CONSTANTS dwords are the shadow area for
+	 * the loop constants.
+	 */
+	ctx->loop_shadow = gpuaddr(cmd, &drawctxt->gpustate);
+	cmd += LOOP_CONSTANTS;
+
+	/* create indirect buffer command for above command sequence */
+	create_ib1(drawctxt, drawctxt->reg_restore, start, cmd);
+
+	ctx->cmd = cmd;
+}
+
+/* quad for saving/restoring gmem */
+static void set_gmem_copy_quad(struct gmem_shadow_t *shadow)
+{
+	/* set vertex buffer values */
+	gmem_copy_quad[1] = uint2float(shadow->height);
+	gmem_copy_quad[3] = uint2float(shadow->width);
+	gmem_copy_quad[4] = uint2float(shadow->height);
+	gmem_copy_quad[9] = uint2float(shadow->width);
+
+	gmem_copy_quad[0] = uint2float(0);
+	gmem_copy_quad[6] = uint2float(0);
+	gmem_copy_quad[7] = uint2float(0);
+	gmem_copy_quad[10] = uint2float(0);
+
+	memcpy(shadow->quad_vertices.hostptr, gmem_copy_quad, QUAD_LEN << 2);
+
+	memcpy(shadow->quad_texcoords.hostptr, gmem_copy_texcoord,
+	       TEXCOORD_LEN << 2);
+}
+
+/* quad for saving/restoring gmem */
+static void build_quad_vtxbuff(struct adreno_context *drawctxt,
+		       struct tmp_ctx *ctx, struct gmem_shadow_t *shadow)
+{
+	unsigned int *cmd = ctx->cmd;
+
+	/* quad vertex buffer location (in GPU space) */
+	shadow->quad_vertices.hostptr = cmd;
+	shadow->quad_vertices.gpuaddr = gpuaddr(cmd, &drawctxt->gpustate);
+
+	cmd += QUAD_LEN;
+
+	/* tex coord buffer location (in GPU space) */
+	shadow->quad_texcoords.hostptr = cmd;
+	shadow->quad_texcoords.gpuaddr = gpuaddr(cmd, &drawctxt->gpustate);
+
+	cmd += TEXCOORD_LEN;
+
+	set_gmem_copy_quad(shadow);
+
+	ctx->cmd = cmd;
+}
+
+static void
+build_shader_save_restore_cmds(struct adreno_context *drawctxt,
+			       struct tmp_ctx *ctx)
+{
+	unsigned int *cmd = ctx->cmd;
+	unsigned int *save, *restore, *fixup;
+#if defined(PM4_IM_STORE)
+	unsigned int *startSizeVtx, *startSizePix, *startSizeShared;
+#endif
+	unsigned int *partition1;
+	unsigned int *shaderBases, *partition2;
+
+#if defined(PM4_IM_STORE)
+	/* compute vertex, pixel and shared instruction shadow GPU addresses */
+	ctx->shader_vertex = drawctxt->gpustate.gpuaddr + SHADER_OFFSET;
+	ctx->shader_pixel = ctx->shader_vertex + SHADER_SHADOW_SIZE;
+	ctx->shader_shared = ctx->shader_pixel + SHADER_SHADOW_SIZE;
+#endif
+
+	/* restore shader partitioning and instructions */
+
+	restore = cmd;		/* start address */
+
+	/* Invalidate Vertex & Pixel instruction code address and sizes */
+	*cmd++ = pm4_type3_packet(PM4_INVALIDATE_STATE, 1);
+	*cmd++ = 0x00000300;	/* 0x100 = Vertex, 0x200 = Pixel */
+
+	/* Restore previous shader vertex & pixel instruction bases. */
+	*cmd++ = pm4_type3_packet(PM4_SET_SHADER_BASES, 1);
+	shaderBases = cmd++;	/* TBD #5: shader bases (from fixup) */
+
+	/* write the shader partition information to a scratch register */
+	*cmd++ = pm4_type0_packet(REG_SQ_INST_STORE_MANAGMENT, 1);
+	partition1 = cmd++;	/* TBD #4a: partition info (from save) */
+
+#if defined(PM4_IM_STORE)
+	/* load vertex shader instructions from the shadow. */
+	*cmd++ = pm4_type3_packet(PM4_IM_LOAD, 2);
+	*cmd++ = ctx->shader_vertex + 0x0;	/* 0x0 = Vertex */
+	startSizeVtx = cmd++;	/* TBD #1: start/size (from save) */
+
+	/* load pixel shader instructions from the shadow. */
+	*cmd++ = pm4_type3_packet(PM4_IM_LOAD, 2);
+	*cmd++ = ctx->shader_pixel + 0x1;	/* 0x1 = Pixel */
+	startSizePix = cmd++;	/* TBD #2: start/size (from save) */
+
+	/* load shared shader instructions from the shadow. */
+	*cmd++ = pm4_type3_packet(PM4_IM_LOAD, 2);
+	*cmd++ = ctx->shader_shared + 0x2;	/* 0x2 = Shared */
+	startSizeShared = cmd++;	/* TBD #3: start/size (from save) */
+#endif
+
+	/* create indirect buffer command for above command sequence */
+	create_ib1(drawctxt, drawctxt->shader_restore, restore, cmd);
+
+	/*
+	 *  fixup SET_SHADER_BASES data
+	 *
+	 *  since self-modifying PM4 code is being used here, a separate
+	 *  command buffer is used for this fixup operation, to ensure the
+	 *  commands are not read by the PM4 engine before the data fields
+	 *  have been written.
+	 */
+
+	fixup = cmd;		/* start address */
+
+	/* write the shader partition information to a scratch register */
+	*cmd++ = pm4_type0_packet(REG_SCRATCH_REG2, 1);
+	partition2 = cmd++;	/* TBD #4b: partition info (from save) */
+
+	/* mask off unused bits, then OR with shader instruction memory size */
+	*cmd++ = pm4_type3_packet(PM4_REG_RMW, 3);
+	*cmd++ = REG_SCRATCH_REG2;
+	/* AND off invalid bits. */
+	*cmd++ = 0x0FFF0FFF;
+	/* OR in instruction memory size */
+	*cmd++ = (unsigned int)((SHADER_INSTRUCT_LOG2 - 5U) << 29);
+
+	/* write the computed value to the SET_SHADER_BASES data field */
+	*cmd++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+	*cmd++ = REG_SCRATCH_REG2;
+	/* TBD #5: shader bases (to restore) */
+	*cmd++ = gpuaddr(shaderBases, &drawctxt->gpustate);
+
+	/* create indirect buffer command for above command sequence */
+	create_ib1(drawctxt, drawctxt->shader_fixup, fixup, cmd);
+
+	/* save shader partitioning and instructions */
+
+	save = cmd;		/* start address */
+
+	*cmd++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+	*cmd++ = 0;
+
+	/* fetch the SQ_INST_STORE_MANAGMENT register value,
+	 *  store the value in the data fields of the SET_CONSTANT commands
+	 *  above.
+	 */
+	*cmd++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+	*cmd++ = REG_SQ_INST_STORE_MANAGMENT;
+	/* TBD #4a: partition info (to restore) */
+	*cmd++ = gpuaddr(partition1, &drawctxt->gpustate);
+	*cmd++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+	*cmd++ = REG_SQ_INST_STORE_MANAGMENT;
+	/* TBD #4b: partition info (to fixup) */
+	*cmd++ = gpuaddr(partition2, &drawctxt->gpustate);
+
+#if defined(PM4_IM_STORE)
+
+	/* store the vertex shader instructions */
+	*cmd++ = pm4_type3_packet(PM4_IM_STORE, 2);
+	*cmd++ = ctx->shader_vertex + 0x0;	/* 0x0 = Vertex */
+	/* TBD #1: start/size (to restore) */
+	*cmd++ = gpuaddr(startSizeVtx, &drawctxt->gpustate);
+
+	/* store the pixel shader instructions */
+	*cmd++ = pm4_type3_packet(PM4_IM_STORE, 2);
+	*cmd++ = ctx->shader_pixel + 0x1;	/* 0x1 = Pixel */
+	/* TBD #2: start/size (to restore) */
+	*cmd++ = gpuaddr(startSizePix, &drawctxt->gpustate);
+
+	/* store the shared shader instructions if vertex base is nonzero */
+
+	*cmd++ = pm4_type3_packet(PM4_IM_STORE, 2);
+	*cmd++ = ctx->shader_shared + 0x2;	/* 0x2 = Shared */
+	/* TBD #3: start/size (to restore) */
+	*cmd++ = gpuaddr(startSizeShared, &drawctxt->gpustate);
+
+#endif
+
+	*cmd++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+	*cmd++ = 0;
+
+	/* create indirect buffer command for above command sequence */
+	create_ib1(drawctxt, drawctxt->shader_save, save, cmd);
+
+	ctx->cmd = cmd;
+}
+
+/* create buffers for saving/restoring registers, constants, & GMEM */
+static int
+create_gpustate_shadow(struct kgsl_device *device,
+		       struct adreno_context *drawctxt,
+		       struct tmp_ctx *ctx)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	int result;
+
+	/* Allocate vmalloc memory to store the gpustate */
+	result = kgsl_allocate(&drawctxt->gpustate,
+		drawctxt->pagetable, CONTEXT_SIZE);
+
+	if (result)
+		return result;
+
+	drawctxt->flags |= CTXT_FLAGS_STATE_SHADOW;
+
+	/* Blank out h/w register, constant, and command buffer shadows. */
+	kgsl_sharedmem_set(&drawctxt->gpustate, 0, 0, CONTEXT_SIZE);
+
+	/* set-up command and vertex buffer pointers */
+	ctx->cmd = ctx->start
+	    = (unsigned int *)((char *)drawctxt->gpustate.hostptr + CMD_OFFSET);
+
+	/* build indirect command buffers to save & restore regs/constants */
+	adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
+	build_regrestore_cmds(adreno_dev, drawctxt, ctx);
+	build_regsave_cmds(adreno_dev, drawctxt, ctx);
+
+	build_shader_save_restore_cmds(drawctxt, ctx);
+
+	kgsl_cache_range_op(&drawctxt->gpustate,
+			    KGSL_CACHE_OP_FLUSH);
+
+	return 0;
+}
+
+/* create buffers for saving/restoring registers, constants, & GMEM */
+static int
+create_gmem_shadow(struct adreno_device *adreno_dev,
+		   struct adreno_context *drawctxt,
+		   struct tmp_ctx *ctx)
+{
+	struct kgsl_device *device = &adreno_dev->dev;
+	int result;
+
+	config_gmemsize(&drawctxt->context_gmem_shadow,
+			adreno_dev->gmemspace.sizebytes);
+	ctx->gmem_base = adreno_dev->gmemspace.gpu_base;
+
+	result = kgsl_allocate(&drawctxt->context_gmem_shadow.gmemshadow,
+		drawctxt->pagetable, drawctxt->context_gmem_shadow.size);
+
+	if (result)
+		return result;
+
+	/* we've allocated the shadow, when swapped out, GMEM must be saved. */
+	drawctxt->flags |= CTXT_FLAGS_GMEM_SHADOW | CTXT_FLAGS_GMEM_SAVE;
+
+	/* blank out gmem shadow. */
+	kgsl_sharedmem_set(&drawctxt->context_gmem_shadow.gmemshadow, 0, 0,
+			   drawctxt->context_gmem_shadow.size);
+
+	/* build quad vertex buffer */
+	build_quad_vtxbuff(drawctxt, ctx, &drawctxt->context_gmem_shadow);
+
+	/* build TP0_CHICKEN register restore command buffer */
+	ctx->cmd = build_chicken_restore_cmds(drawctxt, ctx);
+
+	/* build indirect command buffers to save & restore gmem */
+	/* Idle because we are reading PM override registers */
+	adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
+	drawctxt->context_gmem_shadow.gmem_save_commands = ctx->cmd;
+	ctx->cmd =
+	    build_gmem2sys_cmds(adreno_dev, drawctxt, ctx,
+				&drawctxt->context_gmem_shadow);
+	drawctxt->context_gmem_shadow.gmem_restore_commands = ctx->cmd;
+	ctx->cmd =
+	    build_sys2gmem_cmds(adreno_dev, drawctxt, ctx,
+				&drawctxt->context_gmem_shadow);
+
+	kgsl_cache_range_op(&drawctxt->context_gmem_shadow.gmemshadow,
+			    KGSL_CACHE_OP_FLUSH);
+
+	return 0;
+}
+
+/* create a new drawing context */
+
+int adreno_drawctxt_create(struct kgsl_device *device,
+			struct kgsl_pagetable *pagetable,
+			struct kgsl_context *context, uint32_t flags)
+{
+	struct adreno_context *drawctxt;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct tmp_ctx ctx;
+	int ret;
+
+	drawctxt = kzalloc(sizeof(struct adreno_context), GFP_KERNEL);
+
+	if (drawctxt == NULL)
+		return -ENOMEM;
+
+	drawctxt->pagetable = pagetable;
+	drawctxt->bin_base_offset = 0;
+
+	ret = create_gpustate_shadow(device, drawctxt, &ctx);
+	if (ret)
+		goto err;
+
+	/* Save the shader instruction memory on context switching */
+	drawctxt->flags |= CTXT_FLAGS_SHADER_SAVE;
+
+	if (!(flags & KGSL_CONTEXT_NO_GMEM_ALLOC)) {
+		/* create gmem shadow */
+		ret = create_gmem_shadow(adreno_dev, drawctxt, &ctx);
+		if (ret != 0)
+			goto err;
+	}
+
+	BUG_ON(ctx.cmd - ctx.start > CMD_BUFFER_LEN);
+
+	context->devctxt = drawctxt;
+	return 0;
+err:
+	kgsl_sharedmem_free(&drawctxt->gpustate);
+	kfree(drawctxt);
+	return ret;
+}
+
+/* destroy a drawing context */
+
+void adreno_drawctxt_destroy(struct kgsl_device *device,
+			  struct kgsl_context *context)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_context *drawctxt = context->devctxt;
+
+	if (drawctxt == NULL)
+		return;
+
+	/* deactivate context */
+	if (adreno_dev->drawctxt_active == drawctxt) {
+		/* no need to save GMEM or shader, the context is
+		 * being destroyed.
+		 */
+		drawctxt->flags &= ~(CTXT_FLAGS_GMEM_SAVE |
+				     CTXT_FLAGS_SHADER_SAVE |
+				     CTXT_FLAGS_GMEM_SHADOW |
+				     CTXT_FLAGS_STATE_SHADOW);
+
+		adreno_drawctxt_switch(adreno_dev, NULL, 0);
+	}
+
+	adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
+
+	kgsl_sharedmem_free(&drawctxt->gpustate);
+	kgsl_sharedmem_free(&drawctxt->context_gmem_shadow.gmemshadow);
+
+	kfree(drawctxt);
+	context->devctxt = NULL;
+}
+
+/* set bin base offset */
+void adreno_drawctxt_set_bin_base_offset(struct kgsl_device *device,
+				      struct kgsl_context *context,
+				      unsigned int offset)
+{
+	struct adreno_context *drawctxt = context->devctxt;
+
+	if (drawctxt)
+		drawctxt->bin_base_offset = offset;
+}
+
+/* switch drawing contexts */
+void
+adreno_drawctxt_switch(struct adreno_device *adreno_dev,
+			struct adreno_context *drawctxt,
+			unsigned int flags)
+{
+	struct adreno_context *active_ctxt =
+	  adreno_dev->drawctxt_active;
+	struct kgsl_device *device = &adreno_dev->dev;
+	unsigned int cmds[5];
+
+	if (drawctxt) {
+		if (flags & KGSL_CONTEXT_SAVE_GMEM)
+			/* Set the flag in context so that the save is done
+			 * when this context is switched out. */
+			drawctxt->flags |= CTXT_FLAGS_GMEM_SAVE;
+		else
+			/* Remove GMEM saving flag from the context */
+			drawctxt->flags &= ~CTXT_FLAGS_GMEM_SAVE;
+	}
+	/* already current? */
+	if (active_ctxt == drawctxt)
+		return;
+
+	KGSL_CTXT_INFO(device, "from %p to %p flags %d\n",
+			adreno_dev->drawctxt_active, drawctxt, flags);
+	/* save old context*/
+	if (active_ctxt && active_ctxt->flags & CTXT_FLAGS_GPU_HANG)
+		KGSL_CTXT_WARN(device,
+			"Current active context has caused gpu hang\n");
+
+	if (active_ctxt != NULL) {
+		KGSL_CTXT_INFO(device,
+			"active_ctxt flags %08x\n", active_ctxt->flags);
+		/* save registers and constants. */
+		adreno_ringbuffer_issuecmds(device, 0,
+				active_ctxt->reg_save, 3);
+
+		if (active_ctxt->flags & CTXT_FLAGS_SHADER_SAVE) {
+			/* save shader partitioning and instructions. */
+			adreno_ringbuffer_issuecmds(device,
+					KGSL_CMD_FLAGS_PMODE,
+					active_ctxt->shader_save, 3);
+
+			/* fixup shader partitioning parameter for
+			 *  SET_SHADER_BASES.
+			 */
+			adreno_ringbuffer_issuecmds(device, 0,
+					active_ctxt->shader_fixup, 3);
+
+			active_ctxt->flags |= CTXT_FLAGS_SHADER_RESTORE;
+		}
+
+		if (active_ctxt->flags & CTXT_FLAGS_GMEM_SAVE
+			&& active_ctxt->flags & CTXT_FLAGS_GMEM_SHADOW) {
+			/* save gmem.
+			 * (note: changes shader. shader must already be saved.)
+			 */
+			adreno_ringbuffer_issuecmds(device,
+				KGSL_CMD_FLAGS_PMODE,
+				active_ctxt->context_gmem_shadow.gmem_save, 3);
+
+			/* Restore TP0_CHICKEN */
+			adreno_ringbuffer_issuecmds(device, 0,
+				active_ctxt->chicken_restore, 3);
+
+			active_ctxt->flags |= CTXT_FLAGS_GMEM_RESTORE;
+		}
+	}
+
+	adreno_dev->drawctxt_active = drawctxt;
+
+	/* restore new context */
+	if (drawctxt != NULL) {
+
+		KGSL_CTXT_INFO(device,
+			"drawctxt flags %08x\n", drawctxt->flags);
+		cmds[0] = pm4_nop_packet(1);
+		cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
+		cmds[2] = pm4_type3_packet(PM4_MEM_WRITE, 2);
+		cmds[3] = device->memstore.gpuaddr +
+				KGSL_DEVICE_MEMSTORE_OFFSET(current_context);
+		cmds[4] = (unsigned int)adreno_dev->drawctxt_active;
+		adreno_ringbuffer_issuecmds(device, 0, cmds, 5);
+		kgsl_mmu_setstate(device, drawctxt->pagetable);
+
+#ifndef CONFIG_MSM_KGSL_CFF_DUMP_NO_CONTEXT_MEM_DUMP
+		kgsl_cffdump_syncmem(NULL, &drawctxt->gpustate,
+			drawctxt->gpustate.gpuaddr, LCC_SHADOW_SIZE +
+			REG_SHADOW_SIZE + CMD_BUFFER_SIZE + TEX_SHADOW_SIZE,
+			false);
+#endif
+
+		/* restore gmem.
+		 *  (note: changes shader. shader must not already be restored.)
+		 */
+		if (drawctxt->flags & CTXT_FLAGS_GMEM_RESTORE) {
+			adreno_ringbuffer_issuecmds(device,
+				KGSL_CMD_FLAGS_PMODE,
+				drawctxt->context_gmem_shadow.gmem_restore, 3);
+
+			/* Restore TP0_CHICKEN */
+			adreno_ringbuffer_issuecmds(device, 0,
+				drawctxt->chicken_restore, 3);
+
+			drawctxt->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
+		}
+
+		/* restore registers and constants. */
+		adreno_ringbuffer_issuecmds(device, 0,
+					  drawctxt->reg_restore, 3);
+
+		/* restore shader instructions & partitioning. */
+		if (drawctxt->flags & CTXT_FLAGS_SHADER_RESTORE) {
+			adreno_ringbuffer_issuecmds(device, 0,
+					  drawctxt->shader_restore, 3);
+		}
+
+		cmds[0] = pm4_type3_packet(PM4_SET_BIN_BASE_OFFSET, 1);
+		cmds[1] = drawctxt->bin_base_offset;
+		if (!adreno_is_a220(adreno_dev))
+			adreno_ringbuffer_issuecmds(device, 0, cmds, 2);
+
+	} else
+		kgsl_mmu_setstate(device, device->mmu.defaultpagetable);
+}
diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h
new file mode 100644
index 0000000..049adf7
--- /dev/null
+++ b/drivers/gpu/msm/adreno_drawctxt.h
@@ -0,0 +1,97 @@
+/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ADRENO_DRAWCTXT_H
+#define __ADRENO_DRAWCTXT_H
+
+#include "a200_reg.h"
+#include "a220_reg.h"
+
+/* Flags */
+
+#define CTXT_FLAGS_NOT_IN_USE		0x00000000
+#define CTXT_FLAGS_IN_USE		0x00000001
+
+/* state shadow memory allocated */
+#define CTXT_FLAGS_STATE_SHADOW		0x00000010
+
+/* gmem shadow memory allocated */
+#define CTXT_FLAGS_GMEM_SHADOW		0x00000100
+/* gmem must be copied to shadow */
+#define CTXT_FLAGS_GMEM_SAVE		0x00000200
+/* gmem can be restored from shadow */
+#define CTXT_FLAGS_GMEM_RESTORE		0x00000400
+/* shader must be copied to shadow */
+#define CTXT_FLAGS_SHADER_SAVE		0x00002000
+/* shader can be restored from shadow */
+#define CTXT_FLAGS_SHADER_RESTORE	0x00004000
+/* Context has caused a GPU hang */
+#define CTXT_FLAGS_GPU_HANG		0x00008000
+
+struct kgsl_device;
+struct adreno_device;
+struct kgsl_device_private;
+struct kgsl_context;
+
+/* draw context */
+struct gmem_shadow_t {
+	struct kgsl_memdesc gmemshadow;	/* Shadow buffer address */
+
+	/* 256 KB GMEM surface = 4 bytes-per-pixel x 256 pixels/row x
+	 * 256 rows. */
+	/* width & height must be multiples of 32, in case tiled textures
+	 * are used. */
+	enum COLORFORMATX format;
+	unsigned int size;	/* Size of surface used to store GMEM */
+	unsigned int width;	/* Width of surface used to store GMEM */
+	unsigned int height;	/* Height of surface used to store GMEM */
+	unsigned int pitch;	/* Pitch of surface used to store GMEM */
+	unsigned int gmem_pitch;	/* Pitch value used for GMEM */
+	unsigned int *gmem_save_commands;
+	unsigned int *gmem_restore_commands;
+	unsigned int gmem_save[3];
+	unsigned int gmem_restore[3];
+	struct kgsl_memdesc quad_vertices;
+	struct kgsl_memdesc quad_texcoords;
+};
+
+struct adreno_context {
+	uint32_t flags;
+	struct kgsl_pagetable *pagetable;
+	struct kgsl_memdesc gpustate;
+	unsigned int reg_save[3];
+	unsigned int reg_restore[3];
+	unsigned int shader_save[3];
+	unsigned int shader_fixup[3];
+	unsigned int shader_restore[3];
+	unsigned int chicken_restore[3];
+	unsigned int bin_base_offset;
+	/* Information of the GMEM shadow that is created in context create */
+	struct gmem_shadow_t context_gmem_shadow;
+};
+
+int adreno_drawctxt_create(struct kgsl_device *device,
+			struct kgsl_pagetable *pagetable,
+			struct kgsl_context *context,
+			uint32_t flags);
+
+void adreno_drawctxt_destroy(struct kgsl_device *device,
+			  struct kgsl_context *context);
+
+void adreno_drawctxt_switch(struct adreno_device *adreno_dev,
+				struct adreno_context *drawctxt,
+				unsigned int flags);
+void adreno_drawctxt_set_bin_base_offset(struct kgsl_device *device,
+				      struct kgsl_context *context,
+					unsigned int offset);
+
+#endif  /* __ADRENO_DRAWCTXT_H */
diff --git a/drivers/gpu/msm/adreno_pm4types.h b/drivers/gpu/msm/adreno_pm4types.h
new file mode 100644
index 0000000..4d6f70a
--- /dev/null
+++ b/drivers/gpu/msm/adreno_pm4types.h
@@ -0,0 +1,187 @@
+/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ADRENO_PM4TYPES_H
+#define __ADRENO_PM4TYPES_H
+
+
+#define PM4_PKT_MASK	0xc0000000
+
+#define PM4_TYPE0_PKT	((unsigned int)0 << 30)
+#define PM4_TYPE1_PKT	((unsigned int)1 << 30)
+#define PM4_TYPE2_PKT	((unsigned int)2 << 30)
+#define PM4_TYPE3_PKT	((unsigned int)3 << 30)
+
+
+/* type3 packets */
+/* initialize CP's micro-engine */
+#define PM4_ME_INIT		0x48
+
+/* skip N 32-bit words to get to the next packet */
+#define PM4_NOP			0x10
+
+/* indirect buffer dispatch.  prefetch parser uses this packet type to determine
+*  whether to pre-fetch the IB
+*/
+#define PM4_INDIRECT_BUFFER	0x3f
+
+/* indirect buffer dispatch.  same as IB, but init is pipelined */
+#define PM4_INDIRECT_BUFFER_PFD	0x37
+
+/* wait for the IDLE state of the engine */
+#define PM4_WAIT_FOR_IDLE	0x26
+
+/* wait until a register or memory location is a specific value */
+#define PM4_WAIT_REG_MEM	0x3c
+
+/* wait until a register location is equal to a specific value */
+#define PM4_WAIT_REG_EQ		0x52
+
+/* wait until a register location is >= a specific value */
+#define PM4_WAT_REG_GTE		0x53
+
+/* wait until a read completes */
+#define PM4_WAIT_UNTIL_READ	0x5c
+
+/* wait until all base/size writes from an IB_PFD packet have completed */
+#define PM4_WAIT_IB_PFD_COMPLETE 0x5d
+
+/* register read/modify/write */
+#define PM4_REG_RMW		0x21
+
+/* reads register in chip and writes to memory */
+#define PM4_REG_TO_MEM		0x3e
+
+/* write N 32-bit words to memory */
+#define PM4_MEM_WRITE		0x3d
+
+/* write CP_PROG_COUNTER value to memory */
+#define PM4_MEM_WRITE_CNTR	0x4f
+
+/* conditional execution of a sequence of packets */
+#define PM4_COND_EXEC		0x44
+
+/* conditional write to memory or register */
+#define PM4_COND_WRITE		0x45
+
+/* generate an event that creates a write to memory when completed */
+#define PM4_EVENT_WRITE		0x46
+
+/* generate a VS|PS_done event */
+#define PM4_EVENT_WRITE_SHD	0x58
+
+/* generate a cache flush done event */
+#define PM4_EVENT_WRITE_CFL	0x59
+
+/* generate a z_pass done event */
+#define PM4_EVENT_WRITE_ZPD	0x5b
+
+
+/* initiate fetch of index buffer and draw */
+#define PM4_DRAW_INDX		0x22
+
+/* draw using supplied indices in packet */
+#define PM4_DRAW_INDX_2		0x36
+
+/* initiate fetch of index buffer and binIDs and draw */
+#define PM4_DRAW_INDX_BIN	0x34
+
+/* initiate fetch of bin IDs and draw using supplied indices */
+#define PM4_DRAW_INDX_2_BIN	0x35
+
+
+/* begin/end initiator for viz query extent processing */
+#define PM4_VIZ_QUERY		0x23
+
+/* fetch state sub-blocks and initiate shader code DMAs */
+#define PM4_SET_STATE		0x25
+
+/* load constant into chip and to memory */
+#define PM4_SET_CONSTANT	0x2d
+
+/* load sequencer instruction memory (pointer-based) */
+#define PM4_IM_LOAD		0x27
+
+/* load sequencer instruction memory (code embedded in packet) */
+#define PM4_IM_LOAD_IMMEDIATE	0x2b
+
+/* load constants from a location in memory */
+#define PM4_LOAD_CONSTANT_CONTEXT 0x2e
+
+/* selective invalidation of state pointers */
+#define PM4_INVALIDATE_STATE	0x3b
+
+
+/* dynamically changes shader instruction memory partition */
+#define PM4_SET_SHADER_BASES	0x4A
+
+/* sets the 64-bit BIN_MASK register in the PFP */
+#define PM4_SET_BIN_MASK	0x50
+
+/* sets the 64-bit BIN_SELECT register in the PFP */
+#define PM4_SET_BIN_SELECT	0x51
+
+
+/* updates the current context, if needed */
+#define PM4_CONTEXT_UPDATE	0x5e
+
+/* generate interrupt from the command stream */
+#define PM4_INTERRUPT		0x40
+
+
+/* copy sequencer instruction memory to system memory */
+#define PM4_IM_STORE            0x2c
+
+/*
+ * for a20x
+ * program an offset that will be added to the BIN_BASE value of
+ * the 3D_DRAW_INDX_BIN packet
+ */
+#define PM4_SET_BIN_BASE_OFFSET     0x4B
+
+/*
+ * for a22x
+ * sets draw initiator flags register in PFP, gets bitwise-ORed into
+ * every draw initiator
+ */
+#define PM4_SET_DRAW_INIT_FLAGS      0x4B
+
+#define PM4_SET_PROTECTED_MODE  0x5f /* sets the register protection mode */
+
+
+/* packet header building macros */
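+/* A type3 header carries the packet type in bits [31:30], the payload
+ * dword count minus one in bits [29:16] and the opcode in bits [15:8];
+ * bit 0 marks the packet as predicated.  A type0 header replaces the
+ * opcode field with a register index in bits [14:0].
+ */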
+#define pm4_type0_packet(regindx, cnt) \
+	(PM4_TYPE0_PKT | (((cnt)-1) << 16) | ((regindx) & 0x7FFF))
+
+#define pm4_type0_packet_for_sameregister(regindx, cnt) \
+	(PM4_TYPE0_PKT | (((cnt)-1) << 16) | (1 << 15) | \
+		((regindx) & 0x7FFF))
+
+#define pm4_type1_packet(reg0, reg1) \
+	 (PM4_TYPE1_PKT | ((reg1) << 12) | (reg0))
+
+#define pm4_type3_packet(opcode, cnt) \
+	 (PM4_TYPE3_PKT | (((cnt)-1) << 16) | (((opcode) & 0xFF) << 8))
+
+#define pm4_predicated_type3_packet(opcode, cnt) \
+	 (PM4_TYPE3_PKT | (((cnt)-1) << 16) | (((opcode) & 0xFF) << 8) | 0x1)
+
+#define pm4_nop_packet(cnt) \
+	 (PM4_TYPE3_PKT | (((cnt)-1) << 16) | (PM4_NOP << 8))
+
+
+/* packet headers */
+#define PM4_HDR_ME_INIT	pm4_type3_packet(PM4_ME_INIT, 18)
+#define PM4_HDR_INDIRECT_BUFFER_PFD pm4_type3_packet(PM4_INDIRECT_BUFFER_PFD, 2)
+#define PM4_HDR_INDIRECT_BUFFER	pm4_type3_packet(PM4_INDIRECT_BUFFER, 2)
+
+#endif	/* __ADRENO_PM4TYPES_H */
diff --git a/drivers/gpu/msm/adreno_postmortem.c b/drivers/gpu/msm/adreno_postmortem.c
new file mode 100644
index 0000000..76db7fa
--- /dev/null
+++ b/drivers/gpu/msm/adreno_postmortem.c
@@ -0,0 +1,846 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/vmalloc.h>
+
+#include "kgsl.h"
+
+#include "adreno.h"
+#include "adreno_pm4types.h"
+#include "adreno_ringbuffer.h"
+#include "adreno_postmortem.h"
+#include "adreno_debugfs.h"
+
+#include "a200_reg.h"
+
+#define INVALID_RB_CMD 0xaaaaaaaa
+
+struct pm_id_name {
+	uint32_t id;
+	char name[9];
+};
+
+static const struct pm_id_name pm0_types[] = {
+	{REG_PA_SC_AA_CONFIG,		"RPASCAAC"},
+	{REG_RBBM_PM_OVERRIDE2,		"RRBBPMO2"},
+	{REG_SCRATCH_REG2,		"RSCRTRG2"},
+	{REG_SQ_GPR_MANAGEMENT,		"RSQGPRMN"},
+	{REG_SQ_INST_STORE_MANAGMENT,	"RSQINSTS"},
+	{REG_TC_CNTL_STATUS,		"RTCCNTLS"},
+	{REG_TP0_CHICKEN,		"RTP0CHCK"},
+	{REG_CP_TIMESTAMP,		"CP_TM_ST"},
+};
+
+static const struct pm_id_name pm3_types[] = {
+	{PM4_COND_EXEC,			"CND_EXEC"},
+	{PM4_CONTEXT_UPDATE,		"CX__UPDT"},
+	{PM4_DRAW_INDX,			"DRW_NDX_"},
+	{PM4_DRAW_INDX_BIN,		"DRW_NDXB"},
+	{PM4_EVENT_WRITE,		"EVENT_WT"},
+	{PM4_IM_LOAD,			"IN__LOAD"},
+	{PM4_IM_LOAD_IMMEDIATE,		"IM_LOADI"},
+	{PM4_IM_STORE,			"IM_STORE"},
+	{PM4_INDIRECT_BUFFER,		"IND_BUF_"},
+	{PM4_INDIRECT_BUFFER_PFD,	"IND_BUFP"},
+	{PM4_INTERRUPT,			"PM4_INTR"},
+	{PM4_INVALIDATE_STATE,		"INV_STAT"},
+	{PM4_LOAD_CONSTANT_CONTEXT,	"LD_CN_CX"},
+	{PM4_ME_INIT,			"ME__INIT"},
+	{PM4_NOP,			"PM4__NOP"},
+	{PM4_REG_RMW,			"REG__RMW"},
+	{PM4_REG_TO_MEM,		"REG2_MEM"},
+	{PM4_SET_BIN_BASE_OFFSET,	"ST_BIN_O"},
+	{PM4_SET_CONSTANT,		"ST_CONST"},
+	{PM4_SET_PROTECTED_MODE,	"ST_PRT_M"},
+	{PM4_SET_SHADER_BASES,		"ST_SHD_B"},
+	{PM4_WAIT_FOR_IDLE,		"WAIT4IDL"},
+};
+
+/* Offset address pairs: start, end of range to dump (inclusive) */
+
+/* GPU < Z470 */
+
+static const int a200_registers[] = {
+	0x0000, 0x0008, 0x0010, 0x002c, 0x00ec, 0x00f4,
+	0x0100, 0x0110, 0x0118, 0x011c,
+	0x0700, 0x0704, 0x070c, 0x0720, 0x0754, 0x0764,
+	0x0770, 0x0774, 0x07a8, 0x07a8, 0x07b8, 0x07cc,
+	0x07d8, 0x07dc, 0x07f0, 0x07fc, 0x0e44, 0x0e48,
+	0x0e6c, 0x0e78, 0x0ec8, 0x0ed4, 0x0edc, 0x0edc,
+	0x0fe0, 0x0fec, 0x1100, 0x1100,
+
+	0x110c, 0x1110, 0x112c, 0x112c, 0x1134, 0x113c,
+	0x1148, 0x1148, 0x1150, 0x116c, 0x11fc, 0x11fc,
+	0x15e0, 0x161c, 0x1724, 0x1724, 0x1740, 0x1740,
+	0x1804, 0x1810, 0x1818, 0x1824, 0x182c, 0x1838,
+	0x184c, 0x1850, 0x28a4, 0x28ac, 0x28bc, 0x28c4,
+	0x2900, 0x290c, 0x2914, 0x2914, 0x2938, 0x293c,
+	0x30b0, 0x30b0, 0x30c0, 0x30c0, 0x30e0, 0x30f0,
+	0x3100, 0x3100, 0x3110, 0x3110, 0x3200, 0x3218,
+	0x3220, 0x3250, 0x3264, 0x3268, 0x3290, 0x3294,
+	0x3400, 0x340c, 0x3418, 0x3418, 0x3420, 0x342c,
+	0x34d0, 0x34d4, 0x36b8, 0x3704, 0x3720, 0x3750,
+	0x3760, 0x3764, 0x3800, 0x3800, 0x3808, 0x3810,
+	0x385c, 0x3878, 0x3b00, 0x3b24, 0x3b2c, 0x3b30,
+	0x3b40, 0x3b40, 0x3b50, 0x3b5c, 0x3b80, 0x3b88,
+	0x3c04, 0x3c08, 0x3c30, 0x3c30, 0x3c38, 0x3c48,
+	0x3c98, 0x3ca8, 0x3cb0, 0x3cb0,
+
+	0x8000, 0x8008, 0x8018, 0x803c, 0x8200, 0x8208,
+	0x8400, 0x8424, 0x8430, 0x8450, 0x8600, 0x8610,
+	0x87d4, 0x87dc, 0x8800, 0x8820, 0x8a00, 0x8a0c,
+	0x8a4c, 0x8a50, 0x8c00, 0x8c20, 0x8c48, 0x8c48,
+	0x8c58, 0x8c74, 0x8c90, 0x8c98, 0x8e00, 0x8e0c,
+
+	0x9000, 0x9008, 0x9018, 0x903c, 0x9200, 0x9208,
+	0x9400, 0x9424, 0x9430, 0x9450, 0x9600, 0x9610,
+	0x97d4, 0x97dc, 0x9800, 0x9820, 0x9a00, 0x9a0c,
+	0x9a4c, 0x9a50, 0x9c00, 0x9c20, 0x9c48, 0x9c48,
+	0x9c58, 0x9c74, 0x9c90, 0x9c98, 0x9e00, 0x9e0c,
+
+	0x10000, 0x1000c, 0x12000, 0x12014,
+	0x12400, 0x12400, 0x12420, 0x12420
+};
+
+/* GPU = Z470 */
+
+static const int a220_registers[] = {
+	0x0000, 0x0008, 0x0010, 0x002c, 0x00ec, 0x00f4,
+	0x0100, 0x0110, 0x0118, 0x011c,
+	0x0700, 0x0704, 0x070c, 0x0720, 0x0754, 0x0764,
+	0x0770, 0x0774, 0x07a8, 0x07a8, 0x07b8, 0x07cc,
+	0x07d8, 0x07dc, 0x07f0, 0x07fc, 0x0e44, 0x0e48,
+	0x0e6c, 0x0e78, 0x0ec8, 0x0ed4, 0x0edc, 0x0edc,
+	0x0fe0, 0x0fec, 0x1100, 0x1100,
+
+	0x110c, 0x1110, 0x112c, 0x112c, 0x1134, 0x113c,
+	0x1148, 0x1148, 0x1150, 0x116c, 0x11fc, 0x11fc,
+	0x15e0, 0x161c, 0x1724, 0x1724, 0x1740, 0x1740,
+	0x1804, 0x1810, 0x1818, 0x1824, 0x182c, 0x1838,
+	0x184c, 0x1850, 0x28a4, 0x28ac, 0x28bc, 0x28c4,
+	0x2900, 0x2900, 0x2908, 0x290c, 0x2914, 0x2914,
+	0x2938, 0x293c, 0x30c0, 0x30c0, 0x30e0, 0x30e4,
+	0x30f0, 0x30f0, 0x3200, 0x3204, 0x3220, 0x324c,
+	0x3400, 0x340c, 0x3414, 0x3418, 0x3420, 0x342c,
+	0x34d0, 0x34d4, 0x36b8, 0x3704, 0x3720, 0x3750,
+	0x3760, 0x3764, 0x3800, 0x3800, 0x3808, 0x3810,
+	0x385c, 0x3878, 0x3b00, 0x3b24, 0x3b2c, 0x3b30,
+	0x3b40, 0x3b40, 0x3b50, 0x3b5c, 0x3b80, 0x3b88,
+	0x3c04, 0x3c08, 0x8000, 0x8008, 0x8018, 0x803c,
+	0x8200, 0x8208, 0x8400, 0x8408, 0x8410, 0x8424,
+	0x8430, 0x8450, 0x8600, 0x8610, 0x87d4, 0x87dc,
+	0x8800, 0x8808, 0x8810, 0x8810, 0x8820, 0x8820,
+	0x8a00, 0x8a08, 0x8a50, 0x8a50,
+	0x8c00, 0x8c20, 0x8c24, 0x8c28, 0x8c48, 0x8c48,
+	0x8c58, 0x8c58, 0x8c60, 0x8c74, 0x8c90, 0x8c98,
+	0x8e00, 0x8e0c, 0x9000, 0x9008, 0x9018, 0x903c,
+	0x9200, 0x9208, 0x9400, 0x9408, 0x9410, 0x9424,
+	0x9430, 0x9450, 0x9600, 0x9610, 0x97d4, 0x97dc,
+	0x9800, 0x9808, 0x9810, 0x9818, 0x9820, 0x9820,
+	0x9a00, 0x9a08, 0x9a50, 0x9a50, 0x9c00, 0x9c20,
+	0x9c48, 0x9c48, 0x9c58, 0x9c58, 0x9c60, 0x9c74,
+	0x9c90, 0x9c98, 0x9e00, 0x9e0c,
+
+	0x10000, 0x1000c, 0x12000, 0x12014,
+	0x12400, 0x12400, 0x12420, 0x12420
+};
+
+static uint32_t adreno_is_pm4_len(uint32_t word)
+{
+	if (word == INVALID_RB_CMD)
+		return 0;
+
+	return (word >> 16) & 0x3FFF;
+}
+
+static bool adreno_is_pm4_type(uint32_t word)
+{
+	int i;
+
+	if (word == INVALID_RB_CMD)
+		return 1;
+
+	if (adreno_is_pm4_len(word) > 16)
+		return 0;
+
+	if ((word & (3<<30)) == PM4_TYPE0_PKT) {
+		for (i = 0; i < ARRAY_SIZE(pm0_types); ++i) {
+			if ((word & 0x7FFF) == pm0_types[i].id)
+				return 1;
+		}
+		return 0;
+	}
+	if ((word & (3<<30)) == PM4_TYPE3_PKT) {
+		for (i = 0; i < ARRAY_SIZE(pm3_types); ++i) {
+			if ((word & 0xFFFF) == (pm3_types[i].id << 8))
+				return 1;
+		}
+		return 0;
+	}
+	return 0;
+}
+
+static const char *adreno_pm4_name(uint32_t word)
+{
+	int i;
+
+	if (word == INVALID_RB_CMD)
+		return "--------";
+
+	if ((word & (3<<30)) == PM4_TYPE0_PKT) {
+		for (i = 0; i < ARRAY_SIZE(pm0_types); ++i) {
+			if ((word & 0x7FFF) == pm0_types[i].id)
+				return pm0_types[i].name;
+		}
+		return "????????";
+	}
+	if ((word & (3<<30)) == PM4_TYPE3_PKT) {
+		for (i = 0; i < ARRAY_SIZE(pm3_types); ++i) {
+			if ((word & 0xFFFF) == (pm3_types[i].id << 8))
+				return pm3_types[i].name;
+		}
+		return "????????";
+	}
+	return "????????";
+}
+
+static void adreno_dump_regs(struct kgsl_device *device,
+			   const int *registers, int size)
+{
+	int range = 0, offset = 0;
+
+	for (range = 0; range < size; range++) {
+		/* start and end are in dword offsets */
+		int start = registers[range * 2] / 4;
+		int end = registers[range * 2 + 1] / 4;
+
+		unsigned char linebuf[32 * 3 + 2 + 32 + 1];
+		int linelen, i;
+
+		for (offset = start; offset <= end; offset += linelen) {
+			unsigned int regvals[32/4];
+			linelen = min(end+1-offset, 32/4);
+
+			for (i = 0; i < linelen; ++i)
+				kgsl_regread(device, offset+i, regvals+i);
+
+			hex_dump_to_buffer(regvals, linelen*4, 32, 4,
+				linebuf, sizeof(linebuf), 0);
+			KGSL_LOG_DUMP(device,
+				"REG: %5.5X: %s\n", offset<<2, linebuf);
+		}
+	}
+}
+
+static void dump_ib(struct kgsl_device *device, char *buffId, uint32_t pt_base,
+	uint32_t base_offset, uint32_t ib_base, uint32_t ib_size, bool dump)
+{
+	unsigned int memsize;
+	uint8_t *base_addr = kgsl_sharedmem_convertaddr(device, pt_base,
+		ib_base, &memsize);
+
+	if (base_addr && dump)
+		print_hex_dump(KERN_ERR, buffId, DUMP_PREFIX_OFFSET,
+				 32, 4, base_addr, ib_size*4, 0);
+	else
+		KGSL_LOG_DUMP(device, "%s base:%8.8X  ib_size:%d  "
+			"offset:%5.5X%s\n",
+			buffId, ib_base, ib_size*4, base_offset,
+			base_addr ? "" : " [Invalid]");
+}
+
+#define IB_LIST_SIZE	64
+struct ib_list {
+	int count;
+	uint32_t bases[IB_LIST_SIZE];
+	uint32_t sizes[IB_LIST_SIZE];
+	uint32_t offsets[IB_LIST_SIZE];
+};
+
+static void dump_ib1(struct kgsl_device *device, uint32_t pt_base,
+			uint32_t base_offset,
+			uint32_t ib1_base, uint32_t ib1_size,
+			struct ib_list *ib_list, bool dump)
+{
+	int i, j;
+	uint32_t value;
+	uint32_t *ib1_addr;
+	unsigned int memsize;
+
+	dump_ib(device, "IB1:", pt_base, base_offset, ib1_base,
+		ib1_size, dump);
+
+	/* fetch virtual address for given IB base */
+	ib1_addr = (uint32_t *)kgsl_sharedmem_convertaddr(device, pt_base,
+		ib1_base, &memsize);
+	if (!ib1_addr)
+		return;
+
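+	/* Scan IB1 for IB2 dispatch packets and record each unique
+	 * base/size pair in ib_list.
+	 */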
+	for (i = 0; i+3 < ib1_size; ) {
+		value = ib1_addr[i++];
+		if (value == pm4_type3_packet(PM4_INDIRECT_BUFFER_PFD, 2)) {
+			uint32_t ib2_base = ib1_addr[i++];
+			uint32_t ib2_size = ib1_addr[i++];
+
+			/* find previous match */
+			for (j = 0; j < ib_list->count; ++j)
+				if (ib_list->sizes[j] == ib2_size
+					&& ib_list->bases[j] == ib2_base)
+					break;
+
+			if (j < ib_list->count || ib_list->count
+				>= IB_LIST_SIZE)
+				continue;
+
+			/* store match */
+			ib_list->sizes[ib_list->count] = ib2_size;
+			ib_list->bases[ib_list->count] = ib2_base;
+			ib_list->offsets[ib_list->count] = i<<2;
+			++ib_list->count;
+		}
+	}
+}
+
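+/* Pretty-print one row of ringbuffer words: recognised PM4 headers are
+ * printed by name and their payload dwords are bracketed by "<" and ">";
+ * *argp carries the remaining payload count between rows.
+ */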
+static void adreno_dump_rb_buffer(const void *buf, size_t len,
+		char *linebuf, size_t linebuflen, int *argp)
+{
+	const u32 *ptr4 = buf;
+	const int ngroups = len;
+	int lx = 0, j;
+	bool nxsp = 1;
+
+	for (j = 0; j < ngroups; j++) {
+		if (*argp < 0) {
+			lx += scnprintf(linebuf + lx, linebuflen - lx, " <");
+			*argp = -*argp;
+		} else if (nxsp)
+			lx += scnprintf(linebuf + lx, linebuflen - lx, "  ");
+		else
+			nxsp = 1;
+		if (!*argp && adreno_is_pm4_type(ptr4[j])) {
+			lx += scnprintf(linebuf + lx, linebuflen - lx,
+				"%s", adreno_pm4_name(ptr4[j]));
+			*argp = -(adreno_is_pm4_len(ptr4[j])+1);
+		} else {
+			lx += scnprintf(linebuf + lx, linebuflen - lx,
+				"%8.8X", ptr4[j]);
+			if (*argp > 1)
+				--*argp;
+			else if (*argp == 1) {
+				*argp = 0;
+				nxsp = 0;
+				lx += scnprintf(linebuf + lx, linebuflen - lx,
+					"> ");
+			}
+		}
+	}
+	linebuf[lx] = '\0';
+}
+
+static bool adreno_rb_use_hex(void)
+{
+#ifdef CONFIG_MSM_KGSL_PSTMRTMDMP_RB_HEX
+	return 1;
+#else
+	return 0;
+#endif
+}
+
+static void adreno_dump_rb(struct kgsl_device *device, const void *buf,
+			 size_t len, int start, int size)
+{
+	const uint32_t *ptr = buf;
+	int i, remaining, args = 0;
+	unsigned char linebuf[32 * 3 + 2 + 32 + 1];
+	const int rowsize = 8;
+
+	len >>= 2;
+	remaining = len;
+	for (i = 0; i < len; i += rowsize) {
+		int linelen = min(remaining, rowsize);
+		remaining -= rowsize;
+
+		if (adreno_rb_use_hex())
+			hex_dump_to_buffer(ptr+i, linelen*4, rowsize*4, 4,
+				linebuf, sizeof(linebuf), 0);
+		else
+			adreno_dump_rb_buffer(ptr+i, linelen, linebuf,
+				sizeof(linebuf), &args);
+		KGSL_LOG_DUMP(device,
+			"RB: %4.4X:%s\n", (start+i)%size, linebuf);
+	}
+}
+
+static bool adreno_ib_dump_enabled(void)
+{
+#ifdef CONFIG_MSM_KGSL_PSTMRTMDMP_NO_IB_DUMP
+	return 0;
+#else
+	return 1;
+#endif
+}
+
+struct log_field {
+	bool show;
+	const char *display;
+};
+
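+/* Print as many of the requested fields as fit on one line; return the
+ * count of entries still to be printed so the caller can continue them
+ * on a following line.
+ */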
+static int adreno_dump_fields_line(struct kgsl_device *device,
+				 const char *start, char *str, int slen,
+				 const struct log_field **lines,
+				 int num)
+{
+	const struct log_field *l = *lines;
+	int sptr, count  = 0;
+
+	sptr = snprintf(str, slen, "%s", start);
+
+	for (  ; num && sptr < slen; num--, l++) {
+		int ilen = strlen(l->display);
+
+		/* only fields whose condition evaluated true are printed */
+		if (!l->show)
+			continue;
+
+		if (count)
+			ilen += strlen("  | ");
+
+		if (ilen > (slen - sptr))
+			break;
+
+		if (count++)
+			sptr += snprintf(str + sptr, slen - sptr, " | ");
+
+		sptr += snprintf(str + sptr, slen - sptr, "%s", l->display);
+	}
+
+	KGSL_LOG_DUMP(device, "%s\n", str);
+
+	*lines = l;
+	return num;
+}
+
+static void adreno_dump_fields(struct kgsl_device *device,
+			     const char *start, const struct log_field *lines,
+			     int num)
+{
+	char lb[90];
+	const char *sstr = start;
+
+	lb[sizeof(lb)  - 1] = '\0';
+
+	while (num) {
+		int ret = adreno_dump_fields_line(device, sstr, lb,
+			sizeof(lb) - 1, &lines, num);
+
+		if (ret == num)
+			break;
+
+		num = ret;
+		sstr = "        ";
+	}
+}
+
+static int adreno_dump(struct kgsl_device *device)
+{
+	unsigned int r1, r2, r3, rbbm_status;
+	unsigned int cp_ib1_base, cp_ib1_bufsz, cp_stat;
+	unsigned int cp_ib2_base, cp_ib2_bufsz;
+	unsigned int pt_base;
+	unsigned int cp_rb_base, rb_count;
+	unsigned int cp_rb_wptr, cp_rb_rptr;
+	unsigned int i;
+	int result = 0;
+	uint32_t *rb_copy;
+	const uint32_t *rb_vaddr;
+	int num_item = 0;
+	int read_idx, write_idx;
+	unsigned int ts_processed, rb_memsize;
+
+	static struct ib_list ib_list;
+
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+
+	mb();
+
+	KGSL_LOG_DUMP(device, "POWER: FLAGS = %08lX | ACTIVE POWERLEVEL = %08X",
+			pwr->power_flags, pwr->active_pwrlevel);
+
+	KGSL_LOG_DUMP(device, "POWER: INTERVAL TIMEOUT = %08X ",
+		pwr->interval_timeout);
+
+	KGSL_LOG_DUMP(device, "GRP_CLK = %lu ",
+				  kgsl_get_clkrate(pwr->grp_clks[0]));
+
+	KGSL_LOG_DUMP(device, "BUS CLK = %lu ",
+		kgsl_get_clkrate(pwr->ebi1_clk));
+
+
+	kgsl_regread(device, REG_RBBM_STATUS, &rbbm_status);
+	kgsl_regread(device, REG_RBBM_PM_OVERRIDE1, &r2);
+	kgsl_regread(device, REG_RBBM_PM_OVERRIDE2, &r3);
+	KGSL_LOG_DUMP(device, "RBBM:   STATUS   = %08X | PM_OVERRIDE1 = %08X | "
+		"PM_OVERRIDE2 = %08X\n", rbbm_status, r2, r3);
+
+	kgsl_regread(device, REG_RBBM_INT_CNTL, &r1);
+	kgsl_regread(device, REG_RBBM_INT_STATUS, &r2);
+	kgsl_regread(device, REG_RBBM_READ_ERROR, &r3);
+	KGSL_LOG_DUMP(device, "        INT_CNTL = %08X | INT_STATUS   = %08X | "
+		"READ_ERROR   = %08X\n", r1, r2, r3);
+
+	{
+		char cmdFifo[16];
+		struct log_field lines[] = {
+			{rbbm_status &  0x000F, cmdFifo},
+			{rbbm_status &  BIT(5), "TC busy     "},
+			{rbbm_status &  BIT(8), "HIRQ pending"},
+			{rbbm_status &  BIT(9), "CPRQ pending"},
+			{rbbm_status & BIT(10), "CFRQ pending"},
+			{rbbm_status & BIT(11), "PFRQ pending"},
+			{rbbm_status & BIT(12), "VGT 0DMA bsy"},
+			{rbbm_status & BIT(14), "RBBM WU busy"},
+			{rbbm_status & BIT(16), "CP NRT busy "},
+			{rbbm_status & BIT(18), "MH busy     "},
+			{rbbm_status & BIT(19), "MH chncy bsy"},
+			{rbbm_status & BIT(21), "SX busy     "},
+			{rbbm_status & BIT(22), "TPC busy    "},
+			{rbbm_status & BIT(24), "SC CNTX busy"},
+			{rbbm_status & BIT(25), "PA busy     "},
+			{rbbm_status & BIT(26), "VGT busy    "},
+			{rbbm_status & BIT(27), "SQ cntx1 bsy"},
+			{rbbm_status & BIT(28), "SQ cntx0 bsy"},
+			{rbbm_status & BIT(30), "RB busy     "},
+			{rbbm_status & BIT(31), "Grphs pp bsy"},
+		};
+		snprintf(cmdFifo, sizeof(cmdFifo), "CMD FIFO=%01X  ",
+			rbbm_status & 0xf);
+		adreno_dump_fields(device, " STATUS=", lines,
+				ARRAY_SIZE(lines));
+	}
+
+	kgsl_regread(device, REG_CP_RB_BASE, &cp_rb_base);
+	kgsl_regread(device, REG_CP_RB_CNTL, &r2);
+	rb_count = 2 << (r2 & (BIT(6)-1));
+	kgsl_regread(device, REG_CP_RB_RPTR_ADDR, &r3);
+	KGSL_LOG_DUMP(device,
+		"CP_RB:  BASE = %08X | CNTL   = %08X | RPTR_ADDR = %08X"
+		"\n", cp_rb_base, r2, r3);
+
+	kgsl_regread(device, REG_CP_RB_RPTR, &cp_rb_rptr);
+	kgsl_regread(device, REG_CP_RB_WPTR, &cp_rb_wptr);
+	kgsl_regread(device, REG_CP_RB_RPTR_WR, &r3);
+	KGSL_LOG_DUMP(device,
+		"        RPTR = %08X | WPTR   = %08X | RPTR_WR   = %08X"
+		"\n", cp_rb_rptr, cp_rb_wptr, r3);
+
+	kgsl_regread(device, REG_CP_IB1_BASE, &cp_ib1_base);
+	kgsl_regread(device, REG_CP_IB1_BUFSZ, &cp_ib1_bufsz);
+	KGSL_LOG_DUMP(device,
+		"CP_IB1: BASE = %08X | BUFSZ  = %d\n", cp_ib1_base,
+		cp_ib1_bufsz);
+
+	kgsl_regread(device, REG_CP_IB2_BASE, &cp_ib2_base);
+	kgsl_regread(device, REG_CP_IB2_BUFSZ, &cp_ib2_bufsz);
+	KGSL_LOG_DUMP(device,
+		"CP_IB2: BASE = %08X | BUFSZ  = %d\n", cp_ib2_base,
+		cp_ib2_bufsz);
+
+	kgsl_regread(device, REG_CP_INT_CNTL, &r1);
+	kgsl_regread(device, REG_CP_INT_STATUS, &r2);
+	KGSL_LOG_DUMP(device, "CP_INT: CNTL = %08X | STATUS = %08X\n", r1, r2);
+
+	kgsl_regread(device, REG_CP_ME_CNTL, &r1);
+	kgsl_regread(device, REG_CP_ME_STATUS, &r2);
+	kgsl_regread(device, REG_MASTER_INT_SIGNAL, &r3);
+	KGSL_LOG_DUMP(device,
+		"CP_ME:  CNTL = %08X | STATUS = %08X | MSTR_INT_SGNL = "
+		"%08X\n", r1, r2, r3);
+
+	kgsl_regread(device, REG_CP_STAT, &cp_stat);
+	KGSL_LOG_DUMP(device, "CP_STAT      = %08X\n", cp_stat);
+#ifndef CONFIG_MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL
+	{
+		struct log_field lns[] = {
+			{cp_stat &  BIT(0), "WR_BSY     0"},
+			{cp_stat &  BIT(1), "RD_RQ_BSY  1"},
+			{cp_stat &  BIT(2), "RD_RTN_BSY 2"},
+		};
+		adreno_dump_fields(device, "    MIU=", lns, ARRAY_SIZE(lns));
+	}
+	{
+		struct log_field lns[] = {
+			{cp_stat &  BIT(5), "RING_BUSY  5"},
+			{cp_stat &  BIT(6), "NDRCTS_BSY 6"},
+			{cp_stat &  BIT(7), "NDRCT2_BSY 7"},
+			{cp_stat &  BIT(9), "ST_BUSY    9"},
+			{cp_stat & BIT(10), "BUSY      10"},
+		};
+		adreno_dump_fields(device, "    CSF=", lns, ARRAY_SIZE(lns));
+	}
+	{
+		struct log_field lns[] = {
+			{cp_stat & BIT(11), "RNG_Q_BSY 11"},
+			{cp_stat & BIT(12), "NDRCTS_Q_B12"},
+			{cp_stat & BIT(13), "NDRCT2_Q_B13"},
+			{cp_stat & BIT(16), "ST_QUEUE_B16"},
+			{cp_stat & BIT(17), "PFP_BUSY  17"},
+		};
+		adreno_dump_fields(device, "   RING=", lns, ARRAY_SIZE(lns));
+	}
+	{
+		struct log_field lns[] = {
+			{cp_stat &  BIT(3), "RBIU_BUSY  3"},
+			{cp_stat &  BIT(4), "RCIU_BUSY  4"},
+			{cp_stat & BIT(18), "MQ_RG_BSY 18"},
+			{cp_stat & BIT(19), "MQ_NDRS_BS19"},
+			{cp_stat & BIT(20), "MQ_NDR2_BS20"},
+			{cp_stat & BIT(21), "MIU_WC_STL21"},
+			{cp_stat & BIT(22), "CP_NRT_BSY22"},
+			{cp_stat & BIT(23), "3D_BUSY   23"},
+			{cp_stat & BIT(26), "ME_BUSY   26"},
+			{cp_stat & BIT(29), "ME_WC_BSY 29"},
+			{cp_stat & BIT(30), "MIU_FF EM 30"},
+			{cp_stat & BIT(31), "CP_BUSY   31"},
+		};
+		adreno_dump_fields(device, " CP_STT=", lns, ARRAY_SIZE(lns));
+	}
+#endif
+
+	kgsl_regread(device, REG_SCRATCH_REG0, &r1);
+	KGSL_LOG_DUMP(device, "SCRATCH_REG0       = %08X\n", r1);
+
+	kgsl_regread(device, REG_COHER_SIZE_PM4, &r1);
+	kgsl_regread(device, REG_COHER_BASE_PM4, &r2);
+	kgsl_regread(device, REG_COHER_STATUS_PM4, &r3);
+	KGSL_LOG_DUMP(device,
+		"COHER:  SIZE_PM4   = %08X | BASE_PM4 = %08X | STATUS_PM4"
+		" = %08X\n", r1, r2, r3);
+
+	kgsl_regread(device, MH_AXI_ERROR, &r1);
+	KGSL_LOG_DUMP(device, "MH:     AXI_ERROR  = %08X\n", r1);
+
+	kgsl_regread(device, MH_MMU_PAGE_FAULT, &r1);
+	kgsl_regread(device, MH_MMU_CONFIG, &r2);
+	kgsl_regread(device, MH_MMU_MPU_BASE, &r3);
+	KGSL_LOG_DUMP(device,
+		"MH_MMU: PAGE_FAULT = %08X | CONFIG   = %08X | MPU_BASE ="
+		" %08X\n", r1, r2, r3);
+
+	kgsl_regread(device, MH_MMU_MPU_END, &r1);
+	kgsl_regread(device, MH_MMU_VA_RANGE, &r2);
+	kgsl_regread(device, MH_MMU_PT_BASE, &pt_base);
+	KGSL_LOG_DUMP(device,
+		"        MPU_END    = %08X | VA_RANGE = %08X | PT_BASE  ="
+		" %08X\n", r1, r2, pt_base);
+
+	KGSL_LOG_DUMP(device, "PAGETABLE SIZE: %08X ", KGSL_PAGETABLE_SIZE);
+
+	kgsl_regread(device, MH_MMU_TRAN_ERROR, &r1);
+	KGSL_LOG_DUMP(device, "        TRAN_ERROR = %08X\n", r1);
+
+	kgsl_regread(device, MH_INTERRUPT_MASK, &r1);
+	kgsl_regread(device, MH_INTERRUPT_STATUS, &r2);
+	KGSL_LOG_DUMP(device,
+		"MH_INTERRUPT: MASK = %08X | STATUS   = %08X\n", r1, r2);
+
+	ts_processed = device->ftbl->readtimestamp(device,
+		KGSL_TIMESTAMP_RETIRED);
+	KGSL_LOG_DUMP(device, "TIMESTM RTRD: %08X\n", ts_processed);
+
+	num_item = adreno_ringbuffer_count(&adreno_dev->ringbuffer,
+						cp_rb_rptr);
+	if (num_item <= 0)
+		KGSL_LOG_POSTMORTEM_WRITE(device, "Ringbuffer is Empty.\n");
+
+	rb_copy = vmalloc(rb_count<<2);
+	if (!rb_copy) {
+		KGSL_LOG_POSTMORTEM_WRITE(device,
+			"vmalloc(%d) failed\n", rb_count << 2);
+		result = -ENOMEM;
+		goto end;
+	}
+
+	KGSL_LOG_DUMP(device, "RB: rd_addr:%8.8x  rb_size:%d  num_item:%d\n",
+		cp_rb_base, rb_count<<2, num_item);
+	rb_vaddr = (const uint32_t *)kgsl_sharedmem_convertaddr(device, pt_base,
+					cp_rb_base, &rb_memsize);
+	if (!rb_vaddr) {
+		KGSL_LOG_POSTMORTEM_WRITE(device,
+			"Can't fetch vaddr for CP_RB_BASE\n");
+		goto error_vfree;
+	}
+
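+	/* dump a window from 64 dwords before the read pointer to 16 dwords
+	 * past the write pointer */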
+	read_idx = (int)cp_rb_rptr - 64;
+	if (read_idx < 0)
+		read_idx += rb_count;
+	write_idx = (int)cp_rb_wptr + 16;
+	if (write_idx > rb_count)
+		write_idx -= rb_count;
+	num_item += 64+16;
+	if (num_item > rb_count)
+		num_item = rb_count;
+	if (write_idx >= read_idx)
+		memcpy(rb_copy, rb_vaddr+read_idx, num_item<<2);
+	else {
+		int part1_c = rb_count-read_idx;
+		memcpy(rb_copy, rb_vaddr+read_idx, part1_c<<2);
+		memcpy(rb_copy+part1_c, rb_vaddr, (num_item-part1_c)<<2);
+	}
+
+	/* extract the latest ib commands from the buffer */
+	ib_list.count = 0;
+	i = 0;
+	for (read_idx = 0; read_idx < num_item; ) {
+		uint32_t this_cmd = rb_copy[read_idx++];
+		if (this_cmd == pm4_type3_packet(PM4_INDIRECT_BUFFER_PFD, 2)) {
+			uint32_t ib_addr = rb_copy[read_idx++];
+			uint32_t ib_size = rb_copy[read_idx++];
+			dump_ib1(device, pt_base, (read_idx-3)<<2, ib_addr,
+				ib_size, &ib_list, 0);
+			for (; i < ib_list.count; ++i)
+				dump_ib(device, "IB2:", pt_base,
+					ib_list.offsets[i],
+					ib_list.bases[i],
+					ib_list.sizes[i], 0);
+		}
+	}
+
+	read_idx = (int)cp_rb_rptr - 64;
+	if (read_idx < 0)
+		read_idx += rb_count;
+	KGSL_LOG_DUMP(device,
+		"RB: addr=%8.8x  window:%4.4x-%4.4x, start:%4.4x\n",
+		cp_rb_base, cp_rb_rptr, cp_rb_wptr, read_idx);
+	adreno_dump_rb(device, rb_copy, num_item<<2, read_idx, rb_count);
+
+	if (adreno_ib_dump_enabled()) {
+		for (read_idx = 64; read_idx >= 0; --read_idx) {
+			uint32_t this_cmd = rb_copy[read_idx];
+			if (this_cmd == pm4_type3_packet(
+				PM4_INDIRECT_BUFFER_PFD, 2)) {
+				uint32_t ib_addr = rb_copy[read_idx+1];
+				uint32_t ib_size = rb_copy[read_idx+2];
+				if (cp_ib1_bufsz && cp_ib1_base == ib_addr) {
+					KGSL_LOG_DUMP(device,
+						"IB1: base:%8.8X  "
+						"count:%d\n", ib_addr, ib_size);
+					dump_ib(device, "IB1: ", pt_base,
+						read_idx<<2, ib_addr, ib_size,
+						1);
+				}
+			}
+		}
+		for (i = 0; i < ib_list.count; ++i) {
+			if (cp_ib2_bufsz && cp_ib2_base == ib_list.bases[i]) {
+				uint32_t ib_size = ib_list.sizes[i];
+				uint32_t ib_offset = ib_list.offsets[i];
+				KGSL_LOG_DUMP(device,
+					"IB2: base:%8.8X  count:%d\n",
+					cp_ib2_base, ib_size);
+				dump_ib(device, "IB2: ", pt_base, ib_offset,
+					ib_list.bases[i], ib_size, 1);
+			}
+		}
+	}
+
+	/* Dump the registers if the user asked for it */
+
+	if (adreno_is_a20x(adreno_dev))
+		adreno_dump_regs(device, a200_registers,
+			ARRAY_SIZE(a200_registers) / 2);
+	else if (adreno_is_a22x(adreno_dev))
+		adreno_dump_regs(device, a220_registers,
+			ARRAY_SIZE(a220_registers) / 2);
+
+error_vfree:
+	vfree(rb_copy);
+end:
+	return result;
+}
+
+/**
+ * adreno_postmortem_dump - Dump the current GPU state
+ * @device - A pointer to the KGSL device to dump
+ * @manual - A flag that indicates if this was a manually triggered
+ *           dump (from debugfs).  If zero, then this is assumed to be a
+ *           dump automatically triggered from a hang
+ */
+
+int adreno_postmortem_dump(struct kgsl_device *device, int manual)
+{
+	bool saved_nap;
+
+	BUG_ON(device == NULL);
+
+	/* For a manual dump, make sure that the system is idle */
+
+	if (manual) {
+		if (device->active_cnt != 0) {
+			mutex_unlock(&device->mutex);
+			wait_for_completion(&device->suspend_gate);
+			mutex_lock(&device->mutex);
+		}
+
+		if (device->state == KGSL_STATE_ACTIVE)
+			kgsl_idle(device,  KGSL_TIMEOUT_DEFAULT);
+
+	}
+	/* Disable the idle timer so we don't get interrupted */
+	del_timer(&device->idle_timer);
+
+	/* Turn off napping to make sure we have the clocks'
+	   full attention through the following process */
+	saved_nap = device->pwrctrl.nap_allowed;
+	device->pwrctrl.nap_allowed = false;
+
+	/* Force on the clocks */
+	kgsl_pwrctrl_wake(device);
+
+	/* Disable the irq */
+	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+
+	/* If this is not a manual trigger, then set up the
+	   state to try to recover */
+
+	if (!manual) {
+		device->state = KGSL_STATE_DUMP_AND_RECOVER;
+		KGSL_PWR_WARN(device,
+				"state -> DUMP_AND_RECOVER, device %d\n",
+				device->id);
+	}
+
+	KGSL_DRV_ERR(device,
+			"wait for work in workqueue to complete\n");
+	mutex_unlock(&device->mutex);
+	flush_workqueue(device->work_queue);
+	mutex_lock(&device->mutex);
+	adreno_dump(device);
+
+	/* Restore nap mode */
+	device->pwrctrl.nap_allowed = saved_nap;
+
+	/* On a manual trigger, turn on the interrupts and put
+	   the clocks to sleep.  They will recover themselves
+	   on the next event.  For a hang, leave things as they
+	   are until recovery kicks in. */
+
+	if (manual) {
+		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
+
+		/* try to go into a sleep mode until the next event */
+		device->requested_state = KGSL_STATE_SLEEP;
+		kgsl_pwrctrl_sleep(device);
+	}
+
+	KGSL_DRV_ERR(device, "Dump Finished\n");
+
+	return 0;
+}
diff --git a/drivers/gpu/msm/adreno_postmortem.h b/drivers/gpu/msm/adreno_postmortem.h
new file mode 100644
index 0000000..b677800
--- /dev/null
+++ b/drivers/gpu/msm/adreno_postmortem.h
@@ -0,0 +1,21 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ADRENO_POSTMORTEM_H
+#define __ADRENO_POSTMORTEM_H
+
+struct kgsl_device;
+
+int adreno_postmortem_dump(struct kgsl_device *device, int manual);
+
+#endif /* __ADRENO_POSTMORTEM_H */
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
new file mode 100644
index 0000000..e7aaa14
--- /dev/null
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -0,0 +1,929 @@
+/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/log2.h>
+
+#include "kgsl.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_cffdump.h"
+
+#include "adreno.h"
+#include "adreno_pm4types.h"
+#include "adreno_ringbuffer.h"
+
+#include "a200_reg.h"
+
+#define VALID_STATUS_COUNT_MAX	10
+#define GSL_RB_NOP_SIZEDWORDS				2
+/* protected mode error checking below register address 0x800
+*  note: if CP_INTERRUPT packet is used then checking needs
+*  to change to below register address 0x7C8
+*/
+#define GSL_RB_PROTECTED_MODE_CONTROL		0x200001F2
+
+/* Firmware file names
+ * The legacy file names must remain, but the macro names are
+ * renamed to match the current kgsl model.
+ * a200 is yamato
+ * a220 is leia
+ */
+#define A200_PFP_FW "yamato_pfp.fw"
+#define A200_PM4_FW "yamato_pm4.fw"
+#define A220_PFP_470_FW "leia_pfp_470.fw"
+#define A220_PM4_470_FW "leia_pm4_470.fw"
+#define A225_PFP_FW "a225_pfp.fw"
+#define A225_PM4_FW "a225_pm4.fw"
+
+/* functions */
+void kgsl_cp_intrcallback(struct kgsl_device *device)
+{
+	unsigned int status = 0, num_reads = 0, master_status = 0;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+
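+	/* CP_INT_STATUS can transiently read back as 0 even while the master
+	 * status reports a pending CP interrupt, so retry a bounded number
+	 * of times before giving up */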
+	adreno_regread(device, REG_MASTER_INT_SIGNAL, &master_status);
+	while (!status && (num_reads < VALID_STATUS_COUNT_MAX) &&
+		(master_status & MASTER_INT_SIGNAL__CP_INT_STAT)) {
+		adreno_regread(device, REG_CP_INT_STATUS, &status);
+		adreno_regread(device, REG_MASTER_INT_SIGNAL,
+					&master_status);
+		num_reads++;
+	}
+	if (num_reads > 1)
+		KGSL_DRV_WARN(device,
+			"Looped %d times to read REG_CP_INT_STATUS\n",
+			num_reads);
+	if (!status) {
+		if (master_status & MASTER_INT_SIGNAL__CP_INT_STAT) {
+			/* This indicates that we could not read CP_INT_STAT.
+			 * As a precaution just wake up processes so
+			 * they can check their timestamps. Since we
+			 * did not ack any interrupts, this interrupt will
+			 * be generated again */
+			KGSL_DRV_WARN(device, "Unable to read CP_INT_STATUS\n");
+			wake_up_interruptible_all(&device->wait_queue);
+		} else
+			KGSL_DRV_WARN(device, "Spurious interrupt detected\n");
+		return;
+	}
+
+	if (status & CP_INT_CNTL__RB_INT_MASK) {
+		/* signal intr completion event */
+		unsigned int enableflag = 0;
+		kgsl_sharedmem_writel(&rb->device->memstore,
+			KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable),
+			enableflag);
+		wmb();
+		KGSL_CMD_WARN(rb->device, "ringbuffer rb interrupt\n");
+	}
+
+	if (status & CP_INT_CNTL__T0_PACKET_IN_IB_MASK) {
+		KGSL_CMD_CRIT(rb->device,
+			"ringbuffer T0 packet in IB interrupt\n");
+		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+	}
+	if (status & CP_INT_CNTL__OPCODE_ERROR_MASK) {
+		KGSL_CMD_CRIT(rb->device,
+			"ringbuffer opcode error interrupt\n");
+		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+	}
+	if (status & CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK) {
+		KGSL_CMD_CRIT(rb->device,
+			"ringbuffer protected mode error interrupt\n");
+		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+	}
+	if (status & CP_INT_CNTL__RESERVED_BIT_ERROR_MASK) {
+		KGSL_CMD_CRIT(rb->device,
+			"ringbuffer reserved bit error interrupt\n");
+		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+	}
+	if (status & CP_INT_CNTL__IB_ERROR_MASK) {
+		KGSL_CMD_CRIT(rb->device,
+			"ringbuffer IB error interrupt\n");
+		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+	}
+	if (status & CP_INT_CNTL__SW_INT_MASK)
+		KGSL_CMD_INFO(rb->device, "ringbuffer software interrupt\n");
+
+	if (status & CP_INT_CNTL__IB2_INT_MASK)
+		KGSL_CMD_INFO(rb->device, "ringbuffer ib2 interrupt\n");
+
+	if (status & (~KGSL_CP_INT_MASK))
+		KGSL_CMD_WARN(rb->device,
+			"bad bits in REG_CP_INT_STATUS %08x\n", status);
+
+	/* only ack bits we understand */
+	status &= KGSL_CP_INT_MASK;
+	adreno_regwrite(device, REG_CP_INT_ACK, status);
+
+	if (status & (CP_INT_CNTL__IB1_INT_MASK | CP_INT_CNTL__RB_INT_MASK)) {
+		KGSL_CMD_WARN(rb->device, "ringbuffer ib1/rb interrupt\n");
+		wake_up_interruptible_all(&device->wait_queue);
+		atomic_notifier_call_chain(&(device->ts_notifier_list),
+					   device->id,
+					   NULL);
+	}
+}
+
+static void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb)
+{
+	BUG_ON(rb->wptr == 0);
+
+	/*synchronize memory before informing the hardware of the
+	 *new commands.
+	 */
+	mb();
+
+	adreno_regwrite(rb->device, REG_CP_RB_WPTR, rb->wptr);
+}
+
+static int
+adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb, unsigned int numcmds,
+			  int wptr_ahead)
+{
+	int nopcount;
+	unsigned int freecmds;
+	unsigned int *cmds;
+	uint cmds_gpu;
+
+	/* if wptr ahead, fill the remaining with NOPs */
+	if (wptr_ahead) {
+		/* -1 for header */
+		nopcount = rb->sizedwords - rb->wptr - 1;
+
+		cmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
+		cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*rb->wptr;
+
+		GSL_RB_WRITE(cmds, cmds_gpu, pm4_nop_packet(nopcount));
+
+		/* Make sure that rptr is not 0 before submitting
+		 * commands at the end of ringbuffer. We do not
+		 * want the rptr and wptr to become equal when
+		 * the ringbuffer is not empty */
+		do {
+			GSL_RB_GET_READPTR(rb, &rb->rptr);
+		} while (!rb->rptr);
+
+		rb->wptr++;
+
+		adreno_ringbuffer_submit(rb);
+
+		rb->wptr = 0;
+	}
+
+	/* wait for space in ringbuffer */
+	do {
+		GSL_RB_GET_READPTR(rb, &rb->rptr);
+
+		freecmds = rb->rptr - rb->wptr;
+
+	} while ((freecmds != 0) && (freecmds <= numcmds));
+
+	return 0;
+}
+
+
+static unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
+					     unsigned int numcmds)
+{
+	unsigned int	*ptr = NULL;
+	int				status = 0;
+
+	BUG_ON(numcmds >= rb->sizedwords);
+
+	GSL_RB_GET_READPTR(rb, &rb->rptr);
+	/* check for available space */
+	if (rb->wptr >= rb->rptr) {
+		/* wptr ahead or equal to rptr */
+		/* reserve dwords for nop packet */
+		if ((rb->wptr + numcmds) > (rb->sizedwords -
+				GSL_RB_NOP_SIZEDWORDS))
+			status = adreno_ringbuffer_waitspace(rb, numcmds, 1);
+	} else {
+		/* wptr behind rptr */
+		if ((rb->wptr + numcmds) >= rb->rptr)
+			status  = adreno_ringbuffer_waitspace(rb, numcmds, 0);
+		/* check for remaining space */
+		/* reserve dwords for nop packet */
+		if ((rb->wptr + numcmds) > (rb->sizedwords -
+				GSL_RB_NOP_SIZEDWORDS))
+			status = adreno_ringbuffer_waitspace(rb, numcmds, 1);
+	}
+
+	if (status == 0) {
+		ptr = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
+		rb->wptr += numcmds;
+	}
+
+	return ptr;
+}
+
+static int _load_firmware(struct kgsl_device *device, const char *fwfile,
+			  void **data, int *len)
+{
+	const struct firmware *fw = NULL;
+	int ret;
+
+	ret = request_firmware(&fw, fwfile, device->dev);
+
+	if (ret) {
+		KGSL_DRV_ERR(device, "request_firmware(%s) failed: %d\n",
+			     fwfile, ret);
+		return ret;
+	}
+
+	*data = kmalloc(fw->size, GFP_KERNEL);
+
+	if (*data) {
+		memcpy(*data, fw->data, fw->size);
+		*len = fw->size;
+	} else
+		KGSL_MEM_ERR(device, "kmalloc(%d) failed\n", fw->size);
+
+	release_firmware(fw);
+	return (*data != NULL) ? 0 : -ENOMEM;
+}
+
+static int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	const char *fwfile;
+	int i, ret = 0;
+
+	if (adreno_is_a220(adreno_dev)) {
+		fwfile =  A220_PM4_470_FW;
+	} else if (adreno_is_a225(adreno_dev)) {
+		fwfile =  A225_PM4_FW;
+	} else if (adreno_is_a20x(adreno_dev)) {
+		fwfile =  A200_PM4_FW;
+	} else {
+		KGSL_DRV_ERR(device, "Could not load PM4 file\n");
+		return -EINVAL;
+	}
+
+	if (adreno_dev->pm4_fw == NULL) {
+		int len;
+		unsigned int *ptr;
+
+		ret = _load_firmware(device, fwfile, (void *) &ptr, &len);
+		if (ret)
+			goto err;
+
+		/* PM4 size is 3 dword aligned plus 1 dword of version */
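+		/* i.e. a valid image is (3 * n + 1) dwords, so len % 12
+		 * bytes must equal 4 */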
+		if (len % ((sizeof(uint32_t) * 3)) != sizeof(uint32_t)) {
+			KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		adreno_dev->pm4_fw_size = len / sizeof(uint32_t);
+		adreno_dev->pm4_fw = ptr;
+	}
+
+	KGSL_DRV_INFO(device, "loading pm4 ucode version: %d\n",
+		adreno_dev->pm4_fw[0]);
+
+	adreno_regwrite(device, REG_CP_DEBUG, 0x02000000);
+	adreno_regwrite(device, REG_CP_ME_RAM_WADDR, 0);
+	for (i = 1; i < adreno_dev->pm4_fw_size; i++)
+		adreno_regwrite(device, REG_CP_ME_RAM_DATA,
+				     adreno_dev->pm4_fw[i]);
+err:
+	return ret;
+}
+
+static int adreno_ringbuffer_load_pfp_ucode(struct kgsl_device *device)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	const char *fwfile;
+	int i, ret = 0;
+
+	if (adreno_is_a220(adreno_dev)) {
+		fwfile =  A220_PFP_470_FW;
+	} else if (adreno_is_a225(adreno_dev)) {
+		fwfile =  A225_PFP_FW;
+	} else if (adreno_is_a20x(adreno_dev)) {
+		fwfile = A200_PFP_FW;
+	} else {
+		KGSL_DRV_ERR(device, "Could not load PFP firmware\n");
+		return -EINVAL;
+	}
+
+	if (adreno_dev->pfp_fw == NULL) {
+		int len;
+		unsigned int *ptr;
+
+		ret = _load_firmware(device, fwfile, (void *) &ptr, &len);
+		if (ret)
+			goto err;
+
+		/* PFP size should be dword aligned */
+		if (len % sizeof(uint32_t) != 0) {
+			KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		adreno_dev->pfp_fw_size = len / sizeof(uint32_t);
+		adreno_dev->pfp_fw = ptr;
+	}
+
+	KGSL_DRV_INFO(device, "loading pfp ucode version: %d\n",
+		adreno_dev->pfp_fw[0]);
+
+	adreno_regwrite(device, REG_CP_PFP_UCODE_ADDR, 0);
+	for (i = 1; i < adreno_dev->pfp_fw_size; i++)
+		adreno_regwrite(device, REG_CP_PFP_UCODE_DATA,
+				     adreno_dev->pfp_fw[i]);
+err:
+	return ret;
+}
+
+int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram)
+{
+	int status;
+	/*cp_rb_cntl_u cp_rb_cntl; */
+	union reg_cp_rb_cntl cp_rb_cntl;
+	unsigned int *cmds, rb_cntl;
+	struct kgsl_device *device = rb->device;
+	uint cmds_gpu;
+
+	if (rb->flags & KGSL_FLAGS_STARTED)
+		return 0;
+
+	if (init_ram) {
+		rb->timestamp = 0;
+		GSL_RB_INIT_TIMESTAMP(rb);
+	}
+
+	kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0,
+			   sizeof(struct kgsl_rbmemptrs));
+
+	kgsl_sharedmem_set(&rb->buffer_desc, 0, 0xAA,
+			   (rb->sizedwords << 2));
+
+	adreno_regwrite(device, REG_CP_RB_WPTR_BASE,
+			     (rb->memptrs_desc.gpuaddr
+			      + GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));
+
+	/* setup WPTR delay */
+	adreno_regwrite(device, REG_CP_RB_WPTR_DELAY, 0 /*0x70000010 */);
+
+	/*setup REG_CP_RB_CNTL */
+	adreno_regread(device, REG_CP_RB_CNTL, &rb_cntl);
+	cp_rb_cntl.val = rb_cntl;
+
+	/*
+	 * The size of the ringbuffer in the hardware is the log2
+	 * representation of the size in quadwords (sizedwords / 2)
+	 */
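+	/* e.g. the default 32 KB ring is 8192 dwords (4096 quadwords),
+	 * giving rb_bufsz = 12 */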
+	cp_rb_cntl.f.rb_bufsz = ilog2(rb->sizedwords >> 1);
+
+	/*
+	 * Specify the quadwords to read before updating mem RPTR.
+	 * Like above, pass the log2 representation of the blocksize
+	 * in quadwords.
+	*/
+	cp_rb_cntl.f.rb_blksz = ilog2(KGSL_RB_BLKSIZE >> 3);
+
+	cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN; /* WPTR polling */
+	/* mem RPTR writebacks */
+	cp_rb_cntl.f.rb_no_update =  GSL_RB_CNTL_NO_UPDATE;
+
+	adreno_regwrite(device, REG_CP_RB_CNTL, cp_rb_cntl.val);
+
+	adreno_regwrite(device, REG_CP_RB_BASE, rb->buffer_desc.gpuaddr);
+
+	adreno_regwrite(device, REG_CP_RB_RPTR_ADDR,
+			     rb->memptrs_desc.gpuaddr +
+			     GSL_RB_MEMPTRS_RPTR_OFFSET);
+
+	/* explicitly clear all cp interrupts */
+	adreno_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);
+
+	/* setup scratch/timestamp */
+	adreno_regwrite(device, REG_SCRATCH_ADDR,
+			     device->memstore.gpuaddr +
+			     KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp));
+
+	adreno_regwrite(device, REG_SCRATCH_UMSK,
+			     GSL_RB_MEMPTRS_SCRATCH_MASK);
+
+	/* load the CP ucode */
+
+	status = adreno_ringbuffer_load_pm4_ucode(device);
+	if (status != 0)
+		return status;
+
+	/* load the prefetch parser ucode */
+	status = adreno_ringbuffer_load_pfp_ucode(device);
+	if (status != 0)
+		return status;
+
+	adreno_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x000C0804);
+
+	rb->rptr = 0;
+	rb->wptr = 0;
+
+	/* clear ME_HALT to start micro engine */
+	adreno_regwrite(device, REG_CP_ME_CNTL, 0);
+
+	/* ME_INIT */
+	cmds = adreno_ringbuffer_allocspace(rb, 19);
+	cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*(rb->wptr-19);
+
+	GSL_RB_WRITE(cmds, cmds_gpu, PM4_HDR_ME_INIT);
+	/* All fields present (bits 9:0) */
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x000003ff);
+	/* Disable/Enable Real-Time Stream processing (present but ignored) */
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+	/* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+
+	GSL_RB_WRITE(cmds, cmds_gpu,
+		GSL_HAL_SUBBLOCK_OFFSET(REG_RB_SURFACE_INFO));
+	GSL_RB_WRITE(cmds, cmds_gpu,
+		GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SC_WINDOW_OFFSET));
+	GSL_RB_WRITE(cmds, cmds_gpu,
+		GSL_HAL_SUBBLOCK_OFFSET(REG_VGT_MAX_VTX_INDX));
+	GSL_RB_WRITE(cmds, cmds_gpu,
+		GSL_HAL_SUBBLOCK_OFFSET(REG_SQ_PROGRAM_CNTL));
+	GSL_RB_WRITE(cmds, cmds_gpu,
+		GSL_HAL_SUBBLOCK_OFFSET(REG_RB_DEPTHCONTROL));
+	GSL_RB_WRITE(cmds, cmds_gpu,
+		GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SU_POINT_SIZE));
+	GSL_RB_WRITE(cmds, cmds_gpu,
+		GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SC_LINE_CNTL));
+	GSL_RB_WRITE(cmds, cmds_gpu,
+		GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SU_POLY_OFFSET_FRONT_SCALE));
+
+	/* Vertex and Pixel Shader Start Addresses in instructions
+	* (3 DWORDS per instruction) */
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x80000180);
+	/* Maximum Contexts */
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000001);
+	/* Write Confirm Interval: the CP will wait
+	* wait_interval * 16 clocks between polls */
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+
+	/* NQ and External Memory Swap */
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+	/* Protected mode error checking */
+	GSL_RB_WRITE(cmds, cmds_gpu, GSL_RB_PROTECTED_MODE_CONTROL);
+	/* Disable header dumping and Header dump address */
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+	/* Header dump size */
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+
+	adreno_ringbuffer_submit(rb);
+
+	/* idle device to validate ME INIT */
+	status = adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
+
+	if (status == 0)
+		rb->flags |= KGSL_FLAGS_STARTED;
+
+	return status;
+}
+
+int adreno_ringbuffer_stop(struct adreno_ringbuffer *rb)
+{
+	if (rb->flags & KGSL_FLAGS_STARTED) {
+		/* ME_HALT */
+		adreno_regwrite(rb->device, REG_CP_ME_CNTL, 0x10000000);
+
+		rb->flags &= ~KGSL_FLAGS_STARTED;
+	}
+
+	return 0;
+}
+
+int adreno_ringbuffer_init(struct kgsl_device *device)
+{
+	int status;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+
+	rb->device = device;
+	/*
+	 * It is silly to convert this to words and then back to bytes
+	 * immediately below, but most of the rest of the code deals
+	 * in words, so we might as well only do the math once
+	 */
+	rb->sizedwords = KGSL_RB_SIZE >> 2;
+
+	/* allocate memory for ringbuffer */
+	status = kgsl_allocate_contiguous(&rb->buffer_desc,
+		(rb->sizedwords << 2));
+
+	if (status != 0) {
+		adreno_ringbuffer_close(rb);
+		return status;
+	}
+
+	/* allocate memory for polling and timestamps */
+	/* This really could be at a 4 byte alignment boundary, but when
+	 * using the MMU we need to make it a page boundary */
+	status = kgsl_allocate_contiguous(&rb->memptrs_desc,
+		sizeof(struct kgsl_rbmemptrs));
+
+	if (status != 0) {
+		adreno_ringbuffer_close(rb);
+		return status;
+	}
+
+	/* overlay structure on memptrs memory */
+	rb->memptrs = (struct kgsl_rbmemptrs *) rb->memptrs_desc.hostptr;
+
+	return 0;
+}
+
+int adreno_ringbuffer_close(struct adreno_ringbuffer *rb)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
+
+	kgsl_sharedmem_free(&rb->buffer_desc);
+	kgsl_sharedmem_free(&rb->memptrs_desc);
+
+	kfree(adreno_dev->pfp_fw);
+	kfree(adreno_dev->pm4_fw);
+
+	adreno_dev->pfp_fw = NULL;
+	adreno_dev->pm4_fw = NULL;
+
+	memset(rb, 0, sizeof(struct adreno_ringbuffer));
+
+	return 0;
+}
+
+static uint32_t
+adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
+				unsigned int flags, unsigned int *cmds,
+				int sizedwords)
+{
+	unsigned int *ringcmds;
+	unsigned int timestamp;
+	unsigned int total_sizedwords = sizedwords + 6;
+	unsigned int i;
+	unsigned int rcmd_gpu;
+
+	/* reserve space to temporarily turn off protected mode
+	*  error checking if needed
+	*/
+	total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
+	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NO_TS_CMP) ? 7 : 0;
+	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD) ? 2 : 0;
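+	/* the base of 6 covers the CP_TIMESTAMP write (2) and the EVENT_WRITE
+	 * packet (4); the optional 4, 7 and 2 cover the protected mode
+	 * toggles, the conditional interrupt and the kernel command
+	 * identifier NOP respectively */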
+
+	ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords);
+	rcmd_gpu = rb->buffer_desc.gpuaddr
+		+ sizeof(uint)*(rb->wptr-total_sizedwords);
+
+	if (!(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD)) {
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_nop_packet(1));
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER);
+	}
+	if (flags & KGSL_CMD_FLAGS_PMODE) {
+		/* disable protected mode error checking */
+		GSL_RB_WRITE(ringcmds, rcmd_gpu,
+			pm4_type3_packet(PM4_SET_PROTECTED_MODE, 1));
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
+	}
+
+	for (i = 0; i < sizedwords; i++) {
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, *cmds);
+		cmds++;
+	}
+
+	if (flags & KGSL_CMD_FLAGS_PMODE) {
+		/* re-enable protected mode error checking */
+		GSL_RB_WRITE(ringcmds, rcmd_gpu,
+			pm4_type3_packet(PM4_SET_PROTECTED_MODE, 1));
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, 1);
+	}
+
+	rb->timestamp++;
+	timestamp = rb->timestamp;
+
+	/* start-of-pipeline and end-of-pipeline timestamps */
+	GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_type0_packet(REG_CP_TIMESTAMP, 1));
+	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
+	GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_type3_packet(PM4_EVENT_WRITE, 3));
+	GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
+	GSL_RB_WRITE(ringcmds, rcmd_gpu,
+		     (rb->device->memstore.gpuaddr +
+		      KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)));
+	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
+
+	if (!(flags & KGSL_CMD_FLAGS_NO_TS_CMP)) {
+		/* Conditional execution based on memory values */
+		GSL_RB_WRITE(ringcmds, rcmd_gpu,
+			pm4_type3_packet(PM4_COND_EXEC, 4));
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
+			KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable)) >> 2);
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
+			KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts)) >> 2);
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
+		/* # of conditional command DWORDs */
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, 2);
+		GSL_RB_WRITE(ringcmds, rcmd_gpu,
+			pm4_type3_packet(PM4_INTERRUPT, 1));
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, CP_INT_CNTL__RB_INT_MASK);
+	}
+
+	adreno_ringbuffer_submit(rb);
+
+	/* return timestamp of issued commands */
+	return timestamp;
+}
+
+void
+adreno_ringbuffer_issuecmds(struct kgsl_device *device,
+						unsigned int flags,
+						unsigned int *cmds,
+						int sizedwords)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+
+	if (device->state & KGSL_STATE_HUNG)
+		return;
+	adreno_ringbuffer_addcmds(rb, flags, cmds, sizedwords);
+}
+
+int
+adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
+				struct kgsl_context *context,
+				struct kgsl_ibdesc *ibdesc,
+				unsigned int numibs,
+				uint32_t *timestamp,
+				unsigned int flags)
+{
+	struct kgsl_device *device = dev_priv->device;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	unsigned int *link;
+	unsigned int *cmds;
+	unsigned int i;
+	struct adreno_context *drawctxt;
+
+	if (device->state & KGSL_STATE_HUNG)
+		return -EBUSY;
+	if (!(adreno_dev->ringbuffer.flags & KGSL_FLAGS_STARTED) ||
+	      context == NULL)
+		return -EINVAL;
+
+	/* don't dereference context until after the NULL check above */
+	drawctxt = context->devctxt;
+
+	BUG_ON(ibdesc == 0);
+	BUG_ON(numibs == 0);
+
+	if (drawctxt->flags & CTXT_FLAGS_GPU_HANG) {
+		KGSL_CTXT_WARN(device, "Context %p caused a gpu hang.."
+			" will not accept commands for this context\n",
+			drawctxt);
+		return -EDEADLK;
+	}
+	link = kzalloc(sizeof(unsigned int) * numibs * 3, GFP_KERNEL);
+	cmds = link;
+	if (!link) {
+		KGSL_MEM_ERR(device, "Failed to allocate memory for command"
+			" submission, size %x\n", numibs * 3);
+		return -ENOMEM;
+	}
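+	/* each IB is encoded as a 3-dword INDIRECT_BUFFER_PFD packet:
+	 * header, gpu address and size in dwords */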
+	for (i = 0; i < numibs; i++) {
+		(void)kgsl_cffdump_parse_ibs(dev_priv, NULL,
+			ibdesc[i].gpuaddr, ibdesc[i].sizedwords, false);
+
+		*cmds++ = PM4_HDR_INDIRECT_BUFFER_PFD;
+		*cmds++ = ibdesc[i].gpuaddr;
+		*cmds++ = ibdesc[i].sizedwords;
+	}
+
+	kgsl_setstate(device,
+		      kgsl_pt_get_flags(device->mmu.hwpagetable,
+					device->id));
+
+	adreno_drawctxt_switch(adreno_dev, drawctxt, flags);
+
+	*timestamp = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
+					KGSL_CMD_FLAGS_NOT_KERNEL_CMD,
+					&link[0], (cmds - link));
+
+	KGSL_CMD_INFO(device, "ctxt %d g %08x numibs %d ts %d\n",
+		context->id, (unsigned int)ibdesc, numibs, *timestamp);
+
+	kfree(link);
+
+#ifdef CONFIG_MSM_KGSL_CFF_DUMP
+	/*
+	 * insert wait for idle after every IB1
+	 * this is conservative but works reliably and is ok
+	 * even for performance simulations
+	 */
+	adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
+#endif
+
+	return 0;
+}
+
+int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
+				unsigned int *temp_rb_buffer,
+				int *rb_size)
+{
+	struct kgsl_device *device = rb->device;
+	unsigned int rb_rptr;
+	unsigned int retired_timestamp;
+	unsigned int temp_idx = 0;
+	unsigned int value;
+	unsigned int val1;
+	unsigned int val2;
+	unsigned int val3;
+	unsigned int copy_rb_contents = 0;
+	unsigned int cur_context;
+	unsigned int j;
+
+	GSL_RB_GET_READPTR(rb, &rb->rptr);
+
+	retired_timestamp = device->ftbl->readtimestamp(device,
+		KGSL_TIMESTAMP_RETIRED);
+	KGSL_DRV_ERR(device, "GPU successfully executed till ts: %x\n",
+			retired_timestamp);
+	/*
+	 * We need to go back in history by 4 dwords from the current location
+	 * of the read pointer, as 4 dwords are read to match the end of a
+	 * command.
+	 * Also, take care of wrap around when moving back
+	 */
+	if (rb->rptr >= 4)
+		rb_rptr = (rb->rptr - 4) * sizeof(unsigned int);
+	else
+		rb_rptr = rb->buffer_desc.size -
+			((4 - rb->rptr) * sizeof(unsigned int));
+	/* Read the rb contents going backwards to locate end of last
+	 * successfully executed command */
+	while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
+		kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
+		if (value == retired_timestamp) {
+			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
+							rb->buffer_desc.size);
+			kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
+			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
+							rb->buffer_desc.size);
+			kgsl_sharedmem_readl(&rb->buffer_desc, &val2, rb_rptr);
+			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
+							rb->buffer_desc.size);
+			kgsl_sharedmem_readl(&rb->buffer_desc, &val3, rb_rptr);
+			/* match the pattern found at the end of a command */
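+			/* both patterns are the tails of the packet sequences
+			 * that adreno_ringbuffer_addcmds writes for every
+			 * timestamp */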
+			if ((val1 == 2 &&
+				val2 == pm4_type3_packet(PM4_INTERRUPT, 1)
+				&& val3 == CP_INT_CNTL__RB_INT_MASK) ||
+				(val1 == pm4_type3_packet(PM4_EVENT_WRITE, 3)
+				&& val2 == CACHE_FLUSH_TS &&
+				val3 == (rb->device->memstore.gpuaddr +
+				KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)))) {
+				rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
+							rb->buffer_desc.size);
+				KGSL_DRV_ERR(device,
+					"Found end of last executed "
+					"command at offset: %x\n",
+					rb_rptr / sizeof(unsigned int));
+				break;
+			} else {
+				if (rb_rptr < (3 * sizeof(unsigned int)))
+					rb_rptr = rb->buffer_desc.size -
+						(3 * sizeof(unsigned int))
+							+ rb_rptr;
+				else
+					rb_rptr -= (3 * sizeof(unsigned int));
+			}
+		}
+
+		if (rb_rptr == 0)
+			rb_rptr = rb->buffer_desc.size - sizeof(unsigned int);
+		else
+			rb_rptr -= sizeof(unsigned int);
+	}
+
+	if ((rb_rptr / sizeof(unsigned int)) == rb->wptr) {
+		KGSL_DRV_ERR(device,
+			"GPU recovery from hang not possible because last"
+			" successful timestamp is overwritten\n");
+		return -EINVAL;
+	}
+	/* rb_rptr is now pointing to the first dword of the command following
+	 * the last successfully executed command sequence. The assumption is
+	 * that the GPU is hung in the command sequence pointed to by rb_rptr */
+	/* make sure the GPU is not hung in a command submitted by kgsl
+	 * itself */
+	kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
+	kgsl_sharedmem_readl(&rb->buffer_desc, &val2,
+				adreno_ringbuffer_inc_wrapped(rb_rptr,
+							rb->buffer_desc.size));
+	if (val1 == pm4_nop_packet(1) && val2 == KGSL_CMD_IDENTIFIER) {
+		KGSL_DRV_ERR(device,
+			"GPU recovery from hang not possible because "
+			"of hang in kgsl command\n");
+		return -EINVAL;
+	}
+
+	/* current_context is the context that is presently active in the
+	 * GPU, i.e the context in which the hang is caused */
+	kgsl_sharedmem_readl(&device->memstore, &cur_context,
+		KGSL_DEVICE_MEMSTORE_OFFSET(current_context));
+	while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
+		kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
+		rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
+						rb->buffer_desc.size);
+		/* check for context switch indicator */
+		if (value == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
+			kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
+			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
+							rb->buffer_desc.size);
+			BUG_ON(value != pm4_type3_packet(PM4_MEM_WRITE, 2));
+			kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
+			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
+							rb->buffer_desc.size);
+			BUG_ON(val1 != (device->memstore.gpuaddr +
+				KGSL_DEVICE_MEMSTORE_OFFSET(current_context)));
+			kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
+			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
+							rb->buffer_desc.size);
+			BUG_ON((copy_rb_contents == 0) &&
+				(value == cur_context));
+			/*
+			 * If we were copying the commands and got to this point
+			 * then we need to remove the 3 commands that appear
+			 * before KGSL_CONTEXT_TO_MEM_IDENTIFIER
+			 */
+			if (temp_idx)
+				temp_idx -= 3;
+			/* if the context switches to a context that did not
+			 * cause the hang, then start saving the rb contents
+			 * as those commands can be executed */
+			if (value != cur_context) {
+				copy_rb_contents = 1;
+				temp_rb_buffer[temp_idx++] = pm4_nop_packet(1);
+				temp_rb_buffer[temp_idx++] =
+						KGSL_CMD_IDENTIFIER;
+				temp_rb_buffer[temp_idx++] = pm4_nop_packet(1);
+				temp_rb_buffer[temp_idx++] =
+						KGSL_CONTEXT_TO_MEM_IDENTIFIER;
+				temp_rb_buffer[temp_idx++] =
+					pm4_type3_packet(PM4_MEM_WRITE, 2);
+				temp_rb_buffer[temp_idx++] = val1;
+				temp_rb_buffer[temp_idx++] = value;
+			} else {
+				copy_rb_contents = 0;
+			}
+		} else if (copy_rb_contents)
+			temp_rb_buffer[temp_idx++] = value;
+	}
+
+	*rb_size = temp_idx;
+	KGSL_DRV_ERR(device, "Extracted rb contents, size: %x\n", *rb_size);
+	for (temp_idx = 0; temp_idx < *rb_size;) {
+		char str[80];
+		int idx = 0;
+		if ((temp_idx + 8) <= *rb_size)
+			j = 8;
+		else
+			j = *rb_size - temp_idx;
+		for (; j != 0; j--)
+			idx += scnprintf(str + idx, 80 - idx,
+				"%8.8X ", temp_rb_buffer[temp_idx++]);
+		printk(KERN_ALERT "%s", str);
+	}
+	return 0;
+}
+
+void
+adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
+			int num_rb_contents)
+{
+	int i;
+	unsigned int *ringcmds;
+	unsigned int rcmd_gpu;
+
+	if (!num_rb_contents)
+		return;
+
+	if (num_rb_contents > (rb->buffer_desc.size - rb->wptr)) {
+		adreno_regwrite(rb->device, REG_CP_RB_RPTR, 0);
+		rb->rptr = 0;
+		BUG_ON(num_rb_contents > rb->buffer_desc.size);
+	}
+	ringcmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
+	rcmd_gpu = rb->buffer_desc.gpuaddr + sizeof(unsigned int) * rb->wptr;
+	for (i = 0; i < num_rb_contents; i++)
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, rb_buff[i]);
+	rb->wptr += num_rb_contents;
+	adreno_ringbuffer_submit(rb);
+}
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
new file mode 100644
index 0000000..04432fe
--- /dev/null
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -0,0 +1,156 @@
+/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ADRENO_RINGBUFFER_H
+#define __ADRENO_RINGBUFFER_H
+
+#define GSL_RB_USE_MEM_RPTR
+#define GSL_RB_USE_MEM_TIMESTAMP
+#define GSL_DEVICE_SHADOW_MEMSTORE_TO_USER
+
+/*
+ * Adreno ringbuffer sizes in bytes - these are converted to
+ * the appropriate log2 values in the code
+ */
+
+#define KGSL_RB_SIZE (32 * 1024)
+#define KGSL_RB_BLKSIZE 16
+
+/* CP timestamp register */
+#define	REG_CP_TIMESTAMP		 REG_SCRATCH_REG0
+
+
+struct kgsl_device;
+struct kgsl_device_private;
+
+#define GSL_RB_MEMPTRS_SCRATCH_COUNT	 8
+struct kgsl_rbmemptrs {
+	int  rptr;
+	int  wptr_poll;
+};
+
+#define GSL_RB_MEMPTRS_RPTR_OFFSET \
+	(offsetof(struct kgsl_rbmemptrs, rptr))
+
+#define GSL_RB_MEMPTRS_WPTRPOLL_OFFSET \
+	(offsetof(struct kgsl_rbmemptrs, wptr_poll))
+
+struct adreno_ringbuffer {
+	struct kgsl_device *device;
+	uint32_t flags;
+
+	struct kgsl_memdesc buffer_desc;
+
+	struct kgsl_memdesc memptrs_desc;
+	struct kgsl_rbmemptrs *memptrs;
+
+	/*ringbuffer size */
+	unsigned int sizedwords;
+
+	unsigned int wptr; /* write pointer offset in dwords from baseaddr */
+	unsigned int rptr; /* read pointer offset in dwords from baseaddr */
+	uint32_t timestamp;
+};
+
+/* dword base address of the GFX decode space */
+#define GSL_HAL_SUBBLOCK_OFFSET(reg) ((unsigned int)((reg) - (0x2000)))
+
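+/*
+ * Write one dword into the ringbuffer, mirror it into the CFF dump and
+ * advance both the host pointer and the GPU address
+ */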
+#define GSL_RB_WRITE(ring, gpuaddr, data) \
+	do { \
+		writel_relaxed(data, ring); \
+		wmb(); \
+		kgsl_cffdump_setmem(gpuaddr, data, 4); \
+		ring++; \
+		gpuaddr += sizeof(uint); \
+	} while (0)
+
+/* timestamp */
+#ifdef GSL_DEVICE_SHADOW_MEMSTORE_TO_USER
+#define GSL_RB_USE_MEM_TIMESTAMP
+#endif /* GSL_DEVICE_SHADOW_MEMSTORE_TO_USER */
+
+#ifdef GSL_RB_USE_MEM_TIMESTAMP
+/* enable timestamp (...scratch0) memory shadowing */
+#define GSL_RB_MEMPTRS_SCRATCH_MASK 0x1
+#define GSL_RB_INIT_TIMESTAMP(rb)
+
+#else
+#define GSL_RB_MEMPTRS_SCRATCH_MASK 0x0
+#define GSL_RB_INIT_TIMESTAMP(rb) \
+		adreno_regwrite((rb)->device->id, REG_CP_TIMESTAMP, 0)
+
+#endif /* GSL_RB_USE_MEM_TIMESTAMP */
+
+/* mem rptr */
+#ifdef GSL_RB_USE_MEM_RPTR
+#define GSL_RB_CNTL_NO_UPDATE 0x0 /* enable */
+#define GSL_RB_GET_READPTR(rb, data) \
+	do { \
+		*(data) = readl_relaxed(&(rb)->memptrs->rptr); \
+	} while (0)
+#else
+#define GSL_RB_CNTL_NO_UPDATE 0x1 /* disable */
+#define GSL_RB_GET_READPTR(rb, data) \
+	do { \
+		adreno_regread((rb)->device->id, REG_CP_RB_RPTR, (data)); \
+	} while (0)
+#endif /* GSL_RB_USE_MEM_RPTR */
+
+#define GSL_RB_CNTL_POLL_EN 0x0 /* disable */
+
+int adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
+				struct kgsl_context *context,
+				struct kgsl_ibdesc *ibdesc,
+				unsigned int numibs,
+				uint32_t *timestamp,
+				unsigned int flags);
+
+int adreno_ringbuffer_init(struct kgsl_device *device);
+
+int adreno_ringbuffer_start(struct adreno_ringbuffer *rb,
+				unsigned int init_ram);
+
+int adreno_ringbuffer_stop(struct adreno_ringbuffer *rb);
+
+int adreno_ringbuffer_close(struct adreno_ringbuffer *rb);
+
+void adreno_ringbuffer_issuecmds(struct kgsl_device *device,
+					unsigned int flags,
+					unsigned int *cmdaddr,
+					int sizedwords);
+
+void kgsl_cp_intrcallback(struct kgsl_device *device);
+
+int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
+				unsigned int *temp_rb_buffer,
+				int *rb_size);
+
+void
+adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
+			int num_rb_contents);
+
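+/* Number of dwords queued in the ringbuffer between rptr and wptr,
+ * accounting for wrap-around
+ */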
+static inline int adreno_ringbuffer_count(struct adreno_ringbuffer *rb,
+	unsigned int rptr)
+{
+	if (rb->wptr >= rptr)
+		return rb->wptr - rptr;
+	return rb->wptr + rb->sizedwords - rptr;
+}
+
+/* Increment a value by 4 bytes with wrap-around based on size */
+static inline unsigned int adreno_ringbuffer_inc_wrapped(unsigned int val,
+							unsigned int size)
+{
+	return (val + sizeof(unsigned int)) % size;
+}
+
+#endif  /* __ADRENO_RINGBUFFER_H */
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
new file mode 100644
index 0000000..fef664b
--- /dev/null
+++ b/drivers/gpu/msm/kgsl.c
@@ -0,0 +1,2119 @@
+/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/fb.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/android_pmem.h>
+#include <linux/vmalloc.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/ashmem.h>
+#include <linux/major.h>
+
+#include "kgsl.h"
+#include "kgsl_debugfs.h"
+#include "kgsl_cffdump.h"
+#include "kgsl_log.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_device.h"
+
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX "kgsl."
+
+static int kgsl_pagetable_count = KGSL_PAGETABLE_COUNT;
+module_param_named(ptcount, kgsl_pagetable_count, int, 0);
+MODULE_PARM_DESC(kgsl_pagetable_count,
+"Minimum number of pagetables for KGSL to allocate at initialization time");
+
+static inline struct kgsl_mem_entry *
+kgsl_mem_entry_create(void)
+{
+	struct kgsl_mem_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+
+	if (!entry)
+		KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*entry));
+	else
+		kref_init(&entry->refcount);
+
+	return entry;
+}
+
+void
+kgsl_mem_entry_destroy(struct kref *kref)
+{
+	struct kgsl_mem_entry *entry = container_of(kref,
+						    struct kgsl_mem_entry,
+						    refcount);
+	size_t size = entry->memdesc.size;
+
+	kgsl_sharedmem_free(&entry->memdesc);
+
+	if (entry->memtype == KGSL_USER_MEMORY)
+		entry->priv->stats.user -= size;
+	else if (entry->memtype == KGSL_MAPPED_MEMORY) {
+		if (entry->file_ptr)
+			fput(entry->file_ptr);
+
+		kgsl_driver.stats.mapped -= size;
+		entry->priv->stats.mapped -= size;
+	}
+
+	kfree(entry);
+}
+EXPORT_SYMBOL(kgsl_mem_entry_destroy);
+
+static
+void kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry,
+				   struct kgsl_process_private *process)
+{
+	spin_lock(&process->mem_lock);
+	list_add(&entry->list, &process->mem_list);
+	spin_unlock(&process->mem_lock);
+
+	entry->priv = process;
+}
+
+/* Allocate a new context id */
+
+static struct kgsl_context *
+kgsl_create_context(struct kgsl_device_private *dev_priv)
+{
+	struct kgsl_context *context;
+	int ret, id;
+
+	context = kzalloc(sizeof(*context), GFP_KERNEL);
+
+	if (context == NULL)
+		return NULL;
+
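+	/* idr_get_new() returns -EAGAIN when it needs another idr_pre_get(),
+	 * so loop until an id is allocated or allocation fails */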
+	while (1) {
+		if (idr_pre_get(&dev_priv->device->context_idr,
+				GFP_KERNEL) == 0) {
+			kfree(context);
+			return NULL;
+		}
+
+		ret = idr_get_new(&dev_priv->device->context_idr,
+				  context, &id);
+
+		if (ret != -EAGAIN)
+			break;
+	}
+
+	if (ret) {
+		kfree(context);
+		return NULL;
+	}
+
+	context->id = id;
+	context->dev_priv = dev_priv;
+
+	return context;
+}
+
+static void
+kgsl_destroy_context(struct kgsl_device_private *dev_priv,
+		     struct kgsl_context *context)
+{
+	int id;
+
+	if (context == NULL)
+		return;
+
+	/* Fire a bug if the devctxt hasn't been freed */
+	BUG_ON(context->devctxt);
+
+	id = context->id;
+	kfree(context);
+
+	idr_remove(&dev_priv->device->context_idr, id);
+}
+
+/* to be called when a process is destroyed, this walks the memqueue and
+ * frees any entries that belong to the dying process
+ */
+static void kgsl_memqueue_cleanup(struct kgsl_device *device,
+				     struct kgsl_process_private *private)
+{
+	struct kgsl_mem_entry *entry, *entry_tmp;
+
+	if (!private)
+		return;
+
+	BUG_ON(!mutex_is_locked(&device->mutex));
+
+	list_for_each_entry_safe(entry, entry_tmp, &device->memqueue, list) {
+		if (entry->priv == private) {
+			list_del(&entry->list);
+			kgsl_mem_entry_put(entry);
+		}
+	}
+}
+
+static void kgsl_memqueue_freememontimestamp(struct kgsl_device *device,
+				  struct kgsl_mem_entry *entry,
+				  uint32_t timestamp,
+				  enum kgsl_timestamp_type type)
+{
+	BUG_ON(!mutex_is_locked(&device->mutex));
+
+	entry->free_timestamp = timestamp;
+
+	list_add_tail(&entry->list, &device->memqueue);
+}
+
+static void kgsl_memqueue_drain(struct kgsl_device *device)
+{
+	struct kgsl_mem_entry *entry, *entry_tmp;
+	uint32_t ts_processed;
+
+	BUG_ON(!mutex_is_locked(&device->mutex));
+
+	/* get current EOP timestamp */
+	ts_processed = device->ftbl->readtimestamp(device,
+		KGSL_TIMESTAMP_RETIRED);
+
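+	/* entries were queued in timestamp order, so stop at the first entry
+	 * the GPU has not yet retired */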
+	list_for_each_entry_safe(entry, entry_tmp, &device->memqueue, list) {
+		KGSL_MEM_INFO(device,
+			"ts_processed %d ts_free %d gpuaddr %x)\n",
+			ts_processed, entry->free_timestamp,
+			entry->memdesc.gpuaddr);
+		if (!timestamp_cmp(ts_processed, entry->free_timestamp))
+			break;
+
+		list_del(&entry->list);
+		kgsl_mem_entry_put(entry);
+	}
+}
+
+static void kgsl_memqueue_drain_unlocked(struct kgsl_device *device)
+{
+	mutex_lock(&device->mutex);
+	kgsl_check_suspended(device);
+	kgsl_memqueue_drain(device);
+	mutex_unlock(&device->mutex);
+}
+
+static void kgsl_check_idle_locked(struct kgsl_device *device)
+{
+	if (device->pwrctrl.nap_allowed == true &&
+	    device->state == KGSL_STATE_ACTIVE &&
+		device->requested_state == KGSL_STATE_NONE) {
+		device->requested_state = KGSL_STATE_NAP;
+		if (kgsl_pwrctrl_sleep(device) != 0)
+			mod_timer(&device->idle_timer,
+				  jiffies +
+				  device->pwrctrl.interval_timeout);
+	}
+}
+
+static void kgsl_check_idle(struct kgsl_device *device)
+{
+	mutex_lock(&device->mutex);
+	kgsl_check_idle_locked(device);
+	mutex_unlock(&device->mutex);
+}
+
+struct kgsl_device *kgsl_get_device(int dev_idx)
+{
+	int i;
+	struct kgsl_device *ret = NULL;
+
+	mutex_lock(&kgsl_driver.devlock);
+
+	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
+		if (kgsl_driver.devp[i] && kgsl_driver.devp[i]->id == dev_idx) {
+			ret = kgsl_driver.devp[i];
+			break;
+		}
+	}
+
+	mutex_unlock(&kgsl_driver.devlock);
+	return ret;
+}
+EXPORT_SYMBOL(kgsl_get_device);
+
+static struct kgsl_device *kgsl_get_minor(int minor)
+{
+	struct kgsl_device *ret = NULL;
+
+	if (minor < 0 || minor >= KGSL_DEVICE_MAX)
+		return NULL;
+
+	mutex_lock(&kgsl_driver.devlock);
+	ret = kgsl_driver.devp[minor];
+	mutex_unlock(&kgsl_driver.devlock);
+
+	return ret;
+}
+
+int kgsl_register_ts_notifier(struct kgsl_device *device,
+			      struct notifier_block *nb)
+{
+	BUG_ON(device == NULL);
+	return atomic_notifier_chain_register(&device->ts_notifier_list,
+					      nb);
+}
+EXPORT_SYMBOL(kgsl_register_ts_notifier);
+
+int kgsl_unregister_ts_notifier(struct kgsl_device *device,
+				struct notifier_block *nb)
+{
+	BUG_ON(device == NULL);
+	return atomic_notifier_chain_unregister(&device->ts_notifier_list,
+						nb);
+}
+EXPORT_SYMBOL(kgsl_unregister_ts_notifier);
+
+int kgsl_check_timestamp(struct kgsl_device *device, unsigned int timestamp)
+{
+	unsigned int ts_processed;
+
+	ts_processed = device->ftbl->readtimestamp(device,
+		KGSL_TIMESTAMP_RETIRED);
+
+	return timestamp_cmp(ts_processed, timestamp);
+}
+EXPORT_SYMBOL(kgsl_check_timestamp);
+
+static int kgsl_suspend_device(struct kgsl_device *device, pm_message_t state)
+{
+	int status = -EINVAL;
+	unsigned int nap_allowed_saved;
+	struct kgsl_pwrscale_policy *policy_saved;
+
+	if (!device)
+		return -EINVAL;
+
+	KGSL_PWR_WARN(device, "suspend start\n");
+
+	mutex_lock(&device->mutex);
+	nap_allowed_saved = device->pwrctrl.nap_allowed;
+	device->pwrctrl.nap_allowed = false;
+	policy_saved = device->pwrscale.policy;
+	device->pwrscale.policy = NULL;
+	device->requested_state = KGSL_STATE_SUSPEND;
+	/* Make sure no user process is waiting for a timestamp
+	 * before suspending */
+	if (device->active_cnt != 0) {
+		mutex_unlock(&device->mutex);
+		wait_for_completion(&device->suspend_gate);
+		mutex_lock(&device->mutex);
+	}
+	/* Don't let the timer wake us during suspended sleep. */
+	del_timer(&device->idle_timer);
+	switch (device->state) {
+		case KGSL_STATE_INIT:
+			break;
+		case KGSL_STATE_ACTIVE:
+			/* Wait for the device to become idle */
+			device->ftbl->idle(device, KGSL_TIMEOUT_DEFAULT);
+		case KGSL_STATE_NAP:
+		case KGSL_STATE_SLEEP:
+			/* Get the completion ready to be waited upon. */
+			INIT_COMPLETION(device->hwaccess_gate);
+			device->ftbl->suspend_context(device);
+			device->ftbl->stop(device);
+			device->state = KGSL_STATE_SUSPEND;
+			KGSL_PWR_WARN(device, "state -> SUSPEND, device %d\n",
+				device->id);
+			break;
+		default:
+			KGSL_PWR_ERR(device, "suspend fail, device %d\n",
+					device->id);
+			goto end;
+	}
+	device->requested_state = KGSL_STATE_NONE;
+	device->pwrctrl.nap_allowed = nap_allowed_saved;
+	device->pwrscale.policy = policy_saved;
+	status = 0;
+
+end:
+	mutex_unlock(&device->mutex);
+	KGSL_PWR_WARN(device, "suspend end\n");
+	return status;
+}
+
+static int kgsl_resume_device(struct kgsl_device *device)
+{
+	int status = -EINVAL;
+
+	if (!device)
+		return -EINVAL;
+
+	KGSL_PWR_WARN(device, "resume start\n");
+	mutex_lock(&device->mutex);
+	if (device->state == KGSL_STATE_SUSPEND) {
+		device->requested_state = KGSL_STATE_ACTIVE;
+		kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_NOMINAL);
+		status = device->ftbl->start(device, 0);
+		if (status == 0) {
+			device->state = KGSL_STATE_ACTIVE;
+			KGSL_PWR_WARN(device,
+					"state -> ACTIVE, device %d\n",
+					device->id);
+		} else {
+			KGSL_PWR_ERR(device,
+					"resume failed, device %d\n",
+					device->id);
+			device->state = KGSL_STATE_INIT;
+			goto end;
+		}
+		complete_all(&device->hwaccess_gate);
+	}
+	device->requested_state = KGSL_STATE_NONE;
+
+end:
+	mutex_unlock(&device->mutex);
+	kgsl_check_idle(device);
+	KGSL_PWR_WARN(device, "resume end\n");
+	return status;
+}
+
+static int kgsl_suspend(struct device *dev)
+{
+
+	pm_message_t arg = {0};
+	struct kgsl_device *device = dev_get_drvdata(dev);
+	return kgsl_suspend_device(device, arg);
+}
+
+static int kgsl_resume(struct device *dev)
+{
+	struct kgsl_device *device = dev_get_drvdata(dev);
+	return kgsl_resume_device(device);
+}
+
+static int kgsl_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int kgsl_runtime_resume(struct device *dev)
+{
+	return 0;
+}
+
+const struct dev_pm_ops kgsl_pm_ops = {
+	.suspend = kgsl_suspend,
+	.resume = kgsl_resume,
+	.runtime_suspend = kgsl_runtime_suspend,
+	.runtime_resume = kgsl_runtime_resume,
+};
+EXPORT_SYMBOL(kgsl_pm_ops);
+
+void kgsl_early_suspend_driver(struct early_suspend *h)
+{
+	struct kgsl_device *device = container_of(h,
+					struct kgsl_device, display_off);
+	kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_NOMINAL);
+}
+EXPORT_SYMBOL(kgsl_early_suspend_driver);
+
+int kgsl_suspend_driver(struct platform_device *pdev,
+					pm_message_t state)
+{
+	struct kgsl_device *device = dev_get_drvdata(&pdev->dev);
+	return kgsl_suspend_device(device, state);
+}
+EXPORT_SYMBOL(kgsl_suspend_driver);
+
+int kgsl_resume_driver(struct platform_device *pdev)
+{
+	struct kgsl_device *device = dev_get_drvdata(&pdev->dev);
+	return kgsl_resume_device(device);
+}
+EXPORT_SYMBOL(kgsl_resume_driver);
+
+void kgsl_late_resume_driver(struct early_suspend *h)
+{
+	struct kgsl_device *device = container_of(h,
+					struct kgsl_device, display_off);
+	kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_TURBO);
+}
+EXPORT_SYMBOL(kgsl_late_resume_driver);
+
+/* file operations */
+static struct kgsl_process_private *
+kgsl_get_process_private(struct kgsl_device_private *cur_dev_priv)
+{
+	struct kgsl_process_private *private;
+
+	mutex_lock(&kgsl_driver.process_mutex);
+	list_for_each_entry(private, &kgsl_driver.process_list, list) {
+		if (private->pid == task_tgid_nr(current)) {
+			private->refcnt++;
+			goto out;
+		}
+	}
+
+	/* no existing process private found for this dev_priv, create one */
+	private = kzalloc(sizeof(struct kgsl_process_private), GFP_KERNEL);
+	if (private == NULL) {
+		KGSL_DRV_ERR(cur_dev_priv->device, "kzalloc(%d) failed\n",
+			sizeof(struct kgsl_process_private));
+		goto out;
+	}
+
+	spin_lock_init(&private->mem_lock);
+	private->refcnt = 1;
+	private->pid = task_tgid_nr(current);
+
+	INIT_LIST_HEAD(&private->mem_list);
+
+#ifdef CONFIG_MSM_KGSL_MMU
+	{
+		unsigned long pt_name;
+
+#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
+		pt_name = task_tgid_nr(current);
+#else
+		pt_name = KGSL_MMU_GLOBAL_PT;
+#endif
+		private->pagetable = kgsl_mmu_getpagetable(pt_name);
+		if (private->pagetable == NULL) {
+			kfree(private);
+			private = NULL;
+			goto out;
+		}
+	}
+#endif
+
+	list_add(&private->list, &kgsl_driver.process_list);
+
+	kgsl_process_init_sysfs(private);
+
+out:
+	mutex_unlock(&kgsl_driver.process_mutex);
+	return private;
+}
+
+static void
+kgsl_put_process_private(struct kgsl_device *device,
+			 struct kgsl_process_private *private)
+{
+	struct kgsl_mem_entry *entry = NULL;
+	struct kgsl_mem_entry *entry_tmp = NULL;
+
+	if (!private)
+		return;
+
+	mutex_lock(&kgsl_driver.process_mutex);
+
+	if (--private->refcnt)
+		goto unlock;
+
+	KGSL_MEM_INFO(device,
+			"Memory usage: user (%d/%d) mapped (%d/%d)\n",
+			private->stats.user, private->stats.user_max,
+			private->stats.mapped, private->stats.mapped_max);
+
+	kgsl_process_uninit_sysfs(private);
+
+	list_del(&private->list);
+
+	list_for_each_entry_safe(entry, entry_tmp, &private->mem_list, list) {
+		list_del(&entry->list);
+		kgsl_mem_entry_put(entry);
+	}
+
+	kgsl_mmu_putpagetable(private->pagetable);
+	kfree(private);
+unlock:
+	mutex_unlock(&kgsl_driver.process_mutex);
+}
+
+static int kgsl_release(struct inode *inodep, struct file *filep)
+{
+	int result = 0;
+	struct kgsl_device_private *dev_priv = NULL;
+	struct kgsl_process_private *private = NULL;
+	struct kgsl_device *device;
+	struct kgsl_context *context;
+	int next = 0;
+
+	device = kgsl_driver.devp[iminor(inodep)];
+	BUG_ON(device == NULL);
+
+	dev_priv = (struct kgsl_device_private *) filep->private_data;
+	BUG_ON(dev_priv == NULL);
+	BUG_ON(device != dev_priv->device);
+	/* private could be null if kgsl_open is not successful */
+	private = dev_priv->process_priv;
+	filep->private_data = NULL;
+
+	mutex_lock(&device->mutex);
+	kgsl_check_suspended(device);
+
+	while (1) {
+		context = idr_get_next(&dev_priv->device->context_idr, &next);
+		if (context == NULL)
+			break;
+
+		if (context->dev_priv == dev_priv) {
+			device->ftbl->drawctxt_destroy(device, context);
+			kgsl_destroy_context(dev_priv, context);
+		}
+
+		next = next + 1;
+	}
+
+	device->open_count--;
+	if (device->open_count == 0) {
+		result = device->ftbl->stop(device);
+		device->state = KGSL_STATE_INIT;
+		KGSL_PWR_WARN(device, "state -> INIT, device %d\n", device->id);
+	}
+	/* clean up any to-be-freed entries that belong to this
+	 * process and this device
+	 */
+	kgsl_memqueue_cleanup(device, private);
+
+	mutex_unlock(&device->mutex);
+	kfree(dev_priv);
+
+	kgsl_put_process_private(device, private);
+
+	pm_runtime_put(device->parentdev);
+	return result;
+}
+
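+/*
+ * Open the device node: wake the device via runtime PM, allocate the
+ * per-fd private data, attach the per-process data and start the
+ * hardware on the first open.
+ */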
+static int kgsl_open(struct inode *inodep, struct file *filep)
+{
+	int result;
+	struct kgsl_device_private *dev_priv;
+	struct kgsl_device *device;
+	unsigned int minor = iminor(inodep);
+
+	device = kgsl_get_minor(minor);
+	BUG_ON(device == NULL);
+
+	if (filep->f_flags & O_EXCL) {
+		KGSL_DRV_ERR(device, "O_EXCL not allowed\n");
+		return -EBUSY;
+	}
+
+	result = pm_runtime_get_sync(device->parentdev);
+	if (result < 0) {
+		KGSL_DRV_ERR(device,
+			"Runtime PM: Unable to wake up the device, rc = %d\n",
+			result);
+		return result;
+	}
+	result = 0;
+
+	dev_priv = kzalloc(sizeof(struct kgsl_device_private), GFP_KERNEL);
+	if (dev_priv == NULL) {
+		KGSL_DRV_ERR(device, "kzalloc failed(%d)\n",
+			sizeof(struct kgsl_device_private));
+		result = -ENOMEM;
+		goto err_pmruntime;
+	}
+
+	dev_priv->device = device;
+	filep->private_data = dev_priv;
+
+	/* Get file (per process) private struct */
+	dev_priv->process_priv = kgsl_get_process_private(dev_priv);
+	if (dev_priv->process_priv ==  NULL) {
+		result = -ENOMEM;
+		goto err_freedevpriv;
+	}
+
+	mutex_lock(&device->mutex);
+	kgsl_check_suspended(device);
+
+	if (device->open_count == 0) {
+		result = device->ftbl->start(device, true);
+
+		if (result) {
+			mutex_unlock(&device->mutex);
+			goto err_putprocess;
+		}
+		device->state = KGSL_STATE_ACTIVE;
+		KGSL_PWR_WARN(device,
+				"state -> ACTIVE, device %d\n", minor);
+	}
+	device->open_count++;
+	mutex_unlock(&device->mutex);
+
+	KGSL_DRV_INFO(device, "Initialized %s: mmu=%s pagetable_count=%d\n",
+		device->name, kgsl_mmu_enabled() ? "on" : "off",
+		kgsl_pagetable_count);
+
+	return result;
+
+err_putprocess:
+	kgsl_put_process_private(device, dev_priv->process_priv);
+err_freedevpriv:
+	filep->private_data = NULL;
+	kfree(dev_priv);
+err_pmruntime:
+	pm_runtime_put(device->parentdev);
+	return result;
+}
+
+/* call with private->mem_lock locked */
+static struct kgsl_mem_entry *
+kgsl_sharedmem_find(struct kgsl_process_private *private, unsigned int gpuaddr)
+{
+	struct kgsl_mem_entry *entry = NULL, *result = NULL;
+
+	BUG_ON(private == NULL);
+
+	gpuaddr &= PAGE_MASK;
+
+	list_for_each_entry(entry, &private->mem_list, list) {
+		if (entry->memdesc.gpuaddr == gpuaddr) {
+			result = entry;
+			break;
+		}
+	}
+	return result;
+}
+
+/* call with private->mem_lock locked */
+struct kgsl_mem_entry *
+kgsl_sharedmem_find_region(struct kgsl_process_private *private,
+				unsigned int gpuaddr,
+				size_t size)
+{
+	struct kgsl_mem_entry *entry = NULL, *result = NULL;
+
+	BUG_ON(private == NULL);
+
+	list_for_each_entry(entry, &private->mem_list, list) {
+		if (gpuaddr >= entry->memdesc.gpuaddr &&
+		    ((gpuaddr + size) <=
+			(entry->memdesc.gpuaddr + entry->memdesc.size))) {
+			result = entry;
+			break;
+		}
+	}
+
+	return result;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_find_region);
+
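+/*
+ * Translate a GPU address inside @memdesc to the matching kernel virtual
+ * address and report how many bytes remain in the allocation from that
+ * point.  Returns NULL if the address falls outside the descriptor.
+ */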
+uint8_t *kgsl_gpuaddr_to_vaddr(const struct kgsl_memdesc *memdesc,
+	unsigned int gpuaddr, unsigned int *size)
+{
+	BUG_ON(memdesc->hostptr == NULL);
+
+	if (memdesc->gpuaddr == 0 || (gpuaddr < memdesc->gpuaddr ||
+		gpuaddr >= memdesc->gpuaddr + memdesc->size))
+		return NULL;
+
+	*size = memdesc->size - (gpuaddr - memdesc->gpuaddr);
+	return memdesc->hostptr + (gpuaddr - memdesc->gpuaddr);
+}
+EXPORT_SYMBOL(kgsl_gpuaddr_to_vaddr);
+
+/* call all ioctl sub functions with driver locked */
+static long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv,
+					  unsigned int cmd, void *data)
+{
+	int result = 0;
+	struct kgsl_device_getproperty *param = data;
+
+	switch (param->type) {
+	case KGSL_PROP_VERSION:
+	{
+		struct kgsl_version version;
+		if (param->sizebytes != sizeof(version)) {
+			result = -EINVAL;
+			break;
+		}
+
+		version.drv_major = KGSL_VERSION_MAJOR;
+		version.drv_minor = KGSL_VERSION_MINOR;
+		version.dev_major = dev_priv->device->ver_major;
+		version.dev_minor = dev_priv->device->ver_minor;
+
+		if (copy_to_user(param->value, &version, sizeof(version)))
+			result = -EFAULT;
+
+		break;
+	}
+	default:
+		result = dev_priv->device->ftbl->getproperty(
+					dev_priv->device, param->type,
+					param->value, param->sizebytes);
+	}
+
+	return result;
+}
+
+static long kgsl_ioctl_device_waittimestamp(struct kgsl_device_private
+						*dev_priv, unsigned int cmd,
+						void *data)
+{
+	int result = 0;
+	struct kgsl_device_waittimestamp *param = data;
+
+	/* Set the active count so that suspend doesn't do the
+	   wrong thing */
+
+	dev_priv->device->active_cnt++;
+
+	/* Don't wait forever, set a max value for now */
+	if (param->timeout == -1)
+		param->timeout = 10 * MSEC_PER_SEC;
+
+	result = dev_priv->device->ftbl->waittimestamp(dev_priv->device,
+					param->timestamp,
+					param->timeout);
+
+	kgsl_memqueue_drain(dev_priv->device);
+
+	/* Fire off any pending suspend operations that are in flight */
+
+	INIT_COMPLETION(dev_priv->device->suspend_gate);
+	dev_priv->device->active_cnt--;
+	complete(&dev_priv->device->suspend_gate);
+
+	return result;
+}
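+
+/*
+ * Sanity check a list of indirect buffer descriptors: each IB must lie
+ * inside a mapping owned by the calling process, and (optionally) be
+ * parsed for cff dumping before submission.
+ */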
+static bool check_ibdesc(struct kgsl_device_private *dev_priv,
+			 struct kgsl_ibdesc *ibdesc, unsigned int numibs,
+			 bool parse)
+{
+	bool result = true;
+	unsigned int i;
+	for (i = 0; i < numibs; i++) {
+		struct kgsl_mem_entry *entry;
+		spin_lock(&dev_priv->process_priv->mem_lock);
+		entry = kgsl_sharedmem_find_region(dev_priv->process_priv,
+			ibdesc[i].gpuaddr, ibdesc[i].sizedwords * sizeof(uint));
+		spin_unlock(&dev_priv->process_priv->mem_lock);
+		if (entry == NULL) {
+			KGSL_DRV_ERR(dev_priv->device,
+				"invalid cmd buffer gpuaddr %08x " \
+				"sizedwords %d\n", ibdesc[i].gpuaddr,
+				ibdesc[i].sizedwords);
+			result = false;
+			break;
+		}
+
+		if (parse && !kgsl_cffdump_parse_ibs(dev_priv, &entry->memdesc,
+			ibdesc[i].gpuaddr, ibdesc[i].sizedwords, true)) {
+			KGSL_DRV_ERR(dev_priv->device,
+				"invalid cmd buffer gpuaddr %08x " \
+				"sizedwords %d numibs %d/%d\n",
+				ibdesc[i].gpuaddr,
+				ibdesc[i].sizedwords, i+1, numibs);
+			result = false;
+			break;
+		}
+	}
+	return result;
+}
+
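+/*
+ * Submit one or more indirect buffers to the ringbuffer.  Newer user
+ * space passes an array of kgsl_ibdesc (IB list mode); older user space
+ * passes a single gpuaddr/size pair, which is wrapped in a one-entry
+ * array here so both cases share the same submission path.
+ */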
+static long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
+				      unsigned int cmd, void *data)
+{
+	int result = 0;
+	struct kgsl_ringbuffer_issueibcmds *param = data;
+	struct kgsl_ibdesc *ibdesc;
+	struct kgsl_context *context;
+
+#ifdef CONFIG_MSM_KGSL_DRM
+	kgsl_gpu_mem_flush(DRM_KGSL_GEM_CACHE_OP_TO_DEV);
+#endif
+
+	context = kgsl_find_context(dev_priv, param->drawctxt_id);
+	if (context == NULL) {
+		result = -EINVAL;
+		KGSL_DRV_ERR(dev_priv->device,
+			"invalid drawctxt drawctxt_id %d\n",
+			param->drawctxt_id);
+		goto done;
+	}
+
+	if (param->flags & KGSL_CONTEXT_SUBMIT_IB_LIST) {
+		KGSL_DRV_INFO(dev_priv->device,
+			"Using IB list mode for ib submission, numibs: %d\n",
+			param->numibs);
+		if (!param->numibs) {
+			KGSL_DRV_ERR(dev_priv->device,
+				"Invalid numibs as parameter: %d\n",
+				 param->numibs);
+			result = -EINVAL;
+			goto done;
+		}
+
+		ibdesc = kzalloc(sizeof(struct kgsl_ibdesc) * param->numibs,
+					GFP_KERNEL);
+		if (!ibdesc) {
+			KGSL_MEM_ERR(dev_priv->device,
+				"kzalloc(%d) failed\n",
+				sizeof(struct kgsl_ibdesc) * param->numibs);
+			result = -ENOMEM;
+			goto done;
+		}
+
+		if (copy_from_user(ibdesc, (void *)param->ibdesc_addr,
+				sizeof(struct kgsl_ibdesc) * param->numibs)) {
+			result = -EFAULT;
+			KGSL_DRV_ERR(dev_priv->device,
+				"copy_from_user failed\n");
+			goto free_ibdesc;
+		}
+	} else {
+		KGSL_DRV_INFO(dev_priv->device,
+			"Using single IB submission mode for ib submission\n");
+		/* If user space driver is still using the old mode of
+		 * submitting single ib then we need to support that as well */
+		ibdesc = kzalloc(sizeof(struct kgsl_ibdesc), GFP_KERNEL);
+		if (!ibdesc) {
+			KGSL_MEM_ERR(dev_priv->device,
+				"kzalloc(%d) failed\n",
+				sizeof(struct kgsl_ibdesc));
+			result = -ENOMEM;
+			goto done;
+		}
+		ibdesc[0].gpuaddr = param->ibdesc_addr;
+		ibdesc[0].sizedwords = param->numibs;
+		param->numibs = 1;
+	}
+
+	if (!check_ibdesc(dev_priv, ibdesc, param->numibs, true)) {
+		KGSL_DRV_ERR(dev_priv->device, "bad ibdesc");
+		result = -EINVAL;
+		goto free_ibdesc;
+	}
+
+	/* Let the pwrscale policy know that a new command buffer
+	   is being issued */
+
+	kgsl_pwrscale_busy(dev_priv->device);
+
+	result = dev_priv->device->ftbl->issueibcmds(dev_priv,
+					     context,
+					     ibdesc,
+					     param->numibs,
+					     &param->timestamp,
+					     param->flags);
+
+	if (result != 0)
+		goto free_ibdesc;
+
+	/* this is a check to try to detect if a command buffer was freed
+	 * during issueibcmds().
+	 */
+	if (!check_ibdesc(dev_priv, ibdesc, param->numibs, false)) {
+		KGSL_DRV_ERR(dev_priv->device, "bad ibdesc AFTER issue");
+		result = -EINVAL;
+		goto free_ibdesc;
+	}
+
+free_ibdesc:
+	kfree(ibdesc);
+done:
+
+#ifdef CONFIG_MSM_KGSL_DRM
+	kgsl_gpu_mem_flush(DRM_KGSL_GEM_CACHE_OP_FROM_DEV);
+#endif
+
+	return result;
+}
+
+static long kgsl_ioctl_cmdstream_readtimestamp(struct kgsl_device_private
+						*dev_priv, unsigned int cmd,
+						void *data)
+{
+	struct kgsl_cmdstream_readtimestamp *param = data;
+
+	param->timestamp =
+		dev_priv->device->ftbl->readtimestamp(dev_priv->device,
+		param->type);
+
+	return 0;
+}
+
+static long kgsl_ioctl_cmdstream_freememontimestamp(struct kgsl_device_private
+						    *dev_priv, unsigned int cmd,
+						    void *data)
+{
+	int result = 0;
+	struct kgsl_cmdstream_freememontimestamp *param = data;
+	struct kgsl_mem_entry *entry = NULL;
+
+	spin_lock(&dev_priv->process_priv->mem_lock);
+	entry = kgsl_sharedmem_find(dev_priv->process_priv, param->gpuaddr);
+	if (entry)
+		list_del(&entry->list);
+	spin_unlock(&dev_priv->process_priv->mem_lock);
+
+	if (entry) {
+		kgsl_memqueue_freememontimestamp(dev_priv->device, entry,
+					param->timestamp, param->type);
+		kgsl_memqueue_drain(dev_priv->device);
+	} else {
+		KGSL_DRV_ERR(dev_priv->device,
+			"invalid gpuaddr %08x\n", param->gpuaddr);
+		result = -EINVAL;
+	}
+
+	return result;
+}
+
+static long kgsl_ioctl_drawctxt_create(struct kgsl_device_private *dev_priv,
+					unsigned int cmd, void *data)
+{
+	int result = 0;
+	struct kgsl_drawctxt_create *param = data;
+	struct kgsl_context *context = NULL;
+
+	context = kgsl_create_context(dev_priv);
+
+	if (context == NULL) {
+		result = -ENOMEM;
+		goto done;
+	}
+
+	if (dev_priv->device->ftbl->drawctxt_create)
+		result = dev_priv->device->ftbl->drawctxt_create(
+			dev_priv->device, dev_priv->process_priv->pagetable,
+			context, param->flags);
+
+	param->drawctxt_id = context->id;
+
+done:
+	if (result && context)
+		kgsl_destroy_context(dev_priv, context);
+
+	return result;
+}
+
+static long kgsl_ioctl_drawctxt_destroy(struct kgsl_device_private *dev_priv,
+					unsigned int cmd, void *data)
+{
+	int result = 0;
+	struct kgsl_drawctxt_destroy *param = data;
+	struct kgsl_context *context;
+
+	context = kgsl_find_context(dev_priv, param->drawctxt_id);
+
+	if (context == NULL) {
+		result = -EINVAL;
+		goto done;
+	}
+
+	if (dev_priv->device->ftbl->drawctxt_destroy)
+		dev_priv->device->ftbl->drawctxt_destroy(dev_priv->device,
+			context);
+
+	kgsl_destroy_context(dev_priv, context);
+
+done:
+	return result;
+}
+
+static long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv,
+					unsigned int cmd, void *data)
+{
+	int result = 0;
+	struct kgsl_sharedmem_free *param = data;
+	struct kgsl_process_private *private = dev_priv->process_priv;
+	struct kgsl_mem_entry *entry = NULL;
+
+	spin_lock(&private->mem_lock);
+	entry = kgsl_sharedmem_find(private, param->gpuaddr);
+	if (entry)
+		list_del(&entry->list);
+	spin_unlock(&private->mem_lock);
+
+	if (entry) {
+		kgsl_mem_entry_put(entry);
+	} else {
+		KGSL_CORE_ERR("invalid gpuaddr %08x\n", param->gpuaddr);
+		result = -EINVAL;
+	}
+
+	return result;
+}
+
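+/*
+ * Find the VMA that starts exactly at @addr and check that it is page
+ * aligned with a zero pgoff; used by the vmalloc and ashmem mapping
+ * paths before the whole region is handed to the GPU.
+ */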
+static struct vm_area_struct *kgsl_get_vma_from_start_addr(unsigned int addr)
+{
+	struct vm_area_struct *vma;
+	int len;
+
+	down_read(&current->mm->mmap_sem);
+	vma = find_vma(current->mm, addr);
+	up_read(&current->mm->mmap_sem);
+	if (!vma) {
+		KGSL_CORE_ERR("find_vma(%x) failed\n", addr);
+		return NULL;
+	}
+	len = vma->vm_end - vma->vm_start;
+	if (vma->vm_pgoff || !KGSL_IS_PAGE_ALIGNED(len) ||
+	  !KGSL_IS_PAGE_ALIGNED(vma->vm_start)) {
+		KGSL_CORE_ERR("address %x is not aligned\n", addr);
+		return NULL;
+	}
+	if (vma->vm_start != addr) {
+		KGSL_CORE_ERR("vma address does not match mmap address\n");
+		return NULL;
+	}
+	return vma;
+}
+
+static long
+kgsl_ioctl_sharedmem_from_vmalloc(struct kgsl_device_private *dev_priv,
+				unsigned int cmd, void *data)
+{
+	int result = 0, len = 0;
+	struct kgsl_process_private *private = dev_priv->process_priv;
+	struct kgsl_sharedmem_from_vmalloc *param = data;
+	struct kgsl_mem_entry *entry = NULL;
+	struct vm_area_struct *vma;
+
+	if (!kgsl_mmu_enabled())
+		return -ENODEV;
+
+	/* Make sure all pending freed memory is collected */
+	kgsl_memqueue_drain_unlocked(dev_priv->device);
+
+	if (!param->hostptr) {
+		KGSL_CORE_ERR("invalid hostptr %x\n", param->hostptr);
+		result = -EINVAL;
+		goto error;
+	}
+
+	vma = kgsl_get_vma_from_start_addr(param->hostptr);
+	if (!vma) {
+		result = -EINVAL;
+		goto error;
+	}
+	len = vma->vm_end - vma->vm_start;
+	if (len == 0) {
+		KGSL_CORE_ERR("Invalid vma region length %d\n", len);
+		result = -EINVAL;
+		goto error;
+	}
+
+	entry = kgsl_mem_entry_create();
+	if (entry == NULL) {
+		result = -ENOMEM;
+		goto error;
+	}
+
+	result = kgsl_sharedmem_vmalloc_user(&entry->memdesc,
+					     private->pagetable, len,
+					     param->flags);
+	if (result != 0)
+		goto error_free_entry;
+
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	result = remap_vmalloc_range(vma, (void *) entry->memdesc.hostptr, 0);
+	if (result) {
+		KGSL_CORE_ERR("remap_vmalloc_range failed: %d\n", result);
+		goto error_free_vmalloc;
+	}
+
+	param->gpuaddr = entry->memdesc.gpuaddr;
+
+	entry->memtype = KGSL_USER_MEMORY;
+
+	kgsl_mem_entry_attach_process(entry, private);
+
+	/* Process specific statistics */
+	KGSL_STATS_ADD(len, private->stats.user,
+		       private->stats.user_max);
+
+	kgsl_check_idle(dev_priv->device);
+	return 0;
+
+error_free_vmalloc:
+	kgsl_sharedmem_free(&entry->memdesc);
+
+error_free_entry:
+	kfree(entry);
+
+error:
+	kgsl_check_idle(dev_priv->device);
+	return result;
+}
+
+static inline int _check_region(unsigned long start, unsigned long size,
+				uint64_t len)
+{
+	uint64_t end = ((uint64_t) start) + size;
+	return (end > len);
+}
+
+#ifdef CONFIG_ANDROID_PMEM
+static int kgsl_get_phys_file(int fd, unsigned long *start, unsigned long *len,
+			      unsigned long *vstart, struct file **filep)
+{
+	struct file *fbfile;
+	int ret = 0;
+	dev_t rdev;
+	struct fb_info *info;
+
+	*filep = NULL;
+	if (!get_pmem_file(fd, start, vstart, len, filep))
+		return 0;
+
+	fbfile = fget(fd);
+	if (fbfile == NULL) {
+		KGSL_CORE_ERR("fget failed\n");
+		return -1;
+	}
+
+	rdev = fbfile->f_dentry->d_inode->i_rdev;
+	info = MAJOR(rdev) == FB_MAJOR ? registered_fb[MINOR(rdev)] : NULL;
+	if (info) {
+		*start = info->fix.smem_start;
+		*len = info->fix.smem_len;
+		*vstart = (unsigned long)__va(info->fix.smem_start);
+		ret = 0;
+	} else {
+		KGSL_CORE_ERR("framebuffer minor %d not found\n",
+			      MINOR(rdev));
+		ret = -1;
+	}
+
+	fput(fbfile);
+
+	return ret;
+}
+
+static int kgsl_setup_phys_file(struct kgsl_mem_entry *entry,
+				struct kgsl_pagetable *pagetable,
+				unsigned int fd, unsigned int offset,
+				size_t size)
+{
+	int ret;
+	unsigned long phys, virt, len;
+	struct file *filep;
+
+	ret = kgsl_get_phys_file(fd, &phys, &len, &virt, &filep);
+	if (ret)
+		return ret;
+
+	if (offset >= len) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (size == 0)
+		size = len;
+
+	/* Adjust the size of the region to account for the offset */
+	size += offset & ~PAGE_MASK;
+
+	size = ALIGN(size, PAGE_SIZE);
+
+	if (_check_region(offset & PAGE_MASK, size, len)) {
+		KGSL_CORE_ERR("Offset (%ld) + size (%d) is larger "
+			      "than pmem region length %ld\n",
+			      offset & PAGE_MASK, size, len);
+		ret = -EINVAL;
+		goto err;
+
+	}
+
+	entry->file_ptr = filep;
+
+	entry->memdesc.pagetable = pagetable;
+	entry->memdesc.size = size;
+	entry->memdesc.physaddr = phys + (offset & PAGE_MASK);
+	entry->memdesc.hostptr = (void *) (virt + (offset & PAGE_MASK));
+	entry->memdesc.ops = &kgsl_contiguous_ops;
+
+	return 0;
+err:
+	put_pmem_file(filep);
+	return ret;
+}
+#else
+static int kgsl_setup_phys_file(struct kgsl_mem_entry *entry,
+				struct kgsl_pagetable *pagetable,
+				unsigned int fd, unsigned int offset,
+				size_t size)
+{
+	return -EINVAL;
+}
+#endif
+
+static int kgsl_setup_hostptr(struct kgsl_mem_entry *entry,
+			      struct kgsl_pagetable *pagetable,
+			      void *hostptr, unsigned int offset,
+			      size_t size)
+{
+	struct vm_area_struct *vma;
+	unsigned int len;
+
+	down_read(&current->mm->mmap_sem);
+	vma = find_vma(current->mm, (unsigned int) hostptr);
+	up_read(&current->mm->mmap_sem);
+
+	if (!vma) {
+		KGSL_CORE_ERR("find_vma(%p) failed\n", hostptr);
+		return -EINVAL;
+	}
+
+	/* We don't necessarily start at vma->vm_start */
+	len = vma->vm_end - (unsigned long) hostptr;
+
+	if (offset >= len)
+		return -EINVAL;
+
+	if (!KGSL_IS_PAGE_ALIGNED((unsigned long) hostptr) ||
+	    !KGSL_IS_PAGE_ALIGNED(len)) {
+		KGSL_CORE_ERR("user address len(%u) "
+			      "and start(%p) must be page "
+			      "aligned\n", len, hostptr);
+		return -EINVAL;
+	}
+
+	if (size == 0)
+		size = len;
+
+	/* Adjust the size of the region to account for the offset */
+	size += offset & ~PAGE_MASK;
+
+	size = ALIGN(size, PAGE_SIZE);
+
+	if (_check_region(offset & PAGE_MASK, size, len)) {
+		KGSL_CORE_ERR("Offset (%ld) + size (%d) is larger "
+			      "than region length %d\n",
+			      offset & PAGE_MASK, size, len);
+		return -EINVAL;
+	}
+
+	entry->memdesc.pagetable = pagetable;
+	entry->memdesc.size = size;
+	entry->memdesc.hostptr = hostptr + (offset & PAGE_MASK);
+	entry->memdesc.ops = &kgsl_userptr_ops;
+
+	return 0;
+}
+
+#ifdef CONFIG_ASHMEM
+static int kgsl_setup_ashmem(struct kgsl_mem_entry *entry,
+			     struct kgsl_pagetable *pagetable,
+			     int fd, void *hostptr, size_t size)
+{
+	int ret;
+	struct vm_area_struct *vma;
+	struct file *filep, *vmfile;
+	unsigned long len;
+
+	vma = kgsl_get_vma_from_start_addr((unsigned long) hostptr);
+	if (vma == NULL)
+		return -EINVAL;
+
+	len = vma->vm_end - vma->vm_start;
+
+	if (size == 0)
+		size = len;
+
+	if (size != len) {
+		KGSL_CORE_ERR("Invalid size %d for vma region %p\n",
+			      size, hostptr);
+		return -EINVAL;
+	}
+
+	ret = get_ashmem_file(fd, &filep, &vmfile, &len);
+
+	if (ret) {
+		KGSL_CORE_ERR("get_ashmem_file failed\n");
+		return ret;
+	}
+
+	if (vmfile != vma->vm_file) {
+		KGSL_CORE_ERR("ashmem shmem file does not match vma\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	entry->file_ptr = filep;
+
+	entry->memdesc.pagetable = pagetable;
+	entry->memdesc.size = ALIGN(size, PAGE_SIZE);
+	entry->memdesc.hostptr = hostptr;
+	entry->memdesc.ops = &kgsl_userptr_ops;
+
+	return 0;
+
+err:
+	put_ashmem_file(filep);
+	return ret;
+}
+#else
+static int kgsl_setup_ashmem(struct kgsl_mem_entry *entry,
+			     struct kgsl_pagetable *pagetable,
+			     int fd, void *hostptr, size_t size)
+{
+	return -EINVAL;
+}
+#endif
+
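+/*
+ * Map externally allocated memory (pmem, a raw user address range or an
+ * ashmem region) into the process pagetable and track it as a
+ * KGSL_MAPPED_MEMORY entry.
+ */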
+static long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
+				     unsigned int cmd, void *data)
+{
+	int result = -EINVAL;
+	struct kgsl_map_user_mem *param = data;
+	struct kgsl_mem_entry *entry = NULL;
+	struct kgsl_process_private *private = dev_priv->process_priv;
+
+	entry = kgsl_mem_entry_create();
+
+	if (entry == NULL)
+		return -ENOMEM;
+
+	kgsl_memqueue_drain_unlocked(dev_priv->device);
+
+	switch (param->memtype) {
+	case KGSL_USER_MEM_TYPE_PMEM:
+		if (param->fd == 0 || param->len == 0)
+			break;
+
+		result = kgsl_setup_phys_file(entry, private->pagetable,
+					      param->fd, param->offset,
+					      param->len);
+		break;
+
+	case KGSL_USER_MEM_TYPE_ADDR:
+		if (!kgsl_mmu_enabled()) {
+			KGSL_DRV_ERR(dev_priv->device,
+				"Cannot map paged memory with the "
+				"MMU disabled\n");
+			break;
+		}
+
+		if (param->hostptr == 0)
+			break;
+
+		result = kgsl_setup_hostptr(entry, private->pagetable,
+					    (void *) param->hostptr,
+					    param->offset, param->len);
+		break;
+
+	case KGSL_USER_MEM_TYPE_ASHMEM:
+		if (!kgsl_mmu_enabled()) {
+			KGSL_DRV_ERR(dev_priv->device,
+				"Cannot map paged memory with the "
+				"MMU disabled\n");
+			break;
+		}
+
+		if (param->hostptr == 0)
+			break;
+
+		result = kgsl_setup_ashmem(entry, private->pagetable,
+					   param->fd, (void *) param->hostptr,
+					   param->len);
+		break;
+	default:
+		KGSL_CORE_ERR("Invalid memory type: %x\n", param->memtype);
+		break;
+	}
+
+	if (result)
+		goto error;
+
+	result = kgsl_mmu_map(private->pagetable,
+			      &entry->memdesc,
+			      GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+
+	if (result)
+		goto error_put_file_ptr;
+
+	/* Adjust the returned value for a non 4k aligned offset */
+	param->gpuaddr = entry->memdesc.gpuaddr + (param->offset & ~PAGE_MASK);
+
+	entry->memtype = KGSL_MAPPED_MEMORY;
+
+	KGSL_STATS_ADD(param->len, kgsl_driver.stats.mapped,
+		       kgsl_driver.stats.mapped_max);
+
+	/* Statistics */
+	KGSL_STATS_ADD(param->len, private->stats.mapped,
+		       private->stats.mapped_max);
+
+	kgsl_mem_entry_attach_process(entry, private);
+
+	kgsl_check_idle(dev_priv->device);
+	return result;
+
+ error_put_file_ptr:
+	if (entry->file_ptr)
+		fput(entry->file_ptr);
+
+error:
+	kfree(entry);
+	kgsl_check_idle(dev_priv->device);
+	return result;
+}
+
+/* This function flushes a graphics memory allocation from the CPU cache
+ * when caching is enabled with the MMU */
+static long
+kgsl_ioctl_sharedmem_flush_cache(struct kgsl_device_private *dev_priv,
+				 unsigned int cmd, void *data)
+{
+	int result = 0;
+	struct kgsl_mem_entry *entry;
+	struct kgsl_sharedmem_free *param = data;
+	struct kgsl_process_private *private = dev_priv->process_priv;
+
+	spin_lock(&private->mem_lock);
+	entry = kgsl_sharedmem_find(private, param->gpuaddr);
+	if (!entry) {
+		KGSL_CORE_ERR("invalid gpuaddr %08x\n", param->gpuaddr);
+		result = -EINVAL;
+	} else {
+		if (!entry->memdesc.hostptr)
+			entry->memdesc.hostptr =
+				kgsl_gpuaddr_to_vaddr(&entry->memdesc,
+					param->gpuaddr, &entry->memdesc.size);
+
+		if (!entry->memdesc.hostptr) {
+			KGSL_CORE_ERR("invalid hostptr with gpuaddr %08x\n",
+				param->gpuaddr);
+			result = -EINVAL;
+			/* don't leave mem_lock held on the error path */
+			spin_unlock(&private->mem_lock);
+			goto done;
+		}
+
+		kgsl_cache_range_op(&entry->memdesc, KGSL_CACHE_OP_CLEAN);
+
+		/* Statistics - keep track of how many flushes each process
+		   does */
+		private->stats.flushes++;
+	}
+	spin_unlock(&private->mem_lock);
+done:
+	return result;
+}
+
+static long
+kgsl_ioctl_gpumem_alloc(struct kgsl_device_private *dev_priv,
+			unsigned int cmd, void *data)
+{
+	struct kgsl_process_private *private = dev_priv->process_priv;
+	struct kgsl_gpumem_alloc *param = data;
+	struct kgsl_mem_entry *entry;
+	int result;
+
+	entry = kgsl_mem_entry_create();
+	if (entry == NULL)
+		return -ENOMEM;
+
+	/* Make sure all pending freed memory is collected */
+	kgsl_memqueue_drain_unlocked(dev_priv->device);
+
+	result = kgsl_allocate_user(&entry->memdesc, private->pagetable,
+		param->size, param->flags);
+
+	if (result == 0) {
+		entry->memtype = KGSL_USER_MEMORY;
+		kgsl_mem_entry_attach_process(entry, private);
+		param->gpuaddr = entry->memdesc.gpuaddr;
+
+		KGSL_STATS_ADD(entry->memdesc.size, private->stats.user,
+		       private->stats.user_max);
+	} else
+		kfree(entry);
+
+	kgsl_check_idle(dev_priv->device);
+	return result;
+}
+
+typedef long (*kgsl_ioctl_func_t)(struct kgsl_device_private *,
+	unsigned int, void *);
+
+#define KGSL_IOCTL_FUNC(_cmd, _func, _lock) \
+	[_IOC_NR(_cmd)] = { .cmd = _cmd, .func = _func, .lock = _lock }
+
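+/*
+ * Ioctl dispatch table, indexed by _IOC_NR(cmd).  The lock flag selects
+ * whether kgsl_ioctl() takes the device mutex (and suspend check) around
+ * the handler.
+ */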
+static const struct {
+	unsigned int cmd;
+	kgsl_ioctl_func_t func;
+	int lock;
+} kgsl_ioctl_funcs[] = {
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_GETPROPERTY,
+			kgsl_ioctl_device_getproperty, 1),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_WAITTIMESTAMP,
+			kgsl_ioctl_device_waittimestamp, 1),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS,
+			kgsl_ioctl_rb_issueibcmds, 1),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP,
+			kgsl_ioctl_cmdstream_readtimestamp, 1),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP,
+			kgsl_ioctl_cmdstream_freememontimestamp, 1),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_CREATE,
+			kgsl_ioctl_drawctxt_create, 1),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_DESTROY,
+			kgsl_ioctl_drawctxt_destroy, 1),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_MAP_USER_MEM,
+			kgsl_ioctl_map_user_mem, 0),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FROM_PMEM,
+			kgsl_ioctl_map_user_mem, 0),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FREE,
+			kgsl_ioctl_sharedmem_free, 0),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC,
+			kgsl_ioctl_sharedmem_from_vmalloc, 0),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE,
+			kgsl_ioctl_sharedmem_flush_cache, 0),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_ALLOC,
+			kgsl_ioctl_gpumem_alloc, 0),
+};
+
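+/*
+ * Top level ioctl handler: argument structs smaller than the 64-byte
+ * on-stack buffer are copied there, larger ones are kzalloc'ed; on
+ * success any IOC_OUT data is copied back to user space.
+ */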
+static long kgsl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+	struct kgsl_device_private *dev_priv = filep->private_data;
+	unsigned int nr = _IOC_NR(cmd);
+	kgsl_ioctl_func_t func;
+	int lock, ret;
+	char ustack[64];
+	void *uptr = NULL;
+
+	BUG_ON(dev_priv == NULL);
+
+	/* Workaround for a previously incorrectly defined ioctl code.
+	   This helps ensure binary compatibility */
+
+	if (cmd == IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD)
+		cmd = IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP;
+
+	if (cmd & (IOC_IN | IOC_OUT)) {
+		if (_IOC_SIZE(cmd) < sizeof(ustack))
+			uptr = ustack;
+		else {
+			uptr = kzalloc(_IOC_SIZE(cmd), GFP_KERNEL);
+			if (uptr == NULL) {
+				KGSL_MEM_ERR(dev_priv->device,
+					"kzalloc(%d) failed\n", _IOC_SIZE(cmd));
+				ret = -ENOMEM;
+				goto done;
+			}
+		}
+
+		if (cmd & IOC_IN) {
+			if (copy_from_user(uptr, (void __user *) arg,
+				_IOC_SIZE(cmd))) {
+				ret = -EFAULT;
+				goto done;
+			}
+		} else
+			memset(uptr, 0, _IOC_SIZE(cmd));
+	}
+
+	if (nr < ARRAY_SIZE(kgsl_ioctl_funcs) &&
+	    kgsl_ioctl_funcs[nr].func != NULL) {
+		func = kgsl_ioctl_funcs[nr].func;
+		lock = kgsl_ioctl_funcs[nr].lock;
+	} else {
+		func = dev_priv->device->ftbl->ioctl;
+		if (!func) {
+			KGSL_DRV_INFO(dev_priv->device,
+				      "invalid ioctl code %08x\n", cmd);
+			ret = -EINVAL;
+			goto done;
+		}
+		lock = 1;
+	}
+
+	if (lock) {
+		mutex_lock(&dev_priv->device->mutex);
+		kgsl_check_suspended(dev_priv->device);
+	}
+
+	ret = func(dev_priv, cmd, uptr);
+
+	if (lock) {
+		kgsl_check_idle_locked(dev_priv->device);
+		mutex_unlock(&dev_priv->device->mutex);
+	}
+
+	if (ret == 0 && (cmd & IOC_OUT)) {
+		if (copy_to_user((void __user *) arg, uptr, _IOC_SIZE(cmd)))
+			ret = -EFAULT;
+	}
+
+done:
+	if (_IOC_SIZE(cmd) >= sizeof(ustack))
+		kfree(uptr);
+
+	return ret;
+}
+
+static int
+kgsl_mmap_memstore(struct kgsl_device *device, struct vm_area_struct *vma)
+{
+	struct kgsl_memdesc *memdesc = &device->memstore;
+	int result;
+	unsigned int vma_size = vma->vm_end - vma->vm_start;
+
+	/* The memstore can only be mapped as read only */
+
+	if (vma->vm_flags & VM_WRITE)
+		return -EPERM;
+
+	if (memdesc->size  !=  vma_size) {
+		KGSL_MEM_ERR(device, "memstore bad size: %d should be %d\n",
+			     vma_size, memdesc->size);
+		return -EINVAL;
+	}
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	result = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+				 vma_size, vma->vm_page_prot);
+	if (result != 0)
+		KGSL_MEM_ERR(device, "remap_pfn_range failed: %d\n",
+			     result);
+
+	return result;
+}
+
+static int
+kgsl_gpumem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct kgsl_mem_entry *entry = vma->vm_private_data;
+
+	if (!entry->memdesc.ops->vmfault)
+		return VM_FAULT_SIGBUS;
+
+	return entry->memdesc.ops->vmfault(&entry->memdesc, vma, vmf);
+}
+
+static void
+kgsl_gpumem_vm_close(struct vm_area_struct *vma)
+{
+	struct kgsl_mem_entry *entry  = vma->vm_private_data;
+	kgsl_mem_entry_put(entry);
+}
+
+static struct vm_operations_struct kgsl_gpumem_vm_ops = {
+	.fault = kgsl_gpumem_vm_fault,
+	.close = kgsl_gpumem_vm_close,
+};
+
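+/*
+ * mmap() handler: an offset equal to the memstore physical address maps
+ * the read-only memstore, otherwise the offset must match the gpuaddr of
+ * one of the process's memory entries, whose ops then serve the faults.
+ */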
+static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	unsigned long vma_offset = vma->vm_pgoff << PAGE_SHIFT;
+	struct inode *inodep = file->f_path.dentry->d_inode;
+	struct kgsl_device_private *dev_priv = file->private_data;
+	struct kgsl_process_private *private = dev_priv->process_priv;
+	struct kgsl_mem_entry *entry;
+	struct kgsl_device *device;
+
+	device = kgsl_driver.devp[iminor(inodep)];
+	BUG_ON(device == NULL);
+
+	/* Handle legacy behavior for memstore */
+
+	if (vma_offset == device->memstore.physaddr)
+		return kgsl_mmap_memstore(device, vma);
+
+	/* Find a chunk of GPU memory */
+
+	spin_lock(&private->mem_lock);
+	list_for_each_entry(entry, &private->mem_list, list) {
+		if (vma_offset == entry->memdesc.gpuaddr) {
+			kgsl_mem_entry_get(entry);
+			break;
+		}
+	}
+	spin_unlock(&private->mem_lock);
+
+	if (entry == NULL)
+		return -EINVAL;
+
+	if (!entry->memdesc.ops->vmflags || !entry->memdesc.ops->vmfault)
+		return -EINVAL;
+
+	vma->vm_flags |= entry->memdesc.ops->vmflags(&entry->memdesc);
+
+	vma->vm_private_data = entry;
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	vma->vm_ops = &kgsl_gpumem_vm_ops;
+	vma->vm_file = file;
+
+	return 0;
+}
+
+static const struct file_operations kgsl_fops = {
+	.owner = THIS_MODULE,
+	.release = kgsl_release,
+	.open = kgsl_open,
+	.mmap = kgsl_mmap,
+	.unlocked_ioctl = kgsl_ioctl,
+};
+
+struct kgsl_driver kgsl_driver  = {
+	.process_mutex = __MUTEX_INITIALIZER(kgsl_driver.process_mutex),
+	.ptlock = __SPIN_LOCK_UNLOCKED(kgsl_driver.ptlock),
+	.devlock = __MUTEX_INITIALIZER(kgsl_driver.devlock),
+};
+EXPORT_SYMBOL(kgsl_driver);
+
+void kgsl_unregister_device(struct kgsl_device *device)
+{
+	int minor;
+
+	mutex_lock(&kgsl_driver.devlock);
+	for (minor = 0; minor < KGSL_DEVICE_MAX; minor++) {
+		if (device == kgsl_driver.devp[minor])
+			break;
+	}
+
+	mutex_unlock(&kgsl_driver.devlock);
+
+	if (minor == KGSL_DEVICE_MAX)
+		return;
+
+	kgsl_cffdump_close(device->id);
+	kgsl_pwrctrl_uninit_sysfs(device);
+
+	wake_lock_destroy(&device->idle_wakelock);
+	pm_qos_remove_request(&device->pm_qos_req_dma);
+
+	idr_destroy(&device->context_idr);
+
+	if (device->memstore.hostptr)
+		kgsl_sharedmem_free(&device->memstore);
+
+	kgsl_mmu_close(device);
+
+	if (device->work_queue) {
+		destroy_workqueue(device->work_queue);
+		device->work_queue = NULL;
+	}
+
+	device_destroy(kgsl_driver.class,
+		       MKDEV(MAJOR(kgsl_driver.major), minor));
+
+	mutex_lock(&kgsl_driver.devlock);
+	kgsl_driver.devp[minor] = NULL;
+	mutex_unlock(&kgsl_driver.devlock);
+}
+EXPORT_SYMBOL(kgsl_unregister_device);
+
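+/*
+ * Register a device with the core: claim a minor, create the character
+ * device node, and set up the MMU, memstore, wakelock, idle timer and
+ * sysfs/debugfs entries shared by all kgsl devices.
+ */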
+int
+kgsl_register_device(struct kgsl_device *device)
+{
+	int minor, ret;
+	dev_t dev;
+
+	/* Find a minor for the device */
+
+	mutex_lock(&kgsl_driver.devlock);
+	for (minor = 0; minor < KGSL_DEVICE_MAX; minor++) {
+		if (kgsl_driver.devp[minor] == NULL) {
+			kgsl_driver.devp[minor] = device;
+			break;
+		}
+	}
+
+	mutex_unlock(&kgsl_driver.devlock);
+
+	if (minor == KGSL_DEVICE_MAX) {
+		KGSL_CORE_ERR("minor devices exhausted\n");
+		return -ENODEV;
+	}
+
+	/* Create the device */
+	dev = MKDEV(MAJOR(kgsl_driver.major), minor);
+	device->dev = device_create(kgsl_driver.class,
+				    device->parentdev,
+				    dev, device,
+				    device->name);
+
+	if (IS_ERR(device->dev)) {
+		ret = PTR_ERR(device->dev);
+		KGSL_CORE_ERR("device_create(%s): %d\n", device->name, ret);
+		goto err_devlist;
+	}
+
+	dev_set_drvdata(device->parentdev, device);
+
+	/* Generic device initialization */
+	init_waitqueue_head(&device->wait_queue);
+
+	kgsl_cffdump_open(device->id);
+
+	init_completion(&device->hwaccess_gate);
+	init_completion(&device->suspend_gate);
+
+	ATOMIC_INIT_NOTIFIER_HEAD(&device->ts_notifier_list);
+
+	setup_timer(&device->idle_timer, kgsl_timer, (unsigned long) device);
+	ret = kgsl_create_device_workqueue(device);
+	if (ret)
+		goto err_devlist;
+
+	INIT_WORK(&device->idle_check_ws, kgsl_idle_check);
+
+	INIT_LIST_HEAD(&device->memqueue);
+
+	ret = kgsl_mmu_init(device);
+	if (ret != 0)
+		goto err_dest_work_q;
+
+	ret = kgsl_allocate_contiguous(&device->memstore,
+		sizeof(struct kgsl_devmemstore));
+
+	if (ret != 0)
+		goto err_close_mmu;
+
+	kgsl_sharedmem_set(&device->memstore, 0, 0, device->memstore.size);
+
+	wake_lock_init(&device->idle_wakelock, WAKE_LOCK_IDLE, device->name);
+	pm_qos_add_request(&device->pm_qos_req_dma, PM_QOS_CPU_DMA_LATENCY,
+				PM_QOS_DEFAULT_VALUE);
+
+	idr_init(&device->context_idr);
+
+	/* sysfs and debugfs initialization - failure here is non-fatal */
+
+	/* Initialize logging */
+	kgsl_device_debugfs_init(device);
+
+	/* Initialize common sysfs entries */
+	kgsl_pwrctrl_init_sysfs(device);
+
+	return 0;
+
+err_close_mmu:
+	kgsl_mmu_close(device);
+err_dest_work_q:
+	destroy_workqueue(device->work_queue);
+	device->work_queue = NULL;
+err_devlist:
+	mutex_lock(&kgsl_driver.devlock);
+	kgsl_driver.devp[minor] = NULL;
+	mutex_unlock(&kgsl_driver.devlock);
+
+	return ret;
+}
+EXPORT_SYMBOL(kgsl_register_device);
+
+int kgsl_device_platform_probe(struct kgsl_device *device,
+			       irqreturn_t (*dev_isr) (int, void*))
+{
+	int status = -EINVAL;
+	struct kgsl_memregion *regspace = NULL;
+	struct resource *res;
+	struct platform_device *pdev =
+		container_of(device->parentdev, struct platform_device, dev);
+
+	pm_runtime_enable(device->parentdev);
+
+	status = kgsl_pwrctrl_init(device);
+	if (status)
+		goto error;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   device->iomemname);
+	if (res == NULL) {
+		KGSL_DRV_ERR(device, "platform_get_resource_byname failed\n");
+		status = -EINVAL;
+		goto error_pwrctrl_close;
+	}
+	if (res->start == 0 || resource_size(res) == 0) {
+		KGSL_DRV_ERR(device, "dev %d invalid regspace\n", device->id);
+		status = -EINVAL;
+		goto error_pwrctrl_close;
+	}
+
+	regspace = &device->regspace;
+	regspace->mmio_phys_base = res->start;
+	regspace->sizebytes = resource_size(res);
+
+	if (!request_mem_region(regspace->mmio_phys_base,
+				regspace->sizebytes, device->name)) {
+		KGSL_DRV_ERR(device, "request_mem_region failed\n");
+		status = -ENODEV;
+		goto error_pwrctrl_close;
+	}
+
+	regspace->mmio_virt_base = ioremap(regspace->mmio_phys_base,
+					   regspace->sizebytes);
+
+	if (regspace->mmio_virt_base == NULL) {
+		KGSL_DRV_ERR(device, "ioremap failed\n");
+		status = -ENODEV;
+		goto error_release_mem;
+	}
+
+	status = request_irq(device->pwrctrl.interrupt_num, dev_isr,
+			     IRQF_TRIGGER_HIGH, device->name, device);
+	if (status) {
+		KGSL_DRV_ERR(device, "request_irq(%d) failed: %d\n",
+			      device->pwrctrl.interrupt_num, status);
+		goto error_iounmap;
+	}
+	device->pwrctrl.have_irq = 1;
+	disable_irq(device->pwrctrl.interrupt_num);
+
+	KGSL_DRV_INFO(device,
+		"dev_id %d regs phys 0x%08x size 0x%08x virt %p\n",
+		device->id, regspace->mmio_phys_base,
+		regspace->sizebytes, regspace->mmio_virt_base);
+
+	status = kgsl_register_device(device);
+	if (!status)
+		return status;
+
+	free_irq(device->pwrctrl.interrupt_num, NULL);
+	device->pwrctrl.have_irq = 0;
+error_iounmap:
+	iounmap(regspace->mmio_virt_base);
+	regspace->mmio_virt_base = NULL;
+error_release_mem:
+	release_mem_region(regspace->mmio_phys_base, regspace->sizebytes);
+error_pwrctrl_close:
+	kgsl_pwrctrl_close(device);
+error:
+	return status;
+}
+EXPORT_SYMBOL(kgsl_device_platform_probe);
+
+void kgsl_device_platform_remove(struct kgsl_device *device)
+{
+	struct kgsl_memregion *regspace = &device->regspace;
+
+	kgsl_unregister_device(device);
+
+	if (regspace->mmio_virt_base != NULL) {
+		iounmap(regspace->mmio_virt_base);
+		regspace->mmio_virt_base = NULL;
+		release_mem_region(regspace->mmio_phys_base,
+					regspace->sizebytes);
+	}
+	kgsl_pwrctrl_close(device);
+
+	pm_runtime_disable(device->parentdev);
+}
+EXPORT_SYMBOL(kgsl_device_platform_remove);
+
+static int __devinit
+kgsl_ptdata_init(void)
+{
+	INIT_LIST_HEAD(&kgsl_driver.pagetable_list);
+
+	return kgsl_ptpool_init(&kgsl_driver.ptpool, KGSL_PAGETABLE_SIZE,
+		kgsl_pagetable_count);
+}
+
+static void kgsl_core_exit(void)
+{
+	unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX);
+
+	kgsl_ptpool_destroy(&kgsl_driver.ptpool);
+
+	device_unregister(&kgsl_driver.virtdev);
+
+	if (kgsl_driver.class) {
+		class_destroy(kgsl_driver.class);
+		kgsl_driver.class = NULL;
+	}
+
+	kgsl_drm_exit();
+	kgsl_cffdump_destroy();
+}
+
+static int __init kgsl_core_init(void)
+{
+	int result = 0;
+
+	/* alloc major and minor device numbers */
+	result = alloc_chrdev_region(&kgsl_driver.major, 0, KGSL_DEVICE_MAX,
+				  KGSL_NAME);
+	if (result < 0) {
+		KGSL_CORE_ERR("alloc_chrdev_region failed err = %d\n", result);
+		goto err;
+	}
+
+	cdev_init(&kgsl_driver.cdev, &kgsl_fops);
+	kgsl_driver.cdev.owner = THIS_MODULE;
+	kgsl_driver.cdev.ops = &kgsl_fops;
+	result = cdev_add(&kgsl_driver.cdev, MKDEV(MAJOR(kgsl_driver.major), 0),
+		       KGSL_DEVICE_MAX);
+
+	if (result) {
+		KGSL_CORE_ERR("kgsl: cdev_add() failed, dev_num= %d,"
+			     " result= %d\n", kgsl_driver.major, result);
+		goto err;
+	}
+
+	kgsl_driver.class = class_create(THIS_MODULE, KGSL_NAME);
+
+	if (IS_ERR(kgsl_driver.class)) {
+		result = PTR_ERR(kgsl_driver.class);
+		KGSL_CORE_ERR("failed to create class %s\n", KGSL_NAME);
+		goto err;
+	}
+
+	/* Make a virtual device for managing core related things
+	   in sysfs */
+	kgsl_driver.virtdev.class = kgsl_driver.class;
+	dev_set_name(&kgsl_driver.virtdev, "kgsl");
+	result = device_register(&kgsl_driver.virtdev);
+	if (result) {
+		KGSL_CORE_ERR("driver_register failed\n");
+		goto err;
+	}
+
+	/* Make kobjects in the virtual device for storing statistics */
+
+	kgsl_driver.ptkobj =
+	  kobject_create_and_add("pagetables",
+				 &kgsl_driver.virtdev.kobj);
+
+	kgsl_driver.prockobj =
+		kobject_create_and_add("proc",
+				       &kgsl_driver.virtdev.kobj);
+
+	kgsl_core_debugfs_init();
+
+	kgsl_sharedmem_init_sysfs();
+	kgsl_cffdump_init();
+
+	/* Generic device initialization */
+	INIT_LIST_HEAD(&kgsl_driver.process_list);
+
+	result = kgsl_ptdata_init();
+	if (result)
+		goto err;
+
+	result = kgsl_drm_init(NULL);
+
+	if (result)
+		goto err;
+
+	return 0;
+
+err:
+	kgsl_core_exit();
+	return result;
+}
+
+module_init(kgsl_core_init);
+module_exit(kgsl_core_exit);
+
+MODULE_AUTHOR("Qualcomm Innovation Center, Inc.");
+MODULE_DESCRIPTION("MSM GPU driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
new file mode 100644
index 0000000..cd11bd9
--- /dev/null
+++ b/drivers/gpu/msm/kgsl.h
@@ -0,0 +1,220 @@
+/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __KGSL_H
+#define __KGSL_H
+
+#include <linux/types.h>
+#include <linux/msm_kgsl.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/cdev.h>
+#include <linux/regulator/consumer.h>
+
+#define KGSL_NAME "kgsl"
+
+/* Flags to control whether to flush or invalidate a cached memory range */
+#define KGSL_CACHE_INV		0x00000000
+#define KGSL_CACHE_CLEAN	0x00000001
+#define KGSL_CACHE_FLUSH	0x00000002
+
+#define KGSL_CACHE_USER_ADDR	0x00000010
+#define KGSL_CACHE_VMALLOC_ADDR	0x00000020
+
+/*cache coherency ops */
+#define DRM_KGSL_GEM_CACHE_OP_TO_DEV	0x0001
+#define DRM_KGSL_GEM_CACHE_OP_FROM_DEV	0x0002
+
+/* The size of each entry in a page table */
+#define KGSL_PAGETABLE_ENTRY_SIZE  4
+
+/* Pagetable Virtual Address base */
+#define KGSL_PAGETABLE_BASE	0x66000000
+
+/* Extra accounting entries needed in the pagetable */
+#define KGSL_PT_EXTRA_ENTRIES      16
+
+#define KGSL_PAGETABLE_ENTRIES(_sz) (((_sz) >> PAGE_SHIFT) + \
+				     KGSL_PT_EXTRA_ENTRIES)
+
+#ifdef CONFIG_MSM_KGSL_MMU
+#define KGSL_PAGETABLE_SIZE \
+ALIGN(KGSL_PAGETABLE_ENTRIES(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE) * \
+KGSL_PAGETABLE_ENTRY_SIZE, PAGE_SIZE)
+#else
+#define KGSL_PAGETABLE_SIZE 0
+#endif
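+/*
+ * For example, assuming a 4 KB PAGE_SIZE and a configured GPU virtual
+ * range of 256 MB (0x10000000), KGSL_PAGETABLE_ENTRIES is 65536 + 16 and
+ * KGSL_PAGETABLE_SIZE rounds (65536 + 16) * 4 bytes up to 0x41000.
+ */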
+
+#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
+#define KGSL_PAGETABLE_COUNT (CONFIG_MSM_KGSL_PAGE_TABLE_COUNT)
+#else
+#define KGSL_PAGETABLE_COUNT 1
+#endif
+
+/* Casting using container_of() for structures that kgsl owns. */
+#define KGSL_CONTAINER_OF(ptr, type, member) \
+		container_of(ptr, type, member)
+
+/* A macro for memory statistics - add the new size to the stat and if
+   the statistic is greater than _max, set _max
+*/
+
+#define KGSL_STATS_ADD(_size, _stat, _max) \
+	do { _stat += (_size); if (_stat > _max) _max = _stat; } while (0)
+
+struct kgsl_device;
+
+struct kgsl_ptpool {
+	size_t ptsize;
+	struct mutex lock;
+	struct list_head list;
+	int entries;
+	int static_entries;
+	int chunks;
+};
+
+struct kgsl_driver {
+	struct cdev cdev;
+	dev_t major;
+	struct class *class;
+	/* Virtual device for managing the core */
+	struct device virtdev;
+	/* Kobjects for storing pagetable and process statistics */
+	struct kobject *ptkobj;
+	struct kobject *prockobj;
+	struct kgsl_device *devp[KGSL_DEVICE_MAX];
+
+	uint32_t flags_debug;
+
+	/* Global list of open processes */
+	struct list_head process_list;
+	/* Global list of pagetables */
+	struct list_head pagetable_list;
+	/* Spinlock for accessing the pagetable list */
+	spinlock_t ptlock;
+	/* Mutex for accessing the process list */
+	struct mutex process_mutex;
+
+	/* Mutex for protecting the device list */
+	struct mutex devlock;
+
+	struct kgsl_ptpool ptpool;
+
+	struct {
+		unsigned int vmalloc;
+		unsigned int vmalloc_max;
+		unsigned int coherent;
+		unsigned int coherent_max;
+		unsigned int mapped;
+		unsigned int mapped_max;
+		unsigned int histogram[16];
+	} stats;
+};
+
+extern struct kgsl_driver kgsl_driver;
+
+#define KGSL_USER_MEMORY 1
+#define KGSL_MAPPED_MEMORY 2
+
+struct kgsl_pagetable;
+struct kgsl_memdesc_ops;
+
+/* shared memory allocation */
+struct kgsl_memdesc {
+	struct kgsl_pagetable *pagetable;
+	void *hostptr;
+	unsigned int gpuaddr;
+	unsigned int physaddr;
+	unsigned int size;
+	unsigned int priv;
+	struct kgsl_memdesc_ops *ops;
+};
+
+struct kgsl_mem_entry {
+	struct kref refcount;
+	struct kgsl_memdesc memdesc;
+	int memtype;
+	struct file *file_ptr;
+	struct list_head list;
+	uint32_t free_timestamp;
+	/* back pointer to private structure under whose context this
+	* allocation is made */
+	struct kgsl_process_private *priv;
+};
+
+#ifdef CONFIG_MSM_KGSL_MMU_PAGE_FAULT
+#define MMU_CONFIG 2
+#else
+#define MMU_CONFIG 1
+#endif
+
+void kgsl_mem_entry_destroy(struct kref *kref);
+uint8_t *kgsl_gpuaddr_to_vaddr(const struct kgsl_memdesc *memdesc,
+	unsigned int gpuaddr, unsigned int *size);
+struct kgsl_mem_entry *kgsl_sharedmem_find_region(
+	struct kgsl_process_private *private, unsigned int gpuaddr,
+	size_t size);
+
+extern const struct dev_pm_ops kgsl_pm_ops;
+
+struct early_suspend;
+int kgsl_suspend_driver(struct platform_device *pdev, pm_message_t state);
+int kgsl_resume_driver(struct platform_device *pdev);
+void kgsl_early_suspend_driver(struct early_suspend *h);
+void kgsl_late_resume_driver(struct early_suspend *h);
+
+#ifdef CONFIG_MSM_KGSL_DRM
+extern int kgsl_drm_init(struct platform_device *dev);
+extern void kgsl_drm_exit(void);
+extern void kgsl_gpu_mem_flush(int op);
+#else
+static inline int kgsl_drm_init(struct platform_device *dev)
+{
+	return 0;
+}
+
+static inline void kgsl_drm_exit(void)
+{
+}
+#endif
+
+static inline int kgsl_gpuaddr_in_memdesc(const struct kgsl_memdesc *memdesc,
+				unsigned int gpuaddr)
+{
+	if (gpuaddr >= memdesc->gpuaddr && (gpuaddr + sizeof(unsigned int)) <=
+		(memdesc->gpuaddr + memdesc->size)) {
+		return 1;
+	}
+	return 0;
+}
+
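+/*
+ * Timestamp comparison that tolerates 32-bit wrap-around: "new" counts
+ * as at-or-after "old" unless it trails by more than the 20000-tick
+ * window.
+ */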
+static inline bool timestamp_cmp(unsigned int new, unsigned int old)
+{
+	int ts_diff = new - old;
+	return (ts_diff >= 0) || (ts_diff < -20000);
+}
+
+static inline void
+kgsl_mem_entry_get(struct kgsl_mem_entry *entry)
+{
+	kref_get(&entry->refcount);
+}
+
+static inline void
+kgsl_mem_entry_put(struct kgsl_mem_entry *entry)
+{
+	kref_put(&entry->refcount, kgsl_mem_entry_destroy);
+}
+
+#endif /* __KGSL_H */
diff --git a/drivers/gpu/msm/kgsl_cffdump.c b/drivers/gpu/msm/kgsl_cffdump.c
new file mode 100644
index 0000000..4349316
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_cffdump.c
@@ -0,0 +1,711 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+#define ALIGN_CPU
+
+#include <linux/spinlock.h>
+#include <linux/debugfs.h>
+#include <linux/relay.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/sched.h>
+
+#include "kgsl.h"
+#include "kgsl_cffdump.h"
+#include "kgsl_debugfs.h"
+
+static struct rchan	*chan;
+static struct dentry	*dir;
+static int		suspended;
+static size_t		dropped;
+static size_t		subbuf_size = 256*1024;
+static size_t		n_subbufs = 64;
+
+/* forward declarations */
+static void destroy_channel(void);
+static struct rchan *create_channel(unsigned subbuf_size, unsigned n_subbufs);
+
+static spinlock_t cffdump_lock;
+static ulong serial_nr;
+static ulong total_bytes;
+static ulong total_syncmem;
+static long last_sec;
+
+#define MEMBUF_SIZE	64
+
+#define CFF_OP_WRITE_REG        0x00000002
+struct cff_op_write_reg {
+	unsigned char op;
+	uint addr;
+	uint value;
+} __attribute__((packed));
+
+#define CFF_OP_POLL_REG         0x00000004
+struct cff_op_poll_reg {
+	unsigned char op;
+	uint addr;
+	uint value;
+	uint mask;
+} __attribute__((packed));
+
+#define CFF_OP_WAIT_IRQ         0x00000005
+struct cff_op_wait_irq {
+	unsigned char op;
+} __attribute__((packed));
+
+#define CFF_OP_VERIFY_MEM_FILE  0x00000007
+#define CFF_OP_RMW              0x0000000a
+
+#define CFF_OP_WRITE_MEM        0x0000000b
+struct cff_op_write_mem {
+	unsigned char op;
+	uint addr;
+	uint value;
+} __attribute__((packed));
+
+#define CFF_OP_WRITE_MEMBUF     0x0000000c
+struct cff_op_write_membuf {
+	unsigned char op;
+	uint addr;
+	ushort count;
+	uint buffer[MEMBUF_SIZE];
+} __attribute__((packed));
+
+#define CFF_OP_EOF              0xffffffff
+struct cff_op_eof {
+	unsigned char op;
+} __attribute__((packed));
+
+
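+/*
+ * Minimal base64 encoder used to emit binary CFF records as printable
+ * text into the relay channel; partial final blocks are '=' padded.
+ */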
+static void b64_encodeblock(unsigned char in[3], unsigned char out[4], int len)
+{
+	static const char tob64[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmno"
+		"pqrstuvwxyz0123456789+/";
+
+	out[0] = tob64[in[0] >> 2];
+	out[1] = tob64[((in[0] & 0x03) << 4) | ((in[1] & 0xf0) >> 4)];
+	out[2] = (unsigned char) (len > 1 ? tob64[((in[1] & 0x0f) << 2)
+		| ((in[2] & 0xc0) >> 6)] : '=');
+	out[3] = (unsigned char) (len > 2 ? tob64[in[2] & 0x3f] : '=');
+}
+
+static void b64_encode(const unsigned char *in_buf, int in_size,
+	unsigned char *out_buf, int out_bufsize, int *out_size)
+{
+	unsigned char in[3], out[4];
+	int i, len;
+
+	*out_size = 0;
+	while (in_size > 0) {
+		len = 0;
+		for (i = 0; i < 3; ++i) {
+			if (in_size-- > 0) {
+				in[i] = *in_buf++;
+				++len;
+			} else
+				in[i] = 0;
+		}
+		if (len) {
+			b64_encodeblock(in, out, len);
+			if (out_bufsize < 4) {
+				pr_warn("kgsl: cffdump: %s: out of buffer\n",
+					__func__);
+				return;
+			}
+			for (i = 0; i < 4; ++i)
+				*out_buf++ = out[i];
+			*out_size += 4;
+			out_bufsize -= 4;
+		}
+	}
+}
+
+#define KLOG_TMPBUF_SIZE (1024)
+static void klog_printk(const char *fmt, ...)
+{
+	/* per-cpu klog formatting temporary buffer */
+	static char klog_buf[NR_CPUS][KLOG_TMPBUF_SIZE];
+
+	va_list args;
+	int len;
+	char *cbuf;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	cbuf = klog_buf[smp_processor_id()];
+	va_start(args, fmt);
+	len = vsnprintf(cbuf, KLOG_TMPBUF_SIZE, fmt, args);
+	total_bytes += len;
+	va_end(args);
+	relay_write(chan, cbuf, len);
+	local_irq_restore(flags);
+}
+
+static struct cff_op_write_membuf cff_op_write_membuf;
+static void cffdump_membuf(int id, unsigned char *out_buf, int out_bufsize)
+{
+	void *data;
+	int len, out_size;
+	struct cff_op_write_mem cff_op_write_mem;
+
+	uint addr = cff_op_write_membuf.addr
+		- sizeof(uint)*cff_op_write_membuf.count;
+
+	if (!cff_op_write_membuf.count) {
+		pr_warn("kgsl: cffdump: membuf: count == 0, skipping");
+		return;
+	}
+
+	if (cff_op_write_membuf.count != 1) {
+		cff_op_write_membuf.op = CFF_OP_WRITE_MEMBUF;
+		cff_op_write_membuf.addr = addr;
+		len = sizeof(cff_op_write_membuf) -
+			sizeof(uint)*(MEMBUF_SIZE - cff_op_write_membuf.count);
+		data = &cff_op_write_membuf;
+	} else {
+		cff_op_write_mem.op = CFF_OP_WRITE_MEM;
+		cff_op_write_mem.addr = addr;
+		cff_op_write_mem.value = cff_op_write_membuf.buffer[0];
+		data = &cff_op_write_mem;
+		len = sizeof(cff_op_write_mem);
+	}
+	b64_encode(data, len, out_buf, out_bufsize, &out_size);
+	out_buf[out_size] = 0;
+	klog_printk("%ld:%d;%s\n", ++serial_nr, id, out_buf);
+	cff_op_write_membuf.count = 0;
+	cff_op_write_membuf.addr = 0;
+}
+
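+/*
+ * Encode a single CFF operation and write it to the relay log.
+ * Consecutive CFF_OP_WRITE_MEM writes to adjacent addresses are batched
+ * in cff_op_write_membuf and flushed as one CFF_OP_WRITE_MEMBUF record.
+ */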
+static void cffdump_printline(int id, uint opcode, uint op1, uint op2,
+	uint op3)
+{
+	struct cff_op_write_reg cff_op_write_reg;
+	struct cff_op_poll_reg cff_op_poll_reg;
+	struct cff_op_wait_irq cff_op_wait_irq;
+	struct cff_op_eof cff_op_eof;
+	unsigned char out_buf[sizeof(cff_op_write_membuf)/3*4 + 16];
+	void *data;
+	int len = 0, out_size;
+	long cur_secs;
+
+	spin_lock(&cffdump_lock);
+	if (opcode == CFF_OP_WRITE_MEM) {
+		if (op1 < 0x40000000 || op1 >= 0x60000000)
+			KGSL_CORE_ERR("addr out-of-range: op1=%08x", op1);
+		if ((cff_op_write_membuf.addr != op1 &&
+			cff_op_write_membuf.count)
+			|| (cff_op_write_membuf.count == MEMBUF_SIZE))
+			cffdump_membuf(id, out_buf, sizeof(out_buf));
+
+		cff_op_write_membuf.buffer[cff_op_write_membuf.count++] = op2;
+		cff_op_write_membuf.addr = op1 + sizeof(uint);
+		spin_unlock(&cffdump_lock);
+		return;
+	} else if (cff_op_write_membuf.count)
+		cffdump_membuf(id, out_buf, sizeof(out_buf));
+	spin_unlock(&cffdump_lock);
+
+	switch (opcode) {
+	case CFF_OP_WRITE_REG:
+		cff_op_write_reg.op = opcode;
+		cff_op_write_reg.addr = op1;
+		cff_op_write_reg.value = op2;
+		data = &cff_op_write_reg;
+		len = sizeof(cff_op_write_reg);
+		break;
+
+	case CFF_OP_POLL_REG:
+		cff_op_poll_reg.op = opcode;
+		cff_op_poll_reg.addr = op1;
+		cff_op_poll_reg.value = op2;
+		cff_op_poll_reg.mask = op3;
+		data = &cff_op_poll_reg;
+		len = sizeof(cff_op_poll_reg);
+		break;
+
+	case CFF_OP_WAIT_IRQ:
+		cff_op_wait_irq.op = opcode;
+		data = &cff_op_wait_irq;
+		len = sizeof(cff_op_wait_irq);
+		break;
+
+	case CFF_OP_EOF:
+		cff_op_eof.op = opcode;
+		data = &cff_op_eof;
+		len = sizeof(cff_op_eof);
+		break;
+	}
+
+	if (len) {
+		b64_encode(data, len, out_buf, sizeof(out_buf), &out_size);
+		out_buf[out_size] = 0;
+		klog_printk("%ld:%d;%s\n", ++serial_nr, id, out_buf);
+	} else
+		pr_warn("kgsl: cffdump: unhandled opcode: %d\n", opcode);
+
+	cur_secs = get_seconds();
+	if ((cur_secs - last_sec) > 10 || (last_sec - cur_secs) > 10) {
+		pr_info("kgsl: cffdump: total [bytes:%lu kB, syncmem:%lu kB], "
+			"seq#: %lu\n", total_bytes/1024, total_syncmem/1024,
+			serial_nr);
+		last_sec = cur_secs;
+	}
+}
+
+void kgsl_cffdump_init(void)
+{
+	struct dentry *debugfs_dir = kgsl_get_debugfs_dir();
+
+#ifdef ALIGN_CPU
+	cpumask_t mask;
+
+	cpumask_clear(&mask);
+	cpumask_set_cpu(1, &mask);
+	sched_setaffinity(0, &mask);
+#endif
+	if (!debugfs_dir || IS_ERR(debugfs_dir)) {
+		KGSL_CORE_ERR("Debugfs directory is bad\n");
+		return;
+	}
+
+	kgsl_cff_dump_enable = 1;
+
+	spin_lock_init(&cffdump_lock);
+
+	dir = debugfs_create_dir("cff", debugfs_dir);
+	if (!dir) {
+		KGSL_CORE_ERR("debugfs_create_dir failed\n");
+		return;
+	}
+
+	chan = create_channel(subbuf_size, n_subbufs);
+}
+
+void kgsl_cffdump_destroy(void)
+{
+	if (chan)
+		relay_flush(chan);
+	destroy_channel();
+	if (dir)
+		debugfs_remove(dir);
+}
+
+void kgsl_cffdump_open(enum kgsl_deviceid device_id)
+{
+}
+
+void kgsl_cffdump_close(enum kgsl_deviceid device_id)
+{
+	cffdump_printline(device_id, CFF_OP_EOF, 0, 0, 0);
+}
+
+void kgsl_cffdump_syncmem(struct kgsl_device_private *dev_priv,
+	const struct kgsl_memdesc *memdesc, uint gpuaddr, uint sizebytes,
+	bool clean_cache)
+{
+	const void *src;
+	uint host_size;
+	uint physaddr;
+
+	if (!kgsl_cff_dump_enable)
+		return;
+
+	total_syncmem += sizebytes;
+
+	if (memdesc == NULL) {
+		struct kgsl_mem_entry *entry;
+		spin_lock(&dev_priv->process_priv->mem_lock);
+		entry = kgsl_sharedmem_find_region(dev_priv->process_priv,
+			gpuaddr, sizebytes);
+		spin_unlock(&dev_priv->process_priv->mem_lock);
+		if (entry == NULL) {
+			KGSL_CORE_ERR("did not find mapping "
+				"for gpuaddr: 0x%08x\n", gpuaddr);
+			return;
+		}
+		memdesc = &entry->memdesc;
+	}
+	BUG_ON(memdesc->gpuaddr == 0);
+	BUG_ON(gpuaddr == 0);
+	physaddr = kgsl_get_realaddr(memdesc) + (gpuaddr - memdesc->gpuaddr);
+
+	src = kgsl_gpuaddr_to_vaddr(memdesc, gpuaddr, &host_size);
+	if (src == NULL || host_size < sizebytes) {
+		KGSL_CORE_ERR("did not find mapping for "
+			"gpuaddr: 0x%08x, m->host: 0x%p, phys: 0x%08x\n",
+			gpuaddr, memdesc->hostptr, memdesc->physaddr);
+		return;
+	}
+
+	if (clean_cache) {
+		/* Ensure that this memory region is not read from the
+		 * cache but fetched fresh */
+
+		mb();
+
+		kgsl_cache_range_op(memdesc->hostptr, memdesc->size,
+				    memdesc->type, KGSL_CACHE_OP_INV);
+	}
+
+	BUG_ON(physaddr > 0x66000000 && physaddr < 0x66ffffff);
+	while (sizebytes > 3) {
+		cffdump_printline(-1, CFF_OP_WRITE_MEM, physaddr, *(uint *)src,
+			0);
+		physaddr += 4;
+		src += 4;
+		sizebytes -= 4;
+	}
+	if (sizebytes > 0)
+		cffdump_printline(-1, CFF_OP_WRITE_MEM, physaddr, *(uint *)src,
+			0);
+}
+
+void kgsl_cffdump_setmem(uint addr, uint value, uint sizebytes)
+{
+	if (!kgsl_cff_dump_enable)
+		return;
+
+	BUG_ON(addr > 0x66000000 && addr < 0x66ffffff);
+	while (sizebytes > 3) {
+		/* Use 32bit memory writes as long as there's at least
+		 * 4 bytes left */
+		cffdump_printline(-1, CFF_OP_WRITE_MEM, addr, value, 0);
+		addr += 4;
+		sizebytes -= 4;
+	}
+	if (sizebytes > 0)
+		cffdump_printline(-1, CFF_OP_WRITE_MEM, addr, value, 0);
+}
+
+void kgsl_cffdump_regwrite(enum kgsl_deviceid device_id, uint addr,
+	uint value)
+{
+	if (!kgsl_cff_dump_enable)
+		return;
+
+	cffdump_printline(device_id, CFF_OP_WRITE_REG, addr, value, 0);
+}
+
+void kgsl_cffdump_regpoll(enum kgsl_deviceid device_id, uint addr,
+	uint value, uint mask)
+{
+	if (!kgsl_cff_dump_enable)
+		return;
+
+	cffdump_printline(device_id, CFF_OP_POLL_REG, addr, value, mask);
+}
+
+void kgsl_cffdump_slavewrite(uint addr, uint value)
+{
+	if (!kgsl_cff_dump_enable)
+		return;
+
+	cffdump_printline(-1, CFF_OP_WRITE_REG, addr, value, 0);
+}
+
+int kgsl_cffdump_waitirq(void)
+{
+	if (!kgsl_cff_dump_enable)
+		return 0;
+
+	cffdump_printline(-1, CFF_OP_WAIT_IRQ, 0, 0, 0);
+
+	return 1;
+}
+EXPORT_SYMBOL(kgsl_cffdump_waitirq);
+
+#define ADDRESS_STACK_SIZE 256
+#define GET_PM4_TYPE3_OPCODE(x) ((*(x) >> 8) & 0xFF)
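+
+/* PM4 packet headers, as decoded in kgsl_cffdump_parse_ibs() below:
+ * bits 31:30 give the packet type.  For type-3 packets bits 29:16 hold
+ * the payload dword count minus one and bits 15:8 hold the opcode,
+ * which GET_PM4_TYPE3_OPCODE() extracts.  Indirect-buffer packets
+ * carry the IB gpuaddr in dword 1 and its dword count in dword 2,
+ * which is what kgsl_cffdump_handle_type3() recurses on. */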
+static unsigned int kgsl_cffdump_addr_count;
+
+static bool kgsl_cffdump_handle_type3(struct kgsl_device_private *dev_priv,
+	uint *hostaddr, bool check_only)
+{
+	static uint addr_stack[ADDRESS_STACK_SIZE];
+	static uint size_stack[ADDRESS_STACK_SIZE];
+
+	switch (GET_PM4_TYPE3_OPCODE(hostaddr)) {
+	case PM4_INDIRECT_BUFFER_PFD:
+	case PM4_INDIRECT_BUFFER:
+	{
+		/* traverse indirect buffers */
+		int i;
+		uint ibaddr = hostaddr[1];
+		uint ibsize = hostaddr[2];
+
+		/* has this address already been encountered? */
+		for (i = 0;
+			i < kgsl_cffdump_addr_count && addr_stack[i] != ibaddr;
+			++i)
+			;
+
+		if (kgsl_cffdump_addr_count == i) {
+			addr_stack[kgsl_cffdump_addr_count] = ibaddr;
+			size_stack[kgsl_cffdump_addr_count++] = ibsize;
+
+			if (kgsl_cffdump_addr_count >= ADDRESS_STACK_SIZE) {
+				KGSL_CORE_ERR("stack overflow\n");
+				return false;
+			}
+
+			return kgsl_cffdump_parse_ibs(dev_priv, NULL,
+				ibaddr, ibsize, check_only);
+		} else if (size_stack[i] != ibsize) {
+			KGSL_CORE_ERR("gpuaddr: 0x%08x, "
+				"wc: %u, with size wc: %u already on the "
+				"stack\n", ibaddr, ibsize, size_stack[i]);
+			return false;
+		}
+	}
+	break;
+	}
+
+	return true;
+}
+
+/*
+ * Traverse IBs and dump them to test vector. Detect swap by inspecting
+ * register writes, keeping note of the current state, and dump
+ * framebuffer config to test vector
+ */
+bool kgsl_cffdump_parse_ibs(struct kgsl_device_private *dev_priv,
+	const struct kgsl_memdesc *memdesc, uint gpuaddr, int sizedwords,
+	bool check_only)
+{
+	static uint level; /* recursion level */
+	bool ret = true;
+	uint host_size;
+	uint *hostaddr, *hoststart;
+	int dwords_left = sizedwords; /* dwords left in the current command
+					 buffer */
+
+	if (level == 0)
+		kgsl_cffdump_addr_count = 0;
+
+	if (memdesc == NULL) {
+		struct kgsl_mem_entry *entry;
+		spin_lock(&dev_priv->process_priv->mem_lock);
+		entry = kgsl_sharedmem_find_region(dev_priv->process_priv,
+			gpuaddr, sizedwords * sizeof(uint));
+		spin_unlock(&dev_priv->process_priv->mem_lock);
+		if (entry == NULL) {
+			KGSL_CORE_ERR("did not find mapping "
+				"for gpuaddr: 0x%08x\n", gpuaddr);
+			return true;
+		}
+		memdesc = &entry->memdesc;
+	}
+
+	hostaddr = (uint *)kgsl_gpuaddr_to_vaddr(memdesc, gpuaddr, &host_size);
+	if (hostaddr == NULL) {
+		KGSL_CORE_ERR("did not find mapping for "
+			"gpuaddr: 0x%08x\n", gpuaddr);
+		return true;
+	}
+
+	hoststart = hostaddr;
+
+	level++;
+
+	if (!memdesc->physaddr) {
+		KGSL_CORE_ERR("no physaddr\n");
+		level--;
+		return true;
+	} else {
+		mb();
+		kgsl_cache_range_op(memdesc->hostptr, memdesc->size,
+				    memdesc->type, KGSL_CACHE_OP_INV);
+	}
+
+#ifdef DEBUG
+	pr_info("kgsl: cffdump: ib: gpuaddr:0x%08x, wc:%d, hptr:%p\n",
+		gpuaddr, sizedwords, hostaddr);
+#endif
+
+	while (dwords_left > 0) {
+		int count = 0; /* dword count including packet header */
+		bool cur_ret = true;
+
+		switch (*hostaddr >> 30) {
+		case 0x0: /* type-0 */
+			count = (*hostaddr >> 16)+2;
+			break;
+		case 0x1: /* type-1 */
+			count = 2;
+			break;
+		case 0x3: /* type-3 */
+			count = ((*hostaddr >> 16) & 0x3fff) + 2;
+			cur_ret = kgsl_cffdump_handle_type3(dev_priv,
+				hostaddr, check_only);
+			break;
+		default:
+			pr_warn("kgsl: cffdump: parse-ib: unexpected type: "
+				"type:%d, word:0x%08x @ 0x%p, gpu:0x%08x\n",
+				*hostaddr >> 30, *hostaddr, hostaddr,
+				gpuaddr+4*(sizedwords-dwords_left));
+			cur_ret = false;
+			count = dwords_left;
+			break;
+		}
+
+#ifdef DEBUG
+		if (!cur_ret) {
+			pr_info("kgsl: cffdump: bad sub-type: #:%d/%d, v:0x%08x"
+				" @ 0x%p[gb:0x%08x], level:%d\n",
+				sizedwords-dwords_left, sizedwords, *hostaddr,
+				hostaddr, gpuaddr+4*(sizedwords-dwords_left),
+				level);
+
+			print_hex_dump(KERN_ERR, level == 1 ? "IB1:" : "IB2:",
+				DUMP_PREFIX_OFFSET, 32, 4, hoststart,
+				sizedwords*4, 0);
+		}
+#endif
+		ret = ret && cur_ret;
+
+		/* jump to next packet */
+		dwords_left -= count;
+		hostaddr += count;
+		cur_ret = dwords_left >= 0;
+
+#ifdef DEBUG
+		if (!cur_ret) {
+			pr_info("kgsl: cffdump: bad count: c:%d, #:%d/%d, "
+				"v:0x%08x @ 0x%p[gb:0x%08x], level:%d\n",
+				count, sizedwords-(dwords_left+count),
+				sizedwords, *(hostaddr-count), hostaddr-count,
+				gpuaddr+4*(sizedwords-(dwords_left+count)),
+				level);
+
+			print_hex_dump(KERN_ERR, level == 1 ? "IB1:" : "IB2:",
+				DUMP_PREFIX_OFFSET, 32, 4, hoststart,
+				sizedwords*4, 0);
+		}
+#endif
+
+		ret = ret && cur_ret;
+	}
+
+	if (!ret)
+		pr_info("kgsl: cffdump: parsing failed: gpuaddr:0x%08x, "
+			"host:0x%p, wc:%d\n", gpuaddr, hoststart, sizedwords);
+
+	if (!check_only) {
+#ifdef DEBUG
+		uint offset = gpuaddr - memdesc->gpuaddr;
+		pr_info("kgsl: cffdump: ib-dump: hostptr:%p, gpuaddr:%08x, "
+			"physaddr:%08x, offset:%d, size:%d", hoststart,
+			gpuaddr, memdesc->physaddr + offset, offset,
+			sizedwords*4);
+#endif
+		kgsl_cffdump_syncmem(dev_priv, memdesc, gpuaddr, sizedwords*4,
+			false);
+	}
+
+	level--;
+
+	return ret;
+}
+
+static int subbuf_start_handler(struct rchan_buf *buf,
+	void *subbuf, void *prev_subbuf, uint prev_padding)
+{
+	pr_debug("kgsl: cffdump: subbuf_start_handler(subbuf=%p, prev_subbuf"
+		"=%p, prev_padding=%08x)\n", subbuf, prev_subbuf, prev_padding);
+
+	if (relay_buf_full(buf)) {
+		if (!suspended) {
+			suspended = 1;
+			pr_warn("kgsl: cffdump: relay: cpu %d buffer full!!!\n",
+				smp_processor_id());
+		}
+		dropped++;
+		return 0;
+	} else if (suspended) {
+		suspended = 0;
+		pr_warn("kgsl: cffdump: relay: cpu %d buffer no longer full.\n",
+			smp_processor_id());
+	}
+
+	subbuf_start_reserve(buf, 0);
+	return 1;
+}
+
+static struct dentry *create_buf_file_handler(const char *filename,
+	struct dentry *parent, int mode, struct rchan_buf *buf,
+	int *is_global)
+{
+	return debugfs_create_file(filename, mode, parent, buf,
+				       &relay_file_operations);
+}
+
+/*
+ * file_remove() default callback.  Removes relay file in debugfs.
+ */
+static int remove_buf_file_handler(struct dentry *dentry)
+{
+	pr_info("kgsl: cffdump: %s()\n", __func__);
+	debugfs_remove(dentry);
+	return 0;
+}
+
+/*
+ * relay callbacks
+ */
+static struct rchan_callbacks relay_callbacks = {
+	.subbuf_start = subbuf_start_handler,
+	.create_buf_file = create_buf_file_handler,
+	.remove_buf_file = remove_buf_file_handler,
+};
+
+/**
+ *	create_channel - creates channel /debug/kgsl/cff/cpuXXX
+ *
+ *	Creates the relay channel along with its per-cpu buffer files
+ *
+ *	Returns channel on success, NULL otherwise
+ */
+static struct rchan *create_channel(unsigned subbuf_size, unsigned n_subbufs)
+{
+	struct rchan *chan;
+
+	pr_info("kgsl: cffdump: relay: create_channel: subbuf_size %u, "
+		"n_subbufs %u, dir 0x%p\n", subbuf_size, n_subbufs, dir);
+
+	chan = relay_open("cpu", dir, subbuf_size,
+			  n_subbufs, &relay_callbacks, NULL);
+	if (!chan) {
+		KGSL_CORE_ERR("relay_open failed\n");
+		return NULL;
+	}
+
+	suspended = 0;
+	dropped = 0;
+
+	return chan;
+}
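+
+/* Once the channel is open, relay exposes one per-cpu buffer file
+ * (cpu0, cpu1, ...) under the "cff" debugfs directory via
+ * create_buf_file_handler() above; a userspace capture tool is
+ * expected to read those files to drain the CFF dump stream. */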
+
+/**
+ *	destroy_channel - destroys channel /debug/kgsl/cff/cpuXXX
+ *
+ *	Destroys the relay channel along with its per-cpu buffer files
+ */
+static void destroy_channel(void)
+{
+	pr_info("kgsl: cffdump: relay: destroy_channel\n");
+	if (chan) {
+		relay_close(chan);
+		chan = NULL;
+	}
+}
+
diff --git a/drivers/gpu/msm/kgsl_cffdump.h b/drivers/gpu/msm/kgsl_cffdump.h
new file mode 100644
index 0000000..aca7a7c
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_cffdump.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __KGSL_CFFDUMP_H
+#define __KGSL_CFFDUMP_H
+
+#ifdef CONFIG_MSM_KGSL_CFF_DUMP
+
+#include <linux/types.h>
+
+#include "kgsl_device.h"
+
+void kgsl_cffdump_init(void);
+void kgsl_cffdump_destroy(void);
+void kgsl_cffdump_open(enum kgsl_deviceid device_id);
+void kgsl_cffdump_close(enum kgsl_deviceid device_id);
+void kgsl_cffdump_syncmem(struct kgsl_device_private *dev_priv,
+	const struct kgsl_memdesc *memdesc, uint gpuaddr, uint sizebytes,
+	bool clean_cache);
+void kgsl_cffdump_setmem(uint addr, uint value, uint sizebytes);
+void kgsl_cffdump_regwrite(enum kgsl_deviceid device_id, uint addr,
+	uint value);
+void kgsl_cffdump_regpoll(enum kgsl_deviceid device_id, uint addr,
+	uint value, uint mask);
+bool kgsl_cffdump_parse_ibs(struct kgsl_device_private *dev_priv,
+	const struct kgsl_memdesc *memdesc, uint gpuaddr, int sizedwords,
+	bool check_only);
+static inline bool kgsl_cffdump_flags_no_memzero(void) { return true; }
+
+#else
+
+#define kgsl_cffdump_init()					(void)0
+#define kgsl_cffdump_destroy()					(void)0
+#define kgsl_cffdump_open(device_id)				(void)0
+#define kgsl_cffdump_close(device_id)				(void)0
+#define kgsl_cffdump_syncmem(dev_priv, memdesc, addr, sizebytes, clean_cache) \
+	(void) 0
+#define kgsl_cffdump_setmem(addr, value, sizebytes)		(void)0
+#define kgsl_cffdump_regwrite(device_id, addr, value)		(void)0
+#define kgsl_cffdump_regpoll(device_id, addr, value, mask)	(void)0
+#define kgsl_cffdump_parse_ibs(dev_priv, memdesc, gpuaddr, \
+	sizedwords, check_only)					true
+#define kgsl_cffdump_flags_no_memzero()				true
+
+#endif /* CONFIG_MSM_KGSL_CFF_DUMP */
+
+#endif /* __KGSL_CFFDUMP_H */
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
new file mode 100644
index 0000000..9da3096
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -0,0 +1,82 @@
+/* Copyright (c) 2002,2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+
+#include "kgsl.h"
+#include "kgsl_device.h"
+
+/* default log level is error for everything */
+#define KGSL_LOG_LEVEL_DEFAULT 3
+#define KGSL_LOG_LEVEL_MAX     7
+
+struct dentry *kgsl_debugfs_dir;
+
+static inline int kgsl_log_set(unsigned int *log_val, void *data, u64 val)
+{
+	*log_val = min((unsigned int)val, (unsigned int)KGSL_LOG_LEVEL_MAX);
+	return 0;
+}
+
+#define KGSL_DEBUGFS_LOG(__log)                         \
+static int __log ## _set(void *data, u64 val)           \
+{                                                       \
+	struct kgsl_device *device = data;              \
+	return kgsl_log_set(&device->__log, data, val); \
+}                                                       \
+static int __log ## _get(void *data, u64 *val)	        \
+{                                                       \
+	struct kgsl_device *device = data;              \
+	*val = device->__log;                           \
+	return 0;                                       \
+}                                                       \
+DEFINE_SIMPLE_ATTRIBUTE(__log ## _fops,                 \
+__log ## _get, __log ## _set, "%llu\n");                \
+
+KGSL_DEBUGFS_LOG(drv_log);
+KGSL_DEBUGFS_LOG(cmd_log);
+KGSL_DEBUGFS_LOG(ctxt_log);
+KGSL_DEBUGFS_LOG(mem_log);
+KGSL_DEBUGFS_LOG(pwr_log);
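+
+/* Each KGSL_DEBUGFS_LOG(foo) invocation above expands to foo_set(),
+ * foo_get() and a simple-attribute foo_fops (e.g. drv_log_fops), which
+ * back the log_level_* files created in kgsl_device_debugfs_init()
+ * below. */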
+
+void kgsl_device_debugfs_init(struct kgsl_device *device)
+{
+	if (kgsl_debugfs_dir && !IS_ERR(kgsl_debugfs_dir))
+		device->d_debugfs = debugfs_create_dir(device->name,
+						       kgsl_debugfs_dir);
+
+	if (!device->d_debugfs || IS_ERR(device->d_debugfs))
+		return;
+
+	device->cmd_log = KGSL_LOG_LEVEL_DEFAULT;
+	device->ctxt_log = KGSL_LOG_LEVEL_DEFAULT;
+	device->drv_log = KGSL_LOG_LEVEL_DEFAULT;
+	device->mem_log = KGSL_LOG_LEVEL_DEFAULT;
+	device->pwr_log = KGSL_LOG_LEVEL_DEFAULT;
+
+	debugfs_create_file("log_level_cmd", 0644, device->d_debugfs, device,
+			    &cmd_log_fops);
+	debugfs_create_file("log_level_ctxt", 0644, device->d_debugfs, device,
+			    &ctxt_log_fops);
+	debugfs_create_file("log_level_drv", 0644, device->d_debugfs, device,
+			    &drv_log_fops);
+	debugfs_create_file("log_level_mem", 0644, device->d_debugfs, device,
+				&mem_log_fops);
+	debugfs_create_file("log_level_pwr", 0644, device->d_debugfs, device,
+				&pwr_log_fops);
+}
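+
+/* Assuming debugfs is mounted at /sys/kernel/debug, the per-device log
+ * levels can then be tuned from userspace, for example:
+ *
+ *   echo 7 > /sys/kernel/debug/kgsl/<device-name>/log_level_drv
+ *   cat /sys/kernel/debug/kgsl/<device-name>/log_level_mem
+ *
+ * Written values are clamped to KGSL_LOG_LEVEL_MAX by kgsl_log_set(). */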
+
+void kgsl_core_debugfs_init(void)
+{
+	kgsl_debugfs_dir = debugfs_create_dir("kgsl", NULL);
+}
diff --git a/drivers/gpu/msm/kgsl_debugfs.h b/drivers/gpu/msm/kgsl_debugfs.h
new file mode 100644
index 0000000..1e36fab
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_debugfs.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2002,2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _KGSL_DEBUGFS_H
+#define _KGSL_DEBUGFS_H
+
+struct kgsl_device;
+
+#ifdef CONFIG_DEBUG_FS
+void kgsl_core_debugfs_init(void);
+void kgsl_device_debugfs_init(struct kgsl_device *device);
+
+extern struct dentry *kgsl_debugfs_dir;
+static inline struct dentry *kgsl_get_debugfs_dir(void)
+{
+	return kgsl_debugfs_dir;
+}
+
+#else
+static inline void kgsl_core_debugfs_init(void) { }
+static inline void kgsl_device_debugfs_init(struct kgsl_device *device) { }
+
+static inline struct dentry *kgsl_get_debugfs_dir(void) { return NULL; }
+
+#endif
+
+#endif
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
new file mode 100644
index 0000000..692a9ec
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -0,0 +1,289 @@
+/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __KGSL_DEVICE_H
+#define __KGSL_DEVICE_H
+
+#include <linux/idr.h>
+#include <linux/wakelock.h>
+#include <linux/pm_qos_params.h>
+#include <linux/earlysuspend.h>
+
+#include "kgsl.h"
+#include "kgsl_mmu.h"
+#include "kgsl_pwrctrl.h"
+#include "kgsl_log.h"
+#include "kgsl_pwrscale.h"
+
+#define KGSL_TIMEOUT_NONE       0
+#define KGSL_TIMEOUT_DEFAULT    0xFFFFFFFF
+
+#define FIRST_TIMEOUT (HZ / 2)
+
+
+/* KGSL device state is initialized to INIT when platform_probe		*
+ * successfully initialized the device.  Once a device has been opened	*
+ * (started) it becomes active.  NAP implies that only low latency	*
+ * resources (for now clocks on some platforms) are off.  SLEEP implies	*
+ * that the KGSL module believes a device is idle (has been inactive	*
+ * past its timer) and all system resources are released.  SUSPEND is	*
+ * requested by the kernel and will be enforced upon all open devices.	*/
+
+#define KGSL_STATE_NONE		0x00000000
+#define KGSL_STATE_INIT		0x00000001
+#define KGSL_STATE_ACTIVE	0x00000002
+#define KGSL_STATE_NAP		0x00000004
+#define KGSL_STATE_SLEEP	0x00000008
+#define KGSL_STATE_SUSPEND	0x00000010
+#define KGSL_STATE_HUNG		0x00000020
+#define KGSL_STATE_DUMP_AND_RECOVER	0x00000040
+
+#define KGSL_GRAPHICS_MEMORY_LOW_WATERMARK  0x1000000
+
+#define KGSL_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK)))
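+/* e.g. with 4K pages, KGSL_IS_PAGE_ALIGNED(0x20000000) is true and
+ * KGSL_IS_PAGE_ALIGNED(0x20000004) is false. */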
+
+struct kgsl_device;
+struct platform_device;
+struct kgsl_device_private;
+struct kgsl_context;
+struct kgsl_power_stats;
+
+struct kgsl_functable {
+	/* Mandatory functions - these functions must be implemented
+	   by the client device.  The driver will not check for a NULL
+	   pointer before calling the hook.
+	 */
+	void (*regread) (struct kgsl_device *device,
+		unsigned int offsetwords, unsigned int *value);
+	void (*regwrite) (struct kgsl_device *device,
+		unsigned int offsetwords, unsigned int value);
+	int (*idle) (struct kgsl_device *device, unsigned int timeout);
+	unsigned int (*isidle) (struct kgsl_device *device);
+	int (*suspend_context) (struct kgsl_device *device);
+	int (*start) (struct kgsl_device *device, unsigned int init_ram);
+	int (*stop) (struct kgsl_device *device);
+	int (*getproperty) (struct kgsl_device *device,
+		enum kgsl_property_type type, void *value,
+		unsigned int sizebytes);
+	int (*waittimestamp) (struct kgsl_device *device,
+		unsigned int timestamp, unsigned int msecs);
+	unsigned int (*readtimestamp) (struct kgsl_device *device,
+		enum kgsl_timestamp_type type);
+	int (*issueibcmds) (struct kgsl_device_private *dev_priv,
+		struct kgsl_context *context, struct kgsl_ibdesc *ibdesc,
+		unsigned int sizedwords, uint32_t *timestamp,
+		unsigned int flags);
+	int (*setup_pt)(struct kgsl_device *device,
+		struct kgsl_pagetable *pagetable);
+	int (*cleanup_pt)(struct kgsl_device *device,
+		struct kgsl_pagetable *pagetable);
+	void (*power_stats)(struct kgsl_device *device,
+		struct kgsl_power_stats *stats);
+	void (*irqctrl)(struct kgsl_device *device, int state);
+	/* Optional functions - these functions are not mandatory.  The
+	   driver will check that the function pointer is not NULL before
+	   calling the hook */
+	void (*setstate) (struct kgsl_device *device, uint32_t flags);
+	int (*drawctxt_create) (struct kgsl_device *device,
+		struct kgsl_pagetable *pagetable, struct kgsl_context *context,
+		uint32_t flags);
+	void (*drawctxt_destroy) (struct kgsl_device *device,
+		struct kgsl_context *context);
+	long (*ioctl) (struct kgsl_device_private *dev_priv,
+		unsigned int cmd, void *data);
+};
+
+struct kgsl_memregion {
+	unsigned char *mmio_virt_base;
+	unsigned int mmio_phys_base;
+	uint32_t gpu_base;
+	unsigned int sizebytes;
+};
+
+struct kgsl_device {
+	struct device *dev;
+	const char *name;
+	unsigned int ver_major;
+	unsigned int ver_minor;
+	uint32_t flags;
+	enum kgsl_deviceid id;
+	struct kgsl_memregion regspace;
+	struct kgsl_memdesc memstore;
+	const char *iomemname;
+
+	struct kgsl_mmu mmu;
+	struct completion hwaccess_gate;
+	const struct kgsl_functable *ftbl;
+	struct work_struct idle_check_ws;
+	struct timer_list idle_timer;
+	struct kgsl_pwrctrl pwrctrl;
+	int open_count;
+
+	struct atomic_notifier_head ts_notifier_list;
+	struct mutex mutex;
+	uint32_t state;
+	uint32_t requested_state;
+
+	struct list_head memqueue;
+	unsigned int active_cnt;
+	struct completion suspend_gate;
+
+	wait_queue_head_t wait_queue;
+	struct workqueue_struct *work_queue;
+	struct device *parentdev;
+	struct completion recovery_gate;
+	struct dentry *d_debugfs;
+	struct idr context_idr;
+	struct early_suspend display_off;
+
+	/* Logging levels */
+	int cmd_log;
+	int ctxt_log;
+	int drv_log;
+	int mem_log;
+	int pwr_log;
+	struct wake_lock idle_wakelock;
+	struct kgsl_pwrscale pwrscale;
+	struct kobject pwrscale_kobj;
+	struct pm_qos_request_list pm_qos_req_dma;
+};
+
+struct kgsl_context {
+	uint32_t id;
+
+	/* Pointer to the owning device instance */
+	struct kgsl_device_private *dev_priv;
+
+	/* Pointer to the device specific context information */
+	void *devctxt;
+};
+
+struct kgsl_process_private {
+	unsigned int refcnt;
+	pid_t pid;
+	spinlock_t mem_lock;
+	struct list_head mem_list;
+	struct kgsl_pagetable *pagetable;
+	struct list_head list;
+	struct kobject *kobj;
+
+	struct {
+		unsigned int user;
+		unsigned int user_max;
+		unsigned int mapped;
+		unsigned int mapped_max;
+		unsigned int flushes;
+	} stats;
+};
+
+struct kgsl_device_private {
+	struct kgsl_device *device;
+	struct kgsl_process_private *process_priv;
+};
+
+struct kgsl_power_stats {
+	s64 total_time;
+	s64 busy_time;
+};
+
+struct kgsl_device *kgsl_get_device(int dev_idx);
+
+static inline void kgsl_regread(struct kgsl_device *device,
+				unsigned int offsetwords,
+				unsigned int *value)
+{
+	device->ftbl->regread(device, offsetwords, value);
+}
+
+static inline void kgsl_regwrite(struct kgsl_device *device,
+				 unsigned int offsetwords,
+				 unsigned int value)
+{
+	device->ftbl->regwrite(device, offsetwords, value);
+}
+
+static inline int kgsl_idle(struct kgsl_device *device, unsigned int timeout)
+{
+	return device->ftbl->idle(device, timeout);
+}
+
+static inline int kgsl_create_device_sysfs_files(struct device *root,
+	const struct device_attribute **list)
+{
+	int ret = 0, i;
+	for (i = 0; list[i] != NULL; i++)
+		ret |= device_create_file(root, list[i]);
+	return ret;
+}
+
+static inline void kgsl_remove_device_sysfs_files(struct device *root,
+	const struct device_attribute **list)
+{
+	int i;
+	for (i = 0; list[i] != NULL; i++)
+		device_remove_file(root, list[i]);
+}
+
+static inline struct kgsl_mmu *
+kgsl_get_mmu(struct kgsl_device *device)
+{
+	return (struct kgsl_mmu *) (device ? &device->mmu : NULL);
+}
+
+static inline struct kgsl_device *kgsl_device_from_dev(struct device *dev)
+{
+	int i;
+
+	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
+		if (kgsl_driver.devp[i] && kgsl_driver.devp[i]->dev == dev)
+			return kgsl_driver.devp[i];
+	}
+
+	return NULL;
+}
+
+static inline int kgsl_create_device_workqueue(struct kgsl_device *device)
+{
+	device->work_queue = create_workqueue(device->name);
+	if (!device->work_queue) {
+		KGSL_DRV_ERR(device, "create_workqueue(%s) failed\n",
+			device->name);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static inline struct kgsl_context *
+kgsl_find_context(struct kgsl_device_private *dev_priv, uint32_t id)
+{
+	struct kgsl_context *ctxt =
+		idr_find(&dev_priv->device->context_idr, id);
+
+	/* Make sure that the context belongs to the current instance so
+	   that other processes can't guess context IDs and mess things up */
+
+	return  (ctxt && ctxt->dev_priv == dev_priv) ? ctxt : NULL;
+}
+
+int kgsl_check_timestamp(struct kgsl_device *device, unsigned int timestamp);
+
+int kgsl_register_ts_notifier(struct kgsl_device *device,
+			      struct notifier_block *nb);
+
+int kgsl_unregister_ts_notifier(struct kgsl_device *device,
+				struct notifier_block *nb);
+
+int kgsl_device_platform_probe(struct kgsl_device *device,
+		irqreturn_t (*dev_isr) (int, void*));
+void kgsl_device_platform_remove(struct kgsl_device *device);
+
+#endif  /* __KGSL_DEVICE_H */
diff --git a/drivers/gpu/msm/kgsl_drm.c b/drivers/gpu/msm/kgsl_drm.c
new file mode 100644
index 0000000..202783b
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_drm.c
@@ -0,0 +1,1690 @@
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Implements an interface between KGSL and the DRM subsystem.  For now this
+ * is pretty simple, but it will take on more of the workload as time goes
+ * on
+ */
+#include "drmP.h"
+#include "drm.h"
+#include <linux/android_pmem.h>
+#include <linux/notifier.h>
+
+#include "kgsl.h"
+#include "kgsl_device.h"
+#include "kgsl_drm.h"
+#include "kgsl_mmu.h"
+#include "kgsl_sharedmem.h"
+
+#define DRIVER_AUTHOR           "Qualcomm"
+#define DRIVER_NAME             "kgsl"
+#define DRIVER_DESC             "KGSL DRM"
+#define DRIVER_DATE             "20100127"
+
+#define DRIVER_MAJOR            2
+#define DRIVER_MINOR            1
+#define DRIVER_PATCHLEVEL       1
+
+#define DRM_KGSL_GEM_FLAG_MAPPED (1 << 0)
+
+#define ENTRY_EMPTY -1
+#define ENTRY_NEEDS_CLEANUP -2
+
+#define DRM_KGSL_NUM_FENCE_ENTRIES (DRM_KGSL_HANDLE_WAIT_ENTRIES << 2)
+#define DRM_KGSL_HANDLE_WAIT_ENTRIES 5
+
+/* Returns true if the memory type is in PMEM */
+
+#ifdef CONFIG_KERNEL_PMEM_SMI_REGION
+#define TYPE_IS_PMEM(_t) \
+  (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_EBI) || \
+   ((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_SMI) || \
+   ((_t) & DRM_KGSL_GEM_TYPE_PMEM))
+#else
+#define TYPE_IS_PMEM(_t) \
+  (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_EBI) || \
+   ((_t) & (DRM_KGSL_GEM_TYPE_PMEM | DRM_KGSL_GEM_PMEM_EBI)))
+#endif
+
+/* Returns true if the memory type is regular */
+
+#define TYPE_IS_MEM(_t) \
+  (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_KMEM) || \
+   ((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE) || \
+   ((_t) & DRM_KGSL_GEM_TYPE_MEM))
+
+#define TYPE_IS_FD(_t) ((_t) & DRM_KGSL_GEM_TYPE_FD_MASK)
+
+/* Returns true if KMEM region is uncached */
+
+#define IS_MEM_UNCACHED(_t) \
+  ((_t == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE) || \
+   (_t == DRM_KGSL_GEM_TYPE_KMEM) || \
+   (TYPE_IS_MEM(_t) && (_t & DRM_KGSL_GEM_CACHE_WCOMBINE)))
+
+struct drm_kgsl_gem_object_wait_list_entry {
+	struct list_head list;
+	int pid;
+	int in_use;
+	wait_queue_head_t process_wait_q;
+};
+
+struct drm_kgsl_gem_object_fence {
+	int32_t fence_id;
+	unsigned int num_buffers;
+	int ts_valid;
+	unsigned int timestamp;
+	int ts_device;
+	int lockpid;
+	struct list_head buffers_in_fence;
+};
+
+struct drm_kgsl_gem_object_fence_list_entry {
+	struct list_head list;
+	int in_use;
+	struct drm_gem_object *gem_obj;
+};
+
+static int32_t fence_id = 0x1;
+
+static struct drm_kgsl_gem_object_fence
+			  gem_buf_fence[DRM_KGSL_NUM_FENCE_ENTRIES];
+
+struct drm_kgsl_gem_object {
+	struct drm_gem_object *obj;
+	uint32_t type;
+	struct kgsl_memdesc memdesc;
+	struct kgsl_pagetable *pagetable;
+	uint64_t mmap_offset;
+	int bufcount;
+	int flags;
+	struct list_head list;
+	int active;
+
+	struct {
+		uint32_t offset;
+		uint32_t gpuaddr;
+	} bufs[DRM_KGSL_GEM_MAX_BUFFERS];
+
+	int bound;
+	int lockpid;
+	/* Put these here to avoid allocating all the time */
+	struct drm_kgsl_gem_object_wait_list_entry
+	wait_entries[DRM_KGSL_HANDLE_WAIT_ENTRIES];
+	/* Each object can only appear in a single fence */
+	struct drm_kgsl_gem_object_fence_list_entry
+	fence_entries[DRM_KGSL_NUM_FENCE_ENTRIES];
+
+	struct list_head wait_list;
+};
+
+/* This is a global list of all the memory currently mapped in the MMU */
+static struct list_head kgsl_mem_list;
+
+static void kgsl_gem_mem_flush(struct kgsl_memdesc *memdesc, int type, int op)
+{
+	int cacheop = 0;
+
+	switch (op) {
+	case DRM_KGSL_GEM_CACHE_OP_TO_DEV:
+		if (type & (DRM_KGSL_GEM_CACHE_WBACK |
+			    DRM_KGSL_GEM_CACHE_WBACKWA))
+			cacheop = KGSL_CACHE_OP_CLEAN;
+
+		break;
+
+	case DRM_KGSL_GEM_CACHE_OP_FROM_DEV:
+		if (type & (DRM_KGSL_GEM_CACHE_WBACK |
+			    DRM_KGSL_GEM_CACHE_WBACKWA |
+			    DRM_KGSL_GEM_CACHE_WTHROUGH))
+			cacheop = KGSL_CACHE_OP_INV;
+	}
+
+	kgsl_cache_range_op(memdesc, cacheop);
+}
+
+/* Flush all the memory mapped in the MMU */
+
+void kgsl_gpu_mem_flush(int op)
+{
+	struct drm_kgsl_gem_object *entry;
+
+	list_for_each_entry(entry, &kgsl_mem_list, list) {
+		kgsl_gem_mem_flush(&entry->memdesc, entry->type, op);
+	}
+
+	/* Takes care of WT/WC case.
+	 * More useful when we go barrierless
+	 */
+	dmb();
+}
+
+/* TODO:
+ * Add vsync wait */
+
+static int kgsl_drm_load(struct drm_device *dev, unsigned long flags)
+{
+	return 0;
+}
+
+static int kgsl_drm_unload(struct drm_device *dev)
+{
+	return 0;
+}
+
+struct kgsl_drm_device_priv {
+	struct kgsl_device *device[KGSL_DEVICE_MAX];
+	struct kgsl_device_private *devpriv[KGSL_DEVICE_MAX];
+};
+
+static int kgsl_ts_notifier_cb(struct notifier_block *blk,
+			       unsigned long code, void *_param);
+
+static struct notifier_block kgsl_ts_nb[KGSL_DEVICE_MAX];
+
+static int kgsl_drm_firstopen(struct drm_device *dev)
+{
+	int i;
+
+	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
+		struct kgsl_device *device = kgsl_get_device(i);
+
+		if (device == NULL)
+			continue;
+
+		kgsl_ts_nb[i].notifier_call = kgsl_ts_notifier_cb;
+		kgsl_register_ts_notifier(device, &kgsl_ts_nb[i]);
+	}
+
+	return 0;
+}
+
+void kgsl_drm_lastclose(struct drm_device *dev)
+{
+	int i;
+
+	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
+		struct kgsl_device *device = kgsl_get_device(i);
+		if (device == NULL)
+			continue;
+
+		kgsl_unregister_ts_notifier(device, &kgsl_ts_nb[i]);
+	}
+}
+
+void kgsl_drm_preclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+}
+
+static int kgsl_drm_suspend(struct drm_device *dev, pm_message_t state)
+{
+	return 0;
+}
+
+static int kgsl_drm_resume(struct drm_device *dev)
+{
+	return 0;
+}
+
+static void
+kgsl_gem_free_mmap_offset(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_kgsl_gem_object *priv = obj->driver_private;
+	struct drm_map_list *list;
+
+	list = &obj->map_list;
+	drm_ht_remove_item(&mm->offset_hash, &list->hash);
+	if (list->file_offset_node) {
+		drm_mm_put_block(list->file_offset_node);
+		list->file_offset_node = NULL;
+	}
+
+	kfree(list->map);
+	list->map = NULL;
+
+	priv->mmap_offset = 0;
+}
+
+static int
+kgsl_gem_memory_allocated(struct drm_gem_object *obj)
+{
+	struct drm_kgsl_gem_object *priv = obj->driver_private;
+	return priv->memdesc.size ? 1 : 0;
+}
+
+static int
+kgsl_gem_alloc_memory(struct drm_gem_object *obj)
+{
+	struct drm_kgsl_gem_object *priv = obj->driver_private;
+	int index;
+
+	/* Return if the memory is already allocated */
+
+	if (kgsl_gem_memory_allocated(obj) || TYPE_IS_FD(priv->type))
+		return 0;
+
+	if (TYPE_IS_PMEM(priv->type)) {
+		int type;
+
+		if (priv->type == DRM_KGSL_GEM_TYPE_EBI ||
+		    priv->type & DRM_KGSL_GEM_PMEM_EBI)
+			type = PMEM_MEMTYPE_EBI1;
+		else
+			type = PMEM_MEMTYPE_SMI;
+
+		priv->memdesc.physaddr =
+			pmem_kalloc(obj->size * priv->bufcount,
+				    type | PMEM_ALIGNMENT_4K);
+
+		if (IS_ERR((void *) priv->memdesc.physaddr)) {
+			DRM_ERROR("Unable to allocate PMEM memory\n");
+			return -ENOMEM;
+		}
+
+		priv->memdesc.size = obj->size * priv->bufcount;
+		priv->memdesc.ops = &kgsl_contiguous_ops;
+
+	} else if (TYPE_IS_MEM(priv->type)) {
+		priv->memdesc.hostptr =
+			vmalloc_user(obj->size * priv->bufcount);
+
+		if (priv->memdesc.hostptr == NULL) {
+			DRM_ERROR("Unable to allocate vmalloc memory\n");
+			return -ENOMEM;
+		}
+
+		priv->memdesc.size = obj->size * priv->bufcount;
+		priv->memdesc.ops = &kgsl_vmalloc_ops;
+	} else
+		return -EINVAL;
+
+	for (index = 0; index < priv->bufcount; index++)
+		priv->bufs[index].offset = index * obj->size;
+
+
+	return 0;
+}
+
+#ifdef CONFIG_MSM_KGSL_MMU
+static void
+kgsl_gem_unmap(struct drm_gem_object *obj)
+{
+	struct drm_kgsl_gem_object *priv = obj->driver_private;
+
+	if (!(priv->flags & DRM_KGSL_GEM_FLAG_MAPPED))
+		return;
+
+	kgsl_mmu_unmap(priv->pagetable, &priv->memdesc);
+
+	kgsl_mmu_putpagetable(priv->pagetable);
+	priv->pagetable = NULL;
+
+	if ((priv->type == DRM_KGSL_GEM_TYPE_KMEM) ||
+	    (priv->type & DRM_KGSL_GEM_CACHE_MASK))
+		list_del(&priv->list);
+
+	priv->flags &= ~DRM_KGSL_GEM_FLAG_MAPPED;
+}
+#else
+static void
+kgsl_gem_unmap(struct drm_gem_object *obj)
+{
+}
+#endif
+
+static void
+kgsl_gem_free_memory(struct drm_gem_object *obj)
+{
+	struct drm_kgsl_gem_object *priv = obj->driver_private;
+
+	if (!kgsl_gem_memory_allocated(obj) || TYPE_IS_FD(priv->type))
+		return;
+
+	kgsl_gem_mem_flush(&priv->memdesc,  priv->type,
+			   DRM_KGSL_GEM_CACHE_OP_FROM_DEV);
+
+	kgsl_gem_unmap(obj);
+
+	if (TYPE_IS_PMEM(priv->type))
+		pmem_kfree(priv->memdesc.physaddr);
+
+	kgsl_sharedmem_free(&priv->memdesc);
+}
+
+int
+kgsl_gem_init_object(struct drm_gem_object *obj)
+{
+	struct drm_kgsl_gem_object *priv;
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (priv == NULL) {
+		DRM_ERROR("Unable to create GEM object\n");
+		return -ENOMEM;
+	}
+
+	obj->driver_private = priv;
+	priv->obj = obj;
+
+	return 0;
+}
+
+void
+kgsl_gem_free_object(struct drm_gem_object *obj)
+{
+	kgsl_gem_free_memory(obj);
+	kgsl_gem_free_mmap_offset(obj);
+	drm_gem_object_release(obj);
+	kfree(obj->driver_private);
+}
+
+static int
+kgsl_gem_create_mmap_offset(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_kgsl_gem_object *priv = obj->driver_private;
+	struct drm_map_list *list;
+	int msize;
+
+	list = &obj->map_list;
+	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
+	if (list->map == NULL) {
+		DRM_ERROR("Unable to allocate drm_map_list\n");
+		return -ENOMEM;
+	}
+
+	msize = obj->size * priv->bufcount;
+
+	list->map->type = _DRM_GEM;
+	list->map->size = msize;
+	list->map->handle = obj;
+
+	/* Allocate a mmap offset */
+	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
+						    msize / PAGE_SIZE,
+						    0, 0);
+
+	if (!list->file_offset_node) {
+		DRM_ERROR("Failed to allocate offset for %d\n", obj->name);
+		kfree(list->map);
+		return -ENOMEM;
+	}
+
+	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+						  msize / PAGE_SIZE, 0);
+
+	if (!list->file_offset_node) {
+		DRM_ERROR("Unable to create the file_offset_node\n");
+		kfree(list->map);
+		return -ENOMEM;
+	}
+
+	list->hash.key = list->file_offset_node->start;
+	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
+		DRM_ERROR("Failed to add to map hash\n");
+		drm_mm_put_block(list->file_offset_node);
+		kfree(list->map);
+		return -ENOMEM;
+	}
+
+	priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
+
+	return 0;
+}
+
+int
+kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start,
+			unsigned long *len)
+{
+	struct file *filp;
+	struct drm_device *dev;
+	struct drm_file *file_priv;
+	struct drm_gem_object *obj;
+	struct drm_kgsl_gem_object *priv;
+	int ret = 0;
+
+	filp = fget(drm_fd);
+	if (unlikely(filp == NULL)) {
+		DRM_ERROR("Unable to ghet the DRM file descriptor\n");
+		return -EINVAL;
+	}
+	file_priv = filp->private_data;
+	if (unlikely(file_priv == NULL)) {
+		DRM_ERROR("Unable to get the file private data\n");
+		fput(filp);
+		return -EINVAL;
+	}
+	dev = file_priv->minor->dev;
+	if (unlikely(dev == NULL)) {
+		DRM_ERROR("Unable to get the minor device\n");
+		fput(filp);
+		return -EINVAL;
+	}
+
+	obj = drm_gem_object_lookup(dev, file_priv, handle);
+	if (unlikely(obj == NULL)) {
+		DRM_ERROR("Invalid GEM handle %x\n", handle);
+		fput(filp);
+		return -EBADF;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	priv = obj->driver_private;
+
+	/* We can only use the MDP for PMEM regions */
+
+	if (TYPE_IS_PMEM(priv->type)) {
+		*start = priv->memdesc.physaddr +
+			priv->bufs[priv->active].offset;
+
+		*len = priv->memdesc.size;
+
+		kgsl_gem_mem_flush(&priv->memdesc,
+				   priv->type, DRM_KGSL_GEM_CACHE_OP_TO_DEV);
+	} else {
+		*start = 0;
+		*len = 0;
+		ret = -EINVAL;
+	}
+
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	fput(filp);
+	return ret;
+}
+
+static int
+kgsl_gem_init_obj(struct drm_device *dev,
+		  struct drm_file *file_priv,
+		  struct drm_gem_object *obj,
+		  int *handle)
+{
+	struct drm_kgsl_gem_object *priv;
+	int ret, i;
+
+	mutex_lock(&dev->struct_mutex);
+	priv = obj->driver_private;
+
+	memset(&priv->memdesc, 0, sizeof(priv->memdesc));
+	priv->bufcount = 1;
+	priv->active = 0;
+	priv->bound = 0;
+
+	/* To preserve backwards compatibility, the default memory source
+	   is EBI */
+
+	priv->type = DRM_KGSL_GEM_TYPE_PMEM | DRM_KGSL_GEM_PMEM_EBI;
+
+	ret = drm_gem_handle_create(file_priv, obj, handle);
+
+	drm_gem_object_handle_unreference(obj);
+	INIT_LIST_HEAD(&priv->wait_list);
+
+	for (i = 0; i < DRM_KGSL_HANDLE_WAIT_ENTRIES; i++) {
+		INIT_LIST_HEAD((struct list_head *) &priv->wait_entries[i]);
+		priv->wait_entries[i].pid = 0;
+		init_waitqueue_head(&priv->wait_entries[i].process_wait_q);
+	}
+
+	for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
+		INIT_LIST_HEAD((struct list_head *) &priv->fence_entries[i]);
+		priv->fence_entries[i].in_use = 0;
+		priv->fence_entries[i].gem_obj = obj;
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+int
+kgsl_gem_create_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	struct drm_kgsl_gem_create *create = data;
+	struct drm_gem_object *obj;
+	int ret, handle;
+
+	/* Page align the size so we can allocate multiple buffers */
+	create->size = ALIGN(create->size, 4096);
+
+	obj = drm_gem_object_alloc(dev, create->size);
+
+	if (obj == NULL) {
+		DRM_ERROR("Unable to allocate the GEM object\n");
+		return -ENOMEM;
+	}
+
+	ret = kgsl_gem_init_obj(dev, file_priv, obj, &handle);
+	if (ret)
+		return ret;
+
+	create->handle = handle;
+	return 0;
+}
+
+int
+kgsl_gem_create_fd_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct drm_kgsl_gem_create_fd *args = data;
+	struct file *file;
+	dev_t rdev;
+	struct fb_info *info;
+	struct drm_gem_object *obj;
+	struct drm_kgsl_gem_object *priv;
+	int ret, put_needed, handle;
+
+	file = fget_light(args->fd, &put_needed);
+
+	if (file == NULL) {
+		DRM_ERROR("Unable to get the file object\n");
+		return -EBADF;
+	}
+
+	rdev = file->f_dentry->d_inode->i_rdev;
+
+	/* Only framebuffer objects are supported ATM */
+
+	if (MAJOR(rdev) != FB_MAJOR) {
+		DRM_ERROR("File descriptor is not a framebuffer\n");
+		ret = -EBADF;
+		goto error_fput;
+	}
+
+	info = registered_fb[MINOR(rdev)];
+
+	if (info == NULL) {
+		DRM_ERROR("Framebuffer minor %d is not registered\n",
+			  MINOR(rdev));
+		ret = -EBADF;
+		goto error_fput;
+	}
+
+	obj = drm_gem_object_alloc(dev, info->fix.smem_len);
+
+	if (obj == NULL) {
+		DRM_ERROR("Unable to allocate GEM object\n");
+		ret = -ENOMEM;
+		goto error_fput;
+	}
+
+	ret = kgsl_gem_init_obj(dev, file_priv, obj, &handle);
+
+	if (ret)
+		goto error_fput;
+
+	mutex_lock(&dev->struct_mutex);
+
+	priv = obj->driver_private;
+	priv->memdesc.physaddr = info->fix.smem_start;
+	priv->type = DRM_KGSL_GEM_TYPE_FD_FBMEM;
+
+	mutex_unlock(&dev->struct_mutex);
+	args->handle = handle;
+
+error_fput:
+	fput_light(file, put_needed);
+
+	return ret;
+}
+
+int
+kgsl_gem_setmemtype_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_kgsl_gem_memtype *args = data;
+	struct drm_gem_object *obj;
+	struct drm_kgsl_gem_object *priv;
+	int ret = 0;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+	if (obj == NULL) {
+		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+		return -EBADF;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	priv = obj->driver_private;
+
+	if (TYPE_IS_FD(priv->type))
+		ret = -EINVAL;
+	else {
+		if (TYPE_IS_PMEM(args->type) || TYPE_IS_MEM(args->type))
+			priv->type = args->type;
+		else
+			ret = -EINVAL;
+	}
+
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+int
+kgsl_gem_getmemtype_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	struct drm_kgsl_gem_memtype *args = data;
+	struct drm_gem_object *obj;
+	struct drm_kgsl_gem_object *priv;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+	if (obj == NULL) {
+		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+		return -EBADF;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	priv = obj->driver_private;
+
+	args->type = priv->type;
+
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+int
+kgsl_gem_unbind_gpu_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_kgsl_gem_bind_gpu *args = data;
+	struct drm_gem_object *obj;
+	struct drm_kgsl_gem_object *priv;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+	if (obj == NULL) {
+		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+		return -EBADF;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	priv = obj->driver_private;
+
+	if (--priv->bound == 0)
+		kgsl_gem_unmap(obj);
+
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+#ifdef CONFIG_MSM_KGSL_MMU
+static int
+kgsl_gem_map(struct drm_gem_object *obj)
+{
+	struct drm_kgsl_gem_object *priv = obj->driver_private;
+	int index;
+	int ret = -EINVAL;
+
+	if (priv->flags & DRM_KGSL_GEM_FLAG_MAPPED)
+		return 0;
+
+	/* Get the global page table */
+
+	if (priv->pagetable == NULL) {
+		priv->pagetable = kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
+
+		if (priv->pagetable == NULL) {
+			DRM_ERROR("Unable to get the GPU MMU pagetable\n");
+			return -EINVAL;
+		}
+	}
+
+	priv->memdesc.pagetable = priv->pagetable;
+
+	ret = kgsl_mmu_map(priv->pagetable, &priv->memdesc,
+			   GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+
+	if (!ret) {
+		for (index = 0; index < priv->bufcount; index++) {
+			priv->bufs[index].gpuaddr =
+				priv->memdesc.gpuaddr +
+				priv->bufs[index].offset;
+		}
+	}
+
+	/* Add cached memory to the list to be cached */
+
+	if (priv->type == DRM_KGSL_GEM_TYPE_KMEM ||
+	    priv->type & DRM_KGSL_GEM_CACHE_MASK)
+		list_add(&priv->list, &kgsl_mem_list);
+
+	priv->flags |= DRM_KGSL_GEM_FLAG_MAPPED;
+
+	return ret;
+}
+#else
+static int
+kgsl_gem_map(struct drm_gem_object *obj)
+{
+	struct drm_kgsl_gem_object *priv = obj->driver_private;
+	int index;
+
+	if (TYPE_IS_PMEM(priv->type)) {
+		for (index = 0; index < priv->bufcount; index++)
+			priv->bufs[index].gpuaddr =
+			priv->memdesc.physaddr + priv->bufs[index].offset;
+
+		return 0;
+	}
+
+	return -EINVAL;
+}
+#endif
+
+int
+kgsl_gem_bind_gpu_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_kgsl_gem_bind_gpu *args = data;
+	struct drm_gem_object *obj;
+	struct drm_kgsl_gem_object *priv;
+	int ret = 0;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+	if (obj == NULL) {
+		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+		return -EBADF;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	priv = obj->driver_private;
+
+	if (priv->bound++ == 0) {
+
+		if (!kgsl_gem_memory_allocated(obj)) {
+			DRM_ERROR("Memory not allocated for this object\n");
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		ret = kgsl_gem_map(obj);
+
+		/* This is legacy behavior - use GET_BUFFERINFO instead */
+		args->gpuptr = priv->bufs[0].gpuaddr;
+	}
+out:
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+/* Allocate the memory and prepare it for CPU mapping */
+
+int
+kgsl_gem_alloc_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_kgsl_gem_alloc *args = data;
+	struct drm_gem_object *obj;
+	struct drm_kgsl_gem_object *priv;
+	int ret;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+	if (obj == NULL) {
+		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+		return -EBADF;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	priv = obj->driver_private;
+
+	ret = kgsl_gem_alloc_memory(obj);
+
+	if (ret) {
+		DRM_ERROR("Unable to allocate object memory\n");
+	} else if (!priv->mmap_offset) {
+		ret = kgsl_gem_create_mmap_offset(obj);
+		if (ret)
+			DRM_ERROR("Unable to create a mmap offset\n");
+	}
+
+	args->offset = priv->mmap_offset;
+
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+int
+kgsl_gem_mmap_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_kgsl_gem_mmap *args = data;
+	struct drm_gem_object *obj;
+	unsigned long addr;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+	if (obj == NULL) {
+		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+		return -EBADF;
+	}
+
+	down_write(&current->mm->mmap_sem);
+
+	addr = do_mmap(obj->filp, 0, args->size,
+		       PROT_READ | PROT_WRITE, MAP_SHARED,
+		       args->offset);
+
+	up_write(&current->mm->mmap_sem);
+
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	if (IS_ERR((void *) addr))
+		return addr;
+
+	args->hostptr = (uint32_t) addr;
+	return 0;
+}
+
+/* This function is deprecated */
+
+int
+kgsl_gem_prep_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_kgsl_gem_prep *args = data;
+	struct drm_gem_object *obj;
+	struct drm_kgsl_gem_object *priv;
+	int ret;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+	if (obj == NULL) {
+		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+		return -EBADF;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	priv = obj->driver_private;
+
+	ret = kgsl_gem_alloc_memory(obj);
+	if (ret) {
+		DRM_ERROR("Unable to allocate object memory\n");
+		drm_gem_object_unreference(obj);
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	if (priv->mmap_offset == 0) {
+		ret = kgsl_gem_create_mmap_offset(obj);
+		if (ret) {
+			drm_gem_object_unreference(obj);
+			mutex_unlock(&dev->struct_mutex);
+			return ret;
+		}
+	}
+
+	args->offset = priv->mmap_offset;
+	args->phys = priv->memdesc.physaddr;
+
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+int
+kgsl_gem_get_bufinfo_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	struct drm_kgsl_gem_bufinfo *args = data;
+	struct drm_gem_object *obj;
+	struct drm_kgsl_gem_object *priv;
+	int ret = -EINVAL;
+	int index;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+	if (obj == NULL) {
+		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+		return -EBADF;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	priv = obj->driver_private;
+
+	if (!kgsl_gem_memory_allocated(obj)) {
+		DRM_ERROR("Memory not allocated for this object\n");
+		goto out;
+	}
+
+	for (index = 0; index < priv->bufcount; index++) {
+		args->offset[index] = priv->bufs[index].offset;
+		args->gpuaddr[index] = priv->bufs[index].gpuaddr;
+	}
+
+	args->count = priv->bufcount;
+	args->active = priv->active;
+
+	ret = 0;
+
+out:
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+int
+kgsl_gem_set_bufcount_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	struct drm_kgsl_gem_bufcount *args = data;
+	struct drm_gem_object *obj;
+	struct drm_kgsl_gem_object *priv;
+	int ret = -EINVAL;
+
+	if (args->bufcount < 1 || args->bufcount > DRM_KGSL_GEM_MAX_BUFFERS)
+		return -EINVAL;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+	if (obj == NULL) {
+		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+		return -EBADF;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	priv = obj->driver_private;
+
+	/* It is too much math to worry about what happens if we are already
+	   allocated, so just bail if we are */
+
+	if (kgsl_gem_memory_allocated(obj)) {
+		DRM_ERROR("Memory already allocated - cannot change"
+			  "number of buffers\n");
+		goto out;
+	}
+
+	priv->bufcount = args->bufcount;
+	ret = 0;
+
+out:
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+int
+kgsl_gem_set_active_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	struct drm_kgsl_gem_active *args = data;
+	struct drm_gem_object *obj;
+	struct drm_kgsl_gem_object *priv;
+	int ret = -EINVAL;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+	if (obj == NULL) {
+		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+		return -EBADF;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	priv = obj->driver_private;
+
+	if (args->active < 0 || args->active >= priv->bufcount) {
+		DRM_ERROR("Invalid active buffer %d\n", args->active);
+		goto out;
+	}
+
+	priv->active = args->active;
+	ret = 0;
+
+out:
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+int kgsl_gem_kmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct drm_gem_object *obj = vma->vm_private_data;
+	struct drm_device *dev = obj->dev;
+	struct drm_kgsl_gem_object *priv;
+	unsigned long offset, pg;
+	struct page *page;
+
+	mutex_lock(&dev->struct_mutex);
+
+	priv = obj->driver_private;
+
+	offset = (unsigned long) vmf->virtual_address - vma->vm_start;
+	pg = (unsigned long) priv->memdesc.hostptr + offset;
+
+	page = vmalloc_to_page((void *) pg);
+	if (!page) {
+		mutex_unlock(&dev->struct_mutex);
+		return VM_FAULT_SIGBUS;
+	}
+
+	get_page(page);
+	vmf->page = page;
+
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+int kgsl_gem_phys_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct drm_gem_object *obj = vma->vm_private_data;
+	struct drm_device *dev = obj->dev;
+	struct drm_kgsl_gem_object *priv;
+	unsigned long offset, pfn;
+	int ret = 0;
+
+	offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >>
+		PAGE_SHIFT;
+
+	mutex_lock(&dev->struct_mutex);
+
+	priv = obj->driver_private;
+
+	pfn = (priv->memdesc.physaddr >> PAGE_SHIFT) + offset;
+	ret = vm_insert_pfn(vma,
+			    (unsigned long) vmf->virtual_address, pfn);
+	mutex_unlock(&dev->struct_mutex);
+
+	switch (ret) {
+	case -ENOMEM:
+	case -EAGAIN:
+		return VM_FAULT_OOM;
+	case -EFAULT:
+		return VM_FAULT_SIGBUS;
+	default:
+		return VM_FAULT_NOPAGE;
+	}
+}
+
+static struct vm_operations_struct kgsl_gem_kmem_vm_ops = {
+	.fault = kgsl_gem_kmem_fault,
+	.open = drm_gem_vm_open,
+	.close = drm_gem_vm_close,
+};
+
+static struct vm_operations_struct kgsl_gem_phys_vm_ops = {
+	.fault = kgsl_gem_phys_fault,
+	.open = drm_gem_vm_open,
+	.close = drm_gem_vm_close,
+};
+
+/* This is a clone of the standard drm_gem_mmap function modified to allow
+   us to properly map KMEM regions as well as the PMEM regions */
+
+int msm_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_local_map *map = NULL;
+	struct drm_gem_object *obj;
+	struct drm_hash_item *hash;
+	struct drm_kgsl_gem_object *gpriv;
+	int ret = 0;
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
+		mutex_unlock(&dev->struct_mutex);
+		return drm_mmap(filp, vma);
+	}
+
+	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
+	if (!map ||
+	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
+		ret =  -EPERM;
+		goto out_unlock;
+	}
+
+	/* Check for valid size. */
+	if (map->size < vma->vm_end - vma->vm_start) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	obj = map->handle;
+
+	gpriv = obj->driver_private;
+
+	/* VM_PFNMAP is only for memory that doesn't use struct page
+	 * in other words, not "normal" memory.  If you try to use it
+	 * with "normal" memory then the mappings don't get flushed. */
+
+	if (TYPE_IS_MEM(gpriv->type)) {
+		vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
+		vma->vm_ops = &kgsl_gem_kmem_vm_ops;
+	} else {
+		vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP |
+			VM_DONTEXPAND;
+		vma->vm_ops = &kgsl_gem_phys_vm_ops;
+	}
+
+	vma->vm_private_data = map->handle;
+
+
+	/* Take care of requested caching policy */
+	if (gpriv->type == DRM_KGSL_GEM_TYPE_KMEM ||
+	    gpriv->type & DRM_KGSL_GEM_CACHE_MASK) {
+		if (gpriv->type & DRM_KGSL_GEM_CACHE_WBACKWA)
+			vma->vm_page_prot =
+			pgprot_writebackwacache(vma->vm_page_prot);
+		else if (gpriv->type & DRM_KGSL_GEM_CACHE_WBACK)
+				vma->vm_page_prot =
+				pgprot_writebackcache(vma->vm_page_prot);
+		else if (gpriv->type & DRM_KGSL_GEM_CACHE_WTHROUGH)
+				vma->vm_page_prot =
+				pgprot_writethroughcache(vma->vm_page_prot);
+		else
+			vma->vm_page_prot =
+			pgprot_writecombine(vma->vm_page_prot);
+	} else {
+		if (gpriv->type == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE)
+			vma->vm_page_prot =
+			pgprot_noncached(vma->vm_page_prot);
+		else
+			/* default pmem is WC */
+			vma->vm_page_prot =
+			pgprot_writecombine(vma->vm_page_prot);
+	}
+
+	/* flush out existing KMEM cached mappings if new ones are
+	 * of uncached type */
+	if (IS_MEM_UNCACHED(gpriv->type))
+		kgsl_cache_range_op(&gpriv->memdesc,
+				    KGSL_CACHE_OP_FLUSH);
+
+	/* Add the other memory types here */
+
+	/* Take a ref for this mapping of the object, so that the fault
+	 * handler can dereference the mmap offset's pointer to the object.
+	 * This reference is cleaned up by the corresponding vm_close
+	 * (which should happen whether the vma was created by this call, or
+	 * by a vm_open due to mremap or partial unmap or whatever).
+	 */
+	drm_gem_object_reference(obj);
+
+	vma->vm_file = filp;	/* Needed for drm_vm_open() */
+	drm_vm_open_locked(vma);
+
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+void
+cleanup_fence(struct drm_kgsl_gem_object_fence *fence, int check_waiting)
+{
+	int j;
+	struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
+	struct drm_kgsl_gem_object *unlock_obj;
+	struct drm_gem_object *obj;
+	struct drm_kgsl_gem_object_wait_list_entry *lock_next;
+
+	fence->ts_valid = 0;
+	fence->timestamp = -1;
+	fence->ts_device = -1;
+
+	/* Walk the list of buffers in this fence and clean up the
+	 * references.  Note that this can cause memory allocations
+	 * to be freed. */
+	for (j = fence->num_buffers; j > 0; j--) {
+		this_fence_entry =
+				(struct drm_kgsl_gem_object_fence_list_entry *)
+				fence->buffers_in_fence.prev;
+
+		this_fence_entry->in_use = 0;
+		obj = this_fence_entry->gem_obj;
+		unlock_obj = obj->driver_private;
+
+		/* Delete it from the list */
+
+		list_del(&this_fence_entry->list);
+
+		/* we are unlocking - see if there are other pids waiting */
+		if (check_waiting) {
+			if (!list_empty(&unlock_obj->wait_list)) {
+				lock_next =
+				(struct drm_kgsl_gem_object_wait_list_entry *)
+					unlock_obj->wait_list.prev;
+
+				list_del((struct list_head *)&lock_next->list);
+
+				unlock_obj->lockpid = 0;
+				wake_up_interruptible(
+						&lock_next->process_wait_q);
+				lock_next->pid = 0;
+
+			} else {
+				/* List is empty so set pid to 0 */
+				unlock_obj->lockpid = 0;
+			}
+		}
+
+		drm_gem_object_unreference(obj);
+	}
+	/* here all the buffers in the fence are released */
+	/* clear the fence entry */
+	fence->fence_id = ENTRY_EMPTY;
+}
+
+int
+find_empty_fence(void)
+{
+	int i;
+
+	for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
+		if (gem_buf_fence[i].fence_id == ENTRY_EMPTY) {
+			gem_buf_fence[i].fence_id = fence_id++;
+			gem_buf_fence[i].ts_valid = 0;
+			INIT_LIST_HEAD(&(gem_buf_fence[i].buffers_in_fence));
+			if (fence_id == 0xFFFFFFF0)
+				fence_id = 1;
+			return i;
+		} else {
+
+			/* Look for entries to be cleaned up */
+			if (gem_buf_fence[i].fence_id == ENTRY_NEEDS_CLEANUP)
+				cleanup_fence(&gem_buf_fence[i], 0);
+		}
+	}
+
+	return ENTRY_EMPTY;
+}
+
+int
+find_fence(int index)
+{
+	int i;
+
+	for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
+		if (gem_buf_fence[i].fence_id == index)
+			return i;
+	}
+
+	return ENTRY_EMPTY;
+}
+
+void
+wakeup_fence_entries(struct drm_kgsl_gem_object_fence *fence)
+{
+	struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
+	struct drm_kgsl_gem_object_wait_list_entry *lock_next;
+	struct drm_kgsl_gem_object *unlock_obj;
+	struct drm_gem_object *obj;
+
+	/* TS has expired when we get here */
+	fence->ts_valid = 0;
+	fence->timestamp = -1;
+	fence->ts_device = -1;
+
+	list_for_each_entry(this_fence_entry, &fence->buffers_in_fence, list) {
+		obj = this_fence_entry->gem_obj;
+		unlock_obj = obj->driver_private;
+
+		if (!list_empty(&unlock_obj->wait_list)) {
+			lock_next =
+				(struct drm_kgsl_gem_object_wait_list_entry *)
+					unlock_obj->wait_list.prev;
+
+			/* Unblock the pid */
+			lock_next->pid = 0;
+
+			/* Delete it from the list */
+			list_del((struct list_head *)&lock_next->list);
+
+			unlock_obj->lockpid = 0;
+			wake_up_interruptible(&lock_next->process_wait_q);
+
+		} else {
+			/* List is empty so set pid to 0 */
+			unlock_obj->lockpid = 0;
+		}
+	}
+	fence->fence_id = ENTRY_NEEDS_CLEANUP;  /* Mark it as needing cleanup */
+}
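+
+/* Fence slot lifecycle: find_empty_fence() hands out a slot whose
+ * fence_id is ENTRY_EMPTY, kgsl_gem_lock_handle_ioctl() fills it with
+ * the locked buffers, wakeup_fence_entries() marks it
+ * ENTRY_NEEDS_CLEANUP once its timestamp expires, and the slot is
+ * reclaimed lazily by cleanup_fence() on a later find_empty_fence()
+ * scan. */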
+
+static int kgsl_ts_notifier_cb(struct notifier_block *blk,
+			       unsigned long code, void *_param)
+{
+	struct drm_kgsl_gem_object_fence *fence;
+	struct kgsl_device *device = kgsl_get_device(code);
+	int i;
+
+	/* loop through the fences to see what things can be processed */
+
+	for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
+		fence = &gem_buf_fence[i];
+		if (!fence->ts_valid || fence->ts_device != code)
+			continue;
+
+		if (kgsl_check_timestamp(device, fence->timestamp))
+			wakeup_fence_entries(fence);
+	}
+
+	return 0;
+}
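+
+/* Fence lifecycle, as a rough sketch of how the pieces above fit together
+ * (inferred from this file rather than an external spec): the lock ioctl
+ * below takes a slot with find_empty_fence() and links each buffer's fence
+ * entry onto buffers_in_fence; unlock_on_ts marks the fence ts_valid; once
+ * the timestamp expires kgsl_ts_notifier_cb() calls wakeup_fence_entries(),
+ * which wakes any waiters and tags the slot ENTRY_NEEDS_CLEANUP so a later
+ * find_empty_fence() or an explicit unlock can run cleanup_fence(). */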
+
+int
+kgsl_gem_lock_handle_ioctl(struct drm_device *dev, void *data,
+						   struct drm_file *file_priv)
+{
+	/* The purpose of this function is to lock a given set of handles. */
+	/* The driver will maintain a list of locked handles. */
+	/* If a request comes in for a handle that's locked the thread will */
+	/* block until it's no longer in use. */
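+
+	/* A sketch of the expected flow, inferred from the argument struct
+	 * rather than a documented contract: userspace fills a
+	 * drm_kgsl_gem_lock_handles with num_handles, the GEM handle array
+	 * in handle_list and its pid, then issues this ioctl; the fence id
+	 * returned in lock_id is later passed to KGSL_GEM_UNLOCK_HANDLE or
+	 * KGSL_GEM_UNLOCK_ON_TS to release the buffers. */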
+
+	struct drm_kgsl_gem_lock_handles *args = data;
+	struct drm_gem_object *obj;
+	struct drm_kgsl_gem_object *priv;
+	struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
+	struct drm_kgsl_gem_object_fence *fence;
+	struct drm_kgsl_gem_object_wait_list_entry *lock_item;
+	int i, j;
+	int result = 0;
+	uint32_t *lock_list;
+	uint32_t *work_list = NULL;
+	int32_t fence_index;
+
+	/* copy in the data from user space */
+	lock_list = kzalloc(sizeof(uint32_t) * args->num_handles, GFP_KERNEL);
+	if (!lock_list) {
+		DRM_ERROR("Unable to allocate memory for lock list\n");
+		result = -ENOMEM;
+		goto error;
+	}
+
+	if (copy_from_user(lock_list, args->handle_list,
+			   sizeof(uint32_t) * args->num_handles)) {
+		DRM_ERROR("Unable to copy the lock list from the user\n");
+		result = -EFAULT;
+		goto free_handle_list;
+	}
+
+
+	work_list = lock_list;
+	mutex_lock(&dev->struct_mutex);
+
+	/* build the fence for this group of handles */
+	fence_index = find_empty_fence();
+	if (fence_index == ENTRY_EMPTY) {
+		DRM_ERROR("Unable to find an empty fence\n");
+		args->lock_id = 0xDEADBEEF;
+		result = -EFAULT;
+		goto out_unlock;
+	}
+
+	fence = &gem_buf_fence[fence_index];
+	gem_buf_fence[fence_index].num_buffers = args->num_handles;
+	args->lock_id = gem_buf_fence[fence_index].fence_id;
+
+	for (j = args->num_handles; j > 0; j--, lock_list++) {
+		obj = drm_gem_object_lookup(dev, file_priv, *lock_list);
+
+		if (obj == NULL) {
+			DRM_ERROR("Invalid GEM handle %x\n", *lock_list);
+			result = -EBADF;
+			goto out_unlock;
+		}
+
+		priv = obj->driver_private;
+		this_fence_entry = NULL;
+
+		/* get a fence entry to hook into the fence */
+		for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
+			if (!priv->fence_entries[i].in_use) {
+				this_fence_entry = &priv->fence_entries[i];
+				this_fence_entry->in_use = 1;
+				break;
+			}
+		}
+
+		if (this_fence_entry == NULL) {
+			fence->num_buffers = 0;
+			fence->fence_id = ENTRY_EMPTY;
+			args->lock_id = 0xDEADBEAD;
+			result = -EFAULT;
+			drm_gem_object_unreference(obj);
+			goto out_unlock;
+		}
+
+		/* We're trying to lock - add to a fence */
+		list_add((struct list_head *)this_fence_entry,
+				 &gem_buf_fence[fence_index].buffers_in_fence);
+		if (priv->lockpid) {
+
+			if (priv->lockpid == args->pid) {
+				/* Now that operations run asynchronously this */
+				/* happens when an op isn't done yet, so the */
+				/* handle is already locked by the calling pid */
+				continue;
+			}
+
+
+			/* if a pid already had it locked */
+			/* create and add to wait list */
+			for (i = 0; i < DRM_KGSL_HANDLE_WAIT_ENTRIES; i++) {
+				if (priv->wait_entries[i].in_use == 0) {
+					/* this one is empty */
+					lock_item = &priv->wait_entries[i];
+					lock_item->in_use = 1;
+					lock_item->pid = args->pid;
+					INIT_LIST_HEAD((struct list_head *)
+						&priv->wait_entries[i]);
+					break;
+				}
+			}
+
+			if (i == DRM_KGSL_HANDLE_WAIT_ENTRIES) {
+
+				result = -EFAULT;
+				drm_gem_object_unreference(obj);
+				goto out_unlock;
+			}
+
+			list_add_tail((struct list_head *)&lock_item->list,
+							&priv->wait_list);
+			mutex_unlock(&dev->struct_mutex);
+			/* here we need to block */
+			wait_event_interruptible_timeout(
+					priv->wait_entries[i].process_wait_q,
+					(priv->lockpid == 0),
+					msecs_to_jiffies(64));
+			mutex_lock(&dev->struct_mutex);
+			lock_item->in_use = 0;
+		}
+
+		/* Getting here means no one currently holds the lock */
+		priv->lockpid = args->pid;
+
+		args->lock_id = gem_buf_fence[fence_index].fence_id;
+	}
+	fence->lockpid = args->pid;
+
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+
+free_handle_list:
+	kfree(work_list);
+
+error:
+	return result;
+}
+
+int
+kgsl_gem_unlock_handle_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	struct drm_kgsl_gem_unlock_handles *args = data;
+	int result = 0;
+	int32_t fence_index;
+
+	mutex_lock(&dev->struct_mutex);
+	fence_index = find_fence(args->lock_id);
+	if (fence_index == ENTRY_EMPTY) {
+		DRM_ERROR("Invalid lock ID: %x\n", args->lock_id);
+		result = -EFAULT;
+		goto out_unlock;
+	}
+
+	cleanup_fence(&gem_buf_fence[fence_index], 1);
+
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+
+	return result;
+}
+
+
+int
+kgsl_gem_unlock_on_ts_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_kgsl_gem_unlock_on_ts *args = data;
+	int result = 0;
+	int ts_done = 0;
+	int32_t fence_index, ts_device;
+	struct drm_kgsl_gem_object_fence *fence;
+	struct kgsl_device *device;
+
+	if (args->type == DRM_KGSL_GEM_TS_3D)
+		ts_device = KGSL_DEVICE_3D0;
+	else if (args->type == DRM_KGSL_GEM_TS_2D)
+		ts_device = KGSL_DEVICE_2D0;
+	else {
+		result = -EINVAL;
+		goto error;
+	}
+
+	device = kgsl_get_device(ts_device);
+	ts_done = kgsl_check_timestamp(device, args->timestamp);
+
+	mutex_lock(&dev->struct_mutex);
+
+	fence_index = find_fence(args->lock_id);
+	if (fence_index == ENTRY_EMPTY) {
+		DRM_ERROR("Invalid lock ID: %x\n", args->lock_id);
+		result = -EFAULT;
+		goto out_unlock;
+	}
+
+	fence = &gem_buf_fence[fence_index];
+	fence->ts_device = ts_device;
+
+	if (!ts_done)
+		fence->ts_valid = 1;
+	else
+		cleanup_fence(fence, 1);
+
+
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+
+error:
+	return result;
+}
+
+struct drm_ioctl_desc kgsl_drm_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_CREATE, kgsl_gem_create_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_PREP, kgsl_gem_prep_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_SETMEMTYPE, kgsl_gem_setmemtype_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_GETMEMTYPE, kgsl_gem_getmemtype_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_BIND_GPU, kgsl_gem_bind_gpu_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_UNBIND_GPU, kgsl_gem_unbind_gpu_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_ALLOC, kgsl_gem_alloc_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_MMAP, kgsl_gem_mmap_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_GET_BUFINFO, kgsl_gem_get_bufinfo_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_SET_BUFCOUNT,
+		      kgsl_gem_set_bufcount_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_SET_ACTIVE, kgsl_gem_set_active_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_LOCK_HANDLE,
+				  kgsl_gem_lock_handle_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_UNLOCK_HANDLE,
+				  kgsl_gem_unlock_handle_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_UNLOCK_ON_TS,
+				  kgsl_gem_unlock_on_ts_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_CREATE_FD, kgsl_gem_create_fd_ioctl,
+		      DRM_MASTER),
+};
+
+static struct drm_driver driver = {
+	.driver_features = DRIVER_USE_PLATFORM_DEVICE | DRIVER_GEM,
+	.load = kgsl_drm_load,
+	.unload = kgsl_drm_unload,
+	.firstopen = kgsl_drm_firstopen,
+	.lastclose = kgsl_drm_lastclose,
+	.preclose = kgsl_drm_preclose,
+	.suspend = kgsl_drm_suspend,
+	.resume = kgsl_drm_resume,
+	.reclaim_buffers = drm_core_reclaim_buffers,
+	.gem_init_object = kgsl_gem_init_object,
+	.gem_free_object = kgsl_gem_free_object,
+	.ioctls = kgsl_drm_ioctls,
+
+	.fops = {
+		 .owner = THIS_MODULE,
+		 .open = drm_open,
+		 .release = drm_release,
+		 .unlocked_ioctl = drm_ioctl,
+		 .mmap = msm_drm_gem_mmap,
+		 .poll = drm_poll,
+		 .fasync = drm_fasync,
+		 },
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+int kgsl_drm_init(struct platform_device *dev)
+{
+	int i;
+
+	driver.num_ioctls = DRM_ARRAY_SIZE(kgsl_drm_ioctls);
+	driver.platform_device = dev;
+
+	INIT_LIST_HEAD(&kgsl_mem_list);
+
+	for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
+		gem_buf_fence[i].num_buffers = 0;
+		gem_buf_fence[i].ts_valid = 0;
+		gem_buf_fence[i].fence_id = ENTRY_EMPTY;
+	}
+
+	return drm_init(&driver);
+}
+
+void kgsl_drm_exit(void)
+{
+	drm_exit(&driver);
+}
diff --git a/drivers/gpu/msm/kgsl_log.h b/drivers/gpu/msm/kgsl_log.h
new file mode 100644
index 0000000..9fafcf4
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_log.h
@@ -0,0 +1,102 @@
+/* Copyright (c) 2002,2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __KGSL_LOG_H
+#define __KGSL_LOG_H
+
+extern unsigned int kgsl_cff_dump_enable;
+
+#define KGSL_LOG_INFO(dev, lvl, fmt, args...) \
+	do { \
+		if ((lvl) >= 6)  \
+			dev_info(dev, "|%s| " fmt, \
+					__func__, ##args);\
+	} while (0)
+
+#define KGSL_LOG_WARN(dev, lvl, fmt, args...) \
+	do { \
+		if ((lvl) >= 4)  \
+			dev_warn(dev, "|%s| " fmt, \
+					__func__, ##args);\
+	} while (0)
+
+#define KGSL_LOG_ERR(dev, lvl, fmt, args...) \
+	do { \
+		if ((lvl) >= 3)  \
+			dev_err(dev, "|%s| " fmt, \
+					__func__, ##args);\
+	} while (0)
+
+#define KGSL_LOG_CRIT(dev, lvl, fmt, args...) \
+	do { \
+		if ((lvl) >= 2) \
+			dev_crit(dev, "|%s| " fmt, \
+					__func__, ##args);\
+	} while (0)
+
+#define KGSL_LOG_POSTMORTEM_WRITE(_dev, fmt, args...) \
+	do { dev_crit(_dev->dev, fmt, ##args); } while (0)
+
+#define KGSL_LOG_DUMP(_dev, fmt, args...)	dev_err(_dev->dev, fmt, ##args)
+
+#define KGSL_DRV_INFO(_dev, fmt, args...) \
+KGSL_LOG_INFO(_dev->dev, _dev->drv_log, fmt, ##args)
+#define KGSL_DRV_WARN(_dev, fmt, args...) \
+KGSL_LOG_WARN(_dev->dev, _dev->drv_log, fmt, ##args)
+#define KGSL_DRV_ERR(_dev, fmt, args...)  \
+KGSL_LOG_ERR(_dev->dev, _dev->drv_log, fmt, ##args)
+#define KGSL_DRV_CRIT(_dev, fmt, args...) \
+KGSL_LOG_CRIT(_dev->dev, _dev->drv_log, fmt, ##args)
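+
+/* The numeric thresholds above follow the usual kernel log severities:
+ * a module log level of 6 or more enables info, 4 warnings, 3 errors and
+ * 2 critical messages.  As a minimal sketch (device and ret are just
+ * illustrative locals), with device->drv_log == 3,
+ * KGSL_DRV_ERR(device, "cmd failed %d\n", ret) prints, while
+ * KGSL_DRV_WARN() and KGSL_DRV_INFO() stay silent. */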
+
+#define KGSL_CMD_INFO(_dev, fmt, args...) \
+KGSL_LOG_INFO(_dev->dev, _dev->cmd_log, fmt, ##args)
+#define KGSL_CMD_WARN(_dev, fmt, args...) \
+KGSL_LOG_WARN(_dev->dev, _dev->cmd_log, fmt, ##args)
+#define KGSL_CMD_ERR(_dev, fmt, args...) \
+KGSL_LOG_ERR(_dev->dev, _dev->cmd_log, fmt, ##args)
+#define KGSL_CMD_CRIT(_dev, fmt, args...) \
+KGSL_LOG_CRIT(_dev->dev, _dev->cmd_log, fmt, ##args)
+
+#define KGSL_CTXT_INFO(_dev, fmt, args...) \
+KGSL_LOG_INFO(_dev->dev, _dev->ctxt_log, fmt, ##args)
+#define KGSL_CTXT_WARN(_dev, fmt, args...) \
+KGSL_LOG_WARN(_dev->dev, _dev->ctxt_log, fmt, ##args)
+#define KGSL_CTXT_ERR(_dev, fmt, args...)  \
+KGSL_LOG_ERR(_dev->dev, _dev->ctxt_log, fmt, ##args)
+#define KGSL_CTXT_CRIT(_dev, fmt, args...) \
+KGSL_LOG_CRIT(_dev->dev, _dev->ctxt_log, fmt, ##args)
+
+#define KGSL_MEM_INFO(_dev, fmt, args...) \
+KGSL_LOG_INFO(_dev->dev, _dev->mem_log, fmt, ##args)
+#define KGSL_MEM_WARN(_dev, fmt, args...) \
+KGSL_LOG_WARN(_dev->dev, _dev->mem_log, fmt, ##args)
+#define KGSL_MEM_ERR(_dev, fmt, args...)  \
+KGSL_LOG_ERR(_dev->dev, _dev->mem_log, fmt, ##args)
+#define KGSL_MEM_CRIT(_dev, fmt, args...) \
+KGSL_LOG_CRIT(_dev->dev, _dev->mem_log, fmt, ##args)
+
+#define KGSL_PWR_INFO(_dev, fmt, args...) \
+KGSL_LOG_INFO(_dev->dev, _dev->pwr_log, fmt, ##args)
+#define KGSL_PWR_WARN(_dev, fmt, args...) \
+KGSL_LOG_WARN(_dev->dev, _dev->pwr_log, fmt, ##args)
+#define KGSL_PWR_ERR(_dev, fmt, args...) \
+KGSL_LOG_ERR(_dev->dev, _dev->pwr_log, fmt, ##args)
+#define KGSL_PWR_CRIT(_dev, fmt, args...) \
+KGSL_LOG_CRIT(_dev->dev, _dev->pwr_log, fmt, ##args)
+
+/* Core error messages - these are for core KGSL functions that have
+   no device associated with them (such as memory) */
+
+#define KGSL_CORE_ERR(fmt, args...) \
+pr_err("kgsl: %s: " fmt, __func__, ##args)
+
+#endif /* __KGSL_LOG_H */
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
new file mode 100644
index 0000000..15ec0ec
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -0,0 +1,1151 @@
+/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/genalloc.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include "kgsl.h"
+#include "kgsl_mmu.h"
+#include "kgsl_device.h"
+#include "kgsl_sharedmem.h"
+
+#define KGSL_MMU_ALIGN_SHIFT    13
+#define KGSL_MMU_ALIGN_MASK     (~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))
+
+#define GSL_PT_PAGE_BITS_MASK	0x00000007
+#define GSL_PT_PAGE_ADDR_MASK	PAGE_MASK
+
+static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);
+
+static ssize_t
+sysfs_show_ptpool_entries(struct kobject *kobj,
+			  struct kobj_attribute *attr,
+			  char *buf)
+{
+	return sprintf(buf, "%d\n", kgsl_driver.ptpool.entries);
+}
+
+static ssize_t
+sysfs_show_ptpool_min(struct kobject *kobj,
+			 struct kobj_attribute *attr,
+			 char *buf)
+{
+	return sprintf(buf, "%d\n", kgsl_driver.ptpool.static_entries);
+}
+
+static ssize_t
+sysfs_show_ptpool_chunks(struct kobject *kobj,
+			 struct kobj_attribute *attr,
+			 char *buf)
+{
+	return sprintf(buf, "%d\n", kgsl_driver.ptpool.chunks);
+}
+
+static ssize_t
+sysfs_show_ptpool_ptsize(struct kobject *kobj,
+			 struct kobj_attribute *attr,
+			 char *buf)
+{
+	return sprintf(buf, "%d\n", kgsl_driver.ptpool.ptsize);
+}
+
+static struct kobj_attribute attr_ptpool_entries = {
+	.attr = { .name = "ptpool_entries", .mode = 0444 },
+	.show = sysfs_show_ptpool_entries,
+	.store = NULL,
+};
+
+static struct kobj_attribute attr_ptpool_min = {
+	.attr = { .name = "ptpool_min", .mode = 0444 },
+	.show = sysfs_show_ptpool_min,
+	.store = NULL,
+};
+
+static struct kobj_attribute attr_ptpool_chunks = {
+	.attr = { .name = "ptpool_chunks", .mode = 0444 },
+	.show = sysfs_show_ptpool_chunks,
+	.store = NULL,
+};
+
+static struct kobj_attribute attr_ptpool_ptsize = {
+	.attr = { .name = "ptpool_ptsize", .mode = 0444 },
+	.show = sysfs_show_ptpool_ptsize,
+	.store = NULL,
+};
+
+static struct attribute *ptpool_attrs[] = {
+	&attr_ptpool_entries.attr,
+	&attr_ptpool_min.attr,
+	&attr_ptpool_chunks.attr,
+	&attr_ptpool_ptsize.attr,
+	NULL,
+};
+
+static struct attribute_group ptpool_attr_group = {
+	.attrs = ptpool_attrs,
+};
+
+static int
+_kgsl_ptpool_add_entries(struct kgsl_ptpool *pool, int count, int dynamic)
+{
+	struct kgsl_ptpool_chunk *chunk;
+	size_t size = ALIGN(count * pool->ptsize, PAGE_SIZE);
+
+	BUG_ON(count == 0);
+
+	if (get_order(size) >= MAX_ORDER) {
+		KGSL_CORE_ERR("ptpool allocation is too big: %d\n", size);
+		return -EINVAL;
+	}
+
+	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
+	if (chunk == NULL) {
+		KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*chunk));
+		return -ENOMEM;
+	}
+
+	chunk->size = size;
+	chunk->count = count;
+	chunk->dynamic = dynamic;
+
+	chunk->data = dma_alloc_coherent(NULL, size,
+					 &chunk->phys, GFP_KERNEL);
+
+	if (chunk->data == NULL) {
+		KGSL_CORE_ERR("dma_alloc_coherent(%d) failed\n", size);
+		goto err;
+	}
+
+	chunk->bitmap = kzalloc(BITS_TO_LONGS(count) * 4, GFP_KERNEL);
+
+	if (chunk->bitmap == NULL) {
+		KGSL_CORE_ERR("kzalloc(%d) failed\n",
+			BITS_TO_LONGS(count) * 4);
+		goto err_dma;
+	}
+
+	list_add_tail(&chunk->list, &pool->list);
+
+	pool->chunks++;
+	pool->entries += count;
+
+	if (!dynamic)
+		pool->static_entries += count;
+
+	return 0;
+
+err_dma:
+	dma_free_coherent(NULL, chunk->size, chunk->data, chunk->phys);
+err:
+	kfree(chunk);
+	return -ENOMEM;
+}
+
+static void *
+_kgsl_ptpool_get_entry(struct kgsl_ptpool *pool, unsigned int *physaddr)
+{
+	struct kgsl_ptpool_chunk *chunk;
+
+	list_for_each_entry(chunk, &pool->list, list) {
+		int bit = find_first_zero_bit(chunk->bitmap, chunk->count);
+
+		if (bit >= chunk->count)
+			continue;
+
+		set_bit(bit, chunk->bitmap);
+		*physaddr = chunk->phys + (bit * pool->ptsize);
+
+		return chunk->data + (bit * pool->ptsize);
+	}
+
+	return NULL;
+}
+
+/**
+ * kgsl_ptpool_add
+ * @pool:  A pointer to a ptpool structure
+ * @count: Number of entries to add
+ *
+ * Add static entries to the pagetable pool.
+ */
+
+int
+kgsl_ptpool_add(struct kgsl_ptpool *pool, int count)
+{
+	int ret = 0;
+	BUG_ON(count == 0);
+
+	mutex_lock(&pool->lock);
+
+	/* Only 4MB can be allocated in one chunk, so larger allocations
+	   need to be split into multiple sections */
+
+	while (count) {
+		int entries = ((count * pool->ptsize) > SZ_4M) ?
+			SZ_4M / pool->ptsize : count;
+
+		/* Add the entries as static, i.e. they don't ever stand
+		   a chance of being removed */
+
+		ret =  _kgsl_ptpool_add_entries(pool, entries, 0);
+		if (ret)
+			break;
+
+		count -= entries;
+	}
+
+	mutex_unlock(&pool->lock);
+	return ret;
+}
+
+/**
+ * kgsl_ptpool_alloc
+ * @pool:  A pointer to a ptpool structure
+ * @physaddr: A pointer to store the physical address of the chunk
+ *
+ * Allocate a pagetable from the pool.  Returns the virtual address
+ * of the pagetable, the physical address is returned in physaddr
+ */
+
+void *kgsl_ptpool_alloc(struct kgsl_ptpool *pool, unsigned int *physaddr)
+{
+	void *addr = NULL;
+	int ret;
+
+	mutex_lock(&pool->lock);
+	addr = _kgsl_ptpool_get_entry(pool, physaddr);
+	if (addr)
+		goto done;
+
+	/* Add a chunk for 1 more pagetable and mark it as dynamic */
+	ret = _kgsl_ptpool_add_entries(pool, 1, 1);
+
+	if (ret)
+		goto done;
+
+	addr = _kgsl_ptpool_get_entry(pool, physaddr);
+done:
+	mutex_unlock(&pool->lock);
+	return addr;
+}
+
+static inline void _kgsl_ptpool_rm_chunk(struct kgsl_ptpool_chunk *chunk)
+{
+	list_del(&chunk->list);
+
+	if (chunk->data)
+		dma_free_coherent(NULL, chunk->size, chunk->data,
+			chunk->phys);
+	kfree(chunk->bitmap);
+	kfree(chunk);
+}
+
+/**
+ * kgsl_ptpool_free
+ * @pool:  A pointer to a ptpool structure
+ * @addr: A pointer to the virtual address to free
+ *
+ * Free a pagetable allocated from the pool
+ */
+
+void kgsl_ptpool_free(struct kgsl_ptpool *pool, void *addr)
+{
+	struct kgsl_ptpool_chunk *chunk, *tmp;
+
+	if (pool == NULL || addr == NULL)
+		return;
+
+	mutex_lock(&pool->lock);
+	list_for_each_entry_safe(chunk, tmp, &pool->list, list)  {
+		if (addr >=  chunk->data &&
+		    addr < chunk->data + chunk->size) {
+			int bit = ((unsigned long) (addr - chunk->data)) /
+				pool->ptsize;
+
+			clear_bit(bit, chunk->bitmap);
+			memset(addr, 0, pool->ptsize);
+
+			if (chunk->dynamic &&
+				bitmap_empty(chunk->bitmap, chunk->count))
+				_kgsl_ptpool_rm_chunk(chunk);
+
+			break;
+		}
+	}
+
+	mutex_unlock(&pool->lock);
+}
+
+void kgsl_ptpool_destroy(struct kgsl_ptpool *pool)
+{
+	struct kgsl_ptpool_chunk *chunk, *tmp;
+
+	if (pool == NULL)
+		return;
+
+	mutex_lock(&pool->lock);
+	list_for_each_entry_safe(chunk, tmp, &pool->list, list)
+		_kgsl_ptpool_rm_chunk(chunk);
+	mutex_unlock(&pool->lock);
+
+	memset(pool, 0, sizeof(*pool));
+}
+
+/**
+ * kgsl_ptpool_init
+ * @pool:  A pointer to a ptpool structure to initialize
+ * @ptsize: The size of each pagetable in the pool
+ * @entries: The number of initial entries to add to the pool
+ *
+ * Initialize a pool and allocate an initial chunk of entries.
+ */
+
+int kgsl_ptpool_init(struct kgsl_ptpool *pool, int ptsize, int entries)
+{
+	int ret = 0;
+	BUG_ON(ptsize == 0);
+
+	pool->ptsize = ptsize;
+	mutex_init(&pool->lock);
+	INIT_LIST_HEAD(&pool->list);
+
+	if (entries) {
+		ret = kgsl_ptpool_add(pool, entries);
+		if (ret)
+			return ret;
+	}
+
+	return sysfs_create_group(kgsl_driver.ptkobj, &ptpool_attr_group);
+}
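+
+/* Minimal lifecycle sketch for the pool API above (sizes and local names
+ * are illustrative only): the driver calls
+ * kgsl_ptpool_init(&kgsl_driver.ptpool, KGSL_PAGETABLE_SIZE, entries)
+ * once, each new pagetable then takes a slot with
+ * hostptr = kgsl_ptpool_alloc(&kgsl_driver.ptpool, &physaddr),
+ * hands it back with kgsl_ptpool_free(&kgsl_driver.ptpool, hostptr), and
+ * kgsl_ptpool_destroy() tears the whole pool down at unload. */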
+
+static int kgsl_cleanup_pt(struct kgsl_pagetable *pt)
+{
+	int i;
+	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
+		struct kgsl_device *device = kgsl_driver.devp[i];
+		if (device)
+			device->ftbl->cleanup_pt(device, pt);
+	}
+	return 0;
+}
+
+static void kgsl_destroy_pagetable(struct kref *kref)
+{
+	struct kgsl_pagetable *pagetable = container_of(kref,
+		struct kgsl_pagetable, refcount);
+	unsigned long flags;
+
+	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
+	list_del(&pagetable->list);
+	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
+
+	pagetable_remove_sysfs_objects(pagetable);
+
+	kgsl_cleanup_pt(pagetable);
+
+	kgsl_ptpool_free(&kgsl_driver.ptpool, pagetable->base.hostptr);
+
+	kgsl_driver.stats.coherent -= KGSL_PAGETABLE_SIZE;
+
+	if (pagetable->pool)
+		gen_pool_destroy(pagetable->pool);
+
+	kfree(pagetable->tlbflushfilter.base);
+	kfree(pagetable);
+}
+
+static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
+{
+	if (pagetable)
+		kref_put(&pagetable->refcount, kgsl_destroy_pagetable);
+}
+
+static struct kgsl_pagetable *
+kgsl_get_pagetable(unsigned long name)
+{
+	struct kgsl_pagetable *pt, *ret = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
+	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
+		if (pt->name == name) {
+			ret = pt;
+			kref_get(&ret->refcount);
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
+	return ret;
+}
+
+static struct kgsl_pagetable *
+_get_pt_from_kobj(struct kobject *kobj)
+{
+	unsigned long ptname;
+
+	if (!kobj)
+		return NULL;
+
+	if (sscanf(kobj->name, "%ld", &ptname) != 1)
+		return NULL;
+
+	return kgsl_get_pagetable(ptname);
+}
+
+static ssize_t
+sysfs_show_entries(struct kobject *kobj,
+		   struct kobj_attribute *attr,
+		   char *buf)
+{
+	struct kgsl_pagetable *pt;
+	int ret = 0;
+
+	pt = _get_pt_from_kobj(kobj);
+
+	if (pt)
+		ret += sprintf(buf, "%d\n", pt->stats.entries);
+
+	kgsl_put_pagetable(pt);
+	return ret;
+}
+
+static ssize_t
+sysfs_show_mapped(struct kobject *kobj,
+		  struct kobj_attribute *attr,
+		  char *buf)
+{
+	struct kgsl_pagetable *pt;
+	int ret = 0;
+
+	pt = _get_pt_from_kobj(kobj);
+
+	if (pt)
+		ret += sprintf(buf, "%d\n", pt->stats.mapped);
+
+	kgsl_put_pagetable(pt);
+	return ret;
+}
+
+static ssize_t
+sysfs_show_va_range(struct kobject *kobj,
+		    struct kobj_attribute *attr,
+		    char *buf)
+{
+	struct kgsl_pagetable *pt;
+	int ret = 0;
+
+	pt = _get_pt_from_kobj(kobj);
+
+	if (pt)
+		ret += sprintf(buf, "0x%x\n", pt->va_range);
+
+	kgsl_put_pagetable(pt);
+	return ret;
+}
+
+static ssize_t
+sysfs_show_max_mapped(struct kobject *kobj,
+		      struct kobj_attribute *attr,
+		      char *buf)
+{
+	struct kgsl_pagetable *pt;
+	int ret = 0;
+
+	pt = _get_pt_from_kobj(kobj);
+
+	if (pt)
+		ret += sprintf(buf, "%d\n", pt->stats.max_mapped);
+
+	kgsl_put_pagetable(pt);
+	return ret;
+}
+
+static ssize_t
+sysfs_show_max_entries(struct kobject *kobj,
+		       struct kobj_attribute *attr,
+		       char *buf)
+{
+	struct kgsl_pagetable *pt;
+	int ret = 0;
+
+	pt = _get_pt_from_kobj(kobj);
+
+	if (pt)
+		ret += sprintf(buf, "%d\n", pt->stats.max_entries);
+
+	kgsl_put_pagetable(pt);
+	return ret;
+}
+
+static struct kobj_attribute attr_entries = {
+	.attr = { .name = "entries", .mode = 0444 },
+	.show = sysfs_show_entries,
+	.store = NULL,
+};
+
+static struct kobj_attribute attr_mapped = {
+	.attr = { .name = "mapped", .mode = 0444 },
+	.show = sysfs_show_mapped,
+	.store = NULL,
+};
+
+static struct kobj_attribute attr_va_range = {
+	.attr = { .name = "va_range", .mode = 0444 },
+	.show = sysfs_show_va_range,
+	.store = NULL,
+};
+
+static struct kobj_attribute attr_max_mapped = {
+	.attr = { .name = "max_mapped", .mode = 0444 },
+	.show = sysfs_show_max_mapped,
+	.store = NULL,
+};
+
+static struct kobj_attribute attr_max_entries = {
+	.attr = { .name = "max_entries", .mode = 0444 },
+	.show = sysfs_show_max_entries,
+	.store = NULL,
+};
+
+static struct attribute *pagetable_attrs[] = {
+	&attr_entries.attr,
+	&attr_mapped.attr,
+	&attr_va_range.attr,
+	&attr_max_mapped.attr,
+	&attr_max_entries.attr,
+	NULL,
+};
+
+static struct attribute_group pagetable_attr_group = {
+	.attrs = pagetable_attrs,
+};
+
+static void
+pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable)
+{
+	if (pagetable->kobj)
+		sysfs_remove_group(pagetable->kobj,
+				   &pagetable_attr_group);
+
+	kobject_put(pagetable->kobj);
+}
+
+static int
+pagetable_add_sysfs_objects(struct kgsl_pagetable *pagetable)
+{
+	char ptname[16];
+	int ret = -ENOMEM;
+
+	snprintf(ptname, sizeof(ptname), "%d", pagetable->name);
+	pagetable->kobj = kobject_create_and_add(ptname,
+						 kgsl_driver.ptkobj);
+	if (pagetable->kobj == NULL)
+		goto err;
+
+	ret = sysfs_create_group(pagetable->kobj, &pagetable_attr_group);
+
+err:
+	if (ret) {
+		if (pagetable->kobj)
+			kobject_put(pagetable->kobj);
+
+		pagetable->kobj = NULL;
+	}
+
+	return ret;
+}
+
+static inline uint32_t
+kgsl_pt_entry_get(struct kgsl_pagetable *pt, uint32_t va)
+{
+	return (va - pt->va_base) >> PAGE_SHIFT;
+}
+
+static inline void
+kgsl_pt_map_set(struct kgsl_pagetable *pt, uint32_t pte, uint32_t val)
+{
+	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
+
+	writel_relaxed(val, &baseptr[pte]);
+}
+
+static inline uint32_t
+kgsl_pt_map_getaddr(struct kgsl_pagetable *pt, uint32_t pte)
+{
+	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
+	uint32_t ret = readl_relaxed(&baseptr[pte]) & GSL_PT_PAGE_ADDR_MASK;
+	return ret;
+}
+
+void kgsl_mh_intrcallback(struct kgsl_device *device)
+{
+	unsigned int status = 0;
+	unsigned int reg;
+
+	kgsl_regread(device, MH_INTERRUPT_STATUS, &status);
+	kgsl_regread(device, MH_AXI_ERROR, &reg);
+
+	if (status & MH_INTERRUPT_MASK__AXI_READ_ERROR)
+		KGSL_MEM_CRIT(device, "axi read error interrupt: %08x\n", reg);
+	else if (status & MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
+		KGSL_MEM_CRIT(device, "axi write error interrupt: %08x\n", reg);
+	else if (status & MH_INTERRUPT_MASK__MMU_PAGE_FAULT) {
+		unsigned int ptbase;
+		struct kgsl_pagetable *pt;
+		int ptid = -1;
+
+		kgsl_regread(device, MH_MMU_PAGE_FAULT, &reg);
+		kgsl_regread(device, MH_MMU_PT_BASE, &ptbase);
+
+		spin_lock(&kgsl_driver.ptlock);
+		list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
+			if (ptbase == pt->base.gpuaddr) {
+				ptid = (int) pt->name;
+				break;
+			}
+		}
+		spin_unlock(&kgsl_driver.ptlock);
+
+		KGSL_MEM_CRIT(device,
+			"mmu page fault: page=0x%lx pt=%d op=%s axi=%d\n",
+			reg & ~(PAGE_SIZE - 1), ptid,
+			reg & 0x02 ? "WRITE" : "READ", (reg >> 4) & 0xF);
+	} else
+		KGSL_MEM_WARN(device,
+			"bad bits in REG_MH_INTERRUPT_STATUS %08x\n", status);
+
+	kgsl_regwrite(device, MH_INTERRUPT_CLEAR, status);
+
+	/* TODO: figure out how to handle error interrupts.
+	 * Specifically, page faults should probably nuke the client that
+	 * caused them, but we don't have enough info to figure that out yet.
+	 */
+}
+EXPORT_SYMBOL(kgsl_mh_intrcallback);
+
+static int kgsl_setup_pt(struct kgsl_pagetable *pt)
+{
+	int i = 0;
+	int status = 0;
+
+	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
+		struct kgsl_device *device = kgsl_driver.devp[i];
+		if (device) {
+			status = device->ftbl->setup_pt(device, pt);
+			if (status)
+				goto error_pt;
+		}
+	}
+	return status;
+error_pt:
+	while (i >= 0) {
+		struct kgsl_device *device = kgsl_driver.devp[i];
+		if (device)
+			device->ftbl->cleanup_pt(device, pt);
+		i--;
+	}
+	return status;
+}
+
+static struct kgsl_pagetable *kgsl_mmu_createpagetableobject(
+				unsigned int name)
+{
+	int status = 0;
+	struct kgsl_pagetable *pagetable = NULL;
+	unsigned long flags;
+
+	pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
+	if (pagetable == NULL) {
+		KGSL_CORE_ERR("kzalloc(%d) failed\n",
+			sizeof(struct kgsl_pagetable));
+		return NULL;
+	}
+
+	kref_init(&pagetable->refcount);
+
+	spin_lock_init(&pagetable->lock);
+	pagetable->tlb_flags = 0;
+	pagetable->name = name;
+	pagetable->va_base = KGSL_PAGETABLE_BASE;
+	pagetable->va_range = CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
+	pagetable->last_superpte = 0;
+	pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(pagetable->va_range);
+
+	pagetable->tlbflushfilter.size = (pagetable->va_range /
+				(PAGE_SIZE * GSL_PT_SUPER_PTE * 8)) + 1;
+	pagetable->tlbflushfilter.base = (unsigned int *)
+			kzalloc(pagetable->tlbflushfilter.size, GFP_KERNEL);
+	if (!pagetable->tlbflushfilter.base) {
+		KGSL_CORE_ERR("kzalloc(%d) failed\n",
+			pagetable->tlbflushfilter.size);
+		goto err_alloc;
+	}
+	GSL_TLBFLUSH_FILTER_RESET();
+
+	pagetable->pool = gen_pool_create(PAGE_SHIFT, -1);
+	if (pagetable->pool == NULL) {
+		KGSL_CORE_ERR("gen_pool_create(%d) failed\n", PAGE_SHIFT);
+		goto err_flushfilter;
+	}
+
+	if (gen_pool_add(pagetable->pool, pagetable->va_base,
+				pagetable->va_range, -1)) {
+		KGSL_CORE_ERR("gen_pool_add failed\n");
+		goto err_pool;
+	}
+
+	pagetable->base.hostptr = kgsl_ptpool_alloc(&kgsl_driver.ptpool,
+		&pagetable->base.physaddr);
+
+	if (pagetable->base.hostptr == NULL)
+		goto err_pool;
+
+	/* ptpool allocations are from coherent memory, so update the
+	   device statistics accordingly */
+
+	KGSL_STATS_ADD(KGSL_PAGETABLE_SIZE, kgsl_driver.stats.coherent,
+		       kgsl_driver.stats.coherent_max);
+
+	pagetable->base.gpuaddr = pagetable->base.physaddr;
+	pagetable->base.size = KGSL_PAGETABLE_SIZE;
+
+	status = kgsl_setup_pt(pagetable);
+	if (status)
+		goto err_free_sharedmem;
+
+	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
+	list_add(&pagetable->list, &kgsl_driver.pagetable_list);
+	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
+
+	/* Create the sysfs entries */
+	pagetable_add_sysfs_objects(pagetable);
+
+	return pagetable;
+
+err_free_sharedmem:
+	kgsl_ptpool_free(&kgsl_driver.ptpool, pagetable->base.hostptr);
+err_pool:
+	gen_pool_destroy(pagetable->pool);
+err_flushfilter:
+	kfree(pagetable->tlbflushfilter.base);
+err_alloc:
+	kfree(pagetable);
+
+	return NULL;
+}
+
+struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name)
+{
+	struct kgsl_pagetable *pt;
+
+	pt = kgsl_get_pagetable(name);
+
+	if (pt == NULL)
+		pt = kgsl_mmu_createpagetableobject(name);
+
+	return pt;
+}
+
+void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
+{
+	kgsl_put_pagetable(pagetable);
+}
+
+void kgsl_default_setstate(struct kgsl_device *device, uint32_t flags)
+{
+	if (!kgsl_mmu_enabled())
+		return;
+
+	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
+		kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);
+		kgsl_regwrite(device, MH_MMU_PT_BASE,
+			device->mmu.hwpagetable->base.gpuaddr);
+	}
+
+	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
+		/* Invalidate all and tc */
+		kgsl_regwrite(device, MH_MMU_INVALIDATE,  0x00000003);
+	}
+}
+EXPORT_SYMBOL(kgsl_default_setstate);
+
+void kgsl_setstate(struct kgsl_device *device, uint32_t flags)
+{
+	if (device->ftbl->setstate)
+		device->ftbl->setstate(device, flags);
+}
+EXPORT_SYMBOL(kgsl_setstate);
+
+void kgsl_mmu_setstate(struct kgsl_device *device,
+				struct kgsl_pagetable *pagetable)
+{
+	struct kgsl_mmu *mmu = &device->mmu;
+
+	if (mmu->flags & KGSL_FLAGS_STARTED) {
+		/* If the page table is not current, set up the mmu to use
+		 * the newly specified page table
+		 */
+		if (mmu->hwpagetable != pagetable) {
+			mmu->hwpagetable = pagetable;
+			spin_lock(&mmu->hwpagetable->lock);
+			mmu->hwpagetable->tlb_flags &= ~(1<<device->id);
+			spin_unlock(&mmu->hwpagetable->lock);
+
+			/* call device specific set page table */
+			kgsl_setstate(mmu->device, KGSL_MMUFLAGS_TLBFLUSH |
+				KGSL_MMUFLAGS_PTUPDATE);
+		}
+	}
+}
+EXPORT_SYMBOL(kgsl_mmu_setstate);
+
+int kgsl_mmu_init(struct kgsl_device *device)
+{
+	/*
+	 * initialize device mmu
+	 *
+	 * call this with the global lock held
+	 */
+	int status = 0;
+	struct kgsl_mmu *mmu = &device->mmu;
+
+	mmu->device = device;
+
+	/* make sure aligned to pagesize */
+	BUG_ON(mmu->mpu_base & (PAGE_SIZE - 1));
+	BUG_ON((mmu->mpu_base + mmu->mpu_range) & (PAGE_SIZE - 1));
+
+	/* sub-client MMU lookups require address translation */
+	if ((mmu->config & ~0x1) > 0) {
+		/* make sure the virtual address range is a multiple of 64KB */
+		BUG_ON(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE & ((1 << 16) - 1));
+
+		/* allocate memory used for completing r/w operations that
+		 * cannot be mapped by the MMU
+		 */
+		status = kgsl_allocate_contiguous(&mmu->dummyspace, 64);
+		if (!status)
+			kgsl_sharedmem_set(&mmu->dummyspace, 0, 0,
+					   mmu->dummyspace.size);
+	}
+
+	return status;
+}
+
+int kgsl_mmu_start(struct kgsl_device *device)
+{
+	/*
+	 * initialize device mmu
+	 *
+	 * call this with the global lock held
+	 */
+
+	struct kgsl_mmu *mmu = &device->mmu;
+
+	if (mmu->flags & KGSL_FLAGS_STARTED)
+		return 0;
+
+	/* MMU not enabled */
+	if ((mmu->config & 0x1) == 0)
+		return 0;
+
+	mmu->flags |= KGSL_FLAGS_STARTED;
+
+	/* setup MMU and sub-client behavior */
+	kgsl_regwrite(device, MH_MMU_CONFIG, mmu->config);
+
+	/*
+	 * Interrupts are enabled on a per-device level when
+	 * kgsl_pwrctrl_irq() is called
+	 */
+
+	/* idle device */
+	kgsl_idle(device,  KGSL_TIMEOUT_DEFAULT);
+
+	/* define physical memory range accessible by the core */
+	kgsl_regwrite(device, MH_MMU_MPU_BASE, mmu->mpu_base);
+	kgsl_regwrite(device, MH_MMU_MPU_END,
+			mmu->mpu_base + mmu->mpu_range);
+
+	/* sub-client MMU lookups require address translation */
+	if ((mmu->config & ~0x1) > 0) {
+
+		kgsl_sharedmem_set(&mmu->dummyspace, 0, 0,
+				   mmu->dummyspace.size);
+
+		/* TRAN_ERROR needs a 32 byte (32 byte aligned) chunk of memory
+		 * to complete transactions in case of an MMU fault. Note that
+		 * we'll leave the bottom 32 bytes of the dummyspace for other
+		 * purposes (e.g. use it when dummy read cycles are needed
+		 * for other blocks) */
+		kgsl_regwrite(device, MH_MMU_TRAN_ERROR,
+			mmu->dummyspace.physaddr + 32);
+
+		if (mmu->defaultpagetable == NULL)
+			mmu->defaultpagetable =
+				kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
+
+		/* Return error if the default pagetable doesn't exist */
+		if (mmu->defaultpagetable == NULL)
+			return -ENOMEM;
+
+		mmu->hwpagetable = mmu->defaultpagetable;
+
+		kgsl_regwrite(device, MH_MMU_PT_BASE,
+			      mmu->hwpagetable->base.gpuaddr);
+		kgsl_regwrite(device, MH_MMU_VA_RANGE,
+			      (mmu->hwpagetable->va_base |
+			      (mmu->hwpagetable->va_range >> 16)));
+		kgsl_setstate(device, KGSL_MMUFLAGS_TLBFLUSH);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(kgsl_mmu_start);
+
+unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr)
+{
+	unsigned int physaddr = 0;
+	pgd_t *pgd_ptr = NULL;
+	pmd_t *pmd_ptr = NULL;
+	pte_t *pte_ptr = NULL, pte;
+
+	pgd_ptr = pgd_offset(current->mm, (unsigned long) virtaddr);
+	if (pgd_none(*pgd_ptr) || pgd_bad(*pgd_ptr)) {
+		KGSL_CORE_ERR("Invalid pgd entry\n");
+		return 0;
+	}
+
+	pmd_ptr = pmd_offset(pgd_ptr, (unsigned long) virtaddr);
+	if (pmd_none(*pmd_ptr) || pmd_bad(*pmd_ptr)) {
+		KGSL_CORE_ERR("Invalid pmd entry\n");
+		return 0;
+	}
+
+	pte_ptr = pte_offset_map(pmd_ptr, (unsigned long) virtaddr);
+	if (!pte_ptr) {
+		KGSL_CORE_ERR("pte_offset_map failed\n");
+		return 0;
+	}
+	pte = *pte_ptr;
+	physaddr = pte_pfn(pte);
+	pte_unmap(pte_ptr);
+	physaddr <<= PAGE_SHIFT;
+	return physaddr;
+}
+
+int
+kgsl_mmu_map(struct kgsl_pagetable *pagetable,
+				struct kgsl_memdesc *memdesc,
+				unsigned int protflags)
+{
+	int numpages;
+	unsigned int pte, ptefirst, ptelast, physaddr;
+	int flushtlb;
+	unsigned int offset = 0;
+
+	BUG_ON(protflags & ~(GSL_PT_PAGE_RV | GSL_PT_PAGE_WV));
+	BUG_ON(protflags == 0);
+
+	memdesc->gpuaddr = gen_pool_alloc_aligned(pagetable->pool,
+		memdesc->size, KGSL_MMU_ALIGN_SHIFT);
+
+	if (memdesc->gpuaddr == 0) {
+		KGSL_CORE_ERR("gen_pool_alloc(%d) failed\n", memdesc->size);
+		KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
+				pagetable->name, pagetable->stats.mapped,
+				pagetable->stats.entries);
+		return -ENOMEM;
+	}
+
+	numpages = (memdesc->size >> PAGE_SHIFT);
+
+	ptefirst = kgsl_pt_entry_get(pagetable, memdesc->gpuaddr);
+	ptelast = ptefirst + numpages;
+
+	pte = ptefirst;
+	flushtlb = 0;
+
+	/* tlb needs to be flushed when the first and last pte are not at
+	* superpte boundaries */
+	if ((ptefirst & (GSL_PT_SUPER_PTE - 1)) != 0 ||
+		((ptelast + 1) & (GSL_PT_SUPER_PTE-1)) != 0)
+		flushtlb = 1;
+
+	spin_lock(&pagetable->lock);
+	for (pte = ptefirst; pte < ptelast; pte++, offset += PAGE_SIZE) {
+#ifdef VERBOSE_DEBUG
+		/* check if PTE exists */
+		uint32_t val = kgsl_pt_map_getaddr(pagetable, pte);
+		BUG_ON(val != 0 && val != GSL_PT_PAGE_DIRTY);
+#endif
+		if ((pte & (GSL_PT_SUPER_PTE-1)) == 0)
+			if (GSL_TLBFLUSH_FILTER_ISDIRTY(pte / GSL_PT_SUPER_PTE))
+				flushtlb = 1;
+		/* mark pte as in use */
+
+		physaddr = memdesc->ops->physaddr(memdesc, offset);
+		BUG_ON(physaddr == 0);
+		kgsl_pt_map_set(pagetable, pte, physaddr | protflags);
+	}
+
+	/* Keep track of the statistics for the sysfs files */
+
+	KGSL_STATS_ADD(1, pagetable->stats.entries,
+		       pagetable->stats.max_entries);
+
+	KGSL_STATS_ADD(memdesc->size, pagetable->stats.mapped,
+		       pagetable->stats.max_mapped);
+
+	/* Post all writes to the pagetable */
+	wmb();
+
+	/* Invalidate tlb only if current page table used by GPU is the
+	 * pagetable that we used to allocate */
+	if (flushtlb) {
+		/*set all devices as needing flushing*/
+		pagetable->tlb_flags = UINT_MAX;
+		GSL_TLBFLUSH_FILTER_RESET();
+	}
+	spin_unlock(&pagetable->lock);
+
+	return 0;
+}
+
+int
+kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
+		struct kgsl_memdesc *memdesc)
+{
+	unsigned int numpages;
+	unsigned int pte, ptefirst, ptelast, superpte;
+	unsigned int range = memdesc->size;
+
+	/* All GPU addresses as assigned are page aligned, but some
+	   functions perturb the gpuaddr with an offset, so apply the
+	   mask here to make sure we have the right address */
+
+	unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;
+
+	if (range == 0 || gpuaddr == 0)
+		return 0;
+
+	numpages = (range >> PAGE_SHIFT);
+	if (range & (PAGE_SIZE - 1))
+		numpages++;
+
+	ptefirst = kgsl_pt_entry_get(pagetable, gpuaddr);
+	ptelast = ptefirst + numpages;
+
+	spin_lock(&pagetable->lock);
+	superpte = ptefirst - (ptefirst & (GSL_PT_SUPER_PTE-1));
+	GSL_TLBFLUSH_FILTER_SETDIRTY(superpte / GSL_PT_SUPER_PTE);
+	for (pte = ptefirst; pte < ptelast; pte++) {
+#ifdef VERBOSE_DEBUG
+		/* check if PTE exists */
+		BUG_ON(!kgsl_pt_map_getaddr(pagetable, pte));
+#endif
+		kgsl_pt_map_set(pagetable, pte, GSL_PT_PAGE_DIRTY);
+		superpte = pte - (pte & (GSL_PT_SUPER_PTE - 1));
+		if (pte == superpte)
+			GSL_TLBFLUSH_FILTER_SETDIRTY(superpte /
+				GSL_PT_SUPER_PTE);
+	}
+
+	/* Remove the statistics */
+	pagetable->stats.entries--;
+	pagetable->stats.mapped -= range;
+
+	/* Post all writes to the pagetable */
+	wmb();
+
+	spin_unlock(&pagetable->lock);
+
+	gen_pool_free(pagetable->pool, gpuaddr, range);
+
+	return 0;
+}
+EXPORT_SYMBOL(kgsl_mmu_unmap);
+
+int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
+			struct kgsl_memdesc *memdesc, unsigned int protflags)
+{
+	int result = -EINVAL;
+	unsigned int gpuaddr = 0;
+
+	if (memdesc == NULL) {
+		KGSL_CORE_ERR("invalid memdesc\n");
+		goto error;
+	}
+
+	gpuaddr = memdesc->gpuaddr;
+
+	result = kgsl_mmu_map(pagetable, memdesc, protflags);
+	if (result)
+		goto error;
+
+	/*global mappings must have the same gpu address in all pagetables*/
+	if (gpuaddr && gpuaddr != memdesc->gpuaddr) {
+		KGSL_CORE_ERR("pt %p addr mismatch phys 0x%08x "
+			"gpu 0x%08x 0x%08x\n", pagetable, memdesc->physaddr,
+			gpuaddr, memdesc->gpuaddr);
+		goto error_unmap;
+	}
+	return result;
+error_unmap:
+	kgsl_mmu_unmap(pagetable, memdesc);
+error:
+	return result;
+}
+EXPORT_SYMBOL(kgsl_mmu_map_global);
+
+int kgsl_mmu_stop(struct kgsl_device *device)
+{
+	/*
+	 *  stop device mmu
+	 *
+	 *  call this with the global lock held
+	 */
+	struct kgsl_mmu *mmu = &device->mmu;
+
+	if (mmu->flags & KGSL_FLAGS_STARTED) {
+		/* disable MMU */
+		kgsl_regwrite(device, MH_MMU_CONFIG, 0x00000000);
+
+		mmu->flags &= ~KGSL_FLAGS_STARTED;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(kgsl_mmu_stop);
+
+int kgsl_mmu_close(struct kgsl_device *device)
+{
+	/*
+	 *  close device mmu
+	 *
+	 *  call this with the global lock held
+	 */
+	struct kgsl_mmu *mmu = &device->mmu;
+
+	if (mmu->dummyspace.gpuaddr)
+		kgsl_sharedmem_free(&mmu->dummyspace);
+
+	if (mmu->defaultpagetable)
+		kgsl_mmu_putpagetable(mmu->defaultpagetable);
+
+	return 0;
+}
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
new file mode 100644
index 0000000..3425277
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -0,0 +1,273 @@
+/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __KGSL_MMU_H
+#define __KGSL_MMU_H
+
+/* Identifier for the global page table */
+/* Per process page tables will probably pass in the thread group
+   as an identifier */
+
+#define KGSL_MMU_GLOBAL_PT 0
+
+#define GSL_PT_SUPER_PTE 8
+#define GSL_PT_PAGE_WV		0x00000001
+#define GSL_PT_PAGE_RV		0x00000002
+#define GSL_PT_PAGE_DIRTY	0x00000004
+
+/* MMU registers - the register locations for all cores are the
+   same.  The method for getting to those locations differs between
+   2D and 3D, but the 2D and 3D register functions do that magic
+   for us */
+
+#define MH_MMU_CONFIG                0x0040
+#define MH_MMU_VA_RANGE              0x0041
+#define MH_MMU_PT_BASE               0x0042
+#define MH_MMU_PAGE_FAULT            0x0043
+#define MH_MMU_TRAN_ERROR            0x0044
+#define MH_MMU_INVALIDATE            0x0045
+#define MH_MMU_MPU_BASE              0x0046
+#define MH_MMU_MPU_END               0x0047
+
+#define MH_INTERRUPT_MASK            0x0A42
+#define MH_INTERRUPT_STATUS          0x0A43
+#define MH_INTERRUPT_CLEAR           0x0A44
+#define MH_AXI_ERROR                 0x0A45
+
+/* MH_MMU_CONFIG bit definitions */
+
+#define MH_MMU_CONFIG__RB_W_CLNT_BEHAVIOR__SHIFT           0x00000004
+#define MH_MMU_CONFIG__CP_W_CLNT_BEHAVIOR__SHIFT           0x00000006
+#define MH_MMU_CONFIG__CP_R0_CLNT_BEHAVIOR__SHIFT          0x00000008
+#define MH_MMU_CONFIG__CP_R1_CLNT_BEHAVIOR__SHIFT          0x0000000a
+#define MH_MMU_CONFIG__CP_R2_CLNT_BEHAVIOR__SHIFT          0x0000000c
+#define MH_MMU_CONFIG__CP_R3_CLNT_BEHAVIOR__SHIFT          0x0000000e
+#define MH_MMU_CONFIG__CP_R4_CLNT_BEHAVIOR__SHIFT          0x00000010
+#define MH_MMU_CONFIG__VGT_R0_CLNT_BEHAVIOR__SHIFT         0x00000012
+#define MH_MMU_CONFIG__VGT_R1_CLNT_BEHAVIOR__SHIFT         0x00000014
+#define MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT           0x00000016
+#define MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT           0x00000018
+
+/* MMU Flags */
+#define KGSL_MMUFLAGS_TLBFLUSH         0x10000000
+#define KGSL_MMUFLAGS_PTUPDATE         0x20000000
+
+#define MH_INTERRUPT_MASK__AXI_READ_ERROR                  0x00000001L
+#define MH_INTERRUPT_MASK__AXI_WRITE_ERROR                 0x00000002L
+#define MH_INTERRUPT_MASK__MMU_PAGE_FAULT                  0x00000004L
+
+#ifdef CONFIG_MSM_KGSL_MMU
+#define KGSL_MMU_INT_MASK \
+	(MH_INTERRUPT_MASK__AXI_READ_ERROR | \
+	 MH_INTERRUPT_MASK__AXI_WRITE_ERROR | \
+	 MH_INTERRUPT_MASK__MMU_PAGE_FAULT)
+#else
+#define KGSL_MMU_INT_MASK \
+	(MH_INTERRUPT_MASK__AXI_READ_ERROR | \
+	 MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
+#endif
+
+/* Macros to manage TLB flushing */
+#define GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS     (sizeof(unsigned char) * 8)
+#define GSL_TLBFLUSH_FILTER_GET(superpte)			     \
+	      (*((unsigned char *)				    \
+	      (((unsigned int)pagetable->tlbflushfilter.base)    \
+	      + (superpte / GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS))))
+#define GSL_TLBFLUSH_FILTER_SETDIRTY(superpte)				\
+	      (GSL_TLBFLUSH_FILTER_GET((superpte)) |= 1 <<	    \
+	      (superpte % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS))
+#define GSL_TLBFLUSH_FILTER_ISDIRTY(superpte)			 \
+	      (GSL_TLBFLUSH_FILTER_GET((superpte)) &		  \
+	      (1 << (superpte % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS)))
+#define GSL_TLBFLUSH_FILTER_RESET() memset(pagetable->tlbflushfilter.base,\
+				      0, pagetable->tlbflushfilter.size)
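+
+/* Each superpte (a run of GSL_PT_SUPER_PTE consecutive ptes) maps to one
+ * bit of the filter.  A worked instance of the macros above: superpte 19
+ * lands in byte 19 / 8 = 2 of tlbflushfilter.base, bit 19 % 8 = 3, so
+ * SETDIRTY(19) ors in 0x08 and ISDIRTY(19) tests that same bit.  The
+ * macros assume a local variable named 'pagetable' is in scope, as in
+ * kgsl_mmu.c. */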
+
+
+struct kgsl_device;
+
+struct kgsl_tlbflushfilter {
+	unsigned int *base;
+	unsigned int size;
+};
+
+struct kgsl_pagetable {
+	spinlock_t lock;
+	struct kref refcount;
+	struct kgsl_memdesc  base;
+	uint32_t      va_base;
+	unsigned int   va_range;
+	unsigned int   last_superpte;
+	unsigned int   max_entries;
+	struct gen_pool *pool;
+	struct list_head list;
+	unsigned int name;
+	/* Maintain filter to manage tlb flushing */
+	struct kgsl_tlbflushfilter tlbflushfilter;
+	unsigned int tlb_flags;
+	struct kobject *kobj;
+
+	struct {
+		unsigned int entries;
+		unsigned int mapped;
+		unsigned int max_mapped;
+		unsigned int max_entries;
+	} stats;
+};
+
+struct kgsl_mmu {
+	unsigned int     refcnt;
+	uint32_t      flags;
+	struct kgsl_device     *device;
+	unsigned int     config;
+	uint32_t        mpu_base;
+	int              mpu_range;
+	struct kgsl_memdesc    dummyspace;
+	/* current page table object being used by device mmu */
+	struct kgsl_pagetable  *defaultpagetable;
+	struct kgsl_pagetable  *hwpagetable;
+};
+
+struct kgsl_ptpool_chunk {
+	size_t size;
+	unsigned int count;
+	int dynamic;
+
+	void *data;
+	unsigned int phys;
+
+	unsigned long *bitmap;
+	struct list_head list;
+};
+
+struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name);
+
+#ifdef CONFIG_MSM_KGSL_MMU
+
+int kgsl_mmu_init(struct kgsl_device *device);
+int kgsl_mmu_start(struct kgsl_device *device);
+int kgsl_mmu_stop(struct kgsl_device *device);
+int kgsl_mmu_close(struct kgsl_device *device);
+void kgsl_mmu_setstate(struct kgsl_device *device,
+		      struct kgsl_pagetable *pagetable);
+int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
+		 struct kgsl_memdesc *memdesc,
+		 unsigned int protflags);
+int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
+			struct kgsl_memdesc *memdesc, unsigned int protflags);
+int kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
+		    struct kgsl_memdesc *memdesc);
+void kgsl_ptpool_destroy(struct kgsl_ptpool *pool);
+int kgsl_ptpool_init(struct kgsl_ptpool *pool, int ptsize, int entries);
+void kgsl_mh_intrcallback(struct kgsl_device *device);
+void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable);
+unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr);
+void kgsl_setstate(struct kgsl_device *device, uint32_t flags);
+void kgsl_default_setstate(struct kgsl_device *device, uint32_t flags);
+
+static inline int kgsl_mmu_enabled(void)
+{
+	return 1;
+}
+
+#else
+
+static inline int kgsl_mmu_enabled(void)
+{
+	return 0;
+}
+
+static inline int kgsl_mmu_init(struct kgsl_device *device)
+{
+	return 0;
+}
+
+static inline int kgsl_mmu_start(struct kgsl_device *device)
+{
+	return 0;
+}
+
+static inline int kgsl_mmu_stop(struct kgsl_device *device)
+{
+	return 0;
+}
+
+static inline int kgsl_mmu_close(struct kgsl_device *device)
+{
+	return 0;
+}
+
+static inline void kgsl_mmu_setstate(struct kgsl_device *device,
+				    struct kgsl_pagetable *pagetable) { }
+
+static inline int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
+		 struct kgsl_memdesc *memdesc,
+		 unsigned int protflags)
+{
+	memdesc->gpuaddr = memdesc->physaddr;
+	return 0;
+}
+
+static inline int kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
+				 struct kgsl_memdesc *memdesc)
+{
+	return 0;
+}
+
+static inline int kgsl_ptpool_init(struct kgsl_ptpool *pool, int ptsize,
+				    int entries)
+{
+	return 0;
+}
+
+static inline int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
+	struct kgsl_memdesc *memdesc, unsigned int protflags)
+{
+	memdesc->gpuaddr = memdesc->physaddr;
+	return 0;
+}
+
+static inline void kgsl_ptpool_destroy(struct kgsl_ptpool *pool) { }
+
+static inline void kgsl_mh_intrcallback(struct kgsl_device *device) { }
+
+static inline void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable) { }
+
+static inline unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr)
+{
+	return 0;
+}
+
+static inline void kgsl_setstate(struct kgsl_device *device, uint32_t flags)
+{ }
+
+static inline void kgsl_default_setstate(struct kgsl_device *device,
+	uint32_t flags) { }
+#endif
+
+static inline unsigned int kgsl_pt_get_flags(struct kgsl_pagetable *pt,
+					     enum kgsl_deviceid id)
+{
+	unsigned int result = 0;
+
+	if (pt == NULL)
+		return 0;
+
+	spin_lock(&pt->lock);
+	if (pt->tlb_flags & (1 << id)) {
+		result = KGSL_MMUFLAGS_TLBFLUSH;
+		pt->tlb_flags &= ~(1<<id);
+	}
+	spin_unlock(&pt->lock);
+	return result;
+}
+
+#endif /* __KGSL_MMU_H */
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
new file mode 100644
index 0000000..572e0e8
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -0,0 +1,643 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/interrupt.h>
+#include <mach/msm_iomap.h>
+#include <mach/msm_bus.h>
+
+#include "kgsl.h"
+#include "kgsl_pwrscale.h"
+#include "kgsl_device.h"
+
+#define GPU_SWFI_LATENCY	3
+
+void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
+				unsigned int new_level)
+{
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	if (new_level < (pwr->num_pwrlevels - 1) &&
+		new_level >= pwr->thermal_pwrlevel &&
+		new_level != pwr->active_pwrlevel) {
+		pwr->active_pwrlevel = new_level;
+		if (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags))
+			clk_set_rate(pwr->grp_clks[0],
+					pwr->pwrlevels[pwr->active_pwrlevel].
+					gpu_freq);
+		if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags))
+			if (pwr->pcl)
+				msm_bus_scale_client_update_request(pwr->pcl,
+					pwr->pwrlevels[pwr->active_pwrlevel].
+					bus_freq);
+		KGSL_PWR_WARN(device, "kgsl pwr level changed to %d\n",
+					  pwr->active_pwrlevel);
+	}
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
+
+static int __gpuclk_store(int max, struct device *dev,
+						  struct device_attribute *attr,
+						  const char *buf, size_t count)
+{
+	int ret, i, delta = 5000000;
+	unsigned long val;
+	struct kgsl_device *device = kgsl_device_from_dev(dev);
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+
+	ret = sscanf(buf, "%ld", &val);
+	if (ret != 1)
+		return count;
+
+	mutex_lock(&device->mutex);
+	for (i = 0; i < pwr->num_pwrlevels; i++) {
+		if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) {
+			if (max)
+				pwr->thermal_pwrlevel = i;
+			break;
+		}
+	}
+
+	if (i == pwr->num_pwrlevels)
+		goto done;
+
+	/*
+	 * If the current or requested clock speed is greater than the
+	 * thermal limit, bump down immediately.
+	 */
+
+	if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq >
+	    pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq)
+		kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
+	else if (!max)
+		kgsl_pwrctrl_pwrlevel_change(device, i);
+
+done:
+	mutex_unlock(&device->mutex);
+	return count;
+}
+
+static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf, size_t count)
+{
+	return __gpuclk_store(1, dev, attr, buf, count);
+}
+
+static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct kgsl_device *device = kgsl_device_from_dev(dev);
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
+}
+
+static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	return __gpuclk_store(0, dev, attr, buf, count);
+}
+
+static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct kgsl_device *device = kgsl_device_from_dev(dev);
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
+}
+
+static int kgsl_pwrctrl_pwrnap_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	char temp[20];
+	unsigned long val;
+	struct kgsl_device *device = kgsl_device_from_dev(dev);
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	int rc;
+
+	snprintf(temp, sizeof(temp), "%.*s",
+			 (int)min(count, sizeof(temp) - 1), buf);
+	rc = strict_strtoul(temp, 0, &val);
+	if (rc)
+		return rc;
+
+	mutex_lock(&device->mutex);
+
+	if (val == 1)
+		pwr->nap_allowed = true;
+	else if (val == 0)
+		pwr->nap_allowed = false;
+
+	mutex_unlock(&device->mutex);
+
+	return count;
+}
+
+static int kgsl_pwrctrl_pwrnap_show(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct kgsl_device *device = kgsl_device_from_dev(dev);
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	return sprintf(buf, "%d\n", pwr->nap_allowed);
+}
+
+
+static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	char temp[20];
+	unsigned long val;
+	struct kgsl_device *device = kgsl_device_from_dev(dev);
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	const long div = 1000/HZ;
+	static unsigned int org_interval_timeout = 1;
+	int rc;
+
+	snprintf(temp, sizeof(temp), "%.*s",
+			 (int)min(count, sizeof(temp) - 1), buf);
+	rc = strict_strtoul(temp, 0, &val);
+	if (rc)
+		return rc;
+
+	if (org_interval_timeout == 1)
+		org_interval_timeout = pwr->interval_timeout;
+
+	mutex_lock(&device->mutex);
+
+	/* Let the timeout be requested in ms, but convert to jiffies. */
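+	/* For example, with HZ == 100 the divisor is 10, so an 800 ms
+	 * request becomes 80 jiffies; anything below the platform's
+	 * original timeout is ignored. */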
+	val /= div;
+	if (val >= org_interval_timeout)
+		pwr->interval_timeout = val;
+
+	mutex_unlock(&device->mutex);
+
+	return count;
+}
+
+static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct kgsl_device *device = kgsl_device_from_dev(dev);
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	return sprintf(buf, "%d\n", pwr->interval_timeout);
+}
+
+DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
+DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
+	kgsl_pwrctrl_max_gpuclk_store);
+DEVICE_ATTR(pwrnap, 0644, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
+DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
+	kgsl_pwrctrl_idle_timer_store);
+
+static const struct device_attribute *pwrctrl_attr_list[] = {
+	&dev_attr_gpuclk,
+	&dev_attr_max_gpuclk,
+	&dev_attr_pwrnap,
+	&dev_attr_idle_timer,
+	NULL
+};
+
+int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
+{
+	return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
+}
+
+void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
+{
+	kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
+}
+
+void kgsl_pwrctrl_clk(struct kgsl_device *device, int state)
+{
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	int i = 0;
+	if (state == KGSL_PWRFLAGS_OFF) {
+		if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
+			&pwr->power_flags)) {
+			KGSL_PWR_INFO(device,
+				"clocks off, device %d\n", device->id);
+			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
+				if (pwr->grp_clks[i])
+					clk_disable(pwr->grp_clks[i]);
+			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
+				(device->requested_state != KGSL_STATE_NAP))
+				clk_set_rate(pwr->grp_clks[0],
+					pwr->pwrlevels[pwr->num_pwrlevels - 1].
+					gpu_freq);
+		}
+	} else if (state == KGSL_PWRFLAGS_ON) {
+		if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
+			&pwr->power_flags)) {
+			KGSL_PWR_INFO(device,
+				"clocks on, device %d\n", device->id);
+
+			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
+				(device->state != KGSL_STATE_NAP))
+				clk_set_rate(pwr->grp_clks[0],
+					pwr->pwrlevels[pwr->active_pwrlevel].
+						gpu_freq);
+
+			/* as the last step, enable grp_clk;
+			   this lets the GPU interrupt come through */
+			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
+				if (pwr->grp_clks[i])
+					clk_enable(pwr->grp_clks[i]);
+		}
+	}
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_clk);
+
+void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
+{
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+
+	if (state == KGSL_PWRFLAGS_OFF) {
+		if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
+			&pwr->power_flags)) {
+			KGSL_PWR_INFO(device,
+				"axi off, device %d\n", device->id);
+			if (pwr->ebi1_clk)
+				clk_disable(pwr->ebi1_clk);
+			if (pwr->pcl)
+				msm_bus_scale_client_update_request(pwr->pcl,
+								    0);
+		}
+	} else if (state == KGSL_PWRFLAGS_ON) {
+		if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
+			&pwr->power_flags)) {
+			KGSL_PWR_INFO(device,
+				"axi on, device %d\n", device->id);
+			if (pwr->ebi1_clk)
+				clk_enable(pwr->ebi1_clk);
+			if (pwr->pcl)
+				msm_bus_scale_client_update_request(pwr->pcl,
+					pwr->pwrlevels[pwr->active_pwrlevel].
+						bus_freq);
+		}
+	}
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_axi);
+
+void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
+{
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+
+	if (state == KGSL_PWRFLAGS_OFF) {
+		if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
+			&pwr->power_flags)) {
+			KGSL_PWR_INFO(device,
+				"power off, device %d\n", device->id);
+			if (pwr->gpu_reg)
+				regulator_disable(pwr->gpu_reg);
+		}
+	} else if (state == KGSL_PWRFLAGS_ON) {
+		if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
+			&pwr->power_flags)) {
+			KGSL_PWR_INFO(device,
+				"power on, device %d\n", device->id);
+			if (pwr->gpu_reg)
+				regulator_enable(pwr->gpu_reg);
+		}
+	}
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_pwrrail);
+
+void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
+{
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+
+	if (state == KGSL_PWRFLAGS_ON) {
+		if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
+			&pwr->power_flags)) {
+			KGSL_PWR_INFO(device,
+				"irq on, device %d\n", device->id);
+			enable_irq(pwr->interrupt_num);
+			device->ftbl->irqctrl(device, 1);
+		}
+	} else if (state == KGSL_PWRFLAGS_OFF) {
+		if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
+			&pwr->power_flags)) {
+			KGSL_PWR_INFO(device,
+				"irq off, device %d\n", device->id);
+			device->ftbl->irqctrl(device, 0);
+			disable_irq(pwr->interrupt_num);
+		}
+	}
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_irq);
+
+int kgsl_pwrctrl_init(struct kgsl_device *device)
+{
+	int i, result = 0;
+	struct clk *clk;
+	struct platform_device *pdev =
+		container_of(device->parentdev, struct platform_device, dev);
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
+	struct kgsl_device_pwr_data *pdata_pwr = &pdata_dev->pwr_data;
+	const char *clk_names[KGSL_MAX_CLKS] = {pwr->src_clk_name,
+						pdata_dev->clk.name.clk,
+						pdata_dev->clk.name.pclk,
+						pdata_dev->imem_clk_name.clk,
+						pdata_dev->imem_clk_name.pclk};
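+	/* grp_clks[0] is the source clock used for rate setting; slots 1..4
+	 * are the core and interface clocks named by the platform data. */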
+
+	/* acquire clocks */
+	for (i = 1; i < KGSL_MAX_CLKS; i++) {
+		if (clk_names[i]) {
+			clk = clk_get(&pdev->dev, clk_names[i]);
+			if (IS_ERR(clk))
+				goto clk_err;
+			pwr->grp_clks[i] = clk;
+		}
+	}
+	/* Make sure we have a source clk for freq setting */
+	clk = clk_get(&pdev->dev, clk_names[0]);
+	pwr->grp_clks[0] = (IS_ERR(clk)) ? pwr->grp_clks[1] : clk;
+
+	/* put the AXI bus into asynchronous mode with the graphics cores */
+	if (pdata_pwr->set_grp_async != NULL)
+		pdata_pwr->set_grp_async();
+
+	if (pdata_pwr->num_levels > KGSL_MAX_PWRLEVELS) {
+		KGSL_PWR_ERR(device, "invalid power level count: %d\n",
+					 pdata_pwr->num_levels);
+		result = -EINVAL;
+		goto done;
+	}
+	pwr->num_pwrlevels = pdata_pwr->num_levels;
+	pwr->active_pwrlevel = pdata_pwr->init_level;
+	for (i = 0; i < pdata_pwr->num_levels; i++) {
+		pwr->pwrlevels[i].gpu_freq =
+		(pdata_pwr->pwrlevel[i].gpu_freq > 0) ?
+		clk_round_rate(pwr->grp_clks[0],
+					   pdata_pwr->pwrlevel[i].
+					   gpu_freq) : 0;
+		pwr->pwrlevels[i].bus_freq =
+			pdata_pwr->pwrlevel[i].bus_freq;
+	}
+	/* Do not set_rate for targets in sync with AXI */
+	if (pwr->pwrlevels[0].gpu_freq > 0)
+		clk_set_rate(pwr->grp_clks[0], pwr->
+				pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);
+
+	pwr->gpu_reg = regulator_get(NULL, pwr->regulator_name);
+	if (IS_ERR(pwr->gpu_reg))
+		pwr->gpu_reg = NULL;
+
+	pwr->power_flags = 0;
+
+	pwr->nap_allowed = pdata_pwr->nap_allowed;
+	pwr->interval_timeout = pdata_pwr->idle_timeout;
+	pwr->ebi1_clk = clk_get(NULL, "ebi1_kgsl_clk");
+	if (IS_ERR(pwr->ebi1_clk))
+		pwr->ebi1_clk = NULL;
+	else
+		clk_set_rate(pwr->ebi1_clk,
+					 pwr->pwrlevels[pwr->active_pwrlevel].
+						bus_freq);
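+	/* If the platform supplies a bus scaling table, register a client so
+	 * AXI bandwidth votes can follow the active power level. */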
+	if (pdata_dev->clk.bus_scale_table != NULL) {
+		pwr->pcl =
+			msm_bus_scale_register_client(pdata_dev->clk.
+							bus_scale_table);
+		if (!pwr->pcl) {
+			KGSL_PWR_ERR(device,
+					"msm_bus_scale_register_client failed: "
+					"id %d table %p", device->id,
+					pdata_dev->clk.bus_scale_table);
+			result = -EINVAL;
+			goto done;
+		}
+	}
+
+	/* acquire interrupt */
+	pwr->interrupt_num =
+		platform_get_irq_byname(pdev, pwr->irq_name);
+
+	if (pwr->interrupt_num <= 0) {
+		KGSL_PWR_ERR(device, "platform_get_irq_byname failed: %d\n",
+					 pwr->interrupt_num);
+		result = -EINVAL;
+		goto done;
+	}
+
+	register_early_suspend(&device->display_off);
+	return result;
+
+clk_err:
+	result = PTR_ERR(clk);
+	KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
+				 clk_names[i], result);
+
+done:
+	return result;
+}
+
+void kgsl_pwrctrl_close(struct kgsl_device *device)
+{
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	int i;
+
+	KGSL_PWR_INFO(device, "close device %d\n", device->id);
+
+	unregister_early_suspend(&device->display_off);
+
+	if (pwr->interrupt_num > 0) {
+		if (pwr->have_irq) {
+			free_irq(pwr->interrupt_num, NULL);
+			pwr->have_irq = 0;
+		}
+		pwr->interrupt_num = 0;
+	}
+
+	clk_put(pwr->ebi1_clk);
+
+	if (pwr->pcl)
+		msm_bus_scale_unregister_client(pwr->pcl);
+
+	pwr->pcl = 0;
+
+	if (pwr->gpu_reg) {
+		regulator_put(pwr->gpu_reg);
+		pwr->gpu_reg = NULL;
+	}
+
+	for (i = 1; i < KGSL_MAX_CLKS; i++)
+		if (pwr->grp_clks[i]) {
+			clk_put(pwr->grp_clks[i]);
+			pwr->grp_clks[i] = NULL;
+		}
+
+	pwr->grp_clks[0] = NULL;
+	pwr->power_flags = 0;
+}
+
+void kgsl_idle_check(struct work_struct *work)
+{
+	struct kgsl_device *device = container_of(work, struct kgsl_device,
+							idle_check_ws);
+
+	mutex_lock(&device->mutex);
+	if (device->requested_state != KGSL_STATE_SLEEP)
+		kgsl_pwrscale_idle(device);
+
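+	/* Try to drop into the requested low-power state; if the GPU is
+	 * still busy, rearm the idle timer and check again later. */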
+	if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
+		if (kgsl_pwrctrl_sleep(device) != 0)
+			mod_timer(&device->idle_timer,
+					jiffies +
+					device->pwrctrl.interval_timeout);
+	} else if (device->state & (KGSL_STATE_HUNG |
+					KGSL_STATE_DUMP_AND_RECOVER)) {
+		device->requested_state = KGSL_STATE_NONE;
+	}
+
+	mutex_unlock(&device->mutex);
+}
+
+void kgsl_timer(unsigned long data)
+{
+	struct kgsl_device *device = (struct kgsl_device *) data;
+
+	KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
+	if (device->requested_state != KGSL_STATE_SUSPEND) {
+		device->requested_state = KGSL_STATE_SLEEP;
+		/* Have work run in a non-interrupt context. */
+		queue_work(device->work_queue, &device->idle_check_ws);
+	}
+}
+
+void kgsl_pre_hwaccess(struct kgsl_device *device)
+{
+	BUG_ON(!mutex_is_locked(&device->mutex));
+	if (device->state & (KGSL_STATE_SLEEP | KGSL_STATE_NAP))
+		kgsl_pwrctrl_wake(device);
+}
+EXPORT_SYMBOL(kgsl_pre_hwaccess);
+
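+/* Block while a suspend or dump-and-recover is in progress, dropping the
+ * device mutex around the wait so the other path can finish. */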
+void kgsl_check_suspended(struct kgsl_device *device)
+{
+	if (device->requested_state == KGSL_STATE_SUSPEND ||
+				device->state == KGSL_STATE_SUSPEND) {
+		mutex_unlock(&device->mutex);
+		wait_for_completion(&device->hwaccess_gate);
+		mutex_lock(&device->mutex);
+	}
+	if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
+		mutex_unlock(&device->mutex);
+		wait_for_completion(&device->recovery_gate);
+		mutex_lock(&device->mutex);
+	}
+}
+
+
+/******************************************************************/
+/* Caller must hold the device mutex. */
+int kgsl_pwrctrl_sleep(struct kgsl_device *device)
+{
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	KGSL_PWR_INFO(device, "sleep device %d\n", device->id);
+
+	/* Work through the legal state transitions */
+	if (device->requested_state == KGSL_STATE_NAP) {
+		if (device->ftbl->isidle(device))
+			goto nap;
+	} else if (device->requested_state == KGSL_STATE_SLEEP) {
+		if (device->state == KGSL_STATE_NAP ||
+			device->ftbl->isidle(device))
+			goto sleep;
+	}
+
+	device->requested_state = KGSL_STATE_NONE;
+	return -EBUSY;
+
+sleep:
+	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
+	if (pwr->pwrlevels[0].gpu_freq > 0)
+		clk_set_rate(pwr->grp_clks[0],
+				pwr->pwrlevels[pwr->num_pwrlevels - 1].
+				gpu_freq);
+	device->pwrctrl.time = 0;
+
+	kgsl_pwrscale_sleep(device);
+	goto clk_off;
+
+nap:
+	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+clk_off:
+	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
+
+	device->state = device->requested_state;
+	device->requested_state = KGSL_STATE_NONE;
+	wake_unlock(&device->idle_wakelock);
+	pm_qos_update_request(&device->pm_qos_req_dma,
+				PM_QOS_DEFAULT_VALUE);
+	KGSL_PWR_WARN(device, "state -> NAP/SLEEP(%d), device %d\n",
+				  device->state, device->id);
+
+	return 0;
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_sleep);
+
+/******************************************************************/
+/* Caller must hold the device mutex. */
+void kgsl_pwrctrl_wake(struct kgsl_device *device)
+{
+	if (device->state == KGSL_STATE_SUSPEND)
+		return;
+
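+	/* A full SLEEP turned off the AXI bus and notified the pwrscale
+	 * policy; NAP left those alone, so only the clocks need restoring. */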
+	if (device->state != KGSL_STATE_NAP) {
+		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
+		kgsl_pwrscale_wake(device);
+	}
+
+	/* Turn on the core clocks */
+	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);
+
+	/* Enable state before turning on irq */
+	device->state = KGSL_STATE_ACTIVE;
+	KGSL_PWR_WARN(device, "state -> ACTIVE, device %d\n", device->id);
+	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
+
+	/* Re-enable HW access */
+	mod_timer(&device->idle_timer,
+				jiffies + device->pwrctrl.interval_timeout);
+
+	wake_lock(&device->idle_wakelock);
+	pm_qos_update_request(&device->pm_qos_req_dma, GPU_SWFI_LATENCY);
+	KGSL_PWR_INFO(device, "wake return for device %d\n", device->id);
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_wake);
+
+void kgsl_pwrctrl_enable(struct kgsl_device *device)
+{
+	/* Order pwrrail/clk sequence based upon platform */
+	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
+	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);
+	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_enable);
+
+void kgsl_pwrctrl_disable(struct kgsl_device *device)
+{
+	/* Order pwrrail/clk sequence based upon platform */
+	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
+	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
+	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_disable);
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
new file mode 100644
index 0000000..fff5769
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -0,0 +1,76 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __KGSL_PWRCTRL_H
+#define __KGSL_PWRCTRL_H
+
+/*****************************************************************************
+** power flags
+*****************************************************************************/
+#define KGSL_PWRFLAGS_POWER_ON 0
+#define KGSL_PWRFLAGS_CLK_ON   1
+#define KGSL_PWRFLAGS_AXI_ON   2
+#define KGSL_PWRFLAGS_IRQ_ON   3
+
+#define KGSL_PWRFLAGS_ON   1
+#define KGSL_PWRFLAGS_OFF  0
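+
+/* The *_ON values above are bit numbers in kgsl_pwrctrl.power_flags;
+ * KGSL_PWRFLAGS_ON/OFF are the state arguments passed to the helpers. */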
+
+#define KGSL_PWRLEVEL_TURBO 0
+#define KGSL_PWRLEVEL_NOMINAL 1
+#define KGSL_MAX_CLKS 5
+
+struct platform_device;
+
+struct kgsl_pwrctrl {
+	int interrupt_num;
+	int have_irq;
+	struct clk *ebi1_clk;
+	struct clk *grp_clks[KGSL_MAX_CLKS];
+	unsigned long power_flags;
+	struct kgsl_pwrlevel pwrlevels[KGSL_MAX_PWRLEVELS];
+	unsigned int active_pwrlevel;
+	int thermal_pwrlevel;
+	unsigned int num_pwrlevels;
+	unsigned int interval_timeout;
+	struct regulator *gpu_reg;
+	uint32_t pcl;
+	unsigned int nap_allowed;
+	const char *regulator_name;
+	const char *irq_name;
+	const char *src_clk_name;
+	s64 time;
+};
+
+void kgsl_pwrctrl_clk(struct kgsl_device *device, int state);
+void kgsl_pwrctrl_axi(struct kgsl_device *device, int state);
+void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state);
+void kgsl_pwrctrl_irq(struct kgsl_device *device, int state);
+int kgsl_pwrctrl_init(struct kgsl_device *device);
+void kgsl_pwrctrl_close(struct kgsl_device *device);
+void kgsl_timer(unsigned long data);
+void kgsl_idle_check(struct work_struct *work);
+void kgsl_pre_hwaccess(struct kgsl_device *device);
+void kgsl_check_suspended(struct kgsl_device *device);
+int kgsl_pwrctrl_sleep(struct kgsl_device *device);
+void kgsl_pwrctrl_wake(struct kgsl_device *device);
+void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
+	unsigned int level);
+int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device);
+void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device);
+void kgsl_pwrctrl_enable(struct kgsl_device *device);
+void kgsl_pwrctrl_disable(struct kgsl_device *device);
+static inline unsigned long kgsl_get_clkrate(struct clk *clk)
+{
+	return (clk != NULL) ? clk_get_rate(clk) : 0;
+}
+
+#endif /* __KGSL_PWRCTRL_H */
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
new file mode 100644
index 0000000..0d3120f
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -0,0 +1,327 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+
+#include "kgsl.h"
+#include "kgsl_pwrscale.h"
+#include "kgsl_device.h"
+
+struct kgsl_pwrscale_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct kgsl_device *device, char *buf);
+	ssize_t (*store)(struct kgsl_device *device, const char *buf,
+			 size_t count);
+};
+
+#define to_pwrscale(k) container_of(k, struct kgsl_pwrscale, kobj)
+#define pwrscale_to_device(p) container_of(p, struct kgsl_device, pwrscale)
+#define to_device(k) container_of(k, struct kgsl_device, pwrscale_kobj)
+#define to_pwrscale_attr(a) \
+container_of(a, struct kgsl_pwrscale_attribute, attr)
+#define to_policy_attr(a) \
+container_of(a, struct kgsl_pwrscale_policy_attribute, attr)
+
+#define PWRSCALE_ATTR(_name, _mode, _show, _store) \
+struct kgsl_pwrscale_attribute pwrscale_attr_##_name = \
+__ATTR(_name, _mode, _show, _store)
+
+/* Master list of available policies */
+
+static struct kgsl_pwrscale_policy *kgsl_pwrscale_policies[] = {
+#ifdef CONFIG_MSM_SCM
+	&kgsl_pwrscale_policy_tz,
+#endif
+	NULL
+};
+
+static ssize_t pwrscale_policy_store(struct kgsl_device *device,
+				     const char *buf, size_t count)
+{
+	int i;
+	struct kgsl_pwrscale_policy *policy = NULL;
+
+	/* The special keyword "none" allows the user to detach all
+	   policies. */
+	if (!strncmp("none", buf, 4)) {
+		kgsl_pwrscale_detach_policy(device);
+		return count;
+	}
+
+	for (i = 0; kgsl_pwrscale_policies[i]; i++) {
+		if (!strncmp(kgsl_pwrscale_policies[i]->name, buf,
+			     strnlen(kgsl_pwrscale_policies[i]->name,
+				PAGE_SIZE))) {
+			policy = kgsl_pwrscale_policies[i];
+			break;
+		}
+	}
+
+	if (policy)
+		if (kgsl_pwrscale_attach_policy(device, policy))
+			return -EIO;
+
+	return count;
+}
+
+static ssize_t pwrscale_policy_show(struct kgsl_device *device, char *buf)
+{
+	int ret;
+
+	if (device->pwrscale.policy)
+		ret = snprintf(buf, PAGE_SIZE, "%s\n",
+			       device->pwrscale.policy->name);
+	else
+		ret = snprintf(buf, PAGE_SIZE, "none\n");
+
+	return ret;
+}
+
+PWRSCALE_ATTR(policy, 0644, pwrscale_policy_show, pwrscale_policy_store);
+
+static ssize_t pwrscale_avail_policies_show(struct kgsl_device *device,
+					    char *buf)
+{
+	int i, ret = 0;
+
+	for (i = 0; kgsl_pwrscale_policies[i]; i++) {
+		ret += snprintf(buf + ret, PAGE_SIZE - ret, "%s ",
+				kgsl_pwrscale_policies[i]->name);
+	}
+
+	ret += snprintf(buf + ret, PAGE_SIZE - ret, "none\n");
+	return ret;
+}
+PWRSCALE_ATTR(avail_policies, 0444, pwrscale_avail_policies_show, NULL);
+
+static struct attribute *pwrscale_attrs[] = {
+	&pwrscale_attr_policy.attr,
+	&pwrscale_attr_avail_policies.attr,
+	NULL
+};
+
+static ssize_t policy_sysfs_show(struct kobject *kobj,
+				   struct attribute *attr, char *buf)
+{
+	struct kgsl_pwrscale *pwrscale = to_pwrscale(kobj);
+	struct kgsl_device *device = pwrscale_to_device(pwrscale);
+	struct kgsl_pwrscale_policy_attribute *pattr = to_policy_attr(attr);
+	ssize_t ret;
+
+	if (pattr->show)
+		ret = pattr->show(device, pwrscale, buf);
+	else
+		ret = -EIO;
+
+	return ret;
+}
+
+static ssize_t policy_sysfs_store(struct kobject *kobj,
+				    struct attribute *attr,
+				    const char *buf, size_t count)
+{
+	struct kgsl_pwrscale *pwrscale = to_pwrscale(kobj);
+	struct kgsl_device *device = pwrscale_to_device(pwrscale);
+	struct kgsl_pwrscale_policy_attribute *pattr = to_policy_attr(attr);
+	ssize_t ret;
+
+	if (pattr->store)
+		ret = pattr->store(device, pwrscale, buf, count);
+	else
+		ret = -EIO;
+
+	return ret;
+}
+
+static void policy_sysfs_release(struct kobject *kobj)
+{
+}
+
+static ssize_t pwrscale_sysfs_show(struct kobject *kobj,
+				   struct attribute *attr, char *buf)
+{
+	struct kgsl_device *device = to_device(kobj);
+	struct kgsl_pwrscale_attribute *pattr = to_pwrscale_attr(attr);
+	ssize_t ret;
+
+	if (pattr->show)
+		ret = pattr->show(device, buf);
+	else
+		ret = -EIO;
+
+	return ret;
+}
+
+static ssize_t pwrscale_sysfs_store(struct kobject *kobj,
+				    struct attribute *attr,
+				    const char *buf, size_t count)
+{
+	struct kgsl_device *device = to_device(kobj);
+	struct kgsl_pwrscale_attribute *pattr = to_pwrscale_attr(attr);
+	ssize_t ret;
+
+	if (pattr->store)
+		ret = pattr->store(device, buf, count);
+	else
+		ret = -EIO;
+
+	return ret;
+}
+
+static void pwrscale_sysfs_release(struct kobject *kobj)
+{
+}
+
+static const struct sysfs_ops policy_sysfs_ops = {
+	.show = policy_sysfs_show,
+	.store = policy_sysfs_store
+};
+
+static const struct sysfs_ops pwrscale_sysfs_ops = {
+	.show = pwrscale_sysfs_show,
+	.store = pwrscale_sysfs_store
+};
+
+static struct kobj_type ktype_pwrscale_policy = {
+	.sysfs_ops = &policy_sysfs_ops,
+	.default_attrs = NULL,
+	.release = policy_sysfs_release
+};
+
+static struct kobj_type ktype_pwrscale = {
+	.sysfs_ops = &pwrscale_sysfs_ops,
+	.default_attrs = pwrscale_attrs,
+	.release = pwrscale_sysfs_release
+};
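+
+/* ktype_pwrscale backs the top-level "pwrscale" kobject created in
+ * kgsl_pwrscale_init(); ktype_pwrscale_policy backs the per-policy
+ * subdirectory added when a policy attaches. */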
+
+void kgsl_pwrscale_sleep(struct kgsl_device *device)
+{
+	if (device->pwrscale.policy && device->pwrscale.policy->sleep)
+		device->pwrscale.policy->sleep(device, &device->pwrscale);
+}
+EXPORT_SYMBOL(kgsl_pwrscale_sleep);
+
+void kgsl_pwrscale_wake(struct kgsl_device *device)
+{
+	if (device->pwrscale.policy && device->pwrscale.policy->wake)
+		device->pwrscale.policy->wake(device, &device->pwrscale);
+}
+EXPORT_SYMBOL(kgsl_pwrscale_wake);
+
+void kgsl_pwrscale_busy(struct kgsl_device *device)
+{
+	if (device->pwrscale.policy && device->pwrscale.policy->busy)
+		device->pwrscale.policy->busy(device, &device->pwrscale);
+}
+
+void kgsl_pwrscale_idle(struct kgsl_device *device)
+{
+	if (device->pwrscale.policy && device->pwrscale.policy->idle)
+		device->pwrscale.policy->idle(device, &device->pwrscale);
+}
+EXPORT_SYMBOL(kgsl_pwrscale_idle);
+
+int kgsl_pwrscale_policy_add_files(struct kgsl_device *device,
+				   struct kgsl_pwrscale *pwrscale,
+				   struct attribute_group *attr_group)
+{
+	int ret;
+
+	ret = kobject_add(&pwrscale->kobj, &device->pwrscale_kobj,
+		"%s", pwrscale->policy->name);
+
+	if (ret)
+		return ret;
+
+	ret = sysfs_create_group(&pwrscale->kobj, attr_group);
+
+	if (ret) {
+		kobject_del(&pwrscale->kobj);
+		kobject_put(&pwrscale->kobj);
+	}
+
+	return ret;
+}
+
+void kgsl_pwrscale_policy_remove_files(struct kgsl_device *device,
+				       struct kgsl_pwrscale *pwrscale,
+				       struct attribute_group *attr_group)
+{
+	sysfs_remove_group(&pwrscale->kobj, attr_group);
+	kobject_del(&pwrscale->kobj);
+	kobject_put(&pwrscale->kobj);
+}
+
+static void _kgsl_pwrscale_detach_policy(struct kgsl_device *device)
+{
+	if (device->pwrscale.policy != NULL)
+		device->pwrscale.policy->close(device, &device->pwrscale);
+	device->pwrscale.policy = NULL;
+}
+
+void kgsl_pwrscale_detach_policy(struct kgsl_device *device)
+{
+	mutex_lock(&device->mutex);
+	_kgsl_pwrscale_detach_policy(device);
+	mutex_unlock(&device->mutex);
+}
+EXPORT_SYMBOL(kgsl_pwrscale_detach_policy);
+
+int kgsl_pwrscale_attach_policy(struct kgsl_device *device,
+				struct kgsl_pwrscale_policy *policy)
+{
+	int ret = 0;
+
+	mutex_lock(&device->mutex);
+
+	if (device->pwrscale.policy == policy)
+		goto done;
+
+	if (device->pwrscale.policy != NULL)
+		_kgsl_pwrscale_detach_policy(device);
+
+	device->pwrscale.policy = policy;
+
+	if (policy) {
+		ret = device->pwrscale.policy->init(device, &device->pwrscale);
+		if (ret)
+			device->pwrscale.policy = NULL;
+	}
+
+done:
+	mutex_unlock(&device->mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(kgsl_pwrscale_attach_policy);
+
+int kgsl_pwrscale_init(struct kgsl_device *device)
+{
+	int ret;
+
+	ret = kobject_init_and_add(&device->pwrscale_kobj, &ktype_pwrscale,
+		&device->dev->kobj, "pwrscale");
+
+	if (ret)
+		return ret;
+
+	kobject_init(&device->pwrscale.kobj, &ktype_pwrscale_policy);
+	return ret;
+}
+EXPORT_SYMBOL(kgsl_pwrscale_init);
+
+void kgsl_pwrscale_close(struct kgsl_device *device)
+{
+	kobject_put(&device->pwrscale_kobj);
+}
+EXPORT_SYMBOL(kgsl_pwrscale_close);
diff --git a/drivers/gpu/msm/kgsl_pwrscale.h b/drivers/gpu/msm/kgsl_pwrscale.h
new file mode 100644
index 0000000..6904608
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_pwrscale.h
@@ -0,0 +1,75 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __KGSL_PWRSCALE_H
+#define __KGSL_PWRSCALE_H
+
+struct kgsl_pwrscale;
+
+struct kgsl_pwrscale_policy  {
+	const char *name;
+	int (*init)(struct kgsl_device *device,
+		struct kgsl_pwrscale *pwrscale);
+	void (*close)(struct kgsl_device *device,
+		struct kgsl_pwrscale *pwrscale);
+	void (*idle)(struct kgsl_device *device,
+		struct kgsl_pwrscale *pwrscale);
+	void (*busy)(struct kgsl_device *device,
+		struct kgsl_pwrscale *pwrscale);
+	void (*sleep)(struct kgsl_device *device,
+		struct kgsl_pwrscale *pwrscale);
+	void (*wake)(struct kgsl_device *device,
+		struct kgsl_pwrscale *pwrscale);
+};
+
+struct kgsl_pwrscale {
+	struct kgsl_pwrscale_policy *policy;
+	struct kobject kobj;
+	void *priv;
+};
+
+struct kgsl_pwrscale_policy_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct kgsl_device *device,
+			struct kgsl_pwrscale *pwrscale, char *buf);
+	ssize_t (*store)(struct kgsl_device *device,
+			 struct kgsl_pwrscale *pwrscale, const char *buf,
+			 size_t count);
+};
+
+#define PWRSCALE_POLICY_ATTR(_name, _mode, _show, _store)          \
+	struct kgsl_pwrscale_policy_attribute policy_attr_##_name = \
+		__ATTR(_name, _mode, _show, _store)
+
+extern struct kgsl_pwrscale_policy kgsl_pwrscale_policy_tz;
+
+int kgsl_pwrscale_init(struct kgsl_device *device);
+void kgsl_pwrscale_close(struct kgsl_device *device);
+
+int kgsl_pwrscale_attach_policy(struct kgsl_device *device,
+	struct kgsl_pwrscale_policy *policy);
+void kgsl_pwrscale_detach_policy(struct kgsl_device *device);
+
+void kgsl_pwrscale_idle(struct kgsl_device *device);
+void kgsl_pwrscale_busy(struct kgsl_device *device);
+void kgsl_pwrscale_sleep(struct kgsl_device *device);
+void kgsl_pwrscale_wake(struct kgsl_device *device);
+
+int kgsl_pwrscale_policy_add_files(struct kgsl_device *device,
+				   struct kgsl_pwrscale *pwrscale,
+				   struct attribute_group *attr_group);
+
+void kgsl_pwrscale_policy_remove_files(struct kgsl_device *device,
+				       struct kgsl_pwrscale *pwrscale,
+				       struct attribute_group *attr_group);
+#endif
diff --git a/drivers/gpu/msm/kgsl_pwrscale_trustzone.c b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
new file mode 100644
index 0000000..a6fae30
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
@@ -0,0 +1,197 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <mach/socinfo.h>
+
+#include "kgsl.h"
+#include "kgsl_pwrscale.h"
+#include "kgsl_device.h"
+
+#define TZ_GOVERNOR_PERFORMANCE 0
+#define TZ_GOVERNOR_ONDEMAND    1
+
+struct tz_priv {
+	int governor;
+	unsigned int no_switch_cnt;
+};
+
+#define SWITCH_OFF		200
+#define TZ_UPDATE_ID		0x01404000
+#define TZ_RESET_ID		0x01403000
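+
+/* TZ_UPDATE_ID and TZ_RESET_ID are the secure-world service IDs passed to
+ * the SMC call below; SWITCH_OFF limits how many consecutive idle samples
+ * are reported while the GPU stays at the turbo level. */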
+
+#ifdef CONFIG_MSM_SECURE_IO
+/* Trap into the TrustZone and call functions there. */
+static int __secure_tz_entry(u32 cmd, u32 val)
+{
+	register u32 r0 asm("r0") = cmd;
+	register u32 r1 asm("r1") = 0x0;
+	register u32 r2 asm("r2") = val;
+
+	__iowmb();
+	asm(
+		__asmeq("%0", "r0")
+		__asmeq("%1", "r0")
+		__asmeq("%2", "r1")
+		__asmeq("%3", "r2")
+		"smc    #0      @ switch to secure world\n"
+		: "=r" (r0)
+		: "r" (r0), "r" (r1), "r" (r2)
+		);
+	return r0;
+}
+#else
+static int __secure_tz_entry(u32 cmd, u32 val)
+{
+	return 0;
+}
+#endif /* CONFIG_MSM_SECURE_IO */
+
+static ssize_t tz_governor_show(struct kgsl_device *device,
+				struct kgsl_pwrscale *pwrscale,
+				char *buf)
+{
+	struct tz_priv *priv = pwrscale->priv;
+	int ret;
+
+	if (priv->governor == TZ_GOVERNOR_ONDEMAND)
+		ret = snprintf(buf, 10, "ondemand\n");
+	else
+		ret = snprintf(buf, 13, "performance\n");
+
+	return ret;
+}
+
+static ssize_t tz_governor_store(struct kgsl_device *device,
+				struct kgsl_pwrscale *pwrscale,
+				 const char *buf, size_t count)
+{
+	char str[20];
+	struct tz_priv *priv = pwrscale->priv;
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	int ret;
+
+	ret = sscanf(buf, "%19s", str);
+	if (ret != 1)
+		return -EINVAL;
+
+	mutex_lock(&device->mutex);
+
+	if (!strncmp(str, "ondemand", 8))
+		priv->governor = TZ_GOVERNOR_ONDEMAND;
+	else if (!strncmp(str, "performance", 11))
+		priv->governor = TZ_GOVERNOR_PERFORMANCE;
+
+	if (priv->governor == TZ_GOVERNOR_PERFORMANCE)
+		kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
+
+	mutex_unlock(&device->mutex);
+	return count;
+}
+
+PWRSCALE_POLICY_ATTR(governor, 0644, tz_governor_show, tz_governor_store);
+
+static struct attribute *tz_attrs[] = {
+	&policy_attr_governor.attr,
+	NULL
+};
+
+static struct attribute_group tz_attr_group = {
+	.attrs = tz_attrs,
+};
+
+static void tz_wake(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
+{
+	struct tz_priv *priv = pwrscale->priv;
+	if (device->state != KGSL_STATE_NAP &&
+		priv->governor == TZ_GOVERNOR_ONDEMAND)
+		kgsl_pwrctrl_pwrlevel_change(device,
+					     device->pwrctrl.thermal_pwrlevel);
+}
+
+static void tz_idle(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
+{
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	struct tz_priv *priv = pwrscale->priv;
+	struct kgsl_power_stats stats;
+	int val;
+
+	/* In "performance" mode the clock speed always stays
+	   the same */
+
+	if (priv->governor == TZ_GOVERNOR_PERFORMANCE)
+		return;
+
+	device->ftbl->power_stats(device, &stats);
+	if (stats.total_time == 0)
+		return;
+
+	/* If the GPU has stayed in turbo mode for a while,
+	 * stop writing out values. */
+	if (pwr->active_pwrlevel)
+		priv->no_switch_cnt = 0;
+	else if (priv->no_switch_cnt > SWITCH_OFF)
+		return;
+	priv->no_switch_cnt++;
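+	/* Report the idle time to the secure side; a non-zero return is a
+	 * relative adjustment applied to the current power level. */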
+	val = __secure_tz_entry(TZ_UPDATE_ID,
+				stats.total_time - stats.busy_time);
+	if (val)
+		kgsl_pwrctrl_pwrlevel_change(device,
+					     pwr->active_pwrlevel + val);
+}
+
+static void tz_sleep(struct kgsl_device *device,
+	struct kgsl_pwrscale *pwrscale)
+{
+	struct tz_priv *priv = pwrscale->priv;
+
+	__secure_tz_entry(TZ_RESET_ID, 0);
+	priv->no_switch_cnt = 0;
+}
+
+static int tz_init(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
+{
+	struct tz_priv *priv;
+
+	/* Trustzone is only valid on some SoCs */
+	if (!cpu_is_msm8x60())
+		return -EINVAL;
+
+	priv = pwrscale->priv = kzalloc(sizeof(struct tz_priv), GFP_KERNEL);
+	if (pwrscale->priv == NULL)
+		return -ENOMEM;
+
+	priv->governor = TZ_GOVERNOR_ONDEMAND;
+	kgsl_pwrscale_policy_add_files(device, pwrscale, &tz_attr_group);
+
+	return 0;
+}
+
+static void tz_close(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
+{
+	kgsl_pwrscale_policy_remove_files(device, pwrscale, &tz_attr_group);
+	kfree(pwrscale->priv);
+	pwrscale->priv = NULL;
+}
+
+struct kgsl_pwrscale_policy kgsl_pwrscale_policy_tz = {
+	.name = "trustzone",
+	.init = tz_init,
+	.idle = tz_idle,
+	.sleep = tz_sleep,
+	.wake = tz_wake,
+	.close = tz_close
+};
+EXPORT_SYMBOL(kgsl_pwrscale_policy_tz);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
new file mode 100644
index 0000000..61e148c
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -0,0 +1,639 @@
+/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/vmalloc.h>
+#include <linux/memory_alloc.h>
+#include <asm/cacheflush.h>
+
+#include "kgsl.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_cffdump.h"
+#include "kgsl_device.h"
+
+static struct kgsl_process_private *
+_get_priv_from_kobj(struct kobject *kobj)
+{
+	struct kgsl_process_private *private;
+	unsigned long name;
+
+	if (!kobj)
+		return NULL;
+
+	if (sscanf(kobj->name, "%lu", &name) != 1)
+		return NULL;
+
+	list_for_each_entry(private, &kgsl_driver.process_list, list) {
+		if (private->pid == name)
+			return private;
+	}
+
+	return NULL;
+}
+
+/* sharedmem / memory sysfs files */
+
+static ssize_t
+process_show(struct kobject *kobj,
+	     struct kobj_attribute *attr,
+	     char *buf)
+{
+	struct kgsl_process_private *priv;
+	unsigned int val = 0;
+
+	mutex_lock(&kgsl_driver.process_mutex);
+	priv = _get_priv_from_kobj(kobj);
+
+	if (priv == NULL) {
+		mutex_unlock(&kgsl_driver.process_mutex);
+		return 0;
+	}
+
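+	/* Pick the per-process counter to report from the attribute name. */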
+	if (!strncmp(attr->attr.name, "user", 4))
+		val = priv->stats.user;
+	if (!strncmp(attr->attr.name, "user_max", 8))
+		val = priv->stats.user_max;
+	if (!strncmp(attr->attr.name, "mapped", 6))
+		val = priv->stats.mapped;
+	if (!strncmp(attr->attr.name, "mapped_max", 10))
+		val = priv->stats.mapped_max;
+	if (!strncmp(attr->attr.name, "flushes", 7))
+		val = priv->stats.flushes;
+
+	mutex_unlock(&kgsl_driver.process_mutex);
+	return snprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+#define KGSL_MEMSTAT_ATTR(_name, _show) \
+	static struct kobj_attribute attr_##_name = \
+	__ATTR(_name, 0444, _show, NULL)
+
+KGSL_MEMSTAT_ATTR(user, process_show);
+KGSL_MEMSTAT_ATTR(user_max, process_show);
+KGSL_MEMSTAT_ATTR(mapped, process_show);
+KGSL_MEMSTAT_ATTR(mapped_max, process_show);
+KGSL_MEMSTAT_ATTR(flushes, process_show);
+
+static struct attribute *process_attrs[] = {
+	&attr_user.attr,
+	&attr_user_max.attr,
+	&attr_mapped.attr,
+	&attr_mapped_max.attr,
+	&attr_flushes.attr,
+	NULL
+};
+
+static struct attribute_group process_attr_group = {
+	.attrs = process_attrs,
+};
+
+void
+kgsl_process_uninit_sysfs(struct kgsl_process_private *private)
+{
+	/* Remove the sysfs entry */
+	if (private->kobj) {
+		sysfs_remove_group(private->kobj, &process_attr_group);
+		kobject_put(private->kobj);
+	}
+}
+
+void
+kgsl_process_init_sysfs(struct kgsl_process_private *private)
+{
+	unsigned char name[16];
+
+	/* Add an entry to the sysfs device */
+	snprintf(name, sizeof(name), "%d", private->pid);
+	private->kobj = kobject_create_and_add(name, kgsl_driver.prockobj);
+
+	/* sysfs failure isn't fatal, just annoying */
+	if (private->kobj != NULL) {
+		if (sysfs_create_group(private->kobj, &process_attr_group)) {
+			kobject_put(private->kobj);
+			private->kobj = NULL;
+		}
+	}
+}
+
+static int kgsl_drv_memstat_show(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	unsigned int val = 0;
+
+	if (!strncmp(attr->attr.name, "vmalloc", 7))
+		val = kgsl_driver.stats.vmalloc;
+	else if (!strncmp(attr->attr.name, "vmalloc_max", 11))
+		val = kgsl_driver.stats.vmalloc_max;
+	else if (!strncmp(attr->attr.name, "coherent", 8))
+		val = kgsl_driver.stats.coherent;
+	else if (!strncmp(attr->attr.name, "coherent_max", 12))
+		val = kgsl_driver.stats.coherent_max;
+	else if (!strncmp(attr->attr.name, "mapped", 6))
+		val = kgsl_driver.stats.mapped;
+	else if (!strncmp(attr->attr.name, "mapped_max", 10))
+		val = kgsl_driver.stats.mapped_max;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static int kgsl_drv_histogram_show(struct device *dev,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	int len = 0;
+	int i;
+
+	for (i = 0; i < 16; i++)
+		len += sprintf(buf + len, "%d ",
+			kgsl_driver.stats.histogram[i]);
+
+	len += sprintf(buf + len, "\n");
+	return len;
+}
+
+DEVICE_ATTR(vmalloc, 0444, kgsl_drv_memstat_show, NULL);
+DEVICE_ATTR(vmalloc_max, 0444, kgsl_drv_memstat_show, NULL);
+DEVICE_ATTR(coherent, 0444, kgsl_drv_memstat_show, NULL);
+DEVICE_ATTR(coherent_max, 0444, kgsl_drv_memstat_show, NULL);
+DEVICE_ATTR(mapped, 0444, kgsl_drv_memstat_show, NULL);
+DEVICE_ATTR(mapped_max, 0444, kgsl_drv_memstat_show, NULL);
+DEVICE_ATTR(histogram, 0444, kgsl_drv_histogram_show, NULL);
+
+static const struct device_attribute *drv_attr_list[] = {
+	&dev_attr_vmalloc,
+	&dev_attr_vmalloc_max,
+	&dev_attr_coherent,
+	&dev_attr_coherent_max,
+	&dev_attr_mapped,
+	&dev_attr_mapped_max,
+	&dev_attr_histogram,
+	NULL
+};
+
+void
+kgsl_sharedmem_uninit_sysfs(void)
+{
+	kgsl_remove_device_sysfs_files(&kgsl_driver.virtdev, drv_attr_list);
+}
+
+int
+kgsl_sharedmem_init_sysfs(void)
+{
+	return kgsl_create_device_sysfs_files(&kgsl_driver.virtdev,
+		drv_attr_list);
+}
+
+#ifdef CONFIG_OUTER_CACHE
+static void _outer_cache_range_op(int op, unsigned long addr, size_t size)
+{
+	switch (op) {
+	case KGSL_CACHE_OP_FLUSH:
+		outer_flush_range(addr, addr + size);
+		break;
+	case KGSL_CACHE_OP_CLEAN:
+		outer_clean_range(addr, addr + size);
+		break;
+	case KGSL_CACHE_OP_INV:
+		outer_inv_range(addr, addr + size);
+		break;
+	}
+}
+#endif
+
+static unsigned long kgsl_vmalloc_physaddr(struct kgsl_memdesc *memdesc,
+					   unsigned int offset)
+{
+	unsigned int addr;
+
+	if (offset > memdesc->size)
+		return 0;
+
+	addr = vmalloc_to_pfn(memdesc->hostptr + offset);
+	return addr << PAGE_SHIFT;
+}
+
+#ifdef CONFIG_OUTER_CACHE
+static void kgsl_vmalloc_outer_cache(struct kgsl_memdesc *memdesc, int op)
+{
+	void *vaddr = memdesc->hostptr;
+	for (; vaddr < (memdesc->hostptr + memdesc->size); vaddr += PAGE_SIZE) {
+		unsigned long paddr = page_to_phys(vmalloc_to_page(vaddr));
+		_outer_cache_range_op(op, paddr, PAGE_SIZE);
+	}
+}
+#endif
+
+static int kgsl_vmalloc_vmfault(struct kgsl_memdesc *memdesc,
+				struct vm_area_struct *vma,
+				struct vm_fault *vmf)
+{
+	unsigned long offset, pg;
+	struct page *page;
+
+	offset = (unsigned long) vmf->virtual_address - vma->vm_start;
+	pg = (unsigned long) memdesc->hostptr + offset;
+
+	page = vmalloc_to_page((void *) pg);
+	if (page == NULL)
+		return VM_FAULT_SIGBUS;
+
+	get_page(page);
+
+	vmf->page = page;
+	return 0;
+}
+
+static int kgsl_vmalloc_vmflags(struct kgsl_memdesc *memdesc)
+{
+	return VM_RESERVED | VM_DONTEXPAND;
+}
+
+static void kgsl_vmalloc_free(struct kgsl_memdesc *memdesc)
+{
+	kgsl_driver.stats.vmalloc -= memdesc->size;
+	vfree(memdesc->hostptr);
+}
+
+static int kgsl_contiguous_vmflags(struct kgsl_memdesc *memdesc)
+{
+	return VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
+}
+
+static int kgsl_contiguous_vmfault(struct kgsl_memdesc *memdesc,
+				struct vm_area_struct *vma,
+				struct vm_fault *vmf)
+{
+	unsigned long offset, pfn;
+	int ret;
+
+	offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >>
+		PAGE_SHIFT;
+
+	pfn = (memdesc->physaddr >> PAGE_SHIFT) + offset;
+	ret = vm_insert_pfn(vma, (unsigned long) vmf->virtual_address, pfn);
+
+	if (ret == -ENOMEM || ret == -EAGAIN)
+		return VM_FAULT_OOM;
+	else if (ret == -EFAULT)
+		return VM_FAULT_SIGBUS;
+
+	return VM_FAULT_NOPAGE;
+}
+
+static void kgsl_ebimem_free(struct kgsl_memdesc *memdesc)
+
+{
+	kgsl_driver.stats.coherent -= memdesc->size;
+	if (memdesc->hostptr)
+		iounmap(memdesc->hostptr);
+
+	free_contiguous_memory_by_paddr(memdesc->physaddr);
+}
+
+static void kgsl_coherent_free(struct kgsl_memdesc *memdesc)
+{
+	kgsl_driver.stats.coherent -= memdesc->size;
+	dma_free_coherent(NULL, memdesc->size,
+			  memdesc->hostptr, memdesc->physaddr);
+}
+
+static unsigned long kgsl_contiguous_physaddr(struct kgsl_memdesc *memdesc,
+					unsigned int offset)
+{
+	if (offset > memdesc->size)
+		return 0;
+
+	return memdesc->physaddr + offset;
+}
+
+#ifdef CONFIG_OUTER_CACHE
+static void kgsl_contiguous_outer_cache(struct kgsl_memdesc *memdesc, int op)
+{
+	_outer_cache_range_op(op, memdesc->physaddr, memdesc->size);
+}
+#endif
+
+#ifdef CONFIG_OUTER_CACHE
+static void kgsl_userptr_outer_cache(struct kgsl_memdesc *memdesc, int op)
+{
+	void *vaddr = memdesc->hostptr;
+	for (; vaddr < (memdesc->hostptr + memdesc->size); vaddr += PAGE_SIZE) {
+		unsigned long paddr = kgsl_virtaddr_to_physaddr(vaddr);
+		if (paddr)
+			_outer_cache_range_op(op, paddr, PAGE_SIZE);
+	}
+}
+#endif
+
+static unsigned long kgsl_userptr_physaddr(struct kgsl_memdesc *memdesc,
+					   unsigned int offset)
+{
+	return kgsl_virtaddr_to_physaddr(memdesc->hostptr + offset);
+}
+
+/* Global - also used by kgsl_drm.c */
+struct kgsl_memdesc_ops kgsl_vmalloc_ops = {
+	.physaddr = kgsl_vmalloc_physaddr,
+	.free = kgsl_vmalloc_free,
+	.vmflags = kgsl_vmalloc_vmflags,
+	.vmfault = kgsl_vmalloc_vmfault,
+#ifdef CONFIG_OUTER_CACHE
+	.outer_cache = kgsl_vmalloc_outer_cache,
+#endif
+};
+EXPORT_SYMBOL(kgsl_vmalloc_ops);
+
+static struct kgsl_memdesc_ops kgsl_ebimem_ops = {
+	.physaddr = kgsl_contiguous_physaddr,
+	.free = kgsl_ebimem_free,
+	.vmflags = kgsl_contiguous_vmflags,
+	.vmfault = kgsl_contiguous_vmfault,
+#ifdef CONFIG_OUTER_CACHE
+	.outer_cache = kgsl_contiguous_outer_cache,
+#endif
+};
+
+static struct kgsl_memdesc_ops kgsl_coherent_ops = {
+	.physaddr = kgsl_contiguous_physaddr,
+	.free = kgsl_coherent_free,
+#ifdef CONFIG_OUTER_CACHE
+	.outer_cache = kgsl_contiguous_outer_cache,
+#endif
+};
+
+/* Global - also used by kgsl.c and kgsl_drm.c */
+struct kgsl_memdesc_ops kgsl_contiguous_ops = {
+	.physaddr = kgsl_contiguous_physaddr,
+#ifdef CONFIG_OUTER_CACHE
+	.outer_cache = kgsl_contiguous_outer_cache
+#endif
+};
+EXPORT_SYMBOL(kgsl_contiguous_ops);
+
+/* Global - also used by kgsl.c */
+struct kgsl_memdesc_ops kgsl_userptr_ops = {
+	.physaddr = kgsl_userptr_physaddr,
+#ifdef CONFIG_OUTER_CACHE
+	.outer_cache = kgsl_userptr_outer_cache,
+#endif
+};
+EXPORT_SYMBOL(kgsl_userptr_ops);
+
+void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op)
+{
+	void *addr = memdesc->hostptr;
+	int size = memdesc->size;
+
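+	/* Do the inner cache maintenance first, then mirror the operation on
+	 * the outer cache if this memory type requires it. */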
+	switch (op) {
+	case KGSL_CACHE_OP_FLUSH:
+		dmac_flush_range(addr, addr + size);
+		break;
+	case KGSL_CACHE_OP_CLEAN:
+		dmac_clean_range(addr, addr + size);
+		break;
+	case KGSL_CACHE_OP_INV:
+		dmac_inv_range(addr, addr + size);
+		break;
+	}
+
+	if (memdesc->ops->outer_cache)
+		memdesc->ops->outer_cache(memdesc, op);
+}
+EXPORT_SYMBOL(kgsl_cache_range_op);
+
+static int
+_kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
+			struct kgsl_pagetable *pagetable,
+			void *ptr, size_t size, unsigned int protflags)
+{
+	int result;
+
+	memdesc->size = size;
+	memdesc->pagetable = pagetable;
+	memdesc->priv = KGSL_MEMFLAGS_CACHED;
+	memdesc->ops = &kgsl_vmalloc_ops;
+	memdesc->hostptr = (void *) ptr;
+
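+	/* Invalidate any stale CPU cache lines before the GPU is given
+	 * mapped access to this buffer. */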
+	kgsl_cache_range_op(memdesc, KGSL_CACHE_OP_INV);
+
+	result = kgsl_mmu_map(pagetable, memdesc, protflags);
+
+	if (result) {
+		kgsl_sharedmem_free(memdesc);
+	} else {
+		int order;
+
+		KGSL_STATS_ADD(size, kgsl_driver.stats.vmalloc,
+			kgsl_driver.stats.vmalloc_max);
+
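+		/* Track allocation sizes in a power-of-two histogram,
+		 * exported through the "histogram" sysfs file. */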
+		order = get_order(size);
+
+		if (order < 16)
+			kgsl_driver.stats.histogram[order]++;
+	}
+
+	return result;
+}
+
+int
+kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
+		       struct kgsl_pagetable *pagetable, size_t size)
+{
+	void *ptr;
+
+	BUG_ON(size == 0);
+
+	size = ALIGN(size, PAGE_SIZE * 2);
+	ptr = vmalloc(size);
+
+	if (ptr == NULL) {
+		KGSL_CORE_ERR("vmalloc(%d) failed\n", size);
+		return -ENOMEM;
+	}
+
+	return _kgsl_sharedmem_vmalloc(memdesc, pagetable, ptr, size,
+		GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+}
+EXPORT_SYMBOL(kgsl_sharedmem_vmalloc);
+
+int
+kgsl_sharedmem_vmalloc_user(struct kgsl_memdesc *memdesc,
+			    struct kgsl_pagetable *pagetable,
+			    size_t size, int flags)
+{
+	void *ptr;
+	unsigned int protflags;
+
+	BUG_ON(size == 0);
+	ptr = vmalloc_user(size);
+
+	if (ptr == NULL) {
+		KGSL_CORE_ERR("vmalloc_user(%d) failed: allocated=%d\n",
+			      size, kgsl_driver.stats.vmalloc);
+		return -ENOMEM;
+	}
+
+	protflags = GSL_PT_PAGE_RV;
+	if (!(flags & KGSL_MEMFLAGS_GPUREADONLY))
+		protflags |= GSL_PT_PAGE_WV;
+
+	return _kgsl_sharedmem_vmalloc(memdesc, pagetable, ptr, size,
+		protflags);
+}
+EXPORT_SYMBOL(kgsl_sharedmem_vmalloc_user);
+
+int
+kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size)
+{
+	size = ALIGN(size, PAGE_SIZE);
+
+	memdesc->hostptr = dma_alloc_coherent(NULL, size, &memdesc->physaddr,
+					      GFP_KERNEL);
+	if (memdesc->hostptr == NULL) {
+		KGSL_CORE_ERR("dma_alloc_coherent(%d) failed\n", size);
+		return -ENOMEM;
+	}
+
+	memdesc->size = size;
+	memdesc->ops = &kgsl_coherent_ops;
+
+	/* Record statistics */
+
+	KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
+		       kgsl_driver.stats.coherent_max);
+
+	return 0;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_alloc_coherent);
+
+void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)
+{
+	if (memdesc == NULL || memdesc->size == 0)
+		return;
+
+	if (memdesc->gpuaddr)
+		kgsl_mmu_unmap(memdesc->pagetable, memdesc);
+
+	if (memdesc->ops->free)
+		memdesc->ops->free(memdesc);
+
+	memset(memdesc, 0, sizeof(*memdesc));
+}
+EXPORT_SYMBOL(kgsl_sharedmem_free);
+
+static int
+_kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
+			struct kgsl_pagetable *pagetable, size_t size)
+{
+	int result;
+
+	memdesc->physaddr = allocate_contiguous_ebi_nomap(size, SZ_8K);
+
+	if (memdesc->physaddr == 0) {
+		KGSL_CORE_ERR("allocate_contiguous_ebi_nomap(%d) failed\n",
+			size);
+		return -ENOMEM;
+	}
+
+	memdesc->size = size;
+	memdesc->pagetable = pagetable;
+	memdesc->ops = &kgsl_ebimem_ops;
+
+	result = kgsl_mmu_map(pagetable, memdesc,
+		GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+
+	if (result)
+		kgsl_sharedmem_free(memdesc);
+
+	KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
+		kgsl_driver.stats.coherent_max);
+
+	return result;
+}
+
+int
+kgsl_sharedmem_ebimem_user(struct kgsl_memdesc *memdesc,
+			struct kgsl_pagetable *pagetable,
+			size_t size, int flags)
+{
+	size = ALIGN(size, PAGE_SIZE);
+	return _kgsl_sharedmem_ebimem(memdesc, pagetable, size);
+}
+EXPORT_SYMBOL(kgsl_sharedmem_ebimem_user);
+
+int
+kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
+		struct kgsl_pagetable *pagetable, size_t size)
+{
+	int result;
+	size = ALIGN(size, 8192);
+	result = _kgsl_sharedmem_ebimem(memdesc, pagetable, size);
+
+	if (result)
+		return result;
+
+	memdesc->hostptr = ioremap(memdesc->physaddr, size);
+
+	if (memdesc->hostptr == NULL) {
+		KGSL_CORE_ERR("ioremap failed\n");
+		kgsl_sharedmem_free(memdesc);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_ebimem);
+
+int
+kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
+			uint32_t *dst,
+			unsigned int offsetbytes)
+{
+	BUG_ON(memdesc == NULL || memdesc->hostptr == NULL || dst == NULL);
+	WARN_ON(offsetbytes + sizeof(unsigned int) > memdesc->size);
+
+	if (offsetbytes + sizeof(unsigned int) > memdesc->size)
+		return -ERANGE;
+
+	*dst = readl_relaxed(memdesc->hostptr + offsetbytes);
+	return 0;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_readl);
+
+int
+kgsl_sharedmem_writel(const struct kgsl_memdesc *memdesc,
+			unsigned int offsetbytes,
+			uint32_t src)
+{
+	BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
+	BUG_ON(offsetbytes + sizeof(unsigned int) > memdesc->size);
+
+	kgsl_cffdump_setmem(memdesc->physaddr + offsetbytes,
+		src, sizeof(uint));
+	writel_relaxed(src, memdesc->hostptr + offsetbytes);
+	return 0;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_writel);
+
+int
+kgsl_sharedmem_set(const struct kgsl_memdesc *memdesc, unsigned int offsetbytes,
+			unsigned int value, unsigned int sizebytes)
+{
+	BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
+	BUG_ON(offsetbytes + sizebytes > memdesc->size);
+
+	kgsl_cffdump_setmem(memdesc->physaddr + offsetbytes, value,
+		sizebytes);
+	memset(memdesc->hostptr + offsetbytes, value, sizebytes);
+	return 0;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_set);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
new file mode 100644
index 0000000..9e57e78
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -0,0 +1,114 @@
+/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __KGSL_SHAREDMEM_H
+#define __KGSL_SHAREDMEM_H
+
+#include <linux/dma-mapping.h>
+
+struct kgsl_device;
+struct kgsl_process_private;
+
+#define KGSL_CACHE_OP_INV       0x01
+#define KGSL_CACHE_OP_FLUSH     0x02
+#define KGSL_CACHE_OP_CLEAN     0x03
+
+/** Set if the memdesc describes cached memory */
+#define KGSL_MEMFLAGS_CACHED    0x00000001
+
+struct kgsl_memdesc_ops {
+	unsigned long (*physaddr)(struct kgsl_memdesc *, unsigned int);
+	void (*outer_cache)(struct kgsl_memdesc *, int);
+	int (*vmflags)(struct kgsl_memdesc *);
+	int (*vmfault)(struct kgsl_memdesc *, struct vm_area_struct *,
+		       struct vm_fault *);
+	void (*free)(struct kgsl_memdesc *memdesc);
+};
+
+extern struct kgsl_memdesc_ops kgsl_vmalloc_ops;
+extern struct kgsl_memdesc_ops kgsl_contiguous_ops;
+extern struct kgsl_memdesc_ops kgsl_userptr_ops;
+
+int kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
+			   struct kgsl_pagetable *pagetable, size_t size);
+
+int kgsl_sharedmem_vmalloc_user(struct kgsl_memdesc *memdesc,
+				struct kgsl_pagetable *pagetable,
+				size_t size, int flags);
+
+int kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size);
+
+int kgsl_sharedmem_ebimem_user(struct kgsl_memdesc *memdesc,
+			     struct kgsl_pagetable *pagetable,
+			     size_t size, int flags);
+
+int kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
+			struct kgsl_pagetable *pagetable,
+			size_t size);
+
+void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc);
+
+int kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
+			uint32_t *dst,
+			unsigned int offsetbytes);
+
+int kgsl_sharedmem_writel(const struct kgsl_memdesc *memdesc,
+			unsigned int offsetbytes,
+			uint32_t src);
+
+int kgsl_sharedmem_set(const struct kgsl_memdesc *memdesc,
+			unsigned int offsetbytes, unsigned int value,
+			unsigned int sizebytes);
+
+void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op);
+
+void kgsl_process_init_sysfs(struct kgsl_process_private *private);
+void kgsl_process_uninit_sysfs(struct kgsl_process_private *private);
+
+int kgsl_sharedmem_init_sysfs(void);
+void kgsl_sharedmem_uninit_sysfs(void);
+
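+/* kgsl_allocate() and friends below choose vmalloc-backed or contiguous EBI
+ * memory depending on whether the GPU MMU is enabled (CONFIG_MSM_KGSL_MMU). */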
+static inline int
+kgsl_allocate(struct kgsl_memdesc *memdesc,
+		struct kgsl_pagetable *pagetable, size_t size)
+{
+#ifdef CONFIG_MSM_KGSL_MMU
+	return kgsl_sharedmem_vmalloc(memdesc, pagetable, size);
+#else
+	return kgsl_sharedmem_ebimem(memdesc, pagetable, size);
+#endif
+}
+
+static inline int
+kgsl_allocate_user(struct kgsl_memdesc *memdesc,
+		struct kgsl_pagetable *pagetable,
+		size_t size, unsigned int flags)
+{
+#ifdef CONFIG_MSM_KGSL_MMU
+	return kgsl_sharedmem_vmalloc_user(memdesc, pagetable, size, flags);
+#else
+	return kgsl_sharedmem_ebimem_user(memdesc, pagetable, size, flags);
+#endif
+}
+
+static inline int
+kgsl_allocate_contiguous(struct kgsl_memdesc *memdesc, size_t size)
+{
+	int ret  = kgsl_sharedmem_alloc_coherent(memdesc, size);
+#ifndef CONFIG_MSM_KGSL_MMU
+	if (!ret)
+		memdesc->gpuaddr = memdesc->physaddr;
+#endif
+	return ret;
+}
+
+#endif /* __KGSL_SHAREDMEM_H */
diff --git a/drivers/gpu/msm/z180.c b/drivers/gpu/msm/z180.c
new file mode 100644
index 0000000..2d3f5bd
--- /dev/null
+++ b/drivers/gpu/msm/z180.c
@@ -0,0 +1,938 @@
+/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/uaccess.h>
+
+#include "kgsl.h"
+#include "kgsl_cffdump.h"
+#include "kgsl_sharedmem.h"
+
+#include "z180.h"
+#include "z180_reg.h"
+
+#define DRIVER_VERSION_MAJOR   3
+#define DRIVER_VERSION_MINOR   1
+
+#define Z180_DEVICE(device) \
+		KGSL_CONTAINER_OF(device, struct z180_device, dev)
+
+#define GSL_VGC_INT_MASK \
+	 (REG_VGC_IRQSTATUS__MH_MASK | \
+	  REG_VGC_IRQSTATUS__G2D_MASK | \
+	  REG_VGC_IRQSTATUS__FIFO_MASK)
+
+#define VGV3_NEXTCMD_JUMP        0x01
+
+#define VGV3_NEXTCMD_NEXTCMD_FSHIFT 12
+#define VGV3_NEXTCMD_NEXTCMD_FMASK 0x7
+
+#define VGV3_CONTROL_MARKADD_FSHIFT 0
+#define VGV3_CONTROL_MARKADD_FMASK 0xfff
+
+#define Z180_PACKET_SIZE 15
+#define Z180_MARKER_SIZE 10
+#define Z180_CALL_CMD     0x1000
+#define Z180_MARKER_CMD   0x8000
+#define Z180_STREAM_END_CMD 0x9000
+#define Z180_STREAM_PACKET 0x7C000176
+#define Z180_STREAM_PACKET_CALL 0x7C000275
+#define Z180_PACKET_COUNT 8
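+/* The ring buffer holds Z180_PACKET_COUNT fixed-size packets of
+ * Z180_PACKET_SIZE words each. */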
+#define Z180_RB_SIZE (Z180_PACKET_SIZE*Z180_PACKET_COUNT \
+			  *sizeof(uint32_t))
+
+#define NUMTEXUNITS             4
+#define TEXUNITREGCOUNT         25
+#define VG_REGCOUNT             0x39
+
+#define PACKETSIZE_BEGIN        3
+#define PACKETSIZE_G2DCOLOR     2
+#define PACKETSIZE_TEXUNIT      (TEXUNITREGCOUNT * 2)
+#define PACKETSIZE_REG          (VG_REGCOUNT * 2)
+#define PACKETSIZE_STATE        (PACKETSIZE_TEXUNIT * NUMTEXUNITS + \
+				 PACKETSIZE_REG + PACKETSIZE_BEGIN + \
+				 PACKETSIZE_G2DCOLOR)
+#define PACKETSIZE_STATESTREAM  (ALIGN((PACKETSIZE_STATE * \
+				 sizeof(unsigned int)), 32) / \
+				 sizeof(unsigned int))
+
+#define Z180_INVALID_CONTEXT UINT_MAX
+
+/* z180 MH arbiter config*/
+#define Z180_CFG_MHARB \
+	(0x10 \
+		| (0 << MH_ARBITER_CONFIG__SAME_PAGE_GRANULARITY__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__L1_ARB_HOLD_ENABLE__SHIFT) \
+		| (0 << MH_ARBITER_CONFIG__L2_ARB_CONTROL__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__PAGE_SIZE__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__TC_REORDER_ENABLE__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__TC_ARB_HOLD_ENABLE__SHIFT) \
+		| (0 << MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT) \
+		| (0x8 << MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__VGT_CLNT_ENABLE__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__TC_CLNT_ENABLE__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT))
+
+#define Z180_TIMESTAMP_EPSILON 20000
+#define Z180_IDLE_COUNT_MAX 1000000
+
+enum z180_cmdwindow_type {
+	Z180_CMDWINDOW_2D = 0x00000000,
+	Z180_CMDWINDOW_MMU = 0x00000002,
+};
+
+#define Z180_CMDWINDOW_TARGET_MASK		0x000000FF
+#define Z180_CMDWINDOW_ADDR_MASK		0x00FFFF00
+#define Z180_CMDWINDOW_TARGET_SHIFT		0
+#define Z180_CMDWINDOW_ADDR_SHIFT		8
+
+static int z180_start(struct kgsl_device *device, unsigned int init_ram);
+static int z180_stop(struct kgsl_device *device);
+static int z180_wait(struct kgsl_device *device,
+				unsigned int timestamp,
+				unsigned int msecs);
+static void z180_regread(struct kgsl_device *device,
+				unsigned int offsetwords,
+				unsigned int *value);
+static void z180_regwrite(struct kgsl_device *device,
+				unsigned int offsetwords,
+				unsigned int value);
+static void z180_cmdwindow_write(struct kgsl_device *device,
+				unsigned int addr,
+				unsigned int data);
+
+#define Z180_MMU_CONFIG					     \
+	(0x01							     \
+	| (MMU_CONFIG << MH_MMU_CONFIG__RB_W_CLNT_BEHAVIOR__SHIFT)   \
+	| (MMU_CONFIG << MH_MMU_CONFIG__CP_W_CLNT_BEHAVIOR__SHIFT)   \
+	| (MMU_CONFIG << MH_MMU_CONFIG__CP_R0_CLNT_BEHAVIOR__SHIFT)  \
+	| (MMU_CONFIG << MH_MMU_CONFIG__CP_R1_CLNT_BEHAVIOR__SHIFT)  \
+	| (MMU_CONFIG << MH_MMU_CONFIG__CP_R2_CLNT_BEHAVIOR__SHIFT)  \
+	| (MMU_CONFIG << MH_MMU_CONFIG__CP_R3_CLNT_BEHAVIOR__SHIFT)  \
+	| (MMU_CONFIG << MH_MMU_CONFIG__CP_R4_CLNT_BEHAVIOR__SHIFT)  \
+	| (MMU_CONFIG << MH_MMU_CONFIG__VGT_R0_CLNT_BEHAVIOR__SHIFT) \
+	| (MMU_CONFIG << MH_MMU_CONFIG__VGT_R1_CLNT_BEHAVIOR__SHIFT) \
+	| (MMU_CONFIG << MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT)   \
+	| (MMU_CONFIG << MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT))
+
+static const struct kgsl_functable z180_functable;
+
+static struct z180_device device_2d0 = {
+	.dev = {
+		.name = DEVICE_2D0_NAME,
+		.id = KGSL_DEVICE_2D0,
+		.ver_major = DRIVER_VERSION_MAJOR,
+		.ver_minor = DRIVER_VERSION_MINOR,
+		.mmu = {
+			.config = Z180_MMU_CONFIG,
+			/* turn off memory protection unit by setting
+			   acceptable physical address range to include
+			   all pages. */
+			.mpu_base = 0x00000000,
+			.mpu_range =  0xFFFFF000,
+		},
+		.pwrctrl = {
+			.regulator_name = "fs_gfx2d0",
+			.irq_name = KGSL_2D0_IRQ,
+		},
+		.mutex = __MUTEX_INITIALIZER(device_2d0.dev.mutex),
+		.state = KGSL_STATE_INIT,
+		.active_cnt = 0,
+		.iomemname = KGSL_2D0_REG_MEMORY,
+		.ftbl = &z180_functable,
+		.display_off = {
+#ifdef CONFIG_HAS_EARLYSUSPEND
+			.level = EARLY_SUSPEND_LEVEL_STOP_DRAWING,
+			.suspend = kgsl_early_suspend_driver,
+			.resume = kgsl_late_resume_driver,
+#endif
+		},
+	},
+};
+
+static struct z180_device device_2d1 = {
+	.dev = {
+		.name = DEVICE_2D1_NAME,
+		.id = KGSL_DEVICE_2D1,
+		.ver_major = DRIVER_VERSION_MAJOR,
+		.ver_minor = DRIVER_VERSION_MINOR,
+		.mmu = {
+			.config = Z180_MMU_CONFIG,
+			/* turn off memory protection unit by setting
+			   acceptable physical address range to include
+			   all pages. */
+			.mpu_base = 0x00000000,
+			.mpu_range =  0xFFFFF000,
+		},
+		.pwrctrl = {
+			.regulator_name = "fs_gfx2d1",
+			.irq_name = KGSL_2D1_IRQ,
+		},
+		.mutex = __MUTEX_INITIALIZER(device_2d1.dev.mutex),
+		.state = KGSL_STATE_INIT,
+		.active_cnt = 0,
+		.iomemname = KGSL_2D1_REG_MEMORY,
+		.ftbl = &z180_functable,
+		.display_off = {
+#ifdef CONFIG_HAS_EARLYSUSPEND
+			.level = EARLY_SUSPEND_LEVEL_STOP_DRAWING,
+			.suspend = kgsl_early_suspend_driver,
+			.resume = kgsl_late_resume_driver,
+#endif
+		},
+	},
+};
+
+static irqreturn_t z180_isr(int irq, void *data)
+{
+	irqreturn_t result = IRQ_NONE;
+	unsigned int status;
+	struct kgsl_device *device = (struct kgsl_device *) data;
+	struct z180_device *z180_dev = Z180_DEVICE(device);
+
+	z180_regread(device, ADDR_VGC_IRQSTATUS >> 2, &status);
+
+	if (status & GSL_VGC_INT_MASK) {
+		z180_regwrite(device,
+			ADDR_VGC_IRQSTATUS >> 2, status & GSL_VGC_INT_MASK);
+
+		result = IRQ_HANDLED;
+
+		if (status & REG_VGC_IRQSTATUS__FIFO_MASK)
+			KGSL_DRV_ERR(device, "z180 fifo interrupt\n");
+		if (status & REG_VGC_IRQSTATUS__MH_MASK)
+			kgsl_mh_intrcallback(device);
+		if (status & REG_VGC_IRQSTATUS__G2D_MASK) {
+			unsigned int count;
+
+			z180_regread(device,
+					 ADDR_VGC_IRQ_ACTIVE_CNT >> 2,
+					 &count);
+
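+			/* Bits 15:8 of the active count register report how
+			 * many command packets have completed; advance the
+			 * retired timestamp by that amount. */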
+			count >>= 8;
+			count &= 255;
+			z180_dev->timestamp += count;
+
+			wake_up_interruptible(&device->wait_queue);
+
+			atomic_notifier_call_chain(
+				&(device->ts_notifier_list),
+				device->id, NULL);
+		}
+	}
+
+	if ((device->pwrctrl.nap_allowed == true) &&
+		(device->requested_state == KGSL_STATE_NONE)) {
+		device->requested_state = KGSL_STATE_NAP;
+		queue_work(device->work_queue, &device->idle_check_ws);
+	}
+	mod_timer(&device->idle_timer,
+			jiffies + device->pwrctrl.interval_timeout);
+
+	return result;
+}
+
+static int z180_cleanup_pt(struct kgsl_device *device,
+			       struct kgsl_pagetable *pagetable)
+{
+	struct z180_device *z180_dev = Z180_DEVICE(device);
+
+	kgsl_mmu_unmap(pagetable, &device->mmu.dummyspace);
+
+	kgsl_mmu_unmap(pagetable, &device->memstore);
+
+	kgsl_mmu_unmap(pagetable, &z180_dev->ringbuffer.cmdbufdesc);
+
+	return 0;
+}
+
+static int z180_setup_pt(struct kgsl_device *device,
+			     struct kgsl_pagetable *pagetable)
+{
+	int result = 0;
+	struct z180_device *z180_dev = Z180_DEVICE(device);
+
+	result = kgsl_mmu_map_global(pagetable, &device->mmu.dummyspace,
+				     GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+
+	if (result)
+		goto error;
+
+	result = kgsl_mmu_map_global(pagetable, &device->memstore,
+				     GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+	if (result)
+		goto error_unmap_dummy;
+
+	result = kgsl_mmu_map_global(pagetable,
+				     &z180_dev->ringbuffer.cmdbufdesc,
+				     GSL_PT_PAGE_RV);
+	if (result)
+		goto error_unmap_memstore;
+	return result;
+
+error_unmap_dummy:
+	kgsl_mmu_unmap(pagetable, &device->mmu.dummyspace);
+
+error_unmap_memstore:
+	kgsl_mmu_unmap(pagetable, &device->memstore);
+
+error:
+	return result;
+}
+
+static inline unsigned int rb_offset(unsigned int index)
+{
+	return index*sizeof(unsigned int)*(Z180_PACKET_SIZE);
+}
+
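+/* Each ringbuffer slot is Z180_PACKET_SIZE dwords: a marker block written by
+ * addmarker() at the start of the slot, followed by the stream-call packet
+ * written by addcmd() at an offset of Z180_MARKER_SIZE dwords into the slot.
+ */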
+static void addmarker(struct z180_ringbuffer *rb, unsigned int index)
+{
+	char *ptr = (char *)(rb->cmdbufdesc.hostptr);
+	unsigned int *p = (unsigned int *)(ptr + rb_offset(index));
+
+	*p++ = Z180_STREAM_PACKET;
+	*p++ = (Z180_MARKER_CMD | 5);
+	*p++ = ADDR_VGV3_LAST << 24;
+	*p++ = ADDR_VGV3_LAST << 24;
+	*p++ = ADDR_VGV3_LAST << 24;
+	*p++ = Z180_STREAM_PACKET;
+	*p++ = 5;
+	*p++ = ADDR_VGV3_LAST << 24;
+	*p++ = ADDR_VGV3_LAST << 24;
+	*p++ = ADDR_VGV3_LAST << 24;
+}
+
+static void addcmd(struct z180_ringbuffer *rb, unsigned int index,
+			unsigned int cmd, unsigned int nextcnt)
+{
+	char *ptr = (char *)(rb->cmdbufdesc.hostptr);
+	unsigned int *p = (unsigned int *)(ptr + (rb_offset(index)
+			   + (Z180_MARKER_SIZE * sizeof(unsigned int))));
+
+	*p++ = Z180_STREAM_PACKET_CALL;
+	*p++ = cmd;
+	*p++ = Z180_CALL_CMD | nextcnt;
+	*p++ = ADDR_VGV3_LAST << 24;
+	*p++ = ADDR_VGV3_LAST << 24;
+}
+
+static void z180_cmdstream_start(struct kgsl_device *device)
+{
+	struct z180_device *z180_dev = Z180_DEVICE(device);
+	unsigned int cmd = VGV3_NEXTCMD_JUMP << VGV3_NEXTCMD_NEXTCMD_FSHIFT;
+
+	z180_dev->timestamp = 0;
+	z180_dev->current_timestamp = 0;
+
+	addmarker(&z180_dev->ringbuffer, 0);
+
+	z180_cmdwindow_write(device, ADDR_VGV3_MODE, 4);
+
+	z180_cmdwindow_write(device, ADDR_VGV3_NEXTADDR,
+			z180_dev->ringbuffer.cmdbufdesc.gpuaddr);
+
+	z180_cmdwindow_write(device, ADDR_VGV3_NEXTCMD, cmd | 5);
+
+	z180_cmdwindow_write(device, ADDR_VGV3_WRITEADDR,
+			device->memstore.gpuaddr);
+
+	cmd = (int)(((1) & VGV3_CONTROL_MARKADD_FMASK)
+			<< VGV3_CONTROL_MARKADD_FSHIFT);
+
+	z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, cmd);
+
+	z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, 0);
+}
+
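+/* There is room in the ring as long as fewer than Z180_PACKET_COUNT
+ * submissions are outstanding, i.e. issued (current_timestamp) but not yet
+ * retired (timestamp).
+ */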
+static int room_in_rb(struct z180_device *device)
+{
+	int ts_diff;
+
+	ts_diff = device->current_timestamp - device->timestamp;
+
+	return ts_diff < Z180_PACKET_COUNT;
+}
+
+static int z180_idle(struct kgsl_device *device, unsigned int timeout)
+{
+	int status = 0;
+	struct z180_device *z180_dev = Z180_DEVICE(device);
+
+	if (z180_dev->current_timestamp > z180_dev->timestamp)
+		status = z180_wait(device, z180_dev->current_timestamp,
+					timeout);
+
+	if (status)
+		KGSL_DRV_ERR(device, "z180_waittimestamp() timed out\n");
+
+	return status;
+}
+
+static void z180_setstate(struct kgsl_device *device, uint32_t flags)
+{
+	kgsl_default_setstate(device, flags);
+}
+
+int
+z180_cmdstream_issueibcmds(struct kgsl_device_private *dev_priv,
+			struct kgsl_context *context,
+			struct kgsl_ibdesc *ibdesc,
+			unsigned int numibs,
+			uint32_t *timestamp,
+			unsigned int ctrl)
+{
+	int result = 0;
+	unsigned int ofs        = PACKETSIZE_STATESTREAM * sizeof(unsigned int);
+	unsigned int cnt        = 5;
+	unsigned int nextaddr   = 0;
+	unsigned int index	= 0;
+	unsigned int nextindex;
+	unsigned int nextcnt    = Z180_STREAM_END_CMD | 5;
+	struct kgsl_memdesc tmp = {0};
+	unsigned int cmd;
+	struct kgsl_device *device = dev_priv->device;
+	struct kgsl_pagetable *pagetable = dev_priv->process_priv->pagetable;
+	struct z180_device *z180_dev = Z180_DEVICE(device);
+	unsigned int sizedwords;
+
+	if (device->state & KGSL_STATE_HUNG) {
+		result = -EINVAL;
+		goto error;
+	}
+	if (numibs != 1) {
+		KGSL_DRV_ERR(device, "Invalid number of ibs: %d\n", numibs);
+		result = -EINVAL;
+		goto error;
+	}
+	cmd = ibdesc[0].gpuaddr;
+	sizedwords = ibdesc[0].sizedwords;
+
+	tmp.hostptr = (void *)*timestamp;
+
+	KGSL_CMD_INFO(device, "ctxt %d ibaddr 0x%08x sizedwords %d\n",
+		context->id, cmd, sizedwords);
+	/* context switch */
+	if ((context->id != (int)z180_dev->ringbuffer.prevctx) ||
+	    (ctrl & KGSL_CONTEXT_CTX_SWITCH)) {
+		KGSL_CMD_INFO(device, "context switch %d -> %d\n",
+			z180_dev->ringbuffer.prevctx, context->id);
+		kgsl_mmu_setstate(device, pagetable);
+		cnt = PACKETSIZE_STATESTREAM;
+		ofs = 0;
+	}
+	z180_setstate(device, kgsl_pt_get_flags(device->mmu.hwpagetable,
+						    device->id));
+
+	result = wait_event_interruptible_timeout(device->wait_queue,
+				  room_in_rb(z180_dev),
+				  msecs_to_jiffies(KGSL_TIMEOUT_DEFAULT));
+	if (result < 0) {
+		KGSL_CMD_ERR(device, "wait_event_interruptible_timeout "
+			"failed: %d\n", result);
+		goto error;
+	}
+	result = 0;
+
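+	/* Timestamps double as ringbuffer indices: this submission uses the
+	 * slot at current_timestamp modulo Z180_PACKET_COUNT. */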
+	index = z180_dev->current_timestamp % Z180_PACKET_COUNT;
+	z180_dev->current_timestamp++;
+	nextindex = z180_dev->current_timestamp % Z180_PACKET_COUNT;
+	*timestamp = z180_dev->current_timestamp;
+
+	z180_dev->ringbuffer.prevctx = context->id;
+
+	addcmd(&z180_dev->ringbuffer, index, cmd + ofs, cnt);
+
+	/* Make sure the next ringbuffer entry has a marker */
+	addmarker(&z180_dev->ringbuffer, nextindex);
+
+	nextaddr = z180_dev->ringbuffer.cmdbufdesc.gpuaddr
+		+ rb_offset(nextindex);
+
+	tmp.hostptr = (void *)(tmp.hostptr +
+			(sizedwords * sizeof(unsigned int)));
+	tmp.size = 12;
+
+	kgsl_sharedmem_writel(&tmp, 4, nextaddr);
+	kgsl_sharedmem_writel(&tmp, 8, nextcnt);
+
+	/* sync memory before activating the hardware for the new command*/
+	mb();
+
+	cmd = (int)(((2) & VGV3_CONTROL_MARKADD_FMASK)
+		<< VGV3_CONTROL_MARKADD_FSHIFT);
+
+	z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, cmd);
+	z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, 0);
+error:
+	return result;
+}
+
+static int z180_ringbuffer_init(struct kgsl_device *device)
+{
+	struct z180_device *z180_dev = Z180_DEVICE(device);
+	memset(&z180_dev->ringbuffer, 0, sizeof(struct z180_ringbuffer));
+	z180_dev->ringbuffer.prevctx = Z180_INVALID_CONTEXT;
+	return kgsl_allocate_contiguous(&z180_dev->ringbuffer.cmdbufdesc,
+		Z180_RB_SIZE);
+}
+
+static void z180_ringbuffer_close(struct kgsl_device *device)
+{
+	struct z180_device *z180_dev = Z180_DEVICE(device);
+	kgsl_sharedmem_free(&z180_dev->ringbuffer.cmdbufdesc);
+	memset(&z180_dev->ringbuffer, 0, sizeof(struct z180_ringbuffer));
+}
+
+static int __devinit z180_probe(struct platform_device *pdev)
+{
+	int status = -EINVAL;
+	struct kgsl_device *device = NULL;
+	struct z180_device *z180_dev;
+
+	device = (struct kgsl_device *)pdev->id_entry->driver_data;
+	device->parentdev = &pdev->dev;
+
+	z180_dev = Z180_DEVICE(device);
+	spin_lock_init(&z180_dev->cmdwin_lock);
+
+	status = z180_ringbuffer_init(device);
+	if (status != 0)
+		goto error;
+
+	status = kgsl_device_platform_probe(device, z180_isr);
+	if (status)
+		goto error_close_ringbuffer;
+
+	return status;
+
+error_close_ringbuffer:
+	z180_ringbuffer_close(device);
+error:
+	device->parentdev = NULL;
+	return status;
+}
+
+static int __devexit z180_remove(struct platform_device *pdev)
+{
+	struct kgsl_device *device = NULL;
+
+	device = (struct kgsl_device *)pdev->id_entry->driver_data;
+
+	kgsl_device_platform_remove(device);
+
+	z180_ringbuffer_close(device);
+
+	return 0;
+}
+
+static int z180_start(struct kgsl_device *device, unsigned int init_ram)
+{
+	int status = 0;
+
+	device->state = KGSL_STATE_INIT;
+	device->requested_state = KGSL_STATE_NONE;
+	KGSL_PWR_WARN(device, "state -> INIT, device %d\n", device->id);
+
+	kgsl_pwrctrl_enable(device);
+
+	/* Set up MH arbiter.  MH offsets are considered to be dword
+	 * based, therefore no down shift. */
+	z180_regwrite(device, ADDR_MH_ARBITER_CONFIG, Z180_CFG_MHARB);
+
+	z180_regwrite(device, ADDR_MH_CLNT_INTF_CTRL_CONFIG1, 0x00030F27);
+	z180_regwrite(device, ADDR_MH_CLNT_INTF_CTRL_CONFIG2, 0x004B274F);
+
+	/* Set interrupts to 0 to ensure a good state */
+	z180_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 0x0);
+
+	status = kgsl_mmu_start(device);
+	if (status)
+		goto error_clk_off;
+
+	z180_cmdstream_start(device);
+
+	mod_timer(&device->idle_timer, jiffies + FIRST_TIMEOUT);
+	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_IRQ_ON);
+	return 0;
+
+error_clk_off:
+	z180_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 0);
+	kgsl_pwrctrl_disable(device);
+	return status;
+}
+
+static int z180_stop(struct kgsl_device *device)
+{
+	z180_idle(device, KGSL_TIMEOUT_DEFAULT);
+
+	del_timer(&device->idle_timer);
+
+	kgsl_mmu_stop(device);
+
+	/* Disable the clocks before the power rail. */
+	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+
+	kgsl_pwrctrl_disable(device);
+
+	return 0;
+}
+
+static int z180_getproperty(struct kgsl_device *device,
+				enum kgsl_property_type type,
+				void *value,
+				unsigned int sizebytes)
+{
+	int status = -EINVAL;
+
+	switch (type) {
+	case KGSL_PROP_DEVICE_INFO:
+	{
+		struct kgsl_devinfo devinfo;
+
+		if (sizebytes != sizeof(devinfo)) {
+			status = -EINVAL;
+			break;
+		}
+
+		memset(&devinfo, 0, sizeof(devinfo));
+		devinfo.device_id = device->id+1;
+		devinfo.chip_id = 0;
+		devinfo.mmu_enabled = kgsl_mmu_enabled();
+
+		if (copy_to_user(value, &devinfo, sizeof(devinfo)) !=
+				0) {
+			status = -EFAULT;
+			break;
+		}
+		status = 0;
+	}
+	break;
+	case KGSL_PROP_MMU_ENABLE:
+		{
+#ifdef CONFIG_MSM_KGSL_MMU
+			int mmuProp = 1;
+#else
+			int mmuProp = 0;
+#endif
+			if (sizebytes != sizeof(int)) {
+				status = -EINVAL;
+				break;
+			}
+			if (copy_to_user(value, &mmuProp, sizeof(mmuProp))) {
+				status = -EFAULT;
+				break;
+			}
+			status = 0;
+		}
+		break;
+
+	default:
+		KGSL_DRV_ERR(device, "invalid property: %d\n", type);
+		status = -EINVAL;
+	}
+	return status;
+}
+
+static unsigned int z180_isidle(struct kgsl_device *device)
+{
+	int status = false;
+	struct z180_device *z180_dev = Z180_DEVICE(device);
+
+	int timestamp = z180_dev->timestamp;
+
+	if (timestamp == z180_dev->current_timestamp)
+		status = true;
+
+	return status;
+}
+
+static int z180_suspend_context(struct kgsl_device *device)
+{
+	struct z180_device *z180_dev = Z180_DEVICE(device);
+
+	z180_dev->ringbuffer.prevctx = Z180_INVALID_CONTEXT;
+
+	return 0;
+}
+
+/* Not all Z180 registers are directly accessible.
+ * The _z180_(read|write)_simple functions below handle the ones that are.
+ */
+static void _z180_regread_simple(struct kgsl_device *device,
+				unsigned int offsetwords,
+				unsigned int *value)
+{
+	unsigned int *reg;
+
+	BUG_ON(offsetwords * sizeof(uint32_t) >= device->regspace.sizebytes);
+
+	reg = (unsigned int *)(device->regspace.mmio_virt_base
+			+ (offsetwords << 2));
+
+	/* Ensure this read finishes before the next one,
+	 * i.e. act like a normal readl(). */
+	*value = __raw_readl(reg);
+	rmb();
+
+}
+
+static void _z180_regwrite_simple(struct kgsl_device *device,
+				 unsigned int offsetwords,
+				 unsigned int value)
+{
+	unsigned int *reg;
+
+	BUG_ON(offsetwords*sizeof(uint32_t) >= device->regspace.sizebytes);
+
+	reg = (unsigned int *)(device->regspace.mmio_virt_base
+			+ (offsetwords << 2));
+	kgsl_cffdump_regwrite(device->id, offsetwords << 2, value);
+	/* Ensure previous writes post before this one,
+	 * i.e. act like a normal writel(). */
+	wmb();
+	__raw_writel(value, reg);
+}
+
+
+/* The MH registers must be accessed through a two-step (read|write)
+ * sequence: the register offset is written first, then the data word is
+ * read or written. These registers may be accessed from interrupt context
+ * during the handling of MH or MMU error interrupts, so a spin lock is used
+ * to ensure that the two-step sequence is not interrupted.
+ */
+static void _z180_regread_mmu(struct kgsl_device *device,
+			     unsigned int offsetwords,
+			     unsigned int *value)
+{
+	struct z180_device *z180_dev = Z180_DEVICE(device);
+	unsigned long flags;
+
+	spin_lock_irqsave(&z180_dev->cmdwin_lock, flags);
+	_z180_regwrite_simple(device, (ADDR_VGC_MH_READ_ADDR >> 2),
+				offsetwords);
+	_z180_regread_simple(device, (ADDR_VGC_MH_DATA_ADDR >> 2), value);
+	spin_unlock_irqrestore(&z180_dev->cmdwin_lock, flags);
+}
+
+
+static void _z180_regwrite_mmu(struct kgsl_device *device,
+			      unsigned int offsetwords,
+			      unsigned int value)
+{
+	struct z180_device *z180_dev = Z180_DEVICE(device);
+	unsigned int cmdwinaddr;
+	unsigned long flags;
+
+	cmdwinaddr = ((Z180_CMDWINDOW_MMU << Z180_CMDWINDOW_TARGET_SHIFT) &
+			Z180_CMDWINDOW_TARGET_MASK);
+	cmdwinaddr |= ((offsetwords << Z180_CMDWINDOW_ADDR_SHIFT) &
+			Z180_CMDWINDOW_ADDR_MASK);
+
+	spin_lock_irqsave(&z180_dev->cmdwin_lock, flags);
+	_z180_regwrite_simple(device, ADDR_VGC_MMUCOMMANDSTREAM >> 2,
+			     cmdwinaddr);
+	_z180_regwrite_simple(device, ADDR_VGC_MMUCOMMANDSTREAM >> 2, value);
+	spin_unlock_irqrestore(&z180_dev->cmdwin_lock, flags);
+}
+
+/* The rest of the driver does not need to know whether it is accessing MMU
+ * registers or normal registers, so handle the distinction here.
+ */
+static void z180_regread(struct kgsl_device *device,
+			unsigned int offsetwords,
+			unsigned int *value)
+{
+	if (!in_interrupt())
+		kgsl_pre_hwaccess(device);
+
+	if ((offsetwords >= ADDR_MH_ARBITER_CONFIG &&
+	     offsetwords <= ADDR_MH_AXI_HALT_CONTROL) ||
+	    (offsetwords >= MH_MMU_CONFIG &&
+	     offsetwords <= MH_MMU_MPU_END)) {
+		_z180_regread_mmu(device, offsetwords, value);
+	} else {
+		_z180_regread_simple(device, offsetwords, value);
+	}
+}
+
+static void z180_regwrite(struct kgsl_device *device,
+				unsigned int offsetwords,
+				unsigned int value)
+{
+	if (!in_interrupt())
+		kgsl_pre_hwaccess(device);
+
+	if ((offsetwords >= ADDR_MH_ARBITER_CONFIG &&
+	     offsetwords <= ADDR_MH_CLNT_INTF_CTRL_CONFIG2) ||
+	    (offsetwords >= MH_MMU_CONFIG &&
+	     offsetwords <= MH_MMU_MPU_END)) {
+		_z180_regwrite_mmu(device, offsetwords, value);
+	} else {
+		_z180_regwrite_simple(device, offsetwords, value);
+	}
+}
+
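+/* Command window writes are a two step sequence: the target/address word is
+ * written first, followed by the data word.
+ */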
+static void z180_cmdwindow_write(struct kgsl_device *device,
+		unsigned int addr, unsigned int data)
+{
+	unsigned int cmdwinaddr;
+
+	cmdwinaddr = ((Z180_CMDWINDOW_2D << Z180_CMDWINDOW_TARGET_SHIFT) &
+			Z180_CMDWINDOW_TARGET_MASK);
+	cmdwinaddr |= ((addr << Z180_CMDWINDOW_ADDR_SHIFT) &
+			Z180_CMDWINDOW_ADDR_MASK);
+
+	z180_regwrite(device, ADDR_VGC_COMMANDSTREAM >> 2, cmdwinaddr);
+	z180_regwrite(device, ADDR_VGC_COMMANDSTREAM >> 2, data);
+}
+
+static unsigned int z180_readtimestamp(struct kgsl_device *device,
+			     enum kgsl_timestamp_type type)
+{
+	struct z180_device *z180_dev = Z180_DEVICE(device);
+	/* get current EOP timestamp */
+	return z180_dev->timestamp;
+}
+
+static int z180_waittimestamp(struct kgsl_device *device,
+				unsigned int timestamp,
+				unsigned int msecs)
+{
+	int status = -EINVAL;
+	mutex_unlock(&device->mutex);
+	status = z180_wait(device, timestamp, msecs);
+	mutex_lock(&device->mutex);
+
+	return status;
+}
+
+static int z180_wait(struct kgsl_device *device,
+				unsigned int timestamp,
+				unsigned int msecs)
+{
+	int status = -EINVAL;
+	long timeout = 0;
+
+	timeout = wait_io_event_interruptible_timeout(
+			device->wait_queue,
+			kgsl_check_timestamp(device, timestamp),
+			msecs_to_jiffies(msecs));
+
+	if (timeout > 0)
+		status = 0;
+	else if (timeout == 0) {
+		status = -ETIMEDOUT;
+		device->state = KGSL_STATE_HUNG;
+		KGSL_PWR_WARN(device, "state -> HUNG, device %d\n", device->id);
+	} else
+		status = timeout;
+
+	return status;
+}
+
+static void
+z180_drawctxt_destroy(struct kgsl_device *device,
+			  struct kgsl_context *context)
+{
+	struct z180_device *z180_dev = Z180_DEVICE(device);
+
+	z180_idle(device, KGSL_TIMEOUT_DEFAULT);
+
+	if (z180_dev->ringbuffer.prevctx == context->id) {
+		z180_dev->ringbuffer.prevctx = Z180_INVALID_CONTEXT;
+		device->mmu.hwpagetable = device->mmu.defaultpagetable;
+		kgsl_setstate(device, KGSL_MMUFLAGS_PTUPDATE);
+	}
+}
+
+static void z180_power_stats(struct kgsl_device *device,
+			    struct kgsl_power_stats *stats)
+{
+	stats->total_time = 0;
+	stats->busy_time = 0;
+}
+
+static void z180_irqctrl(struct kgsl_device *device, int state)
+{
+	/* Control interrupts for Z180 and the Z180 MMU */
+
+	if (state) {
+		z180_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 3);
+		z180_regwrite(device, MH_INTERRUPT_MASK, KGSL_MMU_INT_MASK);
+	} else {
+		z180_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 0);
+		z180_regwrite(device, MH_INTERRUPT_MASK, 0);
+	}
+}
+
+static const struct kgsl_functable z180_functable = {
+	/* Mandatory functions */
+	.regread = z180_regread,
+	.regwrite = z180_regwrite,
+	.idle = z180_idle,
+	.isidle = z180_isidle,
+	.suspend_context = z180_suspend_context,
+	.start = z180_start,
+	.stop = z180_stop,
+	.getproperty = z180_getproperty,
+	.waittimestamp = z180_waittimestamp,
+	.readtimestamp = z180_readtimestamp,
+	.issueibcmds = z180_cmdstream_issueibcmds,
+	.setup_pt = z180_setup_pt,
+	.cleanup_pt = z180_cleanup_pt,
+	.power_stats = z180_power_stats,
+	.irqctrl = z180_irqctrl,
+	/* Optional functions */
+	.setstate = z180_setstate,
+	.drawctxt_create = NULL,
+	.drawctxt_destroy = z180_drawctxt_destroy,
+	.ioctl = NULL,
+};
+
+static struct platform_device_id z180_id_table[] = {
+	{ DEVICE_2D0_NAME, (kernel_ulong_t)&device_2d0.dev, },
+	{ DEVICE_2D1_NAME, (kernel_ulong_t)&device_2d1.dev, },
+	{ },
+};
+MODULE_DEVICE_TABLE(platform, z180_id_table);
+
+static struct platform_driver z180_platform_driver = {
+	.probe = z180_probe,
+	.remove = __devexit_p(z180_remove),
+	.suspend = kgsl_suspend_driver,
+	.resume = kgsl_resume_driver,
+	.id_table = z180_id_table,
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = DEVICE_2D_NAME,
+		.pm = &kgsl_pm_ops,
+	}
+};
+
+static int __init kgsl_2d_init(void)
+{
+	return platform_driver_register(&z180_platform_driver);
+}
+
+static void __exit kgsl_2d_exit(void)
+{
+	platform_driver_unregister(&z180_platform_driver);
+}
+
+module_init(kgsl_2d_init);
+module_exit(kgsl_2d_exit);
+
+MODULE_DESCRIPTION("2D Graphics driver");
+MODULE_VERSION("1.2");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:kgsl_2d");
diff --git a/drivers/gpu/msm/z180.h b/drivers/gpu/msm/z180.h
new file mode 100644
index 0000000..28b1cc6
--- /dev/null
+++ b/drivers/gpu/msm/z180.h
@@ -0,0 +1,35 @@
+/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __Z180_H
+#define __Z180_H
+
+#include "kgsl_device.h"
+
+#define DEVICE_2D_NAME "kgsl-2d"
+#define DEVICE_2D0_NAME "kgsl-2d0"
+#define DEVICE_2D1_NAME "kgsl-2d1"
+
+struct z180_ringbuffer {
+	unsigned int prevctx;
+	struct kgsl_memdesc      cmdbufdesc;
+};
+
+struct z180_device {
+	struct kgsl_device dev;    /* Must be first field in this struct */
+	int current_timestamp;
+	int timestamp;
+	struct z180_ringbuffer ringbuffer;
+	spinlock_t cmdwin_lock;
+};
+
+#endif /* __Z180_H */
diff --git a/drivers/gpu/msm/z180_reg.h b/drivers/gpu/msm/z180_reg.h
new file mode 100644
index 0000000..a3b0412
--- /dev/null
+++ b/drivers/gpu/msm/z180_reg.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __Z180_REG_H
+#define __Z180_REG_H
+
+#define REG_VGC_IRQSTATUS__MH_MASK                         0x00000001L
+#define REG_VGC_IRQSTATUS__G2D_MASK                        0x00000002L
+#define REG_VGC_IRQSTATUS__FIFO_MASK                       0x00000004L
+
+#define	MH_ARBITER_CONFIG__SAME_PAGE_GRANULARITY__SHIFT    0x00000006
+#define	MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT            0x00000007
+#define	MH_ARBITER_CONFIG__L1_ARB_HOLD_ENABLE__SHIFT       0x00000008
+#define	MH_ARBITER_CONFIG__L2_ARB_CONTROL__SHIFT           0x00000009
+#define	MH_ARBITER_CONFIG__PAGE_SIZE__SHIFT                0x0000000a
+#define	MH_ARBITER_CONFIG__TC_REORDER_ENABLE__SHIFT        0x0000000d
+#define	MH_ARBITER_CONFIG__TC_ARB_HOLD_ENABLE__SHIFT       0x0000000e
+#define	MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT   0x0000000f
+#define	MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT          0x00000010
+#define	MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT           0x00000016
+#define	MH_ARBITER_CONFIG__VGT_CLNT_ENABLE__SHIFT          0x00000017
+#define	MH_ARBITER_CONFIG__TC_CLNT_ENABLE__SHIFT           0x00000018
+#define	MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT           0x00000019
+#define	MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT           0x0000001a
+
+#define ADDR_MH_ARBITER_CONFIG           0x0A40
+#define ADDR_MH_AXI_HALT_CONTROL         0x0A50
+#define ADDR_MH_CLNT_INTF_CTRL_CONFIG1   0x0A54
+#define ADDR_MH_CLNT_INTF_CTRL_CONFIG2   0x0A55
+#define ADDR_VGC_MH_READ_ADDR            0x0510
+#define ADDR_VGC_MH_DATA_ADDR            0x0518
+#define ADDR_VGC_COMMANDSTREAM           0x0000
+#define ADDR_VGC_IRQENABLE               0x0438
+#define ADDR_VGC_IRQSTATUS               0x0418
+#define ADDR_VGC_IRQ_ACTIVE_CNT          0x04E0
+#define ADDR_VGC_MMUCOMMANDSTREAM        0x03FC
+#define ADDR_VGV3_CONTROL                0x0070
+#define ADDR_VGV3_LAST                   0x007F
+#define ADDR_VGV3_MODE                   0x0071
+#define ADDR_VGV3_NEXTADDR               0x0075
+#define ADDR_VGV3_NEXTCMD                0x0076
+#define ADDR_VGV3_WRITEADDR              0x0072
+
+#endif /* __Z180_REG_H */
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 5f888f7..ef3cb2e 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -28,6 +28,14 @@
 	tristate
 	default n
 
+config SENSORS_WPCE775X
+	bool "Winbond WPCE775X"
+	depends on I2C
+	default n
+	help
+	  This driver provides support for the Winbond WPCE775X Embedded
+	  Controller, which provides LCD backlight, LED, and battery control.
+
 config HWMON_DEBUG_CHIP
 	bool "Hardware Monitoring Chip debugging messages"
 	default n
@@ -767,6 +775,19 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called max6650.
 
+config SENSORS_MSM_ADC
+	tristate "MSM ADC Driver for current measurement"
+	depends on ARCH_MSM7X30 || ARCH_MSM8X60 || ARCH_FSM9XXX
+	default n
+	help
+	  Provides an interface for measuring the AMUX channels of the XOADC,
+	  the MPPs, and XOTHERM on the pmic8058 for msm8x60, with post
+	  processing of the raw ADC data for each channel. Say yes here to
+	  also read the LTC and EPM ADC channels, which measure current in
+	  real time from various power rails on the Fluid board.  The ADC
+	  circuit internally uses an array of LTC2499 and EPM ADCs in a
+	  differential configuration to provide a flat set of addressable
+	  channels.
+
 config SENSORS_PC87360
 	tristate "National Semiconductor PC87360 family"
 	select HWMON_VID
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 28061cf..eacfcb5 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -118,6 +118,8 @@
 obj-$(CONFIG_SENSORS_W83L786NG)	+= w83l786ng.o
 obj-$(CONFIG_SENSORS_WM831X)	+= wm831x-hwmon.o
 obj-$(CONFIG_SENSORS_WM8350)	+= wm8350-hwmon.o
+obj-$(CONFIG_SENSORS_WPCE775X)	+= wpce775x.o
+obj-$(CONFIG_SENSORS_MSM_ADC)	+= msm_adc.o m_adcproc.o
 
 # PMBus drivers
 obj-$(CONFIG_PMBUS)		+= pmbus_core.o
diff --git a/drivers/hwmon/m_adcproc.c b/drivers/hwmon/m_adcproc.c
new file mode 100644
index 0000000..70e505e
--- /dev/null
+++ b/drivers/hwmon/m_adcproc.c
@@ -0,0 +1,469 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+
+#include <linux/msm_adc.h>
+
+#define KELVINMIL_DEGMIL	273160
+
+static const struct adc_map_pt adcmap_batttherm[] = {
+	{2020,	-30},
+	{1923,	-20},
+	{1796,	-10},
+	{1640,	  0},
+	{1459,	 10},
+	{1260,	 20},
+	{1159,	 25},
+	{1059,	 30},
+	{871,	 40},
+	{706,	 50},
+	{567,	 60},
+	{453,	 70},
+	{364,	 80}
+};
+
+static const struct adc_map_pt adcmap_msmtherm[] = {
+	{2150,	-30},
+	{2107,	-20},
+	{2037,	-10},
+	{1929,	  0},
+	{1776,	 10},
+	{1579,	 20},
+	{1467,	 25},
+	{1349,	 30},
+	{1108,	 40},
+	{878,	 50},
+	{677,	 60},
+	{513,	 70},
+	{385,	 80},
+	{287,	 90},
+	{215,	100},
+	{186,	110},
+	{107,	120}
+};
+
+static const struct adc_map_pt adcmap_ntcg104ef104fb[] = {
+	{696483,	-40960},
+	{649148,	-39936},
+	{605368,	-38912},
+	{564809,	-37888},
+	{527215,	-36864},
+	{492322,	-35840},
+	{460007,	-34816},
+	{429982,	-33792},
+	{402099,	-32768},
+	{376192,	-31744},
+	{352075,	-30720},
+	{329714,	-29696},
+	{308876,	-28672},
+	{289480,	-27648},
+	{271417,	-26624},
+	{254574,	-25600},
+	{238903,	-24576},
+	{224276,	-23552},
+	{210631,	-22528},
+	{197896,	-21504},
+	{186007,	-20480},
+	{174899,	-19456},
+	{164521,	-18432},
+	{154818,	-17408},
+	{145744,	-16384},
+	{137265,	-15360},
+	{129307,	-14336},
+	{121866,	-13312},
+	{114896,	-12288},
+	{108365,	-11264},
+	{102252,	-10240},
+	{96499,		-9216},
+	{91111,		-8192},
+	{86055,		-7168},
+	{81308,		-6144},
+	{76857,		-5120},
+	{72660,		-4096},
+	{68722,		-3072},
+	{65020,		-2048},
+	{61538,		-1024},
+	{58261,		0},
+	{55177,		1024},
+	{52274,		2048},
+	{49538,		3072},
+	{46962,		4096},
+	{44531,		5120},
+	{42243,		6144},
+	{40083,		7168},
+	{38045,		8192},
+	{36122,		9216},
+	{34308,		10240},
+	{32592,		11264},
+	{30972,		12288},
+	{29442,		13312},
+	{27995,		14336},
+	{26624,		15360},
+	{25333,		16384},
+	{24109,		17408},
+	{22951,		18432},
+	{21854,		19456},
+	{20807,		20480},
+	{19831,		21504},
+	{18899,		22528},
+	{18016,		23552},
+	{17178,		24576},
+	{16384,		25600},
+	{15631,		26624},
+	{14916,		27648},
+	{14237,		28672},
+	{13593,		29696},
+	{12976,		30720},
+	{12400,		31744},
+	{11848,		32768},
+	{11324,		33792},
+	{10825,		34816},
+	{10354,		35840},
+	{9900,		36864},
+	{9471,		37888},
+	{9062,		38912},
+	{8674,		39936},
+	{8306,		40960},
+	{7951,		41984},
+	{7616,		43008},
+	{7296,		44032},
+	{6991,		45056},
+	{6701,		46080},
+	{6424,		47104},
+	{6160,		48128},
+	{5908,		49152},
+	{5667,		50176},
+	{5439,		51200},
+	{5219,		52224},
+	{5010,		53248},
+	{4810,		54272},
+	{4619,		55296},
+	{4440,		56320},
+	{4263,		57344},
+	{4097,		58368},
+	{3938,		59392},
+	{3785,		60416},
+	{3637,		61440},
+	{3501,		62464},
+	{3368,		63488},
+	{3240,		64512},
+	{3118,		65536},
+	{2998,		66560},
+	{2889,		67584},
+	{2782,		68608},
+	{2680,		69632},
+	{2581,		70656},
+	{2490,		71680},
+	{2397,		72704},
+	{2310,		73728},
+	{2227,		74752},
+	{2147,		75776},
+	{2064,		76800},
+	{1998,		77824},
+	{1927,		78848},
+	{1860,		79872},
+	{1795,		80896},
+	{1736,		81920},
+	{1673,		82944},
+	{1615,		83968},
+	{1560,		84992},
+	{1507,		86016},
+	{1456,		87040},
+	{1407,		88064},
+	{1360,		89088},
+	{1314,		90112},
+	{1271,		91136},
+	{1228,		92160},
+	{1189,		93184},
+	{1150,		94208},
+	{1112,		95232},
+	{1076,		96256},
+	{1042,		97280},
+	{1008,		98304},
+	{976,		99328},
+	{945,		100352},
+	{915,		101376},
+	{886,		102400},
+	{859,		103424},
+	{832,		104448},
+	{807,		105472},
+	{782,		106496},
+	{756,		107520},
+	{735,		108544},
+	{712,		109568},
+	{691,		110592},
+	{670,		111616},
+	{650,		112640},
+	{631,		113664},
+	{612,		114688},
+	{594,		115712},
+	{577,		116736},
+	{560,		117760},
+	{544,		118784},
+	{528,		119808},
+	{513,		120832},
+	{498,		121856},
+	{483,		122880},
+	{470,		123904},
+	{457,		124928},
+	{444,		125952},
+	{431,		126976},
+	{419,		128000}
+};
+
+static int32_t
+	adc_map_linear(const struct adc_map_pt *pts,
+		uint32_t tablesize, int32_t input, int64_t *output)
+{
+	bool descending = 1;
+	uint32_t i = 0;
+
+	if ((pts == NULL) || (output == NULL))
+		return -EINVAL;
+
+	/* Check if table is descending or ascending */
+	if (tablesize > 1) {
+		if (pts[0].x < pts[1].x)
+			descending = 0;
+	}
+
+	while (i < tablesize) {
+		if ((descending == 1) && (pts[i].x < input)) {
+			/* table entry is less than measured
+				value and table is descending, stop */
+			break;
+		} else if ((descending == 0) &&
+				(pts[i].x > input)) {
+			/* table entry is greater than measured
+				value and table is ascending, stop */
+			break;
+		} else
+			i++;
+	}
+
+	if (i == 0)
+		*output = pts[0].y;
+	else if (i == tablesize)
+		*output = pts[tablesize-1].y;
+	else {
+		/* result is between search_index and search_index-1,
+		 * interpolate linearly */
+		*output = (((int32_t) ((pts[i].y - pts[i-1].y)*
+				(input - pts[i-1].x))/
+				(pts[i].x - pts[i-1].x))+
+				pts[i-1].y);
+	}
+
+	return 0;
+}
+
+int32_t scale_default(int32_t adc_code,
+				const struct adc_properties *adc_properties,
+				const struct chan_properties *chan_properties,
+				struct adc_chan_result *adc_chan_result)
+{
+	bool negative_rawfromoffset = 0;
+	int32_t rawfromoffset = adc_code - chan_properties->adc_graph->offset;
+
+	if (!chan_properties->gain_numerator ||
+		!chan_properties->gain_denominator)
+		return -EINVAL;
+
+	adc_chan_result->adc_code = adc_code;
+	if (rawfromoffset < 0) {
+		if (adc_properties->bipolar) {
+			rawfromoffset = (rawfromoffset ^ -1) +  1;
+			negative_rawfromoffset = 1;
+		} else
+			rawfromoffset = 0;
+	}
+
+	if (rawfromoffset >= 1 << adc_properties->bitresolution)
+		rawfromoffset = (1 << adc_properties->bitresolution) - 1;
+
+	adc_chan_result->measurement = (int64_t)rawfromoffset*
+					chan_properties->adc_graph->dx*
+					chan_properties->gain_denominator;
+
+	/* do_div only performs positive integer division! */
+	do_div(adc_chan_result->measurement, chan_properties->adc_graph->dy*
+					chan_properties->gain_numerator);
+
+	if (negative_rawfromoffset)
+		adc_chan_result->measurement =
+		(adc_chan_result->measurement ^ -1) + 1;
+
+	/* Note: adc_chan_result->measurement is in the unit of
+	 * adc_properties.adc_reference. For generic channel processing,
+	 * channel measurement is a scale/ratio relative to the adc
+	 * reference input */
+	adc_chan_result->physical = (int32_t) adc_chan_result->measurement;
+
+	return 0;
+}
+
+int32_t scale_batt_therm(int32_t adc_code,
+				const struct adc_properties *adc_properties,
+				const struct chan_properties *chan_properties,
+				struct adc_chan_result *adc_chan_result)
+{
+	scale_default(adc_code, adc_properties, chan_properties,
+			adc_chan_result);
+	/* convert mV ---> degC using the table */
+	return adc_map_linear(
+			adcmap_batttherm,
+			sizeof(adcmap_batttherm)/sizeof(adcmap_batttherm[0]),
+			adc_chan_result->physical,
+			&adc_chan_result->physical);
+}
+
+int32_t scale_msm_therm(int32_t adc_code,
+		const struct adc_properties *adc_properties,
+		const struct chan_properties *chan_properties,
+		struct adc_chan_result *adc_chan_result)
+{
+	scale_default(adc_code, adc_properties, chan_properties,
+			adc_chan_result);
+	/* convert mV ---> degC using the table */
+	return adc_map_linear(
+			adcmap_msmtherm,
+			sizeof(adcmap_msmtherm)/sizeof(adcmap_msmtherm[0]),
+			adc_chan_result->physical,
+			&adc_chan_result->physical);
+}
+
+int32_t scale_pmic_therm(int32_t adc_code,
+				const struct adc_properties *adc_properties,
+				const struct chan_properties *chan_properties,
+				struct adc_chan_result *adc_chan_result)
+{
+	/* 2mV/K */
+	int32_t rawfromoffset = adc_code - chan_properties->adc_graph->offset;
+
+	if (!chan_properties->gain_numerator ||
+		!chan_properties->gain_denominator)
+		return -EINVAL;
+
+	adc_chan_result->adc_code = adc_code;
+	if (rawfromoffset > 0) {
+		if (rawfromoffset >= 1 << adc_properties->bitresolution)
+			rawfromoffset = (1 << adc_properties->bitresolution)
+									- 1;
+		adc_chan_result->measurement = (int64_t)rawfromoffset*
+					chan_properties->adc_graph->dx*
+					chan_properties->gain_denominator*1000;
+		do_div(adc_chan_result->measurement,
+			chan_properties->adc_graph->dy*
+			chan_properties->gain_numerator*2);
+	} else {
+		adc_chan_result->measurement = 0;
+	}
+	/* Note: adc_chan_result->measurement is in the unit of
+		adc_properties.adc_reference */
+	adc_chan_result->physical = (int32_t)adc_chan_result->measurement;
+	/* Change to .001 deg C */
+	adc_chan_result->physical -= KELVINMIL_DEGMIL;
+	adc_chan_result->measurement <<= 1;
+
+	return 0;
+}
+
+/* Scales the ADC code to 0.001 degrees C using the map
+ * table for the XO thermistor.
+ */
+int32_t tdkntcgtherm(int32_t adc_code,
+			const struct adc_properties *adc_properties,
+			const struct chan_properties *chan_properties,
+			struct adc_chan_result *adc_chan_result)
+{
+	int32_t offset = chan_properties->adc_graph->offset,
+		dy = chan_properties->adc_graph->dy,
+		dx = chan_properties->adc_graph->dx,
+		fullscale_calibrated_adc_code;
+
+	uint32_t rt_r25;
+	uint32_t num1, num2, denom;
+
+	adc_chan_result->adc_code = adc_code;
+	fullscale_calibrated_adc_code = dy + offset;
+	/* The above is a mathematical shortcut that avoids a lot of
+	   computation; the more general expression
+		(adc_properties->adc_reference*dy+dx*offset+(dx>>1))/dx
+	   applies when the two reference voltages differ from 0 and the
+	   full-scale voltage. */
+
+	if ((dy == 0) || (dx == 0) ||
+			(offset >= fullscale_calibrated_adc_code)) {
+		return -EINVAL;
+	} else {
+		if (adc_code >= fullscale_calibrated_adc_code) {
+			rt_r25 = (uint32_t)-1;
+		} else if (adc_code <= offset) {
+			rt_r25 = 0;
+		} else {
+			/* The formula used is (adc_code of current reading -
+			 * offset) / (calibrated fullscale adc code - adc_code
+			 * of current reading).  For this channel, at this
+			 * time, chan_properties->gain_numerator =
+			 * chan_properties->gain_denominator = 1, so there is
+			 * no need to fold them into the formula; multiplying
+			 * and dividing by 1 yields the same result at extra
+			 * computational cost. */
+			num1 = (adc_code - offset) << 14;
+			num2 = (fullscale_calibrated_adc_code - adc_code) >> 1;
+			denom = fullscale_calibrated_adc_code - adc_code;
+
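+			/* rt_r25 is the thermistor ratio Rt/R25 in Q14 fixed
+			 * point; num2 adds denom/2 so that the division below
+			 * rounds to the nearest integer. */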
+			if ((int)denom <= 0)
+				rt_r25 = 0x7FFFFFFF;
+			else
+				rt_r25 = (num1 + num2) / denom;
+		}
+
+		if (rt_r25 > 0x7FFFFFFF)
+			rt_r25 = 0x7FFFFFFF;
+
+		adc_map_linear(adcmap_ntcg104ef104fb,
+		sizeof(adcmap_ntcg104ef104fb)/sizeof(adcmap_ntcg104ef104fb[0]),
+		(int32_t)rt_r25, &adc_chan_result->physical);
+	}
+
+	return 0;
+}
+
+int32_t scale_xtern_chgr_cur(int32_t adc_code,
+			const struct adc_properties *adc_properties,
+			const struct chan_properties *chan_properties,
+			struct adc_chan_result *adc_chan_result)
+{
+	int32_t rawfromoffset = adc_code - chan_properties->adc_graph->offset;
+
+	if (!chan_properties->gain_numerator ||
+		!chan_properties->gain_denominator)
+		return -EINVAL;
+
+	adc_chan_result->adc_code = adc_code;
+	if (rawfromoffset > 0) {
+		if (rawfromoffset >= 1 << adc_properties->bitresolution)
+			rawfromoffset = (1 << adc_properties->bitresolution)
+									- 1;
+		adc_chan_result->measurement = ((int64_t)rawfromoffset * 5)*
+						chan_properties->adc_graph->dx*
+					chan_properties->gain_denominator;
+		do_div(adc_chan_result->measurement,
+					chan_properties->adc_graph->dy*
+					chan_properties->gain_numerator);
+	} else {
+		adc_chan_result->measurement = 0;
+	}
+	adc_chan_result->physical = (int32_t) adc_chan_result->measurement;
+
+	return 0;
+}
diff --git a/drivers/hwmon/msm_adc.c b/drivers/hwmon/msm_adc.c
new file mode 100644
index 0000000..42bcd07
--- /dev/null
+++ b/drivers/hwmon/msm_adc.c
@@ -0,0 +1,1538 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/uaccess.h>
+#include <linux/msm_adc.h>
+#include <linux/pmic8058-xoadc.h>
+#include <linux/slab.h>
+#include <linux/semaphore.h>
+
+#include <mach/dal.h>
+
+#define MSM_ADC_DRIVER_NAME		"msm_adc"
+#define MSM_ADC_MAX_FNAME		15
+
+#define MSM_ADC_DALRPC_DEVICEID		0x02000067
+#define MSM_ADC_DALRPC_PORT_NAME	"DAL00"
+#define MSM_ADC_DALRPC_CPU		SMD_APPS_MODEM
+
+#define MSM_ADC_DALRPC_CMD_REQ_CONV	9
+#define MSM_ADC_DALRPC_CMD_INPUT_PROP	11
+
+#define MSM_ADC_DALRC_CONV_TIMEOUT	(5 * HZ)  /* 5 seconds */
+
+enum dal_error {
+	DAL_ERROR_INVALID_DEVICE_IDX = 1,
+	DAL_ERROR_INVALID_CHANNEL_IDX,
+	DAL_ERROR_NULL_POINTER,
+	DAL_ERROR_DEVICE_QUEUE_FULL,
+	DAL_ERROR_INVALID_PROPERTY_LENGTH,
+	DAL_ERROR_REMOTE_EVENT_POOL_FULL
+};
+
+enum dal_result_status {
+	DAL_RESULT_STATUS_INVALID,
+	DAL_RESULT_STATUS_VALID
+};
+
+struct dal_conv_state {
+	struct dal_conv_slot		context[MSM_ADC_DEV_MAX_INFLIGHT];
+	struct list_head		slots;
+	struct mutex			list_lock;
+	struct semaphore		slot_count;
+};
+
+struct adc_dev {
+	char				*name;
+	uint32_t			nchans;
+	struct dal_conv_state		conv;
+	struct dal_translation		transl;
+	struct sensor_device_attribute	*sens_attr;
+	char				**fnames;
+};
+
+struct msm_adc_drv {
+	/*  Common to both XOADC and EPM  */
+	struct platform_device		*pdev;
+	struct device			*hwmon;
+	struct miscdevice		misc;
+	/*  XOADC variables  */
+	struct sensor_device_attribute	*sens_attr;
+	struct workqueue_struct		*wq;
+	atomic_t			online;
+	atomic_t			total_outst;
+	wait_queue_head_t		total_outst_wait;
+
+	/*  EPM variables  */
+	void				*dev_h;
+	struct adc_dev			*devs[MSM_ADC_MAX_NUM_DEVS];
+	struct mutex			prop_lock;
+	atomic_t			rpc_online;
+	atomic_t			rpc_total_outst;
+	wait_queue_head_t		rpc_total_outst_wait;
+};
+
+static bool epm_init;
+static bool epm_fluid_enabled;
+
+/* Needed to support file_op interfaces */
+static struct msm_adc_drv *msm_adc_drv;
+
+static ssize_t msm_adc_show_curr(struct device *dev,
+				struct device_attribute *devattr, char *buf);
+
+static int msm_rpc_adc_blocking_conversion(struct msm_adc_drv *msm_adc,
+				uint32_t chan, struct adc_chan_result *result);
+
+static int msm_adc_blocking_conversion(struct msm_adc_drv *msm_adc,
+				uint32_t chan, struct adc_chan_result *result);
+
+static int msm_adc_open(struct inode *inode, struct file *file)
+{
+	struct msm_client_data *client;
+	struct msm_adc_drv *msm_adc = msm_adc_drv;
+	struct platform_device *pdev = msm_adc->pdev;
+
+	client = kzalloc(sizeof(struct msm_client_data), GFP_KERNEL);
+	if (!client) {
+		dev_err(&pdev->dev, "Unable to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	if (!try_module_get(THIS_MODULE)) {
+		kfree(client);
+		return -EACCES;
+	}
+
+	mutex_init(&client->lock);
+	INIT_LIST_HEAD(&client->complete_list);
+	init_waitqueue_head(&client->data_wait);
+	init_waitqueue_head(&client->outst_wait);
+
+	client->online = 1;
+
+	file->private_data = client;
+
+	return nonseekable_open(inode, file);
+}
+
+static inline void msm_adc_restore_slot(struct dal_conv_state *conv_s,
+					struct dal_conv_slot *slot)
+{
+	mutex_lock(&conv_s->list_lock);
+	list_add(&slot->list, &conv_s->slots);
+	mutex_unlock(&conv_s->list_lock);
+
+	up(&conv_s->slot_count);
+}
+
+static int no_pending_client_requests(struct msm_client_data *client)
+{
+	mutex_lock(&client->lock);
+
+	if (client->num_outstanding == 0) {
+		mutex_unlock(&client->lock);
+		return 1;
+	}
+
+	mutex_unlock(&client->lock);
+
+	return 0;
+}
+
+static int data_avail(struct msm_client_data *client, uint32_t *pending)
+{
+	uint32_t completed;
+
+	mutex_lock(&client->lock);
+	completed = client->num_complete;
+	mutex_unlock(&client->lock);
+
+	if (completed > 0) {
+		if (pending != NULL)
+			*pending = completed;
+		return 1;
+	}
+
+	return 0;
+}
+
+static int msm_adc_release(struct inode *inode, struct file *file)
+{
+	struct msm_client_data *client = file->private_data;
+	struct adc_conv_slot *slot, *tmp;
+	int rc;
+	struct msm_adc_platform_data *pdata =
+					msm_adc_drv->pdev->dev.platform_data;
+	struct msm_adc_channels *channel = pdata->channel;
+
+	module_put(THIS_MODULE);
+
+	mutex_lock(&client->lock);
+
+	/* prevent any further requests while we teardown the client */
+	client->online = 0;
+
+	mutex_unlock(&client->lock);
+
+	/*
+	 * We may still have outstanding transactions in flight from this
+	 * client that have not completed. Make sure they're completed
+	 * before removing the client.
+	 */
+	rc = wait_event_interruptible(client->outst_wait,
+				      no_pending_client_requests(client));
+	if (rc) {
+		pr_err("%s: wait_event_interruptible failed rc = %d\n",
+								__func__, rc);
+		return rc;
+	}
+
+	/*
+	 * All transactions have completed. Add slot resources back to the
+	 * appropriate devices.
+	 */
+	list_for_each_entry_safe(slot, tmp, &client->complete_list, list) {
+		slot->client = NULL;
+		list_del(&slot->list);
+		channel[slot->conv.result.chan].adc_access_fn->adc_restore_slot(
+			channel[slot->conv.result.chan].adc_dev_instance, slot);
+	}
+
+	kfree(client);
+
+	return 0;
+}
+
+static int msm_adc_translate_dal_to_hwmon(struct msm_adc_drv *msm_adc,
+					  uint32_t chan,
+					  struct adc_dev_spec *dest)
+{
+	struct dal_translation *transl;
+	struct msm_adc_platform_data *pdata = msm_adc->pdev->dev.platform_data;
+	int i;
+
+	for (i = 0; i < pdata->num_adc; i++) {
+		transl = &msm_adc->devs[i]->transl;
+		if (chan >= transl->hwmon_start &&
+		    chan <= transl->hwmon_end) {
+			dest->dal.dev_idx = transl->dal_dev_idx;
+			dest->hwmon_dev_idx = transl->hwmon_dev_idx;
+			dest->dal.chan_idx = chan - transl->hwmon_start;
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
+static int msm_adc_translate_hwmon_to_dal(struct msm_adc_drv *msm_adc,
+					  struct adc_dev_spec *source,
+					  uint32_t *chan)
+{
+	struct msm_adc_platform_data *pdata = msm_adc->pdev->dev.platform_data;
+	struct dal_translation *transl;
+	int i;
+
+	for (i = 0; i < pdata->num_adc; i++) {
+		transl = &msm_adc->devs[i]->transl;
+		if (source->dal.dev_idx != transl->dal_dev_idx)
+			continue;
+		*chan = transl->hwmon_start + source->dal.chan_idx;
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int msm_adc_getinputproperties(struct msm_adc_drv *msm_adc,
+					  const char *lookup_name,
+					  struct adc_dev_spec *result)
+{
+	struct device *dev = &msm_adc->pdev->dev;
+	int rc;
+
+	mutex_lock(&msm_adc->prop_lock);
+
+	rc = dalrpc_fcn_8(MSM_ADC_DALRPC_CMD_INPUT_PROP, msm_adc->dev_h,
+			  lookup_name, strlen(lookup_name) + 1,
+			  &result->dal, sizeof(struct dal_dev_spec));
+	if (rc) {
+		dev_err(dev, "DAL getprop request failed: rc = %d\n", rc);
+		mutex_unlock(&msm_adc->prop_lock);
+		return -EIO;
+	}
+
+	mutex_unlock(&msm_adc->prop_lock);
+	return rc;
+}
+
+static int msm_adc_lookup(struct msm_adc_drv *msm_adc,
+			  struct msm_adc_lookup *lookup)
+{
+	struct msm_adc_platform_data *pdata = msm_adc->pdev->dev.platform_data;
+	struct adc_dev_spec target;
+	int rc = 0, i = 0;
+	uint32_t len = 0;
+
+	len = strnlen(lookup->name, MSM_ADC_MAX_CHAN_STR);
+	while (i < pdata->num_chan_supported) {
+		if (strncmp(lookup->name, pdata->channel[i].name, len))
+			i++;
+		else
+			break;
+	}
+
+	if (pdata->num_chan_supported > 0 && i < pdata->num_chan_supported) {
+		lookup->chan_idx = i;
+	} else if (msm_adc->dev_h) {
+		rc = msm_adc_getinputproperties(msm_adc, lookup->name, &target);
+		if (rc) {
+			pr_err("%s: Lookup failed for %s\n", __func__,
+				lookup->name);
+			return rc;
+		}
+		rc = msm_adc_translate_hwmon_to_dal(msm_adc, &target,
+						&lookup->chan_idx);
+		if (rc)
+			pr_err("%s: Translation failed for %s\n", __func__,
+						lookup->name);
+	} else {
+		pr_err("%s: Lookup failed for %s\n", __func__, lookup->name);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+static int msm_adc_aio_conversion(struct msm_adc_drv *msm_adc,
+				  struct adc_chan_result *request,
+				  struct msm_client_data *client)
+{
+	struct msm_adc_platform_data *pdata =
+					msm_adc_drv->pdev->dev.platform_data;
+	struct msm_adc_channels *channel = &pdata->channel[request->chan];
+	struct adc_conv_slot *slot;
+
+	/* we could block here, but only for a bounded time */
+	channel->adc_access_fn->adc_slot_request(channel->adc_dev_instance,
+									&slot);
+
+	if (slot) {
+		atomic_inc(&msm_adc->total_outst);
+		mutex_lock(&client->lock);
+		client->num_outstanding++;
+		mutex_unlock(&client->lock);
+
+		/* indicates non blocking request to callback handler */
+		slot->blocking = 0;
+		slot->compk = NULL; /* for kernel space usage; n/a for user space */
+		slot->conv.result.chan = client->adc_chan = request->chan;
+		slot->client = client;
+		slot->adc_request = START_OF_CONV;
+		slot->chan_path = channel->chan_path_type;
+		slot->chan_adc_config = channel->adc_config_type;
+		slot->chan_adc_calib = channel->adc_calib_type;
+		queue_work(msm_adc->wq, &slot->work);
+		return 0;
+	}
+	return -EBUSY;
+}
+
+static int msm_adc_fluid_hw_deinit(struct msm_adc_drv *msm_adc)
+{
+	struct msm_adc_platform_data *pdata = msm_adc->pdev->dev.platform_data;
+
+	if (!epm_init)
+		return -EINVAL;
+
+	if (pdata->gpio_config == APROC_CONFIG &&
+		epm_fluid_enabled && pdata->adc_fluid_disable != NULL) {
+		pdata->adc_fluid_disable();
+		epm_fluid_enabled = false;
+	}
+
+	return 0;
+}
+
+static int msm_adc_fluid_hw_init(struct msm_adc_drv *msm_adc)
+{
+	struct msm_adc_platform_data *pdata = msm_adc->pdev->dev.platform_data;
+
+	if (!epm_init)
+		return -EINVAL;
+
+	if (!pdata->adc_fluid_enable)
+		return -ENODEV;
+
+	printk(KERN_DEBUG "msm_adc_fluid_hw_init: Calling adc_fluid_enable.\n");
+
+	if (pdata->gpio_config == APROC_CONFIG && !epm_fluid_enabled) {
+		pdata->adc_fluid_enable();
+		epm_fluid_enabled = true;
+	}
+
+	/* Return success for now; errors from the hw init configuration
+	 * should eventually be checked and propagated here. */
+	return 0;
+}
+
+static int msm_adc_poll_complete(struct msm_adc_drv *msm_adc,
+			     struct msm_client_data *client, uint32_t *pending)
+{
+	int rc;
+
+	/*
+	 * Don't proceed if there's nothing queued on this client.
+	 * We could deadlock otherwise in a single threaded scenario.
+	 */
+	if (no_pending_client_requests(client) && !data_avail(client, pending))
+		return -EDEADLK;
+
+	rc = wait_event_interruptible(client->data_wait,
+				data_avail(client, pending));
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+static int msm_adc_read_result(struct msm_adc_drv *msm_adc,
+			       struct msm_client_data *client,
+			       struct adc_chan_result *result)
+{
+	struct msm_adc_platform_data *pdata = msm_adc->pdev->dev.platform_data;
+	struct msm_adc_channels *channel = pdata->channel;
+	struct adc_conv_slot *slot;
+	int rc = 0;
+
+	mutex_lock(&client->lock);
+
+	slot = list_first_entry(&client->complete_list,
+				struct adc_conv_slot, list);
+	if (!slot) {
+		mutex_unlock(&client->lock);
+		return -ENOMSG;
+	}
+
+	slot->client = NULL;
+	list_del(&slot->list);
+
+	client->num_complete--;
+
+	mutex_unlock(&client->lock);
+
+	*result = slot->conv.result;
+
+	/* restore this slot to reserve */
+	channel[slot->conv.result.chan].adc_access_fn->adc_restore_slot(
+		channel[slot->conv.result.chan].adc_dev_instance, slot);
+
+	return rc;
+}
+
+static long msm_adc_ioctl(struct file *file, unsigned int cmd,
+					     unsigned long arg)
+{
+	struct msm_client_data *client = file->private_data;
+	struct msm_adc_drv *msm_adc = msm_adc_drv;
+	struct platform_device *pdev = msm_adc->pdev;
+	struct msm_adc_platform_data *pdata = pdev->dev.platform_data;
+	uint32_t block_res = 0;
+
+	int rc;
+
+	switch (cmd) {
+	case MSM_ADC_REQUEST:
+		{
+			struct adc_chan_result conv;
+
+			if (copy_from_user(&conv, (void __user *)arg,
+					sizeof(struct adc_chan_result)))
+				return -EFAULT;
+
+			if (conv.chan < pdata->num_chan_supported) {
+				rc = msm_adc_blocking_conversion(msm_adc,
+							conv.chan, &conv);
+			} else {
+				if (!msm_adc->dev_h)
+					return -EAGAIN;
+
+				rc = msm_rpc_adc_blocking_conversion(msm_adc,
+							conv.chan, &conv);
+			}
+			if (rc) {
+				dev_dbg(&pdev->dev, "BLK conversion failed\n");
+				return rc;
+			}
+
+			if (copy_to_user((void __user *)arg, &conv,
+					sizeof(struct adc_chan_result)))
+				return -EFAULT;
+			break;
+		}
+	case MSM_ADC_AIO_REQUEST_BLOCK_RES:
+		block_res = 1;
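+		/* fall through to MSM_ADC_AIO_REQUEST */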
+	case MSM_ADC_AIO_REQUEST:
+		{
+			struct adc_chan_result conv;
+
+			if (copy_from_user(&conv, (void __user *)arg,
+					sizeof(struct adc_chan_result)))
+				return -EFAULT;
+
+			if (conv.chan >= pdata->num_chan_supported)
+				return -EINVAL;
+
+			rc = msm_adc_aio_conversion(msm_adc, &conv, client);
+			if (rc) {
+				dev_dbg(&pdev->dev, "AIO conversion failed\n");
+				return rc;
+			}
+			if (copy_to_user((void __user *)arg, &conv,
+					sizeof(struct adc_chan_result)))
+				return -EFAULT;
+			break;
+		}
+	case MSM_ADC_AIO_POLL:
+		{
+			uint32_t completed;
+
+			rc = msm_adc_poll_complete(msm_adc, client, &completed);
+			if (rc) {
+				dev_dbg(&pdev->dev, "poll request failed\n");
+				return rc;
+			}
+
+			if (copy_to_user((void __user *)arg, &completed,
+					sizeof(uint32_t)))
+				return -EFAULT;
+
+			break;
+		}
+	case MSM_ADC_AIO_READ:
+		{
+			struct adc_chan_result result;
+
+			rc = msm_adc_read_result(msm_adc, client, &result);
+			if (rc) {
+				dev_dbg(&pdev->dev, "read result failed\n");
+				return rc;
+			}
+
+			if (copy_to_user((void __user *)arg, &result,
+					sizeof(struct adc_chan_result)))
+				return -EFAULT;
+			break;
+		}
+	case MSM_ADC_LOOKUP:
+		{
+			struct msm_adc_lookup lookup;
+
+			if (copy_from_user(&lookup, (void __user *)arg,
+					sizeof(struct msm_adc_lookup)))
+				return -EFAULT;
+
+			rc = msm_adc_lookup(msm_adc, &lookup);
+			if (rc) {
+				dev_dbg(&pdev->dev, "No such channel: %s\n",
+						lookup.name);
+				return rc;
+			}
+
+			if (copy_to_user((void __user *)arg, &lookup,
+					sizeof(struct msm_adc_lookup)))
+				return -EFAULT;
+			break;
+		}
+	case MSM_ADC_FLUID_INIT:
+		{
+			uint32_t result;
+
+			result = msm_adc_fluid_hw_init(msm_adc);
+
+			if (copy_to_user((void __user *)arg, &result,
+						sizeof(uint32_t)))	{
+				printk(KERN_ERR "MSM_ADC_FLUID_INIT: "
+					"copy_to_user returned an error.\n");
+				return -EFAULT;
+			}
+			printk(KERN_DEBUG "MSM_ADC_FLUID_INIT: Success.\n");
+			break;
+		}
+	case MSM_ADC_FLUID_DEINIT:
+		{
+			uint32_t result;
+
+			result = msm_adc_fluid_hw_deinit(msm_adc);
+
+			if (copy_to_user((void __user *)arg, &result,
+						sizeof(uint32_t)))
+				return -EFAULT;
+			break;
+		}
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+const struct file_operations msm_adc_fops = {
+	.open = msm_adc_open,
+	.release = msm_adc_release,
+	.unlocked_ioctl = msm_adc_ioctl,
+};
+
+static ssize_t msm_adc_show_curr(struct device *dev,
+				 struct device_attribute *devattr, char *buf)
+{
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+	struct msm_adc_drv *msm_adc = dev_get_drvdata(dev);
+	struct msm_adc_platform_data *pdata = msm_adc->pdev->dev.platform_data;
+	struct adc_chan_result result;
+	int rc;
+
+#ifdef CONFIG_PMIC8058_XOADC
+	rc = pm8058_xoadc_registered();
+	if (rc <= 0)
+		return -ENODEV;
+#endif
+	if (attr->index < pdata->num_chan_supported) {
+		rc = msm_adc_blocking_conversion(msm_adc,
+					attr->index, &result);
+	} else {
+		if (pdata->gpio_config == APROC_CONFIG && !epm_fluid_enabled
+					&& pdata->adc_fluid_enable != NULL) {
+			printk(KERN_DEBUG "This is to read ADC value for "
+				"Fluid EPM and init. Do it only once.\n");
+			pdata->adc_fluid_enable();
+			epm_fluid_enabled = true;
+		}
+		rc = msm_rpc_adc_blocking_conversion(msm_adc,
+					attr->index, &result);
+	}
+	if (rc)
+		return 0;
+
+	return sprintf(buf, "Result: %lld Raw: %d\n", result.physical,
+		result.adc_code);
+}
+
+static int msm_rpc_adc_blocking_conversion(struct msm_adc_drv *msm_adc,
+		uint32_t hwmon_chan, struct adc_chan_result *result)
+{
+	struct msm_adc_platform_data *pdata = msm_adc->pdev->dev.platform_data;
+	struct dal_conv_request params;
+	struct device *dev = &msm_adc->pdev->dev;
+	struct adc_dev *adc_dev;
+	struct dal_conv_state *conv_s;
+	struct dal_conv_slot *slot;
+	struct adc_dev_spec dest;
+	int timeout, rc = 0;
+
+	if (pdata->gpio_config == APROC_CONFIG &&
+			pdata->adc_gpio_enable != NULL)
+		pdata->adc_gpio_enable(hwmon_chan-pdata->num_chan_supported);
+
+	rc = msm_adc_translate_dal_to_hwmon(msm_adc, hwmon_chan, &dest);
+	if (rc) {
+		dev_err(dev, "%s: translation from chan %u failed\n",
+							__func__, hwmon_chan);
+		if (pdata->gpio_config == APROC_CONFIG &&
+				pdata->adc_gpio_disable != NULL)
+			pdata->adc_gpio_disable(hwmon_chan
+					-pdata->num_chan_supported);
+		return -EINVAL;
+	}
+
+	adc_dev = msm_adc->devs[dest.hwmon_dev_idx];
+	conv_s = &adc_dev->conv;
+
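+	/* Block here until one of the pre-allocated conversion slots is free;
+	 * slot_count counts the slots currently on the free list. */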
+	down(&conv_s->slot_count);
+
+	mutex_lock(&conv_s->list_lock);
+
+	slot = list_first_entry(&conv_s->slots, struct dal_conv_slot, list);
+	BUG_ON(!slot);
+	list_del(&slot->list);
+
+	mutex_unlock(&conv_s->list_lock);
+
+	/* indicates blocking request to callback handler */
+	slot->blocking = 1;
+
+	params.target.dev_idx = dest.dal.dev_idx;
+	params.target.chan_idx = dest.dal.chan_idx;
+	params.cb_h = slot->cb_h;
+
+	rc = dalrpc_fcn_8(MSM_ADC_DALRPC_CMD_REQ_CONV, msm_adc->dev_h,
+			&params, sizeof(params), NULL, 0);
+	if (rc) {
+		dev_err(dev, "%s: Conversion for device = %u channel = %u"
+			     " failed\n", __func__, params.target.dev_idx,
+						    params.target.chan_idx);
+
+		rc = -EIO;
+		goto blk_conv_err;
+	}
+
+	timeout = wait_for_completion_interruptible_timeout(&slot->comp,
+					      MSM_ADC_DALRC_CONV_TIMEOUT);
+	if (timeout == 0) {
+		dev_err(dev, "read for device = %u channel = %u timed out\n",
+				params.target.dev_idx, params.target.chan_idx);
+		rc = -ETIMEDOUT;
+		goto blk_conv_err;
+	} else if (timeout < 0) {
+		rc = -EINTR;
+		goto blk_conv_err;
+	}
+
+	result->physical = (int64_t)slot->result.physical;
+
+	if (slot->result.status == DAL_RESULT_STATUS_INVALID)
+		rc = -ENODATA;
+
+blk_conv_err:
+	if (pdata->gpio_config == APROC_CONFIG &&
+			pdata->adc_gpio_disable != NULL)
+		pdata->adc_gpio_disable(hwmon_chan-pdata->num_chan_supported);
+	msm_adc_restore_slot(conv_s, slot);
+
+	return rc;
+}
+
+static int msm_adc_blocking_conversion(struct msm_adc_drv *msm_adc,
+			uint32_t hwmon_chan, struct adc_chan_result *result)
+{
+	struct adc_conv_slot *slot;
+	struct msm_adc_platform_data *pdata =
+					msm_adc_drv->pdev->dev.platform_data;
+	struct msm_adc_channels *channel = &pdata->channel[hwmon_chan];
+
+	channel->adc_access_fn->adc_slot_request(channel->adc_dev_instance,
+									&slot);
+	if (slot) {
+		slot->conv.result.chan = hwmon_chan;
+		/* indicates blocking request to callback handler */
+		slot->blocking = 1;
+		slot->adc_request = START_OF_CONV;
+		slot->chan_path = channel->chan_path_type;
+		slot->chan_adc_config = channel->adc_config_type;
+		slot->chan_adc_calib = channel->adc_calib_type;
+		queue_work(msm_adc_drv->wq, &slot->work);
+
+		wait_for_completion_interruptible(&slot->comp);
+		*result = slot->conv.result;
+		channel->adc_access_fn->adc_restore_slot(
+					channel->adc_dev_instance, slot);
+		return 0;
+	}
+	return -EBUSY;
+}
+
+int32_t adc_channel_open(uint32_t channel, void **h)
+{
+	struct msm_client_data *client;
+	struct msm_adc_drv *msm_adc = msm_adc_drv;
+	struct msm_adc_platform_data *pdata;
+	struct platform_device *pdev;
+	int i = 0;
+
+	if (!msm_adc_drv)
+		return -EFAULT;
+
+#ifdef CONFIG_PMIC8058_XOADC
+	if (pm8058_xoadc_registered() <= 0)
+		return -ENODEV;
+#endif
+	pdata = msm_adc->pdev->dev.platform_data;
+	pdev = msm_adc->pdev;
+
+	while (i < pdata->num_chan_supported) {
+		if (channel == pdata->channel[i].channel_name)
+			break;
+		else
+			i++;
+	}
+
+	if (i == pdata->num_chan_supported)
+		return -EBADF; /* unknown channel */
+
+	client = kzalloc(sizeof(struct msm_client_data), GFP_KERNEL);
+	if (!client) {
+		dev_err(&pdev->dev, "Unable to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	if (!try_module_get(THIS_MODULE)) {
+		kfree(client);
+		return -EACCES;
+	}
+
+	mutex_init(&client->lock);
+	INIT_LIST_HEAD(&client->complete_list);
+	init_waitqueue_head(&client->data_wait);
+	init_waitqueue_head(&client->outst_wait);
+
+	client->online = 1;
+	client->adc_chan = i;
+	*h = (void *)client;
+	return 0;
+}
+
+int32_t adc_channel_close(void *h)
+{
+	struct msm_client_data *client = (struct msm_client_data *)h;
+
+	kfree(client);
+	return 0;
+}
+
+int32_t adc_channel_request_conv(void *h, struct completion *conv_complete_evt)
+{
+	struct msm_client_data *client = (struct msm_client_data *)h;
+	struct msm_adc_platform_data *pdata =
+					msm_adc_drv->pdev->dev.platform_data;
+	struct msm_adc_channels *channel = &pdata->channel[client->adc_chan];
+	struct adc_conv_slot *slot;
+
+	channel->adc_access_fn->adc_slot_request(channel->adc_dev_instance,
+									&slot);
+
+	if (slot) {
+		atomic_inc(&msm_adc_drv->total_outst);
+		mutex_lock(&client->lock);
+		client->num_outstanding++;
+		mutex_unlock(&client->lock);
+
+		slot->conv.result.chan = client->adc_chan;
+		slot->blocking = 0;
+		slot->compk = conv_complete_evt;
+		slot->client = client;
+		slot->adc_request = START_OF_CONV;
+		slot->chan_path = channel->chan_path_type;
+		slot->chan_adc_config = channel->adc_config_type;
+		slot->chan_adc_calib = channel->adc_calib_type;
+		queue_work(msm_adc_drv->wq, &slot->work);
+		return 0;
+	}
+	return -EBUSY;
+}
+
+int32_t adc_channel_read_result(void *h, struct adc_chan_result *chan_result)
+{
+	struct msm_client_data *client = (struct msm_client_data *)h;
+	struct msm_adc_platform_data *pdata =
+					msm_adc_drv->pdev->dev.platform_data;
+	struct msm_adc_channels *channel = pdata->channel;
+	struct adc_conv_slot *slot;
+	int rc = 0;
+
+	mutex_lock(&client->lock);
+
+	if (list_empty(&client->complete_list)) {
+		mutex_unlock(&client->lock);
+		return -ENOMSG;
+	}
+
+	slot = list_first_entry(&client->complete_list,
+				struct adc_conv_slot, list);
+
+	slot->client = NULL;
+	list_del(&slot->list);
+
+	client->num_complete--;
+
+	mutex_unlock(&client->lock);
+
+	*chan_result = slot->conv.result;
+
+	/* restore this slot to reserve */
+	channel[slot->conv.result.chan].adc_access_fn->adc_restore_slot(
+		channel[slot->conv.result.chan].adc_dev_instance, slot);
+
+	return rc;
+}
+
+int32_t adc_calib_request(void *h, struct completion *calib_complete_evt)
+{
+	struct msm_client_data *client = (struct msm_client_data *)h;
+	struct msm_adc_platform_data *pdata =
+					msm_adc_drv->pdev->dev.platform_data;
+	struct msm_adc_channels *channel = &pdata->channel[client->adc_chan];
+	struct adc_conv_slot *slot;
+	int rc, calib_status;
+
+	channel->adc_access_fn->adc_slot_request(channel->adc_dev_instance,
+				&slot);
+	if (slot) {
+		slot->conv.result.chan = client->adc_chan;
+		slot->blocking = 0;
+		slot->compk = calib_complete_evt;
+		slot->adc_request = START_OF_CALIBRATION;
+		slot->chan_path = channel->chan_path_type;
+		slot->chan_adc_config = channel->adc_config_type;
+		slot->chan_adc_calib = channel->adc_calib_type;
+		rc = channel->adc_access_fn->adc_calibrate(
+			channel->adc_dev_instance, slot, &calib_status);
+
+		if (calib_status == CALIB_NOT_REQUIRED) {
+			channel->adc_access_fn->adc_restore_slot(
+					channel->adc_dev_instance, slot);
+			/* the client always waits on the completion, so
+			 * signal it even when calibration is not required */
+			complete(calib_complete_evt);
+		} else {
+			atomic_inc(&msm_adc_drv->total_outst);
+			mutex_lock(&client->lock);
+			client->num_outstanding++;
+			mutex_unlock(&client->lock);
+		}
+
+		return rc;
+	}
+	return -EBUSY;
+}
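+
+/*
+ * Illustrative sketch of how an in-kernel client might use the API above;
+ * everything except the adc_channel_*() and adc_calib_request() calls is
+ * hypothetical:
+ *
+ *	DECLARE_COMPLETION_ONSTACK(conv_done);
+ *	struct adc_chan_result res;
+ *	void *h;
+ *
+ *	if (adc_channel_open(MY_CHANNEL, &h))	   // MY_CHANNEL: from pdata
+ *		return;
+ *	adc_channel_request_conv(h, &conv_done);   // queues the conversion
+ *	wait_for_completion(&conv_done);	   // signalled by the driver
+ *	adc_channel_read_result(h, &res);	   // res.physical / res.adc_code
+ *	adc_channel_close(h);
+ */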
+
+static void msm_rpc_adc_conv_cb(void *context, u32 param,
+			    void *evt_buf, u32 len)
+{
+	struct dal_adc_result *result = evt_buf;
+	struct dal_conv_slot *slot = context;
+	struct msm_adc_drv *msm_adc = msm_adc_drv;
+
+	memcpy(&slot->result, result, sizeof(slot->result));
+
+	/* for blocking requests, signal complete */
+	if (slot->blocking)
+		complete(&slot->comp);
+
+	/* for non-blocking requests, add slot to the client completed list */
+	else {
+		struct msm_client_data *client = slot->client;
+
+		mutex_lock(&client->lock);
+
+		list_add(&slot->list, &client->complete_list);
+		client->num_complete++;
+		client->num_outstanding--;
+
+		/*
+		 * If the client release has been invoked and this call
+		 * corresponds to the last outstanding request, signal the
+		 * release path to complete.
+		 */
+		if (slot->client->online == 0 && client->num_outstanding == 0)
+			wake_up_interruptible_all(&client->outst_wait);
+
+		mutex_unlock(&client->lock);
+
+		wake_up_interruptible_all(&client->data_wait);
+
+		atomic_dec(&msm_adc->total_outst);
+
+		/* verify driver remove has not been invoked */
+		if (atomic_read(&msm_adc->online) == 0 &&
+				atomic_read(&msm_adc->total_outst) == 0)
+			wake_up_interruptible_all(&msm_adc->total_outst_wait);
+	}
+}
+
+void msm_adc_conv_cb(void *context, u32 param,
+			    void *evt_buf, u32 len)
+{
+	struct adc_conv_slot *slot = context;
+	struct msm_adc_drv *msm_adc = msm_adc_drv;
+
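+	/*
+	 * The completion callback from the underlying ADC device advances the
+	 * slot's request state (START_OF_CONV -> END_OF_CONV,
+	 * START_OF_CALIBRATION -> END_OF_CALIBRATION) and requeues the work
+	 * item so msm_adc_wq_work() can finish the request.
+	 */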
+	switch (slot->adc_request) {
+	case START_OF_CONV:
+		slot->adc_request = END_OF_CONV;
+		break;
+	case START_OF_CALIBRATION:
+		slot->adc_request = END_OF_CALIBRATION;
+		break;
+	case END_OF_CALIBRATION:
+	case END_OF_CONV:
+		break;
+	}
+	queue_work(msm_adc->wq, &slot->work);
+}
+
+static void msm_adc_teardown_device_conv(struct platform_device *pdev,
+				    struct adc_dev *adc_dev)
+{
+	struct dal_conv_state *conv_s = &adc_dev->conv;
+	struct msm_adc_drv *msm_adc = platform_get_drvdata(pdev);
+	struct dal_conv_slot *slot;
+	int i;
+
+	for (i = 0; i < MSM_ADC_DEV_MAX_INFLIGHT; i++) {
+		slot = &conv_s->context[i];
+		if (slot->cb_h) {
+			dalrpc_dealloc_cb(msm_adc->dev_h, slot->cb_h);
+			slot->cb_h = NULL;
+		}
+	}
+}
+
+static void msm_rpc_adc_teardown_device(struct platform_device *pdev,
+				    struct adc_dev *adc_dev)
+{
+	struct dal_translation *transl = &adc_dev->transl;
+	int i, num_chans = transl->hwmon_end - transl->hwmon_start + 1;
+
+	if (adc_dev->sens_attr)
+		for (i = 0; i < num_chans; i++)
+			device_remove_file(&pdev->dev,
+					&adc_dev->sens_attr[i].dev_attr);
+
+	msm_adc_teardown_device_conv(pdev, adc_dev);
+
+	kfree(adc_dev->fnames);
+	kfree(adc_dev->sens_attr);
+	kfree(adc_dev);
+}
+
+static void msm_rpc_adc_teardown_devices(struct platform_device *pdev)
+{
+	struct msm_adc_platform_data *pdata = pdev->dev.platform_data;
+	struct msm_adc_drv *msm_adc = platform_get_drvdata(pdev);
+	int i, rc = 0;
+
+	for (i = 0; i < pdata->num_adc; i++) {
+		if (msm_adc->devs[i]) {
+			msm_rpc_adc_teardown_device(pdev, msm_adc->devs[i]);
+			msm_adc->devs[i] = NULL;
+		} else
+			break;
+	}
+
+	if (msm_adc->dev_h) {
+		rc = daldevice_detach(msm_adc->dev_h);
+		if (rc)
+			dev_err(&pdev->dev, "Cannot detach from dal device\n");
+		msm_adc->dev_h = NULL;
+	}
+
+}
+
+static void msm_adc_teardown_device(struct platform_device *pdev,
+				    struct msm_adc_drv *msm_adc)
+{
+	struct msm_adc_platform_data *pdata = pdev->dev.platform_data;
+	int i, num_chans = pdata->num_chan_supported;
+
+	if (pdata->num_chan_supported > 0) {
+		if (msm_adc->sens_attr)
+			for (i = 0; i < num_chans; i++)
+				device_remove_file(&pdev->dev,
+					&msm_adc->sens_attr[i].dev_attr);
+		kfree(msm_adc->sens_attr);
+	}
+}
+
+static void msm_adc_teardown(struct platform_device *pdev)
+{
+	struct msm_adc_drv *msm_adc = platform_get_drvdata(pdev);
+
+	if (!msm_adc)
+		return;
+
+	misc_deregister(&msm_adc->misc);
+
+	if (msm_adc->hwmon)
+		hwmon_device_unregister(msm_adc->hwmon);
+
+	msm_rpc_adc_teardown_devices(pdev);
+	msm_adc_teardown_device(pdev, msm_adc);
+
+	kfree(msm_adc);
+	platform_set_drvdata(pdev, NULL);
+}
+
+static int __devinit msm_adc_device_conv_init(struct msm_adc_drv *msm_adc,
+					      struct adc_dev *adc_dev)
+{
+	struct platform_device *pdev = msm_adc->pdev;
+	struct dal_conv_state *conv_s = &adc_dev->conv;
+	struct dal_conv_slot *slot = conv_s->context;
+	int rc, i;
+
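+	/* Slot pool: the semaphore counts free conversion slots and the list
+	 * holds them; msm_rpc_adc_blocking_conversion() downs the semaphore
+	 * before taking a slot off the list. */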
+	sema_init(&conv_s->slot_count, MSM_ADC_DEV_MAX_INFLIGHT);
+	mutex_init(&conv_s->list_lock);
+	INIT_LIST_HEAD(&conv_s->slots);
+
+	for (i = 0; i < MSM_ADC_DEV_MAX_INFLIGHT; i++) {
+		list_add(&slot->list, &conv_s->slots);
+		slot->cb_h = dalrpc_alloc_cb(msm_adc->dev_h,
+					     msm_rpc_adc_conv_cb, slot);
+		if (!slot->cb_h) {
+			dev_err(&pdev->dev, "Unable to allocate DAL callback"
+							" for slot %d\n", i);
+			rc = -ENOMEM;
+			goto dal_err_cb;
+		}
+		init_completion(&slot->comp);
+		slot->idx = i;
+		slot++;
+	}
+
+	return 0;
+
+dal_err_cb:
+	msm_adc_teardown_device_conv(pdev, adc_dev);
+
+	return rc;
+}
+
+static struct sensor_device_attribute msm_rpc_adc_curr_in_attr =
+	SENSOR_ATTR(NULL, S_IRUGO, msm_adc_show_curr, NULL, 0);
+
+static int __devinit msm_rpc_adc_device_init_hwmon(struct platform_device *pdev,
+						struct adc_dev *adc_dev)
+{
+	struct dal_translation *transl = &adc_dev->transl;
+	int i, rc, num_chans = transl->hwmon_end - transl->hwmon_start + 1;
+	const char prefix[] = "curr", postfix[] = "_input";
+	char tmpbuf[5];
+
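+	/* Single allocation holding an array of num_chans char pointers
+	 * followed by num_chans fixed-size name buffers; fnames[i] is pointed
+	 * at its buffer in the loop below. */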
+	adc_dev->fnames = kzalloc(num_chans * MSM_ADC_MAX_FNAME +
+				  num_chans * sizeof(char *), GFP_KERNEL);
+	if (!adc_dev->fnames) {
+		dev_err(&pdev->dev, "Unable to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	adc_dev->sens_attr = kzalloc(num_chans *
+			    sizeof(struct sensor_device_attribute), GFP_KERNEL);
+	if (!adc_dev->sens_attr) {
+		dev_err(&pdev->dev, "Unable to allocate memory\n");
+		rc = -ENOMEM;
+		goto hwmon_err_fnames;
+	}
+
+	for (i = 0; i < num_chans; i++) {
+		adc_dev->fnames[i] = (char *)adc_dev->fnames +
+			i * MSM_ADC_MAX_FNAME + num_chans * sizeof(char *);
+		strcpy(adc_dev->fnames[i], prefix);
+		sprintf(tmpbuf, "%d", transl->hwmon_start + i);
+		strcat(adc_dev->fnames[i], tmpbuf);
+		strcat(adc_dev->fnames[i], postfix);
+
+		msm_rpc_adc_curr_in_attr.index = transl->hwmon_start + i;
+		msm_rpc_adc_curr_in_attr.dev_attr.attr.name =
+					adc_dev->fnames[i];
+		memcpy(&adc_dev->sens_attr[i], &msm_rpc_adc_curr_in_attr,
+					sizeof(msm_rpc_adc_curr_in_attr));
+
+		rc = device_create_file(&pdev->dev,
+				&adc_dev->sens_attr[i].dev_attr);
+		if (rc) {
+			dev_err(&pdev->dev, "device_create_file failed for "
+						"dal dev %u chan %d\n",
+					    adc_dev->transl.dal_dev_idx, i);
+			goto hwmon_err_sens;
+		}
+	}
+
+	return 0;
+
+hwmon_err_sens:
+	kfree(adc_dev->sens_attr);
+hwmon_err_fnames:
+	kfree(adc_dev->fnames);
+
+	return rc;
+}
+
+static int __devinit msm_rpc_adc_device_init(struct platform_device *pdev)
+{
+	struct msm_adc_platform_data *pdata = pdev->dev.platform_data;
+	struct msm_adc_drv *msm_adc = platform_get_drvdata(pdev);
+	struct adc_dev *adc_dev;
+	struct adc_dev_spec target;
+	int i, rc;
+	int hwmon_cntr = pdata->num_chan_supported;
+
+	for (i = 0; i < pdata->num_adc; i++) {
+		adc_dev = kzalloc(sizeof(struct adc_dev), GFP_KERNEL);
+		if (!adc_dev) {
+			dev_err(&pdev->dev, "Unable to allocate memory\n");
+			rc = -ENOMEM;
+			goto dev_init_err;
+		}
+
+		msm_adc->devs[i] = adc_dev;
+		adc_dev->name = pdata->dev_names[i];
+
+		rc = msm_adc_device_conv_init(msm_adc, adc_dev);
+		if (rc) {
+			dev_err(&pdev->dev, "DAL device[%s] failed conv init\n",
+							adc_dev->name);
+			goto dev_init_err;
+		}
+
+		/* DAL device lookup */
+		rc = msm_adc_getinputproperties(msm_adc, adc_dev->name,
+								&target);
+		if (rc) {
+			dev_err(&pdev->dev, "No such DAL device[%s]\n",
+							adc_dev->name);
+			goto dev_init_err;
+		}
+
+		adc_dev->transl.dal_dev_idx = target.dal.dev_idx;
+		adc_dev->transl.hwmon_dev_idx = i;
+		adc_dev->nchans = target.dal.chan_idx;
+		adc_dev->transl.hwmon_start = hwmon_cntr;
+		adc_dev->transl.hwmon_end = hwmon_cntr + adc_dev->nchans - 1;
+		hwmon_cntr += adc_dev->nchans;
+
+		rc = msm_rpc_adc_device_init_hwmon(pdev, adc_dev);
+		if (rc)
+			goto dev_init_err;
+	}
+
+	return 0;
+
+dev_init_err:
+	msm_rpc_adc_teardown_devices(pdev);
+	return rc;
+}
+
+static int __devinit msm_rpc_adc_init(struct platform_device *pdev1)
+{
+	struct msm_adc_drv *msm_adc = msm_adc_drv;
+	struct platform_device *pdev = msm_adc->pdev;
+	struct msm_adc_platform_data *pdata = pdev->dev.platform_data;
+	int rc = 0;
+
+	dev_dbg(&pdev->dev, "msm_rpc_adc_init called\n");
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "no platform data?\n");
+		return -EINVAL;
+	}
+
+	mutex_init(&msm_adc->prop_lock);
+
+	rc = daldevice_attach(MSM_ADC_DALRPC_DEVICEID,
+			MSM_ADC_DALRPC_PORT_NAME,
+			MSM_ADC_DALRPC_CPU,
+			&msm_adc->dev_h);
+	if (rc) {
+		dev_err(&pdev->dev, "Cannot attach to dal device\n");
+		return rc;
+	}
+
+	dev_dbg(&pdev->dev, "Attach to dal device Succeeded\n");
+
+	rc = msm_rpc_adc_device_init(pdev);
+	if (rc) {
+		dev_err(&pdev->dev, "msm_adc_dev_init failed\n");
+		goto err_cleanup;
+	}
+
+	init_waitqueue_head(&msm_adc->rpc_total_outst_wait);
+	atomic_set(&msm_adc->rpc_online, 1);
+	atomic_set(&msm_adc->rpc_total_outst, 0);
+	epm_init = true;
+	pr_info("msm_adc successfully registered\n");
+
+	return 0;
+
+err_cleanup:
+	msm_rpc_adc_teardown_devices(pdev);
+
+	return rc;
+}
+
+/*
+ * Process the deferred job
+ */
+void msm_adc_wq_work(struct work_struct *work)
+{
+	struct adc_properties *adc_properties;
+	struct adc_conv_slot *slot = container_of(work,
+						struct adc_conv_slot, work);
+	uint32_t idx = slot->conv.result.chan;
+	struct msm_adc_platform_data *pdata =
+					msm_adc_drv->pdev->dev.platform_data;
+	struct msm_adc_channels *channel = &pdata->channel[idx];
+	int32_t adc_code;
+
+	switch (slot->adc_request) {
+	case START_OF_CONV:
+		channel->adc_access_fn->adc_select_chan_and_start_conv(
+				channel->adc_dev_instance, slot);
+		break;
+	case END_OF_CONV:
+		adc_properties = channel->adc_access_fn->adc_get_properties(
+						channel->adc_dev_instance);
+		if (channel->adc_access_fn->adc_read_adc_code)
+			channel->adc_access_fn->adc_read_adc_code(
+					channel->adc_dev_instance, &adc_code);
+		if (channel->chan_processor)
+			channel->chan_processor(adc_code, adc_properties,
+				&slot->chan_properties, &slot->conv.result);
+		/* Intentional fall-through.  Calibration does not need
+		 * channel processing, but both end-of-conversion and
+		 * end-of-calibration require the code below to run. */
+	case END_OF_CALIBRATION:
+		/* for blocking requests, signal complete */
+		if (slot->blocking)
+			complete(&slot->comp);
+		else {
+			struct msm_client_data *client = slot->client;
+
+			mutex_lock(&client->lock);
+
+			if (slot->adc_request == END_OF_CONV) {
+				list_add(&slot->list, &client->complete_list);
+				client->num_complete++;
+			}
+			client->num_outstanding--;
+
+			/*
+			 * If the client release has been invoked and this
+			 * call corresponds to the last outstanding request,
+			 * signal the release path to complete.
+			 */
+			if (slot->client->online == 0 &&
+						client->num_outstanding == 0)
+				wake_up_interruptible_all(&client->outst_wait);
+
+			mutex_unlock(&client->lock);
+
+			wake_up_interruptible_all(&client->data_wait);
+
+			atomic_dec(&msm_adc_drv->total_outst);
+
+			/* verify driver remove has not been invoked */
+			if (atomic_read(&msm_adc_drv->online) == 0 &&
+				atomic_read(&msm_adc_drv->total_outst) == 0)
+				wake_up_interruptible_all(
+					&msm_adc_drv->total_outst_wait);
+
+			if (slot->compk) /* Kernel space request */
+				complete(slot->compk);
+			if (slot->adc_request == END_OF_CALIBRATION)
+				channel->adc_access_fn->adc_restore_slot(
+					channel->adc_dev_instance, slot);
+		}
+		break;
+	case START_OF_CALIBRATION:
+		/* nothing to do here; the case is listed only to keep the
+		 * switch exhaustive and silence compiler warnings */
+		break;
+	}
+}
+
+static struct sensor_device_attribute msm_adc_curr_in_attr =
+	SENSOR_ATTR(NULL, S_IRUGO, msm_adc_show_curr, NULL, 0);
+
+static int __devinit msm_adc_init_hwmon(struct platform_device *pdev,
+					       struct msm_adc_drv *msm_adc)
+{
+	struct msm_adc_platform_data *pdata = pdev->dev.platform_data;
+	struct msm_adc_channels *channel = pdata->channel;
+	int i, rc, num_chans = pdata->num_chan_supported;
+
+	if (!channel)
+		return -EINVAL;
+
+	msm_adc->sens_attr = kzalloc(num_chans *
+			    sizeof(struct sensor_device_attribute), GFP_KERNEL);
+	if (!msm_adc->sens_attr) {
+		dev_err(&pdev->dev, "Unable to allocate memory\n");
+		rc = -ENOMEM;
+		goto hwmon_err_sens;
+	}
+
+	for (i = 0; i < num_chans; i++) {
+		msm_adc_curr_in_attr.index = i;
+		msm_adc_curr_in_attr.dev_attr.attr.name = channel[i].name;
+		memcpy(&msm_adc->sens_attr[i], &msm_adc_curr_in_attr,
+						sizeof(msm_adc_curr_in_attr));
+
+		rc = device_create_file(&pdev->dev,
+				&msm_adc->sens_attr[i].dev_attr);
+		if (rc) {
+			dev_err(&pdev->dev, "device_create_file failed for "
+					    "dal dev %s\n",
+					    channel[i].name);
+			goto hwmon_err_sens;
+		}
+	}
+
+	return 0;
+
+hwmon_err_sens:
+	kfree(msm_adc->sens_attr);
+
+	return rc;
+}
+
+static struct platform_driver msm_adc_rpcrouter_remote_driver = {
+	.probe          = msm_rpc_adc_init,
+	.driver         = {
+		.name   = MSM_ADC_DALRPC_PORT_NAME,
+		.owner  = THIS_MODULE,
+	},
+};
+
+static int msm_adc_probe(struct platform_device *pdev)
+{
+	struct msm_adc_platform_data *pdata = pdev->dev.platform_data;
+	struct msm_adc_drv *msm_adc;
+	int rc = 0;
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "no platform data?\n");
+		return -EINVAL;
+	}
+
+	msm_adc = kzalloc(sizeof(struct msm_adc_drv), GFP_KERNEL);
+	if (!msm_adc) {
+		dev_err(&pdev->dev, "Unable to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(pdev, msm_adc);
+	msm_adc_drv = msm_adc;
+	msm_adc->pdev = pdev;
+
+	if (pdata->target_hw == MSM_8x60 || pdata->target_hw == FSM_9xxx) {
+		rc = msm_adc_init_hwmon(pdev, msm_adc);
+		if (rc) {
+			dev_err(&pdev->dev, "msm_adc_dev_init failed\n");
+			goto err_cleanup;
+		}
+	}
+
+	msm_adc->hwmon = hwmon_device_register(&pdev->dev);
+	if (IS_ERR(msm_adc->hwmon)) {
+		dev_err(&pdev->dev, "hwmon_device_register failed\n");
+		rc = PTR_ERR(msm_adc->hwmon);
+		goto err_cleanup;
+	}
+
+	msm_adc->misc.name = MSM_ADC_DRIVER_NAME;
+	msm_adc->misc.minor = MISC_DYNAMIC_MINOR;
+	msm_adc->misc.fops = &msm_adc_fops;
+
+	rc = misc_register(&msm_adc->misc);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to register misc device!\n");
+		goto err_cleanup;
+	}
+
+	init_waitqueue_head(&msm_adc->total_outst_wait);
+	atomic_set(&msm_adc->online, 1);
+	atomic_set(&msm_adc->total_outst, 0);
+
+	msm_adc->wq = create_singlethread_workqueue("msm_adc");
+	if (!msm_adc->wq) {
+		rc = -ENOMEM;
+		goto err_cleanup;
+	}
+
+	if (pdata->num_adc > 0) {
+		if (pdata->target_hw == MSM_8x60)
+			platform_driver_register(
+				&msm_adc_rpcrouter_remote_driver);
+		else
+			msm_rpc_adc_init(pdev);
+	}
+
+	pr_info("msm_adc successfully registered\n");
+
+	return 0;
+
+err_cleanup:
+	msm_adc_teardown(pdev);
+
+	return rc;
+}
+
+static int __devexit msm_adc_remove(struct platform_device *pdev)
+{
+	int rc;
+
+	struct msm_adc_drv *msm_adc = platform_get_drvdata(pdev);
+
+	atomic_set(&msm_adc->online, 0);
+
+	atomic_set(&msm_adc->rpc_online, 0);
+
+	misc_deregister(&msm_adc->misc);
+
+	hwmon_device_unregister(msm_adc->hwmon);
+	msm_adc->hwmon = NULL;
+
+	/*
+	 * We may still have outstanding transactions in flight that have not
+	 * completed. Make sure they're completed before tearing down.
+	 */
+	rc = wait_event_interruptible(msm_adc->total_outst_wait,
+				      atomic_read(&msm_adc->total_outst) == 0);
+	if (rc) {
+		pr_err("%s: wait_event_interruptible failed rc = %d\n",
+								__func__, rc);
+		return rc;
+	}
+
+	rc = wait_event_interruptible(msm_adc->rpc_total_outst_wait,
+	      atomic_read(&msm_adc->rpc_total_outst) == 0);
+	if (rc) {
+		pr_err("%s: wait_event_interruptible failed rc = %d\n",
+								__func__, rc);
+		return rc;
+	}
+
+	msm_adc_teardown(pdev);
+
+	pr_info("msm_adc unregistered\n");
+
+	return 0;
+}
+
+static struct platform_driver msm_adc_driver = {
+	.probe = msm_adc_probe,
+	.remove = __devexit_p(msm_adc_remove),
+	.driver = {
+		.name = MSM_ADC_DRIVER_NAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init msm_adc_init(void)
+{
+	return platform_driver_register(&msm_adc_driver);
+}
+module_init(msm_adc_init);
+
+static void __exit msm_adc_exit(void)
+{
+	platform_driver_unregister(&msm_adc_driver);
+}
+module_exit(msm_adc_exit);
+
+MODULE_DESCRIPTION("MSM ADC Driver");
+MODULE_ALIAS("platform:msm_adc");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.1");
diff --git a/drivers/hwmon/wpce775x.c b/drivers/hwmon/wpce775x.c
new file mode 100644
index 0000000..2d00700
--- /dev/null
+++ b/drivers/hwmon/wpce775x.c
@@ -0,0 +1,167 @@
+/* Quanta EC driver for the Winbond Embedded Controller
+ *
+ * Copyright (C) 2009 Quanta Computer Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+
+#define EC_ID_NAME          "qci-i2cec"
+#define EC_BUFFER_LEN		16
+#define EC_CMD_POWER_OFF	0xAC
+#define EC_CMD_RESTART	0xAB
+
+static struct i2c_client *g_i2cec_client;
+
+/* General structure to hold the driver data */
+struct i2cec_drv_data {
+		struct i2c_client *i2cec_client;
+		struct work_struct work;
+		char ec_data[EC_BUFFER_LEN+1];
+};
+
+static int __devinit wpce_probe(struct i2c_client *client,
+	const struct i2c_device_id *id);
+static int __devexit wpce_remove(struct i2c_client *kbd);
+
+#ifdef CONFIG_PM
+static int wpce_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int wpce_resume(struct device *dev)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+static struct dev_pm_ops wpce_pm_ops = {
+	.suspend  = wpce_suspend,
+	.resume   = wpce_resume,
+};
+#endif
+
+static const struct i2c_device_id wpce_idtable[] = {
+       { EC_ID_NAME, 0 },
+       { }
+};
+
+static struct i2c_driver wpce_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name  = EC_ID_NAME,
+#ifdef CONFIG_PM
+		.pm = &wpce_pm_ops,
+#endif
+	},
+	.probe	  = wpce_probe,
+	.remove	  = __devexit_p(wpce_remove),
+	.id_table   = wpce_idtable,
+};
+
+static int __devinit wpce_probe(struct i2c_client *client,
+				    const struct i2c_device_id *id)
+{
+	int err = -ENOMEM;
+	struct i2cec_drv_data *context = NULL;
+
+	/* There is no need to call i2c_check_functionality(): it is the
+	 * client's job to use the interface (I2C vs SMBus) appropriate
+	 * for it.
+	 */
+	client->driver = &wpce_driver;
+	context = kzalloc(sizeof(struct i2cec_drv_data), GFP_KERNEL);
+	if (!context)
+		return err;
+
+	context->i2cec_client = client;
+	g_i2cec_client = client;
+	i2c_set_clientdata(context->i2cec_client, context);
+
+	return 0;
+}
+
+static int __devexit wpce_remove(struct i2c_client *dev)
+{
+	struct i2cec_drv_data *context = i2c_get_clientdata(dev);
+	g_i2cec_client = NULL;
+	kfree(context);
+
+	return 0;
+}
+
+static int __init wpce_init(void)
+{
+	return i2c_add_driver(&wpce_driver);
+}
+
+static void __exit wpce_exit(void)
+{
+	i2c_del_driver(&wpce_driver);
+}
+
+struct i2c_client *wpce_get_i2c_client(void)
+{
+	return g_i2cec_client;
+}
+EXPORT_SYMBOL_GPL(wpce_get_i2c_client);
+
+void wpce_poweroff(void)
+{
+	if (g_i2cec_client == NULL)
+		return;
+	i2c_smbus_write_byte(g_i2cec_client, EC_CMD_POWER_OFF);
+}
+EXPORT_SYMBOL_GPL(wpce_poweroff);
+
+void wpce_restart(void)
+{
+	if (g_i2cec_client == NULL)
+		return;
+	i2c_smbus_write_byte(g_i2cec_client, EC_CMD_RESTART);
+}
+EXPORT_SYMBOL_GPL(wpce_restart);
+
+int wpce_i2c_transfer(struct i2c_msg *msg)
+{
+	if (g_i2cec_client == NULL)
+		return -1;
+	msg->addr = g_i2cec_client->addr;
+	return i2c_transfer(g_i2cec_client->adapter, msg, 1);
+}
+EXPORT_SYMBOL_GPL(wpce_i2c_transfer);
+
+int wpce_smbus_write_word_data(u8 command, u16 value)
+{
+	if (g_i2cec_client == NULL)
+		return -1;
+	return i2c_smbus_write_word_data(g_i2cec_client, command, value);
+}
+EXPORT_SYMBOL_GPL(wpce_smbus_write_word_data);
+
+int wpce_smbus_write_byte_data(u8 command, u8 value)
+{
+	if (g_i2cec_client == NULL)
+		return -1;
+	return i2c_smbus_write_byte_data(g_i2cec_client, command, value);
+}
+EXPORT_SYMBOL_GPL(wpce_smbus_write_byte_data);
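+
+/*
+ * Illustrative usage of the exports above (the wiring shown is hypothetical,
+ * not part of this driver): a board file or another driver may do e.g.
+ *
+ *	pm_power_off = wpce_poweroff;
+ *	wpce_smbus_write_byte_data(0x01, 0x02);	// example command/value
+ *
+ * All helpers are no-ops or return -1 until the qci-i2cec client has probed.
+ */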
+
+module_init(wpce_init);
+module_exit(wpce_exit);
+
+MODULE_AUTHOR("Quanta Computer Inc.");
+MODULE_DESCRIPTION("Quanta Embedded Controller I2C Bridge Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 646068e..ce4ac8c 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -445,6 +445,32 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called i2c-mpc.
 
+config I2C_MSM
+	tristate "MSM"
+	depends on I2C && (ARCH_MSM || ARCH_QSD)
+	default y
+	help
+	  If you say yes to this option, support will be included for the
+	  built-in I2C interface on the MSM or QSD family processors.
+
+config I2C_QUP
+	tristate "I2C_QUP"
+	depends on ARCH_MSM
+	help
+	  If you say yes to this option, support will be included for the
+	  built-in I2C interface on the MSM family processors.
+
+config I2C_SSBI
+	tristate "Qualcomm Single-wire Serial Bus Interface (SSBI)"
+	depends on I2C && (ARCH_MSM7X30 || ARCH_MSM8X60 || ARCH_FSM9XXX)
+	default n
+	help
+	  If you say yes to this option, support will be included for the
+	  built-in SSBI interface on the MSM family processors.
+
+	  Note that SSBI is not an I2C bus, but it is functionally close
+	  enough that it can leverage the I2C framework.
+
 config I2C_MV64XXX
 	tristate "Marvell mv64xxx I2C Controller"
 	depends on (MV64X60 || PLAT_ORION) && EXPERIMENTAL
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index e6cf294..cf47b6a 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -42,6 +42,9 @@
 obj-$(CONFIG_I2C_IOP3XX)	+= i2c-iop3xx.o
 obj-$(CONFIG_I2C_IXP2000)	+= i2c-ixp2000.o
 obj-$(CONFIG_I2C_MPC)		+= i2c-mpc.o
+obj-$(CONFIG_I2C_MSM)		+= i2c-msm.o
+obj-$(CONFIG_I2C_QUP)           += i2c-qup.o
+obj-$(CONFIG_I2C_SSBI)          += i2c-ssbi.o
 obj-$(CONFIG_I2C_MV64XXX)	+= i2c-mv64xxx.o
 obj-$(CONFIG_I2C_MXS)		+= i2c-mxs.o
 obj-$(CONFIG_I2C_NOMADIK)	+= i2c-nomadik.o
diff --git a/drivers/i2c/busses/i2c-msm.c b/drivers/i2c/busses/i2c-msm.c
new file mode 100644
index 0000000..7bf5314
--- /dev/null
+++ b/drivers/i2c/busses/i2c-msm.c
@@ -0,0 +1,797 @@
+/* drivers/i2c/busses/i2c-msm.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <mach/board.h>
+#include <linux/mutex.h>
+#include <linux/timer.h>
+#include <linux/remote_spinlock.h>
+#include <linux/pm_qos_params.h>
+#include <mach/gpio.h>
+
+
+enum {
+	I2C_WRITE_DATA          = 0x00,
+	I2C_CLK_CTL             = 0x04,
+	I2C_STATUS              = 0x08,
+	I2C_READ_DATA           = 0x0c,
+	I2C_INTERFACE_SELECT    = 0x10,
+
+	I2C_WRITE_DATA_DATA_BYTE            = 0xff,
+	I2C_WRITE_DATA_ADDR_BYTE            = 1U << 8,
+	I2C_WRITE_DATA_LAST_BYTE            = 1U << 9,
+
+	I2C_CLK_CTL_FS_DIVIDER_VALUE        = 0xff,
+	I2C_CLK_CTL_HS_DIVIDER_VALUE        = 7U << 8,
+
+	I2C_STATUS_WR_BUFFER_FULL           = 1U << 0,
+	I2C_STATUS_RD_BUFFER_FULL           = 1U << 1,
+	I2C_STATUS_BUS_ERROR                = 1U << 2,
+	I2C_STATUS_PACKET_NACKED            = 1U << 3,
+	I2C_STATUS_ARB_LOST                 = 1U << 4,
+	I2C_STATUS_INVALID_WRITE            = 1U << 5,
+	I2C_STATUS_FAILED                   = 3U << 6,
+	I2C_STATUS_BUS_ACTIVE               = 1U << 8,
+	I2C_STATUS_BUS_MASTER               = 1U << 9,
+	I2C_STATUS_ERROR_MASK               = 0xfc,
+
+	I2C_INTERFACE_SELECT_INTF_SELECT    = 1U << 0,
+	I2C_INTERFACE_SELECT_SCL            = 1U << 8,
+	I2C_INTERFACE_SELECT_SDA            = 1U << 9,
+	I2C_STATUS_RX_DATA_STATE            = 3U << 11,
+	I2C_STATUS_LOW_CLK_STATE            = 3U << 13,
+};
+
+struct msm_i2c_dev {
+	struct device                *dev;
+	void __iomem                 *base;	/* virtual */
+	int                          irq;
+	struct clk                   *clk;
+	struct i2c_adapter           adap_pri;
+	struct i2c_adapter           adap_aux;
+
+	spinlock_t                   lock;
+
+	struct i2c_msg               *msg;
+	int                          rem;
+	int                          pos;
+	int                          cnt;
+	int                          err;
+	int                          flush_cnt;
+	int                          rd_acked;
+	int                          one_bit_t;
+	remote_mutex_t               r_lock;
+	int                          suspended;
+	struct mutex                 mlock;
+	struct msm_i2c_platform_data *pdata;
+	struct timer_list            pwr_timer;
+	int                          clk_state;
+	void                         *complete;
+
+	struct pm_qos_request_list pm_qos_req;
+};
+
+static void
+msm_i2c_pwr_mgmt(struct msm_i2c_dev *dev, unsigned int state)
+{
+	dev->clk_state = state;
+	if (state != 0)
+		clk_enable(dev->clk);
+	else
+		clk_disable(dev->clk);
+}
+
+static void
+msm_i2c_pwr_timer(unsigned long data)
+{
+	struct msm_i2c_dev *dev = (struct msm_i2c_dev *) data;
+	dev_dbg(dev->dev, "I2C_Power: Inactivity based power management\n");
+	if (dev->clk_state == 1)
+		msm_i2c_pwr_mgmt(dev, 0);
+}
+
+#ifdef DEBUG
+static void
+dump_status(uint32_t status)
+{
+	printk("STATUS (0x%.8x): ", status);
+	if (status & I2C_STATUS_BUS_MASTER)
+		printk("MST ");
+	if (status & I2C_STATUS_BUS_ACTIVE)
+		printk("ACT ");
+	if (status & I2C_STATUS_INVALID_WRITE)
+		printk("INV_WR ");
+	if (status & I2C_STATUS_ARB_LOST)
+		printk("ARB_LST ");
+	if (status & I2C_STATUS_PACKET_NACKED)
+		printk("NAK ");
+	if (status & I2C_STATUS_BUS_ERROR)
+		printk("BUS_ERR ");
+	if (status & I2C_STATUS_RD_BUFFER_FULL)
+		printk("RD_FULL ");
+	if (status & I2C_STATUS_WR_BUFFER_FULL)
+		printk("WR_FULL ");
+	if (status & I2C_STATUS_FAILED)
+		printk("FAIL 0x%x", (status & I2C_STATUS_FAILED));
+	printk("\n");
+}
+#endif
+
+static irqreturn_t
+msm_i2c_interrupt(int irq, void *devid)
+{
+	struct msm_i2c_dev *dev = devid;
+	uint32_t status = readl(dev->base + I2C_STATUS);
+	int err = 0;
+
+#ifdef DEBUG
+	dump_status(status);
+#endif
+
+	spin_lock(&dev->lock);
+	if (!dev->msg) {
+		printk(KERN_ERR "%s: IRQ but nothing to do!\n", __func__);
+		spin_unlock(&dev->lock);
+		return IRQ_HANDLED;
+	}
+
+	if (status & I2C_STATUS_ERROR_MASK) {
+		err = -EIO;
+		goto out_err;
+	}
+
+	if (dev->msg->flags & I2C_M_RD) {
+		if (status & I2C_STATUS_RD_BUFFER_FULL) {
+
+			/*
+			 * There's something in the FIFO.
+			 * Are we expecting data, or is this flush residue?
+			 */
+			if (dev->cnt) { /* DATA */
+				uint8_t *data = &dev->msg->buf[dev->pos];
+
+				/* We hold the spinlock here, so there will be
+				 * no scheduling between reading the second-
+				 * last byte and writing LAST_BYTE to the
+				 * controller, and no extra read-cycle clock is
+				 * generated.  Per the I2C MSM HW spec: write
+				 * LAST_BYTE before reading the 2nd last byte.
+				 */
+				if (dev->cnt == 2)
+					writel(I2C_WRITE_DATA_LAST_BYTE,
+						dev->base + I2C_WRITE_DATA);
+				*data = readl(dev->base + I2C_READ_DATA);
+				dev->cnt--;
+				dev->pos++;
+				if (dev->msg->len == 1)
+					dev->rd_acked = 0;
+				if (dev->cnt == 0)
+					goto out_complete;
+
+			} else {
+				/* Now that extra read-cycle-clocks aren't
+				 * generated, this becomes error condition
+				 */
+				dev_err(dev->dev,
+					"read did not stop, status - %x\n",
+					status);
+				err = -EIO;
+				goto out_err;
+			}
+		} else if (dev->msg->len == 1 && dev->rd_acked == 0 &&
+				((status & I2C_STATUS_RX_DATA_STATE) ==
+				 I2C_STATUS_RX_DATA_STATE))
+			writel(I2C_WRITE_DATA_LAST_BYTE,
+				dev->base + I2C_WRITE_DATA);
+	} else {
+		uint16_t data;
+
+		if (status & I2C_STATUS_WR_BUFFER_FULL) {
+			dev_err(dev->dev,
+				"Write buffer full in ISR on write?\n");
+			err = -EIO;
+			goto out_err;
+		}
+
+		if (dev->cnt) {
+			/* Ready to take a byte */
+			data = dev->msg->buf[dev->pos];
+			if (dev->cnt == 1 && dev->rem == 1)
+				data |= I2C_WRITE_DATA_LAST_BYTE;
+
+			status = readl(dev->base + I2C_STATUS);
+			/*
+			 * Due to a hardware timing issue, data line setup time
+			 * may be reduced to less than recommended 250 ns.
+			 * This happens when next byte is written in a
+			 * particular window of clock line being low and master
+			 * not stretching the clock line. Due to setup time
+			 * violation, some slaves may miss the first bit of
+			 * data, or misinterpret data as a start condition.
+			 * We introduce delay of just over 1/2 clock cycle to
+			 * ensure master stretches the clock line thereby
+			 * avoiding setup time violation. Delay is introduced
+			 * only if I2C clock FSM is LOW. The delay is not needed
+			 * if I2C clock FSM is HIGH or FORCED_LOW.
+			 */
+			if ((status & I2C_STATUS_LOW_CLK_STATE) ==
+					I2C_STATUS_LOW_CLK_STATE)
+				udelay((dev->one_bit_t >> 1) + 1);
+			writel(data, dev->base + I2C_WRITE_DATA);
+			dev->pos++;
+			dev->cnt--;
+		} else
+			goto out_complete;
+	}
+
+	spin_unlock(&dev->lock);
+	return IRQ_HANDLED;
+
+ out_err:
+	dev->err = err;
+ out_complete:
+	complete(dev->complete);
+	spin_unlock(&dev->lock);
+	return IRQ_HANDLED;
+}
+
+static int
+msm_i2c_poll_writeready(struct msm_i2c_dev *dev)
+{
+	uint32_t retries = 0;
+
+	while (retries != 2000) {
+		uint32_t status = readl(dev->base + I2C_STATUS);
+
+		if (!(status & I2C_STATUS_WR_BUFFER_FULL))
+			return 0;
+		if (retries++ > 1000)
+			usleep_range(100, 200);
+	}
+	return -ETIMEDOUT;
+}
+
+static int
+msm_i2c_poll_notbusy(struct msm_i2c_dev *dev)
+{
+	uint32_t retries = 0;
+
+	while (retries != 2000) {
+		uint32_t status = readl(dev->base + I2C_STATUS);
+
+		if (!(status & I2C_STATUS_BUS_ACTIVE))
+			return 0;
+		if (retries++ > 1000)
+			usleep_range(100, 200);
+	}
+	return -ETIMEDOUT;
+}
+
+static int
+msm_i2c_recover_bus_busy(struct msm_i2c_dev *dev, struct i2c_adapter *adap)
+{
+	int i;
+	int gpio_clk;
+	int gpio_dat;
+	uint32_t status = readl(dev->base + I2C_STATUS);
+	bool gpio_clk_status = false;
+
+	if (!(status & (I2C_STATUS_BUS_ACTIVE | I2C_STATUS_WR_BUFFER_FULL)))
+		return 0;
+
+	dev->pdata->msm_i2c_config_gpio(adap->nr, 0);
+	/* Even-numbered adapters are primary; odd-numbered adapters are AUX */
+	if (adap->nr % 2) {
+		gpio_clk = dev->pdata->aux_clk;
+		gpio_dat = dev->pdata->aux_dat;
+	} else {
+		gpio_clk = dev->pdata->pri_clk;
+		gpio_dat = dev->pdata->pri_dat;
+	}
+
+	disable_irq(dev->irq);
+	if (status & I2C_STATUS_RD_BUFFER_FULL) {
+		dev_warn(dev->dev, "Read buffer full, status %x, intf %x\n",
+			 status, readl(dev->base + I2C_INTERFACE_SELECT));
+		writel(I2C_WRITE_DATA_LAST_BYTE, dev->base + I2C_WRITE_DATA);
+		readl(dev->base + I2C_READ_DATA);
+	} else if (status & I2C_STATUS_BUS_MASTER) {
+		dev_warn(dev->dev, "Still the bus master, status %x, intf %x\n",
+			 status, readl(dev->base + I2C_INTERFACE_SELECT));
+		writel(I2C_WRITE_DATA_LAST_BYTE | 0xff,
+		       dev->base + I2C_WRITE_DATA);
+	}
+
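+	/* Standard I2C bus recovery: toggle SCL for up to 9 clock cycles,
+	 * releasing SDA in between, until the stuck slave releases the data
+	 * line (SDA reads high with SCL released).
+	 */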
+	for (i = 0; i < 9; i++) {
+		if (gpio_get_value(gpio_dat) && gpio_clk_status)
+			break;
+		gpio_direction_output(gpio_clk, 0);
+		udelay(5);
+		gpio_direction_output(gpio_dat, 0);
+		udelay(5);
+		gpio_direction_input(gpio_clk);
+		udelay(5);
+		if (!gpio_get_value(gpio_clk))
+			usleep_range(20, 30);
+		if (!gpio_get_value(gpio_clk))
+			msleep(10);
+		gpio_clk_status = gpio_get_value(gpio_clk);
+		gpio_direction_input(gpio_dat);
+		udelay(5);
+	}
+	dev->pdata->msm_i2c_config_gpio(adap->nr, 1);
+	udelay(10);
+
+	status = readl(dev->base + I2C_STATUS);
+	if (!(status & I2C_STATUS_BUS_ACTIVE)) {
+		dev_info(dev->dev, "Bus busy cleared after %d clock cycles, "
+			 "status %x, intf %x\n",
+			 i, status, readl(dev->base + I2C_INTERFACE_SELECT));
+		enable_irq(dev->irq);
+		return 0;
+	}
+
+	dev_err(dev->dev, "Bus still busy, status %x, intf %x\n",
+		 status, readl(dev->base + I2C_INTERFACE_SELECT));
+	enable_irq(dev->irq);
+	return -EBUSY;
+}
+
+static int
+msm_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+{
+	DECLARE_COMPLETION_ONSTACK(complete);
+	struct msm_i2c_dev *dev = i2c_get_adapdata(adap);
+	int ret;
+	int rem = num;
+	uint16_t addr;
+	long timeout;
+	unsigned long flags;
+	int check_busy = 1;
+
+	del_timer_sync(&dev->pwr_timer);
+	mutex_lock(&dev->mlock);
+	if (dev->suspended) {
+		mutex_unlock(&dev->mlock);
+		return -EIO;
+	}
+
+	if (dev->clk_state == 0) {
+		dev_dbg(dev->dev, "I2C_Power: Enable I2C clock(s)\n");
+		msm_i2c_pwr_mgmt(dev, 1);
+	}
+
+	/* Don't allow power collapse until we release remote spinlock */
+	pm_qos_update_request(&dev->pm_qos_req,  dev->pdata->pm_lat);
+	if (dev->pdata->rmutex) {
+		remote_mutex_lock(&dev->r_lock);
+		/* If other processor did some transactions, we may have
+		 * interrupt pending. Clear it
+		 */
+		irq_get_chip(dev->irq)->irq_ack(irq_get_irq_data(dev->irq));
+	}
+
+	if (adap == &dev->adap_pri)
+		writel(0, dev->base + I2C_INTERFACE_SELECT);
+	else
+		writel(I2C_INTERFACE_SELECT_INTF_SELECT,
+				dev->base + I2C_INTERFACE_SELECT);
+	enable_irq(dev->irq);
+	while (rem) {
+		addr = msgs->addr << 1;
+		if (msgs->flags & I2C_M_RD)
+			addr |= 1;
+
+		spin_lock_irqsave(&dev->lock, flags);
+		dev->msg = msgs;
+		dev->rem = rem;
+		dev->pos = 0;
+		dev->err = 0;
+		dev->flush_cnt = 0;
+		dev->cnt = msgs->len;
+		dev->complete = &complete;
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		if (check_busy) {
+			ret = msm_i2c_poll_notbusy(dev);
+			if (ret)
+				ret = msm_i2c_recover_bus_busy(dev, adap);
+			if (ret) {
+				dev_err(dev->dev,
+					"Error waiting for notbusy\n");
+				goto out_err;
+			}
+			check_busy = 0;
+		}
+
+		if (rem == 1 && msgs->len == 0)
+			addr |= I2C_WRITE_DATA_LAST_BYTE;
+
+		/* Wait for WR buffer not full */
+		ret = msm_i2c_poll_writeready(dev);
+		if (ret) {
+			ret = msm_i2c_recover_bus_busy(dev, adap);
+			if (ret) {
+				dev_err(dev->dev,
+				"Error waiting for write ready before addr\n");
+				goto out_err;
+			}
+		}
+
+		/* Special case for a 1-byte read.
+		 * There must be no scheduling between the I2C controller
+		 * becoming ready to read and writing LAST_BYTE to it;
+		 * otherwise the controller may start to latch another
+		 * extra byte.
+		 */
+		if ((msgs->len == 1) && (msgs->flags & I2C_M_RD)) {
+			uint32_t retries = 0;
+			spin_lock_irqsave(&dev->lock, flags);
+
+			writel(I2C_WRITE_DATA_ADDR_BYTE | addr,
+				dev->base + I2C_WRITE_DATA);
+
+			/* Poll for I2C controller going into RX_DATA mode to
+			 * ensure controller goes into receive mode.
+			 * Just checking write_buffer_full may not work since
+			 * there is delay between the write-buffer becoming
+			 * empty and the slave sending ACK to ensure I2C
+			 * controller goes in receive mode to receive data.
+			 */
+			while (retries != 2000) {
+				uint32_t status = readl(dev->base + I2C_STATUS);
+
+				if ((status & I2C_STATUS_RX_DATA_STATE)
+						== I2C_STATUS_RX_DATA_STATE)
+					break;
+				retries++;
+			}
+			if (retries >= 2000) {
+				dev->rd_acked = 0;
+				spin_unlock_irqrestore(&dev->lock, flags);
+				/* 1-byte-reads from slow devices in interrupt
+				 * context
+				 */
+				goto wait_for_int;
+			}
+
+			dev->rd_acked = 1;
+			writel(I2C_WRITE_DATA_LAST_BYTE,
+					dev->base + I2C_WRITE_DATA);
+			spin_unlock_irqrestore(&dev->lock, flags);
+		} else {
+			writel(I2C_WRITE_DATA_ADDR_BYTE | addr,
+					 dev->base + I2C_WRITE_DATA);
+		}
+		/* Polling and waiting for write_buffer_empty is not necessary.
+		 * Even worse, if we do, it can result in invalid status and
+		 * error if interrupt(s) occur while polling.
+		 */
+
+		/*
+		 * Now that we've setup the xfer, the ISR will transfer the data
+		 * and wake us up with dev->err set if there was an error
+		 */
+wait_for_int:
+
+		timeout = wait_for_completion_timeout(&complete, HZ);
+		if (!timeout) {
+			dev_err(dev->dev, "Transaction timed out\n");
+			writel(I2C_WRITE_DATA_LAST_BYTE,
+				dev->base + I2C_WRITE_DATA);
+			msleep(100);
+			/* FLUSH */
+			readl(dev->base + I2C_READ_DATA);
+			readl(dev->base + I2C_STATUS);
+			ret = -ETIMEDOUT;
+			goto out_err;
+		}
+		if (dev->err) {
+			dev_err(dev->dev,
+				"(%04x) Error during data xfer (%d)\n",
+				addr, dev->err);
+			ret = dev->err;
+			goto out_err;
+		}
+
+		if (msgs->flags & I2C_M_RD)
+			check_busy = 1;
+
+		msgs++;
+		rem--;
+	}
+
+	ret = num;
+ out_err:
+	spin_lock_irqsave(&dev->lock, flags);
+	dev->complete = NULL;
+	dev->msg = NULL;
+	dev->rem = 0;
+	dev->pos = 0;
+	dev->err = 0;
+	dev->flush_cnt = 0;
+	dev->cnt = 0;
+	spin_unlock_irqrestore(&dev->lock, flags);
+	disable_irq(dev->irq);
+	if (dev->pdata->rmutex)
+		remote_mutex_unlock(&dev->r_lock);
+	pm_qos_update_request(&dev->pm_qos_req,
+			      PM_QOS_DEFAULT_VALUE);
+	mod_timer(&dev->pwr_timer, (jiffies + 3*HZ));
+	mutex_unlock(&dev->mlock);
+	return ret;
+}
+
+static u32
+msm_i2c_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+}
+
+static const struct i2c_algorithm msm_i2c_algo = {
+	.master_xfer	= msm_i2c_xfer,
+	.functionality	= msm_i2c_func,
+};
+
+static int
+msm_i2c_probe(struct platform_device *pdev)
+{
+	struct msm_i2c_dev	*dev;
+	struct resource		*mem, *irq, *ioarea;
+	int ret;
+	int fs_div;
+	int hs_div;
+	int i2c_clk;
+	int clk_ctl;
+	struct clk *clk;
+	struct msm_i2c_platform_data *pdata;
+
+	printk(KERN_INFO "msm_i2c_probe\n");
+
+	/* NOTE: driver uses the static register mapping */
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem) {
+		dev_err(&pdev->dev, "no mem resource?\n");
+		return -ENODEV;
+	}
+	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!irq) {
+		dev_err(&pdev->dev, "no irq resource?\n");
+		return -ENODEV;
+	}
+
+	ioarea = request_mem_region(mem->start, (mem->end - mem->start) + 1,
+			pdev->name);
+	if (!ioarea) {
+		dev_err(&pdev->dev, "I2C region already claimed\n");
+		return -EBUSY;
+	}
+	clk = clk_get(&pdev->dev, "i2c_clk");
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "Could not get clock\n");
+		ret = PTR_ERR(clk);
+		goto err_clk_get_failed;
+	}
+
+	pdata = pdev->dev.platform_data;
+	if (!pdata) {
+		dev_err(&pdev->dev, "platform data not initialized\n");
+		ret = -ENOSYS;
+		goto err_clk_get_failed;
+	}
+	if (!pdata->msm_i2c_config_gpio) {
+		dev_err(&pdev->dev, "config_gpio function not initialized\n");
+		ret = -ENOSYS;
+		goto err_clk_get_failed;
+	}
+	/* We support frequencies up to Fast mode (400 kHz) */
+	if (pdata->clk_freq <= 0 || pdata->clk_freq > 400000) {
+		dev_err(&pdev->dev, "clock frequency not supported\n");
+		ret = -EIO;
+		goto err_clk_get_failed;
+	}
+
+	dev = kzalloc(sizeof(struct msm_i2c_dev), GFP_KERNEL);
+	if (!dev) {
+		ret = -ENOMEM;
+		goto err_alloc_dev_failed;
+	}
+
+	dev->dev = &pdev->dev;
+	dev->irq = irq->start;
+	dev->clk = clk;
+	dev->pdata = pdata;
+	dev->base = ioremap(mem->start, (mem->end - mem->start) + 1);
+	if (!dev->base) {
+		ret = -ENOMEM;
+		goto err_ioremap_failed;
+	}
+
+	dev->one_bit_t = USEC_PER_SEC/pdata->clk_freq;
+	spin_lock_init(&dev->lock);
+	platform_set_drvdata(pdev, dev);
+
+	clk_enable(clk);
+
+	if (pdata->rmutex) {
+		struct remote_mutex_id rmid;
+		rmid.r_spinlock_id = pdata->rsl_id;
+		rmid.delay_us = 10000000/pdata->clk_freq;
+		if (remote_mutex_init(&dev->r_lock, &rmid) != 0)
+			pdata->rmutex = 0;
+	}
+	/* I2C_HS_CLK = I2C_CLK / (3 * (HS_DIVIDER_VALUE + 1)) */
+	/* I2C_FS_CLK = I2C_CLK / (2 * (FS_DIVIDER_VALUE + 3)) */
+	/* FS_DIVIDER_VALUE = ((I2C_CLK / I2C_FS_CLK) / 2) - 3 */
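+	/* Worked example (illustrative): with the 19.2 MHz input clock below
+	 * and clk_freq = 100000 (100 kHz),
+	 * fs_div = ((19200000 / 100000) / 2) - 3 = 93, hs_div = 3. */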
+	i2c_clk = 19200000; /* input clock */
+	fs_div = ((i2c_clk / pdata->clk_freq) / 2) - 3;
+	hs_div = 3;
+	clk_ctl = ((hs_div & 0x7) << 8) | (fs_div & 0xff);
+	writel(clk_ctl, dev->base + I2C_CLK_CTL);
+	printk(KERN_INFO "msm_i2c_probe: clk_ctl %x, %d Hz\n",
+	       clk_ctl, i2c_clk / (2 * ((clk_ctl & 0xff) + 3)));
+
+	i2c_set_adapdata(&dev->adap_pri, dev);
+	dev->adap_pri.algo = &msm_i2c_algo;
+	strlcpy(dev->adap_pri.name,
+		"MSM I2C adapter-PRI",
+		sizeof(dev->adap_pri.name));
+
+	dev->adap_pri.nr = pdev->id;
+	ret = i2c_add_numbered_adapter(&dev->adap_pri);
+	if (ret) {
+		dev_err(&pdev->dev, "Primary i2c_add_adapter failed\n");
+		goto err_i2c_add_adapter_failed;
+	}
+
+	i2c_set_adapdata(&dev->adap_aux, dev);
+	dev->adap_aux.algo = &msm_i2c_algo;
+	strlcpy(dev->adap_aux.name,
+		"MSM I2C adapter-AUX",
+		sizeof(dev->adap_aux.name));
+
+	dev->adap_aux.nr = pdev->id + 1;
+	ret = i2c_add_numbered_adapter(&dev->adap_aux);
+	if (ret) {
+		dev_err(&pdev->dev, "auxiliary i2c_add_adapter failed\n");
+		i2c_del_adapter(&dev->adap_pri);
+		goto err_i2c_add_adapter_failed;
+	}
+	ret = request_irq(dev->irq, msm_i2c_interrupt,
+			IRQF_TRIGGER_RISING, pdev->name, dev);
+	if (ret) {
+		dev_err(&pdev->dev, "request_irq failed\n");
+		goto err_request_irq_failed;
+	}
+	pm_qos_add_request(&dev->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
+					     PM_QOS_DEFAULT_VALUE);
+	disable_irq(dev->irq);
+	dev->suspended = 0;
+	mutex_init(&dev->mlock);
+	dev->clk_state = 0;
+	/* Config GPIOs for primary and secondary lines */
+	pdata->msm_i2c_config_gpio(dev->adap_pri.nr, 1);
+	pdata->msm_i2c_config_gpio(dev->adap_aux.nr, 1);
+	clk_disable(dev->clk);
+	setup_timer(&dev->pwr_timer, msm_i2c_pwr_timer, (unsigned long) dev);
+
+	return 0;
+
+err_request_irq_failed:
+	i2c_del_adapter(&dev->adap_pri);
+	i2c_del_adapter(&dev->adap_aux);
+err_i2c_add_adapter_failed:
+	clk_disable(clk);
+	iounmap(dev->base);
+err_ioremap_failed:
+	kfree(dev);
+err_alloc_dev_failed:
+	clk_put(clk);
+err_clk_get_failed:
+	release_mem_region(mem->start, (mem->end - mem->start) + 1);
+	return ret;
+}
+
+static int
+msm_i2c_remove(struct platform_device *pdev)
+{
+	struct msm_i2c_dev	*dev = platform_get_drvdata(pdev);
+	struct resource		*mem;
+
+	/* Grab mutex to ensure ongoing transaction is over */
+	mutex_lock(&dev->mlock);
+	dev->suspended = 1;
+	mutex_unlock(&dev->mlock);
+	mutex_destroy(&dev->mlock);
+	del_timer_sync(&dev->pwr_timer);
+	if (dev->clk_state != 0)
+		msm_i2c_pwr_mgmt(dev, 0);
+	platform_set_drvdata(pdev, NULL);
+	pm_qos_remove_request(&dev->pm_qos_req);
+	free_irq(dev->irq, dev);
+	i2c_del_adapter(&dev->adap_pri);
+	i2c_del_adapter(&dev->adap_aux);
+	clk_put(dev->clk);
+	iounmap(dev->base);
+	kfree(dev);
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (mem)
+		release_mem_region(mem->start, (mem->end - mem->start) + 1);
+	return 0;
+}
+
+static int msm_i2c_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct msm_i2c_dev *dev = platform_get_drvdata(pdev);
+	/* Wait until current transaction finishes
+	 * Make sure remote lock is released before we suspend
+	 */
+	if (dev) {
+		/* Grab mutex to ensure ongoing transaction is over */
+		mutex_lock(&dev->mlock);
+		dev->suspended = 1;
+		mutex_unlock(&dev->mlock);
+		del_timer_sync(&dev->pwr_timer);
+		if (dev->clk_state != 0)
+			msm_i2c_pwr_mgmt(dev, 0);
+	}
+
+	return 0;
+}
+
+static int msm_i2c_resume(struct platform_device *pdev)
+{
+	struct msm_i2c_dev *dev = platform_get_drvdata(pdev);
+	dev->suspended = 0;
+	return 0;
+}
+
+static struct platform_driver msm_i2c_driver = {
+	.probe		= msm_i2c_probe,
+	.remove		= msm_i2c_remove,
+	.suspend	= msm_i2c_suspend,
+	.resume		= msm_i2c_resume,
+	.driver		= {
+		.name	= "msm_i2c",
+		.owner	= THIS_MODULE,
+	},
+};
+
+/* I2C may be needed to bring up other drivers */
+static int __init
+msm_i2c_init_driver(void)
+{
+	return platform_driver_register(&msm_i2c_driver);
+}
+subsys_initcall(msm_i2c_init_driver);
+
+static void __exit msm_i2c_exit_driver(void)
+{
+	platform_driver_unregister(&msm_i2c_driver);
+}
+module_exit(msm_i2c_exit_driver);
+
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
new file mode 100644
index 0000000..edb643b
--- /dev/null
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -0,0 +1,1305 @@
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * QUP driver for Qualcomm MSM platforms
+ *
+ */
+
+/* #define DEBUG */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <mach/board.h>
+#include <linux/pm_runtime.h>
+#include <linux/gpio.h>
+
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.2");
+MODULE_ALIAS("platform:i2c_qup");
+
+/* QUP Registers */
+enum {
+	QUP_CONFIG              = 0x0,
+	QUP_STATE               = 0x4,
+	QUP_IO_MODE             = 0x8,
+	QUP_SW_RESET            = 0xC,
+	QUP_OPERATIONAL         = 0x18,
+	QUP_ERROR_FLAGS         = 0x1C,
+	QUP_ERROR_FLAGS_EN      = 0x20,
+	QUP_MX_READ_CNT         = 0x208,
+	QUP_MX_INPUT_CNT        = 0x200,
+	QUP_MX_WR_CNT           = 0x100,
+	QUP_OUT_DEBUG           = 0x108,
+	QUP_OUT_FIFO_CNT        = 0x10C,
+	QUP_OUT_FIFO_BASE       = 0x110,
+	QUP_IN_READ_CUR         = 0x20C,
+	QUP_IN_DEBUG            = 0x210,
+	QUP_IN_FIFO_CNT         = 0x214,
+	QUP_IN_FIFO_BASE        = 0x218,
+	QUP_I2C_CLK_CTL         = 0x400,
+	QUP_I2C_STATUS          = 0x404,
+};
+
+/* QUP States and reset values */
+enum {
+	QUP_RESET_STATE         = 0,
+	QUP_RUN_STATE           = 1U,
+	QUP_STATE_MASK          = 3U,
+	QUP_PAUSE_STATE         = 3U,
+	QUP_STATE_VALID         = 1U << 2,
+	QUP_I2C_MAST_GEN        = 1U << 4,
+	QUP_OPERATIONAL_RESET   = 0xFF0,
+	QUP_I2C_STATUS_RESET    = 0xFFFFFC,
+};
+
+/* QUP OPERATIONAL FLAGS */
+enum {
+	QUP_OUT_SVC_FLAG        = 1U << 8,
+	QUP_IN_SVC_FLAG         = 1U << 9,
+	QUP_MX_INPUT_DONE       = 1U << 11,
+};
+
+/* I2C mini core related values */
+enum {
+	I2C_MINI_CORE           = 2U << 8,
+	I2C_N_VAL               = 0xF,
+
+};
+
+/* Packing Unpacking words in FIFOs , and IO modes*/
+enum {
+	QUP_WR_BLK_MODE  = 1U << 10,
+	QUP_RD_BLK_MODE  = 1U << 12,
+	QUP_UNPACK_EN = 1U << 14,
+	QUP_PACK_EN = 1U << 15,
+};
+
+/* QUP tags */
+enum {
+	QUP_OUT_NOP   = 0,
+	QUP_OUT_START = 1U << 8,
+	QUP_OUT_DATA  = 2U << 8,
+	QUP_OUT_STOP  = 3U << 8,
+	QUP_OUT_REC   = 4U << 8,
+	QUP_IN_DATA   = 5U << 8,
+	QUP_IN_STOP   = 6U << 8,
+	QUP_IN_NACK   = 7U << 8,
+};
+
+/* Status, Error flags */
+enum {
+	I2C_STATUS_WR_BUFFER_FULL  = 1U << 0,
+	I2C_STATUS_BUS_ACTIVE      = 1U << 8,
+	I2C_STATUS_ERROR_MASK      = 0x38000FC,
+	QUP_I2C_NACK_FLAG          = 1U << 3,
+	QUP_IN_NOT_EMPTY           = 1U << 5,
+	QUP_STATUS_ERROR_FLAGS     = 0x7C,
+};
+
+/* Master status clock states */
+enum {
+	I2C_CLK_RESET_BUSIDLE_STATE	= 0,
+	I2C_CLK_FORCED_LOW_STATE	= 5,
+};
+
+#define QUP_MAX_CLK_STATE_RETRIES	300
+
+static char const * const i2c_rsrcs[] = {"i2c_clk", "i2c_sda"};
+
+struct qup_i2c_dev {
+	struct device                *dev;
+	void __iomem                 *base;		/* virtual */
+	void __iomem                 *gsbi;		/* virtual */
+	int                          in_irq;
+	int                          out_irq;
+	int                          err_irq;
+	int                          num_irqs;
+	struct clk                   *clk;
+	struct clk                   *pclk;
+	struct i2c_adapter           adapter;
+
+	struct i2c_msg               *msg;
+	int                          pos;
+	int                          cnt;
+	int                          err;
+	int                          mode;
+	int                          clk_ctl;
+	int                          one_bit_t;
+	int                          out_fifo_sz;
+	int                          in_fifo_sz;
+	int                          out_blk_sz;
+	int                          in_blk_sz;
+	int                          wr_sz;
+	struct msm_i2c_platform_data *pdata;
+	int                          suspended;
+	int                          clk_state;
+	struct timer_list            pwr_timer;
+	struct mutex                 mlock;
+	void                         *complete;
+	int                          i2c_gpios[ARRAY_SIZE(i2c_rsrcs)];
+};
+
+#ifdef DEBUG
+static void
+qup_print_status(struct qup_i2c_dev *dev)
+{
+	uint32_t val;
+	val = readl_relaxed(dev->base+QUP_CONFIG);
+	dev_dbg(dev->dev, "Qup config is :0x%x\n", val);
+	val = readl_relaxed(dev->base+QUP_STATE);
+	dev_dbg(dev->dev, "Qup state is :0x%x\n", val);
+	val = readl_relaxed(dev->base+QUP_IO_MODE);
+	dev_dbg(dev->dev, "Qup mode is :0x%x\n", val);
+}
+#else
+static inline void qup_print_status(struct qup_i2c_dev *dev)
+{
+}
+#endif
+
+static irqreturn_t
+qup_i2c_interrupt(int irq, void *devid)
+{
+	struct qup_i2c_dev *dev = devid;
+	uint32_t status = readl_relaxed(dev->base + QUP_I2C_STATUS);
+	uint32_t status1 = readl_relaxed(dev->base + QUP_ERROR_FLAGS);
+	uint32_t op_flgs = readl_relaxed(dev->base + QUP_OPERATIONAL);
+	int err = 0;
+
+	if (!dev->msg || !dev->complete) {
+		/* Clear Error interrupt if it's a level triggered interrupt*/
+		if (dev->num_irqs == 1) {
+			writel_relaxed(QUP_RESET_STATE, dev->base+QUP_STATE);
+			/* Ensure that state is written before ISR exits */
+			mb();
+		}
+		return IRQ_HANDLED;
+	}
+
+	if (status & I2C_STATUS_ERROR_MASK) {
+		dev_err(dev->dev, "QUP: I2C status flags :0x%x, irq:%d\n",
+			status, irq);
+		err = status;
+		/* Clear Error interrupt if it's a level triggered interrupt*/
+		if (dev->num_irqs == 1) {
+			writel_relaxed(QUP_RESET_STATE, dev->base+QUP_STATE);
+			/* Ensure that state is written before ISR exits */
+			mb();
+		}
+		goto intr_done;
+	}
+
+	if (status1 & 0x7F) {
+		dev_err(dev->dev, "QUP: QUP status flags :0x%x\n", status1);
+		err = -status1;
+		/* Clear Error interrupt if it's a level triggered interrupt*/
+		if (dev->num_irqs == 1) {
+			writel_relaxed((status1 & QUP_STATUS_ERROR_FLAGS),
+				dev->base + QUP_ERROR_FLAGS);
+			/* Ensure that error flags are cleared before ISR
+			 * exits
+			 */
+			mb();
+		}
+		goto intr_done;
+	}
+
+	if ((dev->num_irqs == 3) && (dev->msg->flags == I2C_M_RD)
+		&& (irq == dev->out_irq))
+		return IRQ_HANDLED;
+	if (op_flgs & QUP_OUT_SVC_FLAG) {
+		writel_relaxed(QUP_OUT_SVC_FLAG, dev->base + QUP_OPERATIONAL);
+		/* Ensure that service flag is acknowledged before ISR exits */
+		mb();
+	}
+	if (dev->msg->flags == I2C_M_RD) {
+		if ((op_flgs & QUP_MX_INPUT_DONE) ||
+			(op_flgs & QUP_IN_SVC_FLAG)) {
+			writel_relaxed(QUP_IN_SVC_FLAG, dev->base
+					+ QUP_OPERATIONAL);
+			/* Ensure that service flag is acknowledged before ISR
+			 * exits
+			 */
+			mb();
+		} else
+			return IRQ_HANDLED;
+	}
+
+intr_done:
+	dev_dbg(dev->dev, "QUP intr= %d, i2c status=0x%x, qup status = 0x%x\n",
+			irq, status, status1);
+	qup_print_status(dev);
+	dev->err = err;
+	complete(dev->complete);
+	return IRQ_HANDLED;
+}
+
+static void
+qup_i2c_pwr_mgmt(struct qup_i2c_dev *dev, unsigned int state)
+{
+	dev->clk_state = state;
+	if (state != 0) {
+		clk_enable(dev->clk);
+		if (dev->pclk)
+			clk_enable(dev->pclk);
+	} else {
+		clk_disable(dev->clk);
+		if (dev->pclk)
+			clk_disable(dev->pclk);
+	}
+}
+
+static void
+qup_i2c_pwr_timer(unsigned long data)
+{
+	struct qup_i2c_dev *dev = (struct qup_i2c_dev *) data;
+	dev_dbg(dev->dev, "QUP_Power: Inactivity based power management\n");
+	if (dev->clk_state == 1)
+		qup_i2c_pwr_mgmt(dev, 0);
+}
+
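+/*
+ * Poll until the output FIFO has room for another byte.  For reads and for
+ * the last message of a transfer (rem == 0) this also waits for the bus to
+ * go idle; intermediate writes return as soon as the FIFO has space.
+ */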
+static int
+qup_i2c_poll_writeready(struct qup_i2c_dev *dev, int rem)
+{
+	uint32_t retries = 0;
+
+	while (retries != 2000) {
+		uint32_t status = readl_relaxed(dev->base + QUP_I2C_STATUS);
+
+		if (!(status & I2C_STATUS_WR_BUFFER_FULL)) {
+			if (((dev->msg->flags & I2C_M_RD) || (rem == 0)) &&
+				!(status & I2C_STATUS_BUS_ACTIVE))
+				return 0;
+			else if ((dev->msg->flags == 0) && (rem > 0))
+				return 0;
+			else /* 1-bit delay before we check for bus busy */
+				udelay(dev->one_bit_t);
+		}
+		if (retries++ == 1000)
+			udelay(100);
+	}
+	qup_print_status(dev);
+	return -ETIMEDOUT;
+}
+
+static int qup_i2c_poll_clock_ready(struct qup_i2c_dev *dev)
+{
+	uint32_t retries = 0;
+
+	/*
+	 * Wait for the clock state to transition to either IDLE or FORCED
+	 * LOW.  This will usually happen within one cycle of the i2c clock.
+	 */
+
+	while (retries++ < QUP_MAX_CLK_STATE_RETRIES) {
+		uint32_t status = readl_relaxed(dev->base + QUP_I2C_STATUS);
+		uint32_t clk_state = (status >> 13) & 0x7;
+
+		if (clk_state == I2C_CLK_RESET_BUSIDLE_STATE ||
+				clk_state == I2C_CLK_FORCED_LOW_STATE)
+			return 0;
+		/* 1-bit delay before we check again */
+		udelay(dev->one_bit_t);
+	}
+
+	dev_err(dev->dev, "Error waiting for clk ready\n");
+	return -ETIMEDOUT;
+}
+
+static int
+qup_i2c_poll_state(struct qup_i2c_dev *dev, uint32_t state)
+{
+	uint32_t retries = 0;
+
+	dev_dbg(dev->dev, "Polling Status for state:0x%x\n", state);
+
+	while (retries != 2000) {
+		uint32_t status = readl_relaxed(dev->base + QUP_STATE);
+
+		if ((status & (QUP_STATE_VALID | state)) ==
+				(QUP_STATE_VALID | state))
+			return 0;
+		else if (retries++ == 1000)
+			udelay(100);
+	}
+	return -ETIMEDOUT;
+}
+
+static inline int qup_i2c_request_gpios(struct qup_i2c_dev *dev)
+{
+	int i;
+	int result = 0;
+
+	for (i = 0; i < ARRAY_SIZE(i2c_rsrcs); ++i) {
+		if (dev->i2c_gpios[i] >= 0) {
+			result = gpio_request(dev->i2c_gpios[i], i2c_rsrcs[i]);
+			if (result) {
+				dev_err(dev->dev,
+					"gpio_request for pin %d failed with error %d\n",
+					dev->i2c_gpios[i], result);
+				goto error;
+			}
+		}
+	}
+	return 0;
+
+error:
+	for (; --i >= 0;) {
+		if (dev->i2c_gpios[i] >= 0)
+			gpio_free(dev->i2c_gpios[i]);
+	}
+	return result;
+}
+
+static inline void qup_i2c_free_gpios(struct qup_i2c_dev *dev)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(i2c_rsrcs); ++i) {
+		if (dev->i2c_gpios[i] >= 0)
+			gpio_free(dev->i2c_gpios[i]);
+	}
+}
+
+#ifdef DEBUG
+static void qup_verify_fifo(struct qup_i2c_dev *dev, uint32_t val,
+				uint32_t addr, int rdwr)
+{
+	if (rdwr)
+		dev_dbg(dev->dev, "RD:Wrote 0x%x to out_ff:0x%x\n", val, addr);
+	else
+		dev_dbg(dev->dev, "WR:Wrote 0x%x to out_ff:0x%x\n", val, addr);
+}
+#else
+static inline void qup_verify_fifo(struct qup_i2c_dev *dev, uint32_t val,
+				uint32_t addr, int rdwr)
+{
+}
+#endif
+
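+/*
+ * Each 32-bit output-FIFO word packs two 16-bit tag/data entries.  When the
+ * running index is not word aligned (*idx % 4), the new entry goes into the
+ * upper half-word together with the carried-over lower half-word.
+ */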
+static void
+qup_issue_read(struct qup_i2c_dev *dev, struct i2c_msg *msg, int *idx,
+		uint32_t carry_over)
+{
+	uint16_t addr = (msg->addr << 1) | 1;
+	/* The QUP limits reads to 256 bytes per transfer.  By HW design, 0 in
+	 * the 8-bit length field is treated as a 256-byte read.
+	 */
+	uint16_t rd_len = ((dev->cnt == 256) ? 0 : dev->cnt);
+
+	if (*idx % 4) {
+		writel_relaxed(carry_over | ((QUP_OUT_START | addr) << 16),
+		dev->base + QUP_OUT_FIFO_BASE);/* + (*idx-2)); */
+
+		qup_verify_fifo(dev, carry_over |
+			((QUP_OUT_START | addr) << 16), (uint32_t)dev->base
+			+ QUP_OUT_FIFO_BASE + (*idx - 2), 1);
+		writel_relaxed((QUP_OUT_REC | rd_len),
+			dev->base + QUP_OUT_FIFO_BASE);/* + (*idx+2)); */
+
+		qup_verify_fifo(dev, (QUP_OUT_REC | rd_len),
+		(uint32_t)dev->base + QUP_OUT_FIFO_BASE + (*idx + 2), 1);
+	} else {
+		writel_relaxed(((QUP_OUT_REC | rd_len) << 16)
+			| QUP_OUT_START | addr,
+			dev->base + QUP_OUT_FIFO_BASE);/* + (*idx)); */
+
+		qup_verify_fifo(dev, QUP_OUT_REC << 16 | rd_len << 16 |
+		QUP_OUT_START | addr,
+		(uint32_t)dev->base + QUP_OUT_FIFO_BASE + (*idx), 1);
+	}
+	*idx += 4;
+}
+
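+/*
+ * Queue one message's worth of output-FIFO entries: a START (or NOP) tag,
+ * a DATA tag per byte, and a final DATA or STOP tag depending on whether
+ * more data or another message to the same address follows.
+ */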
+static void
+qup_issue_write(struct qup_i2c_dev *dev, struct i2c_msg *msg, int rem,
+			int *idx, uint32_t *carry_over)
+{
+	int entries = dev->cnt;
+	int empty_sl = dev->wr_sz - ((*idx) >> 1);
+	int i = 0;
+	uint32_t val = 0;
+	uint32_t last_entry = 0;
+	uint16_t addr = msg->addr << 1;
+
+	if (dev->pos == 0) {
+		if (*idx % 4) {
+			writel_relaxed(*carry_over | ((QUP_OUT_START |
+							addr) << 16),
+					dev->base + QUP_OUT_FIFO_BASE);
+
+			qup_verify_fifo(dev, *carry_over | QUP_OUT_DATA << 16 |
+				addr << 16, (uint32_t)dev->base +
+				QUP_OUT_FIFO_BASE + (*idx) - 2, 0);
+		} else
+			val = QUP_OUT_START | addr;
+		*idx += 2;
+		i++;
+		entries++;
+	} else {
+		/* Avoid a setup-time issue by adding one NOP when the number
+		 * of bytes exceeds the FIFO/block size.  The issue cannot
+		 * appear otherwise, since the next byte to be written will
+		 * always be ready.
+		 */
+		val = (QUP_OUT_NOP | 1);
+		*idx += 2;
+		i++;
+		entries++;
+	}
+	if (entries > empty_sl)
+		entries = empty_sl;
+
+	for (; i < (entries - 1); i++) {
+		if (*idx % 4) {
+			writel_relaxed(val | ((QUP_OUT_DATA |
+				msg->buf[dev->pos]) << 16),
+				dev->base + QUP_OUT_FIFO_BASE);
+
+			qup_verify_fifo(dev, val | QUP_OUT_DATA << 16 |
+				msg->buf[dev->pos] << 16, (uint32_t)dev->base +
+				QUP_OUT_FIFO_BASE + (*idx) - 2, 0);
+		} else
+			val = QUP_OUT_DATA | msg->buf[dev->pos];
+		(*idx) += 2;
+		dev->pos++;
+	}
+	if (dev->pos < (msg->len - 1))
+		last_entry = QUP_OUT_DATA;
+	else if (rem > 1) /* not last array entry */
+		last_entry = QUP_OUT_DATA;
+	else
+		last_entry = QUP_OUT_STOP;
+	if ((*idx % 4) == 0) {
+		/*
+		 * If the read-start and read-command tags end up in different
+		 * FIFO words, an extra byte may be read due to an extra read
+		 * cycle.  Avoid that by inserting a NOP as the last FIFO entry,
+		 * but only when the write command(s) leave exactly one free
+		 * slot in the FIFO.
+		 */
+		if (rem > 1) {
+			struct i2c_msg *next = msg + 1;
+			if (next->addr == msg->addr && (next->flags & I2C_M_RD)
+				&& *idx == ((dev->wr_sz*2) - 4)) {
+				writel_relaxed(((last_entry |
+					msg->buf[dev->pos]) |
+					((1 | QUP_OUT_NOP) << 16)), dev->base +
+					QUP_OUT_FIFO_BASE);/* + (*idx) - 2); */
+
+				qup_verify_fifo(dev,
+					((last_entry | msg->buf[dev->pos]) |
+					((1 | QUP_OUT_NOP) << 16)),
+					(uint32_t)dev->base +
+					QUP_OUT_FIFO_BASE + (*idx), 0);
+				*idx += 2;
+			} else if (next->flags == 0 && dev->pos == msg->len - 1
+					&& *idx < (dev->wr_sz*2)) {
+				/* Last byte of an intermittent write */
+				writel_relaxed((last_entry |
+						msg->buf[dev->pos]),
+					dev->base + QUP_OUT_FIFO_BASE);
+
+				qup_verify_fifo(dev,
+					last_entry | msg->buf[dev->pos],
+					(uint32_t)dev->base +
+					QUP_OUT_FIFO_BASE + (*idx), 0);
+				*idx += 2;
+			} else
+				*carry_over = (last_entry | msg->buf[dev->pos]);
+		} else {
+			writel_relaxed((last_entry | msg->buf[dev->pos]),
+			dev->base + QUP_OUT_FIFO_BASE);/* + (*idx) - 2); */
+
+			qup_verify_fifo(dev, last_entry | msg->buf[dev->pos],
+			(uint32_t)dev->base + QUP_OUT_FIFO_BASE +
+			(*idx), 0);
+		}
+	} else {
+		writel_relaxed(val | ((last_entry | msg->buf[dev->pos]) << 16),
+		dev->base + QUP_OUT_FIFO_BASE);/* + (*idx) - 2); */
+
+		qup_verify_fifo(dev, val | (last_entry << 16) |
+		(msg->buf[dev->pos] << 16), (uint32_t)dev->base +
+		QUP_OUT_FIFO_BASE + (*idx) - 2, 0);
+	}
+
+	*idx += 2;
+	dev->pos++;
+	dev->cnt = msg->len - dev->pos;
+}
+
+static int
+qup_update_state(struct qup_i2c_dev *dev, uint32_t state)
+{
+	if (qup_i2c_poll_state(dev, 0) != 0)
+		return -EIO;
+	writel_relaxed(state, dev->base + QUP_STATE);
+	if (qup_i2c_poll_state(dev, state) != 0)
+		return -EIO;
+	return 0;
+}
+
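+/*
+ * Choose FIFO vs block mode for incoming data: reads that fit in the input
+ * FIFO use QUP_MX_READ_CNT, larger reads use block mode and QUP_MX_INPUT_CNT.
+ */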
+static void
+qup_set_read_mode(struct qup_i2c_dev *dev, int rd_len)
+{
+	uint32_t wr_mode = (dev->wr_sz < dev->out_fifo_sz) ?
+				QUP_WR_BLK_MODE : 0;
+	if (rd_len > 256) {
+		dev_dbg(dev->dev, "HW limit: Breaking reads in chunk of 256\n");
+		rd_len = 256;
+	}
+	if (rd_len <= dev->in_fifo_sz) {
+		writel_relaxed(wr_mode | QUP_PACK_EN | QUP_UNPACK_EN,
+			dev->base + QUP_IO_MODE);
+		writel_relaxed(rd_len, dev->base + QUP_MX_READ_CNT);
+	} else {
+		writel_relaxed(wr_mode | QUP_RD_BLK_MODE |
+			QUP_PACK_EN | QUP_UNPACK_EN, dev->base + QUP_IO_MODE);
+		writel_relaxed(rd_len, dev->base + QUP_MX_INPUT_CNT);
+	}
+}
+
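+/*
+ * Pick FIFO vs block mode for the outgoing message and program QUP_MX_WR_CNT
+ * when block mode is used.  total_len counts data bytes plus the start tag
+ * and the NOPs inserted at block boundaries by qup_issue_write().
+ */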
+static int
+qup_set_wr_mode(struct qup_i2c_dev *dev, int rem)
+{
+	int total_len = 0;
+	int ret = 0;
+	if (dev->msg->len >= (dev->out_fifo_sz - 1)) {
+		total_len = dev->msg->len + 1 +
+				(dev->msg->len/(dev->out_blk_sz-1));
+		writel_relaxed(QUP_WR_BLK_MODE | QUP_PACK_EN | QUP_UNPACK_EN,
+			dev->base + QUP_IO_MODE);
+		dev->wr_sz = dev->out_blk_sz;
+	} else
+		writel_relaxed(QUP_PACK_EN | QUP_UNPACK_EN,
+			dev->base + QUP_IO_MODE);
+
+	if (rem > 1) {
+		struct i2c_msg *next = dev->msg + 1;
+		if (next->addr == dev->msg->addr &&
+			next->flags == I2C_M_RD) {
+			qup_set_read_mode(dev, next->len);
+			/* make sure read start & read command are in 1 blk */
+			if ((total_len % dev->out_blk_sz) ==
+				(dev->out_blk_sz - 1))
+				total_len += 3;
+			else
+				total_len += 2;
+		}
+	}
+	/* WRITE COUNT register valid/used only in block mode */
+	if (dev->wr_sz == dev->out_blk_sz)
+		writel_relaxed(total_len, dev->base + QUP_MX_WR_CNT);
+	return ret;
+}
+
+static int
+qup_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+{
+	DECLARE_COMPLETION_ONSTACK(complete);
+	struct qup_i2c_dev *dev = i2c_get_adapdata(adap);
+	int ret;
+	int rem = num;
+	long timeout;
+	int err;
+
+	del_timer_sync(&dev->pwr_timer);
+	mutex_lock(&dev->mlock);
+
+	if (dev->suspended) {
+		mutex_unlock(&dev->mlock);
+		return -EIO;
+	}
+
+	if (dev->clk_state == 0) {
+		if (dev->clk_ctl == 0) {
+			if (dev->pdata->src_clk_rate > 0)
+				clk_set_rate(dev->clk,
+						dev->pdata->src_clk_rate);
+			else
+				dev->pdata->src_clk_rate = 19200000;
+		}
+		qup_i2c_pwr_mgmt(dev, 1);
+	}
+	/* Initialize QUP registers during first transfer */
+	if (dev->clk_ctl == 0) {
+		int fs_div;
+		int hs_div;
+		uint32_t fifo_reg;
+
+		if (dev->gsbi) {
+			writel_relaxed(0x2 << 4, dev->gsbi);
+			/* GSBI memory is not in the same 1K region as other
+			 * QUP registers. mb() here ensures that the GSBI
+			 * register is updated in correct order and that the
+			 * write has gone through before programming QUP core
+			 * registers
+			 */
+			mb();
+		}
+
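+		/*
+		 * The SCL rate is roughly src_clk_rate / (2 * (fs_div + 3)),
+		 * so solve for fs_div given the requested clk_freq; hs_div is
+		 * fixed at 3.
+		 */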
+		fs_div = ((dev->pdata->src_clk_rate
+				/ dev->pdata->clk_freq) / 2) - 3;
+		hs_div = 3;
+		dev->clk_ctl = ((hs_div & 0x7) << 8) | (fs_div & 0xff);
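+		/*
+		 * Decode block and FIFO geometry from QUP_IO_MODE: bits [1:0]
+		 * and [6:5] give the output/input block sizes, bits [4:2] and
+		 * [9:7] scale them up to the FIFO sizes.
+		 */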
+		fifo_reg = readl_relaxed(dev->base + QUP_IO_MODE);
+		if (fifo_reg & 0x3)
+			dev->out_blk_sz = (fifo_reg & 0x3) * 16;
+		else
+			dev->out_blk_sz = 16;
+		if (fifo_reg & 0x60)
+			dev->in_blk_sz = ((fifo_reg & 0x60) >> 5) * 16;
+		else
+			dev->in_blk_sz = 16;
+		/*
+		 * The block/fifo size w.r.t. 'actual data' is 1/2 due to 'tag'
+		 * associated with each byte written/received
+		 */
+		dev->out_blk_sz /= 2;
+		dev->in_blk_sz /= 2;
+		dev->out_fifo_sz = dev->out_blk_sz *
+					(2 << ((fifo_reg & 0x1C) >> 2));
+		dev->in_fifo_sz = dev->in_blk_sz *
+					(2 << ((fifo_reg & 0x380) >> 7));
+		dev_dbg(dev->dev, "QUP IN:bl:%d, ff:%d, OUT:bl:%d, ff:%d\n",
+				dev->in_blk_sz, dev->in_fifo_sz,
+				dev->out_blk_sz, dev->out_fifo_sz);
+	}
+
+	writel_relaxed(1, dev->base + QUP_SW_RESET);
+	ret = qup_i2c_poll_state(dev, QUP_RESET_STATE);
+	if (ret) {
+		dev_err(dev->dev, "QUP Busy:Trying to recover\n");
+		goto out_err;
+	}
+
+	if (dev->num_irqs == 3) {
+		enable_irq(dev->in_irq);
+		enable_irq(dev->out_irq);
+	}
+	enable_irq(dev->err_irq);
+
+	/* Initialize QUP registers */
+	writel_relaxed(0, dev->base + QUP_CONFIG);
+	writel_relaxed(QUP_OPERATIONAL_RESET, dev->base + QUP_OPERATIONAL);
+	writel_relaxed(QUP_STATUS_ERROR_FLAGS, dev->base + QUP_ERROR_FLAGS_EN);
+
+	writel_relaxed(I2C_MINI_CORE | I2C_N_VAL, dev->base + QUP_CONFIG);
+
+	/* Initialize I2C mini core registers */
+	writel_relaxed(0, dev->base + QUP_I2C_CLK_CTL);
+	writel_relaxed(QUP_I2C_STATUS_RESET, dev->base + QUP_I2C_STATUS);
+
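+	/*
+	 * Per-message flow: program the I/O mode, move the mini core to RUN,
+	 * fill the output FIFO while PAUSEd, return to RUN, then wait for the
+	 * completion raised by the ISR and drain the input FIFO for reads.
+	 */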
+	while (rem) {
+		bool filled = false;
+
+		dev->cnt = msgs->len - dev->pos;
+		dev->msg = msgs;
+
+		dev->wr_sz = dev->out_fifo_sz;
+		dev->err = 0;
+		dev->complete = &complete;
+
+		if (qup_i2c_poll_state(dev, QUP_I2C_MAST_GEN) != 0) {
+			ret = -EIO;
+			goto out_err;
+		}
+
+		qup_print_status(dev);
+		/* HW limits reads to 256 bytes in one transfer without a stop */
+		if (dev->msg->flags & I2C_M_RD) {
+			qup_set_read_mode(dev, dev->cnt);
+			if (dev->cnt > 256)
+				dev->cnt = 256;
+		} else {
+			ret = qup_set_wr_mode(dev, rem);
+			if (ret != 0)
+				goto out_err;
+			/* Don't fill block till we get interrupt */
+			if (dev->wr_sz == dev->out_blk_sz)
+				filled = true;
+		}
+
+		err = qup_update_state(dev, QUP_RUN_STATE);
+		if (err < 0) {
+			ret = err;
+			goto out_err;
+		}
+
+		qup_print_status(dev);
+		writel_relaxed(dev->clk_ctl, dev->base + QUP_I2C_CLK_CTL);
+		/* CLK_CTL register is not in the same 1K region as other QUP
+		 * registers. Ensure that clock control is written before
+		 * programming other QUP registers
+		 */
+		mb();
+
+		do {
+			int idx = 0;
+			uint32_t carry_over = 0;
+
+			/* Transition to PAUSE state only possible from RUN */
+			err = qup_update_state(dev, QUP_PAUSE_STATE);
+			if (err < 0) {
+				ret = err;
+				goto out_err;
+			}
+
+			qup_print_status(dev);
+			/* Fill the output FIFO with tags and data; consecutive
+			 * messages to the same slave address are combined into
+			 * one transfer where possible.
+			 */
+			while (filled == false) {
+				if ((msgs->flags & I2C_M_RD))
+					qup_issue_read(dev, msgs, &idx,
+							carry_over);
+				else if (!(msgs->flags & I2C_M_RD))
+					qup_issue_write(dev, msgs, rem, &idx,
+							&carry_over);
+				if (idx >= (dev->wr_sz << 1))
+					filled = true;
+				/* Start new message */
+				if (filled == false) {
+					if (msgs->flags & I2C_M_RD)
+							filled = true;
+					else if (rem > 1) {
+						/* Only combine operations with
+						 * same address
+						 */
+						struct i2c_msg *next = msgs + 1;
+						if (next->addr != msgs->addr)
+							filled = true;
+						else {
+							rem--;
+							msgs++;
+							dev->msg = msgs;
+							dev->pos = 0;
+							dev->cnt = msgs->len;
+							if (msgs->len > 256)
+								dev->cnt = 256;
+						}
+					} else
+						filled = true;
+				}
+			}
+			err = qup_update_state(dev, QUP_RUN_STATE);
+			if (err < 0) {
+				ret = err;
+				goto out_err;
+			}
+			dev_dbg(dev->dev, "idx:%d, rem:%d, num:%d, mode:%d\n",
+				idx, rem, num, dev->mode);
+
+			qup_print_status(dev);
+			timeout = wait_for_completion_timeout(&complete, HZ);
+			if (!timeout) {
+				uint32_t istatus = readl_relaxed(dev->base +
+							QUP_I2C_STATUS);
+				uint32_t qstatus = readl_relaxed(dev->base +
+							QUP_ERROR_FLAGS);
+				uint32_t op_flgs = readl_relaxed(dev->base +
+							QUP_OPERATIONAL);
+
+				dev_err(dev->dev, "Transaction timed out\n");
+				dev_err(dev->dev, "I2C Status: %x\n", istatus);
+				dev_err(dev->dev, "QUP Status: %x\n", qstatus);
+				dev_err(dev->dev, "OP Flags: %x\n", op_flgs);
+				writel_relaxed(1, dev->base + QUP_SW_RESET);
+				/* Make sure that the write has gone through
+				 * before returning from the function
+				 */
+				mb();
+				ret = -ETIMEDOUT;
+				goto out_err;
+			}
+			if (dev->err) {
+				if (dev->err > 0 &&
+					dev->err & QUP_I2C_NACK_FLAG)
+					dev_err(dev->dev,
+					"I2C slave addr:0x%x not connected\n",
+					dev->msg->addr);
+				else if (dev->err < 0) {
+					dev_err(dev->dev,
+					"QUP data xfer error %d\n", dev->err);
+					ret = dev->err;
+					goto out_err;
+				}
+				ret = -dev->err;
+				goto out_err;
+			}
+			if (dev->msg->flags & I2C_M_RD) {
+				int i;
+				uint32_t dval = 0;
+				for (i = 0; dev->pos < dev->msg->len; i++,
+						dev->pos++) {
+					uint32_t rd_status =
+						readl_relaxed(dev->base
+							+ QUP_OPERATIONAL);
+					if (i % 2 == 0) {
+						if ((rd_status &
+							QUP_IN_NOT_EMPTY) == 0)
+							break;
+						dval = readl_relaxed(dev->base +
+							QUP_IN_FIFO_BASE);
+						dev->msg->buf[dev->pos] =
+							dval & 0xFF;
+					} else
+						dev->msg->buf[dev->pos] =
+							((dval & 0xFF0000) >>
+							 16);
+				}
+				dev->cnt -= i;
+			} else
+				filled = false; /* refill output FIFO */
+			dev_dbg(dev->dev, "pos:%d, len:%d, cnt:%d\n",
+					dev->pos, msgs->len, dev->cnt);
+		} while (dev->cnt > 0);
+		if (dev->cnt == 0) {
+			if (msgs->len == dev->pos) {
+				rem--;
+				msgs++;
+				dev->pos = 0;
+			}
+			if (rem) {
+				err = qup_i2c_poll_clock_ready(dev);
+				if (err < 0) {
+					ret = err;
+					goto out_err;
+				}
+				err = qup_update_state(dev, QUP_RESET_STATE);
+				if (err < 0) {
+					ret = err;
+					goto out_err;
+				}
+			}
+		}
+		/* Wait for I2C bus to be idle */
+		ret = qup_i2c_poll_writeready(dev, rem);
+		if (ret) {
+			dev_err(dev->dev,
+				"Error waiting for write ready\n");
+			goto out_err;
+		}
+	}
+
+	ret = num;
+ out_err:
+	disable_irq(dev->err_irq);
+	if (dev->num_irqs == 3) {
+		disable_irq(dev->in_irq);
+		disable_irq(dev->out_irq);
+	}
+	dev->complete = NULL;
+	dev->msg = NULL;
+	dev->pos = 0;
+	dev->err = 0;
+	dev->cnt = 0;
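+	/* Re-arm the inactivity timer; clocks are gated off after 3s of idle */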
+	dev->pwr_timer.expires = jiffies + 3*HZ;
+	add_timer(&dev->pwr_timer);
+	mutex_unlock(&dev->mlock);
+	return ret;
+}
+
+static u32
+qup_i2c_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+}
+
+static const struct i2c_algorithm qup_i2c_algo = {
+	.master_xfer	= qup_i2c_xfer,
+	.functionality	= qup_i2c_func,
+};
+
+static int __devinit
+qup_i2c_probe(struct platform_device *pdev)
+{
+	struct qup_i2c_dev	*dev;
+	struct resource         *qup_mem, *gsbi_mem, *qup_io, *gsbi_io, *res;
+	struct resource		*in_irq, *out_irq, *err_irq;
+	struct clk         *clk, *pclk;
+	int ret = 0;
+	int i;
+	struct msm_i2c_platform_data *pdata;
+	const char *qup_apps_clk_name = "qup_clk";
+
+	gsbi_mem = NULL;
+	dev_dbg(&pdev->dev, "qup_i2c_probe\n");
+
+	pdata = pdev->dev.platform_data;
+	if (!pdata) {
+		dev_err(&pdev->dev, "platform data not initialized\n");
+		return -ENOSYS;
+	}
+	qup_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"qup_phys_addr");
+	if (!qup_mem) {
+		dev_err(&pdev->dev, "no qup mem resource?\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * Newer hardware targets have only one interrupt; in_irq and out_irq
+	 * will be NULL on those platforms.
+	 */
+	in_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+						"qup_in_intr");
+
+	out_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+						"qup_out_intr");
+
+	err_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+						"qup_err_intr");
+	if (!err_irq) {
+		dev_err(&pdev->dev, "no error irq resource?\n");
+		return -ENODEV;
+	}
+
+	qup_io = request_mem_region(qup_mem->start, resource_size(qup_mem),
+					pdev->name);
+	if (!qup_io) {
+		dev_err(&pdev->dev, "QUP region already claimed\n");
+		return -EBUSY;
+	}
+	if (!pdata->use_gsbi_shared_mode) {
+		gsbi_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"gsbi_qup_i2c_addr");
+		if (!gsbi_mem) {
+			dev_err(&pdev->dev, "no gsbi mem resource?\n");
+			return -ENODEV;
+		}
+		gsbi_io = request_mem_region(gsbi_mem->start,
+						resource_size(gsbi_mem),
+						pdev->name);
+		if (!gsbi_io) {
+			dev_err(&pdev->dev, "GSBI region already claimed\n");
+			return -EBUSY;
+		}
+	}
+
+	if (pdata->clk != NULL)
+		qup_apps_clk_name = pdata->clk;
+
+	clk = clk_get(&pdev->dev, qup_apps_clk_name);
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "Could not get clock\n");
+		ret = PTR_ERR(clk);
+		goto err_clk_get_failed;
+	}
+
+	if (pdata->pclk != NULL) {
+		pclk = clk_get(&pdev->dev, pdata->pclk);
+		if (IS_ERR(pclk)) {
+			dev_err(&pdev->dev, "Could not get pclock\n");
+			ret = PTR_ERR(pclk);
+			clk_put(clk);
+			goto err_clk_get_failed;
+		}
+	} else
+		pclk = NULL;
+
+	if (!(pdata->msm_i2c_config_gpio)) {
+		dev_err(&pdev->dev, "config_gpio function not initialized\n");
+		ret = -ENOSYS;
+		goto err_config_failed;
+	}
+
+	/* We support frequencies up to Fast mode (400 kHz) */
+	if (pdata->clk_freq <= 0 ||
+			pdata->clk_freq > 400000) {
+		dev_err(&pdev->dev, "clock frequency not supported\n");
+		ret = -EIO;
+		goto err_config_failed;
+	}
+
+	dev = kzalloc(sizeof(struct qup_i2c_dev), GFP_KERNEL);
+	if (!dev) {
+		ret = -ENOMEM;
+		goto err_alloc_dev_failed;
+	}
+
+	dev->dev = &pdev->dev;
+	if (in_irq)
+		dev->in_irq = in_irq->start;
+	if (out_irq)
+		dev->out_irq = out_irq->start;
+	dev->err_irq = err_irq->start;
+	if (in_irq && out_irq)
+		dev->num_irqs = 3;
+	else
+		dev->num_irqs = 1;
+	dev->clk = clk;
+	dev->pclk = pclk;
+	dev->base = ioremap(qup_mem->start, resource_size(qup_mem));
+	if (!dev->base) {
+		ret = -ENOMEM;
+		goto err_ioremap_failed;
+	}
+
+	/* Configure GSBI block to use I2C functionality */
+	if (gsbi_mem) {
+		dev->gsbi = ioremap(gsbi_mem->start, resource_size(gsbi_mem));
+		if (!dev->gsbi) {
+			ret = -ENOMEM;
+			goto err_gsbi_failed;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(i2c_rsrcs); ++i) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_IO,
+						   i2c_rsrcs[i]);
+		dev->i2c_gpios[i] = res ? res->start : -1;
+	}
+
+	ret = qup_i2c_request_gpios(dev);
+	if (ret)
+		goto err_request_gpio_failed;
+
+	platform_set_drvdata(pdev, dev);
+
+	dev->one_bit_t = USEC_PER_SEC/pdata->clk_freq;
+	dev->pdata = pdata;
+	dev->clk_ctl = 0;
+	dev->pos = 0;
+
+	/*
+	 * We use num_irqs to also indicate whether we got 3 interrupts or just
+	 * 1.  If we have just 1, we use err_irq as the general-purpose irq and
+	 * handle the changes in the ISR accordingly.  Per hardware guidelines,
+	 * if we have 3 interrupts they are always edge-triggered, and if we
+	 * have 1 it is always level-triggered.
+	 */
+	if (dev->num_irqs == 3) {
+		ret = request_irq(dev->in_irq, qup_i2c_interrupt,
+				IRQF_TRIGGER_RISING, "qup_in_intr", dev);
+		if (ret) {
+			dev_err(&pdev->dev, "request_in_irq failed\n");
+			goto err_request_irq_failed;
+		}
+		/*
+		 * We assume out_irq exists if in_irq does since platform
+		 * configuration either has 3 interrupts assigned to QUP or 1
+		 */
+		ret = request_irq(dev->out_irq, qup_i2c_interrupt,
+				IRQF_TRIGGER_RISING, "qup_out_intr", dev);
+		if (ret) {
+			dev_err(&pdev->dev, "request_out_irq failed\n");
+			free_irq(dev->in_irq, dev);
+			goto err_request_irq_failed;
+		}
+		ret = request_irq(dev->err_irq, qup_i2c_interrupt,
+				IRQF_TRIGGER_RISING, "qup_err_intr", dev);
+		if (ret) {
+			dev_err(&pdev->dev, "request_err_irq failed\n");
+			free_irq(dev->out_irq, dev);
+			free_irq(dev->in_irq, dev);
+			goto err_request_irq_failed;
+		}
+	} else {
+		ret = request_irq(dev->err_irq, qup_i2c_interrupt,
+				IRQF_TRIGGER_HIGH, "qup_err_intr", dev);
+		if (ret) {
+			dev_err(&pdev->dev, "request_err_irq failed\n");
+			goto err_request_irq_failed;
+		}
+	}
+	disable_irq(dev->err_irq);
+	if (dev->num_irqs == 3) {
+		disable_irq(dev->in_irq);
+		disable_irq(dev->out_irq);
+	}
+	i2c_set_adapdata(&dev->adapter, dev);
+	dev->adapter.algo = &qup_i2c_algo;
+	strlcpy(dev->adapter.name,
+		"QUP I2C adapter",
+		sizeof(dev->adapter.name));
+	dev->adapter.nr = pdev->id;
+	pdata->msm_i2c_config_gpio(dev->adapter.nr, 1);
+
+	dev->suspended = 0;
+	mutex_init(&dev->mlock);
+	dev->clk_state = 0;
+	setup_timer(&dev->pwr_timer, qup_i2c_pwr_timer, (unsigned long) dev);
+
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	ret = i2c_add_numbered_adapter(&dev->adapter);
+	if (ret) {
+		dev_err(&pdev->dev, "i2c_add_adapter failed\n");
+		if (dev->num_irqs == 3) {
+			free_irq(dev->out_irq, dev);
+			free_irq(dev->in_irq, dev);
+		}
+		free_irq(dev->err_irq, dev);
+	} else
+		return 0;
+
+
+err_request_irq_failed:
+	qup_i2c_free_gpios(dev);
+	if (dev->gsbi)
+		iounmap(dev->gsbi);
+err_request_gpio_failed:
+err_gsbi_failed:
+	iounmap(dev->base);
+err_ioremap_failed:
+	kfree(dev);
+err_alloc_dev_failed:
+err_config_failed:
+	clk_put(clk);
+	if (pclk)
+		clk_put(pclk);
+err_clk_get_failed:
+	if (gsbi_mem)
+		release_mem_region(gsbi_mem->start, resource_size(gsbi_mem));
+	release_mem_region(qup_mem->start, resource_size(qup_mem));
+	return ret;
+}
+
+static int __devexit
+qup_i2c_remove(struct platform_device *pdev)
+{
+	struct qup_i2c_dev	*dev = platform_get_drvdata(pdev);
+	struct resource		*qup_mem, *gsbi_mem;
+
+	/* Grab mutex to ensure ongoing transaction is over */
+	mutex_lock(&dev->mlock);
+	dev->suspended = 1;
+	mutex_unlock(&dev->mlock);
+	mutex_destroy(&dev->mlock);
+	del_timer_sync(&dev->pwr_timer);
+	if (dev->clk_state != 0)
+		qup_i2c_pwr_mgmt(dev, 0);
+	platform_set_drvdata(pdev, NULL);
+	if (dev->num_irqs == 3) {
+		free_irq(dev->out_irq, dev);
+		free_irq(dev->in_irq, dev);
+	}
+	free_irq(dev->err_irq, dev);
+	i2c_del_adapter(&dev->adapter);
+	clk_put(dev->clk);
+	if (dev->pclk)
+		clk_put(dev->pclk);
+	qup_i2c_free_gpios(dev);
+	if (dev->gsbi)
+		iounmap(dev->gsbi);
+	iounmap(dev->base);
+
+	pm_runtime_disable(&pdev->dev);
+
+	if (!(dev->pdata->use_gsbi_shared_mode)) {
+		gsbi_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"gsbi_qup_i2c_addr");
+		release_mem_region(gsbi_mem->start, resource_size(gsbi_mem));
+	}
+	kfree(dev);
+	qup_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"qup_phys_addr");
+	release_mem_region(qup_mem->start, resource_size(qup_mem));
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int qup_i2c_suspend(struct device *device)
+{
+	struct platform_device *pdev = to_platform_device(device);
+	struct qup_i2c_dev *dev = platform_get_drvdata(pdev);
+
+	/* Grab mutex to ensure ongoing transaction is over */
+	mutex_lock(&dev->mlock);
+	dev->suspended = 1;
+	mutex_unlock(&dev->mlock);
+	del_timer_sync(&dev->pwr_timer);
+	if (dev->clk_state != 0)
+		qup_i2c_pwr_mgmt(dev, 0);
+	qup_i2c_free_gpios(dev);
+	return 0;
+}
+
+static int qup_i2c_resume(struct device *device)
+{
+	struct platform_device *pdev = to_platform_device(device);
+	struct qup_i2c_dev *dev = platform_get_drvdata(pdev);
+	BUG_ON(qup_i2c_request_gpios(dev) != 0);
+	dev->suspended = 0;
+	return 0;
+}
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_RUNTIME
+static int i2c_qup_runtime_idle(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: idle...\n");
+	return 0;
+}
+
+static int i2c_qup_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: suspending...\n");
+	return 0;
+}
+
+static int i2c_qup_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: resuming...\n");
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops i2c_qup_dev_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(
+		qup_i2c_suspend,
+		qup_i2c_resume
+	)
+	SET_RUNTIME_PM_OPS(
+		i2c_qup_runtime_suspend,
+		i2c_qup_runtime_resume,
+		i2c_qup_runtime_idle
+	)
+};
+
+static struct platform_driver qup_i2c_driver = {
+	.probe		= qup_i2c_probe,
+	.remove		= __devexit_p(qup_i2c_remove),
+	.driver		= {
+		.name	= "qup_i2c",
+		.owner	= THIS_MODULE,
+		.pm = &i2c_qup_dev_pm_ops,
+	},
+};
+
+/* QUP may be needed to bring up other drivers */
+static int __init
+qup_i2c_init_driver(void)
+{
+	return platform_driver_register(&qup_i2c_driver);
+}
+arch_initcall(qup_i2c_init_driver);
+
+static void __exit qup_i2c_exit_driver(void)
+{
+	platform_driver_unregister(&qup_i2c_driver);
+}
+module_exit(qup_i2c_exit_driver);
+
diff --git a/drivers/i2c/busses/i2c-ssbi.c b/drivers/i2c/busses/i2c-ssbi.c
new file mode 100644
index 0000000..b371d47
--- /dev/null
+++ b/drivers/i2c/busses/i2c-ssbi.c
@@ -0,0 +1,516 @@
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+/*
+ * SSBI driver for Qualcomm MSM platforms
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/i2c.h>
+#include <linux/remote_spinlock.h>
+#include <mach/board.h>
+#include <linux/slab.h>
+
+/* SSBI 2.0 controller registers */
+#define SSBI2_CMD			0x0008
+#define SSBI2_RD			0x0010
+#define SSBI2_STATUS			0x0014
+#define SSBI2_MODE2			0x001C
+
+/* SSBI_CMD fields */
+#define SSBI_CMD_RDWRN			(0x01 << 24)
+#define SSBI_CMD_REG_ADDR_SHFT		(0x10)
+#define SSBI_CMD_REG_ADDR_MASK		(0xFF << SSBI_CMD_REG_ADDR_SHFT)
+#define SSBI_CMD_REG_DATA_SHFT		(0x00)
+#define SSBI_CMD_REG_DATA_MASK		(0xFF << SSBI_CMD_REG_DATA_SHFT)
+
+/* SSBI_STATUS fields */
+#define SSBI_STATUS_DATA_IN		0x10
+#define SSBI_STATUS_RD_CLOBBERED	0x08
+#define SSBI_STATUS_RD_READY		0x04
+#define SSBI_STATUS_READY		0x02
+#define SSBI_STATUS_MCHN_BUSY		0x01
+
+/* SSBI_RD fields */
+#define SSBI_RD_RDWRN			0x01000000
+#define SSBI_RD_REG_ADDR_SHFT		0x10
+#define SSBI_RD_REG_ADDR_MASK		(0xFF << SSBI_RD_REG_ADDR_SHFT)
+#define SSBI_RD_REG_DATA_SHFT		(0x00)
+#define SSBI_RD_REG_DATA_MASK		(0xFF << SSBI_RD_REG_DATA_SHFT)
+
+/* SSBI_MODE2 fields */
+#define SSBI_MODE2_REG_ADDR_15_8_SHFT	0x04
+#define SSBI_MODE2_REG_ADDR_15_8_MASK	(0x7F << SSBI_MODE2_REG_ADDR_15_8_SHFT)
+#define SSBI_MODE2_ADDR_WIDTH_SHFT	0x01
+#define SSBI_MODE2_ADDR_WIDTH_MASK	(0x07 << SSBI_MODE2_ADDR_WIDTH_SHFT)
+#define SSBI_MODE2_SSBI2_MODE		0x00000001
+
+#define SSBI_MODE2_REG_ADDR_15_8(MD, AD) \
+	(((MD) & 0x0F) | ((((AD) >> 8) << SSBI_MODE2_REG_ADDR_15_8_SHFT) & \
+	SSBI_MODE2_REG_ADDR_15_8_MASK))
+
+#define SSBI_MODE2_ADDR_WIDTH(N) \
+	((((N) - 8) << SSBI_MODE2_ADDR_WIDTH_SHFT) & SSBI_MODE2_ADDR_WIDTH_MASK)
+
+#define SSBI_TIMEOUT_US			100
+
+#define SSBI_CMD_READ(AD) \
+	(SSBI_CMD_RDWRN | (((AD) & 0xFF) << SSBI_CMD_REG_ADDR_SHFT))
+
+#define SSBI_CMD_WRITE(AD, DT) \
+	((((AD) & 0xFF) << SSBI_CMD_REG_ADDR_SHFT) | \
+	 (((DT) & 0xFF) << SSBI_CMD_REG_DATA_SHFT))
+
+/* SSBI PMIC Arbiter command registers */
+#define SSBI_PA_CMD			0x0000
+#define SSBI_PA_RD_STATUS		0x0004
+
+/* SSBI_PA_CMD fields */
+#define SSBI_PA_CMD_RDWRN		(0x01 << 24)
+#define SSBI_PA_CMD_REG_ADDR_14_8_SHFT	(0x10)
+#define SSBI_PA_CMD_REG_ADDR_14_8_MASK	(0x7F << SSBI_PA_CMD_REG_ADDR_14_8_SHFT)
+#define SSBI_PA_CMD_REG_ADDR_7_0_SHFT	(0x08)
+#define SSBI_PA_CMD_REG_ADDR_7_0_MASK	(0xFF << SSBI_PA_CMD_REG_ADDR_7_0_SHFT)
+#define SSBI_PA_CMD_REG_DATA_SHFT	(0x00)
+#define SSBI_PA_CMD_REG_DATA_MASK	(0xFF << SSBI_PA_CMD_REG_DATA_SHFT)
+
+#define SSBI_PA_CMD_REG_DATA(DT) \
+	(((DT) << SSBI_PA_CMD_REG_DATA_SHFT) & SSBI_PA_CMD_REG_DATA_MASK)
+
+#define SSBI_PA_CMD_REG_ADDR(AD) \
+	(((AD) << SSBI_PA_CMD_REG_ADDR_7_0_SHFT) & \
+	(SSBI_PA_CMD_REG_ADDR_14_8_MASK|SSBI_PA_CMD_REG_ADDR_7_0_MASK))
+
+/* SSBI_PA_RD_STATUS fields */
+#define SSBI_PA_RD_STATUS_TRANS_DONE	(0x01 << 27)
+#define SSBI_PA_RD_STATUS_TRANS_DENIED	(0x01 << 26)
+#define SSBI_PA_RD_STATUS_REG_DATA_SHFT	(0x00)
+#define SSBI_PA_RD_STATUS_REG_DATA_MASK	(0xFF << SSBI_PA_CMD_REG_DATA_SHFT)
+#define SSBI_PA_RD_STATUS_TRANS_COMPLETE \
+	(SSBI_PA_RD_STATUS_TRANS_DONE|SSBI_PA_RD_STATUS_TRANS_DENIED)
+
+/* SSBI_FSM Read and Write commands for the FSM9xxx SSBI implementation */
+#define SSBI_FSM_CMD_REG_ADDR_SHFT	(0x08)
+
+#define SSBI_FSM_CMD_READ(AD) \
+	(SSBI_CMD_RDWRN | (((AD) & 0xFFFF) << SSBI_FSM_CMD_REG_ADDR_SHFT))
+
+#define SSBI_FSM_CMD_WRITE(AD, DT) \
+	((((AD) & 0xFFFF) << SSBI_FSM_CMD_REG_ADDR_SHFT) | \
+	 (((DT) & 0xFF) << SSBI_CMD_REG_DATA_SHFT))
+
+#define SSBI_MSM_NAME			"i2c_ssbi"
+
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("2.0");
+MODULE_ALIAS("platform:i2c_ssbi");
+
+struct i2c_ssbi_dev {
+	void __iomem		*base;
+	struct device           *dev;
+	struct i2c_adapter	 adapter;
+	unsigned long		 mem_phys_addr;
+	size_t			 mem_size;
+	bool			 use_rlock;
+	remote_spinlock_t	 rspin_lock;
+	enum msm_ssbi_controller_type controller_type;
+	int (*read)(struct i2c_ssbi_dev *, struct i2c_msg *);
+	int (*write)(struct i2c_ssbi_dev *, struct i2c_msg *);
+};
+
+static inline u32 ssbi_readl(struct i2c_ssbi_dev *ssbi, u32 reg)
+{
+	return readl_relaxed(ssbi->base + reg);
+}
+
+static inline void ssbi_writel(struct i2c_ssbi_dev *ssbi, u32 reg, u32 val)
+{
+	writel_relaxed(val, ssbi->base + reg);
+}
+
+static inline int
+i2c_ssbi_poll_for_device_ready(struct i2c_ssbi_dev *ssbi)
+{
+	u32 timeout = SSBI_TIMEOUT_US;
+
+	while (!(ssbi_readl(ssbi, SSBI2_STATUS) & SSBI_STATUS_READY)) {
+		if (--timeout == 0) {
+			dev_err(ssbi->dev, "%s: timeout, status %x\n", __func__,
+				ssbi_readl(ssbi, SSBI2_STATUS));
+			return -ETIMEDOUT;
+		}
+		udelay(1);
+	}
+
+	return 0;
+}
+
+static inline int
+i2c_ssbi_poll_for_read_completed(struct i2c_ssbi_dev *ssbi)
+{
+	u32 timeout = SSBI_TIMEOUT_US;
+
+	while (!(ssbi_readl(ssbi, SSBI2_STATUS) & SSBI_STATUS_RD_READY)) {
+		if (--timeout == 0) {
+			dev_err(ssbi->dev, "%s: timeout, status %x\n", __func__,
+				ssbi_readl(ssbi, SSBI2_STATUS));
+			return -ETIMEDOUT;
+		}
+		udelay(1);
+	}
+
+	return 0;
+}
+
+static inline int
+i2c_ssbi_poll_for_transfer_completed(struct i2c_ssbi_dev *ssbi)
+{
+	u32 timeout = SSBI_TIMEOUT_US;
+
+	while ((ssbi_readl(ssbi, SSBI2_STATUS) & SSBI_STATUS_MCHN_BUSY)) {
+		if (--timeout == 0) {
+			dev_err(ssbi->dev, "%s: timeout, status %x\n", __func__,
+				ssbi_readl(ssbi, SSBI2_STATUS));
+			return -ETIMEDOUT;
+		}
+		udelay(1);
+	}
+
+	return 0;
+}
+
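+/*
+ * Byte-at-a-time SSBI read: on SSBI 2.0 controllers the upper address bits
+ * are first programmed into SSBI2_MODE2; each byte then waits for READY,
+ * issues the read command and collects the result from SSBI2_RD once
+ * RD_READY is set.
+ */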
+static int
+i2c_ssbi_read_bytes(struct i2c_ssbi_dev *ssbi, struct i2c_msg *msg)
+{
+	int ret = 0;
+	u8 *buf = msg->buf;
+	u16 len = msg->len;
+	u16 addr = msg->addr;
+	u32 read_cmd;
+
+	if (ssbi->controller_type == MSM_SBI_CTRL_SSBI2) {
+		u32 mode2 = ssbi_readl(ssbi, SSBI2_MODE2);
+		ssbi_writel(ssbi, SSBI2_MODE2,
+				SSBI_MODE2_REG_ADDR_15_8(mode2, addr));
+	}
+
+	if (ssbi->controller_type == FSM_SBI_CTRL_SSBI)
+		read_cmd = SSBI_FSM_CMD_READ(addr);
+	else
+		read_cmd = SSBI_CMD_READ(addr);
+
+	while (len) {
+		ret = i2c_ssbi_poll_for_device_ready(ssbi);
+		if (ret)
+			goto read_failed;
+
+		ssbi_writel(ssbi, SSBI2_CMD, read_cmd);
+
+		ret = i2c_ssbi_poll_for_read_completed(ssbi);
+		if (ret)
+			goto read_failed;
+
+		*buf++ = ssbi_readl(ssbi, SSBI2_RD) & SSBI_RD_REG_DATA_MASK;
+		len--;
+	}
+
+read_failed:
+	return ret;
+}
+
+static int
+i2c_ssbi_write_bytes(struct i2c_ssbi_dev *ssbi, struct i2c_msg *msg)
+{
+	int ret = 0;
+	u8 *buf = msg->buf;
+	u16 len = msg->len;
+	u16 addr = msg->addr;
+
+	if (ssbi->controller_type == MSM_SBI_CTRL_SSBI2) {
+		u32 mode2 = ssbi_readl(ssbi, SSBI2_MODE2);
+		ssbi_writel(ssbi, SSBI2_MODE2,
+				SSBI_MODE2_REG_ADDR_15_8(mode2, addr));
+	}
+
+	while (len) {
+		ret = i2c_ssbi_poll_for_device_ready(ssbi);
+		if (ret)
+			goto write_failed;
+
+		if (ssbi->controller_type == FSM_SBI_CTRL_SSBI)
+			ssbi_writel(ssbi, SSBI2_CMD,
+				SSBI_FSM_CMD_WRITE(addr, *buf++));
+		else
+			ssbi_writel(ssbi, SSBI2_CMD,
+				SSBI_CMD_WRITE(addr, *buf++));
+
+		ret = i2c_ssbi_poll_for_transfer_completed(ssbi);
+		if (ret)
+			goto write_failed;
+
+		len--;
+	}
+
+write_failed:
+	return ret;
+}
+
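+/*
+ * Issue a single PMIC-arbiter command and poll SSBI_PA_RD_STATUS until the
+ * transaction completes or is denied.  For reads, the data byte is returned
+ * through *data.
+ */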
+static inline int
+i2c_ssbi_pa_transfer(struct i2c_ssbi_dev *ssbi, u32 cmd, u8 *data)
+{
+	u32 rd_status;
+	u32 timeout = SSBI_TIMEOUT_US;
+
+	ssbi_writel(ssbi, SSBI_PA_CMD, cmd);
+	rd_status = ssbi_readl(ssbi, SSBI_PA_RD_STATUS);
+
+	while ((rd_status & (SSBI_PA_RD_STATUS_TRANS_COMPLETE)) == 0) {
+
+		if (--timeout == 0) {
+			dev_err(ssbi->dev, "%s: timeout, status %x\n",
+					__func__, rd_status);
+			return -ETIMEDOUT;
+		}
+		udelay(1);
+		rd_status = ssbi_readl(ssbi, SSBI_PA_RD_STATUS);
+	}
+
+	if (rd_status & SSBI_PA_RD_STATUS_TRANS_DENIED) {
+		dev_err(ssbi->dev, "%s: transaction denied, status %x\n",
+				__func__, rd_status);
+		return -EPERM;
+	}
+
+	if (data)
+		*data = (rd_status & SSBI_PA_RD_STATUS_REG_DATA_MASK) >>
+					SSBI_PA_CMD_REG_DATA_SHFT;
+	return 0;
+}
+
+static int
+i2c_ssbi_pa_read_bytes(struct i2c_ssbi_dev *ssbi, struct i2c_msg *msg)
+{
+	int ret = 0;
+	u8  data;
+	u8 *buf = msg->buf;
+	u16 len = msg->len;
+	u32 read_cmd = (SSBI_PA_CMD_RDWRN | SSBI_PA_CMD_REG_ADDR(msg->addr));
+
+	while (len) {
+
+		ret = i2c_ssbi_pa_transfer(ssbi, read_cmd, &data);
+		if (ret)
+			goto read_failed;
+
+		*buf++ = data;
+		len--;
+	}
+
+read_failed:
+	return ret;
+}
+
+static int
+i2c_ssbi_pa_write_bytes(struct i2c_ssbi_dev *ssbi, struct i2c_msg *msg)
+{
+	int ret = 0;
+	u8 *buf = msg->buf;
+	u16 len = msg->len;
+	u32 addr = SSBI_PA_CMD_REG_ADDR(msg->addr);
+
+	while (len) {
+
+		u32 write_cmd = addr | (*buf++ & SSBI_PA_CMD_REG_DATA_MASK);
+
+		ret = i2c_ssbi_pa_transfer(ssbi, write_cmd, NULL);
+		if (ret)
+			goto write_failed;
+		len--;
+	}
+
+write_failed:
+	return ret;
+}
+
+static int
+i2c_ssbi_transfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+{
+	int ret = 0;
+	int rem = num;
+	unsigned long flags = 0;
+	struct i2c_ssbi_dev *ssbi = i2c_get_adapdata(adap);
+
+	if (ssbi->use_rlock)
+		remote_spin_lock_irqsave(&ssbi->rspin_lock, flags);
+
+	while (rem) {
+		if (msgs->flags & I2C_M_RD) {
+			ret = ssbi->read(ssbi, msgs);
+			if (ret)
+				goto transfer_failed;
+		} else {
+			ret = ssbi->write(ssbi, msgs);
+			if (ret)
+				goto transfer_failed;
+		}
+
+		msgs++;
+		rem--;
+	}
+
+	if (ssbi->use_rlock)
+		remote_spin_unlock_irqrestore(&ssbi->rspin_lock, flags);
+
+	return num;
+
+transfer_failed:
+	if (ssbi->use_rlock)
+		remote_spin_unlock_irqrestore(&ssbi->rspin_lock, flags);
+	return ret;
+}
+
+static u32 i2c_ssbi_i2c_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C;
+}
+
+static const struct i2c_algorithm msm_i2c_algo = {
+	.master_xfer	= i2c_ssbi_transfer,
+	.functionality	= i2c_ssbi_i2c_func,
+};
+
+static int __init i2c_ssbi_probe(struct platform_device *pdev)
+{
+	int			 ret = 0;
+	struct resource		*ssbi_res;
+	struct i2c_ssbi_dev	*ssbi;
+	const struct msm_i2c_ssbi_platform_data *pdata;
+
+	pdata = pdev->dev.platform_data;
+	if (!pdata) {
+		ret = -ENXIO;
+		dev_err(&pdev->dev, "platform data not initialized\n");
+		goto err_probe_exit;
+	}
+
+	ssbi = kzalloc(sizeof(struct i2c_ssbi_dev), GFP_KERNEL);
+	if (!ssbi) {
+		ret = -ENOMEM;
+		dev_err(&pdev->dev, "allocation failed\n");
+		goto err_probe_exit;
+	}
+
+	ssbi_res = platform_get_resource_byname(pdev,
+						IORESOURCE_MEM, "ssbi_base");
+	if (!ssbi_res) {
+		ret = -ENXIO;
+		dev_err(&pdev->dev, "get_resource_byname failed\n");
+		goto err_probe_res;
+	}
+
+	ssbi->mem_phys_addr = ssbi_res->start;
+	ssbi->mem_size = resource_size(ssbi_res);
+	if (!request_mem_region(ssbi->mem_phys_addr, ssbi->mem_size,
+				SSBI_MSM_NAME)) {
+		ret = -ENXIO;
+		dev_err(&pdev->dev, "request_mem_region failed\n");
+		goto err_probe_reqmem;
+	}
+
+	ssbi->base = ioremap(ssbi->mem_phys_addr, ssbi->mem_size);
+	if (!ssbi->base) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		goto err_probe_ioremap;
+	}
+
+	ssbi->dev = &pdev->dev;
+	platform_set_drvdata(pdev, ssbi);
+
+	ssbi->controller_type = pdata->controller_type;
+	if (ssbi->controller_type == MSM_SBI_CTRL_PMIC_ARBITER) {
+		ssbi->read = i2c_ssbi_pa_read_bytes;
+		ssbi->write = i2c_ssbi_pa_write_bytes;
+	} else {
+		ssbi->read = i2c_ssbi_read_bytes;
+		ssbi->write = i2c_ssbi_write_bytes;
+	}
+
+	i2c_set_adapdata(&ssbi->adapter, ssbi);
+	ssbi->adapter.algo = &msm_i2c_algo;
+	strlcpy(ssbi->adapter.name,
+		"MSM SSBI adapter",
+		sizeof(ssbi->adapter.name));
+
+	if (pdata->rsl_id) {
+		ret = remote_spin_lock_init(&ssbi->rspin_lock, pdata->rsl_id);
+		if (ret) {
+			dev_err(&pdev->dev, "remote spinlock init failed\n");
+			goto err_remote_spinlock_init_failed;
+		}
+		ssbi->use_rlock = 1;
+	}
+
+	ssbi->adapter.nr = pdev->id;
+	ret = i2c_add_numbered_adapter(&ssbi->adapter);
+	if (ret) {
+		dev_err(&pdev->dev, "i2c_add_numbered_adapter failed\n");
+		goto err_add_adapter_failed;
+	}
+	return 0;
+
+err_add_adapter_failed:
+err_remote_spinlock_init_failed:
+	iounmap(ssbi->base);
+	platform_set_drvdata(pdev, NULL);
+err_probe_ioremap:
+	release_mem_region(ssbi->mem_phys_addr, ssbi->mem_size);
+err_probe_reqmem:
+err_probe_res:
+	kfree(ssbi);
+err_probe_exit:
+	return ret;
+}
+
+static int __devexit i2c_ssbi_remove(struct platform_device *pdev)
+{
+	struct i2c_ssbi_dev *ssbi = platform_get_drvdata(pdev);
+
+	platform_set_drvdata(pdev, NULL);
+	i2c_del_adapter(&ssbi->adapter);
+	iounmap(ssbi->base);
+	release_mem_region(ssbi->mem_phys_addr, ssbi->mem_size);
+	kfree(ssbi);
+	return 0;
+}
+
+static struct platform_driver i2c_ssbi_driver = {
+	.driver		= {
+		.name	= "i2c_ssbi",
+		.owner	= THIS_MODULE,
+	},
+	.remove		= __devexit_p(i2c_ssbi_remove),
+};
+
+static int __init i2c_ssbi_init(void)
+{
+	return platform_driver_probe(&i2c_ssbi_driver, i2c_ssbi_probe);
+}
+arch_initcall(i2c_ssbi_init);
+
+static void __exit i2c_ssbi_exit(void)
+{
+	platform_driver_unregister(&i2c_ssbi_driver);
+}
+module_exit(i2c_ssbi_exit);
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 07b6c81..d73f49c 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -61,6 +61,7 @@
 {
 	/* Interrupts are disabled, just acquire the lock. */
 	spin_lock(&client->buffer_lock);
+	wake_lock_timeout(&client->wake_lock, 5 * HZ);
 
 	client->buffer[client->head++] = *event;
diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
index 56eb471..aaee448 100644
--- a/drivers/input/joystick/Kconfig
+++ b/drivers/input/joystick/Kconfig
@@ -329,4 +329,15 @@
 	  To compile this as a module choose M here: the module will be called
 	  maplecontrol.
 
+config TOUCHDISC_VTD518_SHINETSU
+	tristate "ShinEtsu VTD518 TouchDisc"
+	depends on I2C
+	default n
+	help
+	  Say Y here if you have the ShinEtsu VTD518 TouchDisc connected. It
+	  provides detection of absolute and relative motion and d-pad-like
+	  buttons.
+
+	  To compile this as a module choose M here: the module will be called
+	  tdisc_vtd518_shinetsu.
 endif
diff --git a/drivers/input/joystick/Makefile b/drivers/input/joystick/Makefile
index 92dc0de..7009c38 100644
--- a/drivers/input/joystick/Makefile
+++ b/drivers/input/joystick/Makefile
@@ -32,4 +32,4 @@
 obj-$(CONFIG_JOYSTICK_XPAD)		+= xpad.o
 obj-$(CONFIG_JOYSTICK_ZHENHUA)		+= zhenhua.o
 obj-$(CONFIG_JOYSTICK_WALKERA0701)	+= walkera0701.o
-
+obj-$(CONFIG_TOUCHDISC_VTD518_SHINETSU) += tdisc_vtd518_shinetsu.o
\ No newline at end of file
diff --git a/drivers/input/joystick/tdisc_vtd518_shinetsu.c b/drivers/input/joystick/tdisc_vtd518_shinetsu.c
new file mode 100644
index 0000000..efbe974
--- /dev/null
+++ b/drivers/input/joystick/tdisc_vtd518_shinetsu.c
@@ -0,0 +1,528 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/workqueue.h>
+#include <linux/input.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/input/tdisc_shinetsu.h>
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+#include <linux/earlysuspend.h>
+/* Early-suspend level */
+#define TDISC_SUSPEND_LEVEL 1
+#endif
+
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.1");
+MODULE_DESCRIPTION("Shinetsu Touchdisc driver");
+MODULE_ALIAS("platform:tdisc-shinetsu");
+
+#define TDSIC_BLK_READ_CMD		0x00
+#define TDISC_READ_DELAY		msecs_to_jiffies(25)
+#define X_MAX				(32)
+#define X_MIN				(-32)
+#define Y_MAX				(32)
+#define Y_MIN				(-32)
+#define PRESSURE_MAX			(32)
+#define PRESSURE_MIN			(0)
+#define TDISC_USER_ACTIVE_MASK		0x40
+#define TDISC_NORTH_SWITCH_MASK		0x20
+#define TDISC_SOUTH_SWITCH_MASK		0x10
+#define TDISC_EAST_SWITCH_MASK		0x08
+#define TDISC_WEST_SWITCH_MASK		0x04
+#define TDISC_CENTER_SWITCH		0x01
+#define TDISC_BUTTON_PRESS_MASK		0x3F
+
+#define DRIVER_NAME			"tdisc-shinetsu"
+#define DEVICE_NAME			"vtd518"
+#define TDISC_NAME			"tdisc_shinetsu"
+#define TDISC_INT			"tdisc_interrupt"
+
+struct tdisc_data {
+	struct input_dev  *tdisc_device;
+	struct i2c_client *clientp;
+	struct tdisc_platform_data *pdata;
+	struct delayed_work tdisc_work;
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+	struct early_suspend	tdisc_early_suspend;
+#endif
+};
+
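+/*
+ * The 8-byte block read from the device is laid out as: data[0]/data[1]
+ * relative X/Y, data[2]/data[3] absolute X/Y, data[4] pressure, data[6]
+ * scroll wheel, and data[7] the switch/status bits defined above.
+ */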
+static void process_tdisc_data(struct tdisc_data *dd, u8 *data)
+{
+	int i;
+	static bool button_press;
+	s8 x, y;
+
+	/* Check if the user is actively navigating */
+	if (!(data[7] & TDISC_USER_ACTIVE_MASK)) {
+		pr_debug(" TDISC ! No Data to report ! False positive \n");
+		return;
+	}
+
+	for (i = 0; i < 8 ; i++)
+		pr_debug(" Data[%d] = %x\n", i, data[i]);
+
+	/* Check if there is a button press */
+	if (dd->pdata->tdisc_report_keys)
+		if (data[7] & TDISC_BUTTON_PRESS_MASK || button_press == true) {
+			input_report_key(dd->tdisc_device, KEY_UP,
+				(data[7] & TDISC_NORTH_SWITCH_MASK));
+
+			input_report_key(dd->tdisc_device, KEY_DOWN,
+				(data[7] & TDISC_SOUTH_SWITCH_MASK));
+
+			input_report_key(dd->tdisc_device, KEY_RIGHT,
+				 (data[7] & TDISC_EAST_SWITCH_MASK));
+
+			input_report_key(dd->tdisc_device, KEY_LEFT,
+				 (data[7] & TDISC_WEST_SWITCH_MASK));
+
+			input_report_key(dd->tdisc_device, KEY_ENTER,
+				 (data[7] & TDISC_CENTER_SWITCH));
+
+			if (data[7] & TDISC_BUTTON_PRESS_MASK)
+				button_press = true;
+			else
+				button_press = false;
+		}
+
+	if (dd->pdata->tdisc_report_relative) {
+		/* Report relative motion values */
+		x = (s8) data[0];
+		y = (s8) data[1];
+
+		if (dd->pdata->tdisc_reverse_x)
+			x *= -1;
+		if (dd->pdata->tdisc_reverse_y)
+			y *= -1;
+
+		input_report_rel(dd->tdisc_device, REL_X, x);
+		input_report_rel(dd->tdisc_device, REL_Y, y);
+	}
+
+	if (dd->pdata->tdisc_report_absolute) {
+		input_report_abs(dd->tdisc_device, ABS_X, data[2]);
+		input_report_abs(dd->tdisc_device, ABS_Y, data[3]);
+		input_report_abs(dd->tdisc_device, ABS_PRESSURE, data[4]);
+	}
+
+	if (dd->pdata->tdisc_report_wheel)
+		input_report_rel(dd->tdisc_device, REL_WHEEL, (s8) data[6]);
+
+	input_sync(dd->tdisc_device);
+}
+
+static void tdisc_work_f(struct work_struct *work)
+{
+	int rc;
+	u8 data[8];
+	struct tdisc_data	*dd =
+		container_of(work, struct tdisc_data, tdisc_work.work);
+
+	/*
+	 * Read the value of the interrupt pin. If low, perform
+	 * an I2C read of 8 bytes to get the touch values and then
+	 * reschedule the work after 25ms. If pin is high, exit
+	 * and wait for next interrupt.
+	 */
+	rc = gpio_get_value_cansleep(dd->pdata->tdisc_gpio);
+	if (rc < 0) {
+		rc = pm_runtime_put_sync(&dd->clientp->dev);
+		if (rc < 0)
+			dev_dbg(&dd->clientp->dev, "%s: pm_runtime_put_sync"
+				" failed\n", __func__);
+		enable_irq(dd->clientp->irq);
+		return;
+	}
+
+	pr_debug("%s: TDISC gpio_get_value = %d\n", __func__, rc);
+	if (rc == 0) {
+		/* We have data to read */
+		rc = i2c_smbus_read_i2c_block_data(dd->clientp,
+				TDSIC_BLK_READ_CMD, 8, data);
+		if (rc < 0) {
+			pr_debug("%s:I2C read failed,trying again\n", __func__);
+			rc = i2c_smbus_read_i2c_block_data(dd->clientp,
+						TDSIC_BLK_READ_CMD, 8, data);
+			if (rc < 0) {
+				pr_err("%s:I2C read failed again, exiting\n",
+								 __func__);
+				goto fail_i2c_read;
+			}
+		}
+		pr_debug("%s: TDISC: I2C read success\n", __func__);
+		process_tdisc_data(dd, data);
+	} else {
+		/*
+		 * We have no data to read.
+		 * Enable the IRQ to receive further interrupts.
+		 */
+		enable_irq(dd->clientp->irq);
+
+		rc = pm_runtime_put_sync(&dd->clientp->dev);
+		if (rc < 0)
+			dev_dbg(&dd->clientp->dev, "%s: pm_runtime_put_sync"
+				" failed\n", __func__);
+		return;
+	}
+
+fail_i2c_read:
+	schedule_delayed_work(&dd->tdisc_work, TDISC_READ_DELAY);
+}
+
+static irqreturn_t tdisc_interrupt(int irq, void *dev_id)
+{
+	/*
+	 * The touch disc initially generates an interrupt on any touch.
+	 * The interrupt line is pulled low and remains low as long as touch
+	 * operations are being performed.  When there are no further touch
+	 * operations, the line goes high.  The same process repeats the next
+	 * time the disc is touched.
+	 *
+	 * We do the following operations once we receive an interrupt.
+	 * 1. Disable the IRQ for any further interrupts.
+	 * 2. Schedule work every 25 ms while the GPIO is still low.
+	 * 3. In the work queue, do an I2C read to get the touch data.
+	 * 4. If the GPIO is pulled high, enable the IRQ and cancel the work.
+	 */
+	struct tdisc_data *dd = dev_id;
+	int rc;
+
+	rc = pm_runtime_get(&dd->clientp->dev);
+	if (rc < 0)
+		dev_dbg(&dd->clientp->dev, "%s: pm_runtime_get"
+			" failed\n", __func__);
+	pr_debug("%s: TDISC IRQ ! :-)\n", __func__);
+
+	/* Schedule the work immediately */
+	disable_irq_nosync(dd->clientp->irq);
+	schedule_delayed_work(&dd->tdisc_work, 0);
+	return IRQ_HANDLED;
+}
+
+static int tdisc_open(struct input_dev *dev)
+{
+	int rc;
+	struct tdisc_data *dd = input_get_drvdata(dev);
+
+	if (!dd->clientp) {
+		/* Check if a valid i2c client is present */
+		pr_err("%s: no i2c adapter present \n", __func__);
+		return  -ENODEV;
+	}
+
+	/* Enable the device */
+	if (dd->pdata->tdisc_enable != NULL) {
+		rc = dd->pdata->tdisc_enable();
+		if (rc)
+			goto fail_open;
+	}
+	rc = request_any_context_irq(dd->clientp->irq, tdisc_interrupt,
+				 IRQF_TRIGGER_FALLING, TDISC_INT, dd);
+	if (rc < 0) {
+		pr_err("%s: request IRQ failed\n", __func__);
+		goto fail_irq_open;
+	}
+
+	return 0;
+
+fail_irq_open:
+	if (dd->pdata->tdisc_disable != NULL)
+		dd->pdata->tdisc_disable();
+fail_open:
+	return rc;
+}
+
+static void tdisc_close(struct input_dev *dev)
+{
+	struct tdisc_data *dd = input_get_drvdata(dev);
+
+	free_irq(dd->clientp->irq, dd);
+	cancel_delayed_work_sync(&dd->tdisc_work);
+	if (dd->pdata->tdisc_disable != NULL)
+		dd->pdata->tdisc_disable();
+}
+
+static int __devexit tdisc_remove(struct i2c_client *client)
+{
+	struct tdisc_data		*dd;
+
+	pm_runtime_disable(&client->dev);
+	dd = i2c_get_clientdata(client);
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	unregister_early_suspend(&dd->tdisc_early_suspend);
+#endif
+	input_unregister_device(dd->tdisc_device);
+	if (dd->pdata->tdisc_release != NULL)
+		dd->pdata->tdisc_release();
+	i2c_set_clientdata(client, NULL);
+	kfree(dd);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int tdisc_suspend(struct device *dev)
+{
+	int rc;
+	struct tdisc_data *dd;
+
+	dd = dev_get_drvdata(dev);
+	if (device_may_wakeup(&dd->clientp->dev))
+		enable_irq_wake(dd->clientp->irq);
+	else {
+		disable_irq(dd->clientp->irq);
+
+		if (cancel_delayed_work_sync(&dd->tdisc_work))
+			enable_irq(dd->clientp->irq);
+
+		if (dd->pdata->tdisc_disable) {
+			rc = dd->pdata->tdisc_disable();
+			if (rc) {
+				pr_err("%s: Suspend failed\n", __func__);
+				return rc;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int tdisc_resume(struct device *dev)
+{
+	int rc;
+	struct tdisc_data *dd;
+
+	dd = dev_get_drvdata(dev);
+	if (device_may_wakeup(&dd->clientp->dev))
+		disable_irq_wake(dd->clientp->irq);
+	else {
+		if (dd->pdata->tdisc_enable) {
+			rc = dd->pdata->tdisc_enable();
+			if (rc) {
+				pr_err("%s: Resume failed\n", __func__);
+				return rc;
+			}
+		}
+		enable_irq(dd->clientp->irq);
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void tdisc_early_suspend(struct early_suspend *h)
+{
+	struct tdisc_data *dd = container_of(h, struct tdisc_data,
+						tdisc_early_suspend);
+
+	tdisc_suspend(&dd->clientp->dev);
+}
+
+static void tdisc_late_resume(struct early_suspend *h)
+{
+	struct tdisc_data *dd = container_of(h, struct tdisc_data,
+						tdisc_early_suspend);
+
+	tdisc_resume(&dd->clientp->dev);
+}
+#endif
+
+static struct dev_pm_ops tdisc_pm_ops = {
+#ifndef CONFIG_HAS_EARLYSUSPEND
+	.suspend = tdisc_suspend,
+	.resume  = tdisc_resume,
+#endif
+};
+#endif
+
+static const struct i2c_device_id tdisc_id[] = {
+	{ DEVICE_NAME, 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, tdisc_id);
+
+static int __devinit tdisc_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	int			rc = -1;
+	int	x_max, x_min, y_max, y_min, pressure_min, pressure_max;
+	struct tdisc_platform_data  *pd;
+	struct tdisc_data           *dd;
+
+	/* Check if the I2C adapter supports the BLOCK READ functionality */
+	if (!i2c_check_functionality(client->adapter,
+			I2C_FUNC_SMBUS_READ_I2C_BLOCK))
+		return -ENODEV;
+
+	/* Enable runtime PM ops, start in ACTIVE mode */
+	rc = pm_runtime_set_active(&client->dev);
+	if (rc < 0)
+		dev_dbg(&client->dev, "unable to set runtime pm state\n");
+	pm_runtime_enable(&client->dev);
+
+	dd = kzalloc(sizeof *dd, GFP_KERNEL);
+	if (!dd) {
+		rc = -ENOMEM;
+		goto probe_exit;
+	}
+
+	i2c_set_clientdata(client, dd);
+	dd->clientp = client;
+	pd = client->dev.platform_data;
+	if (!pd) {
+		pr_err("%s: platform data not set \n", __func__);
+		rc = -EFAULT;
+		goto probe_free_exit;
+	}
+
+	dd->pdata = pd;
+
+	dd->tdisc_device = input_allocate_device();
+	if (!dd->tdisc_device) {
+		rc = -ENOMEM;
+		goto probe_free_exit;
+	}
+
+	input_set_drvdata(dd->tdisc_device, dd);
+	dd->tdisc_device->open       = tdisc_open;
+	dd->tdisc_device->close      = tdisc_close;
+	dd->tdisc_device->name       = TDISC_NAME;
+	dd->tdisc_device->id.bustype = BUS_I2C;
+	dd->tdisc_device->id.product = 1;
+	dd->tdisc_device->id.version = 1;
+
+	if (pd->tdisc_abs) {
+		x_max = pd->tdisc_abs->x_max;
+		x_min = pd->tdisc_abs->x_min;
+		y_max = pd->tdisc_abs->y_max;
+		y_min = pd->tdisc_abs->y_min;
+		pressure_max = pd->tdisc_abs->pressure_max;
+		pressure_min = pd->tdisc_abs->pressure_min;
+	} else {
+		x_max = X_MAX;
+		x_min = X_MIN;
+		y_max = Y_MAX;
+		y_min = Y_MIN;
+		pressure_max = PRESSURE_MAX;
+		pressure_min = PRESSURE_MIN;
+	}
+
+	/* Device capabilities for relative motion */
+	input_set_capability(dd->tdisc_device, EV_REL, REL_X);
+	input_set_capability(dd->tdisc_device, EV_REL, REL_Y);
+	input_set_capability(dd->tdisc_device, EV_KEY, BTN_MOUSE);
+
+	/* Device capabilities for absolute motion */
+	input_set_capability(dd->tdisc_device, EV_ABS, ABS_X);
+	input_set_capability(dd->tdisc_device, EV_ABS, ABS_Y);
+	input_set_capability(dd->tdisc_device, EV_ABS, ABS_PRESSURE);
+
+	input_set_abs_params(dd->tdisc_device, ABS_X, x_min, x_max, 0, 0);
+	input_set_abs_params(dd->tdisc_device, ABS_Y, y_min, y_max, 0, 0);
+	input_set_abs_params(dd->tdisc_device, ABS_PRESSURE, pressure_min,
+							pressure_max, 0, 0);
+
+	/* Device capabilities for scroll and buttons */
+	input_set_capability(dd->tdisc_device, EV_REL, REL_WHEEL);
+	input_set_capability(dd->tdisc_device, EV_KEY, KEY_LEFT);
+	input_set_capability(dd->tdisc_device, EV_KEY, KEY_RIGHT);
+	input_set_capability(dd->tdisc_device, EV_KEY, KEY_UP);
+	input_set_capability(dd->tdisc_device, EV_KEY, KEY_DOWN);
+	input_set_capability(dd->tdisc_device, EV_KEY, KEY_ENTER);
+
+	/* Setup the device for operation */
+	if (dd->pdata->tdisc_setup != NULL) {
+		rc = dd->pdata->tdisc_setup();
+		if (rc) {
+			pr_err("%s: Setup failed\n", __func__);
+			goto probe_unreg_free_exit;
+		}
+	}
+
+	/* Setup wakeup capability */
+	device_init_wakeup(&dd->clientp->dev, dd->pdata->tdisc_wakeup);
+
+	INIT_DELAYED_WORK(&dd->tdisc_work, tdisc_work_f);
+
+	rc = input_register_device(dd->tdisc_device);
+	if (rc) {
+		pr_err("%s: input register device failed\n", __func__);
+		rc = -EINVAL;
+		goto probe_register_fail;
+	}
+
+	pm_runtime_set_suspended(&client->dev);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	dd->tdisc_early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN +
+						TDISC_SUSPEND_LEVEL;
+	dd->tdisc_early_suspend.suspend = tdisc_early_suspend;
+	dd->tdisc_early_suspend.resume = tdisc_late_resume;
+	register_early_suspend(&dd->tdisc_early_suspend);
+#endif
+	return 0;
+
+probe_register_fail:
+	if (dd->pdata->tdisc_release != NULL)
+		dd->pdata->tdisc_release();
+probe_unreg_free_exit:
+	input_free_device(dd->tdisc_device);
+probe_free_exit:
+	i2c_set_clientdata(client, NULL);
+	kfree(dd);
+probe_exit:
+	pm_runtime_set_suspended(&client->dev);
+	pm_runtime_disable(&client->dev);
+	return rc;
+}
+
+static struct i2c_driver tdisc_driver = {
+	.driver = {
+		.name   = DRIVER_NAME,
+		.owner  = THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm = &tdisc_pm_ops,
+#endif
+	},
+	.probe   = tdisc_probe,
+	.remove  =  __devexit_p(tdisc_remove),
+	.id_table = tdisc_id,
+};
+
+static int __init tdisc_init(void)
+{
+	int rc;
+
+	rc = i2c_add_driver(&tdisc_driver);
+	if (rc)
+		pr_err("%s: i2c add driver failed\n", __func__);
+	return rc;
+}
+
+static void __exit tdisc_exit(void)
+{
+	i2c_del_driver(&tdisc_driver);
+}
+
+module_init(tdisc_init);
+module_exit(tdisc_exit);
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index b4dee9d..0822866 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -394,6 +394,24 @@
 	  To compile this driver as a module, choose M here; the
 	  module will be called opencores-kbd.
 
+config KEYBOARD_PM8058
+	bool "Qualcomm PM8058 Matrix Keypad support"
+	depends on PM8058
+	help
+	  Say Y here to enable the driver for the keypad matrix interface
+	  on the Qualcomm PM8058 power management IC.
+
+config KEYBOARD_PMIC8XXX
+	tristate "Qualcomm PMIC8XXX keypad support"
+	depends on MFD_PM8XXX
+	help
+	  Say Y here if you want to enable the driver for the PMIC8XXX
+	  keypad provided as a reference design from Qualcomm. This is intended
+	  to support up to an 18x8 matrix-based keypad design.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called pmic8xxx-keypad.
+
 config KEYBOARD_PXA27x
 	tristate "PXA27x/PXA3xx keypad support"
 	depends on PXA27x || PXA3xx || ARCH_MMP
@@ -553,6 +571,30 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called xtkbd.
 
+config KEYBOARD_QCIKBD
+	tristate "Quanta Computer Inc. keyboard"
+	depends on I2C
+	default n
+	help
+	  Say Y here if you want to use the Quanta keyboard driver for the
+	  ST 1.5 platform.
+
+config KEYBOARD_QCIKBD_REPEAT
+	bool "Enable Quanta Computer Inc. keyboard key repeat feature"
+	depends on KEYBOARD_QCIKBD
+	default n
+	help
+	  Say Y here if you want to enable Quanta keyboard driver's key repeat
+	  feature.
+
+config KEYBOARD_QCIKBD_LID
+	bool "Enable lid event for Quanta Computer Inc. keyboard"
+	depends on KEYBOARD_QCIKBD
+	default n
+	help
+	  Say Y here if you want to register lid event in Quanta keyboard
+	  driver.
+
 config KEYBOARD_W90P910
 	tristate "W90P910 Matrix Keypad support"
 	depends on ARCH_W90X900
@@ -563,4 +605,28 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called w90p910_keypad.
 
+config KEYBOARD_PMIC8058
+	tristate "Qualcomm PMIC8058 keypad"
+	depends on PMIC8058
+	default y
+	help
+	  Say Y here if you want to enable the driver for the PMIC8058
+	  keypad provided as a reference design from Qualcomm. This is intended
+	  to support up to an 18x8 matrix-based keypad design.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called pmic8058-keypad.
 endif
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index ddde0fd..cf4c058 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -34,6 +34,7 @@
 obj-$(CONFIG_KEYBOARD_OMAP)		+= omap-keypad.o
 obj-$(CONFIG_KEYBOARD_OMAP4)		+= omap4-keypad.o
 obj-$(CONFIG_KEYBOARD_OPENCORES)	+= opencores-kbd.o
+obj-$(CONFIG_KEYBOARD_PM8058)		+= pm8058-keypad.o
 obj-$(CONFIG_KEYBOARD_PMIC8XXX)		+= pmic8xxx-keypad.o
 obj-$(CONFIG_KEYBOARD_PXA27x)		+= pxa27x_keypad.o
 obj-$(CONFIG_KEYBOARD_PXA930_ROTARY)	+= pxa930_rotary.o
@@ -50,4 +51,6 @@
 obj-$(CONFIG_KEYBOARD_TNETV107X)	+= tnetv107x-keypad.o
 obj-$(CONFIG_KEYBOARD_TWL4030)		+= twl4030_keypad.o
 obj-$(CONFIG_KEYBOARD_XTKBD)		+= xtkbd.o
+obj-$(CONFIG_KEYBOARD_QCIKBD)		+= qci_kbd.o
 obj-$(CONFIG_KEYBOARD_W90P910)		+= w90p910_keypad.o
+obj-$(CONFIG_KEYBOARD_PMIC8058)		+= pmic8058-keypad.o
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index b02e426..c7aa2ce 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -334,20 +334,21 @@
 				matrix_keypad_interrupt,
 				pdata->clustered_irq_flags,
 				"matrix-keypad", keypad);
-		if (err) {
+		if (err < 0) {
 			dev_err(&pdev->dev,
 				"Unable to acquire clustered interrupt\n");
 			goto err_free_rows;
 		}
 	} else {
 		for (i = 0; i < pdata->num_row_gpios; i++) {
-			err = request_irq(gpio_to_irq(pdata->row_gpios[i]),
+			err = request_any_context_irq(
+					gpio_to_irq(pdata->row_gpios[i]),
 					matrix_keypad_interrupt,
 					IRQF_DISABLED |
 					IRQF_TRIGGER_RISING |
 					IRQF_TRIGGER_FALLING,
 					"matrix-keypad", keypad);
-			if (err) {
+			if (err < 0) {
 				dev_err(&pdev->dev,
 					"Unable to acquire interrupt "
 					"for GPIO line %i\n",
diff --git a/drivers/input/keyboard/pmic8058-keypad.c b/drivers/input/keyboard/pmic8058-keypad.c
new file mode 100644
index 0000000..9c7588e
--- /dev/null
+++ b/drivers/input/keyboard/pmic8058-keypad.c
@@ -0,0 +1,948 @@
+/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/bitops.h>
+#include <linux/mfd/pmic8058.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/input/pmic8058-keypad.h>
+
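+/*
+ * Scan codes pack the matrix position as (row << PM8058_ROW_SHIFT) + col,
+ * so the keycode table needs one slot per possible code (18 * 8 = 144).
+ */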
+#define PM8058_MAX_ROWS		18
+#define PM8058_MAX_COLS		8
+#define PM8058_ROW_SHIFT	3
+#define PM8058_MATRIX_MAX_SIZE	(PM8058_MAX_ROWS * PM8058_MAX_COLS)
+
+#define PM8058_MIN_ROWS		5
+#define PM8058_MIN_COLS		5
+
+#define MAX_SCAN_DELAY		128
+#define MIN_SCAN_DELAY		1
+
+/* in nanoseconds */
+#define MAX_ROW_HOLD_DELAY	122000
+#define MIN_ROW_HOLD_DELAY	30500
+
+#define MAX_DEBOUNCE_B0_TIME	20
+#define MIN_DEBOUNCE_B0_TIME	5
+
+#define MAX_DEBOUNCE_A0_TIME	8
+#define MIN_DEBOUNCE_A0_TIME	1
+
+#define KEYP_CTRL			0x148
+
+#define KEYP_CTRL_EVNTS			BIT(0)
+#define KEYP_CTRL_EVNTS_MASK		0x3
+
+#define KEYP_CTRL_SCAN_COLS_SHIFT	5
+#define KEYP_CTRL_SCAN_COLS_MIN		5
+#define KEYP_CTRL_SCAN_COLS_BITS	0x3
+
+#define KEYP_CTRL_SCAN_ROWS_SHIFT	2
+#define KEYP_CTRL_SCAN_ROWS_MIN		5
+#define KEYP_CTRL_SCAN_ROWS_BITS	0x7
+
+#define KEYP_CTRL_KEYP_EN		BIT(7)
+
+#define KEYP_SCAN			0x149
+
+#define KEYP_SCAN_READ_STATE		BIT(0)
+#define KEYP_SCAN_DBOUNCE_SHIFT		1
+#define KEYP_SCAN_PAUSE_SHIFT		3
+#define KEYP_SCAN_ROW_HOLD_SHIFT	6
+
+#define KEYP_TEST			0x14A
+
+#define KEYP_TEST_CLEAR_RECENT_SCAN	BIT(6)
+#define KEYP_TEST_CLEAR_OLD_SCAN	BIT(5)
+#define KEYP_TEST_READ_RESET		BIT(4)
+#define KEYP_TEST_DTEST_EN		BIT(3)
+#define KEYP_TEST_ABORT_READ		BIT(0)
+
+#define KEYP_TEST_DBG_SELECT_SHIFT	1
+
+/* bits of these registers represent
+ * '0' for key press
+ * '1' for key release
+ */
+#define KEYP_RECENT_DATA		0x14B
+#define KEYP_OLD_DATA			0x14C
+
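+/* one keypad clock (32768 Hz) cycle is roughly 30.5 us */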
+#define KEYP_CLOCK_FREQ			32768
+
+/* Internal flags */
+#define KEYF_FIX_LAST_ROW		0x01
+
+
+/* ---------------------------------------------------------------------*/
+struct pmic8058_kp {
+	const struct pmic8058_keypad_data *pdata;
+	struct input_dev *input;
+	int key_sense_irq;
+	int key_stuck_irq;
+
+	unsigned short *keycodes;
+
+	struct device *dev;
+	u16 keystate[PM8058_MAX_ROWS];
+	u16 stuckstate[PM8058_MAX_ROWS];
+
+	u32	flags;
+	struct pm8058_chip	*pm_chip;
+
+	/* protect read/write */
+	struct mutex		mutex;
+	bool			user_disabled;
+	u32			disable_depth;
+
+	u8			ctrl_reg;
+};
+
+static int pmic8058_kp_write_u8(struct pmic8058_kp *kp,
+				 u8 data, u16 reg)
+{
+	int rc;
+
+	rc = pm8058_write(kp->pm_chip, reg, &data, 1);
+	if (rc < 0)
+		dev_warn(kp->dev, "Error writing pmic8058: %X - ret %X\n",
+				reg, rc);
+	return rc;
+}
+
+static int pmic8058_kp_read(struct pmic8058_kp *kp,
+				 u8 *data, u16 reg, unsigned num_bytes)
+{
+	int rc;
+
+	rc = pm8058_read(kp->pm_chip, reg, data, num_bytes);
+	if (rc < 0)
+		dev_warn(kp->dev, "Error reading pmic8058: %X - ret %X\n",
+				reg, rc);
+
+	return rc;
+}
+
+static int pmic8058_kp_read_u8(struct pmic8058_kp *kp,
+				 u8 *data, u16 reg)
+{
+	int rc;
+
+	rc = pmic8058_kp_read(kp, data, reg, 1);
+	if (rc < 0)
+		dev_warn(kp->dev, "Error reading pmic8058: %X - ret %X\n",
+				reg, rc);
+	return rc;
+}
+
+static u8 pmic8058_col_state(struct pmic8058_kp *kp, u8 col)
+{
+	/* all keys pressed on that particular row? */
+	if (col == 0x00)
+		return 1 << kp->pdata->num_cols;
+	else
+		return col & ((1 << kp->pdata->num_cols) - 1);
+}
+/* REVISIT: just for debugging, will be removed in final working version */
+static void __dump_kp_regs(struct pmic8058_kp *kp, char *msg)
+{
+	u8 temp;
+
+	dev_dbg(kp->dev, "%s\n", msg);
+
+	pmic8058_kp_read_u8(kp, &temp, KEYP_CTRL);
+	dev_dbg(kp->dev, "KEYP_CTRL - %X\n", temp);
+	pmic8058_kp_read_u8(kp, &temp, KEYP_SCAN);
+	dev_dbg(kp->dev, "KEYP_SCAN - %X\n", temp);
+	pmic8058_kp_read_u8(kp, &temp, KEYP_TEST);
+	dev_dbg(kp->dev, "KEYP_TEST - %X\n", temp);
+}
+
+/* H/W constraint:
+ * One should read recent/old data registers equal to the
+ * number of columns programmed in the keyp_control register,
+ * otherwise h/w state machine may get stuck. In order to avoid this
+ * situation one should check readstate bit in keypad scan
+ * register to be '0' at the end of data read, to make sure
+ * the keypad state machine is not in READ state.
+ */
+static int pmic8058_chk_read_state(struct pmic8058_kp *kp, u16 data_reg)
+{
+	u8 temp, scan_val;
+	int retries = 10, rc;
+
+	do {
+		rc = pmic8058_kp_read_u8(kp, &scan_val, KEYP_SCAN);
+		if (scan_val & 0x1)
+			rc = pmic8058_kp_read_u8(kp, &temp, data_reg);
+	} while ((scan_val & 0x1) && (--retries > 0));
+
+	if (retries == 0)
+		dev_dbg(kp->dev, "Unable to clear read state bit\n");
+
+	return 0;
+}
+/*
+ * Synchronous read protocol for RevB0 onwards:
+ *
+ * 1. Write '1' to ReadState bit in KEYP_SCAN register
+ * 2. Wait 2*32KHz clocks, so that HW can successfully enter read mode
+ *    synchronously
+ * 3. Read rows in old array first if events are more than one
+ * 4. Read rows in recent array
+ * 5. Wait 4*32KHz clocks
+ * 6. Write '0' to ReadState bit of KEYP_SCAN register so that hw can
+ *    synchronously exit read mode.
+ */
+static int pmic8058_chk_sync_read(struct pmic8058_kp *kp)
+{
+	int rc;
+	u8 scan_val;
+
+	rc = pmic8058_kp_read_u8(kp, &scan_val, KEYP_SCAN);
+	scan_val |= 0x1;
+	rc = pmic8058_kp_write_u8(kp, scan_val, KEYP_SCAN);
+
+	/* 2 * 32KHz clocks */
+	udelay((2 * USEC_PER_SEC / KEYP_CLOCK_FREQ) + 1);
+
+	return rc;
+}
+
+static int pmic8058_kp_read_data(struct pmic8058_kp *kp, u16 *state,
+					u16 data_reg, int read_rows)
+{
+	int rc, row;
+	u8 new_data[PM8058_MAX_ROWS];
+
+	rc = pmic8058_kp_read(kp, new_data, data_reg, read_rows);
+
+	if (!rc) {
+		if (pm8058_rev(kp->pm_chip) == PM_8058_REV_1p0)
+			pmic8058_chk_read_state(kp, data_reg);
+		for (row = 0; row < kp->pdata->num_rows; row++) {
+			dev_dbg(kp->dev, "new_data[%d] = %d\n", row,
+						new_data[row]);
+			state[row] = pmic8058_col_state(kp, new_data[row]);
+		}
+	}
+
+	return rc;
+}
+
+static int pmic8058_kp_read_matrix(struct pmic8058_kp *kp, u16 *new_state,
+					 u16 *old_state)
+{
+	int rc, read_rows;
+	u8 scan_val;
+	static u8 rows[] = {
+		5, 6, 7, 8, 10, 10, 12, 12, 15, 15, 15, 18, 18, 18
+	};
+
+	if (kp->flags & KEYF_FIX_LAST_ROW &&
+			(kp->pdata->num_rows != PM8058_MAX_ROWS))
+		read_rows = rows[kp->pdata->num_rows - KEYP_CTRL_SCAN_ROWS_MIN
+					 + 1];
+	else
+		read_rows = kp->pdata->num_rows;
+
+	if (pm8058_rev(kp->pm_chip) > PM_8058_REV_1p0)
+		pmic8058_chk_sync_read(kp);
+
+	if (old_state)
+		rc = pmic8058_kp_read_data(kp, old_state, KEYP_OLD_DATA,
+						read_rows);
+
+	rc = pmic8058_kp_read_data(kp, new_state, KEYP_RECENT_DATA,
+					 read_rows);
+
+	if (pm8058_rev(kp->pm_chip) > PM_8058_REV_1p0) {
+		/* 4 * 32KHz clocks */
+		udelay((4 * USEC_PER_SEC / KEYP_CLOCK_FREQ) + 1);
+
+		rc = pmic8058_kp_read(kp, &scan_val, KEYP_SCAN, 1);
+		scan_val &= 0xFE;
+		rc = pmic8058_kp_write_u8(kp, scan_val, KEYP_SCAN);
+	}
+
+	return rc;
+}
+
+static int __pmic8058_kp_scan_matrix(struct pmic8058_kp *kp, u16 *new_state,
+					 u16 *old_state)
+{
+	int row, col, code;
+
+	for (row = 0; row < kp->pdata->num_rows; row++) {
+		int bits_changed = new_state[row] ^ old_state[row];
+
+		if (!bits_changed)
+			continue;
+
+		for (col = 0; col < kp->pdata->num_cols; col++) {
+			if (!(bits_changed & (1 << col)))
+				continue;
+
+			dev_dbg(kp->dev, "key [%d:%d] %s\n", row, col,
+					!(new_state[row] & (1 << col)) ?
+					"pressed" : "released");
+
+			code = MATRIX_SCAN_CODE(row, col, PM8058_ROW_SHIFT);
+			input_event(kp->input, EV_MSC, MSC_SCAN, code);
+			input_report_key(kp->input,
+					kp->keycodes[code],
+					!(new_state[row] & (1 << col)));
+
+			input_sync(kp->input);
+		}
+	}
+
+	return 0;
+}
+
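+/*
+ * A scan may contain ghost keys: if a row with more than one key pressed
+ * shares a pressed column with another row, the whole scan is discarded.
+ */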
+static int pmic8058_detect_ghost_keys(struct pmic8058_kp *kp, u16 *new_state)
+{
+	int row, found_first = -1;
+	u16 check, row_state;
+
+	check = 0;
+	for (row = 0; row < kp->pdata->num_rows; row++) {
+		row_state = (~new_state[row]) &
+				 ((1 << kp->pdata->num_cols) - 1);
+
+		if (hweight16(row_state) > 1) {
+			if (found_first == -1)
+				found_first = row;
+			if (check & row_state) {
+				dev_dbg(kp->dev, "detected ghost key on row[%d] "
+						 "row[%d]\n", found_first, row);
+				return 1;
+			}
+		}
+		check |= row_state;
+	}
+	return 0;
+}
+
+static int pmic8058_kp_scan_matrix(struct pmic8058_kp *kp, unsigned int events)
+{
+	u16 new_state[PM8058_MAX_ROWS];
+	u16 old_state[PM8058_MAX_ROWS];
+	int rc;
+
+	switch (events) {
+	case 0x1:
+		rc = pmic8058_kp_read_matrix(kp, new_state, NULL);
+		if (pmic8058_detect_ghost_keys(kp, new_state))
+			return -EINVAL;
+		__pmic8058_kp_scan_matrix(kp, new_state, kp->keystate);
+		memcpy(kp->keystate, new_state, sizeof(new_state));
+	break;
+	case 0x3: /* two events - eventcounter is gray-coded */
+		rc = pmic8058_kp_read_matrix(kp, new_state, old_state);
+		__pmic8058_kp_scan_matrix(kp, old_state, kp->keystate);
+		__pmic8058_kp_scan_matrix(kp, new_state, old_state);
+		memcpy(kp->keystate, new_state, sizeof(new_state));
+	break;
+	case 0x2:
+		dev_dbg(kp->dev, "Some key events are missed\n");
+		rc = pmic8058_kp_read_matrix(kp, new_state, old_state);
+		__pmic8058_kp_scan_matrix(kp, old_state, kp->keystate);
+		__pmic8058_kp_scan_matrix(kp, new_state, old_state);
+		memcpy(kp->keystate, new_state, sizeof(new_state));
+	break;
+	default:
+		rc = -1;
+	}
+	return rc;
+}
+
+static inline int pmic8058_kp_disabled(struct pmic8058_kp *kp)
+{
+	return kp->disable_depth != 0;
+}
+
+static void pmic8058_kp_enable(struct pmic8058_kp *kp)
+{
+	if (!pmic8058_kp_disabled(kp))
+		return;
+
+	if (--kp->disable_depth == 0) {
+
+		kp->ctrl_reg |= KEYP_CTRL_KEYP_EN;
+		pmic8058_kp_write_u8(kp, kp->ctrl_reg, KEYP_CTRL);
+
+		enable_irq(kp->key_sense_irq);
+		enable_irq(kp->key_stuck_irq);
+	}
+}
+
+static void pmic8058_kp_disable(struct pmic8058_kp *kp)
+{
+	if (kp->disable_depth++ == 0) {
+		disable_irq(kp->key_sense_irq);
+		disable_irq(kp->key_stuck_irq);
+
+		kp->ctrl_reg &= ~KEYP_CTRL_KEYP_EN;
+		pmic8058_kp_write_u8(kp, kp->ctrl_reg, KEYP_CTRL);
+	}
+}
+
+static ssize_t pmic8058_kp_disable_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct pmic8058_kp *kp = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%u\n", pmic8058_kp_disabled(kp));
+}
+
+static ssize_t pmic8058_kp_disable_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	struct pmic8058_kp *kp = dev_get_drvdata(dev);
+	long i = 0;
+	int rc;
+
+	rc = strict_strtoul(buf, 10, &i);
+	if (rc)
+		return -EINVAL;
+
+	i = !!i;
+
+	mutex_lock(&kp->mutex);
+	if (i == kp->user_disabled) {
+		mutex_unlock(&kp->mutex);
+		return count;
+	}
+
+	kp->user_disabled = i;
+
+	if (i)
+		pmic8058_kp_disable(kp);
+	else
+		pmic8058_kp_enable(kp);
+	mutex_unlock(&kp->mutex);
+
+	return count;
+}
+
+static DEVICE_ATTR(disable_kp, 0664, pmic8058_kp_disable_show,
+			pmic8058_kp_disable_store);
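+
+/*
+ * The 'disable_kp' sysfs attribute created on the keypad platform device
+ * lets user space disable (write 1) or re-enable (write 0) the keypad.
+ */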
+
+
+/*
+ * NOTE: We read the recent and old data registers unconditionally whenever
+ * the key-stuck interrupt fires, because the events counter is not updated
+ * for this interrupt: a stuck key is not treated as a key state change.
+ *
+ * We do not use the old data register contents after reading them, because
+ * the key that was pressed before the key got stuck could be misreported as
+ * the stuck key, since its pressed state is still held in the old data
+ * register.
+ */
+static irqreturn_t pmic8058_kp_stuck_irq(int irq, void *data)
+{
+	u16 new_state[PM8058_MAX_ROWS];
+	u16 old_state[PM8058_MAX_ROWS];
+	int rc;
+	struct pmic8058_kp *kp = data;
+
+	rc = pmic8058_kp_read_matrix(kp, new_state, old_state);
+	__pmic8058_kp_scan_matrix(kp, new_state, kp->stuckstate);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * NOTE: Any row multiple interrupt issue - PMIC4 Rev A0
+ *
+ * If the S/W responds to the key-event interrupt too early and reads the
+ * recent data, the keypad FSM will mistakenly go to the IDLE state, instead
+ * of the scan pause state as it is supposed to. Since the key is still
+ * pressed, the keypad scanner will go through debounce and scan, and generate
+ * another key event interrupt. The workaround for this issue is to add a
+ * delay of 1 ms between servicing the key event interrupt and reading the
+ * recent data.
+ */
+static irqreturn_t pmic8058_kp_irq(int irq, void *data)
+{
+	struct pmic8058_kp *kp = data;
+	u8 ctrl_val, events;
+	int rc;
+
+	if (pm8058_rev(kp->pm_chip) == PM_8058_REV_1p0)
+		mdelay(1);
+
+	dev_dbg(kp->dev, "key sense irq\n");
+	__dump_kp_regs(kp, "pmic8058_kp_irq");
+
+	rc = pmic8058_kp_read(kp, &ctrl_val, KEYP_CTRL, 1);
+	events = ctrl_val & KEYP_CTRL_EVNTS_MASK;
+
+	rc = pmic8058_kp_scan_matrix(kp, events);
+
+	return IRQ_HANDLED;
+}
+/*
+ * NOTE: Last row multi-interrupt issue
+ *
+ * In PMIC Rev A0, if any key in the last row of the keypad matrix
+ * is pressed and held, the H/W keeps on generating interrupts.
+ * Software works around it by programming the keypad controller for the
+ * next supported row count (for an 8x12 matrix this is 15 rows), so the
+ * controller assumes more rows than are actually present and the real
+ * last row in the matrix does not generate multiple interrupts.
+static int pmic8058_kpd_init(struct pmic8058_kp *kp)
+{
+	int bits, rc, cycles;
+	u8 scan_val = 0, ctrl_val = 0;
+	static u8 row_bits[] = {
+		0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7,
+	};
+
+	/* Find column bits */
+	if (kp->pdata->num_cols < KEYP_CTRL_SCAN_COLS_MIN)
+		bits = 0;
+	else
+		bits = kp->pdata->num_cols - KEYP_CTRL_SCAN_COLS_MIN;
+	ctrl_val = (bits & KEYP_CTRL_SCAN_COLS_BITS) <<
+		KEYP_CTRL_SCAN_COLS_SHIFT;
+
+	/* Find row bits */
+	if (kp->pdata->num_rows < KEYP_CTRL_SCAN_ROWS_MIN)
+		bits = 0;
+	else if (kp->pdata->num_rows > PM8058_MAX_ROWS)
+		bits = KEYP_CTRL_SCAN_ROWS_BITS;
+	else
+		bits = row_bits[kp->pdata->num_rows - KEYP_CTRL_SCAN_ROWS_MIN];
+
+	/* Use max rows to fix last row problem if actual rows are less */
+	if (kp->flags & KEYF_FIX_LAST_ROW &&
+			 (kp->pdata->num_rows != PM8058_MAX_ROWS))
+		bits = row_bits[kp->pdata->num_rows - KEYP_CTRL_SCAN_ROWS_MIN
+					 + 1];
+
+	ctrl_val |= (bits << KEYP_CTRL_SCAN_ROWS_SHIFT);
+
+	rc = pmic8058_kp_write_u8(kp, ctrl_val, KEYP_CTRL);
+
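+	/*
+	 * Debounce encoding: rev A0 takes a power of two in 1..8 ms
+	 * (bits = fls(ms) - 1); later revisions use 5 ms steps in 5..20 ms
+	 * (bits = ms / 5 - 1).
+	 */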
+	if (pm8058_rev(kp->pm_chip) == PM_8058_REV_1p0)
+		bits = fls(kp->pdata->debounce_ms[0]) - 1;
+	else
+		bits = (kp->pdata->debounce_ms[1] / 5) - 1;
+
+	scan_val |= (bits << KEYP_SCAN_DBOUNCE_SHIFT);
+
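+	/* scan_delay_ms is a power of two in 1..128 ms, encoded as log2(ms) */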
+	bits = fls(kp->pdata->scan_delay_ms) - 1;
+	scan_val |= (bits << KEYP_SCAN_PAUSE_SHIFT);
+
+	/* Row hold time is a multiple of 32KHz cycles. */
+	cycles = (kp->pdata->row_hold_ns * KEYP_CLOCK_FREQ) / NSEC_PER_SEC;
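+	/* e.g. row_hold_ns = 122000 -> 122000 * 32768 / NSEC_PER_SEC = 3 cycles */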
+
+	scan_val |= (cycles << KEYP_SCAN_ROW_HOLD_SHIFT);
+
+	rc = pmic8058_kp_write_u8(kp, scan_val, KEYP_SCAN);
+
+	return rc;
+}
+
+static int pm8058_kp_config_drv(int gpio_start, int num_gpios)
+{
+	int	rc;
+	struct pm8058_gpio kypd_drv = {
+		.direction	= PM_GPIO_DIR_OUT,
+		.output_buffer	= PM_GPIO_OUT_BUF_OPEN_DRAIN,
+		.output_value	= 0,
+		.pull		= PM_GPIO_PULL_NO,
+		.vin_sel	= 2,
+		.out_strength	= PM_GPIO_STRENGTH_LOW,
+		.function	= PM_GPIO_FUNC_1,
+		.inv_int_pol	= 1,
+	};
+
+	if (gpio_start < 0 || num_gpios < 0 || num_gpios > PM8058_GPIOS)
+		return -EINVAL;
+
+	while (num_gpios--) {
+		rc = pm8058_gpio_config(gpio_start++, &kypd_drv);
+		if (rc) {
+			pr_err("%s: FAIL pm8058_gpio_config(): rc=%d.\n",
+				__func__, rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int pm8058_kp_config_sns(int gpio_start, int num_gpios)
+{
+	int	rc;
+	struct pm8058_gpio kypd_sns = {
+		.direction	= PM_GPIO_DIR_IN,
+		.pull		= PM_GPIO_PULL_UP_31P5,
+		.vin_sel	= 2,
+		.out_strength	= PM_GPIO_STRENGTH_NO,
+		.function	= PM_GPIO_FUNC_NORMAL,
+		.inv_int_pol	= 1,
+	};
+
+	if (gpio_start < 0 || num_gpios < 0 || num_gpios > PM8058_GPIOS)
+		return -EINVAL;
+
+	while (num_gpios--) {
+		rc = pm8058_gpio_config(gpio_start++, &kypd_sns);
+		if (rc) {
+			pr_err("%s: FAIL pm8058_gpio_config(): rc=%d.\n",
+				__func__, rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * The keypad controller must be initialized in the following sequence
+ * only; otherwise its FSM might get stuck.
+ *
+ * - Initialize keypad control parameters such as the number of rows and
+ *   columns, timing values, etc.
+ * - Configure the row and column GPIO pull-up/pull-down settings.
+ * - Set the IRQ edge type.
+ * - Enable the keypad controller.
+ */
+static int __devinit pmic8058_kp_probe(struct platform_device *pdev)
+{
+	struct pmic8058_keypad_data *pdata = pdev->dev.platform_data;
+	const struct matrix_keymap_data *keymap_data;
+	struct pmic8058_kp *kp;
+	int rc;
+	unsigned short *keycodes;
+	u8 ctrl_val;
+	struct pm8058_chip	*pm_chip;
+
+	pm_chip = dev_get_drvdata(pdev->dev.parent);
+	if (pm_chip == NULL) {
+		dev_err(&pdev->dev, "no parent data passed in\n");
+		return -EFAULT;
+	}
+
+	if (!pdata || !pdata->num_cols || !pdata->num_rows ||
+		pdata->num_cols > PM8058_MAX_COLS ||
+		pdata->num_rows > PM8058_MAX_ROWS ||
+		pdata->num_cols < PM8058_MIN_COLS ||
+		pdata->num_rows < PM8058_MIN_ROWS) {
+		dev_err(&pdev->dev, "invalid platform data\n");
+		return -EINVAL;
+	}
+
+	if (pdata->rows_gpio_start < 0 || pdata->cols_gpio_start < 0) {
+		dev_err(&pdev->dev, "invalid gpio_start platform data\n");
+		return -EINVAL;
+	}
+
+	if (!pdata->scan_delay_ms || pdata->scan_delay_ms > MAX_SCAN_DELAY
+		|| pdata->scan_delay_ms < MIN_SCAN_DELAY ||
+		!is_power_of_2(pdata->scan_delay_ms)) {
+		dev_err(&pdev->dev, "invalid keypad scan time supplied\n");
+		return -EINVAL;
+	}
+
+	if (!pdata->row_hold_ns || pdata->row_hold_ns > MAX_ROW_HOLD_DELAY
+		|| pdata->row_hold_ns < MIN_ROW_HOLD_DELAY ||
+		((pdata->row_hold_ns % MIN_ROW_HOLD_DELAY) != 0)) {
+		dev_err(&pdev->dev, "invalid keypad row hold time supplied\n");
+		return -EINVAL;
+	}
+
+	if (pm8058_rev(pm_chip) == PM_8058_REV_1p0) {
+		if (!pdata->debounce_ms
+			|| !is_power_of_2(pdata->debounce_ms[0])
+			|| pdata->debounce_ms[0] > MAX_DEBOUNCE_A0_TIME
+			|| pdata->debounce_ms[0] < MIN_DEBOUNCE_A0_TIME) {
+			dev_err(&pdev->dev, "invalid debounce time supplied\n");
+			return -EINVAL;
+		}
+	} else {
+		if (!pdata->debounce_ms
+			|| ((pdata->debounce_ms[1] % 5) != 0)
+			|| pdata->debounce_ms[1] > MAX_DEBOUNCE_B0_TIME
+			|| pdata->debounce_ms[1] < MIN_DEBOUNCE_B0_TIME) {
+			dev_err(&pdev->dev, "invalid debounce time supplied\n");
+			return -EINVAL;
+		}
+	}
+
+	keymap_data = pdata->keymap_data;
+	if (!keymap_data) {
+		dev_err(&pdev->dev, "no keymap data supplied\n");
+		return -EINVAL;
+	}
+
+	kp = kzalloc(sizeof(*kp), GFP_KERNEL);
+	if (!kp)
+		return -ENOMEM;
+
+	keycodes = kzalloc(PM8058_MATRIX_MAX_SIZE * sizeof(*keycodes),
+				 GFP_KERNEL);
+	if (!keycodes) {
+		rc = -ENOMEM;
+		goto err_alloc_mem;
+	}
+
+	platform_set_drvdata(pdev, kp);
+	mutex_init(&kp->mutex);
+
+	kp->pdata	= pdata;
+	kp->dev		= &pdev->dev;
+	kp->keycodes	= keycodes;
+	kp->pm_chip	= pm_chip;
+
+	if (pm8058_rev(pm_chip) == PM_8058_REV_1p0)
+		kp->flags |= KEYF_FIX_LAST_ROW;
+
+	kp->input = input_allocate_device();
+	if (!kp->input) {
+		dev_err(&pdev->dev, "unable to allocate input device\n");
+		rc = -ENOMEM;
+		goto err_alloc_device;
+	}
+
+	/* Enable runtime PM ops, start in ACTIVE mode */
+	rc = pm_runtime_set_active(&pdev->dev);
+	if (rc < 0)
+		dev_dbg(&pdev->dev, "unable to set runtime pm state\n");
+	pm_runtime_enable(&pdev->dev);
+
+	kp->key_sense_irq = platform_get_irq(pdev, 0);
+	if (kp->key_sense_irq < 0) {
+		dev_err(&pdev->dev, "unable to get keypad sense irq\n");
+		rc = -ENXIO;
+		goto err_get_irq;
+	}
+
+	kp->key_stuck_irq = platform_get_irq(pdev, 1);
+	if (kp->key_stuck_irq < 0) {
+		dev_err(&pdev->dev, "unable to get keypad stuck irq\n");
+		rc = -ENXIO;
+		goto err_get_irq;
+	}
+
+	if (pdata->input_name)
+		kp->input->name = pdata->input_name;
+	else
+		kp->input->name = "PMIC8058 keypad";
+
+	if (pdata->input_phys_device)
+		kp->input->phys = pdata->input_phys_device;
+	else
+		kp->input->phys = "pmic8058_keypad/input0";
+
+	kp->input->dev.parent	= &pdev->dev;
+
+	kp->input->id.bustype	= BUS_HOST;
+	kp->input->id.version	= 0x0001;
+	kp->input->id.product	= 0x0001;
+	kp->input->id.vendor	= 0x0001;
+
+	kp->input->evbit[0]	= BIT_MASK(EV_KEY);
+
+	if (pdata->rep)
+		__set_bit(EV_REP, kp->input->evbit);
+
+	kp->input->keycode	= keycodes;
+	kp->input->keycodemax	= PM8058_MATRIX_MAX_SIZE;
+	kp->input->keycodesize	= sizeof(*keycodes);
+
+	matrix_keypad_build_keymap(keymap_data, PM8058_ROW_SHIFT,
+					kp->input->keycode, kp->input->keybit);
+
+	input_set_capability(kp->input, EV_MSC, MSC_SCAN);
+	input_set_drvdata(kp->input, kp);
+
+	rc = input_register_device(kp->input);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "unable to register keypad input device\n");
+		goto err_get_irq;
+	}
+
+	/* initialize keypad state */
+	memset(kp->keystate, 0xff, sizeof(kp->keystate));
+	memset(kp->stuckstate, 0xff, sizeof(kp->stuckstate));
+
+	rc = pmic8058_kpd_init(kp);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "unable to initialize keypad controller\n");
+		goto err_kpd_init;
+	}
+
+	rc = pm8058_kp_config_sns(pdata->cols_gpio_start,
+			pdata->num_cols);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "unable to configure keypad sense lines\n");
+		goto err_gpio_config;
+	}
+
+	rc = pm8058_kp_config_drv(pdata->rows_gpio_start,
+			pdata->num_rows);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "unable to configure keypad drive lines\n");
+		goto err_gpio_config;
+	}
+
+	rc = request_threaded_irq(kp->key_sense_irq, NULL, pmic8058_kp_irq,
+				 IRQF_TRIGGER_RISING, "pmic-keypad", kp);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "failed to request keypad sense irq\n");
+		goto err_req_sense_irq;
+	}
+
+	rc = request_threaded_irq(kp->key_stuck_irq, NULL,
+				 pmic8058_kp_stuck_irq, IRQF_TRIGGER_RISING,
+				 "pmic-keypad-stuck", kp);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "failed to request keypad stuck irq\n");
+		goto err_req_stuck_irq;
+	}
+
+	rc = pmic8058_kp_read_u8(kp, &ctrl_val, KEYP_CTRL);
+	ctrl_val |= KEYP_CTRL_KEYP_EN;
+	rc = pmic8058_kp_write_u8(kp, ctrl_val, KEYP_CTRL);
+
+	kp->ctrl_reg = ctrl_val;
+
+	__dump_kp_regs(kp, "probe");
+
+	rc = device_create_file(&pdev->dev, &dev_attr_disable_kp);
+	if (rc < 0)
+		goto err_create_file;
+
+	device_init_wakeup(&pdev->dev, pdata->wakeup);
+
+	return 0;
+
+err_create_file:
+	free_irq(kp->key_stuck_irq, kp);
+err_req_stuck_irq:
+	free_irq(kp->key_sense_irq, kp);
+err_req_sense_irq:
+err_gpio_config:
+err_kpd_init:
+	input_unregister_device(kp->input);
+	kp->input = NULL;
+err_get_irq:
+	pm_runtime_set_suspended(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	input_free_device(kp->input);
+err_alloc_device:
+	kfree(keycodes);
+err_alloc_mem:
+	kfree(kp);
+	return rc;
+}
+
+static int __devexit pmic8058_kp_remove(struct platform_device *pdev)
+{
+	struct pmic8058_kp *kp = platform_get_drvdata(pdev);
+
+	pm_runtime_set_suspended(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	device_remove_file(&pdev->dev, &dev_attr_disable_kp);
+	device_init_wakeup(&pdev->dev, 0);
+	free_irq(kp->key_stuck_irq, kp);
+	free_irq(kp->key_sense_irq, kp);
+	input_unregister_device(kp->input);
+	platform_set_drvdata(pdev, NULL);
+	kfree(kp->input->keycode);
+	kfree(kp);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int pmic8058_kp_suspend(struct device *dev)
+{
+	struct pmic8058_kp *kp = dev_get_drvdata(dev);
+
+	if (device_may_wakeup(dev) && !pmic8058_kp_disabled(kp)) {
+		enable_irq_wake(kp->key_sense_irq);
+	} else {
+		mutex_lock(&kp->mutex);
+		pmic8058_kp_disable(kp);
+		mutex_unlock(&kp->mutex);
+	}
+
+	return 0;
+}
+
+static int pmic8058_kp_resume(struct device *dev)
+{
+	struct pmic8058_kp *kp = dev_get_drvdata(dev);
+
+	if (device_may_wakeup(dev) && !pmic8058_kp_disabled(kp)) {
+		disable_irq_wake(kp->key_sense_irq);
+	} else {
+		mutex_lock(&kp->mutex);
+		pmic8058_kp_enable(kp);
+		mutex_unlock(&kp->mutex);
+	}
+
+	return 0;
+}
+
+static struct dev_pm_ops pm8058_kp_pm_ops = {
+	.suspend	= pmic8058_kp_suspend,
+	.resume		= pmic8058_kp_resume,
+};
+#endif
+
+static struct platform_driver pmic8058_kp_driver = {
+	.probe		= pmic8058_kp_probe,
+	.remove		= __devexit_p(pmic8058_kp_remove),
+	.driver		= {
+		.name = "pm8058-keypad",
+		.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm = &pm8058_kp_pm_ops,
+#endif
+	},
+};
+
+static int __init pmic8058_kp_init(void)
+{
+	return platform_driver_register(&pmic8058_kp_driver);
+}
+module_init(pmic8058_kp_init);
+
+static void __exit pmic8058_kp_exit(void)
+{
+	platform_driver_unregister(&pmic8058_kp_driver);
+}
+module_exit(pmic8058_kp_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC8058 keypad driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:pmic8058_keypad");
diff --git a/drivers/input/keyboard/pmic8xxx-keypad.c b/drivers/input/keyboard/pmic8xxx-keypad.c
index 6229c3e..f0629ce 100644
--- a/drivers/input/keyboard/pmic8xxx-keypad.c
+++ b/drivers/input/keyboard/pmic8xxx-keypad.c
@@ -116,6 +116,9 @@
 	int rc;
 
 	rc = pm8xxx_writeb(kp->dev->parent, reg, data);
+	if (rc < 0)
+		dev_warn(kp->dev, "Error writing pmic8xxx: %X - ret %X\n",
+				reg, rc);
 	return rc;
 }
 
@@ -125,6 +128,10 @@
 	int rc;
 
 	rc = pm8xxx_read_buf(kp->dev->parent, reg, data, num_bytes);
+	if (rc < 0)
+		dev_warn(kp->dev, "Error reading pmic8xxx: %X - ret %X\n",
+				reg, rc);
+
 	return rc;
 }
 
@@ -134,6 +141,9 @@
 	int rc;
 
 	rc = pmic8xxx_kp_read(kp, data, reg, 1);
+	if (rc < 0)
+		dev_warn(kp->dev, "Error reading pmic8xxx: %X - ret %X\n",
+				reg, rc);
 	return rc;
 }
 
@@ -463,7 +473,7 @@
 					__func__, gpio_start + i, rc);
 			return rc;
 		}
-	 }
+	}
 
 	return 0;
 }
@@ -532,7 +542,7 @@
 		.output_buffer	= PM_GPIO_OUT_BUF_OPEN_DRAIN,
 		.output_value	= 0,
 		.pull		= PM_GPIO_PULL_NO,
-		.vin_sel	= PM_GPIO_VIN_S3,
+		.vin_sel	= PM_GPIO_VIN_S4,
 		.out_strength	= PM_GPIO_STRENGTH_LOW,
 		.function	= PM_GPIO_FUNC_1,
 		.inv_int_pol	= 1,
@@ -541,7 +551,7 @@
 	struct pm_gpio kypd_sns = {
 		.direction	= PM_GPIO_DIR_IN,
 		.pull		= PM_GPIO_PULL_UP_31P5,
-		.vin_sel	= PM_GPIO_VIN_S3,
+		.vin_sel	= PM_GPIO_VIN_S4,
 		.out_strength	= PM_GPIO_STRENGTH_NO,
 		.function	= PM_GPIO_FUNC_NORMAL,
 		.inv_int_pol	= 1,
diff --git a/drivers/input/keyboard/qci_kbd.c b/drivers/input/keyboard/qci_kbd.c
new file mode 100644
index 0000000..d735012
--- /dev/null
+++ b/drivers/input/keyboard/qci_kbd.c
@@ -0,0 +1,721 @@
+/* Quanta I2C Keyboard Driver
+ *
+ * Copyright (C) 2009 Quanta Computer Inc.
+ * Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ * Author: Hsin Wu <hsin.wu@quantatw.com>
+ * Author: Austin Lai <austin.lai@quantatw.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * This driver communicates over the I2C interface for the ON2 platform of
+ * AP BU, and it only works with the Nuvoton WPCE775x embedded controller.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/keyboard.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+
+#include <linux/input/qci_kbd.h>
+
+/* Keyboard special scancode */
+#define RC_KEY_FN          0x70
+#define RC_KEY_BREAK       0x80
+#define KEY_ACK_FA         0xFA
+#define SCAN_EMUL0         0xE0
+#define SCAN_EMUL1         0xE1
+#define SCAN_PAUSE1        0x1D
+#define SCAN_PAUSE2        0x45
+#define SCAN_LIDSW_OPEN    0x70
+#define SCAN_LIDSW_CLOSE   0x71
+
+/* Keyboard keycodes */
+#define NOKEY           KEY_RESERVED
+#define KEY_LEFTWIN     KEY_LEFTMETA
+#define KEY_RIGHTWIN    KEY_RIGHTMETA
+#define KEY_APPS        KEY_COMPOSE
+#define KEY_PRINTSCR    KEY_SYSRQ
+
+#define KEYBOARD_ID_NAME          "qci-i2ckbd"
+#define KEYBOARD_NAME                "Quanta Keyboard"
+#define KEYBOARD_DEVICE             "/i2c/input0"
+#define KEYBOARD_CMD_ENABLE             0xF4
+#define KEYBOARD_CMD_SET_LED            0xED
+
+/*-----------------------------------------------------------------------------
+ * Keyboard scancode to linux keycode translation table
+ *---------------------------------------------------------------------------*/
+
+static const unsigned char on2_keycode[256] = {
+	[0]   = NOKEY,
+	[1]   = NOKEY,
+	[2]   = NOKEY,
+	[3]   = KEY_5,
+	[4]   = KEY_7,
+	[5]   = KEY_9,
+	[6]   = KEY_MINUS,
+	[7]   = NOKEY,
+	[8]   = NOKEY,
+	[9]   = NOKEY,
+	[10]  = NOKEY,
+	[11]  = KEY_LEFTBRACE,
+	[12]  = KEY_F10,
+	[13]  = KEY_INSERT,
+	[14]  = KEY_F11,
+	[15]  = KEY_ESC,
+	[16]  = NOKEY,
+	[17]  = NOKEY,
+	[18]  = NOKEY,
+	[19]  = KEY_4,
+	[20]  = KEY_6,
+	[21]  = KEY_8,
+	[22]  = KEY_0,
+	[23]  = KEY_EQUAL,
+	[24]  = NOKEY,
+	[25]  = NOKEY,
+	[26]  = NOKEY,
+	[27]  = KEY_P,
+	[28]  = KEY_F9,
+	[29]  = KEY_DELETE,
+	[30]  = KEY_F12,
+	[31]  = KEY_GRAVE,
+	[32]  = KEY_W,
+	[33]  = NOKEY,
+	[34]  = NOKEY,
+	[35]  = KEY_R,
+	[36]  = KEY_T,
+	[37]  = KEY_U,
+	[38]  = KEY_O,
+	[39]  = KEY_RIGHTBRACE,
+	[40]  = NOKEY,
+	[41]  = NOKEY,
+	[42]  = NOKEY,
+	[43]  = KEY_APOSTROPHE,
+	[44]  = KEY_BACKSPACE,
+	[45]  = NOKEY,
+	[46]  = KEY_F8,
+	[47]  = KEY_F5,
+	[48]  = KEY_S,
+	[49]  = NOKEY,
+	[50]  = NOKEY,
+	[51]  = KEY_E,
+	[52]  = KEY_H,
+	[53]  = KEY_Y,
+	[54]  = KEY_I,
+	[55]  = KEY_ENTER,
+	[56]  = NOKEY,
+	[57]  = NOKEY,
+	[58]  = NOKEY,
+	[59]  = KEY_SEMICOLON,
+	[60]  = KEY_3,
+	[61]  = KEY_PAGEUP,
+	[62]  = KEY_Q,
+	[63]  = KEY_TAB,
+	[64]  = KEY_A,
+	[65]  = NOKEY,
+	[66]  = NOKEY,
+	[67]  = KEY_F,
+	[68]  = KEY_G,
+	[69]  = KEY_J,
+	[70]  = KEY_L,
+	[71]  = NOKEY,
+	[72]  = KEY_RIGHTSHIFT,
+	[73]  = NOKEY,
+	[74]  = NOKEY,
+	[75]  = KEY_SLASH,
+	[76]  = KEY_2,
+	[77]  = KEY_PAGEDOWN,
+	[78]  = KEY_F4,
+	[79]  = KEY_F1,
+	[80]  = KEY_Z,
+	[81]  = NOKEY,
+	[82]  = NOKEY,
+	[83]  = KEY_D,
+	[84]  = KEY_V,
+	[85]  = KEY_N,
+	[86]  = KEY_K,
+	[87]  = NOKEY,
+	[88]  = KEY_LEFTSHIFT,
+	[89]  = KEY_RIGHTCTRL,
+	[90]  = NOKEY,
+	[91]  = KEY_DOT,
+	[92]  = KEY_UP,
+	[93]  = KEY_RIGHT,
+	[94]  = KEY_F3,
+	[95]  = KEY_F2,
+	[96]  = NOKEY,
+	[97]  = NOKEY,
+	[98]  = KEY_RIGHTALT,
+	[99]  = KEY_X,
+	[100] = KEY_C,
+	[101] = KEY_B,
+	[102] = KEY_COMMA,
+	[103] = NOKEY,
+	[104] = NOKEY,
+	[105] = NOKEY,
+	[106] = NOKEY,
+	[107] = NOKEY,
+	[108] = KEY_PRINTSCR,
+	[109] = KEY_DOWN,
+	[110] = KEY_1,
+	[111] = KEY_CAPSLOCK,
+	[112] = KEY_F24,
+	[113] = KEY_HOME,
+	[114] = KEY_LEFTALT,
+	[115] = NOKEY,
+	[116] = KEY_SPACE,
+	[117] = KEY_BACKSLASH,
+	[118] = KEY_M,
+	[119] = KEY_COMPOSE,
+	[120] = NOKEY,
+	[121] = KEY_LEFTCTRL,
+	[122] = NOKEY,
+	[123] = NOKEY,
+	[124] = KEY_PAUSE,
+	[125] = KEY_LEFT,
+	[126] = KEY_F7,
+	[127] = KEY_F6,
+	[128] = NOKEY,
+	[129] = NOKEY,
+	[130] = NOKEY,
+	[131] = NOKEY,
+	[132] = NOKEY,
+	[133] = NOKEY,
+	[134] = NOKEY,
+	[135] = NOKEY,
+	[136] = NOKEY,
+	[137] = NOKEY,
+	[138] = NOKEY,
+	[139] = NOKEY,
+	[140] = NOKEY,
+	[141] = NOKEY,
+	[142] = NOKEY,
+	[143] = NOKEY,
+	[144] = NOKEY,
+	[145] = NOKEY,
+	[146] = NOKEY,
+	[147] = NOKEY,
+	[148] = NOKEY,
+	[149] = NOKEY,
+	[150] = NOKEY,
+	[151] = NOKEY,
+	[152] = NOKEY,
+	[153] = NOKEY,
+	[154] = NOKEY,
+	[155] = NOKEY,
+	[156] = NOKEY,
+	[157] = NOKEY,
+	[158] = NOKEY,
+	[159] = NOKEY,
+	[160] = NOKEY,
+	[161] = NOKEY,
+	[162] = NOKEY,
+	[163] = NOKEY,
+	[164] = NOKEY,
+	[165] = NOKEY,
+	[166] = NOKEY,
+	[167] = NOKEY,
+	[168] = NOKEY,
+	[169] = NOKEY,
+	[170] = NOKEY,
+	[171] = NOKEY,
+	[172] = NOKEY,
+	[173] = NOKEY,
+	[174] = NOKEY,
+	[175] = NOKEY,
+	[176] = NOKEY,
+	[177] = NOKEY,
+	[178] = NOKEY,
+	[179] = NOKEY,
+	[180] = NOKEY,
+	[181] = NOKEY,
+	[182] = NOKEY,
+	[183] = NOKEY,
+	[184] = NOKEY,
+	[185] = NOKEY,
+	[186] = NOKEY,
+	[187] = NOKEY,
+	[188] = NOKEY,
+	[189] = KEY_HOME,
+	[190] = NOKEY,
+	[191] = NOKEY,
+	[192] = NOKEY,
+	[193] = NOKEY,
+	[194] = NOKEY,
+	[195] = NOKEY,
+	[196] = NOKEY,
+	[197] = NOKEY,
+	[198] = NOKEY,
+	[199] = NOKEY,
+	[200] = NOKEY,
+	[201] = NOKEY,
+	[202] = NOKEY,
+	[203] = NOKEY,
+	[204] = NOKEY,
+	[205] = KEY_END,
+	[206] = NOKEY,
+	[207] = NOKEY,
+	[208] = NOKEY,
+	[209] = NOKEY,
+	[210] = NOKEY,
+	[211] = NOKEY,
+	[212] = NOKEY,
+	[213] = NOKEY,
+	[214] = NOKEY,
+	[215] = NOKEY,
+	[216] = NOKEY,
+	[217] = NOKEY,
+	[218] = NOKEY,
+	[219] = NOKEY,
+	[220] = KEY_VOLUMEUP,
+	[221] = KEY_BRIGHTNESSUP,
+	[222] = NOKEY,
+	[223] = NOKEY,
+	[224] = NOKEY,
+	[225] = NOKEY,
+	[226] = NOKEY,
+	[227] = NOKEY,
+	[228] = NOKEY,
+	[229] = NOKEY,
+	[230] = NOKEY,
+	[231] = NOKEY,
+	[232] = NOKEY,
+	[233] = NOKEY,
+	[234] = NOKEY,
+	[235] = NOKEY,
+	[236] = NOKEY,
+	[237] = KEY_VOLUMEDOWN,
+	[238] = NOKEY,
+	[239] = NOKEY,
+	[240] = NOKEY,
+	[241] = NOKEY,
+	[242] = NOKEY,
+	[243] = NOKEY,
+	[244] = NOKEY,
+	[245] = NOKEY,
+	[246] = NOKEY,
+	[247] = NOKEY,
+	[248] = NOKEY,
+	[249] = NOKEY,
+	[250] = NOKEY,
+	[251] = NOKEY,
+	[252] = NOKEY,
+	[253] = KEY_BRIGHTNESSDOWN,
+	[254] = NOKEY,
+	[255] = NOKEY,
+};
+
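+/*
+ * Translation for scancodes prefixed with 0xE0: the index is the scancode
+ * with the break bit (bit 7) cleared, the value is the Linux keycode
+ * (0 means the scancode is ignored).
+ */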
+static const u8 emul0_map[128] = {
+	  0,   0,   0,  0,  0,  0,  0,   0,   0,   0,  0,   0,  0,   0,  0,   0,
+	  0,   0,   0,  0,  0,  0,  0,   0,   0,   0,  0,   0, 96,  97,  0,   0,
+	113,   0,   0,  0,  0,  0,  0,   0,   0,   0,  0,   0,  0,   0, 114,  0,
+	115,   0,   0,  0,  0, 98,  0,  99, 100,   0,  0,   0,  0,   0,  0,   0,
+	  0,   0,   0,  0,  0,  0,  0, 102, 103, 104,  0, 105,  0, 106,  0, 107,
+	108, 109, 110, 111, 0,  0,  0,   0,   0,   0,  0, 139,  0, 150,  0,   0,
+	  0,   0,   0,  0,  0,  0,  0,   0,   0,   0,  0,   0,  0,   0,  0,   0,
+	  0,   0,   0,  0,  0,  0,  0,   0,   0,   0,  0,   0,  0,   0,  0,   0,
+};
+
+/*-----------------------------------------------------------------------------
+ * Global variables
+ *---------------------------------------------------------------------------*/
+
+struct input_dev *g_qci_keyboard_dev;
+
+/* General structure to hold the driver data */
+struct i2ckbd_drv_data {
+	struct i2c_client *ki2c_client;
+	struct work_struct work;
+	struct input_dev *qcikbd_dev;
+	struct mutex kb_mutex;
+	unsigned int qcikbd_gpio; /* GPIO used for interrupt */
+	unsigned int qcikbd_irq;
+	unsigned int key_down;
+	unsigned int escape;
+	unsigned int pause_seq;
+	unsigned int fn;
+	unsigned char led_status;
+	bool standard_scancodes;
+	bool kb_leds;
+	bool event_led;
+	bool emul0;
+	bool emul1;
+	bool pause1;
+};
+#ifdef CONFIG_PM
+static int qcikbd_suspend(struct device *dev)
+{
+	struct i2ckbd_drv_data *context = input_get_drvdata(g_qci_keyboard_dev);
+
+	enable_irq_wake(context->qcikbd_irq);
+	return 0;
+}
+
+static int qcikbd_resume(struct device *dev)
+{
+	struct i2ckbd_drv_data *context = input_get_drvdata(g_qci_keyboard_dev);
+	struct i2c_client *ikbdclient = context->ki2c_client;
+
+	disable_irq_wake(context->qcikbd_irq);
+
+	/* consume any keypress generated while suspended */
+	i2c_smbus_read_byte(ikbdclient);
+	return 0;
+}
+#endif
+static int __devinit qcikbd_probe(struct i2c_client *client,
+	const struct i2c_device_id *id);
+static int __devexit qcikbd_remove(struct i2c_client *kbd);
+
+static const struct i2c_device_id qcikbd_idtable[] = {
+	{ KEYBOARD_ID_NAME, 0 },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(i2c, qcikbd_idtable);
+
+#ifdef CONFIG_PM
+static struct dev_pm_ops qcikbd_pm_ops = {
+	.suspend  = qcikbd_suspend,
+	.resume   = qcikbd_resume,
+};
+#endif
+static struct i2c_driver i2ckbd_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name  = KEYBOARD_ID_NAME,
+#ifdef CONFIG_PM
+		.pm = &qcikbd_pm_ops,
+#endif
+	},
+	.probe	  = qcikbd_probe,
+	.remove = __devexit_p(qcikbd_remove),
+	.id_table = qcikbd_idtable,
+};
+
+/*-----------------------------------------------------------------------------
+ * Driver functions
+ *---------------------------------------------------------------------------*/
+
+#ifdef CONFIG_KEYBOARD_QCIKBD_LID
+static void process_lid(struct input_dev *ikbdev, unsigned char scancode)
+{
+	if (scancode == SCAN_LIDSW_OPEN)
+		input_report_switch(ikbdev, SW_LID, 0);
+	else if (scancode == SCAN_LIDSW_CLOSE)
+		input_report_switch(ikbdev, SW_LID, 1);
+	else
+		return;
+	input_sync(ikbdev);
+}
+#endif
+
+static irqreturn_t qcikbd_interrupt(int irq, void *dev_id)
+{
+	struct i2ckbd_drv_data *ikbd_drv_data = dev_id;
+	schedule_work(&ikbd_drv_data->work);
+	return IRQ_HANDLED;
+}
+
+static void qcikbd_work_handler(struct work_struct *_work)
+{
+	unsigned char scancode;
+	unsigned char scancode_only;
+	unsigned int  keycode;
+
+	struct i2ckbd_drv_data *ikbd_drv_data =
+		container_of(_work, struct i2ckbd_drv_data, work);
+
+	struct i2c_client *ikbdclient = ikbd_drv_data->ki2c_client;
+	struct input_dev *ikbdev = ikbd_drv_data->qcikbd_dev;
+
+	mutex_lock(&ikbd_drv_data->kb_mutex);
+
+	if ((ikbd_drv_data->kb_leds) && (ikbd_drv_data->event_led)) {
+		i2c_smbus_write_byte(ikbdclient, KEYBOARD_CMD_SET_LED);
+		i2c_smbus_write_byte(ikbdclient, ikbd_drv_data->led_status);
+		ikbd_drv_data->event_led = 0;
+		goto work_exit;
+	}
+
+	scancode = i2c_smbus_read_byte(ikbdclient);
+
+	if (scancode == KEY_ACK_FA)
+		goto work_exit;
+
+	if (ikbd_drv_data->standard_scancodes) {
+		/* pause key is E1 1D 45 */
+		if (scancode == SCAN_EMUL1) {
+			ikbd_drv_data->emul1 = 1;
+			goto work_exit;
+		}
+		if (ikbd_drv_data->emul1) {
+			ikbd_drv_data->emul1 = 0;
+			if ((scancode & 0x7f) == SCAN_PAUSE1)
+				ikbd_drv_data->pause1 = 1;
+			goto work_exit;
+		}
+		if (ikbd_drv_data->pause1) {
+			ikbd_drv_data->pause1 = 0;
+			if ((scancode & 0x7f) == SCAN_PAUSE2) {
+				input_report_key(ikbdev, KEY_PAUSE,
+						 !(scancode & 0x80));
+				input_sync(ikbdev);
+			}
+			goto work_exit;
+		}
+
+		if (scancode == SCAN_EMUL0) {
+			ikbd_drv_data->emul0 = 1;
+			goto work_exit;
+		}
+		if (ikbd_drv_data->emul0) {
+			ikbd_drv_data->emul0 = 0;
+			scancode_only = scancode & 0x7f;
+#ifdef CONFIG_KEYBOARD_QCIKBD_LID
+			if ((scancode_only == SCAN_LIDSW_OPEN) ||
+			    (scancode_only == SCAN_LIDSW_CLOSE)) {
+				process_lid(ikbdev, scancode);
+				goto work_exit;
+			}
+#endif
+			keycode = emul0_map[scancode_only];
+			if (!keycode) {
+				dev_err(&ikbdev->dev,
+					"Unrecognized scancode %02x %02x\n",
+					SCAN_EMUL0, scancode);
+				goto work_exit;
+			}
+		} else {
+			keycode = scancode & 0x7f;
+		}
+		/* MS bit of scancode indicates direction of keypress */
+		ikbd_drv_data->key_down = !(scancode & 0x80);
+		if (keycode) {
+			input_event(ikbdev, EV_MSC, MSC_SCAN, scancode);
+			input_report_key(ikbdev, keycode,
+					 ikbd_drv_data->key_down);
+			input_sync(ikbdev);
+		}
+		goto work_exit;
+	}
+
+	mutex_unlock(&ikbd_drv_data->kb_mutex);
+
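+	/*
+	 * Raw ON2 protocol: while Fn is held, lookups are offset into the
+	 * upper half (0x80..0xFF) of on2_keycode; bit 7 of the scancode
+	 * marks a key release.
+	 */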
+	if (scancode == RC_KEY_FN) {
+		ikbd_drv_data->fn = 0x80;     /* select keycode table  > 0x7F */
+	} else {
+		ikbd_drv_data->key_down = 1;
+		if (scancode & RC_KEY_BREAK) {
+			ikbd_drv_data->key_down = 0;
+			if ((scancode & 0x7F) == RC_KEY_FN)
+				ikbd_drv_data->fn = 0;
+		}
+		keycode = on2_keycode[(scancode & 0x7F) | ikbd_drv_data->fn];
+		if (keycode != NOKEY) {
+			input_report_key(ikbdev,
+					 keycode,
+					 ikbd_drv_data->key_down);
+			input_sync(ikbdev);
+		}
+	}
+	return;
+
+work_exit:
+	mutex_unlock(&ikbd_drv_data->kb_mutex);
+}
+
+static int qcikbd_input_event(struct input_dev *dev, unsigned int type,
+			      unsigned int code, int value)
+{
+	struct i2ckbd_drv_data *ikbd_drv_data = input_get_drvdata(dev);
+	struct input_dev *ikbdev = ikbd_drv_data->qcikbd_dev;
+
+	if (type != EV_LED)
+		return -EINVAL;
+
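+	/* EC LED byte: bit 0 = scroll lock, bit 1 = num lock, bit 2 = caps lock */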
+	ikbd_drv_data->led_status =
+		(test_bit(LED_SCROLLL, ikbdev->led) ? 1 : 0) |
+		(test_bit(LED_NUML, ikbdev->led) ? 2 : 0) |
+		(test_bit(LED_CAPSL, ikbdev->led) ? 4 : 0);
+	ikbd_drv_data->event_led = 1;
+
+	schedule_work(&ikbd_drv_data->work);
+	return 0;
+}
+
+static int qcikbd_open(struct input_dev *dev)
+{
+	struct i2ckbd_drv_data *ikbd_drv_data = input_get_drvdata(dev);
+	struct i2c_client *ikbdclient = ikbd_drv_data->ki2c_client;
+
+	/* Send F4h - enable keyboard */
+	i2c_smbus_write_byte(ikbdclient, KEYBOARD_CMD_ENABLE);
+	return 0;
+}
+
+static int __devinit qcikbd_probe(struct i2c_client *client,
+				    const struct i2c_device_id *id)
+{
+	int err;
+	int i;
+	struct i2ckbd_drv_data *context;
+	struct qci_kbd_platform_data *pdata = client->dev.platform_data;
+
+	if (!pdata) {
+		pr_err("[KBD] platform data not supplied\n");
+		return -EINVAL;
+	}
+
+	context = kzalloc(sizeof(struct i2ckbd_drv_data), GFP_KERNEL);
+	if (!context)
+		return -ENOMEM;
+	i2c_set_clientdata(client, context);
+	context->ki2c_client = client;
+	context->qcikbd_gpio = client->irq;
+	client->driver = &i2ckbd_driver;
+
+	INIT_WORK(&context->work, qcikbd_work_handler);
+	mutex_init(&context->kb_mutex);
+
+	err = gpio_request(context->qcikbd_gpio, "qci-kbd");
+	if (err) {
+		pr_err("[KBD] gpio request failed\n");
+		goto gpio_request_fail;
+	}
+
+	context->qcikbd_irq = gpio_to_irq(context->qcikbd_gpio);
+	err = request_irq(context->qcikbd_irq,
+			  qcikbd_interrupt,
+			  IRQF_TRIGGER_FALLING,
+			  KEYBOARD_ID_NAME,
+			  context);
+	if (err) {
+		pr_err("[KBD] unable to request IRQ\n");
+		goto request_irq_fail;
+	}
+
+	context->standard_scancodes = pdata->standard_scancodes;
+	context->kb_leds = pdata->kb_leds;
+	context->qcikbd_dev = input_allocate_device();
+	if (!context->qcikbd_dev) {
+		pr_err("[KBD] failed to allocate input device\n");
+		err = -ENOMEM;
+		goto allocate_fail;
+	}
+
+	context->qcikbd_dev->name       = KEYBOARD_NAME;
+	context->qcikbd_dev->phys       = KEYBOARD_DEVICE;
+	context->qcikbd_dev->id.bustype = BUS_I2C;
+	context->qcikbd_dev->id.vendor  = 0x1050;
+	context->qcikbd_dev->id.product = 0x0006;
+	context->qcikbd_dev->id.version = 0x0004;
+	context->qcikbd_dev->open       = qcikbd_open;
+	set_bit(EV_KEY, context->qcikbd_dev->evbit);
+	__set_bit(MSC_SCAN, context->qcikbd_dev->mscbit);
+
+	if (pdata->repeat)
+		set_bit(EV_REP, context->qcikbd_dev->evbit);
+
+	/* Enable all supported keys */
+	for (i = 1; i < ARRAY_SIZE(on2_keycode) ; i++)
+		set_bit(on2_keycode[i], context->qcikbd_dev->keybit);
+
+	set_bit(KEY_POWER, context->qcikbd_dev->keybit);
+	set_bit(KEY_END, context->qcikbd_dev->keybit);
+	set_bit(KEY_VOLUMEUP, context->qcikbd_dev->keybit);
+	set_bit(KEY_VOLUMEDOWN, context->qcikbd_dev->keybit);
+	set_bit(KEY_ZOOMIN, context->qcikbd_dev->keybit);
+	set_bit(KEY_ZOOMOUT, context->qcikbd_dev->keybit);
+
+#ifdef CONFIG_KEYBOARD_QCIKBD_LID
+	set_bit(EV_SW, context->qcikbd_dev->evbit);
+	set_bit(SW_LID, context->qcikbd_dev->swbit);
+#endif
+
+	if (context->kb_leds) {
+		context->qcikbd_dev->event = qcikbd_input_event;
+		__set_bit(EV_LED, context->qcikbd_dev->evbit);
+		__set_bit(LED_NUML, context->qcikbd_dev->ledbit);
+		__set_bit(LED_CAPSL, context->qcikbd_dev->ledbit);
+		__set_bit(LED_SCROLLL, context->qcikbd_dev->ledbit);
+	}
+
+	input_set_drvdata(context->qcikbd_dev, context);
+	err = input_register_device(context->qcikbd_dev);
+	if (err) {
+		pr_err("[KBD] input device registration failed\n");
+		goto register_fail;
+	}
+	g_qci_keyboard_dev = context->qcikbd_dev;
+	return 0;
+register_fail:
+	input_free_device(context->qcikbd_dev);
+
+allocate_fail:
+	free_irq(context->qcikbd_irq, context);
+
+request_irq_fail:
+	gpio_free(context->qcikbd_gpio);
+
+gpio_request_fail:
+	i2c_set_clientdata(client, NULL);
+	kfree(context);
+	return err;
+}
+
+static int __devexit qcikbd_remove(struct i2c_client *dev)
+{
+	struct i2ckbd_drv_data *context = i2c_get_clientdata(dev);
+
+	free_irq(context->qcikbd_irq, context);
+	gpio_free(context->qcikbd_gpio);
+	input_unregister_device(context->qcikbd_dev);
+	kfree(context);
+
+	return 0;
+}
+
+static int __init qcikbd_init(void)
+{
+	return i2c_add_driver(&i2ckbd_driver);
+}
+
+static void __exit qcikbd_exit(void)
+{
+	i2c_del_driver(&i2ckbd_driver);
+}
+
+struct input_dev *nkbc_keypad_get_input_dev(void)
+{
+	return g_qci_keyboard_dev;
+}
+EXPORT_SYMBOL(nkbc_keypad_get_input_dev);
+module_init(qcikbd_init);
+module_exit(qcikbd_exit);
+
+MODULE_AUTHOR("Quanta Computer Inc.");
+MODULE_DESCRIPTION("Quanta Embedded Controller I2C Keyboard Driver");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index ce28bf6..9acebc0 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -321,6 +321,18 @@
 	help
 	  Say Y here if you want to support gpio based keys, wheels etc...
 
+config INPUT_ISA1200_FF_MEMLESS
+	tristate "ISA1200 haptic ff-memless support"
+	depends on I2C
+	select INPUT_FF_MEMLESS
+	help
+	  ISA1200 is a high performance enhanced haptic chip.
+	  Say Y here if you want to support ISA1200 connected via I2C,
+	  and select N if you are unsure.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called isa1200-ff-memless.
+
 config HP_SDC_RTC
 	tristate "HP SDC Real Time Clock"
 	depends on (GSC || HP300) && SERIO
@@ -505,4 +517,43 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called xen-kbdfront.
 
+config PMIC8058_PWRKEY
+	tristate "PMIC8058 power key support"
+	default n
+	depends on PMIC8058
+	help
+	  Say Y here if you want support for the PMIC8058 power key.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called pmic8058-pwrkey.
+
+config PMIC8058_OTHC
+	tristate "Qualcomm PMIC8058 OTHC support"
+	default n
+	depends on PMIC8058
+	help
+	  Say Y here if you want support for PMIC8058 OTHC.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called pmic8058-othc.
+
+config INPUT_PMIC8058_VIBRA_MEMLESS
+	tristate "Qualcomm PM8058 vibrator support (ff-memless)"
+	depends on PMIC8058 && INPUT_FF_MEMLESS
+	default n
+	help
+	  This option enables device driver support for the vibrator
+	  on Qualcomm PM8058 chip. This driver supports ff-memless interface
+	  from input framework.
+
+	  To compile this driver as module, choose M here: the
+	  module will be called pmic8058-vib-memless.
+
+config BOSCH_BMA150
+	tristate "SMB380/BMA150 acceleration sensor support"
+	depends on I2C=y
+	help
+	  If you say yes here you get support for Bosch Sensortec's
+	  acceleration sensors SMB380/BMA150.
+
 endif
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 014d45f..770eb96 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -33,6 +33,7 @@
 obj-$(CONFIG_INPUT_PCF50633_PMU)	+= pcf50633-input.o
 obj-$(CONFIG_INPUT_PCF8574)		+= pcf8574_keypad.o
 obj-$(CONFIG_INPUT_PCSPKR)		+= pcspkr.o
 obj-$(CONFIG_INPUT_POWERMATE)		+= powermate.o
 obj-$(CONFIG_INPUT_PWM_BEEPER)		+= pwm-beeper.o
 obj-$(CONFIG_INPUT_PMIC8XXX_PWRKEY)	+= pmic8xxx-pwrkey.o
@@ -48,4 +49,8 @@
 obj-$(CONFIG_INPUT_WM831X_ON)		+= wm831x-on.o
 obj-$(CONFIG_INPUT_XEN_KBDDEV_FRONTEND)	+= xen-kbdfront.o
 obj-$(CONFIG_INPUT_YEALINK)		+= yealink.o
+obj-$(CONFIG_PMIC8058_PWRKEY)		+= pmic8058-pwrkey.o
+obj-$(CONFIG_PMIC8058_OTHC)		+= pmic8058-othc.o
+obj-$(CONFIG_INPUT_PMIC8058_VIBRA_MEMLESS) += pmic8058-vib-memless.o
+obj-$(CONFIG_BOSCH_BMA150)		+= bma150.o
 
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
new file mode 100644
index 0000000..8911c0b
--- /dev/null
+++ b/drivers/input/misc/bma150.c
@@ -0,0 +1,791 @@
+/*  Date: 2011/3/7 11:00:00
+ *  Revision: 2.11
+ */
+
+/*
+ * This software program is licensed subject to the GNU General Public License
+ * (GPL), Version 2, June 1991, available at
+ * http://www.fsf.org/copyleft/gpl.html
+ *
+ * (C) Copyright 2011 Bosch Sensortec GmbH
+ * All Rights Reserved
+ */
+
+/*
+ * file: bma150.c
+ * brief: This file contains all function implementations for the BMA150
+ * accelerometer in Linux.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/bma150.h>
+
+#define SENSOR_NAME			"bma150"
+#define GRAVITY_EARTH		9806550
+#define ABSMIN_2G			(-GRAVITY_EARTH * 2)
+#define ABSMAX_2G			(GRAVITY_EARTH * 2)
+#define BMA150_MAX_DELAY	200
+#define BMA150_CHIP_ID		2
+#define BMA150_RANGE_SET	0
+#define BMA150_BW_SET		4
+
+
+
+#define BMA150_CHIP_ID_REG		0x00
+#define BMA150_X_AXIS_LSB_REG	0x02
+#define BMA150_X_AXIS_MSB_REG	0x03
+#define BMA150_Y_AXIS_LSB_REG	0x04
+#define BMA150_Y_AXIS_MSB_REG	0x05
+#define BMA150_Z_AXIS_LSB_REG	0x06
+#define BMA150_Z_AXIS_MSB_REG	0x07
+#define BMA150_STATUS_REG		0x09
+#define BMA150_CTRL_REG			0x0a
+#define BMA150_CONF1_REG		0x0b
+
+#define BMA150_CUSTOMER1_REG	0x12
+#define BMA150_CUSTOMER2_REG	0x13
+#define BMA150_RANGE_BWIDTH_REG	0x14
+#define BMA150_CONF2_REG		0x15
+
+#define BMA150_OFFS_GAIN_X_REG	0x16
+#define BMA150_OFFS_GAIN_Y_REG	0x17
+#define BMA150_OFFS_GAIN_Z_REG	0x18
+#define BMA150_OFFS_GAIN_T_REG	0x19
+#define BMA150_OFFSET_X_REG		0x1a
+#define BMA150_OFFSET_Y_REG		0x1b
+#define BMA150_OFFSET_Z_REG		0x1c
+#define BMA150_OFFSET_T_REG		0x1d
+
+#define BMA150_CHIP_ID__POS		0
+#define BMA150_CHIP_ID__MSK		0x07
+#define BMA150_CHIP_ID__LEN		3
+#define BMA150_CHIP_ID__REG		BMA150_CHIP_ID_REG
+
+/* DATA REGISTERS */
+
+#define BMA150_NEW_DATA_X__POS		0
+#define BMA150_NEW_DATA_X__LEN		1
+#define BMA150_NEW_DATA_X__MSK		0x01
+#define BMA150_NEW_DATA_X__REG		BMA150_X_AXIS_LSB_REG
+
+#define BMA150_ACC_X_LSB__POS		6
+#define BMA150_ACC_X_LSB__LEN		2
+#define BMA150_ACC_X_LSB__MSK		0xC0
+#define BMA150_ACC_X_LSB__REG		BMA150_X_AXIS_LSB_REG
+
+#define BMA150_ACC_X_MSB__POS		0
+#define BMA150_ACC_X_MSB__LEN		8
+#define BMA150_ACC_X_MSB__MSK		0xFF
+#define BMA150_ACC_X_MSB__REG		BMA150_X_AXIS_MSB_REG
+
+#define BMA150_ACC_Y_LSB__POS		6
+#define BMA150_ACC_Y_LSB__LEN		2
+#define BMA150_ACC_Y_LSB__MSK		0xC0
+#define BMA150_ACC_Y_LSB__REG		BMA150_Y_AXIS_LSB_REG
+
+#define BMA150_ACC_Y_MSB__POS		0
+#define BMA150_ACC_Y_MSB__LEN		8
+#define BMA150_ACC_Y_MSB__MSK		0xFF
+#define BMA150_ACC_Y_MSB__REG		BMA150_Y_AXIS_MSB_REG
+
+#define BMA150_ACC_Z_LSB__POS		6
+#define BMA150_ACC_Z_LSB__LEN		2
+#define BMA150_ACC_Z_LSB__MSK		0xC0
+#define BMA150_ACC_Z_LSB__REG		BMA150_Z_AXIS_LSB_REG
+
+#define BMA150_ACC_Z_MSB__POS		0
+#define BMA150_ACC_Z_MSB__LEN		8
+#define BMA150_ACC_Z_MSB__MSK		0xFF
+#define BMA150_ACC_Z_MSB__REG		BMA150_Z_AXIS_MSB_REG
+
+/* CONTROL BITS */
+
+#define BMA150_SLEEP__POS			0
+#define BMA150_SLEEP__LEN			1
+#define BMA150_SLEEP__MSK			0x01
+#define BMA150_SLEEP__REG			BMA150_CTRL_REG
+
+#define BMA150_SOFT_RESET__POS		1
+#define BMA150_SOFT_RESET__LEN		1
+#define BMA150_SOFT_RESET__MSK		0x02
+#define BMA150_SOFT_RESET__REG		BMA150_CTRL_REG
+
+#define BMA150_EE_W__POS			4
+#define BMA150_EE_W__LEN			1
+#define BMA150_EE_W__MSK			0x10
+#define BMA150_EE_W__REG			BMA150_CTRL_REG
+
+#define BMA150_UPDATE_IMAGE__POS	5
+#define BMA150_UPDATE_IMAGE__LEN	1
+#define BMA150_UPDATE_IMAGE__MSK	0x20
+#define BMA150_UPDATE_IMAGE__REG	BMA150_CTRL_REG
+
+#define BMA150_RESET_INT__POS		6
+#define BMA150_RESET_INT__LEN		1
+#define BMA150_RESET_INT__MSK		0x40
+#define BMA150_RESET_INT__REG		BMA150_CTRL_REG
+
+/* BANDWIDTH dependent definitions */
+
+#define BMA150_BANDWIDTH__POS				0
+#define BMA150_BANDWIDTH__LEN				3
+#define BMA150_BANDWIDTH__MSK				0x07
+#define BMA150_BANDWIDTH__REG				BMA150_RANGE_BWIDTH_REG
+
+/* RANGE */
+
+#define BMA150_RANGE__POS				3
+#define BMA150_RANGE__LEN				2
+#define BMA150_RANGE__MSK				0x18
+#define BMA150_RANGE__REG				BMA150_RANGE_BWIDTH_REG
+
+/* WAKE UP */
+
+#define BMA150_WAKE_UP__POS			0
+#define BMA150_WAKE_UP__LEN			1
+#define BMA150_WAKE_UP__MSK			0x01
+#define BMA150_WAKE_UP__REG			BMA150_CONF2_REG
+
+#define BMA150_WAKE_UP_PAUSE__POS		1
+#define BMA150_WAKE_UP_PAUSE__LEN		2
+#define BMA150_WAKE_UP_PAUSE__MSK		0x06
+#define BMA150_WAKE_UP_PAUSE__REG		BMA150_CONF2_REG
+
+#define BMA150_GET_BITSLICE(regvar, bitname)\
+	((regvar & bitname##__MSK) >> bitname##__POS)
+
+
+#define BMA150_SET_BITSLICE(regvar, bitname, val)\
+	((regvar & ~bitname##__MSK) | ((val<<bitname##__POS)&bitname##__MSK))
+
+/* range and bandwidth */
+
+#define BMA150_RANGE_2G			0
+#define BMA150_RANGE_4G			1
+#define BMA150_RANGE_8G			2
+
+#define BMA150_BW_25HZ		0
+#define BMA150_BW_50HZ		1
+#define BMA150_BW_100HZ		2
+#define BMA150_BW_190HZ		3
+#define BMA150_BW_375HZ		4
+#define BMA150_BW_750HZ		5
+#define BMA150_BW_1500HZ	6
+
+/* mode settings */
+
+#define BMA150_MODE_NORMAL      0
+#define BMA150_MODE_SLEEP       2
+#define BMA150_MODE_WAKE_UP     3
+
+struct bma150acc{
+	s16	x,
+		y,
+		z;
+} ;
+
+struct bma150_data {
+	struct i2c_client *bma150_client;
+	struct bma150_platform_data *platform_data;
+	int IRQ;
+	atomic_t delay;
+	unsigned char mode;
+	struct input_dev *input;
+	struct bma150acc value;
+	struct mutex value_mutex;
+	struct mutex mode_mutex;
+	struct delayed_work work;
+	struct work_struct irq_work;
+};
+
+static int bma150_smbus_read_byte(struct i2c_client *client,
+		unsigned char reg_addr, unsigned char *data)
+{
+	s32 dummy;
+	dummy = i2c_smbus_read_byte_data(client, reg_addr);
+	if (dummy < 0)
+		return -EPERM;
+	*data = dummy & 0x000000ff;
+
+	return 0;
+}
+
+static int bma150_smbus_write_byte(struct i2c_client *client,
+		unsigned char reg_addr, unsigned char *data)
+{
+	s32 dummy;
+	dummy = i2c_smbus_write_byte_data(client, reg_addr, *data);
+	if (dummy < 0)
+		return -EPERM;
+	return 0;
+}
+
+static int bma150_smbus_read_byte_block(struct i2c_client *client,
+		unsigned char reg_addr, unsigned char *data, unsigned char len)
+{
+	s32 dummy;
+	dummy = i2c_smbus_read_i2c_block_data(client, reg_addr, len, data);
+	if (dummy < 0)
+		return -EPERM;
+	return 0;
+}
+
+static int bma150_set_mode(struct i2c_client *client, unsigned char Mode)
+{
+	int comres = 0;
+	unsigned char data1 = 0, data2 = 0;
+	struct bma150_data *bma150 = i2c_get_clientdata(client);
+
+	if (client == NULL) {
+		comres = -1;
+	} else{
+		if (Mode < 4 && Mode != 1) {
+
+			comres = bma150_smbus_read_byte(client,
+						BMA150_WAKE_UP__REG, &data1);
+			data1 = BMA150_SET_BITSLICE(data1,
+						BMA150_WAKE_UP, Mode);
+			comres += bma150_smbus_read_byte(client,
+						BMA150_SLEEP__REG, &data2);
+			data2 = BMA150_SET_BITSLICE(data2,
+						BMA150_SLEEP, (Mode>>1));
+			comres += bma150_smbus_write_byte(client,
+						BMA150_WAKE_UP__REG, &data1);
+			comres += bma150_smbus_write_byte(client,
+						BMA150_SLEEP__REG, &data2);
+			mutex_lock(&bma150->mode_mutex);
+			bma150->mode = (unsigned char) Mode;
+			mutex_unlock(&bma150->mode_mutex);
+
+		} else{
+			comres = -1;
+		}
+	}
+
+	return comres;
+}
+
+
+static int bma150_set_range(struct i2c_client *client, unsigned char Range)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	if (client == NULL) {
+		comres = -1;
+	} else{
+		if (Range < 3) {
+
+			comres = bma150_smbus_read_byte(client,
+						BMA150_RANGE__REG, &data);
+			data = BMA150_SET_BITSLICE(data, BMA150_RANGE, Range);
+			comres += bma150_smbus_write_byte(client,
+						BMA150_RANGE__REG, &data);
+
+		} else{
+			comres = -1;
+		}
+	}
+
+	return comres;
+}
+
+static int bma150_get_range(struct i2c_client *client, unsigned char *Range)
+{
+	int comres = 0;
+	unsigned char data;
+
+	if (client == NULL) {
+		comres = -1;
+	} else{
+		comres = bma150_smbus_read_byte(client,
+						BMA150_RANGE__REG, &data);
+
+		*Range = BMA150_GET_BITSLICE(data, BMA150_RANGE);
+
+	}
+
+	return comres;
+}
+
+
+
+static int bma150_set_bandwidth(struct i2c_client *client, unsigned char BW)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	if (client == NULL) {
+		comres = -1;
+	} else{
+		if (BW < 8) {
+			comres = bma150_smbus_read_byte(client,
+						BMA150_BANDWIDTH__REG, &data);
+			data = BMA150_SET_BITSLICE(data, BMA150_BANDWIDTH, BW);
+			comres += bma150_smbus_write_byte(client,
+						BMA150_BANDWIDTH__REG, &data);
+
+		} else{
+			comres = -1;
+		}
+	}
+
+	return comres;
+}
+
+static int bma150_get_bandwidth(struct i2c_client *client, unsigned char *BW)
+{
+	int comres = 0;
+	unsigned char data;
+
+	if (client == NULL) {
+		comres = -1;
+	} else{
+
+
+		comres = bma150_smbus_read_byte(client,
+						BMA150_BANDWIDTH__REG, &data);
+
+		*BW = BMA150_GET_BITSLICE(data, BMA150_BANDWIDTH);
+
+
+	}
+
+	return comres;
+}
+
+static int bma150_read_accel_xyz(struct i2c_client *client,
+		struct bma150acc *acc)
+{
+	int comres;
+	unsigned char data[6];
+	if (client == NULL) {
+		comres = -1;
+	} else{
+
+
+		comres = bma150_smbus_read_byte_block(client,
+					BMA150_ACC_X_LSB__REG, &data[0], 6);
+
+		acc->x = BMA150_GET_BITSLICE(data[0], BMA150_ACC_X_LSB) |
+			(BMA150_GET_BITSLICE(data[1], BMA150_ACC_X_MSB)<<
+							BMA150_ACC_X_LSB__LEN);
+		acc->x = acc->x << (sizeof(short)*8-(BMA150_ACC_X_LSB__LEN+
+							BMA150_ACC_X_MSB__LEN));
+		acc->x = acc->x >> (sizeof(short)*8-(BMA150_ACC_X_LSB__LEN+
+							BMA150_ACC_X_MSB__LEN));
+
+		acc->y = BMA150_GET_BITSLICE(data[2], BMA150_ACC_Y_LSB) |
+			(BMA150_GET_BITSLICE(data[3], BMA150_ACC_Y_MSB)<<
+							BMA150_ACC_Y_LSB__LEN);
+		acc->y = acc->y << (sizeof(short)*8-(BMA150_ACC_Y_LSB__LEN +
+							BMA150_ACC_Y_MSB__LEN));
+		acc->y = acc->y >> (sizeof(short)*8-(BMA150_ACC_Y_LSB__LEN +
+							BMA150_ACC_Y_MSB__LEN));
+
+
+		acc->z = BMA150_GET_BITSLICE(data[4], BMA150_ACC_Z_LSB);
+		acc->z |= (BMA150_GET_BITSLICE(data[5], BMA150_ACC_Z_MSB)<<
+							BMA150_ACC_Z_LSB__LEN);
+		acc->z = acc->z << (sizeof(short)*8-(BMA150_ACC_Z_LSB__LEN+
+							BMA150_ACC_Z_MSB__LEN));
+		acc->z = acc->z >> (sizeof(short)*8-(BMA150_ACC_Z_LSB__LEN+
+							BMA150_ACC_Z_MSB__LEN));
+
+	}
+
+	return comres;
+}
+
+static void bma150_work_func(struct work_struct *work)
+{
+	struct bma150_data *bma150 = container_of((struct delayed_work *)work,
+			struct bma150_data, work);
+	static struct bma150acc acc;
+	unsigned long delay = msecs_to_jiffies(atomic_read(&bma150->delay));
+
+
+
+	bma150_read_accel_xyz(bma150->bma150_client, &acc);
+	input_report_abs(bma150->input, ABS_X, acc.x);
+	input_report_abs(bma150->input, ABS_Y, acc.y);
+	input_report_abs(bma150->input, ABS_Z, acc.z);
+	input_sync(bma150->input);
+	mutex_lock(&bma150->value_mutex);
+	bma150->value = acc;
+	mutex_unlock(&bma150->value_mutex);
+	schedule_delayed_work(&bma150->work, delay);
+}
+
+static ssize_t bma150_mode_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct bma150_data *bma150 = i2c_get_clientdata(client);
+
+	mutex_lock(&bma150->mode_mutex);
+	data = bma150->mode;
+	mutex_unlock(&bma150->mode_mutex);
+
+	return sprintf(buf, "%d\n", data);
+}
+
+static ssize_t bma150_mode_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct bma150_data *bma150 = i2c_get_clientdata(client);
+
+	error = strict_strtoul(buf, 10, &data);
+	if (error)
+		return error;
+	if (bma150_set_mode(bma150->bma150_client, (unsigned char) data) < 0)
+		return -EINVAL;
+
+
+	return count;
+}
+static ssize_t bma150_range_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct bma150_data *bma150 = i2c_get_clientdata(client);
+
+	if (bma150_get_range(bma150->bma150_client, &data) < 0)
+		return sprintf(buf, "Read error\n");
+
+	return sprintf(buf, "%d\n", data);
+}
+
+static ssize_t bma150_range_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct bma150_data *bma150 = i2c_get_clientdata(client);
+
+	error = strict_strtoul(buf, 10, &data);
+	if (error)
+		return error;
+	if (bma150_set_range(bma150->bma150_client, (unsigned char) data) < 0)
+		return -EINVAL;
+
+	return count;
+}
+
+static ssize_t bma150_bandwidth_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct bma150_data *bma150 = i2c_get_clientdata(client);
+
+	if (bma150_get_bandwidth(bma150->bma150_client, &data) < 0)
+		return sprintf(buf, "Read error\n");
+
+	return sprintf(buf, "%d\n", data);
+
+}
+
+static ssize_t bma150_bandwidth_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct bma150_data *bma150 = i2c_get_clientdata(client);
+
+	error = strict_strtoul(buf, 10, &data);
+	if (error)
+		return error;
+	if (bma150_set_bandwidth(bma150->bma150_client,
+				(unsigned char) data) < 0)
+		return -EINVAL;
+
+	return count;
+}
+
+static ssize_t bma150_value_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct bma150_data *bma150 = input_get_drvdata(input);
+	struct bma150acc acc_value;
+
+	mutex_lock(&bma150->value_mutex);
+	acc_value = bma150->value;
+	mutex_unlock(&bma150->value_mutex);
+
+	return sprintf(buf, "%d %d %d\n", acc_value.x, acc_value.y,
+			acc_value.z);
+}
+
+
+
+static ssize_t bma150_delay_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct bma150_data *bma150 = i2c_get_clientdata(client);
+
+	return sprintf(buf, "%d\n", atomic_read(&bma150->delay));
+
+}
+
+static ssize_t bma150_delay_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct bma150_data *bma150 = i2c_get_clientdata(client);
+
+	error = strict_strtoul(buf, 10, &data);
+	if (error)
+		return error;
+	if (data > BMA150_MAX_DELAY)
+		data = BMA150_MAX_DELAY;
+	atomic_set(&bma150->delay, (unsigned int) data);
+
+	return count;
+}
+
+static DEVICE_ATTR(range, S_IRUGO|S_IWUSR|S_IWGRP,
+		bma150_range_show, bma150_range_store);
+static DEVICE_ATTR(bandwidth, S_IRUGO|S_IWUSR|S_IWGRP,
+		bma150_bandwidth_show, bma150_bandwidth_store);
+static DEVICE_ATTR(mode, S_IRUGO|S_IWUSR|S_IWGRP,
+		bma150_mode_show, bma150_mode_store);
+static DEVICE_ATTR(value, S_IRUGO|S_IWUSR|S_IWGRP,
+		bma150_value_show, NULL);
+static DEVICE_ATTR(delay, S_IRUGO|S_IWUSR|S_IWGRP|S_IWOTH,
+		bma150_delay_show, bma150_delay_store);
+
+static struct attribute *bma150_attributes[] = {
+	&dev_attr_range.attr,
+	&dev_attr_bandwidth.attr,
+	&dev_attr_mode.attr,
+	&dev_attr_value.attr,
+	&dev_attr_delay.attr,
+	NULL
+};
+
+static struct attribute_group bma150_attribute_group = {
+	.attrs = bma150_attributes
+};
+
+static int bma150_detect(struct i2c_client *client,
+			  struct i2c_board_info *info)
+{
+	struct i2c_adapter *adapter = client->adapter;
+
+	if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
+		return -ENODEV;
+
+	strlcpy(info->type, SENSOR_NAME, I2C_NAME_SIZE);
+
+	return 0;
+}
+
+static int bma150_input_init(struct bma150_data *bma150)
+{
+	struct input_dev *dev;
+	int err;
+
+	dev = input_allocate_device();
+	if (!dev)
+		return -ENOMEM;
+	dev->name = SENSOR_NAME;
+	dev->id.bustype = BUS_I2C;
+
+	input_set_capability(dev, EV_ABS, ABS_MISC);
+	input_set_abs_params(dev, ABS_X, ABSMIN_2G, ABSMAX_2G, 0, 0);
+	input_set_abs_params(dev, ABS_Y, ABSMIN_2G, ABSMAX_2G, 0, 0);
+	input_set_abs_params(dev, ABS_Z, ABSMIN_2G, ABSMAX_2G, 0, 0);
+	input_set_drvdata(dev, bma150);
+
+	err = input_register_device(dev);
+	if (err < 0) {
+		input_free_device(dev);
+		return err;
+	}
+	bma150->input = dev;
+
+	return 0;
+}
+
+static void bma150_input_delete(struct bma150_data *bma150)
+{
+	struct input_dev *dev = bma150->input;
+
+	input_unregister_device(dev);
+	input_free_device(dev);
+}
+
+static int bma150_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	int err = 0;
+	int tempvalue;
+	struct bma150_data *data;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		printk(KERN_INFO "i2c_check_functionality error\n");
+		goto exit;
+	}
+	data = kzalloc(sizeof(struct bma150_data), GFP_KERNEL);
+	if (!data) {
+		err = -ENOMEM;
+		goto exit;
+	}
+
+	i2c_set_clientdata(client, data);
+	data->platform_data = client->dev.platform_data;
+
+	if (data->platform_data && data->platform_data->power_on)
+		data->platform_data->power_on();
+	else
+		printk(KERN_ERR "power_on function not defined!!\n");
+
+	tempvalue = 0;
+	tempvalue = i2c_smbus_read_word_data(client, BMA150_CHIP_ID_REG);
+
+	if ((tempvalue&0x00FF) == BMA150_CHIP_ID) {
+		printk(KERN_INFO "Bosch Sensortec Device detected!\n" \
+				"BMA150 registered I2C driver!\n");
+	} else{
+		printk(KERN_INFO "Bosch Sensortec Device not found, " \
+			"i2c error %d\n", tempvalue);
+		err = -ENODEV;
+		goto kfree_exit;
+	}
+	i2c_set_clientdata(client, data);
+	data->bma150_client = client;
+	mutex_init(&data->value_mutex);
+	mutex_init(&data->mode_mutex);
+	bma150_set_bandwidth(client, BMA150_BW_SET);
+	bma150_set_range(client, BMA150_RANGE_SET);
+
+
+	INIT_DELAYED_WORK(&data->work, bma150_work_func);
+	atomic_set(&data->delay, BMA150_MAX_DELAY);
+	err = bma150_input_init(data);
+	if (err < 0)
+		goto kfree_exit;
+
+	err = sysfs_create_group(&data->input->dev.kobj,
+			&bma150_attribute_group);
+	if (err < 0)
+		goto error_sysfs;
+
+	schedule_delayed_work(&data->work,
+			msecs_to_jiffies(atomic_read(&data->delay)));
+
+	return 0;
+
+error_sysfs:
+	bma150_input_delete(data);
+
+kfree_exit:
+	kfree(data);
+exit:
+	return err;
+}
+
+static int bma150_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+	struct bma150_data *data = i2c_get_clientdata(client);
+
+	cancel_delayed_work_sync(&data->work);
+
+	bma150_set_mode(client, BMA150_MODE_SLEEP);
+
+	if ((data->platform_data) && (data->platform_data->power_off))
+		data->platform_data->power_off();
+
+	return 0;
+}
+
+static int bma150_resume(struct i2c_client *client)
+{
+	struct bma150_data *data = i2c_get_clientdata(client);
+
+	if ((data->platform_data) && (data->platform_data->power_on))
+		data->platform_data->power_on();
+
+	bma150_set_mode(client, BMA150_MODE_NORMAL);
+
+	schedule_delayed_work(&data->work,
+		msecs_to_jiffies(atomic_read(&data->delay)));
+
+	return 0;
+}
+
+static int bma150_remove(struct i2c_client *client)
+{
+	struct bma150_data *data = i2c_get_clientdata(client);
+
+	if (data->platform_data && data->platform_data->power_off)
+		data->platform_data->power_off();
+	else
+		printk(KERN_ERR "power_off function not defined!!\n");
+
+	sysfs_remove_group(&data->input->dev.kobj, &bma150_attribute_group);
+	bma150_input_delete(data);
+	free_irq(data->IRQ, data);
+	kfree(data);
+
+	return 0;
+}
+
+static const struct i2c_device_id bma150_id[] = {
+	{ SENSOR_NAME, 0 },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(i2c, bma150_id);
+
+static struct i2c_driver bma150_driver = {
+	.driver = {
+		.owner	= THIS_MODULE,
+		.name	= SENSOR_NAME,
+	},
+	.class          = I2C_CLASS_HWMON,
+	.id_table	= bma150_id,
+	.probe		= bma150_probe,
+	.remove		= bma150_remove,
+	.detect		= bma150_detect,
+	.suspend    = bma150_suspend,
+	.resume     = bma150_resume,
+};
+
+static int __init BMA150_init(void)
+{
+	return i2c_add_driver(&bma150_driver);
+}
+
+static void __exit BMA150_exit(void)
+{
+	i2c_del_driver(&bma150_driver);
+}
+
+MODULE_DESCRIPTION("BMA150 driver");
+MODULE_LICENSE("GPL v2");
+module_init(BMA150_init);
+module_exit(BMA150_exit);
+
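A note on the axis read-out above: bma150_read_accel_xyz() assembles each axis from a 2-bit LSB field and an 8-bit MSB register, then sign-extends the 10-bit result by shifting it up to the width of a short and arithmetically back down. The following standalone sketch (not part of the patch) reproduces the same assembly with illustrative values, which can help when checking the register layout against the datasheet.

#include <stdio.h>

/* the 2 LSBs sit in bits 7:6 of the LSB register, the 8 MSBs in the MSB register */
static short bma150_demo_sample(unsigned char lsb, unsigned char msb)
{
	short v = ((lsb & 0xC0) >> 6) | (msb << 2);	/* raw 10-bit sample */

	v <<= sizeof(short) * 8 - 10;	/* move the sign bit into bit 15 */
	v >>= sizeof(short) * 8 - 10;	/* arithmetic shift back: sign extension */
	return v;
}

int main(void)
{
	printf("%d\n", bma150_demo_sample(0x00, 0x80));	/* raw 0x200 -> -512 */
	printf("%d\n", bma150_demo_sample(0xC0, 0x7F));	/* raw 0x1FF -> +511 */
	return 0;
}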
diff --git a/drivers/input/misc/gpio_matrix.c b/drivers/input/misc/gpio_matrix.c
index eaa9e89..56e91fd 100644
--- a/drivers/input/misc/gpio_matrix.c
+++ b/drivers/input/misc/gpio_matrix.c
@@ -181,12 +181,14 @@
 			gpio_set_value(gpio, polarity);
 		else
 			gpio_direction_output(gpio, polarity);
-		hrtimer_start(timer, mi->settle_time, HRTIMER_MODE_REL);
+		hrtimer_start(timer, timespec_to_ktime(mi->settle_time),
+			HRTIMER_MODE_REL);
 		return HRTIMER_NORESTART;
 	}
 	if (gpio_keypad_flags & GPIOKPF_DEBOUNCE) {
 		if (kp->key_state_changed) {
-			hrtimer_start(&kp->timer, mi->debounce_delay,
+			hrtimer_start(&kp->timer,
+				timespec_to_ktime(mi->debounce_delay),
 				      HRTIMER_MODE_REL);
 			return HRTIMER_NORESTART;
 		}
@@ -202,7 +204,8 @@
 		report_sync(kp);
 	}
 	if (!kp->use_irq || kp->some_keys_pressed) {
-		hrtimer_start(timer, mi->poll_time, HRTIMER_MODE_REL);
+		hrtimer_start(timer, timespec_to_ktime(mi->poll_time),
+			HRTIMER_MODE_REL);
 		return HRTIMER_NORESTART;
 	}
 
diff --git a/drivers/input/misc/isa1200-ff-memless.c b/drivers/input/misc/isa1200-ff-memless.c
new file mode 100644
index 0000000..f4e2c35
--- /dev/null
+++ b/drivers/input/misc/isa1200-ff-memless.c
@@ -0,0 +1,449 @@
+/*
+ * Copyright (C) 2009 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/pwm.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/pm.h>
+#include <linux/i2c/isa1200.h>
+
+#define ISA1200_HCTRL0			0x30
+#define HCTRL0_MODE_CTRL_BIT		(3)
+#define HCTRL0_OVERDRIVE_HIGH_BIT	(5)
+#define HCTRL0_OVERDRIVE_EN_BIT		(6)
+#define HCTRL0_HAP_EN			(7)
+#define HCTRL0_RESET			0x01
+#define HCTRL1_RESET			0x4B
+
+#define ISA1200_HCTRL1			0x31
+#define HCTRL1_SMART_ENABLE_BIT		(3)
+#define HCTRL1_ERM_BIT			(5)
+#define HCTRL1_EXT_CLK_ENABLE_BIT	(7)
+
+#define ISA1200_HCTRL5			0x35
+#define HCTRL5_VIB_STRT			0xD5
+#define HCTRL5_VIB_STOP			0x6B
+
+#define DIVIDER_128			(128)
+#define DIVIDER_1024			(1024)
+#define DIVIDE_SHIFTER_128		(7)
+
+#define FREQ_22400			(22400)
+#define FREQ_172600			(172600)
+
+#define POR_DELAY_USEC			250
+
+struct isa1200_chip {
+	const struct isa1200_platform_data *pdata;
+	struct i2c_client *client;
+	struct input_dev *input_device;
+	struct pwm_device *pwm;
+	unsigned int period_ns;
+	unsigned int state;
+	struct work_struct work;
+};
+
+static void isa1200_vib_set(struct isa1200_chip *haptic, int enable)
+{
+	int rc;
+
+	if (enable) {
+		if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE) {
+			int period_us = haptic->period_ns / NSEC_PER_USEC;
+			rc = pwm_config(haptic->pwm,
+				(period_us * haptic->pdata->duty) / 100,
+				period_us);
+			if (rc < 0)
+				pr_err("pwm_config fail\n");
+			rc = pwm_enable(haptic->pwm);
+			if (rc < 0)
+				pr_err("pwm_enable fail\n");
+		} else if (haptic->pdata->mode_ctrl == PWM_GEN_MODE) {
+			rc = i2c_smbus_write_byte_data(haptic->client,
+						ISA1200_HCTRL5,
+						HCTRL5_VIB_STRT);
+			if (rc < 0)
+				pr_err("start vibration fail\n");
+		}
+	} else {
+		if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE)
+			pwm_disable(haptic->pwm);
+		else if (haptic->pdata->mode_ctrl == PWM_GEN_MODE) {
+			rc = i2c_smbus_write_byte_data(haptic->client,
+						ISA1200_HCTRL5,
+						HCTRL5_VIB_STOP);
+			if (rc < 0)
+				pr_err("stop vibration fail\n");
+		}
+	}
+}
+
+static int isa1200_setup(struct i2c_client *client)
+{
+	struct isa1200_chip *haptic = i2c_get_clientdata(client);
+	int value, temp, rc;
+
+	gpio_set_value_cansleep(haptic->pdata->hap_en_gpio, 0);
+	udelay(POR_DELAY_USEC);
+	gpio_set_value_cansleep(haptic->pdata->hap_en_gpio, 1);
+
+	value =	(haptic->pdata->smart_en << HCTRL1_SMART_ENABLE_BIT) |
+		(haptic->pdata->is_erm << HCTRL1_ERM_BIT) |
+		(haptic->pdata->ext_clk_en << HCTRL1_EXT_CLK_ENABLE_BIT);
+
+	rc = i2c_smbus_write_byte_data(client, ISA1200_HCTRL1, value);
+	if (rc < 0) {
+		pr_err("i2c write failure\n");
+		return rc;
+	}
+
+	if (haptic->pdata->mode_ctrl == PWM_GEN_MODE) {
+		temp = haptic->pdata->pwm_fd.pwm_div;
+		if (temp < DIVIDER_128 || temp > DIVIDER_1024 ||
+					temp % DIVIDER_128) {
+			pr_err("Invalid divider\n");
+			rc = -EINVAL;
+			goto reset_hctrl1;
+		}
+		value = ((temp >> DIVIDE_SHIFTER_128) - 1);
+	} else if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE) {
+		temp = haptic->pdata->pwm_fd.pwm_freq;
+		if (temp < FREQ_22400 || temp > FREQ_172600 ||
+					temp % FREQ_22400) {
+			pr_err("Invalid frequency\n");
+			rc = -EINVAL;
+			goto reset_hctrl1;
+		}
+		value = ((temp / FREQ_22400) - 1);
+		haptic->period_ns = NSEC_PER_SEC / temp;
+	}
+	value |= (haptic->pdata->mode_ctrl << HCTRL0_MODE_CTRL_BIT) |
+		(haptic->pdata->overdrive_high << HCTRL0_OVERDRIVE_HIGH_BIT) |
+		(haptic->pdata->overdrive_en << HCTRL0_OVERDRIVE_EN_BIT) |
+		(haptic->pdata->chip_en << HCTRL0_HAP_EN);
+
+	rc = i2c_smbus_write_byte_data(client, ISA1200_HCTRL0, value);
+	if (rc < 0) {
+		pr_err("i2c write failure\n");
+		goto reset_hctrl1;
+	}
+
+	return 0;
+
+reset_hctrl1:
+	i2c_smbus_write_byte_data(client, ISA1200_HCTRL1,
+				HCTRL1_RESET);
+	return rc;
+}
+
+static void isa1200_worker(struct work_struct *work)
+{
+	struct isa1200_chip *haptic;
+
+	haptic = container_of(work, struct isa1200_chip, work);
+	isa1200_vib_set(haptic, !!haptic->state);
+}
+
+static int isa1200_play_effect(struct input_dev *dev, void *data,
+				struct ff_effect *effect)
+{
+	struct isa1200_chip *haptic = input_get_drvdata(dev);
+
+	/* support basic vibration */
+	haptic->state = effect->u.rumble.strong_magnitude >> 8;
+	if (!haptic->state)
+		haptic->state = effect->u.rumble.weak_magnitude >> 9;
+
+	schedule_work(&haptic->work);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int isa1200_suspend(struct device *dev)
+{
+	struct isa1200_chip *haptic = dev_get_drvdata(dev);
+	int rc;
+
+	cancel_work_sync(&haptic->work);
+	/* turn-off current vibration */
+	isa1200_vib_set(haptic, 0);
+
+	if (haptic->pdata->power_on) {
+		rc = haptic->pdata->power_on(0);
+		if (rc) {
+			pr_err("power-down failed\n");
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int isa1200_resume(struct device *dev)
+{
+	struct isa1200_chip *haptic = dev_get_drvdata(dev);
+	int rc;
+
+	if (haptic->pdata->power_on) {
+		rc = haptic->pdata->power_on(1);
+		if (rc) {
+			pr_err("power-up failed\n");
+			return rc;
+		}
+	}
+
+	isa1200_setup(haptic->client);
+	return 0;
+}
+#else
+#define isa1200_suspend		NULL
+#define isa1200_resume		NULL
+#endif
+
+static int isa1200_open(struct input_dev *dev)
+{
+	struct isa1200_chip *haptic = input_get_drvdata(dev);
+	int rc;
+
+	/* device setup */
+	if (haptic->pdata->dev_setup) {
+		rc = haptic->pdata->dev_setup(true);
+		if (rc < 0) {
+			pr_err("setup failed!\n");
+			return rc;
+		}
+	}
+
+	/* power on */
+	if (haptic->pdata->power_on) {
+		rc = haptic->pdata->power_on(true);
+		if (rc < 0) {
+			pr_err("power failed\n");
+			goto err_setup;
+		}
+	}
+
+	/* request gpio */
+	rc = gpio_is_valid(haptic->pdata->hap_en_gpio);
+	if (rc) {
+		rc = gpio_request(haptic->pdata->hap_en_gpio, "haptic_gpio");
+		if (rc) {
+			pr_err("gpio %d request failed\n",
+					haptic->pdata->hap_en_gpio);
+			goto err_power_on;
+		}
+	} else {
+		pr_err("Invalid gpio %d\n", haptic->pdata->hap_en_gpio);
+		rc = -EINVAL;
+		goto err_power_on;
+	}
+
+	rc = gpio_direction_output(haptic->pdata->hap_en_gpio, 0);
+	if (rc) {
+		pr_err("gpio %d set direction failed\n",
+					haptic->pdata->hap_en_gpio);
+		goto err_gpio_free;
+	}
+
+	/* setup registers */
+	rc = isa1200_setup(haptic->client);
+	if (rc < 0) {
+		pr_err("setup fail %d\n", rc);
+		goto err_gpio_free;
+	}
+
+	if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE) {
+		haptic->pwm = pwm_request(haptic->pdata->pwm_ch_id,
+				haptic->client->driver->id_table->name);
+		if (IS_ERR(haptic->pwm)) {
+			pr_err("pwm request failed\n");
+			rc = PTR_ERR(haptic->pwm);
+			goto err_reset_hctrl0;
+		}
+	}
+
+	/* init workqeueue */
+	INIT_WORK(&haptic->work, isa1200_worker);
+	return 0;
+
+err_reset_hctrl0:
+	i2c_smbus_write_byte_data(haptic->client, ISA1200_HCTRL0,
+					HCTRL0_RESET);
+err_gpio_free:
+	gpio_free(haptic->pdata->hap_en_gpio);
+err_power_on:
+	if (haptic->pdata->power_on)
+		haptic->pdata->power_on(0);
+err_setup:
+	if (haptic->pdata->dev_setup)
+		haptic->pdata->dev_setup(false);
+
+	return rc;
+}
+
+static void isa1200_close(struct input_dev *dev)
+{
+	struct isa1200_chip *haptic = input_get_drvdata(dev);
+
+	/* turn-off current vibration */
+	isa1200_vib_set(haptic, 0);
+
+	if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE)
+		pwm_free(haptic->pwm);
+
+	gpio_free(haptic->pdata->hap_en_gpio);
+
+	/* reset hardware registers */
+	i2c_smbus_write_byte_data(haptic->client, ISA1200_HCTRL0,
+				HCTRL0_RESET);
+	i2c_smbus_write_byte_data(haptic->client, ISA1200_HCTRL1,
+				HCTRL1_RESET);
+
+	if (haptic->pdata->dev_setup)
+		haptic->pdata->dev_setup(false);
+
+	/* power-off the chip */
+	if (haptic->pdata->power_on)
+		haptic->pdata->power_on(0);
+}
+
+static int __devinit isa1200_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct isa1200_chip *haptic;
+	int rc;
+
+	if (!i2c_check_functionality(client->adapter,
+			I2C_FUNC_SMBUS_BYTE_DATA)) {
+		pr_err("i2c is not supported\n");
+		return -EIO;
+	}
+
+	if (!client->dev.platform_data) {
+		pr_err("pdata is not available\n");
+		return -EINVAL;
+	}
+
+	haptic = kzalloc(sizeof(struct isa1200_chip), GFP_KERNEL);
+	if (!haptic) {
+		pr_err("no memory\n");
+		return -ENOMEM;
+	}
+
+	haptic->pdata = client->dev.platform_data;
+	haptic->client = client;
+
+	i2c_set_clientdata(client, haptic);
+
+	haptic->input_device = input_allocate_device();
+	if (!haptic->input_device) {
+		pr_err("input device alloc failed\n");
+		rc = -ENOMEM;
+		goto err_mem_alloc;
+	}
+
+	input_set_drvdata(haptic->input_device, haptic);
+	haptic->input_device->name = haptic->pdata->name ? :
+					"isa1200-ff-memless";
+
+	haptic->input_device->dev.parent = &client->dev;
+
+	input_set_capability(haptic->input_device, EV_FF, FF_RUMBLE);
+
+	haptic->input_device->open = isa1200_open;
+	haptic->input_device->close = isa1200_close;
+
+	rc = input_ff_create_memless(haptic->input_device, NULL,
+					isa1200_play_effect);
+	if (rc < 0) {
+		pr_err("unable to register with ff\n");
+		goto err_free_dev;
+	}
+
+	rc = input_register_device(haptic->input_device);
+	if (rc < 0) {
+		pr_err("unable to register input device\n");
+		goto err_ff_destroy;
+	}
+
+	return 0;
+
+err_ff_destroy:
+	input_ff_destroy(haptic->input_device);
+err_free_dev:
+	input_free_device(haptic->input_device);
+err_mem_alloc:
+	kfree(haptic);
+	return rc;
+}
+
+static int __devexit isa1200_remove(struct i2c_client *client)
+{
+	struct isa1200_chip *haptic = i2c_get_clientdata(client);
+
+	input_unregister_device(haptic->input_device);
+	kfree(haptic);
+
+	return 0;
+}
+
+static const struct i2c_device_id isa1200_id_table[] = {
+	{"isa1200_1", 0},
+	{ },
+};
+MODULE_DEVICE_TABLE(i2c, isa1200_id_table);
+
+static const struct dev_pm_ops isa1200_pm_ops = {
+	.suspend = isa1200_suspend,
+	.resume = isa1200_resume,
+};
+
+static struct i2c_driver isa1200_driver = {
+	.driver = {
+		.name = "isa1200-ff-memless",
+		.owner = THIS_MODULE,
+		.pm = &isa1200_pm_ops,
+	},
+	.probe = isa1200_probe,
+	.remove = __devexit_p(isa1200_remove),
+	.id_table = isa1200_id_table,
+};
+
+static int __init isa1200_init(void)
+{
+	return i2c_add_driver(&isa1200_driver);
+}
+module_init(isa1200_init);
+
+static void __exit isa1200_exit(void)
+{
+	i2c_del_driver(&isa1200_driver);
+}
+module_exit(isa1200_exit);
+
+MODULE_DESCRIPTION("isa1200 based vibrator chip driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
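The isa1200 driver above registers a memless force-feedback device with only FF_RUMBLE capability, so it is driven through the standard input-layer force-feedback ioctls; isa1200_play_effect() derives its on/off state from the rumble's strong_magnitude shifted down by 8. A minimal userspace sketch of exercising it, assuming a hypothetical /dev/input/eventN node for the haptic device:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	struct ff_effect effect;
	struct input_event play;
	int fd = open("/dev/input/event5", O_RDWR);	/* hypothetical node */

	if (fd < 0)
		return 1;

	memset(&effect, 0, sizeof(effect));
	effect.type = FF_RUMBLE;
	effect.id = -1;					/* let the kernel pick a slot */
	effect.u.rumble.strong_magnitude = 0x8000;	/* driver uses this >> 8 */
	effect.replay.length = 500;			/* ms */

	if (ioctl(fd, EVIOCSFF, &effect) < 0) {		/* upload the effect */
		close(fd);
		return 1;
	}

	memset(&play, 0, sizeof(play));
	play.type = EV_FF;
	play.code = effect.id;				/* play the uploaded effect */
	play.value = 1;
	write(fd, &play, sizeof(play));

	usleep(500 * 1000);				/* let it run */
	close(fd);
	return 0;
}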
diff --git a/drivers/input/misc/pmic8058-othc.c b/drivers/input/misc/pmic8058-othc.c
new file mode 100644
index 0000000..c6be119
--- /dev/null
+++ b/drivers/input/misc/pmic8058-othc.c
@@ -0,0 +1,1199 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/switch.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/hrtimer.h>
+#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/mfd/pmic8058.h>
+#include <linux/pmic8058-othc.h>
+#include <linux/msm_adc.h>
+
+#define PM8058_OTHC_LOW_CURR_MASK	0xF0
+#define PM8058_OTHC_HIGH_CURR_MASK	0x0F
+#define PM8058_OTHC_EN_SIG_MASK		0x3F
+#define PM8058_OTHC_HYST_PREDIV_MASK	0xC7
+#define PM8058_OTHC_CLK_PREDIV_MASK	0xF8
+#define PM8058_OTHC_HYST_CLK_MASK	0x0F
+#define PM8058_OTHC_PERIOD_CLK_MASK	0xF0
+
+#define PM8058_OTHC_LOW_CURR_SHIFT	0x4
+#define PM8058_OTHC_EN_SIG_SHIFT	0x6
+#define PM8058_OTHC_HYST_PREDIV_SHIFT	0x3
+#define PM8058_OTHC_HYST_CLK_SHIFT	0x4
+
+#define OTHC_GPIO_MAX_LEN		25
+
+struct pm8058_othc {
+	bool othc_sw_state;
+	bool switch_reject;
+	bool othc_support_n_switch;
+	bool accessory_support;
+	bool accessories_adc_support;
+	int othc_base;
+	int othc_irq_sw;
+	int othc_irq_ir;
+	int othc_ir_state;
+	int num_accessories;
+	int curr_accessory_code;
+	int curr_accessory;
+	int video_out_gpio;
+	u32 sw_key_code;
+	u32 accessories_adc_channel;
+	int ir_gpio;
+	unsigned long switch_debounce_ms;
+	unsigned long detection_delay_ms;
+	void *adc_handle;
+	void *accessory_adc_handle;
+	spinlock_t lock;
+	struct regulator *othc_vreg;
+	struct input_dev *othc_ipd;
+	struct switch_dev othc_sdev;
+	struct pmic8058_othc_config_pdata *othc_pdata;
+	struct othc_accessory_info *accessory_info;
+	struct hrtimer timer;
+	struct othc_n_switch_config *switch_config;
+	struct pm8058_chip *pm_chip;
+	struct work_struct switch_work;
+	struct delayed_work detect_work;
+	struct delayed_work hs_work;
+};
+
+static struct pm8058_othc *config[OTHC_MICBIAS_MAX];
+
+static void hs_worker(struct work_struct *work)
+{
+	int rc;
+	struct pm8058_othc *dd =
+		container_of(work, struct pm8058_othc, hs_work.work);
+
+	rc = gpio_get_value_cansleep(dd->ir_gpio);
+	if (rc < 0) {
+		pr_err("Unable to read IR GPIO\n");
+		enable_irq(dd->othc_irq_ir);
+		return;
+	}
+
+	dd->othc_ir_state = !rc;
+	schedule_delayed_work(&dd->detect_work,
+				msecs_to_jiffies(dd->detection_delay_ms));
+}
+
+static irqreturn_t ir_gpio_irq(int irq, void *dev_id)
+{
+	unsigned long flags;
+	struct pm8058_othc *dd = dev_id;
+
+	spin_lock_irqsave(&dd->lock, flags);
+	/* Enable the switch reject flag */
+	dd->switch_reject = true;
+	spin_unlock_irqrestore(&dd->lock, flags);
+
+	/* Start the HR timer if one is not active */
+	if (hrtimer_active(&dd->timer))
+		hrtimer_cancel(&dd->timer);
+
+	hrtimer_start(&dd->timer,
+		ktime_set((dd->switch_debounce_ms / 1000),
+		(dd->switch_debounce_ms % 1000) * 1000000), HRTIMER_MODE_REL);
+
+	/* disable irq, this gets enabled in the workqueue */
+	disable_irq_nosync(dd->othc_irq_ir);
+	schedule_delayed_work(&dd->hs_work, 0);
+
+	return IRQ_HANDLED;
+}
+/*
+ * The API pm8058_micbias_enable() allows configuring
+ * the MIC_BIAS line. Only the lines which are not used for
+ * headset detection can be configured using this API.
+ * The API returns an error code if it fails to configure
+ * the specified MIC_BIAS line, else it returns 0.
+ */
+int pm8058_micbias_enable(enum othc_micbias micbias,
+		enum othc_micbias_enable enable)
+{
+	int rc;
+	u8 reg;
+	struct pm8058_othc *dd = config[micbias];
+
+	if (dd == NULL) {
+		pr_err("MIC_BIAS not registered, cannot enable\n");
+		return -ENODEV;
+	}
+
+	if (dd->othc_pdata->micbias_capability != OTHC_MICBIAS) {
+		pr_err("MIC_BIAS enable capability not supported\n");
+		return -EINVAL;
+	}
+
+	rc = pm8058_read(dd->pm_chip, dd->othc_base + 1, &reg, 1);
+	if (rc < 0) {
+		pr_err("PM8058 read failed\n");
+		return rc;
+	}
+
+	reg &= PM8058_OTHC_EN_SIG_MASK;
+	reg |= (enable << PM8058_OTHC_EN_SIG_SHIFT);
+
+	rc = pm8058_write(dd->pm_chip, dd->othc_base + 1, &reg, 1);
+	if (rc < 0) {
+		pr_err("PM8058 write failed\n");
+		return rc;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(pm8058_micbias_enable);
+
+int pm8058_othc_svideo_enable(enum othc_micbias micbias, bool enable)
+{
+	struct pm8058_othc *dd = config[micbias];
+
+	if (dd == NULL) {
+		pr_err("MIC_BIAS not registered, cannot enable\n");
+		return -ENODEV;
+	}
+
+	if (dd->othc_pdata->micbias_capability != OTHC_MICBIAS_HSED) {
+		pr_err("MIC_BIAS enable capability not supported\n");
+		return -EINVAL;
+	}
+
+	if (dd->accessories_adc_support) {
+		/* GPIO state for MIC_IN = 0, SVIDEO = 1 */
+		gpio_set_value_cansleep(dd->video_out_gpio, !!enable);
+		if (enable) {
+			pr_debug("Enable the video path\n");
+			switch_set_state(&dd->othc_sdev, dd->curr_accessory);
+			input_report_switch(dd->othc_ipd,
+						dd->curr_accessory_code, 1);
+			input_sync(dd->othc_ipd);
+		} else {
+			pr_debug("Disable the video path\n");
+			switch_set_state(&dd->othc_sdev, 0);
+			input_report_switch(dd->othc_ipd,
+					dd->curr_accessory_code, 0);
+			input_sync(dd->othc_ipd);
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(pm8058_othc_svideo_enable);
+
+#ifdef CONFIG_PM
+static int pm8058_othc_suspend(struct device *dev)
+{
+	int rc = 0;
+	struct pm8058_othc *dd = dev_get_drvdata(dev);
+
+	if (dd->othc_pdata->micbias_capability == OTHC_MICBIAS_HSED) {
+		if (device_may_wakeup(dev)) {
+			enable_irq_wake(dd->othc_irq_sw);
+			enable_irq_wake(dd->othc_irq_ir);
+		}
+	}
+
+	if (!device_may_wakeup(dev)) {
+		rc = regulator_disable(dd->othc_vreg);
+		if (rc)
+			pr_err("othc micbias power off failed\n");
+	}
+
+	return rc;
+}
+
+static int pm8058_othc_resume(struct device *dev)
+{
+	int rc = 0;
+	struct pm8058_othc *dd = dev_get_drvdata(dev);
+
+	if (dd->othc_pdata->micbias_capability == OTHC_MICBIAS_HSED) {
+		if (device_may_wakeup(dev)) {
+			disable_irq_wake(dd->othc_irq_sw);
+			disable_irq_wake(dd->othc_irq_ir);
+		}
+	}
+
+	if (!device_may_wakeup(dev)) {
+		rc = regulator_enable(dd->othc_vreg);
+		if (rc)
+			pr_err("othc micbias power on failed\n");
+	}
+
+	return rc;
+}
+
+static struct dev_pm_ops pm8058_othc_pm_ops = {
+	.suspend = pm8058_othc_suspend,
+	.resume = pm8058_othc_resume,
+};
+#endif
+
+static int __devexit pm8058_othc_remove(struct platform_device *pd)
+{
+	struct pm8058_othc *dd = platform_get_drvdata(pd);
+
+	pm_runtime_set_suspended(&pd->dev);
+	pm_runtime_disable(&pd->dev);
+
+	if (dd->othc_pdata->micbias_capability == OTHC_MICBIAS_HSED) {
+		device_init_wakeup(&pd->dev, 0);
+		if (dd->othc_support_n_switch == true) {
+			adc_channel_close(dd->adc_handle);
+			cancel_work_sync(&dd->switch_work);
+		}
+
+		if (dd->accessory_support == true) {
+			int i;
+			for (i = 0; i < dd->num_accessories; i++) {
+				if (dd->accessory_info[i].detect_flags &
+							OTHC_GPIO_DETECT)
+					gpio_free(dd->accessory_info[i].gpio);
+			}
+		}
+		cancel_delayed_work_sync(&dd->detect_work);
+		cancel_delayed_work_sync(&dd->hs_work);
+		free_irq(dd->othc_irq_sw, dd);
+		free_irq(dd->othc_irq_ir, dd);
+		if (dd->ir_gpio != -1)
+			gpio_free(dd->ir_gpio);
+		input_unregister_device(dd->othc_ipd);
+	}
+	regulator_disable(dd->othc_vreg);
+	regulator_put(dd->othc_vreg);
+
+	kfree(dd);
+
+	return 0;
+}
+
+static enum hrtimer_restart pm8058_othc_timer(struct hrtimer *timer)
+{
+	unsigned long flags;
+	struct pm8058_othc *dd = container_of(timer,
+					struct pm8058_othc, timer);
+
+	spin_lock_irqsave(&dd->lock, flags);
+	dd->switch_reject = false;
+	spin_unlock_irqrestore(&dd->lock, flags);
+
+	return HRTIMER_NORESTART;
+}
+
+static void othc_report_switch(struct pm8058_othc *dd, u32 res)
+{
+	u8 i;
+	struct othc_switch_info *sw_info = dd->switch_config->switch_info;
+
+	for (i = 0; i < dd->switch_config->num_keys; i++) {
+		if (res >= sw_info[i].min_adc_threshold &&
+				res <= sw_info[i].max_adc_threshold) {
+			dd->othc_sw_state = true;
+			dd->sw_key_code = sw_info[i].key_code;
+			input_report_key(dd->othc_ipd, sw_info[i].key_code, 1);
+			input_sync(dd->othc_ipd);
+			return;
+		}
+	}
+
+	/*
+	 * If the switch is not present in a specified ADC range
+	 * report a default switch press.
+	 */
+	if (dd->switch_config->default_sw_en) {
+		dd->othc_sw_state = true;
+		dd->sw_key_code =
+			sw_info[dd->switch_config->default_sw_idx].key_code;
+		input_report_key(dd->othc_ipd, dd->sw_key_code, 1);
+		input_sync(dd->othc_ipd);
+	}
+}
+
+static void switch_work_f(struct work_struct *work)
+{
+	int rc, i;
+	u32 res = 0;
+	struct adc_chan_result adc_result;
+	struct pm8058_othc *dd =
+		container_of(work, struct pm8058_othc, switch_work);
+	DECLARE_COMPLETION_ONSTACK(adc_wait);
+	u8 num_adc_samples = dd->switch_config->num_adc_samples;
+
+	/* sleep for settling time */
+	msleep(dd->switch_config->voltage_settling_time_ms);
+
+	for (i = 0; i < num_adc_samples; i++) {
+		rc = adc_channel_request_conv(dd->adc_handle, &adc_wait);
+		if (rc) {
+			pr_err("adc_channel_request_conv failed\n");
+			goto bail_out;
+		}
+		rc = wait_for_completion_interruptible(&adc_wait);
+		if (rc) {
+			pr_err("wait_for_completion_interruptible failed\n");
+			goto bail_out;
+		}
+		rc = adc_channel_read_result(dd->adc_handle, &adc_result);
+		if (rc) {
+			pr_err("adc_channel_read_result failed\n");
+			goto bail_out;
+		}
+		res += adc_result.physical;
+	}
+bail_out:
+	if (i == num_adc_samples && num_adc_samples != 0) {
+		res /= num_adc_samples;
+		othc_report_switch(dd, res);
+	} else
+		pr_err("Insufficient ADC samples\n");
+
+	enable_irq(dd->othc_irq_sw);
+}
+
+static int accessory_adc_detect(struct pm8058_othc *dd, int accessory)
+{
+	int rc;
+	u32 res;
+	struct adc_chan_result accessory_adc_result;
+	DECLARE_COMPLETION_ONSTACK(accessory_adc_wait);
+
+	rc = adc_channel_request_conv(dd->accessory_adc_handle,
+						&accessory_adc_wait);
+	if (rc) {
+		pr_err("adc_channel_request_conv failed\n");
+		goto adc_failed;
+	}
+	rc = wait_for_completion_interruptible(&accessory_adc_wait);
+	if (rc) {
+		pr_err("wait_for_completion_interruptible failed\n");
+		goto adc_failed;
+	}
+	rc = adc_channel_read_result(dd->accessory_adc_handle,
+						&accessory_adc_result);
+	if (rc) {
+		pr_err("adc_channel_read_result failed\n");
+		goto adc_failed;
+	}
+
+	res = accessory_adc_result.physical;
+
+	if (res >= dd->accessory_info[accessory].adc_thres.min_threshold &&
+		res <= dd->accessory_info[accessory].adc_thres.max_threshold) {
+		pr_debug("Accessory detected on ADC, value = %u\n", res);
+		return 1;
+	}
+
+adc_failed:
+	return 0;
+}
+
+
+static int pm8058_accessory_report(struct pm8058_othc *dd, int status)
+{
+	int i, rc, detected = 0;
+	u8 micbias_status, switch_status;
+
+	if (dd->accessory_support == false) {
+		/* Report default headset */
+		switch_set_state(&dd->othc_sdev, !!status);
+		input_report_switch(dd->othc_ipd, SW_HEADPHONE_INSERT,
+							!!status);
+		input_sync(dd->othc_ipd);
+		return 0;
+	}
+
+	/* For accessory */
+	if (dd->accessory_support == true && status == 0) {
+		/* Report removal of the accessory. */
+
+		/*
+		 * If the current accessory is video cable, reject the removal
+		 * interrupt.
+		 */
+		pr_info("Accessory [%d] removed\n", dd->curr_accessory);
+		if (dd->curr_accessory == OTHC_SVIDEO_OUT)
+			return 0;
+
+		switch_set_state(&dd->othc_sdev, 0);
+		input_report_switch(dd->othc_ipd, dd->curr_accessory_code, 0);
+		input_sync(dd->othc_ipd);
+		return 0;
+	}
+
+	if (dd->ir_gpio < 0) {
+		/* Check the MIC_BIAS status */
+		rc = pm8058_irq_get_rt_status(dd->pm_chip, dd->othc_irq_ir);
+		if (rc < 0) {
+			pr_err("Unable to read IR status from PMIC\n");
+			goto fail_ir_accessory;
+		}
+		micbias_status = !!rc;
+	} else {
+		rc = gpio_get_value_cansleep(dd->ir_gpio);
+		if (rc < 0) {
+			pr_err("Unable to read IR status from GPIO\n");
+			goto fail_ir_accessory;
+		}
+		micbias_status = !rc;
+	}
+
+	/* Check the switch status */
+	rc = pm8058_irq_get_rt_status(dd->pm_chip, dd->othc_irq_sw);
+	if (rc < 0) {
+		pr_err("Unable to read SWITCH status\n");
+		goto fail_ir_accessory;
+	}
+	switch_status = !!rc;
+
+	/* Loop through to check which accessory is connected */
+	for (i = 0; i < dd->num_accessories; i++) {
+		detected = 0;
+		if (dd->accessory_info[i].enabled == false)
+			continue;
+
+		if (dd->accessory_info[i].detect_flags & OTHC_MICBIAS_DETECT) {
+			if (micbias_status)
+				detected = 1;
+			else
+				continue;
+		}
+		if (dd->accessory_info[i].detect_flags & OTHC_SWITCH_DETECT) {
+			if (switch_status)
+				detected = 1;
+			else
+				continue;
+		}
+		if (dd->accessory_info[i].detect_flags & OTHC_GPIO_DETECT) {
+			rc = gpio_get_value_cansleep(
+						dd->accessory_info[i].gpio);
+			if (rc < 0)
+				continue;
+
+			if (rc ^ dd->accessory_info[i].active_low)
+				detected = 1;
+			else
+				continue;
+		}
+		if (dd->accessory_info[i].detect_flags & OTHC_ADC_DETECT)
+			detected = accessory_adc_detect(dd, i);
+
+		if (detected)
+			break;
+	}
+
+	if (detected) {
+		dd->curr_accessory = dd->accessory_info[i].accessory;
+		dd->curr_accessory_code = dd->accessory_info[i].key_code;
+
+		/* if Video out cable detected enable the video path*/
+		if (dd->curr_accessory == OTHC_SVIDEO_OUT) {
+			pm8058_othc_svideo_enable(
+					dd->othc_pdata->micbias_select, true);
+
+		} else {
+			switch_set_state(&dd->othc_sdev, dd->curr_accessory);
+			input_report_switch(dd->othc_ipd,
+						dd->curr_accessory_code, 1);
+			input_sync(dd->othc_ipd);
+		}
+		pr_info("Accessory [%d] inserted\n", dd->curr_accessory);
+	} else
+		pr_info("Unable to detect accessory. False interrupt!\n");
+
+	return 0;
+
+fail_ir_accessory:
+	return rc;
+}
+
+static void detect_work_f(struct work_struct *work)
+{
+	int rc;
+	struct pm8058_othc *dd =
+		container_of(work, struct pm8058_othc, detect_work.work);
+
+	if (dd->othc_ir_state) {
+		/* inserted */
+		rc = pm8058_accessory_report(dd, 1);
+		if (rc)
+			pr_err("Accessory could not be detected\n");
+	} else {
+		/* removed */
+		rc = pm8058_accessory_report(dd, 0);
+		if (rc)
+			pr_err("Accessory could not be detected\n");
+		/* Clear existing switch state */
+		dd->othc_sw_state = false;
+	}
+	enable_irq(dd->othc_irq_ir);
+}
+
+/*
+ * The pm8058_no_sw detects the switch press and release operation.
+ * Odd-numbered calls correspond to a press, even-numbered calls to a release.
+ * The current state of the button is maintained in the othc_sw_state variable.
+ * This ISR is called only for NO (normally open) type headsets.
+ */
+static irqreturn_t pm8058_no_sw(int irq, void *dev_id)
+{
+	int level;
+	struct pm8058_othc *dd = dev_id;
+	unsigned long flags;
+
+	/* Check if headset has been inserted, else return */
+	if (!dd->othc_ir_state)
+		return IRQ_HANDLED;
+
+	spin_lock_irqsave(&dd->lock, flags);
+	if (dd->switch_reject == true) {
+		pr_debug("Rejected switch interrupt\n");
+		spin_unlock_irqrestore(&dd->lock, flags);
+		return IRQ_HANDLED;
+	}
+	spin_unlock_irqrestore(&dd->lock, flags);
+
+	level = pm8058_irq_get_rt_status(dd->pm_chip, dd->othc_irq_sw);
+	if (level < 0) {
+		pr_err("Unable to read IRQ status register\n");
+		return IRQ_HANDLED;
+	}
+
+	if (dd->othc_support_n_switch == true) {
+		if (level == 0) {
+			dd->othc_sw_state = false;
+			input_report_key(dd->othc_ipd, dd->sw_key_code, 0);
+			input_sync(dd->othc_ipd);
+		} else {
+			disable_irq_nosync(dd->othc_irq_sw);
+			schedule_work(&dd->switch_work);
+		}
+		return IRQ_HANDLED;
+	}
+	/*
+	 * It is necessary to check the software state and the hardware state
+	 * to make sure that the residual interrupt after the debounce time does
+	 * not disturb the software state machine.
+	 */
+	if (level == 1 && dd->othc_sw_state == false) {
+		/*  Switch has been pressed */
+		dd->othc_sw_state = true;
+		input_report_key(dd->othc_ipd, KEY_MEDIA, 1);
+	} else if (level == 0 && dd->othc_sw_state == true) {
+		/* Switch has been released */
+		dd->othc_sw_state = false;
+		input_report_key(dd->othc_ipd, KEY_MEDIA, 0);
+	}
+	input_sync(dd->othc_ipd);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * The pm8058_nc_ir ISR detects insert/remove of the headset (for NO type).
+ * The current state of the headset is maintained in the othc_ir_state variable.
+ * Due to a hardware bug, false switch interrupts are seen during headset
+ * insert. This is handled in the software by rejecting the switch interrupts
+ * for a small period of time after the headset has been inserted.
+ */
+static irqreturn_t pm8058_nc_ir(int irq, void *dev_id)
+{
+	unsigned long flags;
+	int rc;
+	struct pm8058_othc *dd = dev_id;
+	spin_lock_irqsave(&dd->lock, flags);
+	/* Enable the switch reject flag */
+	dd->switch_reject = true;
+	spin_unlock_irqrestore(&dd->lock, flags);
+
+	/* Start the HR timer if one is not active */
+	if (hrtimer_active(&dd->timer))
+		hrtimer_cancel(&dd->timer);
+
+	hrtimer_start(&dd->timer,
+		ktime_set((dd->switch_debounce_ms / 1000),
+		(dd->switch_debounce_ms % 1000) * 1000000), HRTIMER_MODE_REL);
+
+	/* disable irq, this gets enabled in the workqueue */
+	disable_irq_nosync(dd->othc_irq_ir);
+
+	/* Check the MIC_BIAS status, to check if inserted or removed */
+	rc = pm8058_irq_get_rt_status(dd->pm_chip, dd->othc_irq_ir);
+	if (rc < 0) {
+		pr_err("Unable to read IR status\n");
+		goto fail_ir;
+	}
+
+	dd->othc_ir_state = rc;
+	schedule_delayed_work(&dd->detect_work,
+				msecs_to_jiffies(dd->detection_delay_ms));
+
+fail_ir:
+	return IRQ_HANDLED;
+}
+
+static int pm8058_configure_micbias(struct pm8058_othc *dd)
+{
+	int rc;
+	u8 reg, value;
+	u32 value1;
+	u16 base_addr = dd->othc_base;
+	struct hsed_bias_config *hsed_config =
+			dd->othc_pdata->hsed_config->hsed_bias_config;
+
+	/* Initialize the OTHC module */
+	/* Control Register 1*/
+	rc = pm8058_read(dd->pm_chip, base_addr, &reg, 1);
+	if (rc < 0) {
+		pr_err("PM8058 read failed\n");
+		return rc;
+	}
+
+	/* set iDAC high current threshold */
+	value = (hsed_config->othc_highcurr_thresh_uA / 100) - 2;
+	reg =  (reg & PM8058_OTHC_HIGH_CURR_MASK) | value;
+
+	rc = pm8058_write(dd->pm_chip, base_addr, &reg, 1);
+	if (rc < 0) {
+		pr_err("PM8058 write failed\n");
+		return rc;
+	}
+
+	/* Control register 2*/
+	rc = pm8058_read(dd->pm_chip, base_addr + 1, &reg, 1);
+	if (rc < 0) {
+		pr_err("PM8058 read failed\n");
+		return rc;
+	}
+
+	value = dd->othc_pdata->micbias_enable;
+	reg &= PM8058_OTHC_EN_SIG_MASK;
+	reg |= (value << PM8058_OTHC_EN_SIG_SHIFT);
+
+	value = 0;
+	value1 = (hsed_config->othc_hyst_prediv_us << 10) / USEC_PER_SEC;
+	while (value1 != 0) {
+		value1 = value1 >> 1;
+		value++;
+	}
+	if (value > 7) {
+		pr_err("Invalid input argument - othc_hyst_prediv_us\n");
+		return -EINVAL;
+	}
+	reg &= PM8058_OTHC_HYST_PREDIV_MASK;
+	reg |= (value << PM8058_OTHC_HYST_PREDIV_SHIFT);
+
+	value = 0;
+	value1 = (hsed_config->othc_period_clkdiv_us << 10) / USEC_PER_SEC;
+	while (value1 != 1) {
+		value1 = value1 >> 1;
+		value++;
+	}
+	if (value > 8) {
+		pr_err("Invalid input argument - othc_period_clkdiv_us\n");
+		return -EINVAL;
+	}
+	reg = (reg &  PM8058_OTHC_CLK_PREDIV_MASK) | (value - 1);
+
+	rc = pm8058_write(dd->pm_chip, base_addr + 1, &reg, 1);
+	if (rc < 0) {
+		pr_err("PM8058 write failed\n");
+		return rc;
+	}
+
+	/* Control register 3 */
+	rc = pm8058_read(dd->pm_chip, base_addr + 2 , &reg, 1);
+	if (rc < 0) {
+		pr_err("PM8058 read failed\n");
+		return rc;
+	}
+
+	value = hsed_config->othc_hyst_clk_us /
+					hsed_config->othc_hyst_prediv_us;
+	if (value > 15) {
+		pr_err("Invalid input argument - othc_hyst_prediv_us\n");
+		return -EINVAL;
+	}
+	reg &= PM8058_OTHC_HYST_CLK_MASK;
+	reg |= value << PM8058_OTHC_HYST_CLK_SHIFT;
+
+	value = hsed_config->othc_period_clk_us /
+					hsed_config->othc_period_clkdiv_us;
+	if (value > 15) {
+		pr_err("Invalid input argument - othc_period_clk_us\n");
+		return -EINVAL;
+	}
+	reg = (reg & PM8058_OTHC_PERIOD_CLK_MASK) | value;
+
+	rc = pm8058_write(dd->pm_chip, base_addr + 2, &reg, 1);
+	if (rc < 0) {
+		pr_err("PM8058 write failed\n");
+		return rc;
+	}
+
+	return 0;
+}
+
+static ssize_t othc_headset_print_name(struct switch_dev *sdev, char *buf)
+{
+	switch (switch_get_state(sdev)) {
+	case OTHC_NO_DEVICE:
+		return sprintf(buf, "No Device\n");
+	case OTHC_HEADSET:
+	case OTHC_HEADPHONE:
+	case OTHC_MICROPHONE:
+	case OTHC_ANC_HEADSET:
+	case OTHC_ANC_HEADPHONE:
+	case OTHC_ANC_MICROPHONE:
+		return sprintf(buf, "Headset\n");
+	}
+	return -EINVAL;
+}
+
+static int pm8058_configure_switch(struct pm8058_othc *dd)
+{
+	int rc, i;
+
+	if (dd->othc_support_n_switch == true) {
+		/* n-switch support */
+		rc = adc_channel_open(dd->switch_config->adc_channel,
+							&dd->adc_handle);
+		if (rc) {
+			pr_err("Unable to open ADC channel\n");
+			return -ENODEV;
+		}
+
+		for (i = 0; i < dd->switch_config->num_keys; i++) {
+			input_set_capability(dd->othc_ipd, EV_KEY,
+				dd->switch_config->switch_info[i].key_code);
+		}
+	} else /* Only single switch supported */
+		input_set_capability(dd->othc_ipd, EV_KEY, KEY_MEDIA);
+
+	return 0;
+}
+
+static int
+pm8058_configure_accessory(struct pm8058_othc *dd)
+{
+	int i, rc;
+	char name[OTHC_GPIO_MAX_LEN];
+
+	/*
+	 * Not bailing out if the gpio_* configure calls fail. This is required
+	 * as multiple accessories are detected by the same gpio.
+	 */
+	for (i = 0; i < dd->num_accessories; i++) {
+		if (dd->accessory_info[i].enabled == false)
+			continue;
+		if (dd->accessory_info[i].detect_flags & OTHC_GPIO_DETECT) {
+			snprintf(name, OTHC_GPIO_MAX_LEN, "%s%d",
+							"othc_acc_gpio_", i);
+			rc = gpio_request(dd->accessory_info[i].gpio, name);
+			if (rc) {
+				pr_debug("Unable to request GPIO [%d]\n",
+						dd->accessory_info[i].gpio);
+				continue;
+			}
+			rc = gpio_direction_input(dd->accessory_info[i].gpio);
+			if (rc) {
+				pr_debug("Unable to set-direction GPIO [%d]\n",
+						dd->accessory_info[i].gpio);
+				gpio_free(dd->accessory_info[i].gpio);
+				continue;
+			}
+		}
+		input_set_capability(dd->othc_ipd, EV_SW,
+					dd->accessory_info[i].key_code);
+	}
+
+	if (dd->accessories_adc_support) {
+		/*
+		 * Check if n-switch is supported. If both are using the same
+		 * ADC channel, the same handle can be used.
+		 */
+		if (dd->othc_support_n_switch) {
+			if (dd->adc_handle != NULL &&
+				(dd->accessories_adc_channel ==
+				 dd->switch_config->adc_channel))
+				dd->accessory_adc_handle = dd->adc_handle;
+		} else {
+			rc = adc_channel_open(dd->accessories_adc_channel,
+						&dd->accessory_adc_handle);
+			if (rc) {
+				pr_err("Unable to open ADC channel\n");
+				rc = -ENODEV;
+				goto accessory_adc_fail;
+			}
+		}
+		if (dd->video_out_gpio != 0) {
+			rc = gpio_request(dd->video_out_gpio, "vout_enable");
+			if (rc < 0) {
+				pr_err("request VOUT gpio failed (%d)\n", rc);
+				goto accessory_adc_fail;
+			}
+			rc = gpio_direction_output(dd->video_out_gpio, 0);
+			if (rc < 0) {
+				pr_err("direction_out failed (%d)\n", rc);
+				goto accessory_adc_fail;
+			}
+		}
+
+	}
+
+	return 0;
+
+accessory_adc_fail:
+	for (i = 0; i < dd->num_accessories; i++) {
+		if (dd->accessory_info[i].enabled == false)
+			continue;
+		gpio_free(dd->accessory_info[i].gpio);
+	}
+	return rc;
+}
+
+static int
+othc_configure_hsed(struct pm8058_othc *dd, struct platform_device *pd)
+{
+	int rc;
+	struct input_dev *ipd;
+	struct pmic8058_othc_config_pdata *pdata = pd->dev.platform_data;
+	struct othc_hsed_config *hsed_config = pdata->hsed_config;
+
+	dd->othc_sdev.name = "h2w";
+	dd->othc_sdev.print_name = othc_headset_print_name;
+
+	rc = switch_dev_register(&dd->othc_sdev);
+	if (rc) {
+		pr_err("Unable to register switch device\n");
+		return rc;
+	}
+
+	ipd = input_allocate_device();
+	if (ipd == NULL) {
+		pr_err("Unable to allocate memory\n");
+		rc = -ENOMEM;
+		goto fail_input_alloc;
+	}
+
+	/* Get the IRQ for Headset Insert-remove and Switch-press */
+	dd->othc_irq_sw = platform_get_irq(pd, 0);
+	dd->othc_irq_ir = platform_get_irq(pd, 1);
+	if (dd->othc_irq_ir < 0 || dd->othc_irq_sw < 0) {
+		pr_err("othc resource:IRQs absent\n");
+		rc = -ENXIO;
+		goto fail_micbias_config;
+	}
+
+	if (pdata->hsed_name != NULL)
+		ipd->name = pdata->hsed_name;
+	else
+		ipd->name = "pmic8058_othc";
+
+	ipd->phys = "pmic8058_othc/input0";
+	ipd->dev.parent = &pd->dev;
+
+	dd->othc_ipd = ipd;
+	dd->ir_gpio = hsed_config->ir_gpio;
+	dd->othc_sw_state = false;
+	dd->switch_debounce_ms = hsed_config->switch_debounce_ms;
+	dd->othc_support_n_switch = hsed_config->othc_support_n_switch;
+	dd->accessory_support = pdata->hsed_config->accessories_support;
+	dd->detection_delay_ms = pdata->hsed_config->detection_delay_ms;
+
+	if (dd->othc_support_n_switch == true)
+		dd->switch_config = hsed_config->switch_config;
+
+	if (dd->accessory_support == true) {
+		dd->accessory_info = pdata->hsed_config->accessories;
+		dd->num_accessories = pdata->hsed_config->othc_num_accessories;
+		dd->accessories_adc_support =
+				pdata->hsed_config->accessories_adc_support;
+		dd->accessories_adc_channel =
+				pdata->hsed_config->accessories_adc_channel;
+		dd->video_out_gpio = pdata->hsed_config->video_out_gpio;
+	}
+
+	/* Configure the MIC_BIAS line for headset detection */
+	rc = pm8058_configure_micbias(dd);
+	if (rc < 0)
+		goto fail_micbias_config;
+
+	/* Configure for the switch events */
+	rc = pm8058_configure_switch(dd);
+	if (rc < 0)
+		goto fail_micbias_config;
+
+	/* Configure the accessory */
+	if (dd->accessory_support == true) {
+		rc = pm8058_configure_accessory(dd);
+		if (rc < 0)
+			goto fail_micbias_config;
+	}
+
+	input_set_drvdata(ipd, dd);
+	spin_lock_init(&dd->lock);
+
+	rc = input_register_device(ipd);
+	if (rc) {
+		pr_err("Unable to register OTHC device\n");
+		goto fail_micbias_config;
+	}
+
+	hrtimer_init(&dd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	dd->timer.function = pm8058_othc_timer;
+
+	/* Request the HEADSET IR interrupt */
+	if (dd->ir_gpio < 0) {
+		rc = request_threaded_irq(dd->othc_irq_ir, NULL, pm8058_nc_ir,
+		IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_DISABLED,
+					"pm8058_othc_ir", dd);
+		if (rc < 0) {
+			pr_err("Unable to request pm8058_othc_ir IRQ\n");
+			goto fail_ir_irq;
+		}
+	} else {
+		rc = gpio_request(dd->ir_gpio, "othc_ir_gpio");
+		if (rc) {
+			pr_err("Unable to request IR GPIO\n");
+			goto fail_ir_gpio_req;
+		}
+		rc = gpio_direction_input(dd->ir_gpio);
+		if (rc) {
+			pr_err("GPIO %d set_direction failed\n", dd->ir_gpio);
+			goto fail_ir_irq;
+		}
+		dd->othc_irq_ir = gpio_to_irq(dd->ir_gpio);
+		rc = request_any_context_irq(dd->othc_irq_ir, ir_gpio_irq,
+		IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+				"othc_gpio_ir_irq", dd);
+		if (rc < 0) {
+			pr_err("could not request hs irq err=%d\n", rc);
+			goto fail_ir_irq;
+		}
+	}
+	/* Request the  SWITCH press/release interrupt */
+	rc = request_threaded_irq(dd->othc_irq_sw, NULL, pm8058_no_sw,
+	IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_DISABLED,
+			"pm8058_othc_sw", dd);
+	if (rc < 0) {
+		pr_err("Unable to request pm8058_othc_sw IRQ\n");
+		goto fail_sw_irq;
+	}
+
+	/* Check if the accessory is already inserted during boot up */
+	if (dd->ir_gpio < 0) {
+		rc = pm8058_irq_get_rt_status(dd->pm_chip, dd->othc_irq_ir);
+		if (rc < 0) {
+			pr_err("Unable to get accessory status at boot\n");
+			goto fail_ir_status;
+		}
+	} else {
+		rc = gpio_get_value_cansleep(dd->ir_gpio);
+		if (rc < 0) {
+			pr_err("Unable to get accessory status at boot\n");
+			goto fail_ir_status;
+		}
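+		/*
+		 * The IR detect GPIO appears to be active-low: the raw value
+		 * is inverted so that a non-zero result means an accessory
+		 * is present.
+		 */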
+		rc = !rc;
+	}
+	if (rc) {
+		pr_debug("Accessory inserted during boot up\n");
+		/* process the data and report the inserted accessory */
+		rc = pm8058_accessory_report(dd, 1);
+		if (rc)
+			pr_debug("Unabele to detect accessory at boot up\n");
+	}
+
+	device_init_wakeup(&pd->dev,
+			hsed_config->hsed_bias_config->othc_wakeup);
+
+	INIT_DELAYED_WORK(&dd->detect_work, detect_work_f);
+
+	INIT_DELAYED_WORK(&dd->hs_work, hs_worker);
+
+	if (dd->othc_support_n_switch == true)
+		INIT_WORK(&dd->switch_work, switch_work_f);
+
+
+	return 0;
+
+fail_ir_status:
+	free_irq(dd->othc_irq_sw, dd);
+fail_sw_irq:
+	free_irq(dd->othc_irq_ir, dd);
+fail_ir_irq:
+	if (dd->ir_gpio != -1)
+		gpio_free(dd->ir_gpio);
+fail_ir_gpio_req:
+	input_unregister_device(ipd);
+	dd->othc_ipd = NULL;
+	/* unregister already released the device; skip input_free_device below */
+	ipd = NULL;
+fail_micbias_config:
+	input_free_device(ipd);
+fail_input_alloc:
+	switch_dev_unregister(&dd->othc_sdev);
+	return rc;
+}
+
+static int __devinit pm8058_othc_probe(struct platform_device *pd)
+{
+	int rc;
+	struct pm8058_othc *dd;
+	struct pm8058_chip *chip;
+	struct resource *res;
+	struct pmic8058_othc_config_pdata *pdata = pd->dev.platform_data;
+
+	chip = dev_get_drvdata(pd->dev.parent);
+	if (chip == NULL) {
+		pr_err("Invalid driver information\n");
+		return  -EINVAL;
+	}
+
+	/* Check PMIC8058 version. A0 version is not supported */
+	if (pm8058_rev(chip) == PM_8058_REV_1p0) {
+		pr_err("PMIC8058 version not supported\n");
+		return -ENODEV;
+	}
+
+	if (pdata == NULL) {
+		pr_err("Platform data not present\n");
+		return -EINVAL;
+	}
+
+	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
+	if (dd == NULL) {
+		pr_err("Unable to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	/* Enable runtime PM ops, start in ACTIVE mode */
+	rc = pm_runtime_set_active(&pd->dev);
+	if (rc < 0)
+		dev_dbg(&pd->dev, "unable to set runtime pm state\n");
+	pm_runtime_enable(&pd->dev);
+
+	res = platform_get_resource_byname(pd, IORESOURCE_IO, "othc_base");
+	if (res == NULL) {
+		pr_err("othc resource:Base address absent\n");
+		rc = -ENXIO;
+		goto fail_get_res;
+	}
+
+	dd->othc_pdata = pdata;
+	dd->pm_chip = chip;
+	dd->othc_base = res->start;
+	if (pdata->micbias_regulator == NULL) {
+		pr_err("OTHC regulator not specified\n");
+		goto fail_get_res;
+	}
+
+	dd->othc_vreg = regulator_get(NULL,
+				pdata->micbias_regulator->regulator);
+	if (IS_ERR(dd->othc_vreg)) {
+		pr_err("regulator get failed\n");
+		rc = PTR_ERR(dd->othc_vreg);
+		goto fail_get_res;
+	}
+
+	rc = regulator_set_voltage(dd->othc_vreg,
+				pdata->micbias_regulator->min_uV,
+				pdata->micbias_regulator->max_uV);
+	if (rc) {
+		pr_err("othc regulator set voltage failed\n");
+		goto fail_reg_enable;
+	}
+
+	rc = regulator_enable(dd->othc_vreg);
+	if (rc) {
+		pr_err("othc regulator enable failed\n");
+		goto fail_reg_enable;
+	}
+
+	platform_set_drvdata(pd, dd);
+
+	if (pdata->micbias_capability == OTHC_MICBIAS_HSED) {
+		/* HSED to be supported on this MICBIAS line */
+		if (pdata->hsed_config != NULL) {
+			rc = othc_configure_hsed(dd, pd);
+			if (rc < 0)
+				goto fail_othc_hsed;
+		} else {
+			pr_err("HSED config data not present\n");
+			rc = -EINVAL;
+			goto fail_othc_hsed;
+		}
+	}
+
+	/* Store the local driver data structure */
+	if (dd->othc_pdata->micbias_select < OTHC_MICBIAS_MAX)
+		config[dd->othc_pdata->micbias_select] = dd;
+
+	pr_debug("Device %s:%d successfully registered\n",
+			pd->name, pd->id);
+	return 0;
+
+fail_othc_hsed:
+	regulator_disable(dd->othc_vreg);
+fail_reg_enable:
+	regulator_put(dd->othc_vreg);
+fail_get_res:
+	pm_runtime_set_suspended(&pd->dev);
+	pm_runtime_disable(&pd->dev);
+
+	kfree(dd);
+	return rc;
+}
+
+static struct platform_driver pm8058_othc_driver = {
+	.driver = {
+		.name = "pm8058-othc",
+		.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm = &pm8058_othc_pm_ops,
+#endif
+	},
+	.probe = pm8058_othc_probe,
+	.remove = __devexit_p(pm8058_othc_remove),
+};
+
+static int __init pm8058_othc_init(void)
+{
+	return platform_driver_register(&pm8058_othc_driver);
+}
+
+static void __exit pm8058_othc_exit(void)
+{
+	platform_driver_unregister(&pm8058_othc_driver);
+}
+/*
+ * Move to late_initcall to make sure that the ADC driver registration is
+ * completed before we open an ADC channel.
+ */
+late_initcall(pm8058_othc_init);
+module_exit(pm8058_othc_exit);
+
+MODULE_ALIAS("platform:pmic8058_othc");
+MODULE_DESCRIPTION("PMIC 8058 OTHC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/misc/pmic8058-pwrkey.c b/drivers/input/misc/pmic8058-pwrkey.c
new file mode 100644
index 0000000..a981013
--- /dev/null
+++ b/drivers/input/misc/pmic8058-pwrkey.c
@@ -0,0 +1,375 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/pmic8058.h>
+#include <linux/pmic8058-pwrkey.h>
+#include <linux/log2.h>
+#include <linux/spinlock.h>
+#include <linux/hrtimer.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+
+#define PON_CNTL_1	0x1C
+#define PON_CNTL_PULL_UP BIT(7)
+#define PON_CNTL_TRIG_DELAY_MASK (0x7)
+
+struct pmic8058_pwrkey {
+	struct input_dev *pwr;
+	int key_press_irq;
+	int key_release_irq;
+	struct pm8058_chip	*pm_chip;
+	struct hrtimer timer;
+	bool key_pressed;
+	bool pressed_first;
+	struct pmic8058_pwrkey_pdata *pdata;
+	spinlock_t lock;
+};
+
+static enum hrtimer_restart pmic8058_pwrkey_timer(struct hrtimer *timer)
+{
+	unsigned long flags;
+	struct pmic8058_pwrkey *pwrkey = container_of(timer,
+						struct pmic8058_pwrkey,	timer);
+
+	spin_lock_irqsave(&pwrkey->lock, flags);
+	pwrkey->key_pressed = true;
+
+	input_report_key(pwrkey->pwr, KEY_POWER, 1);
+	input_sync(pwrkey->pwr);
+	spin_unlock_irqrestore(&pwrkey->lock, flags);
+
+	return HRTIMER_NORESTART;
+}
+
+static irqreturn_t pwrkey_press_irq(int irq, void *_pwrkey)
+{
+	struct pmic8058_pwrkey *pwrkey = _pwrkey;
+	struct pmic8058_pwrkey_pdata *pdata = pwrkey->pdata;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pwrkey->lock, flags);
+	if (pwrkey->pressed_first) {
+		/*
+		 * If the pressed_first flag is already set, then the release
+		 * interrupt occurred first. Events are handled in the release
+		 * IRQ, so return.
+		 */
+		pwrkey->pressed_first = false;
+		spin_unlock_irqrestore(&pwrkey->lock, flags);
+		return IRQ_HANDLED;
+	} else {
+		pwrkey->pressed_first = true;
+		/* no pwrkey time duration means no END key simulation */
+		if (!pwrkey->pdata->pwrkey_time_ms) {
+			input_report_key(pwrkey->pwr, KEY_POWER, 1);
+			input_sync(pwrkey->pwr);
+			spin_unlock_irqrestore(&pwrkey->lock, flags);
+			return IRQ_HANDLED;
+		}
+
+		input_report_key(pwrkey->pwr, KEY_END, 1);
+		input_sync(pwrkey->pwr);
+
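+		/*
+		 * Arm the timer for pwrkey_time_ms; if the key is still held
+		 * when it fires, the timer handler reports KEY_POWER.
+		 */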
+		hrtimer_start(&pwrkey->timer,
+				ktime_set(pdata->pwrkey_time_ms / 1000,
+				(pdata->pwrkey_time_ms % 1000) * 1000000),
+				HRTIMER_MODE_REL);
+	}
+	spin_unlock_irqrestore(&pwrkey->lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t pwrkey_release_irq(int irq, void *_pwrkey)
+{
+	struct pmic8058_pwrkey *pwrkey = _pwrkey;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pwrkey->lock, flags);
+	if (pwrkey->pressed_first) {
+		pwrkey->pressed_first = false;
+		/* no pwrkey time, means no delay in pwr key reporting */
+		if (!pwrkey->pdata->pwrkey_time_ms) {
+			input_report_key(pwrkey->pwr, KEY_POWER, 0);
+			input_sync(pwrkey->pwr);
+			spin_unlock_irqrestore(&pwrkey->lock, flags);
+			return IRQ_HANDLED;
+		}
+
+		hrtimer_cancel(&pwrkey->timer);
+
+		if (pwrkey->key_pressed) {
+			pwrkey->key_pressed = false;
+			input_report_key(pwrkey->pwr, KEY_POWER, 0);
+			input_sync(pwrkey->pwr);
+		}
+
+		input_report_key(pwrkey->pwr, KEY_END, 0);
+		input_sync(pwrkey->pwr);
+	} else {
+		/*
+		 * Set this flag so that the subsequent press interrupt knows
+		 * that the release interrupt came first.
+		 */
+		pwrkey->pressed_first = true;
+		/* no pwrkey time, means no delay in pwr key reporting */
+		if (!pwrkey->pdata->pwrkey_time_ms) {
+			input_report_key(pwrkey->pwr, KEY_POWER, 1);
+			input_sync(pwrkey->pwr);
+			input_report_key(pwrkey->pwr, KEY_POWER, 0);
+			input_sync(pwrkey->pwr);
+			spin_unlock_irqrestore(&pwrkey->lock, flags);
+			return IRQ_HANDLED;
+		}
+		input_report_key(pwrkey->pwr, KEY_END, 1);
+		input_sync(pwrkey->pwr);
+		input_report_key(pwrkey->pwr, KEY_END, 0);
+		input_sync(pwrkey->pwr);
+	}
+	spin_unlock_irqrestore(&pwrkey->lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_PM
+static int pmic8058_pwrkey_suspend(struct device *dev)
+{
+	struct pmic8058_pwrkey *pwrkey = dev_get_drvdata(dev);
+
+	if (device_may_wakeup(dev)) {
+		enable_irq_wake(pwrkey->key_press_irq);
+		enable_irq_wake(pwrkey->key_release_irq);
+	}
+
+	return 0;
+}
+
+static int pmic8058_pwrkey_resume(struct device *dev)
+{
+	struct pmic8058_pwrkey *pwrkey = dev_get_drvdata(dev);
+
+	if (device_may_wakeup(dev)) {
+		disable_irq_wake(pwrkey->key_press_irq);
+		disable_irq_wake(pwrkey->key_release_irq);
+	}
+
+	return 0;
+}
+
+static struct dev_pm_ops pm8058_pwr_key_pm_ops = {
+	.suspend	= pmic8058_pwrkey_suspend,
+	.resume		= pmic8058_pwrkey_resume,
+};
+#endif
+
+static int __devinit pmic8058_pwrkey_probe(struct platform_device *pdev)
+{
+	struct input_dev *pwr;
+	int key_release_irq = platform_get_irq(pdev, 0);
+	int key_press_irq = platform_get_irq(pdev, 1);
+	int err;
+	unsigned int delay;
+	u8 pon_cntl;
+	struct pmic8058_pwrkey *pwrkey;
+	struct pmic8058_pwrkey_pdata *pdata = pdev->dev.platform_data;
+	struct pm8058_chip	*pm_chip;
+
+	pm_chip = dev_get_drvdata(pdev->dev.parent);
+	if (pm_chip == NULL) {
+		dev_err(&pdev->dev, "no parent data passed in\n");
+		return -EFAULT;
+	}
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "power key platform data not supplied\n");
+		return -EINVAL;
+	}
+
+	if (pdata->kpd_trigger_delay_us > 62500) {
+		dev_err(&pdev->dev, "invalid pwr key trigger delay\n");
+		return -EINVAL;
+	}
+
+	if (pdata->pwrkey_time_ms &&
+	     (pdata->pwrkey_time_ms < 500 || pdata->pwrkey_time_ms > 1000)) {
+		dev_err(&pdev->dev, "invalid pwr key time supplied\n");
+		return -EINVAL;
+	}
+
+	pwrkey = kzalloc(sizeof(*pwrkey), GFP_KERNEL);
+	if (!pwrkey)
+		return -ENOMEM;
+
+	pwrkey->pm_chip = pm_chip;
+	pwrkey->pdata   = pdata;
+	pwrkey->pressed_first = false;
+	/* Enable runtime PM ops, start in ACTIVE mode */
+	err = pm_runtime_set_active(&pdev->dev);
+	if (err < 0)
+		dev_dbg(&pdev->dev, "unable to set runtime pm state\n");
+	pm_runtime_enable(&pdev->dev);
+
+	pwr = input_allocate_device();
+	if (!pwr) {
+		dev_dbg(&pdev->dev, "Can't allocate power button\n");
+		err = -ENOMEM;
+		goto free_pwrkey;
+	}
+
+	input_set_capability(pwr, EV_KEY, KEY_POWER);
+	input_set_capability(pwr, EV_KEY, KEY_END);
+
+	pwr->name = "pmic8058_pwrkey";
+	pwr->phys = "pmic8058_pwrkey/input0";
+	pwr->dev.parent = &pdev->dev;
+
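+	/*
+	 * PON_CNTL_1 appears to encode the trigger delay as a 3-bit
+	 * power-of-two exponent; convert the delay in microseconds
+	 * (scaled by 1024/10^6) to that exponent.
+	 */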
+	delay = (pdata->kpd_trigger_delay_us << 10) / USEC_PER_SEC;
+	delay = 1 + ilog2(delay);
+
+	err = pm8058_read(pwrkey->pm_chip, PON_CNTL_1, &pon_cntl, 1);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed reading PON_CNTL_1 err=%d\n", err);
+		goto free_input_dev;
+	}
+
+
+	pon_cntl &= ~PON_CNTL_TRIG_DELAY_MASK;
+	pon_cntl |= (delay & PON_CNTL_TRIG_DELAY_MASK);
+	if (pdata->pull_up)
+		pon_cntl |= PON_CNTL_PULL_UP;
+	else
+		pon_cntl &= ~PON_CNTL_PULL_UP;
+	err = pm8058_write(pwrkey->pm_chip, PON_CNTL_1, &pon_cntl, 1);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed writing PON_CNTL_1 err=%d\n", err);
+		goto free_input_dev;
+	}
+
+	hrtimer_init(&pwrkey->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	pwrkey->timer.function = pmic8058_pwrkey_timer;
+
+	spin_lock_init(&pwrkey->lock);
+
+	err = input_register_device(pwr);
+	if (err) {
+		dev_dbg(&pdev->dev, "Can't register power key: %d\n", err);
+		goto free_input_dev;
+	}
+
+	pwrkey->key_press_irq = key_press_irq;
+	pwrkey->key_release_irq = key_release_irq;
+	pwrkey->pwr = pwr;
+
+	platform_set_drvdata(pdev, pwrkey);
+
+	/* Check if power-key is pressed at boot up */
+	err = pm8058_irq_get_rt_status(pwrkey->pm_chip, key_press_irq);
+	if (err < 0) {
+		dev_err(&pdev->dev, "Key-press status at boot failed rc=%d\n",
+									err);
+		goto unreg_input_dev;
+	}
+	if (err) {
+		if (!pwrkey->pdata->pwrkey_time_ms)
+			input_report_key(pwrkey->pwr, KEY_POWER, 1);
+		else
+			input_report_key(pwrkey->pwr, KEY_END, 1);
+		input_sync(pwrkey->pwr);
+		pwrkey->pressed_first = true;
+	}
+
+	err = request_threaded_irq(key_press_irq, NULL, pwrkey_press_irq,
+			 IRQF_TRIGGER_RISING, "pmic8058_pwrkey_press", pwrkey);
+	if (err < 0) {
+		dev_dbg(&pdev->dev, "Can't get %d IRQ for pwrkey: %d\n",
+				 key_press_irq, err);
+		goto unreg_input_dev;
+	}
+
+	err = request_threaded_irq(key_release_irq, NULL, pwrkey_release_irq,
+			 IRQF_TRIGGER_RISING, "pmic8058_pwrkey_release",
+				 pwrkey);
+	if (err < 0) {
+		dev_dbg(&pdev->dev, "Can't get %d IRQ for pwrkey: %d\n",
+				 key_release_irq, err);
+
+		goto free_press_irq;
+	}
+
+	device_init_wakeup(&pdev->dev, pdata->wakeup);
+
+	return 0;
+
+free_press_irq:
+	free_irq(key_press_irq, pwrkey);
+unreg_input_dev:
+	input_unregister_device(pwr);
+	pwr = NULL;
+free_input_dev:
+	input_free_device(pwr);
+free_pwrkey:
+	pm_runtime_set_suspended(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	kfree(pwrkey);
+	return err;
+}
+
+static int __devexit pmic8058_pwrkey_remove(struct platform_device *pdev)
+{
+	struct pmic8058_pwrkey *pwrkey = platform_get_drvdata(pdev);
+	int key_release_irq = platform_get_irq(pdev, 0);
+	int key_press_irq = platform_get_irq(pdev, 1);
+
+	pm_runtime_set_suspended(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	device_init_wakeup(&pdev->dev, 0);
+
+	free_irq(key_press_irq, pwrkey);
+	free_irq(key_release_irq, pwrkey);
+	input_unregister_device(pwrkey->pwr);
+	kfree(pwrkey);
+
+	return 0;
+}
+
+static struct platform_driver pmic8058_pwrkey_driver = {
+	.probe		= pmic8058_pwrkey_probe,
+	.remove		= __devexit_p(pmic8058_pwrkey_remove),
+	.driver		= {
+		.name	= "pm8058-pwrkey",
+		.owner	= THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm	= &pm8058_pwr_key_pm_ops,
+#endif
+	},
+};
+
+static int __init pmic8058_pwrkey_init(void)
+{
+	return platform_driver_register(&pmic8058_pwrkey_driver);
+}
+module_init(pmic8058_pwrkey_init);
+
+static void __exit pmic8058_pwrkey_exit(void)
+{
+	platform_driver_unregister(&pmic8058_pwrkey_driver);
+}
+module_exit(pmic8058_pwrkey_exit);
+
+MODULE_ALIAS("platform:pmic8058_pwrkey");
+MODULE_DESCRIPTION("PMIC8058 Power Key");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/misc/pmic8058-vib-memless.c b/drivers/input/misc/pmic8058-vib-memless.c
new file mode 100644
index 0000000..ba05400
--- /dev/null
+++ b/drivers/input/misc/pmic8058-vib-memless.c
@@ -0,0 +1,282 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/pmic8058-vibrator.h>
+#include <linux/mfd/pmic8058.h>
+#include <linux/pm.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+
+#define VIB_DRV			0x4A
+
+#define VIB_DRV_SEL_MASK	0xf8
+#define VIB_DRV_SEL_SHIFT	0x03
+#define VIB_DRV_EN_MANUAL_MASK	0xfc
+
+#define VIB_MAX_LEVEL_mV	(3100)
+#define VIB_MIN_LEVEL_mV	(1200)
+#define VIB_MAX_LEVELS		(VIB_MAX_LEVEL_mV - VIB_MIN_LEVEL_mV)
+
+#define MAX_FF_SPEED		0xff
+
+struct pmic8058_vib {
+	struct input_dev *info;
+	spinlock_t lock;
+	struct work_struct work;
+
+	bool enabled;
+	int speed;
+	struct device *dev;
+	struct pmic8058_vibrator_pdata *pdata;
+	int state;
+	int level;
+	u8  reg_vib_drv;
+
+	struct pm8058_chip	*pm_chip;
+};
+
+/* REVISIT: just for debugging, will be removed in final working version */
+static void __dump_vib_regs(struct pmic8058_vib *vib, char *msg)
+{
+	u8 temp;
+
+	dev_dbg(vib->dev, "%s\n", msg);
+
+	pm8058_read(vib->pm_chip, VIB_DRV, &temp, 1);
+	dev_dbg(vib->dev, "VIB_DRV - %X\n", temp);
+}
+
+static int pmic8058_vib_read_u8(struct pmic8058_vib *vib,
+				 u8 *data, u16 reg)
+{
+	int rc;
+
+	rc = pm8058_read(vib->pm_chip, reg, data, 1);
+	if (rc < 0)
+		dev_warn(vib->dev, "Error reading pmic8058: %X - ret %X\n",
+				reg, rc);
+
+	return rc;
+}
+
+static int pmic8058_vib_write_u8(struct pmic8058_vib *vib,
+				 u8 data, u16 reg)
+{
+	int rc;
+
+	rc = pm8058_write(vib->pm_chip, reg, &data, 1);
+	if (rc < 0)
+		dev_warn(vib->dev, "Error writing pmic8058: %X - ret %X\n",
+				reg, rc);
+	return rc;
+}
+
+static int pmic8058_vib_set(struct pmic8058_vib *vib, int on)
+{
+	int rc;
+	u8 val;
+
+	if (on) {
+		val = vib->reg_vib_drv;
+		val |= ((vib->level << VIB_DRV_SEL_SHIFT) & VIB_DRV_SEL_MASK);
+		rc = pmic8058_vib_write_u8(vib, val, VIB_DRV);
+		if (rc < 0)
+			return rc;
+		vib->reg_vib_drv = val;
+		vib->enabled = 1;
+
+	} else {
+		val = vib->reg_vib_drv;
+		val &= ~VIB_DRV_SEL_MASK;
+		rc = pmic8058_vib_write_u8(vib, val, VIB_DRV);
+		if (rc < 0)
+			return rc;
+		vib->reg_vib_drv = val;
+		vib->enabled = 0;
+	}
+	__dump_vib_regs(vib, "vib_set_end");
+
+	return rc;
+}
+
+static void pmic8058_work_handler(struct work_struct *work)
+{
+	u8 val;
+	int rc;
+	struct pmic8058_vib *info;
+
+	info  = container_of(work, struct pmic8058_vib, work);
+
+	rc = pmic8058_vib_read_u8(info, &val, VIB_DRV);
+	if (rc < 0)
+		return;
+
+	/*
+	 * The vibrator supports voltages from 1.2 V to 3.1 V, so
+	 * scale the FF speed to this range.
+	 */
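+	/* The VIB_DRV level field appears to be programmed in 100 mV steps. */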
+	if (info->speed) {
+		info->state = 1;
+		info->level = ((VIB_MAX_LEVELS * info->speed) / MAX_FF_SPEED) +
+						VIB_MIN_LEVEL_mV;
+		info->level /= 100;
+	} else {
+		info->state = 0;
+		info->level = VIB_MIN_LEVEL_mV / 100;
+	}
+	pmic8058_vib_set(info, info->state);
+}
+
+static int pmic8058_vib_play_effect(struct input_dev *dev, void *data,
+		      struct ff_effect *effect)
+{
+	struct pmic8058_vib *info = input_get_drvdata(dev);
+
+	info->speed = effect->u.rumble.strong_magnitude >> 8;
+	if (!info->speed)
+		info->speed = effect->u.rumble.weak_magnitude >> 9;
+	schedule_work(&info->work);
+	return 0;
+}
+
+static int __devinit pmic8058_vib_probe(struct platform_device *pdev)
+
+{
+	struct pmic8058_vibrator_pdata *pdata = pdev->dev.platform_data;
+	struct pmic8058_vib *vib;
+	u8 val;
+	int rc;
+
+	struct pm8058_chip	*pm_chip;
+
+	pm_chip = dev_get_drvdata(pdev->dev.parent);
+	if (pm_chip == NULL) {
+		dev_err(&pdev->dev, "no parent data passed in\n");
+		return -EFAULT;
+	}
+
+	if (!pdata)
+		return -EINVAL;
+
+	if (pdata->level_mV < VIB_MIN_LEVEL_mV ||
+			 pdata->level_mV > VIB_MAX_LEVEL_mV)
+		return -EINVAL;
+
+	vib = kzalloc(sizeof(*vib), GFP_KERNEL);
+	if (!vib)
+		return -ENOMEM;
+
+	vib->pm_chip	= pm_chip;
+	vib->enabled	= 0;
+	vib->pdata	= pdata;
+	vib->level	= pdata->level_mV / 100;
+	vib->dev	= &pdev->dev;
+
+	spin_lock_init(&vib->lock);
+	INIT_WORK(&vib->work, pmic8058_work_handler);
+
+	vib->info = input_allocate_device();
+
+	if (vib->info == NULL) {
+		dev_err(&pdev->dev, "couldn't allocate input device\n");
+		rc = -ENOMEM;
+		goto err_read_vib;
+	}
+
+	input_set_drvdata(vib->info, vib);
+
+	vib->info->name = "pmic8058:vibrator";
+	vib->info->id.version = 1;
+	vib->info->dev.parent = pdev->dev.parent;
+
+	__set_bit(FF_RUMBLE, vib->info->ffbit);
+	__dump_vib_regs(vib, "boot_vib_default");
+
+	/* operate in manual mode */
+	rc = pmic8058_vib_read_u8(vib, &val, VIB_DRV);
+	if (rc < 0)
+		goto err_read_vib;
+	val &= ~VIB_DRV_EN_MANUAL_MASK;
+	rc = pmic8058_vib_write_u8(vib, val, VIB_DRV);
+	if (rc < 0)
+		goto err_read_vib;
+
+	vib->reg_vib_drv = val;
+
+	rc = input_ff_create_memless(vib->info, NULL, pmic8058_vib_play_effect);
+	if (rc < 0) {
+		dev_dbg(&pdev->dev, "couldn't register vibrator to FF\n");
+		goto create_memless_err;
+	}
+
+	platform_set_drvdata(pdev, vib);
+
+	rc = input_register_device(vib->info);
+	if (rc < 0) {
+		dev_dbg(&pdev->dev, "couldn't register input device\n");
+		goto reg_err;
+	}
+
+	return 0;
+
+reg_err:
+	input_ff_destroy(vib->info);
+create_memless_err:
+	input_free_device(vib->info);
+err_read_vib:
+	kfree(vib);
+	return rc;
+}
+
+static int __devexit pmic8058_vib_remove(struct platform_device *pdev)
+{
+	struct pmic8058_vib *vib = platform_get_drvdata(pdev);
+
+	cancel_work_sync(&vib->work);
+	if (vib->enabled)
+		pmic8058_vib_set(vib, 0);
+
+	input_unregister_device(vib->info);
+	kfree(vib);
+
+	return 0;
+}
+
+static struct platform_driver pmic8058_vib_driver = {
+	.probe		= pmic8058_vib_probe,
+	.remove		= __devexit_p(pmic8058_vib_remove),
+	.driver		= {
+		.name	= "pm8058-vib",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init pmic8058_vib_init(void)
+{
+	return platform_driver_register(&pmic8058_vib_driver);
+}
+module_init(pmic8058_vib_init);
+
+static void __exit pmic8058_vib_exit(void)
+{
+	platform_driver_unregister(&pmic8058_vib_driver);
+}
+module_exit(pmic8058_vib_exit);
+
+MODULE_ALIAS("platform:pmic8058_vib");
+MODULE_DESCRIPTION("PMIC8058 vibrator driver memless framework");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index b3cfb9c..02838e3 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -30,10 +30,12 @@
 /**
  * struct pmic8xxx_pwrkey - pmic8xxx pwrkey information
  * @key_press_irq: key press irq number
+ * @pdata: platform data
  */
 struct pmic8xxx_pwrkey {
 	struct input_dev *pwr;
 	int key_press_irq;
+	const struct pm8xxx_pwrkey_platform_data *pdata;
 };
 
 static irqreturn_t pwrkey_press_irq(int irq, void *_pwrkey)
@@ -107,6 +109,8 @@
 	if (!pwrkey)
 		return -ENOMEM;
 
+	pwrkey->pdata = pdata;
+
 	pwr = input_allocate_device();
 	if (!pwr) {
 		dev_dbg(&pdev->dev, "Can't allocate power button\n");
@@ -153,7 +157,7 @@
 
 	platform_set_drvdata(pdev, pwrkey);
 
-	err = request_irq(key_press_irq, pwrkey_press_irq,
+	err = request_threaded_irq(key_press_irq, NULL, pwrkey_press_irq,
 		IRQF_TRIGGER_RISING, "pmic8xxx_pwrkey_press", pwrkey);
 	if (err < 0) {
 		dev_dbg(&pdev->dev, "Can't get %d IRQ for pwrkey: %d\n",
@@ -161,7 +165,7 @@
 		goto unreg_input_dev;
 	}
 
-	err = request_irq(key_release_irq, pwrkey_release_irq,
+	err = request_threaded_irq(key_release_irq, NULL, pwrkey_release_irq,
 		 IRQF_TRIGGER_RISING, "pmic8xxx_pwrkey_release", pwrkey);
 	if (err < 0) {
 		dev_dbg(&pdev->dev, "Can't get %d IRQ for pwrkey: %d\n",
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 9c1e6ee..9f74e88 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -322,4 +322,13 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called synaptics_i2c.
 
+config MOUSE_QCITP
+	tristate "Quanta Computer Inc. Touchpad"
+	depends on I2C
+	default n
+	help
+	  Say Y here if you want to use the Quanta touchpad driver for Quanta
+	  smartbook platforms.
+
+
 endif
diff --git a/drivers/input/mouse/qci_touchpad.c b/drivers/input/mouse/qci_touchpad.c
new file mode 100644
index 0000000..ef93a7e
--- /dev/null
+++ b/drivers/input/mouse/qci_touchpad.c
@@ -0,0 +1,309 @@
+/* Quanta I2C Touchpad Driver
+ *
+ * Copyright (C) 2009 Quanta Computer Inc.
+ * Author: Hsin Wu <hsin.wu@quantatw.com>
+ * Author: Austin Lai <austin.lai@quantatw.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * This driver handles I/O communication over the I2C interface for ON2 of
+ * AP BU. It works only with the Nuvoton WPCE775x embedded controller.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/keyboard.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+
+#define TOUCHPAD_ID_NAME          "qci-i2cpad"
+#define TOUCHPAD_NAME                "PS2 Touchpad"
+#define TOUCHPAD_DEVICE             "/i2c/input1"
+#define TOUCHPAD_CMD_ENABLE             0xF4
+#define TOUCHPAD_INIT_DELAY_MS    100
+
+static int __devinit qcitp_probe(struct i2c_client *client,
+	const struct i2c_device_id *id);
+static int __devexit qcitp_remove(struct i2c_client *kbd);
+
+/* General structure to hold the driver data */
+struct i2ctpad_drv_data {
+	struct i2c_client *ti2c_client;
+	struct work_struct work;
+	struct input_dev *qcitp_dev;
+	struct kobject *tp_kobj;
+	unsigned int  qcitp_gpio;
+	unsigned int  qcitp_irq;
+	char ecdata[8];
+};
+
+static int tp_sense_val = 10;
+static ssize_t tp_sensitive_show(struct kobject *kobj,
+	struct kobj_attribute *attr, char * buf)
+{
+	return sprintf(buf, "%d\n", tp_sense_val);
+}
+
+static ssize_t tp_sensitive_store(struct kobject *kobj,
+	struct kobj_attribute *attr, const char* buf, size_t n)
+{
+	unsigned int val = 0;
+
+	if (sscanf(buf, "%u", &val) != 1)
+		return -EINVAL;
+
+	if (val >= 1 && val <= 10)
+		tp_sense_val = val;
+	else
+		return -EINVAL;
+
+	return n;
+}
+
+static struct kobj_attribute tp_sensitivity = __ATTR(tp_sensitivity,
+						     0644,
+						     tp_sensitive_show,
+						     tp_sensitive_store);
+
+static struct attribute *g_tp[] = {
+	&tp_sensitivity.attr,
+	NULL,
+};
+
+static struct attribute_group attr_group = {
+	.attrs = g_tp,
+};
+
+/*-----------------------------------------------------------------------------
+ * Driver functions
+ *---------------------------------------------------------------------------*/
+
+#ifdef CONFIG_PM
+static int qcitp_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int qcitp_resume(struct device *dev)
+{
+	return 0;
+}
+#endif
+
+static const struct i2c_device_id qcitp_idtable[] = {
+	{ TOUCHPAD_ID_NAME, 0 },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(i2c, qcitp_idtable);
+#ifdef CONFIG_PM
+static const struct dev_pm_ops qcitp_pm_ops = {
+	.suspend  = qcitp_suspend,
+	.resume   = qcitp_resume,
+};
+#endif
+static struct i2c_driver i2ctp_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name  = TOUCHPAD_ID_NAME,
+#ifdef CONFIG_PM
+		.pm = &qcitp_pm_ops,
+#endif
+	},
+	.probe	  = qcitp_probe,
+	.remove	  = __devexit_p(qcitp_remove),
+	.id_table = qcitp_idtable,
+};
+
+static void qcitp_fetch_data(struct i2c_client *tpad_client,
+	char *ec_data)
+{
+	struct i2c_msg tp_msg;
+	int ret;
+	tp_msg.addr = tpad_client->addr;
+	tp_msg.flags = I2C_M_RD;
+	tp_msg.len = 3;
+	tp_msg.buf = (char *)&ec_data[0];
+	ret = i2c_transfer(tpad_client->adapter, &tp_msg, 1);
+	if (ret != 1)
+		pr_err("[TouchPad] i2c read failed\n");
+}
+
+static void qcitp_report_key(struct input_dev *tpad_dev, char *ec_data)
+{
+	int dx = 0;
+	int dy = 0;
+
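+	/*
+	 * Byte 0 of the EC packet carries the button bits and the X/Y sign
+	 * bits; bytes 1 and 2 carry the movement, sign-extended to 9 bits
+	 * below (a PS/2-style mouse packet).
+	 */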
+	if (ec_data[1])
+		dx = (int) ec_data[1] -
+		     (int) ((ec_data[0] << 4) & 0x100);
+
+	if (ec_data[2])
+		dy = (int) ((ec_data[0] << 3) & 0x100) -
+		     (int) ec_data[2];
+
+	dx = (dx * tp_sense_val)/10;
+	dy = (dy * tp_sense_val)/10;
+
+	input_report_key(tpad_dev, BTN_LEFT, ec_data[0] & 0x01);
+	input_report_key(tpad_dev, BTN_RIGHT, ec_data[0] & 0x02);
+	input_report_key(tpad_dev, BTN_MIDDLE, ec_data[0] & 0x04);
+	input_report_rel(tpad_dev, REL_X, dx);
+	input_report_rel(tpad_dev, REL_Y, dy);
+	input_sync(tpad_dev);
+}
+
+static void qcitp_work_handler(struct work_struct *_work)
+{
+	struct i2ctpad_drv_data *itpad_drv_data =
+		container_of(_work, struct i2ctpad_drv_data, work);
+
+	struct i2c_client *itpad_client = itpad_drv_data->ti2c_client;
+	struct input_dev *itpad_dev = itpad_drv_data->qcitp_dev;
+
+	qcitp_fetch_data(itpad_client, itpad_drv_data->ecdata);
+	qcitp_report_key(itpad_dev, itpad_drv_data->ecdata);
+}
+
+static irqreturn_t qcitp_interrupt(int irq, void *dev_id)
+{
+	struct i2ctpad_drv_data *itpad_drv_data = dev_id;
+	schedule_work(&itpad_drv_data->work);
+	return IRQ_HANDLED;
+}
+
+static int __devinit qcitp_probe(struct i2c_client *client,
+				    const struct i2c_device_id *id)
+{
+	int err = -ENOMEM;
+	struct i2ctpad_drv_data *context = 0;
+
+	context = kzalloc(sizeof(struct i2ctpad_drv_data), GFP_KERNEL);
+	if (!context)
+		return err;
+	i2c_set_clientdata(client, context);
+	context->ti2c_client = client;
+	context->qcitp_gpio = client->irq;
+
+	/* Enable mouse */
+	i2c_smbus_write_byte(client, TOUCHPAD_CMD_ENABLE);
+	msleep(TOUCHPAD_INIT_DELAY_MS);
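+	/* presumably drains the EC's acknowledge byte for the enable command */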
+	i2c_smbus_read_byte(client);
+	/*allocate and register input device*/
+	context->qcitp_dev = input_allocate_device();
+	if (!context->qcitp_dev) {
+		pr_err("[TouchPad] allocting memory fail\n");
+		err = -ENOMEM;
+		goto allocate_fail;
+	}
+	context->qcitp_dev->name        = TOUCHPAD_NAME;
+	context->qcitp_dev->phys         = TOUCHPAD_DEVICE;
+	context->qcitp_dev->id.bustype = BUS_I2C;
+	context->qcitp_dev->id.vendor  = 0x1050;
+	context->qcitp_dev->id.product = 0x1;
+	context->qcitp_dev->id.version = 0x1;
+	context->qcitp_dev->evbit[0]  = BIT_MASK(EV_KEY) |
+					BIT_MASK(EV_REL);
+	context->qcitp_dev->relbit[0] = BIT_MASK(REL_X) |
+					BIT_MASK(REL_Y);
+	context->qcitp_dev->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) |
+							 BIT_MASK(BTN_MIDDLE) |
+							 BIT_MASK(BTN_RIGHT);
+
+	input_set_drvdata(context->qcitp_dev, context);
+	err = input_register_device(context->qcitp_dev);
+	if (err) {
+		pr_err("[TouchPad] register device fail\n");
+		goto register_fail;
+	}
+
+	/* request interrupt */
+	INIT_WORK(&context->work, qcitp_work_handler);
+
+	err = gpio_request(context->qcitp_gpio, "qci-pad");
+	if (err) {
+		pr_err("[TouchPad]err gpio request\n");
+		goto gpio_request_fail;
+	}
+
+	context->qcitp_irq = gpio_to_irq(context->qcitp_gpio);
+	err = request_irq(context->qcitp_irq,
+			  qcitp_interrupt,
+			  IRQF_TRIGGER_FALLING,
+			  TOUCHPAD_ID_NAME,
+			  context);
+	if (err) {
+		pr_err("[TouchPad] unable to get IRQ\n");
+		goto request_irq_fail;
+	}
+	/* create the touchpad sysfs kobject */
+	context->tp_kobj = kobject_create_and_add("touchpad", NULL);
+	if (context->tp_kobj) {
+		err = sysfs_create_group(context->tp_kobj, &attr_group);
+		if (err)
+			pr_warning("[TouchPad] sysfs create fail\n");
+	} else {
+		pr_warning("[TouchPad] kobject create fail\n");
+	}
+
+	tp_sense_val = 10;
+
+	return 0;
+
+request_irq_fail:
+	gpio_free(context->qcitp_gpio);
+
+gpio_request_fail:
+	input_unregister_device(context->qcitp_dev);
+
+register_fail:
+	input_free_device(context->qcitp_dev);
+
+allocate_fail:
+	i2c_set_clientdata(client, NULL);
+	kfree(context);
+	return err;
+}
+
+static int __devexit qcitp_remove(struct i2c_client *dev)
+{
+	struct i2ctpad_drv_data *context = i2c_get_clientdata(dev);
+
+	free_irq(context->qcitp_irq, context);
+	gpio_free(context->qcitp_gpio);
+	input_unregister_device(context->qcitp_dev);
+	kfree(context);
+
+	return 0;
+}
+
+static int __init qcitp_init(void)
+{
+	return i2c_add_driver(&i2ctp_driver);
+}
+
+
+static void __exit qcitp_exit(void)
+{
+	i2c_del_driver(&i2ctp_driver);
+}
+
+module_init(qcitp_init);
+module_exit(qcitp_exit);
+
+MODULE_AUTHOR("Quanta Computer Inc.");
+MODULE_DESCRIPTION("Quanta Embedded Controller I2C Touch Pad Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 4104103..aaa650b 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -56,6 +56,22 @@
 	  module will be called ad7877.
 
 config TOUCHSCREEN_AD7879
+	tristate
+	default n
+
+config TOUCHSCREEN_ATMEL_MAXTOUCH
+       tristate "Atmel maXTouch based touchscreens"
+       depends on I2C
+       default n
+       help
+         Say Y here if you have an Atmel Maxtouch based touchscreen.
+
+         If unsure, say N.
+
+         To compile this driver as a module, choose M here: the
+         module will be called maXTouch.
+
+config TOUCHSCREEN_AD7879
 	tristate "Analog Devices AD7879-1/AD7889-1 touchscreen interface"
 	help
 	  Say Y here if you want to support a touchscreen interface using
@@ -363,6 +379,15 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called penmount.
 
+config TOUCHSCREEN_MSM
+	bool "Qualcomm MSM touchscreen controller"
+	depends on ARCH_MSM7X30 && MARIMBA_TSADC
+	default n
+	help
+	  Say Y here if you have a 4-wire resistive touchscreen panel
+	  connected to the TSSC touchscreen controller on a
+	  Qualcomm MSM/QSD based SoC.
+
 config TOUCHSCREEN_MIGOR
 	tristate "Renesas MIGO-R touchscreen"
 	depends on SH_MIGOR && I2C
@@ -679,6 +704,27 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called tsc2007.
 
+config TOUCHSCREEN_MSM_LEGACY
+	default n
+	tristate "MSM Touchscreen"
+	depends on ARCH_MSM && !ARCH_MSM7X30
+	help
+	  Say Y here if you have a touchscreen interface using MSM
+	  touchscreen controller.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called msm_touch.
+
+config ANDROID_TOUCHSCREEN_MSM_HACKS
+	default y
+	depends on TOUCHSCREEN_MSM_LEGACY
+	bool "Android MSM Touchscreen hacks"
+	help
+	  Say Y here if you are running Android framework on Qualcomm
+	  MSM/QSD based Surf or FFAs. These hacks are required inorder
+	  to Android framework to receive adjusted x, y co-ordinates
+	  until proper calibration framework is in place.
+
 config TOUCHSCREEN_W90X900
 	tristate "W90P910 touchscreen driver"
 	depends on HAVE_CLK
@@ -732,4 +778,31 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called tps6507x_ts.
 
+config TOUCHSCREEN_CY8C_TS
+	tristate "Cypress TMA300-TMG200 based touchscreens"
+	depends on I2C
+	default n
+	help
+	  Say Y here if you have a Cypress TMA300/TMG200 based touchscreen.
+	  TMA300 is a multi-touch screen which can report up to 10
+	  touches at a time. TMG200 supports 2 touches.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called cy8c_ts.
+
+config TOUCHSCREEN_CYTTSP_I2C
+       tristate "Cypress TTSP based touchscreens"
+       depends on I2C
+       default n
+       help
+         Say Y here if you have a Cypress TTSP based touchscreen.
+         TMA300 is a multi-touch screen which can report up to 10
+         touches at a time.
+
+         If unsure, say N.
+
+         To compile this driver as a module, choose M here: the
+         module will be called cyttsp-i2c.
 endif
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 0738f19..bfe9daf 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -12,16 +12,19 @@
 obj-$(CONFIG_TOUCHSCREEN_AD7879_I2C)	+= ad7879-i2c.o
 obj-$(CONFIG_TOUCHSCREEN_AD7879_SPI)	+= ad7879-spi.o
 obj-$(CONFIG_TOUCHSCREEN_ADS7846)	+= ads7846.o
+obj-$(CONFIG_TOUCHSCREEN_ATMEL_MAXTOUCH)	+= atmel_maxtouch.o
 obj-$(CONFIG_TOUCHSCREEN_ATMEL_MXT)	+= atmel_mxt_ts.o
 obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC)	+= atmel_tsadcc.o
 obj-$(CONFIG_TOUCHSCREEN_BITSY)		+= h3600_ts_input.o
 obj-$(CONFIG_TOUCHSCREEN_BU21013)       += bu21013_ts.o
+obj-$(CONFIG_TOUCHSCREEN_CYPRESS_TMG)	+= cy8c_tmg_ts.o
 obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110)	+= cy8ctmg110_ts.o
 obj-$(CONFIG_TOUCHSCREEN_DA9034)	+= da9034-ts.o
 obj-$(CONFIG_TOUCHSCREEN_DYNAPRO)	+= dynapro.o
 obj-$(CONFIG_TOUCHSCREEN_HAMPSHIRE)	+= hampshire.o
 obj-$(CONFIG_TOUCHSCREEN_GUNZE)		+= gunze.o
 obj-$(CONFIG_TOUCHSCREEN_EETI)		+= eeti_ts.o
+obj-$(CONFIG_TOUCHSCREEN_ELAN_I2C_8232) += elan8232_i2c.o
 obj-$(CONFIG_TOUCHSCREEN_ELO)		+= elo.o
 obj-$(CONFIG_TOUCHSCREEN_FUJITSU)	+= fujitsu_ts.o
 obj-$(CONFIG_TOUCHSCREEN_INEXIO)	+= inexio.o
@@ -33,6 +36,7 @@
 obj-$(CONFIG_TOUCHSCREEN_MIGOR)		+= migor_ts.o
 obj-$(CONFIG_TOUCHSCREEN_MTOUCH)	+= mtouch.o
 obj-$(CONFIG_TOUCHSCREEN_MK712)		+= mk712.o
+obj-$(CONFIG_TOUCHSCREEN_MSM)		+= msm_ts.o
 obj-$(CONFIG_TOUCHSCREEN_HP600)		+= hp680_ts_input.o
 obj-$(CONFIG_TOUCHSCREEN_HP7XX)		+= jornada720_ts.o
 obj-$(CONFIG_TOUCHSCREEN_HTCPEN)	+= htcpen.o
@@ -61,3 +65,6 @@
 obj-$(CONFIG_TOUCHSCREEN_WM97XX_ZYLONITE)	+= zylonite-wm97xx.o
 obj-$(CONFIG_TOUCHSCREEN_W90X900)	+= w90p910_ts.o
 obj-$(CONFIG_TOUCHSCREEN_TPS6507X)	+= tps6507x-ts.o
+obj-$(CONFIG_TOUCHSCREEN_MSM_LEGACY)		+= msm_touch.o
+obj-$(CONFIG_TOUCHSCREEN_CY8C_TS)	+= cy8c_ts.o
+obj-$(CONFIG_TOUCHSCREEN_CYTTSP_I2C)       += cyttsp-i2c.o
diff --git a/drivers/input/touchscreen/atmel_maxtouch.c b/drivers/input/touchscreen/atmel_maxtouch.c
new file mode 100644
index 0000000..de1834e
--- /dev/null
+++ b/drivers/input/touchscreen/atmel_maxtouch.c
@@ -0,0 +1,2338 @@
+/*
+ *  Atmel maXTouch Touchscreen Controller Driver
+ *
+ *  
+ *  Copyright (C) 2010 Atmel Corporation
+ *  Copyright (C) 2010 Ulf Samuelsson (ulf@atmel.com)
+ *  Copyright (C) 2009 Raphael Derosso Pereira <raphaelpereira@gmail.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * 
+ * Driver for Atmel maXTouch family of touch controllers.
+ *
+ */
+
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/debugfs.h>
+#include <linux/cdev.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+
+#include <asm/uaccess.h>
+
+#include <linux/atmel_maxtouch.h>
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+#include <linux/earlysuspend.h>
+
+/* Early-suspend level */
+#define MXT_SUSPEND_LEVEL 1
+#endif
+
+
+#define DRIVER_VERSION "0.91a_mod"
+
+static int debug = DEBUG_INFO;
+static int comms = 0;
+module_param(debug, int, 0644);
+module_param(comms, int, 0644);
+
+MODULE_PARM_DESC(debug, "Activate debugging output");
+MODULE_PARM_DESC(comms, "Select communications mode");
+
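+/* GEN_POWERCONFIG_T7 holds three configuration bytes (idle, active and
+ * active-to-idle intervals) */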
+#define T7_DATA_SIZE 3
+
+/* Device Info descriptor */
+/* Parsed from maXTouch "Id information" inside device */
+struct mxt_device_info {
+	u8   family_id;
+	u8   variant_id;
+	u8   major;
+	u8   minor;
+	u8   build;
+	u8   num_objs;
+	u8   x_size;
+	u8   y_size;
+	char family_name[16];	 /* Family name */
+	char variant_name[16];    /* Variant name */
+	u16  num_nodes;           /* Number of sensor nodes */
+};
+
+/* object descriptor table, parsed from maXTouch "object table" */
+struct mxt_object {
+	u16 chip_addr;
+	u8  type;
+	u8  size;
+	u8  instances;
+	u8  num_report_ids;
+};
+
+
+/* Mapping from report id to object type and instance */
+struct report_id_map {
+	u8  object;
+	u8  instance;
+/*
+ * This is the first report ID belonging to object. It enables us to
+ * find out easily the touch number: each touch has different report
+ * ID (which are assigned to touches in increasing order). By
+ * subtracting the first report ID from current, we get the touch
+ * number.
+ */
+	u8  first_rid;
+};
+
+
+/* Driver datastructure */
+struct mxt_data {
+	struct i2c_client    *client;
+	struct input_dev     *input;
+	char                 phys_name[32];
+	int                  irq;
+
+	u16                  last_read_addr;
+	bool                 new_msgs;
+	u8                   *last_message;
+
+	int                  valid_irq_counter;
+	int                  invalid_irq_counter;
+	int                  irq_counter;
+	int                  message_counter;
+	int                  read_fail_counter;
+
+
+	int                  bytes_to_read;
+
+	struct delayed_work  dwork;
+	u8                   xpos_format;
+	u8                   ypos_format;
+
+	u8                   numtouch;
+
+	struct mxt_device_info	device_info;
+
+	u32		     info_block_crc;
+	u32                  configuration_crc;
+	u16                  report_id_count;
+	struct report_id_map *rid_map;
+	struct mxt_object    *object_table;
+
+	u16                  msg_proc_addr;
+	u8                   message_size;
+
+	u16                  min_x_val;
+	u16                  min_y_val;
+	u16                  max_x_val;
+	u16                  max_y_val;
+
+	int                  (*init_hw)(struct i2c_client *client);
+	int		     (*exit_hw)(struct i2c_client *client);
+	int		     (*power_on)(bool on);
+	u8                   (*valid_interrupt)(void);
+	u8                   (*read_chg)(void);
+
+	/* debugfs variables */
+	struct dentry        *debug_dir;
+	int                  current_debug_datap;
+
+	struct mutex         debug_mutex;
+	u16                  *debug_data;
+
+        /* Character device variables */
+	struct cdev          cdev;
+	struct cdev          cdev_messages;  /* 2nd Char dev for messages */
+	dev_t                dev_num;
+	struct class         *mxt_class;
+
+
+	u16                  address_pointer;
+	bool                 valid_ap;
+
+	/* Message buffer & pointers */
+	char                 *messages;
+	int                  msg_buffer_startp, msg_buffer_endp;
+        /* Put only non-touch messages to buffer if this is set */
+	char                 nontouch_msg_only; 
+	struct mutex         msg_mutex;
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+	struct early_suspend		early_suspend;
+#endif
+	u8 t7_data[T7_DATA_SIZE];
+	bool is_suspended;
+};
+/*default value, enough to read versioning*/
+#define CONFIG_DATA_SIZE	6
+static u16 t38_size = CONFIG_DATA_SIZE;
+static int mxt_read_block(struct i2c_client *client, u16 addr, u16 length,
+			  u8 *value);
+static int mxt_write_byte(struct i2c_client *client, u16 addr, u8 value);
+static int mxt_write_block(struct i2c_client *client, u16 addr, u16 length,
+			   u8 *value);
+static u8 mxt_valid_interrupt_dummy(void)
+{
+	return 1;
+}
+
+#define I2C_RETRY_COUNT 5
+#define I2C_PAYLOAD_SIZE 254
+
+/* Returns the start address of object in mXT memory. */
+#define	MXT_BASE_ADDR(object_type, mxt)					\
+	get_object_address(object_type, 0, mxt->object_table,           \
+			   mxt->device_info.num_objs)
+
+/* Maps a report ID to an object type (object type number). */
+#define	REPORT_ID_TO_OBJECT(rid, mxt)			\
+	(((rid) == 0xff) ? 0 : mxt->rid_map[rid].object)
+
+/* Maps a report ID to an object type (string). */
+#define	REPORT_ID_TO_OBJECT_NAME(rid, mxt)			\
+	object_type_name[REPORT_ID_TO_OBJECT(rid, mxt)]
+
+/* Returns non-zero if given object is a touch object */
+#define IS_TOUCH_OBJECT(object) \
+	((object == MXT_TOUCH_MULTITOUCHSCREEN_T9) || \
+	 (object == MXT_TOUCH_KEYARRAY_T15) ||	\
+	 (object == MXT_TOUCH_PROXIMITY_T23) || \
+	 (object == MXT_TOUCH_SINGLETOUCHSCREEN_T10) || \
+	 (object == MXT_TOUCH_XSLIDER_T11) || \
+	 (object == MXT_TOUCH_YSLIDER_T12) || \
+	 (object == MXT_TOUCH_XWHEEL_T13) || \
+	 (object == MXT_TOUCH_YWHEEL_T14) || \
+	 (object == MXT_TOUCH_KEYSET_T31) || \
+	 (object == MXT_TOUCH_XSLIDERSET_T32) ? 1 : 0)
+
+#define mxt_debug(level, ...) \
+	do { \
+		if (debug >= (level)) \
+			pr_debug(__VA_ARGS__); \
+	} while (0) 
+
+
+/* 
+ * Check whether we have a multi-touch enabled kernel; if not, report just the
+ * first touch (on mXT224, the maximum is 10 simultaneous touches).
+ * Because just the first one is reported, it might seem that the screen is
+ * not responding to touch if the first touch is removed while the screen is
+ * being touched by another finger, so beware.
+ *
+ */
+
+#ifdef ABS_MT_TRACKING_ID
+static inline void report_mt(int touch_number, int size, int x, int y, struct
+			mxt_data *mxt) {
+	input_report_abs(mxt->input, ABS_MT_TRACKING_ID, touch_number);
+	input_report_abs(mxt->input, ABS_MT_TOUCH_MAJOR, size);
+	input_report_abs(mxt->input, ABS_MT_POSITION_X, x);
+	input_report_abs(mxt->input, ABS_MT_POSITION_Y, y);
+	input_mt_sync(mxt->input);
+}
+#else
+static inline void report_mt(int touch_number, int size, int x, int y, struct
+			mxt_data *mxt) {
+	if (touch_number == 0) {
+		input_report_abs(mxt->input, ABS_TOOL_WIDTH, size);
+		input_report_abs(mxt->input, ABS_X, x);
+		input_report_abs(mxt->input, ABS_Y, y);
+	}
+}
+#endif
+
+
+static inline void report_gesture(int data, struct mxt_data *mxt)
+{
+	input_event(mxt->input, EV_MSC, MSC_GESTURE, data); 
+}
+
+
+static const u8	*object_type_name[] = {
+	[0]  = "Reserved",
+	[5]  = "GEN_MESSAGEPROCESSOR_T5",
+	[6]  = "GEN_COMMANDPROCESSOR_T6",
+	[7]  = "GEN_POWERCONFIG_T7",
+	[8]  = "GEN_ACQUIRECONFIG_T8",
+	[9]  = "TOUCH_MULTITOUCHSCREEN_T9",
+	[15] = "TOUCH_KEYARRAY_T15",
+	[17] = "SPT_COMMSCONFIG_T18",
+	[19] = "SPT_GPIOPWM_T19",
+	[20] = "PROCI_GRIPFACESUPPRESSION_T20",
+	[22] = "PROCG_NOISESUPPRESSION_T22",
+	[23] = "TOUCH_PROXIMITY_T23",
+	[24] = "PROCI_ONETOUCHGESTUREPROCESSOR_T24",
+	[25] = "SPT_SELFTEST_T25",
+	[27] = "PROCI_TWOTOUCHGESTUREPROCESSOR_T27",
+	[28] = "SPT_CTECONFIG_T28",
+	[37] = "DEBUG_DIAGNOSTICS_T37",
+	[38] = "SPT_USER_DATA_T38",
+	[40] = "PROCI_GRIPSUPPRESSION_T40",
+	[41] = "PROCI_PALMSUPPRESSION_T41",
+	[42] = "PROCI_FACESUPPRESSION_T42",
+	[43] = "SPT_DIGITIZER_T43",
+	[44] = "SPT_MESSAGECOUNT_T44",
+};
+
+
+static u16 get_object_address(uint8_t object_type,
+			      uint8_t instance,
+			      struct mxt_object *object_table,
+			      int max_objs);
+
+int mxt_write_ap(struct mxt_data *mxt, u16 ap);
+
+static int mxt_read_block_wo_addr(struct i2c_client *client,
+			   u16 length,
+				u8 *value);
+
+ssize_t debug_data_read(struct mxt_data *mxt, char *buf, size_t count, 
+			loff_t *ppos, u8 debug_command){
+	int i;
+	u16 *data;
+	u16 diagnostics_reg;
+	int offset = 0;
+	int size;
+	int read_size;
+	int error;
+	char *buf_start;
+	u16 debug_data_addr;
+	u16 page_address;
+	u8 page;
+	u8 debug_command_reg;
+
+	data = mxt->debug_data;
+	if (data == NULL)
+		return -EIO;
+
+	/* If first read after open, read all data to buffer. */
+	if (mxt->current_debug_datap == 0){
+
+		diagnostics_reg = MXT_BASE_ADDR(MXT_GEN_COMMANDPROCESSOR_T6, 
+						mxt) + 
+			          MXT_ADR_T6_DIAGNOSTIC;
+		if (count > (mxt->device_info.num_nodes * 2))
+			count = mxt->device_info.num_nodes;
+	
+		debug_data_addr = MXT_BASE_ADDR(MXT_DEBUG_DIAGNOSTIC_T37, mxt)+ 
+			          MXT_ADR_T37_DATA;
+		page_address = MXT_BASE_ADDR(MXT_DEBUG_DIAGNOSTIC_T37, mxt) +
+			       MXT_ADR_T37_PAGE;
+		error = mxt_read_block(mxt->client, page_address, 1, &page);
+		if (error < 0)
+			return error;
+		mxt_debug(DEBUG_TRACE, "debug data page = %d\n", page);		
+		while (page != 0) {
+			error = mxt_write_byte(mxt->client, 
+					diagnostics_reg, 
+					MXT_CMD_T6_PAGE_DOWN);
+			if (error < 0)
+				return error;
+			/* Wait for command to be handled; when it has, the
+			   register will be cleared. */
+			debug_command_reg = 1;
+			while (debug_command_reg != 0) {
+				error = mxt_read_block(mxt->client, 
+						diagnostics_reg, 1,
+						&debug_command_reg);
+				if (error < 0)
+					return error;
+				mxt_debug(DEBUG_TRACE, 
+					"Waiting for debug diag command "
+					"to propagate...\n");
+
+			}
+		        error = mxt_read_block(mxt->client, page_address, 1, 
+					&page);
+			if (error < 0)
+				return error;
+			mxt_debug(DEBUG_TRACE, "debug data page = %d\n", page);	
+		}
+
+		/*
+		 * Lock mutex to prevent writing some unwanted data to debug
+		 * command register. User can still write through the char 
+		 * device interface though. TODO: fix?
+		 */
+
+		mutex_lock(&mxt->debug_mutex);
+		/* Configure Debug Diagnostics object to show deltas/refs */
+		error = mxt_write_byte(mxt->client, diagnostics_reg,
+				debug_command);
+		if (error < 0) {
+			printk(KERN_WARNING
+				"Error writing to maXTouch device!\n");
+			goto error;
+		}
+
+		/* Wait for command to be handled; when it has, the
+		 * register will be cleared. */
+		debug_command_reg = 1;
+		while (debug_command_reg != 0) {
+			error = mxt_read_block(mxt->client,
+					diagnostics_reg, 1,
+					&debug_command_reg);
+			if (error < 0)
+				goto error;
+			mxt_debug(DEBUG_TRACE, "Waiting for debug diag command "
+				"to propagate...\n");
+		}
+	
+		size = mxt->device_info.num_nodes * sizeof(u16);
+
+		while (size > 0) {
+			read_size = size > 128 ? 128 : size;
+			mxt_debug(DEBUG_TRACE, 
+				"Debug data read loop, reading %d bytes...\n",
+				read_size);
+			error = mxt_read_block(mxt->client, 
+					       debug_data_addr, 
+					       read_size, 
+					       (u8 *) &data[offset]);
+			if (error < 0) {
+				printk(KERN_WARNING 
+				       "Error reading debug data\n");
+				goto error;
+			}
+			offset += read_size/2;
+			size -= read_size;
+
+			/* Select next page */
+			error = mxt_write_byte(mxt->client, diagnostics_reg, 
+					MXT_CMD_T6_PAGE_UP);
+			if (error < 0) {
+				printk(KERN_WARNING
+					"Error writing to maXTouch device!\n");
+				goto error;
+			}
+		}
+		mutex_unlock(&mxt->debug_mutex);
+	}
+
+	buf_start = buf;
+	i = mxt->current_debug_datap;
+
+	while (((buf - buf_start) < (count - 6)) &&
+		(i < mxt->device_info.num_nodes)){
+
+		mxt->current_debug_datap++;
+		if (debug_command == MXT_CMD_T6_REFERENCES_MODE)
+			buf += sprintf(buf, "%d: %5d\n", i,
+				       (u16) le16_to_cpu(data[i]));
+		else if (debug_command == MXT_CMD_T6_DELTAS_MODE)
+			buf += sprintf(buf, "%d: %5d\n", i,
+				       (s16) le16_to_cpu(data[i]));
+		i++;
+	}
+
+	return (buf - buf_start);
+error:
+	mutex_unlock(&mxt->debug_mutex);
+	return error;
+}
+
+ssize_t deltas_read(struct file *file, char *buf, size_t count, loff_t *ppos)
+{
+	return debug_data_read(file->private_data, buf, count, ppos, 
+			       MXT_CMD_T6_DELTAS_MODE);
+}
+
+ssize_t refs_read(struct file *file, char *buf, size_t count, 
+			loff_t *ppos)
+{
+	return debug_data_read(file->private_data, buf, count, ppos, 
+			       MXT_CMD_T6_REFERENCES_MODE);
+}
+
+int debug_data_open(struct inode *inode, struct file *file)
+{
+	struct mxt_data *mxt;
+	int i;
+	mxt = inode->i_private;
+	if (mxt == NULL)
+		return -EIO;
+	mxt->current_debug_datap = 0;
+	mxt->debug_data = kmalloc(mxt->device_info.num_nodes * sizeof(u16),
+				  GFP_KERNEL);
+	if (mxt->debug_data == NULL)
+		return -ENOMEM;
+
+	
+	for (i = 0; i < mxt->device_info.num_nodes; i++)
+		mxt->debug_data[i] = 7777;
+	
+
+	file->private_data = mxt;
+	return 0;
+}
+
+int debug_data_release(struct inode *inode, struct file *file)
+{
+	struct mxt_data *mxt;
+	mxt = file->private_data;
+	kfree(mxt->debug_data);
+	return 0;
+}
+
+static struct file_operations delta_fops = {
+	.owner = THIS_MODULE,
+	.open = debug_data_open,
+	.release = debug_data_release,
+	.read = deltas_read,
+};
+
+static struct file_operations refs_fops = {
+	.owner = THIS_MODULE,
+	.open = debug_data_open,
+	.release = debug_data_release,
+	.read = refs_read,
+};
+
+
+int mxt_memory_open(struct inode *inode, struct file *file)
+{
+	struct mxt_data *mxt;
+	mxt = container_of(inode->i_cdev, struct mxt_data, cdev);
+	if (mxt == NULL)
+		return -EIO;
+	file->private_data = mxt;
+	return 0;
+}
+
+int mxt_message_open(struct inode *inode, struct file *file)
+{
+	struct mxt_data *mxt;
+	mxt = container_of(inode->i_cdev, struct mxt_data, cdev_messages);
+	if (mxt == NULL)
+		return -EIO;
+	file->private_data = mxt;
+	return 0;
+}
+
+
+ssize_t mxt_memory_read(struct file *file, char *buf, size_t count, 
+			loff_t *ppos)
+{
+	int i;
+	struct mxt_data *mxt;
+
+	mxt = file->private_data;
+	if (mxt->valid_ap){
+		mxt_debug(DEBUG_TRACE, "Reading %d bytes from current ap\n",
+			  (int) count);
+		i = mxt_read_block_wo_addr(mxt->client, count, (u8 *) buf);
+	} else {
+		mxt_debug(DEBUG_TRACE, "Address pointer changed since set;"
+			  "writing AP (%d) before reading %d bytes", 
+			  mxt->address_pointer, (int) count);
+		i = mxt_read_block(mxt->client, mxt->address_pointer, count,
+			           buf);
+	}
+			
+	return i;
+}
+
+ssize_t mxt_memory_write(struct file *file, const char *buf, size_t count,
+			 loff_t *ppos)
+{
+	int i;
+	int whole_blocks;
+	int last_block_size;
+	struct mxt_data *mxt;
+	u16 address;
+	
+	mxt = file->private_data;
+	address = mxt->address_pointer;
+
+	mxt_debug(DEBUG_TRACE, "mxt_memory_write entered\n");
+	whole_blocks = count / I2C_PAYLOAD_SIZE;
+	last_block_size = count % I2C_PAYLOAD_SIZE;
+
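+	/* split the write into I2C_PAYLOAD_SIZE sized chunks */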
+	for (i = 0; i < whole_blocks; i++) {
+		mxt_debug(DEBUG_TRACE, "About to write to %d...", 
+			address);
+		mxt_write_block(mxt->client, address, I2C_PAYLOAD_SIZE, 
+				(u8 *) buf);
+		address += I2C_PAYLOAD_SIZE;
+		buf += I2C_PAYLOAD_SIZE;
+	}
+
+	mxt_write_block(mxt->client, address, last_block_size, (u8 *) buf);
+
+	return count;
+}
+
+static long mxt_ioctl(struct file *file,
+		     unsigned int cmd, unsigned long arg)
+{
+	int retval;
+	struct mxt_data *mxt;
+
+	retval = 0;
+	mxt = file->private_data;
+
+	switch (cmd) {
+	case MXT_SET_ADDRESS_IOCTL:
+		retval = mxt_write_ap(mxt, (u16) arg);
+		if (retval >= 0) {
+			mxt->address_pointer = (u16) arg;
+			mxt->valid_ap = 1;
+		}
+		break;
+	case MXT_RESET_IOCTL:
+		retval = mxt_write_byte(mxt->client,
+			      MXT_BASE_ADDR(MXT_GEN_COMMANDPROCESSOR_T6, mxt) +
+			      MXT_ADR_T6_RESET,
+			      1);
+		break;
+	case MXT_CALIBRATE_IOCTL:
+		retval = mxt_write_byte(mxt->client,
+			      MXT_BASE_ADDR(MXT_GEN_COMMANDPROCESSOR_T6, mxt) +
+			      MXT_ADR_T6_CALIBRATE,
+			      1);
+
+		break;
+	case MXT_BACKUP_IOCTL:
+		retval = mxt_write_byte(mxt->client,
+			      MXT_BASE_ADDR(MXT_GEN_COMMANDPROCESSOR_T6, mxt) +
+			      MXT_ADR_T6_BACKUPNV,
+			      MXT_CMD_T6_BACKUP);
+		break;
+	case MXT_NONTOUCH_MSG_IOCTL:
+		mxt->nontouch_msg_only = 1;
+		break;
+	case MXT_ALL_MSG_IOCTL:
+		mxt->nontouch_msg_only = 0;
+		break;
+	default:
+		return -EIO;
+	}
+
+	return retval;
+} 
+
+/*
+ * Copies messages from buffer to user space.
+ *
+ * NOTE: if less than (mxt->message_size * 5 + 1) bytes requested,
+ * this will return 0!
+ * 
+ */
+ssize_t mxt_message_read(struct file *file, char *buf, size_t count, 
+			 loff_t *ppos)
+{
+	int i;
+	struct mxt_data *mxt;
+	char *buf_start;
+	
+	mxt = file->private_data;
+	if (mxt == NULL)
+		return -EIO;
+	buf_start = buf;
+
+	mutex_lock(&mxt->msg_mutex);
+	/* Copy messages until buffer empty, or 'count' bytes written */
+	while ((mxt->msg_buffer_startp != mxt->msg_buffer_endp) &&
+		((buf - buf_start) < (count - (5 * mxt->message_size) - 1))){
+
+		for (i = 0; i < mxt->message_size; i++){
+			buf += sprintf(buf, "[%2X] ",
+				*(mxt->messages + mxt->msg_buffer_endp *
+					mxt->message_size + i));
+		}
+		buf += sprintf(buf, "\n");
+		if (mxt->msg_buffer_endp < MXT_MESSAGE_BUFFER_SIZE)
+			mxt->msg_buffer_endp++;
+		else
+			mxt->msg_buffer_endp = 0;
+	}
+	mutex_unlock(&mxt->msg_mutex);
+	return (buf - buf_start);
+}
+
+static struct file_operations mxt_message_fops = {
+	.owner = THIS_MODULE,
+	.open = mxt_message_open,
+	.read = mxt_message_read,
+};
+
+static struct file_operations mxt_memory_fops = {
+	.owner = THIS_MODULE,
+	.open = mxt_memory_open,
+	.read = mxt_memory_read,
+	.write = mxt_memory_write,
+	.unlocked_ioctl = mxt_ioctl,
+};
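+
+/*
+ * Illustrative (hypothetical) user-space usage of these nodes, assuming udev
+ * creates /dev/maXTouch and /dev/maXTouch_messages from the class devices
+ * registered in mxt_probe():
+ *
+ *	fd = open("/dev/maXTouch", O_RDWR);
+ *	ioctl(fd, MXT_SET_ADDRESS_IOCTL, addr);	// select a register address
+ *	read(fd, buf, len);			// read 'len' bytes from there
+ *
+ * The message node is read-only and returns the buffered messages as
+ * formatted text (see mxt_message_read() above).
+ */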
+
+
+/* Writes the address pointer (to set up following reads). */
+
+int mxt_write_ap(struct mxt_data *mxt, u16 ap)
+{
+	struct i2c_client *client;
+	__le16	le_ap = cpu_to_le16(ap);
+
+	if (mxt == NULL)
+		return -EIO;
+	client = mxt->client;
+	mxt->last_read_addr = -1;
+	if (i2c_master_send(client, (u8 *) &le_ap, 2) == 2) {
+		mxt_debug(DEBUG_TRACE, "Address pointer set to %d\n", ap);
+		return 0;
+	} else {
+		mxt_debug(DEBUG_INFO, "Error writing address pointer!\n");
+		return -EIO;
+	}
+}
+
+
+
+/* Calculates the 24-bit CRC sum. */
+static u32 CRC_24(u32 crc, u8 byte1, u8 byte2)
+{
+	static const u32 crcpoly = 0x80001B;
+	u32 result;
+	u32 data_word;
+
+	data_word = ((((u16) byte2) << 8u) | byte1);
+	result = ((crc << 1u) ^ data_word);
+	if (result & 0x1000000)
+		result ^= crcpoly;
+	return result;
+}
+
+/* Returns object address in mXT chip, or zero if object is not found */
+static u16 get_object_address(uint8_t object_type,
+			      uint8_t instance,
+			      struct mxt_object *object_table,
+			      int max_objs)
+{
+	uint8_t object_table_index = 0;
+	uint8_t address_found = 0;
+	uint16_t address = 0;
+	struct mxt_object *obj;
+
+	while ((object_table_index < max_objs) && !address_found) {
+		obj = &object_table[object_table_index];
+		if (obj->type == object_type) {
+			address_found = 1;
+			/* Are there enough instances defined in the FW? */
+			if (obj->instances >= instance) {
+				address = obj->chip_addr +
+					  (obj->size + 1) * instance;
+			} else {
+				return 0;
+			}
+		}
+		object_table_index++;
+	}
+	return address;
+}
+
+
+/*
+ * Reads a block of bytes from given address from mXT chip. If we are
+ * reading from message window, and previous read was from message window,
+ * there's no need to write the address pointer: the mXT chip will
+ * automatically set the address pointer back to message window start.
+ */
+
+static int mxt_read_block(struct i2c_client *client,
+		   u16 addr,
+		   u16 length,
+		   u8 *value)
+{
+	struct i2c_adapter *adapter = client->adapter;
+	struct i2c_msg msg[2];
+	__le16	le_addr;
+	struct mxt_data *mxt;
+
+	mxt = i2c_get_clientdata(client);
+
+	if (mxt != NULL) {
+		if ((mxt->last_read_addr == addr) &&
+			(addr == mxt->msg_proc_addr)) {
+			if  (i2c_master_recv(client, value, length) == length)
+				return length;
+			else
+				return -EIO;
+		} else {
+			mxt->last_read_addr = addr;
+		}
+	}
+
+	mxt_debug(DEBUG_TRACE, "Writing address pointer & reading %d bytes "
+		"in one i2c transaction...\n", length);
+
+	le_addr = cpu_to_le16(addr);
+	msg[0].addr  = client->addr;
+	msg[0].flags = 0x00;
+	msg[0].len   = 2;
+	msg[0].buf   = (u8 *) &le_addr;
+
+	msg[1].addr  = client->addr;
+	msg[1].flags = I2C_M_RD;
+	msg[1].len   = length;
+	msg[1].buf   = (u8 *) value;
+	if  (i2c_transfer(adapter, msg, 2) == 2)
+		return length;
+	else
+		return -EIO;
+
+}
+
+/* Reads a block of bytes from current address from mXT chip. */
+
+static int mxt_read_block_wo_addr(struct i2c_client *client,
+			   u16 length,
+			   u8 *value)
+{
+
+
+	if  (i2c_master_recv(client, value, length) == length) {
+		mxt_debug(DEBUG_TRACE, "I2C block read ok\n");
+		return length;
+	} else {
+		mxt_debug(DEBUG_INFO, "I2C block read failed\n");
+		return -EIO;
+	}
+
+}
+
+
+/* Writes one byte to given address in mXT chip. */
+
+static int mxt_write_byte(struct i2c_client *client, u16 addr, u8 value)
+{
+	struct {
+		__le16 le_addr;
+		u8 data;
+	} i2c_byte_transfer;
+
+	struct mxt_data *mxt;
+
+	mxt = i2c_get_clientdata(client);
+	if (mxt != NULL)
+		mxt->last_read_addr = -1;
+	i2c_byte_transfer.le_addr = cpu_to_le16(addr);
+	i2c_byte_transfer.data = value;
+	if  (i2c_master_send(client, (u8 *) &i2c_byte_transfer, 3) == 3)
+		return 0;
+	else
+		return -EIO;
+}
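+
+/*
+ * Both mxt_write_byte() above and mxt_write_block() below use the same
+ * framing on the wire: a 16-bit little-endian register address immediately
+ * followed by the data bytes, all sent as a single I2C write.
+ */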
+
+
+/* Writes a block of bytes (max 256) to given address in mXT chip. */
+static int mxt_write_block(struct i2c_client *client,
+		    u16 addr,
+		    u16 length,
+		    u8 *value)
+{
+	int i;
+	struct {
+		__le16	le_addr;
+		u8	data[256];
+	} i2c_block_transfer;
+
+	struct mxt_data *mxt;
+
+	mxt_debug(DEBUG_TRACE, "Writing %d bytes to %d...", length, addr);
+	if (length > 256)
+		return -EINVAL;
+	mxt = i2c_get_clientdata(client);
+	if (mxt != NULL)
+		mxt->last_read_addr = -1;
+	for (i = 0; i < length; i++)
+		i2c_block_transfer.data[i] = *value++;
+	i2c_block_transfer.le_addr = cpu_to_le16(addr);
+	i = i2c_master_send(client, (u8 *) &i2c_block_transfer, length + 2);
+	if (i == (length + 2))
+		return length;
+	else
+		return -EIO;
+}
+
+/* Calculates the 24-bit CRC over the mXT info block (ID block + object table). */
+int calculate_infoblock_crc(u32 *crc_result, u8 *data, int crc_area_size)
+{
+	u32 crc = 0;
+	int i;
+
+	for (i = 0; i < (crc_area_size - 1); i = i + 2)
+		crc = CRC_24(crc, *(data + i), *(data + i + 1));
+	/* If the size is odd, pad with zero */
+	if (crc_area_size & 0x0001)
+		crc = CRC_24(crc, *(data + i), 0);
+	/* Return only 24 bits of CRC. */
+	*crc_result = (crc & 0x00FFFFFF);
+
+	return 0;
+}
+
+/* Processes a touchscreen message. */
+void process_T9_message(u8 *message, struct mxt_data *mxt, int last_touch)
+{
+
+	struct	input_dev *input;
+	u8  status;
+	u16 xpos = 0xFFFF;
+	u16 ypos = 0xFFFF;
+	u8  touch_size = 255;
+	u8  touch_number;
+	u8  amplitude;
+	u8  report_id;
+
+	static int stored_size[10];
+	static int stored_x[10];
+	static int stored_y[10];
+	int i;
+	int active_touches = 0;
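+	/*
+	 * Note: stored_size/stored_x/stored_y are function-local statics, so
+	 * this reporting path assumes a single maXTouch device instance and
+	 * tracks at most 10 concurrent touches.
+	 */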
+	/*
+	 * If the 'last_touch' flag is set, we have received all the touch
+	 * messages available in this cycle, so send the events for the
+	 * touches that are still active.
+	 */
+	if (last_touch) {
+		/*
+		 * TODO: for compatibility with single-touch systems,
+		 * send ABS_X & ABS_Y here as well:
+		 *
+		 * if (stored_size[0]) {
+		 *	input_report_abs(mxt->input, ABS_X, stored_x[0]);
+		 *	input_report_abs(mxt->input, ABS_Y, stored_y[0]);
+		 * }
+		 */
+
+		for (i = 0; i < 10; i++){
+			if (stored_size[i]){
+				active_touches++;
+				input_report_abs(mxt->input, 
+						ABS_MT_TRACKING_ID,
+						i);
+				input_report_abs(mxt->input,
+						ABS_MT_TOUCH_MAJOR,
+						stored_size[i]);
+				input_report_abs(mxt->input,
+						ABS_MT_POSITION_X,
+						stored_x[i]);
+				input_report_abs(mxt->input,
+						ABS_MT_POSITION_Y,
+						stored_y[i]);
+				input_mt_sync(mxt->input);
+			}
+		}
+		if (active_touches == 0)
+			input_mt_sync(mxt->input);
+		input_sync(mxt->input);
+		
+	} else {
+
+	input = mxt->input;
+	status = message[MXT_MSG_T9_STATUS];
+	report_id = message[0];
+
+	if (status & MXT_MSGB_T9_SUPPRESS) {
+		/* Touch has been suppressed by grip/face detection. */
+		mxt_debug(DEBUG_TRACE, "SUPPRESS");
+	} else {
+		/* Put together the 10-/12-bit coordinate values. */
+		xpos = message[MXT_MSG_T9_XPOSMSB] * 16 +
+			((message[MXT_MSG_T9_XYPOSLSB] >> 4) & 0xF);
+		ypos = message[MXT_MSG_T9_YPOSMSB] * 16 +
+			((message[MXT_MSG_T9_XYPOSLSB] >> 0) & 0xF);
+
+		if (mxt->max_x_val < 1024)
+			xpos >>= 2;
+		if (mxt->max_y_val < 1024)
+			ypos >>= 2;
+
+		touch_number = message[MXT_MSG_REPORTID] -
+			mxt->rid_map[report_id].first_rid;
+
+		stored_x[touch_number] = xpos;
+		stored_y[touch_number] = ypos;
+
+		if (status & MXT_MSGB_T9_DETECT) {
+			/*
+			 * mXT224 reports the number of touched nodes,
+			 * so the exact value for touch ellipse major
+			 * axis length in nodes would be 2*sqrt(touch_size/pi)
+			 * (assuming round touch shape), which would then need
+			 * to be scaled using information about how many sensor
+			 * lines there are. This is therefore a rough
+			 * approximation, but sufficient for most applications.
+			 */
+			touch_size = message[MXT_MSG_T9_TCHAREA];
+			touch_size = touch_size >> 2;
+			if (!touch_size)
+				touch_size = 1;
+			/*
+			 * report_mt(touch_number, touch_size, xpos, ypos, mxt);
+			 */
+
+			stored_size[touch_number] = touch_size;
+
+			if (status & MXT_MSGB_T9_AMP)
+				/* Amplitude of touch has changed */
+				amplitude = message[MXT_MSG_T9_TCHAMPLITUDE];
+		}
+
+		if (status & MXT_MSGB_T9_RELEASE) {
+			/* The previously reported touch has been removed.*/
+			/* report_mt(touch_number, 0, xpos, ypos, mxt); */
+			stored_size[touch_number] = 0;
+		}
+
+		/* input_sync(input); */
+	}
+	
+	if (status & MXT_MSGB_T9_SUPPRESS) {
+		mxt_debug(DEBUG_TRACE, "SUPPRESS");
+	} else {
+		if (status & MXT_MSGB_T9_DETECT) {
+			mxt_debug(DEBUG_TRACE, "DETECT:%s%s%s%s", 
+				((status & MXT_MSGB_T9_PRESS) ? " PRESS" : ""), 
+				((status & MXT_MSGB_T9_MOVE) ? " MOVE" : ""), 
+				((status & MXT_MSGB_T9_AMP) ? " AMP" : ""), 
+				((status & MXT_MSGB_T9_VECTOR) ? " VECT" : ""));
+
+		} else if (status & MXT_MSGB_T9_RELEASE) {
+			mxt_debug(DEBUG_TRACE, "RELEASE");
+		}
+	}
+	mxt_debug(DEBUG_TRACE, "X=%d, Y=%d, TOUCHSIZE=%d",
+		xpos, ypos, touch_size);
+	}
+	return;
+}
+
+
+int process_message(u8 *message, u8 object, struct mxt_data *mxt)
+{
+	struct i2c_client *client;
+	u8  status;
+	u16 xpos = 0xFFFF;
+	u16 ypos = 0xFFFF;
+	u8  event;
+	u8  direction;
+	u16 distance;
+	u8  length;
+	u8  report_id;
+	static u8 error_cond;
+
+	client = mxt->client;
+	length = mxt->message_size;
+	report_id = message[0];
+
+	if ((mxt->nontouch_msg_only == 0) ||
+	    (!IS_TOUCH_OBJECT(object))){
+		mutex_lock(&mxt->msg_mutex);
+		/* Copy the message into the ring buffer read by mxt_message_read() */
+		if (mxt->msg_buffer_startp < MXT_MESSAGE_BUFFER_SIZE) {
+			mxt->msg_buffer_startp++;
+		} else {
+			mxt->msg_buffer_startp = 0;
+		}
+		
+		if (mxt->msg_buffer_startp == mxt->msg_buffer_endp) {
+			mxt_debug(DEBUG_TRACE, 
+				  "Message buf full, discarding last entry.\n");
+			if (mxt->msg_buffer_endp < MXT_MESSAGE_BUFFER_SIZE) {
+				mxt->msg_buffer_endp++;
+			} else {
+				mxt->msg_buffer_endp = 0;
+			}
+		}
+		memcpy((mxt->messages + mxt->msg_buffer_startp * length), 
+		       message,
+		       length);
+		mutex_unlock(&mxt->msg_mutex);
+	}
+
+	switch (object) {
+	case MXT_GEN_COMMANDPROCESSOR_T6:
+		status = message[1];
+
+		if (status & MXT_MSGB_T6_COMSERR) {
+			if (!(error_cond & MXT_MSGB_T6_COMSERR)) {
+				dev_err(&client->dev,
+					"maXTouch checksum error\n");
+				error_cond |= MXT_MSGB_T6_COMSERR;
+			}
+		}
+		if (status & MXT_MSGB_T6_CFGERR) {
+			/* 
+			 * Configuration error. A proper configuration
+			 * needs to be written to chip and backed up.
+			 */
+			if (!(error_cond & MXT_MSGB_T6_CFGERR)) {
+				dev_err(&client->dev,
+					"maXTouch configuration error\n");
+				error_cond |= MXT_MSGB_T6_CFGERR;
+			}
+		}
+		if (status & MXT_MSGB_T6_CAL) {
+			/* Calibration in action, no need to react */
+			dev_dbg(&client->dev,
+				"maXTouch calibration in progress\n");
+		}
+		if (status & MXT_MSGB_T6_SIGERR) {
+			/*
+			 * Signal acquisition error: something is seriously
+			 * wrong, and there is not much the driver can do to
+			 * correct it.
+			 */
+			if (!(error_cond & MXT_MSGB_T6_SIGERR)) {
+				dev_err(&client->dev,
+					"maXTouch acquisition error\n");
+				error_cond |= MXT_MSGB_T6_SIGERR;
+			}
+		}
+		if (status & MXT_MSGB_T6_OFL) {
+			/*
+			 * Cycle overflow, the acquisition interval is too 
+			 * short.
+			 */
+			dev_err(&client->dev,
+				"maXTouch cycle overflow\n");
+		}
+		if (status & MXT_MSGB_T6_RESET) {
+			/* Chip has reset; no need to react. */
+			dev_dbg(&client->dev,
+				"maXTouch chip reset\n");
+		}
+		if (status == 0) {
+			/* Chip status back to normal. */
+			dev_dbg(&client->dev,
+				"maXTouch status normal\n");
+			error_cond = 0;
+		}
+		break;
+
+	case MXT_TOUCH_MULTITOUCHSCREEN_T9:
+		process_T9_message(message, mxt, 0);
+		break;
+
+	case MXT_SPT_GPIOPWM_T19:
+		if (debug >= DEBUG_TRACE)
+			dev_info(&client->dev,
+				"Receiving GPIO message\n");
+		break;
+
+	case MXT_PROCI_GRIPFACESUPPRESSION_T20:
+		if (debug >= DEBUG_TRACE)
+			dev_info(&client->dev,
+				"Receiving face suppression msg\n");
+		break;
+
+	case MXT_PROCG_NOISESUPPRESSION_T22:
+		if (debug >= DEBUG_TRACE)
+			dev_info(&client->dev,
+				"Receiving noise suppression msg\n");
+		status = message[MXT_MSG_T22_STATUS];
+		if (status & MXT_MSGB_T22_FHCHG) {
+			if (debug >= DEBUG_TRACE)
+				dev_info(&client->dev,
+					"maXTouch: Freq changed\n");
+		}
+		if (status & MXT_MSGB_T22_GCAFERR) {
+			if (debug >= DEBUG_TRACE)
+				dev_info(&client->dev,
+					"maXTouch: High noise "
+					"level\n");
+		}
+		if (status & MXT_MSGB_T22_FHERR) {
+			if (debug >= DEBUG_TRACE)
+				dev_info(&client->dev,
+					"maXTouch: Freq changed - "
+					"Noise level too high\n");
+		}
+		break;
+
+	case MXT_PROCI_ONETOUCHGESTUREPROCESSOR_T24:
+		if (debug >= DEBUG_TRACE)
+			dev_info(&client->dev,
+				"Receiving one-touch gesture msg\n");
+
+		event = message[MXT_MSG_T24_STATUS] & 0x0F;
+		xpos = message[MXT_MSG_T24_XPOSMSB] * 16 +
+			((message[MXT_MSG_T24_XYPOSLSB] >> 4) & 0x0F);
+		ypos = message[MXT_MSG_T24_YPOSMSB] * 16 +
+			((message[MXT_MSG_T24_XYPOSLSB] >> 0) & 0x0F);
+		if (mxt->max_x_val < 1024)
+			xpos >>= 2;
+		if (mxt->max_y_val < 1024)
+			ypos >>= 2;
+		direction = message[MXT_MSG_T24_DIR];
+		distance = message[MXT_MSG_T24_DIST] +
+			   (message[MXT_MSG_T24_DIST + 1] << 8);
+
+		report_gesture((event << 24) | (direction << 16) | distance,
+			mxt);
+		report_gesture((xpos << 16) | ypos, mxt);
+
+		break;
+
+	case MXT_SPT_SELFTEST_T25:
+		if (debug >= DEBUG_TRACE)
+			dev_info(&client->dev,
+				"Receiving Self-Test msg\n");
+
+		if (message[MXT_MSG_T25_STATUS] == MXT_MSGR_T25_OK) {
+			if (debug >= DEBUG_TRACE)
+				dev_info(&client->dev,
+					"maXTouch: Self-Test OK\n");
+
+		} else  {
+			dev_err(&client->dev,
+				"maXTouch: Self-Test Failed [%02x]:"
+				"{%02x,%02x,%02x,%02x,%02x}\n",
+				message[MXT_MSG_T25_STATUS],
+				message[MXT_MSG_T25_STATUS + 0],
+				message[MXT_MSG_T25_STATUS + 1],
+				message[MXT_MSG_T25_STATUS + 2],
+				message[MXT_MSG_T25_STATUS + 3],
+				message[MXT_MSG_T25_STATUS + 4]
+				);
+		}
+		break;
+
+	case MXT_PROCI_TWOTOUCHGESTUREPROCESSOR_T27:
+		if (debug >= DEBUG_TRACE)
+			dev_info(&client->dev,
+				"Receiving 2-touch gesture message\n");
+
+		event = message[MXT_MSG_T27_STATUS] & 0xF0;
+		xpos = message[MXT_MSG_T27_XPOSMSB] * 16 +
+			((message[MXT_MSG_T27_XYPOSLSB] >> 4) & 0x0F);
+		ypos = message[MXT_MSG_T27_YPOSMSB] * 16 +
+			((message[MXT_MSG_T27_XYPOSLSB] >> 0) & 0x0F);
+		if (mxt->max_x_val < 1024)
+			xpos >>= 2;
+		if (mxt->max_y_val < 1024)
+			ypos >>= 2;
+
+		direction = message[MXT_MSG_T27_ANGLE];
+		distance = message[MXT_MSG_T27_SEPARATION] +
+			   (message[MXT_MSG_T27_SEPARATION + 1] << 8);
+
+		report_gesture((event << 24) | (direction << 16) | distance,
+			mxt);
+		report_gesture((xpos << 16) | ypos, mxt);
+
+
+		break;
+
+	case MXT_SPT_CTECONFIG_T28:
+		if (debug >= DEBUG_TRACE)
+			dev_info(&client->dev,
+				"Receiving CTE message...\n");
+		status = message[MXT_MSG_T28_STATUS];
+		if (status & MXT_MSGB_T28_CHKERR)
+			dev_err(&client->dev,
+				"maXTouch: Power-Up CRC failure\n");
+
+		break;
+	default:
+		if (debug >= DEBUG_TRACE)
+			dev_info(&client->dev,
+				"maXTouch: Unknown message!\n");
+
+		break;
+	}
+
+	return 0;
+}
+
+
+/*
+ * Processes messages when the interrupt line (CHG) is asserted. Keeps
+ * reading messages until a message with report ID 0xFF is received,
+ * which indicates that there are no more new messages.
+ *
+ */
+
+static void mxt_worker(struct work_struct *work)
+{
+	struct	mxt_data *mxt;
+	struct	i2c_client *client;
+
+	u8	*message;
+	u16	message_length;
+	u16	message_addr;
+	u8	report_id;
+	u8	object;
+	int	error;
+	int	i;
+	char    *message_string;
+	char    *message_start;
+
+	message = NULL;
+	mxt = container_of(work, struct mxt_data, dwork.work);
+	disable_irq(mxt->irq);
+	client = mxt->client;
+	message_addr = 	mxt->msg_proc_addr;
+	message_length = mxt->message_size;
+
+	if (message_length < 256) {
+		message = kmalloc(message_length, GFP_KERNEL);
+		if (message == NULL) {
+			dev_err(&client->dev, "Error allocating memory\n");
+			/* Re-enable the IRQ that was disabled above. */
+			enable_irq(mxt->irq);
+			return;
+		}
+	} else {
+		dev_err(&client->dev,
+			"Message length larger than 256 bytes not supported\n");
+		enable_irq(mxt->irq);
+		return;
+	}
+
+	mxt_debug(DEBUG_TRACE, "maXTouch worker active: \n");
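+	/*
+	 * Two loop-termination modes are used below: when the 'comms' flag
+	 * (defined earlier in this driver) is set, keep reading while the CHG
+	 * line is still asserted (low); otherwise stop once an
+	 * end-of-messages or invalid report ID is seen.
+	 */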
+	
+	do {
+		/* Read next message, reread on failure. */
+		/* TODO: message length, CRC included? */
+		mxt->message_counter++;
+		for (i = 1; i < I2C_RETRY_COUNT; i++) {
+			error = mxt_read_block(client,
+					       message_addr,
+					       message_length - 1,
+					       message);
+			if (error >= 0)
+				break;
+			mxt->read_fail_counter++;
+			dev_err(&client->dev,
+				"Failure reading maXTouch device\n");
+		}
+		if (error < 0) {
+			kfree(message);
+			enable_irq(mxt->irq);
+			return;
+		}
+		
+		if (mxt->address_pointer != message_addr)
+			mxt->valid_ap = 0;
+		report_id = message[0];
+
+		if (debug >= DEBUG_RAW) {
+			mxt_debug(DEBUG_RAW, "%s message [msg count: %08x]:",
+				  REPORT_ID_TO_OBJECT_NAME(report_id, mxt),
+				  mxt->message_counter
+			);
+			/* 5 characters per one byte */
+			message_string = kmalloc(message_length * 5, 
+						 GFP_KERNEL);
+			if (message_string == NULL) {
+				dev_err(&client->dev,
+					"Error allocating memory\n");
+				kfree(message);
+				enable_irq(mxt->irq);
+				return;
+			}
+			message_start = message_string;
+			for (i = 0; i < message_length; i++) {
+				message_string += 
+					sprintf(message_string, 
+						"0x%02X ", message[i]);
+			}
+			mxt_debug(DEBUG_RAW, "%s", message_start);
+			kfree(message_start);
+		}
+		
+		if ((report_id != MXT_END_OF_MESSAGES) && (report_id != 0)) {
+			memcpy(mxt->last_message, message, message_length);
+			mxt->new_msgs = 1;
+			smp_wmb();
+			/* Get type of object and process the message */
+			object = mxt->rid_map[report_id].object;
+			process_message(message, object, mxt);
+		}
+
+		mxt_debug(DEBUG_TRACE, "chgline: %d\n", mxt->read_chg());
+	} while (comms ? (mxt->read_chg() == 0) : 
+		((report_id != MXT_END_OF_MESSAGES) && (report_id != 0)));
+
+	/* All messages processed, send the events. */
+	process_T9_message(NULL, mxt, 1);
+
+
+	kfree(message);
+	enable_irq(mxt->irq);
+	/* Make sure we didn't just miss an interrupt. */
+	if (mxt->read_chg() == 0)
+		schedule_delayed_work(&mxt->dwork, 0);
+
+}
+
+
+/*
+ * The maXTouch device will signal the host about a new message by asserting
+ * the CHG line. This ISR schedules a worker routine to read the message when
+ * that happens.
+ */
+
+static irqreturn_t mxt_irq_handler(int irq, void *_mxt)
+{
+	struct mxt_data *mxt = _mxt;
+
+	mxt->irq_counter++;
+	if (mxt->valid_interrupt()) {
+		/* Send the signal only if falling edge generated the irq. */
+		cancel_delayed_work(&mxt->dwork);
+		schedule_delayed_work(&mxt->dwork, 0);
+		mxt->valid_irq_counter++;
+	} else {
+		mxt->invalid_irq_counter++;
+		return IRQ_NONE;
+	}
+
+	return IRQ_HANDLED;
+}
+
+
+
+/******************************************************************************/
+/* Initialization of driver                                                   */
+/******************************************************************************/
+
+static int __devinit mxt_identify(struct i2c_client *client,
+				  struct mxt_data *mxt,
+				  u8 *id_block_data)
+{
+	u8 buf[MXT_ID_BLOCK_SIZE];
+	int error;
+	int identified;
+
+	identified = 0;
+
+	/* Read Device info to check if chip is valid */
+	error = mxt_read_block(client, MXT_ADDR_INFO_BLOCK, MXT_ID_BLOCK_SIZE,
+			       (u8 *) buf);
+
+	if (error < 0) {
+		mxt->read_fail_counter++;
+		dev_err(&client->dev, "Failure accessing maXTouch device\n");
+		return -EIO;
+	}
+
+	memcpy(id_block_data, buf, MXT_ID_BLOCK_SIZE);
+
+	mxt->device_info.family_id  = buf[0];
+	mxt->device_info.variant_id = buf[1];
+	mxt->device_info.major	    = ((buf[2] >> 4) & 0x0F);
+	mxt->device_info.minor      = (buf[2] & 0x0F);
+	mxt->device_info.build	    = buf[3];
+	mxt->device_info.x_size	    = buf[4];
+	mxt->device_info.y_size	    = buf[5];
+	mxt->device_info.num_objs   = buf[6];
+	mxt->device_info.num_nodes  = mxt->device_info.x_size *
+				      mxt->device_info.y_size;
+
+	/*
+	 * Check Family & Variant Info; warn if not recognized but
+	 * still continue.
+	 */
+
+	/* MXT224 */
+	if (mxt->device_info.family_id == MXT224_FAMILYID) {
+		strcpy(mxt->device_info.family_name, "mXT224");
+
+		if (mxt->device_info.variant_id == MXT224_CAL_VARIANTID) {
+			strcpy(mxt->device_info.variant_name, "Calibrated");
+		} else if (mxt->device_info.variant_id == 
+			MXT224_UNCAL_VARIANTID) {
+			strcpy(mxt->device_info.variant_name, "Uncalibrated");
+		} else {
+			dev_err(&client->dev,
+				"Warning: maXTouch Variant ID [%d] not "
+				"supported\n",
+				mxt->device_info.variant_id);
+			strcpy(mxt->device_info.variant_name, "UNKNOWN");
+			/* identified = -ENXIO; */
+		}
+
+	/* MXT1386 */
+	} else if (mxt->device_info.family_id == MXT1386_FAMILYID) {
+		strcpy(mxt->device_info.family_name, "mXT1386");
+
+		if (mxt->device_info.variant_id == MXT1386_CAL_VARIANTID) {
+			strcpy(mxt->device_info.variant_name, "Calibrated");
+		} else {
+			dev_err(&client->dev,
+				"Warning: maXTouch Variant ID [%d] not "
+				"supported\n",
+				mxt->device_info.variant_id);
+			strcpy(mxt->device_info.variant_name, "UNKNOWN");
+			/* identified = -ENXIO; */
+		}
+	/* Unknown family ID! */
+	} else {
+		dev_err(&client->dev,
+			"Warning: maXTouch Family ID [%d] not supported\n",
+			mxt->device_info.family_id);
+		strcpy(mxt->device_info.family_name, "UNKNOWN");
+		strcpy(mxt->device_info.variant_name, "UNKNOWN");
+		/* identified = -ENXIO; */
+	}
+
+	dev_info(
+		&client->dev,
+		"Atmel maXTouch (Family %s (%X), Variant %s (%X)) Firmware "
+		"version [%d.%d] Build %d\n",
+		mxt->device_info.family_name,
+		mxt->device_info.family_id,
+		mxt->device_info.variant_name,
+		mxt->device_info.variant_id,
+		mxt->device_info.major,
+		mxt->device_info.minor,
+		mxt->device_info.build
+	);
+	dev_dbg(
+		&client->dev,
+		"Atmel maXTouch Configuration "
+		"[X: %d] x [Y: %d]\n",
+		mxt->device_info.x_size,
+		mxt->device_info.y_size
+	);
+	return identified;
+}
+
+/*
+ * Reads the object table from the maXTouch chip to get object data such as
+ * address, size and report IDs. The ID data that was already read is passed
+ * in as well, because the Info Block CRC covers both the ID block and the
+ * object table.
+ */
+static int __devinit mxt_read_object_table(struct i2c_client *client,
+					   struct mxt_data *mxt,
+					   u8 *raw_id_data)
+{
+	u16	report_id_count;
+	u8	buf[MXT_OBJECT_TABLE_ELEMENT_SIZE];
+	u8      *raw_ib_data;
+	u8	object_type;
+	u16	object_address;
+	u16	object_size;
+	u8	object_instances;
+	u8	object_report_ids;
+	u16	object_info_address;
+	u32	crc;
+	u32     calculated_crc;
+	int	i;
+	int	error;
+
+	u8	object_instance;
+	u8	object_report_id;
+	u8	report_id;
+	int     first_report_id;
+	int     ib_pointer;
+	struct mxt_object *object_table;
+
+	mxt_debug(DEBUG_TRACE, "maXTouch driver reading configuration\n");
+
+	object_table = kzalloc(sizeof(struct mxt_object) *
+			       mxt->device_info.num_objs,
+			       GFP_KERNEL);
+	if (object_table == NULL) {
+		printk(KERN_WARNING "maXTouch: Memory allocation failed!\n");
+		error = -ENOMEM;
+		goto err_object_table_alloc;
+	}
+
+	raw_ib_data = kmalloc(MXT_OBJECT_TABLE_ELEMENT_SIZE *
+			mxt->device_info.num_objs + MXT_ID_BLOCK_SIZE,
+			GFP_KERNEL);
+	if (raw_ib_data == NULL) {
+		printk(KERN_WARNING "maXTouch: Memory allocation failed!\n");
+		error = -ENOMEM;
+		goto err_ib_alloc;
+	}
+
+	/* Copy the ID data for CRC calculation. */
+	memcpy(raw_ib_data, raw_id_data, MXT_ID_BLOCK_SIZE);
+	ib_pointer = MXT_ID_BLOCK_SIZE;
+
+	mxt->object_table = object_table;
+
+	mxt_debug(DEBUG_TRACE, "maXTouch driver Memory allocated\n");
+
+	object_info_address = MXT_ADDR_OBJECT_TABLE;
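+	/*
+	 * Each object table element encodes: object type, a 16-bit
+	 * little-endian address, (size - 1), (instances - 1), and the number
+	 * of report IDs per instance, hence the "+ 1" adjustments when the
+	 * element is parsed below.
+	 */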
+
+	report_id_count = 0;
+	for (i = 0; i < mxt->device_info.num_objs; i++) {
+		mxt_debug(DEBUG_TRACE, "Reading maXTouch at [0x%04x]: ",
+			  object_info_address);
+
+		error = mxt_read_block(client, object_info_address,
+				       MXT_OBJECT_TABLE_ELEMENT_SIZE, buf);
+
+		if (error < 0) {
+			mxt->read_fail_counter++;
+			dev_err(&client->dev,
+				"maXTouch Object %d could not be read\n", i);
+			error = -EIO;
+			goto err_object_read;
+		}
+
+		memcpy(raw_ib_data + ib_pointer, buf, 
+		       MXT_OBJECT_TABLE_ELEMENT_SIZE);
+		ib_pointer += MXT_OBJECT_TABLE_ELEMENT_SIZE;
+
+		object_type       =  buf[0];
+		object_address    = (buf[2] << 8) + buf[1];
+		object_size       =  buf[3] + 1;
+		object_instances  =  buf[4] + 1;
+		object_report_ids =  buf[5];
+		mxt_debug(DEBUG_TRACE, "Type=%03d, Address=0x%04x, "
+			  "Size=0x%02x, %d instances, %d report id's\n",
+			  object_type,
+			  object_address,
+			  object_size,
+			  object_instances,
+			  object_report_ids
+		);
+
+		if (object_type == 38)
+			t38_size = object_size;
+		/* TODO: check whether object is known and supported? */
+		
+		/* Save frequently needed info. */
+		if (object_type == MXT_GEN_MESSAGEPROCESSOR_T5) {
+			mxt->msg_proc_addr = object_address;
+			mxt->message_size = object_size;
+		}
+
+		object_table[i].type            = object_type;
+		object_table[i].chip_addr       = object_address;
+		object_table[i].size            = object_size;
+		object_table[i].instances       = object_instances;
+		object_table[i].num_report_ids  = object_report_ids;
+		report_id_count += object_instances * object_report_ids;
+
+		object_info_address += MXT_OBJECT_TABLE_ELEMENT_SIZE;
+	}
+
+	mxt->rid_map =
+		kzalloc(sizeof(struct report_id_map) * (report_id_count + 1),
+			/* allocate for report_id 0, even if not used */
+			GFP_KERNEL);
+	if (mxt->rid_map == NULL) {
+		printk(KERN_WARNING "maXTouch: Can't allocate memory!\n");
+		error = -ENOMEM;
+		goto err_rid_map_alloc;
+	}
+
+	mxt->messages = kzalloc(mxt->message_size * MXT_MESSAGE_BUFFER_SIZE,
+				GFP_KERNEL);
+	if (mxt->messages == NULL) {
+		printk(KERN_WARNING "maXTouch: Can't allocate memory!\n");
+		error = -ENOMEM;
+		goto err_msg_alloc;
+	}
+
+	mxt->last_message = kzalloc(mxt->message_size, GFP_KERNEL);
+	if (mxt->last_message == NULL) {
+		printk(KERN_WARNING "maXTouch: Can't allocate memory!\n");
+		error = -ENOMEM;
+		goto err_msg_alloc;
+	}
+
+	mxt->report_id_count = report_id_count;
+	if (report_id_count > 254) {	/* 0 & 255 are reserved */
+		dev_err(&client->dev,
+			"Too many maXTouch report id's [%d]\n",
+			report_id_count);
+		error = -ENXIO;
+		goto err_max_rid;
+	}
+
+	/* Create a mapping from report id to object type */
+	report_id = 1; /* Start from 1, 0 is reserved. */
+
+	/* Create table associating report id's with objects & instances */
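+	/*
+	 * first_rid is recorded so that per-touch handlers (e.g. the T9 path
+	 * in process_T9_message()) can turn a report ID back into a
+	 * zero-based touch number as report_id - first_rid.
+	 */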
+	for (i = 0; i < mxt->device_info.num_objs; i++) {
+		for (object_instance = 0;
+		     object_instance < object_table[i].instances;
+		     object_instance++){
+			first_report_id = report_id;
+			for (object_report_id = 0;
+			     object_report_id < object_table[i].num_report_ids;
+			     object_report_id++) {
+				mxt->rid_map[report_id].object =
+					object_table[i].type;
+				mxt->rid_map[report_id].instance =
+					object_instance;
+				mxt->rid_map[report_id].first_rid =
+					first_report_id;
+				report_id++;
+			}
+		}
+	}
+
+	/* Read 3 byte CRC */
+	error = mxt_read_block(client, object_info_address, 3, buf);
+	if (error < 0) {
+		mxt->read_fail_counter++;
+		dev_err(&client->dev, "Error reading CRC\n");
+	}
+
+	crc = (buf[2] << 16) | (buf[1] << 8) | buf[0];
+
+	if (calculate_infoblock_crc(&calculated_crc, raw_ib_data,
+				    ib_pointer)) {
+		printk(KERN_WARNING "Error while calculating CRC!\n");
+		calculated_crc = 0;
+	}
+	kfree(raw_ib_data);
+
+	mxt_debug(DEBUG_TRACE, "\nReported info block CRC = 0x%6X\n", crc);
+	mxt_debug(DEBUG_TRACE, "Calculated info block CRC = 0x%6X\n\n",
+		       calculated_crc);
+	
+	if (crc == calculated_crc) {
+		mxt->info_block_crc = crc;
+	} else {
+		mxt->info_block_crc = 0;
+		printk(KERN_ALERT "maXTouch: Info block CRC invalid!\n");
+	}
+
+	if (debug >= DEBUG_VERBOSE) {
+
+		dev_info(&client->dev, "maXTouch: %d Objects\n",
+				mxt->device_info.num_objs);
+
+		for (i = 0; i < mxt->device_info.num_objs; i++) {
+			dev_info(&client->dev, "Type:\t\t\t[%d]: %s\n",
+				 object_table[i].type,
+				 object_type_name[object_table[i].type]);
+			dev_info(&client->dev, "\tAddress:\t0x%04X\n",
+				object_table[i].chip_addr);
+			dev_info(&client->dev, "\tSize:\t\t%d Bytes\n",
+				 object_table[i].size);
+			dev_info(&client->dev, "\tInstances:\t%d\n",
+				 object_table[i].instances);
+			dev_info(&client->dev, "\tReport Id's:\t%d\n",
+				 object_table[i].num_report_ids);
+		}
+	}
+
+	return 0;
+
+
+err_max_rid:
+	kfree(mxt->last_message);
+err_msg_alloc:
+	kfree(mxt->rid_map);
+err_rid_map_alloc:
+err_object_read:
+	kfree(raw_ib_data);
+err_ib_alloc:
+	kfree(object_table);
+err_object_table_alloc:
+	return error;
+}
+
+#if defined(CONFIG_PM)
+static int mxt_suspend(struct device *dev)
+{
+	struct mxt_data *mxt = dev_get_drvdata(dev);
+	int error, i;
+	u8 t7_deepsl_data[T7_DATA_SIZE];
+	u16 t7_addr;
+
+	if (device_may_wakeup(dev)) {
+		enable_irq_wake(mxt->irq);
+		return 0;
+	}
+
+	disable_irq(mxt->irq);
+
+	flush_delayed_work_sync(&mxt->dwork);
+
+	for (i = 0; i < T7_DATA_SIZE; i++)
+		t7_deepsl_data[i] = 0;
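+	/*
+	 * Writing zeros to the T7 power configuration (idle/active
+	 * acquisition intervals) stops scanning, i.e. puts the controller
+	 * into deep sleep.
+	 */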
+
+	t7_addr = MXT_BASE_ADDR(MXT_GEN_POWERCONFIG_T7, mxt);
+	/* save current power state values */
+	error = mxt_read_block(mxt->client, t7_addr,
+			ARRAY_SIZE(mxt->t7_data), mxt->t7_data);
+	if (error < 0)
+		goto err_enable_irq;
+
+	/* configure deep sleep mode */
+	error = mxt_write_block(mxt->client, t7_addr,
+			ARRAY_SIZE(t7_deepsl_data), t7_deepsl_data);
+	if (error < 0)
+		goto err_enable_irq;
+
+	/* power off the device */
+	if (mxt->power_on) {
+		error = mxt->power_on(false);
+		if (error) {
+			dev_err(dev, "power off failed");
+			goto err_write_block;
+		}
+	}
+	mxt->is_suspended = true;
+	return 0;
+
+err_write_block:
+	mxt_write_block(mxt->client, t7_addr,
+			ARRAY_SIZE(mxt->t7_data), mxt->t7_data);
+err_enable_irq:
+	enable_irq(mxt->irq);
+	return error;
+}
+
+static int mxt_resume(struct device *dev)
+{
+	struct mxt_data *mxt = dev_get_drvdata(dev);
+	int error;
+	u16 t7_addr;
+
+	if (device_may_wakeup(dev)) {
+		disable_irq_wake(mxt->irq);
+		return 0;
+	}
+
+	if (!mxt->is_suspended)
+		return 0;
+
+	/* power on the device */
+	if (mxt->power_on) {
+		error = mxt->power_on(true);
+		if (error) {
+			dev_err(dev, "power on failed");
+			return error;
+		}
+	}
+
+	t7_addr = MXT_BASE_ADDR(MXT_GEN_POWERCONFIG_T7, mxt);
+	/* restore the old power state values */
+	error = mxt_write_block(mxt->client, t7_addr,
+			ARRAY_SIZE(mxt->t7_data), mxt->t7_data);
+	if (error < 0)
+		goto err_write_block;
+
+	enable_irq(mxt->irq);
+
+	mxt->is_suspended = false;
+
+	/* Make sure we didn't just miss an interrupt. */
+	if (mxt->read_chg() == 0)
+		schedule_delayed_work(&mxt->dwork, 0);
+
+	return 0;
+
+err_write_block:
+	if (mxt->power_on)
+		mxt->power_on(false);
+	return error;
+}
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+static void mxt_early_suspend(struct early_suspend *h)
+{
+	struct mxt_data *mxt = container_of(h, struct mxt_data, early_suspend);
+
+	mxt_suspend(&mxt->client->dev);
+}
+
+static void mxt_late_resume(struct early_suspend *h)
+{
+	struct mxt_data *mxt = container_of(h, struct mxt_data, early_suspend);
+
+	mxt_resume(&mxt->client->dev);
+}
+#endif
+
+static const struct dev_pm_ops mxt_pm_ops = {
+#ifndef CONFIG_HAS_EARLYSUSPEND
+	.suspend	= mxt_suspend,
+	.resume		= mxt_resume,
+#endif
+};
+#endif
+
+static int __devinit mxt_probe(struct i2c_client *client,
+			       const struct i2c_device_id *id)
+{
+	struct mxt_data          *mxt;
+	struct mxt_platform_data *pdata;
+	struct input_dev         *input;
+	u8 *id_data;
+	u8 *t38_data;
+	u16 t38_addr;
+	int error;
+
+	mxt_debug(DEBUG_INFO, "mXT224: mxt_probe\n");
+
+	if (client == NULL) {
+		pr_debug("maXTouch: client == NULL\n");
+		return -EINVAL;
+	} else if (client->adapter == NULL) {
+		pr_debug("maXTouch: client->adapter == NULL\n");
+		return -EINVAL;
+	} else if (id == NULL) {
+		pr_debug("maXTouch: id == NULL\n");
+		return -EINVAL;
+	}
+
+	/* Enable runtime PM ops, start in ACTIVE mode */
+	error = pm_runtime_set_active(&client->dev);
+	if (error < 0)
+		dev_dbg(&client->dev, "unable to set runtime pm state\n");
+	pm_runtime_enable(&client->dev);
+
+	mxt_debug(DEBUG_INFO, "maXTouch driver v. %s\n", DRIVER_VERSION);
+	mxt_debug(DEBUG_INFO, "\t \"%s\"\n", client->name);
+	mxt_debug(DEBUG_INFO, "\taddr:\t0x%04x\n", client->addr);
+	mxt_debug(DEBUG_INFO, "\tirq:\t%d\n", client->irq);
+	mxt_debug(DEBUG_INFO, "\tflags:\t0x%04x\n", client->flags);
+	mxt_debug(DEBUG_INFO, "\tadapter:\"%s\"\n", client->adapter->name);
+	mxt_debug(DEBUG_INFO, "\tdevice:\t\"%s\"\n", client->dev.init_name);
+
+	mxt_debug(DEBUG_TRACE, "maXTouch driver functionality OK\n");
+
+	/* Allocate structure - we need it to identify device */
+	mxt = kzalloc(sizeof(struct mxt_data), GFP_KERNEL);
+	if (mxt == NULL) {
+		dev_err(&client->dev, "insufficient memory\n");
+		error = -ENOMEM;
+		goto err_mxt_alloc;
+	}
+
+	id_data = kmalloc(MXT_ID_BLOCK_SIZE, GFP_KERNEL);
+	if (id_data == NULL) {
+		dev_err(&client->dev, "insufficient memory\n");
+		error = -ENOMEM;
+		goto err_id_alloc;
+	}
+
+	input = input_allocate_device();
+	if (!input) {
+		dev_err(&client->dev, "error allocating input device\n");
+		error = -ENOMEM;
+		goto err_input_dev_alloc;
+	}
+
+	/* Initialize Platform data */
+
+	pdata = client->dev.platform_data;
+	if (pdata == NULL) {
+		dev_err(&client->dev, "platform data is required!\n");
+		error = -EINVAL;
+		goto err_pdata;
+	}
+	if (debug >= DEBUG_TRACE)
+		printk(KERN_INFO "Platform OK: pdata = %p\n", pdata);
+
+	mxt->is_suspended = false;
+	mxt->read_fail_counter = 0;
+	mxt->message_counter   = 0;
+
+	if (pdata->min_x)
+		mxt->min_x_val = pdata->min_x;
+	else
+		mxt->min_x_val = 0;
+
+	if (pdata->min_y)
+		mxt->min_y_val = pdata->min_y;
+	else
+		mxt->min_y_val = 0;
+
+	mxt->max_x_val         = pdata->max_x;
+	mxt->max_y_val         = pdata->max_y;
+
+	/* Get data that is defined in board specific code. */
+	mxt->init_hw = pdata->init_platform_hw;
+	mxt->exit_hw = pdata->exit_platform_hw;
+	mxt->power_on = pdata->power_on;
+	mxt->read_chg = pdata->read_chg;
+
+	if (pdata->valid_interrupt != NULL)
+		mxt->valid_interrupt = pdata->valid_interrupt;
+	else
+		mxt->valid_interrupt = mxt_valid_interrupt_dummy;
+
+	if (mxt->init_hw) {
+		error = mxt->init_hw(client);
+		if (error) {
+			dev_err(&client->dev, "hw init failed");
+			goto err_init_hw;
+		}
+	}
+
+	/* power on the device */
+	if (mxt->power_on) {
+		error = mxt->power_on(true);
+		if (error) {
+			dev_err(&client->dev, "power on failed");
+			goto err_pwr_on;
+		}
+	}
+
+	if (debug >= DEBUG_TRACE)
+		printk(KERN_INFO "maXTouch driver identifying chip\n");
+
+	if (mxt_identify(client, mxt, id_data) < 0) {
+		dev_err(&client->dev, "Chip could not be identified\n");
+		error = -ENODEV;
+		goto err_identify;
+	}
+	/* Chip is valid and active. */
+	if (debug >= DEBUG_TRACE)
+		printk(KERN_INFO "maXTouch driver allocating input device\n");
+
+	mxt->client = client;
+	mxt->input  = input;
+
+	INIT_DELAYED_WORK(&mxt->dwork, mxt_worker);
+	mutex_init(&mxt->debug_mutex);
+	mutex_init(&mxt->msg_mutex);
+	mxt_debug(DEBUG_TRACE, "maXTouch driver creating device name\n");
+
+	snprintf(
+		mxt->phys_name,
+		sizeof(mxt->phys_name),
+		"%s/input0",
+		dev_name(&client->dev)
+	);
+	input->name = "Atmel maXTouch Touchscreen controller";
+	input->phys = mxt->phys_name;
+	input->id.bustype = BUS_I2C;
+	input->dev.parent = &client->dev;
+
+	mxt_debug(DEBUG_INFO, "maXTouch name: \"%s\"\n", input->name);
+	mxt_debug(DEBUG_INFO, "maXTouch phys: \"%s\"\n", input->phys);
+	mxt_debug(DEBUG_INFO, "maXTouch driver setting abs parameters\n");
+	
+	set_bit(BTN_TOUCH, input->keybit);
+
+	/* Single touch */
+	input_set_abs_params(input, ABS_X, mxt->min_x_val,
+				mxt->max_x_val, 0, 0);
+	input_set_abs_params(input, ABS_Y, mxt->min_y_val,
+				mxt->max_y_val, 0, 0);
+	input_set_abs_params(input, ABS_PRESSURE, 0, MXT_MAX_REPORTED_PRESSURE,
+			     0, 0);
+	input_set_abs_params(input, ABS_TOOL_WIDTH, 0, MXT_MAX_REPORTED_WIDTH,
+			     0, 0);
+
+	/* Multitouch */
+	input_set_abs_params(input, ABS_MT_POSITION_X, mxt->min_x_val,
+				mxt->max_x_val, 0, 0);
+	input_set_abs_params(input, ABS_MT_POSITION_Y, mxt->min_y_val,
+				mxt->max_y_val, 0, 0);
+	input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, MXT_MAX_TOUCH_SIZE,
+			     0, 0);
+	input_set_abs_params(input, ABS_MT_TRACKING_ID, 0, MXT_MAX_NUM_TOUCHES,
+			     0, 0);
+	
+	__set_bit(EV_ABS, input->evbit);
+	__set_bit(EV_SYN, input->evbit);
+	__set_bit(EV_KEY, input->evbit);
+	__set_bit(EV_MSC, input->evbit);
+	input->mscbit[0] = BIT_MASK(MSC_GESTURE);
+
+	mxt_debug(DEBUG_TRACE, "maXTouch driver setting client data\n");
+	i2c_set_clientdata(client, mxt);
+	mxt_debug(DEBUG_TRACE, "maXTouch driver setting drv data\n");
+	input_set_drvdata(input, mxt);
+	mxt_debug(DEBUG_TRACE, "maXTouch driver input register device\n");
+	error = input_register_device(mxt->input);
+	if (error < 0) {
+		dev_err(&client->dev,
+			"Failed to register input device\n");
+		goto err_register_device;
+	}
+
+	error = mxt_read_object_table(client, mxt, id_data);
+	if (error < 0)
+		goto err_read_ot;
+
+
+	/* Create debugfs entries. */
+	mxt->debug_dir = debugfs_create_dir("maXTouch", NULL);
+	if (mxt->debug_dir == ERR_PTR(-ENODEV)) {
+		/* debugfs is not enabled. */
+		printk(KERN_WARNING "debugfs not enabled in kernel\n");
+	} else if (mxt->debug_dir == NULL) {
+		printk(KERN_WARNING "error creating debugfs dir\n");
+	} else {
+		mxt_debug(DEBUG_TRACE, "created \"maXTouch\" debugfs dir\n");
+		
+		debugfs_create_file("deltas", S_IRUSR, mxt->debug_dir, mxt, 
+				    &delta_fops);
+		debugfs_create_file("refs", S_IRUSR, mxt->debug_dir, mxt,
+				    &refs_fops);
+	}
+
+	/* Create character device nodes for reading & writing registers */
+	mxt->mxt_class = class_create(THIS_MODULE, "maXTouch_memory");
+	if (IS_ERR(mxt->mxt_class)) {
+		printk(KERN_WARNING "class create failed! exiting...\n");
+		error = PTR_ERR(mxt->mxt_class);
+		goto err_class_create;
+	}
+	/* Two minor numbers: one for memory access and one for messages */
+	error = alloc_chrdev_region(&mxt->dev_num, 0, 2,
+				    "maXTouch_memory");
+	if (error)
+		printk(KERN_WARNING "Error registering device\n");
+	else
+		mxt_debug(DEBUG_VERBOSE,
+			"device number %d allocated!\n", MAJOR(mxt->dev_num));
+	cdev_init(&mxt->cdev, &mxt_memory_fops);
+	cdev_init(&mxt->cdev_messages, &mxt_message_fops);
+	
+	mxt_debug(DEBUG_VERBOSE, "cdev initialized\n");
+	mxt->cdev.owner = THIS_MODULE;
+	mxt->cdev_messages.owner = THIS_MODULE;
+	
+	error = cdev_add(&mxt->cdev, mxt->dev_num, 1);
+	if (error){
+		printk(KERN_WARNING "Bad cdev\n");
+	}
+	
+	error = cdev_add(&mxt->cdev_messages, mxt->dev_num + 1, 1);
+	if (error){
+		printk(KERN_WARNING "Bad cdev\n");
+	}
+	
+	mxt_debug(DEBUG_VERBOSE, "cdev added\n");
+	
+	device_create(mxt->mxt_class, NULL, MKDEV(MAJOR(mxt->dev_num), 0), NULL,
+		"maXTouch");
+
+	device_create(mxt->mxt_class, NULL, MKDEV(MAJOR(mxt->dev_num), 1), NULL,
+		"maXTouch_messages");
+
+	mxt->msg_buffer_startp = 0;
+	mxt->msg_buffer_endp = 0;
+
+	/* Allocate the interrupt */
+	mxt_debug(DEBUG_TRACE, "maXTouch driver allocating interrupt...\n");
+	mxt->irq = client->irq;
+	mxt->valid_irq_counter = 0;
+	mxt->invalid_irq_counter = 0;
+	mxt->irq_counter = 0;
+	if (mxt->irq) {
+		/* Try to request IRQ with falling edge first. This is
+		 * not always supported. If it fails, try with any edge. */
+		error = request_irq(mxt->irq,
+				    mxt_irq_handler,
+				    IRQF_TRIGGER_FALLING,
+				    client->dev.driver->name,
+				    mxt);
+		if (error < 0) {
+			/* TODO: why only 0 works on STK1000? */
+			error = request_irq(mxt->irq,
+					    mxt_irq_handler,
+					    0,
+					    client->dev.driver->name,
+					    mxt);
+		}
+
+		if (error < 0) {
+			dev_err(&client->dev,
+				"failed to allocate irq %d\n", mxt->irq);
+			goto err_irq;
+		}
+	}
+
+	if (debug > DEBUG_INFO)
+		dev_info(&client->dev, "touchscreen, irq %d\n", mxt->irq);
+		
+	t38_data = kmalloc(t38_size*sizeof(u8), GFP_KERNEL);
+
+	if (t38_data == NULL) {
+		dev_err(&client->dev, "insufficient memory\n");
+		error = -ENOMEM;
+		goto err_t38;
+	}
+
+	t38_addr = MXT_BASE_ADDR(MXT_USER_INFO_T38, mxt);
+	mxt_read_block(client, t38_addr, t38_size, t38_data);
+	dev_info(&client->dev, "VERSION:%02x.%02x.%02x, DATE: %d/%d/%d\n",
+		t38_data[0], t38_data[1], t38_data[2],
+		t38_data[3], t38_data[4], t38_data[5]);
+
+	/* Schedule a worker routine to read any messages that might have
+	 * been sent before interrupts were enabled. */
+	cancel_delayed_work(&mxt->dwork);
+	schedule_delayed_work(&mxt->dwork, 0);
+	kfree(t38_data);
+	kfree(id_data);
+
+	device_init_wakeup(&client->dev, pdata->wakeup);
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+	mxt->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN +
+						MXT_SUSPEND_LEVEL;
+	mxt->early_suspend.suspend = mxt_early_suspend;
+	mxt->early_suspend.resume = mxt_late_resume;
+	register_early_suspend(&mxt->early_suspend);
+#endif
+
+	return 0;
+
+err_t38:
+	free_irq(mxt->irq, mxt);
+err_irq:
+err_class_create:
+	if (mxt->debug_dir)
+		debugfs_remove(mxt->debug_dir);
+	kfree(mxt->last_message);
+	kfree(mxt->rid_map);
+	kfree(mxt->object_table);
+err_read_ot:
+	input_unregister_device(mxt->input);
+	mxt->input = NULL;
+err_register_device:
+	mutex_destroy(&mxt->debug_mutex);
+	mutex_destroy(&mxt->msg_mutex);
+err_identify:
+	if (mxt->power_on)
+		mxt->power_on(false);
+err_pwr_on:
+	if (mxt->exit_hw != NULL)
+		mxt->exit_hw(client);
+err_init_hw:
+err_pdata:
+	input_free_device(input);
+err_input_dev_alloc:
+	kfree(id_data);
+err_id_alloc:
+	kfree(mxt);
+err_mxt_alloc:
+	pm_runtime_set_suspended(&client->dev);
+	pm_runtime_disable(&client->dev);
+	return error;
+}
+
+static int __devexit mxt_remove(struct i2c_client *client)
+{
+	struct mxt_data *mxt;
+
+	pm_runtime_set_suspended(&client->dev);
+	pm_runtime_disable(&client->dev);
+
+	mxt = i2c_get_clientdata(client);
+
+	/* Remove debug dir entries */
+	debugfs_remove_recursive(mxt->debug_dir);
+
+	device_init_wakeup(&client->dev, 0);
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+	unregister_early_suspend(&mxt->early_suspend);
+#endif
+
+	if (mxt != NULL) {
+		if (mxt->power_on)
+			mxt->power_on(false);
+
+		if (mxt->exit_hw != NULL)
+			mxt->exit_hw(client);
+
+		if (mxt->irq) {
+			free_irq(mxt->irq, mxt);
+		}
+
+		unregister_chrdev_region(mxt->dev_num, 2);
+		device_destroy(mxt->mxt_class, MKDEV(MAJOR(mxt->dev_num), 0));
+		device_destroy(mxt->mxt_class, MKDEV(MAJOR(mxt->dev_num), 1));
+		cdev_del(&mxt->cdev);
+		cdev_del(&mxt->cdev_messages);
+		cancel_delayed_work_sync(&mxt->dwork);
+		input_unregister_device(mxt->input);
+		class_destroy(mxt->mxt_class);
+
+		kfree(mxt->rid_map);
+		kfree(mxt->object_table);
+		kfree(mxt->last_message);
+	}
+	kfree(mxt);
+
+	i2c_set_clientdata(client, NULL);
+	if (debug >= DEBUG_TRACE)
+		dev_info(&client->dev, "Touchscreen unregistered\n");
+
+	return 0;
+}
+
+static const struct i2c_device_id mxt_idtable[] = {
+	{"maXTouch", 0,},
+	{ }
+};
+
+MODULE_DEVICE_TABLE(i2c, mxt_idtable);
+
+static struct i2c_driver mxt_driver = {
+	.driver = {
+		.name	= "maXTouch",
+		.owner  = THIS_MODULE,
+#if defined(CONFIG_PM)
+		.pm = &mxt_pm_ops,
+#endif
+	},
+
+	.id_table	= mxt_idtable,
+	.probe		= mxt_probe,
+	.remove		= __devexit_p(mxt_remove),
+};
+
+static int __init mxt_init(void)
+{
+	int err;
+	err = i2c_add_driver(&mxt_driver);
+	if (err) {
+		printk(KERN_WARNING "Adding maXTouch driver failed "
+		       "(errno = %d)\n", err);
+	} else {
+		mxt_debug(DEBUG_TRACE, "Successfully added driver %s\n",
+		          mxt_driver.driver.name);
+	}
+	return err;
+}
+
+static void __exit mxt_cleanup(void)
+{
+	i2c_del_driver(&mxt_driver);
+}
+
+
+module_init(mxt_init);
+module_exit(mxt_cleanup);
+
+MODULE_AUTHOR("Iiro Valkonen");
+MODULE_DESCRIPTION("Driver for Atmel maXTouch Touchscreen Controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/cy8c_tmg_ts.c b/drivers/input/touchscreen/cy8c_tmg_ts.c
new file mode 100644
index 0000000..f48374e
--- /dev/null
+++ b/drivers/input/touchscreen/cy8c_tmg_ts.c
@@ -0,0 +1,467 @@
+/* drivers/input/touchscreen/cy8c_tmg_ts.c
+ *
+ * Copyright (C) 2007-2008 HTC Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/cy8c_tmg_ts.h>
+#include <linux/delay.h>
+#include <linux/earlysuspend.h>
+#include <linux/hrtimer.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define CY8C_REG_START_NEW_SCAN 0x0F
+#define CY8C_REG_INTR_STATUS    0x3C
+#define CY8C_REG_VERSION        0x3E
+
+struct cy8c_ts_data {
+	struct i2c_client *client;
+	struct input_dev *input_dev;
+	int use_irq;
+	struct hrtimer timer;
+	struct work_struct work;
+	uint16_t version;
+	int (*power) (int on);
+	struct early_suspend early_suspend;
+};
+
+struct workqueue_struct *cypress_touch_wq;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void cy8c_ts_early_suspend(struct early_suspend *h);
+static void cy8c_ts_late_resume(struct early_suspend *h);
+#endif
+
+uint16_t sample_count, X_mean, Y_mean, first_touch;
+
+static s32 cy8c_read_word_data(struct i2c_client *client,
+			       u8 command, uint16_t * data)
+{
+	s32 ret = i2c_smbus_read_word_data(client, command);
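+	/*
+	 * i2c_smbus_read_word_data() returns the two register bytes in
+	 * little-endian order; the swap below puts the first byte transferred
+	 * into the high byte of *data.
+	 */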
+	if (ret >= 0)
+		*data = (u16) ((ret << 8) | (ret >> 8));
+	return ret;
+}
+
+static int cy8c_init_panel(struct cy8c_ts_data *ts)
+{
+	int ret;
+	sample_count = X_mean = Y_mean = first_touch = 0;
+
+	/* clean intr busy */
+	ret = i2c_smbus_write_byte_data(ts->client, CY8C_REG_INTR_STATUS,
+					0x00);
+	if (ret < 0) {
+		dev_err(&ts->client->dev,
+			"cy8c_init_panel failed for clean intr busy\n");
+		goto exit;
+	}
+
+	/* start new scan */
+	ret = i2c_smbus_write_byte_data(ts->client, CY8C_REG_START_NEW_SCAN,
+					0x01);
+	if (ret < 0) {
+		dev_err(&ts->client->dev,
+			"cy8c_init_panel failed for start new scan\n");
+		goto exit;
+	}
+
+exit:
+	return ret;
+}
+
+static void cy8c_ts_reset(struct i2c_client *client)
+{
+	struct cy8c_ts_data *ts = i2c_get_clientdata(client);
+
+	if (ts->power) {
+		ts->power(0);
+		msleep(10);
+		ts->power(1);
+		msleep(10);
+	}
+
+	cy8c_init_panel(ts);
+}
+
+static void cy8c_ts_work_func(struct work_struct *work)
+{
+	struct cy8c_ts_data *ts = container_of(work, struct cy8c_ts_data, work);
+	uint16_t x1, y1, x2, y2;
+	uint8_t is_touch, start_reg, force, area, finger2_pressed;
+	uint8_t buf[11];
+	struct i2c_msg msg[2];
+	int ret = 0;
+
+	x2 = y2 = finger2_pressed = 0;
+
+	/*printk("%s: enter\n",__func__);*/
+	is_touch = i2c_smbus_read_byte_data(ts->client, 0x20);
+	dev_dbg(&ts->client->dev, "fIsTouch %d,\n", is_touch);
+	if (is_touch < 0 || is_touch > 3) {
+		pr_err("%s: invalid is_touch = %d\n", __func__, is_touch);
+		cy8c_ts_reset(ts->client);
+		msleep(10);
+		goto done;
+	}
+
+	msg[0].addr = ts->client->addr;
+	msg[0].flags = 0;
+	msg[0].len = 1;
+	start_reg = 0x16;
+	msg[0].buf = &start_reg;
+
+	msg[1].addr = ts->client->addr;
+	msg[1].flags = I2C_M_RD;
+	msg[1].len = sizeof(buf);
+	msg[1].buf = buf;
+
+	ret = i2c_transfer(ts->client->adapter, msg, 2);
+	if (ret < 0)
+		goto done;
+
+	/* parse data */
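+	/*
+	 * Report layout (11 bytes starting at register 0x16): force, area,
+	 * then X1, X2, Y1, Y2 as big-endian 16-bit values, and finally the
+	 * finger count.
+	 */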
+	force = buf[0];
+	area = buf[1];
+	x1 = (buf[2] << 8) | buf[3];
+	y1 = (buf[6] << 8) | buf[7];
+	is_touch = buf[10];
+
+	if (is_touch == 2) {
+		x2 = (buf[4] << 8) | buf[5];
+		y2 = (buf[8] << 8) | buf[9];
+		finger2_pressed = 1;
+	}
+
+	dev_dbg(&ts->client->dev,
+		"bFingerForce %d, bFingerArea %d \n", force, area);
+	dev_dbg(&ts->client->dev, "x1: %d, y1: %d \n", x1, y1);
+	if (finger2_pressed)
+		dev_dbg(&ts->client->dev, "x2: %d, y2: %d \n", x2, y2);
+
+	/* drop the first one? */
+	if ((is_touch == 1) && (first_touch == 0)) {
+		first_touch = 1;
+		goto done;
+	}
+
+	if (!first_touch)
+		goto done;
+
+	if (is_touch == 2)
+		finger2_pressed = 1;
+
+	input_report_abs(ts->input_dev, ABS_X, x1);
+	input_report_abs(ts->input_dev, ABS_Y, y1);
+	input_report_abs(ts->input_dev, ABS_PRESSURE, force);
+	input_report_abs(ts->input_dev, ABS_TOOL_WIDTH, area);
+	input_report_key(ts->input_dev, BTN_TOUCH, is_touch);
+	input_report_key(ts->input_dev, BTN_2, finger2_pressed);
+
+	if (finger2_pressed) {
+		input_report_abs(ts->input_dev, ABS_HAT0X, x2);
+		input_report_abs(ts->input_dev, ABS_HAT0Y, y2);
+	}
+	input_sync(ts->input_dev);
+
+done:
+	if (is_touch == 0)
+		first_touch = sample_count = 0;
+
+	/* prepare for next intr */
+	i2c_smbus_write_byte_data(ts->client, CY8C_REG_INTR_STATUS, 0x00);
+	if (!ts->use_irq)
+		hrtimer_start(&ts->timer, ktime_set(0, 12500000), HRTIMER_MODE_REL);
+	else
+		enable_irq(ts->client->irq);
+}
+
+static enum hrtimer_restart cy8c_ts_timer_func(struct hrtimer *timer)
+{
+	struct cy8c_ts_data *ts;
+
+	ts = container_of(timer, struct cy8c_ts_data, timer);
+	queue_work(cypress_touch_wq, &ts->work);
+	return HRTIMER_NORESTART;
+}
+
+static irqreturn_t cy8c_ts_irq_handler(int irq, void *dev_id)
+{
+	struct cy8c_ts_data *ts = dev_id;
+
+	disable_irq_nosync(ts->client->irq);
+	queue_work(cypress_touch_wq, &ts->work);
+	return IRQ_HANDLED;
+}
+
+static int cy8c_ts_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	struct cy8c_ts_data *ts;
+	struct cy8c_i2c_platform_data *pdata;
+	uint16_t panel_version;
+	int ret = 0;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		dev_err(&client->dev, "need I2C_FUNC_I2C\n");
+		ret = -ENODEV;
+		goto err_check_functionality_failed;
+	}
+
+	ts = kzalloc(sizeof(struct cy8c_ts_data), GFP_KERNEL);
+	if (ts == NULL) {
+		dev_err(&client->dev, "allocate cy8c_ts_data failed\n");
+		ret = -ENOMEM;
+		goto err_alloc_data_failed;
+	}
+
+	INIT_WORK(&ts->work, cy8c_ts_work_func);
+	ts->client = client;
+	i2c_set_clientdata(client, ts);
+
+	pdata = client->dev.platform_data;
+	if (pdata) {
+		ts->version = pdata->version;
+		ts->power = pdata->power;
+	}
+
+	if (ts->power) {
+		ret = ts->power(1);
+		msleep(10);
+		if (ret < 0) {
+			dev_err(&client->dev, "power on failed\n");
+			goto err_power_failed;
+		}
+	}
+
+	ret = cy8c_read_word_data(ts->client, CY8C_REG_VERSION, &panel_version);
+	if (ret < 0) {
+		dev_err(&client->dev, "init panel failed\n");
+		goto err_detect_failed;
+	}
+	dev_info(&client->dev, "Panel Version %04X\n", panel_version);
+	if (pdata) {
+		while (pdata->version > panel_version) {
+			dev_info(&client->dev, "older touch panel detected, "
+				 "panel version = %x\n", panel_version);
+			pdata++;
+		}
+	}
+
+	ret = cy8c_init_panel(ts);
+	if (ret < 0) {
+		dev_err(&client->dev, "init panel failed\n");
+		goto err_detect_failed;
+	}
+
+	ts->input_dev = input_allocate_device();
+	if (ts->input_dev == NULL) {
+		ret = -ENOMEM;
+		dev_err(&client->dev, "Failed to allocate input device\n");
+		goto err_input_dev_alloc_failed;
+	}
+	ts->input_dev->name = "cy8c-touchscreen";
+
+	set_bit(EV_SYN, ts->input_dev->evbit);
+	set_bit(EV_ABS, ts->input_dev->evbit);
+	set_bit(EV_KEY, ts->input_dev->evbit);
+	input_set_capability(ts->input_dev, EV_KEY, BTN_TOUCH);
+	input_set_capability(ts->input_dev, EV_KEY, BTN_2);
+
+	input_set_abs_params(ts->input_dev, ABS_X,
+			     pdata->abs_x_min, pdata->abs_x_max, 5, 0);
+	input_set_abs_params(ts->input_dev, ABS_Y,
+			     pdata->abs_y_min, pdata->abs_y_max, 5, 0);
+	input_set_abs_params(ts->input_dev, ABS_HAT0X,
+			     pdata->abs_x_min, pdata->abs_x_max, 0, 0);
+	input_set_abs_params(ts->input_dev, ABS_HAT0Y,
+			     pdata->abs_y_min, pdata->abs_y_max, 0, 0);
+	input_set_abs_params(ts->input_dev, ABS_PRESSURE,
+			     pdata->abs_pressure_min, pdata->abs_pressure_max,
+			     0, 0);
+	input_set_abs_params(ts->input_dev, ABS_TOOL_WIDTH,
+			     pdata->abs_width_min, pdata->abs_width_max, 0, 0);
+
+	ret = input_register_device(ts->input_dev);
+	if (ret) {
+		dev_err(&client->dev,
+			"cy8c_ts_probe: Unable to register %s input device\n",
+			ts->input_dev->name);
+		goto err_input_register_device_failed;
+	}
+
+	if (client->irq) {
+		ret = request_irq(client->irq, cy8c_ts_irq_handler,
+				  IRQF_TRIGGER_LOW, CYPRESS_TMG_NAME, ts);
+		if (ret == 0)
+			ts->use_irq = 1;
+		else
+			dev_err(&client->dev, "request_irq failed\n");
+	}
+
+	if (!ts->use_irq) {
+		hrtimer_init(&ts->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		ts->timer.function = cy8c_ts_timer_func;
+		hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
+	}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	ts->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+	ts->early_suspend.suspend = cy8c_ts_early_suspend;
+	ts->early_suspend.resume = cy8c_ts_late_resume;
+	register_early_suspend(&ts->early_suspend);
+#endif
+
+	dev_info(&client->dev, "Start touchscreen %s in %s mode\n",
+		 ts->input_dev->name, (ts->use_irq ? "interrupt" : "polling"));
+
+	return 0;
+
+err_input_register_device_failed:
+	input_free_device(ts->input_dev);
+
+err_input_dev_alloc_failed:
+	if (ts->power)
+		ts->power(0);
+
+err_detect_failed:
+err_power_failed:
+	kfree(ts);
+
+err_alloc_data_failed:
+err_check_functionality_failed:
+	return ret;
+}
+
+static int cy8c_ts_remove(struct i2c_client *client)
+{
+	struct cy8c_ts_data *ts = i2c_get_clientdata(client);
+
+	unregister_early_suspend(&ts->early_suspend);
+
+	if (ts->use_irq)
+		free_irq(client->irq, ts);
+	else
+		hrtimer_cancel(&ts->timer);
+
+	input_unregister_device(ts->input_dev);
+	kfree(ts);
+
+	return 0;
+}
+
+static int cy8c_ts_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+	struct cy8c_ts_data *ts = i2c_get_clientdata(client);
+	int ret;
+
+	if (ts->use_irq)
+		disable_irq_nosync(client->irq);
+	else
+		hrtimer_cancel(&ts->timer);
+
+	ret = cancel_work_sync(&ts->work);
+	if (ret && ts->use_irq)
+		enable_irq(client->irq);
+
+	if (ts->power)
+		ts->power(0);
+
+	return 0;
+}
+
+static int cy8c_ts_resume(struct i2c_client *client)
+{
+	int ret;
+	struct cy8c_ts_data *ts = i2c_get_clientdata(client);
+
+	if (ts->power) {
+		ret = ts->power(1);
+		if (ret < 0)
+			dev_err(&client->dev,
+				"cy8c_ts_resume power on failed\n");
+		msleep(10);
+
+		cy8c_init_panel(ts);
+	}
+
+	if (ts->use_irq)
+		enable_irq(client->irq);
+	else
+		hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
+
+	return 0;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void cy8c_ts_early_suspend(struct early_suspend *h)
+{
+	struct cy8c_ts_data *ts;
+	ts = container_of(h, struct cy8c_ts_data, early_suspend);
+	cy8c_ts_suspend(ts->client, PMSG_SUSPEND);
+}
+
+static void cy8c_ts_late_resume(struct early_suspend *h)
+{
+	struct cy8c_ts_data *ts;
+	ts = container_of(h, struct cy8c_ts_data, early_suspend);
+	cy8c_ts_resume(ts->client);
+}
+#endif
+
+static const struct i2c_device_id cy8c_ts_i2c_id[] = {
+	{CYPRESS_TMG_NAME, 0},
+	{}
+};
+
+static struct i2c_driver cy8c_ts_driver = {
+	.id_table = cy8c_ts_i2c_id,
+	.probe = cy8c_ts_probe,
+	.remove = cy8c_ts_remove,
+#ifndef CONFIG_HAS_EARLYSUSPEND
+	.suspend = cy8c_ts_suspend,
+	.resume = cy8c_ts_resume,
+#endif
+	.driver = {
+		.name = CYPRESS_TMG_NAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init cy8c_ts_init(void)
+{
+	cypress_touch_wq = create_singlethread_workqueue("cypress_touch_wq");
+	if (!cypress_touch_wq)
+		return -ENOMEM;
+
+	return i2c_add_driver(&cy8c_ts_driver);
+}
+
+static void __exit cy8c_ts_exit(void)
+{
+	i2c_del_driver(&cy8c_ts_driver);
+
+	if (cypress_touch_wq)
+		destroy_workqueue(cypress_touch_wq);
+}
+
+module_init(cy8c_ts_init);
+module_exit(cy8c_ts_exit);
+
+MODULE_DESCRIPTION("Cypress TMG Touchscreen Driver");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/input/touchscreen/cy8c_ts.c b/drivers/input/touchscreen/cy8c_ts.c
new file mode 100644
index 0000000..0b6406c
--- /dev/null
+++ b/drivers/input/touchscreen/cy8c_ts.c
@@ -0,0 +1,783 @@
+/* Source for:
+ * Cypress CY8CTMA300 Prototype touchscreen driver.
+ * drivers/input/touchscreen/cy8c_ts.c
+ *
+ * Copyright (C) 2009, 2010 Cypress Semiconductor, Inc.
+ * Copyright (c) 2010, 2011 Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Cypress reserves the right to make changes without further notice
+ * to the materials described herein. Cypress does not assume any
+ * liability arising out of the application described herein.
+ *
+ * Contact Cypress Semiconductor at www.cypress.com
+ *
+ * History:
+ *			(C) 2010 Cypress - Update for GPL distribution
+ *			(C) 2009 Cypress - Assume maintenance ownership
+ *			(C) 2009 Enea - Original prototype
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/input/cy8c_ts.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+#include <linux/earlysuspend.h>
+
+/* Early-suspend level */
+#define CY8C_TS_SUSPEND_LEVEL 1
+#endif
+
+#define CY8CTMA300	0x0
+#define CY8CTMG200	0x1
+
+#define INVALID_DATA	0xff
+
+#define TOUCHSCREEN_TIMEOUT	(msecs_to_jiffies(10))
+#define INITIAL_DELAY		(msecs_to_jiffies(25000))
+
+struct cy8c_ts_data {
+	u8 x_index;
+	u8 y_index;
+	u8 z_index;
+	u8 id_index;
+	u8 touch_index;
+	u8 data_reg;
+	u8 status_reg;
+	u8 data_size;
+	u8 touch_bytes;
+	u8 update_data;
+	u8 touch_meta_data;
+	u8 finger_size;
+};
+
+static struct cy8c_ts_data devices[] = {
+	[0] = {
+		.x_index = 6,
+		.y_index = 4,
+		.z_index = 3,
+		.id_index = 0,
+		.data_reg = 0x3,
+		.status_reg = 0x1,
+		.update_data = 0x4,
+		.touch_bytes = 8,
+		.touch_meta_data = 3,
+		.finger_size = 70,
+	},
+	[1] = {
+		.x_index = 2,
+		.y_index = 4,
+		.id_index = 6,
+		.data_reg = 0x6,
+		.status_reg = 0x5,
+		.update_data = 0x1,
+		.touch_bytes = 12,
+		.finger_size = 70,
+	},
+};
+
+struct cy8c_ts {
+	struct i2c_client *client;
+	struct input_dev *input;
+	struct delayed_work work;
+	struct workqueue_struct *wq;
+	struct cy8c_ts_platform_data *pdata;
+	struct cy8c_ts_data *dd;
+	u8 *touch_data;
+	u8 device_id;
+	u8 prev_touches;
+	bool is_suspended;
+	bool int_pending;
+	struct mutex sus_lock;
+	u32 pen_irq;
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+	struct early_suspend		early_suspend;
+#endif
+};
+
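+/* Combine the high byte 'a' and low byte 'b' of a coordinate into a u16. */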
+static inline u16 join_bytes(u8 a, u8 b)
+{
+	u16 ab = 0;
+	ab = ab | a;
+	ab = ab << 8 | b;
+	return ab;
+}
+
+static s32 cy8c_ts_write_reg_u8(struct i2c_client *client, u8 reg, u8 val)
+{
+	s32 data;
+
+	data = i2c_smbus_write_byte_data(client, reg, val);
+	if (data < 0)
+		dev_err(&client->dev, "error %d in writing reg 0x%x\n",
+						 data, reg);
+
+	return data;
+}
+
+static s32 cy8c_ts_read_reg_u8(struct i2c_client *client, u8 reg)
+{
+	s32 data;
+
+	data = i2c_smbus_read_byte_data(client, reg);
+	if (data < 0)
+		dev_err(&client->dev, "error %d in reading reg 0x%x\n",
+						 data, reg);
+
+	return data;
+}
+
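+/*
+ * Read 'num' bytes starting at register 'reg': the first I2C message writes
+ * the register address, the second reads the data back.
+ */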
+static int cy8c_ts_read(struct i2c_client *client, u8 reg, u8 *buf, int num)
+{
+	struct i2c_msg xfer_msg[2];
+
+	xfer_msg[0].addr = client->addr;
+	xfer_msg[0].len = 1;
+	xfer_msg[0].flags = 0;
+	xfer_msg[0].buf = &reg;
+
+	xfer_msg[1].addr = client->addr;
+	xfer_msg[1].len = num;
+	xfer_msg[1].flags = I2C_M_RD;
+	xfer_msg[1].buf = buf;
+
+	return i2c_transfer(client->adapter, xfer_msg, 2);
+}
+
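+/*
+ * Report one contact using the type-A multi-touch protocol, applying the
+ * platform swap/invert options to the raw coordinates first.
+ */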
+static void report_data(struct cy8c_ts *ts, u16 x, u16 y, u8 pressure, u8 id)
+{
+	if (ts->pdata->swap_xy)
+		swap(x, y);
+
+	/* handle inverting coordinates */
+	if (ts->pdata->invert_x)
+		x = ts->pdata->res_x - x;
+	if (ts->pdata->invert_y)
+		y = ts->pdata->res_y - y;
+
+	input_report_abs(ts->input, ABS_MT_TRACKING_ID, id);
+	input_report_abs(ts->input, ABS_MT_POSITION_X, x);
+	input_report_abs(ts->input, ABS_MT_POSITION_Y, y);
+	input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR, pressure);
+	input_report_abs(ts->input, ABS_MT_WIDTH_MAJOR, ts->dd->finger_size);
+	input_mt_sync(ts->input);
+}
+
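+/*
+ * TMA300: parse one fixed-size record per finger, report each contact, then
+ * report a zero-sized contact for every finger lifted since the last sample.
+ */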
+static void process_tma300_data(struct cy8c_ts *ts)
+{
+	u8 id, pressure, touches, i;
+	u16 x, y;
+
+	touches = ts->touch_data[ts->dd->touch_index];
+
+	for (i = 0; i < touches; i++) {
+		id = ts->touch_data[i * ts->dd->touch_bytes +
+						ts->dd->id_index];
+		pressure = ts->touch_data[i * ts->dd->touch_bytes +
+							ts->dd->z_index];
+		x = join_bytes(ts->touch_data[i * ts->dd->touch_bytes +
+							ts->dd->x_index],
+			ts->touch_data[i * ts->dd->touch_bytes +
+							ts->dd->x_index + 1]);
+		y = join_bytes(ts->touch_data[i * ts->dd->touch_bytes +
+							ts->dd->y_index],
+			ts->touch_data[i * ts->dd->touch_bytes +
+							ts->dd->y_index + 1]);
+
+		report_data(ts, x, y, pressure, id);
+	}
+
+	for (i = 0; i < ts->prev_touches - touches; i++) {
+		input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR, 0);
+		input_report_abs(ts->input, ABS_MT_WIDTH_MAJOR, 0);
+		input_mt_sync(ts->input);
+	}
+
+	ts->prev_touches = touches;
+	input_sync(ts->input);
+}
+
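+/*
+ * TMG200: the controller reports at most two contacts in a single record;
+ * when no touches remain, release all previously reported contacts.
+ */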
+static void process_tmg200_data(struct cy8c_ts *ts)
+{
+	u8 id, touches, i;
+	u16 x, y;
+
+	touches = ts->touch_data[ts->dd->touch_index];
+
+	if (touches > 0) {
+		x = join_bytes(ts->touch_data[ts->dd->x_index],
+				ts->touch_data[ts->dd->x_index+1]);
+		y = join_bytes(ts->touch_data[ts->dd->y_index],
+				ts->touch_data[ts->dd->y_index+1]);
+		id = ts->touch_data[ts->dd->id_index];
+
+		report_data(ts, x, y, 255, id - 1);
+
+		if (touches == 2) {
+			x = join_bytes(ts->touch_data[ts->dd->x_index+5],
+					ts->touch_data[ts->dd->x_index+6]);
+			y = join_bytes(ts->touch_data[ts->dd->y_index+5],
+				ts->touch_data[ts->dd->y_index+6]);
+			id = ts->touch_data[ts->dd->id_index+5];
+
+			report_data(ts, x, y, 255, id - 1);
+		}
+	} else {
+		for (i = 0; i < ts->prev_touches; i++) {
+			input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR,	0);
+			input_report_abs(ts->input, ABS_MT_WIDTH_MAJOR,	0);
+			input_mt_sync(ts->input);
+		}
+	}
+
+	input_sync(ts->input);
+	ts->prev_touches = touches;
+}
+
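+/*
+ * Deferred interrupt work: skip processing while suspended, read the touch
+ * data block, report it, re-enable the interrupt and acknowledge the
+ * controller by writing the status register.
+ */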
+static void cy8c_ts_xy_worker(struct work_struct *work)
+{
+	int rc;
+	struct cy8c_ts *ts = container_of(work, struct cy8c_ts,
+				 work.work);
+
+	mutex_lock(&ts->sus_lock);
+	if (ts->is_suspended == true) {
+		dev_dbg(&ts->client->dev, "TS is suspended\n");
+		ts->int_pending = true;
+		mutex_unlock(&ts->sus_lock);
+		return;
+	}
+	mutex_unlock(&ts->sus_lock);
+
+	/* read data from DATA_REG */
+	rc = cy8c_ts_read(ts->client, ts->dd->data_reg, ts->touch_data,
+							ts->dd->data_size);
+	if (rc < 0) {
+		dev_err(&ts->client->dev, "read failed\n");
+		goto schedule;
+	}
+
+	if (ts->touch_data[ts->dd->touch_index] == INVALID_DATA)
+		goto schedule;
+
+	if (ts->device_id == CY8CTMA300)
+		process_tma300_data(ts);
+	else
+		process_tmg200_data(ts);
+
+schedule:
+	enable_irq(ts->pen_irq);
+
+	/* write to STATUS_REG to update coordinates*/
+	rc = cy8c_ts_write_reg_u8(ts->client, ts->dd->status_reg,
+						ts->dd->update_data);
+	if (rc < 0) {
+		dev_err(&ts->client->dev, "write failed, try once more\n");
+
+		rc = cy8c_ts_write_reg_u8(ts->client, ts->dd->status_reg,
+						ts->dd->update_data);
+		if (rc < 0)
+			dev_err(&ts->client->dev, "write failed, exiting\n");
+	}
+}
+
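+/* Hard IRQ handler: mask the line and defer the I2C traffic to the workqueue. */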
+static irqreturn_t cy8c_ts_irq(int irq, void *dev_id)
+{
+	struct cy8c_ts *ts = dev_id;
+
+	disable_irq_nosync(irq);
+
+	queue_delayed_work(ts->wq, &ts->work, 0);
+
+	return IRQ_HANDLED;
+}
+
+static int cy8c_ts_init_ts(struct i2c_client *client, struct cy8c_ts *ts)
+{
+	struct input_dev *input_device;
+	int rc = 0;
+
+	ts->dd = &devices[ts->device_id];
+
+	if (!ts->pdata->nfingers) {
+		dev_err(&client->dev, "Touches information not specified\n");
+		return -EINVAL;
+	}
+
+	if (ts->device_id == CY8CTMA300) {
+		if (ts->pdata->nfingers > 10) {
+			dev_err(&client->dev, "nfingers must be between 1 and 10\n");
+			return -EINVAL;
+		}
+		ts->dd->data_size = ts->pdata->nfingers * ts->dd->touch_bytes +
+						ts->dd->touch_meta_data;
+		ts->dd->touch_index = ts->pdata->nfingers *
+						ts->dd->touch_bytes;
+	} else if (ts->device_id == CY8CTMG200) {
+		if (ts->pdata->nfingers > 2) {
+			dev_err(&client->dev, "nfingers must be between 1 and 2\n");
+			return -EINVAL;
+		}
+		ts->dd->data_size = ts->dd->touch_bytes;
+		ts->dd->touch_index = 0x0;
+	}
+
+	ts->touch_data = kzalloc(ts->dd->data_size, GFP_KERNEL);
+	if (!ts->touch_data) {
+		pr_err("%s: Unable to allocate memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	ts->prev_touches = 0;
+
+	input_device = input_allocate_device();
+	if (!input_device) {
+		rc = -ENOMEM;
+		goto error_alloc_dev;
+	}
+
+	ts->input = input_device;
+	input_device->name = ts->pdata->ts_name;
+	input_device->id.bustype = BUS_I2C;
+	input_device->dev.parent = &client->dev;
+	input_set_drvdata(input_device, ts);
+
+	__set_bit(EV_ABS, input_device->evbit);
+
+	input_set_abs_params(input_device, ABS_MT_POSITION_X,
+			ts->pdata->dis_min_x, ts->pdata->dis_max_x, 0, 0);
+	input_set_abs_params(input_device, ABS_MT_POSITION_Y,
+			ts->pdata->dis_min_y, ts->pdata->dis_max_y, 0, 0);
+	input_set_abs_params(input_device, ABS_MT_TOUCH_MAJOR,
+			ts->pdata->min_touch, ts->pdata->max_touch, 0, 0);
+	input_set_abs_params(input_device, ABS_MT_WIDTH_MAJOR,
+			ts->pdata->min_width, ts->pdata->max_width, 0, 0);
+	input_set_abs_params(input_device, ABS_MT_TRACKING_ID,
+			ts->pdata->min_tid, ts->pdata->max_tid, 0, 0);
+
+	ts->wq = create_singlethread_workqueue("kworkqueue_ts");
+	if (!ts->wq) {
+		dev_err(&client->dev, "Could not create workqueue\n");
+		goto error_wq_create;
+	}
+
+	INIT_DELAYED_WORK(&ts->work, cy8c_ts_xy_worker);
+
+	rc = input_register_device(input_device);
+	if (rc)
+		goto error_unreg_device;
+
+	return 0;
+
+error_unreg_device:
+	destroy_workqueue(ts->wq);
+error_wq_create:
+	input_free_device(input_device);
+error_alloc_dev:
+	kfree(ts->touch_data);
+	return rc;
+}
+
+#ifdef CONFIG_PM
+static int cy8c_ts_suspend(struct device *dev)
+{
+	struct cy8c_ts *ts = dev_get_drvdata(dev);
+	int rc = 0;
+
+	if (device_may_wakeup(dev)) {
+		/* mark suspend flag */
+		mutex_lock(&ts->sus_lock);
+		ts->is_suspended = true;
+		mutex_unlock(&ts->sus_lock);
+
+		enable_irq_wake(ts->pen_irq);
+	} else {
+		disable_irq_nosync(ts->pen_irq);
+
+		rc = cancel_delayed_work_sync(&ts->work);
+
+		if (rc) {
+			/* missed the worker, write to STATUS_REG to
+			   acknowledge interrupt */
+			rc = cy8c_ts_write_reg_u8(ts->client,
+				ts->dd->status_reg, ts->dd->update_data);
+			if (rc < 0) {
+				dev_err(&ts->client->dev,
+					"write failed, try once more\n");
+
+				rc = cy8c_ts_write_reg_u8(ts->client,
+					ts->dd->status_reg,
+					ts->dd->update_data);
+				if (rc < 0)
+					dev_err(&ts->client->dev,
+						"write failed, exiting\n");
+			}
+
+			enable_irq(ts->pen_irq);
+		}
+
+		gpio_free(ts->pdata->irq_gpio);
+
+		if (ts->pdata->power_on) {
+			rc = ts->pdata->power_on(0);
+			if (rc) {
+				dev_err(dev, "unable to goto suspend\n");
+				return rc;
+			}
+		}
+	}
+	return 0;
+}
+
+static int cy8c_ts_resume(struct device *dev)
+{
+	struct cy8c_ts *ts = dev_get_drvdata(dev);
+	int rc = 0;
+
+	if (device_may_wakeup(dev)) {
+		disable_irq_wake(ts->pen_irq);
+
+		mutex_lock(&ts->sus_lock);
+		ts->is_suspended = false;
+
+		if (ts->int_pending == true) {
+			ts->int_pending = false;
+
+			/* start a delayed work */
+			queue_delayed_work(ts->wq, &ts->work, 0);
+		}
+		mutex_unlock(&ts->sus_lock);
+
+	} else {
+		if (ts->pdata->power_on) {
+			rc = ts->pdata->power_on(1);
+			if (rc) {
+				dev_err(dev, "unable to resume\n");
+				return rc;
+			}
+		}
+
+		/* configure touchscreen interrupt gpio */
+		rc = gpio_request(ts->pdata->irq_gpio, "cy8c_irq_gpio");
+		if (rc) {
+			pr_err("%s: unable to request gpio %d\n",
+				__func__, ts->pdata->irq_gpio);
+			goto err_power_off;
+		}
+
+		rc = gpio_direction_input(ts->pdata->irq_gpio);
+		if (rc) {
+			pr_err("%s: unable to set direction for gpio %d\n",
+				__func__, ts->pdata->irq_gpio);
+			goto err_gpio_free;
+		}
+
+		enable_irq(ts->pen_irq);
+	}
+	return 0;
+err_gpio_free:
+	gpio_free(ts->pdata->irq_gpio);
+err_power_off:
+	if (ts->pdata->power_on)
+		rc = ts->pdata->power_on(0);
+	return rc;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void cy8c_ts_early_suspend(struct early_suspend *h)
+{
+	struct cy8c_ts *ts = container_of(h, struct cy8c_ts, early_suspend);
+
+	cy8c_ts_suspend(&ts->client->dev);
+}
+
+static void cy8c_ts_late_resume(struct early_suspend *h)
+{
+	struct cy8c_ts *ts = container_of(h, struct cy8c_ts, early_suspend);
+
+	cy8c_ts_resume(&ts->client->dev);
+}
+#endif
+
+static const struct dev_pm_ops cy8c_ts_pm_ops = {
+#ifndef CONFIG_HAS_EARLYSUSPEND
+	.suspend	= cy8c_ts_suspend,
+	.resume		= cy8c_ts_resume,
+#endif
+};
+#endif
+
+static int __devinit cy8c_ts_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct cy8c_ts *ts;
+	struct cy8c_ts_platform_data *pdata = client->dev.platform_data;
+	int rc, temp_reg;
+
+	if (!pdata) {
+		dev_err(&client->dev, "platform data is required!\n");
+		return -EINVAL;
+	}
+
+	if (!i2c_check_functionality(client->adapter,
+				     I2C_FUNC_SMBUS_READ_WORD_DATA)) {
+		dev_err(&client->dev, "I2C functionality not supported\n");
+		return -EIO;
+	}
+
+	ts = kzalloc(sizeof(*ts), GFP_KERNEL);
+	if (!ts)
+		return -ENOMEM;
+
+	/* Enable runtime PM ops, start in ACTIVE mode */
+	rc = pm_runtime_set_active(&client->dev);
+	if (rc < 0)
+		dev_dbg(&client->dev, "unable to set runtime pm state\n");
+	pm_runtime_enable(&client->dev);
+
+	ts->client = client;
+	ts->pdata = pdata;
+	i2c_set_clientdata(client, ts);
+	ts->device_id = id->driver_data;
+
+	if (ts->pdata->dev_setup) {
+		rc = ts->pdata->dev_setup(1);
+		if (rc < 0) {
+			dev_err(&client->dev, "dev setup failed\n");
+			goto error_touch_data_alloc;
+		}
+	}
+
+	/* power on the device */
+	if (ts->pdata->power_on) {
+		rc = ts->pdata->power_on(1);
+		if (rc) {
+			pr_err("%s: Unable to power on the device\n", __func__);
+			goto error_dev_setup;
+		}
+	}
+
+	/* read one byte to make sure i2c device exists */
+	if (id->driver_data == CY8CTMA300)
+		temp_reg = 0x01;
+	else
+		temp_reg = 0x05;
+
+	rc = cy8c_ts_read_reg_u8(client, temp_reg);
+	if (rc < 0) {
+		dev_err(&client->dev, "i2c sanity check failed\n");
+		goto error_power_on;
+	}
+
+	ts->is_suspended = false;
+	ts->int_pending = false;
+	mutex_init(&ts->sus_lock);
+
+	rc = cy8c_ts_init_ts(client, ts);
+	if (rc < 0) {
+		dev_err(&client->dev, "CY8CTMG200-TMA300 init failed\n");
+		goto error_mutex_destroy;
+	}
+
+	if (ts->pdata->resout_gpio < 0)
+		goto config_irq_gpio;
+
+	/* configure touchscreen reset out gpio */
+	rc = gpio_request(ts->pdata->resout_gpio, "cy8c_resout_gpio");
+	if (rc) {
+		pr_err("%s: unable to request gpio %d\n",
+			__func__, ts->pdata->resout_gpio);
+		goto error_uninit_ts;
+	}
+
+	rc = gpio_direction_output(ts->pdata->resout_gpio, 0);
+	if (rc) {
+		pr_err("%s: unable to set direction for gpio %d\n",
+			__func__, ts->pdata->resout_gpio);
+		goto error_resout_gpio_dir;
+	}
+	/* reset gpio stabilization time */
+	msleep(20);
+
+config_irq_gpio:
+	/* configure touchscreen interrupt gpio */
+	rc = gpio_request(ts->pdata->irq_gpio, "cy8c_irq_gpio");
+	if (rc) {
+		pr_err("%s: unable to request gpio %d\n",
+			__func__, ts->pdata->irq_gpio);
+		goto error_irq_gpio_req;
+	}
+
+	rc = gpio_direction_input(ts->pdata->irq_gpio);
+	if (rc) {
+		pr_err("%s: unable to set direction for gpio %d\n",
+			__func__, ts->pdata->irq_gpio);
+		goto error_irq_gpio_dir;
+	}
+
+	ts->pen_irq = gpio_to_irq(ts->pdata->irq_gpio);
+	rc = request_irq(ts->pen_irq, cy8c_ts_irq,
+				IRQF_TRIGGER_FALLING,
+				ts->client->dev.driver->name, ts);
+	if (rc) {
+		dev_err(&ts->client->dev, "could not request irq\n");
+		goto error_req_irq_fail;
+	}
+
+	/* Clear the status register of the TS controller */
+	rc = cy8c_ts_write_reg_u8(ts->client, ts->dd->status_reg,
+						ts->dd->update_data);
+	if (rc < 0) {
+		/* Do multiple writes in case of failure */
+		dev_err(&ts->client->dev, "%s: write failed %d, "
+				"trying again\n", __func__, rc);
+		rc = cy8c_ts_write_reg_u8(ts->client,
+			ts->dd->status_reg, ts->dd->update_data);
+		if (rc < 0) {
+			dev_err(&ts->client->dev, "%s: write failed "
+				"second time (%d)\n", __func__, rc);
+		}
+	}
+
+	device_init_wakeup(&client->dev, ts->pdata->wakeup);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	ts->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN +
+						CY8C_TS_SUSPEND_LEVEL;
+	ts->early_suspend.suspend = cy8c_ts_early_suspend;
+	ts->early_suspend.resume = cy8c_ts_late_resume;
+	register_early_suspend(&ts->early_suspend);
+#endif
+
+	return 0;
+error_req_irq_fail:
+error_irq_gpio_dir:
+	gpio_free(ts->pdata->irq_gpio);
+error_irq_gpio_req:
+error_resout_gpio_dir:
+	if (ts->pdata->resout_gpio >= 0)
+		gpio_free(ts->pdata->resout_gpio);
+error_uninit_ts:
+	destroy_workqueue(ts->wq);
+	input_unregister_device(ts->input);
+	kfree(ts->touch_data);
+error_mutex_destroy:
+	mutex_destroy(&ts->sus_lock);
+error_power_on:
+	if (ts->pdata->power_on)
+		ts->pdata->power_on(0);
+error_dev_setup:
+	if (ts->pdata->dev_setup)
+		ts->pdata->dev_setup(0);
+error_touch_data_alloc:
+	pm_runtime_set_suspended(&client->dev);
+	pm_runtime_disable(&client->dev);
+	kfree(ts);
+	return rc;
+}
+
+static int __devexit cy8c_ts_remove(struct i2c_client *client)
+{
+	struct cy8c_ts *ts = i2c_get_clientdata(client);
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+	unregister_early_suspend(&ts->early_suspend);
+#endif
+	pm_runtime_set_suspended(&client->dev);
+	pm_runtime_disable(&client->dev);
+
+	device_init_wakeup(&client->dev, 0);
+
+	cancel_delayed_work_sync(&ts->work);
+
+	free_irq(ts->pen_irq, ts);
+
+	gpio_free(ts->pdata->irq_gpio);
+
+	if (ts->pdata->resout_gpio >= 0)
+		gpio_free(ts->pdata->resout_gpio);
+
+	destroy_workqueue(ts->wq);
+
+	input_unregister_device(ts->input);
+
+	mutex_destroy(&ts->sus_lock);
+
+	if (ts->pdata->power_on)
+		ts->pdata->power_on(0);
+
+	if (ts->pdata->dev_setup)
+		ts->pdata->dev_setup(0);
+
+	kfree(ts->touch_data);
+	kfree(ts);
+
+	return 0;
+}
+
+static const struct i2c_device_id cy8c_ts_id[] = {
+	{"cy8ctma300", CY8CTMA300},
+	{"cy8ctmg200", CY8CTMG200},
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, cy8c_ts_id);
+
+
+static struct i2c_driver cy8c_ts_driver = {
+	.driver = {
+		.name = "cy8c_ts",
+		.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm = &cy8c_ts_pm_ops,
+#endif
+	},
+	.probe		= cy8c_ts_probe,
+	.remove		= __devexit_p(cy8c_ts_remove),
+	.id_table	= cy8c_ts_id,
+};
+
+static int __init cy8c_ts_init(void)
+{
+	return i2c_add_driver(&cy8c_ts_driver);
+}
+/* Register as a late initcall to avoid power fluctuations
+ * during LCD initialization.
+ */
+late_initcall(cy8c_ts_init);
+
+static void __exit cy8c_ts_exit(void)
+{
+	i2c_del_driver(&cy8c_ts_driver);
+}
+module_exit(cy8c_ts_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CY8CTMA300-CY8CTMG200 touchscreen controller driver");
+MODULE_AUTHOR("Cypress");
+MODULE_ALIAS("platform:cy8c_ts");
diff --git a/drivers/input/touchscreen/cyttsp-i2c.c b/drivers/input/touchscreen/cyttsp-i2c.c
new file mode 100644
index 0000000..7c7518a
--- /dev/null
+++ b/drivers/input/touchscreen/cyttsp-i2c.c
@@ -0,0 +1,3040 @@
+/* Source for:
+ * Cypress TrueTouch(TM) Standard Product I2C touchscreen driver.
+ * drivers/input/touchscreen/cyttsp-i2c.c
+ *
+ * Copyright (C) 2009, 2010 Cypress Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Cypress reserves the right to make changes without further notice
+ * to the materials described herein. Cypress does not assume any
+ * liability arising out of the application described herein.
+ *
+ * Contact Cypress Semiconductor at www.cypress.com
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/byteorder/generic.h>
+#include <linux/bitops.h>
+#include <linux/pm_runtime.h>
+#include <linux/firmware.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+#define CY_DECLARE_GLOBALS
+
+#include <linux/cyttsp.h>
+
+uint32_t cyttsp_tsdebug1 = 0xff;
+module_param_named(tsdebug1, cyttsp_tsdebug1, uint, 0664);
+
+#define FW_FNAME_LEN 40
+
+/* CY TTSP I2C Driver private data */
+struct cyttsp {
+	struct i2c_client *client;
+	struct input_dev *input;
+	struct work_struct work;
+	struct timer_list timer;
+	struct mutex mutex;
+	char phys[32];
+	struct cyttsp_platform_data *platform_data;
+	u8 num_prv_st_tch;
+	u16 act_trk[CY_NUM_TRK_ID];
+	u16 prv_st_tch[CY_NUM_ST_TCH_ID];
+	u16 prv_mt_tch[CY_NUM_MT_TCH_ID];
+	u16 prv_mt_pos[CY_NUM_TRK_ID][2];
+	atomic_t irq_enabled;
+	bool cyttsp_update_fw;
+	bool cyttsp_fwloader_mode;
+	bool is_suspended;
+	struct regulator **vdd;
+	char fw_fname[FW_FNAME_LEN];
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	struct early_suspend early_suspend;
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+};
+static u8 irq_cnt;		/* comparison counter against the device's register value */
+static u32 irq_cnt_total;	/* total interrupts */
+static u32 irq_err_cnt;		/* count number of touch interrupts with err */
+#define CY_IRQ_CNT_MASK	0x000000FF	/* mapped for sizeof count in reg */
+#define CY_IRQ_CNT_REG	0x00		/* tt_undef[0]=reg 0x1B - Gen3 only */
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void cyttsp_early_suspend(struct early_suspend *handler);
+static void cyttsp_late_resume(struct early_suspend *handler);
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+static struct workqueue_struct *cyttsp_ts_wq;
+
+
+/* ****************************************************************************
+ * Prototypes for static functions
+ * ************************************************************************** */
+static void cyttsp_xy_worker(struct work_struct *work);
+static irqreturn_t cyttsp_irq(int irq, void *handle);
+static int cyttsp_inlist(u16 prev_track[],
+			u8 cur_trk_id, u8 *prev_loc, u8 num_touches);
+static int cyttsp_next_avail_inlist(u16 cur_trk[],
+			u8 *new_loc, u8 num_touches);
+static int cyttsp_putbl(struct cyttsp *ts, int show,
+			int show_status, int show_version, int show_cid);
+static int __devinit cyttsp_probe(struct i2c_client *client,
+			const struct i2c_device_id *id);
+static int __devexit cyttsp_remove(struct i2c_client *client);
+static int cyttsp_resume(struct device *dev);
+static int cyttsp_suspend(struct device *dev);
+
+/* Static variables */
+static struct cyttsp_gen3_xydata_t g_xy_data;
+static struct cyttsp_bootloader_data_t g_bl_data;
+static struct cyttsp_sysinfo_data_t g_sysinfo_data;
+static const struct i2c_device_id cyttsp_id[] = {
+	{ CY_I2C_NAME, 0 },  { }
+};
+static u8 bl_cmd[] = {
+	CY_BL_FILE0, CY_BL_CMD, CY_BL_EXIT,
+	CY_BL_KEY0, CY_BL_KEY1, CY_BL_KEY2,
+	CY_BL_KEY3, CY_BL_KEY4, CY_BL_KEY5,
+	CY_BL_KEY6, CY_BL_KEY7};
+
+MODULE_DEVICE_TABLE(i2c, cyttsp_id);
+
+static const struct dev_pm_ops cyttsp_pm_ops = {
+	.suspend = cyttsp_suspend,
+	.resume = cyttsp_resume,
+};
+
+static struct i2c_driver cyttsp_driver = {
+	.driver = {
+		.name = CY_I2C_NAME,
+		.owner = THIS_MODULE,
+		.pm = &cyttsp_pm_ops,
+	},
+	.probe = cyttsp_probe,
+	.remove = __devexit_p(cyttsp_remove),
+	.id_table = cyttsp_id,
+};
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard touchscreen driver");
+MODULE_AUTHOR("Cypress");
+
+static ssize_t cyttsp_irq_status(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct cyttsp *ts = i2c_get_clientdata(client);
+	return sprintf(buf, "%u\n", atomic_read(&ts->irq_enabled));
+}
+
+static ssize_t cyttsp_irq_enable(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct cyttsp *ts = i2c_get_clientdata(client);
+	int err = 0;
+	unsigned long value;
+
+	if (size > 2)
+		return -EINVAL;
+
+	err = strict_strtoul(buf, 10, &value);
+	if (err != 0)
+		return err;
+
+	switch (value) {
+	case 0:
+		if (atomic_cmpxchg(&ts->irq_enabled, 1, 0)) {
+			pr_info("touch irq disabled!\n");
+			disable_irq_nosync(ts->client->irq);
+		}
+		err = size;
+		break;
+	case 1:
+		if (!atomic_cmpxchg(&ts->irq_enabled, 0, 1)) {
+			pr_info("touch irq enabled!\n");
+			enable_irq(ts->client->irq);
+		}
+		err = size;
+		break;
+	default:
+		pr_info("cyttsp_irq_enable failed -> irq_enabled = %d\n",
+		atomic_read(&ts->irq_enabled));
+		err = -EINVAL;
+		break;
+	}
+
+	return err;
+}
+
+static DEVICE_ATTR(irq_enable, 0777, cyttsp_irq_status, cyttsp_irq_enable);
+
+static ssize_t cyttsp_fw_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d.%d.%d\n", g_bl_data.appid_lo,
+				g_bl_data.appver_hi, g_bl_data.appver_lo);
+}
+
+static DEVICE_ATTR(cyttsp_fw_ver, 0777, cyttsp_fw_show, NULL);
+
+/* firmware flashing block */
+#define BLK_SIZE     16
+#define DATA_REC_LEN 64
+#define START_ADDR   0x0b00
+#define BLK_SEED     0xff
+#define RECAL_REG    0x1b
+
+enum bl_commands {
+	BL_CMD_WRBLK     = 0x39,
+	BL_CMD_INIT      = 0x38,
+	BL_CMD_TERMINATE = 0x3b,
+};
+/* TODO: Add key as part of platform data */
+#define KEY_CS  (0 + 1 + 2 + 3 + 4 + 5 + 6 + 7)
+#define KEY {0, 1, 2, 3, 4, 5, 6, 7}
+
+static const  char _key[] = KEY;
+#define KEY_LEN sizeof(_key)
+
+static int rec_cnt;
+struct fw_record {
+	u8 seed;
+	u8 cmd;
+	u8 key[KEY_LEN];
+	u8 blk_hi;
+	u8 blk_lo;
+	u8 data[DATA_REC_LEN];
+	u8 data_cs;
+	u8 rec_cs;
+};
+#define fw_rec_size (sizeof(struct fw_record))
+
+struct cmd_record {
+	u8 reg;
+	u8 seed;
+	u8 cmd;
+	u8 key[KEY_LEN];
+};
+#define cmd_rec_size (sizeof(struct cmd_record))
+
+static struct fw_record data_record = {
+	.seed = BLK_SEED,
+	.cmd = BL_CMD_WRBLK,
+	.key = KEY,
+};
+
+static const struct cmd_record terminate_rec = {
+	.reg = 0,
+	.seed = BLK_SEED,
+	.cmd = BL_CMD_TERMINATE,
+	.key = KEY,
+};
+static const struct cmd_record initiate_rec = {
+	.reg = 0,
+	.seed = BLK_SEED,
+	.cmd = BL_CMD_INIT,
+	.key = KEY,
+};
+
+#define BL_REC1_ADDR          0x0780
+#define BL_REC2_ADDR          0x07c0
+
+#define ID_INFO_REC           ":40078000"
+#define ID_INFO_OFFSET_IN_REC 77
+
+#define REC_START_CHR     ':'
+#define REC_LEN_OFFSET     1
+#define REC_ADDR_HI_OFFSET 3
+#define REC_ADDR_LO_OFFSET 5
+#define REC_TYPE_OFFSET    7
+#define REC_DATA_OFFSET    9
+#define REC_LINE_SIZE	141
+
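+/*
+ * Issue a soft reset and poll the bootloader status until the device reports
+ * idle (0x10 or 0x11), retrying the reset write up to 10 times.
+ */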
+static int cyttsp_soft_reset(struct cyttsp *ts)
+{
+	int retval = 0, tries = 0;
+	u8 host_reg = CY_SOFT_RESET_MODE;
+
+	do {
+		retval = i2c_smbus_write_i2c_block_data(ts->client,
+				CY_REG_BASE, sizeof(host_reg), &host_reg);
+		if (retval < 0)
+			msleep(20);
+	} while (tries++ < 10 && (retval < 0));
+
+	if (retval < 0) {
+		pr_err("%s: failed\n", __func__);
+		return retval;
+	}
+
+	tries = 0;
+	do {
+		msleep(20);
+		cyttsp_putbl(ts, 1, true, true, false);
+	} while (g_bl_data.bl_status != 0x10 &&
+		g_bl_data.bl_status != 0x11 &&
+		tries++ < 100);
+
+	if (g_bl_data.bl_status != 0x11 && g_bl_data.bl_status != 0x10)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void cyttsp_exit_bl_mode(struct cyttsp *ts)
+{
+	int retval, tries = 0;
+
+	do {
+		retval = i2c_smbus_write_i2c_block_data(ts->client,
+			CY_REG_BASE, sizeof(bl_cmd), bl_cmd);
+		if (retval < 0)
+			msleep(20);
+	} while (tries++ < 10 && (retval < 0));
+}
+
+static void cyttsp_set_sysinfo_mode(struct cyttsp *ts)
+{
+	int retval, tries = 0;
+	u8 host_reg = CY_SYSINFO_MODE;
+
+	do {
+		retval = i2c_smbus_write_i2c_block_data(ts->client,
+			CY_REG_BASE, sizeof(host_reg), &host_reg);
+		if (retval < 0)
+			msleep(20);
+	} while (tries++ < 10 && (retval < 0));
+
+	/* wait for TTSP Device to complete switch to SysInfo mode */
+	if (!(retval < 0)) {
+		retval = i2c_smbus_read_i2c_block_data(ts->client,
+				CY_REG_BASE,
+				sizeof(struct cyttsp_sysinfo_data_t),
+				(u8 *)&g_sysinfo_data);
+	} else
+		pr_err("%s: failed\n", __func__);
+}
+
+static void cyttsp_set_opmode(struct cyttsp *ts)
+{
+	int retval, tries = 0;
+	u8 host_reg = CY_OP_MODE;
+
+	do {
+		retval = i2c_smbus_write_i2c_block_data(ts->client,
+				CY_REG_BASE, sizeof(host_reg), &host_reg);
+		if (retval < 0)
+			msleep(20);
+	} while (tries++ < 10 && (retval < 0));
+}
+
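+/* Convert the first two ASCII hex characters of 'str' into a byte. */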
+static int str2uc(char *str, u8 *val)
+{
+	char substr[3];
+	unsigned long ulval;
+	int rc;
+
+	if (!str || strlen(str) < 2)
+		return -EINVAL;
+
+	substr[0] = str[0];
+	substr[1] = str[1];
+	substr[2] = '\0';
+
+	rc = strict_strtoul(substr, 16, &ulval);
+	if (rc != 0)
+		return rc;
+
+	*val = (u8) ulval;
+
+	return 0;
+}
+
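+/*
+ * Write one block of bootloader data over I2C (with a debug hex dump),
+ * retrying the transfer on failure.
+ */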
+static int flash_block(struct cyttsp *ts, u8 *blk, int len)
+{
+	int retval, i, tries = 0;
+	char buf[(2 * (BLK_SIZE + 1)) + 1];
+	char *p = buf;
+
+	for (i = 0; i < len; i++, p += 2)
+		sprintf(p, "%02x", blk[i]);
+	pr_debug("%s: size %d, pos %ld payload %s\n",
+		       __func__, len, (long)0, buf);
+
+	do {
+		retval = i2c_smbus_write_i2c_block_data(ts->client,
+			CY_REG_BASE, len, blk);
+		if (retval < 0)
+			msleep(20);
+	} while (tries++ < 20 && (retval < 0));
+
+	if (retval < 0) {
+		pr_err("%s: failed\n", __func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+static int flash_command(struct cyttsp *ts, const struct cmd_record *record)
+{
+	return flash_block(ts, (u8 *)record, cmd_rec_size);
+}
+
+static void init_data_record(struct fw_record *rec, unsigned short addr)
+{
+	addr >>= 6;
+	rec->blk_hi = (addr >> 8) & 0xff;
+	rec->blk_lo = addr & 0xff;
+	rec->rec_cs = rec->blk_hi + rec->blk_lo +
+			(unsigned char)(BLK_SEED + BL_CMD_WRBLK + KEY_CS);
+	rec->data_cs = 0;
+}
+
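+/*
+ * Validate an Intel-HEX style data record: it must start with ':', carry a
+ * full-length data payload of type 0, and target either the application
+ * area or one of the bootloader info records.
+ */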
+static int check_record(u8 *rec)
+{
+	int rc;
+	u16 addr;
+	u8 r_len, type, hi_off, lo_off;
+
+	rc = str2uc(rec + REC_LEN_OFFSET, &r_len);
+	if (rc < 0)
+		return rc;
+
+	rc = str2uc(rec + REC_TYPE_OFFSET, &type);
+	if (rc < 0)
+		return rc;
+
+	if (*rec != REC_START_CHR || r_len != DATA_REC_LEN || type != 0)
+		return -EINVAL;
+
+	rc = str2uc(rec + REC_ADDR_HI_OFFSET, &hi_off);
+	if (rc < 0)
+		return rc;
+
+	rc = str2uc(rec + REC_ADDR_LO_OFFSET, &lo_off);
+	if (rc < 0)
+		return rc;
+
+	addr = (hi_off << 8) | lo_off;
+
+	if (addr >= START_ADDR || addr == BL_REC1_ADDR || addr == BL_REC2_ADDR)
+		return 0;
+
+	return -EINVAL;
+}
+
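+/*
+ * Convert a hex data record into the bootloader write-block format, filling
+ * in the block number and accumulating the data and record checksums.
+ */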
+static struct fw_record *prepare_record(u8 *rec)
+{
+	int i, rc;
+	u16 addr;
+	u8 hi_off, lo_off;
+	u8 *p;
+
+	rc = str2uc(rec + REC_ADDR_HI_OFFSET, &hi_off);
+	if (rc < 0)
+		return ERR_PTR((long) rc);
+
+	rc = str2uc(rec + REC_ADDR_LO_OFFSET, &lo_off);
+	if (rc < 0)
+		return ERR_PTR((long) rc);
+
+	addr = (hi_off << 8) | lo_off;
+
+	init_data_record(&data_record, addr);
+	p = rec + REC_DATA_OFFSET;
+	for (i = 0; i < DATA_REC_LEN; i++) {
+		rc = str2uc(p, &data_record.data[i]);
+		if (rc < 0)
+			return ERR_PTR((long) rc);
+		data_record.data_cs += data_record.data[i];
+		data_record.rec_cs += data_record.data[i];
+		p += 2;
+	}
+	data_record.rec_cs += data_record.data_cs;
+
+	return &data_record;
+}
+
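+/*
+ * Send a firmware record to the device in BLK_SIZE chunks; each chunk is
+ * prefixed with its byte offset into the record.
+ */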
+static int flash_record(struct cyttsp *ts, const struct fw_record *record)
+{
+	int len = fw_rec_size;
+	int blk_len, rc;
+	u8 *rec = (u8 *)record;
+	u8 data[BLK_SIZE + 1];
+	u8 blk_offset;
+
+	for (blk_offset = 0; len; len -= blk_len) {
+		data[0] = blk_offset;
+		blk_len = len > BLK_SIZE ? BLK_SIZE : len;
+		memcpy(data + 1, rec, blk_len);
+		rec += blk_len;
+		rc = flash_block(ts, data, blk_len + 1);
+		if (rc < 0)
+			return rc;
+		blk_offset += blk_len;
+	}
+	return 0;
+}
+
+static int flash_data_rec(struct cyttsp *ts, u8 *buf)
+{
+	struct fw_record *rec;
+	int rc, tries;
+
+	if (!buf)
+		return -EINVAL;
+
+	rc = check_record(buf);
+
+	if (rc < 0) {
+		pr_debug("%s: record ignored %s", __func__, buf);
+		return 0;
+	}
+
+	rec = prepare_record(buf);
+	if (IS_ERR_OR_NULL(rec))
+		return PTR_ERR(rec);
+
+	rc = flash_record(ts, rec);
+	if (rc < 0)
+		return rc;
+
+	tries = 0;
+	do {
+		if (rec_cnt%2)
+			msleep(20);
+		cyttsp_putbl(ts, 4, true, false, false);
+	} while (g_bl_data.bl_status != 0x10 &&
+		g_bl_data.bl_status != 0x11 &&
+		tries++ < 100);
+	rec_cnt++;
+	return rc;
+}
+
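+/*
+ * Flash a complete firmware image: erase via the initiate command, stream
+ * every data record from the image, then send the terminate command.
+ */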
+static int cyttspfw_flash_firmware(struct cyttsp *ts, const u8 *data,
+					int data_len)
+{
+	u8 *buf;
+	int i, j;
+	int rc, tries = 0;
+
+	/* initiate bootload: this will erase all the existing data */
+	rc = flash_command(ts, &initiate_rec);
+	if (rc < 0)
+		return rc;
+
+	do {
+		msleep(100);
+		cyttsp_putbl(ts, 4, true, false, false);
+	} while (g_bl_data.bl_status != 0x10 &&
+		g_bl_data.bl_status != 0x11 &&
+		tries++ < 100);
+
+	buf = kzalloc(REC_LINE_SIZE + 1, GFP_KERNEL);
+	if (!buf) {
+		pr_err("%s: no memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	rec_cnt = 0;
+	/* flash data records */
+	for (i = 0, j = 0; i < data_len; i++, j++) {
+		if ((data[i] == REC_START_CHR) && j) {
+			buf[j] = 0;
+			rc = flash_data_rec(ts, buf);
+			if (rc < 0) {
+				kfree(buf);
+				return rc;
+			}
+			j = 0;
+		}
+		buf[j] = data[i];
+	}
+
+	/* flash last data record */
+	if (j) {
+		buf[j] = 0;
+		rc = flash_data_rec(ts, buf);
+		if (rc < 0) {
+			kfree(buf);
+			return rc;
+		}
+	}
+
+	kfree(buf);
+
+	/* terminate bootloader */
+	tries = 0;
+	rc = flash_command(ts, &terminate_rec);
+	do {
+		msleep(100);
+		cyttsp_putbl(ts, 4, true, false, false);
+	} while (g_bl_data.bl_status != 0x10 &&
+		g_bl_data.bl_status != 0x11 &&
+		tries++ < 100);
+
+	return rc;
+}
+
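+/*
+ * Parse the TTSP version, application ID/version and customer ID bytes out
+ * of the ID info record of the firmware image.
+ */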
+static int get_hex_fw_ver(u8 *p, u8 *ttspver_hi, u8 *ttspver_lo,
+			u8 *appid_hi, u8 *appid_lo, u8 *appver_hi,
+			u8 *appver_lo, u8 *cid_0, u8 *cid_1, u8 *cid_2)
+{
+	int rc;
+
+	p = p + ID_INFO_OFFSET_IN_REC;
+	rc = str2uc(p, ttspver_hi);
+	if (rc < 0)
+		return rc;
+	p += 2;
+	rc = str2uc(p, ttspver_lo);
+	if (rc < 0)
+		return rc;
+	p += 2;
+	rc = str2uc(p, appid_hi);
+	if (rc < 0)
+		return rc;
+	p += 2;
+	rc = str2uc(p, appid_lo);
+	if (rc < 0)
+		return rc;
+	p += 2;
+	rc = str2uc(p, appver_hi);
+	if (rc < 0)
+		return rc;
+	p += 2;
+	rc = str2uc(p, appver_lo);
+	if (rc < 0)
+		return rc;
+	p += 2;
+	rc = str2uc(p, cid_0);
+	if (rc < 0)
+		return rc;
+	p += 2;
+	rc = str2uc(p, cid_1);
+	if (rc < 0)
+		return rc;
+	p += 2;
+	rc = str2uc(p, cid_2);
+	if (rc < 0)
+		return rc;
+
+	return 0;
+}
+
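+/*
+ * Compare the image version against the firmware on the device and, if an
+ * upgrade is needed (or forced), flash it with interrupts quiesced, then
+ * return the device to operational mode.
+ */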
+static void cyttspfw_flash_start(struct cyttsp *ts, const u8 *data,
+				int data_len, u8 *buf, bool force)
+{
+	int rc;
+	u8 ttspver_hi = 0, ttspver_lo = 0, fw_upgrade = 0;
+	u8 appid_hi = 0, appid_lo = 0;
+	u8 appver_hi = 0, appver_lo = 0;
+	u8 cid_0 = 0, cid_1 = 0, cid_2 = 0;
+	char *p = buf;
+
+	/* get hex firmware version */
+	rc = get_hex_fw_ver(p, &ttspver_hi, &ttspver_lo,
+		&appid_hi, &appid_lo, &appver_hi,
+		&appver_lo, &cid_0, &cid_1, &cid_2);
+
+	if (rc < 0) {
+		pr_err("%s: unable to get hex firmware version\n", __func__);
+		return;
+	}
+
+	/* disable interrupts before flashing */
+	if (ts->client->irq == 0)
+		del_timer(&ts->timer);
+	else
+		disable_irq(ts->client->irq);
+
+	rc = cancel_work_sync(&ts->work);
+
+	if (rc && ts->client->irq)
+		enable_irq(ts->client->irq);
+
+	/* enter bootloader idle mode */
+	rc = cyttsp_soft_reset(ts);
+
+	if (rc < 0) {
+		pr_err("%s: retrying entry into idle mode"
+				" a second time\n", __func__);
+		msleep(1000);
+		rc = cyttsp_soft_reset(ts);
+	}
+
+	if (rc < 0) {
+		pr_err("%s: try again later\n", __func__);
+		return;
+	}
+
+
+	pr_info("Current firmware: %d.%d.%d\n", g_bl_data.appid_lo,
+				g_bl_data.appver_hi, g_bl_data.appver_lo);
+	pr_info("New firmware: %d.%d.%d\n", appid_lo, appver_hi, appver_lo);
+
+	if (force)
+		fw_upgrade = 1;
+	else
+		if ((appid_hi == g_bl_data.appid_hi) &&
+			(appid_lo == g_bl_data.appid_lo)) {
+			if (appver_hi > g_bl_data.appver_hi) {
+				fw_upgrade = 1;
+			} else if ((appver_hi == g_bl_data.appver_hi) &&
+					 (appver_lo > g_bl_data.appver_lo)) {
+					fw_upgrade = 1;
+				} else {
+					fw_upgrade = 0;
+					pr_info("%s: Firmware version is "
+					"lower than or equal to the existing "
+					"firmware, upgrade not needed\n",
+					__func__);
+				}
+		} else {
+			fw_upgrade = 0;
+			pr_info("%s: Firmware versions do not match, "
+						"cannot upgrade\n", __func__);
+		}
+
+	if (fw_upgrade) {
+		pr_info("%s: Starting firmware upgrade\n", __func__);
+		rc = cyttspfw_flash_firmware(ts, data, data_len);
+		if (rc < 0)
+			pr_err("%s: firmware upgrade failed\n", __func__);
+		else
+			pr_info("%s: firmware upgrade success\n", __func__);
+	}
+
+	/* enter bootloader idle mode */
+	cyttsp_soft_reset(ts);
+	/* exit bootloader mode */
+	cyttsp_exit_bl_mode(ts);
+	msleep(100);
+	/* set sysinfo details */
+	cyttsp_set_sysinfo_mode(ts);
+	/* enter application mode */
+	cyttsp_set_opmode(ts);
+
+	/* enable interrupts */
+	if (ts->client->irq == 0)
+		mod_timer(&ts->timer, jiffies + TOUCHSCREEN_TIMEOUT);
+	else
+		enable_irq(ts->client->irq);
+}
+
+static void cyttspfw_upgrade_start(struct cyttsp *ts, const u8 *data,
+					int data_len, bool force)
+{
+	int i, j;
+	u8 *buf;
+
+	buf = kzalloc(REC_LINE_SIZE + 1, GFP_KERNEL);
+	if (!buf) {
+		pr_err("%s: no memory\n", __func__);
+		return;
+	}
+
+	for (i = 0, j = 0; i < data_len; i++, j++) {
+		if ((data[i] == REC_START_CHR) && j) {
+			buf[j] = 0;
+			j = 0;
+			if (!strncmp(buf, ID_INFO_REC, strlen(ID_INFO_REC))) {
+				cyttspfw_flash_start(ts, data, data_len,
+							buf, force);
+				break;
+			}
+		}
+		buf[j] = data[i];
+	}
+
+	/* check in the last record of firmware */
+	if (j) {
+		buf[j] = 0;
+		if (!strncmp(buf, ID_INFO_REC, strlen(ID_INFO_REC))) {
+			cyttspfw_flash_start(ts, data, data_len,
+						buf, force);
+		}
+	}
+
+	kfree(buf);
+}
+
+static void cyttspfw_upgrade(struct device *dev, bool force)
+{
+	struct cyttsp *ts = dev_get_drvdata(dev);
+	const struct firmware *cyttsp_fw;
+	int retval = 0;
+
+	if (ts->is_suspended == true) {
+		pr_err("%s: in suspend state, resume it\n", __func__);
+		retval = cyttsp_resume(dev);
+		if (retval < 0) {
+			pr_err("%s: unable to resume\n", __func__);
+			return;
+		}
+	}
+
+	retval = request_firmware(&cyttsp_fw, ts->fw_fname, dev);
+	if (retval < 0) {
+		pr_err("%s: %s request failed(%d)\n", __func__,
+						ts->fw_fname, retval);
+	} else {
+		/* check and start upgrade */
+		cyttspfw_upgrade_start(ts, cyttsp_fw->data,
+				cyttsp_fw->size, force);
+		release_firmware(cyttsp_fw);
+	}
+}
+
+static ssize_t cyttsp_update_fw_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct cyttsp *ts = dev_get_drvdata(dev);
+	return snprintf(buf, 2, "%d\n", ts->cyttsp_fwloader_mode);
+}
+
+static ssize_t cyttsp_force_update_fw_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	struct cyttsp *ts = dev_get_drvdata(dev);
+	unsigned long val;
+	int rc;
+
+	if (size > 2)
+		return -EINVAL;
+
+	rc = strict_strtoul(buf, 10, &val);
+	if (rc != 0)
+		return rc;
+
+	mutex_lock(&ts->mutex);
+	if (!ts->cyttsp_fwloader_mode  && val) {
+		ts->cyttsp_fwloader_mode = 1;
+		cyttspfw_upgrade(dev, true);
+		ts->cyttsp_fwloader_mode = 0;
+	}
+	mutex_unlock(&ts->mutex);
+	return size;
+}
+
+static DEVICE_ATTR(cyttsp_force_update_fw, 0777, cyttsp_update_fw_show,
+					cyttsp_force_update_fw_store);
+
+static ssize_t cyttsp_update_fw_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	struct cyttsp *ts = dev_get_drvdata(dev);
+	unsigned long val;
+	int rc;
+
+	if (size > 2)
+		return -EINVAL;
+
+	rc = strict_strtoul(buf, 10, &val);
+	if (rc != 0)
+		return rc;
+
+	mutex_lock(&ts->mutex);
+	if (!ts->cyttsp_fwloader_mode  && val) {
+		ts->cyttsp_fwloader_mode = 1;
+		cyttspfw_upgrade(dev, false);
+		ts->cyttsp_fwloader_mode = 0;
+	}
+	mutex_unlock(&ts->mutex);
+
+	return size;
+}
+
+static DEVICE_ATTR(cyttsp_update_fw, 0777, cyttsp_update_fw_show,
+					cyttsp_update_fw_store);
+
+static ssize_t cyttsp_fw_name_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct cyttsp *ts = dev_get_drvdata(dev);
+	return snprintf(buf, FW_FNAME_LEN - 1, "%s\n", ts->fw_fname);
+}
+
+static ssize_t cyttsp_fw_name_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	struct cyttsp *ts = dev_get_drvdata(dev);
+
+	if (size > FW_FNAME_LEN - 1)
+		return -EINVAL;
+
+	strncpy(ts->fw_fname, buf, size);
+	if (ts->fw_fname[size-1] == '\n')
+		ts->fw_fname[size-1] = 0;
+
+	return size;
+}
+
+static DEVICE_ATTR(cyttsp_fw_name, 0777, cyttsp_fw_name_show,
+					cyttsp_fw_name_store);
+
+/* The cyttsp_xy_worker function reads the XY coordinates and sends them to
+ * the input layer.  It is scheduled from the interrupt (or timer).
+ */
+void cyttsp_xy_worker(struct work_struct *work)
+{
+	struct cyttsp *ts = container_of(work, struct cyttsp, work);
+	u8 id, tilt, rev_x, rev_y;
+	u8 i, loc;
+	u8 prv_tch;		/* number of previous touches */
+	u8 cur_tch;	/* number of current touches */
+	u16 tmp_trk[CY_NUM_MT_TCH_ID];
+	u16 snd_trk[CY_NUM_MT_TCH_ID];
+	u16 cur_trk[CY_NUM_TRK_ID];
+	u16 cur_st_tch[CY_NUM_ST_TCH_ID];
+	u16 cur_mt_tch[CY_NUM_MT_TCH_ID];
+	/* if NOT CY_USE_TRACKING_ID then
+	 * only uses CY_NUM_MT_TCH_ID positions */
+	u16 cur_mt_pos[CY_NUM_TRK_ID][2];
+	/* if NOT CY_USE_TRACKING_ID then
+	 * only uses CY_NUM_MT_TCH_ID positions */
+	u8 cur_mt_z[CY_NUM_TRK_ID];
+	u8 curr_tool_width;
+	u16 st_x1, st_y1;
+	u8 st_z1;
+	u16 st_x2, st_y2;
+	u8 st_z2;
+	s32 retval;
+
+	cyttsp_xdebug("TTSP worker start 1:\n");
+
+	/* get event data from CYTTSP device */
+	i = CY_NUM_RETRY;
+	do {
+		retval = i2c_smbus_read_i2c_block_data(ts->client,
+			CY_REG_BASE,
+			sizeof(struct cyttsp_gen3_xydata_t), (u8 *)&g_xy_data);
+	} while ((retval < CY_OK) && --i);
+
+	if (retval < CY_OK) {
+		/* return immediately on
+		 * failure to read device on the i2c bus */
+		goto exit_xy_worker;
+	}
+
+	cyttsp_xdebug("TTSP worker start 2:\n");
+
+	/* compare own irq counter with the device irq counter */
+	if (ts->client->irq) {
+		u8 host_reg;
+		u8 cur_cnt;
+		if (ts->platform_data->use_hndshk) {
+
+			host_reg = g_xy_data.hst_mode & CY_HNDSHK_BIT ?
+				g_xy_data.hst_mode & ~CY_HNDSHK_BIT :
+				g_xy_data.hst_mode | CY_HNDSHK_BIT;
+			retval = i2c_smbus_write_i2c_block_data(ts->client,
+				CY_REG_BASE, sizeof(host_reg), &host_reg);
+		}
+		cur_cnt = g_xy_data.tt_undef[CY_IRQ_CNT_REG];
+		irq_cnt_total++;
+		irq_cnt++;
+		if (irq_cnt != cur_cnt) {
+			irq_err_cnt++;
+			cyttsp_debug("i_c_ER: dv=%d fw=%d hm=%02X t=%lu te=%lu\n", \
+				irq_cnt, \
+				cur_cnt, g_xy_data.hst_mode, \
+				(unsigned long)irq_cnt_total, \
+				(unsigned long)irq_err_cnt);
+		} else {
+			cyttsp_debug("i_c_ok: dv=%d fw=%d hm=%02X t=%lu te=%lu\n", \
+				irq_cnt, \
+				cur_cnt, g_xy_data.hst_mode, \
+				(unsigned long)irq_cnt_total, \
+				(unsigned long)irq_err_cnt);
+		}
+		irq_cnt = cur_cnt;
+	}
+
+	/* Get the current num touches and return if there are no touches */
+	if ((GET_BOOTLOADERMODE(g_xy_data.tt_mode) == 1) ||
+		(GET_HSTMODE(g_xy_data.hst_mode) != CY_OK)) {
+		u8 host_reg, tries;
+		/* the TTSP device has suffered spurious reset or mode switch */
+		cyttsp_debug( \
+			"Spurious err opmode (tt_mode=%02X hst_mode=%02X)\n", \
+			g_xy_data.tt_mode, g_xy_data.hst_mode);
+		cyttsp_debug("Reset TTSP Device; Terminating active tracks\n");
+		/* terminate all active tracks */
+		cur_tch = CY_NTCH;
+		/* reset TTSP part and take it back out of Bootloader mode */
+		/* reset TTSP Device back to bootloader mode */
+		host_reg = CY_SOFT_RESET_MODE;
+		retval = i2c_smbus_write_i2c_block_data(ts->client, CY_REG_BASE,
+			sizeof(host_reg), &host_reg);
+		/* wait for TTSP Device to complete reset back to bootloader */
+		tries = 0;
+		do {
+			mdelay(1);
+			cyttsp_putbl(ts, 1, false, false, false);
+		} while (g_bl_data.bl_status != 0x10 &&
+			g_bl_data.bl_status != 0x11 &&
+			tries++ < 100);
+		retval = cyttsp_putbl(ts, 1, true, true, true);
+		/* switch back to operational mode */
+		/* take TTSP device out of bootloader mode;
+		 * switch back to TrueTouch operational mode */
+		if (!(retval < CY_OK)) {
+			int tries;
+			retval = i2c_smbus_write_i2c_block_data(ts->client,
+				CY_REG_BASE,
+				sizeof(bl_cmd), bl_cmd);
+			/* wait for TTSP Device to complete
+			 * switch to Operational mode */
+			tries = 0;
+			do {
+				mdelay(100);
+				cyttsp_putbl(ts, 2, false, false, false);
+			} while (GET_BOOTLOADERMODE(g_bl_data.bl_status) &&
+				tries++ < 100);
+			cyttsp_putbl(ts, 2, true, false, false);
+		}
+		goto exit_xy_worker;
+	} else {
+		cur_tch = GET_NUM_TOUCHES(g_xy_data.tt_stat);
+		if (IS_LARGE_AREA(g_xy_data.tt_stat)) {
+			/* terminate all active tracks */
+			cur_tch = CY_NTCH;
+			cyttsp_debug("Large obj detect (tt_stat=0x%02X). Terminate act trks\n", \
+			    g_xy_data.tt_stat);
+		} else if (cur_tch > CY_NUM_MT_TCH_ID) {
+			/* if the number of fingers on the touch surface
+			 * is more than the maximum then
+			 * there will be no new track information
+			 * even for the original touches.
+			 * Therefore, terminate all active tracks.
+			 */
+			cur_tch = CY_NTCH;
+			cyttsp_debug("Num touch err (tt_stat=0x%02X). Terminate act trks\n", \
+			    g_xy_data.tt_stat);
+		}
+	}
+
+	/* set tool size */
+	curr_tool_width = CY_SMALL_TOOL_WIDTH;
+
+	/* translate Gen2 interface data into comparable Gen3 data */
+	if (ts->platform_data->gen == CY_GEN2) {
+		struct cyttsp_gen2_xydata_t *pxy_gen2_data;
+		pxy_gen2_data = (struct cyttsp_gen2_xydata_t *)(&g_xy_data);
+
+		/* use test data? */
+		cyttsp_testdat(&g_xy_data, &tt_gen2_testray, \
+			sizeof(struct cyttsp_gen3_xydata_t));
+
+		if (pxy_gen2_data->evnt_idx == CY_GEN2_NOTOUCH) {
+			cur_tch = 0;
+		} else if (cur_tch == CY_GEN2_GHOST) {
+			cur_tch = 0;
+		} else if (cur_tch == CY_GEN2_2TOUCH) {
+			/* stuff artificial track ID1 and ID2 */
+			g_xy_data.touch12_id = 0x12;
+			g_xy_data.z1 = CY_MAXZ;
+			g_xy_data.z2 = CY_MAXZ;
+			cur_tch--;			/* 2 touches */
+		} else if (cur_tch == CY_GEN2_1TOUCH) {
+			/* stuff artificial track ID1 and ID2 */
+			g_xy_data.touch12_id = 0x12;
+			g_xy_data.z1 = CY_MAXZ;
+			g_xy_data.z2 = CY_NTCH;
+			if (pxy_gen2_data->evnt_idx == CY_GEN2_TOUCH2) {
+				/* push touch 2 data into touch1
+				 * (first finger up; second finger down) */
+				/* stuff artificial track ID1 for touch2 info */
+				g_xy_data.touch12_id = 0x20;
+				/* stuff touch 1 with touch 2 coordinate data */
+				g_xy_data.x1 = g_xy_data.x2;
+				g_xy_data.y1 = g_xy_data.y2;
+			}
+		} else {
+			cur_tch = 0;
+		}
+	} else {
+		/* use test data? */
+		cyttsp_testdat(&g_xy_data, &tt_gen3_testray, \
+			sizeof(struct cyttsp_gen3_xydata_t));
+	}
+
+
+
+	/* clear current active track ID array and count previous touches */
+	for (id = 0, prv_tch = CY_NTCH;
+		id < CY_NUM_TRK_ID; id++) {
+		cur_trk[id] = CY_NTCH;
+		prv_tch += ts->act_trk[id];
+	}
+
+	/* send no events if no previous touches and no new touches */
+	if ((prv_tch == CY_NTCH) &&
+		((cur_tch == CY_NTCH) ||
+		(cur_tch > CY_NUM_MT_TCH_ID))) {
+		goto exit_xy_worker;
+	}
+
+	cyttsp_debug("prev=%d  curr=%d\n", prv_tch, cur_tch);
+
+	for (id = 0; id < CY_NUM_ST_TCH_ID; id++) {
+		/* clear current single touches array */
+		cur_st_tch[id] = CY_IGNR_TCH;
+	}
+
+	/* clear single touch positions */
+	st_x1 = CY_NTCH;
+	st_y1 = CY_NTCH;
+	st_z1 = CY_NTCH;
+	st_x2 = CY_NTCH;
+	st_y2 = CY_NTCH;
+	st_z2 = CY_NTCH;
+
+	for (id = 0; id < CY_NUM_MT_TCH_ID; id++) {
+		/* clear current multi-touches array and
+		 * multi-touch positions/z */
+		cur_mt_tch[id] = CY_IGNR_TCH;
+	}
+
+	if (ts->platform_data->use_trk_id) {
+		for (id = 0; id < CY_NUM_MT_TCH_ID; id++) {
+			cur_mt_pos[id][CY_XPOS] = 0;
+			cur_mt_pos[id][CY_YPOS] = 0;
+			cur_mt_z[id] = 0;
+		}
+	} else {
+		for (id = 0; id < CY_NUM_TRK_ID; id++) {
+			cur_mt_pos[id][CY_XPOS] = 0;
+			cur_mt_pos[id][CY_YPOS] = 0;
+			cur_mt_z[id] = 0;
+		}
+	}
+
+	/* Determine if display is tilted */
+	if (FLIP_DATA(ts->platform_data->flags))
+		tilt = true;
+	else
+		tilt = false;
+
+	/* Check for switch in origin */
+	if (REVERSE_X(ts->platform_data->flags))
+		rev_x = true;
+	else
+		rev_x = false;
+
+	if (REVERSE_Y(ts->platform_data->flags))
+		rev_y = true;
+	else
+		rev_y = false;
+
+	if (cur_tch) {
+		struct cyttsp_gen2_xydata_t *pxy_gen2_data;
+		struct cyttsp_gen3_xydata_t *pxy_gen3_data;
+		switch (ts->platform_data->gen) {
+		case CY_GEN2: {
+			pxy_gen2_data =
+				(struct cyttsp_gen2_xydata_t *)(&g_xy_data);
+			cyttsp_xdebug("TTSP Gen2 report:\n");
+			cyttsp_xdebug("%02X %02X %02X\n", \
+				pxy_gen2_data->hst_mode, \
+				pxy_gen2_data->tt_mode, \
+				pxy_gen2_data->tt_stat);
+			cyttsp_xdebug("%04X %04X %02X  %02X\n", \
+				pxy_gen2_data->x1, \
+				pxy_gen2_data->y1, \
+				pxy_gen2_data->z1, \
+				pxy_gen2_data->evnt_idx);
+			cyttsp_xdebug("%04X %04X %02X\n", \
+				pxy_gen2_data->x2, \
+				pxy_gen2_data->y2, \
+				pxy_gen2_data->tt_undef1);
+			cyttsp_xdebug("%02X %02X %02X\n", \
+				pxy_gen2_data->gest_cnt, \
+				pxy_gen2_data->gest_id, \
+				pxy_gen2_data->gest_set);
+			break;
+		}
+		case CY_GEN3:
+		default: {
+			pxy_gen3_data =
+				(struct cyttsp_gen3_xydata_t *)(&g_xy_data);
+			cyttsp_xdebug("TTSP Gen3 report:\n");
+			cyttsp_xdebug("%02X %02X %02X\n", \
+				pxy_gen3_data->hst_mode,
+				pxy_gen3_data->tt_mode,
+				pxy_gen3_data->tt_stat);
+			cyttsp_xdebug("%04X %04X %02X  %02X", \
+				pxy_gen3_data->x1,
+				pxy_gen3_data->y1,
+				pxy_gen3_data->z1, \
+				pxy_gen3_data->touch12_id);
+			cyttsp_xdebug("%04X %04X %02X\n", \
+				pxy_gen3_data->x2, \
+				pxy_gen3_data->y2, \
+				pxy_gen3_data->z2);
+			cyttsp_xdebug("%02X %02X %02X\n", \
+				pxy_gen3_data->gest_cnt, \
+				pxy_gen3_data->gest_id, \
+				pxy_gen3_data->gest_set);
+			cyttsp_xdebug("%04X %04X %02X  %02X\n", \
+				pxy_gen3_data->x3, \
+				pxy_gen3_data->y3, \
+				pxy_gen3_data->z3, \
+				pxy_gen3_data->touch34_id);
+			cyttsp_xdebug("%04X %04X %02X\n", \
+				pxy_gen3_data->x4, \
+				pxy_gen3_data->y4, \
+				pxy_gen3_data->z4);
+			break;
+		}
+		}
+	}
+
+	/* process the touches */
+	switch (cur_tch) {
+	case 4: {
+		g_xy_data.x4 = be16_to_cpu(g_xy_data.x4);
+		g_xy_data.y4 = be16_to_cpu(g_xy_data.y4);
+		if (tilt)
+			FLIP_XY(g_xy_data.x4, g_xy_data.y4);
+
+		if (rev_x) {
+			g_xy_data.x4 = INVERT_X(g_xy_data.x4,
+						ts->platform_data->panel_maxx);
+			if (g_xy_data.x4 < 0)
+				pr_debug("X value is negative. Please configure"
+					" maxx in platform data structure\n");
+		}
+		if (rev_y) {
+			g_xy_data.y4 = INVERT_X(g_xy_data.y4,
+						ts->platform_data->panel_maxy);
+			if (g_xy_data.y4 < 0)
+				pr_debug("Y value is negative. Please configure"
+					" maxy in platform data structure\n");
+
+		}
+		id = GET_TOUCH4_ID(g_xy_data.touch34_id);
+		if (ts->platform_data->use_trk_id) {
+			cur_mt_pos[CY_MT_TCH4_IDX][CY_XPOS] =
+				g_xy_data.x4;
+			cur_mt_pos[CY_MT_TCH4_IDX][CY_YPOS] =
+				g_xy_data.y4;
+			cur_mt_z[CY_MT_TCH4_IDX] = g_xy_data.z4;
+		} else {
+			cur_mt_pos[id][CY_XPOS] = g_xy_data.x4;
+			cur_mt_pos[id][CY_YPOS] = g_xy_data.y4;
+			cur_mt_z[id] = g_xy_data.z4;
+		}
+		cur_mt_tch[CY_MT_TCH4_IDX] = id;
+		cur_trk[id] = CY_TCH;
+		if (ts->prv_st_tch[CY_ST_FNGR1_IDX] <
+			CY_NUM_TRK_ID) {
+			if (ts->prv_st_tch[CY_ST_FNGR1_IDX] == id) {
+				st_x1 = g_xy_data.x4;
+				st_y1 = g_xy_data.y4;
+				st_z1 = g_xy_data.z4;
+				cur_st_tch[CY_ST_FNGR1_IDX] = id;
+			} else if (ts->prv_st_tch[CY_ST_FNGR2_IDX] == id) {
+				st_x2 = g_xy_data.x4;
+				st_y2 = g_xy_data.y4;
+				st_z2 = g_xy_data.z4;
+				cur_st_tch[CY_ST_FNGR2_IDX] = id;
+			}
+		}
+		cyttsp_xdebug("4th XYZ:% 3d,% 3d,% 3d  ID:% 2d\n\n", \
+			g_xy_data.x4, g_xy_data.y4, g_xy_data.z4, \
+			(g_xy_data.touch34_id & 0x0F));
+		/* do not break */
+	}
+	case 3: {
+		g_xy_data.x3 = be16_to_cpu(g_xy_data.x3);
+		g_xy_data.y3 = be16_to_cpu(g_xy_data.y3);
+		if (tilt)
+			FLIP_XY(g_xy_data.x3, g_xy_data.y3);
+
+		if (rev_x) {
+			g_xy_data.x3 = INVERT_X(g_xy_data.x3,
+						ts->platform_data->panel_maxx);
+			if (g_xy_data.x3 < 0)
+				pr_debug("X value is negative. Please configure"
+					" maxx in platform data structure\n");
+
+		}
+		if (rev_y) {
+			g_xy_data.y3 = INVERT_X(g_xy_data.y3,
+						ts->platform_data->panel_maxy);
+			if (g_xy_data.y3 < 0)
+				pr_debug("Y value is negative. Please configure"
+					" maxy in platform data structure\n");
+
+		}
+		id = GET_TOUCH3_ID(g_xy_data.touch34_id);
+		if (ts->platform_data->use_trk_id) {
+			cur_mt_pos[CY_MT_TCH3_IDX][CY_XPOS] =
+				g_xy_data.x3;
+			cur_mt_pos[CY_MT_TCH3_IDX][CY_YPOS] =
+				g_xy_data.y3;
+			cur_mt_z[CY_MT_TCH3_IDX] = g_xy_data.z3;
+		} else {
+			cur_mt_pos[id][CY_XPOS] = g_xy_data.x3;
+			cur_mt_pos[id][CY_YPOS] = g_xy_data.y3;
+			cur_mt_z[id] = g_xy_data.z3;
+		}
+		cur_mt_tch[CY_MT_TCH3_IDX] = id;
+		cur_trk[id] = CY_TCH;
+		if (ts->prv_st_tch[CY_ST_FNGR1_IDX] <
+			CY_NUM_TRK_ID) {
+			if (ts->prv_st_tch[CY_ST_FNGR1_IDX] == id) {
+				st_x1 = g_xy_data.x3;
+				st_y1 = g_xy_data.y3;
+				st_z1 = g_xy_data.z3;
+				cur_st_tch[CY_ST_FNGR1_IDX] = id;
+			} else if (ts->prv_st_tch[CY_ST_FNGR2_IDX] == id) {
+				st_x2 = g_xy_data.x3;
+				st_y2 = g_xy_data.y3;
+				st_z2 = g_xy_data.z3;
+				cur_st_tch[CY_ST_FNGR2_IDX] = id;
+			}
+		}
+		cyttsp_xdebug("3rd XYZ:% 3d,% 3d,% 3d  ID:% 2d\n", \
+			g_xy_data.x3, g_xy_data.y3, g_xy_data.z3, \
+			((g_xy_data.touch34_id >> 4) & 0x0F));
+		/* do not break */
+	}
+	case 2: {
+		g_xy_data.x2 = be16_to_cpu(g_xy_data.x2);
+		g_xy_data.y2 = be16_to_cpu(g_xy_data.y2);
+		if (tilt)
+			FLIP_XY(g_xy_data.x2, g_xy_data.y2);
+
+		if (rev_x) {
+			g_xy_data.x2 = INVERT_X(g_xy_data.x2,
+						ts->platform_data->panel_maxx);
+			if (g_xy_data.x2 < 0)
+				pr_debug("X value is negative. Please configure"
+					" maxx in platform data structure\n");
+		}
+		if (rev_y) {
+			g_xy_data.y2 = INVERT_X(g_xy_data.y2,
+						ts->platform_data->panel_maxy);
+			if (g_xy_data.y2 < 0)
+				pr_debug("Y value is negative. Please configure"
+					" maxy in platform data structure\n");
+		}
+		id = GET_TOUCH2_ID(g_xy_data.touch12_id);
+		if (ts->platform_data->use_trk_id) {
+			cur_mt_pos[CY_MT_TCH2_IDX][CY_XPOS] =
+				g_xy_data.x2;
+			cur_mt_pos[CY_MT_TCH2_IDX][CY_YPOS] =
+				g_xy_data.y2;
+			cur_mt_z[CY_MT_TCH2_IDX] = g_xy_data.z2;
+		} else {
+			cur_mt_pos[id][CY_XPOS] = g_xy_data.x2;
+			cur_mt_pos[id][CY_YPOS] = g_xy_data.y2;
+			cur_mt_z[id] = g_xy_data.z2;
+		}
+		cur_mt_tch[CY_MT_TCH2_IDX] = id;
+		cur_trk[id] = CY_TCH;
+		if (ts->prv_st_tch[CY_ST_FNGR1_IDX] <
+			CY_NUM_TRK_ID) {
+			if (ts->prv_st_tch[CY_ST_FNGR1_IDX] == id) {
+				st_x1 = g_xy_data.x2;
+				st_y1 = g_xy_data.y2;
+				st_z1 = g_xy_data.z2;
+				cur_st_tch[CY_ST_FNGR1_IDX] = id;
+			} else if (ts->prv_st_tch[CY_ST_FNGR2_IDX] == id) {
+				st_x2 = g_xy_data.x2;
+				st_y2 = g_xy_data.y2;
+				st_z2 = g_xy_data.z2;
+				cur_st_tch[CY_ST_FNGR2_IDX] = id;
+			}
+		}
+		cyttsp_xdebug("2nd XYZ:% 3d,% 3d,% 3d  ID:% 2d\n", \
+			g_xy_data.x2, g_xy_data.y2, g_xy_data.z2, \
+			(g_xy_data.touch12_id & 0x0F));
+		/* do not break */
+	}
+	case 1:	{
+		g_xy_data.x1 = be16_to_cpu(g_xy_data.x1);
+		g_xy_data.y1 = be16_to_cpu(g_xy_data.y1);
+		if (tilt)
+			FLIP_XY(g_xy_data.x1, g_xy_data.y1);
+
+		if (rev_x) {
+			g_xy_data.x1 = INVERT_X(g_xy_data.x1,
+						ts->platform_data->panel_maxx);
+			if (g_xy_data.x1 < 0)
+				pr_debug("X value is negative. Please configure"
+					" maxx in platform data structure\n");
+		}
+		if (rev_y) {
+			g_xy_data.y1 = INVERT_X(g_xy_data.y1,
+						ts->platform_data->panel_maxy);
+			if (g_xy_data.y1 < 0)
+				pr_debug("Y value is negative. Please configure"
+					" maxy in platform data structure\n");
+		}
+		id = GET_TOUCH1_ID(g_xy_data.touch12_id);
+		if (ts->platform_data->use_trk_id) {
+			cur_mt_pos[CY_MT_TCH1_IDX][CY_XPOS] =
+				g_xy_data.x1;
+			cur_mt_pos[CY_MT_TCH1_IDX][CY_YPOS] =
+				g_xy_data.y1;
+			cur_mt_z[CY_MT_TCH1_IDX] = g_xy_data.z1;
+		} else {
+			cur_mt_pos[id][CY_XPOS] = g_xy_data.x1;
+			cur_mt_pos[id][CY_YPOS] = g_xy_data.y1;
+			cur_mt_z[id] = g_xy_data.z1;
+		}
+		cur_mt_tch[CY_MT_TCH1_IDX] = id;
+		cur_trk[id] = CY_TCH;
+		if (ts->prv_st_tch[CY_ST_FNGR1_IDX] <
+			CY_NUM_TRK_ID) {
+			if (ts->prv_st_tch[CY_ST_FNGR1_IDX] == id) {
+				st_x1 = g_xy_data.x1;
+				st_y1 = g_xy_data.y1;
+				st_z1 = g_xy_data.z1;
+				cur_st_tch[CY_ST_FNGR1_IDX] = id;
+			} else if (ts->prv_st_tch[CY_ST_FNGR2_IDX] == id) {
+				st_x2 = g_xy_data.x1;
+				st_y2 = g_xy_data.y1;
+				st_z2 = g_xy_data.z1;
+				cur_st_tch[CY_ST_FNGR2_IDX] = id;
+			}
+		}
+		cyttsp_xdebug("1st XYZ:% 3d,% 3d,% 3d  ID:% 2d\n", \
+			g_xy_data.x1, g_xy_data.y1, g_xy_data.z1, \
+			((g_xy_data.touch12_id >> 4) & 0x0F));
+		break;
+	}
+	case 0:
+	default:{
+		break;
+	}
+	}
+
+	/* handle Single Touch signals */
+	if (ts->platform_data->use_st) {
+		cyttsp_xdebug("ST STEP 0 - ST1 ID=%d  ST2 ID=%d\n", \
+			cur_st_tch[CY_ST_FNGR1_IDX], \
+			cur_st_tch[CY_ST_FNGR2_IDX]);
+		if (cur_st_tch[CY_ST_FNGR1_IDX] > CY_NUM_TRK_ID) {
+			/* reassign finger 1 and 2 positions to new tracks */
+			if (cur_tch > 0) {
+				/* reassign st finger1 */
+				if (ts->platform_data->use_trk_id) {
+					id = CY_MT_TCH1_IDX;
+					cur_st_tch[CY_ST_FNGR1_IDX] = cur_mt_tch[id];
+				} else {
+					id = GET_TOUCH1_ID(g_xy_data.touch12_id);
+					cur_st_tch[CY_ST_FNGR1_IDX] = id;
+				}
+				st_x1 = cur_mt_pos[id][CY_XPOS];
+				st_y1 = cur_mt_pos[id][CY_YPOS];
+				st_z1 = cur_mt_z[id];
+				cyttsp_xdebug("ST STEP 1 - ST1 ID=%3d\n", \
+					cur_st_tch[CY_ST_FNGR1_IDX]);
+				if ((cur_tch > 1) &&
+					(cur_st_tch[CY_ST_FNGR2_IDX] >
+					CY_NUM_TRK_ID)) {
+					/* reassign st finger2 */
+					if (cur_tch > 1) {
+						if (ts->platform_data->use_trk_id) {
+							id = CY_MT_TCH2_IDX;
+							cur_st_tch[CY_ST_FNGR2_IDX] = cur_mt_tch[id];
+						} else {
+							id = GET_TOUCH2_ID(g_xy_data.touch12_id);
+							cur_st_tch[CY_ST_FNGR2_IDX] = id;
+						}
+						st_x2 = cur_mt_pos[id][CY_XPOS];
+						st_y2 = cur_mt_pos[id][CY_YPOS];
+						st_z2 = cur_mt_z[id];
+						cyttsp_xdebug("ST STEP 2 - ST2 ID=%3d\n", \
+							cur_st_tch[CY_ST_FNGR2_IDX]);
+					}
+				}
+			}
+		} else if (cur_st_tch[CY_ST_FNGR2_IDX] > CY_NUM_TRK_ID) {
+			if (cur_tch > 1) {
+				/* reassign st finger2 */
+				if (ts->platform_data->use_trk_id) {
+					/* reassign st finger2 */
+					id = CY_MT_TCH2_IDX;
+					cur_st_tch[CY_ST_FNGR2_IDX] =
+						cur_mt_tch[id];
+				} else {
+					/* reassign st finger2 */
+					id = GET_TOUCH2_ID(g_xy_data.touch12_id);
+					cur_st_tch[CY_ST_FNGR2_IDX] = id;
+				}
+				st_x2 = cur_mt_pos[id][CY_XPOS];
+				st_y2 = cur_mt_pos[id][CY_YPOS];
+				st_z2 = cur_mt_z[id];
+				cyttsp_xdebug("ST STEP 3 - ST2 ID=%3d\n", \
+					cur_st_tch[CY_ST_FNGR2_IDX]);
+			}
+		}
+		/* if the 1st touch is missing and there is a 2nd touch,
+		 * then set the 1st touch to 2nd touch and terminate 2nd touch
+		 */
+		if ((cur_st_tch[CY_ST_FNGR1_IDX] > CY_NUM_TRK_ID) &&
+		    (cur_st_tch[CY_ST_FNGR2_IDX] < CY_NUM_TRK_ID)) {
+			st_x1 = st_x2;
+			st_y1 = st_y2;
+			st_z1 = st_z2;
+			cur_st_tch[CY_ST_FNGR1_IDX] =
+				cur_st_tch[CY_ST_FNGR2_IDX];
+			cur_st_tch[CY_ST_FNGR2_IDX] =
+				CY_IGNR_TCH;
+		}
+		/* if the 2nd touch ends up equal to the 1st touch,
+		 * then just report a single touch */
+		if (cur_st_tch[CY_ST_FNGR1_IDX] ==
+			cur_st_tch[CY_ST_FNGR2_IDX]) {
+			cur_st_tch[CY_ST_FNGR2_IDX] =
+				CY_IGNR_TCH;
+		}
+		/* set Single Touch current event signals */
+		if (cur_st_tch[CY_ST_FNGR1_IDX] < CY_NUM_TRK_ID) {
+			input_report_abs(ts->input,
+				ABS_X, st_x1);
+			input_report_abs(ts->input,
+				ABS_Y, st_y1);
+			input_report_abs(ts->input,
+				ABS_PRESSURE, st_z1);
+			input_report_key(ts->input,
+				BTN_TOUCH,
+				CY_TCH);
+			input_report_abs(ts->input,
+				ABS_TOOL_WIDTH,
+				curr_tool_width);
+			cyttsp_debug("ST->F1:%3d X:%3d Y:%3d Z:%3d\n", \
+				cur_st_tch[CY_ST_FNGR1_IDX], \
+				st_x1, st_y1, st_z1);
+			if (cur_st_tch[CY_ST_FNGR2_IDX] < CY_NUM_TRK_ID) {
+				input_report_key(ts->input, BTN_2, CY_TCH);
+				input_report_abs(ts->input, ABS_HAT0X, st_x2);
+				input_report_abs(ts->input, ABS_HAT0Y, st_y2);
+				cyttsp_debug("ST->F2:%3d X:%3d Y:%3d Z:%3d\n", \
+					cur_st_tch[CY_ST_FNGR2_IDX],
+					st_x2, st_y2, st_z2);
+			} else {
+				input_report_key(ts->input,
+					BTN_2,
+					CY_NTCH);
+			}
+		} else {
+			input_report_abs(ts->input, ABS_PRESSURE, CY_NTCH);
+			input_report_key(ts->input, BTN_TOUCH, CY_NTCH);
+			input_report_key(ts->input, BTN_2, CY_NTCH);
+		}
+		/* update platform data for the current single touch info */
+		ts->prv_st_tch[CY_ST_FNGR1_IDX] = cur_st_tch[CY_ST_FNGR1_IDX];
+		ts->prv_st_tch[CY_ST_FNGR2_IDX] = cur_st_tch[CY_ST_FNGR2_IDX];
+
+	}
+
+	/* handle Multi-touch signals */
+	if (ts->platform_data->use_mt) {
+		if (ts->platform_data->use_trk_id) {
+			/* terminate any previous touch where the track
+			 * is missing from the current event */
+			for (id = 0; id < CY_NUM_TRK_ID; id++) {
+				if ((ts->act_trk[id] != CY_NTCH) &&
+					(cur_trk[id] == CY_NTCH)) {
+					input_report_abs(ts->input,
+						ABS_MT_TRACKING_ID,
+						id);
+					input_report_abs(ts->input,
+						ABS_MT_TOUCH_MAJOR,
+						CY_NTCH);
+					input_report_abs(ts->input,
+						ABS_MT_WIDTH_MAJOR,
+						curr_tool_width);
+					input_report_abs(ts->input,
+						ABS_MT_POSITION_X,
+						ts->prv_mt_pos[id][CY_XPOS]);
+					input_report_abs(ts->input,
+						ABS_MT_POSITION_Y,
+						ts->prv_mt_pos[id][CY_YPOS]);
+					CY_MT_SYNC(ts->input);
+					ts->act_trk[id] = CY_NTCH;
+					ts->prv_mt_pos[id][CY_XPOS] = 0;
+					ts->prv_mt_pos[id][CY_YPOS] = 0;
+				}
+			}
+			/* set Multi-Touch current event signals */
+			for (id = 0; id < CY_NUM_MT_TCH_ID; id++) {
+				if (cur_mt_tch[id] < CY_NUM_TRK_ID) {
+					input_report_abs(ts->input,
+						ABS_MT_TRACKING_ID,
+						cur_mt_tch[id]);
+					input_report_abs(ts->input,
+						ABS_MT_TOUCH_MAJOR,
+						cur_mt_z[id]);
+					input_report_abs(ts->input,
+						ABS_MT_WIDTH_MAJOR,
+						curr_tool_width);
+					input_report_abs(ts->input,
+						ABS_MT_POSITION_X,
+						cur_mt_pos[id][CY_XPOS]);
+					input_report_abs(ts->input,
+						ABS_MT_POSITION_Y,
+						cur_mt_pos[id][CY_YPOS]);
+					CY_MT_SYNC(ts->input);
+					ts->act_trk[id] = CY_TCH;
+					ts->prv_mt_pos[id][CY_XPOS] =
+						cur_mt_pos[id][CY_XPOS];
+					ts->prv_mt_pos[id][CY_YPOS] =
+						cur_mt_pos[id][CY_YPOS];
+				}
+			}
+		} else {
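+			/* The firmware is not reporting tracking IDs here, so
+			 * rebuild the send list (snd_trk[]): touches that were
+			 * present in the previous frame keep their old slot and
+			 * brand-new touches take the first free slot. */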
+			/* set temporary track array elements to voids */
+			for (id = 0; id < CY_NUM_MT_TCH_ID; id++) {
+				tmp_trk[id] = CY_IGNR_TCH;
+				snd_trk[id] = CY_IGNR_TCH;
+			}
+
+			/* get what is currently active */
+			for (i = 0, id = 0;
+				id < CY_NUM_TRK_ID && i < CY_NUM_MT_TCH_ID;
+				id++) {
+				if (cur_trk[id] == CY_TCH) {
+					/* only incr counter if track found */
+					tmp_trk[i] = id;
+					i++;
+				}
+			}
+			cyttsp_xdebug("T1: t0=%d, t1=%d, t2=%d, t3=%d\n", \
+				tmp_trk[0], tmp_trk[1], tmp_trk[2], \
+				tmp_trk[3]);
+			cyttsp_xdebug("T1: p0=%d, p1=%d, p2=%d, p3=%d\n", \
+				ts->prv_mt_tch[0], ts->prv_mt_tch[1], \
+				ts->prv_mt_tch[2], ts->prv_mt_tch[3]);
+
+			/* pack in still active previous touches */
+			for (id = 0, prv_tch = 0;
+				id < CY_NUM_MT_TCH_ID; id++) {
+				if (tmp_trk[id] < CY_NUM_TRK_ID) {
+					if (cyttsp_inlist(ts->prv_mt_tch,
+						tmp_trk[id], &loc,
+						CY_NUM_MT_TCH_ID)) {
+						loc &= CY_NUM_MT_TCH_ID - 1;
+						snd_trk[loc] = tmp_trk[id];
+						prv_tch++;
+						cyttsp_xdebug("inlist s[%d]=%d t[%d]=%d l=%d p=%d\n", \
+							loc, snd_trk[loc], \
+							id, tmp_trk[id], \
+							loc, prv_tch);
+					} else {
+						cyttsp_xdebug("not inlist s[%d]=%d t[%d]=%d l=%d \n", \
+							id, snd_trk[id], \
+							id, tmp_trk[id], \
+							loc);
+					}
+				}
+			}
+			cyttsp_xdebug("S1: s0=%d, s1=%d, s2=%d, s3=%d p=%d\n", \
+				snd_trk[0], snd_trk[1], snd_trk[2], \
+				snd_trk[3], prv_tch);
+
+			/* pack in new touches */
+			for (id = 0; id < CY_NUM_MT_TCH_ID; id++) {
+				if (tmp_trk[id] < CY_NUM_TRK_ID) {
+					if (!cyttsp_inlist(snd_trk, tmp_trk[id], &loc, CY_NUM_MT_TCH_ID)) {
+						cyttsp_xdebug("not inlist t[%d]=%d l=%d\n", \
+							id, tmp_trk[id], loc);
+						if (cyttsp_next_avail_inlist(snd_trk, &loc, CY_NUM_MT_TCH_ID)) {
+							loc &= CY_NUM_MT_TCH_ID - 1;
+							snd_trk[loc] = tmp_trk[id];
+							cyttsp_xdebug("put inlist s[%d]=%d t[%d]=%d\n",
+								loc, snd_trk[loc], id, tmp_trk[id]);
+						}
+					} else {
+						cyttsp_xdebug("is in list s[%d]=%d t[%d]=%d loc=%d\n", \
+							id, snd_trk[id], id, tmp_trk[id], loc);
+					}
+				}
+			}
+			cyttsp_xdebug("S2: s0=%d, s1=%d, s2=%d, s3=%d\n", \
+				snd_trk[0], snd_trk[1],
+				snd_trk[2], snd_trk[3]);
+
+			/* sync motion event signals for each current touch */
+			for (id = 0; id < CY_NUM_MT_TCH_ID; id++) {
+				/* z will either be 0 (NOTOUCH) or
+				 * some pressure (TOUCH) */
+				cyttsp_xdebug("MT0 prev[%d]=%d temp[%d]=%d send[%d]=%d\n", \
+					id, ts->prv_mt_tch[id], \
+					id, tmp_trk[id], \
+					id, snd_trk[id]);
+				if (snd_trk[id] < CY_NUM_TRK_ID) {
+					input_report_abs(ts->input,
+						ABS_MT_TOUCH_MAJOR,
+						cur_mt_z[snd_trk[id]]);
+					input_report_abs(ts->input,
+						ABS_MT_WIDTH_MAJOR,
+						curr_tool_width);
+					input_report_abs(ts->input,
+						ABS_MT_POSITION_X,
+						cur_mt_pos[snd_trk[id]][CY_XPOS]);
+					input_report_abs(ts->input,
+						ABS_MT_POSITION_Y,
+						cur_mt_pos[snd_trk[id]][CY_YPOS]);
+					CY_MT_SYNC(ts->input);
+					cyttsp_debug("MT1->TID:%2d X:%3d Y:%3d Z:%3d touch-sent\n", \
+						snd_trk[id], \
+						cur_mt_pos[snd_trk[id]][CY_XPOS], \
+						cur_mt_pos[snd_trk[id]][CY_YPOS], \
+						cur_mt_z[snd_trk[id]]);
+				} else if (ts->prv_mt_tch[id] < CY_NUM_TRK_ID) {
+					/* void out this touch */
+					input_report_abs(ts->input,
+						ABS_MT_TOUCH_MAJOR,
+						CY_NTCH);
+					input_report_abs(ts->input,
+						ABS_MT_WIDTH_MAJOR,
+						curr_tool_width);
+					input_report_abs(ts->input,
+						ABS_MT_POSITION_X,
+						ts->prv_mt_pos[ts->prv_mt_tch[id]][CY_XPOS]);
+					input_report_abs(ts->input,
+						ABS_MT_POSITION_Y,
+						ts->prv_mt_pos[ts->prv_mt_tch[id]][CY_YPOS]);
+					CY_MT_SYNC(ts->input);
+					cyttsp_debug("MT2->TID:%2d X:%3d Y:%3d Z:%3d lift off-sent\n", \
+						ts->prv_mt_tch[id], \
+						ts->prv_mt_pos[ts->prv_mt_tch[id]][CY_XPOS], \
+						ts->prv_mt_pos[ts->prv_mt_tch[id]][CY_YPOS], \
+						CY_NTCH);
+				} else {
+					/* do not stuff any signals for this
+					 * previously and currently
+					 * void touches */
+					cyttsp_xdebug("MT3->send[%d]=%d - No touch - NOT sent\n", \
+							id, snd_trk[id]);
+				}
+			}
+
+			/* save current posted tracks to
+			 * previous track memory */
+			for (id = 0; id < CY_NUM_MT_TCH_ID; id++) {
+				ts->prv_mt_tch[id] = snd_trk[id];
+				if (snd_trk[id] < CY_NUM_TRK_ID) {
+					ts->prv_mt_pos[snd_trk[id]][CY_XPOS] =
+						cur_mt_pos[snd_trk[id]][CY_XPOS];
+					ts->prv_mt_pos[snd_trk[id]][CY_YPOS] =
+						cur_mt_pos[snd_trk[id]][CY_YPOS];
+					cyttsp_xdebug("MT4->TID:%2d X:%3d Y:%3d Z:%3d save for previous\n", \
+						snd_trk[id], \
+						ts->prv_mt_pos[snd_trk[id]][CY_XPOS], \
+						ts->prv_mt_pos[snd_trk[id]][CY_YPOS], \
+						CY_NTCH);
+				}
+			}
+			for (id = 0; id < CY_NUM_TRK_ID; id++)
+				ts->act_trk[id] = CY_NTCH;
+			for (id = 0; id < CY_NUM_MT_TCH_ID; id++) {
+				if (snd_trk[id] < CY_NUM_TRK_ID)
+					ts->act_trk[snd_trk[id]] = CY_TCH;
+			}
+		}
+	}
+
+	/* handle gestures */
+	if (ts->platform_data->use_gestures) {
+		if (g_xy_data.gest_id) {
+			input_report_key(ts->input,
+				BTN_3, CY_TCH);
+			input_report_abs(ts->input,
+				ABS_HAT1X, g_xy_data.gest_id);
+			input_report_abs(ts->input,
+				ABS_HAT2Y, g_xy_data.gest_cnt);
+		}
+	}
+
+	/* signal the view motion event */
+	input_sync(ts->input);
+
+	for (id = 0; id < CY_NUM_TRK_ID; id++) {
+		/* update platform data for the current MT information */
+		ts->act_trk[id] = cur_trk[id];
+	}
+
+exit_xy_worker:
+	if (cyttsp_disable_touch) {
+		/* Turn off the touch interrupts */
+		cyttsp_debug("Not enabling touch\n");
+	} else {
+		if (ts->client->irq == 0) {
+			/* restart event timer */
+			mod_timer(&ts->timer, jiffies + TOUCHSCREEN_TIMEOUT);
+		} else {
+			/* re-enable the interrupt after processing */
+			enable_irq(ts->client->irq);
+		}
+	}
+	return;
+}
+
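+/* Search prev_track[] for cur_trk_id.  If found, *prev_loc is set to the
+ * matching index and true is returned; otherwise *prev_loc is left at
+ * CY_IGNR_TCH and false is returned. */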
+static int cyttsp_inlist(u16 prev_track[], u8 cur_trk_id,
+			u8 *prev_loc, u8 num_touches)
+{
+	u8 id = 0;
+
+	*prev_loc = CY_IGNR_TCH;
+
+	cyttsp_xdebug("IN p[%d]=%d c=%d n=%d loc=%d\n", \
+		id, prev_track[id], cur_trk_id, \
+		num_touches, *prev_loc);
+	for (id = 0; id < num_touches; id++) {
+		cyttsp_xdebug("p[%d]=%d c=%d n=%d loc=%d\n", \
+			id, prev_track[id], cur_trk_id, \
+			num_touches, *prev_loc);
+		if (prev_track[id] == cur_trk_id) {
+			*prev_loc = id;
+			break;
+		}
+	}
+	cyttsp_xdebug("OUT p[%d]=%d c=%d n=%d loc=%d\n", \
+		id, prev_track[id], cur_trk_id, num_touches, *prev_loc);
+
+	return ((*prev_loc < CY_NUM_TRK_ID) ? true : false);
+}
+
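+/* Find the first free slot (an entry above CY_NUM_TRK_ID) in cur_trk[] and
+ * return its index in *new_loc; returns true if a free slot exists. */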
+static int cyttsp_next_avail_inlist(u16 cur_trk[],
+			u8 *new_loc, u8 num_touches)
+{
+	u8 id;
+
+	for (id = 0, *new_loc = CY_IGNR_TCH;
+		(id < num_touches); id++) {
+		if (cur_trk[id] > CY_NUM_TRK_ID) {
+			*new_loc = id;
+			break;
+		}
+	}
+
+	return ((*new_loc < CY_NUM_TRK_ID) ? true : false);
+}
+
+/* Timer function used as dummy interrupt driver */
+static void cyttsp_timer(unsigned long handle)
+{
+	struct cyttsp *ts = (struct cyttsp *) handle;
+
+	cyttsp_xdebug("TTSP Device timer event\n");
+
+	/* schedule motion signal handling */
+	queue_work(cyttsp_ts_wq, &ts->work);
+
+	return;
+}
+
+/* ************************************************************************
+ * ISR function.  Registered during driver initialization; it masks further
+ * interrupts and queues the motion worker.
+ * ************************************************************************ */
+static irqreturn_t cyttsp_irq(int irq, void *handle)
+{
+	struct cyttsp *ts = (struct cyttsp *) handle;
+
+	cyttsp_xdebug("%s: Got IRQ\n", CY_I2C_NAME);
+
+	/* disable further interrupts until this interrupt is processed */
+	disable_irq_nosync(ts->client->irq);
+
+	/* schedule motion signal handling */
+	queue_work(cyttsp_ts_wq, &ts->work);
+	return IRQ_HANDLED;
+}
+
+/* ************************************************************************
+ * Probe initialization functions
+ * ************************************************************************ */
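+/* When 'show' is set, read the bootloader registers into g_bl_data over I2C
+ * and log the status, version and customer-ID fields selected by the
+ * show_status/show_version/show_cid arguments. */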
+static int cyttsp_putbl(struct cyttsp *ts, int show,
+			int show_status, int show_version, int show_cid)
+{
+	int retval = CY_OK;
+
+	int num_bytes;
+
+	if (show_cid)
+		num_bytes = sizeof(struct cyttsp_bootloader_data_t);
+	else if (show_version)
+		num_bytes = sizeof(struct cyttsp_bootloader_data_t) - 3;
+	else
+		num_bytes = sizeof(struct cyttsp_bootloader_data_t) - 9;
+
+	if (show) {
+		retval = i2c_smbus_read_i2c_block_data(ts->client,
+			CY_REG_BASE, num_bytes, (u8 *)&g_bl_data);
+		if (show_status) {
+			cyttsp_debug("BL%d: f=%02X s=%02X err=%02X bl=%02X%02X bld=%02X%02X\n", \
+				show, \
+				g_bl_data.bl_file, \
+				g_bl_data.bl_status, \
+				g_bl_data.bl_error, \
+				g_bl_data.blver_hi, g_bl_data.blver_lo, \
+				g_bl_data.bld_blver_hi, g_bl_data.bld_blver_lo);
+		}
+		if (show_version) {
+			cyttsp_debug("BL%d: ttspver=0x%02X%02X appid=0x%02X%02X appver=0x%02X%02X\n", \
+				show, \
+				g_bl_data.ttspver_hi, g_bl_data.ttspver_lo, \
+				g_bl_data.appid_hi, g_bl_data.appid_lo, \
+				g_bl_data.appver_hi, g_bl_data.appver_lo);
+		}
+		if (show_cid) {
+			cyttsp_debug("BL%d: cid=0x%02X%02X%02X\n", \
+				show, \
+				g_bl_data.cid_0, \
+				g_bl_data.cid_1, \
+				g_bl_data.cid_2);
+		}
+	}
+
+	return retval;
+}
+
+#ifdef CY_INCLUDE_LOAD_FILE
+#define CY_MAX_I2C_LEN	256
+#define CY_MAX_TRY		10
+#define CY_BL_PAGE_SIZE	16
+#define CY_BL_NUM_PAGES	5
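+/* Write one bootloader record in CY_BL_PAGE_SIZE chunks: the first chunk
+ * already carries the bootloader page offset, subsequent chunks have the
+ * offset stuffed into byte 0 of the scratch buffer. */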
+static int cyttsp_i2c_wr_blk_chunks(struct cyttsp *ts, u8 command,
+	u8 length, const u8 *values)
+{
+	int retval = CY_OK;
+	int block = 1;
+
+	u8 dataray[CY_MAX_I2C_LEN];
+
+	/* first page already includes the bl page offset */
+	retval = i2c_smbus_write_i2c_block_data(ts->client, CY_REG_BASE,
+		CY_BL_PAGE_SIZE+1, values);
+	values += CY_BL_PAGE_SIZE+1;
+	length -= CY_BL_PAGE_SIZE+1;
+
+	/* rem blocks require bl page offset stuffing */
+	while (length &&
+		(block < CY_BL_NUM_PAGES) &&
+		!(retval < CY_OK)) {
+		udelay(43*2);	/* TRM * 2 */
+		dataray[0] = CY_BL_PAGE_SIZE*block;
+		memcpy(&dataray[1], values,
+			length >= CY_BL_PAGE_SIZE ?
+			CY_BL_PAGE_SIZE : length);
+		retval = i2c_smbus_write_i2c_block_data(ts->client,
+			CY_REG_BASE,
+			length >= CY_BL_PAGE_SIZE ?
+			CY_BL_PAGE_SIZE + 1 : length+1, dataray);
+		values += CY_BL_PAGE_SIZE;
+		length = length >= CY_BL_PAGE_SIZE ?
+			length - CY_BL_PAGE_SIZE : 0;
+		block++;
+	}
+
+	return retval;
+}
+
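+/* Reset the device into its bootloader and stream the records from
+ * cyttsp_fw.h to it: the initiate-load record, the write-block records and
+ * finally the terminating records, polling the bootloader status between
+ * writes. */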
+static int cyttsp_bootload_app(struct cyttsp *ts)
+{
+	int retval = CY_OK;
+	int i, tries;
+	u8 host_reg;
+
+	cyttsp_debug("load new firmware \n");
+	/* reset TTSP Device back to bootloader mode */
+	host_reg = CY_SOFT_RESET_MODE;
+	retval = i2c_smbus_write_i2c_block_data(ts->client, CY_REG_BASE,
+		sizeof(host_reg), &host_reg);
+	/* wait for TTSP Device to complete reset back to bootloader */
+	tries = 0;
+	do {
+		mdelay(1);
+		cyttsp_putbl(ts, 3, false, false, false);
+	} while (g_bl_data.bl_status != 0x10 &&
+		g_bl_data.bl_status != 0x11 &&
+		tries++ < 100);
+	cyttsp_debug("load file - tver=0x%02X%02X a_id=0x%02X%02X aver=0x%02X%02X\n", \
+		cyttsp_fw_tts_verh, cyttsp_fw_tts_verl, \
+		cyttsp_fw_app_idh, cyttsp_fw_app_idl, \
+		cyttsp_fw_app_verh, cyttsp_fw_app_verl);
+
+	/* download new TTSP Application to the Bootloader */
+	if (!(retval < CY_OK)) {
+		i = 0;
+		/* send bootload initiation command */
+		if (cyttsp_fw[i].Command == CY_BL_INIT_LOAD) {
+			g_bl_data.bl_file = 0;
+			g_bl_data.bl_status = 0;
+			g_bl_data.bl_error = 0;
+			retval = i2c_smbus_write_i2c_block_data(ts->client,
+				CY_REG_BASE,
+				cyttsp_fw[i].Length, cyttsp_fw[i].Block);
+			/* delay to allow bl to get ready for block writes */
+			i++;
+			tries = 0;
+			do {
+				mdelay(100);
+				cyttsp_putbl(ts, 4, false, false, false);
+			} while (g_bl_data.bl_status != 0x10 &&
+				g_bl_data.bl_status != 0x11 &&
+				tries++ < 100);
+			cyttsp_debug("wait init f=%02X, s=%02X, e=%02X t=%d\n", \
+				g_bl_data.bl_file, g_bl_data.bl_status, \
+				g_bl_data.bl_error, tries);
+			/* send bootload firmware load blocks */
+			if (!(retval < CY_OK)) {
+				while (cyttsp_fw[i].Command == CY_BL_WRITE_BLK) {
+					retval = cyttsp_i2c_wr_blk_chunks(ts,
+						CY_REG_BASE,
+						cyttsp_fw[i].Length,
+						cyttsp_fw[i].Block);
+					cyttsp_xdebug("BL DNLD Rec=% 3d Len=% 3d Addr=%04X\n", \
+						cyttsp_fw[i].Record, \
+						cyttsp_fw[i].Length, \
+						cyttsp_fw[i].Address);
+					i++;
+					if (retval < CY_OK) {
+						cyttsp_debug("BL fail Rec=%3d retval=%d\n", \
+							cyttsp_fw[i-1].Record, \
+							retval);
+						break;
+					} else {
+						tries = 0;
+						cyttsp_putbl(ts, 5, false, false, false);
+						while (!((g_bl_data.bl_status == 0x10) &&
+							(g_bl_data.bl_error == 0x20)) &&
+							!((g_bl_data.bl_status == 0x11) &&
+							(g_bl_data.bl_error == 0x20)) &&
+							(tries++ < 100)) {
+							mdelay(1);
+							cyttsp_putbl(ts, 5, false, false, false);
+						}
+					}
+				}
+
+				if (!(retval < CY_OK)) {
+					while (i < cyttsp_fw_records) {
+						retval = i2c_smbus_write_i2c_block_data(ts->client, CY_REG_BASE,
+							cyttsp_fw[i].Length,
+							cyttsp_fw[i].Block);
+						i++;
+						tries = 0;
+						do {
+							mdelay(100);
+							cyttsp_putbl(ts, 6, true, false, false);
+						} while (g_bl_data.bl_status != 0x10 &&
+							g_bl_data.bl_status != 0x11 &&
+							tries++ < 100);
+						cyttsp_debug("wait term f=%02X, s=%02X, e=%02X t=%d\n", \
+							g_bl_data.bl_file, \
+							g_bl_data.bl_status, \
+							g_bl_data.bl_error, \
+							tries);
+						if (retval < CY_OK)
+							break;
+					}
+				}
+			}
+		}
+	}
+
+	/* reset TTSP Device back to bootloader mode */
+	host_reg = CY_SOFT_RESET_MODE;
+	retval = i2c_smbus_write_i2c_block_data(ts->client, CY_REG_BASE,
+		sizeof(host_reg), &host_reg);
+	/* wait for TTSP Device to complete reset back to bootloader */
+	tries = 0;
+	do {
+		mdelay(1);
+		cyttsp_putbl(ts, 3, false, false, false);
+	} while (g_bl_data.bl_status != 0x10 &&
+		g_bl_data.bl_status != 0x11 &&
+		tries++ < 100);
+
+	/* set arg2 to non-0 to activate */
+	retval = cyttsp_putbl(ts, 8, true, true, true);
+
+	return retval;
+}
+#else
+static int cyttsp_bootload_app(struct cyttsp *ts)
+{
+	cyttsp_debug("no-load new firmware \n");
+	return CY_OK;
+}
+#endif /* CY_INCLUDE_LOAD_FILE */
+
+
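+/* Power-on sequence: soft-reset into the bootloader, exit to operational
+ * mode (re-flashing from cyttsp_fw.h first if the reported firmware differs),
+ * program the interval registers from system-information mode and write the
+ * gesture/active-distance setup. */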
+static int cyttsp_power_on(struct cyttsp *ts)
+{
+	int retval = CY_OK;
+	u8 host_reg;
+	int tries;
+
+	cyttsp_debug("Power up \n");
+
+	/* check if the TTSP device has a bootloader installed */
+	host_reg = CY_SOFT_RESET_MODE;
+	retval = i2c_smbus_write_i2c_block_data(ts->client, CY_REG_BASE,
+		sizeof(host_reg), &host_reg);
+	tries = 0;
+	do {
+		mdelay(1);
+
+		/* set arg2 to non-0 to activate */
+		retval = cyttsp_putbl(ts, 1, true, true, true);
+		cyttsp_info("BL%d: f=%02X s=%02X err=%02X bl=%02X%02X bld=%02X%02X R=%d\n", \
+			101, \
+			g_bl_data.bl_file, g_bl_data.bl_status, \
+			g_bl_data.bl_error, \
+			g_bl_data.blver_hi, g_bl_data.blver_lo, \
+			g_bl_data.bld_blver_hi, g_bl_data.bld_blver_lo, \
+			retval);
+		cyttsp_info("BL%d: tver=%02X%02X a_id=%02X%02X aver=%02X%02X\n", \
+			102, \
+			g_bl_data.ttspver_hi, g_bl_data.ttspver_lo, \
+			g_bl_data.appid_hi, g_bl_data.appid_lo, \
+			g_bl_data.appver_hi, g_bl_data.appver_lo);
+		cyttsp_info("BL%d: c_id=%02X%02X%02X\n", \
+			103, \
+			g_bl_data.cid_0, g_bl_data.cid_1, g_bl_data.cid_2);
+	} while (!(retval < CY_OK) &&
+		!GET_BOOTLOADERMODE(g_bl_data.bl_status) &&
+		!(g_bl_data.bl_file == CY_OP_MODE + CY_LOW_PWR_MODE) &&
+		tries++ < 100);
+
+	/* is bootloader missing? */
+	if (!(retval < CY_OK)) {
+		cyttsp_xdebug("Ret=%d  Check if bootloader is missing...\n", \
+			retval);
+		if (!GET_BOOTLOADERMODE(g_bl_data.bl_status)) {
+			/* skip all bl and sys info and go to op mode */
+			if (!(retval < CY_OK)) {
+				cyttsp_xdebug("Bl is missing (ret=%d)\n", \
+					retval);
+				host_reg = CY_OP_MODE/* + CY_LOW_PWR_MODE*/;
+				retval = i2c_smbus_write_i2c_block_data(ts->client, CY_REG_BASE,
+					sizeof(host_reg), &host_reg);
+				/* wait for TTSP Device to complete switch to
+				 * Operational mode */
+				mdelay(1000);
+				goto bypass;
+			}
+		}
+	}
+
+
+	/* take TTSP out of bootloader mode; go to TrueTouch operational mode */
+	if (!(retval < CY_OK)) {
+		cyttsp_xdebug1("exit bootloader; go operational\n");
+		retval = i2c_smbus_write_i2c_block_data(ts->client,
+			CY_REG_BASE, sizeof(bl_cmd), bl_cmd);
+		tries = 0;
+		do {
+			mdelay(100);
+			cyttsp_putbl(ts, 4, true, false, false);
+			cyttsp_info("BL%d: f=%02X s=%02X err=%02X bl=%02X%02X bld=%02X%02X\n", \
+				104, \
+				g_bl_data.bl_file, g_bl_data.bl_status, \
+				g_bl_data.bl_error, \
+				g_bl_data.blver_hi, g_bl_data.blver_lo, \
+				g_bl_data.bld_blver_hi, g_bl_data.bld_blver_lo);
+		} while (GET_BOOTLOADERMODE(g_bl_data.bl_status) &&
+			tries++ < 100);
+	}
+
+	if (!(retval < CY_OK) &&
+		cyttsp_app_load()) {
+		if (CY_DIFF(g_bl_data.ttspver_hi, cyttsp_tts_verh())  ||
+			CY_DIFF(g_bl_data.ttspver_lo, cyttsp_tts_verl())  ||
+			CY_DIFF(g_bl_data.appid_hi, cyttsp_app_idh())  ||
+			CY_DIFF(g_bl_data.appid_lo, cyttsp_app_idl())  ||
+			CY_DIFF(g_bl_data.appver_hi, cyttsp_app_verh())  ||
+			CY_DIFF(g_bl_data.appver_lo, cyttsp_app_verl())  ||
+			CY_DIFF(g_bl_data.cid_0, cyttsp_cid_0())  ||
+			CY_DIFF(g_bl_data.cid_1, cyttsp_cid_1())  ||
+			CY_DIFF(g_bl_data.cid_2, cyttsp_cid_2())  ||
+			cyttsp_force_fw_load()) {
+			cyttsp_debug("blttsp=0x%02X%02X flttsp=0x%02X%02X force=%d\n", \
+				g_bl_data.ttspver_hi, g_bl_data.ttspver_lo, \
+				cyttsp_tts_verh(), cyttsp_tts_verl(), \
+				cyttsp_force_fw_load());
+			cyttsp_debug("blappid=0x%02X%02X flappid=0x%02X%02X\n", \
+				g_bl_data.appid_hi, g_bl_data.appid_lo, \
+				cyttsp_app_idh(), cyttsp_app_idl());
+			cyttsp_debug("blappver=0x%02X%02X flappver=0x%02X%02X\n", \
+				g_bl_data.appver_hi, g_bl_data.appver_lo, \
+				cyttsp_app_verh(), cyttsp_app_verl());
+			cyttsp_debug("blcid=0x%02X%02X%02X flcid=0x%02X%02X%02X\n", \
+				g_bl_data.cid_0, \
+				g_bl_data.cid_1, \
+				g_bl_data.cid_2, \
+				cyttsp_cid_0(), \
+				cyttsp_cid_1(), \
+				cyttsp_cid_2());
+			/* enter bootloader to load new app into TTSP Device */
+			retval = cyttsp_bootload_app(ts);
+			/* take TTSP device out of bootloader mode;
+			 * switch back to TrueTouch operational mode */
+			if (!(retval < CY_OK)) {
+				retval = i2c_smbus_write_i2c_block_data(ts->client,
+					CY_REG_BASE,
+					sizeof(bl_cmd), bl_cmd);
+				/* wait for TTSP Device to complete
+				 * switch to Operational mode */
+				tries = 0;
+				do {
+					mdelay(100);
+					cyttsp_putbl(ts, 9, false, false, false);
+				} while (GET_BOOTLOADERMODE(g_bl_data.bl_status) &&
+					tries++ < 100);
+				cyttsp_putbl(ts, 9, true, false, false);
+			}
+		}
+	}
+
+bypass:
+	/* switch to System Information mode to read versions
+	 * and set interval registers */
+	if (!(retval < CY_OK)) {
+		cyttsp_debug("switch to sysinfo mode \n");
+		host_reg = CY_SYSINFO_MODE;
+		retval = i2c_smbus_write_i2c_block_data(ts->client,
+			CY_REG_BASE, sizeof(host_reg), &host_reg);
+		/* wait for TTSP Device to complete switch to SysInfo mode */
+		mdelay(100);
+		if (!(retval < CY_OK)) {
+			retval = i2c_smbus_read_i2c_block_data(ts->client,
+				CY_REG_BASE,
+				sizeof(struct cyttsp_sysinfo_data_t),
+				(u8 *)&g_sysinfo_data);
+			cyttsp_debug("SI2: hst_mode=0x%02X mfg_cmd=0x%02X mfg_stat=0x%02X\n", \
+				g_sysinfo_data.hst_mode, \
+				g_sysinfo_data.mfg_cmd, \
+				g_sysinfo_data.mfg_stat);
+			cyttsp_debug("SI2: bl_ver=0x%02X%02X\n", \
+				g_sysinfo_data.bl_verh, \
+				g_sysinfo_data.bl_verl);
+			cyttsp_debug("SI2: sysinfo act_int=0x%02X tch_tmout=0x%02X lp_int=0x%02X\n", \
+				g_sysinfo_data.act_intrvl, \
+				g_sysinfo_data.tch_tmout, \
+				g_sysinfo_data.lp_intrvl);
+			cyttsp_info("SI%d: tver=%02X%02X a_id=%02X%02X aver=%02X%02X\n", \
+				102, \
+				g_sysinfo_data.tts_verh, \
+				g_sysinfo_data.tts_verl, \
+				g_sysinfo_data.app_idh, \
+				g_sysinfo_data.app_idl, \
+				g_sysinfo_data.app_verh, \
+				g_sysinfo_data.app_verl);
+			cyttsp_info("SI%d: c_id=%02X%02X%02X\n", \
+				103, \
+				g_sysinfo_data.cid[0], \
+				g_sysinfo_data.cid[1], \
+				g_sysinfo_data.cid[2]);
+			if (!(retval < CY_OK) &&
+				(CY_DIFF(ts->platform_data->act_intrvl,
+					CY_ACT_INTRVL_DFLT)  ||
+				CY_DIFF(ts->platform_data->tch_tmout,
+					CY_TCH_TMOUT_DFLT) ||
+				CY_DIFF(ts->platform_data->lp_intrvl,
+					CY_LP_INTRVL_DFLT))) {
+				if (!(retval < CY_OK)) {
+					u8 intrvl_ray[sizeof(ts->platform_data->act_intrvl) +
+						sizeof(ts->platform_data->tch_tmout) +
+						sizeof(ts->platform_data->lp_intrvl)];
+					u8 i = 0;
+
+					intrvl_ray[i++] =
+						ts->platform_data->act_intrvl;
+					intrvl_ray[i++] =
+						ts->platform_data->tch_tmout;
+					intrvl_ray[i++] =
+						ts->platform_data->lp_intrvl;
+
+					cyttsp_debug("SI2: platinfo act_intrvl=0x%02X tch_tmout=0x%02X lp_intrvl=0x%02X\n", \
+						ts->platform_data->act_intrvl, \
+						ts->platform_data->tch_tmout, \
+						ts->platform_data->lp_intrvl);
+					/* set intrvl registers */
+					retval = i2c_smbus_write_i2c_block_data(
+						ts->client,
+						CY_REG_ACT_INTRVL,
+						sizeof(intrvl_ray), intrvl_ray);
+					mdelay(CY_DLY_SYSINFO);
+				}
+			}
+		}
+		/* switch back to Operational mode */
+		cyttsp_debug("switch back to operational mode \n");
+		if (!(retval < CY_OK)) {
+			host_reg = CY_OP_MODE/* + CY_LOW_PWR_MODE*/;
+			retval = i2c_smbus_write_i2c_block_data(ts->client,
+				CY_REG_BASE,
+				sizeof(host_reg), &host_reg);
+			/* wait for TTSP Device to complete
+			 * switch to Operational mode */
+			mdelay(100);
+		}
+	}
+	/* init gesture setup;
+	 * this is required even if not using gestures
+	 * in order to set the active distance */
+	if (!(retval < CY_OK)) {
+		u8 gesture_setup;
+		cyttsp_debug("init gesture setup \n");
+		gesture_setup = ts->platform_data->gest_set;
+		retval = i2c_smbus_write_i2c_block_data(ts->client,
+			CY_REG_GEST_SET,
+			sizeof(gesture_setup), &gesture_setup);
+		mdelay(CY_DLY_DFLT);
+	}
+
+	if (!(retval < CY_OK))
+		ts->platform_data->power_state = CY_ACTIVE_STATE;
+	else
+		ts->platform_data->power_state = CY_IDLE_STATE;
+
+	cyttsp_debug("Retval=%d Power state is %s\n", \
+		retval, \
+		ts->platform_data->power_state == CY_ACTIVE_STATE ? \
+		 "ACTIVE" : "IDLE");
+
+	return retval;
+}
+
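+/* Power the controller rails: when 'on' is true, get, configure and enable
+ * every regulator named in the platform data; when false, wind the same
+ * regulators back down and release them. */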
+static int cyttsp_power_device(struct cyttsp *ts, bool on)
+{
+	int rc = 0, i;
+	const struct cyttsp_regulator *reg_info =
+				ts->platform_data->regulator_info;
+	u8 num_reg = ts->platform_data->num_regulators;
+
+	if (!reg_info) {
+		pr_err("regulator pdata not specified\n");
+		return -EINVAL;
+	}
+
+	if (on == false) /* Turn off the regulators */
+		goto ts_reg_disable;
+
+	ts->vdd = kzalloc(num_reg * sizeof(struct regulator *), GFP_KERNEL);
+	if (!ts->vdd) {
+		pr_err("unable to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < num_reg; i++) {
+		ts->vdd[i] = regulator_get(&ts->client->dev, reg_info[i].name);
+		if (IS_ERR(ts->vdd[i])) {
+			rc = PTR_ERR(ts->vdd[i]);
+			pr_err("%s:regulator get failed rc=%d\n",
+							__func__, rc);
+			goto error_vdd;
+		}
+
+		if (regulator_count_voltages(ts->vdd[i]) > 0) {
+			rc = regulator_set_voltage(ts->vdd[i],
+				reg_info[i].min_uV, reg_info[i].max_uV);
+			if (rc) {
+				pr_err("%s: regulator_set_voltage "
+					"failed rc=%d\n", __func__, rc);
+				regulator_put(ts->vdd[i]);
+				goto error_vdd;
+			}
+		}
+
+		rc = regulator_set_optimum_mode(ts->vdd[i],
+						reg_info[i].load_uA);
+		if (rc < 0) {
+			pr_err("%s: regulator_set_optimum_mode failed rc=%d\n",
+								__func__, rc);
+
+			regulator_set_voltage(ts->vdd[i], 0,
+						reg_info[i].max_uV);
+			regulator_put(ts->vdd[i]);
+			goto error_vdd;
+		}
+
+		rc = regulator_enable(ts->vdd[i]);
+		if (rc) {
+			pr_err("%s: regulator_enable failed rc =%d\n",
+								__func__, rc);
+			regulator_set_optimum_mode(ts->vdd[i], 0);
+			regulator_set_voltage(ts->vdd[i], 0,
+						reg_info[i].max_uV);
+			regulator_put(ts->vdd[i]);
+			goto error_vdd;
+		}
+	}
+
+	return rc;
+
+ts_reg_disable:
+	i = ts->platform_data->num_regulators;
+error_vdd:
+	while (--i >= 0) {
+		if (regulator_count_voltages(ts->vdd[i]) > 0)
+			regulator_set_voltage(ts->vdd[i], 0,
+						reg_info[i].max_uV);
+		regulator_set_optimum_mode(ts->vdd[i], 0);
+		regulator_disable(ts->vdd[i]);
+		regulator_put(ts->vdd[i]);
+	}
+	kfree(ts->vdd);
+	return rc;
+}
+
+/* cyttsp_initialize: Driver Initialization. This function takes
+ * care of the following tasks:
+ * 1. Create and register an input device with input layer
+ * 2. Take CYTTSP device out of bootloader mode; go operational
+ * 3. Start any timers/Work queues.  */
+static int cyttsp_initialize(struct i2c_client *client, struct cyttsp *ts)
+{
+	struct input_dev *input_device;
+	int error = 0;
+	int retval = CY_OK;
+	u8 id;
+
+	/* Create the input device and register it. */
+	input_device = input_allocate_device();
+	if (!input_device) {
+		retval = -ENOMEM;
+		cyttsp_xdebug1("err input allocate device\n");
+		goto error_free_device;
+	}
+
+	if (!client) {
+		retval = -ENODEV;
+		cyttsp_xdebug1("err client is Null\n");
+		goto error_free_device;
+	}
+
+	if (!ts) {
+		retval = -ENODEV;
+		cyttsp_xdebug1("err context is Null\n");
+		goto error_free_device;
+	}
+
+	ts->input = input_device;
+	input_device->name = CY_I2C_NAME;
+	input_device->phys = ts->phys;
+	input_device->dev.parent = &client->dev;
+
+	/* init the touch structures */
+	ts->num_prv_st_tch = CY_NTCH;
+	for (id = 0; id < CY_NUM_TRK_ID; id++) {
+		ts->act_trk[id] = CY_NTCH;
+		ts->prv_mt_pos[id][CY_XPOS] = 0;
+		ts->prv_mt_pos[id][CY_YPOS] = 0;
+	}
+
+	for (id = 0; id < CY_NUM_MT_TCH_ID; id++)
+		ts->prv_mt_tch[id] = CY_IGNR_TCH;
+
+	for (id = 0; id < CY_NUM_ST_TCH_ID; id++)
+		ts->prv_st_tch[id] = CY_IGNR_TCH;
+
+	set_bit(EV_SYN, input_device->evbit);
+	set_bit(EV_KEY, input_device->evbit);
+	set_bit(EV_ABS, input_device->evbit);
+	set_bit(BTN_TOUCH, input_device->keybit);
+	set_bit(BTN_2, input_device->keybit);
+	if (ts->platform_data->use_gestures)
+		set_bit(BTN_3, input_device->keybit);
+
+	input_set_abs_params(input_device, ABS_X, ts->platform_data->disp_minx,
+		ts->platform_data->disp_maxx, 0, 0);
+	input_set_abs_params(input_device, ABS_Y, ts->platform_data->disp_miny,
+		ts->platform_data->disp_maxy, 0, 0);
+	input_set_abs_params(input_device,
+		ABS_TOOL_WIDTH, 0, CY_LARGE_TOOL_WIDTH, 0 , 0);
+	input_set_abs_params(input_device,
+		ABS_PRESSURE, 0, CY_MAXZ, 0, 0);
+	input_set_abs_params(input_device,
+		ABS_HAT0X, 0, ts->platform_data->panel_maxx, 0, 0);
+	input_set_abs_params(input_device,
+		ABS_HAT0Y, 0, ts->platform_data->panel_maxy, 0, 0);
+	if (ts->platform_data->use_gestures) {
+		input_set_abs_params(input_device,
+			ABS_HAT1X, 0, CY_MAXZ, 0, 0);
+		input_set_abs_params(input_device,
+			ABS_HAT1Y, 0, CY_MAXZ, 0, 0);
+	}
+	if (ts->platform_data->use_mt) {
+		input_set_abs_params(input_device, ABS_MT_POSITION_X,
+			ts->platform_data->disp_minx,
+			ts->platform_data->disp_maxx, 0, 0);
+		input_set_abs_params(input_device, ABS_MT_POSITION_Y,
+			ts->platform_data->disp_miny,
+			ts->platform_data->disp_maxy, 0, 0);
+		input_set_abs_params(input_device,
+			ABS_MT_TOUCH_MAJOR, 0, CY_MAXZ, 0, 0);
+		input_set_abs_params(input_device,
+			ABS_MT_WIDTH_MAJOR, 0, CY_LARGE_TOOL_WIDTH, 0, 0);
+		if (ts->platform_data->use_trk_id) {
+			input_set_abs_params(input_device,
+				ABS_MT_TRACKING_ID, 0, CY_NUM_TRK_ID, 0, 0);
+		}
+	}
+
+	/* set dummy key to make driver work with virtual keys */
+	input_set_capability(input_device, EV_KEY, KEY_PROG1);
+
+	cyttsp_info("%s: Register input device\n", CY_I2C_NAME);
+	error = input_register_device(input_device);
+	if (error) {
+		cyttsp_alert("%s: Failed to register input device\n", \
+			CY_I2C_NAME);
+		retval = error;
+		goto error_free_device;
+	}
+
+	/* Prepare our worker structure prior to setting up the timer/ISR */
+	INIT_WORK(&ts->work, cyttsp_xy_worker);
+
+	if (gpio_is_valid(ts->platform_data->resout_gpio)) {
+		/* configure touchscreen reset out gpio */
+		retval = gpio_request(ts->platform_data->resout_gpio,
+						"cyttsp_resout_gpio");
+		if (retval) {
+			pr_err("%s: unable to request reset gpio %d\n",
+				__func__, ts->platform_data->resout_gpio);
+			goto error_free_device;
+		}
+
+		retval = gpio_direction_output(
+					ts->platform_data->resout_gpio, 1);
+		if (retval) {
+			pr_err("%s: unable to set direction for gpio %d\n",
+				__func__, ts->platform_data->resout_gpio);
+			goto error_resout_gpio_dir;
+		}
+	}
+
+	if (gpio_is_valid(ts->platform_data->sleep_gpio)) {
+		/* configure touchscreen sleep gpio */
+		retval = gpio_request(ts->platform_data->sleep_gpio,
+						"cy8c_sleep_gpio");
+		if (retval) {
+			pr_err("%s: unable to request sleep gpio %d\n",
+				__func__, ts->platform_data->sleep_gpio);
+			goto error_sleep_gpio_req;
+		}
+
+		retval = gpio_direction_output(
+					ts->platform_data->sleep_gpio, 0);
+		if (retval) {
+			pr_err("%s: unable to set direction for gpio %d\n",
+				__func__, ts->platform_data->sleep_gpio);
+			goto error_sleep_gpio_dir;
+		}
+	}
+
+	if (gpio_is_valid(ts->platform_data->irq_gpio)) {
+		/* configure touchscreen irq gpio */
+		retval = gpio_request(ts->platform_data->irq_gpio,
+							"ts_irq_gpio");
+		if (retval) {
+			pr_err("%s: unable to request gpio [%d]\n", __func__,
+						ts->platform_data->irq_gpio);
+			goto error_irq_gpio_req;
+		}
+		retval = gpio_direction_input(ts->platform_data->irq_gpio);
+		if (retval) {
+			pr_err("%s: unable to set_direction for gpio [%d]\n",
+					__func__, ts->platform_data->irq_gpio);
+			goto error_irq_gpio_dir;
+		}
+	}
+
+	if (ts->platform_data->regulator_info) {
+		retval = cyttsp_power_device(ts, true);
+		if (retval) {
+			pr_err("%s: Unable to power device %d\n",
+						 __func__, retval);
+			goto error_irq_gpio_dir;
+		}
+	}
+
+	/* Power on the chip and make sure that I/Os are set as specified
+	 * in the platform */
+	if (ts->platform_data->init) {
+		retval = ts->platform_data->init(client);
+		if (retval) {
+			pr_err("%s: ts init failed\n", __func__);
+			goto error_power_device;
+		}
+	}
+
+	msleep(100);
+
+	/* check that the device responds by reading one of its registers */
+	retval = i2c_smbus_read_byte_data(ts->client, 0x01);
+	if (retval < 0) {
+		pr_err("%s: i2c sanity check failed\n", __func__);
+		goto error_power_device;
+	}
+
+	retval = cyttsp_power_on(ts);
+	if (retval < 0) {
+		pr_err("%s: cyttsp_power_on failed\n", __func__);
+		goto error_power_device;
+	}
+
+	/* Timer or Interrupt setup */
+	if (ts->client->irq == 0) {
+		cyttsp_info("Setting up timer\n");
+		setup_timer(&ts->timer, cyttsp_timer, (unsigned long) ts);
+		mod_timer(&ts->timer, jiffies + TOUCHSCREEN_TIMEOUT);
+	} else {
+		cyttsp_info("Setting up interrupt\n");
+		/* request_irq() will also call enable_irq() */
+		error = request_irq(client->irq, cyttsp_irq,
+			IRQF_TRIGGER_FALLING,
+			client->dev.driver->name, ts);
+		if (error) {
+			cyttsp_alert("error: could not request irq\n");
+			retval = error;
+			goto error_power_device;
+		}
+	}
+
+	irq_cnt = 0;
+	irq_cnt_total = 0;
+	irq_err_cnt = 0;
+
+	atomic_set(&ts->irq_enabled, 1);
+	retval = device_create_file(&ts->client->dev, &dev_attr_irq_enable);
+	if (retval < CY_OK) {
+		cyttsp_alert("File device creation failed: %d\n", retval);
+		retval = -ENODEV;
+		goto error_free_irq;
+	}
+
+	retval = device_create_file(&client->dev, &dev_attr_cyttsp_fw_ver);
+	if (retval) {
+		cyttsp_alert("sysfs entry for firmware version failed\n");
+		goto error_rm_dev_file_irq_en;
+	}
+
+	ts->cyttsp_fwloader_mode = 0;
+	retval = device_create_file(&client->dev, &dev_attr_cyttsp_update_fw);
+	if (retval) {
+		cyttsp_alert("sysfs entry for firmware update failed\n");
+		goto error_rm_dev_file_fw_ver;
+	}
+
+	retval = device_create_file(&client->dev,
+				&dev_attr_cyttsp_force_update_fw);
+	if (retval) {
+		cyttsp_alert("sysfs entry for force firmware update failed\n");
+		goto error_rm_dev_file_update_fw;
+	}
+	if (ts->platform_data->correct_fw_ver) {
+		if (g_bl_data.appid_lo != ts->platform_data->correct_fw_ver)
+			printk(KERN_INFO "Please update touchscreen firmware\n");
+	}
+
+	retval = device_create_file(&client->dev,
+				&dev_attr_cyttsp_fw_name);
+	if (retval) {
+		cyttsp_alert("sysfs entry for file name selection failed\n");
+		goto error_rm_dev_file_fupdate_fw;
+	}
+
+	cyttsp_info("%s: Successful registration\n", CY_I2C_NAME);
+
+	goto success;
+
+error_rm_dev_file_fupdate_fw:
+	device_remove_file(&client->dev, &dev_attr_cyttsp_force_update_fw);
+error_rm_dev_file_update_fw:
+	device_remove_file(&client->dev, &dev_attr_cyttsp_update_fw);
+error_rm_dev_file_fw_ver:
+	device_remove_file(&client->dev, &dev_attr_cyttsp_fw_ver);
+error_rm_dev_file_irq_en:
+	device_remove_file(&client->dev, &dev_attr_irq_enable);
+error_free_irq:
+	if (ts->client->irq)
+		free_irq(client->irq, ts);
+error_power_device:
+	if (ts->platform_data->regulator_info)
+		cyttsp_power_device(ts, false);
+error_irq_gpio_dir:
+	if (gpio_is_valid(ts->platform_data->irq_gpio))
+		gpio_free(ts->platform_data->irq_gpio);
+error_irq_gpio_req:
+	if (gpio_is_valid(ts->platform_data->sleep_gpio))
+		gpio_direction_output(ts->platform_data->sleep_gpio, 1);
+error_sleep_gpio_dir:
+	if (gpio_is_valid(ts->platform_data->sleep_gpio))
+		gpio_free(ts->platform_data->sleep_gpio);
+error_sleep_gpio_req:
+	if (gpio_is_valid(ts->platform_data->resout_gpio))
+		gpio_direction_output(ts->platform_data->resout_gpio, 0);
+error_resout_gpio_dir:
+	if (gpio_is_valid(ts->platform_data->resout_gpio))
+		gpio_free(ts->platform_data->resout_gpio);
+error_free_device:
+	if (input_device)
+		input_free_device(input_device);
+
+success:
+	return retval;
+}
+
+/* I2C driver probe function */
+static int __devinit cyttsp_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct cyttsp *ts;
+	int error;
+	int retval = CY_OK;
+
+	cyttsp_info("Start Probe 1.2\n");
+
+	/* allocate and clear memory */
+	ts = kzalloc(sizeof(struct cyttsp), GFP_KERNEL);
+	if (ts == NULL) {
+		cyttsp_xdebug1("err kzalloc for cyttsp\n");
+		return -ENOMEM;
+	}
+
+	/* Enable runtime PM ops, start in ACTIVE mode */
+	error = pm_runtime_set_active(&client->dev);
+	if (error < 0)
+		dev_dbg(&client->dev, "unable to set runtime pm state\n");
+	pm_runtime_enable(&client->dev);
+
+	if (!(retval < CY_OK)) {
+		/* register driver_data */
+		ts->client = client;
+		ts->platform_data = client->dev.platform_data;
+
+		if (ts->platform_data->fw_fname)
+			strncpy(ts->fw_fname, ts->platform_data->fw_fname,
+							FW_FNAME_LEN - 1);
+		else
+			strncpy(ts->fw_fname, "cyttsp.hex", FW_FNAME_LEN - 1);
+
+		i2c_set_clientdata(client, ts);
+
+		error = cyttsp_initialize(client, ts);
+		if (error) {
+			cyttsp_xdebug1("err cyttsp_initialize\n");
+			/* free the context and bail out; ts must not be
+			 * referenced once it has been freed */
+			kfree(ts);
+			return -ENODEV;
+		} else
+			cyttsp_openlog();
+	}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	if (!(retval < CY_OK)) {
+		ts->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+		ts->early_suspend.suspend = cyttsp_early_suspend;
+		ts->early_suspend.resume = cyttsp_late_resume;
+		register_early_suspend(&ts->early_suspend);
+	}
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+	device_init_wakeup(&client->dev, ts->platform_data->wakeup);
+	mutex_init(&ts->mutex);
+
+	cyttsp_info("Start Probe %s\n", \
+		(retval < CY_OK) ? "FAIL" : "PASS");
+
+	return retval;
+}
+
+/* Function to manage power-on resume */
+static int cyttsp_resume(struct device *dev)
+{
+	struct cyttsp *ts = dev_get_drvdata(dev);
+	int retval = CY_OK;
+
+	cyttsp_debug("Wake Up\n");
+
+	if (ts->is_suspended == false) {
+		pr_err("%s: in wakeup state\n", __func__);
+		return 0;
+	}
+
+	if (device_may_wakeup(dev)) {
+		if (ts->client->irq)
+			disable_irq_wake(ts->client->irq);
+		return 0;
+	}
+
+	/* re-enable the interrupt prior to wake device */
+	if (ts->client->irq)
+		enable_irq(ts->client->irq);
+
+	if (ts->platform_data->use_sleep &&
+		(ts->platform_data->power_state != CY_ACTIVE_STATE)) {
+		if (ts->platform_data->resume)
+			retval = ts->platform_data->resume(ts->client);
+		if (!(retval < CY_OK)) {
+			/* take TTSP device out of bootloader mode;
+			 * switch back to TrueTouch operational mode */
+			if (!(retval < CY_OK)) {
+				int tries;
+				retval = i2c_smbus_write_i2c_block_data(ts->client,
+					CY_REG_BASE,
+					sizeof(bl_cmd), bl_cmd);
+				/* wait for TTSP Device to complete
+				 * switch to Operational mode */
+				tries = 0;
+				do {
+					mdelay(100);
+					cyttsp_putbl(ts, 16, false, false, false);
+				} while (GET_BOOTLOADERMODE(g_bl_data.bl_status) &&
+					tries++ < 100);
+				cyttsp_putbl(ts, 16, true, false, false);
+			}
+		}
+	}
+
+	if (!(retval < CY_OK) &&
+		(GET_HSTMODE(g_bl_data.bl_file) == CY_OK)) {
+		ts->platform_data->power_state = CY_ACTIVE_STATE;
+
+		/* re-enable the timer after resuming */
+		if (ts->client->irq == 0)
+			mod_timer(&ts->timer, jiffies + TOUCHSCREEN_TIMEOUT);
+	} else
+		retval = -ENODEV;
+
+	ts->is_suspended = false;
+	cyttsp_debug("Wake Up %s\n", \
+		(retval < CY_OK) ? "FAIL" : "PASS");
+
+	return retval;
+}
+
+
+/* Function to manage low power suspend */
+static int cyttsp_suspend(struct device *dev)
+{
+	struct cyttsp *ts = dev_get_drvdata(dev);
+	u8 sleep_mode = CY_OK;
+	int retval = CY_OK;
+
+	cyttsp_debug("Enter Sleep\n");
+
+	if (ts->is_suspended == true) {
+		pr_err("%s: in sleep state\n", __func__);
+		return 0;
+	}
+
+	mutex_lock(&ts->mutex);
+	if (ts->cyttsp_fwloader_mode) {
+		pr_err("%s: firmware upgrade mode: "
+			"suspend not allowed\n", __func__);
+		mutex_unlock(&ts->mutex);
+		return -EBUSY;
+	}
+	mutex_unlock(&ts->mutex);
+
+	if (device_may_wakeup(dev)) {
+		if (ts->client->irq)
+			enable_irq_wake(ts->client->irq);
+		return 0;
+	}
+
+	/* disable worker */
+	if (ts->client->irq == 0)
+		del_timer(&ts->timer);
+	else
+		disable_irq_nosync(ts->client->irq);
+	retval = cancel_work_sync(&ts->work);
+
+	if (retval && ts->client->irq)
+		enable_irq(ts->client->irq);
+
+	if (!(retval < CY_OK)) {
+		if (ts->platform_data->use_sleep &&
+			(ts->platform_data->power_state == CY_ACTIVE_STATE)) {
+			if (ts->platform_data->use_sleep & CY_USE_DEEP_SLEEP_SEL)
+				sleep_mode = CY_DEEP_SLEEP_MODE;
+			else
+				sleep_mode = CY_LOW_PWR_MODE;
+
+			retval = i2c_smbus_write_i2c_block_data(ts->client,
+				CY_REG_BASE,
+				sizeof(sleep_mode), &sleep_mode);
+		}
+	}
+
+	if (!(retval < CY_OK)) {
+		if (sleep_mode == CY_DEEP_SLEEP_MODE)
+			ts->platform_data->power_state = CY_SLEEP_STATE;
+		else if (sleep_mode == CY_LOW_PWR_MODE)
+			ts->platform_data->power_state = CY_LOW_PWR_STATE;
+	}
+
+	ts->is_suspended = true;
+	cyttsp_debug("Sleep Power state is %s\n", \
+		(ts->platform_data->power_state == CY_ACTIVE_STATE) ? \
+		"ACTIVE" : \
+		((ts->platform_data->power_state == CY_SLEEP_STATE) ? \
+		"SLEEP" : "LOW POWER"));
+
+	return retval;
+}
+
+/* registered in driver struct */
+static int __devexit cyttsp_remove(struct i2c_client *client)
+{
+	/* clientdata registered on probe */
+	struct cyttsp *ts = i2c_get_clientdata(client);
+	int err;
+
+	cyttsp_alert("Unregister\n");
+
+	pm_runtime_set_suspended(&client->dev);
+	pm_runtime_disable(&client->dev);
+
+	device_init_wakeup(&client->dev, 0);
+	device_remove_file(&ts->client->dev, &dev_attr_irq_enable);
+	device_remove_file(&client->dev, &dev_attr_cyttsp_fw_ver);
+	device_remove_file(&client->dev, &dev_attr_cyttsp_update_fw);
+	device_remove_file(&client->dev, &dev_attr_cyttsp_force_update_fw);
+	device_remove_file(&client->dev, &dev_attr_cyttsp_fw_name);
+
+	/* Start cleaning up by removing any pending work and the timer */
+	cancel_work_sync(&ts->work);
+
+	/* free up timer or irq */
+	if (ts->client->irq == 0) {
+		err = del_timer(&ts->timer);
+		if (err < CY_OK)
+			cyttsp_alert("error: failed to delete timer\n");
+	} else
+		free_irq(client->irq, ts);
+
+	if (ts->platform_data->regulator_info)
+		cyttsp_power_device(ts, false);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	unregister_early_suspend(&ts->early_suspend);
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+	mutex_destroy(&ts->mutex);
+
+	if (gpio_is_valid(ts->platform_data->sleep_gpio)) {
+		gpio_direction_output(ts->platform_data->sleep_gpio, 1);
+		gpio_free(ts->platform_data->sleep_gpio);
+	}
+
+	if (gpio_is_valid(ts->platform_data->resout_gpio)) {
+		gpio_direction_output(ts->platform_data->resout_gpio, 0);
+		gpio_free(ts->platform_data->resout_gpio);
+	}
+
+	if (gpio_is_valid(ts->platform_data->irq_gpio))
+		gpio_free(ts->platform_data->irq_gpio);
+
+	/* housekeeping */
+	if (ts != NULL)
+		kfree(ts);
+
+	cyttsp_alert("Leaving\n");
+
+	return 0;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void cyttsp_early_suspend(struct early_suspend *handler)
+{
+	struct cyttsp *ts;
+
+	ts = container_of(handler, struct cyttsp, early_suspend);
+	cyttsp_suspend(&ts->client->dev);
+}
+
+static void cyttsp_late_resume(struct early_suspend *handler)
+{
+	struct cyttsp *ts;
+
+	ts = container_of(handler, struct cyttsp, early_suspend);
+	cyttsp_resume(&ts->client->dev);
+}
+#endif  /* CONFIG_HAS_EARLYSUSPEND */
+
+static int __init cyttsp_init(void)
+{
+	int ret;
+
+	cyttsp_info("Cypress TrueTouch(R) Standard Product\n");
+	cyttsp_info("I2C Touchscreen Driver (Built %s @ %s)\n", \
+		__DATE__, __TIME__);
+
+	cyttsp_ts_wq = create_singlethread_workqueue("cyttsp_ts_wq");
+	if (cyttsp_ts_wq == NULL) {
+		cyttsp_debug("No memory for cyttsp_ts_wq\n");
+		return -ENOMEM;
+	}
+
+	ret = i2c_add_driver(&cyttsp_driver);
+	if (ret)
+		destroy_workqueue(cyttsp_ts_wq);
+
+	return ret;
+}
+
+static void __exit cyttsp_exit(void)
+{
+	if (cyttsp_ts_wq)
+		destroy_workqueue(cyttsp_ts_wq);
+	i2c_del_driver(&cyttsp_driver);
+}
+
+module_init(cyttsp_init);
+module_exit(cyttsp_exit);
+MODULE_FIRMWARE("cyttsp.fw");
+
diff --git a/drivers/input/touchscreen/cyttsp_fw.h b/drivers/input/touchscreen/cyttsp_fw.h
new file mode 100755
index 0000000..f14153e
--- /dev/null
+++ b/drivers/input/touchscreen/cyttsp_fw.h
@@ -0,0 +1,4307 @@
+/* Header file for: 
+ * Cypress TrueTouch(TM) Standard Product touchscreen drivers.
+ * drivers/input/touchscreen/cyttsp_fw.h
+ *
+ * Copyright (C) 2009, 2010 Cypress Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Cypress reserves the right to make changes without further notice
+ * to the materials described herein. Cypress does not assume any
+ * liability arising out of the application described herein.
+ *
+ * Contact Cypress Semiconductor at www.cypress.com
+ *
+ */
+
+#define CYTTSP_BL_OST_LEN 1
+#define CYTTSP_BL_CMD_LEN 2
+#define CYTTSP_BL_KEY_LEN 8
+#define CYTTSP_LD_ADR_LEN 2
+#define CYTTSP_LD_DAT_LEN 64
+#define CYTTSP_LD_CHK_LEN 2
+#define CYTTSP_LD_BLK_LEN (CYTTSP_BL_OST_LEN + CYTTSP_BL_CMD_LEN + CYTTSP_BL_KEY_LEN + \
+    CYTTSP_LD_ADR_LEN + CYTTSP_LD_DAT_LEN + CYTTSP_LD_CHK_LEN)
+
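+/* One bootloader transfer record.  'Command' is matched against
+ * CY_BL_INIT_LOAD / CY_BL_WRITE_BLK in cyttsp_bootload_app(), and 'Block'
+ * holds the raw bytes written over I2C (page offset, command, key, address,
+ * data and checksum). */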
+typedef struct cyttsp_ld_blk_ray_t {
+    unsigned short Record;
+    unsigned short Length;
+    unsigned char  Command;
+    unsigned short Address;
+    unsigned char  Block[CYTTSP_LD_BLK_LEN];
+} cyttsp_ld_blk_ray, *pcyttsp_ld_blk_ray;
+
+cyttsp_ld_blk_ray cyttsp_fw[] = {
+	{
+		0,
+		11,
+		0x38,
+		-1,
+		{
+			0x00, 0xFF, 0x38, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
+		}
+	},
+	{
+		1,
+		79,
+		0x39,
+		0x002C,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x2C, 0x40, 0x7D, 0x0B, 0x68, 0x30, 0x30, 0x30, 0x30, 0x7E, 0x30, 0x30, 0x30, 0x7D, 0x10, 0x12, 0x7E, 0x7D, 0x10, 0x36, 0x7E, 0x7E, 0x30, 0x30, 0x30, 0x7E, 0x30, 0x30, 0x30, 0x7D, 0x1F, 0x2A, 0x7E, 0x7E, 0x30, 0x30, 0x30, 0x7E, 0x30, 0x30, 0x30, 0x7E, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x7E, 0x30, 0x30, 0x30, 0x7E, 0x30, 0x30, 0x30, 0x7D, 0x20, 0x70, 0x7E, 0x7E, 0x30, 0x30, 0x30, 0x5B, 0x36
+		}
+	},
+	{
+		2,
+		79,
+		0x39,
+		0x002D,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x2D, 0x7E, 0x30, 0x30, 0x30, 0x7E, 0x30, 0x30, 0x30, 0x7E, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x40, 0x43, 0xE6, 0x02, 0x40, 0x70, 0xCF, 0x71, 0x10, 0x62, 0xE3, 0x02, 0x70, 0xCF, 0x41, 0xFF, 0xEF, 0x50, 0x80, 0x4E, 0x5D, 0xD5, 0x08, 0x62, 0x44, 0x09
+		}
+	},
+	{
+		3,
+		79,
+		0x39,
+		0x002E,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x2E, 0xD5, 0x00, 0x55, 0xFA, 0x01, 0x40, 0x50, 0x06, 0x55, 0xF8, 0x3A, 0x7C, 0x00, 0x60, 0x40, 0x40, 0x70, 0xCF, 0x71, 0x10, 0x51, 0xFA, 0x60, 0xE8, 0x70, 0xCF, 0x18, 0x60, 0xD5, 0x55, 0xF8, 0x00, 0x55, 0xF9, 0x00, 0x70, 0xCF, 0x71, 0x10, 0x41, 0x9F, 0xFE, 0x70, 0xCF, 0x62, 0xE3, 0x38, 0x62, 0xD1, 0x0F, 0x50, 0x00, 0x4E, 0x62, 0xD3, 0x0F, 0x62, 0xD0, 0x00, 0x62, 0xD5, 0x00, 0x62, 0xD4, 0x35, 0xEC
+		}
+	},
+	{
+		4,
+		79,
+		0x39,
+		0x002F,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x2F, 0x00, 0x71, 0xC0, 0x7C, 0x0F, 0x76, 0x62, 0xD0, 0x00, 0x50, 0x0F, 0x57, 0x74, 0x08, 0x28, 0x53, 0xE9, 0x18, 0x75, 0x09, 0x00, 0x28, 0x4B, 0x51, 0xE9, 0x80, 0x04, 0x75, 0x09, 0x00, 0x62, 0xE3, 0x00, 0x08, 0x28, 0x60, 0xD5, 0x74, 0xA0, 0x4B, 0x18, 0x75, 0x09, 0x00, 0x08, 0x28, 0x53, 0xE9, 0x18, 0x75, 0x09, 0x00, 0x08, 0x28, 0xA0, 0x1C, 0x53, 0xE8, 0x18, 0x75, 0x09, 0x00, 0x08, 0x28, 0xCD, 0x1D
+		}
+	},
+	{
+		5,
+		79,
+		0x39,
+		0x0030,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x30, 0x3F, 0xE9, 0x47, 0xE9, 0xFF, 0xB0, 0x06, 0x5D, 0xD5, 0x74, 0x60, 0xD5, 0x18, 0x7A, 0xE8, 0xBF, 0xEB, 0x8F, 0xC9, 0x18, 0x75, 0x09, 0x00, 0x08, 0x28, 0x53, 0xE8, 0x50, 0x00, 0x3F, 0xE9, 0x47, 0xE9, 0xFF, 0xB0, 0x08, 0x5D, 0xD5, 0x74, 0x60, 0xD5, 0x50, 0x00, 0x7A, 0xE8, 0xBF, 0xEF, 0x18, 0x8F, 0xAA, 0x18, 0x70, 0xCF, 0x71, 0x10, 0x62, 0xEC, 0x10, 0x43, 0xE3, 0x00, 0x70, 0xCF, 0x62, 0x4D, 0x1E
+		}
+	},
+	{
+		6,
+		79,
+		0x39,
+		0x0031,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x31, 0xE0, 0x00, 0x41, 0xFE, 0xE7, 0x43, 0xFE, 0x10, 0x70, 0xCF, 0x71, 0x10, 0x62, 0xE0, 0x53, 0x70, 0xCF, 0x62, 0xE2, 0x00, 0x7C, 0x3E, 0xD3, 0x8F, 0xFF, 0x7F, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0xE9, 0x57
+		}
+	},
+	{
+		7,
+		79,
+		0x39,
+		0x0032,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x32, 0x5D, 0x04, 0x73, 0x21, 0xA0, 0xBF, 0xFA, 0x5D, 0x04, 0x73, 0x21, 0xA0, 0xBF, 0xF3, 0x5D, 0x04, 0x73, 0x21, 0xA0, 0xBF, 0xEC, 0x50, 0x18, 0x49, 0x04, 0x20, 0xAF, 0xE5, 0x60, 0xFF, 0x49, 0xC9, 0x01, 0xB0, 0x1A, 0x41, 0xD6, 0xFE, 0x70, 0xCF, 0x71, 0x10, 0x41, 0x04, 0x5F, 0x70, 0xCF, 0x43, 0xD6, 0x01, 0x40, 0x70, 0xCF, 0x71, 0x10, 0x43, 0x04, 0xA0, 0x70, 0xCF, 0x7F, 0x30, 0x30, 0x30, 0x81, 0x88
+		}
+	},
+	{
+		8,
+		79,
+		0x39,
+		0x0033,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x33, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x87
+		}
+	},
+	{
+		9,
+		79,
+		0x39,
+		0x0034,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x34, 0x0A, 0x20, 0x20, 0x51, 0x55, 0x41, 0x4C, 0x43, 0x4F, 0x4D, 0x4D, 0x20, 0x56, 0x50, 0x30, 0x34, 0x33, 0x2D, 0x48, 0x32, 0x20, 0x54, 0x4D, 0x41, 0x33, 0x30, 0x30, 0x45, 0x20, 0x46, 0x69, 0x72, 0x6D, 0x77, 0x61, 0x72, 0x65, 0x20, 0x49, 0x64, 0x65, 0x6E, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x42, 0x6C, 0x6F, 0x63, 0x6B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x56, 0x99, 0xBA
+		}
+	},
+	{
+		10,
+		79,
+		0x39,
+		0x0035,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x35, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x20, 0x30, 0x32, 0x2E, 0x30, 0x34, 0x2E, 0x30, 0x30, 0x20, 0x43, 0x6F, 0x6D, 0x70, 0x69, 0x6C, 0x65, 0x64, 0x20, 0x4A, 0x75, 0x6C, 0x20, 0x31, 0x34, 0x20, 0x32, 0x30, 0x31, 0x30, 0x20, 0x31, 0x32, 0x3A, 0x35, 0x33, 0x3A, 0x31, 0x33, 0x0A, 0x20, 0x20, 0x45, 0x6E, 0x64, 0x20, 0x6F, 0x66, 0x20, 0x49, 0x44, 0x20, 0x42, 0x6C, 0x6F, 0x63, 0x6B, 0x0A, 0x0D, 0xA3
+		}
+	},
+	{
+		11,
+		79,
+		0x39,
+		0x0036,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x36, 0x00, 0x03, 0x09, 0x10, 0x16, 0x06, 0x02, 0x02, 0x02, 0x01, 0xF4, 0x00, 0x0A, 0x01, 0xF4, 0x00, 0x0A, 0x01, 0xF4, 0x00, 0x0A, 0x14, 0x19, 0x19, 0x00, 0x32, 0x02, 0x14, 0x01, 0x01, 0xE0, 0x03, 0x98, 0x0C, 0x0C, 0x00, 0x10, 0x10, 0x08, 0x00, 0x04, 0x08, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x00, 0x04, 0x08, 0x00, 0x00, 0x08, 0x10, 0x10, 0x10, 0x10, 0x10, 0x01, 0x80, 0x10, 0x01, 0x80, 0x50, 0x2A
+		}
+	},
+	{
+		12,
+		79,
+		0x39,
+		0x0037,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x37, 0x01, 0x40, 0x04, 0x02, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x02, 0x40, 0x08, 0x80, 0x20, 0x80, 0x08, 0x04, 0x02, 0x40, 0x20, 0x23, 0x04, 0x21, 0x20, 0x22, 0x00, 0x61, 0x00, 0xFD, 0x00, 0xA0, 0x00, 0xA1, 0x00, 0xA2, 0x00, 0xA3, 0x00, 0xA8, 0x00, 0xA7, 0x00, 0x7C, 0x00, 0x7A, 0x00, 0x7B, 0x00, 0x79, 0x00, 0xCA, 0x24, 0xD6, 0x04, 0xCF, 0x00, 0xC8, 0x00, 0xA9, 0x00, 0xB7, 0x00, 0xB0, 0xB3, 0xF1
+		}
+	},
+	{
+		13,
+		79,
+		0x39,
+		0x0038,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x38, 0xCA, 0xB1, 0x0B, 0xB2, 0x00, 0xB3, 0x33, 0xB4, 0x33, 0xB5, 0x80, 0xB6, 0x00, 0x6C, 0x00, 0x6D, 0x00, 0x6E, 0x00, 0x6F, 0x00, 0xE6, 0x00, 0xE9, 0x00, 0xEC, 0x00, 0xE8, 0x20, 0xEB, 0x00, 0xEE, 0x00, 0xE7, 0x00, 0xEA, 0x00, 0xED, 0x00, 0xFF, 0x23, 0x00, 0x20, 0x20, 0x21, 0x07, 0x22, 0x40, 0x76, 0x00, 0xAF, 0x00, 0xD1, 0x00, 0xA1, 0x00, 0xD3, 0x00, 0xA3, 0x00, 0xD0, 0x00, 0xA0, 0x00, 0x69, 0x5E
+		}
+	},
+	{
+		14,
+		79,
+		0x39,
+		0x0039,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x39, 0xD2, 0x00, 0xA2, 0x00, 0xDC, 0x08, 0xE1, 0xFF, 0xE2, 0x01, 0xDF, 0xFF, 0xDE, 0x02, 0xDD, 0x00, 0x99, 0x00, 0x9C, 0x00, 0xD8, 0x00, 0xD9, 0x00, 0xDA, 0x00, 0xDB, 0x00, 0x9E, 0x00, 0xAC, 0x00, 0xFF, 0x70, 0xCF, 0x62, 0x00, 0x04, 0x70, 0xCF, 0x71, 0x10, 0x62, 0x00, 0xFF, 0x62, 0x01, 0xF6, 0x70, 0xCF, 0x62, 0x02, 0x00, 0x62, 0x01, 0x00, 0x62, 0x04, 0xAB, 0x70, 0xCF, 0x71, 0x10, 0x62, 0xF2, 0x71
+		}
+	},
+	{
+		15,
+		79,
+		0x39,
+		0x003A,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x3A, 0x04, 0xEF, 0x62, 0x05, 0xFC, 0x70, 0xCF, 0x62, 0x06, 0x00, 0x62, 0x05, 0x00, 0x62, 0x08, 0x04, 0x70, 0xCF, 0x71, 0x10, 0x62, 0x08, 0xFF, 0x62, 0x09, 0x8F, 0x70, 0xCF, 0x62, 0x0A, 0x00, 0x62, 0x09, 0x00, 0x62, 0x0C, 0x00, 0x70, 0xCF, 0x71, 0x10, 0x62, 0x0C, 0xFF, 0x62, 0x0D, 0xFF, 0x70, 0xCF, 0x62, 0x0E, 0x00, 0x62, 0x0D, 0x00, 0x62, 0x10, 0x00, 0x70, 0xCF, 0x71, 0x10, 0x62, 0x10, 0xD6, 0x3A
+		}
+	},
+	{
+		16,
+		79,
+		0x39,
+		0x003B,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x3B, 0xFF, 0x62, 0x11, 0xEF, 0x70, 0xCF, 0x62, 0x12, 0x00, 0x62, 0x11, 0x00, 0x70, 0xCF, 0x7F, 0xC1, 0x00, 0xC2, 0x00, 0xC3, 0x00, 0x80, 0x00, 0x81, 0x00, 0x82, 0x00, 0x83, 0x00, 0xA8, 0x00, 0xA9, 0x00, 0xAA, 0x00, 0x84, 0x00, 0x85, 0x00, 0x86, 0x00, 0x87, 0x00, 0xAB, 0x00, 0xAC, 0x00, 0xAD, 0x00, 0x88, 0x00, 0x89, 0x00, 0x8A, 0x00, 0x8B, 0x00, 0xAE, 0x00, 0xAF, 0x00, 0xB0, 0x00, 0x8C, 0x65, 0x59
+		}
+	},
+	{
+		17,
+		79,
+		0x39,
+		0x003C,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x3C, 0x00, 0x8D, 0x00, 0x8E, 0x00, 0x8F, 0x00, 0xB1, 0x00, 0xB2, 0x00, 0xB3, 0x00, 0x90, 0x00, 0x91, 0x00, 0x92, 0x00, 0x93, 0x00, 0xB4, 0x00, 0xB5, 0x00, 0xB6, 0x00, 0x94, 0x00, 0x95, 0x00, 0x96, 0x00, 0x97, 0x00, 0xB7, 0x00, 0xB8, 0x00, 0xB9, 0x00, 0x98, 0x00, 0x99, 0x00, 0x9A, 0x00, 0x9B, 0x00, 0xBA, 0x00, 0xBB, 0x00, 0xBC, 0x00, 0x9C, 0x00, 0x9D, 0x00, 0x9E, 0x00, 0x9F, 0x00, 0xBD, 0x6D, 0x6A
+		}
+	},
+	{
+		18,
+		79,
+		0x39,
+		0x003D,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x3D, 0x00, 0xBE, 0x00, 0xBF, 0x00, 0xA4, 0x00, 0xC0, 0x00, 0xFF, 0x11, 0x06, 0x12, 0x02, 0x13, 0x87, 0x14, 0x03, 0x1B, 0x30, 0x1C, 0x00, 0x19, 0x24, 0x1A, 0x30, 0x0A, 0x3C, 0x0B, 0x3C, 0xFF, 0x01, 0x02, 0x06, 0x00, 0x01, 0x02, 0x01, 0x00, 0x02, 0x00, 0x02, 0x01, 0x02, 0x00, 0x01, 0x01, 0x02, 0x00, 0x02, 0x01, 0x00, 0x73, 0x97, 0x55, 0xAE, 0x04, 0x55, 0xAF, 0xAB, 0x55, 0xB0, 0x04, 0x55, 0x6F, 0x6F
+		}
+	},
+	{
+		19,
+		79,
+		0x39,
+		0x003E,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x3E, 0xB1, 0x00, 0x55, 0xB2, 0x00, 0x7C, 0x0F, 0x8C, 0x7C, 0x0E, 0x61, 0x7F, 0x10, 0x70, 0xCF, 0x50, 0x00, 0x08, 0x50, 0x0D, 0x57, 0xD5, 0x7C, 0x0F, 0xBF, 0x18, 0x50, 0x01, 0x08, 0x50, 0x0E, 0x57, 0x28, 0x7C, 0x0F, 0xBF, 0x18, 0x50, 0x02, 0x08, 0x50, 0x0E, 0x57, 0xCF, 0x7C, 0x0F, 0xBF, 0x18, 0x50, 0x03, 0x08, 0x50, 0x0F, 0x57, 0x4A, 0x7C, 0x0F, 0xBF, 0x18, 0x70, 0xCF, 0x20, 0x7F, 0x38, 0x76, 0x7E
+		}
+	},
+	{
+		20,
+		79,
+		0x39,
+		0x003F,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x3F, 0x02, 0x10, 0x08, 0x4F, 0x52, 0xF9, 0x64, 0x08, 0x64, 0x03, 0x00, 0x54, 0xFC, 0x18, 0x18, 0x20, 0x70, 0xCF, 0x62, 0xE3, 0x00, 0x10, 0x08, 0x28, 0x39, 0xFF, 0xA0, 0x30, 0x4F, 0x54, 0xFD, 0x52, 0xFC, 0x39, 0x00, 0xA0, 0x13, 0x11, 0x06, 0xE0, 0x01, 0x70, 0xCF, 0x71, 0x10, 0x80, 0x09, 0x70, 0xCF, 0x71, 0x20, 0x80, 0x03, 0x71, 0x30, 0x18, 0x20, 0x75, 0x09, 0x00, 0x10, 0x08, 0x28, 0x4F, 0x47, 0x21
+		}
+	},
+	{
+		21,
+		79,
+		0x39,
+		0x0040,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x40, 0x59, 0xFD, 0x61, 0x00, 0x18, 0x20, 0x75, 0x09, 0x00, 0x8F, 0xC6, 0x38, 0xFC, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x08, 0x10, 0x5D, 0xD0, 0x08, 0x5D, 0xD3, 0x08, 0x5D, 0xD4, 0x08, 0x5D, 0xD5, 0x08, 0x70, 0x3F, 0x71, 0x80, 0x62, 0xD0, 0x00, 0x18, 0x60, 0xD5, 0x18, 0x60, 0xD4, 0x18, 0x60, 0xD3, 0x18, 0x60, 0xD0, 0x20, 0x18, 0x7E, 0x08, 0x51, 0x54, 0x04, 0x01, 0x51, 0x53, 0x0C, 0x00, 0x51, 0xB4, 0xFC
+		}
+	},
+	{
+		22,
+		79,
+		0x39,
+		0x0041,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x41, 0x54, 0x04, 0x03, 0x51, 0x53, 0x0C, 0x02, 0x51, 0x54, 0x04, 0x05, 0x51, 0x53, 0x0C, 0x04, 0x51, 0x54, 0x04, 0x07, 0x51, 0x53, 0x0C, 0x06, 0x18, 0x08, 0x51, 0x0B, 0x04, 0x0D, 0x51, 0x0A, 0x0C, 0x0C, 0x41, 0x23, 0xFE, 0x55, 0xBB, 0x00, 0x51, 0x55, 0x60, 0x21, 0x62, 0xDB, 0xFE, 0x43, 0x23, 0x01, 0x18, 0x7E, 0x05, 0x62, 0xD0, 0x00, 0x51, 0x42, 0x08, 0x26, 0x42, 0xEF, 0x7C, 0x19, 0x73, 0xD7, 0x43
+		}
+	},
+	{
+		23,
+		79,
+		0x39,
+		0x0042,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x42, 0x7C, 0x19, 0x73, 0x62, 0xD0, 0x00, 0x18, 0x53, 0x42, 0x70, 0xBF, 0x57, 0x98, 0x62, 0xD3, 0x05, 0x52, 0x00, 0x73, 0x54, 0x00, 0x62, 0xD3, 0x05, 0x54, 0x00, 0x79, 0xDF, 0xF1, 0x7C, 0x19, 0x64, 0x7C, 0x19, 0x64, 0x70, 0xBF, 0x57, 0x98, 0x62, 0xD3, 0x05, 0x52, 0x00, 0x62, 0xD3, 0x08, 0x54, 0x00, 0x62, 0xD3, 0x07, 0x56, 0x00, 0x00, 0x79, 0xDF, 0xEE, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x7F, 0x12, 0xBA
+		}
+	},
+	{
+		24,
+		79,
+		0x39,
+		0x0043,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x43, 0x5C, 0x51, 0x41, 0xE0, 0x01, 0x80, 0x13, 0x80, 0x08, 0x80, 0x01, 0x5B, 0x9F, 0xF1, 0x80, 0x77, 0x62, 0xD3, 0x05, 0x51, 0x57, 0x54, 0x00, 0x80, 0x6E, 0x62, 0xD3, 0x05, 0x51, 0x57, 0x73, 0x53, 0x46, 0x47, 0x42, 0x07, 0xB0, 0x05, 0x54, 0x00, 0x80, 0x15, 0x47, 0x42, 0x04, 0xA0, 0x10, 0x62, 0xD3, 0x05, 0x3B, 0x00, 0xA0, 0x09, 0xC0, 0x04, 0x78, 0x80, 0x02, 0x74, 0x54, 0x00, 0x62, 0xD3, 0xA3, 0xDD
+		}
+	},
+	{
+		25,
+		79,
+		0x39,
+		0x0044,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x44, 0x08, 0x13, 0x00, 0xD0, 0x0E, 0x62, 0xD3, 0x02, 0x56, 0x00, 0x00, 0x3C, 0x0E, 0x00, 0xB0, 0x37, 0x80, 0x16, 0x62, 0xD3, 0x02, 0x08, 0x11, 0x05, 0xD0, 0x03, 0x50, 0x00, 0x54, 0x00, 0x18, 0x3A, 0x15, 0xD0, 0x24, 0x51, 0x0E, 0xB0, 0x20, 0x62, 0xD3, 0x08, 0x52, 0x00, 0x53, 0x45, 0x51, 0x47, 0x12, 0x45, 0x1E, 0x45, 0x00, 0x62, 0xD3, 0x07, 0x03, 0x00, 0x0E, 0x45, 0x00, 0x54, 0x00, 0x51, 0x53, 0x3E
+		}
+	},
+	{
+		26,
+		79,
+		0x39,
+		0x0045,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x45, 0x45, 0x62, 0xD3, 0x08, 0x54, 0x00, 0x7F, 0x7C, 0x21, 0xD2, 0x08, 0x18, 0x7F, 0x50, 0xFF, 0x3C, 0x10, 0x80, 0xC0, 0x11, 0x34, 0x12, 0x76, 0x12, 0x34, 0x11, 0x0E, 0x11, 0x00, 0x34, 0x10, 0x0E, 0x10, 0x00, 0x53, 0x0F, 0x50, 0x00, 0x53, 0x48, 0x53, 0x49, 0x53, 0x4A, 0x53, 0x4B, 0x55, 0x46, 0x18, 0x65, 0x12, 0x6B, 0x11, 0x6B, 0x10, 0x6B, 0x4B, 0x6B, 0x4A, 0x6B, 0x49, 0x51, 0x4B, 0x1A, 0xFD, 0x93
+		}
+	},
+	{
+		27,
+		79,
+		0x39,
+		0x0046,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x46, 0x14, 0x51, 0x4A, 0x1A, 0x13, 0x51, 0x49, 0x19, 0x00, 0xC0, 0x0D, 0x53, 0x49, 0x51, 0x14, 0x14, 0x4B, 0x51, 0x13, 0x1C, 0x4A, 0x76, 0x12, 0x7A, 0x46, 0xBF, 0xD7, 0x50, 0xFF, 0x3C, 0x0F, 0x80, 0xC0, 0x11, 0x34, 0x12, 0x76, 0x12, 0x34, 0x11, 0x0E, 0x11, 0x00, 0x34, 0x10, 0x0E, 0x10, 0x00, 0x34, 0x0F, 0x7F, 0x50, 0x00, 0x53, 0x48, 0x53, 0x49, 0x53, 0x4A, 0x53, 0x4B, 0x51, 0x12, 0x04, 0xCE, 0x36
+		}
+	},
+	{
+		28,
+		79,
+		0x39,
+		0x0047,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x47, 0x49, 0x51, 0x11, 0x0C, 0x48, 0x90, 0x54, 0x90, 0x52, 0x51, 0x12, 0x04, 0x49, 0x51, 0x11, 0x0C, 0x48, 0x90, 0x48, 0x90, 0x46, 0x51, 0x12, 0x04, 0x49, 0x51, 0x11, 0x0C, 0x48, 0x90, 0x3C, 0x90, 0x3A, 0x51, 0x12, 0x04, 0x49, 0x51, 0x11, 0x0C, 0x48, 0x90, 0x30, 0x90, 0x2E, 0x51, 0x12, 0x04, 0x49, 0x51, 0x11, 0x0C, 0x48, 0x90, 0x24, 0x90, 0x22, 0x51, 0x12, 0x04, 0x49, 0x51, 0x11, 0x0C, 0xA3, 0xE1
+		}
+	},
+	{
+		29,
+		79,
+		0x39,
+		0x0048,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x48, 0x48, 0x90, 0x18, 0x90, 0x16, 0x51, 0x12, 0x04, 0x49, 0x51, 0x11, 0x0C, 0x48, 0x90, 0x0C, 0x51, 0x12, 0x04, 0x49, 0x51, 0x11, 0x0C, 0x48, 0x90, 0x02, 0x90, 0x00, 0x70, 0xFB, 0x6E, 0x48, 0x6E, 0x49, 0x6E, 0x4A, 0x6E, 0x4B, 0x7F, 0x50, 0x00, 0x53, 0x48, 0x53, 0x49, 0x53, 0x4A, 0x53, 0x4B, 0x9F, 0xE9, 0x51, 0x12, 0x04, 0x49, 0x51, 0x11, 0x0C, 0x48, 0x9F, 0xDF, 0x51, 0x12, 0x04, 0x49, 0xC6, 0x28
+		}
+	},
+	{
+		30,
+		79,
+		0x39,
+		0x0049,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x49, 0x51, 0x11, 0x0C, 0x48, 0x9F, 0xD5, 0x51, 0x12, 0x04, 0x49, 0x51, 0x11, 0x0C, 0x48, 0x9F, 0xCB, 0x51, 0x12, 0x04, 0x49, 0x51, 0x11, 0x0C, 0x48, 0x9F, 0xC1, 0x9F, 0xBF, 0x9F, 0xBD, 0x9F, 0xBB, 0x9F, 0xB9, 0x51, 0x12, 0x04, 0x49, 0x51, 0x11, 0x0C, 0x48, 0x9F, 0xAF, 0x51, 0x12, 0x04, 0x49, 0x51, 0x11, 0x0C, 0x48, 0x9F, 0xA5, 0x9F, 0xA3, 0x51, 0x12, 0x04, 0x49, 0x51, 0x11, 0x0C, 0x48, 0x32, 0x01
+		}
+	},
+	{
+		31,
+		79,
+		0x39,
+		0x004A,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x4A, 0x9F, 0x99, 0x51, 0x12, 0x04, 0x49, 0x51, 0x11, 0x0C, 0x48, 0x9F, 0x8F, 0x9F, 0x8D, 0x8F, 0x8C, 0x70, 0xBF, 0x62, 0xD0, 0x00, 0x53, 0x44, 0x55, 0x0F, 0x80, 0x55, 0x10, 0x60, 0x55, 0x11, 0x00, 0x62, 0xD3, 0x02, 0x50, 0x10, 0x57, 0x98, 0x54, 0x00, 0x79, 0xDF, 0xFC, 0x62, 0xD3, 0x01, 0x51, 0x0F, 0x57, 0x1A, 0x54, 0xA0, 0x79, 0xDF, 0xFC, 0x55, 0x3D, 0x00, 0x7C, 0x17, 0x18, 0x55, 0x45, 0x6E, 0x7A
+		}
+	},
+	{
+		32,
+		79,
+		0x39,
+		0x004B,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x4B, 0x00, 0x62, 0xD3, 0x01, 0x58, 0xA1, 0x51, 0x10, 0x54, 0xC5, 0x62, 0xD3, 0x01, 0x58, 0xA1, 0x51, 0x0F, 0x54, 0xA0, 0x55, 0x4A, 0x80, 0x52, 0xC5, 0x70, 0xCF, 0x71, 0x20, 0x60, 0xA5, 0x70, 0xCF, 0x62, 0xD3, 0x01, 0x58, 0xA1, 0x52, 0xA0, 0x60, 0xFD, 0x55, 0x4B, 0x10, 0x7C, 0x1B, 0x87, 0x51, 0xA0, 0x01, 0x00, 0x5C, 0x62, 0xD3, 0x02, 0x51, 0x45, 0x7C, 0x19, 0x8F, 0x43, 0xA4, 0x08, 0x47, 0x37, 0x0D
+		}
+	},
+	{
+		33,
+		79,
+		0x39,
+		0x004C,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x4C, 0x9F, 0x01, 0xA0, 0x03, 0x71, 0x01, 0x70, 0xCF, 0x7C, 0x1C, 0x8E, 0x7C, 0x1C, 0xE3, 0x55, 0x56, 0x00, 0x55, 0x57, 0xFF, 0x55, 0x48, 0x07, 0x62, 0xD3, 0x00, 0x58, 0x48, 0x3D, 0x70, 0x00, 0xA0, 0x2A, 0x52, 0x68, 0x08, 0x51, 0x48, 0x64, 0x5C, 0x52, 0x59, 0x20, 0x62, 0xD3, 0x02, 0x3A, 0x44, 0xD0, 0x06, 0x51, 0x4B, 0x73, 0x25, 0x00, 0x51, 0x4B, 0x67, 0x2D, 0x00, 0x52, 0x00, 0x3A, 0x56, 0x92, 0xC4
+		}
+	},
+	{
+		34,
+		79,
+		0x39,
+		0x004D,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x4D, 0xC0, 0x03, 0x53, 0x56, 0x3A, 0x57, 0xD0, 0x03, 0x53, 0x57, 0x7A, 0x48, 0xDF, 0xCA, 0x68, 0x4B, 0xDF, 0x9B, 0x51, 0x4A, 0xA0, 0x42, 0x47, 0x11, 0x01, 0xB0, 0x3D, 0x58, 0xA1, 0x62, 0xD3, 0x01, 0x51, 0x57, 0x02, 0x56, 0x39, 0x1F, 0xA0, 0x30, 0xD0, 0x06, 0x51, 0x4A, 0x73, 0x25, 0xA0, 0x51, 0x4A, 0x67, 0x21, 0x7F, 0x2D, 0xA0, 0x68, 0x4A, 0x26, 0x4A, 0x7F, 0x55, 0x48, 0x07, 0x62, 0xD3, 0xBE, 0x1D
+		}
+	},
+	{
+		35,
+		79,
+		0x39,
+		0x004E,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x4E, 0x00, 0x58, 0x48, 0x3D, 0x70, 0x00, 0xA0, 0x0A, 0x52, 0x68, 0x5C, 0x62, 0xD3, 0x02, 0x56, 0x00, 0x10, 0x7A, 0x48, 0xDF, 0xEA, 0x8F, 0x4A, 0x47, 0x11, 0x02, 0xB0, 0x32, 0x3C, 0x56, 0x1F, 0xC0, 0x2D, 0x62, 0xD3, 0x01, 0x58, 0xA1, 0x3D, 0xC5, 0x40, 0xA0, 0x23, 0x17, 0xC5, 0x20, 0x55, 0x48, 0x07, 0x62, 0xD3, 0x00, 0x62, 0xD3, 0x00, 0x58, 0x48, 0x3D, 0x70, 0x00, 0xA0, 0x0A, 0x52, 0x68, 0xD7, 0x50
+		}
+	},
+	{
+		36,
+		79,
+		0x39,
+		0x004F,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x4F, 0x5C, 0x62, 0xD3, 0x02, 0x56, 0x00, 0x10, 0x7A, 0x48, 0xDF, 0xEA, 0x8E, 0xFE, 0x76, 0xA1, 0x51, 0x45, 0x7C, 0x1A, 0xE9, 0x76, 0x45, 0x3C, 0x45, 0x03, 0xCE, 0xE7, 0x7C, 0x17, 0x55, 0x76, 0x3D, 0x3C, 0x3D, 0x09, 0xCE, 0xD7, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x70, 0xBF, 0x62, 0xD0, 0x00, 0x55, 0x44, 0x99, 0x53, 0x0F, 0x5A, 0x10, 0x55, 0x11, 0x03, 0x8E, 0xA6, 0x70, 0xBF, 0x62, 0xD0, 0x00, 0xAE, 0xFF
+		}
+	},
+	{
+		37,
+		79,
+		0x39,
+		0x0050,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x50, 0x55, 0x44, 0x99, 0x53, 0x0F, 0x55, 0x10, 0x60, 0x55, 0x11, 0x01, 0x8E, 0x94, 0x70, 0xBF, 0x62, 0xD0, 0x00, 0x55, 0x44, 0x99, 0x55, 0x0F, 0x80, 0x53, 0x10, 0x55, 0x11, 0x02, 0x8E, 0x82, 0x90, 0x11, 0x55, 0x38, 0x03, 0x51, 0x38, 0x90, 0x1A, 0x76, 0x38, 0x3C, 0x38, 0x0A, 0xCF, 0xF6, 0x90, 0x66, 0x7F, 0x62, 0xD5, 0x02, 0x62, 0xD0, 0x00, 0x55, 0x34, 0x99, 0x55, 0x0E, 0x00, 0x55, 0x33, 0x76, 0x90
+		}
+	},
+	{
+		38,
+		79,
+		0x39,
+		0x0051,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x51, 0x01, 0x7F, 0x70, 0xBF, 0x62, 0xD3, 0x02, 0x62, 0xD5, 0x02, 0x62, 0xD0, 0x00, 0x11, 0x02, 0x53, 0x45, 0x51, 0x33, 0x02, 0x4C, 0x53, 0x33, 0x53, 0x32, 0x55, 0x44, 0x01, 0x58, 0x32, 0x52, 0x00, 0x3A, 0x16, 0xC0, 0x24, 0x3B, 0x12, 0xC0, 0x20, 0x3B, 0x11, 0xC0, 0x1C, 0x3B, 0x10, 0xC0, 0x18, 0x3B, 0x01, 0xC0, 0x14, 0x78, 0x3B, 0xFF, 0xC0, 0x0F, 0x3B, 0xF0, 0xC0, 0x0B, 0x3B, 0xEF, 0xC0, 0x6C, 0x7D
+		}
+	},
+	{
+		39,
+		79,
+		0x39,
+		0x0052,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x52, 0x07, 0x3B, 0xEE, 0xC0, 0x03, 0x91, 0x84, 0x76, 0x32, 0x76, 0x44, 0x51, 0x4C, 0x78, 0x3A, 0x44, 0xBF, 0xCB, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x70, 0xBF, 0x62, 0xD3, 0x02, 0x62, 0xD5, 0x02, 0x62, 0xD0, 0x00, 0x55, 0x32, 0x01, 0x55, 0x44, 0x01, 0x55, 0x45, 0x00, 0x58, 0x32, 0x52, 0x00, 0x3A, 0x16, 0xC0, 0x18, 0x3B, 0x12, 0xC0, 0x14, 0x3B, 0x11, 0xC0, 0x10, 0x3B, 0x10, 0xC0, 0x0C, 0x3B, 0x06, 0xB2
+		}
+	},
+	{
+		40,
+		79,
+		0x39,
+		0x0053,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x53, 0x01, 0xC0, 0x08, 0x78, 0x3B, 0xFF, 0xC0, 0x03, 0x91, 0x41, 0x76, 0x32, 0x76, 0x44, 0x51, 0x4C, 0x78, 0x3A, 0x44, 0xBF, 0xD7, 0x55, 0x44, 0x01, 0x51, 0x4D, 0x78, 0x53, 0x45, 0x51, 0x4E, 0x12, 0x4C, 0x74, 0x74, 0x53, 0x32, 0x58, 0x32, 0x52, 0x00, 0x3A, 0x16, 0xC0, 0x18, 0x3B, 0x01, 0xC0, 0x14, 0x78, 0x3B, 0xFF, 0xC0, 0x0F, 0x3B, 0xF0, 0xC0, 0x0B, 0x3B, 0xEF, 0xC0, 0x07, 0x3B, 0xEE, 0xF2, 0x8B
+		}
+	},
+	{
+		41,
+		79,
+		0x39,
+		0x0054,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x54, 0xC0, 0x03, 0x91, 0x07, 0x76, 0x32, 0x76, 0x44, 0x51, 0x4C, 0x78, 0x3A, 0x44, 0xBF, 0xD7, 0x51, 0x4C, 0x78, 0x53, 0x44, 0x55, 0x45, 0x01, 0x02, 0x4C, 0x53, 0x32, 0x58, 0x32, 0x52, 0x00, 0x3A, 0x16, 0xC0, 0x18, 0x3B, 0x11, 0xC0, 0x14, 0x3B, 0x10, 0xC0, 0x10, 0x78, 0x3B, 0xFF, 0xC0, 0x0B, 0x3B, 0xEF, 0xC0, 0x07, 0x3B, 0xEE, 0xC0, 0x03, 0x90, 0xD1, 0x51, 0x4C, 0x04, 0x32, 0x76, 0x45, 0x88, 0xB8
+		}
+	},
+	{
+		42,
+		79,
+		0x39,
+		0x0055,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x55, 0x51, 0x4D, 0x78, 0x3A, 0x45, 0xBF, 0xD5, 0x55, 0x44, 0x00, 0x55, 0x45, 0x01, 0x51, 0x4C, 0x53, 0x32, 0x58, 0x32, 0x52, 0x00, 0x3A, 0x16, 0xC0, 0x18, 0x3B, 0x12, 0xC0, 0x14, 0x3B, 0x11, 0xC0, 0x10, 0x3B, 0x01, 0xC0, 0x0C, 0x78, 0x3B, 0xF0, 0xC0, 0x07, 0x3B, 0xEF, 0xC0, 0x03, 0x90, 0x9B, 0x51, 0x4C, 0x04, 0x32, 0x76, 0x45, 0x51, 0x4D, 0x78, 0x3A, 0x45, 0xBF, 0xD5, 0x50, 0x00, 0x53, 0xA4, 0xF1
+		}
+	},
+	{
+		43,
+		79,
+		0x39,
+		0x0056,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x56, 0x44, 0x53, 0x45, 0x55, 0x32, 0x00, 0x5C, 0x52, 0x00, 0x3A, 0x16, 0xC0, 0x0F, 0x3B, 0x12, 0xC0, 0x0B, 0x3B, 0x11, 0xC0, 0x07, 0x3B, 0x01, 0xC0, 0x03, 0x90, 0x70, 0x55, 0x44, 0x00, 0x51, 0x4D, 0x78, 0x53, 0x45, 0x51, 0x4E, 0x12, 0x4C, 0x74, 0x53, 0x32, 0x5C, 0x52, 0x00, 0x3A, 0x16, 0xC0, 0x10, 0x3B, 0x01, 0xC0, 0x0C, 0x78, 0x3B, 0xF0, 0xC0, 0x07, 0x3B, 0xEF, 0xC0, 0x03, 0x90, 0x4B, 0x9F, 0xE8
+		}
+	},
+	{
+		44,
+		79,
+		0x39,
+		0x0057,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x57, 0x55, 0x45, 0x00, 0x51, 0x4C, 0x78, 0x53, 0x44, 0x53, 0x32, 0x5C, 0x52, 0x00, 0x3A, 0x16, 0xC0, 0x10, 0x3B, 0x11, 0xC0, 0x0C, 0x3B, 0x10, 0xC0, 0x08, 0x78, 0x3B, 0xFF, 0xC0, 0x03, 0x90, 0x2B, 0x51, 0x4D, 0x53, 0x45, 0x51, 0x4C, 0x53, 0x44, 0x51, 0x4E, 0x7A, 0x44, 0x7A, 0x45, 0x53, 0x32, 0x5C, 0x52, 0x00, 0x3A, 0x16, 0xC0, 0x10, 0x78, 0x3B, 0xFF, 0xC0, 0x0B, 0x3B, 0xEF, 0xC0, 0x07, 0x3B, 0x21
+		}
+	},
+	{
+		45,
+		79,
+		0x39,
+		0x0058,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x58, 0x3B, 0xEE, 0xC0, 0x03, 0x90, 0x05, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x51, 0x43, 0x01, 0x03, 0x3A, 0x0E, 0xC0, 0x0F, 0x51, 0x45, 0x3F, 0x34, 0x51, 0x44, 0x3F, 0x34, 0x51, 0x32, 0x3F, 0x34, 0x76, 0x0E, 0x7F, 0x84, 0x88, 0x8C, 0x90, 0x94, 0x98, 0x9C, 0x9C, 0x9C, 0x10, 0x80, 0x10, 0x80, 0x10, 0x80, 0x10, 0x20, 0x40, 0x62, 0xD0, 0x00, 0x55, 0x4C, 0x11, 0x55, 0x4D, 0x09, 0x55, 0x4E, 0x98, 0xAB, 0x02
+		}
+	},
+	{
+		46,
+		79,
+		0x39,
+		0x0059,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x59, 0x55, 0x4F, 0x03, 0x55, 0x50, 0x97, 0x55, 0x51, 0x01, 0x55, 0x52, 0xDF, 0x55, 0x15, 0x08, 0x55, 0x16, 0x08, 0x55, 0x17, 0x08, 0x55, 0x42, 0x1C, 0x55, 0x43, 0x04, 0x55, 0xA2, 0x00, 0x55, 0xA3, 0x00, 0x55, 0xA4, 0x48, 0x55, 0xA5, 0x04, 0x55, 0xA6, 0x08, 0x55, 0xA9, 0x01, 0x55, 0xA7, 0x0C, 0x55, 0xA8, 0x05, 0x55, 0x18, 0x04, 0x55, 0xAD, 0x02, 0x55, 0x40, 0x00, 0x55, 0x3F, 0x00, 0x51, 0xE1, 0x6F
+		}
+	},
+	{
+		47,
+		79,
+		0x39,
+		0x005A,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x5A, 0xA9, 0xA0, 0x08, 0x51, 0xA7, 0x58, 0xA8, 0x7C, 0x18, 0xF9, 0x70, 0xBF, 0x62, 0xD3, 0x01, 0x57, 0x3F, 0x50, 0x09, 0x28, 0x54, 0xA0, 0x79, 0xDF, 0xF9, 0x70, 0x3F, 0x71, 0xC0, 0x5D, 0xFC, 0x70, 0xCF, 0x71, 0x10, 0x62, 0x76, 0x07, 0x43, 0xE2, 0x08, 0x70, 0xCF, 0x71, 0x20, 0x62, 0xA4, 0x01, 0x62, 0xC0, 0x00, 0x39, 0x04, 0xD0, 0x04, 0x43, 0xC8, 0x04, 0x7C, 0x19, 0x41, 0x70, 0xCF, 0x71, 0x3B, 0x24
+		}
+	},
+	{
+		48,
+		79,
+		0x39,
+		0x005B,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x5B, 0x20, 0x43, 0x81, 0x0E, 0x43, 0x85, 0x0E, 0x43, 0x89, 0x0E, 0x43, 0x8D, 0x0E, 0x43, 0x91, 0x0E, 0x43, 0x95, 0x0E, 0x43, 0x99, 0x0E, 0x43, 0x9D, 0x0E, 0x70, 0xCF, 0x55, 0x9A, 0x07, 0x55, 0x9C, 0x02, 0x55, 0x9E, 0x06, 0x55, 0x9D, 0x00, 0x50, 0x48, 0x57, 0x00, 0x7C, 0x18, 0xB7, 0x71, 0x30, 0x62, 0x1B, 0x40, 0x70, 0xCF, 0x62, 0xA2, 0x10, 0x7C, 0x2A, 0xD8, 0x50, 0x04, 0x7C, 0x18, 0xE7, 0x6B, 0x85
+		}
+	},
+	{
+		49,
+		79,
+		0x39,
+		0x005C,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x5C, 0x70, 0xCF, 0x7C, 0x19, 0x4E, 0x7F, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x00, 0x03, 0x06, 0x09, 0x0C, 0x0F, 0x12, 0x15, 0x18, 0x62, 0xD0, 0x00, 0x70, 0xCF, 0x71, 0x10, 0x51, 0x3D, 0xF0, 0x60, 0x5C, 0x51, 0x3D, 0xF0, 0x75, 0x73, 0x53, 0x09, 0x5E, 0x00, 0x22, 0x09, 0x61, 0x00, 0x70, 0xCF, 0x71, 0x20, 0x51, 0x3D, 0xFE, 0xE9, 0x5C, 0x51, 0x3D, 0xFE, 0xED, 0x53, 0x09, 0xBF, 0x2E
+		}
+	},
+	{
+		50,
+		79,
+		0x39,
+		0x005D,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x5D, 0x5E, 0x00, 0x2A, 0x09, 0x61, 0x00, 0x70, 0xCF, 0x51, 0x3D, 0xFF, 0xBA, 0x53, 0xA0, 0x51, 0x3D, 0xFF, 0xBD, 0x53, 0xA1, 0x7F, 0x70, 0xCF, 0x71, 0x20, 0x51, 0x3D, 0xFE, 0xC5, 0x5C, 0x51, 0x3D, 0xFE, 0xC9, 0x73, 0x53, 0x09, 0x5E, 0x00, 0x22, 0x09, 0x61, 0x00, 0x70, 0xCF, 0x71, 0x10, 0x51, 0x3D, 0xF0, 0x10, 0x5C, 0x51, 0x3D, 0xF0, 0x25, 0x53, 0x09, 0x5E, 0x00, 0x2A, 0x09, 0x61, 0x00, 0x4E, 0x4D
+		}
+	},
+	{
+		51,
+		79,
+		0x39,
+		0x005E,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x5E, 0x70, 0xCF, 0x7F, 0x0C, 0x0C, 0x00, 0x10, 0x10, 0x08, 0x00, 0x04, 0x08, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x00, 0x04, 0x08, 0x00, 0x00, 0x08, 0x10, 0x10, 0x10, 0x10, 0x10, 0x01, 0x80, 0x10, 0x01, 0x80, 0x01, 0x40, 0x04, 0x02, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x02, 0x40, 0x08, 0x80, 0x20, 0x80, 0x08, 0x04, 0x02, 0x40, 0x20, 0x62, 0xD0, 0x00, 0x55, 0x9D, 0x00, 0x51, 0xA6, 0x91, 0x11, 0xD4
+		}
+	},
+	{
+		52,
+		79,
+		0x39,
+		0x005F,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x5F, 0x76, 0x51, 0xA4, 0x58, 0xA3, 0x7C, 0x18, 0xB7, 0x51, 0xA5, 0x7C, 0x18, 0xE7, 0x70, 0xCF, 0x71, 0x20, 0x50, 0x00, 0x60, 0x80, 0x60, 0x84, 0x60, 0x88, 0x60, 0x8C, 0x60, 0x90, 0x60, 0x94, 0x60, 0x98, 0x60, 0x9C, 0x60, 0x82, 0x60, 0x86, 0x60, 0x8A, 0x60, 0x8E, 0x60, 0x92, 0x60, 0x96, 0x60, 0x9A, 0x60, 0x9E, 0x60, 0xC0, 0x43, 0x81, 0x04, 0x43, 0x85, 0x04, 0x43, 0x89, 0x04, 0x43, 0x8D, 0x86, 0xBF
+		}
+	},
+	{
+		53,
+		79,
+		0x39,
+		0x0060,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x60, 0x04, 0x43, 0x91, 0x04, 0x43, 0x95, 0x04, 0x43, 0x99, 0x04, 0x43, 0x9D, 0x04, 0x71, 0x30, 0x71, 0x30, 0x62, 0x1F, 0x00, 0x62, 0x1B, 0x70, 0x62, 0x13, 0x87, 0x70, 0xCF, 0x71, 0x10, 0x55, 0x09, 0x19, 0x51, 0x09, 0xFF, 0x5E, 0x5C, 0x51, 0x09, 0xFF, 0x73, 0x53, 0x45, 0x5E, 0x00, 0x2A, 0x45, 0x61, 0x00, 0x7A, 0x09, 0xDF, 0xEC, 0x70, 0xCF, 0x41, 0xA2, 0x3F, 0x55, 0x40, 0x00, 0x55, 0x3F, 0xDC, 0x6C
+		}
+	},
+	{
+		54,
+		79,
+		0x39,
+		0x0061,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x61, 0x00, 0x51, 0xA9, 0xA0, 0x08, 0x51, 0xA7, 0x58, 0xA8, 0x7C, 0x18, 0xF9, 0x7F, 0x41, 0xE0, 0xFB, 0x71, 0x30, 0x41, 0x1B, 0xBF, 0x70, 0xCF, 0x71, 0x20, 0x41, 0x81, 0xFD, 0x41, 0x85, 0xFD, 0x41, 0x89, 0xFD, 0x41, 0x8D, 0xFD, 0x41, 0x91, 0xFD, 0x41, 0x95, 0xFD, 0x41, 0x99, 0xFD, 0x41, 0x9D, 0xFD, 0x70, 0xCF, 0x41, 0xA2, 0xEF, 0x7C, 0x19, 0x5A, 0x70, 0xCF, 0x71, 0x10, 0x41, 0xE2, 0xF7, 0x90, 0xD5
+		}
+	},
+	{
+		55,
+		79,
+		0x39,
+		0x0062,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x62, 0x70, 0xCF, 0x7F, 0x7C, 0x19, 0x41, 0x70, 0xCF, 0x71, 0x10, 0x43, 0xE2, 0x08, 0x71, 0x30, 0x43, 0x1B, 0x40, 0x70, 0xCF, 0x71, 0x20, 0x43, 0x81, 0x02, 0x43, 0x85, 0x02, 0x43, 0x89, 0x02, 0x43, 0x8D, 0x02, 0x43, 0x91, 0x02, 0x43, 0x95, 0x02, 0x43, 0x99, 0x02, 0x43, 0x9D, 0x02, 0x70, 0xCF, 0x43, 0xA2, 0x10, 0x7C, 0x19, 0x4E, 0x7F, 0x62, 0xD0, 0x00, 0x53, 0x49, 0x5A, 0x48, 0x53, 0x9B, 0x24, 0xFE
+		}
+	},
+	{
+		56,
+		79,
+		0x39,
+		0x0063,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x63, 0x5B, 0x21, 0x01, 0xA0, 0x06, 0x2E, 0x9E, 0x01, 0x80, 0x04, 0x26, 0x9E, 0xFE, 0x68, 0x48, 0x6E, 0x49, 0x51, 0x49, 0x78, 0x70, 0xCF, 0x71, 0x20, 0x60, 0xC9, 0x70, 0xCF, 0x7C, 0x2A, 0xBA, 0x7F, 0x00, 0x04, 0x0C, 0x1C, 0x3C, 0x7C, 0xFC, 0x62, 0xD0, 0x00, 0x53, 0x3E, 0x76, 0x3E, 0xFF, 0xF0, 0x26, 0x9C, 0x03, 0x2C, 0x9C, 0x7C, 0x2A, 0xBA, 0x7F, 0x62, 0xD0, 0x00, 0x53, 0xA7, 0x5A, 0xA8, 0xA6, 0x03
+		}
+	},
+	{
+		57,
+		79,
+		0x39,
+		0x0064,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x64, 0x90, 0x01, 0x7F, 0x62, 0xD0, 0x00, 0x08, 0x5A, 0x3E, 0x55, 0x40, 0xFF, 0x55, 0x3F, 0x01, 0x78, 0xA0, 0x0A, 0x06, 0x40, 0xFF, 0x0E, 0x3F, 0x01, 0x78, 0xBF, 0xF8, 0x51, 0x3E, 0x68, 0x3F, 0x6E, 0x40, 0x78, 0xDF, 0xFA, 0x16, 0x40, 0x7F, 0x1E, 0x3F, 0x00, 0x18, 0x78, 0x64, 0x64, 0x26, 0x9C, 0x03, 0x2C, 0x9C, 0x7C, 0x2A, 0xBA, 0x7F, 0x62, 0xD0, 0x00, 0x78, 0x53, 0x9A, 0x7C, 0x2A, 0xBA, 0x11, 0xDA
+		}
+	},
+	{
+		58,
+		79,
+		0x39,
+		0x0065,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x65, 0x7F, 0x70, 0xCF, 0x71, 0x20, 0x62, 0xA7, 0x89, 0x62, 0xA7, 0x49, 0x70, 0xCF, 0x7F, 0x70, 0xCF, 0x71, 0x20, 0x49, 0xC8, 0x08, 0xAF, 0xFC, 0x70, 0xCF, 0x7F, 0x70, 0xCF, 0x71, 0x20, 0x62, 0xA7, 0x09, 0x70, 0xCF, 0x7F, 0x70, 0xBF, 0x62, 0xD0, 0x00, 0x55, 0x41, 0x00, 0x93, 0xA8, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x70, 0xBF, 0x62, 0xD0, 0x00, 0x55, 0x41, 0x02, 0x93, 0x99, 0x70, 0x3F, 0x71, 0xB9, 0x2B
+		}
+	},
+	{
+		59,
+		79,
+		0x39,
+		0x0066,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x66, 0xC0, 0x7F, 0x57, 0x98, 0x50, 0x0A, 0x28, 0x21, 0xE0, 0xB0, 0x04, 0x79, 0xDF, 0xF7, 0x7F, 0x70, 0xCF, 0x71, 0x10, 0x64, 0xE0, 0x01, 0x80, 0x09, 0x80, 0x75, 0x80, 0xE1, 0x81, 0x3D, 0x81, 0x49, 0x41, 0x00, 0xFD, 0x41, 0x0C, 0xF7, 0x41, 0x0C, 0xBF, 0x41, 0x00, 0x7F, 0x41, 0x10, 0xF7, 0x41, 0x10, 0xBF, 0x70, 0xCF, 0x71, 0x20, 0x5D, 0xF7, 0x53, 0x9F, 0x70, 0xFE, 0x43, 0x80, 0x04, 0x52, 0xE2, 0x7E
+		}
+	},
+	{
+		60,
+		79,
+		0x39,
+		0x0067,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x67, 0x06, 0x60, 0x83, 0x06, 0x68, 0x06, 0x2E, 0x70, 0x01, 0x43, 0x84, 0x08, 0x52, 0x03, 0x60, 0x87, 0x06, 0x69, 0x03, 0x2E, 0x71, 0x01, 0x43, 0x88, 0x04, 0x52, 0x00, 0x60, 0x8B, 0x06, 0x6A, 0x00, 0x2E, 0x72, 0x01, 0x43, 0x8C, 0x02, 0x52, 0x09, 0x60, 0x8F, 0x06, 0x6B, 0x09, 0x2E, 0x73, 0x01, 0x43, 0x90, 0x01, 0x52, 0x0C, 0x60, 0x93, 0x06, 0x6C, 0x0C, 0x2E, 0x74, 0x01, 0x43, 0x94, 0x02, 0x8C, 0xD3
+		}
+	},
+	{
+		61,
+		79,
+		0x39,
+		0x0068,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x68, 0x52, 0x0F, 0x60, 0x97, 0x06, 0x6D, 0x0F, 0x2E, 0x75, 0x01, 0x43, 0xA3, 0x3F, 0x7F, 0x41, 0x04, 0xBF, 0x41, 0x0C, 0xFB, 0x41, 0x0C, 0xDF, 0x41, 0x00, 0xDF, 0x41, 0x10, 0xFB, 0x41, 0x10, 0xDF, 0x70, 0xCF, 0x71, 0x20, 0x5D, 0xF7, 0x53, 0x9F, 0x70, 0xFE, 0x43, 0x80, 0x02, 0x52, 0x07, 0x60, 0x83, 0x06, 0x68, 0x07, 0x2E, 0x70, 0x01, 0x43, 0x84, 0x04, 0x52, 0x04, 0x60, 0x87, 0x06, 0x69, 0x46, 0x48
+		}
+	},
+	{
+		62,
+		79,
+		0x39,
+		0x0069,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x69, 0x04, 0x2E, 0x71, 0x01, 0x43, 0x88, 0x02, 0x52, 0x01, 0x60, 0x8B, 0x06, 0x6A, 0x01, 0x2E, 0x72, 0x01, 0x43, 0x8C, 0x04, 0x52, 0x0A, 0x60, 0x8F, 0x06, 0x6B, 0x0A, 0x2E, 0x73, 0x01, 0x43, 0x90, 0x02, 0x52, 0x0D, 0x60, 0x93, 0x06, 0x6C, 0x0D, 0x2E, 0x74, 0x01, 0x43, 0x94, 0x04, 0x52, 0x10, 0x60, 0x97, 0x06, 0x6D, 0x10, 0x2E, 0x75, 0x01, 0x43, 0xA3, 0x3F, 0x7F, 0x41, 0x08, 0xF7, 0x41, 0xC5, 0x47
+		}
+	},
+	{
+		63,
+		79,
+		0x39,
+		0x006A,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x6A, 0x0C, 0xFD, 0x41, 0x0C, 0xEF, 0x41, 0x08, 0x7F, 0x41, 0x10, 0xFD, 0x70, 0xCF, 0x71, 0x20, 0x5D, 0xF7, 0x53, 0x9F, 0x70, 0xFE, 0x43, 0x80, 0x01, 0x52, 0x08, 0x60, 0x83, 0x06, 0x68, 0x08, 0x2E, 0x70, 0x01, 0x43, 0x84, 0x02, 0x52, 0x05, 0x60, 0x87, 0x06, 0x69, 0x05, 0x2E, 0x71, 0x01, 0x43, 0x88, 0x01, 0x52, 0x02, 0x60, 0x8B, 0x06, 0x6A, 0x02, 0x2E, 0x72, 0x01, 0x43, 0x8C, 0x08, 0x52, 0x57, 0x6C
+		}
+	},
+	{
+		64,
+		79,
+		0x39,
+		0x006B,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x6B, 0x0B, 0x60, 0x8F, 0x06, 0x6B, 0x0B, 0x2E, 0x73, 0x01, 0x43, 0x90, 0x04, 0x52, 0x0E, 0x60, 0x93, 0x06, 0x6C, 0x0E, 0x2E, 0x74, 0x01, 0x43, 0xA3, 0x1F, 0x7F, 0x70, 0xCF, 0x71, 0x20, 0x5D, 0xF7, 0x53, 0x9F, 0x70, 0xFE, 0x43, 0xA3, 0x00, 0x7F, 0x7F, 0x70, 0xCF, 0x71, 0x10, 0x64, 0xE0, 0x01, 0x80, 0x09, 0x80, 0x34, 0x80, 0x5F, 0x80, 0x84, 0x80, 0x8B, 0x43, 0x00, 0x02, 0x43, 0x0C, 0x08, 0x1D, 0xF9
+		}
+	},
+	{
+		65,
+		79,
+		0x39,
+		0x006C,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x6C, 0x43, 0x0C, 0x40, 0x43, 0x00, 0x80, 0x43, 0x10, 0x08, 0x43, 0x10, 0x40, 0x70, 0xCF, 0x71, 0x20, 0x41, 0x80, 0xFB, 0x41, 0x84, 0xF7, 0x41, 0x88, 0xFB, 0x41, 0x8C, 0xFD, 0x41, 0x90, 0xFE, 0x41, 0x94, 0xFD, 0x62, 0xA3, 0x00, 0x80, 0x5E, 0x43, 0x04, 0x40, 0x43, 0x0C, 0x04, 0x43, 0x0C, 0x20, 0x43, 0x00, 0x20, 0x43, 0x10, 0x04, 0x43, 0x10, 0x20, 0x70, 0xCF, 0x71, 0x20, 0x41, 0x80, 0xFD, 0x9E, 0xFC
+		}
+	},
+	{
+		66,
+		79,
+		0x39,
+		0x006D,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x6D, 0x41, 0x84, 0xFB, 0x41, 0x88, 0xFD, 0x41, 0x8C, 0xFB, 0x41, 0x90, 0xFD, 0x41, 0x94, 0xFB, 0x62, 0xA3, 0x00, 0x80, 0x31, 0x43, 0x08, 0x08, 0x43, 0x0C, 0x02, 0x43, 0x0C, 0x10, 0x43, 0x08, 0x80, 0x43, 0x10, 0x02, 0x70, 0xCF, 0x71, 0x20, 0x41, 0x80, 0xFE, 0x41, 0x84, 0xFD, 0x41, 0x88, 0xFE, 0x41, 0x8C, 0xF7, 0x41, 0x90, 0xFB, 0x62, 0xA3, 0x00, 0x80, 0x0A, 0x70, 0xCF, 0x71, 0x20, 0x62, 0x2E, 0x1D
+		}
+	},
+	{
+		67,
+		79,
+		0x39,
+		0x006E,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x6E, 0xA3, 0x00, 0x80, 0x01, 0x70, 0xCF, 0x7F, 0x62, 0xD3, 0x00, 0x57, 0x07, 0x52, 0x70, 0x54, 0x80, 0x52, 0x68, 0x54, 0x78, 0x51, 0xA0, 0x56, 0x70, 0x00, 0x54, 0x68, 0x79, 0xDF, 0xEF, 0x7F, 0x62, 0xD5, 0x00, 0x62, 0xD3, 0x00, 0x58, 0xA0, 0x55, 0x09, 0x88, 0x50, 0x0A, 0x28, 0x21, 0x1F, 0x3F, 0x09, 0x75, 0x3C, 0x09, 0x99, 0xCF, 0xF4, 0x7F, 0x62, 0xD0, 0x00, 0x55, 0xAA, 0x00, 0x55, 0xAC, 0xE5, 0x8C
+		}
+	},
+	{
+		68,
+		79,
+		0x39,
+		0x006F,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x6F, 0x00, 0x57, 0x07, 0x62, 0xD3, 0x00, 0x5B, 0x3D, 0x80, 0x00, 0xA0, 0x5A, 0x10, 0x64, 0x5C, 0x52, 0x58, 0x53, 0x56, 0x52, 0x59, 0x53, 0x57, 0x51, 0x3E, 0x6E, 0x56, 0x6E, 0x57, 0x78, 0xDF, 0xFA, 0x51, 0x40, 0x14, 0x57, 0x51, 0x3F, 0x1C, 0x56, 0xA0, 0x0E, 0xD0, 0x06, 0x55, 0x57, 0x00, 0x80, 0x04, 0x55, 0x57, 0xFF, 0x55, 0x56, 0x00, 0x20, 0x10, 0x52, 0x78, 0x5C, 0x62, 0xD3, 0x02, 0x51, 0x6C, 0x9B
+		}
+	},
+	{
+		69,
+		79,
+		0x39,
+		0x0070,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x70, 0x57, 0x73, 0x54, 0x00, 0x39, 0xDC, 0xD0, 0x05, 0x39, 0x23, 0xD0, 0x04, 0x55, 0xAB, 0x01, 0x62, 0xD3, 0x08, 0x13, 0x00, 0xC0, 0x07, 0x39, 0x0F, 0xD0, 0x0B, 0x80, 0x05, 0x39, 0xF1, 0xC0, 0x05, 0x04, 0xAA, 0x76, 0xAC, 0x20, 0x79, 0xDF, 0x9C, 0x51, 0xAC, 0x47, 0xAA, 0x80, 0xA0, 0x03, 0x76, 0xAA, 0x68, 0xAA, 0x39, 0x02, 0xC0, 0x18, 0x47, 0xAA, 0x80, 0xA0, 0x03, 0x76, 0xAA, 0x68, 0xAA, 0x67, 0x92
+		}
+	},
+	{
+		70,
+		79,
+		0x39,
+		0x0071,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x71, 0x67, 0x39, 0x02, 0xC0, 0x0A, 0x47, 0xAA, 0x80, 0xA0, 0x03, 0x76, 0xAA, 0x68, 0xAA, 0x57, 0x07, 0x62, 0xD3, 0x00, 0x3D, 0x80, 0x00, 0xA0, 0x33, 0x10, 0x52, 0x78, 0x5C, 0x62, 0xD3, 0x02, 0x52, 0x00, 0x53, 0x47, 0x47, 0x42, 0x10, 0xA0, 0x1B, 0x51, 0xAA, 0x15, 0x00, 0xD0, 0x0B, 0x47, 0xAA, 0x80, 0xB0, 0x0E, 0x56, 0x00, 0x00, 0x80, 0x09, 0x47, 0xAA, 0x80, 0xA0, 0x04, 0x56, 0x00, 0xFF, 0xE5, 0x8F
+		}
+	},
+	{
+		71,
+		79,
+		0x39,
+		0x0072,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x72, 0x52, 0x00, 0x73, 0x53, 0x57, 0x5B, 0x7C, 0x10, 0xC0, 0x20, 0x79, 0xDF, 0xC4, 0x7F, 0x62, 0xD0, 0x00, 0x70, 0xCF, 0x71, 0x20, 0x49, 0xC4, 0x01, 0xAF, 0xFC, 0x41, 0xA4, 0xF7, 0x41, 0xC4, 0xFE, 0x5D, 0xA8, 0x53, 0x59, 0x5D, 0xA9, 0x53, 0x58, 0x5D, 0xAB, 0x53, 0x5B, 0x5D, 0xAC, 0x53, 0x5A, 0x5D, 0xAE, 0x53, 0x5D, 0x5D, 0xAF, 0x53, 0x5C, 0x5D, 0xB1, 0x53, 0x5F, 0x5D, 0xB2, 0x53, 0x5E, 0x2F, 0x24
+		}
+	},
+	{
+		72,
+		79,
+		0x39,
+		0x0073,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x73, 0x5D, 0xB4, 0x53, 0x61, 0x5D, 0xB5, 0x53, 0x60, 0x5D, 0xB7, 0x53, 0x63, 0x5D, 0xB8, 0x53, 0x62, 0x5D, 0xBA, 0x53, 0x65, 0x5D, 0xBB, 0x53, 0x64, 0x5D, 0xBD, 0x53, 0x67, 0x5D, 0xBE, 0x53, 0x66, 0x70, 0xCF, 0x7F, 0x62, 0xD3, 0x00, 0x57, 0x07, 0x5B, 0x3D, 0x70, 0x00, 0xA0, 0x25, 0x10, 0x64, 0x5C, 0x51, 0x3E, 0x6F, 0x58, 0x6F, 0x59, 0x78, 0xDF, 0xFA, 0x51, 0x40, 0x15, 0x59, 0x51, 0x3F, 0x50, 0x67
+		}
+	},
+	{
+		73,
+		79,
+		0x39,
+		0x0074,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x74, 0x1D, 0x58, 0xA0, 0x0E, 0xD0, 0x06, 0x56, 0x59, 0x00, 0x80, 0x04, 0x56, 0x59, 0xFF, 0x56, 0x58, 0x00, 0x20, 0x79, 0xDF, 0xD4, 0x7F, 0x55, 0x3D, 0x00, 0x55, 0x37, 0x01, 0x7C, 0x17, 0x18, 0x9E, 0x7E, 0x58, 0xA1, 0x62, 0xD3, 0x01, 0x52, 0xA0, 0x60, 0xFD, 0x52, 0xC5, 0x70, 0xCF, 0x71, 0x20, 0x60, 0xA5, 0x70, 0xCF, 0x9E, 0x51, 0x50, 0x00, 0x57, 0x88, 0x9C, 0x53, 0x43, 0xA4, 0x08, 0x47, 0x25, 0x12
+		}
+	},
+	{
+		74,
+		79,
+		0x39,
+		0x0075,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x75, 0x9F, 0x01, 0xA0, 0x03, 0x71, 0x01, 0x70, 0xCF, 0x51, 0x3D, 0xA0, 0x05, 0x9E, 0x6A, 0x51, 0x3D, 0x9F, 0x3C, 0x50, 0x00, 0x9D, 0x93, 0x62, 0xD3, 0x01, 0x58, 0xA1, 0x52, 0xA1, 0x60, 0xFD, 0x52, 0xC6, 0x70, 0xCF, 0x71, 0x20, 0x60, 0xA5, 0x70, 0xCF, 0x9E, 0x1C, 0x50, 0x01, 0x57, 0x88, 0x9C, 0x1E, 0x43, 0xA4, 0x08, 0x47, 0x9F, 0x01, 0xA0, 0x03, 0x71, 0x01, 0x70, 0xCF, 0x9E, 0x39, 0x9F, 0x07, 0xD7
+		}
+	},
+	{
+		75,
+		79,
+		0x39,
+		0x0076,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x76, 0x0D, 0x50, 0x01, 0x9D, 0x64, 0x62, 0xD3, 0x01, 0x58, 0xA1, 0x52, 0xA2, 0x60, 0xFD, 0x52, 0xC7, 0x70, 0xCF, 0x71, 0x20, 0x60, 0xA5, 0x70, 0xCF, 0x9D, 0xED, 0x50, 0x02, 0x57, 0x88, 0x9B, 0xEF, 0x43, 0xA4, 0x08, 0x47, 0x9F, 0x01, 0xA0, 0x03, 0x71, 0x01, 0x70, 0xCF, 0x9E, 0x0A, 0x9E, 0xDE, 0x50, 0x02, 0x9D, 0x35, 0x7C, 0x17, 0x55, 0x76, 0x3D, 0x3C, 0x3D, 0x09, 0xCF, 0x5F, 0x62, 0xD3, 0x43, 0x50
+		}
+	},
+	{
+		76,
+		79,
+		0x39,
+		0x0077,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x77, 0x00, 0x9D, 0xC4, 0x9D, 0xF3, 0x7C, 0x14, 0x1F, 0x55, 0x37, 0x00, 0x7F, 0x43, 0xE0, 0x08, 0x7F, 0x41, 0xE0, 0xF7, 0x7F, 0x62, 0xE6, 0x04, 0x62, 0xD0, 0x00, 0x5A, 0x53, 0x53, 0x54, 0x10, 0x08, 0x51, 0x55, 0x08, 0x38, 0x03, 0x4F, 0x50, 0x00, 0x54, 0xFE, 0x54, 0xFD, 0x01, 0x08, 0x54, 0xFF, 0x48, 0xFC, 0x01, 0xA0, 0x09, 0x52, 0xFB, 0x05, 0xFE, 0x52, 0xFA, 0x0D, 0xFD, 0x6F, 0xFD, 0x6F, 0xCC, 0x63
+		}
+	},
+	{
+		77,
+		79,
+		0x39,
+		0x0078,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x78, 0xFE, 0x6F, 0xFC, 0x7B, 0xFF, 0xBF, 0xEA, 0x52, 0xFC, 0x60, 0xE8, 0x52, 0xFE, 0x60, 0xE7, 0x62, 0xE6, 0x00, 0x62, 0xE6, 0x01, 0x38, 0xFA, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x5D, 0xF7, 0x08, 0x70, 0xFE, 0x57, 0xF0, 0x50, 0x00, 0x62, 0xE6, 0x04, 0x62, 0xE8, 0x01, 0x62, 0xE7, 0x00, 0x62, 0xE6, 0x00, 0x62, 0xE6, 0x01, 0x62, 0xDA, 0xF7, 0x49, 0xDA, 0x08, 0xAF, 0xFC, 0x62, 0xDA, 0xF7, 0x08, 0xF1, 0xAE
+		}
+	},
+	{
+		78,
+		79,
+		0x39,
+		0x0079,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x79, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x51, 0x00, 0x18, 0x49, 0xDA, 0x08, 0xB0, 0x04, 0x40, 0x80, 0x05, 0x74, 0x62, 0xDA, 0xF7, 0x79, 0xBF, 0xE0, 0x49, 0xDA, 0x08, 0xA0, 0x02, 0x74, 0x62, 0xE6, 0x04, 0x60, 0xE8, 0x62, 0xE7, 0x00, 0x62, 0xE6, 0x00, 0x62, 0xE6, 0x01, 0x62, 0xD0, 0x00, 0x53, 0x55, 0x55, 0x53, 0x00, 0x55, 0x54, 0x01, 0x7E, 0x55, 0x77
+		}
+	},
+	{
+		79,
+		79,
+		0x39,
+		0x007A,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x7A, 0x5D, 0xF7, 0x08, 0x70, 0xFE, 0x62, 0xD0, 0x00, 0x51, 0x05, 0x58, 0x04, 0x7E, 0x5D, 0xF7, 0x08, 0x70, 0xFE, 0x62, 0xD0, 0x00, 0x51, 0x01, 0x58, 0x00, 0x7E, 0x5D, 0xF7, 0x08, 0x70, 0xFE, 0x62, 0xD0, 0x00, 0x51, 0x03, 0x58, 0x02, 0x7E, 0x5D, 0xF7, 0x08, 0x70, 0xFE, 0x62, 0xD0, 0x00, 0x51, 0x07, 0x58, 0x06, 0x7E, 0x08, 0x08, 0x10, 0x4F, 0x5D, 0xF7, 0x54, 0xFD, 0x70, 0x3F, 0x71, 0xC0, 0x9C, 0x06
+		}
+	},
+	{
+		80,
+		79,
+		0x39,
+		0x007B,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x7B, 0x20, 0x18, 0x70, 0xFE, 0x62, 0xD0, 0x00, 0x53, 0x05, 0x5A, 0x04, 0x7E, 0x08, 0x08, 0x10, 0x4F, 0x5D, 0xF7, 0x54, 0xFD, 0x70, 0x3F, 0x71, 0xC0, 0x20, 0x18, 0x70, 0xFE, 0x62, 0xD0, 0x00, 0x53, 0x01, 0x5A, 0x00, 0x7E, 0x08, 0x08, 0x10, 0x4F, 0x5D, 0xF7, 0x54, 0xFD, 0x70, 0x3F, 0x71, 0xC0, 0x20, 0x18, 0x70, 0xFE, 0x62, 0xD0, 0x00, 0x53, 0x03, 0x5A, 0x02, 0x7E, 0x0E, 0x1E, 0x3D, 0x7A, 0xE3, 0x95
+		}
+	},
+	{
+		81,
+		79,
+		0x39,
+		0x007C,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x7C, 0x07, 0x03, 0x00, 0x00, 0x07, 0x0E, 0x1E, 0x3D, 0x03, 0x01, 0x00, 0x00, 0x1E, 0x3D, 0x7A, 0xF6, 0x0E, 0x07, 0x01, 0x00, 0x58, 0x45, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x79, 0xDF, 0xF6, 0x7A, 0x44, 0xBF, 0xF0, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x08, 0x10, 0x70, 0x3F, 0x71, 0x80, 0x5D, 0xD3, 0x08, 0x5D, 0xD0, 0x08, 0x62, 0xD0, 0x00, 0x51, 0xB6, 0x60, 0xD3, 0x2E, 0xB3, 0x80, 0x48, 0x60
+		}
+	},
+	{
+		82,
+		79,
+		0x39,
+		0x007D,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x7D, 0x49, 0xD7, 0x08, 0xA0, 0x09, 0x26, 0xB3, 0xF0, 0x2E, 0xB3, 0x00, 0x80, 0x08, 0x49, 0xD7, 0x20, 0xA0, 0x03, 0x80, 0xA6, 0x51, 0xB3, 0x21, 0x0E, 0xE0, 0x01, 0x80, 0x11, 0x80, 0x67, 0x80, 0x79, 0x80, 0x47, 0x80, 0x96, 0x80, 0x94, 0x80, 0x92, 0x80, 0x90, 0x80, 0x97, 0x5D, 0xD8, 0x21, 0xFE, 0x39, 0x48, 0xA0, 0x06, 0x62, 0xD7, 0x00, 0x80, 0x8A, 0x49, 0xD8, 0x01, 0xB0, 0x0F, 0x55, 0xBA, 0x69, 0xA3
+		}
+	},
+	{
+		83,
+		79,
+		0x39,
+		0x007E,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x7E, 0x02, 0x26, 0xB3, 0xF0, 0x2E, 0xB3, 0x02, 0x62, 0xD7, 0x10, 0x80, 0x77, 0x55, 0xBA, 0x01, 0x26, 0xB3, 0xF0, 0x2E, 0xB3, 0x06, 0x5F, 0xB5, 0xB4, 0x51, 0xB7, 0x02, 0xB5, 0x5C, 0x52, 0x00, 0x60, 0xD8, 0x76, 0xB5, 0x62, 0xD7, 0x14, 0x80, 0x5B, 0x51, 0xB8, 0x78, 0x3A, 0xB5, 0xC0, 0x0F, 0x51, 0xB7, 0x02, 0xB5, 0x5C, 0x52, 0x00, 0x60, 0xD8, 0x76, 0xB5, 0x2E, 0xB3, 0x20, 0x60, 0xD8, 0x62, 0x18, 0x02
+		}
+	},
+	{
+		84,
+		79,
+		0x39,
+		0x007F,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x7F, 0xD7, 0x04, 0x80, 0x3F, 0x5D, 0xD8, 0x3A, 0xB8, 0xD0, 0x2B, 0xA0, 0x29, 0x53, 0xB5, 0x53, 0xB4, 0x26, 0xB3, 0xF0, 0x2E, 0xB3, 0x04, 0x80, 0x18, 0x51, 0xB9, 0x78, 0x3A, 0xB5, 0xC0, 0x16, 0x51, 0xB7, 0x02, 0xB5, 0x5C, 0x5D, 0xD8, 0x54, 0x00, 0x2E, 0xB3, 0x10, 0x76, 0xB5, 0x80, 0x01, 0x62, 0xD7, 0x10, 0x80, 0x0F, 0x62, 0xD7, 0x00, 0x80, 0x0A, 0x26, 0xB3, 0xF0, 0x2E, 0xB3, 0x00, 0x55, 0xFC, 0xCB
+		}
+	},
+	{
+		85,
+		79,
+		0x39,
+		0x0080,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x80, 0xBA, 0x00, 0x18, 0x60, 0xD0, 0x18, 0x60, 0xD3, 0x20, 0x18, 0x7E, 0x62, 0xD0, 0x00, 0x70, 0xCF, 0x71, 0x10, 0x41, 0x04, 0x5F, 0x43, 0x05, 0xA0, 0x70, 0xCF, 0x26, 0xAF, 0x5F, 0x51, 0xAF, 0x60, 0x04, 0x55, 0xBA, 0x00, 0x90, 0x1F, 0x90, 0x24, 0x40, 0x40, 0x40, 0x40, 0x40, 0x50, 0x00, 0x53, 0xB4, 0x70, 0xCF, 0x71, 0x10, 0x43, 0x04, 0xA0, 0x43, 0x05, 0xA0, 0x70, 0xCF, 0x2E, 0xAF, 0xA0, 0xAC, 0x2C
+		}
+	},
+	{
+		86,
+		79,
+		0x39,
+		0x0081,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x81, 0x51, 0xAF, 0x60, 0x04, 0x7F, 0x41, 0xE0, 0x7F, 0x43, 0xE0, 0x80, 0x7F, 0x43, 0xD6, 0x31, 0x7F, 0x41, 0xE0, 0x7F, 0x41, 0xD6, 0xFE, 0x7F, 0x62, 0xD0, 0x00, 0x4F, 0x52, 0xFD, 0x53, 0xB8, 0x52, 0xFC, 0x53, 0xB9, 0x52, 0xFB, 0x53, 0xB7, 0x52, 0xFA, 0x53, 0xB6, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x76, 0xBB, 0xD0, 0x04, 0x55, 0xBB, 0xFF, 0x7E, 0x43, 0xE1, 0x01, 0x7F, 0x41, 0xE1, 0xFE, 0x7F, 0xB7, 0x43
+		}
+	},
+	{
+		87,
+		79,
+		0x39,
+		0x0082,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x82, 0x43, 0x23, 0x01, 0x7F, 0x54, 0x00, 0x70, 0xFE, 0x41, 0x23, 0xFE, 0x18, 0x60, 0x22, 0x18, 0x60, 0x23, 0x18, 0x70, 0x3F, 0x71, 0xC0, 0x7E, 0x30, 0x62, 0xD0, 0x00, 0x53, 0xF8, 0x5D, 0xF7, 0x08, 0x21, 0xC0, 0xB0, 0x07, 0x56, 0x01, 0x00, 0x55, 0xF8, 0x00, 0x51, 0xF8, 0x70, 0x3F, 0x71, 0x80, 0x60, 0xD3, 0x55, 0xFD, 0x01, 0x3C, 0xFD, 0x01, 0xB0, 0xAE, 0x70, 0xCF, 0x71, 0x10, 0x5D, 0xE0, 0xFE, 0xD2
+		}
+	},
+	{
+		88,
+		79,
+		0x39,
+		0x0083,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x83, 0x08, 0x21, 0xF8, 0x49, 0xFE, 0x08, 0xB0, 0x0A, 0x49, 0xFE, 0x10, 0xB0, 0x09, 0x29, 0x01, 0x80, 0x07, 0x29, 0x02, 0x80, 0x03, 0x29, 0x00, 0x60, 0xE0, 0x70, 0xCF, 0x80, 0x01, 0x65, 0xFD, 0x3C, 0xFD, 0x02, 0xB0, 0x84, 0x65, 0xFD, 0x70, 0xCF, 0x71, 0x10, 0x49, 0xE4, 0x08, 0xA0, 0x05, 0x70, 0xCF, 0x80, 0x20, 0x70, 0xCF, 0x52, 0x00, 0x53, 0xFA, 0x51, 0xFD, 0x39, 0x04, 0xB0, 0x69, 0x08, 0xF8, 0xC7
+		}
+	},
+	{
+		89,
+		79,
+		0x39,
+		0x0084,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x84, 0x10, 0x50, 0x03, 0x55, 0xF8, 0x3A, 0x7C, 0x00, 0x60, 0x20, 0x18, 0x53, 0xFD, 0x3C, 0xF8, 0x00, 0xA0, 0x09, 0x55, 0xFF, 0x00, 0x55, 0xFD, 0x10, 0x80, 0x37, 0x65, 0xFD, 0x52, 0x00, 0x53, 0xFA, 0x52, 0x02, 0x53, 0xFB, 0x52, 0x01, 0x60, 0xD4, 0x52, 0x05, 0x53, 0xFC, 0x55, 0xFE, 0x56, 0x51, 0xFD, 0x39, 0x08, 0xB0, 0x33, 0x08, 0x10, 0x50, 0x02, 0x55, 0xF8, 0x3A, 0x7C, 0x00, 0x60, 0x20, 0x70, 0xB8
+		}
+	},
+	{
+		90,
+		79,
+		0x39,
+		0x0085,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x85, 0x18, 0x53, 0xFD, 0x55, 0xFF, 0x01, 0x3C, 0xF8, 0x00, 0xA0, 0x04, 0x55, 0xFF, 0x00, 0x65, 0xFD, 0x3C, 0xFD, 0x10, 0xB0, 0x13, 0x18, 0x70, 0xCF, 0x71, 0x10, 0x60, 0xE0, 0x70, 0xCF, 0x65, 0xFD, 0x51, 0xFF, 0x3C, 0xFD, 0x20, 0xA0, 0x04, 0x30, 0x8F, 0xFE, 0x62, 0xD0, 0x00, 0x62, 0xD5, 0x00, 0x62, 0xD4, 0x00, 0x7E, 0x30, 0x30, 0x30, 0x51, 0xF8, 0x70, 0x3F, 0x71, 0x80, 0x60, 0xD3, 0x52, 0x35, 0x43
+		}
+	},
+	{
+		91,
+		79,
+		0x39,
+		0x0086,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x86, 0x02, 0x53, 0xFB, 0x52, 0x01, 0x60, 0xD5, 0x52, 0x03, 0x74, 0x53, 0xFD, 0x52, 0x04, 0x53, 0xFE, 0x50, 0x00, 0x6C, 0x00, 0x6A, 0x6C, 0x00, 0x6A, 0x6C, 0x00, 0x6A, 0x6C, 0x00, 0x6A, 0x6C, 0x00, 0x6A, 0x6C, 0x00, 0x6A, 0x6C, 0x00, 0x6A, 0x08, 0x52, 0x00, 0x5C, 0x18, 0x08, 0x28, 0x3F, 0xFB, 0x18, 0x75, 0xB0, 0x02, 0x74, 0x7A, 0xFE, 0xB0, 0x05, 0x7A, 0xFD, 0xA0, 0x0F, 0x3C, 0xFB, 0x00, 0x37, 0x48
+		}
+	},
+	{
+		92,
+		79,
+		0x39,
+		0x0087,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x87, 0xBF, 0xEB, 0x08, 0x5D, 0xD5, 0x74, 0x60, 0xD5, 0x18, 0x8F, 0xE2, 0x62, 0xD0, 0x00, 0x62, 0xD5, 0x00, 0x7E, 0x70, 0xBF, 0x62, 0xD0, 0x00, 0x47, 0x36, 0x40, 0xB0, 0x0F, 0x47, 0x36, 0x80, 0xA0, 0x0A, 0x26, 0x36, 0x3F, 0x51, 0x36, 0x3A, 0x0E, 0xA0, 0x01, 0x70, 0xBF, 0x51, 0x0E, 0xA1, 0x1A, 0x55, 0xBE, 0x00, 0x3C, 0x0E, 0x02, 0xC0, 0x04, 0x55, 0xBE, 0x01, 0x5F, 0x36, 0x0E, 0x62, 0xD4, 0xE5, 0xA5
+		}
+	},
+	{
+		93,
+		79,
+		0x39,
+		0x0088,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x88, 0x02, 0x62, 0xD5, 0x01, 0x55, 0x0E, 0x00, 0x55, 0x35, 0x99, 0x55, 0x34, 0xE0, 0x3E, 0x35, 0x53, 0x45, 0x3E, 0x35, 0x53, 0x44, 0x3E, 0x35, 0x53, 0x32, 0x3C, 0x45, 0x02, 0xC0, 0x94, 0x51, 0x4D, 0x11, 0x03, 0x3A, 0x45, 0xC0, 0x8C, 0x3C, 0x44, 0x02, 0xC0, 0x87, 0x51, 0x4C, 0x11, 0x03, 0x3A, 0x44, 0xC0, 0x7F, 0x62, 0xD3, 0x02, 0x58, 0x32, 0x52, 0xFE, 0x53, 0x23, 0x52, 0xFF, 0x53, 0x24, 0x10, 0xFC
+		}
+	},
+	{
+		94,
+		79,
+		0x39,
+		0x0089,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x89, 0x52, 0x00, 0x53, 0x25, 0x52, 0x01, 0x53, 0x26, 0x52, 0x02, 0x53, 0x27, 0x5B, 0x12, 0x4C, 0x5C, 0x52, 0xFE, 0x53, 0x1E, 0x52, 0xFF, 0x53, 0x1F, 0x52, 0x00, 0x53, 0x20, 0x52, 0x01, 0x53, 0x21, 0x52, 0x02, 0x53, 0x22, 0x5B, 0x12, 0x4C, 0x5C, 0x52, 0xFE, 0x53, 0x19, 0x52, 0xFF, 0x53, 0x1A, 0x52, 0x00, 0x53, 0x1B, 0x52, 0x01, 0x53, 0x1C, 0x52, 0x02, 0x53, 0x1D, 0x51, 0x32, 0x02, 0x4C, 0xF8, 0xCD
+		}
+	},
+	{
+		95,
+		79,
+		0x39,
+		0x008A,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x8A, 0x5C, 0x52, 0xFE, 0x53, 0x28, 0x52, 0xFF, 0x53, 0x29, 0x52, 0x00, 0x53, 0x2A, 0x52, 0x01, 0x53, 0x2B, 0x52, 0x02, 0x53, 0x2C, 0x5B, 0x02, 0x4C, 0x5C, 0x52, 0xFE, 0x53, 0x2D, 0x52, 0xFF, 0x53, 0x2E, 0x52, 0x00, 0x53, 0x2F, 0x52, 0x01, 0x53, 0x30, 0x52, 0x02, 0x53, 0x31, 0x90, 0x62, 0x80, 0x44, 0x7C, 0x25, 0x70, 0x90, 0x5B, 0x55, 0xBD, 0x00, 0x51, 0x45, 0xA0, 0x18, 0x51, 0x4D, 0x78, 0xB8, 0x4E
+		}
+	},
+	{
+		96,
+		79,
+		0x39,
+		0x008B,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x8B, 0x3A, 0x45, 0xA0, 0x11, 0x51, 0x44, 0xA0, 0x1A, 0x51, 0x4C, 0x78, 0x3A, 0x44, 0xA0, 0x13, 0x7C, 0x26, 0x0E, 0x80, 0x21, 0x51, 0x44, 0xA0, 0x17, 0x51, 0x4C, 0x78, 0x3A, 0x44, 0xA0, 0x10, 0x80, 0x11, 0x51, 0x45, 0xA0, 0x0A, 0x51, 0x4D, 0x78, 0x3A, 0x45, 0xA0, 0x03, 0x80, 0x04, 0x55, 0xBD, 0x01, 0x7C, 0x26, 0x94, 0x51, 0x0E, 0x3A, 0x43, 0xC0, 0x05, 0x50, 0xFF, 0x80, 0x0C, 0x7C, 0x23, 0x96, 0x0B
+		}
+	},
+	{
+		97,
+		79,
+		0x39,
+		0x008C,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x8C, 0x48, 0x7A, 0x36, 0x51, 0x36, 0xBF, 0x07, 0x51, 0x0E, 0x55, 0x36, 0x00, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x55, 0x13, 0x00, 0x51, 0x1F, 0x02, 0x20, 0x0E, 0x13, 0x00, 0x02, 0x21, 0x0E, 0x13, 0x00, 0x02, 0x24, 0x0E, 0x13, 0x00, 0x02, 0x25, 0x0E, 0x13, 0x00, 0x02, 0x26, 0x0E, 0x13, 0x00, 0x02, 0x29, 0x0E, 0x13, 0x00, 0x02, 0x2A, 0x0E, 0x13, 0x00, 0x02, 0x2B, 0x0E, 0x13, 0x00, 0x3C, 0x13, 0xFB, 0xD6
+		}
+	},
+	{
+		98,
+		79,
+		0x39,
+		0x008D,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x8D, 0x00, 0xA0, 0x03, 0x50, 0xFF, 0x53, 0xBC, 0x7F, 0x3C, 0xBE, 0x01, 0xB0, 0x23, 0x50, 0x00, 0x53, 0x19, 0x53, 0x1A, 0x53, 0x1B, 0x53, 0x1C, 0x53, 0x1D, 0x53, 0x1E, 0x53, 0x22, 0x53, 0x23, 0x53, 0x27, 0x53, 0x28, 0x53, 0x2C, 0x53, 0x2D, 0x53, 0x2E, 0x53, 0x2F, 0x53, 0x30, 0x53, 0x31, 0x62, 0xD5, 0x01, 0x06, 0x34, 0x03, 0x50, 0x00, 0x53, 0x0F, 0x53, 0x10, 0x53, 0x12, 0x53, 0x13, 0x62, 0xD5, 0x8B
+		}
+	},
+	{
+		99,
+		79,
+		0x39,
+		0x008E,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x8E, 0xD3, 0x00, 0x10, 0x51, 0x31, 0x57, 0x17, 0x03, 0x19, 0x0E, 0x13, 0x00, 0x79, 0xDF, 0xF9, 0x53, 0x14, 0x20, 0x51, 0xBC, 0x80, 0x08, 0x3C, 0x13, 0x00, 0xA0, 0x03, 0x50, 0xFF, 0x3F, 0x34, 0x51, 0x31, 0x02, 0x2C, 0x0E, 0x10, 0x00, 0x02, 0x27, 0x0E, 0x10, 0x00, 0x02, 0x22, 0x0E, 0x10, 0x00, 0x02, 0x1D, 0x0E, 0x10, 0x00, 0x12, 0x19, 0x1E, 0x10, 0x00, 0x12, 0x1E, 0x1E, 0x10, 0x00, 0x12, 0x8E, 0xFE
+		}
+	},
+	{
+		100,
+		79,
+		0x39,
+		0x008F,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x8F, 0x23, 0x1E, 0x10, 0x00, 0x12, 0x28, 0x1E, 0x10, 0x00, 0x12, 0x2D, 0x1E, 0x10, 0x00, 0x64, 0x6B, 0x10, 0x02, 0x30, 0x0E, 0x10, 0x00, 0x02, 0x2B, 0x0E, 0x10, 0x00, 0x02, 0x26, 0x0E, 0x10, 0x00, 0x02, 0x21, 0x0E, 0x10, 0x00, 0x02, 0x1C, 0x0E, 0x10, 0x00, 0x12, 0x1A, 0x1E, 0x10, 0x00, 0x12, 0x1F, 0x1E, 0x10, 0x00, 0x12, 0x24, 0x1E, 0x10, 0x00, 0x12, 0x29, 0x1E, 0x10, 0x00, 0x12, 0x2E, 0x29, 0x35
+		}
+	},
+	{
+		101,
+		79,
+		0x39,
+		0x0090,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x90, 0x1E, 0x10, 0x00, 0x53, 0x11, 0x7C, 0x11, 0x4D, 0x51, 0x44, 0x06, 0x12, 0x80, 0x0C, 0x11, 0x0E, 0x10, 0x00, 0x47, 0x10, 0x80, 0xA0, 0x0A, 0x55, 0x10, 0x00, 0x55, 0x11, 0x00, 0x55, 0x12, 0x00, 0x7C, 0x12, 0x26, 0x47, 0x42, 0x08, 0xA0, 0x36, 0x62, 0xD3, 0x01, 0x4D, 0x34, 0x51, 0x48, 0x3B, 0x00, 0xC0, 0x1E, 0xB0, 0x09, 0x51, 0x49, 0x3B, 0x01, 0xA0, 0x21, 0xC0, 0x14, 0x51, 0x48, 0x3A, 0x02, 0xE8
+		}
+	},
+	{
+		102,
+		79,
+		0x39,
+		0x0091,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x91, 0x4F, 0xB0, 0x07, 0x51, 0x49, 0x3A, 0x50, 0xA0, 0x13, 0x7A, 0x49, 0x1E, 0x48, 0x00, 0x80, 0x0C, 0x51, 0x48, 0x2A, 0x49, 0xA0, 0x06, 0x76, 0x49, 0x0E, 0x48, 0x00, 0x4D, 0x34, 0x51, 0x48, 0x3A, 0x4F, 0xC0, 0x0B, 0xB0, 0x13, 0x51, 0x49, 0x3A, 0x50, 0xC0, 0x03, 0xB0, 0x0B, 0x51, 0x48, 0x3F, 0x34, 0x51, 0x49, 0x3F, 0x34, 0x80, 0x09, 0x51, 0x4F, 0x3F, 0x34, 0x51, 0x50, 0x3F, 0x34, 0x50, 0x45, 0x6F
+		}
+	},
+	{
+		103,
+		79,
+		0x39,
+		0x0092,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x92, 0x00, 0x53, 0x10, 0x53, 0x12, 0x51, 0x2D, 0x02, 0x2E, 0x0E, 0x10, 0x00, 0x02, 0x2F, 0x0E, 0x10, 0x00, 0x02, 0x30, 0x0E, 0x10, 0x00, 0x02, 0x31, 0x0E, 0x10, 0x00, 0x12, 0x19, 0x1E, 0x10, 0x00, 0x12, 0x1A, 0x1E, 0x10, 0x00, 0x12, 0x1B, 0x1E, 0x10, 0x00, 0x12, 0x1C, 0x1E, 0x10, 0x00, 0x12, 0x1D, 0x1E, 0x10, 0x00, 0x64, 0x6B, 0x10, 0x02, 0x28, 0x0E, 0x10, 0x00, 0x02, 0x29, 0x0E, 0x10, 0xBB, 0x5C
+		}
+	},
+	{
+		104,
+		79,
+		0x39,
+		0x0093,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x93, 0x00, 0x02, 0x2A, 0x0E, 0x10, 0x00, 0x02, 0x2B, 0x0E, 0x10, 0x00, 0x02, 0x2C, 0x0E, 0x10, 0x00, 0x12, 0x1E, 0x1E, 0x10, 0x00, 0x12, 0x1F, 0x1E, 0x10, 0x00, 0x12, 0x20, 0x1E, 0x10, 0x00, 0x12, 0x21, 0x1E, 0x10, 0x00, 0x12, 0x22, 0x1E, 0x10, 0x00, 0x53, 0x11, 0x7C, 0x11, 0x4D, 0x51, 0x45, 0x06, 0x12, 0x80, 0x0C, 0x11, 0x0E, 0x10, 0x00, 0x47, 0x10, 0x80, 0xA0, 0x0A, 0x55, 0x10, 0x00, 0x4E, 0x83
+		}
+	},
+	{
+		105,
+		79,
+		0x39,
+		0x0094,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x94, 0x55, 0x11, 0x00, 0x55, 0x12, 0x00, 0x7C, 0x11, 0xB3, 0x47, 0x42, 0x08, 0xA0, 0x36, 0x62, 0xD3, 0x01, 0x4D, 0x34, 0x51, 0x48, 0x3B, 0x00, 0xC0, 0x1E, 0xB0, 0x09, 0x51, 0x49, 0x3B, 0x01, 0xA0, 0x21, 0xC0, 0x14, 0x51, 0x48, 0x3A, 0x51, 0xB0, 0x07, 0x51, 0x49, 0x3A, 0x52, 0xA0, 0x13, 0x7A, 0x49, 0x1E, 0x48, 0x00, 0x80, 0x0C, 0x51, 0x48, 0x2A, 0x49, 0xA0, 0x06, 0x76, 0x49, 0x0E, 0x48, 0x31, 0x4A
+		}
+	},
+	{
+		106,
+		79,
+		0x39,
+		0x0095,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x95, 0x00, 0x4D, 0x34, 0x51, 0x48, 0x3A, 0x51, 0xC0, 0x0B, 0xB0, 0x13, 0x51, 0x49, 0x3A, 0x52, 0xC0, 0x03, 0xB0, 0x0B, 0x51, 0x48, 0x3F, 0x34, 0x51, 0x49, 0x3F, 0x34, 0x80, 0x09, 0x51, 0x51, 0x3F, 0x34, 0x51, 0x52, 0x3F, 0x34, 0x62, 0xD3, 0x02, 0x76, 0x0E, 0x51, 0x0E, 0x55, 0xBC, 0x00, 0x7F, 0x55, 0x12, 0x00, 0x5F, 0x11, 0x45, 0x06, 0x11, 0xFE, 0x5F, 0x10, 0x44, 0x06, 0x10, 0xFE, 0x51, 0x97, 0x17
+		}
+	},
+	{
+		107,
+		79,
+		0x39,
+		0x0096,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x96, 0x32, 0x08, 0x51, 0x4C, 0x14, 0x32, 0x14, 0x32, 0x16, 0x32, 0x02, 0x55, 0x0F, 0x06, 0x7A, 0x0F, 0x51, 0x0F, 0xA0, 0x74, 0x47, 0x11, 0x80, 0xB0, 0x44, 0x51, 0x4D, 0x78, 0x3A, 0x11, 0xC0, 0x3D, 0x55, 0x13, 0x06, 0x7A, 0x13, 0x51, 0x13, 0xA0, 0x4F, 0x47, 0x10, 0x80, 0xB0, 0x1E, 0x51, 0x4C, 0x78, 0x3A, 0x10, 0xC0, 0x17, 0x58, 0x32, 0x62, 0xD3, 0x02, 0x52, 0x00, 0x58, 0x12, 0x62, 0xD3, 0x19, 0x1C
+		}
+	},
+	{
+		108,
+		79,
+		0x39,
+		0x0097,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x97, 0x00, 0x54, 0x19, 0x76, 0x12, 0x76, 0x10, 0x76, 0x32, 0x8F, 0xD9, 0x58, 0x12, 0x62, 0xD3, 0x00, 0x56, 0x19, 0x00, 0x75, 0x5A, 0x12, 0x76, 0x10, 0x76, 0x32, 0x8F, 0xC8, 0x58, 0x12, 0x62, 0xD3, 0x00, 0x50, 0x00, 0x54, 0x19, 0x75, 0x54, 0x19, 0x75, 0x54, 0x19, 0x75, 0x54, 0x19, 0x75, 0x54, 0x19, 0x75, 0x5A, 0x12, 0x06, 0x32, 0x05, 0x76, 0x11, 0x5F, 0x10, 0x44, 0x06, 0x10, 0xFE, 0x51, 0xA0, 0x2B
+		}
+	},
+	{
+		109,
+		79,
+		0x39,
+		0x0098,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x98, 0x4C, 0x11, 0x05, 0x04, 0x32, 0x8F, 0x88, 0x62, 0xD3, 0x02, 0x18, 0x53, 0x32, 0x7F, 0x62, 0xD3, 0x00, 0x3C, 0x45, 0x01, 0xB0, 0x1B, 0x57, 0x05, 0x52, 0x19, 0x6D, 0x6D, 0x6D, 0x21, 0x1F, 0x53, 0x0F, 0x6D, 0x21, 0x7F, 0x02, 0x0F, 0x54, 0x14, 0x75, 0x5B, 0x39, 0x0A, 0xBF, 0xEB, 0x80, 0x21, 0x51, 0x4D, 0x11, 0x02, 0x3A, 0x45, 0xB0, 0x19, 0x57, 0x0F, 0x52, 0x19, 0x6D, 0x6D, 0x6D, 0x21, 0x4A, 0x80
+		}
+	},
+	{
+		110,
+		79,
+		0x39,
+		0x0099,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x99, 0x1F, 0x53, 0x0F, 0x6D, 0x21, 0x7F, 0x02, 0x0F, 0x54, 0x1E, 0x75, 0x5B, 0x39, 0x14, 0xBF, 0xEB, 0x3C, 0x44, 0x01, 0xB0, 0x1D, 0x57, 0x01, 0x52, 0x19, 0x6D, 0x6D, 0x6D, 0x21, 0x1F, 0x53, 0x0F, 0x6D, 0x21, 0x7F, 0x02, 0x0F, 0x54, 0x18, 0x5B, 0x01, 0x05, 0x5C, 0x39, 0x15, 0xBF, 0xE9, 0x80, 0x23, 0x51, 0x4C, 0x11, 0x02, 0x3A, 0x44, 0xB0, 0x1B, 0x57, 0x03, 0x52, 0x19, 0x6D, 0x6D, 0x6D, 0xB7, 0x5B
+		}
+	},
+	{
+		111,
+		79,
+		0x39,
+		0x009A,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x9A, 0x21, 0x1F, 0x53, 0x0F, 0x6D, 0x21, 0x7F, 0x02, 0x0F, 0x54, 0x1A, 0x5B, 0x01, 0x05, 0x5C, 0x39, 0x17, 0xBF, 0xE9, 0x7F, 0x62, 0xD3, 0x00, 0x51, 0x45, 0xB0, 0x94, 0x55, 0x19, 0x04, 0x55, 0x1A, 0x10, 0x55, 0x1B, 0x10, 0x55, 0x1C, 0x10, 0x55, 0x1D, 0x04, 0x51, 0xBD, 0xB0, 0x23, 0x51, 0x25, 0xA0, 0x0D, 0x51, 0x24, 0x5F, 0x12, 0x26, 0x5F, 0x14, 0x25, 0x92, 0x47, 0x53, 0x1B, 0x51, 0x2A, 0x43, 0x74
+		}
+	},
+	{
+		112,
+		79,
+		0x39,
+		0x009B,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x9B, 0xA0, 0x0F, 0x51, 0x29, 0x5F, 0x12, 0x2B, 0x5F, 0x14, 0x2A, 0x92, 0x37, 0x53, 0x1A, 0x53, 0x1C, 0x57, 0x04, 0x52, 0x23, 0x53, 0x4A, 0x52, 0x19, 0x53, 0x4B, 0x7C, 0x29, 0x2D, 0x50, 0x04, 0x6E, 0x4A, 0x6E, 0x4B, 0x78, 0xBF, 0xFA, 0x52, 0x28, 0x14, 0x4B, 0x1E, 0x4A, 0x00, 0x47, 0x4A, 0x80, 0xA0, 0x07, 0x55, 0x4A, 0x00, 0x55, 0x4B, 0x00, 0x47, 0x4A, 0x7F, 0xA0, 0x04, 0x55, 0x4B, 0xFF, 0xD0, 0x8F
+		}
+	},
+	{
+		113,
+		79,
+		0x39,
+		0x009C,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x9C, 0x51, 0x4B, 0x3C, 0xAD, 0x02, 0xA0, 0x0D, 0x3C, 0xAD, 0x03, 0xA0, 0x03, 0x80, 0x10, 0x6D, 0x21, 0x7F, 0x80, 0x0B, 0x6D, 0x21, 0x7F, 0x53, 0x4B, 0x6D, 0x21, 0x7F, 0x02, 0x4B, 0x54, 0x1E, 0x6D, 0x6D, 0x6D, 0x21, 0x1F, 0x54, 0x19, 0x6D, 0x21, 0x7F, 0x05, 0x19, 0x79, 0xDF, 0xA5, 0x51, 0x4D, 0x11, 0x01, 0x3A, 0x45, 0xB0, 0x94, 0x55, 0x2D, 0x04, 0x55, 0x2E, 0x10, 0x55, 0x2F, 0x10, 0x55, 0xF3, 0xD6
+		}
+	},
+	{
+		114,
+		79,
+		0x39,
+		0x009D,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x9D, 0x30, 0x10, 0x55, 0x31, 0x04, 0x51, 0xBD, 0xB0, 0x23, 0x51, 0x25, 0xA0, 0x0D, 0x51, 0x24, 0x5F, 0x12, 0x26, 0x5F, 0x14, 0x25, 0x91, 0xAC, 0x53, 0x2F, 0x51, 0x20, 0xA0, 0x0F, 0x51, 0x1F, 0x5F, 0x12, 0x21, 0x5F, 0x14, 0x20, 0x91, 0x9C, 0x53, 0x2E, 0x53, 0x30, 0x57, 0x04, 0x52, 0x23, 0x53, 0x4A, 0x52, 0x2D, 0x53, 0x4B, 0x7C, 0x29, 0x2D, 0x50, 0x04, 0x6E, 0x4A, 0x6E, 0x4B, 0x78, 0xBF, 0x6F, 0xCF
+		}
+	},
+	{
+		115,
+		79,
+		0x39,
+		0x009E,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x9E, 0xFA, 0x52, 0x1E, 0x14, 0x4B, 0x1E, 0x4A, 0x00, 0x47, 0x4A, 0x80, 0xA0, 0x07, 0x55, 0x4A, 0x00, 0x55, 0x4B, 0x00, 0x47, 0x4A, 0x7F, 0xA0, 0x04, 0x55, 0x4B, 0xFF, 0x51, 0x4B, 0x3C, 0xAD, 0x02, 0xA0, 0x0D, 0x3C, 0xAD, 0x03, 0xA0, 0x03, 0x80, 0x10, 0x6D, 0x21, 0x7F, 0x80, 0x0B, 0x6D, 0x21, 0x7F, 0x53, 0x4B, 0x6D, 0x21, 0x7F, 0x02, 0x4B, 0x54, 0x28, 0x6D, 0x6D, 0x6D, 0x21, 0x1F, 0x54, 0xC2, 0x76
+		}
+	},
+	{
+		116,
+		79,
+		0x39,
+		0x009F,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x9F, 0x2D, 0x6D, 0x21, 0x7F, 0x05, 0x2D, 0x79, 0xDF, 0xA5, 0x3C, 0x44, 0x00, 0xB0, 0x97, 0x55, 0x19, 0x04, 0x55, 0x1E, 0x10, 0x55, 0x23, 0x10, 0x55, 0x28, 0x10, 0x55, 0x2D, 0x04, 0x51, 0xBD, 0xB0, 0x23, 0x51, 0x25, 0xA0, 0x0D, 0x51, 0x20, 0x5F, 0x12, 0x2A, 0x5F, 0x14, 0x25, 0x91, 0x14, 0x53, 0x23, 0x51, 0x26, 0xA0, 0x0F, 0x51, 0x21, 0x5F, 0x12, 0x2B, 0x5F, 0x14, 0x26, 0x91, 0x04, 0x53, 0x38, 0x63
+		}
+	},
+	{
+		117,
+		79,
+		0x39,
+		0x00A0,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xA0, 0x1E, 0x53, 0x28, 0x57, 0x14, 0x52, 0x1B, 0x53, 0x4A, 0x52, 0x19, 0x53, 0x4B, 0x7C, 0x29, 0x2D, 0x50, 0x04, 0x6E, 0x4A, 0x6E, 0x4B, 0x78, 0xBF, 0xFA, 0x52, 0x1C, 0x14, 0x4B, 0x1E, 0x4A, 0x00, 0x47, 0x4A, 0x80, 0xA0, 0x07, 0x55, 0x4A, 0x00, 0x55, 0x4B, 0x00, 0x47, 0x4A, 0x7F, 0xA0, 0x04, 0x55, 0x4B, 0xFF, 0x51, 0x4B, 0x3C, 0xAD, 0x02, 0xA0, 0x0D, 0x3C, 0xAD, 0x03, 0xA0, 0x03, 0x80, 0xA1, 0x36
+		}
+	},
+	{
+		118,
+		79,
+		0x39,
+		0x00A1,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xA1, 0x10, 0x6D, 0x21, 0x7F, 0x80, 0x0B, 0x6D, 0x21, 0x7F, 0x53, 0x4B, 0x6D, 0x21, 0x7F, 0x02, 0x4B, 0x54, 0x1A, 0x6D, 0x6D, 0x6D, 0x21, 0x1F, 0x54, 0x19, 0x6D, 0x21, 0x7F, 0x05, 0x19, 0x5B, 0x11, 0x05, 0x5C, 0xDF, 0xA2, 0x51, 0x4C, 0x11, 0x01, 0x3A, 0x44, 0xB0, 0x97, 0x55, 0x1D, 0x04, 0x55, 0x22, 0x10, 0x55, 0x27, 0x10, 0x55, 0x2C, 0x10, 0x55, 0x31, 0x04, 0x51, 0xBD, 0xB0, 0x23, 0x51, 0xD2, 0x99
+		}
+	},
+	{
+		119,
+		79,
+		0x39,
+		0x00A2,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xA2, 0x25, 0xA0, 0x0D, 0x51, 0x20, 0x5F, 0x12, 0x2A, 0x5F, 0x14, 0x25, 0x90, 0x76, 0x53, 0x27, 0x51, 0x24, 0xA0, 0x0F, 0x51, 0x1F, 0x5F, 0x12, 0x29, 0x5F, 0x14, 0x24, 0x90, 0x66, 0x53, 0x22, 0x53, 0x2C, 0x57, 0x14, 0x52, 0x1B, 0x53, 0x4A, 0x52, 0x1D, 0x53, 0x4B, 0x7C, 0x29, 0x2D, 0x50, 0x04, 0x6E, 0x4A, 0x6E, 0x4B, 0x78, 0xBF, 0xFA, 0x52, 0x1A, 0x14, 0x4B, 0x1E, 0x4A, 0x00, 0x47, 0x4A, 0xB3, 0x5C
+		}
+	},
+	{
+		120,
+		79,
+		0x39,
+		0x00A3,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xA3, 0x80, 0xA0, 0x07, 0x55, 0x4A, 0x00, 0x55, 0x4B, 0x00, 0x47, 0x4A, 0x7F, 0xA0, 0x04, 0x55, 0x4B, 0xFF, 0x51, 0x4B, 0x3C, 0xAD, 0x02, 0xA0, 0x0D, 0x3C, 0xAD, 0x03, 0xA0, 0x03, 0x80, 0x10, 0x6D, 0x21, 0x7F, 0x80, 0x0B, 0x6D, 0x21, 0x7F, 0x53, 0x4B, 0x6D, 0x21, 0x7F, 0x02, 0x4B, 0x54, 0x1C, 0x6D, 0x6D, 0x6D, 0x21, 0x1F, 0x54, 0x1D, 0x6D, 0x21, 0x7F, 0x05, 0x1D, 0x5B, 0x11, 0x05, 0x5C, 0x0D, 0x11
+		}
+	},
+	{
+		121,
+		79,
+		0x39,
+		0x00A4,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xA4, 0xDF, 0xA2, 0x7F, 0x55, 0x11, 0x00, 0x04, 0x12, 0x0E, 0x11, 0x00, 0x65, 0x12, 0x6B, 0x11, 0x65, 0x12, 0x6B, 0x11, 0x65, 0x12, 0x6B, 0x11, 0x65, 0x12, 0x6B, 0x11, 0x55, 0x10, 0x00, 0x55, 0x13, 0x00, 0x7C, 0x11, 0x4D, 0x51, 0x12, 0x39, 0x10, 0xD0, 0x03, 0x50, 0x10, 0x7F, 0x12, 0x4B, 0x55, 0x10, 0x08, 0x47, 0x4B, 0x01, 0xA0, 0x03, 0x02, 0x4A, 0x6D, 0x6E, 0x4B, 0x7A, 0x10, 0xBF, 0xF3, 0x1A, 0x2C
+		}
+	},
+	{
+		122,
+		79,
+		0x39,
+		0x00A5,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xA5, 0x53, 0x4A, 0x7F, 0x62, 0xD0, 0x00, 0x3C, 0x0E, 0x02, 0xC0, 0x0E, 0x55, 0x36, 0x00, 0x90, 0x09, 0x47, 0x36, 0x40, 0xA0, 0x04, 0x7C, 0x14, 0x1F, 0x7F, 0x70, 0xBF, 0x62, 0xD4, 0x02, 0x62, 0xD3, 0x02, 0x50, 0x00, 0x53, 0x32, 0x53, 0x35, 0x53, 0x44, 0x53, 0x45, 0x55, 0x34, 0x99, 0x3E, 0x34, 0x53, 0x19, 0x3E, 0x34, 0x53, 0x1A, 0x3E, 0x34, 0x53, 0x1B, 0x76, 0x45, 0x51, 0x45, 0x3A, 0x0E, 0x9D, 0x33
+		}
+	},
+	{
+		123,
+		79,
+		0x39,
+		0x00A6,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xA6, 0xD1, 0x1F, 0x3E, 0x34, 0x53, 0x1C, 0x3E, 0x34, 0x53, 0x1D, 0x3E, 0x34, 0x53, 0x1E, 0x51, 0x19, 0x12, 0x1C, 0xD0, 0x03, 0x73, 0x74, 0x53, 0x1F, 0x51, 0x1A, 0x12, 0x1D, 0xD0, 0x03, 0x73, 0x74, 0x53, 0x20, 0x51, 0x1F, 0xA0, 0x07, 0x39, 0x02, 0xA0, 0x03, 0x80, 0x2D, 0x51, 0x20, 0xA0, 0x05, 0x39, 0x02, 0xB0, 0x25, 0x51, 0x19, 0x3A, 0x1C, 0xC0, 0x0B, 0xA0, 0x11, 0x51, 0x1B, 0x12, 0x4C, 0xEF, 0xD8
+		}
+	},
+	{
+		124,
+		79,
+		0x39,
+		0x00A7,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xA7, 0x53, 0x21, 0x80, 0x0C, 0x51, 0x1B, 0x02, 0x4C, 0x53, 0x21, 0x80, 0x04, 0x5F, 0x21, 0x1B, 0x51, 0x1A, 0x12, 0x1D, 0x67, 0x14, 0x21, 0x80, 0x8D, 0x3C, 0x1F, 0x02, 0xB0, 0x41, 0x3C, 0x20, 0x01, 0xB0, 0x3C, 0x51, 0x19, 0x3A, 0x1C, 0xC0, 0x0B, 0xA0, 0x11, 0x51, 0x1B, 0x12, 0x4C, 0x53, 0x21, 0x80, 0x0C, 0x51, 0x1B, 0x02, 0x4C, 0x53, 0x21, 0x80, 0x04, 0x5F, 0x21, 0x1B, 0x51, 0x1A, 0x3A, 0x1F, 0x39
+		}
+	},
+	{
+		125,
+		79,
+		0x39,
+		0x00A8,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xA8, 0x1D, 0xC0, 0x0E, 0x58, 0x21, 0x52, 0x00, 0x79, 0x3B, 0x00, 0xD0, 0x10, 0x7A, 0x21, 0x80, 0x0C, 0x58, 0x21, 0x52, 0x00, 0x75, 0x3B, 0x00, 0xD0, 0x03, 0x76, 0x21, 0x80, 0x48, 0x3C, 0x1F, 0x01, 0xB0, 0x41, 0x3C, 0x20, 0x02, 0xB0, 0x3C, 0x51, 0x1A, 0x3A, 0x1D, 0xC0, 0x08, 0x51, 0x1B, 0x78, 0x53, 0x21, 0x80, 0x06, 0x51, 0x1B, 0x74, 0x53, 0x21, 0x51, 0x19, 0x3A, 0x1C, 0xC0, 0x0B, 0xA0, 0x9C, 0x34
+		}
+	},
+	{
+		126,
+		79,
+		0x39,
+		0x00A9,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xA9, 0x11, 0x51, 0x21, 0x12, 0x4C, 0x53, 0x1F, 0x80, 0x0C, 0x51, 0x21, 0x02, 0x4C, 0x53, 0x1F, 0x80, 0x04, 0x55, 0x1F, 0x00, 0x58, 0x21, 0x52, 0x00, 0x58, 0x1F, 0x3B, 0x00, 0xD0, 0x03, 0x5A, 0x21, 0x80, 0x03, 0x8F, 0x17, 0x58, 0x1B, 0x52, 0x00, 0x58, 0x21, 0x13, 0x00, 0xCF, 0x0D, 0x3A, 0x18, 0xDF, 0x09, 0x58, 0x1E, 0x52, 0x00, 0x58, 0x21, 0x13, 0x00, 0xCE, 0xFF, 0x3A, 0x18, 0xDE, 0xFB, 0xB0, 0x5D
+		}
+	},
+	{
+		127,
+		79,
+		0x39,
+		0x00AA,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xAA, 0x58, 0x1B, 0x52, 0x00, 0x58, 0x1E, 0x3B, 0x00, 0xD0, 0x0A, 0x52, 0x00, 0x01, 0x01, 0x55, 0x36, 0x40, 0x80, 0x06, 0x01, 0x01, 0x55, 0x36, 0x40, 0x58, 0x21, 0x54, 0x00, 0x76, 0x32, 0x8E, 0xDB, 0x76, 0x44, 0x5F, 0x45, 0x44, 0x06, 0x35, 0x03, 0x51, 0x35, 0x55, 0x34, 0x99, 0x04, 0x34, 0x51, 0x44, 0x3A, 0x0E, 0xCE, 0xBA, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x62, 0xD0, 0x00, 0x70, 0xCF, 0x71, 0x06, 0x0A
+		}
+	},
+	{
+		128,
+		79,
+		0x39,
+		0x00AB,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xAB, 0x20, 0x51, 0x9A, 0x60, 0xA0, 0x51, 0x9C, 0x60, 0xA2, 0x51, 0x9B, 0x60, 0xA1, 0x51, 0x9E, 0x60, 0xC7, 0x51, 0x9D, 0x60, 0xA4, 0x70, 0xCF, 0x7F, 0x62, 0xD0, 0x00, 0x62, 0xD5, 0x00, 0x62, 0xD4, 0x00, 0x62, 0xD3, 0x00, 0x55, 0xFA, 0x00, 0x50, 0x06, 0x55, 0xF8, 0x3A, 0x7C, 0x00, 0x60, 0x3C, 0xF8, 0x05, 0xB0, 0x12, 0x70, 0xCF, 0x71, 0x20, 0x62, 0xA6, 0x00, 0x71, 0x30, 0x62, 0x1B, 0x30, 0xAA, 0x53
+		}
+	},
+	{
+		129,
+		79,
+		0x39,
+		0x00AC,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xAC, 0x43, 0x1B, 0x40, 0x70, 0xCF, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x62, 0xD0, 0x03, 0x51, 0xE1, 0x54, 0x01, 0x51, 0xE0, 0x54, 0x00, 0x38, 0xFE, 0x20, 0x7F, 0x7F, 0x7F, 0x10, 0x4F, 0x20, 0x7F, 0x10, 0x4F, 0x20, 0x7F, 0x62, 0xD0, 0x00, 0x26, 0xAF, 0xFD, 0x7C, 0x72, 0x51, 0x26, 0xAE, 0xFB, 0x51, 0xAE, 0x60, 0x00, 0x7F, 0x10, 0x4F, 0x20, 0x7F, 0x10, 0x4F, 0x20, 0x7F, 0x10, 0x4F, 0x20, 0x7F, 0xEF, 0xDE
+		}
+	},
+	{
+		130,
+		79,
+		0x39,
+		0x00AD,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xAD, 0x10, 0x4F, 0x20, 0x7F, 0x10, 0x4F, 0x20, 0x7F, 0x10, 0x4F, 0x20, 0x7F, 0x10, 0x4F, 0x62, 0xD0, 0x00, 0x51, 0xAF, 0x29, 0x02, 0x7C, 0x6F, 0x20, 0x62, 0xD0, 0x00, 0x51, 0xAE, 0x29, 0x04, 0x53, 0xAE, 0x51, 0xAE, 0x60, 0x00, 0x20, 0x7F, 0x7F, 0x7F, 0x08, 0x62, 0xD0, 0x00, 0x55, 0xFA, 0x00, 0x62, 0xD5, 0x00, 0x62, 0xD4, 0x00, 0x4F, 0x5B, 0x01, 0x03, 0x53, 0xF9, 0x55, 0xF8, 0x3A, 0x50, 0xE3, 0xC7
+		}
+	},
+	{
+		131,
+		79,
+		0x39,
+		0x00AE,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xAE, 0x06, 0x00, 0x20, 0x70, 0xBF, 0x62, 0xD3, 0x00, 0x52, 0xF8, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x62, 0xD0, 0x00, 0x55, 0xFA, 0x00, 0x62, 0xD5, 0x00, 0x62, 0xD4, 0x00, 0x4F, 0x5B, 0x01, 0x03, 0x53, 0xF9, 0x55, 0xF8, 0x3A, 0x50, 0x06, 0x00, 0x7F, 0x11, 0x04, 0x4B, 0xD0, 0x04, 0x78, 0xC0, 0x09, 0x3A, 0x80, 0x40, 0x79, 0x19, 0x00, 0xDF, 0xF9, 0x7F, 0x71, 0x40, 0xA0, 0x05, 0x70, 0xCF, 0x71, 0xD5, 0xAC
+		}
+	},
+	{
+		132,
+		79,
+		0x39,
+		0x00AF,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xAF, 0x10, 0x5E, 0x00, 0x70, 0xCF, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x56, 0x00, 0x00, 0x80, 0x13, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0xC0, 0x7C, 0x6F, 0x54, 0x52, 0x00, 0x3F, 0xE8, 0x77, 0x00, 0x3D, 0x00, 0x04, 0xCF, 0xEA, 0x62, 0xD0, 0x04, 0x55, 0xB6, 0x00, 0x62, 0xD0, 0x04, 0x55, 0xB5, 0x00, 0x7C, 0x73, 0x74, 0x38, 0xFF, 0x20, 0x7F, 0x7F, 0x10, 0x4F, 0xBC, 0x7B
+		}
+	},
+	{
+		133,
+		79,
+		0x39,
+		0x00B0,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xB0, 0x38, 0x01, 0x10, 0x7C, 0x11, 0x47, 0x62, 0xD0, 0x00, 0x20, 0x54, 0x00, 0x50, 0x0F, 0x08, 0x10, 0x7C, 0x2B, 0x38, 0x38, 0xFE, 0x52, 0x00, 0x62, 0xD0, 0x00, 0x38, 0xFF, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x56, 0x01, 0x00, 0x9F, 0xD7, 0x62, 0xD0, 0x00, 0x54, 0x00, 0x52, 0x00, 0x08, 0x7C, 0x47, 0x34, 0x38, 0xFF, 0x52, 0x00, 0x08, 0x90, 0x46, 0x52, 0x00, 0x08, 0x62, 0xD0, 0x04, 0x51, 0x2E, 0x60
+		}
+	},
+	{
+		134,
+		79,
+		0x39,
+		0x00B1,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xB1, 0xB5, 0x08, 0x7C, 0x3A, 0x9B, 0x38, 0xFD, 0x62, 0xD0, 0x00, 0x54, 0x01, 0x5A, 0xE8, 0x06, 0xE8, 0x01, 0x50, 0x0F, 0x08, 0x51, 0xE8, 0x08, 0x52, 0x00, 0x08, 0x91, 0x47, 0x62, 0xD0, 0x00, 0x5A, 0xE8, 0x06, 0xE8, 0x01, 0x50, 0x0F, 0x08, 0x51, 0xE8, 0x08, 0x7C, 0x2B, 0x3C, 0x38, 0xFB, 0x52, 0x00, 0x62, 0xD0, 0x04, 0x53, 0xB5, 0x52, 0x01, 0x62, 0xD0, 0x00, 0x38, 0xFE, 0x20, 0x7F, 0x10, 0xF0, 0xE5
+		}
+	},
+	{
+		135,
+		79,
+		0x39,
+		0x00B2,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xB2, 0x4F, 0x38, 0x06, 0x50, 0x04, 0x3B, 0xFC, 0xD0, 0x04, 0x56, 0xFC, 0x04, 0x56, 0x05, 0x00, 0x56, 0x04, 0x00, 0x80, 0x67, 0x56, 0x02, 0xE0, 0x56, 0x01, 0x01, 0x56, 0x00, 0x00, 0x80, 0x23, 0x7C, 0x6F, 0x4C, 0x52, 0x01, 0x7C, 0x70, 0xCD, 0x7C, 0x6F, 0x44, 0x06, 0xE8, 0xC4, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x3B, 0x03, 0xB0, 0x03, 0x80, 0x0F, 0x07, 0x02, 0x08, 0x0F, 0x01, 0x00, 0x77, 0x09, 0x18
+		}
+	},
+	{
+		136,
+		79,
+		0x39,
+		0x00B3,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xB3, 0x00, 0x52, 0x00, 0x3B, 0xFC, 0xCF, 0xD9, 0x52, 0x00, 0x3B, 0xFC, 0xA0, 0x2C, 0x62, 0xD0, 0x00, 0x7C, 0x6F, 0x18, 0x06, 0xE8, 0xC0, 0x7C, 0x6F, 0x54, 0x52, 0x00, 0x3F, 0xE8, 0x7C, 0x6F, 0x18, 0x06, 0xE8, 0xC4, 0x7C, 0x6F, 0x54, 0x52, 0x03, 0x3F, 0xE8, 0x52, 0x02, 0x53, 0xE8, 0x52, 0x01, 0x60, 0xD5, 0x50, 0xFF, 0x3F, 0xE8, 0x77, 0x05, 0x77, 0x04, 0x62, 0xD0, 0x04, 0x52, 0x04, 0x3A, 0xDB, 0xBD
+		}
+	},
+	{
+		137,
+		79,
+		0x39,
+		0x00B4,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xB4, 0xB6, 0xCF, 0x92, 0x52, 0x05, 0x62, 0xD0, 0x04, 0x53, 0xB6, 0x3D, 0x05, 0x04, 0xD0, 0x55, 0x56, 0x02, 0xE0, 0x56, 0x01, 0x01, 0x56, 0x00, 0x00, 0x80, 0x44, 0x7C, 0x6F, 0x4C, 0x52, 0x01, 0x7C, 0x70, 0xCD, 0x3D, 0x03, 0xFF, 0xA0, 0x2F, 0x62, 0xD0, 0x04, 0x51, 0xB6, 0x7C, 0x70, 0x0E, 0x06, 0xE8, 0xC0, 0x7C, 0x6F, 0x54, 0x52, 0x00, 0x7C, 0x72, 0xE6, 0x7C, 0x70, 0x0E, 0x06, 0xE8, 0xC4, 0x09, 0x1A
+		}
+	},
+	{
+		138,
+		79,
+		0x39,
+		0x00B5,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xB5, 0x7C, 0x6F, 0x54, 0x52, 0x03, 0x7C, 0x72, 0xE6, 0x01, 0x01, 0x53, 0xB6, 0x62, 0xD0, 0x00, 0x39, 0x04, 0xC0, 0x03, 0x80, 0x0F, 0x07, 0x02, 0x08, 0x0F, 0x01, 0x00, 0x77, 0x00, 0x52, 0x00, 0x3B, 0xFC, 0xCF, 0xB8, 0x56, 0x04, 0x00, 0x80, 0x32, 0x62, 0xD0, 0x00, 0x7C, 0x6F, 0x44, 0x06, 0xE8, 0xC4, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x53, 0xE9, 0x7C, 0x72, 0x31, 0x06, 0xE6, 0xC0, 0x0E, 0x76, 0xF5
+		}
+	},
+	{
+		139,
+		79,
+		0x39,
+		0x00B6,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xB6, 0xE7, 0x03, 0x51, 0xE7, 0x60, 0xD4, 0x3E, 0xE6, 0x7C, 0x6E, 0xB6, 0x7C, 0x70, 0x1E, 0x06, 0xE6, 0xE0, 0x0E, 0xE7, 0x01, 0x7C, 0x6D, 0xEA, 0x77, 0x04, 0x52, 0x04, 0x3B, 0x05, 0xCF, 0xCA, 0x38, 0xFA, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x3D, 0xFC, 0x00, 0xB0, 0x06, 0x7C, 0x73, 0x74, 0x80, 0x28, 0x90, 0x29, 0x54, 0x00, 0x3D, 0x00, 0x00, 0xA0, 0x1F, 0x62, 0xD0, 0x04, 0x3C, 0xB4, 0x00, 0xF5, 0xF4
+		}
+	},
+	{
+		140,
+		79,
+		0x39,
+		0x00B7,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xB7, 0xB0, 0x10, 0x62, 0xD0, 0x00, 0x52, 0xFB, 0x53, 0xE8, 0x52, 0xFA, 0x60, 0xD5, 0x50, 0x01, 0x3F, 0xE8, 0x52, 0x00, 0x62, 0xD0, 0x04, 0x53, 0xB4, 0x38, 0xFF, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x7C, 0x6F, 0xC9, 0x80, 0x22, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0x00, 0x0E, 0xE9, 0x02, 0x7C, 0x6D, 0x83, 0x53, 0xE9, 0x62, 0xD0, 0x00, 0x51, 0x16, 0x62, 0xD0, 0x00, 0x3A, 0xE9, 0x04, 0x13
+		}
+	},
+	{
+		141,
+		79,
+		0x39,
+		0x00B8,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xB8, 0xD0, 0x03, 0x77, 0x01, 0x77, 0x00, 0x3D, 0x00, 0x99, 0xD0, 0x07, 0x50, 0x28, 0x3B, 0x01, 0xDF, 0xD5, 0x50, 0x28, 0x3B, 0x01, 0xD0, 0x08, 0x62, 0xD0, 0x00, 0x50, 0x10, 0x80, 0x06, 0x62, 0xD0, 0x00, 0x50, 0x00, 0x38, 0xFE, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x7C, 0x6F, 0x2F, 0xB0, 0x0A, 0x3D, 0xFC, 0x00, 0xA0, 0x4E, 0x91, 0x78, 0x80, 0x4A, 0x3D, 0x00, 0x10, 0xB0, 0x03, 0x80, 0x43, 0xFB, 0x02
+		}
+	},
+	{
+		142,
+		79,
+		0x39,
+		0x00B9,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xB9, 0x3D, 0x00, 0x20, 0xB0, 0x2B, 0x62, 0xD0, 0x04, 0x51, 0xB5, 0x21, 0x0F, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x62, 0xD0, 0x04, 0x51, 0xB4, 0x21, 0xF0, 0x62, 0xD0, 0x00, 0x2A, 0xE9, 0x62, 0xD0, 0x03, 0x53, 0x9A, 0x51, 0x9A, 0x08, 0x50, 0x02, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x80, 0x14, 0x3D, 0x00, 0x30, 0xB0, 0x05, 0x90, 0x10, 0x80, 0x0B, 0x62, 0xD0, 0x03, 0x47, 0x99, 0x40, 0xA0, 0x03, 0xDB, 0xC3
+		}
+	},
+	{
+		143,
+		79,
+		0x39,
+		0x00BA,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xBA, 0x90, 0xE4, 0x38, 0xFF, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x04, 0x62, 0xD0, 0x04, 0x51, 0xB5, 0x08, 0x50, 0x23, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x62, 0xD0, 0x04, 0x50, 0x04, 0x3A, 0xB5, 0xC0, 0xB7, 0x56, 0x03, 0x00, 0x80, 0xA9, 0x62, 0xD0, 0x00, 0x52, 0x03, 0x7C, 0x6D, 0x8A, 0x51, 0xE8, 0x01, 0xE0, 0x54, 0x02, 0x51, 0xE9, 0x09, 0x01, 0x54, 0x01, 0x52, 0x03, 0x64, 0x64, 0x64, 0x01, 0x8B, 0x24
+		}
+	},
+	{
+		144,
+		79,
+		0x39,
+		0x00BB,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xBB, 0x03, 0x54, 0x00, 0x7C, 0x6F, 0xF6, 0x08, 0x52, 0x00, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x52, 0x02, 0x01, 0x01, 0x53, 0xE8, 0x52, 0x01, 0x09, 0x00, 0x7C, 0x6D, 0xE3, 0x3E, 0xE8, 0x53, 0xE8, 0x51, 0xE9, 0x08, 0x51, 0xE8, 0x08, 0x52, 0x00, 0x01, 0x01, 0x08, 0x7C, 0x32, 0x52, 0x62, 0xD0, 0x00, 0x52, 0x02, 0x01, 0x03, 0x7C, 0x6E, 0xA3, 0x08, 0x52, 0x00, 0x01, 0x03, 0x2A, 0x63
+		}
+	},
+	{
+		145,
+		79,
+		0x39,
+		0x00BC,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xBC, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFB, 0x62, 0xD0, 0x00, 0x52, 0x02, 0x01, 0x04, 0x53, 0xE8, 0x52, 0x01, 0x09, 0x00, 0x7C, 0x6D, 0xE3, 0x3E, 0xE8, 0x53, 0xE8, 0x51, 0xE9, 0x08, 0x51, 0xE8, 0x08, 0x52, 0x00, 0x01, 0x04, 0x08, 0x7C, 0x32, 0x52, 0x62, 0xD0, 0x00, 0x52, 0x02, 0x01, 0x06, 0x53, 0xE8, 0x52, 0x01, 0x09, 0x00, 0x7C, 0x6D, 0xE3, 0x3E, 0xE8, 0x53, 0xE8, 0x51, 0xE9, 0x08, 0x51, 0x0A, 0x24
+		}
+	},
+	{
+		146,
+		79,
+		0x39,
+		0x00BD,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xBD, 0xE8, 0x08, 0x52, 0x00, 0x01, 0x06, 0x08, 0x7C, 0x32, 0x52, 0x38, 0xFA, 0x77, 0x03, 0x62, 0xD0, 0x04, 0x52, 0x03, 0x3A, 0xB5, 0xCF, 0x50, 0x50, 0x00, 0x08, 0x50, 0x25, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x38, 0xFC, 0x20, 0x7F, 0x62, 0xD0, 0x04, 0x51, 0xB5, 0x21, 0x0F, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x62, 0xD0, 0x04, 0x51, 0xB4, 0x21, 0xF0, 0x62, 0xD0, 0x00, 0x2A, 0xE9, 0x62, 0xD0, 0xCB, 0xA7
+		}
+	},
+	{
+		147,
+		79,
+		0x39,
+		0x00BE,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xBE, 0x03, 0x53, 0x9A, 0x51, 0x9A, 0x08, 0x50, 0x02, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x62, 0xD0, 0x01, 0x51, 0xE6, 0x08, 0x51, 0xE7, 0x08, 0x50, 0x03, 0x08, 0x7C, 0x32, 0x52, 0x62, 0xD0, 0x01, 0x51, 0xE4, 0x08, 0x51, 0xE5, 0x08, 0x50, 0x05, 0x08, 0x7C, 0x32, 0x52, 0x38, 0xFA, 0x7F, 0x10, 0x4F, 0x38, 0x07, 0x62, 0xD0, 0x04, 0x51, 0xB5, 0x21, 0x0F, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x62, 0xD0, 0xB2
+		}
+	},
+	{
+		148,
+		79,
+		0x39,
+		0x00BF,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xBF, 0xD0, 0x04, 0x51, 0xB4, 0x21, 0xF0, 0x62, 0xD0, 0x00, 0x2A, 0xE9, 0x62, 0xD0, 0x03, 0x53, 0x9A, 0x51, 0x9A, 0x08, 0x50, 0x02, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x56, 0x00, 0x00, 0x80, 0xCA, 0x56, 0x04, 0x00, 0x62, 0xD0, 0x04, 0x52, 0x00, 0x3A, 0xB5, 0xD0, 0x12, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0xC0, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x54, 0x04, 0x62, 0xD0, 0x00, 0x3F, 0x91
+		}
+	},
+	{
+		149,
+		79,
+		0x39,
+		0x00C0,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xC0, 0x52, 0x04, 0x7C, 0x6D, 0x8A, 0x51, 0xE8, 0x01, 0xE0, 0x54, 0x02, 0x51, 0xE9, 0x09, 0x01, 0x54, 0x01, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0x81, 0x0E, 0xE9, 0x0D, 0x7C, 0x6F, 0x5C, 0x54, 0x03, 0x52, 0x02, 0x01, 0x06, 0x53, 0xE8, 0x52, 0x01, 0x09, 0x00, 0x7C, 0x6D, 0xE3, 0x3E, 0xE8, 0x53, 0xE8, 0x51, 0xE9, 0x08, 0x51, 0xE8, 0x08, 0x52, 0x03, 0x08, 0x7C, 0x32, 0x52, 0x38, 0xFD, 0x62, 0xD0, 0xD7, 0xC2
+		}
+	},
+	{
+		150,
+		79,
+		0x39,
+		0x00C1,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xC1, 0x00, 0x52, 0x02, 0x01, 0x04, 0x53, 0xE8, 0x52, 0x01, 0x09, 0x00, 0x7C, 0x6D, 0xE3, 0x3E, 0xE8, 0x53, 0xE8, 0x51, 0xE9, 0x08, 0x51, 0xE8, 0x08, 0x52, 0x03, 0x01, 0x02, 0x08, 0x7C, 0x32, 0x52, 0x62, 0xD0, 0x00, 0x52, 0x02, 0x01, 0x03, 0x7C, 0x6E, 0xA3, 0x08, 0x52, 0x03, 0x01, 0x04, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFB, 0x7C, 0x6F, 0x4C, 0x52, 0x01, 0x60, 0xD4, 0x3E, 0xE8, 0x54, 0x05, 0xAB, 0x6B
+		}
+	},
+	{
+		151,
+		79,
+		0x39,
+		0x00C2,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xC2, 0x48, 0x00, 0x01, 0xA0, 0x18, 0x52, 0x05, 0x21, 0x0F, 0x53, 0xE9, 0x52, 0x06, 0x2A, 0xE9, 0x08, 0x52, 0x03, 0x11, 0x01, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x80, 0x0C, 0x52, 0x05, 0x62, 0xD0, 0x00, 0x64, 0x64, 0x64, 0x64, 0x54, 0x06, 0x77, 0x00, 0x3D, 0x00, 0x04, 0xCF, 0x33, 0x38, 0xF9, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x71, 0x10, 0x41, 0x04, 0x5F, 0x70, 0xCF, 0x62, 0xD0, 0x00, 0x4D, 0xB0
+		}
+	},
+	{
+		152,
+		79,
+		0x39,
+		0x00C3,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xC3, 0x51, 0xAF, 0x29, 0xA0, 0x7C, 0x6F, 0x20, 0x10, 0x7C, 0x20, 0x0B, 0x7C, 0x20, 0x50, 0x20, 0x10, 0x50, 0x01, 0x08, 0x50, 0x00, 0x08, 0x50, 0xA0, 0x08, 0x08, 0x7C, 0x20, 0x57, 0x38, 0xFC, 0x20, 0x62, 0xC8, 0x0B, 0x62, 0xCA, 0x24, 0x43, 0xD6, 0x01, 0x62, 0xCD, 0x00, 0x56, 0x00, 0x20, 0x80, 0x06, 0x62, 0xCF, 0x00, 0x7B, 0x00, 0x3D, 0x00, 0x00, 0xBF, 0xF7, 0x41, 0xD6, 0xFE, 0x38, 0xFF, 0x54, 0xBF
+		}
+	},
+	{
+		153,
+		79,
+		0x39,
+		0x00C4,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xC4, 0x20, 0x7F, 0x10, 0x4F, 0x3D, 0xFC, 0x21, 0xD0, 0x0C, 0x41, 0xD6, 0xEF, 0x41, 0xE0, 0x7F, 0x62, 0xC8, 0x0B, 0x80, 0x0A, 0x62, 0xC8, 0x00, 0x43, 0xD6, 0x10, 0x43, 0xE0, 0x80, 0x20, 0x7F, 0x43, 0xD6, 0x01, 0x40, 0x62, 0xD0, 0x00, 0x51, 0xAF, 0x29, 0xA0, 0x7C, 0x6F, 0x20, 0x71, 0x10, 0x43, 0x04, 0xA0, 0x70, 0xCF, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x5D, 0xF7, 0x54, 0x00, 0x70, 0xFE, 0x7C, 0xDE, 0xD4
+		}
+	},
+	{
+		154,
+		79,
+		0x39,
+		0x00C5,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xC5, 0x70, 0xF8, 0xB0, 0x13, 0x7C, 0x73, 0x90, 0xBF, 0xFC, 0x71, 0x01, 0x40, 0x70, 0xFE, 0x62, 0xE3, 0x38, 0x41, 0xD6, 0xFE, 0x80, 0x06, 0x10, 0x7C, 0x33, 0x60, 0x20, 0x71, 0x10, 0x41, 0x04, 0x5F, 0x70, 0xCF, 0x62, 0xD0, 0x00, 0x51, 0xAF, 0x29, 0xA0, 0x7C, 0x6F, 0x20, 0x48, 0x00, 0x01, 0xA0, 0x03, 0x71, 0x01, 0x38, 0xFF, 0x20, 0x7F, 0x10, 0x7C, 0x33, 0x60, 0x20, 0x71, 0x10, 0x41, 0x04, 0x7F, 0x17
+		}
+	},
+	{
+		155,
+		79,
+		0x39,
+		0x00C6,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xC6, 0x5F, 0x70, 0xCF, 0x62, 0xD0, 0x00, 0x51, 0xAF, 0x29, 0xA0, 0x7C, 0x6F, 0x20, 0x71, 0x10, 0x43, 0xEC, 0x02, 0x70, 0xCF, 0x62, 0xDA, 0x7F, 0x43, 0xE0, 0x80, 0x9F, 0x83, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x9F, 0x92, 0x71, 0x10, 0x43, 0xEC, 0x02, 0x70, 0xCF, 0x62, 0xDA, 0x7F, 0x43, 0xE0, 0x80, 0x9F, 0x6D, 0x38, 0xFF, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x7C, 0x73, 0x89, 0x38, 0xFF, 0x20, 0xF5, 0x04
+		}
+	},
+	{
+		156,
+		79,
+		0x39,
+		0x00C7,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xC7, 0x7F, 0x7C, 0x73, 0x89, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x5D, 0xC8, 0x39, 0x00, 0xB0, 0x18, 0x7C, 0x73, 0x90, 0xA0, 0x09, 0x56, 0x01, 0x01, 0x56, 0x00, 0x00, 0x80, 0x04, 0x7C, 0x6F, 0xC9, 0x62, 0xD0, 0x00, 0x52, 0x01, 0x80, 0x1D, 0x5D, 0xC9, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x47, 0xE9, 0x01, 0xA0, 0x09, 0x56, 0x01, 0x01, 0x56, 0x00, 0x00, 0x80, 0x04, 0x7C, 0x6F, 0xC9, 0x62, 0xD0, 0x00, 0xEB, 0xF1
+		}
+	},
+	{
+		157,
+		79,
+		0x39,
+		0x00C8,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xC8, 0x52, 0x01, 0x38, 0xFE, 0x20, 0x7F, 0x62, 0xD0, 0x00, 0x50, 0x00, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x7C, 0x70, 0xF8, 0xA0, 0x25, 0x5D, 0xD6, 0x53, 0xE9, 0x2E, 0xE9, 0xFE, 0x51, 0xE9, 0x54, 0x00, 0x43, 0xD6, 0x01, 0x52, 0xFC, 0x60, 0xCD, 0x52, 0xFB, 0x60, 0xCF, 0x5D, 0xD6, 0x53, 0xE9, 0x52, 0x00, 0x24, 0xE9, 0x51, 0xE9, 0x60, 0xD6, 0x80, 0x16, 0x3D, 0xFC, 0xA0, 0xD0, 0x11, 0x7C, 0x6F, 0x06, 0x28
+		}
+	},
+	{
+		158,
+		79,
+		0x39,
+		0x00C9,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xC9, 0xB9, 0x55, 0xE9, 0x00, 0x06, 0xE8, 0x00, 0x7C, 0x71, 0x08, 0x52, 0xFB, 0x3F, 0xE8, 0x38, 0xFF, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x03, 0x52, 0xFB, 0x54, 0x01, 0x52, 0xFA, 0x54, 0x00, 0x7C, 0x70, 0xF8, 0xA0, 0x1C, 0x7C, 0x71, 0x65, 0x60, 0xCD, 0x52, 0x00, 0x60, 0xCF, 0x52, 0x01, 0x60, 0xCF, 0x5D, 0xD6, 0x53, 0xE9, 0x52, 0x02, 0x24, 0xE9, 0x51, 0xE9, 0x60, 0xD6, 0x80, 0x26, 0x3D, 0xFC, 0x41, 0x9F
+		}
+	},
+	{
+		159,
+		79,
+		0x39,
+		0x00CA,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xCA, 0x9F, 0xD0, 0x21, 0x7C, 0x6F, 0xB9, 0x55, 0xE9, 0x00, 0x06, 0xE8, 0x00, 0x0E, 0xE9, 0x01, 0x7C, 0x6F, 0xD9, 0x52, 0xFC, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x06, 0xE8, 0x01, 0x7C, 0x71, 0x08, 0x52, 0x01, 0x3F, 0xE8, 0x38, 0xFD, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x7C, 0x70, 0xF8, 0xA0, 0x29, 0x5D, 0xD6, 0x53, 0xE9, 0x2E, 0xE9, 0xFE, 0x51, 0xE9, 0x54, 0x01, 0x43, 0xD6, 0x01, 0x10, 0x52, 0xEA, 0xF2
+		}
+	},
+	{
+		160,
+		79,
+		0x39,
+		0x00CB,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xCB, 0xFC, 0x7C, 0x33, 0x49, 0x62, 0xD0, 0x00, 0x20, 0x54, 0x00, 0x5D, 0xD6, 0x53, 0xE9, 0x52, 0x01, 0x24, 0xE9, 0x51, 0xE9, 0x60, 0xD6, 0x80, 0x17, 0x3D, 0xFC, 0xA0, 0xD0, 0x12, 0x7C, 0x6F, 0xB9, 0x55, 0xE9, 0x00, 0x06, 0xE8, 0x00, 0x0E, 0xE9, 0x01, 0x7C, 0x6D, 0x83, 0x54, 0x00, 0x52, 0x00, 0x62, 0xD0, 0x00, 0x38, 0xFE, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x03, 0x7C, 0x70, 0xF8, 0xA0, 0x1B, 0x45, 0xA9
+		}
+	},
+	{
+		161,
+		79,
+		0x39,
+		0x00CC,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xCC, 0x7C, 0x71, 0x65, 0x08, 0x7C, 0x33, 0x53, 0x38, 0xFF, 0x7C, 0x72, 0x25, 0x5D, 0xD6, 0x53, 0xE9, 0x52, 0x02, 0x24, 0xE9, 0x51, 0xE9, 0x60, 0xD6, 0x80, 0x29, 0x3D, 0xFC, 0x9F, 0xD0, 0x24, 0x7C, 0x6F, 0xB9, 0x55, 0xE9, 0x00, 0x06, 0xE8, 0x00, 0x0E, 0xE9, 0x01, 0x7C, 0x6D, 0x83, 0x54, 0x00, 0x52, 0xFC, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x06, 0xE8, 0x01, 0x0E, 0xE9, 0x01, 0x7C, 0x6D, 0x83, 0xFD, 0x1A
+		}
+	},
+	{
+		162,
+		79,
+		0x39,
+		0x00CD,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xCD, 0x54, 0x01, 0x7C, 0x71, 0x2E, 0x38, 0xFD, 0x20, 0x7F, 0x60, 0xCD, 0x5D, 0xF7, 0x08, 0x70, 0xFE, 0x5D, 0xCF, 0x7E, 0x60, 0xCD, 0x5D, 0xF7, 0x08, 0x70, 0xFE, 0x5D, 0xCF, 0x5C, 0x5D, 0xCF, 0x7E, 0x49, 0xC9, 0x01, 0xBF, 0xFC, 0x41, 0xD6, 0xFE, 0x7F, 0x41, 0x05, 0xF7, 0x7C, 0x73, 0x82, 0x62, 0xD0, 0x00, 0x51, 0xAF, 0x29, 0x08, 0x7C, 0x6F, 0x20, 0x71, 0x10, 0x43, 0x05, 0x08, 0x43, 0x04, 0xA4, 0x69
+		}
+	},
+	{
+		163,
+		79,
+		0x39,
+		0x00CE,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xCE, 0x08, 0x70, 0xCF, 0x62, 0xD0, 0x04, 0x55, 0xB9, 0x00, 0x62, 0xD0, 0x03, 0x55, 0x99, 0x04, 0x55, 0x9A, 0x00, 0x55, 0x9B, 0xF8, 0x55, 0x9C, 0x00, 0x55, 0x9E, 0x64, 0x55, 0x9D, 0x32, 0x55, 0x9F, 0x00, 0x55, 0xA0, 0x00, 0x7C, 0x30, 0xB2, 0x90, 0x10, 0x7C, 0x6F, 0x64, 0x10, 0x57, 0x01, 0x50, 0xF4, 0x7C, 0x2B, 0xA8, 0x20, 0x7C, 0x6E, 0xE8, 0x7F, 0x10, 0x4F, 0x38, 0x03, 0x7C, 0x31, 0x35, 0x13, 0x48
+		}
+	},
+	{
+		164,
+		79,
+		0x39,
+		0x00CF,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xCF, 0x7C, 0x6F, 0x2F, 0xB0, 0x06, 0x56, 0x01, 0x20, 0x80, 0x04, 0x56, 0x01, 0xA0, 0x52, 0x01, 0x08, 0x7C, 0x31, 0x02, 0x38, 0xFF, 0x62, 0xD0, 0x03, 0x51, 0x99, 0x21, 0xFC, 0x62, 0xD0, 0x00, 0x08, 0x50, 0x00, 0x08, 0x7C, 0x32, 0x0C, 0x50, 0x20, 0x08, 0x50, 0x01, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x3D, 0x00, 0x00, 0xB0, 0x28, 0x62, 0xD0, 0x04, 0x51, 0xB9, 0x08, 0x50, 0x01, 0x08, 0x7C, 0x7C, 0x1B
+		}
+	},
+	{
+		165,
+		79,
+		0x39,
+		0x00D0,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xD0, 0x32, 0x0C, 0x38, 0xFE, 0x50, 0x00, 0x08, 0x50, 0x02, 0x08, 0x7C, 0x32, 0x0C, 0x62, 0xD0, 0x03, 0x51, 0x9B, 0x08, 0x50, 0x1E, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x82, 0x52, 0x3D, 0x00, 0x10, 0xB1, 0x87, 0x50, 0x00, 0x08, 0x50, 0x02, 0x08, 0x7C, 0x32, 0x0C, 0x7C, 0x40, 0x1F, 0x62, 0xD0, 0x00, 0x08, 0x50, 0x01, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x50, 0xC0, 0x08, 0x50, 0x03, 0x08, 0x01, 0x26
+		}
+	},
+	{
+		166,
+		79,
+		0x39,
+		0x00D1,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xD1, 0x7C, 0x32, 0x0C, 0x50, 0xC1, 0x08, 0x50, 0x04, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x50, 0xC2, 0x08, 0x50, 0x05, 0x08, 0x7C, 0x32, 0x0C, 0x50, 0x00, 0x08, 0x50, 0x06, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x10, 0x50, 0x00, 0x7C, 0x2B, 0x69, 0x62, 0xD0, 0x00, 0x20, 0x08, 0x50, 0x07, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x10, 0x50, 0x01, 0x7C, 0x2B, 0x69, 0x62, 0xD0, 0x00, 0x20, 0x08, 0xD6, 0xD1
+		}
+	},
+	{
+		167,
+		79,
+		0x39,
+		0x00D2,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xD2, 0x50, 0x08, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x10, 0x50, 0x02, 0x7C, 0x2B, 0x69, 0x62, 0xD0, 0x00, 0x20, 0x08, 0x50, 0x09, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x10, 0x50, 0x03, 0x7C, 0x2B, 0x69, 0x62, 0xD0, 0x00, 0x20, 0x08, 0x50, 0x0A, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x10, 0x50, 0x04, 0x7C, 0x2B, 0x69, 0x62, 0xD0, 0x00, 0x20, 0x08, 0x50, 0x0B, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xAF, 0x84
+		}
+	},
+	{
+		168,
+		79,
+		0x39,
+		0x00D3,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xD3, 0xFE, 0x10, 0x50, 0x05, 0x7C, 0x2B, 0x69, 0x62, 0xD0, 0x00, 0x20, 0x08, 0x50, 0x0C, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x10, 0x50, 0x06, 0x7C, 0x2B, 0x69, 0x62, 0xD0, 0x00, 0x20, 0x08, 0x50, 0x0D, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x10, 0x50, 0x07, 0x7C, 0x2B, 0x69, 0x62, 0xD0, 0x00, 0x20, 0x08, 0x50, 0x0E, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x50, 0x07, 0x10, 0x06, 0x33
+		}
+	},
+	{
+		169,
+		79,
+		0x39,
+		0x00D4,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xD4, 0x08, 0x57, 0xA0, 0x28, 0x53, 0xE9, 0x18, 0x75, 0x09, 0x00, 0x28, 0x53, 0xE8, 0x20, 0x51, 0xE9, 0x08, 0x51, 0xE8, 0x08, 0x50, 0x0F, 0x08, 0x7C, 0x32, 0x52, 0x38, 0xFD, 0x50, 0x10, 0x08, 0x50, 0x12, 0x08, 0x50, 0x11, 0x08, 0x7C, 0x32, 0x52, 0x50, 0xA0, 0x08, 0x50, 0x02, 0x08, 0x50, 0x13, 0x08, 0x7C, 0x32, 0x52, 0x38, 0xFA, 0x50, 0x04, 0x08, 0x50, 0x00, 0x08, 0x50, 0x15, 0x08, 0x7C, 0x62, 0xEC
+		}
+	},
+	{
+		170,
+		79,
+		0x39,
+		0x00D5,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xD5, 0x32, 0x52, 0x50, 0x00, 0x08, 0x50, 0x17, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFB, 0x50, 0x00, 0x08, 0x50, 0x18, 0x08, 0x7C, 0x32, 0x0C, 0x50, 0x00, 0x08, 0x50, 0x19, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x50, 0x00, 0x08, 0x50, 0x1A, 0x08, 0x7C, 0x32, 0x0C, 0x50, 0x00, 0x08, 0x50, 0x1B, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x50, 0x00, 0x08, 0x50, 0x1C, 0x08, 0x7C, 0x32, 0x0C, 0x62, 0xD0, 0x66, 0xF5
+		}
+	},
+	{
+		171,
+		79,
+		0x39,
+		0x00D6,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xD6, 0x03, 0x51, 0x9C, 0x08, 0x50, 0x1D, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x62, 0xD0, 0x03, 0x51, 0x9E, 0x08, 0x50, 0x1E, 0x08, 0x7C, 0x32, 0x0C, 0x62, 0xD0, 0x03, 0x51, 0x9D, 0x08, 0x50, 0x1F, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x80, 0xC7, 0x3D, 0x00, 0x20, 0xB0, 0x03, 0x80, 0xC0, 0x3D, 0x00, 0x30, 0xB0, 0xBB, 0x50, 0x01, 0x08, 0x50, 0x00, 0x08, 0x7C, 0x32, 0x0C, 0x50, 0x04, 0x08, 0x7D, 0x24
+		}
+	},
+	{
+		172,
+		79,
+		0x39,
+		0x00D7,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xD7, 0x50, 0x01, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x50, 0x01, 0x08, 0x50, 0x02, 0x08, 0x7C, 0x32, 0x0C, 0x62, 0xD0, 0x03, 0x51, 0x9F, 0x08, 0x50, 0x29, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x50, 0x04, 0x08, 0x50, 0x2A, 0x08, 0x7C, 0x32, 0x0C, 0x50, 0x08, 0x08, 0x50, 0x2B, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x50, 0x08, 0x08, 0x50, 0x2C, 0x08, 0x7C, 0x32, 0x0C, 0x50, 0x00, 0x08, 0x50, 0xFF, 0x29
+		}
+	},
+	{
+		173,
+		79,
+		0x39,
+		0x00D8,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xD8, 0x48, 0x08, 0x50, 0x2D, 0x08, 0x7C, 0x32, 0x52, 0x38, 0xFB, 0x50, 0x1C, 0x08, 0x50, 0x2F, 0x08, 0x7C, 0x32, 0x0C, 0x50, 0x00, 0x08, 0x50, 0x30, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x50, 0x08, 0x08, 0x50, 0x31, 0x08, 0x7C, 0x32, 0x0C, 0x50, 0x08, 0x08, 0x50, 0x32, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x50, 0x5A, 0x08, 0x50, 0x33, 0x08, 0x7C, 0x32, 0x0C, 0x50, 0x00, 0x08, 0x50, 0x34, 0xD9, 0xDE
+		}
+	},
+	{
+		174,
+		79,
+		0x39,
+		0x00D9,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xD9, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x50, 0x04, 0x08, 0x50, 0x35, 0x08, 0x7C, 0x32, 0x0C, 0x50, 0x0C, 0x08, 0x50, 0x36, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x50, 0x05, 0x08, 0x50, 0x37, 0x08, 0x7C, 0x32, 0x0C, 0x50, 0x01, 0x08, 0x50, 0x38, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x7C, 0x2B, 0x1A, 0x7C, 0x31, 0x1F, 0x38, 0xFD, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x7C, 0x32, 0x06, 0x7C, 0x20, 0x6D
+		}
+	},
+	{
+		175,
+		79,
+		0x39,
+		0x00DA,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xDA, 0x6F, 0x3C, 0x54, 0x00, 0x3D, 0x00, 0x30, 0xB0, 0x05, 0x90, 0xE8, 0x80, 0x03, 0x90, 0x4B, 0x62, 0xD0, 0x03, 0x51, 0x99, 0x21, 0x01, 0x62, 0xD0, 0x00, 0x39, 0x01, 0xB0, 0x19, 0x7C, 0x31, 0x35, 0x62, 0xD4, 0x00, 0x62, 0xD5, 0x00, 0x62, 0xD1, 0x00, 0x62, 0xD3, 0x00, 0x62, 0xD0, 0x00, 0x62, 0xE3, 0x38, 0x50, 0x00, 0x00, 0x7C, 0x6F, 0x3C, 0x54, 0x01, 0x52, 0x01, 0x3B, 0x00, 0xA0, 0x17, 0xE4, 0xF6
+		}
+	},
+	{
+		176,
+		79,
+		0x39,
+		0x00DB,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xDB, 0x52, 0x01, 0x08, 0x52, 0x00, 0x08, 0x7C, 0x2B, 0x1B, 0x9C, 0xEE, 0x52, 0x01, 0x08, 0x52, 0x00, 0x08, 0x7C, 0x2B, 0x1F, 0x38, 0xFC, 0x38, 0xFE, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x7C, 0x6F, 0x2F, 0xB0, 0x30, 0x50, 0x00, 0x08, 0x7C, 0x32, 0xA7, 0x62, 0xD0, 0x03, 0x53, 0x99, 0x50, 0x1E, 0x08, 0x7C, 0x32, 0xA7, 0x38, 0xFE, 0x62, 0xD0, 0x03, 0x53, 0x9B, 0x47, 0x99, 0x02, 0xA0, 0x70, 0xFB, 0x25
+		}
+	},
+	{
+		177,
+		79,
+		0x39,
+		0x00DC,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xDC, 0x51, 0x99, 0x21, 0xFD, 0x62, 0xD0, 0x00, 0x08, 0x50, 0x00, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x80, 0x5E, 0x3D, 0x00, 0x10, 0xB0, 0x33, 0x50, 0x00, 0x08, 0x7C, 0x32, 0xA7, 0x62, 0xD0, 0x03, 0x53, 0x99, 0x50, 0x1D, 0x08, 0x7C, 0x32, 0xA7, 0x38, 0xFE, 0x62, 0xD0, 0x03, 0x53, 0x9C, 0x50, 0x1F, 0x08, 0x7C, 0x32, 0xA7, 0x62, 0xD0, 0x03, 0x53, 0x9D, 0x50, 0x1E, 0x08, 0x7C, 0x32, 0xA7, 0x46, 0xBC
+		}
+	},
+	{
+		178,
+		79,
+		0x39,
+		0x00DD,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xDD, 0x38, 0xFE, 0x62, 0xD0, 0x03, 0x53, 0x9E, 0x80, 0x27, 0x3D, 0x00, 0x20, 0xB0, 0x10, 0x50, 0x00, 0x08, 0x7C, 0x32, 0xA7, 0x38, 0xFF, 0x62, 0xD0, 0x03, 0x53, 0x99, 0x80, 0x13, 0x48, 0x00, 0x40, 0xA0, 0x0E, 0x50, 0x00, 0x08, 0x7C, 0x32, 0xA7, 0x38, 0xFF, 0x62, 0xD0, 0x03, 0x53, 0x99, 0x38, 0xFF, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x04, 0x50, 0x00, 0x08, 0x7C, 0x32, 0xA7, 0x38, 0xFF, 0x62, 0xAA, 0x85
+		}
+	},
+	{
+		179,
+		79,
+		0x39,
+		0x00DE,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xDE, 0xD0, 0x00, 0x54, 0x00, 0x3D, 0x00, 0x01, 0xA0, 0x1F, 0x52, 0x00, 0x21, 0x70, 0x39, 0x30, 0xB0, 0x0E, 0x50, 0x01, 0x08, 0x50, 0x00, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x80, 0xE7, 0x52, 0x00, 0x62, 0xD0, 0x03, 0x53, 0x99, 0x80, 0xDE, 0x50, 0x29, 0x08, 0x7C, 0x32, 0xA7, 0x62, 0xD0, 0x03, 0x53, 0x9F, 0x50, 0x02, 0x08, 0x7C, 0x32, 0xA7, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x39, 0x81, 0xB0, 0x82, 0x36
+		}
+	},
+	{
+		180,
+		79,
+		0x39,
+		0x00DF,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xDF, 0xC4, 0x50, 0x2A, 0x08, 0x7C, 0x32, 0xA7, 0x62, 0xD0, 0x00, 0x53, 0xA5, 0x50, 0x2B, 0x08, 0x7C, 0x32, 0xA7, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x53, 0x15, 0x50, 0x2C, 0x08, 0x7C, 0x32, 0xA7, 0x62, 0xD0, 0x00, 0x53, 0x16, 0x50, 0x2D, 0x08, 0x7C, 0x32, 0xF7, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x08, 0x51, 0xE9, 0x62, 0xD0, 0x00, 0x53, 0xA3, 0x18, 0x53, 0xA4, 0x50, 0x2F, 0x08, 0x7C, 0x28, 0x83
+		}
+	},
+	{
+		181,
+		79,
+		0x39,
+		0x00E0,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xE0, 0x32, 0xA7, 0x38, 0xFF, 0x62, 0xD0, 0x00, 0x53, 0x42, 0x50, 0x30, 0x08, 0x7C, 0x32, 0xA7, 0x62, 0xD0, 0x04, 0x53, 0xB7, 0x50, 0x31, 0x08, 0x7C, 0x32, 0xA7, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x53, 0xA6, 0x50, 0x32, 0x08, 0x7C, 0x32, 0xA7, 0x62, 0xD0, 0x00, 0x53, 0x17, 0x50, 0x36, 0x08, 0x7C, 0x32, 0xA7, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x53, 0xA7, 0x50, 0x37, 0x08, 0x7C, 0x32, 0xA7, 0x62, 0x39, 0xA6
+		}
+	},
+	{
+		182,
+		79,
+		0x39,
+		0x00E1,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xE1, 0xD0, 0x00, 0x53, 0xA8, 0x50, 0x38, 0x08, 0x7C, 0x32, 0xA7, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x53, 0xA9, 0x10, 0x7C, 0x18, 0x83, 0x7C, 0x17, 0xB7, 0x20, 0x80, 0x04, 0x62, 0xE3, 0x38, 0x7C, 0x31, 0xC5, 0x62, 0xD0, 0x00, 0x39, 0x00, 0xBF, 0xF4, 0x7C, 0x71, 0x10, 0x7C, 0x49, 0x07, 0x62, 0xE3, 0x38, 0x52, 0x01, 0x71, 0x10, 0x60, 0xE0, 0x50, 0x01, 0x08, 0x50, 0x02, 0x08, 0x70, 0xCF, 0x7C, 0xFE, 0x31
+		}
+	},
+	{
+		183,
+		79,
+		0x39,
+		0x00E2,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xE2, 0x32, 0x0C, 0x38, 0xFE, 0x38, 0xFC, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x7C, 0x6F, 0x2F, 0xB0, 0x06, 0x3D, 0xFC, 0x00, 0xB0, 0x0B, 0x3D, 0x00, 0x20, 0xA0, 0x06, 0x48, 0x00, 0x40, 0xA0, 0x26, 0x62, 0xD0, 0x04, 0x06, 0xB9, 0x40, 0x51, 0xB9, 0x29, 0x20, 0x62, 0xD0, 0x00, 0x08, 0x50, 0x01, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x80, 0x04, 0x62, 0xE3, 0x38, 0x7C, 0x31, 0xC5, 0x62, 0xD0, 0xA4, 0x7E
+		}
+	},
+	{
+		184,
+		79,
+		0x39,
+		0x00E3,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xE3, 0x00, 0x39, 0x00, 0xBF, 0xF4, 0x38, 0xFF, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x7C, 0x6F, 0x2F, 0xB0, 0x06, 0x3D, 0xFC, 0x00, 0xB0, 0x0B, 0x3D, 0x00, 0x20, 0xA0, 0x06, 0x48, 0x00, 0x40, 0xA0, 0x0F, 0x62, 0xD0, 0x04, 0x51, 0xB9, 0x08, 0x50, 0x01, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x50, 0x00, 0x08, 0x7C, 0x32, 0xA7, 0x38, 0xFF, 0x21, 0x80, 0x62, 0xD0, 0x04, 0x53, 0xB8, 0x38, 0xFF, 0xE6, 0x03
+		}
+	},
+	{
+		185,
+		79,
+		0x39,
+		0x00E4,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xE4, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x7C, 0x6F, 0x2F, 0xB0, 0x0A, 0x3D, 0xFC, 0x01, 0xB0, 0x19, 0x90, 0xC1, 0x80, 0x15, 0x3D, 0x00, 0x10, 0xB0, 0x05, 0x90, 0x63, 0x80, 0x0C, 0x3D, 0x00, 0x30, 0xB0, 0x05, 0x90, 0x21, 0x80, 0x03, 0x90, 0x56, 0x52, 0xFC, 0x08, 0x7C, 0x2B, 0x4C, 0x38, 0xFF, 0x38, 0xFF, 0x20, 0x7F, 0x62, 0xD0, 0x04, 0x51, 0xB9, 0x62, 0xD0, 0x00, 0x67, 0x67, 0x67, 0x67, 0x15, 0x62
+		}
+	},
+	{
+		186,
+		79,
+		0x39,
+		0x00E5,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xE5, 0x67, 0x67, 0x21, 0x03, 0x7F, 0x50, 0x84, 0x08, 0x50, 0x01, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x7C, 0x6F, 0x64, 0x7C, 0x32, 0x06, 0x62, 0xD0, 0x00, 0x62, 0xE3, 0x38, 0x50, 0x01, 0x08, 0x7C, 0x32, 0xA7, 0x38, 0xFF, 0x62, 0xD0, 0x00, 0x39, 0x04, 0xA0, 0x10, 0x50, 0x00, 0x08, 0x7C, 0x32, 0xA7, 0x38, 0xFF, 0x62, 0xD0, 0x00, 0x39, 0x01, 0xAF, 0xDA, 0x7C, 0x6E, 0xE8, 0x7F, 0x10, 0x4F, 0xD5, 0xE3
+		}
+	},
+	{
+		187,
+		79,
+		0x39,
+		0x00E6,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xE6, 0x38, 0x02, 0x7C, 0x6F, 0x64, 0x56, 0x01, 0xFA, 0x56, 0x00, 0x00, 0x80, 0x36, 0x7C, 0x32, 0x06, 0x62, 0xD0, 0x00, 0x50, 0x00, 0x08, 0x7C, 0x32, 0xA7, 0x38, 0xFF, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x26, 0xE9, 0x80, 0x62, 0xD0, 0x04, 0x51, 0xB8, 0x62, 0xD0, 0x00, 0x3A, 0xE9, 0xA0, 0x03, 0x80, 0x1C, 0x10, 0x57, 0x03, 0x50, 0xE3, 0x7C, 0x2B, 0xA8, 0x20, 0x62, 0xE3, 0x38, 0x7B, 0x01, 0x1F, 0xA4, 0x82
+		}
+	},
+	{
+		188,
+		79,
+		0x39,
+		0x00E7,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xE7, 0x00, 0x00, 0x3D, 0x00, 0x00, 0xBF, 0xC7, 0x3D, 0x01, 0x00, 0xBF, 0xC2, 0x7C, 0x6E, 0xE8, 0x38, 0xFE, 0x20, 0x7F, 0x7C, 0x6F, 0x64, 0x10, 0x57, 0x01, 0x50, 0xF4, 0x7C, 0x2B, 0xA8, 0x20, 0x7C, 0x6E, 0xE8, 0x7C, 0x32, 0x06, 0x62, 0xD0, 0x00, 0x7F, 0x7C, 0x31, 0x77, 0x7F, 0x7C, 0x31, 0xC1, 0x7F, 0x43, 0x05, 0x08, 0x62, 0xD0, 0x00, 0x26, 0xB0, 0xFB, 0x51, 0xB0, 0x60, 0x00, 0x62, 0xDA, 0x4A, 0xCF
+		}
+	},
+	{
+		189,
+		79,
+		0x39,
+		0x00E8,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xE8, 0xEF, 0x43, 0xE0, 0x10, 0x7C, 0x31, 0x9D, 0x7F, 0x7C, 0x31, 0xB6, 0x7C, 0x73, 0x82, 0x41, 0x05, 0xF7, 0x62, 0xD0, 0x00, 0x51, 0xB0, 0x29, 0x04, 0x53, 0xB0, 0x51, 0xB0, 0x60, 0x00, 0x7F, 0x7F, 0x7F, 0x7F, 0x62, 0xD0, 0x00, 0x50, 0x00, 0x7F, 0x10, 0x4F, 0x20, 0x7F, 0x7F, 0x10, 0x4F, 0x20, 0x7F, 0x62, 0xD0, 0x04, 0x55, 0xC3, 0x00, 0x62, 0xD0, 0x04, 0x55, 0xC2, 0x00, 0x62, 0xD0, 0x04, 0x09, 0x4E
+		}
+	},
+	{
+		190,
+		79,
+		0x39,
+		0x00E9,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xE9, 0x55, 0xC1, 0x00, 0x7C, 0x3A, 0x1F, 0x10, 0x7C, 0x49, 0x5B, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x62, 0xD0, 0x03, 0x51, 0x9B, 0x21, 0xF0, 0x62, 0xD0, 0x04, 0x53, 0xC2, 0x7C, 0x6F, 0x2F, 0xA0, 0x06, 0x3D, 0x00, 0x30, 0xB0, 0x0B, 0x7C, 0x73, 0x26, 0x62, 0xD0, 0x00, 0x53, 0x39, 0x80, 0x07, 0x62, 0xD0, 0x00, 0x55, 0x39, 0x00, 0x62, 0xD0, 0x00, 0x51, 0x39, 0x62, 0xD0, 0x00, 0x53, 0x3A, 0x4C, 0xD5
+		}
+	},
+	{
+		191,
+		79,
+		0x39,
+		0x00EA,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xEA, 0x62, 0xD0, 0x00, 0x51, 0x39, 0x62, 0xD0, 0x00, 0x53, 0x3B, 0x62, 0xD0, 0x00, 0x51, 0x39, 0x62, 0xD0, 0x00, 0x53, 0x3C, 0x7C, 0x3A, 0x21, 0x38, 0xFF, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x62, 0xD0, 0x04, 0x51, 0xCF, 0x54, 0x01, 0x10, 0x52, 0xFB, 0x7C, 0x49, 0x92, 0x20, 0x62, 0xD0, 0x04, 0x53, 0xC3, 0x3C, 0xC3, 0x00, 0xB0, 0x4F, 0x52, 0xFB, 0x3B, 0xFC, 0xA0, 0x49, 0x52, 0xFC, 0x3B, 0xFE, 0x3A
+		}
+	},
+	{
+		192,
+		79,
+		0x39,
+		0x00EB,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xEB, 0xFB, 0xD0, 0x09, 0x62, 0xD0, 0x04, 0x55, 0xC3, 0x2F, 0x80, 0x21, 0x62, 0xD0, 0x04, 0x55, 0xC3, 0x4F, 0x3D, 0xFB, 0x00, 0xB0, 0x16, 0x7C, 0x3A, 0x2C, 0x7C, 0x3A, 0x22, 0x62, 0xD0, 0x00, 0x39, 0x00, 0xA0, 0x09, 0x7C, 0x3A, 0x22, 0x62, 0xD0, 0x04, 0x53, 0xC3, 0x62, 0xD0, 0x04, 0x51, 0xC1, 0x62, 0xD0, 0x04, 0x3A, 0xC3, 0xB0, 0x0C, 0x62, 0xD0, 0x04, 0x52, 0x01, 0x01, 0x01, 0x53, 0xCF, 0x63, 0x05
+		}
+	},
+	{
+		193,
+		79,
+		0x39,
+		0x00EC,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xEC, 0x80, 0x04, 0x7C, 0x70, 0x5B, 0x62, 0xD0, 0x04, 0x3C, 0xC3, 0x00, 0xA0, 0x09, 0x52, 0xFB, 0x08, 0x7C, 0x3A, 0x28, 0x38, 0xFF, 0x62, 0xD0, 0x04, 0x51, 0xC3, 0x08, 0x52, 0xFB, 0x08, 0x91, 0x86, 0x38, 0xFE, 0x39, 0x00, 0xA0, 0x21, 0x62, 0xD0, 0x04, 0x55, 0xC3, 0xFF, 0x62, 0xD0, 0x04, 0x51, 0xC1, 0x62, 0xD0, 0x04, 0x3A, 0xC3, 0xB0, 0x0C, 0x62, 0xD0, 0x04, 0x52, 0x01, 0x01, 0x01, 0x53, 0x63, 0x06
+		}
+	},
+	{
+		194,
+		79,
+		0x39,
+		0x00ED,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xED, 0xCF, 0x80, 0x04, 0x7C, 0x70, 0x5B, 0x62, 0xD0, 0x04, 0x3C, 0xC3, 0x00, 0xA0, 0x06, 0x56, 0x00, 0x01, 0x80, 0x04, 0x56, 0x00, 0x00, 0x52, 0xFB, 0x08, 0x52, 0xFC, 0x08, 0x50, 0x04, 0x08, 0x50, 0xC3, 0x08, 0x62, 0xD0, 0x00, 0x50, 0x0F, 0x08, 0x10, 0x7C, 0x2B, 0x40, 0x38, 0xFA, 0x62, 0xD0, 0x04, 0x3C, 0xC3, 0x00, 0xA0, 0x2B, 0x90, 0x31, 0x62, 0xD0, 0x04, 0x3C, 0xC3, 0x00, 0xA0, 0x21, 0x41, 0xC3
+		}
+	},
+	{
+		195,
+		79,
+		0x39,
+		0x00EE,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xEE, 0x62, 0xD0, 0x04, 0x3C, 0xCF, 0x00, 0xB0, 0x0F, 0x62, 0xD0, 0x04, 0x3C, 0xC3, 0xFF, 0xA0, 0x07, 0x62, 0xD0, 0x04, 0x55, 0xCF, 0x80, 0x62, 0xD0, 0x04, 0x51, 0xC3, 0x62, 0xD0, 0x04, 0x53, 0xC1, 0x52, 0x00, 0x62, 0xD0, 0x00, 0x38, 0xFE, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x62, 0xD0, 0x04, 0x51, 0xC3, 0x54, 0x00, 0x3C, 0xC3, 0x00, 0xA0, 0x0B, 0x7C, 0x6F, 0x3C, 0x62, 0xD0, 0x00, 0x39, 0x14, 0x6A
+		}
+	},
+	{
+		196,
+		79,
+		0x39,
+		0x00EF,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xEF, 0x30, 0xB0, 0x03, 0x80, 0xB1, 0x50, 0x10, 0x08, 0x50, 0x29, 0x08, 0x50, 0x28, 0x08, 0x90, 0xA9, 0x50, 0x20, 0x08, 0x50, 0x3F, 0x08, 0x50, 0x30, 0x08, 0x90, 0x9E, 0x38, 0xFA, 0x50, 0x40, 0x08, 0x50, 0x49, 0x08, 0x50, 0x48, 0x08, 0x90, 0x91, 0x50, 0x80, 0x08, 0x50, 0x9F, 0x08, 0x50, 0x90, 0x08, 0x90, 0x86, 0x38, 0xFA, 0x62, 0xD0, 0x04, 0x51, 0xC3, 0x3B, 0x00, 0xA0, 0x6E, 0x3D, 0x00, 0x76, 0x2F
+		}
+	},
+	{
+		197,
+		79,
+		0x39,
+		0x00F0,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xF0, 0x28, 0xC0, 0x69, 0x50, 0x29, 0x3B, 0x00, 0xC0, 0x63, 0x62, 0xD0, 0x04, 0x51, 0xDF, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x65, 0xE9, 0x51, 0xE9, 0x01, 0x10, 0x62, 0xD0, 0x04, 0x53, 0xC3, 0x62, 0xD0, 0x04, 0x51, 0xC1, 0x62, 0xD0, 0x04, 0x3A, 0xC3, 0xB0, 0x0F, 0x62, 0xD0, 0x04, 0x51, 0xBF, 0x01, 0x01, 0x62, 0xD0, 0x04, 0x53, 0xCF, 0x80, 0x04, 0x7C, 0x70, 0x5B, 0x50, 0x10, 0x08, 0x50, 0x29, 0x67, 0x12
+		}
+	},
+	{
+		198,
+		79,
+		0x39,
+		0x00F1,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xF1, 0x08, 0x50, 0x28, 0x08, 0x90, 0x33, 0x50, 0x20, 0x08, 0x50, 0x3F, 0x08, 0x50, 0x30, 0x08, 0x90, 0x28, 0x38, 0xFA, 0x50, 0x40, 0x08, 0x50, 0x49, 0x08, 0x50, 0x48, 0x08, 0x90, 0x1B, 0x50, 0x80, 0x08, 0x50, 0x9F, 0x08, 0x50, 0x90, 0x08, 0x90, 0x10, 0x38, 0xFA, 0x62, 0xD0, 0x04, 0x51, 0xCF, 0x62, 0xD0, 0x04, 0x53, 0xBF, 0x38, 0xFF, 0x20, 0x7F, 0x10, 0x4F, 0x62, 0xD0, 0x04, 0x51, 0xC3, 0xF6, 0x31
+		}
+	},
+	{
+		199,
+		79,
+		0x39,
+		0x00F2,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xF2, 0x3B, 0xFC, 0xC0, 0x21, 0x62, 0xD0, 0x04, 0x52, 0xFB, 0x3A, 0xC3, 0xC0, 0x18, 0x62, 0xD0, 0x04, 0x51, 0xC2, 0x23, 0xFA, 0x39, 0x00, 0xB0, 0x0D, 0x62, 0xD0, 0x04, 0x55, 0xC3, 0x00, 0x62, 0xD0, 0x04, 0x55, 0xCF, 0x00, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x0A, 0x50, 0x02, 0x3B, 0xFC, 0xD1, 0xA6, 0x7C, 0x73, 0x26, 0x54, 0x03, 0x56, 0x00, 0x00, 0x80, 0xCA, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x80, 0x46
+		}
+	},
+	{
+		200,
+		79,
+		0x39,
+		0x00F3,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xF3, 0x06, 0xE8, 0xC0, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x7C, 0x6D, 0x8A, 0x51, 0xE8, 0x01, 0xE0, 0x54, 0x02, 0x51, 0xE9, 0x09, 0x01, 0x54, 0x01, 0x7C, 0x6D, 0xA5, 0x65, 0xE8, 0x6B, 0xE9, 0x06, 0xE8, 0xDD, 0x0E, 0xE9, 0x02, 0x7C, 0x6D, 0x83, 0x7C, 0x6E, 0xE1, 0x52, 0x02, 0x53, 0xE6, 0x52, 0x01, 0x60, 0xD4, 0x3E, 0xE6, 0x53, 0xE6, 0x50, 0x00, 0x3A, 0xE9, 0xB0, 0x07, 0x51, 0xE6, 0x3A, 0xC6, 0xD3
+		}
+	},
+	{
+		201,
+		79,
+		0x39,
+		0x00F4,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xF4, 0xE8, 0xA0, 0x03, 0x80, 0x84, 0x62, 0xD0, 0x00, 0x52, 0x02, 0x01, 0x06, 0x7C, 0x6E, 0xA3, 0x54, 0x04, 0x3E, 0xE8, 0x54, 0x05, 0x52, 0x02, 0x01, 0x04, 0x7C, 0x6E, 0xA3, 0x54, 0x06, 0x3E, 0xE8, 0x54, 0x07, 0x7C, 0x6D, 0xA5, 0x65, 0xE8, 0x6B, 0xE9, 0x06, 0xE8, 0xF5, 0x0E, 0xE9, 0x02, 0x7C, 0x6D, 0x83, 0x7C, 0x6E, 0xE1, 0x51, 0xE9, 0x08, 0x51, 0xE8, 0x08, 0x52, 0x04, 0x08, 0x52, 0x05, 0xBB, 0xBE
+		}
+	},
+	{
+		202,
+		79,
+		0x39,
+		0x00F5,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xF5, 0x08, 0x91, 0x1B, 0x38, 0xFC, 0x62, 0xD0, 0x00, 0x52, 0x03, 0x7C, 0x71, 0xD1, 0xC0, 0x31, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x65, 0xE8, 0x6B, 0xE9, 0x06, 0xE8, 0xA1, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x7C, 0x6E, 0xE1, 0x51, 0xE9, 0x08, 0x51, 0xE8, 0x08, 0x52, 0x06, 0x08, 0x52, 0x07, 0x08, 0x90, 0xEB, 0x38, 0xFC, 0x62, 0xD0, 0x00, 0x52, 0x03, 0x7C, 0x71, 0xD1, 0xD0, 0x03, 0x80, 0xCA, 0xDD
+		}
+	},
+	{
+		203,
+		79,
+		0x39,
+		0x00F6,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xF6, 0x08, 0x77, 0x00, 0x7C, 0x72, 0x41, 0xCF, 0x33, 0x50, 0x04, 0x3B, 0xFC, 0xD0, 0x09, 0x62, 0xD0, 0x04, 0x55, 0xC0, 0x04, 0x80, 0x08, 0x52, 0xFC, 0x62, 0xD0, 0x04, 0x53, 0xC0, 0x7C, 0x72, 0x41, 0xA0, 0xAD, 0x56, 0x00, 0x00, 0x80, 0x89, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0xC0, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x7C, 0x6D, 0x8A, 0x51, 0xE8, 0x01, 0xE0, 0x54, 0x02, 0x51, 0x65, 0x14
+		}
+	},
+	{
+		204,
+		79,
+		0x39,
+		0x00F7,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xF7, 0xE9, 0x09, 0x01, 0x54, 0x01, 0x7C, 0x6F, 0xF6, 0x53, 0xE8, 0x7C, 0x6E, 0xAE, 0x65, 0xE6, 0x6B, 0xE7, 0x06, 0xE6, 0xDD, 0x0E, 0xE7, 0x02, 0x51, 0xE7, 0x60, 0xD5, 0x50, 0x00, 0x3F, 0xE6, 0x51, 0xE8, 0x3F, 0xE6, 0x52, 0x02, 0x01, 0x06, 0x53, 0xE8, 0x52, 0x01, 0x09, 0x00, 0x7C, 0x6D, 0xE3, 0x3E, 0xE8, 0x53, 0xE8, 0x7C, 0x6E, 0xAE, 0x65, 0xE6, 0x6B, 0xE7, 0x06, 0xE6, 0xF5, 0x0E, 0xE7, 0xC3, 0xD1
+		}
+	},
+	{
+		205,
+		79,
+		0x39,
+		0x00F8,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xF8, 0x02, 0x7C, 0x6D, 0xEA, 0x51, 0xE8, 0x3F, 0xE6, 0x52, 0x02, 0x01, 0x04, 0x53, 0xE8, 0x52, 0x01, 0x09, 0x00, 0x7C, 0x6D, 0xE3, 0x3E, 0xE8, 0x53, 0xE8, 0x7C, 0x6E, 0xAE, 0x65, 0xE6, 0x6B, 0xE7, 0x06, 0xE6, 0xA1, 0x0E, 0xE7, 0x03, 0x7C, 0x6D, 0xEA, 0x51, 0xE8, 0x3F, 0xE6, 0x77, 0x00, 0x7C, 0x72, 0x41, 0xCF, 0x74, 0x3D, 0xFB, 0x00, 0xB0, 0x09, 0x56, 0x09, 0x01, 0x56, 0x08, 0x00, 0x80, 0x53, 0xF2
+		}
+	},
+	{
+		206,
+		79,
+		0x39,
+		0x00F9,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xF9, 0x07, 0x56, 0x09, 0x00, 0x56, 0x08, 0x00, 0x62, 0xD0, 0x00, 0x52, 0x09, 0x80, 0x0D, 0x62, 0xD0, 0x00, 0x50, 0x00, 0x80, 0x06, 0x62, 0xD0, 0x00, 0x50, 0x00, 0x38, 0xF6, 0x20, 0x7F, 0x10, 0x4F, 0x52, 0xFC, 0x13, 0xFA, 0x52, 0xFB, 0x1B, 0xF9, 0xC0, 0x12, 0x62, 0xD0, 0x00, 0x52, 0xFC, 0x13, 0xFA, 0x53, 0xE8, 0x52, 0xFB, 0x1B, 0xF9, 0x53, 0xE9, 0x80, 0x10, 0x62, 0xD0, 0x00, 0x52, 0xFA, 0x95, 0x77
+		}
+	},
+	{
+		207,
+		79,
+		0x39,
+		0x00FA,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xFA, 0x13, 0xFC, 0x53, 0xE8, 0x52, 0xF9, 0x1B, 0xFB, 0x53, 0xE9, 0x20, 0x7F, 0x10, 0x4F, 0x7C, 0x72, 0x93, 0xB0, 0x22, 0x3D, 0xFC, 0x01, 0xB0, 0x32, 0x62, 0xD0, 0x04, 0x51, 0xCF, 0x08, 0x50, 0x0E, 0x08, 0x7C, 0x32, 0x0C, 0x62, 0xD0, 0x04, 0x51, 0xC3, 0x08, 0x50, 0x0F, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x80, 0x16, 0x7C, 0x6F, 0x3C, 0x39, 0x30, 0xB0, 0x0F, 0x62, 0xD0, 0x04, 0x51, 0xC3, 0xA8, 0x9E
+		}
+	},
+	{
+		208,
+		79,
+		0x39,
+		0x00FB,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xFB, 0x08, 0x50, 0x24, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x52, 0xFC, 0x08, 0x7C, 0x3A, 0x2D, 0x38, 0xFF, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x62, 0xE3, 0x38, 0x7C, 0x2B, 0x06, 0x71, 0x01, 0x90, 0x67, 0x90, 0xC5, 0x80, 0x5D, 0x62, 0xD0, 0x00, 0x26, 0xAE, 0xFB, 0x51, 0xAE, 0x60, 0x00, 0x7C, 0x70, 0x4B, 0x51, 0xAE, 0x29, 0x04, 0x53, 0xAE, 0x51, 0xAE, 0x60, 0x00, 0x7C, 0x70, 0x4B, 0x26, 0x61, 0x11
+		}
+	},
+	{
+		209,
+		79,
+		0x39,
+		0x00FC,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xFC, 0xAF, 0xFD, 0x51, 0xAF, 0x60, 0x04, 0x7C, 0x70, 0x4B, 0x51, 0xAF, 0x29, 0x02, 0x7C, 0x6F, 0x20, 0x56, 0x01, 0x00, 0x80, 0x03, 0x77, 0x01, 0x3D, 0x01, 0x0A, 0xCF, 0xFA, 0x62, 0xE3, 0x38, 0x90, 0x9D, 0x62, 0xD0, 0x04, 0x3C, 0xC4, 0x00, 0xA0, 0x0C, 0x7C, 0x45, 0x53, 0x7C, 0x2C, 0x1E, 0x62, 0xD0, 0x00, 0x54, 0x00, 0x52, 0x00, 0x08, 0x90, 0x9B, 0x52, 0x00, 0x08, 0x7C, 0x39, 0x02, 0x38, 0x9A, 0x84
+		}
+	},
+	{
+		210,
+		79,
+		0x39,
+		0x00FD,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xFD, 0xFE, 0x8F, 0xA3, 0x38, 0xFE, 0x20, 0x8F, 0xFF, 0x10, 0x4F, 0x38, 0x01, 0x10, 0x7C, 0x16, 0x34, 0x20, 0x10, 0x57, 0x13, 0x50, 0x88, 0x7C, 0x2B, 0xA8, 0x20, 0x62, 0xD0, 0x04, 0x55, 0xC4, 0x01, 0x62, 0xD0, 0x00, 0x55, 0xA7, 0x0C, 0x62, 0xD0, 0x00, 0x55, 0xA8, 0x05, 0x62, 0xD0, 0x00, 0x55, 0xA9, 0x01, 0x10, 0x7C, 0x17, 0xB7, 0x7C, 0x19, 0x82, 0x62, 0xD0, 0x00, 0x20, 0x39, 0x00, 0xA0, 0xEE, 0x2D
+		}
+	},
+	{
+		211,
+		79,
+		0x39,
+		0x00FE,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xFE, 0x1F, 0x71, 0x10, 0x5D, 0xE0, 0x54, 0x00, 0x41, 0xE0, 0xE7, 0x43, 0xE0, 0x18, 0x70, 0xCF, 0x62, 0xE3, 0x38, 0x7C, 0x49, 0x07, 0x62, 0xE3, 0x38, 0x52, 0x00, 0x7C, 0x72, 0xB2, 0x80, 0x06, 0x10, 0x7C, 0x10, 0x74, 0x20, 0x38, 0xFF, 0x20, 0x7F, 0x7C, 0x33, 0x69, 0x7C, 0x40, 0x59, 0x7C, 0x45, 0x51, 0x7C, 0x2B, 0xCA, 0x7C, 0x3A, 0x31, 0x7C, 0x40, 0x10, 0x7C, 0x2B, 0x19, 0x7F, 0x7C, 0x40, 0x55, 0xFC
+		}
+	},
+	{
+		212,
+		79,
+		0x39,
+		0x00FF,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xFF, 0x98, 0x7C, 0x36, 0x78, 0x7C, 0x45, 0x52, 0x7C, 0x2B, 0xFD, 0x7C, 0x3A, 0x4C, 0x7C, 0x40, 0x1E, 0x7C, 0x2B, 0x23, 0x7F, 0x10, 0x4F, 0x52, 0xFC, 0x08, 0x7C, 0x38, 0x88, 0x52, 0xFC, 0x08, 0x7C, 0x45, 0x4D, 0x38, 0xFE, 0x52, 0xFC, 0x08, 0x7C, 0x45, 0x76, 0x52, 0xFC, 0x08, 0x7C, 0x2E, 0x27, 0x38, 0xFE, 0x52, 0xFC, 0x08, 0x7C, 0x3E, 0x8C, 0x52, 0xFC, 0x08, 0x7C, 0x40, 0x28, 0x38, 0xFE, 0x1F, 0x91
+		}
+	},
+	{
+		213,
+		79,
+		0x39,
+		0x0100,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x00, 0x52, 0xFC, 0x08, 0x7C, 0x2B, 0x48, 0x52, 0xFC, 0x08, 0x7C, 0x38, 0xC9, 0x38, 0xFE, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x62, 0xD0, 0x04, 0x55, 0xC5, 0x04, 0x38, 0xFF, 0x20, 0x7F, 0x7F, 0x62, 0xD0, 0x04, 0x51, 0xC5, 0x62, 0xD0, 0x00, 0x7F, 0x10, 0x4F, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x62, 0xD0, 0x00, 0x51, 0x54, 0x54, 0x01, 0x51, 0x53, 0x54, 0x00, 0x70, 0xFE, 0x10, 0x7C, 0x1E, 0xFE, 0x51
+		}
+	},
+	{
+		214,
+		79,
+		0x39,
+		0x0101,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x01, 0x1C, 0x62, 0xD0, 0x00, 0x20, 0x10, 0x52, 0x00, 0x08, 0x52, 0x01, 0x20, 0x7C, 0x1D, 0xD4, 0x20, 0x62, 0xDA, 0xF7, 0x71, 0x01, 0x38, 0xFE, 0x20, 0x7F, 0x10, 0x7C, 0x1E, 0x1C, 0x20, 0x70, 0xFE, 0x62, 0xD0, 0x00, 0x55, 0x0D, 0x00, 0x55, 0x0C, 0x00, 0x7C, 0x72, 0x19, 0x92, 0x21, 0x7C, 0x73, 0x6D, 0x10, 0x7C, 0x1D, 0xCC, 0x7C, 0x20, 0x80, 0x7C, 0x20, 0x78, 0x20, 0x62, 0xD0, 0x00, 0x55, 0xFD, 0x50
+		}
+	},
+	{
+		215,
+		79,
+		0x39,
+		0x0102,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x02, 0xEF, 0x00, 0x55, 0xEE, 0x00, 0x62, 0xD0, 0x03, 0x55, 0xC8, 0x00, 0x55, 0xC9, 0x00, 0x55, 0xCA, 0x00, 0x55, 0xCB, 0x00, 0x7C, 0x2B, 0x67, 0x7F, 0x10, 0x4F, 0x38, 0x03, 0x7C, 0x72, 0x93, 0xB0, 0xF9, 0x7C, 0x70, 0xE8, 0x54, 0x01, 0x51, 0x0C, 0x54, 0x00, 0x71, 0x01, 0x62, 0xD0, 0x03, 0x47, 0x99, 0x02, 0xA0, 0x3F, 0x93, 0x58, 0x9F, 0x74, 0x7C, 0x70, 0xE8, 0x08, 0x51, 0x0C, 0x62, 0xD0, 0xDD, 0x11
+		}
+	},
+	{
+		216,
+		79,
+		0x39,
+		0x0103,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x03, 0x00, 0x53, 0xEE, 0x18, 0x53, 0xEF, 0x71, 0x01, 0x62, 0xD0, 0x03, 0x55, 0xC8, 0x00, 0x55, 0xC9, 0x00, 0x55, 0xCA, 0x00, 0x55, 0xCB, 0x00, 0x7C, 0x2B, 0x67, 0x10, 0x7C, 0x18, 0x83, 0x20, 0x10, 0x57, 0x13, 0x50, 0x88, 0x7C, 0x2B, 0xA8, 0x7C, 0x10, 0x74, 0x20, 0x91, 0x3B, 0x7C, 0x6F, 0xEA, 0x81, 0x33, 0x62, 0xD0, 0x03, 0x51, 0x9A, 0x21, 0x0F, 0x54, 0x02, 0x62, 0xD0, 0x04, 0x3C, 0xCB, 0x0A, 0x6C
+		}
+	},
+	{
+		217,
+		79,
+		0x39,
+		0x0104,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x04, 0xF1, 0xB0, 0x45, 0x62, 0xD0, 0x03, 0x3C, 0x9C, 0x00, 0xA0, 0x03, 0x93, 0x67, 0x3D, 0x02, 0x00, 0xA0, 0x06, 0x7C, 0x6F, 0xEA, 0x80, 0x97, 0x7C, 0x73, 0x2E, 0xA0, 0x27, 0x62, 0xD0, 0x03, 0x52, 0x01, 0x12, 0xE5, 0x62, 0xD0, 0x00, 0x53, 0xE8, 0x52, 0x00, 0x62, 0xD0, 0x03, 0x1A, 0xE4, 0x7C, 0x72, 0x49, 0x62, 0xD0, 0x03, 0x12, 0xDB, 0x7C, 0x70, 0xA1, 0x1A, 0xDA, 0xC0, 0x70, 0x91, 0x2A, 0x7B, 0x4F
+		}
+	},
+	{
+		218,
+		79,
+		0x39,
+		0x0105,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x05, 0x80, 0x6C, 0x7C, 0x6F, 0xEA, 0x80, 0x67, 0x62, 0xD0, 0x04, 0x3C, 0xCB, 0xF2, 0xB0, 0x1E, 0x3D, 0x02, 0x00, 0xB0, 0x06, 0x7C, 0x73, 0x2E, 0xB0, 0x08, 0x90, 0xCD, 0x7C, 0x6F, 0xEA, 0x80, 0x4E, 0x62, 0xD0, 0x03, 0x3C, 0x9D, 0x00, 0xA0, 0x46, 0x93, 0x0A, 0x80, 0x42, 0x9E, 0xBE, 0x7C, 0x70, 0xE8, 0x08, 0x51, 0x0C, 0x62, 0xD0, 0x00, 0x53, 0xEE, 0x18, 0x53, 0xEF, 0x71, 0x01, 0x62, 0xD0, 0xC8, 0xEA
+		}
+	},
+	{
+		219,
+		79,
+		0x39,
+		0x0106,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x06, 0x03, 0x55, 0xC8, 0x00, 0x55, 0xC9, 0x00, 0x55, 0xCA, 0x00, 0x55, 0xCB, 0x00, 0x7C, 0x2B, 0x67, 0x90, 0xFD, 0x90, 0x94, 0x7C, 0x6F, 0xEA, 0x80, 0x15, 0x62, 0xD0, 0x04, 0x3C, 0xCB, 0xF4, 0xA0, 0x0D, 0x10, 0x57, 0x00, 0x50, 0x01, 0x7C, 0x1D, 0xD4, 0x20, 0x7C, 0x73, 0x6D, 0x7C, 0x72, 0x93, 0xB0, 0x70, 0x7C, 0x70, 0xE8, 0x54, 0x01, 0x51, 0x0C, 0x54, 0x00, 0x71, 0x01, 0x62, 0xD0, 0x00, 0x44, 0xE3
+		}
+	},
+	{
+		220,
+		79,
+		0x39,
+		0x0107,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x07, 0x52, 0x01, 0x12, 0xEF, 0x62, 0xD0, 0x00, 0x53, 0xE8, 0x52, 0x00, 0x62, 0xD0, 0x00, 0x1A, 0xEE, 0x7C, 0x72, 0x49, 0x53, 0xE6, 0x51, 0xE9, 0x53, 0xE7, 0x51, 0xE6, 0x62, 0xD0, 0x03, 0x04, 0xCB, 0x62, 0xD0, 0x00, 0x51, 0xE7, 0x62, 0xD0, 0x03, 0x0C, 0xCA, 0x0E, 0xC9, 0x00, 0x0E, 0xC8, 0x00, 0x62, 0xD0, 0x00, 0x52, 0x01, 0x53, 0xEF, 0x52, 0x00, 0x53, 0xEE, 0x62, 0xD0, 0x03, 0x51, 0xCB, 0x39, 0xCE
+		}
+	},
+	{
+		221,
+		79,
+		0x39,
+		0x0108,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x08, 0x11, 0x30, 0x51, 0xCA, 0x19, 0x75, 0x51, 0xC9, 0x19, 0x00, 0x51, 0xC8, 0x19, 0x00, 0xC0, 0x12, 0x9E, 0x1A, 0x62, 0xD0, 0x03, 0x55, 0xC8, 0x00, 0x55, 0xC9, 0x00, 0x55, 0xCA, 0x00, 0x55, 0xCB, 0x00, 0x7C, 0x2B, 0x68, 0x38, 0xFD, 0x20, 0x7F, 0x62, 0xD0, 0x04, 0x55, 0xCB, 0xF1, 0x62, 0xD0, 0x03, 0x51, 0x9C, 0x08, 0x62, 0xD0, 0x03, 0x55, 0xD8, 0x00, 0x18, 0x53, 0xD9, 0x62, 0xD0, 0x03, 0xB7, 0xCB
+		}
+	},
+	{
+		222,
+		79,
+		0x39,
+		0x0109,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x09, 0x3C, 0x9C, 0x00, 0xA0, 0x14, 0x10, 0x62, 0xD0, 0x03, 0x51, 0xD8, 0x08, 0x51, 0xD9, 0x20, 0x7C, 0x1D, 0xD4, 0x20, 0x7C, 0x70, 0xAA, 0x80, 0x0F, 0x10, 0x57, 0x00, 0x50, 0x01, 0x7C, 0x1D, 0xD4, 0x20, 0x70, 0xFE, 0x7C, 0x72, 0x19, 0x7C, 0x71, 0x54, 0x7F, 0x62, 0xD0, 0x04, 0x55, 0xCB, 0xF2, 0x62, 0xD0, 0x03, 0x51, 0xD7, 0x08, 0x51, 0xD6, 0x62, 0xD0, 0x03, 0x53, 0xD8, 0x18, 0x53, 0xD9, 0xF0, 0x3E
+		}
+	},
+	{
+		223,
+		79,
+		0x39,
+		0x010A,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x0A, 0x10, 0x51, 0xD8, 0x08, 0x51, 0xD9, 0x20, 0x7C, 0x1D, 0xD4, 0x20, 0x7C, 0x70, 0xAA, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x62, 0xD0, 0x03, 0x50, 0x78, 0x3A, 0x9D, 0xD0, 0x07, 0x62, 0xD0, 0x03, 0x55, 0x9D, 0x78, 0x7C, 0x71, 0xD8, 0x53, 0xE9, 0x65, 0xE9, 0x7C, 0x71, 0xD8, 0x64, 0x64, 0x64, 0x02, 0xE9, 0x54, 0x00, 0x80, 0x09, 0x62, 0xD0, 0x03, 0x76, 0x9D, 0x07, 0x00, 0x0A, 0x62, 0xD0, 0x03, 0xA5, 0xA9
+		}
+	},
+	{
+		224,
+		79,
+		0x39,
+		0x010B,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x0B, 0x3C, 0x9D, 0x1A, 0xD0, 0x0A, 0x62, 0xD0, 0x03, 0x52, 0x00, 0x3A, 0x9C, 0xCF, 0xE8, 0x62, 0xD0, 0x03, 0x51, 0x9D, 0x08, 0x62, 0xD0, 0x03, 0x55, 0xD6, 0x00, 0x18, 0x53, 0xD7, 0x62, 0xD0, 0x03, 0x51, 0x9D, 0x7C, 0x70, 0x0E, 0x7C, 0x6D, 0x9C, 0x62, 0xD0, 0x03, 0x51, 0xD7, 0x62, 0xD0, 0x00, 0x04, 0xE8, 0x62, 0xD0, 0x03, 0x51, 0xD6, 0x62, 0xD0, 0x00, 0x0C, 0xE9, 0x7C, 0x70, 0x17, 0x08, 0x59, 0x12
+		}
+	},
+	{
+		225,
+		79,
+		0x39,
+		0x010C,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x0C, 0x51, 0xE9, 0x62, 0xD0, 0x03, 0x53, 0xD6, 0x18, 0x53, 0xD7, 0x62, 0xD0, 0x03, 0x51, 0x9E, 0x08, 0x62, 0xD0, 0x03, 0x55, 0xDA, 0x00, 0x18, 0x53, 0xDB, 0x51, 0xDB, 0x08, 0x51, 0xDA, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x18, 0x53, 0xE8, 0x7C, 0x6D, 0x9C, 0x62, 0xD0, 0x03, 0x51, 0xDB, 0x62, 0xD0, 0x00, 0x04, 0xE8, 0x62, 0xD0, 0x03, 0x51, 0xDA, 0x62, 0xD0, 0x00, 0x0C, 0xE9, 0x7C, 0x70, 0x17, 0x5E, 0x1D
+		}
+	},
+	{
+		226,
+		79,
+		0x39,
+		0x010D,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x0D, 0x08, 0x51, 0xE9, 0x62, 0xD0, 0x03, 0x53, 0xDA, 0x18, 0x53, 0xDB, 0x38, 0xFF, 0x20, 0x7F, 0x62, 0xD0, 0x00, 0x51, 0xAE, 0x21, 0x0D, 0x60, 0x00, 0x62, 0xD0, 0x00, 0x51, 0xAF, 0x21, 0xBB, 0x60, 0x04, 0x62, 0xD0, 0x00, 0x51, 0xB0, 0x21, 0x74, 0x7C, 0x73, 0x1E, 0x21, 0x00, 0x7C, 0x73, 0x16, 0x21, 0x10, 0x60, 0x10, 0x71, 0x10, 0x5D, 0x00, 0x70, 0xCF, 0x62, 0xD0, 0x04, 0x53, 0xCA, 0x71, 0x6E, 0x3E
+		}
+	},
+	{
+		227,
+		79,
+		0x39,
+		0x010E,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x0E, 0x10, 0x5D, 0x04, 0x70, 0xCF, 0x62, 0xD0, 0x04, 0x53, 0xC9, 0x71, 0x10, 0x5D, 0x08, 0x70, 0xCF, 0x62, 0xD0, 0x04, 0x53, 0xC8, 0x71, 0x10, 0x5D, 0x0C, 0x70, 0xCF, 0x62, 0xD0, 0x04, 0x53, 0xC7, 0x71, 0x10, 0x5D, 0x10, 0x70, 0xCF, 0x62, 0xD0, 0x04, 0x53, 0xC6, 0x71, 0x10, 0x43, 0x00, 0xF2, 0x43, 0x04, 0x44, 0x43, 0x08, 0x8B, 0x43, 0x0C, 0xFF, 0x43, 0x10, 0xEF, 0x70, 0xCF, 0x7F, 0x62, 0x34, 0xCB
+		}
+	},
+	{
+		228,
+		79,
+		0x39,
+		0x010F,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x0F, 0xD0, 0x04, 0x51, 0xCA, 0x71, 0x10, 0x60, 0x00, 0x70, 0xCF, 0x62, 0xD0, 0x04, 0x51, 0xC9, 0x71, 0x10, 0x60, 0x04, 0x70, 0xCF, 0x62, 0xD0, 0x04, 0x51, 0xC8, 0x71, 0x10, 0x60, 0x08, 0x70, 0xCF, 0x62, 0xD0, 0x04, 0x51, 0xC7, 0x71, 0x10, 0x60, 0x0C, 0x70, 0xCF, 0x62, 0xD0, 0x04, 0x51, 0xC6, 0x71, 0x10, 0x60, 0x10, 0x70, 0xCF, 0x62, 0xD0, 0x00, 0x51, 0xAE, 0x60, 0x00, 0x62, 0xD0, 0x00, 0xB4, 0xCC
+		}
+	},
+	{
+		229,
+		79,
+		0x39,
+		0x0110,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x10, 0x7C, 0x72, 0x51, 0x51, 0xB0, 0x7C, 0x73, 0x1E, 0x7C, 0x73, 0x16, 0x60, 0x10, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x71, 0x10, 0x43, 0xEC, 0x01, 0x70, 0xFE, 0x70, 0xCF, 0x9F, 0x32, 0x7C, 0x31, 0xC5, 0x62, 0xD0, 0x00, 0x39, 0x00, 0xB0, 0x3A, 0x7C, 0x72, 0xCB, 0x10, 0x70, 0xCF, 0x7C, 0x1D, 0xD0, 0x7C, 0x20, 0x7C, 0x20, 0x62, 0xDB, 0xFE, 0x7C, 0x39, 0xF1, 0x71, 0x10, 0x43, 0xD7, 0x20, 0x43, 0x58, 0x15
+		}
+	},
+	{
+		230,
+		79,
+		0x39,
+		0x0111,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x11, 0xC9, 0x80, 0x70, 0xCF, 0x43, 0xFF, 0x08, 0x71, 0x10, 0x41, 0xC9, 0x7F, 0x41, 0xD7, 0xDF, 0x40, 0x7C, 0x73, 0x7B, 0x70, 0xCF, 0x7C, 0x3A, 0x08, 0x10, 0x7C, 0x20, 0x78, 0x7C, 0x1D, 0xCC, 0x20, 0x62, 0xD0, 0x00, 0x55, 0xBB, 0xFF, 0x62, 0xD0, 0x03, 0x26, 0x99, 0xFD, 0x9F, 0x51, 0x71, 0x01, 0x38, 0xFE, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x56, 0x00, 0x00, 0x71, 0x10, 0x41, 0xEC, 0xFE, 0x27, 0xB4
+		}
+	},
+	{
+		231,
+		79,
+		0x39,
+		0x0112,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x12, 0x70, 0xFE, 0x70, 0xCF, 0x9E, 0xC9, 0x7C, 0x71, 0x10, 0x80, 0x6B, 0x7C, 0x32, 0x06, 0x62, 0xD0, 0x00, 0x7C, 0x31, 0xC5, 0x39, 0x00, 0xB0, 0x5A, 0x3D, 0x00, 0x00, 0xB0, 0x55, 0x62, 0xD0, 0x00, 0x3C, 0xBB, 0xFF, 0xA0, 0x4D, 0x7C, 0x39, 0xE9, 0x62, 0xD0, 0x03, 0x51, 0xD9, 0x11, 0x02, 0x62, 0xD0, 0x00, 0x53, 0xE8, 0x62, 0xD0, 0x03, 0x51, 0xD8, 0x19, 0x00, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x50, 0x07
+		}
+	},
+	{
+		232,
+		79,
+		0x39,
+		0x0113,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x13, 0x62, 0xD0, 0x00, 0x51, 0xBB, 0x62, 0xD0, 0x00, 0x7C, 0x70, 0xBF, 0xC0, 0x22, 0x7C, 0x72, 0xCB, 0x56, 0x00, 0x01, 0x10, 0x70, 0xCF, 0x7C, 0x20, 0x7C, 0x20, 0x62, 0xDB, 0xFE, 0x10, 0x7C, 0x0C, 0x80, 0x20, 0x71, 0x10, 0x7C, 0x73, 0x7B, 0x10, 0x70, 0xCF, 0x7C, 0x20, 0x78, 0x20, 0x7C, 0x39, 0xED, 0x71, 0x01, 0x70, 0xFE, 0x62, 0xD0, 0x00, 0x51, 0x0D, 0x62, 0xD0, 0x03, 0x12, 0xD5, 0x62, 0x05, 0x72
+		}
+	},
+	{
+		233,
+		79,
+		0x39,
+		0x0114,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x14, 0xD0, 0x00, 0x53, 0xE8, 0x62, 0xD0, 0x00, 0x51, 0x0C, 0x62, 0xD0, 0x03, 0x1A, 0xD4, 0x7C, 0x72, 0x49, 0x62, 0xD0, 0x03, 0x12, 0xD9, 0x7C, 0x70, 0xA1, 0x1A, 0xD8, 0xCF, 0x6F, 0x52, 0x01, 0x7C, 0x72, 0xB2, 0x9E, 0x9B, 0x7C, 0x71, 0x54, 0x71, 0x01, 0x38, 0xFE, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x62, 0xD0, 0x04, 0x3C, 0xCB, 0xF2, 0xB0, 0x09, 0x56, 0x01, 0x01, 0x56, 0x00, 0x00, 0x80, 0x34, 0xD1
+		}
+	},
+	{
+		234,
+		79,
+		0x39,
+		0x0115,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x15, 0x04, 0x7C, 0x6F, 0xC9, 0x62, 0xD0, 0x00, 0x52, 0x01, 0x38, 0xFE, 0x20, 0x7F, 0x10, 0x4F, 0x20, 0x7F, 0x7F, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x56, 0x00, 0x00, 0x10, 0x7C, 0x18, 0x83, 0x7C, 0x19, 0x64, 0x20, 0x56, 0x00, 0x01, 0x52, 0x00, 0x08, 0x7C, 0x2B, 0x34, 0x38, 0xFF, 0x10, 0x7C, 0x18, 0x4D, 0x20, 0x38, 0xFF, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x04, 0x7C, 0x6F, 0x3C, 0x54, 0x01, 0x3D, 0x3A, 0xDE
+		}
+	},
+	{
+		235,
+		79,
+		0x39,
+		0x0116,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x16, 0x01, 0x50, 0xB0, 0x29, 0x56, 0x00, 0x00, 0x80, 0x1D, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0x00, 0x0E, 0xE9, 0x02, 0x7C, 0x6D, 0x83, 0x08, 0x52, 0x00, 0x01, 0x07, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x77, 0x00, 0x3D, 0x00, 0x99, 0xCF, 0xE0, 0x81, 0x85, 0x3D, 0x01, 0x40, 0xB0, 0x29, 0x56, 0x00, 0x00, 0x80, 0x1D, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0x00, 0x0E, 0x92, 0x8F
+		}
+	},
+	{
+		236,
+		79,
+		0x39,
+		0x0117,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x17, 0xE9, 0x05, 0x7C, 0x6D, 0x83, 0x08, 0x52, 0x00, 0x01, 0x07, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x77, 0x00, 0x3D, 0x00, 0x99, 0xCF, 0xE0, 0x81, 0x58, 0x3D, 0x01, 0x70, 0xB0, 0x61, 0x7C, 0x39, 0x34, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x26, 0xE9, 0x01, 0x3C, 0xE9, 0x01, 0xB0, 0x29, 0x56, 0x00, 0x00, 0x80, 0x1D, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0x00, 0x0E, 0xE9, 0x05, 0x7C, 0xC7, 0xFA
+		}
+	},
+	{
+		237,
+		79,
+		0x39,
+		0x0118,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x18, 0x6D, 0x83, 0x08, 0x52, 0x00, 0x01, 0x07, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x77, 0x00, 0x3D, 0x00, 0x99, 0xCF, 0xE0, 0x81, 0x1B, 0x56, 0x00, 0x00, 0x80, 0x1D, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0x00, 0x0E, 0xE9, 0x08, 0x7C, 0x6D, 0x83, 0x08, 0x52, 0x00, 0x01, 0x07, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x77, 0x00, 0x3D, 0x00, 0x99, 0xCF, 0xE0, 0x80, 0xF3, 0x3D, 0x01, 0xF5, 0x57
+		}
+	},
+	{
+		238,
+		79,
+		0x39,
+		0x0119,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x19, 0x60, 0xB0, 0x95, 0x7C, 0x39, 0x34, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x26, 0xE9, 0x01, 0x3C, 0xE9, 0x01, 0xB0, 0x5D, 0x50, 0x1B, 0x08, 0x50, 0x07, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x56, 0x00, 0x00, 0x80, 0x1D, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0x00, 0x0E, 0xE9, 0x09, 0x7C, 0x6F, 0x5C, 0x08, 0x52, 0x00, 0x01, 0x08, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x77, 0x00, 0x3D, 0xF8, 0x5E
+		}
+	},
+	{
+		239,
+		79,
+		0x39,
+		0x011A,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x1A, 0x00, 0x1B, 0xCF, 0xE0, 0x56, 0x02, 0x23, 0x56, 0x00, 0x00, 0x80, 0x1D, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0x25, 0x0E, 0xE9, 0x09, 0x7C, 0x6F, 0x5C, 0x08, 0x52, 0x02, 0x03, 0x00, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x77, 0x00, 0x3D, 0x00, 0x1B, 0xCF, 0xE0, 0x80, 0x82, 0x56, 0x00, 0x00, 0x80, 0x1D, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0x00, 0x0E, 0xE9, 0x0A, 0xCB, 0x05
+		}
+	},
+	{
+		240,
+		79,
+		0x39,
+		0x011B,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x1B, 0x7C, 0x6F, 0x5C, 0x08, 0x52, 0x00, 0x01, 0x07, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x77, 0x00, 0x3D, 0x00, 0x99, 0xCF, 0xE0, 0x80, 0x5A, 0x3D, 0x01, 0x30, 0xB0, 0x55, 0x62, 0xD0, 0x03, 0x3C, 0x9F, 0x99, 0xD0, 0x4D, 0x62, 0xD0, 0x03, 0x51, 0x9F, 0x7C, 0x70, 0x0E, 0x06, 0xE8, 0x00, 0x0E, 0xE9, 0x08, 0x7C, 0x6D, 0x83, 0x08, 0x50, 0x26, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x62, 0xD0, 0xD0, 0x10
+		}
+	},
+	{
+		241,
+		79,
+		0x39,
+		0x011C,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x1C, 0x03, 0x51, 0x9F, 0x7C, 0x70, 0x0E, 0x06, 0xE8, 0x00, 0x0E, 0xE9, 0x02, 0x7C, 0x6D, 0x83, 0x08, 0x50, 0x27, 0x08, 0x7C, 0x32, 0x0C, 0x62, 0xD0, 0x03, 0x51, 0x9F, 0x7C, 0x70, 0x0E, 0x06, 0xE8, 0x00, 0x0E, 0xE9, 0x05, 0x7C, 0x6D, 0x83, 0x08, 0x50, 0x28, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x38, 0xFC, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x03, 0x56, 0x00, 0x00, 0x80, 0x5A, 0x62, 0xD0, 0x00, 0x3B, 0xE7
+		}
+	},
+	{
+		242,
+		79,
+		0x39,
+		0x011D,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x1D, 0x52, 0x00, 0x7C, 0x6D, 0x8A, 0x06, 0xE8, 0xE6, 0x0E, 0xE9, 0x01, 0x7C, 0x6D, 0x83, 0x54, 0x01, 0x3E, 0xE8, 0x54, 0x02, 0x7C, 0x70, 0x01, 0x7C, 0x70, 0xD4, 0x52, 0x02, 0x53, 0xE9, 0x7C, 0x6E, 0xAE, 0x06, 0xE6, 0xB8, 0x0E, 0xE7, 0x03, 0x7C, 0x6D, 0xEA, 0x52, 0x00, 0x7C, 0x6D, 0x8A, 0x06, 0xE8, 0xE4, 0x0E, 0xE9, 0x01, 0x7C, 0x6D, 0x83, 0x54, 0x01, 0x3E, 0xE8, 0x54, 0x02, 0x7C, 0x70, 0xCA, 0x06
+		}
+	},
+	{
+		243,
+		79,
+		0x39,
+		0x011E,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x1E, 0xD4, 0x7C, 0x70, 0x01, 0x52, 0x02, 0x53, 0xE9, 0x7C, 0x6E, 0xAE, 0x06, 0xE6, 0xB4, 0x0E, 0xE7, 0x03, 0x7C, 0x6D, 0xEA, 0x77, 0x00, 0x7C, 0x6F, 0xC1, 0xCF, 0xA3, 0x52, 0xFC, 0x08, 0x7C, 0x5F, 0x91, 0x38, 0xFF, 0x38, 0xFD, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x7C, 0x6F, 0xB9, 0x52, 0xFB, 0x60, 0xD5, 0x50, 0x04, 0x3F, 0xE8, 0x52, 0xFC, 0x01, 0x01, 0x7C, 0x71, 0x76, 0x50, 0x01, 0x3F, 0x32, 0xD7
+		}
+	},
+	{
+		244,
+		79,
+		0x39,
+		0x011F,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x1F, 0xE8, 0x38, 0xFF, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x03, 0x7C, 0x6F, 0xB9, 0x52, 0xFB, 0x7C, 0x6D, 0xE3, 0x47, 0xE9, 0x80, 0xBF, 0xF4, 0x52, 0xFC, 0x53, 0xE8, 0x52, 0xFB, 0x60, 0xD4, 0x3E, 0xE8, 0x39, 0x04, 0xB0, 0x73, 0x56, 0x00, 0x00, 0x80, 0x3F, 0x62, 0xD0, 0x00, 0x52, 0x00, 0x7C, 0x6D, 0x8A, 0x52, 0xFC, 0x01, 0x02, 0x53, 0xE6, 0x52, 0xFB, 0x09, 0x00, 0x53, 0xE7, 0x51, 0xE8, 0x02, 0x69, 0x46
+		}
+	},
+	{
+		245,
+		79,
+		0x39,
+		0x0120,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x20, 0xE6, 0x53, 0xE6, 0x51, 0xE9, 0x0A, 0xE7, 0x53, 0xE7, 0x06, 0xE8, 0xE0, 0x0E, 0xE9, 0x01, 0x51, 0xE9, 0x60, 0xD4, 0x51, 0xE7, 0x60, 0xD5, 0x10, 0x57, 0x08, 0x62, 0xD0, 0x00, 0x3E, 0xE8, 0x3F, 0xE6, 0x79, 0xBF, 0xF7, 0x20, 0x77, 0x00, 0x7C, 0x6F, 0xC1, 0xCF, 0xBE, 0x62, 0xD0, 0x00, 0x52, 0xFC, 0x01, 0x22, 0x7C, 0x71, 0x76, 0x52, 0xF9, 0x3F, 0xE8, 0x52, 0xFC, 0x01, 0x23, 0x7C, 0x71, 0xE7, 0x43
+		}
+	},
+	{
+		246,
+		79,
+		0x39,
+		0x0121,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x21, 0x76, 0x52, 0xFA, 0x3F, 0xE8, 0x52, 0xFC, 0x01, 0x24, 0x7C, 0x71, 0x76, 0x62, 0xD0, 0x00, 0x51, 0xA2, 0x62, 0xD0, 0x00, 0x3F, 0xE8, 0x7C, 0x6F, 0xB9, 0x52, 0xFB, 0x60, 0xD5, 0x50, 0x84, 0x3F, 0xE8, 0x38, 0xFD, 0x20, 0x7F, 0x10, 0x4F, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x62, 0xD0, 0x03, 0x55, 0xAC, 0x19, 0x55, 0xAB, 0x00, 0x55, 0xAA, 0x02, 0x62, 0xD0, 0x00, 0x55, 0xE8, 0x00, 0x55, 0xDD, 0x30
+		}
+	},
+	{
+		247,
+		79,
+		0x39,
+		0x0122,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x22, 0xE9, 0x0A, 0x7C, 0x6D, 0xAD, 0x7C, 0x6D, 0xC6, 0x51, 0xE8, 0x62, 0xD0, 0x03, 0x53, 0xA9, 0x10, 0x50, 0x03, 0x08, 0x50, 0xA9, 0x5C, 0x18, 0x7C, 0x20, 0x98, 0x20, 0x62, 0xD0, 0x03, 0x50, 0x00, 0x01, 0x80, 0x53, 0xAB, 0x50, 0x02, 0x09, 0x00, 0x53, 0xAA, 0x62, 0xD0, 0x00, 0x55, 0xE8, 0x00, 0x55, 0xE9, 0x0A, 0x7C, 0x6D, 0xC6, 0x7C, 0x6D, 0xAD, 0x7C, 0x73, 0x3C, 0x51, 0xE8, 0x62, 0xD0, 0xBC, 0xEF
+		}
+	},
+	{
+		248,
+		79,
+		0x39,
+		0x0123,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x23, 0x03, 0x53, 0xA9, 0x10, 0x50, 0x03, 0x08, 0x50, 0xA9, 0x5C, 0x18, 0x7C, 0x20, 0x98, 0x20, 0x62, 0xD0, 0x03, 0x55, 0xAB, 0xA0, 0x55, 0xAA, 0x01, 0x62, 0xD0, 0x00, 0x55, 0xE8, 0x00, 0x55, 0xE9, 0x0A, 0x7C, 0x6D, 0xC6, 0x7C, 0x6D, 0xAD, 0x16, 0xE8, 0x02, 0x1E, 0xE9, 0x00, 0x51, 0xE8, 0x62, 0xD0, 0x03, 0x53, 0xA9, 0x10, 0x50, 0x03, 0x08, 0x50, 0xA9, 0x5C, 0x18, 0x7C, 0x20, 0x98, 0x62, 0xDB, 0x2E
+		}
+	},
+	{
+		249,
+		79,
+		0x39,
+		0x0124,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x24, 0xD0, 0x00, 0x20, 0x38, 0xFE, 0x20, 0x7F, 0x10, 0x50, 0x99, 0x7C, 0x12, 0x90, 0x20, 0x9F, 0x59, 0x10, 0x7C, 0x10, 0x74, 0x20, 0x7F, 0x10, 0x4F, 0x10, 0x52, 0xFB, 0x08, 0x52, 0xFC, 0x20, 0x7C, 0x13, 0xEA, 0x20, 0x9F, 0x44, 0x10, 0x7C, 0x10, 0x74, 0x20, 0x20, 0x7F, 0x10, 0x4F, 0x10, 0x52, 0xFC, 0x7C, 0x13, 0xFB, 0x20, 0x9F, 0x32, 0x10, 0x7C, 0x10, 0x74, 0x20, 0x20, 0x7F, 0x10, 0x4F, 0x86, 0x85
+		}
+	},
+	{
+		250,
+		79,
+		0x39,
+		0x0125,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x25, 0x10, 0x52, 0xFC, 0x7C, 0x14, 0x0D, 0x20, 0x9F, 0x20, 0x10, 0x7C, 0x10, 0x74, 0x20, 0x20, 0x7F, 0x10, 0x4F, 0x52, 0xFC, 0x62, 0xD0, 0x00, 0x53, 0xA9, 0x20, 0x7F, 0x62, 0xD0, 0x00, 0x55, 0x39, 0x08, 0x55, 0x3A, 0x08, 0x55, 0x3B, 0x08, 0x55, 0x3C, 0x08, 0x55, 0x01, 0x00, 0x55, 0x00, 0x00, 0x55, 0x03, 0x00, 0x55, 0x02, 0x00, 0x55, 0x05, 0x00, 0x55, 0x04, 0x00, 0x55, 0x07, 0x00, 0x55, 0x71, 0x5C
+		}
+	},
+	{
+		251,
+		79,
+		0x39,
+		0x0126,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x26, 0x06, 0x00, 0x55, 0x55, 0x20, 0x55, 0x54, 0x01, 0x55, 0x53, 0x00, 0x43, 0xE6, 0x01, 0x43, 0xE0, 0x08, 0x7F, 0x49, 0xE0, 0x08, 0xB0, 0x05, 0x50, 0x00, 0x80, 0x07, 0x08, 0x7C, 0x54, 0x59, 0x38, 0xFF, 0x7F, 0x08, 0x7C, 0x54, 0xA4, 0x38, 0xFF, 0x7F, 0x08, 0x7C, 0x54, 0xB5, 0x38, 0xFF, 0x7F, 0x08, 0x7C, 0x54, 0xE2, 0x38, 0xFF, 0x7F, 0x08, 0x7C, 0x54, 0xF3, 0x38, 0xFF, 0x7F, 0x08, 0x7C, 0x84, 0x83
+		}
+	},
+	{
+		252,
+		79,
+		0x39,
+		0x0127,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x27, 0x55, 0x04, 0x38, 0xFF, 0x7F, 0x08, 0x7C, 0x55, 0x15, 0x38, 0xFF, 0x7F, 0x08, 0x7C, 0x5B, 0xE0, 0x38, 0xFF, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x50, 0x00, 0x3D, 0xF9, 0x80, 0xC0, 0x06, 0x7C, 0x4A, 0x8B, 0x50, 0xC0, 0x3D, 0xF5, 0x80, 0xC0, 0x0C, 0x10, 0x4B, 0x11, 0x04, 0x4B, 0x7C, 0x4A, 0x8B, 0x31, 0x80, 0x20, 0x08, 0x7C, 0x4A, 0x28, 0x18, 0x6A, 0xD0, 0x04, 0x7C, 0x4A, 0x8B, 0x6A, 0xD0, 0x1F, 0xBA
+		}
+	},
+	{
+		253,
+		79,
+		0x39,
+		0x0128,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x28, 0x08, 0x4B, 0x11, 0x04, 0x4B, 0x7C, 0x4A, 0x8B, 0x38, 0xFF, 0x20, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x10, 0x4F, 0x5D, 0xD0, 0x08, 0x62, 0xD0, 0x00, 0x7C, 0x4A, 0xA3, 0x51, 0xE1, 0x54, 0xFB, 0x18, 0x60, 0xD0, 0x20, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x5D, 0xD0, 0x08, 0x62, 0xD0, 0x00, 0x51, 0xE9, 0x08, 0x50, 0x00, 0x53, 0xE9, 0x53, 0xE1, 0x53, 0xE0, 0x53, 0xDF, 0x56, 0x00, 0x20, 0x66, 0xFC, 0xD7, 0x2B
+		}
+	},
+	{
+		254,
+		79,
+		0x39,
+		0x0129,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x29, 0x6C, 0xFB, 0x6C, 0xFA, 0x6C, 0xF9, 0x6B, 0xDF, 0x6B, 0xE0, 0x6B, 0xE1, 0x6B, 0xE9, 0x51, 0xDF, 0x1B, 0xF8, 0x51, 0xE0, 0x1B, 0xF7, 0x51, 0xE1, 0x1B, 0xF6, 0x51, 0xE9, 0x1B, 0xF5, 0xC0, 0x11, 0x53, 0xE9, 0x52, 0xF8, 0x14, 0xDF, 0x52, 0xF7, 0x1C, 0xE0, 0x52, 0xF6, 0x1C, 0xE1, 0x77, 0xFC, 0x7B, 0x00, 0xBF, 0xCB, 0x51, 0xDF, 0x54, 0xF8, 0x51, 0xE0, 0x54, 0xF7, 0x51, 0xE1, 0x54, 0xF6, 0x3A, 0xF2
+		}
+	},
+	{
+		255,
+		79,
+		0x39,
+		0x012A,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x2A, 0x51, 0xE9, 0x54, 0xF5, 0x18, 0x53, 0xE9, 0x18, 0x60, 0xD0, 0x7F, 0x37, 0xFC, 0xFF, 0x77, 0xFC, 0x37, 0xFB, 0xFF, 0x0F, 0xFB, 0x00, 0x37, 0xFA, 0xFF, 0x0F, 0xFA, 0x00, 0x37, 0xF9, 0xFF, 0x0F, 0xF9, 0x00, 0x7F, 0x55, 0xE1, 0x00, 0x55, 0xE0, 0x08, 0x66, 0xFC, 0x6B, 0xE1, 0x51, 0xE1, 0x1B, 0xFB, 0xC0, 0x05, 0x53, 0xE1, 0x77, 0xFC, 0x7A, 0xE0, 0xBF, 0xEF, 0x7F, 0x08, 0x10, 0x4F, 0x50, 0x80, 0x7F
+		}
+	},
+	{
+		256,
+		79,
+		0x39,
+		0x012B,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x2B, 0x00, 0x6F, 0xFF, 0xD0, 0x03, 0x03, 0xFE, 0x66, 0xFE, 0xBF, 0xF7, 0x38, 0xFE, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x7C, 0x6E, 0x82, 0x7C, 0x6E, 0xC0, 0x7F, 0x62, 0xD0, 0x01, 0x51, 0xE7, 0x08, 0x51, 0xE6, 0x62, 0xD0, 0x03, 0x53, 0xFE, 0x18, 0x53, 0xFF, 0x62, 0xD0, 0x01, 0x51, 0xE5, 0x08, 0x51, 0xE4, 0x62, 0xD0, 0x03, 0x53, 0xFC, 0x18, 0x53, 0xFD, 0x62, 0xD0, 0x01, 0x51, 0xEF, 0x08, 0x51, 0x7E, 0x7C
+		}
+	},
+	{
+		257,
+		79,
+		0x39,
+		0x012C,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x2C, 0xEE, 0x62, 0xD0, 0x03, 0x53, 0xFA, 0x18, 0x53, 0xFB, 0x62, 0xD0, 0x01, 0x51, 0xED, 0x08, 0x51, 0xEC, 0x62, 0xD0, 0x03, 0x53, 0xF8, 0x18, 0x53, 0xF9, 0x7F, 0x62, 0xD0, 0x01, 0x51, 0xE7, 0x02, 0xEF, 0x62, 0xD0, 0x00, 0x53, 0xE8, 0x62, 0xD0, 0x01, 0x51, 0xE6, 0x0A, 0xEE, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x7C, 0x6D, 0xF3, 0x51, 0xE8, 0x08, 0x51, 0xE9, 0x62, 0xD0, 0x04, 0x53, 0xA3, 0x18, 0x23, 0xC7
+		}
+	},
+	{
+		258,
+		79,
+		0x39,
+		0x012D,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x2D, 0x53, 0xA4, 0x62, 0xD0, 0x01, 0x51, 0xE5, 0x02, 0xED, 0x62, 0xD0, 0x00, 0x53, 0xE8, 0x62, 0xD0, 0x01, 0x51, 0xE4, 0x0A, 0xEC, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x7C, 0x6D, 0xF3, 0x7C, 0x71, 0xC3, 0x7F, 0x10, 0x4F, 0x52, 0xFA, 0x13, 0xFC, 0x52, 0xF9, 0x1B, 0xFB, 0xD0, 0x12, 0x62, 0xD0, 0x00, 0x52, 0xFC, 0x13, 0xFA, 0x53, 0xE8, 0x52, 0xFB, 0x1B, 0xF9, 0x53, 0xE9, 0x80, 0x10, 0x62, 0xD0, 0xB7, 0xF0
+		}
+	},
+	{
+		259,
+		79,
+		0x39,
+		0x012E,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x2E, 0x00, 0x52, 0xFA, 0x13, 0xFC, 0x53, 0xE8, 0x52, 0xF9, 0x1B, 0xFB, 0x53, 0xE9, 0x20, 0x7F, 0x62, 0xD0, 0x04, 0x55, 0xCD, 0x00, 0x7C, 0x70, 0x41, 0x7F, 0x62, 0xD0, 0x03, 0x51, 0xEC, 0x08, 0x51, 0xED, 0x08, 0x62, 0xD0, 0x01, 0x51, 0xE6, 0x08, 0x51, 0xE7, 0x08, 0x9F, 0xB4, 0x38, 0xFC, 0x7C, 0x6F, 0xA8, 0x62, 0xD0, 0x03, 0x51, 0xE8, 0x08, 0x51, 0xE9, 0x08, 0x62, 0xD0, 0x01, 0x51, 0xEE, 0x8C, 0x9B
+		}
+	},
+	{
+		260,
+		79,
+		0x39,
+		0x012F,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x2F, 0x08, 0x51, 0xEF, 0x08, 0x9F, 0x9B, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x62, 0xD0, 0x04, 0x04, 0xA0, 0x7C, 0x71, 0x7F, 0x0C, 0x9F, 0x62, 0xD0, 0x03, 0x51, 0xEA, 0x08, 0x51, 0xEB, 0x08, 0x62, 0xD0, 0x01, 0x51, 0xE4, 0x08, 0x51, 0xE5, 0x08, 0x9F, 0x78, 0x38, 0xF8, 0x7C, 0x6F, 0x76, 0x62, 0xD0, 0x03, 0x51, 0xE6, 0x08, 0x51, 0xE7, 0x08, 0x62, 0xD0, 0x01, 0x51, 0xEC, 0x08, 0x51, 0xED, 0x08, 0x6A, 0x58
+		}
+	},
+	{
+		261,
+		79,
+		0x39,
+		0x0130,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x30, 0x9F, 0x5F, 0x38, 0xFC, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x62, 0xD0, 0x04, 0x04, 0x9E, 0x7C, 0x71, 0x7F, 0x0C, 0x9D, 0x7F, 0x10, 0x7C, 0x1E, 0x80, 0x62, 0xD0, 0x00, 0x5A, 0xE9, 0x20, 0x7C, 0x71, 0x22, 0x51, 0xAE, 0x62, 0xD0, 0x03, 0x12, 0xDD, 0x62, 0xD0, 0x04, 0x53, 0xA6, 0x62, 0xD0, 0x04, 0x51, 0xAD, 0x62, 0xD0, 0x03, 0x1A, 0xDC, 0x62, 0xD0, 0x04, 0x53, 0xA5, 0x62, 0xD0, 0x04, 0x51, 0x3D, 0xFF
+		}
+	},
+	{
+		262,
+		79,
+		0x39,
+		0x0131,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x31, 0xAE, 0x08, 0x51, 0xAD, 0x62, 0xD0, 0x03, 0x53, 0xDC, 0x18, 0x53, 0xDD, 0x62, 0xD0, 0x04, 0x51, 0xA6, 0x62, 0xD0, 0x00, 0x53, 0xE6, 0x62, 0xD0, 0x04, 0x51, 0xA5, 0x62, 0xD0, 0x00, 0x53, 0xE7, 0x62, 0xD0, 0x04, 0x51, 0xA0, 0x08, 0x62, 0xD0, 0x00, 0x18, 0x53, 0xE5, 0x55, 0xE4, 0x00, 0x65, 0xE5, 0x65, 0xE4, 0x6B, 0xE5, 0x51, 0xE4, 0x53, 0xE2, 0x51, 0xE5, 0x53, 0xE3, 0x50, 0x00, 0x08, 0x8B, 0x9C
+		}
+	},
+	{
+		263,
+		79,
+		0x39,
+		0x0132,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x32, 0x08, 0x51, 0xE7, 0x08, 0x51, 0xE6, 0x08, 0x50, 0x00, 0x08, 0x08, 0x51, 0xE3, 0x08, 0x51, 0xE2, 0x08, 0x7C, 0x49, 0xD3, 0x18, 0x53, 0xE6, 0x18, 0x53, 0xE7, 0x18, 0x18, 0x38, 0xFC, 0x51, 0xE6, 0x53, 0xE8, 0x51, 0xE7, 0x53, 0xE9, 0x51, 0xE8, 0x08, 0x51, 0xE9, 0x62, 0xD0, 0x04, 0x53, 0x9B, 0x18, 0x53, 0x9C, 0x62, 0xD0, 0x04, 0x51, 0xA6, 0x62, 0xD0, 0x00, 0x53, 0xE6, 0x62, 0xD0, 0x04, 0x57, 0x35
+		}
+	},
+	{
+		264,
+		79,
+		0x39,
+		0x0133,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x33, 0x51, 0xA5, 0x62, 0xD0, 0x00, 0x53, 0xE7, 0x62, 0xD0, 0x04, 0x51, 0x9E, 0x08, 0x62, 0xD0, 0x00, 0x18, 0x53, 0xE5, 0x55, 0xE4, 0x00, 0x65, 0xE5, 0x65, 0xE4, 0x6B, 0xE5, 0x51, 0xE4, 0x53, 0xE2, 0x51, 0xE5, 0x53, 0xE3, 0x50, 0x00, 0x08, 0x08, 0x51, 0xE7, 0x08, 0x51, 0xE6, 0x08, 0x50, 0x00, 0x08, 0x08, 0x51, 0xE3, 0x08, 0x51, 0xE2, 0x08, 0x7C, 0x49, 0xD3, 0x18, 0x53, 0xE6, 0x18, 0x53, 0x0D, 0xA2
+		}
+	},
+	{
+		265,
+		79,
+		0x39,
+		0x0134,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x34, 0xE7, 0x18, 0x18, 0x38, 0xFC, 0x51, 0xE6, 0x53, 0xE8, 0x51, 0xE7, 0x53, 0xE9, 0x51, 0xE8, 0x08, 0x51, 0xE9, 0x62, 0xD0, 0x04, 0x53, 0x99, 0x18, 0x53, 0x9A, 0x7C, 0x6E, 0x82, 0x7F, 0x10, 0x4F, 0x38, 0x08, 0x62, 0xD0, 0x04, 0x55, 0xDE, 0x00, 0x62, 0xD0, 0x04, 0x55, 0xDD, 0x00, 0x52, 0xFC, 0x03, 0xFA, 0x54, 0x01, 0x52, 0xFB, 0x0B, 0xF9, 0x54, 0x00, 0x52, 0xF8, 0x03, 0xF6, 0x54, 0x03, 0x0D, 0xA3
+		}
+	},
+	{
+		266,
+		79,
+		0x39,
+		0x0135,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x35, 0x52, 0xF7, 0x0B, 0xF5, 0x54, 0x02, 0x52, 0xFC, 0x03, 0xF6, 0x54, 0x05, 0x52, 0xFB, 0x0B, 0xF5, 0x54, 0x04, 0x52, 0xFA, 0x03, 0xF8, 0x54, 0x07, 0x52, 0xF9, 0x0B, 0xF7, 0x54, 0x06, 0x52, 0xFC, 0x13, 0xF8, 0x52, 0xFB, 0x1B, 0xF7, 0xC0, 0x43, 0x7C, 0x72, 0xD4, 0xD0, 0x1E, 0x62, 0xD0, 0x04, 0x55, 0xDE, 0x01, 0x52, 0x01, 0x13, 0x03, 0x52, 0x00, 0x1B, 0x02, 0xD0, 0x06, 0x7C, 0x73, 0x66, 0x05, 0x94
+		}
+	},
+	{
+		267,
+		79,
+		0x39,
+		0x0136,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x36, 0x80, 0x69, 0x62, 0xD0, 0x04, 0x55, 0xDD, 0x02, 0x80, 0x61, 0x62, 0xD0, 0x04, 0x55, 0xDE, 0x02, 0x52, 0x07, 0x13, 0x05, 0x52, 0x06, 0x1B, 0x04, 0xD0, 0x09, 0x62, 0xD0, 0x04, 0x55, 0xDD, 0x02, 0x80, 0x49, 0x62, 0xD0, 0x04, 0x55, 0xDD, 0x03, 0x80, 0x41, 0x7C, 0x72, 0xD4, 0xC0, 0x21, 0x62, 0xD0, 0x04, 0x55, 0xDE, 0x03, 0x52, 0x03, 0x13, 0x01, 0x52, 0x02, 0x1B, 0x00, 0xD0, 0x09, 0x62, 0xE1, 0x4D
+		}
+	},
+	{
+		268,
+		79,
+		0x39,
+		0x0137,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x37, 0xD0, 0x04, 0x55, 0xDD, 0x03, 0x80, 0x24, 0x62, 0xD0, 0x04, 0x55, 0xDD, 0x04, 0x80, 0x1C, 0x62, 0xD0, 0x04, 0x55, 0xDE, 0x04, 0x52, 0x05, 0x13, 0x07, 0x52, 0x04, 0x1B, 0x06, 0xD0, 0x09, 0x62, 0xD0, 0x04, 0x55, 0xDD, 0x04, 0x80, 0x04, 0x7C, 0x73, 0x66, 0x62, 0xD0, 0x04, 0x3C, 0xDD, 0x01, 0xB0, 0x29, 0x62, 0xD0, 0x00, 0x52, 0xF3, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x52, 0xFA, 0x02, 0xE8, 0x77, 0x7A
+		}
+	},
+	{
+		269,
+		79,
+		0x39,
+		0x0138,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x38, 0x53, 0xE8, 0x52, 0xF9, 0x0A, 0xE9, 0x53, 0xE9, 0x51, 0xE8, 0x13, 0xF6, 0x51, 0xE9, 0x1B, 0xF5, 0xD0, 0x09, 0x62, 0xD0, 0x04, 0x55, 0xE0, 0x01, 0x80, 0x97, 0x62, 0xD0, 0x04, 0x3C, 0xDD, 0x02, 0xB0, 0x29, 0x62, 0xD0, 0x00, 0x52, 0xF4, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x52, 0xF8, 0x02, 0xE8, 0x53, 0xE8, 0x52, 0xF7, 0x0A, 0xE9, 0x53, 0xE9, 0x51, 0xE8, 0x13, 0xFC, 0x51, 0xE9, 0x1B, 0xFB, 0x37, 0xFB
+		}
+	},
+	{
+		270,
+		79,
+		0x39,
+		0x0139,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x39, 0xD0, 0x09, 0x62, 0xD0, 0x04, 0x55, 0xE0, 0x02, 0x80, 0x67, 0x62, 0xD0, 0x04, 0x3C, 0xDD, 0x03, 0xB0, 0x29, 0x62, 0xD0, 0x00, 0x52, 0xF3, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x52, 0xF6, 0x02, 0xE8, 0x53, 0xE8, 0x52, 0xF5, 0x0A, 0xE9, 0x53, 0xE9, 0x51, 0xE8, 0x13, 0xFA, 0x51, 0xE9, 0x1B, 0xF9, 0xD0, 0x09, 0x62, 0xD0, 0x04, 0x55, 0xE0, 0x03, 0x80, 0x37, 0x62, 0xD0, 0x04, 0x3C, 0xDD, 0x04, 0x10, 0xAE
+		}
+	},
+	{
+		271,
+		79,
+		0x39,
+		0x013A,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x3A, 0xB0, 0x29, 0x62, 0xD0, 0x00, 0x52, 0xF4, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x52, 0xFC, 0x02, 0xE8, 0x53, 0xE8, 0x52, 0xFB, 0x0A, 0xE9, 0x53, 0xE9, 0x51, 0xE8, 0x13, 0xF8, 0x51, 0xE9, 0x1B, 0xF7, 0xD0, 0x09, 0x62, 0xD0, 0x04, 0x55, 0xE0, 0x04, 0x80, 0x07, 0x62, 0xD0, 0x04, 0x55, 0xE0, 0x00, 0x62, 0xD0, 0x04, 0x3C, 0xDE, 0x04, 0xB0, 0x15, 0x62, 0xD0, 0x04, 0x3C, 0xDD, 0x01, 0xB0, 0x0D, 0xA9, 0xE1
+		}
+	},
+	{
+		272,
+		79,
+		0x39,
+		0x013B,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x3B, 0x62, 0xD0, 0x04, 0x51, 0xDE, 0x01, 0x03, 0x62, 0xD0, 0x00, 0x80, 0x15, 0x62, 0xD0, 0x04, 0x51, 0xDE, 0x62, 0xD0, 0x04, 0x02, 0xDD, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x16, 0xE9, 0x02, 0x51, 0xE9, 0x38, 0xF8, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x09, 0x52, 0xF7, 0x3B, 0xFB, 0xB0, 0x1B, 0x52, 0xF8, 0x3B, 0xFC, 0xB0, 0x15, 0x52, 0xF5, 0x3B, 0xF9, 0xB0, 0x0F, 0x52, 0xF6, 0x3B, 0xFA, 0xB0, 0x09, 0xC2, 0x14
+		}
+	},
+	{
+		273,
+		79,
+		0x39,
+		0x013C,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x3C, 0x62, 0xD0, 0x04, 0x55, 0xDF, 0xFE, 0x81, 0x17, 0x52, 0xF7, 0x3B, 0xFB, 0xB0, 0x07, 0x52, 0xF8, 0x3B, 0xFC, 0xA0, 0x0D, 0x52, 0xF5, 0x3B, 0xF9, 0xB0, 0x4E, 0x52, 0xF6, 0x3B, 0xFA, 0xB0, 0x48, 0x62, 0xD0, 0x04, 0x3C, 0xCE, 0x00, 0xA0, 0x06, 0x3C, 0xCE, 0x07, 0xB0, 0x09, 0x62, 0xD0, 0x04, 0x55, 0xDF, 0x00, 0x80, 0xEA, 0x62, 0xD0, 0x04, 0x3C, 0xCE, 0x01, 0xA0, 0x06, 0x3C, 0xCE, 0x02, 0xC9, 0x23
+		}
+	},
+	{
+		274,
+		79,
+		0x39,
+		0x013D,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x3D, 0xB0, 0x09, 0x62, 0xD0, 0x04, 0x55, 0xDF, 0x02, 0x80, 0xD5, 0x62, 0xD0, 0x04, 0x3C, 0xCE, 0x03, 0xA0, 0x06, 0x3C, 0xCE, 0x04, 0xB0, 0x09, 0x62, 0xD0, 0x04, 0x55, 0xDF, 0x04, 0x80, 0xC0, 0x62, 0xD0, 0x04, 0x55, 0xDF, 0x06, 0x80, 0xB8, 0x52, 0xFB, 0x08, 0x52, 0xFC, 0x08, 0x52, 0xF7, 0x08, 0x52, 0xF8, 0x08, 0x9B, 0xEC, 0x7C, 0x72, 0x25, 0x52, 0xF9, 0x08, 0x52, 0xFA, 0x08, 0x52, 0xF5, 0x56, 0x3E
+		}
+	},
+	{
+		275,
+		79,
+		0x39,
+		0x013E,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x3E, 0x08, 0x52, 0xF6, 0x08, 0x9B, 0xDB, 0x38, 0xF8, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x54, 0x03, 0x51, 0xE9, 0x54, 0x02, 0x52, 0x01, 0x13, 0x03, 0x52, 0x00, 0x1B, 0x02, 0xD0, 0x23, 0x7C, 0x71, 0x2E, 0x7C, 0x70, 0x17, 0x7C, 0x73, 0x4A, 0xD0, 0x09, 0x56, 0x06, 0x01, 0x56, 0x05, 0x00, 0x80, 0x07, 0x56, 0x06, 0x00, 0x56, 0x05, 0x00, 0x62, 0xD0, 0x00, 0x52, 0x06, 0x54, 0x04, 0x80, 0x2C, 0x62, 0x31, 0xF5
+		}
+	},
+	{
+		276,
+		79,
+		0x39,
+		0x013F,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x3F, 0xD0, 0x00, 0x52, 0x03, 0x53, 0xE8, 0x52, 0x02, 0x53, 0xE9, 0x7C, 0x70, 0x17, 0x13, 0x01, 0x51, 0xE9, 0x1B, 0x00, 0xD0, 0x09, 0x56, 0x08, 0x01, 0x56, 0x07, 0x00, 0x80, 0x07, 0x56, 0x08, 0x00, 0x56, 0x07, 0x00, 0x62, 0xD0, 0x00, 0x52, 0x08, 0x54, 0x04, 0x62, 0xD0, 0x04, 0x47, 0xCE, 0x01, 0xB0, 0x1B, 0x62, 0xD0, 0x00, 0x52, 0x04, 0x31, 0x01, 0x53, 0xE9, 0x62, 0xD0, 0x04, 0x51, 0xCE, 0x79, 0x86
+		}
+	},
+	{
+		277,
+		79,
+		0x39,
+		0x0140,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x40, 0x62, 0xD0, 0x00, 0x02, 0xE9, 0x62, 0xD0, 0x04, 0x53, 0xDF, 0x80, 0x0D, 0x62, 0xD0, 0x04, 0x51, 0xCE, 0x03, 0x04, 0x62, 0xD0, 0x04, 0x53, 0xDF, 0x62, 0xD0, 0x04, 0x26, 0xDF, 0x07, 0x62, 0xD0, 0x04, 0x51, 0xDF, 0x01, 0x01, 0x62, 0xD0, 0x00, 0x38, 0xF7, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x04, 0x56, 0x00, 0x00, 0x7C, 0x72, 0xAB, 0x10, 0x7C, 0x1E, 0x80, 0x62, 0xD0, 0x00, 0x5A, 0xE9, 0x20, 0xC9, 0x27
+		}
+	},
+	{
+		278,
+		79,
+		0x39,
+		0x0141,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x41, 0x7C, 0x71, 0x22, 0x3D, 0xFC, 0x01, 0xA0, 0x62, 0x3D, 0xFC, 0x00, 0xB0, 0x41, 0x62, 0xD0, 0x04, 0x55, 0xD0, 0x00, 0x62, 0xD0, 0x04, 0x7C, 0x71, 0xEE, 0x3C, 0xAD, 0x00, 0xB0, 0x06, 0x3C, 0xAE, 0x00, 0xA0, 0x22, 0x62, 0xD0, 0x04, 0x51, 0xAE, 0x08, 0x51, 0xAD, 0x62, 0xD0, 0x04, 0x53, 0xAF, 0x18, 0x53, 0xB0, 0x10, 0x50, 0x00, 0x5C, 0x7C, 0x1E, 0xB4, 0x20, 0x62, 0xD0, 0x03, 0x55, 0xDD, 0x4B, 0x2C
+		}
+	},
+	{
+		279,
+		79,
+		0x39,
+		0x0142,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x42, 0x00, 0x55, 0xDC, 0x00, 0x62, 0xD0, 0x04, 0x55, 0xA6, 0x00, 0x55, 0xA5, 0x00, 0x7C, 0x72, 0x85, 0x62, 0xD0, 0x03, 0x55, 0xE3, 0x00, 0x55, 0xE2, 0x00, 0x7C, 0x73, 0x5F, 0x62, 0xD0, 0x04, 0x55, 0xD4, 0x00, 0x62, 0xD0, 0x00, 0x50, 0x00, 0x83, 0xAD, 0x62, 0xD0, 0x04, 0x3C, 0xDC, 0x01, 0xA0, 0x2A, 0x62, 0xD0, 0x01, 0x51, 0xE7, 0x08, 0x51, 0xE6, 0x62, 0xD0, 0x03, 0x53, 0xFE, 0x18, 0x53, 0x86, 0xA3
+		}
+	},
+	{
+		280,
+		79,
+		0x39,
+		0x0143,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x43, 0xFF, 0x62, 0xD0, 0x01, 0x51, 0xE5, 0x08, 0x51, 0xE4, 0x62, 0xD0, 0x03, 0x53, 0xFC, 0x18, 0x53, 0xFD, 0x7C, 0x6E, 0x82, 0x62, 0xD0, 0x04, 0x55, 0xDC, 0x01, 0x62, 0xD0, 0x01, 0x51, 0xE7, 0x08, 0x51, 0xE6, 0x62, 0xD0, 0x03, 0x53, 0xF6, 0x18, 0x53, 0xF7, 0x62, 0xD0, 0x01, 0x51, 0xE5, 0x08, 0x51, 0xE4, 0x62, 0xD0, 0x03, 0x53, 0xF4, 0x18, 0x53, 0xF5, 0x62, 0xD0, 0x00, 0x51, 0x3A, 0x08, 0x36, 0x04
+		}
+	},
+	{
+		281,
+		79,
+		0x39,
+		0x0144,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x44, 0x62, 0xD0, 0x00, 0x51, 0x39, 0x08, 0x62, 0xD0, 0x03, 0x51, 0xEA, 0x08, 0x51, 0xEB, 0x08, 0x62, 0xD0, 0x03, 0x51, 0xEC, 0x08, 0x51, 0xED, 0x08, 0x62, 0xD0, 0x01, 0x51, 0xE4, 0x08, 0x51, 0xE5, 0x08, 0x51, 0xE6, 0x08, 0x51, 0xE7, 0x08, 0x9B, 0xF5, 0x38, 0xF6, 0x7C, 0x71, 0x88, 0x54, 0x00, 0x3D, 0x00, 0x00, 0xA1, 0x23, 0x3D, 0xFB, 0x00, 0xA1, 0x1E, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0xAC, 0xF1
+		}
+	},
+	{
+		282,
+		79,
+		0x39,
+		0x0145,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x45, 0x62, 0xD0, 0x03, 0x51, 0xE3, 0x21, 0x0F, 0x62, 0xD0, 0x00, 0x53, 0xE6, 0x62, 0xD0, 0x03, 0x51, 0xE2, 0x21, 0x00, 0x62, 0xD0, 0x00, 0x3A, 0xE9, 0xB0, 0x07, 0x51, 0xE6, 0x3A, 0xE8, 0xA0, 0xA4, 0x62, 0xD0, 0x03, 0x65, 0xE3, 0x6B, 0xE2, 0x65, 0xE3, 0x6B, 0xE2, 0x65, 0xE3, 0x6B, 0xE2, 0x65, 0xE3, 0x6B, 0xE2, 0x52, 0x00, 0x2C, 0xE3, 0x3C, 0xE2, 0x12, 0xB0, 0x06, 0x3C, 0xE3, 0x34, 0xA0, 0xF9, 0x8C
+		}
+	},
+	{
+		283,
+		79,
+		0x39,
+		0x0146,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x46, 0x28, 0x62, 0xD0, 0x03, 0x3C, 0xE2, 0x23, 0xB0, 0x06, 0x3C, 0xE3, 0x41, 0xA0, 0x1B, 0x62, 0xD0, 0x03, 0x3C, 0xE2, 0x34, 0xB0, 0x06, 0x3C, 0xE3, 0x12, 0xA0, 0x0E, 0x62, 0xD0, 0x03, 0x3C, 0xE2, 0x41, 0xB0, 0x14, 0x3C, 0xE3, 0x23, 0xB0, 0x0F, 0x62, 0xD0, 0x04, 0x55, 0xD2, 0x01, 0x62, 0xD0, 0x04, 0x55, 0xD3, 0x01, 0x80, 0x49, 0x62, 0xD0, 0x03, 0x3C, 0xE2, 0x43, 0xB0, 0x06, 0x3C, 0xE3, 0xD5, 0x45
+		}
+	},
+	{
+		284,
+		79,
+		0x39,
+		0x0147,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x47, 0x21, 0xA0, 0x28, 0x62, 0xD0, 0x03, 0x3C, 0xE2, 0x32, 0xB0, 0x06, 0x3C, 0xE3, 0x14, 0xA0, 0x1B, 0x62, 0xD0, 0x03, 0x3C, 0xE2, 0x21, 0xB0, 0x06, 0x3C, 0xE3, 0x43, 0xA0, 0x0E, 0x62, 0xD0, 0x03, 0x3C, 0xE2, 0x14, 0xB0, 0x14, 0x3C, 0xE3, 0x32, 0xB0, 0x0F, 0x62, 0xD0, 0x04, 0x55, 0xD2, 0x00, 0x62, 0xD0, 0x04, 0x55, 0xD3, 0x01, 0x80, 0x07, 0x62, 0xD0, 0x04, 0x55, 0xD3, 0x00, 0x7C, 0x72, 0x16, 0xC8
+		}
+	},
+	{
+		285,
+		79,
+		0x39,
+		0x0148,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x48, 0x85, 0x80, 0x3D, 0x7C, 0x73, 0x0E, 0x39, 0x00, 0xA0, 0x36, 0x62, 0xD0, 0x04, 0x3C, 0xD3, 0x01, 0xB0, 0x2E, 0x62, 0xD0, 0x00, 0x7C, 0x73, 0x0E, 0x53, 0xE9, 0x62, 0xD0, 0x04, 0x51, 0xD1, 0x62, 0xD0, 0x00, 0x3A, 0xE9, 0xA0, 0x08, 0x62, 0xD0, 0x04, 0x76, 0xD1, 0x80, 0x13, 0x62, 0xD0, 0x03, 0x55, 0xE3, 0x00, 0x55, 0xE2, 0x00, 0x7C, 0x72, 0x85, 0x62, 0xD0, 0x04, 0x55, 0xD3, 0x00, 0x62, 0x19, 0xCF
+		}
+	},
+	{
+		286,
+		79,
+		0x39,
+		0x0149,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x49, 0xD0, 0x04, 0x3C, 0xD3, 0x01, 0xB0, 0x11, 0x62, 0xD0, 0x04, 0x3C, 0xD2, 0x01, 0xB0, 0x06, 0x56, 0x00, 0x28, 0x80, 0x04, 0x56, 0x00, 0x29, 0x3D, 0x00, 0x00, 0xA0, 0x3E, 0x62, 0xD0, 0x03, 0x51, 0xEA, 0x08, 0x51, 0xEB, 0x08, 0x62, 0xD0, 0x03, 0x51, 0xEC, 0x08, 0x51, 0xED, 0x08, 0x62, 0xD0, 0x01, 0x51, 0xE4, 0x08, 0x51, 0xE5, 0x08, 0x51, 0xE6, 0x08, 0x51, 0xE7, 0x08, 0x9C, 0x65, 0x38, 0x1E, 0xDA
+		}
+	},
+	{
+		287,
+		79,
+		0x39,
+		0x014A,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x4A, 0xF8, 0x54, 0x03, 0x3D, 0xFB, 0x00, 0xA0, 0x09, 0x62, 0xD0, 0x04, 0x3C, 0xD3, 0x00, 0xB0, 0x0A, 0x52, 0x03, 0x54, 0x00, 0x66, 0x00, 0x07, 0x00, 0x0E, 0x7C, 0x72, 0xF6, 0x39, 0x00, 0xA1, 0x5B, 0x3D, 0x00, 0x00, 0xA1, 0x56, 0x3D, 0x00, 0x28, 0xA1, 0x51, 0x3D, 0x00, 0x29, 0xA1, 0x4C, 0x62, 0xD0, 0x03, 0x51, 0xEC, 0x08, 0x51, 0xED, 0x08, 0x62, 0xD0, 0x01, 0x51, 0xE6, 0x08, 0x51, 0xE7, 0xB4, 0x07
+		}
+	},
+	{
+		288,
+		79,
+		0x39,
+		0x014B,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x4B, 0x08, 0x7C, 0x4B, 0x61, 0x38, 0xFC, 0x7C, 0x6F, 0xA8, 0x62, 0xD0, 0x03, 0x51, 0xEA, 0x08, 0x51, 0xEB, 0x08, 0x62, 0xD0, 0x01, 0x51, 0xE4, 0x08, 0x51, 0xE5, 0x08, 0x7C, 0x4B, 0x61, 0x38, 0xFC, 0x7C, 0x6F, 0x76, 0x62, 0xD0, 0x04, 0x52, 0x00, 0x3A, 0xD4, 0xB0, 0x04, 0x56, 0x01, 0x01, 0x3D, 0x01, 0x00, 0xB0, 0xD4, 0x62, 0xD0, 0x00, 0x3C, 0x39, 0x00, 0xB0, 0x73, 0x62, 0xD0, 0x00, 0x3C, 0xBA, 0x14
+		}
+	},
+	{
+		289,
+		79,
+		0x39,
+		0x014C,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x4C, 0x3A, 0x00, 0xB0, 0x6B, 0x62, 0xD0, 0x04, 0x3C, 0x9F, 0x00, 0xB0, 0x06, 0x3C, 0xA0, 0x00, 0xA0, 0x0E, 0x62, 0xD0, 0x04, 0x3C, 0x9D, 0x00, 0xB0, 0x56, 0x3C, 0x9E, 0x00, 0xB0, 0x51, 0x3D, 0x00, 0x10, 0xB0, 0x13, 0x62, 0xD0, 0x04, 0x3C, 0xD4, 0x14, 0xA0, 0x06, 0x3C, 0xD4, 0x1C, 0xB0, 0x3F, 0x56, 0x01, 0x01, 0x80, 0x3A, 0x3D, 0x00, 0x1C, 0xB0, 0x13, 0x62, 0xD0, 0x04, 0x3C, 0xD4, 0x18, 0x4D, 0x3B
+		}
+	},
+	{
+		290,
+		79,
+		0x39,
+		0x014D,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x4D, 0xA0, 0x06, 0x3C, 0xD4, 0x10, 0xB0, 0x28, 0x56, 0x01, 0x01, 0x80, 0x23, 0x3D, 0x00, 0x18, 0xA0, 0x06, 0x3D, 0x00, 0x14, 0xB0, 0x19, 0x62, 0xD0, 0x00, 0x52, 0x00, 0x11, 0x04, 0x7C, 0x70, 0x27, 0xA0, 0x0A, 0x52, 0x00, 0x01, 0x04, 0x7C, 0x70, 0x27, 0xB0, 0x04, 0x56, 0x01, 0x01, 0x3D, 0x00, 0x10, 0xB0, 0x18, 0x62, 0xD0, 0x04, 0x3C, 0xD4, 0x10, 0xA0, 0x0B, 0x3C, 0xD4, 0x1E, 0xA0, 0x06, 0x64, 0x6A
+		}
+	},
+	{
+		291,
+		79,
+		0x39,
+		0x014E,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x4E, 0x3C, 0xD4, 0x12, 0xB0, 0x43, 0x56, 0x01, 0x01, 0x80, 0x3E, 0x3D, 0x00, 0x1E, 0xB0, 0x18, 0x62, 0xD0, 0x04, 0x3C, 0xD4, 0x1E, 0xA0, 0x0B, 0x3C, 0xD4, 0x10, 0xA0, 0x06, 0x3C, 0xD4, 0x1C, 0xB0, 0x27, 0x56, 0x01, 0x01, 0x80, 0x22, 0x62, 0xD0, 0x04, 0x51, 0xD4, 0x3B, 0x00, 0xA0, 0x16, 0x62, 0xD0, 0x00, 0x52, 0x00, 0x11, 0x02, 0x7C, 0x70, 0x27, 0xA0, 0x0A, 0x52, 0x00, 0x01, 0x02, 0x7C, 0x91, 0xC5
+		}
+	},
+	{
+		292,
+		79,
+		0x39,
+		0x014F,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x4F, 0x70, 0x27, 0xB0, 0x04, 0x56, 0x01, 0x01, 0x3D, 0x01, 0x01, 0xB0, 0x22, 0x62, 0xD0, 0x00, 0x7C, 0x72, 0xF6, 0x53, 0xE9, 0x62, 0xD0, 0x04, 0x51, 0xD5, 0x62, 0xD0, 0x00, 0x3A, 0xE9, 0xA0, 0x1B, 0x7C, 0x6E, 0x82, 0x62, 0xD0, 0x04, 0x76, 0xD5, 0x56, 0x00, 0x00, 0x80, 0x0E, 0x7C, 0x73, 0x5F, 0x52, 0x00, 0x62, 0xD0, 0x04, 0x53, 0xD4, 0x56, 0x00, 0x00, 0x3D, 0x00, 0x00, 0xA0, 0x52, 0x62, 0x27, 0xF2
+		}
+	},
+	{
+		293,
+		79,
+		0x39,
+		0x0150,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x50, 0xD0, 0x03, 0x51, 0xEC, 0x08, 0x51, 0xED, 0x08, 0x62, 0xD0, 0x01, 0x51, 0xE6, 0x08, 0x51, 0xE7, 0x08, 0x7C, 0x4B, 0x61, 0x38, 0xFC, 0x7C, 0x6F, 0xA8, 0x62, 0xD0, 0x03, 0x51, 0xEA, 0x08, 0x51, 0xEB, 0x08, 0x62, 0xD0, 0x01, 0x51, 0xE4, 0x08, 0x51, 0xE5, 0x08, 0x7C, 0x4B, 0x61, 0x38, 0xFC, 0x7C, 0x6F, 0x76, 0x7C, 0x4C, 0x14, 0x62, 0xD0, 0x04, 0x52, 0x00, 0x3A, 0xD0, 0xB0, 0x08, 0x62, 0xAE, 0x01
+		}
+	},
+	{
+		294,
+		79,
+		0x39,
+		0x0151,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x51, 0xD0, 0x04, 0x76, 0xCF, 0x80, 0x04, 0x7C, 0x70, 0x5B, 0x52, 0x00, 0x62, 0xD0, 0x04, 0x53, 0xD0, 0x52, 0x00, 0x62, 0xD0, 0x00, 0x38, 0xFC, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x03, 0x7C, 0x72, 0xAB, 0x56, 0x00, 0x00, 0x52, 0xFC, 0x08, 0x7C, 0x5B, 0xE0, 0x38, 0xFF, 0x62, 0xD0, 0x00, 0x54, 0x00, 0x52, 0xFC, 0x08, 0x90, 0x6D, 0x62, 0xD0, 0x00, 0x54, 0x01, 0x52, 0xFC, 0x08, 0x90, 0x96, 0x38, 0xC7, 0x34
+		}
+	},
+	{
+		295,
+		79,
+		0x39,
+		0x0152,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x52, 0xFE, 0x62, 0xD0, 0x00, 0x54, 0x02, 0x3D, 0x00, 0x00, 0xA0, 0x05, 0x52, 0x00, 0x80, 0x12, 0x3D, 0x01, 0x00, 0xA0, 0x08, 0x52, 0x01, 0x62, 0xD0, 0x00, 0x80, 0x06, 0x52, 0x02, 0x62, 0xD0, 0x00, 0x38, 0xFD, 0x20, 0x7F, 0x10, 0x4F, 0x50, 0x00, 0x08, 0x52, 0xFC, 0x08, 0x9B, 0x7E, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x56, 0x00, 0x00, 0x50, 0x01, 0x08, 0x52, 0x5D, 0x61
+		}
+	},
+	{
+		296,
+		79,
+		0x39,
+		0x0153,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x53, 0xFC, 0x08, 0x9B, 0x68, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x54, 0x00, 0x3D, 0x00, 0x28, 0xA0, 0x0A, 0x3D, 0x00, 0x29, 0xA0, 0x05, 0x50, 0x00, 0x80, 0x06, 0x52, 0x00, 0x62, 0xD0, 0x00, 0x38, 0xFF, 0x20, 0x7F, 0x10, 0x4F, 0x50, 0x01, 0x08, 0x52, 0xFC, 0x08, 0x9B, 0x40, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x20, 0x7F, 0x10, 0x4F, 0x50, 0x00, 0x08, 0x52, 0xFC, 0x08, 0x90, 0x29, 0x38, 0xFE, 0x62, 0x5B, 0x5E
+		}
+	},
+	{
+		297,
+		79,
+		0x39,
+		0x0154,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x54, 0xD0, 0x00, 0x20, 0x7F, 0x10, 0x4F, 0x50, 0x01, 0x08, 0x52, 0xFC, 0x08, 0x90, 0x18, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x20, 0x7F, 0x10, 0x4F, 0x50, 0x02, 0x08, 0x52, 0xFC, 0x08, 0x90, 0x07, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x0F, 0x56, 0x00, 0x00, 0x56, 0x04, 0x00, 0x7C, 0x72, 0xAB, 0x56, 0x03, 0x00, 0x10, 0x7C, 0x1E, 0x80, 0x62, 0xD0, 0x00, 0x5A, 0xE9, 0x20, 0xE0, 0x69
+		}
+	},
+	{
+		298,
+		79,
+		0x39,
+		0x0155,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x55, 0x7C, 0x71, 0x22, 0x10, 0x7C, 0x1E, 0x8D, 0x62, 0xD0, 0x00, 0x5A, 0xE9, 0x20, 0x08, 0x51, 0xE9, 0x62, 0xD0, 0x04, 0x53, 0xAB, 0x18, 0x53, 0xAC, 0x3D, 0xFC, 0x02, 0xD0, 0x61, 0x3D, 0xFC, 0x00, 0xB0, 0x41, 0x62, 0xD0, 0x04, 0x55, 0xD0, 0x00, 0x62, 0xD0, 0x04, 0x7C, 0x71, 0xEE, 0x3C, 0xAD, 0x00, 0xB0, 0x06, 0x3C, 0xAE, 0x00, 0xA0, 0x22, 0x62, 0xD0, 0x04, 0x51, 0xAE, 0x08, 0x51, 0xAD, 0xF0, 0x8A
+		}
+	},
+	{
+		299,
+		79,
+		0x39,
+		0x0156,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x56, 0x62, 0xD0, 0x04, 0x53, 0xAF, 0x18, 0x53, 0xB0, 0x10, 0x50, 0x00, 0x5C, 0x7C, 0x1E, 0xB4, 0x20, 0x62, 0xD0, 0x03, 0x55, 0xDD, 0x00, 0x55, 0xDC, 0x00, 0x62, 0xD0, 0x04, 0x55, 0xA6, 0x00, 0x55, 0xA5, 0x00, 0x62, 0xD0, 0x04, 0x55, 0xDB, 0x00, 0x7C, 0x73, 0x58, 0x7C, 0x73, 0x51, 0x10, 0x50, 0x00, 0x5C, 0x7C, 0x1E, 0xCC, 0x20, 0x62, 0xD0, 0x00, 0x50, 0x00, 0x83, 0x71, 0x3D, 0xFC, 0x02, 0x20, 0xEB
+		}
+	},
+	{
+		300,
+		79,
+		0x39,
+		0x0157,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x57, 0xB3, 0x0F, 0x62, 0xD0, 0x04, 0x3C, 0xDC, 0x02, 0xD0, 0x55, 0x62, 0xD0, 0x00, 0x50, 0x0D, 0x10, 0x7C, 0x72, 0x01, 0x20, 0x3C, 0xE9, 0x00, 0xB0, 0x05, 0x39, 0x00, 0xA0, 0x33, 0x62, 0xD0, 0x00, 0x50, 0x0D, 0x10, 0x7C, 0x72, 0x01, 0x53, 0xE8, 0x20, 0x62, 0xD0, 0x04, 0x51, 0xAC, 0x62, 0xD0, 0x00, 0x12, 0xE8, 0x62, 0xD0, 0x04, 0x51, 0xAB, 0x62, 0xD0, 0x00, 0x1A, 0xE9, 0xD0, 0x1A, 0x7C, 0xE0, 0x6C
+		}
+	},
+	{
+		301,
+		79,
+		0x39,
+		0x0158,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x58, 0x4A, 0xD9, 0x7C, 0x4A, 0xD2, 0x7C, 0x4B, 0x1A, 0x62, 0xD0, 0x00, 0x50, 0x00, 0x83, 0x1F, 0x7C, 0x4A, 0xD9, 0x7C, 0x4A, 0xD2, 0x7C, 0x4B, 0x1A, 0x62, 0xD0, 0x04, 0x55, 0xDC, 0x02, 0x3D, 0xFB, 0x01, 0xA0, 0x06, 0x3D, 0xFB, 0x02, 0xB1, 0x49, 0x62, 0xD0, 0x01, 0x51, 0xEE, 0x08, 0x51, 0xEF, 0x08, 0x51, 0xE6, 0x08, 0x51, 0xE7, 0x08, 0x7C, 0x4B, 0x61, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x54, 0x7C, 0xA5
+		}
+	},
+	{
+		302,
+		79,
+		0x39,
+		0x0159,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x59, 0x08, 0x51, 0xE9, 0x54, 0x07, 0x62, 0xD0, 0x01, 0x51, 0xEC, 0x08, 0x51, 0xED, 0x08, 0x51, 0xE4, 0x08, 0x51, 0xE5, 0x08, 0x7C, 0x4B, 0x61, 0x38, 0xF8, 0x7C, 0x71, 0xF5, 0x62, 0xD0, 0x03, 0x51, 0xE8, 0x08, 0x51, 0xE9, 0x08, 0x62, 0xD0, 0x03, 0x51, 0xEC, 0x08, 0x51, 0xED, 0x08, 0x7C, 0x4B, 0x61, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x54, 0x0C, 0x51, 0xE9, 0x54, 0x0B, 0x62, 0xD0, 0x03, 0x51, 0x9A, 0xE2
+		}
+	},
+	{
+		303,
+		79,
+		0x39,
+		0x015A,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x5A, 0xE6, 0x08, 0x51, 0xE7, 0x08, 0x62, 0xD0, 0x03, 0x51, 0xEA, 0x08, 0x51, 0xEB, 0x08, 0x7C, 0x4B, 0x61, 0x38, 0xF8, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x54, 0x0E, 0x51, 0xE9, 0x54, 0x0D, 0x52, 0x08, 0x13, 0x0C, 0x54, 0x06, 0x52, 0x07, 0x1B, 0x0B, 0x54, 0x05, 0x62, 0xD0, 0x00, 0x51, 0x3B, 0x62, 0xD0, 0x00, 0x13, 0x06, 0x52, 0x05, 0x31, 0x80, 0x53, 0xE1, 0x50, 0x00, 0x31, 0x80, 0x1A, 0xE1, 0x96, 0xDB
+		}
+	},
+	{
+		304,
+		79,
+		0x39,
+		0x015B,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x5B, 0xD0, 0x06, 0x56, 0x00, 0x48, 0x80, 0x41, 0x52, 0x06, 0x11, 0x00, 0x52, 0x05, 0x31, 0x80, 0x19, 0x80, 0xD0, 0x35, 0x62, 0xD0, 0x00, 0x52, 0x06, 0x73, 0x53, 0xE8, 0x52, 0x05, 0x73, 0x53, 0xE9, 0x51, 0xE8, 0x01, 0x01, 0x54, 0x06, 0x51, 0xE9, 0x09, 0x00, 0x54, 0x05, 0x62, 0xD0, 0x00, 0x51, 0x3B, 0x62, 0xD0, 0x00, 0x13, 0x06, 0x52, 0x05, 0x31, 0x80, 0x53, 0xE1, 0x50, 0x00, 0x31, 0x80, 0xF9, 0xA2
+		}
+	},
+	{
+		305,
+		79,
+		0x39,
+		0x015C,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x5C, 0x1A, 0xE1, 0xD0, 0x04, 0x56, 0x00, 0x49, 0x52, 0x0A, 0x13, 0x0E, 0x54, 0x06, 0x52, 0x09, 0x1B, 0x0D, 0x54, 0x05, 0x62, 0xD0, 0x00, 0x51, 0x3C, 0x62, 0xD0, 0x00, 0x13, 0x06, 0x52, 0x05, 0x31, 0x80, 0x53, 0xE1, 0x50, 0x00, 0x31, 0x80, 0x1A, 0xE1, 0xD0, 0x06, 0x56, 0x00, 0x48, 0x80, 0x41, 0x52, 0x06, 0x11, 0x00, 0x52, 0x05, 0x31, 0x80, 0x19, 0x80, 0xD0, 0x35, 0x62, 0xD0, 0x00, 0x52, 0x2B, 0x07
+		}
+	},
+	{
+		306,
+		79,
+		0x39,
+		0x015D,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x5D, 0x06, 0x73, 0x53, 0xE8, 0x52, 0x05, 0x73, 0x53, 0xE9, 0x51, 0xE8, 0x01, 0x01, 0x54, 0x06, 0x51, 0xE9, 0x09, 0x00, 0x54, 0x05, 0x62, 0xD0, 0x00, 0x51, 0x3C, 0x62, 0xD0, 0x00, 0x13, 0x06, 0x52, 0x05, 0x31, 0x80, 0x53, 0xE1, 0x50, 0x00, 0x31, 0x80, 0x1A, 0xE1, 0xD0, 0x04, 0x56, 0x00, 0x49, 0x3D, 0xFB, 0x00, 0xA0, 0x06, 0x3D, 0xFB, 0x02, 0xB1, 0x57, 0x62, 0xD0, 0x00, 0x51, 0x3A, 0x08, 0x85, 0xBC
+		}
+	},
+	{
+		307,
+		79,
+		0x39,
+		0x015E,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x5E, 0x62, 0xD0, 0x00, 0x51, 0x39, 0x08, 0x62, 0xD0, 0x03, 0x51, 0xEA, 0x08, 0x51, 0xEB, 0x08, 0x62, 0xD0, 0x03, 0x51, 0xEC, 0x08, 0x51, 0xED, 0x08, 0x62, 0xD0, 0x01, 0x51, 0xE4, 0x08, 0x51, 0xE5, 0x08, 0x51, 0xE6, 0x08, 0x51, 0xE7, 0x08, 0x7C, 0x4D, 0x1E, 0x7C, 0x71, 0x88, 0x54, 0x01, 0x62, 0xD0, 0x03, 0x51, 0xEA, 0x08, 0x51, 0xEB, 0x08, 0x62, 0xD0, 0x03, 0x51, 0xEC, 0x08, 0x51, 0xED, 0x92, 0xD7
+		}
+	},
+	{
+		308,
+		79,
+		0x39,
+		0x015F,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x5F, 0x08, 0x62, 0xD0, 0x01, 0x51, 0xE4, 0x08, 0x51, 0xE5, 0x08, 0x51, 0xE6, 0x08, 0x51, 0xE7, 0x08, 0x7C, 0x4E, 0xE4, 0x38, 0xEE, 0x54, 0x02, 0x62, 0xD0, 0x00, 0x51, 0x3A, 0x08, 0x62, 0xD0, 0x00, 0x51, 0x39, 0x08, 0x62, 0xD0, 0x03, 0x51, 0xE6, 0x08, 0x51, 0xE7, 0x08, 0x62, 0xD0, 0x03, 0x51, 0xE8, 0x08, 0x51, 0xE9, 0x08, 0x62, 0xD0, 0x01, 0x51, 0xEC, 0x08, 0x51, 0xED, 0x08, 0x51, 0xEE, 0x51, 0x56
+		}
+	},
+	{
+		309,
+		79,
+		0x39,
+		0x0160,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x60, 0x08, 0x51, 0xEF, 0x08, 0x7C, 0x4D, 0x1E, 0x7C, 0x71, 0x88, 0x05, 0x01, 0x62, 0xD0, 0x03, 0x51, 0xE6, 0x08, 0x51, 0xE7, 0x08, 0x62, 0xD0, 0x03, 0x51, 0xE8, 0x08, 0x51, 0xE9, 0x08, 0x62, 0xD0, 0x01, 0x51, 0xEC, 0x08, 0x51, 0xED, 0x08, 0x51, 0xEE, 0x08, 0x51, 0xEF, 0x08, 0x7C, 0x4E, 0xE4, 0x38, 0xEE, 0x62, 0xD0, 0x00, 0x54, 0x03, 0x3D, 0x01, 0x00, 0xA0, 0x95, 0x3D, 0x02, 0xFF, 0xA0, 0x19, 0xE7
+		}
+	},
+	{
+		310,
+		79,
+		0x39,
+		0x0161,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x61, 0x90, 0x3D, 0x03, 0xFF, 0xA0, 0x8B, 0x52, 0x02, 0x3B, 0x03, 0xA0, 0x0B, 0x3D, 0x02, 0x08, 0xB0, 0x0B, 0x3D, 0x03, 0x01, 0xB0, 0x06, 0x7C, 0x71, 0xB9, 0x80, 0x76, 0x3D, 0x03, 0x08, 0xB0, 0x11, 0x3D, 0x02, 0x01, 0xB0, 0x0C, 0x52, 0x03, 0x03, 0x03, 0x54, 0x00, 0x07, 0x00, 0x2E, 0x80, 0x61, 0x52, 0x03, 0x3B, 0x02, 0xD0, 0x2C, 0x62, 0xD0, 0x00, 0x52, 0x02, 0x13, 0x03, 0x39, 0x01, 0xB0, 0xAA, 0x0A
+		}
+	},
+	{
+		311,
+		79,
+		0x39,
+		0x0162,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x62, 0x21, 0x50, 0x02, 0x08, 0x52, 0x02, 0x08, 0x7C, 0x4A, 0x10, 0x38, 0xFF, 0x18, 0x39, 0x00, 0xB0, 0x06, 0x7C, 0x71, 0xB9, 0x80, 0x3B, 0x52, 0x03, 0x03, 0x03, 0x54, 0x00, 0x07, 0x00, 0x2E, 0x80, 0x30, 0x52, 0x02, 0x3B, 0x03, 0xD0, 0x2A, 0x62, 0xD0, 0x00, 0x52, 0x03, 0x13, 0x02, 0x39, 0x01, 0xB0, 0x1F, 0x50, 0x02, 0x08, 0x52, 0x03, 0x08, 0x7C, 0x4A, 0x10, 0x38, 0xFF, 0x18, 0x39, 0x00, 0x26, 0x03
+		}
+	},
+	{
+		312,
+		79,
+		0x39,
+		0x0163,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x63, 0xB0, 0x0C, 0x52, 0x03, 0x03, 0x03, 0x54, 0x00, 0x07, 0x00, 0x2E, 0x80, 0x04, 0x7C, 0x71, 0xB9, 0x3D, 0x00, 0x00, 0xA0, 0x54, 0x3D, 0xFB, 0x01, 0xB0, 0x0D, 0x52, 0x00, 0x08, 0x90, 0x52, 0x38, 0xFF, 0x62, 0xD0, 0x00, 0x80, 0x48, 0x3D, 0xFB, 0x00, 0xB0, 0x12, 0x62, 0xD0, 0x00, 0x52, 0x00, 0x29, 0x30, 0x08, 0x91, 0x16, 0x38, 0xFF, 0x62, 0xD0, 0x00, 0x80, 0x32, 0x3D, 0xFB, 0x02, 0xB0, 0xE9, 0x8A
+		}
+	},
+	{
+		313,
+		79,
+		0x39,
+		0x0164,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x64, 0x28, 0x3D, 0x00, 0x48, 0xA0, 0x06, 0x3D, 0x00, 0x49, 0xB0, 0x0D, 0x52, 0x00, 0x08, 0x90, 0x21, 0x38, 0xFF, 0x62, 0xD0, 0x00, 0x80, 0x17, 0x62, 0xD0, 0x00, 0x52, 0x00, 0x29, 0x30, 0x08, 0x90, 0xEA, 0x38, 0xFF, 0x62, 0xD0, 0x00, 0x80, 0x06, 0x62, 0xD0, 0x00, 0x50, 0x00, 0x38, 0xF1, 0x20, 0x7F, 0x10, 0x4F, 0x7C, 0x72, 0xEE, 0x39, 0x00, 0xA0, 0x43, 0x3D, 0xFC, 0x00, 0xA0, 0x3E, 0x62, 0x09, 0xCB
+		}
+	},
+	{
+		314,
+		79,
+		0x39,
+		0x0165,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x65, 0xD0, 0x04, 0x52, 0xFC, 0x3A, 0xD7, 0xB0, 0x2C, 0x62, 0xD0, 0x00, 0x7C, 0x72, 0xEE, 0x53, 0xE9, 0x62, 0xD0, 0x04, 0x51, 0xD8, 0x62, 0xD0, 0x00, 0x3A, 0xE9, 0xA0, 0x21, 0x7C, 0x72, 0x39, 0x39, 0x00, 0xA0, 0x04, 0x7C, 0x72, 0x8C, 0x7C, 0x4A, 0xD2, 0x62, 0xD0, 0x04, 0x76, 0xD8, 0x56, 0xFC, 0x00, 0x80, 0x0A, 0x62, 0xD0, 0x04, 0x55, 0xD8, 0x00, 0x7C, 0x72, 0x59, 0x7C, 0x72, 0x39, 0x39, 0x4E, 0x56
+		}
+	},
+	{
+		315,
+		79,
+		0x39,
+		0x0166,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x66, 0x00, 0xA0, 0x4D, 0x3D, 0xFC, 0x00, 0xA0, 0x48, 0x62, 0xD0, 0x04, 0x3C, 0xDB, 0x00, 0xA0, 0x3D, 0x3C, 0xDB, 0x48, 0xA0, 0x38, 0x3C, 0xDB, 0x49, 0xA0, 0x33, 0x62, 0xD0, 0x04, 0x52, 0xFC, 0x3A, 0xD7, 0xB0, 0x22, 0x62, 0xD0, 0x00, 0x7C, 0x72, 0x39, 0x53, 0xE9, 0x62, 0xD0, 0x04, 0x51, 0xD6, 0x62, 0xD0, 0x00, 0x3A, 0xE9, 0xA0, 0x19, 0x7C, 0x4A, 0xD2, 0x62, 0xD0, 0x04, 0x76, 0xD6, 0x56, 0x79, 0xAD
+		}
+	},
+	{
+		316,
+		79,
+		0x39,
+		0x0167,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x67, 0xFC, 0x00, 0x80, 0x0C, 0x7C, 0x72, 0x8C, 0x7C, 0x72, 0x59, 0x80, 0x04, 0x7C, 0x72, 0x8C, 0x3D, 0xFC, 0x00, 0xA0, 0x31, 0x7C, 0x4B, 0x1A, 0x7C, 0x4B, 0x99, 0x7C, 0x4C, 0x14, 0x7C, 0x6E, 0xC0, 0x62, 0xD0, 0x04, 0x52, 0xFC, 0x3A, 0xD0, 0xB0, 0x08, 0x62, 0xD0, 0x04, 0x76, 0xCF, 0x80, 0x04, 0x7C, 0x70, 0x5B, 0x52, 0xFC, 0x62, 0xD0, 0x04, 0x53, 0xD0, 0x52, 0xFC, 0x62, 0xD0, 0x04, 0x53, 0x22, 0x00
+		}
+	},
+	{
+		317,
+		79,
+		0x39,
+		0x0168,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x68, 0xDB, 0x7C, 0x73, 0x58, 0x52, 0xFC, 0x62, 0xD0, 0x00, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x05, 0x7C, 0x72, 0x64, 0x39, 0x00, 0xA1, 0x10, 0x56, 0x00, 0x00, 0x3D, 0xFC, 0x00, 0xA1, 0x08, 0x7C, 0x4B, 0x99, 0x62, 0xD0, 0x04, 0x52, 0xFC, 0x3A, 0xD9, 0xB0, 0x04, 0x56, 0x00, 0x01, 0x3D, 0x00, 0x00, 0xB0, 0xC1, 0x62, 0xD0, 0x00, 0x3C, 0x39, 0x00, 0xB0, 0x73, 0x62, 0xD0, 0x00, 0x3C, 0x3A, 0x00, 0x73, 0xA3
+		}
+	},
+	{
+		318,
+		79,
+		0x39,
+		0x0169,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x69, 0xB0, 0x6B, 0x62, 0xD0, 0x04, 0x3C, 0x9F, 0x00, 0xB0, 0x06, 0x3C, 0xA0, 0x00, 0xA0, 0x0E, 0x62, 0xD0, 0x04, 0x3C, 0x9D, 0x00, 0xB0, 0x56, 0x3C, 0x9E, 0x00, 0xB0, 0x51, 0x3D, 0xFC, 0x30, 0xB0, 0x13, 0x62, 0xD0, 0x04, 0x3C, 0xD9, 0x34, 0xA0, 0x06, 0x3C, 0xD9, 0x3C, 0xB0, 0x3F, 0x56, 0x00, 0x01, 0x80, 0x3A, 0x3D, 0xFC, 0x3C, 0xB0, 0x13, 0x62, 0xD0, 0x04, 0x3C, 0xD9, 0x38, 0xA0, 0x06, 0x5F, 0x7C
+		}
+	},
+	{
+		319,
+		79,
+		0x39,
+		0x016A,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x6A, 0x3C, 0xD9, 0x30, 0xB0, 0x28, 0x56, 0x00, 0x01, 0x80, 0x23, 0x3D, 0xFC, 0x38, 0xA0, 0x06, 0x3D, 0xFC, 0x34, 0xB0, 0x19, 0x62, 0xD0, 0x00, 0x52, 0xFC, 0x11, 0x04, 0x7C, 0x70, 0x34, 0xA0, 0x0A, 0x52, 0xFC, 0x01, 0x04, 0x7C, 0x70, 0x34, 0xB0, 0x04, 0x56, 0x00, 0x01, 0x3D, 0xFC, 0x30, 0xB0, 0x13, 0x62, 0xD0, 0x04, 0x3C, 0xD9, 0x3E, 0xA0, 0x06, 0x3C, 0xD9, 0x32, 0xB0, 0x35, 0x56, 0x00, 0x1E, 0xFB
+		}
+	},
+	{
+		320,
+		79,
+		0x39,
+		0x016B,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x6B, 0x01, 0x80, 0x30, 0x3D, 0xFC, 0x3E, 0xB0, 0x13, 0x62, 0xD0, 0x04, 0x3C, 0xD9, 0x30, 0xA0, 0x06, 0x3C, 0xD9, 0x3C, 0xB0, 0x1E, 0x56, 0x00, 0x01, 0x80, 0x19, 0x62, 0xD0, 0x00, 0x52, 0xFC, 0x11, 0x02, 0x7C, 0x70, 0x34, 0xA0, 0x0A, 0x52, 0xFC, 0x01, 0x02, 0x7C, 0x70, 0x34, 0xB0, 0x04, 0x56, 0x00, 0x01, 0x3D, 0x00, 0x01, 0xB0, 0x1F, 0x7C, 0x72, 0x64, 0x53, 0xE9, 0x62, 0xD0, 0x04, 0x51, 0x16, 0xEC
+		}
+	},
+	{
+		321,
+		79,
+		0x39,
+		0x016C,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x6C, 0xDA, 0x62, 0xD0, 0x00, 0x3A, 0xE9, 0xA0, 0x1E, 0x7C, 0x4A, 0xD2, 0x62, 0xD0, 0x04, 0x76, 0xDA, 0x56, 0xFC, 0x00, 0x80, 0x11, 0x62, 0xD0, 0x04, 0x55, 0xDA, 0x00, 0x52, 0xFC, 0x62, 0xD0, 0x04, 0x53, 0xD9, 0x56, 0xFC, 0x00, 0x3D, 0xFC, 0x00, 0xA0, 0xAE, 0x7C, 0x4B, 0x1A, 0x62, 0xD0, 0x03, 0x51, 0xED, 0x62, 0xD0, 0x03, 0x02, 0xE9, 0x62, 0xD0, 0x00, 0x53, 0xE8, 0x62, 0xD0, 0x03, 0x51, 0x43, 0x47
+		}
+	},
+	{
+		322,
+		79,
+		0x39,
+		0x016D,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x6D, 0xEC, 0x62, 0xD0, 0x03, 0x0A, 0xE8, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x7C, 0x6D, 0xF3, 0x51, 0xE8, 0x54, 0x02, 0x51, 0xE9, 0x54, 0x01, 0x62, 0xD0, 0x03, 0x51, 0xEB, 0x62, 0xD0, 0x03, 0x02, 0xE7, 0x62, 0xD0, 0x00, 0x53, 0xE8, 0x62, 0xD0, 0x03, 0x51, 0xEA, 0x62, 0xD0, 0x03, 0x0A, 0xE6, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x7C, 0x6D, 0xF3, 0x51, 0xE8, 0x54, 0x04, 0x51, 0xE9, 0x54, 0x03, 0x52, 0xCA, 0x56
+		}
+	},
+	{
+		323,
+		79,
+		0x39,
+		0x016E,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x6E, 0x01, 0x08, 0x52, 0x02, 0x08, 0x62, 0xD0, 0x04, 0x51, 0xA3, 0x08, 0x51, 0xA4, 0x08, 0x7C, 0x4B, 0x61, 0x38, 0xFC, 0x7C, 0x6F, 0xA8, 0x52, 0x03, 0x08, 0x52, 0x04, 0x08, 0x62, 0xD0, 0x04, 0x51, 0xA1, 0x08, 0x51, 0xA2, 0x08, 0x7C, 0x4B, 0x61, 0x38, 0xFC, 0x7C, 0x6F, 0x76, 0x7C, 0x4C, 0x14, 0x7C, 0x6E, 0xC0, 0x62, 0xD0, 0x04, 0x52, 0xFC, 0x3A, 0xD0, 0xB0, 0x08, 0x62, 0xD0, 0x04, 0x76, 0x9E, 0xFF
+		}
+	},
+	{
+		324,
+		79,
+		0x39,
+		0x016F,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x6F, 0xCF, 0x80, 0x04, 0x7C, 0x70, 0x5B, 0x52, 0xFC, 0x62, 0xD0, 0x04, 0x53, 0xD0, 0x52, 0xFC, 0x62, 0xD0, 0x04, 0x53, 0xDB, 0x7C, 0x73, 0x51, 0x52, 0xFC, 0x62, 0xD0, 0x00, 0x38, 0xFB, 0x20, 0x7F, 0x10, 0x4F, 0x50, 0x00, 0x08, 0x52, 0xFC, 0x08, 0x90, 0x07, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x04, 0x7C, 0x6F, 0xC9, 0x3D, 0xFB, 0x00, 0xA0, 0x37, 0x62, 0xD0, 0x04, 0xC4, 0x4C
+		}
+	},
+	{
+		325,
+		79,
+		0x39,
+		0x0170,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x70, 0x55, 0xCD, 0x00, 0x7C, 0x70, 0x62, 0x50, 0x0D, 0x10, 0x08, 0x57, 0x8D, 0x28, 0x53, 0xE7, 0x18, 0x75, 0x09, 0x00, 0x28, 0x20, 0x02, 0xE8, 0x53, 0xE8, 0x51, 0xE7, 0x0A, 0xE9, 0x10, 0x08, 0x51, 0xE8, 0x20, 0x7C, 0x1E, 0xE4, 0x20, 0x7C, 0x73, 0x43, 0x7C, 0x70, 0x41, 0x52, 0xFB, 0x62, 0xD0, 0x00, 0x83, 0x5B, 0x3D, 0xFC, 0x00, 0xB2, 0xB8, 0x7C, 0x70, 0x5B, 0x10, 0x7C, 0x1E, 0x9A, 0x62, 0xB4, 0x2D
+		}
+	},
+	{
+		326,
+		79,
+		0x39,
+		0x0171,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x71, 0xD0, 0x00, 0x5A, 0xE9, 0x20, 0x08, 0x51, 0xE9, 0x62, 0xD0, 0x04, 0x53, 0xA9, 0x18, 0x53, 0xAA, 0x10, 0x7C, 0x1E, 0xA7, 0x62, 0xD0, 0x00, 0x5A, 0xE9, 0x20, 0x08, 0x51, 0xE9, 0x62, 0xD0, 0x04, 0x53, 0xA7, 0x18, 0x53, 0xA8, 0x62, 0xD0, 0x04, 0x3C, 0xA9, 0x00, 0xB0, 0x06, 0x3C, 0xAA, 0x00, 0xA1, 0x37, 0x62, 0xD0, 0x04, 0x3C, 0xCC, 0x01, 0xB0, 0xFB, 0x62, 0xD0, 0x00, 0x50, 0x0D, 0x10, 0x3F, 0x44
+		}
+	},
+	{
+		327,
+		79,
+		0x39,
+		0x0172,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x72, 0x08, 0x57, 0x8F, 0x28, 0x53, 0xE9, 0x18, 0x75, 0x09, 0x00, 0x28, 0x20, 0x62, 0xD0, 0x04, 0x12, 0xAA, 0x7C, 0x71, 0x7F, 0x1A, 0xA9, 0xD0, 0xDD, 0x62, 0xD0, 0x00, 0x50, 0x0D, 0x10, 0x08, 0x57, 0x8D, 0x28, 0x53, 0xE9, 0x18, 0x75, 0x09, 0x00, 0x28, 0x53, 0xE8, 0x20, 0x7C, 0x70, 0x77, 0xD0, 0xC4, 0x7C, 0x72, 0xB9, 0x3A, 0xFE, 0xB0, 0x08, 0x7C, 0x72, 0xC2, 0x3A, 0xFF, 0xA0, 0x96, 0x62, 0x15, 0xF1
+		}
+	},
+	{
+		328,
+		79,
+		0x39,
+		0x0173,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x73, 0xD0, 0x03, 0x51, 0xF4, 0x62, 0xD0, 0x03, 0x3A, 0xFC, 0xB0, 0x0D, 0x62, 0xD0, 0x03, 0x51, 0xF5, 0x62, 0xD0, 0x03, 0x3A, 0xFD, 0xA0, 0x7E, 0x62, 0xD0, 0x03, 0x51, 0xF6, 0x08, 0x51, 0xF7, 0x08, 0x62, 0xD0, 0x03, 0x51, 0xFE, 0x08, 0x51, 0xFF, 0x08, 0x7C, 0x4B, 0x61, 0x38, 0xFC, 0x62, 0xD0, 0x00, 0x50, 0x0D, 0x10, 0x57, 0x96, 0x28, 0x20, 0x7C, 0x71, 0xD1, 0xC0, 0x27, 0x62, 0xD0, 0x03, 0x07, 0xD6
+		}
+	},
+	{
+		329,
+		79,
+		0x39,
+		0x0174,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x74, 0x51, 0xF4, 0x08, 0x51, 0xF5, 0x08, 0x62, 0xD0, 0x03, 0x51, 0xFC, 0x08, 0x51, 0xFD, 0x08, 0x7C, 0x4B, 0x61, 0x38, 0xFC, 0x62, 0xD0, 0x00, 0x50, 0x0D, 0x10, 0x57, 0x97, 0x28, 0x20, 0x7C, 0x71, 0xD1, 0xD0, 0x32, 0x56, 0x01, 0x01, 0x62, 0xD0, 0x04, 0x55, 0xCD, 0x00, 0x7C, 0x70, 0x41, 0x62, 0xD0, 0x03, 0x50, 0x0D, 0x10, 0x08, 0x57, 0x9D, 0x28, 0x53, 0xBC, 0x18, 0x75, 0x09, 0x00, 0x28, 0xDC, 0x81
+		}
+	},
+	{
+		330,
+		79,
+		0x39,
+		0x0175,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x75, 0x53, 0xBD, 0x20, 0x50, 0x0D, 0x10, 0x08, 0x57, 0x9F, 0x28, 0x53, 0xBE, 0x18, 0x75, 0x09, 0x00, 0x28, 0x53, 0xBF, 0x20, 0x3D, 0x01, 0x00, 0xB0, 0x1C, 0x62, 0xD0, 0x04, 0x3C, 0xCD, 0x00, 0xB0, 0x07, 0x7C, 0x6F, 0x87, 0x56, 0x00, 0x20, 0x62, 0xD0, 0x04, 0x76, 0xCD, 0x3C, 0xCD, 0x01, 0xB0, 0x04, 0x7C, 0x71, 0x3A, 0x62, 0xD0, 0x04, 0x3C, 0xCC, 0x02, 0xB0, 0x2D, 0x62, 0xD0, 0x00, 0x50, 0xA9, 0x1C
+		}
+	},
+	{
+		331,
+		79,
+		0x39,
+		0x0176,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x76, 0x0D, 0x10, 0x08, 0x57, 0x8B, 0x28, 0x53, 0xE9, 0x18, 0x75, 0x09, 0x00, 0x28, 0x20, 0x62, 0xD0, 0x04, 0x12, 0xAA, 0x7C, 0x71, 0x7F, 0x1A, 0xA9, 0xD0, 0x0F, 0x7C, 0x70, 0x62, 0x7C, 0x70, 0x77, 0xD0, 0x07, 0x56, 0x00, 0x40, 0x7C, 0x4B, 0x8F, 0x62, 0xD0, 0x03, 0x3C, 0xEE, 0x00, 0xB0, 0x06, 0x3C, 0xEF, 0x00, 0xA1, 0x26, 0x62, 0xD0, 0x04, 0x3C, 0xCC, 0x00, 0xB0, 0x42, 0x62, 0xD0, 0x04, 0x2A, 0x1F
+		}
+	},
+	{
+		332,
+		79,
+		0x39,
+		0x0177,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x77, 0x3C, 0xCD, 0x01, 0xB0, 0x3A, 0x62, 0xD0, 0x03, 0x51, 0xEF, 0x62, 0xD0, 0x04, 0x12, 0xA8, 0x62, 0xD0, 0x03, 0x51, 0xEE, 0x62, 0xD0, 0x04, 0x1A, 0xA7, 0xD0, 0x0D, 0x7C, 0x72, 0x6F, 0x54, 0x03, 0x7C, 0x72, 0x7A, 0x54, 0x02, 0x80, 0x04, 0x7C, 0x70, 0x41, 0x62, 0xD0, 0x00, 0x50, 0x0D, 0x10, 0x7C, 0x72, 0x0D, 0x20, 0x7C, 0x73, 0x4A, 0xD0, 0xEA, 0x7C, 0x4B, 0x8F, 0x80, 0xE5, 0x62, 0xD0, 0x5E, 0x88
+		}
+	},
+	{
+		333,
+		79,
+		0x39,
+		0x0178,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x78, 0x04, 0x3C, 0xCC, 0x01, 0xB0, 0xDD, 0x7C, 0x73, 0x35, 0xB0, 0xD8, 0x62, 0xD0, 0x03, 0x51, 0xEF, 0x62, 0xD0, 0x04, 0x12, 0xA8, 0x62, 0xD0, 0x03, 0x51, 0xEE, 0x62, 0xD0, 0x04, 0x1A, 0xA7, 0xD0, 0x0D, 0x7C, 0x72, 0x6F, 0x53, 0xEF, 0x7C, 0x72, 0x7A, 0x53, 0xEE, 0x80, 0x04, 0x7C, 0x70, 0x41, 0x62, 0xD0, 0x00, 0x50, 0x0D, 0x10, 0x08, 0x57, 0x93, 0x28, 0x53, 0xE9, 0x18, 0x75, 0x09, 0x00, 0x77, 0xBB
+		}
+	},
+	{
+		334,
+		79,
+		0x39,
+		0x0179,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x79, 0x28, 0x20, 0x62, 0xD0, 0x03, 0x12, 0xEF, 0x7C, 0x70, 0xA1, 0x1A, 0xEE, 0xD0, 0x88, 0x62, 0xD0, 0x00, 0x50, 0x0D, 0x10, 0x7C, 0x72, 0x0D, 0x53, 0xE8, 0x20, 0x62, 0xD0, 0x03, 0x51, 0xEF, 0x62, 0xD0, 0x00, 0x12, 0xE8, 0x62, 0xD0, 0x03, 0x51, 0xEE, 0x62, 0xD0, 0x00, 0x1A, 0xE9, 0xD0, 0x66, 0x62, 0xD0, 0x03, 0x51, 0xF6, 0x08, 0x51, 0xF7, 0x08, 0x62, 0xD0, 0x03, 0x51, 0xBC, 0x08, 0x51, 0x4F, 0x6C
+		}
+	},
+	{
+		335,
+		79,
+		0x39,
+		0x017A,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x7A, 0xBD, 0x08, 0x7C, 0x4B, 0x61, 0x38, 0xFC, 0x62, 0xD0, 0x00, 0x50, 0x0D, 0x10, 0x57, 0x95, 0x28, 0x20, 0x7C, 0x70, 0xBF, 0xD0, 0x2F, 0x62, 0xD0, 0x03, 0x51, 0xF4, 0x08, 0x51, 0xF5, 0x08, 0x62, 0xD0, 0x03, 0x51, 0xBE, 0x08, 0x51, 0xBF, 0x08, 0x7C, 0x4B, 0x61, 0x38, 0xFC, 0x62, 0xD0, 0x00, 0x50, 0x0D, 0x10, 0x57, 0x95, 0x28, 0x20, 0x7C, 0x70, 0xBF, 0xD0, 0x09, 0x56, 0x00, 0x22, 0x7C, 0x79, 0xC1
+		}
+	},
+	{
+		336,
+		79,
+		0x39,
+		0x017B,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x7B, 0x4B, 0x8F, 0x80, 0x1F, 0x7C, 0x6F, 0x87, 0x62, 0xD0, 0x04, 0x55, 0xCD, 0x01, 0x7C, 0x71, 0x3A, 0x56, 0x00, 0x20, 0x80, 0x0E, 0x7C, 0x4B, 0x8F, 0x80, 0x09, 0x7C, 0x73, 0x35, 0xB0, 0x04, 0x7C, 0x4B, 0x8F, 0x10, 0x50, 0x00, 0x5C, 0x7C, 0x1E, 0xE4, 0x20, 0x7C, 0x73, 0x43, 0x80, 0x53, 0x62, 0xD0, 0x04, 0x3C, 0xCC, 0x00, 0xB0, 0x04, 0x7C, 0x4A, 0xD9, 0x52, 0xFC, 0x62, 0xD0, 0x04, 0x53, 0x43, 0x56
+		}
+	},
+	{
+		337,
+		79,
+		0x39,
+		0x017C,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x7C, 0xCC, 0x62, 0xD0, 0x01, 0x51, 0xE7, 0x08, 0x51, 0xE6, 0x62, 0xD0, 0x03, 0x53, 0xF6, 0x18, 0x53, 0xF7, 0x62, 0xD0, 0x01, 0x51, 0xE5, 0x08, 0x51, 0xE4, 0x62, 0xD0, 0x03, 0x53, 0xF4, 0x18, 0x53, 0xF5, 0x62, 0xD0, 0x01, 0x51, 0xEF, 0x08, 0x51, 0xEE, 0x62, 0xD0, 0x03, 0x53, 0xF2, 0x18, 0x53, 0xF3, 0x62, 0xD0, 0x01, 0x51, 0xED, 0x08, 0x51, 0xEC, 0x62, 0xD0, 0x03, 0x53, 0xF0, 0x18, 0x53, 0x03, 0xD7
+		}
+	},
+	{
+		338,
+		79,
+		0x39,
+		0x017D,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x7D, 0xF1, 0x3D, 0x00, 0x40, 0xB0, 0x43, 0x7C, 0x72, 0xC2, 0x02, 0xF3, 0x62, 0xD0, 0x00, 0x53, 0xE8, 0x7C, 0x72, 0xB9, 0x0A, 0xF2, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x7C, 0x6D, 0xF3, 0x51, 0xE8, 0x08, 0x51, 0xE9, 0x62, 0xD0, 0x04, 0x53, 0xA3, 0x18, 0x53, 0xA4, 0x62, 0xD0, 0x03, 0x51, 0xF5, 0x02, 0xF5, 0x62, 0xD0, 0x00, 0x53, 0xE8, 0x62, 0xD0, 0x03, 0x51, 0xF4, 0x0A, 0xF4, 0x62, 0xD0, 0x00, 0x9F, 0x10
+		}
+	},
+	{
+		339,
+		79,
+		0x39,
+		0x017E,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x7E, 0x53, 0xE9, 0x7C, 0x6D, 0xF3, 0x7C, 0x71, 0xC3, 0x52, 0x00, 0x62, 0xD0, 0x00, 0x38, 0xFC, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x06, 0x62, 0xD0, 0x00, 0x3C, 0x0E, 0x00, 0xA0, 0x06, 0x3D, 0xFC, 0xFF, 0xB0, 0x09, 0x62, 0xD0, 0x04, 0x55, 0xE2, 0x01, 0x85, 0xED, 0x62, 0xD0, 0x04, 0x3C, 0xE1, 0x00, 0xA0, 0x06, 0x3C, 0xE1, 0xFF, 0xB0, 0x74, 0x56, 0x00, 0x00, 0x56, 0x00, 0x00, 0x80, 0x65, 0x62, 0xDB, 0x89
+		}
+	},
+	{
+		340,
+		79,
+		0x39,
+		0x017F,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x7F, 0xD0, 0x00, 0x52, 0x00, 0x7C, 0x6D, 0x8A, 0x06, 0xE8, 0xE0, 0x7C, 0x71, 0x08, 0x62, 0xD0, 0x04, 0x51, 0xE2, 0x62, 0xD0, 0x00, 0x3F, 0xE8, 0x52, 0x00, 0x7C, 0x6D, 0x8A, 0x06, 0xE8, 0xE1, 0x7C, 0x71, 0x08, 0x7C, 0x72, 0x9A, 0x62, 0xD0, 0x04, 0x76, 0xE2, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0xB8, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x53, 0xE9, 0x7C, 0x6E, 0xAE, 0x06, 0xE6, 0x45, 0x5E
+		}
+	},
+	{
+		341,
+		79,
+		0x39,
+		0x0180,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x80, 0xB0, 0x0E, 0xE7, 0x03, 0x7C, 0x6D, 0xEA, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0xB4, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x53, 0xE9, 0x7C, 0x6E, 0xAE, 0x06, 0xE6, 0xEA, 0x0E, 0xE7, 0x00, 0x7C, 0x6D, 0xEA, 0x77, 0x00, 0x7C, 0x6F, 0xC1, 0xCF, 0x98, 0x85, 0x6D, 0x62, 0xD0, 0x04, 0x50, 0x03, 0x3A, 0xE1, 0xC0, 0x0A, 0x62, 0xD0, 0x00, 0x50, 0x03, 0x3A, 0x0E, 0xD4, 0xAD, 0x7C, 0x71, 0x93, 0xC2, 0xFD, 0xCF
+		}
+	},
+	{
+		342,
+		79,
+		0x39,
+		0x0181,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x81, 0x0D, 0x7C, 0x6F, 0xC9, 0x80, 0x13, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0xCC, 0x7C, 0x6F, 0x54, 0x50, 0xFF, 0x3F, 0xE8, 0x77, 0x00, 0x7C, 0x71, 0xB1, 0xCF, 0xEA, 0x56, 0x00, 0x00, 0x80, 0x51, 0x56, 0x02, 0x00, 0x80, 0x41, 0x62, 0xD0, 0x00, 0x7C, 0x6F, 0x27, 0x06, 0xE8, 0xCC, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x53, 0xE9, 0x47, 0xE9, 0x0F, 0xA0, 0x29, 0x52, 0x02, 0x08, 0x95, 0x00
+		}
+	},
+	{
+		343,
+		79,
+		0x39,
+		0x0182,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x82, 0x52, 0x00, 0x08, 0x95, 0xA1, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x54, 0x03, 0x50, 0x0D, 0x10, 0x57, 0x85, 0x28, 0x20, 0x3B, 0x03, 0xC0, 0x0F, 0x52, 0x02, 0x08, 0x52, 0x00, 0x08, 0x95, 0x0D, 0x38, 0xFE, 0x77, 0x01, 0x80, 0x0C, 0x77, 0x02, 0x62, 0xD0, 0x04, 0x52, 0x02, 0x3A, 0xE1, 0xCF, 0xB8, 0x77, 0x00, 0x7C, 0x6F, 0xC1, 0xCF, 0xAC, 0x7C, 0x71, 0xE1, 0x3D, 0x00, 0x02, 0xA0, 0x06, 0x3D, 0xB2, 0x3B
+		}
+	},
+	{
+		344,
+		79,
+		0x39,
+		0x0183,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x83, 0x00, 0x03, 0xB1, 0x09, 0x7C, 0x68, 0xBB, 0x52, 0x01, 0x08, 0x7C, 0x66, 0xDE, 0x52, 0x01, 0x08, 0x7C, 0x6A, 0x7F, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x51, 0x0E, 0x13, 0x01, 0x62, 0xD0, 0x00, 0x54, 0x00, 0x56, 0x02, 0x00, 0x80, 0xDF, 0x62, 0xD0, 0x00, 0x7C, 0x6F, 0x27, 0x55, 0xE6, 0x03, 0x55, 0xE7, 0x00, 0x55, 0xE1, 0x00, 0x55, 0xE0, 0x00, 0x3C, 0xE7, 0x00, 0xB0, 0x06, 0x3C, 0xE6, 0x00, 0x73, 0xBE
+		}
+	},
+	{
+		345,
+		79,
+		0x39,
+		0x0184,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x84, 0xA0, 0x1A, 0x70, 0xFB, 0x6E, 0xE7, 0x6E, 0xE6, 0xD0, 0x0C, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x04, 0xE1, 0x51, 0xE9, 0x0C, 0xE0, 0x65, 0xE8, 0x6B, 0xE9, 0x8F, 0xDE, 0x5F, 0xE8, 0xE1, 0x5F, 0xE9, 0xE0, 0x62, 0xD0, 0x00, 0x06, 0xE8, 0xCE, 0x0E, 0xE9, 0x02, 0x7C, 0x6D, 0x83, 0x54, 0x04, 0x7C, 0x6F, 0x27, 0x06, 0xE8, 0xFD, 0x0E, 0xE9, 0x02, 0x7C, 0x6D, 0x83, 0x54, 0x03, 0x7C, 0x71, 0x00, 0xCF, 0x77
+		}
+	},
+	{
+		346,
+		79,
+		0x39,
+		0x0185,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x85, 0x55, 0xE6, 0x03, 0x55, 0xE7, 0x00, 0x55, 0xE1, 0x00, 0x55, 0xE0, 0x00, 0x3C, 0xE7, 0x00, 0xB0, 0x06, 0x3C, 0xE6, 0x00, 0xA0, 0x1A, 0x70, 0xFB, 0x6E, 0xE7, 0x6E, 0xE6, 0xD0, 0x0C, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x04, 0xE1, 0x51, 0xE9, 0x0C, 0xE0, 0x65, 0xE8, 0x6B, 0xE9, 0x8F, 0xDE, 0x5F, 0xE8, 0xE1, 0x5F, 0xE9, 0xE0, 0x62, 0xD0, 0x00, 0x06, 0xE8, 0xC5, 0x0E, 0xE9, 0x02, 0x7C, 0x6D, 0x25, 0x24
+		}
+	},
+	{
+		347,
+		79,
+		0x39,
+		0x0186,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x86, 0x83, 0x54, 0x03, 0x52, 0x03, 0x7C, 0x6D, 0x8A, 0x06, 0xE8, 0xE0, 0x0E, 0xE9, 0x01, 0x7C, 0x6D, 0x83, 0x53, 0xE9, 0x7C, 0x72, 0x31, 0x06, 0xE6, 0xD0, 0x0E, 0xE7, 0x03, 0x7C, 0x6D, 0xEA, 0x52, 0x03, 0x7C, 0x6D, 0x8A, 0x06, 0xE8, 0xE1, 0x0E, 0xE9, 0x01, 0x7C, 0x6D, 0x83, 0x7C, 0x6E, 0xE1, 0x52, 0x04, 0x7C, 0x6E, 0xB6, 0x06, 0xE6, 0xD5, 0x0E, 0xE7, 0x02, 0x7C, 0x6D, 0xEA, 0x51, 0xE8, 0x36, 0x47
+		}
+	},
+	{
+		348,
+		79,
+		0x39,
+		0x0187,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x87, 0x3F, 0xE6, 0x77, 0x02, 0x52, 0x02, 0x3B, 0x00, 0xCF, 0x1D, 0x83, 0x3A, 0x56, 0x00, 0x00, 0x80, 0x76, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0xCC, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x53, 0xE9, 0x47, 0xE9, 0xF0, 0xA0, 0x5E, 0x56, 0x03, 0x00, 0x56, 0x04, 0xFF, 0x56, 0x02, 0x00, 0x80, 0x3B, 0x62, 0xD0, 0x00, 0x7C, 0x6F, 0x27, 0x06, 0xE8, 0xCC, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x02, 0xE0
+		}
+	},
+	{
+		349,
+		79,
+		0x39,
+		0x0188,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x88, 0x83, 0x53, 0xE9, 0x47, 0xE9, 0x0F, 0xA0, 0x23, 0x52, 0x02, 0x08, 0x52, 0x00, 0x08, 0x94, 0x16, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x54, 0x05, 0x3D, 0x03, 0x00, 0xB0, 0x06, 0x7C, 0x71, 0x4B, 0x80, 0x0A, 0x52, 0x05, 0x3B, 0x03, 0xD0, 0x04, 0x7C, 0x71, 0x4B, 0x77, 0x02, 0x62, 0xD0, 0x04, 0x52, 0x02, 0x3A, 0xE1, 0xCF, 0xBE, 0x3D, 0x04, 0xFF, 0xA0, 0x0B, 0x52, 0x04, 0x08, 0x52, 0x00, 0x08, 0x89, 0xEF
+		}
+	},
+	{
+		350,
+		79,
+		0x39,
+		0x0189,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x89, 0x93, 0x6A, 0x38, 0xFE, 0x77, 0x00, 0x7C, 0x6F, 0xC1, 0xCF, 0x87, 0x82, 0xB9, 0x7C, 0x6F, 0xC9, 0x80, 0x13, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0xCC, 0x7C, 0x6F, 0x54, 0x50, 0xFF, 0x3F, 0xE8, 0x77, 0x00, 0x7C, 0x6F, 0xC1, 0xCF, 0xEA, 0x56, 0x00, 0x00, 0x80, 0x51, 0x56, 0x02, 0x00, 0x80, 0x41, 0x62, 0xD0, 0x00, 0x7C, 0x6F, 0x27, 0x06, 0xE8, 0xCC, 0x0E, 0xE9, 0x03, 0x7C, 0xED, 0xB8
+		}
+	},
+	{
+		351,
+		79,
+		0x39,
+		0x018A,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x8A, 0x6D, 0x83, 0x53, 0xE9, 0x47, 0xE9, 0xF0, 0xA0, 0x29, 0x52, 0x00, 0x08, 0x52, 0x02, 0x08, 0x93, 0x95, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x54, 0x03, 0x50, 0x0D, 0x10, 0x57, 0x85, 0x28, 0x20, 0x3B, 0x03, 0xC0, 0x0F, 0x52, 0x00, 0x08, 0x52, 0x02, 0x08, 0x93, 0x01, 0x38, 0xFE, 0x77, 0x01, 0x80, 0x0C, 0x77, 0x02, 0x62, 0xD0, 0x00, 0x52, 0x02, 0x3A, 0x0E, 0xCF, 0xB8, 0x77, 0x00, 0x7C, 0x71, 0x06, 0xEB
+		}
+	},
+	{
+		352,
+		79,
+		0x39,
+		0x018B,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x8B, 0xB1, 0xCF, 0xAC, 0x7C, 0x71, 0xE1, 0x50, 0x00, 0x3B, 0x00, 0xC0, 0x06, 0x3D, 0x00, 0x04, 0xD1, 0x24, 0x62, 0xD0, 0x00, 0x51, 0x0E, 0x13, 0x01, 0x62, 0xD0, 0x00, 0x39, 0x04, 0xD1, 0x16, 0x7C, 0x68, 0xBB, 0x52, 0x01, 0x08, 0x93, 0xF7, 0x52, 0x01, 0x08, 0x7C, 0x6B, 0x94, 0x38, 0xFE, 0x56, 0x02, 0x00, 0x80, 0xF9, 0x62, 0xD0, 0x00, 0x7C, 0x6F, 0x27, 0x55, 0xE6, 0x03, 0x55, 0xE7, 0x00, 0x9B, 0x16
+		}
+	},
+	{
+		353,
+		79,
+		0x39,
+		0x018C,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x8C, 0x55, 0xE1, 0x00, 0x55, 0xE0, 0x00, 0x3C, 0xE7, 0x00, 0xB0, 0x06, 0x3C, 0xE6, 0x00, 0xA0, 0x1A, 0x70, 0xFB, 0x6E, 0xE7, 0x6E, 0xE6, 0xD0, 0x0C, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x04, 0xE1, 0x51, 0xE9, 0x0C, 0xE0, 0x65, 0xE8, 0x6B, 0xE9, 0x8F, 0xDE, 0x5F, 0xE8, 0xE1, 0x5F, 0xE9, 0xE0, 0x62, 0xD0, 0x00, 0x06, 0xE8, 0xC5, 0x0E, 0xE9, 0x02, 0x7C, 0x6D, 0x83, 0x54, 0x04, 0x7C, 0x6F, 0x27, 0x98, 0x11
+		}
+	},
+	{
+		354,
+		79,
+		0x39,
+		0x018D,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x8D, 0x06, 0xE8, 0xFD, 0x0E, 0xE9, 0x02, 0x7C, 0x6D, 0x83, 0x54, 0x03, 0x7C, 0x6F, 0x44, 0x06, 0xE8, 0xCC, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x7A, 0xE8, 0x53, 0xE7, 0x26, 0xE7, 0xF0, 0x7C, 0x6F, 0xD0, 0x7C, 0x71, 0x00, 0x55, 0xE6, 0x03, 0x55, 0xE7, 0x00, 0x55, 0xE1, 0x00, 0x55, 0xE0, 0x00, 0x3C, 0xE7, 0x00, 0xB0, 0x06, 0x3C, 0xE6, 0x00, 0xA0, 0x1A, 0x70, 0xFB, 0x6E, 0xE7, 0x6E, 0xE6, 0xE5, 0xAC
+		}
+	},
+	{
+		355,
+		79,
+		0x39,
+		0x018E,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x8E, 0xD0, 0x0C, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x04, 0xE1, 0x51, 0xE9, 0x0C, 0xE0, 0x65, 0xE8, 0x6B, 0xE9, 0x8F, 0xDE, 0x5F, 0xE8, 0xE1, 0x5F, 0xE9, 0xE0, 0x62, 0xD0, 0x00, 0x06, 0xE8, 0xCE, 0x0E, 0xE9, 0x02, 0x7C, 0x6D, 0x83, 0x54, 0x03, 0x52, 0x04, 0x7C, 0x6D, 0x8A, 0x06, 0xE8, 0xE0, 0x0E, 0xE9, 0x01, 0x7C, 0x6D, 0x83, 0x53, 0xE9, 0x52, 0x03, 0x53, 0xE6, 0x55, 0xE7, 0x00, 0x06, 0xE6, 0x47, 0x71
+		}
+	},
+	{
+		356,
+		79,
+		0x39,
+		0x018F,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x8F, 0xD0, 0x0E, 0xE7, 0x03, 0x7C, 0x6D, 0xEA, 0x52, 0x04, 0x7C, 0x6D, 0x8A, 0x06, 0xE8, 0xE1, 0x0E, 0xE9, 0x01, 0x7C, 0x6D, 0x83, 0x7C, 0x6E, 0xE1, 0x52, 0x03, 0x7C, 0x6E, 0xB6, 0x06, 0xE6, 0xD5, 0x0E, 0xE7, 0x02, 0x7C, 0x6D, 0xEA, 0x51, 0xE8, 0x3F, 0xE6, 0x77, 0x02, 0x52, 0x02, 0x3B, 0x00, 0xCF, 0x03, 0x80, 0x80, 0x56, 0x00, 0x00, 0x80, 0x76, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0x2C, 0x3C
+		}
+	},
+	{
+		357,
+		79,
+		0x39,
+		0x0190,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x90, 0xE8, 0xCC, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x53, 0xE9, 0x47, 0xE9, 0x0F, 0xA0, 0x5E, 0x56, 0x03, 0x00, 0x56, 0x04, 0xFF, 0x56, 0x02, 0x00, 0x80, 0x3B, 0x62, 0xD0, 0x00, 0x7C, 0x6F, 0x27, 0x06, 0xE8, 0xCC, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x53, 0xE9, 0x47, 0xE9, 0xF0, 0xA0, 0x23, 0x52, 0x00, 0x08, 0x52, 0x02, 0x08, 0x91, 0xEE, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x54, 0x05, 0x3D, 0x0E, 0x01
+		}
+	},
+	{
+		358,
+		79,
+		0x39,
+		0x0191,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x91, 0x03, 0x00, 0xB0, 0x06, 0x7C, 0x71, 0x4B, 0x80, 0x0A, 0x52, 0x05, 0x3B, 0x03, 0xD0, 0x04, 0x7C, 0x71, 0x4B, 0x77, 0x02, 0x62, 0xD0, 0x00, 0x52, 0x02, 0x3A, 0x0E, 0xCF, 0xBE, 0x3D, 0x04, 0xFF, 0xA0, 0x0B, 0x52, 0x00, 0x08, 0x52, 0x04, 0x08, 0x91, 0x42, 0x38, 0xFE, 0x77, 0x00, 0x7C, 0x71, 0xB1, 0xCF, 0x87, 0x7C, 0x71, 0x93, 0xD0, 0x8E, 0x7C, 0x72, 0xDD, 0x12, 0xE1, 0x62, 0xD0, 0x00, 0xD5, 0x90
+		}
+	},
+	{
+		359,
+		79,
+		0x39,
+		0x0192,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x92, 0x54, 0x00, 0x56, 0x02, 0x00, 0x80, 0x57, 0x62, 0xD0, 0x00, 0x7C, 0x6F, 0x27, 0x06, 0xE8, 0xCC, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x53, 0xE9, 0x26, 0xE9, 0xF0, 0x3C, 0xE9, 0xF0, 0xB0, 0x35, 0x7C, 0x6F, 0x27, 0x06, 0xE8, 0xD0, 0x7C, 0x6F, 0x54, 0x62, 0xD0, 0x04, 0x51, 0xE2, 0x62, 0xD0, 0x00, 0x3F, 0xE8, 0x52, 0x02, 0x7C, 0x6D, 0xD9, 0x06, 0xE8, 0xD5, 0x7C, 0x6F, 0xE2, 0x7C, 0x72, 0x81, 0xE9
+		}
+	},
+	{
+		360,
+		79,
+		0x39,
+		0x0193,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x93, 0x9A, 0x62, 0xD0, 0x04, 0x51, 0xE2, 0x08, 0x91, 0xCC, 0x38, 0xFF, 0x62, 0xD0, 0x04, 0x53, 0xE2, 0x7B, 0x00, 0x80, 0x08, 0x3D, 0x00, 0x00, 0xB0, 0x03, 0x80, 0x2B, 0x77, 0x02, 0x62, 0xD0, 0x00, 0x52, 0x02, 0x3A, 0x0E, 0xCF, 0xA2, 0x80, 0x1E, 0x50, 0x00, 0x08, 0x91, 0xF1, 0x38, 0xFF, 0x7C, 0x71, 0x93, 0xC0, 0x0A, 0x50, 0x00, 0x08, 0x95, 0x86, 0x38, 0xFF, 0x80, 0x09, 0x50, 0x00, 0x08, 0xE4, 0xB0
+		}
+	},
+	{
+		361,
+		79,
+		0x39,
+		0x0194,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x94, 0x7C, 0x6B, 0x94, 0x38, 0xFF, 0x56, 0x00, 0x00, 0x80, 0x88, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0xD0, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x53, 0xE9, 0x7C, 0x6E, 0xAE, 0x65, 0xE6, 0x6B, 0xE7, 0x7C, 0x70, 0x1E, 0x06, 0xE6, 0xE0, 0x0E, 0xE7, 0x01, 0x7C, 0x6D, 0xEA, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0xB8, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x53, 0xE9, 0x7C, 0x6E, 0xAE, 0x06, 0x78, 0xD9
+		}
+	},
+	{
+		362,
+		79,
+		0x39,
+		0x0195,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x95, 0xE6, 0xB0, 0x0E, 0xE7, 0x03, 0x7C, 0x6D, 0xEA, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0xB4, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x53, 0xE9, 0x7C, 0x6E, 0xAE, 0x06, 0xE6, 0xEA, 0x0E, 0xE7, 0x00, 0x7C, 0x6D, 0xEA, 0x7C, 0x6D, 0xA5, 0x65, 0xE8, 0x6B, 0xE9, 0x06, 0xE8, 0xD5, 0x0E, 0xE9, 0x02, 0x7C, 0x6D, 0x83, 0x7C, 0x6E, 0xE1, 0x7C, 0x73, 0x3C, 0x7C, 0x6E, 0xAE, 0x65, 0xE6, 0x6B, 0xE7, 0x7C, 0x67, 0xB8
+		}
+	},
+	{
+		363,
+		79,
+		0x39,
+		0x0196,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x96, 0x70, 0x1E, 0x06, 0xE6, 0xE1, 0x0E, 0xE7, 0x01, 0x7C, 0x6D, 0xEA, 0x51, 0xE8, 0x3F, 0xE6, 0x77, 0x00, 0x7C, 0x6F, 0xC1, 0xCF, 0x75, 0x3D, 0xFC, 0xFF, 0xA0, 0x08, 0x7C, 0x72, 0xDD, 0x53, 0xE1, 0x80, 0x07, 0x62, 0xD0, 0x04, 0x55, 0xE1, 0xFF, 0x38, 0xFA, 0x20, 0x7F, 0x10, 0x4F, 0x62, 0xD0, 0x00, 0x52, 0xFB, 0x97, 0xD5, 0x40, 0x06, 0xE8, 0xE0, 0x0E, 0xE9, 0x01, 0x97, 0xC5, 0x40, 0x53, 0xCA, 0x7F
+		}
+	},
+	{
+		364,
+		79,
+		0x39,
+		0x0197,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x97, 0xE9, 0x52, 0xFC, 0x53, 0xE6, 0x55, 0xE7, 0x00, 0x06, 0xE6, 0xD0, 0x0E, 0xE7, 0x03, 0x7C, 0x6D, 0xEA, 0x52, 0xFB, 0x97, 0xB5, 0x40, 0x06, 0xE8, 0xE1, 0x0E, 0xE9, 0x01, 0x97, 0xA5, 0x40, 0x7C, 0x6E, 0xE1, 0x52, 0xFC, 0x7C, 0x6E, 0xB6, 0x06, 0xE6, 0xD5, 0x0E, 0xE7, 0x02, 0x97, 0xFB, 0x40, 0x51, 0xE8, 0x3F, 0xE6, 0x7C, 0x70, 0xF0, 0x06, 0xE8, 0xCC, 0x0E, 0xE9, 0x03, 0x97, 0x84, 0x40, 0xDB, 0xA2
+		}
+	},
+	{
+		365,
+		79,
+		0x39,
+		0x0198,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x98, 0x7A, 0xE8, 0x53, 0xE7, 0x26, 0xE7, 0xF0, 0x7C, 0x6F, 0xD0, 0x52, 0xFC, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x06, 0xE8, 0xCC, 0x0E, 0xE9, 0x03, 0x97, 0x6A, 0x40, 0x7A, 0xE8, 0x53, 0xE7, 0x26, 0xE7, 0x0F, 0x7C, 0x6F, 0xD0, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x03, 0x7C, 0x6F, 0xB9, 0x55, 0xE9, 0x00, 0x06, 0xE8, 0xB8, 0x0E, 0xE9, 0x03, 0x97, 0x4B, 0x40, 0x54, 0x00, 0x7C, 0x70, 0xF0, 0x06, 0xE8, 0x7E, 0xE9
+		}
+	},
+	{
+		366,
+		79,
+		0x39,
+		0x0199,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x99, 0xB0, 0x0E, 0xE9, 0x03, 0x97, 0x3D, 0x40, 0x54, 0x01, 0x52, 0x00, 0x3B, 0x01, 0xD0, 0x08, 0x7C, 0x73, 0x06, 0x54, 0x02, 0x80, 0x06, 0x7C, 0x72, 0xFE, 0x54, 0x02, 0x7C, 0x6F, 0xB9, 0x55, 0xE9, 0x00, 0x06, 0xE8, 0xB4, 0x0E, 0xE9, 0x03, 0x97, 0x1A, 0x40, 0x54, 0x00, 0x7C, 0x70, 0xF0, 0x06, 0xE8, 0xEA, 0x0E, 0xE9, 0x00, 0x97, 0x0C, 0x40, 0x54, 0x01, 0x52, 0x00, 0x3B, 0x01, 0xD0, 0x08, 0x98, 0x1E
+		}
+	},
+	{
+		367,
+		79,
+		0x39,
+		0x019A,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x9A, 0x7C, 0x73, 0x06, 0x05, 0x02, 0x80, 0x06, 0x7C, 0x72, 0xFE, 0x05, 0x02, 0x52, 0x02, 0x62, 0xD0, 0x00, 0x38, 0xFD, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x77, 0xFC, 0xB0, 0x03, 0x77, 0xFC, 0x50, 0x0F, 0x3B, 0xFC, 0xD0, 0x04, 0x56, 0xFC, 0x01, 0x52, 0xFC, 0x54, 0x01, 0x56, 0x00, 0x00, 0x80, 0x1A, 0x62, 0xD0, 0x00, 0x52, 0x00, 0x96, 0xD2, 0x40, 0x06, 0xE8, 0xE0, 0x0E, 0xE9, 0x01, 0x96, 0xD9, 0xA1
+		}
+	},
+	{
+		368,
+		79,
+		0x39,
+		0x019B,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x9B, 0xC2, 0x40, 0x3B, 0xFC, 0xB0, 0x03, 0x77, 0xFC, 0x77, 0x00, 0x7C, 0x6F, 0xC1, 0xCF, 0xE3, 0x52, 0xFC, 0x3B, 0x01, 0xBF, 0xD4, 0x52, 0xFC, 0x62, 0xD0, 0x00, 0x38, 0xFE, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x0B, 0x3D, 0xFC, 0x00, 0xB0, 0x11, 0x62, 0xD0, 0x04, 0x51, 0xE1, 0x54, 0x03, 0x62, 0xD0, 0x00, 0x51, 0x0E, 0x54, 0x02, 0x80, 0x0D, 0x62, 0xD0, 0x04, 0x51, 0xE1, 0x13, 0xFC, 0x54, 0x03, 0x43, 0x76
+		}
+	},
+	{
+		369,
+		79,
+		0x39,
+		0x019C,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x9C, 0x7C, 0x70, 0xDB, 0x56, 0x00, 0x00, 0x56, 0x01, 0x00, 0x81, 0xA7, 0x56, 0x04, 0x00, 0x81, 0x97, 0x3D, 0xFC, 0x00, 0xB0, 0x22, 0x62, 0xD0, 0x00, 0x7C, 0x6F, 0x44, 0x06, 0xE8, 0xB8, 0x0E, 0xE9, 0x03, 0x96, 0x60, 0x40, 0x54, 0x05, 0x96, 0x7D, 0x40, 0x06, 0xE8, 0xB0, 0x0E, 0xE9, 0x03, 0x96, 0x52, 0x40, 0x54, 0x06, 0x80, 0x54, 0x62, 0xD0, 0x00, 0x52, 0x04, 0x96, 0xBD, 0x40, 0x96, 0x43, 0xDE, 0xAD
+		}
+	},
+	{
+		370,
+		79,
+		0x39,
+		0x019D,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x9D, 0x40, 0x54, 0x05, 0x96, 0x60, 0x40, 0x55, 0xE6, 0x03, 0x55, 0xE7, 0x00, 0x55, 0xE1, 0x00, 0x55, 0xE0, 0x00, 0x3C, 0xE7, 0x00, 0xB0, 0x06, 0x3C, 0xE6, 0x00, 0xA0, 0x1A, 0x70, 0xFB, 0x6E, 0xE7, 0x6E, 0xE6, 0xD0, 0x0C, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x04, 0xE1, 0x51, 0xE9, 0x0C, 0xE0, 0x65, 0xE8, 0x6B, 0xE9, 0x8F, 0xDE, 0x5F, 0xE8, 0xE1, 0x5F, 0xE9, 0xE0, 0x62, 0xD0, 0x00, 0x06, 0xE8, 0x4D, 0x8C
+		}
+	},
+	{
+		371,
+		79,
+		0x39,
+		0x019E,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x9E, 0xC3, 0x0E, 0xE9, 0x02, 0x95, 0xFD, 0x40, 0x54, 0x06, 0x52, 0x05, 0x3B, 0x06, 0xD0, 0x0C, 0x62, 0xD0, 0x00, 0x52, 0x06, 0x13, 0x05, 0x54, 0x07, 0x80, 0x0A, 0x62, 0xD0, 0x00, 0x52, 0x05, 0x13, 0x06, 0x54, 0x07, 0x3D, 0xFC, 0x00, 0xB0, 0x22, 0x62, 0xD0, 0x00, 0x97, 0x97, 0x40, 0x06, 0xE8, 0xB4, 0x0E, 0xE9, 0x03, 0x95, 0xCD, 0x40, 0x54, 0x05, 0x95, 0xEA, 0x40, 0x06, 0xE8, 0xEA, 0x0E, 0xC7, 0x81
+		}
+	},
+	{
+		372,
+		79,
+		0x39,
+		0x019F,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x9F, 0xE9, 0x00, 0x95, 0xBF, 0x40, 0x54, 0x06, 0x80, 0x54, 0x62, 0xD0, 0x00, 0x52, 0x04, 0x96, 0x6E, 0x40, 0x95, 0xB0, 0x40, 0x54, 0x05, 0x95, 0xCD, 0x40, 0x55, 0xE6, 0x03, 0x55, 0xE7, 0x00, 0x55, 0xE1, 0x00, 0x55, 0xE0, 0x00, 0x3C, 0xE7, 0x00, 0xB0, 0x06, 0x3C, 0xE6, 0x00, 0xA0, 0x1A, 0x70, 0xFB, 0x6E, 0xE7, 0x6E, 0xE6, 0xD0, 0x0C, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x04, 0xE1, 0x51, 0xE9, 0x0B, 0x0A
+		}
+	},
+	{
+		373,
+		79,
+		0x39,
+		0x01A0,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xA0, 0x0C, 0xE0, 0x65, 0xE8, 0x6B, 0xE9, 0x8F, 0xDE, 0x5F, 0xE8, 0xE1, 0x5F, 0xE9, 0xE0, 0x62, 0xD0, 0x00, 0x06, 0xE8, 0xC4, 0x0E, 0xE9, 0x02, 0x95, 0x6A, 0x40, 0x54, 0x06, 0x52, 0x05, 0x3B, 0x06, 0xD0, 0x0C, 0x62, 0xD0, 0x00, 0x52, 0x06, 0x13, 0x05, 0x54, 0x08, 0x80, 0x0A, 0x62, 0xD0, 0x00, 0x52, 0x05, 0x13, 0x06, 0x54, 0x08, 0x62, 0xD0, 0x00, 0x52, 0x07, 0x53, 0xE8, 0x50, 0x00, 0x08, 0xD8, 0xA5
+		}
+	},
+	{
+		374,
+		79,
+		0x39,
+		0x01A1,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xA1, 0x51, 0xE8, 0x08, 0x52, 0x07, 0x08, 0x95, 0x13, 0x7C, 0x71, 0xF5, 0x52, 0x08, 0x53, 0xE6, 0x50, 0x00, 0x08, 0x51, 0xE6, 0x08, 0x52, 0x08, 0x08, 0x95, 0x01, 0x38, 0xFA, 0x62, 0xD0, 0x00, 0x52, 0x0A, 0x02, 0xE8, 0x62, 0xD0, 0x03, 0x53, 0xDF, 0x52, 0x09, 0x62, 0xD0, 0x00, 0x0A, 0xE9, 0x62, 0xD0, 0x03, 0x53, 0xDE, 0x62, 0xD0, 0x00, 0x96, 0xCB, 0x40, 0x52, 0x01, 0x02, 0xE8, 0x53, 0xE8, 0x90, 0x16
+		}
+	},
+	{
+		375,
+		79,
+		0x39,
+		0x01A2,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xA2, 0x50, 0x00, 0x0A, 0xE9, 0x53, 0xE9, 0x65, 0xE8, 0x6B, 0xE9, 0x06, 0xE8, 0xB1, 0x97, 0x53, 0x40, 0x62, 0xD0, 0x03, 0x51, 0xDE, 0x62, 0xD0, 0x00, 0x3F, 0xE8, 0x62, 0xD0, 0x03, 0x51, 0xDF, 0x62, 0xD0, 0x00, 0x3F, 0xE8, 0x77, 0x04, 0x52, 0x04, 0x3B, 0x02, 0xCE, 0x65, 0x77, 0x00, 0x07, 0x01, 0x03, 0x52, 0x00, 0x3B, 0x03, 0xCE, 0x55, 0x38, 0xF5, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x97, 0xD6, 0xA3
+		}
+	},
+	{
+		376,
+		79,
+		0x39,
+		0x01A3,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xA3, 0x08, 0x40, 0x80, 0x95, 0x62, 0xD0, 0x00, 0x94, 0xDC, 0x40, 0x06, 0xE8, 0xCC, 0x0E, 0xE9, 0x03, 0x94, 0xB1, 0x40, 0x53, 0xE9, 0x47, 0xE9, 0xF0, 0xA0, 0x7D, 0x52, 0x01, 0x95, 0x1C, 0x40, 0x95, 0xCD, 0x40, 0x06, 0xE6, 0xB8, 0x0E, 0xE7, 0x03, 0x51, 0xE7, 0x60, 0xD4, 0x3E, 0xE6, 0x53, 0xE7, 0x96, 0xDE, 0x40, 0x52, 0x01, 0x95, 0x47, 0x40, 0x95, 0xB4, 0x40, 0x06, 0xE6, 0xB4, 0x0E, 0xE7, 0x7E, 0xF4
+		}
+	},
+	{
+		377,
+		79,
+		0x39,
+		0x01A4,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xA4, 0x03, 0x51, 0xE7, 0x60, 0xD4, 0x3E, 0xE6, 0x53, 0xE7, 0x96, 0xC5, 0x40, 0x52, 0x01, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x55, 0xE6, 0x03, 0x55, 0xE7, 0x00, 0x55, 0xE1, 0x00, 0x55, 0xE0, 0x00, 0x3C, 0xE7, 0x00, 0xB0, 0x06, 0x3C, 0xE6, 0x00, 0xA0, 0x1A, 0x70, 0xFB, 0x6E, 0xE7, 0x6E, 0xE6, 0xD0, 0x0C, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x04, 0xE1, 0x51, 0xE9, 0x0C, 0xE0, 0x65, 0xE8, 0x6B, 0xE9, 0xD5, 0xA3
+		}
+	},
+	{
+		378,
+		79,
+		0x39,
+		0x01A5,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xA5, 0x8F, 0xDE, 0x5F, 0xE8, 0xE1, 0x5F, 0xE9, 0xE0, 0x62, 0xD0, 0x00, 0x06, 0xE8, 0xCE, 0x0E, 0xE9, 0x02, 0x96, 0x86, 0x40, 0x77, 0x01, 0x77, 0x00, 0x96, 0x67, 0x40, 0xCF, 0x68, 0x96, 0x6A, 0x40, 0x81, 0x15, 0x62, 0xD0, 0x00, 0x94, 0x3E, 0x40, 0x06, 0xE8, 0xCC, 0x0E, 0xE9, 0x03, 0x94, 0x13, 0x40, 0x53, 0xE9, 0x47, 0xE9, 0x0F, 0xA0, 0xFD, 0x52, 0x01, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x55, 0x51, 0x9C
+		}
+	},
+	{
+		379,
+		79,
+		0x39,
+		0x01A6,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xA6, 0xE6, 0x03, 0x55, 0xE7, 0x00, 0x55, 0xE1, 0x00, 0x55, 0xE0, 0x00, 0x3C, 0xE7, 0x00, 0xB0, 0x06, 0x3C, 0xE6, 0x00, 0xA0, 0x1A, 0x70, 0xFB, 0x6E, 0xE7, 0x6E, 0xE6, 0xD0, 0x0C, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x04, 0xE1, 0x51, 0xE9, 0x0C, 0xE0, 0x65, 0xE8, 0x6B, 0xE9, 0x8F, 0xDE, 0x5F, 0xE8, 0xE1, 0x5F, 0xE9, 0xE0, 0x62, 0xD0, 0x00, 0x06, 0xE8, 0xC3, 0x0E, 0xE9, 0x02, 0x94, 0xEF, 0x40, 0xA8, 0x4B
+		}
+	},
+	{
+		380,
+		79,
+		0x39,
+		0x01A7,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xA7, 0x06, 0xE6, 0xB0, 0x0E, 0xE7, 0x03, 0x51, 0xE7, 0x60, 0xD4, 0x3E, 0xE6, 0x53, 0xE7, 0x96, 0x00, 0x40, 0x52, 0x01, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x55, 0xE6, 0x03, 0x55, 0xE7, 0x00, 0x55, 0xE1, 0x00, 0x55, 0xE0, 0x00, 0x3C, 0xE7, 0x00, 0xB0, 0x06, 0x3C, 0xE6, 0x00, 0xA0, 0x1A, 0x70, 0xFB, 0x6E, 0xE7, 0x6E, 0xE6, 0xD0, 0x0C, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x04, 0xE1, 0x51, 0xE9, 0x0C, 0x20, 0x3C
+		}
+	},
+	{
+		381,
+		79,
+		0x39,
+		0x01A8,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xA8, 0xE0, 0x65, 0xE8, 0x6B, 0xE9, 0x8F, 0xDE, 0x5F, 0xE8, 0xE1, 0x5F, 0xE9, 0xE0, 0x62, 0xD0, 0x00, 0x06, 0xE8, 0xC4, 0x0E, 0xE9, 0x02, 0x94, 0x96, 0x40, 0x06, 0xE6, 0xEA, 0x0E, 0xE7, 0x00, 0x51, 0xE7, 0x60, 0xD4, 0x3E, 0xE6, 0x53, 0xE7, 0x95, 0xA7, 0x40, 0x52, 0x01, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x55, 0xE6, 0x03, 0x55, 0xE7, 0x00, 0x55, 0xE1, 0x00, 0x55, 0xE0, 0x00, 0x3C, 0xE7, 0x00, 0x6A, 0xD1
+		}
+	},
+	{
+		382,
+		79,
+		0x39,
+		0x01A9,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xA9, 0xB0, 0x06, 0x3C, 0xE6, 0x00, 0xA0, 0x1A, 0x70, 0xFB, 0x6E, 0xE7, 0x6E, 0xE6, 0xD0, 0x0C, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x04, 0xE1, 0x51, 0xE9, 0x0C, 0xE0, 0x65, 0xE8, 0x6B, 0xE9, 0x8F, 0xDE, 0x5F, 0xE8, 0xE1, 0x5F, 0xE9, 0xE0, 0x62, 0xD0, 0x00, 0x06, 0xE8, 0xC5, 0x0E, 0xE9, 0x02, 0x95, 0x68, 0x40, 0x77, 0x01, 0x77, 0x00, 0x97, 0x39, 0x40, 0xCE, 0xE8, 0x38, 0xFE, 0x20, 0x7F, 0x10, 0x0B, 0x14
+		}
+	},
+	{
+		383,
+		79,
+		0x39,
+		0x01AA,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xAA, 0x4F, 0x38, 0x07, 0x3D, 0xFC, 0x00, 0xB0, 0x11, 0x62, 0xD0, 0x04, 0x51, 0xE1, 0x54, 0x05, 0x62, 0xD0, 0x00, 0x51, 0x0E, 0x54, 0x02, 0x80, 0x0D, 0x62, 0xD0, 0x04, 0x51, 0xE1, 0x13, 0xFC, 0x54, 0x05, 0x96, 0x38, 0x40, 0x62, 0xD0, 0x00, 0x94, 0x6F, 0x40, 0x06, 0xE8, 0x5E, 0x0E, 0xE9, 0x0F, 0x94, 0xAA, 0x40, 0x54, 0x04, 0x56, 0x03, 0x00, 0x56, 0x01, 0x00, 0x97, 0xE4, 0x40, 0x80, 0xCB, 0xEE, 0xDB
+		}
+	},
+	{
+		384,
+		79,
+		0x39,
+		0x01AB,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xAB, 0x62, 0xD0, 0x03, 0x55, 0xDF, 0x00, 0x55, 0xDE, 0x00, 0x56, 0x06, 0x00, 0x80, 0x34, 0x96, 0xCE, 0x40, 0x52, 0x01, 0x94, 0x2B, 0x40, 0x10, 0x57, 0x03, 0x7C, 0x4A, 0xBC, 0x20, 0x03, 0x06, 0x54, 0x00, 0x92, 0xC2, 0x40, 0x65, 0xE8, 0x6B, 0xE9, 0x06, 0xE8, 0xB1, 0x0E, 0xE9, 0x02, 0x92, 0x93, 0x40, 0x53, 0xE9, 0x3E, 0xE8, 0x62, 0xD0, 0x03, 0x04, 0xDF, 0x95, 0xA5, 0x40, 0x0C, 0xDE, 0x77, 0x92, 0x24
+		}
+	},
+	{
+		385,
+		79,
+		0x39,
+		0x01AC,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xAC, 0x06, 0x52, 0x06, 0x3B, 0x02, 0xCF, 0xC8, 0x95, 0x83, 0x40, 0xD0, 0x7A, 0x62, 0xD0, 0x03, 0x51, 0xDF, 0x08, 0x51, 0xDE, 0x62, 0xD0, 0x04, 0x53, 0xB1, 0x18, 0x53, 0xB2, 0x56, 0x06, 0x00, 0x80, 0x5F, 0x96, 0x7B, 0x40, 0x52, 0x01, 0x93, 0xD8, 0x40, 0x54, 0x00, 0x3D, 0xFC, 0x00, 0xB0, 0x42, 0x52, 0x00, 0x92, 0x56, 0x40, 0x06, 0xE8, 0xE0, 0x0E, 0xE9, 0x01, 0x92, 0x46, 0x40, 0x53, 0xE9, 0x64, 0xC9
+		}
+	},
+	{
+		386,
+		79,
+		0x39,
+		0x01AD,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xAD, 0x96, 0x67, 0x40, 0x06, 0xE6, 0xD0, 0x0E, 0xE7, 0x03, 0x92, 0x9F, 0x40, 0x52, 0x00, 0x92, 0x3A, 0x40, 0x06, 0xE8, 0xE1, 0x0E, 0xE9, 0x01, 0x92, 0x2A, 0x40, 0x93, 0x85, 0x40, 0x52, 0x06, 0x93, 0x55, 0x40, 0x06, 0xE6, 0xD5, 0x0E, 0xE7, 0x02, 0x92, 0x80, 0x40, 0x51, 0xE8, 0x3F, 0xE6, 0x80, 0x0D, 0x96, 0x2B, 0x40, 0x06, 0xE8, 0xFD, 0x0E, 0xE9, 0x02, 0x94, 0x5D, 0x40, 0x77, 0x06, 0x52, 0x35, 0x6C
+		}
+	},
+	{
+		387,
+		79,
+		0x39,
+		0x01AE,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xAE, 0x06, 0x3B, 0x02, 0xCF, 0x9D, 0x77, 0x03, 0x07, 0x01, 0x03, 0x52, 0x03, 0x3B, 0x04, 0xCF, 0x31, 0x38, 0xF9, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x07, 0x3D, 0xFC, 0x00, 0xB0, 0x11, 0x62, 0xD0, 0x04, 0x51, 0xE1, 0x54, 0x01, 0x62, 0xD0, 0x00, 0x51, 0x0E, 0x54, 0x02, 0x80, 0x0D, 0x62, 0xD0, 0x04, 0x51, 0xE1, 0x13, 0xFC, 0x54, 0x01, 0x95, 0x23, 0x40, 0x62, 0xD0, 0x00, 0x93, 0x69, 0x40, 0x06, 0x99, 0x35
+		}
+	},
+	{
+		388,
+		79,
+		0x39,
+		0x01AF,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xAF, 0xE8, 0x5E, 0x0E, 0xE9, 0x0F, 0x93, 0x95, 0x40, 0x54, 0x04, 0x56, 0x03, 0x00, 0x56, 0x00, 0x00, 0x96, 0xCF, 0x40, 0x81, 0x6E, 0x62, 0xD0, 0x03, 0x55, 0xDF, 0x00, 0x55, 0xDE, 0x00, 0x56, 0x05, 0x00, 0x80, 0x39, 0x62, 0xD0, 0x00, 0x93, 0x30, 0x40, 0x52, 0x00, 0x93, 0x13, 0x40, 0x53, 0xE9, 0x10, 0x52, 0x05, 0x57, 0x03, 0x7C, 0x4A, 0xBC, 0x20, 0x02, 0xE9, 0x54, 0x06, 0x52, 0x06, 0x91, 0x39, 0x76
+		}
+	},
+	{
+		389,
+		79,
+		0x39,
+		0x01B0,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xB0, 0xD8, 0x40, 0x06, 0xE8, 0xB1, 0x0E, 0xE9, 0x02, 0x91, 0x79, 0x40, 0x53, 0xE9, 0x3E, 0xE8, 0x62, 0xD0, 0x03, 0x04, 0xDF, 0x94, 0x8B, 0x40, 0x0C, 0xDE, 0x77, 0x05, 0x52, 0x05, 0x3B, 0x01, 0xCF, 0xC3, 0x94, 0x69, 0x40, 0xD1, 0x18, 0x62, 0xD0, 0x03, 0x51, 0xDF, 0x08, 0x51, 0xDE, 0x62, 0xD0, 0x04, 0x53, 0xB1, 0x18, 0x53, 0xB2, 0x56, 0x05, 0x00, 0x80, 0x2A, 0x3D, 0xFC, 0x00, 0xB0, 0x13, 0x78, 0xF5
+		}
+	},
+	{
+		390,
+		79,
+		0x39,
+		0x01B1,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xB1, 0x62, 0xD0, 0x00, 0x92, 0xD3, 0x40, 0x06, 0xE8, 0xD0, 0x93, 0x09, 0x40, 0x50, 0x00, 0x3F, 0xE8, 0x80, 0x11, 0x62, 0xD0, 0x00, 0x92, 0xC1, 0x40, 0x06, 0xE8, 0xFD, 0x93, 0x85, 0x40, 0x50, 0xFF, 0x3F, 0xE8, 0x77, 0x05, 0x52, 0x05, 0x3B, 0x02, 0xCF, 0xD2, 0x56, 0x05, 0x00, 0x80, 0x66, 0x62, 0xD0, 0x00, 0x92, 0xA4, 0x40, 0x52, 0x00, 0x92, 0x87, 0x40, 0x54, 0x06, 0x3D, 0xFC, 0x00, 0xB0, 0x7F, 0x04
+		}
+	},
+	{
+		391,
+		79,
+		0x39,
+		0x01B2,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xB2, 0x42, 0x52, 0x05, 0x91, 0x05, 0x40, 0x06, 0xE8, 0xE0, 0x0E, 0xE9, 0x01, 0x90, 0xF5, 0x40, 0x53, 0xE9, 0x95, 0x16, 0x40, 0x06, 0xE6, 0xD0, 0x0E, 0xE7, 0x03, 0x91, 0x4E, 0x40, 0x52, 0x05, 0x90, 0xE9, 0x40, 0x06, 0xE8, 0xE1, 0x0E, 0xE9, 0x01, 0x90, 0xD9, 0x40, 0x92, 0x34, 0x40, 0x52, 0x06, 0x92, 0x04, 0x40, 0x06, 0xE6, 0xD5, 0x0E, 0xE7, 0x02, 0x91, 0x2F, 0x40, 0x51, 0xE8, 0x3F, 0xE6, 0xBE, 0x83
+		}
+	},
+	{
+		392,
+		79,
+		0x39,
+		0x01B3,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xB3, 0x80, 0x11, 0x62, 0xD0, 0x00, 0x92, 0x51, 0x40, 0x06, 0xE8, 0xFD, 0x93, 0x15, 0x40, 0x52, 0x06, 0x3F, 0xE8, 0x77, 0x05, 0x52, 0x05, 0x3B, 0x01, 0xCF, 0x96, 0x3D, 0xFC, 0x00, 0xB0, 0x5F, 0x62, 0xD0, 0x04, 0x51, 0xE2, 0x62, 0xD0, 0x04, 0x53, 0xE3, 0x56, 0x05, 0x00, 0x80, 0x4A, 0x62, 0xD0, 0x00, 0x92, 0x25, 0x40, 0x06, 0xE8, 0xD0, 0x0E, 0xE9, 0x03, 0x90, 0x87, 0x40, 0x39, 0x00, 0xB0, 0x0F, 0x26
+		}
+	},
+	{
+		393,
+		79,
+		0x39,
+		0x01B4,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xB4, 0x35, 0x92, 0x15, 0x40, 0x06, 0xE8, 0xD0, 0x92, 0x4B, 0x40, 0x62, 0xD0, 0x04, 0x51, 0xE3, 0x62, 0xD0, 0x00, 0x3F, 0xE8, 0x62, 0xD0, 0x04, 0x51, 0xE3, 0x08, 0x7C, 0x66, 0x95, 0x38, 0xFF, 0x62, 0xD0, 0x04, 0x53, 0xE3, 0x62, 0xD0, 0x00, 0x52, 0x05, 0x90, 0xAE, 0x40, 0x06, 0xE8, 0xD5, 0x92, 0xB1, 0x40, 0x95, 0x66, 0x40, 0x77, 0x05, 0x52, 0x05, 0x3B, 0x02, 0xCF, 0xB2, 0x77, 0x03, 0x07, 0xE0, 0xC9
+		}
+	},
+	{
+		394,
+		79,
+		0x39,
+		0x01B5,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xB5, 0x00, 0x03, 0x52, 0x03, 0x3B, 0x04, 0xCE, 0x8E, 0x3D, 0xFC, 0x00, 0xB0, 0x0B, 0x62, 0xD0, 0x04, 0x51, 0xE3, 0x62, 0xD0, 0x04, 0x53, 0xE2, 0x38, 0xF9, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x92, 0x68, 0x40, 0x48, 0xFC, 0x01, 0xA0, 0x09, 0x52, 0xFB, 0x05, 0x01, 0x52, 0xFA, 0x0D, 0x00, 0x66, 0xFB, 0x6C, 0xFA, 0x70, 0xFB, 0x6F, 0xFC, 0x3D, 0xFC, 0x00, 0xBF, 0xE7, 0x93, 0xB0, 0x40, 0x38, 0x30, 0x6A
+		}
+	},
+	{
+		395,
+		79,
+		0x39,
+		0x01B6,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xB6, 0xFE, 0x20, 0x7F, 0x51, 0xE9, 0x60, 0xD4, 0x3E, 0xE8, 0x7F, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x65, 0xE8, 0x6B, 0xE9, 0x65, 0xE8, 0x6B, 0xE9, 0x65, 0xE8, 0x6B, 0xE9, 0x7F, 0x65, 0xE8, 0x6B, 0xE9, 0x65, 0xE8, 0x6B, 0xE9, 0x7F, 0x52, 0x00, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x7F, 0x70, 0xFB, 0x6E, 0xE9, 0x6E, 0xE8, 0x70, 0xFB, 0x6E, 0xE9, 0x6E, 0xE8, 0x70, 0xFB, 0x6E, 0xE9, 0x6E, 0xE8, 0x70, 0x4E, 0xA7
+		}
+	},
+	{
+		396,
+		79,
+		0x39,
+		0x01B7,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xB7, 0xFB, 0x6E, 0xE9, 0x6E, 0xE8, 0x7F, 0x70, 0xFB, 0x6E, 0xE9, 0x6E, 0xE8, 0x70, 0xFB, 0x6E, 0xE9, 0x6E, 0xE8, 0x70, 0xFB, 0x6E, 0xE9, 0x6E, 0xE8, 0x7F, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x65, 0xE8, 0x6B, 0xE9, 0x7F, 0x60, 0xD4, 0x3E, 0xE8, 0x53, 0xE9, 0x7F, 0x51, 0xE7, 0x60, 0xD5, 0x51, 0xE9, 0x3F, 0xE6, 0x7F, 0x70, 0xFB, 0x6E, 0xE9, 0x6E, 0xE8, 0x7F, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x55, 0x4C, 0xA4
+		}
+	},
+	{
+		397,
+		79,
+		0x39,
+		0x01B8,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xB8, 0xE6, 0x03, 0x55, 0xE7, 0x00, 0x55, 0xE1, 0x00, 0x55, 0xE0, 0x00, 0x3C, 0xE7, 0x00, 0xB0, 0x06, 0x3C, 0xE6, 0x00, 0xA0, 0x1A, 0x70, 0xFB, 0x6E, 0xE7, 0x6E, 0xE6, 0xD0, 0x0C, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x04, 0xE1, 0x51, 0xE9, 0x0C, 0xE0, 0x65, 0xE8, 0x6B, 0xE9, 0x8F, 0xDE, 0x5F, 0xE8, 0xE1, 0x5F, 0xE9, 0xE0, 0x62, 0xD0, 0x00, 0x06, 0xE8, 0xCC, 0x0E, 0xE9, 0x02, 0x7F, 0x53, 0xE8, 0xA8, 0x5D
+		}
+	},
+	{
+		398,
+		79,
+		0x39,
+		0x01B9,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xB9, 0x55, 0xE9, 0x00, 0x55, 0xE6, 0x03, 0x55, 0xE7, 0x00, 0x55, 0xE1, 0x00, 0x55, 0xE0, 0x00, 0x3C, 0xE7, 0x00, 0xB0, 0x06, 0x3C, 0xE6, 0x00, 0xA0, 0x1A, 0x70, 0xFB, 0x6E, 0xE7, 0x6E, 0xE6, 0xD0, 0x0C, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x04, 0xE1, 0x51, 0xE9, 0x0C, 0xE0, 0x65, 0xE8, 0x6B, 0xE9, 0x8F, 0xDE, 0x5F, 0xE8, 0xE1, 0x5F, 0xE9, 0xE0, 0x62, 0xD0, 0x00, 0x06, 0xE8, 0xCD, 0x0E, 0xE9, 0x80, 0x0E
+		}
+	},
+	{
+		399,
+		79,
+		0x39,
+		0x01BA,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xBA, 0x02, 0x7F, 0x62, 0xD0, 0x01, 0x51, 0xE7, 0x08, 0x51, 0xE6, 0x62, 0xD0, 0x03, 0x53, 0xEC, 0x18, 0x53, 0xED, 0x62, 0xD0, 0x01, 0x51, 0xE5, 0x08, 0x51, 0xE4, 0x62, 0xD0, 0x03, 0x53, 0xEA, 0x18, 0x53, 0xEB, 0x7F, 0x53, 0xE8, 0x52, 0x01, 0x09, 0x00, 0x60, 0xD4, 0x3E, 0xE8, 0x7F, 0x52, 0x00, 0x53, 0xE6, 0x55, 0xE7, 0x00, 0x7F, 0x53, 0xE6, 0x55, 0xE7, 0x00, 0x65, 0xE6, 0x6B, 0xE7, 0x7F, 0x2B, 0x65
+		}
+	},
+	{
+		400,
+		79,
+		0x39,
+		0x01BB,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xBB, 0x62, 0xD0, 0x01, 0x51, 0xEF, 0x08, 0x51, 0xEE, 0x62, 0xD0, 0x03, 0x53, 0xE8, 0x18, 0x53, 0xE9, 0x62, 0xD0, 0x01, 0x51, 0xED, 0x08, 0x51, 0xEC, 0x62, 0xD0, 0x03, 0x53, 0xE6, 0x18, 0x53, 0xE7, 0x7F, 0x53, 0xE9, 0x3E, 0xE8, 0x53, 0xE8, 0x7F, 0x62, 0xD0, 0x00, 0x51, 0xAF, 0x29, 0x01, 0x53, 0xAF, 0x51, 0xAF, 0x60, 0x04, 0x51, 0xAF, 0x29, 0x08, 0x53, 0xAF, 0x51, 0xAF, 0x60, 0x04, 0x7F, 0x67, 0xDE
+		}
+	},
+	{
+		401,
+		79,
+		0x39,
+		0x01BC,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xBC, 0x02, 0xE8, 0x53, 0xE8, 0x50, 0x00, 0x0A, 0xE9, 0x53, 0xE9, 0x06, 0xE8, 0x62, 0x0E, 0xE9, 0x0F, 0x51, 0xE9, 0x10, 0x58, 0xE8, 0x28, 0x20, 0x7F, 0x52, 0x05, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x7F, 0x53, 0xAF, 0x51, 0xAF, 0x60, 0x04, 0x7F, 0x52, 0x02, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x7F, 0x62, 0xD0, 0x03, 0x51, 0x99, 0x21, 0x70, 0x54, 0x00, 0x3D, 0x00, 0x00, 0x7F, 0x62, 0xD0, 0x03, 0x51, 0x11, 0x33
+		}
+	},
+	{
+		402,
+		79,
+		0x39,
+		0x01BD,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xBD, 0x99, 0x21, 0x70, 0x7F, 0x52, 0x04, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x7F, 0x62, 0xD0, 0x00, 0x52, 0x02, 0x53, 0xE8, 0x7F, 0x0E, 0xE9, 0x03, 0x51, 0xE9, 0x60, 0xD5, 0x7F, 0x51, 0xE9, 0x10, 0x58, 0xE8, 0x28, 0x20, 0x7F, 0x62, 0xD0, 0x00, 0x26, 0xAF, 0xF7, 0x51, 0xAF, 0x60, 0x04, 0x26, 0xAF, 0xFE, 0x51, 0xAF, 0x60, 0x04, 0x7F, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x08, 0x51, 0xE9, 0x62, 0xD0, 0x67, 0xE0
+		}
+	},
+	{
+		403,
+		79,
+		0x39,
+		0x01BE,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xBE, 0x04, 0x53, 0x9D, 0x18, 0x53, 0x9E, 0x7F, 0x62, 0xD0, 0x03, 0x51, 0xF7, 0x08, 0x51, 0xF6, 0x62, 0xD0, 0x03, 0x53, 0xBC, 0x18, 0x53, 0xBD, 0x62, 0xD0, 0x03, 0x51, 0xF5, 0x08, 0x51, 0xF4, 0x62, 0xD0, 0x03, 0x53, 0xBE, 0x18, 0x53, 0xBF, 0x7F, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x08, 0x51, 0xE9, 0x62, 0xD0, 0x04, 0x53, 0x9F, 0x18, 0x53, 0xA0, 0x7F, 0x62, 0xD0, 0x00, 0x52, 0xFC, 0x53, 0xE8, 0x85, 0x1D
+		}
+	},
+	{
+		404,
+		79,
+		0x39,
+		0x01BF,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xBF, 0x7F, 0x62, 0xD0, 0x00, 0x52, 0x00, 0x3A, 0x0E, 0x7F, 0x56, 0x01, 0x00, 0x56, 0x00, 0x00, 0x7F, 0x51, 0xE9, 0x60, 0xD5, 0x51, 0xE7, 0x3F, 0xE8, 0x7F, 0x51, 0xE9, 0x60, 0xD5, 0x52, 0x00, 0x3F, 0xE8, 0x7F, 0x0E, 0xE9, 0x02, 0x51, 0xE9, 0x60, 0xD5, 0x7F, 0x62, 0xD0, 0x03, 0x52, 0x01, 0x53, 0xE5, 0x52, 0x00, 0x53, 0xE4, 0x7F, 0x52, 0x02, 0x53, 0xE8, 0x52, 0x01, 0x60, 0xD4, 0x3E, 0xE8, 0x95, 0x3E
+		}
+	},
+	{
+		405,
+		79,
+		0x39,
+		0x01C0,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xC0, 0x7F, 0x70, 0xFB, 0x6F, 0x01, 0x6F, 0x02, 0x70, 0xFB, 0x6F, 0x01, 0x6F, 0x02, 0x7F, 0x62, 0xD0, 0x00, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x7F, 0x65, 0xE8, 0x6B, 0xE9, 0x51, 0xE8, 0x7F, 0x65, 0xE6, 0x6B, 0xE7, 0x65, 0xE6, 0x6B, 0xE7, 0x7F, 0x53, 0xE9, 0x62, 0xD0, 0x04, 0x51, 0xD4, 0x62, 0xD0, 0x00, 0x3A, 0xE9, 0x7F, 0x53, 0xE9, 0x62, 0xD0, 0x04, 0x51, 0xD9, 0x62, 0xD0, 0x00, 0x3A, 0xE9, 0x2E, 0x71
+		}
+	},
+	{
+		406,
+		79,
+		0x39,
+		0x01C1,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xC1, 0x7F, 0x62, 0xD0, 0x03, 0x55, 0xEF, 0x00, 0x55, 0xEE, 0x00, 0x7F, 0x56, 0x01, 0x00, 0x80, 0x03, 0x77, 0x01, 0x3D, 0x01, 0x0A, 0xCF, 0xFA, 0x62, 0xD0, 0x00, 0x7F, 0x62, 0xD0, 0x04, 0x55, 0xCF, 0x01, 0x7F, 0x62, 0xD0, 0x00, 0x50, 0x0D, 0x10, 0x08, 0x57, 0x89, 0x28, 0x53, 0xE9, 0x18, 0x75, 0x09, 0x00, 0x28, 0x53, 0xE8, 0x20, 0x7F, 0x62, 0xD0, 0x04, 0x51, 0xAA, 0x62, 0xD0, 0x00, 0x12, 0xA0, 0x56
+		}
+	},
+	{
+		407,
+		79,
+		0x39,
+		0x01C2,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xC2, 0xE8, 0x62, 0xD0, 0x04, 0x51, 0xA9, 0x62, 0xD0, 0x00, 0x1A, 0xE9, 0x7F, 0x62, 0xD0, 0x03, 0x51, 0xDF, 0x62, 0xD0, 0x04, 0x12, 0xB2, 0x62, 0xD0, 0x03, 0x51, 0xDE, 0x62, 0xD0, 0x04, 0x1A, 0xB1, 0x7F, 0x62, 0xD0, 0x00, 0x51, 0xE9, 0x62, 0xD0, 0x03, 0x7F, 0x70, 0xFE, 0x62, 0xD0, 0x03, 0x51, 0xD9, 0x08, 0x51, 0xD8, 0x62, 0xD0, 0x00, 0x53, 0x0A, 0x18, 0x53, 0x0B, 0x71, 0x01, 0x7F, 0x53, 0x76, 0x03
+		}
+	},
+	{
+		408,
+		79,
+		0x39,
+		0x01C3,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xC3, 0xE6, 0x55, 0xE7, 0x00, 0x51, 0xE8, 0x12, 0xE6, 0x51, 0xE9, 0x1A, 0xE7, 0x7F, 0x60, 0xD4, 0x3E, 0xE8, 0x54, 0x03, 0x7F, 0x70, 0xFB, 0x6F, 0x01, 0x6F, 0x02, 0x7F, 0x62, 0xD0, 0x00, 0x51, 0x0E, 0x13, 0xFC, 0x62, 0xD0, 0x00, 0x54, 0x02, 0x7F, 0x70, 0xFE, 0x62, 0xD0, 0x00, 0x51, 0x0D, 0x7F, 0x52, 0xFB, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x7F, 0x5D, 0xC8, 0x62, 0xD0, 0x00, 0x39, 0x00, 0x7F, 0x80, 0x18
+		}
+	},
+	{
+		409,
+		79,
+		0x39,
+		0x01C4,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xC4, 0x52, 0x03, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x7F, 0x0E, 0xE9, 0x01, 0x51, 0xE9, 0x60, 0xD5, 0x7F, 0x71, 0x10, 0x5D, 0xE0, 0x54, 0x01, 0x41, 0xE0, 0xE7, 0x43, 0xE0, 0x18, 0x70, 0xCF, 0x62, 0xE3, 0x38, 0x7F, 0x08, 0x51, 0xE9, 0x62, 0xD0, 0x04, 0x53, 0xAD, 0x18, 0x53, 0xAE, 0x7F, 0x62, 0xD0, 0x00, 0x52, 0x01, 0x53, 0xE8, 0x52, 0x00, 0x53, 0xE9, 0x7F, 0x62, 0xD0, 0x04, 0x51, 0xA8, 0x08, 0xD8, 0xC9
+		}
+	},
+	{
+		410,
+		79,
+		0x39,
+		0x01C5,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xC5, 0x51, 0xA7, 0x62, 0xD0, 0x03, 0x53, 0xEE, 0x18, 0x53, 0xEF, 0x7F, 0x52, 0x05, 0x54, 0x03, 0x52, 0x02, 0x54, 0x04, 0x7F, 0x62, 0xD0, 0x00, 0x51, 0x0D, 0x08, 0x51, 0x0C, 0x62, 0xD0, 0x03, 0x53, 0xD4, 0x18, 0x53, 0xD5, 0x7F, 0x5D, 0xD6, 0x53, 0xE9, 0x2E, 0xE9, 0xFE, 0x51, 0xE9, 0x54, 0x02, 0x43, 0xD6, 0x01, 0x52, 0xFC, 0x7F, 0x53, 0xE8, 0x52, 0xFB, 0x09, 0x00, 0x60, 0xD5, 0x7F, 0x62, 0xD2, 0xBE
+		}
+	},
+	{
+		411,
+		79,
+		0x39,
+		0x01C6,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xC6, 0xD0, 0x00, 0x51, 0xE9, 0x62, 0xD0, 0x04, 0x7F, 0x62, 0xD0, 0x04, 0x53, 0xCE, 0x62, 0xD0, 0x04, 0x51, 0xE0, 0x7F, 0x62, 0xD0, 0x04, 0x51, 0xE1, 0x62, 0xD0, 0x00, 0x3A, 0x0E, 0x7F, 0x62, 0xD0, 0x00, 0x52, 0x06, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x7F, 0x52, 0x06, 0x53, 0xE6, 0x55, 0xE7, 0x00, 0x7F, 0x62, 0xD0, 0x04, 0x52, 0x00, 0x3A, 0xE1, 0x7F, 0x52, 0x02, 0x03, 0x02, 0x54, 0x00, 0x07, 0x01, 0x1D
+		}
+	},
+	{
+		412,
+		79,
+		0x39,
+		0x01C7,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xC7, 0x00, 0x2E, 0x7F, 0x51, 0xE8, 0x08, 0x51, 0xE9, 0x62, 0xD0, 0x04, 0x53, 0xA1, 0x18, 0x53, 0xA2, 0x7F, 0x12, 0xE8, 0x50, 0x00, 0x1A, 0xE9, 0x7F, 0x62, 0xD0, 0x03, 0x51, 0x9D, 0x62, 0xD0, 0x00, 0x7F, 0x62, 0xD0, 0x04, 0x51, 0xE1, 0x13, 0x01, 0x62, 0xD0, 0x00, 0x54, 0x00, 0x7F, 0x55, 0xDC, 0x00, 0x62, 0xD0, 0x04, 0x7F, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x54, 0x0A, 0x51, 0xE9, 0x54, 0x09, 0x45, 0xA6
+		}
+	},
+	{
+		413,
+		79,
+		0x39,
+		0x01C8,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xC8, 0x7F, 0x08, 0x57, 0x98, 0x28, 0x53, 0xE9, 0x18, 0x75, 0x09, 0x00, 0x28, 0x7F, 0x08, 0x57, 0x91, 0x28, 0x53, 0xE9, 0x18, 0x75, 0x09, 0x00, 0x28, 0x7F, 0x62, 0xD0, 0x00, 0x55, 0x0B, 0x01, 0x55, 0x0A, 0x00, 0x71, 0x01, 0x7F, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x54, 0x01, 0x51, 0xE9, 0x54, 0x00, 0x7F, 0x52, 0x04, 0x53, 0xE6, 0x55, 0xE7, 0x00, 0x7F, 0x50, 0x0D, 0x10, 0x57, 0x88, 0x28, 0x20, 0x36, 0x89
+		}
+	},
+	{
+		414,
+		79,
+		0x39,
+		0x01C9,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xC9, 0x7F, 0x62, 0xD0, 0x04, 0x52, 0x00, 0x3A, 0xC0, 0x7F, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x51, 0xE8, 0x7F, 0x51, 0xAF, 0x60, 0x04, 0x62, 0xD0, 0x00, 0x7F, 0x52, 0xFC, 0x62, 0xD0, 0x04, 0x53, 0xD7, 0x56, 0xFC, 0x00, 0x7F, 0x62, 0xD0, 0x00, 0x50, 0x0D, 0x10, 0x57, 0x87, 0x28, 0x20, 0x7F, 0x62, 0xD0, 0x04, 0x51, 0xA8, 0x62, 0xD0, 0x03, 0x12, 0xEF, 0x7F, 0x62, 0xD0, 0x04, 0x51, 0xA7, 0x62, 0xF2, 0x02
+		}
+	},
+	{
+		415,
+		79,
+		0x39,
+		0x01CA,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xCA, 0xD0, 0x03, 0x1A, 0xEE, 0x7F, 0x62, 0xD0, 0x04, 0x55, 0xD1, 0x00, 0x7F, 0x62, 0xD0, 0x04, 0x55, 0xD6, 0x00, 0x7F, 0x62, 0xD0, 0x03, 0x47, 0x99, 0x70, 0x7F, 0x50, 0x00, 0x3F, 0xE8, 0x3F, 0xE8, 0x7F, 0x62, 0xD0, 0x04, 0x55, 0xB2, 0xFF, 0x55, 0xB1, 0xFF, 0x7F, 0x56, 0x01, 0x00, 0x56, 0x02, 0x00, 0x7F, 0x71, 0x10, 0x60, 0xE0, 0x70, 0xCF, 0x7F, 0x62, 0xD0, 0x03, 0x51, 0xF6, 0x62, 0xD0, 0x51, 0xC1
+		}
+	},
+	{
+		416,
+		79,
+		0x39,
+		0x01CB,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xCB, 0x03, 0x7F, 0x62, 0xD0, 0x03, 0x51, 0xF7, 0x62, 0xD0, 0x03, 0x7F, 0x71, 0x10, 0x43, 0xD7, 0x20, 0x43, 0xE0, 0x40, 0x7F, 0x52, 0xFA, 0x13, 0xF6, 0x52, 0xF9, 0x1B, 0xF5, 0x7F, 0x62, 0xD0, 0x00, 0x51, 0x0E, 0x62, 0xD0, 0x04, 0x7F, 0x3F, 0xE8, 0x62, 0xD0, 0x04, 0x51, 0xB6, 0x7F, 0x50, 0x0D, 0x10, 0x57, 0x86, 0x28, 0x20, 0x7F, 0x50, 0x0D, 0x10, 0x57, 0x9A, 0x28, 0x20, 0x7F, 0x62, 0xD0, 0x10, 0x40
+		}
+	},
+	{
+		417,
+		79,
+		0x39,
+		0x01CC,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xCC, 0x00, 0x52, 0x00, 0x13, 0x01, 0x7F, 0x62, 0xD0, 0x00, 0x52, 0x01, 0x13, 0x00, 0x7F, 0x50, 0x0D, 0x10, 0x57, 0x9B, 0x28, 0x20, 0x7F, 0x60, 0x0C, 0x62, 0xD0, 0x00, 0x51, 0xB2, 0x7F, 0x60, 0x08, 0x62, 0xD0, 0x00, 0x51, 0xB1, 0x7F, 0x62, 0xD0, 0x03, 0x51, 0x9B, 0x21, 0x0F, 0x7F, 0x62, 0xD0, 0x03, 0x47, 0x99, 0x04, 0x7F, 0x62, 0xD0, 0x04, 0x3C, 0xCD, 0x02, 0x7F, 0x06, 0xE8, 0x01, 0x0E, 0x82, 0x25
+		}
+	},
+	{
+		418,
+		79,
+		0x39,
+		0x01CD,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xCD, 0xE9, 0x00, 0x7F, 0x62, 0xD0, 0x04, 0x55, 0xCC, 0x00, 0x7F, 0x13, 0x03, 0x51, 0xE9, 0x1B, 0x02, 0x7F, 0x62, 0xD0, 0x04, 0x55, 0xD7, 0x00, 0x7F, 0x62, 0xD0, 0x04, 0x55, 0xD9, 0x00, 0x7F, 0x62, 0xD0, 0x04, 0x55, 0xD5, 0x00, 0x7F, 0x62, 0xD0, 0x04, 0x55, 0xDD, 0x01, 0x7F, 0x62, 0xD0, 0x04, 0x55, 0xCB, 0xF4, 0x7F, 0x62, 0xD0, 0x04, 0x55, 0xB4, 0x00, 0x7F, 0x41, 0xD7, 0xDF, 0x41, 0xE0, 0x54, 0xCA
+		}
+	},
+	{
+		419,
+		79,
+		0x39,
+		0x01CE,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xCE, 0xBF, 0x7F, 0x41, 0xE0, 0xEF, 0x62, 0xDA, 0xEF, 0x7F, 0x41, 0xE0, 0x7F, 0x62, 0xDA, 0x7F, 0x7F, 0x62, 0xD0, 0x00, 0x3C, 0xBA, 0x00, 0x7F, 0x00, 0xBF, 0x00, 0x20, 0x00, 0xEA, 0x00, 0x06, 0x01, 0x00, 0x00, 0xA0, 0x02, 0xB1, 0x00, 0x4F, 0x03, 0x99, 0x00, 0x47, 0x03, 0xE0, 0x01, 0x0D, 0x03, 0xE1, 0x00, 0x1F, 0x04, 0x99, 0x00, 0x49, 0x04, 0xE2, 0x02, 0x01, 0x00, 0xFF, 0x00, 0x30, 0x30, 0xF0, 0x03
+		}
+	},
+	{
+		420,
+		79,
+		0x39,
+		0x01CF,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xCF, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x24
+		}
+	},
+	{
+		421,
+		79,
+		0x39,
+		0x01D0,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xD0, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x25
+		}
+	},
+	{
+		422,
+		79,
+		0x39,
+		0x01D1,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xD1, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x26
+		}
+	},
+	{
+		423,
+		79,
+		0x39,
+		0x01D2,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xD2, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x27
+		}
+	},
+	{
+		424,
+		79,
+		0x39,
+		0x01D3,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xD3, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x28
+		}
+	},
+	{
+		425,
+		79,
+		0x39,
+		0x01D4,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xD4, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x29
+		}
+	},
+	{
+		426,
+		79,
+		0x39,
+		0x01D5,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xD5, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x2A
+		}
+	},
+	{
+		427,
+		79,
+		0x39,
+		0x01D6,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xD6, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x2B
+		}
+	},
+	{
+		428,
+		79,
+		0x39,
+		0x01D7,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xD7, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x2C
+		}
+	},
+	{
+		429,
+		79,
+		0x39,
+		0x01D8,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xD8, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x2D
+		}
+	},
+	{
+		430,
+		79,
+		0x39,
+		0x01D9,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xD9, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x2E
+		}
+	},
+	{
+		431,
+		79,
+		0x39,
+		0x01DA,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xDA, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x2F
+		}
+	},
+	{
+		432,
+		79,
+		0x39,
+		0x01DB,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xDB, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x30
+		}
+	},
+	{
+		433,
+		79,
+		0x39,
+		0x01DC,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xDC, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x31
+		}
+	},
+	{
+		434,
+		79,
+		0x39,
+		0x01DD,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xDD, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x32
+		}
+	},
+	{
+		435,
+		79,
+		0x39,
+		0x01DE,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xDE, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x33
+		}
+	},
+	{
+		436,
+		79,
+		0x39,
+		0x01DF,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xDF, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x34
+		}
+	},
+	{
+		437,
+		79,
+		0x39,
+		0x01E0,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xE0, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x35
+		}
+	},
+	{
+		438,
+		79,
+		0x39,
+		0x01E1,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xE1, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x36
+		}
+	},
+	{
+		439,
+		79,
+		0x39,
+		0x01E2,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xE2, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x37
+		}
+	},
+	{
+		440,
+		79,
+		0x39,
+		0x01E3,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xE3, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x38
+		}
+	},
+	{
+		441,
+		79,
+		0x39,
+		0x01E4,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xE4, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x39
+		}
+	},
+	{
+		442,
+		79,
+		0x39,
+		0x01E5,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xE5, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x3A
+		}
+	},
+	{
+		443,
+		79,
+		0x39,
+		0x01E6,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xE6, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x3B
+		}
+	},
+	{
+		444,
+		79,
+		0x39,
+		0x01E7,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xE7, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x3C
+		}
+	},
+	{
+		445,
+		79,
+		0x39,
+		0x01E8,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xE8, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x3D
+		}
+	},
+	{
+		446,
+		79,
+		0x39,
+		0x01E9,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xE9, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x3E
+		}
+	},
+	{
+		447,
+		79,
+		0x39,
+		0x01EA,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xEA, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x3F
+		}
+	},
+	{
+		448,
+		79,
+		0x39,
+		0x01EB,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xEB, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x40
+		}
+	},
+	{
+		449,
+		79,
+		0x39,
+		0x01EC,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xEC, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x41
+		}
+	},
+	{
+		450,
+		79,
+		0x39,
+		0x01ED,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xED, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x42
+		}
+	},
+	{
+		451,
+		79,
+		0x39,
+		0x01EE,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xEE, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x43
+		}
+	},
+	{
+		452,
+		79,
+		0x39,
+		0x01EF,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xEF, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x44
+		}
+	},
+	{
+		453,
+		79,
+		0x39,
+		0x01F0,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xF0, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x45
+		}
+	},
+	{
+		454,
+		79,
+		0x39,
+		0x01F1,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xF1, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x46
+		}
+	},
+	{
+		455,
+		79,
+		0x39,
+		0x01F2,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xF2, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x47
+		}
+	},
+	{
+		456,
+		79,
+		0x39,
+		0x01F3,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xF3, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x48
+		}
+	},
+	{
+		457,
+		79,
+		0x39,
+		0x01F4,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xF4, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x49
+		}
+	},
+	{
+		458,
+		79,
+		0x39,
+		0x01F5,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xF5, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x4A
+		}
+	},
+	{
+		459,
+		79,
+		0x39,
+		0x01F6,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xF6, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x4B
+		}
+	},
+	{
+		460,
+		79,
+		0x39,
+		0x01F7,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xF7, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x4C
+		}
+	},
+	{
+		461,
+		79,
+		0x39,
+		0x01F8,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xF8, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x4D
+		}
+	},
+	{
+		462,
+		79,
+		0x39,
+		0x01F9,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xF9, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x4E
+		}
+	},
+	{
+		463,
+		79,
+		0x39,
+		0x01FA,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xFA, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x4F
+		}
+	},
+	{
+		464,
+		79,
+		0x39,
+		0x01FB,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xFB, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x50
+		}
+	},
+	{
+		465,
+		79,
+		0x39,
+		0x01FC,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xFC, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x51
+		}
+	},
+	{
+		466,
+		79,
+		0x39,
+		0x01FD,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xFD, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x52
+		}
+	},
+	{
+		467,
+		79,
+		0x39,
+		0x01FE,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xFE, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x53
+		}
+	},
+	{
+		468,
+		79,
+		0x39,
+		0x01FF,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xFF, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x54
+		}
+	},
+	{
+		469,
+		79,
+		0x39,
+		0x001E,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x1E, 0x19, 0xE5, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x01, 0x0B, 0x10, 0x12, 0xA0, 0x02, 0x04, 0x00, 0xC0, 0xC1, 0xC2, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0xA5, 0xBC
+		}
+	},
+	{
+		470,
+		79,
+		0x39,
+		0x001F,
+		{
+			0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x1F, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0xA0, 0x07, 0x5F, 0xF8, 0x3E, 0xEF
+		}
+	},
+	{
+		471,
+		11,
+		0x3B,
+		-1,
+		{
+			0x00, 0xFF, 0x3B, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
+		}
+	},
+};
+
+unsigned short cyttsp_fw_records = 472;
+
+unsigned char cyttsp_fw_tts_verh = 0x10;
+unsigned char cyttsp_fw_tts_verl = 0x12;
+unsigned char cyttsp_fw_app_idh = 0xA0;
+unsigned char cyttsp_fw_app_idl = 0x02;
+unsigned char cyttsp_fw_app_verh = 0x04;
+unsigned char cyttsp_fw_app_verl = 0x00;
+unsigned char cyttsp_fw_cid_0 = 0xC0;
+unsigned char cyttsp_fw_cid_1 = 0xC1;
+unsigned char cyttsp_fw_cid_2 = 0xC2;
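+
+/*
+ * Layout sketch for the records above.  The struct and field names here are
+ * illustrative assumptions, not the ones used by the header that actually
+ * declares this table: each entry carries a record index, the number of
+ * valid bytes, the bootloader command byte (0x39 for data blocks, 0x3B for
+ * the terminating record), the target block number (-1 when unused), and
+ * the raw bytes to send, ending in a checksum.
+ *
+ *	struct cyttsp_fw_record {
+ *		unsigned short	record;		// record index
+ *		unsigned short	length;		// valid bytes in data[]
+ *		unsigned char	command;	// bootloader command byte
+ *		short		block;		// target block, -1 if unused
+ *		unsigned char	data[79];	// header + payload + checksum
+ *	};
+ */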
diff --git a/drivers/input/touchscreen/msm_touch.c b/drivers/input/touchscreen/msm_touch.c
new file mode 100644
index 0000000..7ba896a
--- /dev/null
+++ b/drivers/input/touchscreen/msm_touch.c
@@ -0,0 +1,317 @@
+/* drivers/input/touchscreen/msm_touch.c
+ *
+ * Copyright (c) 2008-2009, 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/jiffies.h>
+#include <linux/io.h>
+
+#include <mach/msm_touch.h>
+
+/* HW register map */
+#define TSSC_CTL_REG      0x100
+#define TSSC_SI_REG       0x108
+#define TSSC_OPN_REG      0x104
+#define TSSC_STATUS_REG   0x10C
+#define TSSC_AVG12_REG    0x110
+
+/* status bits */
+#define TSSC_STS_OPN_SHIFT 0x6
+#define TSSC_STS_OPN_BMSK  0x1C0
+#define TSSC_STS_NUMSAMP_SHFT 0x1
+#define TSSC_STS_NUMSAMP_BMSK 0x3E
+
+/* CTL bits */
+#define TSSC_CTL_EN		(0x1 << 0)
+#define TSSC_CTL_SW_RESET	(0x1 << 2)
+#define TSSC_CTL_MASTER_MODE	(0x3 << 3)
+#define TSSC_CTL_AVG_EN		(0x1 << 5)
+#define TSSC_CTL_DEB_EN		(0x1 << 6)
+#define TSSC_CTL_DEB_12_MS	(0x2 << 7)	/* 1.2 ms */
+#define TSSC_CTL_DEB_16_MS	(0x3 << 7)	/* 1.6 ms */
+#define TSSC_CTL_DEB_2_MS	(0x4 << 7)	/* 2 ms */
+#define TSSC_CTL_DEB_3_MS	(0x5 << 7)	/* 3 ms */
+#define TSSC_CTL_DEB_4_MS	(0x6 << 7)	/* 4 ms */
+#define TSSC_CTL_DEB_6_MS	(0x7 << 7)	/* 6 ms */
+#define TSSC_CTL_INTR_FLAG1	(0x1 << 10)
+#define TSSC_CTL_DATA		(0x1 << 11)
+#define TSSC_CTL_SSBI_CTRL_EN	(0x1 << 13)
+
+/* control reg's default state */
+#define TSSC_CTL_STATE	  ( \
+		TSSC_CTL_DEB_12_MS | \
+		TSSC_CTL_DEB_EN | \
+		TSSC_CTL_AVG_EN | \
+		TSSC_CTL_MASTER_MODE | \
+		TSSC_CTL_EN)
+
+#define TSSC_NUMBER_OF_OPERATIONS 2
+#define TS_PENUP_TIMEOUT_MS 20
+
+#define TS_DRIVER_NAME "msm_touchscreen"
+
+#define X_MAX	1024
+#define Y_MAX	1024
+#define P_MAX	256
+
+struct ts {
+	struct input_dev *input;
+	struct timer_list timer;
+	int irq;
+	unsigned int x_max;
+	unsigned int y_max;
+};
+
+static void __iomem *virt;
+#define TSSC_REG(reg) (virt + TSSC_##reg##_REG)
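+/* e.g. TSSC_REG(CTL) expands to (virt + TSSC_CTL_REG), i.e. virt + 0x100 */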
+
+static void ts_update_pen_state(struct ts *ts, int x, int y, int pressure)
+{
+	if (pressure) {
+		input_report_abs(ts->input, ABS_X, x);
+		input_report_abs(ts->input, ABS_Y, y);
+		input_report_abs(ts->input, ABS_PRESSURE, pressure);
+		input_report_key(ts->input, BTN_TOUCH, !!pressure);
+	} else {
+		input_report_abs(ts->input, ABS_PRESSURE, 0);
+		input_report_key(ts->input, BTN_TOUCH, 0);
+	}
+
+	input_sync(ts->input);
+}
+
+static void ts_timer(unsigned long arg)
+{
+	struct ts *ts = (struct ts *)arg;
+
+	ts_update_pen_state(ts, 0, 0, 0);
+}
+
+static irqreturn_t ts_interrupt(int irq, void *dev_id)
+{
+	u32 avgs, x, y, lx, ly;
+	u32 num_op, num_samp;
+	u32 status;
+
+	struct ts *ts = dev_id;
+
+	status = readl_relaxed(TSSC_REG(STATUS));
+	avgs = readl_relaxed(TSSC_REG(AVG12));
+	x = avgs & 0xFFFF;
+	y = avgs >> 16;
+
+	/* For pen down make sure that the data just read is still valid.
+	 * The DATA bit will still be set if the ARM9 hasn't clobbered
+	 * the TSSC. If it's not set, then it doesn't need to be cleared
+	 * here, so just return.
+	 */
+	if (!(readl_relaxed(TSSC_REG(CTL)) & TSSC_CTL_DATA))
+		goto out;
+
+	/* Data has been read, OK to clear the data flag */
+	writel_relaxed(TSSC_CTL_STATE, TSSC_REG(CTL));
+	/* barrier: make sure the write completes before the next sample */
+	mb();
+	/* Valid samples are indicated by the operation count in the status
+	 * register matching the number of expected operations while the
+	 * number of samples collected is zero (the latter check guards
+	 * against ADC contention).
+	 */
+	num_op = (status & TSSC_STS_OPN_BMSK) >> TSSC_STS_OPN_SHIFT;
+	num_samp = (status & TSSC_STS_NUMSAMP_BMSK) >> TSSC_STS_NUMSAMP_SHFT;
+
+	if ((num_op == TSSC_NUMBER_OF_OPERATIONS) && (num_samp == 0)) {
+		/* TSSC can do Z axis measurement, but the driver doesn't support
+		 * this yet.
+		 */
+
+		/*
+		 * REMOVE THIS:
+		 * These x, y co-ordinates adjustments will be removed once
+		 * Android framework adds calibration framework.
+		 */
+#ifdef CONFIG_ANDROID_TOUCHSCREEN_MSM_HACKS
+		lx = ts->x_max - x;
+		ly = ts->y_max - y;
+#else
+		lx = x;
+		ly = y;
+#endif
+		ts_update_pen_state(ts, lx, ly, 255);
+		/* kick the pen-up timer so it expires again after the timeout */
+		mod_timer(&ts->timer,
+			jiffies + msecs_to_jiffies(TS_PENUP_TIMEOUT_MS));
+
+	} else
+		printk(KERN_INFO "Ignored interrupt: {%3d, %3d},"
+				" op = %3d samp = %3d\n",
+				 x, y, num_op, num_samp);
+
+out:
+	return IRQ_HANDLED;
+}
+
+static int __devinit ts_probe(struct platform_device *pdev)
+{
+	int result;
+	struct input_dev *input_dev;
+	struct resource *res, *ioarea;
+	struct ts *ts;
+	unsigned int x_max, y_max, pressure_max;
+	struct msm_ts_platform_data *pdata = pdev->dev.platform_data;
+
+	/* The primary initialization of the TS Hardware
+	 * is taken care of by the ADC code on the modem side
+	 */
+
+	ts = kzalloc(sizeof(struct ts), GFP_KERNEL);
+	input_dev = input_allocate_device();
+	if (!input_dev || !ts) {
+		result = -ENOMEM;
+		goto fail_alloc_mem;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
+		result = -ENOENT;
+		goto fail_alloc_mem;
+	}
+
+	ts->irq = platform_get_irq(pdev, 0);
+	if (!ts->irq) {
+		dev_err(&pdev->dev, "Could not get IORESOURCE_IRQ\n");
+		result = -ENODEV;
+		goto fail_alloc_mem;
+	}
+
+	ioarea = request_mem_region(res->start, resource_size(res), pdev->name);
+	if (!ioarea) {
+		dev_err(&pdev->dev, "Could not allocate io region\n");
+		result = -EBUSY;
+		goto fail_alloc_mem;
+	}
+
+	virt = ioremap(res->start, resource_size(res));
+	if (!virt) {
+		dev_err(&pdev->dev, "Could not ioremap region\n");
+		result = -ENOMEM;
+		goto fail_ioremap;
+	}
+
+	input_dev->name = TS_DRIVER_NAME;
+	input_dev->phys = "msm_touch/input0";
+	input_dev->id.bustype = BUS_HOST;
+	input_dev->id.vendor = 0x0001;
+	input_dev->id.product = 0x0002;
+	input_dev->id.version = 0x0100;
+	input_dev->dev.parent = &pdev->dev;
+
+	input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+	input_dev->absbit[0] = BIT(ABS_X) | BIT(ABS_Y) | BIT(ABS_PRESSURE);
+	input_dev->absbit[BIT_WORD(ABS_MISC)] = BIT_MASK(ABS_MISC);
+	input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+
+	if (pdata) {
+		x_max = pdata->x_max ? : X_MAX;
+		y_max = pdata->y_max ? : Y_MAX;
+		pressure_max = pdata->pressure_max ? : P_MAX;
+	} else {
+		x_max = X_MAX;
+		y_max = Y_MAX;
+		pressure_max = P_MAX;
+	}
+
+	ts->x_max = x_max;
+	ts->y_max = y_max;
+
+	input_set_abs_params(input_dev, ABS_X, 0, x_max, 0, 0);
+	input_set_abs_params(input_dev, ABS_Y, 0, y_max, 0, 0);
+	input_set_abs_params(input_dev, ABS_PRESSURE, 0, pressure_max, 0, 0);
+
+	result = input_register_device(input_dev);
+	if (result)
+		goto fail_ip_reg;
+
+	ts->input = input_dev;
+
+	setup_timer(&ts->timer, ts_timer, (unsigned long)ts);
+	result = request_irq(ts->irq, ts_interrupt, IRQF_TRIGGER_RISING,
+				 "touchscreen", ts);
+	if (result)
+		goto fail_req_irq;
+
+	platform_set_drvdata(pdev, ts);
+
+	return 0;
+
+fail_req_irq:
+	input_unregister_device(input_dev);
+	input_dev = NULL;
+fail_ip_reg:
+	iounmap(virt);
+fail_ioremap:
+	release_mem_region(res->start, resource_size(res));
+fail_alloc_mem:
+	input_free_device(input_dev);
+	kfree(ts);
+	return result;
+}
+
+static int __devexit ts_remove(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct ts *ts = platform_get_drvdata(pdev);
+
+	free_irq(ts->irq, ts);
+	del_timer_sync(&ts->timer);
+
+	input_unregister_device(ts->input);
+	iounmap(virt);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(res->start, resource_size(res));
+	platform_set_drvdata(pdev, NULL);
+	kfree(ts);
+
+	return 0;
+}
+
+static struct platform_driver ts_driver = {
+	.probe		= ts_probe,
+	.remove		= __devexit_p(ts_remove),
+	.driver		= {
+		.name = TS_DRIVER_NAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init ts_init(void)
+{
+	return platform_driver_register(&ts_driver);
+}
+module_init(ts_init);
+
+static void __exit ts_exit(void)
+{
+	platform_driver_unregister(&ts_driver);
+}
+module_exit(ts_exit);
+
+MODULE_DESCRIPTION("MSM Touch Screen driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:msm_touchscreen");
diff --git a/drivers/input/touchscreen/msm_ts.c b/drivers/input/touchscreen/msm_ts.c
new file mode 100644
index 0000000..122b45d
--- /dev/null
+++ b/drivers/input/touchscreen/msm_ts.c
@@ -0,0 +1,513 @@
+/* drivers/input/touchscreen/msm_ts.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * TODO:
+ *      - Add a timer to simulate a pen_up in case there's a timeout.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/marimba-tsadc.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+#include <linux/earlysuspend.h>
+#endif
+
+#include <linux/input/msm_ts.h>
+
+#define TSSC_CTL			0x100
+#define 	TSSC_CTL_PENUP_IRQ	(1 << 12)
+#define 	TSSC_CTL_DATA_FLAG	(1 << 11)
+#define 	TSSC_CTL_DEBOUNCE_EN	(1 << 6)
+#define 	TSSC_CTL_EN_AVERAGE	(1 << 5)
+#define 	TSSC_CTL_MODE_MASTER	(3 << 3)
+#define 	TSSC_CTL_SW_RESET	(1 << 2)
+#define 	TSSC_CTL_ENABLE		(1 << 0)
+#define TSSC_OPN			0x104
+#define 	TSSC_OPN_NOOP		0x00
+#define 	TSSC_OPN_4WIRE_X	0x01
+#define 	TSSC_OPN_4WIRE_Y	0x02
+#define 	TSSC_OPN_4WIRE_Z1	0x03
+#define 	TSSC_OPN_4WIRE_Z2	0x04
+#define TSSC_SAMPLING_INT		0x108
+#define TSSC_STATUS			0x10c
+#define TSSC_AVG_12			0x110
+#define TSSC_AVG_34			0x114
+#define TSSC_SAMPLE(op,samp)		((0x118 + ((op & 0x3) * 0x20)) + \
+					 ((samp & 0x7) * 0x4))
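+/* e.g. TSSC_SAMPLE(0, 1) resolves to 0x118 + 0 * 0x20 + 1 * 0x4 = 0x11c */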
+#define TSSC_TEST_1			0x198
+	#define TSSC_TEST_1_EN_GATE_DEBOUNCE (1 << 2)
+#define TSSC_TEST_2			0x19c
+
+struct msm_ts {
+	struct msm_ts_platform_data	*pdata;
+	struct input_dev		*input_dev;
+	void __iomem			*tssc_base;
+	uint32_t			ts_down:1;
+	struct ts_virt_key		*vkey_down;
+	struct marimba_tsadc_client	*ts_client;
+
+	unsigned int			sample_irq;
+	unsigned int			pen_up_irq;
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+	struct early_suspend		early_suspend;
+#endif
+	struct device			*dev;
+};
+
+static uint32_t msm_tsdebug;
+module_param_named(tsdebug, msm_tsdebug, uint, 0664);
+
+#define tssc_readl(t, a)	(readl_relaxed(((t)->tssc_base) + (a)))
+#define tssc_writel(t, v, a)	do {writel_relaxed(v, \
+					((t)->tssc_base) + (a)); } \
+					while (0)
+
+static void setup_next_sample(struct msm_ts *ts)
+{
+	uint32_t tmp;
+
+	/* 1.2ms debounce time */
+	tmp = ((2 << 7) | TSSC_CTL_DEBOUNCE_EN | TSSC_CTL_EN_AVERAGE |
+	       TSSC_CTL_MODE_MASTER | TSSC_CTL_ENABLE);
+	tssc_writel(ts, tmp, TSSC_CTL);
+	/* barrier: Make sure the write completes before the next sample */
+	mb();
+}
+
+static struct ts_virt_key *find_virt_key(struct msm_ts *ts,
+					 struct msm_ts_virtual_keys *vkeys,
+					 uint32_t val)
+{
+	int i;
+
+	if (!vkeys)
+		return NULL;
+
+	for (i = 0; i < vkeys->num_keys; ++i)
+		if ((val >= vkeys->keys[i].min) && (val <= vkeys->keys[i].max))
+			return &vkeys->keys[i];
+	return NULL;
+}
+
+
+static irqreturn_t msm_ts_irq(int irq, void *dev_id)
+{
+	struct msm_ts *ts = dev_id;
+	struct msm_ts_platform_data *pdata = ts->pdata;
+
+	uint32_t tssc_avg12, tssc_avg34, tssc_status, tssc_ctl;
+	int x, y, z1, z2;
+	int was_down;
+	int down;
+
+	tssc_ctl = tssc_readl(ts, TSSC_CTL);
+	tssc_status = tssc_readl(ts, TSSC_STATUS);
+	tssc_avg12 = tssc_readl(ts, TSSC_AVG_12);
+	tssc_avg34 = tssc_readl(ts, TSSC_AVG_34);
+
+	setup_next_sample(ts);
+
+	x = tssc_avg12 & 0xffff;
+	y = tssc_avg12 >> 16;
+	z1 = tssc_avg34 & 0xffff;
+	z2 = tssc_avg34 >> 16;
+
+	/* invert the inputs if necessary */
+	if (pdata->inv_x) x = pdata->inv_x - x;
+	if (pdata->inv_y) y = pdata->inv_y - y;
+	if (x < 0) x = 0;
+	if (y < 0) y = 0;
+
+	down = !(tssc_ctl & TSSC_CTL_PENUP_IRQ);
+	was_down = ts->ts_down;
+	ts->ts_down = down;
+
+	/* no valid data */
+	if (down && !(tssc_ctl & TSSC_CTL_DATA_FLAG))
+		return IRQ_HANDLED;
+
+	if (msm_tsdebug & 2)
+		printk("%s: down=%d, x=%d, y=%d, z1=%d, z2=%d, status %x\n",
+		       __func__, down, x, y, z1, z2, tssc_status);
+
+	if (!was_down && down) {
+		struct ts_virt_key *vkey = NULL;
+
+		if (pdata->vkeys_y && (y > pdata->virt_y_start))
+			vkey = find_virt_key(ts, pdata->vkeys_y, x);
+		if (!vkey && ts->pdata->vkeys_x && (x > pdata->virt_x_start))
+			vkey = find_virt_key(ts, pdata->vkeys_x, y);
+
+		if (vkey) {
+			WARN_ON(ts->vkey_down != NULL);
+			if(msm_tsdebug)
+				printk("%s: virtual key down %d\n", __func__,
+				       vkey->key);
+			ts->vkey_down = vkey;
+			input_report_key(ts->input_dev, vkey->key, 1);
+			input_sync(ts->input_dev);
+			return IRQ_HANDLED;
+		}
+	} else if (ts->vkey_down != NULL) {
+		if (!down) {
+			if(msm_tsdebug)
+				printk("%s: virtual key up %d\n", __func__,
+				       ts->vkey_down->key);
+			input_report_key(ts->input_dev, ts->vkey_down->key, 0);
+			input_sync(ts->input_dev);
+			ts->vkey_down = NULL;
+		}
+		return IRQ_HANDLED;
+	}
+
+	if (down) {
+		input_report_abs(ts->input_dev, ABS_X, x);
+		input_report_abs(ts->input_dev, ABS_Y, y);
+		input_report_abs(ts->input_dev, ABS_PRESSURE, z1);
+	}
+	input_report_key(ts->input_dev, BTN_TOUCH, down);
+	input_sync(ts->input_dev);
+
+	return IRQ_HANDLED;
+}
+
+static void dump_tssc_regs(struct msm_ts *ts)
+{
+#define __dump_tssc_reg(r) \
+		do { printk(#r " %x\n", tssc_readl(ts, (r))); } while(0)
+
+	__dump_tssc_reg(TSSC_CTL);
+	__dump_tssc_reg(TSSC_OPN);
+	__dump_tssc_reg(TSSC_SAMPLING_INT);
+	__dump_tssc_reg(TSSC_STATUS);
+	__dump_tssc_reg(TSSC_AVG_12);
+	__dump_tssc_reg(TSSC_AVG_34);
+	__dump_tssc_reg(TSSC_TEST_1);
+#undef __dump_tssc_reg
+}
+
+static int __devinit msm_ts_hw_init(struct msm_ts *ts)
+{
+	uint32_t tmp;
+
+	/* Enable the register clock to tssc so we can configure it. */
+	tssc_writel(ts, TSSC_CTL_ENABLE, TSSC_CTL);
+	/* Enable software reset */
+	tssc_writel(ts, TSSC_CTL_SW_RESET, TSSC_CTL);
+
+	/* op1 - measure X, 1 sample, 12bit resolution */
+	tmp = (TSSC_OPN_4WIRE_X << 16) | (2 << 8) | (2 << 0);
+	/* op2 - measure Y, 1 sample, 12bit resolution */
+	tmp |= (TSSC_OPN_4WIRE_Y << 20) | (2 << 10) | (2 << 2);
+	/* op3 - measure Z1, 1 sample, 8bit resolution */
+	tmp |= (TSSC_OPN_4WIRE_Z1 << 24) | (2 << 12) | (0 << 4);
+
+	/* XXX: we don't actually need to measure Z2 (thus 0 samples) when
+	 * doing voltage-driven measurement */
+	/* op4 - measure Z2, 0 samples, 8bit resolution */
+	tmp |= (TSSC_OPN_4WIRE_Z2 << 28) | (0 << 14) | (0 << 6);
+	tssc_writel(ts, tmp, TSSC_OPN);
+
+	/* 16ms sampling interval */
+	tssc_writel(ts, 16, TSSC_SAMPLING_INT);
+	/* Enable gating logic to fix the timing delays caused by
+	 * enabling the debounce logic */
+	tssc_writel(ts, TSSC_TEST_1_EN_GATE_DEBOUNCE, TSSC_TEST_1);
+
+	setup_next_sample(ts);
+
+	return 0;
+}
+
+static void msm_ts_enable(struct msm_ts *ts, bool enable)
+{
+	uint32_t val;
+
+	if (enable == true)
+		msm_ts_hw_init(ts);
+	else {
+		val = tssc_readl(ts, TSSC_CTL);
+		val &= ~TSSC_CTL_ENABLE;
+		tssc_writel(ts, val, TSSC_CTL);
+	}
+}
+
+#ifdef CONFIG_PM
+static int
+msm_ts_suspend(struct device *dev)
+{
+	struct msm_ts *ts =  dev_get_drvdata(dev);
+
+	if (device_may_wakeup(dev) &&
+			device_may_wakeup(dev->parent))
+		enable_irq_wake(ts->sample_irq);
+	else {
+		disable_irq(ts->sample_irq);
+		disable_irq(ts->pen_up_irq);
+		msm_ts_enable(ts, false);
+	}
+
+	return 0;
+}
+
+static int
+msm_ts_resume(struct device *dev)
+{
+	struct msm_ts *ts =  dev_get_drvdata(dev);
+
+	if (device_may_wakeup(dev) &&
+			device_may_wakeup(dev->parent))
+		disable_irq_wake(ts->sample_irq);
+	else {
+		msm_ts_enable(ts, true);
+		enable_irq(ts->sample_irq);
+		enable_irq(ts->pen_up_irq);
+	}
+
+	return 0;
+}
+
+static struct dev_pm_ops msm_touchscreen_pm_ops = {
+#ifndef CONFIG_HAS_EARLYSUSPEND
+	.suspend	= msm_ts_suspend,
+	.resume		= msm_ts_resume,
+#endif
+};
+#endif
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void msm_ts_early_suspend(struct early_suspend *h)
+{
+	struct msm_ts *ts = container_of(h, struct msm_ts, early_suspend);
+
+	msm_ts_suspend(ts->dev);
+}
+
+static void msm_ts_late_resume(struct early_suspend *h)
+{
+	struct msm_ts *ts = container_of(h, struct msm_ts, early_suspend);
+
+	msm_ts_resume(ts->dev);
+}
+#endif
+
+
+static int __devinit msm_ts_probe(struct platform_device *pdev)
+{
+	struct msm_ts_platform_data *pdata = pdev->dev.platform_data;
+	struct msm_ts *ts;
+	struct resource *tssc_res;
+	struct resource *irq1_res;
+	struct resource *irq2_res;
+	int err = 0;
+	int i;
+	struct marimba_tsadc_client *ts_client;
+
+	printk("%s\n", __func__);
+
+	tssc_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tssc");
+	irq1_res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "tssc1");
+	irq2_res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "tssc2");
+
+	if (!tssc_res || !irq1_res || !irq2_res) {
+		pr_err("%s: required resources not defined\n", __func__);
+		return -ENODEV;
+	}
+
+	if (pdata == NULL) {
+		pr_err("%s: missing platform_data\n", __func__);
+		return -ENODEV;
+	}
+
+	ts = kzalloc(sizeof(struct msm_ts), GFP_KERNEL);
+	if (ts == NULL) {
+		pr_err("%s: No memory for struct msm_ts\n", __func__);
+		return -ENOMEM;
+	}
+	ts->pdata = pdata;
+	ts->dev	  = &pdev->dev;
+
+	ts->sample_irq = irq1_res->start;
+	ts->pen_up_irq = irq2_res->start;
+
+	ts->tssc_base = ioremap(tssc_res->start, resource_size(tssc_res));
+	if (ts->tssc_base == NULL) {
+		pr_err("%s: Can't ioremap region (0x%08x - 0x%08x)\n", __func__,
+		       (uint32_t)tssc_res->start, (uint32_t)tssc_res->end);
+		err = -ENOMEM;
+		goto err_ioremap_tssc;
+	}
+
+	ts_client = marimba_tsadc_register(pdev, 1);
+	if (IS_ERR(ts_client)) {
+		pr_err("%s: Unable to register with TSADC\n", __func__);
+		err = -ENOMEM;
+		goto err_tsadc_register;
+	}
+	ts->ts_client = ts_client;
+
+	err = marimba_tsadc_start(ts_client);
+	if (err) {
+		pr_err("%s: Unable to start TSADC\n", __func__);
+		err = -EINVAL;
+		goto err_start_tsadc;
+	}
+
+	ts->input_dev = input_allocate_device();
+	if (ts->input_dev == NULL) {
+		pr_err("failed to allocate touchscreen input device\n");
+		err = -ENOMEM;
+		goto err_alloc_input_dev;
+	}
+	ts->input_dev->name = "msm-touchscreen";
+	ts->input_dev->dev.parent = &pdev->dev;
+
+	input_set_drvdata(ts->input_dev, ts);
+
+	input_set_capability(ts->input_dev, EV_KEY, BTN_TOUCH);
+	set_bit(EV_ABS, ts->input_dev->evbit);
+
+	input_set_abs_params(ts->input_dev, ABS_X, pdata->min_x, pdata->max_x,
+			     0, 0);
+	input_set_abs_params(ts->input_dev, ABS_Y, pdata->min_y, pdata->max_y,
+			     0, 0);
+	input_set_abs_params(ts->input_dev, ABS_PRESSURE, pdata->min_press,
+			     pdata->max_press, 0, 0);
+
+	for (i = 0; pdata->vkeys_x && (i < pdata->vkeys_x->num_keys); ++i)
+		input_set_capability(ts->input_dev, EV_KEY,
+				     pdata->vkeys_x->keys[i].key);
+	for (i = 0; pdata->vkeys_y && (i < pdata->vkeys_y->num_keys); ++i)
+		input_set_capability(ts->input_dev, EV_KEY,
+				     pdata->vkeys_y->keys[i].key);
+
+	err = input_register_device(ts->input_dev);
+	if (err != 0) {
+		pr_err("%s: failed to register input device\n", __func__);
+		goto err_input_dev_reg;
+	}
+
+	msm_ts_hw_init(ts);
+
+	err = request_irq(ts->sample_irq, msm_ts_irq,
+			  (irq1_res->flags & ~IORESOURCE_IRQ) | IRQF_DISABLED,
+			  "msm_touchscreen", ts);
+	if (err != 0) {
+		pr_err("%s: Cannot register irq1 (%d)\n", __func__, err);
+		goto err_request_irq1;
+	}
+
+	err = request_irq(ts->pen_up_irq, msm_ts_irq,
+			  (irq2_res->flags & ~IORESOURCE_IRQ) | IRQF_DISABLED,
+			  "msm_touchscreen", ts);
+	if (err != 0) {
+		pr_err("%s: Cannot register irq2 (%d)\n", __func__, err);
+		goto err_request_irq2;
+	}
+
+	platform_set_drvdata(pdev, ts);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	ts->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN +
+						TSSC_SUSPEND_LEVEL;
+	ts->early_suspend.suspend = msm_ts_early_suspend;
+	ts->early_suspend.resume = msm_ts_late_resume;
+	register_early_suspend(&ts->early_suspend);
+#endif
+
+	device_init_wakeup(&pdev->dev, pdata->can_wakeup);
+	pr_info("%s: tssc_base=%p irq1=%d irq2=%d\n", __func__,
+		ts->tssc_base, (int)ts->sample_irq, (int)ts->pen_up_irq);
+	dump_tssc_regs(ts);
+	return 0;
+
+err_request_irq2:
+	free_irq(ts->sample_irq, ts);
+
+err_request_irq1:
+	/* disable the tssc */
+	tssc_writel(ts, TSSC_CTL_ENABLE, TSSC_CTL);
+
+err_input_dev_reg:
+	input_set_drvdata(ts->input_dev, NULL);
+	input_free_device(ts->input_dev);
+
+err_alloc_input_dev:
+err_start_tsadc:
+	marimba_tsadc_unregister(ts->ts_client);
+
+err_tsadc_register:
+	iounmap(ts->tssc_base);
+
+err_ioremap_tssc:
+	kfree(ts);
+	return err;
+}
+
+static int __devexit msm_ts_remove(struct platform_device *pdev)
+{
+	struct msm_ts *ts = platform_get_drvdata(pdev);
+
+	device_init_wakeup(&pdev->dev, 0);
+	marimba_tsadc_unregister(ts->ts_client);
+	free_irq(ts->sample_irq, ts);
+	free_irq(ts->pen_up_irq, ts);
+	input_unregister_device(ts->input_dev);
+	iounmap(ts->tssc_base);
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	unregister_early_suspend(&ts->early_suspend);
+#endif
+	platform_set_drvdata(pdev, NULL);
+	kfree(ts);
+
+	return 0;
+}
+
+static struct platform_driver msm_touchscreen_driver = {
+	.driver = {
+		.name = "msm_touchscreen",
+		.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm = &msm_touchscreen_pm_ops,
+#endif
+	},
+	.probe		= msm_ts_probe,
+	.remove		= __devexit_p(msm_ts_remove),
+};
+
+static int __init msm_ts_init(void)
+{
+	return platform_driver_register(&msm_touchscreen_driver);
+}
+
+static void __exit msm_ts_exit(void)
+{
+	platform_driver_unregister(&msm_touchscreen_driver);
+}
+
+module_init(msm_ts_init);
+module_exit(msm_ts_exit);
+MODULE_DESCRIPTION("Qualcomm MSM/QSD Touchscreen controller driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:msm_touchscreen");
diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c
index fadc115..ac76cde 100644
--- a/drivers/input/touchscreen/tsc2007.c
+++ b/drivers/input/touchscreen/tsc2007.c
@@ -26,6 +26,12 @@
 #include <linux/interrupt.h>
 #include <linux/i2c.h>
 #include <linux/i2c/tsc2007.h>
+#include <linux/pm.h>
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+#include <linux/earlysuspend.h>
+#define TSC2007_SUSPEND_LEVEL 1
+#endif
 
 #define TSC2007_MEASURE_TEMP0		(0x0 << 4)
 #define TSC2007_MEASURE_AUX		(0x2 << 4)
@@ -79,8 +85,17 @@
 	bool			pendown;
 	int			irq;
 
+	bool			invert_x;
+	bool			invert_y;
+	bool			invert_z1;
+	bool			invert_z2;
+
 	int			(*get_pendown_state)(void);
 	void			(*clear_penirq)(void);
+	int			(*power_shutdown)(bool);
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+	struct early_suspend	early_suspend;
+#endif
 };
 
 static inline int tsc2007_xfer(struct tsc2007 *tsc, u8 cmd)
@@ -117,6 +132,18 @@
 	tc->z1 = tsc2007_xfer(tsc, READ_Z1);
 	tc->z2 = tsc2007_xfer(tsc, READ_Z2);
 
+	if (tsc->invert_x == true)
+		tc->x = MAX_12BIT - tc->x;
+
+	if (tsc->invert_y == true)
+		tc->y = MAX_12BIT - tc->y;
+
+	if (tsc->invert_z1 == true)
+		tc->z1 = MAX_12BIT - tc->z1;
+
+	if (tsc->invert_z2 == true)
+		tc->z2 = MAX_12BIT - tc->z2;
+
 	/* Prepare for next touch reading - power down ADC, enable PENIRQ */
 	tsc2007_xfer(tsc, PWRDOWN);
 }
@@ -263,6 +290,72 @@
 	}
 }
 
+#ifdef CONFIG_PM
+static int tsc2007_suspend(struct device *dev)
+{
+	int rc;
+	struct tsc2007	*ts = dev_get_drvdata(dev);
+
+	disable_irq(ts->irq);
+
+	if (cancel_delayed_work_sync(&ts->work))
+		enable_irq(ts->irq);
+
+	if (ts->power_shutdown) {
+		rc = ts->power_shutdown(true);
+		if (rc) {
+			pr_err("%s: Power off failed, suspend failed (%d)\n",
+							__func__, rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int tsc2007_resume(struct device *dev)
+{
+	int rc;
+	struct tsc2007	*ts = dev_get_drvdata(dev);
+
+	if (ts->power_shutdown) {
+		rc = ts->power_shutdown(false);
+		if (rc) {
+			pr_err("%s: Power on failed, resume failed (%d)\n",
+							 __func__, rc);
+			return rc;
+		}
+	}
+
+	enable_irq(ts->irq);
+
+	return 0;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void tsc2007_early_suspend(struct early_suspend *h)
+{
+	struct tsc2007 *ts = container_of(h, struct tsc2007, early_suspend);
+
+	tsc2007_suspend(&ts->client->dev);
+}
+
+static void tsc2007_late_resume(struct early_suspend *h)
+{
+	struct tsc2007 *ts = container_of(h, struct tsc2007, early_suspend);
+
+	tsc2007_resume(&ts->client->dev);
+}
+#endif
+
+static const struct dev_pm_ops tsc2007_pm_ops = {
+#ifndef CONFIG_HAS_EARLYSUSPEND
+	.suspend	= tsc2007_suspend,
+	.resume		= tsc2007_resume,
+#endif
+};
+#endif
+
 static int __devinit tsc2007_probe(struct i2c_client *client,
 				   const struct i2c_device_id *id)
 {
@@ -299,6 +392,11 @@
 	ts->poll_period       = pdata->poll_period ? : 1;
 	ts->get_pendown_state = pdata->get_pendown_state;
 	ts->clear_penirq      = pdata->clear_penirq;
+	ts->invert_x	      = pdata->invert_x;
+	ts->invert_y	      = pdata->invert_y;
+	ts->invert_z1	      = pdata->invert_z1;
+	ts->invert_z2	      = pdata->invert_z2;
+	ts->power_shutdown    = pdata->power_shutdown;
 
 	snprintf(ts->phys, sizeof(ts->phys),
 		 "%s/input0", dev_name(&client->dev));
@@ -318,7 +416,7 @@
 	if (pdata->init_platform_hw)
 		pdata->init_platform_hw();
 
-	err = request_irq(ts->irq, tsc2007_irq, 0,
+	err = request_irq(ts->irq, tsc2007_irq, pdata->irq_flags,
 			client->dev.driver->name, ts);
 	if (err < 0) {
 		dev_err(&client->dev, "irq %d busy?\n", ts->irq);
@@ -334,6 +432,14 @@
 	if (err)
 		goto err_free_irq;
 
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	ts->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN +
+						TSC2007_SUSPEND_LEVEL;
+	ts->early_suspend.suspend = tsc2007_early_suspend;
+	ts->early_suspend.resume = tsc2007_late_resume;
+	register_early_suspend(&ts->early_suspend);
+#endif
+
 	i2c_set_clientdata(client, ts);
 
 	return 0;
@@ -358,6 +464,9 @@
 	if (pdata->exit_platform_hw)
 		pdata->exit_platform_hw();
 
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	unregister_early_suspend(&ts->early_suspend);
+#endif
 	input_unregister_device(ts->input);
 	kfree(ts);
 
@@ -374,7 +483,10 @@
 static struct i2c_driver tsc2007_driver = {
 	.driver = {
 		.owner	= THIS_MODULE,
-		.name	= "tsc2007"
+		.name	= "tsc2007",
+#ifdef CONFIG_PM
+		.pm = &tsc2007_pm_ops,
+#endif
 	},
 	.id_table	= tsc2007_idtable,
 	.probe		= tsc2007_probe,
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index b84e46b..feb345d 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -182,6 +182,18 @@
 	  defined as platform devices and/or OpenFirmware platform devices.
 	  The code to use these bindings can be selected below.
 
+config LEDS_MSM_PDM
+	tristate "LED Support through PDM"
+	depends on LEDS_CLASS
+	help
+	  This option enables support for LEDs driven by Pulse
+	  Density Modulation (PDM).
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called leds-msm-pdm.
+
 config LEDS_GPIO_PLATFORM
 	bool "Platform device bindings for GPIO LEDs"
 	depends on LEDS_GPIO
@@ -211,6 +223,12 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called leds-lp3944.
 
+config LEDS_CPLD
+	tristate "LED Support for CPLD connected LEDs"
+	depends on LEDS_CLASS
+	help
+	  This option enables support for LEDs connected to a CPLD.
+
 config LEDS_LP5521
 	tristate "LED Support for N.S. LP5521 LED driver chip"
 	depends on LEDS_CLASS && I2C
@@ -269,6 +287,17 @@
 	  LED driver chips accessed via the I2C bus.  Supported
 	  devices include PCA9550, PCA9551, PCA9552, and PCA9553.
 
+config LEDS_PM8XXX
+	tristate "LED Support for Qualcomm PMIC8XXX"
+	depends on MFD_PM8XXX
+	help
+	  This option enables support for LEDs connected over PMIC8XXX
+	  (Power Management IC) chip on Qualcomm reference boards,
+	  for example SURF and FFAs.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called leds-pmic8xxx.
+
 config LEDS_WM831X_STATUS
 	tristate "LED support for status LEDs on WM831x PMICs"
 	depends on LEDS_CLASS
@@ -323,6 +352,24 @@
 	  This option enables support for BD2802GU RGB LED driver chips
 	  accessed via the I2C bus.
 
+config LEDS_MSM_PMIC
+	tristate "LED Support for Qualcomm PMIC connected LEDs"
+	default y
+	depends on ARCH_MSM
+	help
+	  This option enables support for LEDs connected over PMIC
+	  (Power Management IC) chip on Qualcomm reference boards,
+	  for example SURF and FFAs.
+
+config LEDS_PMIC8058
+	tristate "LED Support for Qualcomm PMIC8058"
+	default n
+	depends on PMIC8058
+	help
+	  This option enables support for LEDs connected over PMIC8058
+	  (Power Management IC) chip on Qualcomm reference boards,
+	  for example SURF and FFAs.
+
 config LEDS_INTEL_SS4200
 	tristate "LED driver for Intel NAS SS4200 series"
 	depends on LEDS_CLASS
@@ -369,6 +416,14 @@
 	  This option enable support for on-chip LED drivers found
 	  on Freescale Semiconductor MC13783 PMIC.
 
+config LEDS_QCIBL
+	tristate "LED Support for Quanta LCD backlight"
+	depends on SENSORS_WPCE775X && ARCH_MSM_SCORPION
+	default n
+	help
+	  Say Y here if you want to use the Quanta backlight driver for the ST15
+	  platform.
+
 config LEDS_NS2
 	tristate "LED support for Network Space v2 GPIO LEDs"
 	depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index cb77b9b..819abc0 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -21,15 +21,18 @@
 obj-$(CONFIG_LEDS_COBALT_RAQ)		+= leds-cobalt-raq.o
 obj-$(CONFIG_LEDS_SUNFIRE)		+= leds-sunfire.o
 obj-$(CONFIG_LEDS_PCA9532)		+= leds-pca9532.o
+obj-$(CONFIG_LEDS_PM8XXX)		+= leds-pm8xxx.o
 obj-$(CONFIG_LEDS_GPIO_REGISTER)	+= leds-gpio-register.o
 obj-$(CONFIG_LEDS_GPIO)			+= leds-gpio.o
 obj-$(CONFIG_LEDS_LP3944)		+= leds-lp3944.o
 obj-$(CONFIG_LEDS_LP5521)		+= leds-lp5521.o
 obj-$(CONFIG_LEDS_LP5523)		+= leds-lp5523.o
 obj-$(CONFIG_LEDS_CLEVO_MAIL)		+= leds-clevo-mail.o
+obj-$(CONFIG_LEDS_CPLD)			+= leds-cpld.o
 obj-$(CONFIG_LEDS_HP6XX)		+= leds-hp6xx.o
 obj-$(CONFIG_LEDS_FSG)			+= leds-fsg.o
 obj-$(CONFIG_LEDS_PCA955X)		+= leds-pca955x.o
+obj-$(CONFIG_LEDS_MSM_PMIC)		+= leds-msm-pmic.o
 obj-$(CONFIG_LEDS_DA903X)		+= leds-da903x.o
 obj-$(CONFIG_LEDS_WM831X_STATUS)	+= leds-wm831x-status.o
 obj-$(CONFIG_LEDS_WM8350)		+= leds-wm8350.o
@@ -43,6 +46,9 @@
 obj-$(CONFIG_LEDS_NS2)			+= leds-ns2.o
 obj-$(CONFIG_LEDS_NETXBIG)		+= leds-netxbig.o
 obj-$(CONFIG_LEDS_ASIC3)		+= leds-asic3.o
+obj-$(CONFIG_LEDS_PMIC8058)		+= leds-pmic8058.o
+obj-$(CONFIG_LEDS_QCIBL)		+= leds-qci-backlight.o
+obj-$(CONFIG_LEDS_MSM_PDM)		+= leds-msm-pdm.o
 
 # LED SPI Drivers
 obj-$(CONFIG_LEDS_DAC124S085)		+= leds-dac124s085.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index dc3d3d8..43273fe 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -20,10 +20,51 @@
 #include <linux/err.h>
 #include <linux/ctype.h>
 #include <linux/leds.h>
+#include <linux/slab.h>
 #include "leds.h"
 
 static struct class *leds_class;
 
+#ifdef CONFIG_HAS_EARLYSUSPEND
+
+static void change_brightness(struct work_struct *brightness_change_data)
+{
+	struct deferred_brightness_change *brightness_change = container_of(
+			brightness_change_data,
+			struct deferred_brightness_change,
+			brightness_change_work);
+	struct led_classdev *led_cdev = brightness_change->led_cdev;
+	enum led_brightness value = brightness_change->value;
+
+	led_cdev->brightness_set(led_cdev, value);
+
+	/* Free up memory for the brightness_change structure. */
+	kfree(brightness_change);
+}
+
+int queue_brightness_change(struct led_classdev *led_cdev,
+	enum led_brightness value)
+{
+	/* Initialize the brightness_change_work and its super-struct. */
+	struct deferred_brightness_change *brightness_change =
+		kzalloc(sizeof(struct deferred_brightness_change), GFP_KERNEL);
+
+	if (!brightness_change)
+		return -ENOMEM;
+
+	brightness_change->led_cdev = led_cdev;
+	brightness_change->value = value;
+
+	INIT_WORK(&(brightness_change->brightness_change_work),
+		change_brightness);
+	queue_work(suspend_work_queue,
+		&(brightness_change->brightness_change_work));
+
+	return 0;
+}
+
+#endif
+
 static void led_update_brightness(struct led_classdev *led_cdev)
 {
 	if (led_cdev->brightness_get)
@@ -64,6 +105,25 @@
 	return ret;
 }
 
+static ssize_t led_max_brightness_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	ssize_t ret = -EINVAL;
+	unsigned long state = 0;
+
+	ret = strict_strtoul(buf, 10, &state);
+	if (!ret) {
+		ret = size;
+		if (state > LED_FULL)
+			state = LED_FULL;
+		led_cdev->max_brightness = state;
+		led_set_brightness(led_cdev, led_cdev->brightness);
+	}
+
+	return ret;
+}
+
 static ssize_t led_max_brightness_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -74,7 +134,8 @@
 
 static struct device_attribute led_class_attrs[] = {
 	__ATTR(brightness, 0644, led_brightness_show, led_brightness_store),
-	__ATTR(max_brightness, 0444, led_max_brightness_show, NULL),
+	__ATTR(max_brightness, 0644, led_max_brightness_show,
+			led_max_brightness_store),
 #ifdef CONFIG_LEDS_TRIGGERS
 	__ATTR(trigger, 0644, led_trigger_show, led_trigger_store),
 #endif
diff --git a/drivers/leds/leds-cpld.c b/drivers/leds/leds-cpld.c
new file mode 100644
index 0000000..eab004c
--- /dev/null
+++ b/drivers/leds/leds-cpld.c
@@ -0,0 +1,405 @@
+/* drivers/leds/leds-cpld.c
+ *
+ * Copyright (C) 2008 HTC Corporation.
+ *
+ * Author: Farmer Tseng
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/leds.h>
+#include <linux/spinlock.h>
+#include <linux/ctype.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <asm/mach-types.h>
+
+#define DEBUG_LED_CHANGE 0
+
+static int _g_cpld_led_addr;
+
+struct CPLD_LED_data {
+	spinlock_t data_lock;
+	struct led_classdev leds[4];	/* red, green, blue, jogball-backlight */
+};
+
+static ssize_t led_blink_solid_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct CPLD_LED_data *CPLD_LED;
+	int idx = 2;
+	uint8_t reg_val;
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	ssize_t ret = 0;
+
+	if (!strcmp(led_cdev->name, "red"))
+		idx = 0;
+	else if (!strcmp(led_cdev->name, "green"))
+		idx = 1;
+	else
+		idx = 2;
+
+	CPLD_LED = container_of(led_cdev, struct CPLD_LED_data, leds[idx]);
+
+	spin_lock(&CPLD_LED->data_lock);
+	reg_val = readb(_g_cpld_led_addr);
+	reg_val = reg_val >> (2 * idx + 1);
+	reg_val &= 0x1;
+	spin_unlock(&CPLD_LED->data_lock);
+
+	/* no lock needed for this */
+	sprintf(buf, "%u\n", reg_val);
+	ret = strlen(buf) + 1;
+
+	return ret;
+}
+
+static ssize_t led_blink_solid_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t size)
+{
+	struct CPLD_LED_data *CPLD_LED;
+	int idx = 2;
+	uint8_t reg_val;
+	char *after;
+	unsigned long state;
+	ssize_t ret = -EINVAL;
+	size_t count;
+
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+
+	if (!strcmp(led_cdev->name, "red"))
+		idx = 0;
+	else if (!strcmp(led_cdev->name, "green"))
+		idx = 1;
+	else
+		idx = 2;
+
+	CPLD_LED = container_of(led_cdev, struct CPLD_LED_data, leds[idx]);
+
+	state = simple_strtoul(buf, &after, 10);
+
+	count = after - buf;
+
+	if (*after && isspace(*after))
+		count++;
+
+	if (count == size) {
+		ret = count;
+		spin_lock(&CPLD_LED->data_lock);
+		reg_val = readb(_g_cpld_led_addr);
+		if (state)
+			reg_val |= 1 << (2 * idx + 1);
+		else
+			reg_val &= ~(1 << (2 * idx + 1));
+
+		writeb(reg_val, _g_cpld_led_addr);
+		spin_unlock(&CPLD_LED->data_lock);
+	}
+
+	return ret;
+}
+
+static DEVICE_ATTR(blink, 0644, led_blink_solid_show, led_blink_solid_store);
+
+static ssize_t cpldled_blink_all_show(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	uint8_t reg_val;
+	struct CPLD_LED_data *CPLD_LED = dev_get_drvdata(dev);
+	ssize_t ret = 0;
+
+	spin_lock(&CPLD_LED->data_lock);
+	reg_val = readb(_g_cpld_led_addr);
+	reg_val &= 0x2A;
+	if (reg_val == 0x2A)
+		reg_val = 1;
+	else
+		reg_val = 0;
+	spin_unlock(&CPLD_LED->data_lock);
+
+	/* no lock needed for this */
+	sprintf(buf, "%u\n", reg_val);
+	ret = strlen(buf) + 1;
+
+	return ret;
+}
+
+static ssize_t cpldled_blink_all_store(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf, size_t size)
+{
+	uint8_t reg_val;
+	char *after;
+	unsigned long state;
+	ssize_t ret = -EINVAL;
+	size_t count;
+	struct CPLD_LED_data *CPLD_LED = dev_get_drvdata(dev);
+
+	state = simple_strtoul(buf, &after, 10);
+
+	count = after - buf;
+
+	if (*after && isspace(*after))
+		count++;
+
+	if (count == size) {
+		ret = count;
+		spin_lock(&CPLD_LED->data_lock);
+		reg_val = readb(_g_cpld_led_addr);
+		if (state)
+			reg_val |= 0x2A;
+		else
+			reg_val &= ~0x2A;
+
+		writeb(reg_val, _g_cpld_led_addr);
+		spin_unlock(&CPLD_LED->data_lock);
+	}
+
+	return ret;
+}
+
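+/*
+ * Declared by hand rather than with DEVICE_ATTR(), presumably so that this
+ * class-wide attribute can also appear as "blink" in sysfs without
+ * redefining the dev_attr_blink symbol used for the per-LED attribute above.
+ */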
+static struct device_attribute dev_attr_blink_all = {
+	.attr = {
+		 .name = "blink",
+		 .mode = 0644,
+		 },
+	.show = cpldled_blink_all_show,
+	.store = cpldled_blink_all_store,
+};
+
+static void led_brightness_set(struct led_classdev *led_cdev,
+			       enum led_brightness brightness)
+{
+	struct CPLD_LED_data *CPLD_LED;
+	int idx = 2;
+	struct led_classdev *led;
+	uint8_t reg_val;
+
+	if (!strcmp(led_cdev->name, "jogball-backlight")) {
+		if (brightness > 7)
+			reg_val = 1;
+		else
+			reg_val = brightness;
+		writeb(0, _g_cpld_led_addr + 0x8);
+		writeb(reg_val, _g_cpld_led_addr + 0x8);
+#if DEBUG_LED_CHANGE
+		printk(KERN_INFO "LED change: jogball backlight = %d \n",
+		       reg_val);
+#endif
+		return;
+	} else if (!strcmp(led_cdev->name, "red")) {
+		idx = 0;
+	} else if (!strcmp(led_cdev->name, "green")) {
+		idx = 1;
+	} else {
+		idx = 2;
+	}
+
+	CPLD_LED = container_of(led_cdev, struct CPLD_LED_data, leds[idx]);
+	spin_lock(&CPLD_LED->data_lock);
+	reg_val = readb(_g_cpld_led_addr);
+	led = &CPLD_LED->leds[idx];
+
+	if (led->brightness > LED_OFF)
+		reg_val |= 1 << (2 * idx);
+	else
+		reg_val &= ~(1 << (2 * idx));
+
+	writeb(reg_val, _g_cpld_led_addr);
+#if DEBUG_LED_CHANGE
+	printk(KERN_INFO "LED change: %s = %d \n", led_cdev->name, led->brightness);
+#endif
+	spin_unlock(&CPLD_LED->data_lock);
+}
+
+static ssize_t cpldled_grpfreq_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%u\n", 0);
+}
+
+static ssize_t cpldled_grpfreq_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	return 0;
+}
+
+static DEVICE_ATTR(grpfreq, 0644, cpldled_grpfreq_show, cpldled_grpfreq_store);
+
+static ssize_t cpldled_grppwm_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%u\n", 0);
+}
+
+static ssize_t cpldled_grppwm_store(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t count)
+{
+	return 0;
+}
+
+static DEVICE_ATTR(grppwm, 0644, cpldled_grppwm_show, cpldled_grppwm_store);
+
+static int CPLD_LED_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	int i, j;
+	struct resource *res;
+	struct CPLD_LED_data *CPLD_LED;
+
+	CPLD_LED = kzalloc(sizeof(struct CPLD_LED_data), GFP_KERNEL);
+	if (CPLD_LED == NULL) {
+		printk(KERN_ERR "CPLD_LED_probe: no memory for device\n");
+		ret = -ENOMEM;
+		goto err_alloc_failed;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		ret = -ENOMEM;
+		goto err_alloc_failed;
+	}
+
+	_g_cpld_led_addr = res->start;
+	if (!_g_cpld_led_addr) {
+		ret = -ENOMEM;
+		goto err_alloc_failed;
+	}
+
+	memset(CPLD_LED, 0, sizeof(struct CPLD_LED_data));
+	writeb(0x00, _g_cpld_led_addr);
+
+	CPLD_LED->leds[0].name = "red";
+	CPLD_LED->leds[0].brightness_set = led_brightness_set;
+
+	CPLD_LED->leds[1].name = "green";
+	CPLD_LED->leds[1].brightness_set = led_brightness_set;
+
+	CPLD_LED->leds[2].name = "blue";
+	CPLD_LED->leds[2].brightness_set = led_brightness_set;
+
+	CPLD_LED->leds[3].name = "jogball-backlight";
+	CPLD_LED->leds[3].brightness_set = led_brightness_set;
+
+	spin_lock_init(&CPLD_LED->data_lock);
+
+	for (i = 0; i < 4; i++) {	/* red, green, blue, jogball */
+		ret = led_classdev_register(&pdev->dev, &CPLD_LED->leds[i]);
+		if (ret) {
+			printk(KERN_ERR
+			       "CPLD_LED: led_classdev_register failed\n");
+			goto err_led_classdev_register_failed;
+		}
+	}
+
+	for (i = 0; i < 3; i++) {
+		ret =
+		    device_create_file(CPLD_LED->leds[i].dev, &dev_attr_blink);
+		if (ret) {
+			printk(KERN_ERR
+			       "CPLD_LED: device_create_file failed\n");
+			goto err_out_attr_blink;
+		}
+	}
+
+	dev_set_drvdata(&pdev->dev, CPLD_LED);
+	ret = device_create_file(&pdev->dev, &dev_attr_blink_all);
+	if (ret) {
+		printk(KERN_ERR
+		       "CPLD_LED: create dev_attr_blink_all failed\n");
+		goto err_out_attr_blink;
+	}
+	ret = device_create_file(&pdev->dev, &dev_attr_grppwm);
+	if (ret) {
+		printk(KERN_ERR
+		       "CPLD_LED: create dev_attr_grppwm failed\n");
+		goto err_out_attr_grppwm;
+	}
+	ret = device_create_file(&pdev->dev, &dev_attr_grpfreq);
+	if (ret) {
+		printk(KERN_ERR
+		       "CPLD_LED: create dev_attr_grpfreq failed\n");
+		goto err_out_attr_grpfreq;
+	}
+
+	return 0;
+
+err_out_attr_grpfreq:
+	device_remove_file(&pdev->dev, &dev_attr_grppwm);
+err_out_attr_grppwm:
+	device_remove_file(&pdev->dev, &dev_attr_blink_all);
+err_out_attr_blink:
+	for (j = 0; j < i; j++)
+		device_remove_file(CPLD_LED->leds[j].dev, &dev_attr_blink);
+	i = 4;	/* all four classdevs were registered before this point */
+
+err_led_classdev_register_failed:
+	for (j = 0; j < i; j++)
+		led_classdev_unregister(&CPLD_LED->leds[j]);
+
+err_alloc_failed:
+	kfree(CPLD_LED);
+
+	return ret;
+}
+
+static int __devexit CPLD_LED_remove(struct platform_device *pdev)
+{
+	struct CPLD_LED_data *CPLD_LED;
+	int i;
+
+	CPLD_LED = platform_get_drvdata(pdev);
+
+	for (i = 0; i < 3; i++) {
+		device_remove_file(CPLD_LED->leds[i].dev, &dev_attr_blink);
+		led_classdev_unregister(&CPLD_LED->leds[i]);
+	}
+
+	device_remove_file(&pdev->dev, &dev_attr_blink_all);
+	device_remove_file(&pdev->dev, &dev_attr_grppwm);
+	device_remove_file(&pdev->dev, &dev_attr_grpfreq);
+
+	kfree(CPLD_LED);
+	return 0;
+}
+
+static struct platform_driver CPLD_LED_driver = {
+	.probe = CPLD_LED_probe,
+	.remove = __devexit_p(CPLD_LED_remove),
+	.driver = {
+		   .name = "leds-cpld",
+		   .owner = THIS_MODULE,
+		   },
+};
+
+static int __init CPLD_LED_init(void)
+{
+	return platform_driver_register(&CPLD_LED_driver);
+}
+
+static void __exit CPLD_LED_exit(void)
+{
+	platform_driver_unregister(&CPLD_LED_driver);
+}
+
+MODULE_AUTHOR("Farmer Tseng");
+MODULE_DESCRIPTION("CPLD_LED driver");
+MODULE_LICENSE("GPL");
+
+module_init(CPLD_LED_init);
+module_exit(CPLD_LED_exit);
diff --git a/drivers/leds/leds-msm-pdm.c b/drivers/leds/leds-msm-pdm.c
new file mode 100644
index 0000000..9509514
--- /dev/null
+++ b/drivers/leds/leds-msm-pdm.c
@@ -0,0 +1,233 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+
+/* Early-suspend level */
+#define LED_SUSPEND_LEVEL 1
+#endif
+
+#define PDM_DUTY_MAXVAL BIT(16)
+#define PDM_DUTY_REFVAL BIT(15)
+
+struct pdm_led_data {
+	struct led_classdev cdev;
+	void __iomem *perph_base;
+	int pdm_offset;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	struct early_suspend early_suspend;
+#endif
+};
+
+static void msm_led_brightness_set_percent(struct pdm_led_data *led,
+						int duty_per)
+{
+	u16 duty_val;
+
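+	/* Map the 0-100% request onto the PDM duty register; the programmed
+	 * value counts down from PDM_DUTY_REFVAL (half of PDM_DUTY_MAXVAL).
+	 */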
+	duty_val = PDM_DUTY_REFVAL - ((PDM_DUTY_MAXVAL * duty_per) / 100);
+
+		memcpy(led_dat->reg_led_ctrl, reg_led_ctrl,
+		duty_val--;
+
+	writel_relaxed(duty_val, led->perph_base + led->pdm_offset);
+}
+
+static void msm_led_brightness_set(struct led_classdev *led_cdev,
+				enum led_brightness value)
+{
+	struct pdm_led_data *led =
+		container_of(led_cdev, struct pdm_led_data, cdev);
+
+	msm_led_brightness_set_percent(led, (value * 100) / LED_FULL);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int msm_led_pdm_suspend(struct device *dev)
+{
+	struct pdm_led_data *led = dev_get_drvdata(dev);
+
+	msm_led_brightness_set_percent(led, 0);
+
+	return 0;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void msm_led_pdm_early_suspend(struct early_suspend *h)
+{
+	struct pdm_led_data *led = container_of(h,
+			struct pdm_led_data, early_suspend);
+
+	msm_led_pdm_suspend(led->cdev.dev->parent);
+}
+
+#endif
+
+static const struct dev_pm_ops msm_led_pdm_pm_ops = {
+#ifndef CONFIG_HAS_EARLYSUSPEND
+	.suspend	= msm_led_pdm_suspend,
+#endif
+};
+#endif
+
+static int __devinit msm_pdm_led_probe(struct platform_device *pdev)
+{
+	const struct led_info *pdata = pdev->dev.platform_data;
+	struct pdm_led_data *led;
+	struct resource *res, *ioregion;
+	u32 tcxo_pdm_ctl;
+	int rc;
+
+	if (!pdata) {
+		pr_err("platform data is invalid\n");
+		return -EINVAL;
+	}
+
+	if (pdev->id > 2) {
+		pr_err("pdm id is invalid\n");
+		return -EINVAL;
+	}
+
+	led = kzalloc(sizeof(struct pdm_led_data), GFP_KERNEL);
+	if (!led)
+		return -ENOMEM;
+
+	/* Enable runtime PM ops, start in ACTIVE mode */
+	rc = pm_runtime_set_active(&pdev->dev);
+	if (rc < 0)
+		dev_dbg(&pdev->dev, "unable to set runtime pm state\n");
+	pm_runtime_enable(&pdev->dev);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		pr_err("get resource failed\n");
+		rc = -EINVAL;
+		goto err_get_res;
+	}
+
+	ioregion = request_mem_region(res->start, resource_size(res),
+						pdev->name);
+	if (!ioregion) {
+		pr_err("request for mem region failed\n");
+		rc = -ENOMEM;
+		goto err_get_res;
+	}
+
+	led->perph_base = ioremap(res->start, resource_size(res));
+	if (!led->perph_base) {
+		pr_err("ioremap failed\n");
+		rc = -ENOMEM;
+		goto err_ioremap;
+	}
+
+	/*
+	 * Pulse Density Modulation (PDM) ids start at 0 and every
+	 * PDM register takes 4 bytes.
+	 */
+	led->pdm_offset = ((pdev->id) + 1) * 4;
+
+	/* program tcxo_pdm_ctl register to enable pdm*/
+	tcxo_pdm_ctl = readl_relaxed(led->perph_base);
+	tcxo_pdm_ctl |= (1 << pdev->id);
+	writel_relaxed(tcxo_pdm_ctl, led->perph_base);
+
+	/* Start with LED in off state */
+	msm_led_brightness_set_percent(led, 0);
+
+	led->cdev.brightness_set = msm_led_brightness_set;
+	led->cdev.name = pdata->name ? : "leds-msm-pdm";
+
+	rc = led_classdev_register(&pdev->dev, &led->cdev);
+	if (rc) {
+		pr_err("led class registration failed\n");
+		goto err_led_reg;
+	}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	led->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN +
+						LED_SUSPEND_LEVEL;
+	led->early_suspend.suspend = msm_led_pdm_early_suspend;
+	register_early_suspend(&led->early_suspend);
+#endif
+
+	platform_set_drvdata(pdev, led);
+	return 0;
+
+err_led_reg:
+	iounmap(led->perph_base);
+err_ioremap:
+	release_mem_region(res->start, resource_size(res));
+err_get_res:
+	pm_runtime_set_suspended(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	kfree(led);
+	return rc;
+}
+
+static int __devexit msm_pdm_led_remove(struct platform_device *pdev)
+{
+	struct pdm_led_data *led = platform_get_drvdata(pdev);
+	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	unregister_early_suspend(&led->early_suspend);
+#endif
+	pm_runtime_set_suspended(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
+	led_classdev_unregister(&led->cdev);
+	msm_led_brightness_set_percent(led, 0);
+	iounmap(led->perph_base);
+	release_mem_region(res->start, resource_size(res));
+	kfree(led);
+
+	return 0;
+}
+
+static struct platform_driver msm_pdm_led_driver = {
+	.probe		= msm_pdm_led_probe,
+	.remove		= __devexit_p(msm_pdm_led_remove),
+	.driver		= {
+		.name	= "leds-msm-pdm",
+		.owner	= THIS_MODULE,
+#ifdef CONFIG_PM_SLEEP
+		.pm	= &msm_led_pdm_pm_ops,
+#endif
+	},
+};
+
+static int __init msm_pdm_led_init(void)
+{
+	return platform_driver_register(&msm_pdm_led_driver);
+}
+module_init(msm_pdm_led_init);
+
+static void __exit msm_pdm_led_exit(void)
+{
+	platform_driver_unregister(&msm_pdm_led_driver);
+}
+module_exit(msm_pdm_led_exit);
+
+MODULE_DESCRIPTION("MSM PDM LEDs driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:leds-msm-pdm");
diff --git a/drivers/leds/leds-msm-pmic.c b/drivers/leds/leds-msm-pmic.c
new file mode 100644
index 0000000..b9c6a53
--- /dev/null
+++ b/drivers/leds/leds-msm-pmic.c
@@ -0,0 +1,105 @@
+/*
+ * leds-msm-pmic.c - MSM PMIC LEDs driver.
+ *
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+
+#include <mach/pmic.h>
+
+#define MAX_KEYPAD_BL_LEVEL	16
+
+static void msm_keypad_bl_led_set(struct led_classdev *led_cdev,
+	enum led_brightness value)
+{
+	int ret;
+
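+	/* Scale the 0-255 LED brightness down to the PMIC's 0-15 intensity range */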
+	ret = pmic_set_led_intensity(LED_KEYPAD, value / MAX_KEYPAD_BL_LEVEL);
+	if (ret)
+		dev_err(led_cdev->dev, "can't set keypad backlight\n");
+}
+
+static struct led_classdev msm_kp_bl_led = {
+	.name			= "keyboard-backlight",
+	.brightness_set		= msm_keypad_bl_led_set,
+	.brightness		= LED_OFF,
+};
+
+static int msm_pmic_led_probe(struct platform_device *pdev)
+{
+	int rc;
+
+	rc = led_classdev_register(&pdev->dev, &msm_kp_bl_led);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to register led class driver\n");
+		return rc;
+	}
+	msm_keypad_bl_led_set(&msm_kp_bl_led, LED_OFF);
+	return rc;
+}
+
+static int __devexit msm_pmic_led_remove(struct platform_device *pdev)
+{
+	led_classdev_unregister(&msm_kp_bl_led);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int msm_pmic_led_suspend(struct platform_device *dev,
+		pm_message_t state)
+{
+	led_classdev_suspend(&msm_kp_bl_led);
+
+	return 0;
+}
+
+static int msm_pmic_led_resume(struct platform_device *dev)
+{
+	led_classdev_resume(&msm_kp_bl_led);
+
+	return 0;
+}
+#else
+#define msm_pmic_led_suspend NULL
+#define msm_pmic_led_resume NULL
+#endif
+
+static struct platform_driver msm_pmic_led_driver = {
+	.probe		= msm_pmic_led_probe,
+	.remove		= __devexit_p(msm_pmic_led_remove),
+	.suspend	= msm_pmic_led_suspend,
+	.resume		= msm_pmic_led_resume,
+	.driver		= {
+		.name	= "pmic-leds",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init msm_pmic_led_init(void)
+{
+	return platform_driver_register(&msm_pmic_led_driver);
+}
+module_init(msm_pmic_led_init);
+
+static void __exit msm_pmic_led_exit(void)
+{
+	platform_driver_unregister(&msm_pmic_led_driver);
+}
+module_exit(msm_pmic_led_exit);
+
+MODULE_DESCRIPTION("MSM PMIC LEDs driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:pmic-leds");
diff --git a/drivers/leds/leds-pm8xxx.c b/drivers/leds/leds-pm8xxx.c
new file mode 100644
index 0000000..c34bf30
--- /dev/null
+++ b/drivers/leds/leds-pm8xxx.c
@@ -0,0 +1,339 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/workqueue.h>
+
+#include <linux/mfd/pm8xxx/core.h>
+#include <linux/leds-pm8xxx.h>
+
+#define SSBI_REG_ADDR_DRV_KEYPAD	0x48
+#define PM8XXX_DRV_KEYPAD_BL_MASK	0xf0
+#define PM8XXX_DRV_KEYPAD_BL_SHIFT	0x04
+
+#define SSBI_REG_ADDR_FLASH_DRV0        0x49
+#define PM8XXX_DRV_FLASH_MASK           0xf0
+#define PM8XXX_DRV_FLASH_SHIFT          0x04
+
+#define SSBI_REG_ADDR_FLASH_DRV1        0xFB
+
+#define SSBI_REG_ADDR_LED_CTRL_BASE	0x131
+#define SSBI_REG_ADDR_LED_CTRL(n)	(SSBI_REG_ADDR_LED_CTRL_BASE + (n))
+#define PM8XXX_DRV_LED_CTRL_MASK	0xf8
+#define PM8XXX_DRV_LED_CTRL_SHIFT	0x03
+
+#define MAX_FLASH_LED_CURRENT	300
+#define MAX_LC_LED_CURRENT	40
+#define MAX_KP_BL_LED_CURRENT	300
+
+#define MAX_KEYPAD_BL_LEVEL	(1 << 4)
+#define MAX_LED_DRV_LEVEL	20 /* 2 * 20 mA */
+
+#define PM8XXX_LED_OFFSET(id) ((id) - PM8XXX_ID_LED_0)
+
+#define MAX_KB_LED_BRIGHTNESS		15
+#define MAX_LC_LED_BRIGHTNESS		20
+#define MAX_FLASH_LED_BRIGHTNESS	15
+
+/**
+ * struct pm8xxx_led_data - internal led data structure
+ * @cdev: led class device
+ * @id: led index
+ * @reg: cached value of the led control register
+ * @brightness: current led brightness level
+ * @dev: pointer to the device
+ * @work: work item used to apply brightness changes
+ * @lock: mutex protecting register transactions
+ */
+struct pm8xxx_led_data {
+	struct led_classdev	cdev;
+	int			id;
+	u8			reg;
+	enum led_brightness	brightness;
+	struct device		*dev;
+	struct work_struct	work;
+	struct mutex		lock;
+};
+
+static void led_kp_set(struct pm8xxx_led_data *led, enum led_brightness value)
+{
+	int rc;
+	u8 level;
+
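+	/* The keypad backlight level occupies the upper nibble of the
+	 * DRV_KEYPAD register; the remaining cached bits are preserved.
+	 */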
+	level = (value << PM8XXX_DRV_KEYPAD_BL_SHIFT) &
+				 PM8XXX_DRV_KEYPAD_BL_MASK;
+
+	led->reg &= ~PM8XXX_DRV_KEYPAD_BL_MASK;
+	led->reg |= level;
+
+	rc = pm8xxx_writeb(led->dev->parent, SSBI_REG_ADDR_DRV_KEYPAD,
+								led->reg);
+	if (rc < 0)
+		dev_err(led->cdev.dev,
+			"can't set keypad backlight level rc=%d\n", rc);
+}
+
+static void led_lc_set(struct pm8xxx_led_data *led, enum led_brightness value)
+{
+	int rc, offset;
+	u8 level;
+
+	level = (value << PM8XXX_DRV_LED_CTRL_SHIFT) &
+				PM8XXX_DRV_LED_CTRL_MASK;
+
+	offset = PM8XXX_LED_OFFSET(led->id);
+
+	led->reg &= ~PM8XXX_DRV_LED_CTRL_MASK;
+	led->reg |= level;
+
+	rc = pm8xxx_writeb(led->dev->parent, SSBI_REG_ADDR_LED_CTRL(offset),
+								led->reg);
+	if (rc)
+		dev_err(led->cdev.dev, "can't set (%d) led value rc=%d\n",
+				led->id, rc);
+}
+
+static void
+led_flash_set(struct pm8xxx_led_data *led, enum led_brightness value)
+{
+	int rc;
+	u8 level;
+	u16 reg_addr;
+
+	level = (value << PM8XXX_DRV_FLASH_SHIFT) &
+				 PM8XXX_DRV_FLASH_MASK;
+
+	led->reg &= ~PM8XXX_DRV_FLASH_MASK;
+	led->reg |= level;
+
+	if (led->id == PM8XXX_ID_FLASH_LED_0)
+		reg_addr = SSBI_REG_ADDR_FLASH_DRV0;
+	else
+		reg_addr = SSBI_REG_ADDR_FLASH_DRV1;
+
+	rc = pm8xxx_writeb(led->dev->parent, reg_addr, led->reg);
+	if (rc < 0)
+		dev_err(led->cdev.dev, "can't set flash led%d level rc=%d\n",
+			 led->id, rc);
+}
+
+static void pm8xxx_led_work(struct work_struct *work)
+{
+	struct pm8xxx_led_data *led = container_of(work,
+					 struct pm8xxx_led_data, work);
+
+	mutex_lock(&led->lock);
+
+	switch (led->id) {
+	case PM8XXX_ID_LED_KB_LIGHT:
+		led_kp_set(led, led->brightness);
+		break;
+	case PM8XXX_ID_LED_0:
+	case PM8XXX_ID_LED_1:
+	case PM8XXX_ID_LED_2:
+		led_lc_set(led, led->brightness);
+		break;
+	case PM8XXX_ID_FLASH_LED_0:
+	case PM8XXX_ID_FLASH_LED_1:
+		led_flash_set(led, led->brightness);
+		break;
+	}
+
+	mutex_unlock(&led->lock);
+}
+
+static void pm8xxx_led_set(struct led_classdev *led_cdev,
+	enum led_brightness value)
+{
+	struct pm8xxx_led_data *led;
+
+	led = container_of(led_cdev, struct pm8xxx_led_data, cdev);
+
+	led->brightness = value;
+	schedule_work(&led->work);
+}
+
+static enum led_brightness pm8xxx_led_get(struct led_classdev *led_cdev)
+{
+	struct pm8xxx_led_data *led;
+
+	led = container_of(led_cdev, struct pm8xxx_led_data, cdev);
+
+	return led->brightness;
+}
+
+static int __devinit get_max_brightness(enum pm8xxx_leds id)
+{
+	switch (id) {
+	case PM8XXX_ID_LED_KB_LIGHT:
+		return MAX_KB_LED_BRIGHTNESS;
+	case PM8XXX_ID_LED_0:
+	case PM8XXX_ID_LED_1:
+	case PM8XXX_ID_LED_2:
+		return MAX_LC_LED_BRIGHTNESS;
+	case PM8XXX_ID_FLASH_LED_0:
+	case PM8XXX_ID_FLASH_LED_1:
+		return MAX_FLASH_LED_BRIGHTNESS;
+	default:
+		return 0;
+	}
+}
+
+static int __devinit get_init_value(struct pm8xxx_led_data *led, u8 *val)
+{
+	int rc, offset;
+	u16 addr;
+
+	switch (led->id) {
+	case PM8XXX_ID_LED_KB_LIGHT:
+		addr = SSBI_REG_ADDR_DRV_KEYPAD;
+		break;
+	case PM8XXX_ID_LED_0:
+	case PM8XXX_ID_LED_1:
+	case PM8XXX_ID_LED_2:
+		offset = PM8XXX_LED_OFFSET(led->id);
+		addr = SSBI_REG_ADDR_LED_CTRL(offset);
+		break;
+	case PM8XXX_ID_FLASH_LED_0:
+		addr = SSBI_REG_ADDR_FLASH_DRV0;
+		break;
+	case PM8XXX_ID_FLASH_LED_1:
+		addr = SSBI_REG_ADDR_FLASH_DRV1;
+		break;
+	}
+
+	rc = pm8xxx_readb(led->dev->parent, addr, val);
+	if (rc)
+		dev_err(led->cdev.dev, "can't get led(%d) level rc=%d\n",
+							led->id, rc);
+
+	return rc;
+}
+
+static int __devinit pm8xxx_led_probe(struct platform_device *pdev)
+{
+	const struct led_platform_data *pdata = pdev->dev.platform_data;
+	struct led_info *curr_led;
+	struct pm8xxx_led_data *led, *led_dat;
+	int rc, i;
+
+	if (pdata == NULL) {
+		dev_err(&pdev->dev, "platform data not supplied\n");
+		return -EINVAL;
+	}
+
+	led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
+	if (led == NULL) {
+		dev_err(&pdev->dev, "failed to alloc memory\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < pdata->num_leds; i++) {
+		curr_led	= &pdata->leds[i];
+		led_dat		= &led[i];
+		/* the flags variable is used for led-id */
+		led_dat->id     = curr_led->flags;
+
+		if (!((led_dat->id >= PM8XXX_ID_LED_KB_LIGHT) &&
+				(led_dat->id <= PM8XXX_ID_FLASH_LED_1))) {
+			dev_err(&pdev->dev, "invalid LED ID (%d) specified\n",
+						 led_dat->id);
+			rc = -EINVAL;
+			goto fail_id_check;
+		}
+
+		led_dat->cdev.name		= curr_led->name;
+		led_dat->cdev.default_trigger   = curr_led->default_trigger;
+		led_dat->cdev.brightness_set    = pm8xxx_led_set;
+		led_dat->cdev.brightness_get    = pm8xxx_led_get;
+		led_dat->cdev.brightness	= LED_OFF;
+		led_dat->cdev.flags		= LED_CORE_SUSPENDRESUME;
+
+		led_dat->cdev.max_brightness = get_max_brightness(led_dat->id);
+		led_dat->dev = &pdev->dev;
+
+		rc =  get_init_value(led_dat, &led_dat->reg);
+		if (rc < 0)
+			goto fail_id_check;
+
+		mutex_init(&led_dat->lock);
+		INIT_WORK(&led_dat->work, pm8xxx_led_work);
+
+		rc = led_classdev_register(&pdev->dev, &led_dat->cdev);
+		if (rc) {
+			dev_err(&pdev->dev, "unable to register led %d,rc=%d\n",
+						 led_dat->id, rc);
+			goto fail_id_check;
+		}
+	}
+
+	platform_set_drvdata(pdev, led);
+
+	return 0;
+
+fail_id_check:
+	if (i > 0) {
+		for (i = i - 1; i >= 0; i--) {
+			mutex_destroy(&led[i].lock);
+			led_classdev_unregister(&led[i].cdev);
+		}
+	}
+	kfree(led);
+	return rc;
+}
+
+static int __devexit pm8xxx_led_remove(struct platform_device *pdev)
+{
+	int i;
+	const struct led_platform_data *pdata =
+				pdev->dev.platform_data;
+	struct pm8xxx_led_data *led = platform_get_drvdata(pdev);
+
+	for (i = 0; i < pdata->num_leds; i++) {
+		cancel_work_sync(&led[i].work);
+		mutex_destroy(&led[i].lock);
+		led_classdev_unregister(&led[i].cdev);
+	}
+
+	kfree(led);
+
+	return 0;
+}
+
+static struct platform_driver pm8xxx_led_driver = {
+	.probe		= pm8xxx_led_probe,
+	.remove		= __devexit_p(pm8xxx_led_remove),
+	.driver		= {
+		.name	= PM8XXX_LEDS_DEV_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init pm8xxx_led_init(void)
+{
+	return platform_driver_register(&pm8xxx_led_driver);
+}
+module_init(pm8xxx_led_init);
+
+static void __exit pm8xxx_led_exit(void)
+{
+	platform_driver_unregister(&pm8xxx_led_driver);
+}
+module_exit(pm8xxx_led_exit);
+
+MODULE_DESCRIPTION("PM8XXX LEDs driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:pm8xxx-led");
diff --git a/drivers/leds/leds-pmic8058.c b/drivers/leds/leds-pmic8058.c
new file mode 100644
index 0000000..d1aed3f
--- /dev/null
+++ b/drivers/leds/leds-pmic8058.c
@@ -0,0 +1,434 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/mfd/pmic8058.h>
+#include <linux/leds-pmic8058.h>
+
+#define SSBI_REG_ADDR_DRV_KEYPAD	0x48
+#define PM8058_DRV_KEYPAD_BL_MASK	0xf0
+#define PM8058_DRV_KEYPAD_BL_SHIFT	0x04
+
+#define SSBI_REG_ADDR_FLASH_DRV0        0x49
+#define PM8058_DRV_FLASH_MASK           0xf0
+#define PM8058_DRV_FLASH_SHIFT          0x04
+
+#define SSBI_REG_ADDR_FLASH_DRV1        0xFB
+
+#define SSBI_REG_ADDR_LED_CTRL_BASE	0x131
+#define SSBI_REG_ADDR_LED_CTRL(n)	(SSBI_REG_ADDR_LED_CTRL_BASE + (n))
+#define PM8058_DRV_LED_CTRL_MASK	0xf8
+#define PM8058_DRV_LED_CTRL_SHIFT	0x03
+
+#define MAX_FLASH_CURRENT	300
+#define MAX_KEYPAD_CURRENT 300
+#define MAX_KEYPAD_BL_LEVEL	(1 << 4)
+#define MAX_LED_DRV_LEVEL	20 /* 2 * 20 mA */
+
+#define PMIC8058_LED_OFFSET(id) ((id) - PMIC8058_ID_LED_0)
+
+struct pmic8058_led_data {
+	struct led_classdev	cdev;
+	int			id;
+	enum led_brightness	brightness;
+	u8			flags;
+	struct pm8058_chip	*pm_chip;
+	struct work_struct	work;
+	struct mutex		lock;
+	spinlock_t		value_lock;
+	u8			reg_kp;
+	u8			reg_led_ctrl[3];
+	u8			reg_flash_led0;
+	u8			reg_flash_led1;
+};
+
+#define PM8058_MAX_LEDS		7
+static struct pmic8058_led_data led_data[PM8058_MAX_LEDS];
+
+static void kp_bl_set(struct pmic8058_led_data *led, enum led_brightness value)
+{
+	int rc;
+	u8 level;
+	unsigned long flags;
+
+	spin_lock_irqsave(&led->value_lock, flags);
+	level = (value << PM8058_DRV_KEYPAD_BL_SHIFT) &
+				 PM8058_DRV_KEYPAD_BL_MASK;
+
+	led->reg_kp &= ~PM8058_DRV_KEYPAD_BL_MASK;
+	led->reg_kp |= level;
+	spin_unlock_irqrestore(&led->value_lock, flags);
+
+	rc = pm8058_write(led->pm_chip, SSBI_REG_ADDR_DRV_KEYPAD,
+				 &led->reg_kp, 1);
+	if (rc)
+		pr_err("%s: can't set keypad backlight level\n", __func__);
+}
+
+static enum led_brightness kp_bl_get(struct pmic8058_led_data *led)
+{
+	if ((led->reg_kp & PM8058_DRV_KEYPAD_BL_MASK) >>
+			 PM8058_DRV_KEYPAD_BL_SHIFT)
+		return LED_FULL;
+	else
+		return LED_OFF;
+}
+
+static void led_lc_set(struct pmic8058_led_data *led, enum led_brightness value)
+{
+	unsigned long flags;
+	int rc, offset;
+	u8 level, tmp;
+
+	spin_lock_irqsave(&led->value_lock, flags);
+
+	level = (value << PM8058_DRV_LED_CTRL_SHIFT) &
+		PM8058_DRV_LED_CTRL_MASK;
+
+	offset = PMIC8058_LED_OFFSET(led->id);
+	tmp = led->reg_led_ctrl[offset];
+
+	tmp &= ~PM8058_DRV_LED_CTRL_MASK;
+	tmp |= level;
+	spin_unlock_irqrestore(&led->value_lock, flags);
+
+	rc = pm8058_write(led->pm_chip,	SSBI_REG_ADDR_LED_CTRL(offset),
+			&tmp, 1);
+	if (rc) {
+		dev_err(led->cdev.dev, "can't set (%d) led value\n",
+				led->id);
+		return;
+	}
+
+	spin_lock_irqsave(&led->value_lock, flags);
+	led->reg_led_ctrl[offset] = tmp;
+	spin_unlock_irqrestore(&led->value_lock, flags);
+}
+
+static enum led_brightness led_lc_get(struct pmic8058_led_data *led)
+{
+	int offset;
+	u8 value;
+
+	offset = PMIC8058_LED_OFFSET(led->id);
+	value = led->reg_led_ctrl[offset];
+
+	if ((value & PM8058_DRV_LED_CTRL_MASK) >>
+			PM8058_DRV_LED_CTRL_SHIFT)
+		return LED_FULL;
+	else
+		return LED_OFF;
+}
+
+static void
+led_flash_set(struct pmic8058_led_data *led, enum led_brightness value)
+{
+	int rc;
+	u8 level;
+	unsigned long flags;
+	u8 reg_flash_led;
+	u16 reg_addr;
+
+	spin_lock_irqsave(&led->value_lock, flags);
+	level = (value << PM8058_DRV_FLASH_SHIFT) &
+				 PM8058_DRV_FLASH_MASK;
+
+	if (led->id == PMIC8058_ID_FLASH_LED_0) {
+		led->reg_flash_led0 &= ~PM8058_DRV_FLASH_MASK;
+		led->reg_flash_led0 |= level;
+		reg_flash_led	    = led->reg_flash_led0;
+		reg_addr	    = SSBI_REG_ADDR_FLASH_DRV0;
+	} else {
+		led->reg_flash_led1 &= ~PM8058_DRV_FLASH_MASK;
+		led->reg_flash_led1 |= level;
+		reg_flash_led	    = led->reg_flash_led1;
+		reg_addr	    = SSBI_REG_ADDR_FLASH_DRV1;
+	}
+	spin_unlock_irqrestore(&led->value_lock, flags);
+
+	rc = pm8058_write(led->pm_chip, reg_addr, &reg_flash_led, 1);
+	if (rc)
+		pr_err("%s: can't set flash led%d level %d\n", __func__,
+			led->id, rc);
+}
+
+int pm8058_set_flash_led_current(enum pmic8058_leds id, unsigned mA)
+{
+	struct pmic8058_led_data *led;
+
+	if ((id < PMIC8058_ID_FLASH_LED_0) || (id > PMIC8058_ID_FLASH_LED_1)) {
+		pr_err("%s: invalid LED ID (%d) specified\n", __func__, id);
+		return -EINVAL;
+	}
+
+	led = &led_data[id];
+	if (!led) {
+		pr_err("%s: flash led not available\n", __func__);
+		return -EINVAL;
+	}
+
+	if (mA > MAX_FLASH_CURRENT)
+		return -EINVAL;
+
+	led_flash_set(led, mA / 20);
+
+	return 0;
+}
+EXPORT_SYMBOL(pm8058_set_flash_led_current);
+
+int pm8058_set_led_current(enum pmic8058_leds id, unsigned mA)
+{
+	struct pmic8058_led_data *led;
+	int brightness = 0;
+
+	if ((id < PMIC8058_ID_LED_KB_LIGHT) || (id > PMIC8058_ID_FLASH_LED_1)) {
+		pr_err("%s: invalid LED ID (%d) specified\n", __func__, id);
+		return -EINVAL;
+	}
+
+	led = &led_data[id];
+	if (!led) {
+		pr_err("%s: flash led not available\n", __func__);
+		return -EINVAL;
+	}
+
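+	/* Translate the requested current into a register level: the divisors
+	 * below imply 2 mA steps for the low-current LEDs and 20 mA steps for
+	 * the keypad and flash drivers.
+	 */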
+	switch (id) {
+	case PMIC8058_ID_LED_0:
+	case PMIC8058_ID_LED_1:
+	case PMIC8058_ID_LED_2:
+		brightness = mA / 2;
+		if (brightness  > led->cdev.max_brightness)
+			return -EINVAL;
+		led_lc_set(led, brightness);
+		break;
+
+	case PMIC8058_ID_LED_KB_LIGHT:
+	case PMIC8058_ID_FLASH_LED_0:
+	case PMIC8058_ID_FLASH_LED_1:
+		brightness = mA / 20;
+		if (brightness  > led->cdev.max_brightness)
+			return -EINVAL;
+		if (id == PMIC8058_ID_LED_KB_LIGHT)
+			kp_bl_set(led, brightness);
+		else
+			led_flash_set(led, brightness);
+		break;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(pm8058_set_led_current);
+
+static void pmic8058_led_set(struct led_classdev *led_cdev,
+	enum led_brightness value)
+{
+	struct pmic8058_led_data *led;
+	unsigned long flags;
+
+	led = container_of(led_cdev, struct pmic8058_led_data, cdev);
+
+	spin_lock_irqsave(&led->value_lock, flags);
+	led->brightness = value;
+	schedule_work(&led->work);
+	spin_unlock_irqrestore(&led->value_lock, flags);
+}
+
+static void pmic8058_led_work(struct work_struct *work)
+{
+	struct pmic8058_led_data *led = container_of(work,
+					 struct pmic8058_led_data, work);
+
+	mutex_lock(&led->lock);
+
+	switch (led->id) {
+	case PMIC8058_ID_LED_KB_LIGHT:
+		kp_bl_set(led, led->brightness);
+		break;
+	case PMIC8058_ID_LED_0:
+	case PMIC8058_ID_LED_1:
+	case PMIC8058_ID_LED_2:
+		led_lc_set(led, led->brightness);
+		break;
+	case PMIC8058_ID_FLASH_LED_0:
+	case PMIC8058_ID_FLASH_LED_1:
+		led_flash_set(led, led->brightness);
+		break;
+	}
+
+	mutex_unlock(&led->lock);
+}
+
+static enum led_brightness pmic8058_led_get(struct led_classdev *led_cdev)
+{
+	struct pmic8058_led_data *led;
+
+	led = container_of(led_cdev, struct pmic8058_led_data, cdev);
+
+	switch (led->id) {
+	case PMIC8058_ID_LED_KB_LIGHT:
+		return kp_bl_get(led);
+	case PMIC8058_ID_LED_0:
+	case PMIC8058_ID_LED_1:
+	case PMIC8058_ID_LED_2:
+		return led_lc_get(led);
+	}
+	return LED_OFF;
+}
+
+static int pmic8058_led_probe(struct platform_device *pdev)
+{
+	struct pmic8058_leds_platform_data *pdata = pdev->dev.platform_data;
+	struct pmic8058_led_data *led_dat;
+	struct pmic8058_led *curr_led;
+	int rc, i = 0;
+	struct pm8058_chip	*pm_chip;
+	u8			reg_kp;
+	u8			reg_led_ctrl[3];
+	u8			reg_flash_led0;
+	u8			reg_flash_led1;
+
+	pm_chip = dev_get_drvdata(pdev->dev.parent);
+	if (pm_chip == NULL) {
+		dev_err(&pdev->dev, "no parent data passed in\n");
+		return -EFAULT;
+	}
+
+	if (pdata == NULL) {
+		dev_err(&pdev->dev, "platform data not supplied\n");
+		return -EINVAL;
+	}
+
+	rc = pm8058_read(pm_chip, SSBI_REG_ADDR_DRV_KEYPAD, &reg_kp,
+				1);
+	if (rc) {
+		dev_err(&pdev->dev, "can't get keypad backlight level\n");
+		goto err_reg_read;
+	}
+
+	rc = pm8058_read(pm_chip, SSBI_REG_ADDR_LED_CTRL_BASE,
+			reg_led_ctrl, 3);
+	if (rc) {
+		dev_err(&pdev->dev, "can't get led levels\n");
+		goto err_reg_read;
+	}
+
+	rc = pm8058_read(pm_chip, SSBI_REG_ADDR_FLASH_DRV0,
+			&reg_flash_led0, 1);
+	if (rc) {
+		dev_err(&pdev->dev, "can't read flash led0\n");
+		goto err_reg_read;
+	}
+
+	rc = pm8058_read(pm_chip, SSBI_REG_ADDR_FLASH_DRV1,
+			&reg_flash_led1, 1);
+	if (rc) {
+		dev_err(&pdev->dev, "can't get flash led1\n");
+		goto err_reg_read;
+	}
+
+	for (i = 0; i < pdata->num_leds; i++) {
+		curr_led	= &pdata->leds[i];
+		led_dat		= &led_data[curr_led->id];
+
+		led_dat->cdev.name		= curr_led->name;
+		led_dat->cdev.default_trigger   = curr_led->default_trigger;
+		led_dat->cdev.brightness_set    = pmic8058_led_set;
+		led_dat->cdev.brightness_get    = pmic8058_led_get;
+		led_dat->cdev.brightness	= LED_OFF;
+		led_dat->cdev.max_brightness	= curr_led->max_brightness;
+		led_dat->cdev.flags		= LED_CORE_SUSPENDRESUME;
+
+		led_dat->id		        = curr_led->id;
+		led_dat->reg_kp			= reg_kp;
+		memcpy(led_data->reg_led_ctrl, reg_led_ctrl,
+					 sizeof(reg_led_ctrl));
+		led_dat->reg_flash_led0		= reg_flash_led0;
+		led_dat->reg_flash_led1		= reg_flash_led1;
+
+		if (!((led_dat->id >= PMIC8058_ID_LED_KB_LIGHT) &&
+				(led_dat->id <= PMIC8058_ID_FLASH_LED_1))) {
+			dev_err(&pdev->dev, "invalid LED ID (%d) specified\n",
+						 led_dat->id);
+			rc = -EINVAL;
+			goto fail_id_check;
+		}
+
+		led_dat->pm_chip		= pm_chip;
+
+		mutex_init(&led_dat->lock);
+		spin_lock_init(&led_dat->value_lock);
+		INIT_WORK(&led_dat->work, pmic8058_led_work);
+
+		rc = led_classdev_register(&pdev->dev, &led_dat->cdev);
+		if (rc) {
+			dev_err(&pdev->dev, "unable to register led %d\n",
+						 led_dat->id);
+			goto fail_id_check;
+		}
+	}
+
+	platform_set_drvdata(pdev, led_data);
+
+	return 0;
+
+err_reg_read:
+fail_id_check:
+	if (i > 0) {
+		for (i = i - 1; i >= 0; i--)
+			led_classdev_unregister(&led_data[i].cdev);
+	}
+	return rc;
+}
+
+static int __devexit pmic8058_led_remove(struct platform_device *pdev)
+{
+	int i;
+	struct pmic8058_leds_platform_data *pdata = pdev->dev.platform_data;
+	struct pmic8058_led_data *led = platform_get_drvdata(pdev);
+
+	for (i = 0; i < pdata->num_leds; i++) {
+		led_classdev_unregister(&led[pdata->leds[i].id].cdev);
+		cancel_work_sync(&led[pdata->leds[i].id].work);
+	}
+
+	return 0;
+}
+
+static struct platform_driver pmic8058_led_driver = {
+	.probe		= pmic8058_led_probe,
+	.remove		= __devexit_p(pmic8058_led_remove),
+	.driver		= {
+		.name	= "pm8058-led",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init pmic8058_led_init(void)
+{
+	return platform_driver_register(&pmic8058_led_driver);
+}
+module_init(pmic8058_led_init);
+
+static void __exit pmic8058_led_exit(void)
+{
+	platform_driver_unregister(&pmic8058_led_driver);
+}
+module_exit(pmic8058_led_exit);
+
+MODULE_DESCRIPTION("PMIC8058 LEDs driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:pmic8058-led");
diff --git a/drivers/leds/leds-qci-backlight.c b/drivers/leds/leds-qci-backlight.c
new file mode 100644
index 0000000..67502e8
--- /dev/null
+++ b/drivers/leds/leds-qci-backlight.c
@@ -0,0 +1,71 @@
+/* Quanta I2C Backlight Driver
+ *
+ * Copyright (C) 2009 Quanta Computer Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * Backlight driver that communicates over the I2C interface on the ST15
+ * platform. It works only with the Nuvoton WPCE775x Embedded Controller.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/wpce775x.h>
+
+#define EC_CMD_SET_BACKLIGHT 0xB1
+
+static void qci_backlight_store(struct led_classdev *led_cdev,
+	enum led_brightness val);
+
+static struct platform_device *bl_pdev;
+static struct led_classdev lcd_backlight = {
+	.name = "lcd-backlight",
+	.brightness = 147,
+	.brightness_set = qci_backlight_store,
+};
+
+static void qci_backlight_store(struct led_classdev *led_cdev,
+	enum led_brightness val)
+{
+	u16 value = val;
+	wpce_smbus_write_word_data(EC_CMD_SET_BACKLIGHT, value);
+	msleep(10);
+
+	dev_dbg(&bl_pdev->dev, "[backlight_store] : value  = %d\n", value);
+}
+
+static int __init qci_backlight_init(void)
+{
+	int err;
+
+	bl_pdev = platform_device_register_simple("backlight", 0, NULL, 0);
+	if (IS_ERR(bl_pdev))
+		return PTR_ERR(bl_pdev);
+
+	err = led_classdev_register(&bl_pdev->dev, &lcd_backlight);
+	return err;
+}
+
+static void __exit qci_backlight_exit(void)
+{
+	led_classdev_unregister(&lcd_backlight);
+	platform_device_unregister(bl_pdev);
+}
+
+module_init(qci_backlight_init);
+module_exit(qci_backlight_exit);
+
+MODULE_AUTHOR("Quanta Computer Inc.");
+MODULE_DESCRIPTION("Quanta Embedded Controller I2C Backlight Driver");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/leds/leds.h b/drivers/leds/leds.h
index e77c7f8..593a63c 100644
--- a/drivers/leds/leds.h
+++ b/drivers/leds/leds.h
@@ -16,6 +16,21 @@
 #include <linux/device.h>
 #include <linux/rwsem.h>
 #include <linux/leds.h>
+#include <linux/workqueue.h>
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+
+extern struct workqueue_struct *suspend_work_queue;
+extern int queue_brightness_change(struct led_classdev *led_cdev,
+	enum led_brightness value);
+
+struct deferred_brightness_change {
+	struct work_struct brightness_change_work;
+	struct led_classdev *led_cdev;
+	enum led_brightness value;
+};
+
+#endif
 
 static inline void led_set_brightness(struct led_classdev *led_cdev,
 					enum led_brightness value)
@@ -23,8 +38,12 @@
 	if (value > led_cdev->max_brightness)
 		value = led_cdev->max_brightness;
 	led_cdev->brightness = value;
-	if (!(led_cdev->flags & LED_SUSPENDED))
-		led_cdev->brightness_set(led_cdev, value);
+	if (!(led_cdev->flags & LED_SUSPENDED)) {
+#ifdef CONFIG_HAS_EARLYSUSPEND
+		if (queue_brightness_change(led_cdev, value) != 0)
+#endif
+			led_cdev->brightness_set(led_cdev, value);
+	}
 }
 
 static inline int led_get_brightness(struct led_classdev *led_cdev)
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 52798a1..4ba9432 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -443,4 +443,38 @@
 # TI's ST based wl128x FM radio
 source "drivers/media/radio/wl128x/Kconfig"
 
+config RADIO_TAVARUA
+	tristate "Qualcomm Tavarua I2C FM support"
+	depends on I2C && VIDEO_V4L2 && MARIMBA_CORE
+	default n
+	---help---
+	  Say Y here if you want to use the Qualcomm FM chip (Tavarua).
+	  This FM chip uses I2C interface.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called radio-tavarua.
+
+config RADIO_IRIS
+	tristate "Qualcomm IRIS FM support"
+	depends on VIDEO_V4L2
+	default n
+	---help---
+	  Say Y here if you want to use the Qualcomm FM chip (IRIS).
+	  This FM chip uses the SMD interface.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called radio-iris.
+
+config RADIO_IRIS_TRANSPORT
+	tristate "Qualcomm IRIS Transport"
+	depends on RADIO_IRIS
+	default n
+	---help---
+	  Say Y here if you want to use the Qualcomm FM chip (IRIS)
+	  with SMD as the transport.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called radio-iris-transport.
+
 endif # RADIO_ADAPTERS
diff --git a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile
index f484a6e..3337f4b 100644
--- a/drivers/media/radio/Makefile
+++ b/drivers/media/radio/Makefile
@@ -26,5 +26,8 @@
 obj-$(CONFIG_RADIO_TIMBERDALE) += radio-timb.o
 obj-$(CONFIG_RADIO_WL1273) += radio-wl1273.o
 obj-$(CONFIG_RADIO_WL128X) += wl128x/
+obj-$(CONFIG_RADIO_TAVARUA) += radio-tavarua.o
+obj-$(CONFIG_RADIO_IRIS) += radio-iris.o
+obj-$(CONFIG_RADIO_IRIS_TRANSPORT) += radio-iris-transport.o
 
 EXTRA_CFLAGS += -Isound
diff --git a/drivers/media/radio/radio-iris-transport.c b/drivers/media/radio/radio-iris-transport.c
new file mode 100644
index 0000000..6628c9d
--- /dev/null
+++ b/drivers/media/radio/radio-iris-transport.c
@@ -0,0 +1,165 @@
+/*
+ *  Qualcomm's FM Shared Memory Transport Driver
+ *
+ *  FM HCI_SMD (FM HCI Shared Memory Driver) is Qualcomm's shared memory driver
+ *  for the HCI protocol. This file is based on drivers/bluetooth/hci_vhci.c.
+ *
+ *  Copyright (c) 2000-2001, 2011 Code Aurora Forum. All rights reserved.
+ *
+ *  Copyright (C) 2002-2003  Maxim Krasnyansky <maxk@qualcomm.com>
+ *  Copyright (C) 2004-2006  Marcel Holtmann <marcel@holtmann.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2
+ *  as published by the Free Software Foundation
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <mach/msm_smd.h>
+#include <media/radio-iris.h>
+
+struct radio_data {
+	struct radio_hci_dev *hdev;
+	struct tasklet_struct   rx_task;
+	struct smd_channel  *fm_channel;
+};
+static struct radio_data hs;
+
+static void radio_hci_smd_destruct(struct radio_hci_dev *hdev)
+{
+	radio_hci_unregister_dev(hs.hdev);
+}
+
+
+static void radio_hci_smd_recv_event(unsigned long temp)
+{
+	int len;
+	int rc;
+	struct sk_buff *skb;
+	unsigned  char *buf;
+	struct radio_data *hsmd = &hs;
+	len = smd_read_avail(hsmd->fm_channel);
+
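+	/* Drain the SMD channel: copy each pending chunk into an skb and
+	 * hand it to the HCI core.
+	 */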
+	while (len) {
+		skb = alloc_skb(len, GFP_KERNEL);
+		if (!skb) {
+			FMDERR("Memory not allocated for the socket");
+			return;
+		}
+
+		buf = kmalloc(len, GFP_KERNEL);
+		if (!buf) {
+			kfree_skb(skb);
+			FMDERR("Error in allocating buffer memory");
+			return;
+		}
+
+		rc = smd_read_from_cb(hsmd->fm_channel, (void *)buf, len);
+
+		memcpy(skb_put(skb, len), buf, len);
+
+		skb_orphan(skb);
+		skb->dev = (struct net_device   *)hs.hdev;
+
+		rc = radio_hci_recv_frame(skb);
+
+		kfree(buf);
+		len = smd_read_avail(hsmd->fm_channel);
+	}
+}
+
+static int radio_hci_smd_send_frame(struct sk_buff *skb)
+{
+	int len = 0;
+
+	len = smd_write(hs.fm_channel, skb->data, skb->len);
+	if (len < skb->len) {
+		FMDERR("Failed to write Data %d", len);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static void radio_hci_smd_notify_cmd(void *data, unsigned int event)
+{
+	struct radio_hci_dev *hdev = hs.hdev;
+
+	if (!hdev) {
+		FMDERR("Frame for unknown HCI device (hdev=NULL)");
+		return;
+	}
+
+	switch (event) {
+	case SMD_EVENT_DATA:
+		tasklet_schedule(&hs.rx_task);
+		break;
+	case SMD_EVENT_OPEN:
+	case SMD_EVENT_CLOSE:
+		break;
+	default:
+		break;
+	}
+}
+
+static int radio_hci_smd_register_dev(struct radio_data *hsmd)
+{
+	struct radio_hci_dev *hdev;
+	int rc;
+
+	hdev = kzalloc(sizeof(struct radio_hci_dev), GFP_KERNEL);
+	if (!hdev)
+		return -ENOMEM;
+	hsmd->hdev = hdev;
+	tasklet_init(&hsmd->rx_task, radio_hci_smd_recv_event,
+		(unsigned long) hsmd);
+	hdev->send  = radio_hci_smd_send_frame;
+	hdev->destruct = radio_hci_smd_destruct;
+
+	/* Open the SMD Channel and device and register the callback function */
+	rc = smd_named_open_on_edge("APPS_FM", SMD_APPS_WCNSS,
+		&hsmd->fm_channel, hdev, radio_hci_smd_notify_cmd);
+
+	if (rc < 0) {
+		FMDERR("Cannot open the command channel");
+		return -ENODEV;
+	}
+
+	smd_disable_read_intr(hsmd->fm_channel);
+
+	if (radio_hci_register_dev(hdev) < 0) {
+		FMDERR("Can't register HCI device");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void radio_hci_smd_deregister(void)
+{
+	smd_close(hs.fm_channel);
+	hs.fm_channel = 0;
+}
+
+static int radio_hci_smd_init(void)
+{
+	return radio_hci_smd_register_dev(&hs);
+}
+module_init(radio_hci_smd_init);
+
+static void __exit radio_hci_smd_exit(void)
+{
+	radio_hci_smd_deregister();
+}
+module_exit(radio_hci_smd_exit);
+
+MODULE_DESCRIPTION("Bluetooth SMD driver");
+MODULE_AUTHOR("Ankur Nandwani <ankurn@codeaurora.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/radio/radio-iris.c b/drivers/media/radio/radio-iris.c
new file mode 100644
index 0000000..fe53ca8
--- /dev/null
+++ b/drivers/media/radio/radio-iris.c
@@ -0,0 +1,2220 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define DRIVER_AUTHOR "Archana Ramchandran <archanar@codeaurora.org>"
+#define DRIVER_NAME "radio-iris"
+#define DRIVER_CARD "Qualcomm FM Radio Transceiver"
+#define DRIVER_DESC "Driver for Qualcomm FM Radio Transceiver "
+
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/kfifo.h>
+#include <linux/param.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/videodev2.h>
+#include <linux/mutex.h>
+#include <linux/unistd.h>
+#include <linux/atomic.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include <media/radio-iris.h>
+#include <asm/unaligned.h>
+
+static unsigned int rds_buf = 100;
+module_param(rds_buf, uint, 0);
+MODULE_PARM_DESC(rds_buf, "RDS buffer entries: *100*");
+
+static void radio_hci_cmd_task(unsigned long arg);
+static void radio_hci_rx_task(unsigned long arg);
+static struct video_device *video_get_dev(void);
+static DEFINE_RWLOCK(hci_task_lock);
+
+struct iris_device {
+	struct device *dev;
+	struct kfifo data_buf[IRIS_BUF_MAX];
+
+	int pending_xfrs[IRIS_XFR_MAX];
+	int xfr_bytes_left;
+	int xfr_in_progress;
+	struct completion sync_xfr_start;
+	int tune_req;
+
+	struct video_device *videodev;
+
+	struct mutex lock;
+	spinlock_t buf_lock[IRIS_BUF_MAX];
+	wait_queue_head_t event_queue;
+	wait_queue_head_t read_queue;
+
+	struct radio_hci_dev *fm_hdev;
+
+	struct v4l2_capability *g_cap;
+	struct v4l2_control *g_ctl;
+
+	struct hci_fm_mute_mode_req mute_mode;
+	struct hci_fm_stereo_mode_req stereo_mode;
+	struct hci_fm_station_rsp fm_st_rsp;
+	struct hci_fm_search_station_req srch_st;
+	struct hci_fm_search_rds_station_req srch_rds;
+	struct hci_fm_search_station_list_req srch_st_list;
+	struct hci_fm_recv_conf_req recv_conf;
+	struct hci_fm_rds_grp_req rds_grp;
+	unsigned char g_search_mode;
+	unsigned char g_scan_time;
+	unsigned int g_antenna;
+	unsigned int g_rds_grp_proc_ps;
+	enum iris_region_t region;
+	struct hci_fm_dbg_param_rsp st_dbg_param;
+	struct hci_ev_srch_list_compl srch_st_result;
+};
+
+static struct video_device *priv_videodev;
+
+static struct v4l2_queryctrl iris_v4l2_queryctrl[] = {
+	{
+	.id	= V4L2_CID_AUDIO_VOLUME,
+	.type	= V4L2_CTRL_TYPE_INTEGER,
+	.name	= "Volume",
+	.minimum	= 0,
+	.maximum	= 15,
+	.step	=	1,
+	.default_value	=	15,
+	},
+	{
+	.id	=	V4L2_CID_AUDIO_BALANCE,
+	.flags	= V4L2_CTRL_FLAG_DISABLED,
+	},
+	{
+	.id	=	V4L2_CID_AUDIO_BASS,
+	.flags	=	V4L2_CTRL_FLAG_DISABLED,
+	},
+	{
+	.id	=	V4L2_CID_AUDIO_TREBLE,
+	.flags	=	V4L2_CTRL_FLAG_DISABLED,
+	},
+	{
+	.id	=	V4L2_CID_AUDIO_MUTE,
+	.type	=	V4L2_CTRL_TYPE_BOOLEAN,
+	.name	=	"Mute",
+	.minimum	=	0,
+	.maximum	=	1,
+	.step	=	1,
+	.default_value	= 1,
+	},
+	{
+	.id	=	V4L2_CID_AUDIO_LOUDNESS,
+	.flags	=	V4L2_CTRL_FLAG_DISABLED,
+	},
+	{
+	.id	=	V4L2_CID_PRIVATE_IRIS_SRCHMODE,
+	.type	=	V4L2_CTRL_TYPE_INTEGER,
+	.name	=	"Search mode",
+	.minimum	=	0,
+	.maximum	= 7,
+	.step	= 1,
+	.default_value	= 0,
+	},
+	{
+	.id	= V4L2_CID_PRIVATE_IRIS_SCANDWELL,
+	.type	= V4L2_CTRL_TYPE_INTEGER,
+	.name	=	"Search dwell time",
+	.minimum	= 0,
+	.maximum	= 7,
+	.step	= 1,
+	.default_value	= 0,
+	},
+	{
+	.id	= V4L2_CID_PRIVATE_IRIS_SRCHON,
+	.type	= V4L2_CTRL_TYPE_BOOLEAN,
+	.name	= "Search on/off",
+	.minimum	= 0,
+	.maximum	= 1,
+	.step	= 1,
+	.default_value	= 1,
+
+	},
+	{
+	.id	=	V4L2_CID_PRIVATE_IRIS_STATE,
+	.type	= V4L2_CTRL_TYPE_INTEGER,
+	.name	= "radio off/rx/tx/reset",
+	.minimum	= 0,
+	.maximum	= 3,
+	.step	= 1,
+	.default_value	=	1,
+
+	},
+	{
+	.id	= V4L2_CID_PRIVATE_IRIS_REGION,
+	.type	= V4L2_CTRL_TYPE_INTEGER,
+	.name	=	"radio standard",
+	.minimum	=	0,
+	.maximum	=	2,
+	.step	=	1,
+	.default_value	=	0,
+	},
+	{
+	.id	=	V4L2_CID_PRIVATE_IRIS_SIGNAL_TH,
+	.type	=	V4L2_CTRL_TYPE_INTEGER,
+	.name	=	"Signal Threshold",
+	.minimum	=	0x80,
+	.maximum	=	0x7F,
+	.step	=	1,
+	.default_value	=	0,
+	},
+	{
+	.id	=	V4L2_CID_PRIVATE_IRIS_SRCH_PTY,
+	.type	=	V4L2_CTRL_TYPE_INTEGER,
+	.name	=	"Search PTY",
+	.minimum	=	0,
+	.maximum	=	31,
+	.default_value	=	0,
+	},
+	{
+	.id	=	V4L2_CID_PRIVATE_IRIS_SRCH_PI,
+	.type	=	V4L2_CTRL_TYPE_INTEGER,
+	.name	=	"Search PI",
+	.minimum	=	0,
+	.maximum	=	0xFF,
+	.default_value	=	0,
+	},
+	{
+	.id	=	V4L2_CID_PRIVATE_IRIS_SRCH_CNT,
+	.type	=	V4L2_CTRL_TYPE_INTEGER,
+	.name	=	"Preset num",
+	.minimum	=	0,
+	.maximum	=	12,
+	.default_value	=	0,
+	},
+	{
+	.id	=	V4L2_CID_PRIVATE_IRIS_EMPHASIS,
+	.type	=	V4L2_CTRL_TYPE_BOOLEAN,
+	.name	=	"Emphasis",
+	.minimum	=	0,
+	.maximum	=	1,
+	.default_value	=	0,
+	},
+	{
+	.id	=	V4L2_CID_PRIVATE_IRIS_RDS_STD,
+	.type	=	V4L2_CTRL_TYPE_BOOLEAN,
+	.name	=	"RDS standard",
+	.minimum	=	0,
+	.maximum	=	1,
+	.default_value	=	0,
+	},
+	{
+	.id	=	V4L2_CID_PRIVATE_IRIS_SPACING,
+	.type	=	V4L2_CTRL_TYPE_INTEGER,
+	.name	=	"Channel spacing",
+	.minimum	=	0,
+	.maximum	=	2,
+	.default_value	=	0,
+	},
+	{
+	.id	=	V4L2_CID_PRIVATE_IRIS_RDSON,
+	.type	=	V4L2_CTRL_TYPE_BOOLEAN,
+	.name	=	"RDS on/off",
+	.minimum	=	0,
+	.maximum	=	1,
+	.default_value	=	0,
+	},
+	{
+	.id	=	V4L2_CID_PRIVATE_IRIS_RDSGROUP_MASK,
+	.type	=	V4L2_CTRL_TYPE_INTEGER,
+	.name	=	"RDS group mask",
+	.minimum	=	0,
+	.maximum	=	0xFFFFFFFF,
+	.default_value	=	0,
+	},
+	{
+	.id	=	V4L2_CID_PRIVATE_IRIS_RDSGROUP_PROC,
+	.type	=	V4L2_CTRL_TYPE_INTEGER,
+	.name	=	"RDS processing",
+	.minimum	=	0,
+	.maximum	=	0xFF,
+	.default_value	=	0,
+	},
+	{
+	.id	=	V4L2_CID_PRIVATE_IRIS_RDSD_BUF,
+	.type	=	V4L2_CTRL_TYPE_INTEGER,
+	.name	=	"RDS data groups to buffer",
+	.minimum	=	1,
+	.maximum	=	21,
+	.default_value	=	0,
+	},
+	{
+	.id	=	V4L2_CID_PRIVATE_IRIS_PSALL,
+	.type	=	V4L2_CTRL_TYPE_BOOLEAN,
+	.name	=	"pass all ps strings",
+	.minimum	=	0,
+	.maximum	=	1,
+	.default_value	=	0,
+	},
+	{
+	.id	=	V4L2_CID_PRIVATE_IRIS_LP_MODE,
+	.type	=	V4L2_CTRL_TYPE_BOOLEAN,
+	.name	=	"Low power mode",
+	.minimum	=	0,
+	.maximum	=	1,
+	.default_value	=	0,
+	},
+	{
+	.id	=	V4L2_CID_PRIVATE_IRIS_ANTENNA,
+	.type	=	V4L2_CTRL_TYPE_BOOLEAN,
+	.name	=	"headset/internal",
+	.minimum	=	0,
+	.maximum	=	1,
+	.default_value	=	0,
+	},
+
+	{
+	.id	=	V4L2_CID_PRIVATE_IRIS_TX_SETPSREPEATCOUNT,
+	.type	=	V4L2_CTRL_TYPE_INTEGER,
+	.name	=	"Set PS REPEATCOUNT",
+	.minimum	=	0,
+	.maximum	=	15,
+	},
+	{
+	.id	=	V4L2_CID_PRIVATE_IRIS_STOP_RDS_TX_PS_NAME,
+	.type	=	V4L2_CTRL_TYPE_BOOLEAN,
+	.name	=	"Stop PS NAME",
+	.minimum	=	0,
+	.maximum	=	1,
+	},
+	{
+	.id	=	V4L2_CID_PRIVATE_IRIS_STOP_RDS_TX_RT,
+	.type	=	V4L2_CTRL_TYPE_BOOLEAN,
+	.name	=	"Stop RT",
+	.minimum	=	0,
+	.maximum	=	1,
+	},
+
+};
+
+static void iris_q_event(struct iris_device *radio,
+				enum iris_evt_t event)
+{
+	struct kfifo *data_b = &radio->data_buf[IRIS_BUF_EVENTS];
+	unsigned char evt = event;
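+	/* Queue the one-byte event code and wake any readers blocked on the
+	 * event queue.
+	 */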
+	if (kfifo_in_locked(data_b, &evt, 1, &radio->buf_lock[IRIS_BUF_EVENTS]))
+		wake_up_interruptible(&radio->event_queue);
+}
+
+static int hci_send_frame(struct sk_buff *skb)
+{
+	struct radio_hci_dev *hdev = (struct radio_hci_dev *) skb->dev;
+
+	if (!hdev) {
+		kfree_skb(skb);
+		return -ENODEV;
+	}
+
+	__net_timestamp(skb);
+
+	skb_orphan(skb);
+	return hdev->send(skb);
+}
+
+static void radio_hci_cmd_task(unsigned long arg)
+{
+	struct radio_hci_dev *hdev = (struct radio_hci_dev *) arg;
+	struct sk_buff *skb;
+	if (!(atomic_read(&hdev->cmd_cnt))
+		&& time_after(jiffies, hdev->cmd_last_tx + HZ)) {
+		FMDERR("%s command tx timeout", hdev->name);
+		atomic_set(&hdev->cmd_cnt, 1);
+	}
+
+	skb = skb_dequeue(&hdev->cmd_q);
+	if (atomic_read(&hdev->cmd_cnt) && skb) {
+		kfree_skb(hdev->sent_cmd);
+		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
+		if (hdev->sent_cmd) {
+			atomic_dec(&hdev->cmd_cnt);
+			hci_send_frame(skb);
+			hdev->cmd_last_tx = jiffies;
+		} else {
+			skb_queue_head(&hdev->cmd_q, skb);
+			tasklet_schedule(&hdev->cmd_task);
+		}
+	}
+
+}
+
+static void radio_hci_rx_task(unsigned long arg)
+{
+	struct radio_hci_dev *hdev = (struct radio_hci_dev *) arg;
+	struct sk_buff *skb;
+
+	read_lock(&hci_task_lock);
+
+	skb = skb_dequeue(&hdev->rx_q);
+	radio_hci_event_packet(hdev, skb);
+
+	read_unlock(&hci_task_lock);
+}
+
+int radio_hci_register_dev(struct radio_hci_dev *hdev)
+{
+	struct iris_device *radio = video_get_drvdata(video_get_dev());
+	if (!radio) {
+		FMDERR(":radio is null");
+		return -EINVAL;
+	}
+
+	if (!hdev) {
+		FMDERR("hdev is null");
+		return -EINVAL;
+	}
+
+	hdev->flags = 0;
+
+	tasklet_init(&hdev->cmd_task, radio_hci_cmd_task, (unsigned long)
+		hdev);
+	tasklet_init(&hdev->rx_task, radio_hci_rx_task, (unsigned long)
+		hdev);
+
+	init_waitqueue_head(&hdev->req_wait_q);
+
+	skb_queue_head_init(&hdev->rx_q);
+	skb_queue_head_init(&hdev->cmd_q);
+	skb_queue_head_init(&hdev->raw_q);
+
+	if (!radio)
+		FMDERR(":radio is null");
+
+	radio->fm_hdev = hdev;
+
+	return 0;
+}
+EXPORT_SYMBOL(radio_hci_register_dev);
+
+int radio_hci_unregister_dev(struct radio_hci_dev *hdev)
+{
+	struct iris_device *radio = video_get_drvdata(video_get_dev());
+	if (!radio) {
+		FMDERR(":radio is null");
+		return -EINVAL;
+	}
+
+	tasklet_kill(&hdev->rx_task);
+	tasklet_kill(&hdev->cmd_task);
+	skb_queue_purge(&hdev->rx_q);
+	skb_queue_purge(&hdev->cmd_q);
+	skb_queue_purge(&hdev->raw_q);
+	kfree(radio->fm_hdev);
+	kfree(radio->videodev);
+
+	return 0;
+}
+EXPORT_SYMBOL(radio_hci_unregister_dev);
+
+int radio_hci_recv_frame(struct sk_buff *skb)
+{
+	struct radio_hci_dev *hdev = (struct radio_hci_dev *) skb->dev;
+	if (!hdev) {
+		FMDERR("hdev is null while receiving frame");
+		kfree_skb(skb);
+		return -ENXIO;
+	}
+
+	__net_timestamp(skb);
+
+	radio_hci_event_packet(hdev, skb);
+
+	return 0;
+}
+EXPORT_SYMBOL(radio_hci_recv_frame);
+
+int radio_hci_send_cmd(struct radio_hci_dev *hdev, __u16 opcode, __u32 plen,
+		void *param)
+{
+	int len = RADIO_HCI_COMMAND_HDR_SIZE + plen;
+	struct radio_hci_command_hdr *hdr;
+	struct sk_buff *skb;
+	int ret = 0;
+
+	skb = alloc_skb(len, GFP_ATOMIC);
+	if (!skb) {
+		FMDERR("%s no memory for command", hdev->name);
+		return -ENOMEM;
+	}
+
+	hdr = (struct radio_hci_command_hdr *) skb_put(skb,
+		RADIO_HCI_COMMAND_HDR_SIZE);
+	hdr->opcode = cpu_to_le16(opcode);
+	hdr->plen   = plen;
+
+	if (plen)
+		memcpy(skb_put(skb, plen), param, plen);
+
+	skb->dev = (void *) hdev;
+
+	ret = hci_send_frame(skb);
+
+	return ret;
+}
+EXPORT_SYMBOL(radio_hci_send_cmd);
+
+static int hci_fm_enable_recv_req(struct radio_hci_dev *hdev,
+	unsigned long param)
+{
+	__u16 opcode = 0;
+
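+	/* Build the 16-bit opcode from the command group (OGF) and command
+	 * field (OCF), then send the zero-length command.
+	 */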
+	opcode = hci_opcode_pack(HCI_OGF_FM_RECV_CTRL_CMD_REQ,
+		HCI_OCF_FM_ENABLE_RECV_REQ);
+	return radio_hci_send_cmd(hdev, opcode, 0, NULL);
+}
+
+static int hci_fm_disable_recv_req(struct radio_hci_dev *hdev,
+	unsigned long param)
+{
+	__u16 opcode = 0;
+
+	opcode = hci_opcode_pack(HCI_OGF_FM_RECV_CTRL_CMD_REQ,
+		HCI_OCF_FM_DISABLE_RECV_REQ);
+	return radio_hci_send_cmd(hdev, opcode, 0, NULL);
+}
+
+static int hci_get_fm_recv_conf_req(struct radio_hci_dev *hdev,
+	unsigned long param)
+{
+	__u16 opcode = 0;
+
+	opcode = hci_opcode_pack(HCI_OGF_FM_RECV_CTRL_CMD_REQ,
+		HCI_OCF_FM_GET_RECV_CONF_REQ);
+	return radio_hci_send_cmd(hdev, opcode, 0, NULL);
+}
+
+static int hci_set_fm_recv_conf_req(struct radio_hci_dev *hdev,
+	unsigned long param)
+{
+	__u16 opcode = 0;
+
+	struct hci_fm_recv_conf_req *recv_conf_req =
+		(struct hci_fm_recv_conf_req *) param;
+
+	opcode = hci_opcode_pack(HCI_OGF_FM_RECV_CTRL_CMD_REQ,
+		HCI_OCF_FM_SET_RECV_CONF_REQ);
+	return radio_hci_send_cmd(hdev, opcode, sizeof((*recv_conf_req)),
+		recv_conf_req);
+}
+
+static int hci_fm_get_station_param_req(struct radio_hci_dev *hdev,
+		unsigned long param)
+{
+	__u16 opcode = 0;
+
+	opcode = hci_opcode_pack(HCI_OGF_FM_RECV_CTRL_CMD_REQ,
+		HCI_OCF_FM_GET_STATION_PARAM_REQ);
+	return radio_hci_send_cmd(hdev, opcode, 0, NULL);
+}
+
+static int hci_set_fm_mute_mode_req(struct radio_hci_dev *hdev,
+		unsigned long param)
+{
+	__u16 opcode = 0;
+	struct hci_fm_mute_mode_req *mute_mode_req =
+		(struct hci_fm_mute_mode_req *) param;
+
+	opcode = hci_opcode_pack(HCI_OGF_FM_RECV_CTRL_CMD_REQ,
+		HCI_OCF_FM_SET_MUTE_MODE_REQ);
+	return radio_hci_send_cmd(hdev, opcode, sizeof((*mute_mode_req)),
+		mute_mode_req);
+}
+
+static int hci_set_fm_stereo_mode_req(struct radio_hci_dev *hdev,
+		unsigned long param)
+{
+	__u16 opcode = 0;
+	struct hci_fm_stereo_mode_req *stereo_mode_req =
+		(struct hci_fm_stereo_mode_req *) param;
+	opcode = hci_opcode_pack(HCI_OGF_FM_RECV_CTRL_CMD_REQ,
+		HCI_OCF_FM_SET_STEREO_MODE_REQ);
+	return radio_hci_send_cmd(hdev, opcode, sizeof((*stereo_mode_req)),
+		stereo_mode_req);
+}
+
+static int hci_fm_set_antenna_req(struct radio_hci_dev *hdev,
+	unsigned long param)
+{
+	__u16 opcode = 0;
+
+	__u8 antenna = param;
+
+	opcode = hci_opcode_pack(HCI_OGF_FM_RECV_CTRL_CMD_REQ,
+		HCI_OCF_FM_SET_ANTENNA);
+	return radio_hci_send_cmd(hdev, opcode, sizeof(antenna), &antenna);
+}
+
+static int hci_fm_set_sig_threshold_req(struct radio_hci_dev *hdev,
+		unsigned long param)
+{
+	__u16 opcode = 0;
+
+	__u8 sig_threshold = param;
+
+	opcode = hci_opcode_pack(HCI_OGF_FM_COMMON_CTRL_CMD_REQ,
+		HCI_OCF_FM_SET_SIGNAL_THRESHOLD);
+	return radio_hci_send_cmd(hdev, opcode, sizeof(sig_threshold),
+		&sig_threshold);
+}
+
+static int hci_fm_get_sig_threshold_req(struct radio_hci_dev *hdev,
+		unsigned long param)
+{
+	__u16 opcode = 0;
+
+	opcode = hci_opcode_pack(HCI_OGF_FM_COMMON_CTRL_CMD_REQ,
+		HCI_OCF_FM_GET_SIGNAL_THRESHOLD);
+	return radio_hci_send_cmd(hdev, opcode, 0, NULL);
+}
+
+static int hci_fm_get_program_service_req(struct radio_hci_dev *hdev,
+		unsigned long param)
+{
+	__u16 opcode = 0;
+
+	opcode = hci_opcode_pack(HCI_OGF_FM_COMMON_CTRL_CMD_REQ,
+		HCI_OCF_FM_GET_PROGRAM_SERVICE_REQ);
+	return radio_hci_send_cmd(hdev, opcode, 0, NULL);
+}
+
+static int hci_fm_get_radio_text_req(struct radio_hci_dev *hdev,
+		unsigned long param)
+{
+	__u16 opcode = 0;
+
+	opcode = hci_opcode_pack(HCI_OGF_FM_COMMON_CTRL_CMD_REQ,
+		HCI_OCF_FM_GET_RADIO_TEXT_REQ);
+	return radio_hci_send_cmd(hdev, opcode, 0, NULL);
+}
+
+static int hci_fm_get_af_list_req(struct radio_hci_dev *hdev,
+	unsigned long param)
+{
+	__u16 opcode = 0;
+
+	opcode = hci_opcode_pack(HCI_OGF_FM_COMMON_CTRL_CMD_REQ,
+		HCI_OCF_FM_GET_AF_LIST_REQ);
+	return radio_hci_send_cmd(hdev, opcode, 0, NULL);
+}
+
+static int hci_fm_search_stations_req(struct radio_hci_dev *hdev,
+		unsigned long param)
+{
+	__u16 opcode = 0;
+	struct hci_fm_search_station_req *srch_stations =
+		(struct hci_fm_search_station_req *) param;
+
+	opcode = hci_opcode_pack(HCI_OGF_FM_RECV_CTRL_CMD_REQ,
+		HCI_OCF_FM_SEARCH_STATIONS);
+	return radio_hci_send_cmd(hdev, opcode, sizeof((*srch_stations)),
+		srch_stations);
+}
+
+static int hci_fm_srch_rds_stations_req(struct radio_hci_dev *hdev,
+		unsigned long param)
+{
+	__u16 opcode = 0;
+	struct hci_fm_search_rds_station_req *srch_stations =
+		(struct hci_fm_search_rds_station_req *) param;
+
+	opcode = hci_opcode_pack(HCI_OGF_FM_RECV_CTRL_CMD_REQ,
+		HCI_OCF_FM_SEARCH_RDS_STATIONS);
+	return radio_hci_send_cmd(hdev, opcode, sizeof((*srch_stations)),
+		srch_stations);
+}
+
+static int hci_fm_srch_station_list_req(struct radio_hci_dev *hdev,
+		unsigned long param)
+{
+	__u16 opcode = 0;
+	struct hci_fm_search_station_list_req *srch_list =
+		(struct hci_fm_search_station_list_req *) param;
+
+	opcode = hci_opcode_pack(HCI_OGF_FM_RECV_CTRL_CMD_REQ,
+		HCI_OCF_FM_SEARCH_STATIONS_LIST);
+	return radio_hci_send_cmd(hdev, opcode, sizeof((*srch_list)),
+		srch_list);
+}
+
+static int hci_fm_cancel_search_req(struct radio_hci_dev *hdev,
+		unsigned long param)
+{
+	__u16 opcode = 0;
+
+	opcode = hci_opcode_pack(HCI_OGF_FM_RECV_CTRL_CMD_REQ,
+		HCI_OCF_FM_CANCEL_SEARCH);
+	return radio_hci_send_cmd(hdev, opcode, 0, NULL);
+}
+
+static int hci_fm_rds_grp_process_req(struct radio_hci_dev *hdev,
+		unsigned long param)
+{
+	__u16 opcode = 0;
+
+	__u32 fm_grps_process = param;
+
+	opcode = hci_opcode_pack(HCI_OGF_FM_RECV_CTRL_CMD_REQ,
+		HCI_OCF_FM_RDS_GRP_PROCESS);
+	return radio_hci_send_cmd(hdev, opcode, sizeof(fm_grps_process),
+		&fm_grps_process);
+}
+
+static int hci_fm_tune_station_req(struct radio_hci_dev *hdev,
+	unsigned long param)
+{
+	__u16 opcode = 0;
+
+	__u32 tune_freq = param;
+
+	opcode = hci_opcode_pack(HCI_OGF_FM_COMMON_CTRL_CMD_REQ,
+		HCI_OCF_FM_TUNE_STATION_REQ);
+	return radio_hci_send_cmd(hdev, opcode, sizeof(tune_freq), &tune_freq);
+}
+
+static int hci_def_data_read_req(struct radio_hci_dev *hdev,
+	unsigned long param)
+{
+	__u16 opcode = 0;
+	struct hci_fm_def_data_rd_req *def_data_rd =
+		(struct hci_fm_def_data_rd_req *) param;
+
+	opcode = hci_opcode_pack(HCI_OGF_FM_RECV_CTRL_CMD_REQ,
+		HCI_OCF_FM_DEFAULT_DATA_READ);
+	return radio_hci_send_cmd(hdev, opcode, sizeof((*def_data_rd)),
+	def_data_rd);
+}
+
+static int hci_def_data_write_req(struct radio_hci_dev *hdev,
+	unsigned long param)
+{
+	__u16 opcode = 0;
+	struct hci_fm_def_data_wr_req *def_data_wr =
+		(struct hci_fm_def_data_wr_req *) param;
+
+	opcode = hci_opcode_pack(HCI_OGF_FM_RECV_CTRL_CMD_REQ,
+		HCI_OCF_FM_DEFAULT_DATA_WRITE);
+	return radio_hci_send_cmd(hdev, opcode, sizeof((*def_data_wr)),
+	def_data_wr);
+}
+
+static int hci_fm_reset_req(struct radio_hci_dev *hdev, unsigned long param)
+{
+	__u16 opcode = 0;
+
+	opcode = hci_opcode_pack(HCI_OGF_FM_COMMON_CTRL_CMD_REQ,
+		HCI_OCF_FM_RESET);
+	return radio_hci_send_cmd(hdev, opcode, 0, NULL);
+}
+
+static int hci_fm_get_feature_lists_req(struct radio_hci_dev *hdev,
+		unsigned long param)
+{
+	__u16 opcode = 0;
+
+	opcode = hci_opcode_pack(HCI_OGF_FM_COMMON_CTRL_CMD_REQ,
+		HCI_OCF_FM_GET_FEATURE_LIST);
+	return radio_hci_send_cmd(hdev, opcode, 0, NULL);
+}
+
+static int hci_fm_do_calibration_req(struct radio_hci_dev *hdev,
+		unsigned long param)
+{
+	__u16 opcode = 0;
+
+	__u8 mode = param;
+
+	opcode = hci_opcode_pack(HCI_OGF_FM_COMMON_CTRL_CMD_REQ,
+		HCI_OCF_FM_DO_CALIBRATION);
+	return radio_hci_send_cmd(hdev, opcode, sizeof(mode), &mode);
+}
+
+static int hci_read_grp_counters_req(struct radio_hci_dev *hdev,
+		unsigned long param)
+{
+	__u16 opcode = 0;
+
+	__u8 reset_counters = param;
+
+	opcode = hci_opcode_pack(HCI_OGF_FM_COMMON_CTRL_CMD_REQ,
+		HCI_OCF_FM_READ_GRP_COUNTERS);
+	return radio_hci_send_cmd(hdev, opcode, sizeof(reset_counters),
+		&reset_counters);
+}
+
+static int hci_peek_data_req(struct radio_hci_dev *hdev, unsigned long param)
+{
+	__u16 opcode = 0;
+	struct hci_fm_peek_req *peek_data = (struct hci_fm_peek_req *) param;
+
+	opcode = hci_opcode_pack(HCI_OGF_FM_RECV_CTRL_CMD_REQ,
+		HCI_OCF_FM_PEEK_DATA);
+	return radio_hci_send_cmd(hdev, opcode, sizeof((*peek_data)),
+	peek_data);
+}
+
+static int hci_poke_data_req(struct radio_hci_dev *hdev, unsigned long param)
+{
+	__u16 opcode = 0;
+	struct hci_fm_poke_req *poke_data = (struct hci_fm_poke_req *) param;
+
+	opcode = hci_opcode_pack(HCI_OGF_FM_RECV_CTRL_CMD_REQ,
+		HCI_OCF_FM_POKE_DATA);
+	return radio_hci_send_cmd(hdev, opcode, sizeof((*poke_data)),
+	poke_data);
+}
+
+static int hci_ssbi_peek_reg_req(struct radio_hci_dev *hdev,
+	unsigned long param)
+{
+	__u16 opcode = 0;
+	struct hci_fm_ssbi_req *ssbi_peek = (struct hci_fm_ssbi_req *) param;
+
+	opcode = hci_opcode_pack(HCI_OGF_FM_RECV_CTRL_CMD_REQ,
+		HCI_OCF_FM_SSBI_PEEK_REG);
+	return radio_hci_send_cmd(hdev, opcode, sizeof((*ssbi_peek)),
+	ssbi_peek);
+}
+
+static int hci_ssbi_poke_reg_req(struct radio_hci_dev *hdev,
+	unsigned long param)
+{
+	__u16 opcode = 0;
+	struct hci_fm_ssbi_req *ssbi_poke = (struct hci_fm_ssbi_req *) param;
+
+	opcode = hci_opcode_pack(HCI_OGF_FM_RECV_CTRL_CMD_REQ,
+		HCI_OCF_FM_SSBI_POKE_REG);
+	return radio_hci_send_cmd(hdev, opcode, sizeof((*ssbi_poke)),
+	ssbi_poke);
+}
+
+static int hci_fm_get_station_dbg_param_req(struct radio_hci_dev *hdev,
+		unsigned long param)
+{
+	__u16 opcode = 0;
+
+	opcode = hci_opcode_pack(HCI_OGF_FM_COMMON_CTRL_CMD_REQ,
+		HCI_OCF_FM_STATION_DBG_PARAM);
+	return radio_hci_send_cmd(hdev, opcode, 0, NULL);
+}
+
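+/*
+ * Map an FM controller HCI status code onto a Linux errno value.
+ * Unrecognised codes fall through to -ENOSYS.
+ */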
+static int radio_hci_err(__u16 code)
+{
+	switch (code) {
+	case 0:
+		return 0;
+	case 0x01:
+		return -EBADRQC;
+	case 0x02:
+		return -ENOTCONN;
+	case 0x03:
+		return -EIO;
+	case 0x07:
+		return -ENOMEM;
+	case 0x0c:
+		return -EBUSY;
+	case 0x11:
+		return -EOPNOTSUPP;
+	case 0x12:
+		return -EINVAL;
+	default:
+		return -ENOSYS;
+	}
+}
+
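+/*
+ * Issue an HCI request and sleep on req_wait_q until the command
+ * complete/status handlers report a result or the timeout expires.
+ * The controller status code is then translated into an errno.
+ */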
+static int __radio_hci_request(struct radio_hci_dev *hdev,
+		int (*req)(struct radio_hci_dev *hdev,
+			unsigned long param),
+			unsigned long param, __u32 timeout)
+{
+	int err = 0;
+
+	DECLARE_WAITQUEUE(wait, current);
+
+	hdev->req_status = HCI_REQ_PEND;
+
+	add_wait_queue(&hdev->req_wait_q, &wait);
+	set_current_state(TASK_INTERRUPTIBLE);
+
+	err = req(hdev, param);
+
+	schedule_timeout(timeout);
+
+	remove_wait_queue(&hdev->req_wait_q, &wait);
+
+	if (signal_pending(current))
+		return -EINTR;
+
+	switch (hdev->req_status) {
+	case HCI_REQ_DONE:
+	case HCI_REQ_STATUS:
+		err = radio_hci_err(hdev->req_result);
+		break;
+
+	case HCI_REQ_CANCELED:
+		err = -hdev->req_result;
+		break;
+
+	default:
+		err = -ETIMEDOUT;
+		break;
+	}
+
+	hdev->req_status = hdev->req_result = 0;
+
+	return err;
+}
+
+static inline int radio_hci_request(struct radio_hci_dev *hdev,
+		int (*req)(struct radio_hci_dev *hdev, unsigned long param),
+		unsigned long param, __u32 timeout)
+{
+	return __radio_hci_request(hdev, req, param, timeout);
+}
+
+static int hci_set_fm_recv_conf(struct hci_fm_recv_conf_req *arg,
+		struct radio_hci_dev *hdev)
+{
+	int ret = 0;
+	struct hci_fm_recv_conf_req *set_recv_conf = arg;
+
+	ret = radio_hci_request(hdev, hci_set_fm_recv_conf_req, (unsigned
+		long)set_recv_conf, RADIO_HCI_TIMEOUT);
+
+	return ret;
+}
+
+static int hci_fm_tune_station(__u32 *arg, struct radio_hci_dev *hdev)
+{
+	int ret = 0;
+	__u32 tune_freq = *arg;
+
+	ret = radio_hci_request(hdev, hci_fm_tune_station_req, tune_freq,
+		RADIO_HCI_TIMEOUT);
+
+	return ret;
+}
+
+static int hci_set_fm_mute_mode(struct hci_fm_mute_mode_req *arg,
+	struct radio_hci_dev *hdev)
+{
+	int ret = 0;
+	struct hci_fm_mute_mode_req *set_mute_conf = arg;
+
+	ret = radio_hci_request(hdev, hci_set_fm_mute_mode_req, (unsigned
+		long)set_mute_conf, RADIO_HCI_TIMEOUT);
+
+	return ret;
+}
+
+static int hci_set_fm_stereo_mode(struct hci_fm_stereo_mode_req *arg,
+	struct radio_hci_dev *hdev)
+{
+	int ret = 0;
+	struct hci_fm_stereo_mode_req *set_stereo_conf = arg;
+
+	ret = radio_hci_request(hdev, hci_set_fm_stereo_mode_req, (unsigned
+		long)set_stereo_conf, RADIO_HCI_TIMEOUT);
+
+	return ret;
+}
+
+static int hci_fm_set_antenna(__u8 *arg, struct radio_hci_dev *hdev)
+{
+	int ret = 0;
+	__u8 antenna = *arg;
+
+	ret = radio_hci_request(hdev, hci_fm_set_antenna_req, antenna,
+		RADIO_HCI_TIMEOUT);
+
+	return ret;
+}
+
+static int hci_fm_set_signal_threshold(__u8 *arg,
+	struct radio_hci_dev *hdev)
+{
+	int ret = 0;
+	__u8 sig_threshold = *arg;
+
+	ret = radio_hci_request(hdev, hci_fm_set_sig_threshold_req,
+		sig_threshold, RADIO_HCI_TIMEOUT);
+
+	return ret;
+}
+
+static int hci_fm_search_stations(struct hci_fm_search_station_req *arg,
+	struct radio_hci_dev *hdev)
+{
+	int ret = 0;
+	struct hci_fm_search_station_req *srch_stations = arg;
+
+	ret = radio_hci_request(hdev, hci_fm_search_stations_req, (unsigned
+		long)srch_stations, RADIO_HCI_TIMEOUT);
+
+	return ret;
+}
+
+static int hci_fm_search_rds_stations(struct hci_fm_search_rds_station_req *arg,
+	struct radio_hci_dev *hdev)
+{
+	int ret = 0;
+	struct hci_fm_search_rds_station_req *srch_stations = arg;
+
+	ret = radio_hci_request(hdev, hci_fm_srch_rds_stations_req, (unsigned
+		long)srch_stations, RADIO_HCI_TIMEOUT);
+
+	return ret;
+}
+
+static int hci_fm_search_station_list(
+	struct hci_fm_search_station_list_req *arg,
+	struct radio_hci_dev *hdev)
+{
+	int ret = 0;
+	struct hci_fm_search_station_list_req *srch_list = arg;
+
+	ret = radio_hci_request(hdev, hci_fm_srch_station_list_req, (unsigned
+		long)srch_list, RADIO_HCI_TIMEOUT);
+
+	return ret;
+}
+
+static int hci_fm_rds_grp(struct hci_fm_rds_grp_req *arg,
+	struct radio_hci_dev *hdev)
+{
+	return 0;
+}
+
+static int hci_fm_rds_grps_process(__u32 *arg, struct radio_hci_dev *hdev)
+{
+	int ret = 0;
+	__u32 fm_grps_process = *arg;
+
+	ret = radio_hci_request(hdev, hci_fm_rds_grp_process_req,
+		fm_grps_process, RADIO_HCI_TIMEOUT);
+
+	return ret;
+}
+
+int hci_def_data_read(struct hci_fm_def_data_rd_req *arg,
+	struct radio_hci_dev *hdev)
+{
+	int ret = 0;
+	struct hci_fm_def_data_rd_req *def_data_rd = arg;
+
+	ret = radio_hci_request(hdev, hci_def_data_read_req, (unsigned
+		long)def_data_rd, RADIO_HCI_TIMEOUT);
+
+	return ret;
+}
+
+int hci_def_data_write(struct hci_fm_def_data_wr_req *arg,
+	struct radio_hci_dev *hdev)
+{
+	int ret = 0;
+	struct hci_fm_def_data_wr_req *def_data_wr = arg;
+
+	ret = radio_hci_request(hdev, hci_def_data_write_req, (unsigned
+		long)def_data_wr, RADIO_HCI_TIMEOUT);
+
+	return ret;
+}
+
+int hci_fm_do_calibration(__u8 *arg, struct radio_hci_dev *hdev)
+{
+	int ret = 0;
+	__u8 mode = *arg;
+
+	ret = radio_hci_request(hdev, hci_fm_do_calibration_req, mode,
+		RADIO_HCI_TIMEOUT);
+
+	return ret;
+}
+
+int hci_read_grp_counters(__u8 *arg, struct radio_hci_dev *hdev)
+{
+	int ret = 0;
+	__u8 reset_counters = *arg;
+
+	ret = radio_hci_request(hdev, hci_read_grp_counters_req,
+		reset_counters, RADIO_HCI_TIMEOUT);
+
+	return ret;
+}
+
+int hci_peek_data(struct hci_fm_peek_req *arg, struct radio_hci_dev *hdev)
+{
+	int ret = 0;
+	struct hci_fm_peek_req *peek_data = arg;
+
+	ret = radio_hci_request(hdev, hci_peek_data_req, (unsigned
+		long)peek_data, RADIO_HCI_TIMEOUT);
+
+	return ret;
+}
+
+int hci_poke_data(struct hci_fm_poke_req *arg, struct radio_hci_dev *hdev)
+{
+	int ret = 0;
+	struct hci_fm_poke_req *poke_data = arg;
+
+	ret = radio_hci_request(hdev, hci_poke_data_req, (unsigned
+		long)poke_data, RADIO_HCI_TIMEOUT);
+
+	return ret;
+}
+
+int hci_ssbi_peek_reg(struct hci_fm_ssbi_req *arg,
+	struct radio_hci_dev *hdev)
+{
+	int ret = 0;
+	struct hci_fm_ssbi_req *ssbi_peek_reg = arg;
+
+	ret = radio_hci_request(hdev, hci_ssbi_peek_reg_req, (unsigned
+		long)ssbi_peek_reg, RADIO_HCI_TIMEOUT);
+
+	return ret;
+}
+
+int hci_ssbi_poke_reg(struct hci_fm_ssbi_req *arg, struct radio_hci_dev *hdev)
+{
+	int ret = 0;
+	struct hci_fm_ssbi_req *ssbi_poke_reg = arg;
+
+	ret = radio_hci_request(hdev, hci_ssbi_poke_reg_req, (unsigned
+		long)ssbi_poke_reg, RADIO_HCI_TIMEOUT);
+
+	return ret;
+}
+
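+/*
+ * Dispatcher for the HCI commands that carry no parameters; each is
+ * issued through the synchronous radio_hci_request() path.
+ */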
+static int hci_cmd(unsigned int cmd, struct radio_hci_dev *hdev)
+{
+	int ret = 0;
+	unsigned long arg = 0;
+
+	switch (cmd) {
+	case HCI_FM_ENABLE_RECV_CMD:
+		ret = radio_hci_request(hdev, hci_fm_enable_recv_req, arg,
+			msecs_to_jiffies(RADIO_HCI_TIMEOUT));
+		break;
+
+	case HCI_FM_DISABLE_RECV_CMD:
+		ret = radio_hci_request(hdev, hci_fm_disable_recv_req, arg,
+			msecs_to_jiffies(RADIO_HCI_TIMEOUT));
+		break;
+
+	case HCI_FM_GET_RECV_CONF_CMD:
+		ret = radio_hci_request(hdev, hci_get_fm_recv_conf_req, arg,
+			msecs_to_jiffies(RADIO_HCI_TIMEOUT));
+		break;
+
+	case HCI_FM_GET_STATION_PARAM_CMD:
+		ret = radio_hci_request(hdev,
+			hci_fm_get_station_param_req, arg,
+			msecs_to_jiffies(RADIO_HCI_TIMEOUT));
+		break;
+
+	case HCI_FM_GET_SIGNAL_TH_CMD:
+		ret = radio_hci_request(hdev,
+			hci_fm_get_sig_threshold_req, arg,
+			msecs_to_jiffies(RADIO_HCI_TIMEOUT));
+		break;
+
+	case HCI_FM_GET_PROGRAM_SERVICE_CMD:
+		ret = radio_hci_request(hdev,
+			hci_fm_get_program_service_req, arg,
+			msecs_to_jiffies(RADIO_HCI_TIMEOUT));
+		break;
+
+	case HCI_FM_GET_RADIO_TEXT_CMD:
+		ret = radio_hci_request(hdev, hci_fm_get_radio_text_req, arg,
+			msecs_to_jiffies(RADIO_HCI_TIMEOUT));
+		break;
+
+	case HCI_FM_GET_AF_LIST_CMD:
+		ret = radio_hci_request(hdev, hci_fm_get_af_list_req, arg,
+			msecs_to_jiffies(RADIO_HCI_TIMEOUT));
+		break;
+
+	case HCI_FM_CANCEL_SEARCH_CMD:
+		ret = radio_hci_request(hdev, hci_fm_cancel_search_req, arg,
+			msecs_to_jiffies(RADIO_HCI_TIMEOUT));
+		break;
+
+	case HCI_FM_RESET_CMD:
+		ret = radio_hci_request(hdev, hci_fm_reset_req, arg,
+			msecs_to_jiffies(RADIO_HCI_TIMEOUT));
+		break;
+
+	case HCI_FM_GET_FEATURES_CMD:
+		ret = radio_hci_request(hdev,
+		hci_fm_get_feature_lists_req, arg,
+			msecs_to_jiffies(RADIO_HCI_TIMEOUT));
+		break;
+
+	case HCI_FM_STATION_DBG_PARAM_CMD:
+		ret = radio_hci_request(hdev,
+		hci_fm_get_station_dbg_param_req, arg,
+			msecs_to_jiffies(RADIO_HCI_TIMEOUT));
+		break;
+
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
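+/*
+ * Completion helpers: record the command result and wake the thread
+ * sleeping in __radio_hci_request().
+ */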
+static void radio_hci_req_complete(struct radio_hci_dev *hdev, int result)
+{
+	hdev->req_result = result;
+	hdev->req_status = HCI_REQ_DONE;
+	wake_up_interruptible(&hdev->req_wait_q);
+}
+
+static void radio_hci_status_complete(struct radio_hci_dev *hdev, int result)
+{
+	hdev->req_result = result;
+	hdev->req_status = HCI_REQ_STATUS;
+	wake_up_interruptible(&hdev->req_wait_q);
+}
+
+static void hci_cc_rsp(struct radio_hci_dev *hdev, struct sk_buff *skb)
+{
+	__u8 status = *((__u8 *) skb->data);
+
+	if (status)
+		return;
+
+	radio_hci_req_complete(hdev, status);
+}
+
+static void hci_cc_fm_disable_rsp(struct radio_hci_dev *hdev,
+	struct sk_buff *skb)
+{
+	__u8 status = *((__u8 *) skb->data);
+	struct iris_device *radio = video_get_drvdata(video_get_dev());
+
+	if (status)
+		return;
+
+	iris_q_event(radio, IRIS_EVT_RADIO_READY);
+
+	radio_hci_req_complete(hdev, status);
+}
+
+static void hci_cc_conf_rsp(struct radio_hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_fm_conf_rsp  *rsp = (void *)skb->data;
+	struct iris_device *radio = video_get_drvdata(video_get_dev());
+
+	if (rsp->status)
+		return;
+
+	radio->recv_conf = rsp->recv_conf_rsp;
+	radio_hci_req_complete(hdev, rsp->status);
+}
+
+static void hci_cc_fm_enable_rsp(struct radio_hci_dev *hdev,
+	struct sk_buff *skb)
+{
+	struct hci_fm_conf_rsp  *rsp = (void *)skb->data;
+	struct iris_device *radio = video_get_drvdata(video_get_dev());
+
+	if (rsp->status)
+		return;
+
+	iris_q_event(radio, IRIS_EVT_RADIO_READY);
+
+	radio_hci_req_complete(hdev, rsp->status);
+}
+
+static void hci_cc_sig_threshold_rsp(struct radio_hci_dev *hdev,
+		struct sk_buff *skb)
+{
+	struct hci_fm_sig_threshold_rsp  *rsp = (void *)skb->data;
+	struct iris_device *radio = video_get_drvdata(video_get_dev());
+	struct v4l2_control *v4l_ctl = radio->g_ctl;
+
+	if (rsp->status)
+		return;
+
+	v4l_ctl->value = rsp->sig_threshold;
+
+	radio_hci_req_complete(hdev, rsp->status);
+}
+
+static void hci_cc_station_rsp(struct radio_hci_dev *hdev, struct sk_buff *skb)
+{
+	struct iris_device *radio = video_get_drvdata(video_get_dev());
+	struct hci_fm_station_rsp *rsp = (void *)skb->data;
+	radio->fm_st_rsp = *(rsp);
+
+	/* Tune is always successful */
+	radio_hci_req_complete(hdev, 0);
+}
+
+static void hci_cc_prg_srv_rsp(struct radio_hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_fm_prgm_srv_rsp  *rsp = (void *)skb->data;
+
+	if (rsp->status)
+		return;
+
+	radio_hci_req_complete(hdev, rsp->status);
+}
+
+static void hci_cc_rd_txt_rsp(struct radio_hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_fm_radio_txt_rsp  *rsp = (void *)skb->data;
+
+	if (rsp->status)
+		return;
+
+	radio_hci_req_complete(hdev, rsp->status);
+}
+
+static void hci_cc_af_list_rsp(struct radio_hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_fm_af_list_rsp  *rsp = (void *)skb->data;
+
+	if (rsp->status)
+		return;
+
+	radio_hci_req_complete(hdev, rsp->status);
+}
+
+static void hci_cc_data_rd_rsp(struct radio_hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_fm_data_rd_rsp  *rsp = (void *)skb->data;
+
+	if (rsp->status)
+		return;
+
+	radio_hci_req_complete(hdev, rsp->status);
+}
+
+static void hci_cc_feature_list_rsp(struct radio_hci_dev *hdev,
+	struct sk_buff *skb)
+{
+	struct hci_fm_feature_list_rsp  *rsp = (void *)skb->data;
+	struct iris_device *radio = video_get_drvdata(video_get_dev());
+	struct v4l2_capability *v4l_cap = radio->g_cap;
+
+	if (rsp->status)
+		return;
+	v4l_cap->capabilities = (rsp->feature_mask & 0x000002) |
+		(rsp->feature_mask & 0x000001);
+
+	radio_hci_req_complete(hdev, rsp->status);
+}
+
+static void hci_cc_dbg_param_rsp(struct radio_hci_dev *hdev,
+	struct sk_buff *skb)
+{
+	struct iris_device *radio = video_get_drvdata(video_get_dev());
+	struct hci_fm_dbg_param_rsp *rsp = (void *)skb->data;
+	radio->st_dbg_param = *(rsp);
+
+	if (radio->st_dbg_param.status)
+		return;
+
+	radio_hci_req_complete(hdev, radio->st_dbg_param.status);
+}
+
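+/*
+ * Command-complete event: strip the command-complete header and
+ * dispatch on the returned opcode to the matching response handler.
+ */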
+static inline void hci_cmd_complete_event(struct radio_hci_dev *hdev,
+		struct sk_buff *skb)
+{
+	struct hci_ev_cmd_complete *cmd_compl_ev = (void *) skb->data;
+	__u16 opcode;
+
+	skb_pull(skb, sizeof(*cmd_compl_ev));
+
+	opcode = __le16_to_cpu(cmd_compl_ev->cmd_opcode);
+
+	switch (opcode) {
+	case hci_recv_ctrl_cmd_op_pack(HCI_OCF_FM_ENABLE_RECV_REQ):
+		hci_cc_fm_enable_rsp(hdev, skb);
+		break;
+	case hci_recv_ctrl_cmd_op_pack(HCI_OCF_FM_GET_RECV_CONF_REQ):
+		hci_cc_conf_rsp(hdev, skb);
+		break;
+
+	case hci_recv_ctrl_cmd_op_pack(HCI_OCF_FM_DISABLE_RECV_REQ):
+		hci_cc_fm_disable_rsp(hdev, skb);
+		break;
+
+	case hci_recv_ctrl_cmd_op_pack(HCI_OCF_FM_SET_RECV_CONF_REQ):
+	case hci_recv_ctrl_cmd_op_pack(HCI_OCF_FM_SET_MUTE_MODE_REQ):
+	case hci_recv_ctrl_cmd_op_pack(HCI_OCF_FM_SET_STEREO_MODE_REQ):
+	case hci_recv_ctrl_cmd_op_pack(HCI_OCF_FM_SET_ANTENNA):
+	case hci_recv_ctrl_cmd_op_pack(HCI_OCF_FM_SET_SIGNAL_THRESHOLD):
+	case hci_recv_ctrl_cmd_op_pack(HCI_OCF_FM_CANCEL_SEARCH):
+	case hci_recv_ctrl_cmd_op_pack(HCI_OCF_FM_RDS_GRP):
+	case hci_recv_ctrl_cmd_op_pack(HCI_OCF_FM_RDS_GRP_PROCESS):
+	case hci_recv_ctrl_cmd_op_pack(HCI_OCF_FM_EN_WAN_AVD_CTRL):
+	case hci_recv_ctrl_cmd_op_pack(HCI_OCF_FM_EN_NOTCH_CTRL):
+	case hci_common_cmd_op_pack(HCI_OCF_FM_DEFAULT_DATA_WRITE):
+	case hci_common_cmd_op_pack(HCI_OCF_FM_RESET):
+	case hci_status_param_op_pack(HCI_OCF_FM_READ_GRP_COUNTERS):
+	case hci_diagnostic_cmd_op_pack(HCI_OCF_FM_POKE_DATA):
+	case hci_diagnostic_cmd_op_pack(HCI_OCF_FM_SSBI_PEEK_REG):
+	case hci_diagnostic_cmd_op_pack(HCI_OCF_FM_SSBI_POKE_REG):
+		hci_cc_rsp(hdev, skb);
+		break;
+
+	case hci_recv_ctrl_cmd_op_pack(HCI_OCF_FM_GET_SIGNAL_THRESHOLD):
+		hci_cc_sig_threshold_rsp(hdev, skb);
+		break;
+
+	case hci_recv_ctrl_cmd_op_pack(HCI_OCF_FM_GET_STATION_PARAM_REQ):
+		hci_cc_station_rsp(hdev, skb);
+		break;
+
+	case hci_recv_ctrl_cmd_op_pack(HCI_OCF_FM_GET_PROGRAM_SERVICE_REQ):
+		hci_cc_prg_srv_rsp(hdev, skb);
+		break;
+
+	case hci_recv_ctrl_cmd_op_pack(HCI_OCF_FM_GET_RADIO_TEXT_REQ):
+		hci_cc_rd_txt_rsp(hdev, skb);
+		break;
+
+	case hci_recv_ctrl_cmd_op_pack(HCI_OCF_FM_GET_AF_LIST_REQ):
+		hci_cc_af_list_rsp(hdev, skb);
+		break;
+
+	case hci_common_cmd_op_pack(HCI_OCF_FM_DEFAULT_DATA_READ):
+	case hci_diagnostic_cmd_op_pack(HCI_OCF_FM_PEEK_DATA):
+		hci_cc_data_rd_rsp(hdev, skb);
+		break;
+
+	case hci_common_cmd_op_pack(HCI_OCF_FM_GET_FEATURE_LIST):
+		hci_cc_feature_list_rsp(hdev, skb);
+		break;
+
+	case hci_diagnostic_cmd_op_pack(HCI_OCF_FM_STATION_DBG_PARAM):
+		hci_cc_dbg_param_rsp(hdev, skb);
+		break;
+
+	default:
+		FMDERR("%s: unhandled opcode 0x%x\n", hdev->name, opcode);
+		break;
+	}
+
+}
+
+static inline void hci_cmd_status_event(struct radio_hci_dev *hdev,
+		struct sk_buff *skb)
+{
+	struct hci_ev_cmd_status *ev = (void *) skb->data;
+	radio_hci_status_complete(hdev, ev->status);
+}
+
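+/*
+ * Tune-status event: cache the station parameters, reset the RDS data
+ * fifos and queue the corresponding V4L2 events (tune success, signal
+ * level, stereo/mono, RDS availability).
+ */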
+static inline void hci_ev_tune_status(struct radio_hci_dev *hdev,
+		struct sk_buff *skb)
+{
+	int i;
+	int len;
+
+	struct iris_device *radio = video_get_drvdata(video_get_dev());
+
+	len = sizeof(struct hci_fm_station_rsp);
+
+	memcpy(&radio->fm_st_rsp.station_rsp, skb_pull(skb, len), len);
+
+	iris_q_event(radio, IRIS_EVT_TUNE_SUCC);
+
+	for (i = 0; i < IRIS_BUF_MAX; i++) {
+		if (i >= IRIS_BUF_RT_RDS)
+			kfifo_reset(&radio->data_buf[i]);
+	}
+
+	if (radio->fm_st_rsp.station_rsp.rssi)
+		iris_q_event(radio, IRIS_EVT_ABOVE_TH);
+	else
+		iris_q_event(radio, IRIS_EVT_BELOW_TH);
+
+	if (radio->fm_st_rsp.station_rsp.stereo_prg)
+		iris_q_event(radio, IRIS_EVT_STEREO);
+
+	if (radio->fm_st_rsp.station_rsp.mute_mode)
+		iris_q_event(radio, IRIS_EVT_MONO);
+
+	if (radio->fm_st_rsp.station_rsp.rds_sync_status)
+		iris_q_event(radio, IRIS_EVT_RDS_AVAIL);
+	else
+		iris_q_event(radio, IRIS_EVT_RDS_NOT_AVAIL);
+}
+
+static inline void hci_ev_search_compl(struct radio_hci_dev *hdev,
+		struct sk_buff *skb)
+{
+	struct iris_device *radio = video_get_drvdata(video_get_dev());
+	iris_q_event(radio, IRIS_EVT_SEEK_COMPLETE);
+}
+
+static inline void hci_ev_srch_st_list_compl(struct radio_hci_dev *hdev,
+		struct sk_buff *skb)
+{
+	struct iris_device *radio = video_get_drvdata(video_get_dev());
+	struct hci_ev_srch_list_compl *ev = (void *) skb->data;
+	radio->srch_st_result = *ev;
+}
+
+static inline void hci_ev_search_next(struct radio_hci_dev *hdev,
+		struct sk_buff *skb)
+{
+	struct iris_device *radio = video_get_drvdata(video_get_dev());
+	iris_q_event(radio, IRIS_EVT_SCAN_NEXT);
+}
+
+static inline void hci_ev_stereo_status(struct radio_hci_dev *hdev,
+		struct sk_buff *skb)
+{
+	struct iris_device *radio = video_get_drvdata(video_get_dev());
+	__u8 st_status = *((__u8 *) skb->data);
+	if (st_status)
+		iris_q_event(radio, IRIS_EVT_STEREO);
+	else
+		iris_q_event(radio, IRIS_EVT_MONO);
+}
+
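+/*
+ * Entry point for incoming HCI event packets: strip the event header
+ * and dispatch to the handler for the event type.
+ */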
+void radio_hci_event_packet(struct radio_hci_dev *hdev, struct sk_buff *skb)
+{
+	struct radio_hci_event_hdr *hdr = (void *) skb->data;
+	__u8 event = hdr->evt;
+
+	skb_pull(skb, RADIO_HCI_EVENT_HDR_SIZE);
+
+	switch (event) {
+	case HCI_EV_TUNE_STATUS:
+		hci_ev_tune_status(hdev, skb);
+		break;
+	case HCI_EV_SEARCH_PROGRESS:
+	case HCI_EV_SEARCH_RDS_PROGRESS:
+	case HCI_EV_SEARCH_LIST_PROGRESS:
+		hci_ev_search_next(hdev, skb);
+		break;
+	case HCI_EV_STEREO_STATUS:
+		hci_ev_stereo_status(hdev, skb);
+		break;
+	case HCI_EV_RDS_LOCK_STATUS:
+	case HCI_EV_SERVICE_AVAILABLE:
+	case HCI_EV_RDS_RX_DATA:
+	case HCI_EV_PROGRAM_SERVICE:
+	case HCI_EV_RADIO_TEXT:
+	case HCI_EV_FM_AF_LIST:
+	case HCI_EV_TX_RDS_GRP_COMPL:
+	case HCI_EV_TX_RDS_CONT_GRP_COMPL:
+		break;
+
+	case HCI_EV_CMD_COMPLETE:
+		hci_cmd_complete_event(hdev, skb);
+		break;
+
+	case HCI_EV_CMD_STATUS:
+		hci_cmd_status_event(hdev, skb);
+		break;
+
+	case HCI_EV_SEARCH_COMPLETE:
+	case HCI_EV_SEARCH_RDS_COMPLETE:
+		hci_ev_search_compl(hdev, skb);
+		break;
+
+	case HCI_EV_SEARCH_LIST_COMPLETE:
+		break;
+
+	default:
+		break;
+	}
+}
+
+/*
+ * fops/IOCTL helper functions
+ */
+
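+/*
+ * Start (on != 0) or cancel a station search.  The configured search
+ * mode selects between a station-list scan, an RDS (PTY/PI) search and
+ * a plain seek in the given direction.
+ */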
+static int iris_search(struct iris_device *radio, int on, int dir)
+{
+	int retval = 0;
+	enum search_t srch = radio->g_search_mode & SRCH_MODE;
+
+	if (on) {
+		switch (srch) {
+		case SCAN_FOR_STRONG:
+		case SCAN_FOR_WEAK:
+			radio->srch_st_list.srch_list_dir = dir;
+			radio->srch_st_list.srch_list_mode = srch;
+			radio->srch_st_list.srch_list_max = 0;
+			retval = hci_fm_search_station_list(
+				&radio->srch_st_list, radio->fm_hdev);
+			break;
+		case RDS_SEEK_PTY:
+		case RDS_SCAN_PTY:
+		case RDS_SEEK_PI:
+			radio->srch_rds.srch_station.srch_mode = srch;
+			radio->srch_rds.srch_station.srch_dir = dir;
+			radio->srch_rds.srch_station.scan_time =
+				radio->g_scan_time;
+			retval = hci_fm_search_rds_stations(&radio->srch_rds,
+				radio->fm_hdev);
+			break;
+		default:
+			radio->srch_st.srch_mode = srch;
+			radio->srch_st.scan_time = radio->g_scan_time;
+			radio->srch_st.srch_dir = dir;
+			retval = hci_fm_search_stations(
+				&radio->srch_st, radio->fm_hdev);
+			break;
+		}
+
+	} else {
+		retval = hci_cmd(HCI_FM_CANCEL_SEARCH_CMD, radio->fm_hdev);
+	}
+
+	return retval;
+}
+
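+/*
+ * Load the band limits (in kHz) and emphasis/RDS defaults for the
+ * requested region, then program the receiver configuration through
+ * hci_set_fm_recv_conf().
+ */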
+static int iris_set_region(struct iris_device *radio, int req_region)
+{
+	int retval;
+	radio->region = req_region;
+
+	switch (radio->region) {
+	case IRIS_REGION_US:
+		{
+			radio->recv_conf.band_low_limit = 88100;
+			radio->recv_conf.band_high_limit = 108000;
+			radio->recv_conf.emphasis = 0;
+			radio->recv_conf.hlsi = 0;
+			radio->recv_conf.ch_spacing = 0;
+			radio->recv_conf.rds_std = 0;
+		}
+		break;
+	case IRIS_REGION_EU:
+		{
+			radio->recv_conf.band_low_limit = 88100;
+			radio->recv_conf.band_high_limit = 108000;
+			radio->recv_conf.emphasis = 0;
+			radio->recv_conf.hlsi = 0;
+			radio->recv_conf.ch_spacing = 0;
+			radio->recv_conf.rds_std = 0;
+		}
+		break;
+	case IRIS_REGION_JAPAN:
+		{
+			radio->recv_conf.band_low_limit = 76000;
+			radio->recv_conf.band_high_limit = 108000;
+			radio->recv_conf.emphasis = 0;
+			radio->recv_conf.hlsi = 0;
+			radio->recv_conf.ch_spacing = 0;
+		}
+		break;
+	default:
+		{
+			radio->recv_conf.emphasis = 0;
+			radio->recv_conf.hlsi = 0;
+			radio->recv_conf.ch_spacing = 0;
+			radio->recv_conf.rds_std = 0;
+		}
+		break;
+	}
+
+
+	retval = hci_set_fm_recv_conf(
+			&radio->recv_conf,
+			radio->fm_hdev);
+
+	return retval;
+}
+
+static int iris_set_freq(struct iris_device *radio, unsigned int freq)
+{
+
+	int retval;
+	retval = hci_fm_tune_station(&freq, radio->fm_hdev);
+	if (retval < 0)
+		FMDERR("Error while setting the frequency : %d\n", retval);
+	return retval;
+}
+
+
+static int iris_vidioc_queryctrl(struct file *file, void *priv,
+		struct v4l2_queryctrl *qc)
+{
+	unsigned char i;
+	int retval = -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(iris_v4l2_queryctrl); i++) {
+		if (qc->id && qc->id == iris_v4l2_queryctrl[i].id) {
+			memcpy(qc, &(iris_v4l2_queryctrl[i]), sizeof(*qc));
+			retval = 0;
+			break;
+		}
+	}
+
+	return retval;
+}
+
+static int iris_vidioc_g_ctrl(struct file *file, void *priv,
+		struct v4l2_control *ctrl)
+{
+	struct iris_device *radio = video_get_drvdata(video_devdata(file));
+	int retval = 0;
+
+	switch (ctrl->id) {
+	case V4L2_CID_AUDIO_VOLUME:
+		break;
+	case V4L2_CID_AUDIO_MUTE:
+		ctrl->value = radio->mute_mode.hard_mute;
+		break;
+	case V4L2_CID_PRIVATE_IRIS_SRCHMODE:
+		ctrl->value = radio->g_search_mode;
+		break;
+	case V4L2_CID_PRIVATE_IRIS_SCANDWELL:
+		ctrl->value = radio->g_scan_time;
+		break;
+	case V4L2_CID_PRIVATE_IRIS_SRCHON:
+		break;
+	case V4L2_CID_PRIVATE_IRIS_STATE:
+		break;
+	case V4L2_CID_PRIVATE_IRIS_IOVERC:
+		retval = hci_cmd(HCI_FM_STATION_DBG_PARAM_CMD, radio->fm_hdev);
+		if (retval < 0)
+			return retval;
+		ctrl->value = radio->st_dbg_param.io_verc;
+		break;
+	case V4L2_CID_PRIVATE_IRIS_INTDET:
+		retval = hci_cmd(HCI_FM_STATION_DBG_PARAM_CMD, radio->fm_hdev);
+		if (retval < 0)
+			return retval;
+		ctrl->value = radio->st_dbg_param.in_det_out;
+		break;
+	case V4L2_CID_PRIVATE_IRIS_REGION:
+		ctrl->value = radio->region;
+		break;
+	case V4L2_CID_PRIVATE_IRIS_SIGNAL_TH:
+		retval = hci_cmd(HCI_FM_GET_SIGNAL_TH_CMD, radio->fm_hdev);
+		break;
+	case V4L2_CID_PRIVATE_IRIS_SRCH_PTY:
+		break;
+	case V4L2_CID_PRIVATE_IRIS_SRCH_PI:
+		break;
+	case V4L2_CID_PRIVATE_IRIS_SRCH_CNT:
+		break;
+	case V4L2_CID_PRIVATE_IRIS_EMPHASIS:
+		retval = hci_cmd(HCI_FM_GET_RECV_CONF_CMD,
+							 radio->fm_hdev);
+		if (retval < 0)
+			FMDERR("Error getting FM recv conf %d\n", retval);
+		else
+			ctrl->value = radio->recv_conf.emphasis;
+		break;
+	case V4L2_CID_PRIVATE_IRIS_RDS_STD:
+		retval = hci_cmd(HCI_FM_GET_RECV_CONF_CMD,
+				 radio->fm_hdev);
+		if (retval < 0)
+			FMDERR("Error getting FM recv conf %d\n", retval);
+		else
+			ctrl->value = radio->recv_conf.rds_std;
+		break;
+	case V4L2_CID_PRIVATE_IRIS_SPACING:
+		retval = hci_cmd(HCI_FM_GET_RECV_CONF_CMD,
+				radio->fm_hdev);
+		if (retval < 0)
+			FMDERR("Error getting FM recv conf %d\n", retval);
+		else
+			ctrl->value = radio->recv_conf.ch_spacing;
+		break;
+	case V4L2_CID_PRIVATE_IRIS_RDSON:
+		retval = hci_cmd(HCI_FM_GET_RECV_CONF_CMD,
+				radio->fm_hdev);
+		if (retval < 0)
+			FMDERR("Error getting FM recv conf %d\n", retval);
+		else
+			ctrl->value = radio->recv_conf.rds_std;
+		break;
+	case V4L2_CID_PRIVATE_IRIS_RDSGROUP_MASK:
+		ctrl->value = radio->rds_grp.rds_grp_enable_mask;
+		break;
+	case V4L2_CID_PRIVATE_IRIS_RDSGROUP_PROC:
+		break;
+	case V4L2_CID_PRIVATE_IRIS_RDSD_BUF:
+		ctrl->value = radio->rds_grp.rds_buf_size;
+		break;
+	case V4L2_CID_PRIVATE_IRIS_PSALL:
+		ctrl->value = radio->g_rds_grp_proc_ps;
+		break;
+	case V4L2_CID_PRIVATE_IRIS_LP_MODE:
+		break;
+	case V4L2_CID_PRIVATE_IRIS_ANTENNA:
+		ctrl->value = radio->g_antenna;
+		break;
+	default:
+		retval = -EINVAL;
+	}
+	if (retval < 0)
+		FMDERR("get control failed with %d, id: %d\n",
+			retval, ctrl->id);
+	return retval;
+}
+
+static int iris_vidioc_s_ext_ctrls(struct file *file, void *priv,
+			struct v4l2_ext_controls *ctrl)
+{
+	return -ENOTSUPP;
+}
+
+static int iris_vidioc_s_ctrl(struct file *file, void *priv,
+		struct v4l2_control *ctrl)
+{
+	struct iris_device *radio = video_get_drvdata(video_devdata(file));
+	int retval = 0;
+	unsigned int rds_grps_proc = 0;
+	__u8 temp_val = 0;
+	radio->recv_conf.emphasis = 0;
+	radio->recv_conf.ch_spacing = 0;
+	radio->recv_conf.hlsi = 0;
+	radio->recv_conf.band_low_limit = 87500;
+	radio->recv_conf.band_high_limit = 108000;
+	radio->recv_conf.rds_std = 0;
+
+
+	switch (ctrl->id) {
+	case V4L2_CID_AUDIO_VOLUME:
+		break;
+	case V4L2_CID_AUDIO_MUTE:
+		radio->mute_mode.hard_mute = ctrl->value;
+		radio->mute_mode.soft_mute = IOC_SFT_MUTE;
+		retval = hci_set_fm_mute_mode(
+				&radio->mute_mode,
+				radio->fm_hdev);
+		if (retval < 0)
+			FMDERR("Error while setting FM hard mute %d\n",
+				retval);
+		break;
+	case V4L2_CID_PRIVATE_IRIS_SRCHMODE:
+		radio->g_search_mode = ctrl->value;
+		break;
+	case V4L2_CID_PRIVATE_IRIS_SCANDWELL:
+		radio->g_scan_time = ctrl->value;
+		break;
+	case V4L2_CID_PRIVATE_IRIS_SRCHON:
+		iris_search(radio, ctrl->value, SRCH_DIR_UP);
+		break;
+	case V4L2_CID_PRIVATE_IRIS_STATE:
+		if (ctrl->value == FM_RECV) {
+			retval = hci_cmd(HCI_FM_ENABLE_RECV_CMD,
+							 radio->fm_hdev);
+		} else {
+			if (ctrl->value == FM_OFF) {
+				retval = hci_cmd(
+							HCI_FM_DISABLE_RECV_CMD,
+							radio->fm_hdev);
+				if (retval < 0)
+					FMDERR("Error on disabling FM %d\n",
+						retval);
+			}
+		}
+		break;
+	case V4L2_CID_PRIVATE_IRIS_REGION:
+		retval = iris_set_region(radio, ctrl->value);
+		break;
+	case V4L2_CID_PRIVATE_IRIS_SIGNAL_TH:
+		temp_val = ctrl->value;
+		retval = hci_fm_set_signal_threshold(
+				&temp_val,
+				radio->fm_hdev);
+		if (retval < 0) {
+			FMDERR("Error while setting signal threshold\n");
+			break;
+		}
+		break;
+	case V4L2_CID_PRIVATE_IRIS_SRCH_PTY:
+		radio->srch_rds.srch_pty = ctrl->value;
+		radio->srch_st_list.srch_pty = ctrl->value;
+		break;
+	case V4L2_CID_PRIVATE_IRIS_SRCH_PI:
+		radio->srch_rds.srch_pi = ctrl->value;
+		break;
+	case V4L2_CID_PRIVATE_IRIS_SRCH_CNT:
+		break;
+	case V4L2_CID_PRIVATE_IRIS_SPACING:
+		radio->recv_conf.ch_spacing = ctrl->value;
+		break;
+	case V4L2_CID_PRIVATE_IRIS_EMPHASIS:
+		radio->recv_conf.emphasis = ctrl->value;
+		retval =
+		hci_set_fm_recv_conf(&radio->recv_conf, radio->fm_hdev);
+		break;
+	case V4L2_CID_PRIVATE_IRIS_RDS_STD:
+		radio->recv_conf.rds_std = ctrl->value;
+		retval =
+		hci_set_fm_recv_conf(&radio->recv_conf, radio->fm_hdev);
+		break;
+	case V4L2_CID_PRIVATE_IRIS_RDSON:
+		radio->recv_conf.rds_std = ctrl->value;
+		retval =
+		hci_set_fm_recv_conf(&radio->recv_conf, radio->fm_hdev);
+		break;
+	case V4L2_CID_PRIVATE_IRIS_RDSGROUP_MASK:
+		radio->rds_grp.rds_grp_enable_mask = ctrl->value;
+		retval = hci_fm_rds_grp(&radio->rds_grp, radio->fm_hdev);
+		break;
+	case V4L2_CID_PRIVATE_IRIS_RDSGROUP_PROC:
+		rds_grps_proc = radio->g_rds_grp_proc_ps | ctrl->value;
+		retval = hci_fm_rds_grps_process(
+				&rds_grps_proc,
+				radio->fm_hdev);
+		break;
+	case V4L2_CID_PRIVATE_IRIS_RDSD_BUF:
+		radio->rds_grp.rds_buf_size = ctrl->value;
+		break;
+	case V4L2_CID_PRIVATE_IRIS_PSALL:
+		radio->g_rds_grp_proc_ps = ctrl->value;
+		break;
+	case V4L2_CID_PRIVATE_IRIS_LP_MODE:
+		break;
+	case V4L2_CID_PRIVATE_IRIS_ANTENNA:
+		temp_val = ctrl->value;
+		retval = hci_fm_set_antenna(&temp_val, radio->fm_hdev);
+		break;
+	case V4L2_CID_RDS_TX_PTY:
+		break;
+	case V4L2_CID_RDS_TX_PI:
+		break;
+	case V4L2_CID_PRIVATE_IRIS_STOP_RDS_TX_PS_NAME:
+		break;
+	case V4L2_CID_PRIVATE_IRIS_STOP_RDS_TX_RT:
+		break;
+	case V4L2_CID_PRIVATE_IRIS_TX_SETPSREPEATCOUNT:
+		break;
+	case V4L2_CID_TUNE_POWER_LEVEL:
+		break;
+	default:
+		retval = -EINVAL;
+	}
+	return retval;
+}
+
+static int iris_vidioc_g_tuner(struct file *file, void *priv,
+		struct v4l2_tuner *tuner)
+{
+	struct iris_device *radio = video_get_drvdata(video_devdata(file));
+	int retval;
+	if (tuner->index > 0)
+		return -EINVAL;
+
+	retval = hci_cmd(HCI_FM_GET_STATION_PARAM_CMD, radio->fm_hdev);
+	if (retval < 0)
+		return retval;
+
+	tuner->type = V4L2_TUNER_RADIO;
+	tuner->rangelow  = radio->recv_conf.band_low_limit * TUNE_PARAM;
+	tuner->rangehigh = radio->recv_conf.band_high_limit * TUNE_PARAM;
+	tuner->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
+	tuner->capability = V4L2_TUNER_CAP_LOW;
+	tuner->signal = radio->fm_st_rsp.station_rsp.rssi;
+	tuner->audmode = radio->fm_st_rsp.station_rsp.stereo_prg;
+	tuner->afc = 0;
+
+	return 0;
+}
+
+static int iris_vidioc_s_tuner(struct file *file, void *priv,
+		struct v4l2_tuner *tuner)
+{
+	struct iris_device *radio = video_get_drvdata(video_devdata(file));
+	int retval;
+	if (tuner->index > 0)
+		return -EINVAL;
+
+	radio->recv_conf.band_low_limit = tuner->rangelow / TUNE_PARAM;
+	radio->recv_conf.band_high_limit = tuner->rangehigh / TUNE_PARAM;
+	if (tuner->audmode == V4L2_TUNER_MODE_MONO) {
+		radio->stereo_mode.stereo_mode = 0x01;
+		retval = hci_set_fm_stereo_mode(
+				&radio->stereo_mode,
+				radio->fm_hdev);
+	} else {
+		radio->stereo_mode.stereo_mode = 0x00;
+		retval = hci_set_fm_stereo_mode(
+				&radio->stereo_mode,
+				radio->fm_hdev);
+	}
+	if (retval < 0)
+		FMDERR(": set tuner failed with %d\n", retval);
+	return retval;
+}
+
+static int iris_vidioc_g_frequency(struct file *file, void *priv,
+		struct v4l2_frequency *freq)
+{
+	struct iris_device *radio = video_get_drvdata(video_devdata(file));
+	int retval;
+
+	freq->type = V4L2_TUNER_RADIO;
+	retval = hci_cmd(HCI_FM_GET_STATION_PARAM_CMD, radio->fm_hdev);
+	if (retval < 0)
+		FMDERR("get frequency failed %d\n", retval);
+	else
+		freq->frequency =
+			radio->fm_st_rsp.station_rsp.station_freq * TUNE_PARAM;
+	return retval;
+}
+
+static int iris_vidioc_s_frequency(struct file *file, void *priv,
+					struct v4l2_frequency *freq)
+{
+	struct iris_device  *radio = video_get_drvdata(video_devdata(file));
+	int retval = -1;
+	freq->frequency = freq->frequency / TUNE_PARAM;
+
+	if (freq->type != V4L2_TUNER_RADIO)
+		return -EINVAL;
+
+	retval = iris_set_freq(radio, freq->frequency);
+	if (retval < 0)
+		FMDERR(" set frequency failed with %d\n", retval);
+	return retval;
+}
+
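+/*
+ * Copy queued driver data to the user buffer.  buffer->index selects
+ * the kfifo to drain; reads from the event buffer block until an event
+ * is queued.
+ */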
+static int iris_vidioc_dqbuf(struct file *file, void *priv,
+				struct v4l2_buffer *buffer)
+{
+	struct iris_device  *radio = video_get_drvdata(video_devdata(file));
+	enum iris_buf_t buf_type = buffer->index;
+	struct kfifo *data_fifo;
+	unsigned char *buf = (unsigned char *)buffer->m.userptr;
+	unsigned int len = buffer->length;
+	if (!access_ok(VERIFY_WRITE, buf, len))
+		return -EFAULT;
+	if ((buf_type < IRIS_BUF_MAX) && (buf_type >= 0)) {
+		data_fifo = &radio->data_buf[buf_type];
+		if (buf_type == IRIS_BUF_EVENTS)
+			if (wait_event_interruptible(radio->event_queue,
+				kfifo_len(data_fifo)) < 0)
+				return -EINTR;
+	} else {
+		FMDERR("invalid buffer type\n");
+		return -EINVAL;
+	}
+	buffer->bytesused = kfifo_out_locked(data_fifo, buf, len,
+					&radio->buf_lock[buf_type]);
+
+	return 0;
+}
+
+static int iris_vidioc_g_fmt_type_private(struct file *file, void *priv,
+						struct v4l2_format *f)
+{
+	return 0;
+}
+
+static int iris_vidioc_s_hw_freq_seek(struct file *file, void *priv,
+					struct v4l2_hw_freq_seek *seek)
+{
+	struct iris_device *radio = video_get_drvdata(video_devdata(file));
+	int dir;
+	if (seek->seek_upward)
+		dir = SRCH_DIR_UP;
+	else
+		dir = SRCH_DIR_DOWN;
+	return iris_search(radio, CTRL_ON, dir);
+}
+
+static int iris_vidioc_querycap(struct file *file, void *priv,
+	struct v4l2_capability *capability)
+{
+	struct iris_device *radio;
+	radio = video_get_drvdata(video_devdata(file));
+	strlcpy(capability->driver, DRIVER_NAME, sizeof(capability->driver));
+	strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card));
+	radio->g_cap = capability;
+	return 0;
+}
+
+
+static const struct v4l2_ioctl_ops iris_ioctl_ops = {
+	.vidioc_querycap              = iris_vidioc_querycap,
+	.vidioc_queryctrl             = iris_vidioc_queryctrl,
+	.vidioc_g_ctrl                = iris_vidioc_g_ctrl,
+	.vidioc_s_ctrl                = iris_vidioc_s_ctrl,
+	.vidioc_g_tuner               = iris_vidioc_g_tuner,
+	.vidioc_s_tuner               = iris_vidioc_s_tuner,
+	.vidioc_g_frequency           = iris_vidioc_g_frequency,
+	.vidioc_s_frequency           = iris_vidioc_s_frequency,
+	.vidioc_s_hw_freq_seek        = iris_vidioc_s_hw_freq_seek,
+	.vidioc_dqbuf                 = iris_vidioc_dqbuf,
+	.vidioc_g_fmt_type_private    = iris_vidioc_g_fmt_type_private,
+	.vidioc_s_ext_ctrls           = iris_vidioc_s_ext_ctrls,
+};
+
+static const struct v4l2_file_operations iris_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = video_ioctl2,
+};
+
+static struct video_device iris_viddev_template = {
+	.fops                   = &iris_fops,
+	.ioctl_ops              = &iris_ioctl_ops,
+	.name                   = DRIVER_NAME,
+	.release                = video_device_release,
+};
+
+static struct video_device *video_get_dev(void)
+{
+	return priv_videodev;
+}
+
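+/*
+ * Platform probe: allocate the device state and the per-buffer kfifos,
+ * then register the V4L2 radio device.
+ */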
+static int __init iris_probe(struct platform_device *pdev)
+{
+	struct iris_device *radio;
+	int retval;
+	int radio_nr = -1;
+	int i;
+
+	if (!pdev) {
+		FMDERR(": pdev is null\n");
+		return -ENOMEM;
+	}
+
+	radio = kzalloc(sizeof(struct iris_device), GFP_KERNEL);
+	if (!radio) {
+		FMDERR(": Could not allocate radio device\n");
+		return -ENOMEM;
+	}
+
+	radio->dev = &pdev->dev;
+	platform_set_drvdata(pdev, radio);
+
+	radio->videodev = video_device_alloc();
+	if (!radio->videodev) {
+		FMDERR(": Could not allocate V4L device\n");
+		kfree(radio);
+		return -ENOMEM;
+	}
+
+	memcpy(radio->videodev, &iris_viddev_template,
+	  sizeof(iris_viddev_template));
+
+	for (i = 0; i < IRIS_BUF_MAX; i++) {
+		int kfifo_alloc_rc = 0;
+		spin_lock_init(&radio->buf_lock[i]);
+
+		if (i == IRIS_BUF_RAW_RDS)
+			kfifo_alloc_rc = kfifo_alloc(&radio->data_buf[i],
+				rds_buf*3, GFP_KERNEL);
+		else
+			kfifo_alloc_rc = kfifo_alloc(&radio->data_buf[i],
+				STD_BUF_SIZE, GFP_KERNEL);
+
+		if (kfifo_alloc_rc != 0) {
+			FMDERR("failed allocating buffers %d\n",
+				   kfifo_alloc_rc);
+			for (; i > -1; i--)
+				kfifo_free(&radio->data_buf[i]);
+			video_device_release(radio->videodev);
+			kfree(radio);
+			return -ENOMEM;
+		}
+	}
+
+	mutex_init(&radio->lock);
+	init_completion(&radio->sync_xfr_start);
+	radio->tune_req = 0;
+	init_waitqueue_head(&radio->event_queue);
+	init_waitqueue_head(&radio->read_queue);
+
+	video_set_drvdata(radio->videodev, radio);
+
+	if (NULL == video_get_drvdata(radio->videodev))
+		FMDERR(": video_get_drvdata failed\n");
+
+	retval = video_register_device(radio->videodev, VFL_TYPE_RADIO,
+								   radio_nr);
+	if (retval) {
+		FMDERR(": Could not register video device\n");
+		video_device_release(radio->videodev);
+		for (i = IRIS_BUF_MAX - 1; i > -1; i--)
+			kfifo_free(&radio->data_buf[i]);
+		kfree(radio);
+		return retval;
+	} else {
+		priv_videodev = kzalloc(sizeof(struct video_device),
+			GFP_KERNEL);
+		if (!priv_videodev) {
+			video_unregister_device(radio->videodev);
+			for (i = IRIS_BUF_MAX - 1; i > -1; i--)
+				kfifo_free(&radio->data_buf[i]);
+			kfree(radio);
+			return -ENOMEM;
+		}
+		memcpy(priv_videodev, radio->videodev,
+			sizeof(struct video_device));
+	}
+	return 0;
+}
+
+
+static int __devexit iris_remove(struct platform_device *pdev)
+{
+	int i;
+	struct iris_device *radio = platform_get_drvdata(pdev);
+
+	video_unregister_device(radio->videodev);
+
+	for (i = 0; i < IRIS_BUF_MAX; i++)
+		kfifo_free(&radio->data_buf[i]);
+
+	kfree(radio);
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static struct platform_driver iris_driver = {
+	.driver = {
+		.owner  = THIS_MODULE,
+		.name   = "iris_fm",
+	},
+	.remove = __devexit_p(iris_remove),
+};
+
+static int __init iris_radio_init(void)
+{
+	return platform_driver_probe(&iris_driver, iris_probe);
+}
+module_init(iris_radio_init);
+
+static void __exit iris_radio_exit(void)
+{
+	platform_driver_unregister(&iris_driver);
+}
+module_exit(iris_radio_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/media/radio/radio-tavarua.c b/drivers/media/radio/radio-tavarua.c
new file mode 100644
index 0000000..f04dfe5
--- /dev/null
+++ b/drivers/media/radio/radio-tavarua.c
@@ -0,0 +1,3755 @@
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * Qualcomm Tavarua FM core driver
+ */
+
+/* driver definitions */
+#define DRIVER_AUTHOR "Qualcomm"
+#define DRIVER_NAME "radio-tavarua"
+#define DRIVER_CARD "Qualcomm FM Radio Transceiver"
+#define DRIVER_DESC "I2C radio driver for Qualcomm FM Radio Transceiver "
+#define DRIVER_VERSION "1.0.0"
+
+#include <linux/version.h>
+#include <linux/init.h>         /* Initdata                     */
+#include <linux/delay.h>        /* udelay                       */
+#include <linux/uaccess.h>      /* copy to/from user            */
+#include <linux/kfifo.h>        /* lock free circular buffer    */
+#include <linux/param.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+
+/* kernel includes */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/videodev2.h>
+#include <linux/mutex.h>
+#include <media/v4l2-common.h>
+#include <asm/unaligned.h>
+#include <media/v4l2-ioctl.h>
+#include <linux/unistd.h>
+#include <asm/atomic.h>
+#include <media/tavarua.h>
+#include <linux/mfd/marimba.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+/* Regional parameters for the radio device */
+struct region_params_t {
+	enum tavarua_region_t region;
+	unsigned int band_high;
+	unsigned int band_low;
+	char emphasis;
+	char rds_std;
+	char spacing;
+};
+
+struct srch_params_t {
+	unsigned short srch_pi;
+	unsigned char srch_pty;
+	unsigned int preset_num;
+	int get_list;
+};
+
+/* Main radio device structure;
+acts as a shadow copy of the
+actual Tavarua registers */
+struct tavarua_device {
+	struct video_device *videodev;
+	/* driver management */
+	int users;
+	/* top level driver data */
+	struct marimba *marimba;
+	struct device *dev;
+	/* platform specific functionality */
+	struct marimba_fm_platform_data *pdata;
+	unsigned int chipID;
+	/*RDS buffers + Radio event buffer*/
+	struct kfifo data_buf[TAVARUA_BUF_MAX];
+	/* search parameters */
+	struct srch_params_t srch_params;
+	/* keep track of pending xfrs */
+	int pending_xfrs[TAVARUA_XFR_MAX];
+	int xfr_bytes_left;
+	int xfr_in_progress;
+	/* Transmit data */
+	enum tavarua_xfr_ctrl_t tx_mode;
+	/* synchronous xfr data */
+	unsigned char sync_xfr_regs[XFR_REG_NUM];
+	struct completion sync_xfr_start;
+	struct completion sync_req_done;
+	int tune_req;
+	/* internal register status */
+	unsigned char registers[RADIO_REGISTERS];
+	/* regional settings */
+	struct region_params_t region_params;
+	/* power mode */
+	int lp_mode;
+	int handle_irq;
+	/* global lock */
+	struct mutex lock;
+	/* buffer locks*/
+	spinlock_t buf_lock[TAVARUA_BUF_MAX];
+	/* work queue */
+	struct workqueue_struct *wqueue;
+	struct delayed_work work;
+	/* wait queue for blocking event read */
+	wait_queue_head_t event_queue;
+	/* wait queue for raw rds read */
+	wait_queue_head_t read_queue;
+	/* PTY for FM Tx */
+	int pty;
+	/* PI for FM TX */
+	int pi;
+	/*PS repeatcount for PS Tx */
+	int ps_repeatcount;
+};
+
+/**************************************************************************
+ * Module Parameters
+ **************************************************************************/
+
+/* Radio Nr */
+static int radio_nr = -1;
+module_param(radio_nr, int, 0);
+MODULE_PARM_DESC(radio_nr, "Radio Nr");
+static int wait_timeout = WAIT_TIMEOUT;
+/* Bahama's version*/
+static u8 bahama_version;
+/* RDS buffer blocks */
+static unsigned int rds_buf = 100;
+module_param(rds_buf, uint, 0);
+MODULE_PARM_DESC(rds_buf, "RDS buffer entries: *100*");
+/* static variables */
+static struct tavarua_device *private_data;
+/* forward declarations */
+static int tavarua_disable_interrupts(struct tavarua_device *radio);
+static int tavarua_setup_interrupts(struct tavarua_device *radio,
+					enum radio_state_t state);
+static int tavarua_start(struct tavarua_device *radio,
+			enum radio_state_t state);
+static int tavarua_request_irq(struct tavarua_device *radio);
+static void start_pending_xfr(struct tavarua_device *radio);
+/* work function */
+static void read_int_stat(struct work_struct *work);
+
+static int is_bahama(void)
+{
+	int id = 0;
+
+	switch (id = adie_get_detected_connectivity_type()) {
+	case BAHAMA_ID:
+		FMDBG("It is Bahama\n");
+		return 1;
+
+	case MARIMBA_ID:
+		FMDBG("It is Marimba\n");
+		return 0;
+	default:
+		printk(KERN_ERR "%s: unexpected adie connectivity type: %d\n",
+			__func__, id);
+		return -ENODEV;
+	}
+}
+
+static int set_fm_slave_id(struct tavarua_device *radio)
+{
+	int bahama_present = is_bahama();
+
+	if (bahama_present == -ENODEV)
+		return -ENODEV;
+
+	if (bahama_present)
+		radio->marimba->mod_id = SLAVE_ID_BAHAMA_FM;
+	else
+		radio->marimba->mod_id = MARIMBA_SLAVE_ID_FM;
+
+	return 0;
+}
+
+/*=============================================================================
+FUNCTION:  tavarua_isr
+=============================================================================*/
+/**
+  This function is called when the interrupt GPIO is toggled. It queues the
+  event to the interrupt work queue, which is later handled by the ISR
+  handling function, i.e. INIT_DELAYED_WORK(&radio->work, read_int_stat);
+
+  @param irq: irq that is toggled.
+  @param dev_id: structure pointer passed by client.
+
+  @return IRQ_HANDLED.
+*/
+static irqreturn_t tavarua_isr(int irq, void *dev_id)
+{
+	struct tavarua_device *radio = dev_id;
+	/* schedule delayed work to handle the host interrupt */
+	/* The call to queue_delayed_work ensures that a minimum delay (in
+	 * jiffies) passes before the work is actually executed. The return
+	 * value from the function is nonzero if the work_struct was actually
+	 * added to the queue (otherwise, it may have already been there and
+	 * will not be added a second time).
+	 */
+	queue_delayed_work(radio->wqueue, &radio->work,
+				msecs_to_jiffies(TAVARUA_DELAY));
+	return IRQ_HANDLED;
+}
+
+/**************************************************************************
+ * Interface to radio internal registers over top level marimba driver
+ *************************************************************************/
+
+/*=============================================================================
+FUNCTION:  tavarua_read_registers
+=============================================================================*/
+/**
+  This function is called to read a number of bytes from an I2C interface.
+  The bytes read are stored in internal register status (shadow copy).
+
+  @param radio: structure pointer passed by client.
+  @param offset: register offset.
+  @param len: num of bytes.
+
+  @return >= 0 if successful.
+  @return < 0 if failure.
+*/
+static int tavarua_read_registers(struct tavarua_device *radio,
+				unsigned char offset, int len)
+{
+	int retval = 0, i = 0;
+	retval = set_fm_slave_id(radio);
+
+	if (retval == -ENODEV)
+		return retval;
+
+	FMDBG_I2C("I2C Slave: %x, Read Offset(%x): Data [",
+						radio->marimba->mod_id,
+						offset);
+
+	retval =  marimba_read(radio->marimba, offset,
+				&radio->registers[offset], len);
+
+	if (retval > 0) {
+		for (i = 0; i < len; i++)
+			FMDBG_I2C("%02x ", radio->registers[offset+i]);
+		FMDBG_I2C(" ]\n");
+
+	}
+	return retval;
+}
+
+/*=============================================================================
+FUNCTION:  tavarua_write_register
+=============================================================================*/
+/**
+  This function is called to write a byte over the I2C interface.
+  The corresponding shadow copy is stored in internal register status.
+
+  @param radio: structure pointer passed by client.
+  @param offset: register offset.
+  @param value: buffer to be written to the registers.
+
+  @return >= 0 if successful.
+  @return < 0 if failure.
+*/
+static int tavarua_write_register(struct tavarua_device *radio,
+			unsigned char offset, unsigned char value)
+{
+	int retval;
+	retval = set_fm_slave_id(radio);
+
+	if (retval == -ENODEV)
+		return retval;
+
+	FMDBG_I2C("I2C Slave: %x, Write Offset(%x): Data[",
+						radio->marimba->mod_id,
+						offset);
+	retval = marimba_write(radio->marimba, offset, &value, 1);
+	if (retval > 0) {
+		if (offset < RADIO_REGISTERS) {
+			radio->registers[offset] = value;
+			FMDBG_I2C("%02x ", radio->registers[offset]);
+		}
+		FMDBG_I2C(" ]\n");
+	}
+	return retval;
+}
+
+/*=============================================================================
+FUNCTION:  tavarua_write_registers
+=============================================================================*/
+/**
+  This function is called to write a number of bytes over the I2C interface.
+  The corresponding shadow copy is stored in internal register status.
+
+  @param radio: structure pointer passed by client.
+  @param offset: register offset.
+  @param buf: buffer to be written to the registers.
+  @param len: num of bytes.
+
+  @return >= 0 if successful.
+  @return < 0 if failure.
+*/
+static int tavarua_write_registers(struct tavarua_device *radio,
+			unsigned char offset, unsigned char *buf, int len)
+{
+
+	int i;
+	int retval;
+	retval = set_fm_slave_id(radio);
+
+	if (retval == -ENODEV)
+		return retval;
+
+	FMDBG_I2C("I2C Slave: %x, Write Offset(%x): Data[",
+						radio->marimba->mod_id,
+						offset);
+	retval = marimba_write(radio->marimba, offset, buf, len);
+	if (retval > 0) { /* if write successful, update internal state too */
+		for (i = 0; i < len; i++) {
+			if ((offset+i) < RADIO_REGISTERS) {
+				radio->registers[offset+i] = buf[i];
+				FMDBG_I2C("%x ",  radio->registers[offset+i]);
+			}
+		}
+		FMDBG_I2C(" ]\n");
+	}
+	return retval;
+}
+
+/*=============================================================================
+FUNCTION:  read_data_blocks
+=============================================================================*/
+/**
+  This function reads Raw RDS blocks from Core regs to driver
+  internal regs (shadow copy).
+
+  @param radio: structure pointer passed by client.
+  @param offset: register offset.
+
+  @return >= 0 if successful.
+  @return < 0 if failure.
+*/
+static int read_data_blocks(struct tavarua_device *radio, unsigned char offset)
+{
+	/* read all four RDS blocks */
+	return tavarua_read_registers(radio, offset, RDS_BLOCK*4);
+}
+
+/*=============================================================================
+FUNCTION:  tavarua_rds_read
+=============================================================================*/
+/**
+  This RDS processing function reads raw RDS blocks from the Core regs into
+  the driver internal regs (shadow copy). It then fills the V4L2 RDS buffer,
+  which is read by the application through the JNI interface.
+
+  @param radio: structure pointer passed by client.
+
+  @return None.
+*/
+static void tavarua_rds_read(struct tavarua_device *radio)
+{
+	struct kfifo *rds_buf = &radio->data_buf[TAVARUA_BUF_RAW_RDS];
+	unsigned char blocknum;
+	unsigned char tmp[3];
+
+	if (read_data_blocks(radio, RAW_RDS) < 0)
+		return;
+	 /* copy all four RDS blocks to internal buffer */
+	for (blocknum = 0; blocknum < RDS_BLOCKS_NUM; blocknum++) {
+		/* Fill the V4L2 RDS buffer */
+		put_unaligned(cpu_to_le16(radio->registers[RAW_RDS +
+			blocknum*RDS_BLOCK]), (unsigned short *) tmp);
+		tmp[2] = blocknum;		/* offset name */
+		tmp[2] |= blocknum << 3;	/* received offset */
+		tmp[2] |= 0x40; /* corrected error(s) */
+
+		/* copy RDS block to internal buffer */
+		kfifo_in_locked(rds_buf, tmp, 3, &radio->buf_lock[TAVARUA_BUF_RAW_RDS]);
+	}
+	/* wake up read queue */
+	if (kfifo_len(rds_buf))
+		wake_up_interruptible(&radio->read_queue);
+
+}
+
+/*=============================================================================
+FUNCTION:  request_read_xfr
+=============================================================================*/
+/**
+  This function sets the desired MODE in the XFRCTRL register and also sets the
+  CTRL field to read.
+  This is an asynchronous way of reading the XFR registers. The client
+  requests by setting the desired mode in the XFRCTRL register and then
+  initiates the actual data register read by calling copy_from_xfr once the
+  SoC signals success.
+
+  NOTE:
+
+  The Data Transfer (XFR) registers are used to pass various data and
+  configuration parameters between the Core and host processor.
+
+  To read from the XFR registers, the host processor must set the desired MODE
+  in the XFRCTRL register and set the CTRL field to read. The Core will then
+  populate the XFRDAT0 - XFRDAT15 registers with the defined mode bytes. The
+  Core will set the TRANSFER interrupt status bit and interrupt the host if the
+  TRANSFERCTRL interrupt control bit is set. The host can then extract the XFR
+  mode bytes once it detects that the Core has updated the registers.
+
+  @param radio: structure pointer passed by client.
+
+  @return Always returns 0.
+*/
+static int request_read_xfr(struct tavarua_device *radio,
+				enum tavarua_xfr_ctrl_t mode){
+
+	tavarua_write_register(radio, XFRCTRL, mode);
+	msleep(TAVARUA_DELAY);
+	return 0;
+}
+
+/*=============================================================================
+FUNCTION:  copy_from_xfr
+=============================================================================*/
+/**
+  This function is used to read the XFR mode bytes once the driver detects
+  that the Core has updated the registers. It copies n bytes from the XFR
+  registers into the appropriate internal buffer.
+
+  NOTE:
+
+  This function should be used in conjunction with request_read_xfr. Refer to
+  request_read_xfr for XFR mode transaction details.
+
+  @param radio: structure pointer passed by client.
+  @param buf_type: Index into RDS/Radio event buffer to use.
+  @param n: num of bytes.
+
+  @return Always returns 0.
+*/
+static int copy_from_xfr(struct tavarua_device *radio,
+		enum tavarua_buf_t buf_type, unsigned int n){
+
+	struct kfifo *data_fifo = &radio->data_buf[buf_type];
+	unsigned char *xfr_regs = &radio->registers[XFRCTRL+1];
+	kfifo_in_locked(data_fifo, xfr_regs, n, &radio->buf_lock[buf_type]);
+	return 0;
+}
+
+/*=============================================================================
+FUNCTION:  write_to_xfr
+=============================================================================*/
+/**
+  This function sets the desired MODE in the XFRCTRL register and it also sets
+  the CTRL field and data to write.
+  This also writes all the XFRDATx registers with the desired input buffer.
+
+  NOTE:
+
+  The Data Transfer (XFR) registers are used to pass various data and
+  configuration parameters between the Core and host processor.
+
+  To write data to the Core, the host processor updates XFRDAT0 - XFRDAT15 with
+  the appropriate mode bytes. The host processor must then set the desired MODE
+  in the XFRCTRL register and set the CTRL field to write. The core will detect
+  that the XFRCTRL register was written to and will read the XFR mode bytes.
+  After reading all the mode bytes, the Core will set the TRANSFER interrupt
+  status bit and interrupt the host if the TRANSFERCTRL interrupt control bit
+  is set.
+
+  @param radio: structure pointer passed by client.
+  @param mode: XFR mode to write in XFRCTRL register.
+  @param buf: buffer to be written to the registers.
+  @param len: num of bytes.
+
+  @return >= 0 if successful.
+  @return < 0 if failure.
+*/
+static int write_to_xfr(struct tavarua_device *radio, unsigned char mode,
+			char *buf, int len)
+{
+	char buffer[len+1];
+	memcpy(buffer+1, buf, len);
+	/* buffer[0] corresponds to XFRCTRL register
+	   set the CTRL bit to 1 for write mode
+	*/
+	buffer[0] = ((1<<7) | mode);
+	return tavarua_write_registers(radio, XFRCTRL, buffer, sizeof(buffer));
+}
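+
+/*
+ * Wire layout produced by write_to_xfr(), for reference:
+ *
+ *	buffer[0]      = 0x80 | mode   (XFRCTRL with the CTRL/write bit set)
+ *	buffer[1..len] = caller data   (lands in XFRDAT0 .. XFRDAT[len-1])
+ */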
+
+/*=============================================================================
+FUNCTION:  xfr_intf_own
+=============================================================================*/
+/**
+  This function is used to check if there is any pending XFR mode operation.
+  If yes, wait for it to complete, else update the flag to indicate XFR
+  operation is in progress
+
+  @param radio: structure pointer passed by client.
+
+  @return 0      on success.
+	-ETIME on timeout.
+*/
+static int xfr_intf_own(struct tavarua_device *radio)
+{
+
+	mutex_lock(&radio->lock);
+	if (radio->xfr_in_progress) {
+		radio->pending_xfrs[TAVARUA_XFR_SYNC] = 1;
+		mutex_unlock(&radio->lock);
+		if (!wait_for_completion_timeout(&radio->sync_xfr_start,
+			msecs_to_jiffies(wait_timeout)))
+			return -ETIME;
+	} else {
+		FMDBG("gained ownership of xfr\n");
+		radio->xfr_in_progress = 1;
+		mutex_unlock(&radio->lock);
+	}
+	return 0;
+}
+
+/*=============================================================================
+FUNCTION:  sync_read_xfr
+=============================================================================*/
+/**
+  This function is used to do synchronous XFR read operation.
+
+  @param radio: structure pointer passed by client.
+  @param xfr_type: XFR mode to write in XFRCTRL register.
+  @param buf: buffer to be read from the core.
+
+  @return => 0 if successful.
+  @return < 0 if failure.
+*/
+static int sync_read_xfr(struct tavarua_device *radio,
+			enum tavarua_xfr_ctrl_t xfr_type, unsigned char *buf)
+{
+	int retval;
+	retval = xfr_intf_own(radio);
+	if (retval < 0)
+		return retval;
+	retval = tavarua_write_register(radio, XFRCTRL, xfr_type);
+
+	if (retval >= 0) {
+		/* Wait for interrupt i.e. complete
+		(&radio->sync_req_done); call */
+		if (!wait_for_completion_timeout(&radio->sync_req_done,
+			msecs_to_jiffies(wait_timeout)) || (retval < 0)) {
+			retval = -ETIME;
+		} else {
+			memcpy(buf, radio->sync_xfr_regs, XFR_REG_NUM);
+		}
+	}
+	radio->xfr_in_progress = 0;
+	start_pending_xfr(radio);
+	FMDBG("%s: %d\n", __func__, retval);
+	return retval;
+}
+
+/*=============================================================================
+FUNCTION:  sync_write_xfr
+=============================================================================*/
+/**
+  This function is used to do synchronous XFR write operation.
+
+  @param radio: structure pointer passed by client.
+  @param xfr_type: XFR mode to write in XFRCTRL register.
+  @param buf: buffer to be written to the core.
+
+  @return => 0 if successful.
+  @return < 0 if failure.
+*/
+static int sync_write_xfr(struct tavarua_device *radio,
+		enum tavarua_xfr_ctrl_t xfr_type, unsigned char *buf)
+{
+	int retval;
+	retval = xfr_intf_own(radio);
+	if (retval < 0)
+		return retval;
+	retval = write_to_xfr(radio, xfr_type, buf, XFR_REG_NUM);
+
+	if (retval >= 0) {
+		/* Wait for interrupt i.e. complete
+		(&radio->sync_req_done); call */
+		if (!wait_for_completion_timeout(&radio->sync_req_done,
+			msecs_to_jiffies(wait_timeout)) || (retval < 0)) {
+			FMDBG("Write xfr timeout");
+		}
+	}
+	radio->xfr_in_progress = 0;
+	start_pending_xfr(radio);
+	FMDBG("%s: %d\n", __func__,  retval);
+	return retval;
+}
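+
+/*
+ * Minimal usage sketch, mirroring the read-modify-write pattern used by
+ * tavarua_set_region() below (the particular bytes patched here are only
+ * an illustration):
+ *
+ *	unsigned char xfr_buf[XFR_REG_NUM];
+ *
+ *	if (sync_read_xfr(radio, RADIO_CONFIG, xfr_buf) >= 0) {
+ *		xfr_buf[0] = band_low >> 8;
+ *		xfr_buf[1] = band_low & 0xFF;
+ *		sync_write_xfr(radio, RADIO_CONFIG, xfr_buf);
+ *	}
+ */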
+
+
+/*=============================================================================
+FUNCTION:  start_pending_xfr
+=============================================================================*/
+/**
+  This function checks whether there are any pending XFR interrupts and, if
+  the interrupts are either RDS PS, RDS RT, RDS AF, SCANNEXT, SEARCH or SYNC,
+  initiates the corresponding read operation. Preference is given to
+  synchronous transfers (SYNC) over asynchronous ones (PS, RT, AF, etc.).
+
+  @param radio: structure pointer passed by client.
+
+  @return None.
+*/
+static void start_pending_xfr(struct tavarua_device *radio)
+{
+	int i;
+	enum tavarua_xfr_t xfr;
+	for (i = 0; i < TAVARUA_XFR_MAX; i++) {
+		if (radio->pending_xfrs[i]) {
+			radio->xfr_in_progress = 1;
+			xfr = (enum tavarua_xfr_t)i;
+			switch (xfr) {
+			/* priority given to synchronous xfrs */
+			case TAVARUA_XFR_SYNC:
+				complete(&radio->sync_xfr_start);
+				break;
+			/* asynchronous xfrs */
+			case TAVARUA_XFR_SRCH_LIST:
+				request_read_xfr(radio, RX_STATIONS_0);
+				break;
+			case TAVARUA_XFR_RT_RDS:
+				request_read_xfr(radio, RDS_RT_0);
+				break;
+			case TAVARUA_XFR_PS_RDS:
+				request_read_xfr(radio, RDS_PS_0);
+				break;
+			case TAVARUA_XFR_AF_LIST:
+				request_read_xfr(radio, RDS_AF_0);
+				break;
+			default:
+				FMDERR("%s: Unsupported XFR %d\n",
+					 __func__, xfr);
+			}
+			radio->pending_xfrs[i] = 0;
+			FMDBG("resurrect xfr %d\n", i);
+		}
+	}
+	return;
+}
+
+/*=============================================================================
+FUNCTION:  tavarua_q_event
+=============================================================================*/
+/**
+  This function is called to queue an event for user.
+
+  NOTE:
+  Applications call the VIDIOC_QBUF ioctl to enqueue an empty (capturing) or
+  filled (output) buffer in the driver's incoming queue.
+
+  Please refer to tavarua_probe, where we register the different FM ioctls.
+
+  @param radio: structure pointer passed by client.
+  @param event: event to be queued.
+
+  @return None.
+*/
+static void tavarua_q_event(struct tavarua_device *radio,
+				enum tavarua_evt_t event)
+{
+
+	struct kfifo *data_b = &radio->data_buf[TAVARUA_BUF_EVENTS];
+	unsigned char evt = event;
+	FMDBG("updating event_q with event %x\n", event);
+	if (kfifo_in_locked(data_b, &evt, 1, &radio->buf_lock[TAVARUA_BUF_EVENTS]))
+		wake_up_interruptible(&radio->event_queue);
+}
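+
+/*
+ * The queued event bytes reach the application through the V4L2 buffer
+ * interface; a minimal userspace sketch, assuming the events are exposed
+ * via buffer index TAVARUA_BUF_EVENTS and dequeued with VIDIOC_DQBUF:
+ *
+ *	struct v4l2_buffer buf = {0};
+ *	unsigned char evt;
+ *
+ *	buf.index     = TAVARUA_BUF_EVENTS;
+ *	buf.type      = V4L2_BUF_TYPE_PRIVATE;
+ *	buf.m.userptr = (unsigned long)&evt;
+ *	buf.length    = sizeof(evt);
+ *	ioctl(fd, VIDIOC_DQBUF, &buf);
+ *	(evt then holds one of the TAVARUA_EVT_* codes queued above)
+ */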
+
+/*=============================================================================
+FUNCTION:  tavarua_start_xfr
+=============================================================================*/
+/**
+  This function is called to process interrupts which require multiple XFR
+  operations (RDS search, RDS PS, RDS RT, etc.). If an XFR operation is
+  already in progress, we store information about the pending interrupt and
+  process it later, once the current operation is done.
+
+  @param radio: structure pointer passed by client.
+  @param pending_id: XFR operation (which requires multiple XFR operations in
+	steps) to start.
+  @param xfr_id: XFR mode to write in XFRCTRL register.
+
+  @return None.
+*/
+static void tavarua_start_xfr(struct tavarua_device *radio,
+		enum tavarua_xfr_t pending_id, enum tavarua_xfr_ctrl_t xfr_id)
+{
+		if (radio->xfr_in_progress)
+			radio->pending_xfrs[pending_id] = 1;
+		else {
+			radio->xfr_in_progress = 1;
+			request_read_xfr(radio, xfr_id);
+		}
+}
+
+/*=============================================================================
+FUNCTION:  tavarua_handle_interrupts
+=============================================================================*/
+/**
+  This function processes the interrupts.
+
+  NOTE:
+  tavarua_q_event is used to queue events in the application buffer, i.e. the
+  application calls the VIDIOC_QBUF ioctl to enqueue an empty (capturing)
+  buffer, which is filled by the tavarua_q_event call.
+
+  Any async event that requires multiple steps (search, RT, PS, etc.) is
+  handled one at a time; other interrupts are preserved while one is being
+  processed. Sync interrupts are given priority.
+
+  @param radio: structure pointer passed by client.
+
+  @return None.
+*/
+static void tavarua_handle_interrupts(struct tavarua_device *radio)
+{
+	int i;
+	int retval;
+	unsigned char xfr_status;
+	if (!radio->handle_irq) {
+		FMDBG("IRQ happened, but I won't handle it\n");
+		return;
+	}
+	mutex_lock(&radio->lock);
+	tavarua_read_registers(radio, STATUS_REG1, STATUS_REG_NUM);
+
+	FMDBG("INTSTAT1 <%x>\n", radio->registers[STATUS_REG1]);
+	FMDBG("INTSTAT2 <%x>\n", radio->registers[STATUS_REG2]);
+	FMDBG("INTSTAT3 <%x>\n", radio->registers[STATUS_REG3]);
+
+	if (radio->registers[STATUS_REG1] & READY) {
+		complete(&radio->sync_req_done);
+		tavarua_q_event(radio, TAVARUA_EVT_RADIO_READY);
+	}
+
+	/* Tune completed */
+	if (radio->registers[STATUS_REG1] & TUNE) {
+		if (radio->tune_req) {
+			complete(&radio->sync_req_done);
+			radio->tune_req = 0;
+		}
+		tavarua_q_event(radio, TAVARUA_EVT_TUNE_SUCC);
+		if (radio->srch_params.get_list) {
+			tavarua_start_xfr(radio, TAVARUA_XFR_SRCH_LIST,
+							RX_STATIONS_0);
+		}
+		radio->srch_params.get_list = 0;
+		radio->xfr_in_progress = 0;
+		radio->xfr_bytes_left = 0;
+		for (i = 0; i < TAVARUA_BUF_MAX; i++) {
+			if (i >= TAVARUA_BUF_RT_RDS)
+				kfifo_reset(&radio->data_buf[i]);
+		}
+		for (i = 0; i < TAVARUA_XFR_MAX; i++) {
+			if (i >= TAVARUA_XFR_RT_RDS)
+				radio->pending_xfrs[i] = 0;
+		}
+		retval = tavarua_read_registers(radio, TUNECTRL, 1);
+		/* send to user station parameters */
+		if (retval > -1) {
+			/* Signal strength */
+			if (!(radio->registers[TUNECTRL] & SIGSTATE))
+				tavarua_q_event(radio, TAVARUA_EVT_BELOW_TH);
+			else
+				tavarua_q_event(radio, TAVARUA_EVT_ABOVE_TH);
+			/* mono/stereo */
+			if ((radio->registers[TUNECTRL] & MOSTSTATE))
+				tavarua_q_event(radio, TAVARUA_EVT_STEREO);
+			else
+				tavarua_q_event(radio, TAVARUA_EVT_MONO);
+			/* is RDS available */
+			if ((radio->registers[TUNECTRL] & RDSSYNC))
+				tavarua_q_event(radio, TAVARUA_EVT_RDS_AVAIL);
+			else
+				tavarua_q_event(radio,
+						TAVARUA_EVT_RDS_NOT_AVAIL);
+		}
+
+	} else {
+		if (radio->tune_req) {
+			FMDERR("Tune INT is pending\n");
+			mutex_unlock(&radio->lock);
+			return;
+		}
+	}
+	/* Search completed (read FREQ) */
+	if (radio->registers[STATUS_REG1] & SEARCH)
+		tavarua_q_event(radio, TAVARUA_EVT_SEEK_COMPLETE);
+
+	/* Scanning for next station */
+	if (radio->registers[STATUS_REG1] & SCANNEXT)
+		tavarua_q_event(radio, TAVARUA_EVT_SCAN_NEXT);
+
+	/* Signal indicator change (read SIGSTATE) */
+	if (radio->registers[STATUS_REG1] & SIGNAL) {
+		retval = tavarua_read_registers(radio, TUNECTRL, 1);
+		if (retval > -1) {
+			if (!(radio->registers[TUNECTRL] & SIGSTATE))
+				tavarua_q_event(radio, TAVARUA_EVT_BELOW_TH);
+			else
+				tavarua_q_event(radio, TAVARUA_EVT_ABOVE_TH);
+		}
+	}
+
+	/* RDS synchronization state change (read RDSSYNC) */
+	if (radio->registers[STATUS_REG1] & SYNC) {
+		retval = tavarua_read_registers(radio, TUNECTRL, 1);
+		if (retval > -1) {
+			if ((radio->registers[TUNECTRL] & RDSSYNC))
+				tavarua_q_event(radio, TAVARUA_EVT_RDS_AVAIL);
+			else
+				tavarua_q_event(radio,
+						TAVARUA_EVT_RDS_NOT_AVAIL);
+		}
+	}
+
+	/* Audio Control indicator (read AUDIOIND) */
+	if (radio->registers[STATUS_REG1] & AUDIO) {
+		retval = tavarua_read_registers(radio, AUDIOIND, 1);
+		if (retval > -1) {
+			if ((radio->registers[AUDIOIND] & 0x01))
+				tavarua_q_event(radio, TAVARUA_EVT_STEREO);
+			else
+				tavarua_q_event(radio, TAVARUA_EVT_MONO);
+		}
+	}
+
+	/* interrupt register 2 */
+
+	/* New unread RDS data group available */
+	if (radio->registers[STATUS_REG2] & RDSDAT) {
+		FMDBG("Raw RDS Available\n");
+		tavarua_rds_read(radio);
+		tavarua_q_event(radio, TAVARUA_EVT_NEW_RAW_RDS);
+	}
+
+	/* New RDS Program Service Table available */
+	if (radio->registers[STATUS_REG2] & RDSPS) {
+		FMDBG("New PS RDS\n");
+		tavarua_start_xfr(radio, TAVARUA_XFR_PS_RDS, RDS_PS_0);
+	}
+
+	/* New RDS Radio Text available */
+	if (radio->registers[STATUS_REG2] & RDSRT) {
+		FMDBG("New RT RDS\n");
+		tavarua_start_xfr(radio, TAVARUA_XFR_RT_RDS, RDS_RT_0);
+	}
+
+	/* New RDS Radio Text available */
+	if (radio->registers[STATUS_REG2] & RDSAF) {
+		FMDBG("New AF RDS\n");
+		tavarua_start_xfr(radio, TAVARUA_XFR_AF_LIST, RDS_AF_0);
+	}
+	/* Transmitted an RDS Group */
+	if (radio->registers[STATUS_REG2] & TXRDSDAT) {
+		FMDBG("New TXRDSDAT\n");
+		tavarua_q_event(radio, TAVARUA_EVT_TXRDSDAT);
+	}
+
+	/* Complete RDS buffer is available for transmission */
+	if (radio->registers[STATUS_REG2] & TXRDSDONE) {
+		FMDBG("New TXRDSDONE\n");
+		tavarua_q_event(radio, TAVARUA_EVT_TXRDSDONE);
+	}
+	/* interrupt register 3 */
+
+	/* Data transfer (XFR) completed */
+	if (radio->registers[STATUS_REG3] & TRANSFER) {
+		FMDBG("XFR Interrupt\n");
+		tavarua_read_registers(radio, XFRCTRL, XFR_REG_NUM+1);
+		FMDBG("XFRCTRL IS: %x\n", radio->registers[XFRCTRL]);
+		xfr_status = radio->registers[XFRCTRL];
+		switch (xfr_status) {
+		case RDS_PS_0:
+			FMDBG("PS Header\n");
+			copy_from_xfr(radio, TAVARUA_BUF_PS_RDS, 5);
+			radio->xfr_bytes_left = (radio->registers[XFRCTRL+1] &
+								0x0F) * 8;
+			FMDBG("PS RDS Length: %d\n", radio->xfr_bytes_left);
+			if ((radio->xfr_bytes_left > 0) &&
+			    (radio->xfr_bytes_left < 97))
+				request_read_xfr(radio,	RDS_PS_1);
+			else
+				radio->xfr_in_progress = 0;
+			break;
+		case RDS_PS_1:
+		case RDS_PS_2:
+		case RDS_PS_3:
+		case RDS_PS_4:
+		case RDS_PS_5:
+		case RDS_PS_6:
+			FMDBG("PS Data\n");
+			copy_from_xfr(radio, TAVARUA_BUF_PS_RDS, XFR_REG_NUM);
+			radio->xfr_bytes_left -= XFR_REG_NUM;
+			if (radio->xfr_bytes_left > 0) {
+				if ((xfr_status + 1) > RDS_PS_6)
+					request_read_xfr(radio,	RDS_PS_6);
+				else
+					request_read_xfr(radio,	xfr_status+1);
+			} else {
+				radio->xfr_in_progress = 0;
+				tavarua_q_event(radio, TAVARUA_EVT_NEW_PS_RDS);
+			}
+			break;
+		case RDS_RT_0:
+			FMDBG("RT Header\n");
+			copy_from_xfr(radio, TAVARUA_BUF_RT_RDS, 5);
+			radio->xfr_bytes_left = radio->registers[XFRCTRL+1]
+									& 0x7F;
+			FMDBG("RT RDS Length: %d\n", radio->xfr_bytes_left);
+			/* RT_1 to RT_4 are 16-byte registers, so 64 bytes */
+			if ((radio->xfr_bytes_left > 0)
+					 && (radio->xfr_bytes_left < 65))
+				request_read_xfr(radio, RDS_RT_1);
+			break;
+		case RDS_RT_1:
+		case RDS_RT_2:
+		case RDS_RT_3:
+		case RDS_RT_4:
+			FMDBG("xfr interrupt RT data\n");
+			copy_from_xfr(radio, TAVARUA_BUF_RT_RDS, XFR_REG_NUM);
+			radio->xfr_bytes_left -= XFR_REG_NUM;
+			if (radio->xfr_bytes_left > 0)
+				request_read_xfr(radio,	xfr_status+1);
+			else {
+				radio->xfr_in_progress = 0;
+				tavarua_q_event(radio, TAVARUA_EVT_NEW_RT_RDS);
+			}
+			break;
+		case RDS_AF_0:
+			copy_from_xfr(radio, TAVARUA_BUF_AF_LIST,
+						XFR_REG_NUM);
+			radio->xfr_bytes_left = radio->registers[XFRCTRL+5]-11;
+			if (radio->xfr_bytes_left > 0)
+				request_read_xfr(radio,	RDS_AF_1);
+			else
+				radio->xfr_in_progress = 0;
+			break;
+		case RDS_AF_1:
+			copy_from_xfr(radio, TAVARUA_BUF_AF_LIST,
+						radio->xfr_bytes_left);
+			tavarua_q_event(radio, TAVARUA_EVT_NEW_AF_LIST);
+			radio->xfr_in_progress = 0;
+			break;
+		case RX_CONFIG:
+		case RADIO_CONFIG:
+		case RDS_CONFIG:
+			memcpy(radio->sync_xfr_regs,
+				&radio->registers[XFRCTRL+1], XFR_REG_NUM);
+			complete(&radio->sync_req_done);
+			break;
+		case RX_STATIONS_0:
+			FMDBG("Search list has %d stations\n",
+						radio->registers[XFRCTRL+1]);
+			radio->xfr_bytes_left = radio->registers[XFRCTRL+1]*2;
+			if (radio->xfr_bytes_left > 14) {
+				copy_from_xfr(radio, TAVARUA_BUF_SRCH_LIST,
+							XFR_REG_NUM);
+				request_read_xfr(radio,	RX_STATIONS_1);
+			} else if (radio->xfr_bytes_left) {
+				FMDBG("In else RX_STATIONS_0\n");
+				copy_from_xfr(radio, TAVARUA_BUF_SRCH_LIST,
+						radio->xfr_bytes_left+1);
+				tavarua_q_event(radio,
+						TAVARUA_EVT_NEW_SRCH_LIST);
+				radio->xfr_in_progress = 0;
+			}
+			break;
+		case RX_STATIONS_1:
+			FMDBG("In RX_STATIONS_1");
+			copy_from_xfr(radio, TAVARUA_BUF_SRCH_LIST,
+						radio->xfr_bytes_left);
+			tavarua_q_event(radio, TAVARUA_EVT_NEW_SRCH_LIST);
+			radio->xfr_in_progress = 0;
+			break;
+		case PHY_TXGAIN:
+			FMDBG("read PHY_TXGAIN is successful");
+			complete(&radio->sync_req_done);
+			break;
+		case (0x80 | RX_CONFIG):
+		case (0x80 | RADIO_CONFIG):
+		case (0x80 | RDS_CONFIG):
+		case (0x80 | INT_CTRL):
+			complete(&radio->sync_req_done);
+			break;
+		case (0x80 | RDS_RT_0):
+			FMDBG("RT Header Sent\n");
+			complete(&radio->sync_req_done);
+			break;
+		case (0x80 | RDS_RT_1):
+		case (0x80 | RDS_RT_2):
+		case (0x80 | RDS_RT_3):
+		case (0x80 | RDS_RT_4):
+			FMDBG("xfr interrupt RT data Sent\n");
+			complete(&radio->sync_req_done);
+			break;
+		/*TX Specific transfer */
+		case (0x80 | RDS_PS_0):
+			FMDBG("PS Header Sent\n");
+			complete(&radio->sync_req_done);
+			break;
+		case (0x80 | RDS_PS_1):
+		case (0x80 | RDS_PS_2):
+		case (0x80 | RDS_PS_3):
+		case (0x80 | RDS_PS_4):
+		case (0x80 | RDS_PS_5):
+		case (0x80 | RDS_PS_6):
+			FMDBG("xfr interrupt PS data Sent\n");
+			complete(&radio->sync_req_done);
+			break;
+		case (0x80 | PHY_TXGAIN):
+			FMDBG("write PHY_TXGAIN is successful");
+			complete(&radio->sync_req_done);
+			break;
+		default:
+			FMDERR("UNKNOWN XFR = %d\n", xfr_status);
+		}
+		if (!radio->xfr_in_progress)
+			start_pending_xfr(radio);
+
+	}
+
+	/* Error occurred. Read ERRCODE to determine cause */
+	if (radio->registers[STATUS_REG3] & ERROR) {
+#ifdef FM_DEBUG
+		unsigned char xfr_buf[XFR_REG_NUM];
+		int retval = sync_read_xfr(radio, ERROR_CODE, xfr_buf);
+		FMDBG("retval of ERROR_CODE read : %d\n", retval);
+#endif
+		FMDERR("ERROR STATE\n");
+	}
+
+	mutex_unlock(&radio->lock);
+	FMDBG("Work is done\n");
+
+}
+
+/*=============================================================================
+FUNCTION:  read_int_stat
+=============================================================================*/
+/**
+  This function is scheduled whenever there is an interrupt pending in the
+  interrupt queue, i.e. kfmradio.
+
+  Whenever there is a GPIO interrupt, a delayed work item is queued on the
+  'kfmradio' work queue. When that work item executes, read_int_stat is
+  called, which in turn handles the interrupts by reading the INTSTATx
+  registers.
+  NOTE:
+  Tasks to be run out of a workqueue need to be packaged in a struct
+  work_struct structure.
+
+  @param work: work_struct structure.
+
+  @return None.
+*/
+static void read_int_stat(struct work_struct *work)
+{
+	struct tavarua_device *radio = container_of(work,
+					struct tavarua_device, work.work);
+	tavarua_handle_interrupts(radio);
+}
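+
+/*
+ * A sketch of how this work item would typically be scheduled from the
+ * interrupt handler (tavarua_isr is registered further below; the delay
+ * shown here is an assumption for illustration only):
+ *
+ *	static irqreturn_t tavarua_isr(int irq, void *dev_id)
+ *	{
+ *		struct tavarua_device *radio = dev_id;
+ *
+ *		queue_delayed_work(radio->wqueue, &radio->work,
+ *				msecs_to_jiffies(TAVARUA_DELAY));
+ *		return IRQ_HANDLED;
+ *	}
+ */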
+
+/*************************************************************************
+ * irq helper functions
+ ************************************************************************/
+
+/*=============================================================================
+FUNCTION:  tavarua_request_irq
+=============================================================================*/
+/**
+  This function is called to acquire a FM GPIO and enable FM interrupts.
+
+  @param radio: structure pointer passed by client.
+
+  @return 0 on success, otherwise a negative error code.
+*/
+static int tavarua_request_irq(struct tavarua_device *radio)
+{
+	int retval;
+	int irq;
+
+	if (radio == NULL)
+		return -EINVAL;
+	irq = radio->pdata->irq;
+
+  /* A workqueue created with create_workqueue() will have one worker thread
+   * for each CPU on the system; create_singlethread_workqueue(), instead,
+   * creates a workqueue with a single worker process. The name of the queue
+   * is limited to ten characters; it is only used for generating the "command"
+   * for the kernel thread(s) (which can be seen in ps or top).
+   */
+	radio->wqueue  = create_singlethread_workqueue("kfmradio");
+	if (!radio->wqueue)
+		return -ENOMEM;
+  /* allocate an interrupt line */
+  /* On success, request_irq() returns 0 if everything goes  as
+     planned.  Your interrupt handler will start receiving its
+     interrupts immediately. On failure, request_irq()
+     returns:
+	-EINVAL
+		The IRQ number you requested was either
+		invalid or reserved, or you passed a NULL
+		pointer for the handler() parameter.
+
+	-EBUSY The  IRQ you requested is already being
+		handled, and the IRQ cannot  be  shared.
+
+	-ENXIO The m68k returns this value for  an  invalid
+		IRQ number.
+  */
+	/* Use request_any_context_irq() so that this works for both nested
+	and non-nested interrupts; on MSM8x60, FM is connected to a PMIC GPIO,
+	which is a nested interrupt. */
+	retval = request_any_context_irq(irq, tavarua_isr,
+				IRQ_TYPE_EDGE_FALLING, "fm interrupt", radio);
+	if (retval < 0) {
+		FMDERR("Couldn't acquire FM gpio %d\n", irq);
+		return retval;
+	} else {
+		FMDBG("FM GPIO %d registered\n", irq);
+	}
+	retval = enable_irq_wake(irq);
+	if (retval < 0) {
+		FMDERR("Could not enable FM interrupt\n ");
+		free_irq(irq , radio);
+	}
+	return retval;
+}
+
+/*=============================================================================
+FUNCTION:  tavarua_disable_irq
+=============================================================================*/
+/**
+  This function is called to disable FM irq and free up FM interrupt handling
+  resources.
+
+  @param radio: structure pointer passed by client.
+
+  @return 0 on success, otherwise a negative error code.
+*/
+static int tavarua_disable_irq(struct tavarua_device *radio)
+{
+	int irq;
+	if (!radio)
+		return -EINVAL;
+	irq = radio->pdata->irq;
+	disable_irq_wake(irq);
+	cancel_delayed_work_sync(&radio->work);
+	flush_workqueue(radio->wqueue);
+	free_irq(irq, radio);
+	destroy_workqueue(radio->wqueue);
+	return 0;
+}
+
+/*************************************************************************
+ * fops/IOCTL helper functions
+ ************************************************************************/
+
+/*=============================================================================
+FUNCTION:  tavarua_search
+=============================================================================*/
+/**
+  This interface sets the search control features.
+
+  @param radio: structure pointer passed by client.
+  @param on: The value of a control.
+  @param dir: FM search direction.
+
+  @return => 0 if successful.
+  @return < 0 if failure.
+*/
+static int tavarua_search(struct tavarua_device *radio, int on, int dir)
+{
+	enum search_t srch = radio->registers[SRCHCTRL] & SRCH_MODE;
+
+	FMDBG("In tavarua_search\n");
+	if (on) {
+		radio->registers[SRCHRDS1] = 0x00;
+		radio->registers[SRCHRDS2] = 0x00;
+		/* Set freq band */
+		switch (srch) {
+		case SCAN_FOR_STRONG:
+		case SCAN_FOR_WEAK:
+			radio->srch_params.get_list = 1;
+			radio->registers[SRCHRDS2] =
+					radio->srch_params.preset_num;
+			break;
+		case RDS_SEEK_PTY:
+		case RDS_SCAN_PTY:
+			radio->registers[SRCHRDS2] =
+					radio->srch_params.srch_pty;
+			break;
+		case RDS_SEEK_PI:
+			radio->registers[SRCHRDS1] =
+				(radio->srch_params.srch_pi & 0xFF00) >> 8;
+			radio->registers[SRCHRDS2] =
+				(radio->srch_params.srch_pi & 0x00FF);
+			break;
+		default:
+			break;
+		}
+		radio->registers[SRCHCTRL] |= SRCH_ON;
+	} else {
+		radio->registers[SRCHCTRL] &= ~SRCH_ON;
+		radio->srch_params.get_list = 0;
+	}
+	radio->registers[SRCHCTRL] = (dir << 3) |
+				(radio->registers[SRCHCTRL] & 0xF7);
+
+	FMDBG("SRCHCTRL <%x>\n", radio->registers[SRCHCTRL]);
+	FMDBG("Search Started\n");
+	return tavarua_write_registers(radio, SRCHRDS1,
+				&radio->registers[SRCHRDS1], 3);
+}
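+
+/*
+ * SRCHCTRL bit usage, as implied by the code above and by
+ * tavarua_vidioc_g_ctrl() further below:
+ *
+ *	bits [2:0]  SRCH_MODE    seek/scan/RDS search variant
+ *	bit  [3]    direction    written as (dir << 3) above
+ *	bits [6:4]  SCAN_DWELL   scan dwell time
+ *	bit  [7]    SRCH_ON      search on/off
+ */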
+
+/*=============================================================================
+FUNCTION:  tavarua_set_region
+=============================================================================*/
+/**
+  This interface configures the FM radio.
+
+  @param radio: structure pointer passed by client.
+  @param req_region: FM band type. These types define the FM band minimum and
+  maximum frequencies.
+
+  @return => 0 if successful.
+  @return < 0 if failure.
+*/
+static int tavarua_set_region(struct tavarua_device *radio,
+				int req_region)
+{
+	int retval = 0;
+	unsigned char xfr_buf[XFR_REG_NUM];
+	unsigned char value;
+	unsigned int spacing = 0.100 * FREQ_MUL;
+	unsigned int band_low, band_high;
+	unsigned int low_band_limit = 76.0 * FREQ_MUL;
+	enum tavarua_region_t region = req_region;
+
+	/* Set freq band */
+	switch (region) {
+	case TAVARUA_REGION_US:
+	case TAVARUA_REGION_EU:
+	case TAVARUA_REGION_JAPAN_WIDE:
+		SET_REG_FIELD(radio->registers[RDCTRL], 0,
+			RDCTRL_BAND_OFFSET, RDCTRL_BAND_MASK);
+		break;
+	case TAVARUA_REGION_JAPAN:
+		SET_REG_FIELD(radio->registers[RDCTRL], 1,
+			RDCTRL_BAND_OFFSET, RDCTRL_BAND_MASK);
+		break;
+	default:
+		retval = sync_read_xfr(radio, RADIO_CONFIG, xfr_buf);
+		if (retval < 0) {
+			FMDERR("failed to get RADIO_CONFIG\n");
+			return retval;
+		}
+		band_low = (radio->region_params.band_low -
+					low_band_limit) / spacing;
+		band_high = (radio->region_params.band_high -
+					low_band_limit) / spacing;
+		FMDBG("low_band: %x, high_band: %x\n", band_low, band_high);
+		xfr_buf[0] = band_low >> 8;
+		xfr_buf[1] = band_low & 0xFF;
+		xfr_buf[2] = band_high >> 8;
+		xfr_buf[3] = band_high & 0xFF;
+		retval = sync_write_xfr(radio, RADIO_CONFIG, xfr_buf);
+		if (retval < 0) {
+			FMDERR("Could not set regional settings\n");
+			return retval;
+		}
+		break;
+	}
+
+	/* Set channel spacing */
+	switch (region) {
+	case TAVARUA_REGION_US:
+	case TAVARUA_REGION_EU:
+		value = 0;
+		break;
+	case TAVARUA_REGION_JAPAN:
+		value = 1;
+		break;
+	case TAVARUA_REGION_JAPAN_WIDE:
+		value = 2;
+		break;
+	default:
+		value = radio->region_params.spacing;
+	}
+
+	SET_REG_FIELD(radio->registers[RDCTRL], value,
+		RDCTRL_CHSPACE_OFFSET, RDCTRL_CHSPACE_MASK);
+
+	/* Set De-emphasis and soft band range*/
+	switch (region) {
+	case TAVARUA_REGION_US:
+	case TAVARUA_REGION_JAPAN:
+	case TAVARUA_REGION_JAPAN_WIDE:
+		value = 0;
+		break;
+	case TAVARUA_REGION_EU:
+		value = 1;
+		break;
+	default:
+		value = radio->region_params.emphasis;
+	}
+
+	SET_REG_FIELD(radio->registers[RDCTRL], value,
+		RDCTRL_DEEMPHASIS_OFFSET, RDCTRL_DEEMPHASIS_MASK);
+
+	/* set RDS standard */
+	switch (region) {
+	default:
+		value = radio->region_params.rds_std;
+		break;
+	case TAVARUA_REGION_US:
+		value = 0;
+		break;
+	case TAVARUA_REGION_EU:
+		value = 1;
+		break;
+	}
+	SET_REG_FIELD(radio->registers[RDSCTRL], value,
+		RDSCTRL_STANDARD_OFFSET, RDSCTRL_STANDARD_MASK);
+
+	FMDBG("RDSCTRLL %x\n", radio->registers[RDSCTRL]);
+	retval = tavarua_write_register(radio, RDSCTRL,
+					radio->registers[RDSCTRL]);
+	if (retval < 0)
+		return retval;
+
+	FMDBG("RDCTRL: %x\n", radio->registers[RDCTRL]);
+	retval = tavarua_write_register(radio, RDCTRL,
+					radio->registers[RDCTRL]);
+	if (retval < 0) {
+		FMDERR("Could not set region in rdctrl\n");
+		return retval;
+	}
+
+	/* setting soft band */
+	switch (region) {
+	case TAVARUA_REGION_US:
+	case TAVARUA_REGION_EU:
+		radio->region_params.band_low = 87.5 * FREQ_MUL;
+		radio->region_params.band_high = 108 * FREQ_MUL;
+		break;
+	case TAVARUA_REGION_JAPAN:
+		radio->region_params.band_low = 76 * FREQ_MUL;
+		radio->region_params.band_high = 90 * FREQ_MUL;
+		break;
+	case TAVARUA_REGION_JAPAN_WIDE:
+		radio->region_params.band_low = 90 * FREQ_MUL;
+		radio->region_params.band_high = 108 * FREQ_MUL;
+		break;
+	default:
+		break;
+	}
+	radio->region_params.region = region;
+	return retval;
+}
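+
+/*
+ * Worked example for the custom-band path above: for a band of
+ * 88.0 - 108.0 MHz (region_params.band_low = 88.0 * FREQ_MUL, band_high =
+ * 108.0 * FREQ_MUL) with 100 kHz spacing,
+ *
+ *	band_low  = (88.0 - 76.0) / 0.1  = 120 = 0x0078
+ *	band_high = (108.0 - 76.0) / 0.1 = 320 = 0x0140
+ *
+ * so the four RADIO_CONFIG bytes sent are 0x00 0x78 0x01 0x40.
+ */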
+
+/*=============================================================================
+FUNCTION:  tavarua_get_freq
+=============================================================================*/
+/**
+  This interface gets the current frequency.
+
+  @param radio: structure pointer passed by client.
+  @param freq: struct v4l2_frequency. This will be set to the resultant
+  frequency in units of 62.5 kHz on success.
+
+  NOTE:
+  To get the current tuner or modulator radio frequency applications set the
+  tuner field of a struct v4l2_frequency to the respective tuner or modulator
+  number (only input devices have tuners, only output devices have modulators),
+  zero out the reserved array and call the VIDIOC_G_FREQUENCY ioctl with a
+  pointer to this structure. The driver stores the current frequency in the
+  frequency field.
+
+  Tuning frequency is in units of 62.5 kHz, or if the struct v4l2_tuner or
+  struct v4l2_modulator capabilities flag V4L2_TUNER_CAP_LOW is set, in
+  units of 62.5 Hz.
+
+  @return => 0 if successful.
+  @return < 0 if failure.
+*/
+static int tavarua_get_freq(struct tavarua_device *radio,
+				struct v4l2_frequency *freq)
+{
+	int retval;
+	unsigned short chan;
+	unsigned int band_bottom;
+	unsigned int spacing;
+	band_bottom = radio->region_params.band_low;
+	spacing  = 0.100 * FREQ_MUL;
+	/* read channel */
+	retval = tavarua_read_registers(radio, FREQ, 2);
+	chan = radio->registers[FREQ];
+
+	/* Frequency (MHz) = 100 (kHz) x Channel + Bottom of Band (MHz) */
+	freq->frequency = spacing * chan + band_bottom;
+	if (radio->registers[TUNECTRL] & ADD_OFFSET)
+		freq->frequency += 800;
+	return retval;
+}
+
+/*=============================================================================
+FUNCTION:  tavarua_set_freq
+=============================================================================*/
+/**
+  This interface sets the current frequency.
+
+  @param radio: structure pointer passed by client.
+  @param freq: desired frequency sent by the client in 62.5 kHz units.
+
+  NOTE:
+  To change the current tuner or modulator radio frequency, applications
+  initialize the tuner, type and frequency fields, and the reserved array of a
+  struct v4l2_frequency and call the VIDIOC_S_FREQUENCY ioctl with a pointer to
+  this structure. When the requested frequency is not possible the driver
+  assumes the closest possible value. However VIDIOC_S_FREQUENCY is a
+  write-only ioctl, it does not return the actual new frequency.
+
+  Tuning frequency is in units of 62.5 kHz, or if the struct v4l2_tuner
+  or struct v4l2_modulator capabilities flag V4L2_TUNER_CAP_LOW is set,
+  in units of 62.5 Hz.
+
+  @return => 0 if successful.
+  @return < 0 if failure.
+*/
+static int tavarua_set_freq(struct tavarua_device *radio, unsigned int freq)
+{
+
+	unsigned int band_bottom;
+	unsigned char chan;
+	unsigned char cmd[] = {0x00, 0x00};
+	unsigned int spacing;
+	int retval;
+	band_bottom = radio->region_params.band_low;
+	spacing  = 0.100 * FREQ_MUL;
+	if ((freq % 1600) == 800) {
+		cmd[1] = ADD_OFFSET;
+		freq -= 800;
+	}
+	/* Chan = [ Freq (MHz) - Bottom of Band (MHz) ] / 100 (kHz) */
+	chan = (freq - band_bottom) / spacing;
+
+	cmd[0] = chan;
+	cmd[1] |= TUNE_STATION;
+	radio->tune_req = 1;
+	retval = tavarua_write_registers(radio, FREQ, cmd, 2);
+	if (retval < 0)
+		radio->tune_req = 0;
+	return retval;
+
+}
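+
+/*
+ * Worked example for the channel arithmetic above (and its inverse in
+ * tavarua_get_freq), with a band bottom of 87.5 MHz and 100 kHz spacing,
+ * assuming FREQ_MUL = 1000000 / 62.5 so that freq is in 62.5 Hz units:
+ *
+ *	98.10 MHz -> freq = 1569600, chan = (1569600 - 1400000) / 1600 = 106
+ *	98.15 MHz -> freq = 1570400, freq % 1600 == 800, so ADD_OFFSET is
+ *		     set, freq -= 800 and chan is again 106
+ */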
+
+/**************************************************************************
+ * File Operations Interface
+ *************************************************************************/
+
+/*=============================================================================
+FUNCTION:  tavarua_fops_read
+=============================================================================*/
+/**
+  This function is called when a process, which already opened the dev file,
+  attempts to read from it.
+
+  In case of tavarua driver, it is called to read RDS data.
+
+  @param file: file descriptor.
+	@param buf: The buffer to fill with data.
+	@param count: The length of the buffer in bytes.
+	@param ppos: Our offset in the file.
+
+  @return The number of bytes put into the buffer on success.
+	-EFAULT if there is no access to the user buffer.
+*/
+static ssize_t tavarua_fops_read(struct file *file, char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	struct tavarua_device *radio = video_get_drvdata(video_devdata(file));
+	struct kfifo *rds_buf = &radio->data_buf[TAVARUA_BUF_RAW_RDS];
+
+	/* block if no new data available */
+	while (!kfifo_len(rds_buf)) {
+		if (file->f_flags & O_NONBLOCK)
+			return -EWOULDBLOCK;
+		if (wait_event_interruptible(radio->read_queue,
+			kfifo_len(rds_buf)) < 0)
+			return -EINTR;
+	}
+
+	/* calculate block count from byte count */
+	count /= BYTES_PER_BLOCK;
+
+
+	/* check if we can write to the user buffer */
+	if (!access_ok(VERIFY_WRITE, buf, count*BYTES_PER_BLOCK))
+		return -EFAULT;
+
+	/* copy RDS block out of internal buffer and to user buffer */
+	return kfifo_out_locked(rds_buf, buf, count*BYTES_PER_BLOCK,
+				&radio->buf_lock[TAVARUA_BUF_RAW_RDS]);
+}
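+
+/*
+ * Minimal userspace sketch of this read path, assuming the radio device
+ * node is /dev/radio0 and that BYTES_PER_BLOCK is the size of one raw RDS
+ * block:
+ *
+ *	unsigned char blocks[16 * BYTES_PER_BLOCK];
+ *	int fd = open("/dev/radio0", O_RDONLY);
+ *	ssize_t n = read(fd, blocks, sizeof(blocks));
+ *
+ * read() blocks until RDS data is available (unless O_NONBLOCK is set) and
+ * returns a whole number of RDS blocks.
+ */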
+
+/*=============================================================================
+FUNCTION:  tavarua_fops_write
+=============================================================================*/
+/**
+  This function is called when a process, which already opened the dev file,
+  attempts to write to it.
+
+  In the case of the tavarua driver, it is called to write RDS data to the
+  FM core for transmission.
+
+  @param file: file descriptor.
+	@param data: The buffer which has the data to write.
+	@param count: The length of the buffer.
+	@param ppos: Our offset in the file.
+
+  @return The number of bytes written from the buffer.
+*/
+static ssize_t tavarua_fops_write(struct file *file, const char __user *data,
+			size_t count, loff_t *ppos)
+{
+	struct tavarua_device *radio = video_get_drvdata(video_devdata(file));
+	int retval = 0;
+	int bytes_to_copy;
+	int bytes_copied = 0;
+	int bytes_left;
+	int chunk_index = 0;
+	unsigned char tx_data[XFR_REG_NUM];
+	/* Disable TX of this type first */
+	switch (radio->tx_mode) {
+	case TAVARUA_TX_RT:
+		bytes_left = min((int)count, MAX_RT_LENGTH);
+		tx_data[1] = 0;
+		break;
+	case TAVARUA_TX_PS:
+		bytes_left = min((int)count, MAX_PS_LENGTH);
+		tx_data[4] = 0;
+		break;
+	default:
+		FMDERR("%s: Unknown TX mode\n", __func__);
+		return -1;
+	}
+	retval = sync_write_xfr(radio, radio->tx_mode, tx_data);
+	if (retval < 0)
+		return retval;
+
+	/* send payload to FM hardware */
+	while (bytes_left) {
+		chunk_index++;
+		bytes_to_copy = min(bytes_left, XFR_REG_NUM);
+		if (copy_from_user(tx_data, data + bytes_copied, bytes_to_copy))
+			return -EFAULT;
+		retval = sync_write_xfr(radio, radio->tx_mode +
+						chunk_index, tx_data);
+		if (retval < 0)
+			return retval;
+
+		bytes_copied += bytes_to_copy;
+		bytes_left -= bytes_to_copy;
+	}
+
+	/* send the header */
+	switch (radio->tx_mode) {
+	case TAVARUA_TX_RT:
+		FMDBG("Writing RT header\n");
+		tx_data[0] = bytes_copied;
+		tx_data[1] = TX_ON | 0x03; /* on | PTY */
+		tx_data[2] = 0x12; /* PI high */
+		tx_data[3] = 0x34; /* PI low */
+		break;
+	case TAVARUA_TX_PS:
+		FMDBG("Writing PS header\n");
+		tx_data[0] = chunk_index;
+		tx_data[1] = 0x03; /* PTY */
+		tx_data[2] = 0x12; /* PI high */
+		tx_data[3] = 0x34; /* PI low */
+		tx_data[4] = TX_ON | 0x01;
+		break;
+	default:
+		FMDERR("%s: Unknown TX mode\n", __func__);
+		return -1;
+	}
+	retval = sync_write_xfr(radio, radio->tx_mode, tx_data);
+	if (retval < 0)
+		return retval;
+	FMDBG("done writing: %d\n", retval);
+	return bytes_copied;
+}
+
+/*=============================================================================
+FUNCTION:  tavarua_fops_open
+=============================================================================*/
+/**
+  This function is called when a process tries to open the device file, like
+	"cat /dev/mycharfile"
+
+  @param file: file descriptor.
+
+  @return => 0 if successful.
+  @return < 0 if failure.
+*/
+static int tavarua_fops_open(struct file *file)
+{
+	struct tavarua_device *radio = video_get_drvdata(video_devdata(file));
+	int retval = -ENODEV;
+	unsigned char value;
+	/* FM core bring up */
+	int i = 0;
+	char fm_ctl0_part1[] = { 0xCA, 0xCE, 0xD6 };
+	char fm_ctl1[] = { 0x03 };
+	char fm_ctl0_part2[] = { 0xB6, 0xB7 };
+	char buffer[] = {0x00, 0x48, 0x8A, 0x8E, 0x97, 0xB7};
+	int bahama_present = -ENODEV;
+
+	mutex_lock(&radio->lock);
+	if (radio->users) {
+		mutex_unlock(&radio->lock);
+		return -EBUSY;
+	} else {
+		radio->users++;
+	}
+	mutex_unlock(&radio->lock);
+
+	/* initial gpio pin config & Power up */
+	retval = radio->pdata->fm_setup(radio->pdata);
+	if (retval) {
+		printk(KERN_ERR "%s: failed config gpio & pmic\n", __func__);
+		goto open_err_setup;
+	}
+	if (radio->pdata->config_i2s_gpio != NULL) {
+		retval = radio->pdata->config_i2s_gpio(FM_I2S_ON);
+		if (retval) {
+			printk(KERN_ERR "%s: failed config gpio\n", __func__);
+			goto config_i2s_err;
+		}
+	}
+	/* enable irq */
+	retval = tavarua_request_irq(radio);
+	if (retval < 0) {
+		printk(KERN_ERR "%s: failed to request irq\n", __func__);
+		goto open_err_req_irq;
+	}
+	/* call top level marimba interface here to enable FM core */
+	FMDBG("initializing SoC\n");
+
+	bahama_present = is_bahama();
+
+	if (bahama_present == -ENODEV)
+		return -ENODEV;
+
+	if (bahama_present)
+		radio->marimba->mod_id = SLAVE_ID_BAHAMA;
+	else
+		radio->marimba->mod_id = MARIMBA_SLAVE_ID_MARIMBA;
+
+	value = FM_ENABLE;
+	retval = marimba_write_bit_mask(radio->marimba,
+			MARIMBA_XO_BUFF_CNTRL, &value, 1, value);
+	if (retval < 0) {
+		printk(KERN_ERR "%s:XO_BUFF_CNTRL write failed\n",
+					__func__);
+		goto open_err_all;
+	}
+
+
+	/* Bring up FM core */
+	if (bahama_present)	{
+
+		radio->marimba->mod_id = SLAVE_ID_BAHAMA;
+		/* Read the Bahama version*/
+		retval = marimba_read_bit_mask(radio->marimba,
+				0x00,  &bahama_version, 1, 0x1F);
+		if (retval < 0) {
+			printk(KERN_ERR "%s: version read failed",
+				__func__);
+			goto open_err_all;
+		}
+		/* Check for Bahama V2 variant*/
+		if (bahama_version == 0x09)	{
+
+			/* In case of Bahama v2, forcefully enable the
+			 * internal analog and digital voltage controllers
+			 */
+			value = 0x06;
+			/* value itself used as mask in these writes*/
+			retval = marimba_write_bit_mask(radio->marimba,
+			BAHAMA_LDO_DREG_CTL0, &value, 1, value);
+			if (retval < 0) {
+				printk(KERN_ERR "%s:0xF0 write failed\n",
+					__func__);
+				goto open_err_all;
+			}
+			value = 0x86;
+			retval = marimba_write_bit_mask(radio->marimba,
+				BAHAMA_LDO_AREG_CTL0, &value, 1, value);
+			if (retval < 0) {
+				printk(KERN_ERR "%s:0xF4 write failed\n",
+					__func__);
+				goto open_err_all;
+			}
+		}
+
+		/*write FM mode*/
+		retval = tavarua_write_register(radio, BAHAMA_FM_MODE_REG,
+					BAHAMA_FM_MODE_NORMAL);
+		if (retval < 0) {
+			printk(KERN_ERR "failed to set the FM mode: %d\n",
+					retval);
+			goto open_err_all;
+		}
+		/*Write first sequence of bytes to FM_CTL0*/
+		for (i = 0; i < 3; i++)  {
+			retval = tavarua_write_register(radio,
+					BAHAMA_FM_CTL0_REG, fm_ctl0_part1[i]);
+			if (retval < 0) {
+				printk(KERN_ERR "FM_CTL0:set-1 failure: %d\n",
+							retval);
+				goto open_err_all;
+			}
+		}
+		/*Write the FM_CTL1 sequence*/
+		for (i = 0; i < 1; i++)  {
+			retval = tavarua_write_register(radio,
+					BAHAMA_FM_CTL1_REG, fm_ctl1[i]);
+			if (retval < 0) {
+				printk(KERN_ERR "FM_CTL1 write failure: %d\n",
+							retval);
+				goto open_err_all;
+			}
+		}
+		/*Write second sequence of bytes to FM_CTL0*/
+		for (i = 0; i < 2; i++)  {
+			retval = tavarua_write_register(radio,
+					BAHAMA_FM_CTL0_REG, fm_ctl0_part2[i]);
+			if (retval < 0) {
+				printk(KERN_ERR "FM_CTL0:set-2 failure: %d\n",
+					retval);
+				goto open_err_all;
+			}
+		}
+	} else {
+		retval = tavarua_write_registers(radio, LEAKAGE_CNTRL,
+						buffer, 6);
+		if (retval < 0) {
+			printk(KERN_ERR "%s: failed to bring up FM Core\n",
+						__func__);
+			goto open_err_all;
+		}
+	}
+	/* Wait for interrupt i.e. complete(&radio->sync_req_done); call */
+	/* Initialize the completion variable for
+	the proper behavior */
+	init_completion(&radio->sync_req_done);
+	if (!wait_for_completion_timeout(&radio->sync_req_done,
+		msecs_to_jiffies(wait_timeout))) {
+		retval = -1;
+		FMDERR("Timeout waiting for initialization\n");
+	}
+
+	/* get Chip ID */
+	retval = tavarua_write_register(radio, XFRCTRL, CHIPID);
+	if (retval < 0)
+		goto open_err_all;
+	msleep(TAVARUA_DELAY);
+	tavarua_read_registers(radio, XFRCTRL, XFR_REG_NUM+1);
+	if (radio->registers[XFRCTRL] != CHIPID)
+		goto open_err_all;
+
+	radio->chipID = (radio->registers[XFRCTRL+2] << 24) |
+			(radio->registers[XFRCTRL+5] << 16) |
+			(radio->registers[XFRCTRL+6] << 8)  |
+			(radio->registers[XFRCTRL+7]);
+
+	printk(KERN_WARNING DRIVER_NAME ": Chip ID %x\n", radio->chipID);
+	if (radio->chipID == MARIMBA_A0) {
+		printk(KERN_WARNING DRIVER_NAME ": Unsupported hardware: %x\n",
+						radio->chipID);
+		retval = -1;
+		goto open_err_all;
+	}
+
+	radio->handle_irq = 0;
+	radio->marimba->mod_id = SLAVE_ID_BAHAMA;
+	marimba_set_fm_status(radio->marimba, true);
+	return 0;
+
+
+open_err_all:
+    /*Disable FM in case of error*/
+	value = 0x00;
+	marimba_write_bit_mask(radio->marimba, MARIMBA_XO_BUFF_CNTRL,
+							&value, 1, value);
+	tavarua_disable_irq(radio);
+open_err_req_irq:
+	if (radio->pdata->config_i2s_gpio != NULL)
+		radio->pdata->config_i2s_gpio(FM_I2S_OFF);
+config_i2s_err:
+	radio->pdata->fm_shutdown(radio->pdata);
+open_err_setup:
+	radio->handle_irq = 1;
+	radio->users = 0;
+	return retval;
+}
+
+/*=============================================================================
+FUNCTION:  tavarua_fops_release
+=============================================================================*/
+/**
+  This function is called when a process closes the device file.
+
+  @param file: file descriptor.
+
+  @return => 0 if successful.
+  @return < 0 if failure.
+*/
+static int tavarua_fops_release(struct file *file)
+{
+	int retval;
+	struct tavarua_device *radio = video_get_drvdata(video_devdata(file));
+	unsigned char value;
+	int i = 0;
+	/*FM Core shutdown sequence for Bahama*/
+	char fm_ctl0_part1[] = { 0xB7 };
+	char fm_ctl1[] = { 0x03 };
+	char fm_ctl0_part2[] = { 0x9F, 0x48, 0x02 };
+	int bahama_present = -ENODEV;
+	/*FM Core shutdown sequence for Marimba*/
+	char buffer[] = {0x18, 0xB7, 0x48};
+	bool bt_status = false;
+	int index;
+	/* internal regulator controllers DREG_CTL0, AREG_CTL0
+	 * has to be kept in the valid state based on the bt status.
+	 * 1st row is the state when no clients are active,
+	 * and the second when bt is in on state.
+	 */
+	char internal_vreg_ctl[2][2] = {
+		{ 0x04, 0x84 },
+		{ 0x00, 0x80 }
+	};
+
+	if (!radio)
+		return -ENODEV;
+	FMDBG("In %s", __func__);
+
+	/* disable radio ctrl */
+	retval = tavarua_write_register(radio, RDCTRL, 0x00);
+
+	FMDBG("%s, Disable IRQs\n", __func__);
+	/* disable irq */
+	retval = tavarua_disable_irq(radio);
+	if (retval < 0) {
+		printk(KERN_ERR "%s: failed to disable irq\n", __func__);
+		return retval;
+	}
+
+	bahama_present = is_bahama();
+
+	if (bahama_present == -ENODEV)
+		return -ENODEV;
+
+	if (bahama_present)	{
+		/*Write first sequence of bytes to FM_CTL0*/
+		for (i = 0; i < 1; i++) {
+			retval = tavarua_write_register(radio,
+					BAHAMA_FM_CTL0_REG, fm_ctl0_part1[i]);
+			if (retval < 0) {
+				printk(KERN_ERR "FM_CTL0:Set-1 failure: %d\n",
+						retval);
+				break;
+			}
+		}
+		/*Write the FM_CTL1 sequence*/
+		for (i = 0; i < 1; i++)  {
+			retval = tavarua_write_register(radio,
+					BAHAMA_FM_CTL1_REG, fm_ctl1[i]);
+			if (retval < 0) {
+				printk(KERN_ERR "FM_CTL1 failure: %d\n",
+						retval);
+				break;
+			}
+		}
+		/*Write second sequence of bytes to FM_CTL0*/
+		for (i = 0; i < 3; i++)   {
+			retval = tavarua_write_register(radio,
+					BAHAMA_FM_CTL0_REG, fm_ctl0_part2[i]);
+			if (retval < 0) {
+				printk(KERN_ERR "FM_CTL0:Set-2 failure: %d\n",
+						retval);
+				break;
+			}
+		}
+	}	else	{
+
+		retval = tavarua_write_registers(radio, FM_CTL0,
+				buffer, sizeof(buffer)/sizeof(buffer[0]));
+		if (retval < 0) {
+			printk(KERN_ERR "%s: failed to bring down the FM Core\n",
+							__func__);
+			return retval;
+		}
+	}
+	radio->marimba->mod_id = SLAVE_ID_BAHAMA;
+	bt_status = marimba_get_bt_status(radio->marimba);
+	/* Set the index based on the bt status*/
+	index = bt_status ?  1 : 0;
+	/* Check for Bahama's existence and the Bahama V2 variant */
+	if (bahama_present && (bahama_version == 0x09))   {
+		radio->marimba->mod_id = SLAVE_ID_BAHAMA;
+		/* actual value itself used as mask*/
+		retval = marimba_write_bit_mask(radio->marimba,
+			BAHAMA_LDO_DREG_CTL0, &internal_vreg_ctl[bt_status][0],
+			 1, internal_vreg_ctl[index][0]);
+		if (retval < 0) {
+			printk(KERN_ERR "%s:0xF0 write failed\n", __func__);
+			return retval;
+		}
+		/* actual value itself used as mask*/
+		retval = marimba_write_bit_mask(radio->marimba,
+			BAHAMA_LDO_AREG_CTL0, &internal_vreg_ctl[bt_status][1],
+			1, internal_vreg_ctl[index][1]);
+		if (retval < 0) {
+			printk(KERN_ERR "%s:0xF4 write failed\n", __func__);
+			return retval;
+		}
+	} else    {
+		/* disable fm core */
+		radio->marimba->mod_id = MARIMBA_SLAVE_ID_MARIMBA;
+	}
+
+	value = 0x00;
+	retval = marimba_write_bit_mask(radio->marimba, MARIMBA_XO_BUFF_CNTRL,
+							&value, 1, FM_ENABLE);
+	if (retval < 0) {
+		printk(KERN_ERR "%s:XO_BUFF_CNTRL write failed\n", __func__);
+		return retval;
+	}
+	FMDBG("%s, Calling fm_shutdown\n", __func__);
+	/* teardown gpio and pmic */
+	radio->pdata->fm_shutdown(radio->pdata);
+	if (radio->pdata->config_i2s_gpio != NULL)
+		radio->pdata->config_i2s_gpio(FM_I2S_OFF);
+	radio->handle_irq = 1;
+	radio->users = 0;
+	radio->marimba->mod_id = SLAVE_ID_BAHAMA;
+	marimba_set_fm_status(radio->marimba, false);
+	return 0;
+}
+
+/*
+ * tavarua_fops - file operations interface
+ */
+static const struct v4l2_file_operations tavarua_fops = {
+	.owner = THIS_MODULE,
+	.read = tavarua_fops_read,
+	.write = tavarua_fops_write,
+	.ioctl = video_ioctl2,
+	.open  = tavarua_fops_open,
+	.release = tavarua_fops_release,
+};
+
+/*************************************************************************
+ * Video4Linux Interface
+ *************************************************************************/
+
+/*
+ * tavarua_v4l2_queryctrl - query control
+ */
+static struct v4l2_queryctrl tavarua_v4l2_queryctrl[] = {
+	{
+		.id	       = V4L2_CID_AUDIO_VOLUME,
+		.type	       = V4L2_CTRL_TYPE_INTEGER,
+		.name	       = "Volume",
+		.minimum       = 0,
+		.maximum       = 15,
+		.step	       = 1,
+		.default_value = 15,
+	},
+	{
+		.id	       = V4L2_CID_AUDIO_BALANCE,
+		.flags	       = V4L2_CTRL_FLAG_DISABLED,
+	},
+	{
+		.id	       = V4L2_CID_AUDIO_BASS,
+		.flags	       = V4L2_CTRL_FLAG_DISABLED,
+	},
+	{
+		.id	       = V4L2_CID_AUDIO_TREBLE,
+		.flags	       = V4L2_CTRL_FLAG_DISABLED,
+	},
+	{
+		.id	       = V4L2_CID_AUDIO_MUTE,
+		.type	       = V4L2_CTRL_TYPE_BOOLEAN,
+		.name	       = "Mute",
+		.minimum       = 0,
+		.maximum       = 1,
+		.step	       = 1,
+		.default_value = 1,
+	},
+	{
+		.id	       = V4L2_CID_AUDIO_LOUDNESS,
+		.flags	       = V4L2_CTRL_FLAG_DISABLED,
+	},
+	{
+		.id	       = V4L2_CID_PRIVATE_TAVARUA_SRCHMODE,
+		.type          = V4L2_CTRL_TYPE_INTEGER,
+		.name	       = "Search mode",
+		.minimum       = 0,
+		.maximum       = 7,
+		.step	       = 1,
+		.default_value = 0,
+	},
+	{
+		.id            = V4L2_CID_PRIVATE_TAVARUA_SCANDWELL,
+		.type          = V4L2_CTRL_TYPE_INTEGER,
+		.name          = "Search dwell time",
+		.minimum       = 0,
+		.maximum       = 7,
+		.step          = 1,
+		.default_value = 0,
+	},
+	{
+		.id            = V4L2_CID_PRIVATE_TAVARUA_SRCHON,
+		.type          = V4L2_CTRL_TYPE_BOOLEAN,
+		.name          = "Search on/off",
+		.minimum       = 0,
+		.maximum       = 1,
+		.step          = 1,
+		.default_value = 1,
+
+	},
+	{
+		.id            = V4L2_CID_PRIVATE_TAVARUA_STATE,
+		.type          = V4L2_CTRL_TYPE_INTEGER,
+		.name          = "radio off/rx/tx/reset",
+		.minimum       = 0,
+		.maximum       = 3,
+		.step          = 1,
+		.default_value = 1,
+
+	},
+	{
+		.id            = V4L2_CID_PRIVATE_TAVARUA_REGION,
+		.type          = V4L2_CTRL_TYPE_INTEGER,
+		.name          = "radio standard",
+		.minimum       = 0,
+		.maximum       = 2,
+		.step          = 1,
+		.default_value = 0,
+	},
+	{
+		.id            = V4L2_CID_PRIVATE_TAVARUA_SIGNAL_TH,
+		.type          = V4L2_CTRL_TYPE_INTEGER,
+		.name          = "Signal Threshold",
+		.minimum       = 0x80,
+		.maximum       = 0x7F,
+		.step          = 1,
+		.default_value = 0,
+	},
+	{
+		.id            = V4L2_CID_PRIVATE_TAVARUA_SRCH_PTY,
+		.type          = V4L2_CTRL_TYPE_INTEGER,
+		.name          = "Search PTY",
+		.minimum       = 0,
+		.maximum       = 31,
+		.default_value = 0,
+	},
+	{
+		.id            = V4L2_CID_PRIVATE_TAVARUA_SRCH_PI,
+		.type          = V4L2_CTRL_TYPE_INTEGER,
+		.name          = "Search PI",
+		.minimum       = 0,
+		.maximum       = 0xFF,
+		.default_value = 0,
+	},
+	{
+		.id            = V4L2_CID_PRIVATE_TAVARUA_SRCH_CNT,
+		.type          = V4L2_CTRL_TYPE_INTEGER,
+		.name          = "Preset num",
+		.minimum       = 0,
+		.maximum       = 12,
+		.default_value = 0,
+	},
+	{
+		.id            = V4L2_CID_PRIVATE_TAVARUA_EMPHASIS,
+		.type          = V4L2_CTRL_TYPE_BOOLEAN,
+		.name          = "Emphasis",
+		.minimum       = 0,
+		.maximum       = 1,
+		.default_value = 0,
+	},
+	{
+		.id            = V4L2_CID_PRIVATE_TAVARUA_RDS_STD,
+		.type          = V4L2_CTRL_TYPE_BOOLEAN,
+		.name          = "RDS standard",
+		.minimum       = 0,
+		.maximum       = 1,
+		.default_value = 0,
+	},
+	{
+		.id            = V4L2_CID_PRIVATE_TAVARUA_SPACING,
+		.type          = V4L2_CTRL_TYPE_INTEGER,
+		.name          = "Channel spacing",
+		.minimum       = 0,
+		.maximum       = 2,
+		.default_value = 0,
+	},
+	{
+		.id            = V4L2_CID_PRIVATE_TAVARUA_RDSON,
+		.type          = V4L2_CTRL_TYPE_BOOLEAN,
+		.name          = "RDS on/off",
+		.minimum       = 0,
+		.maximum       = 1,
+		.default_value = 0,
+	},
+	{
+		.id            = V4L2_CID_PRIVATE_TAVARUA_RDSGROUP_MASK,
+		.type          = V4L2_CTRL_TYPE_INTEGER,
+		.name          = "RDS group mask",
+		.minimum       = 0,
+		.maximum       = 0xFFFFFFFF,
+		.default_value = 0,
+	},
+	{
+		.id            = V4L2_CID_PRIVATE_TAVARUA_RDSGROUP_PROC,
+		.type          = V4L2_CTRL_TYPE_INTEGER,
+		.name          = "RDS processing",
+		.minimum       = 0,
+		.maximum       = 0xFF,
+		.default_value = 0,
+	},
+	{
+		.id            = V4L2_CID_PRIVATE_TAVARUA_RDSD_BUF,
+		.type          = V4L2_CTRL_TYPE_INTEGER,
+		.name          = "RDS data groups to buffer",
+		.minimum       = 1,
+		.maximum       = 21,
+		.default_value = 0,
+	},
+	{
+		.id            = V4L2_CID_PRIVATE_TAVARUA_PSALL,
+		.type          = V4L2_CTRL_TYPE_BOOLEAN,
+		.name          = "pass all ps strings",
+		.minimum       = 0,
+		.maximum       = 1,
+		.default_value = 0,
+	},
+	{
+		.id            = V4L2_CID_PRIVATE_TAVARUA_LP_MODE,
+		.type          = V4L2_CTRL_TYPE_BOOLEAN,
+		.name          = "Low power mode",
+		.minimum       = 0,
+		.maximum       = 1,
+		.default_value = 0,
+	},
+	{
+		.id            = V4L2_CID_PRIVATE_TAVARUA_ANTENNA,
+		.type          = V4L2_CTRL_TYPE_BOOLEAN,
+		.name          = "headset/internal",
+		.minimum       = 0,
+		.maximum       = 1,
+		.default_value = 0,
+	},
+	/* Private controls for FM TX*/
+	{
+		.id            = V4L2_CID_PRIVATE_TAVARUA_TX_SETPSREPEATCOUNT,
+		.type          = V4L2_CTRL_TYPE_INTEGER,
+		.name          = "Set PS REPEATCOUNT",
+		.minimum       = 0,
+		.maximum       = 15,
+	},
+	{
+		.id            = V4L2_CID_PRIVATE_TAVARUA_STOP_RDS_TX_PS_NAME,
+		.type          = V4L2_CTRL_TYPE_BOOLEAN,
+		.name          = "Stop PS NAME",
+		.minimum       = 0,
+		.maximum       = 1,
+	},
+	{
+		.id            = V4L2_CID_PRIVATE_TAVARUA_STOP_RDS_TX_RT,
+		.type          = V4L2_CTRL_TYPE_BOOLEAN,
+		.name          = "Stop RT",
+		.minimum       = 0,
+		.maximum       = 1,
+	},
+
+};
+
+/*=============================================================================
+FUNCTION:  tavarua_vidioc_querycap
+=============================================================================*/
+/**
+  This function is called to query device capabilities.
+
+  NOTE:
+  All V4L2 devices support the VIDIOC_QUERYCAP ioctl. It is used to identify
+  kernel devices compatible with this specification and to obtain information
+  about driver and hardware capabilities. The ioctl takes a pointer to a struct
+  v4l2_capability which is filled by the driver. When the driver is not
+  compatible with this specification the ioctl returns an EINVAL error code.
+
+  @param file: File descriptor returned by open().
+  @param capability: pointer to struct v4l2_capability.
+
+  @return On success 0 is returned, else error code.
+  @return EINVAL: The device is not compatible with this specification.
+*/
+static int tavarua_vidioc_querycap(struct file *file, void *priv,
+		struct v4l2_capability *capability)
+{
+	struct tavarua_device *radio = video_get_drvdata(video_devdata(file));
+
+	strlcpy(capability->driver, DRIVER_NAME, sizeof(capability->driver));
+	strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card));
+	sprintf(capability->bus_info, "I2C");
+	capability->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
+
+	capability->version = radio->chipID;
+
+	return 0;
+}
+
+/*=============================================================================
+FUNCTION:  tavarua_vidioc_queryctrl
+=============================================================================*/
+/**
+  This function is called to query the device and driver for supported video
+  controls (enumerate control items).
+
+  NOTE:
+  To query the attributes of a control, the applications set the id field of
+  a struct v4l2_queryctrl and call the VIDIOC_QUERYCTRL ioctl with a pointer
+  to this structure. The driver fills the rest of the structure or returns an
+  EINVAL error code when the id is invalid.
+
+  @param file: File descriptor returned by open().
+  @param qc: pointer to struct v4l2_queryctrl.
+
+  @return On success 0 is returned, else error code.
+  @return EINVAL: The struct v4l2_queryctrl id is invalid.
+*/
+static int tavarua_vidioc_queryctrl(struct file *file, void *priv,
+		struct v4l2_queryctrl *qc)
+{
+	unsigned char i;
+	int retval = -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(tavarua_v4l2_queryctrl); i++) {
+		if (qc->id && qc->id == tavarua_v4l2_queryctrl[i].id) {
+			memcpy(qc, &(tavarua_v4l2_queryctrl[i]), sizeof(*qc));
+			retval = 0;
+			break;
+		}
+	}
+	if (retval < 0)
+		printk(KERN_WARNING DRIVER_NAME
+			": query control failed with %d\n", retval);
+
+	return retval;
+}
+static int peek_MPX_DCC(struct tavarua_device *radio)
+{
+	int retval = 0;
+	unsigned char xfr_buf[XFR_REG_NUM];
+	int MPX_DCC[3] = { 0 };
+	int DCC = 0;
+	int ct = 0;
+	unsigned char size = 0;
+
+	/*
+	Poking the MPX_DCC_BYPASS register to freeze the
+	value of MPX_DCC from changing while we access it
+	*/
+
+	/*Poking the MPX_DCC_BYPASS register : 0x88C0 */
+	size = 0x01;
+	xfr_buf[0] = (XFR_POKE_MODE | (size << 1));
+	xfr_buf[1] = MPX_DCC_BYPASS_POKE_MSB;
+	xfr_buf[2] = MPX_DCC_BYPASS_POKE_LSB;
+	xfr_buf[3] = 0x01;
+
+	retval = tavarua_write_registers(radio, XFRCTRL, xfr_buf, 4);
+	if (retval < 0) {
+		FMDBG("Failed to write\n");
+		return retval;
+	}
+	/*Wait for the XFR interrupt */
+	msleep(TAVARUA_DELAY*15);
+
+	for (ct = 0; ct < 5; ct++)
+		xfr_buf[ct] = 0;
+
+	/* Peeking Regs 0x88C2-0x88C4 */
+	size = 0x03;
+	xfr_buf[0] = (XFR_PEEK_MODE | (size << 1));
+	xfr_buf[1] = MPX_DCC_PEEK_MSB_REG1;
+	xfr_buf[2] = MPX_DCC_PEEK_LSB_REG1;
+	retval = tavarua_write_registers(radio, XFRCTRL, xfr_buf, 3);
+	if (retval < 0) {
+		FMDBG("Failed to write\n");
+		return retval;
+	}
+	/*Wait for the XFR interrupt */
+	msleep(TAVARUA_DELAY*10);
+	retval = tavarua_read_registers(radio, XFRDAT0, 3);
+	if (retval < 0) {
+		printk(KERN_INFO "INT_DET: Read failure\n");
+		return retval;
+	}
+	MPX_DCC[0] = (int)radio->registers[XFRDAT0];
+	MPX_DCC[1] = (int)radio->registers[XFRDAT1];
+	MPX_DCC[2] = (int)radio->registers[XFRDAT2];
+
+	/*
+	Form the final MPX_DCC parameter:
+	MPX_DCC[0] forms the LSB part,
+	MPX_DCC[1] the middle part, and 4 bits of
+	MPX_DCC[2] form the MSB part of the 20-bit signed MPX_DCC
+	*/
+
+	DCC = ((int)MPX_DCC[2] << 16) | ((int)MPX_DCC[1] << 8) |
+		((int)MPX_DCC[0]);
+
+	/*
+	if bit-19 is '1', set the remaining upper bits to '1' to make it negative
+	*/
+	if (DCC & 0x00080000) {
+		FMDBG("bit-19 is '1'\n");
+		DCC |= 0xFFF00000;
+	}
+
+	/*
+	Poking the MPX_DCC_BYPASS register to be back to normal
+	*/
+
+	/*Poking the MPX_DCC_BYPASS register : 0x88C0 */
+	size = 0x01;
+	xfr_buf[0] = (XFR_POKE_MODE | (size << 1));
+	xfr_buf[1] = MPX_DCC_BYPASS_POKE_MSB;
+	xfr_buf[2] = MPX_DCC_BYPASS_POKE_LSB;
+	xfr_buf[3] = 0x00;
+
+	retval = tavarua_write_registers(radio, XFRCTRL, xfr_buf, 4);
+	if (retval < 0) {
+		FMDBG("Failed to write\n");
+		return retval;
+	}
+	/*Wait for the XFR interrupt */
+	msleep(TAVARUA_DELAY*10);
+
+	return DCC;
+}
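+
+/*
+ * Sign-extension example for the 20-bit MPX_DCC value assembled above:
+ *
+ *	XFRDAT0..2 = 0xFF 0xFF 0x0F  ->  DCC = 0x000FFFFF
+ *	bit-19 is set, so DCC |= 0xFFF00000  ->  0xFFFFFFFF (i.e. -1)
+ */
+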
+/*=============================================================================
+FUNCTION:  tavarua_vidioc_g_ctrl
+=============================================================================*/
+/**
+  This function is called to get the value of a control.
+
+  NOTE:
+  To get the current value of a control, applications initialize the id field
+  of a struct v4l2_control and call the VIDIOC_G_CTRL ioctl with a pointer to
+  this structure.
+
+  When the id is invalid drivers return an EINVAL error code. When the value is
+  out of bounds drivers can choose to take the closest valid value or return an
+  ERANGE error code, whatever seems more appropriate.
+
+  @param file: File descriptor returned by open().
+  @param ctrl: pointer to struct v4l2_control.
+
+  @return On success 0 is returned, else error code.
+  @return EINVAL: The struct v4l2_control id is invalid.
+  @return ERANGE: The struct v4l2_control value is out of bounds.
+  @return EBUSY: The control is temporarily not changeable, possibly because
+  another application took over control of the device function this control
+  belongs to.
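+
+  Example (userspace sketch; V4L2_CID_AUDIO_MUTE is a standard control this
+  handler services, and fd is assumed to be an open radio device node):
+
+    struct v4l2_control ctrl = { .id = V4L2_CID_AUDIO_MUTE };
+    if (ioctl(fd, VIDIOC_G_CTRL, &ctrl) == 0)
+        printf("mute bits: %d\n", ctrl.value);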
+*/
+static int tavarua_vidioc_g_ctrl(struct file *file, void *priv,
+		struct v4l2_control *ctrl)
+{
+	struct tavarua_device *radio = video_get_drvdata(video_devdata(file));
+	int retval = 0;
+	unsigned char xfr_buf[XFR_REG_NUM];
+	signed char cRmssiThreshold;
+	signed char ioc;
+	unsigned char size = 0;
+
+	switch (ctrl->id) {
+	case V4L2_CID_AUDIO_VOLUME:
+		break;
+	case V4L2_CID_AUDIO_MUTE:
+		ctrl->value = radio->registers[IOCTRL] & 0x03;
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_SRCHMODE:
+		ctrl->value = radio->registers[SRCHCTRL] & SRCH_MODE;
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_SCANDWELL:
+		ctrl->value = (radio->registers[SRCHCTRL] & SCAN_DWELL) >> 4;
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_SRCHON:
+		ctrl->value = (radio->registers[SRCHCTRL] & SRCH_ON) >> 7;
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_STATE:
+		ctrl->value = (radio->registers[RDCTRL] & 0x03);
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_IOVERC:
+		retval = tavarua_read_registers(radio, IOVERC, 1);
+		if (retval < 0)
+			return retval;
+		ioc = radio->registers[IOVERC];
+		ctrl->value = ioc;
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_INTDET:
+		size = 0x1;
+		xfr_buf[0] = (XFR_PEEK_MODE | (size << 1));
+		xfr_buf[1] = INTDET_PEEK_MSB;
+		xfr_buf[2] = INTDET_PEEK_LSB;
+		retval = tavarua_write_registers(radio, XFRCTRL, xfr_buf, 3);
+		if (retval < 0) {
+			FMDBG("Failed to write\n");
+			return retval;
+		}
+		FMDBG("INT_DET:Sync write success\n");
+		/*Wait for the XFR interrupt */
+		msleep(TAVARUA_DELAY*10);
+		/* Read the XFRDAT0 register populated by FM SoC */
+		retval = tavarua_read_registers(radio, XFRDAT0, 3);
+		if (retval < 0) {
+			FMDBG("INT_DET: Read failure\n");
+			return retval;
+		}
+		ctrl->value = radio->registers[XFRDAT0];
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_MPX_DCC:
+		ctrl->value = peek_MPX_DCC(radio);
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_REGION:
+		ctrl->value = radio->region_params.region;
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_SIGNAL_TH:
+		retval = sync_read_xfr(radio, RX_CONFIG, xfr_buf);
+		if (retval < 0) {
+			FMDBG("[G IOCTL=V4L2_CID_PRIVATE_TAVARUA_SIGNAL_TH]\n");
+			FMDBG("sync_read_xfr error: [retval=%d]\n", retval);
+			break;
+		}
+		/* Since RMSSI Threshold is signed value */
+		cRmssiThreshold = (signed char)xfr_buf[0];
+		ctrl->value  = cRmssiThreshold;
+		FMDBG("cRmssiThreshold: %d\n", cRmssiThreshold);
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_SRCH_PTY:
+		ctrl->value = radio->srch_params.srch_pty;
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_SRCH_PI:
+		ctrl->value = radio->srch_params.srch_pi;
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_SRCH_CNT:
+		ctrl->value = radio->srch_params.preset_num;
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_EMPHASIS:
+		ctrl->value = radio->region_params.emphasis;
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_RDS_STD:
+		ctrl->value = radio->region_params.rds_std;
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_SPACING:
+		ctrl->value = radio->region_params.spacing;
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_RDSON:
+		ctrl->value = radio->registers[RDSCTRL] & RDS_ON;
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_RDSGROUP_MASK:
+		retval = sync_read_xfr(radio, RDS_CONFIG, xfr_buf);
+		if (retval > -1)
+			ctrl->value =   (xfr_buf[8] << 24) |
+					(xfr_buf[9] << 16) |
+					(xfr_buf[10] << 8) |
+					 xfr_buf[11];
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_RDSGROUP_PROC:
+		retval = tavarua_read_registers(radio, ADVCTRL, 1);
+		if (retval > -1)
+			ctrl->value = radio->registers[ADVCTRL];
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_RDSD_BUF:
+		retval = sync_read_xfr(radio, RDS_CONFIG, xfr_buf);
+		if (retval > -1)
+			ctrl->value = xfr_buf[1];
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_PSALL:
+		retval = sync_read_xfr(radio, RDS_CONFIG, xfr_buf);
+		if (retval > -1)
+			ctrl->value = xfr_buf[12] & RDS_CONFIG_PSALL;
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_LP_MODE:
+		ctrl->value = radio->lp_mode;
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_ANTENNA:
+		ctrl->value = GET_REG_FIELD(radio->registers[IOCTRL],
+			IOC_ANTENNA_OFFSET, IOC_ANTENNA_MASK);
+		break;
+	default:
+		retval = -EINVAL;
+	}
+	if (retval < 0)
+		printk(KERN_WARNING DRIVER_NAME
+		": get control failed with %d, id: %d\n", retval, ctrl->id);
+
+	return retval;
+}
+
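+/*=============================================================================
+FUNCTION:  tavarua_vidioc_s_ext_ctrls
+=============================================================================*/
+/**
+  This function is called to set the RDS transmit strings (PS name and
+  Radio Text) through the extended-controls interface.
+
+  The string payload is copied from user space in XFR_REG_NUM-byte chunks and
+  written to the FM core, followed by a header block carrying the PTY, PI and
+  repeat count.
+
+  Example (userspace sketch; the field layout matches what this handler
+  reads, the PS string itself is illustrative):
+
+    char ps[] = "MyStation";
+    struct v4l2_ext_control ec = {
+        .id = V4L2_CID_RDS_TX_PS_NAME,
+        .size = sizeof(ps),
+        .string = ps,
+    };
+    struct v4l2_ext_controls ecs = {
+        .ctrl_class = V4L2_CTRL_CLASS_FM_TX,
+        .count = 1,
+        .controls = &ec,
+    };
+    ioctl(fd, VIDIOC_S_EXT_CTRLS, &ecs);
+
+  @param file: File descriptor returned by open().
+  @param ctrl: pointer to struct v4l2_ext_controls.
+
+  @return On success 0 is returned, else error code.
+*/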
+static int tavarua_vidioc_s_ext_ctrls(struct file *file, void *priv,
+			struct v4l2_ext_controls *ctrl)
+{
+	int retval = 0;
+	int bytes_to_copy;
+	int bytes_copied = 0;
+	int bytes_left = 0;
+	int chunk_index = 0;
+	char tx_data[XFR_REG_NUM];
+	struct tavarua_device *radio = video_get_drvdata(video_devdata(file));
+	char *data = NULL;
+	int extra_name_byte = 0;
+	int name_bytes = 0;
+
+	switch ((ctrl->controls[0]).id)	{
+	case V4L2_CID_RDS_TX_PS_NAME: {
+		FMDBG("In V4L2_CID_RDS_TX_PS_NAME\n");
+		/* Copy the PS string passed from user space */
+
+		chunk_index = 0;
+		bytes_copied = 0;
+		bytes_left = min((int)(ctrl->controls[0]).size,
+			MAX_PS_LENGTH);
+		data = (ctrl->controls[0]).string;
+
+		/* send payload to FM hardware */
+		while (bytes_left) {
+			chunk_index++;
+			FMDBG("chunk is %d", chunk_index);
+			bytes_to_copy = min(bytes_left, XFR_REG_NUM);
+			/*Clear the tx_data */
+			memset(tx_data, 0, XFR_REG_NUM);
+			if (copy_from_user(tx_data,
+				data + bytes_copied, bytes_to_copy))
+				return -EFAULT;
+			retval = sync_write_xfr(radio,
+				RDS_PS_0 + chunk_index, tx_data);
+			if (retval < 0)	{
+				FMDBG("sync_write_xfr:  %d", retval);
+				return retval;
+			}
+			bytes_copied += bytes_to_copy;
+			bytes_left -= bytes_to_copy;
+		}
+		memset(tx_data, 0, XFR_REG_NUM);
+		/*Write the PS Header*/
+		FMDBG("Writing PS header\n");
+		extra_name_byte = (bytes_copied%8) ? 1 : 0;
+		name_bytes = (bytes_copied/8) + extra_name_byte;
+		/*8 bytes are grouped as 1 name */
+		tx_data[0] = (name_bytes) & MASK_TXREPCOUNT;
+		tx_data[1] = radio->pty & MASK_PTY; /* PTY */
+		tx_data[2] = ((radio->pi & MASK_PI_MSB) >> 8);
+		tx_data[3] = radio->pi & MASK_PI_LSB;
+		/* TX ctrl + repeatCount*/
+		tx_data[4] = TX_ON |
+		    (radio->ps_repeatcount & MASK_TXREPCOUNT);
+		retval = sync_write_xfr(radio, RDS_PS_0, tx_data);
+		if (retval < 0)	{
+			FMDBG("sync_write_xfr returned %d", retval);
+			return retval;
+		}
+	} break;
+	case V4L2_CID_RDS_TX_RADIO_TEXT: {
+		chunk_index = 0;
+		bytes_copied = 0;
+		FMDBG("In V4L2_CID_RDS_TX_RADIO_TEXT\n");
+		/* Copy the RT string passed from user space */
+		FMDBG("Passed RT String : %s\n",
+			(ctrl->controls[0]).string);
+		bytes_left =
+		    min((int)(ctrl->controls[0]).size, MAX_RT_LENGTH);
+		data = (ctrl->controls[0]).string;
+		/* send payload to FM hardware */
+		while (bytes_left) {
+			chunk_index++;
+			bytes_to_copy = min(bytes_left, XFR_REG_NUM);
+			memset(tx_data, 0, XFR_REG_NUM);
+			if (copy_from_user(tx_data,
+				    data + bytes_copied, bytes_to_copy))
+				return -EFAULT;
+			retval = sync_write_xfr(radio,
+				RDS_RT_0 + chunk_index, tx_data);
+			if (retval < 0)
+				return retval;
+			bytes_copied += bytes_to_copy;
+			bytes_left -= bytes_to_copy;
+		}
+		/*Write the RT  Header */
+		tx_data[0] = bytes_copied;
+		/* PTY */
+		tx_data[1] = TX_ON | ((radio->pty & MASK_PTY) >> 8);
+		/* PI high */
+		tx_data[2] = ((radio->pi & MASK_PI_MSB) >> 8);
+		/* PI low */
+		tx_data[3] = radio->pi & MASK_PI_LSB;
+		retval = sync_write_xfr(radio, RDS_RT_0 , tx_data);
+		if (retval < 0)
+			return retval;
+		FMDBG("done RT writing: %d\n", retval);
+	} break;
+	default:
+	{
+		FMDBG("Shouldn't reach here\n");
+		retval = -EINVAL;
+	}
+	}
+	return retval;
+}
+
+/*=============================================================================
+FUNCTION:  tavarua_vidioc_s_ctrl
+=============================================================================*/
+/**
+  This function is called to set the value of a control.
+
+  NOTE:
+  To change the value of a control, applications initialize the id and value
+  fields of a struct v4l2_control and call the VIDIOC_S_CTRL ioctl.
+
+  When the id is invalid drivers return an EINVAL error code. When the value is
+  out of bounds drivers can choose to take the closest valid value or return an
+  ERANGE error code, whatever seems more appropriate.
+
+  @param file: File descriptor returned by open().
+  @param ctrl: pointer to struct v4l2_control.
+
+  @return On success 0 is returned, else error code.
+  @return EINVAL: The struct v4l2_control id is invalid.
+  @return ERANGE: The struct v4l2_control value is out of bounds.
+  @return EBUSY: The control is temporarily not changeable, possibly because
+  another application took over control of the device function this control
+  belongs to.
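+
+  Example (userspace sketch; the value written to the low two mute bits of
+  IOCTRL is an assumption based on the handler below):
+
+    struct v4l2_control ctrl = { .id = V4L2_CID_AUDIO_MUTE, .value = 3 };
+    ioctl(fd, VIDIOC_S_CTRL, &ctrl);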
+*/
+static int tavarua_vidioc_s_ctrl(struct file *file, void *priv,
+		struct v4l2_control *ctrl)
+{
+	struct tavarua_device *radio = video_get_drvdata(video_devdata(file));
+	int retval = 0;
+	unsigned char value;
+	unsigned char xfr_buf[XFR_REG_NUM];
+	unsigned char tx_data[XFR_REG_NUM];
+
+	switch (ctrl->id) {
+	case V4L2_CID_AUDIO_VOLUME:
+		break;
+	case V4L2_CID_AUDIO_MUTE:
+		value = (radio->registers[IOCTRL] & ~IOC_HRD_MUTE) |
+							(ctrl->value & 0x03);
+		retval = tavarua_write_register(radio, IOCTRL, value);
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_SRCHMODE:
+		value = (radio->registers[SRCHCTRL] & ~SRCH_MODE) |
+							ctrl->value;
+		radio->registers[SRCHCTRL] = value;
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_SCANDWELL:
+		value = (radio->registers[SRCHCTRL] & ~SCAN_DWELL) |
+						(ctrl->value << 4);
+		radio->registers[SRCHCTRL] = value;
+		break;
+	/* start/stop search */
+	case V4L2_CID_PRIVATE_TAVARUA_SRCHON:
+		FMDBG("starting search\n");
+		tavarua_search(radio, ctrl->value, SRCH_DIR_UP);
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_STATE:
+		/* check if already on */
+		radio->handle_irq = 1;
+		if (((ctrl->value == FM_RECV) || (ctrl->value == FM_TRANS))
+				    && !(radio->registers[RDCTRL] &
+							ctrl->value)) {
+			FMDBG("clearing flags\n");
+			init_completion(&radio->sync_xfr_start);
+			init_completion(&radio->sync_req_done);
+			radio->xfr_in_progress = 0;
+			radio->xfr_bytes_left = 0;
+			FMDBG("turning on ..\n");
+			retval = tavarua_start(radio, ctrl->value);
+			if (retval >= 0) {
+				FMDBG("Setting audio path ...\n");
+				retval = tavarua_set_audio_path(
+					TAVARUA_AUDIO_OUT_DIGITAL_ON,
+					TAVARUA_AUDIO_OUT_ANALOG_OFF);
+				if (retval < 0) {
+					FMDERR("Error in tavarua_set_audio_path"
+						" %d\n", retval);
+				}
+				/* Enable 'SoftMute' and 'SignalBlending' */
+				value = (radio->registers[IOCTRL] |
+					IOC_SFT_MUTE | IOC_SIG_BLND);
+				retval = tavarua_write_register(radio, IOCTRL,
+								value);
+				if (retval < 0)
+					FMDBG("SMute and SBlending not enabled\n");
+			}
+		}
+		/* check if off */
+		else if ((ctrl->value == FM_OFF) && radio->registers[RDCTRL]) {
+			FMDBG("turning off...\n");
+			retval = tavarua_write_register(radio, RDCTRL,
+							ctrl->value);
+			/*
+			Make it synchronous: block until the READY interrupt,
+			i.e. until complete(&radio->sync_req_done) is called
+			*/
+
+			if (retval >= 0) {
+
+				if (!wait_for_completion_timeout(
+					&radio->sync_req_done,
+					msecs_to_jiffies(wait_timeout)))
+					FMDBG("turning off timedout...\n");
+			}
+		}
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_REGION:
+		retval = tavarua_set_region(radio, ctrl->value);
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_SIGNAL_TH:
+		retval = sync_read_xfr(radio, RX_CONFIG, xfr_buf);
+		if (retval < 0)	{
+			FMDERR("V4L2_CID_PRIVATE_TAVARUA_SIGNAL_TH]\n");
+			FMDERR("sync_read_xfr [retval=%d]\n", retval);
+			break;
+		}
+		/* RMSSI Threshold is a signed 8 bit value */
+		xfr_buf[0] = (unsigned char)ctrl->value;
+		xfr_buf[1] = (unsigned char)ctrl->value;
+		xfr_buf[4] = 0x01;
+		retval = sync_write_xfr(radio, RX_CONFIG, xfr_buf);
+		if (retval < 0) {
+			FMDERR("V4L2_CID_PRIVATE_TAVARUA_SIGNAL_TH]\n");
+			FMDERR("sync_write_xfr [retval=%d]\n", retval);
+			break;
+		}
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_SRCH_PTY:
+		radio->srch_params.srch_pty = ctrl->value;
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_SRCH_PI:
+		radio->srch_params.srch_pi = ctrl->value;
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_SRCH_CNT:
+		radio->srch_params.preset_num = ctrl->value;
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_EMPHASIS:
+		radio->region_params.emphasis = ctrl->value;
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_RDS_STD:
+		radio->region_params.rds_std = ctrl->value;
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_SPACING:
+		radio->region_params.spacing = ctrl->value;
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_RDSON:
+		retval = 0;
+		if (ctrl->value != (radio->registers[RDSCTRL] & RDS_ON)) {
+			value = radio->registers[RDSCTRL] | ctrl->value;
+			retval = tavarua_write_register(radio, RDSCTRL, value);
+		}
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_RDSGROUP_MASK:
+		retval = sync_read_xfr(radio, RDS_CONFIG, xfr_buf);
+		if (retval < 0)
+			break;
+		xfr_buf[8] = (ctrl->value & 0xFF000000) >> 24;
+		xfr_buf[9] = (ctrl->value & 0x00FF0000) >> 16;
+		xfr_buf[10] = (ctrl->value & 0x0000FF00) >> 8;
+		xfr_buf[11] = (ctrl->value & 0x000000FF);
+		retval = sync_write_xfr(radio, RDS_CONFIG, xfr_buf);
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_RDSGROUP_PROC:
+		value = radio->registers[ADVCTRL] | ctrl->value;
+		retval = tavarua_write_register(radio, ADVCTRL, value);
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_RDSD_BUF:
+		retval = sync_read_xfr(radio, RDS_CONFIG, xfr_buf);
+		if (retval < 0)
+			break;
+		xfr_buf[1] = ctrl->value;
+		retval = sync_write_xfr(radio, RDS_CONFIG, xfr_buf);
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_PSALL:
+		retval = sync_read_xfr(radio, RDS_CONFIG, xfr_buf);
+		value = ctrl->value & RDS_CONFIG_PSALL;
+		if (retval < 0)
+			break;
+		xfr_buf[12] &= ~RDS_CONFIG_PSALL;
+		xfr_buf[12] |= value;
+		retval = sync_write_xfr(radio, RDS_CONFIG, xfr_buf);
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_LP_MODE:
+		retval = 0;
+		if (ctrl->value == radio->lp_mode)
+			break;
+		if (ctrl->value) {
+			FMDBG("going into low power mode\n");
+			retval = tavarua_disable_interrupts(radio);
+		} else {
+			FMDBG("going into normal power mode\n");
+			tavarua_setup_interrupts(radio,
+				(radio->registers[RDCTRL] & 0x03));
+		}
+		break;
+	case V4L2_CID_PRIVATE_TAVARUA_ANTENNA:
+		SET_REG_FIELD(radio->registers[IOCTRL], ctrl->value,
+					IOC_ANTENNA_OFFSET, IOC_ANTENNA_MASK);
+		break;
+	/* TX Controls */
+
+	case V4L2_CID_RDS_TX_PTY: {
+			radio->pty = ctrl->value;
+		} break;
+	case V4L2_CID_RDS_TX_PI: {
+			radio->pi = ctrl->value;
+		} break;
+	case V4L2_CID_PRIVATE_TAVARUA_STOP_RDS_TX_PS_NAME: {
+			FMDBG("In STOP_RDS_TX_PS_NAME\n");
+			/*Pass a sample PS string */
+			memset(tx_data, '0', XFR_REG_NUM);
+			FMDBG("Writing PS header\n");
+			retval = sync_write_xfr(radio, RDS_PS_0, tx_data);
+			FMDBG("retval of PS Header write: %d", retval);
+
+		} break;
+
+	case V4L2_CID_PRIVATE_TAVARUA_STOP_RDS_TX_RT: {
+			memset(tx_data, '0', XFR_REG_NUM);
+			FMDBG("Writing RT header\n");
+			retval = sync_write_xfr(radio, RDS_RT_0, tx_data);
+			FMDBG("retval of Header write: %d", retval);
+
+		} break;
+
+	case V4L2_CID_PRIVATE_TAVARUA_TX_SETPSREPEATCOUNT: {
+			radio->ps_repeatcount = ctrl->value;
+		} break;
+	case V4L2_CID_TUNE_POWER_LEVEL: {
+		unsigned char tx_power_lvl_config[FM_TX_PWR_LVL_MAX+1] = {
+			0x85, /* tx_da<5:3> = 0  lpf<2:0> = 5*/
+			0x95, /* tx_da<5:3> = 2  lpf<2:0> = 5*/
+			0x9D, /* tx_da<5:3> = 3  lpf<2:0> = 5*/
+			0xA5, /* tx_da<5:3> = 4  lpf<2:0> = 5*/
+			0xAD, /* tx_da<5:3> = 5  lpf<2:0> = 5*/
+			0xB5, /* tx_da<5:3> = 6  lpf<2:0> = 5*/
+			0xBD, /* tx_da<5:3> = 7  lpf<2:0> = 5*/
+			0xBF  /* tx_da<5:3> = 7  lpf<2:0> = 7*/
+		};
+		if (ctrl->value > FM_TX_PWR_LVL_MAX)
+			ctrl->value = FM_TX_PWR_LVL_MAX;
+		if (ctrl->value < FM_TX_PWR_LVL_0)
+			ctrl->value = FM_TX_PWR_LVL_0;
+		retval = sync_read_xfr(radio, PHY_TXGAIN, xfr_buf);
+		FMDBG("return for PHY_TXGAIN is %d", retval);
+		if (retval < 0) {
+			FMDBG("read failed");
+			break;
+		}
+		xfr_buf[2] = tx_power_lvl_config[ctrl->value];
+		retval = sync_write_xfr(radio, PHY_TXGAIN, xfr_buf);
+		FMDBG("return for write PHY_TXGAIN is %d", retval);
+		if (retval < 0)
+			FMDBG("write failed");
+	} break;
+
+	default:
+		retval = -EINVAL;
+	}
+	if (retval < 0)
+		printk(KERN_WARNING DRIVER_NAME
+		": set control failed with %d, id : %d\n", retval, ctrl->id);
+
+	return retval;
+}
+
+/*=============================================================================
+FUNCTION:  tavarua_vidioc_g_tuner
+=============================================================================*/
+/**
+  This function is called to get tuner attributes.
+
+  NOTE:
+  To query the attributes of a tuner, applications initialize the index field
+  and zero out the reserved array of a struct v4l2_tuner and call the
+  VIDIOC_G_TUNER ioctl with a pointer to this structure. Drivers fill the rest
+  of the structure or return an EINVAL error code when the index is out of
+  bounds. To enumerate all tuners applications shall begin at index zero,
+  incrementing by one until the driver returns EINVAL.
+
+  @param file: File descriptor returned by open().
+  @param tuner: pointer to struct v4l2_tuner.
+
+  @return On success 0 is returned, else error code.
+  @return EINVAL: The struct v4l2_tuner index is out of bounds.
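+
+  Example (userspace sketch; tuner 0 is the only tuner this driver exposes):
+
+    struct v4l2_tuner tuner = { .index = 0 };
+    if (ioctl(fd, VIDIOC_G_TUNER, &tuner) == 0)
+        printf("RMSSI: %u, stereo: %d\n", tuner.signal,
+               tuner.audmode == V4L2_TUNER_MODE_STEREO);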
+*/
+static int tavarua_vidioc_g_tuner(struct file *file, void *priv,
+		struct v4l2_tuner *tuner)
+{
+	struct tavarua_device *radio = video_get_drvdata(video_devdata(file));
+	int retval;
+	unsigned char xfr_buf[XFR_REG_NUM];
+	char rmssi = 0;
+	unsigned char size = 0;
+
+	if (tuner->index > 0)
+		return -EINVAL;
+
+	/* read status rssi */
+	retval = tavarua_read_registers(radio, IOCTRL, 1);
+	if (retval < 0)
+		return retval;
+	/* read RMSSI */
+	size = 0x1;
+	xfr_buf[0] = (XFR_PEEK_MODE | (size << 1));
+	xfr_buf[1] = RMSSI_PEEK_MSB;
+	xfr_buf[2] = RMSSI_PEEK_LSB;
+	retval = tavarua_write_registers(radio, XFRCTRL, xfr_buf, 3);
+	msleep(TAVARUA_DELAY*10);
+	retval = tavarua_read_registers(radio, XFRDAT0, 3);
+	rmssi = radio->registers[XFRDAT0];
+	tuner->signal = rmssi;
+
+	strcpy(tuner->name, "FM");
+	tuner->type = V4L2_TUNER_RADIO;
+	tuner->rangelow  =  radio->region_params.band_low;
+	tuner->rangehigh =  radio->region_params.band_high;
+	tuner->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
+	tuner->capability = V4L2_TUNER_CAP_LOW;
+
+	/* Stereo indicator == Stereo (instead of Mono) */
+	if (radio->registers[IOCTRL] & IOC_MON_STR)
+		tuner->audmode = V4L2_TUNER_MODE_STEREO;
+	else
+		tuner->audmode = V4L2_TUNER_MODE_MONO;
+
+	/* automatic frequency control: -1: freq too low, 1: freq too high */
+	tuner->afc = 0;
+
+	return 0;
+}
+
+/*=============================================================================
+FUNCTION:  tavarua_vidioc_s_tuner
+=============================================================================*/
+/**
+  This function is called to set tuner attributes. Used to set mono/stereo mode.
+
+  NOTE:
+  Tuners have two writable properties, the audio mode and the radio frequency.
+  To change the audio mode, applications initialize the index, audmode and
+  reserved fields and call the VIDIOC_S_TUNER ioctl. This will not change the
+  current tuner, which is determined by the current video input. Drivers may
+  choose a different audio mode if the requested mode is invalid or unsupported.
+  Since this is a write-only ioctl, it does not return the actually selected
+  audio mode.
+
+  To change the radio frequency the VIDIOC_S_FREQUENCY ioctl is available.
+
+  @param file: File descriptor returned by open().
+  @param tuner: pointer to struct v4l2_tuner.
+
+  @return On success 0 is returned, else error code.
+  @return -EINVAL: The struct v4l2_tuner index is out of bounds.
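+
+  Example (userspace sketch; because this handler also copies rangelow and
+  rangehigh into the region parameters, the structure should be fetched with
+  VIDIOC_G_TUNER first and then modified):
+
+    struct v4l2_tuner tuner = { .index = 0 };
+    ioctl(fd, VIDIOC_G_TUNER, &tuner);
+    tuner.audmode = V4L2_TUNER_MODE_MONO;
+    ioctl(fd, VIDIOC_S_TUNER, &tuner);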
+*/
+static int tavarua_vidioc_s_tuner(struct file *file, void *priv,
+		struct v4l2_tuner *tuner)
+{
+	struct tavarua_device *radio = video_get_drvdata(video_devdata(file));
+	int retval;
+	int audmode;
+	if (tuner->index > 0)
+		return -EINVAL;
+
+	FMDBG("%s: set low to %d\n", __func__, tuner->rangelow);
+	radio->region_params.band_low = tuner->rangelow;
+	radio->region_params.band_high = tuner->rangehigh;
+	if (tuner->audmode == V4L2_TUNER_MODE_MONO)
+		/* Mono */
+		audmode = (radio->registers[IOCTRL] | IOC_MON_STR);
+	else
+		/* Stereo */
+		audmode = (radio->registers[IOCTRL] & ~IOC_MON_STR);
+	retval = tavarua_write_register(radio, IOCTRL, audmode);
+	if (retval < 0)
+		printk(KERN_WARNING DRIVER_NAME
+			": set tuner failed with %d\n", retval);
+
+	return retval;
+}
+
+/*=============================================================================
+FUNCTION:  tavarua_vidioc_g_frequency
+=============================================================================*/
+/**
+  This function is called to get tuner or modulator radio frequency.
+
+  NOTE:
+  To get the current tuner or modulator radio frequency applications set the
+  tuner field of a struct v4l2_frequency to the respective tuner or modulator
+  number (only input devices have tuners, only output devices have modulators),
+  zero out the reserved array and call the VIDIOC_G_FREQUENCY ioctl with a
+  pointer to this structure. The driver stores the current frequency in the
+  frequency field.
+
+  @param file: File descriptor returned by open().
+  @param freq: pointer to struct v4l2_frequency. On success this is set to
+  the current frequency, in units of 62.5 Hz (V4L2_TUNER_CAP_LOW).
+
+  @return On success 0 is returned, else error code.
+  @return EINVAL: The tuner index is out of bounds or the value in the type
+  field is wrong.
+*/
+static int tavarua_vidioc_g_frequency(struct file *file, void *priv,
+		struct v4l2_frequency *freq)
+{
+	struct tavarua_device *radio = video_get_drvdata(video_devdata(file));
+	freq->type = V4L2_TUNER_RADIO;
+	return tavarua_get_freq(radio, freq);
+
+}
+
+/*=============================================================================
+FUNCTION:  tavarua_vidioc_s_frequency
+=============================================================================*/
+/**
+  This function is called to set tuner or modulator radio frequency.
+
+  NOTE:
+  To change the current tuner or modulator radio frequency applications
+  initialize the tuner, type and frequency fields, and the reserved array of
+  a struct v4l2_frequency and call the VIDIOC_S_FREQUENCY ioctl with a pointer
+  to this structure. When the requested frequency is not possible the driver
+  assumes the closest possible value. However, since VIDIOC_S_FREQUENCY is a
+  write-only ioctl, it does not return the actual new frequency.
+
+  @param file: File descriptor returned by open().
+  @param freq: pointer to struct v4l2_frequency.
+
+  @return On success 0 is returned, else error code.
+  @return EINVAL: The tuner index is out of bounds or the value in the type
+  field is wrong.
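+
+  Example (userspace sketch; the unit of 62.5 Hz follows from the
+  V4L2_TUNER_CAP_LOW capability reported by VIDIOC_G_TUNER, so 98.1 MHz is
+  98100000 / 62.5 = 1569600):
+
+    struct v4l2_frequency freq = {
+        .tuner = 0,
+        .type = V4L2_TUNER_RADIO,
+        .frequency = 1569600,
+    };
+    ioctl(fd, VIDIOC_S_FREQUENCY, &freq);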
+*/
+static int tavarua_vidioc_s_frequency(struct file *file, void *priv,
+					struct v4l2_frequency *freq)
+{
+	struct tavarua_device *radio = video_get_drvdata(video_devdata(file));
+	int retval = -1;
+	struct v4l2_frequency getFreq;
+
+	FMDBG("%s\n", __func__);
+
+	if (freq->type != V4L2_TUNER_RADIO)
+		return -EINVAL;
+
+	FMDBG("Calling tavarua_set_freq\n");
+
+	INIT_COMPLETION(radio->sync_req_done);
+	retval = tavarua_set_freq(radio, freq->frequency);
+	if (retval < 0) {
+		printk(KERN_WARNING DRIVER_NAME
+			": set frequency failed with %d\n", retval);
+	} else {
+		/* Wait for the interrupt, i.e. for the
+		   complete(&radio->sync_req_done) call */
+		if (!wait_for_completion_timeout(&radio->sync_req_done,
+			msecs_to_jiffies(wait_timeout))) {
+			FMDERR("Timeout: No Tune response");
+			retval = tavarua_get_freq(radio, &getFreq);
+			radio->tune_req = 0;
+			if (retval > 0) {
+				if (getFreq.frequency == freq->frequency) {
+					/* This is a success, queue the event */
+					tavarua_q_event(radio,
+						TAVARUA_EVT_TUNE_SUCC);
+					return 0;
+				} else {
+					return -EIO;
+				}
+			}
+		}
+	}
+	radio->tune_req = 0;
+	return retval;
+}
+
+/*=============================================================================
+FUNCTION:  tavarua_vidioc_dqbuf
+=============================================================================*/
+/**
+  This function is called to exchange a buffer with the driver.
+  This is the main buffer function; in essence it is equivalent to a blocking
+  read call.
+
+  Applications call the VIDIOC_DQBUF ioctl to dequeue a filled (capturing) or
+  displayed (output) buffer from the driver's outgoing queue. They just set
+  the type and memory fields of a struct v4l2_buffer as above, when VIDIOC_DQBUF
+  is called with a pointer to this structure the driver fills the remaining
+  fields or returns an error code.
+
+  NOTE:
+  By default VIDIOC_DQBUF blocks when no buffer is in the outgoing queue.
+  When the O_NONBLOCK flag was given to the open() function, VIDIOC_DQBUF
+  returns immediately with an EAGAIN error code when no buffer is available.
+
+  @param file: File descriptor returned by open().
+  @param buffer: pointer to struct v4l2_buffer.
+
+  @return On success 0 is returned, else error code.
+  @return EAGAIN: Non-blocking I/O has been selected using O_NONBLOCK and no
+  buffer was in the outgoing queue.
+  @return EINVAL: The buffer type is not supported, or the index is out of
+  bounds, or no buffers have been allocated yet, or the userptr or length are
+  invalid.
+  @return ENOMEM: Not enough physical or virtual memory was available to enqueue
+  a user pointer buffer.
+  @return EIO: VIDIOC_DQBUF failed due to an internal error. Can also indicate
+  temporary problems like signal loss. Note the driver might dequeue an (empty)
+  buffer despite returning an error, or even stop capturing.
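+
+  Example (userspace sketch; the index selects the driver's internal FIFO and
+  m.userptr/length describe the destination buffer. TAVARUA_BUF_EVENTS and
+  STD_BUF_SIZE are assumed to be visible from the tavarua header, and
+  process_events() is a hypothetical helper):
+
+    unsigned char ev[STD_BUF_SIZE];
+    struct v4l2_buffer buf = {
+        .index = TAVARUA_BUF_EVENTS,
+        .type = V4L2_BUF_TYPE_PRIVATE,
+        .m.userptr = (unsigned long)ev,
+        .length = sizeof(ev),
+    };
+    if (ioctl(fd, VIDIOC_DQBUF, &buf) == 0)
+        process_events(ev, buf.bytesused);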
+*/
+static int tavarua_vidioc_dqbuf(struct file *file, void *priv,
+				struct v4l2_buffer *buffer)
+{
+
+	struct tavarua_device  *radio = video_get_drvdata(video_devdata(file));
+	enum tavarua_buf_t buf_type = buffer->index;
+	struct kfifo *data_fifo;
+	unsigned char *buf = (unsigned char *)buffer->m.userptr;
+	unsigned int len = buffer->length;
+	FMDBG("%s: requesting buffer %d\n", __func__, buf_type);
+	/* check if we can access the user buffer */
+	if (!access_ok(VERIFY_WRITE, buf, len))
+		return -EFAULT;
+	if ((buf_type < TAVARUA_BUF_MAX) && (buf_type >= 0)) {
+		data_fifo = &radio->data_buf[buf_type];
+		if (buf_type == TAVARUA_BUF_EVENTS) {
+			if (wait_event_interruptible(radio->event_queue,
+				kfifo_len(data_fifo)) < 0) {
+				return -EINTR;
+			}
+		}
+	} else {
+		FMDERR("invalid buffer type\n");
+		return -EINVAL;
+	}
+	buffer->bytesused = kfifo_out_locked(data_fifo, buf, len,
+					&radio->buf_lock[buf_type]);
+
+	return 0;
+}
+
+/*=============================================================================
+FUNCTION:  tavarua_vidioc_g_fmt_type_private
+=============================================================================*/
+/**
+  This function is here to make the v4l2 framework happy.
+  We cannot use private buffers without it.
+
+  @param file: File descriptor returned by open().
+  @param f: pointer to struct v4l2_format.
+
+  @return On success 0 is returned, else error code.
+  @return EINVAL: The tuner index is out of bounds or the value in the type
+  field is wrong.
+*/
+static int tavarua_vidioc_g_fmt_type_private(struct file *file, void *priv,
+						struct v4l2_format *f)
+{
+	return 0;
+
+}
+
+/*=============================================================================
+FUNCTION:  tavarua_vidioc_s_hw_freq_seek
+=============================================================================*/
+/**
+  This function is called to perform a hardware frequency seek.
+
+  Start a hardware frequency seek from the current frequency. To do this
+  applications initialize the tuner, type, seek_upward and wrap_around fields,
+  and zero out the reserved array of a struct v4l2_hw_freq_seek and call the
+  VIDIOC_S_HW_FREQ_SEEK ioctl with a pointer to this structure.
+
+  This ioctl is supported if the V4L2_CAP_HW_FREQ_SEEK capability is set.
+
+  @param file: File descriptor returned by open().
+  @param seek: pointer to struct v4l2_hw_freq_seek.
+
+  @return On success 0 is returned, else error code.
+  @return EINVAL: The tuner index is out of bounds or the value in the type
+  field is wrong.
+  @return EAGAIN: The ioctl timed-out. Try again.
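+
+  Example (userspace sketch; seeks upward from the current frequency):
+
+    struct v4l2_hw_freq_seek seek = {
+        .tuner = 0,
+        .type = V4L2_TUNER_RADIO,
+        .seek_upward = 1,
+    };
+    ioctl(fd, VIDIOC_S_HW_FREQ_SEEK, &seek);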
+*/
+static int tavarua_vidioc_s_hw_freq_seek(struct file *file, void *priv,
+					struct v4l2_hw_freq_seek *seek)
+{
+	struct tavarua_device  *radio = video_get_drvdata(video_devdata(file));
+	int dir;
+	if (seek->seek_upward)
+		dir = SRCH_DIR_UP;
+	else
+		dir = SRCH_DIR_DOWN;
+	FMDBG("starting search\n");
+	return tavarua_search(radio, CTRL_ON, dir);
+}
+
+/*
+ * tavarua_viddev_template - video device interface
+ */
+static const struct v4l2_ioctl_ops tavarua_ioctl_ops = {
+	.vidioc_querycap              = tavarua_vidioc_querycap,
+	.vidioc_queryctrl             = tavarua_vidioc_queryctrl,
+	.vidioc_g_ctrl                = tavarua_vidioc_g_ctrl,
+	.vidioc_s_ctrl                = tavarua_vidioc_s_ctrl,
+	.vidioc_g_tuner               = tavarua_vidioc_g_tuner,
+	.vidioc_s_tuner               = tavarua_vidioc_s_tuner,
+	.vidioc_g_frequency           = tavarua_vidioc_g_frequency,
+	.vidioc_s_frequency           = tavarua_vidioc_s_frequency,
+	.vidioc_s_hw_freq_seek        = tavarua_vidioc_s_hw_freq_seek,
+	.vidioc_dqbuf                 = tavarua_vidioc_dqbuf,
+	.vidioc_g_fmt_type_private    = tavarua_vidioc_g_fmt_type_private,
+	.vidioc_s_ext_ctrls           = tavarua_vidioc_s_ext_ctrls,
+};
+
+static struct video_device tavarua_viddev_template = {
+	.fops                   = &tavarua_fops,
+	.ioctl_ops              = &tavarua_ioctl_ops,
+	.name                   = DRIVER_NAME,
+	.release                = video_device_release,
+};
+
+/*==============================================================
+FUNCTION:  tavarua_setup_interrupts
+==============================================================*/
+/**
+  This function enables interrupts.
+
+  @param radio: structure pointer passed by client.
+  @param state: FM radio state (receiver/transmitter/off/reset).
+
+  @return => 0 if successful.
+  @return < 0 if failure.
+*/
+static int tavarua_setup_interrupts(struct tavarua_device *radio,
+					enum radio_state_t state)
+{
+	int retval;
+	unsigned char int_ctrl[XFR_REG_NUM];
+
+	if (!radio->lp_mode)
+		return 0;
+
+	int_ctrl[STATUS_REG1] = READY | TUNE | SEARCH | SCANNEXT |
+				SIGNAL | INTF | SYNC | AUDIO;
+	if (state == FM_RECV)
+		int_ctrl[STATUS_REG2] =  RDSDAT | RDSRT | RDSPS | RDSAF;
+	else
+		int_ctrl[STATUS_REG2] = TXRDSDAT | TXRDSDONE;
+
+	int_ctrl[STATUS_REG3] = TRANSFER | ERROR;
+
+	/* use xfr for interrupt setup */
+	if (radio->chipID == MARIMBA_2_1 || radio->chipID == BAHAMA_1_0
+		|| radio->chipID == BAHAMA_2_0) {
+		FMDBG("Setting interrupts\n");
+		retval =  sync_write_xfr(radio, INT_CTRL, int_ctrl);
+	/* use register write to setup interrupts */
+	} else {
+		retval = tavarua_write_register(radio,
+					STATUS_REG1, int_ctrl[STATUS_REG1]);
+		if (retval < 0)
+			return retval;
+
+		retval = tavarua_write_register(radio,
+					STATUS_REG2, int_ctrl[STATUS_REG2]);
+		if (retval < 0)
+			return retval;
+
+		retval = tavarua_write_register(radio,
+					STATUS_REG3, int_ctrl[STATUS_REG3]);
+		if (retval < 0)
+			return retval;
+	}
+
+	radio->lp_mode = 0;
+	/* tavarua_handle_interrupts force-reads all the interrupt status
+	*  registers, which is not valid for Marimba 2.1 and Bahama
+	*/
+	if ((radio->chipID != MARIMBA_2_1) && (radio->chipID != BAHAMA_1_0)
+		&& (radio->chipID != BAHAMA_2_0))
+		tavarua_handle_interrupts(radio);
+
+	return retval;
+
+}
+
+/*==============================================================
+FUNCTION:  tavarua_disable_interrupts
+==============================================================*/
+/**
+  This function disables interrupts.
+
+  @param radio: structure pointer passed by client.
+
+  @return => 0 if successful.
+  @return < 0 if failure.
+*/
+static int tavarua_disable_interrupts(struct tavarua_device *radio)
+{
+	unsigned char lpm_buf[XFR_REG_NUM];
+	int retval;
+	if (radio->lp_mode)
+		return 0;
+	FMDBG("%s\n", __func__);
+	/* In low power mode, disable all the interrupts that are not being
+	   waited on by the application */
+	lpm_buf[STATUS_REG1] = TUNE | SEARCH | SCANNEXT;
+	lpm_buf[STATUS_REG2] = 0x00;
+	lpm_buf[STATUS_REG3] = TRANSFER;
+	/* use xfr for interrupt setup */
+	wait_timeout = 100;
+	if (radio->chipID == MARIMBA_2_1 || radio->chipID == BAHAMA_1_0
+		|| radio->chipID == BAHAMA_2_0)
+		retval = sync_write_xfr(radio, INT_CTRL, lpm_buf);
+	/* use register write to setup interrupts */
+	else
+		retval = tavarua_write_registers(radio, STATUS_REG1, lpm_buf,
+							ARRAY_SIZE(lpm_buf));
+
+	/*INT_CTL writes may fail with TIME_OUT as all the
+	interrupts have been disabled
+	*/
+	if (retval > -1 || retval == -ETIME) {
+		radio->lp_mode = 1;
+		/*Consider timeout as a valid case here*/
+		retval = 0;
+	}
+	wait_timeout = WAIT_TIMEOUT;
+	return retval;
+
+}
+
+/*==============================================================
+FUNCTION:  tavarua_start
+==============================================================*/
+/**
+  Starts/enables the device (FM radio).
+
+  @param radio: structure pointer passed by client.
+  @param state: FM radio state (receiver/transmitter/off/reset).
+
+  @return On success 0 is returned, else error code.
+*/
+static int tavarua_start(struct tavarua_device *radio,
+				enum radio_state_t state)
+{
+
+	int retval;
+	FMDBG("%s <%d>\n", __func__, state);
+	/* set geographic region */
+	radio->region_params.region = TAVARUA_REGION_US;
+
+	/* set radio mode */
+	retval = tavarua_write_register(radio, RDCTRL, state);
+	if (retval < 0)
+		return retval;
+	/* wait for radio to init */
+	msleep(RADIO_INIT_TIME);
+	/* enable interrupts */
+	tavarua_setup_interrupts(radio, state);
+	/* default region is US */
+	radio->region_params.band_low = US_LOW_BAND * FREQ_MUL;
+	radio->region_params.band_high = US_HIGH_BAND * FREQ_MUL;
+
+	return 0;
+}
+
+/*==============================================================
+FUNCTION:  tavarua_suspend
+==============================================================*/
+/**
+  Save state and stop all devices in system.
+
+  @param pdev: platform device to be suspended.
+  @param state: Power state to put each device in.
+
+  @return On success 0 is returned, else error code.
+*/
+static int tavarua_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct tavarua_device *radio = platform_get_drvdata(pdev);
+	int retval;
+	int users = 0;
+	printk(KERN_INFO DRIVER_NAME "%s: radio suspend\n\n", __func__);
+	if (radio) {
+		mutex_lock(&radio->lock);
+		users = radio->users;
+		mutex_unlock(&radio->lock);
+		if (users) {
+			retval = tavarua_disable_interrupts(radio);
+			if (retval < 0) {
+				printk(KERN_INFO DRIVER_NAME
+					"tavarua_suspend error %d\n", retval);
+				return -EIO;
+			}
+		}
+	}
+	return 0;
+}
+
+/*==============================================================
+FUNCTION:  tavarua_resume
+==============================================================*/
+/**
+  Restore state of each device in system.
+
+  @param pdev: platform device to be resumed.
+
+  @return On success 0 is returned, else error code.
+*/
+static int tavarua_resume(struct platform_device *pdev)
+{
+
+	struct tavarua_device *radio = platform_get_drvdata(pdev);
+	int retval;
+	int users = 0;
+	printk(KERN_INFO DRIVER_NAME "%s: radio resume\n\n", __func__);
+	if (radio) {
+		mutex_lock(&radio->lock);
+		users = radio->users;
+		mutex_unlock(&radio->lock);
+
+		if (users) {
+			retval = tavarua_setup_interrupts(radio,
+			(radio->registers[RDCTRL] & 0x03));
+			if (retval < 0) {
+				printk(KERN_INFO DRIVER_NAME "Error in \
+					tavarua_resume %d\n", retval);
+				return -EIO;
+			}
+		}
+	}
+	return 0;
+}
+
+/*==============================================================
+FUNCTION:  tavarua_set_audio_path
+==============================================================*/
+/**
+  This function will configure the audio path to and from the
+  FM core.
+
+  This interface is expected to be called from the multimedia
+  driver's thread.  This interface should only be called when
+  the FM hardware is enabled.  If the FM hardware is not
+  currently enabled, this interface will return an error.
+
+  @param digital_on: Digital audio from the FM core should be enabled/disabled.
+  @param analog_on: Analog audio from the FM core should be enabled/disabled.
+
+  @return On success 0 is returned, else error code.
+*/
+int tavarua_set_audio_path(int digital_on, int analog_on)
+{
+	struct tavarua_device *radio = private_data;
+	int rx_on;
+
+	if (!radio)
+		return -ENOMEM;
+	rx_on = radio->registers[RDCTRL] & FM_RECV;
+	/* RX */
+	FMDBG("%s: digital: %d analog: %d\n", __func__, digital_on, analog_on);
+	SET_REG_FIELD(radio->registers[AUDIOCTRL],
+		((rx_on && analog_on) ? 1 : 0),
+		AUDIORX_ANALOG_OFFSET,
+		AUDIORX_ANALOG_MASK);
+	SET_REG_FIELD(radio->registers[AUDIOCTRL],
+		((rx_on && digital_on) ? 1 : 0),
+		AUDIORX_DIGITAL_OFFSET,
+		AUDIORX_DIGITAL_MASK);
+	SET_REG_FIELD(radio->registers[AUDIOCTRL],
+		(rx_on ? 0 : 1),
+		AUDIOTX_OFFSET,
+		AUDIOTX_MASK);
+	/*
+	I2S Master/Slave configuration:
+	Setting the FM SoC as I2S Master/Slave
+		'false'		- FM SoC is I2S Slave
+		'true'		- FM SoC is I2S Master
+
+	We get this information from the respective target's board file:
+		MSM7x30         - FM SoC is I2S Slave
+		MSM8x60         - FM SoC is I2S Slave
+		MSM7x27A        - FM SoC is I2S Master
+	*/
+
+	if (!radio->pdata->is_fm_soc_i2s_master) {
+		FMDBG("FM SoC is I2S Slave\n");
+		SET_REG_FIELD(radio->registers[AUDIOCTRL], 0,
+			I2SCTRL_OFFSET, I2SCTRL_MASK);
+	} else {
+		FMDBG("FM SoC is I2S Master\n");
+		SET_REG_FIELD(radio->registers[AUDIOCTRL], 1,
+			I2SCTRL_OFFSET, I2SCTRL_MASK);
+	}
+	FMDBG("%s: %x\n", __func__, radio->registers[AUDIOCTRL]);
+	return tavarua_write_register(radio, AUDIOCTRL,
+					radio->registers[AUDIOCTRL]);
+
+}
+
+/*==============================================================
+FUNCTION:  tavarua_probe
+==============================================================*/
+/**
+  Once called, this function initializes and allocates resources, and
+  registers the video tuner device with the v4l2 framework.
+
+  NOTE:
+  probe() should verify that the specified device hardware
+  actually exists; sometimes platform setup code can't be sure.  The probing
+  can use device resources, including clocks, and device platform_data.
+
+  @param pdev: platform device to be probed.
+
+  @return On success 0 is returned, else error code.
+	-ENOMEM in low memory cases
+*/
+static int  __init tavarua_probe(struct platform_device *pdev)
+{
+
+	struct marimba_fm_platform_data *tavarua_pdata;
+	struct tavarua_device *radio;
+	int retval;
+	int i;
+	FMDBG("%s: probe called\n", __func__);
+	/* private data allocation */
+	radio = kzalloc(sizeof(struct tavarua_device), GFP_KERNEL);
+	if (!radio) {
+		retval = -ENOMEM;
+		goto err_initial;
+	}
+
+	radio->marimba = platform_get_drvdata(pdev);
+	tavarua_pdata = pdev->dev.platform_data;
+	radio->pdata = tavarua_pdata;
+	radio->dev = &pdev->dev;
+	platform_set_drvdata(pdev, radio);
+
+	/* video device allocation */
+	radio->videodev = video_device_alloc();
+	if (!radio->videodev) {
+		retval = -ENOMEM;
+		goto err_radio;
+	}
+
+	/* initial configuration */
+	memcpy(radio->videodev, &tavarua_viddev_template,
+	  sizeof(tavarua_viddev_template));
+
+	/*allocate internal buffers for decoded rds and event buffer*/
+	for (i = 0; i < TAVARUA_BUF_MAX; i++) {
+		int kfifo_alloc_rc = 0;
+		spin_lock_init(&radio->buf_lock[i]);
+
+		if (i == TAVARUA_BUF_RAW_RDS)
+			kfifo_alloc_rc = kfifo_alloc(&radio->data_buf[i],
+				rds_buf*3, GFP_KERNEL);
+		else
+			kfifo_alloc_rc = kfifo_alloc(&radio->data_buf[i],
+				STD_BUF_SIZE, GFP_KERNEL);
+
+		if (kfifo_alloc_rc != 0) {
+			printk(KERN_ERR "%s: failed allocating buffers %d\n",
+				__func__, kfifo_alloc_rc);
+			retval = kfifo_alloc_rc;
+			goto err_bufs;
+		}
+	}
+	/* init xfr status */
+	radio->users = 0;
+	radio->xfr_in_progress = 0;
+	radio->xfr_bytes_left = 0;
+	for (i = 0; i < TAVARUA_XFR_MAX; i++)
+		radio->pending_xfrs[i] = 0;
+
+	/* init transmit data */
+	radio->tx_mode = TAVARUA_TX_RT;
+	/* init RT and PS TX data */
+	radio->pty = 0;
+	radio->pi = 0;
+	radio->ps_repeatcount = 0;
+	/* init search params */
+	radio->srch_params.srch_pty = 0;
+	radio->srch_params.srch_pi = 0;
+	radio->srch_params.preset_num = 0;
+	radio->srch_params.get_list = 0;
+	/* radio initializes to low power mode */
+	radio->lp_mode = 1;
+	radio->handle_irq = 1;
+	/* init lock */
+	mutex_init(&radio->lock);
+	/* init completion flags */
+	init_completion(&radio->sync_xfr_start);
+	init_completion(&radio->sync_req_done);
+	radio->tune_req = 0;
+	/* initialize wait queue for event read */
+	init_waitqueue_head(&radio->event_queue);
+	/* initialize wait queue for raw rds read */
+	init_waitqueue_head(&radio->read_queue);
+
+	video_set_drvdata(radio->videodev, radio);
+	/* Start the worker thread for event handling and register
+	 * read_int_stat as the worker function */
+	INIT_DELAYED_WORK(&radio->work, read_int_stat);
+
+	/* register video device */
+	retval = video_register_device(radio->videodev, VFL_TYPE_RADIO,
+					radio_nr);
+	if (retval) {
+		printk(KERN_WARNING DRIVER_NAME
+				": Could not register video device\n");
+		goto err_all;
+	}
+	private_data = radio;
+	return 0;
+
+err_all:
+	video_device_release(radio->videodev);
+err_bufs:
+	for (; i > -1; i--)
+		kfifo_free(&radio->data_buf[i]);
+err_radio:
+	kfree(radio);
+err_initial:
+	return retval;
+}
+
+/*==============================================================
+FUNCTION:  tavarua_remove
+==============================================================*/
+/**
+  Removes the device.
+
+  @param pdev: platform device to be removed.
+
+  @return On success 0 is returned, else error code.
+*/
+static int __devexit tavarua_remove(struct platform_device *pdev)
+{
+	int i;
+	struct tavarua_device *radio = platform_get_drvdata(pdev);
+
+	/* disable irq */
+	tavarua_disable_irq(radio);
+
+	video_unregister_device(radio->videodev);
+
+	/* free internal buffers */
+	for (i = 0; i < TAVARUA_BUF_MAX; i++)
+		kfifo_free(&radio->data_buf[i]);
+
+	/* free state struct */
+	kfree(radio);
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+/*
+ Platform drivers follow the standard driver model convention, where
+ discovery/enumeration is handled outside the drivers, and drivers
+ provide probe() and remove() methods.  They support power management
+ and shutdown notifications using the standard conventions.
+*/
+static struct platform_driver tavarua_driver = {
+	.driver = {
+		.owner  = THIS_MODULE,
+		.name   = "marimba_fm",
+	},
+	.remove = __devexit_p(tavarua_remove),
+	.suspend = tavarua_suspend,
+	.resume = tavarua_resume,
+}; /* platform device we're adding */
+
+
+/*************************************************************************
+ * Module Interface
+ ************************************************************************/
+
+/*==============================================================
+FUNCTION:  radio_module_init
+==============================================================*/
+/**
+  Module entry - add a platform-level device.
+
+  @return Returns zero if the driver registered and bound to a device, else
+  returns a negative error code when the driver is not registered.
+*/
+static int __init radio_module_init(void)
+{
+	printk(KERN_INFO DRIVER_DESC ", Version " DRIVER_VERSION "\n");
+	return platform_driver_probe(&tavarua_driver, tavarua_probe);
+}
+
+/*==============================================================
+FUNCTION:  radio_module_exit
+==============================================================*/
+/**
+  Module exit - removes a platform-level device.
+
+  NOTE:
+  Note that this function will also release all memory- and port-based
+  resources owned by the device (dev->resource).
+
+  @return none.
+*/
+static void __exit radio_module_exit(void)
+{
+	platform_driver_unregister(&tavarua_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_VERSION(DRIVER_VERSION);
+
+module_init(radio_module_init);
+module_exit(radio_module_exit);
+
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index bb53de7..95255fe 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -1031,6 +1031,31 @@
 	  This driver can be compiled as a module, called s2255drv.
 
 endif # V4L_USB_DRIVERS
+
+#
+# MSM camera configuration
+#
+
+comment "Qualcomm MSM Camera And Video"
+
+menuconfig MSM_CAMERA
+	bool "Qualcomm MSM camera and video capture support"
+	depends on ARCH_MSM && VIDEO_V4L2 && I2C
+	default y
+	help
+	  Say Y here to enable selecting the video adapters for
+	  Qualcomm msm camera and video encoding
+
+config MSM_CAMERA_DEBUG
+	bool "Qualcomm MSM camera debugging with printk"
+	depends on MSM_CAMERA
+	default n
+	help
+	  Enable printk() debug for msm camera
+
+
+source "drivers/media/video/msm/Kconfig"
+
 endif # VIDEO_CAPTURE_DRIVERS
 
 menuconfig V4L_MEM2MEM_DRIVERS
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index f0fecd6..724c7a3 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -10,8 +10,8 @@
 
 omap2cam-objs	:=	omap24xxcam.o omap24xxcam-dma.o
 
-videodev-objs	:=	v4l2-dev.o v4l2-ioctl.o v4l2-device.o v4l2-fh.o \
-			v4l2-event.o v4l2-ctrls.o v4l2-subdev.o
+videodev-objs	:=	v4l2-dev.o v4l2-ioctl.o v4l2-device.o v4l2-fh.o \
+			v4l2-event.o v4l2-ctrls.o v4l2-subdev.o videobuf-core.o videobuf-msm-mem.o videobuf-dma-contig.o
 
 # V4L2 core modules
 
@@ -182,6 +182,7 @@
 
 obj-y	+= davinci/
 
+obj-$(CONFIG_MSM_CAMERA) += msm/
 obj-$(CONFIG_ARCH_OMAP)	+= omap/
 
 EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
diff --git a/drivers/media/video/msm/Kconfig b/drivers/media/video/msm/Kconfig
new file mode 100644
index 0000000..6ba1abd
--- /dev/null
+++ b/drivers/media/video/msm/Kconfig
@@ -0,0 +1,194 @@
+config MSM_CAMERA_V4L2
+        bool "MSM Camera V4L2 Interface"
+        depends on MSM_CAMERA
+        default n
+        ---help---
+          This flag enables V4L2 interface of MSM
+          camera driver. If enabled, application interacts
+          with /dev/video0 through V4L2 APIs. Otherwise,
+          native APIs are used through /dev/config0, /dev/frame0,
+          and /dev/control0.
+
+comment "Camera Sensor Selection"
+config MT9T013
+	bool "Sensor mt9t013 (BAYER 3M)"
+	depends on MSM_CAMERA && !ARCH_MSM8X60 && !ARCH_MSM8960 && !MSM_CAMERA_V4L2
+	default y
+	---help---
+	  MICRON 3M Bayer Sensor with AutoFocus
+config MT9D113
+	bool "Sensor mt9d113 (YUV 2M)"
+	depends on MSM_CAMERA && ARCH_MSM8X60 && !MSM_CAMERA_V4L2
+	default y
+	---help---
+	  MICRON 2M YUV Sensor
+	  This sensor is the front camera on QT8660.
+	  This uses csi mipi interface.
+	  This sensor is used only on QT device.
+config MT9D112
+	bool "Sensor mt9d112 (YUV 2M)"
+	depends on MSM_CAMERA && !ARCH_MSM8X60 && !ARCH_MSM8960 && !MSM_CAMERA_V4L2
+	default y
+	---help---
+	  MICRON 2M YUV Sensor
+config IMX074
+	bool "Sensor IMX074 (BAYER 13.5M)"
+	depends on MSM_CAMERA && (ARCH_MSM8X60 || ARCH_MSM8960)
+	default y
+	---help---
+	SONY 13.5 MP Bayer Sensor
+config WEBCAM_OV7692
+	bool "Sensor OV7692 (VGA YUV)"
+	depends on MSM_CAMERA && ARCH_MSM8X60 && !MSM_CAMERA_V4L2
+	default y
+	---help---
+	  Omni Vision VGA YUV Sensor.
+config WEBCAM_OV9726
+	bool "Sensor OV9726 (VGA Bayer)"
+	depends on MSM_CAMERA && (ARCH_MSM8X60 || ARCH_MSM7X30 || ARCH_MSM7X27A) && !MSM_CAMERA_V4L2
+	default n
+	---help---
+	  Omni Vision VGA Bayer Sensor.
+#	This sensor is used as a webcam.
+#	This uses the CSI interface.
+config VX6953
+	bool "Sensor VX6953 (BAYER 5M)"
+	depends on MSM_CAMERA && ARCH_MSM7X30
+	default y
+	---help---
+	STM 5M Bayer Sensor with EDOF
+config SN12M0PZ
+	bool "Sensor sn12m0pz (Bayer 12 MP)"
+	depends on MSM_CAMERA && ARCH_MSM7X30 && !MSM_CAMERA_V4L2
+	default y
+	---help---
+	  Sony 12 MP Bayer Sensor
+config MT9P012
+	bool "Sensor mt9p012 (BAYER 5M)"
+	depends on MSM_CAMERA && !ARCH_MSM8X60 && !MSM_CAMERA_V4L2
+	default y
+	---help---
+	  MICRON 5M Bayer Sensor with Autofocus
+
+choice
+	prompt "AF module"
+	depends on MT9P012 && !ARCH_MSM8X60 && !MSM_CAMERA_V4L2
+	default MSM_CAMERA_AF_FOXCONN
+
+config MSM_CAMERA_AF_FOXCONN
+	bool "FOXCONN Module"
+	help
+	  This driver supports FOXCONN AF module for 5M Bayer sensor
+
+config MSM_CAMERA_AF_BAM
+	bool "BAM Module"
+	help
+	  This driver supports BAM AF module for 5M Bayer sensor
+
+endchoice
+
+config MT9P012_KM
+	bool "Sensor mt9p012 KM module (BAYER 5M)"
+	depends on MSM_CAMERA && !ARCH_MSM8X60 && !MSM_CAMERA_V4L2
+	default y
+	---help---
+	  MICRON 5M Bayer Sensor KM modules with Autofocus
+
+config MT9E013
+	bool "Sensor mt9e013 module (BAYER 8M)"
+	depends on MSM_CAMERA && (ARCH_MSM7X30 || ARCH_MSM8X60 || ARCH_MSM7X27A) && !MSM_CAMERA_V4L2
+	default n
+	---help---
+	  Aptina 8M Bayer Sensor modules with Autofocus
+
+config S5K3E2FX
+	bool "Sensor s5k3e2fx (Samsung 5M)"
+	depends on MSM_CAMERA && !ARCH_MSM8X60 && !MSM_CAMERA_V4L2
+	default y
+	---help---
+	  Samsung 5M with Autofocus
+
+config QS_S5K4E1
+	bool "Sensor qs_s5k4e1 (Samsung 5M)"
+	depends on MSM_CAMERA && ARCH_MSM8X60 && !MSM_CAMERA_V4L2
+	default y
+	---help---
+	  Samsung 5M with Autofocus
+
+config S5K4E1
+	bool "Sensor Sensor s5k4e1 (Samsung 5M)"
+	depends on MSM_CAMERA
+	default n
+	---help---
+	  Support for S5k4E1 samsung sensor driver.
+	  It is a Bayer 5MP sensor with auto focus and it supports
+	  two mipi lanes, required for msm7x2xA platform.
+	  Say Y here if this is msm7x2xA variant platform.
+
+config MSM_CAMERA_FLASH_SC628A
+	bool "Qualcomm MSM camera sc628a flash support"
+	depends on MSM_CAMERA
+	default n
+	---help---
+	  Enable support for LED flash for msm camera.
+	  It is a Semtech charge pump flash driver and it
+	  supports spotlight and flash light modes with
+	  different current levels.
+
+config IMX072
+	bool "Sensor imx072 (Sony 5M)"
+	default n
+	---help---
+	  Support for IMX072 sony sensor driver.
+	  It is a Bayer 5MP sensor with auto focus and it supports
+	  two mipi lanes, required for msm7x2xA platform.
+	  Say Y here if this is msm7x2xA variant platform.
+
+config OV2720
+	bool "Sensor ov2720 (Omnivision 2MP)"
+	depends on MSM_CAMERA && ARCH_MSM8960
+	default y
+
+config VB6801
+	bool "Sensor vb6801"
+	depends on MSM_CAMERA && !ARCH_MSM8X60 && !MSM_CAMERA_V4L2
+	---help---
+	  5M with flash
+
+config MSM_CAMERA_FLASH
+	bool "Qualcomm MSM camera flash support"
+	depends on MSM_CAMERA
+	default y
+	---help---
+	  Enable support for LED flash for msm camera
+
+config MSM_CAMERA_SENSOR
+	bool "Qualcomm MSM camera sensor support"
+	depends on MSM_CAMERA
+	default y
+
+config MSM_GEMINI
+	tristate "Qualcomm MSM Gemini Jpeg Engine support"
+	depends on MSM_CAMERA && (ARCH_MSM7X30 || ARCH_MSM8X60 || ARCH_MSM8960)
+	default n
+	---help---
+	  Enable support for Gemini Jpeg Engine
+
+config MSM_VPE
+	tristate "Qualcomm MSM Video Pre-processing Engine support"
+	depends on MSM_CAMERA && (ARCH_MSM7X30 || ARCH_MSM8X60)
+	default y
+	---help---
+	  Enable support for Video Pre-processing Engine
+
+config QUP_EXCLUSIVE_TO_CAMERA
+	bool "QUP exclusive to camera"
+	depends on MSM_CAMERA
+	default y
+	---help---
+	  This flag, when enabled, states that QUP is
+	  exclusive to the camera. If it is disabled,
+	  the lvs1 voltage is enabled by QUP in the
+	  board file, since QUP is used by applications
+	  other than the camera.
+
diff --git a/drivers/media/video/msm/Makefile b/drivers/media/video/msm/Makefile
new file mode 100644
index 0000000..c366834
--- /dev/null
+++ b/drivers/media/video/msm/Makefile
@@ -0,0 +1,46 @@
+GCC_VERSION      := $(shell $(CONFIG_SHELL) $(PWD)/scripts/gcc-version.sh $(CROSS_COMPILE)gcc)
+ifeq ($(GCC_VERSION),0404)
+CFLAGS_REMOVE_msm_vfe8x.o = -Wframe-larger-than=1024
+endif
+
+ifeq ($(CONFIG_MSM_CAMERA_V4L2),y)
+  obj-$(CONFIG_MSM_CAMERA) += msm_isp.o msm.o msm_mem.o msm_mctl.o
+else
+  obj-$(CONFIG_MSM_CAMERA) += msm_camera.o
+endif
+obj-$(CONFIG_MSM_CAMERA) += msm_axi_qos.o
+obj-$(CONFIG_MSM_CAMERA_FLASH) += flash.o
+obj-$(CONFIG_MSM_CAMERA_SENSOR) += msm_sensor.o
+obj-$(CONFIG_ARCH_MSM_ARM11) += msm_vfe7x.o msm_io7x.o
+obj-$(CONFIG_ARCH_MSM7X27A) += msm_vfe7x27a.o msm_io_7x27a.o
+obj-$(CONFIG_ARCH_MSM7X30) += msm_vfe31.o msm_io_vfe31.o msm_vpe1.o
+obj-$(CONFIG_ARCH_QSD8X50) += msm_vfe8x.o msm_vfe8x_proc.o msm_io8x.o
+obj-$(CONFIG_ARCH_MSM8X60) += msm_vfe31.o msm_io_8x60.o msm_vpe1.o
+obj-$(CONFIG_ARCH_MSM8960) += msm_io_8960.o msm_ispif.o msm_vfe32.o msm_vpe1.o
+obj-$(CONFIG_MT9T013) += mt9t013.o mt9t013_reg.o
+obj-$(CONFIG_SN12M0PZ) += sn12m0pz.o sn12m0pz_reg.o
+obj-$(CONFIG_MT9P012) += mt9p012_reg.o
+obj-$(CONFIG_MSM_CAMERA_AF_FOXCONN) += mt9p012_fox.o
+obj-$(CONFIG_MSM_CAMERA_AF_BAM) += mt9p012_bam.o
+obj-$(CONFIG_MT9P012_KM) += mt9p012_km.o mt9p012_km_reg.o
+obj-$(CONFIG_MT9E013) += mt9e013.o mt9e013_reg.o
+obj-$(CONFIG_S5K3E2FX) += s5k3e2fx.o
+obj-$(CONFIG_S5K4E1) += s5k4e1.o s5k4e1_reg.o
+# FIXME: Merging the two ifeq blocks causes the VX6953 preview to not come up.
+ifeq ($(CONFIG_MSM_CAMERA_V4L2),y)
+  obj-$(CONFIG_VX6953) += vx6953_v4l2.o vx6953_reg_v4l2.o
+  obj-$(CONFIG_IMX074) += imx074_v4l2.o imx074_reg.o
+else
+  obj-$(CONFIG_VX6953) += vx6953.o vx6953_reg.o
+  obj-$(CONFIG_IMX074) += imx074.o imx074_reg.o
+endif
+obj-$(CONFIG_QS_S5K4E1) += qs_s5k4e1.o qs_s5k4e1_reg.o
+obj-$(CONFIG_VB6801) += vb6801.o
+obj-$(CONFIG_IMX072) += imx072.o imx072_reg.o
+obj-$(CONFIG_OV2720) += ov2720.o
+obj-$(CONFIG_WEBCAM_OV9726) += ov9726.o ov9726_reg.o
+obj-$(CONFIG_WEBCAM_OV7692) += ov7692.o
+obj-$(CONFIG_MT9D112) += mt9d112.o mt9d112_reg.o
+
+obj-$(CONFIG_MT9D113) += mt9d113.o mt9d113_reg.o
+obj-$(CONFIG_MSM_GEMINI) += msm_gemini_dev.o msm_gemini_sync.o msm_gemini_core.o msm_gemini_hw.o msm_gemini_platform.o
diff --git a/drivers/media/video/msm/flash.c b/drivers/media/video/msm/flash.c
new file mode 100644
index 0000000..cd81125
--- /dev/null
+++ b/drivers/media/video/msm/flash.c
@@ -0,0 +1,563 @@
+
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/leds-pmic8058.h>
+#include <linux/pwm.h>
+#include <linux/pmic8058-pwm.h>
+#include <linux/hrtimer.h>
+#include <linux/i2c.h>
+#include <mach/pmic.h>
+#include <mach/camera.h>
+#include <mach/gpio.h>
+
+struct timer_list timer_flash;
+
+enum msm_cam_flash_stat{
+	MSM_CAM_FLASH_OFF,
+	MSM_CAM_FLASH_ON,
+};
+
+#if defined CONFIG_MSM_CAMERA_FLASH_SC628A
+static struct sc628a_work_t *sc628a_flash;
+static struct i2c_client *sc628a_client;
+static DECLARE_WAIT_QUEUE_HEAD(sc628a_wait_queue);
+
+struct sc628a_work_t {
+	struct work_struct work;
+};
+
+static const struct i2c_device_id sc628a_i2c_id[] = {
+	{"sc628a", 0},
+	{ }
+};
+
+static int32_t sc628a_i2c_txdata(unsigned short saddr,
+		unsigned char *txdata, int length)
+{
+	struct i2c_msg msg[] = {
+		{
+			.addr = saddr,
+			.flags = 0,
+			.len = length,
+			.buf = txdata,
+		},
+	};
+	if (i2c_transfer(sc628a_client->adapter, msg, 1) < 0) {
+		pr_err("sc628a_i2c_txdata faild 0x%x\n", saddr);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int32_t sc628a_i2c_write_b_flash(uint8_t waddr, uint8_t bdata)
+{
+	int32_t rc = -EFAULT;
+	unsigned char buf[2];
+
+	memset(buf, 0, sizeof(buf));
+	buf[0] = waddr;
+	buf[1] = bdata;
+
+	rc = sc628a_i2c_txdata(sc628a_client->addr, buf, 2);
+	if (rc < 0) {
+		pr_err("i2c_write_b failed, addr = 0x%x, val = 0x%x!\n",
+				waddr, bdata);
+	}
+	return rc;
+}
+
+static int sc628a_init_client(struct i2c_client *client)
+{
+	/* Initialize the MSM_CAMI2C Chip */
+	init_waitqueue_head(&sc628a_wait_queue);
+	return 0;
+}
+
+static int sc628a_i2c_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	int rc = 0;
+	CDBG("sc628a_probe called!\n");
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		pr_err("i2c_check_functionality failed\n");
+		goto probe_failure;
+	}
+
+	sc628a_flash = kzalloc(sizeof(struct sc628a_work_t), GFP_KERNEL);
+	if (!sc628a_flash) {
+		pr_err("kzalloc failed.\n");
+		rc = -ENOMEM;
+		goto probe_failure;
+	}
+
+	i2c_set_clientdata(client, sc628a_flash);
+	sc628a_init_client(client);
+	sc628a_client = client;
+
+	msleep(50);
+
+	CDBG("sc628a_probe succeeded! rc = %d\n", rc);
+	return 0;
+
+probe_failure:
+	pr_err("sc628a_probe failed! rc = %d\n", rc);
+	return rc;
+}
+
+static struct i2c_driver sc628a_i2c_driver = {
+	.id_table = sc628a_i2c_id,
+	.probe  = sc628a_i2c_probe,
+	.remove = __exit_p(sc628a_i2c_remove),
+	.driver = {
+		.name = "sc628a",
+	},
+};
+#endif
+
+static int config_flash_gpio_table(enum msm_cam_flash_stat stat,
+			struct msm_camera_sensor_strobe_flash_data *sfdata)
+{
+	int rc = 0, i = 0;
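+	/* Each table entry is {gpio, is_output}: the trigger and charge lines
+	 * are driven as outputs, while charge_done is sampled as an input. */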
+	int msm_cam_flash_gpio_tbl[][2] = {
+		{sfdata->flash_trigger, 1},
+		{sfdata->flash_charge, 1},
+		{sfdata->flash_charge_done, 0}
+	};
+
+	if (stat == MSM_CAM_FLASH_ON) {
+		for (i = 0; i < ARRAY_SIZE(msm_cam_flash_gpio_tbl); i++) {
+			rc = gpio_request(msm_cam_flash_gpio_tbl[i][0],
+							  "CAM_FLASH_GPIO");
+			if (unlikely(rc < 0)) {
+				pr_err("%s not able to get gpio\n", __func__);
+				for (i--; i >= 0; i--)
+					gpio_free(msm_cam_flash_gpio_tbl[i][0]);
+				break;
+			}
+			if (msm_cam_flash_gpio_tbl[i][1])
+				gpio_direction_output(
+					msm_cam_flash_gpio_tbl[i][0], 0);
+			else
+				gpio_direction_input(
+					msm_cam_flash_gpio_tbl[i][0]);
+		}
+	} else {
+		for (i = 0; i < ARRAY_SIZE(msm_cam_flash_gpio_tbl); i++) {
+			gpio_direction_input(msm_cam_flash_gpio_tbl[i][0]);
+			gpio_free(msm_cam_flash_gpio_tbl[i][0]);
+		}
+	}
+	return rc;
+}
+
+int msm_camera_flash_current_driver(
+	struct msm_camera_sensor_flash_current_driver *current_driver,
+	unsigned led_state)
+{
+	int rc = 0;
+#if defined CONFIG_LEDS_PMIC8058
+	int idx;
+	const struct pmic8058_leds_platform_data *driver_channel =
+		current_driver->driver_channel;
+	int num_leds = driver_channel->num_leds;
+
+	CDBG("%s: led_state = %d\n", __func__, led_state);
+
+	/* Evenly distribute current across all channels */
+	switch (led_state) {
+	case MSM_CAMERA_LED_OFF:
+		for (idx = 0; idx < num_leds; ++idx) {
+			rc = pm8058_set_led_current(
+				driver_channel->leds[idx].id, 0);
+			if (rc < 0)
+				pr_err(
+					"%s: FAIL name = %s, rc = %d\n",
+					__func__,
+					driver_channel->leds[idx].name,
+					rc);
+		}
+		break;
+
+	case MSM_CAMERA_LED_LOW:
+		for (idx = 0; idx < num_leds; ++idx) {
+			rc = pm8058_set_led_current(
+				driver_channel->leds[idx].id,
+				current_driver->low_current/num_leds);
+			if (rc < 0)
+				pr_err(
+					"%s: FAIL name = %s, rc = %d\n",
+					__func__,
+					driver_channel->leds[idx].name,
+					rc);
+		}
+		break;
+
+	case MSM_CAMERA_LED_HIGH:
+		for (idx = 0; idx < num_leds; ++idx) {
+			rc = pm8058_set_led_current(
+				driver_channel->leds[idx].id,
+				current_driver->high_current/num_leds);
+			if (rc < 0)
+				pr_err(
+					"%s: FAIL name = %s, rc = %d\n",
+					__func__,
+					driver_channel->leds[idx].name,
+					rc);
+		}
+		break;
+
+	default:
+		rc = -EFAULT;
+		break;
+	}
+	CDBG("%s: return %d\n", __func__, rc);
+#endif /* CONFIG_LEDS_PMIC8058 */
+#if defined CONFIG_MSM_CAMERA_FLASH_SC628A
+	if (!sc628a_client) {
+		rc = i2c_add_driver(&sc628a_i2c_driver);
+		if (rc < 0 || sc628a_client == NULL) {
+			rc = -ENOTSUPP;
+			pr_err("I2C add driver failed");
+			return rc;
+		}
+		rc = gpio_request(current_driver->led1, "sc628a");
+		if (!rc) {
+			gpio_direction_output(current_driver->led1, 0);
+			gpio_set_value_cansleep(current_driver->led1, 1);
+		} else
+			i2c_del_driver(&sc628a_i2c_driver);
+		rc = gpio_request(current_driver->led2, "sc628a");
+		if (!rc) {
+			gpio_direction_output(current_driver->led2, 0);
+			gpio_set_value_cansleep(current_driver->led2, 1);
+		} else {
+			i2c_del_driver(&sc628a_i2c_driver);
+			gpio_free(current_driver->led1);
+		}
+	}
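+	/* Register 0x02 appears to set the SC628A flash current level; the
+	 * 0x06 (low) and 0x49 (high) values below are presumably board-tuned. */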
+	switch (led_state) {
+	case MSM_CAMERA_LED_OFF:
+		sc628a_i2c_write_b_flash(0x02, 0x0);
+		break;
+	case MSM_CAMERA_LED_LOW:
+		sc628a_i2c_write_b_flash(0x02, 0x06);
+		break;
+	case MSM_CAMERA_LED_HIGH:
+		sc628a_i2c_write_b_flash(0x02, 0x49);
+		break;
+	default:
+		rc = -EFAULT;
+		break;
+	}
+#endif
+
+	return rc;
+}
+
+
+static int msm_camera_flash_pwm(
+	struct msm_camera_sensor_flash_pwm *pwm,
+	unsigned led_state)
+{
+	int rc = 0;
+	int PWM_PERIOD = USEC_PER_SEC / pwm->freq;
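+	/* PWM period in microseconds; the duty cycle below is scaled as
+	 * load/max_load of this period. */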
+
+	static struct pwm_device *flash_pwm;
+
+	if (!flash_pwm) {
+		flash_pwm = pwm_request(pwm->channel, "camera-flash");
+		if (flash_pwm == NULL || IS_ERR(flash_pwm)) {
+			pr_err("%s: FAIL pwm_request(): flash_pwm=%p\n",
+			       __func__, flash_pwm);
+			flash_pwm = NULL;
+			return -ENXIO;
+		}
+	}
+
+	switch (led_state) {
+	case MSM_CAMERA_LED_LOW:
+		rc = pwm_config(flash_pwm,
+			(PWM_PERIOD/pwm->max_load)*pwm->low_load,
+			PWM_PERIOD);
+		if (rc >= 0)
+			rc = pwm_enable(flash_pwm);
+		break;
+
+	case MSM_CAMERA_LED_HIGH:
+		rc = pwm_config(flash_pwm,
+			(PWM_PERIOD/pwm->max_load)*pwm->high_load,
+			PWM_PERIOD);
+		if (rc >= 0)
+			rc = pwm_enable(flash_pwm);
+		break;
+
+	case MSM_CAMERA_LED_OFF:
+		pwm_disable(flash_pwm);
+		break;
+
+	default:
+		rc = -EFAULT;
+		break;
+	}
+
+	return rc;
+}
+
+int msm_camera_flash_pmic(
+	struct msm_camera_sensor_flash_pmic *pmic,
+	unsigned led_state)
+{
+	int rc = 0;
+
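+	/* The second LED source is driven only when the board declares more
+	 * than one flash current source. */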
+	switch (led_state) {
+	case MSM_CAMERA_LED_OFF:
+		rc = pmic->pmic_set_current(pmic->led_src_1, 0);
+		if (pmic->num_of_src > 1)
+			rc = pmic->pmic_set_current(pmic->led_src_2, 0);
+		break;
+
+	case MSM_CAMERA_LED_LOW:
+		rc = pmic->pmic_set_current(pmic->led_src_1,
+				pmic->low_current);
+		if (pmic->num_of_src > 1)
+			rc = pmic->pmic_set_current(pmic->led_src_2, 0);
+		break;
+
+	case MSM_CAMERA_LED_HIGH:
+		rc = pmic->pmic_set_current(pmic->led_src_1,
+			pmic->high_current);
+		if (pmic->num_of_src > 1)
+			rc = pmic->pmic_set_current(pmic->led_src_2,
+				pmic->high_current);
+		break;
+
+	default:
+		rc = -EFAULT;
+		break;
+	}
+	CDBG("flash_set_led_state: return %d\n", rc);
+
+	return rc;
+}
+
+int32_t msm_camera_flash_set_led_state(
+	struct msm_camera_sensor_flash_data *fdata, unsigned led_state)
+{
+	int32_t rc;
+
+	CDBG("flash_set_led_state: %d flash_sr_type=%d\n", led_state,
+	    fdata->flash_src->flash_sr_type);
+
+	if (fdata->flash_type != MSM_CAMERA_FLASH_LED)
+		return -ENODEV;
+
+	switch (fdata->flash_src->flash_sr_type) {
+	case MSM_CAMERA_FLASH_SRC_PMIC:
+		rc = msm_camera_flash_pmic(&fdata->flash_src->_fsrc.pmic_src,
+			led_state);
+		break;
+
+	case MSM_CAMERA_FLASH_SRC_PWM:
+		rc = msm_camera_flash_pwm(&fdata->flash_src->_fsrc.pwm_src,
+			led_state);
+		break;
+
+	case MSM_CAMERA_FLASH_SRC_CURRENT_DRIVER:
+		rc = msm_camera_flash_current_driver(
+			&fdata->flash_src->_fsrc.current_driver_src,
+			led_state);
+		break;
+
+	default:
+		rc = -ENODEV;
+		break;
+	}
+
+	return rc;
+}
+
+static int msm_strobe_flash_xenon_charge(int32_t flash_charge,
+		int32_t charge_enable, uint32_t flash_recharge_duration)
+{
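+	/* Drive the charge GPIO; while charging is enabled, arm a one-shot
+	 * timer so charging is re-triggered after flash_recharge_duration ms. */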
+	gpio_set_value_cansleep(flash_charge, charge_enable);
+	if (charge_enable) {
+		timer_flash.expires = jiffies +
+			msecs_to_jiffies(flash_recharge_duration);
+		/* add timer for the recharge */
+		if (!timer_pending(&timer_flash))
+			add_timer(&timer_flash);
+	} else
+		del_timer_sync(&timer_flash);
+	return 0;
+}
+
+static void strobe_flash_xenon_recharge_handler(unsigned long data)
+{
+	unsigned long flags;
+	struct msm_camera_sensor_strobe_flash_data *sfdata =
+		(struct msm_camera_sensor_strobe_flash_data *)data;
+
+	spin_lock_irqsave(&sfdata->timer_lock, flags);
+	msm_strobe_flash_xenon_charge(sfdata->flash_charge, 1,
+		sfdata->flash_recharge_duration);
+	spin_unlock_irqrestore(&sfdata->timer_lock, flags);
+
+	return;
+}
+
+static irqreturn_t strobe_flash_charge_ready_irq(int irq_num, void *data)
+{
+	struct msm_camera_sensor_strobe_flash_data *sfdata =
+		(struct msm_camera_sensor_strobe_flash_data *)data;
+
+	/* put the charge signal to low */
+	gpio_set_value_cansleep(sfdata->flash_charge, 0);
+
+	return IRQ_HANDLED;
+}
+
+static int msm_strobe_flash_xenon_init(
+	struct msm_camera_sensor_strobe_flash_data *sfdata)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&sfdata->spin_lock, flags);
+	if (!sfdata->state) {
+
+		rc = config_flash_gpio_table(MSM_CAM_FLASH_ON, sfdata);
+		if (rc < 0) {
+			pr_err("%s: gpio_request failed\n", __func__);
+			goto go_out;
+		}
+		rc = request_irq(sfdata->irq, strobe_flash_charge_ready_irq,
+			IRQF_TRIGGER_RISING, "charge_ready", sfdata);
+		if (rc < 0) {
+			pr_err("%s: request_irq failed %d\n", __func__, rc);
+			goto go_out;
+		}
+
+		spin_lock_init(&sfdata->timer_lock);
+		/* setup timer */
+		init_timer(&timer_flash);
+		timer_flash.function = strobe_flash_xenon_recharge_handler;
+		timer_flash.data = (unsigned long)sfdata;
+	}
+	sfdata->state++;
+go_out:
+	spin_unlock_irqrestore(&sfdata->spin_lock, flags);
+
+	return rc;
+}
+
+static int msm_strobe_flash_xenon_release
+(struct msm_camera_sensor_strobe_flash_data *sfdata, int32_t final_release)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sfdata->spin_lock, flags);
+	if (sfdata->state > 0) {
+		if (final_release)
+			sfdata->state = 0;
+		else
+			sfdata->state--;
+
+		if (!sfdata->state) {
+			free_irq(sfdata->irq, sfdata);
+			config_flash_gpio_table(MSM_CAM_FLASH_OFF, sfdata);
+			if (timer_pending(&timer_flash))
+				del_timer_sync(&timer_flash);
+		}
+	}
+	spin_unlock_irqrestore(&sfdata->spin_lock, flags);
+	return 0;
+}
+
+static void msm_strobe_flash_xenon_fn_init
+	(struct msm_strobe_flash_ctrl *strobe_flash_ptr)
+{
+	strobe_flash_ptr->strobe_flash_init =
+				msm_strobe_flash_xenon_init;
+	strobe_flash_ptr->strobe_flash_charge =
+				msm_strobe_flash_xenon_charge;
+	strobe_flash_ptr->strobe_flash_release =
+				msm_strobe_flash_xenon_release;
+}
+
+int msm_strobe_flash_init(struct msm_sync *sync, uint32_t sftype)
+{
+	int rc = 0;
+	switch (sftype) {
+	case MSM_CAMERA_STROBE_FLASH_XENON:
+		if (sync->sdata->strobe_flash_data) {
+			msm_strobe_flash_xenon_fn_init(&sync->sfctrl);
+			rc = sync->sfctrl.strobe_flash_init(
+			sync->sdata->strobe_flash_data);
+		} else
+			return -ENODEV;
+		break;
+	default:
+		rc = -ENODEV;
+	}
+	return rc;
+}
+
+int msm_strobe_flash_ctrl(struct msm_camera_sensor_strobe_flash_data *sfdata,
+	struct strobe_flash_ctrl_data *strobe_ctrl)
+{
+	int rc = 0;
+	switch (strobe_ctrl->type) {
+	case STROBE_FLASH_CTRL_INIT:
+		if (!sfdata)
+			return -ENODEV;
+		rc = msm_strobe_flash_xenon_init(sfdata);
+		break;
+	case STROBE_FLASH_CTRL_CHARGE:
+		rc = msm_strobe_flash_xenon_charge(sfdata->flash_charge,
+			strobe_ctrl->charge_en,
+			sfdata->flash_recharge_duration);
+		break;
+	case STROBE_FLASH_CTRL_RELEASE:
+		if (sfdata)
+			rc = msm_strobe_flash_xenon_release(sfdata, 0);
+		break;
+	default:
+		pr_err("Invalid Strobe Flash State\n");
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+int msm_flash_ctrl(struct msm_camera_sensor_info *sdata,
+	struct flash_ctrl_data *flash_info)
+{
+	int rc = 0;
+	switch (flash_info->flashtype) {
+	case LED_FLASH:
+		rc = msm_camera_flash_set_led_state(sdata->flash_data,
+			flash_info->ctrl_data.led_state);
+		break;
+	case STROBE_FLASH:
+		rc = msm_strobe_flash_ctrl(sdata->strobe_flash_data,
+			&(flash_info->ctrl_data.strobe_ctrl));
+		break;
+	default:
+		pr_err("Invalid Flash MODE\n");
+		rc = -EINVAL;
+	}
+	return rc;
+}
diff --git a/drivers/media/video/msm/imx072.c b/drivers/media/video/msm/imx072.c
new file mode 100644
index 0000000..d9ee051
--- /dev/null
+++ b/drivers/media/video/msm/imx072.c
@@ -0,0 +1,1164 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <media/msm_camera.h>
+#include <mach/gpio.h>
+#include <mach/camera.h>
+#include "imx072.h"
+
+/* SENSOR REGISTER DEFINES */
+#define REG_GROUPED_PARAMETER_HOLD		0x0104
+#define GROUPED_PARAMETER_HOLD_OFF		0x00
+#define GROUPED_PARAMETER_HOLD			0x01
+/* Integration Time */
+#define REG_COARSE_INTEGRATION_TIME		0x0202
+/* Gain */
+#define REG_GLOBAL_GAIN					0x0204
+
+/* PLL registers */
+#define REG_FRAME_LENGTH_LINES			0x0340
+#define REG_LINE_LENGTH_PCK				0x0342
+
+/* 16bit address - 8 bit context register structure */
+#define Q8  0x00000100
+#define Q10 0x00000400
+#define IMX072_MASTER_CLK_RATE 24000000
+#define IMX072_OFFSET		3
+
+/* AF Total steps parameters */
+#define IMX072_AF_I2C_ADDR	0x18
+#define IMX072_TOTAL_STEPS_NEAR_TO_FAR    30
+
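+/* AF tuning defaults: the step table starts at an initial lens code of 350
+ * and grows by 12 DAC codes per step in the linear region; the values are
+ * presumably tuned for this camera module. */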
+static uint16_t imx072_step_position_table[IMX072_TOTAL_STEPS_NEAR_TO_FAR+1];
+static uint16_t imx072_nl_region_boundary1;
+static uint16_t imx072_nl_region_code_per_step1;
+static uint16_t imx072_l_region_code_per_step = 12;
+static uint16_t imx072_sw_damping_time_wait = 8;
+static uint16_t imx072_af_initial_code = 350;
+static uint16_t imx072_damping_threshold = 10;
+
+struct imx072_work_t {
+	struct work_struct work;
+};
+
+static struct imx072_work_t *imx072_sensorw;
+static struct i2c_client *imx072_client;
+
+struct imx072_ctrl_t {
+	const struct  msm_camera_sensor_info *sensordata;
+
+	uint32_t sensormode;
+	uint32_t fps_divider;/* init to 1 * 0x00000400 */
+	uint32_t pict_fps_divider;/* init to 1 * 0x00000400 */
+	uint16_t fps;
+
+	uint16_t curr_lens_pos;
+	uint16_t curr_step_pos;
+	uint16_t my_reg_gain;
+	uint32_t my_reg_line_count;
+	uint16_t total_lines_per_frame;
+
+	enum imx072_resolution_t prev_res;
+	enum imx072_resolution_t pict_res;
+	enum imx072_resolution_t curr_res;
+	enum imx072_test_mode_t  set_test;
+	enum imx072_cam_mode_t cam_mode;
+};
+
+static uint16_t prev_line_length_pck;
+static uint16_t prev_frame_length_lines;
+static uint16_t snap_line_length_pck;
+static uint16_t snap_frame_length_lines;
+
+static bool CSI_CONFIG;
+static struct imx072_ctrl_t *imx072_ctrl;
+static DECLARE_WAIT_QUEUE_HEAD(imx072_wait_queue);
+DEFINE_MUTEX(imx072_mut);
+
+#ifdef CONFIG_DEBUG_FS
+static int cam_debug_init(void);
+static struct dentry *debugfs_base;
+#endif
+
+static int imx072_i2c_rxdata(unsigned short saddr,
+	unsigned char *rxdata, int length)
+{
+	struct i2c_msg msgs[] = {
+		{
+			.addr  = saddr,
+			.flags = 0,
+			.len   = length,
+			.buf   = rxdata,
+		},
+		{
+			.addr  = saddr,
+			.flags = I2C_M_RD,
+			.len   = length,
+			.buf   = rxdata,
+		},
+	};
+	if (i2c_transfer(imx072_client->adapter, msgs, 2) < 0) {
+		pr_err("imx072_i2c_rxdata failed 0x%x\n", saddr);
+		return -EIO;
+	}
+	return 0;
+}
+
+static int32_t imx072_i2c_txdata(unsigned short saddr,
+				unsigned char *txdata, int length)
+{
+	struct i2c_msg msg[] = {
+		{
+			.addr = saddr,
+			.flags = 0,
+			.len = length,
+			.buf = txdata,
+		 },
+	};
+	if (i2c_transfer(imx072_client->adapter, msg, 1) < 0) {
+		pr_err("imx072_i2c_txdata failed 0x%x\n", saddr);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int32_t imx072_i2c_read(unsigned short raddr,
+	unsigned short *rdata, int rlen)
+{
+	int32_t rc = 0;
+	unsigned char buf[2];
+	if (!rdata)
+		return -EIO;
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (raddr & 0xFF00) >> 8;
+	buf[1] = (raddr & 0x00FF);
+	rc = imx072_i2c_rxdata(imx072_client->addr>>1, buf, rlen);
+	if (rc < 0) {
+		pr_err("imx072_i2c_read 0x%x failed!\n", raddr);
+		return rc;
+	}
+	*rdata = (rlen == 2 ? buf[0] << 8 | buf[1] : buf[0]);
+	CDBG("imx072_i2c_read 0x%x val = 0x%x!\n", raddr, *rdata);
+	return rc;
+}
+
+static int32_t imx072_i2c_write_w_sensor(unsigned short waddr,
+	uint16_t wdata)
+{
+	int32_t rc = -EFAULT;
+	unsigned char buf[4];
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (waddr & 0xFF00) >> 8;
+	buf[1] = (waddr & 0x00FF);
+	buf[2] = (wdata & 0xFF00) >> 8;
+	buf[3] = (wdata & 0x00FF);
+	CDBG("i2c_write_b addr = 0x%x, val = 0x%x\n", waddr, wdata);
+	rc = imx072_i2c_txdata(imx072_client->addr>>1, buf, 4);
+	if (rc < 0) {
+		pr_err("i2c_write_b failed, addr = 0x%x, val = 0x%x!\n",
+			waddr, wdata);
+	}
+	return rc;
+}
+
+static int32_t imx072_i2c_write_b_sensor(unsigned short waddr,
+	uint8_t bdata)
+{
+	int32_t rc = -EFAULT;
+	unsigned char buf[3];
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (waddr & 0xFF00) >> 8;
+	buf[1] = (waddr & 0x00FF);
+	buf[2] = bdata;
+	CDBG("i2c_write_b addr = 0x%x, val = 0x%x\n", waddr, bdata);
+	rc = imx072_i2c_txdata(imx072_client->addr>>1, buf, 3);
+	if (rc < 0)
+		pr_err("i2c_write_b failed, addr = 0x%x, val = 0x%x!\n",
+			waddr, bdata);
+	return rc;
+}
+
+static int32_t imx072_i2c_write_b_af(uint8_t msb, uint8_t lsb)
+{
+	int32_t rc = -EFAULT;
+	unsigned char buf[2];
+
+	buf[0] = msb;
+	buf[1] = lsb;
+	rc = imx072_i2c_txdata(IMX072_AF_I2C_ADDR>>1, buf, 2);
+	if (rc < 0)
+		pr_err("af_i2c_write failed msb = 0x%x lsb = 0x%x",
+			msb, lsb);
+	return rc;
+}
+
+static int32_t imx072_i2c_write_w_table(struct imx072_i2c_reg_conf const
+					 *reg_conf_tbl, int num)
+{
+	int i;
+	int32_t rc = -EIO;
+	for (i = 0; i < num; i++) {
+		rc = imx072_i2c_write_b_sensor(reg_conf_tbl->waddr,
+			reg_conf_tbl->wdata);
+		if (rc < 0)
+			break;
+		reg_conf_tbl++;
+	}
+	return rc;
+}
+
+static void imx072_group_hold_on(void)
+{
+	imx072_i2c_write_b_sensor(REG_GROUPED_PARAMETER_HOLD,
+						GROUPED_PARAMETER_HOLD);
+}
+
+static void imx072_group_hold_off(void)
+{
+	imx072_i2c_write_b_sensor(REG_GROUPED_PARAMETER_HOLD,
+						GROUPED_PARAMETER_HOLD_OFF);
+}
+
+static void imx072_start_stream(void)
+{
+	imx072_i2c_write_b_sensor(0x0100, 0x01);
+}
+
+static void imx072_stop_stream(void)
+{
+	imx072_i2c_write_b_sensor(0x0100, 0x00);
+}
+
+static void imx072_get_pict_fps(uint16_t fps, uint16_t *pfps)
+{
+	/* input fps is preview fps in Q8 format */
+	uint32_t divider, d1, d2;
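+	/* Scale the preview fps by the ratio of preview to snapshot frame
+	 * timing, in Q10 fixed point:
+	 * divider = (prev_fll / snap_fll) * (prev_llpck / snap_llpck). */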
+
+	d1 = prev_frame_length_lines * 0x00000400 / snap_frame_length_lines;
+	d2 = prev_line_length_pck * 0x00000400 / snap_line_length_pck;
+	divider = d1 * d2 / 0x400;
+
+	/*Verify PCLK settings and frame sizes.*/
+	*pfps = (uint16_t) (fps * divider / 0x400);
+}
+
+static uint16_t imx072_get_prev_lines_pf(void)
+{
+	return prev_frame_length_lines;
+}
+
+static uint16_t imx072_get_prev_pixels_pl(void)
+{
+	return prev_line_length_pck;
+}
+
+static uint16_t imx072_get_pict_lines_pf(void)
+{
+	return snap_frame_length_lines;
+}
+
+static uint16_t imx072_get_pict_pixels_pl(void)
+{
+	return snap_line_length_pck;
+}
+
+static uint32_t imx072_get_pict_max_exp_lc(void)
+{
+	return snap_frame_length_lines  * 24;
+}
+
+static int32_t imx072_set_fps(struct fps_cfg   *fps)
+{
+	uint16_t total_lines_per_frame;
+	int32_t rc = 0;
+	total_lines_per_frame = (uint16_t)
+		((prev_frame_length_lines *
+		imx072_ctrl->fps_divider)/0x400);
+	imx072_ctrl->fps_divider = fps->fps_div;
+	imx072_ctrl->pict_fps_divider = fps->pict_fps_div;
+
+	imx072_group_hold_on();
+	rc = imx072_i2c_write_w_sensor(REG_FRAME_LENGTH_LINES,
+							total_lines_per_frame);
+	imx072_group_hold_off();
+	return rc;
+}
+
+static int32_t imx072_write_exp_gain(uint16_t gain, uint32_t line)
+{
+	uint32_t fl_lines = 0;
+	uint8_t offset;
+	int32_t rc = 0;
+	if (imx072_ctrl->curr_res == imx072_ctrl->prev_res)
+		fl_lines = prev_frame_length_lines;
+	else if (imx072_ctrl->curr_res == imx072_ctrl->pict_res)
+		fl_lines = snap_frame_length_lines;
+	line = (line * imx072_ctrl->fps_divider) / Q10;
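+	/* Stretch the frame length if the requested integration time would not
+	 * fit within frame_length_lines minus the 3-line sensor offset. */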
+	offset = IMX072_OFFSET;
+	if (line > (fl_lines - offset))
+		fl_lines = line + offset;
+
+	imx072_group_hold_on();
+	rc = imx072_i2c_write_w_sensor(REG_FRAME_LENGTH_LINES, fl_lines);
+	rc = imx072_i2c_write_w_sensor(REG_COARSE_INTEGRATION_TIME, line);
+	rc = imx072_i2c_write_w_sensor(REG_GLOBAL_GAIN, gain);
+	imx072_group_hold_off();
+	return rc;
+}
+
+static int32_t imx072_set_pict_exp_gain(uint16_t gain, uint32_t line)
+{
+	int32_t rc = 0;
+	rc = imx072_write_exp_gain(gain, line);
+	return rc;
+}
+
+static int32_t imx072_sensor_setting(int update_type, int rt)
+{
+
+	int32_t rc = 0;
+	struct msm_camera_csi_params imx072_csi_params;
+
+	imx072_stop_stream();
+	msleep(30);
+	if (update_type == REG_INIT) {
+		msleep(20);
+		CSI_CONFIG = 0;
+		imx072_i2c_write_w_table(imx072_regs.rec_settings,
+			imx072_regs.rec_size);
+	} else if (update_type == UPDATE_PERIODIC) {
+#ifdef CONFIG_DEBUG_FS
+		cam_debug_init();
+#endif
+		msleep(20);
+		if (!CSI_CONFIG) {
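+			/* One-time MIPI CSI receiver setup: 2 data lanes,
+			 * RAW10 format, default lane mapping, no DPCM. */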
+			imx072_csi_params.lane_cnt = 2;
+			imx072_csi_params.data_format = CSI_10BIT;
+			imx072_csi_params.lane_assign = 0xe4;
+			imx072_csi_params.dpcm_scheme = 0;
+			imx072_csi_params.settle_cnt = 0x18;
+			msm_camio_vfe_clk_rate_set(192000000);
+			rc = msm_camio_csi_config(&imx072_csi_params);
+			msleep(100);
+			CSI_CONFIG = 1;
+		}
+		imx072_i2c_write_w_table(
+			imx072_regs.conf_array[rt].conf,
+			imx072_regs.conf_array[rt].size);
+		imx072_start_stream();
+		msleep(30);
+	}
+	return rc;
+}
+
+static int32_t imx072_video_config(int mode)
+{
+
+	int32_t rc = 0;
+	/* change sensor resolution if needed */
+	if (imx072_sensor_setting(UPDATE_PERIODIC,
+		imx072_ctrl->prev_res) < 0)
+		return rc;
+
+	imx072_ctrl->curr_res = imx072_ctrl->prev_res;
+	imx072_ctrl->sensormode = mode;
+	return rc;
+}
+
+static int32_t imx072_snapshot_config(int mode)
+{
+	int32_t rc = 0;
+	/*change sensor resolution if needed */
+	if (imx072_ctrl->curr_res != imx072_ctrl->pict_res) {
+		if (imx072_sensor_setting(UPDATE_PERIODIC,
+					imx072_ctrl->pict_res) < 0)
+			return rc;
+	}
+
+	imx072_ctrl->curr_res = imx072_ctrl->pict_res;
+	imx072_ctrl->sensormode = mode;
+	return rc;
+}
+
+static int32_t imx072_raw_snapshot_config(int mode)
+{
+	int32_t rc = 0;
+	/* change sensor resolution if needed */
+	if (imx072_ctrl->curr_res != imx072_ctrl->pict_res) {
+		if (imx072_sensor_setting(UPDATE_PERIODIC,
+					imx072_ctrl->pict_res) < 0)
+			return rc;
+	}
+
+	imx072_ctrl->curr_res = imx072_ctrl->pict_res;
+	imx072_ctrl->sensormode = mode;
+	return rc;
+}
+
+static int32_t imx072_mode_init(int mode, struct sensor_init_cfg init_info)
+{
+	int32_t rc = 0;
+	CDBG("%s: %d\n", __func__, __LINE__);
+	if (mode != imx072_ctrl->cam_mode) {
+		imx072_ctrl->prev_res = init_info.prev_res;
+		imx072_ctrl->pict_res = init_info.pict_res;
+		imx072_ctrl->cam_mode = mode;
+
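+		/* Cache frame/line timing for preview and snapshot; each value
+		 * is split across HI/LO byte entries in the per-resolution
+		 * register table. */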
+		prev_frame_length_lines =
+			imx072_regs.conf_array[imx072_ctrl->prev_res].
+			conf[IMX072_FRAME_LENGTH_LINES_HI].wdata << 8 |
+			imx072_regs.conf_array[imx072_ctrl->prev_res].
+			conf[IMX072_FRAME_LENGTH_LINES_LO].wdata;
+		prev_line_length_pck =
+			imx072_regs.conf_array[imx072_ctrl->prev_res].
+			conf[IMX072_LINE_LENGTH_PCK_HI].wdata << 8 |
+			imx072_regs.conf_array[imx072_ctrl->prev_res].
+			conf[IMX072_LINE_LENGTH_PCK_LO].wdata;
+		snap_frame_length_lines =
+			imx072_regs.conf_array[imx072_ctrl->pict_res].
+			conf[IMX072_FRAME_LENGTH_LINES_HI].wdata << 8 |
+			imx072_regs.conf_array[imx072_ctrl->pict_res].
+			conf[IMX072_FRAME_LENGTH_LINES_LO].wdata;
+		snap_line_length_pck =
+			imx072_regs.conf_array[imx072_ctrl->pict_res].
+			conf[IMX072_LINE_LENGTH_PCK_HI].wdata << 8 |
+			imx072_regs.conf_array[imx072_ctrl->pict_res].
+			conf[IMX072_LINE_LENGTH_PCK_LO].wdata;
+
+		rc = imx072_sensor_setting(REG_INIT,
+			imx072_ctrl->prev_res);
+	}
+	return rc;
+}
+
+static int32_t imx072_set_sensor_mode(int mode,
+	int res)
+{
+	int32_t rc = 0;
+
+	switch (mode) {
+	case SENSOR_PREVIEW_MODE:
+		imx072_ctrl->prev_res = res;
+		rc = imx072_video_config(mode);
+		break;
+	case SENSOR_SNAPSHOT_MODE:
+		imx072_ctrl->pict_res = res;
+		rc = imx072_snapshot_config(mode);
+		break;
+	case SENSOR_RAW_SNAPSHOT_MODE:
+		imx072_ctrl->pict_res = res;
+		rc = imx072_raw_snapshot_config(mode);
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+#define DIV_CEIL(x, y) ((x/y + ((x%y) ? 1 : 0)))
+static int32_t imx072_move_focus(int direction,
+	int32_t num_steps)
+{
+	int32_t rc = 0;
+	int16_t step_direction, dest_lens_position, dest_step_position;
+	uint8_t code_val_msb, code_val_lsb;
+	int16_t next_lens_position, target_dist, small_step;
+
+	if (direction == MOVE_NEAR)
+		step_direction = 1;
+	else if (direction == MOVE_FAR)
+		step_direction = -1;
+	else {
+		pr_err("Illegal focus direction\n");
+		return -EINVAL;
+	}
+	dest_step_position = imx072_ctrl->curr_step_pos +
+			(step_direction * num_steps);
+
+	if (dest_step_position < 0)
+		dest_step_position = 0;
+	else if (dest_step_position > IMX072_TOTAL_STEPS_NEAR_TO_FAR)
+		dest_step_position = IMX072_TOTAL_STEPS_NEAR_TO_FAR;
+
+	if (dest_step_position == imx072_ctrl->curr_step_pos) {
+		CDBG("imx072 same position No-Move exit\n");
+		return rc;
+	}
+	CDBG("%s Index = [%d]\n", __func__, dest_step_position);
+
+	dest_lens_position = imx072_step_position_table[dest_step_position];
+	CDBG("%s lens_position value = %d\n", __func__, dest_lens_position);
+	target_dist = step_direction * (dest_lens_position -
+		imx072_ctrl->curr_lens_pos);
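+	/* Large moves toward infinity use finer sub-steps (1/10 of the
+	 * remaining distance) and a longer settle time, presumably to damp
+	 * actuator ringing; other moves step in quarters of the distance. */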
+	if (step_direction < 0 && (target_dist >=
+		(imx072_step_position_table[imx072_damping_threshold]
+			- imx072_af_initial_code))) {
+		small_step = DIV_CEIL(target_dist, 10);
+		imx072_sw_damping_time_wait = 30;
+	} else {
+		small_step = DIV_CEIL(target_dist, 4);
+		imx072_sw_damping_time_wait = 20;
+	}
+
+	CDBG("%s: small_step:%d, wait_time:%d\n", __func__, small_step,
+		imx072_sw_damping_time_wait);
+	for (next_lens_position = imx072_ctrl->curr_lens_pos +
+		(step_direction * small_step);
+		(step_direction * next_lens_position) <=
+		(step_direction * dest_lens_position);
+		next_lens_position += (step_direction * small_step)) {
+
+		code_val_msb = ((next_lens_position & 0x03F0) >> 4);
+		code_val_lsb = ((next_lens_position & 0x000F) << 4);
+		CDBG("position value = %d\n", next_lens_position);
+		CDBG("movefocus vcm_msb = %d\n", code_val_msb);
+		CDBG("movefocus vcm_lsb = %d\n", code_val_lsb);
+		rc = imx072_i2c_write_b_af(code_val_msb, code_val_lsb);
+		if (rc < 0) {
+			pr_err("imx072_move_focus failed writing i2c\n");
+			return rc;
+		}
+		imx072_ctrl->curr_lens_pos = next_lens_position;
+		usleep(imx072_sw_damping_time_wait*100);
+	}
+	if (imx072_ctrl->curr_lens_pos != dest_lens_position) {
+		code_val_msb = ((dest_lens_position & 0x03F0) >> 4);
+		code_val_lsb = ((dest_lens_position & 0x000F) << 4);
+		CDBG("position value = %d\n", dest_lens_position);
+		CDBG("movefocus vcm_msb = %d\n", code_val_msb);
+		CDBG("movefocus vcm_lsb = %d\n", code_val_lsb);
+		rc = imx072_i2c_write_b_af(code_val_msb, code_val_lsb);
+		if (rc < 0) {
+			pr_err("imx072_move_focus failed writing i2c\n");
+			return rc;
+		}
+		usleep(imx072_sw_damping_time_wait * 100);
+	}
+	imx072_ctrl->curr_lens_pos = dest_lens_position;
+	imx072_ctrl->curr_step_pos = dest_step_position;
+	return rc;
+
+}
+
+static int32_t imx072_init_focus(void)
+{
+	uint8_t i;
+	int32_t rc = 0;
+
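+	/* Build the lens position table: a non-linear region for the first
+	 * boundary1 steps, then a linear region, with every entry clamped to
+	 * the 10-bit VCM code maximum of 1023. */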
+	imx072_step_position_table[0] = imx072_af_initial_code;
+	for (i = 1; i <= IMX072_TOTAL_STEPS_NEAR_TO_FAR; i++) {
+		if (i <= imx072_nl_region_boundary1)
+			imx072_step_position_table[i] =
+				imx072_step_position_table[i-1]
+				+ imx072_nl_region_code_per_step1;
+		else
+			imx072_step_position_table[i] =
+				imx072_step_position_table[i-1]
+				+ imx072_l_region_code_per_step;
+
+		if (imx072_step_position_table[i] > 1023)
+			imx072_step_position_table[i] = 1023;
+	}
+	imx072_ctrl->curr_lens_pos = 0;
+
+	return rc;
+}
+
+static int32_t imx072_set_default_focus(void)
+{
+	int32_t rc = 0;
+	uint8_t code_val_msb, code_val_lsb;
+	int16_t dest_lens_position = 0;
+
+	CDBG("%s Index = [%d]\n", __func__, 0);
+	if (imx072_ctrl->curr_step_pos != 0)
+		rc = imx072_move_focus(MOVE_FAR,
+		imx072_ctrl->curr_step_pos);
+	else {
+		dest_lens_position = imx072_af_initial_code;
+		code_val_msb = ((dest_lens_position & 0x03F0) >> 4);
+		code_val_lsb = ((dest_lens_position & 0x000F) << 4);
+
+		CDBG("position value = %d\n", dest_lens_position);
+		CDBG("movefocus vcm_msb = %d\n", code_val_msb);
+		CDBG("movefocus vcm_lsb = %d\n", code_val_lsb);
+		rc = imx072_i2c_write_b_af(code_val_msb, code_val_lsb);
+		if (rc < 0) {
+			pr_err("imx072_set_default_focus failed writing i2c\n");
+			return rc;
+		}
+
+		imx072_ctrl->curr_lens_pos = dest_lens_position;
+		imx072_ctrl->curr_step_pos = 0;
+
+	}
+	usleep(5000);
+	return rc;
+}
+
+static int32_t imx072_af_power_down(void)
+{
+	int32_t rc = 0;
+	int32_t i = 0;
+	int16_t dest_lens_position = imx072_af_initial_code;
+
+	if (imx072_ctrl->curr_lens_pos != 0) {
+		rc = imx072_set_default_focus();
+		CDBG("%s after imx072_set_default_focus\n", __func__);
+		msleep(40);
+		/* To avoid an audible click during power off, bring the
+		 * actuator to mechanical infinity gradually. */
+		for (i = 0; i < IMX072_TOTAL_STEPS_NEAR_TO_FAR; i++) {
+			dest_lens_position = dest_lens_position -
+				(imx072_af_initial_code /
+					IMX072_TOTAL_STEPS_NEAR_TO_FAR);
+			CDBG("position value = %d\n", dest_lens_position);
+			rc = imx072_i2c_write_b_af(
+				((dest_lens_position & 0x03F0) >> 4),
+				((dest_lens_position & 0x000F) << 4));
+			CDBG("count = %d\n", i);
+			msleep(20);
+			if (rc < 0) {
+				pr_err("imx072_set_default_focus failed writing i2c\n");
+				return rc;
+			}
+		}
+		rc = imx072_i2c_write_b_af(0x00, 00);
+		msleep(40);
+	}
+	rc = imx072_i2c_write_b_af(0x80, 00);
+	return rc;
+}
+
+static int32_t imx072_power_down(void)
+{
+	int32_t rc = 0;
+
+	rc = imx072_af_power_down();
+	return rc;
+}
+
+static int imx072_probe_init_done(const struct msm_camera_sensor_info *data)
+{
+	pr_err("probe done\n");
+	gpio_free(data->sensor_reset);
+	return 0;
+}
+
+static int imx072_probe_init_sensor(
+	const struct msm_camera_sensor_info *data)
+{
+	int32_t rc = 0;
+	uint16_t chipid = 0;
+
+	CDBG("%s: %d\n", __func__, __LINE__);
+	rc = gpio_request(data->sensor_reset, "imx072");
+	CDBG(" imx072_probe_init_sensor\n");
+	if (!rc) {
+		CDBG("sensor_reset = %d\n", data->sensor_reset);
+		gpio_direction_output(data->sensor_reset, 0);
+		msleep(50);
+		gpio_set_value_cansleep(data->sensor_reset, 1);
+		msleep(20);
+	} else {
+		goto init_probe_done;
+	}
+
+	CDBG(" imx072_probe_init_sensor is called\n");
+	rc = imx072_i2c_read(0x0, &chipid, 2);
+	CDBG("ID: %d\n", chipid);
+	/* 4. Compare sensor ID to IMX072 ID: */
+	if (chipid != 0x0045) {
+		rc = -ENODEV;
+		pr_err("imx072_probe_init_sensor chip id does not match\n");
+		goto init_probe_fail;
+	}
+	goto init_probe_done;
+init_probe_fail:
+	pr_err(" imx072_probe_init_sensor fails\n");
+	gpio_set_value_cansleep(data->sensor_reset, 0);
+	imx072_probe_init_done(data);
+init_probe_done:
+	pr_err(" imx072_probe_init_sensor finishes\n");
+	return rc;
+}
+
+int imx072_sensor_open_init(const struct msm_camera_sensor_info *data)
+{
+	int32_t rc = 0;
+
+	CDBG("%s: %d\n", __func__, __LINE__);
+	imx072_ctrl = kzalloc(sizeof(struct imx072_ctrl_t), GFP_KERNEL);
+	if (!imx072_ctrl) {
+		pr_err("imx072_init failed!\n");
+		rc = -ENOMEM;
+		goto init_done;
+	}
+	imx072_ctrl->fps_divider = 1 * 0x00000400;
+	imx072_ctrl->pict_fps_divider = 1 * 0x00000400;
+	imx072_ctrl->set_test = TEST_OFF;
+	imx072_ctrl->cam_mode = MODE_INVALID;
+
+	if (data)
+		imx072_ctrl->sensordata = data;
+	if (rc < 0) {
+		pr_err("Calling imx072_sensor_open_init fail1\n");
+		return rc;
+	}
+	CDBG("%s: %d\n", __func__, __LINE__);
+	/* enable mclk first */
+	msm_camio_clk_rate_set(IMX072_MASTER_CLK_RATE);
+	rc = imx072_probe_init_sensor(data);
+	if (rc < 0)
+		goto init_fail;
+
+	imx072_init_focus();
+	imx072_ctrl->fps = 30*Q8;
+	if (rc < 0) {
+		gpio_set_value_cansleep(data->sensor_reset, 0);
+		goto init_fail;
+	} else
+		goto init_done;
+init_fail:
+	pr_err("init_fail\n");
+	imx072_probe_init_done(data);
+init_done:
+	pr_err("init_done\n");
+	return rc;
+}
+
+static int imx072_init_client(struct i2c_client *client)
+{
+	/* Initialize the MSM_CAMI2C Chip */
+	init_waitqueue_head(&imx072_wait_queue);
+	return 0;
+}
+
+static const struct i2c_device_id imx072_i2c_id[] = {
+	{"imx072", 0},
+	{ }
+};
+
+static int imx072_i2c_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	int rc = 0;
+	CDBG("imx072_probe called!\n");
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		pr_err("i2c_check_functionality failed\n");
+		goto probe_failure;
+	}
+
+	imx072_sensorw = kzalloc(sizeof(struct imx072_work_t),
+			GFP_KERNEL);
+	if (!imx072_sensorw) {
+		pr_err("kzalloc failed.\n");
+		rc = -ENOMEM;
+		goto probe_failure;
+	}
+
+	i2c_set_clientdata(client, imx072_sensorw);
+	imx072_init_client(client);
+	imx072_client = client;
+
+	msleep(50);
+
+	CDBG("imx072_probe succeeded! rc = %d\n", rc);
+	return 0;
+
+probe_failure:
+	pr_err("imx072_probe failed! rc = %d\n", rc);
+	return rc;
+}
+
+static int imx072_send_wb_info(struct wb_info_cfg *wb)
+{
+	return 0;
+
+}
+
+static int __exit imx072_remove(struct i2c_client *client)
+{
+	struct imx072_work_t *sensorw = i2c_get_clientdata(client);
+	free_irq(client->irq, sensorw);
+	imx072_client = NULL;
+	kfree(sensorw);
+	return 0;
+}
+
+static struct i2c_driver imx072_i2c_driver = {
+	.id_table = imx072_i2c_id,
+	.probe  = imx072_i2c_probe,
+	.remove = __exit_p(imx072_remove),
+	.driver = {
+		.name = "imx072",
+	},
+};
+
+int imx072_sensor_config(void __user *argp)
+{
+	struct sensor_cfg_data cdata;
+	long   rc = 0;
+	if (copy_from_user(&cdata,
+		(void *)argp,
+		sizeof(struct sensor_cfg_data)))
+		return -EFAULT;
+	mutex_lock(&imx072_mut);
+	CDBG("imx072_sensor_config: cfgtype = %d\n",
+		 cdata.cfgtype);
+	switch (cdata.cfgtype) {
+	case CFG_GET_PICT_FPS:
+		imx072_get_pict_fps(
+			cdata.cfg.gfps.prevfps,
+			&(cdata.cfg.gfps.pictfps));
+
+		if (copy_to_user((void *)argp,
+			&cdata,
+			sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+	case CFG_GET_PREV_L_PF:
+		cdata.cfg.prevl_pf =
+		imx072_get_prev_lines_pf();
+
+		if (copy_to_user((void *)argp,
+			&cdata,
+			sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+	case CFG_GET_PREV_P_PL:
+		cdata.cfg.prevp_pl =
+			imx072_get_prev_pixels_pl();
+
+		if (copy_to_user((void *)argp,
+			&cdata,
+			sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+	case CFG_GET_PICT_L_PF:
+		cdata.cfg.pictl_pf =
+			imx072_get_pict_lines_pf();
+
+		if (copy_to_user((void *)argp,
+			&cdata,
+			sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+	case CFG_GET_PICT_P_PL:
+		cdata.cfg.pictp_pl =
+			imx072_get_pict_pixels_pl();
+
+		if (copy_to_user((void *)argp,
+			&cdata,
+			sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+	case CFG_GET_PICT_MAX_EXP_LC:
+		cdata.cfg.pict_max_exp_lc =
+			imx072_get_pict_max_exp_lc();
+
+		if (copy_to_user((void *)argp,
+			&cdata,
+			sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+	case CFG_SET_FPS:
+	case CFG_SET_PICT_FPS:
+		rc = imx072_set_fps(&(cdata.cfg.fps));
+		break;
+	case CFG_SET_EXP_GAIN:
+		rc = imx072_write_exp_gain(
+			cdata.cfg.exp_gain.gain,
+			cdata.cfg.exp_gain.line);
+		break;
+	case CFG_SET_PICT_EXP_GAIN:
+		rc = imx072_set_pict_exp_gain(
+			cdata.cfg.exp_gain.gain,
+			cdata.cfg.exp_gain.line);
+		break;
+	case CFG_SET_MODE:
+		rc = imx072_set_sensor_mode(cdata.mode, cdata.rs);
+		break;
+	case CFG_PWR_DOWN:
+		rc = imx072_power_down();
+		break;
+	case CFG_MOVE_FOCUS:
+		rc = imx072_move_focus(cdata.cfg.focus.dir,
+				cdata.cfg.focus.steps);
+		break;
+	case CFG_SET_DEFAULT_FOCUS:
+		imx072_set_default_focus();
+		break;
+	case CFG_GET_AF_MAX_STEPS:
+		cdata.max_steps = IMX072_TOTAL_STEPS_NEAR_TO_FAR;
+		if (copy_to_user((void *)argp,
+			&cdata,
+			sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+	case CFG_SET_EFFECT:
+		break;
+	case CFG_SEND_WB_INFO:
+		rc = imx072_send_wb_info(
+			&(cdata.cfg.wb_info));
+		break;
+	case CFG_SENSOR_INIT:
+		rc = imx072_mode_init(cdata.mode,
+				cdata.cfg.init_info);
+		break;
+	case CFG_SET_LENS_SHADING:
+		break;
+	default:
+		rc = -EFAULT;
+		break;
+	}
+
+	mutex_unlock(&imx072_mut);
+
+	return rc;
+}
+
+static int imx072_sensor_release(void)
+{
+	int rc = -EBADF;
+	mutex_lock(&imx072_mut);
+	imx072_power_down();
+	gpio_set_value_cansleep(imx072_ctrl->sensordata->sensor_reset, 0);
+	msleep(20);
+	gpio_free(imx072_ctrl->sensordata->sensor_reset);
+	kfree(imx072_ctrl);
+	imx072_ctrl = NULL;
+	pr_err("imx072_release completed\n");
+	mutex_unlock(&imx072_mut);
+
+	return rc;
+}
+
+static int imx072_sensor_probe(const struct msm_camera_sensor_info *info,
+		struct msm_sensor_ctrl *s)
+{
+	int rc = 0;
+	rc = i2c_add_driver(&imx072_i2c_driver);
+	if (rc < 0 || imx072_client == NULL) {
+		rc = -ENOTSUPP;
+		pr_err("I2C add driver failed");
+		goto probe_fail;
+	}
+	msm_camio_clk_rate_set(IMX072_MASTER_CLK_RATE);
+	rc = imx072_probe_init_sensor(info);
+	if (rc < 0)
+		goto probe_fail;
+	s->s_init = imx072_sensor_open_init;
+	s->s_release = imx072_sensor_release;
+	s->s_config  = imx072_sensor_config;
+	s->s_mount_angle = info->sensor_platform_info->mount_angle;
+
+	gpio_set_value_cansleep(info->sensor_reset, 0);
+	imx072_probe_init_done(info);
+	pr_info("imx072_sensor_probe : SUCCESS\n");
+	return rc;
+
+probe_fail:
+	pr_err("imx072_sensor_probe: SENSOR PROBE FAILS!\n");
+	return rc;
+}
+
+static int __imx072_probe(struct platform_device *pdev)
+{
+	return msm_camera_drv_start(pdev, imx072_sensor_probe);
+}
+
+static struct platform_driver msm_camera_driver = {
+	.probe = __imx072_probe,
+	.driver = {
+		.name = "msm_camera_imx072",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init imx072_init(void)
+{
+	return platform_driver_register(&msm_camera_driver);
+}
+
+module_init(imx072_init);
+void imx072_exit(void)
+{
+	i2c_del_driver(&imx072_i2c_driver);
+}
+MODULE_DESCRIPTION("Sony IMX072 Bayer sensor driver");
+MODULE_LICENSE("GPL v2");
+
+#ifdef CONFIG_DEBUG_FS
+static bool streaming = 1;
+
+static int cam_debug_stream_set(void *data, u64 val)
+{
+	int rc = 0;
+
+	if (val) {
+		imx072_start_stream();
+		streaming = 1;
+	} else {
+		imx072_stop_stream();
+		streaming = 0;
+	}
+
+	return rc;
+}
+
+static int cam_debug_stream_get(void *data, u64 *val)
+{
+	*val = streaming;
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cam_stream, cam_debug_stream_get,
+			cam_debug_stream_set, "%llu\n");
+
+
+
+static int imx072_set_af_codestep(void *data, u64 val)
+{
+	imx072_l_region_code_per_step = val;
+	imx072_init_focus();
+	return 0;
+}
+
+static int imx072_get_af_codestep(void *data, u64 *val)
+{
+	*val = imx072_l_region_code_per_step;
+	return 0;
+}
+
+static uint16_t imx072_linear_total_step = IMX072_TOTAL_STEPS_NEAR_TO_FAR;
+static int imx072_set_linear_total_step(void *data, u64 val)
+{
+	imx072_linear_total_step = val;
+	return 0;
+}
+
+static int imx072_af_linearity_test(void *data, u64 *val)
+{
+	int i = 0;
+
+	imx072_set_default_focus();
+	msleep(3000);
+	for (i = 0; i < imx072_linear_total_step; i++) {
+		imx072_move_focus(MOVE_NEAR, 1);
+		CDBG("moved to index =[%d]\n", i);
+		msleep(1000);
+	}
+
+	for (i = 0; i < imx072_linear_total_step; i++) {
+		imx072_move_focus(MOVE_FAR, 1);
+		CDBG("moved to index =[%d]\n", i);
+		msleep(1000);
+	}
+	return 0;
+}
+
+static uint16_t imx072_step_val = IMX072_TOTAL_STEPS_NEAR_TO_FAR;
+static uint8_t imx072_step_dir = MOVE_NEAR;
+static int imx072_af_step_config(void *data, u64 val)
+{
+	imx072_step_val = val & 0xFFFF;
+	imx072_step_dir = (val >> 16) & 0x1;
+	return 0;
+}
+
+static int imx072_af_step(void *data, u64 *val)
+{
+	int i = 0;
+	int dir = MOVE_NEAR;
+	imx072_set_default_focus();
+	msleep(3000);
+	if (imx072_step_dir == 1)
+		dir = MOVE_FAR;
+
+	for (i = 0; i < imx072_step_val; i += 4) {
+		imx072_move_focus(dir, 4);
+		msleep(1000);
+	}
+	imx072_set_default_focus();
+	msleep(3000);
+	return 0;
+}
+
+static int imx072_af_set_resolution(void *data, u64 val)
+{
+	imx072_init_focus();
+	return 0;
+}
+
+static int imx072_af_get_resolution(void *data, u64 *val)
+{
+	*val = 0xFF;
+	return 0;
+}
+
+
+
+DEFINE_SIMPLE_ATTRIBUTE(af_codeperstep, imx072_get_af_codestep,
+			imx072_set_af_codestep, "%llu\n");
+
+DEFINE_SIMPLE_ATTRIBUTE(af_linear, imx072_af_linearity_test,
+			imx072_set_linear_total_step, "%llu\n");
+
+DEFINE_SIMPLE_ATTRIBUTE(af_step, imx072_af_step,
+			imx072_af_step_config, "%llu\n");
+
+DEFINE_SIMPLE_ATTRIBUTE(af_step_res, imx072_af_get_resolution,
+			imx072_af_set_resolution, "%llu\n");
+
+static int cam_debug_init(void)
+{
+	struct dentry *cam_dir;
+	debugfs_base = debugfs_create_dir("sensor", NULL);
+	if (!debugfs_base)
+		return -ENOMEM;
+
+	cam_dir = debugfs_create_dir("imx072", debugfs_base);
+	if (!cam_dir)
+		return -ENOMEM;
+
+	if (!debugfs_create_file("stream", S_IRUGO | S_IWUSR, cam_dir,
+							 NULL, &cam_stream))
+		return -ENOMEM;
+
+	if (!debugfs_create_file("af_codeperstep", S_IRUGO | S_IWUSR, cam_dir,
+							 NULL, &af_codeperstep))
+		return -ENOMEM;
+	if (!debugfs_create_file("af_linear", S_IRUGO | S_IWUSR, cam_dir,
+							 NULL, &af_linear))
+		return -ENOMEM;
+	if (!debugfs_create_file("af_step", S_IRUGO | S_IWUSR, cam_dir,
+							 NULL, &af_step))
+		return -ENOMEM;
+
+	if (!debugfs_create_file("af_step_res", S_IRUGO | S_IWUSR, cam_dir,
+							 NULL, &af_step_res))
+		return -ENOMEM;
+
+	return 0;
+}
+#endif
diff --git a/drivers/media/video/msm/imx072.h b/drivers/media/video/msm/imx072.h
new file mode 100644
index 0000000..e3d279f
--- /dev/null
+++ b/drivers/media/video/msm/imx072.h
@@ -0,0 +1,79 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef IMX072_H
+#define IMX072_H
+#include <linux/types.h>
+#include <mach/board.h>
+extern struct imx072_reg imx072_regs;
+
+struct imx072_i2c_reg_conf {
+	unsigned short waddr;
+	unsigned short wdata;
+};
+
+struct imx072_i2c_conf_array {
+	struct imx072_i2c_reg_conf *conf;
+	unsigned short size;
+};
+
+enum imx072_test_mode_t {
+	TEST_OFF,
+	TEST_1,
+	TEST_2,
+	TEST_3
+};
+
+enum imx072_resolution_t {
+	QTR_2D_SIZE,
+	FULL_2D_SIZE,
+	QTR_3D_SIZE,
+	FULL_3D_SIZE,
+	INVALID_SIZE
+};
+enum imx072_setting {
+	RES_PREVIEW,
+	RES_CAPTURE,
+	RES_3D_PREVIEW,
+	RES_3D_CAPTURE
+};
+enum imx072_cam_mode_t {
+	MODE_2D_RIGHT,
+	MODE_2D_LEFT,
+	MODE_3D,
+	MODE_INVALID
+};
+enum imx072_reg_update {
+	/* Sensor registers that need to be updated during initialization */
+	REG_INIT,
+	/* Sensor registers that need periodic I2C writes */
+	UPDATE_PERIODIC,
+	/* All the sensor Registers will be updated */
+	UPDATE_ALL,
+	/* Not valid update */
+	UPDATE_INVALID
+};
+
+enum imx072_reg_mode {
+	IMX072_FRAME_LENGTH_LINES_HI = 0,
+	IMX072_FRAME_LENGTH_LINES_LO,
+	IMX072_LINE_LENGTH_PCK_HI,
+	IMX072_LINE_LENGTH_PCK_LO,
+};
+
+struct imx072_reg {
+	const struct imx072_i2c_reg_conf *rec_settings;
+	const unsigned short rec_size;
+	const struct imx072_i2c_conf_array *conf_array;
+};
+#endif /* IMX072_H */
diff --git a/drivers/media/video/msm/imx072_reg.c b/drivers/media/video/msm/imx072_reg.c
new file mode 100644
index 0000000..ea75548
--- /dev/null
+++ b/drivers/media/video/msm/imx072_reg.c
@@ -0,0 +1,153 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "imx072.h"
+
+struct imx072_i2c_reg_conf imx072_prev_settings[] = {
+	{0x0340, 0x03},/*frame_length*/
+	{0x0341, 0xF7},/*frame_length*/
+	{0x0342, 0x0A},/*line_length*/
+	{0x0343, 0xE0},/*line_length*/
+	{0x0344, 0x00},/*x_addr_start*/
+	{0x0345, 0x00},/*x_addr_start*/
+	{0x0346, 0x00},/*y_addr_start*/
+	{0x0347, 0x00},/*y_addr_start*/
+	{0x0348, 0x0A},/*x_addr_end*/
+	{0x0349, 0x2F},/*x_addr_end*/
+	{0x034A, 0x07},/*y_addr_end*/
+	{0x034B, 0xA7},/*y_addr_end*/
+	{0x034C, 0x05},/*x_out_size*/
+	{0x034D, 0x18},/*x_out_size*/
+	{0x034E, 0x03},/*y_out_size*/
+	{0x034F, 0xD4},/*y_out_size*/
+	{0x0381, 0x01},/*x_even_inc*/
+	{0x0383, 0x03},/*x_odd_inc*/
+	{0x0385, 0x01},/*y_even_inc*/
+	{0x0387, 0x03},/*y_odd_inc*/
+	{0x3016, 0x06},/*VMODEADD*/
+	{0x3017, 0x40},
+	{0x3069, 0x24},
+	{0x306A, 0x00},
+	{0x306B, 0xCB},
+	{0x306C, 0x07},
+	{0x30E8, 0x86},
+	{0x3304, 0x03},
+	{0x3305, 0x02},
+	{0x3306, 0x0A},
+	{0x3307, 0x02},
+	{0x3308, 0x11},
+	{0x3309, 0x04},
+	{0x330A, 0x05},
+	{0x330B, 0x04},
+	{0x330C, 0x05},
+	{0x330D, 0x04},
+	{0x330E, 0x01},
+	{0x3301, 0x80},
+};
+
+struct imx072_i2c_reg_conf imx072_snap_settings[] = {
+	{0x0340, 0x07},/*frame_length*/
+	{0x0341, 0xEE},/*frame_length*/
+	{0x0342, 0x0A},/*line_length*/
+	{0x0343, 0xE0},/*line_length*/
+	{0x0344, 0x00},/*x_addr_start*/
+	{0x0345, 0x00},/*x_addr_start*/
+	{0x0346, 0x00},/*y_addr_start*/
+	{0x0347, 0x00},/*y_addr_start*/
+	{0x0348, 0x0A},/*x_addr_end*/
+	{0x0349, 0x2F},/*x_addr_end*/
+	{0x034A, 0x07},/*y_addr_end*/
+	{0x034B, 0xA7},/*y_addr_end*/
+	{0x034C, 0x0A},/*x_out_size*/
+	{0x034D, 0x30},/*x_out_size*/
+	{0x034E, 0x07},/*y_out_size*/
+	{0x034F, 0xA8},/*y_out_size*/
+	{0x0381, 0x01},/*x_even_inc*/
+	{0x0383, 0x01},/*x_odd_inc*/
+	{0x0385, 0x01},/*y_even_inc*/
+	{0x0387, 0x01},/*y_odd_inc*/
+	{0x3016, 0x06},/*VMODEADD*/
+	{0x3017, 0x40},
+	{0x3069, 0x24},
+	{0x306A, 0x00},
+	{0x306B, 0xCB},
+	{0x306C, 0x07},
+	{0x30E8, 0x06},
+	{0x3304, 0x05},
+	{0x3305, 0x04},
+	{0x3306, 0x15},
+	{0x3307, 0x02},
+	{0x3308, 0x11},
+	{0x3309, 0x07},
+	{0x330A, 0x05},
+	{0x330B, 0x04},
+	{0x330C, 0x05},
+	{0x330D, 0x04},
+	{0x330E, 0x01},
+	{0x3301, 0x00},
+};
+
+struct imx072_i2c_reg_conf imx072_recommend_settings[] = {
+	{0x0307, 0x12},
+	{0x302B, 0x4B},
+	{0x0101, 0x03},
+	{0x300A, 0x80},
+	{0x3014, 0x08},
+	{0x3015, 0x37},
+	{0x3017, 0x40},
+	{0x301C, 0x01},
+	{0x3031, 0x28},
+	{0x3040, 0x00},
+	{0x3041, 0x60},
+	{0x3051, 0x24},
+	{0x3053, 0x34},
+	{0x3055, 0x3B},
+	{0x3057, 0xC0},
+	{0x3060, 0x30},
+	{0x3065, 0x00},
+	{0x30AA, 0x88},
+	{0x30AB, 0x1C},
+	{0x30B0, 0x32},
+	{0x30B2, 0x83},
+	{0x30D3, 0x04},
+	{0x310E, 0xDD},
+	{0x31A4, 0xD8},
+	{0x31A6, 0x17},
+	{0x31AC, 0xCF},
+	{0x31AE, 0xF1},
+	{0x31B4, 0xD8},
+	{0x31B6, 0x17},
+	{0x3304, 0x05},
+	{0x3305, 0x04},
+	{0x3306, 0x15},
+	{0x3307, 0x02},
+	{0x3308, 0x11},
+	{0x3309, 0x07},
+	{0x330A, 0x05},
+	{0x330B, 0x04},
+	{0x330C, 0x05},
+	{0x330D, 0x04},
+	{0x330E, 0x01},
+	{0x30d8, 0x20},
+};
+
+struct imx072_i2c_conf_array imx072_confs[] = {
+	{&imx072_prev_settings[0], ARRAY_SIZE(imx072_prev_settings)},
+	{&imx072_snap_settings[0], ARRAY_SIZE(imx072_snap_settings)},
+};
+
+struct imx072_reg imx072_regs = {
+	.rec_settings = &imx072_recommend_settings[0],
+	.rec_size = ARRAY_SIZE(imx072_recommend_settings),
+	.conf_array = &imx072_confs[0],
+};
diff --git a/drivers/media/video/msm/imx074.c b/drivers/media/video/msm/imx074.c
new file mode 100644
index 0000000..636b402
--- /dev/null
+++ b/drivers/media/video/msm/imx074.c
@@ -0,0 +1,1414 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <media/msm_camera.h>
+#include <mach/gpio.h>
+#include <mach/camera.h>
+#include <asm/mach-types.h>
+#include "imx074.h"
+
+/*SENSOR REGISTER DEFINES*/
+#define	IMX074_EEPROM_SLAVE_ADDR			0x52
+#define REG_GROUPED_PARAMETER_HOLD			0x0104
+#define GROUPED_PARAMETER_HOLD_OFF			0x00
+#define GROUPED_PARAMETER_HOLD				0x01
+#define REG_MODE_SELECT					0x100
+#define MODE_SELECT_STANDBY_MODE			0x00
+#define MODE_SELECT_STREAM				0x01
+/* Integration Time */
+#define REG_COARSE_INTEGRATION_TIME_HI			0x0202
+#define REG_COARSE_INTEGRATION_TIME_LO			0x0203
+/* Gain */
+#define REG_ANALOGUE_GAIN_CODE_GLOBAL_HI		0x0204
+#define REG_ANALOGUE_GAIN_CODE_GLOBAL_LO		0x0205
+/* PLL registers */
+#define REG_PLL_MULTIPLIER				0x0307
+#define REG_PRE_PLL_CLK_DIV				0x0305
+#define REG_PLSTATIM					0x302b
+#define REG_3024					0x3024
+#define REG_IMAGE_ORIENTATION				0x0101
+#define REG_VNDMY_ABLMGSHLMT				0x300a
+#define REG_Y_OPBADDR_START_DI				0x3014
+#define REG_3015					0x3015
+#define REG_301C					0x301C
+#define REG_302C					0x302C
+#define REG_3031					0x3031
+#define REG_3041					0x3041
+#define REG_3051					0x3051
+#define REG_3053					0x3053
+#define REG_3057					0x3057
+#define REG_305C					0x305C
+#define REG_305D					0x305D
+#define REG_3060					0x3060
+#define REG_3065					0x3065
+#define REG_30AA					0x30AA
+#define REG_30AB					0x30AB
+#define REG_30B0					0x30B0
+#define REG_30B2					0x30B2
+#define REG_30D3					0x30D3
+#define REG_3106					0x3106
+#define REG_310C					0x310C
+#define REG_3304					0x3304
+#define REG_3305					0x3305
+#define REG_3306					0x3306
+#define REG_3307					0x3307
+#define REG_3308					0x3308
+#define REG_3309					0x3309
+#define REG_330A					0x330A
+#define REG_330B					0x330B
+#define REG_330C					0x330C
+#define REG_330D					0x330D
+#define REG_330F					0x330F
+#define REG_3381					0x3381
+
+/* mode setting */
+#define REG_FRAME_LENGTH_LINES_HI			0x0340
+#define REG_FRAME_LENGTH_LINES_LO			0x0341
+#define REG_YADDR_START					0x0347
+#define REG_YAAAR_END					0x034b
+#define REG_X_OUTPUT_SIZE_MSB				0x034c
+#define REG_X_OUTPUT_SIZE_LSB				0x034d
+#define REG_Y_OUTPUT_SIZE_MSB				0x034e
+#define REG_Y_OUTPUT_SIZE_LSB				0x034f
+#define REG_X_EVEN_INC					0x0381
+#define REG_X_ODD_INC					0x0383
+#define REG_Y_EVEN_INC					0x0385
+#define REG_Y_ODD_INC					0x0387
+#define REG_HMODEADD					0x3001
+#define REG_VMODEADD					0x3016
+#define REG_VAPPLINE_START				0x3069
+#define REG_VAPPLINE_END				0x306b
+#define REG_SHUTTER					0x3086
+#define REG_HADDAVE					0x30e8
+#define REG_LANESEL					0x3301
+/* Test Pattern */
+#define REG_TEST_PATTERN_MODE				0x0601
+
+#define REG_LINE_LENGTH_PCK_HI				0x0342
+#define REG_LINE_LENGTH_PCK_LO				0x0343
+/*..... TYPE DECLARATIONS.....*/
+#define	IMX074_OFFSET					3
+#define	IMX074_DEFAULT_MASTER_CLK_RATE			24000000
+/* Full	Size */
+#define	IMX074_FULL_SIZE_WIDTH				4208
+#define	IMX074_FULL_SIZE_HEIGHT				3120
+#define	IMX074_FULL_SIZE_DUMMY_PIXELS			0
+#define	IMX074_FULL_SIZE_DUMMY_LINES			0
+/* Quarter Size	*/
+#define	IMX074_QTR_SIZE_WIDTH				2104
+#define	IMX074_QTR_SIZE_HEIGHT				1560
+#define	IMX074_QTR_SIZE_DUMMY_PIXELS			0
+#define	IMX074_QTR_SIZE_DUMMY_LINES			0
+/* Blanking as measured	on the scope */
+/* Full	Size */
+#define	IMX074_HRZ_FULL_BLK_PIXELS			264
+#define	IMX074_VER_FULL_BLK_LINES			96
+/* Quarter Size	*/
+#define	IMX074_HRZ_QTR_BLK_PIXELS			2368
+#define	IMX074_VER_QTR_BLK_LINES			21
+#define	Q8						0x100
+#define	Q10						0x400
+#define	IMX074_AF_I2C_SLAVE_ID				0x72
+#define	IMX074_STEPS_NEAR_TO_CLOSEST_INF		52
+#define	IMX074_TOTAL_STEPS_NEAR_TO_FAR			52
+static uint32_t imx074_l_region_code_per_step = 2;
+
+struct imx074_work_t {
+	struct work_struct work;
+};
+
+static struct imx074_work_t *imx074_sensorw;
+static struct i2c_client *imx074_client;
+
+struct imx074_ctrl_t {
+	const struct msm_camera_sensor_info *sensordata;
+	uint32_t sensormode;
+	uint32_t fps_divider;/* init to 1 * 0x00000400 */
+	uint32_t pict_fps_divider;/* init to 1 * 0x00000400 */
+	uint16_t fps;
+	int16_t curr_lens_pos;
+	uint16_t curr_step_pos;
+	uint16_t my_reg_gain;
+	uint32_t my_reg_line_count;
+	uint16_t total_lines_per_frame;
+	enum imx074_resolution_t prev_res;
+	enum imx074_resolution_t pict_res;
+	enum imx074_resolution_t curr_res;
+	enum imx074_test_mode_t set_test;
+	unsigned short imgaddr;
+};
+static uint8_t imx074_delay_msecs_stdby = 5;
+static uint16_t imx074_delay_msecs_stream = 5;
+static int32_t config_csi;
+
+static struct imx074_ctrl_t *imx074_ctrl;
+static DECLARE_WAIT_QUEUE_HEAD(imx074_wait_queue);
+DEFINE_MUTEX(imx074_mut);
+
+/*=============================================================*/
+
+static int imx074_i2c_rxdata(unsigned short saddr,
+	unsigned char *rxdata, int length)
+{
+	struct i2c_msg msgs[] = {
+		{
+			.addr  = saddr,
+			.flags = 0,
+			.len   = 2,
+			.buf   = rxdata,
+		},
+		{
+			.addr  = saddr,
+			.flags = I2C_M_RD,
+			.len   = 2,
+			.buf   = rxdata,
+		},
+	};
+	if (i2c_transfer(imx074_client->adapter, msgs, 2) < 0) {
+		CDBG("imx074_i2c_rxdata failed!\n");
+		return -EIO;
+	}
+	return 0;
+}
+static int32_t imx074_i2c_txdata(unsigned short saddr,
+				unsigned char *txdata, int length)
+{
+	struct i2c_msg msg[] = {
+		{
+			.addr = saddr,
+			.flags = 0,
+			.len = length,
+			.buf = txdata,
+		 },
+	};
+	if (i2c_transfer(imx074_client->adapter, msg, 1) < 0) {
+		CDBG("imx074_i2c_txdata failed 0x%x\n", imx074_client->addr);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+
+static int32_t imx074_i2c_read(unsigned short raddr,
+	unsigned short *rdata, int rlen)
+{
+	int32_t rc = 0;
+	unsigned char buf[2];
+	if (!rdata)
+		return -EIO;
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (raddr & 0xFF00) >> 8;
+	buf[1] = (raddr & 0x00FF);
+	rc = imx074_i2c_rxdata(imx074_client->addr, buf, rlen);
+	if (rc < 0) {
+		CDBG("imx074_i2c_read 0x%x failed!\n", raddr);
+		return rc;
+	}
+	*rdata = (rlen == 2 ? buf[0] << 8 | buf[1] : buf[0]);
+	return rc;
+}
+
+static int imx074_af_i2c_rxdata_b(unsigned short saddr,
+	unsigned char *rxdata, int length)
+{
+	struct i2c_msg msgs[] = {
+		{
+		.addr  = saddr,
+		.flags = 0,
+		.len   = 1,
+		.buf   = rxdata,
+		},
+		{
+		.addr  = saddr,
+		.flags = I2C_M_RD,
+		.len   = 1,
+		.buf   = rxdata,
+		},
+	};
+
+	if (i2c_transfer(imx074_client->adapter, msgs, 2) < 0) {
+		CDBG("imx074_i2c_rxdata_b failed!\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int32_t imx074_i2c_read_w_eeprom(unsigned short raddr,
+	unsigned short *rdata)
+{
+	int32_t rc;
+	unsigned char buf;
+	if (!rdata)
+		return -EIO;
+	/* Read 2 bytes in sequence */
+	buf = (raddr & 0x00FF);
+	rc = imx074_af_i2c_rxdata_b(IMX074_EEPROM_SLAVE_ADDR, &buf, 1);
+	if (rc < 0) {
+		CDBG("imx074_i2c_read_eeprom 0x%x failed!\n", raddr);
+		return rc;
+	}
+	*rdata = buf<<8;
+
+	/* Read Second byte of data */
+	buf = (raddr & 0x00FF) + 1;
+	rc = imx074_af_i2c_rxdata_b(IMX074_EEPROM_SLAVE_ADDR, &buf, 1);
+	if (rc < 0) {
+		CDBG("imx074_i2c_read_eeprom 0x%x failed!\n", raddr);
+		return rc;
+	}
+	*rdata |= buf;
+	return rc;
+}
+
+static int32_t imx074_i2c_write_b_sensor(unsigned short waddr, uint8_t bdata)
+{
+	int32_t rc = -EFAULT;
+	unsigned char buf[3];
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (waddr & 0xFF00) >> 8;
+	buf[1] = (waddr & 0x00FF);
+	buf[2] = bdata;
+	CDBG("i2c_write_b addr = 0x%x, val = 0x%x\n", waddr, bdata);
+	rc = imx074_i2c_txdata(imx074_client->addr, buf, 3);
+	if (rc < 0) {
+		CDBG("i2c_write_b failed, addr = 0x%x, val = 0x%x!\n",
+			waddr, bdata);
+	}
+	return rc;
+}
+static int16_t imx074_i2c_write_b_af(unsigned short saddr,
+	unsigned short baddr, unsigned short bdata)
+{
+	int32_t rc;
+	unsigned char buf[2];
+	memset(buf, 0, sizeof(buf));
+	buf[0] = baddr;
+	buf[1] = bdata;
+	rc = imx074_i2c_txdata(saddr, buf, 2);
+	if (rc < 0)
+		CDBG("AF i2c_write failed, saddr = 0x%x, addr = 0x%x, val = 0x%x!",
+			saddr, baddr, bdata);
+	return rc;
+}
+
+static int32_t imx074_i2c_write_w_table(struct imx074_i2c_reg_conf const
+					 *reg_conf_tbl, int num)
+{
+	int i;
+	int32_t rc = -EIO;
+	for (i = 0; i < num; i++) {
+		rc = imx074_i2c_write_b_sensor(reg_conf_tbl->waddr,
+			reg_conf_tbl->wdata);
+		if (rc < 0)
+			break;
+		reg_conf_tbl++;
+	}
+	return rc;
+}
+static int16_t imx074_af_init(void)
+{
+	int32_t rc;
+	/* Initialize waveform */
+	rc = imx074_i2c_write_b_af(IMX074_AF_I2C_SLAVE_ID, 0x01, 0xA9);
+	rc = imx074_i2c_write_b_af(IMX074_AF_I2C_SLAVE_ID, 0x02, 0xD2);
+	rc = imx074_i2c_write_b_af(IMX074_AF_I2C_SLAVE_ID, 0x03, 0x0C);
+	rc = imx074_i2c_write_b_af(IMX074_AF_I2C_SLAVE_ID, 0x04, 0x14);
+	rc = imx074_i2c_write_b_af(IMX074_AF_I2C_SLAVE_ID, 0x05, 0xB6);
+	rc = imx074_i2c_write_b_af(IMX074_AF_I2C_SLAVE_ID, 0x06, 0x4F);
+	return rc;
+}
+
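+/*
+ * Derive the snapshot frame rate from the preview frame rate (both Q8).
+ * The frame-length ratio and the PLL-multiplier ratio are computed in
+ * Q16 (0x10000) fixed point and then applied to the input fps.
+ */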
+static void imx074_get_pict_fps(uint16_t fps, uint16_t *pfps)
+{
+	/* input fps is preview fps in Q8 format */
+	uint16_t preview_frame_length_lines, snapshot_frame_length_lines;
+	uint32_t divider, d1;
+	uint32_t pclk_mult;/*Q10 */
+	/* Total frame_length_lines and line_length_pck for preview */
+	preview_frame_length_lines = IMX074_QTR_SIZE_HEIGHT +
+		IMX074_VER_QTR_BLK_LINES;
+	/* Total frame_length_lines and line_length_pck for snapshot */
+	snapshot_frame_length_lines = IMX074_FULL_SIZE_HEIGHT +
+		IMX074_VER_FULL_BLK_LINES;
+	d1 = preview_frame_length_lines * 0x00010000 /
+		snapshot_frame_length_lines;
+	pclk_mult =
+		(uint32_t) ((imx074_regs.reg_pat[RES_CAPTURE].pll_multiplier *
+		0x00010000) /
+		(imx074_regs.reg_pat[RES_PREVIEW].pll_multiplier));
+	divider = d1 * pclk_mult / 0x00010000;
+	*pfps = (uint16_t) (fps * divider / 0x00010000);
+}
+
+static uint16_t imx074_get_prev_lines_pf(void)
+{
+	if (imx074_ctrl->prev_res == QTR_SIZE)
+		return IMX074_QTR_SIZE_HEIGHT + IMX074_VER_QTR_BLK_LINES;
+	else
+		return IMX074_FULL_SIZE_HEIGHT + IMX074_VER_FULL_BLK_LINES;
+
+}
+
+static uint16_t imx074_get_prev_pixels_pl(void)
+{
+	if (imx074_ctrl->prev_res == QTR_SIZE)
+		return IMX074_QTR_SIZE_WIDTH + IMX074_HRZ_QTR_BLK_PIXELS;
+	else
+		return IMX074_FULL_SIZE_WIDTH + IMX074_HRZ_FULL_BLK_PIXELS;
+}
+
+static uint16_t imx074_get_pict_lines_pf(void)
+{
+		if (imx074_ctrl->pict_res == QTR_SIZE)
+			return IMX074_QTR_SIZE_HEIGHT +
+				IMX074_VER_QTR_BLK_LINES;
+		else
+			return IMX074_FULL_SIZE_HEIGHT +
+				IMX074_VER_FULL_BLK_LINES;
+}
+
+static uint16_t imx074_get_pict_pixels_pl(void)
+{
+	if (imx074_ctrl->pict_res == QTR_SIZE)
+		return IMX074_QTR_SIZE_WIDTH +
+			IMX074_HRZ_QTR_BLK_PIXELS;
+	else
+		return IMX074_FULL_SIZE_WIDTH +
+			IMX074_HRZ_FULL_BLK_PIXELS;
+}
+
+static uint32_t imx074_get_pict_max_exp_lc(void)
+{
+	if (imx074_ctrl->pict_res == QTR_SIZE)
+		return (IMX074_QTR_SIZE_HEIGHT +
+			IMX074_VER_QTR_BLK_LINES)*24;
+	else
+		return (IMX074_FULL_SIZE_HEIGHT +
+			IMX074_VER_FULL_BLK_LINES)*24;
+}
+
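+/*
+ * The fps dividers are Q10 fixed point (0x400 == 1.0); scaling the frame
+ * length in lines by the divider stretches the frame and lowers the
+ * effective frame rate.
+ */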
+static int32_t imx074_set_fps(struct fps_cfg	*fps)
+{
+	uint16_t total_lines_per_frame;
+	int32_t rc = 0;
+	imx074_ctrl->fps_divider = fps->fps_div;
+	imx074_ctrl->pict_fps_divider = fps->pict_fps_div;
+	if (imx074_ctrl->curr_res  == QTR_SIZE) {
+		total_lines_per_frame = (uint16_t)(((IMX074_QTR_SIZE_HEIGHT +
+			IMX074_VER_QTR_BLK_LINES) *
+			imx074_ctrl->fps_divider) / 0x400);
+	} else {
+		total_lines_per_frame = (uint16_t)(((IMX074_FULL_SIZE_HEIGHT +
+			IMX074_VER_FULL_BLK_LINES) *
+			imx074_ctrl->pict_fps_divider) / 0x400);
+	}
+	if (imx074_i2c_write_b_sensor(REG_FRAME_LENGTH_LINES_HI,
+		((total_lines_per_frame & 0xFF00) >> 8)) < 0)
+		return rc;
+	if (imx074_i2c_write_b_sensor(REG_FRAME_LENGTH_LINES_LO,
+		(total_lines_per_frame & 0x00FF)) < 0)
+		return rc;
+	return rc;
+}
+
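+/*
+ * Program analogue gain and coarse integration time.  The register writes
+ * are bracketed by GROUPED_PARAMETER_HOLD so they latch on the same frame;
+ * gain is clamped to 0x00E0 and the frame length is stretched to
+ * line + IMX074_OFFSET when the requested integration time does not fit
+ * in the current frame.
+ */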
+static int32_t imx074_write_exp_gain(uint16_t gain, uint32_t line)
+{
+	static uint16_t max_legal_gain = 0x00E0;
+	uint8_t gain_msb, gain_lsb;
+	uint8_t intg_time_msb, intg_time_lsb;
+	uint8_t frame_length_line_msb, frame_length_line_lsb;
+	uint16_t frame_length_lines;
+	int32_t rc = -1;
+
+	CDBG("imx074_write_exp_gain : gain = %d line = %d", gain, line);
+	if (imx074_ctrl->curr_res  == QTR_SIZE) {
+		frame_length_lines = IMX074_QTR_SIZE_HEIGHT +
+			IMX074_VER_QTR_BLK_LINES;
+		frame_length_lines = frame_length_lines *
+			imx074_ctrl->fps_divider / 0x400;
+	} else {
+		frame_length_lines = IMX074_FULL_SIZE_HEIGHT +
+			IMX074_VER_FULL_BLK_LINES;
+		frame_length_lines = frame_length_lines *
+			imx074_ctrl->pict_fps_divider / 0x400;
+	}
+	if (line > (frame_length_lines - IMX074_OFFSET))
+		frame_length_lines = line + IMX074_OFFSET;
+
+	CDBG("imx074 setting line = %d\n", line);
+
+
+	CDBG("imx074 setting frame_length_lines = %d\n",
+					frame_length_lines);
+
+	if (gain > max_legal_gain)
+		/* range: 0 to 224 */
+		gain = max_legal_gain;
+
+	/* update gain registers */
+	gain_msb = (uint8_t) ((gain & 0xFF00) >> 8);
+	gain_lsb = (uint8_t) (gain & 0x00FF);
+
+	frame_length_line_msb = (uint8_t) ((frame_length_lines & 0xFF00) >> 8);
+	frame_length_line_lsb = (uint8_t) (frame_length_lines & 0x00FF);
+
+	/* update line count registers */
+	intg_time_msb = (uint8_t) ((line & 0xFF00) >> 8);
+	intg_time_lsb = (uint8_t) (line & 0x00FF);
+
+	rc = imx074_i2c_write_b_sensor(REG_GROUPED_PARAMETER_HOLD,
+					GROUPED_PARAMETER_HOLD);
+	if (rc < 0)
+		return rc;
+	CDBG("imx074 setting REG_ANALOGUE_GAIN_CODE_GLOBAL_HI = 0x%X\n",
+					gain_msb);
+	rc = imx074_i2c_write_b_sensor(REG_ANALOGUE_GAIN_CODE_GLOBAL_HI,
+					gain_msb);
+	if (rc < 0)
+		return rc;
+	CDBG("imx074 setting REG_ANALOGUE_GAIN_CODE_GLOBAL_LO = 0x%X\n",
+					gain_lsb);
+	rc = imx074_i2c_write_b_sensor(REG_ANALOGUE_GAIN_CODE_GLOBAL_LO,
+					gain_lsb);
+	if (rc < 0)
+		return rc;
+
+	CDBG("imx074 setting REG_FRAME_LENGTH_LINES_HI = 0x%X\n",
+					frame_length_line_msb);
+	rc = imx074_i2c_write_b_sensor(REG_FRAME_LENGTH_LINES_HI,
+			frame_length_line_msb);
+	if (rc < 0)
+		return rc;
+
+	CDBG("imx074 setting REG_FRAME_LENGTH_LINES_LO = 0x%X\n",
+			frame_length_line_lsb);
+	rc = imx074_i2c_write_b_sensor(REG_FRAME_LENGTH_LINES_LO,
+			frame_length_line_lsb);
+	if (rc < 0)
+		return rc;
+
+	CDBG("imx074 setting REG_COARSE_INTEGRATION_TIME_HI = 0x%X\n",
+					intg_time_msb);
+	rc = imx074_i2c_write_b_sensor(REG_COARSE_INTEGRATION_TIME_HI,
+					intg_time_msb);
+	if (rc < 0)
+		return rc;
+
+	CDBG("imx074 setting REG_COARSE_INTEGRATION_TIME_LO = 0x%X\n",
+					intg_time_lsb);
+	rc = imx074_i2c_write_b_sensor(REG_COARSE_INTEGRATION_TIME_LO,
+					intg_time_lsb);
+	if (rc < 0)
+		return rc;
+
+	rc = imx074_i2c_write_b_sensor(REG_GROUPED_PARAMETER_HOLD,
+					GROUPED_PARAMETER_HOLD_OFF);
+	if (rc < 0)
+		return rc;
+
+	return rc;
+}
+
+static int32_t imx074_set_pict_exp_gain(uint16_t gain, uint32_t line)
+{
+	int32_t rc = 0;
+	rc = imx074_write_exp_gain(gain, line);
+	return rc;
+}
+
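+/*
+ * Relative lens move: MOVE_NEAR sets the direction bit (0x80) in the AF
+ * command, the step count is scaled by imx074_l_region_code_per_step, and
+ * the tracked position is clamped to [0, IMX074_TOTAL_STEPS_NEAR_TO_FAR].
+ */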
+static int32_t imx074_move_focus(int direction,
+	int32_t num_steps)
+{
+	int32_t step_direction, dest_step_position, bit_mask;
+	int32_t rc = 0;
+
+	if (num_steps == 0)
+		return rc;
+
+	if (direction == MOVE_NEAR) {
+		step_direction = 1;
+		bit_mask = 0x80;
+	} else if (direction == MOVE_FAR) {
+		step_direction = -1;
+		bit_mask = 0x00;
+	} else {
+		CDBG("imx074_move_focus: Illegal focus direction");
+		return -EINVAL;
+	}
+	dest_step_position = imx074_ctrl->curr_step_pos +
+		(step_direction * num_steps);
+	if (dest_step_position < 0)
+		dest_step_position = 0;
+	else if (dest_step_position > IMX074_TOTAL_STEPS_NEAR_TO_FAR)
+		dest_step_position = IMX074_TOTAL_STEPS_NEAR_TO_FAR;
+	rc = imx074_i2c_write_b_af(IMX074_AF_I2C_SLAVE_ID, 0x00,
+		((num_steps * imx074_l_region_code_per_step) | bit_mask));
+	CDBG("%s: Index: %d\n", __func__, dest_step_position);
+	imx074_ctrl->curr_step_pos = dest_step_position;
+	return rc;
+}
+
+
+static int32_t imx074_set_default_focus(uint8_t af_step)
+{
+	int32_t rc;
+	/* Initialize to infinity */
+	rc = imx074_i2c_write_b_af(IMX074_AF_I2C_SLAVE_ID, 0x00, 0x7F);
+	rc = imx074_i2c_write_b_af(IMX074_AF_I2C_SLAVE_ID, 0x00, 0x7F);
+	imx074_ctrl->curr_step_pos = 0;
+	return rc;
+}
+static int32_t imx074_test(enum imx074_test_mode_t mo)
+{
+	int32_t rc = 0;
+	if (mo == TEST_OFF)
+		return rc;
+	else {
+		/* Set mo to 2 in order to enable the test pattern */
+		if (imx074_i2c_write_b_sensor(REG_TEST_PATTERN_MODE,
+			(uint8_t) mo) < 0) {
+			return rc;
+		}
+	}
+	return rc;
+}
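+
+/*
+ * REG_INIT loads the static PLL/analog register table and the mode table
+ * for the requested resolution with streaming stopped; UPDATE_PERIODIC
+ * reloads only the mode table, configures the CSI receiver once
+ * (config_csi), and restarts streaming.
+ */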
+static int32_t imx074_sensor_setting(int update_type, int rt)
+{
+	int32_t rc = 0;
+	struct msm_camera_csi_params imx074_csi_params;
+	switch (update_type) {
+	case REG_INIT:
+		if (rt == RES_PREVIEW || rt == RES_CAPTURE) {
+			struct imx074_i2c_reg_conf init_tbl[] = {
+				{REG_PRE_PLL_CLK_DIV,
+					imx074_regs.reg_pat_init[0].
+					pre_pll_clk_div},
+				{REG_PLSTATIM,
+					imx074_regs.reg_pat_init[0].
+					plstatim},
+				{REG_3024,
+					imx074_regs.reg_pat_init[0].
+					reg_3024},
+				{REG_IMAGE_ORIENTATION,
+					imx074_regs.reg_pat_init[0].
+					image_orientation},
+				{REG_VNDMY_ABLMGSHLMT,
+					imx074_regs.reg_pat_init[0].
+					vndmy_ablmgshlmt},
+				{REG_Y_OPBADDR_START_DI,
+					imx074_regs.reg_pat_init[0].
+					y_opbaddr_start_di},
+				{REG_3015,
+					imx074_regs.reg_pat_init[0].
+					reg_0x3015},
+				{REG_301C,
+					imx074_regs.reg_pat_init[0].
+					reg_0x301c},
+				{REG_302C,
+					imx074_regs.reg_pat_init[0].
+					reg_0x302c},
+				{REG_3031,
+					imx074_regs.reg_pat_init[0].reg_0x3031},
+				{REG_3041,
+					imx074_regs.reg_pat_init[0].reg_0x3041},
+				{REG_3051,
+					imx074_regs.reg_pat_init[0].reg_0x3051},
+				{REG_3053,
+					imx074_regs.reg_pat_init[0].reg_0x3053},
+				{REG_3057,
+					imx074_regs.reg_pat_init[0].reg_0x3057},
+				{REG_305C,
+					imx074_regs.reg_pat_init[0].reg_0x305c},
+				{REG_305D,
+					imx074_regs.reg_pat_init[0].reg_0x305d},
+				{REG_3060,
+					imx074_regs.reg_pat_init[0].reg_0x3060},
+				{REG_3065,
+					imx074_regs.reg_pat_init[0].reg_0x3065},
+				{REG_30AA,
+					imx074_regs.reg_pat_init[0].reg_0x30aa},
+				{REG_30AB,
+					imx074_regs.reg_pat_init[0].reg_0x30ab},
+				{REG_30B0,
+					imx074_regs.reg_pat_init[0].reg_0x30b0},
+				{REG_30B2,
+					imx074_regs.reg_pat_init[0].reg_0x30b2},
+				{REG_30D3,
+					imx074_regs.reg_pat_init[0].reg_0x30d3},
+				{REG_3106,
+					imx074_regs.reg_pat_init[0].reg_0x3106},
+				{REG_310C,
+					imx074_regs.reg_pat_init[0].reg_0x310c},
+				{REG_3304,
+					imx074_regs.reg_pat_init[0].reg_0x3304},
+				{REG_3305,
+					imx074_regs.reg_pat_init[0].reg_0x3305},
+				{REG_3306,
+					imx074_regs.reg_pat_init[0].reg_0x3306},
+				{REG_3307,
+					imx074_regs.reg_pat_init[0].reg_0x3307},
+				{REG_3308,
+					imx074_regs.reg_pat_init[0].reg_0x3308},
+				{REG_3309,
+					imx074_regs.reg_pat_init[0].reg_0x3309},
+				{REG_330A,
+					imx074_regs.reg_pat_init[0].reg_0x330a},
+				{REG_330B,
+					imx074_regs.reg_pat_init[0].reg_0x330b},
+				{REG_330C,
+					imx074_regs.reg_pat_init[0].reg_0x330c},
+				{REG_330D,
+					imx074_regs.reg_pat_init[0].reg_0x330d},
+				{REG_330F,
+					imx074_regs.reg_pat_init[0].reg_0x330f},
+				{REG_3381,
+					imx074_regs.reg_pat_init[0].reg_0x3381},
+			};
+			struct imx074_i2c_reg_conf init_mode_tbl[] = {
+				{REG_GROUPED_PARAMETER_HOLD,
+					GROUPED_PARAMETER_HOLD},
+				{REG_PLL_MULTIPLIER,
+					imx074_regs.reg_pat[rt].
+					pll_multiplier},
+				{REG_FRAME_LENGTH_LINES_HI,
+					imx074_regs.reg_pat[rt].
+					frame_length_lines_hi},
+				{REG_FRAME_LENGTH_LINES_LO,
+					imx074_regs.reg_pat[rt].
+					frame_length_lines_lo},
+				{REG_YADDR_START ,
+					imx074_regs.reg_pat[rt].
+					y_addr_start},
+				{REG_YAAAR_END,
+					imx074_regs.reg_pat[rt].
+					y_add_end},
+				{REG_X_OUTPUT_SIZE_MSB,
+					imx074_regs.reg_pat[rt].
+					x_output_size_msb},
+				{REG_X_OUTPUT_SIZE_LSB,
+					imx074_regs.reg_pat[rt].
+					x_output_size_lsb},
+				{REG_Y_OUTPUT_SIZE_MSB,
+					imx074_regs.reg_pat[rt].
+					y_output_size_msb},
+				{REG_Y_OUTPUT_SIZE_LSB ,
+					imx074_regs.reg_pat[rt].
+					y_output_size_lsb},
+				{REG_X_EVEN_INC,
+					imx074_regs.reg_pat[rt].
+					x_even_inc},
+				{REG_X_ODD_INC,
+					imx074_regs.reg_pat[rt].
+					x_odd_inc},
+				{REG_Y_EVEN_INC,
+					imx074_regs.reg_pat[rt].
+					y_even_inc},
+				{REG_Y_ODD_INC,
+					imx074_regs.reg_pat[rt].
+					y_odd_inc},
+				{REG_HMODEADD,
+					imx074_regs.reg_pat[rt].
+					hmodeadd},
+				{REG_VMODEADD,
+					imx074_regs.reg_pat[rt].
+					vmodeadd},
+				{REG_VAPPLINE_START,
+					imx074_regs.reg_pat[rt].
+					vapplinepos_start},
+				{REG_VAPPLINE_END,
+					imx074_regs.reg_pat[rt].
+					vapplinepos_end},
+				{REG_SHUTTER,
+					imx074_regs.reg_pat[rt].
+					shutter},
+				{REG_HADDAVE,
+					imx074_regs.reg_pat[rt].
+					haddave},
+				{REG_LANESEL,
+					imx074_regs.reg_pat[rt].
+					lanesel},
+				{REG_GROUPED_PARAMETER_HOLD,
+					GROUPED_PARAMETER_HOLD_OFF},
+
+			};
+			/* reset fps_divider */
+			imx074_ctrl->fps = 30 * Q8;
+			imx074_ctrl->fps_divider = 1 * 0x400;
+			/* stop streaming */
+			rc = imx074_i2c_write_b_sensor(REG_MODE_SELECT,
+				MODE_SELECT_STANDBY_MODE);
+			if (rc < 0)
+				return rc;
+			msleep(imx074_delay_msecs_stdby);
+			rc = imx074_i2c_write_w_table(&init_tbl[0],
+				ARRAY_SIZE(init_tbl));
+			if (rc < 0)
+				return rc;
+			rc = imx074_i2c_write_w_table(&init_mode_tbl[0],
+				ARRAY_SIZE(init_mode_tbl));
+			if (rc < 0)
+				return rc;
+			rc = imx074_test(imx074_ctrl->set_test);
+			return rc;
+		}
+		break;
+	case UPDATE_PERIODIC:
+		if (rt == RES_PREVIEW || rt == RES_CAPTURE) {
+			struct imx074_i2c_reg_conf mode_tbl[] = {
+				{REG_GROUPED_PARAMETER_HOLD,
+					GROUPED_PARAMETER_HOLD},
+				{REG_PLL_MULTIPLIER,
+					imx074_regs.reg_pat[rt].
+					pll_multiplier},
+				{REG_FRAME_LENGTH_LINES_HI,
+					imx074_regs.reg_pat[rt].
+					frame_length_lines_hi},
+				{REG_FRAME_LENGTH_LINES_LO,
+					imx074_regs.reg_pat[rt].
+					frame_length_lines_lo},
+				{REG_YADDR_START ,
+					imx074_regs.reg_pat[rt].
+					y_addr_start},
+				{REG_YAAAR_END,
+					imx074_regs.reg_pat[rt].
+					y_add_end},
+				{REG_X_OUTPUT_SIZE_MSB,
+					imx074_regs.reg_pat[rt].
+					x_output_size_msb},
+				{REG_X_OUTPUT_SIZE_LSB,
+					imx074_regs.reg_pat[rt].
+					x_output_size_lsb},
+				{REG_Y_OUTPUT_SIZE_MSB,
+					imx074_regs.reg_pat[rt].
+					y_output_size_msb},
+				{REG_Y_OUTPUT_SIZE_LSB ,
+					imx074_regs.reg_pat[rt].
+					y_output_size_lsb},
+				{REG_X_EVEN_INC,
+					imx074_regs.reg_pat[rt].
+					x_even_inc},
+				{REG_X_ODD_INC,
+					imx074_regs.reg_pat[rt].
+					x_odd_inc},
+				{REG_Y_EVEN_INC,
+					imx074_regs.reg_pat[rt].
+					y_even_inc},
+				{REG_Y_ODD_INC,
+					imx074_regs.reg_pat[rt].
+					y_odd_inc},
+				{REG_HMODEADD,
+					imx074_regs.reg_pat[rt].
+					hmodeadd},
+				{REG_VMODEADD,
+					imx074_regs.reg_pat[rt].
+					vmodeadd},
+				{REG_VAPPLINE_START,
+					imx074_regs.reg_pat[rt].
+					vapplinepos_start},
+				{REG_VAPPLINE_END,
+					imx074_regs.reg_pat[rt].
+					vapplinepos_end},
+				{REG_SHUTTER,
+					imx074_regs.reg_pat[rt].
+					shutter},
+				{REG_HADDAVE,
+					imx074_regs.reg_pat[rt].
+					haddave},
+				{REG_LANESEL,
+					imx074_regs.reg_pat[rt].
+					lanesel},
+				{REG_GROUPED_PARAMETER_HOLD,
+					GROUPED_PARAMETER_HOLD_OFF},
+			};
+
+			/* stop streaming */
+			rc = imx074_i2c_write_b_sensor(REG_MODE_SELECT,
+				MODE_SELECT_STANDBY_MODE);
+			msleep(imx074_delay_msecs_stdby);
+			if (config_csi == 0) {
+				imx074_csi_params.lane_cnt = 4;
+				imx074_csi_params.data_format = CSI_10BIT;
+				imx074_csi_params.lane_assign = 0xe4;
+				imx074_csi_params.dpcm_scheme = 0;
+				imx074_csi_params.settle_cnt = 0x14;
+				rc = msm_camio_csi_config(&imx074_csi_params);
+				/*imx074_delay_msecs_stdby*/
+				msleep(imx074_delay_msecs_stream);
+				config_csi = 1;
+			}
+			rc = imx074_i2c_write_w_table(&mode_tbl[0],
+				ARRAY_SIZE(mode_tbl));
+			if (rc < 0)
+				return rc;
+			rc = imx074_i2c_write_b_sensor(REG_MODE_SELECT,
+				MODE_SELECT_STREAM);
+			if (rc < 0)
+				return rc;
+			msleep(imx074_delay_msecs_stream);
+		}
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+
+static int32_t imx074_video_config(int mode)
+{
+
+	int32_t	rc = 0;
+	int	rt;
+	/* change sensor resolution	if needed */
+	if (imx074_ctrl->prev_res == QTR_SIZE) {
+		rt = RES_PREVIEW;
+	} else {
+		rt = RES_CAPTURE;
+	}
+	if (imx074_sensor_setting(UPDATE_PERIODIC, rt) < 0)
+		return rc;
+	imx074_ctrl->curr_res = imx074_ctrl->prev_res;
+	imx074_ctrl->sensormode = mode;
+	return rc;
+}
+
+static int32_t imx074_snapshot_config(int mode)
+{
+	int32_t rc = 0;
+	int rt = RES_PREVIEW; /* TODO: Used without initialization, guessing. */
+	/* change sensor resolution if needed */
+	if (imx074_ctrl->curr_res != imx074_ctrl->pict_res) {
+		if (imx074_ctrl->pict_res == QTR_SIZE) {
+			rt = RES_PREVIEW;
+		} else {
+			rt = RES_CAPTURE;
+		}
+	}
+	if (imx074_sensor_setting(UPDATE_PERIODIC, rt) < 0)
+		return rc;
+	imx074_ctrl->curr_res = imx074_ctrl->pict_res;
+	imx074_ctrl->sensormode = mode;
+	return rc;
+}
+static int32_t imx074_raw_snapshot_config(int mode)
+{
+	int32_t rc = 0;
+	int rt = RES_PREVIEW; /* TODO: Used without initialization, guessing. */
+	/* change sensor resolution if needed */
+	if (imx074_ctrl->curr_res != imx074_ctrl->pict_res) {
+		if (imx074_ctrl->pict_res == QTR_SIZE) {
+			rt = RES_PREVIEW;
+		} else {
+			rt = RES_CAPTURE;
+		}
+	}
+	if (imx074_sensor_setting(UPDATE_PERIODIC, rt) < 0)
+		return rc;
+	imx074_ctrl->curr_res = imx074_ctrl->pict_res;
+	imx074_ctrl->sensormode = mode;
+	return rc;
+}
+static int32_t imx074_set_sensor_mode(int mode,
+	int res)
+{
+	int32_t rc = 0;
+	switch (mode) {
+	case SENSOR_PREVIEW_MODE:
+		rc = imx074_video_config(mode);
+		break;
+	case SENSOR_SNAPSHOT_MODE:
+		rc = imx074_snapshot_config(mode);
+		break;
+	case SENSOR_RAW_SNAPSHOT_MODE:
+		rc = imx074_raw_snapshot_config(mode);
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+static int32_t imx074_power_down(void)
+{
+	imx074_i2c_write_b_sensor(REG_MODE_SELECT,
+		MODE_SELECT_STANDBY_MODE);
+	msleep(imx074_delay_msecs_stdby);
+	return 0;
+}
+static int imx074_probe_init_done(const struct msm_camera_sensor_info *data)
+{
+	gpio_set_value_cansleep(data->sensor_reset, 0);
+	gpio_direction_input(data->sensor_reset);
+	gpio_free(data->sensor_reset);
+	return 0;
+}
+
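+/*
+ * Read the factory calibration words from the module EEPROM: AWB ratios
+ * (R/G, B/G, Gr/Gb) at offsets 0x10/0x12/0x14 and AF calibration
+ * (macro/infinity positions, stroke amount, 1 m position) at 0x1A-0x22.
+ */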
+static int imx074_read_eeprom_data(struct sensor_cfg_data *cfg)
+{
+	int32_t rc = 0;
+	uint16_t eepromdata = 0;
+	uint8_t addr = 0;
+
+	addr = 0x10;
+	rc = imx074_i2c_read_w_eeprom(addr, &eepromdata);
+	if (rc < 0) {
+		CDBG("%s: Error Reading EEPROM @ 0x%x\n", __func__, addr);
+		return rc;
+	}
+	cfg->cfg.calib_info.r_over_g = eepromdata;
+
+	addr = 0x12;
+	rc = imx074_i2c_read_w_eeprom(addr, &eepromdata);
+	if (rc < 0) {
+		CDBG("%s: Error Reading EEPROM @ 0x%x\n", __func__, addr);
+		return rc;
+	}
+	cfg->cfg.calib_info.b_over_g = eepromdata;
+
+	addr = 0x14;
+	rc = imx074_i2c_read_w_eeprom(addr, &eepromdata);
+	if (rc < 0) {
+		CDBG("%s: Error Reading EEPROM @ 0x%x\n", __func__, addr);
+		return rc;
+	}
+	cfg->cfg.calib_info.gr_over_gb = eepromdata;
+
+	addr = 0x1A;
+	rc = imx074_i2c_read_w_eeprom(addr, &eepromdata);
+	if (rc < 0) {
+		CDBG("%s: Error Reading EEPROM @ 0x%x\n", __func__, addr);
+		return rc;
+	}
+	cfg->cfg.calib_info.macro_2_inf = eepromdata;
+
+	addr = 0x1C;
+	rc = imx074_i2c_read_w_eeprom(addr, &eepromdata);
+	if (rc < 0) {
+		CDBG("%s: Error Reading EEPROM @ 0x%x\n", __func__, addr);
+		return rc;
+	}
+	cfg->cfg.calib_info.inf_2_macro = eepromdata;
+
+	addr = 0x1E;
+	rc = imx074_i2c_read_w_eeprom(addr, &eepromdata);
+	if (rc < 0) {
+		CDBG("%s: Error Reading EEPROM @ 0x%x\n", __func__, addr);
+		return rc;
+	}
+	cfg->cfg.calib_info.stroke_amt = eepromdata;
+
+	addr = 0x20;
+	rc = imx074_i2c_read_w_eeprom(addr, &eepromdata);
+	if (rc < 0) {
+		CDBG("%s: Error Reading EEPROM @ 0x%x\n", __func__, addr);
+		return rc;
+	}
+	cfg->cfg.calib_info.af_pos_1m = eepromdata;
+
+	addr = 0x22;
+	rc = imx074_i2c_read_w_eeprom(addr, &eepromdata);
+	if (rc < 0) {
+		CDBG("%s: Error Reading EEPROM @ 0x%x\n", __func__, addr);
+		return rc;
+	}
+	cfg->cfg.calib_info.af_pos_inf = eepromdata;
+
+	return rc;
+}
+
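+/*
+ * Pulse the sensor reset GPIO, then read the model ID registers 0x0000
+ * and 0x0001 and verify they contain 0x00 and 0x74 before declaring the
+ * sensor present.
+ */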
+static int imx074_probe_init_sensor(const struct msm_camera_sensor_info *data)
+{
+	int32_t rc = 0;
+	unsigned short chipidl, chipidh;
+	CDBG("%s: %d\n", __func__, __LINE__);
+	rc = gpio_request(data->sensor_reset, "imx074");
+	CDBG(" imx074_probe_init_sensor \n");
+	if (!rc) {
+		CDBG("sensor_reset = %d\n", data->sensor_reset);
+		gpio_direction_output(data->sensor_reset, 0);
+		usleep_range(5000, 6000);
+		gpio_set_value_cansleep(data->sensor_reset, 1);
+		usleep_range(5000, 6000);
+	} else {
+		CDBG("gpio reset fail");
+		goto init_probe_done;
+	}
+	CDBG("imx074_probe_init_sensor is called\n");
+	/* 3. Read sensor Model ID: */
+	rc = imx074_i2c_read(0x0000, &chipidh, 1);
+	if (rc < 0) {
+		CDBG("Model read failed\n");
+		goto init_probe_fail;
+	}
+	rc = imx074_i2c_read(0x0001, &chipidl, 1);
+	if (rc < 0) {
+		CDBG("Model read failed\n");
+		goto init_probe_fail;
+	}
+	CDBG("imx074 model_id = 0x%x  0x%x\n", chipidh, chipidl);
+	/* 4. Compare sensor ID to IMX074 ID: */
+	if (chipidh != 0x00 || chipidl != 0x74) {
+		rc = -ENODEV;
+		CDBG("imx074_probe_init_sensor fail: chip id does not match\n");
+		goto init_probe_fail;
+	}
+	goto init_probe_done;
+init_probe_fail:
+	CDBG("imx074_probe_init_sensor fails\n");
+	imx074_probe_init_done(data);
+init_probe_done:
+	CDBG(" imx074_probe_init_sensor finishes\n");
+	return rc;
+}
+static int32_t imx074_poweron_af(void)
+{
+	int32_t rc = 0;
+	CDBG("imx074 enable AF actuator, gpio = %d\n",
+			imx074_ctrl->sensordata->vcm_pwd);
+	rc = gpio_request(imx074_ctrl->sensordata->vcm_pwd, "imx074");
+	if (!rc) {
+		gpio_direction_output(imx074_ctrl->sensordata->vcm_pwd, 1);
+		msleep(20);
+		rc = imx074_af_init();
+		if (rc < 0)
+			CDBG("imx074 AF initialisation failed\n");
+	} else {
+		CDBG("%s: AF PowerON gpio_request failed %d\n", __func__, rc);
+	 }
+	return rc;
+}
+static void imx074_poweroff_af(void)
+{
+	gpio_set_value_cansleep(imx074_ctrl->sensordata->vcm_pwd, 0);
+	gpio_free(imx074_ctrl->sensordata->vcm_pwd);
+}
+/* camsensor_iu060f_imx074_reset */
+int imx074_sensor_open_init(const struct msm_camera_sensor_info *data)
+{
+	int32_t rc = 0;
+	CDBG("%s: %d\n", __func__, __LINE__);
+	CDBG("Calling imx074_sensor_open_init\n");
+	imx074_ctrl = kzalloc(sizeof(struct imx074_ctrl_t), GFP_KERNEL);
+	if (!imx074_ctrl) {
+		CDBG("imx074_init failed!\n");
+		rc = -ENOMEM;
+		goto init_done;
+	}
+	imx074_ctrl->fps_divider = 1 * 0x00000400;
+	imx074_ctrl->pict_fps_divider = 1 * 0x00000400;
+	imx074_ctrl->fps = 30 * Q8;
+	imx074_ctrl->set_test = TEST_OFF;
+	imx074_ctrl->prev_res = QTR_SIZE;
+	imx074_ctrl->pict_res = FULL_SIZE;
+	imx074_ctrl->curr_res = INVALID_SIZE;
+	config_csi = 0;
+
+	if (data)
+		imx074_ctrl->sensordata = data;
+
+	/* enable mclk first */
+	msm_camio_clk_rate_set(IMX074_DEFAULT_MASTER_CLK_RATE);
+	usleep_range(1000, 2000);
+	rc = imx074_probe_init_sensor(data);
+	if (rc < 0) {
+		CDBG("Calling imx074_sensor_open_init fail\n");
+		goto probe_fail;
+	}
+
+	rc = imx074_sensor_setting(REG_INIT, RES_PREVIEW);
+	if (rc < 0) {
+		CDBG("imx074_sensor_setting failed\n");
+		goto init_fail;
+	}
+	if (machine_is_msm8x60_fluid())
+		rc = imx074_poweron_af();
+	else
+		rc = imx074_af_init();
+	if (rc < 0) {
+		CDBG("AF initialisation failed\n");
+		goto init_fail;
+	} else
+		goto init_done;
+probe_fail:
+	CDBG(" imx074_sensor_open_init probe fail\n");
+	kfree(imx074_ctrl);
+	return rc;
+init_fail:
+	CDBG(" imx074_sensor_open_init fail\n");
+	imx074_probe_init_done(data);
+	kfree(imx074_ctrl);
+init_done:
+	CDBG("imx074_sensor_open_init done\n");
+	return rc;
+}
+static int imx074_init_client(struct i2c_client *client)
+{
+	/* Initialize the MSM_CAMI2C Chip */
+	init_waitqueue_head(&imx074_wait_queue);
+	return 0;
+}
+
+static const struct i2c_device_id imx074_i2c_id[] = {
+	{"imx074", 0},
+	{ }
+};
+
+static int imx074_i2c_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	int rc = 0;
+	CDBG("imx074_probe called!\n");
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		CDBG("i2c_check_functionality failed\n");
+		rc = -ENOTSUPP;
+		goto probe_failure;
+	}
+
+	imx074_sensorw = kzalloc(sizeof(struct imx074_work_t), GFP_KERNEL);
+	if (!imx074_sensorw) {
+		CDBG("kzalloc failed.\n");
+		rc = -ENOMEM;
+		goto probe_failure;
+	}
+
+	i2c_set_clientdata(client, imx074_sensorw);
+	imx074_init_client(client);
+	imx074_client = client;
+
+
+	CDBG("imx074_probe succeeded! rc = %d\n", rc);
+	return 0;
+
+probe_failure:
+	CDBG("imx074_probe failed! rc = %d\n", rc);
+	return rc;
+}
+
+static int __exit imx074_remove(struct i2c_client *client)
+{
+	struct imx074_work_t *sensorw = i2c_get_clientdata(client);
+	free_irq(client->irq, sensorw);
+	imx074_client = NULL;
+	kfree(sensorw);
+	return 0;
+}
+
+static struct i2c_driver imx074_i2c_driver = {
+	.id_table = imx074_i2c_id,
+	.probe  = imx074_i2c_probe,
+	.remove = __exit_p(imx074_remove),
+	.driver = {
+		.name = "imx074",
+	},
+};
+
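+/*
+ * ioctl-style entry point from the MSM camera stack: copy a
+ * sensor_cfg_data block from user space, dispatch on cfgtype under
+ * imx074_mut, and copy any results back to user space.
+ */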
+int imx074_sensor_config(void __user *argp)
+{
+	struct sensor_cfg_data cdata;
+	long   rc = 0;
+	if (copy_from_user(&cdata,
+		(void *)argp,
+		sizeof(struct sensor_cfg_data)))
+		return -EFAULT;
+	mutex_lock(&imx074_mut);
+	CDBG("imx074_sensor_config: cfgtype = %d\n",
+	cdata.cfgtype);
+	switch (cdata.cfgtype) {
+	case CFG_GET_PICT_FPS:
+		imx074_get_pict_fps(
+			cdata.cfg.gfps.prevfps,
+			&(cdata.cfg.gfps.pictfps));
+		if (copy_to_user((void *)argp,
+			&cdata,
+			sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+			break;
+	case CFG_GET_PREV_L_PF:
+		cdata.cfg.prevl_pf =
+			imx074_get_prev_lines_pf();
+		if (copy_to_user((void *)argp,
+			&cdata,
+			sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+			break;
+	case CFG_GET_PREV_P_PL:
+		cdata.cfg.prevp_pl =
+			imx074_get_prev_pixels_pl();
+		if (copy_to_user((void *)argp,
+			&cdata,
+			sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+			break;
+
+	case CFG_GET_PICT_L_PF:
+		cdata.cfg.pictl_pf =
+			imx074_get_pict_lines_pf();
+		if (copy_to_user((void *)argp,
+			&cdata,
+			sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+			break;
+	case CFG_GET_PICT_P_PL:
+		cdata.cfg.pictp_pl =
+			imx074_get_pict_pixels_pl();
+		if (copy_to_user((void *)argp,
+			&cdata,
+			sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+			break;
+	case CFG_GET_PICT_MAX_EXP_LC:
+		cdata.cfg.pict_max_exp_lc =
+			imx074_get_pict_max_exp_lc();
+		if (copy_to_user((void *)argp,
+			&cdata,
+			sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+			break;
+	case CFG_SET_FPS:
+	case CFG_SET_PICT_FPS:
+		rc = imx074_set_fps(&(cdata.cfg.fps));
+		break;
+	case CFG_SET_EXP_GAIN:
+		rc =
+			imx074_write_exp_gain(
+			cdata.cfg.exp_gain.gain,
+			cdata.cfg.exp_gain.line);
+			break;
+	case CFG_SET_PICT_EXP_GAIN:
+		rc =
+			imx074_set_pict_exp_gain(
+			cdata.cfg.exp_gain.gain,
+			cdata.cfg.exp_gain.line);
+			break;
+	case CFG_SET_MODE:
+		rc = imx074_set_sensor_mode(cdata.mode,
+			cdata.rs);
+			break;
+	case CFG_PWR_DOWN:
+		rc = imx074_power_down();
+			break;
+	case CFG_GET_CALIB_DATA:
+		rc = imx074_read_eeprom_data(&cdata);
+		if (rc < 0)
+			break;
+		if (copy_to_user((void *)argp,
+			&cdata,
+			sizeof(cdata)))
+			rc = -EFAULT;
+		break;
+	case CFG_MOVE_FOCUS:
+		rc =
+			imx074_move_focus(
+			cdata.cfg.focus.dir,
+			cdata.cfg.focus.steps);
+			break;
+	case CFG_SET_DEFAULT_FOCUS:
+		rc =
+			imx074_set_default_focus(
+			cdata.cfg.focus.steps);
+			break;
+	case CFG_GET_AF_MAX_STEPS:
+		cdata.max_steps = IMX074_STEPS_NEAR_TO_CLOSEST_INF;
+		if (copy_to_user((void *)argp,
+			&cdata,
+			sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+			break;
+	case CFG_SET_EFFECT:
+	default:
+		rc = -EFAULT;
+		break;
+	}
+
+	mutex_unlock(&imx074_mut);
+
+	return rc;
+}
+static int imx074_sensor_release(void)
+{
+	int rc = -EBADF;
+	mutex_lock(&imx074_mut);
+	if (machine_is_msm8x60_fluid())
+		imx074_poweroff_af();
+	imx074_power_down();
+	gpio_set_value_cansleep(imx074_ctrl->sensordata->sensor_reset, 0);
+	msleep(5);
+	gpio_direction_input(imx074_ctrl->sensordata->sensor_reset);
+	gpio_free(imx074_ctrl->sensordata->sensor_reset);
+	kfree(imx074_ctrl);
+	imx074_ctrl = NULL;
+	CDBG("imx074_release completed\n");
+	mutex_unlock(&imx074_mut);
+
+	return rc;
+}
+
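+/*
+ * Probe path: register the I2C driver, verify the sensor responds on the
+ * bus, then hand the open/release/config callbacks and the mount angle
+ * back to the MSM camera framework.
+ */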
+static int imx074_sensor_probe(const struct msm_camera_sensor_info *info,
+		struct msm_sensor_ctrl *s)
+{
+	int rc = 0;
+	rc = i2c_add_driver(&imx074_i2c_driver);
+	if (rc < 0 || imx074_client == NULL) {
+		rc = -ENOTSUPP;
+		goto probe_fail;
+	}
+	msm_camio_clk_rate_set(IMX074_DEFAULT_MASTER_CLK_RATE);
+	rc = imx074_probe_init_sensor(info);
+	if (rc < 0)
+		goto probe_fail;
+	s->s_init = imx074_sensor_open_init;
+	s->s_release = imx074_sensor_release;
+	s->s_config  = imx074_sensor_config;
+	s->s_mount_angle = info->sensor_platform_info->mount_angle;
+	imx074_probe_init_done(info);
+	return rc;
+
+probe_fail:
+	CDBG("imx074_sensor_probe: SENSOR PROBE FAILS!\n");
+	i2c_del_driver(&imx074_i2c_driver);
+	return rc;
+}
+
+static int __imx074_probe(struct platform_device *pdev)
+{
+
+	return msm_camera_drv_start(pdev, imx074_sensor_probe);
+}
+
+static struct platform_driver msm_camera_driver = {
+	.probe = __imx074_probe,
+	.driver = {
+		.name = "msm_camera_imx074",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init imx074_init(void)
+{
+	return platform_driver_register(&msm_camera_driver);
+}
+
+module_init(imx074_init);
+
+MODULE_DESCRIPTION("Sony 13 MP Bayer sensor driver");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/media/video/msm/imx074.h b/drivers/media/video/msm/imx074.h
new file mode 100644
index 0000000..8be0fb7
--- /dev/null
+++ b/drivers/media/video/msm/imx074.h
@@ -0,0 +1,118 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef IMX074_H
+#define IMX074_H
+#include <linux/types.h>
+#include <mach/board.h>
+extern struct imx074_reg imx074_regs;
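+
+/*
+ * reg_struct_init holds the one-time PLL/analog settings written at
+ * REG_INIT; reg_struct holds the per-resolution mode settings indexed by
+ * RES_PREVIEW / RES_CAPTURE.
+ */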
+struct reg_struct_init {
+    /* PLL setting */
+	uint8_t pre_pll_clk_div; /* 0x0305 */
+	uint8_t plstatim; /* 0x302b */
+	uint8_t reg_3024; /* 0x3024 */
+	uint8_t image_orientation;  /* 0x0101*/
+	uint8_t vndmy_ablmgshlmt; /*0x300a*/
+	uint8_t y_opbaddr_start_di; /*0x3014*/
+	uint8_t reg_0x3015; /*0x3015*/
+	uint8_t reg_0x301c; /*0x301c*/
+	uint8_t reg_0x302c; /*0x302c*/
+	uint8_t reg_0x3031; /*0x3031*/
+	uint8_t reg_0x3041; /* 0x3041 */
+	uint8_t reg_0x3051; /* 0x3051 */
+	uint8_t reg_0x3053; /* 0x3053 */
+	uint8_t reg_0x3057; /* 0x3057 */
+	uint8_t reg_0x305c; /* 0x305c */
+	uint8_t reg_0x305d; /* 0x305d */
+	uint8_t reg_0x3060; /* 0x3060 */
+	uint8_t reg_0x3065; /* 0x3065 */
+	uint8_t reg_0x30aa; /* 0x30aa */
+	uint8_t reg_0x30ab;
+	uint8_t reg_0x30b0;
+	uint8_t reg_0x30b2;
+	uint8_t reg_0x30d3;
+	uint8_t reg_0x3106;
+	uint8_t reg_0x310c;
+	uint8_t reg_0x3304;
+	uint8_t reg_0x3305;
+	uint8_t reg_0x3306;
+	uint8_t reg_0x3307;
+	uint8_t reg_0x3308;
+	uint8_t reg_0x3309;
+	uint8_t reg_0x330a;
+	uint8_t reg_0x330b;
+	uint8_t reg_0x330c;
+	uint8_t reg_0x330d;
+	uint8_t reg_0x330f;
+	uint8_t reg_0x3381;
+};
+
+struct reg_struct {
+	uint8_t pll_multiplier; /* 0x0307 */
+	uint8_t frame_length_lines_hi; /* 0x0340*/
+	uint8_t frame_length_lines_lo; /* 0x0341*/
+	uint8_t y_addr_start;  /* 0x347 */
+	uint8_t y_add_end;  /* 0x034b */
+	uint8_t x_output_size_msb;  /* 0x034c */
+	uint8_t x_output_size_lsb;  /* 0x034d */
+	uint8_t y_output_size_msb; /* 0x034e */
+	uint8_t y_output_size_lsb; /* 0x034f */
+	uint8_t x_even_inc;  /* 0x0381 */
+	uint8_t x_odd_inc; /* 0x0383 */
+	uint8_t y_even_inc;  /* 0x0385 */
+	uint8_t y_odd_inc; /* 0x0387 */
+	uint8_t hmodeadd;   /* 0x3001 */
+	uint8_t vmodeadd;   /* 0x3016 */
+	uint8_t vapplinepos_start; /* 0x3069 */
+	uint8_t vapplinepos_end; /* 0x306b */
+	uint8_t shutter;	/* 0x3086 */
+	uint8_t haddave;	/* 0x30e8 */
+	uint8_t lanesel;    /* 0x3301 */
+};
+
+struct imx074_i2c_reg_conf {
+	unsigned short waddr;
+	unsigned short wdata;
+};
+
+enum imx074_test_mode_t {
+	TEST_OFF,
+	TEST_1,
+	TEST_2,
+	TEST_3
+};
+
+enum imx074_resolution_t {
+	QTR_SIZE,
+	FULL_SIZE,
+	INVALID_SIZE
+};
+enum imx074_setting {
+	RES_PREVIEW,
+	RES_CAPTURE
+};
+enum imx074_reg_update {
+	/* Sensor registers that need to be updated during initialization */
+	REG_INIT,
+	/* Sensor registers that need periodic I2C writes */
+	UPDATE_PERIODIC,
+	/* All the sensor Registers will be updated */
+	UPDATE_ALL,
+	/* Not valid update */
+	UPDATE_INVALID
+};
+
+struct imx074_reg {
+	const struct reg_struct_init  *reg_pat_init;
+	const struct reg_struct  *reg_pat;
+};
+#endif /* IMX074_H */
diff --git a/drivers/media/video/msm/imx074_reg.c b/drivers/media/video/msm/imx074_reg.c
new file mode 100644
index 0000000..ccc9b2f
--- /dev/null
+++ b/drivers/media/video/msm/imx074_reg.c
@@ -0,0 +1,111 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "imx074.h"
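+
+/*
+ * The initialisers below are positional: each value must stay in the same
+ * order as the corresponding member of reg_struct_init / reg_struct in
+ * imx074.h.
+ */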
+const struct reg_struct_init imx074_reg_init[1] = {
+	{
+		/* PLL setting */
+		0x02,	/* pre_pll_clk_div 0x0305 */
+		0x4B,	/* plstatim 0x302b */
+		0x03,	/* reg_3024 */
+		0x00,	/* image_orientation 0x0101 */
+		0x80,	/* vndmy_ablmgshlmt 0x300a*/
+		0x08,	/* y_opbaddr_start_di 3014*/
+		0x37,	/* 0x3015*/
+		0x01,	/* 0x301c*/
+		0x05,	/* 0x302c*/
+		0x26,	/* 0x3031*/
+		0x60,	/* 0x3041*/
+		0x24,	/* 0x3051 CLK DIV*/
+		0x34,	/* 0x3053*/
+		0xc0,	/* 0x3057*/
+		0x09,	/* 0x305c*/
+		0x07,	/* 0x305d */
+		0x30,	/* 0x3060 */
+		0x00,	/* 0x3065 */
+		0x08,	/* 0x30aa */
+		0x1c,	/* 0x30ab */
+		0x32,	/* 0x30b0 */
+		0x83,	/* 0x30b2 */
+		0x04,	/* 0x30d3 */
+		0x78,	/* 0x3106 */
+		0x82,	/* 0x310c */
+		0x05,	/* 0x3304 */
+		0x04,	/* 0x3305 */
+		0x11,	/* 0x3306 */
+		0x02,	/* 0x3307 */
+		0x0c,	/* 0x3308 */
+		0x06,	/* 0x3309 */
+		0x08,	/* 0x330a */
+		0x04,	/* 0x330b */
+		0x08,	/* 0x330c */
+		0x06,	/* 0x330d */
+		0x01,	/* 0x330f */
+		0x00,	/* 0x3381 */
+
+	}
+};
+
+/* Preview / Snapshot register settings	*/
+const struct reg_struct	imx074_reg_pat[2] = {
+	/*preview*/
+	{
+		0x2D, /*pll_multiplier*/
+		0x06, /*frame_length_lines_hi 0x0340*/
+		0x2D, /* frame_length_lines_lo 0x0341*/
+		0x00, /* y_addr_start 0x347 */
+		0x2F, /* y_add_end 0x034b */
+		0x08, /* x_output_size_msb 0x034c */
+		0x38, /* x_output_size_lsb 0x034d */
+		0x06, /* y_output_size_msb 0x034e */
+		0x18, /* y_output_size_lsb 0x034f */
+		0x01, /* x_even_inc 0x0381 */
+		0x03, /* x_odd_inc 0x0383 */
+		0x01, /* y_even_inc 0x0385 */
+		0x03, /* y_odd_inc 0x0387 */
+		0x80, /* hmodeadd 0x3001 */
+		0x16, /* vmodeadd 0x3016 */
+		0x24, /* vapplinepos_start 0x3069 */
+		0x53, /* vapplinepos_end 0x306b */
+		0x00, /* shutter 0x3086 */
+		0x80, /* haddave 0x30e8 */
+		0x83, /* lanesel 0x3301 */
+	},
+
+	/*snapshot*/
+	{
+		0x26, /*pll_multiplier*/
+		0x0C, /* frame_length_lines_hi 0x0340*/
+		0x90, /* frame_length_lines_lo 0x0341*/
+		0x00, /* y_addr_start 0x347 */
+		0x2F, /* y_add_end 0x034b */
+		0x10, /* x_output_size_msb 0x034c */
+		0x70, /* x_output_size_lsb 0x034d */
+		0x0c, /* y_output_size_msb 0x034e */
+		0x30, /* y_output_size_lsb 0x034f */
+		0x01, /* x_even_inc 0x0381 */
+		0x01, /* x_odd_inc 0x0383 */
+		0x01, /* y_even_inc 0x0385 */
+		0x01, /* y_odd_inc 0x0387 */
+		0x00, /* hmodeadd 0x3001 */
+		0x06, /* vmodeadd 0x3016 */
+		0x24, /* vapplinepos_start 0x3069 */
+		0x53, /* vapplinepos_end 0x306b */
+		0x00, /* shutter 0x3086 */
+		0x00, /* haddave 0x30e8 */
+		0x03, /* lanesel 0x3301 */
+	}
+};
+struct imx074_reg imx074_regs = {
+	.reg_pat_init = &imx074_reg_init[0],
+	.reg_pat = &imx074_reg_pat[0],
+};
diff --git a/drivers/media/video/msm/imx074_v4l2.c b/drivers/media/video/msm/imx074_v4l2.c
new file mode 100644
index 0000000..18a653d
--- /dev/null
+++ b/drivers/media/video/msm/imx074_v4l2.c
@@ -0,0 +1,1476 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <media/msm_camera.h>
+#include <media/v4l2-subdev.h>
+#include <mach/gpio.h>
+#include <mach/camera.h>
+#include <asm/mach-types.h>
+#include "imx074.h"
+#include "msm.h"
+
+/*SENSOR REGISTER DEFINES*/
+#define	IMX074_EEPROM_SLAVE_ADDR			0x52
+#define REG_GROUPED_PARAMETER_HOLD			0x0104
+#define GROUPED_PARAMETER_HOLD_OFF			0x00
+#define GROUPED_PARAMETER_HOLD				0x01
+#define REG_MODE_SELECT					0x100
+#define MODE_SELECT_STANDBY_MODE			0x00
+#define MODE_SELECT_STREAM				0x01
+/* Integration Time */
+#define REG_COARSE_INTEGRATION_TIME_HI			0x0202
+#define REG_COARSE_INTEGRATION_TIME_LO			0x0203
+/* Gain */
+#define REG_ANALOGUE_GAIN_CODE_GLOBAL_HI		0x0204
+#define REG_ANALOGUE_GAIN_CODE_GLOBAL_LO		0x0205
+/* PLL registers */
+#define REG_PLL_MULTIPLIER				0x0307
+#define REG_PRE_PLL_CLK_DIV				0x0305
+#define REG_PLSTATIM					0x302b
+#define REG_3024					0x3024
+#define REG_IMAGE_ORIENTATION				0x0101
+#define REG_VNDMY_ABLMGSHLMT				0x300a
+#define REG_Y_OPBADDR_START_DI				0x3014
+#define REG_3015					0x3015
+#define REG_301C					0x301C
+#define REG_302C					0x302C
+#define REG_3031					0x3031
+#define REG_3041					0x3041
+#define REG_3051					0x3051
+#define REG_3053					0x3053
+#define REG_3057					0x3057
+#define REG_305C					0x305C
+#define REG_305D					0x305D
+#define REG_3060					0x3060
+#define REG_3065					0x3065
+#define REG_30AA					0x30AA
+#define REG_30AB					0x30AB
+#define REG_30B0					0x30B0
+#define REG_30B2					0x30B2
+#define REG_30D3					0x30D3
+#define REG_3106					0x3106
+#define REG_310C					0x310C
+#define REG_3304					0x3304
+#define REG_3305					0x3305
+#define REG_3306					0x3306
+#define REG_3307					0x3307
+#define REG_3308					0x3308
+#define REG_3309					0x3309
+#define REG_330A					0x330A
+#define REG_330B					0x330B
+#define REG_330C					0x330C
+#define REG_330D					0x330D
+#define REG_330F					0x330F
+#define REG_3381					0x3381
+
+/* mode setting */
+#define REG_FRAME_LENGTH_LINES_HI			0x0340
+#define REG_FRAME_LENGTH_LINES_LO			0x0341
+#define REG_YADDR_START					0x0347
+#define REG_YAAAR_END					0x034b
+#define REG_X_OUTPUT_SIZE_MSB				0x034c
+#define REG_X_OUTPUT_SIZE_LSB				0x034d
+#define REG_Y_OUTPUT_SIZE_MSB				0x034e
+#define REG_Y_OUTPUT_SIZE_LSB				0x034f
+#define REG_X_EVEN_INC					0x0381
+#define REG_X_ODD_INC					0x0383
+#define REG_Y_EVEN_INC					0x0385
+#define REG_Y_ODD_INC					0x0387
+#define REG_HMODEADD					0x3001
+#define REG_VMODEADD					0x3016
+#define REG_VAPPLINE_START				0x3069
+#define REG_VAPPLINE_END				0x306b
+#define REG_SHUTTER					0x3086
+#define REG_HADDAVE					0x30e8
+#define REG_LANESEL					0x3301
+/* Test Pattern */
+#define REG_TEST_PATTERN_MODE				0x0601
+
+#define REG_LINE_LENGTH_PCK_HI				0x0342
+#define REG_LINE_LENGTH_PCK_LO				0x0343
+/*..... TYPE DECLARATIONS.....*/
+#define	IMX074_OFFSET					3
+#define	IMX074_DEFAULT_MASTER_CLK_RATE			24000000
+/* Full	Size */
+#define	IMX074_FULL_SIZE_WIDTH				4208
+#define	IMX074_FULL_SIZE_HEIGHT				3120
+#define	IMX074_FULL_SIZE_DUMMY_PIXELS			0
+#define	IMX074_FULL_SIZE_DUMMY_LINES			0
+/* Quarter Size	*/
+#define	IMX074_QTR_SIZE_WIDTH				2104
+#define	IMX074_QTR_SIZE_HEIGHT				1560
+#define	IMX074_QTR_SIZE_DUMMY_PIXELS			0
+#define	IMX074_QTR_SIZE_DUMMY_LINES			0
+/* Blanking as measured	on the scope */
+/* Full	Size */
+#define	IMX074_HRZ_FULL_BLK_PIXELS			264
+#define	IMX074_VER_FULL_BLK_LINES			96
+/* Quarter Size	*/
+#define	IMX074_HRZ_QTR_BLK_PIXELS			2368
+#define	IMX074_VER_QTR_BLK_LINES			21
+#define	Q8						0x100
+#define	Q10						0x400
+#define	IMX074_AF_I2C_SLAVE_ID				0x72
+#define	IMX074_STEPS_NEAR_TO_CLOSEST_INF		52
+#define	IMX074_TOTAL_STEPS_NEAR_TO_FAR			52
+static uint32_t imx074_l_region_code_per_step = 2;
+
+struct imx074_work_t {
+	struct work_struct work;
+};
+
+static struct imx074_work_t *imx074_sensorw;
+static struct i2c_client *imx074_client;
+
+struct imx074_ctrl_t {
+	const struct msm_camera_sensor_info *sensordata;
+	uint32_t sensormode;
+	uint32_t fps_divider;/* init to 1 * 0x00000400 */
+	uint32_t pict_fps_divider;/* init to 1 * 0x00000400 */
+	uint16_t fps;
+	int16_t curr_lens_pos;
+	uint16_t curr_step_pos;
+	uint16_t my_reg_gain;
+	uint32_t my_reg_line_count;
+	uint16_t total_lines_per_frame;
+	enum imx074_resolution_t prev_res;
+	enum imx074_resolution_t pict_res;
+	enum imx074_resolution_t curr_res;
+	enum imx074_test_mode_t set_test;
+	unsigned short imgaddr;
+
+	struct v4l2_subdev *sensor_dev;
+	struct imx074_format *fmt;
+};
+static uint8_t imx074_delay_msecs_stdby = 5;
+static uint16_t imx074_delay_msecs_stream = 5;
+static int32_t config_csi;
+
+static struct imx074_ctrl_t *imx074_ctrl;
+static DECLARE_WAIT_QUEUE_HEAD(imx074_wait_queue);
+DEFINE_MUTEX(imx074_mut);
+
+struct imx074_format {
+	enum v4l2_mbus_pixelcode code;
+	enum v4l2_colorspace colorspace;
+	u16 fmt;
+	u16 order;
+};
+/*=============================================================*/
+
+static int imx074_i2c_rxdata(unsigned short saddr,
+	unsigned char *rxdata, int length)
+{
+	struct i2c_msg msgs[] = {
+		{
+			.addr  = saddr,
+			.flags = 0,
+			.len   = 2,
+			.buf   = rxdata,
+		},
+		{
+			.addr  = saddr,
+			.flags = I2C_M_RD,
+			.len   = 2,
+			.buf   = rxdata,
+		},
+	};
+	if (i2c_transfer(imx074_client->adapter, msgs, 2) < 0) {
+		CDBG("imx074_i2c_rxdata failed!\n");
+		return -EIO;
+	}
+	return 0;
+}
+static int32_t imx074_i2c_txdata(unsigned short saddr,
+				unsigned char *txdata, int length)
+{
+	struct i2c_msg msg[] = {
+		{
+			.addr = saddr,
+			.flags = 0,
+			.len = length,
+			.buf = txdata,
+		 },
+	};
+	if (i2c_transfer(imx074_client->adapter, msg, 1) < 0) {
+		CDBG("imx074_i2c_txdata failed 0x%x\n", imx074_client->addr);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+
+static int32_t imx074_i2c_read(unsigned short raddr,
+	unsigned short *rdata, int rlen)
+{
+	int32_t rc = 0;
+	unsigned char buf[2];
+	if (!rdata)
+		return -EIO;
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (raddr & 0xFF00) >> 8;
+	buf[1] = (raddr & 0x00FF);
+	rc = imx074_i2c_rxdata(imx074_client->addr, buf, rlen);
+	if (rc < 0) {
+		CDBG("imx074_i2c_read 0x%x failed!\n", raddr);
+		return rc;
+	}
+	*rdata = (rlen == 2 ? buf[0] << 8 | buf[1] : buf[0]);
+	return rc;
+}
+
+static int imx074_af_i2c_rxdata_b(unsigned short saddr,
+	unsigned char *rxdata, int length)
+{
+	struct i2c_msg msgs[] = {
+		{
+		.addr  = saddr,
+		.flags = 0,
+		.len   = 1,
+		.buf   = rxdata,
+		},
+		{
+		.addr  = saddr,
+		.flags = I2C_M_RD,
+		.len   = 1,
+		.buf   = rxdata,
+		},
+	};
+
+	if (i2c_transfer(imx074_client->adapter, msgs, 2) < 0) {
+		CDBG("imx074_i2c_rxdata_b failed!\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int32_t imx074_i2c_read_w_eeprom(unsigned short raddr,
+	unsigned short *rdata)
+{
+	int32_t rc;
+	unsigned char buf;
+	if (!rdata)
+		return -EIO;
+	/* Read 2 bytes in sequence */
+	buf = (raddr & 0x00FF);
+	rc = imx074_af_i2c_rxdata_b(IMX074_EEPROM_SLAVE_ADDR, &buf, 1);
+	if (rc < 0) {
+		CDBG("imx074_i2c_read_eeprom 0x%x failed!\n", raddr);
+		return rc;
+	}
+	*rdata = buf<<8;
+
+	/* Read Second byte of data */
+	buf = (raddr & 0x00FF) + 1;
+	rc = imx074_af_i2c_rxdata_b(IMX074_EEPROM_SLAVE_ADDR, &buf, 1);
+	if (rc < 0) {
+		CDBG("imx074_i2c_read_eeprom 0x%x failed!\n", raddr);
+		return rc;
+	}
+	*rdata |= buf;
+	return rc;
+}
+
+static int32_t imx074_i2c_write_b_sensor(unsigned short waddr, uint8_t bdata)
+{
+	int32_t rc = -EFAULT;
+	unsigned char buf[3];
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (waddr & 0xFF00) >> 8;
+	buf[1] = (waddr & 0x00FF);
+	buf[2] = bdata;
+	CDBG("i2c_write_b addr = 0x%x, val = 0x%x\n", waddr, bdata);
+	rc = imx074_i2c_txdata(imx074_client->addr, buf, 3);
+	if (rc < 0) {
+		CDBG("i2c_write_b failed, addr = 0x%x, val = 0x%x!\n",
+			waddr, bdata);
+	}
+	return rc;
+}
+static int16_t imx074_i2c_write_b_af(unsigned short saddr,
+	unsigned short baddr, unsigned short bdata)
+{
+	int32_t rc;
+	unsigned char buf[2];
+	memset(buf, 0, sizeof(buf));
+	buf[0] = baddr;
+	buf[1] = bdata;
+	rc = imx074_i2c_txdata(saddr, buf, 2);
+	if (rc < 0)
+		CDBG("AF i2c_write failed, saddr = 0x%x addr = 0x%x, val = 0x%x!",
+			saddr, baddr, bdata);
+	return rc;
+}
+
+static int32_t imx074_i2c_write_w_table(struct imx074_i2c_reg_conf const
+					 *reg_conf_tbl, int num)
+{
+	int i;
+	int32_t rc = -EIO;
+	for (i = 0; i < num; i++) {
+		rc = imx074_i2c_write_b_sensor(reg_conf_tbl->waddr,
+			reg_conf_tbl->wdata);
+		if (rc < 0)
+			break;
+		reg_conf_tbl++;
+	}
+	return rc;
+}
+static int16_t imx074_af_init(void)
+{
+	int32_t rc;
+	/* Initialize waveform */
+	rc = imx074_i2c_write_b_af(IMX074_AF_I2C_SLAVE_ID, 0x01, 0xA9);
+	rc = imx074_i2c_write_b_af(IMX074_AF_I2C_SLAVE_ID, 0x02, 0xD2);
+	rc = imx074_i2c_write_b_af(IMX074_AF_I2C_SLAVE_ID, 0x03, 0x0C);
+	rc = imx074_i2c_write_b_af(IMX074_AF_I2C_SLAVE_ID, 0x04, 0x14);
+	rc = imx074_i2c_write_b_af(IMX074_AF_I2C_SLAVE_ID, 0x05, 0xB6);
+	rc = imx074_i2c_write_b_af(IMX074_AF_I2C_SLAVE_ID, 0x06, 0x4F);
+	return rc;
+}
+
+static void imx074_get_pict_fps(uint16_t fps, uint16_t *pfps)
+{
+	/* input fps is preview fps in Q8 format */
+	uint16_t preview_frame_length_lines, snapshot_frame_length_lines;
+	uint32_t divider, d1;
+	uint32_t pclk_mult;/*Q10 */
+	/* Total frame_length_lines and line_length_pck for preview */
+	preview_frame_length_lines = IMX074_QTR_SIZE_HEIGHT +
+		IMX074_VER_QTR_BLK_LINES;
+	/* Total frame_length_lines and line_length_pck for snapshot */
+	snapshot_frame_length_lines = IMX074_FULL_SIZE_HEIGHT +
+		IMX074_VER_FULL_BLK_LINES;
+	d1 = preview_frame_length_lines * 0x00010000 /
+		snapshot_frame_length_lines;
+	pclk_mult =
+		(uint32_t) ((imx074_regs.reg_pat[RES_CAPTURE].pll_multiplier *
+		0x00010000) /
+		(imx074_regs.reg_pat[RES_PREVIEW].pll_multiplier));
+	divider = d1 * pclk_mult / 0x00010000;
+	*pfps = (uint16_t) (fps * divider / 0x00010000);
+}
+
+static uint16_t imx074_get_prev_lines_pf(void)
+{
+	if (imx074_ctrl->prev_res == QTR_SIZE)
+		return IMX074_QTR_SIZE_HEIGHT + IMX074_VER_QTR_BLK_LINES;
+	else
+		return IMX074_FULL_SIZE_HEIGHT + IMX074_VER_FULL_BLK_LINES;
+
+}
+
+static uint16_t imx074_get_prev_pixels_pl(void)
+{
+	if (imx074_ctrl->prev_res == QTR_SIZE)
+		return IMX074_QTR_SIZE_WIDTH + IMX074_HRZ_QTR_BLK_PIXELS;
+	else
+		return IMX074_FULL_SIZE_WIDTH + IMX074_HRZ_FULL_BLK_PIXELS;
+}
+
+static uint16_t imx074_get_pict_lines_pf(void)
+{
+		if (imx074_ctrl->pict_res == QTR_SIZE)
+			return IMX074_QTR_SIZE_HEIGHT +
+				IMX074_VER_QTR_BLK_LINES;
+		else
+			return IMX074_FULL_SIZE_HEIGHT +
+				IMX074_VER_FULL_BLK_LINES;
+}
+
+static uint16_t imx074_get_pict_pixels_pl(void)
+{
+	if (imx074_ctrl->pict_res == QTR_SIZE)
+		return IMX074_QTR_SIZE_WIDTH +
+			IMX074_HRZ_QTR_BLK_PIXELS;
+	else
+		return IMX074_FULL_SIZE_WIDTH +
+			IMX074_HRZ_FULL_BLK_PIXELS;
+}
+
+static uint32_t imx074_get_pict_max_exp_lc(void)
+{
+	if (imx074_ctrl->pict_res == QTR_SIZE)
+		return (IMX074_QTR_SIZE_HEIGHT +
+			IMX074_VER_QTR_BLK_LINES)*24;
+	else
+		return (IMX074_FULL_SIZE_HEIGHT +
+			IMX074_VER_FULL_BLK_LINES)*24;
+}
+
+static int32_t imx074_set_fps(struct fps_cfg	*fps)
+{
+	uint16_t total_lines_per_frame;
+	int32_t rc = 0;
+	imx074_ctrl->fps_divider = fps->fps_div;
+	imx074_ctrl->pict_fps_divider = fps->pict_fps_div;
+	total_lines_per_frame = (uint16_t)(((IMX074_QTR_SIZE_HEIGHT +
+		IMX074_VER_QTR_BLK_LINES) * imx074_ctrl->fps_divider) / 0x400);
+	if (imx074_i2c_write_b_sensor(REG_FRAME_LENGTH_LINES_HI,
+		((total_lines_per_frame & 0xFF00) >> 8)) < 0)
+		return rc;
+	if (imx074_i2c_write_b_sensor(REG_FRAME_LENGTH_LINES_LO,
+		(total_lines_per_frame & 0x00FF)) < 0)
+		return rc;
+	return rc;
+}
+
+static int32_t imx074_write_exp_gain(uint16_t gain, uint32_t line)
+{
+	static uint16_t max_legal_gain = 0x00E0;
+	uint8_t gain_msb, gain_lsb;
+	uint8_t intg_time_msb, intg_time_lsb;
+	uint8_t frame_length_line_msb, frame_length_line_lsb;
+	uint16_t frame_length_lines;
+	int32_t rc = -1;
+	CDBG("imx074_write_exp_gain : gain = %d line = %d", gain, line);
+	if (imx074_ctrl->curr_res  == QTR_SIZE) {
+		frame_length_lines = IMX074_QTR_SIZE_HEIGHT +
+			IMX074_VER_QTR_BLK_LINES;
+		frame_length_lines = frame_length_lines *
+			imx074_ctrl->fps_divider / 0x400;
+	} else {
+		frame_length_lines = IMX074_FULL_SIZE_HEIGHT +
+			IMX074_VER_FULL_BLK_LINES;
+		frame_length_lines = frame_length_lines *
+			imx074_ctrl->pict_fps_divider / 0x400;
+	}
+	if (line > (frame_length_lines - IMX074_OFFSET))
+		frame_length_lines = line + IMX074_OFFSET;
+
+	CDBG("imx074 setting line = %d\n", line);
+
+
+	CDBG("imx074 setting frame_length_lines = %d\n",
+					frame_length_lines);
+
+	if (gain > max_legal_gain)
+		/* range: 0 to 224 */
+		gain = max_legal_gain;
+
+	/* update gain registers */
+	gain_msb = (uint8_t) ((gain & 0xFF00) >> 8);
+	gain_lsb = (uint8_t) (gain & 0x00FF);
+
+	frame_length_line_msb = (uint8_t) ((frame_length_lines & 0xFF00) >> 8);
+	frame_length_line_lsb = (uint8_t) (frame_length_lines & 0x00FF);
+
+	/* update line count registers */
+	intg_time_msb = (uint8_t) ((line & 0xFF00) >> 8);
+	intg_time_lsb = (uint8_t) (line & 0x00FF);
+
+	rc = imx074_i2c_write_b_sensor(REG_GROUPED_PARAMETER_HOLD,
+					GROUPED_PARAMETER_HOLD);
+	if (rc < 0)
+		return rc;
+	CDBG("imx074 setting REG_ANALOGUE_GAIN_CODE_GLOBAL_HI = 0x%X\n",
+					gain_msb);
+	rc = imx074_i2c_write_b_sensor(REG_ANALOGUE_GAIN_CODE_GLOBAL_HI,
+					gain_msb);
+	if (rc < 0)
+		return rc;
+	CDBG("imx074 setting REG_ANALOGUE_GAIN_CODE_GLOBAL_LO = 0x%X\n",
+					gain_lsb);
+	rc = imx074_i2c_write_b_sensor(REG_ANALOGUE_GAIN_CODE_GLOBAL_LO,
+					gain_lsb);
+	if (rc < 0)
+		return rc;
+
+	CDBG("imx074 setting REG_FRAME_LENGTH_LINES_HI = 0x%X\n",
+					frame_length_line_msb);
+	rc = imx074_i2c_write_b_sensor(REG_FRAME_LENGTH_LINES_HI,
+			frame_length_line_msb);
+	if (rc < 0)
+		return rc;
+
+	CDBG("imx074 setting REG_FRAME_LENGTH_LINES_LO = 0x%X\n",
+			frame_length_line_lsb);
+	rc = imx074_i2c_write_b_sensor(REG_FRAME_LENGTH_LINES_LO,
+			frame_length_line_lsb);
+	if (rc < 0)
+		return rc;
+
+	CDBG("imx074 setting REG_COARSE_INTEGRATION_TIME_HI = 0x%X\n",
+					intg_time_msb);
+	rc = imx074_i2c_write_b_sensor(REG_COARSE_INTEGRATION_TIME_HI,
+					intg_time_msb);
+	if (rc < 0)
+		return rc;
+
+	CDBG("imx074 setting REG_COARSE_INTEGRATION_TIME_LO = 0x%X\n",
+					intg_time_lsb);
+	rc = imx074_i2c_write_b_sensor(REG_COARSE_INTEGRATION_TIME_LO,
+					intg_time_lsb);
+	if (rc < 0)
+		return rc;
+
+	rc = imx074_i2c_write_b_sensor(REG_GROUPED_PARAMETER_HOLD,
+					GROUPED_PARAMETER_HOLD_OFF);
+	if (rc < 0)
+		return rc;
+
+	return rc;
+}
+
+static int32_t imx074_set_pict_exp_gain(uint16_t gain, uint32_t line)
+{
+	int32_t rc = 0;
+	rc = imx074_write_exp_gain(gain, line);
+	return rc;
+}
+
+static int32_t imx074_move_focus(int direction,
+	int32_t num_steps)
+{
+	int32_t step_direction, dest_step_position, bit_mask;
+	int32_t rc = 0;
+
+	if (num_steps == 0)
+		return rc;
+
+	if (direction == MOVE_NEAR) {
+		step_direction = 1;
+		bit_mask = 0x80;
+	} else if (direction == MOVE_FAR) {
+		step_direction = -1;
+		bit_mask = 0x00;
+	} else {
+		CDBG("imx074_move_focus: Illegal focus direction");
+		return -EINVAL;
+	}
+	dest_step_position = imx074_ctrl->curr_step_pos +
+		(step_direction * num_steps);
+	if (dest_step_position < 0)
+		dest_step_position = 0;
+	else if (dest_step_position > IMX074_TOTAL_STEPS_NEAR_TO_FAR)
+		dest_step_position = IMX074_TOTAL_STEPS_NEAR_TO_FAR;
+	rc = imx074_i2c_write_b_af(IMX074_AF_I2C_SLAVE_ID, 0x00,
+		((num_steps * imx074_l_region_code_per_step) | bit_mask));
+	CDBG("%s: Index: %d\n", __func__, dest_step_position);
+	imx074_ctrl->curr_step_pos = dest_step_position;
+	return rc;
+}
+
+
+static int32_t imx074_set_default_focus(uint8_t af_step)
+{
+	int32_t rc;
+	/* Initialize to infinity */
+	rc = imx074_i2c_write_b_af(IMX074_AF_I2C_SLAVE_ID, 0x00, 0x7F);
+	rc = imx074_i2c_write_b_af(IMX074_AF_I2C_SLAVE_ID, 0x00, 0x7F);
+	imx074_ctrl->curr_step_pos = 0;
+	return rc;
+}
+static int32_t imx074_test(enum imx074_test_mode_t mo)
+{
+	int32_t rc = 0;
+	if (mo == TEST_OFF)
+		return rc;
+	else {
+		/* Set mo to 2 in order to enable the test pattern */
+		if (imx074_i2c_write_b_sensor(REG_TEST_PATTERN_MODE,
+			(uint8_t) mo) < 0) {
+			return rc;
+		}
+	}
+	return rc;
+}
+static int32_t imx074_sensor_setting(int update_type, int rt)
+{
+	int32_t rc = 0;
+	struct msm_camera_csid_params imx074_csid_params;
+	struct msm_camera_csiphy_params imx074_csiphy_params;
+	switch (update_type) {
+	case REG_INIT:
+		if (rt == RES_PREVIEW || rt == RES_CAPTURE) {
+			struct imx074_i2c_reg_conf init_tbl[] = {
+				{REG_PRE_PLL_CLK_DIV,
+					imx074_regs.reg_pat_init[0].
+					pre_pll_clk_div},
+				{REG_PLSTATIM,
+					imx074_regs.reg_pat_init[0].
+					plstatim},
+				{REG_3024,
+					imx074_regs.reg_pat_init[0].
+					reg_3024},
+				{REG_IMAGE_ORIENTATION,
+					imx074_regs.reg_pat_init[0].
+					image_orientation},
+				{REG_VNDMY_ABLMGSHLMT,
+					imx074_regs.reg_pat_init[0].
+					vndmy_ablmgshlmt},
+				{REG_Y_OPBADDR_START_DI,
+					imx074_regs.reg_pat_init[0].
+					y_opbaddr_start_di},
+				{REG_3015,
+					imx074_regs.reg_pat_init[0].
+					reg_0x3015},
+				{REG_301C,
+					imx074_regs.reg_pat_init[0].
+					reg_0x301c},
+				{REG_302C,
+					imx074_regs.reg_pat_init[0].
+					reg_0x302c},
+				{REG_3031,
+					imx074_regs.reg_pat_init[0].reg_0x3031},
+				{REG_3041,
+					imx074_regs.reg_pat_init[0].reg_0x3041},
+				{REG_3051,
+					imx074_regs.reg_pat_init[0].reg_0x3051},
+				{REG_3053,
+					imx074_regs.reg_pat_init[0].reg_0x3053},
+				{REG_3057,
+					imx074_regs.reg_pat_init[0].reg_0x3057},
+				{REG_305C,
+					imx074_regs.reg_pat_init[0].reg_0x305c},
+				{REG_305D,
+					imx074_regs.reg_pat_init[0].reg_0x305d},
+				{REG_3060,
+					imx074_regs.reg_pat_init[0].reg_0x3060},
+				{REG_3065,
+					imx074_regs.reg_pat_init[0].reg_0x3065},
+				{REG_30AA,
+					imx074_regs.reg_pat_init[0].reg_0x30aa},
+				{REG_30AB,
+					imx074_regs.reg_pat_init[0].reg_0x30ab},
+				{REG_30B0,
+					imx074_regs.reg_pat_init[0].reg_0x30b0},
+				{REG_30B2,
+					imx074_regs.reg_pat_init[0].reg_0x30b2},
+				{REG_30D3,
+					imx074_regs.reg_pat_init[0].reg_0x30d3},
+				{REG_3106,
+					imx074_regs.reg_pat_init[0].reg_0x3106},
+				{REG_310C,
+					imx074_regs.reg_pat_init[0].reg_0x310c},
+				{REG_3304,
+					imx074_regs.reg_pat_init[0].reg_0x3304},
+				{REG_3305,
+					imx074_regs.reg_pat_init[0].reg_0x3305},
+				{REG_3306,
+					imx074_regs.reg_pat_init[0].reg_0x3306},
+				{REG_3307,
+					imx074_regs.reg_pat_init[0].reg_0x3307},
+				{REG_3308,
+					imx074_regs.reg_pat_init[0].reg_0x3308},
+				{REG_3309,
+					imx074_regs.reg_pat_init[0].reg_0x3309},
+				{REG_330A,
+					imx074_regs.reg_pat_init[0].reg_0x330a},
+				{REG_330B,
+					imx074_regs.reg_pat_init[0].reg_0x330b},
+				{REG_330C,
+					imx074_regs.reg_pat_init[0].reg_0x330c},
+				{REG_330D,
+					imx074_regs.reg_pat_init[0].reg_0x330d},
+				{REG_330F,
+					imx074_regs.reg_pat_init[0].reg_0x330f},
+				{REG_3381,
+					imx074_regs.reg_pat_init[0].reg_0x3381},
+			};
+			struct imx074_i2c_reg_conf init_mode_tbl[] = {
+				{REG_GROUPED_PARAMETER_HOLD,
+					GROUPED_PARAMETER_HOLD},
+				{REG_PLL_MULTIPLIER,
+					imx074_regs.reg_pat[rt].
+					pll_multiplier},
+				{REG_FRAME_LENGTH_LINES_HI,
+					imx074_regs.reg_pat[rt].
+					frame_length_lines_hi},
+				{REG_FRAME_LENGTH_LINES_LO,
+					imx074_regs.reg_pat[rt].
+					frame_length_lines_lo},
+				{REG_YADDR_START ,
+					imx074_regs.reg_pat[rt].
+					y_addr_start},
+				{REG_YAAAR_END,
+					imx074_regs.reg_pat[rt].
+					y_add_end},
+				{REG_X_OUTPUT_SIZE_MSB,
+					imx074_regs.reg_pat[rt].
+					x_output_size_msb},
+				{REG_X_OUTPUT_SIZE_LSB,
+					imx074_regs.reg_pat[rt].
+					x_output_size_lsb},
+				{REG_Y_OUTPUT_SIZE_MSB,
+					imx074_regs.reg_pat[rt].
+					y_output_size_msb},
+				{REG_Y_OUTPUT_SIZE_LSB ,
+					imx074_regs.reg_pat[rt].
+					y_output_size_lsb},
+				{REG_X_EVEN_INC,
+					imx074_regs.reg_pat[rt].
+					x_even_inc},
+				{REG_X_ODD_INC,
+					imx074_regs.reg_pat[rt].
+					x_odd_inc},
+				{REG_Y_EVEN_INC,
+					imx074_regs.reg_pat[rt].
+					y_even_inc},
+				{REG_Y_ODD_INC,
+					imx074_regs.reg_pat[rt].
+					y_odd_inc},
+				{REG_HMODEADD,
+					imx074_regs.reg_pat[rt].
+					hmodeadd},
+				{REG_VMODEADD,
+					imx074_regs.reg_pat[rt].
+					vmodeadd},
+				{REG_VAPPLINE_START,
+					imx074_regs.reg_pat[rt].
+					vapplinepos_start},
+				{REG_VAPPLINE_END,
+					imx074_regs.reg_pat[rt].
+					vapplinepos_end},
+				{REG_SHUTTER,
+					imx074_regs.reg_pat[rt].
+					shutter},
+				{REG_HADDAVE,
+					imx074_regs.reg_pat[rt].
+					haddave},
+				{REG_LANESEL,
+					imx074_regs.reg_pat[rt].
+					lanesel},
+				{REG_GROUPED_PARAMETER_HOLD,
+					GROUPED_PARAMETER_HOLD_OFF},
+
+			};
+			/* reset fps_divider */
+			imx074_ctrl->fps = 30 * Q8;
+			imx074_ctrl->fps_divider = 1 * 0x400;
+			/* stop streaming */
+			rc = imx074_i2c_write_b_sensor(REG_MODE_SELECT,
+				MODE_SELECT_STANDBY_MODE);
+			if (rc < 0)
+				return rc;
+			msleep(imx074_delay_msecs_stdby);
+			rc = imx074_i2c_write_w_table(&init_tbl[0],
+				ARRAY_SIZE(init_tbl));
+			if (rc < 0)
+				return rc;
+			rc = imx074_i2c_write_w_table(&init_mode_tbl[0],
+				ARRAY_SIZE(init_mode_tbl));
+			if (rc < 0)
+				return rc;
+			rc = imx074_test(imx074_ctrl->set_test);
+			return rc;
+		}
+		break;
+	case UPDATE_PERIODIC:
+		if (rt == RES_PREVIEW || rt == RES_CAPTURE) {
+			struct imx074_i2c_reg_conf mode_tbl[] = {
+				{REG_GROUPED_PARAMETER_HOLD,
+					GROUPED_PARAMETER_HOLD},
+				{REG_PLL_MULTIPLIER,
+					imx074_regs.reg_pat[rt].
+					pll_multiplier},
+				{REG_FRAME_LENGTH_LINES_HI,
+					imx074_regs.reg_pat[rt].
+					frame_length_lines_hi},
+				{REG_FRAME_LENGTH_LINES_LO,
+					imx074_regs.reg_pat[rt].
+					frame_length_lines_lo},
+				{REG_YADDR_START,
+					imx074_regs.reg_pat[rt].
+					y_addr_start},
+				{REG_YAAAR_END,
+					imx074_regs.reg_pat[rt].
+					y_add_end},
+				{REG_X_OUTPUT_SIZE_MSB,
+					imx074_regs.reg_pat[rt].
+					x_output_size_msb},
+				{REG_X_OUTPUT_SIZE_LSB,
+					imx074_regs.reg_pat[rt].
+					x_output_size_lsb},
+				{REG_Y_OUTPUT_SIZE_MSB,
+					imx074_regs.reg_pat[rt].
+					y_output_size_msb},
+				{REG_Y_OUTPUT_SIZE_LSB,
+					imx074_regs.reg_pat[rt].
+					y_output_size_lsb},
+				{REG_X_EVEN_INC,
+					imx074_regs.reg_pat[rt].
+					x_even_inc},
+				{REG_X_ODD_INC,
+					imx074_regs.reg_pat[rt].
+					x_odd_inc},
+				{REG_Y_EVEN_INC,
+					imx074_regs.reg_pat[rt].
+					y_even_inc},
+				{REG_Y_ODD_INC,
+					imx074_regs.reg_pat[rt].
+					y_odd_inc},
+				{REG_HMODEADD,
+					imx074_regs.reg_pat[rt].
+					hmodeadd},
+				{REG_VMODEADD,
+					imx074_regs.reg_pat[rt].
+					vmodeadd},
+				{REG_VAPPLINE_START,
+					imx074_regs.reg_pat[rt].
+					vapplinepos_start},
+				{REG_VAPPLINE_END,
+					imx074_regs.reg_pat[rt].
+					vapplinepos_end},
+				{REG_SHUTTER,
+					imx074_regs.reg_pat[rt].
+					shutter},
+				{REG_HADDAVE,
+					imx074_regs.reg_pat[rt].
+					haddave},
+				{REG_LANESEL,
+					imx074_regs.reg_pat[rt].
+					lanesel},
+				{REG_GROUPED_PARAMETER_HOLD,
+					GROUPED_PARAMETER_HOLD_OFF},
+			};
+
+			/* stop streaming */
+			rc = imx074_i2c_write_b_sensor(REG_MODE_SELECT,
+				MODE_SELECT_STANDBY_MODE);
+			msleep(imx074_delay_msecs_stdby);
+			rc = imx074_i2c_write_w_table(&mode_tbl[0],
+				ARRAY_SIZE(mode_tbl));
+			if (config_csi == 0) {
+				struct msm_camera_csid_vc_cfg imx074_vccfg[] = {
+					{0, CSI_RAW10, CSI_DECODE_10BIT},
+					{1, CSI_EMBED_DATA, CSI_DECODE_8BIT},
+				};
+				imx074_csid_params.lane_cnt = 4;
+				imx074_csid_params.lane_assign = 0xe4;
+				imx074_csid_params.lut_params.num_cid =
+					ARRAY_SIZE(imx074_vccfg);
+				imx074_csid_params.lut_params.vc_cfg =
+					&imx074_vccfg[0];
+				imx074_csiphy_params.lane_cnt = 4;
+				imx074_csiphy_params.settle_cnt = 0x1B;
+				rc = msm_camio_csid_config(&imx074_csid_params);
+				v4l2_subdev_notify(imx074_ctrl->sensor_dev,
+						NOTIFY_CID_CHANGE, NULL);
+				mb();
+				rc = msm_camio_csiphy_config
+					(&imx074_csiphy_params);
+				mb();
+				/*imx074_delay_msecs_stdby*/
+				msleep(imx074_delay_msecs_stream);
+				config_csi = 1;
+			}
+			if (rc < 0)
+				return rc;
+			rc = imx074_i2c_write_b_sensor(REG_MODE_SELECT,
+				MODE_SELECT_STREAM);
+			if (rc < 0)
+				return rc;
+			msleep(imx074_delay_msecs_stream);
+		}
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+
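+/*
+ * Mode-change helpers. imx074_video_config(), imx074_snapshot_config() and
+ * imx074_raw_snapshot_config() all funnel into
+ * imx074_sensor_setting(UPDATE_PERIODIC, rt) and only differ in whether the
+ * quarter-size (RES_PREVIEW) or full-size (RES_CAPTURE) register table is
+ * programmed before streaming is restarted.
+ */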
+static int32_t imx074_video_config(int mode)
+{
+	int32_t rc = 0;
+	int rt;
+	/* change sensor resolution if needed */
+	if (imx074_ctrl->prev_res == QTR_SIZE)
+		rt = RES_PREVIEW;
+	else
+		rt = RES_CAPTURE;
+
+	rc = imx074_sensor_setting(UPDATE_PERIODIC, rt);
+	if (rc < 0)
+		return rc;
+	imx074_ctrl->curr_res = imx074_ctrl->prev_res;
+	imx074_ctrl->sensormode = mode;
+	return rc;
+}
+
+static int32_t imx074_snapshot_config(int mode)
+{
+	int32_t rc = 0;
+	int rt = RES_PREVIEW; /* TODO: Used without initialization, guessing. */
+	/* change sensor resolution if needed */
+	if (imx074_ctrl->curr_res != imx074_ctrl->pict_res) {
+		if (imx074_ctrl->pict_res == QTR_SIZE)
+			rt = RES_PREVIEW;
+		else
+			rt = RES_CAPTURE;
+	}
+	rc = imx074_sensor_setting(UPDATE_PERIODIC, rt);
+	if (rc < 0)
+		return rc;
+	imx074_ctrl->curr_res = imx074_ctrl->pict_res;
+	imx074_ctrl->sensormode = mode;
+	return rc;
+}
+static int32_t imx074_raw_snapshot_config(int mode)
+{
+	int32_t rc = 0;
+	int rt = RES_PREVIEW; /* TODO: Used without initialization, guessing. */
+	/* change sensor resolution if needed */
+	if (imx074_ctrl->curr_res != imx074_ctrl->pict_res) {
+		if (imx074_ctrl->pict_res == QTR_SIZE)
+			rt = RES_PREVIEW;
+		else
+			rt = RES_CAPTURE;
+	}
+	rc = imx074_sensor_setting(UPDATE_PERIODIC, rt);
+	if (rc < 0)
+		return rc;
+	imx074_ctrl->curr_res = imx074_ctrl->pict_res;
+	imx074_ctrl->sensormode = mode;
+	return rc;
+}
+static int32_t imx074_set_sensor_mode(int mode,
+	int res)
+{
+	int32_t rc = 0;
+	switch (mode) {
+	case SENSOR_PREVIEW_MODE:
+		rc = imx074_video_config(mode);
+		break;
+	case SENSOR_SNAPSHOT_MODE:
+		rc = imx074_snapshot_config(mode);
+		break;
+	case SENSOR_RAW_SNAPSHOT_MODE:
+		rc = imx074_raw_snapshot_config(mode);
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+static int32_t imx074_power_down(void)
+{
+	imx074_i2c_write_b_sensor(REG_MODE_SELECT,
+		MODE_SELECT_STANDBY_MODE);
+	msleep(imx074_delay_msecs_stdby);
+	return 0;
+}
+static int imx074_probe_init_done(const struct msm_camera_sensor_info *data)
+{
+	gpio_set_value_cansleep(data->sensor_reset, 0);
+	gpio_direction_input(data->sensor_reset);
+	gpio_free(data->sensor_reset);
+	return 0;
+}
+
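+/*
+ * Read the factory calibration block from the lens/sensor EEPROM. Each value
+ * is a 16-bit word: 0x10/0x12/0x14 hold the AWB ratios (r_over_g, b_over_g,
+ * gr_over_gb) and 0x1A-0x22 hold the AF calibration words (macro_2_inf,
+ * inf_2_macro, stroke_amt, af_pos_1m, af_pos_inf). The offsets here mirror
+ * the calib_info layout and are assumed to match the module vendor's map.
+ */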
+static int imx074_read_eeprom_data(struct sensor_cfg_data *cfg)
+{
+	int32_t rc = 0;
+	uint16_t eepromdata = 0;
+	uint8_t addr = 0;
+
+	addr = 0x10;
+	rc = imx074_i2c_read_w_eeprom(addr, &eepromdata);
+	if (rc < 0) {
+		CDBG("%s: Error Reading EEPROM @ 0x%x\n", __func__, addr);
+		return rc;
+	}
+	cfg->cfg.calib_info.r_over_g = eepromdata;
+
+	addr = 0x12;
+	rc = imx074_i2c_read_w_eeprom(addr, &eepromdata);
+	if (rc < 0) {
+		CDBG("%s: Error Reading EEPROM @ 0x%x\n", __func__, addr);
+		return rc;
+	}
+	cfg->cfg.calib_info.b_over_g = eepromdata;
+
+	addr = 0x14;
+	rc = imx074_i2c_read_w_eeprom(addr, &eepromdata);
+	if (rc < 0) {
+		CDBG("%s: Error Reading EEPROM @ 0x%x\n", __func__, addr);
+		return rc;
+	}
+	cfg->cfg.calib_info.gr_over_gb = eepromdata;
+
+	addr = 0x1A;
+	rc = imx074_i2c_read_w_eeprom(addr, &eepromdata);
+	if (rc < 0) {
+		CDBG("%s: Error Reading EEPROM @ 0x%x\n", __func__, addr);
+		return rc;
+	}
+	cfg->cfg.calib_info.macro_2_inf = eepromdata;
+
+	addr = 0x1C;
+	rc = imx074_i2c_read_w_eeprom(addr, &eepromdata);
+	if (rc < 0) {
+		CDBG("%s: Error Reading EEPROM @ 0x%x\n", __func__, addr);
+		return rc;
+	}
+	cfg->cfg.calib_info.inf_2_macro = eepromdata;
+
+	addr = 0x1E;
+	rc = imx074_i2c_read_w_eeprom(addr, &eepromdata);
+	if (rc < 0) {
+		CDBG("%s: Error Reading EEPROM @ 0x%x\n", __func__, addr);
+		return rc;
+	}
+	cfg->cfg.calib_info.stroke_amt = eepromdata;
+
+	addr = 0x20;
+	rc = imx074_i2c_read_w_eeprom(addr, &eepromdata);
+	if (rc < 0) {
+		CDBG("%s: Error Reading EEPROM @ 0x%x\n", __func__, addr);
+		return rc;
+	}
+	cfg->cfg.calib_info.af_pos_1m = eepromdata;
+
+	addr = 0x22;
+	rc = imx074_i2c_read_w_eeprom(addr, &eepromdata);
+	if (rc < 0) {
+		CDBG("%s: Error Reading EEPROM @ 0x%x\n", __func__, addr);
+		return rc;
+	}
+	cfg->cfg.calib_info.af_pos_inf = eepromdata;
+
+	return rc;
+}
+
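+/*
+ * Basic presence check used by both the platform probe and open paths:
+ * pulse the sensor reset GPIO, then read the model ID registers at
+ * 0x0000/0x0001 and verify they return 0x00/0x74 before going any further.
+ */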
+static int imx074_probe_init_sensor(const struct msm_camera_sensor_info *data)
+{
+	int32_t rc = 0;
+	unsigned short chipidl, chipidh;
+	CDBG("%s: %d\n", __func__, __LINE__);
+	rc = gpio_request(data->sensor_reset, "imx074");
+	CDBG("imx074_probe_init_sensor\n");
+	if (!rc) {
+		CDBG("sensor_reset = %d\n", rc);
+		gpio_direction_output(data->sensor_reset, 0);
+		usleep_range(5000, 6000);
+		gpio_set_value_cansleep(data->sensor_reset, 1);
+		usleep_range(5000, 6000);
+	} else {
+		CDBG("gpio reset fail");
+		goto init_probe_done;
+	}
+	CDBG("imx074_probe_init_sensor is called\n");
+	/* 3. Read sensor Model ID: */
+	rc = imx074_i2c_read(0x0000, &chipidh, 1);
+	if (rc < 0) {
+		CDBG("Model read failed\n");
+		goto init_probe_fail;
+	}
+	rc = imx074_i2c_read(0x0001, &chipidl, 1);
+	if (rc < 0) {
+		CDBG("Model read failed\n");
+		goto init_probe_fail;
+	}
+	CDBG("imx074 model_id = 0x%x  0x%x\n", chipidh, chipidl);
+	/* 4. Compare sensor ID to IMX074 ID: */
+	if (chipidh != 0x00 || chipidl != 0x74) {
+		rc = -ENODEV;
+		CDBG("imx074_probe_init_sensor failed: chip id does not match\n");
+		goto init_probe_fail;
+	}
+	goto init_probe_done;
+init_probe_fail:
+	CDBG("imx074_probe_init_sensor fails\n");
+	imx074_probe_init_done(data);
+init_probe_done:
+	CDBG(" imx074_probe_init_sensor finishes\n");
+	return rc;
+}
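+/*
+ * AF actuator power helpers: on fluid targets the VCM is powered through the
+ * vcm_pwd GPIO before the actuator is initialised; other targets only run
+ * imx074_af_init() from the open path.
+ */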
+static int32_t imx074_poweron_af(void)
+{
+	int32_t rc = 0;
+	CDBG("imx074 enable AF actuator, gpio = %d\n",
+			imx074_ctrl->sensordata->vcm_pwd);
+	rc = gpio_request(imx074_ctrl->sensordata->vcm_pwd, "imx074");
+	if (!rc) {
+		gpio_direction_output(imx074_ctrl->sensordata->vcm_pwd, 1);
+		msleep(20);
+		rc = imx074_af_init();
+		if (rc < 0)
+			CDBG("imx074 AF initialisation failed\n");
+	} else {
+		CDBG("%s: AF PowerON gpio_request failed %d\n", __func__, rc);
+	}
+	return rc;
+}
+static void imx074_poweroff_af(void)
+{
+	gpio_set_value_cansleep(imx074_ctrl->sensordata->vcm_pwd, 0);
+	gpio_free(imx074_ctrl->sensordata->vcm_pwd);
+}
+/* camsensor_iu060f_imx074_reset */
+int imx074_sensor_open_init(const struct msm_camera_sensor_info *data)
+{
+	int32_t rc = 0;
+	CDBG("%s: %d\n", __func__, __LINE__);
+	CDBG("Calling imx074_sensor_open_init\n");
+	imx074_ctrl->fps_divider = 1 * 0x00000400;
+	imx074_ctrl->pict_fps_divider = 1 * 0x00000400;
+	imx074_ctrl->fps = 30 * Q8;
+	imx074_ctrl->set_test = TEST_OFF;
+	imx074_ctrl->prev_res = QTR_SIZE;
+	imx074_ctrl->pict_res = FULL_SIZE;
+	imx074_ctrl->curr_res = INVALID_SIZE;
+	config_csi = 0;
+
+	if (data)
+		imx074_ctrl->sensordata = data;
+
+	/* enable mclk first */
+	msm_camio_clk_rate_set(IMX074_DEFAULT_MASTER_CLK_RATE);
+	usleep_range(1000, 2000);
+	rc = imx074_probe_init_sensor(data);
+	if (rc < 0) {
+		CDBG("Calling imx074_sensor_open_init fail\n");
+		goto probe_fail;
+	}
+
+	rc = imx074_sensor_setting(REG_INIT, RES_PREVIEW);
+	if (rc < 0) {
+		CDBG("imx074_sensor_setting failed\n");
+		goto init_fail;
+	}
+	if (machine_is_msm8x60_fluid())
+		rc = imx074_poweron_af();
+	else
+		rc = imx074_af_init();
+	if (rc < 0) {
+		CDBG("AF initialisation failed\n");
+		goto init_fail;
+	} else
+		goto init_done;
+probe_fail:
+	CDBG(" imx074_sensor_open_init probe fail\n");
+	kfree(imx074_ctrl);
+	return rc;
+init_fail:
+	CDBG(" imx074_sensor_open_init fail\n");
+	imx074_probe_init_done(data);
+	kfree(imx074_ctrl);
+init_done:
+	CDBG("imx074_sensor_open_init done\n");
+	return rc;
+}
+static int imx074_init_client(struct i2c_client *client)
+{
+	/* Initialize the MSM_CAMI2C Chip */
+	init_waitqueue_head(&imx074_wait_queue);
+	return 0;
+}
+
+static const struct i2c_device_id imx074_i2c_id[] = {
+	{"imx074", 0},
+	{ }
+};
+
+static int imx074_i2c_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	int rc = 0;
+	CDBG("imx074_probe called!\n");
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		CDBG("i2c_check_functionality failed\n");
+		goto probe_failure;
+	}
+
+	imx074_sensorw = kzalloc(sizeof(struct imx074_work_t), GFP_KERNEL);
+	if (!imx074_sensorw) {
+		CDBG("kzalloc failed.\n");
+		rc = -ENOMEM;
+		goto probe_failure;
+	}
+
+	i2c_set_clientdata(client, imx074_sensorw);
+	imx074_init_client(client);
+	imx074_client = client;
+
+
+	CDBG("imx074_probe succeeded! rc = %d\n", rc);
+	return 0;
+
+probe_failure:
+	CDBG("imx074_probe failed! rc = %d\n", rc);
+	return rc;
+}
+
+static int __exit imx074_remove(struct i2c_client *client)
+{
+	struct imx074_work_t *sensorw = i2c_get_clientdata(client);
+	free_irq(client->irq, sensorw);
+	imx074_client = NULL;
+	kfree(sensorw);
+	return 0;
+}
+
+static struct i2c_driver imx074_i2c_driver = {
+	.id_table = imx074_i2c_id,
+	.probe  = imx074_i2c_probe,
+	.remove = __exit_p(imx074_remove),
+	.driver = {
+		.name = "imx074",
+	},
+};
+
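+/*
+ * ioctl dispatcher exported through the s_config hook: copies a
+ * sensor_cfg_data block from userspace, serialises all CFG_* commands behind
+ * imx074_mut, and copies results back for the "get" style commands.
+ */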
+int imx074_sensor_config(void __user *argp)
+{
+	struct sensor_cfg_data cdata;
+	long   rc = 0;
+	if (copy_from_user(&cdata,
+		(void *)argp,
+		sizeof(struct sensor_cfg_data)))
+		return -EFAULT;
+	mutex_lock(&imx074_mut);
+	CDBG("imx074_sensor_config: cfgtype = %d\n",
+	cdata.cfgtype);
+	switch (cdata.cfgtype) {
+	case CFG_GET_PICT_FPS:
+		imx074_get_pict_fps(
+			cdata.cfg.gfps.prevfps,
+			&(cdata.cfg.gfps.pictfps));
+		if (copy_to_user((void *)argp,
+			&cdata,
+			sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+	case CFG_GET_PREV_L_PF:
+		cdata.cfg.prevl_pf =
+			imx074_get_prev_lines_pf();
+		if (copy_to_user((void *)argp,
+			&cdata,
+			sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+	case CFG_GET_PREV_P_PL:
+		cdata.cfg.prevp_pl =
+			imx074_get_prev_pixels_pl();
+		if (copy_to_user((void *)argp,
+			&cdata,
+			sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PICT_L_PF:
+		cdata.cfg.pictl_pf =
+			imx074_get_pict_lines_pf();
+		if (copy_to_user((void *)argp,
+			&cdata,
+			sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+	case CFG_GET_PICT_P_PL:
+		cdata.cfg.pictp_pl =
+			imx074_get_pict_pixels_pl();
+		if (copy_to_user((void *)argp,
+			&cdata,
+			sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+	case CFG_GET_PICT_MAX_EXP_LC:
+		cdata.cfg.pict_max_exp_lc =
+			imx074_get_pict_max_exp_lc();
+		if (copy_to_user((void *)argp,
+			&cdata,
+			sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+	case CFG_SET_FPS:
+	case CFG_SET_PICT_FPS:
+		rc = imx074_set_fps(&(cdata.cfg.fps));
+		break;
+	case CFG_SET_EXP_GAIN:
+		rc =
+			imx074_write_exp_gain(
+			cdata.cfg.exp_gain.gain,
+			cdata.cfg.exp_gain.line);
+		break;
+	case CFG_SET_PICT_EXP_GAIN:
+		rc =
+			imx074_set_pict_exp_gain(
+			cdata.cfg.exp_gain.gain,
+			cdata.cfg.exp_gain.line);
+		break;
+	case CFG_SET_MODE:
+		rc = imx074_set_sensor_mode(cdata.mode,
+			cdata.rs);
+		break;
+	case CFG_PWR_DOWN:
+		rc = imx074_power_down();
+		break;
+	case CFG_GET_CALIB_DATA:
+		rc = imx074_read_eeprom_data(&cdata);
+		if (rc < 0)
+			break;
+		if (copy_to_user((void *)argp,
+			&cdata,
+			sizeof(cdata)))
+			rc = -EFAULT;
+		break;
+	case CFG_MOVE_FOCUS:
+		rc =
+			imx074_move_focus(
+			cdata.cfg.focus.dir,
+			cdata.cfg.focus.steps);
+		break;
+	case CFG_SET_DEFAULT_FOCUS:
+		rc =
+			imx074_set_default_focus(
+			cdata.cfg.focus.steps);
+		break;
+	case CFG_GET_AF_MAX_STEPS:
+		cdata.max_steps = IMX074_STEPS_NEAR_TO_CLOSEST_INF;
+		if (copy_to_user((void *)argp,
+			&cdata,
+			sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+	case CFG_SET_EFFECT:
+	default:
+		rc = -EFAULT;
+		break;
+	}
+
+	mutex_unlock(&imx074_mut);
+
+	return rc;
+}
+static int imx074_sensor_release(void)
+{
+	int rc = -EBADF;
+	mutex_lock(&imx074_mut);
+	if (machine_is_msm8x60_fluid())
+		imx074_poweroff_af();
+	imx074_power_down();
+	gpio_set_value_cansleep(imx074_ctrl->sensordata->sensor_reset, 0);
+	usleep_range(5000, 6000);
+	gpio_direction_input(imx074_ctrl->sensordata->sensor_reset);
+	gpio_free(imx074_ctrl->sensordata->sensor_reset);
+	CDBG("imx074_release completed\n");
+	mutex_unlock(&imx074_mut);
+
+	return rc;
+}
+
+static int imx074_sensor_probe(const struct msm_camera_sensor_info *info,
+		struct msm_sensor_ctrl *s)
+{
+	int rc = 0;
+	rc = i2c_add_driver(&imx074_i2c_driver);
+	if (rc < 0 || imx074_client == NULL) {
+		rc = -ENOTSUPP;
+		goto probe_fail;
+	}
+	msm_camio_clk_rate_set(IMX074_DEFAULT_MASTER_CLK_RATE);
+	rc = imx074_probe_init_sensor(info);
+	if (rc < 0)
+		goto probe_fail;
+	s->s_init = imx074_sensor_open_init;
+	s->s_release = imx074_sensor_release;
+	s->s_config  = imx074_sensor_config;
+	s->s_mount_angle = info->sensor_platform_info->mount_angle;
+	imx074_probe_init_done(info);
+	return rc;
+
+probe_fail:
+	CDBG("imx074_sensor_probe: SENSOR PROBE FAILS!\n");
+	i2c_del_driver(&imx074_i2c_driver);
+	return rc;
+}
+
+static struct imx074_format imx074_subdev_info[] = {
+	{
+	.code   = V4L2_MBUS_FMT_SBGGR10_1X10,
+	.colorspace = V4L2_COLORSPACE_JPEG,
+	.fmt    = 1,
+	.order    = 0,
+	},
+	/* more can be supported, to be added later */
+};
+
+static int imx074_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
+			   enum v4l2_mbus_pixelcode *code)
+{
+	printk(KERN_DEBUG "Index is %d\n", index);
+	if ((unsigned int)index >= ARRAY_SIZE(imx074_subdev_info))
+		return -EINVAL;
+
+	*code = imx074_subdev_info[index].code;
+	return 0;
+}
+
+static struct v4l2_subdev_core_ops imx074_subdev_core_ops;
+static struct v4l2_subdev_video_ops imx074_subdev_video_ops = {
+	.enum_mbus_fmt = imx074_enum_fmt,
+};
+
+static struct v4l2_subdev_ops imx074_subdev_ops = {
+	.core = &imx074_subdev_core_ops,
+	.video  = &imx074_subdev_video_ops,
+};
+
+
+static int imx074_sensor_probe_cb(const struct msm_camera_sensor_info *info,
+	struct v4l2_subdev *sdev, struct msm_sensor_ctrl *s)
+{
+	int rc = 0;
+	rc = imx074_sensor_probe(info, s);
+	if (rc < 0)
+		return rc;
+
+	imx074_ctrl = kzalloc(sizeof(struct imx074_ctrl_t), GFP_KERNEL);
+	if (!imx074_ctrl) {
+		CDBG("imx074_ctrl allocation failed!\n");
+		return -ENOMEM;
+	}
+
+	/* probe is successful, init a v4l2 subdevice */
+	printk(KERN_DEBUG "going into v4l2_i2c_subdev_init\n");
+	if (sdev) {
+		v4l2_i2c_subdev_init(sdev, imx074_client,
+						&imx074_subdev_ops);
+		imx074_ctrl->sensor_dev = sdev;
+	}
+	return rc;
+}
+
+static int __imx074_probe(struct platform_device *pdev)
+{
+	return msm_sensor_register(pdev, imx074_sensor_probe_cb);
+}
+
+static struct platform_driver msm_camera_driver = {
+	.probe = __imx074_probe,
+	.driver = {
+		.name = "msm_camera_imx074",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init imx074_init(void)
+{
+	return platform_driver_register(&msm_camera_driver);
+}
+
+module_init(imx074_init);
+
+MODULE_DESCRIPTION("Sony 13 MP Bayer sensor driver");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/media/video/msm/msm.c b/drivers/media/video/msm/msm.c
new file mode 100644
index 0000000..da28403
--- /dev/null
+++ b/drivers/media/video/msm/msm.c
@@ -0,0 +1,2080 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/ioctl.h>
+#include <linux/spinlock.h>
+#include <linux/proc_fs.h>
+#include "msm.h"
+
+
+#define MSM_MAX_CAMERA_SENSORS 5
+
+#ifdef CONFIG_MSM_CAMERA_DEBUG
+#define D(fmt, args...) pr_debug("msm: " fmt, ##args)
+#else
+#define D(fmt, args...) do {} while (0)
+#endif
+
+static unsigned msm_camera_v4l2_nr = -1;
+static struct msm_cam_server_dev g_server_dev;
+static struct class *msm_class;
+static dev_t msm_devno;
+static int vnode_count;
+
+module_param(msm_camera_v4l2_nr, uint, 0644);
+MODULE_PARM_DESC(msm_camera_v4l2_nr, "videoX start number, -1 is autodetect");
+
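+/*
+ * One msm_cam_v4l2_device (and /dev/videoX node) is created per registered
+ * sensor; the global g_server_dev additionally exposes a server node that a
+ * userspace config daemon is expected to poll.  Control commands issued on
+ * the video nodes are relayed to that daemon as private v4l2 events (see
+ * msm_server_control() and msm_ioctl_server() below).
+ */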
+static int msm_setup_v4l2_event_queue(struct v4l2_fh *eventHandle,
+				  struct video_device *pvdev);
+
+static void msm_queue_init(struct msm_device_queue *queue, const char *name)
+{
+	D("%s\n", __func__);
+	spin_lock_init(&queue->lock);
+	queue->len = 0;
+	queue->max = 0;
+	queue->name = name;
+	INIT_LIST_HEAD(&queue->list);
+	init_waitqueue_head(&queue->wait);
+}
+
+static void msm_enqueue(struct msm_device_queue *queue,
+			struct list_head *entry)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&queue->lock, flags);
+	queue->len++;
+	if (queue->len > queue->max) {
+		queue->max = queue->len;
+		pr_info("%s: queue %s new max is %d\n", __func__,
+			queue->name, queue->max);
+	}
+	list_add_tail(entry, &queue->list);
+	wake_up(&queue->wait);
+	D("%s: woke up %s\n", __func__, queue->name);
+	spin_unlock_irqrestore(&queue->lock, flags);
+}
+
+/* callback function from all subdevices of a msm_cam_v4l2_device */
+static void msm_cam_v4l2_subdev_notify(struct v4l2_subdev *sd,
+				    unsigned int notification, void *arg)
+{
+	struct msm_cam_v4l2_device *pcam;
+
+	if (sd == NULL)
+		return;
+
+	pcam = to_pcam(sd->v4l2_dev);
+
+	if (pcam == NULL)
+		return;
+
+	/* forward to media controller for any changes*/
+	if (pcam->mctl.mctl_notify) {
+		pcam->mctl.mctl_notify(&pcam->mctl, notification, arg);
+	}
+}
+
+static int msm_ctrl_cmd_done(void __user *arg)
+{
+	void __user *uptr;
+	struct msm_queue_cmd *qcmd;
+	struct msm_ctrl_cmd *command = &g_server_dev.ctrl;
+
+	D("%s\n", __func__);
+
+	if (copy_from_user(command, arg,
+			sizeof(struct msm_ctrl_cmd)))
+		return -EINVAL;
+
+	qcmd = kzalloc(sizeof(struct msm_queue_cmd), GFP_KERNEL);
+	if (!qcmd)
+		return -ENOMEM;
+	atomic_set(&qcmd->on_heap, 0);
+	uptr = command->value;
+	qcmd->command = command;
+
+	if (command->length > 0) {
+		command->value = g_server_dev.ctrl_data;
+		if (command->length > sizeof(g_server_dev.ctrl_data)) {
+			pr_err("%s: user data %d is too big (max %zu)\n",
+				__func__, command->length,
+				sizeof(g_server_dev.ctrl_data));
+			kfree(qcmd);
+			return -EINVAL;
+		}
+		if (copy_from_user(command->value, uptr, command->length)) {
+			kfree(qcmd);
+			return -EINVAL;
+		}
+	}
+
+	msm_enqueue(&g_server_dev.ctrl_q, &qcmd->list_control);
+	return 0;
+}
+
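+/*
+ * Control round trip: msm_server_control() wraps the command in a private
+ * v4l2 event and queues it on the server node; the userspace config daemon
+ * picks it up via VIDIOC_DQEVENT (msm_ioctl_server), executes it, and posts
+ * the result with MSM_CAM_IOCTL_CTRL_CMD_DONE, which lands on ctrl_q and
+ * wakes the waiter below.
+ */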
+/* send control command to config and wait for results*/
+static int msm_server_control(struct msm_cam_server_dev *server_dev,
+				struct msm_ctrl_cmd *out)
+{
+	int rc = 0;
+	void *value;
+	struct msm_queue_cmd *rcmd;
+	struct msm_ctrl_cmd *ctrlcmd;
+	struct msm_device_queue *queue =  &server_dev->ctrl_q;
+
+	struct v4l2_event v4l2_evt;
+	struct msm_isp_stats_event_ctrl *isp_event;
+	D("%s\n", __func__);
+
+	v4l2_evt.type = V4L2_EVENT_PRIVATE_START + MSM_CAM_RESP_V4L2;
+
+	/* setup event object to transfer the command; */
+	isp_event = (struct msm_isp_stats_event_ctrl *)v4l2_evt.u.data;
+	isp_event->resptype = MSM_CAM_RESP_V4L2;
+	isp_event->isp_data.ctrl = *out;
+
+	/* now send command to config thread in userspace,
+	 * and wait for results */
+	v4l2_event_queue(server_dev->server_command_queue.pvdev,
+					  &v4l2_evt);
+
+	D("%s v4l2_event_queue: type = 0x%x\n", __func__, v4l2_evt.type);
+
+	/* wait for config return status */
+	D("Waiting for config status\n");
+	rc = wait_event_interruptible_timeout(queue->wait,
+		!list_empty_careful(&queue->list),
+		out->timeout_ms);
+	D("Waiting is over for config status\n");
+	if (list_empty_careful(&queue->list)) {
+		if (!rc)
+			rc = -ETIMEDOUT;
+		if (rc < 0) {
+			pr_err("%s: wait_event error %d\n", __func__, rc);
+			return rc;
+		}
+	}
+
+	rcmd = msm_dequeue(queue, list_control);
+	BUG_ON(!rcmd);
+	D("%s Finished servicing ioctl\n", __func__);
+
+	ctrlcmd = (struct msm_ctrl_cmd *)(rcmd->command);
+	value = out->value;
+	if (ctrlcmd->length > 0)
+		memcpy(value, ctrlcmd->value, ctrlcmd->length);
+
+	memcpy(out, ctrlcmd, sizeof(struct msm_ctrl_cmd));
+	out->value = value;
+
+	free_qcmd(rcmd);
+	D("%s: rc %d\n", __func__, rc);
+	/* rc is the time elapsed. */
+	if (rc >= 0) {
+		/* TODO: Refactor msm_ctrl_cmd::status field */
+		if (out->status == 0)
+			rc = -1;
+		else if (out->status == 1)
+			rc = 0;
+		else
+			rc = -EINVAL;
+	}
+	return rc;
+}
+
+/*send open command to server*/
+static int msm_send_open_server(void)
+{
+	int rc = 0;
+	struct msm_ctrl_cmd ctrlcmd;
+	D("%s\n", __func__);
+	ctrlcmd.type	   = MSM_V4L2_OPEN;
+	ctrlcmd.timeout_ms = 10000;
+	ctrlcmd.length	 = 0;
+	ctrlcmd.value    = NULL;
+
+	/* send command to config thread in userspace, and get return value */
+	rc = msm_server_control(&g_server_dev, &ctrlcmd);
+
+	return rc;
+}
+
+static int msm_send_close_server(void)
+{
+	int rc = 0;
+	struct msm_ctrl_cmd ctrlcmd;
+	D("%s\n", __func__);
+	ctrlcmd.type	   = MSM_V4L2_CLOSE;
+	ctrlcmd.timeout_ms = 10000;
+	ctrlcmd.length	 = 0;
+	ctrlcmd.value    = NULL;
+
+	/* send command to config thread in userspace, and get return value */
+	rc = msm_server_control(&g_server_dev, &ctrlcmd);
+
+	return rc;
+}
+
+static int msm_server_set_fmt(struct msm_cam_v4l2_device *pcam, int idx,
+				 struct v4l2_format *pfmt)
+{
+	int rc = 0;
+	int i = 0;
+	struct v4l2_pix_format *pix = &pfmt->fmt.pix;
+	struct msm_ctrl_cmd ctrlcmd;
+
+	D("%s: %d, %d, 0x%x\n", __func__,
+		pfmt->fmt.pix.width, pfmt->fmt.pix.height,
+		pfmt->fmt.pix.pixelformat);
+
+	if (pfmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		D("%s, Attention! Wrong buf-type %d\n", __func__, pfmt->type);
+
+	for (i = 0; i < pcam->num_fmts; i++)
+		if (pcam->usr_fmts[i].fourcc == pix->pixelformat)
+			break;
+	if (i == pcam->num_fmts) {
+		pr_err("%s: User requested pixelformat %x not supported\n",
+						__func__, pix->pixelformat);
+		return -EINVAL;
+	}
+
+	ctrlcmd.type       = MSM_V4L2_VID_CAP_TYPE;
+	ctrlcmd.length     = MSM_V4L2_DIMENSION_SIZE;
+	ctrlcmd.value      = (void *)pfmt->fmt.pix.priv;
+	ctrlcmd.timeout_ms = 10000;
+
+	/* send command to config thread in userspace, and get return value */
+	rc = msm_server_control(&g_server_dev, &ctrlcmd);
+
+	if (rc >= 0) {
+		pcam->dev_inst[idx]->vid_fmt.fmt.pix.width    = pix->width;
+		pcam->dev_inst[idx]->vid_fmt.fmt.pix.height   = pix->height;
+		pcam->dev_inst[idx]->vid_fmt.fmt.pix.field    = pix->field;
+		pcam->dev_inst[idx]->vid_fmt.fmt.pix.pixelformat =
+							pix->pixelformat;
+		pcam->dev_inst[idx]->vid_fmt.fmt.pix.bytesperline =
+							pix->bytesperline;
+		pcam->dev_inst[idx]->vid_bufq.field   = pix->field;
+		pcam->dev_inst[idx]->sensor_pxlcode
+					= pcam->usr_fmts[i].pxlcode;
+		D("%s:inst=0x%x,idx=%d,width=%d,height=%d\n",
+			 __func__, (u32)pcam->dev_inst[idx], idx,
+			 pcam->dev_inst[idx]->vid_fmt.fmt.pix.width,
+			 pcam->dev_inst[idx]->vid_fmt.fmt.pix.height);
+	}
+
+	return rc;
+}
+
+static int msm_server_streamon(struct msm_cam_v4l2_device *pcam, int idx)
+{
+	int rc = 0;
+	struct msm_ctrl_cmd ctrlcmd;
+	D("%s\n", __func__);
+	ctrlcmd.type	   = MSM_V4L2_STREAM_ON;
+	ctrlcmd.timeout_ms = 10000;
+	ctrlcmd.length	 = 0;
+	ctrlcmd.value    = NULL;
+	ctrlcmd.stream_type = pcam->dev_inst[idx]->image_mode;
+	ctrlcmd.vnode_id = pcam->vnode_id;
+
+
+	/* send command to config thread in userspace, and get return value */
+	rc = msm_server_control(&g_server_dev, &ctrlcmd);
+
+	return rc;
+}
+
+static int msm_server_streamoff(struct msm_cam_v4l2_device *pcam, int idx)
+{
+	int rc = 0;
+	struct msm_ctrl_cmd ctrlcmd;
+
+	D("%s, pcam = 0x%x\n", __func__, (u32)pcam);
+	ctrlcmd.type	   = MSM_V4L2_STREAM_OFF;
+	ctrlcmd.timeout_ms = 10000;
+	ctrlcmd.length	 = 0;
+	ctrlcmd.value    = NULL;
+	ctrlcmd.stream_type = pcam->dev_inst[idx]->image_mode;
+	ctrlcmd.vnode_id = pcam->vnode_id;
+
+	/* send command to config thread in userspace, and get return value */
+	rc = msm_server_control(&g_server_dev, &ctrlcmd);
+
+	return rc;
+}
+
+static int msm_server_proc_ctrl_cmd(struct msm_cam_v4l2_device *pcam,
+				 struct v4l2_control *ctrl, int is_set_cmd)
+{
+	int rc = 0;
+	struct msm_ctrl_cmd ctrlcmd, *tmp_cmd;
+	uint8_t *ctrl_data = NULL;
+	void __user *uptr_cmd;
+	void __user *uptr_value;
+	uint32_t cmd_len = sizeof(struct msm_ctrl_cmd);
+	uint32_t value_len;
+
+	tmp_cmd = (struct msm_ctrl_cmd *)ctrl->value;
+	uptr_cmd = (void __user *)ctrl->value;
+	uptr_value = (void __user *)tmp_cmd->value;
+	value_len = tmp_cmd->length;
+
+	D("%s: cmd type = %d, up1=0x%x, ulen1=%d, up2=0x%x, ulen2=%d\n",
+		__func__, tmp_cmd->type, (uint32_t)uptr_cmd, cmd_len,
+		(uint32_t)uptr_value, tmp_cmd->length);
+
+	ctrl_data = kzalloc(value_len+cmd_len, GFP_KERNEL);
+	if (!ctrl_data) {
+		pr_err("%s could not allocate memory\n", __func__);
+		rc = -ENOMEM;
+		goto end;
+	}
+	tmp_cmd = (struct msm_ctrl_cmd *)ctrl_data;
+	if (copy_from_user((void *)ctrl_data, uptr_cmd,
+					cmd_len)) {
+		pr_err("%s: copy_from_user failed.\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+	tmp_cmd->value = (void *)(ctrl_data+cmd_len);
+	if (uptr_value && tmp_cmd->length > 0) {
+		if (copy_from_user((void *)tmp_cmd->value, uptr_value,
+						value_len)) {
+			pr_err("%s: copy_from_user failed, size=%d\n",
+				__func__, value_len);
+			rc = -EINVAL;
+			goto end;
+		}
+	} else {
+		tmp_cmd->value = NULL;
+	}
+
+	ctrlcmd.type = MSM_V4L2_SET_CTRL_CMD;
+	ctrlcmd.length = cmd_len + value_len;
+	ctrlcmd.value = (void *)ctrl_data;
+	ctrlcmd.timeout_ms = 1000;
+	ctrlcmd.vnode_id = pcam->vnode_id;
+	/* send command to config thread in userspace, and get return value */
+	rc = msm_server_control(&g_server_dev, &ctrlcmd);
+	pr_err("%s: msm_server_control rc=%d\n", __func__, rc);
+	if (rc == 0) {
+		if (uptr_value && tmp_cmd->length > 0 &&
+			copy_to_user((void __user *)uptr_value,
+				(void *)(ctrl_data+cmd_len), tmp_cmd->length)) {
+			pr_err("%s: copy_to_user failed, size=%d\n",
+				__func__, tmp_cmd->length);
+			rc = -EINVAL;
+			goto end;
+		}
+		tmp_cmd->value = uptr_value;
+		if (copy_to_user((void __user *)uptr_cmd,
+			(void *)tmp_cmd, cmd_len)) {
+			pr_err("%s: copy_to_user failed in cpy, size=%d\n",
+				__func__, cmd_len);
+			rc = -EINVAL;
+			goto end;
+		}
+	}
+end:
+	pr_err("%s: END, type = %d, vaddr = 0x%x, vlen = %d, status = %d, rc = %d\n",
+		__func__, tmp_cmd->type, (uint32_t)tmp_cmd->value,
+		tmp_cmd->length, tmp_cmd->status, rc);
+	kfree(ctrl_data);
+	return rc;
+}
+
+static int msm_server_s_ctrl(struct msm_cam_v4l2_device *pcam,
+				 struct v4l2_control *ctrl)
+{
+	int rc = 0;
+	struct msm_ctrl_cmd ctrlcmd;
+	uint8_t ctrl_data[max_control_command_size];
+
+	WARN_ON(ctrl == NULL);
+
+	if (ctrl && ctrl->id == MSM_V4L2_PID_CTRL_CMD)
+		return msm_server_proc_ctrl_cmd(pcam, ctrl, 1);
+
+	memset(ctrl_data, 0, sizeof(ctrl_data));
+
+	ctrlcmd.type = MSM_V4L2_SET_CTRL;
+	ctrlcmd.length = sizeof(struct v4l2_control);
+	ctrlcmd.value = (void *)ctrl_data;
+	memcpy(ctrlcmd.value, ctrl, ctrlcmd.length);
+	ctrlcmd.timeout_ms = 1000;
+	ctrlcmd.vnode_id = pcam->vnode_id;
+
+	/* send command to config thread in userspace, and get return value */
+	rc = msm_server_control(&g_server_dev, &ctrlcmd);
+
+	return rc;
+}
+
+static int msm_server_g_ctrl(struct msm_cam_v4l2_device *pcam,
+				 struct v4l2_control *ctrl)
+{
+	int rc = 0;
+	struct msm_ctrl_cmd ctrlcmd;
+	uint8_t ctrl_data[max_control_command_size];
+
+	WARN_ON(ctrl == NULL);
+	if (ctrl && ctrl->id == MSM_V4L2_PID_CTRL_CMD)
+		return msm_server_proc_ctrl_cmd(pcam, ctrl, 0);
+
+	memset(ctrl_data, 0, sizeof(ctrl_data));
+
+	ctrlcmd.type = MSM_V4L2_GET_CTRL;
+	ctrlcmd.length = sizeof(struct v4l2_control);
+	ctrlcmd.value = (void *)ctrl_data;
+	memcpy(ctrlcmd.value, ctrl, ctrlcmd.length);
+	ctrlcmd.timeout_ms = 1000;
+
+	/* send command to config thread in userspace, and get return value */
+	rc = msm_server_control(&g_server_dev, &ctrlcmd);
+
+	ctrl->value = ((struct v4l2_control *)ctrlcmd.value)->value;
+
+	return rc;
+}
+
+static int msm_server_q_ctrl(struct msm_cam_v4l2_device *pcam,
+			struct v4l2_queryctrl *queryctrl)
+{
+	int rc = 0;
+	struct msm_ctrl_cmd ctrlcmd;
+	uint8_t ctrl_data[max_control_command_size];
+
+	WARN_ON(queryctrl == NULL);
+	memset(ctrl_data, 0, sizeof(ctrl_data));
+
+	ctrlcmd.type = MSM_V4L2_QUERY_CTRL;
+	ctrlcmd.length = sizeof(struct v4l2_queryctrl);
+	ctrlcmd.value = (void *)ctrl_data;
+	memcpy(ctrlcmd.value, queryctrl, ctrlcmd.length);
+	ctrlcmd.timeout_ms = 1000;
+
+	/* send command to config thread in userspace, and get return value */
+	rc = msm_server_control(&g_server_dev, &ctrlcmd);
+	D("%s: rc = %d\n", __func__, rc);
+
+	if (rc >= 0)
+		memcpy(queryctrl, ctrlcmd.value, sizeof(struct v4l2_queryctrl));
+
+	return rc;
+}
+
+static int msm_server_get_fmt(struct msm_cam_v4l2_device *pcam,
+		 int idx, struct v4l2_format *pfmt)
+{
+	struct v4l2_pix_format *pix = &pfmt->fmt.pix;
+
+	pix->width	  = pcam->dev_inst[idx]->vid_fmt.fmt.pix.width;
+	pix->height	 = pcam->dev_inst[idx]->vid_fmt.fmt.pix.height;
+	pix->field	  = pcam->dev_inst[idx]->vid_fmt.fmt.pix.field;
+	pix->pixelformat = pcam->dev_inst[idx]->vid_fmt.fmt.pix.pixelformat;
+	pix->bytesperline = pcam->dev_inst[idx]->vid_fmt.fmt.pix.bytesperline;
+	pix->colorspace	 = pcam->dev_inst[idx]->vid_fmt.fmt.pix.colorspace;
+	if (pix->bytesperline < 0)
+		return pix->bytesperline;
+
+	pix->sizeimage	  = pix->height * pix->bytesperline;
+
+	return 0;
+}
+
+static int msm_server_try_fmt(struct msm_cam_v4l2_device *pcam,
+				 struct v4l2_format *pfmt)
+{
+	int rc = 0;
+	int i = 0;
+	struct v4l2_pix_format *pix = &pfmt->fmt.pix;
+	struct v4l2_mbus_framefmt sensor_fmt;
+
+	D("%s: 0x%x\n", __func__, pix->pixelformat);
+
+	if (pfmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+		pr_err("%s: pfmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE!\n",
+							__func__);
+		return -EINVAL;
+	}
+
+	/* check if the format is supported by this host-sensor combo */
+	for (i = 0; i < pcam->num_fmts; i++) {
+		D("%s: usr_fmts.fourcc: 0x%x\n", __func__,
+			pcam->usr_fmts[i].fourcc);
+		if (pcam->usr_fmts[i].fourcc == pix->pixelformat)
+			break;
+	}
+
+	if (i == pcam->num_fmts) {
+		pr_err("%s: Format %x not found\n", __func__, pix->pixelformat);
+		return -EINVAL;
+	}
+
+	sensor_fmt.width  = pix->width;
+	sensor_fmt.height = pix->height;
+	sensor_fmt.field  = pix->field;
+	sensor_fmt.colorspace = pix->colorspace;
+	sensor_fmt.code   = pcam->usr_fmts[i].pxlcode;
+
+	pix->width	= sensor_fmt.width;
+	pix->height   = sensor_fmt.height;
+	pix->field	= sensor_fmt.field;
+	pix->colorspace   = sensor_fmt.colorspace;
+
+	return rc;
+}
+
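+/*
+ * Format negotiation: try_fmt only validates the pixelformat against the
+ * usr_fmts table advertised by the ISP, s_fmt forwards the private
+ * MSM_V4L2_DIMENSION_SIZE blob to the config daemon, and g_fmt simply echoes
+ * the per-instance format cached in dev_inst[].
+ */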
+/*
+ *
+ * implementation of v4l2_ioctl_ops
+ *
+ */
+static int msm_camera_v4l2_querycap(struct file *f, void *pctx,
+				struct v4l2_capability *pcaps)
+{
+	struct msm_cam_v4l2_device *pcam  = video_drvdata(f);
+
+	D("%s\n", __func__);
+	WARN_ON(pctx != f->private_data);
+
+	/* some other day, some other time */
+	/*cap->version = LINUX_VERSION_CODE; */
+	strlcpy(pcaps->driver, pcam->pdev->name, sizeof(pcaps->driver));
+	pcaps->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+	return 0;
+}
+
+static int msm_camera_v4l2_queryctrl(struct file *f, void *pctx,
+				struct v4l2_queryctrl *pqctrl)
+{
+	int rc = 0;
+	struct msm_cam_v4l2_device *pcam  = video_drvdata(f);
+
+	D("%s\n", __func__);
+	WARN_ON(pctx != f->private_data);
+
+	mutex_lock(&pcam->vid_lock);
+	rc = msm_server_q_ctrl(pcam, pqctrl);
+	mutex_unlock(&pcam->vid_lock);
+	return rc;
+}
+
+static int msm_camera_v4l2_g_ctrl(struct file *f, void *pctx,
+					struct v4l2_control *c)
+{
+	int rc = 0;
+	struct msm_cam_v4l2_device *pcam  = video_drvdata(f);
+
+	D("%s\n", __func__);
+	WARN_ON(pctx != f->private_data);
+
+	mutex_lock(&pcam->vid_lock);
+	rc = msm_server_g_ctrl(pcam, c);
+	mutex_unlock(&pcam->vid_lock);
+
+	return rc;
+}
+
+static int msm_camera_v4l2_s_ctrl(struct file *f, void *pctx,
+					struct v4l2_control *ctrl)
+{
+	int rc = 0;
+	struct msm_cam_v4l2_device *pcam  = video_drvdata(f);
+
+	D("%s\n", __func__);
+	WARN_ON(pctx != f->private_data);
+	mutex_lock(&pcam->vid_lock);
+	if (ctrl->id == MSM_V4L2_PID_CAM_MODE)
+		pcam->op_mode = ctrl->value;
+	rc = msm_server_s_ctrl(pcam, ctrl);
+	mutex_unlock(&pcam->vid_lock);
+
+	return rc;
+}
+
+static int msm_camera_v4l2_reqbufs(struct file *f, void *pctx,
+				struct v4l2_requestbuffers *pb)
+{
+	int rc = 0;
+	int i = 0;
+	/*struct msm_cam_v4l2_device *pcam  = video_drvdata(f);*/
+	struct msm_cam_v4l2_dev_inst *pcam_inst  = f->private_data;
+
+	D("%s\n", __func__);
+	WARN_ON(pctx != f->private_data);
+
+	if (!pb->count) {
+		if (pcam_inst->vid_bufq.streaming)
+			videobuf_stop(&pcam_inst->vid_bufq);
+		else
+			videobuf_queue_cancel(&pcam_inst->vid_bufq);
+
+		/* free the queue: function name is ambiguous it frees all
+		types of buffers (mmap or userptr - it doesn't matter) */
+		rc = videobuf_mmap_free(&pcam_inst->vid_bufq);
+	} else {
+		rc = videobuf_reqbufs(&pcam_inst->vid_bufq, pb);
+		if (rc < 0)
+			return rc;
+		/* Now initialize the local msm_frame_buffer structure */
+		for (i = 0; i < pb->count; i++) {
+			struct msm_frame_buffer *buf = container_of(
+						pcam_inst->vid_bufq.bufs[i],
+						struct msm_frame_buffer,
+						vidbuf);
+			buf->inuse = 0;
+			INIT_LIST_HEAD(&buf->vidbuf.queue);
+		}
+	}
+	pcam_inst->buf_count = pb->count;
+	return rc;
+}
+
+static int msm_camera_v4l2_querybuf(struct file *f, void *pctx,
+					struct v4l2_buffer *pb)
+{
+	/* get the video device */
+	struct msm_cam_v4l2_dev_inst *pcam_inst  = f->private_data;
+
+	D("%s\n", __func__);
+	WARN_ON(pctx != f->private_data);
+
+	return videobuf_querybuf(&pcam_inst->vid_bufq, pb);
+}
+
+static int msm_camera_v4l2_qbuf(struct file *f, void *pctx,
+					struct v4l2_buffer *pb)
+{
+	int rc = 0;
+	/* get the camera device */
+	struct msm_cam_v4l2_dev_inst *pcam_inst  = f->private_data;
+
+	D("%s\n", __func__);
+	WARN_ON(pctx != f->private_data);
+
+	rc = videobuf_qbuf(&pcam_inst->vid_bufq, pb);
+	D("%s, videobuf_qbuf returns %d\n", __func__, rc);
+
+	return rc;
+}
+
+static int msm_camera_v4l2_dqbuf(struct file *f, void *pctx,
+					struct v4l2_buffer *pb)
+{
+	int rc = 0;
+	/* get the camera device */
+	struct msm_cam_v4l2_dev_inst *pcam_inst  = f->private_data;
+
+	D("%s\n", __func__);
+	WARN_ON(pctx != f->private_data);
+
+	rc = videobuf_dqbuf(&pcam_inst->vid_bufq, pb, f->f_flags & O_NONBLOCK);
+	D("%s, videobuf_dqbuf returns %d\n", __func__, rc);
+
+	return rc;
+}
+
+static int msm_camera_v4l2_streamon(struct file *f, void *pctx,
+					enum v4l2_buf_type i)
+{
+	int rc = 0;
+	struct videobuf_buffer *buf;
+	int cnt = 0;
+	/* get the camera device */
+	struct msm_cam_v4l2_device *pcam  = video_drvdata(f);
+	struct msm_cam_v4l2_dev_inst *pcam_inst  = f->private_data;
+
+	D("%s\n", __func__);
+	WARN_ON(pctx != f->private_data);
+
+	D("%s Calling videobuf_streamon", __func__);
+	/* if HW streaming on is successful, start buffer streaming */
+	rc = videobuf_streamon(&pcam_inst->vid_bufq);
+	D("%s, videobuf_streamon returns %d\n", __func__, rc);
+
+	mutex_lock(&pcam->vid_lock);
+	/* turn HW (VFE/sensor) streaming */
+	rc = msm_server_streamon(pcam, pcam_inst->my_index);
+	mutex_unlock(&pcam->vid_lock);
+	D("%s rc = %d\n", __func__, rc);
+	if (rc < 0) {
+		pr_err("%s: hw failed to start streaming\n", __func__);
+		return rc;
+	}
+
+	list_for_each_entry(buf, &pcam_inst->vid_bufq.stream, stream) {
+		D("%s index %d, state %d\n", __func__, cnt, buf->state);
+		cnt++;
+	}
+
+	return rc;
+}
+
+static int msm_camera_v4l2_streamoff(struct file *f, void *pctx,
+					enum v4l2_buf_type i)
+{
+	int rc = 0;
+	/* get the camera device */
+	struct msm_cam_v4l2_device *pcam  = video_drvdata(f);
+	struct msm_cam_v4l2_dev_inst *pcam_inst  = f->private_data;
+
+	D("%s\n", __func__);
+	WARN_ON(pctx != f->private_data);
+
+	/* first turn of HW (VFE/sensor) streaming so that buffers are
+	  not in use when we free the buffers */
+	mutex_lock(&pcam->vid_lock);
+	rc = msm_server_streamoff(pcam, pcam_inst->my_index);
+	mutex_unlock(&pcam->vid_lock);
+	if (rc < 0)
+		pr_err("%s: hw failed to stop streaming\n", __func__);
+
+	/* stop buffer streaming */
+	rc = videobuf_streamoff(&pcam_inst->vid_bufq);
+	D("%s, videobuf_streamoff returns %d\n", __func__, rc);
+
+	return rc;
+}
+
+static int msm_camera_v4l2_enum_fmt_cap(struct file *f, void *pctx,
+					struct v4l2_fmtdesc *pfmtdesc)
+{
+	/* get the video device */
+	struct msm_cam_v4l2_device *pcam  = video_drvdata(f);
+	const struct msm_isp_color_fmt *isp_fmt;
+
+	D("%s\n", __func__);
+	WARN_ON(pctx != f->private_data);
+
+	if (pfmtdesc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	if (pfmtdesc->index >= pcam->num_fmts)
+		return -EINVAL;
+
+	isp_fmt = &pcam->usr_fmts[pfmtdesc->index];
+
+	if (isp_fmt->name)
+		strlcpy(pfmtdesc->description, isp_fmt->name,
+						sizeof(pfmtdesc->description));
+
+	pfmtdesc->pixelformat = isp_fmt->fourcc;
+
+	D("%s: [%d] 0x%x, %s\n", __func__, pfmtdesc->index,
+		isp_fmt->fourcc, isp_fmt->name);
+	return 0;
+}
+
+static int msm_camera_v4l2_g_fmt_cap(struct file *f,
+		void *pctx, struct v4l2_format *pfmt)
+{
+	int rc = 0;
+	/* get the video device */
+	struct msm_cam_v4l2_device *pcam  = video_drvdata(f);
+	struct msm_cam_v4l2_dev_inst *pcam_inst  = f->private_data;
+
+	D("%s\n", __func__);
+	WARN_ON(pctx != f->private_data);
+
+	if (pfmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	rc = msm_server_get_fmt(pcam, pcam_inst->my_index, pfmt);
+
+	D("%s: current_fmt->fourcc: 0x%08x, rc = %d\n", __func__,
+				pfmt->fmt.pix.pixelformat, rc);
+	return rc;
+}
+
+/* This function will readjust the format parameters based in HW
+  capabilities. Called by s_fmt_cap
+*/
+static int msm_camera_v4l2_try_fmt_cap(struct file *f, void *pctx,
+					struct v4l2_format *pfmt)
+{
+	int rc = 0;
+	/* get the video device */
+	struct msm_cam_v4l2_device *pcam  = video_drvdata(f);
+
+	D("%s\n", __func__);
+	WARN_ON(pctx != f->private_data);
+
+	rc = msm_server_try_fmt(pcam, pfmt);
+	if (rc)
+		pr_err("Format %x not found, rc = %d\n",
+				pfmt->fmt.pix.pixelformat, rc);
+
+	return rc;
+}
+
+/* This function will reconfig the v4l2 driver and HW device, it should be
+   called after the streaming is stopped.
+*/
+static int msm_camera_v4l2_s_fmt_cap(struct file *f, void *pctx,
+					struct v4l2_format *pfmt)
+{
+	int rc;
+	void __user *uptr;
+	/* get the video device */
+	struct msm_cam_v4l2_device *pcam  = video_drvdata(f);
+	struct msm_cam_v4l2_dev_inst *pcam_inst  = f->private_data;
+
+	D("%s\n", __func__);
+	D("%s, inst=0x%x,idx=%d,priv = 0x%p\n",
+		__func__, (u32)pcam_inst, pcam_inst->my_index,
+		(void *)pfmt->fmt.pix.priv);
+	WARN_ON(pctx != f->private_data);
+
+	uptr = (void __user *)pfmt->fmt.pix.priv;
+	pfmt->fmt.pix.priv = (__u32)kzalloc(MSM_V4L2_DIMENSION_SIZE,
+							GFP_KERNEL);
+
+	if (!pfmt->fmt.pix.priv) {
+		pr_err("%s could not allocate memory\n", __func__);
+		return -ENOMEM;
+	}
+	D("%s Copying priv data:n", __func__);
+	if (copy_from_user((void *)pfmt->fmt.pix.priv, uptr,
+					MSM_V4L2_DIMENSION_SIZE)) {
+		pr_err("%s: copy_from_user failed.\n", __func__);
+		kfree((void *)pfmt->fmt.pix.priv);
+		return -EINVAL;
+	}
+	D("%s Done Copying priv data\n", __func__);
+
+	mutex_lock(&pcam->vid_lock);
+
+	rc = msm_server_set_fmt(pcam, pcam_inst->my_index, pfmt);
+	if (rc < 0) {
+		pr_err("%s: msm_server_set_fmt Error: %d\n",
+				__func__, rc);
+		goto done;
+	}
+
+	if (copy_to_user(uptr, (const void *)pfmt->fmt.pix.priv,
+					MSM_V4L2_DIMENSION_SIZE)) {
+		pr_err("%s: copy_to_user failed\n", __func__);
+		rc = -EINVAL;
+	}
+
+done:
+	kfree((void *)pfmt->fmt.pix.priv);
+	pfmt->fmt.pix.priv = (__u32)uptr;
+
+	mutex_unlock(&pcam->vid_lock);
+
+	return rc;
+}
+
+static int msm_camera_v4l2_g_jpegcomp(struct file *f, void *pctx,
+				struct v4l2_jpegcompression *pcomp)
+{
+	int rc = -EINVAL;
+
+	D("%s\n", __func__);
+	WARN_ON(pctx != f->private_data);
+
+	return rc;
+}
+
+static int msm_camera_v4l2_s_jpegcomp(struct file *f, void *pctx,
+				struct v4l2_jpegcompression *pcomp)
+{
+	int rc = -EINVAL;
+
+	D("%s\n", __func__);
+	WARN_ON(pctx != f->private_data);
+
+	return rc;
+}
+
+
+static int msm_camera_v4l2_g_crop(struct file *f, void *pctx,
+					struct v4l2_crop *a)
+{
+	int rc = -EINVAL;
+
+	D("%s\n", __func__);
+	WARN_ON(pctx != f->private_data);
+
+	return rc;
+}
+
+static int msm_camera_v4l2_s_crop(struct file *f, void *pctx,
+					struct v4l2_crop *a)
+{
+	int rc = -EINVAL;
+
+	D("%s\n", __func__);
+	WARN_ON(pctx != f->private_data);
+
+	return rc;
+}
+
+/* Stream type-dependent parameter ioctls */
+static int msm_camera_v4l2_g_parm(struct file *f, void *pctx,
+				struct v4l2_streamparm *a)
+{
+	int rc = -EINVAL;
+	return rc;
+}
+static int msm_vidbuf_get_path(u32 extendedmode)
+{
+	switch (extendedmode) {
+	case MSM_V4L2_EXT_CAPTURE_MODE_THUMBNAIL:
+		return OUTPUT_TYPE_T;
+	case MSM_V4L2_EXT_CAPTURE_MODE_MAIN:
+		return OUTPUT_TYPE_S;
+	case MSM_V4L2_EXT_CAPTURE_MODE_VIDEO:
+		return OUTPUT_TYPE_V;
+	case MSM_V4L2_EXT_CAPTURE_MODE_DEFAULT:
+	case MSM_V4L2_EXT_CAPTURE_MODE_PREVIEW:
+	default:
+		return OUTPUT_TYPE_P;
+	}
+}
+
+static int msm_camera_v4l2_s_parm(struct file *f, void *pctx,
+				struct v4l2_streamparm *a)
+{
+	int rc = 0;
+	struct msm_cam_v4l2_dev_inst *pcam_inst  = f->private_data;
+	pcam_inst->image_mode = a->parm.capture.extendedmode;
+	pcam_inst->pcam->dev_inst_map[pcam_inst->image_mode] = pcam_inst;
+	pcam_inst->path = msm_vidbuf_get_path(pcam_inst->image_mode);
+	D("%spath=%d,rc=%d\n", __func__,
+		pcam_inst->path, rc);
+	return rc;
+}
+
+static int msm_camera_v4l2_subscribe_event(struct v4l2_fh *fh,
+			struct v4l2_event_subscription *sub)
+{
+	int rc = 0;
+
+	D("%s\n", __func__);
+	D("fh = 0x%x\n", (u32)fh);
+
+	/* handle special case where user wants to subscribe to all
+	the events */
+	D("sub->type = 0x%x\n", sub->type);
+
+	if (sub->type == V4L2_EVENT_ALL) {
+		/*sub->type = MSM_ISP_EVENT_START;*/
+		sub->type = V4L2_EVENT_PRIVATE_START + MSM_CAM_RESP_CTRL;
+
+		D("sub->type start = 0x%x\n", sub->type);
+		do {
+			rc = v4l2_event_subscribe(fh, sub);
+			if (rc < 0) {
+				D("%s: failed for evtType = 0x%x, rc = %d\n",
+						__func__, sub->type, rc);
+				/* unsubscribe all events here and return */
+				sub->type = V4L2_EVENT_ALL;
+				v4l2_event_unsubscribe(fh, sub);
+				return rc;
+			} else
+				D("%s: subscribed evtType = 0x%x, rc = %d\n",
+						__func__, sub->type, rc);
+			sub->type++;
+			D("sub->type while = 0x%x\n", sub->type);
+		} while (sub->type !=
+			V4L2_EVENT_PRIVATE_START + MSM_CAM_RESP_MAX);
+	} else {
+		D("sub->type not V4L2_EVENT_ALL = 0x%x\n", sub->type);
+		rc = v4l2_event_subscribe(fh, sub);
+		if (rc < 0)
+			D("%s: failed for evtType = 0x%x, rc = %d\n",
+						__func__, sub->type, rc);
+	}
+
+	D("%s: rc = %d\n", __func__, rc);
+	return rc;
+}
+
+static int msm_camera_v4l2_unsubscribe_event(struct v4l2_fh *fh,
+			struct v4l2_event_subscription *sub)
+{
+	int rc = 0;
+
+	D("%s\n", __func__);
+	D("fh = 0x%x\n", (u32)fh);
+
+	rc = v4l2_event_unsubscribe(fh, sub);
+
+	D("%s: rc = %d\n", __func__, rc);
+	return rc;
+}
+
+/* v4l2_ioctl_ops */
+static const struct v4l2_ioctl_ops g_msm_ioctl_ops = {
+	.vidioc_querycap = msm_camera_v4l2_querycap,
+
+	.vidioc_s_crop = msm_camera_v4l2_s_crop,
+	.vidioc_g_crop = msm_camera_v4l2_g_crop,
+
+	.vidioc_queryctrl = msm_camera_v4l2_queryctrl,
+	.vidioc_g_ctrl = msm_camera_v4l2_g_ctrl,
+	.vidioc_s_ctrl = msm_camera_v4l2_s_ctrl,
+
+	.vidioc_reqbufs = msm_camera_v4l2_reqbufs,
+	.vidioc_querybuf = msm_camera_v4l2_querybuf,
+	.vidioc_qbuf = msm_camera_v4l2_qbuf,
+	.vidioc_dqbuf = msm_camera_v4l2_dqbuf,
+
+	.vidioc_streamon = msm_camera_v4l2_streamon,
+	.vidioc_streamoff = msm_camera_v4l2_streamoff,
+
+	/* format ioctls */
+	.vidioc_enum_fmt_vid_cap = msm_camera_v4l2_enum_fmt_cap,
+	.vidioc_try_fmt_vid_cap = msm_camera_v4l2_try_fmt_cap,
+	.vidioc_g_fmt_vid_cap = msm_camera_v4l2_g_fmt_cap,
+	.vidioc_s_fmt_vid_cap = msm_camera_v4l2_s_fmt_cap,
+
+	.vidioc_g_jpegcomp = msm_camera_v4l2_g_jpegcomp,
+	.vidioc_s_jpegcomp = msm_camera_v4l2_s_jpegcomp,
+
+	/* Stream type-dependent parameter ioctls */
+	.vidioc_g_parm =  msm_camera_v4l2_g_parm,
+	.vidioc_s_parm =  msm_camera_v4l2_s_parm,
+
+	/* event subscribe/unsubscribe */
+	.vidioc_subscribe_event = msm_camera_v4l2_subscribe_event,
+	.vidioc_unsubscribe_event = msm_camera_v4l2_unsubscribe_event,
+};
+
+/* open an active camera session to manage the streaming logic */
+static int msm_cam_server_open_session(struct msm_cam_server_dev *ps,
+	struct msm_cam_v4l2_device *pcam)
+{
+	int rc = 0;
+	D("%s\n", __func__);
+
+	if (!ps || !pcam) {
+		pr_err("%s NULL pointer passed in!\n", __func__);
+		return -EINVAL;
+	}
+
+	/* bookkeeping for this camera session */
+	ps->pcam_active = pcam;
+	atomic_inc(&ps->number_pcam_active);
+
+	D("config pcam = 0x%p\n", ps->pcam_active);
+
+	/* initialize the media controller module */
+	msm_mctl_init_module(pcam);
+
+	/* yyan: for single-VFE MSMs (8660, 8960v1), just populate the session
+	 * with the VFE device that has registered */
+	pcam->mctl.sensor_sdev = &(pcam->sensor_sdev);
+
+	pcam->mctl.isp_sdev = ps->isp_subdev[0];
+	pcam->mctl.ispif_fns = &ps->ispif_fns;
+
+
+	/* yyan: 8960 bring-up - no VPE or flash yet; populate later */
+	pcam->mctl.vpe_sdev = NULL;
+	pcam->mctl.flash_sdev = NULL;
+
+	return rc;
+
+}
+
+/* close an active camera session to server */
+static int msm_cam_server_close_session(struct msm_cam_server_dev *ps,
+	struct msm_cam_v4l2_device *pcam)
+{
+	int rc = 0;
+	D("%s\n", __func__);
+
+	if (!ps || !pcam) {
+		D("%s NULL pointer passed in!\n", __func__);
+		return -EINVAL;
+	}
+
+
+	atomic_dec(&ps->number_pcam_active);
+	ps->pcam_active = NULL;
+
+	return rc;
+}
+/* v4l2_file_operations */
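+/*
+ * Open path: grab a free dev_inst slot for this file handle; the first open
+ * of a node also opens the server session, powers the hardware through
+ * mctl_open(), registers the ISP subdev and tells the config daemon via
+ * MSM_V4L2_OPEN.  Later opens only set up another buffer queue instance.
+ */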
+static int msm_open(struct file *f)
+{
+	int i;
+	int rc = -EINVAL;
+	/*struct msm_isp_ops *p_isp = 0;*/
+	/* get the video device */
+	struct msm_cam_v4l2_device *pcam  = video_drvdata(f);
+	struct msm_cam_v4l2_dev_inst *pcam_inst;
+
+	D("%s\n", __func__);
+
+	if (!pcam) {
+		pr_err("%s NULL pointer passed in!\n", __func__);
+		return rc;
+	}
+	mutex_lock(&pcam->vid_lock);
+	for (i = 0; i < MSM_DEV_INST_MAX; i++) {
+		if (pcam->dev_inst[i] == NULL)
+			break;
+	}
+	/* if no instance is available, return error */
+	if (i == MSM_DEV_INST_MAX) {
+		mutex_unlock(&pcam->vid_lock);
+		return rc;
+	}
+	pcam_inst = kzalloc(sizeof(struct msm_cam_v4l2_dev_inst), GFP_KERNEL);
+	if (!pcam_inst) {
+		mutex_unlock(&pcam->vid_lock);
+		return rc;
+	}
+	pcam_inst->sensor_pxlcode = pcam->usr_fmts[0].pxlcode;
+	pcam_inst->my_index = i;
+	pcam_inst->pcam = pcam;
+	pcam->dev_inst[i] = pcam_inst;
+
+	D("%s for %s\n", __func__, pcam->pdev->name);
+	pcam->use_count++;
+	if (pcam->use_count == 1) {
+
+		rc = msm_cam_server_open_session(&g_server_dev, pcam);
+		if (rc < 0) {
+			pr_err("%s: cam_server_open_session failed %d\n",
+			__func__, rc);
+			mutex_unlock(&pcam->vid_lock);
+			return rc;
+		}
+
+		/* Should be set to sensor ops if any but right now its OK!! */
+		if (!pcam->mctl.mctl_open) {
+			D("%s: media controller is not initialized\n",
+				 __func__);
+			mutex_unlock(&pcam->vid_lock);
+			return -ENODEV;
+		}
+
+		/* Now we really have to activate the camera */
+		D("%s: call mctl_open\n", __func__);
+		rc = pcam->mctl.mctl_open(&(pcam->mctl), MSM_APPS_ID_V4L2);
+
+		if (rc < 0) {
+			mutex_unlock(&pcam->vid_lock);
+			pr_err("%s: HW open failed rc = 0x%x\n",  __func__, rc);
+			return rc;
+		}
+		pcam->mctl.sync.pcam_sync = pcam;
+
+		/* Register isp subdev */
+		rc = v4l2_device_register_subdev(&pcam->v4l2_dev,
+					&pcam->mctl.isp_sdev->sd);
+		if (rc < 0) {
+			mutex_unlock(&pcam->vid_lock);
+			pr_err("%s: v4l2_device_register_subdev failed rc = %d\n",
+				__func__, rc);
+			return rc;
+		}
+	}
+
+	/* Initialize the video queue */
+	rc = pcam->mctl.mctl_vidbuf_init(pcam_inst, &pcam_inst->vid_bufq);
+	if (rc < 0) {
+		mutex_unlock(&pcam->vid_lock);
+		return rc;
+	}
+
+
+	f->private_data = pcam_inst;
+
+	D("f->private_data = 0x%x, pcam = 0x%x\n",
+		(u32)f->private_data, (u32)pcam_inst);
+
+
+	if (pcam->use_count == 1) {
+		rc = msm_send_open_server();
+		if (rc < 0) {
+			mutex_unlock(&pcam->vid_lock);
+			pr_err("%s failed\n", __func__);
+			return rc;
+		}
+	}
+	mutex_unlock(&pcam->vid_lock);
+	/* rc = msm_cam_server_open_session(g_server_dev, pcam);*/
+	return rc;
+}
+
+static int msm_mmap(struct file *f, struct vm_area_struct *vma)
+{
+	int rc = 0;
+	struct msm_cam_v4l2_dev_inst *pcam_inst  = f->private_data;
+
+	D("mmap called, vma=0x%08lx\n", (unsigned long)vma);
+
+	rc = videobuf_mmap_mapper(&pcam_inst->vid_bufq, vma);
+
+	D("vma start=0x%08lx, size=%ld, ret=%d\n",
+		(unsigned long)vma->vm_start,
+		(unsigned long)vma->vm_end - (unsigned long)vma->vm_start,
+		rc);
+
+	return rc;
+}
+
+static int msm_close(struct file *f)
+{
+	int rc = 0;
+	struct msm_cam_v4l2_device *pcam  = video_drvdata(f);
+	struct msm_cam_v4l2_dev_inst *pcam_inst  = f->private_data;
+
+	D("%s\n", __func__);
+	if (!pcam) {
+		pr_err("%s NULL pointer of camera device!\n", __func__);
+		return -EINVAL;
+	}
+
+
+	mutex_lock(&pcam->vid_lock);
+	pcam->use_count--;
+	pcam->dev_inst_map[pcam_inst->image_mode] = NULL;
+	videobuf_stop(&pcam_inst->vid_bufq);
+	/* free the queue: function name is ambiguous it frees all
+	types of buffers (mmap or userptr - it doesn't matter) */
+	rc = videobuf_mmap_free(&pcam_inst->vid_bufq);
+	if (rc  < 0)
+		pr_err("%s: unable to free buffers\n", __func__);
+	pcam->dev_inst[pcam_inst->my_index] = NULL;
+	kfree(pcam_inst);
+	f->private_data = NULL;
+
+	if (pcam->use_count == 0) {
+		if (pcam->mctl.mctl_release) {
+			rc = pcam->mctl.mctl_release(&(pcam->mctl));
+			if (rc < 0)
+				pr_err("mctl_release fails %d\n", rc);
+		}
+
+		v4l2_device_unregister_subdev(&pcam->mctl.isp_sdev->sd);
+
+		rc = msm_cam_server_close_session(&g_server_dev, pcam);
+		if (rc < 0)
+			pr_err("msm_cam_server_close_session fails %d\n", rc);
+
+		rc = msm_send_close_server();
+		if (rc < 0)
+			pr_err("msm_send_close_server failed %d\n", rc);
+
+		dma_release_declared_memory(&pcam->pdev->dev);
+	}
+	mutex_unlock(&pcam->vid_lock);
+	return rc;
+}
+
+static unsigned int msm_poll(struct file *f, struct poll_table_struct *wait)
+{
+	int rc = 0;
+	struct msm_cam_v4l2_device *pcam  = video_drvdata(f);
+	struct msm_cam_v4l2_dev_inst *pcam_inst  = f->private_data;
+
+	D("%s\n", __func__);
+	if (!pcam) {
+		pr_err("%s NULL pointer of camera device!\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!pcam_inst->vid_bufq.streaming) {
+		D("%s vid_bufq.streaming is off, inst=0x%x\n",
+			__func__, (u32)pcam_inst);
+		return -EINVAL;
+	}
+
+	rc |= videobuf_poll_stream(f, &pcam_inst->vid_bufq, wait);
+	D("%s returns, rc  = 0x%x\n", __func__, rc);
+
+	return rc;
+}
+
+static unsigned int msm_poll_server(struct file *fp,
+					struct poll_table_struct *wait)
+{
+	int rc = 0;
+
+	D("%s\n", __func__);
+	poll_wait(fp,
+		 &g_server_dev.server_command_queue.eventHandle.events->wait,
+		 wait);
+	if (v4l2_event_pending(&g_server_dev.server_command_queue.eventHandle))
+		rc |= POLLPRI;
+
+	return rc;
+}
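+/*
+ * ioctls on the server node: enumerate camera and config nodes for the
+ * daemon, subscribe to and dequeue the relayed control events, and complete
+ * a pending control with MSM_CAM_IOCTL_CTRL_CMD_DONE.
+ */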
+static long msm_ioctl_server(struct file *fp, unsigned int cmd,
+	unsigned long arg)
+{
+	int rc = -EINVAL;
+	struct v4l2_event ev;
+	struct msm_camera_info temp_cam_info;
+	struct msm_cam_config_dev_info temp_config_info;
+	struct v4l2_event_subscription temp_sub;
+	int i;
+
+	D("%s: cmd %d\n", __func__, _IOC_NR(cmd));
+
+	switch (cmd) {
+	case MSM_CAM_IOCTL_GET_CAMERA_INFO:
+		if (copy_from_user(&temp_cam_info, (void __user *)arg,
+					  sizeof(struct msm_camera_info))) {
+			rc = -EINVAL;
+			return rc;
+		}
+		for (i = 0; i < g_server_dev.camera_info.num_cameras; i++) {
+			if (copy_to_user((void __user *)
+			temp_cam_info.video_dev_name[i],
+			 g_server_dev.camera_info.video_dev_name[i],
+			strlen(g_server_dev.camera_info.video_dev_name[i]))) {
+				rc = -EINVAL;
+				return rc;
+			}
+			temp_cam_info.has_3d_support[i] =
+				g_server_dev.camera_info.has_3d_support[i];
+			temp_cam_info.is_internal_cam[i] =
+				g_server_dev.camera_info.is_internal_cam[i];
+			temp_cam_info.s_mount_angle[i] =
+				g_server_dev.camera_info.s_mount_angle[i];
+			temp_cam_info.sensor_type[i] =
+				g_server_dev.camera_info.sensor_type[i];
+
+		}
+		temp_cam_info.num_cameras =
+			g_server_dev.camera_info.num_cameras;
+		if (copy_to_user((void __user *)arg,
+							  &temp_cam_info,
+				sizeof(struct msm_camera_info))) {
+			rc = -EINVAL;
+			return rc;
+		}
+		rc = 0;
+		break;
+
+	case MSM_CAM_IOCTL_GET_CONFIG_INFO:
+		if (copy_from_user(&temp_config_info, (void __user *)arg,
+				  sizeof(struct msm_cam_config_dev_info))) {
+			rc = -EINVAL;
+			return rc;
+		}
+		for (i = 0;
+		 i < g_server_dev.config_info.num_config_nodes; i++) {
+			if (copy_to_user(
+			(void __user *)temp_config_info.config_dev_name[i],
+			g_server_dev.config_info.config_dev_name[i],
+			strlen(g_server_dev.config_info.config_dev_name[i]))) {
+				rc = -EINVAL;
+				return rc;
+			}
+		}
+		temp_config_info.num_config_nodes =
+			g_server_dev.config_info.num_config_nodes;
+		if (copy_to_user((void __user *)arg,
+							  &temp_config_info,
+				sizeof(struct msm_cam_config_dev_info))) {
+			rc = -EINVAL;
+			return rc;
+		}
+		rc = 0;
+		break;
+
+	case VIDIOC_SUBSCRIBE_EVENT:
+		if (copy_from_user(&temp_sub, (void __user *)arg,
+				  sizeof(struct v4l2_event_subscription))) {
+			rc = -EINVAL;
+			return rc;
+		}
+		rc = msm_camera_v4l2_subscribe_event
+			(&g_server_dev.server_command_queue.eventHandle,
+			 &temp_sub);
+		if (rc < 0)
+			return rc;
+
+		break;
+
+	case VIDIOC_DQEVENT: {
+		void __user *u_ctrl_value = NULL;
+		struct msm_isp_stats_event_ctrl *u_isp_event;
+		struct msm_isp_stats_event_ctrl *k_isp_event;
+
+		/* Make a copy of control value and event data pointer */
+		D("%s: VIDIOC_DQEVENT\n", __func__);
+		if (copy_from_user(&ev, (void __user *)arg,
+				sizeof(struct v4l2_event)))
+			break;
+		u_isp_event = (struct msm_isp_stats_event_ctrl *)ev.u.data;
+		u_ctrl_value = u_isp_event->isp_data.ctrl.value;
+
+		rc = v4l2_event_dequeue(
+			&g_server_dev.server_command_queue.eventHandle,
+			 &ev, fp->f_flags & O_NONBLOCK);
+		if (rc < 0) {
+			pr_err("no pending events?");
+			break;
+		}
+
+		k_isp_event = (struct msm_isp_stats_event_ctrl *)ev.u.data;
+		if (ev.type == V4L2_EVENT_PRIVATE_START+MSM_CAM_RESP_V4L2 &&
+				k_isp_event->isp_data.ctrl.length > 0) {
+			void *k_ctrl_value = k_isp_event->isp_data.ctrl.value;
+			if (copy_to_user(u_ctrl_value, k_ctrl_value,
+				u_isp_event->isp_data.ctrl.length)) {
+				rc = -EINVAL;
+				break;
+			}
+		}
+		k_isp_event->isp_data.ctrl.value = u_ctrl_value;
+
+		if (copy_to_user((void __user *)arg, &ev,
+				sizeof(struct v4l2_event))) {
+			rc = -EINVAL;
+			break;
+		}
+		}
+
+		break;
+
+	case MSM_CAM_IOCTL_CTRL_CMD_DONE:
+		D("%s: MSM_CAM_IOCTL_CTRL_CMD_DONE\n", __func__);
+		rc = msm_ctrl_cmd_done((void __user *)arg);
+		break;
+
+	default:
+		break;
+	}
+	return rc;
+}
+
+static int msm_open_server(struct inode *inode, struct file *fp)
+{
+	int rc;
+	D("%s: open %s\n", __func__, fp->f_path.dentry->d_name.name);
+
+	rc = nonseekable_open(inode, fp);
+	if (rc < 0) {
+		pr_err("%s: nonseekable_open error %d\n", __func__, rc);
+		return rc;
+	}
+	g_server_dev.use_count++;
+	if (g_server_dev.use_count == 1)
+		msm_queue_init(&g_server_dev.ctrl_q, "control");
+
+	return rc;
+}
+
+static unsigned int msm_poll_config(struct file *fp,
+					struct poll_table_struct *wait)
+{
+	int rc = 0;
+	struct msm_cam_config_dev *config = fp->private_data;
+	if (config == NULL)
+		return -EINVAL;
+
+	D("%s\n", __func__);
+
+	poll_wait(fp,
+	&config->config_stat_event_queue.eventHandle.events->wait, wait);
+	if (v4l2_event_pending(&config->config_stat_event_queue.eventHandle))
+		rc |= POLLPRI;
+	return rc;
+}
+
+static long msm_ioctl_config(struct file *fp, unsigned int cmd,
+	unsigned long arg)
+{
+
+	int rc = 0;
+	struct v4l2_event ev;
+	struct msm_cam_config_dev *config_cam = fp->private_data;
+	struct v4l2_event_subscription temp_sub;
+
+	D("%s: cmd %d\n", __func__, _IOC_NR(cmd));
+
+	switch (cmd) {
+		/* memory management shall be handled here */
+	case MSM_CAM_IOCTL_REGISTER_PMEM:
+		   return msm_register_pmem(
+			&config_cam->p_mctl->sync.pmem_stats,
+			(void __user *)arg);
+		   break;
+
+	case MSM_CAM_IOCTL_UNREGISTER_PMEM:
+		   return msm_pmem_table_del(
+			&config_cam->p_mctl->sync.pmem_stats,
+			(void __user *)arg);
+		   break;
+	case VIDIOC_SUBSCRIBE_EVENT:
+			if (copy_from_user(&temp_sub,
+			(void __user *)arg,
+			sizeof(struct v4l2_event_subscription))) {
+				rc = -EINVAL;
+				return rc;
+			}
+			rc = msm_camera_v4l2_subscribe_event
+			(&config_cam->config_stat_event_queue.eventHandle,
+				 &temp_sub);
+			if (rc < 0)
+				return rc;
+
+			break;
+
+	case VIDIOC_UNSUBSCRIBE_EVENT:
+		if (copy_from_user(&temp_sub, (void __user *)arg,
+			  sizeof(struct v4l2_event_subscription))) {
+			rc = -EINVAL;
+			return rc;
+		}
+		rc = msm_camera_v4l2_unsubscribe_event
+			(&config_cam->config_stat_event_queue.eventHandle,
+			 &temp_sub);
+		if (rc < 0)
+			return rc;
+
+		break;
+
+	case VIDIOC_DQEVENT: {
+		void __user *u_msg_value = NULL;
+		struct msm_isp_stats_event_ctrl *u_isp_event;
+		struct msm_isp_stats_event_ctrl *k_isp_event;
+
+		/* Make a copy of control value and event data pointer */
+		D("%s: VIDIOC_DQEVENT\n", __func__);
+		if (copy_from_user(&ev, (void __user *)arg,
+				sizeof(struct v4l2_event)))
+			break;
+		u_isp_event = (struct msm_isp_stats_event_ctrl *)ev.u.data;
+		u_msg_value = u_isp_event->isp_data.isp_msg.data;
+
+		rc = v4l2_event_dequeue(
+		&config_cam->config_stat_event_queue.eventHandle,
+			 &ev, fp->f_flags & O_NONBLOCK);
+		if (rc < 0) {
+			pr_err("no pending events?");
+			break;
+		}
+
+		k_isp_event = (struct msm_isp_stats_event_ctrl *)ev.u.data;
+		if (ev.type ==
+			V4L2_EVENT_PRIVATE_START+MSM_CAM_RESP_STAT_EVT_MSG &&
+			k_isp_event->isp_data.isp_msg.len > 0) {
+			void *k_msg_value = k_isp_event->isp_data.isp_msg.data;
+			if (copy_to_user(u_msg_value, k_msg_value,
+				k_isp_event->isp_data.isp_msg.len)) {
+				rc = -EINVAL;
+				break;
+			}
+			kfree(k_msg_value);
+		}
+		k_isp_event->isp_data.isp_msg.data = u_msg_value;
+
+		if (copy_to_user((void __user *)arg, &ev,
+				sizeof(struct v4l2_event))) {
+			rc = -EINVAL;
+			break;
+		}
+		}
+
+		break;
+
+	default:{
+	/* For the rest of the config commands, forward to the media controller */
+		struct msm_cam_media_controller *p_mctl = config_cam->p_mctl;
+		if (p_mctl && p_mctl->mctl_cmd) {
+			rc = config_cam->p_mctl->mctl_cmd(p_mctl, cmd, arg);
+		} else {
+			rc = -EINVAL;
+			pr_err("%s: media controller is null\n", __func__);
+		}
+
+		break;
+	} /* end of default*/
+	} /* end of switch*/
+	return rc;
+}
+
+static int msm_open_config(struct inode *inode, struct file *fp)
+{
+	int rc;
+
+	struct msm_cam_config_dev *config_cam =
+	container_of(inode->i_cdev, struct msm_cam_config_dev, config_cdev);
+
+	D("%s: open %s\n", __func__, fp->f_path.dentry->d_name.name);
+
+	rc = nonseekable_open(inode, fp);
+	if (rc < 0) {
+		pr_err("%s: nonseekable_open error %d\n", __func__, rc);
+		return rc;
+	}
+	config_cam->use_count++;
+
+	/*config_cam->isp_subdev = g_server_dev.pcam_active->mctl.isp_sdev;*/
+	/* assume only one active camera is possible */
+	config_cam->p_mctl = &g_server_dev.pcam_active->mctl;
+
+	INIT_HLIST_HEAD(&config_cam->p_mctl->sync.pmem_stats);
+	spin_lock_init(&config_cam->p_mctl->sync.pmem_stats_spinlock);
+
+	config_cam->p_mctl->config_device = config_cam;
+	fp->private_data = config_cam;
+	return rc;
+}
+
+static struct v4l2_file_operations g_msm_fops = {
+	.owner   = THIS_MODULE,
+	.open	= msm_open,
+	.poll	= msm_poll,
+	.mmap	= msm_mmap,
+	.release = msm_close,
+	.ioctl   = video_ioctl2,
+};
+
+/* Init a config node for ISP control,
+   which will create a config device (/dev/config0) and plug in
+   the ISP's operations ("v4l2_ioctl_ops")
+*/
+static const struct file_operations msm_fops_server = {
+	.owner = THIS_MODULE,
+	.open  = msm_open_server,
+	.poll  = msm_poll_server,
+	.unlocked_ioctl = msm_ioctl_server,
+};
+
+static const struct file_operations msm_fops_config = {
+	.owner = THIS_MODULE,
+	.open  = msm_open_config,
+	.poll  = msm_poll_config,
+	.unlocked_ioctl = msm_ioctl_config,
+};
+
+static int msm_setup_v4l2_event_queue(struct v4l2_fh *eventHandle,
+				  struct video_device *pvdev)
+{
+	int rc = 0;
+	/* v4l2_fh support */
+	spin_lock_init(&pvdev->fh_lock);
+	INIT_LIST_HEAD(&pvdev->fh_list);
+
+	rc = v4l2_fh_init(eventHandle, pvdev);
+	if (rc < 0)
+		return rc;
+
+	rc = v4l2_event_init(eventHandle);
+	if (rc < 0)
+		return rc;
+
+	/* queue of max size 30 */
+	rc = v4l2_event_alloc(eventHandle, 30);
+	if (rc < 0)
+		return rc;
+
+	v4l2_fh_add(eventHandle);
+	return rc;
+
+}
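+
+/* Usage sketch (illustrative only): callers allocate a dummy video_device,
+ * initialize the event queue on it with msm_setup_v4l2_event_queue(), and
+ * then poll/dequeue events on the handle, as msm_poll_server() and
+ * msm_poll_config() above do.  Here q is assumed to be a
+ * struct v4l2_queue_util:
+ *
+ *	rc = msm_setup_v4l2_event_queue(&q->eventHandle, q->pvdev);
+ *	...
+ *	poll_wait(fp, &q->eventHandle.events->wait, wait);
+ *	if (v4l2_event_pending(&q->eventHandle))
+ *		mask |= POLLPRI;
+ */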
+
+static int msm_setup_config_dev(int node, char *device_name)
+{
+	int rc = -ENODEV;
+	struct device *device_config;
+	int dev_num = node;
+	dev_t devno;
+	struct msm_cam_config_dev *config_cam;
+
+	config_cam = kzalloc(sizeof(*config_cam), GFP_KERNEL);
+	if (!config_cam) {
+		pr_err("%s: could not allocate memory for msm_cam_config_device\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	D("%s\n", __func__);
+
+	devno = MKDEV(MAJOR(msm_devno), dev_num+1);
+	device_config = device_create(msm_class, NULL, devno, NULL,
+				    "%s%d", device_name, dev_num);
+
+	if (IS_ERR(device_config)) {
+		rc = PTR_ERR(device_config);
+		pr_err("%s: error creating device: %d\n", __func__, rc);
+		return rc;
+	}
+
+	cdev_init(&config_cam->config_cdev,
+			   &msm_fops_config);
+	config_cam->config_cdev.owner = THIS_MODULE;
+
+	rc = cdev_add(&config_cam->config_cdev, devno, 1);
+	if (rc < 0) {
+		pr_err("%s: error adding cdev: %d\n", __func__, rc);
+		device_destroy(msm_class, devno);
+		return rc;
+	}
+	g_server_dev.config_info.config_dev_name[dev_num]
+		= dev_name(device_config);
+	D("%s Connected config device %s\n", __func__,
+		g_server_dev.config_info.config_dev_name[dev_num]);
+
+	config_cam->config_stat_event_queue.pvdev = video_device_alloc();
+	if (config_cam->config_stat_event_queue.pvdev == NULL) {
+		pr_err("%s: video_device_alloc failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	rc = msm_setup_v4l2_event_queue(
+		&config_cam->config_stat_event_queue.eventHandle,
+		config_cam->config_stat_event_queue.pvdev);
+	if (rc < 0)
+		pr_err("%s failed to initialize event queue\n", __func__);
+
+	return rc;
+}
+
+static int msm_setup_server_dev(int node, char *device_name)
+{
+	int rc = -ENODEV;
+	struct device *device_server;
+	int dev_num = node;
+	dev_t devno;
+
+	D("%s\n", __func__);
+
+	devno = MKDEV(MAJOR(msm_devno), dev_num);
+	device_server = device_create(msm_class, NULL,
+			devno, NULL, "%s", device_name);
+
+	if (IS_ERR(device_server)) {
+		rc = PTR_ERR(device_server);
+		pr_err("%s: error creating device: %d\n", __func__, rc);
+		return rc;
+	}
+
+	cdev_init(&g_server_dev.server_cdev, &msm_fops_server);
+	g_server_dev.server_cdev.owner = THIS_MODULE;
+
+	rc = cdev_add(&g_server_dev.server_cdev, devno, 1);
+	if (rc < 0) {
+		pr_err("%s: error adding cdev: %d\n", __func__, rc);
+		device_destroy(msm_class, devno);
+		return rc;
+	}
+
+	g_server_dev.pcam_active = NULL;
+	g_server_dev.camera_info.num_cameras = 0;
+	atomic_set(&g_server_dev.number_pcam_active, 0);
+	g_server_dev.ispif_fns.ispif_config = NULL;
+
+	/*initialize fake video device and event queue*/
+
+	g_server_dev.server_command_queue.pvdev = video_device_alloc();
+	if (g_server_dev.server_command_queue.pvdev == NULL) {
+		pr_err("%s: video_device_alloc failed\n", __func__);
+		return -ENOMEM;
+	}
+	rc = msm_setup_v4l2_event_queue(
+		&g_server_dev.server_command_queue.eventHandle,
+		g_server_dev.server_command_queue.pvdev);
+	if (rc < 0)
+		pr_err("%s failed to initialize event queue\n", __func__);
+
+	return rc;
+}
+
+static int msm_cam_dev_init(struct msm_cam_v4l2_device *pcam)
+{
+	int rc = -ENOMEM;
+	struct video_device *pvdev = NULL;
+	D("%s\n", __func__);
+
+	/* first register the v4l2 device */
+	pcam->v4l2_dev.dev = &pcam->pdev->dev;
+	rc = v4l2_device_register(pcam->v4l2_dev.dev, &pcam->v4l2_dev);
+	if (rc < 0)
+		return -EINVAL;
+	else
+		pcam->v4l2_dev.notify = msm_cam_v4l2_subdev_notify;
+
+
+	/* now setup video device */
+	pvdev = video_device_alloc();
+	if (pvdev == NULL) {
+		pr_err("%s: video_device_alloc failed\n", __func__);
+		return rc;
+	}
+
+	/* init video device's driver interface */
+	D("sensor name = %s, sizeof(pvdev->name)=%d\n",
+		pcam->pdev->name, sizeof(pvdev->name));
+
+	/* device info - strlcpy is safer than strncpy, but
+	   only if the architecture supports it */
+	strlcpy(pvdev->name, pcam->pdev->name, sizeof(pvdev->name));
+
+	pvdev->release   = video_device_release;
+	pvdev->fops	  = &g_msm_fops;
+	pvdev->ioctl_ops = &g_msm_ioctl_ops;
+	pvdev->minor	 = -1;
+	pvdev->vfl_type  = 1;
+
+	/* register v4l2 video device to kernel as /dev/videoXX */
+	D("video_register_device\n");
+	rc = video_register_device(pvdev,
+				   VFL_TYPE_GRABBER,
+				   msm_camera_v4l2_nr);
+	if (rc) {
+		pr_err("%s: video_register_device failed\n", __func__);
+		goto reg_fail;
+	}
+	D("%s: video device registered as /dev/video%d\n",
+		__func__, pvdev->num);
+
+	/* connect pcam and video dev to each other */
+	pcam->pvdev	= pvdev;
+	video_set_drvdata(pcam->pvdev, pcam);
+
+	/* If ISP HW registration is successful, then create an event queue
+	   to receive events from the HW
+	 */
+	/* yyan: no global - each sensor will create a new video node! */
+	/* g_pmsm_camera_v4l2_dev = pmsm_camera_v4l2_dev; */
+	/* g_pmsm_camera_v4l2_dev->pvdev = pvdev; */
+
+	return rc;
+
+reg_fail:
+	video_device_release(pvdev);
+	v4l2_device_unregister(&pcam->v4l2_dev);
+	pcam->v4l2_dev.dev = NULL;
+	return rc;
+}
+
+static int msm_sync_destroy(struct msm_sync *sync)
+{
+	if (sync)
+		wake_lock_destroy(&sync->wake_lock);
+	return 0;
+}
+static int msm_sync_init(struct msm_sync *sync,
+	struct platform_device *pdev, struct msm_sensor_ctrl *sctrl)
+{
+	int rc = 0;
+
+	sync->sdata = pdev->dev.platform_data;
+
+	wake_lock_init(&sync->wake_lock, WAKE_LOCK_IDLE, "msm_camera");
+
+	sync->pdev = pdev;
+	sync->sctrl = *sctrl;
+	sync->opencnt = 0;
+	mutex_init(&sync->lock);
+	D("%s: initialized %s\n", __func__, sync->sdata->sensor_name);
+	return rc;
+}
+
+int msm_ispif_register(struct msm_ispif_fns *ispif)
+{
+	int rc = -EINVAL;
+	if (ispif != NULL) {
+		/*save ispif into server dev*/
+		g_server_dev.ispif_fns.ispif_config = ispif->ispif_config;
+		g_server_dev.ispif_fns.ispif_start_intf_transfer
+			= ispif->ispif_start_intf_transfer;
+		rc = 0;
+	}
+	return rc;
+}
+EXPORT_SYMBOL(msm_ispif_register);
+
+/* Register an MSM sensor into the MSM device, which will probe the
+   sensor HW. If the HW exists, then create a video device (/dev/videoX)
+   to represent this sensor. */
+int msm_sensor_register(struct platform_device *pdev,
+		int (*sensor_probe)(const struct msm_camera_sensor_info *,
+			struct v4l2_subdev *, struct msm_sensor_ctrl *))
+{
+
+	int rc = -EINVAL;
+	struct msm_camera_sensor_info *sdata = pdev->dev.platform_data;
+	struct msm_cam_v4l2_device *pcam;
+	struct v4l2_subdev *sdev;
+	struct msm_sensor_ctrl sctrl;
+
+	D("%s for %s\n", __func__, pdev->name);
+
+	/* allocate the memory for the camera device first */
+	pcam = kzalloc(sizeof(*pcam), GFP_KERNEL);
+	if (!pcam) {
+		pr_err("%s: could not allocate memory for msm_cam_v4l2_device\n",
+			__func__);
+		return -ENOMEM;
+	} else {
+		sdev = &(pcam->sensor_sdev);
+		snprintf(sdev->name, sizeof(sdev->name), "%s", pdev->name);
+	}
+
+	/* sensor probe logic */
+	rc = msm_camio_probe_on(pdev);
+	if (rc < 0) {
+		kzfree(pcam);
+		return rc;
+	}
+
+	rc = sensor_probe(sdata, sdev, &sctrl);
+
+	msm_camio_probe_off(pdev);
+	if (rc < 0) {
+		pr_err("%s: failed to detect %s\n",
+			__func__,
+		sdata->sensor_name);
+		kzfree(pcam);
+		return rc;
+	}
+
+	/* if the probe is successful, allocate the camera driver object
+	   for this sensor */
+
+	pcam->sync = kzalloc(sizeof(struct msm_sync), GFP_ATOMIC);
+	if (!pcam->sync) {
+		pr_err("%s: could not allocate memory for msm_sync object\n",
+			__func__);
+		kzfree(pcam);
+		return -ENOMEM;
+	}
+
+	/* setup a manager object*/
+	rc = msm_sync_init(pcam->sync, pdev, &sctrl);
+	if (rc < 0)
+		goto failure;
+	D("%s: pcam =0x%p\n", __func__, pcam);
+	D("%s: pcam->sync =0x%p\n", __func__, pcam->sync);
+
+	(pcam->sync)->pcam_sync = pcam;
+	/* bind the driver device to the sensor device */
+	pcam->pdev = pdev;
+	pcam->sctrl = sctrl;
+
+	/* init the user count and lock*/
+	pcam->use_count = 0;
+	mutex_init(&pcam->vid_lock);
+
+	/* Initialize the formats supported */
+	rc  = msm_mctl_init_user_formats(pcam);
+	if (rc < 0)
+		goto failure;
+
+	/* now initialize the camera device object */
+	rc  = msm_cam_dev_init(pcam);
+	if (rc < 0)
+		goto failure;
+
+	g_server_dev.camera_info.video_dev_name
+	[g_server_dev.camera_info.num_cameras]
+	= video_device_node_name(pcam->pvdev);
+	D("%s Connected video device %s\n", __func__,
+	  g_server_dev.camera_info.video_dev_name
+		[g_server_dev.camera_info.num_cameras]);
+	g_server_dev.camera_info.num_cameras++;
+
+	D("%s done, rc = %d\n", __func__, rc);
+	D("%s number of sensors connected is %d\n", __func__,
+			g_server_dev.camera_info.num_cameras);
+/*
+	if (g_server_dev.camera_info.num_cameras == 1) {
+		rc = add_axi_qos();
+		if (rc < 0)
+			goto failure;
+	}
+*/
+	/* register the subdevice, must be done for callbacks */
+	rc = v4l2_device_register_subdev(&pcam->v4l2_dev, sdev);
+	pcam->vnode_id = vnode_count++;
+	return rc;
+
+failure:
+	/* mutex_destroy is not needed at this moment as the associated
+	   implementation of mutex_init does not consume resources */
+	msm_sync_destroy(pcam->sync);
+	pcam->pdev = NULL;
+	kzfree(pcam->sync);
+	kzfree(pcam);
+	return rc;
+}
+EXPORT_SYMBOL(msm_sensor_register);
+
+static int __init msm_camera_init(void)
+{
+	int rc = 0, i;
+	/* for now just create a config 0 node;
+	   put logic here later to know how many configs to create */
+	g_server_dev.config_info.num_config_nodes = 1;
+
+	rc = msm_isp_init_module(g_server_dev.config_info.num_config_nodes);
+	if (rc < 0) {
+		pr_err("Failed to initialize isp\n");
+		return rc;
+	}
+
+	if (!msm_class) {
+		rc = alloc_chrdev_region(&msm_devno, 0,
+		g_server_dev.config_info.num_config_nodes+1, "msm_camera");
+		if (rc < 0) {
+			pr_err("%s: failed to allocate chrdev: %d\n", __func__,
+			rc);
+			return rc;
+		}
+
+		msm_class = class_create(THIS_MODULE, "msm_camera");
+		if (IS_ERR(msm_class)) {
+			rc = PTR_ERR(msm_class);
+			pr_err("%s: create device class failed: %d\n",
+			__func__, rc);
+			return rc;
+		}
+	}
+
+	D("creating server and config nodes\n");
+	rc = msm_setup_server_dev(0, "video_msm");
+	if (rc < 0) {
+		pr_err("%s: failed to create server dev: %d\n", __func__,
+		rc);
+		return rc;
+	}
+
+	for (i = 0; i < g_server_dev.config_info.num_config_nodes; i++) {
+		rc = msm_setup_config_dev(i, "config");
+		if (rc < 0) {
+			pr_err("%s:failed to create config dev: %d\n",
+			 __func__, rc);
+			return rc;
+		}
+	}
+
+	msm_isp_register(&g_server_dev);
+	return rc;
+}
+
+static void __exit msm_camera_exit(void)
+{
+	msm_isp_unregister(&g_server_dev);
+}
+
+module_init(msm_camera_init);
+module_exit(msm_camera_exit);
diff --git a/drivers/media/video/msm/msm.h b/drivers/media/video/msm/msm.h
new file mode 100644
index 0000000..0049cc1
--- /dev/null
+++ b/drivers/media/video/msm/msm.h
@@ -0,0 +1,372 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MSM_H
+#define _MSM_H
+
+#ifdef __KERNEL__
+
+/* Header files */
+#include <linux/i2c.h>
+#include <linux/videodev2.h>
+#include <linux/pm_qos_params.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mediabus.h>
+#include <media/videobuf-dma-contig.h>
+#include <media/videobuf-msm-mem.h>
+#include <mach/camera.h>
+
+#define MSM_V4L2_DIMENSION_SIZE 96
+
+#define ERR_USER_COPY(to) pr_err("%s(%d): copy %s user\n", \
+				__func__, __LINE__, ((to) ? "to" : "from"))
+#define ERR_COPY_FROM_USER() ERR_USER_COPY(0)
+#define ERR_COPY_TO_USER() ERR_USER_COPY(1)
+
+/* msm queue management APIs*/
+
+#define msm_dequeue(queue, member) ({	   \
+	unsigned long flags;		  \
+	struct msm_device_queue *__q = (queue);	 \
+	struct msm_queue_cmd *qcmd = 0;	   \
+	spin_lock_irqsave(&__q->lock, flags);	 \
+	if (!list_empty(&__q->list)) {		\
+		__q->len--;		 \
+		qcmd = list_first_entry(&__q->list,   \
+		struct msm_queue_cmd, member);  \
+		list_del_init(&qcmd->member);	 \
+	}			 \
+	spin_unlock_irqrestore(&__q->lock, flags);  \
+	qcmd;			 \
+})
+
+#define msm_queue_drain(queue, member) do {	 \
+	unsigned long flags;		  \
+	struct msm_device_queue *__q = (queue);	 \
+	struct msm_queue_cmd *qcmd;	   \
+	spin_lock_irqsave(&__q->lock, flags);	 \
+	while (!list_empty(&__q->list)) {	 \
+		qcmd = list_first_entry(&__q->list,   \
+			struct msm_queue_cmd, member);	\
+			list_del_init(&qcmd->member);	 \
+			free_qcmd(qcmd);		\
+	 };			  \
+	spin_unlock_irqrestore(&__q->lock, flags);	\
+} while (0)
+
+static inline void free_qcmd(struct msm_queue_cmd *qcmd)
+{
+	if (!qcmd || !atomic_read(&qcmd->on_heap))
+		return;
+	if (!atomic_sub_return(1, &qcmd->on_heap))
+		kfree(qcmd);
+}
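+
+/* Usage sketch (illustrative only): a consumer pops one command at a time
+ * and releases heap-allocated entries via free_qcmd(); on teardown the
+ * whole queue is flushed with msm_queue_drain().  The frame_q/list_frame
+ * names below follow their use in msm_camera.c:
+ *
+ *	struct msm_queue_cmd *qcmd = msm_dequeue(&sync->frame_q, list_frame);
+ *	if (qcmd) {
+ *		... process qcmd->command ...
+ *		free_qcmd(qcmd);
+ *	}
+ *	msm_queue_drain(&sync->frame_q, list_frame);
+ */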
+
+/* message id for v4l2_subdev_notify*/
+enum msm_camera_v4l2_subdev_notify {
+	NOTIFY_CID_CHANGE, /* arg = msm_camera_csid_params */
+	NOTIFY_VFE_MSG_EVT, /* arg = msm_vfe_resp */
+	NOTIFY_INVALID
+};
+
+enum isp_vfe_cmd_id {
+	/*
+	 * Important! Command IDs are arranged in order.
+	 * Don't change!
+	 */
+	ISP_VFE_CMD_ID_STREAM_ON,
+	ISP_VFE_CMD_ID_STREAM_OFF,
+	ISP_VFE_CMD_ID_FRAME_BUF_RELEASE
+};
+
+struct msm_cam_v4l2_device;
+struct msm_cam_v4l2_dev_inst;
+
+/* buffer for one video frame */
+struct msm_frame_buffer {
+	/* common v4l buffer stuff -- must be first */
+	struct videobuf_buffer    vidbuf;
+	enum v4l2_mbus_pixelcode  pxlcode;
+	int                       inuse;
+	int                       active;
+};
+
+struct msm_isp_color_fmt {
+	char *name;
+	int depth;
+	int bitsperpxl;
+	u32 fourcc;
+	enum v4l2_mbus_pixelcode pxlcode;
+	enum v4l2_colorspace colorspace;
+};
+
+enum ispif_op_id {
+	/*
+	 * Important! Command IDs are arranged in order.
+	 * Don't change!
+	 */
+	ISPIF_ENABLE,
+	ISPIF_DISABLE,
+	ISPIF_RESET,
+	ISPIF_CONFIG
+};
+
+struct msm_ispif_ops {
+
+	int (*ispif_op)(struct msm_ispif_ops *p_ispif,
+		enum ispif_op_id ispif_op_id_used, unsigned long arg);
+};
+
+struct msm_ispif_fns {
+	int (*ispif_config)(struct msm_ispif_params *ispif_params,
+						 uint8_t num_of_intf);
+	int (*ispif_start_intf_transfer)
+		(struct msm_ispif_params *ispif_params);
+};
+
+extern int msm_ispif_init_module(struct msm_ispif_ops *p_ispif);
+
+/* "Media Controller" represents a camera streaming session, which consists
+   of a "sensor" device and an "isp" device (such as VFE, if needed),
+   connected via an "IO" device (such as ISPIF on 8960, or none on 8660),
+   plus other extra sub devices such as VPE and flash.
+*/
+
+struct msm_cam_media_controller {
+
+	int (*mctl_open)(struct msm_cam_media_controller *p_mctl,
+					 const char *const apps_id);
+	int (*mctl_cb)(void);
+	int (*mctl_notify)(struct msm_cam_media_controller *p_mctl,
+			unsigned int notification, void *arg);
+	int (*mctl_cmd)(struct msm_cam_media_controller *p_mctl,
+					unsigned int cmd, unsigned long arg);
+	int (*mctl_release)(struct msm_cam_media_controller *p_mctl);
+	int (*mctl_vidbuf_init)(struct msm_cam_v4l2_dev_inst *pcam,
+						struct videobuf_queue *);
+	int (*mctl_ufmt_init)(struct msm_cam_media_controller *p_mctl);
+
+	struct v4l2_device v4l2_dev;
+	struct v4l2_fh  eventHandle; /* event queue to export events */
+	/* most-frequently accessed manager object*/
+	struct msm_sync sync;
+
+
+	/* the following reflect the HW topology information*/
+	/*mandatory*/
+	struct v4l2_subdev *sensor_sdev; /* sensor sub device */
+	struct v4l2_subdev mctl_sdev;   /*  media control sub device */
+	struct platform_device *plat_dev;
+	/*optional*/
+	struct msm_isp_ops *isp_sdev;    /* isp sub device : camif/VFE */
+	struct v4l2_subdev *vpe_sdev;    /* vpe sub device : VPE */
+	struct v4l2_subdev *flash_sdev;    /* flash sub device : flash */
+	struct msm_cam_config_dev *config_device;
+	struct msm_ispif_fns *ispif_fns;
+
+	struct pm_qos_request_list pm_qos_req_list;
+};
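+
+/* Lifecycle sketch (illustrative only): mctl_open() is expected to be
+ * invoked when the first user opens the /dev/videoX node, mctl_cmd()
+ * forwards config-node ioctls to the ISP, mctl_notify() fans out subdev
+ * notifications, and mctl_release() tears the session down on the last
+ * close (see msm_close() and msm_ioctl_config() in msm.c).
+ */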
+
+/* abstract camera device represents a VFE and connected sensor */
+struct msm_isp_ops {
+	char *config_dev_name;
+
+	/*int (*isp_init)(struct msm_cam_v4l2_device *pcam);*/
+	int (*isp_open)(struct v4l2_subdev *sd, struct msm_sync *sync);
+	int (*isp_config)(struct msm_cam_media_controller *pmctl,
+		 unsigned int cmd, unsigned long arg);
+	int (*isp_enqueue)(struct msm_cam_media_controller *pcam,
+		struct msm_vfe_resp *data,
+		enum msm_queue qtype);
+	int (*isp_notify)(struct v4l2_subdev *sd, void *arg);
+
+	void (*isp_release)(struct msm_sync *psync);
+
+	/* vfe subdevice */
+	struct v4l2_subdev sd;
+};
+
+struct msm_isp_buf_info {
+	int type;
+	unsigned long buffer;
+	int fd;
+};
+#define MSM_DEV_INST_MAX                    16
+struct msm_cam_v4l2_dev_inst {
+	struct videobuf_queue vid_bufq;
+	spinlock_t vb_irqlock;
+	struct v4l2_format vid_fmt;
+	/* sensor pixel code */
+	enum v4l2_mbus_pixelcode sensor_pxlcode;
+	struct msm_cam_v4l2_device *pcam;
+	int my_index;
+	int image_mode;
+	int path;
+	int buf_count;
+};
+#define MSM_MAX_IMG_MODE                5
+/* abstract camera device for each sensor successfully probed*/
+struct msm_cam_v4l2_device {
+	/* standard device interfaces */
+	/* parent of video device to trace back */
+	struct device dev;
+	/* sensor's platform device*/
+	struct platform_device *pdev;
+	/* V4l2 device */
+	struct v4l2_device v4l2_dev;
+	/* will be registered as /dev/videoX */
+	struct video_device *pvdev;
+	int use_count;
+	/* will be used to init/release HW */
+	struct msm_cam_media_controller mctl;
+	/* sensor subdevice */
+	struct v4l2_subdev sensor_sdev;
+	struct msm_sensor_ctrl sctrl;
+
+	/* parent device */
+	struct device *parent_dev;
+
+	struct mutex vid_lock;
+	/* v4l2 format support */
+	struct msm_isp_color_fmt *usr_fmts;
+	int num_fmts;
+	/* preview or snapshot */
+	u32 mode;
+	u32 memsize;
+
+	int op_mode;
+	int vnode_id;
+	struct msm_cam_v4l2_dev_inst *dev_inst[MSM_DEV_INST_MAX];
+	struct msm_cam_v4l2_dev_inst *dev_inst_map[MSM_MAX_IMG_MODE];
+	/* native config device */
+	struct cdev cdev;
+
+	/* most-frequently accessed manager object*/
+	struct msm_sync *sync;
+
+	/* The message queue is used by the control thread to send commands
+	 * to the config thread, and also by the HW to send messages to the
+	 * config thread.  Thus it is the only queue that is accessed from
+	 * both interrupt and process context.
+	 */
+	/* struct msm_device_queue event_q; */
+
+	/* This queue used by the config thread to send responses back to the
+	 * control thread.  It is accessed only from a process context.
+	 * TO BE REMOVED
+	 */
+	struct msm_device_queue ctrl_q;
+
+	struct mutex lock;
+	uint8_t ctrl_data[max_control_command_size];
+	struct msm_ctrl_cmd ctrl;
+};
+static inline struct msm_cam_v4l2_device *to_pcam(
+	struct v4l2_device *v4l2_dev)
+{
+	return container_of(v4l2_dev, struct msm_cam_v4l2_device, v4l2_dev);
+}
+
+/*pseudo v4l2 device and v4l2 event queue
+  for server and config cdevs*/
+struct v4l2_queue_util {
+	struct video_device *pvdev;
+	struct v4l2_fh  eventHandle;
+};
+
+/* abstract config device for all sensor successfully probed*/
+struct msm_cam_config_dev {
+	struct cdev config_cdev;
+	struct v4l2_queue_util config_stat_event_queue;
+	int use_count;
+	/*struct msm_isp_ops* isp_subdev;*/
+	struct msm_cam_media_controller *p_mctl;
+};
+
+/* abstract camera server device for all sensor successfully probed*/
+struct msm_cam_server_dev {
+
+	/* config node device*/
+	struct cdev server_cdev;
+	/* info of sensors successfully probed*/
+	struct msm_camera_info camera_info;
+	/* info of configs successfully created*/
+	struct msm_cam_config_dev_info config_info;
+	/* active working camera device - only one allowed at this time*/
+	struct msm_cam_v4l2_device *pcam_active;
+	/* number of camera devices opened*/
+	atomic_t number_pcam_active;
+	struct v4l2_queue_util server_command_queue;
+	/* This queue used by the config thread to send responses back to the
+	 * control thread.  It is accessed only from a process context.
+	 */
+	struct msm_device_queue ctrl_q;
+	uint8_t ctrl_data[max_control_command_size];
+	struct msm_ctrl_cmd ctrl;
+	int use_count;
+	/* all the registered ISP subdevices */
+	struct msm_isp_ops *isp_subdev[MSM_MAX_CAMERA_CONFIGS];
+	struct msm_ispif_fns ispif_fns;
+
+};
+
+/* camera server related functions */
+
+
+/* ISP related functions */
+void msm_isp_vfe_dev_init(struct v4l2_subdev *vd);
+/*
+int msm_isp_register(struct msm_cam_v4l2_device *pcam);
+*/
+int msm_isp_register(struct msm_cam_server_dev *psvr);
+void msm_isp_unregister(struct msm_cam_server_dev *psvr);
+int msm_ispif_register(struct msm_ispif_fns *ispif);
+int msm_sensor_register(struct platform_device *pdev,
+	int (*sensor_probe)(const struct msm_camera_sensor_info *,
+	struct v4l2_subdev *, struct msm_sensor_ctrl *));
+int msm_isp_init_module(int g_num_config_nodes);
+
+int msm_mctl_init_module(struct msm_cam_v4l2_device *pcam);
+int msm_mctl_init_user_formats(struct msm_cam_v4l2_device *pcam);
+int msm_mctl_buf_done(struct msm_cam_media_controller *pmctl,
+			int msg_type, uint32_t y_phy);
+/*Memory(PMEM) functions*/
+int msm_register_pmem(struct hlist_head *ptype, void __user *arg);
+int msm_pmem_table_del(struct hlist_head *ptype, void __user *arg);
+uint8_t msm_pmem_region_lookup(struct hlist_head *ptype,
+	int pmem_type, struct msm_pmem_region *reg, uint8_t maxcount);
+uint8_t msm_pmem_region_lookup_2(struct hlist_head *ptype,
+					int pmem_type,
+					struct msm_pmem_region *reg,
+					uint8_t maxcount);
+uint8_t msm_pmem_region_lookup_3(struct msm_cam_v4l2_device *pcam, int idx,
+						struct msm_pmem_region *reg,
+						int mem_type);
+unsigned long msm_pmem_stats_vtop_lookup(
+				struct msm_sync *sync,
+				unsigned long buffer,
+				int fd);
+unsigned long msm_pmem_stats_ptov_lookup(struct msm_sync *sync,
+						unsigned long addr, int *fd);
+
+int msm_vfe_subdev_init(struct v4l2_subdev *sd, void *data,
+					struct platform_device *pdev);
+void msm_vfe_subdev_release(struct platform_device *pdev);
+
+int msm_isp_subdev_ioctl(struct v4l2_subdev *sd,
+	struct msm_vfe_cfg_cmd *cfgcmd, void *data);
+#endif /* __KERNEL__ */
+
+#endif /* _MSM_H */
diff --git a/drivers/media/video/msm/msm_axi_qos.c b/drivers/media/video/msm/msm_axi_qos.c
new file mode 100644
index 0000000..3969547
--- /dev/null
+++ b/drivers/media/video/msm/msm_axi_qos.c
@@ -0,0 +1,47 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <mach/camera.h>
+#define MSM_AXI_QOS_NAME "msm_camera"
+
+static struct clk *ebi1_clk;
+
+int add_axi_qos(void)
+{
+	ebi1_clk = clk_get(NULL, "ebi1_vfe_clk");
+	if (IS_ERR(ebi1_clk))
+		ebi1_clk = NULL;
+	else
+		clk_enable(ebi1_clk);
+
+	return 0;
+}
+
+int update_axi_qos(uint32_t rate)
+{
+	if (!ebi1_clk)
+		return 0;
+
+	return clk_set_rate(ebi1_clk, rate * 1000);
+}
+
+void release_axi_qos(void)
+{
+	if (!ebi1_clk)
+		return;
+
+	clk_disable(ebi1_clk);
+	clk_put(ebi1_clk);
+	ebi1_clk = NULL;
+}
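+
+/* Usage sketch (illustrative only): a camera session would typically call
+ * add_axi_qos() once at startup, update_axi_qos(rate) with the desired
+ * EBI1 clock rate in kHz whenever the bandwidth requirement changes, and
+ * release_axi_qos() on teardown.
+ */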
diff --git a/drivers/media/video/msm/msm_camera.c b/drivers/media/video/msm/msm_camera.c
new file mode 100644
index 0000000..3fddb8e
--- /dev/null
+++ b/drivers/media/video/msm/msm_camera.c
@@ -0,0 +1,4049 @@
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+//FIXME: most allocations need not be GFP_ATOMIC
+/* FIXME: management of mutexes */
+/* FIXME: msm_pmem_region_lookup return values */
+/* FIXME: way too many copy to/from user */
+/* FIXME: does region->active mean free */
+/* FIXME: check limits on command lengths passed from userspace */
+/* FIXME: __msm_release: which queues should we flush when opencnt != 0 */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <mach/board.h>
+
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/android_pmem.h>
+#include <linux/poll.h>
+#include <media/msm_camera.h>
+#include <mach/camera.h>
+#include <linux/syscalls.h>
+#include <linux/hrtimer.h>
+DEFINE_MUTEX(ctrl_cmd_lock);
+
+#define CAMERA_STOP_VIDEO 58
+spinlock_t pp_prev_spinlock;
+spinlock_t pp_stereocam_spinlock;
+spinlock_t st_frame_spinlock;
+
+#define ERR_USER_COPY(to) pr_err("%s(%d): copy %s user\n", \
+				__func__, __LINE__, ((to) ? "to" : "from"))
+#define ERR_COPY_FROM_USER() ERR_USER_COPY(0)
+#define ERR_COPY_TO_USER() ERR_USER_COPY(1)
+#define MAX_PMEM_CFG_BUFFERS 10
+
+static struct class *msm_class;
+static dev_t msm_devno;
+static LIST_HEAD(msm_sensors);
+struct  msm_control_device *g_v4l2_control_device;
+int g_v4l2_opencnt;
+static int camera_node;
+static enum msm_camera_type camera_type[MSM_MAX_CAMERA_SENSORS];
+static uint32_t sensor_mount_angle[MSM_MAX_CAMERA_SENSORS];
+
+static const char *vfe_config_cmd[] = {
+	"CMD_GENERAL",  /* 0 */
+	"CMD_AXI_CFG_OUT1",
+	"CMD_AXI_CFG_SNAP_O1_AND_O2",
+	"CMD_AXI_CFG_OUT2",
+	"CMD_PICT_T_AXI_CFG",
+	"CMD_PICT_M_AXI_CFG",  /* 5 */
+	"CMD_RAW_PICT_AXI_CFG",
+	"CMD_FRAME_BUF_RELEASE",
+	"CMD_PREV_BUF_CFG",
+	"CMD_SNAP_BUF_RELEASE",
+	"CMD_SNAP_BUF_CFG",  /* 10 */
+	"CMD_STATS_DISABLE",
+	"CMD_STATS_AEC_AWB_ENABLE",
+	"CMD_STATS_AF_ENABLE",
+	"CMD_STATS_AEC_ENABLE",
+	"CMD_STATS_AWB_ENABLE",  /* 15 */
+	"CMD_STATS_ENABLE",
+	"CMD_STATS_AXI_CFG",
+	"CMD_STATS_AEC_AXI_CFG",
+	"CMD_STATS_AF_AXI_CFG",
+	"CMD_STATS_AWB_AXI_CFG",  /* 20 */
+	"CMD_STATS_RS_AXI_CFG",
+	"CMD_STATS_CS_AXI_CFG",
+	"CMD_STATS_IHIST_AXI_CFG",
+	"CMD_STATS_SKIN_AXI_CFG",
+	"CMD_STATS_BUF_RELEASE",  /* 25 */
+	"CMD_STATS_AEC_BUF_RELEASE",
+	"CMD_STATS_AF_BUF_RELEASE",
+	"CMD_STATS_AWB_BUF_RELEASE",
+	"CMD_STATS_RS_BUF_RELEASE",
+	"CMD_STATS_CS_BUF_RELEASE",  /* 30 */
+	"CMD_STATS_IHIST_BUF_RELEASE",
+	"CMD_STATS_SKIN_BUF_RELEASE",
+	"UPDATE_STATS_INVALID",
+	"CMD_AXI_CFG_SNAP_GEMINI",
+	"CMD_AXI_CFG_SNAP",  /* 35 */
+	"CMD_AXI_CFG_PREVIEW",
+	"CMD_AXI_CFG_VIDEO",
+	"CMD_STATS_IHIST_ENABLE",
+	"CMD_STATS_RS_ENABLE",
+	"CMD_STATS_CS_ENABLE",  /* 40 */
+	"CMD_VPE",
+	"CMD_AXI_CFG_VPE",
+	"CMD_AXI_CFG_SNAP_VPE",
+	"CMD_AXI_CFG_SNAP_THUMB_VPE",
+};
+#define __CONTAINS(r, v, l, field) ({				\
+	typeof(r) __r = r;					\
+	typeof(v) __v = v;					\
+	typeof(v) __e = __v + l;				\
+	int res = __v >= __r->field &&				\
+		__e <= __r->field + __r->len;			\
+	res;							\
+})
+
+#define CONTAINS(r1, r2, field) ({				\
+	typeof(r2) __r2 = r2;					\
+	__CONTAINS(r1, __r2->field, __r2->len, field);		\
+})
+
+#define IN_RANGE(r, v, field) ({				\
+	typeof(r) __r = r;					\
+	typeof(v) __vv = v;					\
+	int res = ((__vv >= __r->field) &&			\
+		(__vv < (__r->field + __r->len)));		\
+	res;							\
+})
+
+#define OVERLAPS(r1, r2, field) ({				\
+	typeof(r1) __r1 = r1;					\
+	typeof(r2) __r2 = r2;					\
+	typeof(__r2->field) __v = __r2->field;			\
+	typeof(__v) __e = __v + __r2->len - 1;			\
+	int res = (IN_RANGE(__r1, __v, field) ||		\
+		   IN_RANGE(__r1, __e, field));                 \
+	res;							\
+})
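+
+/* Worked example (illustrative only): for a registered region with
+ * paddr 0x1000 and len 0x100, a new request with paddr 0x1080 and len 0x100
+ * neither contains it nor is contained by it, but OVERLAPS() is true
+ * because its first byte (0x1080) falls IN_RANGE of the registered region,
+ * so check_overlap() below rejects the registration.
+ */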
+
+static inline void free_qcmd(struct msm_queue_cmd *qcmd)
+{
+	if (!qcmd || !atomic_read(&qcmd->on_heap))
+		return;
+	if (!atomic_sub_return(1, &qcmd->on_heap))
+		kfree(qcmd);
+}
+
+static void msm_region_init(struct msm_sync *sync)
+{
+	INIT_HLIST_HEAD(&sync->pmem_frames);
+	INIT_HLIST_HEAD(&sync->pmem_stats);
+	spin_lock_init(&sync->pmem_frame_spinlock);
+	spin_lock_init(&sync->pmem_stats_spinlock);
+}
+
+static void msm_queue_init(struct msm_device_queue *queue, const char *name)
+{
+	spin_lock_init(&queue->lock);
+	queue->len = 0;
+	queue->max = 0;
+	queue->name = name;
+	INIT_LIST_HEAD(&queue->list);
+	init_waitqueue_head(&queue->wait);
+}
+
+static void msm_enqueue(struct msm_device_queue *queue,
+		struct list_head *entry)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&queue->lock, flags);
+	queue->len++;
+	if (queue->len > queue->max) {
+		queue->max = queue->len;
+		CDBG("%s: queue %s new max is %d\n", __func__,
+			queue->name, queue->max);
+	}
+	list_add_tail(entry, &queue->list);
+	wake_up(&queue->wait);
+	CDBG("%s: woke up %s\n", __func__, queue->name);
+	spin_unlock_irqrestore(&queue->lock, flags);
+}
+
+static void msm_enqueue_vpe(struct msm_device_queue *queue,
+		struct list_head *entry)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&queue->lock, flags);
+	queue->len++;
+	if (queue->len > queue->max) {
+		queue->max = queue->len;
+		CDBG("%s: queue %s new max is %d\n", __func__,
+			queue->name, queue->max);
+	}
+	list_add_tail(entry, &queue->list);
+	CDBG("%s: woke up %s\n", __func__, queue->name);
+	spin_unlock_irqrestore(&queue->lock, flags);
+}
+
+#define msm_dequeue(queue, member) ({				\
+	unsigned long flags;					\
+	struct msm_device_queue *__q = (queue);			\
+	struct msm_queue_cmd *qcmd = 0;				\
+	spin_lock_irqsave(&__q->lock, flags);			\
+	if (!list_empty(&__q->list)) {				\
+		__q->len--;					\
+		qcmd = list_first_entry(&__q->list,		\
+				struct msm_queue_cmd, member);	\
+		if ((qcmd) && (&qcmd->member) && (&qcmd->member.next))	\
+			list_del_init(&qcmd->member);			\
+	}							\
+	spin_unlock_irqrestore(&__q->lock, flags);	\
+	qcmd;							\
+})
+
+#define msm_delete_entry(queue, member, q_cmd) ({		\
+	unsigned long flags;					\
+	struct msm_device_queue *__q = (queue);			\
+	struct msm_queue_cmd *qcmd = 0;				\
+	spin_lock_irqsave(&__q->lock, flags);			\
+	if (!list_empty(&__q->list)) {				\
+		list_for_each_entry(qcmd, &__q->list, member)	\
+		if (qcmd == q_cmd) {				\
+			__q->len--;				\
+			list_del_init(&qcmd->member);		\
+			CDBG("msm_delete_entry, match found\n");\
+			kfree(q_cmd);				\
+			q_cmd = NULL;				\
+			break;					\
+		}						\
+	}							\
+	spin_unlock_irqrestore(&__q->lock, flags);		\
+	q_cmd;		\
+})
+
+#define msm_queue_drain(queue, member) do {			\
+	unsigned long flags;					\
+	struct msm_device_queue *__q = (queue);			\
+	struct msm_queue_cmd *qcmd;				\
+	spin_lock_irqsave(&__q->lock, flags);			\
+	while (!list_empty(&__q->list)) {			\
+		__q->len--;					\
+		qcmd = list_first_entry(&__q->list,		\
+			struct msm_queue_cmd, member);		\
+		if (qcmd) {					\
+			if ((&qcmd->member) && (&qcmd->member.next))	\
+				list_del_init(&qcmd->member);		\
+			free_qcmd(qcmd);				\
+		}							\
+	}							\
+	spin_unlock_irqrestore(&__q->lock, flags);		\
+} while (0)
+
+static int check_overlap(struct hlist_head *ptype,
+			unsigned long paddr,
+			unsigned long len)
+{
+	struct msm_pmem_region *region;
+	struct msm_pmem_region t = { .paddr = paddr, .len = len };
+	struct hlist_node *node;
+
+	hlist_for_each_entry(region, node, ptype, list) {
+		if (CONTAINS(region, &t, paddr) ||
+				CONTAINS(&t, region, paddr) ||
+				OVERLAPS(region, &t, paddr)) {
+			CDBG(" region (PHYS %p len %ld)"
+				" clashes with registered region"
+				" (paddr %p len %ld)\n",
+				(void *)t.paddr, t.len,
+				(void *)region->paddr, region->len);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+static int check_pmem_info(struct msm_pmem_info *info, int len)
+{
+	if (info->offset < len &&
+	    info->offset + info->len <= len &&
+	    info->y_off < len &&
+	    info->cbcr_off < len)
+		return 0;
+
+	pr_err("%s: check failed: off %d len %d y %d cbcr %d (total len %d)\n",
+		__func__,
+		info->offset,
+		info->len,
+		info->y_off,
+		info->cbcr_off,
+		len);
+	return -EINVAL;
+}
+static int msm_pmem_table_add(struct hlist_head *ptype,
+	struct msm_pmem_info *info, spinlock_t* pmem_spinlock,
+	struct msm_sync *sync)
+{
+	struct file *file;
+	unsigned long paddr;
+	unsigned long kvstart;
+	unsigned long len;
+	int rc;
+	struct msm_pmem_region *region;
+	unsigned long flags;
+
+
+	rc = get_pmem_file(info->fd, &paddr, &kvstart, &len, &file);
+	if (rc < 0) {
+		pr_err("%s: get_pmem_file fd %d error %d\n",
+			__func__,
+			info->fd, rc);
+		return rc;
+	}
+
+	if (!info->len)
+		info->len = len;
+
+	rc = check_pmem_info(info, len);
+	if (rc < 0)
+		return rc;
+
+	paddr += info->offset;
+	len = info->len;
+
+	spin_lock_irqsave(pmem_spinlock, flags);
+	if (check_overlap(ptype, paddr, len) < 0) {
+		spin_unlock_irqrestore(pmem_spinlock, flags);
+		return -EINVAL;
+	}
+	spin_unlock_irqrestore(pmem_spinlock, flags);
+
+
+	region = kmalloc(sizeof(struct msm_pmem_region), GFP_KERNEL);
+	if (!region)
+		return -ENOMEM;
+
+	spin_lock_irqsave(pmem_spinlock, flags);
+	INIT_HLIST_NODE(&region->list);
+
+	region->paddr = paddr;
+	region->len = len;
+	region->file = file;
+	memcpy(&region->info, info, sizeof(region->info));
+
+	hlist_add_head(&(region->list), ptype);
+	spin_unlock_irqrestore(pmem_spinlock, flags);
+	CDBG("%s: type %d, paddr 0x%lx, vaddr 0x%lx\n",
+		__func__, info->type, paddr, (unsigned long)info->vaddr);
+
+	return 0;
+}
+
+/* return of 0 means failure */
+static uint8_t msm_pmem_region_lookup(struct hlist_head *ptype,
+	int pmem_type, struct msm_pmem_region *reg, uint8_t maxcount,
+	spinlock_t *pmem_spinlock)
+{
+	struct msm_pmem_region *region;
+	struct msm_pmem_region *regptr;
+	struct hlist_node *node, *n;
+	unsigned long flags = 0;
+
+	uint8_t rc = 0;
+
+	regptr = reg;
+	spin_lock_irqsave(pmem_spinlock, flags);
+	hlist_for_each_entry_safe(region, node, n, ptype, list) {
+		if (region->info.type == pmem_type && region->info.active) {
+			*regptr = *region;
+			rc += 1;
+			if (rc >= maxcount)
+				break;
+			regptr++;
+		}
+	}
+	spin_unlock_irqrestore(pmem_spinlock, flags);
+	/* After lookup failure, dump all the list entries...*/
+	if (rc == 0) {
+		pr_err("%s: pmem_type = %d\n", __func__, pmem_type);
+		hlist_for_each_entry_safe(region, node, n, ptype, list) {
+			pr_err("listed region->info.type = %d, active = %d",
+				region->info.type, region->info.active);
+		}
+
+	}
+	return rc;
+}
+
+static uint8_t msm_pmem_region_lookup_2(struct hlist_head *ptype,
+					int pmem_type,
+					struct msm_pmem_region *reg,
+					uint8_t maxcount,
+					spinlock_t *pmem_spinlock)
+{
+	struct msm_pmem_region *region;
+	struct msm_pmem_region *regptr;
+	struct hlist_node *node, *n;
+	uint8_t rc = 0;
+	unsigned long flags = 0;
+	regptr = reg;
+	spin_lock_irqsave(pmem_spinlock, flags);
+	hlist_for_each_entry_safe(region, node, n, ptype, list) {
+		CDBG("%s:info.type=%d, pmem_type = %d,"
+						"info.active = %d\n",
+		__func__, region->info.type, pmem_type, region->info.active);
+
+		if (region->info.type == pmem_type && region->info.active) {
+			CDBG("%s:info.type=%d, pmem_type = %d,"
+							"info.active = %d,\n",
+				__func__, region->info.type, pmem_type,
+				region->info.active);
+			*regptr = *region;
+			region->info.type = MSM_PMEM_VIDEO;
+			rc += 1;
+			if (rc >= maxcount)
+				break;
+			regptr++;
+		}
+	}
+	spin_unlock_irqrestore(pmem_spinlock, flags);
+	return rc;
+}
+
+static int msm_pmem_frame_ptov_lookup(struct msm_sync *sync,
+		unsigned long pyaddr,
+		unsigned long pcbcraddr,
+		struct msm_pmem_info *pmem_info,
+		int clear_active)
+{
+	struct msm_pmem_region *region;
+	struct hlist_node *node, *n;
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&sync->pmem_frame_spinlock, flags);
+	hlist_for_each_entry_safe(region, node, n, &sync->pmem_frames, list) {
+		if (pyaddr == (region->paddr + region->info.y_off) &&
+				pcbcraddr == (region->paddr +
+						region->info.cbcr_off) &&
+				region->info.active) {
+			/* offset since we could pass vaddr inside
+			 * a registered pmem buffer
+			 */
+			memcpy(pmem_info, &region->info, sizeof(*pmem_info));
+			if (clear_active)
+				region->info.active = 0;
+			spin_unlock_irqrestore(&sync->pmem_frame_spinlock,
+				flags);
+			return 0;
+		}
+	}
+	/* After lookup failure, dump all the list entries... */
+	pr_err("%s, for pyaddr 0x%lx, pcbcraddr 0x%lx\n",
+			__func__, pyaddr, pcbcraddr);
+	hlist_for_each_entry_safe(region, node, n, &sync->pmem_frames, list) {
+		pr_err("listed pyaddr 0x%lx, pcbcraddr 0x%lx, active = %d",
+				(region->paddr + region->info.y_off),
+				(region->paddr + region->info.cbcr_off),
+				region->info.active);
+	}
+
+	spin_unlock_irqrestore(&sync->pmem_frame_spinlock, flags);
+	return -EINVAL;
+}
+
+static int msm_pmem_frame_ptov_lookup2(struct msm_sync *sync,
+		unsigned long pyaddr,
+		struct msm_pmem_info *pmem_info,
+		int clear_active)
+{
+	struct msm_pmem_region *region;
+	struct hlist_node *node, *n;
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&sync->pmem_frame_spinlock, flags);
+	hlist_for_each_entry_safe(region, node, n, &sync->pmem_frames, list) {
+		if (pyaddr == (region->paddr + region->info.y_off) &&
+				region->info.active) {
+			/* offset since we could pass vaddr inside
+			 * a registered pmem buffer
+			 */
+			memcpy(pmem_info, &region->info, sizeof(*pmem_info));
+			if (clear_active)
+				region->info.active = 0;
+			spin_unlock_irqrestore(&sync->pmem_frame_spinlock,
+				flags);
+			return 0;
+		}
+	}
+
+	spin_unlock_irqrestore(&sync->pmem_frame_spinlock, flags);
+	return -EINVAL;
+}
+
+static unsigned long msm_pmem_stats_ptov_lookup(struct msm_sync *sync,
+		unsigned long addr, int *fd)
+{
+	struct msm_pmem_region *region;
+	struct hlist_node *node, *n;
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&sync->pmem_stats_spinlock, flags);
+	hlist_for_each_entry_safe(region, node, n, &sync->pmem_stats, list) {
+		if (addr == region->paddr && region->info.active) {
+			/* offset since we could pass vaddr inside a
+			 * registered pmem buffer */
+			*fd = region->info.fd;
+			region->info.active = 0;
+			spin_unlock_irqrestore(&sync->pmem_stats_spinlock,
+				flags);
+			return (unsigned long)(region->info.vaddr);
+		}
+	}
+	/* After lookup failure, dump all the list entries... */
+	pr_err("%s, lookup failure, for paddr 0x%lx\n",
+			__func__, addr);
+	hlist_for_each_entry_safe(region, node, n, &sync->pmem_stats, list) {
+		pr_err("listed paddr 0x%lx, active = %d",
+				region->paddr,
+				region->info.active);
+	}
+	spin_unlock_irqrestore(&sync->pmem_stats_spinlock, flags);
+
+	return 0;
+}
+
+static unsigned long msm_pmem_frame_vtop_lookup(struct msm_sync *sync,
+		unsigned long buffer,
+		uint32_t yoff, uint32_t cbcroff, int fd, int change_flag)
+{
+	struct msm_pmem_region *region;
+	struct hlist_node *node, *n;
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&sync->pmem_frame_spinlock, flags);
+	hlist_for_each_entry_safe(region,
+		node, n, &sync->pmem_frames, list) {
+		if (((unsigned long)(region->info.vaddr) == buffer) &&
+				(region->info.y_off == yoff) &&
+				(region->info.cbcr_off == cbcroff) &&
+				(region->info.fd == fd) &&
+				(region->info.active == 0)) {
+			if (change_flag)
+				region->info.active = 1;
+			spin_unlock_irqrestore(&sync->pmem_frame_spinlock,
+				flags);
+			return region->paddr;
+		}
+	}
+	/* After lookup failure, dump all the list entries... */
+	pr_err("%s, failed for vaddr 0x%lx, yoff %d cbcroff %d\n",
+			__func__, buffer, yoff, cbcroff);
+	hlist_for_each_entry_safe(region, node, n, &sync->pmem_frames, list) {
+		pr_err("listed vaddr 0x%p, cbcroff %d, active = %d",
+				(region->info.vaddr),
+				(region->info.cbcr_off),
+				region->info.active);
+	}
+
+	spin_unlock_irqrestore(&sync->pmem_frame_spinlock, flags);
+
+	return 0;
+}
+
+static unsigned long msm_pmem_stats_vtop_lookup(
+		struct msm_sync *sync,
+		unsigned long buffer,
+		int fd)
+{
+	struct msm_pmem_region *region;
+	struct hlist_node *node, *n;
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&sync->pmem_stats_spinlock, flags);
+	hlist_for_each_entry_safe(region, node, n, &sync->pmem_stats, list) {
+		if (((unsigned long)(region->info.vaddr) == buffer) &&
+				(region->info.fd == fd) &&
+				region->info.active == 0) {
+			region->info.active = 1;
+			spin_unlock_irqrestore(&sync->pmem_stats_spinlock,
+				flags);
+			return region->paddr;
+		}
+	}
+	/* After lookup failure, dump all the list entries... */
+	pr_err("%s, lookup error for vaddr %ld\n",
+			__func__, buffer);
+	hlist_for_each_entry_safe(region, node, n, &sync->pmem_stats, list) {
+		pr_err("listed vaddr 0x%p, active = %d",
+				region->info.vaddr,
+				region->info.active);
+	}
+	spin_unlock_irqrestore(&sync->pmem_stats_spinlock, flags);
+
+	return 0;
+}
+
+static int __msm_pmem_table_del(struct msm_sync *sync,
+		struct msm_pmem_info *pinfo)
+{
+	int rc = 0;
+	struct msm_pmem_region *region;
+	struct hlist_node *node, *n;
+	unsigned long flags = 0;
+
+	switch (pinfo->type) {
+	case MSM_PMEM_PREVIEW:
+	case MSM_PMEM_THUMBNAIL:
+	case MSM_PMEM_MAINIMG:
+	case MSM_PMEM_RAW_MAINIMG:
+	case MSM_PMEM_C2D:
+	case MSM_PMEM_MAINIMG_VPE:
+	case MSM_PMEM_THUMBNAIL_VPE:
+		spin_lock_irqsave(&sync->pmem_frame_spinlock, flags);
+		hlist_for_each_entry_safe(region, node, n,
+			&sync->pmem_frames, list) {
+
+			if (pinfo->type == region->info.type &&
+					pinfo->vaddr == region->info.vaddr &&
+					pinfo->fd == region->info.fd) {
+				hlist_del(node);
+				put_pmem_file(region->file);
+				kfree(region);
+				CDBG("%s: type %d, vaddr  0x%p\n",
+					__func__, pinfo->type, pinfo->vaddr);
+			}
+		}
+		spin_unlock_irqrestore(&sync->pmem_frame_spinlock, flags);
+		break;
+
+	case MSM_PMEM_VIDEO:
+	case MSM_PMEM_VIDEO_VPE:
+		spin_lock_irqsave(&sync->pmem_frame_spinlock, flags);
+		hlist_for_each_entry_safe(region, node, n,
+			&sync->pmem_frames, list) {
+
+			if (((region->info.type == MSM_PMEM_VIDEO) ||
+				(region->info.type == MSM_PMEM_VIDEO_VPE)) &&
+				pinfo->vaddr == region->info.vaddr &&
+				pinfo->fd == region->info.fd) {
+				hlist_del(node);
+				put_pmem_file(region->file);
+				kfree(region);
+				CDBG("%s: type %d, vaddr  0x%p\n",
+					__func__, pinfo->type, pinfo->vaddr);
+			}
+		}
+		spin_unlock_irqrestore(&sync->pmem_frame_spinlock, flags);
+		break;
+
+	case MSM_PMEM_AEC_AWB:
+	case MSM_PMEM_AF:
+		spin_lock_irqsave(&sync->pmem_stats_spinlock, flags);
+		hlist_for_each_entry_safe(region, node, n,
+			&sync->pmem_stats, list) {
+
+			if (pinfo->type == region->info.type &&
+					pinfo->vaddr == region->info.vaddr &&
+					pinfo->fd == region->info.fd) {
+				hlist_del(node);
+				put_pmem_file(region->file);
+				kfree(region);
+				CDBG("%s: type %d, vaddr  0x%p\n",
+					__func__, pinfo->type, pinfo->vaddr);
+			}
+		}
+		spin_unlock_irqrestore(&sync->pmem_stats_spinlock, flags);
+		break;
+
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static int msm_pmem_table_del(struct msm_sync *sync, void __user *arg)
+{
+	struct msm_pmem_info info;
+
+	if (copy_from_user(&info, arg, sizeof(info))) {
+		ERR_COPY_FROM_USER();
+		return -EFAULT;
+	}
+
+	return __msm_pmem_table_del(sync, &info);
+}
+
+static int __msm_get_frame(struct msm_sync *sync,
+		struct msm_frame *frame)
+{
+	int rc = 0;
+
+	struct msm_pmem_info pmem_info;
+	struct msm_queue_cmd *qcmd = NULL;
+	struct msm_vfe_resp *vdata;
+	struct msm_vfe_phy_info *pphy;
+
+	qcmd = msm_dequeue(&sync->frame_q, list_frame);
+
+	if (!qcmd) {
+		pr_err("%s: no preview frame.\n", __func__);
+		return -EAGAIN;
+	}
+
+	if ((!qcmd->command) && (qcmd->error_code & MSM_CAMERA_ERR_MASK)) {
+		frame->error_code = qcmd->error_code;
+		pr_err("%s: fake frame with camera error code = %d\n",
+			__func__, frame->error_code);
+		goto err;
+	}
+
+	vdata = (struct msm_vfe_resp *)(qcmd->command);
+	pphy = &vdata->phy;
+
+	rc = msm_pmem_frame_ptov_lookup(sync,
+			pphy->y_phy,
+			pphy->cbcr_phy,
+			&pmem_info,
+			1); /* Clear the active flag */
+
+	if (rc < 0) {
+		pr_err("%s: cannot get frame, invalid lookup address "
+			"y %x cbcr %x\n",
+			__func__,
+			pphy->y_phy,
+			pphy->cbcr_phy);
+		goto err;
+	}
+
+	frame->ts = qcmd->ts;
+	frame->buffer = (unsigned long)pmem_info.vaddr;
+	frame->y_off = pmem_info.y_off;
+	frame->cbcr_off = pmem_info.cbcr_off;
+	frame->fd = pmem_info.fd;
+	frame->path = vdata->phy.output_id;
+	frame->frame_id = vdata->phy.frame_id;
+
+	CDBG("%s: y %x, cbcr %x, qcmd %x, virt_addr %x\n",
+		__func__,
+		pphy->y_phy, pphy->cbcr_phy, (int) qcmd, (int) frame->buffer);
+
+err:
+	free_qcmd(qcmd);
+	return rc;
+}
+
+static int msm_get_frame(struct msm_sync *sync, void __user *arg)
+{
+	int rc = 0;
+	struct msm_frame frame;
+
+	if (copy_from_user(&frame,
+				arg,
+				sizeof(struct msm_frame))) {
+		ERR_COPY_FROM_USER();
+		return -EFAULT;
+	}
+
+	rc = __msm_get_frame(sync, &frame);
+	if (rc < 0)
+		return rc;
+
+	mutex_lock(&sync->lock);
+	if (sync->croplen && (!sync->stereocam_enabled)) {
+		if (frame.croplen != sync->croplen) {
+			pr_err("%s: invalid frame croplen %d,"
+				"expecting %d\n",
+				__func__,
+				frame.croplen,
+				sync->croplen);
+			mutex_unlock(&sync->lock);
+			return -EINVAL;
+		}
+
+		if (copy_to_user((void *)frame.cropinfo,
+				sync->cropinfo,
+				sync->croplen)) {
+			ERR_COPY_TO_USER();
+			mutex_unlock(&sync->lock);
+			return -EFAULT;
+		}
+	}
+
+	if (sync->fdroiinfo.info) {
+		if (copy_to_user((void *)frame.roi_info.info,
+			sync->fdroiinfo.info,
+			sync->fdroiinfo.info_len)) {
+			ERR_COPY_TO_USER();
+			mutex_unlock(&sync->lock);
+			return -EFAULT;
+		}
+	}
+
+	if (sync->stereocam_enabled) {
+		frame.stcam_conv_value = sync->stcam_conv_value;
+		frame.stcam_quality_ind = sync->stcam_quality_ind;
+	}
+
+	if (copy_to_user((void *)arg,
+				&frame, sizeof(struct msm_frame))) {
+		ERR_COPY_TO_USER();
+		rc = -EFAULT;
+	}
+
+	mutex_unlock(&sync->lock);
+	CDBG("%s: got frame\n", __func__);
+
+	return rc;
+}
+
+static int msm_enable_vfe(struct msm_sync *sync, void __user *arg)
+{
+	int rc = -EIO;
+	struct camera_enable_cmd cfg;
+
+	if (copy_from_user(&cfg,
+			arg,
+			sizeof(struct camera_enable_cmd))) {
+		ERR_COPY_FROM_USER();
+		return -EFAULT;
+	}
+
+	if (sync->vfefn.vfe_enable)
+		rc = sync->vfefn.vfe_enable(&cfg);
+
+	return rc;
+}
+
+static int msm_disable_vfe(struct msm_sync *sync, void __user *arg)
+{
+	int rc = -EIO;
+	struct camera_enable_cmd cfg;
+
+	if (copy_from_user(&cfg,
+			arg,
+			sizeof(struct camera_enable_cmd))) {
+		ERR_COPY_FROM_USER();
+		return -EFAULT;
+	}
+
+	if (sync->vfefn.vfe_disable)
+		rc = sync->vfefn.vfe_disable(&cfg, NULL);
+
+	return rc;
+}
+
+static struct msm_queue_cmd *__msm_control(struct msm_sync *sync,
+		struct msm_device_queue *queue,
+		struct msm_queue_cmd *qcmd,
+		int timeout)
+{
+	int rc;
+
+	CDBG("Inside __msm_control\n");
+	if (sync->event_q.len <= 100 && sync->frame_q.len <= 100) {
+		/* wake up config thread */
+		msm_enqueue(&sync->event_q, &qcmd->list_config);
+	} else {
+		pr_err("%s, Error Queue limit exceeded e_q = %d, f_q = %d\n",
+			__func__, sync->event_q.len, sync->frame_q.len);
+		free_qcmd(qcmd);
+		return NULL;
+	}
+	if (!queue)
+		return NULL;
+
+	/* wait for config status */
+	CDBG("Waiting for config status \n");
+	rc = wait_event_interruptible_timeout(
+			queue->wait,
+			!list_empty_careful(&queue->list),
+			timeout);
+	CDBG("Waiting over for config status\n");
+	if (list_empty_careful(&queue->list)) {
+		if (!rc) {
+			rc = -ETIMEDOUT;
+			pr_err("%s: wait_event error %d\n", __func__, rc);
+			return ERR_PTR(rc);
+		} else if (rc < 0) {
+			pr_err("%s: wait_event error %d\n", __func__, rc);
+			if (msm_delete_entry(&sync->event_q,
+				list_config, qcmd)) {
+				sync->ignore_qcmd = true;
+				sync->ignore_qcmd_type =
+					(int16_t)((struct msm_ctrl_cmd *)
+					(qcmd->command))->type;
+			}
+			return ERR_PTR(rc);
+		}
+	}
+	qcmd = msm_dequeue(queue, list_control);
+	BUG_ON(!qcmd);
+	CDBG("__msm_control done \n");
+	return qcmd;
+}
+
+static struct msm_queue_cmd *__msm_control_nb(struct msm_sync *sync,
+					struct msm_queue_cmd *qcmd_to_copy)
+{
+	/* Since this is a non-blocking command, we cannot use qcmd_to_copy and
+	 * its data, since they are on the stack.  We replicate them on the heap
+	 * and mark them on_heap so that they get freed when the config thread
+	 * dequeues them.
+	 */
+
+	struct msm_ctrl_cmd *udata;
+	struct msm_ctrl_cmd *udata_to_copy = qcmd_to_copy->command;
+
+	struct msm_queue_cmd *qcmd =
+			kmalloc(sizeof(*qcmd_to_copy) +
+				sizeof(*udata_to_copy) +
+				udata_to_copy->length,
+				GFP_KERNEL);
+	if (!qcmd) {
+		pr_err("%s: out of memory\n", __func__);
+		return ERR_PTR(-ENOMEM);
+	}
+	*qcmd = *qcmd_to_copy;
+	udata = qcmd->command = qcmd + 1;
+	memcpy(udata, udata_to_copy, sizeof(*udata));
+	udata->value = udata + 1;
+	memcpy(udata->value, udata_to_copy->value, udata_to_copy->length);
+
+	atomic_set(&qcmd->on_heap, 1);
+
+	/* qcmd_resp will be set to NULL */
+	return __msm_control(sync, NULL, qcmd, 0);
+}
+
+static int msm_control(struct msm_control_device *ctrl_pmsm,
+			int block,
+			void __user *arg)
+{
+	int rc = 0;
+
+	struct msm_sync *sync = ctrl_pmsm->pmsm->sync;
+	void __user *uptr;
+	struct msm_ctrl_cmd udata_resp;
+	struct msm_queue_cmd *qcmd_resp = NULL;
+	uint8_t data[max_control_command_size];
+	struct msm_ctrl_cmd *udata;
+	struct msm_queue_cmd *qcmd =
+		kmalloc(sizeof(struct msm_queue_cmd) +
+			sizeof(struct msm_ctrl_cmd), GFP_ATOMIC);
+	if (!qcmd) {
+		pr_err("%s: out of memory\n", __func__);
+		return -ENOMEM;
+	}
+	udata = (struct msm_ctrl_cmd *)(qcmd + 1);
+	atomic_set(&(qcmd->on_heap), 1);
+	CDBG("Inside msm_control\n");
+	if (copy_from_user(udata, arg, sizeof(struct msm_ctrl_cmd))) {
+		ERR_COPY_FROM_USER();
+		rc = -EFAULT;
+		goto end;
+	}
+
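+	/* Stash the userspace value pointer and stage the payload in the
+	 * local data[] buffer; uptr is restored into the response before
+	 * it is copied back to userspace.
+	 */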
+	uptr = udata->value;
+	udata->value = data;
+	qcmd->type = MSM_CAM_Q_CTRL;
+	qcmd->command = udata;
+
+	if (udata->length) {
+		if (udata->length > sizeof(data)) {
+			pr_err("%s: user data too large (%d, max is %d)\n",
+					__func__,
+					udata->length,
+					sizeof(data));
+			rc = -EIO;
+			goto end;
+		}
+		if (copy_from_user(udata->value, uptr, udata->length)) {
+			ERR_COPY_FROM_USER();
+			rc = -EFAULT;
+			goto end;
+		}
+	}
+
+	if (unlikely(!block)) {
+		qcmd_resp = __msm_control_nb(sync, qcmd);
+		goto end;
+	}
+
+	qcmd_resp = __msm_control(sync,
+				  &ctrl_pmsm->ctrl_q,
+				  qcmd, msecs_to_jiffies(10000));
+
+	/* ownership of qcmd will be transferred to the event queue */
+	qcmd = NULL;
+
+	if (!qcmd_resp || IS_ERR(qcmd_resp)) {
+		/* Do not free qcmd_resp here.  If the config thread read it,
+		 * then it has already been freed, and we timed out because
+		 * we did not receive a MSM_CAM_IOCTL_CTRL_CMD_DONE.  If the
+		 * config thread itself is blocked and not dequeueing commands,
+		 * then it will either eventually unblock and process them,
+		 * or when it is killed, qcmd will be freed in
+		 * msm_release_config.
+		 */
+		rc = PTR_ERR(qcmd_resp);
+		qcmd_resp = NULL;
+		goto end;
+	}
+
+	if (qcmd_resp->command) {
+		udata_resp = *(struct msm_ctrl_cmd *)qcmd_resp->command;
+		if (udata_resp.length > 0) {
+			if (copy_to_user(uptr,
+					 udata_resp.value,
+					 udata_resp.length)) {
+				ERR_COPY_TO_USER();
+				rc = -EFAULT;
+				goto end;
+			}
+		}
+		udata_resp.value = uptr;
+
+		if (copy_to_user((void *)arg, &udata_resp,
+				sizeof(struct msm_ctrl_cmd))) {
+			ERR_COPY_TO_USER();
+			rc = -EFAULT;
+			goto end;
+		}
+	}
+
+end:
+	free_qcmd(qcmd);
+	CDBG("%s: done rc = %d\n", __func__, rc);
+	return rc;
+}
+
+/* Divert frames for post-processing by delivering them to the config thread;
+ * when post-processing is done, it will return the frame to the frame thread.
+ */
+static int msm_divert_frame(struct msm_sync *sync,
+		struct msm_vfe_resp *data,
+		struct msm_stats_event_ctrl *se)
+{
+	struct msm_pmem_info pinfo;
+	struct msm_postproc buf;
+	int rc;
+
+	CDBG("%s: Frame PP sync->pp_mask %d\n", __func__, sync->pp_mask);
+
+	if (!(sync->pp_mask & PP_PREV)  && !(sync->pp_mask & PP_SNAP)) {
+		pr_err("%s: diverting frame, not in PP_PREV or PP_SNAP!\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	rc = msm_pmem_frame_ptov_lookup(sync, data->phy.y_phy,
+			data->phy.cbcr_phy, &pinfo,
+			0); /* do not clear the active flag */
+
+	if (rc < 0) {
+		pr_err("%s: msm_pmem_frame_ptov_lookup failed\n", __func__);
+		return rc;
+	}
+
+	buf.fmain.buffer = (unsigned long)pinfo.vaddr;
+	buf.fmain.y_off = pinfo.y_off;
+	buf.fmain.cbcr_off = pinfo.cbcr_off;
+	buf.fmain.fd = pinfo.fd;
+
+	CDBG("%s: buf 0x%x fd %d\n", __func__, (unsigned int)buf.fmain.buffer,
+		 buf.fmain.fd);
+	if (copy_to_user((void *)(se->stats_event.data),
+			&(buf.fmain), sizeof(struct msm_frame))) {
+		ERR_COPY_TO_USER();
+		return -EFAULT;
+	}
+	return 0;
+}
+
+/* Divert stereo frames for post-processing by delivering
+ * them to the config thread.
+ */
+static int msm_divert_st_frame(struct msm_sync *sync,
+	struct msm_vfe_resp *data, struct msm_stats_event_ctrl *se, int path)
+{
+	struct msm_pmem_info pinfo;
+	struct msm_st_frame buf;
+	struct video_crop_t *crop = NULL;
+	int rc = 0;
+
+	if (se->stats_event.msg_id == OUTPUT_TYPE_ST_L) {
+		buf.type = OUTPUT_TYPE_ST_L;
+	} else if (se->stats_event.msg_id == OUTPUT_TYPE_ST_R) {
+		buf.type = OUTPUT_TYPE_ST_R;
+	} else {
+		if (se->resptype == MSM_CAM_RESP_STEREO_OP_1) {
+			rc = msm_pmem_frame_ptov_lookup(sync, data->phy.y_phy,
+					data->phy.cbcr_phy, &pinfo,
+					1);  /* do clear the active flag */
+			buf.buf_info.path = path;
+		} else if (se->resptype == MSM_CAM_RESP_STEREO_OP_2) {
+			rc = msm_pmem_frame_ptov_lookup(sync, data->phy.y_phy,
+					data->phy.cbcr_phy, &pinfo,
+					0); /* do not clear the active flag */
+			buf.buf_info.path = path;
+		} else
+			CDBG("%s: Invalid resptype = %d\n", __func__,
+				se->resptype);
+
+		if (rc < 0) {
+			CDBG("%s: msm_pmem_frame_ptov_lookup failed\n",
+				__func__);
+			return rc;
+		}
+
+		buf.type = OUTPUT_TYPE_ST_D;
+
+		if (sync->cropinfo != NULL) {
+			crop = sync->cropinfo;
+			switch (path) {
+			case OUTPUT_TYPE_P:
+			case OUTPUT_TYPE_T: {
+				buf.L.stCropInfo.in_w = crop->in1_w;
+				buf.L.stCropInfo.in_h = crop->in1_h;
+				buf.L.stCropInfo.out_w = crop->out1_w;
+				buf.L.stCropInfo.out_h = crop->out1_h;
+				buf.R.stCropInfo = buf.L.stCropInfo;
+				break;
+			}
+
+			case OUTPUT_TYPE_V:
+			case OUTPUT_TYPE_S: {
+				buf.L.stCropInfo.in_w = crop->in2_w;
+				buf.L.stCropInfo.in_h = crop->in2_h;
+				buf.L.stCropInfo.out_w = crop->out2_w;
+				buf.L.stCropInfo.out_h = crop->out2_h;
+				buf.R.stCropInfo = buf.L.stCropInfo;
+				break;
+			}
+			default: {
+				pr_warning("%s: invalid frame path %d\n",
+					__func__, path);
+				break;
+			}
+			}
+		} else {
+			buf.L.stCropInfo.in_w = 0;
+			buf.L.stCropInfo.in_h = 0;
+			buf.L.stCropInfo.out_w = 0;
+			buf.L.stCropInfo.out_h = 0;
+			buf.R.stCropInfo = buf.L.stCropInfo;
+		}
+
+		/* hardcode for now. */
+		if ((path == OUTPUT_TYPE_S) || (path == OUTPUT_TYPE_T))
+			buf.packing = sync->sctrl.s_snap_packing;
+		else
+			buf.packing = sync->sctrl.s_video_packing;
+
+		buf.buf_info.buffer = (unsigned long)pinfo.vaddr;
+		buf.buf_info.phy_offset = pinfo.offset;
+		buf.buf_info.y_off = pinfo.y_off;
+		buf.buf_info.cbcr_off = pinfo.cbcr_off;
+		buf.buf_info.fd = pinfo.fd;
+
+		CDBG("%s: buf 0x%x fd %d\n", __func__,
+			(unsigned int)buf.buf_info.buffer, buf.buf_info.fd);
+	}
+
+	if (copy_to_user((void *)(se->stats_event.data),
+			&buf, sizeof(struct msm_st_frame))) {
+		ERR_COPY_TO_USER();
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static int msm_get_stats(struct msm_sync *sync, void __user *arg)
+{
+	int rc = 0;
+
+	struct msm_stats_event_ctrl se;
+
+	struct msm_queue_cmd *qcmd = NULL;
+	struct msm_ctrl_cmd  *ctrl = NULL;
+	struct msm_vfe_resp  *data = NULL;
+	struct msm_vpe_resp  *vpe_data = NULL;
+	struct msm_stats_buf stats;
+
+	if (copy_from_user(&se, arg,
+			sizeof(struct msm_stats_event_ctrl))) {
+		ERR_COPY_FROM_USER();
+		pr_err("%s, ERR_COPY_FROM_USER\n", __func__);
+		return -EFAULT;
+	}
+
+	rc = 0;
+
+	qcmd = msm_dequeue(&sync->event_q, list_config);
+	if (!qcmd) {
+		/* Usually associated with a wait_event error (-512,
+		 * i.e. -ERESTARTSYS) in __msm_control. */
+		pr_err("%s, qcmd is Null\n", __func__);
+		rc = -ETIMEDOUT;
+		return rc;
+	}
+
+	CDBG("%s: received from DSP %d\n", __func__, qcmd->type);
+
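+	/* Translate the dequeued command into the stats/event structure the
+	 * config thread expects, based on the queue command type.
+	 */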
+	switch (qcmd->type) {
+	case MSM_CAM_Q_VPE_MSG:
+		/* Complete VPE response. */
+		vpe_data = (struct msm_vpe_resp *)(qcmd->command);
+		se.resptype = MSM_CAM_RESP_STEREO_OP_2;
+		se.stats_event.type   = vpe_data->evt_msg.type;
+		se.stats_event.msg_id = vpe_data->evt_msg.msg_id;
+		se.stats_event.len    = vpe_data->evt_msg.len;
+
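+		/* Note: 'data' is still NULL here; msm_divert_st_frame does
+		 * not dereference it on the ST_L/ST_R paths, which only set
+		 * the output type before copying the result to userspace.
+		 */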
+		if (vpe_data->type == VPE_MSG_OUTPUT_ST_L) {
+			CDBG("%s: Change msg_id to OUTPUT_TYPE_ST_L\n",
+				__func__);
+			se.stats_event.msg_id = OUTPUT_TYPE_ST_L;
+			rc = msm_divert_st_frame(sync, data, &se,
+				OUTPUT_TYPE_V);
+		} else if (vpe_data->type == VPE_MSG_OUTPUT_ST_R) {
+			CDBG("%s: Change msg_id to OUTPUT_TYPE_ST_R\n",
+				__func__);
+			se.stats_event.msg_id = OUTPUT_TYPE_ST_R;
+			rc = msm_divert_st_frame(sync, data, &se,
+				OUTPUT_TYPE_V);
+		} else {
+			pr_warning("%s: invalid vpe_data->type = %d\n",
+				__func__, vpe_data->type);
+		}
+		break;
+
+	case MSM_CAM_Q_VFE_EVT:
+	case MSM_CAM_Q_VFE_MSG:
+		data = (struct msm_vfe_resp *)(qcmd->command);
+
+		/* adsp event and message */
+		se.resptype = MSM_CAM_RESP_STAT_EVT_MSG;
+
+		/* 0 - msg from aDSP, 1 - event from mARM */
+		se.stats_event.type   = data->evt_msg.type;
+		se.stats_event.msg_id = data->evt_msg.msg_id;
+		se.stats_event.len    = data->evt_msg.len;
+		se.stats_event.frame_id = data->evt_msg.frame_id;
+
+		CDBG("%s: qcmd->type %d length %d msd_id %d\n", __func__,
+			qcmd->type,
+			se.stats_event.len,
+			se.stats_event.msg_id);
+
+		if (data->type == VFE_MSG_COMMON) {
+			stats.status_bits = data->stats_msg.status_bits;
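+			/* Translate each reported stats buffer from its
+			 * physical address back to the userspace address
+			 * registered through pmem; any failed lookup aborts
+			 * the whole event.
+			 */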
+			if (data->stats_msg.aec_buff) {
+				stats.aec.buff =
+				msm_pmem_stats_ptov_lookup(sync,
+						data->stats_msg.aec_buff,
+						&(stats.aec.fd));
+				if (!stats.aec.buff) {
+					pr_err("%s: msm_pmem_stats_ptov_lookup error\n",
+						__func__);
+					rc = -EINVAL;
+					goto failure;
+				}
+
+			} else {
+				stats.aec.buff = 0;
+			}
+			if (data->stats_msg.awb_buff) {
+				stats.awb.buff =
+				msm_pmem_stats_ptov_lookup(sync,
+						data->stats_msg.awb_buff,
+						&(stats.awb.fd));
+				if (!stats.awb.buff) {
+					pr_err("%s: msm_pmem_stats_ptov_lookup error\n",
+						__func__);
+					rc = -EINVAL;
+					goto failure;
+				}
+
+			} else {
+				stats.awb.buff = 0;
+			}
+			if (data->stats_msg.af_buff) {
+				stats.af.buff =
+				msm_pmem_stats_ptov_lookup(sync,
+						data->stats_msg.af_buff,
+						&(stats.af.fd));
+				if (!stats.af.buff) {
+					pr_err("%s: msm_pmem_stats_ptov_lookup error\n",
+						__func__);
+					rc = -EINVAL;
+					goto failure;
+				}
+
+			} else {
+				stats.af.buff = 0;
+			}
+			if (data->stats_msg.ihist_buff) {
+				stats.ihist.buff =
+				msm_pmem_stats_ptov_lookup(sync,
+						data->stats_msg.ihist_buff,
+						&(stats.ihist.fd));
+				if (!stats.ihist.buff) {
+					pr_err("%s: msm_pmem_stats_ptov_lookup error\n",
+						__func__);
+					rc = -EINVAL;
+					goto failure;
+				}
+
+			} else {
+				stats.ihist.buff = 0;
+			}
+
+			if (data->stats_msg.rs_buff) {
+				stats.rs.buff =
+				msm_pmem_stats_ptov_lookup(sync,
+						data->stats_msg.rs_buff,
+						&(stats.rs.fd));
+				if (!stats.rs.buff) {
+					pr_err("%s: msm_pmem_stats_ptov_lookup error\n",
+						__func__);
+					rc = -EINVAL;
+					goto failure;
+				}
+
+			} else {
+				stats.rs.buff = 0;
+			}
+
+			if (data->stats_msg.cs_buff) {
+				stats.cs.buff =
+				msm_pmem_stats_ptov_lookup(sync,
+						data->stats_msg.cs_buff,
+						&(stats.cs.fd));
+				if (!stats.cs.buff) {
+					pr_err("%s: msm_pmem_stats_ptov_lookup error\n",
+						__func__);
+					rc = -EINVAL;
+					goto failure;
+				}
+			} else {
+				stats.cs.buff = 0;
+			}
+
+			se.stats_event.frame_id = data->phy.frame_id;
+			if (copy_to_user((void *)(se.stats_event.data),
+					&stats,
+					sizeof(struct msm_stats_buf))) {
+				ERR_COPY_TO_USER();
+				rc = -EFAULT;
+				goto failure;
+			}
+		} else if ((data->type >= VFE_MSG_STATS_AEC) &&
+			(data->type <= VFE_MSG_STATS_WE)) {
+			/* the check above covers all stats types. */
+			stats.buffer =
+				msm_pmem_stats_ptov_lookup(sync,
+						data->phy.sbuf_phy,
+						&(stats.fd));
+			if (!stats.buffer) {
+				pr_err("%s: msm_pmem_stats_ptov_lookup error\n",
+					__func__);
+				rc = -EINVAL;
+				goto failure;
+			}
+			se.stats_event.frame_id = data->phy.frame_id;
+			if (copy_to_user((void *)(se.stats_event.data),
+					&stats,
+					sizeof(struct msm_stats_buf))) {
+				ERR_COPY_TO_USER();
+				rc = -EFAULT;
+				goto failure;
+			}
+		} else if ((data->evt_msg.len > 0) &&
+				(data->type == VFE_MSG_GENERAL)) {
+			if (copy_to_user((void *)(se.stats_event.data),
+					data->evt_msg.data,
+					data->evt_msg.len)) {
+				ERR_COPY_TO_USER();
+				rc = -EFAULT;
+				goto failure;
+			}
+		} else {
+			if (sync->stereocam_enabled) {
+				if (data->type == VFE_MSG_OUTPUT_P) {
+					CDBG("%s: Preview mark as st op 1\n",
+						__func__);
+					se.resptype = MSM_CAM_RESP_STEREO_OP_1;
+					rc = msm_divert_st_frame(sync, data,
+						&se, OUTPUT_TYPE_P);
+					break;
+				} else if (data->type == VFE_MSG_OUTPUT_V) {
+					CDBG("%s: Video mark as st op 2\n",
+						__func__);
+					se.resptype = MSM_CAM_RESP_STEREO_OP_2;
+					rc = msm_divert_st_frame(sync, data,
+						&se, OUTPUT_TYPE_V);
+					break;
+				} else if (data->type == VFE_MSG_OUTPUT_S) {
+					CDBG("%s: Main img mark as st op 2\n",
+						__func__);
+					se.resptype = MSM_CAM_RESP_STEREO_OP_2;
+					rc = msm_divert_st_frame(sync, data,
+						&se, OUTPUT_TYPE_S);
+					break;
+				} else if (data->type == VFE_MSG_OUTPUT_T) {
+					CDBG("%s: Thumb img mark as st op 2\n",
+						__func__);
+					se.resptype = MSM_CAM_RESP_STEREO_OP_2;
+					rc = msm_divert_st_frame(sync, data,
+						&se, OUTPUT_TYPE_T);
+					break;
+				} else
+					CDBG("%s: VFE_MSG Fall Through\n",
+						__func__);
+			}
+			if ((sync->pp_frame_avail == 1) &&
+				(sync->pp_mask & PP_PREV) &&
+				(data->type == VFE_MSG_OUTPUT_P)) {
+					CDBG("%s:%d:preiew PP\n",
+					__func__, __LINE__);
+					se.stats_event.frame_id =
+							data->phy.frame_id;
+					rc = msm_divert_frame(sync, data, &se);
+					sync->pp_frame_avail = 0;
+			} else {
+				if ((sync->pp_mask & PP_PREV) &&
+					(data->type == VFE_MSG_OUTPUT_P)) {
+					se.stats_event.frame_id =
+							data->phy.frame_id;
+					free_qcmd(qcmd);
+					return 0;
+				} else
+					CDBG("%s:indication type is %d\n",
+						__func__, data->type);
+			}
+			if (sync->pp_mask & PP_SNAP)
+				if (data->type == VFE_MSG_OUTPUT_S ||
+					data->type == VFE_MSG_OUTPUT_T)
+					rc = msm_divert_frame(sync, data, &se);
+		}
+		break;
+
+	case MSM_CAM_Q_CTRL:
+		/* control command from control thread */
+		ctrl = (struct msm_ctrl_cmd *)(qcmd->command);
+
+		CDBG("%s: qcmd->type %d length %d\n", __func__,
+			qcmd->type, ctrl->length);
+
+		if (ctrl->length > 0) {
+			if (copy_to_user((void *)(se.ctrl_cmd.value),
+						ctrl->value,
+						ctrl->length)) {
+				ERR_COPY_TO_USER();
+				rc = -EFAULT;
+				goto failure;
+			}
+		}
+
+		se.resptype = MSM_CAM_RESP_CTRL;
+
+		/* what to control */
+		se.ctrl_cmd.type = ctrl->type;
+		se.ctrl_cmd.length = ctrl->length;
+		se.ctrl_cmd.resp_fd = ctrl->resp_fd;
+		break;
+
+	case MSM_CAM_Q_V4L2_REQ:
+		/* control command from v4l2 client */
+		ctrl = (struct msm_ctrl_cmd *)(qcmd->command);
+		if (ctrl->length > 0) {
+			if (copy_to_user((void *)(se.ctrl_cmd.value),
+					ctrl->value, ctrl->length)) {
+				ERR_COPY_TO_USER();
+				rc = -EFAULT;
+				goto failure;
+			}
+		}
+
+		/* MSM_CAM_RESP_V4L2 tells the config thread that this is
+		 * a v4l2 request */
+		se.resptype = MSM_CAM_RESP_V4L2;
+
+		/* what to control */
+		se.ctrl_cmd.type   = ctrl->type;
+		se.ctrl_cmd.length = ctrl->length;
+		break;
+
+	default:
+		rc = -EFAULT;
+		goto failure;
+	} /* switch qcmd->type */
+	if (copy_to_user((void *)arg, &se, sizeof(se))) {
+		ERR_COPY_TO_USER();
+		rc = -EFAULT;
+		goto failure;
+	}
+
+failure:
+	free_qcmd(qcmd);
+
+	CDBG("%s: %d\n", __func__, rc);
+	return rc;
+}
+
+static int msm_ctrl_cmd_done(struct msm_control_device *ctrl_pmsm,
+		void __user *arg)
+{
+	void __user *uptr;
+	struct msm_queue_cmd *qcmd = &ctrl_pmsm->qcmd;
+	struct msm_ctrl_cmd *command = &ctrl_pmsm->ctrl;
+
+	if (copy_from_user(command, arg, sizeof(*command))) {
+		ERR_COPY_FROM_USER();
+		return -EFAULT;
+	}
+
+	atomic_set(&qcmd->on_heap, 0);
+	qcmd->command = command;
+	uptr = command->value;
+
+	if (command->length > 0) {
+		command->value = ctrl_pmsm->ctrl_data;
+		if (command->length > sizeof(ctrl_pmsm->ctrl_data)) {
+			pr_err("%s: user data %d is too big (max %d)\n",
+				__func__, command->length,
+				sizeof(ctrl_pmsm->ctrl_data));
+			return -EINVAL;
+		}
+
+		if (copy_from_user(command->value,
+					uptr,
+					command->length)) {
+			ERR_COPY_FROM_USER();
+			return -EFAULT;
+		}
+	} else
+		command->value = NULL;
+
+	/* Ignore the command if the ctrl cmd returned early because the
+	 * waiter was signaled (wait_event error -512, i.e. -ERESTARTSYS,
+	 * in __msm_control). */
+	if (ctrl_pmsm->pmsm->sync->ignore_qcmd == true &&
+	   ctrl_pmsm->pmsm->sync->ignore_qcmd_type == (int16_t)command->type) {
+		ctrl_pmsm->pmsm->sync->ignore_qcmd = false;
+		ctrl_pmsm->pmsm->sync->ignore_qcmd_type = -1;
+	} else /* wake up control thread */
+		msm_enqueue(&ctrl_pmsm->ctrl_q, &qcmd->list_control);
+
+	return 0;
+}
+
+static int msm_config_vpe(struct msm_sync *sync, void __user *arg)
+{
+	struct msm_vpe_cfg_cmd cfgcmd;
+	if (copy_from_user(&cfgcmd, arg, sizeof(cfgcmd))) {
+		ERR_COPY_FROM_USER();
+		return -EFAULT;
+	}
+	CDBG("%s: cmd_type %s\n", __func__, vfe_config_cmd[cfgcmd.cmd_type]);
+	switch (cfgcmd.cmd_type) {
+	case CMD_VPE:
+		return sync->vpefn.vpe_config(&cfgcmd, NULL);
+	default:
+		pr_err("%s: unknown command type %d\n",
+			__func__, cfgcmd.cmd_type);
+	}
+	return -EINVAL;
+}
+
+static int msm_config_vfe(struct msm_sync *sync, void __user *arg)
+{
+	struct msm_vfe_cfg_cmd cfgcmd;
+	struct msm_pmem_region region[8];
+	struct axidata axi_data;
+
+	if (!sync->vfefn.vfe_config) {
+		pr_err("%s: no vfe_config!\n", __func__);
+		return -EIO;
+	}
+
+	if (copy_from_user(&cfgcmd, arg, sizeof(cfgcmd))) {
+		ERR_COPY_FROM_USER();
+		return -EFAULT;
+	}
+
+	memset(&axi_data, 0, sizeof(axi_data));
+	CDBG("%s: cmd_type %s\n", __func__, vfe_config_cmd[cfgcmd.cmd_type]);
+	switch (cfgcmd.cmd_type) {
+	case CMD_STATS_ENABLE:
+		axi_data.bufnum1 =
+			msm_pmem_region_lookup(&sync->pmem_stats,
+				MSM_PMEM_AEC_AWB, &region[0],
+				NUM_STAT_OUTPUT_BUFFERS,
+				&sync->pmem_stats_spinlock);
+		axi_data.bufnum2 =
+			msm_pmem_region_lookup(&sync->pmem_stats,
+				MSM_PMEM_AF, &region[axi_data.bufnum1],
+				NUM_STAT_OUTPUT_BUFFERS,
+				&sync->pmem_stats_spinlock);
+		if (!axi_data.bufnum1 || !axi_data.bufnum2) {
+			pr_err("%s: pmem region lookup error\n", __func__);
+			return -EINVAL;
+		}
+		axi_data.region = &region[0];
+		return sync->vfefn.vfe_config(&cfgcmd, &axi_data);
+	case CMD_STATS_AF_ENABLE:
+		axi_data.bufnum1 =
+			msm_pmem_region_lookup(&sync->pmem_stats,
+				MSM_PMEM_AF, &region[0],
+				NUM_STAT_OUTPUT_BUFFERS,
+				&sync->pmem_stats_spinlock);
+		if (!axi_data.bufnum1) {
+			pr_err("%s %d: pmem region lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		axi_data.region = &region[0];
+		return sync->vfefn.vfe_config(&cfgcmd, &axi_data);
+	case CMD_STATS_AEC_AWB_ENABLE:
+		axi_data.bufnum1 =
+			msm_pmem_region_lookup(&sync->pmem_stats,
+				MSM_PMEM_AEC_AWB, &region[0],
+				NUM_STAT_OUTPUT_BUFFERS,
+				&sync->pmem_stats_spinlock);
+		if (!axi_data.bufnum1) {
+			pr_err("%s %d: pmem region lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		axi_data.region = &region[0];
+		return sync->vfefn.vfe_config(&cfgcmd, &axi_data);
+	case CMD_STATS_AEC_ENABLE:
+		axi_data.bufnum1 =
+			msm_pmem_region_lookup(&sync->pmem_stats,
+			MSM_PMEM_AEC, &region[0],
+			NUM_STAT_OUTPUT_BUFFERS,
+			&sync->pmem_stats_spinlock);
+		if (!axi_data.bufnum1) {
+			pr_err("%s %d: pmem region lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		axi_data.region = &region[0];
+		return sync->vfefn.vfe_config(&cfgcmd, &axi_data);
+	case CMD_STATS_AWB_ENABLE:
+		axi_data.bufnum1 =
+			msm_pmem_region_lookup(&sync->pmem_stats,
+			MSM_PMEM_AWB, &region[0],
+			NUM_STAT_OUTPUT_BUFFERS,
+			&sync->pmem_stats_spinlock);
+		if (!axi_data.bufnum1) {
+			pr_err("%s %d: pmem region lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		axi_data.region = &region[0];
+		return sync->vfefn.vfe_config(&cfgcmd, &axi_data);
+
+	case CMD_STATS_IHIST_ENABLE:
+		axi_data.bufnum1 =
+			msm_pmem_region_lookup(&sync->pmem_stats,
+			MSM_PMEM_IHIST, &region[0],
+			NUM_STAT_OUTPUT_BUFFERS,
+			&sync->pmem_stats_spinlock);
+		if (!axi_data.bufnum1) {
+			pr_err("%s %d: pmem region lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		axi_data.region = &region[0];
+		return sync->vfefn.vfe_config(&cfgcmd, &axi_data);
+
+	case CMD_STATS_RS_ENABLE:
+		axi_data.bufnum1 =
+			msm_pmem_region_lookup(&sync->pmem_stats,
+			MSM_PMEM_RS, &region[0],
+			NUM_STAT_OUTPUT_BUFFERS,
+			&sync->pmem_stats_spinlock);
+		if (!axi_data.bufnum1) {
+			pr_err("%s %d: pmem region lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		axi_data.region = &region[0];
+		return sync->vfefn.vfe_config(&cfgcmd, &axi_data);
+
+	case CMD_STATS_CS_ENABLE:
+		axi_data.bufnum1 =
+			msm_pmem_region_lookup(&sync->pmem_stats,
+			MSM_PMEM_CS, &region[0],
+			NUM_STAT_OUTPUT_BUFFERS,
+			&sync->pmem_stats_spinlock);
+		if (!axi_data.bufnum1) {
+			pr_err("%s %d: pmem region lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		axi_data.region = &region[0];
+		return sync->vfefn.vfe_config(&cfgcmd, &axi_data);
+
+	case CMD_GENERAL:
+	case CMD_STATS_DISABLE:
+		return sync->vfefn.vfe_config(&cfgcmd, NULL);
+	default:
+		pr_err("%s: unknown command type %d\n",
+			__func__, cfgcmd.cmd_type);
+	}
+
+	return -EINVAL;
+}
+
+static int msm_vpe_frame_cfg(struct msm_sync *sync,
+				void *cfgcmdin)
+{
+	int rc = -EIO;
+	struct axidata axi_data;
+	void *data = &axi_data;
+	struct msm_pmem_region region[8];
+	int pmem_type;
+
+	struct msm_vpe_cfg_cmd *cfgcmd;
+	cfgcmd = (struct msm_vpe_cfg_cmd *)cfgcmdin;
+
+	memset(&axi_data, 0, sizeof(axi_data));
+	CDBG("In vpe_frame_cfg cfgcmd->cmd_type = %s\n",
+		vfe_config_cmd[cfgcmd->cmd_type]);
+	switch (cfgcmd->cmd_type) {
+	case CMD_AXI_CFG_VPE:
+		pmem_type = MSM_PMEM_VIDEO_VPE;
+		axi_data.bufnum1 =
+			msm_pmem_region_lookup_2(&sync->pmem_frames, pmem_type,
+				&region[0], 8, &sync->pmem_frame_spinlock);
+		CDBG("axi_data.bufnum1 = %d\n", axi_data.bufnum1);
+		if (!axi_data.bufnum1) {
+			pr_err("%s %d: pmem region lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		pmem_type = MSM_PMEM_VIDEO;
+		break;
+	case CMD_AXI_CFG_SNAP_THUMB_VPE:
+		CDBG("%s: CMD_AXI_CFG_SNAP_THUMB_VPE", __func__);
+		pmem_type = MSM_PMEM_THUMBNAIL_VPE;
+		axi_data.bufnum1 =
+			msm_pmem_region_lookup(&sync->pmem_frames, pmem_type,
+			&region[0], 8, &sync->pmem_frame_spinlock);
+		if (!axi_data.bufnum1) {
+			pr_err("%s: THUMBNAIL_VPE pmem region lookup error\n",
+				__func__);
+			return -EINVAL;
+		}
+		break;
+	case CMD_AXI_CFG_SNAP_VPE:
+		CDBG("%s: CMD_AXI_CFG_SNAP_VPE", __func__);
+		pmem_type = MSM_PMEM_MAINIMG_VPE;
+		axi_data.bufnum1 =
+			msm_pmem_region_lookup(&sync->pmem_frames, pmem_type,
+				&region[0], 8, &sync->pmem_frame_spinlock);
+		if (!axi_data.bufnum1) {
+			pr_err("%s: MAINIMG_VPE pmem region lookup error\n",
+				__func__);
+			return -EINVAL;
+		}
+		break;
+	default:
+		pr_err("%s: unknown command type %d\n",
+			__func__, cfgcmd->cmd_type);
+		break;
+	}
+	axi_data.region = &region[0];
+	CDBG("out vpe_frame_cfg cfgcmd->cmd_type = %s\n",
+		vfe_config_cmd[cfgcmd->cmd_type]);
+	/* send the AXI configuration command to driver */
+	if (sync->vpefn.vpe_config)
+		rc = sync->vpefn.vpe_config(cfgcmd, data);
+	return rc;
+}
+
+static int msm_frame_axi_cfg(struct msm_sync *sync,
+		struct msm_vfe_cfg_cmd *cfgcmd)
+{
+	int rc = -EIO;
+	struct axidata axi_data;
+	void *data = &axi_data;
+	struct msm_pmem_region region[MAX_PMEM_CFG_BUFFERS];
+	int pmem_type;
+
+	memset(&axi_data, 0, sizeof(axi_data));
+
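+	/* Collect the pmem regions for the requested output path; bufnum1,
+	 * bufnum2 and bufnum3 partition region[] between the streams of
+	 * that mode.
+	 */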
+	switch (cfgcmd->cmd_type) {
+
+	case CMD_AXI_CFG_PREVIEW:
+		pmem_type = MSM_PMEM_PREVIEW;
+		axi_data.bufnum2 =
+			msm_pmem_region_lookup(&sync->pmem_frames, pmem_type,
+				&region[0], MAX_PMEM_CFG_BUFFERS,
+				&sync->pmem_frame_spinlock);
+		if (!axi_data.bufnum2) {
+			pr_err("%s %d: pmem region lookup error (empty %d)\n",
+				__func__, __LINE__,
+				hlist_empty(&sync->pmem_frames));
+			return -EINVAL;
+		}
+		break;
+
+	case CMD_AXI_CFG_VIDEO:
+		pmem_type = MSM_PMEM_PREVIEW;
+		axi_data.bufnum1 =
+			msm_pmem_region_lookup(&sync->pmem_frames, pmem_type,
+				&region[0], MAX_PMEM_CFG_BUFFERS,
+				&sync->pmem_frame_spinlock);
+		if (!axi_data.bufnum1) {
+			pr_err("%s %d: pmem region lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+
+		pmem_type = MSM_PMEM_VIDEO;
+		axi_data.bufnum2 =
+			msm_pmem_region_lookup(&sync->pmem_frames, pmem_type,
+				&region[axi_data.bufnum1],
+				(MAX_PMEM_CFG_BUFFERS-(axi_data.bufnum1)),
+				&sync->pmem_frame_spinlock);
+		if (!axi_data.bufnum2) {
+			pr_err("%s %d: pmem region lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		break;
+
+	case CMD_AXI_CFG_SNAP:
+		CDBG("%s, CMD_AXI_CFG_SNAP, type=%d\n", __func__,
+			cfgcmd->cmd_type);
+		pmem_type = MSM_PMEM_THUMBNAIL;
+		axi_data.bufnum1 =
+			msm_pmem_region_lookup(&sync->pmem_frames, pmem_type,
+				&region[0], MAX_PMEM_CFG_BUFFERS,
+				&sync->pmem_frame_spinlock);
+		if (!axi_data.bufnum1) {
+			pr_err("%s %d: pmem region lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+
+		pmem_type = MSM_PMEM_MAINIMG;
+		axi_data.bufnum2 =
+			msm_pmem_region_lookup(&sync->pmem_frames, pmem_type,
+				&region[axi_data.bufnum1],
+				(MAX_PMEM_CFG_BUFFERS-(axi_data.bufnum1)),
+				 &sync->pmem_frame_spinlock);
+		if (!axi_data.bufnum2) {
+			pr_err("%s %d: pmem region lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		break;
+
+	case CMD_AXI_CFG_ZSL:
+		CDBG("%s, CMD_AXI_CFG_ZSL, type = %d\n", __func__,
+			cfgcmd->cmd_type);
+		pmem_type = MSM_PMEM_PREVIEW;
+		axi_data.bufnum1 =
+			msm_pmem_region_lookup(&sync->pmem_frames, pmem_type,
+				&region[0], MAX_PMEM_CFG_BUFFERS,
+				&sync->pmem_frame_spinlock);
+		if (!axi_data.bufnum1) {
+			pr_err("%s %d: pmem region lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+
+		pmem_type = MSM_PMEM_THUMBNAIL;
+		axi_data.bufnum2 =
+			msm_pmem_region_lookup(&sync->pmem_frames, pmem_type,
+				&region[axi_data.bufnum1],
+				(MAX_PMEM_CFG_BUFFERS-(axi_data.bufnum1)),
+				 &sync->pmem_frame_spinlock);
+		if (!axi_data.bufnum2) {
+			pr_err("%s %d: pmem region lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+
+		pmem_type = MSM_PMEM_MAINIMG;
+		axi_data.bufnum3 =
+			msm_pmem_region_lookup(&sync->pmem_frames, pmem_type,
+				&region[axi_data.bufnum1 + axi_data.bufnum2],
+				(MAX_PMEM_CFG_BUFFERS - axi_data.bufnum1 -
+				axi_data.bufnum2), &sync->pmem_frame_spinlock);
+		if (!axi_data.bufnum3) {
+			pr_err("%s %d: pmem region lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		break;
+
+	case CMD_RAW_PICT_AXI_CFG:
+		pmem_type = MSM_PMEM_RAW_MAINIMG;
+		axi_data.bufnum2 =
+			msm_pmem_region_lookup(&sync->pmem_frames, pmem_type,
+				&region[0], MAX_PMEM_CFG_BUFFERS,
+				&sync->pmem_frame_spinlock);
+		if (!axi_data.bufnum2) {
+			pr_err("%s %d: pmem region lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		break;
+
+	case CMD_GENERAL:
+		data = NULL;
+		break;
+
+	default:
+		pr_err("%s: unknown command type %d\n",
+			__func__, cfgcmd->cmd_type);
+		return -EINVAL;
+	}
+
+	axi_data.region = &region[0];
+
+	/* send the AXI configuration command to driver */
+	if (sync->vfefn.vfe_config)
+		rc = sync->vfefn.vfe_config(cfgcmd, data);
+
+	return rc;
+}
+
+static int msm_get_sensor_info(struct msm_sync *sync, void __user *arg)
+{
+	int rc = 0;
+	struct msm_camsensor_info info;
+	struct msm_camera_sensor_info *sdata;
+
+	if (copy_from_user(&info,
+			arg,
+			sizeof(struct msm_camsensor_info))) {
+		ERR_COPY_FROM_USER();
+		return -EFAULT;
+	}
+
+	sdata = sync->pdev->dev.platform_data;
+	if (sync->sctrl.s_camera_type == BACK_CAMERA_3D)
+		info.support_3d = true;
+	else
+		info.support_3d = false;
+	memcpy(&info.name[0],
+		sdata->sensor_name,
+		MAX_SENSOR_NAME);
+	info.flash_enabled = sdata->flash_data->flash_type !=
+		MSM_CAMERA_FLASH_NONE;
+
+	/* copy back to user space */
+	if (copy_to_user((void *)arg,
+			&info,
+			sizeof(struct msm_camsensor_info))) {
+		ERR_COPY_TO_USER();
+		rc = -EFAULT;
+	}
+
+	return rc;
+}
+
+static int msm_get_camera_info(void __user *arg)
+{
+	int rc = 0;
+	int i = 0;
+	struct msm_camera_info info;
+
+	if (copy_from_user(&info, arg, sizeof(struct msm_camera_info))) {
+		ERR_COPY_FROM_USER();
+		return -EFAULT;
+	}
+
+	CDBG("%s: camera_node %d\n", __func__, camera_node);
+	info.num_cameras = camera_node;
+
+	for (i = 0; i < camera_node; i++) {
+		info.has_3d_support[i] = 0;
+		info.is_internal_cam[i] = 0;
+		info.s_mount_angle[i] = sensor_mount_angle[i];
+		switch (camera_type[i]) {
+		case FRONT_CAMERA_2D:
+			info.is_internal_cam[i] = 1;
+			break;
+		case BACK_CAMERA_3D:
+			info.has_3d_support[i] = 1;
+			break;
+		case BACK_CAMERA_2D:
+		default:
+			break;
+		}
+	}
+	/* copy back to user space */
+	if (copy_to_user((void *)arg, &info, sizeof(struct msm_camera_info))) {
+		ERR_COPY_TO_USER();
+		rc = -EFAULT;
+	}
+	return rc;
+}
+
+static int __msm_put_frame_buf(struct msm_sync *sync,
+		struct msm_frame *pb)
+{
+	unsigned long pphy;
+	struct msm_vfe_cfg_cmd cfgcmd;
+
+	int rc = -EIO;
+
+	/* Change the active flag. */
+	pphy = msm_pmem_frame_vtop_lookup(sync,
+		pb->buffer,
+		pb->y_off, pb->cbcr_off, pb->fd, 1);
+
+	if (pphy != 0) {
+		CDBG("%s: rel: vaddr %lx, paddr %lx\n",
+			__func__,
+			pb->buffer, pphy);
+		cfgcmd.cmd_type = CMD_FRAME_BUF_RELEASE;
+		cfgcmd.value    = (void *)pb;
+		if (sync->vfefn.vfe_config)
+			rc = sync->vfefn.vfe_config(&cfgcmd, &pphy);
+	} else {
+		pr_err("%s: msm_pmem_frame_vtop_lookup failed\n",
+			__func__);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static int __msm_put_pic_buf(struct msm_sync *sync,
+		struct msm_frame *pb)
+{
+	unsigned long pphy;
+	struct msm_vfe_cfg_cmd cfgcmd;
+
+	int rc = -EIO;
+
+	pphy = msm_pmem_frame_vtop_lookup(sync,
+		pb->buffer,
+		pb->y_off, pb->cbcr_off, pb->fd, 1);
+
+	if (pphy != 0) {
+		CDBG("%s: rel: vaddr %lx, paddr %lx\n",
+			__func__,
+			pb->buffer, pphy);
+		cfgcmd.cmd_type = CMD_SNAP_BUF_RELEASE;
+		cfgcmd.value    = (void *)pb;
+		if (sync->vfefn.vfe_config)
+			rc = sync->vfefn.vfe_config(&cfgcmd, &pphy);
+	} else {
+		pr_err("%s: msm_pmem_frame_vtop_lookup failed\n",
+			__func__);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static int msm_put_frame_buffer(struct msm_sync *sync, void __user *arg)
+{
+	struct msm_frame buf_t;
+
+	if (copy_from_user(&buf_t,
+				arg,
+				sizeof(struct msm_frame))) {
+		ERR_COPY_FROM_USER();
+		return -EFAULT;
+	}
+
+	return __msm_put_frame_buf(sync, &buf_t);
+}
+
+static int msm_put_pic_buffer(struct msm_sync *sync, void __user *arg)
+{
+	struct msm_frame buf_t;
+
+	if (copy_from_user(&buf_t,
+				arg,
+				sizeof(struct msm_frame))) {
+		ERR_COPY_FROM_USER();
+		return -EFAULT;
+	}
+
+	return __msm_put_pic_buf(sync, &buf_t);
+}
+
+static int __msm_register_pmem(struct msm_sync *sync,
+		struct msm_pmem_info *pinfo)
+{
+	int rc = 0;
+
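+	/* Frame-type buffers go on the pmem_frames table and statistics
+	 * buffers on pmem_stats, each protected by its own spinlock.
+	 */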
+	switch (pinfo->type) {
+	case MSM_PMEM_VIDEO:
+	case MSM_PMEM_PREVIEW:
+	case MSM_PMEM_THUMBNAIL:
+	case MSM_PMEM_MAINIMG:
+	case MSM_PMEM_RAW_MAINIMG:
+	case MSM_PMEM_VIDEO_VPE:
+	case MSM_PMEM_C2D:
+	case MSM_PMEM_MAINIMG_VPE:
+	case MSM_PMEM_THUMBNAIL_VPE:
+		rc = msm_pmem_table_add(&sync->pmem_frames, pinfo,
+			&sync->pmem_frame_spinlock, sync);
+		break;
+
+	case MSM_PMEM_AEC_AWB:
+	case MSM_PMEM_AF:
+	case MSM_PMEM_AEC:
+	case MSM_PMEM_AWB:
+	case MSM_PMEM_RS:
+	case MSM_PMEM_CS:
+	case MSM_PMEM_IHIST:
+	case MSM_PMEM_SKIN:
+
+		rc = msm_pmem_table_add(&sync->pmem_stats, pinfo,
+			 &sync->pmem_stats_spinlock, sync);
+		break;
+
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static int msm_register_pmem(struct msm_sync *sync, void __user *arg)
+{
+	struct msm_pmem_info info;
+
+	if (copy_from_user(&info, arg, sizeof(info))) {
+		ERR_COPY_FROM_USER();
+		return -EFAULT;
+	}
+
+	return __msm_register_pmem(sync, &info);
+}
+
+static int msm_stats_axi_cfg(struct msm_sync *sync,
+		struct msm_vfe_cfg_cmd *cfgcmd)
+{
+	int rc = -EIO;
+	struct axidata axi_data;
+	void *data = &axi_data;
+
+	struct msm_pmem_region region[3];
+	int pmem_type = MSM_PMEM_MAX;
+
+	memset(&axi_data, 0, sizeof(axi_data));
+
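+	/* Map the stats command to the pmem buffer type whose regions are
+	 * handed to the VFE; CMD_GENERAL carries no buffer payload.
+	 */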
+	switch (cfgcmd->cmd_type) {
+	case CMD_STATS_AXI_CFG:
+		pmem_type = MSM_PMEM_AEC_AWB;
+		break;
+	case CMD_STATS_AF_AXI_CFG:
+		pmem_type = MSM_PMEM_AF;
+		break;
+	case CMD_GENERAL:
+		data = NULL;
+		break;
+	default:
+		pr_err("%s: unknown command type %d\n",
+			__func__, cfgcmd->cmd_type);
+		return -EINVAL;
+	}
+
+	if (cfgcmd->cmd_type != CMD_GENERAL) {
+		axi_data.bufnum1 =
+			msm_pmem_region_lookup(&sync->pmem_stats, pmem_type,
+				&region[0], NUM_STAT_OUTPUT_BUFFERS,
+				&sync->pmem_stats_spinlock);
+		if (!axi_data.bufnum1) {
+			pr_err("%s %d: pmem region lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		axi_data.region = &region[0];
+	}
+
+	/* send the AEC/AWB STATS configuration command to driver */
+	if (sync->vfefn.vfe_config)
+		rc = sync->vfefn.vfe_config(cfgcmd, &axi_data);
+
+	return rc;
+}
+
+static int msm_put_stats_buffer(struct msm_sync *sync, void __user *arg)
+{
+	int rc = -EIO;
+
+	struct msm_stats_buf buf;
+	unsigned long pphy;
+	struct msm_vfe_cfg_cmd cfgcmd;
+
+	if (copy_from_user(&buf, arg,
+				sizeof(struct msm_stats_buf))) {
+		ERR_COPY_FROM_USER();
+		return -EFAULT;
+	}
+
+	CDBG("%s\n", __func__);
+	pphy = msm_pmem_stats_vtop_lookup(sync, buf.buffer, buf.fd);
+
+	if (pphy != 0) {
+		if (buf.type == STAT_AEAW)
+			cfgcmd.cmd_type = CMD_STATS_BUF_RELEASE;
+		else if (buf.type == STAT_AF)
+			cfgcmd.cmd_type = CMD_STATS_AF_BUF_RELEASE;
+		else if (buf.type == STAT_AEC)
+			cfgcmd.cmd_type = CMD_STATS_AEC_BUF_RELEASE;
+		else if (buf.type == STAT_AWB)
+			cfgcmd.cmd_type = CMD_STATS_AWB_BUF_RELEASE;
+		else if (buf.type == STAT_IHIST)
+			cfgcmd.cmd_type = CMD_STATS_IHIST_BUF_RELEASE;
+		else if (buf.type == STAT_RS)
+			cfgcmd.cmd_type = CMD_STATS_RS_BUF_RELEASE;
+		else if (buf.type == STAT_CS)
+			cfgcmd.cmd_type = CMD_STATS_CS_BUF_RELEASE;
+
+		else {
+			pr_err("%s: invalid buf type %d\n",
+				__func__,
+				buf.type);
+			rc = -EINVAL;
+			goto put_done;
+		}
+
+		cfgcmd.value = (void *)&buf;
+
+		if (sync->vfefn.vfe_config) {
+			rc = sync->vfefn.vfe_config(&cfgcmd, &pphy);
+			if (rc < 0)
+				pr_err("%s: vfe_config error %d\n",
+					__func__, rc);
+		} else
+			pr_err("%s: vfe_config is NULL\n", __func__);
+	} else {
+		pr_err("%s: NULL physical address\n", __func__);
+		rc = -EINVAL;
+	}
+
+put_done:
+	return rc;
+}
+
+static int msm_axi_config(struct msm_sync *sync, void __user *arg)
+{
+	struct msm_vfe_cfg_cmd cfgcmd;
+
+	if (copy_from_user(&cfgcmd, arg, sizeof(cfgcmd))) {
+		ERR_COPY_FROM_USER();
+		return -EFAULT;
+	}
+
+	switch (cfgcmd.cmd_type) {
+	case CMD_AXI_CFG_VIDEO:
+	case CMD_AXI_CFG_PREVIEW:
+	case CMD_AXI_CFG_SNAP:
+	case CMD_RAW_PICT_AXI_CFG:
+	case CMD_AXI_CFG_ZSL:
+		CDBG("%s, cfgcmd.cmd_type = %d\n", __func__, cfgcmd.cmd_type);
+		return msm_frame_axi_cfg(sync, &cfgcmd);
+
+	case CMD_AXI_CFG_VPE:
+	case CMD_AXI_CFG_SNAP_VPE:
+	case CMD_AXI_CFG_SNAP_THUMB_VPE:
+		return msm_vpe_frame_cfg(sync, (void *)&cfgcmd);
+
+	case CMD_STATS_AXI_CFG:
+	case CMD_STATS_AF_AXI_CFG:
+		return msm_stats_axi_cfg(sync, &cfgcmd);
+
+	default:
+		pr_err("%s: unknown command type %d\n",
+			__func__,
+			cfgcmd.cmd_type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int __msm_get_pic(struct msm_sync *sync,
+		struct msm_frame *frame)
+{
+
+	int rc = 0;
+	struct msm_queue_cmd *qcmd = NULL;
+	struct msm_vfe_resp *vdata;
+	struct msm_vfe_phy_info *pphy;
+	struct msm_pmem_info pmem_info;
+	struct msm_frame *pframe;
+
+	qcmd = msm_dequeue(&sync->pict_q, list_pict);
+
+	if (!qcmd) {
+		pr_err("%s: no pic frame.\n", __func__);
+		return -EAGAIN;
+	}
+
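+	/* Frames coming straight from the VFE carry physical addresses and
+	 * need a pmem lookup; post-processed frames already describe a
+	 * userspace buffer and are copied through unchanged.
+	 */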
+	if (MSM_CAM_Q_PP_MSG != qcmd->type) {
+		vdata = (struct msm_vfe_resp *)(qcmd->command);
+		pphy = &vdata->phy;
+
+		rc = msm_pmem_frame_ptov_lookup2(sync,
+				pphy->y_phy,
+				&pmem_info,
+				1); /* mark pic frame in use */
+
+		if (rc < 0) {
+			pr_err("%s: cannot get pic frame, invalid lookup"
+				" address y %x cbcr %x\n",
+				__func__, pphy->y_phy, pphy->cbcr_phy);
+			goto err;
+		}
+
+		frame->ts = qcmd->ts;
+		frame->buffer = (unsigned long)pmem_info.vaddr;
+		frame->y_off = pmem_info.y_off;
+		frame->cbcr_off = pmem_info.cbcr_off;
+		frame->fd = pmem_info.fd;
+		if (sync->stereocam_enabled &&
+			sync->stereo_state != STEREO_RAW_SNAP_STARTED) {
+			if (pmem_info.type == MSM_PMEM_THUMBNAIL_VPE)
+				frame->path = OUTPUT_TYPE_T;
+			else
+				frame->path = OUTPUT_TYPE_S;
+		} else
+			frame->path = vdata->phy.output_id;
+
+		CDBG("%s: y %x, cbcr %x, qcmd %x, virt_addr %x\n",
+			__func__, pphy->y_phy,
+			pphy->cbcr_phy, (int) qcmd, (int) frame->buffer);
+	} else { /* PP */
+		pframe = (struct msm_frame *)(qcmd->command);
+		frame->ts = qcmd->ts;
+		frame->buffer = pframe->buffer;
+		frame->y_off = pframe->y_off;
+		frame->cbcr_off = pframe->cbcr_off;
+		frame->fd = pframe->fd;
+		frame->path = pframe->path;
+		CDBG("%s: PP y_off %x, cbcr_off %x, path %d vaddr 0x%x\n",
+			__func__, frame->y_off, frame->cbcr_off, frame->path,
+			(int) frame->buffer);
+	}
+
+err:
+	free_qcmd(qcmd);
+
+	return rc;
+}
+
+static int msm_get_pic(struct msm_sync *sync, void __user *arg)
+{
+	int rc = 0;
+	struct msm_frame frame;
+
+	if (copy_from_user(&frame,
+				arg,
+				sizeof(struct msm_frame))) {
+		ERR_COPY_FROM_USER();
+		return -EFAULT;
+	}
+
+	rc = __msm_get_pic(sync, &frame);
+	if (rc < 0)
+		return rc;
+
+	if (sync->croplen && (!sync->stereocam_enabled)) {
+		if (frame.croplen != sync->croplen) {
+			pr_err("%s: invalid frame croplen %d,"
+				"expecting %d\n",
+				__func__,
+				frame.croplen,
+				sync->croplen);
+			return -EINVAL;
+		}
+
+		if (copy_to_user((void *)frame.cropinfo,
+				sync->cropinfo,
+				sync->croplen)) {
+			ERR_COPY_TO_USER();
+			return -EFAULT;
+		}
+	}
+	CDBG("%s: copy snapshot frame to user\n", __func__);
+	if (copy_to_user((void *)arg,
+				&frame, sizeof(struct msm_frame))) {
+		ERR_COPY_TO_USER();
+		rc = -EFAULT;
+	}
+
+	CDBG("%s: got pic frame\n", __func__);
+
+	return rc;
+}
+
+static int msm_set_crop(struct msm_sync *sync, void __user *arg)
+{
+	struct crop_info crop;
+
+	mutex_lock(&sync->lock);
+	if (copy_from_user(&crop,
+				arg,
+				sizeof(struct crop_info))) {
+		ERR_COPY_FROM_USER();
+		mutex_unlock(&sync->lock);
+		return -EFAULT;
+	}
+
+	if (crop.len != CROP_LEN) {
+		mutex_unlock(&sync->lock);
+		return -EINVAL;
+	}
+
+	if (!sync->croplen) {
+		sync->cropinfo = kmalloc(crop.len, GFP_KERNEL);
+		if (!sync->cropinfo) {
+			mutex_unlock(&sync->lock);
+			return -ENOMEM;
+		}
+	}
+
+	if (copy_from_user(sync->cropinfo,
+				crop.info,
+				crop.len)) {
+		ERR_COPY_FROM_USER();
+		sync->croplen = 0;
+		kfree(sync->cropinfo);
+		sync->cropinfo = NULL;
+		mutex_unlock(&sync->lock);
+		return -EFAULT;
+	}
+
+	sync->croplen = crop.len;
+
+	mutex_unlock(&sync->lock);
+	return 0;
+}
+
+static int msm_error_config(struct msm_sync *sync, void __user *arg)
+{
+	struct msm_queue_cmd *qcmd =
+		kmalloc(sizeof(struct msm_queue_cmd), GFP_KERNEL);
+
+	if (!qcmd)
+		return -ENOMEM;
+
+	qcmd->command = NULL;
+	atomic_set(&(qcmd->on_heap), 1);
+
+	if (copy_from_user(&(qcmd->error_code), arg, sizeof(uint32_t))) {
+		ERR_COPY_FROM_USER();
+		free_qcmd(qcmd);
+		return -EFAULT;
+	}
+
+	pr_err("%s: Enqueue Fake Frame with error code = %d\n", __func__,
+		qcmd->error_code);
+	msm_enqueue(&sync->frame_q, &qcmd->list_frame);
+	return 0;
+}
+
+static int msm_set_fd_roi(struct msm_sync *sync, void __user *arg)
+{
+	struct fd_roi_info fd_roi;
+
+	mutex_lock(&sync->lock);
+	if (copy_from_user(&fd_roi,
+			arg,
+			sizeof(struct fd_roi_info))) {
+		ERR_COPY_FROM_USER();
+		mutex_unlock(&sync->lock);
+		return -EFAULT;
+	}
+	if (fd_roi.info_len <= 0) {
+		mutex_unlock(&sync->lock);
+		return -EFAULT;
+	}
+
+	if (!sync->fdroiinfo.info) {
+		sync->fdroiinfo.info = kmalloc(fd_roi.info_len, GFP_KERNEL);
+		if (!sync->fdroiinfo.info) {
+			mutex_unlock(&sync->lock);
+			return -ENOMEM;
+		}
+		sync->fdroiinfo.info_len = fd_roi.info_len;
+	} else if (sync->fdroiinfo.info_len < fd_roi.info_len) {
+		mutex_unlock(&sync->lock);
+		return -EINVAL;
+	}
+
+	if (copy_from_user(sync->fdroiinfo.info,
+			fd_roi.info,
+			fd_roi.info_len)) {
+		ERR_COPY_FROM_USER();
+		kfree(sync->fdroiinfo.info);
+		sync->fdroiinfo.info = NULL;
+		mutex_unlock(&sync->lock);
+		return -EFAULT;
+	}
+	mutex_unlock(&sync->lock);
+	return 0;
+}
+
+static int msm_pp_grab(struct msm_sync *sync, void __user *arg)
+{
+	uint32_t enable;
+	if (copy_from_user(&enable, arg, sizeof(enable))) {
+		ERR_COPY_FROM_USER();
+		return -EFAULT;
+	} else {
+		enable &= PP_MASK;
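+		/* Only one post-processing stage may be requested at a time;
+		 * the power-of-two test below rejects masks with more than
+		 * one bit set.
+		 */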
+		if (enable & (enable - 1)) {
+			pr_err("%s: error: more than one PP request!\n",
+				__func__);
+			return -EINVAL;
+		}
+		if (sync->pp_mask) {
+			if (enable) {
+				pr_err("%s: postproc %x is already enabled\n",
+					__func__, sync->pp_mask & enable);
+				return -EINVAL;
+			} else {
+				sync->pp_mask &= enable;
+				CDBG("%s: sync->pp_mask %d enable %d\n",
+					__func__, sync->pp_mask, enable);
+				return 0;
+			}
+		}
+
+		CDBG("%s: sync->pp_mask %d enable %d\n", __func__,
+			sync->pp_mask, enable);
+		sync->pp_mask |= enable;
+	}
+
+	return 0;
+}
+
+static int msm_put_st_frame(struct msm_sync *sync, void __user *arg)
+{
+	unsigned long flags;
+	unsigned long st_pphy;
+	if (sync->stereocam_enabled) {
+		/* Make stereo frame ready for VPE. */
+		struct msm_st_frame stereo_frame_half;
+
+		if (copy_from_user(&stereo_frame_half, arg,
+			sizeof(stereo_frame_half))) {
+			ERR_COPY_FROM_USER();
+			return -EFAULT;
+		}
+
+		if (stereo_frame_half.type == OUTPUT_TYPE_ST_L) {
+			struct msm_vfe_resp *vfe_rp;
+			struct msm_queue_cmd *qcmd;
+
+			spin_lock_irqsave(&pp_stereocam_spinlock, flags);
+			if (!sync->pp_stereocam) {
+				pr_warning("%s: no stereo frame to deliver!\n",
+					__func__);
+				spin_unlock_irqrestore(&pp_stereocam_spinlock,
+					flags);
+				return -EINVAL;
+			}
+			CDBG("%s: delivering left frame to VPE\n", __func__);
+
+			qcmd = sync->pp_stereocam;
+			sync->pp_stereocam = NULL;
+			spin_unlock_irqrestore(&pp_stereocam_spinlock, flags);
+
+			vfe_rp = (struct msm_vfe_resp *)qcmd->command;
+
+			CDBG("%s: Left Py = 0x%x y_off = %d cbcr_off = %d\n",
+				__func__, vfe_rp->phy.y_phy,
+				stereo_frame_half.L.buf_y_off,
+				stereo_frame_half.L.buf_cbcr_off);
+
+			sync->vpefn.vpe_cfg_offset(stereo_frame_half.packing,
+			vfe_rp->phy.y_phy + stereo_frame_half.L.buf_y_off,
+			vfe_rp->phy.y_phy + stereo_frame_half.L.buf_cbcr_off,
+			&(qcmd->ts), OUTPUT_TYPE_ST_L, stereo_frame_half.L,
+			stereo_frame_half.frame_id);
+
+			free_qcmd(qcmd);
+		} else if (stereo_frame_half.type == OUTPUT_TYPE_ST_R) {
+			CDBG("%s: delivering right frame to VPE\n", __func__);
+			spin_lock_irqsave(&st_frame_spinlock, flags);
+
+			sync->stcam_conv_value =
+				stereo_frame_half.buf_info.stcam_conv_value;
+			sync->stcam_quality_ind =
+				stereo_frame_half.buf_info.stcam_quality_ind;
+
+			st_pphy = msm_pmem_frame_vtop_lookup(sync,
+				stereo_frame_half.buf_info.buffer,
+				stereo_frame_half.buf_info.y_off,
+				stereo_frame_half.buf_info.cbcr_off,
+				stereo_frame_half.buf_info.fd,
+				0); /* Do not change the active flag. */
+
+			sync->vpefn.vpe_cfg_offset(stereo_frame_half.packing,
+				st_pphy + stereo_frame_half.R.buf_y_off,
+				st_pphy + stereo_frame_half.R.buf_cbcr_off,
+				NULL, OUTPUT_TYPE_ST_R, stereo_frame_half.R,
+				stereo_frame_half.frame_id);
+
+			spin_unlock_irqrestore(&st_frame_spinlock, flags);
+		} else {
+			CDBG("%s: Invalid Msg\n", __func__);
+		}
+	}
+
+	return 0;
+}
+
+static struct msm_queue_cmd *msm_get_pp_qcmd(struct msm_frame *frame)
+{
+	struct msm_queue_cmd *qcmd =
+		kmalloc(sizeof(struct msm_queue_cmd) +
+			sizeof(struct msm_frame), GFP_ATOMIC);
+	if (!qcmd)
+		return NULL;
+
+	qcmd->command = (struct msm_frame *)(qcmd + 1);
+	qcmd->type = MSM_CAM_Q_PP_MSG;
+
+	ktime_get_ts(&(qcmd->ts));
+	memcpy(qcmd->command, frame, sizeof(struct msm_frame));
+	atomic_set(&(qcmd->on_heap), 1);
+	return qcmd;
+}
+
+static int msm_pp_release(struct msm_sync *sync, void __user *arg)
+{
+	unsigned long flags;
+
+	if (!sync->pp_mask) {
+		pr_warning("%s: pp not in progress for\n", __func__);
+		return -EINVAL;
+	}
+	if (sync->pp_mask & PP_PREV) {
+		spin_lock_irqsave(&pp_prev_spinlock, flags);
+		if (!sync->pp_prev) {
+			pr_err("%s: no preview frame to deliver!\n",
+				__func__);
+			spin_unlock_irqrestore(&pp_prev_spinlock,
+				flags);
+			return -EINVAL;
+		}
+		CDBG("%s: delivering pp_prev\n", __func__);
+
+		if (sync->frame_q.len <= 100 &&
+			sync->event_q.len <= 100) {
+			msm_enqueue(&sync->frame_q,
+				&sync->pp_prev->list_frame);
+		} else {
+			pr_err("%s, Error Queue limit exceeded f_q = %d, e_q = %d\n",
+				__func__, sync->frame_q.len,
+				sync->event_q.len);
+			free_qcmd(sync->pp_prev);
+		}
+
+		sync->pp_prev = NULL;
+		spin_unlock_irqrestore(&pp_prev_spinlock, flags);
+		goto done;
+	}
+
+	if ((sync->pp_mask & PP_SNAP) ||
+		(sync->pp_mask & PP_RAW_SNAP)) {
+		struct msm_frame frame;
+		struct msm_queue_cmd *qcmd;
+
+		if (copy_from_user(&frame,
+			arg,
+			sizeof(struct msm_frame))) {
+			ERR_COPY_FROM_USER();
+			return -EFAULT;
+		}
+		qcmd = msm_get_pp_qcmd(&frame);
+		if (!qcmd) {
+			pr_err("%s: no snapshot to deliver!\n", __func__);
+			return -EINVAL;
+		}
+		CDBG("%s: delivering pp snap\n", __func__);
+		msm_enqueue(&sync->pict_q, &qcmd->list_pict);
+	}
+
+done:
+	return 0;
+}
+
+static long msm_ioctl_common(struct msm_cam_device *pmsm,
+		unsigned int cmd,
+		void __user *argp)
+{
+	switch (cmd) {
+	case MSM_CAM_IOCTL_REGISTER_PMEM:
+		CDBG("%s cmd = MSM_CAM_IOCTL_REGISTER_PMEM\n", __func__);
+		return msm_register_pmem(pmsm->sync, argp);
+	case MSM_CAM_IOCTL_UNREGISTER_PMEM:
+		CDBG("%s cmd = MSM_CAM_IOCTL_UNREGISTER_PMEM\n", __func__);
+		return msm_pmem_table_del(pmsm->sync, argp);
+	case MSM_CAM_IOCTL_RELEASE_FRAME_BUFFER:
+		CDBG("%s cmd = MSM_CAM_IOCTL_RELEASE_FRAME_BUFFER\n", __func__);
+		return msm_put_frame_buffer(pmsm->sync, argp);
+	default:
+		CDBG("%s cmd invalid\n", __func__);
+		return -EINVAL;
+	}
+}
+
+static long msm_ioctl_config(struct file *filep, unsigned int cmd,
+	unsigned long arg)
+{
+	int rc = -EINVAL;
+	void __user *argp = (void __user *)arg;
+	struct msm_cam_device *pmsm = filep->private_data;
+
+	CDBG("%s: cmd %d\n", __func__, _IOC_NR(cmd));
+
+	switch (cmd) {
+	case MSM_CAM_IOCTL_GET_SENSOR_INFO:
+		rc = msm_get_sensor_info(pmsm->sync, argp);
+		break;
+
+	case MSM_CAM_IOCTL_CONFIG_VFE:
+		/* Coming from config thread for update */
+		rc = msm_config_vfe(pmsm->sync, argp);
+		break;
+
+	case MSM_CAM_IOCTL_CONFIG_VPE:
+		/* Coming from config thread for update */
+		rc = msm_config_vpe(pmsm->sync, argp);
+		break;
+
+	case MSM_CAM_IOCTL_GET_STATS:
+		/* Coming from config thread wait
+		 * for vfe statistics and control requests */
+		rc = msm_get_stats(pmsm->sync, argp);
+		break;
+
+	case MSM_CAM_IOCTL_ENABLE_VFE:
+		/* This request comes from control thread:
+		 * enable either QCAMTASK or VFETASK */
+		rc = msm_enable_vfe(pmsm->sync, argp);
+		break;
+
+	case MSM_CAM_IOCTL_DISABLE_VFE:
+		/* This request comes from control thread:
+		 * disable either QCAMTASK or VFETASK */
+		rc = msm_disable_vfe(pmsm->sync, argp);
+		break;
+
+	case MSM_CAM_IOCTL_VFE_APPS_RESET:
+		msm_camio_vfe_blk_reset();
+		rc = 0;
+		break;
+
+	case MSM_CAM_IOCTL_RELEASE_STATS_BUFFER:
+		rc = msm_put_stats_buffer(pmsm->sync, argp);
+		break;
+
+	case MSM_CAM_IOCTL_AXI_CONFIG:
+	case MSM_CAM_IOCTL_AXI_VPE_CONFIG:
+		rc = msm_axi_config(pmsm->sync, argp);
+		break;
+
+	case MSM_CAM_IOCTL_SET_CROP:
+		rc = msm_set_crop(pmsm->sync, argp);
+		break;
+
+	case MSM_CAM_IOCTL_SET_FD_ROI:
+		rc = msm_set_fd_roi(pmsm->sync, argp);
+		break;
+
+	case MSM_CAM_IOCTL_PICT_PP:
+		/* Grab one preview frame or one snapshot
+		 * frame.
+		 */
+		rc = msm_pp_grab(pmsm->sync, argp);
+		break;
+
+	case MSM_CAM_IOCTL_PICT_PP_DONE:
+		/* Release the preview or snapshot frame
+		 * that was grabbed.
+		 */
+		rc = msm_pp_release(pmsm->sync, argp);
+		break;
+
+	case MSM_CAM_IOCTL_PUT_ST_FRAME:
+		/* Release the left or right frame
+		 * that was sent for stereo processing.
+		 */
+		rc = msm_put_st_frame(pmsm->sync, argp);
+		break;
+
+	case MSM_CAM_IOCTL_SENSOR_IO_CFG:
+		rc = pmsm->sync->sctrl.s_config(argp);
+		break;
+
+	case MSM_CAM_IOCTL_FLASH_LED_CFG: {
+		uint32_t led_state;
+		if (copy_from_user(&led_state, argp, sizeof(led_state))) {
+			ERR_COPY_FROM_USER();
+			rc = -EFAULT;
+		} else
+			rc = msm_camera_flash_set_led_state(pmsm->sync->
+					sdata->flash_data, led_state);
+		break;
+	}
+
+	case MSM_CAM_IOCTL_STROBE_FLASH_CFG: {
+		uint32_t flash_type;
+		if (copy_from_user(&flash_type, argp, sizeof(flash_type))) {
+			pr_err("%s: copy_from_user failed\n", __func__);
+			ERR_COPY_FROM_USER();
+			rc = -EFAULT;
+		} else {
+			CDBG("msm_strobe_flash_init enter\n");
+			rc = msm_strobe_flash_init(pmsm->sync, flash_type);
+		}
+		break;
+	}
+
+	case MSM_CAM_IOCTL_STROBE_FLASH_RELEASE:
+		if (pmsm->sync->sdata->strobe_flash_data) {
+			rc = pmsm->sync->sfctrl.strobe_flash_release(
+				pmsm->sync->sdata->strobe_flash_data, 0);
+		}
+		break;
+
+	case MSM_CAM_IOCTL_STROBE_FLASH_CHARGE: {
+		uint32_t charge_en;
+		if (copy_from_user(&charge_en, argp, sizeof(charge_en))) {
+			ERR_COPY_FROM_USER();
+			rc = -EFAULT;
+		} else
+			rc = pmsm->sync->sfctrl.strobe_flash_charge(
+			pmsm->sync->sdata->strobe_flash_data->flash_charge,
+			charge_en, pmsm->sync->sdata->strobe_flash_data->
+				flash_recharge_duration);
+		break;
+	}
+
+	case MSM_CAM_IOCTL_FLASH_CTRL: {
+		struct flash_ctrl_data flash_info;
+		if (copy_from_user(&flash_info, argp, sizeof(flash_info))) {
+			ERR_COPY_FROM_USER();
+			rc = -EFAULT;
+		} else
+			rc = msm_flash_ctrl(pmsm->sync->sdata, &flash_info);
+
+		break;
+	}
+
+	case MSM_CAM_IOCTL_ERROR_CONFIG:
+		rc = msm_error_config(pmsm->sync, argp);
+		break;
+
+	case MSM_CAM_IOCTL_ABORT_CAPTURE: {
+		unsigned long flags = 0;
+		CDBG("get_pic:MSM_CAM_IOCTL_ABORT_CAPTURE\n");
+		spin_lock_irqsave(&pmsm->sync->abort_pict_lock, flags);
+		pmsm->sync->get_pic_abort = 1;
+		spin_unlock_irqrestore(&pmsm->sync->abort_pict_lock, flags);
+		wake_up(&(pmsm->sync->pict_q.wait));
+		rc = 0;
+		break;
+	}
+
+	default:
+		rc = msm_ioctl_common(pmsm, cmd, argp);
+		break;
+	}
+
+	CDBG("%s: cmd %d DONE\n", __func__, _IOC_NR(cmd));
+	return rc;
+}
+
+static int msm_unblock_poll_frame(struct msm_sync *);
+
+static long msm_ioctl_frame(struct file *filep, unsigned int cmd,
+	unsigned long arg)
+{
+	int rc = -EINVAL;
+	void __user *argp = (void __user *)arg;
+	struct msm_cam_device *pmsm = filep->private_data;
+
+	switch (cmd) {
+	case MSM_CAM_IOCTL_GETFRAME:
+		/* Coming from frame thread to get frame
+		 * after SELECT is done */
+		rc = msm_get_frame(pmsm->sync, argp);
+		break;
+	case MSM_CAM_IOCTL_RELEASE_FRAME_BUFFER:
+		rc = msm_put_frame_buffer(pmsm->sync, argp);
+		break;
+	case MSM_CAM_IOCTL_UNBLOCK_POLL_FRAME:
+		rc = msm_unblock_poll_frame(pmsm->sync);
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+static int msm_unblock_poll_pic(struct msm_sync *sync);
+static long msm_ioctl_pic(struct file *filep, unsigned int cmd,
+	unsigned long arg)
+{
+	int rc = -EINVAL;
+	void __user *argp = (void __user *)arg;
+	struct msm_cam_device *pmsm = filep->private_data;
+
+	switch (cmd) {
+	case MSM_CAM_IOCTL_GET_PICTURE:
+		rc = msm_get_pic(pmsm->sync, argp);
+		break;
+	case MSM_CAM_IOCTL_RELEASE_PIC_BUFFER:
+		rc = msm_put_pic_buffer(pmsm->sync, argp);
+		break;
+	case MSM_CAM_IOCTL_UNBLOCK_POLL_PIC_FRAME:
+		rc = msm_unblock_poll_pic(pmsm->sync);
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+static long msm_ioctl_control(struct file *filep, unsigned int cmd,
+	unsigned long arg)
+{
+	int rc = -EINVAL;
+	void __user *argp = (void __user *)arg;
+	struct msm_control_device *ctrl_pmsm = filep->private_data;
+	struct msm_cam_device *pmsm = ctrl_pmsm->pmsm;
+
+	switch (cmd) {
+	case MSM_CAM_IOCTL_CTRL_COMMAND:
+		/* Coming from control thread, may need to wait for
+		 * command status */
+		CDBG("calling msm_control kernel msm_ioctl_control\n");
+		mutex_lock(&ctrl_cmd_lock);
+		rc = msm_control(ctrl_pmsm, 1, argp);
+		mutex_unlock(&ctrl_cmd_lock);
+		break;
+	case MSM_CAM_IOCTL_CTRL_COMMAND_2:
+		/* Sends a message, returns immediately */
+		rc = msm_control(ctrl_pmsm, 0, argp);
+		break;
+	case MSM_CAM_IOCTL_CTRL_CMD_DONE:
+		/* Config thread calls the control thread to notify it
+		 * of the result of a MSM_CAM_IOCTL_CTRL_COMMAND.
+		 */
+		rc = msm_ctrl_cmd_done(ctrl_pmsm, argp);
+		break;
+	case MSM_CAM_IOCTL_GET_SENSOR_INFO:
+		rc = msm_get_sensor_info(pmsm->sync, argp);
+		break;
+	case MSM_CAM_IOCTL_GET_CAMERA_INFO:
+		rc = msm_get_camera_info(argp);
+		break;
+	default:
+		rc = msm_ioctl_common(pmsm, cmd, argp);
+		break;
+	}
+
+	return rc;
+}
+
+static int __msm_release(struct msm_sync *sync)
+{
+	struct msm_pmem_region *region;
+	struct hlist_node *hnode;
+	struct hlist_node *n;
+
+	mutex_lock(&sync->lock);
+	if (sync->opencnt)
+		sync->opencnt--;
+	pr_info("%s, open count =%d\n", __func__, sync->opencnt);
+	if (!sync->opencnt) {
+		/* need to clean up system resource */
+		pr_info("%s, release VFE\n", __func__);
+		if (sync->core_powered_on) {
+			if (sync->vfefn.vfe_release)
+				sync->vfefn.vfe_release(sync->pdev);
+			/*sensor release */
+			pr_info("%s, release Sensor\n", __func__);
+			sync->sctrl.s_release();
+			CDBG("%s, msm_camio_sensor_clk_off\n", __func__);
+			msm_camio_sensor_clk_off(sync->pdev);
+			if (sync->sfctrl.strobe_flash_release) {
+				CDBG("%s, strobe_flash_release\n", __func__);
+				sync->sfctrl.strobe_flash_release(
+				sync->sdata->strobe_flash_data, 1);
+			}
+		}
+		kfree(sync->cropinfo);
+		sync->cropinfo = NULL;
+		sync->croplen = 0;
+		CDBG("%s, free frame pmem region\n", __func__);
+		hlist_for_each_entry_safe(region, hnode, n,
+				&sync->pmem_frames, list) {
+			hlist_del(hnode);
+			put_pmem_file(region->file);
+			kfree(region);
+		}
+		CDBG("%s, free stats pmem region\n", __func__);
+		hlist_for_each_entry_safe(region, hnode, n,
+				&sync->pmem_stats, list) {
+			hlist_del(hnode);
+			put_pmem_file(region->file);
+			kfree(region);
+		}
+		msm_queue_drain(&sync->pict_q, list_pict);
+		msm_queue_drain(&sync->event_q, list_config);
+
+		wake_unlock(&sync->wake_lock);
+		sync->apps_id = NULL;
+		sync->core_powered_on = 0;
+	}
+	mutex_unlock(&sync->lock);
+
+	return 0;
+}
+
+static int msm_release_config(struct inode *node, struct file *filep)
+{
+	int rc;
+	struct msm_cam_device *pmsm = filep->private_data;
+	pr_info("%s: %s\n", __func__, filep->f_path.dentry->d_name.name);
+	rc = __msm_release(pmsm->sync);
+	if (!rc) {
+		msm_queue_drain(&pmsm->sync->event_q, list_config);
+		atomic_set(&pmsm->opened, 0);
+	}
+	return rc;
+}
+
+static int msm_release_control(struct inode *node, struct file *filep)
+{
+	int rc;
+	struct msm_control_device *ctrl_pmsm = filep->private_data;
+	struct msm_cam_device *pmsm = ctrl_pmsm->pmsm;
+	pr_info("%s: %s\n", __func__, filep->f_path.dentry->d_name.name);
+	g_v4l2_opencnt--;
+	mutex_lock(&pmsm->sync->lock);
+	if (pmsm->sync->core_powered_on && pmsm->sync->vfefn.vfe_stop) {
+		pr_info("%s, stop vfe if active\n", __func__);
+		pmsm->sync->vfefn.vfe_stop();
+	}
+	mutex_unlock(&pmsm->sync->lock);
+	rc = __msm_release(pmsm->sync);
+	if (!rc) {
+		msm_queue_drain(&ctrl_pmsm->ctrl_q, list_control);
+		kfree(ctrl_pmsm);
+	}
+	return rc;
+}
+
+static int msm_release_frame(struct inode *node, struct file *filep)
+{
+	int rc;
+	struct msm_cam_device *pmsm = filep->private_data;
+	pr_info("%s: %s\n", __func__, filep->f_path.dentry->d_name.name);
+	rc = __msm_release(pmsm->sync);
+	if (!rc) {
+		msm_queue_drain(&pmsm->sync->frame_q, list_frame);
+		atomic_set(&pmsm->opened, 0);
+	}
+	return rc;
+}
+
+static int msm_release_pic(struct inode *node, struct file *filep)
+{
+	int rc;
+	struct msm_cam_device *pmsm = filep->private_data;
+	CDBG("%s: %s\n", __func__, filep->f_path.dentry->d_name.name);
+	rc = __msm_release(pmsm->sync);
+	if (!rc) {
+		msm_queue_drain(&pmsm->sync->pict_q, list_pict);
+		atomic_set(&pmsm->opened, 0);
+	}
+	return rc;
+}
+
+static int msm_unblock_poll_pic(struct msm_sync *sync)
+{
+	unsigned long flags;
+	CDBG("%s\n", __func__);
+	spin_lock_irqsave(&sync->pict_q.lock, flags);
+	sync->unblock_poll_pic_frame = 1;
+	wake_up(&sync->pict_q.wait);
+	spin_unlock_irqrestore(&sync->pict_q.lock, flags);
+	return 0;
+}
+
+static int msm_unblock_poll_frame(struct msm_sync *sync)
+{
+	unsigned long flags;
+	CDBG("%s\n", __func__);
+	spin_lock_irqsave(&sync->frame_q.lock, flags);
+	sync->unblock_poll_frame = 1;
+	wake_up(&sync->frame_q.wait);
+	spin_unlock_irqrestore(&sync->frame_q.lock, flags);
+	return 0;
+}
+
+static unsigned int __msm_poll_frame(struct msm_sync *sync,
+		struct file *filep,
+		struct poll_table_struct *pll_table)
+{
+	int rc = 0;
+	unsigned long flags;
+
+	poll_wait(filep, &sync->frame_q.wait, pll_table);
+
+	spin_lock_irqsave(&sync->frame_q.lock, flags);
+	if (!list_empty_careful(&sync->frame_q.list))
+		/* frame ready */
+		rc = POLLIN | POLLRDNORM;
+	if (sync->unblock_poll_frame) {
+		CDBG("%s: sync->unblock_poll_frame is true\n", __func__);
+		rc |= POLLPRI;
+		sync->unblock_poll_frame = 0;
+	}
+	spin_unlock_irqrestore(&sync->frame_q.lock, flags);
+
+	return rc;
+}
+
+static unsigned int __msm_poll_pic(struct msm_sync *sync,
+		struct file *filep,
+		struct poll_table_struct *pll_table)
+{
+	int rc = 0;
+	unsigned long flags;
+
+	poll_wait(filep, &sync->pict_q.wait, pll_table);
+	spin_lock_irqsave(&sync->abort_pict_lock, flags);
+	if (sync->get_pic_abort == 1) {
+		/* TODO: need to pass an error case */
+		sync->get_pic_abort = 0;
+	}
+	spin_unlock_irqrestore(&sync->abort_pict_lock, flags);
+
+	spin_lock_irqsave(&sync->pict_q.lock, flags);
+	if (!list_empty_careful(&sync->pict_q.list))
+		/* frame ready */
+		rc = POLLIN | POLLRDNORM;
+	if (sync->unblock_poll_pic_frame) {
+		CDBG("%s: sync->unblock_poll_pic_frame is true\n", __func__);
+		rc |= POLLPRI;
+		sync->unblock_poll_pic_frame = 0;
+	}
+	spin_unlock_irqrestore(&sync->pict_q.lock, flags);
+
+	return rc;
+}
+
+static unsigned int msm_poll_frame(struct file *filep,
+	struct poll_table_struct *pll_table)
+{
+	struct msm_cam_device *pmsm = filep->private_data;
+	return __msm_poll_frame(pmsm->sync, filep, pll_table);
+}
+
+static unsigned int msm_poll_pic(struct file *filep,
+	struct poll_table_struct *pll_table)
+{
+	struct msm_cam_device *pmsm = filep->private_data;
+	return __msm_poll_pic(pmsm->sync, filep, pll_table);
+}
+
+static unsigned int __msm_poll_config(struct msm_sync *sync,
+		struct file *filep,
+		struct poll_table_struct *pll_table)
+{
+	int rc = 0;
+	unsigned long flags;
+
+	poll_wait(filep, &sync->event_q.wait, pll_table);
+
+	spin_lock_irqsave(&sync->event_q.lock, flags);
+	if (!list_empty_careful(&sync->event_q.list))
+		/* event ready */
+		rc = POLLIN | POLLRDNORM;
+	spin_unlock_irqrestore(&sync->event_q.lock, flags);
+
+	return rc;
+}
+
+static unsigned int msm_poll_config(struct file *filep,
+	struct poll_table_struct *pll_table)
+{
+	struct msm_cam_device *pmsm = filep->private_data;
+	return __msm_poll_config(pmsm->sync, filep, pll_table);
+}
+
+/*
+ * This function executes in interrupt context.
+ */
+
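+/*
+ * The queue-command header is allocated immediately ahead of the payload that
+ * is handed to the VFE/VPE ("qcmd + 1"); the response callbacks later recover
+ * the header with "((struct msm_queue_cmd *)vdata) - 1" and the free helpers
+ * step back the same way before kfree().
+ */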
+static void *msm_vfe_sync_alloc(int size,
+			void *syncdata __attribute__((unused)),
+			gfp_t gfp)
+{
+	struct msm_queue_cmd *qcmd =
+		kzalloc(sizeof(struct msm_queue_cmd) + size, gfp);
+	if (qcmd) {
+		atomic_set(&qcmd->on_heap, 1);
+		return qcmd + 1;
+	}
+	return NULL;
+}
+
+static void *msm_vpe_sync_alloc(int size,
+			void *syncdata __attribute__((unused)),
+			gfp_t gfp)
+{
+	struct msm_queue_cmd *qcmd =
+		kzalloc(sizeof(struct msm_queue_cmd) + size, gfp);
+	if (qcmd) {
+		atomic_set(&qcmd->on_heap, 1);
+		return qcmd + 1;
+	}
+	return NULL;
+}
+
+static void msm_vfe_sync_free(void *ptr)
+{
+	if (ptr) {
+		struct msm_queue_cmd *qcmd =
+			(struct msm_queue_cmd *)ptr;
+		qcmd--;
+		if (atomic_read(&qcmd->on_heap))
+			kfree(qcmd);
+	}
+}
+
+static void msm_vpe_sync_free(void *ptr)
+{
+	if (ptr) {
+		struct msm_queue_cmd *qcmd =
+			(struct msm_queue_cmd *)ptr;
+		qcmd--;
+		if (atomic_read(&qcmd->on_heap))
+			kfree(qcmd);
+	}
+}
+
+/*
+ * This function executes in interrupt context.
+ */
+
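+/*
+ * Dispatch one VFE response: preview, thumbnail, main-image and video frames
+ * are routed to the frame/picture queues or parked for post-processing and
+ * stereo handling; everything else is forwarded to the config event queue.
+ * The frame and event queues are capped at 100 entries; commands that would
+ * overflow them are dropped and freed.
+ */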
+static void msm_vfe_sync(struct msm_vfe_resp *vdata,
+		enum msm_queue qtype, void *syncdata,
+		gfp_t gfp)
+{
+	struct msm_queue_cmd *qcmd = NULL;
+	struct msm_sync *sync = (struct msm_sync *)syncdata;
+	unsigned long flags;
+
+	if (!sync) {
+		pr_err("%s: no context in dsp callback.\n", __func__);
+		return;
+	}
+
+	qcmd = ((struct msm_queue_cmd *)vdata) - 1;
+	qcmd->type = qtype;
+	qcmd->command = vdata;
+
+	ktime_get_ts(&(qcmd->ts));
+
+	if (qtype != MSM_CAM_Q_VFE_MSG)
+		goto vfe_for_config;
+
+	CDBG("%s: vdata->type %d\n", __func__, vdata->type);
+
+	switch (vdata->type) {
+	case VFE_MSG_OUTPUT_P:
+		if (sync->pp_mask & PP_PREV) {
+			CDBG("%s: PP_PREV in progress: phy_y %x phy_cbcr %x\n",
+				__func__,
+				vdata->phy.y_phy,
+				vdata->phy.cbcr_phy);
+			spin_lock_irqsave(&pp_prev_spinlock, flags);
+			if (sync->pp_prev)
+				CDBG("%s: overwriting pp_prev!\n",
+					__func__);
+			CDBG("%s: sending preview to config\n", __func__);
+			sync->pp_prev = qcmd;
+			spin_unlock_irqrestore(&pp_prev_spinlock, flags);
+			sync->pp_frame_avail = 1;
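+			/* Take an extra on_heap reference: this qcmd is now
+			owned by pp_prev and is also enqueued to the config
+			event queue below. */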
+			if (atomic_read(&qcmd->on_heap))
+				atomic_add(1, &qcmd->on_heap);
+			break;
+		}
+		CDBG("%s: msm_enqueue frame_q\n", __func__);
+		if (sync->stereocam_enabled)
+			CDBG("%s: Enqueue VFE_MSG_OUTPUT_P to event_q for "
+				"stereo processing\n", __func__);
+		else {
+			if (sync->frame_q.len <= 100 &&
+				sync->event_q.len <= 100) {
+				if (atomic_read(&qcmd->on_heap))
+					atomic_add(1, &qcmd->on_heap);
+				msm_enqueue(&sync->frame_q, &qcmd->list_frame);
+			} else {
+				pr_err("%s, Error Queue limit exceeded "
+					"f_q = %d, e_q = %d\n",	__func__,
+					sync->frame_q.len, sync->event_q.len);
+				free_qcmd(qcmd);
+				return;
+			}
+		}
+		break;
+
+	case VFE_MSG_OUTPUT_T:
+		if (sync->stereocam_enabled) {
+			spin_lock_irqsave(&pp_stereocam_spinlock, flags);
+
+			/* If out1/2 is currently in progress, save the qcmd
+			and issue it only once the 1st one completes the 3D
+			pipeline. */
+			if (STEREO_SNAP_BUFFER1_PROCESSING ==
+				sync->stereo_state) {
+				sync->pp_stereocam2 = qcmd;
+				spin_unlock_irqrestore(&pp_stereocam_spinlock,
+					flags);
+				if (atomic_read(&qcmd->on_heap))
+					atomic_add(1, &qcmd->on_heap);
+				CDBG("%s: snapshot stereo in progress\n",
+					__func__);
+				return;
+			}
+
+			if (sync->pp_stereocam)
+				CDBG("%s: overwriting pp_stereocam!\n",
+					__func__);
+
+			CDBG("%s: sending stereo frame to config\n", __func__);
+			sync->pp_stereocam = qcmd;
+			sync->stereo_state =
+				STEREO_SNAP_BUFFER1_PROCESSING;
+
+			spin_unlock_irqrestore(&pp_stereocam_spinlock, flags);
+
+			/* Increment on_heap by one because the same qcmd will
+			be used for VPE in msm_pp_release. */
+			if (atomic_read(&qcmd->on_heap))
+				atomic_add(1, &qcmd->on_heap);
+			CDBG("%s: Enqueue VFE_MSG_OUTPUT_T to event_q for "
+				"stereo processing.\n", __func__);
+			break;
+		}
+		if (sync->pp_mask & PP_SNAP) {
+			CDBG("%s: pp sending thumbnail to config\n",
+				__func__);
+		} else {
+			msm_enqueue(&sync->pict_q, &qcmd->list_pict);
+			return;
+		}
+
+	case VFE_MSG_OUTPUT_S:
+		if (sync->stereocam_enabled &&
+			sync->stereo_state != STEREO_RAW_SNAP_STARTED) {
+			spin_lock_irqsave(&pp_stereocam_spinlock, flags);
+
+			/* if out1/2 is currently in progress, save the qcmd
+			and issue only once the 1st one completes the 3D
+			pipeline */
+			if (STEREO_SNAP_BUFFER1_PROCESSING ==
+				sync->stereo_state) {
+				sync->pp_stereocam2 = qcmd;
+				spin_unlock_irqrestore(&pp_stereocam_spinlock,
+					flags);
+				if (atomic_read(&qcmd->on_heap))
+					atomic_add(1, &qcmd->on_heap);
+				CDBG("%s: snapshot stereo in progress\n",
+					__func__);
+				return;
+			}
+			if (sync->pp_stereocam)
+				CDBG("%s: overwriting pp_stereocam!\n",
+					__func__);
+
+			CDBG("%s: sending stereo frame to config\n", __func__);
+			sync->pp_stereocam = qcmd;
+			sync->stereo_state =
+				STEREO_SNAP_BUFFER1_PROCESSING;
+
+			spin_unlock_irqrestore(&pp_stereocam_spinlock, flags);
+
+			/* Increment on_heap by one because the same qcmd will
+			be used for VPE in msm_pp_release. */
+			if (atomic_read(&qcmd->on_heap))
+				atomic_add(1, &qcmd->on_heap);
+			CDBG("%s: Enqueue VFE_MSG_OUTPUT_S to event_q for "
+				"stereo processing.\n", __func__);
+			break;
+		}
+		if (sync->pp_mask & PP_SNAP) {
+			CDBG("%s: pp sending main image to config\n",
+				__func__);
+		} else {
+			CDBG("%s: enqueue to picture queue\n", __func__);
+			msm_enqueue(&sync->pict_q, &qcmd->list_pict);
+			return;
+		}
+		break;
+
+	case VFE_MSG_OUTPUT_V:
+		if (sync->stereocam_enabled) {
+			spin_lock_irqsave(&pp_stereocam_spinlock, flags);
+
+			if (sync->pp_stereocam)
+				CDBG("%s: overwriting pp_stereocam!\n",
+					__func__);
+
+			CDBG("%s: sending stereo frame to config\n", __func__);
+			sync->pp_stereocam = qcmd;
+
+			spin_unlock_irqrestore(&pp_stereocam_spinlock, flags);
+
+			/* Increment on_heap by one because the same qcmd will
+			be used for VPE in msm_pp_release. */
+			if (atomic_read(&qcmd->on_heap))
+				atomic_add(1, &qcmd->on_heap);
+			CDBG("%s: Enqueue VFE_MSG_OUTPUT_V to event_q for "
+				"stereo processing.\n", __func__);
+			break;
+		}
+		if (sync->vpefn.vpe_cfg_update) {
+			CDBG("dis_en = %d\n", *sync->vpefn.dis);
+			if (*(sync->vpefn.dis)) {
+				memset(&(vdata->vpe_bf), 0,
+					sizeof(vdata->vpe_bf));
+
+				if (sync->cropinfo != NULL)
+					vdata->vpe_bf.vpe_crop =
+				*(struct video_crop_t *)(sync->cropinfo);
+
+				vdata->vpe_bf.y_phy = vdata->phy.y_phy;
+				vdata->vpe_bf.cbcr_phy = vdata->phy.cbcr_phy;
+				vdata->vpe_bf.ts = (qcmd->ts);
+				vdata->vpe_bf.frame_id = vdata->phy.frame_id;
+				qcmd->command = vdata;
+				msm_enqueue_vpe(&sync->vpe_q,
+					&qcmd->list_vpe_frame);
+				return;
+			} else if (sync->vpefn.vpe_cfg_update(sync->cropinfo)) {
+				CDBG("%s: msm_enqueue video frame to vpe time "
+					"= %ld\n", __func__, qcmd->ts.tv_nsec);
+
+				sync->vpefn.send_frame_to_vpe(
+					vdata->phy.y_phy,
+					vdata->phy.cbcr_phy,
+					&(qcmd->ts), OUTPUT_TYPE_V);
+
+				free_qcmd(qcmd);
+				return;
+			} else {
+				CDBG("%s: msm_enqueue video frame_q\n",
+					__func__);
+				if (sync->liveshot_enabled) {
+					CDBG("%s: msm_enqueue liveshot\n",
+						__func__);
+					vdata->phy.output_id |= OUTPUT_TYPE_L;
+					sync->liveshot_enabled = false;
+				}
+				if (sync->frame_q.len <= 100 &&
+					sync->event_q.len <= 100) {
+					msm_enqueue(&sync->frame_q,
+						&qcmd->list_frame);
+				} else {
+					pr_err("%s, Error Queue limit exceeded "
+						"f_q = %d, e_q = %d\n",
+						__func__, sync->frame_q.len,
+						sync->event_q.len);
+					free_qcmd(qcmd);
+				}
+
+				return;
+			}
+		} else {
+			CDBG("%s: msm_enqueue video frame_q\n",	__func__);
+			if (sync->frame_q.len <= 100 &&
+				sync->event_q.len <= 100) {
+				msm_enqueue(&sync->frame_q, &qcmd->list_frame);
+			} else {
+				pr_err("%s, Error Queue limit exceeded "
+					"f_q = %d, e_q = %d\n",
+					__func__, sync->frame_q.len,
+					sync->event_q.len);
+				free_qcmd(qcmd);
+			}
+
+			return;
+		}
+
+	case VFE_MSG_SNAPSHOT:
+		if (sync->pp_mask & (PP_SNAP | PP_RAW_SNAP)) {
+			CDBG("%s: PP_SNAP in progress: pp_mask %x\n",
+				__func__, sync->pp_mask);
+		} else {
+			if (atomic_read(&qcmd->on_heap))
+				atomic_add(1, &qcmd->on_heap);
+			CDBG("%s: VFE_MSG_SNAPSHOT store\n",
+				__func__);
+			if (sync->stereocam_enabled &&
+				sync->stereo_state != STEREO_RAW_SNAP_STARTED) {
+				sync->pp_stereosnap = qcmd;
+				return;
+			}
+		}
+		break;
+
+	case VFE_MSG_COMMON:
+		CDBG("%s: qtype %d, comp stats, enqueue event_q.\n",
+			__func__, vdata->type);
+		break;
+
+	case VFE_MSG_GENERAL:
+		CDBG("%s: qtype %d, general msg, enqueue event_q.\n",
+			__func__, vdata->type);
+		break;
+
+	default:
+		CDBG("%s: qtype %d not handled\n", __func__, vdata->type);
+		/* fall through, send to config. */
+	}
+
+vfe_for_config:
+	CDBG("%s: msm_enqueue event_q\n", __func__);
+	if (sync->frame_q.len <= 100 && sync->event_q.len <= 100) {
+		msm_enqueue(&sync->event_q, &qcmd->list_config);
+	} else {
+		pr_err("%s, Error Queue limit exceeded f_q = %d, e_q = %d\n",
+			__func__, sync->frame_q.len, sync->event_q.len);
+		free_qcmd(qcmd);
+	}
+
+}
+
+static void msm_vpe_sync(struct msm_vpe_resp *vdata,
+	enum msm_queue qtype, void *syncdata, void *ts, gfp_t gfp)
+{
+	struct msm_queue_cmd *qcmd = NULL;
+	unsigned long flags;
+
+	struct msm_sync *sync = (struct msm_sync *)syncdata;
+	if (!sync) {
+		pr_err("%s: no context in dsp callback.\n", __func__);
+		return;
+	}
+
+	qcmd = ((struct msm_queue_cmd *)vdata) - 1;
+	qcmd->type = qtype;
+	qcmd->command = vdata;
+	qcmd->ts = *((struct timespec *)ts);
+
+	if (qtype != MSM_CAM_Q_VPE_MSG) {
+		pr_err("%s: Invalid qcmd type = %d.\n", __func__, qcmd->type);
+		free_qcmd(qcmd);
+		return;
+	}
+
+	CDBG("%s: vdata->type %d\n", __func__, vdata->type);
+	switch (vdata->type) {
+	case VPE_MSG_OUTPUT_V:
+		if (sync->liveshot_enabled) {
+			CDBG("%s: msm_enqueue liveshot %d\n", __func__,
+				sync->liveshot_enabled);
+			vdata->phy.output_id |= OUTPUT_TYPE_L;
+			sync->liveshot_enabled = false;
+		}
+		if (sync->frame_q.len <= 100 && sync->event_q.len <= 100) {
+			CDBG("%s: enqueue to frame_q from VPE\n", __func__);
+			msm_enqueue(&sync->frame_q, &qcmd->list_frame);
+		} else {
+			pr_err("%s, Error Queue limit exceeded f_q = %d, "
+				"e_q = %d\n", __func__, sync->frame_q.len,
+				sync->event_q.len);
+			free_qcmd(qcmd);
+		}
+		return;
+
+	case VPE_MSG_OUTPUT_ST_L:
+		CDBG("%s: enqueue left frame done msg to event_q from VPE\n",
+			__func__);
+		msm_enqueue(&sync->event_q, &qcmd->list_config);
+		return;
+
+	case VPE_MSG_OUTPUT_ST_R:
+		spin_lock_irqsave(&pp_stereocam_spinlock, flags);
+		CDBG("%s: received VPE_MSG_OUTPUT_ST_R state %d\n", __func__,
+			sync->stereo_state);
+
+		if (STEREO_SNAP_BUFFER1_PROCESSING == sync->stereo_state) {
+			msm_enqueue(&sync->pict_q, &qcmd->list_pict);
+			qcmd = sync->pp_stereocam2;
+			sync->pp_stereocam = sync->pp_stereocam2;
+			sync->pp_stereocam2 = NULL;
+			msm_enqueue(&sync->event_q, &qcmd->list_config);
+			sync->stereo_state =
+				STEREO_SNAP_BUFFER2_PROCESSING;
+		} else if (STEREO_SNAP_BUFFER2_PROCESSING ==
+				sync->stereo_state) {
+			sync->stereo_state = STEREO_SNAP_IDLE;
+			/* Send snapshot DONE */
+			msm_enqueue(&sync->pict_q, &qcmd->list_pict);
+			qcmd = sync->pp_stereosnap;
+			sync->pp_stereosnap = NULL;
+			CDBG("%s: send SNAPSHOT_DONE message\n", __func__);
+			msm_enqueue(&sync->event_q, &qcmd->list_config);
+		} else {
+			if (atomic_read(&qcmd->on_heap))
+				atomic_add(1, &qcmd->on_heap);
+			msm_enqueue(&sync->event_q, &qcmd->list_config);
+			if (sync->stereo_state == STEREO_VIDEO_ACTIVE) {
+				CDBG("%s: st frame to frame_q from VPE\n",
+					__func__);
+				msm_enqueue(&sync->frame_q, &qcmd->list_frame);
+			}
+		}
+		spin_unlock_irqrestore(&pp_stereocam_spinlock, flags);
+		return;
+
+	default:
+		pr_err("%s: qtype %d not handled\n", __func__, vdata->type);
+	}
+	pr_err("%s: Should not come here. Error.\n", __func__);
+}
+
+static struct msm_vpe_callback msm_vpe_s = {
+	.vpe_resp = msm_vpe_sync,
+	.vpe_alloc = msm_vpe_sync_alloc,
+	.vpe_free = msm_vpe_sync_free,
+};
+
+static struct msm_vfe_callback msm_vfe_s = {
+	.vfe_resp = msm_vfe_sync,
+	.vfe_alloc = msm_vfe_sync_alloc,
+	.vfe_free = msm_vfe_sync_free,
+};
+
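+/*
+ * The first open of a non-control node powers the pipeline up: take the wake
+ * lock, enable the sensor clocks, run the sensor and VFE init hooks and
+ * register the VPE callbacks.  Later opens (and control node opens) only
+ * bump opencnt.
+ */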
+static int __msm_open(struct msm_cam_device *pmsm, const char *const apps_id,
+			int is_controlnode)
+{
+	int rc = 0;
+	struct msm_sync *sync = pmsm->sync;
+
+	mutex_lock(&sync->lock);
+	if (sync->apps_id && strcmp(sync->apps_id, apps_id)
+				&& (!strcmp(MSM_APPS_ID_V4L2, apps_id))) {
+		pr_err("%s(%s): sensor %s is already opened for %s\n",
+			__func__,
+			apps_id,
+			sync->sdata->sensor_name,
+			sync->apps_id);
+		rc = -EBUSY;
+		goto msm_open_done;
+	}
+
+	sync->apps_id = apps_id;
+
+	if (!sync->core_powered_on && !is_controlnode) {
+		wake_lock(&sync->wake_lock);
+
+		msm_camvfe_fn_init(&sync->vfefn, sync);
+		if (sync->vfefn.vfe_init) {
+			sync->pp_frame_avail = 0;
+			sync->get_pic_abort = 0;
+			rc = msm_camio_sensor_clk_on(sync->pdev);
+			if (rc < 0) {
+				pr_err("%s: setting sensor clocks failed: %d\n",
+					__func__, rc);
+				goto msm_open_err;
+			}
+			rc = sync->sctrl.s_init(sync->sdata);
+			if (rc < 0) {
+				pr_err("%s: sensor init failed: %d\n",
+					__func__, rc);
+				msm_camio_sensor_clk_off(sync->pdev);
+				goto msm_open_err;
+			}
+			rc = sync->vfefn.vfe_init(&msm_vfe_s,
+				sync->pdev);
+			if (rc < 0) {
+				pr_err("%s: vfe_init failed at %d\n",
+					__func__, rc);
+				sync->sctrl.s_release();
+				msm_camio_sensor_clk_off(sync->pdev);
+				goto msm_open_err;
+			}
+		} else {
+			pr_err("%s: no sensor init func\n", __func__);
+			rc = -ENODEV;
+			goto msm_open_err;
+		}
+		msm_camvpe_fn_init(&sync->vpefn, sync);
+
+		spin_lock_init(&sync->abort_pict_lock);
+		spin_lock_init(&pp_prev_spinlock);
+		spin_lock_init(&pp_stereocam_spinlock);
+		spin_lock_init(&st_frame_spinlock);
+		if (rc >= 0) {
+			msm_region_init(sync);
+			if (sync->vpefn.vpe_reg)
+				sync->vpefn.vpe_reg(&msm_vpe_s);
+			sync->unblock_poll_frame = 0;
+			sync->unblock_poll_pic_frame = 0;
+		}
+		sync->core_powered_on = 1;
+	}
+	sync->opencnt++;
+
+msm_open_done:
+	mutex_unlock(&sync->lock);
+	return rc;
+
+msm_open_err:
+	atomic_set(&pmsm->opened, 0);
+	mutex_unlock(&sync->lock);
+	return rc;
+}
+
+static int msm_open_common(struct inode *inode, struct file *filep,
+			int once, int is_controlnode)
+{
+	int rc;
+	struct msm_cam_device *pmsm =
+		container_of(inode->i_cdev, struct msm_cam_device, cdev);
+
+	CDBG("%s: open %s\n", __func__, filep->f_path.dentry->d_name.name);
+
+	if (atomic_cmpxchg(&pmsm->opened, 0, 1) && once) {
+		pr_err("%s: %s is already opened.\n",
+			__func__,
+			filep->f_path.dentry->d_name.name);
+		return -EBUSY;
+	}
+
+	rc = nonseekable_open(inode, filep);
+	if (rc < 0) {
+		pr_err("%s: nonseekable_open error %d\n", __func__, rc);
+		return rc;
+	}
+
+	rc = __msm_open(pmsm, MSM_APPS_ID_PROP, is_controlnode);
+	if (rc < 0)
+		return rc;
+	filep->private_data = pmsm;
+	CDBG("%s: rc %d\n", __func__, rc);
+	return rc;
+}
+
+static int msm_open(struct inode *inode, struct file *filep)
+{
+	return msm_open_common(inode, filep, 1, 0);
+}
+
+static int msm_open_control(struct inode *inode, struct file *filep)
+{
+	int rc;
+
+	struct msm_control_device *ctrl_pmsm =
+		kmalloc(sizeof(struct msm_control_device), GFP_KERNEL);
+	if (!ctrl_pmsm)
+		return -ENOMEM;
+
+	rc = msm_open_common(inode, filep, 0, 1);
+	if (rc < 0) {
+		kfree(ctrl_pmsm);
+		return rc;
+	}
+	ctrl_pmsm->pmsm = filep->private_data;
+	filep->private_data = ctrl_pmsm;
+
+	msm_queue_init(&ctrl_pmsm->ctrl_q, "control");
+
+	if (!g_v4l2_opencnt)
+		g_v4l2_control_device = ctrl_pmsm;
+	g_v4l2_opencnt++;
+	CDBG("%s: rc %d\n", __func__, rc);
+	return rc;
+}
+
+static const struct file_operations msm_fops_config = {
+	.owner = THIS_MODULE,
+	.open = msm_open,
+	.unlocked_ioctl = msm_ioctl_config,
+	.release = msm_release_config,
+	.poll = msm_poll_config,
+};
+
+static const struct file_operations msm_fops_control = {
+	.owner = THIS_MODULE,
+	.open = msm_open_control,
+	.unlocked_ioctl = msm_ioctl_control,
+	.release = msm_release_control,
+};
+
+static const struct file_operations msm_fops_frame = {
+	.owner = THIS_MODULE,
+	.open = msm_open,
+	.unlocked_ioctl = msm_ioctl_frame,
+	.release = msm_release_frame,
+	.poll = msm_poll_frame,
+};
+
+static const struct file_operations msm_fops_pic = {
+	.owner = THIS_MODULE,
+	.open = msm_open,
+	.unlocked_ioctl = msm_ioctl_pic,
+	.release = msm_release_pic,
+	.poll = msm_poll_pic,
+};
+
+static int msm_setup_cdev(struct msm_cam_device *msm,
+			int node,
+			dev_t devno,
+			const char *suffix,
+			const struct file_operations *fops)
+{
+	int rc = -ENODEV;
+
+	struct device *device =
+		device_create(msm_class, NULL,
+			devno, NULL,
+			"%s%d", suffix, node);
+
+	if (IS_ERR(device)) {
+		rc = PTR_ERR(device);
+		pr_err("%s: error creating device: %d\n", __func__, rc);
+		return rc;
+	}
+
+	cdev_init(&msm->cdev, fops);
+	msm->cdev.owner = THIS_MODULE;
+
+	rc = cdev_add(&msm->cdev, devno, 1);
+	if (rc < 0) {
+		pr_err("%s: error adding cdev: %d\n", __func__, rc);
+		device_destroy(msm_class, devno);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int msm_tear_down_cdev(struct msm_cam_device *msm, dev_t devno)
+{
+	cdev_del(&msm->cdev);
+	device_destroy(msm_class, devno);
+	return 0;
+}
+
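+/*
+ * Probe the sensor with the camera I/O block temporarily powered on, record
+ * the returned sensor control ops, and initialize the per-sensor queues,
+ * wake lock and locks.  The wake lock is destroyed again on any failure.
+ */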
+static int msm_sync_init(struct msm_sync *sync,
+		struct platform_device *pdev,
+		int (*sensor_probe)(const struct msm_camera_sensor_info *,
+				struct msm_sensor_ctrl *))
+{
+	int rc = 0;
+	struct msm_sensor_ctrl sctrl;
+	sync->sdata = pdev->dev.platform_data;
+
+	msm_queue_init(&sync->event_q, "event");
+	msm_queue_init(&sync->frame_q, "frame");
+	msm_queue_init(&sync->pict_q, "pict");
+	msm_queue_init(&sync->vpe_q, "vpe");
+
+	wake_lock_init(&sync->wake_lock, WAKE_LOCK_IDLE, "msm_camera");
+
+	rc = msm_camio_probe_on(pdev);
+	if (rc < 0) {
+		wake_lock_destroy(&sync->wake_lock);
+		return rc;
+	}
+	rc = sensor_probe(sync->sdata, &sctrl);
+	if (rc >= 0) {
+		sync->pdev = pdev;
+		sync->sctrl = sctrl;
+	}
+	msm_camio_probe_off(pdev);
+	if (rc < 0) {
+		pr_err("%s: failed to initialize %s\n",
+			__func__,
+			sync->sdata->sensor_name);
+		wake_lock_destroy(&sync->wake_lock);
+		return rc;
+	}
+
+	sync->opencnt = 0;
+	sync->core_powered_on = 0;
+	sync->ignore_qcmd = false;
+	sync->ignore_qcmd_type = -1;
+	mutex_init(&sync->lock);
+	if (sync->sdata->strobe_flash_data) {
+		sync->sdata->strobe_flash_data->state = 0;
+		spin_lock_init(&sync->sdata->strobe_flash_data->spin_lock);
+	}
+	CDBG("%s: initialized %s\n", __func__, sync->sdata->sensor_name);
+	return rc;
+}
+
+static int msm_sync_destroy(struct msm_sync *sync)
+{
+	wake_lock_destroy(&sync->wake_lock);
+	return 0;
+}
+
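+/*
+ * Create the four per-sensor character devices (control, config, frame, pic)
+ * at consecutive minors starting at 4 * node; all four share the same
+ * msm_sync state.  Earlier nodes are torn down again if a later one fails.
+ */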
+static int msm_device_init(struct msm_cam_device *pmsm,
+		struct msm_sync *sync,
+		int node)
+{
+	int dev_num = 4 * node;
+	int rc = msm_setup_cdev(pmsm, node,
+		MKDEV(MAJOR(msm_devno), dev_num),
+		"control", &msm_fops_control);
+	if (rc < 0) {
+		pr_err("%s: error creating control node: %d\n", __func__, rc);
+		return rc;
+	}
+
+	rc = msm_setup_cdev(pmsm + 1, node,
+		MKDEV(MAJOR(msm_devno), dev_num + 1),
+		"config", &msm_fops_config);
+	if (rc < 0) {
+		pr_err("%s: error creating config node: %d\n", __func__, rc);
+		msm_tear_down_cdev(pmsm, MKDEV(MAJOR(msm_devno),
+				dev_num));
+		return rc;
+	}
+
+	rc = msm_setup_cdev(pmsm + 2, node,
+		MKDEV(MAJOR(msm_devno), dev_num + 2),
+		"frame", &msm_fops_frame);
+	if (rc < 0) {
+		pr_err("%s: error creating frame node: %d\n", __func__, rc);
+		msm_tear_down_cdev(pmsm,
+			MKDEV(MAJOR(msm_devno), dev_num));
+		msm_tear_down_cdev(pmsm + 1,
+			MKDEV(MAJOR(msm_devno), dev_num + 1));
+		return rc;
+	}
+
+	rc = msm_setup_cdev(pmsm + 3, node,
+		MKDEV(MAJOR(msm_devno), dev_num + 3),
+		"pic", &msm_fops_pic);
+	if (rc < 0) {
+		pr_err("%s: error creating pic node: %d\n", __func__, rc);
+		msm_tear_down_cdev(pmsm,
+			MKDEV(MAJOR(msm_devno), dev_num));
+		msm_tear_down_cdev(pmsm + 1,
+			MKDEV(MAJOR(msm_devno), dev_num + 1));
+		msm_tear_down_cdev(pmsm + 2,
+			MKDEV(MAJOR(msm_devno), dev_num + 2));
+		return rc;
+	}
+
+	atomic_set(&pmsm[0].opened, 0);
+	atomic_set(&pmsm[1].opened, 0);
+	atomic_set(&pmsm[2].opened, 0);
+	atomic_set(&pmsm[3].opened, 0);
+
+	pmsm[0].sync = sync;
+	pmsm[1].sync = sync;
+	pmsm[2].sync = sync;
+	pmsm[3].sync = sync;
+
+	return rc;
+}
+
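+/*
+ * Register the next camera sensor: the chrdev region (four minors per sensor)
+ * and the "msm_camera" class are created on first use, then four
+ * msm_cam_device nodes plus one shared msm_sync are allocated as a single
+ * block and wired up via msm_sync_init()/msm_device_init().
+ */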
+int msm_camera_drv_start(struct platform_device *dev,
+		int (*sensor_probe)(const struct msm_camera_sensor_info *,
+			struct msm_sensor_ctrl *))
+{
+	struct msm_cam_device *pmsm = NULL;
+	struct msm_sync *sync;
+	int rc = -ENODEV;
+
+	if (camera_node >= MSM_MAX_CAMERA_SENSORS) {
+		pr_err("%s: too many camera sensors\n", __func__);
+		return rc;
+	}
+
+	if (!msm_class) {
+		/* There are four device nodes per sensor */
+		rc = alloc_chrdev_region(&msm_devno, 0,
+				4 * MSM_MAX_CAMERA_SENSORS,
+				"msm_camera");
+		if (rc < 0) {
+			pr_err("%s: failed to allocate chrdev: %d\n", __func__,
+				rc);
+			return rc;
+		}
+
+		msm_class = class_create(THIS_MODULE, "msm_camera");
+		if (IS_ERR(msm_class)) {
+			rc = PTR_ERR(msm_class);
+			pr_err("%s: create device class failed: %d\n",
+				__func__, rc);
+			return rc;
+		}
+	}
+
+	pmsm = kzalloc(sizeof(struct msm_cam_device) * 4 +
+			sizeof(struct msm_sync), GFP_ATOMIC);
+	if (!pmsm)
+		return -ENOMEM;
+	sync = (struct msm_sync *)(pmsm + 4);
+
+	rc = msm_sync_init(sync, dev, sensor_probe);
+	if (rc < 0) {
+		kfree(pmsm);
+		return rc;
+	}
+
+	CDBG("%s: setting camera node %d\n", __func__, camera_node);
+	rc = msm_device_init(pmsm, sync, camera_node);
+	if (rc < 0) {
+		msm_sync_destroy(sync);
+		kfree(pmsm);
+		return rc;
+	}
+
+	camera_type[camera_node] = sync->sctrl.s_camera_type;
+	sensor_mount_angle[camera_node] = sync->sctrl.s_mount_angle;
+	camera_node++;
+
+	list_add(&sync->list, &msm_sensors);
+	return rc;
+}
+EXPORT_SYMBOL(msm_camera_drv_start);
diff --git a/drivers/media/video/msm/msm_gemini_common.h b/drivers/media/video/msm/msm_gemini_common.h
new file mode 100644
index 0000000..0ddedc5
--- /dev/null
+++ b/drivers/media/video/msm/msm_gemini_common.h
@@ -0,0 +1,39 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_GEMINI_COMMON_H
+#define MSM_GEMINI_COMMON_H
+
+#define MSM_GEMINI_DEBUG
+#ifdef MSM_GEMINI_DEBUG
+#define GMN_DBG(fmt, args...) pr_debug(fmt, ##args)
+#else
+#define GMN_DBG(fmt, args...) do { } while (0)
+#endif
+
+#define GMN_PR_ERR   pr_err
+
+enum GEMINI_MODE {
+	GEMINI_MODE_DISABLE,
+	GEMINI_MODE_OFFLINE,
+	GEMINI_MODE_REALTIME,
+	GEMINI_MODE_REALTIME_ROTATION
+};
+
+enum GEMINI_ROTATION {
+	GEMINI_ROTATION_0,
+	GEMINI_ROTATION_90,
+	GEMINI_ROTATION_180,
+	GEMINI_ROTATION_270
+};
+
+#endif /* MSM_GEMINI_COMMON_H */
diff --git a/drivers/media/video/msm/msm_gemini_core.c b/drivers/media/video/msm/msm_gemini_core.c
new file mode 100644
index 0000000..58c2e7c
--- /dev/null
+++ b/drivers/media/video/msm/msm_gemini_core.c
@@ -0,0 +1,249 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include "msm_gemini_hw.h"
+#include "msm_gemini_core.h"
+#include "msm_gemini_platform.h"
+#include "msm_gemini_common.h"
+
+static struct msm_gemini_hw_pingpong fe_pingpong_buf;
+static struct msm_gemini_hw_pingpong we_pingpong_buf;
+static int we_pingpong_index;
+static int reset_done_ack;
+static spinlock_t reset_lock;
+static wait_queue_head_t reset_wait;
+
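+/*
+ * Reset the Gemini core: clear both ping-pong trackers, issue the hardware
+ * reset under reset_lock, wait up to 500 ms for the reset-ack interrupt and
+ * then program the write-engine buffer thresholds for offline or real-time
+ * encode.
+ */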
+int msm_gemini_core_reset(uint8_t op_mode, void *base, int size)
+{
+	unsigned long flags;
+	int rc = 0;
+	int tm = 500; /*500ms*/
+	memset(&fe_pingpong_buf, 0, sizeof(fe_pingpong_buf));
+	fe_pingpong_buf.is_fe = 1;
+	we_pingpong_index = 0;
+	memset(&we_pingpong_buf, 0, sizeof(we_pingpong_buf));
+	spin_lock_irqsave(&reset_lock, flags);
+	reset_done_ack = 0;
+	msm_gemini_hw_reset(base, size);
+	spin_unlock_irqrestore(&reset_lock, flags);
+	rc = wait_event_interruptible_timeout(
+			reset_wait,
+			reset_done_ack,
+			msecs_to_jiffies(tm));
+
+	if (!reset_done_ack) {
+		GMN_PR_ERR("%s: reset ACK failed, rc = %d\n", __func__, rc);
+		return -EBUSY;
+	}
+
+	GMN_DBG("%s: reset_done_ack rc %d", __func__, rc);
+	spin_lock_irqsave(&reset_lock, flags);
+	reset_done_ack = 0;
+	spin_unlock_irqrestore(&reset_lock, flags);
+
+	if (op_mode == MSM_GEMINI_MODE_REALTIME_ENCODE) {
+		/* Nothing needed for fe buffer cfg, config we only */
+		msm_gemini_hw_we_buffer_cfg(1);
+	} else {
+		/* Nothing needed for fe buffer cfg, config we only */
+		msm_gemini_hw_we_buffer_cfg(0);
+	}
+
+	/* @todo wait for reset done irq */
+
+	return 0;
+}
+
+void msm_gemini_core_release(int release_buf)
+{
+	int i = 0;
+	for (i = 0; i < 2; i++) {
+		if (we_pingpong_buf.buf_status[i] && release_buf)
+			msm_gemini_platform_p2v(we_pingpong_buf.buf[i].file);
+		we_pingpong_buf.buf_status[i] = 0;
+	}
+}
+
+void msm_gemini_core_init(void)
+{
+	init_waitqueue_head(&reset_wait);
+	spin_lock_init(&reset_lock);
+}
+
+int msm_gemini_core_fe_start(void)
+{
+	msm_gemini_hw_fe_start();
+	return 0;
+}
+
+/* fetch engine */
+int msm_gemini_core_fe_buf_update(struct msm_gemini_core_buf *buf)
+{
+	GMN_DBG("%s:%d] 0x%08x %d 0x%08x %d\n", __func__, __LINE__,
+		(int) buf->y_buffer_addr, buf->y_len,
+		(int) buf->cbcr_buffer_addr, buf->cbcr_len);
+	return msm_gemini_hw_pingpong_update(&fe_pingpong_buf, buf);
+}
+
+void *msm_gemini_core_fe_pingpong_irq(int gemini_irq_status, void *context)
+{
+	return msm_gemini_hw_pingpong_irq(&fe_pingpong_buf);
+}
+
+/* write engine */
+int msm_gemini_core_we_buf_update(struct msm_gemini_core_buf *buf)
+{
+	int rc;
+	GMN_DBG("%s:%d] 0x%08x 0x%08x %d\n", __func__, __LINE__,
+		(int) buf->y_buffer_addr, (int) buf->cbcr_buffer_addr,
+		buf->y_len);
+	we_pingpong_buf.buf_status[we_pingpong_index] = 0;
+	we_pingpong_index = (we_pingpong_index + 1)%2;
+	rc = msm_gemini_hw_pingpong_update(&we_pingpong_buf, buf);
+	return rc;
+}
+
+int msm_gemini_core_we_buf_reset(struct msm_gemini_hw_buf *buf)
+{
+	int i = 0;
+	for (i = 0; i < 2; i++) {
+		if (we_pingpong_buf.buf[i].y_buffer_addr
+					== buf->y_buffer_addr)
+			we_pingpong_buf.buf_status[i] = 0;
+	}
+	return 0;
+}
+
+void *msm_gemini_core_we_pingpong_irq(int gemini_irq_status, void *context)
+{
+	GMN_DBG("%s:%d]\n", __func__, __LINE__);
+
+	return msm_gemini_hw_pingpong_irq(&we_pingpong_buf);
+}
+
+void *msm_gemini_core_framedone_irq(int gemini_irq_status, void *context)
+{
+	struct msm_gemini_hw_buf *buf_p;
+
+	GMN_DBG("%s:%d]\n", __func__, __LINE__);
+
+	buf_p = msm_gemini_hw_pingpong_active_buffer(&we_pingpong_buf);
+	if (buf_p) {
+		buf_p->framedone_len = msm_gemini_hw_encode_output_size();
+		GMN_DBG("%s:%d] framedone_len %d\n", __func__, __LINE__,
+			buf_p->framedone_len);
+	}
+
+	return buf_p;
+}
+
+void *msm_gemini_core_reset_ack_irq(int gemini_irq_status, void *context)
+{
+	/* @todo return the status back to msm_gemini_core_reset */
+	GMN_DBG("%s:%d]\n", __func__, __LINE__);
+	return NULL;
+}
+
+void *msm_gemini_core_err_irq(int gemini_irq_status, void *context)
+{
+	GMN_PR_ERR("%s:%d] gemini_irq_status = 0x%x\n", __func__, __LINE__,
+		gemini_irq_status);
+	return NULL;
+}
+
+static int (*msm_gemini_irq_handler) (int, void *, void *);
+
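+/*
+ * Top-level Gemini interrupt handler: read and clear the IRQ status, then fan
+ * the event out (frame done, fetch/write-engine ping-pong, reset ack, errors)
+ * to the handler installed via msm_gemini_core_irq_install().
+ */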
+irqreturn_t msm_gemini_core_irq(int irq_num, void *context)
+{
+	void *data = NULL;
+	unsigned long flags;
+	int gemini_irq_status;
+
+	GMN_DBG("%s:%d] irq_num = %d\n", __func__, __LINE__, irq_num);
+
+	spin_lock_irqsave(&reset_lock, flags);
+	reset_done_ack = 1;
+	spin_unlock_irqrestore(&reset_lock, flags);
+	gemini_irq_status = msm_gemini_hw_irq_get_status();
+
+	GMN_DBG("%s:%d] gemini_irq_status = %0x\n", __func__, __LINE__,
+		gemini_irq_status);
+
+	/* For reset and framedone IRQs, clear all bits */
+	if (gemini_irq_status & 0x400) {
+		wake_up(&reset_wait);
+		msm_gemini_hw_irq_clear(HWIO_JPEG_IRQ_CLEAR_RMSK,
+			JPEG_IRQ_CLEAR_ALL);
+	} else if (gemini_irq_status & 0x1) {
+		msm_gemini_hw_irq_clear(HWIO_JPEG_IRQ_CLEAR_RMSK,
+			JPEG_IRQ_CLEAR_ALL);
+	} else {
+		msm_gemini_hw_irq_clear(HWIO_JPEG_IRQ_CLEAR_RMSK,
+			gemini_irq_status);
+	}
+
+	if (msm_gemini_hw_irq_is_frame_done(gemini_irq_status)) {
+		data = msm_gemini_core_framedone_irq(gemini_irq_status,
+			context);
+		if (msm_gemini_irq_handler)
+			msm_gemini_irq_handler(
+				MSM_GEMINI_HW_MASK_COMP_FRAMEDONE,
+				context, data);
+	}
+
+	if (msm_gemini_hw_irq_is_fe_pingpong(gemini_irq_status)) {
+		data = msm_gemini_core_fe_pingpong_irq(gemini_irq_status,
+			context);
+		if (msm_gemini_irq_handler)
+			msm_gemini_irq_handler(MSM_GEMINI_HW_MASK_COMP_FE,
+				context, data);
+	}
+
+	if (msm_gemini_hw_irq_is_we_pingpong(gemini_irq_status) &&
+	    !msm_gemini_hw_irq_is_frame_done(gemini_irq_status)) {
+		data = msm_gemini_core_we_pingpong_irq(gemini_irq_status,
+			context);
+		if (msm_gemini_irq_handler)
+			msm_gemini_irq_handler(MSM_GEMINI_HW_MASK_COMP_WE,
+				context, data);
+	}
+
+	if (msm_gemini_hw_irq_is_reset_ack(gemini_irq_status)) {
+		data = msm_gemini_core_reset_ack_irq(gemini_irq_status,
+			context);
+		if (msm_gemini_irq_handler)
+			msm_gemini_irq_handler(
+				MSM_GEMINI_HW_MASK_COMP_RESET_ACK,
+				context, data);
+	}
+
+	/* Unexpected/unintended HW interrupt */
+	if (msm_gemini_hw_irq_is_err(gemini_irq_status)) {
+		data = msm_gemini_core_err_irq(gemini_irq_status, context);
+		if (msm_gemini_irq_handler)
+			msm_gemini_irq_handler(MSM_GEMINI_HW_MASK_COMP_ERR,
+				context, data);
+	}
+
+	return IRQ_HANDLED;
+}
+
+void msm_gemini_core_irq_install(int (*irq_handler) (int, void *, void *))
+{
+	msm_gemini_irq_handler = irq_handler;
+}
+
+void msm_gemini_core_irq_remove(void)
+{
+	msm_gemini_irq_handler = NULL;
+}
diff --git a/drivers/media/video/msm/msm_gemini_core.h b/drivers/media/video/msm/msm_gemini_core.h
new file mode 100644
index 0000000..f240505
--- /dev/null
+++ b/drivers/media/video/msm/msm_gemini_core.h
@@ -0,0 +1,35 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_GEMINI_CORE_H
+#define MSM_GEMINI_CORE_H
+
+#include <linux/interrupt.h>
+#include "msm_gemini_hw.h"
+
+#define msm_gemini_core_buf msm_gemini_hw_buf
+
+irqreturn_t msm_gemini_core_irq(int irq_num, void *context);
+
+void msm_gemini_core_irq_install(int (*irq_handler) (int, void *, void *));
+void msm_gemini_core_irq_remove(void);
+
+int msm_gemini_core_fe_buf_update(struct msm_gemini_core_buf *buf);
+int msm_gemini_core_we_buf_update(struct msm_gemini_core_buf *buf);
+int msm_gemini_core_we_buf_reset(struct msm_gemini_hw_buf *buf);
+
+int msm_gemini_core_reset(uint8_t op_mode, void *base, int size);
+int msm_gemini_core_fe_start(void);
+
+void msm_gemini_core_release(int);
+void msm_gemini_core_init(void);
+#endif /* MSM_GEMINI_CORE_H */
diff --git a/drivers/media/video/msm/msm_gemini_dev.c b/drivers/media/video/msm/msm_gemini_dev.c
new file mode 100644
index 0000000..1156bb0
--- /dev/null
+++ b/drivers/media/video/msm/msm_gemini_dev.c
@@ -0,0 +1,208 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <mach/board.h>
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+
+#include <media/msm_gemini.h>
+#include "msm_gemini_sync.h"
+#include "msm_gemini_common.h"
+
+#define MSM_GEMINI_NAME "gemini"
+
+static int msm_gemini_open(struct inode *inode, struct file *filp)
+{
+	int rc;
+
+	struct msm_gemini_device *pgmn_dev = container_of(inode->i_cdev,
+		struct msm_gemini_device, cdev);
+	filp->private_data = pgmn_dev;
+
+	GMN_DBG("%s:%d]\n", __func__, __LINE__);
+
+	rc = __msm_gemini_open(pgmn_dev);
+
+	GMN_DBG("%s:%d] %s open_count = %d\n", __func__, __LINE__,
+		filp->f_path.dentry->d_name.name, pgmn_dev->open_count);
+
+	return rc;
+}
+
+static int msm_gemini_release(struct inode *inode, struct file *filp)
+{
+	int rc;
+
+	struct msm_gemini_device *pgmn_dev = filp->private_data;
+
+	GMN_DBG("%s:%d]\n", __func__, __LINE__);
+
+	rc = __msm_gemini_release(pgmn_dev);
+
+	GMN_DBG("%s:%d] %s open_count = %d\n", __func__, __LINE__,
+		filp->f_path.dentry->d_name.name, pgmn_dev->open_count);
+	return rc;
+}
+
+static long msm_gemini_ioctl(struct file *filp, unsigned int cmd,
+	unsigned long arg)
+{
+	int rc;
+	struct msm_gemini_device *pgmn_dev = filp->private_data;
+
+	GMN_DBG("%s:%d] cmd = %d\n", __func__, __LINE__,
+		_IOC_NR(cmd));
+
+	rc = __msm_gemini_ioctl(pgmn_dev, cmd, arg);
+
+	GMN_DBG("%s:%d]\n", __func__, __LINE__);
+	return rc;
+}
+
+static const struct file_operations msm_gemini_fops = {
+	.owner	  = THIS_MODULE,
+	.open	   = msm_gemini_open,
+	.release	= msm_gemini_release,
+	.unlocked_ioctl = msm_gemini_ioctl,
+};
+
+static struct class *msm_gemini_class;
+static dev_t msm_gemini_devno;
+static struct msm_gemini_device *msm_gemini_device_p;
+
+static int msm_gemini_init(struct platform_device *pdev)
+{
+	int rc = -1;
+	struct device *dev;
+
+	GMN_DBG("%s:%d]\n", __func__, __LINE__);
+
+	msm_gemini_device_p = __msm_gemini_init(pdev);
+	if (msm_gemini_device_p == NULL) {
+		GMN_PR_ERR("%s: initialization failed\n", __func__);
+		goto fail;
+	}
+
+	rc = alloc_chrdev_region(&msm_gemini_devno, 0, 1, MSM_GEMINI_NAME);
+	if (rc < 0) {
+		GMN_PR_ERR("%s: failed to allocate chrdev\n", __func__);
+		goto fail_1;
+	}
+
+	if (!msm_gemini_class) {
+		msm_gemini_class = class_create(THIS_MODULE, MSM_GEMINI_NAME);
+		if (IS_ERR(msm_gemini_class)) {
+			rc = PTR_ERR(msm_gemini_class);
+			GMN_PR_ERR("%s: create device class failed\n",
+				__func__);
+			goto fail_2;
+		}
+	}
+
+	dev = device_create(msm_gemini_class, NULL,
+		MKDEV(MAJOR(msm_gemini_devno), MINOR(msm_gemini_devno)), NULL,
+		"%s%d", MSM_GEMINI_NAME, 0);
+
+	if (IS_ERR(dev)) {
+		GMN_PR_ERR("%s: error creating device\n", __func__);
+		rc = -ENODEV;
+		goto fail_3;
+	}
+
+	cdev_init(&msm_gemini_device_p->cdev, &msm_gemini_fops);
+	msm_gemini_device_p->cdev.owner = THIS_MODULE;
+	msm_gemini_device_p->cdev.ops = &msm_gemini_fops;
+	rc = cdev_add(&msm_gemini_device_p->cdev, msm_gemini_devno, 1);
+	if (rc < 0) {
+		GMN_PR_ERR("%s: error adding cdev\n", __func__);
+		rc = -ENODEV;
+		goto fail_4;
+	}
+
+	GMN_DBG("%s %s: success\n", __func__, MSM_GEMINI_NAME);
+
+	return rc;
+
+fail_4:
+	device_destroy(msm_gemini_class, msm_gemini_devno);
+
+fail_3:
+	class_destroy(msm_gemini_class);
+
+fail_2:
+	unregister_chrdev_region(msm_gemini_devno, 1);
+
+fail_1:
+	__msm_gemini_exit(msm_gemini_device_p);
+
+fail:
+	return rc;
+}
+
+static void msm_gemini_exit(void)
+{
+	cdev_del(&msm_gemini_device_p->cdev);
+	device_destroy(msm_gemini_class, msm_gemini_devno);
+	class_destroy(msm_gemini_class);
+	unregister_chrdev_region(msm_gemini_devno, 1);
+
+	__msm_gemini_exit(msm_gemini_device_p);
+}
+
+static int __msm_gemini_probe(struct platform_device *pdev)
+{
+	int rc;
+	rc = msm_gemini_init(pdev);
+	return rc;
+}
+
+static int __msm_gemini_remove(struct platform_device *pdev)
+{
+	msm_gemini_exit();
+	return 0;
+}
+
+static struct platform_driver msm_gemini_driver = {
+	.probe  = __msm_gemini_probe,
+	.remove = __msm_gemini_remove,
+	.driver = {
+		.name = "msm_gemini",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init msm_gemini_driver_init(void)
+{
+	int rc;
+	rc = platform_driver_register(&msm_gemini_driver);
+	return rc;
+}
+
+static void __exit msm_gemini_driver_exit(void)
+{
+	platform_driver_unregister(&msm_gemini_driver);
+}
+
+MODULE_DESCRIPTION("msm gemini jpeg driver");
+MODULE_VERSION("msm gemini 0.1");
+MODULE_LICENSE("GPL v2");
+
+module_init(msm_gemini_driver_init);
+module_exit(msm_gemini_driver_exit);
+
diff --git a/drivers/media/video/msm/msm_gemini_hw.c b/drivers/media/video/msm/msm_gemini_hw.c
new file mode 100644
index 0000000..ba8f353
--- /dev/null
+++ b/drivers/media/video/msm/msm_gemini_hw.c
@@ -0,0 +1,525 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include "msm_gemini_hw.h"
+#include "msm_gemini_common.h"
+
+#include <linux/io.h>
+
+static void *gemini_region_base;
+static uint32_t gemini_region_size;
+
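+/*
+ * Ping-pong double buffering: each engine tracks two buffer slots.  _update()
+ * parks a new buffer in the first free slot and programs it into the ping or
+ * pong registers; _irq() releases and returns the slot the hardware just
+ * completed and flips the active index.
+ */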
+int msm_gemini_hw_pingpong_update(struct msm_gemini_hw_pingpong *pingpong_hw,
+	struct msm_gemini_hw_buf *buf)
+{
+	int buf_free_index = -1;
+
+	if (!pingpong_hw->buf_status[0]) {
+		buf_free_index = 0;
+	} else if (!pingpong_hw->buf_status[1]) {
+		buf_free_index = 1;
+	} else {
+		GMN_PR_ERR("%s:%d: pingpong buffer busy\n", __func__, __LINE__);
+		return -1;
+	}
+
+	pingpong_hw->buf[buf_free_index] = *buf;
+	pingpong_hw->buf_status[buf_free_index] = 1;
+
+	if (pingpong_hw->is_fe) {
+		/* it is fe */
+		msm_gemini_hw_fe_buffer_update(
+			&pingpong_hw->buf[buf_free_index], buf_free_index);
+	} else {
+		/* it is we */
+		msm_gemini_hw_we_buffer_update(
+			&pingpong_hw->buf[buf_free_index], buf_free_index);
+	}
+	return 0;
+}
+
+void *msm_gemini_hw_pingpong_irq(struct msm_gemini_hw_pingpong *pingpong_hw)
+{
+	struct msm_gemini_hw_buf *buf_p = NULL;
+
+	if (pingpong_hw->buf_status[pingpong_hw->buf_active_index]) {
+		buf_p = &pingpong_hw->buf[pingpong_hw->buf_active_index];
+		pingpong_hw->buf_status[pingpong_hw->buf_active_index] = 0;
+	}
+
+	pingpong_hw->buf_active_index = !pingpong_hw->buf_active_index;
+
+	return (void *) buf_p;
+}
+
+void *msm_gemini_hw_pingpong_active_buffer(
+	struct msm_gemini_hw_pingpong *pingpong_hw)
+{
+	struct msm_gemini_hw_buf *buf_p = NULL;
+
+	if (pingpong_hw->buf_status[pingpong_hw->buf_active_index])
+		buf_p = &pingpong_hw->buf[pingpong_hw->buf_active_index];
+
+	return (void *) buf_p;
+}
+
+struct msm_gemini_hw_cmd hw_cmd_irq_get_status[] = {
+	/* type, repeat n times, offset, mask, data or pdata */
+	{MSM_GEMINI_HW_CMD_TYPE_READ, 1, HWIO_JPEG_IRQ_STATUS_ADDR,
+		HWIO_JPEG_IRQ_STATUS_RMSK, {0} },
+};
+
+int msm_gemini_hw_irq_get_status(void)
+{
+	uint32_t n_irq_status = 0;
+	rmb();
+	n_irq_status = msm_gemini_hw_read(&hw_cmd_irq_get_status[0]);
+	rmb();
+	return n_irq_status;
+}
+
+struct msm_gemini_hw_cmd hw_cmd_encode_output_size[] = {
+	/* type, repeat n times, offset, mask, data or pdata */
+	{MSM_GEMINI_HW_CMD_TYPE_READ, 1,
+		HWIO_JPEG_STATUS_ENCODE_OUTPUT_SIZE_ADDR,
+		HWIO_JPEG_STATUS_ENCODE_OUTPUT_SIZE_RMSK, {0} },
+};
+
+long msm_gemini_hw_encode_output_size(void)
+{
+	uint32_t encode_output_size = 0;
+
+	encode_output_size = msm_gemini_hw_read(&hw_cmd_encode_output_size[0]);
+
+	return encode_output_size;
+}
+
+struct msm_gemini_hw_cmd hw_cmd_irq_clear[] = {
+	/* type, repeat n times, offset, mask, data or pdata */
+	{MSM_GEMINI_HW_CMD_TYPE_WRITE, 1, HWIO_JPEG_IRQ_CLEAR_ADDR,
+		HWIO_JPEG_IRQ_CLEAR_RMSK, {JPEG_IRQ_CLEAR_ALL} },
+};
+
+void msm_gemini_hw_irq_clear(uint32_t mask, uint32_t data)
+{
+	GMN_DBG("%s:%d] mask %0x data %0x", __func__, __LINE__, mask, data);
+	hw_cmd_irq_clear[0].mask = mask;
+	hw_cmd_irq_clear[0].data = data;
+	msm_gemini_hw_write(&hw_cmd_irq_clear[0]);
+}
+
+struct msm_gemini_hw_cmd hw_cmd_fe_ping_update[] = {
+	/* type, repeat n times, offset, mask, data or pdata */
+	{MSM_GEMINI_HW_CMD_TYPE_WRITE, 1, HWIO_JPEG_FE_BUFFER_CFG_ADDR,
+		HWIO_JPEG_FE_BUFFER_CFG_RMSK, {0} },
+	{MSM_GEMINI_HW_CMD_TYPE_WRITE, 1, HWIO_JPEG_FE_Y_PING_ADDR_ADDR,
+		HWIO_JPEG_FE_Y_PING_ADDR_RMSK, {0} },
+	{MSM_GEMINI_HW_CMD_TYPE_WRITE, 1, HWIO_JPEG_FE_CBCR_PING_ADDR_ADDR,
+		HWIO_JPEG_FE_CBCR_PING_ADDR_RMSK, {0} },
+	{MSM_GEMINI_HW_CMD_TYPE_WRITE, 1, HWIO_JPEG_FE_CMD_ADDR,
+		HWIO_JPEG_FE_CMD_RMSK, {JPEG_FE_CMD_BUFFERRELOAD} },
+};
+
+struct msm_gemini_hw_cmd hw_cmd_fe_pong_update[] = {
+	/* type, repeat n times, offset, mask, data or pdata */
+	{MSM_GEMINI_HW_CMD_TYPE_WRITE, 1, HWIO_JPEG_FE_BUFFER_CFG_ADDR,
+		HWIO_JPEG_FE_BUFFER_CFG_RMSK, {0} },
+	{MSM_GEMINI_HW_CMD_TYPE_WRITE, 1, HWIO_JPEG_FE_Y_PONG_ADDR_ADDR,
+		HWIO_JPEG_FE_Y_PONG_ADDR_RMSK, {0} },
+	{MSM_GEMINI_HW_CMD_TYPE_WRITE, 1, HWIO_JPEG_FE_CBCR_PONG_ADDR_ADDR,
+		HWIO_JPEG_FE_CBCR_PONG_ADDR_RMSK, {0} },
+	{MSM_GEMINI_HW_CMD_TYPE_WRITE, 1, HWIO_JPEG_FE_CMD_ADDR,
+		HWIO_JPEG_FE_CMD_RMSK, {JPEG_FE_CMD_BUFFERRELOAD} },
+};
+
+void msm_gemini_hw_fe_buffer_update(struct msm_gemini_hw_buf *p_input,
+	uint8_t pingpong_index)
+{
+	uint32_t n_reg_val = 0;
+
+	struct msm_gemini_hw_cmd *hw_cmd_p;
+
+	if (pingpong_index == 0) {
+		hw_cmd_p = &hw_cmd_fe_ping_update[0];
+		n_reg_val = ((((p_input->num_of_mcu_rows - 1) <<
+			HWIO_JPEG_FE_BUFFER_CFG_CBCR_MCU_ROWS_SHFT) &
+			HWIO_JPEG_FE_BUFFER_CFG_CBCR_MCU_ROWS_BMSK) |
+			(((p_input->num_of_mcu_rows - 1) <<
+			HWIO_JPEG_FE_BUFFER_CFG_Y_MCU_ROWS_SHFT) &
+			HWIO_JPEG_FE_BUFFER_CFG_Y_MCU_ROWS_BMSK));
+		hw_cmd_p->data = n_reg_val;
+		msm_gemini_hw_write(hw_cmd_p++);
+
+		n_reg_val = ((p_input->y_buffer_addr <<
+			HWIO_JPEG_FE_Y_PING_ADDR_FE_Y_PING_START_ADDR_SHFT) &
+			HWIO_JPEG_FE_Y_PING_ADDR_FE_Y_PING_START_ADDR_BMSK);
+		hw_cmd_p->data = n_reg_val;
+		msm_gemini_hw_write(hw_cmd_p++);
+
+		n_reg_val = ((p_input->cbcr_buffer_addr<<
+		HWIO_JPEG_FE_CBCR_PING_ADDR_FE_CBCR_PING_START_ADDR_SHFT) &
+		HWIO_JPEG_FE_CBCR_PING_ADDR_FE_CBCR_PING_START_ADDR_BMSK);
+		hw_cmd_p->data = n_reg_val;
+		msm_gemini_hw_write(hw_cmd_p++);
+
+		msm_gemini_hw_write(hw_cmd_p);
+	} else if (pingpong_index == 1) {
+		hw_cmd_p = &hw_cmd_fe_pong_update[0];
+		n_reg_val = ((((p_input->num_of_mcu_rows - 1) <<
+			HWIO_JPEG_FE_BUFFER_CFG_CBCR_MCU_ROWS_SHFT) &
+			HWIO_JPEG_FE_BUFFER_CFG_CBCR_MCU_ROWS_BMSK) |
+			(((p_input->num_of_mcu_rows - 1) <<
+			HWIO_JPEG_FE_BUFFER_CFG_Y_MCU_ROWS_SHFT) &
+			HWIO_JPEG_FE_BUFFER_CFG_Y_MCU_ROWS_BMSK));
+		hw_cmd_p->data = n_reg_val;
+		msm_gemini_hw_write(hw_cmd_p++);
+
+		n_reg_val = ((p_input->y_buffer_addr <<
+			HWIO_JPEG_FE_Y_PONG_ADDR_FE_Y_PONG_START_ADDR_SHFT) &
+			HWIO_JPEG_FE_Y_PONG_ADDR_FE_Y_PONG_START_ADDR_BMSK);
+		hw_cmd_p->data = n_reg_val;
+		msm_gemini_hw_write(hw_cmd_p++);
+
+		n_reg_val = ((p_input->cbcr_buffer_addr<<
+		HWIO_JPEG_FE_CBCR_PONG_ADDR_FE_CBCR_PONG_START_ADDR_SHFT) &
+		HWIO_JPEG_FE_CBCR_PONG_ADDR_FE_CBCR_PONG_START_ADDR_BMSK);
+		hw_cmd_p->data = n_reg_val;
+		msm_gemini_hw_write(hw_cmd_p++);
+
+		msm_gemini_hw_write(hw_cmd_p);
+	} else {
+		/* shall not get to here */
+	}
+
+	return;
+}
+
+struct msm_gemini_hw_cmd hw_cmd_fe_start[] = {
+	/* type, repeat n times, offset, mask, data or pdata */
+	{MSM_GEMINI_HW_CMD_TYPE_WRITE, 1, HWIO_JPEG_FE_CMD_ADDR,
+		HWIO_JPEG_FE_CMD_RMSK, {JPEG_OFFLINE_CMD_START} },
+};
+
+void msm_gemini_hw_fe_start(void)
+{
+	msm_gemini_hw_write(&hw_cmd_fe_start[0]);
+
+	return;
+}
+
+struct msm_gemini_hw_cmd hw_cmd_we_buffer_cfg[] = {
+	/* type, repeat n times, offset, mask, data or pdata */
+	{MSM_GEMINI_HW_CMD_TYPE_WRITE, 1, HWIO_JPEG_WE_Y_THRESHOLD_ADDR,
+		HWIO_JPEG_WE_Y_THRESHOLD_RMSK, {0} },
+	{MSM_GEMINI_HW_CMD_TYPE_WRITE, 1, HWIO_JPEG_WE_Y_UB_CFG_ADDR,
+		HWIO_JPEG_WE_Y_UB_CFG_RMSK, {JPEG_WE_YUB_ENCODE} },
+	{MSM_GEMINI_HW_CMD_TYPE_WRITE, 1, HWIO_JPEG_WE_CBCR_THRESHOLD_ADDR,
+		HWIO_JPEG_WE_CBCR_THRESHOLD_RMSK, {0} },
+};
+
+/* first dimension is WE_ASSERT_STALL_TH and WE_DEASSERT_STALL_TH
+   second dimension is for offline and real-time settings
+ */
+static const uint32_t GEMINI_WE_Y_THRESHOLD[2][2] = {
+	{ 0x00000190, 0x000001ff },
+	{ 0x0000016a, 0x000001ff }
+};
+
+/* first dimension is WE_ASSERT_STALL_TH and WE_DEASSERT_STALL_TH
+   second dimension is for offline and real-time settings
+ */
+static const uint32_t GEMINI_WE_CBCR_THRESHOLD[2][2] = {
+	{ 0x00000190, 0x000001ff },
+	{ 0x0000016a, 0x000001ff }
+};
+
+void msm_gemini_hw_we_buffer_cfg(uint8_t is_realtime)
+{
+	uint32_t              n_reg_val = 0;
+
+	struct msm_gemini_hw_cmd *hw_cmd_p = &hw_cmd_we_buffer_cfg[0];
+
+	n_reg_val = (((GEMINI_WE_Y_THRESHOLD[1][is_realtime] <<
+		HWIO_JPEG_WE_Y_THRESHOLD_WE_DEASSERT_STALL_TH_SHFT) &
+		HWIO_JPEG_WE_Y_THRESHOLD_WE_DEASSERT_STALL_TH_BMSK) |
+		((GEMINI_WE_Y_THRESHOLD[0][is_realtime] <<
+		HWIO_JPEG_WE_Y_THRESHOLD_WE_ASSERT_STALL_TH_SHFT) &
+		HWIO_JPEG_WE_Y_THRESHOLD_WE_ASSERT_STALL_TH_BMSK));
+	hw_cmd_p->data = n_reg_val;
+	msm_gemini_hw_write(hw_cmd_p++);
+
+	msm_gemini_hw_write(hw_cmd_p++);
+
+	/* @todo maybe not for realtime? */
+	n_reg_val = (((GEMINI_WE_CBCR_THRESHOLD[1][is_realtime] <<
+		HWIO_JPEG_WE_CBCR_THRESHOLD_WE_DEASSERT_STALL_TH_SHFT) &
+		HWIO_JPEG_WE_CBCR_THRESHOLD_WE_DEASSERT_STALL_TH_BMSK) |
+		((GEMINI_WE_CBCR_THRESHOLD[0][is_realtime] <<
+		HWIO_JPEG_WE_CBCR_THRESHOLD_WE_ASSERT_STALL_TH_SHFT) &
+		HWIO_JPEG_WE_CBCR_THRESHOLD_WE_ASSERT_STALL_TH_BMSK));
+	hw_cmd_p->data = n_reg_val;
+	msm_gemini_hw_write(hw_cmd_p);
+
+	return;
+}
+
+struct msm_gemini_hw_cmd hw_cmd_we_ping_update[] = {
+	/* type, repeat n times, offset, mask, data or pdata */
+	{MSM_GEMINI_HW_CMD_TYPE_WRITE, 1, HWIO_JPEG_WE_Y_PING_BUFFER_CFG_ADDR,
+		HWIO_JPEG_WE_Y_PING_BUFFER_CFG_RMSK, {0} },
+	{MSM_GEMINI_HW_CMD_TYPE_WRITE, 1, HWIO_JPEG_WE_Y_PING_ADDR_ADDR,
+		HWIO_JPEG_WE_Y_PING_ADDR_RMSK, {0} },
+};
+
+struct msm_gemini_hw_cmd hw_cmd_we_pong_update[] = {
+	/* type, repeat n times, offset, mask, data or pdata */
+	{MSM_GEMINI_HW_CMD_TYPE_WRITE, 1, HWIO_JPEG_WE_Y_PONG_BUFFER_CFG_ADDR,
+		HWIO_JPEG_WE_Y_PONG_BUFFER_CFG_RMSK, {0} },
+	{MSM_GEMINI_HW_CMD_TYPE_WRITE, 1, HWIO_JPEG_WE_Y_PONG_ADDR_ADDR,
+		HWIO_JPEG_WE_Y_PONG_ADDR_RMSK, {0} },
+};
+
+void msm_gemini_hw_we_buffer_update(struct msm_gemini_hw_buf *p_input,
+	uint8_t pingpong_index)
+{
+	uint32_t n_reg_val = 0;
+
+	struct msm_gemini_hw_cmd *hw_cmd_p;
+
+	if (pingpong_index == 0) {
+		hw_cmd_p = &hw_cmd_we_ping_update[0];
+
+		n_reg_val = ((p_input->y_len <<
+			HWIO_JPEG_WE_Y_PING_BUFFER_CFG_WE_BUFFER_LENGTH_SHFT) &
+			HWIO_JPEG_WE_Y_PING_BUFFER_CFG_WE_BUFFER_LENGTH_BMSK);
+		hw_cmd_p->data = n_reg_val;
+		msm_gemini_hw_write(hw_cmd_p++);
+
+		n_reg_val = p_input->y_buffer_addr;
+		hw_cmd_p->data = n_reg_val;
+		msm_gemini_hw_write(hw_cmd_p++);
+	} else if (pingpong_index == 1) {
+		hw_cmd_p = &hw_cmd_we_pong_update[0];
+
+		n_reg_val = ((p_input->y_len <<
+			HWIO_JPEG_WE_Y_PONG_BUFFER_CFG_WE_BUFFER_LENGTH_SHFT) &
+			HWIO_JPEG_WE_Y_PONG_BUFFER_CFG_WE_BUFFER_LENGTH_BMSK);
+		hw_cmd_p->data = n_reg_val;
+		msm_gemini_hw_write(hw_cmd_p++);
+
+		n_reg_val = p_input->y_buffer_addr;
+		hw_cmd_p->data = n_reg_val;
+		msm_gemini_hw_write(hw_cmd_p++);
+	} else {
+		/* shall not get to here */
+	}
+
+	return;
+}
+
+struct msm_gemini_hw_cmd hw_cmd_reset[] = {
+	/* type, repeat n times, offset, mask, data or pdata */
+	{MSM_GEMINI_HW_CMD_TYPE_WRITE, 1, HWIO_JPEG_IRQ_MASK_ADDR,
+		HWIO_JPEG_IRQ_MASK_RMSK, {JPEG_IRQ_DISABLE_ALL} },
+	{MSM_GEMINI_HW_CMD_TYPE_WRITE, 1, HWIO_JPEG_IRQ_CLEAR_ADDR,
+		HWIO_JPEG_IRQ_MASK_RMSK, {JPEG_IRQ_CLEAR_ALL} },
+	{MSM_GEMINI_HW_CMD_TYPE_WRITE, 1, HWIO_JPEG_IRQ_MASK_ADDR,
+		HWIO_JPEG_IRQ_MASK_RMSK, {JPEG_IRQ_ALLSOURCES_ENABLE} },
+	{MSM_GEMINI_HW_CMD_TYPE_WRITE, 1, HWIO_JPEG_RESET_CMD_ADDR,
+		HWIO_JPEG_RESET_CMD_RMSK, {JPEG_RESET_DEFAULT} },
+};
+
+void msm_gemini_hw_init(void *base, int size)
+{
+	gemini_region_base = base;
+	gemini_region_size = size;
+}
+
+void msm_gemini_hw_reset(void *base, int size)
+{
+	struct msm_gemini_hw_cmd *hw_cmd_p;
+
+	hw_cmd_p = &hw_cmd_reset[0];
+
+	wmb();
+	msm_gemini_hw_write(hw_cmd_p++);
+	msm_gemini_hw_write(hw_cmd_p++);
+	msm_gemini_hw_write(hw_cmd_p++);
+	msm_gemini_hw_write(hw_cmd_p);
+	wmb();
+
+	return;
+}
+
+uint32_t msm_gemini_hw_read(struct msm_gemini_hw_cmd *hw_cmd_p)
+{
+	uint32_t *paddr;
+	uint32_t data;
+
+	paddr = gemini_region_base + hw_cmd_p->offset;
+
+	data = readl(paddr);
+	data &= hw_cmd_p->mask;
+
+	GMN_DBG("%s:%d] type-%d n-%d offset-0x%4x mask-0x%8x data-0x%8x\n",
+		__func__, __LINE__, hw_cmd_p->type, hw_cmd_p->n,
+		hw_cmd_p->offset, hw_cmd_p->mask, data);
+	return data;
+}
+
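+/*
+ * Masked register write: bits outside hw_cmd_p->mask are preserved with a
+ * read-modify-write; a full 0xffffffff mask skips the read entirely.
+ */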
+void msm_gemini_hw_write(struct msm_gemini_hw_cmd *hw_cmd_p)
+{
+	uint32_t *paddr;
+	uint32_t old_data, new_data;
+
+	/* type, repeat n times, offset, mask, data or pdata */
+	GMN_DBG("%s:%d] type-%d n-%d offset-0x%4x mask-0x%8x data-0x%8x\n",
+		__func__, __LINE__, hw_cmd_p->type, hw_cmd_p->n,
+		hw_cmd_p->offset, hw_cmd_p->mask, hw_cmd_p->data);
+
+	paddr = gemini_region_base + hw_cmd_p->offset;
+
+	if (hw_cmd_p->mask == 0xffffffff) {
+		old_data = 0;
+	} else {
+		old_data = readl(paddr);
+		old_data &= ~hw_cmd_p->mask;
+	}
+
+	new_data = hw_cmd_p->data & hw_cmd_p->mask;
+	new_data |= old_data;
+	writel(new_data, paddr);
+}
+
+int msm_gemini_hw_wait(struct msm_gemini_hw_cmd *hw_cmd_p, int m_us)
+{
+	int tm = hw_cmd_p->n;
+	uint32_t data;
+	uint32_t wait_data = hw_cmd_p->data & hw_cmd_p->mask;
+
+	data = msm_gemini_hw_read(hw_cmd_p);
+	if (data != wait_data) {
+		while (tm) {
+			udelay(m_us);
+			data = msm_gemini_hw_read(hw_cmd_p);
+			if (data == wait_data)
+				break;
+			tm--;
+		}
+	}
+	hw_cmd_p->data = data;
+	return tm;
+}
+
+void msm_gemini_hw_delay(struct msm_gemini_hw_cmd *hw_cmd_p, int m_us)
+{
+	int tm = hw_cmd_p->n;
+	while (tm) {
+		udelay(m_us);
+		tm--;
+	}
+}
+
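+/*
+ * Execute a table of register commands (read, write, write-or, wait, delay).
+ * Returns 1 if at least one read was performed (so the table should be copied
+ * back to user space), -1 otherwise, or -EFAULT if an offset falls outside
+ * the mapped Gemini region.
+ */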
+int msm_gemini_hw_exec_cmds(struct msm_gemini_hw_cmd *hw_cmd_p, int m_cmds)
+{
+	int is_copy_to_user = -1;
+	uint32_t data;
+
+	while (m_cmds--) {
+		if (hw_cmd_p->offset > gemini_region_size) {
+			GMN_PR_ERR("%s:%d] %d exceed hw region %d\n", __func__,
+				__LINE__, hw_cmd_p->offset, gemini_region_size);
+			return -EFAULT;
+		}
+
+		switch (hw_cmd_p->type) {
+		case MSM_GEMINI_HW_CMD_TYPE_READ:
+			hw_cmd_p->data = msm_gemini_hw_read(hw_cmd_p);
+			is_copy_to_user = 1;
+			break;
+
+		case MSM_GEMINI_HW_CMD_TYPE_WRITE:
+			msm_gemini_hw_write(hw_cmd_p);
+			break;
+
+		case MSM_GEMINI_HW_CMD_TYPE_WRITE_OR:
+			data = msm_gemini_hw_read(hw_cmd_p);
+			hw_cmd_p->data = (hw_cmd_p->data & hw_cmd_p->mask) |
+				data;
+			msm_gemini_hw_write(hw_cmd_p);
+			break;
+
+		case MSM_GEMINI_HW_CMD_TYPE_UWAIT:
+			msm_gemini_hw_wait(hw_cmd_p, 1);
+			break;
+
+		case MSM_GEMINI_HW_CMD_TYPE_MWAIT:
+			msm_gemini_hw_wait(hw_cmd_p, 1000);
+			break;
+
+		case MSM_GEMINI_HW_CMD_TYPE_UDELAY:
+			msm_gemini_hw_delay(hw_cmd_p, 1);
+			break;
+
+		case MSM_GEMINI_HW_CMD_TYPE_MDELAY:
+			msm_gemini_hw_delay(hw_cmd_p, 1000);
+			break;
+
+		default:
+			GMN_PR_ERR("wrong hw command type\n");
+			break;
+		}
+
+		hw_cmd_p++;
+	}
+	return is_copy_to_user;
+}
+
+void msm_gemini_hw_region_dump(int size)
+{
+	uint32_t *p;
+	uint8_t *p8;
+
+	if (size > gemini_region_size)
+		GMN_PR_ERR("%s:%d] wrong region dump size\n",
+			__func__, __LINE__);
+
+	p = (uint32_t *) gemini_region_base;
+	while (size >= 16) {
+		GMN_DBG("0x%08X] %08X %08X %08X %08X\n",
+			gemini_region_size - size,
+			readl(p), readl(p+1), readl(p+2), readl(p+3));
+		p += 4;
+		size -= 16;
+	}
+
+	if (size > 0) {
+		uint32_t d;
+		GMN_DBG("0x%08X] ", gemini_region_size - size);
+		while (size >= 4) {
+			GMN_DBG("%08X ", readl(p++));
+			size -= 4;
+		}
+
+		d = readl(p);
+		p8 = (uint8_t *) &d;
+		while (size) {
+			GMN_DBG("%02X", *p8++);
+			size--;
+		}
+
+		GMN_DBG("\n");
+	}
+}
+
diff --git a/drivers/media/video/msm/msm_gemini_hw.h b/drivers/media/video/msm/msm_gemini_hw.h
new file mode 100644
index 0000000..ee1eac3
--- /dev/null
+++ b/drivers/media/video/msm/msm_gemini_hw.h
@@ -0,0 +1,101 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_GEMINI_HW_H
+#define MSM_GEMINI_HW_H
+
+#include <media/msm_gemini.h>
+#include "msm_gemini_hw_reg.h"
+
+struct msm_gemini_hw_buf {
+	struct msm_gemini_buf vbuf;
+	struct file  *file;
+	uint32_t framedone_len;
+	uint32_t y_buffer_addr;
+	uint32_t y_len;
+	uint32_t cbcr_buffer_addr;
+	uint32_t cbcr_len;
+	uint32_t num_of_mcu_rows;
+};
+
+struct msm_gemini_hw_pingpong {
+	uint8_t is_fe; /* 1: fe; 0: we */
+	struct  msm_gemini_hw_buf buf[2];
+	int     buf_status[2];
+	int     buf_active_index;
+};
+
+int msm_gemini_hw_pingpong_update(struct msm_gemini_hw_pingpong *pingpong_hw,
+	struct msm_gemini_hw_buf *buf);
+void *msm_gemini_hw_pingpong_irq(struct msm_gemini_hw_pingpong *pingpong_hw);
+void *msm_gemini_hw_pingpong_active_buffer(struct msm_gemini_hw_pingpong
+	*pingpong_hw);
+
+void msm_gemini_hw_irq_clear(uint32_t, uint32_t);
+int msm_gemini_hw_irq_get_status(void);
+long msm_gemini_hw_encode_output_size(void);
+#define MSM_GEMINI_HW_MASK_COMP_FRAMEDONE \
+		MSM_GEMINI_HW_IRQ_STATUS_FRAMEDONE_MASK
+#define MSM_GEMINI_HW_MASK_COMP_FE \
+		MSM_GEMINI_HW_IRQ_STATUS_FE_RD_DONE_MASK
+#define MSM_GEMINI_HW_MASK_COMP_WE \
+		(MSM_GEMINI_HW_IRQ_STATUS_WE_Y_PINGPONG_MASK | \
+		 MSM_GEMINI_HW_IRQ_STATUS_WE_CBCR_PINGPONG_MASK)
+#define MSM_GEMINI_HW_MASK_COMP_RESET_ACK \
+		MSM_GEMINI_HW_IRQ_STATUS_RESET_ACK_MASK
+#define MSM_GEMINI_HW_MASK_COMP_ERR \
+		(MSM_GEMINI_HW_IRQ_STATUS_FE_RTOVF_MASK | \
+		MSM_GEMINI_HW_IRQ_STATUS_FE_VFE_OVERFLOW_MASK | \
+		MSM_GEMINI_HW_IRQ_STATUS_WE_Y_BUFFER_OVERFLOW_MASK | \
+		MSM_GEMINI_HW_IRQ_STATUS_WE_CBCR_BUFFER_OVERFLOW_MASK | \
+		MSM_GEMINI_HW_IRQ_STATUS_WE_CH0_DATAFIFO_OVERFLOW_MASK | \
+		MSM_GEMINI_HW_IRQ_STATUS_WE_CH1_DATAFIFO_OVERFLOW_MASK | \
+		MSM_GEMINI_HW_IRQ_STATUS_BUS_ERROR_MASK | \
+		MSM_GEMINI_HW_IRQ_STATUS_VIOLATION_MASK)
+
+#define msm_gemini_hw_irq_is_frame_done(gemini_irq_status) \
+	(gemini_irq_status & MSM_GEMINI_HW_MASK_COMP_FRAMEDONE)
+#define msm_gemini_hw_irq_is_fe_pingpong(gemini_irq_status) \
+	(gemini_irq_status & MSM_GEMINI_HW_MASK_COMP_FE)
+#define msm_gemini_hw_irq_is_we_pingpong(gemini_irq_status) \
+	(gemini_irq_status & MSM_GEMINI_HW_MASK_COMP_WE)
+#define msm_gemini_hw_irq_is_reset_ack(gemini_irq_status) \
+	(gemini_irq_status & MSM_GEMINI_HW_MASK_COMP_RESET_ACK)
+#define msm_gemini_hw_irq_is_err(gemini_irq_status) \
+	(gemini_irq_status & MSM_GEMINI_HW_MASK_COMP_ERR)
+
+void msm_gemini_hw_fe_buffer_update(struct msm_gemini_hw_buf *p_input,
+	uint8_t pingpong_index);
+void msm_gemini_hw_we_buffer_update(struct msm_gemini_hw_buf *p_input,
+	uint8_t pingpong_index);
+
+void msm_gemini_hw_we_buffer_cfg(uint8_t is_realtime);
+
+void msm_gemini_hw_fe_start(void);
+void msm_gemini_hw_clk_cfg(void);
+
+void msm_gemini_hw_reset(void *base, int size);
+void msm_gemini_hw_irq_cfg(void);
+void msm_gemini_hw_init(void *base, int size);
+
+uint32_t msm_gemini_hw_read(struct msm_gemini_hw_cmd *hw_cmd_p);
+void msm_gemini_hw_write(struct msm_gemini_hw_cmd *hw_cmd_p);
+int msm_gemini_hw_wait(struct msm_gemini_hw_cmd *hw_cmd_p, int m_us);
+void msm_gemini_hw_delay(struct msm_gemini_hw_cmd *hw_cmd_p, int m_us);
+int msm_gemini_hw_exec_cmds(struct msm_gemini_hw_cmd *hw_cmd_p, int m_cmds);
+void msm_gemini_hw_region_dump(int size);
+
+#define MSM_GEMINI_PIPELINE_CLK_128MHZ 128 /* 8MP  128MHz */
+#define MSM_GEMINI_PIPELINE_CLK_140MHZ 140 /* 9MP  140MHz */
+#define MSM_GEMINI_PIPELINE_CLK_200MHZ 153 /* 12MP 153MHz */
+
+#endif /* MSM_GEMINI_HW_H */
diff --git a/drivers/media/video/msm/msm_gemini_hw_reg.h b/drivers/media/video/msm/msm_gemini_hw_reg.h
new file mode 100644
index 0000000..4bddfbb
--- /dev/null
+++ b/drivers/media/video/msm/msm_gemini_hw_reg.h
@@ -0,0 +1,176 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_GEMINI_HW_REG_H
+#define MSM_GEMINI_HW_REG_H
+
+#define GEMINI_REG_BASE 0
+
+#define MSM_GEMINI_HW_IRQ_MASK_ADDR 0x00000014
+#define MSM_GEMINI_HW_IRQ_MASK_RMSK 0xffffffff
+#define MSM_GEMINI_HW_IRQ_MASK_SHFT 0
+#define MSM_GEMINI_HW_IRQ_DISABLE 0
+#define MSM_GEMINI_HW_IRQ_ENABLE 0xffffffff
+
+#define MSM_GEMINI_HW_IRQ_CLEAR_ADDR 0x00000018
+#define MSM_GEMINI_HW_IRQ_CLEAR_RMSK 0xffffffff
+#define MSM_GEMINI_HW_IRQ_CLEAR_SHFT 0
+#define MSM_GEMINI_HW_IRQ_CLEAR  0xffffffff
+
+#define MSM_GEMINI_HW_IRQ_STATUS_FRAMEDONE_MASK 0x00000001
+#define MSM_GEMINI_HW_IRQ_STATUS_FRAMEDONE_SHIFT 0x00000000
+
+#define MSM_GEMINI_HW_IRQ_STATUS_FE_RD_DONE_MASK 0x00000002
+#define MSM_GEMINI_HW_IRQ_STATUS_FE_RD_DONE_SHIFT 0x00000001
+
+#define MSM_GEMINI_HW_IRQ_STATUS_FE_RTOVF_MASK 0x00000004
+#define MSM_GEMINI_HW_IRQ_STATUS_FE_RTOVF_SHIFT 0x00000002
+
+#define MSM_GEMINI_HW_IRQ_STATUS_FE_VFE_OVERFLOW_MASK 0x00000008
+#define MSM_GEMINI_HW_IRQ_STATUS_FE_VFE_OVERFLOW_SHIFT 0x00000003
+
+#define MSM_GEMINI_HW_IRQ_STATUS_WE_Y_PINGPONG_MASK 0x00000010
+#define MSM_GEMINI_HW_IRQ_STATUS_WE_Y_PINGPONG_SHIFT 0x00000004
+
+#define MSM_GEMINI_HW_IRQ_STATUS_WE_CBCR_PINGPONG_MASK 0x00000020
+#define MSM_GEMINI_HW_IRQ_STATUS_WE_CBCR_PINGPONG_SHIFT 0x00000005
+
+#define MSM_GEMINI_HW_IRQ_STATUS_WE_Y_BUFFER_OVERFLOW_MASK 0x00000040
+#define MSM_GEMINI_HW_IRQ_STATUS_WE_Y_BUFFER_OVERFLOW_SHIFT 0x00000006
+
+#define MSM_GEMINI_HW_IRQ_STATUS_WE_CBCR_BUFFER_OVERFLOW_MASK 0x00000080
+#define MSM_GEMINI_HW_IRQ_STATUS_WE_CBCR_BUFFER_OVERFLOW_SHIFT 0x00000007
+
+#define MSM_GEMINI_HW_IRQ_STATUS_WE_CH0_DATAFIFO_OVERFLOW_MASK 0x00000100
+#define MSM_GEMINI_HW_IRQ_STATUS_WE_CH0_DATAFIFO_OVERFLOW_SHIFT 0x00000008
+
+#define MSM_GEMINI_HW_IRQ_STATUS_WE_CH1_DATAFIFO_OVERFLOW_MASK 0x00000200
+#define MSM_GEMINI_HW_IRQ_STATUS_WE_CH1_DATAFIFO_OVERFLOW_SHIFT 0x00000009
+
+#define MSM_GEMINI_HW_IRQ_STATUS_RESET_ACK_MASK 0x00000400
+#define MSM_GEMINI_HW_IRQ_STATUS_RESET_ACK_SHIFT 0x0000000a
+
+#define MSM_GEMINI_HW_IRQ_STATUS_BUS_ERROR_MASK 0x00000800
+#define MSM_GEMINI_HW_IRQ_STATUS_BUS_ERROR_SHIFT 0x0000000b
+
+#define MSM_GEMINI_HW_IRQ_STATUS_VIOLATION_MASK 0x00001000
+#define MSM_GEMINI_HW_IRQ_STATUS_VIOLATION_SHIFT 0x0000000c
+
+#define JPEG_BUS_CMD_HALT_REQ 0x00000001
+
+#define JPEG_REALTIME_CMD_STOP_FB 0x00000000
+#define JPEG_REALTIME_CMD_STOP_IM 0x00000003
+#define JPEG_REALTIME_CMD_START 0x00000001
+
+#define JPEG_OFFLINE_CMD_START 0x00000003
+
+#define JPEG_DMI_CFG_DISABLE 0x00000000
+#define JPEG_DMI_ADDR_START 0x00000000
+
+#define JPEG_FE_CMD_BUFFERRELOAD 0x00000001
+
+#define JPEG_WE_YUB_ENCODE 0x01ff0000
+
+#define JPEG_RESET_DEFAULT 0x0004ffff /* cfff? */
+
+#define JPEG_IRQ_DISABLE_ALL 0x00000000
+#define JPEG_IRQ_CLEAR_ALL 0xffffffff
+#define JPEG_IRQ_ALLSOURCES_ENABLE 0xffffffff
+
+#define HWIO_JPEG_FE_BUFFER_CFG_ADDR (GEMINI_REG_BASE + 0x00000080)
+#define HWIO_JPEG_FE_BUFFER_CFG_RMSK 0x1fff1fff
+
+#define HWIO_JPEG_FE_Y_PING_ADDR_ADDR (GEMINI_REG_BASE + 0x00000084)
+#define HWIO_JPEG_FE_Y_PING_ADDR_RMSK 0xffffffff
+
+#define HWIO_JPEG_FE_Y_PONG_ADDR_ADDR (GEMINI_REG_BASE + 0x00000088)
+#define HWIO_JPEG_FE_Y_PONG_ADDR_RMSK 0xffffffff
+
+#define HWIO_JPEG_FE_CBCR_PING_ADDR_ADDR (GEMINI_REG_BASE + 0x0000008c)
+#define HWIO_JPEG_FE_CBCR_PING_ADDR_RMSK 0xffffffff
+
+#define HWIO_JPEG_FE_CBCR_PONG_ADDR_ADDR (GEMINI_REG_BASE + 0x00000090)
+#define HWIO_JPEG_FE_CBCR_PONG_ADDR_RMSK 0xffffffff
+
+#define HWIO_JPEG_FE_CMD_ADDR (GEMINI_REG_BASE + 0x00000094)
+#define HWIO_JPEG_FE_CMD_RMSK 0x3
+
+#define HWIO_JPEG_FE_BUFFER_CFG_CBCR_MCU_ROWS_BMSK 0x1fff0000
+#define HWIO_JPEG_FE_BUFFER_CFG_CBCR_MCU_ROWS_SHFT 0x10
+#define HWIO_JPEG_FE_BUFFER_CFG_Y_MCU_ROWS_BMSK 0x1fff
+#define HWIO_JPEG_FE_BUFFER_CFG_Y_MCU_ROWS_SHFT 0
+
+#define HWIO_JPEG_FE_Y_PING_ADDR_FE_Y_PING_START_ADDR_BMSK 0xffffffff
+#define HWIO_JPEG_FE_Y_PING_ADDR_FE_Y_PING_START_ADDR_SHFT 0
+
+#define HWIO_JPEG_FE_CBCR_PING_ADDR_FE_CBCR_PING_START_ADDR_BMSK 0xffffffff
+#define HWIO_JPEG_FE_CBCR_PING_ADDR_FE_CBCR_PING_START_ADDR_SHFT 0
+
+#define HWIO_JPEG_FE_Y_PONG_ADDR_FE_Y_PONG_START_ADDR_BMSK 0xffffffff
+#define HWIO_JPEG_FE_Y_PONG_ADDR_FE_Y_PONG_START_ADDR_SHFT 0
+
+#define HWIO_JPEG_FE_CBCR_PONG_ADDR_FE_CBCR_PONG_START_ADDR_BMSK 0xffffffff
+#define HWIO_JPEG_FE_CBCR_PONG_ADDR_FE_CBCR_PONG_START_ADDR_SHFT 0
+
+#define HWIO_JPEG_WE_Y_THRESHOLD_ADDR (GEMINI_REG_BASE + 0x000000c0)
+#define HWIO_JPEG_WE_Y_THRESHOLD_RMSK 0x1ff01ff
+
+#define HWIO_JPEG_WE_CBCR_THRESHOLD_ADDR (GEMINI_REG_BASE      + 0x000000c4)
+#define HWIO_JPEG_WE_CBCR_THRESHOLD_RMSK 0x1ff01ff
+
+#define HWIO_JPEG_WE_Y_UB_CFG_ADDR (GEMINI_REG_BASE + 0x000000e8)
+#define HWIO_JPEG_WE_Y_UB_CFG_RMSK 0x1ff01ff
+
+#define HWIO_JPEG_WE_Y_THRESHOLD_WE_DEASSERT_STALL_TH_BMSK 0x1ff0000
+#define HWIO_JPEG_WE_Y_THRESHOLD_WE_DEASSERT_STALL_TH_SHFT 0x10
+#define HWIO_JPEG_WE_Y_THRESHOLD_WE_ASSERT_STALL_TH_BMSK 0x1ff
+#define HWIO_JPEG_WE_Y_THRESHOLD_WE_ASSERT_STALL_TH_SHFT 0
+
+#define HWIO_JPEG_WE_CBCR_THRESHOLD_WE_DEASSERT_STALL_TH_BMSK 0x1ff0000
+#define HWIO_JPEG_WE_CBCR_THRESHOLD_WE_DEASSERT_STALL_TH_SHFT 0x10
+#define HWIO_JPEG_WE_CBCR_THRESHOLD_WE_ASSERT_STALL_TH_BMSK 0x1ff
+#define HWIO_JPEG_WE_CBCR_THRESHOLD_WE_ASSERT_STALL_TH_SHFT 0
+
+#define HWIO_JPEG_WE_Y_PING_BUFFER_CFG_ADDR (GEMINI_REG_BASE + 0x000000c8)
+#define HWIO_JPEG_WE_Y_PING_BUFFER_CFG_RMSK 0x7fffff
+
+#define HWIO_JPEG_WE_Y_PING_ADDR_ADDR (GEMINI_REG_BASE + 0x000000d8)
+#define HWIO_JPEG_WE_Y_PING_ADDR_RMSK 0xfffffff8
+
+#define HWIO_JPEG_WE_Y_PONG_BUFFER_CFG_ADDR (GEMINI_REG_BASE + 0x000000cc)
+#define HWIO_JPEG_WE_Y_PONG_BUFFER_CFG_RMSK 0x7fffff
+
+#define HWIO_JPEG_WE_Y_PONG_ADDR_ADDR (GEMINI_REG_BASE + 0x000000dc)
+#define HWIO_JPEG_WE_Y_PONG_ADDR_RMSK 0xfffffff8
+
+#define HWIO_JPEG_WE_Y_PING_BUFFER_CFG_WE_BUFFER_LENGTH_BMSK 0x7fffff
+#define HWIO_JPEG_WE_Y_PING_BUFFER_CFG_WE_BUFFER_LENGTH_SHFT 0
+
+#define HWIO_JPEG_WE_Y_PONG_BUFFER_CFG_WE_BUFFER_LENGTH_BMSK 0x7fffff
+#define HWIO_JPEG_WE_Y_PONG_BUFFER_CFG_WE_BUFFER_LENGTH_SHFT 0
+
+#define HWIO_JPEG_IRQ_MASK_ADDR (GEMINI_REG_BASE + 0x00000014)
+#define HWIO_JPEG_IRQ_MASK_RMSK 0xffffffff
+
+#define HWIO_JPEG_IRQ_CLEAR_ADDR (GEMINI_REG_BASE + 0x00000018)
+#define HWIO_JPEG_IRQ_CLEAR_RMSK 0xffffffff
+
+#define HWIO_JPEG_RESET_CMD_ADDR (GEMINI_REG_BASE + 0x00000004)
+#define HWIO_JPEG_RESET_CMD_RMSK 0xe004ffff
+
+#define HWIO_JPEG_IRQ_STATUS_ADDR (GEMINI_REG_BASE + 0x0000001c)
+#define HWIO_JPEG_IRQ_STATUS_RMSK 0xffffffff
+
+#define HWIO_JPEG_STATUS_ENCODE_OUTPUT_SIZE_ADDR (GEMINI_REG_BASE + 0x00000034)
+#define HWIO_JPEG_STATUS_ENCODE_OUTPUT_SIZE_RMSK 0xffffff
+
+#endif /* MSM_GEMINI_HW_REG_H */
diff --git a/drivers/media/video/msm/msm_gemini_platform.c b/drivers/media/video/msm/msm_gemini_platform.c
new file mode 100644
index 0000000..140d5d0
--- /dev/null
+++ b/drivers/media/video/msm/msm_gemini_platform.c
@@ -0,0 +1,154 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/pm_qos_params.h>
+#include <linux/clk.h>
+#include <mach/clk.h>
+#include <linux/io.h>
+#include <linux/android_pmem.h>
+#include <mach/msm_reqs.h>
+#include <mach/camera.h>
+
+#include "msm_gemini_platform.h"
+#include "msm_gemini_common.h"
+#include "msm_gemini_hw.h"
+
+#ifdef CONFIG_MSM_NPA_SYSTEM_BUS
+/* NPA Flow ID */
+#define MSM_SYSTEM_BUS_RATE	MSM_AXI_FLOW_JPEG_12MP
+#else
+/* AXI rate in KHz */
+#define MSM_SYSTEM_BUS_RATE	160000
+#endif
+
+void msm_gemini_platform_p2v(struct file  *file)
+{
+#ifdef CONFIG_ANDROID_PMEM
+	put_pmem_file(file);
+#endif
+}
+
+uint32_t msm_gemini_platform_v2p(int fd, uint32_t len, struct file **file_p)
+{
+	unsigned long paddr;
+	unsigned long size;
+	int rc;
+
+#ifdef CONFIG_ANDROID_PMEM
+	unsigned long kvstart;
+	rc = get_pmem_file(fd, &paddr, &kvstart, &size, file_p);
+#else
+	rc = 0;
+	paddr = 0;
+	size = 0;
+#endif
+	if (rc < 0) {
+		GMN_PR_ERR("%s: get_pmem_file fd %d error %d\n", __func__, fd,
+			rc);
+		return 0;
+	}
+
+	/* validate user input */
+	if (len > size) {
+		GMN_PR_ERR("%s: invalid offset + len\n", __func__);
+		return 0;
+	}
+
+	return paddr;
+}
+
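+/*
+ * Claim and map the Gemini MMIO region, enable the JPEG clocks, initialize
+ * the hardware and install the IRQ handler.  On failure the steps already
+ * completed are unwound in reverse order.
+ */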
+int msm_gemini_platform_init(struct platform_device *pdev,
+	struct resource **mem,
+	void **base,
+	int *irq,
+	irqreturn_t (*handler) (int, void *),
+	void *context)
+{
+	int rc = -1;
+	int gemini_irq;
+	struct resource *gemini_mem, *gemini_io, *gemini_irq_res;
+	void *gemini_base;
+
+	gemini_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!gemini_mem) {
+		GMN_PR_ERR("%s: no mem resource?\n", __func__);
+		return -ENODEV;
+	}
+
+	gemini_irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!gemini_irq_res) {
+		GMN_PR_ERR("no irq resource?\n");
+		return -ENODEV;
+	}
+	gemini_irq = gemini_irq_res->start;
+
+	gemini_io = request_mem_region(gemini_mem->start,
+		resource_size(gemini_mem), pdev->name);
+	if (!gemini_io) {
+		GMN_PR_ERR("%s: region already claimed\n", __func__);
+		return -EBUSY;
+	}
+
+	gemini_base = ioremap(gemini_mem->start, resource_size(gemini_mem));
+	if (!gemini_base) {
+		rc = -ENOMEM;
+		GMN_PR_ERR("%s: ioremap failed\n", __func__);
+		goto fail1;
+	}
+
+	rc = msm_camio_jpeg_clk_enable();
+	if (rc) {
+		GMN_PR_ERR("%s: clk failed rc = %d\n", __func__, rc);
+		goto fail2;
+	}
+
+	msm_gemini_hw_init(gemini_base, resource_size(gemini_mem));
+	rc = request_irq(gemini_irq, handler, IRQF_TRIGGER_RISING, "gemini",
+		context);
+	if (rc) {
+		GMN_PR_ERR("%s: request_irq failed, %d\n", __func__,
+			gemini_irq);
+		goto fail3;
+	}
+
+	*mem  = gemini_mem;
+	*base = gemini_base;
+	*irq  = gemini_irq;
+	GMN_DBG("%s:%d] success\n", __func__, __LINE__);
+
+	return rc;
+
+fail3:
+	msm_camio_jpeg_clk_disable();
+fail2:
+	iounmap(gemini_base);
+fail1:
+	release_mem_region(gemini_mem->start, resource_size(gemini_mem));
+	GMN_DBG("%s:%d] fail\n", __func__, __LINE__);
+	return rc;
+}
+
+int msm_gemini_platform_release(struct resource *mem, void *base, int irq,
+	void *context)
+{
+	int result;
+
+	free_irq(irq, context);
+	result = msm_camio_jpeg_clk_disable();
+	iounmap(base);
+	release_mem_region(mem->start, resource_size(mem));
+
+	GMN_DBG("%s:%d] success\n", __func__, __LINE__);
+	return result;
+}
+
diff --git a/drivers/media/video/msm/msm_gemini_platform.h b/drivers/media/video/msm/msm_gemini_platform.h
new file mode 100644
index 0000000..49b1db6
--- /dev/null
+++ b/drivers/media/video/msm/msm_gemini_platform.h
@@ -0,0 +1,34 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_GEMINI_PLATFORM_H
+#define MSM_GEMINI_PLATFORM_H
+
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+void msm_gemini_platform_p2v(struct file  *file);
+uint32_t msm_gemini_platform_v2p(int fd, uint32_t len, struct file **file);
+
+int msm_gemini_platform_clk_enable(void);
+int msm_gemini_platform_clk_disable(void);
+
+int msm_gemini_platform_init(struct platform_device *pdev,
+	struct resource **mem,
+	void **base,
+	int *irq,
+	irqreturn_t (*handler) (int, void *),
+	void *context);
+int msm_gemini_platform_release(struct resource *mem, void *base, int irq,
+	void *context);
+
+#endif /* MSM_GEMINI_PLATFORM_H */
diff --git a/drivers/media/video/msm/msm_gemini_sync.c b/drivers/media/video/msm/msm_gemini_sync.c
new file mode 100644
index 0000000..2ad0467
--- /dev/null
+++ b/drivers/media/video/msm/msm_gemini_sync.c
@@ -0,0 +1,839 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <media/msm_gemini.h>
+#include "msm_gemini_sync.h"
+#include "msm_gemini_core.h"
+#include "msm_gemini_platform.h"
+#include "msm_gemini_common.h"
+
+static int release_buf;
+
+/*************** queue helper ****************/
+inline void msm_gemini_q_init(char const *name, struct msm_gemini_q *q_p)
+{
+	GMN_DBG("%s:%d] %s\n", __func__, __LINE__, name);
+	q_p->name = name;
+	spin_lock_init(&q_p->lck);
+	INIT_LIST_HEAD(&q_p->q);
+	init_waitqueue_head(&q_p->wait);
+	q_p->unblck = 0;
+}
+
+inline void *msm_gemini_q_out(struct msm_gemini_q *q_p)
+{
+	unsigned long flags;
+	struct msm_gemini_q_entry *q_entry_p = NULL;
+	void *data = NULL;
+
+	GMN_DBG("%s:%d] %s\n", __func__, __LINE__, q_p->name);
+	spin_lock_irqsave(&q_p->lck, flags);
+	if (!list_empty(&q_p->q)) {
+		q_entry_p = list_first_entry(&q_p->q, struct msm_gemini_q_entry,
+			list);
+		list_del_init(&q_entry_p->list);
+	}
+	spin_unlock_irqrestore(&q_p->lck, flags);
+
+	if (q_entry_p) {
+		data = q_entry_p->data;
+		kfree(q_entry_p);
+	} else {
+		GMN_DBG("%s:%d] %s no entry\n", __func__, __LINE__,
+			q_p->name);
+	}
+
+	return data;
+}
+
+inline int msm_gemini_q_in(struct msm_gemini_q *q_p, void *data)
+{
+	unsigned long flags;
+
+	struct msm_gemini_q_entry *q_entry_p;
+
+	GMN_DBG("%s:%d] %s\n", __func__, __LINE__, q_p->name);
+
+	q_entry_p = kmalloc(sizeof(struct msm_gemini_q_entry), GFP_ATOMIC);
+	if (!q_entry_p) {
+		GMN_PR_ERR("%s: no mem\n", __func__);
+		return -1;
+	}
+	q_entry_p->data = data;
+
+	spin_lock_irqsave(&q_p->lck, flags);
+	list_add_tail(&q_entry_p->list, &q_p->q);
+	spin_unlock_irqrestore(&q_p->lck, flags);
+
+	return 0;
+}
+
+inline int msm_gemini_q_in_buf(struct msm_gemini_q *q_p,
+	struct msm_gemini_core_buf *buf)
+{
+	struct msm_gemini_core_buf *buf_p;
+
+	GMN_DBG("%s:%d]\n", __func__, __LINE__);
+	buf_p = kmalloc(sizeof(struct msm_gemini_core_buf), GFP_ATOMIC);
+	if (!buf_p) {
+		GMN_PR_ERR("%s: no mem\n", __func__);
+		return -1;
+	}
+
+	memcpy(buf_p, buf, sizeof(struct msm_gemini_core_buf));
+
+	msm_gemini_q_in(q_p, buf_p);
+	return 0;
+}
+
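+/*
+ * Block until the queue has an entry or it is explicitly unblocked.
+ * Returns -ETIMEDOUT if the wait times out with the queue still empty,
+ * -ECANCELED when woken by msm_gemini_q_unblock(), and otherwise the
+ * value returned by wait_event_interruptible_timeout().
+ */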
+inline int msm_gemini_q_wait(struct msm_gemini_q *q_p)
+{
+	int tm = MAX_SCHEDULE_TIMEOUT;
+	int rc;
+
+	GMN_DBG("%s:%d] %s wait\n", __func__, __LINE__, q_p->name);
+	rc = wait_event_interruptible_timeout(q_p->wait,
+		(!list_empty_careful(&q_p->q) || q_p->unblck),
+		msecs_to_jiffies(tm));
+	GMN_DBG("%s:%d] %s wait done\n", __func__, __LINE__, q_p->name);
+	if (list_empty_careful(&q_p->q)) {
+		if (rc == 0) {
+			rc = -ETIMEDOUT;
+			GMN_PR_ERR("%s:%d] %s timeout\n", __func__, __LINE__,
+				q_p->name);
+		} else if (q_p->unblck) {
+			GMN_DBG("%s:%d] %s unblock is true\n", __func__,
+				__LINE__, q_p->name);
+			q_p->unblck = 0;
+			rc = -ECANCELED;
+		} else if (rc < 0) {
+			GMN_PR_ERR("%s:%d] %s rc %d\n", __func__, __LINE__,
+				q_p->name, rc);
+		}
+	}
+	return rc;
+}
+
+inline int msm_gemini_q_wakeup(struct msm_gemini_q *q_p)
+{
+	GMN_DBG("%s:%d] %s\n", __func__, __LINE__, q_p->name);
+	wake_up(&q_p->wait);
+	return 0;
+}
+
+inline int msm_gemini_q_unblock(struct msm_gemini_q *q_p)
+{
+	GMN_DBG("%s:%d] %s\n", __func__, __LINE__, q_p->name);
+	q_p->unblck = 1;
+	wake_up(&q_p->wait);
+	return 0;
+}
+
+inline void msm_gemini_outbuf_q_cleanup(struct msm_gemini_q *q_p)
+{
+	struct msm_gemini_core_buf *buf_p;
+	GMN_DBG("%s:%d] %s\n", __func__, __LINE__, q_p->name);
+	do {
+		buf_p = msm_gemini_q_out(q_p);
+		if (buf_p) {
+			msm_gemini_platform_p2v(buf_p->file);
+			GMN_DBG("%s:%d] %s\n", __func__, __LINE__, q_p->name);
+			kfree(buf_p);
+		}
+	} while (buf_p);
+	q_p->unblck = 0;
+}
+
+inline void msm_gemini_q_cleanup(struct msm_gemini_q *q_p)
+{
+	void *data;
+	GMN_DBG("%s:%d] %s\n", __func__, __LINE__, q_p->name);
+	do {
+		data = msm_gemini_q_out(q_p);
+		if (data) {
+			GMN_DBG("%s:%d] %s\n", __func__, __LINE__, q_p->name);
+			kfree(data);
+		}
+	} while (data);
+	q_p->unblck = 0;
+}
+
+/*************** event queue ****************/
+
+int msm_gemini_framedone_irq(struct msm_gemini_device *pgmn_dev,
+	struct msm_gemini_core_buf *buf_in)
+{
+	int rc = 0;
+
+	GMN_DBG("%s:%d] Enter\n", __func__, __LINE__);
+
+	if (buf_in) {
+		buf_in->vbuf.framedone_len = buf_in->framedone_len;
+		buf_in->vbuf.type = MSM_GEMINI_EVT_FRAMEDONE;
+		GMN_DBG("%s:%d] 0x%08x %d framedone_len %d\n",
+			__func__, __LINE__,
+			(int) buf_in->y_buffer_addr, buf_in->y_len,
+			buf_in->vbuf.framedone_len);
+		rc = msm_gemini_q_in_buf(&pgmn_dev->evt_q, buf_in);
+	} else {
+		GMN_PR_ERR("%s:%d] no output return buffer\n",
+			__func__, __LINE__);
+		rc = -1;
+	}
+
+	if (buf_in)
+		rc = msm_gemini_q_wakeup(&pgmn_dev->evt_q);
+
+	return rc;
+}
+
+int msm_gemini_evt_get(struct msm_gemini_device *pgmn_dev,
+	void __user *to)
+{
+	struct msm_gemini_core_buf *buf_p;
+	struct msm_gemini_ctrl_cmd ctrl_cmd = {0};
+
+	GMN_DBG("%s:%d] Enter\n", __func__, __LINE__);
+
+	msm_gemini_q_wait(&pgmn_dev->evt_q);
+	buf_p = msm_gemini_q_out(&pgmn_dev->evt_q);
+
+	if (!buf_p) {
+		GMN_DBG("%s:%d] no buffer\n", __func__, __LINE__);
+		return -EAGAIN;
+	}
+
+	ctrl_cmd.type = buf_p->vbuf.type;
+	kfree(buf_p);
+
+	GMN_DBG("%s:%d] 0x%08x %d\n", __func__, __LINE__,
+		(int) ctrl_cmd.value, ctrl_cmd.len);
+
+	if (copy_to_user(to, &ctrl_cmd, sizeof(ctrl_cmd))) {
+		GMN_PR_ERR("%s:%d]\n", __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+int msm_gemini_evt_get_unblock(struct msm_gemini_device *pgmn_dev)
+{
+	GMN_DBG("%s:%d] Enter\n", __func__, __LINE__);
+	msm_gemini_q_unblock(&pgmn_dev->evt_q);
+	return 0;
+}
+
+void msm_gemini_reset_ack_irq(struct msm_gemini_device *pgmn_dev)
+{
+	GMN_DBG("%s:%d]\n", __func__, __LINE__);
+}
+
+void msm_gemini_err_irq(struct msm_gemini_device *pgmn_dev,
+	int event)
+{
+	int rc = 0;
+	struct msm_gemini_core_buf buf;
+
+	GMN_PR_ERR("%s:%d] error: %d\n", __func__, __LINE__, event);
+
+	buf.vbuf.type = MSM_GEMINI_EVT_ERR;
+	rc = msm_gemini_q_in_buf(&pgmn_dev->evt_q, &buf);
+	if (!rc)
+		rc = msm_gemini_q_wakeup(&pgmn_dev->evt_q);
+
+	if (rc)
+		GMN_PR_ERR("%s:%d] failed to queue error event\n",
+			__func__, __LINE__);
+
+	return;
+}
+
+/*************** output queue ****************/
+
+int msm_gemini_we_pingpong_irq(struct msm_gemini_device *pgmn_dev,
+	struct msm_gemini_core_buf *buf_in)
+{
+	int rc = 0;
+	struct msm_gemini_core_buf *buf_out;
+
+	GMN_DBG("%s:%d] Enter\n", __func__, __LINE__);
+	if (buf_in) {
+		GMN_DBG("%s:%d] 0x%08x %d\n", __func__, __LINE__,
+			(int) buf_in->y_buffer_addr, buf_in->y_len);
+		rc = msm_gemini_q_in_buf(&pgmn_dev->output_rtn_q, buf_in);
+	} else {
+		GMN_DBG("%s:%d] no output return buffer\n", __func__,
+			__LINE__);
+		rc = -1;
+	}
+
+	buf_out = msm_gemini_q_out(&pgmn_dev->output_buf_q);
+
+	if (buf_out) {
+		rc = msm_gemini_core_we_buf_update(buf_out);
+		kfree(buf_out);
+	} else {
+		msm_gemini_core_we_buf_reset(buf_in);
+		GMN_DBG("%s:%d] no output buffer\n", __func__, __LINE__);
+		rc = -2;
+	}
+
+	if (buf_in)
+		rc = msm_gemini_q_wakeup(&pgmn_dev->output_rtn_q);
+
+	return rc;
+}
+
+int msm_gemini_output_get(struct msm_gemini_device *pgmn_dev, void __user *to)
+{
+	struct msm_gemini_core_buf *buf_p;
+	struct msm_gemini_buf buf_cmd;
+
+	GMN_DBG("%s:%d] Enter\n", __func__, __LINE__);
+
+	msm_gemini_q_wait(&pgmn_dev->output_rtn_q);
+	buf_p = msm_gemini_q_out(&pgmn_dev->output_rtn_q);
+
+	if (!buf_p) {
+		GMN_DBG("%s:%d] no output buffer return\n",
+			__func__, __LINE__);
+		return -EAGAIN;
+	}
+
+	buf_cmd = buf_p->vbuf;
+	msm_gemini_platform_p2v(buf_p->file);
+	kfree(buf_p);
+
+	GMN_DBG("%s:%d] 0x%08x %d\n", __func__, __LINE__,
+		(int) buf_cmd.vaddr, buf_cmd.y_len);
+
+	if (copy_to_user(to, &buf_cmd, sizeof(buf_cmd))) {
+		GMN_PR_ERR("%s:%d]", __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+int msm_gemini_output_get_unblock(struct msm_gemini_device *pgmn_dev)
+{
+	GMN_DBG("%s:%d] Enter\n", __func__, __LINE__);
+	msm_gemini_q_unblock(&pgmn_dev->output_rtn_q);
+	return 0;
+}
+
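+/*
+ * Queue an empty output (encoded bitstream) buffer from user space: the
+ * pmem fd is translated to a physical address and the buffer is placed on
+ * output_buf_q for the write engine ping-pong handler to pick up.
+ */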
+int msm_gemini_output_buf_enqueue(struct msm_gemini_device *pgmn_dev,
+	void __user *arg)
+{
+	struct msm_gemini_buf buf_cmd;
+	struct msm_gemini_core_buf *buf_p;
+
+	GMN_DBG("%s:%d] Enter\n", __func__, __LINE__);
+	if (copy_from_user(&buf_cmd, arg, sizeof(struct msm_gemini_buf))) {
+		GMN_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	buf_p = kmalloc(sizeof(struct msm_gemini_core_buf), GFP_ATOMIC);
+	if (!buf_p) {
+		GMN_PR_ERR("%s:%d] no mem\n", __func__, __LINE__);
+		return -1;
+	}
+
+	GMN_DBG("%s:%d] 0x%08x %d\n", __func__, __LINE__, (int) buf_cmd.vaddr,
+		buf_cmd.y_len);
+
+	buf_p->y_buffer_addr = msm_gemini_platform_v2p(buf_cmd.fd,
+		buf_cmd.y_len, &buf_p->file);
+	if (!buf_p->y_buffer_addr) {
+		GMN_PR_ERR("%s:%d] v2p wrong\n", __func__, __LINE__);
+		kfree(buf_p);
+		return -1;
+	}
+	buf_p->y_len = buf_cmd.y_len;
+	buf_p->vbuf = buf_cmd;
+
+	msm_gemini_q_in(&pgmn_dev->output_buf_q, buf_p);
+	return 0;
+}
+
+/*************** input queue ****************/
+
+int msm_gemini_fe_pingpong_irq(struct msm_gemini_device *pgmn_dev,
+	struct msm_gemini_core_buf *buf_in)
+{
+	struct msm_gemini_core_buf *buf_out;
+	int rc = 0;
+
+	GMN_DBG("%s:%d] Enter\n", __func__, __LINE__);
+	if (buf_in) {
+		GMN_DBG("%s:%d] 0x%08x %d\n", __func__, __LINE__,
+			(int) buf_in->y_buffer_addr, buf_in->y_len);
+		rc = msm_gemini_q_in_buf(&pgmn_dev->input_rtn_q, buf_in);
+	} else {
+		GMN_DBG("%s:%d] no input return buffer\n", __func__,
+			__LINE__);
+		rc = -1;
+	}
+
+	buf_out = msm_gemini_q_out(&pgmn_dev->input_buf_q);
+
+	if (buf_out) {
+		rc = msm_gemini_core_fe_buf_update(buf_out);
+		kfree(buf_out);
+		msm_gemini_core_fe_start();
+	} else {
+		GMN_DBG("%s:%d] no input buffer\n", __func__, __LINE__);
+		rc = -2;
+	}
+
+	if (buf_in)
+		rc = msm_gemini_q_wakeup(&pgmn_dev->input_rtn_q);
+
+	return rc;
+}
+
+int msm_gemini_input_get(struct msm_gemini_device *pgmn_dev, void __user * to)
+{
+	struct msm_gemini_core_buf *buf_p;
+	struct msm_gemini_buf buf_cmd;
+
+	GMN_DBG("%s:%d] Enter\n", __func__, __LINE__);
+	msm_gemini_q_wait(&pgmn_dev->input_rtn_q);
+	buf_p = msm_gemini_q_out(&pgmn_dev->input_rtn_q);
+
+	if (!buf_p) {
+		GMN_DBG("%s:%d] no input buffer return\n",
+			__func__, __LINE__);
+		return -EAGAIN;
+	}
+
+	buf_cmd = buf_p->vbuf;
+	msm_gemini_platform_p2v(buf_p->file);
+	kfree(buf_p);
+
+	GMN_DBG("%s:%d] 0x%08x %d\n", __func__, __LINE__,
+		(int) buf_cmd.vaddr, buf_cmd.y_len);
+
+	if (copy_to_user(to, &buf_cmd, sizeof(buf_cmd))) {
+		GMN_PR_ERR("%s:%d]\n", __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+int msm_gemini_input_get_unblock(struct msm_gemini_device *pgmn_dev)
+{
+	GMN_DBG("%s:%d] Enter\n", __func__, __LINE__);
+	msm_gemini_q_unblock(&pgmn_dev->input_rtn_q);
+	return 0;
+}
+
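+/*
+ * Queue a filled input (YCbCr) buffer from user space.  A single contiguous
+ * pmem allocation holds the Y plane followed by the CbCr plane, so only the
+ * Y base address is translated and the CbCr address is derived from it.
+ */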
+int msm_gemini_input_buf_enqueue(struct msm_gemini_device *pgmn_dev,
+	void __user *arg)
+{
+	struct msm_gemini_core_buf *buf_p;
+	struct msm_gemini_buf buf_cmd;
+
+	if (copy_from_user(&buf_cmd, arg, sizeof(struct msm_gemini_buf))) {
+		GMN_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	buf_p = kmalloc(sizeof(struct msm_gemini_core_buf), GFP_ATOMIC);
+	if (!buf_p) {
+		GMN_PR_ERR("%s:%d] no mem\n", __func__, __LINE__);
+		return -1;
+	}
+
+	GMN_DBG("%s:%d] 0x%08x %d\n", __func__, __LINE__,
+		(int) buf_cmd.vaddr, buf_cmd.y_len);
+
+	buf_p->y_buffer_addr    = msm_gemini_platform_v2p(buf_cmd.fd,
+		buf_cmd.y_len + buf_cmd.cbcr_len, &buf_p->file)
+		+ buf_cmd.offset;
+	buf_p->y_len          = buf_cmd.y_len;
+
+	buf_p->cbcr_buffer_addr = buf_p->y_buffer_addr + buf_cmd.y_len;
+	buf_p->cbcr_len       = buf_cmd.cbcr_len;
+
+	buf_p->num_of_mcu_rows = buf_cmd.num_of_mcu_rows;
+
+	if (!buf_p->y_buffer_addr || !buf_p->cbcr_buffer_addr) {
+		GMN_PR_ERR("%s:%d] v2p wrong\n", __func__, __LINE__);
+		kfree(buf_p);
+		return -1;
+	}
+	buf_p->vbuf           = buf_cmd;
+
+	msm_gemini_q_in(&pgmn_dev->input_buf_q, buf_p);
+
+	return 0;
+}
+
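+/*
+ * Top-level irq dispatcher registered with the core layer: route each
+ * completion cause to its queue handler and treat any unknown cause as an
+ * error event.
+ */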
+int msm_gemini_irq(int event, void *context, void *data)
+{
+	struct msm_gemini_device *pgmn_dev =
+		(struct msm_gemini_device *) context;
+
+	switch (event) {
+	case MSM_GEMINI_HW_MASK_COMP_FRAMEDONE:
+		msm_gemini_framedone_irq(pgmn_dev, data);
+		msm_gemini_we_pingpong_irq(pgmn_dev, data);
+		break;
+
+	case MSM_GEMINI_HW_MASK_COMP_FE:
+		msm_gemini_fe_pingpong_irq(pgmn_dev, data);
+		break;
+
+	case MSM_GEMINI_HW_MASK_COMP_WE:
+		msm_gemini_we_pingpong_irq(pgmn_dev, data);
+		break;
+
+	case MSM_GEMINI_HW_MASK_COMP_RESET_ACK:
+		msm_gemini_reset_ack_irq(pgmn_dev);
+		break;
+
+	case MSM_GEMINI_HW_MASK_COMP_ERR:
+	default:
+		msm_gemini_err_irq(pgmn_dev, event);
+		break;
+	}
+
+	return 0;
+}
+
+int __msm_gemini_open(struct msm_gemini_device *pgmn_dev)
+{
+	int rc;
+
+	mutex_lock(&pgmn_dev->lock);
+	if (pgmn_dev->open_count) {
+		/* only open once */
+		GMN_PR_ERR("%s:%d] busy\n", __func__, __LINE__);
+		mutex_unlock(&pgmn_dev->lock);
+		return -EBUSY;
+	}
+	pgmn_dev->open_count++;
+	mutex_unlock(&pgmn_dev->lock);
+
+	msm_gemini_core_irq_install(msm_gemini_irq);
+	rc = msm_gemini_platform_init(pgmn_dev->pdev,
+		&pgmn_dev->mem, &pgmn_dev->base,
+		&pgmn_dev->irq, msm_gemini_core_irq, pgmn_dev);
+	if (rc) {
+		GMN_PR_ERR("%s:%d] platform_init fail %d\n", __func__,
+			__LINE__, rc);
+		/* undo the open count taken above so a later open can retry */
+		mutex_lock(&pgmn_dev->lock);
+		pgmn_dev->open_count--;
+		mutex_unlock(&pgmn_dev->lock);
+		return rc;
+	}
+
+	GMN_DBG("%s:%d] platform resources - mem %p, base %p, irq %d\n",
+		__func__, __LINE__,
+		pgmn_dev->mem, pgmn_dev->base, pgmn_dev->irq);
+
+	msm_gemini_q_cleanup(&pgmn_dev->evt_q);
+	msm_gemini_q_cleanup(&pgmn_dev->output_rtn_q);
+	msm_gemini_outbuf_q_cleanup(&pgmn_dev->output_buf_q);
+	msm_gemini_q_cleanup(&pgmn_dev->input_rtn_q);
+	msm_gemini_q_cleanup(&pgmn_dev->input_buf_q);
+	msm_gemini_core_init();
+
+	GMN_DBG("%s:%d] success\n", __func__, __LINE__);
+	return rc;
+}
+
+int __msm_gemini_release(struct msm_gemini_device *pgmn_dev)
+{
+	GMN_DBG("%s:%d] Enter\n", __func__, __LINE__);
+	mutex_lock(&pgmn_dev->lock);
+	if (!pgmn_dev->open_count) {
+		GMN_PR_ERR("%s: not opened\n", __func__);
+		mutex_unlock(&pgmn_dev->lock);
+		return -EINVAL;
+	}
+	pgmn_dev->open_count--;
+	mutex_unlock(&pgmn_dev->lock);
+
+	msm_gemini_core_release(release_buf);
+	msm_gemini_q_cleanup(&pgmn_dev->evt_q);
+	msm_gemini_q_cleanup(&pgmn_dev->output_rtn_q);
+	msm_gemini_outbuf_q_cleanup(&pgmn_dev->output_buf_q);
+	msm_gemini_q_cleanup(&pgmn_dev->input_rtn_q);
+	msm_gemini_outbuf_q_cleanup(&pgmn_dev->input_buf_q);
+
+	if (pgmn_dev->open_count)
+		GMN_PR_ERR("%s: multiple opens\n", __func__);
+
+	msm_gemini_platform_release(pgmn_dev->mem, pgmn_dev->base,
+		pgmn_dev->irq, pgmn_dev);
+
+	return 0;
+}
+
+int msm_gemini_ioctl_hw_cmd(struct msm_gemini_device *pgmn_dev,
+	void * __user arg)
+{
+	struct msm_gemini_hw_cmd hw_cmd;
+	int is_copy_to_user;
+
+	if (copy_from_user(&hw_cmd, arg, sizeof(struct msm_gemini_hw_cmd))) {
+		GMN_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	is_copy_to_user = msm_gemini_hw_exec_cmds(&hw_cmd, 1);
+	GMN_DBG("%s:%d] type %d, n %d, offset %d, mask %x, data %x, pdata %x\n",
+		__func__, __LINE__, hw_cmd.type, hw_cmd.n, hw_cmd.offset,
+		hw_cmd.mask, hw_cmd.data, (int) hw_cmd.pdata);
+
+	if (is_copy_to_user >= 0) {
+		if (copy_to_user(arg, &hw_cmd, sizeof(hw_cmd))) {
+			GMN_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
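+/*
+ * Batched variant of the hw command ioctl: the user buffer begins with the
+ * command count m followed by m msm_gemini_hw_cmd entries, and the whole
+ * array is copied back if any command returned read data.
+ */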
+int msm_gemini_ioctl_hw_cmds(struct msm_gemini_device *pgmn_dev,
+	void * __user arg)
+{
+	int is_copy_to_user;
+	int len;
+	uint32_t m;
+	struct msm_gemini_hw_cmds *hw_cmds_p;
+	struct msm_gemini_hw_cmd *hw_cmd_p;
+
+	if (copy_from_user(&m, arg, sizeof(m))) {
+		GMN_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	/* reject a zero command count before it underflows the length below */
+	if (!m) {
+		GMN_PR_ERR("%s:%d] invalid command count\n", __func__,
+			__LINE__);
+		return -EINVAL;
+	}
+
+	len = sizeof(struct msm_gemini_hw_cmds) +
+		sizeof(struct msm_gemini_hw_cmd) * (m - 1);
+	hw_cmds_p = kmalloc(len, GFP_KERNEL);
+	if (!hw_cmds_p) {
+		GMN_PR_ERR("%s:%d] no mem %d\n", __func__, __LINE__, len);
+		return -EFAULT;
+	}
+
+	if (copy_from_user(hw_cmds_p, arg, len)) {
+		GMN_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+		kfree(hw_cmds_p);
+		return -EFAULT;
+	}
+
+	hw_cmd_p = (struct msm_gemini_hw_cmd *) &(hw_cmds_p->hw_cmd);
+
+	is_copy_to_user = msm_gemini_hw_exec_cmds(hw_cmd_p, m);
+
+	if (is_copy_to_user >= 0) {
+		if (copy_to_user(arg, hw_cmds_p, len)) {
+			GMN_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+			kfree(hw_cmds_p);
+			return -EFAULT;
+		}
+	}
+	kfree(hw_cmds_p);
+	return 0;
+}
+
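+/*
+ * MSM_GMN_IOCTL_START: prime up to two queued input buffers into the FE
+ * ping/pong slots and up to two output buffers into the WE slots.  If only
+ * one output buffer was queued, it is split in half so ping and pong share
+ * the same allocation and release_buf is cleared so it is released only
+ * once.  The remaining setup is executed from the user-supplied hw_cmds
+ * list.
+ */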
+int msm_gemini_start(struct msm_gemini_device *pgmn_dev, void * __user arg)
+{
+	struct msm_gemini_core_buf *buf_out;
+	struct msm_gemini_core_buf *buf_out_free[2] = {NULL, NULL};
+	int i, rc;
+
+	GMN_DBG("%s:%d] Enter\n", __func__, __LINE__);
+
+	release_buf = 1;
+	for (i = 0; i < 2; i++) {
+		buf_out = msm_gemini_q_out(&pgmn_dev->input_buf_q);
+
+		if (buf_out) {
+			msm_gemini_core_fe_buf_update(buf_out);
+			kfree(buf_out);
+		} else {
+			GMN_DBG("%s:%d] no input buffer\n", __func__, __LINE__);
+			break;
+		}
+	}
+
+	for (i = 0; i < 2; i++) {
+		buf_out_free[i] = msm_gemini_q_out(&pgmn_dev->output_buf_q);
+
+		if (buf_out_free[i]) {
+			msm_gemini_core_we_buf_update(buf_out_free[i]);
+		} else if (i == 1) {
+			/* set the pong to same address as ping */
+			buf_out_free[0]->y_len >>= 1;
+			buf_out_free[0]->y_buffer_addr +=
+				buf_out_free[0]->y_len;
+			msm_gemini_core_we_buf_update(buf_out_free[0]);
+			/* since ping and pong are same buf release only once*/
+			release_buf = 0;
+		} else {
+			GMN_DBG("%s:%d] no output buffer\n",
+			__func__, __LINE__);
+			break;
+		}
+	}
+
+	for (i = 0; i < 2; i++)
+		kfree(buf_out_free[i]);
+
+	rc = msm_gemini_ioctl_hw_cmds(pgmn_dev, arg);
+	GMN_DBG("%s:%d]\n", __func__, __LINE__);
+	return rc;
+}
+
+int msm_gemini_ioctl_reset(struct msm_gemini_device *pgmn_dev,
+	void * __user arg)
+{
+	int rc;
+	struct msm_gemini_ctrl_cmd ctrl_cmd;
+
+	GMN_DBG("%s:%d] Enter\n", __func__, __LINE__);
+	if (copy_from_user(&ctrl_cmd, arg, sizeof(ctrl_cmd))) {
+		GMN_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	pgmn_dev->op_mode = ctrl_cmd.type;
+
+	rc = msm_gemini_core_reset(pgmn_dev->op_mode, pgmn_dev->base,
+		resource_size(pgmn_dev->mem));
+	return rc;
+}
+
+int msm_gemini_ioctl_test_dump_region(struct msm_gemini_device *pgmn_dev,
+	unsigned long arg)
+{
+	GMN_DBG("%s:%d] Enter\n", __func__, __LINE__);
+	msm_gemini_hw_region_dump(arg);
+	return 0;
+}
+
+long __msm_gemini_ioctl(struct msm_gemini_device *pgmn_dev,
+	unsigned int cmd, unsigned long arg)
+{
+	int rc = 0;
+	switch (cmd) {
+	case MSM_GMN_IOCTL_GET_HW_VERSION:
+		GMN_DBG("%s:%d] VERSION 1\n", __func__, __LINE__);
+		rc = msm_gemini_ioctl_hw_cmd(pgmn_dev, (void __user *) arg);
+		break;
+
+	case MSM_GMN_IOCTL_RESET:
+		rc = msm_gemini_ioctl_reset(pgmn_dev, (void __user *) arg);
+		break;
+
+	case MSM_GMN_IOCTL_STOP:
+		rc = msm_gemini_ioctl_hw_cmds(pgmn_dev, (void __user *) arg);
+		break;
+
+	case MSM_GMN_IOCTL_START:
+		rc = msm_gemini_start(pgmn_dev, (void __user *) arg);
+		break;
+
+	case MSM_GMN_IOCTL_INPUT_BUF_ENQUEUE:
+		rc = msm_gemini_input_buf_enqueue(pgmn_dev,
+			(void __user *) arg);
+		break;
+
+	case MSM_GMN_IOCTL_INPUT_GET:
+		rc = msm_gemini_input_get(pgmn_dev, (void __user *) arg);
+		break;
+
+	case MSM_GMN_IOCTL_INPUT_GET_UNBLOCK:
+		rc = msm_gemini_input_get_unblock(pgmn_dev);
+		break;
+
+	case MSM_GMN_IOCTL_OUTPUT_BUF_ENQUEUE:
+		rc = msm_gemini_output_buf_enqueue(pgmn_dev,
+			(void __user *) arg);
+		break;
+
+	case MSM_GMN_IOCTL_OUTPUT_GET:
+		rc = msm_gemini_output_get(pgmn_dev, (void __user *) arg);
+		break;
+
+	case MSM_GMN_IOCTL_OUTPUT_GET_UNBLOCK:
+		rc = msm_gemini_output_get_unblock(pgmn_dev);
+		break;
+
+	case MSM_GMN_IOCTL_EVT_GET:
+		rc = msm_gemini_evt_get(pgmn_dev, (void __user *) arg);
+		break;
+
+	case MSM_GMN_IOCTL_EVT_GET_UNBLOCK:
+		rc = msm_gemini_evt_get_unblock(pgmn_dev);
+		break;
+
+	case MSM_GMN_IOCTL_HW_CMD:
+		rc = msm_gemini_ioctl_hw_cmd(pgmn_dev, (void __user *) arg);
+		break;
+
+	case MSM_GMN_IOCTL_HW_CMDS:
+		rc = msm_gemini_ioctl_hw_cmds(pgmn_dev, (void __user *) arg);
+		break;
+
+	case MSM_GMN_IOCTL_TEST_DUMP_REGION:
+		rc = msm_gemini_ioctl_test_dump_region(pgmn_dev, arg);
+		break;
+
+	default:
+		GMN_PR_ERR("%s:%d] cmd = %d not supported\n",
+			__func__, __LINE__, _IOC_NR(cmd));
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+struct msm_gemini_device *__msm_gemini_init(struct platform_device *pdev)
+{
+	struct msm_gemini_device *pgmn_dev;
+
+	pgmn_dev = kzalloc(sizeof(struct msm_gemini_device), GFP_ATOMIC);
+	if (!pgmn_dev) {
+		GMN_PR_ERR("%s:%d]no mem\n", __func__, __LINE__);
+		return NULL;
+	}
+
+	mutex_init(&pgmn_dev->lock);
+
+	pgmn_dev->pdev = pdev;
+
+	msm_gemini_q_init("evt_q", &pgmn_dev->evt_q);
+	msm_gemini_q_init("output_rtn_q", &pgmn_dev->output_rtn_q);
+	msm_gemini_q_init("output_buf_q", &pgmn_dev->output_buf_q);
+	msm_gemini_q_init("input_rtn_q", &pgmn_dev->input_rtn_q);
+	msm_gemini_q_init("input_buf_q", &pgmn_dev->input_buf_q);
+
+	return pgmn_dev;
+}
+
+int __msm_gemini_exit(struct msm_gemini_device *pgmn_dev)
+{
+	mutex_destroy(&pgmn_dev->lock);
+	kfree(pgmn_dev);
+	return 0;
+}
+
diff --git a/drivers/media/video/msm/msm_gemini_sync.h b/drivers/media/video/msm/msm_gemini_sync.h
new file mode 100644
index 0000000..6c69a92
--- /dev/null
+++ b/drivers/media/video/msm/msm_gemini_sync.h
@@ -0,0 +1,77 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_GEMINI_SYNC_H
+#define MSM_GEMINI_SYNC_H
+
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include "msm_gemini_core.h"
+
+struct msm_gemini_q {
+	char const	*name;
+	struct list_head  q;
+	spinlock_t	lck;
+	wait_queue_head_t wait;
+	int	       unblck;
+};
+
+struct msm_gemini_q_entry {
+	struct list_head list;
+	void   *data;
+};
+
+struct msm_gemini_device {
+	struct platform_device *pdev;
+	struct resource        *mem;
+	int                     irq;
+	void                   *base;
+
+	struct device *device;
+	struct cdev   cdev;
+	struct mutex  lock;
+	char	  open_count;
+	uint8_t       op_mode;
+
+	/* event queue including frame done & err indications
+	 */
+	struct msm_gemini_q evt_q;
+
+	/* output return queue
+	 */
+	struct msm_gemini_q output_rtn_q;
+
+	/* output buf queue
+	 */
+	struct msm_gemini_q output_buf_q;
+
+	/* input return queue
+	 */
+	struct msm_gemini_q input_rtn_q;
+
+	/* input buf queue
+	 */
+	struct msm_gemini_q input_buf_q;
+};
+
+int __msm_gemini_open(struct msm_gemini_device *pgmn_dev);
+int __msm_gemini_release(struct msm_gemini_device *pgmn_dev);
+
+long __msm_gemini_ioctl(struct msm_gemini_device *pgmn_dev,
+	unsigned int cmd, unsigned long arg);
+
+struct msm_gemini_device *__msm_gemini_init(struct platform_device *pdev);
+int __msm_gemini_exit(struct msm_gemini_device *pgmn_dev);
+
+#endif /* MSM_GEMINI_SYNC_H */
diff --git a/drivers/media/video/msm/msm_io7x.c b/drivers/media/video/msm/msm_io7x.c
new file mode 100644
index 0000000..1befec6
--- /dev/null
+++ b/drivers/media/video/msm/msm_io7x.c
@@ -0,0 +1,318 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <mach/gpio.h>
+#include <mach/board.h>
+#include <mach/camera.h>
+#include <mach/clk.h>
+
+#define CAMIF_CFG_RMSK 0x1fffff
+#define CAM_SEL_BMSK 0x2
+#define CAM_PCLK_SRC_SEL_BMSK 0x60000
+#define CAM_PCLK_INVERT_BMSK 0x80000
+#define CAM_PAD_REG_SW_RESET_BMSK 0x100000
+
+#define EXT_CAM_HSYNC_POL_SEL_BMSK 0x10000
+#define EXT_CAM_VSYNC_POL_SEL_BMSK 0x8000
+#define MDDI_CLK_CHICKEN_BIT_BMSK  0x80
+
+#define CAM_SEL_SHFT 0x1
+#define CAM_PCLK_SRC_SEL_SHFT 0x11
+#define CAM_PCLK_INVERT_SHFT 0x13
+#define CAM_PAD_REG_SW_RESET_SHFT 0x14
+
+#define EXT_CAM_HSYNC_POL_SEL_SHFT 0x10
+#define EXT_CAM_VSYNC_POL_SEL_SHFT 0xF
+#define MDDI_CLK_CHICKEN_BIT_SHFT  0x7
+#define APPS_RESET_OFFSET 0x00000210
+
+static struct clk *camio_vfe_mdc_clk;
+static struct clk *camio_mdc_clk;
+static struct clk *camio_vfe_clk;
+
+static struct msm_camera_io_ext camio_ext;
+static struct resource *appio, *mdcio;
+void __iomem *appbase, *mdcbase;
+
+int msm_camio_clk_enable(enum msm_camio_clk_type clktype)
+{
+	int rc = -1;
+	struct clk *clk = NULL;
+
+	switch (clktype) {
+	case CAMIO_VFE_MDC_CLK:
+		clk = camio_vfe_mdc_clk = clk_get(NULL, "vfe_mdc_clk");
+		break;
+
+	case CAMIO_MDC_CLK:
+		clk = camio_mdc_clk = clk_get(NULL, "mdc_clk");
+		break;
+
+	case CAMIO_VFE_CLK:
+		clk = camio_vfe_clk = clk_get(NULL, "vfe_clk");
+		break;
+
+	default:
+		break;
+	}
+
+	if (!IS_ERR(clk)) {
+		clk_enable(clk);
+		rc = 0;
+	}
+
+	return rc;
+}
+
+int msm_camio_clk_disable(enum msm_camio_clk_type clktype)
+{
+	int rc = -1;
+	struct clk *clk = NULL;
+
+	switch (clktype) {
+	case CAMIO_VFE_MDC_CLK:
+		clk = camio_vfe_mdc_clk;
+		break;
+
+	case CAMIO_MDC_CLK:
+		clk = camio_mdc_clk;
+		break;
+
+	case CAMIO_VFE_CLK:
+		clk = camio_vfe_clk;
+		break;
+
+	default:
+		break;
+	}
+
+	if (!IS_ERR(clk)) {
+		clk_disable(clk);
+		clk_put(clk);
+		rc = 0;
+	}
+
+	return rc;
+}
+
+void msm_camio_clk_rate_set(int rate)
+{
+	struct clk *clk = camio_vfe_clk;
+
+	if (clk != ERR_PTR(-ENOENT))
+		clk_set_rate(clk, rate);
+}
+
+int msm_camio_enable(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+
+	camio_ext = camdev->ioext;
+
+	appio = request_mem_region(camio_ext.appphy,
+		camio_ext.appsz, pdev->name);
+	if (!appio) {
+		rc = -EBUSY;
+		goto enable_fail;
+	}
+
+	appbase = ioremap(camio_ext.appphy,
+		camio_ext.appsz);
+	if (!appbase) {
+		rc = -ENOMEM;
+		goto apps_no_mem;
+	}
+
+	msm_camio_clk_enable(CAMIO_VFE_CLK);
+	msm_camio_clk_enable(CAMIO_MDC_CLK);
+	return 0;
+apps_no_mem:
+	release_mem_region(camio_ext.appphy, camio_ext.appsz);
+enable_fail:
+	return rc;
+}
+
+int msm_camio_sensor_clk_on(struct platform_device *pdev)
+{
+	struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+	int32_t rc = 0;
+	camio_ext = camdev->ioext;
+	mdcio = request_mem_region(camio_ext.mdcphy,
+		camio_ext.mdcsz, pdev->name);
+	if (!mdcio)
+		rc = -EBUSY;
+	mdcbase = ioremap(camio_ext.mdcphy,
+		camio_ext.mdcsz);
+	if (!mdcbase) {
+		rc = -EINVAL;
+		goto mdc_no_mem;
+	}
+	camdev->camera_gpio_on();
+	return msm_camio_clk_enable(CAMIO_VFE_MDC_CLK);
+
+mdc_no_mem:
+	release_mem_region(camio_ext.mdcphy, camio_ext.mdcsz);
+	return rc;
+}
+
+int msm_camio_sensor_clk_off(struct platform_device *pdev)
+{
+	struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+	camdev->camera_gpio_off();
+	iounmap(mdcbase);
+	release_mem_region(camio_ext.mdcphy, camio_ext.mdcsz);
+	return msm_camio_clk_disable(CAMIO_VFE_MDC_CLK);
+}
+
+void msm_camio_disable(struct platform_device *pdev)
+{
+	iounmap(appbase);
+	release_mem_region(camio_ext.appphy, camio_ext.appsz);
+	msm_camio_clk_disable(CAMIO_VFE_CLK);
+	msm_camio_clk_disable(CAMIO_MDC_CLK);
+}
+
+void msm_disable_io_gpio_clk(struct platform_device *pdev)
+{
+	return;
+}
+
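+/*
+ * Reset the CAMIF pad registers: switch the VFE clock to the internal
+ * source, reprogram the pad configuration, pulse the pad SW reset bit
+ * (assert then deassert, with 10 ms settle delays) and switch the clock
+ * back to the external source.
+ */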
+void msm_camio_camif_pad_reg_reset(void)
+{
+	uint32_t reg;
+	uint32_t mask, value;
+
+	/* select CLKRGM_VFE_SRC_CAM_VFE_SRC:  internal source */
+	msm_camio_clk_sel(MSM_CAMIO_CLK_SRC_INTERNAL);
+
+	reg = (readl(mdcbase)) & CAMIF_CFG_RMSK;
+
+	mask = CAM_SEL_BMSK |
+		CAM_PCLK_SRC_SEL_BMSK |
+		CAM_PCLK_INVERT_BMSK;
+
+	value = 1 << CAM_SEL_SHFT |
+		3 << CAM_PCLK_SRC_SEL_SHFT |
+		0 << CAM_PCLK_INVERT_SHFT;
+
+	writel((reg & (~mask)) | (value & mask), mdcbase);
+	msleep(10);
+
+	reg = (readl(mdcbase)) & CAMIF_CFG_RMSK;
+	mask = CAM_PAD_REG_SW_RESET_BMSK;
+	value = 1 << CAM_PAD_REG_SW_RESET_SHFT;
+	writel((reg & (~mask)) | (value & mask), mdcbase);
+	msleep(10);
+
+	reg = (readl(mdcbase)) & CAMIF_CFG_RMSK;
+	mask = CAM_PAD_REG_SW_RESET_BMSK;
+	value = 0 << CAM_PAD_REG_SW_RESET_SHFT;
+	writel((reg & (~mask)) | (value & mask), mdcbase);
+	msleep(10);
+
+	msm_camio_clk_sel(MSM_CAMIO_CLK_SRC_EXTERNAL);
+	msleep(10);
+}
+
+void msm_camio_vfe_blk_reset(void)
+{
+	uint32_t val;
+
+	/* do apps reset */
+	val = readl(appbase + 0x00000210);
+	val |= 0x1;
+	writel(val, appbase + 0x00000210);
+	mdelay(10);
+
+	val = readl(appbase + 0x00000210);
+	val &= ~0x1;
+	writel(val, appbase + 0x00000210);
+	mdelay(10);
+
+	/* do axi reset */
+	val = readl(appbase + 0x00000208);
+	val |= 0x1;
+	writel(val, appbase + 0x00000208);
+	mdelay(10);
+
+	val = readl(appbase + 0x00000208);
+	val &= ~0x1;
+	writel(val, appbase + 0x00000208);
+	mdelay(10);
+}
+
+void msm_camio_camif_pad_reg_reset_2(void)
+{
+	uint32_t reg;
+	uint32_t mask, value;
+
+	reg = (readl(mdcbase)) & CAMIF_CFG_RMSK;
+	mask = CAM_PAD_REG_SW_RESET_BMSK;
+	value = 1 << CAM_PAD_REG_SW_RESET_SHFT;
+	writel((reg & (~mask)) | (value & mask), mdcbase);
+	mdelay(10);
+
+	reg = (readl(mdcbase)) & CAMIF_CFG_RMSK;
+	mask = CAM_PAD_REG_SW_RESET_BMSK;
+	value = 0 << CAM_PAD_REG_SW_RESET_SHFT;
+	writel((reg & (~mask)) | (value & mask), mdcbase);
+	mdelay(10);
+}
+
+void msm_camio_clk_sel(enum msm_camio_clk_src_type srctype)
+{
+	struct clk *clk = NULL;
+
+	clk = camio_vfe_clk;
+
+	if (clk != NULL && clk != ERR_PTR(-ENOENT)) {
+		switch (srctype) {
+		case MSM_CAMIO_CLK_SRC_INTERNAL:
+			clk_set_flags(clk, 0x00000100 << 1);
+			break;
+
+		case MSM_CAMIO_CLK_SRC_EXTERNAL:
+			clk_set_flags(clk, 0x00000100);
+			break;
+
+		default:
+			break;
+		}
+	}
+}
+
+int msm_camio_probe_on(struct platform_device *pdev)
+{
+	struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+	camdev->camera_gpio_on();
+	return msm_camio_clk_enable(CAMIO_VFE_CLK);
+}
+
+int msm_camio_probe_off(struct platform_device *pdev)
+{
+	struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+	camdev->camera_gpio_off();
+	return msm_camio_clk_disable(CAMIO_VFE_CLK);
+}
diff --git a/drivers/media/video/msm/msm_io8x.c b/drivers/media/video/msm/msm_io8x.c
new file mode 100644
index 0000000..6bc92b0
--- /dev/null
+++ b/drivers/media/video/msm/msm_io8x.c
@@ -0,0 +1,331 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <mach/gpio.h>
+#include <mach/board.h>
+#include <mach/camera.h>
+#include <mach/clk.h>
+
+#define CAMIF_CFG_RMSK 0x1fffff
+#define CAM_SEL_BMSK 0x2
+#define CAM_PCLK_SRC_SEL_BMSK 0x60000
+#define CAM_PCLK_INVERT_BMSK 0x80000
+#define CAM_PAD_REG_SW_RESET_BMSK 0x100000
+
+#define EXT_CAM_HSYNC_POL_SEL_BMSK 0x10000
+#define EXT_CAM_VSYNC_POL_SEL_BMSK 0x8000
+#define MDDI_CLK_CHICKEN_BIT_BMSK  0x80
+
+#define CAM_SEL_SHFT 0x1
+#define CAM_PCLK_SRC_SEL_SHFT 0x11
+#define CAM_PCLK_INVERT_SHFT 0x13
+#define CAM_PAD_REG_SW_RESET_SHFT 0x14
+
+#define EXT_CAM_HSYNC_POL_SEL_SHFT 0x10
+#define EXT_CAM_VSYNC_POL_SEL_SHFT 0xF
+#define MDDI_CLK_CHICKEN_BIT_SHFT  0x7
+#define APPS_RESET_OFFSET 0x00000214
+
+static struct clk *camio_vfe_mdc_clk;
+static struct clk *camio_mdc_clk;
+static struct clk *camio_vfe_clk;
+static struct clk *camio_vfe_axi_clk;
+static struct msm_camera_io_ext camio_ext;
+static struct resource *appio, *mdcio;
+
+void __iomem *appbase, *mdcbase;
+
+
+int msm_camio_clk_enable(enum msm_camio_clk_type clktype)
+{
+	int rc = 0;
+	struct clk *clk = NULL;
+
+	switch (clktype) {
+	case CAMIO_VFE_MDC_CLK:
+		camio_vfe_mdc_clk = clk = clk_get(NULL, "vfe_mdc_clk");
+		break;
+
+	case CAMIO_MDC_CLK:
+		camio_mdc_clk = clk = clk_get(NULL, "mdc_clk");
+		break;
+
+	case CAMIO_VFE_CLK:
+		camio_vfe_clk = clk = clk_get(NULL, "vfe_clk");
+		break;
+
+	case CAMIO_VFE_AXI_CLK:
+		camio_vfe_axi_clk = clk = clk_get(NULL, "vfe_axi_clk");
+		break;
+
+	default:
+		break;
+	}
+
+	if (!IS_ERR(clk))
+		clk_enable(clk);
+	else
+		rc = -1;
+
+	return rc;
+}
+
+int msm_camio_clk_disable(enum msm_camio_clk_type clktype)
+{
+	int rc = 0;
+	struct clk *clk = NULL;
+
+	switch (clktype) {
+	case CAMIO_VFE_MDC_CLK:
+		clk = camio_vfe_mdc_clk;
+		break;
+
+	case CAMIO_MDC_CLK:
+		clk = camio_mdc_clk;
+		break;
+
+	case CAMIO_VFE_CLK:
+		clk = camio_vfe_clk;
+		break;
+
+	case CAMIO_VFE_AXI_CLK:
+		clk = camio_vfe_axi_clk;
+		break;
+
+	default:
+		break;
+	}
+
+	if (!IS_ERR(clk)) {
+		clk_disable(clk);
+		clk_put(clk);
+	} else
+		rc = -1;
+
+	return rc;
+}
+
+void msm_camio_clk_rate_set(int rate)
+{
+	struct clk *clk = camio_vfe_mdc_clk;
+
+	/* TODO: check return */
+	clk_set_rate(clk, rate);
+}
+
+int msm_camio_enable(struct platform_device *pdev)
+{
+	int rc = 0;
+
+	appio = request_mem_region(camio_ext.appphy,
+		camio_ext.appsz, pdev->name);
+	if (!appio) {
+		rc = -EBUSY;
+		goto enable_fail;
+	}
+
+	appbase = ioremap(camio_ext.appphy, camio_ext.appsz);
+	if (!appbase) {
+		rc = -ENOMEM;
+		goto apps_no_mem;
+	}
+	msm_camio_clk_enable(CAMIO_MDC_CLK);
+	msm_camio_clk_enable(CAMIO_VFE_AXI_CLK);
+	return 0;
+
+apps_no_mem:
+	release_mem_region(camio_ext.appphy, camio_ext.appsz);
+enable_fail:
+	return rc;
+}
+
+void msm_camio_disable(struct platform_device *pdev)
+{
+	iounmap(appbase);
+	release_mem_region(camio_ext.appphy, camio_ext.appsz);
+	msm_camio_clk_disable(CAMIO_MDC_CLK);
+	msm_camio_clk_disable(CAMIO_VFE_AXI_CLK);
+}
+
+int msm_camio_sensor_clk_on(struct platform_device *pdev)
+{
+
+	struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+	int32_t rc = 0;
+	camio_ext = camdev->ioext;
+
+	mdcio = request_mem_region(camio_ext.mdcphy,
+		camio_ext.mdcsz, pdev->name);
+	if (!mdcio)
+		rc = -EBUSY;
+	mdcbase = ioremap(camio_ext.mdcphy,
+		camio_ext.mdcsz);
+	if (!mdcbase)
+		goto mdc_no_mem;
+	camdev->camera_gpio_on();
+
+	msm_camio_clk_enable(CAMIO_VFE_CLK);
+	msm_camio_clk_enable(CAMIO_VFE_MDC_CLK);
+	return rc;
+
+
+mdc_no_mem:
+	release_mem_region(camio_ext.mdcphy, camio_ext.mdcsz);
+	return -EINVAL;
+}
+
+int msm_camio_sensor_clk_off(struct platform_device *pdev)
+{
+	struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+	camdev->camera_gpio_off();
+	iounmap(mdcbase);
+	release_mem_region(camio_ext.mdcphy, camio_ext.mdcsz);
+	msm_camio_clk_disable(CAMIO_VFE_CLK);
+	return msm_camio_clk_disable(CAMIO_VFE_MDC_CLK);
+
+}
+
+void msm_disable_io_gpio_clk(struct platform_device *pdev)
+{
+	return;
+}
+
+void msm_camio_camif_pad_reg_reset(void)
+{
+	uint32_t reg;
+	uint32_t mask, value;
+
+	/* select CLKRGM_VFE_SRC_CAM_VFE_SRC:  internal source */
+	msm_camio_clk_sel(MSM_CAMIO_CLK_SRC_INTERNAL);
+
+	reg = (readl(mdcbase)) & CAMIF_CFG_RMSK;
+
+	mask = CAM_SEL_BMSK |
+		CAM_PCLK_SRC_SEL_BMSK |
+		CAM_PCLK_INVERT_BMSK |
+		EXT_CAM_HSYNC_POL_SEL_BMSK |
+	    EXT_CAM_VSYNC_POL_SEL_BMSK | MDDI_CLK_CHICKEN_BIT_BMSK;
+
+	value = 1 << CAM_SEL_SHFT |
+		3 << CAM_PCLK_SRC_SEL_SHFT |
+		0 << CAM_PCLK_INVERT_SHFT |
+		0 << EXT_CAM_HSYNC_POL_SEL_SHFT |
+	    0 << EXT_CAM_VSYNC_POL_SEL_SHFT | 0 << MDDI_CLK_CHICKEN_BIT_SHFT;
+	writel((reg & (~mask)) | (value & mask), mdcbase);
+	msleep(10);
+
+	reg = (readl(mdcbase)) & CAMIF_CFG_RMSK;
+	mask = CAM_PAD_REG_SW_RESET_BMSK;
+	value = 1 << CAM_PAD_REG_SW_RESET_SHFT;
+	writel((reg & (~mask)) | (value & mask), mdcbase);
+	msleep(10);
+
+	reg = (readl(mdcbase)) & CAMIF_CFG_RMSK;
+	mask = CAM_PAD_REG_SW_RESET_BMSK;
+	value = 0 << CAM_PAD_REG_SW_RESET_SHFT;
+	writel((reg & (~mask)) | (value & mask), mdcbase);
+	msleep(10);
+
+	msm_camio_clk_sel(MSM_CAMIO_CLK_SRC_EXTERNAL);
+
+	msleep(10);
+
+	/* todo: check return */
+	if (camio_vfe_clk)
+		clk_set_rate(camio_vfe_clk, 96000000);
+}
+
+void msm_camio_vfe_blk_reset(void)
+{
+	uint32_t val;
+
+	val = readl(appbase + APPS_RESET_OFFSET);
+	val |= 0x1;
+	writel(val, appbase + APPS_RESET_OFFSET);
+	mdelay(10);
+
+	val = readl(appbase + APPS_RESET_OFFSET);
+	val &= ~0x1;
+	writel(val, appbase + APPS_RESET_OFFSET);
+	mdelay(10);
+}
+
+void msm_camio_camif_pad_reg_reset_2(void)
+{
+	uint32_t reg;
+	uint32_t mask, value;
+
+	reg = (readl(mdcbase)) & CAMIF_CFG_RMSK;
+	mask = CAM_PAD_REG_SW_RESET_BMSK;
+	value = 1 << CAM_PAD_REG_SW_RESET_SHFT;
+	writel((reg & (~mask)) | (value & mask), mdcbase);
+	mdelay(10);
+
+	reg = (readl(mdcbase)) & CAMIF_CFG_RMSK;
+	mask = CAM_PAD_REG_SW_RESET_BMSK;
+	value = 0 << CAM_PAD_REG_SW_RESET_SHFT;
+	writel((reg & (~mask)) | (value & mask), mdcbase);
+	mdelay(10);
+}
+
+void msm_camio_clk_sel(enum msm_camio_clk_src_type srctype)
+{
+	struct clk *clk = NULL;
+
+	clk = camio_vfe_clk;
+
+	if (clk != NULL) {
+		switch (srctype) {
+		case MSM_CAMIO_CLK_SRC_INTERNAL:
+			clk_set_flags(clk, 0x00000100 << 1);
+			break;
+
+		case MSM_CAMIO_CLK_SRC_EXTERNAL:
+			clk_set_flags(clk, 0x00000100);
+			break;
+
+		default:
+			break;
+		}
+	}
+}
+
+void msm_camio_clk_axi_rate_set(int rate)
+{
+	struct clk *clk = camio_vfe_axi_clk;
+	/* todo: check return */
+	clk_set_rate(clk, rate);
+}
+
+int msm_camio_probe_on(struct platform_device *pdev)
+{
+	struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+
+	camdev->camera_gpio_on();
+	return msm_camio_clk_enable(CAMIO_VFE_MDC_CLK);
+}
+
+int msm_camio_probe_off(struct platform_device *pdev)
+{
+	struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+
+	camdev->camera_gpio_off();
+	return msm_camio_clk_disable(CAMIO_VFE_MDC_CLK);
+}
diff --git a/drivers/media/video/msm/msm_io_7x27a.c b/drivers/media/video/msm/msm_io_7x27a.c
new file mode 100644
index 0000000..c70cfca
--- /dev/null
+++ b/drivers/media/video/msm/msm_io_7x27a.c
@@ -0,0 +1,612 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/pm_qos_params.h>
+#include <mach/board.h>
+#include <mach/camera.h>
+#include <mach/clk.h>
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
+
+
+/* MIPI	CSI controller registers */
+#define	MIPI_PHY_CONTROL		0x00000000
+#define	MIPI_PROTOCOL_CONTROL		0x00000004
+#define	MIPI_INTERRUPT_STATUS		0x00000008
+#define	MIPI_INTERRUPT_MASK		0x0000000C
+#define	MIPI_CAMERA_CNTL		0x00000024
+#define	MIPI_CALIBRATION_CONTROL	0x00000018
+#define	MIPI_PHY_D0_CONTROL2		0x00000038
+#define	MIPI_PHY_D1_CONTROL2		0x0000003C
+#define	MIPI_PHY_D2_CONTROL2		0x00000040
+#define	MIPI_PHY_D3_CONTROL2		0x00000044
+#define	MIPI_PHY_CL_CONTROL		0x00000048
+#define	MIPI_PHY_D0_CONTROL		0x00000034
+#define	MIPI_PHY_D1_CONTROL		0x00000020
+#define	MIPI_PHY_D2_CONTROL		0x0000002C
+#define	MIPI_PHY_D3_CONTROL		0x00000030
+#define	MIPI_PWR_CNTL			0x00000054
+
+/*
+ * MIPI_PROTOCOL_CONTROL register bits to enable/disable the features of
+ * CSI Rx Block
+ */
+
+/* DPCM scheme */
+#define	MIPI_PROTOCOL_CONTROL_DPCM_SCHEME_SHFT			0x1e
+/* SW_RST to issue a SW reset to the CSI core */
+#define	MIPI_PROTOCOL_CONTROL_SW_RST_BMSK			0x8000000
+/* Capture long packet header info in the MIPI_PROTOCOL_STATUS register */
+#define	MIPI_PROTOCOL_CONTROL_LONG_PACKET_HEADER_CAPTURE_BMSK	0x200000
+/* Data format for unpacking purpose */
+#define	MIPI_PROTOCOL_CONTROL_DATA_FORMAT_SHFT			0x13
+/* Enable decoding of payload based on the data type field of the packet hdr */
+#define	MIPI_PROTOCOL_CONTROL_DECODE_ID_BMSK			0x00000
+/* Enable error correction on packet headers */
+#define	MIPI_PROTOCOL_CONTROL_ECC_EN_BMSK			0x20000
+
+/*
+ * MIPI_CALIBRATION_CONTROL register contains control info for the
+ * calibration impedance controller
+ */
+
+/* Enable bit for calibration pad */
+#define	MIPI_CALIBRATION_CONTROL_SWCAL_CAL_EN_SHFT		0x16
+/* When SWCAL_STRENGTH_OVERRIDE_EN, SW_CAL_EN and MANUAL_OVERRIDE_EN are
+ * set, the hardware calibration circuitry associated with CAL_SW_HW_MODE
+ * is bypassed
+ */
+#define	MIPI_CALIBRATION_CONTROL_SWCAL_STRENGTH_OVERRIDE_EN_SHFT	0x15
+/* To indicate the Calibration process is in the control of HW/SW */
+#define	MIPI_CALIBRATION_CONTROL_CAL_SW_HW_MODE_SHFT		0x14
+/* When this is set, the strength value of the data and clk lane impedance
+ * termination is updated with the MANUAL_STRENGTH settings and the
+ * calibration sensing logic is idle.
+ */
+#define	MIPI_CALIBRATION_CONTROL_MANUAL_OVERRIDE_EN_SHFT	0x7
+
+/* Data lane0 control */
+/* T-hs Settle count value  for Rx */
+#define	MIPI_PHY_D0_CONTROL2_SETTLE_COUNT_SHFT			0x18
+/* Rx termination control */
+#define	MIPI_PHY_D0_CONTROL2_HS_TERM_IMP_SHFT			0x10
+/* LP Rx enable */
+#define	MIPI_PHY_D0_CONTROL2_LP_REC_EN_SHFT			0x4
+/*
+ * Error tolerance for the sync sequence
+ * 1 - tolerate a one-bit error in the sync sequence
+ * 0 - require all 8 bits of the sync sequence to be correct
+ */
+#define	MIPI_PHY_D0_CONTROL2_ERR_SOT_HS_EN_SHFT			0x3
+
+/* Comments are the same as for D0 */
+#define	MIPI_PHY_D1_CONTROL2_SETTLE_COUNT_SHFT			0x18
+#define	MIPI_PHY_D1_CONTROL2_HS_TERM_IMP_SHFT			0x10
+#define	MIPI_PHY_D1_CONTROL2_LP_REC_EN_SHFT			0x4
+#define	MIPI_PHY_D1_CONTROL2_ERR_SOT_HS_EN_SHFT			0x3
+
+/* Comments are the same as for D0 */
+#define	MIPI_PHY_D2_CONTROL2_SETTLE_COUNT_SHFT			0x18
+#define	MIPI_PHY_D2_CONTROL2_HS_TERM_IMP_SHFT			0x10
+#define	MIPI_PHY_D2_CONTROL2_LP_REC_EN_SHFT			0x4
+#define	MIPI_PHY_D2_CONTROL2_ERR_SOT_HS_EN_SHFT			0x3
+
+/* Comments are the same as for D0 */
+#define	MIPI_PHY_D3_CONTROL2_SETTLE_COUNT_SHFT			0x18
+#define	MIPI_PHY_D3_CONTROL2_HS_TERM_IMP_SHFT			0x10
+#define	MIPI_PHY_D3_CONTROL2_LP_REC_EN_SHFT			0x4
+#define	MIPI_PHY_D3_CONTROL2_ERR_SOT_HS_EN_SHFT			0x3
+
+/* PHY_CL_CTRL programs the parameters of clk lane of CSIRXPHY */
+/* HS Rx termination control */
+#define	MIPI_PHY_CL_CONTROL_HS_TERM_IMP_SHFT			0x18
+/* Start signal for T-hs delay */
+#define	MIPI_PHY_CL_CONTROL_LP_REC_EN_SHFT			0x2
+
+/* PHY DATA lane 0 control */
+/*
+ * HS RX equalizer strength control
+ * 00 - 0 dB, 01 - 3 dB, 10 - 5 dB, 11 - 7 dB
+ */
+#define	MIPI_PHY_D0_CONTROL_HS_REC_EQ_SHFT			0x1c
+
+/* PHY DATA lane 1 control */
+/* Shutdown signal for MIPI clk phy line */
+#define	MIPI_PHY_D1_CONTROL_MIPI_CLK_PHY_SHUTDOWNB_SHFT		0x9
+/* Shutdown signal for MIPI data phy line */
+#define	MIPI_PHY_D1_CONTROL_MIPI_DATA_PHY_SHUTDOWNB_SHFT	0x8
+
+#define MSM_AXI_QOS_PREVIEW 200000
+#define MSM_AXI_QOS_SNAPSHOT 200000
+#define MSM_AXI_QOS_RECORDING 200000
+
+#define MIPI_PWR_CNTL_ENA	0x07
+#define MIPI_PWR_CNTL_DIS	0x0
+
+static struct clk *camio_cam_clk;
+static struct clk *camio_vfe_clk;
+static struct clk *camio_csi_src_clk;
+static struct clk *camio_csi0_vfe_clk;
+static struct clk *camio_csi1_vfe_clk;
+static struct clk *camio_csi0_clk;
+static struct clk *camio_csi1_clk;
+static struct clk *camio_csi0_pclk;
+static struct clk *camio_csi1_pclk;
+
+static struct msm_camera_io_ext camio_ext;
+static struct msm_camera_io_clk camio_clk;
+static struct platform_device *camio_dev;
+void __iomem *csibase;
+void __iomem *appbase;
+
+
+
+void msm_io_w(u32 data, void __iomem *addr)
+{
+	CDBG("%s: %08x %08x\n", __func__, (int) (addr), (data));
+	writel((data), (addr));
+}
+
+u32 msm_io_r(void __iomem *addr)
+{
+	uint32_t data = readl(addr);
+	CDBG("%s: %08x %08x\n", __func__, (int) (addr), (data));
+	return data;
+}
+
+void msm_camio_vfe_clk_rate_set(int rate)
+{
+	struct clk *clk = camio_vfe_clk;
+	if (rate > clk_get_rate(clk))
+		clk_set_rate(clk, rate);
+}
+
+int msm_camio_clk_enable(enum msm_camio_clk_type clktype)
+{
+	int rc = 0;
+	struct clk *clk = NULL;
+
+	switch (clktype) {
+	case CAMIO_CAM_MCLK_CLK:
+		clk = clk_get(NULL, "cam_m_clk");
+		camio_cam_clk = clk;
+		msm_camio_clk_rate_set_2(clk, camio_clk.mclk_clk_rate);
+		break;
+	case CAMIO_VFE_CLK:
+		clk = clk_get(NULL, "vfe_clk");
+		camio_vfe_clk = clk;
+		msm_camio_clk_rate_set_2(clk, camio_clk.vfe_clk_rate);
+		break;
+	case CAMIO_CSI0_VFE_CLK:
+		clk = clk_get(&camio_dev->dev, "csi_vfe_clk");
+		camio_csi0_vfe_clk = clk;
+		break;
+	case CAMIO_CSI1_VFE_CLK:
+		clk = clk_get(NULL, "csi_vfe_clk");
+		camio_csi1_vfe_clk = clk;
+		break;
+	case CAMIO_CSI_SRC_CLK:
+		clk = clk_get(NULL, "csi_src_clk");
+		camio_csi_src_clk = clk;
+		break;
+	case CAMIO_CSI0_CLK:
+		clk = clk_get(&camio_dev->dev, "csi_clk");
+		camio_csi0_clk = clk;
+		msm_camio_clk_rate_set_2(clk, 400000000);
+		break;
+	case CAMIO_CSI1_CLK:
+		clk = clk_get(NULL, "csi_clk");
+		camio_csi1_clk = clk;
+		break;
+	case CAMIO_CSI0_PCLK:
+		clk = clk_get(&camio_dev->dev, "csi_pclk");
+		camio_csi0_pclk = clk;
+		break;
+	case CAMIO_CSI1_PCLK:
+		clk = clk_get(NULL, "csi_pclk");
+		camio_csi1_pclk = clk;
+		break;
+	default:
+		break;
+	}
+
+	if (!IS_ERR(clk))
+		clk_enable(clk);
+	else
+		rc = -1;
+	return rc;
+}
+
+int msm_camio_clk_disable(enum msm_camio_clk_type clktype)
+{
+	int rc = 0;
+	struct clk *clk = NULL;
+
+	switch (clktype) {
+	case CAMIO_CAM_MCLK_CLK:
+		clk = camio_cam_clk;
+		break;
+	case CAMIO_VFE_CLK:
+		clk = camio_vfe_clk;
+		break;
+	case CAMIO_CSI_SRC_CLK:
+		clk = camio_csi_src_clk;
+		break;
+	case CAMIO_CSI0_VFE_CLK:
+		clk = camio_csi0_vfe_clk;
+		break;
+	case CAMIO_CSI1_VFE_CLK:
+		clk = camio_csi1_vfe_clk;
+		break;
+	case CAMIO_CSI0_CLK:
+		clk = camio_csi0_clk;
+		break;
+	case CAMIO_CSI1_CLK:
+		clk = camio_csi1_clk;
+		break;
+	case CAMIO_CSI0_PCLK:
+		clk = camio_csi0_pclk;
+		break;
+	case CAMIO_CSI1_PCLK:
+		clk = camio_csi1_pclk;
+		break;
+	default:
+		break;
+	}
+
+	if (!IS_ERR(clk)) {
+		clk_disable(clk);
+		clk_put(clk);
+	} else
+		rc = -1;
+	return rc;
+}
+
+void msm_camio_clk_rate_set(int rate)
+{
+	struct clk *clk = camio_cam_clk;
+	clk_set_rate(clk, rate);
+}
+
+void msm_camio_clk_rate_set_2(struct clk *clk, int rate)
+{
+	clk_set_rate(clk, rate);
+}
+
+void msm_camio_clk_set_min_rate(struct clk *clk, int rate)
+{
+	clk_set_min_rate(clk, rate);
+}
+
+static irqreturn_t msm_io_csi_irq(int irq_num, void *data)
+{
+	uint32_t irq;
+
+	irq = msm_io_r(csibase + MIPI_INTERRUPT_STATUS);
+	CDBG("%s MIPI_INTERRUPT_STATUS = 0x%x\n", __func__, irq);
+	msm_io_w(irq, csibase + MIPI_INTERRUPT_STATUS);
+
+	/* TODO: Needs to send this info to upper layers */
+	if ((irq >> 19) & 0x1)
+		pr_info("Unsupported packet format is received\n");
+	return IRQ_HANDLED;
+}
+
+int msm_camio_enable(struct platform_device *pdev)
+{
+	int rc = 0;
+	const struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+	uint32_t val;
+
+	camio_dev = pdev;
+	camio_ext = camdev->ioext;
+	camio_clk = camdev->ioclk;
+
+	msm_camio_clk_enable(CAMIO_VFE_CLK);
+	msm_camio_clk_enable(CAMIO_CSI0_VFE_CLK);
+	msm_camio_clk_enable(CAMIO_CSI1_VFE_CLK);
+	msm_camio_clk_enable(CAMIO_CSI0_CLK);
+	msm_camio_clk_enable(CAMIO_CSI1_CLK);
+	msm_camio_clk_enable(CAMIO_CSI0_PCLK);
+	msm_camio_clk_enable(CAMIO_CSI1_PCLK);
+
+	csibase = ioremap(camio_ext.csiphy, camio_ext.csisz);
+	if (!csibase) {
+		rc = -ENOMEM;
+		goto csi_busy;
+	}
+	rc = request_irq(camio_ext.csiirq, msm_io_csi_irq,
+				IRQF_TRIGGER_RISING, "csi", 0);
+	if (rc < 0)
+		goto csi_irq_fail;
+
+	msleep(20);
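+	/*
+	 * Program the data lanes with: settle count 20, HS termination
+	 * impedance 0x0F, LP receiver disabled and single-bit SOT error
+	 * tolerance enabled.
+	 */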
+	val = (20 <<
+		MIPI_PHY_D0_CONTROL2_SETTLE_COUNT_SHFT) |
+		(0x0F << MIPI_PHY_D0_CONTROL2_HS_TERM_IMP_SHFT) |
+		(0x0 << MIPI_PHY_D0_CONTROL2_LP_REC_EN_SHFT) |
+		(0x1 << MIPI_PHY_D0_CONTROL2_ERR_SOT_HS_EN_SHFT);
+	CDBG("%s MIPI_PHY_D0_CONTROL2 val=0x%x\n", __func__, val);
+	msm_io_w(val, csibase + MIPI_PHY_D0_CONTROL2);
+	msm_io_w(val, csibase + MIPI_PHY_D1_CONTROL2);
+	msm_io_w(val, csibase + MIPI_PHY_D2_CONTROL2);
+	msm_io_w(val, csibase + MIPI_PHY_D3_CONTROL2);
+
+	val = (0x0F << MIPI_PHY_CL_CONTROL_HS_TERM_IMP_SHFT) |
+		(0x0 << MIPI_PHY_CL_CONTROL_LP_REC_EN_SHFT);
+	CDBG("%s MIPI_PHY_CL_CONTROL val=0x%x\n", __func__, val);
+	msm_io_w(val, csibase + MIPI_PHY_CL_CONTROL);
+
+	appbase = ioremap(camio_ext.appphy,
+		camio_ext.appsz);
+	if (!appbase) {
+		rc = -ENOMEM;
+		goto csi_irq_fail;
+	}
+	return 0;
+
+csi_irq_fail:
+	iounmap(csibase);
+csi_busy:
+	msm_camio_clk_disable(CAMIO_CAM_MCLK_CLK);
+	msm_camio_clk_disable(CAMIO_VFE_CLK);
+	msm_camio_clk_disable(CAMIO_CSI0_VFE_CLK);
+	msm_camio_clk_disable(CAMIO_CSI1_VFE_CLK);
+	msm_camio_clk_disable(CAMIO_CSI0_CLK);
+	msm_camio_clk_disable(CAMIO_CSI1_CLK);
+	msm_camio_clk_disable(CAMIO_CSI0_PCLK);
+	msm_camio_clk_disable(CAMIO_CSI1_PCLK);
+	camdev->camera_gpio_off();
+	return rc;
+}
+
+void msm_camio_disable(struct platform_device *pdev)
+{
+	uint32_t val;
+
+	val = (20 <<
+		MIPI_PHY_D0_CONTROL2_SETTLE_COUNT_SHFT) |
+		(0x0F << MIPI_PHY_D0_CONTROL2_HS_TERM_IMP_SHFT) |
+		(0x0 << MIPI_PHY_D0_CONTROL2_LP_REC_EN_SHFT) |
+		(0x1 << MIPI_PHY_D0_CONTROL2_ERR_SOT_HS_EN_SHFT);
+	CDBG("%s MIPI_PHY_D0_CONTROL2 val=0x%x\n", __func__, val);
+	msm_io_w(val, csibase + MIPI_PHY_D0_CONTROL2);
+	msm_io_w(val, csibase + MIPI_PHY_D1_CONTROL2);
+	msm_io_w(val, csibase + MIPI_PHY_D2_CONTROL2);
+	msm_io_w(val, csibase + MIPI_PHY_D3_CONTROL2);
+
+	val = (0x0F << MIPI_PHY_CL_CONTROL_HS_TERM_IMP_SHFT) |
+		(0x0 << MIPI_PHY_CL_CONTROL_LP_REC_EN_SHFT);
+	CDBG("%s MIPI_PHY_CL_CONTROL val=0x%x\n", __func__, val);
+	msm_io_w(val, csibase + MIPI_PHY_CL_CONTROL);
+	msleep(20);
+
+	free_irq(camio_ext.csiirq, 0);
+	iounmap(csibase);
+	iounmap(appbase);
+	CDBG("disable clocks\n");
+
+	msm_camio_clk_disable(CAMIO_VFE_CLK);
+	msm_camio_clk_disable(CAMIO_CSI0_CLK);
+	msm_camio_clk_disable(CAMIO_CSI1_CLK);
+	msm_camio_clk_disable(CAMIO_CSI0_VFE_CLK);
+	msm_camio_clk_disable(CAMIO_CSI1_VFE_CLK);
+	msm_camio_clk_disable(CAMIO_CSI0_PCLK);
+	msm_camio_clk_disable(CAMIO_CSI1_PCLK);
+}
+
+int msm_camio_sensor_clk_on(struct platform_device *pdev)
+{
+	int rc = 0;
+	const struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+	camio_dev = pdev;
+	camio_ext = camdev->ioext;
+	camio_clk = camdev->ioclk;
+
+	rc = camdev->camera_gpio_on();
+	if (rc < 0)
+		return rc;
+	return msm_camio_clk_enable(CAMIO_CAM_MCLK_CLK);
+}
+
+int msm_camio_sensor_clk_off(struct platform_device *pdev)
+{
+	const struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+	camdev->camera_gpio_off();
+	return msm_camio_clk_disable(CAMIO_CAM_MCLK_CLK);
+
+}
+
+void msm_camio_vfe_blk_reset(void)
+{
+	uint32_t val;
+
+	/* do apps reset */
+	val = readl_relaxed(appbase + 0x00000210);
+	val |= 0x1;
+	writel_relaxed(val, appbase + 0x00000210);
+	usleep_range(10000, 11000);
+
+	val = readl_relaxed(appbase + 0x00000210);
+	val &= ~0x1;
+	writel_relaxed(val, appbase + 0x00000210);
+	usleep_range(10000, 11000);
+
+	/* do axi reset */
+	val = readl_relaxed(appbase + 0x00000208);
+	val |= 0x1;
+	writel_relaxed(val, appbase + 0x00000208);
+	usleep_range(10000, 11000);
+
+	val = readl_relaxed(appbase + 0x00000208);
+	val &= ~0x1;
+	writel_relaxed(val, appbase + 0x00000208);
+	mb();
+	usleep_range(10000, 11000);
+	return;
+}
+
+int msm_camio_probe_on(struct platform_device *pdev)
+{
+	int rc = 0;
+	const struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+	camio_dev = pdev;
+	camio_ext = camdev->ioext;
+	camio_clk = camdev->ioclk;
+
+	msm_camio_clk_enable(CAMIO_CSI0_PCLK);
+	msm_camio_clk_enable(CAMIO_CSI1_PCLK);
+
+	rc = camdev->camera_gpio_on();
+	if (rc < 0)
+		return rc;
+	return msm_camio_clk_enable(CAMIO_CAM_MCLK_CLK);
+}
+
+int msm_camio_probe_off(struct platform_device *pdev)
+{
+	const struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+	camdev->camera_gpio_off();
+
+	csibase = ioremap(camdev->ioext.csiphy, camdev->ioext.csisz);
+	if (!csibase) {
+		pr_err("ioremap failed for CSIBASE\n");
+		goto ioremap_fail;
+	}
+	msm_io_w(MIPI_PWR_CNTL_DIS, csibase + MIPI_PWR_CNTL);
+	iounmap(csibase);
+ioremap_fail:
+	msm_camio_clk_disable(CAMIO_CSI0_PCLK);
+	msm_camio_clk_disable(CAMIO_CSI1_PCLK);
+	return msm_camio_clk_disable(CAMIO_CAM_MCLK_CLK);
+}
+
+int msm_camio_csi_config(struct msm_camera_csi_params *csi_params)
+{
+	int rc = 0;
+	uint32_t val = 0;
+
+	CDBG("msm_camio_csi_config\n");
+
+	/* Enable error correction for DATA lane. Applies to all data lanes */
+	msm_io_w(0x4, csibase + MIPI_PHY_CONTROL);
+
+	msm_io_w(MIPI_PROTOCOL_CONTROL_SW_RST_BMSK,
+		csibase + MIPI_PROTOCOL_CONTROL);
+
+	val = MIPI_PROTOCOL_CONTROL_LONG_PACKET_HEADER_CAPTURE_BMSK |
+		MIPI_PROTOCOL_CONTROL_DECODE_ID_BMSK |
+		MIPI_PROTOCOL_CONTROL_ECC_EN_BMSK;
+	val |= (uint32_t)(csi_params->data_format) <<
+		MIPI_PROTOCOL_CONTROL_DATA_FORMAT_SHFT;
+	val |= csi_params->dpcm_scheme <<
+		MIPI_PROTOCOL_CONTROL_DPCM_SCHEME_SHFT;
+	CDBG("%s MIPI_PROTOCOL_CONTROL val=0x%x\n", __func__, val);
+	msm_io_w(val, csibase + MIPI_PROTOCOL_CONTROL);
+
+	val = (0x1 << MIPI_CALIBRATION_CONTROL_SWCAL_CAL_EN_SHFT) |
+		(0x1 <<
+		MIPI_CALIBRATION_CONTROL_SWCAL_STRENGTH_OVERRIDE_EN_SHFT) |
+		(0x1 << MIPI_CALIBRATION_CONTROL_CAL_SW_HW_MODE_SHFT) |
+		(0x1 << MIPI_CALIBRATION_CONTROL_MANUAL_OVERRIDE_EN_SHFT);
+	CDBG("%s MIPI_CALIBRATION_CONTROL val=0x%x\n", __func__, val);
+	msm_io_w(val, csibase + MIPI_CALIBRATION_CONTROL);
+
+	val = (csi_params->settle_cnt <<
+		MIPI_PHY_D0_CONTROL2_SETTLE_COUNT_SHFT) |
+		(0x0F << MIPI_PHY_D0_CONTROL2_HS_TERM_IMP_SHFT) |
+		(0x1 << MIPI_PHY_D0_CONTROL2_LP_REC_EN_SHFT) |
+		(0x1 << MIPI_PHY_D0_CONTROL2_ERR_SOT_HS_EN_SHFT);
+	CDBG("%s MIPI_PHY_D0_CONTROL2 val=0x%x\n", __func__, val);
+	msm_io_w(val, csibase + MIPI_PHY_D0_CONTROL2);
+	msm_io_w(val, csibase + MIPI_PHY_D1_CONTROL2);
+	msm_io_w(val, csibase + MIPI_PHY_D2_CONTROL2);
+	msm_io_w(val, csibase + MIPI_PHY_D3_CONTROL2);
+
+
+	val = (0x0F << MIPI_PHY_CL_CONTROL_HS_TERM_IMP_SHFT) |
+		(0x1 << MIPI_PHY_CL_CONTROL_LP_REC_EN_SHFT);
+	CDBG("%s MIPI_PHY_CL_CONTROL val=0x%x\n", __func__, val);
+	msm_io_w(val, csibase + MIPI_PHY_CL_CONTROL);
+
+	val = 0 << MIPI_PHY_D0_CONTROL_HS_REC_EQ_SHFT;
+	msm_io_w(val, csibase + MIPI_PHY_D0_CONTROL);
+
+	val = (0x1 << MIPI_PHY_D1_CONTROL_MIPI_CLK_PHY_SHUTDOWNB_SHFT) |
+		(0x1 << MIPI_PHY_D1_CONTROL_MIPI_DATA_PHY_SHUTDOWNB_SHFT);
+	CDBG("%s MIPI_PHY_D1_CONTROL val=0x%x\n", __func__, val);
+	msm_io_w(val, csibase + MIPI_PHY_D1_CONTROL);
+
+	msm_io_w(0x00000000, csibase + MIPI_PHY_D2_CONTROL);
+	msm_io_w(0x00000000, csibase + MIPI_PHY_D3_CONTROL);
+
+	/* program number of lanes and lane mapping */
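+	/*
+	 * MIPI_CAMERA_CNTL: the lane mapping goes in bits [8+] and the low
+	 * nibble encodes the lane count (0x4 = 1 lane ... 0x7 = 4 lanes).
+	 */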
+	switch (csi_params->lane_cnt) {
+	case 1:
+		msm_io_w(csi_params->lane_assign << 8 | 0x4,
+			csibase + MIPI_CAMERA_CNTL);
+		break;
+	case 2:
+		msm_io_w(csi_params->lane_assign << 8 | 0x5,
+			csibase + MIPI_CAMERA_CNTL);
+		break;
+	case 3:
+		msm_io_w(csi_params->lane_assign << 8 | 0x6,
+			csibase + MIPI_CAMERA_CNTL);
+		break;
+	case 4:
+		msm_io_w(csi_params->lane_assign << 8 | 0x7,
+			csibase + MIPI_CAMERA_CNTL);
+		break;
+	}
+
+	msm_io_w(0xFFFFF3FF, csibase + MIPI_INTERRUPT_MASK);
+	/*clear IRQ bits - write 1 clears the status*/
+	msm_io_w(0xFFFFF3FF, csibase + MIPI_INTERRUPT_STATUS);
+
+	return rc;
+}
+
+void msm_camio_set_perf_lvl(enum msm_bus_perf_setting perf_setting)
+{
+	switch (perf_setting) {
+	case S_INIT:
+		add_axi_qos();
+		break;
+	case S_PREVIEW:
+		update_axi_qos(MSM_AXI_QOS_PREVIEW);
+		break;
+	case S_VIDEO:
+		update_axi_qos(MSM_AXI_QOS_RECORDING);
+		break;
+	case S_CAPTURE:
+		update_axi_qos(MSM_AXI_QOS_SNAPSHOT);
+		break;
+	case S_DEFAULT:
+		update_axi_qos(PM_QOS_DEFAULT_VALUE);
+		break;
+	case S_EXIT:
+		release_axi_qos();
+		break;
+	default:
+		CDBG("%s: INVALID CASE\n", __func__);
+	}
+}
diff --git a/drivers/media/video/msm/msm_io_8960.c b/drivers/media/video/msm/msm_io_8960.c
new file mode 100644
index 0000000..eb29d08
--- /dev/null
+++ b/drivers/media/video/msm/msm_io_8960.c
@@ -0,0 +1,1359 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/regulator/consumer.h>
+#include <mach/gpio.h>
+#include <mach/board.h>
+#include <mach/camera.h>
+#include <mach/vreg.h>
+#include <mach/clk.h>
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
+#include "msm_ispif.h"
+
+#define DBG_CSID 0
+#define DBG_CSIPHY 0
+
+/* MIPI	CSI	PHY registers */
+#define MIPI_CSIPHY_LNn_CFG1_ADDR                0x0
+#define MIPI_CSIPHY_LNn_CFG2_ADDR                0x4
+#define MIPI_CSIPHY_LNn_CFG3_ADDR                0x8
+#define MIPI_CSIPHY_LNn_CFG4_ADDR                0xC
+#define MIPI_CSIPHY_LNn_CFG5_ADDR                0x10
+#define MIPI_CSIPHY_LNCK_CFG1_ADDR               0x100
+#define MIPI_CSIPHY_LNCK_CFG2_ADDR               0x104
+#define MIPI_CSIPHY_LNCK_CFG3_ADDR               0x108
+#define MIPI_CSIPHY_LNCK_CFG4_ADDR               0x10C
+#define MIPI_CSIPHY_LNCK_CFG5_ADDR               0x110
+#define MIPI_CSIPHY_LNCK_MISC1_ADDR              0x128
+#define MIPI_CSIPHY_GLBL_T_INIT_CFG0_ADDR        0x1E0
+#define MIPI_CSIPHY_T_WAKEUP_CFG0_ADDR           0x1E8
+#define MIPI_CSIPHY_GLBL_PWR_CFG_ADDR           0x0144
+#define MIPI_CSIPHY_INTERRUPT_STATUS0_ADDR      0x0180
+#define MIPI_CSIPHY_INTERRUPT_STATUS1_ADDR      0x0184
+#define MIPI_CSIPHY_INTERRUPT_STATUS2_ADDR      0x0188
+#define MIPI_CSIPHY_INTERRUPT_STATUS3_ADDR      0x018C
+#define MIPI_CSIPHY_INTERRUPT_STATUS4_ADDR      0x0190
+#define MIPI_CSIPHY_INTERRUPT_MASK0_ADDR        0x01A0
+#define MIPI_CSIPHY_INTERRUPT_MASK1_ADDR        0x01A4
+#define MIPI_CSIPHY_INTERRUPT_MASK2_ADDR        0x01A8
+#define MIPI_CSIPHY_INTERRUPT_MASK3_ADDR        0x01AC
+#define MIPI_CSIPHY_INTERRUPT_MASK4_ADDR        0x01B0
+#define MIPI_CSIPHY_INTERRUPT_CLEAR0_ADDR       0x01C0
+#define MIPI_CSIPHY_INTERRUPT_CLEAR1_ADDR       0x01C4
+#define MIPI_CSIPHY_INTERRUPT_CLEAR2_ADDR       0x01C8
+#define MIPI_CSIPHY_INTERRUPT_CLEAR3_ADDR       0x01CC
+#define MIPI_CSIPHY_INTERRUPT_CLEAR4_ADDR       0x01D0
+
+/* MIPI	CSID registers */
+#define CSID_CORE_CTRL_ADDR                     0x4
+#define CSID_RST_CMD_ADDR                       0x8
+#define CSID_CID_LUT_VC_0_ADDR                  0xc
+#define CSID_CID_LUT_VC_1_ADDR                  0x10
+#define CSID_CID_LUT_VC_2_ADDR                  0x14
+#define CSID_CID_LUT_VC_3_ADDR                  0x18
+#define CSID_CID_n_CFG_ADDR                     0x1C
+#define CSID_IRQ_CLEAR_CMD_ADDR                 0x5c
+#define CSID_IRQ_MASK_ADDR                      0x60
+#define CSID_IRQ_STATUS_ADDR                    0x64
+#define CSID_CAPTURED_UNMAPPED_LONG_PKT_HDR_ADDR    0x68
+#define CSID_CAPTURED_MMAPPED_LONG_PKT_HDR_ADDR     0x6c
+#define CSID_CAPTURED_SHORT_PKT_ADDR                0x70
+#define CSID_CAPTURED_LONG_PKT_HDR_ADDR             0x74
+#define CSID_CAPTURED_LONG_PKT_FTR_ADDR             0x78
+#define CSID_PIF_MISR_DL0_ADDR                      0x7C
+#define CSID_PIF_MISR_DL1_ADDR                      0x80
+#define CSID_PIF_MISR_DL2_ADDR                      0x84
+#define CSID_PIF_MISR_DL3_ADDR                      0x88
+#define CSID_STATS_TOTAL_PKTS_RCVD_ADDR             0x8C
+#define CSID_STATS_ECC_ADDR                         0x90
+#define CSID_STATS_CRC_ADDR                         0x94
+#define CSID_TG_CTRL_ADDR                           0x9C
+#define CSID_TG_VC_CFG_ADDR                         0xA0
+#define CSID_TG_DT_n_CFG_0_ADDR                     0xA8
+#define CSID_TG_DT_n_CFG_1_ADDR                     0xAC
+#define CSID_TG_DT_n_CFG_2_ADDR                     0xB0
+#define CSID_TG_DT_n_CFG_3_ADDR                     0xD8
+
+/* Regulator Voltage and Current */
+
+#define CAM_VAF_MINUV                 2800000
+#define CAM_VAF_MAXUV                 2800000
+#define CAM_VDIG_MINUV                    1200000
+#define CAM_VDIG_MAXUV                    1200000
+#define CAM_VANA_MINUV                    2800000
+#define CAM_VANA_MAXUV                    2850000
+#define CAM_CSI_VDD_MINUV                  1200000
+#define CAM_CSI_VDD_MAXUV                  1200000
+
+#define CAM_VAF_LOAD_UA               300000
+#define CAM_VDIG_LOAD_UA                  105000
+#define CAM_VANA_LOAD_UA                  85600
+#define CAM_CSI_LOAD_UA                    20000
+
+static struct clk *camio_cam_clk;
+static struct clk *camio_vfe_clk;
+static struct clk *camio_csi_src_clk;
+static struct clk *camio_csi1_src_clk;
+static struct clk *camio_csi0_vfe_clk;
+static struct clk *camio_csi0_clk;
+static struct clk *camio_csi0_pclk;
+static struct clk *camio_csi_pix_clk;
+static struct clk *camio_csi_rdi_clk;
+static struct clk *camio_csiphy0_timer_clk;
+static struct clk *camio_csiphy1_timer_clk;
+static struct clk *camio_vfe_axi_clk;
+static struct clk *camio_vfe_pclk;
+static struct clk *camio_csi0_phy_clk;
+static struct clk *camio_csiphy_timer_src_clk;
+
+/*static struct clk *camio_vfe_pclk;*/
+static struct clk *camio_jpeg_clk;
+static struct clk *camio_jpeg_pclk;
+static struct clk *camio_vpe_clk;
+static struct clk *camio_vpe_pclk;
+static struct regulator *fs_vfe;
+static struct regulator *fs_ijpeg;
+static struct regulator *fs_vpe;
+static struct regulator *cam_vana;
+static struct regulator *cam_vio;
+static struct regulator *cam_vdig;
+static struct regulator *cam_vaf;
+static struct regulator *mipi_csi_vdd;
+
+static struct msm_camera_io_clk camio_clk;
+static struct platform_device *camio_dev;
+static struct resource *csidio, *csiphyio;
+static struct resource *csid_mem, *csiphy_mem;
+static struct resource *csid_irq, *csiphy_irq;
+void __iomem *csidbase, *csiphybase;
+
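+/*
+ * Bus bandwidth vectors for each camera use case.  For every master
+ * (VFE, VPE, JPEG encoder) towards the EBI slave, ab is the average
+ * (arbitrated) bandwidth request and ib the instantaneous one, in bytes/sec.
+ */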
+static struct msm_bus_vectors cam_init_vectors[] = {
+	{
+		.src = MSM_BUS_MASTER_VFE,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 0,
+		.ib  = 0,
+	},
+	{
+		.src = MSM_BUS_MASTER_VPE,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 0,
+		.ib  = 0,
+	},
+	{
+		.src = MSM_BUS_MASTER_JPEG_ENC,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 0,
+		.ib  = 0,
+	},
+};
+
+static struct msm_bus_vectors cam_preview_vectors[] = {
+	{
+		.src = MSM_BUS_MASTER_VFE,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 1521190000,
+		.ib  = 1521190000,
+	},
+	{
+		.src = MSM_BUS_MASTER_VPE,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 0,
+		.ib  = 0,
+	},
+	{
+		.src = MSM_BUS_MASTER_JPEG_ENC,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 0,
+		.ib  = 0,
+	},
+};
+
+static struct msm_bus_vectors cam_video_vectors[] = {
+	{
+		.src = MSM_BUS_MASTER_VFE,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 1521190000,
+		.ib  = 1521190000,
+	},
+	{
+		.src = MSM_BUS_MASTER_VPE,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 1521190000,
+		.ib  = 1521190000,
+	},
+	{
+		.src = MSM_BUS_MASTER_JPEG_ENC,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 0,
+		.ib  = 0,
+	},
+};
+
+static struct msm_bus_vectors cam_snapshot_vectors[] = {
+	{
+		.src = MSM_BUS_MASTER_VFE,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 1521190000,
+		.ib  = 1521190000,
+	},
+	{
+		.src = MSM_BUS_MASTER_VPE,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 0,
+		.ib  = 0,
+	},
+	{
+		.src = MSM_BUS_MASTER_JPEG_ENC,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 1521190000,
+		.ib  = 1521190000,
+	},
+};
+
+static struct msm_bus_paths cam_bus_client_config[] = {
+	{
+		ARRAY_SIZE(cam_init_vectors),
+		cam_init_vectors,
+	},
+	{
+		ARRAY_SIZE(cam_preview_vectors),
+		cam_preview_vectors,
+	},
+	{
+		ARRAY_SIZE(cam_video_vectors),
+		cam_video_vectors,
+	},
+	{
+		ARRAY_SIZE(cam_snapshot_vectors),
+		cam_snapshot_vectors,
+	},
+};
+
+static struct msm_bus_scale_pdata cam_bus_client_pdata = {
+		cam_bus_client_config,
+		ARRAY_SIZE(cam_bus_client_config),
+		.name = "msm_camera",
+};
+
+
+void msm_io_w(u32 data, void __iomem *addr)
+{
+	CDBG("%s: %08x %08x\n", __func__, (int) (addr), (data));
+	writel_relaxed((data), (addr));
+}
+
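+/* Ordered variant of msm_io_w(): barriers before and after the write */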
+void msm_io_w_mb(u32 data, void __iomem *addr)
+{
+	CDBG("%s: %08x %08x\n", __func__, (int) (addr), (data));
+	wmb();
+	writel_relaxed((data), (addr));
+	wmb();
+}
+
+u32 msm_io_r(void __iomem *addr)
+{
+	uint32_t data = readl_relaxed(addr);
+	CDBG("%s: %08x %08x\n", __func__, (int) (addr), (data));
+	return data;
+}
+
+u32 msm_io_r_mb(void __iomem *addr)
+{
+	uint32_t data;
+	rmb();
+	data = readl_relaxed(addr);
+	rmb();
+	CDBG("%s: %08x %08x\n", __func__, (int) (addr), (data));
+	return data;
+}
+
+void msm_io_memcpy_toio(void __iomem *dest_addr,
+	void __iomem *src_addr, u32 len)
+{
+	int i;
+	u32 *d = (u32 *) dest_addr;
+	u32 *s = (u32 *) src_addr;
+	/* memcpy_toio does not work. Use writel_relaxed for now */
+	for (i = 0; i < len; i++)
+		writel_relaxed(*s++, d++);
+}
+
+void msm_io_dump(void __iomem *addr, int size)
+{
+	char line_str[128], *p_str;
+	int i;
+	u32 *p = (u32 *) addr;
+	u32 data;
+	CDBG("%s: %p %d\n", __func__, addr, size);
+	line_str[0] = '\0';
+	p_str = line_str;
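+	/* dump 32-bit words, four per line, each line prefixed with its address */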
+	for (i = 0; i < size/4; i++) {
+		if (i % 4 == 0) {
+			sprintf(p_str, "%08x: ", (u32) p);
+			p_str += 10;
+		}
+		data = readl_relaxed(p++);
+		sprintf(p_str, "%08x ", data);
+		p_str += 9;
+		if ((i + 1) % 4 == 0) {
+			CDBG("%s\n", line_str);
+			line_str[0] = '\0';
+			p_str = line_str;
+		}
+	}
+	if (line_str[0] != '\0')
+		CDBG("%s\n", line_str);
+}
+
+void msm_io_memcpy(void __iomem *dest_addr, void __iomem *src_addr, u32 len)
+{
+	CDBG("%s: %p %p %d\n", __func__, dest_addr, src_addr, len);
+	msm_io_memcpy_toio(dest_addr, src_addr, len / 4);
+	msm_io_dump(dest_addr, len);
+}
+
+static int msm_camera_vreg_enable(struct platform_device *pdev)
+{
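+	/*
+	 * Bring up each camera supply in turn: get the regulator, set its
+	 * voltage range and load current where applicable, then enable it.
+	 * On any failure, unwind the already-enabled supplies through the
+	 * labels at the end of the function.
+	 */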
+	if (mipi_csi_vdd == NULL) {
+		mipi_csi_vdd = regulator_get(&pdev->dev, "mipi_csi_vdd");
+		if (IS_ERR(mipi_csi_vdd)) {
+			CDBG("%s: VREG MIPI CSI VDD get failed\n", __func__);
+			mipi_csi_vdd = NULL;
+			return -ENODEV;
+		}
+		if (regulator_set_voltage(mipi_csi_vdd, CAM_CSI_VDD_MINUV,
+			CAM_CSI_VDD_MAXUV)) {
+			CDBG("%s: VREG MIPI CSI VDD set voltage failed\n",
+				__func__);
+			goto mipi_csi_vdd_put;
+		}
+		if (regulator_set_optimum_mode(mipi_csi_vdd,
+			CAM_CSI_LOAD_UA) < 0) {
+			CDBG("%s: VREG MIPI CSI set optimum mode failed\n",
+				__func__);
+			goto mipi_csi_vdd_release;
+		}
+		if (regulator_enable(mipi_csi_vdd)) {
+			CDBG("%s: VREG MIPI CSI VDD enable failed\n",
+				__func__);
+			goto mipi_csi_vdd_disable;
+		}
+	}
+	if (cam_vana == NULL) {
+		cam_vana = regulator_get(&pdev->dev, "cam_vana");
+		if (IS_ERR(cam_vana)) {
+			CDBG("%s: VREG CAM VANA get failed\n", __func__);
+			cam_vana = NULL;
+			goto mipi_csi_vdd_disable;
+		}
+		if (regulator_set_voltage(cam_vana, CAM_VANA_MINUV,
+			CAM_VANA_MAXUV)) {
+			CDBG("%s: VREG CAM VANA set voltage failed\n",
+				__func__);
+			goto cam_vana_put;
+		}
+		if (regulator_set_optimum_mode(cam_vana,
+			CAM_VANA_LOAD_UA) < 0) {
+			CDBG("%s: VREG CAM VANA set optimum mode failed\n",
+				__func__);
+			goto cam_vana_release;
+		}
+		if (regulator_enable(cam_vana)) {
+			CDBG("%s: VREG CAM VANA enable failed\n", __func__);
+			goto cam_vana_disable;
+		}
+	}
+	if (cam_vio == NULL) {
+		cam_vio = regulator_get(&pdev->dev, "cam_vio");
+		if (IS_ERR(cam_vio)) {
+			CDBG("%s: VREG VIO get failed\n", __func__);
+			cam_vio = NULL;
+			goto cam_vana_disable;
+		}
+		if (regulator_enable(cam_vio)) {
+			CDBG("%s: VREG VIO enable failed\n", __func__);
+			goto cam_vio_put;
+		}
+	}
+	if (cam_vdig == NULL) {
+		cam_vdig = regulator_get(&pdev->dev, "cam_vdig");
+		if (IS_ERR(cam_vdig)) {
+			CDBG("%s: VREG CAM VDIG get failed\n", __func__);
+			cam_vdig = NULL;
+			goto cam_vio_disable;
+		}
+		if (regulator_set_voltage(cam_vdig, CAM_VDIG_MINUV,
+			CAM_VDIG_MAXUV)) {
+			CDBG("%s: VREG CAM VDIG set voltage failed\n",
+				__func__);
+			goto cam_vdig_put;
+		}
+		if (regulator_set_optimum_mode(cam_vdig,
+			CAM_VDIG_LOAD_UA) < 0) {
+			CDBG("%s: VREG CAM VDIG set optimum mode failed\n",
+				__func__);
+			goto cam_vdig_release;
+		}
+		if (regulator_enable(cam_vdig)) {
+			CDBG("%s: VREG CAM VDIG enable failed\n", __func__);
+			goto cam_vdig_disable;
+		}
+	}
+	if (cam_vaf == NULL) {
+		cam_vaf = regulator_get(&pdev->dev, "cam_vaf");
+		if (IS_ERR(cam_vaf)) {
+			CDBG("%s: VREG CAM VAF get failed\n", __func__);
+			cam_vaf = NULL;
+			goto cam_vdig_disable;
+		}
+		if (regulator_set_voltage(cam_vaf, CAM_VAF_MINUV,
+			CAM_VAF_MAXUV)) {
+			CDBG("%s: VREG CAM VAF set voltage failed\n",
+				__func__);
+			goto cam_vaf_put;
+		}
+		if (regulator_set_optimum_mode(cam_vaf,
+			CAM_VAF_LOAD_UA) < 0) {
+			CDBG("%s: VREG CAM VAF set optimum mode failed\n",
+				__func__);
+			goto cam_vaf_release;
+		}
+		if (regulator_enable(cam_vaf)) {
+			CDBG("%s: VREG CAM VAF enable failed\n", __func__);
+			goto cam_vaf_disable;
+		}
+	}
+	if (fs_vfe == NULL) {
+		fs_vfe = regulator_get(&pdev->dev, "fs_vfe");
+		if (IS_ERR(fs_vfe)) {
+			CDBG("%s: Regulator FS_VFE get failed %ld\n", __func__,
+				PTR_ERR(fs_vfe));
+			fs_vfe = NULL;
+		} else if (regulator_enable(fs_vfe)) {
+			CDBG("%s: Regulator FS_VFE enable failed\n", __func__);
+			regulator_put(fs_vfe);
+		}
+	}
+	return 0;
+
+cam_vaf_disable:
+	regulator_set_optimum_mode(cam_vaf, 0);
+cam_vaf_release:
+	regulator_set_voltage(cam_vaf, 0, CAM_VAF_MAXUV);
+	regulator_disable(cam_vaf);
+cam_vaf_put:
+	regulator_put(cam_vaf);
+	cam_vaf = NULL;
+cam_vdig_disable:
+	regulator_set_optimum_mode(cam_vdig, 0);
+cam_vdig_release:
+	regulator_set_voltage(cam_vdig, 0, CAM_VDIG_MAXUV);
+	regulator_disable(cam_vdig);
+cam_vdig_put:
+	regulator_put(cam_vdig);
+	cam_vdig = NULL;
+cam_vio_disable:
+	regulator_disable(cam_vio);
+cam_vio_put:
+	regulator_put(cam_vio);
+	cam_vio = NULL;
+cam_vana_disable:
+	regulator_set_optimum_mode(cam_vana, 0);
+cam_vana_release:
+	regulator_set_voltage(cam_vana, 0, CAM_VANA_MAXUV);
+	regulator_disable(cam_vana);
+cam_vana_put:
+	regulator_put(cam_vana);
+	cam_vana = NULL;
+mipi_csi_vdd_disable:
+	regulator_set_optimum_mode(mipi_csi_vdd, 0);
+mipi_csi_vdd_release:
+	regulator_set_voltage(mipi_csi_vdd, 0, CAM_CSI_VDD_MAXUV);
+	regulator_disable(mipi_csi_vdd);
+
+mipi_csi_vdd_put:
+	regulator_put(mipi_csi_vdd);
+	mipi_csi_vdd = NULL;
+	return -ENODEV;
+}
+
+static void msm_camera_vreg_disable(void)
+{
+	if (mipi_csi_vdd) {
+		regulator_set_voltage(mipi_csi_vdd, 0, CAM_CSI_VDD_MAXUV);
+		regulator_set_optimum_mode(mipi_csi_vdd, 0);
+		regulator_disable(mipi_csi_vdd);
+		regulator_put(mipi_csi_vdd);
+		mipi_csi_vdd = NULL;
+	}
+
+	if (cam_vana) {
+		regulator_set_voltage(cam_vana, 0, CAM_VANA_MAXUV);
+		regulator_set_optimum_mode(cam_vana, 0);
+		regulator_disable(cam_vana);
+		regulator_put(cam_vana);
+		cam_vana = NULL;
+	}
+
+	if (cam_vio) {
+		regulator_disable(cam_vio);
+		regulator_put(cam_vio);
+		cam_vio = NULL;
+	}
+
+	if (cam_vdig) {
+		regulator_set_voltage(cam_vdig, 0, CAM_VDIG_MAXUV);
+		regulator_set_optimum_mode(cam_vdig, 0);
+		regulator_disable(cam_vdig);
+		regulator_put(cam_vdig);
+		cam_vdig = NULL;
+	}
+
+	if (cam_vaf) {
+		regulator_set_voltage(cam_vaf, 0, CAM_VAF_MAXUV);
+		regulator_set_optimum_mode(cam_vaf, 0);
+		regulator_disable(cam_vaf);
+		regulator_put(cam_vaf);
+		cam_vaf = NULL;
+	}
+
+	if (fs_vfe) {
+		regulator_disable(fs_vfe);
+		regulator_put(fs_vfe);
+		fs_vfe = NULL;
+	}
+}
+
+int msm_camio_clk_enable(enum msm_camio_clk_type clktype)
+{
+	int rc = 0;
+	struct clk *clk = NULL;
+	struct msm_camera_sensor_info *sinfo = camio_dev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+	uint8_t csid_core = camdev->csid_core;
+
+	switch (clktype) {
+	case CAMIO_CAM_MCLK_CLK:
+		camio_cam_clk =
+		clk = clk_get(&camio_dev->dev, "cam_clk");
+		msm_camio_clk_rate_set_2(clk, camio_clk.mclk_clk_rate);
+		break;
+
+	case CAMIO_VFE_CLK:
+		camio_vfe_clk =
+		clk = clk_get(NULL, "vfe_clk");
+		msm_camio_clk_rate_set_2(clk, camio_clk.vfe_clk_rate);
+		break;
+
+	case CAMIO_VFE_AXI_CLK:
+		camio_vfe_axi_clk =
+		clk = clk_get(NULL, "vfe_axi_clk");
+		break;
+
+	case CAMIO_VFE_PCLK:
+		camio_vfe_pclk =
+		clk = clk_get(NULL, "vfe_pclk");
+		break;
+
+	case CAMIO_CSI0_VFE_CLK:
+		camio_csi0_vfe_clk =
+		clk = clk_get(NULL, "csi_vfe_clk");
+		break;
+/*
+	case CAMIO_CSI1_VFE_CLK:
+		camio_csi1_vfe_clk =
+		clk = clk_get(&camio_dev->dev, "csi_vfe_clk");
+		break;
+*/
+	case CAMIO_CSI_SRC_CLK:
+		camio_csi_src_clk =
+		clk = clk_get(NULL, "csi_src_clk");
+		msm_camio_clk_rate_set_2(clk, 177780000);
+		break;
+
+	case CAMIO_CSI1_SRC_CLK:
+		camio_csi1_src_clk =
+		clk = clk_get(&camio_dev->dev, "csi_src_clk");
+		msm_camio_clk_rate_set_2(clk, 177780000);
+		break;
+
+	case CAMIO_CSI0_CLK:
+		camio_csi0_clk =
+		clk = clk_get(&camio_dev->dev, "csi_clk");
+		break;
+
+	case CAMIO_CSI0_PHY_CLK:
+		camio_csi0_phy_clk =
+		clk = clk_get(&camio_dev->dev, "csi_phy_clk");
+		break;
+
+	case CAMIO_CSI_PIX_CLK:
+		camio_csi_pix_clk =
+		clk = clk_get(NULL, "csi_pix_clk");
+		/* mux to select between csid0 and csid1 */
+		msm_camio_clk_rate_set_2(clk, csid_core);
+		break;
+
+	case CAMIO_CSI_RDI_CLK:
+		camio_csi_rdi_clk =
+		clk = clk_get(NULL, "csi_rdi_clk");
+		/* mux to select between csid0 and csid1 */
+		msm_camio_clk_rate_set_2(clk, csid_core);
+		break;
+
+	case CAMIO_CSIPHY0_TIMER_CLK:
+		camio_csiphy0_timer_clk =
+		clk = clk_get(NULL, "csi0phy_timer_clk");
+		break;
+
+	case CAMIO_CSIPHY1_TIMER_CLK:
+		camio_csiphy1_timer_clk =
+		clk = clk_get(NULL, "csi1phy_timer_clk");
+		break;
+
+	case CAMIO_CSIPHY_TIMER_SRC_CLK:
+		camio_csiphy_timer_src_clk =
+		clk = clk_get(NULL, "csiphy_timer_src_clk");
+		msm_camio_clk_rate_set_2(clk, 177780000);
+		break;
+
+	case CAMIO_CSI0_PCLK:
+		camio_csi0_pclk =
+		clk = clk_get(NULL, "csi_pclk");
+		break;
+
+	case CAMIO_JPEG_CLK:
+		camio_jpeg_clk =
+		clk = clk_get(NULL, "ijpeg_clk");
+		clk_set_min_rate(clk, 144000000);
+		break;
+
+	case CAMIO_JPEG_PCLK:
+		camio_jpeg_pclk =
+		clk = clk_get(NULL, "ijpeg_pclk");
+		break;
+
+	case CAMIO_VPE_CLK:
+		camio_vpe_clk =
+		clk = clk_get(NULL, "vpe_clk");
+		msm_camio_clk_set_min_rate(clk, 150000000);
+		break;
+
+	case CAMIO_VPE_PCLK:
+		camio_vpe_pclk =
+		clk = clk_get(NULL, "vpe_pclk");
+		break;
+
+	default:
+		break;
+	}
+
+	if (!IS_ERR(clk))
+		rc = clk_enable(clk);
+	else
+		rc = PTR_ERR(clk);
+
+	if (rc < 0)
+		pr_err("%s(%d) failed %d\n", __func__, clktype, rc);
+
+	return rc;
+}
+
+int msm_camio_clk_disable(enum msm_camio_clk_type clktype)
+{
+	int rc = 0;
+	struct clk *clk = NULL;
+
+	switch (clktype) {
+	case CAMIO_CAM_MCLK_CLK:
+		clk = camio_cam_clk;
+		break;
+
+	case CAMIO_VFE_CLK:
+		clk = camio_vfe_clk;
+		break;
+
+	case CAMIO_VFE_AXI_CLK:
+		clk = camio_vfe_axi_clk;
+		break;
+
+	case CAMIO_VFE_PCLK:
+		clk = camio_vfe_pclk;
+		break;
+
+	case CAMIO_CSI0_VFE_CLK:
+		clk = camio_csi0_vfe_clk;
+		break;
+
+	case CAMIO_CSI_SRC_CLK:
+		clk = camio_csi_src_clk;
+		break;
+
+	case CAMIO_CSI0_CLK:
+		clk = camio_csi0_clk;
+		break;
+
+	case CAMIO_CSI0_PHY_CLK:
+		clk = camio_csi0_phy_clk;
+		break;
+
+	case CAMIO_CSI_PIX_CLK:
+		clk = camio_csi_pix_clk;
+		break;
+
+	case CAMIO_CSI_RDI_CLK:
+		clk = camio_csi_rdi_clk;
+		break;
+
+	case CAMIO_CSIPHY0_TIMER_CLK:
+		clk = camio_csiphy0_timer_clk;
+		break;
+
+	case CAMIO_CSIPHY_TIMER_SRC_CLK:
+		clk = camio_csiphy_timer_src_clk;
+		break;
+
+	case CAMIO_CSI0_PCLK:
+		clk = camio_csi0_pclk;
+		break;
+
+	case CAMIO_JPEG_CLK:
+		clk = camio_jpeg_clk;
+		break;
+
+	case CAMIO_JPEG_PCLK:
+		clk = camio_jpeg_pclk;
+		break;
+
+	case CAMIO_VPE_CLK:
+		clk = camio_vpe_clk;
+		break;
+
+	case CAMIO_VPE_PCLK:
+		clk = camio_vpe_pclk;
+		break;
+
+	default:
+		break;
+	}
+
+	if (!IS_ERR(clk)) {
+		clk_disable(clk);
+		clk_put(clk);
+	} else
+		rc = PTR_ERR(clk);
+
+	if (rc < 0)
+		pr_err("%s(%d) failed %d\n", __func__, clktype, rc);
+
+	return rc;
+}
+
+void msm_camio_vfe_clk_rate_set(int rate)
+{
+	struct clk *clk = camio_vfe_clk;
+	if (rate > clk_get_rate(clk))
+		clk_set_rate(clk, rate);
+}
+
+void msm_camio_clk_rate_set(int rate)
+{
+	struct clk *clk = camio_cam_clk;
+	clk_set_rate(clk, rate);
+}
+
+void msm_camio_clk_rate_set_2(struct clk *clk, int rate)
+{
+	clk_set_rate(clk, rate);
+}
+
+void msm_camio_clk_set_min_rate(struct clk *clk, int rate)
+{
+	clk_set_min_rate(clk, rate);
+}
+
+#if DBG_CSID
+static irqreturn_t msm_io_csi_irq(int irq_num, void *data)
+{
+	uint32_t irq;
+	irq = msm_io_r(csidbase + CSID_IRQ_STATUS_ADDR);
+	CDBG("%s CSID_IRQ_STATUS_ADDR = 0x%x\n", __func__, irq);
+	msm_io_w(irq, csidbase + CSID_IRQ_CLEAR_CMD_ADDR);
+	irq = msm_io_r(csidbase + CSID_PIF_MISR_DL0_ADDR);
+	CDBG("%s CSID_PIF_MISR_DL0 = 0x%x\n", __func__, irq);
+	irq = msm_io_r(csidbase + CSID_PIF_MISR_DL1_ADDR);
+	CDBG("%s CSID_PIF_MISR_DL1 = 0x%x\n", __func__, irq);
+	irq = msm_io_r(csidbase + CSID_PIF_MISR_DL2_ADDR);
+	CDBG("%s CSID_PIF_MISR_DL2 = 0x%x\n", __func__, irq);
+	irq = msm_io_r(csidbase + CSID_PIF_MISR_DL3_ADDR);
+	CDBG("%s CSID_PIF_MISR_DL3 = 0x%x\n", __func__, irq);
+	irq = msm_io_r(csidbase + CSID_STATS_TOTAL_PKTS_RCVD_ADDR);
+	CDBG("%s PACKET Count = %d\n", __func__, irq);
+	return IRQ_HANDLED;
+}
+#endif
+/*
+void msm_io_read_interrupt(void)
+{
+	uint32_t irq;
+	irq = msm_io_r(csiphybase + MIPI_CSIPHY_INTERRUPT_STATUS0_ADDR);
+	CDBG("%s MIPI_CSIPHY_INTERRUPT_STATUS0 = 0x%x\n", __func__, irq);
+	irq = msm_io_r(csiphybase + MIPI_CSIPHY_INTERRUPT_STATUS0_ADDR);
+	CDBG("%s MIPI_CSIPHY_INTERRUPT_STATUS0 = 0x%x\n", __func__, irq);
+	irq = msm_io_r(csiphybase + MIPI_CSIPHY_INTERRUPT_STATUS1_ADDR);
+	CDBG("%s MIPI_CSIPHY_INTERRUPT_STATUS1 = 0x%x\n", __func__, irq);
+	irq = msm_io_r(csiphybase + MIPI_CSIPHY_INTERRUPT_STATUS2_ADDR);
+	CDBG("%s MIPI_CSIPHY_INTERRUPT_STATUS2 = 0x%x\n", __func__, irq);
+	irq = msm_io_r(csiphybase + MIPI_CSIPHY_INTERRUPT_STATUS3_ADDR);
+	CDBG("%s MIPI_CSIPHY_INTERRUPT_STATUS3 = 0x%x\n", __func__, irq);
+	irq = msm_io_r(csiphybase + MIPI_CSIPHY_INTERRUPT_STATUS4_ADDR);
+	CDBG("%s MIPI_CSIPHY_INTERRUPT_STATUS4 = 0x%x\n", __func__, irq);
+	msm_io_w(irq, csiphybase + MIPI_CSIPHY_INTERRUPT_CLEAR0_ADDR);
+	msm_io_w(irq, csiphybase + MIPI_CSIPHY_INTERRUPT_CLEAR1_ADDR);
+	msm_io_w(irq, csiphybase + MIPI_CSIPHY_INTERRUPT_CLEAR2_ADDR);
+	msm_io_w(irq, csiphybase + MIPI_CSIPHY_INTERRUPT_CLEAR3_ADDR);
+	msm_io_w(irq, csiphybase + MIPI_CSIPHY_INTERRUPT_CLEAR4_ADDR);
+	msm_io_w(0x1, csiphybase + 0x164);
+	msm_io_w(0x0, csiphybase + 0x164);
+	return;
+}
+*/
+#if DBG_CSIPHY
+static irqreturn_t msm_io_csiphy_irq(int irq_num, void *data)
+{
+	uint32_t irq;
+	irq = msm_io_r(csiphybase + MIPI_CSIPHY_INTERRUPT_STATUS0_ADDR);
+	msm_io_w(irq, csiphybase + MIPI_CSIPHY_INTERRUPT_CLEAR0_ADDR);
+	CDBG("%s MIPI_CSIPHY_INTERRUPT_STATUS0 = 0x%x\n", __func__, irq);
+	irq = msm_io_r(csiphybase + MIPI_CSIPHY_INTERRUPT_STATUS1_ADDR);
+	msm_io_w(irq, csiphybase + MIPI_CSIPHY_INTERRUPT_CLEAR1_ADDR);
+	CDBG("%s MIPI_CSIPHY_INTERRUPT_STATUS1 = 0x%x\n", __func__, irq);
+	irq = msm_io_r(csiphybase + MIPI_CSIPHY_INTERRUPT_STATUS2_ADDR);
+	msm_io_w(irq, csiphybase + MIPI_CSIPHY_INTERRUPT_CLEAR2_ADDR);
+	CDBG("%s MIPI_CSIPHY_INTERRUPT_STATUS2 = 0x%x\n", __func__, irq);
+	irq = msm_io_r(csiphybase + MIPI_CSIPHY_INTERRUPT_STATUS3_ADDR);
+	msm_io_w(irq, csiphybase + MIPI_CSIPHY_INTERRUPT_CLEAR3_ADDR);
+	CDBG("%s MIPI_CSIPHY_INTERRUPT_STATUS3 = 0x%x\n", __func__, irq);
+	irq = msm_io_r(csiphybase + MIPI_CSIPHY_INTERRUPT_STATUS4_ADDR);
+	msm_io_w(irq, csiphybase + MIPI_CSIPHY_INTERRUPT_CLEAR4_ADDR);
+	CDBG("%s MIPI_CSIPHY_INTERRUPT_STATUS4 = 0x%x\n", __func__, irq);
+	msm_io_w(0x1, csiphybase + 0x164);
+	msm_io_w(0x0, csiphybase + 0x164);
+	return IRQ_HANDLED;
+}
+#endif
+static int msm_camio_enable_all_clks(uint8_t csid_core)
+{
+	int rc = 0;
+
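+	/*
+	 * Enable the CSI/VFE clock tree in dependency order; on failure the
+	 * goto chain below releases whatever was already enabled, in reverse.
+	 */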
+	rc = msm_camio_clk_enable(CAMIO_CSI_SRC_CLK);
+	if (rc < 0)
+		goto csi_src_fail;
+	if (csid_core == 1) {
+		rc = msm_camio_clk_enable(CAMIO_CSI1_SRC_CLK);
+		if (rc < 0)
+			goto csi1_src_fail;
+	}
+	rc = msm_camio_clk_enable(CAMIO_CSI0_CLK);
+	if (rc < 0)
+		goto csi0_fail;
+	rc = msm_camio_clk_enable(CAMIO_CSI0_PHY_CLK);
+	if (rc < 0)
+		goto csi0_phy_fail;
+	rc = msm_camio_clk_enable(CAMIO_CSIPHY_TIMER_SRC_CLK);
+	if (rc < 0)
+		goto csiphy_timer_src_fail;
+	if (csid_core == 0) {
+		rc = msm_camio_clk_enable(CAMIO_CSIPHY0_TIMER_CLK);
+		if (rc < 0)
+			goto csiphy0_timer_fail;
+	} else if (csid_core == 1) {
+		rc = msm_camio_clk_enable(CAMIO_CSIPHY1_TIMER_CLK);
+		if (rc < 0)
+			goto csiphy1_timer_fail;
+	}
+	rc = msm_camio_clk_enable(CAMIO_CSI0_PCLK);
+	if (rc < 0)
+		goto csi0p_fail;
+
+	rc = msm_camio_clk_enable(CAMIO_VFE_CLK);
+	if (rc < 0)
+		goto vfe_fail;
+	rc = msm_camio_clk_enable(CAMIO_VFE_AXI_CLK);
+	if (rc < 0)
+		goto axi_fail;
+	rc = msm_camio_clk_enable(CAMIO_VFE_PCLK);
+	if (rc < 0)
+		goto vfep_fail;
+
+	rc = msm_camio_clk_enable(CAMIO_CSI0_VFE_CLK);
+	if (rc < 0)
+		goto csi0_vfe_fail;
+	rc = msm_camio_clk_enable(CAMIO_CSI_PIX_CLK);
+	if (rc < 0)
+		goto csi_pix_fail;
+	rc = msm_camio_clk_enable(CAMIO_CSI_RDI_CLK);
+	if (rc < 0)
+		goto csi_rdi_fail;
+	return rc;
+
+csi_rdi_fail:
+	msm_camio_clk_disable(CAMIO_CSI_PIX_CLK);
+csi_pix_fail:
+	msm_camio_clk_disable(CAMIO_CSI0_VFE_CLK);
+csi0_vfe_fail:
+	msm_camio_clk_disable(CAMIO_VFE_PCLK);
+vfep_fail:
+	msm_camio_clk_disable(CAMIO_VFE_AXI_CLK);
+axi_fail:
+	msm_camio_clk_disable(CAMIO_VFE_CLK);
+vfe_fail:
+	msm_camio_clk_disable(CAMIO_CSI0_PCLK);
+csi0p_fail:
+	msm_camio_clk_disable(CAMIO_CSIPHY0_TIMER_CLK);
+csiphy1_timer_fail:
+	msm_camio_clk_disable(CAMIO_CSIPHY1_TIMER_CLK);
+csiphy0_timer_fail:
+	msm_camio_clk_disable(CAMIO_CSIPHY_TIMER_SRC_CLK);
+csiphy_timer_src_fail:
+	msm_camio_clk_disable(CAMIO_CSI0_PHY_CLK);
+csi0_phy_fail:
+	msm_camio_clk_disable(CAMIO_CSI0_CLK);
+csi0_fail:
+	msm_camio_clk_disable(CAMIO_CSI1_SRC_CLK);
+csi1_src_fail:
+	msm_camio_clk_disable(CAMIO_CSI_SRC_CLK);
+csi_src_fail:
+	return rc;
+}
+
+static void msm_camio_disable_all_clks(uint8_t csid_core)
+{
+	msm_camio_clk_disable(CAMIO_CSI_RDI_CLK);
+	msm_camio_clk_disable(CAMIO_CSI_PIX_CLK);
+	msm_camio_clk_disable(CAMIO_CSI0_VFE_CLK);
+	msm_camio_clk_disable(CAMIO_VFE_PCLK);
+	msm_camio_clk_disable(CAMIO_VFE_AXI_CLK);
+	msm_camio_clk_disable(CAMIO_VFE_CLK);
+	msm_camio_clk_disable(CAMIO_CSI0_PCLK);
+	if (csid_core == 0)
+		msm_camio_clk_disable(CAMIO_CSIPHY0_TIMER_CLK);
+	else if (csid_core == 1)
+		msm_camio_clk_disable(CAMIO_CSIPHY1_TIMER_CLK);
+	msm_camio_clk_disable(CAMIO_CSIPHY_TIMER_SRC_CLK);
+	msm_camio_clk_disable(CAMIO_CSI0_PHY_CLK);
+	msm_camio_clk_disable(CAMIO_CSI0_CLK);
+	if (csid_core == 1)
+		msm_camio_clk_disable(CAMIO_CSI1_SRC_CLK);
+	msm_camio_clk_disable(CAMIO_CSI_SRC_CLK);
+}
+
+int msm_camio_jpeg_clk_disable(void)
+{
+	int rc = 0;
+	if (fs_ijpeg) {
+		rc = regulator_disable(fs_ijpeg);
+		if (rc < 0) {
+			CDBG("%s: Regulator disable failed %d\n", __func__, rc);
+			return rc;
+		}
+		regulator_put(fs_ijpeg);
+	}
+	rc = msm_camio_clk_disable(CAMIO_JPEG_PCLK);
+	if (rc < 0)
+		return rc;
+	rc = msm_camio_clk_disable(CAMIO_JPEG_CLK);
+	CDBG("%s: exit %d\n", __func__, rc);
+	return rc;
+}
+
+int msm_camio_jpeg_clk_enable(void)
+{
+	int rc = 0;
+	rc = msm_camio_clk_enable(CAMIO_JPEG_CLK);
+	if (rc < 0)
+		return rc;
+	rc = msm_camio_clk_enable(CAMIO_JPEG_PCLK);
+	if (rc < 0)
+		return rc;
+	fs_ijpeg = regulator_get(NULL, "fs_ijpeg");
+	if (IS_ERR(fs_ijpeg)) {
+		CDBG("%s: Regulator FS_IJPEG get failed %ld\n", __func__,
+			PTR_ERR(fs_ijpeg));
+		fs_ijpeg = NULL;
+	} else if (regulator_enable(fs_ijpeg)) {
+		CDBG("%s: Regulator FS_IJPEG enable failed\n", __func__);
+		regulator_put(fs_ijpeg);
+	}
+	CDBG("%s: exit %d\n", __func__, rc);
+	return rc;
+}
+
+int msm_camio_vpe_clk_disable(void)
+{
+	int rc = 0;
+	if (fs_vpe) {
+		regulator_disable(fs_vpe);
+		regulator_put(fs_vpe);
+	}
+
+	rc = msm_camio_clk_disable(CAMIO_VPE_CLK);
+	if (rc < 0)
+		return rc;
+	rc = msm_camio_clk_disable(CAMIO_VPE_PCLK);
+	return rc;
+}
+
+int msm_camio_vpe_clk_enable(uint32_t clk_rate)
+{
+	int rc = 0;
+	(void)clk_rate;
+	fs_vpe = regulator_get(NULL, "fs_vpe");
+	if (IS_ERR(fs_vpe)) {
+		CDBG("%s: Regulator FS_VPE get failed %ld\n", __func__,
+			PTR_ERR(fs_vpe));
+		fs_vpe = NULL;
+	} else if (regulator_enable(fs_vpe)) {
+		CDBG("%s: Regulator FS_VPE enable failed\n", __func__);
+		regulator_put(fs_vpe);
+	}
+
+	rc = msm_camio_clk_enable(CAMIO_VPE_CLK);
+	if (rc < 0)
+		return rc;
+	rc = msm_camio_clk_enable(CAMIO_VPE_PCLK);
+	return rc;
+}
+
+int msm_camio_enable(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+	uint8_t csid_core = camdev->csid_core;
+	char csid[] = "csid0";
+	char csiphy[] = "csiphy0";
+	if (csid_core > 2)
+		return -ENODEV;
+
+	csid[4] = '0' + csid_core;
+	csiphy[6] = '0' + csid_core;
+
+	camio_dev = pdev;
+	camio_clk = camdev->ioclk;
+
+	rc = msm_camio_enable_all_clks(csid_core);
+	if (rc < 0)
+		return rc;
+
+	csid_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, csid);
+	if (!csid_mem) {
+		pr_err("%s: no mem resource?\n", __func__);
+		return -ENODEV;
+	}
+	csid_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, csid);
+	if (!csid_irq) {
+		pr_err("%s: no irq resource?\n", __func__);
+		return -ENODEV;
+	}
+	csiphy_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, csiphy);
+	if (!csiphy_mem) {
+		pr_err("%s: no mem resource?\n", __func__);
+		return -ENODEV;
+	}
+	csiphy_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, csiphy);
+	if (!csiphy_irq) {
+		pr_err("%s: no irq resource?\n", __func__);
+		return -ENODEV;
+	}
+
+	csidio = request_mem_region(csid_mem->start,
+		resource_size(csid_mem), pdev->name);
+	if (!csidio) {
+		rc = -EBUSY;
+		goto common_fail;
+	}
+	csidbase = ioremap(csid_mem->start,
+		resource_size(csid_mem));
+	if (!csidbase) {
+		rc = -ENOMEM;
+		goto csi_busy;
+	}
+#if DBG_CSID
+	rc = request_irq(csid_irq->start, msm_io_csi_irq,
+		IRQF_TRIGGER_RISING, "csid", 0);
+	if (rc < 0)
+		goto csi_irq_fail;
+#endif
+	csiphyio = request_mem_region(csiphy_mem->start,
+		resource_size(csiphy_mem), pdev->name);
+	if (!csiphyio) {
+		rc = -EBUSY;
+		goto csi_irq_fail;
+	}
+	csiphybase = ioremap(csiphy_mem->start,
+		resource_size(csiphy_mem));
+	if (!csiphybase) {
+		rc = -ENOMEM;
+		goto csiphy_busy;
+	}
+#if DBG_CSIPHY
+	rc = request_irq(csiphy_irq->start, msm_io_csiphy_irq,
+		IRQF_TRIGGER_RISING, "csiphy", 0);
+	if (rc < 0)
+		goto csiphy_irq_fail;
+#endif
+	rc = msm_ispif_init(pdev);
+	if (rc < 0)
+		goto csiphy_irq_fail;
+	CDBG("camio enable done\n");
+	return 0;
+csiphy_irq_fail:
+	iounmap(csiphybase);
+csiphy_busy:
+	release_mem_region(csiphy_mem->start, resource_size(csiphy_mem));
+csi_irq_fail:
+	iounmap(csidbase);
+csi_busy:
+	release_mem_region(csid_mem->start, resource_size(csid_mem));
+common_fail:
+	msm_camio_disable_all_clks(csid_core);
+	msm_camera_vreg_disable();
+	camdev->camera_gpio_off();
+	return rc;
+}
+
+void msm_camio_disable(struct platform_device *pdev)
+{
+	struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+	uint8_t csid_core = camdev->csid_core;
+#if DBG_CSIPHY
+	free_irq(csiphy_irq->start, 0);
+#endif
+	iounmap(csiphybase);
+	release_mem_region(csiphy_mem->start, resource_size(csiphy_mem));
+
+#if DBG_CSID
+	free_irq(csid_irq->start, 0);
+#endif
+	iounmap(csidbase);
+	release_mem_region(csid_mem->start, resource_size(csid_mem));
+
+	msm_camio_disable_all_clks(csid_core);
+	msm_ispif_release(pdev);
+}
+
+int msm_camio_sensor_clk_on(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+	camio_dev = pdev;
+	camio_clk = camdev->ioclk;
+
+	msm_camera_vreg_enable(pdev);
+	msleep(20);
+	rc = camdev->camera_gpio_on();
+	if (rc < 0)
+		return rc;
+	return msm_camio_clk_enable(CAMIO_CAM_MCLK_CLK);
+}
+
+int msm_camio_sensor_clk_off(struct platform_device *pdev)
+{
+	struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+	msm_camera_vreg_disable();
+	camdev->camera_gpio_off();
+	return msm_camio_clk_disable(CAMIO_CAM_MCLK_CLK);
+}
+
+void msm_camio_vfe_blk_reset(void)
+{
+	return;
+}
+
+int msm_camio_probe_on(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+	camio_dev = pdev;
+	camio_clk = camdev->ioclk;
+
+	rc = camdev->camera_gpio_on();
+	if (rc < 0)
+		return rc;
+	msm_camera_vreg_enable(pdev);
+	return msm_camio_clk_enable(CAMIO_CAM_MCLK_CLK);
+}
+
+int msm_camio_probe_off(struct platform_device *pdev)
+{
+	struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+	msm_camera_vreg_disable();
+	camdev->camera_gpio_off();
+	return msm_camio_clk_disable(CAMIO_CAM_MCLK_CLK);
+}
+
+int msm_camio_csid_cid_lut(struct msm_camera_csid_lut_params *csid_lut_params)
+{
+	int rc = 0, i = 0;
+	uint32_t val = 0;
+
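+	/*
+	 * Each CID_LUT_VC register packs four 8-bit data-type entries, so
+	 * cid >> 2 selects the LUT register for a given CID.  Data types
+	 * outside 0x12-0x37 are not programmed.
+	 */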
+	for (i = 0; i < csid_lut_params->num_cid && i < 4; i++)	{
+		if (csid_lut_params->vc_cfg[i].dt < 0x12 ||
+			csid_lut_params->vc_cfg[i].dt > 0x37) {
+			CDBG("%s: unsupported data type 0x%x\n",
+				 __func__, csid_lut_params->vc_cfg[i].dt);
+			return rc;
+		}
+		val = msm_io_r(csidbase + CSID_CID_LUT_VC_0_ADDR +
+		(csid_lut_params->vc_cfg[i].cid >> 2) * 4)
+		& ~(0xFF << csid_lut_params->vc_cfg[i].cid * 8);
+		val |= csid_lut_params->vc_cfg[i].dt <<
+			csid_lut_params->vc_cfg[i].cid * 8;
+		msm_io_w(val, csidbase + CSID_CID_LUT_VC_0_ADDR +
+			(csid_lut_params->vc_cfg[i].cid >> 2) * 4);
+		val = csid_lut_params->vc_cfg[i].decode_format << 4 | 0x3;
+		msm_io_w(val, csidbase + CSID_CID_n_CFG_ADDR +
+			(csid_lut_params->vc_cfg[i].cid * 4));
+	}
+	return rc;
+}
+
+int msm_camio_csid_config(struct msm_camera_csid_params *csid_params)
+{
+	int rc = 0;
+	uint32_t val = 0;
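+	/* CSID core control: lane count - 1 in the low bits, lane mapping above */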
+	val = csid_params->lane_cnt - 1;
+	val |= csid_params->lane_assign << 2;
+	val |= 0x1 << 10;
+	val |= 0x1 << 11;
+	val |= 0x1 << 12;
+	val |= 0x1 << 28;
+	msm_io_w(val, csidbase + CSID_CORE_CTRL_ADDR);
+
+	rc = msm_camio_csid_cid_lut(&csid_params->lut_params);
+	if (rc < 0)
+		return rc;
+
+	msm_io_w(0xFFFFFFFF, csidbase + CSID_IRQ_MASK_ADDR);
+	msm_io_w(0xFFFFFFFF, csidbase + CSID_IRQ_CLEAR_CMD_ADDR);
+
+	msleep(20);
+	return rc;
+}
+
+int msm_camio_csiphy_config(struct msm_camera_csiphy_params *csiphy_params)
+{
+	int rc = 0;
+	int i = 0;
+	uint32_t val = 0;
+	if (csiphy_params->lane_cnt < 1 || csiphy_params->lane_cnt > 4) {
+		CDBG("%s: unsupported lane cnt %d\n",
+			__func__, csiphy_params->lane_cnt);
+		return rc;
+	}
+
+	val = 0x3;
+	msm_io_w((((1 << csiphy_params->lane_cnt) - 1) << 2) | val,
+			 csiphybase + MIPI_CSIPHY_GLBL_PWR_CFG_ADDR);
+	msm_io_w(0x1, csiphybase + MIPI_CSIPHY_GLBL_T_INIT_CFG0_ADDR);
+	msm_io_w(0x1, csiphybase + MIPI_CSIPHY_T_WAKEUP_CFG0_ADDR);
+
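+	/* Per-data-lane register blocks are 0x40 bytes apart */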
+	for (i = 0; i < csiphy_params->lane_cnt; i++) {
+		msm_io_w(0x10, csiphybase + MIPI_CSIPHY_LNn_CFG1_ADDR + 0x40*i);
+		msm_io_w(0x5F, csiphybase + MIPI_CSIPHY_LNn_CFG2_ADDR + 0x40*i);
+		msm_io_w(csiphy_params->settle_cnt,
+			csiphybase + MIPI_CSIPHY_LNn_CFG3_ADDR + 0x40*i);
+		msm_io_w(0x00000052,
+			csiphybase + MIPI_CSIPHY_LNn_CFG5_ADDR + 0x40*i);
+	}
+
+	msm_io_w(0x00000000, csiphybase + MIPI_CSIPHY_LNCK_CFG1_ADDR);
+	msm_io_w(0x5F, csiphybase + MIPI_CSIPHY_LNCK_CFG2_ADDR);
+	msm_io_w(csiphy_params->settle_cnt,
+			 csiphybase + MIPI_CSIPHY_LNCK_CFG3_ADDR);
+	msm_io_w(0x5, csiphybase + MIPI_CSIPHY_LNCK_CFG4_ADDR);
+	msm_io_w(0x2, csiphybase + MIPI_CSIPHY_LNCK_CFG5_ADDR);
+	msm_io_w(0x0, csiphybase + MIPI_CSIPHY_LNCK_MISC1_ADDR);
+
+	for (i = 0; i <= csiphy_params->lane_cnt; i++) {
+		msm_io_w(0xFFFFFFFF,
+			csiphybase + MIPI_CSIPHY_INTERRUPT_MASK0_ADDR + 0x4*i);
+		msm_io_w(0xFFFFFFFF,
+			csiphybase + MIPI_CSIPHY_INTERRUPT_CLEAR0_ADDR + 0x4*i);
+	}
+	return rc;
+}
+
+void msm_camio_set_perf_lvl(enum msm_bus_perf_setting perf_setting)
+{
+	static uint32_t bus_perf_client;
+	int rc = 0;
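+	/*
+	 * Use-case indices 1/2/3 passed to the bus scaling request map to the
+	 * preview/video/snapshot entries of cam_bus_client_config above.
+	 */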
+	switch (perf_setting) {
+	case S_INIT:
+		bus_perf_client =
+			msm_bus_scale_register_client(&cam_bus_client_pdata);
+		if (!bus_perf_client) {
+			CDBG("%s: Registration Failed!!!\n", __func__);
+			bus_perf_client = 0;
+			return;
+		}
+		CDBG("%s: S_INIT rc = %u\n", __func__, bus_perf_client);
+		break;
+	case S_EXIT:
+		if (bus_perf_client) {
+			CDBG("%s: S_EXIT\n", __func__);
+			msm_bus_scale_unregister_client(bus_perf_client);
+		} else
+			CDBG("%s: Bus Client NOT Registered!!!\n", __func__);
+		break;
+	case S_PREVIEW:
+		if (bus_perf_client) {
+			rc = msm_bus_scale_client_update_request(
+				bus_perf_client, 1);
+			CDBG("%s: S_PREVIEW rc = %d\n", __func__, rc);
+		} else
+			CDBG("%s: Bus Client NOT Registered!!!\n", __func__);
+		break;
+	case S_VIDEO:
+		if (bus_perf_client) {
+			rc = msm_bus_scale_client_update_request(
+				bus_perf_client, 2);
+			CDBG("%s: S_VIDEO rc = %d\n", __func__, rc);
+		} else
+			CDBG("%s: Bus Client NOT Registered!!!\n", __func__);
+		break;
+	case S_CAPTURE:
+		if (bus_perf_client) {
+			rc = msm_bus_scale_client_update_request(
+				bus_perf_client, 3);
+			CDBG("%s: S_CAPTURE rc = %d\n", __func__, rc);
+		} else
+			CDBG("%s: Bus Client NOT Registered!!!\n", __func__);
+		break;
+	case S_DEFAULT:
+		break;
+	default:
+		pr_warning("%s: INVALID CASE\n", __func__);
+	}
+}
diff --git a/drivers/media/video/msm/msm_io_8x60.c b/drivers/media/video/msm/msm_io_8x60.c
new file mode 100644
index 0000000..845777d
--- /dev/null
+++ b/drivers/media/video/msm/msm_io_8x60.c
@@ -0,0 +1,900 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/regulator/consumer.h>
+#include <mach/gpio.h>
+#include <mach/board.h>
+#include <mach/camera.h>
+#include <mach/vreg.h>
+#include <mach/clk.h>
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
+
+
+/* MIPI	CSI	controller registers */
+#define	MIPI_PHY_CONTROL			0x00000000
+#define	MIPI_PROTOCOL_CONTROL		0x00000004
+#define	MIPI_INTERRUPT_STATUS		0x00000008
+#define	MIPI_INTERRUPT_MASK			0x0000000C
+#define	MIPI_CAMERA_CNTL			0x00000024
+#define	MIPI_CALIBRATION_CONTROL	0x00000018
+#define	MIPI_PHY_D0_CONTROL2		0x00000038
+#define	MIPI_PHY_D1_CONTROL2		0x0000003C
+#define	MIPI_PHY_D2_CONTROL2		0x00000040
+#define	MIPI_PHY_D3_CONTROL2		0x00000044
+#define	MIPI_PHY_CL_CONTROL			0x00000048
+#define	MIPI_PHY_D0_CONTROL			0x00000034
+#define	MIPI_PHY_D1_CONTROL			0x00000020
+#define	MIPI_PHY_D2_CONTROL			0x0000002C
+#define	MIPI_PHY_D3_CONTROL			0x00000030
+#define	MIPI_PROTOCOL_CONTROL_SW_RST_BMSK			0x8000000
+#define	MIPI_PROTOCOL_CONTROL_LONG_PACKET_HEADER_CAPTURE_BMSK	0x200000
+#define	MIPI_PROTOCOL_CONTROL_DATA_FORMAT_BMSK			0x180000
+#define	MIPI_PROTOCOL_CONTROL_DECODE_ID_BMSK			0x40000
+#define	MIPI_PROTOCOL_CONTROL_ECC_EN_BMSK			0x20000
+#define	MIPI_CALIBRATION_CONTROL_SWCAL_CAL_EN_SHFT		0x16
+#define	MIPI_CALIBRATION_CONTROL_SWCAL_STRENGTH_OVERRIDE_EN_SHFT	0x15
+#define	MIPI_CALIBRATION_CONTROL_CAL_SW_HW_MODE_SHFT		0x14
+#define	MIPI_CALIBRATION_CONTROL_MANUAL_OVERRIDE_EN_SHFT	0x7
+#define	MIPI_PROTOCOL_CONTROL_DATA_FORMAT_SHFT			0x13
+#define	MIPI_PROTOCOL_CONTROL_DPCM_SCHEME_SHFT			0x1e
+#define	MIPI_PHY_D0_CONTROL2_SETTLE_COUNT_SHFT			0x18
+#define	MIPI_PHY_D0_CONTROL2_HS_TERM_IMP_SHFT			0x10
+#define	MIPI_PHY_D0_CONTROL2_LP_REC_EN_SHFT				0x4
+#define	MIPI_PHY_D0_CONTROL2_ERR_SOT_HS_EN_SHFT			0x3
+#define	MIPI_PHY_D1_CONTROL2_SETTLE_COUNT_SHFT			0x18
+#define	MIPI_PHY_D1_CONTROL2_HS_TERM_IMP_SHFT			0x10
+#define	MIPI_PHY_D1_CONTROL2_LP_REC_EN_SHFT				0x4
+#define	MIPI_PHY_D1_CONTROL2_ERR_SOT_HS_EN_SHFT			0x3
+#define	MIPI_PHY_D2_CONTROL2_SETTLE_COUNT_SHFT			0x18
+#define	MIPI_PHY_D2_CONTROL2_HS_TERM_IMP_SHFT			0x10
+#define	MIPI_PHY_D2_CONTROL2_LP_REC_EN_SHFT				0x4
+#define	MIPI_PHY_D2_CONTROL2_ERR_SOT_HS_EN_SHFT			0x3
+#define	MIPI_PHY_D3_CONTROL2_SETTLE_COUNT_SHFT			0x18
+#define	MIPI_PHY_D3_CONTROL2_HS_TERM_IMP_SHFT			0x10
+#define	MIPI_PHY_D3_CONTROL2_LP_REC_EN_SHFT				0x4
+#define	MIPI_PHY_D3_CONTROL2_ERR_SOT_HS_EN_SHFT			0x3
+#define	MIPI_PHY_CL_CONTROL_HS_TERM_IMP_SHFT			0x18
+#define	MIPI_PHY_CL_CONTROL_LP_REC_EN_SHFT				0x2
+#define	MIPI_PHY_D0_CONTROL_HS_REC_EQ_SHFT				0x1c
+#define	MIPI_PHY_D1_CONTROL_MIPI_CLK_PHY_SHUTDOWNB_SHFT		0x9
+#define	MIPI_PHY_D1_CONTROL_MIPI_DATA_PHY_SHUTDOWNB_SHFT	0x8
+
+static struct clk *camio_cam_clk;
+static struct clk *camio_vfe_clk;
+static struct clk *camio_csi_src_clk;
+static struct clk *camio_csi0_vfe_clk;
+static struct clk *camio_csi1_vfe_clk;
+static struct clk *camio_csi0_clk;
+static struct clk *camio_csi1_clk;
+static struct clk *camio_csi0_pclk;
+static struct clk *camio_csi1_pclk;
+static struct clk *camio_vfe_pclk;
+static struct clk *camio_jpeg_clk;
+static struct clk *camio_jpeg_pclk;
+static struct clk *camio_vpe_clk;
+static struct clk *camio_vpe_pclk;
+static struct regulator *fs_vfe;
+static struct regulator *fs_ijpeg;
+static struct regulator *fs_vpe;
+static struct regulator *ldo15;
+static struct regulator *lvs0;
+static struct regulator *ldo25;
+
+static struct msm_camera_io_ext camio_ext;
+static struct msm_camera_io_clk camio_clk;
+static struct platform_device *camio_dev;
+static struct resource *csiio;
+void __iomem *csibase;
+static int vpe_clk_rate;
+struct msm_bus_scale_pdata *cam_bus_scale_table;
+
+void msm_io_w(u32 data, void __iomem *addr)
+{
+	CDBG("%s: %08x %08x\n", __func__, (int) (addr), (data));
+	writel_relaxed((data), (addr));
+}
+
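+/* Ordered variant of msm_io_w(): barriers on both sides of the write */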
+void msm_io_w_mb(u32 data, void __iomem *addr)
+{
+	CDBG("%s: %08x %08x\n", __func__, (int) (addr), (data));
+	wmb();
+	writel_relaxed((data), (addr));
+	wmb();
+}
+
+u32 msm_io_r(void __iomem *addr)
+{
+	uint32_t data = readl_relaxed(addr);
+	CDBG("%s: %08x %08x\n", __func__, (int) (addr), (data));
+	return data;
+}
+
+u32 msm_io_r_mb(void __iomem *addr)
+{
+	uint32_t data;
+	rmb();
+	data = readl_relaxed(addr);
+	rmb();
+	CDBG("%s: %08x %08x\n", __func__, (int) (addr), (data));
+	return data;
+}
+
+void msm_io_memcpy_toio(void __iomem *dest_addr,
+	void __iomem *src_addr, u32 len)
+{
+	int i;
+	u32 *d = (u32 *) dest_addr;
+	u32 *s = (u32 *) src_addr;
+	/* memcpy_toio does not work. Use writel for now */
+	for (i = 0; i < len; i++)
+		writel_relaxed(*s++, d++);
+}
+
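+/* Dump a mapped register region to the debug log, four 32-bit words per line */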
+void msm_io_dump(void __iomem *addr, int size)
+{
+	char line_str[128], *p_str;
+	int i;
+	u32 *p = (u32 *) addr;
+	u32 data;
+	CDBG("%s: %p %d\n", __func__, addr, size);
+	line_str[0] = '\0';
+	p_str = line_str;
+	for (i = 0; i < size/4; i++) {
+		if (i % 4 == 0) {
+			sprintf(p_str, "%08x: ", (u32) p);
+			p_str += 10;
+		}
+		data = readl_relaxed(p++);
+		sprintf(p_str, "%08x ", data);
+		p_str += 9;
+		if ((i + 1) % 4 == 0) {
+			CDBG("%s\n", line_str);
+			line_str[0] = '\0';
+			p_str = line_str;
+		}
+	}
+	if (line_str[0] != '\0')
+		CDBG("%s\n", line_str);
+}
+
+void msm_io_memcpy(void __iomem *dest_addr, void __iomem *src_addr, u32 len)
+{
+	CDBG("%s: %p %p %d\n", __func__, dest_addr, src_addr, len);
+	msm_io_memcpy_toio(dest_addr, src_addr, len / 4);
+	msm_io_dump(dest_addr, len);
+}
+
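+/*
+ * Power up the camera supplies: LDO15 at 2.85 V, the LVS0 switch, LDO25 at
+ * 1.2 V and the VFE footswitch.  Supplies already enabled are released
+ * through the error ladder below.
+ */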
+static void msm_camera_vreg_enable(void)
+{
+	ldo15 = regulator_get(NULL, "8058_l15");
+	if (IS_ERR(ldo15)) {
+		pr_err("%s: VREG LDO15 get failed\n", __func__);
+		ldo15 = NULL;
+		return;
+	}
+	if (regulator_set_voltage(ldo15, 2850000, 2850000)) {
+		pr_err("%s: VREG LDO15 set voltage failed\n",  __func__);
+		goto ldo15_put;
+	}
+	if (regulator_enable(ldo15)) {
+		pr_err("%s: VREG LDO15 enable failed\n", __func__);
+		goto ldo15_put;
+	}
+
+	lvs0 = regulator_get(NULL, "8058_lvs0");
+	if (IS_ERR(lvs0)) {
+		pr_err("%s: VREG LVS0 get failed\n", __func__);
+		lvs0 = NULL;
+		goto ldo15_disable;
+	}
+	if (regulator_enable(lvs0)) {
+		pr_err("%s: VREG LVS0 enable failed\n", __func__);
+		goto lvs0_put;
+	}
+
+	ldo25 = regulator_get(NULL, "8058_l25");
+	if (IS_ERR(ldo25)) {
+		pr_err("%s: VREG LDO25 get failed\n", __func__);
+		ldo25 = NULL;
+		goto lvs0_disable;
+	}
+	if (regulator_set_voltage(ldo25, 1200000, 1200000)) {
+		pr_err("%s: VREG LDO25 set voltage failed\n",  __func__);
+		goto ldo25_put;
+	}
+	if (regulator_enable(ldo25)) {
+		pr_err("%s: VREG LDO25 enable failed\n", __func__);
+		goto ldo25_put;
+	}
+
+	fs_vfe = regulator_get(NULL, "fs_vfe");
+	if (IS_ERR(fs_vfe)) {
+		CDBG("%s: Regulator FS_VFE get failed %ld\n", __func__,
+			PTR_ERR(fs_vfe));
+		fs_vfe = NULL;
+	} else if (regulator_enable(fs_vfe)) {
+		CDBG("%s: Regulator FS_VFE enable failed\n", __func__);
+		regulator_put(fs_vfe);
+	}
+	return;
+
+ldo25_disable:
+	regulator_disable(ldo25);
+ldo25_put:
+	regulator_put(ldo25);
+lvs0_disable:
+	regulator_disable(lvs0);
+lvs0_put:
+	regulator_put(lvs0);
+ldo15_disable:
+	regulator_disable(ldo15);
+ldo15_put:
+	regulator_put(ldo15);
+}
+
+static void msm_camera_vreg_disable(void)
+{
+	if (ldo15) {
+		regulator_disable(ldo15);
+		regulator_put(ldo15);
+	}
+
+	if (lvs0) {
+		regulator_disable(lvs0);
+		regulator_put(lvs0);
+	}
+
+	if (ldo25) {
+		regulator_disable(ldo25);
+		regulator_put(ldo25);
+	}
+
+	if (fs_vfe) {
+		regulator_disable(fs_vfe);
+		regulator_put(fs_vfe);
+	}
+}
+
+int msm_camio_clk_enable(enum msm_camio_clk_type clktype)
+{
+	int rc = 0;
+	struct clk *clk = NULL;
+
+	switch (clktype) {
+	case CAMIO_CAM_MCLK_CLK:
+		camio_cam_clk =
+		clk = clk_get(NULL, "cam_clk");
+		msm_camio_clk_rate_set_2(clk, camio_clk.mclk_clk_rate);
+		break;
+
+	case CAMIO_VFE_CLK:
+		camio_vfe_clk =
+		clk = clk_get(NULL, "vfe_clk");
+		msm_camio_clk_rate_set_2(clk, camio_clk.vfe_clk_rate);
+		break;
+
+	case CAMIO_CSI0_VFE_CLK:
+		camio_csi0_vfe_clk =
+		clk = clk_get(NULL, "csi_vfe_clk");
+		break;
+
+	case CAMIO_CSI1_VFE_CLK:
+		camio_csi1_vfe_clk =
+		clk = clk_get(&camio_dev->dev, "csi_vfe_clk");
+		break;
+
+	case CAMIO_CSI_SRC_CLK:
+		camio_csi_src_clk =
+		clk = clk_get(NULL, "csi_src_clk");
+		msm_camio_clk_rate_set_2(clk, 384000000);
+		break;
+
+	case CAMIO_CSI0_CLK:
+		camio_csi0_clk =
+		clk = clk_get(NULL, "csi_clk");
+		break;
+
+	case CAMIO_CSI1_CLK:
+		camio_csi1_clk =
+		clk = clk_get(&camio_dev->dev, "csi_clk");
+		break;
+
+	case CAMIO_VFE_PCLK:
+		camio_vfe_pclk =
+		clk = clk_get(NULL, "vfe_pclk");
+		break;
+
+	case CAMIO_CSI0_PCLK:
+		camio_csi0_pclk =
+		clk = clk_get(NULL, "csi_pclk");
+		break;
+
+	case CAMIO_CSI1_PCLK:
+		camio_csi1_pclk =
+		clk = clk_get(&camio_dev->dev, "csi_pclk");
+		break;
+
+	case CAMIO_JPEG_CLK:
+		camio_jpeg_clk =
+		clk = clk_get(NULL, "ijpeg_clk");
+		msm_camio_clk_rate_set_2(clk, 228571000);
+		break;
+
+	case CAMIO_JPEG_PCLK:
+		camio_jpeg_pclk =
+		clk = clk_get(NULL, "ijpeg_pclk");
+		break;
+
+	case CAMIO_VPE_CLK:
+		camio_vpe_clk =
+		clk = clk_get(NULL, "vpe_clk");
+		msm_camio_clk_set_min_rate(camio_vpe_clk, vpe_clk_rate);
+		break;
+
+	case CAMIO_VPE_PCLK:
+		camio_vpe_pclk =
+		clk = clk_get(NULL, "vpe_pclk");
+		break;
+
+	default:
+		break;
+	}
+
+	if (!IS_ERR(clk))
+		clk_enable(clk);
+	else
+		rc = -1;
+	return rc;
+}
+
+int msm_camio_clk_disable(enum msm_camio_clk_type clktype)
+{
+	int rc = 0;
+	struct clk *clk = NULL;
+
+	switch (clktype) {
+	case CAMIO_CAM_MCLK_CLK:
+		clk = camio_cam_clk;
+		break;
+
+	case CAMIO_VFE_CLK:
+		clk = camio_vfe_clk;
+		break;
+
+	case CAMIO_CSI_SRC_CLK:
+		clk = camio_csi_src_clk;
+		break;
+
+	case CAMIO_CSI0_VFE_CLK:
+		clk = camio_csi0_vfe_clk;
+		break;
+
+	case CAMIO_CSI1_VFE_CLK:
+		clk = camio_csi1_vfe_clk;
+		break;
+
+	case CAMIO_CSI0_CLK:
+		clk = camio_csi0_clk;
+		break;
+
+	case CAMIO_CSI1_CLK:
+		clk = camio_csi1_clk;
+		break;
+
+	case CAMIO_VFE_PCLK:
+		clk = camio_vfe_pclk;
+		break;
+
+	case CAMIO_CSI0_PCLK:
+		clk = camio_csi0_pclk;
+		break;
+
+	case CAMIO_CSI1_PCLK:
+		clk = camio_csi1_pclk;
+		break;
+
+	case CAMIO_JPEG_CLK:
+		clk = camio_jpeg_clk;
+		break;
+
+	case CAMIO_JPEG_PCLK:
+		clk = camio_jpeg_pclk;
+		break;
+
+	case CAMIO_VPE_CLK:
+		clk = camio_vpe_clk;
+		break;
+
+	case CAMIO_VPE_PCLK:
+		clk = camio_vpe_pclk;
+		break;
+
+	default:
+		break;
+	}
+
+	if (!IS_ERR(clk)) {
+		clk_disable(clk);
+		clk_put(clk);
+	} else
+		rc = -1;
+	return rc;
+}
+
+void msm_camio_vfe_clk_rate_set(int rate)
+{
+	struct clk *clk = camio_vfe_clk;
+	if (rate > clk_get_rate(clk))
+		clk_set_rate(clk, rate);
+}
+
+void msm_camio_clk_rate_set(int rate)
+{
+	struct clk *clk = camio_cam_clk;
+	clk_set_rate(clk, rate);
+}
+
+void msm_camio_clk_rate_set_2(struct clk *clk, int rate)
+{
+	clk_set_rate(clk, rate);
+}
+
+void msm_camio_clk_set_min_rate(struct clk *clk, int rate)
+{
+	clk_set_min_rate(clk, rate);
+}
+
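+/* CSI ISR: read the pending MIPI interrupt bits and write them back to ack */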
+static irqreturn_t msm_io_csi_irq(int irq_num, void *data)
+{
+	uint32_t irq;
+	irq = msm_io_r(csibase + MIPI_INTERRUPT_STATUS);
+	CDBG("%s MIPI_INTERRUPT_STATUS = 0x%x\n", __func__, irq);
+	msm_io_w(irq, csibase + MIPI_INTERRUPT_STATUS);
+	return IRQ_HANDLED;
+}
+
+int msm_camio_jpeg_clk_disable(void)
+{
+	int rc = 0;
+	if (fs_ijpeg) {
+		rc = regulator_disable(fs_ijpeg);
+		if (rc < 0) {
+			CDBG("%s: Regulator disable failed %d\n", __func__, rc);
+			return rc;
+		}
+		regulator_put(fs_ijpeg);
+	}
+	rc = msm_camio_clk_disable(CAMIO_JPEG_PCLK);
+	if (rc < 0)
+		return rc;
+	rc = msm_camio_clk_disable(CAMIO_JPEG_CLK);
+	CDBG("%s: exit %d\n", __func__, rc);
+	return rc;
+}
+
+int msm_camio_jpeg_clk_enable(void)
+{
+	int rc = 0;
+	rc = msm_camio_clk_enable(CAMIO_JPEG_CLK);
+	if (rc < 0)
+		return rc;
+	rc = msm_camio_clk_enable(CAMIO_JPEG_PCLK);
+	if (rc < 0)
+		return rc;
+	fs_ijpeg = regulator_get(NULL, "fs_ijpeg");
+	if (IS_ERR(fs_ijpeg)) {
+		CDBG("%s: Regulator FS_IJPEG get failed %ld\n", __func__,
+			PTR_ERR(fs_ijpeg));
+		fs_ijpeg = NULL;
+	} else if (regulator_enable(fs_ijpeg)) {
+		CDBG("%s: Regulator FS_IJPEG enable failed\n", __func__);
+		regulator_put(fs_ijpeg);
+	}
+	CDBG("%s: exit %d\n", __func__, rc);
+	return rc;
+}
+
+int msm_camio_vpe_clk_disable(void)
+{
+	int rc = 0;
+	if (fs_vpe) {
+		regulator_disable(fs_vpe);
+		regulator_put(fs_vpe);
+	}
+
+	rc = msm_camio_clk_disable(CAMIO_VPE_CLK);
+	if (rc < 0)
+		return rc;
+	rc = msm_camio_clk_disable(CAMIO_VPE_PCLK);
+	return rc;
+}
+
+int msm_camio_vpe_clk_enable(uint32_t clk_rate)
+{
+	int rc = 0;
+	fs_vpe = regulator_get(NULL, "fs_vpe");
+	if (IS_ERR(fs_vpe)) {
+		CDBG("%s: Regulator FS_VPE get failed %ld\n", __func__,
+			PTR_ERR(fs_vpe));
+		fs_vpe = NULL;
+	} else if (regulator_enable(fs_vpe)) {
+		CDBG("%s: Regulator FS_VPE enable failed\n", __func__);
+		regulator_put(fs_vpe);
+	}
+
+	vpe_clk_rate = clk_rate;
+	rc = msm_camio_clk_enable(CAMIO_VPE_CLK);
+	if (rc < 0)
+		return rc;
+
+	rc = msm_camio_clk_enable(CAMIO_VPE_PCLK);
+	return rc;
+}
+
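+/*
+ * Enable the VFE/CSI clocks, map the CSI PHY register block, request the CSI
+ * interrupt and program the default lane and clock-lane PHY settings.
+ */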
+int msm_camio_enable(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+	uint32_t val;
+
+	camio_dev = pdev;
+	camio_ext = camdev->ioext;
+	camio_clk = camdev->ioclk;
+	cam_bus_scale_table = camdev->cam_bus_scale_table;
+
+	msm_camio_clk_enable(CAMIO_VFE_CLK);
+	msm_camio_clk_enable(CAMIO_CSI0_VFE_CLK);
+	msm_camio_clk_enable(CAMIO_CSI1_VFE_CLK);
+	msm_camio_clk_enable(CAMIO_CSI_SRC_CLK);
+	msm_camio_clk_enable(CAMIO_CSI0_CLK);
+	msm_camio_clk_enable(CAMIO_CSI1_CLK);
+	msm_camio_clk_enable(CAMIO_VFE_PCLK);
+	msm_camio_clk_enable(CAMIO_CSI0_PCLK);
+	msm_camio_clk_enable(CAMIO_CSI1_PCLK);
+
+	csiio = request_mem_region(camio_ext.csiphy,
+		camio_ext.csisz, pdev->name);
+	if (!csiio) {
+		rc = -EBUSY;
+		goto common_fail;
+	}
+	csibase = ioremap(camio_ext.csiphy,
+		camio_ext.csisz);
+	if (!csibase) {
+		rc = -ENOMEM;
+		goto csi_busy;
+	}
+	rc = request_irq(camio_ext.csiirq, msm_io_csi_irq,
+		IRQF_TRIGGER_RISING, "csi", 0);
+	if (rc < 0)
+		goto csi_irq_fail;
+
+	msleep(10);
+	val = (20 <<
+		MIPI_PHY_D0_CONTROL2_SETTLE_COUNT_SHFT) |
+		(0x0F << MIPI_PHY_D0_CONTROL2_HS_TERM_IMP_SHFT) |
+		(0x0 << MIPI_PHY_D0_CONTROL2_LP_REC_EN_SHFT) |
+		(0x1 << MIPI_PHY_D0_CONTROL2_ERR_SOT_HS_EN_SHFT);
+	CDBG("%s MIPI_PHY_D0_CONTROL2 val=0x%x\n", __func__, val);
+	msm_io_w(val, csibase + MIPI_PHY_D0_CONTROL2);
+	msm_io_w(val, csibase + MIPI_PHY_D1_CONTROL2);
+	msm_io_w(val, csibase + MIPI_PHY_D2_CONTROL2);
+	msm_io_w(val, csibase + MIPI_PHY_D3_CONTROL2);
+
+	val = (0x0F << MIPI_PHY_CL_CONTROL_HS_TERM_IMP_SHFT) |
+		(0x0 << MIPI_PHY_CL_CONTROL_LP_REC_EN_SHFT);
+	CDBG("%s MIPI_PHY_CL_CONTROL val=0x%x\n", __func__, val);
+	msm_io_w(val, csibase + MIPI_PHY_CL_CONTROL);
+	return 0;
+
+csi_irq_fail:
+	iounmap(csibase);
+csi_busy:
+	release_mem_region(camio_ext.csiphy, camio_ext.csisz);
+common_fail:
+	msm_camio_clk_disable(CAMIO_CAM_MCLK_CLK);
+	msm_camio_clk_disable(CAMIO_CSI0_VFE_CLK);
+	msm_camio_clk_disable(CAMIO_CSI0_CLK);
+	msm_camio_clk_disable(CAMIO_CSI1_VFE_CLK);
+	msm_camio_clk_disable(CAMIO_CSI1_CLK);
+	msm_camio_clk_disable(CAMIO_VFE_PCLK);
+	msm_camio_clk_disable(CAMIO_CSI0_PCLK);
+	msm_camio_clk_disable(CAMIO_CSI1_PCLK);
+	msm_camera_vreg_disable();
+	camdev->camera_gpio_off();
+	return rc;
+}
+
+void msm_camio_disable(struct platform_device *pdev)
+{
+	uint32_t val;
+	val = (0x0 << MIPI_CALIBRATION_CONTROL_SWCAL_CAL_EN_SHFT) |
+		(0x0 <<
+		MIPI_CALIBRATION_CONTROL_SWCAL_STRENGTH_OVERRIDE_EN_SHFT) |
+		(0x0 << MIPI_CALIBRATION_CONTROL_CAL_SW_HW_MODE_SHFT) |
+		(0x0 << MIPI_CALIBRATION_CONTROL_MANUAL_OVERRIDE_EN_SHFT);
+	CDBG("%s MIPI_CALIBRATION_CONTROL val=0x%x\n", __func__, val);
+	msm_io_w(val, csibase + MIPI_CALIBRATION_CONTROL);
+
+	val = (20 <<
+		MIPI_PHY_D0_CONTROL2_SETTLE_COUNT_SHFT) |
+		(0x0F << MIPI_PHY_D0_CONTROL2_HS_TERM_IMP_SHFT) |
+		(0x0 << MIPI_PHY_D0_CONTROL2_LP_REC_EN_SHFT) |
+		(0x1 << MIPI_PHY_D0_CONTROL2_ERR_SOT_HS_EN_SHFT);
+	CDBG("%s MIPI_PHY_D0_CONTROL2 val=0x%x\n", __func__, val);
+	msm_io_w(val, csibase + MIPI_PHY_D0_CONTROL2);
+	msm_io_w(val, csibase + MIPI_PHY_D1_CONTROL2);
+	msm_io_w(val, csibase + MIPI_PHY_D2_CONTROL2);
+	msm_io_w(val, csibase + MIPI_PHY_D3_CONTROL2);
+
+	val = (0x0F << MIPI_PHY_CL_CONTROL_HS_TERM_IMP_SHFT) |
+		(0x0 << MIPI_PHY_CL_CONTROL_LP_REC_EN_SHFT);
+	CDBG("%s MIPI_PHY_CL_CONTROL val=0x%x\n", __func__, val);
+	msm_io_w(val, csibase + MIPI_PHY_CL_CONTROL);
+	msleep(10);
+
+	val = msm_io_r(csibase + MIPI_PHY_D1_CONTROL);
+	val &= ~((0x1 << MIPI_PHY_D1_CONTROL_MIPI_CLK_PHY_SHUTDOWNB_SHFT) |
+	(0x1 << MIPI_PHY_D1_CONTROL_MIPI_DATA_PHY_SHUTDOWNB_SHFT));
+	CDBG("%s MIPI_PHY_D1_CONTROL val=0x%x\n", __func__, val);
+	msm_io_w(val, csibase + MIPI_PHY_D1_CONTROL);
+	usleep_range(5000, 6000);
+	free_irq(camio_ext.csiirq, 0);
+	iounmap(csibase);
+	release_mem_region(camio_ext.csiphy, camio_ext.csisz);
+	CDBG("disable clocks\n");
+
+	msm_camio_clk_disable(CAMIO_CSI0_VFE_CLK);
+	msm_camio_clk_disable(CAMIO_CSI0_CLK);
+	msm_camio_clk_disable(CAMIO_CSI1_VFE_CLK);
+	msm_camio_clk_disable(CAMIO_CSI1_CLK);
+	msm_camio_clk_disable(CAMIO_VFE_PCLK);
+	msm_camio_clk_disable(CAMIO_CSI0_PCLK);
+	msm_camio_clk_disable(CAMIO_CSI1_PCLK);
+	msm_camio_clk_disable(CAMIO_CSI_SRC_CLK);
+	msm_camio_clk_disable(CAMIO_VFE_CLK);
+}
+
+int msm_camio_sensor_clk_on(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+	camio_dev = pdev;
+	camio_ext = camdev->ioext;
+	camio_clk = camdev->ioclk;
+
+	msm_camera_vreg_enable();
+	msleep(10);
+	rc = camdev->camera_gpio_on();
+	if (rc < 0)
+		return rc;
+	return msm_camio_clk_enable(CAMIO_CAM_MCLK_CLK);
+}
+
+int msm_camio_sensor_clk_off(struct platform_device *pdev)
+{
+	struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+	msm_camera_vreg_disable();
+	camdev->camera_gpio_off();
+	return msm_camio_clk_disable(CAMIO_CAM_MCLK_CLK);
+}
+
+void msm_camio_vfe_blk_reset(void)
+{
+	return;
+}
+
+int msm_camio_probe_on(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+	camio_dev = pdev;
+	camio_ext = camdev->ioext;
+	camio_clk = camdev->ioclk;
+
+	rc = camdev->camera_gpio_on();
+	if (rc < 0)
+		return rc;
+	msm_camera_vreg_enable();
+	return msm_camio_clk_enable(CAMIO_CAM_MCLK_CLK);
+}
+
+int msm_camio_probe_off(struct platform_device *pdev)
+{
+	struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+	msm_camera_vreg_disable();
+	camdev->camera_gpio_off();
+	return msm_camio_clk_disable(CAMIO_CAM_MCLK_CLK);
+}
+
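+/*
+ * Program the CSI receiver for the current sensor: protocol control (data
+ * format, DPCM scheme, ECC), software calibration, per-lane settle count,
+ * lane count/assignment and the interrupt mask.
+ */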
+int msm_camio_csi_config(struct msm_camera_csi_params *csi_params)
+{
+	int rc = 0;
+	uint32_t val = 0;
+
+	CDBG("msm_camio_csi_config\n");
+
+	/* SOT_ECC_EN enable error correction for SYNC (data-lane) */
+	msm_io_w(0x4, csibase + MIPI_PHY_CONTROL);
+
+	/* SW_RST to the CSI core */
+	msm_io_w(MIPI_PROTOCOL_CONTROL_SW_RST_BMSK,
+		csibase + MIPI_PROTOCOL_CONTROL);
+
+	/* PROTOCOL CONTROL */
+	val = MIPI_PROTOCOL_CONTROL_LONG_PACKET_HEADER_CAPTURE_BMSK |
+		MIPI_PROTOCOL_CONTROL_DECODE_ID_BMSK |
+		MIPI_PROTOCOL_CONTROL_ECC_EN_BMSK;
+	val |= (uint32_t)(csi_params->data_format) <<
+		MIPI_PROTOCOL_CONTROL_DATA_FORMAT_SHFT;
+	val |= csi_params->dpcm_scheme <<
+		MIPI_PROTOCOL_CONTROL_DPCM_SCHEME_SHFT;
+	CDBG("%s MIPI_PROTOCOL_CONTROL val=0x%x\n", __func__, val);
+	msm_io_w(val, csibase + MIPI_PROTOCOL_CONTROL);
+
+	/* SW CAL EN */
+	val = (0x1 << MIPI_CALIBRATION_CONTROL_SWCAL_CAL_EN_SHFT) |
+		(0x1 <<
+		MIPI_CALIBRATION_CONTROL_SWCAL_STRENGTH_OVERRIDE_EN_SHFT) |
+		(0x1 << MIPI_CALIBRATION_CONTROL_CAL_SW_HW_MODE_SHFT) |
+		(0x1 << MIPI_CALIBRATION_CONTROL_MANUAL_OVERRIDE_EN_SHFT);
+	CDBG("%s MIPI_CALIBRATION_CONTROL val=0x%x\n", __func__, val);
+	msm_io_w(val, csibase + MIPI_CALIBRATION_CONTROL);
+
+	/* settle_cnt is very sensitive to speed!
+	increase this value to run at higher speeds */
+	val = (csi_params->settle_cnt <<
+		MIPI_PHY_D0_CONTROL2_SETTLE_COUNT_SHFT) |
+		(0x0F << MIPI_PHY_D0_CONTROL2_HS_TERM_IMP_SHFT) |
+		(0x1 << MIPI_PHY_D0_CONTROL2_LP_REC_EN_SHFT) |
+		(0x1 << MIPI_PHY_D0_CONTROL2_ERR_SOT_HS_EN_SHFT);
+	CDBG("%s MIPI_PHY_D0_CONTROL2 val=0x%x\n", __func__, val);
+	msm_io_w(val, csibase + MIPI_PHY_D0_CONTROL2);
+	msm_io_w(val, csibase + MIPI_PHY_D1_CONTROL2);
+	msm_io_w(val, csibase + MIPI_PHY_D2_CONTROL2);
+	msm_io_w(val, csibase + MIPI_PHY_D3_CONTROL2);
+
+
+	val = (0x0F << MIPI_PHY_CL_CONTROL_HS_TERM_IMP_SHFT) |
+		(0x1 << MIPI_PHY_CL_CONTROL_LP_REC_EN_SHFT);
+	CDBG("%s MIPI_PHY_CL_CONTROL val=0x%x\n", __func__, val);
+	msm_io_w(val, csibase + MIPI_PHY_CL_CONTROL);
+
+	val = 0 << MIPI_PHY_D0_CONTROL_HS_REC_EQ_SHFT;
+	msm_io_w(val, csibase + MIPI_PHY_D0_CONTROL);
+
+	val = (0x1 << MIPI_PHY_D1_CONTROL_MIPI_CLK_PHY_SHUTDOWNB_SHFT) |
+		(0x1 << MIPI_PHY_D1_CONTROL_MIPI_DATA_PHY_SHUTDOWNB_SHFT);
+	CDBG("%s MIPI_PHY_D1_CONTROL val=0x%x\n", __func__, val);
+	msm_io_w(val, csibase + MIPI_PHY_D1_CONTROL);
+
+	msm_io_w(0x00000000, csibase + MIPI_PHY_D2_CONTROL);
+	msm_io_w(0x00000000, csibase + MIPI_PHY_D3_CONTROL);
+
+	/* halcyon only supports 1 or 2 lane */
+	switch (csi_params->lane_cnt) {
+	case 1:
+		msm_io_w(csi_params->lane_assign << 8 | 0x4,
+			csibase + MIPI_CAMERA_CNTL);
+		break;
+	case 2:
+		msm_io_w(csi_params->lane_assign << 8 | 0x5,
+			csibase + MIPI_CAMERA_CNTL);
+		break;
+	case 3:
+		msm_io_w(csi_params->lane_assign << 8 | 0x6,
+			csibase + MIPI_CAMERA_CNTL);
+		break;
+	case 4:
+		msm_io_w(csi_params->lane_assign << 8 | 0x7,
+			csibase + MIPI_CAMERA_CNTL);
+		break;
+	}
+
+	/* mask out ID_ERROR[19], DATA_CMM_ERR[11]
+	and CLK_CMM_ERR[10] - de-featured */
+	msm_io_w(0xFFF7F3FF, csibase + MIPI_INTERRUPT_MASK);
+	/*clear IRQ bits*/
+	msm_io_w(0xFFF7F3FF, csibase + MIPI_INTERRUPT_STATUS);
+
+	return rc;
+}
+
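+/*
+ * Vote for AXI bandwidth through the msm_bus scaling client: each use case
+ * (preview, video, capture, ZSL, stereo) selects an index into the platform
+ * bus-scale table registered at S_INIT.
+ */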
+void msm_camio_set_perf_lvl(enum msm_bus_perf_setting perf_setting)
+{
+	static uint32_t bus_perf_client;
+	int rc = 0;
+	switch (perf_setting) {
+	case S_INIT:
+		bus_perf_client =
+			msm_bus_scale_register_client(cam_bus_scale_table);
+		if (!bus_perf_client) {
+			pr_err("%s: Registration Failed!!!\n", __func__);
+			bus_perf_client = 0;
+			return;
+		}
+		CDBG("%s: S_INIT rc = %u\n", __func__, bus_perf_client);
+		break;
+	case S_EXIT:
+		if (bus_perf_client) {
+			CDBG("%s: S_EXIT\n", __func__);
+			msm_bus_scale_unregister_client(bus_perf_client);
+		} else
+			pr_err("%s: Bus Client NOT Registered!!!\n", __func__);
+		break;
+	case S_PREVIEW:
+		if (bus_perf_client) {
+			rc = msm_bus_scale_client_update_request(
+				bus_perf_client, 1);
+			CDBG("%s: S_PREVIEW rc = %d\n", __func__, rc);
+		} else
+			pr_err("%s: Bus Client NOT Registered!!!\n", __func__);
+		break;
+	case S_VIDEO:
+		if (bus_perf_client) {
+			rc = msm_bus_scale_client_update_request(
+				bus_perf_client, 2);
+			CDBG("%s: S_VIDEO rc = %d\n", __func__, rc);
+		} else
+			pr_err("%s: Bus Client NOT Registered!!!\n", __func__);
+		break;
+	case S_CAPTURE:
+		if (bus_perf_client) {
+			rc = msm_bus_scale_client_update_request(
+				bus_perf_client, 3);
+			CDBG("%s: S_CAPTURE rc = %d\n", __func__, rc);
+		} else
+			pr_err("%s: Bus Client NOT Registered!!!\n", __func__);
+		break;
+
+	case S_ZSL:
+		if (bus_perf_client) {
+			rc = msm_bus_scale_client_update_request(
+				bus_perf_client, 4);
+			CDBG("%s: S_ZSL rc = %d\n", __func__, rc);
+		} else
+			pr_err("%s: Bus Client NOT Registered!!!\n", __func__);
+		break;
+	case S_STEREO_VIDEO:
+		if (bus_perf_client) {
+			rc = msm_bus_scale_client_update_request(
+				bus_perf_client, 5);
+			CDBG("%s: S_STEREO_VIDEO rc = %d\n", __func__, rc);
+		} else
+			pr_err("%s: Bus Client NOT Registered!!!\n", __func__);
+		break;
+	case S_STEREO_CAPTURE:
+		if (bus_perf_client) {
+			rc = msm_bus_scale_client_update_request(
+				bus_perf_client, 6);
+			CDBG("%s: S_STEREO_CAPTURE rc = %d\n", __func__, rc);
+		} else
+			pr_err("%s: Bus Client NOT Registered!!!\n", __func__);
+		break;
+	case S_DEFAULT:
+		break;
+	default:
+		pr_warning("%s: INVALID CASE\n", __func__);
+	}
+}
diff --git a/drivers/media/video/msm/msm_io_vfe31.c b/drivers/media/video/msm/msm_io_vfe31.c
new file mode 100644
index 0000000..6279b36
--- /dev/null
+++ b/drivers/media/video/msm/msm_io_vfe31.c
@@ -0,0 +1,924 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/pm_qos_params.h>
+#include <linux/regulator/consumer.h>
+#include <mach/gpio.h>
+#include <mach/board.h>
+#include <mach/camera.h>
+#include <mach/vreg.h>
+#include <mach/clk.h>
+
+#define CAMIF_CFG_RMSK             0x1fffff
+#define CAM_SEL_BMSK               0x2
+#define CAM_PCLK_SRC_SEL_BMSK      0x60000
+#define CAM_PCLK_INVERT_BMSK       0x80000
+#define CAM_PAD_REG_SW_RESET_BMSK  0x100000
+
+#define EXT_CAM_HSYNC_POL_SEL_BMSK 0x10000
+#define EXT_CAM_VSYNC_POL_SEL_BMSK 0x8000
+#define MDDI_CLK_CHICKEN_BIT_BMSK  0x80
+
+#define CAM_SEL_SHFT               0x1
+#define CAM_PCLK_SRC_SEL_SHFT      0x11
+#define CAM_PCLK_INVERT_SHFT       0x13
+#define CAM_PAD_REG_SW_RESET_SHFT  0x14
+
+#define EXT_CAM_HSYNC_POL_SEL_SHFT 0x10
+#define EXT_CAM_VSYNC_POL_SEL_SHFT 0xF
+#define MDDI_CLK_CHICKEN_BIT_SHFT  0x7
+
+/* MIPI	CSI	controller registers */
+#define	MIPI_PHY_CONTROL			0x00000000
+#define	MIPI_PROTOCOL_CONTROL		0x00000004
+#define	MIPI_INTERRUPT_STATUS		0x00000008
+#define	MIPI_INTERRUPT_MASK			0x0000000C
+#define	MIPI_CAMERA_CNTL			0x00000024
+#define	MIPI_CALIBRATION_CONTROL	0x00000018
+#define	MIPI_PHY_D0_CONTROL2		0x00000038
+#define	MIPI_PHY_D1_CONTROL2		0x0000003C
+#define	MIPI_PHY_D2_CONTROL2		0x00000040
+#define	MIPI_PHY_D3_CONTROL2		0x00000044
+#define	MIPI_PHY_CL_CONTROL			0x00000048
+#define	MIPI_PHY_D0_CONTROL			0x00000034
+#define	MIPI_PHY_D1_CONTROL			0x00000020
+#define	MIPI_PHY_D2_CONTROL			0x0000002C
+#define	MIPI_PHY_D3_CONTROL			0x00000030
+#define	MIPI_PROTOCOL_CONTROL_SW_RST_BMSK			0x8000000
+#define	MIPI_PROTOCOL_CONTROL_LONG_PACKET_HEADER_CAPTURE_BMSK	0x200000
+#define	MIPI_PROTOCOL_CONTROL_DATA_FORMAT_BMSK			0x180000
+#define	MIPI_PROTOCOL_CONTROL_DECODE_ID_BMSK			0x40000
+#define	MIPI_PROTOCOL_CONTROL_ECC_EN_BMSK			0x20000
+#define	MIPI_CALIBRATION_CONTROL_SWCAL_CAL_EN_SHFT		0x16
+#define	MIPI_CALIBRATION_CONTROL_SWCAL_STRENGTH_OVERRIDE_EN_SHFT	0x15
+#define	MIPI_CALIBRATION_CONTROL_CAL_SW_HW_MODE_SHFT		0x14
+#define	MIPI_CALIBRATION_CONTROL_MANUAL_OVERRIDE_EN_SHFT	0x7
+#define	MIPI_PROTOCOL_CONTROL_DATA_FORMAT_SHFT			0x13
+#define	MIPI_PROTOCOL_CONTROL_DPCM_SCHEME_SHFT			0x1e
+#define	MIPI_PHY_D0_CONTROL2_SETTLE_COUNT_SHFT			0x18
+#define	MIPI_PHY_D0_CONTROL2_HS_TERM_IMP_SHFT			0x10
+#define	MIPI_PHY_D0_CONTROL2_LP_REC_EN_SHFT				0x4
+#define	MIPI_PHY_D0_CONTROL2_ERR_SOT_HS_EN_SHFT			0x3
+#define	MIPI_PHY_D1_CONTROL2_SETTLE_COUNT_SHFT			0x18
+#define	MIPI_PHY_D1_CONTROL2_HS_TERM_IMP_SHFT			0x10
+#define	MIPI_PHY_D1_CONTROL2_LP_REC_EN_SHFT				0x4
+#define	MIPI_PHY_D1_CONTROL2_ERR_SOT_HS_EN_SHFT			0x3
+#define	MIPI_PHY_D2_CONTROL2_SETTLE_COUNT_SHFT			0x18
+#define	MIPI_PHY_D2_CONTROL2_HS_TERM_IMP_SHFT			0x10
+#define	MIPI_PHY_D2_CONTROL2_LP_REC_EN_SHFT				0x4
+#define	MIPI_PHY_D2_CONTROL2_ERR_SOT_HS_EN_SHFT			0x3
+#define	MIPI_PHY_D3_CONTROL2_SETTLE_COUNT_SHFT			0x18
+#define	MIPI_PHY_D3_CONTROL2_HS_TERM_IMP_SHFT			0x10
+#define	MIPI_PHY_D3_CONTROL2_LP_REC_EN_SHFT				0x4
+#define	MIPI_PHY_D3_CONTROL2_ERR_SOT_HS_EN_SHFT			0x3
+#define	MIPI_PHY_CL_CONTROL_HS_TERM_IMP_SHFT			0x18
+#define	MIPI_PHY_CL_CONTROL_LP_REC_EN_SHFT				0x2
+#define	MIPI_PHY_D0_CONTROL_HS_REC_EQ_SHFT				0x1c
+#define	MIPI_PHY_D1_CONTROL_MIPI_CLK_PHY_SHUTDOWNB_SHFT		0x9
+#define	MIPI_PHY_D1_CONTROL_MIPI_DATA_PHY_SHUTDOWNB_SHFT	0x8
+
+#define	CAMIO_VFE_CLK_SNAP			122880000
+#define	CAMIO_VFE_CLK_PREV			122880000
+
+#ifdef CONFIG_MSM_NPA_SYSTEM_BUS
+/* NPA Flow IDs */
+#define MSM_AXI_QOS_PREVIEW     MSM_AXI_FLOW_CAMERA_PREVIEW_HIGH
+#define MSM_AXI_QOS_SNAPSHOT    MSM_AXI_FLOW_CAMERA_SNAPSHOT_12MP
+#define MSM_AXI_QOS_RECORDING   MSM_AXI_FLOW_CAMERA_RECORDING_720P
+#else
+/* AXI rates in KHz */
+#define MSM_AXI_QOS_PREVIEW     192000
+#define MSM_AXI_QOS_SNAPSHOT    192000
+#define MSM_AXI_QOS_RECORDING   192000
+#endif
+
+static struct clk *camio_vfe_mdc_clk;
+static struct clk *camio_mdc_clk;
+static struct clk *camio_vfe_clk;
+static struct clk *camio_vfe_camif_clk;
+static struct clk *camio_vfe_pbdg_clk;
+static struct clk *camio_cam_m_clk;
+static struct clk *camio_camif_pad_pbdg_clk;
+static struct clk *camio_csi_clk;
+static struct clk *camio_csi_pclk;
+static struct clk *camio_csi_vfe_clk;
+static struct clk *camio_jpeg_clk;
+static struct clk *camio_jpeg_pclk;
+static struct clk *camio_vpe_clk;
+static struct vreg *vreg_gp2;
+static struct vreg *vreg_lvsw1;
+static struct vreg *vreg_gp6;
+static struct vreg *vreg_gp16;
+static struct regulator *fs_vfe;
+static struct regulator *fs_vpe;
+static struct msm_camera_io_ext camio_ext;
+static struct msm_camera_io_clk camio_clk;
+static struct resource *camifpadio, *csiio;
+void __iomem *camifpadbase, *csibase;
+static uint32_t vpe_clk_rate;
+
+void msm_io_w(u32 data, void __iomem *addr)
+{
+	CDBG("%s: %08x %08x\n", __func__, (int) (addr), (data));
+	writel_relaxed((data), (addr));
+}
+
+void msm_io_w_mb(u32 data, void __iomem *addr)
+{
+	CDBG("%s: %08x %08x\n", __func__, (int) (addr), (data));
+	wmb();
+	writel_relaxed((data), (addr));
+	wmb();
+}
+
+u32 msm_io_r(void __iomem *addr)
+{
+	uint32_t data = readl_relaxed(addr);
+	CDBG("%s: %08x %08x\n", __func__, (int) (addr), (data));
+	return data;
+}
+
+u32 msm_io_r_mb(void __iomem *addr)
+{
+	uint32_t data;
+	rmb();
+	data = readl_relaxed(addr);
+	rmb();
+	CDBG("%s: %08x %08x\n", __func__, (int) (addr), (data));
+	return data;
+}
+
+void msm_io_memcpy_toio(void __iomem *dest_addr,
+	void __iomem *src_addr, u32 len)
+{
+	int i;
+	u32 *d = (u32 *) dest_addr;
+	u32 *s = (u32 *) src_addr;
+	/* memcpy_toio does not work. Use writel for now */
+	for (i = 0; i < len; i++)
+		writel_relaxed(*s++, d++);
+}
+
+void msm_io_dump(void __iomem *addr, int size)
+{
+	char line_str[128], *p_str;
+	int i;
+	u32 *p = (u32 *) addr;
+	u32 data;
+	CDBG("%s: %p %d\n", __func__, addr, size);
+	line_str[0] = '\0';
+	p_str = line_str;
+	for (i = 0; i < size/4; i++) {
+		if (i % 4 == 0) {
+			sprintf(p_str, "%08x: ", (u32) p);
+			p_str += 10;
+		}
+		data = readl_relaxed(p++);
+		sprintf(p_str, "%08x ", data);
+		p_str += 9;
+		if ((i + 1) % 4 == 0) {
+			CDBG("%s\n", line_str);
+			line_str[0] = '\0';
+			p_str = line_str;
+		}
+	}
+	if (line_str[0] != '\0')
+		CDBG("%s\n", line_str);
+}
+
+void msm_io_memcpy(void __iomem *dest_addr, void __iomem *src_addr, u32 len)
+{
+	CDBG("%s: %p %p %d\n", __func__, dest_addr, src_addr, len);
+	msm_io_memcpy_toio(dest_addr, src_addr, len / 4);
+	msm_io_dump(dest_addr, len);
+}
+
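+/*
+ * Power up the camera supplies: GP2 at 2.6 V, LVSW1 at 1.8 V, the VFE
+ * footswitch and, for the sn12m0pz sensor only, GP6 at 3.05 V and GP16 at
+ * 1.2 V.
+ */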
+static void msm_camera_vreg_enable(struct platform_device *pdev)
+{
+	vreg_gp2 = vreg_get(NULL, "gp2");
+	if (IS_ERR(vreg_gp2)) {
+		pr_err("%s: VREG GP2 get failed %ld\n", __func__,
+			PTR_ERR(vreg_gp2));
+		vreg_gp2 = NULL;
+		return;
+	}
+
+	if (vreg_set_level(vreg_gp2, 2600)) {
+		pr_err("%s: VREG GP2 set failed\n", __func__);
+		goto gp2_put;
+	}
+
+	if (vreg_enable(vreg_gp2)) {
+		pr_err("%s: VREG GP2 enable failed\n", __func__);
+		goto gp2_put;
+	}
+
+	vreg_lvsw1 = vreg_get(NULL, "lvsw1");
+	if (IS_ERR(vreg_lvsw1)) {
+		pr_err("%s: VREG LVSW1 get failed %ld\n", __func__,
+			PTR_ERR(vreg_lvsw1));
+		vreg_lvsw1 = NULL;
+		goto gp2_disable;
+	}
+	if (vreg_set_level(vreg_lvsw1, 1800)) {
+		pr_err("%s: VREG LVSW1 set failed\n", __func__);
+		goto lvsw1_put;
+	}
+	if (vreg_enable(vreg_lvsw1)) {
+		pr_err("%s: VREG LVSW1 enable failed\n", __func__);
+		goto lvsw1_put;
+	}
+
+	if (!strcmp(pdev->name, "msm_camera_sn12m0pz")) {
+		vreg_gp6 = vreg_get(NULL, "gp6");
+		if (IS_ERR(vreg_gp6)) {
+			pr_err("%s: VREG GP6 get failed %ld\n", __func__,
+				PTR_ERR(vreg_gp6));
+			vreg_gp6 = NULL;
+			goto lvsw1_disable;
+		}
+
+		if (vreg_set_level(vreg_gp6, 3050)) {
+			pr_err("%s: VREG GP6 set failed\n", __func__);
+			goto gp6_put;
+		}
+
+		if (vreg_enable(vreg_gp6)) {
+			pr_err("%s: VREG GP6 enable failed\n", __func__);
+			goto gp6_put;
+		}
+		vreg_gp16 = vreg_get(NULL, "gp16");
+		if (IS_ERR(vreg_gp16)) {
+			pr_err("%s: VREG GP16 get failed %ld\n", __func__,
+				PTR_ERR(vreg_gp16));
+			vreg_gp16 = NULL;
+			goto gp6_disable;
+		}
+
+		if (vreg_set_level(vreg_gp16, 1200)) {
+			pr_err("%s: VREG GP16 set failed\n", __func__);
+			goto gp16_put;
+		}
+
+		if (vreg_enable(vreg_gp16)) {
+			pr_err("%s: VREG GP16 enable failed\n", __func__);
+			goto gp16_put;
+		}
+	}
+
+	fs_vfe = regulator_get(NULL, "fs_vfe");
+	if (IS_ERR(fs_vfe)) {
+		pr_err("%s: Regulator FS_VFE get failed %ld\n", __func__,
+			PTR_ERR(fs_vfe));
+		fs_vfe = NULL;
+	} else if (regulator_enable(fs_vfe)) {
+		pr_err("%s: Regulator FS_VFE enable failed\n", __func__);
+		regulator_put(fs_vfe);
+	}
+
+	return;
+
+gp16_put:
+	vreg_put(vreg_gp16);
+	vreg_gp16 = NULL;
+gp6_disable:
+	vreg_disable(vreg_gp6);
+gp6_put:
+	vreg_put(vreg_gp6);
+	vreg_gp6 = NULL;
+lvsw1_disable:
+	vreg_disable(vreg_lvsw1);
+lvsw1_put:
+	vreg_put(vreg_lvsw1);
+	vreg_lvsw1 = NULL;
+gp2_disable:
+	vreg_disable(vreg_gp2);
+gp2_put:
+	vreg_put(vreg_gp2);
+	vreg_gp2 = NULL;
+}
+
+static void msm_camera_vreg_disable(void)
+{
+	if (vreg_gp2) {
+		vreg_disable(vreg_gp2);
+		vreg_put(vreg_gp2);
+		vreg_gp2 = NULL;
+	}
+	if (vreg_lvsw1) {
+		vreg_disable(vreg_lvsw1);
+		vreg_put(vreg_lvsw1);
+		vreg_lvsw1 = NULL;
+	}
+	if (vreg_gp6) {
+		vreg_disable(vreg_gp6);
+		vreg_put(vreg_gp6);
+		vreg_gp6 = NULL;
+	}
+	if (vreg_gp16) {
+		vreg_disable(vreg_gp16);
+		vreg_put(vreg_gp16);
+		vreg_gp16 = NULL;
+	}
+	if (fs_vfe) {
+		regulator_disable(fs_vfe);
+		regulator_put(fs_vfe);
+	}
+}
+
+int msm_camio_clk_enable(enum msm_camio_clk_type clktype)
+{
+	int rc = 0;
+	struct clk *clk = NULL;
+
+	switch (clktype) {
+	case CAMIO_VFE_MDC_CLK:
+		camio_vfe_mdc_clk =
+		clk = clk_get(NULL, "vfe_mdc_clk");
+		break;
+
+	case CAMIO_MDC_CLK:
+		camio_mdc_clk =
+		clk = clk_get(NULL, "mdc_clk");
+		break;
+
+	case CAMIO_VFE_CLK:
+		camio_vfe_clk =
+		clk = clk_get(NULL, "vfe_clk");
+		msm_camio_clk_rate_set_2(clk, camio_clk.vfe_clk_rate);
+		break;
+
+	case CAMIO_VFE_CAMIF_CLK:
+		camio_vfe_camif_clk =
+		clk = clk_get(NULL, "vfe_camif_clk");
+		break;
+
+	case CAMIO_VFE_PBDG_CLK:
+		camio_vfe_pbdg_clk =
+		clk = clk_get(NULL, "vfe_pclk");
+		break;
+
+	case CAMIO_CAM_MCLK_CLK:
+		camio_cam_m_clk =
+		clk = clk_get(NULL, "cam_m_clk");
+		msm_camio_clk_rate_set_2(clk, camio_clk.mclk_clk_rate);
+		break;
+
+	case CAMIO_CAMIF_PAD_PBDG_CLK:
+		camio_camif_pad_pbdg_clk =
+		clk = clk_get(NULL, "camif_pad_pclk");
+		break;
+
+	case CAMIO_CSI0_CLK:
+		camio_csi_clk =
+		clk = clk_get(NULL, "csi_clk");
+		msm_camio_clk_rate_set_2(clk, 153600000);
+		break;
+	case CAMIO_CSI0_VFE_CLK:
+		camio_csi_vfe_clk =
+		clk = clk_get(NULL, "csi_vfe_clk");
+		break;
+	case CAMIO_CSI0_PCLK:
+		camio_csi_pclk =
+		clk = clk_get(NULL, "csi_pclk");
+		break;
+
+	case CAMIO_JPEG_CLK:
+		camio_jpeg_clk =
+		clk = clk_get(NULL, "jpeg_clk");
+		clk_set_min_rate(clk, 144000000);
+		break;
+	case CAMIO_JPEG_PCLK:
+		camio_jpeg_pclk =
+		clk = clk_get(NULL, "jpeg_pclk");
+		break;
+	case CAMIO_VPE_CLK:
+		camio_vpe_clk =
+		clk = clk_get(NULL, "vpe_clk");
+		msm_camio_clk_set_min_rate(clk, vpe_clk_rate);
+		break;
+	default:
+		break;
+	}
+
+	if (!IS_ERR(clk))
+		clk_enable(clk);
+	else
+		rc = -1;
+	return rc;
+}
+
+int msm_camio_clk_disable(enum msm_camio_clk_type clktype)
+{
+	int rc = 0;
+	struct clk *clk = NULL;
+
+	switch (clktype) {
+	case CAMIO_VFE_MDC_CLK:
+		clk = camio_vfe_mdc_clk;
+		break;
+
+	case CAMIO_MDC_CLK:
+		clk = camio_mdc_clk;
+		break;
+
+	case CAMIO_VFE_CLK:
+		clk = camio_vfe_clk;
+		break;
+
+	case CAMIO_VFE_CAMIF_CLK:
+		clk = camio_vfe_camif_clk;
+		break;
+
+	case CAMIO_VFE_PBDG_CLK:
+		clk = camio_vfe_pbdg_clk;
+		break;
+
+	case CAMIO_CAM_MCLK_CLK:
+		clk = camio_cam_m_clk;
+		break;
+
+	case CAMIO_CAMIF_PAD_PBDG_CLK:
+		clk = camio_camif_pad_pbdg_clk;
+		break;
+	case CAMIO_CSI0_CLK:
+		clk = camio_csi_clk;
+		break;
+	case CAMIO_CSI0_VFE_CLK:
+		clk = camio_csi_vfe_clk;
+		break;
+	case CAMIO_CSI0_PCLK:
+		clk = camio_csi_pclk;
+		break;
+	case CAMIO_JPEG_CLK:
+		clk = camio_jpeg_clk;
+		break;
+	case CAMIO_JPEG_PCLK:
+		clk = camio_jpeg_pclk;
+		break;
+	case CAMIO_VPE_CLK:
+		clk = camio_vpe_clk;
+		break;
+	default:
+		break;
+	}
+
+	if (!IS_ERR(clk)) {
+		clk_disable(clk);
+		clk_put(clk);
+	} else
+		rc = -1;
+
+	return rc;
+}
+
+void msm_camio_clk_rate_set(int rate)
+{
+	struct clk *clk = camio_cam_m_clk;
+	clk_set_rate(clk, rate);
+}
+
+void msm_camio_vfe_clk_rate_set(int rate)
+{
+	struct clk *clk = camio_vfe_clk;
+	clk_set_rate(clk, rate);
+}
+
+void msm_camio_clk_rate_set_2(struct clk *clk, int rate)
+{
+	clk_set_rate(clk, rate);
+}
+
+void msm_camio_clk_set_min_rate(struct clk *clk, int rate)
+{
+	clk_set_min_rate(clk, rate);
+}
+
+static irqreturn_t msm_io_csi_irq(int irq_num, void *data)
+{
+	uint32_t irq;
+	irq = msm_io_r(csibase + MIPI_INTERRUPT_STATUS);
+	CDBG("%s MIPI_INTERRUPT_STATUS = 0x%x\n", __func__, irq);
+	msm_io_w(irq, csibase + MIPI_INTERRUPT_STATUS);
+	return IRQ_HANDLED;
+}
+
+int msm_camio_jpeg_clk_disable(void)
+{
+	msm_camio_clk_disable(CAMIO_JPEG_CLK);
+	msm_camio_clk_disable(CAMIO_JPEG_PCLK);
+	/* Code to drop the PM QoS requirement still needs to be added here */
+	return 0;
+}
+
+
+int msm_camio_jpeg_clk_enable(void)
+{
+	msm_camio_clk_enable(CAMIO_JPEG_CLK);
+	msm_camio_clk_enable(CAMIO_JPEG_PCLK);
+	return 0;
+}
+
+int msm_camio_vpe_clk_disable(void)
+{
+	msm_camio_clk_disable(CAMIO_VPE_CLK);
+
+	if (fs_vpe) {
+		regulator_disable(fs_vpe);
+		regulator_put(fs_vpe);
+	}
+
+	return 0;
+}
+
+int msm_camio_vpe_clk_enable(uint32_t clk_rate)
+{
+	fs_vpe = regulator_get(NULL, "fs_vpe");
+	if (IS_ERR(fs_vpe)) {
+		pr_err("%s: Regulator FS_VPE get failed %ld\n", __func__,
+			PTR_ERR(fs_vpe));
+		fs_vpe = NULL;
+	} else if (regulator_enable(fs_vpe)) {
+		pr_err("%s: Regulator FS_VPE enable failed\n", __func__);
+		regulator_put(fs_vpe);
+	}
+
+	vpe_clk_rate = clk_rate;
+	msm_camio_clk_enable(CAMIO_VPE_CLK);
+	return 0;
+}
+
+int msm_camio_enable(struct platform_device *pdev)
+{
+	int rc = 0;
+	uint32_t val;
+	struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	msm_camio_clk_enable(CAMIO_VFE_PBDG_CLK);
+	if (!sinfo->csi_if)
+		msm_camio_clk_enable(CAMIO_VFE_CAMIF_CLK);
+	else {
+		msm_camio_clk_enable(CAMIO_VFE_CLK);
+		csiio = request_mem_region(camio_ext.csiphy,
+			camio_ext.csisz, pdev->name);
+		if (!csiio) {
+			rc = -EBUSY;
+			goto common_fail;
+		}
+		csibase = ioremap(camio_ext.csiphy,
+			camio_ext.csisz);
+		if (!csibase) {
+			rc = -ENOMEM;
+			goto csi_busy;
+		}
+		rc = request_irq(camio_ext.csiirq, msm_io_csi_irq,
+			IRQF_TRIGGER_RISING, "csi", 0);
+		if (rc < 0)
+			goto csi_irq_fail;
+		/* enable required clocks for CSI */
+		msm_camio_clk_enable(CAMIO_CSI0_PCLK);
+		msm_camio_clk_enable(CAMIO_CSI0_VFE_CLK);
+		msm_camio_clk_enable(CAMIO_CSI0_CLK);
+
+		msleep(10);
+		val = (20 <<
+			MIPI_PHY_D0_CONTROL2_SETTLE_COUNT_SHFT) |
+			(0x0F << MIPI_PHY_D0_CONTROL2_HS_TERM_IMP_SHFT) |
+			(0x0 << MIPI_PHY_D0_CONTROL2_LP_REC_EN_SHFT) |
+			(0x1 << MIPI_PHY_D0_CONTROL2_ERR_SOT_HS_EN_SHFT);
+		CDBG("%s MIPI_PHY_D0_CONTROL2 val=0x%x\n", __func__, val);
+		msm_io_w(val, csibase + MIPI_PHY_D0_CONTROL2);
+		msm_io_w(val, csibase + MIPI_PHY_D1_CONTROL2);
+		msm_io_w(val, csibase + MIPI_PHY_D2_CONTROL2);
+		msm_io_w(val, csibase + MIPI_PHY_D3_CONTROL2);
+
+		val = (0x0F << MIPI_PHY_CL_CONTROL_HS_TERM_IMP_SHFT) |
+			(0x0 << MIPI_PHY_CL_CONTROL_LP_REC_EN_SHFT);
+		CDBG("%s MIPI_PHY_CL_CONTROL val=0x%x\n", __func__, val);
+		msm_io_w(val, csibase + MIPI_PHY_CL_CONTROL);
+	}
+	return 0;
+csi_irq_fail:
+	iounmap(csibase);
+csi_busy:
+	release_mem_region(camio_ext.csiphy, camio_ext.csisz);
+common_fail:
+	msm_camio_clk_disable(CAMIO_VFE_PBDG_CLK);
+	msm_camio_clk_disable(CAMIO_VFE_CLK);
+	return rc;
+}
+
+void msm_camio_disable(struct platform_device *pdev)
+{
+	struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	uint32_t val;
+	if (!sinfo->csi_if) {
+		msm_camio_clk_disable(CAMIO_VFE_CAMIF_CLK);
+	} else {
+		val = (0x0 << MIPI_CALIBRATION_CONTROL_SWCAL_CAL_EN_SHFT) |
+		(0x0<<MIPI_CALIBRATION_CONTROL_SWCAL_STRENGTH_OVERRIDE_EN_SHFT)|
+		(0x0 << MIPI_CALIBRATION_CONTROL_CAL_SW_HW_MODE_SHFT) |
+		(0x0 << MIPI_CALIBRATION_CONTROL_MANUAL_OVERRIDE_EN_SHFT);
+		CDBG("%s MIPI_CALIBRATION_CONTROL val=0x%x\n", __func__, val);
+		msm_io_w(val, csibase + MIPI_CALIBRATION_CONTROL);
+
+		val = (20 <<
+			MIPI_PHY_D0_CONTROL2_SETTLE_COUNT_SHFT) |
+			(0x0F << MIPI_PHY_D0_CONTROL2_HS_TERM_IMP_SHFT) |
+			(0x0 << MIPI_PHY_D0_CONTROL2_LP_REC_EN_SHFT) |
+			(0x1 << MIPI_PHY_D0_CONTROL2_ERR_SOT_HS_EN_SHFT);
+		CDBG("%s MIPI_PHY_D0_CONTROL2 val=0x%x\n", __func__, val);
+		msm_io_w(val, csibase + MIPI_PHY_D0_CONTROL2);
+		msm_io_w(val, csibase + MIPI_PHY_D1_CONTROL2);
+		msm_io_w(val, csibase + MIPI_PHY_D2_CONTROL2);
+		msm_io_w(val, csibase + MIPI_PHY_D3_CONTROL2);
+		val = (0x0F << MIPI_PHY_CL_CONTROL_HS_TERM_IMP_SHFT) |
+			(0x0 << MIPI_PHY_CL_CONTROL_LP_REC_EN_SHFT);
+		CDBG("%s MIPI_PHY_CL_CONTROL val=0x%x\n", __func__, val);
+		msm_io_w(val, csibase + MIPI_PHY_CL_CONTROL);
+		msleep(10);
+		free_irq(camio_ext.csiirq, 0);
+		msm_camio_clk_disable(CAMIO_CSI0_PCLK);
+		msm_camio_clk_disable(CAMIO_CSI0_VFE_CLK);
+		msm_camio_clk_disable(CAMIO_CSI0_CLK);
+		msm_camio_clk_disable(CAMIO_VFE_CLK);
+		iounmap(csibase);
+		release_mem_region(camio_ext.csiphy, camio_ext.csisz);
+	}
+	msm_camio_clk_disable(CAMIO_VFE_PBDG_CLK);
+}
+
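+/* Run the CAMIF pad register reset sequence on the internal clock source */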
+void msm_camio_camif_pad_reg_reset(void)
+{
+	uint32_t reg;
+
+	msm_camio_clk_sel(MSM_CAMIO_CLK_SRC_INTERNAL);
+	msleep(10);
+
+	reg = (msm_io_r(camifpadbase)) & CAMIF_CFG_RMSK;
+	reg |= 0x3;
+	msm_io_w(reg, camifpadbase);
+	msleep(10);
+
+	reg = (msm_io_r(camifpadbase)) & CAMIF_CFG_RMSK;
+	reg |= 0x10;
+	msm_io_w(reg, camifpadbase);
+	msleep(10);
+
+	reg = (msm_io_r(camifpadbase)) & CAMIF_CFG_RMSK;
+	/* Needs to be uninverted */
+	reg &= 0x03;
+	msm_io_w(reg, camifpadbase);
+	msleep(10);
+}
+
+void msm_camio_vfe_blk_reset(void)
+{
+	return;
+}
+
+void msm_camio_camif_pad_reg_reset_2(void)
+{
+	uint32_t reg;
+	uint32_t mask, value;
+	reg = (msm_io_r(camifpadbase)) & CAMIF_CFG_RMSK;
+	mask = CAM_PAD_REG_SW_RESET_BMSK;
+	value = 1 << CAM_PAD_REG_SW_RESET_SHFT;
+	msm_io_w((reg & (~mask)) | (value & mask), camifpadbase);
+	mdelay(10);
+	reg = (msm_io_r(camifpadbase)) & CAMIF_CFG_RMSK;
+	mask = CAM_PAD_REG_SW_RESET_BMSK;
+	value = 0 << CAM_PAD_REG_SW_RESET_SHFT;
+	msm_io_w((reg & (~mask)) | (value & mask), camifpadbase);
+	mdelay(10);
+}
+
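+/* Select the VFE clock source (internal or external) via clk_set_flags() */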
+void msm_camio_clk_sel(enum msm_camio_clk_src_type srctype)
+{
+	struct clk *clk = NULL;
+
+	clk = camio_vfe_clk;
+
+	if (clk != NULL) {
+		switch (srctype) {
+		case MSM_CAMIO_CLK_SRC_INTERNAL:
+			clk_set_flags(clk, 0x00000100 << 1);
+			break;
+
+		case MSM_CAMIO_CLK_SRC_EXTERNAL:
+			clk_set_flags(clk, 0x00000100);
+			break;
+
+		default:
+			break;
+		}
+	}
+}
+int msm_camio_probe_on(struct platform_device *pdev)
+{
+	struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+	camio_clk = camdev->ioclk;
+	camio_ext = camdev->ioext;
+	camdev->camera_gpio_on();
+	msm_camera_vreg_enable(pdev);
+	return msm_camio_clk_enable(CAMIO_CAM_MCLK_CLK);
+}
+
+int msm_camio_probe_off(struct platform_device *pdev)
+{
+	struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+	msm_camera_vreg_disable();
+	camdev->camera_gpio_off();
+	return msm_camio_clk_disable(CAMIO_CAM_MCLK_CLK);
+}
+
+int msm_camio_sensor_clk_on(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+	camio_clk = camdev->ioclk;
+	camio_ext = camdev->ioext;
+	camdev->camera_gpio_on();
+	msm_camera_vreg_enable(pdev);
+	msm_camio_clk_enable(CAMIO_CAM_MCLK_CLK);
+	msm_camio_clk_enable(CAMIO_CAMIF_PAD_PBDG_CLK);
+	if (!sinfo->csi_if) {
+		camifpadio = request_mem_region(camio_ext.camifpadphy,
+			camio_ext.camifpadsz, pdev->name);
+		msm_camio_clk_enable(CAMIO_VFE_CLK);
+		if (!camifpadio) {
+			rc = -EBUSY;
+			goto common_fail;
+		}
+		camifpadbase = ioremap(camio_ext.camifpadphy,
+			camio_ext.camifpadsz);
+		if (!camifpadbase) {
+			CDBG("msm_camio_sensor_clk_on fail\n");
+			rc = -ENOMEM;
+			goto parallel_busy;
+		}
+	}
+	return rc;
+parallel_busy:
+	release_mem_region(camio_ext.camifpadphy, camio_ext.camifpadsz);
+	goto common_fail;
+common_fail:
+	msm_camio_clk_disable(CAMIO_CAM_MCLK_CLK);
+	msm_camio_clk_disable(CAMIO_VFE_CLK);
+	msm_camio_clk_disable(CAMIO_CAMIF_PAD_PBDG_CLK);
+	msm_camera_vreg_disable();
+	camdev->camera_gpio_off();
+	return rc;
+}
+
+int msm_camio_sensor_clk_off(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+	camdev->camera_gpio_off();
+	msm_camera_vreg_disable();
+	rc = msm_camio_clk_disable(CAMIO_CAM_MCLK_CLK);
+	rc = msm_camio_clk_disable(CAMIO_CAMIF_PAD_PBDG_CLK);
+	if (!sinfo->csi_if) {
+		iounmap(camifpadbase);
+		release_mem_region(camio_ext.camifpadphy, camio_ext.camifpadsz);
+		rc = msm_camio_clk_disable(CAMIO_VFE_CLK);
+	}
+	return rc;
+}
+
+int msm_camio_csi_config(struct msm_camera_csi_params *csi_params)
+{
+	int rc = 0;
+	uint32_t val = 0;
+
+	CDBG("msm_camio_csi_config\n");
+
+	/* SOT_ECC_EN enable error correction for SYNC (data-lane) */
+	msm_io_w(0x4, csibase + MIPI_PHY_CONTROL);
+
+	/* SW_RST to the CSI core */
+	msm_io_w(MIPI_PROTOCOL_CONTROL_SW_RST_BMSK,
+		csibase + MIPI_PROTOCOL_CONTROL);
+
+	/* PROTOCOL CONTROL */
+	val = MIPI_PROTOCOL_CONTROL_LONG_PACKET_HEADER_CAPTURE_BMSK |
+		MIPI_PROTOCOL_CONTROL_DECODE_ID_BMSK |
+		MIPI_PROTOCOL_CONTROL_ECC_EN_BMSK;
+	val |= (uint32_t)(csi_params->data_format) <<
+		MIPI_PROTOCOL_CONTROL_DATA_FORMAT_SHFT;
+	val |= csi_params->dpcm_scheme <<
+		MIPI_PROTOCOL_CONTROL_DPCM_SCHEME_SHFT;
+	CDBG("%s MIPI_PROTOCOL_CONTROL val=0x%x\n", __func__, val);
+	msm_io_w(val, csibase + MIPI_PROTOCOL_CONTROL);
+
+	/* SW CAL EN */
+	val = (0x1 << MIPI_CALIBRATION_CONTROL_SWCAL_CAL_EN_SHFT) |
+		(0x1 <<
+		MIPI_CALIBRATION_CONTROL_SWCAL_STRENGTH_OVERRIDE_EN_SHFT) |
+		(0x1 << MIPI_CALIBRATION_CONTROL_CAL_SW_HW_MODE_SHFT) |
+		(0x1 << MIPI_CALIBRATION_CONTROL_MANUAL_OVERRIDE_EN_SHFT);
+	CDBG("%s MIPI_CALIBRATION_CONTROL val=0x%x\n", __func__, val);
+	msm_io_w(val, csibase + MIPI_CALIBRATION_CONTROL);
+
+	/* settle_cnt is very sensitive to speed!
+	increase this value to run at higher speeds */
+	val = (csi_params->settle_cnt <<
+			MIPI_PHY_D0_CONTROL2_SETTLE_COUNT_SHFT) |
+		(0x0F << MIPI_PHY_D0_CONTROL2_HS_TERM_IMP_SHFT) |
+		(0x1 << MIPI_PHY_D0_CONTROL2_LP_REC_EN_SHFT) |
+		(0x1 << MIPI_PHY_D0_CONTROL2_ERR_SOT_HS_EN_SHFT);
+	CDBG("%s MIPI_PHY_D0_CONTROL2 val=0x%x\n", __func__, val);
+	msm_io_w(val, csibase + MIPI_PHY_D0_CONTROL2);
+	msm_io_w(val, csibase + MIPI_PHY_D1_CONTROL2);
+	msm_io_w(val, csibase + MIPI_PHY_D2_CONTROL2);
+	msm_io_w(val, csibase + MIPI_PHY_D3_CONTROL2);
+
+
+	val = (0x0F << MIPI_PHY_CL_CONTROL_HS_TERM_IMP_SHFT) |
+		(0x1 << MIPI_PHY_CL_CONTROL_LP_REC_EN_SHFT);
+	CDBG("%s MIPI_PHY_CL_CONTROL val=0x%x\n", __func__, val);
+	msm_io_w(val, csibase + MIPI_PHY_CL_CONTROL);
+
+	val = 0 << MIPI_PHY_D0_CONTROL_HS_REC_EQ_SHFT;
+	msm_io_w(val, csibase + MIPI_PHY_D0_CONTROL);
+
+	val = (0x1 << MIPI_PHY_D1_CONTROL_MIPI_CLK_PHY_SHUTDOWNB_SHFT) |
+		(0x1 << MIPI_PHY_D1_CONTROL_MIPI_DATA_PHY_SHUTDOWNB_SHFT);
+	CDBG("%s MIPI_PHY_D1_CONTROL val=0x%x\n", __func__, val);
+	msm_io_w(val, csibase + MIPI_PHY_D1_CONTROL);
+
+	msm_io_w(0x00000000, csibase + MIPI_PHY_D2_CONTROL);
+	msm_io_w(0x00000000, csibase + MIPI_PHY_D3_CONTROL);
+
+	/* halcyon only supports 1 or 2 lane */
+	switch (csi_params->lane_cnt) {
+	case 1:
+		msm_io_w(csi_params->lane_assign << 8 | 0x4,
+			csibase + MIPI_CAMERA_CNTL);
+		break;
+	case 2:
+		msm_io_w(csi_params->lane_assign << 8 | 0x5,
+			csibase + MIPI_CAMERA_CNTL);
+		break;
+	case 3:
+		msm_io_w(csi_params->lane_assign << 8 | 0x6,
+			csibase + MIPI_CAMERA_CNTL);
+		break;
+	case 4:
+		msm_io_w(csi_params->lane_assign << 8 | 0x7,
+			csibase + MIPI_CAMERA_CNTL);
+		break;
+	}
+
+	/* mask out ID_ERROR[19], DATA_CMM_ERR[11]
+	and CLK_CMM_ERR[10] - de-featured */
+	msm_io_w(0xFFF7F3FF, csibase + MIPI_INTERRUPT_MASK);
+	/*clear IRQ bits*/
+	msm_io_w(0xFFF7F3FF, csibase + MIPI_INTERRUPT_STATUS);
+
+	return rc;
+}
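+
+/* Translate camera use cases into AXI bus QoS requests on this target */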
+void msm_camio_set_perf_lvl(enum msm_bus_perf_setting perf_setting)
+{
+	switch (perf_setting) {
+	case S_INIT:
+		add_axi_qos();
+		break;
+	case S_PREVIEW:
+		update_axi_qos(MSM_AXI_QOS_PREVIEW);
+		break;
+	case S_VIDEO:
+		update_axi_qos(MSM_AXI_QOS_RECORDING);
+		break;
+	case S_CAPTURE:
+		update_axi_qos(MSM_AXI_QOS_SNAPSHOT);
+		break;
+	case S_DEFAULT:
+		update_axi_qos(PM_QOS_DEFAULT_VALUE);
+		break;
+	case S_EXIT:
+		release_axi_qos();
+		break;
+	default:
+		CDBG("%s: INVALID CASE\n", __func__);
+	}
+}
diff --git a/drivers/media/video/msm/msm_isp.c b/drivers/media/video/msm/msm_isp.c
new file mode 100644
index 0000000..480c626
--- /dev/null
+++ b/drivers/media/video/msm/msm_isp.c
@@ -0,0 +1,814 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/ioctl.h>
+#include <linux/spinlock.h>
+#include <linux/videodev2.h>
+#include <linux/proc_fs.h>
+#include <linux/vmalloc.h>
+#include <linux/android_pmem.h>
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-device.h>
+
+#include "msm.h"
+
+#ifdef CONFIG_MSM_CAMERA_DEBUG
+#define D(fmt, args...) pr_debug("msm_isp: " fmt, ##args)
+#else
+#define D(fmt, args...) do {} while (0)
+#endif
+#define ERR_USER_COPY(to) pr_err("%s(%d): copy %s user\n", \
+				__func__, __LINE__, ((to) ? "to" : "from"))
+#define ERR_COPY_FROM_USER() ERR_USER_COPY(0)
+#define ERR_COPY_TO_USER() ERR_USER_COPY(1)
+
+#define MSM_FRAME_AXI_MAX_BUF 16
+/* This will enqueue ISP events or signal buffer completion */
+static int msm_isp_enqueue(struct msm_cam_media_controller *pmctl,
+				struct msm_vfe_resp *data,
+				enum msm_queue qtype)
+{
+	struct v4l2_event v4l2_evt;
+
+	struct msm_stats_buf stats;
+	struct msm_isp_stats_event_ctrl *isp_event;
+	isp_event = (struct msm_isp_stats_event_ctrl *)v4l2_evt.u.data;
+	if (!data) {
+		pr_err("%s: data is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	D("%s data->type = %d\n", __func__, data->type);
+
+	switch (qtype) {
+	case MSM_CAM_Q_VFE_EVT:
+	case MSM_CAM_Q_VFE_MSG:
+		/* adsp event and message */
+		v4l2_evt.type = V4L2_EVENT_PRIVATE_START +
+					MSM_CAM_RESP_STAT_EVT_MSG;
+
+		isp_event->resptype = MSM_CAM_RESP_STAT_EVT_MSG;
+
+		/* 0 - msg from aDSP, 1 - event from mARM */
+		isp_event->isp_data.isp_msg.type   = data->evt_msg.type;
+		isp_event->isp_data.isp_msg.msg_id = data->evt_msg.msg_id;
+		isp_event->isp_data.isp_msg.len	= 0;
+
+		D("%s: qtype %d length %d msg_id %d\n", __func__,
+					qtype,
+					isp_event->isp_data.isp_msg.len,
+					isp_event->isp_data.isp_msg.msg_id);
+
+		if ((data->type >= VFE_MSG_STATS_AEC) &&
+			(data->type <=  VFE_MSG_STATS_WE)) {
+
+			D("%s data->phy.sbuf_phy = 0x%x\n", __func__,
+						data->phy.sbuf_phy);
+			stats.buffer = msm_pmem_stats_ptov_lookup(&pmctl->sync,
+							data->phy.sbuf_phy,
+							&(stats.fd));
+			if (!stats.buffer) {
+				pr_err("%s: msm_pmem_stats_ptov_lookup error\n",
+								__func__);
+				isp_event->isp_data.isp_msg.len = 0;
+			} else {
+				struct msm_stats_buf *stats_buf =
+					kmalloc(sizeof(struct msm_stats_buf),
+								GFP_ATOMIC);
+				if (!stats_buf) {
+					pr_err("%s: out of memory.\n",
+								__func__);
+					return -ENOMEM;
+				}
+
+				*stats_buf = stats;
+				isp_event->isp_data.isp_msg.len	=
+						sizeof(struct msm_stats_buf);
+				isp_event->isp_data.isp_msg.data = stats_buf;
+			}
+
+		} else if ((data->evt_msg.len > 0) &&
+				(data->type == VFE_MSG_GENERAL)) {
+			isp_event->isp_data.isp_msg.data =
+					kmalloc(data->evt_msg.len, GFP_ATOMIC);
+			if (!isp_event->isp_data.isp_msg.data) {
+				pr_err("%s: out of memory.\n", __func__);
+				return -ENOMEM;
+			}
+			memcpy(isp_event->isp_data.isp_msg.data,
+						data->evt_msg.data,
+						data->evt_msg.len);
+		} else if (data->type == VFE_MSG_OUTPUT_P ||
+			data->type == VFE_MSG_OUTPUT_V ||
+			data->type == VFE_MSG_OUTPUT_S ||
+			data->type == VFE_MSG_OUTPUT_T) {
+			msm_mctl_buf_done(pmctl, data->type,
+					(u32)data->phy.y_phy);
+		}
+		break;
+	default:
+		break;
+	}
+
+	/* now queue the event */
+	v4l2_event_queue(pmctl->config_device->config_stat_event_queue.pvdev,
+					  &v4l2_evt);
+	return 0;
+}
+
+/*
+ * This function executes in interrupt context.
+ */
+
+void *msm_isp_sync_alloc(int size,
+	  void *syncdata __attribute__((unused)),
+	  gfp_t gfp)
+{
+	struct msm_queue_cmd *qcmd =
+		kmalloc(sizeof(struct msm_queue_cmd) + size, gfp);
+
+	if (qcmd) {
+		atomic_set(&qcmd->on_heap, 1);
+		return qcmd + 1;
+	}
+	return NULL;
+}
+
+void msm_isp_sync_free(void *ptr)
+{
+	if (ptr) {
+		struct msm_queue_cmd *qcmd =
+			(struct msm_queue_cmd *)ptr;
+		qcmd--;
+		if (atomic_read(&qcmd->on_heap))
+			kfree(qcmd);
+	}
+}
+
+/*
+ * This function executes in interrupt context.
+ */
+static int msm_isp_notify(struct v4l2_subdev *sd, void *arg)
+{
+	int rc = -EINVAL;
+	struct msm_queue_cmd *qcmd = NULL;
+	struct msm_sync *sync =
+		(struct msm_sync *)v4l2_get_subdev_hostdata(sd);
+	struct msm_vfe_resp *vdata = (struct msm_vfe_resp *)arg;
+
+	if (!sync) {
+		pr_err("%s: no context in dsp callback.\n", __func__);
+		return rc;
+	}
+
+	qcmd = ((struct msm_queue_cmd *)vdata) - 1;
+	qcmd->type = MSM_CAM_Q_VFE_MSG;
+	qcmd->command = vdata;
+
+	D("%s: vdata->type %d\n", __func__, vdata->type);
+	switch (vdata->type) {
+	case VFE_MSG_STATS_AWB:
+		D("%s: qtype %d, AWB stats, enqueue event_q.\n",
+					__func__, vdata->type);
+		break;
+
+	case VFE_MSG_STATS_AEC:
+		D("%s: qtype %d, AEC stats, enqueue event_q.\n",
+					__func__, vdata->type);
+		break;
+
+	case VFE_MSG_STATS_IHIST:
+		D("%s: qtype %d, ihist stats, enqueue event_q.\n",
+					__func__, vdata->type);
+		break;
+
+	case VFE_MSG_STATS_RS:
+		D("%s: qtype %d, rs stats, enqueue event_q.\n",
+					__func__, vdata->type);
+		break;
+
+	case VFE_MSG_STATS_CS:
+		D("%s: qtype %d, cs stats, enqueue event_q.\n",
+					__func__, vdata->type);
+	break;
+
+	case VFE_MSG_GENERAL:
+		D("%s: qtype %d, general msg, enqueue event_q.\n",
+					__func__, vdata->type);
+		break;
+	default:
+		D("%s: qtype %d not handled\n", __func__, vdata->type);
+		/* fall through, send to config. */
+	}
+
+	D("%s: msm_enqueue event_q\n", __func__);
+	rc = msm_isp_enqueue(&sync->pcam_sync->mctl, vdata, MSM_CAM_Q_VFE_MSG);
+
+	msm_isp_sync_free(vdata);
+
+	return rc;
+}
+
+/* This function is called by open() function, so we need to init HW*/
+static int msm_isp_open(struct v4l2_subdev *sd, struct msm_sync *sync)
+{
+	/* init vfe and sensor, register sync callbacks for init */
+	int rc = 0;
+	D("%s\n", __func__);
+	if (!sync) {
+		pr_err("%s: param is NULL", __func__);
+		return -EINVAL;
+	}
+
+	rc = msm_vfe_subdev_init(sd, sync, sync->pdev);
+	if (rc < 0) {
+		pr_err("%s: vfe_init failed at %d\n",
+					__func__, rc);
+	}
+
+	return rc;
+}
+
+static void msm_isp_release(struct msm_sync *psync)
+{
+	D("%s\n", __func__);
+	msm_vfe_subdev_release(psync->pdev);
+}
+
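+/*
+ * Copy a VFE config command from userspace, look up the pmem regions for the
+ * requested stats type and forward the command to the ISP subdev.
+ */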
+static int msm_config_vfe(struct v4l2_subdev *sd,
+		struct msm_sync *sync, void __user *arg)
+{
+	struct msm_vfe_cfg_cmd cfgcmd;
+	struct msm_pmem_region region[8];
+	struct axidata axi_data;
+
+	if (copy_from_user(&cfgcmd, arg, sizeof(cfgcmd))) {
+		ERR_COPY_FROM_USER();
+		return -EFAULT;
+	}
+
+	memset(&axi_data, 0, sizeof(axi_data));
+	CDBG("%s: cmd_type %d\n", __func__, cfgcmd.cmd_type);
+	switch (cfgcmd.cmd_type) {
+	case CMD_STATS_AF_ENABLE:
+		axi_data.bufnum1 =
+			msm_pmem_region_lookup(&sync->pmem_stats,
+					MSM_PMEM_AF, &region[0],
+					NUM_STAT_OUTPUT_BUFFERS);
+		if (!axi_data.bufnum1) {
+			pr_err("%s %d: pmem region lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		axi_data.region = &region[0];
+		return msm_isp_subdev_ioctl(sd, &cfgcmd,
+							&axi_data);
+	case CMD_STATS_AEC_ENABLE:
+		axi_data.bufnum1 =
+			msm_pmem_region_lookup(&sync->pmem_stats,
+			MSM_PMEM_AEC, &region[0],
+			NUM_STAT_OUTPUT_BUFFERS);
+		if (!axi_data.bufnum1) {
+			pr_err("%s %d: pmem region lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		axi_data.region = &region[0];
+		return msm_isp_subdev_ioctl(sd, &cfgcmd,
+							&axi_data);
+	case CMD_STATS_AWB_ENABLE:
+		axi_data.bufnum1 =
+			msm_pmem_region_lookup(&sync->pmem_stats,
+			MSM_PMEM_AWB, &region[0],
+			NUM_STAT_OUTPUT_BUFFERS);
+		if (!axi_data.bufnum1) {
+			pr_err("%s %d: pmem region lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		axi_data.region = &region[0];
+		return msm_isp_subdev_ioctl(sd, &cfgcmd,
+							&axi_data);
+	case CMD_STATS_IHIST_ENABLE:
+		axi_data.bufnum1 =
+			msm_pmem_region_lookup(&sync->pmem_stats,
+			MSM_PMEM_IHIST, &region[0],
+			NUM_STAT_OUTPUT_BUFFERS);
+		if (!axi_data.bufnum1) {
+			pr_err("%s %d: pmem region lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		axi_data.region = &region[0];
+		return msm_isp_subdev_ioctl(sd, &cfgcmd,
+							&axi_data);
+	case CMD_STATS_RS_ENABLE:
+		axi_data.bufnum1 =
+			msm_pmem_region_lookup(&sync->pmem_stats,
+			MSM_PMEM_RS, &region[0],
+			NUM_STAT_OUTPUT_BUFFERS);
+		if (!axi_data.bufnum1) {
+			pr_err("%s %d: pmem region lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		axi_data.region = &region[0];
+		return msm_isp_subdev_ioctl(sd, &cfgcmd,
+							&axi_data);
+	case CMD_STATS_CS_ENABLE:
+		axi_data.bufnum1 =
+			msm_pmem_region_lookup(&sync->pmem_stats,
+			MSM_PMEM_CS, &region[0],
+			NUM_STAT_OUTPUT_BUFFERS);
+		if (!axi_data.bufnum1) {
+			pr_err("%s %d: pmem region lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		axi_data.region = &region[0];
+		return msm_isp_subdev_ioctl(sd, &cfgcmd,
+							&axi_data);
+	case CMD_GENERAL:
+	case CMD_STATS_DISABLE:
+		return msm_isp_subdev_ioctl(sd, &cfgcmd,
+							&axi_data);
+	default:
+		pr_err("%s: unknown command type %d\n",
+			__func__, cfgcmd.cmd_type);
+	}
+
+	return -EINVAL;
+}
+
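+/*
+ * Build the AXI buffer list for the VPE path from the registered
+ * MSM_PMEM_VIDEO_VPE regions and hand it to the VPE config hook.
+ */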
+static int msm_vpe_frame_cfg(struct msm_sync *sync,
+				void *cfgcmdin)
+{
+	int rc = -EIO;
+	struct axidata axi_data;
+	void *data = &axi_data;
+	struct msm_pmem_region region[8];
+	int pmem_type;
+
+	struct msm_vpe_cfg_cmd *cfgcmd;
+	cfgcmd = (struct msm_vpe_cfg_cmd *)cfgcmdin;
+
+	memset(&axi_data, 0, sizeof(axi_data));
+	CDBG("In vpe_frame_cfg cfgcmd->cmd_type = %d\n",
+		cfgcmd->cmd_type);
+	switch (cfgcmd->cmd_type) {
+	case CMD_AXI_CFG_VPE:
+		pmem_type = MSM_PMEM_VIDEO_VPE;
+		axi_data.bufnum1 =
+			msm_pmem_region_lookup_2(&sync->pmem_frames, pmem_type,
+								&region[0], 8);
+		CDBG("axi_data.bufnum1 = %d\n", axi_data.bufnum1);
+		if (!axi_data.bufnum1) {
+			pr_err("%s %d: pmem region lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		pmem_type = MSM_PMEM_VIDEO;
+		break;
+	default:
+		pr_err("%s: unknown command type %d\n",
+			__func__, cfgcmd->cmd_type);
+		return -EINVAL;
+	}
+	axi_data.region = &region[0];
+	CDBG("out vpe_frame_cfg cfgcmd->cmd_type = %d\n",
+		cfgcmd->cmd_type);
+	/* send the AXI configuration command to driver */
+	if (sync->vpefn.vpe_config)
+		rc = sync->vpefn.vpe_config(cfgcmd, data);
+	return rc;
+}
+
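+/*
+ * Configure the stats (AEC/AWB or AF) AXI output: look up the stats
+ * pmem regions and forward the command to the ISP subdev.
+ */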
+static int msm_stats_axi_cfg(struct v4l2_subdev *sd,
+		struct msm_sync *sync, struct msm_vfe_cfg_cmd *cfgcmd)
+{
+	int rc = -EIO;
+	struct axidata axi_data;
+	void *data = &axi_data;
+	struct msm_pmem_region region[3];
+	int pmem_type = MSM_PMEM_MAX;
+
+	memset(&axi_data, 0, sizeof(axi_data));
+
+	switch (cfgcmd->cmd_type) {
+	case CMD_STATS_AXI_CFG:
+		pmem_type = MSM_PMEM_AEC_AWB;
+		break;
+	case CMD_STATS_AF_AXI_CFG:
+		pmem_type = MSM_PMEM_AF;
+		break;
+	case CMD_GENERAL:
+		data = NULL;
+		break;
+	default:
+		pr_err("%s: unknown command type %d\n",
+			__func__, cfgcmd->cmd_type);
+		return -EINVAL;
+	}
+
+	if (cfgcmd->cmd_type != CMD_GENERAL) {
+		axi_data.bufnum1 =
+			msm_pmem_region_lookup(&sync->pmem_stats, pmem_type,
+				&region[0], NUM_STAT_OUTPUT_BUFFERS);
+		if (!axi_data.bufnum1) {
+			pr_err("%s %d: pmem region lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		axi_data.region = &region[0];
+	}
+
+	/* send the AEC/AWB STATS configuration command to driver */
+	rc = msm_isp_subdev_ioctl(sd, cfgcmd, data);
+	return rc;
+}
+
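+/*
+ * Configure the frame AXI outputs (preview, video, snapshot or raw):
+ * collect the buffers queued on the matching device instance and
+ * forward the AXI command to the ISP subdev.
+ */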
+static int msm_frame_axi_cfg(struct v4l2_subdev *sd,
+	struct msm_sync *sync, struct msm_vfe_cfg_cmd *cfgcmd)
+{
+	int rc = -EIO;
+	struct axidata axi_data;
+	void *data = &axi_data;
+	struct msm_pmem_region region[MSM_FRAME_AXI_MAX_BUF];
+	int pmem_type;
+	int i = 0;
+	int idx = 0;
+	struct msm_cam_v4l2_device *pcam = sync->pcam_sync;
+	struct msm_cam_v4l2_dev_inst *pcam_inst;
+
+	memset(&axi_data, 0, sizeof(axi_data));
+
+	switch (cfgcmd->cmd_type) {
+
+	case CMD_AXI_CFG_PREVIEW:
+		pcam_inst =
+		pcam->dev_inst_map[MSM_V4L2_EXT_CAPTURE_MODE_PREVIEW];
+		if (pcam_inst)
+			idx = pcam_inst->my_index;
+		else
+			return rc;
+		pmem_type = MSM_PMEM_PREVIEW;
+		axi_data.bufnum2 =
+			msm_pmem_region_lookup_3(sync->pcam_sync, idx,
+				&region[0], pmem_type);
+		if (!axi_data.bufnum2) {
+			pr_err("%s %d: pmem region 3 lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		D("%s __func__ axi_data.bufnum2 = %d\n", __func__,
+						axi_data.bufnum2);
+		break;
+
+	case CMD_AXI_CFG_VIDEO:
+		pcam_inst =
+		pcam->dev_inst_map[MSM_V4L2_EXT_CAPTURE_MODE_PREVIEW];
+		if (pcam_inst)
+			idx = pcam_inst->my_index;
+		else
+			return rc;
+		pmem_type = MSM_PMEM_PREVIEW;
+		axi_data.bufnum1 =
+			msm_pmem_region_lookup_3(sync->pcam_sync, idx,
+				&region[0], pmem_type);
+		D("%s bufnum1 = %d\n", __func__, axi_data.bufnum1);
+		if (!axi_data.bufnum1) {
+			pr_err("%s %d: pmem region lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+
+		pcam_inst =
+			pcam->dev_inst_map[MSM_V4L2_EXT_CAPTURE_MODE_VIDEO];
+		if (pcam_inst)
+			idx = pcam_inst->my_index;
+		else
+			return rc;
+		pmem_type = MSM_PMEM_VIDEO;
+		axi_data.bufnum2 =
+			msm_pmem_region_lookup_3(sync->pcam_sync, idx,
+				&region[axi_data.bufnum1], pmem_type);
+		D("%s bufnum2 = %d\n", __func__, axi_data.bufnum2);
+		if (!axi_data.bufnum2) {
+			pr_err("%s %d: pmem region lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		break;
+
+
+	case CMD_AXI_CFG_SNAP:
+		pcam_inst
+		= pcam->dev_inst_map[MSM_V4L2_EXT_CAPTURE_MODE_THUMBNAIL];
+		if (pcam_inst)
+			idx = pcam_inst->my_index;
+		else
+			return rc;
+		pmem_type = MSM_PMEM_THUMBNAIL;
+		axi_data.bufnum1 =
+			msm_pmem_region_lookup_3(sync->pcam_sync, idx,
+				&region[0], pmem_type);
+		if (!axi_data.bufnum1) {
+			pr_err("%s %d: pmem region lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+
+		pcam_inst
+		= pcam->dev_inst_map[MSM_V4L2_EXT_CAPTURE_MODE_MAIN];
+		if (pcam_inst)
+			idx = pcam_inst->my_index;
+		else
+			return rc;
+		pmem_type = MSM_PMEM_MAINIMG;
+		axi_data.bufnum2 =
+		msm_pmem_region_lookup_3(sync->pcam_sync, idx,
+				&region[axi_data.bufnum1], pmem_type);
+		if (!axi_data.bufnum2) {
+			pr_err("%s %d: pmem region lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		break;
+
+	case CMD_RAW_PICT_AXI_CFG:
+		pcam_inst
+		= pcam->dev_inst_map[MSM_V4L2_EXT_CAPTURE_MODE_MAIN];
+		if (pcam_inst)
+			idx = pcam_inst->my_index;
+		else
+			return rc;
+		pmem_type = MSM_PMEM_RAW_MAINIMG;
+		axi_data.bufnum2 =
+			msm_pmem_region_lookup_3(sync->pcam_sync, idx,
+				&region[0], pmem_type);
+		if (!axi_data.bufnum2) {
+			pr_err("%s %d: pmem region lookup error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		break;
+
+	case CMD_GENERAL:
+		data = NULL;
+		break;
+
+	default:
+		pr_err("%s: unknown command type %d\n",
+			__func__, cfgcmd->cmd_type);
+		return -EINVAL;
+	}
+
+	axi_data.region = &region[0];
+	D("%s bufnum1 = %d, bufnum2 = %d\n", __func__,
+	  axi_data.bufnum1, axi_data.bufnum2);
+	for (i = 0; i < MSM_FRAME_AXI_MAX_BUF; i++) {
+		D("%s region %d paddr = 0x%p\n", __func__, i,
+					(void *)region[i].paddr);
+		D("%s region y_off = %d cbcr_off = %d\n", __func__,
+			region[i].info.y_off, region[i].info.cbcr_off);
+	}
+	/* send the AXI configuration command to driver */
+	rc = msm_isp_subdev_ioctl(sd, cfgcmd, data);
+	return rc;
+}
+
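+/* Dispatch a user-space AXI config request to the frame, VPE or stats path. */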
+static int msm_axi_config(struct v4l2_subdev *sd,
+			struct msm_sync *sync, void __user *arg)
+{
+	struct msm_vfe_cfg_cmd cfgcmd;
+
+	if (copy_from_user(&cfgcmd, arg, sizeof(cfgcmd))) {
+		ERR_COPY_FROM_USER();
+		return -EFAULT;
+	}
+
+	switch (cfgcmd.cmd_type) {
+	case CMD_AXI_CFG_VIDEO:
+	case CMD_AXI_CFG_PREVIEW:
+	case CMD_AXI_CFG_SNAP:
+	case CMD_RAW_PICT_AXI_CFG:
+		return msm_frame_axi_cfg(sd, sync, &cfgcmd);
+	case CMD_AXI_CFG_VPE:
+		/* VPE AXI config is currently bypassed; treat it as a no-op. */
+		return 0;
+		return msm_vpe_frame_cfg(sync, (void *)&cfgcmd);
+
+	case CMD_STATS_AXI_CFG:
+	case CMD_STATS_AF_AXI_CFG:
+		return msm_stats_axi_cfg(sd, sync, &cfgcmd);
+
+	default:
+		pr_err("%s: unknown command type %d\n",
+			__func__,
+			cfgcmd.cmd_type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
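+/* Cache the user-supplied crop info in the sync object for later use. */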
+static int msm_set_crop(struct msm_sync *sync, void __user *arg)
+{
+	struct crop_info crop;
+
+	if (copy_from_user(&crop,
+				arg,
+				sizeof(struct crop_info))) {
+		ERR_COPY_FROM_USER();
+		return -EFAULT;
+	}
+
+	if (!sync->croplen) {
+		sync->cropinfo = kmalloc(crop.len, GFP_KERNEL);
+		if (!sync->cropinfo)
+			return -ENOMEM;
+	} else if (sync->croplen < crop.len)
+		return -EINVAL;
+
+	if (copy_from_user(sync->cropinfo,
+				crop.info,
+				crop.len)) {
+		ERR_COPY_FROM_USER();
+		kfree(sync->cropinfo);
+		sync->cropinfo = NULL;
+		sync->croplen = 0;
+		return -EFAULT;
+	}
+
+	sync->croplen = crop.len;
+
+	return 0;
+}
+
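+/*
+ * Return a stats buffer to the VFE: translate the user virtual address
+ * back to its physical address and issue the matching release command.
+ */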
+static int msm_put_stats_buffer(struct v4l2_subdev *sd,
+			struct msm_sync *sync, void __user *arg)
+{
+	int rc = -EIO;
+
+	struct msm_stats_buf buf;
+	unsigned long pphy;
+	struct msm_vfe_cfg_cmd cfgcmd;
+
+	if (copy_from_user(&buf, arg,
+				sizeof(struct msm_stats_buf))) {
+		ERR_COPY_FROM_USER();
+		return -EFAULT;
+	}
+
+	CDBG("%s\n", __func__);
+	pphy = msm_pmem_stats_vtop_lookup(sync, buf.buffer, buf.fd);
+
+	if (pphy != 0) {
+		if (buf.type == STAT_AF)
+			cfgcmd.cmd_type = CMD_STATS_AF_BUF_RELEASE;
+		else if (buf.type == STAT_AEC)
+			cfgcmd.cmd_type = CMD_STATS_AEC_BUF_RELEASE;
+		else if (buf.type == STAT_AWB)
+			cfgcmd.cmd_type = CMD_STATS_AWB_BUF_RELEASE;
+		else if (buf.type == STAT_IHIST)
+			cfgcmd.cmd_type = CMD_STATS_IHIST_BUF_RELEASE;
+		else if (buf.type == STAT_RS)
+			cfgcmd.cmd_type = CMD_STATS_RS_BUF_RELEASE;
+		else if (buf.type == STAT_CS)
+			cfgcmd.cmd_type = CMD_STATS_CS_BUF_RELEASE;
+
+		else {
+			pr_err("%s: invalid buf type %d\n",
+				__func__,
+				buf.type);
+			rc = -EINVAL;
+			goto put_done;
+		}
+
+		cfgcmd.value = (void *)&buf;
+
+		rc = msm_isp_subdev_ioctl(sd, &cfgcmd, &pphy);
+	} else {
+		pr_err("%s: NULL physical address\n", __func__);
+		rc = -EINVAL;
+	}
+
+put_done:
+	return rc;
+}
+
+/* config function similar to the original msm_ioctl_config */
+static int msm_isp_config(struct msm_cam_media_controller *pmctl,
+			 unsigned int cmd, unsigned long arg)
+{
+
+	int rc = -EINVAL;
+	void __user *argp = (void __user *)arg;
+	struct v4l2_subdev *sd = &pmctl->isp_sdev->sd;
+
+	D("%s: cmd %d\n", __func__, _IOC_NR(cmd));
+	switch (cmd) {
+	case MSM_CAM_IOCTL_PICT_PP_DONE:
+		/* Release the preview of snapshot frame
+		 * that was grabbed.
+		 */
+		/*rc = msm_pp_release(pmsm->sync, arg);*/
+		break;
+
+	case MSM_CAM_IOCTL_CONFIG_VFE:
+		/* Coming from config thread for update */
+		rc = msm_config_vfe(sd, &pmctl->sync, argp);
+		break;
+
+	case MSM_CAM_IOCTL_CONFIG_VPE:
+		/* Coming from config thread for update */
+		/*rc = msm_config_vpe(pmsm->sync, argp);*/
+		rc = 0;
+		break;
+
+	case MSM_CAM_IOCTL_AXI_CONFIG:
+	case MSM_CAM_IOCTL_AXI_VPE_CONFIG:
+		D("Received MSM_CAM_IOCTL_AXI_CONFIG\n");
+		rc = msm_axi_config(sd, &pmctl->sync, argp);
+		break;
+
+	case MSM_CAM_IOCTL_SET_CROP:
+		rc = msm_set_crop(&pmctl->sync, argp);
+		break;
+
+	case MSM_CAM_IOCTL_RELEASE_STATS_BUFFER:
+		rc = msm_put_stats_buffer(sd, &pmctl->sync, argp);
+		break;
+
+	default:
+		break;
+	}
+
+	D("%s: cmd %d DONE\n", __func__, _IOC_NR(cmd));
+
+	return rc;
+}
+
+static struct msm_isp_ops isp_subdev[MSM_MAX_CAMERA_CONFIGS];
+
+/* Initialize the ISP subdev ops table for each config node. */
+int msm_isp_init_module(int g_num_config_nodes)
+{
+	int i = 0;
+
+	for (i = 0; i < g_num_config_nodes; i++) {
+		isp_subdev[i].isp_open = msm_isp_open;
+		isp_subdev[i].isp_config = msm_isp_config;
+		isp_subdev[i].isp_release  = msm_isp_release;
+		isp_subdev[i].isp_enqueue = msm_isp_enqueue;
+		isp_subdev[i].isp_notify = msm_isp_notify;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(msm_isp_init_module);
+
+/* Register the ISP subdev ops with the camera server device. */
+int msm_isp_register(struct msm_cam_server_dev *psvr)
+{
+	int i = 0;
+
+	D("%s\n", __func__);
+
+	BUG_ON(!psvr);
+
+	/* Initialize notify function for v4l2_dev */
+	for (i = 0; i < psvr->config_info.num_config_nodes; i++)
+		psvr->isp_subdev[i] = &(isp_subdev[i]);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_isp_register);
+
+/* Unregister the ISP subdev ops from the camera server device. */
+void msm_isp_unregister(struct msm_cam_server_dev *psvr)
+{
+	int i = 0;
+	for (i = 0; i < psvr->config_info.num_config_nodes; i++)
+		psvr->isp_subdev[i] = NULL;
+}
+
+int msm_isp_subdev_ioctl(struct v4l2_subdev *isp_subdev,
+	struct msm_vfe_cfg_cmd *cfgcmd, void *data)
+{
+	struct msm_camvfe_params vfe_params;
+	vfe_params.vfe_cfg = cfgcmd;
+	vfe_params.data = data;
+	return v4l2_subdev_call(isp_subdev, core, ioctl, 0, &vfe_params);
+}
diff --git a/drivers/media/video/msm/msm_ispif.c b/drivers/media/video/msm/msm_ispif.c
new file mode 100644
index 0000000..4611d06
--- /dev/null
+++ b/drivers/media/video/msm/msm_ispif.c
@@ -0,0 +1,397 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/regulator/consumer.h>
+#include <mach/gpio.h>
+#include <mach/board.h>
+#include <mach/camera.h>
+#include <mach/vreg.h>
+#include <mach/camera.h>
+#include <mach/clk.h>
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
+#include "msm_ispif.h"
+#include "msm.h"
+
+#define DBG_ISPIF 0
+/* ISPIF registers */
+
+#define ISPIF_RST_CMD_ADDR                        0X00
+#define ISPIF_INTF_CMD_ADDR                       0X04
+#define ISPIF_CTRL_ADDR                           0X08
+#define ISPIF_INPUT_SEL_ADDR                      0X0C
+#define ISPIF_PIX_INTF_CID_MASK_ADDR              0X10
+#define ISPIF_RDI_INTF_CID_MASK_ADDR              0X14
+#define ISPIF_PIX_1_INTF_CID_MASK_ADDR            0X38
+#define ISPIF_RDI_1_INTF_CID_MASK_ADDR            0X3C
+#define ISPIF_PIX_STATUS_ADDR                     0X24
+#define ISPIF_RDI_STATUS_ADDR                     0X28
+#define ISPIF_RDI_1_STATUS_ADDR                   0X64
+#define ISPIF_IRQ_MASK_ADDR                     0X0100
+#define ISPIF_IRQ_CLEAR_ADDR                    0X0104
+#define ISPIF_IRQ_STATUS_ADDR                   0X0108
+#define ISPIF_IRQ_MASK_1_ADDR                   0X010C
+#define ISPIF_IRQ_CLEAR_1_ADDR                  0X0110
+#define ISPIF_IRQ_STATUS_1_ADDR                 0X0114
+
+/*ISPIF RESET BITS*/
+
+#define VFE_CLK_DOMAIN_RST           31
+#define RDI_CLK_DOMAIN_RST           30
+#define PIX_CLK_DOMAIN_RST           29
+#define AHB_CLK_DOMAIN_RST           28
+#define RDI_1_CLK_DOMAIN_RST         27
+#define RDI_1_VFE_RST_STB            13
+#define RDI_1_CSID_RST_STB           12
+#define RDI_VFE_RST_STB              7
+#define RDI_CSID_RST_STB             6
+#define PIX_VFE_RST_STB              4
+#define PIX_CSID_RST_STB             3
+#define SW_REG_RST_STB               2
+#define MISC_LOGIC_RST_STB           1
+#define STROBED_RST_EN               0
+
+#define PIX_INTF_0_OVERFLOW_IRQ      12
+#define RAW_INTF_0_OVERFLOW_IRQ      25
+#define RAW_INTF_1_OVERFLOW_IRQ      25
+#define RESET_DONE_IRQ               27
+
+#define MAX_CID 15
+DEFINE_MUTEX(msm_ispif_mut);
+
+static struct resource *ispif_mem;
+static struct resource *ispif_irq;
+static struct resource *ispifio;
+void __iomem *ispifbase;
+static uint32_t global_intf_cmd_mask = 0xFFFFFFFF;
+#if DBG_ISPIF
+static inline void msm_ispif_read_irq_status(struct ispif_irq_status *out)
+{
+	uint32_t *temp;
+	memset(out, 0, sizeof(struct ispif_irq_status));
+	temp = (uint32_t *)(ispifbase + ISPIF_IRQ_STATUS_ADDR);
+	out->ispifIrqStatus0 = msm_io_r(temp);
+	pr_err("ispif_irq: Irq_status0 = 0x%x\n",
+		out->ispifIrqStatus0);
+	msm_io_w(out->ispifIrqStatus0, ispifbase + ISPIF_IRQ_CLEAR_ADDR);
+}
+
+static irqreturn_t msm_io_ispif_irq(int irq_num, void *data)
+{
+	struct ispif_irq_status irq;
+	msm_ispif_read_irq_status(&irq);
+	return IRQ_HANDLED;
+}
+#endif
+int msm_ispif_init(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct msm_ispif_fns ispif_fns;
+
+	ispif_mem = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "ispif");
+	if (!ispif_mem) {
+		pr_err("%s: no mem resource?\n", __func__);
+		return -ENODEV;
+	}
+	ispif_irq = platform_get_resource_byname(pdev,
+					IORESOURCE_IRQ, "ispif");
+	if (!ispif_irq) {
+		pr_err("%s: no irq resource?\n", __func__);
+		return -ENODEV;
+	}
+
+	ispifio  = request_mem_region(ispif_mem->start,
+		resource_size(ispif_mem), pdev->name);
+	if (!ispifio)
+		return -EBUSY;
+	ispifbase = ioremap(ispif_mem->start,
+		resource_size(ispif_mem));
+	if (!ispifbase) {
+		rc = -ENOMEM;
+		goto ispif_no_mem;
+	}
+#if DBG_ISPIF
+	rc = request_irq(ispif_irq->start, msm_io_ispif_irq,
+		IRQF_TRIGGER_RISING, "ispif", 0);
+	if (rc < 0)
+		goto ispif_irq_fail;
+#endif
+	global_intf_cmd_mask = 0xFFFFFFFF;
+	ispif_fns.ispif_config = msm_ispif_config;
+	ispif_fns.ispif_start_intf_transfer =
+		msm_ispif_start_intf_transfer;
+	rc = msm_ispif_register(&ispif_fns);
+	if (rc < 0)
+		goto ispif_irq_fail;
+
+	msm_ispif_reset();
+	return 0;
+
+ispif_irq_fail:
+	iounmap(ispifbase);
+ispif_no_mem:
+	release_mem_region(ispif_mem->start, resource_size(ispif_mem));
+	return rc;
+}
+
+void msm_ispif_release(struct platform_device *pdev)
+{
+	CDBG("%s, free_irq\n", __func__);
+#if DBG_ISPIF
+	free_irq(ispif_irq->start, 0);
+#endif
+	iounmap(ispifbase);
+	release_mem_region(ispif_mem->start, resource_size(ispif_mem));
+}
+
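+/* Strobe the VFE and CSID reset bits for the selected ISPIF interface. */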
+void msm_ispif_intf_reset(uint8_t intftype)
+{
+	uint32_t data = 0x01 , data1 = 0x01;
+
+	msm_io_w(0x1<<STROBED_RST_EN, ispifbase + ISPIF_RST_CMD_ADDR);
+	switch (intftype) {
+	case PIX0:
+		data |= 0x1 << PIX_VFE_RST_STB;
+		msm_io_w(data, ispifbase + ISPIF_RST_CMD_ADDR);
+		usleep_range(11000, 12000);
+		data1 |= 0x1 << PIX_CSID_RST_STB;
+		msm_io_w(data1, ispifbase + ISPIF_RST_CMD_ADDR);
+		usleep_range(11000, 12000);
+		break;
+
+	case RDI0:
+		data |= 0x1 << RDI_VFE_RST_STB;
+		msm_io_w(data, ispifbase + ISPIF_RST_CMD_ADDR);
+		usleep_range(11000, 12000);
+		data1 |= 0x1 << RDI_CSID_RST_STB;
+		msm_io_w(data1, ispifbase + ISPIF_RST_CMD_ADDR);
+		usleep_range(11000, 12000);
+		break;
+
+	case RDI1:
+		data |= 0x1 << RDI_1_VFE_RST_STB;
+		msm_io_w(data, ispifbase + ISPIF_RST_CMD_ADDR);
+		usleep_range(11000, 12000);
+		data1 |= 0x1 << RDI_1_CSID_RST_STB;
+		msm_io_w(data1, ispifbase + ISPIF_RST_CMD_ADDR);
+		usleep_range(11000, 12000);
+		break;
+
+	default:
+		break;
+	}
+}
+
+void msm_ispif_swreg_misc_reset(void)
+{
+	uint32_t data = 0x01, data1 = 0x01;
+
+	data |= 0x1 << SW_REG_RST_STB;
+	msm_io_w(data, ispifbase + ISPIF_RST_CMD_ADDR);
+	usleep_range(11000, 12000);
+	data1 |= 0x1 << MISC_LOGIC_RST_STB;
+	msm_io_w(data1, ispifbase + ISPIF_RST_CMD_ADDR);
+	usleep_range(11000, 12000);
+}
+
+void msm_ispif_reset(void)
+{
+	msm_ispif_swreg_misc_reset();
+	msm_ispif_intf_reset(PIX0);
+	msm_ispif_intf_reset(RDI0);
+}
+
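+/* Route the given CSID core to the selected ISPIF interface. */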
+void msm_ispif_sel_csid_core(uint8_t intftype, uint8_t csid)
+{
+	uint32_t data;
+	data = msm_io_r(ispifbase + ISPIF_INPUT_SEL_ADDR);
+	data |= csid<<(intftype*4);
+	msm_io_w(data, ispifbase + ISPIF_INPUT_SEL_ADDR);
+}
+
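+/*
+ * Apply the per-VC interface command bits for every CID enabled in
+ * cid_mask and write the accumulated command mask to the hardware.
+ */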
+static void
+msm_ispif_intf_cmd(uint8_t intftype, uint16_t cid_mask, uint8_t intf_cmd_mask)
+{
+	uint8_t vc = 0, val = 0;
+	while (cid_mask != 0) {
+		if ((cid_mask & 0xf) != 0x0) {
+			val = (intf_cmd_mask>>(vc*2)) & 0x3;
+			global_intf_cmd_mask &= ~((0x3 & ~val)
+				<<((vc*2)+(intftype*8)));
+			CDBG("intf cmd  0x%x\n", global_intf_cmd_mask);
+			msm_io_w(global_intf_cmd_mask,
+				ispifbase + ISPIF_INTF_CMD_ADDR);
+		}
+		vc++;
+		cid_mask >>= 4;
+	}
+}
+
+void msm_ispif_enable_intf_cids(uint8_t intftype, uint16_t cid_mask)
+{
+	uint32_t data;
+	mutex_lock(&msm_ispif_mut);
+	switch (intftype) {
+	case PIX0:
+		data = msm_io_r(ispifbase + ISPIF_PIX_INTF_CID_MASK_ADDR);
+		data |= cid_mask;
+		msm_io_w(data, ispifbase + ISPIF_PIX_INTF_CID_MASK_ADDR);
+		break;
+
+	case RDI0:
+		data = msm_io_r(ispifbase + ISPIF_RDI_INTF_CID_MASK_ADDR);
+		data |= cid_mask;
+		msm_io_w(data, ispifbase + ISPIF_RDI_INTF_CID_MASK_ADDR);
+		break;
+
+	case RDI1:
+		data = msm_io_r(ispifbase + ISPIF_RDI_1_INTF_CID_MASK_ADDR);
+		data |= cid_mask;
+		msm_io_w(data, ispifbase + ISPIF_RDI_1_INTF_CID_MASK_ADDR);
+		break;
+	}
+	mutex_unlock(&msm_ispif_mut);
+}
+
+int msm_ispif_abort_intf_transfer(struct msm_ispif_params *ispif_params)
+{
+	int rc = 0;
+	uint8_t intf_cmd_mask = 0xAA;
+
+	CDBG("abort stream request\n");
+	mutex_lock(&msm_ispif_mut);
+	msm_ispif_intf_cmd(ispif_params->intftype, ispif_params->cid_mask,
+		 intf_cmd_mask);
+	msm_ispif_intf_reset(ispif_params->intftype);
+	global_intf_cmd_mask |= 0xFF<<(ispif_params->intftype * 8);
+	mutex_unlock(&msm_ispif_mut);
+	return rc;
+}
+
+int msm_ispif_start_intf_transfer(struct msm_ispif_params *ispif_params)
+{
+	uint32_t data;
+	uint8_t intf_cmd_mask = 0x55;
+	int rc = 0;
+
+	CDBG("start stream request\n");
+	mutex_lock(&msm_ispif_mut);
+	switch (ispif_params->intftype) {
+	case PIX0:
+		data = msm_io_r(ispifbase + ISPIF_PIX_STATUS_ADDR);
+		if ((data & 0xf) != 0xf) {
+			CDBG("interface is busy\n");
+			mutex_unlock(&msm_ispif_mut);
+			return -EBUSY;
+		}
+		break;
+
+	case RDI0:
+		data  = msm_io_r(ispifbase + ISPIF_RDI_STATUS_ADDR);
+		break;
+
+	case RDI1:
+		data  = msm_io_r(ispifbase + ISPIF_RDI_1_STATUS_ADDR);
+		break;
+	}
+	msm_ispif_intf_cmd(ispif_params->intftype,
+		ispif_params->cid_mask, intf_cmd_mask);
+	mutex_unlock(&msm_ispif_mut);
+	return rc;
+}
+
+int msm_ispif_stop_intf_transfer(struct msm_ispif_params *ispif_params)
+{
+	int rc = 0;
+	uint8_t intf_cmd_mask = 0x00;
+	CDBG("stop stream request\n");
+	mutex_lock(&msm_ispif_mut);
+	msm_ispif_intf_cmd(ispif_params->intftype,
+		ispif_params->cid_mask, intf_cmd_mask);
+	switch (ispif_params->intftype) {
+	case PIX0:
+		while ((msm_io_r(ispifbase + ISPIF_PIX_STATUS_ADDR) & 0xf)
+			!= 0xf) {
+			CDBG("Wait for Idle\n");
+		}
+		break;
+
+	case RDI0:
+		while ((msm_io_r(ispifbase + ISPIF_RDI_STATUS_ADDR) & 0xf)
+			!= 0xf) {
+			CDBG("Wait for Idle\n");
+		}
+		break;
+	default:
+		break;
+	}
+	global_intf_cmd_mask |= 0xFF<<(ispif_params->intftype * 8);
+	mutex_unlock(&msm_ispif_mut);
+	return rc;
+}
+
+int msm_ispif_config(struct msm_ispif_params *ispif_params, uint8_t num_of_intf)
+{
+	uint32_t data, data1;
+	int rc = 0, i = 0;
+	CDBG("Enable interface\n");
+	data = msm_io_r(ispifbase + ISPIF_PIX_STATUS_ADDR);
+	data1 = msm_io_r(ispifbase + ISPIF_RDI_STATUS_ADDR);
+	if (((data & 0xf) != 0xf) || ((data1 & 0xf) != 0xf))
+		return -EBUSY;
+	msm_io_w(0x00000000, ispifbase + ISPIF_IRQ_MASK_ADDR);
+	for (i = 0; i < num_of_intf; i++) {
+		msm_ispif_sel_csid_core(ispif_params[i].intftype,
+			ispif_params[i].csid);
+		msm_ispif_enable_intf_cids(ispif_params[i].intftype,
+			ispif_params[i].cid_mask);
+	}
+	msm_io_w(0x0BFFFFFF, ispifbase + ISPIF_IRQ_MASK_ADDR);
+	msm_io_w(0x0BFFFFFF, ispifbase + ISPIF_IRQ_CLEAR_ADDR);
+	return rc;
+}
+
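+/* Report which CIDs are currently enabled on the given interface. */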
+void msm_ispif_vfe_get_cid(uint8_t intftype, char *cids, int *num)
+{
+	uint32_t data = 0;
+	int i = 0, j = 0;
+	switch (intftype) {
+	case PIX0:
+		data = msm_io_r(ispifbase +
+			ISPIF_PIX_INTF_CID_MASK_ADDR);
+		break;
+
+	case RDI0:
+		data = msm_io_r(ispifbase +
+			ISPIF_RDI_INTF_CID_MASK_ADDR);
+		break;
+
+	case RDI1:
+		data = msm_io_r(ispifbase +
+			ISPIF_RDI_1_INTF_CID_MASK_ADDR);
+		break;
+
+	default:
+		break;
+	}
+	for (i = 0; i <= MAX_CID; i++) {
+		if ((data & 0x1) == 0x1) {
+			cids[j++] = i;
+			(*num)++;
+		}
+		data >>= 1;
+	}
+}
diff --git a/drivers/media/video/msm/msm_ispif.h b/drivers/media/video/msm/msm_ispif.h
new file mode 100644
index 0000000..e5e0c23
--- /dev/null
+++ b/drivers/media/video/msm/msm_ispif.h
@@ -0,0 +1,49 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_ISPIF_H
+#define MSM_ISPIF_H
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/regulator/consumer.h>
+#include <mach/gpio.h>
+#include <mach/board.h>
+#include <mach/camera.h>
+#include <mach/vreg.h>
+#include <mach/camera.h>
+#include <mach/clk.h>
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
+
+
+struct ispif_irq_status {
+	uint32_t ispifIrqStatus0;
+	uint32_t ispifIrqStatus1;
+};
+
+int msm_ispif_init(struct platform_device *pdev);
+void msm_ispif_release(struct platform_device *pdev);
+void msm_ispif_intf_reset(uint8_t intftype);
+void msm_ispif_swreg_misc_reset(void);
+void msm_ispif_reset(void);
+void msm_ispif_sel_csid_core(uint8_t intftype, uint8_t csid);
+void msm_ispif_enable_intf_cids(uint8_t intftype, uint16_t cid_mask);
+int msm_ispif_start_intf_transfer(struct msm_ispif_params *ispif_params);
+int msm_ispif_stop_intf_transfer(struct msm_ispif_params *ispif_params);
+int msm_ispif_abort_intf_transfer(struct msm_ispif_params *ispif_params);
+int msm_ispif_config(struct msm_ispif_params *ispif_params,
+	uint8_t num_of_intf);
+void msm_ispif_vfe_get_cid(uint8_t intftype, char *cids, int *num);
+
+#endif
diff --git a/drivers/media/video/msm/msm_mctl.c b/drivers/media/video/msm/msm_mctl.c
new file mode 100644
index 0000000..35cb68a
--- /dev/null
+++ b/drivers/media/video/msm/msm_mctl.c
@@ -0,0 +1,838 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/ioctl.h>
+#include <linux/spinlock.h>
+#include <linux/videodev2.h>
+#include <linux/proc_fs.h>
+#include <linux/vmalloc.h>
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-device.h>
+
+#include <linux/android_pmem.h>
+
+#include "msm.h"
+#include "msm_ispif.h"
+
+#ifdef CONFIG_MSM_CAMERA_DEBUG
+#define D(fmt, args...) pr_debug("msm_mctl: " fmt, ##args)
+#else
+#define D(fmt, args...) do {} while (0)
+#endif
+
+#define MSM_V4L2_SWFI_LATENCY 3
+
+/* VFE required buffer number for streaming */
+static struct msm_isp_color_fmt msm_isp_formats[] = {
+	{
+	.name	   = "NV12YUV",
+	.depth	  = 12,
+	.bitsperpxl = 8,
+	.fourcc	 = V4L2_PIX_FMT_NV12,
+	.pxlcode	= V4L2_MBUS_FMT_YUYV8_2X8, /* YUV sensor */
+	.colorspace = V4L2_COLORSPACE_JPEG,
+	},
+	{
+	.name	   = "NV21YUV",
+	.depth	  = 12,
+	.bitsperpxl = 8,
+	.fourcc	 = V4L2_PIX_FMT_NV21,
+	.pxlcode	= V4L2_MBUS_FMT_YUYV8_2X8, /* YUV sensor */
+	.colorspace = V4L2_COLORSPACE_JPEG,
+	},
+	{
+	.name	   = "NV12BAYER",
+	.depth	  = 8,
+	.bitsperpxl = 8,
+	.fourcc	 = V4L2_PIX_FMT_NV12,
+	.pxlcode	= V4L2_MBUS_FMT_SBGGR10_1X10, /* Bayer sensor */
+	.colorspace = V4L2_COLORSPACE_JPEG,
+	},
+	{
+	.name	   = "NV21BAYER",
+	.depth	  = 8,
+	.bitsperpxl = 8,
+	.fourcc	 = V4L2_PIX_FMT_NV21,
+	.pxlcode	= V4L2_MBUS_FMT_SBGGR10_1X10, /* Bayer sensor */
+	.colorspace = V4L2_COLORSPACE_JPEG,
+	},
+	{
+	.name	   = "RAWBAYER",
+	.depth	  = 10,
+	.bitsperpxl = 10,
+	.fourcc	 = V4L2_PIX_FMT_SBGGR10,
+	.pxlcode	= V4L2_MBUS_FMT_SBGGR10_1X10, /* Bayer sensor */
+	.colorspace = V4L2_COLORSPACE_JPEG,
+	},
+
+};
+/* master controller instance counter
+static atomic_t mctl_instance = ATOMIC_INIT(0);
+*/
+
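+/* Return the buffer size in bytes for the given resolution and pixel format. */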
+static int buffer_size(int width, int height, int pixelformat)
+{
+	int size;
+
+	switch (pixelformat) {
+	case V4L2_PIX_FMT_NV21:
+	case V4L2_PIX_FMT_NV12:
+		size = width * height * 3/2;
+		break;
+	case V4L2_PIX_FMT_SBGGR10:
+	case V4L2_PIX_FMT_SGBRG10:
+	case V4L2_PIX_FMT_SGRBG10:
+	case V4L2_PIX_FMT_SRGGB10:
+		size = width * height;
+		break;
+	default:
+		pr_err("%s: pixelformat %d not supported.\n",
+			__func__, pixelformat);
+		size = -EINVAL;
+	}
+
+	return size;
+}
+/*
+ *  Videobuf operations
+ */
+
+static void free_buffer(struct videobuf_queue *vq,
+			struct msm_frame_buffer *buf)
+{
+	struct videobuf_buffer *vb = &buf->vidbuf;
+
+	BUG_ON(in_interrupt());
+
+	D("%s (vb=0x%p) 0x%08lx %d\n", __func__,
+			vb, vb->baddr, vb->bsize);
+
+	/* This waits until this buffer is out of danger, i.e.,
+	 * until it is no longer in STATE_QUEUED or STATE_ACTIVE */
+	videobuf_waiton(vq, vb, 0, 0);
+	videobuf_pmem_contig_free(vq, vb);
+	vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+/* Setup # of buffers and size of each buffer for the videobuf_queue.
+   This is called when videobuf_reqbufs() is called, so this function
+   should tell how many buffer should be used and how big the size is.
+
+   The caller will allocate the real buffers, either in user space or
+   in kernel */
+static int msm_vidbuf_setup(struct videobuf_queue *vq, unsigned int *count,
+							unsigned int *size)
+{
+	/* get the video device */
+	struct msm_cam_v4l2_dev_inst *pcam_inst = vq->priv_data;
+	struct msm_cam_v4l2_device *pcam = NULL;
+
+	pcam = pcam_inst->pcam;
+
+	D("%s\n", __func__);
+	if (!pcam || !count || !size) {
+		pr_err("%s error : invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	D("%s, inst=0x%x,idx=%d, width = %d\n", __func__,
+		(u32)pcam_inst, pcam_inst->my_index,
+		pcam_inst->vid_fmt.fmt.pix.width);
+	D("%s, inst=0x%x,idx=%d, height = %d\n", __func__,
+		(u32)pcam_inst, pcam_inst->my_index,
+		pcam_inst->vid_fmt.fmt.pix.height);
+	*size = buffer_size(pcam_inst->vid_fmt.fmt.pix.width,
+				pcam_inst->vid_fmt.fmt.pix.height,
+				pcam_inst->vid_fmt.fmt.pix.pixelformat);
+	D("%s:inst=0x%x,idx=%d,count=%d, size=%d\n", __func__,
+		(u32)pcam_inst, pcam_inst->my_index, *count, *size);
+	return 0;
+}
+
+/* Prepare the buffer before it is put into the videobuf_queue for streaming.
+   This is called when videobuf_qbuf() is called, so this function should
+   set up the video buffer to receive the VFE output. */
+static int msm_vidbuf_prepare(struct videobuf_queue *vq,
+	struct videobuf_buffer *vb, enum v4l2_field field)
+{
+	int rc = 0;
+	/*struct msm_cam_v4l2_device *pcam = vq->priv_data;*/
+	struct msm_cam_v4l2_dev_inst *pcam_inst = NULL;
+	struct msm_cam_v4l2_device *pcam = NULL;
+	struct msm_frame_buffer *buf = NULL;
+
+	D("%s\n", __func__);
+	if (!vb || !vq) {
+		pr_err("%s error : input is NULL\n", __func__);
+		return -EINVAL;
+	}
+	pcam_inst = vq->priv_data;
+	pcam = pcam_inst->pcam;
+	buf = container_of(vb, struct msm_frame_buffer, vidbuf);
+
+	if (!pcam || !buf) {
+		pr_err("%s error : pointer is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	D("%s (vb=0x%p) 0x%08lx %d\n", __func__,
+		vb, vb->baddr, vb->bsize);
+
+	/* by this time vid_fmt should be already set */
+	/* return error if it is not */
+	if ((pcam_inst->vid_fmt.fmt.pix.width == 0) ||
+		(pcam_inst->vid_fmt.fmt.pix.height == 0)) {
+		pr_err("%s error : pcam vid_fmt is not set\n", __func__);
+		return -EINVAL;
+	}
+
+	buf->inuse = 1;
+
+	D("buf->pxlcode=%d, pcam->sensor_pxlcode=%d, vb->width=%d,"
+		"pcam->vid_fmt.fmt.pix.width = %d, vb->height = %d,"
+		"pcam->vid_fmt.fmt.pix.height=%d, vb->field=%d, field=%d\n",
+		buf->pxlcode, pcam_inst->sensor_pxlcode, vb->width,
+		pcam_inst->vid_fmt.fmt.pix.width, vb->height,
+		pcam_inst->vid_fmt.fmt.pix.height, vb->field, field);
+
+	if (buf->pxlcode != pcam_inst->sensor_pxlcode ||
+		vb->width   != pcam_inst->vid_fmt.fmt.pix.width ||
+		vb->height	!= pcam_inst->vid_fmt.fmt.pix.height ||
+		vb->field   != field) {
+		buf->pxlcode  = pcam_inst->sensor_pxlcode;
+		vb->width = pcam_inst->vid_fmt.fmt.pix.width;
+		vb->height  = pcam_inst->vid_fmt.fmt.pix.height;
+		vb->field = field;
+		vb->state = VIDEOBUF_NEEDS_INIT;
+		D("VIDEOBUF_NEEDS_INIT\n");
+	}
+
+	vb->size = buffer_size(pcam_inst->vid_fmt.fmt.pix.width, vb->height,
+				pcam_inst->vid_fmt.fmt.pix.pixelformat);
+
+	D("vb->size=%lu, vb->bsize=%u, vb->baddr=0x%x\n",
+		vb->size, vb->bsize, (uint32_t)vb->baddr);
+
+	if (0 != vb->baddr && vb->bsize < vb->size) {
+		pr_err("Something wrong vb->size=%lu, vb->bsize=%u,\
+					vb->baddr=0x%x\n",
+					vb->size, vb->bsize,
+					(uint32_t)vb->baddr);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (vb->state == VIDEOBUF_NEEDS_INIT) {
+		rc = videobuf_iolock(vq, vb, NULL);
+		if (rc)
+			goto fail;
+		D("%s: setting buffer state to prepared\n", __func__);
+		vb->state = VIDEOBUF_PREPARED;
+	}
+
+	buf->inuse = 0;
+
+	/* finally, if everything is OK, set the VIDEOBUF_PREPARED state */
+	if (0 == rc)
+		vb->state = VIDEOBUF_PREPARED;
+	return rc;
+
+fail:
+	free_buffer(vq, buf);
+
+out:
+	buf->inuse = 0;
+	return rc;
+}
+
+/* Called under spin_lock_irqsave(q->irqlock, flags) in videobuf-core.c*/
+static void msm_vidbuf_queue(struct videobuf_queue *vq,
+				struct videobuf_buffer *vb)
+{
+	/*struct msm_cam_v4l2_device *pcam = vq->priv_data;*/
+	struct msm_cam_v4l2_dev_inst *pcam_inst = NULL;
+	struct msm_cam_v4l2_device *pcam = NULL;
+	unsigned long phyaddr = 0;
+	int rc;
+
+	D("%s\n", __func__);
+	if (!vb || !vq) {
+		pr_err("%s error : input is NULL\n", __func__);
+		return;
+	}
+	pcam_inst = vq->priv_data;
+	pcam = pcam_inst->pcam;
+	if (!pcam) {
+		pr_err("%s error : pcam is NULL\n", __func__);
+		return;
+	}
+	D("%s (vb=0x%p) 0x%08lx %d\n", __func__, vb, vb->baddr, vb->bsize);
+
+
+	vb->state = VIDEOBUF_QUEUED;
+	if (vq->streaming) {
+		struct msm_frame frame;
+		struct msm_vfe_cfg_cmd cfgcmd;
+		/* we are returning a buffer to the queue */
+		struct videobuf_contig_pmem *mem = vb->priv;
+		/* get the physcial address of the buffer */
+		phyaddr = (unsigned long) videobuf_to_pmem_contig(vb);
+
+		D("%s buffer type is %d\n", __func__, mem->buffer_type);
+		frame.path = pcam_inst->path;
+		frame.buffer = 0;
+		frame.y_off = mem->y_off;
+		frame.cbcr_off = mem->cbcr_off;
+
+		/* now release frame to vfe */
+		cfgcmd.cmd_type = CMD_FRAME_BUF_RELEASE;
+		cfgcmd.value    = (void *)&frame;
+		/* yyan: later change to mctl APIs*/
+		rc = msm_isp_subdev_ioctl(&pcam->mctl.isp_sdev->sd,
+			&cfgcmd, &phyaddr);
+	}
+}
+
+/* This will be called when streamingoff is called. */
+static void msm_vidbuf_release(struct videobuf_queue *vq,
+				struct videobuf_buffer *vb)
+{
+	struct msm_cam_v4l2_dev_inst *pcam_inst = vq->priv_data;
+	struct msm_cam_v4l2_device *pcam = pcam_inst->pcam;
+	struct msm_frame_buffer *buf = container_of(vb, struct msm_frame_buffer,
+									vidbuf);
+
+	D("%s\n", __func__);
+	if (!pcam || !vb || !vq) {
+		pr_err("%s error : input is NULL\n", __func__);
+		return;
+	}
+#ifdef DEBUG
+	D("%s (vb=0x%p) 0x%08lx %d\n", __func__,
+		vb, vb->baddr, vb->bsize);
+
+	switch (vb->state) {
+	case VIDEOBUF_ACTIVE:
+		D("%s (active)\n", __func__);
+		break;
+	case VIDEOBUF_QUEUED:
+		D("%s (queued)\n", __func__);
+		break;
+	case VIDEOBUF_PREPARED:
+		D("%s (prepared)\n", __func__);
+		break;
+	default:
+		D("%s (unknown) state = %d\n", __func__, vb->state);
+		break;
+	}
+#endif
+
+	/* free the buffer */
+	free_buffer(vq, buf);
+}
+
+
+static struct videobuf_queue_ops msm_vidbuf_ops = {
+	.buf_setup  = msm_vidbuf_setup,
+	.buf_prepare  = msm_vidbuf_prepare,
+	.buf_queue  = msm_vidbuf_queue,
+	.buf_release  = msm_vidbuf_release,
+};
+
+
+
+/* prepare a video buffer queue for a vl42 device*/
+static int msm_vidbuf_init(struct msm_cam_v4l2_dev_inst *pcam_inst,
+						   struct videobuf_queue *q)
+{
+	int rc = 0;
+	struct resource *res;
+	struct platform_device *pdev = NULL;
+	struct msm_cam_v4l2_device *pcam = pcam_inst->pcam;
+	D("%s\n", __func__);
+	if (!pcam || !q) {
+		pr_err("%s error : input is NULL\n", __func__);
+		return -EINVAL;
+	} else
+		pdev = pcam->mctl.sync.pdev;
+
+	if (!pdev) {
+		pr_err("%s error : pdev is NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (pcam->use_count == 1) {
+		/* first check if we have resources */
+		res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+		if (res) {
+			D("res->start = 0x%x\n", (u32)res->start);
+			D("res->size = 0x%x\n", (u32)resource_size(res));
+			D("res->end = 0x%x\n", (u32)res->end);
+			rc = dma_declare_coherent_memory(&pdev->dev, res->start,
+				res->start,
+				resource_size(res),
+				DMA_MEMORY_MAP |
+				DMA_MEMORY_EXCLUSIVE);
+			if (!rc) {
+				pr_err("%s: Unable to declare coherent memory.\n",
+				 __func__);
+				rc = -ENXIO;
+				return rc;
+			}
+
+			/*pcam->memsize = resource_size(res);*/
+			D("%s: found DMA capable resource\n", __func__);
+		} else {
+			pr_err("%s: no DMA capable resource\n", __func__);
+			return -ENOMEM;
+		}
+	}
+	spin_lock_init(&pcam_inst->vb_irqlock);
+
+	videobuf_queue_pmem_contig_init(q, &msm_vidbuf_ops, &pdev->dev,
+		&pcam_inst->vb_irqlock,
+		V4L2_BUF_TYPE_VIDEO_CAPTURE,
+		V4L2_FIELD_NONE,
+		sizeof(struct msm_frame_buffer), pcam_inst, NULL);
+
+
+	return 0;
+}
+
+/*
+ *  V4l2 subdevice operations
+ */
+static	int mctl_subdev_log_status(struct v4l2_subdev *sd)
+{
+	return -EINVAL;
+}
+
+static long mctl_subdev_ioctl(struct v4l2_subdev *sd,
+				 unsigned int cmd, void *arg)
+{
+	struct msm_cam_media_controller *pmctl = NULL;
+	if (!sd) {
+		pr_err("%s: param is NULL", __func__);
+		return -EINVAL;
+	} else
+		pmctl = (struct msm_cam_media_controller *)
+		v4l2_get_subdevdata(sd);
+
+
+	return -EINVAL;
+}
+
+
+static int mctl_subdev_g_mbus_fmt(struct v4l2_subdev *sd,
+					 struct v4l2_mbus_framefmt *mf)
+{
+	return -EINVAL;
+}
+
+static struct v4l2_subdev_core_ops mctl_subdev_core_ops = {
+	.log_status = mctl_subdev_log_status,
+	.ioctl = mctl_subdev_ioctl,
+};
+
+static struct v4l2_subdev_video_ops mctl_subdev_video_ops = {
+	.g_mbus_fmt = mctl_subdev_g_mbus_fmt,
+};
+
+static struct v4l2_subdev_ops mctl_subdev_ops = {
+	.core = &mctl_subdev_core_ops,
+	.video  = &mctl_subdev_video_ops,
+};
+
+static int msm_get_sensor_info(struct msm_sync *sync, void __user *arg)
+{
+	int rc = 0;
+	struct msm_camsensor_info info;
+	struct msm_camera_sensor_info *sdata;
+
+	if (copy_from_user(&info,
+			arg,
+			sizeof(struct msm_camsensor_info))) {
+		ERR_COPY_FROM_USER();
+		return -EFAULT;
+	}
+
+	sdata = sync->pdev->dev.platform_data;
+	D("%s: sensor_name %s\n", __func__, sdata->sensor_name);
+
+	memcpy(&info.name[0], sdata->sensor_name, MAX_SENSOR_NAME);
+	info.flash_enabled = sdata->flash_data->flash_type !=
+					MSM_CAMERA_FLASH_NONE;
+
+	/* copy back to user space */
+	if (copy_to_user((void *)arg,
+				&info,
+				sizeof(struct msm_camsensor_info))) {
+		ERR_COPY_TO_USER();
+		rc = -EFAULT;
+	}
+
+	return rc;
+}
+
+/* called by other subdev to notify any changes*/
+
+static int msm_mctl_notify(struct msm_cam_media_controller *p_mctl,
+			unsigned int notification, void *arg)
+{
+	int rc = -EINVAL;
+	struct msm_ispif_params ispif_params;
+	struct msm_camera_sensor_info *sinfo =
+			p_mctl->plat_dev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+	uint8_t csid_core = camdev->csid_core;
+	switch (notification) {
+	case NOTIFY_CID_CHANGE:
+		/* reconfig the ISPIF*/
+		if (p_mctl->ispif_fns->ispif_config) {
+			ispif_params.intftype = PIX0;
+			ispif_params.cid_mask = 0x0001;
+			ispif_params.csid = csid_core;
+
+			rc = p_mctl->ispif_fns->ispif_config(&ispif_params, 1);
+			if (rc < 0)
+				return rc;
+			rc = p_mctl->ispif_fns->ispif_start_intf_transfer
+					(&ispif_params);
+			if (rc < 0)
+				return rc;
+			msleep(20);
+		}
+		break;
+	case NOTIFY_VFE_MSG_EVT:
+		if (p_mctl->isp_sdev && p_mctl->isp_sdev->isp_notify) {
+			rc = p_mctl->isp_sdev->isp_notify(
+				&p_mctl->isp_sdev->sd, arg);
+		}
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+/* called by the server or the config nodes to handle user space
+  commands*/
+static int msm_mctl_cmd(struct msm_cam_media_controller *p_mctl,
+			unsigned int cmd, unsigned long arg)
+{
+	int rc = -EINVAL;
+	void __user *argp = (void __user *)arg;
+	if (!p_mctl) {
+		pr_err("%s: param is NULL", __func__);
+		return -EINVAL;
+	}
+	D("%s start cmd = %d\n", __func__, _IOC_NR(cmd));
+
+	/* ... call sensor, ISPIF or VEF subdev*/
+	switch (cmd) {
+		/* sensor config*/
+	case MSM_CAM_IOCTL_GET_SENSOR_INFO:
+			rc = msm_get_sensor_info(&p_mctl->sync, argp);
+			break;
+
+	case MSM_CAM_IOCTL_SENSOR_IO_CFG:
+			rc = p_mctl->sync.sctrl.s_config(argp);
+			break;
+
+	case MSM_CAM_IOCTL_FLASH_CTRL: {
+		struct flash_ctrl_data flash_info;
+		if (copy_from_user(&flash_info, argp, sizeof(flash_info))) {
+			ERR_COPY_FROM_USER();
+			rc = -EFAULT;
+		} else {
+			rc = msm_flash_ctrl(p_mctl->sync.sdata, &flash_info);
+		}
+		break;
+	}
+
+			/* ISPIF config */
+
+	default:
+		/* ISP config*/
+		rc = p_mctl->isp_sdev->isp_config(p_mctl, cmd, arg);
+		break;
+	}
+	D("%s: !!! cmd = %d, rc = %d\n", __func__, _IOC_NR(cmd), rc);
+	return rc;
+}
+
+static int msm_sync_init(struct msm_sync *sync,
+	struct platform_device *pdev, struct msm_sensor_ctrl *sctrl)
+{
+	int rc = 0;
+	if (!sync) {
+		pr_err("%s: param is NULL", __func__);
+		return -EINVAL;
+	}
+
+	sync->sdata = pdev->dev.platform_data;
+
+	wake_lock_init(&sync->wake_lock, WAKE_LOCK_IDLE, "msm_camera");
+
+	sync->pdev = pdev;
+	sync->sctrl = *sctrl;
+	sync->opencnt = 0;
+	mutex_init(&sync->lock);
+	D("%s: initialized %s\n", __func__, sync->sdata->sensor_name);
+	return rc;
+}
+
+static int msm_mctl_open(struct msm_cam_media_controller *p_mctl,
+				 const char *const apps_id)
+{
+	int rc = 0;
+	struct msm_sync *sync = NULL;
+	D("%s\n", __func__);
+	if (!p_mctl) {
+		pr_err("%s: param is NULL", __func__);
+		return -EINVAL;
+	}
+
+	/* msm_sync_init() must be called before this */
+	sync = &(p_mctl->sync);
+
+	mutex_lock(&sync->lock);
+	/* open sub devices - once only*/
+	if (!sync->opencnt) {
+		wake_lock(&sync->wake_lock);
+
+		/* turn on clock */
+		rc = msm_camio_sensor_clk_on(sync->pdev);
+		if (rc < 0) {
+			pr_err("%s: msm_camio_sensor_clk_on failed:%d\n",
+			 __func__, rc);
+			goto msm_open_done;
+		}
+
+		/* ISP first*/
+		if (p_mctl->isp_sdev && p_mctl->isp_sdev->isp_open)
+			rc = p_mctl->isp_sdev->isp_open(
+				&p_mctl->isp_sdev->sd, sync);
+
+		if (rc < 0) {
+			pr_err("%s: isp init failed: %d\n", __func__, rc);
+			goto msm_open_done;
+		}
+
+		/* then sensor - move sub dev later*/
+		if (sync->sctrl.s_init)
+			rc = sync->sctrl.s_init(sync->sdata);
+
+		if (rc < 0) {
+			pr_err("%s: isp init failed: %d\n", __func__, rc);
+			goto msm_open_done;
+		}
+
+		pm_qos_add_request(&p_mctl->pm_qos_req_list,
+					PM_QOS_CPU_DMA_LATENCY,
+					PM_QOS_DEFAULT_VALUE);
+		pm_qos_update_request(&p_mctl->pm_qos_req_list,
+					MSM_V4L2_SWFI_LATENCY);
+
+		sync->apps_id = apps_id;
+		sync->opencnt++;
+	}
+
+msm_open_done:
+	mutex_unlock(&sync->lock);
+	return rc;
+}
+
+static int msm_mctl_release(struct msm_cam_media_controller *p_mctl)
+{
+	struct msm_sync *sync = NULL;
+	int rc = 0;
+
+	sync = &(p_mctl->sync);
+
+	if (p_mctl->isp_sdev && p_mctl->isp_sdev->isp_release)
+		p_mctl->isp_sdev->isp_release(&p_mctl->sync);
+
+	if (p_mctl->sync.sctrl.s_release)
+		p_mctl->sync.sctrl.s_release();
+
+	rc = msm_camio_sensor_clk_off(sync->pdev);
+	if (rc < 0)
+		pr_err("%s: msm_camio_sensor_clk_off failed:%d\n",
+			 __func__, rc);
+
+	pm_qos_update_request(&p_mctl->pm_qos_req_list,
+				PM_QOS_DEFAULT_VALUE);
+	pm_qos_remove_request(&p_mctl->pm_qos_req_list);
+
+	return rc;
+}
+
+int msm_mctl_init_user_formats(struct msm_cam_v4l2_device *pcam)
+{
+	struct v4l2_subdev *sd = &pcam->sensor_sdev;
+	enum v4l2_mbus_pixelcode pxlcode;
+	int numfmt_sensor = 0;
+	int numfmt = 0;
+	int rc = 0;
+	int i, j;
+
+	D("%s\n", __func__);
+	while (!v4l2_subdev_call(sd, video, enum_mbus_fmt, numfmt_sensor,
+								&pxlcode))
+		numfmt_sensor++;
+
+	D("%s, numfmt_sensor = %d\n", __func__, numfmt_sensor);
+	if (!numfmt_sensor)
+		return -ENXIO;
+
+	pcam->usr_fmts = vmalloc(numfmt_sensor * ARRAY_SIZE(msm_isp_formats) *
+				sizeof(struct msm_isp_color_fmt));
+	if (!pcam->usr_fmts)
+		return -ENOMEM;
+
+	/* from sensor to ISP.. fill the data structure */
+	for (i = 0; i < numfmt_sensor; i++) {
+		rc = v4l2_subdev_call(sd, video, enum_mbus_fmt, i, &pxlcode);
+		D("rc is  %d\n", rc);
+		if (rc < 0) {
+			vfree(pcam->usr_fmts);
+			return rc;
+		}
+
+		for (j = 0; j < ARRAY_SIZE(msm_isp_formats); j++) {
+			/* find the corresponding format */
+			if (pxlcode == msm_isp_formats[j].pxlcode) {
+				pcam->usr_fmts[numfmt] = msm_isp_formats[j];
+				D("pcam->usr_fmts=0x%x\n", (u32)pcam->usr_fmts);
+				D("format pxlcode 0x%x (0x%x) found\n",
+					  pcam->usr_fmts[numfmt].pxlcode,
+					  pcam->usr_fmts[numfmt].fourcc);
+				numfmt++;
+			}
+		}
+	}
+
+	pcam->num_fmts = numfmt;
+
+	if (numfmt == 0) {
+		pr_err("%s: No supported formats.\n", __func__);
+		vfree(pcam->usr_fmts);
+		return -EINVAL;
+	}
+
+	D("Found %d supported formats.\n", pcam->num_fmts);
+	/* set the default pxlcode, in any case, it will be set through
+	 * setfmt */
+	return 0;
+}
+
+/* this function plug in the implementation of a v4l2_subdev */
+int msm_mctl_init_module(struct msm_cam_v4l2_device *pcam)
+{
+
+	struct msm_cam_media_controller *pmctl = NULL;
+	D("%s\n", __func__);
+	if (!pcam) {
+		pr_err("%s: param is NULL", __func__);
+		return -EINVAL;
+	} else
+		pmctl = &pcam->mctl;
+
+	/* init module sync object*/
+	msm_sync_init(&pmctl->sync, pcam->pdev, &pcam->sctrl);
+
+	/* init module operations*/
+	pmctl->mctl_open = msm_mctl_open;
+	pmctl->mctl_cmd = msm_mctl_cmd;
+	pmctl->mctl_notify = msm_mctl_notify;
+	pmctl->mctl_vidbuf_init = msm_vidbuf_init;
+	pmctl->mctl_release = msm_mctl_release;
+
+	pmctl->plat_dev = pcam->pdev;
+	/* init sub device*/
+	v4l2_subdev_init(&(pmctl->mctl_sdev), &mctl_subdev_ops);
+	v4l2_set_subdevdata(&(pmctl->mctl_sdev), pmctl);
+
+	return 0;
+}
+static int msm_mctl_out_type_to_inst_index(struct msm_cam_v4l2_device *pcam,
+					int out_type)
+{
+	switch (out_type) {
+	case VFE_MSG_OUTPUT_P:
+		return pcam->dev_inst_map
+			[MSM_V4L2_EXT_CAPTURE_MODE_PREVIEW]->my_index;
+	case VFE_MSG_OUTPUT_V:
+		return pcam->dev_inst_map
+			[MSM_V4L2_EXT_CAPTURE_MODE_VIDEO]->my_index;
+	case VFE_MSG_OUTPUT_S:
+		return pcam->dev_inst_map
+			[MSM_V4L2_EXT_CAPTURE_MODE_MAIN]->my_index;
+	case VFE_MSG_OUTPUT_T:
+		return pcam->dev_inst_map
+			[MSM_V4L2_EXT_CAPTURE_MODE_THUMBNAIL]->my_index;
+	default:
+		return 0;
+	}
+	return 0;
+}
+
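+/*
+ * Mark the videobuf whose physical address matches the completed VFE
+ * output as done and wake up any waiter.
+ */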
+int msm_mctl_buf_done(struct msm_cam_media_controller *pmctl,
+			int msg_type, uint32_t y_phy)
+{
+	struct videobuf_queue *q;
+	struct videobuf_buffer *buf = NULL;
+	uint32_t buf_phyaddr = 0;
+	int i, idx;
+	unsigned long flags = 0;
+
+	idx = msm_mctl_out_type_to_inst_index(pmctl->sync.pcam_sync, msg_type);
+	q = &(pmctl->sync.pcam_sync->dev_inst[idx]->vid_bufq);
+
+	D("q=0x%x\n", (u32)q);
+
+	/* find the videobuf which is done */
+	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+		if (NULL == q->bufs[i])
+			continue;
+		buf = q->bufs[i];
+		buf_phyaddr = videobuf_to_pmem_contig(buf);
+		D("buf_phyaddr=0x%x\n", (u32)buf_phyaddr);
+		D("data->phy.y_phy=0x%x\n",
+				y_phy);
+		D("buf = 0x%x\n", (u32)buf);
+		if (buf_phyaddr == y_phy) {
+			/* signal that buffer is done */
+			/* get the buf lock first */
+			spin_lock_irqsave(q->irqlock, flags);
+			buf->state = VIDEOBUF_DONE;
+			D("queuedequeue video_buffer 0x%x,"
+				"phyaddr = 0x%x\n",
+				(u32)buf, y_phy);
+
+			do_gettimeofday(&buf->ts);
+			buf->field_count++;
+			wake_up(&buf->done);
+			spin_unlock_irqrestore(q->irqlock, flags);
+			break;
+		}
+	}
+	return 0;
+}
diff --git a/drivers/media/video/msm/msm_mem.c b/drivers/media/video/msm/msm_mem.c
new file mode 100644
index 0000000..5232f7c
--- /dev/null
+++ b/drivers/media/video/msm/msm_mem.c
@@ -0,0 +1,403 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/ioctl.h>
+#include <linux/spinlock.h>
+#include <linux/videodev2.h>
+#include <linux/proc_fs.h>
+#include <linux/vmalloc.h>
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-device.h>
+
+#include <linux/android_pmem.h>
+
+#include "msm.h"
+#include "msm_vfe31.h"
+
+#ifdef CONFIG_MSM_CAMERA_DEBUG
+#define D(fmt, args...) pr_debug("msm_isp: " fmt, ##args)
+#else
+#define D(fmt, args...) do {} while (0)
+#endif
+
+
+#define ERR_USER_COPY(to) pr_err("%s(%d): copy %s user\n", \
+				__func__, __LINE__, ((to) ? "to" : "from"))
+#define ERR_COPY_FROM_USER() ERR_USER_COPY(0)
+#define ERR_COPY_TO_USER() ERR_USER_COPY(1)
+
+
+#define PAD_TO_WORD(a)	  (((a) + 3) & ~3)
+
+#define __CONTAINS(r, v, l, field) ({			   \
+	typeof(r) __r = r;				  \
+	typeof(v) __v = v;				  \
+	typeof(v) __e = __v + l;				\
+	int res = __v >= __r->field &&			  \
+		__e <= __r->field + __r->len;		   \
+	res;							\
+})
+
+#define CONTAINS(r1, r2, field) ({			  \
+	typeof(r2) __r2 = r2;				   \
+	__CONTAINS(r1, __r2->field, __r2->len, field);	  \
+})
+
+#define IN_RANGE(r, v, field) ({				\
+	typeof(r) __r = r;				  \
+	typeof(v) __vv = v;				 \
+	int res = ((__vv >= __r->field) &&		  \
+		(__vv < (__r->field + __r->len)));	  \
+	res;							\
+})
+
+#define OVERLAPS(r1, r2, field) ({			  \
+	typeof(r1) __r1 = r1;				   \
+	typeof(r2) __r2 = r2;				   \
+	typeof(__r2->field) __v = __r2->field;		  \
+	typeof(__v) __e = __v + __r2->len - 1;		  \
+	int res = (IN_RANGE(__r1, __v, field) ||		\
+		IN_RANGE(__r1, __e, field));				 \
+	res;							\
+})
+
+static DEFINE_MUTEX(hlist_mut);
+
+#ifdef CONFIG_ANDROID_PMEM
+static int check_pmem_info(struct msm_pmem_info *info, int len)
+{
+	if (info->offset < len &&
+		info->offset + info->len <= len &&
+		info->y_off < len &&
+		info->cbcr_off < len)
+		return 0;
+
+	pr_err("%s: check failed: off %d len %d y %d cbcr %d (total len %d)\n",
+						__func__,
+						info->offset,
+						info->len,
+						info->y_off,
+						info->cbcr_off,
+						len);
+	return -EINVAL;
+}
+#endif
+
+static int check_overlap(struct hlist_head *ptype,
+				unsigned long paddr,
+				unsigned long len)
+{
+	struct msm_pmem_region *region;
+	struct msm_pmem_region t = { .paddr = paddr, .len = len };
+	struct hlist_node *node;
+
+	hlist_for_each_entry(region, node, ptype, list) {
+		if (CONTAINS(region, &t, paddr) ||
+			CONTAINS(&t, region, paddr) ||
+			OVERLAPS(region, &t, paddr)) {
+			CDBG(" region (PHYS %p len %ld)"
+				" clashes with registered region"
+				" (paddr %p len %ld)\n",
+				(void *)t.paddr, t.len,
+				(void *)region->paddr, region->len);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int msm_pmem_table_add(struct hlist_head *ptype,
+	struct msm_pmem_info *info)
+{
+	struct file *file;
+	unsigned long paddr;
+#ifdef CONFIG_ANDROID_PMEM
+	unsigned long kvstart;
+	int rc;
+#endif
+	unsigned long len;
+	struct msm_pmem_region *region;
+#ifdef CONFIG_ANDROID_PMEM
+	rc = get_pmem_file(info->fd, &paddr, &kvstart, &len, &file);
+	if (rc < 0) {
+		pr_err("%s: get_pmem_file fd %d error %d\n",
+						__func__,
+						info->fd, rc);
+		return rc;
+	}
+	if (!info->len)
+		info->len = len;
+
+	rc = check_pmem_info(info, len);
+	if (rc < 0)
+		return rc;
+#else
+	paddr = 0;
+	file = NULL;
+#endif
+	paddr += info->offset;
+	len = info->len;
+
+	if (check_overlap(ptype, paddr, len) < 0)
+		return -EINVAL;
+
+	CDBG("%s: type %d, active flag %d, paddr 0x%lx, vaddr 0x%lx\n",
+		__func__, info->type, info->active, paddr,
+		(unsigned long)info->vaddr);
+
+	region = kmalloc(sizeof(struct msm_pmem_region), GFP_KERNEL);
+	if (!region)
+		return -ENOMEM;
+
+	INIT_HLIST_NODE(&region->list);
+
+	region->paddr = paddr;
+	region->len = len;
+	region->file = file;
+	memcpy(&region->info, info, sizeof(region->info));
+	D("%s Adding region to list with type %d\n", __func__,
+						region->info.type);
+	D("%s pmem_stats address is 0x%p\n", __func__, ptype);
+	hlist_add_head(&(region->list), ptype);
+
+	return 0;
+}
+
+static int __msm_register_pmem(struct hlist_head *ptype,
+			struct msm_pmem_info *pinfo)
+{
+	int rc = 0;
+
+	switch (pinfo->type) {
+	case MSM_PMEM_AEC_AWB:
+	case MSM_PMEM_AF:
+	case MSM_PMEM_AEC:
+	case MSM_PMEM_AWB:
+	case MSM_PMEM_RS:
+	case MSM_PMEM_CS:
+	case MSM_PMEM_IHIST:
+	case MSM_PMEM_SKIN:
+		rc = msm_pmem_table_add(ptype, pinfo);
+		break;
+
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static int __msm_pmem_table_del(struct hlist_head *ptype,
+			struct msm_pmem_info *pinfo)
+{
+	int rc = 0;
+	struct msm_pmem_region *region;
+	struct hlist_node *node, *n;
+
+	switch (pinfo->type) {
+	case MSM_PMEM_AEC_AWB:
+	case MSM_PMEM_AF:
+		hlist_for_each_entry_safe(region, node, n,
+				ptype, list) {
+
+			if (pinfo->type == region->info.type &&
+				pinfo->vaddr == region->info.vaddr &&
+				pinfo->fd == region->info.fd) {
+				hlist_del(node);
+#ifdef CONFIG_ANDROID_PMEM
+				put_pmem_file(region->file);
+#else
+
+#endif
+				kfree(region);
+			}
+		}
+		break;
+
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+/* return of 0 means failure */
+uint8_t msm_pmem_region_lookup(struct hlist_head *ptype,
+	int pmem_type, struct msm_pmem_region *reg, uint8_t maxcount)
+{
+	struct msm_pmem_region *region;
+	struct msm_pmem_region *regptr;
+	struct hlist_node *node, *n;
+
+	uint8_t rc = 0;
+	D("%s\n", __func__);
+	regptr = reg;
+	mutex_lock(&hlist_mut);
+	hlist_for_each_entry_safe(region, node, n, ptype, list) {
+		if (region->info.type == pmem_type && region->info.active) {
+			*regptr = *region;
+			rc += 1;
+			if (rc >= maxcount)
+				break;
+			regptr++;
+		}
+	}
+	D("%s finished, rc=%d\n", __func__, rc);
+	mutex_unlock(&hlist_mut);
+	return rc;
+}
+
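+/* Like msm_pmem_region_lookup(), but retags matched regions as MSM_PMEM_VIDEO. */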
+uint8_t msm_pmem_region_lookup_2(struct hlist_head *ptype,
+					int pmem_type,
+					struct msm_pmem_region *reg,
+					uint8_t maxcount)
+{
+	struct msm_pmem_region *region;
+	struct msm_pmem_region *regptr;
+	struct hlist_node *node, *n;
+	uint8_t rc = 0;
+	regptr = reg;
+	mutex_lock(&hlist_mut);
+	hlist_for_each_entry_safe(region, node, n, ptype, list) {
+		D("Mio: info.type=%d, pmem_type = %d,"
+						"info.active = %d\n",
+		region->info.type, pmem_type, region->info.active);
+
+		if (region->info.type == pmem_type && region->info.active) {
+			D("info.type=%d, pmem_type = %d,"
+							"info.active = %d,\n",
+				region->info.type, pmem_type,
+				region->info.active);
+			*regptr = *region;
+			region->info.type = MSM_PMEM_VIDEO;
+			rc += 1;
+			if (rc >= maxcount)
+				break;
+			regptr++;
+		}
+	}
+	mutex_unlock(&hlist_mut);
+	return rc;
+}
+
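+/* Build pmem regions from the buffers queued on a device instance's videobuf queue. */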
+uint8_t msm_pmem_region_lookup_3(struct msm_cam_v4l2_device *pcam, int idx,
+						struct msm_pmem_region *reg,
+						int mem_type)
+{
+	struct videobuf_contig_pmem *mem;
+	uint8_t rc = 0;
+	struct videobuf_queue *q = &pcam->dev_inst[idx]->vid_bufq;
+	struct videobuf_buffer *buf = NULL;
+
+	videobuf_queue_lock(q);
+	list_for_each_entry(buf, &q->stream, stream) {
+		mem = buf->priv;
+		reg->paddr = mem->phyaddr;
+		D("%s paddr for buf %d is 0x%p\n", __func__,
+						buf->i,
+						(void *)reg->paddr);
+		reg->len = sizeof(struct msm_pmem_info);
+		reg->file = NULL;
+		reg->info.len = mem->size;
+
+		reg->info.vaddr =
+			(void *)(buf->baddr);
+
+		reg->info.type = mem_type;
+
+		reg->info.offset = 0;
+		reg->info.y_off = mem->y_off;
+		reg->info.cbcr_off = PAD_TO_WORD(mem->cbcr_off);
+		D("%s y_off = %d, cbcr_off = %d\n", __func__,
+			reg->info.y_off, reg->info.cbcr_off);
+		rc += 1;
+		reg++;
+	}
+	videobuf_queue_unlock(q);
+
+	return rc;
+}
+
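+/* Translate a user stats buffer (vaddr, fd) to its physical address and mark it active. */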
+unsigned long msm_pmem_stats_vtop_lookup(
+				struct msm_sync *sync,
+				unsigned long buffer,
+				int fd)
+{
+	struct msm_pmem_region *region;
+	struct hlist_node *node, *n;
+
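+	/* Virtual-to-physical lookup for a stats buffer: match on the
+	 * userspace vaddr/fd pair, mark the region active, and return its
+	 * physical address. */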
+	hlist_for_each_entry_safe(region, node, n, &sync->pmem_stats, list) {
+		if (((unsigned long)(region->info.vaddr) == buffer) &&
+						(region->info.fd == fd) &&
+						region->info.active == 0) {
+			region->info.active = 1;
+			return region->paddr;
+		}
+	}
+
+	return 0;
+}
+
+unsigned long msm_pmem_stats_ptov_lookup(struct msm_sync *sync,
+						unsigned long addr, int *fd)
+{
+	struct msm_pmem_region *region;
+	struct hlist_node *node, *n;
+
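+	/* Physical-to-virtual lookup: find the active region at this physical
+	 * address, mark it inactive, and return its userspace vaddr and fd. */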
+	hlist_for_each_entry_safe(region, node, n, &sync->pmem_stats, list) {
+		if (addr == region->paddr && region->info.active) {
+			/* offset since we could pass vaddr inside a
+			 * registered pmem buffer */
+			*fd = region->info.fd;
+			region->info.active = 0;
+			return (unsigned long)(region->info.vaddr);
+		}
+	}
+
+	return 0;
+}
+
+int msm_register_pmem(struct hlist_head *ptype, void __user *arg)
+{
+	struct msm_pmem_info info;
+
+	if (copy_from_user(&info, arg, sizeof(info))) {
+		ERR_COPY_FROM_USER();
+		return -EFAULT;
+	}
+
+	return __msm_register_pmem(ptype, &info);
+}
+EXPORT_SYMBOL(msm_register_pmem);
+
+int msm_pmem_table_del(struct hlist_head *ptype, void __user *arg)
+{
+	struct msm_pmem_info info;
+
+	if (copy_from_user(&info, arg, sizeof(info))) {
+		ERR_COPY_FROM_USER();
+		return -EFAULT;
+	}
+
+	return __msm_pmem_table_del(ptype, &info);
+}
+EXPORT_SYMBOL(msm_pmem_table_del);
diff --git a/drivers/media/video/msm/msm_sensor.c b/drivers/media/video/msm/msm_sensor.c
new file mode 100644
index 0000000..83fe643
--- /dev/null
+++ b/drivers/media/video/msm/msm_sensor.c
@@ -0,0 +1,814 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <mach/gpio.h>
+#include <mach/camera.h>
+#include <media/msm_camera.h>
+#include "msm_sensor.h"
+
+/*=============================================================*/
+
+int32_t msm_sensor_i2c_rxdata(struct msm_sensor_ctrl_t *s_ctrl,
+	unsigned char *rxdata, int length)
+{
+	uint16_t saddr = s_ctrl->msm_sensor_client->addr >> 1;
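+	/*
+	 * Two-message transfer: the first message writes the 2-byte register
+	 * address (already packed into rxdata by the caller), the second
+	 * reads 'length' bytes back into the same buffer.
+	 */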
+	struct i2c_msg msgs[] = {
+		{
+			.addr  = saddr,
+			.flags = 0,
+			.len   = 2,
+			.buf   = rxdata,
+		},
+		{
+			.addr  = saddr,
+			.flags = I2C_M_RD,
+			.len   = length,
+			.buf   = rxdata,
+		},
+	};
+	if (i2c_transfer(s_ctrl->msm_sensor_client->adapter, msgs, 2) < 0) {
+		CDBG("msm_sensor_i2c_rxdata failed 0x%x\n", saddr);
+		return -EIO;
+	}
+	return 0;
+}
+
+int32_t msm_sensor_i2c_txdata(struct msm_sensor_ctrl_t *s_ctrl,
+				unsigned char *txdata, int length)
+{
+	uint16_t saddr = s_ctrl->msm_sensor_client->addr >> 1;
+	struct i2c_msg msg[] = {
+		{
+			.addr = saddr,
+			.flags = 0,
+			.len = length,
+			.buf = txdata,
+		 },
+	};
+	if (i2c_transfer(s_ctrl->msm_sensor_client->adapter, msg, 1) < 0) {
+		CDBG("msm_sensor_i2c_txdata failed 0x%x\n", saddr);
+		return -EIO;
+	}
+	return 0;
+}
+
+int32_t msm_sensor_i2c_waddr_write_b(struct msm_sensor_ctrl_t *s_ctrl,
+	uint16_t waddr, uint8_t bdata)
+{
+	int32_t rc = -EFAULT;
+	unsigned char buf[3];
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (waddr & 0xFF00) >> 8;
+	buf[1] = (waddr & 0x00FF);
+	buf[2] = bdata;
+	CDBG("%s waddr = 0x%x, wdata = 0x%x\n", __func__, waddr, bdata);
+	rc = msm_sensor_i2c_txdata(s_ctrl, buf, 3);
+	if (rc < 0)
+		CDBG("%s fail\n", __func__);
+	return rc;
+}
+
+int32_t msm_sensor_i2c_waddr_write_w(struct msm_sensor_ctrl_t *s_ctrl,
+	uint16_t waddr, uint16_t wdata)
+{
+	int32_t rc = -EFAULT;
+	unsigned char buf[4];
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (waddr & 0xFF00) >> 8;
+	buf[1] = (waddr & 0x00FF);
+	buf[2] = (wdata & 0xFF00) >> 8;
+	buf[3] = (wdata & 0x00FF);
+	CDBG("%s waddr = 0x%x, wdata = 0x%x\n", __func__, waddr, wdata);
+	rc = msm_sensor_i2c_txdata(s_ctrl, buf, 4);
+	if (rc < 0)
+		CDBG("%s fail\n", __func__);
+	return rc;
+}
+
+int32_t msm_sensor_i2c_waddr_write_b_tbl(struct msm_sensor_ctrl_t *s_ctrl,
+	struct msm_sensor_i2c_reg_conf const *reg_conf_tbl, uint8_t size)
+{
+	int i;
+	int32_t rc = -EIO;
+	for (i = 0; i < size; i++) {
+		rc = msm_sensor_i2c_waddr_write_b(
+			s_ctrl,
+			reg_conf_tbl->reg_addr,
+			reg_conf_tbl->reg_data);
+		if (rc < 0)
+			break;
+		reg_conf_tbl++;
+	}
+	return rc;
+}
+
+int32_t msm_sensor_i2c_waddr_write_w_tbl(struct msm_sensor_ctrl_t *s_ctrl,
+	struct msm_sensor_i2c_reg_conf const *reg_conf_tbl, uint8_t size)
+{
+	int i;
+	int32_t rc = -EIO;
+	for (i = 0; i < size; i++) {
+		rc = msm_sensor_i2c_waddr_write_w(
+			s_ctrl,
+			reg_conf_tbl->reg_addr,
+			reg_conf_tbl->reg_data);
+		if (rc < 0)
+			break;
+		reg_conf_tbl++;
+	}
+	return rc;
+}
+
+int32_t msm_sensor_i2c_waddr_read_w(struct msm_sensor_ctrl_t *s_ctrl,
+	uint16_t waddr, uint16_t *data)
+{
+	int32_t rc = 0;
+	unsigned char buf[2];
+	if (!data)
+		return -EIO;
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (waddr & 0xFF00) >> 8;
+	buf[1] = (waddr & 0x00FF);
+	rc = msm_sensor_i2c_rxdata(s_ctrl, buf, 2);
+	if (rc < 0) {
+		CDBG("%s fail\n", __func__);
+		return rc;
+	}
+	*data = (buf[0] << 8 | buf[1]);
+	CDBG("%s waddr = 0x%x val = 0x%x!\n", __func__,
+	waddr, *data);
+	return rc;
+}
+
+int msm_sensor_write_b_conf_array(struct msm_sensor_ctrl_t *s_ctrl,
+			struct msm_sensor_i2c_conf_array *array, uint16_t index)
+{
+	return msm_sensor_i2c_waddr_write_b_tbl(
+		s_ctrl, array[index].conf, array[index].size);
+}
+
+int msm_sensor_write_w_conf_array(struct msm_sensor_ctrl_t *s_ctrl,
+			struct msm_sensor_i2c_conf_array *array, uint16_t index)
+{
+	return msm_sensor_i2c_waddr_write_w_tbl(
+		s_ctrl, array[index].conf, array[index].size);
+}
+
+int msm_sensor_write_b_init_settings(struct msm_sensor_ctrl_t *s_ctrl)
+{
+	int rc = 0, i;
+	for (i = 0; i < s_ctrl->msm_sensor_reg.init_size; i++) {
+		rc = msm_sensor_write_b_conf_array(
+			s_ctrl, s_ctrl->msm_sensor_reg.init_settings, i);
+		msleep(s_ctrl->msm_sensor_reg.init_settings[i].delay);
+		if (rc < 0)
+			break;
+	}
+	return rc;
+}
+
+int msm_sensor_write_w_init_settings(struct msm_sensor_ctrl_t *s_ctrl)
+{
+	int rc = 0, i;
+	for (i = 0; i < s_ctrl->msm_sensor_reg.init_size; i++) {
+		rc = msm_sensor_write_w_conf_array(
+			s_ctrl, s_ctrl->msm_sensor_reg.init_settings, i);
+		msleep(s_ctrl->msm_sensor_reg.init_settings[i].delay);
+		if (rc < 0)
+			break;
+	}
+	return rc;
+}
+
+int msm_sensor_write_b_res_settings(struct msm_sensor_ctrl_t *s_ctrl,
+	uint16_t res)
+{
+	int rc = 0;
+	rc = msm_sensor_write_b_conf_array(
+		s_ctrl, s_ctrl->msm_sensor_reg.res_settings, res);
+	msleep(s_ctrl->msm_sensor_reg.res_settings[res].delay);
+	return rc;
+}
+
+int msm_sensor_write_w_res_settings(struct msm_sensor_ctrl_t *s_ctrl,
+	uint16_t res)
+{
+	int rc = 0;
+	rc = msm_sensor_write_w_conf_array(
+		s_ctrl, s_ctrl->msm_sensor_reg.res_settings, res);
+	msleep(s_ctrl->msm_sensor_reg.res_settings[res].delay);
+	return rc;
+}
+
+uint16_t msm_sensor_read_b_conf_wdata(struct msm_sensor_ctrl_t *s_ctrl,
+			enum msm_sensor_resolution_t res, int8_t array_addr)
+{
+	struct msm_sensor_i2c_reg_conf *conf =
+		s_ctrl->msm_sensor_reg.res_settings[res].conf;
+
+	/* Byte-register table: build the 16-bit value from two consecutive
+	 * entries, high byte first. */
+	return (conf[array_addr].reg_data << 8) |
+		conf[array_addr + 1].reg_data;
+}
+
+uint16_t msm_sensor_read_w_conf_wdata(struct msm_sensor_ctrl_t *s_ctrl,
+			enum msm_sensor_resolution_t res, int8_t array_addr)
+{
+	struct msm_sensor_i2c_reg_conf *conf =
+		s_ctrl->msm_sensor_reg.res_settings[res].conf;
+
+	/* Word-register table: the entry already holds the 16-bit value. */
+	return conf[array_addr].reg_data;
+}
+
+void msm_sensor_start_stream(struct msm_sensor_ctrl_t *s_ctrl)
+{
+	msm_sensor_i2c_waddr_write_b_tbl(s_ctrl,
+		s_ctrl->msm_sensor_reg.start_stream_conf,
+		s_ctrl->msm_sensor_reg.start_stream_conf_size);
+}
+
+void msm_sensor_stop_stream(struct msm_sensor_ctrl_t *s_ctrl)
+{
+	msm_sensor_i2c_waddr_write_b_tbl(s_ctrl,
+		s_ctrl->msm_sensor_reg.stop_stream_conf,
+		s_ctrl->msm_sensor_reg.stop_stream_conf_size);
+}
+
+void msm_sensor_group_hold_on(struct msm_sensor_ctrl_t *s_ctrl)
+{
+	msm_sensor_i2c_waddr_write_b_tbl(s_ctrl,
+		s_ctrl->msm_sensor_reg.group_hold_on_conf,
+		s_ctrl->msm_sensor_reg.group_hold_on_conf_size);
+}
+
+void msm_sensor_group_hold_off(struct msm_sensor_ctrl_t *s_ctrl)
+{
+	msm_sensor_i2c_waddr_write_b_tbl(s_ctrl,
+		s_ctrl->msm_sensor_reg.group_hold_off_conf,
+		s_ctrl->msm_sensor_reg.group_hold_off_conf_size);
+}
+
+uint16_t msm_sensor_get_prev_lines_pf(struct msm_sensor_ctrl_t *s_ctrl)
+{
+	return s_ctrl->prev_frame_length_lines;
+}
+
+uint16_t msm_sensor_get_prev_pixels_pl(struct msm_sensor_ctrl_t *s_ctrl)
+{
+	return s_ctrl->prev_line_length_pck;
+}
+
+uint16_t msm_sensor_get_pict_lines_pf(struct msm_sensor_ctrl_t *s_ctrl)
+{
+	return s_ctrl->snap_frame_length_lines;
+}
+
+uint16_t msm_sensor_get_pict_pixels_pl(struct msm_sensor_ctrl_t *s_ctrl)
+{
+	return s_ctrl->snap_line_length_pck;
+}
+
+uint32_t msm_sensor_get_pict_max_exp_lc(struct msm_sensor_ctrl_t *s_ctrl)
+{
+	return s_ctrl->snap_frame_length_lines * 24;
+}
+
+void msm_sensor_get_pict_fps(struct msm_sensor_ctrl_t *s_ctrl,
+			uint16_t fps, uint16_t *pfps)
+{
+	uint32_t divider, d1, d2;
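+	/* Q10 fixed point: scale the preview fps by the ratio of preview to
+	 * snapshot frame timing (frame_length_lines * line_length_pck). */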
+	d1 = s_ctrl->prev_frame_length_lines * Q10 /
+		s_ctrl->snap_frame_length_lines;
+	d2 = s_ctrl->prev_line_length_pck * Q10 /
+		s_ctrl->snap_line_length_pck;
+	divider = d1 * d2 / Q10;
+	*pfps = (uint16_t) (fps * divider / Q10);
+}
+
+int32_t msm_sensor_set_fps(struct msm_sensor_ctrl_t *s_ctrl,
+						struct fps_cfg *fps)
+{
+	uint16_t total_lines_per_frame;
+	int32_t rc = 0;
+	s_ctrl->fps_divider = fps->fps_div;
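+	/* fps_div is a Q10 factor; stretching the frame length by it lowers
+	 * the frame rate proportionally. */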
+
+	total_lines_per_frame = (uint16_t)
+		((s_ctrl->prev_frame_length_lines) *
+		s_ctrl->fps_divider/Q10);
+
+	rc = msm_sensor_i2c_waddr_write_w(s_ctrl,
+				s_ctrl->frame_length_lines_addr,
+				total_lines_per_frame);
+	return rc;
+}
+
+int32_t msm_sensor_write_exp_gain1(struct msm_sensor_ctrl_t *s_ctrl,
+		uint16_t gain, uint32_t line)
+{
+	uint32_t fl_lines;
+	uint8_t offset;
+	fl_lines = s_ctrl->curr_frame_length_lines;
+	line = (line * s_ctrl->fps_divider) / Q10;
+	offset = s_ctrl->vert_offset;
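+	/* If the requested integration time does not fit in the current
+	 * frame, stretch the frame length so line + vert_offset lines fit. */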
+	if (line > (fl_lines - offset))
+		fl_lines = line + offset;
+
+	s_ctrl->func_tbl.sensor_group_hold_on(s_ctrl);
+	msm_sensor_i2c_waddr_write_w(s_ctrl,
+			s_ctrl->frame_length_lines_addr, fl_lines);
+	msm_sensor_i2c_waddr_write_w(s_ctrl,
+			s_ctrl->coarse_int_time_addr, line);
+	msm_sensor_i2c_waddr_write_w(s_ctrl, s_ctrl->global_gain_addr, gain);
+	s_ctrl->func_tbl.sensor_group_hold_off(s_ctrl);
+	return 0;
+}
+
+int32_t msm_sensor_write_exp_gain2(struct msm_sensor_ctrl_t *s_ctrl,
+		uint16_t gain, uint32_t line)
+{
+	uint32_t fl_lines, ll_pclk, ll_ratio;
+	uint8_t offset;
+	fl_lines = s_ctrl->curr_frame_length_lines;
+	ll_pclk = s_ctrl->curr_line_length_pck;
+	line = (line * s_ctrl->fps_divider) / Q10;
+	offset = s_ctrl->vert_offset;
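+	/* Keep the frame length fixed here; if the exposure does not fit,
+	 * stretch the line length instead (ll_ratio is a Q10 factor). */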
+	if (line > (fl_lines - offset)) {
+		ll_ratio = (line * Q10) / (fl_lines - offset);
+		ll_pclk = ll_pclk * ll_ratio / Q10;
+		line = fl_lines - offset;
+	}
+
+	s_ctrl->func_tbl.sensor_group_hold_on(s_ctrl);
+	msm_sensor_i2c_waddr_write_w(s_ctrl,
+			s_ctrl->line_length_pck_addr, ll_pclk);
+	msm_sensor_i2c_waddr_write_w(s_ctrl,
+			s_ctrl->coarse_int_time_addr, line);
+	msm_sensor_i2c_waddr_write_w(s_ctrl, s_ctrl->global_gain_addr, gain);
+	s_ctrl->func_tbl.sensor_group_hold_off(s_ctrl);
+	return 0;
+}
+
+int32_t msm_sensor_set_sensor_mode_b(struct msm_sensor_ctrl_t *s_ctrl,
+	int mode, int res)
+{
+	int32_t rc = 0;
+
+	if (s_ctrl->curr_res != res) {
+		switch (mode) {
+		case SENSOR_PREVIEW_MODE:
+			s_ctrl->prev_res = res;
+			break;
+		case SENSOR_SNAPSHOT_MODE:
+		case SENSOR_RAW_SNAPSHOT_MODE:
+			s_ctrl->pict_res = res;
+			break;
+		default:
+			rc = -EINVAL;
+			break;
+		}
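+		/* Refresh the cached frame timing from the per-resolution
+		 * register table before applying the periodic settings. */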
+		s_ctrl->curr_frame_length_lines =
+			msm_sensor_read_b_conf_wdata
+			(s_ctrl, res, s_ctrl->frame_length_lines_array_addr);
+		s_ctrl->curr_line_length_pck =
+			msm_sensor_read_b_conf_wdata
+			(s_ctrl, res, s_ctrl->line_length_pck_array_addr);
+
+		rc = s_ctrl->func_tbl.sensor_setting(s_ctrl,
+			MSM_SENSOR_UPDATE_PERIODIC, res);
+		if (rc < 0)
+			return rc;
+	}
+	s_ctrl->curr_res = res;
+	return rc;
+}
+
+int32_t msm_sensor_set_sensor_mode_w(struct msm_sensor_ctrl_t *s_ctrl,
+	int mode, int res)
+{
+	int32_t rc = 0;
+
+	if (s_ctrl->curr_res != res) {
+		switch (mode) {
+		case SENSOR_PREVIEW_MODE:
+			s_ctrl->prev_res = res;
+			break;
+		case SENSOR_SNAPSHOT_MODE:
+		case SENSOR_RAW_SNAPSHOT_MODE:
+			s_ctrl->pict_res = res;
+			break;
+		default:
+			rc = -EINVAL;
+			break;
+		}
+		s_ctrl->curr_frame_length_lines =
+			msm_sensor_read_w_conf_wdata
+			(s_ctrl, res, s_ctrl->frame_length_lines_array_addr);
+		s_ctrl->curr_line_length_pck =
+			msm_sensor_read_w_conf_wdata
+			(s_ctrl, res, s_ctrl->line_length_pck_array_addr);
+
+		rc = s_ctrl->func_tbl.sensor_setting(s_ctrl,
+			MSM_SENSOR_UPDATE_PERIODIC, res);
+		if (rc < 0)
+			return rc;
+	}
+	s_ctrl->curr_res = res;
+	return rc;
+}
+
+int32_t msm_sensor_mode_init_bdata(struct msm_sensor_ctrl_t *s_ctrl,
+			int mode, struct sensor_init_cfg *init_info)
+{
+	int32_t rc = 0;
+	CDBG("%s: %d\n", __func__, __LINE__);
+	if (mode != s_ctrl->cam_mode) {
+		if (init_info->prev_res >=
+			s_ctrl->msm_sensor_reg.num_conf ||
+			init_info->pict_res >=
+			s_ctrl->msm_sensor_reg.num_conf) {
+			CDBG("Resolution does not exist");
+			return -EINVAL;
+		}
+
+		s_ctrl->prev_res = init_info->prev_res;
+		s_ctrl->pict_res = init_info->pict_res;
+		s_ctrl->curr_res = MSM_SENSOR_INVALID_RES;
+		s_ctrl->cam_mode = mode;
+
+		s_ctrl->prev_frame_length_lines =
+			msm_sensor_read_b_conf_wdata(s_ctrl,
+				s_ctrl->prev_res,
+				s_ctrl->frame_length_lines_array_addr);
+		s_ctrl->prev_line_length_pck =
+			msm_sensor_read_b_conf_wdata(s_ctrl,
+				s_ctrl->prev_res,
+				s_ctrl->line_length_pck_array_addr);
+
+		s_ctrl->snap_frame_length_lines =
+			msm_sensor_read_b_conf_wdata(s_ctrl,
+				s_ctrl->pict_res,
+				s_ctrl->frame_length_lines_array_addr);
+
+		s_ctrl->snap_line_length_pck =
+			msm_sensor_read_b_conf_wdata(s_ctrl,
+				s_ctrl->pict_res,
+				s_ctrl->line_length_pck_array_addr);
+
+		rc = s_ctrl->func_tbl.sensor_setting(s_ctrl,
+			MSM_SENSOR_REG_INIT, s_ctrl->prev_res);
+	}
+	return rc;
+}
+
+int32_t msm_sensor_mode_init_wdata(struct msm_sensor_ctrl_t *s_ctrl,
+			int mode, struct sensor_init_cfg *init_info)
+{
+	int32_t rc = 0;
+	CDBG("%s: %d\n", __func__, __LINE__);
+	if (mode != s_ctrl->cam_mode) {
+		if (init_info->prev_res >=
+			s_ctrl->msm_sensor_reg.num_conf ||
+			init_info->pict_res >=
+			s_ctrl->msm_sensor_reg.num_conf) {
+			CDBG("Resolution does not exist");
+			return -EINVAL;
+		}
+
+		s_ctrl->prev_res = init_info->prev_res;
+		s_ctrl->pict_res = init_info->pict_res;
+		s_ctrl->curr_res = MSM_SENSOR_INVALID_RES;
+		s_ctrl->cam_mode = mode;
+
+		s_ctrl->prev_frame_length_lines =
+			msm_sensor_read_w_conf_wdata(s_ctrl,
+				s_ctrl->prev_res,
+				s_ctrl->frame_length_lines_array_addr);
+		s_ctrl->prev_line_length_pck =
+			msm_sensor_read_w_conf_wdata(s_ctrl,
+				s_ctrl->prev_res,
+				s_ctrl->line_length_pck_array_addr);
+
+		s_ctrl->snap_frame_length_lines =
+			msm_sensor_read_w_conf_wdata(s_ctrl,
+				s_ctrl->pict_res,
+				s_ctrl->frame_length_lines_array_addr);
+
+		s_ctrl->snap_line_length_pck =
+			msm_sensor_read_w_conf_wdata(s_ctrl,
+				s_ctrl->pict_res,
+				s_ctrl->line_length_pck_array_addr);
+
+		rc = s_ctrl->func_tbl.sensor_setting(s_ctrl,
+			MSM_SENSOR_REG_INIT, s_ctrl->prev_res);
+	}
+	return rc;
+}
+
+int32_t msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp)
+{
+	struct sensor_cfg_data cdata;
+	long   rc = 0;
+	if (copy_from_user(&cdata,
+		(void *)argp,
+		sizeof(struct sensor_cfg_data)))
+		return -EFAULT;
+	mutex_lock(s_ctrl->msm_sensor_mutex);
+	CDBG("msm_sensor_config: cfgtype = %d\n",
+	cdata.cfgtype);
+		switch (cdata.cfgtype) {
+		case CFG_GET_PICT_FPS:
+			s_ctrl->func_tbl.
+			sensor_get_pict_fps(
+				s_ctrl,
+				cdata.cfg.gfps.prevfps,
+				&(cdata.cfg.gfps.pictfps));
+
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_GET_PREV_L_PF:
+			cdata.cfg.prevl_pf =
+				s_ctrl->func_tbl.
+				sensor_get_prev_lines_pf
+				(s_ctrl);
+
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_GET_PREV_P_PL:
+			cdata.cfg.prevp_pl =
+				s_ctrl->func_tbl.
+				sensor_get_prev_pixels_pl
+				(s_ctrl);
+
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_GET_PICT_L_PF:
+			cdata.cfg.pictl_pf =
+				s_ctrl->func_tbl.
+				sensor_get_pict_lines_pf
+				(s_ctrl);
+
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_GET_PICT_P_PL:
+			cdata.cfg.pictp_pl =
+				s_ctrl->func_tbl.
+				sensor_get_pict_pixels_pl
+				(s_ctrl);
+
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_GET_PICT_MAX_EXP_LC:
+			cdata.cfg.pict_max_exp_lc =
+				s_ctrl->func_tbl.
+				sensor_get_pict_max_exp_lc
+				(s_ctrl);
+
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_SET_FPS:
+		case CFG_SET_PICT_FPS:
+			rc = s_ctrl->func_tbl.
+				sensor_set_fps(
+				s_ctrl,
+				&(cdata.cfg.fps));
+			break;
+
+		case CFG_SET_EXP_GAIN:
+			rc =
+				s_ctrl->func_tbl.
+				sensor_write_exp_gain(
+					s_ctrl,
+					cdata.cfg.exp_gain.gain,
+					cdata.cfg.exp_gain.line);
+			break;
+
+		case CFG_SET_PICT_EXP_GAIN:
+			rc =
+				s_ctrl->func_tbl.
+				sensor_write_exp_gain(
+					s_ctrl,
+					cdata.cfg.exp_gain.gain,
+					cdata.cfg.exp_gain.line);
+			break;
+
+		case CFG_SET_MODE:
+			rc = s_ctrl->func_tbl.
+				sensor_set_sensor_mode(
+					s_ctrl,
+					cdata.mode,
+					cdata.rs);
+			break;
+
+		case CFG_PWR_DOWN:
+			break;
+
+		case CFG_MOVE_FOCUS:
+			break;
+
+		case CFG_SET_DEFAULT_FOCUS:
+			break;
+
+		case CFG_GET_AF_MAX_STEPS:
+			cdata.max_steps = 32;
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_SET_EFFECT:
+			break;
+
+		case CFG_SEND_WB_INFO:
+			break;
+
+		case CFG_SENSOR_INIT:
+			rc = s_ctrl->func_tbl.
+				sensor_mode_init(
+				s_ctrl,
+				cdata.mode,
+				&(cdata.cfg.init_info));
+			break;
+
+		default:
+			rc = -EFAULT;
+			break;
+		}
+
+	mutex_unlock(s_ctrl->msm_sensor_mutex);
+
+	return rc;
+}
+
+int16_t msm_sensor_match_id(struct msm_sensor_ctrl_t *s_ctrl)
+{
+	int rc = 0;
+	uint16_t chipid = 0;
+	rc = msm_sensor_i2c_waddr_read_w(s_ctrl,
+			s_ctrl->sensor_id_addr, &chipid);
+	CDBG("msm_sensor id: %d\n", chipid);
+	if (chipid != s_ctrl->sensor_id) {
+		CDBG("msm_sensor_match_id chip id does not match\n");
+		return -ENODEV;
+	}
+	return rc;
+}
+
+int msm_sensor_i2c_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	int rc = 0;
+	struct msm_sensor_ctrl_t *this_ctrl;
+	CDBG("%s_i2c_probe called\n", client->name);
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		CDBG("i2c_check_functionality failed\n");
+		rc = -ENOTSUPP;
+		goto probe_failure;
+	}
+
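+	/* driver_data of the matched i2c_device_id carries the sensor's ctrl
+	 * structure (presumably filled in by the specific sensor driver). */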
+	this_ctrl = (struct msm_sensor_ctrl_t *)(id->driver_data);
+	this_ctrl->msm_sensor_client = client;
+	return 0;
+
+probe_failure:
+	CDBG("%s_i2c_probe failed\n", client->name);
+	return rc;
+}
+
+int msm_sensor_probe(struct msm_sensor_ctrl_t *s_ctrl,
+		const struct msm_camera_sensor_info *info,
+		struct msm_sensor_ctrl *s)
+{
+	int rc = 0;
+	rc = i2c_add_driver(s_ctrl->msm_sensor_i2c_driver);
+	if (rc < 0 || s_ctrl->msm_sensor_client == NULL) {
+		rc = -ENOTSUPP;
+		CDBG("I2C add driver failed");
+		goto probe_fail;
+	}
+
+	rc = s_ctrl->func_tbl.sensor_power_up(info);
+	if (rc < 0)
+		goto probe_fail;
+	s->s_init = s_ctrl->func_tbl.sensor_open_init;
+	s->s_release = s_ctrl->func_tbl.sensor_release;
+	s->s_config  = s_ctrl->func_tbl.sensor_config;
+	s->s_camera_type = s_ctrl->camera_type;
+	s->s_mount_angle = 0;
+	s_ctrl->func_tbl.sensor_power_down(info);
+	return rc;
+probe_fail:
+	return rc;
+}
+
+int msm_sensor_v4l2_probe(struct msm_sensor_ctrl_t *s_ctrl,
+	const struct msm_camera_sensor_info *info,
+	struct v4l2_subdev *sdev, struct msm_sensor_ctrl *s)
+{
+	int rc = 0;
+	rc = s_ctrl->func_tbl.sensor_probe(s_ctrl, info, s);
+	if (rc < 0)
+		return rc;
+
+	s_ctrl->sensor_v4l2_subdev = sdev;
+	v4l2_i2c_subdev_init(s_ctrl->sensor_v4l2_subdev,
+		s_ctrl->msm_sensor_client, s_ctrl->sensor_v4l2_subdev_ops);
+	s_ctrl->sensor_v4l2_subdev->dev_priv = (void *) s_ctrl;
+	return rc;
+}
+
+int msm_sensor_v4l2_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
+			   enum v4l2_mbus_pixelcode *code)
+{
+	struct msm_sensor_ctrl_t *s_ctrl =
+		(struct msm_sensor_ctrl_t *) sd->dev_priv;
+	if ((unsigned int)index >= s_ctrl->sensor_v4l2_subdev_info_size)
+		return -EINVAL;
+
+	*code = s_ctrl->sensor_v4l2_subdev_info[index].code;
+	return 0;
+}
+
+static int msm_sensor_debugfs_stream_s(void *data, u64 val)
+{
+	struct msm_sensor_ctrl_t *s_ctrl = (struct msm_sensor_ctrl_t *) data;
+	if (val)
+		s_ctrl->func_tbl.sensor_start_stream(s_ctrl);
+	else
+		s_ctrl->func_tbl.sensor_stop_stream(s_ctrl);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(sensor_debugfs_stream, NULL,
+			msm_sensor_debugfs_stream_s, "%llu\n");
+
+static int msm_sensor_debugfs_test_s(void *data, u64 val)
+{
+	CDBG("val: %llu\n", val);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(sensor_debugfs_test, NULL,
+			msm_sensor_debugfs_test_s, "%llu\n");
+
+int msm_sensor_enable_debugfs(struct msm_sensor_ctrl_t *s_ctrl)
+{
+	struct dentry *debugfs_base, *sensor_dir;
+	debugfs_base = debugfs_create_dir("msm_sensor", NULL);
+	if (!debugfs_base)
+		return -ENOMEM;
+
+	sensor_dir = debugfs_create_dir
+		(s_ctrl->sensordata->sensor_name, debugfs_base);
+	if (!sensor_dir)
+		return -ENOMEM;
+
+	if (!debugfs_create_file("stream", S_IRUGO | S_IWUSR, sensor_dir,
+			(void *) s_ctrl, &sensor_debugfs_stream))
+		return -ENOMEM;
+
+	if (!debugfs_create_file("test", S_IRUGO | S_IWUSR, sensor_dir,
+			(void *) s_ctrl, &sensor_debugfs_test))
+		return -ENOMEM;
+
+	return 0;
+}
diff --git a/drivers/media/video/msm/msm_sensor.h b/drivers/media/video/msm/msm_sensor.h
new file mode 100644
index 0000000..d8224ce
--- /dev/null
+++ b/drivers/media/video/msm/msm_sensor.h
@@ -0,0 +1,246 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/i2c.h>
+#include <linux/debugfs.h>
+#include <mach/camera.h>
+#include <media/msm_camera.h>
+#include <media/v4l2-subdev.h>
+#define Q8  0x00000100
+#define Q10 0x00000400
+
+enum msm_sensor_resolution_t {
+	MSM_SENSOR_RES_0,
+	MSM_SENSOR_RES_1,
+	MSM_SENSOR_RES_2,
+	MSM_SENSOR_RES_3,
+	MSM_SENSOR_RES_4,
+	MSM_SENSOR_RES_5,
+	MSM_SENSOR_RES_6,
+	MSM_SENSOR_RES_7,
+	MSM_SENSOR_INVALID_RES,
+};
+
+#define MSM_SENSOR_MCLK_8HZ 8000000
+#define MSM_SENSOR_MCLK_16HZ 16000000
+#define MSM_SENSOR_MCLK_24HZ 24000000
+
+enum msm_sensor_reg_update {
+	/* Sensor registers that need to be updated during initialization */
+	MSM_SENSOR_REG_INIT,
+	/* Sensor registers that need periodic I2C writes */
+	MSM_SENSOR_UPDATE_PERIODIC,
+	/* All sensor registers will be updated */
+	MSM_SENSOR_UPDATE_ALL,
+	/* Not a valid update type */
+	MSM_SENSOR_UPDATE_INVALID
+};
+
+enum msm_sensor_cam_mode_t {
+	MSM_SENSOR_MODE_2D_RIGHT,
+	MSM_SENSOR_MODE_2D_LEFT,
+	MSM_SENSOR_MODE_3D,
+	MSM_SENSOR_MODE_INVALID
+};
+
+struct msm_sensor_i2c_reg_conf {
+	unsigned short reg_addr;
+	unsigned short reg_data;
+};
+
+struct msm_sensor_i2c_conf_array {
+	struct msm_sensor_i2c_reg_conf *conf;
+	unsigned short size;
+	unsigned short delay;
+};
+
+struct msm_sensor_reg_t {
+	struct msm_sensor_i2c_reg_conf *start_stream_conf;
+	uint8_t start_stream_conf_size;
+	struct msm_sensor_i2c_reg_conf *stop_stream_conf;
+	uint8_t stop_stream_conf_size;
+	struct msm_sensor_i2c_reg_conf *group_hold_on_conf;
+	uint8_t group_hold_on_conf_size;
+	struct msm_sensor_i2c_reg_conf *group_hold_off_conf;
+	uint8_t group_hold_off_conf_size;
+	struct msm_sensor_i2c_conf_array *init_settings;
+	uint8_t init_size;
+	struct msm_sensor_i2c_conf_array *res_settings;
+	uint8_t num_conf;
+};
+
+struct v4l2_subdev_info {
+	enum v4l2_mbus_pixelcode code;
+	enum v4l2_colorspace colorspace;
+	uint16_t fmt;
+	uint16_t order;
+};
+
+struct msm_sensor_ctrl_t {
+	const struct  msm_camera_sensor_info *sensordata;
+	struct i2c_client *msm_sensor_client;
+	struct i2c_driver *msm_sensor_i2c_driver;
+	struct msm_sensor_reg_t msm_sensor_reg;
+
+	uint16_t sensor_id_addr;
+	uint16_t sensor_id;
+	uint16_t frame_length_lines_addr;
+	uint16_t line_length_pck_addr;
+	uint16_t global_gain_addr;
+	uint16_t coarse_int_time_addr;
+
+	uint8_t frame_length_lines_array_addr;
+	uint8_t line_length_pck_array_addr;
+
+	uint16_t curr_line_length_pck;
+	uint16_t curr_frame_length_lines;
+	uint16_t prev_line_length_pck;
+	uint16_t prev_frame_length_lines;
+	uint16_t snap_line_length_pck;
+	uint16_t snap_frame_length_lines;
+	uint16_t vert_offset;
+
+	uint16_t fps;
+	uint32_t fps_divider;
+	enum msm_sensor_resolution_t prev_res;
+	enum msm_sensor_resolution_t pict_res;
+	enum msm_sensor_resolution_t curr_res;
+	enum msm_sensor_cam_mode_t cam_mode;
+	enum msm_camera_type camera_type;
+
+	struct mutex *msm_sensor_mutex;
+	bool config_csi_flag;
+	struct msm_camera_csi_params *csi_params;
+
+	/* TODO: change v4l2_subdev to a pointer (per yupeng) */
+	struct v4l2_subdev *sensor_v4l2_subdev;
+	struct v4l2_subdev_info *sensor_v4l2_subdev_info;
+	uint8_t sensor_v4l2_subdev_info_size;
+	struct v4l2_subdev_ops *sensor_v4l2_subdev_ops;
+
+	struct msm_sensor_fn_t {
+		void (*sensor_start_stream) (struct msm_sensor_ctrl_t *);
+		void (*sensor_stop_stream) (struct msm_sensor_ctrl_t *);
+		void (*sensor_group_hold_on) (struct msm_sensor_ctrl_t *);
+		void (*sensor_group_hold_off) (struct msm_sensor_ctrl_t *);
+
+		uint16_t (*sensor_get_prev_lines_pf)
+			(struct msm_sensor_ctrl_t *);
+		uint16_t (*sensor_get_prev_pixels_pl)
+			(struct msm_sensor_ctrl_t *);
+		uint16_t (*sensor_get_pict_lines_pf)
+			(struct msm_sensor_ctrl_t *);
+		uint16_t (*sensor_get_pict_pixels_pl)
+			(struct msm_sensor_ctrl_t *);
+		uint32_t (*sensor_get_pict_max_exp_lc)
+			(struct msm_sensor_ctrl_t *);
+		void (*sensor_get_pict_fps) (struct msm_sensor_ctrl_t *,
+				uint16_t, uint16_t *);
+		int32_t (*sensor_set_fps) (struct msm_sensor_ctrl_t *,
+				struct fps_cfg *);
+		int32_t (*sensor_write_exp_gain) (struct msm_sensor_ctrl_t *,
+				uint16_t, uint32_t);
+		int32_t (*sensor_setting) (struct msm_sensor_ctrl_t *,
+				int update_type, int rt);
+		int32_t (*sensor_set_sensor_mode)
+				(struct msm_sensor_ctrl_t *, int, int);
+		int32_t (*sensor_mode_init) (struct msm_sensor_ctrl_t *,
+			int, struct sensor_init_cfg *);
+		int (*sensor_config) (void __user *);
+		int (*sensor_open_init) (const struct msm_camera_sensor_info *);
+		int (*sensor_release) (void);
+		int (*sensor_power_down)
+			(const struct msm_camera_sensor_info *);
+		int (*sensor_power_up) (const struct msm_camera_sensor_info *);
+		int (*sensor_probe) (struct msm_sensor_ctrl_t *s_ctrl,
+				const struct msm_camera_sensor_info *info,
+				struct msm_sensor_ctrl *s);
+	} func_tbl;
+};
+
+int32_t msm_sensor_i2c_rxdata(struct msm_sensor_ctrl_t *s_ctrl,
+	unsigned char *rxdata, int length);
+
+int32_t msm_sensor_i2c_txdata(struct msm_sensor_ctrl_t *s_ctrl,
+	unsigned char *txdata, int length);
+
+int32_t msm_sensor_i2c_waddr_write_b(struct msm_sensor_ctrl_t *s_ctrl,
+	uint16_t waddr, uint8_t bdata);
+
+int32_t msm_sensor_i2c_waddr_write_w(struct msm_sensor_ctrl_t *s_ctrl,
+	uint16_t waddr, uint16_t wdata);
+
+int32_t msm_sensor_i2c_waddr_read_w(struct msm_sensor_ctrl_t *s_ctrl,
+	uint16_t waddr, uint16_t *data);
+
+int32_t msm_sensor_i2c_waddr_write_b_tbl(struct msm_sensor_ctrl_t *s_ctrl,
+	struct msm_sensor_i2c_reg_conf const *reg_conf_tbl, uint8_t size);
+
+int32_t msm_sensor_i2c_waddr_write_w_tbl(struct msm_sensor_ctrl_t *s_ctrl,
+	struct msm_sensor_i2c_reg_conf const *reg_conf_tbl, uint8_t size);
+
+void msm_sensor_start_stream(struct msm_sensor_ctrl_t *s_ctrl);
+void msm_sensor_stop_stream(struct msm_sensor_ctrl_t *s_ctrl);
+void msm_sensor_group_hold_on(struct msm_sensor_ctrl_t *s_ctrl);
+void msm_sensor_group_hold_off(struct msm_sensor_ctrl_t *s_ctrl);
+
+uint16_t msm_sensor_get_prev_lines_pf(struct msm_sensor_ctrl_t *s_ctrl);
+uint16_t msm_sensor_get_prev_pixels_pl(struct msm_sensor_ctrl_t *s_ctrl);
+uint16_t msm_sensor_get_pict_lines_pf(struct msm_sensor_ctrl_t *s_ctrl);
+uint16_t msm_sensor_get_pict_pixels_pl(struct msm_sensor_ctrl_t *s_ctrl);
+uint32_t msm_sensor_get_pict_max_exp_lc(struct msm_sensor_ctrl_t *s_ctrl);
+void msm_sensor_get_pict_fps(struct msm_sensor_ctrl_t *s_ctrl,
+			uint16_t fps, uint16_t *pfps);
+int32_t msm_sensor_set_fps(struct msm_sensor_ctrl_t *s_ctrl,
+			struct fps_cfg   *fps);
+int32_t msm_sensor_write_exp_gain1(struct msm_sensor_ctrl_t *s_ctrl,
+		uint16_t gain, uint32_t line);
+int32_t msm_sensor_write_exp_gain2(struct msm_sensor_ctrl_t *s_ctrl,
+		uint16_t gain, uint32_t line);
+int32_t msm_sensor_set_sensor_mode_b(struct msm_sensor_ctrl_t *s_ctrl,
+	int mode, int res);
+int32_t msm_sensor_set_sensor_mode_w(struct msm_sensor_ctrl_t *s_ctrl,
+	int mode, int res);
+int32_t msm_sensor_mode_init_bdata(struct msm_sensor_ctrl_t *s_ctrl,
+			int mode, struct sensor_init_cfg *init_info);
+int32_t msm_sensor_mode_init_wdata(struct msm_sensor_ctrl_t *s_ctrl,
+			int mode, struct sensor_init_cfg *init_info);
+int32_t msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl,
+			void __user *argp);
+int16_t msm_sensor_match_id(struct msm_sensor_ctrl_t *s_ctrl);
+uint16_t msm_sensor_read_b_conf_wdata(struct msm_sensor_ctrl_t *s_ctrl,
+			enum msm_sensor_resolution_t res, int8_t array_addr);
+uint16_t msm_sensor_read_w_conf_wdata(struct msm_sensor_ctrl_t *s_ctrl,
+			enum msm_sensor_resolution_t res, int8_t array_addr);
+
+int msm_sensor_i2c_probe(struct i2c_client *client,
+	const struct i2c_device_id *id);
+
+int msm_sensor_probe(struct msm_sensor_ctrl_t *s_ctrl,
+		const struct msm_camera_sensor_info *info,
+		struct msm_sensor_ctrl *s);
+
+int msm_sensor_v4l2_probe(struct msm_sensor_ctrl_t *s_ctrl,
+	const struct msm_camera_sensor_info *info,
+	struct v4l2_subdev *sdev, struct msm_sensor_ctrl *s);
+
+int msm_sensor_v4l2_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
+			enum v4l2_mbus_pixelcode *code);
+
+int msm_sensor_write_b_init_settings(struct msm_sensor_ctrl_t *s_ctrl);
+int msm_sensor_write_w_init_settings(struct msm_sensor_ctrl_t *s_ctrl);
+int msm_sensor_write_b_res_settings
+	(struct msm_sensor_ctrl_t *s_ctrl, uint16_t res);
+int msm_sensor_write_w_res_settings
+	(struct msm_sensor_ctrl_t *s_ctrl, uint16_t res);
+
+int msm_sensor_enable_debugfs(struct msm_sensor_ctrl_t *s_ctrl);
diff --git a/drivers/media/video/msm/msm_vfe31.c b/drivers/media/video/msm/msm_vfe31.c
new file mode 100644
index 0000000..aa35096
--- /dev/null
+++ b/drivers/media/video/msm/msm_vfe31.c
@@ -0,0 +1,3729 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <mach/irqs.h>
+#include <mach/camera.h>
+#include <mach/msm_reqs.h>
+#include <asm/atomic.h>
+
+#include "msm_vfe31.h"
+#include "msm_vpe1.h"
+
+atomic_t irq_cnt;
+
+#define CHECKED_COPY_FROM_USER(in) {					\
+	if (copy_from_user((in), (void __user *)cmd->value,		\
+			cmd->length)) {					\
+		rc = -EFAULT;						\
+		break;							\
+	}								\
+}
+
+static struct vfe31_ctrl_type *vfe31_ctrl;
+static struct msm_camera_io_clk camio_clk;
+static void *vfe_syncdata;
+static void vfe31_send_msg_no_payload(enum VFE31_MESSAGE_ID id);
+static void vfe31_reset_hist_cfg(void);
+
+struct vfe31_isr_queue_cmd {
+	struct list_head list;
+	uint32_t                           vfeInterruptStatus0;
+	uint32_t                           vfeInterruptStatus1;
+	uint32_t                           vfePingPongStatus;
+	struct vfe_frame_asf_info          vfeAsfFrameInfo;
+	struct vfe_frame_bpc_info          vfeBpcFrameInfo;
+	struct vfe_msg_camif_status        vfeCamifStatusLocal;
+};
+
+static struct vfe31_cmd_type vfe31_cmd[] = {
+/* 0*/	{V31_DUMMY_0},
+		{V31_SET_CLK},
+		{V31_RESET},
+		{V31_START},
+		{V31_TEST_GEN_START},
+/* 5*/	{V31_OPERATION_CFG, V31_OPERATION_CFG_LEN},
+		{V31_AXI_OUT_CFG, V31_AXI_OUT_LEN, V31_AXI_OUT_OFF, 0xFF},
+		{V31_CAMIF_CFG, V31_CAMIF_LEN, V31_CAMIF_OFF, 0xFF},
+		{V31_AXI_INPUT_CFG},
+		{V31_BLACK_LEVEL_CFG, V31_BLACK_LEVEL_LEN, V31_BLACK_LEVEL_OFF,
+		0xFF},
+/*10*/  {V31_ROLL_OFF_CFG, V31_ROLL_OFF_CFG_LEN, V31_ROLL_OFF_CFG_OFF,
+		0xFF},
+		{V31_DEMUX_CFG, V31_DEMUX_LEN, V31_DEMUX_OFF, 0xFF},
+		{V31_DEMOSAIC_0_CFG, V31_DEMOSAIC_0_LEN, V31_DEMOSAIC_0_OFF,
+		0xFF},
+		{V31_DEMOSAIC_1_CFG, V31_DEMOSAIC_1_LEN, V31_DEMOSAIC_1_OFF,
+		0xFF},
+		{V31_DEMOSAIC_2_CFG, V31_DEMOSAIC_2_LEN, V31_DEMOSAIC_2_OFF,
+		0xFF},
+/*15*/	{V31_FOV_CFG, V31_FOV_LEN, V31_FOV_OFF, 0xFF},
+		{V31_MAIN_SCALER_CFG, V31_MAIN_SCALER_LEN, V31_MAIN_SCALER_OFF,
+		0xFF},
+		{V31_WB_CFG, V31_WB_LEN, V31_WB_OFF, 0xFF},
+		{V31_COLOR_COR_CFG, V31_COLOR_COR_LEN, V31_COLOR_COR_OFF, 0xFF},
+		{V31_RGB_G_CFG, V31_RGB_G_LEN, V31_RGB_G_OFF, 0xFF},
+/*20*/	{V31_LA_CFG, V31_LA_LEN, V31_LA_OFF, 0xFF },
+		{V31_CHROMA_EN_CFG, V31_CHROMA_EN_LEN, V31_CHROMA_EN_OFF, 0xFF},
+		{V31_CHROMA_SUP_CFG, V31_CHROMA_SUP_LEN, V31_CHROMA_SUP_OFF,
+		0xFF},
+		{V31_MCE_CFG, V31_MCE_LEN, V31_MCE_OFF, 0xFF},
+		{V31_SK_ENHAN_CFG, V31_SCE_LEN, V31_SCE_OFF, 0xFF},
+/*25*/	{V31_ASF_CFG, V31_ASF_LEN, V31_ASF_OFF, 0xFF},
+		{V31_S2Y_CFG, V31_S2Y_LEN, V31_S2Y_OFF, 0xFF},
+		{V31_S2CbCr_CFG, V31_S2CbCr_LEN, V31_S2CbCr_OFF, 0xFF},
+		{V31_CHROMA_SUBS_CFG, V31_CHROMA_SUBS_LEN, V31_CHROMA_SUBS_OFF,
+		0xFF},
+		{V31_OUT_CLAMP_CFG, V31_OUT_CLAMP_LEN, V31_OUT_CLAMP_OFF,
+		0xFF},
+/*30*/	{V31_FRAME_SKIP_CFG, V31_FRAME_SKIP_LEN, V31_FRAME_SKIP_OFF,
+		0xFF},
+		{V31_DUMMY_1},
+		{V31_DUMMY_2},
+		{V31_DUMMY_3},
+		{V31_UPDATE},
+/*35*/	{V31_BL_LVL_UPDATE, V31_BLACK_LEVEL_LEN, V31_BLACK_LEVEL_OFF,
+		0xFF},
+		{V31_DEMUX_UPDATE, V31_DEMUX_LEN, V31_DEMUX_OFF, 0xFF},
+		{V31_DEMOSAIC_1_UPDATE, V31_DEMOSAIC_1_LEN, V31_DEMOSAIC_1_OFF,
+		0xFF},
+		{V31_DEMOSAIC_2_UPDATE, V31_DEMOSAIC_2_LEN, V31_DEMOSAIC_2_OFF,
+		0xFF},
+		{V31_FOV_UPDATE, V31_FOV_LEN, V31_FOV_OFF, 0xFF},
+/*40*/	{V31_MAIN_SCALER_UPDATE, V31_MAIN_SCALER_LEN, V31_MAIN_SCALER_OFF,
+		0xFF},
+		{V31_WB_UPDATE, V31_WB_LEN, V31_WB_OFF, 0xFF},
+		{V31_COLOR_COR_UPDATE, V31_COLOR_COR_LEN, V31_COLOR_COR_OFF,
+		0xFF},
+		{V31_RGB_G_UPDATE, V31_RGB_G_LEN, V31_CHROMA_EN_OFF, 0xFF},
+		{V31_LA_UPDATE, V31_LA_LEN, V31_LA_OFF, 0xFF },
+/*45*/	{V31_CHROMA_EN_UPDATE, V31_CHROMA_EN_LEN, V31_CHROMA_EN_OFF,
+		0xFF},
+		{V31_CHROMA_SUP_UPDATE, V31_CHROMA_SUP_LEN, V31_CHROMA_SUP_OFF,
+		0xFF},
+		{V31_MCE_UPDATE, V31_MCE_LEN, V31_MCE_OFF, 0xFF},
+		{V31_SK_ENHAN_UPDATE, V31_SCE_LEN, V31_SCE_OFF, 0xFF},
+		{V31_S2CbCr_UPDATE, V31_S2CbCr_LEN, V31_S2CbCr_OFF, 0xFF},
+/*50*/	{V31_S2Y_UPDATE, V31_S2Y_LEN, V31_S2Y_OFF, 0xFF},
+		{V31_ASF_UPDATE, V31_ASF_UPDATE_LEN, V31_ASF_OFF, 0xFF},
+		{V31_FRAME_SKIP_UPDATE},
+		{V31_CAMIF_FRAME_UPDATE},
+		{V31_STATS_AF_UPDATE, V31_STATS_AF_LEN, V31_STATS_AF_OFF},
+/*55*/	{V31_STATS_AE_UPDATE, V31_STATS_AE_LEN, V31_STATS_AE_OFF},
+		{V31_STATS_AWB_UPDATE, V31_STATS_AWB_LEN, V31_STATS_AWB_OFF},
+		{V31_STATS_RS_UPDATE, V31_STATS_RS_LEN, V31_STATS_RS_OFF},
+		{V31_STATS_CS_UPDATE, V31_STATS_CS_LEN, V31_STATS_CS_OFF},
+		{V31_STATS_SKIN_UPDATE},
+/*60*/	{V31_STATS_IHIST_UPDATE, V31_STATS_IHIST_LEN, V31_STATS_IHIST_OFF},
+		{V31_DUMMY_4},
+		{V31_EPOCH1_ACK},
+		{V31_EPOCH2_ACK},
+		{V31_START_RECORDING},
+/*65*/	{V31_STOP_RECORDING},
+		{V31_DUMMY_5},
+		{V31_DUMMY_6},
+		{V31_CAPTURE, V31_CAPTURE_LEN, 0xFF},
+		{V31_DUMMY_7},
+/*70*/	{V31_STOP},
+		{V31_GET_HW_VERSION},
+		{V31_GET_FRAME_SKIP_COUNTS},
+		{V31_OUTPUT1_BUFFER_ENQ},
+		{V31_OUTPUT2_BUFFER_ENQ},
+/*75*/	{V31_OUTPUT3_BUFFER_ENQ},
+		{V31_JPEG_OUT_BUF_ENQ},
+		{V31_RAW_OUT_BUF_ENQ},
+		{V31_RAW_IN_BUF_ENQ},
+		{V31_STATS_AF_ENQ},
+/*80*/	{V31_STATS_AE_ENQ},
+		{V31_STATS_AWB_ENQ},
+		{V31_STATS_RS_ENQ},
+		{V31_STATS_CS_ENQ},
+		{V31_STATS_SKIN_ENQ},
+/*85*/	{V31_STATS_IHIST_ENQ},
+		{V31_DUMMY_8},
+		{V31_JPEG_ENC_CFG},
+		{V31_DUMMY_9},
+		{V31_STATS_AF_START, V31_STATS_AF_LEN, V31_STATS_AF_OFF},
+/*90*/	{V31_STATS_AF_STOP},
+		{V31_STATS_AE_START, V31_STATS_AE_LEN, V31_STATS_AE_OFF},
+		{V31_STATS_AE_STOP},
+		{V31_STATS_AWB_START, V31_STATS_AWB_LEN, V31_STATS_AWB_OFF},
+		{V31_STATS_AWB_STOP},
+/*95*/	{V31_STATS_RS_START, V31_STATS_RS_LEN, V31_STATS_RS_OFF},
+		{V31_STATS_RS_STOP},
+		{V31_STATS_CS_START, V31_STATS_CS_LEN, V31_STATS_CS_OFF},
+		{V31_STATS_CS_STOP},
+		{V31_STATS_SKIN_START},
+/*100*/	{V31_STATS_SKIN_STOP},
+		{V31_STATS_IHIST_START,
+		V31_STATS_IHIST_LEN, V31_STATS_IHIST_OFF},
+		{V31_STATS_IHIST_STOP},
+		{V31_DUMMY_10},
+		{V31_SYNC_TIMER_SETTING, V31_SYNC_TIMER_LEN,
+			V31_SYNC_TIMER_OFF},
+/*105*/	{V31_ASYNC_TIMER_SETTING, V31_ASYNC_TIMER_LEN, V31_ASYNC_TIMER_OFF},
+		{V31_LIVESHOT},
+		{V31_ZSL, V31_CAPTURE_LEN, 0xFF},
+		{V31_STEREOCAM},
+		{V31_LA_SETUP},
+/*110*/	{V31_XBAR_CFG, V31_XBAR_CFG_LEN, V31_XBAR_CFG_OFF},
+/*111*/	{V31_EZTUNE_CFG, V31_EZTUNE_CFG_LEN, V31_EZTUNE_CFG_OFF},
+};
+
+uint32_t vfe31_AXI_WM_CFG[] = {
+	0x0000004C,
+	0x00000064,
+	0x0000007C,
+	0x00000094,
+	0x000000AC,
+	0x000000C4,
+	0x000000DC,
+};
+
+static const char *vfe31_general_cmd[] = {
+	"DUMMY_0",  /* 0 */
+	"SET_CLK",
+	"RESET",
+	"START",
+	"TEST_GEN_START",
+	"OPERATION_CFG",  /* 5 */
+	"AXI_OUT_CFG",
+	"CAMIF_CFG",
+	"AXI_INPUT_CFG",
+	"BLACK_LEVEL_CFG",
+	"ROLL_OFF_CFG",  /* 10 */
+	"DEMUX_CFG",
+	"DEMOSAIC_0_CFG",  /* general */
+	"DEMOSAIC_1_CFG",  /* ABF     */
+	"DEMOSAIC_2_CFG",  /* BPC     */
+	"FOV_CFG",  /* 15  */
+	"MAIN_SCALER_CFG",
+	"WB_CFG",
+	"COLOR_COR_CFG",
+	"RGB_G_CFG",
+	"LA_CFG",  /* 20 */
+	"CHROMA_EN_CFG",
+	"CHROMA_SUP_CFG",
+	"MCE_CFG",
+	"SK_ENHAN_CFG",
+	"ASF_CFG",  /* 25 */
+	"S2Y_CFG",
+	"S2CbCr_CFG",
+	"CHROMA_SUBS_CFG",
+	"OUT_CLAMP_CFG",
+	"FRAME_SKIP_CFG",  /* 30 */
+	"DUMMY_1",
+	"DUMMY_2",
+	"DUMMY_3",
+	"UPDATE",
+	"BL_LVL_UPDATE",  /* 35 */
+	"DEMUX_UPDATE",
+	"DEMOSAIC_1_UPDATE",  /* BPC */
+	"DEMOSAIC_2_UPDATE",  /* ABF */
+	"FOV_UPDATE",
+	"MAIN_SCALER_UPDATE",  /* 40 */
+	"WB_UPDATE",
+	"COLOR_COR_UPDATE",
+	"RGB_G_UPDATE",
+	"LA_UPDATE",
+	"CHROMA_EN_UPDATE",  /* 45 */
+	"CHROMA_SUP_UPDATE",
+	"MCE_UPDATE",
+	"SK_ENHAN_UPDATE",
+	"S2CbCr_UPDATE",
+	"S2Y_UPDATE",  /* 50 */
+	"ASF_UPDATE",
+	"FRAME_SKIP_UPDATE",
+	"CAMIF_FRAME_UPDATE",
+	"STATS_AF_UPDATE",
+	"STATS_AE_UPDATE",  /* 55 */
+	"STATS_AWB_UPDATE",
+	"STATS_RS_UPDATE",
+	"STATS_CS_UPDATE",
+	"STATS_SKIN_UPDATE",
+	"STATS_IHIST_UPDATE",  /* 60 */
+	"DUMMY_4",
+	"EPOCH1_ACK",
+	"EPOCH2_ACK",
+	"START_RECORDING",
+	"STOP_RECORDING",  /* 65 */
+	"DUMMY_5",
+	"DUMMY_6",
+	"CAPTURE",
+	"DUMMY_7",
+	"STOP",  /* 70 */
+	"GET_HW_VERSION",
+	"GET_FRAME_SKIP_COUNTS",
+	"OUTPUT1_BUFFER_ENQ",
+	"OUTPUT2_BUFFER_ENQ",
+	"OUTPUT3_BUFFER_ENQ",  /* 75 */
+	"JPEG_OUT_BUF_ENQ",
+	"RAW_OUT_BUF_ENQ",
+	"RAW_IN_BUF_ENQ",
+	"STATS_AF_ENQ",
+	"STATS_AE_ENQ",  /* 80 */
+	"STATS_AWB_ENQ",
+	"STATS_RS_ENQ",
+	"STATS_CS_ENQ",
+	"STATS_SKIN_ENQ",
+	"STATS_IHIST_ENQ",  /* 85 */
+	"DUMMY_8",
+	"JPEG_ENC_CFG",
+	"DUMMY_9",
+	"STATS_AF_START",
+	"STATS_AF_STOP",  /* 90 */
+	"STATS_AE_START",
+	"STATS_AE_STOP",
+	"STATS_AWB_START",
+	"STATS_AWB_STOP",
+	"STATS_RS_START",  /* 95 */
+	"STATS_RS_STOP",
+	"STATS_CS_START",
+	"STATS_CS_STOP",
+	"STATS_SKIN_START",
+	"STATS_SKIN_STOP",  /* 100 */
+	"STATS_IHIST_START",
+	"STATS_IHIST_STOP",
+	"DUMMY_10",
+	"SYNC_TIMER_SETTING",
+	"ASYNC_TIMER_SETTING",  /* 105 */
+	"V31_LIVESHOT",
+	"V31_ZSL",
+	"V31_STEREOCAM",
+	"V31_LA_SETUP",
+	"V31_XBAR_CFG",
+	"V31_EZTUNE_CFG",
+};
+
+static void vfe_addr_convert(struct msm_vfe_phy_info *pinfo,
+	enum vfe_resp_msg type, void *data, void **ext, int32_t *elen)
+{
+	uint8_t outid;
+	switch (type) {
+	case VFE_MSG_OUTPUT_T:
+	case VFE_MSG_OUTPUT_P:
+	case VFE_MSG_OUTPUT_S:
+	case VFE_MSG_OUTPUT_V:
+	{
+		pinfo->output_id =
+			((struct vfe_message *)data)->_u.msgOut.output_id;
+
+		switch (type) {
+		case VFE_MSG_OUTPUT_P:
+			outid = OUTPUT_TYPE_P;
+			break;
+		case VFE_MSG_OUTPUT_V:
+			outid = OUTPUT_TYPE_V;
+			break;
+		case VFE_MSG_OUTPUT_T:
+			outid = OUTPUT_TYPE_T;
+			break;
+		case VFE_MSG_OUTPUT_S:
+			outid = OUTPUT_TYPE_S;
+			break;
+		default:
+			outid = 0xff;
+			break;
+		}
+		pinfo->output_id = outid;
+		pinfo->y_phy =
+			((struct vfe_message *)data)->_u.msgOut.yBuffer;
+		pinfo->cbcr_phy =
+			((struct vfe_message *)data)->_u.msgOut.cbcrBuffer;
+
+		pinfo->frame_id =
+		((struct vfe_message *)data)->_u.msgOut.frameCounter;
+
+		((struct vfe_msg_output *)(vfe31_ctrl->extdata))->bpcInfo =
+		((struct vfe_message *)data)->_u.msgOut.bpcInfo;
+		((struct vfe_msg_output *)(vfe31_ctrl->extdata))->asfInfo =
+		((struct vfe_message *)data)->_u.msgOut.asfInfo;
+		((struct vfe_msg_output *)(vfe31_ctrl->extdata))->frameCounter =
+		((struct vfe_message *)data)->_u.msgOut.frameCounter;
+		*ext  = vfe31_ctrl->extdata;
+		*elen = vfe31_ctrl->extlen;
+	}
+		break;
+
+	default:
+		break;
+	} /* switch */
+}
+
+
+static void vfe31_proc_ops(enum VFE31_MESSAGE_ID id, void *msg, size_t len)
+{
+	struct msm_vfe_resp *rp;
+
+	rp = vfe31_ctrl->resp->vfe_alloc(sizeof(struct msm_vfe_resp),
+		vfe31_ctrl->syncdata, GFP_ATOMIC);
+	if (!rp) {
+		CDBG("rp: cannot allocate buffer\n");
+		return;
+	}
+	CDBG("vfe31_proc_ops, msgId = %d\n", id);
+	rp->evt_msg.type   = MSM_CAMERA_MSG;
+	rp->evt_msg.msg_id = id;
+	rp->evt_msg.len    = len;
+	rp->evt_msg.data   = msg;
+
+	switch (rp->evt_msg.msg_id) {
+	case MSG_ID_SNAPSHOT_DONE:
+		rp->type = VFE_MSG_SNAPSHOT;
+		break;
+
+	case MSG_ID_OUTPUT_P:
+		rp->type = VFE_MSG_OUTPUT_P;
+		vfe_addr_convert(&(rp->phy), VFE_MSG_OUTPUT_P,
+			rp->evt_msg.data, &(rp->extdata),
+			&(rp->extlen));
+		break;
+
+	case MSG_ID_OUTPUT_T:
+		rp->type = VFE_MSG_OUTPUT_T;
+		vfe_addr_convert(&(rp->phy), VFE_MSG_OUTPUT_T,
+			rp->evt_msg.data, &(rp->extdata),
+			&(rp->extlen));
+		break;
+
+	case MSG_ID_OUTPUT_S:
+		rp->type = VFE_MSG_OUTPUT_S;
+		vfe_addr_convert(&(rp->phy), VFE_MSG_OUTPUT_S,
+			rp->evt_msg.data, &(rp->extdata),
+			&(rp->extlen));
+		break;
+
+	case MSG_ID_OUTPUT_V:
+		rp->type = VFE_MSG_OUTPUT_V;
+		vfe_addr_convert(&(rp->phy), VFE_MSG_OUTPUT_V,
+			rp->evt_msg.data, &(rp->extdata),
+			&(rp->extlen));
+		break;
+
+	case MSG_ID_COMMON:
+		rp->type = VFE_MSG_COMMON;
+		rp->stats_msg.status_bits = ((struct vfe_message *)
+			rp->evt_msg.data)->_u.msgStats.status_bits;
+		rp->stats_msg.frame_id = ((struct vfe_message *)
+			rp->evt_msg.data)->_u.msgStats.frameCounter;
+
+		rp->stats_msg.aec_buff = ((struct vfe_message *)
+			rp->evt_msg.data)->_u.msgStats.buff.aec;
+		rp->stats_msg.awb_buff = ((struct vfe_message *)
+			rp->evt_msg.data)->_u.msgStats.buff.awb;
+		rp->stats_msg.af_buff = ((struct vfe_message *)
+			rp->evt_msg.data)->_u.msgStats.buff.af;
+		rp->stats_msg.ihist_buff = ((struct vfe_message *)
+			rp->evt_msg.data)->_u.msgStats.buff.ihist;
+		rp->stats_msg.rs_buff = ((struct vfe_message *)
+			rp->evt_msg.data)->_u.msgStats.buff.rs;
+		rp->stats_msg.cs_buff = ((struct vfe_message *)
+			rp->evt_msg.data)->_u.msgStats.buff.cs;
+		break;
+
+	case MSG_ID_SYNC_TIMER0_DONE:
+		rp->type = VFE_MSG_SYNC_TIMER0;
+		break;
+
+	case MSG_ID_SYNC_TIMER1_DONE:
+		rp->type = VFE_MSG_SYNC_TIMER1;
+		break;
+
+	case MSG_ID_SYNC_TIMER2_DONE:
+		rp->type = VFE_MSG_SYNC_TIMER2;
+		break;
+
+	default:
+		rp->type = VFE_MSG_GENERAL;
+		break;
+	}
+
+	/* save the frame id.*/
+	rp->evt_msg.frame_id = rp->phy.frame_id;
+
+	vfe31_ctrl->resp->vfe_resp(rp, MSM_CAM_Q_VFE_MSG, vfe31_ctrl->syncdata,
+		GFP_ATOMIC);
+}
+
+static void vfe_send_outmsg(uint8_t msgid, uint32_t pyaddr,
+	uint32_t pcbcraddr)
+{
+	struct vfe_message msg;
+	uint8_t outid;
+
+	msg._d = msgid;   /* now the output mode is redundant. */
+	msg._u.msgOut.frameCounter = vfe31_ctrl->vfeFrameId;
+
+	switch (msgid) {
+	case MSG_ID_OUTPUT_P:
+		outid = OUTPUT_TYPE_P;
+		break;
+	case MSG_ID_OUTPUT_V:
+		outid = OUTPUT_TYPE_V;
+		break;
+	case MSG_ID_OUTPUT_T:
+		outid = OUTPUT_TYPE_T;
+		break;
+	case MSG_ID_OUTPUT_S:
+		outid = OUTPUT_TYPE_S;
+		break;
+	default:
+		outid = 0xff;  /* -1 for error condition.*/
+		break;
+	}
+	msg._u.msgOut.output_id   = msgid;
+	msg._u.msgOut.yBuffer     = pyaddr;
+	msg._u.msgOut.cbcrBuffer  = pcbcraddr;
+
+	vfe31_proc_ops(msgid, &msg, sizeof(struct vfe_message));
+	return;
+}
+static int vfe31_enable(struct camera_enable_cmd *enable)
+{
+	return 0;
+}
+
+static void vfe31_stop(void)
+{
+	atomic_set(&vfe31_ctrl->vstate, 0);
+	atomic_set(&vfe31_ctrl->stop_ack_pending, 1);
+
+	/* in either continuous or snapshot mode, stop command can be issued
+	 * at any time. stop camif immediately. */
+	msm_io_w_mb(CAMIF_COMMAND_STOP_IMMEDIATELY,
+		vfe31_ctrl->vfebase + VFE_CAMIF_COMMAND);
+
+	/* disable all interrupts.  */
+	msm_io_w(VFE_DISABLE_ALL_IRQS,
+		vfe31_ctrl->vfebase + VFE_IRQ_MASK_0);
+	msm_io_w(VFE_DISABLE_ALL_IRQS,
+		vfe31_ctrl->vfebase + VFE_IRQ_MASK_1);
+
+	/* clear all pending interrupts*/
+	msm_io_w(VFE_CLEAR_ALL_IRQS,
+		vfe31_ctrl->vfebase + VFE_IRQ_CLEAR_0);
+	msm_io_w(VFE_CLEAR_ALL_IRQS,
+		vfe31_ctrl->vfebase + VFE_IRQ_CLEAR_1);
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_io_w_mb(1,
+		vfe31_ctrl->vfebase + VFE_IRQ_CMD);
+
+	/* now enable only halt_irq & reset_irq */
+	msm_io_w(0xf0000000,          /* this is for async timer. */
+		vfe31_ctrl->vfebase + VFE_IRQ_MASK_0);
+	msm_io_w(VFE_IMASK_AXI_HALT,
+		vfe31_ctrl->vfebase + VFE_IRQ_MASK_1);
+
+	/* then apply axi halt command. */
+	msm_io_w_mb(AXI_HALT,
+		vfe31_ctrl->vfebase + VFE_AXI_CMD);
+}
+
+static int vfe31_disable(struct camera_enable_cmd *enable,
+	struct platform_device *dev)
+{
+	msm_camio_set_perf_lvl(S_EXIT);
+	msm_camio_disable(dev);
+	return 0;
+}
+
+static int vfe31_add_free_buf2(struct vfe31_output_ch *outch,
+	uint32_t paddr, uint32_t y_off, uint32_t cbcr_off)
+{
+	struct vfe31_free_buf *free_buf = NULL;
+	unsigned long flags = 0;
+	free_buf = kmalloc(sizeof(struct vfe31_free_buf), GFP_KERNEL);
+	if (!free_buf)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&outch->free_buf_lock, flags);
+	free_buf->paddr = paddr;
+	free_buf->y_off = y_off;
+	free_buf->cbcr_off = cbcr_off;
+	list_add_tail(&free_buf->node, &outch->free_buf_head);
+
+	CDBG("%s: free_buf paddr = 0x%x, y_off = %d, cbcr_off = %d\n",
+		__func__, free_buf->paddr, free_buf->y_off,
+		free_buf->cbcr_off);
+	spin_unlock_irqrestore(&outch->free_buf_lock, flags);
+	return 0;
+}
+
+#define vfe31_add_free_buf(outch, regptr) \
+	vfe31_add_free_buf2(outch, regptr->paddr, regptr->info.y_off,	\
+		regptr->info.cbcr_off)
+
+#define vfe31_free_buf_available(outch) \
+	(!list_empty(&outch.free_buf_head))
+
+static inline struct vfe31_free_buf *vfe31_get_free_buf(
+	struct vfe31_output_ch *outch)
+{
+	unsigned long flags = 0;
+	struct vfe31_free_buf *free_buf = NULL;
+	spin_lock_irqsave(&outch->free_buf_lock, flags);
+	if (!list_empty(&outch->free_buf_head)) {
+		free_buf = list_first_entry(&outch->free_buf_head,
+			struct vfe31_free_buf, node);
+		if (free_buf)
+			list_del_init(&free_buf->node);
+	}
+	spin_unlock_irqrestore(&outch->free_buf_lock, flags);
+	return free_buf;
+}
+
+static inline void vfe31_reset_free_buf_queue(
+	struct vfe31_output_ch *outch)
+{
+	unsigned long flags = 0;
+	struct vfe31_free_buf *free_buf = NULL;
+	spin_lock_irqsave(&outch->free_buf_lock, flags);
+	while (!list_empty(&outch->free_buf_head)) {
+		free_buf = list_first_entry(&outch->free_buf_head,
+			struct vfe31_free_buf, node);
+		if (free_buf) {
+			list_del_init(&free_buf->node);
+			kfree(free_buf);
+		}
+	}
+	spin_unlock_irqrestore(&outch->free_buf_lock, flags);
+}
+
+#define vfe31_init_free_buf_queue() do {	\
+	INIT_LIST_HEAD(&vfe31_ctrl->outpath.out0.free_buf_head);	\
+	INIT_LIST_HEAD(&vfe31_ctrl->outpath.out1.free_buf_head);	\
+	INIT_LIST_HEAD(&vfe31_ctrl->outpath.out2.free_buf_head);	\
+	spin_lock_init(&vfe31_ctrl->outpath.out0.free_buf_lock);	\
+	spin_lock_init(&vfe31_ctrl->outpath.out1.free_buf_lock);	\
+	spin_lock_init(&vfe31_ctrl->outpath.out2.free_buf_lock);	\
+} while (0)
+
+#define vfe31_reset_free_buf_queue_all() do {	\
+	vfe31_reset_free_buf_queue(&vfe31_ctrl->outpath.out0);	\
+	vfe31_reset_free_buf_queue(&vfe31_ctrl->outpath.out1);	\
+	vfe31_reset_free_buf_queue(&vfe31_ctrl->outpath.out2);	\
+} while (0)
+
+static int vfe31_config_axi(int mode, struct axidata *ad, uint32_t *ao)
+{
+	int i;
+	uint32_t *p, *p1, *p2, *p3;
+	int32_t *ch_info;
+	struct vfe31_output_ch *outp1, *outp2, *outp3;
+	struct msm_pmem_region *regp1 = NULL;
+	struct msm_pmem_region *regp2 = NULL;
+	struct msm_pmem_region *regp3 = NULL;
+	int ret;
+	struct msm_sync *p_sync = (struct msm_sync *)vfe_syncdata;
+
+	outp1 = NULL;
+	outp2 = NULL;
+	outp3 = NULL;
+
+	p = ao + 2;
+
+	/* Update the corresponding write masters for each output*/
+	ch_info = ao + V31_AXI_CFG_LEN;
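+	/* Each output path's write-master channels are packed two per word:
+	 * ch0/ch1 in the low/high halves of the first word, ch2 in the low
+	 * half of the second. */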
+	vfe31_ctrl->outpath.out0.ch0 = 0x0000FFFF & *ch_info;
+	vfe31_ctrl->outpath.out0.ch1 = 0x0000FFFF & (*ch_info++ >> 16);
+	vfe31_ctrl->outpath.out0.ch2 = 0x0000FFFF & *ch_info++;
+	vfe31_ctrl->outpath.out1.ch0 = 0x0000FFFF & *ch_info;
+	vfe31_ctrl->outpath.out1.ch1 = 0x0000FFFF & (*ch_info++ >> 16);
+	vfe31_ctrl->outpath.out1.ch2 = 0x0000FFFF & *ch_info++;
+	vfe31_ctrl->outpath.out2.ch0 = 0x0000FFFF & *ch_info;
+	vfe31_ctrl->outpath.out2.ch1 = 0x0000FFFF & (*ch_info++ >> 16);
+	vfe31_ctrl->outpath.out2.ch2 = 0x0000FFFF & *ch_info++;
+
+	CDBG("vfe31_config_axi: mode = %d, bufnum1 = %d, bufnum2 = %d, "
+		"bufnum3 = %d\n", mode, ad->bufnum1, ad->bufnum2, ad->bufnum3);
+
+	switch (mode) {
+
+	case OUTPUT_2: {
+		if (ad->bufnum2 != 3)
+			return -EINVAL;
+		regp1 = &(ad->region[ad->bufnum1]);
+		outp1 = &(vfe31_ctrl->outpath.out0);
+		vfe31_ctrl->outpath.output_mode |= VFE31_OUTPUT_MODE_PT;
+
+		for (i = 0; i < 2; i++) {
+			p1 = ao + 6 + i;    /* wm0 for y  */
+			*p1 = (regp1->paddr + regp1->info.y_off);
+
+			p1 = ao + 12 + i;  /* wm1 for cbcr */
+			*p1 = (regp1->paddr + regp1->info.cbcr_off);
+			regp1++;
+		}
+		ret = vfe31_add_free_buf(outp1, regp1);
+		if (ret < 0)
+			return ret;
+	}
+		break;
+
+	case OUTPUT_1_AND_2:
+		/* use wm0& 4 for thumbnail, wm1&5 for main image.*/
+		if ((ad->bufnum1 < 1) || (ad->bufnum2 < 1))
+			return -EINVAL;
+		vfe31_ctrl->outpath.output_mode |=
+			VFE31_OUTPUT_MODE_S;  /* main image.*/
+		vfe31_ctrl->outpath.output_mode |=
+			VFE31_OUTPUT_MODE_PT;  /* thumbnail. */
+
+		/* this is thumbnail buffer. */
+		regp1 = &(ad->region[ad->bufnum1-1]);
+		/* this is main image buffer. */
+		regp2 = &(ad->region[ad->bufnum1+ad->bufnum2-1]);
+
+		outp1 = &(vfe31_ctrl->outpath.out0);
+		outp2 = &(vfe31_ctrl->outpath.out1); /* snapshot */
+
+		/*  Parse the buffers!!! */
+		if (ad->bufnum2 == 1) {	/* assuming bufnum1 = bufnum2 */
+			p1 = ao + 6;   /* wm0 ping */
+			*p1++ = (regp1->paddr + regp1->info.y_off);
+
+			/* this is to duplicate ping address to pong.*/
+			*p1 = (regp1->paddr + regp1->info.y_off);
+
+			p1 = ao + 30;  /* wm4 ping */
+			*p1++ = (regp1->paddr + regp1->info.cbcr_off);
+			CDBG("%s: regp1->info.cbcr_off = 0x%x\n", __func__,
+						 regp1->info.cbcr_off);
+
+			/* this is to duplicate ping address to pong.*/
+			*p1 = (regp1->paddr + regp1->info.cbcr_off);
+
+			p1 = ao + 12;   /* wm1 ping */
+			*p1++ = (regp2->paddr + regp2->info.y_off);
+
+			/* pong = ping,*/
+			*p1 = (regp2->paddr + regp2->info.y_off);
+
+			p1 = ao + 36;  /* wm5 */
+			*p1++ = (regp2->paddr + regp2->info.cbcr_off);
+			CDBG("%s: regp2->info.cbcr_off = 0x%x\n", __func__,
+						 regp2->info.cbcr_off);
+
+			/* pong = ping,*/
+			*p1 = (regp2->paddr + regp2->info.cbcr_off);
+		} else { /* more than one snapshot */
+			/* first fill ping & pong */
+			for (i = 0; i < 2; i++) {
+				p1 = ao + 6 + i;    /* wm0 for y  */
+				*p1 = (regp1->paddr + regp1->info.y_off);
+				p1 = ao + 30 + i;  /* wm4 for cbcr */
+				*p1 = (regp1->paddr + regp1->info.cbcr_off);
+				regp1--;
+			}
+
+			for (i = 0; i < 2; i++) {
+				p2 = ao + 12 + i;    /* wm1 for y  */
+				*p2 = (regp2->paddr + regp2->info.y_off);
+				p2 = ao + 36 + i;  /* wm5 for cbcr */
+				*p2 = (regp2->paddr + regp2->info.cbcr_off);
+				regp2--;
+			}
+
+			for (i = 2; i < ad->bufnum1; i++) {
+				ret = vfe31_add_free_buf(outp1, regp1);
+				if (ret < 0)
+					return ret;
+				regp1--;
+			}
+
+			for (i = 2; i < ad->bufnum2; i++) {
+				ret = vfe31_add_free_buf(outp2, regp2);
+				if (ret < 0)
+					return ret;
+				regp2--;
+			}
+		}
+		break;
+
+	case OUTPUT_1_2_AND_3:
+		CDBG("%s: OUTPUT_1_2_AND_3", __func__);
+		CDBG("%s: %d %d %d", __func__, ad->bufnum1, ad->bufnum2,
+			ad->bufnum3);
+		/* use wm0& 4 for postview, wm1&5 for preview.*/
+		/* use wm2& 6 for main img */
+		if ((ad->bufnum1 < 1) || (ad->bufnum2 < 1) || (ad->bufnum3 < 1))
+			return -EINVAL;
+		vfe31_ctrl->outpath.output_mode |=
+			VFE31_OUTPUT_MODE_S;  /* main image.*/
+		vfe31_ctrl->outpath.output_mode |=
+			VFE31_OUTPUT_MODE_P;  /* preview. */
+		vfe31_ctrl->outpath.output_mode |=
+			VFE31_OUTPUT_MODE_T;  /* thumbnail. */
+
+		/* this is preview buffer. */
+		regp1 = &(ad->region[0]);
+		/* this is thumbnail buffer. */
+		regp2 = &(ad->region[ad->bufnum1]);
+		/* this is main image buffer. */
+		regp3 = &(ad->region[ad->bufnum1+ad->bufnum2]);
+		outp1 = &(vfe31_ctrl->outpath.out0);
+		outp2 = &(vfe31_ctrl->outpath.out1);
+		outp3 = &(vfe31_ctrl->outpath.out2);
+
+		/*  Parse the buffers!!! */
+		/* first fill ping & pong */
+		for (i = 0; i < 2; i++) {
+			p1 = ao + 6 + i;    /* wm0 for y  */
+			*p1 = (regp1->paddr + regp1->info.y_off);
+			p1 = ao + 30 + i;  /* wm4 for cbcr */
+			*p1 = (regp1->paddr + regp1->info.cbcr_off);
+			regp1++;
+		}
+
+		for (i = 0; i < 2; i++) {
+			p2 = ao + 12 + i;    /* wm1 for y  */
+			*p2 = (regp2->paddr + regp2->info.y_off);
+			p2 = ao + 36 + i;  /* wm5 for cbcr */
+			*p2 = (regp2->paddr + regp2->info.cbcr_off);
+			regp2++;
+		}
+
+		for (i = 0; i < 2; i++) {
+			p3 = ao + 18 + i;    /* wm2 for y  */
+			*p3 = (regp3->paddr + regp3->info.y_off);
+			p3 = ao + 42 + i;  /* wm6 for cbcr */
+			*p3 = (regp3->paddr + regp3->info.cbcr_off);
+			regp3++;
+		}
+
+		for (i = 2; i < ad->bufnum1; i++) {
+			ret = vfe31_add_free_buf(outp1, regp1);
+			if (ret < 0)
+				return ret;
+			regp1++;
+		}
+
+		for (i = 2; i < ad->bufnum2; i++) {
+			ret = vfe31_add_free_buf(outp2, regp2);
+			if (ret < 0)
+				return ret;
+			regp2++;
+		}
+
+		for (i = 2; i < ad->bufnum3; i++) {
+			ret = vfe31_add_free_buf(outp3, regp3);
+			if (ret < 0)
+				return ret;
+			regp3++;
+		}
+		break;
+
+	case OUTPUT_1_AND_3: {
+		/* use wm0&4 for preview, wm1&5 for video.*/
+		if ((ad->bufnum1 < 2) || (ad->bufnum2 < 2))
+			return -EINVAL;
+
+#ifdef CONFIG_MSM_CAMERA_V4L2
+		*p++ = 0x1;    /* xbar cfg0 */
+		*p = 0x1a03;    /* xbar cfg1 */
+#endif
+		vfe31_ctrl->outpath.output_mode |=
+			VFE31_OUTPUT_MODE_V;  /* video*/
+		vfe31_ctrl->outpath.output_mode |=
+			VFE31_OUTPUT_MODE_PT;  /* preview */
+
+		regp1 = &(ad->region[0]); /* this is preview buffer. */
+		regp2 = &(ad->region[ad->bufnum1]);/* this is video buffer. */
+		outp1 = &(vfe31_ctrl->outpath.out0); /* preview */
+		outp2 = &(vfe31_ctrl->outpath.out2); /* video */
+
+
+		for (i = 0; i < 2; i++) {
+			p1 = ao + 6 + i;    /* wm0 for y  */
+			*p1 = (regp1->paddr + regp1->info.y_off);
+
+			p1 = ao + 30 + i;  /* wm4 for cbcr */
+			*p1 = (regp1->paddr + regp1->info.cbcr_off);
+			regp1++;
+		}
+
+		for (i = 0; i < 2; i++) {
+			p2 = ao + 12 + i;    /* wm1 for y  */
+			*p2 = (regp2->paddr + regp2->info.y_off);
+
+			p2 = ao + 36 + i;  /* wm5 for cbcr */
+			*p2 = (regp2->paddr + regp2->info.cbcr_off);
+			regp2++;
+		}
+		for (i = 2; i < ad->bufnum1; i++) {
+			ret = vfe31_add_free_buf(outp1, regp1);
+			if (ret < 0)
+				return ret;
+			regp1++;
+		}
+
+		for (i = 2; i < ad->bufnum2; i++) {
+			ret = vfe31_add_free_buf(outp2, regp2);
+			if (ret < 0)
+				return ret;
+			regp2++;
+		}
+	}
+		break;
+	case CAMIF_TO_AXI_VIA_OUTPUT_2: {  /* use wm0 only */
+		if (ad->bufnum2 < 1)
+			return -EINVAL;
+		CDBG("config axi for raw snapshot.\n");
+		vfe31_ctrl->outpath.out1.ch0 = 0; /* raw */
+		regp1 = &(ad->region[ad->bufnum1]);
+		vfe31_ctrl->outpath.output_mode |= VFE31_OUTPUT_MODE_S;
+		p1 = ao + 6;    /* wm0 for y  */
+		*p1 = (regp1->paddr + regp1->info.y_off);
+		if (p_sync->stereocam_enabled)
+			p_sync->stereo_state = STEREO_RAW_SNAP_IDLE;
+	}
+		break;
+	default:
+		break;
+	}
+	msm_io_memcpy(vfe31_ctrl->vfebase + vfe31_cmd[V31_AXI_OUT_CFG].offset,
+		ao, vfe31_cmd[V31_AXI_OUT_CFG].length - V31_AXI_CH_INF_LEN);
+
+	return 0;
+}
+
+static void vfe31_reset_internal_variables(void)
+{
+	unsigned long flags;
+	vfe31_ctrl->vfeImaskCompositePacked = 0;
+	/* state control variables */
+	vfe31_ctrl->start_ack_pending = FALSE;
+	atomic_set(&irq_cnt, 0);
+
+	spin_lock_irqsave(&vfe31_ctrl->xbar_lock, flags);
+	vfe31_ctrl->xbar_update_pending = 0;
+	spin_unlock_irqrestore(&vfe31_ctrl->xbar_lock, flags);
+
+	atomic_set(&vfe31_ctrl->stop_ack_pending, 0);
+	atomic_set(&vfe31_ctrl->vstate, 0);
+
+	vfe31_ctrl->aec_ack_pending = FALSE;
+	vfe31_ctrl->af_ack_pending = FALSE;
+	vfe31_ctrl->awb_ack_pending = FALSE;
+	vfe31_ctrl->ihist_ack_pending = FALSE;
+	vfe31_ctrl->rs_ack_pending = FALSE;
+	vfe31_ctrl->cs_ack_pending = FALSE;
+
+	vfe31_ctrl->reset_ack_pending  = FALSE;
+
+	spin_lock_irqsave(&vfe31_ctrl->update_ack_lock, flags);
+	vfe31_ctrl->update_ack_pending = FALSE;
+	spin_unlock_irqrestore(&vfe31_ctrl->update_ack_lock, flags);
+
+	vfe31_ctrl->recording_state = VFE_REC_STATE_IDLE;
+
+	/* 0 for continuous mode, 1 for snapshot mode */
+	vfe31_ctrl->operation_mode = VFE_MODE_OF_OPERATION_CONTINUOUS;
+	vfe31_ctrl->outpath.output_mode = 0;
+	vfe31_ctrl->vfe_capture_count = 0;
+
+	/* this is unsigned 32 bit integer. */
+	vfe31_ctrl->vfeFrameId = 0;
+
+	vfe31_ctrl->output1Pattern = 0xffffffff;
+	vfe31_ctrl->output1Period  = 31;
+	vfe31_ctrl->output2Pattern = 0xffffffff;
+	vfe31_ctrl->output2Period  = 31;
+	vfe31_ctrl->vfeFrameSkipCount   = 0;
+	vfe31_ctrl->vfeFrameSkipPeriod  = 31;
+
+	/* Stats control variables. */
+	memset(&(vfe31_ctrl->afStatsControl), 0,
+		sizeof(struct vfe_stats_control));
+
+	memset(&(vfe31_ctrl->awbStatsControl), 0,
+		sizeof(struct vfe_stats_control));
+
+	memset(&(vfe31_ctrl->aecStatsControl), 0,
+		sizeof(struct vfe_stats_control));
+
+	memset(&(vfe31_ctrl->ihistStatsControl), 0,
+		sizeof(struct vfe_stats_control));
+
+	memset(&(vfe31_ctrl->rsStatsControl), 0,
+		sizeof(struct vfe_stats_control));
+
+	memset(&(vfe31_ctrl->csStatsControl), 0,
+		sizeof(struct vfe_stats_control));
+}
+
+static void vfe31_reset(void)
+{
+	uint32_t vfe_version;
+	vfe31_reset_free_buf_queue_all();
+	vfe31_reset_internal_variables();
+
+	vfe31_reset_hist_cfg();
+	vfe_version = msm_io_r(vfe31_ctrl->vfebase);
+	CDBG("vfe_version = 0x%x\n", vfe_version);
+	/* disable all interrupts.  vfeImaskLocal is also reset to 0
+	* to begin with. */
+	msm_io_w(VFE_DISABLE_ALL_IRQS,
+		vfe31_ctrl->vfebase + VFE_IRQ_MASK_0);
+
+	msm_io_w(VFE_DISABLE_ALL_IRQS,
+		vfe31_ctrl->vfebase + VFE_IRQ_MASK_1);
+
+	/* clear all pending interrupts*/
+	msm_io_w(VFE_CLEAR_ALL_IRQS, vfe31_ctrl->vfebase + VFE_IRQ_CLEAR_0);
+	msm_io_w(VFE_CLEAR_ALL_IRQS, vfe31_ctrl->vfebase + VFE_IRQ_CLEAR_1);
+
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_io_w_mb(1, vfe31_ctrl->vfebase + VFE_IRQ_CMD);
+
+	/* enable reset_ack interrupt.  */
+	msm_io_w(VFE_IMASK_RESET,
+		vfe31_ctrl->vfebase + VFE_IRQ_MASK_1);
+
+	/* Write to VFE_GLOBAL_RESET_CMD to reset the vfe hardware. Once reset
+	 * is done, hardware interrupt will be generated.  The VFE ISR processes
+	 * the interrupt to complete the function call.  Note that the reset
+	 * function is synchronous. */
+
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_io_w_mb(VFE_RESET_UPON_RESET_CMD,
+		vfe31_ctrl->vfebase + VFE_GLOBAL_RESET);
+}
+
+static int vfe31_operation_config(uint32_t *cmd)
+{
+	uint32_t *p = cmd;
+
+	vfe31_ctrl->operation_mode = *p;
+	vpe_ctrl->pad_2k_bool = (vfe31_ctrl->operation_mode & 1) ?
+		FALSE : TRUE;
+
+	vfe31_ctrl->stats_comp = *(++p);
+	vfe31_ctrl->hfr_mode = *(++p);
+
+	msm_io_w(*(++p), vfe31_ctrl->vfebase + VFE_CFG_OFF);
+	msm_io_w(*(++p), vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+	msm_io_w(*(++p), vfe31_ctrl->vfebase + VFE_REALIGN_BUF);
+	msm_io_w(*(++p), vfe31_ctrl->vfebase + VFE_CHROMA_UP);
+	msm_io_w(*(++p), vfe31_ctrl->vfebase + VFE_STATS_CFG);
+	wmb();
+	return 0;
+}
+static uint32_t vfe_stats_awb_buf_init(struct vfe_cmd_stats_buf *in)
+{
+	uint32_t *ptr = in->statsBuf;
+	uint32_t addr;
+
+	addr = ptr[0];
+	msm_io_w(addr, vfe31_ctrl->vfebase + VFE_BUS_STATS_AWB_WR_PING_ADDR);
+	addr = ptr[1];
+	msm_io_w(addr, vfe31_ctrl->vfebase + VFE_BUS_STATS_AWB_WR_PONG_ADDR);
+	vfe31_ctrl->awbStatsControl.nextFrameAddrBuf = in->statsBuf[2];
+	return 0;
+}
+
+
+static uint32_t vfe_stats_aec_buf_init(struct vfe_cmd_stats_buf *in)
+{
+	uint32_t *ptr = in->statsBuf;
+	uint32_t addr;
+
+	addr = ptr[0];
+	msm_io_w(addr, vfe31_ctrl->vfebase + VFE_BUS_STATS_AEC_WR_PING_ADDR);
+	addr = ptr[1];
+	msm_io_w(addr, vfe31_ctrl->vfebase + VFE_BUS_STATS_AEC_WR_PONG_ADDR);
+
+	vfe31_ctrl->aecStatsControl.nextFrameAddrBuf = in->statsBuf[2];
+	return 0;
+}
+
+static uint32_t vfe_stats_af_buf_init(struct vfe_cmd_stats_buf *in)
+{
+	uint32_t *ptr = in->statsBuf;
+	uint32_t addr;
+
+	addr = ptr[0];
+	msm_io_w(addr, vfe31_ctrl->vfebase + VFE_BUS_STATS_AF_WR_PING_ADDR);
+	addr = ptr[1];
+	msm_io_w(addr, vfe31_ctrl->vfebase + VFE_BUS_STATS_AF_WR_PONG_ADDR);
+
+	vfe31_ctrl->afStatsControl.nextFrameAddrBuf = in->statsBuf[2];
+	return 0;
+}
+
+static uint32_t vfe_stats_ihist_buf_init(struct vfe_cmd_stats_buf *in)
+{
+	uint32_t *ptr = in->statsBuf;
+	uint32_t addr;
+
+	addr = ptr[0];
+	msm_io_w(addr, vfe31_ctrl->vfebase + VFE_BUS_STATS_HIST_WR_PING_ADDR);
+	addr = ptr[1];
+	msm_io_w(addr, vfe31_ctrl->vfebase + VFE_BUS_STATS_HIST_WR_PONG_ADDR);
+
+	vfe31_ctrl->ihistStatsControl.nextFrameAddrBuf = in->statsBuf[2];
+	return 0;
+}
+
+static uint32_t vfe_stats_rs_buf_init(struct vfe_cmd_stats_buf *in)
+{
+	uint32_t *ptr = in->statsBuf;
+	uint32_t addr;
+
+	addr = ptr[0];
+	msm_io_w(addr, vfe31_ctrl->vfebase + VFE_BUS_STATS_RS_WR_PING_ADDR);
+	addr = ptr[1];
+	msm_io_w(addr, vfe31_ctrl->vfebase + VFE_BUS_STATS_RS_WR_PONG_ADDR);
+
+	vfe31_ctrl->rsStatsControl.nextFrameAddrBuf = in->statsBuf[2];
+	return 0;
+}
+static uint32_t vfe_stats_cs_buf_init(struct vfe_cmd_stats_buf *in)
+{
+	uint32_t *ptr = in->statsBuf;
+	uint32_t addr;
+
+	addr = ptr[0];
+	msm_io_w(addr, vfe31_ctrl->vfebase + VFE_BUS_STATS_CS_WR_PING_ADDR);
+	addr = ptr[1];
+	msm_io_w(addr, vfe31_ctrl->vfebase + VFE_BUS_STATS_CS_WR_PONG_ADDR);
+
+	vfe31_ctrl->csStatsControl.nextFrameAddrBuf = in->statsBuf[2];
+	return 0;
+}
+
+static void vfe31_start_common(void)
+{
+	uint32_t irq_mask = 0x00E00021;
+	vfe31_ctrl->start_ack_pending = TRUE;
+	CDBG("VFE operation mode = 0x%x, output mode = 0x%x\n",
+		vfe31_ctrl->operation_mode, vfe31_ctrl->outpath.output_mode);
+	/* Enable IRQ for comp stats, Image master, SOF & Reg Update*/
+	if (vfe31_ctrl->stats_comp)
+		irq_mask |= 0x01000000;
+	else /* Enable IRQ for Image masters, AF stats, SOF & Reg Update */
+		irq_mask |= 0x00004000;
+
+	/* Enable EOF for video mode */
+	if (VFE_MODE_OF_OPERATION_VIDEO == vfe31_ctrl->operation_mode)
+		irq_mask |= 0x4;
+
+	msm_io_w(irq_mask, vfe31_ctrl->vfebase + VFE_IRQ_MASK_0);
+
+	msm_io_w(VFE_IMASK_RESET,
+		vfe31_ctrl->vfebase + VFE_IRQ_MASK_1);
+	/* enable out of order option */
+	msm_io_w(0x80000000, vfe31_ctrl->vfebase + VFE_AXI_CFG);
+	/* enable performance monitor */
+	msm_io_w(1, vfe31_ctrl->vfebase + VFE_BUS_PM_CFG);
+	msm_io_w(1, vfe31_ctrl->vfebase + VFE_BUS_PM_CMD);
+
+
+	msm_io_dump(vfe31_ctrl->vfebase, 0x600);
+
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_io_w_mb(1, vfe31_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+	msm_io_w(1, vfe31_ctrl->vfebase + VFE_CAMIF_COMMAND);
+	wmb();
+
+	atomic_set(&vfe31_ctrl->vstate, 1);
+}
+
+static int vfe31_start_recording(void)
+{
+	msm_camio_set_perf_lvl(S_VIDEO);
+	usleep(1000);
+	vfe31_ctrl->recording_state = VFE_REC_STATE_START_REQUESTED;
+	msm_io_w_mb(1, vfe31_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+	return 0;
+}
+
+static int vfe31_stop_recording(void)
+{
+	vfe31_ctrl->recording_state = VFE_REC_STATE_STOP_REQUESTED;
+	msm_io_w_mb(1, vfe31_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+	msm_camio_set_perf_lvl(S_PREVIEW);
+	return 0;
+}
+
+static void vfe31_liveshot(void)
+{
+	struct msm_sync* p_sync = (struct msm_sync *)vfe_syncdata;
+	if (p_sync)
+		p_sync->liveshot_enabled = true;
+}
+
+static void vfe31_stereocam(uint32_t enable)
+{
+	struct msm_sync* p_sync = (struct msm_sync *)vfe_syncdata;
+	if (p_sync) {
+		CDBG("%s: Enable StereoCam %d!!!\n", __func__, enable);
+		p_sync->stereocam_enabled = enable;
+	}
+}
+
+static int vfe31_zsl(void)
+{
+	uint32_t irq_comp_mask = 0;
+	/* capture command is valid for both idle and active state. */
+	irq_comp_mask	=
+		msm_io_r(vfe31_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+
+	CDBG("%s:op mode %d O/P Mode %d\n", __func__,
+		vfe31_ctrl->operation_mode, vfe31_ctrl->outpath.output_mode);
+	if (vfe31_ctrl->operation_mode == VFE_MODE_OF_OPERATION_ZSL) {
+		if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_P) {
+			irq_comp_mask |=
+				((0x1 << (vfe31_ctrl->outpath.out0.ch0)) |
+				(0x1 << (vfe31_ctrl->outpath.out0.ch1)));
+		}
+		if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_T) {
+			irq_comp_mask |=
+				((0x1 << (vfe31_ctrl->outpath.out1.ch0 + 8)) |
+				(0x1 << (vfe31_ctrl->outpath.out1.ch1 + 8)));
+		}
+		if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_S) {
+			irq_comp_mask |=
+			((0x1 << (vfe31_ctrl->outpath.out2.ch0 + 8)) |
+			(0x1 << (vfe31_ctrl->outpath.out2.ch1 + 8)));
+		}
+		if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_P) {
+			msm_io_w(1, vfe31_ctrl->vfebase +
+				vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out0.ch0]);
+			msm_io_w(1, vfe31_ctrl->vfebase +
+				vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out0.ch1]);
+		}
+		if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_T) {
+			msm_io_w(1, vfe31_ctrl->vfebase +
+				vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out1.ch0]);
+			msm_io_w(1, vfe31_ctrl->vfebase +
+				vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out1.ch1]);
+		}
+		if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_S) {
+			msm_io_w(1, vfe31_ctrl->vfebase +
+				vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out2.ch0]);
+			msm_io_w(1, vfe31_ctrl->vfebase +
+				vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out2.ch1]);
+		}
+	}
+	msm_io_w(irq_comp_mask, vfe31_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+	vfe31_start_common();
+	msm_camio_set_perf_lvl(S_ZSL);
+	usleep(1000);
+	/* for debug */
+	msm_io_w(1, vfe31_ctrl->vfebase + 0x18C);
+	msm_io_w(1, vfe31_ctrl->vfebase + 0x188);
+	return 0;
+}
+
+static int vfe31_capture(uint32_t num_frames_capture)
+{
+	uint32_t irq_comp_mask = 0;
+	struct msm_sync* p_sync = (struct msm_sync *)vfe_syncdata;
+
+	/* capture command is valid for both idle and active state. */
+	vfe31_ctrl->vfe_capture_count = num_frames_capture;
+	if (p_sync) {
+		p_sync->snap_count = num_frames_capture;
+		p_sync->thumb_count = num_frames_capture;
+	}
+
+	irq_comp_mask	=
+		msm_io_r(vfe31_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+
+	if ((vfe31_ctrl->operation_mode ==
+		 VFE_MODE_OF_OPERATION_SNAPSHOT) ||
+		(vfe31_ctrl->operation_mode ==
+		 VFE_MODE_OF_OPERATION_ZSL)){
+		if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_PT) {
+			irq_comp_mask |=
+				((0x1 << (vfe31_ctrl->outpath.out0.ch0 + 8)) |
+				(0x1 << (vfe31_ctrl->outpath.out0.ch1 + 8)));
+		}
+		if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_S) {
+			irq_comp_mask |=
+			((0x1 << (vfe31_ctrl->outpath.out1.ch0 + 8)) |
+			(0x1 << (vfe31_ctrl->outpath.out1.ch1 + 8)));
+		}
+		if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_PT) {
+			msm_io_w(1, vfe31_ctrl->vfebase +
+				vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out0.ch0]);
+			msm_io_w(1, vfe31_ctrl->vfebase +
+				vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out0.ch1]);
+		}
+		if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_S) {
+			msm_io_w(1, vfe31_ctrl->vfebase +
+				vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out1.ch0]);
+			msm_io_w(1, vfe31_ctrl->vfebase +
+				vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out1.ch1]);
+		}
+	} else {  /* this is raw snapshot mode. */
+		CDBG("config the comp imask for raw snapshot mode.\n");
+		if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_S) {
+			irq_comp_mask |=
+			(0x1 << (vfe31_ctrl->outpath.out1.ch0 + 8));
+			msm_io_w(1, vfe31_ctrl->vfebase +
+				vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out1.ch0]);
+		}
+	}
+	msm_io_w(irq_comp_mask, vfe31_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+	if (p_sync->stereocam_enabled)
+		msm_camio_set_perf_lvl(S_STEREO_CAPTURE);
+	else
+		msm_camio_set_perf_lvl(S_CAPTURE);
+
+	usleep(1000);
+	vfe31_start_common();
+	return 0;
+}
+
+static int vfe31_start(void)
+{
+	uint32_t irq_comp_mask = 0;
+	struct msm_sync* p_sync = (struct msm_sync *)vfe_syncdata;
+	/* the start command is only valid for continuous and video modes. */
+	if ((vfe31_ctrl->operation_mode != VFE_MODE_OF_OPERATION_CONTINUOUS) &&
+		(vfe31_ctrl->operation_mode != VFE_MODE_OF_OPERATION_VIDEO))
+		return 0;
+	irq_comp_mask	=
+		msm_io_r(vfe31_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+
+	if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_PT) {
+		irq_comp_mask |= (0x1 << vfe31_ctrl->outpath.out0.ch0 |
+			0x1 << vfe31_ctrl->outpath.out0.ch1);
+	}
+
+	if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_V) {
+		irq_comp_mask |= (0x1 << (vfe31_ctrl->outpath.out2.ch0 + 16)|
+			0x1 << (vfe31_ctrl->outpath.out2.ch1 + 16));
+	}
+
+	msm_io_w(irq_comp_mask, vfe31_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+
+
+	if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_PT) {
+		msm_io_w(1, vfe31_ctrl->vfebase +
+			vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out0.ch0]);
+		msm_io_w(1, vfe31_ctrl->vfebase +
+			vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out0.ch1]);
+	}
+	if (p_sync->stereocam_enabled)
+		msm_camio_set_perf_lvl(S_STEREO_VIDEO);
+	else
+		msm_camio_set_perf_lvl(S_PREVIEW);
+
+	usleep(1000);
+	vfe31_start_common();
+	return 0;
+}
+
+static void vfe31_update(void)
+{
+	unsigned long flags;
+	CDBG("vfe31_update\n");
+
+	if (vfe31_ctrl->update_gamma) {
+		if (!msm_io_r(vfe31_ctrl->vfebase + V31_GAMMA_CFG_OFF))
+			msm_io_w(7, vfe31_ctrl->vfebase+V31_GAMMA_CFG_OFF);
+		else
+			msm_io_w(0, vfe31_ctrl->vfebase+V31_GAMMA_CFG_OFF);
+		vfe31_ctrl->update_gamma = false;
+	}
+	if (vfe31_ctrl->update_luma) {
+		if (!msm_io_r(vfe31_ctrl->vfebase + V31_LUMA_CFG_OFF))
+			msm_io_w(1, vfe31_ctrl->vfebase + V31_LUMA_CFG_OFF);
+		else
+			msm_io_w(0, vfe31_ctrl->vfebase + V31_LUMA_CFG_OFF);
+		vfe31_ctrl->update_luma = false;
+	}
+	spin_lock_irqsave(&vfe31_ctrl->update_ack_lock, flags);
+	vfe31_ctrl->update_ack_pending = TRUE;
+	spin_unlock_irqrestore(&vfe31_ctrl->update_ack_lock, flags);
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_io_w_mb(1, vfe31_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+	return;
+}
+
+static void vfe31_sync_timer_stop(void)
+{
+	uint32_t value = 0;
+	vfe31_ctrl->sync_timer_state = 0;
+	if (vfe31_ctrl->sync_timer_number == 0)
+		value = 0x10000;
+	else if (vfe31_ctrl->sync_timer_number == 1)
+		value = 0x20000;
+	else if (vfe31_ctrl->sync_timer_number == 2)
+		value = 0x40000;
+
+	/* Timer Stop */
+	msm_io_w_mb(value, vfe31_ctrl->vfebase + V31_SYNC_TIMER_OFF);
+}
+
+static void vfe31_sync_timer_start(const uint32_t *tbl)
+{
+	/* set bit 8 for auto increment. */
+	uint32_t value = 1;
+	uint32_t val;
+
+	vfe31_ctrl->sync_timer_state = *tbl++;
+	vfe31_ctrl->sync_timer_repeat_count = *tbl++;
+	vfe31_ctrl->sync_timer_number = *tbl++;
+	CDBG("%s timer_state %d, repeat_cnt %d timer number %d\n",
+		 __func__, vfe31_ctrl->sync_timer_state,
+		 vfe31_ctrl->sync_timer_repeat_count,
+		 vfe31_ctrl->sync_timer_number);
+
+	if (vfe31_ctrl->sync_timer_state) { /* Start Timer */
+		value = value << vfe31_ctrl->sync_timer_number;
+	} else { /* Stop Timer */
+		CDBG("Failed to Start timer\n");
+		 return;
+	}
+
+	/* Timer Start */
+	msm_io_w(value, vfe31_ctrl->vfebase + V31_SYNC_TIMER_OFF);
+	/* Sync Timer Line Start */
+	value = *tbl++;
+	msm_io_w(value, vfe31_ctrl->vfebase + V31_SYNC_TIMER_OFF +
+		4 + ((vfe31_ctrl->sync_timer_number) * 12));
+	/* Sync Timer Pixel Start */
+	value = *tbl++;
+	msm_io_w(value, vfe31_ctrl->vfebase + V31_SYNC_TIMER_OFF +
+			 8 + ((vfe31_ctrl->sync_timer_number) * 12));
+	/* Sync Timer Pixel Duration */
+	value = *tbl++;
+	val = camio_clk.vfe_clk_rate / 10000;
+	val = 10000000 / val;
+	val = value * 10000 / val;
+	CDBG("%s: Pixel Clk Cycles!!! %d \n", __func__, val);
+	msm_io_w(val, vfe31_ctrl->vfebase + V31_SYNC_TIMER_OFF +
+		12 + ((vfe31_ctrl->sync_timer_number) * 12));
+	/* Timer0 Active High/LOW */
+	value = *tbl++;
+	msm_io_w(value, vfe31_ctrl->vfebase + V31_SYNC_TIMER_POLARITY_OFF);
+	/* Selects sync timer 0 output to drive onto timer1 port */
+	value = 0;
+	msm_io_w(value, vfe31_ctrl->vfebase + V31_TIMER_SELECT_OFF);
+	wmb();
+}
+
+static void vfe31_program_dmi_cfg(enum VFE31_DMI_RAM_SEL bankSel)
+{
+	/* set bit 8 for auto increment. */
+	uint32_t value = VFE_DMI_CFG_DEFAULT;
+	value += (uint32_t)bankSel;
+
+	msm_io_w_mb(value, vfe31_ctrl->vfebase + VFE_DMI_CFG);
+	/* by default, always starts with offset 0.*/
+	msm_io_w(0, vfe31_ctrl->vfebase + VFE_DMI_ADDR);
+	wmb();
+}
+static void vfe31_write_gamma_cfg(enum VFE31_DMI_RAM_SEL channel_sel,
+						const uint32_t *tbl)
+{
+	int i;
+	uint32_t value, value1, value2;
+	vfe31_program_dmi_cfg(channel_sel);
+	/* for loop for extracting init table. */
+	for (i = 0 ; i < (VFE31_GAMMA_NUM_ENTRIES/2) ; i++) {
+		value = *tbl++;
+		value1 = value & 0x0000FFFF;
+		value2 = (value & 0xFFFF0000)>>16;
+		msm_io_w((value1), vfe31_ctrl->vfebase + VFE_DMI_DATA_LO);
+		msm_io_w((value2), vfe31_ctrl->vfebase + VFE_DMI_DATA_LO);
+	}
+	vfe31_program_dmi_cfg(NO_MEM_SELECTED);
+}
+
+static void vfe31_reset_hist_cfg(void)
+{
+	uint32_t i;
+	uint32_t value = 0;
+
+	vfe31_program_dmi_cfg(STATS_HIST_RAM);
+	for (i = 0 ; i < VFE31_HIST_TABLE_LENGTH ; i++)
+		msm_io_w(value, vfe31_ctrl->vfebase + VFE_DMI_DATA_LO);
+	vfe31_program_dmi_cfg(NO_MEM_SELECTED);
+}
+
+static void vfe31_write_la_cfg(enum VFE31_DMI_RAM_SEL channel_sel,
+						const uint32_t *tbl)
+{
+	uint32_t i;
+	uint32_t value, value1, value2;
+
+	vfe31_program_dmi_cfg(channel_sel);
+	/* for loop for extracting init table. */
+	for (i = 0 ; i < (VFE31_LA_TABLE_LENGTH/2) ; i++) {
+		value = *tbl++;
+		value1 = value & 0x0000FFFF;
+		value2 = (value & 0xFFFF0000)>>16;
+		msm_io_w((value1), vfe31_ctrl->vfebase + VFE_DMI_DATA_LO);
+		msm_io_w((value2), vfe31_ctrl->vfebase + VFE_DMI_DATA_LO);
+	}
+	vfe31_program_dmi_cfg(NO_MEM_SELECTED);
+}
+
+static int vfe31_proc_general(struct msm_vfe31_cmd *cmd)
+{
+	int i , rc = 0;
+	uint32_t old_val = 0 , new_val = 0;
+	uint32_t *cmdp = NULL;
+	uint32_t *cmdp_local = NULL;
+	uint32_t snapshot_cnt = 0;
+	uint32_t stereo_cam_enable = 0;
+	struct msm_sync* p_sync = (struct msm_sync *)vfe_syncdata;
+
+	CDBG("vfe31_proc_general: cmdID = %s, length = %d\n",
+		vfe31_general_cmd[cmd->id], cmd->length);
+	switch (cmd->id) {
+	case V31_RESET:
+		pr_info("vfe31_proc_general: cmdID = %s\n",
+			vfe31_general_cmd[cmd->id]);
+		vfe31_reset();
+		break;
+	case V31_START:
+		pr_info("vfe31_proc_general: cmdID = %s\n",
+			vfe31_general_cmd[cmd->id]);
+		rc = vfe31_start();
+		break;
+	case V31_UPDATE:
+		vfe31_update();
+		break;
+	case V31_ZSL:
+		pr_info("vfe31_proc_general: cmdID = %s\n",
+			vfe31_general_cmd[cmd->id]);
+		vfe31_zsl();
+		break;
+	case V31_CAPTURE:
+		pr_info("vfe31_proc_general: cmdID = %s\n",
+			vfe31_general_cmd[cmd->id]);
+		if (copy_from_user(&snapshot_cnt, (void __user *)(cmd->value),
+				sizeof(uint32_t))) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		rc = vfe31_capture(snapshot_cnt);
+		break;
+	case V31_START_RECORDING:
+		pr_info("vfe31_proc_general: cmdID = %s\n",
+			vfe31_general_cmd[cmd->id]);
+		rc = vfe31_start_recording();
+		if (p_sync->stereocam_enabled)
+			p_sync->stereo_state = STEREO_VIDEO_ACTIVE;
+		break;
+	case V31_STOP_RECORDING:
+		pr_info("vfe31_proc_general: cmdID = %s\n",
+			vfe31_general_cmd[cmd->id]);
+		rc = vfe31_stop_recording();
+		if (p_sync->stereocam_enabled)
+			p_sync->stereo_state = STEREO_VIDEO_IDLE;
+		break;
+	case V31_OPERATION_CFG: {
+		if (cmd->length != V31_OPERATION_CFG_LEN) {
+			rc = -EINVAL;
+			goto proc_general_done;
+		}
+		cmdp = kmalloc(V31_OPERATION_CFG_LEN, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			V31_OPERATION_CFG_LEN)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		rc = vfe31_operation_config(cmdp);
+		}
+		break;
+
+	case V31_STATS_AE_START: {
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		old_val = msm_io_r(vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val |= AE_ENABLE_MASK;
+		msm_io_w(old_val,
+			vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+		msm_io_memcpy(vfe31_ctrl->vfebase + vfe31_cmd[cmd->id].offset,
+		cmdp, (vfe31_cmd[cmd->id].length));
+		}
+		break;
+	case V31_STATS_AF_START: {
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		old_val = msm_io_r(vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val |= AF_ENABLE_MASK;
+		msm_io_w(old_val,
+			vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+		msm_io_memcpy(vfe31_ctrl->vfebase + vfe31_cmd[cmd->id].offset,
+		cmdp, (vfe31_cmd[cmd->id].length));
+		}
+		break;
+	case V31_STATS_AWB_START: {
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		old_val = msm_io_r(vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val |= AWB_ENABLE_MASK;
+		msm_io_w(old_val,
+			vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+		msm_io_memcpy(vfe31_ctrl->vfebase + vfe31_cmd[cmd->id].offset,
+				cmdp, (vfe31_cmd[cmd->id].length));
+		}
+		break;
+
+	case V31_STATS_IHIST_START: {
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		old_val = msm_io_r(vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val |= IHIST_ENABLE_MASK;
+		msm_io_w(old_val,
+			vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+		msm_io_memcpy(vfe31_ctrl->vfebase + vfe31_cmd[cmd->id].offset,
+				cmdp, (vfe31_cmd[cmd->id].length));
+		}
+		break;
+
+	case V31_XBAR_CFG: {
+		unsigned long flags = 0;
+		spin_lock_irqsave(&vfe31_ctrl->xbar_lock, flags);
+		if ((cmd->length != V31_XBAR_CFG_LEN)
+			|| vfe31_ctrl->xbar_update_pending) {
+			rc = -EINVAL;
+			spin_unlock_irqrestore(&vfe31_ctrl->xbar_lock, flags);
+			goto proc_general_done;
+		}
+		spin_unlock_irqrestore(&vfe31_ctrl->xbar_lock, flags);
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		spin_lock_irqsave(&vfe31_ctrl->xbar_lock, flags);
+		vfe31_ctrl->xbar_cfg[0] = *cmdp;
+		vfe31_ctrl->xbar_cfg[1] = *(cmdp+1);
+		vfe31_ctrl->xbar_update_pending = 1;
+		spin_unlock_irqrestore(&vfe31_ctrl->xbar_lock, flags);
+		CDBG("%s: xbar0 0x%x xbar1 0x%x", __func__,
+			vfe31_ctrl->xbar_cfg[0],
+			vfe31_ctrl->xbar_cfg[1]);
+		}
+		break;
+
+	case V31_STATS_RS_START: {
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		/*
+		old_val = msm_io_r(vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val |= RS_ENABLE_MASK;
+		msm_io_w(old_val,
+			vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+		*/
+		msm_io_memcpy(vfe31_ctrl->vfebase + vfe31_cmd[cmd->id].offset,
+				cmdp, (vfe31_cmd[cmd->id].length));
+		}
+		break;
+
+	case V31_STATS_CS_START: {
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		/*
+		old_val = msm_io_r(vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val |= CS_ENABLE_MASK;
+		msm_io_w(old_val,
+			vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+		*/
+		msm_io_memcpy(vfe31_ctrl->vfebase + vfe31_cmd[cmd->id].offset,
+				cmdp, (vfe31_cmd[cmd->id].length));
+		}
+		break;
+
+	case V31_MCE_UPDATE:
+	case V31_MCE_CFG:{
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		/* Offset by 4 to point to the 2nd register, which holds
+		 * the mce_enable bit */
+		old_val = msm_io_r(vfe31_ctrl->vfebase +
+						V31_CHROMA_SUP_OFF + 4);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		new_val = *cmdp_local;
+		old_val &= MCE_EN_MASK;
+		new_val = new_val | old_val;
+		msm_io_memcpy(vfe31_ctrl->vfebase + V31_CHROMA_SUP_OFF + 4,
+					&new_val, 4);
+		cmdp_local += 1;
+
+		old_val = msm_io_r(vfe31_ctrl->vfebase +
+						V31_CHROMA_SUP_OFF + 8);
+		new_val = *cmdp_local;
+		old_val &= MCE_Q_K_MASK;
+		new_val = new_val | old_val;
+		msm_io_memcpy(vfe31_ctrl->vfebase + V31_CHROMA_SUP_OFF + 8,
+		&new_val, 4);
+		cmdp_local += 1;
+		msm_io_memcpy(vfe31_ctrl->vfebase + vfe31_cmd[cmd->id].offset,
+		cmdp_local, (vfe31_cmd[cmd->id].length));
+		}
+		break;
+	case V31_DEMOSAIC_2_UPDATE: /* 38 BPC update   */
+	case V31_DEMOSAIC_2_CFG: {  /* 14 BPC config   */
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		new_val = *cmdp_local;
+
+		old_val = msm_io_r(vfe31_ctrl->vfebase + V31_DEMOSAIC_0_OFF);
+		old_val &= BPC_MASK;
+
+		new_val = new_val | old_val;
+		*cmdp_local = new_val;
+		msm_io_memcpy(vfe31_ctrl->vfebase + V31_DEMOSAIC_0_OFF,
+					cmdp_local, 4);
+		cmdp_local += 1;
+		msm_io_memcpy(vfe31_ctrl->vfebase + vfe31_cmd[cmd->id].offset,
+			cmdp_local, (vfe31_cmd[cmd->id].length));
+		}
+		break;
+	case V31_DEMOSAIC_1_UPDATE:/* 37 ABF update  */
+	case V31_DEMOSAIC_1_CFG: { /* 13 ABF config  */
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		new_val = *cmdp_local;
+
+		old_val = msm_io_r(vfe31_ctrl->vfebase + V31_DEMOSAIC_0_OFF);
+		old_val &= ABF_MASK;
+		new_val = new_val | old_val;
+		*cmdp_local = new_val;
+
+		msm_io_memcpy(vfe31_ctrl->vfebase + V31_DEMOSAIC_0_OFF,
+		    cmdp_local, 4);
+
+		cmdp_local += 1;
+		msm_io_memcpy(vfe31_ctrl->vfebase + vfe31_cmd[cmd->id].offset,
+		cmdp_local, (vfe31_cmd[cmd->id].length));
+		}
+		break;
+	case V31_ROLL_OFF_CFG: {
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value) , cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		msm_io_memcpy(vfe31_ctrl->vfebase + vfe31_cmd[cmd->id].offset,
+		cmdp_local, 16);
+		cmdp_local += 4;
+		vfe31_program_dmi_cfg(ROLLOFF_RAM);
+		/* for loop for extracting init table. */
+		for (i = 0 ; i < (VFE31_ROLL_OFF_INIT_TABLE_SIZE * 2) ; i++) {
+			msm_io_w(*cmdp_local ,
+			vfe31_ctrl->vfebase + VFE_DMI_DATA_LO);
+			cmdp_local++;
+		}
+		CDBG("done writing init table \n");
+		/* by default, always starts with offset 0. */
+		msm_io_w(LENS_ROLL_OFF_DELTA_TABLE_OFFSET,
+		vfe31_ctrl->vfebase + VFE_DMI_ADDR);
+		/* for loop for extracting delta table. */
+		for (i = 0 ; i < (VFE31_ROLL_OFF_DELTA_TABLE_SIZE * 2) ; i++) {
+			msm_io_w(*cmdp_local,
+			vfe31_ctrl->vfebase + VFE_DMI_DATA_LO);
+			cmdp_local++;
+		}
+		vfe31_program_dmi_cfg(NO_MEM_SELECTED);
+		}
+		break;
+
+	case V31_LA_CFG:{
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		/* Select Bank 0*/
+		*cmdp = 0;
+		msm_io_memcpy(vfe31_ctrl->vfebase + vfe31_cmd[cmd->id].offset,
+				cmdp, (vfe31_cmd[cmd->id].length));
+		cmdp += 1;
+		vfe31_write_la_cfg(LUMA_ADAPT_LUT_RAM_BANK0 , cmdp);
+		cmdp -= 1;
+		}
+		break;
+
+	case V31_LA_UPDATE: {
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		old_val = msm_io_r(vfe31_ctrl->vfebase + V31_LUMA_CFG_OFF);
+		cmdp += 1;
+		if (old_val != 0x0)
+			vfe31_write_la_cfg(LUMA_ADAPT_LUT_RAM_BANK0 , cmdp);
+		else
+			vfe31_write_la_cfg(LUMA_ADAPT_LUT_RAM_BANK1 , cmdp);
+		vfe31_ctrl->update_luma = true;
+		cmdp -= 1;
+		}
+		break;
+
+	case V31_SK_ENHAN_CFG:
+	case V31_SK_ENHAN_UPDATE:{
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		msm_io_memcpy(vfe31_ctrl->vfebase + V31_SCE_OFF,
+				cmdp, V31_SCE_LEN);
+		}
+		break;
+
+	case V31_LIVESHOT:
+		vfe31_liveshot();
+		break;
+
+	case V31_STEREOCAM:
+		if (copy_from_user(&stereo_cam_enable,
+			(void __user *)(cmd->value), sizeof(uint32_t))) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		vfe31_stereocam(stereo_cam_enable);
+		break;
+
+	case V31_RGB_G_CFG: {
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		/* Select Bank 0*/
+		*cmdp = 0;
+		msm_io_memcpy(vfe31_ctrl->vfebase + V31_RGB_G_OFF,
+				cmdp, 4);
+		cmdp += 1;
+		vfe31_write_gamma_cfg(RGBLUT_CHX_BANK0, cmdp);
+		cmdp -= 1;
+		}
+		break;
+
+	case V31_RGB_G_UPDATE: {
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp, (void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		old_val = msm_io_r(vfe31_ctrl->vfebase + V31_GAMMA_CFG_OFF);
+		cmdp += 1;
+
+		if (!old_val) {
+			vfe31_write_gamma_cfg(RGBLUT_CHX_BANK1, cmdp);
+		} else {
+			vfe31_write_gamma_cfg(RGBLUT_CHX_BANK0, cmdp);
+		}
+		vfe31_ctrl->update_gamma = true;
+		cmdp -= 1;
+		}
+		break;
+
+	case V31_STATS_AWB_STOP: {
+		old_val = msm_io_r(vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val &= ~AWB_ENABLE_MASK;
+		msm_io_w(old_val,
+			vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+		}
+		break;
+	case V31_STATS_AE_STOP: {
+		old_val = msm_io_r(vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val &= ~AE_ENABLE_MASK;
+		msm_io_w(old_val,
+			vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+		}
+		break;
+	case V31_STATS_AF_STOP: {
+		old_val = msm_io_r(vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val &= ~AF_ENABLE_MASK;
+		msm_io_w(old_val,
+			vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+		}
+		break;
+
+	case V31_STATS_IHIST_STOP: {
+		old_val = msm_io_r(vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val &= ~IHIST_ENABLE_MASK;
+		msm_io_w(old_val,
+			vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+		}
+		break;
+
+	case V31_STATS_RS_STOP: {
+		old_val = msm_io_r(vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val &= ~RS_ENABLE_MASK;
+		msm_io_w(old_val,
+			vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+		}
+		break;
+
+	case V31_STATS_CS_STOP: {
+		old_val = msm_io_r(vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val &= ~CS_ENABLE_MASK;
+		msm_io_w(old_val,
+			vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+		}
+		break;
+	case V31_STOP:
+		pr_info("vfe31_proc_general: cmdID = %s\n",
+			vfe31_general_cmd[cmd->id]);
+		vfe31_stop();
+		break;
+
+	case V31_SYNC_TIMER_SETTING:
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp, (void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		vfe31_sync_timer_start(cmdp);
+		break;
+
+	case V31_EZTUNE_CFG: {
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		*cmdp &= ~STATS_ENABLE_MASK;
+		old_val = msm_io_r(vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val &= STATS_ENABLE_MASK;
+		*cmdp |= old_val;
+
+		msm_io_memcpy(vfe31_ctrl->vfebase + vfe31_cmd[cmd->id].offset,
+			cmdp, (vfe31_cmd[cmd->id].length));
+		}
+		break;
+
+	default: {
+		if (cmd->length != vfe31_cmd[cmd->id].length)
+			return -EINVAL;
+
+		cmdp = kmalloc(vfe31_cmd[cmd->id].length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+
+		CHECKED_COPY_FROM_USER(cmdp);
+		msm_io_memcpy(vfe31_ctrl->vfebase + vfe31_cmd[cmd->id].offset,
+			cmdp, (vfe31_cmd[cmd->id].length));
+	}
+	break;
+
+	}
+
+proc_general_done:
+	kfree(cmdp);
+
+	return rc;
+}
+
+static void vfe31_stats_af_ack(struct vfe_cmd_stats_ack *pAck)
+{
+	vfe31_ctrl->afStatsControl.nextFrameAddrBuf = pAck->nextStatsBuf;
+	vfe31_ctrl->af_ack_pending = FALSE;
+}
+
+static void vfe31_stats_awb_ack(struct vfe_cmd_stats_ack *pAck)
+{
+	vfe31_ctrl->awbStatsControl.nextFrameAddrBuf = pAck->nextStatsBuf;
+	vfe31_ctrl->awb_ack_pending = FALSE;
+}
+
+static void vfe31_stats_aec_ack(struct vfe_cmd_stats_ack *pAck)
+{
+	vfe31_ctrl->aecStatsControl.nextFrameAddrBuf = pAck->nextStatsBuf;
+	vfe31_ctrl->aec_ack_pending = FALSE;
+}
+
+static void vfe31_stats_ihist_ack(struct vfe_cmd_stats_ack *pAck)
+{
+	vfe31_ctrl->ihistStatsControl.nextFrameAddrBuf = pAck->nextStatsBuf;
+	vfe31_ctrl->ihist_ack_pending = FALSE;
+}
+
+static void vfe31_stats_rs_ack(struct vfe_cmd_stats_ack *pAck)
+{
+	vfe31_ctrl->rsStatsControl.nextFrameAddrBuf = pAck->nextStatsBuf;
+	vfe31_ctrl->rs_ack_pending = FALSE;
+}
+
+static void vfe31_stats_cs_ack(struct vfe_cmd_stats_ack *pAck)
+{
+	vfe31_ctrl->csStatsControl.nextFrameAddrBuf = pAck->nextStatsBuf;
+	vfe31_ctrl->cs_ack_pending = FALSE;
+}
+
+static int vfe31_config(struct msm_vfe_cfg_cmd *cmd, void *data)
+{
+	struct msm_vfe31_cmd vfecmd;
+
+	long rc = 0;
+	uint32_t i = 0;
+	struct vfe_cmd_stats_buf *scfg = NULL;
+	struct msm_pmem_region   *regptr = NULL;
+	struct vfe_cmd_stats_ack *sack = NULL;
+
+	if (cmd->cmd_type != CMD_FRAME_BUF_RELEASE &&
+		cmd->cmd_type != CMD_SNAP_BUF_RELEASE &&
+		cmd->cmd_type != CMD_STATS_AEC_BUF_RELEASE &&
+		cmd->cmd_type != CMD_STATS_AWB_BUF_RELEASE &&
+		cmd->cmd_type != CMD_STATS_IHIST_BUF_RELEASE &&
+		cmd->cmd_type != CMD_STATS_RS_BUF_RELEASE &&
+		cmd->cmd_type != CMD_STATS_CS_BUF_RELEASE &&
+		cmd->cmd_type != CMD_STATS_AF_BUF_RELEASE) {
+		if (copy_from_user(&vfecmd,
+				(void __user *)(cmd->value),
+				sizeof(vfecmd))) {
+			pr_err("%s %d: copy_from_user failed\n", __func__,
+				__LINE__);
+			return -EFAULT;
+		}
+	} else {
+		/* either a stats release or a frame release. */
+		if (cmd->cmd_type != CMD_FRAME_BUF_RELEASE &&
+			cmd->cmd_type != CMD_SNAP_BUF_RELEASE) {
+			/* then must be stats release. */
+			if (!data)
+				return -EFAULT;
+			sack = kmalloc(sizeof(struct vfe_cmd_stats_ack),
+				GFP_ATOMIC);
+			if (!sack)
+				return -ENOMEM;
+
+			sack->nextStatsBuf = *(uint32_t *)data;
+		}
+	}
+
+	CDBG("%s: cmdType = %d\n", __func__, cmd->cmd_type);
+
+	if ((cmd->cmd_type == CMD_STATS_AF_ENABLE) ||
+		(cmd->cmd_type == CMD_STATS_AWB_ENABLE) ||
+		(cmd->cmd_type == CMD_STATS_IHIST_ENABLE) ||
+		(cmd->cmd_type == CMD_STATS_RS_ENABLE) ||
+		(cmd->cmd_type == CMD_STATS_CS_ENABLE) ||
+		(cmd->cmd_type == CMD_STATS_AEC_ENABLE)) {
+		struct axidata *axid;
+		axid = data;
+		if (!axid) {
+			rc = -EFAULT;
+			goto vfe31_config_done;
+		}
+
+		scfg =
+			kmalloc(sizeof(struct vfe_cmd_stats_buf),
+				GFP_ATOMIC);
+		if (!scfg) {
+			rc = -ENOMEM;
+			goto vfe31_config_done;
+		}
+		regptr = axid->region;
+		if (axid->bufnum1 > 0) {
+			for (i = 0; i < axid->bufnum1; i++) {
+				scfg->statsBuf[i] =
+					(uint32_t)(regptr->paddr);
+				regptr++;
+			}
+		}
+		/* individual */
+		switch (cmd->cmd_type) {
+		case CMD_STATS_AEC_ENABLE:
+			rc = vfe_stats_aec_buf_init(scfg);
+			break;
+		case CMD_STATS_AF_ENABLE:
+			rc = vfe_stats_af_buf_init(scfg);
+			break;
+		case CMD_STATS_AWB_ENABLE:
+			rc = vfe_stats_awb_buf_init(scfg);
+			break;
+		case CMD_STATS_IHIST_ENABLE:
+			rc = vfe_stats_ihist_buf_init(scfg);
+			break;
+		case CMD_STATS_RS_ENABLE:
+			rc = vfe_stats_rs_buf_init(scfg);
+			break;
+		case CMD_STATS_CS_ENABLE:
+			rc = vfe_stats_cs_buf_init(scfg);
+			break;
+		}
+	}
+
+	switch (cmd->cmd_type) {
+	case CMD_GENERAL:
+		rc = vfe31_proc_general(&vfecmd);
+		break;
+
+	case CMD_FRAME_BUF_RELEASE: {
+		struct msm_frame *b;
+		unsigned long p;
+		int ret;
+		struct vfe31_output_ch *outch = NULL;
+		if (!data) {
+			rc = -EFAULT;
+			break;
+		}
+
+		b = (struct msm_frame *)(cmd->value);
+		p = *(unsigned long *)data;
+
+		CDBG("CMD_FRAME_BUF_RELEASE b->path = %d\n", b->path);
+
+		if (b->path & OUTPUT_TYPE_P) {
+			CDBG("CMD_FRAME_BUF_RELEASE got free buffer\n");
+			outch = &vfe31_ctrl->outpath.out0;
+		} else if (b->path & OUTPUT_TYPE_S) {
+			outch = &vfe31_ctrl->outpath.out1;
+		} else if (b->path & OUTPUT_TYPE_V) {
+			outch = &vfe31_ctrl->outpath.out2;
+		} else {
+			rc = -EFAULT;
+			break;
+		}
+
+		ret = vfe31_add_free_buf2(outch, p, b->y_off, b->cbcr_off);
+		if (ret < 0)
+			return ret;
+		break;
+	}
+
+	case CMD_SNAP_BUF_RELEASE: {
+		struct msm_frame *b;
+		unsigned long p;
+		int ret;
+		struct vfe31_output_ch *outch = NULL;
+		if (!data)
+			return -EFAULT;
+
+		b = (struct msm_frame *)(cmd->value);
+		p = *(unsigned long *)data;
+
+		CDBG("CMD_PIC_BUF_RELEASE b->path = %d\n", b->path);
+
+		if (b->path & OUTPUT_TYPE_T) {
+			CDBG("CMD_FRAME_BUF_RELEASE got free buffer\n");
+			outch = &vfe31_ctrl->outpath.out1;
+		} else if (b->path & OUTPUT_TYPE_S) {
+			outch = &vfe31_ctrl->outpath.out2;
+		} else
+			return -EFAULT;
+
+		ret = vfe31_add_free_buf2(outch, p, b->y_off, b->cbcr_off);
+		if (ret < 0)
+			return ret;
+		break;
+	}
+
+	case CMD_STATS_AEC_BUF_RELEASE:
+		vfe31_stats_aec_ack(sack);
+		break;
+
+	case CMD_STATS_AF_BUF_RELEASE:
+		vfe31_stats_af_ack(sack);
+		break;
+
+	case CMD_STATS_AWB_BUF_RELEASE:
+		vfe31_stats_awb_ack(sack);
+		break;
+
+	case CMD_STATS_IHIST_BUF_RELEASE:
+		vfe31_stats_ihist_ack(sack);
+		break;
+
+	case CMD_STATS_RS_BUF_RELEASE:
+		vfe31_stats_rs_ack(sack);
+		break;
+
+	case CMD_STATS_CS_BUF_RELEASE:
+		vfe31_stats_cs_ack(sack);
+		break;
+
+	case CMD_AXI_CFG_PREVIEW: {
+		struct axidata *axid;
+		uint32_t *axio = NULL;
+		axid = data;
+		if (!axid) {
+			rc = -EFAULT;
+			break;
+		}
+		axio =
+			kmalloc(vfe31_cmd[V31_AXI_OUT_CFG].length,
+				GFP_ATOMIC);
+		if (!axio) {
+			rc = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(axio, (void __user *)(vfecmd.value),
+				vfe31_cmd[V31_AXI_OUT_CFG].length)) {
+			kfree(axio);
+			rc = -EFAULT;
+			break;
+		}
+		vfe31_config_axi(OUTPUT_2, axid, axio);
+		kfree(axio);
+		break;
+	}
+
+	case CMD_RAW_PICT_AXI_CFG: {
+		struct axidata *axid;
+		uint32_t *axio = NULL;
+		axid = data;
+		if (!axid) {
+			rc = -EFAULT;
+			break;
+		}
+		axio =
+			kmalloc(vfe31_cmd[V31_AXI_OUT_CFG].length,
+				GFP_ATOMIC);
+		if (!axio) {
+			rc = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(axio, (void __user *)(vfecmd.value),
+				vfe31_cmd[V31_AXI_OUT_CFG].length)) {
+			kfree(axio);
+			rc = -EFAULT;
+			break;
+		}
+		vfe31_config_axi(CAMIF_TO_AXI_VIA_OUTPUT_2, axid, axio);
+		kfree(axio);
+		break;
+	}
+
+	case CMD_AXI_CFG_SNAP: {
+		struct axidata *axid;
+		uint32_t *axio = NULL;
+		CDBG("%s, CMD_AXI_CFG_SNAP\n", __func__);
+		axid = data;
+		if (!axid)
+			return -EFAULT;
+		axio =
+			kmalloc(vfe31_cmd[V31_AXI_OUT_CFG].length,
+				GFP_ATOMIC);
+		if (!axio) {
+			rc = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(axio, (void __user *)(vfecmd.value),
+				vfe31_cmd[V31_AXI_OUT_CFG].length)) {
+			kfree(axio);
+			rc = -EFAULT;
+			break;
+		}
+		vfe31_config_axi(OUTPUT_1_AND_2, axid, axio);
+		kfree(axio);
+		break;
+	}
+
+	case CMD_AXI_CFG_ZSL: {
+		struct axidata *axid;
+		uint32_t *axio = NULL;
+		CDBG("%s, CMD_AXI_CFG_ZSL\n", __func__);
+		axid = data;
+		if (!axid)
+			return -EFAULT;
+		axio =
+			kmalloc(vfe31_cmd[V31_AXI_OUT_CFG].length,
+				GFP_ATOMIC);
+		if (!axio) {
+			rc = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(axio, (void __user *)(vfecmd.value),
+				vfe31_cmd[V31_AXI_OUT_CFG].length)) {
+			kfree(axio);
+			rc = -EFAULT;
+			break;
+		}
+		vfe31_config_axi(OUTPUT_1_2_AND_3, axid, axio);
+		kfree(axio);
+	}
+		break;
+
+	case CMD_AXI_CFG_VIDEO: {
+		struct axidata *axid;
+		uint32_t *axio = NULL;
+		axid = data;
+		if (!axid) {
+			rc = -EFAULT;
+			break;
+		}
+
+		axio =
+			kmalloc(vfe31_cmd[V31_AXI_OUT_CFG].length,
+				GFP_ATOMIC);
+		if (!axio) {
+			rc = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(axio, (void __user *)(vfecmd.value),
+				vfe31_cmd[V31_AXI_OUT_CFG].length)) {
+			kfree(axio);
+			rc = -EFAULT;
+			break;
+		}
+		vfe31_config_axi(OUTPUT_1_AND_3, axid, axio);
+		kfree(axio);
+		break;
+	}
+
+	default:
+		break;
+	}
+vfe31_config_done:
+	kfree(scfg);
+	kfree(sack);
+	CDBG("%s done: rc = %d\n", __func__, (int) rc);
+	return rc;
+}
+
+static void vfe31_send_msg_no_payload(enum VFE31_MESSAGE_ID id)
+{
+	struct vfe_message msg;
+
+	CDBG("vfe31_send_msg_no_payload\n");
+	msg._d = id;
+	vfe31_proc_ops(id, &msg, 0);
+}
+
+static void vfe31_process_reg_update_irq(void)
+{
+	uint32_t  temp, old_val;
+	unsigned long flags;
+	if (vfe31_ctrl->recording_state == VFE_REC_STATE_START_REQUESTED) {
+		if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_V) {
+			msm_io_w(1, vfe31_ctrl->vfebase +
+				vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out2.ch0]);
+			msm_io_w(1, vfe31_ctrl->vfebase +
+				vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out2.ch1]);
+		}
+		vfe31_ctrl->recording_state = VFE_REC_STATE_STARTED;
+		if (vpe_ctrl->dis_en) {
+			old_val = msm_io_r(
+				vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+			old_val |= RS_CS_ENABLE_MASK;
+			msm_io_w(old_val,
+				vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+		}
+		msm_io_w_mb(1, vfe31_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+		CDBG("start video triggered .\n");
+	} else if (vfe31_ctrl->recording_state
+			== VFE_REC_STATE_STOP_REQUESTED) {
+		if (vfe31_ctrl->outpath.output_mode & VFE31_OUTPUT_MODE_V) {
+			msm_io_w(0, vfe31_ctrl->vfebase +
+				vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out2.ch0]);
+			msm_io_w(0, vfe31_ctrl->vfebase +
+				vfe31_AXI_WM_CFG[vfe31_ctrl->outpath.out2.ch1]);
+		}
+
+		/* disable rs & cs when stopping recording. */
+		old_val = msm_io_r(vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val &= (~RS_CS_ENABLE_MASK);
+		msm_io_w(old_val,
+				vfe31_ctrl->vfebase + VFE_MODULE_CFG);
+		CDBG("stop video triggered\n");
+	}
+	if (vfe31_ctrl->start_ack_pending == TRUE) {
+		vfe31_send_msg_no_payload(MSG_ID_START_ACK);
+		vfe31_ctrl->start_ack_pending = FALSE;
+	} else {
+		if (vfe31_ctrl->recording_state ==
+			VFE_REC_STATE_STOP_REQUESTED) {
+			vfe31_ctrl->recording_state = VFE_REC_STATE_STOPPED;
+			msm_io_w_mb(1, vfe31_ctrl->vfebase +
+						VFE_REG_UPDATE_CMD);
+		} else if (vfe31_ctrl->recording_state ==
+			VFE_REC_STATE_STOPPED) {
+			CDBG("sent stop video rec ACK");
+			vfe31_send_msg_no_payload(MSG_ID_STOP_REC_ACK);
+			vfe31_ctrl->recording_state = VFE_REC_STATE_IDLE;
+		}
+		spin_lock_irqsave(&vfe31_ctrl->update_ack_lock, flags);
+		if (vfe31_ctrl->update_ack_pending == TRUE) {
+			vfe31_ctrl->update_ack_pending = FALSE;
+			spin_unlock_irqrestore(
+				&vfe31_ctrl->update_ack_lock, flags);
+			vfe31_send_msg_no_payload(MSG_ID_UPDATE_ACK);
+		} else {
+			spin_unlock_irqrestore(
+				&vfe31_ctrl->update_ack_lock, flags);
+		}
+	}
+	/* in snapshot mode */
+	if (vfe31_ctrl->operation_mode ==
+		VFE_MODE_OF_OPERATION_SNAPSHOT) {
+		/* later we need to add check for live snapshot mode. */
+
+		if (vfe31_ctrl->vfe_capture_count)
+			vfe31_ctrl->vfe_capture_count--;
+		/* if last frame to be captured: */
+		if (vfe31_ctrl->vfe_capture_count == 0) {
+			/* stop the bus output:  write master enable = 0*/
+			if (vfe31_ctrl->outpath.output_mode &
+					VFE31_OUTPUT_MODE_PT) {
+				msm_io_w(0, vfe31_ctrl->vfebase +
+					vfe31_AXI_WM_CFG[
+						vfe31_ctrl->outpath.out0.ch0]);
+				msm_io_w(0, vfe31_ctrl->vfebase +
+					vfe31_AXI_WM_CFG[vfe31_ctrl->
+						outpath.out0.ch1]);
+			}
+			if (vfe31_ctrl->outpath.output_mode &
+					VFE31_OUTPUT_MODE_S) {
+				msm_io_w(0, vfe31_ctrl->vfebase +
+					vfe31_AXI_WM_CFG[vfe31_ctrl->
+						outpath.out1.ch0]);
+				msm_io_w(0, vfe31_ctrl->vfebase +
+					vfe31_AXI_WM_CFG[vfe31_ctrl->
+						outpath.out1.ch1]);
+			}
+
+			/* Ensure the write order while writing
+			to the command register using the barrier */
+			msm_io_w_mb(CAMIF_COMMAND_STOP_AT_FRAME_BOUNDARY,
+				vfe31_ctrl->vfebase + VFE_CAMIF_COMMAND);
+
+			/* Ensure the read order while reading
+			to the command register using the barrier */
+			temp = msm_io_r_mb(vfe31_ctrl->vfebase +
+				VFE_CAMIF_COMMAND);
+		}
+		/* then do reg_update. */
+		msm_io_w_mb(1, vfe31_ctrl->vfebase +
+			VFE_REG_UPDATE_CMD);
+	} /* if snapshot mode. */
+}
+
+static void vfe31_set_default_reg_values(void)
+{
+	msm_io_w(0x800080, vfe31_ctrl->vfebase + VFE_DEMUX_GAIN_0);
+	msm_io_w(0x800080, vfe31_ctrl->vfebase + VFE_DEMUX_GAIN_1);
+	msm_io_w(0xFFFFF, vfe31_ctrl->vfebase + VFE_CGC_OVERRIDE);
+
+	/* default frame drop period and pattern */
+	msm_io_w(0x1f, vfe31_ctrl->vfebase + VFE_FRAMEDROP_ENC_Y_CFG);
+	msm_io_w(0x1f, vfe31_ctrl->vfebase + VFE_FRAMEDROP_ENC_CBCR_CFG);
+	msm_io_w(0xFFFFFFFF, vfe31_ctrl->vfebase + VFE_FRAMEDROP_ENC_Y_PATTERN);
+	msm_io_w(0xFFFFFFFF,
+		vfe31_ctrl->vfebase + VFE_FRAMEDROP_ENC_CBCR_PATTERN);
+	msm_io_w(0x1f, vfe31_ctrl->vfebase + VFE_FRAMEDROP_VIEW_Y);
+	msm_io_w(0x1f, vfe31_ctrl->vfebase + VFE_FRAMEDROP_VIEW_CBCR);
+	msm_io_w(0xFFFFFFFF,
+		vfe31_ctrl->vfebase + VFE_FRAMEDROP_VIEW_Y_PATTERN);
+	msm_io_w(0xFFFFFFFF,
+		vfe31_ctrl->vfebase + VFE_FRAMEDROP_VIEW_CBCR_PATTERN);
+	msm_io_w(0, vfe31_ctrl->vfebase + VFE_CLAMP_MIN);
+	msm_io_w(0xFFFFFF, vfe31_ctrl->vfebase + VFE_CLAMP_MAX);
+
+	/* stats UB config */
+	msm_io_w(0x3980007, vfe31_ctrl->vfebase + VFE_BUS_STATS_AEC_UB_CFG);
+	msm_io_w(0x3A00007, vfe31_ctrl->vfebase + VFE_BUS_STATS_AF_UB_CFG);
+	msm_io_w(0x3A8000F, vfe31_ctrl->vfebase + VFE_BUS_STATS_AWB_UB_CFG);
+	msm_io_w(0x3B80007, vfe31_ctrl->vfebase + VFE_BUS_STATS_RS_UB_CFG);
+	msm_io_w(0x3C0001F, vfe31_ctrl->vfebase + VFE_BUS_STATS_CS_UB_CFG);
+	msm_io_w(0x3E0001F, vfe31_ctrl->vfebase + VFE_BUS_STATS_HIST_UB_CFG);
+}
+
+static void vfe31_process_reset_irq(void)
+{
+	atomic_set(&vfe31_ctrl->vstate, 0);
+	vfe31_ctrl->while_stopping_mask = VFE_IMASK_WHILE_STOPPING_1;
+	if (atomic_read(&vfe31_ctrl->stop_ack_pending)) {
+		/* this is from the stop command. */
+		atomic_set(&vfe31_ctrl->stop_ack_pending, 0);
+		vfe31_send_msg_no_payload(MSG_ID_STOP_ACK);
+	} else {
+		/* this is from reset command. */
+		vfe31_set_default_reg_values();
+
+		/* reload all write masters. (frame & line)*/
+		msm_io_w_mb(0x7FFF, vfe31_ctrl->vfebase + VFE_BUS_CMD);
+		vfe31_send_msg_no_payload(MSG_ID_RESET_ACK);
+	}
+}
+
+
+static void vfe31_process_axi_halt_irq(void)
+{
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_io_w_mb(AXI_HALT_CLEAR,
+		vfe31_ctrl->vfebase + VFE_AXI_CMD);
+	vfe31_ctrl->while_stopping_mask = VFE_IMASK_RESET;
+
+	/* disable all interrupts.  */
+	msm_io_w(VFE_DISABLE_ALL_IRQS,
+		vfe31_ctrl->vfebase + VFE_IRQ_MASK_0);
+	msm_io_w(VFE_DISABLE_ALL_IRQS,
+		vfe31_ctrl->vfebase + VFE_IRQ_MASK_1);
+
+	/* clear all pending interrupts*/
+	msm_io_w(VFE_CLEAR_ALL_IRQS,
+		vfe31_ctrl->vfebase + VFE_IRQ_CLEAR_0);
+	msm_io_w(VFE_CLEAR_ALL_IRQS,
+		vfe31_ctrl->vfebase + VFE_IRQ_CLEAR_1);
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_io_w_mb(1,
+		vfe31_ctrl->vfebase + VFE_IRQ_CMD);
+
+	/* now enable only halt_irq & reset_irq */
+	msm_io_w(0xf0000000,          /* this is for async timer. */
+		vfe31_ctrl->vfebase + VFE_IRQ_MASK_0);
+	msm_io_w(VFE_IMASK_RESET,
+		vfe31_ctrl->vfebase + VFE_IRQ_MASK_1);
+
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	CDBG("%s: about to reset vfe...\n", __func__);
+	msm_io_w_mb(VFE_RESET_UPON_STOP_CMD,
+		vfe31_ctrl->vfebase + VFE_GLOBAL_RESET);
+
+}
+
+static void vfe31_process_camif_sof_irq(void)
+{
+	uint32_t  temp;
+
+	/* in raw snapshot mode */
+	if (vfe31_ctrl->operation_mode ==
+		VFE_MODE_OF_OPERATION_RAW_SNAPSHOT) {
+		if (vfe31_ctrl->start_ack_pending) {
+			vfe31_send_msg_no_payload(MSG_ID_START_ACK);
+			vfe31_ctrl->start_ack_pending = FALSE;
+		}
+		if (vfe31_ctrl->vfe_capture_count)
+			vfe31_ctrl->vfe_capture_count--;
+		/* if last frame to be captured: */
+		if (vfe31_ctrl->vfe_capture_count == 0) {
+			/* Ensure the write order while writing
+			to the command register using the barrier */
+			msm_io_w_mb(CAMIF_COMMAND_STOP_AT_FRAME_BOUNDARY,
+				vfe31_ctrl->vfebase + VFE_CAMIF_COMMAND);
+			temp = msm_io_r_mb(vfe31_ctrl->vfebase +
+				VFE_CAMIF_COMMAND);
+		}
+	} /* if raw snapshot mode. */
+
+	if ((vfe31_ctrl->hfr_mode != HFR_MODE_OFF) &&
+		(vfe31_ctrl->operation_mode == VFE_MODE_OF_OPERATION_VIDEO) &&
+		(vfe31_ctrl->vfeFrameId % vfe31_ctrl->hfr_mode != 0)) {
+		vfe31_ctrl->vfeFrameId++;
+		CDBG("Skip the SOF notification when HFR enabled\n");
+		return;
+	}
+	vfe31_send_msg_no_payload(MSG_ID_SOF_ACK);
+	vfe31_ctrl->vfeFrameId++;
+	CDBG("camif_sof_irq, frameId = %d\n", vfe31_ctrl->vfeFrameId);
+
+	if (vfe31_ctrl->sync_timer_state) {
+		if (vfe31_ctrl->sync_timer_repeat_count == 0)
+			vfe31_sync_timer_stop();
+		else
+		vfe31_ctrl->sync_timer_repeat_count--;
+	}
+}
+
+static void vfe31_process_error_irq(uint32_t errStatus)
+{
+	uint32_t camifStatus, read_val;
+	uint32_t *temp;
+
+	if (errStatus & VFE31_IMASK_CAMIF_ERROR) {
+		pr_err("vfe31_irq: camif errors\n");
+		temp = (uint32_t *)(vfe31_ctrl->vfebase + VFE_CAMIF_STATUS);
+		camifStatus = msm_io_r(temp);
+		pr_err("camifStatus  = 0x%x\n", camifStatus);
+		vfe31_send_msg_no_payload(MSG_ID_CAMIF_ERROR);
+	}
+
+	if (errStatus & VFE31_IMASK_STATS_CS_OVWR)
+		pr_err("vfe31_irq: stats cs overwrite\n");
+
+	if (errStatus & VFE31_IMASK_STATS_IHIST_OVWR)
+		pr_err("vfe31_irq: stats ihist overwrite\n");
+
+	if (errStatus & VFE31_IMASK_REALIGN_BUF_Y_OVFL)
+		pr_err("vfe31_irq: realign buf Y overflow\n");
+
+	if (errStatus & VFE31_IMASK_REALIGN_BUF_CB_OVFL)
+		pr_err("vfe31_irq: realign buf CB overflow\n");
+
+	if (errStatus & VFE31_IMASK_REALIGN_BUF_CR_OVFL)
+		pr_err("vfe31_irq: realign buf CR overflow\n");
+
+	if (errStatus & VFE31_IMASK_VIOLATION)
+		pr_err("vfe31_irq: violation interrupt\n");
+
+	if (errStatus & VFE31_IMASK_IMG_MAST_0_BUS_OVFL)
+		pr_err("vfe31_irq: image master 0 bus overflow\n");
+
+	if (errStatus & VFE31_IMASK_IMG_MAST_1_BUS_OVFL)
+		pr_err("vfe31_irq: image master 1 bus overflow\n");
+
+	if (errStatus & VFE31_IMASK_IMG_MAST_2_BUS_OVFL)
+		pr_err("vfe31_irq: image master 2 bus overflow\n");
+
+	if (errStatus & VFE31_IMASK_IMG_MAST_3_BUS_OVFL)
+		pr_err("vfe31_irq: image master 3 bus overflow\n");
+
+	if (errStatus & VFE31_IMASK_IMG_MAST_4_BUS_OVFL)
+		pr_err("vfe31_irq: image master 4 bus overflow\n");
+
+	if (errStatus & VFE31_IMASK_IMG_MAST_5_BUS_OVFL)
+		pr_err("vfe31_irq: image master 5 bus overflow\n");
+
+	if (errStatus & VFE31_IMASK_IMG_MAST_6_BUS_OVFL)
+		pr_err("vfe31_irq: image master 6 bus overflow\n");
+
+	if (errStatus & VFE31_IMASK_STATS_AE_BUS_OVFL)
+		pr_err("vfe31_irq: ae stats bus overflow\n");
+
+	if (errStatus & VFE31_IMASK_STATS_AF_BUS_OVFL)
+		pr_err("vfe31_irq: af stats bus overflow\n");
+
+	if (errStatus & VFE31_IMASK_STATS_AWB_BUS_OVFL)
+		pr_err("vfe31_irq: awb stats bus overflow\n");
+
+	if (errStatus & VFE31_IMASK_STATS_RS_BUS_OVFL)
+		pr_err("vfe31_irq: rs stats bus overflow\n");
+
+	if (errStatus & VFE31_IMASK_STATS_CS_BUS_OVFL)
+		pr_err("vfe31_irq: cs stats bus overflow\n");
+
+	if (errStatus & VFE31_IMASK_STATS_IHIST_BUS_OVFL)
+		pr_err("vfe31_irq: ihist stats bus overflow\n");
+
+	if (errStatus & VFE31_IMASK_STATS_SKIN_BUS_OVFL)
+		pr_err("vfe31_irq: skin stats bus overflow\n");
+
+	if (errStatus & VFE31_IMASK_AXI_ERROR) {
+		pr_err("vfe31_irq: axi error\n");
+		/* read status too when overflow happens.*/
+		read_val = msm_io_r(vfe31_ctrl->vfebase +
+			VFE_BUS_PING_PONG_STATUS);
+		pr_debug("VFE_BUS_PING_PONG_STATUS = 0x%x\n", read_val);
+		read_val = msm_io_r(vfe31_ctrl->vfebase +
+			VFE_BUS_OPERATION_STATUS);
+		pr_debug("VFE_BUS_OPERATION_STATUS = 0x%x\n", read_val);
+		read_val = msm_io_r(vfe31_ctrl->vfebase +
+			VFE_BUS_IMAGE_MASTER_0_WR_PM_STATS_0);
+		pr_debug("VFE_BUS_IMAGE_MASTER_0_WR_PM_STATS_0 = 0x%x\n",
+			read_val);
+		read_val = msm_io_r(vfe31_ctrl->vfebase +
+			VFE_BUS_IMAGE_MASTER_0_WR_PM_STATS_1);
+		pr_debug("VFE_BUS_IMAGE_MASTER_0_WR_PM_STATS_1 = 0x%x\n",
+			read_val);
+		read_val = msm_io_r(vfe31_ctrl->vfebase +
+			VFE_AXI_STATUS);
+		pr_debug("VFE_AXI_STATUS = 0x%x\n", read_val);
+	}
+}
+
+#define VFE31_AXI_OFFSET 0x0050
+#define vfe31_get_ch_ping_addr(chn) \
+	(msm_io_r(vfe31_ctrl->vfebase + 0x0050 + 0x18 * (chn)))
+#define vfe31_get_ch_pong_addr(chn) \
+	(msm_io_r(vfe31_ctrl->vfebase + 0x0050 + 0x18 * (chn) + 4))
+#define vfe31_get_ch_addr(ping_pong, chn) \
+	(((ping_pong) & (1 << (chn))) == 0 ? \
+	vfe31_get_ch_pong_addr(chn) : vfe31_get_ch_ping_addr(chn))
+
+#define vfe31_put_ch_ping_addr(chn, addr) \
+	(msm_io_w((addr), vfe31_ctrl->vfebase + 0x0050 + 0x18 * (chn)))
+#define vfe31_put_ch_pong_addr(chn, addr) \
+	(msm_io_w((addr), vfe31_ctrl->vfebase + 0x0050 + 0x18 * (chn) + 4))
+#define vfe31_put_ch_addr(ping_pong, chn, addr) \
+	(((ping_pong) & (1 << (chn))) == 0 ?   \
+	vfe31_put_ch_pong_addr((chn), (addr)) : \
+	vfe31_put_ch_ping_addr((chn), (addr)))
+
+static void vfe31_process_output_path_irq_0(uint32_t ping_pong)
+{
+	uint32_t pyaddr, pcbcraddr;
+#ifdef CONFIG_MSM_CAMERA_V4L2
+	uint32_t pyaddr_ping, pcbcraddr_ping, pyaddr_pong, pcbcraddr_pong;
+#endif
+	struct vfe31_free_buf *free_buf = NULL;
+	/* we render frames in the following conditions:
+	 * 1. Continuous mode and a free buffer is available.
+	 */
+	free_buf = vfe31_get_free_buf(&vfe31_ctrl->outpath.out0);
+
+	if (free_buf) {
+		/* Y channel */
+		pyaddr = vfe31_get_ch_addr(ping_pong,
+			vfe31_ctrl->outpath.out0.ch0);
+		/* Chroma channel */
+		pcbcraddr = vfe31_get_ch_addr(ping_pong,
+			vfe31_ctrl->outpath.out0.ch1);
+
+		CDBG("output path 0, pyaddr = 0x%x, pcbcraddr = 0x%x\n",
+			 pyaddr, pcbcraddr);
+		/* Y channel */
+		vfe31_put_ch_addr(ping_pong,
+			vfe31_ctrl->outpath.out0.ch0,
+			free_buf->paddr + free_buf->y_off);
+		/* Chroma channel */
+		vfe31_put_ch_addr(ping_pong,
+			vfe31_ctrl->outpath.out0.ch1,
+			free_buf->paddr + free_buf->cbcr_off);
+
+		kfree(free_buf);
+		/* in continuous mode this frame goes to the display (preview) */
+		vfe_send_outmsg(MSG_ID_OUTPUT_P, pyaddr, pcbcraddr);
+	} else {
+		vfe31_ctrl->outpath.out0.frame_drop_cnt++;
+		pr_warning("path_irq_0 - no free buffer!\n");
+#ifdef CONFIG_MSM_CAMERA_V4L2
+		pr_info("Swapping ping and pong\n");
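+		/*
+		 * No buffer to deliver: swapping ping and pong hands the
+		 * just-completed buffer back to the hardware so it is
+		 * overwritten instead of being sent up.
+		 */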
+
+		/*get addresses*/
+		/* Y channel */
+		pyaddr_ping = vfe31_get_ch_ping_addr(
+			vfe31_ctrl->outpath.out0.ch0);
+		/* Chroma channel */
+		pcbcraddr_ping = vfe31_get_ch_ping_addr(
+			vfe31_ctrl->outpath.out0.ch1);
+		/* Y channel */
+		pyaddr_pong = vfe31_get_ch_pong_addr(
+			vfe31_ctrl->outpath.out0.ch0);
+		/* Chroma channel */
+		pcbcraddr_pong = vfe31_get_ch_pong_addr(
+			vfe31_ctrl->outpath.out0.ch1);
+
+		CDBG("ping = 0x%p, pong = 0x%p\n", (void *)pyaddr_ping,
+			(void *)pyaddr_pong);
+		CDBG("ping_cbcr = 0x%p, pong_cbcr = 0x%p\n",
+			(void *)pcbcraddr_ping, (void *)pcbcraddr_pong);
+
+		/*put addresses*/
+		/* SWAP y channel*/
+		vfe31_put_ch_ping_addr(vfe31_ctrl->outpath.out0.ch0,
+			pyaddr_pong);
+		vfe31_put_ch_pong_addr(vfe31_ctrl->outpath.out0.ch0,
+			pyaddr_ping);
+		/* SWAP chroma channel*/
+		vfe31_put_ch_ping_addr(vfe31_ctrl->outpath.out0.ch1,
+			pcbcraddr_pong);
+		vfe31_put_ch_pong_addr(vfe31_ctrl->outpath.out0.ch1,
+			pcbcraddr_ping);
+		CDBG("after swap: ping = 0x%p, pong = 0x%p\n",
+			(void *)pyaddr_pong, (void *)pyaddr_ping);
+#endif
+	}
+}
+
+static void vfe31_process_snapshot_frame(uint32_t ping_pong)
+{
+	uint32_t pyaddr, pcbcraddr;
+	struct vfe31_free_buf *free_buf = NULL;
+	/* Y channel- Main Image */
+	pyaddr = vfe31_get_ch_addr(ping_pong,
+		vfe31_ctrl->outpath.out1.ch0);
+	/* Chroma channel - Main Image */
+	pcbcraddr = vfe31_get_ch_addr(ping_pong,
+		vfe31_ctrl->outpath.out1.ch1);
+
+	free_buf = vfe31_get_free_buf(&vfe31_ctrl->outpath.out1);
+	CDBG("%s: snapshot main, pyaddr = 0x%x, pcbcraddr = 0x%x\n",
+		__func__, pyaddr, pcbcraddr);
+	if (free_buf) {
+		/* Y channel */
+		vfe31_put_ch_addr(ping_pong,
+			vfe31_ctrl->outpath.out1.ch0,
+			free_buf->paddr + free_buf->y_off);
+		/* Chroma channel */
+		vfe31_put_ch_addr(ping_pong,
+			vfe31_ctrl->outpath.out1.ch1,
+			free_buf->paddr + free_buf->cbcr_off);
+		kfree(free_buf);
+	}
+	vfe_send_outmsg(MSG_ID_OUTPUT_S, pyaddr, pcbcraddr);
+
+	/* Y channel- TN Image */
+	pyaddr = vfe31_get_ch_addr(ping_pong,
+		vfe31_ctrl->outpath.out0.ch0);
+	/* Chroma channel - TN Image */
+	pcbcraddr = vfe31_get_ch_addr(ping_pong,
+		vfe31_ctrl->outpath.out0.ch1);
+
+	free_buf = vfe31_get_free_buf(&vfe31_ctrl->outpath.out0);
+	CDBG("%s: snapshot TN, pyaddr = 0x%x, pcbcraddr = 0x%x\n",
+		__func__, pyaddr, pcbcraddr);
+	if (free_buf) {
+		/* Y channel */
+		vfe31_put_ch_addr(ping_pong,
+			vfe31_ctrl->outpath.out0.ch0,
+			free_buf->paddr + free_buf->y_off);
+		/* Chroma channel */
+		vfe31_put_ch_addr(ping_pong,
+			vfe31_ctrl->outpath.out0.ch1,
+			free_buf->paddr + free_buf->cbcr_off);
+		kfree(free_buf);
+	}
+
+	vfe_send_outmsg(MSG_ID_OUTPUT_T, pyaddr, pcbcraddr);
+
+	/* in snapshot mode, once the capture count reaches zero,
+	 * send the snapshot done message */
+	if (vfe31_ctrl->vfe_capture_count == 0) {
+		vfe31_send_msg_no_payload(MSG_ID_SNAPSHOT_DONE);
+		/* Ensure the write order while writing
+			to the cmd register using barrier */
+		msm_io_w_mb(CAMIF_COMMAND_STOP_IMMEDIATELY,
+			vfe31_ctrl->vfebase +
+			VFE_CAMIF_COMMAND);
+	}
+}
+
+static void vfe31_process_raw_snapshot_frame(uint32_t ping_pong)
+{
+	uint32_t pyaddr, pcbcraddr;
+	struct vfe31_free_buf *free_buf = NULL;
+	struct msm_sync *p_sync = (struct msm_sync *)vfe_syncdata;
+
+	if (p_sync->stereocam_enabled)
+		p_sync->stereo_state = STEREO_RAW_SNAP_STARTED;
+
+	/* Y channel- Main Image */
+	pyaddr = vfe31_get_ch_addr(ping_pong,
+		vfe31_ctrl->outpath.out1.ch0);
+	/* Chroma channel - Main Image */
+	pcbcraddr = vfe31_get_ch_addr(ping_pong,
+		vfe31_ctrl->outpath.out1.ch1);
+
+	free_buf = vfe31_get_free_buf(&vfe31_ctrl->outpath.out1);
+	CDBG("%s: snapshot raw, pyaddr = 0x%x, pcbcraddr = 0x%x\n",
+		__func__, pyaddr, pcbcraddr);
+	if (free_buf) {
+		/* Y channel */
+		vfe31_put_ch_addr(ping_pong,
+			vfe31_ctrl->outpath.out1.ch0,
+			free_buf->paddr + free_buf->y_off);
+		/* Chroma channel */
+		vfe31_put_ch_addr(ping_pong,
+			vfe31_ctrl->outpath.out1.ch1,
+			free_buf->paddr + free_buf->cbcr_off);
+		kfree(free_buf);
+	}
+	vfe_send_outmsg(MSG_ID_OUTPUT_S, pyaddr, pcbcraddr);
+
+	/* in snapshot mode, once the capture count reaches zero,
+	 * send the snapshot done message */
+	if (vfe31_ctrl->vfe_capture_count == 0) {
+		vfe31_send_msg_no_payload(MSG_ID_SNAPSHOT_DONE);
+		/* Ensure the write order while writing
+		to the cmd register using barrier */
+		msm_io_w_mb(CAMIF_COMMAND_STOP_IMMEDIATELY,
+			vfe31_ctrl->vfebase +
+			VFE_CAMIF_COMMAND);
+	}
+}
+
+static void vfe31_process_zsl_frame(uint32_t ping_pong)
+{
+	uint32_t pyaddr, pcbcraddr;
+	struct vfe31_free_buf *free_buf = NULL;
+	/* Y channel- Main Image */
+	pyaddr = vfe31_get_ch_addr(ping_pong,
+		vfe31_ctrl->outpath.out2.ch0);
+	/* Chroma channel - Main Image */
+	pcbcraddr = vfe31_get_ch_addr(ping_pong,
+		vfe31_ctrl->outpath.out2.ch1);
+
+	free_buf = vfe31_get_free_buf(&vfe31_ctrl->outpath.out2);
+	CDBG("%s: snapshot main, pyaddr = 0x%x, pcbcraddr = 0x%x\n",
+		__func__, pyaddr, pcbcraddr);
+	if (free_buf) {
+		/* Y channel */
+		vfe31_put_ch_addr(ping_pong,
+			vfe31_ctrl->outpath.out2.ch0,
+			free_buf->paddr + free_buf->y_off);
+		/* Chroma channel */
+		vfe31_put_ch_addr(ping_pong,
+			vfe31_ctrl->outpath.out2.ch1,
+			free_buf->paddr + free_buf->cbcr_off);
+		kfree(free_buf);
+	}
+	vfe_send_outmsg(MSG_ID_OUTPUT_S, pyaddr, pcbcraddr);
+
+	/* Y channel- TN Image */
+	pyaddr = vfe31_get_ch_addr(ping_pong,
+		vfe31_ctrl->outpath.out1.ch0);
+	/* Chroma channel - TN Image */
+	pcbcraddr = vfe31_get_ch_addr(ping_pong,
+		vfe31_ctrl->outpath.out1.ch1);
+
+	free_buf = vfe31_get_free_buf(&vfe31_ctrl->outpath.out1);
+	CDBG("%s: snapshot TN, pyaddr = 0x%x, pcbcraddr = 0x%x\n",
+		__func__, pyaddr, pcbcraddr);
+	if (free_buf) {
+		/* Y channel */
+		vfe31_put_ch_addr(ping_pong,
+			vfe31_ctrl->outpath.out1.ch0,
+			free_buf->paddr + free_buf->y_off);
+		/* Chroma channel */
+		vfe31_put_ch_addr(ping_pong,
+			vfe31_ctrl->outpath.out1.ch1,
+			free_buf->paddr + free_buf->cbcr_off);
+		kfree(free_buf);
+	}
+
+	vfe_send_outmsg(MSG_ID_OUTPUT_T, pyaddr, pcbcraddr);
+}
+
+static void vfe31_process_output_path_irq_1(uint32_t ping_pong)
+{
+
+#ifdef CONFIG_MSM_CAMERA_V4L2
+	uint32_t pyaddr_ping, pcbcraddr_ping, pyaddr_pong, pcbcraddr_pong;
+#endif
+	CDBG("%s, operation_mode = %d, cap_cnt = %d\n", __func__,
+		vfe31_ctrl->operation_mode, vfe31_ctrl->vfe_capture_count);
+
+	/* In Snapshot mode */
+	if ((VFE_MODE_OF_OPERATION_SNAPSHOT == vfe31_ctrl->operation_mode)
+		&& ((vfe31_ctrl->vfe_capture_count <= 1)
+		|| (vfe31_free_buf_available(vfe31_ctrl->outpath.out0) &&
+		vfe31_free_buf_available(vfe31_ctrl->outpath.out1)))) {
+		vfe31_process_snapshot_frame(ping_pong);
+	} else if ((VFE_MODE_OF_OPERATION_RAW_SNAPSHOT ==
+		vfe31_ctrl->operation_mode) &&
+		((vfe31_ctrl->vfe_capture_count <= 1) ||
+		vfe31_free_buf_available(vfe31_ctrl->outpath.out1))) {
+		vfe31_process_raw_snapshot_frame(ping_pong);
+	} else if ((VFE_MODE_OF_OPERATION_ZSL == vfe31_ctrl->operation_mode)
+		&& (vfe31_free_buf_available(vfe31_ctrl->outpath.out1)
+		&& vfe31_free_buf_available(vfe31_ctrl->outpath.out2))) {
+		vfe31_process_zsl_frame(ping_pong);
+	} else {
+		vfe31_ctrl->outpath.out1.frame_drop_cnt++;
+		pr_info("path_irq_1 - no free buffer!\n");
+#ifdef CONFIG_MSM_CAMERA_V4L2
+		pr_info("Swapping ping and pong\n");
+
+		/*get addresses*/
+		/* Y channel */
+		pyaddr_ping = vfe31_get_ch_ping_addr(
+			vfe31_ctrl->outpath.out1.ch0);
+		/* Chroma channel */
+		pcbcraddr_ping = vfe31_get_ch_ping_addr(
+			vfe31_ctrl->outpath.out1.ch1);
+		/* Y channel */
+		pyaddr_pong = vfe31_get_ch_pong_addr(
+			vfe31_ctrl->outpath.out1.ch0);
+		/* Chroma channel */
+		pcbcraddr_pong = vfe31_get_ch_pong_addr(
+			vfe31_ctrl->outpath.out1.ch1);
+
+		CDBG("ping = 0x%p, pong = 0x%p\n", (void *)pyaddr_ping,
+			(void *)pyaddr_pong);
+		CDBG("ping_cbcr = 0x%p, pong_cbcr = 0x%p\n",
+			(void *)pcbcraddr_ping, (void *)pcbcraddr_pong);
+
+		/*put addresses*/
+		/* SWAP y channel*/
+		vfe31_put_ch_ping_addr(vfe31_ctrl->outpath.out1.ch0,
+			pyaddr_pong);
+		vfe31_put_ch_pong_addr(vfe31_ctrl->outpath.out1.ch0,
+			pyaddr_ping);
+		/* SWAP chroma channel*/
+		vfe31_put_ch_ping_addr(vfe31_ctrl->outpath.out1.ch1,
+			pcbcraddr_pong);
+		vfe31_put_ch_pong_addr(vfe31_ctrl->outpath.out1.ch1,
+			pcbcraddr_ping);
+		CDBG("after swap: ping = 0x%p, pong = 0x%p\n",
+			(void *)pyaddr_pong, (void *)pyaddr_ping);
+#endif
+	}
+
+}
+
+static void vfe31_process_output_path_irq_2(uint32_t ping_pong)
+{
+	uint32_t pyaddr, pcbcraddr;
+	struct vfe31_free_buf *free_buf = NULL;
+
+#ifdef CONFIG_MSM_CAMERA_V4L2
+	uint32_t pyaddr_ping, pcbcraddr_ping, pyaddr_pong, pcbcraddr_pong;
+#endif
+	/*
+	 * We render a frame only in continuous mode, and only when a free
+	 * buffer is available.
+	 */
+	CDBG("%s, operation_mode = %d, state %d\n", __func__,
+		vfe31_ctrl->operation_mode,
+		vfe31_ctrl->recording_state);
+	/* Ensure that both wm1 and wm5 ping and pong buffers are active*/
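+	/* 0x22 covers bits 1 and 5 (WM1/WM5); both must be on the same
+	 * half, i.e. both ping or both pong. */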
+	if (!(((ping_pong & 0x22) == 0x22) ||
+		((ping_pong & 0x22) == 0x0))) {
+		pr_err("Irq_2 - skipping frame, pp_status is not proper, "
+			"PP_status = 0x%x\n", ping_pong);
+		return;
+	}
+	if (vfe31_ctrl->recording_state == VFE_REC_STATE_STOPPED) {
+		vfe31_ctrl->outpath.out2.frame_drop_cnt++;
+		pr_warning("path_irq_2 - recording stopped\n");
+		return;
+	}
+
+	free_buf = vfe31_get_free_buf(&vfe31_ctrl->outpath.out2);
+
+	if (free_buf) {
+		/* Y channel */
+		pyaddr = vfe31_get_ch_addr(ping_pong,
+			vfe31_ctrl->outpath.out2.ch0);
+		/* Chroma channel */
+		pcbcraddr = vfe31_get_ch_addr(ping_pong,
+			vfe31_ctrl->outpath.out2.ch1);
+
+		CDBG("video output, pyaddr = 0x%x, pcbcraddr = 0x%x\n",
+			pyaddr, pcbcraddr);
+
+		/* Y channel */
+		vfe31_put_ch_addr(ping_pong,
+		vfe31_ctrl->outpath.out2.ch0,
+		free_buf->paddr + free_buf->y_off);
+		/* Chroma channel */
+		vfe31_put_ch_addr(ping_pong,
+		vfe31_ctrl->outpath.out2.ch1,
+		free_buf->paddr + free_buf->cbcr_off);
+		kfree(free_buf);
+		vfe_send_outmsg(MSG_ID_OUTPUT_V, pyaddr, pcbcraddr);
+	} else {
+		vfe31_ctrl->outpath.out2.frame_drop_cnt++;
+		pr_warning("path_irq_2 - no free buffer!\n");
+
+#ifdef CONFIG_MSM_CAMERA_V4L2
+		pr_info("Swapping ping and pong\n");
+
+		/*get addresses*/
+		/* Y channel */
+		pyaddr_ping = vfe31_get_ch_ping_addr(
+			vfe31_ctrl->outpath.out2.ch0);
+		/* Chroma channel */
+		pcbcraddr_ping = vfe31_get_ch_ping_addr(
+			vfe31_ctrl->outpath.out2.ch1);
+		/* Y channel */
+		pyaddr_pong = vfe31_get_ch_pong_addr(
+			vfe31_ctrl->outpath.out2.ch0);
+		/* Chroma channel */
+		pcbcraddr_pong = vfe31_get_ch_pong_addr(
+			vfe31_ctrl->outpath.out2.ch1);
+
+		CDBG("ping = 0x%p, pong = 0x%p\n", (void *)pyaddr_ping,
+			(void *)pyaddr_pong);
+		CDBG("ping_cbcr = 0x%p, pong_cbcr = 0x%p\n",
+			(void *)pcbcraddr_ping, (void *)pcbcraddr_pong);
+
+		/*put addresses*/
+		/* SWAP y channel*/
+		vfe31_put_ch_ping_addr(vfe31_ctrl->outpath.out2.ch0,
+			pyaddr_pong);
+		vfe31_put_ch_pong_addr(vfe31_ctrl->outpath.out2.ch0,
+			pyaddr_ping);
+		/* SWAP chroma channel*/
+		vfe31_put_ch_ping_addr(vfe31_ctrl->outpath.out2.ch1,
+			pcbcraddr_pong);
+		vfe31_put_ch_pong_addr(vfe31_ctrl->outpath.out2.ch1,
+			pcbcraddr_ping);
+		CDBG("after swap: ping = 0x%p, pong = 0x%p\n",
+			(void *)pyaddr_pong, (void *)pyaddr_ping);
+#endif
+	}
+}
+
+static uint32_t vfe31_process_stats_irq_common(uint32_t statsNum,
+						uint32_t newAddr)
+{
+
+	uint32_t pingpongStatus;
+	uint32_t returnAddr;
+	uint32_t pingpongAddr;
+
+	/* stats ping/pong bits start at bit 7; result: 0 = ping, 1 = pong */
+	pingpongStatus =
+		((msm_io_r(vfe31_ctrl->vfebase +
+		VFE_BUS_PING_PONG_STATUS))
+		& ((uint32_t)(1 << (statsNum + 7)))) >> (statsNum + 7);
+	CDBG("statsNum %d, pingpongStatus %d\n", statsNum, pingpongStatus);
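+	/*
+	 * Each stats write master owns three consecutive 32-bit registers
+	 * starting at VFE_BUS_STATS_PING_PONG_BASE: ping address, pong
+	 * address and UB config; (1 - pingpongStatus) selects the buffer
+	 * register opposite to the current status.
+	 */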
+	pingpongAddr =
+		((uint32_t)(vfe31_ctrl->vfebase +
+				VFE_BUS_STATS_PING_PONG_BASE)) +
+				(3*statsNum)*4 + (1-pingpongStatus)*4;
+	returnAddr = msm_io_r((uint32_t *)pingpongAddr);
+	msm_io_w(newAddr, (uint32_t *)pingpongAddr);
+	return returnAddr;
+}
+
+static void vfe_send_stats_msg(void)
+{
+	struct vfe_message msg;
+	/* fill message with right content. */
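+	/* one MSG_ID_COMMON message carries every stats buffer completed
+	 * this frame; fields left at 0 were not updated. */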
+	msg._u.msgStats.frameCounter = vfe31_ctrl->vfeFrameId;
+	msg._u.msgStats.status_bits = vfe31_ctrl->status_bits;
+	msg._d = MSG_ID_COMMON;
+
+	msg._u.msgStats.buff.aec = vfe31_ctrl->aecStatsControl.bufToRender;
+	msg._u.msgStats.buff.awb = vfe31_ctrl->awbStatsControl.bufToRender;
+	msg._u.msgStats.buff.af = vfe31_ctrl->afStatsControl.bufToRender;
+
+	msg._u.msgStats.buff.ihist = vfe31_ctrl->ihistStatsControl.bufToRender;
+	msg._u.msgStats.buff.rs = vfe31_ctrl->rsStatsControl.bufToRender;
+	msg._u.msgStats.buff.cs = vfe31_ctrl->csStatsControl.bufToRender;
+
+	vfe31_proc_ops(msg._d,
+		&msg, sizeof(struct vfe_message));
+}
+
+static void vfe31_process_stats(void)
+{
+	int32_t process_stats = false;
+
+	CDBG("%s, stats = 0x%x\n", __func__, vfe31_ctrl->status_bits);
+
+	if (vfe31_ctrl->status_bits & VFE_IRQ_STATUS0_STATS_AEC) {
+		if (!vfe31_ctrl->aec_ack_pending) {
+			vfe31_ctrl->aec_ack_pending = TRUE;
+			vfe31_ctrl->aecStatsControl.bufToRender =
+				vfe31_process_stats_irq_common(statsAeNum,
+				vfe31_ctrl->aecStatsControl.nextFrameAddrBuf);
+			process_stats = true;
+		} else {
+			vfe31_ctrl->aecStatsControl.bufToRender = 0;
+			vfe31_ctrl->aecStatsControl.droppedStatsFrameCount++;
+		}
+	} else {
+		vfe31_ctrl->aecStatsControl.bufToRender = 0;
+	}
+
+	if (vfe31_ctrl->status_bits & VFE_IRQ_STATUS0_STATS_AWB) {
+		if (!vfe31_ctrl->awb_ack_pending) {
+			vfe31_ctrl->awb_ack_pending = TRUE;
+			vfe31_ctrl->awbStatsControl.bufToRender =
+				vfe31_process_stats_irq_common(statsAwbNum,
+				vfe31_ctrl->awbStatsControl.nextFrameAddrBuf);
+			process_stats = true;
+		} else {
+			vfe31_ctrl->awbStatsControl.droppedStatsFrameCount++;
+			vfe31_ctrl->awbStatsControl.bufToRender = 0;
+		}
+	} else {
+		vfe31_ctrl->awbStatsControl.bufToRender = 0;
+	}
+
+
+	if (vfe31_ctrl->status_bits & VFE_IRQ_STATUS0_STATS_AF) {
+		if (!vfe31_ctrl->af_ack_pending) {
+			vfe31_ctrl->af_ack_pending = TRUE;
+			vfe31_ctrl->afStatsControl.bufToRender =
+				vfe31_process_stats_irq_common(statsAfNum,
+				vfe31_ctrl->afStatsControl.nextFrameAddrBuf);
+			process_stats = true;
+		} else {
+			vfe31_ctrl->afStatsControl.bufToRender = 0;
+			vfe31_ctrl->afStatsControl.droppedStatsFrameCount++;
+		}
+	} else {
+		vfe31_ctrl->afStatsControl.bufToRender = 0;
+	}
+
+	if (vfe31_ctrl->status_bits & VFE_IRQ_STATUS0_STATS_IHIST) {
+		if (!vfe31_ctrl->ihist_ack_pending) {
+			vfe31_ctrl->ihist_ack_pending = TRUE;
+			vfe31_ctrl->ihistStatsControl.bufToRender =
+				vfe31_process_stats_irq_common(statsIhistNum,
+				vfe31_ctrl->ihistStatsControl.nextFrameAddrBuf);
+			process_stats = true;
+		} else {
+			vfe31_ctrl->ihistStatsControl.droppedStatsFrameCount++;
+			vfe31_ctrl->ihistStatsControl.bufToRender = 0;
+		}
+	} else {
+		vfe31_ctrl->ihistStatsControl.bufToRender = 0;
+	}
+
+	if (vfe31_ctrl->status_bits & VFE_IRQ_STATUS0_STATS_RS) {
+		if (!vfe31_ctrl->rs_ack_pending) {
+			vfe31_ctrl->rs_ack_pending = TRUE;
+			vfe31_ctrl->rsStatsControl.bufToRender =
+				vfe31_process_stats_irq_common(statsRsNum,
+				vfe31_ctrl->rsStatsControl.nextFrameAddrBuf);
+			process_stats = true;
+		} else {
+			vfe31_ctrl->rsStatsControl.droppedStatsFrameCount++;
+			vfe31_ctrl->rsStatsControl.bufToRender = 0;
+		}
+	} else {
+		vfe31_ctrl->rsStatsControl.bufToRender = 0;
+	}
+
+
+	if (vfe31_ctrl->status_bits & VFE_IRQ_STATUS0_STATS_CS) {
+		if (!vfe31_ctrl->cs_ack_pending) {
+			vfe31_ctrl->cs_ack_pending = TRUE;
+			vfe31_ctrl->csStatsControl.bufToRender =
+				vfe31_process_stats_irq_common(statsCsNum,
+				vfe31_ctrl->csStatsControl.nextFrameAddrBuf);
+			process_stats = true;
+		} else {
+			vfe31_ctrl->csStatsControl.droppedStatsFrameCount++;
+			vfe31_ctrl->csStatsControl.bufToRender = 0;
+		}
+	} else {
+		vfe31_ctrl->csStatsControl.bufToRender = 0;
+	}
+
+	if (process_stats)
+		vfe_send_stats_msg();
+
+}
+
+static void vfe31_process_stats_irq(uint32_t *irqstatus)
+{
+	/* Subsample the stats according to the hfr speed*/
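+	/* hfr_mode doubles as the skip ratio: stats are processed only on
+	 * every hfr_mode-th frame. */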
+	if ((vfe31_ctrl->hfr_mode != HFR_MODE_OFF) &&
+		(vfe31_ctrl->vfeFrameId % vfe31_ctrl->hfr_mode != 0)) {
+		CDBG("Skip the stats when HFR enabled\n");
+		return;
+	}
+
+	vfe31_ctrl->status_bits = VFE_COM_STATUS & *irqstatus;
+	vfe31_process_stats();
+}
+
+static void vfe31_do_tasklet(unsigned long data)
+{
+	unsigned long flags;
+
+	struct vfe31_isr_queue_cmd *qcmd = NULL;
+
+	CDBG("=== vfe31_do_tasklet start ===\n");
+
+	while (atomic_read(&irq_cnt)) {
+		spin_lock_irqsave(&vfe31_ctrl->tasklet_lock, flags);
+		qcmd = list_first_entry(&vfe31_ctrl->tasklet_q,
+			struct vfe31_isr_queue_cmd, list);
+		atomic_sub(1, &irq_cnt);
+
+		if (!qcmd) {
+			spin_unlock_irqrestore(&vfe31_ctrl->tasklet_lock,
+				flags);
+			return;
+		}
+
+		list_del(&qcmd->list);
+		spin_unlock_irqrestore(&vfe31_ctrl->tasklet_lock,
+			flags);
+
+		/* interrupt to be processed,  *qcmd has the payload.  */
+		if (qcmd->vfeInterruptStatus0 &
+			VFE_IRQ_STATUS0_REG_UPDATE_MASK) {
+			CDBG("irq regUpdateIrq\n");
+			vfe31_process_reg_update_irq();
+		}
+
+		if (qcmd->vfeInterruptStatus1 &
+			VFE_IMASK_RESET) {
+			CDBG("irq resetAckIrq\n");
+			vfe31_process_reset_irq();
+		}
+
+
+		if (qcmd->vfeInterruptStatus1 &
+			VFE_IMASK_AXI_HALT) {
+			CDBG("irq axi halt irq\n");
+			vfe31_process_axi_halt_irq();
+		}
+
+		if (atomic_read(&vfe31_ctrl->vstate)) {
+			if (qcmd->vfeInterruptStatus1 &
+					VFE31_IMASK_ERROR_ONLY_1) {
+				pr_err("irq errorIrq\n");
+				vfe31_process_error_irq(
+					qcmd->vfeInterruptStatus1 &
+					VFE31_IMASK_ERROR_ONLY_1);
+			}
+
+			/* irqs below are only valid when in active state. */
+			/* next, check output path related interrupts. */
+			if (qcmd->vfeInterruptStatus0 &
+				VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE0_MASK) {
+				CDBG("Image composite done 0 irq occurred.\n");
+				vfe31_process_output_path_irq_0(
+					qcmd->vfePingPongStatus);
+			}
+
+			if (qcmd->vfeInterruptStatus0 &
+				VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE1_MASK) {
+				CDBG("Image composite done 1 irq occurred.\n");
+				vfe31_process_output_path_irq_1(
+					qcmd->vfePingPongStatus);
+			}
+
+			if (qcmd->vfeInterruptStatus0 &
+				VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE2_MASK) {
+				CDBG("Image composite done 2 irq occurred.\n");
+				vfe31_process_output_path_irq_2(
+					qcmd->vfePingPongStatus);
+			}
+
+			/* then process stats irq. */
+			if (vfe31_ctrl->stats_comp) {
+				/* process stats comb interrupt. */
+				if (qcmd->vfeInterruptStatus0 &
+					VFE_IRQ_STATUS0_STATS_COMPOSIT_MASK) {
+					CDBG("Stats composite irq occurred.\n");
+					vfe31_process_stats_irq(
+						&qcmd->vfeInterruptStatus0);
+				}
+			} else {
+				/* process individual stats interrupt. */
+				if (qcmd->vfeInterruptStatus0 &
+						VFE_COM_STATUS) {
+					CDBG("VFE stats irq occurred.\n");
+					vfe31_process_stats_irq(
+						&qcmd->vfeInterruptStatus0);
+				}
+				if (qcmd->vfeInterruptStatus0 &
+						VFE_IRQ_STATUS0_SYNC_TIMER0) {
+					CDBG("SYNC_TIMER 0 irq occurred.\n");
+					vfe31_send_msg_no_payload(
+						MSG_ID_SYNC_TIMER0_DONE);
+				}
+				if (qcmd->vfeInterruptStatus0 &
+						VFE_IRQ_STATUS0_SYNC_TIMER1) {
+					CDBG("SYNC_TIMER 1 irq occurred.\n");
+					vfe31_send_msg_no_payload(
+						MSG_ID_SYNC_TIMER1_DONE);
+				}
+				if (qcmd->vfeInterruptStatus0 &
+						VFE_IRQ_STATUS0_SYNC_TIMER2) {
+					CDBG("SYNC_TIMER 2 irq occurred.\n");
+					vfe31_send_msg_no_payload(
+						MSG_ID_SYNC_TIMER2_DONE);
+				}
+			}
+		}
+		if (qcmd->vfeInterruptStatus0 &
+				VFE_IRQ_STATUS0_CAMIF_SOF_MASK) {
+			CDBG("irq camifSofIrq\n");
+			vfe31_process_camif_sof_irq();
+		}
+		kfree(qcmd);
+	}
+	CDBG("=== vfe31_do_tasklet end ===\n");
+}
+
+DECLARE_TASKLET(vfe31_tasklet, vfe31_do_tasklet, 0);
+
+static irqreturn_t vfe31_parse_irq(int irq_num, void *data)
+{
+	unsigned long flags;
+	struct vfe31_irq_status irq;
+	struct vfe31_isr_queue_cmd *qcmd;
+	uint32_t *val;
+	CDBG("vfe_parse_irq\n");
+	memset(&irq, 0, sizeof(struct vfe31_irq_status));
+
+	val = (uint32_t *)(vfe31_ctrl->vfebase + VFE_IRQ_STATUS_0);
+	irq.vfeIrqStatus0 = msm_io_r(val);
+
+	val = (uint32_t *)(vfe31_ctrl->vfebase + VFE_IRQ_STATUS_1);
+	irq.vfeIrqStatus1 = msm_io_r(val);
+
+	if (irq.vfeIrqStatus1 & VFE_IMASK_AXI_HALT) {
+		msm_io_w(VFE_IMASK_RESET, vfe31_ctrl->vfebase + VFE_IRQ_MASK_1);
+
+		msm_io_w_mb(AXI_HALT_CLEAR,
+			vfe31_ctrl->vfebase + VFE_AXI_CMD);
+	}
+
+	val = (uint32_t *)(vfe31_ctrl->vfebase + VFE_CAMIF_STATUS);
+	irq.camifStatus = msm_io_r(val);
+	CDBG("camifStatus  = 0x%x\n", irq.camifStatus);
+
+	val = (uint32_t *)(vfe31_ctrl->vfebase + VFE_BUS_PING_PONG_STATUS);
+	irq.vfePingPongStatus = msm_io_r(val);
+
+	/* clear the pending interrupt of the same kind.*/
+	msm_io_w(irq.vfeIrqStatus0, vfe31_ctrl->vfebase + VFE_IRQ_CLEAR_0);
+	msm_io_w(irq.vfeIrqStatus1, vfe31_ctrl->vfebase + VFE_IRQ_CLEAR_1);
+
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_io_w_mb(1, vfe31_ctrl->vfebase + VFE_IRQ_CMD);
+
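+	/* both status registers are zero: most likely a spurious or
+	 * already-cleared interrupt, nothing to queue. */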
+	if ((irq.vfeIrqStatus0 == 0) && (irq.vfeIrqStatus1 == 0)) {
+		CDBG("vfe_parse_irq: vfeIrqStatus0 & 1 are both 0!\n");
+		return IRQ_HANDLED;
+	}
+
+	qcmd = kzalloc(sizeof(struct vfe31_isr_queue_cmd),
+		GFP_ATOMIC);
+	if (!qcmd) {
+		pr_err("vfe_parse_irq: qcmd malloc failed!\n");
+		return IRQ_HANDLED;
+	}
+
+	if (atomic_read(&vfe31_ctrl->stop_ack_pending)) {
+		irq.vfeIrqStatus0 &= VFE_IMASK_WHILE_STOPPING_0;
+		irq.vfeIrqStatus1 &= vfe31_ctrl->while_stopping_mask;
+	}
+
+	spin_lock_irqsave(&vfe31_ctrl->xbar_lock, flags);
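+	/* a pending crossbar (XBAR) update is applied only at CAMIF
+	 * end-of-frame, i.e. on a frame boundary. */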
+	if ((irq.vfeIrqStatus0 &
+		VFE_IRQ_STATUS0_CAMIF_EOF_MASK) &&
+		vfe31_ctrl->xbar_update_pending) {
+		CDBG("irq camifEofIrq\n");
+		msm_io_memcpy(vfe31_ctrl->vfebase + V31_XBAR_CFG_OFF,
+			(void *)vfe31_ctrl->xbar_cfg, V31_XBAR_CFG_LEN);
+		vfe31_ctrl->xbar_update_pending = 0;
+	}
+	spin_unlock_irqrestore(&vfe31_ctrl->xbar_lock, flags);
+	CDBG("vfe_parse_irq: Irq_status0 = 0x%x, Irq_status1 = 0x%x.\n",
+		irq.vfeIrqStatus0, irq.vfeIrqStatus1);
+
+	qcmd->vfeInterruptStatus0 = irq.vfeIrqStatus0;
+	qcmd->vfeInterruptStatus1 = irq.vfeIrqStatus1;
+	qcmd->vfePingPongStatus = irq.vfePingPongStatus;
+
+	spin_lock_irqsave(&vfe31_ctrl->tasklet_lock, flags);
+	list_add_tail(&qcmd->list, &vfe31_ctrl->tasklet_q);
+
+	atomic_add(1, &irq_cnt);
+	spin_unlock_irqrestore(&vfe31_ctrl->tasklet_lock, flags);
+	tasklet_schedule(&vfe31_tasklet);
+	return IRQ_HANDLED;
+}
+
+static void vfe31_release(struct platform_device *pdev)
+{
+	struct resource	*vfemem, *vfeio;
+
+	vfe31_reset_free_buf_queue_all();
+	CDBG("%s, free_irq\n", __func__);
+	free_irq(vfe31_ctrl->vfeirq, 0);
+	tasklet_kill(&vfe31_tasklet);
+
+	if (atomic_read(&irq_cnt))
+		pr_warning("%s, Warning IRQ Count not ZERO\n", __func__);
+
+	vfemem = vfe31_ctrl->vfemem;
+	vfeio  = vfe31_ctrl->vfeio;
+
+	msm_vpe_release();
+
+	kfree(vfe31_ctrl->extdata);
+	iounmap(vfe31_ctrl->vfebase);
+	kfree(vfe31_ctrl);
+	vfe31_ctrl = NULL;
+	release_mem_region(vfemem->start, resource_size(vfemem));
+	CDBG("%s, msm_camio_disable\n", __func__);
+	msm_camio_disable(pdev);
+	msm_camio_set_perf_lvl(S_EXIT);
+
+	vfe_syncdata = NULL;
+}
+
+static int vfe31_resource_init(struct msm_vfe_callback *presp,
+	struct platform_device *pdev, void *sdata)
+{
+	struct resource	*vfemem, *vfeirq, *vfeio;
+	int rc;
+	struct msm_camera_sensor_info *s_info;
+	s_info = pdev->dev.platform_data;
+
+	pdev->resource = s_info->resource;
+	pdev->num_resources = s_info->num_resources;
+
+	vfemem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!vfemem) {
+		pr_err("%s: no mem resource?\n", __func__);
+		return -ENODEV;
+	}
+
+	vfeirq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!vfeirq) {
+		pr_err("%s: no irq resource?\n", __func__);
+		return -ENODEV;
+	}
+
+	vfeio = request_mem_region(vfemem->start,
+		resource_size(vfemem), pdev->name);
+	if (!vfeio) {
+		pr_err("%s: VFE region already claimed\n", __func__);
+		return -EBUSY;
+	}
+
+	vfe31_ctrl = kzalloc(sizeof(struct vfe31_ctrl_type), GFP_KERNEL);
+	if (!vfe31_ctrl) {
+		rc = -ENOMEM;
+		goto cmd_init_failed1;
+	}
+
+	vfe31_ctrl->vfeirq = vfeirq->start;
+
+	vfe31_ctrl->vfebase = ioremap(vfemem->start, resource_size(vfemem));
+	if (!vfe31_ctrl->vfebase) {
+		rc = -ENOMEM;
+		pr_err("%s: vfe ioremap failed\n", __func__);
+		goto cmd_init_failed2;
+	}
+
+	if (presp && presp->vfe_resp)
+		vfe31_ctrl->resp = presp;
+	else {
+		rc = -EINVAL;
+		goto cmd_init_failed3;
+	}
+
+	vfe31_ctrl->extdata =
+		kmalloc(sizeof(struct vfe31_frame_extra), GFP_KERNEL);
+	if (!vfe31_ctrl->extdata) {
+		rc = -ENOMEM;
+		goto cmd_init_failed3;
+	}
+
+	vfe31_ctrl->extlen = sizeof(struct vfe31_frame_extra);
+
+	spin_lock_init(&vfe31_ctrl->io_lock);
+	spin_lock_init(&vfe31_ctrl->update_ack_lock);
+	spin_lock_init(&vfe31_ctrl->tasklet_lock);
+
+	INIT_LIST_HEAD(&vfe31_ctrl->tasklet_q);
+	vfe31_init_free_buf_queue();
+
+	vfe31_ctrl->syncdata = sdata;
+	vfe31_ctrl->vfemem = vfemem;
+	vfe31_ctrl->vfeio  = vfeio;
+	vfe31_ctrl->update_gamma = false;
+	vfe31_ctrl->update_luma = false;
+	vfe31_ctrl->s_info = s_info;
+	vfe31_ctrl->stats_comp = 0;
+	vfe31_ctrl->hfr_mode = HFR_MODE_OFF;
+	return 0;
+
+cmd_init_failed3:
+	/* the irq is only requested later, in vfe31_init() */
+	iounmap(vfe31_ctrl->vfebase);
+cmd_init_failed2:
+	kfree(vfe31_ctrl);
+cmd_init_failed1:
+	release_mem_region(vfemem->start, resource_size(vfemem));
+	return rc;
+}
+
+static int vfe31_init(struct msm_vfe_callback *presp,
+	struct platform_device *pdev)
+{
+	int rc = 0;
+	struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+
+	camio_clk = camdev->ioclk;
+
+	rc = vfe31_resource_init(presp, pdev, vfe_syncdata);
+	if (rc < 0)
+		return rc;
+	/* Bring up all the required GPIOs and Clocks */
+	rc = msm_camio_enable(pdev);
+	msm_camio_set_perf_lvl(S_INIT);
+	if (msm_vpe_open() < 0)
+		CDBG("%s: vpe_open failed\n", __func__);
+
+	/* TO DO: Need to release the VFE resources */
+	rc = request_irq(vfe31_ctrl->vfeirq, vfe31_parse_irq,
+			IRQF_TRIGGER_RISING, "vfe", 0);
+
+	return rc;
+}
+
+void msm_camvfe_fn_init(struct msm_camvfe_fn *fptr, void *data)
+{
+	fptr->vfe_init    = vfe31_init;
+	fptr->vfe_enable  = vfe31_enable;
+	fptr->vfe_config  = vfe31_config;
+	fptr->vfe_disable = vfe31_disable;
+	fptr->vfe_release = vfe31_release;
+	fptr->vfe_stop = vfe31_stop;
+	vfe_syncdata = data;
+}
+
+void msm_camvpe_fn_init(struct msm_camvpe_fn *fptr, void *data)
+{
+	fptr->vpe_reg		= msm_vpe_reg;
+	fptr->send_frame_to_vpe	= msm_send_frame_to_vpe;
+	fptr->vpe_config	= msm_vpe_config;
+	fptr->vpe_cfg_update	= msm_vpe_cfg_update;
+	fptr->dis		= &(vpe_ctrl->dis_en);
+	fptr->vpe_cfg_offset = msm_vpe_offset_update;
+	vpe_ctrl->syncdata = data;
+}
diff --git a/drivers/media/video/msm/msm_vfe31.h b/drivers/media/video/msm/msm_vfe31.h
new file mode 100644
index 0000000..e3c06ee
--- /dev/null
+++ b/drivers/media/video/msm/msm_vfe31.h
@@ -0,0 +1,1113 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MSM_VFE31_H__
+#define __MSM_VFE31_H__
+
+#define TRUE  1
+#define FALSE 0
+
+/* at start of camif,  bit 1:0 = 0x01:enable
+ * image data capture at frame boundary. */
+#define CAMIF_COMMAND_START  0x00000005
+
+/* bit 2= 0x1:clear the CAMIF_STATUS register
+ * value. */
+#define CAMIF_COMMAND_CLEAR  0x00000004
+
+/* at stop of vfe pipeline, for now it is assumed
+ * that camif will stop at any time. Bit 1:0 = 0x10:
+ * disable image data capture immediately. */
+#define CAMIF_COMMAND_STOP_IMMEDIATELY  0x00000002
+
+/* at stop of vfe pipeline, for now it is assumed
+ * that camif will stop at any time. Bit 1:0 = 0x00:
+ * disable image data capture at frame boundary */
+#define CAMIF_COMMAND_STOP_AT_FRAME_BOUNDARY  0x00000000
+
+/* to halt axi bridge */
+#define AXI_HALT  0x00000001
+
+/* clear the halt bit. */
+#define AXI_HALT_CLEAR  0x00000000
+
+/* clear axi_halt_irq */
+#define MASK_AXI_HALT_IRQ	0xFF7FFFFF
+
+/* reset the pipeline when the stop command is issued
+ * (without resetting the register module). bits 26-31 = 0
+ * for domain reset, bits 0-9 = 1 for module reset, except
+ * the register module. */
+#define VFE_RESET_UPON_STOP_CMD  0x000003ef
+
+/* reset the pipeline on the reset command.
+ * bits 26-31 = 0 for domain reset, bits 0-9 = 1 for module reset. */
+#define VFE_RESET_UPON_RESET_CMD  0x000003ff
+
+/* bit 5 is the axi status idle/busy bit.
+ * 1 = halted, 0 = busy */
+#define AXI_STATUS_BUSY_MASK 0x00000020
+
+/* bit 0 & bit 1 = 1, both y and cbcr irqs need to be present
+ * for frame done interrupt */
+#define VFE_COMP_IRQ_BOTH_Y_CBCR 3
+
+/* bit 1 = 1, only cbcr irq triggers frame done interrupt */
+#define VFE_COMP_IRQ_CBCR_ONLY 2
+
+/* bit 0 = 1, only y irq triggers frame done interrupt */
+#define VFE_COMP_IRQ_Y_ONLY 1
+
+/* bit 0 = 1, PM go;   bit1 = 1, PM stop */
+#define VFE_PERFORMANCE_MONITOR_GO   0x00000001
+#define VFE_PERFORMANCE_MONITOR_STOP 0x00000002
+
+/* bit 0 = 1, test gen go;   bit1 = 1, test gen stop */
+#define VFE_TEST_GEN_GO   0x00000001
+#define VFE_TEST_GEN_STOP 0x00000002
+
+/* the chroma is assumed to be interpolated between
+ * the luma samples.  JPEG 4:2:2 */
+#define VFE_CHROMA_UPSAMPLE_INTERPOLATED 0
+
+/* constants for irq registers */
+#define VFE_DISABLE_ALL_IRQS 0
+/* bit =1 is to clear the corresponding bit in VFE_IRQ_STATUS.  */
+#define VFE_CLEAR_ALL_IRQS   0xffffffff
+
+#define VFE_IRQ_STATUS0_CAMIF_SOF_MASK            0x00000001
+#define VFE_IRQ_STATUS0_CAMIF_EOF_MASK            0x00000004
+#define VFE_IRQ_STATUS0_REG_UPDATE_MASK           0x00000020
+#define VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE0_MASK 0x00200000
+#define VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE1_MASK 0x00400000
+#define VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE2_MASK 0x00800000
+#define VFE_IRQ_STATUS1_RESET_AXI_HALT_ACK_MASK   0x00800000
+#define VFE_IRQ_STATUS0_STATS_COMPOSIT_MASK       0x01000000
+
+#define VFE_IRQ_STATUS0_STATS_AEC     0x2000  /* bit 13 */
+#define VFE_IRQ_STATUS0_STATS_AF      0x4000  /* bit 14 */
+#define VFE_IRQ_STATUS0_STATS_AWB     0x8000  /* bit 15 */
+#define VFE_IRQ_STATUS0_STATS_RS      0x10000  /* bit 16 */
+#define VFE_IRQ_STATUS0_STATS_CS      0x20000  /* bit 17 */
+#define VFE_IRQ_STATUS0_STATS_IHIST   0x40000  /* bit 18 */
+
+#define VFE_IRQ_STATUS0_SYNC_TIMER0   0x2000000  /* bit 25 */
+#define VFE_IRQ_STATUS0_SYNC_TIMER1   0x4000000  /* bit 26 */
+#define VFE_IRQ_STATUS0_SYNC_TIMER2   0x8000000  /* bit 27 */
+#define VFE_IRQ_STATUS0_ASYNC_TIMER0  0x10000000  /* bit 28 */
+#define VFE_IRQ_STATUS0_ASYNC_TIMER1  0x20000000  /* bit 29 */
+#define VFE_IRQ_STATUS0_ASYNC_TIMER2  0x40000000  /* bit 30 */
+#define VFE_IRQ_STATUS0_ASYNC_TIMER3  0x80000000  /* bit 31 */
+
+/* imask while waiting for the stop ack: the driver has already
+ * requested stop and is waiting for the reset irq and async timer irqs.
+ * For irq_status_0, bits 28-31 are the async timers. For
+ * irq_status_1, bit 22 is the reset irq and bit 23 the axi_halt_ack irq. */
+#define VFE_IMASK_WHILE_STOPPING_0  0xF0000000
+#define VFE_IMASK_WHILE_STOPPING_1  0x00C00000
+#define VFE_IMASK_RESET             0x00400000
+#define VFE_IMASK_AXI_HALT          0x00800000
+
+
+/* no error irq in mask 0 */
+#define VFE_IMASK_ERROR_ONLY_0  0x0
+/* in the normal case, do not mask off the error status. */
+/* bits 0-21 are error irq bits */
+#define VFE_IMASK_ERROR_ONLY_1  0x003fffff
+
+/* For BPC, bit 0, bits 12-17 and bits 20-26 are set to zero;
+ * all others are 1 */
+#define BPC_MASK 0xF80C0FFE
+
+/* For ABF, bits 1 and 2 are set to zero; all others are 1 */
+#define ABF_MASK 0xFFFFFFF9
+
+/* For MCE enable, bit 28 is set to zero; all others are 1 */
+#define MCE_EN_MASK 0xEFFFFFFF
+
+/* For MCE Q_K, bits 28 to 31 are set to zero; all others are 1 */
+#define MCE_Q_K_MASK 0x0FFFFFFF
+
+#define AWB_ENABLE_MASK 0x00000080     /* bit 7 */
+#define AF_ENABLE_MASK 0x00000040      /* bit 6 */
+#define AE_ENABLE_MASK 0x00000020      /* bit 5 */
+#define IHIST_ENABLE_MASK 0x00008000   /* bit 15 */
+#define RS_ENABLE_MASK 0x00000100      /* bit 8  */
+#define CS_ENABLE_MASK 0x00000200      /* bit 9  */
+#define RS_CS_ENABLE_MASK 0x00000300   /* bit 8,9  */
+#define STATS_ENABLE_MASK 0x000483E0   /* bit 18,15,9,8,7,6,5*/
+
+#define VFE_REG_UPDATE_TRIGGER           1
+#define VFE_PM_BUF_MAX_CNT_MASK          0xFF
+#define VFE_DMI_CFG_DEFAULT              0x00000100
+#define LENS_ROLL_OFF_DELTA_TABLE_OFFSET 32
+#define VFE_AE_PINGPONG_STATUS_BIT       0x80
+#define VFE_AF_PINGPONG_STATUS_BIT       0x100
+#define VFE_AWB_PINGPONG_STATUS_BIT      0x200
+
+#define HFR_MODE_OFF 1
+
+enum VFE31_DMI_RAM_SEL {
+	 NO_MEM_SELECTED          = 0,
+	 ROLLOFF_RAM              = 0x1,
+	 RGBLUT_RAM_CH0_BANK0     = 0x2,
+	 RGBLUT_RAM_CH0_BANK1     = 0x3,
+	 RGBLUT_RAM_CH1_BANK0     = 0x4,
+	 RGBLUT_RAM_CH1_BANK1     = 0x5,
+	 RGBLUT_RAM_CH2_BANK0     = 0x6,
+	 RGBLUT_RAM_CH2_BANK1     = 0x7,
+	 STATS_HIST_RAM           = 0x8,
+	 RGBLUT_CHX_BANK0         = 0x9,
+	 RGBLUT_CHX_BANK1         = 0xa,
+	 LUMA_ADAPT_LUT_RAM_BANK0 = 0xb,
+	 LUMA_ADAPT_LUT_RAM_BANK1 = 0xc
+};
+
+enum  VFE_STATE {
+	VFE_STATE_IDLE,
+	VFE_STATE_ACTIVE
+};
+
+enum  vfe_recording_state {
+	VFE_REC_STATE_IDLE,
+	VFE_REC_STATE_START_REQUESTED,
+	VFE_REC_STATE_STARTED,
+	VFE_REC_STATE_STOP_REQUESTED,
+	VFE_REC_STATE_STOPPED,
+};
+
+#define V31_DUMMY_0               0
+#define V31_SET_CLK               1
+#define V31_RESET                 2
+#define V31_START                 3
+#define V31_TEST_GEN_START        4
+#define V31_OPERATION_CFG         5
+#define V31_AXI_OUT_CFG           6
+#define V31_CAMIF_CFG             7
+#define V31_AXI_INPUT_CFG         8
+#define V31_BLACK_LEVEL_CFG       9
+#define V31_ROLL_OFF_CFG          10
+#define V31_DEMUX_CFG             11
+#define V31_DEMOSAIC_0_CFG        12 /* general */
+#define V31_DEMOSAIC_1_CFG        13 /* ABF     */
+#define V31_DEMOSAIC_2_CFG        14 /* BPC     */
+#define V31_FOV_CFG               15
+#define V31_MAIN_SCALER_CFG       16
+#define V31_WB_CFG                17
+#define V31_COLOR_COR_CFG         18
+#define V31_RGB_G_CFG             19
+#define V31_LA_CFG                20
+#define V31_CHROMA_EN_CFG         21
+#define V31_CHROMA_SUP_CFG        22
+#define V31_MCE_CFG               23
+#define V31_SK_ENHAN_CFG          24
+#define V31_ASF_CFG               25
+#define V31_S2Y_CFG               26
+#define V31_S2CbCr_CFG            27
+#define V31_CHROMA_SUBS_CFG       28
+#define V31_OUT_CLAMP_CFG         29
+#define V31_FRAME_SKIP_CFG        30
+#define V31_DUMMY_1               31
+#define V31_DUMMY_2               32
+#define V31_DUMMY_3               33
+#define V31_UPDATE                34
+#define V31_BL_LVL_UPDATE         35
+#define V31_DEMUX_UPDATE          36
+#define V31_DEMOSAIC_1_UPDATE     37 /* BPC */
+#define V31_DEMOSAIC_2_UPDATE     38 /* ABF */
+#define V31_FOV_UPDATE            39
+#define V31_MAIN_SCALER_UPDATE    40
+#define V31_WB_UPDATE             41
+#define V31_COLOR_COR_UPDATE      42
+#define V31_RGB_G_UPDATE          43
+#define V31_LA_UPDATE             44
+#define V31_CHROMA_EN_UPDATE      45
+#define V31_CHROMA_SUP_UPDATE     46
+#define V31_MCE_UPDATE            47
+#define V31_SK_ENHAN_UPDATE       48
+#define V31_S2CbCr_UPDATE         49
+#define V31_S2Y_UPDATE            50
+#define V31_ASF_UPDATE            51
+#define V31_FRAME_SKIP_UPDATE     52
+#define V31_CAMIF_FRAME_UPDATE    53
+#define V31_STATS_AF_UPDATE       54
+#define V31_STATS_AE_UPDATE       55
+#define V31_STATS_AWB_UPDATE      56
+#define V31_STATS_RS_UPDATE       57
+#define V31_STATS_CS_UPDATE       58
+#define V31_STATS_SKIN_UPDATE     59
+#define V31_STATS_IHIST_UPDATE    60
+#define V31_DUMMY_4               61
+#define V31_EPOCH1_ACK            62
+#define V31_EPOCH2_ACK            63
+#define V31_START_RECORDING       64
+#define V31_STOP_RECORDING        65
+#define V31_DUMMY_5               66
+#define V31_DUMMY_6               67
+#define V31_CAPTURE               68
+#define V31_DUMMY_7               69
+#define V31_STOP                  70
+#define V31_GET_HW_VERSION        71
+#define V31_GET_FRAME_SKIP_COUNTS 72
+#define V31_OUTPUT1_BUFFER_ENQ    73
+#define V31_OUTPUT2_BUFFER_ENQ    74
+#define V31_OUTPUT3_BUFFER_ENQ    75
+#define V31_JPEG_OUT_BUF_ENQ      76
+#define V31_RAW_OUT_BUF_ENQ       77
+#define V31_RAW_IN_BUF_ENQ        78
+#define V31_STATS_AF_ENQ          79
+#define V31_STATS_AE_ENQ          80
+#define V31_STATS_AWB_ENQ         81
+#define V31_STATS_RS_ENQ          82
+#define V31_STATS_CS_ENQ          83
+#define V31_STATS_SKIN_ENQ        84
+#define V31_STATS_IHIST_ENQ       85
+#define V31_DUMMY_8               86
+#define V31_JPEG_ENC_CFG          87
+#define V31_DUMMY_9               88
+#define V31_STATS_AF_START        89
+#define V31_STATS_AF_STOP         90
+#define V31_STATS_AE_START        91
+#define V31_STATS_AE_STOP         92
+#define V31_STATS_AWB_START       93
+#define V31_STATS_AWB_STOP        94
+#define V31_STATS_RS_START        95
+#define V31_STATS_RS_STOP         96
+#define V31_STATS_CS_START        97
+#define V31_STATS_CS_STOP         98
+#define V31_STATS_SKIN_START      99
+#define V31_STATS_SKIN_STOP       100
+#define V31_STATS_IHIST_START     101
+#define V31_STATS_IHIST_STOP      102
+#define V31_DUMMY_10              103
+#define V31_SYNC_TIMER_SETTING    104
+#define V31_ASYNC_TIMER_SETTING   105
+#define V31_LIVESHOT              106
+#define V31_ZSL                   107
+#define V31_STEREOCAM             108
+#define V31_LA_SETUP              109
+#define V31_XBAR_CFG              110
+#define V31_EZTUNE_CFG            111
+
+#define V31_CAMIF_OFF             0x000001E4
+#define V31_CAMIF_LEN             32
+
+#define V31_DEMUX_OFF             0x00000284
+#define V31_DEMUX_LEN             20
+
+#define V31_DEMOSAIC_0_OFF        0x00000298
+#define V31_DEMOSAIC_0_LEN        4
+/* ABF     */
+#define V31_DEMOSAIC_1_OFF        0x000002A4
+#define V31_DEMOSAIC_1_LEN        180
+/* BPC     */
+#define V31_DEMOSAIC_2_OFF        0x0000029C
+#define V31_DEMOSAIC_2_LEN        8
+
+/* gamma VFE_LUT_BANK_SEL*/
+#define V31_GAMMA_CFG_OFF         0x000003BC
+#define V31_LUMA_CFG_OFF          0x000003C0
+
+#define V31_OUT_CLAMP_OFF         0x00000524
+#define V31_OUT_CLAMP_LEN         8
+
+#define V31_OPERATION_CFG_LEN     32
+
+#define V31_AXI_OUT_OFF           0x00000038
+#define V31_AXI_OUT_LEN           212
+#define V31_AXI_CH_INF_LEN        24
+#define V31_AXI_CFG_LEN           47
+
+#define V31_FRAME_SKIP_OFF        0x00000504
+#define V31_FRAME_SKIP_LEN        32
+
+#define V31_CHROMA_SUBS_OFF       0x000004F8
+#define V31_CHROMA_SUBS_LEN       12
+
+#define V31_FOV_OFF           0x00000360
+#define V31_FOV_LEN           8
+
+#define V31_MAIN_SCALER_OFF 0x00000368
+#define V31_MAIN_SCALER_LEN 28
+
+#define V31_S2Y_OFF 0x000004D0
+#define V31_S2Y_LEN 20
+
+#define V31_S2CbCr_OFF 0x000004E4
+#define V31_S2CbCr_LEN 20
+
+#define V31_CHROMA_EN_OFF 0x000003C4
+#define V31_CHROMA_EN_LEN 36
+
+#define V31_SYNC_TIMER_OFF      0x0000020C
+#define V31_SYNC_TIMER_POLARITY_OFF 0x00000234
+#define V31_TIMER_SELECT_OFF        0x0000025C
+#define V31_SYNC_TIMER_LEN 28
+
+#define V31_ASYNC_TIMER_OFF 0x00000238
+#define V31_ASYNC_TIMER_LEN 28
+
+#define V31_BLACK_LEVEL_OFF 0x00000264
+#define V31_BLACK_LEVEL_LEN 16
+
+#define V31_ROLL_OFF_CFG_OFF 0x00000274
+#define V31_ROLL_OFF_CFG_LEN 16
+
+#define V31_COLOR_COR_OFF 0x00000388
+#define V31_COLOR_COR_LEN 52
+
+#define V31_WB_OFF 0x00000384
+#define V31_WB_LEN 4
+
+#define V31_RGB_G_OFF 0x000003BC
+#define V31_RGB_G_LEN 4
+
+#define V31_LA_OFF 0x000003C0
+#define V31_LA_LEN 4
+
+#define V31_SCE_OFF 0x00000418
+#define V31_SCE_LEN 136
+
+#define V31_CHROMA_SUP_OFF 0x000003E8
+#define V31_CHROMA_SUP_LEN 12
+
+#define V31_MCE_OFF 0x000003F4
+#define V31_MCE_LEN 36
+#define V31_STATS_AF_OFF 0x0000053c
+#define V31_STATS_AF_LEN 16
+
+#define V31_STATS_AE_OFF 0x00000534
+#define V31_STATS_AE_LEN 8
+
+#define V31_STATS_AWB_OFF 0x0000054c
+#define V31_STATS_AWB_LEN 32
+
+#define V31_STATS_IHIST_OFF 0x0000057c
+#define V31_STATS_IHIST_LEN 8
+
+#define V31_STATS_RS_OFF 0x0000056c
+#define V31_STATS_RS_LEN 8
+
+#define V31_STATS_CS_OFF 0x00000574
+#define V31_STATS_CS_LEN 8
+
+#define V31_XBAR_CFG_OFF 0x00000040
+#define V31_XBAR_CFG_LEN 8
+
+#define V31_EZTUNE_CFG_OFF 0x00000010
+#define V31_EZTUNE_CFG_LEN 4
+
+#define V31_ASF_OFF 0x000004A0
+#define V31_ASF_LEN 48
+#define V31_ASF_UPDATE_LEN 36
+
+#define V31_CAPTURE_LEN 4
+
+struct vfe_cmd_hw_version {
+	uint32_t minorVersion;
+	uint32_t majorVersion;
+	uint32_t coreVersion;
+};
+
+enum VFE_AXI_OUTPUT_MODE {
+	VFE_AXI_OUTPUT_MODE_Output1,
+	VFE_AXI_OUTPUT_MODE_Output2,
+	VFE_AXI_OUTPUT_MODE_Output1AndOutput2,
+	VFE_AXI_OUTPUT_MODE_CAMIFToAXIViaOutput2,
+	VFE_AXI_OUTPUT_MODE_Output2AndCAMIFToAXIViaOutput1,
+	VFE_AXI_OUTPUT_MODE_Output1AndCAMIFToAXIViaOutput2,
+	VFE_AXI_LAST_OUTPUT_MODE_ENUM
+};
+
+enum VFE_RAW_WR_PATH_SEL {
+	VFE_RAW_OUTPUT_DISABLED,
+	VFE_RAW_OUTPUT_ENC_CBCR_PATH,
+	VFE_RAW_OUTPUT_VIEW_CBCR_PATH,
+	VFE_RAW_OUTPUT_PATH_INVALID
+};
+
+
+#define VFE_AXI_OUTPUT_BURST_LENGTH     4
+#define VFE_MAX_NUM_FRAGMENTS_PER_FRAME 4
+#define VFE_AXI_OUTPUT_CFG_FRAME_COUNT  3
+
+struct vfe_cmds_per_write_master {
+	uint16_t imageWidth;
+	uint16_t imageHeight;
+	uint16_t outRowCount;
+	uint16_t outRowIncrement;
+	uint32_t outFragments[VFE_AXI_OUTPUT_CFG_FRAME_COUNT]
+		[VFE_MAX_NUM_FRAGMENTS_PER_FRAME];
+};
+
+struct vfe_cmds_axi_per_output_path {
+	uint8_t fragmentCount;
+	struct vfe_cmds_per_write_master firstWM;
+	struct vfe_cmds_per_write_master secondWM;
+};
+
+enum VFE_AXI_BURST_LENGTH {
+	VFE_AXI_BURST_LENGTH_IS_2  = 2,
+	VFE_AXI_BURST_LENGTH_IS_4  = 4,
+	VFE_AXI_BURST_LENGTH_IS_8  = 8,
+	VFE_AXI_BURST_LENGTH_IS_16 = 16
+};
+
+
+struct vfe_cmd_fov_crop_config {
+	uint8_t enable;
+	uint16_t firstPixel;
+	uint16_t lastPixel;
+	uint16_t firstLine;
+	uint16_t lastLine;
+};
+
+struct vfe_cmds_main_scaler_stripe_init {
+	uint16_t MNCounterInit;
+	uint16_t phaseInit;
+};
+
+struct vfe_cmds_scaler_one_dimension {
+	uint8_t  enable;
+	uint16_t inputSize;
+	uint16_t outputSize;
+	uint32_t phaseMultiplicationFactor;
+	uint8_t  interpolationResolution;
+};
+
+struct vfe_cmd_main_scaler_config {
+	uint8_t enable;
+	struct vfe_cmds_scaler_one_dimension    hconfig;
+	struct vfe_cmds_scaler_one_dimension    vconfig;
+	struct vfe_cmds_main_scaler_stripe_init MNInitH;
+	struct vfe_cmds_main_scaler_stripe_init MNInitV;
+};
+
+struct vfe_cmd_scaler2_config {
+	uint8_t enable;
+	struct vfe_cmds_scaler_one_dimension hconfig;
+	struct vfe_cmds_scaler_one_dimension vconfig;
+};
+
+
+struct vfe_cmd_frame_skip_update {
+	uint32_t output1Pattern;
+	uint32_t output2Pattern;
+};
+
+struct vfe_cmd_output_clamp_config {
+	uint8_t minCh0;
+	uint8_t minCh1;
+	uint8_t minCh2;
+	uint8_t maxCh0;
+	uint8_t maxCh1;
+	uint8_t maxCh2;
+};
+
+struct vfe_cmd_chroma_subsample_config {
+	uint8_t enable;
+	uint8_t cropEnable;
+	uint8_t vsubSampleEnable;
+	uint8_t hsubSampleEnable;
+	uint8_t vCosited;
+	uint8_t hCosited;
+	uint8_t vCositedPhase;
+	uint8_t hCositedPhase;
+	uint16_t cropWidthFirstPixel;
+	uint16_t cropWidthLastPixel;
+	uint16_t cropHeightFirstLine;
+	uint16_t cropHeightLastLine;
+};
+
+enum VFE_START_INPUT_SOURCE {
+	VFE_START_INPUT_SOURCE_CAMIF,
+	VFE_START_INPUT_SOURCE_TESTGEN,
+	VFE_START_INPUT_SOURCE_AXI,
+	VFE_START_INPUT_SOURCE_INVALID
+};
+
+enum VFE_START_PIXEL_PATTERN {
+	VFE_BAYER_RGRGRG,
+	VFE_BAYER_GRGRGR,
+	VFE_BAYER_BGBGBG,
+	VFE_BAYER_GBGBGB,
+	VFE_YUV_YCbYCr,
+	VFE_YUV_YCrYCb,
+	VFE_YUV_CbYCrY,
+	VFE_YUV_CrYCbY
+};
+
+enum VFE_BUS_RD_INPUT_PIXEL_PATTERN {
+	VFE_BAYER_RAW,
+	VFE_YUV_INTERLEAVED,
+	VFE_YUV_PSEUDO_PLANAR_Y,
+	VFE_YUV_PSEUDO_PLANAR_CBCR
+};
+
+enum VFE_YUV_INPUT_COSITING_MODE {
+	VFE_YUV_COSITED,
+	VFE_YUV_INTERPOLATED
+};
+
+
+/* 13*1  */
+#define VFE31_ROLL_OFF_INIT_TABLE_SIZE  13
+/* 13*16 */
+#define VFE31_ROLL_OFF_DELTA_TABLE_SIZE 208
+
+#define VFE31_GAMMA_NUM_ENTRIES  64
+
+#define VFE31_LA_TABLE_LENGTH    64
+
+#define VFE31_HIST_TABLE_LENGTH  256
+
+struct vfe_cmds_demosaic_abf {
+	uint8_t   enable;
+	uint8_t   forceOn;
+	uint8_t   shift;
+	uint16_t  lpThreshold;
+	uint16_t  max;
+	uint16_t  min;
+	uint8_t   ratio;
+};
+
+struct vfe_cmds_demosaic_bpc {
+	uint8_t   enable;
+	uint16_t  fmaxThreshold;
+	uint16_t  fminThreshold;
+	uint16_t  redDiffThreshold;
+	uint16_t  blueDiffThreshold;
+	uint16_t  greenDiffThreshold;
+};
+
+struct vfe_cmd_demosaic_config {
+	uint8_t   enable;
+	uint8_t   slopeShift;
+	struct vfe_cmds_demosaic_abf abfConfig;
+	struct vfe_cmds_demosaic_bpc bpcConfig;
+};
+
+struct vfe_cmd_demosaic_bpc_update {
+	struct vfe_cmds_demosaic_bpc bpcUpdate;
+};
+
+struct vfe_cmd_demosaic_abf_update {
+	struct vfe_cmds_demosaic_abf abfUpdate;
+};
+
+struct vfe_cmd_white_balance_config {
+	uint8_t  enable;
+	uint16_t ch2Gain;
+	uint16_t ch1Gain;
+	uint16_t ch0Gain;
+};
+
+enum VFE_COLOR_CORRECTION_COEF_QFACTOR {
+	COEF_IS_Q7_SIGNED,
+	COEF_IS_Q8_SIGNED,
+	COEF_IS_Q9_SIGNED,
+	COEF_IS_Q10_SIGNED
+};
+
+struct vfe_cmd_color_correction_config {
+	uint8_t     enable;
+	enum VFE_COLOR_CORRECTION_COEF_QFACTOR coefQFactor;
+	int16_t  C0;
+	int16_t  C1;
+	int16_t  C2;
+	int16_t  C3;
+	int16_t  C4;
+	int16_t  C5;
+	int16_t  C6;
+	int16_t  C7;
+	int16_t  C8;
+	int16_t  K0;
+	int16_t  K1;
+	int16_t  K2;
+};
+
+#define VFE_LA_TABLE_LENGTH 64
+
+struct vfe_cmd_la_config {
+	uint8_t enable;
+	int16_t table[VFE_LA_TABLE_LENGTH];
+};
+
+#define VFE_GAMMA_TABLE_LENGTH 256
+enum VFE_RGB_GAMMA_TABLE_SELECT {
+	RGB_GAMMA_CH0_SELECTED,
+	RGB_GAMMA_CH1_SELECTED,
+	RGB_GAMMA_CH2_SELECTED,
+	RGB_GAMMA_CH0_CH1_SELECTED,
+	RGB_GAMMA_CH0_CH2_SELECTED,
+	RGB_GAMMA_CH1_CH2_SELECTED,
+	RGB_GAMMA_CH0_CH1_CH2_SELECTED
+};
+
+struct vfe_cmd_rgb_gamma_config {
+	uint8_t enable;
+	enum VFE_RGB_GAMMA_TABLE_SELECT channelSelect;
+	int16_t table[VFE_GAMMA_TABLE_LENGTH];
+};
+
+struct vfe_cmd_chroma_enhan_config {
+	uint8_t  enable;
+	int16_t am;
+	int16_t ap;
+	int16_t bm;
+	int16_t bp;
+	int16_t cm;
+	int16_t cp;
+	int16_t dm;
+	int16_t dp;
+	int16_t kcr;
+	int16_t kcb;
+	int16_t RGBtoYConversionV0;
+	int16_t RGBtoYConversionV1;
+	int16_t RGBtoYConversionV2;
+	uint8_t RGBtoYConversionOffset;
+};
+
+struct vfe_cmd_chroma_suppression_config {
+	uint8_t enable;
+	uint8_t m1;
+	uint8_t m3;
+	uint8_t n1;
+	uint8_t n3;
+	uint8_t nn1;
+	uint8_t mm1;
+};
+
+struct vfe_cmd_asf_config {
+	uint8_t enable;
+	uint8_t smoothFilterEnabled;
+	uint8_t sharpMode;
+	uint8_t smoothCoefCenter;
+	uint8_t smoothCoefSurr;
+	uint8_t normalizeFactor;
+	uint8_t sharpK1;
+	uint8_t sharpK2;
+	uint8_t sharpThreshE1;
+	int8_t sharpThreshE2;
+	int8_t sharpThreshE3;
+	int8_t sharpThreshE4;
+	int8_t sharpThreshE5;
+	int8_t filter1Coefficients[9];
+	int8_t filter2Coefficients[9];
+	uint8_t  cropEnable;
+	uint16_t cropFirstPixel;
+	uint16_t cropLastPixel;
+	uint16_t cropFirstLine;
+	uint16_t cropLastLine;
+};
+
+struct vfe_cmd_asf_update {
+	uint8_t enable;
+	uint8_t smoothFilterEnabled;
+	uint8_t sharpMode;
+	uint8_t smoothCoefCenter;
+	uint8_t smoothCoefSurr;
+	uint8_t normalizeFactor;
+	uint8_t sharpK1;
+	uint8_t sharpK2;
+	uint8_t sharpThreshE1;
+	int8_t  sharpThreshE2;
+	int8_t  sharpThreshE3;
+	int8_t  sharpThreshE4;
+	int8_t  sharpThreshE5;
+	int8_t  filter1Coefficients[9];
+	int8_t  filter2Coefficients[9];
+	uint8_t cropEnable;
+};
+
+enum VFE_TEST_GEN_SYNC_EDGE {
+	VFE_TEST_GEN_SYNC_EDGE_ActiveHigh,
+	VFE_TEST_GEN_SYNC_EDGE_ActiveLow
+};
+
+
+struct vfe_cmd_bus_pm_start {
+	uint8_t output2YWrPmEnable;
+	uint8_t output2CbcrWrPmEnable;
+	uint8_t output1YWrPmEnable;
+	uint8_t output1CbcrWrPmEnable;
+};
+
+struct  vfe_frame_skip_counts {
+	uint32_t  totalFrameCount;
+	uint32_t  output1Count;
+	uint32_t  output2Count;
+};
+
+enum VFE_AXI_RD_UNPACK_HBI_SEL {
+	VFE_AXI_RD_HBI_32_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_64_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_128_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_256_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_512_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_1024_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_2048_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_4096_CLOCK_CYCLES
+};
+
+enum VFE31_MESSAGE_ID {
+	MSG_ID_RESET_ACK, /* 0 */
+	MSG_ID_START_ACK,
+	MSG_ID_STOP_ACK,
+	MSG_ID_UPDATE_ACK,
+	MSG_ID_OUTPUT_P,
+	MSG_ID_OUTPUT_T,
+	MSG_ID_OUTPUT_S,
+	MSG_ID_OUTPUT_V,
+	MSG_ID_SNAPSHOT_DONE,
+	MSG_ID_COMMON,
+	MSG_ID_EPOCH1, /* 10 */
+	MSG_ID_EPOCH2,
+	MSG_ID_SYNC_TIMER0_DONE,
+	MSG_ID_SYNC_TIMER1_DONE,
+	MSG_ID_SYNC_TIMER2_DONE,
+	MSG_ID_ASYNC_TIMER0_DONE,
+	MSG_ID_ASYNC_TIMER1_DONE,
+	MSG_ID_ASYNC_TIMER2_DONE,
+	MSG_ID_ASYNC_TIMER3_DONE,
+	MSG_ID_AE_OVERFLOW,
+	MSG_ID_AF_OVERFLOW, /* 20 */
+	MSG_ID_AWB_OVERFLOW,
+	MSG_ID_RS_OVERFLOW,
+	MSG_ID_CS_OVERFLOW,
+	MSG_ID_IHIST_OVERFLOW,
+	MSG_ID_SKIN_OVERFLOW,
+	MSG_ID_AXI_ERROR,
+	MSG_ID_CAMIF_OVERFLOW,
+	MSG_ID_VIOLATION,
+	MSG_ID_CAMIF_ERROR,
+	MSG_ID_BUS_OVERFLOW, /* 30 */
+	MSG_ID_SOF_ACK,
+	MSG_ID_STOP_REC_ACK,
+};
+
+struct stats_buffer {
+	uint32_t aec;
+	uint32_t awb;
+	uint32_t af;
+	uint32_t ihist;
+	uint32_t rs;
+	uint32_t cs;
+	uint32_t skin;
+};
+
+struct vfe_msg_stats {
+	struct stats_buffer buff;
+	uint32_t    frameCounter;
+	uint32_t    status_bits;
+};
+
+
+struct vfe_frame_bpc_info {
+	uint32_t greenDefectPixelCount;
+	uint32_t redBlueDefectPixelCount;
+};
+
+struct vfe_frame_asf_info {
+	uint32_t  asfMaxEdge;
+	uint32_t  asfHbiCount;
+};
+
+struct vfe_msg_camif_status {
+	uint8_t  camifState;
+	uint32_t pixelCount;
+	uint32_t lineCount;
+};
+
+
+struct vfe31_irq_status {
+	uint32_t vfeIrqStatus0;
+	uint32_t vfeIrqStatus1;
+	uint32_t camifStatus;
+	uint32_t demosaicStatus;
+	uint32_t asfMaxEdge;
+	uint32_t vfePingPongStatus;
+};
+
+struct vfe_msg_output {
+	uint8_t   output_id;
+	uint32_t  yBuffer;
+	uint32_t  cbcrBuffer;
+	struct vfe_frame_bpc_info bpcInfo;
+	struct vfe_frame_asf_info asfInfo;
+	uint32_t  frameCounter;
+};
+
+struct vfe_message {
+	enum VFE31_MESSAGE_ID _d;
+	union {
+		struct vfe_msg_output              msgOut;
+		struct vfe_msg_stats               msgStats;
+		struct vfe_msg_camif_status        msgCamifError;
+	} _u;
+};
+
+/* New one for 7x30 */
+struct msm_vfe31_cmd {
+	int32_t  id;
+	uint16_t length;
+	void     *value;
+};
+
+#define V31_PREVIEW_AXI_FLAG  0x00000001
+#define V31_SNAPSHOT_AXI_FLAG (0x00000001<<1)
+
+struct vfe31_cmd_type {
+	uint16_t id;
+	uint32_t length;
+	uint32_t offset;
+	uint32_t flag;
+};
+
+struct vfe31_free_buf {
+	struct list_head node;
+	uint32_t paddr;
+	uint32_t y_off;
+	uint32_t cbcr_off;
+};
+
+struct vfe31_output_ch {
+	struct list_head free_buf_head;
+	spinlock_t free_buf_lock;
+	uint16_t output_fmt;
+	int8_t ch0;
+	int8_t ch1;
+	int8_t ch2;
+	uint32_t  frame_drop_cnt;
+};
+
+/* no error irq in mask 0 */
+#define VFE31_IMASK_ERROR_ONLY_0  0x0
+/* in the normal case, do not mask off the error status. */
+/* bits 0-21 are error irq bits */
+#define VFE31_IMASK_ERROR_ONLY_1               0x003FFFFF
+#define VFE31_IMASK_CAMIF_ERROR               (0x00000001<<0)
+#define VFE31_IMASK_STATS_CS_OVWR             (0x00000001<<1)
+#define VFE31_IMASK_STATS_IHIST_OVWR          (0x00000001<<2)
+#define VFE31_IMASK_REALIGN_BUF_Y_OVFL        (0x00000001<<3)
+#define VFE31_IMASK_REALIGN_BUF_CB_OVFL       (0x00000001<<4)
+#define VFE31_IMASK_REALIGN_BUF_CR_OVFL       (0x00000001<<5)
+#define VFE31_IMASK_VIOLATION                 (0x00000001<<6)
+#define VFE31_IMASK_IMG_MAST_0_BUS_OVFL       (0x00000001<<7)
+#define VFE31_IMASK_IMG_MAST_1_BUS_OVFL       (0x00000001<<8)
+#define VFE31_IMASK_IMG_MAST_2_BUS_OVFL       (0x00000001<<9)
+#define VFE31_IMASK_IMG_MAST_3_BUS_OVFL       (0x00000001<<10)
+#define VFE31_IMASK_IMG_MAST_4_BUS_OVFL       (0x00000001<<11)
+#define VFE31_IMASK_IMG_MAST_5_BUS_OVFL       (0x00000001<<12)
+#define VFE31_IMASK_IMG_MAST_6_BUS_OVFL       (0x00000001<<13)
+#define VFE31_IMASK_STATS_AE_BUS_OVFL         (0x00000001<<14)
+#define VFE31_IMASK_STATS_AF_BUS_OVFL         (0x00000001<<15)
+#define VFE31_IMASK_STATS_AWB_BUS_OVFL        (0x00000001<<16)
+#define VFE31_IMASK_STATS_RS_BUS_OVFL         (0x00000001<<17)
+#define VFE31_IMASK_STATS_CS_BUS_OVFL         (0x00000001<<18)
+#define VFE31_IMASK_STATS_IHIST_BUS_OVFL      (0x00000001<<19)
+#define VFE31_IMASK_STATS_SKIN_BUS_OVFL       (0x00000001<<20)
+#define VFE31_IMASK_AXI_ERROR                 (0x00000001<<21)
+
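+/* composite mask of the stats interrupt bits (13-19) of IRQ status 0 */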
+#define VFE_COM_STATUS 0x000FE000
+
+struct vfe31_output_path {
+	uint16_t output_mode;     /* bitmask  */
+
+	struct vfe31_output_ch out0; /* preview and thumbnail */
+	struct vfe31_output_ch out1; /* snapshot */
+	struct vfe31_output_ch out2; /* video    */
+};
+
+struct vfe31_frame_extra {
+	uint32_t greenDefectPixelCount;
+	uint32_t redBlueDefectPixelCount;
+
+	uint32_t  asfMaxEdge;
+	uint32_t  asfHbiCount;
+
+	uint32_t yWrPmStats0;
+	uint32_t yWrPmStats1;
+	uint32_t cbcrWrPmStats0;
+	uint32_t cbcrWrPmStats1;
+
+	uint32_t  frameCounter;
+};
+
+#define VFE_DISABLE_ALL_IRQS             0
+#define VFE_CLEAR_ALL_IRQS               0xffffffff
+
+#define VFE_GLOBAL_RESET                 0x00000004
+#define VFE_CGC_OVERRIDE                 0x0000000C
+#define VFE_MODULE_CFG                   0x00000010
+#define VFE_CFG_OFF                      0x00000014
+#define VFE_IRQ_CMD                      0x00000018
+#define VFE_IRQ_MASK_0                   0x0000001C
+#define VFE_IRQ_MASK_1                   0x00000020
+#define VFE_IRQ_CLEAR_0                  0x00000024
+#define VFE_IRQ_CLEAR_1                  0x00000028
+#define VFE_IRQ_STATUS_0                 0x0000002C
+#define VFE_IRQ_STATUS_1                 0x00000030
+#define VFE_IRQ_COMP_MASK                0x00000034
+#define VFE_BUS_CMD                      0x00000038
+#define VFE_BUS_PING_PONG_STATUS         0x00000180
+#define VFE_BUS_OPERATION_STATUS         0x00000184
+
+#define VFE_BUS_IMAGE_MASTER_0_WR_PM_STATS_0        0x00000190
+#define VFE_BUS_IMAGE_MASTER_0_WR_PM_STATS_1        0x00000194
+
+#define VFE_AXI_CMD                      0x000001D8
+#define VFE_AXI_STATUS                   0x000001DC
+#define VFE_BUS_STATS_PING_PONG_BASE     0x000000F4
+
+#define VFE_BUS_STATS_AEC_WR_PING_ADDR   0x000000F4
+#define VFE_BUS_STATS_AEC_WR_PONG_ADDR   0x000000F8
+#define VFE_BUS_STATS_AEC_UB_CFG         0x000000FC
+#define VFE_BUS_STATS_AF_WR_PING_ADDR    0x00000100
+#define VFE_BUS_STATS_AF_WR_PONG_ADDR    0x00000104
+#define VFE_BUS_STATS_AF_UB_CFG          0x00000108
+#define VFE_BUS_STATS_AWB_WR_PING_ADDR   0x0000010C
+#define VFE_BUS_STATS_AWB_WR_PONG_ADDR   0x00000110
+#define VFE_BUS_STATS_AWB_UB_CFG         0x00000114
+#define VFE_BUS_STATS_RS_WR_PING_ADDR    0x00000118
+#define VFE_BUS_STATS_RS_WR_PONG_ADDR    0x0000011C
+#define VFE_BUS_STATS_RS_UB_CFG          0x00000120
+
+#define VFE_BUS_STATS_CS_WR_PING_ADDR    0x00000124
+#define VFE_BUS_STATS_CS_WR_PONG_ADDR    0x00000128
+#define VFE_BUS_STATS_CS_UB_CFG          0x0000012C
+#define VFE_BUS_STATS_HIST_WR_PING_ADDR  0x00000130
+#define VFE_BUS_STATS_HIST_WR_PONG_ADDR  0x00000134
+#define VFE_BUS_STATS_HIST_UB_CFG        0x00000138
+#define VFE_BUS_STATS_SKIN_WR_PING_ADDR  0x0000013C
+#define VFE_BUS_STATS_SKIN_WR_PONG_ADDR  0x00000140
+#define VFE_BUS_STATS_SKIN_UB_CFG        0x00000144
+#define VFE_BUS_PM_CMD                   0x00000188
+#define VFE_BUS_PM_CFG                   0x0000018C
+#define VFE_CAMIF_COMMAND                0x000001E0
+#define VFE_CAMIF_STATUS                 0x00000204
+#define VFE_REG_UPDATE_CMD               0x00000260
+#define VFE_DEMUX_GAIN_0                 0x00000288
+#define VFE_DEMUX_GAIN_1                 0x0000028C
+#define VFE_CHROMA_UP                    0x0000035C
+#define VFE_FRAMEDROP_ENC_Y_CFG          0x00000504
+#define VFE_FRAMEDROP_ENC_CBCR_CFG       0x00000508
+#define VFE_FRAMEDROP_ENC_Y_PATTERN      0x0000050C
+#define VFE_FRAMEDROP_ENC_CBCR_PATTERN   0x00000510
+#define VFE_FRAMEDROP_VIEW_Y             0x00000514
+#define VFE_FRAMEDROP_VIEW_CBCR          0x00000518
+#define VFE_FRAMEDROP_VIEW_Y_PATTERN     0x0000051C
+#define VFE_FRAMEDROP_VIEW_CBCR_PATTERN  0x00000520
+#define VFE_CLAMP_MAX                    0x00000524
+#define VFE_CLAMP_MIN                    0x00000528
+#define VFE_REALIGN_BUF                  0x0000052C
+#define VFE_STATS_CFG                    0x00000530
+#define VFE_DMI_CFG                      0x00000598
+#define VFE_DMI_ADDR                     0x0000059C
+#define VFE_DMI_DATA_LO                  0x000005A4
+#define VFE_AXI_CFG                      0x00000600
+
+struct vfe_stats_control {
+	uint8_t  ackPending;
+	uint32_t nextFrameAddrBuf;
+	uint32_t droppedStatsFrameCount;
+	uint32_t bufToRender;
+};
+
+struct vfe31_ctrl_type {
+	uint16_t operation_mode;     /* streaming or snapshot */
+	struct vfe31_output_path outpath;
+
+	uint32_t vfeImaskCompositePacked;
+
+	spinlock_t  update_ack_lock;
+	spinlock_t  io_lock;
+
+	int8_t aec_ack_pending;
+	int8_t awb_ack_pending;
+	int8_t af_ack_pending;
+	int8_t ihist_ack_pending;
+	int8_t rs_ack_pending;
+	int8_t cs_ack_pending;
+
+	struct msm_vfe_callback *resp;
+	uint32_t extlen;
+	void *extdata;
+
+	int8_t start_ack_pending;
+	atomic_t stop_ack_pending;
+	int8_t reset_ack_pending;
+	int8_t update_ack_pending;
+	enum vfe_recording_state recording_state;
+	int8_t output0_available;
+	int8_t output1_available;
+	int8_t update_gamma;
+	int8_t update_luma;
+	spinlock_t  tasklet_lock;
+	struct list_head tasklet_q;
+	int vfeirq;
+	void __iomem *vfebase;
+	void *syncdata;
+
+	struct resource	*vfemem;
+	struct resource *vfeio;
+
+	uint32_t stats_comp;
+	uint32_t hfr_mode;
+	atomic_t vstate;
+	uint32_t vfe_capture_count;
+	uint32_t sync_timer_repeat_count;
+	uint32_t sync_timer_state;
+	uint32_t sync_timer_number;
+
+	uint32_t vfeFrameId;
+	uint32_t output1Pattern;
+	uint32_t output1Period;
+	uint32_t output2Pattern;
+	uint32_t output2Period;
+	uint32_t vfeFrameSkipCount;
+	uint32_t vfeFrameSkipPeriod;
+	uint32_t status_bits;
+	struct vfe_stats_control afStatsControl;
+	struct vfe_stats_control awbStatsControl;
+	struct vfe_stats_control aecStatsControl;
+	struct vfe_stats_control ihistStatsControl;
+	struct vfe_stats_control rsStatsControl;
+	struct vfe_stats_control csStatsControl;
+	struct msm_camera_sensor_info *s_info;
+	struct vfe_message vMsgHold_Snap;
+	struct vfe_message vMsgHold_Thumb;
+	int8_t xbar_update_pending;
+	uint32_t xbar_cfg[2];
+	spinlock_t xbar_lock;
+	uint32_t while_stopping_mask;
+};
+
+#define statsAeNum      0
+#define statsAfNum      1
+#define statsAwbNum     2
+#define statsRsNum      3
+#define statsCsNum      4
+#define statsIhistNum   5
+#define statsSkinNum    6
+
+struct vfe_cmd_stats_ack {
+	uint32_t nextStatsBuf;
+};
+
+#define VFE_STATS_BUFFER_COUNT            3
+
+struct vfe_cmd_stats_buf {
+	uint32_t statsBuf[VFE_STATS_BUFFER_COUNT];
+};
+#endif /* __MSM_VFE31_H__ */
diff --git a/drivers/media/video/msm/msm_vfe32.c b/drivers/media/video/msm/msm_vfe32.c
new file mode 100644
index 0000000..4ca62ce
--- /dev/null
+++ b/drivers/media/video/msm/msm_vfe32.c
@@ -0,0 +1,3379 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/atomic.h>
+#include <mach/irqs.h>
+#include <mach/camera.h>
+#include <mach/msm_reqs.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#include "msm.h"
+#include "msm_vfe32.h"
+#include "msm_vpe1.h"
+
+atomic_t irq_cnt;
+
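+/*
+ * CHECKED_COPY_FROM_USER() expects 'cmd' and 'rc' to be in scope in the
+ * caller and issues a 'break' on a failed copy, so it is only usable inside
+ * a switch (or loop) such as the command dispatch below.
+ */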
+#define CHECKED_COPY_FROM_USER(in) {					\
+	if (copy_from_user((in), (void __user *)cmd->value,		\
+			cmd->length)) {					\
+		rc = -EFAULT;						\
+		break;							\
+	}								\
+}
+
+static struct vfe32_ctrl_type *vfe32_ctrl;
+static struct msm_camera_io_clk camio_clk;
+static void  *vfe_syncdata;
+
+struct vfe32_isr_queue_cmd {
+	struct list_head list;
+	uint32_t                           vfeInterruptStatus0;
+	uint32_t                           vfeInterruptStatus1;
+	struct vfe_frame_asf_info          vfeAsfFrameInfo;
+	struct vfe_frame_bpc_info          vfeBpcFrameInfo;
+	struct vfe_msg_camif_status        vfeCamifStatusLocal;
+};
+
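+/*
+ * Command descriptor table, indexed by the V32_* command id.  Entries that
+ * carry a length and register offset are copied directly into the VFE
+ * register space by vfe32_proc_general() (see its default case).
+ */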
+static struct vfe32_cmd_type vfe32_cmd[] = {
+/* 0*/	{V32_DUMMY_0},
+		{V32_SET_CLK},
+		{V32_RESET},
+		{V32_START},
+		{V32_TEST_GEN_START},
+/* 5*/	{V32_OPERATION_CFG, V32_OPERATION_CFG_LEN},
+		{V32_AXI_OUT_CFG, V32_AXI_OUT_LEN, V32_AXI_OUT_OFF, 0xFF},
+		{V32_CAMIF_CFG, V32_CAMIF_LEN, V32_CAMIF_OFF, 0xFF},
+		{V32_AXI_INPUT_CFG},
+		{V32_BLACK_LEVEL_CFG, V32_BLACK_LEVEL_LEN, V32_BLACK_LEVEL_OFF,
+		0xFF},
+/*10*/  {V32_ROLL_OFF_CFG, V32_ROLL_OFF_CFG_LEN, V32_ROLL_OFF_CFG_OFF,
+		0xFF},
+		{V32_DEMUX_CFG, V32_DEMUX_LEN, V32_DEMUX_OFF, 0xFF},
+		{V32_FOV_CFG, V32_FOV_LEN, V32_FOV_OFF, 0xFF},
+		{V32_MAIN_SCALER_CFG, V32_MAIN_SCALER_LEN, V32_MAIN_SCALER_OFF,
+		0xFF},
+		{V32_WB_CFG, V32_WB_LEN, V32_WB_OFF, 0xFF},
+/*15*/	{V32_COLOR_COR_CFG, V32_COLOR_COR_LEN, V32_COLOR_COR_OFF, 0xFF},
+		{V32_RGB_G_CFG, V32_RGB_G_LEN, V32_RGB_G_OFF, 0xFF},
+		{V32_LA_CFG, V32_LA_LEN, V32_LA_OFF, 0xFF },
+		{V32_CHROMA_EN_CFG, V32_CHROMA_EN_LEN, V32_CHROMA_EN_OFF, 0xFF},
+		{V32_CHROMA_SUP_CFG, V32_CHROMA_SUP_LEN, V32_CHROMA_SUP_OFF,
+		0xFF},
+/*20*/	{V32_MCE_CFG, V32_MCE_LEN, V32_MCE_OFF, 0xFF},
+		{V32_SK_ENHAN_CFG, V32_SCE_LEN, V32_SCE_OFF, 0xFF},
+		{V32_ASF_CFG, V32_ASF_LEN, V32_ASF_OFF, 0xFF},
+		{V32_S2Y_CFG, V32_S2Y_LEN, V32_S2Y_OFF, 0xFF},
+		{V32_S2CbCr_CFG, V32_S2CbCr_LEN, V32_S2CbCr_OFF, 0xFF},
+/*25*/	{V32_CHROMA_SUBS_CFG, V32_CHROMA_SUBS_LEN, V32_CHROMA_SUBS_OFF,
+		0xFF},
+		{V32_OUT_CLAMP_CFG, V32_OUT_CLAMP_LEN, V32_OUT_CLAMP_OFF,
+		0xFF},
+		{V32_FRAME_SKIP_CFG, V32_FRAME_SKIP_LEN, V32_FRAME_SKIP_OFF,
+		0xFF},
+		{V32_DUMMY_1},
+		{V32_DUMMY_2},
+/*30*/	{V32_DUMMY_3},
+		{V32_UPDATE},
+		{V32_BL_LVL_UPDATE, V32_BLACK_LEVEL_LEN, V32_BLACK_LEVEL_OFF,
+		0xFF},
+		{V32_DEMUX_UPDATE, V32_DEMUX_LEN, V32_DEMUX_OFF, 0xFF},
+		{V32_FOV_UPDATE, V32_FOV_LEN, V32_FOV_OFF, 0xFF},
+/*35*/	{V32_MAIN_SCALER_UPDATE, V32_MAIN_SCALER_LEN, V32_MAIN_SCALER_OFF,
+		0xFF},
+		{V32_WB_UPDATE, V32_WB_LEN, V32_WB_OFF, 0xFF},
+		{V32_COLOR_COR_UPDATE, V32_COLOR_COR_LEN, V32_COLOR_COR_OFF,
+		0xFF},
+		{V32_RGB_G_UPDATE, V32_RGB_G_LEN, V32_CHROMA_EN_OFF, 0xFF},
+		{V32_LA_UPDATE, V32_LA_LEN, V32_LA_OFF, 0xFF },
+/*40*/	{V32_CHROMA_EN_UPDATE, V32_CHROMA_EN_LEN, V32_CHROMA_EN_OFF,
+		0xFF},
+		{V32_CHROMA_SUP_UPDATE, V32_CHROMA_SUP_LEN, V32_CHROMA_SUP_OFF,
+		0xFF},
+		{V32_MCE_UPDATE, V32_MCE_LEN, V32_MCE_OFF, 0xFF},
+		{V32_SK_ENHAN_UPDATE, V32_SCE_LEN, V32_SCE_OFF, 0xFF},
+		{V32_S2CbCr_UPDATE, V32_S2CbCr_LEN, V32_S2CbCr_OFF, 0xFF},
+/*45*/	{V32_S2Y_UPDATE, V32_S2Y_LEN, V32_S2Y_OFF, 0xFF},
+		{V32_ASF_UPDATE, V32_ASF_UPDATE_LEN, V32_ASF_OFF, 0xFF},
+		{V32_FRAME_SKIP_UPDATE},
+		{V32_CAMIF_FRAME_UPDATE},
+		{V32_STATS_AF_UPDATE, V32_STATS_AF_LEN, V32_STATS_AF_OFF},
+/*50*/	{V32_STATS_AE_UPDATE, V32_STATS_AE_LEN, V32_STATS_AE_OFF},
+		{V32_STATS_AWB_UPDATE, V32_STATS_AWB_LEN, V32_STATS_AWB_OFF},
+		{V32_STATS_RS_UPDATE, V32_STATS_RS_LEN, V32_STATS_RS_OFF},
+		{V32_STATS_CS_UPDATE, V32_STATS_CS_LEN, V32_STATS_CS_OFF},
+		{V32_STATS_SKIN_UPDATE},
+/*55*/	{V32_STATS_IHIST_UPDATE, V32_STATS_IHIST_LEN, V32_STATS_IHIST_OFF},
+		{V32_DUMMY_4},
+		{V32_EPOCH1_ACK},
+		{V32_EPOCH2_ACK},
+		{V32_START_RECORDING},
+/*60*/	{V32_STOP_RECORDING},
+		{V32_DUMMY_5},
+		{V32_DUMMY_6},
+		{V32_CAPTURE, V32_CAPTURE_LEN, 0xFF},
+		{V32_DUMMY_7},
+/*65*/	{V32_STOP},
+		{V32_GET_HW_VERSION},
+		{V32_GET_FRAME_SKIP_COUNTS},
+		{V32_OUTPUT1_BUFFER_ENQ},
+		{V32_OUTPUT2_BUFFER_ENQ},
+/*70*/	{V32_OUTPUT3_BUFFER_ENQ},
+		{V32_JPEG_OUT_BUF_ENQ},
+		{V32_RAW_OUT_BUF_ENQ},
+		{V32_RAW_IN_BUF_ENQ},
+		{V32_STATS_AF_ENQ},
+/*75*/	{V32_STATS_AE_ENQ},
+		{V32_STATS_AWB_ENQ},
+		{V32_STATS_RS_ENQ},
+		{V32_STATS_CS_ENQ},
+		{V32_STATS_SKIN_ENQ},
+/*80*/	{V32_STATS_IHIST_ENQ},
+		{V32_DUMMY_8},
+		{V32_JPEG_ENC_CFG},
+		{V32_DUMMY_9},
+		{V32_STATS_AF_START, V32_STATS_AF_LEN, V32_STATS_AF_OFF},
+/*85*/	{V32_STATS_AF_STOP},
+		{V32_STATS_AE_START, V32_STATS_AE_LEN, V32_STATS_AE_OFF},
+		{V32_STATS_AE_STOP},
+		{V32_STATS_AWB_START, V32_STATS_AWB_LEN, V32_STATS_AWB_OFF},
+		{V32_STATS_AWB_STOP},
+/*90*/	{V32_STATS_RS_START, V32_STATS_RS_LEN, V32_STATS_RS_OFF},
+		{V32_STATS_RS_STOP},
+		{V32_STATS_CS_START, V32_STATS_CS_LEN, V32_STATS_CS_OFF},
+		{V32_STATS_CS_STOP},
+		{V32_STATS_SKIN_START},
+/*95*/	{V32_STATS_SKIN_STOP},
+		{V32_STATS_IHIST_START,
+		V32_STATS_IHIST_LEN, V32_STATS_IHIST_OFF},
+		{V32_STATS_IHIST_STOP},
+		{V32_DUMMY_10},
+		{V32_SYNC_TIMER_SETTING, V32_SYNC_TIMER_LEN,
+			V32_SYNC_TIMER_OFF},
+/*100*/	{V32_ASYNC_TIMER_SETTING, V32_ASYNC_TIMER_LEN, V32_ASYNC_TIMER_OFF},
+		{V32_LIVESHOT},
+		{V32_LA_SETUP},
+		{V32_LINEARIZATION, V32_LINEARIZATION_LEN1,
+					V32_LINEARIZATION_OFF1},
+		{V32_DEMOSAICV3},
+/*105*/	{V32_DEMOSAICV3_ABCC_CFG},
+		{V32_DEMOSAICV3_DBCC_CFG, V32_DEMOSAICV3_DBCC_LEN,
+			V32_DEMOSAICV3_DBCC_OFF},
+		{V32_DEMOSAICV3_DBPC_CFG},
+		{V32_DEMOSAICV3_ABF_CFG},
+		{V32_DEMOSAICV3_ABCC_UPDATE},
+		{V32_DEMOSAICV3_DBCC_UPDATE, V32_DEMOSAICV3_DBCC_LEN,
+			V32_DEMOSAICV3_DBCC_OFF},
+		{V32_DEMOSAICV3_DBPC_UPDATE},
+};
+
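+/* Per write-master (WM0..WM6) AXI bus config register offsets, indexed by
+ * the write-master number assigned to each output channel. */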
+uint32_t vfe32_AXI_WM_CFG[] = {
+	0x0000004C,
+	0x00000064,
+	0x0000007C,
+	0x00000094,
+	0x000000AC,
+	0x000000C4,
+	0x000000DC,
+};
+
+static const char * const vfe32_general_cmd[] = {
+	"DUMMY_0",  /* 0 */
+	"SET_CLK",
+	"RESET",
+	"START",
+	"TEST_GEN_START",
+	"OPERATION_CFG",  /* 5 */
+	"AXI_OUT_CFG",
+	"CAMIF_CFG",
+	"AXI_INPUT_CFG",
+	"BLACK_LEVEL_CFG",
+	"ROLL_OFF_CFG",  /* 10 */
+	"DEMUX_CFG",
+	"FOV_CFG",
+	"MAIN_SCALER_CFG",
+	"WB_CFG",
+	"COLOR_COR_CFG", /* 15 */
+	"RGB_G_CFG",
+	"LA_CFG",
+	"CHROMA_EN_CFG",
+	"CHROMA_SUP_CFG",
+	"MCE_CFG", /* 20 */
+	"SK_ENHAN_CFG",
+	"ASF_CFG",
+	"S2Y_CFG",
+	"S2CbCr_CFG",
+	"CHROMA_SUBS_CFG",  /* 25 */
+	"OUT_CLAMP_CFG",
+	"FRAME_SKIP_CFG",
+	"DUMMY_1",
+	"DUMMY_2",
+	"DUMMY_3",  /* 30 */
+	"UPDATE",
+	"BL_LVL_UPDATE",
+	"DEMUX_UPDATE",
+	"FOV_UPDATE",
+	"MAIN_SCALER_UPDATE",  /* 35 */
+	"WB_UPDATE",
+	"COLOR_COR_UPDATE",
+	"RGB_G_UPDATE",
+	"LA_UPDATE",
+	"CHROMA_EN_UPDATE",  /* 40 */
+	"CHROMA_SUP_UPDATE",
+	"MCE_UPDATE",
+	"SK_ENHAN_UPDATE",
+	"S2CbCr_UPDATE",
+	"S2Y_UPDATE",  /* 45 */
+	"ASF_UPDATE",
+	"FRAME_SKIP_UPDATE",
+	"CAMIF_FRAME_UPDATE",
+	"STATS_AF_UPDATE",
+	"STATS_AE_UPDATE",  /* 50 */
+	"STATS_AWB_UPDATE",
+	"STATS_RS_UPDATE",
+	"STATS_CS_UPDATE",
+	"STATS_SKIN_UPDATE",
+	"STATS_IHIST_UPDATE",  /* 55 */
+	"DUMMY_4",
+	"EPOCH1_ACK",
+	"EPOCH2_ACK",
+	"START_RECORDING",
+	"STOP_RECORDING",  /* 60 */
+	"DUMMY_5",
+	"DUMMY_6",
+	"CAPTURE",
+	"DUMMY_7",
+	"STOP",  /* 65 */
+	"GET_HW_VERSION",
+	"GET_FRAME_SKIP_COUNTS",
+	"OUTPUT1_BUFFER_ENQ",
+	"OUTPUT2_BUFFER_ENQ",
+	"OUTPUT3_BUFFER_ENQ",  /* 70 */
+	"JPEG_OUT_BUF_ENQ",
+	"RAW_OUT_BUF_ENQ",
+	"RAW_IN_BUF_ENQ",
+	"STATS_AF_ENQ",
+	"STATS_AE_ENQ",  /* 75 */
+	"STATS_AWB_ENQ",
+	"STATS_RS_ENQ",
+	"STATS_CS_ENQ",
+	"STATS_SKIN_ENQ",
+	"STATS_IHIST_ENQ",  /* 80 */
+	"DUMMY_8",
+	"JPEG_ENC_CFG",
+	"DUMMY_9",
+	"STATS_AF_START",
+	"STATS_AF_STOP",  /* 85 */
+	"STATS_AE_START",
+	"STATS_AE_STOP",
+	"STATS_AWB_START",
+	"STATS_AWB_STOP",
+	"STATS_RS_START",  /* 90 */
+	"STATS_RS_STOP",
+	"STATS_CS_START",
+	"STATS_CS_STOP",
+	"STATS_SKIN_START",
+	"STATS_SKIN_STOP",  /* 95 */
+	"STATS_IHIST_START",
+	"STATS_IHIST_STOP",
+	"DUMMY_10",
+	"SYNC_TIMER_SETTING",
+	"ASYNC_TIMER_SETTING",  /* 100 */
+	"LIVESHOT",
+	"LA_SETUP",
+	"LINEARIZATION",
+	"DEMOSAICV3",
+	"DEMOSAICV3_ABCC_CFG", /* 105 */
+	"DEMOSAICV3_DBCC_CFG",
+	"DEMOSAICV3_DBPC_CFG",
+	"DEMOSAICV3_ABF_CFG", /* 108 */
+	"DEMOSAICV3_ABCC_UPDATE",
+	"DEMOSAICV3_DBCC_UPDATE",
+	"DEMOSAICV3_DBPC_UPDATE",
+	"EZTUNE_CFG",
+};
+
+static void vfe_addr_convert(struct msm_vfe_phy_info *pinfo,
+	enum vfe_resp_msg type, void *data, void **ext, int32_t *elen)
+{
+	uint8_t outid;
+	switch (type) {
+	case VFE_MSG_OUTPUT_T:
+	case VFE_MSG_OUTPUT_P:
+	case VFE_MSG_OUTPUT_S:
+	case VFE_MSG_OUTPUT_V:
+		pinfo->output_id =
+			((struct vfe_message *)data)->_u.msgOut.output_id;
+
+		switch (type) {
+		case VFE_MSG_OUTPUT_P:
+			outid = OUTPUT_TYPE_P;
+			break;
+		case VFE_MSG_OUTPUT_V:
+			outid = OUTPUT_TYPE_V;
+			break;
+		case VFE_MSG_OUTPUT_T:
+			outid = OUTPUT_TYPE_T;
+			break;
+		case VFE_MSG_OUTPUT_S:
+			outid = OUTPUT_TYPE_S;
+			break;
+		default:
+			outid = 0xff;
+			break;
+		}
+		pinfo->output_id = outid;
+		pinfo->y_phy =
+			((struct vfe_message *)data)->_u.msgOut.yBuffer;
+		pinfo->cbcr_phy =
+			((struct vfe_message *)data)->_u.msgOut.cbcrBuffer;
+
+		pinfo->frame_id =
+		((struct vfe_message *)data)->_u.msgOut.frameCounter;
+
+		((struct vfe_msg_output *)(vfe32_ctrl->extdata))->bpcInfo =
+		((struct vfe_message *)data)->_u.msgOut.bpcInfo;
+		((struct vfe_msg_output *)(vfe32_ctrl->extdata))->asfInfo =
+		((struct vfe_message *)data)->_u.msgOut.asfInfo;
+		((struct vfe_msg_output *)(vfe32_ctrl->extdata))->frameCounter =
+		((struct vfe_message *)data)->_u.msgOut.frameCounter;
+		*ext  = vfe32_ctrl->extdata;
+		*elen = vfe32_ctrl->extlen;
+		break;
+	case VFE_MSG_STATS_AF:
+	case VFE_MSG_STATS_AEC:
+	case VFE_MSG_STATS_AWB:
+	case VFE_MSG_STATS_IHIST:
+	case VFE_MSG_STATS_RS:
+	case VFE_MSG_STATS_CS:
+		pinfo->sbuf_phy =
+		((struct vfe_message *)data)->_u.msgStats.buffer;
+
+		pinfo->frame_id =
+		((struct vfe_message *)data)->_u.msgStats.frameCounter;
+
+		break;
+	default:
+		break;
+	} /* switch */
+}
+
+static void vfe32_proc_ops(enum VFE32_MESSAGE_ID id, void *msg, size_t len)
+{
+	struct msm_vfe_resp *rp;
+
+	rp = msm_isp_sync_alloc(sizeof(struct msm_vfe_resp),
+		vfe32_ctrl->syncdata, GFP_ATOMIC);
+	if (!rp) {
+		CDBG("rp: cannot allocate buffer\n");
+		return;
+	}
+	CDBG("vfe32_proc_ops, msgId = %d\n", id);
+	rp->evt_msg.type   = MSM_CAMERA_MSG;
+	rp->evt_msg.msg_id = id;
+	rp->evt_msg.len    = len;
+	rp->evt_msg.data   = msg;
+
+	switch (rp->evt_msg.msg_id) {
+	case MSG_ID_SNAPSHOT_DONE:
+		rp->type = VFE_MSG_SNAPSHOT;
+		break;
+
+	case MSG_ID_OUTPUT_P:
+		rp->type = VFE_MSG_OUTPUT_P;
+		vfe_addr_convert(&(rp->phy), VFE_MSG_OUTPUT_P,
+			rp->evt_msg.data, &(rp->extdata),
+			&(rp->extlen));
+		break;
+
+	case MSG_ID_OUTPUT_T:
+		rp->type = VFE_MSG_OUTPUT_T;
+		vfe_addr_convert(&(rp->phy), VFE_MSG_OUTPUT_T,
+			rp->evt_msg.data, &(rp->extdata),
+			&(rp->extlen));
+		break;
+
+	case MSG_ID_OUTPUT_S:
+		rp->type = VFE_MSG_OUTPUT_S;
+		vfe_addr_convert(&(rp->phy), VFE_MSG_OUTPUT_S,
+			rp->evt_msg.data, &(rp->extdata),
+			&(rp->extlen));
+		break;
+
+	case MSG_ID_OUTPUT_V:
+		rp->type = VFE_MSG_OUTPUT_V;
+		vfe_addr_convert(&(rp->phy), VFE_MSG_OUTPUT_V,
+			rp->evt_msg.data, &(rp->extdata),
+			&(rp->extlen));
+		break;
+
+	case MSG_ID_STATS_AF:
+		rp->type = VFE_MSG_STATS_AF;
+		vfe_addr_convert(&(rp->phy), VFE_MSG_STATS_AF,
+				rp->evt_msg.data, NULL, NULL);
+		break;
+
+	case MSG_ID_STATS_AWB:
+		rp->type = VFE_MSG_STATS_AWB;
+		vfe_addr_convert(&(rp->phy), VFE_MSG_STATS_AWB,
+				rp->evt_msg.data, NULL, NULL);
+		break;
+
+	case MSG_ID_STATS_AEC:
+		rp->type = VFE_MSG_STATS_AEC;
+		vfe_addr_convert(&(rp->phy), VFE_MSG_STATS_AEC,
+				rp->evt_msg.data, NULL, NULL);
+		break;
+
+	case MSG_ID_STATS_SKIN:
+		rp->type = VFE_MSG_STATS_SKIN;
+		vfe_addr_convert(&(rp->phy), VFE_MSG_STATS_SKIN,
+				rp->evt_msg.data, NULL, NULL);
+		break;
+
+	case MSG_ID_STATS_IHIST:
+		rp->type = VFE_MSG_STATS_IHIST;
+		vfe_addr_convert(&(rp->phy), VFE_MSG_STATS_IHIST,
+				rp->evt_msg.data, NULL, NULL);
+		break;
+
+	case MSG_ID_STATS_RS:
+		rp->type = VFE_MSG_STATS_RS;
+		vfe_addr_convert(&(rp->phy), VFE_MSG_STATS_RS,
+				rp->evt_msg.data, NULL, NULL);
+		break;
+
+	case MSG_ID_STATS_CS:
+		rp->type = VFE_MSG_STATS_CS;
+		vfe_addr_convert(&(rp->phy), VFE_MSG_STATS_CS,
+				rp->evt_msg.data, NULL, NULL);
+		break;
+
+	case MSG_ID_SYNC_TIMER0_DONE:
+		rp->type = VFE_MSG_SYNC_TIMER0;
+		break;
+
+	case MSG_ID_SYNC_TIMER1_DONE:
+		rp->type = VFE_MSG_SYNC_TIMER1;
+		break;
+
+	case MSG_ID_SYNC_TIMER2_DONE:
+		rp->type = VFE_MSG_SYNC_TIMER2;
+		break;
+
+	default:
+		rp->type = VFE_MSG_GENERAL;
+		break;
+	}
+
+	/* save the frame id.*/
+	rp->evt_msg.frame_id = rp->phy.frame_id;
+
+	v4l2_subdev_notify(vfe32_ctrl->subdev, NOTIFY_VFE_MSG_EVT, rp);
+}
+
+static void vfe_send_outmsg(uint8_t msgid, uint32_t pyaddr,
+	uint32_t pcbcraddr)
+{
+	struct vfe_message msg;
+	uint8_t outid;
+
+	msg._d = msgid;   /* now the output mode is redundant. */
+
+	switch (msgid) {
+	case MSG_ID_OUTPUT_P:
+		outid = OUTPUT_TYPE_P;
+		break;
+	case MSG_ID_OUTPUT_V:
+		outid = OUTPUT_TYPE_V;
+		break;
+	case MSG_ID_OUTPUT_T:
+		outid = OUTPUT_TYPE_T;
+		break;
+	case MSG_ID_OUTPUT_S:
+		outid = OUTPUT_TYPE_S;
+		break;
+	default:
+		outid = 0xff;  /* -1 for error condition.*/
+		break;
+	}
+	msg._u.msgOut.output_id   = msgid;
+	msg._u.msgOut.yBuffer     = pyaddr;
+	msg._u.msgOut.cbcrBuffer  = pcbcraddr;
+	vfe32_proc_ops(msgid, &msg, sizeof(struct vfe_message));
+	return;
+}
+
+static void vfe32_stop(void)
+{
+	uint8_t  axiBusyFlag = true;
+	unsigned long flags;
+
+	atomic_set(&vfe32_ctrl->vstate, 0);
+
+	/* the hw modules will be reset; the stop ack msg is sent when the
+	 * reset irq arrives. */
+	spin_lock_irqsave(&vfe32_ctrl->stop_flag_lock, flags);
+	vfe32_ctrl->stop_ack_pending = TRUE;
+	spin_unlock_irqrestore(&vfe32_ctrl->stop_flag_lock, flags);
+
+	/* disable all interrupts.  */
+	msm_io_w(VFE_DISABLE_ALL_IRQS,
+		vfe32_ctrl->vfebase + VFE_IRQ_MASK_0);
+	msm_io_w(VFE_DISABLE_ALL_IRQS,
+			vfe32_ctrl->vfebase + VFE_IRQ_MASK_1);
+
+	/* clear all pending interrupts*/
+	msm_io_w(VFE_CLEAR_ALL_IRQS,
+		vfe32_ctrl->vfebase + VFE_IRQ_CLEAR_0);
+	msm_io_w(VFE_CLEAR_ALL_IRQS,
+		vfe32_ctrl->vfebase + VFE_IRQ_CLEAR_1);
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_io_w_mb(1,
+		vfe32_ctrl->vfebase + VFE_IRQ_CMD);
+
+	/* in either continuous or snapshot mode, stop command can be issued
+	 * at any time. stop camif immediately. */
+	msm_io_w(CAMIF_COMMAND_STOP_IMMEDIATELY,
+		vfe32_ctrl->vfebase + VFE_CAMIF_COMMAND);
+	wmb();
+	/* axi halt command. */
+	msm_io_w(AXI_HALT,
+		vfe32_ctrl->vfebase + VFE_AXI_CMD);
+	wmb();
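+	/* poll the AXI status until the halt request is acknowledged
+	 * (bit 0 set); note there is no timeout on this loop. */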
+	while (axiBusyFlag) {
+		if (msm_io_r(vfe32_ctrl->vfebase + VFE_AXI_STATUS) & 0x1)
+			axiBusyFlag = false;
+	}
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_io_w_mb(AXI_HALT_CLEAR,
+		vfe32_ctrl->vfebase + VFE_AXI_CMD);
+
+	/* after axi halt, then ok to apply global reset. */
+	/* enable reset_ack and async timer interrupt only while
+	stopping the pipeline.*/
+	msm_io_w(0xf0000000,
+		vfe32_ctrl->vfebase + VFE_IRQ_MASK_0);
+	msm_io_w(VFE_IMASK_WHILE_STOPPING_1,
+		vfe32_ctrl->vfebase + VFE_IRQ_MASK_1);
+
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_io_w_mb(VFE_RESET_UPON_STOP_CMD,
+		vfe32_ctrl->vfebase + VFE_GLOBAL_RESET);
+}
+
+static int vfe32_enqueue_free_buf(struct vfe32_output_ch *outch,
+	uint32_t paddr, uint32_t y_off, uint32_t cbcr_off)
+{
+	struct vfe32_free_buf *free_buf = NULL;
+	unsigned long flags = 0;
+	free_buf = kmalloc(sizeof(struct vfe32_free_buf), GFP_KERNEL);
+	if (!free_buf)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&outch->free_buf_lock, flags);
+	free_buf->paddr = paddr;
+	free_buf->y_off = y_off;
+	free_buf->cbcr_off = cbcr_off;
+	list_add_tail(&free_buf->node, &outch->free_buf_queue);
+	CDBG("%s: free_buf paddr = 0x%x, y_off = %d, cbcr_off = %d\n",
+		__func__, free_buf->paddr, free_buf->y_off,
+		free_buf->cbcr_off);
+	spin_unlock_irqrestore(&outch->free_buf_lock, flags);
+	return 0;
+}
+
+static struct vfe32_free_buf *vfe32_dequeue_free_buf(
+	struct vfe32_output_ch *outch)
+{
+	unsigned long flags = 0;
+	struct vfe32_free_buf *free_buf = NULL;
+	spin_lock_irqsave(&outch->free_buf_lock, flags);
+	if (!list_empty(&outch->free_buf_queue)) {
+		free_buf = list_first_entry(&outch->free_buf_queue,
+			struct vfe32_free_buf, node);
+		if (free_buf)
+			list_del_init(&free_buf->node);
+	}
+	spin_unlock_irqrestore(&outch->free_buf_lock, flags);
+	return free_buf;
+}
+
+static void vfe32_reset_free_buf_queue(
+	struct vfe32_output_ch *outch)
+{
+	unsigned long flags = 0;
+	struct vfe32_free_buf *free_buf = NULL;
+	spin_lock_irqsave(&outch->free_buf_lock, flags);
+	while (!list_empty(&outch->free_buf_queue)) {
+		free_buf = list_first_entry(&outch->free_buf_queue,
+			struct vfe32_free_buf, node);
+		if (free_buf) {
+			list_del_init(&free_buf->node);
+			kfree(free_buf);
+		}
+	}
+	spin_unlock_irqrestore(&outch->free_buf_lock, flags);
+}
+
+static void vfe32_init_free_buf_queues(void)
+{
+	INIT_LIST_HEAD(&vfe32_ctrl->outpath.out0.free_buf_queue);
+	INIT_LIST_HEAD(&vfe32_ctrl->outpath.out1.free_buf_queue);
+	INIT_LIST_HEAD(&vfe32_ctrl->outpath.out2.free_buf_queue);
+	spin_lock_init(&vfe32_ctrl->outpath.out0.free_buf_lock);
+	spin_lock_init(&vfe32_ctrl->outpath.out1.free_buf_lock);
+	spin_lock_init(&vfe32_ctrl->outpath.out2.free_buf_lock);
+}
+
+static void vfe32_reset_free_buf_queues(void)
+{
+	vfe32_reset_free_buf_queue(&vfe32_ctrl->outpath.out0);
+	vfe32_reset_free_buf_queue(&vfe32_ctrl->outpath.out1);
+	vfe32_reset_free_buf_queue(&vfe32_ctrl->outpath.out2);
+}
+
+static int vfe32_config_axi(int mode, struct axidata *ad, uint32_t *ao)
+{
+	int ret;
+	int i;
+	uint32_t *p, *p1, *p2;
+	int32_t *ch_info;
+	struct vfe32_output_ch *outp1, *outp2;
+	struct msm_pmem_region *regp1 = NULL;
+	struct msm_pmem_region *regp2 = NULL;
+
+	outp1 = NULL;
+	outp2 = NULL;
+
+	p = ao + 2;
+
+	/* Update the corresponding write masters for each output*/
+	ch_info = ao + V32_AXI_CFG_LEN;
+	vfe32_ctrl->outpath.out0.ch0 = 0x0000FFFF & *ch_info;
+	vfe32_ctrl->outpath.out0.ch1 = 0x0000FFFF & (*ch_info++ >> 16);
+	vfe32_ctrl->outpath.out0.ch2 = 0x0000FFFF & *ch_info++;
+	vfe32_ctrl->outpath.out1.ch0 = 0x0000FFFF & *ch_info;
+	vfe32_ctrl->outpath.out1.ch1 = 0x0000FFFF & (*ch_info++ >> 16);
+	vfe32_ctrl->outpath.out1.ch2 = 0x0000FFFF & *ch_info++;
+	vfe32_ctrl->outpath.out2.ch0 = 0x0000FFFF & *ch_info;
+	vfe32_ctrl->outpath.out2.ch1 = 0x0000FFFF & (*ch_info++ >> 16);
+	vfe32_ctrl->outpath.out2.ch2 = 0x0000FFFF & *ch_info++;
+
+	CDBG("vfe32_config_axi: mode = %d, bufnum1 = %d, bufnum2 = %d\n",
+		mode, ad->bufnum1, ad->bufnum2);
+
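+	/*
+	 * ao[] mirrors the AXI output configuration block from userspace; the
+	 * ping/pong buffer address words patched below sit at fixed word
+	 * offsets (6/7 and 12/13 for wm0/wm1, 30/31 and 36/37 for wm4/wm5).
+	 */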
+	switch (mode) {
+
+	case OUTPUT_2: {
+		if (ad->bufnum2 < 3)
+			return -EINVAL;
+		regp1 = &(ad->region[ad->bufnum1]);
+		outp1 = &(vfe32_ctrl->outpath.out0);
+		vfe32_ctrl->outpath.output_mode |= VFE32_OUTPUT_MODE_PT;
+
+		for (i = 0; i < 2; i++) {
+			p1 = ao + 6 + i;    /* wm0 for y  */
+			*p1 = (regp1->paddr + regp1->info.y_off);
+
+			p1 = ao + 12 + i;  /* wm1 for cbcr */
+			*p1 = (regp1->paddr + regp1->info.cbcr_off);
+			regp1++;
+		}
+		for (i = 2; i < ad->bufnum2; i++) {
+			ret = vfe32_enqueue_free_buf(outp1, regp1->paddr,
+				regp1->info.y_off, regp1->info.cbcr_off);
+			if (ret < 0)
+				return ret;
+			regp1++;
+		}
+	}
+		break;
+
+	case OUTPUT_1_AND_2:
+		/* use wm0 & wm4 for thumbnail, wm1 & wm5 for main image. */
+		if ((ad->bufnum1 < 1) || (ad->bufnum2 < 1))
+			return -EINVAL;
+		vfe32_ctrl->outpath.output_mode |=
+			VFE32_OUTPUT_MODE_S;  /* main image.*/
+		vfe32_ctrl->outpath.output_mode |=
+			VFE32_OUTPUT_MODE_PT;  /* thumbnail. */
+
+		regp1 = &(ad->region[0]); /* this is thumbnail buffer. */
+		/* this is main image buffer. */
+		regp2 = &(ad->region[ad->bufnum1]);
+		outp1 = &(vfe32_ctrl->outpath.out0);
+		outp2 = &(vfe32_ctrl->outpath.out1); /* snapshot */
+
+		/*  Parse the buffers!!! */
+		if (ad->bufnum2 == 1) {	/* assuming bufnum1 = bufnum2 */
+			p1 = ao + 6;   /* wm0 ping */
+			*p1++ = (regp1->paddr + regp1->info.y_off);
+			/* this is to duplicate ping address to pong.*/
+			*p1 = (regp1->paddr + regp1->info.y_off);
+			p1 = ao + 30;  /* wm4 ping */
+			*p1++ = (regp1->paddr + regp1->info.cbcr_off);
+			/* this is to duplicate ping address to pong.*/
+			*p1 = (regp1->paddr + regp1->info.cbcr_off);
+			p1 = ao + 12;   /* wm1 ping */
+			*p1++ = (regp2->paddr + regp2->info.y_off);
+			/* pong = ping,*/
+			*p1 = (regp2->paddr + regp2->info.y_off);
+			p1 = ao + 36;  /* wm5 */
+			*p1++ = (regp2->paddr + regp2->info.cbcr_off);
+			*p1 = (regp2->paddr + regp2->info.cbcr_off);
+
+		} else { /* more than one snapshot */
+			/* first fill ping & pong */
+			for (i = 0; i < 2; i++) {
+				p1 = ao + 6 + i;    /* wm0 for y  */
+				*p1 = (regp1->paddr + regp1->info.y_off);
+				p1 = ao + 30 + i;  /* wm4 for cbcr */
+				*p1 = (regp1->paddr + regp1->info.cbcr_off);
+				regp1++;
+			}
+
+			for (i = 0; i < 2; i++) {
+				p2 = ao + 12 + i;    /* wm1 for y  */
+				*p2 = (regp2->paddr + regp2->info.y_off);
+				p2 = ao + 36 + i;  /* wm5 for cbcr */
+				*p2 = (regp2->paddr + regp2->info.cbcr_off);
+				regp2++;
+			}
+
+			for (i = 2; i < ad->bufnum1; i++) {
+				ret = vfe32_enqueue_free_buf(outp1,
+							regp1->paddr,
+							regp1->info.y_off,
+							regp1->info.cbcr_off);
+				if (ret < 0)
+					return ret;
+				regp1++;
+			}
+			for (i = 2; i < ad->bufnum2; i++) {
+				ret = vfe32_enqueue_free_buf(outp2,
+							regp2->paddr,
+							regp2->info.y_off,
+							regp2->info.cbcr_off);
+				if (ret < 0)
+					return ret;
+				regp2++;
+			}
+		}
+		break;
+
+	case OUTPUT_1_AND_3:
+		/* use wm0 & wm4 for preview, wm1 & wm5 for video. */
+		if ((ad->bufnum1 < 2) || (ad->bufnum2 < 2))
+			return -EINVAL;
+
+		vfe32_ctrl->outpath.output_mode |=
+			VFE32_OUTPUT_MODE_V;  /* video*/
+		vfe32_ctrl->outpath.output_mode |=
+			VFE32_OUTPUT_MODE_PT;  /* preview */
+
+		regp1 = &(ad->region[0]); /* this is preview buffer. */
+		regp2 = &(ad->region[ad->bufnum1]);/* this is video buffer. */
+		outp1 = &(vfe32_ctrl->outpath.out0); /* preview */
+		outp2 = &(vfe32_ctrl->outpath.out2); /* video */
+
+
+		for (i = 0; i < 2; i++) {
+			p1 = ao + 6 + i;    /* wm0 for y  */
+			*p1 = (regp1->paddr + regp1->info.y_off);
+
+			p1 = ao + 30 + i;  /* wm4 for cbcr */
+			*p1 = (regp1->paddr + regp1->info.cbcr_off);
+			regp1++;
+		}
+
+		for (i = 0; i < 2; i++) {
+			p2 = ao + 12 + i;    /* wm1 for y  */
+			*p2 = (regp2->paddr + regp2->info.y_off);
+
+			p2 = ao + 36 + i;  /* wm5 for cbcr */
+			*p2 = (regp2->paddr + regp2->info.cbcr_off);
+			regp2++;
+		}
+		for (i = 2; i < ad->bufnum1; i++) {
+			ret = vfe32_enqueue_free_buf(outp1, regp1->paddr,
+						regp1->info.y_off,
+						regp1->info.cbcr_off);
+			if (ret < 0)
+				return ret;
+			regp1++;
+		}
+		for (i = 2; i < ad->bufnum2; i++) {
+			ret = vfe32_enqueue_free_buf(outp2, regp2->paddr,
+						regp2->info.y_off,
+						regp2->info.cbcr_off);
+			if (ret < 0)
+				return ret;
+			regp2++;
+		}
+		break;
+	case CAMIF_TO_AXI_VIA_OUTPUT_2: {  /* use wm0 only */
+		if (ad->bufnum2 < 1)
+			return -EINVAL;
+		CDBG("config axi for raw snapshot.\n");
+		vfe32_ctrl->outpath.out1.ch0 = 0; /* raw */
+		regp1 = &(ad->region[ad->bufnum1]);
+		vfe32_ctrl->outpath.output_mode |= VFE32_OUTPUT_MODE_S;
+		p1 = ao + 6;    /* wm0 for y  */
+		*p1 = (regp1->paddr + regp1->info.y_off);
+		}
+		break;
+	default:
+		break;
+	}
+	msm_io_memcpy(vfe32_ctrl->vfebase + vfe32_cmd[V32_AXI_OUT_CFG].offset,
+		ao, vfe32_cmd[V32_AXI_OUT_CFG].length - V32_AXI_CH_INF_LEN);
+	return 0;
+}
+
+static void vfe32_reset_internal_variables(void)
+{
+	unsigned long flags;
+	vfe32_ctrl->vfeImaskCompositePacked = 0;
+	/* state control variables */
+	vfe32_ctrl->start_ack_pending = FALSE;
+	atomic_set(&irq_cnt, 0);
+
+	spin_lock_irqsave(&vfe32_ctrl->stop_flag_lock, flags);
+	vfe32_ctrl->stop_ack_pending  = FALSE;
+	spin_unlock_irqrestore(&vfe32_ctrl->stop_flag_lock, flags);
+
+	vfe32_ctrl->reset_ack_pending  = FALSE;
+
+	spin_lock_irqsave(&vfe32_ctrl->update_ack_lock, flags);
+	vfe32_ctrl->update_ack_pending = FALSE;
+	spin_unlock_irqrestore(&vfe32_ctrl->update_ack_lock, flags);
+
+	vfe32_ctrl->req_stop_video_rec = FALSE;
+	vfe32_ctrl->req_start_video_rec = FALSE;
+
+	atomic_set(&vfe32_ctrl->vstate, 0);
+
+	/* 0 for continuous mode, 1 for snapshot mode */
+	vfe32_ctrl->operation_mode = 0;
+	vfe32_ctrl->outpath.output_mode = 0;
+	vfe32_ctrl->vfe_capture_count = 0;
+
+	/* this is unsigned 32 bit integer. */
+	vfe32_ctrl->vfeFrameId = 0;
+	/* Stats control variables. */
+	memset(&(vfe32_ctrl->afStatsControl), 0,
+		sizeof(struct vfe_stats_control));
+
+	memset(&(vfe32_ctrl->awbStatsControl), 0,
+		sizeof(struct vfe_stats_control));
+
+	memset(&(vfe32_ctrl->aecStatsControl), 0,
+		sizeof(struct vfe_stats_control));
+
+	memset(&(vfe32_ctrl->ihistStatsControl), 0,
+		sizeof(struct vfe_stats_control));
+
+	memset(&(vfe32_ctrl->rsStatsControl), 0,
+		sizeof(struct vfe_stats_control));
+
+	memset(&(vfe32_ctrl->csStatsControl), 0,
+		sizeof(struct vfe_stats_control));
+}
+
+static void vfe32_reset(void)
+{
+	uint32_t vfe_version;
+	vfe32_reset_free_buf_queues();
+	vfe32_reset_internal_variables();
+	vfe_version = msm_io_r(vfe32_ctrl->vfebase);
+	CDBG("vfe_version = 0x%x\n", vfe_version);
+	/* disable all interrupts.  The composite interrupt mask is also
+	* reset to 0 to begin with. */
+	msm_io_w(VFE_DISABLE_ALL_IRQS,
+		vfe32_ctrl->vfebase + VFE_IRQ_MASK_0);
+
+	msm_io_w(VFE_DISABLE_ALL_IRQS,
+		vfe32_ctrl->vfebase + VFE_IRQ_MASK_1);
+
+	/* clear all pending interrupts*/
+	msm_io_w(VFE_CLEAR_ALL_IRQS, vfe32_ctrl->vfebase + VFE_IRQ_CLEAR_0);
+	msm_io_w(VFE_CLEAR_ALL_IRQS, vfe32_ctrl->vfebase + VFE_IRQ_CLEAR_1);
+
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_io_w_mb(1, vfe32_ctrl->vfebase + VFE_IRQ_CMD);
+
+	/* enable reset_ack interrupt.  */
+	msm_io_w(VFE_IMASK_WHILE_STOPPING_1,
+	vfe32_ctrl->vfebase + VFE_IRQ_MASK_1);
+
+	/* Write to VFE_GLOBAL_RESET_CMD to reset the vfe hardware. Once reset
+	 * is done, a hardware interrupt will be generated.  The VFE ISR
+	 * processes the interrupt to complete the function call.  Note that
+	 * the reset function is synchronous. */
+
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_io_w_mb(VFE_RESET_UPON_RESET_CMD,
+		vfe32_ctrl->vfebase + VFE_GLOBAL_RESET);
+}
+
+static int vfe32_operation_config(uint32_t *cmd)
+{
+	uint32_t *p = cmd;
+
+	vfe32_ctrl->operation_mode = *p;
+	vfe32_ctrl->stats_comp = *(++p);
+
+	msm_io_w(*(++p), vfe32_ctrl->vfebase + VFE_CFG);
+	msm_io_w(*(++p), vfe32_ctrl->vfebase + VFE_MODULE_CFG);
+	msm_io_w(*(++p), vfe32_ctrl->vfebase + VFE_PIXEL_IF_CFG);
+	msm_io_w(*(++p), vfe32_ctrl->vfebase + VFE_REALIGN_BUF);
+	msm_io_w(*(++p), vfe32_ctrl->vfebase + VFE_CHROMA_UP);
+	msm_io_w(*(++p), vfe32_ctrl->vfebase + VFE_STATS_CFG);
+	return 0;
+}
+
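+/*
+ * The stats buffer init helpers below all follow the same pattern: the first
+ * two buffers are programmed as the ping/pong write addresses for that stats
+ * stream and the third is held as the next free buffer returned on a stats
+ * ack.
+ */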
+static uint32_t vfe_stats_awb_buf_init(struct vfe_cmd_stats_buf *in)
+{
+	uint32_t *ptr = in->statsBuf;
+	uint32_t addr;
+
+	addr = ptr[0];
+	msm_io_w(addr, vfe32_ctrl->vfebase + VFE_BUS_STATS_AWB_WR_PING_ADDR);
+	addr = ptr[1];
+	msm_io_w(addr, vfe32_ctrl->vfebase + VFE_BUS_STATS_AWB_WR_PONG_ADDR);
+	vfe32_ctrl->awbStatsControl.nextFrameAddrBuf = in->statsBuf[2];
+	return 0;
+}
+
+static uint32_t vfe_stats_aec_buf_init(struct vfe_cmd_stats_buf *in)
+{
+	uint32_t *ptr = in->statsBuf;
+	uint32_t addr;
+
+	addr = ptr[0];
+	msm_io_w(addr, vfe32_ctrl->vfebase + VFE_BUS_STATS_AEC_WR_PING_ADDR);
+	addr = ptr[1];
+	msm_io_w(addr, vfe32_ctrl->vfebase + VFE_BUS_STATS_AEC_WR_PONG_ADDR);
+
+	vfe32_ctrl->aecStatsControl.nextFrameAddrBuf = in->statsBuf[2];
+	return 0;
+}
+
+static uint32_t vfe_stats_af_buf_init(struct vfe_cmd_stats_buf *in)
+{
+	uint32_t *ptr = in->statsBuf;
+	uint32_t addr;
+
+	addr = ptr[0];
+	msm_io_w(addr, vfe32_ctrl->vfebase + VFE_BUS_STATS_AF_WR_PING_ADDR);
+	addr = ptr[1];
+	msm_io_w(addr, vfe32_ctrl->vfebase + VFE_BUS_STATS_AF_WR_PONG_ADDR);
+
+	vfe32_ctrl->afStatsControl.nextFrameAddrBuf = in->statsBuf[2];
+	return 0;
+}
+
+static uint32_t vfe_stats_ihist_buf_init(struct vfe_cmd_stats_buf *in)
+{
+	uint32_t *ptr = in->statsBuf;
+	uint32_t addr;
+
+	addr = ptr[0];
+	msm_io_w(addr, vfe32_ctrl->vfebase + VFE_BUS_STATS_HIST_WR_PING_ADDR);
+	addr = ptr[1];
+	msm_io_w(addr, vfe32_ctrl->vfebase + VFE_BUS_STATS_HIST_WR_PONG_ADDR);
+
+	vfe32_ctrl->ihistStatsControl.nextFrameAddrBuf = in->statsBuf[2];
+	return 0;
+}
+
+static uint32_t vfe_stats_rs_buf_init(struct vfe_cmd_stats_buf *in)
+{
+	uint32_t *ptr = in->statsBuf;
+	uint32_t addr;
+
+	addr = ptr[0];
+	msm_io_w(addr, vfe32_ctrl->vfebase + VFE_BUS_STATS_RS_WR_PING_ADDR);
+	addr = ptr[1];
+	msm_io_w(addr, vfe32_ctrl->vfebase + VFE_BUS_STATS_RS_WR_PONG_ADDR);
+
+	vfe32_ctrl->rsStatsControl.nextFrameAddrBuf = in->statsBuf[2];
+	return 0;
+}
+
+static uint32_t vfe_stats_cs_buf_init(struct vfe_cmd_stats_buf *in)
+{
+	uint32_t *ptr = in->statsBuf;
+	uint32_t addr;
+
+	addr = ptr[0];
+	msm_io_w(addr, vfe32_ctrl->vfebase + VFE_BUS_STATS_CS_WR_PING_ADDR);
+	addr = ptr[1];
+	msm_io_w(addr, vfe32_ctrl->vfebase + VFE_BUS_STATS_CS_WR_PONG_ADDR);
+
+	vfe32_ctrl->csStatsControl.nextFrameAddrBuf = in->statsBuf[2];
+	return 0;
+}
+
+static void vfe32_start_common(void)
+{
+
+	vfe32_ctrl->start_ack_pending = TRUE;
+	CDBG("VFE operation mode = 0x%x, output mode = 0x%x\n",
+		vfe32_ctrl->operation_mode, vfe32_ctrl->outpath.output_mode);
+	msm_io_w(0x00EFE021, vfe32_ctrl->vfebase + VFE_IRQ_MASK_0);
+	msm_io_w(VFE_IMASK_WHILE_STOPPING_1,
+		vfe32_ctrl->vfebase + VFE_IRQ_MASK_1);
+
+	msm_io_dump(vfe32_ctrl->vfebase, 0x740);
+
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_io_w_mb(1, vfe32_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+	msm_io_w(1, vfe32_ctrl->vfebase + VFE_CAMIF_COMMAND);
+	wmb();
+
+	atomic_set(&vfe32_ctrl->vstate, 1);
+}
+
+static int vfe32_start_recording(void)
+{
+	vfe32_ctrl->req_start_video_rec = TRUE;
+	/* Mask with 0x7 to extract the pixel pattern*/
+	switch (msm_io_r(vfe32_ctrl->vfebase + VFE_CFG) & 0x7) {
+	case VFE_YUV_YCbYCr:
+	case VFE_YUV_YCrYCb:
+	case VFE_YUV_CbYCrY:
+	case VFE_YUV_CrYCbY:
+		msm_io_w_mb(1,
+		vfe32_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int vfe32_stop_recording(void)
+{
+	vfe32_ctrl->req_stop_video_rec = TRUE;
+	/* Mask with 0x7 to extract the pixel pattern*/
+	switch (msm_io_r(vfe32_ctrl->vfebase + VFE_CFG) & 0x7) {
+	case VFE_YUV_YCbYCr:
+	case VFE_YUV_YCrYCb:
+	case VFE_YUV_CbYCrY:
+	case VFE_YUV_CrYCbY:
+		msm_io_w_mb(1,
+		vfe32_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static void vfe32_liveshot(void)
+{
+	struct msm_sync *p_sync = (struct msm_sync *)vfe_syncdata;
+	if (p_sync)
+		p_sync->liveshot_enabled = true;
+}
+
+static int vfe32_capture(uint32_t num_frames_capture)
+{
+	uint32_t irq_comp_mask = 0;
+	struct msm_sync *p_sync = (struct msm_sync *)vfe_syncdata;
+	if (p_sync) {
+		p_sync->snap_count = num_frames_capture;
+		p_sync->thumb_count = num_frames_capture;
+	}
+	/* capture command is valid for both idle and active state. */
+	vfe32_ctrl->outpath.out1.capture_cnt = num_frames_capture;
+	if (vfe32_ctrl->operation_mode == VFE_MODE_OF_OPERATION_SNAPSHOT) {
+		vfe32_ctrl->outpath.out0.capture_cnt =
+		num_frames_capture;
+	}
+
+	vfe32_ctrl->vfe_capture_count = num_frames_capture;
+	irq_comp_mask	=
+		msm_io_r(vfe32_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+
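+	/* extend the composite irq mask with the write master done bits for
+	 * this capture: the thumbnail path uses the low bits, the main image
+	 * path is offset by 8 (the video path uses +16, see vfe32_start). */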
+	if (vfe32_ctrl->operation_mode == VFE_MODE_OF_OPERATION_SNAPSHOT) {
+		if (vfe32_ctrl->outpath.output_mode & VFE32_OUTPUT_MODE_PT) {
+			irq_comp_mask |= (0x1 << vfe32_ctrl->outpath.out0.ch0 |
+					0x1 << vfe32_ctrl->outpath.out0.ch1);
+		}
+		if (vfe32_ctrl->outpath.output_mode & VFE32_OUTPUT_MODE_S) {
+			irq_comp_mask |=
+			(0x1 << (vfe32_ctrl->outpath.out1.ch0 + 8) |
+			0x1 << (vfe32_ctrl->outpath.out1.ch1 + 8));
+		}
+		if (vfe32_ctrl->outpath.output_mode & VFE32_OUTPUT_MODE_PT) {
+			msm_io_w(1, vfe32_ctrl->vfebase +
+				vfe32_AXI_WM_CFG[vfe32_ctrl->outpath.out0.ch0]);
+			msm_io_w(1, vfe32_ctrl->vfebase +
+				vfe32_AXI_WM_CFG[vfe32_ctrl->outpath.out0.ch1]);
+		}
+		if (vfe32_ctrl->outpath.output_mode & VFE32_OUTPUT_MODE_S) {
+			msm_io_w(1, vfe32_ctrl->vfebase +
+				vfe32_AXI_WM_CFG[vfe32_ctrl->outpath.out1.ch0]);
+			msm_io_w(1, vfe32_ctrl->vfebase +
+				vfe32_AXI_WM_CFG[vfe32_ctrl->outpath.out1.ch1]);
+		}
+	} else {  /* this is raw snapshot mode. */
+		if (vfe32_ctrl->outpath.output_mode & VFE32_OUTPUT_MODE_S) {
+			irq_comp_mask |=
+			(0x1 << (vfe32_ctrl->outpath.out1.ch0 + 8));
+			msm_io_w(1, vfe32_ctrl->vfebase +
+				vfe32_AXI_WM_CFG[vfe32_ctrl->outpath.out1.ch0]);
+			msm_io_w(0x1000, vfe32_ctrl->vfebase +
+					VFE_BUS_IO_FORMAT_CFG);
+		}
+	}
+	msm_io_w(irq_comp_mask, vfe32_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+	msm_io_r(vfe32_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+	vfe32_start_common();
+	msm_io_r(vfe32_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+	/* for debug */
+	msm_io_w(1, vfe32_ctrl->vfebase + 0x18C);
+	msm_io_w(1, vfe32_ctrl->vfebase + 0x188);
+	return 0;
+}
+
+static int vfe32_start(void)
+{
+	uint32_t irq_comp_mask = 0;
+	/* start command now is only good for continuous mode. */
+	if ((vfe32_ctrl->operation_mode != VFE_MODE_OF_OPERATION_CONTINUOUS) &&
+		(vfe32_ctrl->operation_mode != VFE_MODE_OF_OPERATION_VIDEO))
+		return 0;
+	irq_comp_mask	=
+		msm_io_r(vfe32_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+
+	if (vfe32_ctrl->outpath.output_mode & VFE32_OUTPUT_MODE_PT) {
+		irq_comp_mask |= (0x1 << vfe32_ctrl->outpath.out0.ch0 |
+			0x1 << vfe32_ctrl->outpath.out0.ch1);
+	}
+
+	if (vfe32_ctrl->outpath.output_mode & VFE32_OUTPUT_MODE_V) {
+		irq_comp_mask |= (0x1 << (vfe32_ctrl->outpath.out2.ch0 + 16)|
+			0x1 << (vfe32_ctrl->outpath.out2.ch1 + 16));
+	}
+
+	msm_io_w(irq_comp_mask, vfe32_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+
+	if (vfe32_ctrl->outpath.output_mode & VFE32_OUTPUT_MODE_PT) {
+		msm_io_w(1, vfe32_ctrl->vfebase +
+			vfe32_AXI_WM_CFG[vfe32_ctrl->outpath.out0.ch0]);
+		msm_io_w(1, vfe32_ctrl->vfebase +
+			vfe32_AXI_WM_CFG[vfe32_ctrl->outpath.out0.ch1]);
+	}
+	vfe32_start_common();
+	return 0;
+}
+
+static void vfe32_update(void)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&vfe32_ctrl->update_ack_lock, flags);
+	vfe32_ctrl->update_ack_pending = TRUE;
+	spin_unlock_irqrestore(&vfe32_ctrl->update_ack_lock, flags);
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_io_w_mb(1, vfe32_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+	return;
+}
+
+static void vfe32_sync_timer_stop(void)
+{
+	uint32_t value = 0;
+	vfe32_ctrl->sync_timer_state = 0;
+	if (vfe32_ctrl->sync_timer_number == 0)
+		value = 0x10000;
+	else if (vfe32_ctrl->sync_timer_number == 1)
+		value = 0x20000;
+	else if (vfe32_ctrl->sync_timer_number == 2)
+		value = 0x40000;
+
+	/* Timer Stop */
+	msm_io_w(value, vfe32_ctrl->vfebase + V32_SYNC_TIMER_OFF);
+}
+
+static void vfe32_sync_timer_start(const uint32_t *tbl)
+{
+	/* set bit 8 for auto increment. */
+	uint32_t value = 1;
+	uint32_t val;
+
+	vfe32_ctrl->sync_timer_state = *tbl++;
+	vfe32_ctrl->sync_timer_repeat_count = *tbl++;
+	vfe32_ctrl->sync_timer_number = *tbl++;
+	CDBG("%s timer_state %d, repeat_cnt %d timer number %d\n",
+		 __func__, vfe32_ctrl->sync_timer_state,
+		 vfe32_ctrl->sync_timer_repeat_count,
+		 vfe32_ctrl->sync_timer_number);
+
+	if (vfe32_ctrl->sync_timer_state) { /* Start Timer */
+		value = value << vfe32_ctrl->sync_timer_number;
+	} else { /* Stop Timer */
+		CDBG("Failed to Start timer\n");
+		return;
+	}
+
+	/* Timer Start */
+	msm_io_w(value, vfe32_ctrl->vfebase + V32_SYNC_TIMER_OFF);
+	/* Sync Timer Line Start */
+	value = *tbl++;
+	msm_io_w(value, vfe32_ctrl->vfebase + V32_SYNC_TIMER_OFF +
+		4 + ((vfe32_ctrl->sync_timer_number) * 12));
+	/* Sync Timer Pixel Start */
+	value = *tbl++;
+	msm_io_w(value, vfe32_ctrl->vfebase + V32_SYNC_TIMER_OFF +
+			 8 + ((vfe32_ctrl->sync_timer_number) * 12));
+	/* Sync Timer Pixel Duration */
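+	/* convert the requested duration into VFE clock cycles using the
+	 * configured vfe_clk_rate; the staged divides presumably keep the
+	 * 32-bit integer arithmetic from overflowing. */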
+	value = *tbl++;
+	val = camio_clk.vfe_clk_rate / 10000;
+	val = 10000000 / val;
+	val = value * 10000 / val;
+	CDBG("%s: Pixel Clk Cycles!!! %d\n", __func__, val);
+	msm_io_w(val, vfe32_ctrl->vfebase + V32_SYNC_TIMER_OFF +
+		12 + ((vfe32_ctrl->sync_timer_number) * 12));
+	/* Timer0 Active High/LOW */
+	value = *tbl++;
+	msm_io_w(value, vfe32_ctrl->vfebase + V32_SYNC_TIMER_POLARITY_OFF);
+	/* Selects sync timer 0 output to drive onto timer1 port */
+	value = 0;
+	msm_io_w(value, vfe32_ctrl->vfebase + V32_TIMER_SELECT_OFF);
+}
+
+static void vfe32_program_dmi_cfg(enum VFE32_DMI_RAM_SEL bankSel)
+{
+	/* set bit 8 for auto increment. */
+	uint32_t value = VFE_DMI_CFG_DEFAULT;
+	value += (uint32_t)bankSel;
+
+	msm_io_w(value, vfe32_ctrl->vfebase + VFE_DMI_CFG);
+	/* by default, always starts with offset 0.*/
+	msm_io_w(0, vfe32_ctrl->vfebase + VFE_DMI_ADDR);
+}
+static void vfe32_write_gamma_cfg(enum VFE32_DMI_RAM_SEL channel_sel,
+						const uint32_t *tbl)
+{
+	int i;
+	uint32_t value, value1, value2;
+	vfe32_program_dmi_cfg(channel_sel);
+	/* for loop for extracting init table. */
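+	/* each 32-bit table entry packs two 16-bit gamma values which are
+	 * written to the DMI data port one at a time. */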
+	for (i = 0 ; i < (VFE32_GAMMA_NUM_ENTRIES/2) ; i++) {
+		value = *tbl++;
+		value1 = value & 0x0000FFFF;
+		value2 = (value & 0xFFFF0000)>>16;
+		msm_io_w((value1), vfe32_ctrl->vfebase + VFE_DMI_DATA_LO);
+		msm_io_w((value2), vfe32_ctrl->vfebase + VFE_DMI_DATA_LO);
+	}
+	vfe32_program_dmi_cfg(NO_MEM_SELECTED);
+}
+
+static void vfe32_write_la_cfg(enum VFE32_DMI_RAM_SEL channel_sel,
+						const uint32_t *tbl)
+{
+	uint32_t i;
+	uint32_t value, value1, value2;
+
+	vfe32_program_dmi_cfg(channel_sel);
+	/* for loop for extracting init table. */
+	for (i = 0 ; i < (VFE32_LA_TABLE_LENGTH/2) ; i++) {
+		value = *tbl++;
+		value1 = value & 0x0000FFFF;
+		value2 = (value & 0xFFFF0000)>>16;
+		msm_io_w((value1), vfe32_ctrl->vfebase + VFE_DMI_DATA_LO);
+		msm_io_w((value2), vfe32_ctrl->vfebase + VFE_DMI_DATA_LO);
+	}
+	vfe32_program_dmi_cfg(NO_MEM_SELECTED);
+}
+
+
+static int vfe32_proc_general(struct msm_vfe32_cmd *cmd)
+{
+	int i, rc = 0;
+	uint32_t old_val = 0, new_val = 0;
+	uint32_t *cmdp = NULL;
+	uint32_t *cmdp_local = NULL;
+	uint32_t snapshot_cnt = 0;
+
+	CDBG("vfe32_proc_general: cmdID = %s, length = %d\n",
+		vfe32_general_cmd[cmd->id], cmd->length);
+	switch (cmd->id) {
+	case V32_RESET:
+		pr_info("vfe32_proc_general: cmdID = %s\n",
+			vfe32_general_cmd[cmd->id]);
+		vfe32_reset();
+		break;
+	case V32_START:
+		pr_info("vfe32_proc_general: cmdID = %s\n",
+			vfe32_general_cmd[cmd->id]);
+		rc = vfe32_start();
+		break;
+	case V32_UPDATE:
+		vfe32_update();
+		break;
+	case V32_CAPTURE:
+		pr_info("vfe32_proc_general: cmdID = %s\n",
+			vfe32_general_cmd[cmd->id]);
+		if (copy_from_user(&snapshot_cnt, (void __user *)(cmd->value),
+				sizeof(uint32_t))) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		rc = vfe32_capture(snapshot_cnt);
+		break;
+	case V32_START_RECORDING:
+		pr_info("vfe32_proc_general: cmdID = %s\n",
+			vfe32_general_cmd[cmd->id]);
+		rc = vfe32_start_recording();
+		break;
+	case V32_STOP_RECORDING:
+		pr_info("vfe32_proc_general: cmdID = %s\n",
+			vfe32_general_cmd[cmd->id]);
+		rc = vfe32_stop_recording();
+		break;
+	case V32_OPERATION_CFG: {
+		if (cmd->length != V32_OPERATION_CFG_LEN) {
+			rc = -EINVAL;
+			goto proc_general_done;
+		}
+		cmdp = kmalloc(V32_OPERATION_CFG_LEN, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			V32_OPERATION_CFG_LEN)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		rc = vfe32_operation_config(cmdp);
+		}
+		break;
+
+	case V32_STATS_AE_START: {
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		old_val = msm_io_r(vfe32_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val |= AE_BG_ENABLE_MASK;
+		msm_io_w(old_val,
+			vfe32_ctrl->vfebase + VFE_MODULE_CFG);
+		msm_io_memcpy(vfe32_ctrl->vfebase + vfe32_cmd[cmd->id].offset,
+		cmdp, (vfe32_cmd[cmd->id].length));
+		}
+		break;
+	case V32_STATS_AF_START: {
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		old_val = msm_io_r(vfe32_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val |= AF_BF_ENABLE_MASK;
+		msm_io_w(old_val,
+			vfe32_ctrl->vfebase + VFE_MODULE_CFG);
+		msm_io_memcpy(vfe32_ctrl->vfebase + vfe32_cmd[cmd->id].offset,
+		cmdp, (vfe32_cmd[cmd->id].length));
+		}
+		break;
+	case V32_STATS_AWB_START: {
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		old_val = msm_io_r(vfe32_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val |= AWB_ENABLE_MASK;
+		msm_io_w(old_val,
+			vfe32_ctrl->vfebase + VFE_MODULE_CFG);
+		msm_io_memcpy(vfe32_ctrl->vfebase + vfe32_cmd[cmd->id].offset,
+				cmdp, (vfe32_cmd[cmd->id].length));
+		}
+		break;
+
+	case V32_STATS_IHIST_START: {
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		old_val = msm_io_r(vfe32_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val |= IHIST_ENABLE_MASK;
+		msm_io_w(old_val,
+			vfe32_ctrl->vfebase + VFE_MODULE_CFG);
+		msm_io_memcpy(vfe32_ctrl->vfebase + vfe32_cmd[cmd->id].offset,
+				cmdp, (vfe32_cmd[cmd->id].length));
+		}
+		break;
+
+
+	case V32_STATS_RS_START: {
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		/*
+		old_val = msm_io_r(vfe32_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val |= RS_ENABLE_MASK;
+		msm_io_w(old_val,
+			vfe32_ctrl->vfebase + VFE_MODULE_CFG);
+		*/
+		msm_io_memcpy(vfe32_ctrl->vfebase + vfe32_cmd[cmd->id].offset,
+				cmdp, (vfe32_cmd[cmd->id].length));
+		}
+		break;
+
+	case V32_STATS_CS_START: {
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		/*
+		old_val = msm_io_r(vfe32_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val |= CS_ENABLE_MASK;
+		msm_io_w(old_val,
+			vfe32_ctrl->vfebase + VFE_MODULE_CFG);
+		*/
+		msm_io_memcpy(vfe32_ctrl->vfebase + vfe32_cmd[cmd->id].offset,
+				cmdp, (vfe32_cmd[cmd->id].length));
+		}
+		break;
+
+	case V32_MCE_UPDATE:
+	case V32_MCE_CFG:{
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		/* Offset the read by 4 bytes to point at the second CHROMA_EN
+		register, which holds the mce_enable bit */
+		old_val = msm_io_r(vfe32_ctrl->vfebase + V32_CHROMA_EN_OFF + 4);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
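+		/* read-modify-write the two CHROMA_EN registers: the masked
+		 * bits of the current values are preserved and merged with
+		 * the new MCE configuration from userspace. */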
+		new_val = *cmdp_local;
+		old_val &= MCE_EN_MASK;
+		new_val = new_val | old_val;
+		msm_io_memcpy(vfe32_ctrl->vfebase + V32_CHROMA_EN_OFF + 4,
+					&new_val, 4);
+		cmdp_local += 1;
+
+		old_val = msm_io_r(vfe32_ctrl->vfebase + V32_CHROMA_EN_OFF + 8);
+		new_val = *cmdp_local;
+		old_val &= MCE_Q_K_MASK;
+		new_val = new_val | old_val;
+		msm_io_memcpy(vfe32_ctrl->vfebase + V32_CHROMA_EN_OFF + 8,
+		&new_val, 4);
+		cmdp_local += 1;
+		msm_io_memcpy(vfe32_ctrl->vfebase + vfe32_cmd[cmd->id].offset,
+		cmdp_local, (vfe32_cmd[cmd->id].length));
+		}
+		break;
+	case V32_BLACK_LEVEL_CFG:
+		rc = -EFAULT;
+		goto proc_general_done;
+	case V32_ROLL_OFF_CFG: {
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value) , cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		msm_io_memcpy(vfe32_ctrl->vfebase + vfe32_cmd[cmd->id].offset,
+		cmdp_local, 16);
+		cmdp_local += 4;
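+		/* the remainder of the payload is the lens roll-off table;
+		 * stream the init entries into ROLLOFF_RAM through the DMI
+		 * port, then the delta table at
+		 * LENS_ROLL_OFF_DELTA_TABLE_OFFSET. */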
+		vfe32_program_dmi_cfg(ROLLOFF_RAM);
+		/* for loop for extracting init table. */
+		for (i = 0 ; i < (VFE32_ROLL_OFF_INIT_TABLE_SIZE * 2) ; i++) {
+			msm_io_w(*cmdp_local ,
+			vfe32_ctrl->vfebase + VFE_DMI_DATA_LO);
+			cmdp_local++;
+		}
+		CDBG("done writing init table\n");
+		/* by default, always starts with offset 0. */
+		msm_io_w(LENS_ROLL_OFF_DELTA_TABLE_OFFSET,
+		vfe32_ctrl->vfebase + VFE_DMI_ADDR);
+		/* for loop for extracting delta table. */
+		for (i = 0 ; i < (VFE32_ROLL_OFF_DELTA_TABLE_SIZE * 2) ; i++) {
+			msm_io_w(*cmdp_local,
+			vfe32_ctrl->vfebase + VFE_DMI_DATA_LO);
+			cmdp_local++;
+		}
+		vfe32_program_dmi_cfg(NO_MEM_SELECTED);
+		}
+		break;
+
+	case V32_LA_CFG:
+	case V32_LA_UPDATE: {
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		msm_io_memcpy(vfe32_ctrl->vfebase + vfe32_cmd[cmd->id].offset,
+				cmdp, (vfe32_cmd[cmd->id].length));
+
+		old_val = *cmdp;
+		cmdp += 1;
+		if (old_val == 0x0)
+			vfe32_write_la_cfg(LUMA_ADAPT_LUT_RAM_BANK0 , cmdp);
+		else
+			vfe32_write_la_cfg(LUMA_ADAPT_LUT_RAM_BANK1 , cmdp);
+		cmdp -= 1;
+		}
+		break;
+
+	case V32_SK_ENHAN_CFG:
+	case V32_SK_ENHAN_UPDATE:{
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		msm_io_memcpy(vfe32_ctrl->vfebase + V32_SCE_OFF,
+				cmdp, V32_SCE_LEN);
+		}
+		break;
+
+	case V32_LIVESHOT:
+		vfe32_liveshot();
+		break;
+
+	case V32_LINEARIZATION:
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		msm_io_memcpy(vfe32_ctrl->vfebase + V32_LINEARIZATION_OFF1,
+				cmdp_local, V32_LINEARIZATION_LEN1);
+		cmdp_local += 4;
+		msm_io_memcpy(vfe32_ctrl->vfebase + V32_LINEARIZATION_OFF2,
+						cmdp_local,
+						V32_LINEARIZATION_LEN2);
+		break;
+
+	case V32_DEMOSAICV3:
+		if (cmd->length !=
+			V32_DEMOSAICV3_0_LEN+V32_DEMOSAICV3_1_LEN) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+
+		msm_io_memcpy(vfe32_ctrl->vfebase + V32_DEMOSAICV3_0_OFF,
+			cmdp_local, V32_DEMOSAICV3_0_LEN);
+		cmdp_local += 1;
+		msm_io_memcpy(vfe32_ctrl->vfebase + V32_DEMOSAICV3_1_OFF,
+			cmdp_local, V32_DEMOSAICV3_1_LEN);
+		break;
+
+	case V32_DEMOSAICV3_ABCC_CFG:
+		rc = -EFAULT;
+		break;
+
+	case V32_DEMOSAICV3_DBCC_CFG:
+	case V32_DEMOSAICV3_DBCC_UPDATE:
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		new_val = *cmdp_local;
+
+		old_val = msm_io_r(vfe32_ctrl->vfebase + V32_DEMOSAICV3_0_OFF);
+		old_val &= DBCC_MASK;
+
+		new_val = new_val | old_val;
+		*cmdp_local = new_val;
+		msm_io_memcpy(vfe32_ctrl->vfebase + V32_DEMOSAICV3_0_OFF,
+					cmdp_local, 4);
+		cmdp_local += 1;
+		msm_io_memcpy(vfe32_ctrl->vfebase + vfe32_cmd[cmd->id].offset,
+			cmdp_local, (vfe32_cmd[cmd->id].length));
+		break;
+
+	case V32_DEMOSAICV3_DBPC_CFG:
+	case V32_DEMOSAICV3_DBPC_UPDATE:
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		new_val = *cmdp_local;
+
+		old_val = msm_io_r(vfe32_ctrl->vfebase + V32_DEMOSAICV3_0_OFF);
+		old_val &= DBPC_MASK;
+
+		new_val = new_val | old_val;
+		*cmdp_local = new_val;
+		msm_io_memcpy(vfe32_ctrl->vfebase +
+			V32_DEMOSAICV3_0_OFF,
+			cmdp_local, V32_DEMOSAICV3_LEN);
+		cmdp_local += 1;
+		msm_io_memcpy(vfe32_ctrl->vfebase +
+			V32_DEMOSAICV3_DBPC_CFG_OFF,
+			cmdp_local, V32_DEMOSAICV3_DBPC_LEN);
+		cmdp_local += 1;
+		msm_io_memcpy(vfe32_ctrl->vfebase +
+			V32_DEMOSAICV3_DBPC_CFG_OFF0,
+			cmdp_local, V32_DEMOSAICV3_DBPC_LEN);
+		cmdp_local += 1;
+		msm_io_memcpy(vfe32_ctrl->vfebase +
+			V32_DEMOSAICV3_DBPC_CFG_OFF1,
+			cmdp_local, V32_DEMOSAICV3_DBPC_LEN);
+		cmdp_local += 1;
+		msm_io_memcpy(vfe32_ctrl->vfebase +
+			V32_DEMOSAICV3_DBPC_CFG_OFF2,
+			cmdp_local, V32_DEMOSAICV3_DBPC_LEN);
+		break;
+
+	case V32_RGB_G_CFG: {
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		msm_io_memcpy(vfe32_ctrl->vfebase + V32_RGB_G_OFF,
+				cmdp, 4);
+		cmdp += 1;
+		vfe32_write_gamma_cfg(RGBLUT_RAM_CH0_BANK0 , cmdp);
+		vfe32_write_gamma_cfg(RGBLUT_RAM_CH1_BANK0 , cmdp);
+		vfe32_write_gamma_cfg(RGBLUT_RAM_CH2_BANK0 , cmdp);
+		cmdp -= 1;
+		}
+		break;
+
+	case V32_RGB_G_UPDATE: {
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp, (void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+
+		msm_io_memcpy(vfe32_ctrl->vfebase + V32_RGB_G_OFF, cmdp, 4);
+		old_val = *cmdp;
+		cmdp += 1;
+
+		if (old_val) {
+			vfe32_write_gamma_cfg(RGBLUT_RAM_CH0_BANK1 , cmdp);
+			vfe32_write_gamma_cfg(RGBLUT_RAM_CH1_BANK1 , cmdp);
+			vfe32_write_gamma_cfg(RGBLUT_RAM_CH2_BANK1 , cmdp);
+		} else {
+			vfe32_write_gamma_cfg(RGBLUT_RAM_CH0_BANK0 , cmdp);
+			vfe32_write_gamma_cfg(RGBLUT_RAM_CH1_BANK0 , cmdp);
+			vfe32_write_gamma_cfg(RGBLUT_RAM_CH2_BANK0 , cmdp);
+		}
+		cmdp -= 1;
+		}
+		break;
+
+	case V32_STATS_AWB_STOP: {
+		old_val = msm_io_r(vfe32_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val &= ~AWB_ENABLE_MASK;
+		msm_io_w(old_val,
+			vfe32_ctrl->vfebase + VFE_MODULE_CFG);
+		}
+		break;
+	case V32_STATS_AE_STOP: {
+		old_val = msm_io_r(vfe32_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val &= ~AE_BG_ENABLE_MASK;
+		msm_io_w(old_val,
+			vfe32_ctrl->vfebase + VFE_MODULE_CFG);
+		}
+		break;
+	case V32_STATS_AF_STOP: {
+		old_val = msm_io_r(vfe32_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val &= ~AF_BF_ENABLE_MASK;
+		msm_io_w(old_val,
+			vfe32_ctrl->vfebase + VFE_MODULE_CFG);
+		}
+		break;
+
+	case V32_STATS_IHIST_STOP: {
+		old_val = msm_io_r(vfe32_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val &= ~IHIST_ENABLE_MASK;
+		msm_io_w(old_val,
+			vfe32_ctrl->vfebase + VFE_MODULE_CFG);
+		}
+		break;
+
+	case V32_STATS_RS_STOP: {
+		old_val = msm_io_r(vfe32_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val &= ~RS_ENABLE_MASK;
+		msm_io_w(old_val,
+			vfe32_ctrl->vfebase + VFE_MODULE_CFG);
+		}
+		break;
+
+	case V32_STATS_CS_STOP: {
+		old_val = msm_io_r(vfe32_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val &= ~CS_ENABLE_MASK;
+		msm_io_w(old_val,
+			vfe32_ctrl->vfebase + VFE_MODULE_CFG);
+		}
+		break;
+	case V32_STOP:
+		pr_info("vfe32_proc_general: cmdID = %s\n",
+			vfe32_general_cmd[cmd->id]);
+		vfe32_stop();
+		break;
+
+	case V32_SYNC_TIMER_SETTING:
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp, (void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		vfe32_sync_timer_start(cmdp);
+		break;
+
+	case V32_EZTUNE_CFG: {
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
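+		/* preserve the stats enable bits currently programmed in
+		 * VFE_MODULE_CFG and take only the remaining fields from the
+		 * eztune configuration. */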
+		*cmdp &= ~STATS_ENABLE_MASK;
+		old_val = msm_io_r(vfe32_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val &= STATS_ENABLE_MASK;
+		*cmdp |= old_val;
+
+		msm_io_memcpy(vfe32_ctrl->vfebase + vfe32_cmd[cmd->id].offset,
+			cmdp, (vfe32_cmd[cmd->id].length));
+		}
+		break;
+
+	default: {
+		if (cmd->length != vfe32_cmd[cmd->id].length)
+			return -EINVAL;
+
+		cmdp = kmalloc(vfe32_cmd[cmd->id].length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+
+		CHECKED_COPY_FROM_USER(cmdp);
+		msm_io_memcpy(vfe32_ctrl->vfebase + vfe32_cmd[cmd->id].offset,
+			cmdp, (vfe32_cmd[cmd->id].length));
+	}
+	break;
+
+	}
+
+proc_general_done:
+	kfree(cmdp);
+
+	return rc;
+}
+
+static void vfe32_stats_af_ack(struct vfe_cmd_stats_ack *pAck)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&vfe32_ctrl->af_ack_lock, flags);
+	vfe32_ctrl->afStatsControl.nextFrameAddrBuf = pAck->nextStatsBuf;
+	vfe32_ctrl->afStatsControl.ackPending = FALSE;
+	spin_unlock_irqrestore(&vfe32_ctrl->af_ack_lock, flags);
+}
+
+static void vfe32_stats_awb_ack(struct vfe_cmd_stats_ack *pAck)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&vfe32_ctrl->awb_ack_lock, flags);
+	vfe32_ctrl->awbStatsControl.nextFrameAddrBuf = pAck->nextStatsBuf;
+	vfe32_ctrl->awbStatsControl.ackPending = FALSE;
+	spin_unlock_irqrestore(&vfe32_ctrl->awb_ack_lock, flags);
+}
+
+static void vfe32_stats_aec_ack(struct vfe_cmd_stats_ack *pAck)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&vfe32_ctrl->aec_ack_lock, flags);
+	vfe32_ctrl->aecStatsControl.nextFrameAddrBuf = pAck->nextStatsBuf;
+	vfe32_ctrl->aecStatsControl.ackPending = FALSE;
+	spin_unlock_irqrestore(&vfe32_ctrl->aec_ack_lock, flags);
+}
+
+static void vfe32_stats_ihist_ack(struct vfe_cmd_stats_ack *pAck)
+{
+	vfe32_ctrl->ihistStatsControl.nextFrameAddrBuf = pAck->nextStatsBuf;
+	vfe32_ctrl->ihistStatsControl.ackPending = FALSE;
+}
+static void vfe32_stats_rs_ack(struct vfe_cmd_stats_ack *pAck)
+{
+	vfe32_ctrl->rsStatsControl.nextFrameAddrBuf = pAck->nextStatsBuf;
+	vfe32_ctrl->rsStatsControl.ackPending = FALSE;
+}
+static void vfe32_stats_cs_ack(struct vfe_cmd_stats_ack *pAck)
+{
+	vfe32_ctrl->csStatsControl.nextFrameAddrBuf = pAck->nextStatsBuf;
+	vfe32_ctrl->csStatsControl.ackPending = FALSE;
+}
+
+
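+/*
+ * Latch both VFE irq status registers and the CAMIF status, then clear
+ * the pending bits and kick the irq command register so the clear takes
+ * effect.
+ */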
+static inline void vfe32_read_irq_status(struct vfe32_irq_status *out)
+{
+	uint32_t *temp;
+	memset(out, 0, sizeof(struct vfe32_irq_status));
+	temp = (uint32_t *)(vfe32_ctrl->vfebase + VFE_IRQ_STATUS_0);
+	out->vfeIrqStatus0 = msm_io_r(temp);
+
+	temp = (uint32_t *)(vfe32_ctrl->vfebase + VFE_IRQ_STATUS_1);
+	out->vfeIrqStatus1 = msm_io_r(temp);
+
+	temp = (uint32_t *)(vfe32_ctrl->vfebase + VFE_CAMIF_STATUS);
+	out->camifStatus = msm_io_r(temp);
+	CDBG("camifStatus  = 0x%x\n", out->camifStatus);
+
+	/* clear the pending interrupt of the same kind.*/
+	msm_io_w(out->vfeIrqStatus0, vfe32_ctrl->vfebase + VFE_IRQ_CLEAR_0);
+	msm_io_w(out->vfeIrqStatus1, vfe32_ctrl->vfebase + VFE_IRQ_CLEAR_1);
+
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_io_w_mb(1, vfe32_ctrl->vfebase + VFE_IRQ_CMD);
+
+}
+
+static void vfe32_send_msg_no_payload(enum VFE32_MESSAGE_ID id)
+{
+	struct vfe_message msg;
+
+	CDBG("vfe32_send_msg_no_payload\n");
+	msg._d = id;
+	vfe32_proc_ops(id, &msg, 0);
+}
+
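+/*
+ * REG_UPDATE irq: enables/disables the video-path (out2) write masters
+ * when a start/stop-recording request is pending, acks pending START and
+ * UPDATE commands, and in snapshot mode counts down the remaining
+ * captures, disabling the write masters and stopping CAMIF at the frame
+ * boundary after the last one.
+ */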
+static void vfe32_process_reg_update_irq(void)
+{
+	uint32_t  temp, old_val;
+	unsigned long flags;
+	if (vfe32_ctrl->req_start_video_rec) {
+		if (vfe32_ctrl->outpath.output_mode & VFE32_OUTPUT_MODE_V) {
+			msm_io_w(1, vfe32_ctrl->vfebase +
+				vfe32_AXI_WM_CFG[vfe32_ctrl->outpath.out2.ch0]);
+			msm_io_w(1, vfe32_ctrl->vfebase +
+				vfe32_AXI_WM_CFG[vfe32_ctrl->outpath.out2.ch1]);
+			/* Mask with 0x7 to extract the pixel pattern*/
+			switch (msm_io_r(vfe32_ctrl->vfebase + VFE_CFG)
+				& 0x7) {
+			case VFE_YUV_YCbYCr:
+			case VFE_YUV_YCrYCb:
+			case VFE_YUV_CbYCrY:
+			case VFE_YUV_CrYCbY:
+				msm_io_w_mb(1,
+				vfe32_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+				break;
+			default:
+				break;
+			}
+		}
+		vfe32_ctrl->req_start_video_rec =  FALSE;
+		if (vpe_ctrl && vpe_ctrl->dis_en) {
+			old_val = msm_io_r(
+				vfe32_ctrl->vfebase + VFE_MODULE_CFG);
+			old_val |= RS_CS_ENABLE_MASK;
+			msm_io_w(old_val,
+				vfe32_ctrl->vfebase + VFE_MODULE_CFG);
+		}
+		CDBG("start video triggered .\n");
+	} else if (vfe32_ctrl->req_stop_video_rec) {
+		if (vfe32_ctrl->outpath.output_mode & VFE32_OUTPUT_MODE_V) {
+			msm_io_w(0, vfe32_ctrl->vfebase +
+				vfe32_AXI_WM_CFG[vfe32_ctrl->outpath.out2.ch0]);
+			msm_io_w(0, vfe32_ctrl->vfebase +
+				vfe32_AXI_WM_CFG[vfe32_ctrl->outpath.out2.ch1]);
+			/* Mask with 0x7 to extract the pixel pattern*/
+			switch (msm_io_r(vfe32_ctrl->vfebase + VFE_CFG)
+				& 0x7) {
+			case VFE_YUV_YCbYCr:
+			case VFE_YUV_YCrYCb:
+			case VFE_YUV_CbYCrY:
+			case VFE_YUV_CrYCbY:
+				msm_io_w_mb(1,
+				vfe32_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+				break;
+			default:
+				break;
+			}
+		}
+		vfe32_ctrl->req_stop_video_rec =  FALSE;
+		vfe32_send_msg_no_payload(MSG_ID_STOP_REC_ACK);
+
+		/*disable rs& cs when stop recording. */
+		old_val = msm_io_r(vfe32_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val &= (~RS_CS_ENABLE_MASK);
+		msm_io_w(old_val, vfe32_ctrl->vfebase + VFE_MODULE_CFG);
+
+		CDBG("stop video triggered .\n");
+	}
+	if (vfe32_ctrl->start_ack_pending == TRUE) {
+		vfe32_send_msg_no_payload(MSG_ID_START_ACK);
+		vfe32_ctrl->start_ack_pending = FALSE;
+	} else {
+		spin_lock_irqsave(&vfe32_ctrl->update_ack_lock, flags);
+		if (vfe32_ctrl->update_ack_pending == TRUE) {
+			vfe32_ctrl->update_ack_pending = FALSE;
+			spin_unlock_irqrestore(
+				&vfe32_ctrl->update_ack_lock, flags);
+			vfe32_send_msg_no_payload(MSG_ID_UPDATE_ACK);
+		} else {
+			spin_unlock_irqrestore(
+				&vfe32_ctrl->update_ack_lock, flags);
+		}
+	}
+	if (vfe32_ctrl->operation_mode ==
+		VFE_MODE_OF_OPERATION_SNAPSHOT) {  /* in snapshot mode */
+		/* later we need to add check for live snapshot mode. */
+		vfe32_ctrl->vfe_capture_count--;
+		/* if last frame to be captured: */
+		if (vfe32_ctrl->vfe_capture_count == 0) {
+			/* stop the bus output:  write master enable = 0*/
+			if (vfe32_ctrl->outpath.output_mode &
+					VFE32_OUTPUT_MODE_PT) {
+				msm_io_w(0, vfe32_ctrl->vfebase +
+					vfe32_AXI_WM_CFG[vfe32_ctrl->
+						outpath.out0.ch0]);
+				msm_io_w(0, vfe32_ctrl->vfebase +
+					vfe32_AXI_WM_CFG[vfe32_ctrl->
+						outpath.out0.ch1]);
+			}
+			if (vfe32_ctrl->outpath.output_mode &
+					VFE32_OUTPUT_MODE_S) {
+				msm_io_w(0, vfe32_ctrl->vfebase +
+					vfe32_AXI_WM_CFG[vfe32_ctrl->
+							outpath.out1.ch0]);
+				msm_io_w(0, vfe32_ctrl->vfebase +
+					vfe32_AXI_WM_CFG[vfe32_ctrl->
+							outpath.out1.ch1]);
+			}
+
+			/* Ensure the write order while writing
+			to the command register using the barrier */
+			msm_io_w_mb(CAMIF_COMMAND_STOP_AT_FRAME_BOUNDARY,
+				vfe32_ctrl->vfebase + VFE_CAMIF_COMMAND);
+
+			/* Ensure the read order while reading
+			from the command register using the barrier */
+			temp = msm_io_r_mb(vfe32_ctrl->vfebase +
+				VFE_CAMIF_COMMAND);
+			/* then do reg_update. */
+			msm_io_w(1, vfe32_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+		}
+	} /* if snapshot mode. */
+}
+
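+/*
+ * Program default register values: demux gains, CGC override, frame-drop
+ * period/pattern for the encoder and view paths, output clamp limits and
+ * the stats bus UB allocations.
+ */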
+static void vfe32_set_default_reg_values(void)
+{
+	msm_io_w(0x800080, vfe32_ctrl->vfebase + VFE_DEMUX_GAIN_0);
+	msm_io_w(0x800080, vfe32_ctrl->vfebase + VFE_DEMUX_GAIN_1);
+	/* What value should we program CGC_OVERRIDE to? */
+	msm_io_w(0xFFFFF, vfe32_ctrl->vfebase + VFE_CGC_OVERRIDE);
+
+	/* default frame drop period and pattern */
+	msm_io_w(0x1f, vfe32_ctrl->vfebase + VFE_FRAMEDROP_ENC_Y_CFG);
+	msm_io_w(0x1f, vfe32_ctrl->vfebase + VFE_FRAMEDROP_ENC_CBCR_CFG);
+	msm_io_w(0xFFFFFFFF, vfe32_ctrl->vfebase + VFE_FRAMEDROP_ENC_Y_PATTERN);
+	msm_io_w(0xFFFFFFFF,
+		vfe32_ctrl->vfebase + VFE_FRAMEDROP_ENC_CBCR_PATTERN);
+	msm_io_w(0x1f, vfe32_ctrl->vfebase + VFE_FRAMEDROP_VIEW_Y);
+	msm_io_w(0x1f, vfe32_ctrl->vfebase + VFE_FRAMEDROP_VIEW_CBCR);
+	msm_io_w(0xFFFFFFFF,
+		vfe32_ctrl->vfebase + VFE_FRAMEDROP_VIEW_Y_PATTERN);
+	msm_io_w(0xFFFFFFFF,
+		vfe32_ctrl->vfebase + VFE_FRAMEDROP_VIEW_CBCR_PATTERN);
+	msm_io_w(0, vfe32_ctrl->vfebase + VFE_CLAMP_MIN);
+	msm_io_w(0xFFFFFF, vfe32_ctrl->vfebase + VFE_CLAMP_MAX);
+
+	/* stats UB config */
+	msm_io_w(0x3980007, vfe32_ctrl->vfebase + VFE_BUS_STATS_AEC_UB_CFG);
+	msm_io_w(0x3A00007, vfe32_ctrl->vfebase + VFE_BUS_STATS_AF_UB_CFG);
+	msm_io_w(0x3A8000F, vfe32_ctrl->vfebase + VFE_BUS_STATS_AWB_UB_CFG);
+	msm_io_w(0x3B80007, vfe32_ctrl->vfebase + VFE_BUS_STATS_RS_UB_CFG);
+	msm_io_w(0x3C0001F, vfe32_ctrl->vfebase + VFE_BUS_STATS_CS_UB_CFG);
+	msm_io_w(0x3E0001F, vfe32_ctrl->vfebase + VFE_BUS_STATS_HIST_UB_CFG);
+}
+
+static void vfe32_process_reset_irq(void)
+{
+	unsigned long flags;
+
+	atomic_set(&vfe32_ctrl->vstate, 0);
+
+	spin_lock_irqsave(&vfe32_ctrl->stop_flag_lock, flags);
+	if (vfe32_ctrl->stop_ack_pending) {
+		vfe32_ctrl->stop_ack_pending = FALSE;
+		spin_unlock_irqrestore(&vfe32_ctrl->stop_flag_lock, flags);
+		vfe32_send_msg_no_payload(MSG_ID_STOP_ACK);
+	} else {
+		spin_unlock_irqrestore(&vfe32_ctrl->stop_flag_lock, flags);
+		/* this is from reset command. */
+		vfe32_set_default_reg_values();
+
+		/* reload all write masters. (frame & line)*/
+		msm_io_w(0x7FFF, vfe32_ctrl->vfebase + VFE_BUS_CMD);
+		vfe32_send_msg_no_payload(MSG_ID_RESET_ACK);
+	}
+}
+
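+/*
+ * CAMIF start-of-frame irq: in raw snapshot mode it acks a pending start,
+ * counts down the remaining captures and stops CAMIF at the frame
+ * boundary after the last one; it then acks the SOF, advances the frame
+ * id and services the sync timer repeat counter.
+ */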
+static void vfe32_process_camif_sof_irq(void)
+{
+	uint32_t  temp;
+
+	/* in raw snapshot mode */
+	if (vfe32_ctrl->operation_mode ==
+		VFE_MODE_OF_OPERATION_RAW_SNAPSHOT) {
+		if (vfe32_ctrl->start_ack_pending) {
+			vfe32_send_msg_no_payload(MSG_ID_START_ACK);
+			vfe32_ctrl->start_ack_pending = FALSE;
+		}
+		vfe32_ctrl->vfe_capture_count--;
+		/* if last frame to be captured: */
+		if (vfe32_ctrl->vfe_capture_count == 0) {
+			/* Ensure the write order while writing
+			to the command register using the barrier */
+			msm_io_w_mb(CAMIF_COMMAND_STOP_AT_FRAME_BOUNDARY,
+				vfe32_ctrl->vfebase + VFE_CAMIF_COMMAND);
+			temp = msm_io_r_mb(vfe32_ctrl->vfebase +
+				VFE_CAMIF_COMMAND);
+		}
+	} /* if raw snapshot mode. */
+	vfe32_send_msg_no_payload(MSG_ID_SOF_ACK);
+	vfe32_ctrl->vfeFrameId++;
+	CDBG("camif_sof_irq, frameId = %d\n", vfe32_ctrl->vfeFrameId);
+
+	if (vfe32_ctrl->sync_timer_state) {
+		if (vfe32_ctrl->sync_timer_repeat_count == 0)
+			vfe32_sync_timer_stop();
+		else
+			vfe32_ctrl->sync_timer_repeat_count--;
+	}
+}
+
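+/*
+ * Decode the error status word and log every asserted error bit; a CAMIF
+ * error additionally reports MSG_ID_CAMIF_ERROR to the upper layer.
+ */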
+static void vfe32_process_error_irq(uint32_t errStatus)
+{
+	uint32_t camifStatus;
+	uint32_t *temp;
+
+	if (errStatus & VFE32_IMASK_CAMIF_ERROR) {
+		pr_err("vfe32_irq: camif errors\n");
+		temp = (uint32_t *)(vfe32_ctrl->vfebase + VFE_CAMIF_STATUS);
+		camifStatus = msm_io_r(temp);
+		pr_err("camifStatus  = 0x%x\n", camifStatus);
+		vfe32_send_msg_no_payload(MSG_ID_CAMIF_ERROR);
+	}
+
+	if (errStatus & VFE32_IMASK_BHIST_OVWR)
+		pr_err("vfe32_irq: stats bhist overwrite\n");
+
+	if (errStatus & VFE32_IMASK_STATS_CS_OVWR)
+		pr_err("vfe32_irq: stats cs overwrite\n");
+
+	if (errStatus & VFE32_IMASK_STATS_IHIST_OVWR)
+		pr_err("vfe32_irq: stats ihist overwrite\n");
+
+	if (errStatus & VFE32_IMASK_REALIGN_BUF_Y_OVFL)
+		pr_err("vfe32_irq: realign bug Y overflow\n");
+
+	if (errStatus & VFE32_IMASK_REALIGN_BUF_CB_OVFL)
+		pr_err("vfe32_irq: realign bug CB overflow\n");
+
+	if (errStatus & VFE32_IMASK_REALIGN_BUF_CR_OVFL)
+		pr_err("vfe32_irq: realign bug CR overflow\n");
+
+	if (errStatus & VFE32_IMASK_VIOLATION)
+		pr_err("vfe32_irq: violation interrupt\n");
+
+	if (errStatus & VFE32_IMASK_IMG_MAST_0_BUS_OVFL)
+		pr_err("vfe32_irq: image master 0 bus overflow\n");
+
+	if (errStatus & VFE32_IMASK_IMG_MAST_1_BUS_OVFL)
+		pr_err("vfe32_irq: image master 1 bus overflow\n");
+
+	if (errStatus & VFE32_IMASK_IMG_MAST_2_BUS_OVFL)
+		pr_err("vfe32_irq: image master 2 bus overflow\n");
+
+	if (errStatus & VFE32_IMASK_IMG_MAST_3_BUS_OVFL)
+		pr_err("vfe32_irq: image master 3 bus overflow\n");
+
+	if (errStatus & VFE32_IMASK_IMG_MAST_4_BUS_OVFL)
+		pr_err("vfe32_irq: image master 4 bus overflow\n");
+
+	if (errStatus & VFE32_IMASK_IMG_MAST_5_BUS_OVFL)
+		pr_err("vfe32_irq: image master 5 bus overflow\n");
+
+	if (errStatus & VFE32_IMASK_IMG_MAST_6_BUS_OVFL)
+		pr_err("vfe32_irq: image master 6 bus overflow\n");
+
+	if (errStatus & VFE32_IMASK_STATS_AE_BG_BUS_OVFL)
+		pr_err("vfe32_irq: ae/bg stats bus overflow\n");
+
+	if (errStatus & VFE32_IMASK_STATS_AF_BF_BUS_OVFL)
+		pr_err("vfe32_irq: af/bf stats bus overflow\n");
+
+	if (errStatus & VFE32_IMASK_STATS_AWB_BUS_OVFL)
+		pr_err("vfe32_irq: awb stats bus overflow\n");
+
+	if (errStatus & VFE32_IMASK_STATS_RS_BUS_OVFL)
+		pr_err("vfe32_irq: rs stats bus overflow\n");
+
+	if (errStatus & VFE32_IMASK_STATS_CS_BUS_OVFL)
+		pr_err("vfe32_irq: cs stats bus overflow\n");
+
+	if (errStatus & VFE32_IMASK_STATS_IHIST_BUS_OVFL)
+		pr_err("vfe32_irq: ihist stats bus overflow\n");
+
+	if (errStatus & VFE32_IMASK_STATS_SKIN_BHIST_BUS_OVFL)
+		pr_err("vfe32_irq: skin/bhist stats bus overflow\n");
+
+	if (errStatus & VFE32_IMASK_AXI_ERROR)
+		pr_err("vfe32_irq: axi error\n");
+}
+
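+/*
+ * Per-write-master ping/pong buffer address registers: channel n has its
+ * ping address at base + 0x50 + 0x18 * n and its pong address 4 bytes
+ * later.  The get/put_ch_addr macros use the bus ping-pong status bit of
+ * the channel to select which of the two to read or reprogram.
+ */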
+#define VFE32_AXI_OFFSET 0x0050
+#define vfe32_get_ch_ping_addr(chn) \
+	(msm_io_r(vfe32_ctrl->vfebase + 0x0050 + 0x18 * (chn)))
+#define vfe32_get_ch_pong_addr(chn) \
+	(msm_io_r(vfe32_ctrl->vfebase + 0x0050 + 0x18 * (chn) + 4))
+#define vfe32_get_ch_addr(ping_pong, chn) \
+	(((ping_pong) & (1 << (chn))) == 0 ? \
+	vfe32_get_ch_pong_addr(chn) : vfe32_get_ch_ping_addr(chn))
+
+#define vfe32_put_ch_ping_addr(chn, addr) \
+	(msm_io_w((addr), vfe32_ctrl->vfebase + 0x0050 + 0x18 * (chn)))
+#define vfe32_put_ch_pong_addr(chn, addr) \
+	(msm_io_w((addr), vfe32_ctrl->vfebase + 0x0050 + 0x18 * (chn) + 4))
+#define vfe32_put_ch_addr(ping_pong, chn, addr) \
+	(((ping_pong) & (1 << (chn))) == 0 ?   \
+	vfe32_put_ch_pong_addr((chn), (addr)) : \
+	vfe32_put_ch_ping_addr((chn), (addr)))
+
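+/*
+ * Output path irq handlers (0 = preview/thumbnail, 1 = snapshot main,
+ * 2 = video): dequeue a free buffer, read the frame's buffer addresses
+ * from the ping/pong address registers, swap the free buffer in and send
+ * the frame upstream.  If no free buffer is available the frame is
+ * counted as dropped (and, in the V4L2 build, ping and pong are swapped
+ * so the hardware keeps reusing the same pair).
+ */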
+static void vfe32_process_output_path_irq_0(void)
+{
+	uint32_t ping_pong;
+	uint32_t pyaddr, pcbcraddr;
+#ifdef CONFIG_MSM_CAMERA_V4L2
+	uint32_t pyaddr_ping, pcbcraddr_ping, pyaddr_pong, pcbcraddr_pong;
+#endif
+	uint8_t out_bool = 0;
+	struct vfe32_free_buf *free_buf = NULL;
+	free_buf = vfe32_dequeue_free_buf(&vfe32_ctrl->outpath.out0);
+	/* we render frames in the following conditions:
+	1. Continuous mode and a free buffer is available.
+	2. In snapshot mode, a free buffer is not always available;
+	when the pending snapshot count is <= 1, no free buffer
+	is needed.
+	*/
+	out_bool =
+		((vfe32_ctrl->operation_mode ==
+		VFE_MODE_OF_OPERATION_SNAPSHOT ||
+		vfe32_ctrl->operation_mode ==
+		VFE_MODE_OF_OPERATION_RAW_SNAPSHOT) &&
+		(vfe32_ctrl->vfe_capture_count <= 1)) ||
+		free_buf;
+	if (out_bool) {
+		ping_pong = msm_io_r(vfe32_ctrl->vfebase +
+			VFE_BUS_PING_PONG_STATUS);
+
+		/* Y channel */
+		pyaddr = vfe32_get_ch_addr(ping_pong,
+			vfe32_ctrl->outpath.out0.ch0);
+		/* Chroma channel */
+		pcbcraddr = vfe32_get_ch_addr(ping_pong,
+			vfe32_ctrl->outpath.out0.ch1);
+
+		CDBG("output path 0, pyaddr = 0x%x, pcbcraddr = 0x%x\n",
+			pyaddr, pcbcraddr);
+		if (free_buf) {
+			/* Y channel */
+			vfe32_put_ch_addr(ping_pong,
+			vfe32_ctrl->outpath.out0.ch0,
+			free_buf->paddr + free_buf->y_off);
+			/* Chroma channel */
+			vfe32_put_ch_addr(ping_pong,
+			vfe32_ctrl->outpath.out0.ch1,
+			free_buf->paddr + free_buf->cbcr_off);
+		}
+		if (vfe32_ctrl->operation_mode ==
+			VFE_MODE_OF_OPERATION_SNAPSHOT) {
+			/* will add message for multi-shot. */
+			vfe32_ctrl->outpath.out0.capture_cnt--;
+			vfe_send_outmsg(MSG_ID_OUTPUT_T, pyaddr,
+				pcbcraddr);
+		} else {
+			/* always send message for continuous mode. */
+			/* if continuous mode, for display. (preview) */
+			vfe_send_outmsg(MSG_ID_OUTPUT_P, pyaddr,
+				pcbcraddr);
+		}
+	} else {
+		vfe32_ctrl->outpath.out0.frame_drop_cnt++;
+		pr_warning("path_irq_0 - no free buffer!\n");
+#ifdef CONFIG_MSM_CAMERA_V4L2
+		pr_info("Swapping ping and pong\n");
+
+		/*get addresses*/
+		/* Y channel */
+		pyaddr_ping = vfe32_get_ch_ping_addr(
+			vfe32_ctrl->outpath.out0.ch0);
+		/* Chroma channel */
+		pcbcraddr_ping = vfe32_get_ch_ping_addr(
+			vfe32_ctrl->outpath.out0.ch1);
+		/* Y channel */
+		pyaddr_pong = vfe32_get_ch_pong_addr(
+			vfe32_ctrl->outpath.out0.ch0);
+		/* Chroma channel */
+		pcbcraddr_pong = vfe32_get_ch_pong_addr(
+			vfe32_ctrl->outpath.out0.ch1);
+
+		CDBG("ping = 0x%p, pong = 0x%p\n", (void *)pyaddr_ping,
+						(void *)pyaddr_pong);
+		CDBG("ping_cbcr = 0x%p, pong_cbcr = 0x%p\n",
+			(void *)pcbcraddr_ping, (void *)pcbcraddr_pong);
+
+		/*put addresses*/
+		/* SWAP y channel*/
+		vfe32_put_ch_ping_addr(vfe32_ctrl->outpath.out0.ch0,
+							pyaddr_pong);
+		vfe32_put_ch_pong_addr(vfe32_ctrl->outpath.out0.ch0,
+							pyaddr_ping);
+		/* SWAP chroma channel*/
+		vfe32_put_ch_ping_addr(vfe32_ctrl->outpath.out0.ch1,
+						pcbcraddr_pong);
+		vfe32_put_ch_pong_addr(vfe32_ctrl->outpath.out0.ch1,
+						pcbcraddr_ping);
+		CDBG("after swap: ping = 0x%p, pong = 0x%p\n",
+			(void *)pyaddr_pong, (void *)pyaddr_ping);
+#endif
+	}
+}
+
+static void vfe32_process_output_path_irq_1(void)
+{
+	uint32_t ping_pong;
+	uint32_t pyaddr, pcbcraddr;
+#ifdef CONFIG_MSM_CAMERA_V4L2
+	uint32_t pyaddr_ping, pcbcraddr_ping, pyaddr_pong, pcbcraddr_pong;
+#endif
+	/* this must be snapshot main image output. */
+	uint8_t out_bool = 0;
+	struct vfe32_free_buf *free_buf = NULL;
+	free_buf = vfe32_dequeue_free_buf(&vfe32_ctrl->outpath.out1);
+
+	/* we render frames in the following conditions:
+	1. Continuous mode and a free buffer is available.
+	2. In snapshot mode, a free buffer is not always available;
+	when the pending snapshot count is <= 1, no free buffer
+	is needed.
+	*/
+	out_bool =
+		((vfe32_ctrl->operation_mode ==
+			VFE_MODE_OF_OPERATION_SNAPSHOT ||
+			vfe32_ctrl->operation_mode ==
+			VFE_MODE_OF_OPERATION_RAW_SNAPSHOT) &&
+		 (vfe32_ctrl->vfe_capture_count <= 1)) || free_buf;
+	if (out_bool) {
+		ping_pong = msm_io_r(vfe32_ctrl->vfebase +
+			VFE_BUS_PING_PONG_STATUS);
+
+		/* Y channel */
+		pyaddr = vfe32_get_ch_addr(ping_pong,
+			vfe32_ctrl->outpath.out1.ch0);
+		/* Chroma channel */
+		pcbcraddr = vfe32_get_ch_addr(ping_pong,
+			vfe32_ctrl->outpath.out1.ch1);
+
+		CDBG("snapshot main, pyaddr = 0x%x, pcbcraddr = 0x%x\n",
+			pyaddr, pcbcraddr);
+		if (free_buf) {
+			/* Y channel */
+			vfe32_put_ch_addr(ping_pong,
+			vfe32_ctrl->outpath.out1.ch0,
+			free_buf->paddr + free_buf->y_off);
+			/* Chroma channel */
+			vfe32_put_ch_addr(ping_pong,
+			vfe32_ctrl->outpath.out1.ch1,
+			free_buf->paddr + free_buf->cbcr_off);
+		}
+		if (vfe32_ctrl->operation_mode ==
+			VFE_MODE_OF_OPERATION_SNAPSHOT ||
+			vfe32_ctrl->operation_mode ==
+			VFE_MODE_OF_OPERATION_RAW_SNAPSHOT) {
+			vfe32_ctrl->outpath.out1.capture_cnt--;
+			vfe_send_outmsg(MSG_ID_OUTPUT_S, pyaddr,
+				pcbcraddr);
+		}
+	} else {
+		vfe32_ctrl->outpath.out1.frame_drop_cnt++;
+		pr_warning("path_irq_1 - no free buffer!\n");
+#ifdef CONFIG_MSM_CAMERA_V4L2
+		pr_info("Swapping ping and pong\n");
+
+		/*get addresses*/
+		/* Y channel */
+		pyaddr_ping = vfe32_get_ch_ping_addr(
+			vfe32_ctrl->outpath.out1.ch0);
+		/* Chroma channel */
+		pcbcraddr_ping = vfe32_get_ch_ping_addr(
+			vfe32_ctrl->outpath.out1.ch1);
+		/* Y channel */
+		pyaddr_pong = vfe32_get_ch_pong_addr(
+			vfe32_ctrl->outpath.out1.ch0);
+		/* Chroma channel */
+		pcbcraddr_pong = vfe32_get_ch_pong_addr(
+			vfe32_ctrl->outpath.out1.ch1);
+
+		CDBG("ping = 0x%p, pong = 0x%p\n", (void *)pyaddr_ping,
+						(void *)pyaddr_pong);
+		CDBG("ping_cbcr = 0x%p, pong_cbcr = 0x%p\n",
+			(void *)pcbcraddr_ping, (void *)pcbcraddr_pong);
+
+		/*put addresses*/
+		/* SWAP y channel*/
+		vfe32_put_ch_ping_addr(vfe32_ctrl->outpath.out1.ch0,
+							pyaddr_pong);
+		vfe32_put_ch_pong_addr(vfe32_ctrl->outpath.out1.ch0,
+							pyaddr_ping);
+		/* SWAP chroma channel*/
+		vfe32_put_ch_ping_addr(vfe32_ctrl->outpath.out1.ch1,
+						pcbcraddr_pong);
+		vfe32_put_ch_pong_addr(vfe32_ctrl->outpath.out1.ch1,
+						pcbcraddr_ping);
+		CDBG("after swap: ping = 0x%p, pong = 0x%p\n",
+			(void *)pyaddr_pong, (void *)pyaddr_ping);
+#endif
+	}
+}
+
+static void vfe32_process_output_path_irq_2(void)
+{
+	uint32_t ping_pong;
+	uint32_t pyaddr, pcbcraddr;
+#ifdef CONFIG_MSM_CAMERA_V4L2
+	uint32_t pyaddr_ping, pcbcraddr_ping, pyaddr_pong, pcbcraddr_pong;
+#endif
+	uint8_t out_bool = 0;
+	struct vfe32_free_buf *free_buf = NULL;
+	free_buf = vfe32_dequeue_free_buf(&vfe32_ctrl->outpath.out2);
+
+	/* we render frames in the following conditions:
+	1. Continuous mode and a free buffer is available.
+	2. In snapshot mode, a free buffer is not always available;
+	when the pending snapshot count is <= 1, no free buffer
+	is needed.
+	*/
+	out_bool =
+		((vfe32_ctrl->operation_mode ==
+			VFE_MODE_OF_OPERATION_SNAPSHOT) &&
+		(vfe32_ctrl->vfe_capture_count <= 1)) || free_buf;
+
+	CDBG("%s: op mode = %d, capture_cnt = %d\n", __func__,
+		 vfe32_ctrl->operation_mode, vfe32_ctrl->vfe_capture_count);
+
+	if (out_bool) {
+		ping_pong = msm_io_r(vfe32_ctrl->vfebase +
+			VFE_BUS_PING_PONG_STATUS);
+
+		/* Y channel */
+		pyaddr = vfe32_get_ch_addr(ping_pong,
+			vfe32_ctrl->outpath.out2.ch0);
+		/* Chroma channel */
+		pcbcraddr = vfe32_get_ch_addr(ping_pong,
+			vfe32_ctrl->outpath.out2.ch1);
+
+		CDBG("video output, pyaddr = 0x%x, pcbcraddr = 0x%x\n",
+			pyaddr, pcbcraddr);
+
+		if (free_buf) {
+			/* Y channel */
+			vfe32_put_ch_addr(ping_pong,
+			vfe32_ctrl->outpath.out2.ch0,
+			free_buf->paddr + free_buf->y_off);
+			/* Chroma channel */
+			vfe32_put_ch_addr(ping_pong,
+			vfe32_ctrl->outpath.out2.ch1,
+			free_buf->paddr + free_buf->cbcr_off);
+		}
+		vfe_send_outmsg(MSG_ID_OUTPUT_V, pyaddr, pcbcraddr);
+	} else {
+		vfe32_ctrl->outpath.out2.frame_drop_cnt++;
+		pr_warning("path_irq_2 - no free buffer!\n");
+#ifdef CONFIG_MSM_CAMERA_V4L2
+		pr_info("Swapping ping and pong\n");
+
+		/*get addresses*/
+		/* Y channel */
+		pyaddr_ping = vfe32_get_ch_ping_addr(
+			vfe32_ctrl->outpath.out2.ch0);
+		/* Chroma channel */
+		pcbcraddr_ping = vfe32_get_ch_ping_addr(
+			vfe32_ctrl->outpath.out2.ch1);
+		/* Y channel */
+		pyaddr_pong = vfe32_get_ch_pong_addr(
+			vfe32_ctrl->outpath.out2.ch0);
+		/* Chroma channel */
+		pcbcraddr_pong = vfe32_get_ch_pong_addr(
+			vfe32_ctrl->outpath.out2.ch1);
+
+		CDBG("ping = 0x%p, pong = 0x%p\n", (void *)pyaddr_ping,
+						(void *)pyaddr_pong);
+		CDBG("ping_cbcr = 0x%p, pong_cbcr = 0x%p\n",
+			(void *)pcbcraddr_ping, (void *)pcbcraddr_pong);
+
+		/*put addresses*/
+		/* SWAP y channel*/
+		vfe32_put_ch_ping_addr(vfe32_ctrl->outpath.out2.ch0,
+							pyaddr_pong);
+		vfe32_put_ch_pong_addr(vfe32_ctrl->outpath.out2.ch0,
+							pyaddr_ping);
+		/* SWAP chroma channel*/
+		vfe32_put_ch_ping_addr(vfe32_ctrl->outpath.out2.ch1,
+						pcbcraddr_pong);
+		vfe32_put_ch_pong_addr(vfe32_ctrl->outpath.out2.ch1,
+						pcbcraddr_ping);
+		CDBG("after swap: ping = 0x%p, pong = 0x%p\n",
+			(void *)pyaddr_pong, (void *)pyaddr_ping);
+#endif
+	}
+}
+
+static void vfe32_process_stats_comb_irq(uint32_t *irqstatus)
+{
+	return;
+}
+
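+/*
+ * Swap in a new stats buffer for the given stats stream: the bus
+ * ping-pong status bit of the stream (stats bits start at bit 7) selects
+ * which of the stream's ping/pong address words to reprogram with
+ * newAddr; the address previously held there is returned so it can be
+ * handed up to userspace.
+ */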
+static uint32_t vfe32_process_stats_irq_common(uint32_t statsNum,
+						uint32_t newAddr)
+{
+	uint32_t pingpongStatus;
+	uint32_t returnAddr;
+	uint32_t pingpongAddr;
+
+	/* must be 0=ping, 1=pong */
+	pingpongStatus =
+		((msm_io_r(vfe32_ctrl->vfebase +
+		VFE_BUS_PING_PONG_STATUS))
+	& ((uint32_t)(1<<(statsNum + 7)))) >> (statsNum + 7);
+	/* stats bits starts at 7 */
+	CDBG("statsNum %d, pingpongStatus %d\n", statsNum, pingpongStatus);
+	pingpongAddr =
+		((uint32_t)(vfe32_ctrl->vfebase +
+				VFE_BUS_STATS_PING_PONG_BASE)) +
+				(3*statsNum)*4 + (1-pingpongStatus)*4;
+	returnAddr = msm_io_r((uint32_t *)pingpongAddr);
+	msm_io_w(newAddr, (uint32_t *)pingpongAddr);
+	return returnAddr;
+}
+
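+/*
+ * Build a stats message (frame counter plus buffer address), mark the
+ * matching stats stream as ack-pending and push the message up through
+ * vfe32_proc_ops.
+ */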
+static void
+vfe_send_stats_msg(uint32_t bufAddress, uint32_t statsNum)
+{
+	unsigned long flags;
+	struct  vfe_message msg;
+	/* fill message with right content. */
+	/* @todo This is causing issues, need further investigate */
+	/* spin_lock_irqsave(&ctrl->state_lock, flags); */
+	msg._u.msgStats.frameCounter = vfe32_ctrl->vfeFrameId;
+	msg._u.msgStats.buffer = bufAddress;
+
+	switch (statsNum) {
+	case statsAeNum:{
+		msg._d = MSG_ID_STATS_AEC;
+		spin_lock_irqsave(&vfe32_ctrl->aec_ack_lock, flags);
+		vfe32_ctrl->aecStatsControl.ackPending = TRUE;
+		spin_unlock_irqrestore(&vfe32_ctrl->aec_ack_lock, flags);
+		}
+		break;
+	case statsAfNum:{
+		msg._d = MSG_ID_STATS_AF;
+		spin_lock_irqsave(&vfe32_ctrl->af_ack_lock, flags);
+		vfe32_ctrl->afStatsControl.ackPending = TRUE;
+		spin_unlock_irqrestore(&vfe32_ctrl->af_ack_lock, flags);
+		}
+		break;
+	case statsAwbNum: {
+		msg._d = MSG_ID_STATS_AWB;
+		spin_lock_irqsave(&vfe32_ctrl->awb_ack_lock, flags);
+		vfe32_ctrl->awbStatsControl.ackPending = TRUE;
+		spin_unlock_irqrestore(&vfe32_ctrl->awb_ack_lock, flags);
+		}
+		break;
+
+	case statsIhistNum: {
+		msg._d = MSG_ID_STATS_IHIST;
+		vfe32_ctrl->ihistStatsControl.ackPending = TRUE;
+		}
+		break;
+	case statsRsNum: {
+		msg._d = MSG_ID_STATS_RS;
+		vfe32_ctrl->rsStatsControl.ackPending = TRUE;
+		}
+		break;
+	case statsCsNum: {
+		msg._d = MSG_ID_STATS_CS;
+		vfe32_ctrl->csStatsControl.ackPending = TRUE;
+		}
+		break;
+
+	default:
+		goto stats_done;
+	}
+
+	vfe32_proc_ops(msg._d,
+		&msg, sizeof(struct vfe_message));
+stats_done:
+	/* spin_unlock_irqrestore(&ctrl->state_lock, flags); */
+	return;
+}
+
+static void vfe32_process_stats_ae_irq(void)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&vfe32_ctrl->aec_ack_lock, flags);
+	if (!(vfe32_ctrl->aecStatsControl.ackPending)) {
+		spin_unlock_irqrestore(&vfe32_ctrl->aec_ack_lock, flags);
+		vfe32_ctrl->aecStatsControl.bufToRender =
+			vfe32_process_stats_irq_common(statsAeNum,
+			vfe32_ctrl->aecStatsControl.nextFrameAddrBuf);
+
+		vfe_send_stats_msg(vfe32_ctrl->aecStatsControl.bufToRender,
+						statsAeNum);
+	} else {
+		spin_unlock_irqrestore(&vfe32_ctrl->aec_ack_lock, flags);
+		vfe32_ctrl->aecStatsControl.droppedStatsFrameCount++;
+	}
+}
+
+static void vfe32_process_stats_awb_irq(void)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&vfe32_ctrl->awb_ack_lock, flags);
+	if (!(vfe32_ctrl->awbStatsControl.ackPending)) {
+		spin_unlock_irqrestore(&vfe32_ctrl->awb_ack_lock, flags);
+		vfe32_ctrl->awbStatsControl.bufToRender =
+			vfe32_process_stats_irq_common(statsAwbNum,
+			vfe32_ctrl->awbStatsControl.nextFrameAddrBuf);
+
+		vfe_send_stats_msg(vfe32_ctrl->awbStatsControl.bufToRender,
+						statsAwbNum);
+	} else {
+		spin_unlock_irqrestore(&vfe32_ctrl->awb_ack_lock, flags);
+		vfe32_ctrl->awbStatsControl.droppedStatsFrameCount++;
+	}
+}
+
+static void vfe32_process_stats_af_irq(void)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&vfe32_ctrl->af_ack_lock, flags);
+	if (!(vfe32_ctrl->afStatsControl.ackPending)) {
+		spin_unlock_irqrestore(&vfe32_ctrl->af_ack_lock, flags);
+		vfe32_ctrl->afStatsControl.bufToRender =
+			vfe32_process_stats_irq_common(statsAfNum,
+			vfe32_ctrl->afStatsControl.nextFrameAddrBuf);
+
+		vfe_send_stats_msg(vfe32_ctrl->afStatsControl.bufToRender,
+						statsAfNum);
+	} else {
+		spin_unlock_irqrestore(&vfe32_ctrl->af_ack_lock, flags);
+		vfe32_ctrl->afStatsControl.droppedStatsFrameCount++;
+	}
+}
+
+static void vfe32_process_stats_ihist_irq(void)
+{
+	if (!(vfe32_ctrl->ihistStatsControl.ackPending)) {
+		vfe32_ctrl->ihistStatsControl.bufToRender =
+			vfe32_process_stats_irq_common(statsIhistNum,
+			vfe32_ctrl->ihistStatsControl.nextFrameAddrBuf);
+
+		vfe_send_stats_msg(vfe32_ctrl->ihistStatsControl.bufToRender,
+						statsIhistNum);
+	} else
+		vfe32_ctrl->ihistStatsControl.droppedStatsFrameCount++;
+}
+
+static void vfe32_process_stats_rs_irq(void)
+{
+	if (!(vfe32_ctrl->rsStatsControl.ackPending)) {
+		vfe32_ctrl->rsStatsControl.bufToRender =
+			vfe32_process_stats_irq_common(statsRsNum,
+			vfe32_ctrl->rsStatsControl.nextFrameAddrBuf);
+
+		vfe_send_stats_msg(vfe32_ctrl->rsStatsControl.bufToRender,
+						statsRsNum);
+	} else
+		vfe32_ctrl->rsStatsControl.droppedStatsFrameCount++;
+}
+
+static void vfe32_process_stats_cs_irq(void)
+{
+	if (!(vfe32_ctrl->csStatsControl.ackPending)) {
+		vfe32_ctrl->csStatsControl.bufToRender =
+			vfe32_process_stats_irq_common(statsCsNum,
+			vfe32_ctrl->csStatsControl.nextFrameAddrBuf);
+
+		vfe_send_stats_msg(vfe32_ctrl->csStatsControl.bufToRender,
+						statsCsNum);
+	} else
+		vfe32_ctrl->csStatsControl.droppedStatsFrameCount++;
+}
+
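+/*
+ * Bottom half: drain the isr queue filled by vfe32_parse_irq and fan
+ * each latched status word out to the reg-update, reset, error,
+ * output-path, stats, sync-timer and CAMIF SOF handlers above.
+ */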
+static void vfe32_do_tasklet(unsigned long data)
+{
+	unsigned long flags;
+
+	struct vfe32_isr_queue_cmd *qcmd = NULL;
+
+	CDBG("=== vfe32_do_tasklet start ===\n");
+
+	while (atomic_read(&irq_cnt)) {
+		spin_lock_irqsave(&vfe32_ctrl->tasklet_lock, flags);
+		qcmd = list_first_entry(&vfe32_ctrl->tasklet_q,
+			struct vfe32_isr_queue_cmd, list);
+		atomic_sub(1, &irq_cnt);
+
+		if (!qcmd) {
+			spin_unlock_irqrestore(&vfe32_ctrl->tasklet_lock,
+				flags);
+			return;
+		}
+
+		list_del(&qcmd->list);
+		spin_unlock_irqrestore(&vfe32_ctrl->tasklet_lock,
+			flags);
+
+		/* interrupt to be processed,  *qcmd has the payload.  */
+		if (qcmd->vfeInterruptStatus0 &
+				VFE_IRQ_STATUS0_REG_UPDATE_MASK) {
+			CDBG("irq	regUpdateIrq\n");
+			vfe32_process_reg_update_irq();
+		}
+
+		if (qcmd->vfeInterruptStatus1 &
+				VFE_IMASK_WHILE_STOPPING_1) {
+			CDBG("irq	resetAckIrq\n");
+			vfe32_process_reset_irq();
+		}
+
+		if (atomic_read(&vfe32_ctrl->vstate)) {
+			if (qcmd->vfeInterruptStatus1 &
+					VFE32_IMASK_ERROR_ONLY_1) {
+				pr_err("irq	errorIrq\n");
+				vfe32_process_error_irq(
+					qcmd->vfeInterruptStatus1 &
+					VFE32_IMASK_ERROR_ONLY_1);
+			}
+			/* next, check output path related interrupts. */
+			if (qcmd->vfeInterruptStatus0 &
+				VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE0_MASK) {
+				CDBG("Image composite done 0 irq occured.\n");
+				vfe32_process_output_path_irq_0();
+			}
+			if (qcmd->vfeInterruptStatus0 &
+				VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE1_MASK) {
+				CDBG("Image composite done 1 irq occured.\n");
+				vfe32_process_output_path_irq_1();
+			}
+			if (qcmd->vfeInterruptStatus0 &
+				VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE2_MASK) {
+				CDBG("Image composite done 2 irq occured.\n");
+				vfe32_process_output_path_irq_2();
+			}
+			/* in snapshot mode if done then send
+			snapshot done message */
+			if (vfe32_ctrl->operation_mode ==
+				VFE_MODE_OF_OPERATION_SNAPSHOT ||
+				vfe32_ctrl->operation_mode ==
+				VFE_MODE_OF_OPERATION_RAW_SNAPSHOT) {
+				if ((vfe32_ctrl->outpath.out0.capture_cnt == 0)
+						&& (vfe32_ctrl->outpath.out1.
+						capture_cnt == 0)) {
+					vfe32_send_msg_no_payload(
+						MSG_ID_SNAPSHOT_DONE);
+
+					/* Ensure the write order while writing
+					to the cmd register using barrier */
+					msm_io_w_mb(
+						CAMIF_COMMAND_STOP_IMMEDIATELY,
+						vfe32_ctrl->vfebase +
+						VFE_CAMIF_COMMAND);
+				}
+			}
+			/* then process stats irq. */
+			if (vfe32_ctrl->stats_comp) {
+				/* process stats comb interrupt. */
+				if (qcmd->vfeInterruptStatus0 &
+					VFE_IRQ_STATUS0_STATS_COMPOSIT_MASK) {
+					CDBG("Stats composite irq occured.\n");
+					vfe32_process_stats_comb_irq(
+						&qcmd->vfeInterruptStatus0);
+				}
+			} else {
+				/* process individual stats interrupt. */
+				if (qcmd->vfeInterruptStatus0 &
+						VFE_IRQ_STATUS0_STATS_AEC) {
+					CDBG("Stats AEC irq occured.\n");
+					vfe32_process_stats_ae_irq();
+				}
+				if (qcmd->vfeInterruptStatus0 &
+						VFE_IRQ_STATUS0_STATS_AWB) {
+					CDBG("Stats AWB irq occured.\n");
+					vfe32_process_stats_awb_irq();
+				}
+				if (qcmd->vfeInterruptStatus0 &
+						VFE_IRQ_STATUS0_STATS_AF) {
+					CDBG("Stats AF irq occured.\n");
+					vfe32_process_stats_af_irq();
+				}
+				if (qcmd->vfeInterruptStatus0 &
+						VFE_IRQ_STATUS0_STATS_IHIST) {
+					CDBG("Stats IHIST irq occured.\n");
+					vfe32_process_stats_ihist_irq();
+				}
+				if (qcmd->vfeInterruptStatus0 &
+						VFE_IRQ_STATUS0_STATS_RS) {
+					CDBG("Stats RS irq occured.\n");
+					vfe32_process_stats_rs_irq();
+				}
+				if (qcmd->vfeInterruptStatus0 &
+						VFE_IRQ_STATUS0_STATS_CS) {
+					CDBG("Stats CS irq occured.\n");
+					vfe32_process_stats_cs_irq();
+				}
+				if (qcmd->vfeInterruptStatus0 &
+						VFE_IRQ_STATUS0_SYNC_TIMER0) {
+					CDBG("SYNC_TIMER 0 irq occured.\n");
+					vfe32_send_msg_no_payload(
+						MSG_ID_SYNC_TIMER0_DONE);
+				}
+				if (qcmd->vfeInterruptStatus0 &
+						VFE_IRQ_STATUS0_SYNC_TIMER1) {
+					CDBG("SYNC_TIMER 1 irq occured.\n");
+					vfe32_send_msg_no_payload(
+						MSG_ID_SYNC_TIMER1_DONE);
+				}
+				if (qcmd->vfeInterruptStatus0 &
+						VFE_IRQ_STATUS0_SYNC_TIMER2) {
+					CDBG("SYNC_TIMER 2 irq occured.\n");
+					vfe32_send_msg_no_payload(
+						MSG_ID_SYNC_TIMER2_DONE);
+				}
+			}
+		}
+		if (qcmd->vfeInterruptStatus0 &
+				VFE_IRQ_STATUS0_CAMIF_SOF_MASK) {
+			CDBG("irq	camifSofIrq\n");
+			vfe32_process_camif_sof_irq();
+		}
+		kfree(qcmd);
+	}
+	CDBG("=== vfe32_do_tasklet end ===\n");
+}
+
+DECLARE_TASKLET(vfe32_tasklet, vfe32_do_tasklet, 0);
+
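+/*
+ * Top half: read and clear both irq status registers, mask them down to
+ * the stop-time subset while a stop ack is pending, queue the latched
+ * status on tasklet_q and schedule the tasklet.
+ */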
+static irqreturn_t vfe32_parse_irq(int irq_num, void *data)
+{
+	unsigned long flags;
+	struct vfe32_irq_status irq;
+	struct vfe32_isr_queue_cmd *qcmd;
+
+	CDBG("vfe_parse_irq\n");
+
+	vfe32_read_irq_status(&irq);
+
+	if ((irq.vfeIrqStatus0 == 0) && (irq.vfeIrqStatus1 == 0)) {
+		CDBG("vfe_parse_irq: vfeIrqStatus0 & 1 are both 0!\n");
+		return IRQ_HANDLED;
+	}
+
+	qcmd = kzalloc(sizeof(struct vfe32_isr_queue_cmd),
+		GFP_ATOMIC);
+	if (!qcmd) {
+		pr_err("vfe_parse_irq: qcmd malloc failed!\n");
+		return IRQ_HANDLED;
+	}
+
+	spin_lock_irqsave(&vfe32_ctrl->stop_flag_lock, flags);
+	if (vfe32_ctrl->stop_ack_pending) {
+		irq.vfeIrqStatus0 &= VFE_IMASK_WHILE_STOPPING_0;
+		irq.vfeIrqStatus1 &= VFE_IMASK_WHILE_STOPPING_1;
+	}
+	spin_unlock_irqrestore(&vfe32_ctrl->stop_flag_lock, flags);
+
+	CDBG("vfe_parse_irq: Irq_status0 = 0x%x, Irq_status1 = 0x%x.\n",
+		irq.vfeIrqStatus0, irq.vfeIrqStatus1);
+
+	qcmd->vfeInterruptStatus0 = irq.vfeIrqStatus0;
+	qcmd->vfeInterruptStatus1 = irq.vfeIrqStatus1;
+
+	spin_lock_irqsave(&vfe32_ctrl->tasklet_lock, flags);
+	list_add_tail(&qcmd->list, &vfe32_ctrl->tasklet_q);
+
+	atomic_add(1, &irq_cnt);
+	spin_unlock_irqrestore(&vfe32_ctrl->tasklet_lock, flags);
+	tasklet_schedule(&vfe32_tasklet);
+	return IRQ_HANDLED;
+}
+
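+/*
+ * Claim the platform MEM region, map the VFE register space and allocate
+ * and initialize the vfe32_ctrl state (locks, tasklet queue, free buffer
+ * queues).  The irq itself is requested later in msm_vfe_subdev_init().
+ */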
+static int vfe32_resource_init(struct platform_device *pdev, void *sdata)
+{
+	struct resource	*vfemem, *vfeirq, *vfeio;
+	int rc;
+	struct msm_camera_sensor_info *s_info;
+	s_info = pdev->dev.platform_data;
+
+	pdev->resource = s_info->resource;
+	pdev->num_resources = s_info->num_resources;
+
+	vfemem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!vfemem) {
+		pr_err("%s: no mem resource?\n", __func__);
+		return -ENODEV;
+	}
+
+	vfeirq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!vfeirq) {
+		pr_err("%s: no irq resource?\n", __func__);
+		return -ENODEV;
+	}
+
+	vfeio = request_mem_region(vfemem->start,
+		resource_size(vfemem), pdev->name);
+	if (!vfeio) {
+		pr_err("%s: VFE region already claimed\n", __func__);
+		return -EBUSY;
+	}
+
+	vfe32_ctrl = kzalloc(sizeof(struct vfe32_ctrl_type), GFP_KERNEL);
+	if (!vfe32_ctrl) {
+		rc = -ENOMEM;
+		goto cmd_init_failed1;
+	}
+
+	vfe32_ctrl->vfeirq = vfeirq->start;
+
+	vfe32_ctrl->vfebase =
+		ioremap(vfemem->start, (vfemem->end - vfemem->start) + 1);
+	if (!vfe32_ctrl->vfebase) {
+		rc = -ENOMEM;
+		pr_err("%s: vfe ioremap failed\n", __func__);
+		goto cmd_init_failed2;
+	}
+
+	vfe32_ctrl->extdata =
+		kmalloc(sizeof(struct vfe32_frame_extra), GFP_KERNEL);
+	if (!vfe32_ctrl->extdata) {
+		rc = -ENOMEM;
+		goto cmd_init_failed3;
+	}
+
+	vfe32_ctrl->extlen = sizeof(struct vfe32_frame_extra);
+
+	spin_lock_init(&vfe32_ctrl->stop_flag_lock);
+	spin_lock_init(&vfe32_ctrl->state_lock);
+	spin_lock_init(&vfe32_ctrl->io_lock);
+	spin_lock_init(&vfe32_ctrl->update_ack_lock);
+	spin_lock_init(&vfe32_ctrl->tasklet_lock);
+
+	spin_lock_init(&vfe32_ctrl->aec_ack_lock);
+	spin_lock_init(&vfe32_ctrl->awb_ack_lock);
+	spin_lock_init(&vfe32_ctrl->af_ack_lock);
+	INIT_LIST_HEAD(&vfe32_ctrl->tasklet_q);
+	vfe32_init_free_buf_queues();
+
+	vfe32_ctrl->syncdata = sdata;
+	vfe32_ctrl->vfemem = vfemem;
+	vfe32_ctrl->vfeio  = vfeio;
+	return 0;
+
+cmd_init_failed3:
+	free_irq(vfe32_ctrl->vfeirq, 0);
+	iounmap(vfe32_ctrl->vfebase);
+cmd_init_failed2:
+	kfree(vfe32_ctrl);
+cmd_init_failed1:
+	release_mem_region(vfemem->start, (vfemem->end - vfemem->start) + 1);
+	return rc;
+}
+
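+/*
+ * v4l2 subdev ioctl entry point: copies the command header from
+ * userspace (buffer-release commands instead carry the returned buffer;
+ * stats releases wrap it in a vfe_cmd_stats_ack), sets up stats buffers
+ * for the CMD_STATS_*_ENABLE commands, and then dispatches on cmd_type:
+ * general VFE config, frame/stats buffer release, or AXI output path
+ * configuration.
+ */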
+static long msm_vfe_subdev_ioctl(struct v4l2_subdev *sd,
+			unsigned int subdev_cmd, void *arg)
+{
+	struct msm_vfe32_cmd vfecmd;
+	struct msm_camvfe_params *vfe_params =
+		(struct msm_camvfe_params *)arg;
+	struct msm_vfe_cfg_cmd *cmd = vfe_params->vfe_cfg;
+	void *data = vfe_params->data;
+
+	long rc = 0;
+	uint32_t i = 0;
+	struct vfe_cmd_stats_buf *scfg = NULL;
+	struct msm_pmem_region   *regptr = NULL;
+	struct vfe_cmd_stats_ack *sack = NULL;
+	if (cmd->cmd_type != CMD_FRAME_BUF_RELEASE &&
+		cmd->cmd_type != CMD_STATS_AEC_BUF_RELEASE &&
+		cmd->cmd_type != CMD_STATS_AWB_BUF_RELEASE &&
+		cmd->cmd_type != CMD_STATS_IHIST_BUF_RELEASE &&
+		cmd->cmd_type != CMD_STATS_RS_BUF_RELEASE &&
+		cmd->cmd_type != CMD_STATS_CS_BUF_RELEASE &&
+		cmd->cmd_type != CMD_STATS_AF_BUF_RELEASE) {
+		if (copy_from_user(&vfecmd,
+				(void __user *)(cmd->value),
+				sizeof(vfecmd))) {
+			pr_err("%s %d: copy_from_user failed\n", __func__,
+				__LINE__);
+			return -EFAULT;
+		}
+	} else {
+		/* here either stats release or frame release. */
+		if (cmd->cmd_type != CMD_FRAME_BUF_RELEASE) {
+			/* then must be stats release. */
+			if (!data)
+				return -EFAULT;
+			sack = kmalloc(sizeof(struct vfe_cmd_stats_ack),
+							GFP_ATOMIC);
+			if (!sack)
+				return -ENOMEM;
+
+			sack->nextStatsBuf = *(uint32_t *)data;
+		}
+	}
+
+	CDBG("%s: cmdType = %d\n", __func__, cmd->cmd_type);
+
+	if ((cmd->cmd_type == CMD_STATS_AF_ENABLE)    ||
+		(cmd->cmd_type == CMD_STATS_AWB_ENABLE)   ||
+		(cmd->cmd_type == CMD_STATS_IHIST_ENABLE) ||
+		(cmd->cmd_type == CMD_STATS_RS_ENABLE)    ||
+		(cmd->cmd_type == CMD_STATS_CS_ENABLE)    ||
+		(cmd->cmd_type == CMD_STATS_AEC_ENABLE)) {
+		struct axidata *axid;
+		axid = data;
+		if (!axid) {
+			rc = -EFAULT;
+			goto vfe32_config_done;
+		}
+
+		scfg =
+			kmalloc(sizeof(struct vfe_cmd_stats_buf),
+				GFP_ATOMIC);
+		if (!scfg) {
+			rc = -ENOMEM;
+			goto vfe32_config_done;
+		}
+		regptr = axid->region;
+		if (axid->bufnum1 > 0) {
+			for (i = 0; i < axid->bufnum1; i++) {
+				scfg->statsBuf[i] =
+					(uint32_t)(regptr->paddr);
+				regptr++;
+			}
+		}
+		/* individual */
+		switch (cmd->cmd_type) {
+		case CMD_STATS_AEC_ENABLE:
+			rc = vfe_stats_aec_buf_init(scfg);
+			break;
+		case CMD_STATS_AF_ENABLE:
+			rc = vfe_stats_af_buf_init(scfg);
+			break;
+		case CMD_STATS_AWB_ENABLE:
+			rc = vfe_stats_awb_buf_init(scfg);
+			break;
+		case CMD_STATS_IHIST_ENABLE:
+			rc = vfe_stats_ihist_buf_init(scfg);
+			break;
+		case CMD_STATS_RS_ENABLE:
+			rc = vfe_stats_rs_buf_init(scfg);
+			break;
+		case CMD_STATS_CS_ENABLE:
+			rc = vfe_stats_cs_buf_init(scfg);
+			break;
+		}
+	}
+	switch (cmd->cmd_type) {
+	case CMD_GENERAL:
+		rc = vfe32_proc_general(&vfecmd);
+		break;
+	case CMD_FRAME_BUF_RELEASE: {
+		struct msm_frame *b;
+		unsigned long p;
+		struct vfe32_output_ch *outch = NULL;
+		if (!data) {
+			rc = -EFAULT;
+			break;
+		}
+
+		b = (struct msm_frame *)(cmd->value);
+		p = *(unsigned long *)data;
+
+		CDBG("CMD_FRAME_BUF_RELEASE b->path = %d\n", b->path);
+
+		if ((b->path & OUTPUT_TYPE_P) || (b->path & OUTPUT_TYPE_T)) {
+			CDBG("CMD_FRAME_BUF_RELEASE got free buffer\n");
+			outch = &vfe32_ctrl->outpath.out0;
+		} else if (b->path & OUTPUT_TYPE_S) {
+			outch = &vfe32_ctrl->outpath.out1;
+		} else if (b->path & OUTPUT_TYPE_V) {
+			outch = &vfe32_ctrl->outpath.out2;
+		} else {
+			rc = -EFAULT;
+			break;
+		}
+
+		rc = vfe32_enqueue_free_buf(outch, p, b->y_off, b->cbcr_off);
+	}
+		break;
+
+	case CMD_SNAP_BUF_RELEASE:
+		break;
+	case CMD_STATS_AEC_BUF_RELEASE:
+		vfe32_stats_aec_ack(sack);
+		break;
+	case CMD_STATS_AF_BUF_RELEASE:
+		vfe32_stats_af_ack(sack);
+		break;
+	case CMD_STATS_AWB_BUF_RELEASE:
+		vfe32_stats_awb_ack(sack);
+		break;
+
+	case CMD_STATS_IHIST_BUF_RELEASE:
+		vfe32_stats_ihist_ack(sack);
+		break;
+	case CMD_STATS_RS_BUF_RELEASE:
+		vfe32_stats_rs_ack(sack);
+		break;
+	case CMD_STATS_CS_BUF_RELEASE:
+		vfe32_stats_cs_ack(sack);
+		break;
+
+	case CMD_AXI_CFG_PREVIEW: {
+		struct axidata *axid;
+		uint32_t *axio = NULL;
+		axid = data;
+		if (!axid) {
+			rc = -EFAULT;
+			break;
+		}
+		axio =
+			kmalloc(vfe32_cmd[V32_AXI_OUT_CFG].length,
+				GFP_ATOMIC);
+		if (!axio) {
+			rc = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(axio, (void __user *)(vfecmd.value),
+				vfe32_cmd[V32_AXI_OUT_CFG].length)) {
+			kfree(axio);
+			rc = -EFAULT;
+			break;
+		}
+		vfe32_config_axi(OUTPUT_2, axid, axio);
+		kfree(axio);
+	}
+		break;
+
+	case CMD_RAW_PICT_AXI_CFG: {
+		struct axidata *axid;
+		uint32_t *axio = NULL;
+		axid = data;
+		if (!axid) {
+			rc = -EFAULT;
+			break;
+		}
+		axio = kmalloc(vfe32_cmd[V32_AXI_OUT_CFG].length,
+				GFP_ATOMIC);
+		if (!axio) {
+			rc = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(axio, (void __user *)(vfecmd.value),
+				vfe32_cmd[V32_AXI_OUT_CFG].length)) {
+			kfree(axio);
+			rc = -EFAULT;
+			break;
+		}
+		vfe32_config_axi(CAMIF_TO_AXI_VIA_OUTPUT_2, axid, axio);
+		kfree(axio);
+	}
+		break;
+
+	case CMD_AXI_CFG_SNAP: {
+		struct axidata *axid;
+		uint32_t *axio = NULL;
+		axid = data;
+		if (!axid)
+			return -EFAULT;
+		axio =
+			kmalloc(vfe32_cmd[V32_AXI_OUT_CFG].length,
+				GFP_ATOMIC);
+		if (!axio) {
+			rc = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(axio, (void __user *)(vfecmd.value),
+				vfe32_cmd[V32_AXI_OUT_CFG].length)) {
+			kfree(axio);
+			rc = -EFAULT;
+			break;
+		}
+		vfe32_config_axi(OUTPUT_1_AND_2, axid, axio);
+		kfree(axio);
+	}
+		break;
+
+	case CMD_AXI_CFG_VIDEO: {
+		struct axidata *axid;
+		uint32_t *axio = NULL;
+		axid = data;
+		if (!axid) {
+			rc = -EFAULT;
+			break;
+		}
+
+		axio = kmalloc(vfe32_cmd[V32_AXI_OUT_CFG].length,
+				GFP_ATOMIC);
+		if (!axio) {
+			rc = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(axio, (void __user *)(vfecmd.value),
+				vfe32_cmd[V32_AXI_OUT_CFG].length)) {
+			kfree(axio);
+			rc = -EFAULT;
+			break;
+		}
+		vfe32_config_axi(OUTPUT_1_AND_3, axid, axio);
+		kfree(axio);
+	}
+		break;
+	default:
+		break;
+	}
+vfe32_config_done:
+	kfree(scfg);
+	kfree(sack);
+	CDBG("%s done: rc = %d\n", __func__, (int) rc);
+	return rc;
+}
+
+static const struct v4l2_subdev_core_ops msm_vfe_subdev_core_ops = {
+	.ioctl = msm_vfe_subdev_ioctl,
+};
+
+static const struct v4l2_subdev_ops msm_vfe_subdev_ops = {
+	.core = &msm_vfe_subdev_core_ops,
+};
+
+int msm_vfe_subdev_init(struct v4l2_subdev *sd, void *data,
+	struct platform_device *pdev)
+{
+	int rc = 0;
+	struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data;
+	struct msm_camera_device_platform_data *camdev = sinfo->pdata;
+
+	v4l2_subdev_init(sd, &msm_vfe_subdev_ops);
+	v4l2_set_subdev_hostdata(sd, data);
+	snprintf(sd->name, sizeof(sd->name), "vfe3.2");
+
+	vfe_syncdata = data;
+
+	camio_clk = camdev->ioclk;
+
+	rc = vfe32_resource_init(pdev, vfe_syncdata);
+	if (rc < 0)
+		return rc;
+
+	vfe32_ctrl->subdev = sd;
+	/* Bring up all the required GPIOs and Clocks */
+	rc = msm_camio_enable(pdev);
+	msm_camio_set_perf_lvl(S_INIT);
+	msm_camio_set_perf_lvl(S_PREVIEW);
+
+	/* TO DO: Need to release the VFE resources */
+	rc = request_irq(vfe32_ctrl->vfeirq, vfe32_parse_irq,
+			IRQF_TRIGGER_RISING, "vfe", 0);
+
+	return rc;
+}
+
+void msm_vfe_subdev_release(struct platform_device *pdev)
+{
+	struct resource	*vfemem, *vfeio;
+
+	vfe32_reset_free_buf_queues();
+	CDBG("%s, free_irq\n", __func__);
+	free_irq(vfe32_ctrl->vfeirq, 0);
+	tasklet_kill(&vfe32_tasklet);
+
+	if (atomic_read(&irq_cnt))
+		pr_warning("%s, Warning IRQ Count not ZERO\n", __func__);
+
+	vfemem = vfe32_ctrl->vfemem;
+	vfeio  = vfe32_ctrl->vfeio;
+
+	kfree(vfe32_ctrl->extdata);
+	iounmap(vfe32_ctrl->vfebase);
+	kfree(vfe32_ctrl);
+	vfe32_ctrl = NULL;
+	release_mem_region(vfemem->start, (vfemem->end - vfemem->start) + 1);
+	CDBG("%s, msm_camio_disable\n", __func__);
+	msm_camio_disable(pdev);
+	msm_camio_set_perf_lvl(S_EXIT);
+
+	vfe_syncdata = NULL;
+}
+
+void msm_camvpe_fn_init(struct msm_camvpe_fn *fptr, void *data)
+{
+	fptr->vpe_reg		= msm_vpe_reg;
+	fptr->send_frame_to_vpe	= msm_send_frame_to_vpe;
+	fptr->vpe_config	= msm_vpe_config;
+	fptr->vpe_cfg_update	= msm_vpe_cfg_update;
+	fptr->dis		= &(vpe_ctrl->dis_en);
+	vpe_ctrl->syncdata = data;
+}
diff --git a/drivers/media/video/msm/msm_vfe32.h b/drivers/media/video/msm/msm_vfe32.h
new file mode 100644
index 0000000..4d48c6b
--- /dev/null
+++ b/drivers/media/video/msm/msm_vfe32.h
@@ -0,0 +1,1104 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_VFE32_H__
+#define __MSM_VFE32_H__
+
+#define TRUE  1
+#define FALSE 0
+
+/* at start of camif,  bit 1:0 = 0x01:enable
+ * image data capture at frame boundary. */
+#define CAMIF_COMMAND_START  0x00000005
+
+/* bit 2= 0x1:clear the CAMIF_STATUS register
+ * value. */
+#define CAMIF_COMMAND_CLEAR  0x00000004
+
+/* at stop of the vfe pipeline, for now it is assumed
+ * that camif may stop at any time. Bits 1:0 = binary 10:
+ * disable image data capture immediately. */
+#define CAMIF_COMMAND_STOP_IMMEDIATELY  0x00000002
+
+/* at stop of the vfe pipeline, for now it is assumed
+ * that camif may stop at any time. Bits 1:0 = binary 00:
+ * disable image data capture at frame boundary. */
+#define CAMIF_COMMAND_STOP_AT_FRAME_BOUNDARY  0x00000000
+
+/* to halt axi bridge */
+#define AXI_HALT  0x00000001
+
+/* clear the halt bit. */
+#define AXI_HALT_CLEAR  0x00000000
+
+/* reset the pipeline when the stop command is issued
+ * (without resetting the registers). bits 26-31 = 0,
+ * domain reset; bits 0-9 = 1 for module reset, except
+ * the register module. */
+#define VFE_RESET_UPON_STOP_CMD  0x000003ef
+
+/* reset the pipeline when the reset command is issued.
+ * bits 26-31 = 0, domain reset; bits 0-9 = 1 for module reset. */
+#define VFE_RESET_UPON_RESET_CMD  0x000003ff
+
+/* bit 5 is for axi status idle or busy.
+ * 1 =  halted,  0 = busy */
+#define AXI_STATUS_BUSY_MASK 0x00000020
+
+/* bit 0 & bit 1 = 1, both y and cbcr irqs need to be present
+ * for frame done interrupt */
+#define VFE_COMP_IRQ_BOTH_Y_CBCR 3
+
+/* bit 1 = 1, only cbcr irq triggers frame done interrupt */
+#define VFE_COMP_IRQ_CBCR_ONLY 2
+
+/* bit 0 = 1, only y irq triggers frame done interrupt */
+#define VFE_COMP_IRQ_Y_ONLY 1
+
+/* bit 0 = 1, PM go;   bit1 = 1, PM stop */
+#define VFE_PERFORMANCE_MONITOR_GO   0x00000001
+#define VFE_PERFORMANCE_MONITOR_STOP 0x00000002
+
+/* bit 0 = 1, test gen go;   bit1 = 1, test gen stop */
+#define VFE_TEST_GEN_GO   0x00000001
+#define VFE_TEST_GEN_STOP 0x00000002
+
+/* the chroma is assumed to be interpolated between
+ * the luma samples.  JPEG 4:2:2 */
+#define VFE_CHROMA_UPSAMPLE_INTERPOLATED 0
+
+/* constants for irq registers */
+#define VFE_DISABLE_ALL_IRQS 0
+/* bit =1 is to clear the corresponding bit in VFE_IRQ_STATUS.  */
+#define VFE_CLEAR_ALL_IRQS   0xffffffff
+
+#define VFE_IRQ_STATUS0_CAMIF_SOF_MASK            0x00000001
+#define VFE_IRQ_STATUS0_REG_UPDATE_MASK           0x00000020
+#define VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE0_MASK 0x00200000
+#define VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE1_MASK 0x00400000
+#define VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE2_MASK 0x00800000
+#define VFE_IRQ_STATUS1_RESET_AXI_HALT_ACK_MASK   0x00800000
+#define VFE_IRQ_STATUS0_STATS_COMPOSIT_MASK       0x01000000
+
+#define VFE_IRQ_STATUS0_STATS_AEC     0x2000  /* bit 13 */
+#define VFE_IRQ_STATUS0_STATS_AF      0x4000  /* bit 14 */
+#define VFE_IRQ_STATUS0_STATS_AWB     0x8000  /* bit 15 */
+#define VFE_IRQ_STATUS0_STATS_RS      0x10000  /* bit 16 */
+#define VFE_IRQ_STATUS0_STATS_CS      0x20000  /* bit 17 */
+#define VFE_IRQ_STATUS0_STATS_IHIST   0x40000  /* bit 18 */
+
+#define VFE_IRQ_STATUS0_SYNC_TIMER0   0x2000000  /* bit 25 */
+#define VFE_IRQ_STATUS0_SYNC_TIMER1   0x4000000  /* bit 26 */
+#define VFE_IRQ_STATUS0_SYNC_TIMER2   0x8000000  /* bit 27 */
+#define VFE_IRQ_STATUS0_ASYNC_TIMER0  0x10000000  /* bit 28 */
+#define VFE_IRQ_STATUS0_ASYNC_TIMER1  0x20000000  /* bit 29 */
+#define VFE_IRQ_STATUS0_ASYNC_TIMER2  0x40000000  /* bit 30 */
+#define VFE_IRQ_STATUS0_ASYNC_TIMER3  0x80000000  /* bit 31 */
+
+/* irq mask used while waiting for the stop ack: the driver has
+ * already requested stop and is waiting for the reset irq and the
+ * async timer irqs. For irq_status_0, bits 28-31 are the async
+ * timers. For irq_status_1, bit 22 is the reset irq and bit 23 the
+ * axi_halt_ack irq. */
+#define VFE_IMASK_WHILE_STOPPING_0  0xF0000000
+#define VFE_IMASK_WHILE_STOPPING_1  0x00800000
+
+/* no error irq in mask 0 */
+#define VFE_IMASK_ERROR_ONLY_0  0x0
+/* when normal case, don't want to block error status. */
+/* bit 0-21 are error irq bits */
+#define VFE_IMASK_ERROR_ONLY_1  0x003fffff
+
+/* For BPC, bit 0, bits 12-17 and bits 20-26 are set to zero; all others 1 */
+#define BPC_MASK 0xF80C0FFE
+
+/* For ABF, bits 1 and 2 are set to zero; all others 1 */
+#define ABF_MASK 0xFFFFFFF9
+
+
+/* For DBPC, bit 0 is set to zero; all others 1 */
+#define DBPC_MASK 0xFFFFFFFE
+
+/* For DBCC, bit 1 is set to zero; all others 1 */
+#define DBCC_MASK 0xFFFFFFFD
+
+/* For MCE enable, bit 28 is set to zero; all others 1 */
+#define MCE_EN_MASK 0xEFFFFFFF
+
+/* For MCE Q_K, bits 28 to 31 are set to zero; all others 1 */
+#define MCE_Q_K_MASK 0x0FFFFFFF
+
+#define AE_BG_ENABLE_MASK 0x00000020      /* bit 5 */
+#define AF_BF_ENABLE_MASK 0x00000040      /* bit 6 */
+#define AWB_ENABLE_MASK 0x00000080     /* bit 7 */
+#define RS_ENABLE_MASK 0x00000100      /* bit 8  */
+#define CS_ENABLE_MASK 0x00000200      /* bit 9  */
+#define RS_CS_ENABLE_MASK 0x00000300   /* bit 8,9  */
+#define CLF_ENABLE_MASK 0x00002000     /* bit 13 */
+#define IHIST_ENABLE_MASK 0x00010000   /* bit 16 */
+#define STATS_ENABLE_MASK 0x000903E0   /* bit 19,16,9,8,7,6,5*/
+
+#define VFE_REG_UPDATE_TRIGGER           1
+#define VFE_PM_BUF_MAX_CNT_MASK          0xFF
+#define VFE_DMI_CFG_DEFAULT              0x00000100
+#define LENS_ROLL_OFF_DELTA_TABLE_OFFSET 32
+#define VFE_AE_PINGPONG_STATUS_BIT       0x80
+#define VFE_AF_PINGPONG_STATUS_BIT       0x100
+#define VFE_AWB_PINGPONG_STATUS_BIT      0x200
+
+
+enum VFE32_DMI_RAM_SEL {
+	NO_MEM_SELECTED          = 0,
+	BLACK_LUT_RAM_BANK0      = 0x1,
+	BLACK_LUT_RAM_BANK1      = 0x2,
+	ROLLOFF_RAM              = 0x3,
+	DEMOSAIC_LUT_RAM_BANK0   = 0x4,
+	DEMOSAIC_LUT_RAM_BANK1   = 0x5,
+	STATS_BHIST_RAM0         = 0x6,
+	STATS_BHIST_RAM1         = 0x7,
+	RGBLUT_RAM_CH0_BANK0     = 0x8,
+	RGBLUT_RAM_CH0_BANK1     = 0x9,
+	RGBLUT_RAM_CH1_BANK0     = 0xa,
+	RGBLUT_RAM_CH1_BANK1     = 0xb,
+	RGBLUT_RAM_CH2_BANK0     = 0xc,
+	RGBLUT_RAM_CH2_BANK1     = 0xd,
+	RGBLUT_CHX_BANK0         = 0xe,
+	RGBLUT_CHX_BANK1         = 0xf,
+	STATS_IHIST_RAM          = 0x10,
+	LUMA_ADAPT_LUT_RAM_BANK0 = 0x11,
+	LUMA_ADAPT_LUT_RAM_BANK1 = 0x12
+};
+
+enum  VFE_STATE {
+	VFE_STATE_IDLE,
+	VFE_STATE_ACTIVE
+};
+
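+/*
+ * Command ids handled by vfe32_proc_general(); they also index the
+ * vfe32_cmd[] offset/length table and the vfe32_general_cmd[] name
+ * table in msm_vfe32.c.
+ */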
+#define V32_DUMMY_0               0
+#define V32_SET_CLK               1
+#define V32_RESET                 2
+#define V32_START                 3
+#define V32_TEST_GEN_START        4
+#define V32_OPERATION_CFG         5
+#define V32_AXI_OUT_CFG           6
+#define V32_CAMIF_CFG             7
+#define V32_AXI_INPUT_CFG         8
+#define V32_BLACK_LEVEL_CFG       9
+#define V32_ROLL_OFF_CFG          10
+#define V32_DEMUX_CFG             11
+#define V32_FOV_CFG               12
+#define V32_MAIN_SCALER_CFG       13
+#define V32_WB_CFG                14
+#define V32_COLOR_COR_CFG         15
+#define V32_RGB_G_CFG             16
+#define V32_LA_CFG                17
+#define V32_CHROMA_EN_CFG         18
+#define V32_CHROMA_SUP_CFG        19
+#define V32_MCE_CFG               20
+#define V32_SK_ENHAN_CFG          21
+#define V32_ASF_CFG               22
+#define V32_S2Y_CFG               23
+#define V32_S2CbCr_CFG            24
+#define V32_CHROMA_SUBS_CFG       25
+#define V32_OUT_CLAMP_CFG         26
+#define V32_FRAME_SKIP_CFG        27
+#define V32_DUMMY_1               28
+#define V32_DUMMY_2               29
+#define V32_DUMMY_3               30
+#define V32_UPDATE                31
+#define V32_BL_LVL_UPDATE         32
+#define V32_DEMUX_UPDATE          33
+#define V32_FOV_UPDATE            34
+#define V32_MAIN_SCALER_UPDATE    35
+#define V32_WB_UPDATE             36
+#define V32_COLOR_COR_UPDATE      37
+#define V32_RGB_G_UPDATE          38
+#define V32_LA_UPDATE             39
+#define V32_CHROMA_EN_UPDATE      40
+#define V32_CHROMA_SUP_UPDATE     41
+#define V32_MCE_UPDATE            42
+#define V32_SK_ENHAN_UPDATE       43
+#define V32_S2CbCr_UPDATE         44
+#define V32_S2Y_UPDATE            45
+#define V32_ASF_UPDATE            46
+#define V32_FRAME_SKIP_UPDATE     47
+#define V32_CAMIF_FRAME_UPDATE    48
+#define V32_STATS_AF_UPDATE       49
+#define V32_STATS_AE_UPDATE       50
+#define V32_STATS_AWB_UPDATE      51
+#define V32_STATS_RS_UPDATE       52
+#define V32_STATS_CS_UPDATE       53
+#define V32_STATS_SKIN_UPDATE     54
+#define V32_STATS_IHIST_UPDATE    55
+#define V32_DUMMY_4               56
+#define V32_EPOCH1_ACK            57
+#define V32_EPOCH2_ACK            58
+#define V32_START_RECORDING       59
+#define V32_STOP_RECORDING        60
+#define V32_DUMMY_5               61
+#define V32_DUMMY_6               62
+#define V32_CAPTURE               63
+#define V32_DUMMY_7               64
+#define V32_STOP                  65
+#define V32_GET_HW_VERSION        66
+#define V32_GET_FRAME_SKIP_COUNTS 67
+#define V32_OUTPUT1_BUFFER_ENQ    68
+#define V32_OUTPUT2_BUFFER_ENQ    69
+#define V32_OUTPUT3_BUFFER_ENQ    70
+#define V32_JPEG_OUT_BUF_ENQ      71
+#define V32_RAW_OUT_BUF_ENQ       72
+#define V32_RAW_IN_BUF_ENQ        73
+#define V32_STATS_AF_ENQ          74
+#define V32_STATS_AE_ENQ          75
+#define V32_STATS_AWB_ENQ         76
+#define V32_STATS_RS_ENQ          77
+#define V32_STATS_CS_ENQ          78
+#define V32_STATS_SKIN_ENQ        79
+#define V32_STATS_IHIST_ENQ       80
+#define V32_DUMMY_8               81
+#define V32_JPEG_ENC_CFG          82
+#define V32_DUMMY_9               83
+#define V32_STATS_AF_START        84
+#define V32_STATS_AF_STOP         85
+#define V32_STATS_AE_START        86
+#define V32_STATS_AE_STOP         87
+#define V32_STATS_AWB_START       88
+#define V32_STATS_AWB_STOP        89
+#define V32_STATS_RS_START        90
+#define V32_STATS_RS_STOP         91
+#define V32_STATS_CS_START        92
+#define V32_STATS_CS_STOP         93
+#define V32_STATS_SKIN_START      94
+#define V32_STATS_SKIN_STOP       95
+#define V32_STATS_IHIST_START     96
+#define V32_STATS_IHIST_STOP      97
+#define V32_DUMMY_10              98
+#define V32_SYNC_TIMER_SETTING    99
+#define V32_ASYNC_TIMER_SETTING   100
+#define V32_LIVESHOT              101
+#define V32_LA_SETUP              102
+
+#define V32_LINEARIZATION         103
+#define V32_DEMOSAICV3            104
+#define V32_DEMOSAICV3_ABCC_CFG   105
+#define V32_DEMOSAICV3_DBCC_CFG       106
+#define V32_DEMOSAICV3_DBPC_CFG       107
+#define V32_DEMOSAICV3_ABF_CFG        108
+#define V32_DEMOSAICV3_ABCC_UPDATE    109
+#define V32_DEMOSAICV3_DBCC_UPDATE    110
+#define V32_DEMOSAICV3_DBPC_UPDATE    111
+#define V32_EZTUNE_CFG            112
+
+#define V32_CLF_CFG               118
+#define V32_CLF_UPDATE            119
+#define V32_STATS_IHIST3_2_START  120
+#define V32_STATS_IHIST3_2_UPDATE 121
+#define V32_CAMIF3_2_CONFIG       122
+
+#define V32_CAMIF_OFF             0x000001E4
+#define V32_CAMIF_LEN             32
+
+#define V32_DEMUX_OFF             0x00000284
+#define V32_DEMUX_LEN             20
+
+#define V32_DEMOSAICV3_0_OFF      0x00000298
+#define V32_DEMOSAICV3_0_LEN      4
+#define V32_DEMOSAICV3_1_OFF      0x0000061C
+#define V32_DEMOSAICV3_1_LEN      88
+/* BPC     */
+#define V32_DEMOSAIC_2_OFF        0x0000029C
+#define V32_DEMOSAIC_2_LEN        8
+
+#define V32_OUT_CLAMP_OFF         0x00000524
+#define V32_OUT_CLAMP_LEN         8
+
+#define V32_OPERATION_CFG_LEN     32
+
+#define V32_AXI_OUT_OFF           0x00000038
+#define V32_AXI_OUT_LEN           212
+#define V32_AXI_CH_INF_LEN        24
+#define V32_AXI_CFG_LEN           47
+
+#define V32_FRAME_SKIP_OFF        0x00000504
+#define V32_FRAME_SKIP_LEN        32
+
+#define V32_CHROMA_SUBS_OFF       0x000004F8
+#define V32_CHROMA_SUBS_LEN       12
+
+#define V32_FOV_OFF           0x00000360
+#define V32_FOV_LEN           8
+
+#define V32_MAIN_SCALER_OFF 0x00000368
+#define V32_MAIN_SCALER_LEN 28
+
+#define V32_S2Y_OFF 0x000004D0
+#define V32_S2Y_LEN 20
+
+#define V32_S2CbCr_OFF 0x000004E4
+#define V32_S2CbCr_LEN 20
+
+#define V32_CHROMA_EN_OFF 0x000003C4
+#define V32_CHROMA_EN_LEN 36
+
+#define V32_SYNC_TIMER_OFF      0x0000020C
+#define V32_SYNC_TIMER_POLARITY_OFF 0x00000234
+#define V32_TIMER_SELECT_OFF        0x0000025C
+#define V32_SYNC_TIMER_LEN 28
+
+#define V32_ASYNC_TIMER_OFF 0x00000238
+#define V32_ASYNC_TIMER_LEN 28
+
+#define V32_BLACK_LEVEL_OFF 0x00000264
+#define V32_BLACK_LEVEL_LEN 16
+
+#define V32_ROLL_OFF_CFG_OFF 0x00000274
+#define V32_ROLL_OFF_CFG_LEN 16
+
+#define V32_COLOR_COR_OFF 0x00000388
+#define V32_COLOR_COR_LEN 52
+
+#define V32_WB_OFF 0x00000384
+#define V32_WB_LEN 4
+
+#define V32_RGB_G_OFF 0x000003BC
+#define V32_RGB_G_LEN 4
+
+#define V32_LA_OFF 0x000003C0
+#define V32_LA_LEN 4
+
+#define V32_SCE_OFF 0x00000418
+#define V32_SCE_LEN 136
+
+#define V32_CHROMA_SUP_OFF 0x000003E8
+#define V32_CHROMA_SUP_LEN 12
+
+#define V32_MCE_OFF 0x000003E8
+#define V32_MCE_LEN 36
+#define V32_STATS_AF_OFF 0x0000053c
+#define V32_STATS_AF_LEN 16
+
+#define V32_STATS_AE_OFF 0x00000534
+#define V32_STATS_AE_LEN 8
+
+#define V32_STATS_AWB_OFF 0x0000054c
+#define V32_STATS_AWB_LEN 32
+
+#define V32_STATS_IHIST_OFF 0x0000057c
+#define V32_STATS_IHIST_LEN 8
+
+#define V32_STATS_RS_OFF 0x0000056c
+#define V32_STATS_RS_LEN 8
+
+#define V32_STATS_CS_OFF 0x00000574
+#define V32_STATS_CS_LEN 8
+
+
+#define V32_ASF_OFF 0x000004A0
+#define V32_ASF_LEN 48
+#define V32_ASF_UPDATE_LEN 36
+
+#define V32_CAPTURE_LEN 4
+
+#define V32_LINEARIZATION_OFF1 0x00000264
+#define V32_LINEARIZATION_LEN1 16
+
+#define V32_LINEARIZATION_OFF2 0x0000067C
+#define V32_LINEARIZATION_LEN2 52
+
+#define V32_DEMOSAICV3_OFF 0x00000298
+#define V32_DEMOSAICV3_LEN 4
+
+#define V32_DEMOSAICV3_DBPC_CFG_OFF  0x0000029C
+#define V32_DEMOSAICV3_DBPC_LEN 4
+
+#define V32_DEMOSAICV3_DBPC_CFG_OFF0 0x000002a0
+#define V32_DEMOSAICV3_DBPC_CFG_OFF1 0x00000604
+#define V32_DEMOSAICV3_DBPC_CFG_OFF2 0x00000608
+
+#define V32_DEMOSAICV3_DBCC_OFF 0x0000060C
+#define V32_DEMOSAICV3_DBCC_LEN 16
+
+#define V32_DEMOSAICV3_ABF_OFF 0x0000029C
+#define V32_DEMOSAICV3_ABF_LEN
+
+#define V32_EZTUNE_CFG_OFF 0x00000010
+#define V32_EZTUNE_CFG_LEN 4
+
+struct vfe_cmd_hw_version {
+	uint32_t minorVersion;
+	uint32_t majorVersion;
+	uint32_t coreVersion;
+};
+
+enum VFE_AXI_OUTPUT_MODE {
+	VFE_AXI_OUTPUT_MODE_Output1,
+	VFE_AXI_OUTPUT_MODE_Output2,
+	VFE_AXI_OUTPUT_MODE_Output1AndOutput2,
+	VFE_AXI_OUTPUT_MODE_CAMIFToAXIViaOutput2,
+	VFE_AXI_OUTPUT_MODE_Output2AndCAMIFToAXIViaOutput1,
+	VFE_AXI_OUTPUT_MODE_Output1AndCAMIFToAXIViaOutput2,
+	VFE_AXI_LAST_OUTPUT_MODE_ENUM
+};
+
+enum VFE_RAW_WR_PATH_SEL {
+	VFE_RAW_OUTPUT_DISABLED,
+	VFE_RAW_OUTPUT_ENC_CBCR_PATH,
+	VFE_RAW_OUTPUT_VIEW_CBCR_PATH,
+	VFE_RAW_OUTPUT_PATH_INVALID
+};
+
+
+#define VFE_AXI_OUTPUT_BURST_LENGTH     4
+#define VFE_MAX_NUM_FRAGMENTS_PER_FRAME 4
+#define VFE_AXI_OUTPUT_CFG_FRAME_COUNT  3
+
+struct vfe_cmds_per_write_master {
+	uint16_t imageWidth;
+	uint16_t imageHeight;
+	uint16_t outRowCount;
+	uint16_t outRowIncrement;
+	uint32_t outFragments[VFE_AXI_OUTPUT_CFG_FRAME_COUNT]
+		[VFE_MAX_NUM_FRAGMENTS_PER_FRAME];
+};
+
+struct vfe_cmds_axi_per_output_path {
+	uint8_t fragmentCount;
+	struct vfe_cmds_per_write_master firstWM;
+	struct vfe_cmds_per_write_master secondWM;
+};
+
+enum VFE_AXI_BURST_LENGTH {
+	VFE_AXI_BURST_LENGTH_IS_2  = 2,
+	VFE_AXI_BURST_LENGTH_IS_4  = 4,
+	VFE_AXI_BURST_LENGTH_IS_8  = 8,
+	VFE_AXI_BURST_LENGTH_IS_16 = 16
+};
+
+
+struct vfe_cmd_fov_crop_config {
+	uint8_t enable;
+	uint16_t firstPixel;
+	uint16_t lastPixel;
+	uint16_t firstLine;
+	uint16_t lastLine;
+};
+
+struct vfe_cmds_main_scaler_stripe_init {
+	uint16_t MNCounterInit;
+	uint16_t phaseInit;
+};
+
+struct vfe_cmds_scaler_one_dimension {
+	uint8_t  enable;
+	uint16_t inputSize;
+	uint16_t outputSize;
+	uint32_t phaseMultiplicationFactor;
+	uint8_t  interpolationResolution;
+};
+
+struct vfe_cmd_main_scaler_config {
+	uint8_t enable;
+	struct vfe_cmds_scaler_one_dimension    hconfig;
+	struct vfe_cmds_scaler_one_dimension    vconfig;
+	struct vfe_cmds_main_scaler_stripe_init MNInitH;
+	struct vfe_cmds_main_scaler_stripe_init MNInitV;
+};
+
+struct vfe_cmd_scaler2_config {
+	uint8_t enable;
+	struct vfe_cmds_scaler_one_dimension hconfig;
+	struct vfe_cmds_scaler_one_dimension vconfig;
+};
+
+
+struct vfe_cmd_frame_skip_update {
+	uint32_t output1Pattern;
+	uint32_t output2Pattern;
+};
+
+struct vfe_cmd_output_clamp_config {
+	uint8_t minCh0;
+	uint8_t minCh1;
+	uint8_t minCh2;
+	uint8_t maxCh0;
+	uint8_t maxCh1;
+	uint8_t maxCh2;
+};
+
+struct vfe_cmd_chroma_subsample_config {
+	uint8_t enable;
+	uint8_t cropEnable;
+	uint8_t vsubSampleEnable;
+	uint8_t hsubSampleEnable;
+	uint8_t vCosited;
+	uint8_t hCosited;
+	uint8_t vCositedPhase;
+	uint8_t hCositedPhase;
+	uint16_t cropWidthFirstPixel;
+	uint16_t cropWidthLastPixel;
+	uint16_t cropHeightFirstLine;
+	uint16_t cropHeightLastLine;
+};
+
+enum VFE_START_PIXEL_PATTERN {
+	VFE_BAYER_RGRGRG,
+	VFE_BAYER_GRGRGR,
+	VFE_BAYER_BGBGBG,
+	VFE_BAYER_GBGBGB,
+	VFE_YUV_YCbYCr,
+	VFE_YUV_YCrYCb,
+	VFE_YUV_CbYCrY,
+	VFE_YUV_CrYCbY
+};
+
+enum VFE_BUS_RD_INPUT_PIXEL_PATTERN {
+	VFE_BAYER_RAW,
+	VFE_YUV_INTERLEAVED,
+	VFE_YUV_PSEUDO_PLANAR_Y,
+	VFE_YUV_PSEUDO_PLANAR_CBCR
+};
+
+enum VFE_YUV_INPUT_COSITING_MODE {
+	VFE_YUV_COSITED,
+	VFE_YUV_INTERPOLATED
+};
+
+
+/* 13*1  */
+#define VFE32_ROLL_OFF_INIT_TABLE_SIZE  13
+/* 13*16 */
+#define VFE32_ROLL_OFF_DELTA_TABLE_SIZE 208
+
+#define VFE32_GAMMA_NUM_ENTRIES  64
+
+#define VFE32_LA_TABLE_LENGTH    64
+
+struct vfe_cmds_demosaic_abf {
+	uint8_t   enable;
+	uint8_t   forceOn;
+	uint8_t   shift;
+	uint16_t  lpThreshold;
+	uint16_t  max;
+	uint16_t  min;
+	uint8_t   ratio;
+};
+
+struct vfe_cmds_demosaic_bpc {
+	uint8_t   enable;
+	uint16_t  fmaxThreshold;
+	uint16_t  fminThreshold;
+	uint16_t  redDiffThreshold;
+	uint16_t  blueDiffThreshold;
+	uint16_t  greenDiffThreshold;
+};
+
+struct vfe_cmd_demosaic_config {
+	uint8_t   enable;
+	uint8_t   slopeShift;
+	struct vfe_cmds_demosaic_abf abfConfig;
+	struct vfe_cmds_demosaic_bpc bpcConfig;
+};
+
+struct vfe_cmd_demosaic_bpc_update {
+	struct vfe_cmds_demosaic_bpc bpcUpdate;
+};
+
+struct vfe_cmd_demosaic_abf_update {
+	struct vfe_cmds_demosaic_abf abfUpdate;
+};
+
+struct vfe_cmd_white_balance_config {
+	uint8_t  enable;
+	uint16_t ch2Gain;
+	uint16_t ch1Gain;
+	uint16_t ch0Gain;
+};
+
+enum VFE_COLOR_CORRECTION_COEF_QFACTOR {
+	COEF_IS_Q7_SIGNED,
+	COEF_IS_Q8_SIGNED,
+	COEF_IS_Q9_SIGNED,
+	COEF_IS_Q10_SIGNED
+};
+
+struct vfe_cmd_color_correction_config {
+	uint8_t     enable;
+	enum VFE_COLOR_CORRECTION_COEF_QFACTOR coefQFactor;
+	int16_t  C0;
+	int16_t  C1;
+	int16_t  C2;
+	int16_t  C3;
+	int16_t  C4;
+	int16_t  C5;
+	int16_t  C6;
+	int16_t  C7;
+	int16_t  C8;
+	int16_t  K0;
+	int16_t  K1;
+	int16_t  K2;
+};
+
+#define VFE_LA_TABLE_LENGTH 64
+
+struct vfe_cmd_la_config {
+	uint8_t enable;
+	int16_t table[VFE_LA_TABLE_LENGTH];
+};
+
+#define VFE_GAMMA_TABLE_LENGTH 256
+enum VFE_RGB_GAMMA_TABLE_SELECT {
+	RGB_GAMMA_CH0_SELECTED,
+	RGB_GAMMA_CH1_SELECTED,
+	RGB_GAMMA_CH2_SELECTED,
+	RGB_GAMMA_CH0_CH1_SELECTED,
+	RGB_GAMMA_CH0_CH2_SELECTED,
+	RGB_GAMMA_CH1_CH2_SELECTED,
+	RGB_GAMMA_CH0_CH1_CH2_SELECTED
+};
+
+struct vfe_cmd_rgb_gamma_config {
+	uint8_t enable;
+	enum VFE_RGB_GAMMA_TABLE_SELECT channelSelect;
+	int16_t table[VFE_GAMMA_TABLE_LENGTH];
+};
+
+struct vfe_cmd_chroma_enhan_config {
+	uint8_t  enable;
+	int16_t am;
+	int16_t ap;
+	int16_t bm;
+	int16_t bp;
+	int16_t cm;
+	int16_t cp;
+	int16_t dm;
+	int16_t dp;
+	int16_t kcr;
+	int16_t kcb;
+	int16_t RGBtoYConversionV0;
+	int16_t RGBtoYConversionV1;
+	int16_t RGBtoYConversionV2;
+	uint8_t RGBtoYConversionOffset;
+};
+
+struct vfe_cmd_chroma_suppression_config {
+	uint8_t enable;
+	uint8_t m1;
+	uint8_t m3;
+	uint8_t n1;
+	uint8_t n3;
+	uint8_t nn1;
+	uint8_t mm1;
+};
+
+struct vfe_cmd_asf_config {
+	uint8_t enable;
+	uint8_t smoothFilterEnabled;
+	uint8_t sharpMode;
+	uint8_t smoothCoefCenter;
+	uint8_t smoothCoefSurr;
+	uint8_t normalizeFactor;
+	uint8_t sharpK1;
+	uint8_t sharpK2;
+	uint8_t sharpThreshE1;
+	int8_t sharpThreshE2;
+	int8_t sharpThreshE3;
+	int8_t sharpThreshE4;
+	int8_t sharpThreshE5;
+	int8_t filter1Coefficients[9];
+	int8_t filter2Coefficients[9];
+	uint8_t  cropEnable;
+	uint16_t cropFirstPixel;
+	uint16_t cropLastPixel;
+	uint16_t cropFirstLine;
+	uint16_t cropLastLine;
+};
+
+struct vfe_cmd_asf_update {
+	uint8_t enable;
+	uint8_t smoothFilterEnabled;
+	uint8_t sharpMode;
+	uint8_t smoothCoefCenter;
+	uint8_t smoothCoefSurr;
+	uint8_t normalizeFactor;
+	uint8_t sharpK1;
+	uint8_t sharpK2;
+	uint8_t sharpThreshE1;
+	int8_t  sharpThreshE2;
+	int8_t  sharpThreshE3;
+	int8_t  sharpThreshE4;
+	int8_t  sharpThreshE5;
+	int8_t  filter1Coefficients[9];
+	int8_t  filter2Coefficients[9];
+	uint8_t cropEnable;
+};
+
+enum VFE_TEST_GEN_SYNC_EDGE {
+	VFE_TEST_GEN_SYNC_EDGE_ActiveHigh,
+	VFE_TEST_GEN_SYNC_EDGE_ActiveLow
+};
+
+
+struct vfe_cmd_bus_pm_start {
+	uint8_t output2YWrPmEnable;
+	uint8_t output2CbcrWrPmEnable;
+	uint8_t output1YWrPmEnable;
+	uint8_t output1CbcrWrPmEnable;
+};
+
+struct  vfe_frame_skip_counts {
+	uint32_t  totalFrameCount;
+	uint32_t  output1Count;
+	uint32_t  output2Count;
+};
+
+enum VFE_AXI_RD_UNPACK_HBI_SEL {
+	VFE_AXI_RD_HBI_32_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_64_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_128_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_256_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_512_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_1024_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_2048_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_4096_CLOCK_CYCLES
+};
+
+enum VFE32_MESSAGE_ID {
+	MSG_ID_RESET_ACK, /* 0 */
+	MSG_ID_START_ACK,
+	MSG_ID_STOP_ACK,
+	MSG_ID_UPDATE_ACK,
+	MSG_ID_OUTPUT_P,
+	MSG_ID_OUTPUT_T,
+	MSG_ID_OUTPUT_S,
+	MSG_ID_OUTPUT_V,
+	MSG_ID_SNAPSHOT_DONE,
+	MSG_ID_STATS_AEC,
+	MSG_ID_STATS_AF, /* 10 */
+	MSG_ID_STATS_AWB,
+	MSG_ID_STATS_RS,
+	MSG_ID_STATS_CS,
+	MSG_ID_STATS_IHIST,
+	MSG_ID_STATS_SKIN,
+	MSG_ID_EPOCH1,
+	MSG_ID_EPOCH2,
+	MSG_ID_SYNC_TIMER0_DONE,
+	MSG_ID_SYNC_TIMER1_DONE,
+	MSG_ID_SYNC_TIMER2_DONE, /* 20 */
+	MSG_ID_ASYNC_TIMER0_DONE,
+	MSG_ID_ASYNC_TIMER1_DONE,
+	MSG_ID_ASYNC_TIMER2_DONE,
+	MSG_ID_ASYNC_TIMER3_DONE,
+	MSG_ID_AE_OVERFLOW,
+	MSG_ID_AF_OVERFLOW,
+	MSG_ID_AWB_OVERFLOW,
+	MSG_ID_RS_OVERFLOW,
+	MSG_ID_CS_OVERFLOW,
+	MSG_ID_IHIST_OVERFLOW, /* 30 */
+	MSG_ID_SKIN_OVERFLOW,
+	MSG_ID_AXI_ERROR,
+	MSG_ID_CAMIF_OVERFLOW,
+	MSG_ID_VIOLATION,
+	MSG_ID_CAMIF_ERROR,
+	MSG_ID_BUS_OVERFLOW,
+	MSG_ID_SOF_ACK,
+	MSG_ID_STOP_REC_ACK,
+};
+
+struct vfe_msg_stats {
+	uint32_t    buffer;
+	uint32_t    frameCounter;
+};
+
+
+struct vfe_frame_bpc_info {
+	uint32_t greenDefectPixelCount;
+	uint32_t redBlueDefectPixelCount;
+};
+
+struct vfe_frame_asf_info {
+	uint32_t  asfMaxEdge;
+	uint32_t  asfHbiCount;
+};
+
+struct vfe_msg_camif_status {
+	uint8_t  camifState;
+	uint32_t pixelCount;
+	uint32_t lineCount;
+};
+
+
+struct vfe32_irq_status {
+	uint32_t vfeIrqStatus0;
+	uint32_t vfeIrqStatus1;
+	uint32_t camifStatus;
+	uint32_t demosaicStatus;
+	uint32_t asfMaxEdge;
+};
+
+struct vfe_msg_output {
+	uint8_t   output_id;
+	uint32_t  yBuffer;
+	uint32_t  cbcrBuffer;
+	struct vfe_frame_bpc_info bpcInfo;
+	struct vfe_frame_asf_info asfInfo;
+	uint32_t  frameCounter;
+};
+
+struct vfe_message {
+	enum VFE32_MESSAGE_ID _d;
+	union {
+		struct vfe_msg_output              msgOut;
+		struct vfe_msg_stats               msgStats;
+		struct vfe_msg_camif_status        msgCamifError;
+	} _u;
+};
+
+/* New one for 7x30 */
+struct msm_vfe32_cmd {
+	int32_t  id;
+	uint16_t length;
+	void     *value;
+};
+
+#define V32_PREVIEW_AXI_FLAG  0x00000001
+#define V32_SNAPSHOT_AXI_FLAG (0x00000001<<1)
+
+struct vfe32_cmd_type {
+	uint16_t id;
+	uint32_t length;
+	uint32_t offset;
+	uint32_t flag;
+};
+
+struct vfe32_free_buf {
+	struct list_head node;
+	uint32_t paddr;
+	uint32_t y_off;
+	uint32_t cbcr_off;
+};
+
+struct vfe32_output_ch {
+	struct list_head free_buf_queue;
+	spinlock_t free_buf_lock;
+	uint16_t output_fmt;
+	int8_t ch0;
+	int8_t ch1;
+	int8_t ch2;
+	uint32_t  capture_cnt;
+	uint32_t  frame_drop_cnt;
+};
+
+/* no error irq in mask 0 */
+#define VFE32_IMASK_ERROR_ONLY_0  0x0
+/* in the normal case we do not want to mask off the error status */
+/* bits 0-21 are error irq bits */
+#define VFE32_IMASK_ERROR_ONLY_1               0x005FFFFF
+#define VFE32_IMASK_CAMIF_ERROR               (0x00000001<<0)
+#define VFE32_IMASK_BHIST_OVWR                (0x00000001<<1)
+#define VFE32_IMASK_STATS_CS_OVWR             (0x00000001<<2)
+#define VFE32_IMASK_STATS_IHIST_OVWR          (0x00000001<<3)
+#define VFE32_IMASK_REALIGN_BUF_Y_OVFL        (0x00000001<<4)
+#define VFE32_IMASK_REALIGN_BUF_CB_OVFL       (0x00000001<<5)
+#define VFE32_IMASK_REALIGN_BUF_CR_OVFL       (0x00000001<<6)
+#define VFE32_IMASK_VIOLATION                 (0x00000001<<7)
+#define VFE32_IMASK_IMG_MAST_0_BUS_OVFL       (0x00000001<<8)
+#define VFE32_IMASK_IMG_MAST_1_BUS_OVFL       (0x00000001<<9)
+#define VFE32_IMASK_IMG_MAST_2_BUS_OVFL       (0x00000001<<10)
+#define VFE32_IMASK_IMG_MAST_3_BUS_OVFL       (0x00000001<<11)
+#define VFE32_IMASK_IMG_MAST_4_BUS_OVFL       (0x00000001<<12)
+#define VFE32_IMASK_IMG_MAST_5_BUS_OVFL       (0x00000001<<13)
+#define VFE32_IMASK_IMG_MAST_6_BUS_OVFL       (0x00000001<<14)
+#define VFE32_IMASK_STATS_AE_BG_BUS_OVFL      (0x00000001<<15)
+#define VFE32_IMASK_STATS_AF_BF_BUS_OVFL      (0x00000001<<16)
+#define VFE32_IMASK_STATS_AWB_BUS_OVFL        (0x00000001<<17)
+#define VFE32_IMASK_STATS_RS_BUS_OVFL         (0x00000001<<18)
+#define VFE32_IMASK_STATS_CS_BUS_OVFL         (0x00000001<<19)
+#define VFE32_IMASK_STATS_IHIST_BUS_OVFL      (0x00000001<<20)
+#define VFE32_IMASK_STATS_SKIN_BHIST_BUS_OVFL (0x00000001<<21)
+#define VFE32_IMASK_AXI_ERROR                 (0x00000001<<22)
+
+struct vfe32_output_path {
+	uint16_t output_mode;     /* bitmask  */
+
+	struct vfe32_output_ch out0; /* preview and thumbnail */
+	struct vfe32_output_ch out1; /* snapshot */
+	struct vfe32_output_ch out2; /* video    */
+};
+
+struct vfe32_frame_extra {
+	uint32_t greenDefectPixelCount;
+	uint32_t redBlueDefectPixelCount;
+
+	uint32_t  asfMaxEdge;
+	uint32_t  asfHbiCount;
+
+	uint32_t yWrPmStats0;
+	uint32_t yWrPmStats1;
+	uint32_t cbcrWrPmStats0;
+	uint32_t cbcrWrPmStats1;
+
+	uint32_t  frameCounter;
+};
+
+#define VFE_DISABLE_ALL_IRQS            0
+#define VFE_CLEAR_ALL_IRQS              0xffffffff
+
+#define VFE_HW_VERSION			0x00000000
+#define VFE_GLOBAL_RESET                0x00000004
+#define VFE_MODULE_RESET		0x00000008
+#define VFE_CGC_OVERRIDE                0x0000000C
+#define VFE_MODULE_CFG                  0x00000010
+#define VFE_CFG				0x00000014
+#define VFE_IRQ_CMD                     0x00000018
+#define VFE_IRQ_MASK_0                  0x0000001C
+#define VFE_IRQ_MASK_1                  0x00000020
+#define VFE_IRQ_CLEAR_0                 0x00000024
+#define VFE_IRQ_CLEAR_1                 0x00000028
+#define VFE_IRQ_STATUS_0                0x0000002C
+#define VFE_IRQ_STATUS_1                0x00000030
+#define VFE_IRQ_COMP_MASK               0x00000034
+#define VFE_BUS_CMD                     0x00000038
+#define VFE_BUS_PING_PONG_STATUS        0x00000180
+#define VFE_AXI_CMD                     0x000001D8
+#define VFE_AXI_STATUS        0x000001DC
+#define VFE_BUS_STATS_PING_PONG_BASE    0x000000F4
+
+#define VFE_BUS_STATS_AEC_WR_PING_ADDR    0x000000F4
+#define VFE_BUS_STATS_AEC_WR_PONG_ADDR    0x000000F8
+#define VFE_BUS_STATS_AEC_UB_CFG          0x000000FC
+#define VFE_BUS_STATS_AF_WR_PING_ADDR     0x00000100
+#define VFE_BUS_STATS_AF_WR_PONG_ADDR     0x00000104
+#define VFE_BUS_STATS_AF_UB_CFG           0x00000108
+#define VFE_BUS_STATS_AWB_WR_PING_ADDR    0x0000010C
+#define VFE_BUS_STATS_AWB_WR_PONG_ADDR    0x00000110
+#define VFE_BUS_STATS_AWB_UB_CFG          0x00000114
+#define VFE_BUS_STATS_RS_WR_PING_ADDR    0x00000118
+#define VFE_BUS_STATS_RS_WR_PONG_ADDR    0x0000011C
+#define VFE_BUS_STATS_RS_UB_CFG          0x00000120
+
+#define VFE_BUS_STATS_CS_WR_PING_ADDR    0x00000124
+#define VFE_BUS_STATS_CS_WR_PONG_ADDR    0x00000128
+#define VFE_BUS_STATS_CS_UB_CFG          0x0000012C
+#define VFE_BUS_STATS_HIST_WR_PING_ADDR   0x00000130
+#define VFE_BUS_STATS_HIST_WR_PONG_ADDR   0x00000134
+#define VFE_BUS_STATS_HIST_UB_CFG          0x00000138
+#define VFE_BUS_STATS_SKIN_WR_PING_ADDR    0x0000013C
+#define VFE_BUS_STATS_SKIN_WR_PONG_ADDR    0x00000140
+#define VFE_BUS_STATS_SKIN_UB_CFG          0x00000144
+#define VFE_CAMIF_COMMAND               0x000001E0
+#define VFE_CAMIF_STATUS                0x00000204
+#define VFE_REG_UPDATE_CMD              0x00000260
+#define VFE_DEMUX_GAIN_0                0x00000288
+#define VFE_DEMUX_GAIN_1                0x0000028C
+#define VFE_CHROMA_UP                   0x0000035C
+#define VFE_FRAMEDROP_ENC_Y_CFG         0x00000504
+#define VFE_FRAMEDROP_ENC_CBCR_CFG      0x00000508
+#define VFE_FRAMEDROP_ENC_Y_PATTERN     0x0000050C
+#define VFE_FRAMEDROP_ENC_CBCR_PATTERN  0x00000510
+#define VFE_FRAMEDROP_VIEW_Y            0x00000514
+#define VFE_FRAMEDROP_VIEW_CBCR         0x00000518
+#define VFE_FRAMEDROP_VIEW_Y_PATTERN    0x0000051C
+#define VFE_FRAMEDROP_VIEW_CBCR_PATTERN 0x00000520
+#define VFE_CLAMP_MAX                   0x00000524
+#define VFE_CLAMP_MIN                   0x00000528
+#define VFE_REALIGN_BUF                 0x0000052C
+#define VFE_STATS_CFG                   0x00000530
+#define VFE_DMI_CFG                     0x00000598
+#define VFE_DMI_ADDR                    0x0000059C
+#define VFE_DMI_DATA_LO                 0x000005A4
+#define VFE_BUS_IO_FORMAT_CFG		0x000006F8
+#define VFE_PIXEL_IF_CFG                0x000006FC
+
+struct vfe_stats_control {
+	uint8_t  ackPending;
+	uint32_t nextFrameAddrBuf;
+	uint32_t droppedStatsFrameCount;
+	uint32_t bufToRender;
+};
+
+struct vfe32_ctrl_type {
+	uint16_t operation_mode;     /* streaming or snapshot */
+	struct vfe32_output_path outpath;
+
+	uint32_t vfeImaskCompositePacked;
+
+	spinlock_t  stop_flag_lock;
+	spinlock_t  update_ack_lock;
+	spinlock_t  state_lock;
+	spinlock_t  io_lock;
+
+	spinlock_t  aec_ack_lock;
+	spinlock_t  awb_ack_lock;
+	spinlock_t  af_ack_lock;
+
+	uint32_t extlen;
+	void *extdata;
+
+	int8_t start_ack_pending;
+	int8_t stop_ack_pending;
+	int8_t reset_ack_pending;
+	int8_t update_ack_pending;
+	int8_t req_start_video_rec;
+	int8_t req_stop_video_rec;
+
+	spinlock_t  tasklet_lock;
+	struct list_head tasklet_q;
+	int vfeirq;
+	void __iomem *vfebase;
+	void *syncdata;
+
+	struct resource	*vfemem;
+	struct resource *vfeio;
+
+	uint32_t stats_comp;
+	atomic_t vstate;
+	uint32_t vfe_capture_count;
+	uint32_t sync_timer_repeat_count;
+	uint32_t sync_timer_state;
+	uint32_t sync_timer_number;
+
+	uint32_t vfeFrameId;
+	uint32_t output1Pattern;
+	uint32_t output1Period;
+	uint32_t output2Pattern;
+	uint32_t output2Period;
+	uint32_t vfeFrameSkipCount;
+	uint32_t vfeFrameSkipPeriod;
+	struct vfe_stats_control afStatsControl;
+	struct vfe_stats_control awbStatsControl;
+	struct vfe_stats_control aecStatsControl;
+	struct vfe_stats_control ihistStatsControl;
+	struct vfe_stats_control rsStatsControl;
+	struct vfe_stats_control csStatsControl;
+
+	/* v4l2 subdev */
+	struct v4l2_subdev *subdev;
+};
+
+#define statsAeNum      0
+#define statsAfNum      1
+#define statsAwbNum     2
+#define statsRsNum      3
+#define statsCsNum      4
+#define statsIhistNum   5
+#define statsSkinNum    6
+
+struct vfe_cmd_stats_ack {
+	uint32_t  nextStatsBuf;
+};
+
+#define VFE_STATS_BUFFER_COUNT            3
+
+struct vfe_cmd_stats_buf {
+	uint32_t statsBuf[VFE_STATS_BUFFER_COUNT];
+};
+#endif /* __MSM_VFE32_H__ */
diff --git a/drivers/media/video/msm/msm_vfe7x.c b/drivers/media/video/msm/msm_vfe7x.c
new file mode 100644
index 0000000..316aacf
--- /dev/null
+++ b/drivers/media/video/msm/msm_vfe7x.c
@@ -0,0 +1,783 @@
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/msm_adsp.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/android_pmem.h>
+#include <linux/slab.h>
+#include <mach/msm_adsp.h>
+#include <mach/clk.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include "msm_vfe7x.h"
+#include <linux/pm_qos_params.h>
+
+#define QDSP_CMDQUEUE 25
+
+#define VFE_RESET_CMD 0
+#define VFE_START_CMD 1
+#define VFE_STOP_CMD  2
+#define VFE_FRAME_ACK 20
+#define STATS_AF_ACK  21
+#define STATS_WE_ACK  22
+
+#define MSG_STOP_ACK  1
+#define MSG_SNAPSHOT  2
+#define MSG_OUTPUT1   6
+#define MSG_OUTPUT2   7
+#define MSG_STATS_AF  8
+#define MSG_STATS_WE  9
+#define MSG_OUTPUT_S  10
+#define MSG_OUTPUT_T  11
+
+#define VFE_ADSP_EVENT 0xFFFF
+#define SNAPSHOT_MASK_MODE 0x00000002
+#define MSM_AXI_QOS_PREVIEW	192000
+#define MSM_AXI_QOS_SNAPSHOT	192000
+
+
+static struct msm_adsp_module *qcam_mod;
+static struct msm_adsp_module *vfe_mod;
+static struct msm_vfe_callback *resp;
+static void *extdata;
+static uint32_t extlen;
+
+struct mutex vfe_lock;
+static void     *vfe_syncdata;
+static uint8_t vfestopped;
+static uint32_t vfetask_state;
+static int cnt;
+
+static struct stop_event stopevent;
+
+unsigned long paddr_s_y;
+unsigned long paddr_s_cbcr;
+unsigned long paddr_t_y;
+unsigned long paddr_t_cbcr;
+
+static void vfe_7x_convert(struct msm_vfe_phy_info *pinfo,
+		enum vfe_resp_msg type,
+		void *data, void **ext, int32_t *elen)
+{
+	switch (type) {
+	case VFE_MSG_OUTPUT_P: {
+		pinfo->y_phy = ((struct vfe_endframe *)data)->y_address;
+		pinfo->cbcr_phy =
+			((struct vfe_endframe *)data)->cbcr_address;
+
+		pinfo->output_id = OUTPUT_TYPE_P;
+
+		CDBG("vfe_7x_convert, y_phy = 0x%x, cbcr_phy = 0x%x\n",
+				 pinfo->y_phy, pinfo->cbcr_phy);
+
+		((struct vfe_frame_extra *)extdata)->bl_evencol =
+		((struct vfe_endframe *)data)->blacklevelevencolumn;
+
+		((struct vfe_frame_extra *)extdata)->bl_oddcol =
+		((struct vfe_endframe *)data)->blackleveloddcolumn;
+
+		((struct vfe_frame_extra *)extdata)->g_def_p_cnt =
+		((struct vfe_endframe *)data)->greendefectpixelcount;
+
+		((struct vfe_frame_extra *)extdata)->r_b_def_p_cnt =
+		((struct vfe_endframe *)data)->redbluedefectpixelcount;
+
+		*ext  = extdata;
+		*elen = extlen;
+	}
+		break;
+
+	case VFE_MSG_OUTPUT_S: {
+		pinfo->y_phy = paddr_s_y;
+		pinfo->cbcr_phy = paddr_s_cbcr;
+		pinfo->output_id = OUTPUT_TYPE_S;
+		CDBG("vfe_7x_convert: y_phy = 0x%x cbcr_phy = 0x%x\n",
+					pinfo->y_phy, pinfo->cbcr_phy);
+	}
+		break;
+
+	case VFE_MSG_OUTPUT_T: {
+		pinfo->y_phy = paddr_t_y;
+		pinfo->cbcr_phy = paddr_t_cbcr;
+		pinfo->output_id = OUTPUT_TYPE_T;
+		CDBG("vfe_7x_convert: y_phy = 0x%x cbcr_phy = 0x%x\n",
+					pinfo->y_phy, pinfo->cbcr_phy);
+	}
+		break;
+
+	case VFE_MSG_STATS_AF:
+	case VFE_MSG_STATS_WE:
+		pinfo->sbuf_phy = *(uint32_t *)data;
+		break;
+
+	default:
+		break;
+	} /* switch */
+}
+
+static void vfe_7x_ops(void *driver_data, unsigned id, size_t len,
+		void (*getevent)(void *ptr, size_t len))
+{
+	uint32_t evt_buf[3];
+	struct msm_vfe_resp *rp;
+	void *data;
+	CDBG("%s:id=%d\n", __func__, id);
+
+	len = (id == VFE_ADSP_EVENT) ? 0 : len;
+	data = resp->vfe_alloc(sizeof(struct msm_vfe_resp) + len,
+		vfe_syncdata,  GFP_ATOMIC);
+
+	if (!data) {
+		pr_err("%s: rp: cannot allocate buffer\n", __func__);
+		return;
+	}
+	rp = (struct msm_vfe_resp *)data;
+	rp->evt_msg.len = len;
+
+	if (id == VFE_ADSP_EVENT) {
+		/* event */
+		rp->type           = VFE_EVENT;
+		rp->evt_msg.type   = MSM_CAMERA_EVT;
+		getevent(evt_buf, sizeof(evt_buf));
+		rp->evt_msg.msg_id = evt_buf[0];
+		CDBG("%s:event:msg_id=%d\n", __func__, rp->evt_msg.msg_id);
+		resp->vfe_resp(rp, MSM_CAM_Q_VFE_EVT, vfe_syncdata,
+		GFP_ATOMIC);
+	} else {
+		/* messages */
+		rp->evt_msg.type   = MSM_CAMERA_MSG;
+		rp->evt_msg.msg_id = id;
+		rp->evt_msg.data = rp + 1;
+		getevent(rp->evt_msg.data, len);
+		CDBG("%s:messages:msg_id=%d\n", __func__, rp->evt_msg.msg_id);
+
+		switch (rp->evt_msg.msg_id) {
+		case MSG_SNAPSHOT:
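+			/*
+			 * Drain the main-image (OUTPUT_S) and thumbnail
+			 * (OUTPUT_T) frame messages that are assumed to
+			 * follow the snapshot notification before reporting
+			 * VFE_MSG_SNAPSHOT.
+			 */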
+			update_axi_qos(MSM_AXI_QOS_PREVIEW);
+			vfe_7x_ops(driver_data, MSG_OUTPUT_S, len, getevent);
+			vfe_7x_ops(driver_data, MSG_OUTPUT_T, len, getevent);
+			rp->type = VFE_MSG_SNAPSHOT;
+			break;
+
+		case MSG_OUTPUT_S:
+			rp->type = VFE_MSG_OUTPUT_S;
+			vfe_7x_convert(&(rp->phy), VFE_MSG_OUTPUT_S,
+				rp->evt_msg.data, &(rp->extdata),
+				&(rp->extlen));
+			break;
+
+		case MSG_OUTPUT_T:
+			rp->type = VFE_MSG_OUTPUT_T;
+			vfe_7x_convert(&(rp->phy), VFE_MSG_OUTPUT_T,
+				rp->evt_msg.data, &(rp->extdata),
+				&(rp->extlen));
+			break;
+
+		case MSG_OUTPUT1:
+		case MSG_OUTPUT2:
+			rp->type = VFE_MSG_OUTPUT_P;
+			vfe_7x_convert(&(rp->phy), VFE_MSG_OUTPUT_P,
+				rp->evt_msg.data, &(rp->extdata),
+				&(rp->extlen));
+			break;
+
+		case MSG_STATS_AF:
+			rp->type = VFE_MSG_STATS_AF;
+			vfe_7x_convert(&(rp->phy), VFE_MSG_STATS_AF,
+					rp->evt_msg.data, NULL, NULL);
+			break;
+
+		case MSG_STATS_WE:
+			rp->type = VFE_MSG_STATS_WE;
+			vfe_7x_convert(&(rp->phy), VFE_MSG_STATS_WE,
+					rp->evt_msg.data, NULL, NULL);
+
+			CDBG("MSG_STATS_WE: phy = 0x%x\n", rp->phy.sbuf_phy);
+			break;
+
+		case MSG_STOP_ACK:
+			rp->type = VFE_MSG_GENERAL;
+			stopevent.state = 1;
+			wake_up(&stopevent.wait);
+			break;
+
+
+		default:
+			rp->type = VFE_MSG_GENERAL;
+			break;
+		}
+		resp->vfe_resp(rp, MSM_CAM_Q_VFE_MSG, vfe_syncdata, GFP_ATOMIC);
+	}
+}
+
+static struct msm_adsp_ops vfe_7x_sync = {
+	.event = vfe_7x_ops,
+};
+
+static int vfe_7x_enable(struct camera_enable_cmd *enable)
+{
+	int rc = -EFAULT;
+
+	if (!strcmp(enable->name, "QCAMTASK"))
+		rc = msm_adsp_enable(qcam_mod);
+	else if (!strcmp(enable->name, "VFETASK")) {
+		rc = msm_adsp_enable(vfe_mod);
+		vfetask_state = 1;
+	}
+
+	if (!cnt) {
+		add_axi_qos();
+		cnt++;
+	}
+	return rc;
+}
+
+static int vfe_7x_disable(struct camera_enable_cmd *enable,
+		struct platform_device *dev __attribute__((unused)))
+{
+	int rc = -EFAULT;
+
+	if (!strcmp(enable->name, "QCAMTASK"))
+		rc = msm_adsp_disable(qcam_mod);
+	else if (!strcmp(enable->name, "VFETASK")) {
+		rc = msm_adsp_disable(vfe_mod);
+		vfetask_state = 0;
+	}
+
+	return rc;
+}
+
+static int vfe_7x_stop(void)
+{
+	int rc = 0;
+	uint32_t stopcmd = VFE_STOP_CMD;
+	rc = msm_adsp_write(vfe_mod, QDSP_CMDQUEUE,
+				&stopcmd, sizeof(uint32_t));
+	if (rc < 0) {
+		CDBG("%s:%d: failed rc = %d\n", __func__, __LINE__, rc);
+		return rc;
+	}
+
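+	/*
+	 * Wait (up to stopevent.timeout ms) for the DSP to acknowledge the
+	 * stop command; the MSG_STOP_ACK handler sets stopevent.state and
+	 * wakes this queue.
+	 */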
+	stopevent.state = 0;
+	rc = wait_event_timeout(stopevent.wait,
+		stopevent.state != 0,
+		msecs_to_jiffies(stopevent.timeout));
+
+	return rc;
+}
+
+static void vfe_7x_release(struct platform_device *pdev)
+{
+	mutex_lock(&vfe_lock);
+	vfe_syncdata = NULL;
+	mutex_unlock(&vfe_lock);
+
+	if (!vfestopped) {
+		CDBG("%s:%d:Calling vfe_7x_stop()\n", __func__, __LINE__);
+		vfe_7x_stop();
+	} else
+		vfestopped = 0;
+
+	msm_adsp_disable(qcam_mod);
+	msm_adsp_disable(vfe_mod);
+	vfetask_state = 0;
+
+	msm_adsp_put(qcam_mod);
+	msm_adsp_put(vfe_mod);
+
+	msm_camio_disable(pdev);
+
+	kfree(extdata);
+	extlen = 0;
+
+	/* Release AXI */
+	release_axi_qos();
+	cnt = 0;
+}
+
+static int vfe_7x_init(struct msm_vfe_callback *presp,
+	struct platform_device *dev)
+{
+	int rc = 0;
+
+	init_waitqueue_head(&stopevent.wait);
+	stopevent.timeout = 200;
+	stopevent.state = 0;
+
+	if (presp && presp->vfe_resp)
+		resp = presp;
+	else
+		return -EFAULT;
+
+	/* Bring up all the required GPIOs and Clocks */
+	rc = msm_camio_enable(dev);
+	if (rc < 0)
+		return rc;
+	msm_camio_camif_pad_reg_reset();
+
+	extlen = sizeof(struct vfe_frame_extra);
+
+	extdata =
+		kmalloc(extlen, GFP_ATOMIC);
+	if (!extdata) {
+		rc = -ENOMEM;
+		goto init_fail;
+	}
+
+	rc = msm_adsp_get("QCAMTASK", &qcam_mod, &vfe_7x_sync, NULL);
+	if (rc) {
+		rc = -EBUSY;
+		goto get_qcam_fail;
+	}
+
+	rc = msm_adsp_get("VFETASK", &vfe_mod, &vfe_7x_sync, NULL);
+	if (rc) {
+		rc = -EBUSY;
+		goto get_vfe_fail;
+	}
+
+	return 0;
+
+get_vfe_fail:
+	msm_adsp_put(qcam_mod);
+get_qcam_fail:
+	kfree(extdata);
+init_fail:
+	extlen = 0;
+	return rc;
+}
+
+static int vfe_7x_config_axi(int mode,
+	struct axidata *ad, struct axiout *ao)
+{
+	struct msm_pmem_region *regptr;
+	unsigned long *bptr;
+	int    cnt;
+
+	int rc = 0;
+
+	if (mode == OUTPUT_1 || mode == OUTPUT_1_AND_2) {
+		regptr = ad->region;
+
+		CDBG("bufnum1 = %d\n", ad->bufnum1);
+		if (mode == OUTPUT_1_AND_2) {
+			paddr_t_y = regptr->paddr + regptr->info.y_off;
+			paddr_t_cbcr = regptr->paddr +  regptr->info.cbcr_off;
+		}
+
+		CDBG("config_axi1: O1, phy = 0x%lx, y_off = %d, cbcr_off =%d\n",
+			regptr->paddr, regptr->info.y_off,
+			regptr->info.cbcr_off);
+
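+		/*
+		 * struct axiout lays the ping-pong addresses out as
+		 * consecutive y/cbcr pairs, so a single pointer walk fills
+		 * both addresses of every buffer.
+		 */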
+		bptr = &ao->output1buffer1_y_phy;
+		for (cnt = 0; cnt < ad->bufnum1; cnt++) {
+			*bptr = regptr->paddr + regptr->info.y_off;
+			bptr++;
+			*bptr = regptr->paddr + regptr->info.cbcr_off;
+
+			bptr++;
+			regptr++;
+		}
+
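+		/*
+		 * Pad the remaining of the 8 output1 slots with the last
+		 * real buffer, presumably so the DSP never picks up an
+		 * uninitialized address.
+		 */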
+		regptr--;
+		for (cnt = 0; cnt < (8 - ad->bufnum1); cnt++) {
+			*bptr = regptr->paddr + regptr->info.y_off;
+			bptr++;
+			*bptr = regptr->paddr + regptr->info.cbcr_off;
+			bptr++;
+		}
+	} /* if OUTPUT1 or Both */
+
+	if (mode == OUTPUT_2 || mode == OUTPUT_1_AND_2) {
+		regptr = &(ad->region[ad->bufnum1]);
+
+		CDBG("bufnum2 = %d\n", ad->bufnum2);
+		paddr_s_y = regptr->paddr +  regptr->info.y_off;
+		paddr_s_cbcr = regptr->paddr +  regptr->info.cbcr_off;
+		CDBG("config_axi2: O2, phy = 0x%lx, y_off = %d, cbcr_off =%d\n",
+		     regptr->paddr, regptr->info.y_off, regptr->info.cbcr_off);
+
+		bptr = &ao->output2buffer1_y_phy;
+		for (cnt = 0; cnt < ad->bufnum2; cnt++) {
+			*bptr = regptr->paddr + regptr->info.y_off;
+			bptr++;
+			*bptr = regptr->paddr + regptr->info.cbcr_off;
+
+			bptr++;
+			regptr++;
+		}
+
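+		/* pad the unused output2 slots with the last buffer as above */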
+		regptr--;
+		for (cnt = 0; cnt < (8 - ad->bufnum2); cnt++) {
+			*bptr = regptr->paddr + regptr->info.y_off;
+			bptr++;
+			*bptr = regptr->paddr + regptr->info.cbcr_off;
+			bptr++;
+		}
+	}
+
+	return rc;
+}
+
+static int vfe_7x_config(struct msm_vfe_cfg_cmd *cmd, void *data)
+{
+	struct msm_pmem_region *regptr;
+	unsigned char buf[256];
+
+	struct vfe_stats_ack sack;
+	struct axidata *axid;
+	uint32_t i, op_mode;
+	uint32_t *_mode;
+
+	struct vfe_stats_we_cfg *scfg = NULL;
+	struct vfe_stats_af_cfg *sfcfg = NULL;
+
+	struct axiout *axio = NULL;
+	void   *cmd_data = NULL;
+	void   *cmd_data_alloc = NULL;
+	long rc = 0;
+	struct msm_vfe_command_7k *vfecmd;
+
+	vfecmd = kmalloc(sizeof(struct msm_vfe_command_7k), GFP_ATOMIC);
+	if (!vfecmd) {
+		pr_err("vfecmd alloc failed!\n");
+		return -ENOMEM;
+	}
+
+	if (cmd->cmd_type != CMD_FRAME_BUF_RELEASE &&
+	    cmd->cmd_type != CMD_STATS_BUF_RELEASE &&
+	    cmd->cmd_type != CMD_STATS_AF_BUF_RELEASE) {
+		if (copy_from_user(vfecmd,
+				(void __user *)(cmd->value),
+				sizeof(struct msm_vfe_command_7k))) {
+			rc = -EFAULT;
+			goto config_failure;
+		}
+	}
+
+	switch (cmd->cmd_type) {
+	case CMD_STATS_AEC_AWB_ENABLE:
+	case CMD_STATS_AXI_CFG: {
+		axid = data;
+		if (!axid) {
+			rc = -EFAULT;
+			goto config_failure;
+		}
+
+		scfg =
+			kmalloc(sizeof(struct vfe_stats_we_cfg),
+				GFP_ATOMIC);
+		if (!scfg) {
+			rc = -ENOMEM;
+			goto config_failure;
+		}
+
+		if (copy_from_user(scfg,
+					(void __user *)(vfecmd->value),
+					vfecmd->length)) {
+
+			rc = -EFAULT;
+			goto config_done;
+		}
+
+		CDBG("STATS_ENABLE: bufnum = %d, enabling = %d\n",
+			axid->bufnum1, scfg->wb_expstatsenable);
+
+		if (axid->bufnum1 > 0) {
+			regptr = axid->region;
+
+			for (i = 0; i < axid->bufnum1; i++) {
+
+				CDBG("STATS_ENABLE, phy = 0x%lx\n",
+					regptr->paddr);
+
+				scfg->wb_expstatoutputbuffer[i] =
+					(void *)regptr->paddr;
+				regptr++;
+			}
+
+			cmd_data = scfg;
+
+		} else {
+			rc = -EINVAL;
+			goto config_done;
+		}
+	}
+		break;
+
+	case CMD_STATS_AF_ENABLE:
+	case CMD_STATS_AF_AXI_CFG: {
+		axid = data;
+		if (!axid) {
+			rc = -EFAULT;
+			goto config_failure;
+		}
+
+		sfcfg =
+			kmalloc(sizeof(struct vfe_stats_af_cfg),
+				GFP_ATOMIC);
+
+		if (!sfcfg) {
+			rc = -ENOMEM;
+			goto config_failure;
+		}
+
+		if (copy_from_user(sfcfg,
+					(void __user *)(vfecmd->value),
+					vfecmd->length)) {
+
+			rc = -EFAULT;
+			goto config_done;
+		}
+
+		CDBG("AF_ENABLE: bufnum = %d, enabling = %d\n",
+			axid->bufnum1, sfcfg->af_enable);
+
+		if (axid->bufnum1 > 0) {
+			regptr = &axid->region[0];
+
+			for (i = 0; i < axid->bufnum1; i++) {
+
+				CDBG("STATS_ENABLE, phy = 0x%lx\n",
+					regptr->paddr);
+
+				sfcfg->af_outbuf[i] =
+					(void *)regptr->paddr;
+
+				regptr++;
+			}
+
+			cmd_data = sfcfg;
+
+		} else {
+			rc = -EINVAL;
+			goto config_done;
+		}
+	}
+		break;
+
+	case CMD_FRAME_BUF_RELEASE: {
+		struct msm_frame *b;
+		unsigned long p;
+		struct vfe_outputack fack;
+		if (!data)  {
+			rc = -EFAULT;
+			goto config_failure;
+		}
+
+		b = (struct msm_frame *)(cmd->value);
+		p = *(unsigned long *)data;
+
+		fack.header = VFE_FRAME_ACK;
+
+		fack.output2newybufferaddress =
+			(void *)(p + b->y_off);
+
+		fack.output2newcbcrbufferaddress =
+			(void *)(p + b->cbcr_off);
+
+		vfecmd->queue = QDSP_CMDQUEUE;
+		vfecmd->length = sizeof(struct vfe_outputack);
+		cmd_data = &fack;
+	}
+		break;
+
+	case CMD_SNAP_BUF_RELEASE:
+		break;
+
+	case CMD_STATS_BUF_RELEASE: {
+		CDBG("vfe_7x_config: CMD_STATS_BUF_RELEASE\n");
+		if (!data) {
+			rc = -EFAULT;
+			goto config_failure;
+		}
+
+		sack.header = STATS_WE_ACK;
+		sack.bufaddr = (void *)*(uint32_t *)data;
+
+		vfecmd->queue  = QDSP_CMDQUEUE;
+		vfecmd->length = sizeof(struct vfe_stats_ack);
+		cmd_data = &sack;
+	}
+		break;
+
+	case CMD_STATS_AF_BUF_RELEASE: {
+		CDBG("vfe_7x_config: CMD_STATS_AF_BUF_RELEASE\n");
+		if (!data) {
+			rc = -EFAULT;
+			goto config_failure;
+		}
+
+		sack.header = STATS_AF_ACK;
+		sack.bufaddr = (void *)*(uint32_t *)data;
+
+		vfecmd->queue  = QDSP_CMDQUEUE;
+		vfecmd->length = sizeof(struct vfe_stats_ack);
+		cmd_data = &sack;
+	}
+		break;
+
+	case CMD_GENERAL:
+	case CMD_STATS_DISABLE: {
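+		/*
+		 * Commands up to 256 bytes are staged in the on-stack buf[];
+		 * larger ones get a temporary kmalloc'd buffer that is freed
+		 * at config_done.
+		 */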
+		if (vfecmd->length > 256) {
+			cmd_data_alloc =
+			cmd_data = kmalloc(vfecmd->length, GFP_ATOMIC);
+			if (!cmd_data) {
+				rc = -ENOMEM;
+				goto config_failure;
+			}
+		} else
+			cmd_data = buf;
+
+		if (copy_from_user(cmd_data,
+					(void __user *)(vfecmd->value),
+					vfecmd->length)) {
+
+			rc = -EFAULT;
+			goto config_done;
+		}
+
+		if (vfecmd->queue == QDSP_CMDQUEUE) {
+			switch (*(uint32_t *)cmd_data) {
+			case VFE_RESET_CMD:
+				msm_camio_vfe_blk_reset();
+				vfestopped = 0;
+				break;
+
+			case VFE_START_CMD:
+				_mode = (uint32_t *)cmd_data;
+				op_mode = *(++_mode);
+				if (op_mode & SNAPSHOT_MASK_MODE) {
+					/* request AXI bus for snapshot */
+					if (update_axi_qos(MSM_AXI_QOS_SNAPSHOT)
+						< 0) {
+						rc = -EFAULT;
+						goto config_failure;
+					}
+				} else {
+					/* request AXI bus for preview */
+					if (update_axi_qos(MSM_AXI_QOS_PREVIEW)
+						< 0) {
+						rc = -EFAULT;
+						goto config_failure;
+					}
+				}
+				msm_camio_camif_pad_reg_reset_2();
+				vfestopped = 0;
+				break;
+
+			case VFE_STOP_CMD:
+				vfestopped = 1;
+				goto config_send;
+
+			default:
+				break;
+			}
+		} /* QDSP_CMDQUEUE */
+	}
+		break;
+	case CMD_AXI_CFG_PREVIEW:
+	case CMD_RAW_PICT_AXI_CFG: {
+		axid = data;
+		if (!axid) {
+			rc = -EFAULT;
+			goto config_failure;
+		}
+
+		axio = kmalloc(sizeof(struct axiout), GFP_ATOMIC);
+		if (!axio) {
+			rc = -ENOMEM;
+			goto config_failure;
+		}
+
+		if (copy_from_user(axio, (void __user *)(vfecmd->value),
+					sizeof(struct axiout))) {
+			rc = -EFAULT;
+			goto config_done;
+		}
+
+		vfe_7x_config_axi(OUTPUT_2, axid, axio);
+		cmd_data = axio;
+	}
+		break;
+
+	case CMD_AXI_CFG_SNAP: {
+		axid = data;
+		if (!axid) {
+			rc = -EFAULT;
+			goto config_failure;
+		}
+
+		axio = kmalloc(sizeof(struct axiout), GFP_ATOMIC);
+		if (!axio) {
+			rc = -ENOMEM;
+			goto config_failure;
+		}
+
+		if (copy_from_user(axio, (void __user *)(vfecmd->value),
+					sizeof(struct axiout))) {
+			rc = -EFAULT;
+			goto config_done;
+		}
+
+		vfe_7x_config_axi(OUTPUT_1_AND_2, axid, axio);
+
+		cmd_data = axio;
+	}
+		break;
+
+	default:
+		break;
+	} /* switch */
+
+	if (vfestopped)
+		goto config_done;
+
+config_send:
+	CDBG("send adsp command = %d\n", *(uint32_t *)cmd_data);
+	if (vfetask_state)
+		rc = msm_adsp_write(vfe_mod, vfecmd->queue,
+					cmd_data, vfecmd->length);
+config_done:
+	kfree(cmd_data_alloc);
+
+config_failure:
+	kfree(scfg);
+	kfree(sfcfg);
+	kfree(axio);
+	kfree(vfecmd);
+	return rc;
+}
+
+void msm_camvfe_fn_init(struct msm_camvfe_fn *fptr, void *data)
+{
+	mutex_init(&vfe_lock);
+	fptr->vfe_init    = vfe_7x_init;
+	fptr->vfe_enable  = vfe_7x_enable;
+	fptr->vfe_config  = vfe_7x_config;
+	fptr->vfe_disable = vfe_7x_disable;
+	fptr->vfe_release = vfe_7x_release;
+	vfe_syncdata = data;
+}
+
+void msm_camvpe_fn_init(struct msm_camvpe_fn *fptr, void *data)
+{
+	fptr->vpe_reg		= NULL;
+	fptr->send_frame_to_vpe	= NULL;
+	fptr->vpe_config	= NULL;
+	fptr->vpe_cfg_update	= NULL;
+	fptr->dis		= NULL;
+}
diff --git a/drivers/media/video/msm/msm_vfe7x.h b/drivers/media/video/msm/msm_vfe7x.h
new file mode 100644
index 0000000..dd3571f
--- /dev/null
+++ b/drivers/media/video/msm/msm_vfe7x.h
@@ -0,0 +1,265 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MSM_VFE7X_H__
+#define __MSM_VFE7X_H__
+#include <media/msm_camera.h>
+#include <mach/camera.h>
+
+struct vfe_frame_extra {
+	uint32_t  bl_evencol;
+	uint32_t  bl_oddcol;
+	uint16_t  g_def_p_cnt;
+	uint16_t  r_b_def_p_cnt;
+};
+
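+/*
+ * Frame-done message as produced by the VFE DSP firmware; the bit widths
+ * and the packed, 4-byte-aligned layout are assumed to mirror the
+ * firmware's wire format, so do not reorder the fields.
+ */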
+struct vfe_endframe {
+	uint32_t      y_address;
+	uint32_t      cbcr_address;
+
+	unsigned int  blacklevelevencolumn:23;
+	uint16_t      reserved1:9;
+	unsigned int  blackleveloddcolumn:23;
+	uint16_t      reserved2:9;
+
+	uint16_t      greendefectpixelcount:8;
+	uint16_t      reserved3:8;
+	uint16_t      redbluedefectpixelcount:8;
+	uint16_t      reserved4:8;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_outputack {
+	uint32_t  header;
+	void      *output2newybufferaddress;
+	void      *output2newcbcrbufferaddress;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_stats_ack {
+	uint32_t header;
+	/* MUST BE 64 bit ALIGNED */
+	void     *bufaddr;
+} __attribute__((packed, aligned(4)));
+
+/* AXI Output Config Command sent to DSP */
+struct axiout {
+	uint32_t            cmdheader:32;
+	int 		    outputmode:3;
+	uint8_t             format:2;
+	uint32_t            /* reserved */ : 27;
+
+	/* AXI Output 1 Y Configuration, Part 1 */
+	uint32_t            out1yimageheight:12;
+	uint32_t            /* reserved */ : 4;
+	uint32_t            out1yimagewidthin64bitwords:10;
+	uint32_t            /* reserved */ : 6;
+
+	/* AXI Output 1 Y Configuration, Part 2 */
+	uint8_t             out1yburstlen:2;
+	uint32_t            out1ynumrows:12;
+	uint32_t            out1yrowincin64bitincs:12;
+	uint32_t            /* reserved */ : 6;
+
+	/* AXI Output 1 CbCr Configuration, Part 1 */
+	uint32_t            out1cbcrimageheight:12;
+	uint32_t            /* reserved */ : 4;
+	uint32_t            out1cbcrimagewidthin64bitwords:10;
+	uint32_t            /* reserved */ : 6;
+
+	/* AXI Output 1 CbCr Configuration, Part 2 */
+	uint8_t             out1cbcrburstlen:2;
+	uint32_t            out1cbcrnumrows:12;
+	uint32_t            out1cbcrrowincin64bitincs:12;
+	uint32_t            /* reserved */ : 6;
+
+	/* AXI Output 2 Y Configuration, Part 1 */
+	uint32_t            out2yimageheight:12;
+	uint32_t            /* reserved */ : 4;
+	uint32_t            out2yimagewidthin64bitwords:10;
+	uint32_t            /* reserved */ : 6;
+
+	/* AXI Output 2 Y Configuration, Part 2 */
+	uint8_t             out2yburstlen:2;
+	uint32_t            out2ynumrows:12;
+	uint32_t            out2yrowincin64bitincs:12;
+	uint32_t            /* reserved */ : 6;
+
+	/* AXI Output 2 CbCr Configuration, Part 1 */
+	uint32_t            out2cbcrimageheight:12;
+	uint32_t            /* reserved */ : 4;
+	uint32_t            out2cbcrimagewidtein64bitwords:10;
+	uint32_t            /* reserved */ : 6;
+
+	/* AXI Output 2 CbCr Configuration, Part 2 */
+	uint8_t             out2cbcrburstlen:2;
+	uint32_t            out2cbcrnumrows:12;
+	uint32_t            out2cbcrrowincin64bitincs:12;
+	uint32_t            /* reserved */ : 6;
+
+	/* Address configuration:
+	 * output1 physical address */
+	unsigned long   output1buffer1_y_phy;
+	unsigned long   output1buffer1_cbcr_phy;
+	unsigned long   output1buffer2_y_phy;
+	unsigned long   output1buffer2_cbcr_phy;
+	unsigned long   output1buffer3_y_phy;
+	unsigned long   output1buffer3_cbcr_phy;
+	unsigned long   output1buffer4_y_phy;
+	unsigned long   output1buffer4_cbcr_phy;
+	unsigned long   output1buffer5_y_phy;
+	unsigned long   output1buffer5_cbcr_phy;
+	unsigned long   output1buffer6_y_phy;
+	unsigned long   output1buffer6_cbcr_phy;
+	unsigned long   output1buffer7_y_phy;
+	unsigned long   output1buffer7_cbcr_phy;
+	unsigned long   output1buffer8_y_phy;
+	unsigned long   output1buffer8_cbcr_phy;
+
+	/* output2 physical address */
+	unsigned long   output2buffer1_y_phy;
+	unsigned long   output2buffer1_cbcr_phy;
+	unsigned long   output2buffer2_y_phy;
+	unsigned long   output2buffer2_cbcr_phy;
+	unsigned long   output2buffer3_y_phy;
+	unsigned long   output2buffer3_cbcr_phy;
+	unsigned long   output2buffer4_y_phy;
+	unsigned long   output2buffer4_cbcr_phy;
+	unsigned long   output2buffer5_y_phy;
+	unsigned long   output2buffer5_cbcr_phy;
+	unsigned long   output2buffer6_y_phy;
+	unsigned long   output2buffer6_cbcr_phy;
+	unsigned long   output2buffer7_y_phy;
+	unsigned long   output2buffer7_cbcr_phy;
+	unsigned long   output2buffer8_y_phy;
+	unsigned long   output2buffer8_cbcr_phy;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_stats_we_cfg {
+	uint32_t       header;
+
+	/* White Balance/Exposure Statistic Selection */
+	uint8_t        wb_expstatsenable:1;
+	uint8_t        wb_expstatbuspriorityselection:1;
+	unsigned int   wb_expstatbuspriorityvalue:4;
+	unsigned int   /* reserved */ : 26;
+
+	/* White Balance/Exposure Statistic Configuration, Part 1 */
+	uint8_t        exposurestatregions:1;
+	uint8_t        exposurestatsubregions:1;
+	unsigned int   /* reserved */ : 14;
+
+	unsigned int   whitebalanceminimumy:8;
+	unsigned int   whitebalancemaximumy:8;
+
+	/* White Balance/Exposure Statistic Configuration, Part 2 */
+	uint8_t wb_expstatslopeofneutralregionline[
+		NUM_WB_EXP_NEUTRAL_REGION_LINES];
+
+	/* White Balance/Exposure Statistic Configuration, Part 3 */
+	unsigned int   wb_expstatcrinterceptofneutralregionline2:12;
+	unsigned int   /* reserved */ : 4;
+	unsigned int   wb_expstatcbinterceptofneutralreginnline1:12;
+	unsigned int    /* reserved */ : 4;
+
+	/* White Balance/Exposure Statistic Configuration, Part 4 */
+	unsigned int   wb_expstatcrinterceptofneutralregionline4:12;
+	unsigned int   /* reserved */ : 4;
+	unsigned int   wb_expstatcbinterceptofneutralregionline3:12;
+	unsigned int   /* reserved */ : 4;
+
+	/* White Balance/Exposure Statistic Output Buffer Header */
+	unsigned int   wb_expmetricheaderpattern:8;
+	unsigned int   /* reserved */ : 24;
+
+	/* White Balance/Exposure Statistic Output Buffers-MUST
+	* BE 64 bit ALIGNED */
+	void  *wb_expstatoutputbuffer[NUM_WB_EXP_STAT_OUTPUT_BUFFERS];
+} __attribute__((packed, aligned(4)));
+
+struct vfe_stats_af_cfg {
+	uint32_t header;
+
+	/* Autofocus Statistic Selection */
+	uint8_t       af_enable:1;
+	uint8_t       af_busprioritysel:1;
+	unsigned int  af_buspriorityval:4;
+	unsigned int  /* reserved */ : 26;
+
+	/* Autofocus Statistic Configuration, Part 1 */
+	unsigned int  af_singlewinvoffset:12;
+	unsigned int  /* reserved */ : 4;
+	unsigned int  af_singlewinhoffset:12;
+	unsigned int  /* reserved */ : 3;
+	uint8_t       af_winmode:1;
+
+	/* Autofocus Statistic Configuration, Part 2 */
+	unsigned int  af_singglewinvh:11;
+	unsigned int  /* reserved */ : 5;
+	unsigned int  af_singlewinhw:11;
+	unsigned int  /* reserved */ : 5;
+
+	/* Autofocus Statistic Configuration, Parts 3-6 */
+	uint8_t       af_multiwingrid[NUM_AUTOFOCUS_MULTI_WINDOW_GRIDS];
+
+	/* Autofocus Statistic Configuration, Part 7 */
+	signed int    af_metrichpfcoefa00:5;
+	signed int    af_metrichpfcoefa04:5;
+	unsigned int  af_metricmaxval:11;
+	uint8_t       af_metricsel:1;
+	unsigned int  /* reserved */ : 10;
+
+	/* Autofocus Statistic Configuration, Part 8 */
+	signed int    af_metrichpfcoefa20:5;
+	signed int    af_metrichpfcoefa21:5;
+	signed int    af_metrichpfcoefa22:5;
+	signed int    af_metrichpfcoefa23:5;
+	signed int    af_metrichpfcoefa24:5;
+	unsigned int  /* reserved */ : 7;
+
+	/* Autofocus Statistic Output Buffer Header */
+	unsigned int  af_metrichp:8;
+	unsigned int  /* reserved */ : 24;
+
+	/* Autofocus Statistic Output Buffers - MUST BE 64 bit ALIGNED!!! */
+	void *af_outbuf[NUM_AF_STAT_OUTPUT_BUFFERS];
+} __attribute__((packed, aligned(4))); /* VFE_StatsAutofocusConfigCmdType */
+
+struct msm_camera_frame_msg {
+	unsigned long   output_y_address;
+	unsigned long   output_cbcr_address;
+
+	unsigned int    blacklevelevenColumn:23;
+	uint16_t        reserved1:9;
+	unsigned int    blackleveloddColumn:23;
+	uint16_t        reserved2:9;
+
+	uint16_t        greendefectpixelcount:8;
+	uint16_t        reserved3:8;
+	uint16_t        redbluedefectpixelcount:8;
+	uint16_t        reserved4:8;
+} __attribute__((packed, aligned(4)));
+
+/* New one for 7k */
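+/*
+ * Command descriptor received from userspace: value points at a userspace
+ * buffer of length bytes that is copied in with copy_from_user() before
+ * being written to the given DSP command queue.
+ */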
+struct msm_vfe_command_7k {
+	uint16_t queue;
+	uint16_t length;
+	void     *value;
+};
+
+struct stop_event {
+	wait_queue_head_t wait;
+	int state;
+	int timeout;
+};
+
+
+#endif /* __MSM_VFE7X_H__ */
diff --git a/drivers/media/video/msm/msm_vfe7x27a.c b/drivers/media/video/msm/msm_vfe7x27a.c
new file mode 100644
index 0000000..c8bfacc
--- /dev/null
+++ b/drivers/media/video/msm/msm_vfe7x27a.c
@@ -0,0 +1,742 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/msm_adsp.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/android_pmem.h>
+#include <linux/slab.h>
+#include <linux/pm_qos_params.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <mach/msm_adsp.h>
+#include <mach/clk.h>
+#include <mach/camera.h>
+#include "msm_vfe7x27a.h"
+
+#define QDSP_CMDQUEUE 25
+
+#define VFE_RESET_CMD 0
+#define VFE_START_CMD 1
+#define VFE_STOP_CMD  2
+#define VFE_FRAME_ACK 20
+#define STATS_AF_ACK  21
+#define STATS_WE_ACK  22
+
+#define MSG_STOP_ACK  1
+#define MSG_SNAPSHOT  2
+#define MSG_OUTPUT1   6
+#define MSG_OUTPUT2   7
+#define MSG_STATS_AF  8
+#define MSG_STATS_WE  9
+#define MSG_OUTPUT_S  23
+#define MSG_OUTPUT_T  22
+#define MSG_SOF       15
+
+#define VFE_ADSP_EVENT 0xFFFF
+#define SNAPSHOT_MASK_MODE 0x00000002
+#define MSM_AXI_QOS_PREVIEW	122000
+#define MSM_AXI_QOS_SNAPSHOT	192000
+
+
+static struct msm_adsp_module *qcam_mod;
+static struct msm_adsp_module *vfe_mod;
+static struct msm_vfe_callback *resp;
+static void *extdata;
+static uint32_t extlen;
+
+struct mutex vfe_lock;
+static void     *vfe_syncdata;
+static uint8_t vfestopped;
+
+static struct stop_event stopevent;
+
+unsigned long paddr_s_y;
+unsigned long paddr_s_cbcr;
+unsigned long paddr_t_y;
+unsigned long paddr_t_cbcr;
+static uint32_t op_mode;
+
+static void vfe_7x_convert(struct msm_vfe_phy_info *pinfo,
+		enum vfe_resp_msg type,
+		void *data, void **ext, int32_t *elen)
+{
+	switch (type) {
+	case VFE_MSG_OUTPUT_P: {
+		pinfo->y_phy = ((struct vfe_endframe *)data)->y_address;
+		pinfo->cbcr_phy =
+			((struct vfe_endframe *)data)->cbcr_address;
+
+		pinfo->output_id = OUTPUT_TYPE_P;
+
+		CDBG("vfe_7x_convert, y_phy = 0x%x, cbcr_phy = 0x%x\n",
+				 pinfo->y_phy, pinfo->cbcr_phy);
+
+		memcpy(((struct vfe_frame_extra *)extdata),
+			&((struct vfe_endframe *)data)->extra,
+			sizeof(struct vfe_frame_extra));
+
+		*ext  = extdata;
+		*elen = extlen;
+		pinfo->frame_id =
+				((struct vfe_frame_extra *)extdata)->frame_id;
+	}
+		break;
+	case VFE_MSG_OUTPUT_S: {
+		pinfo->y_phy = paddr_s_y;
+		pinfo->cbcr_phy = paddr_s_cbcr;
+		pinfo->output_id = OUTPUT_TYPE_S;
+		CDBG("vfe_7x_convert: y_phy = 0x%x cbcr_phy = 0x%x\n",
+					pinfo->y_phy, pinfo->cbcr_phy);
+	}
+		break;
+	case VFE_MSG_OUTPUT_T: {
+		pinfo->y_phy = paddr_t_y;
+		pinfo->cbcr_phy = paddr_t_cbcr;
+		pinfo->output_id = OUTPUT_TYPE_T;
+		CDBG("vfe_7x_convert: y_phy = 0x%x cbcr_phy = 0x%x\n",
+					pinfo->y_phy, pinfo->cbcr_phy);
+	}
+		break;
+	case VFE_MSG_STATS_AF:
+	case VFE_MSG_STATS_WE:
+		pinfo->sbuf_phy = *(uint32_t *)data;
+		pinfo->frame_id = *(((uint32_t *)data) + 1);
+		CDBG("frame id = %d\n", pinfo->frame_id);
+		break;
+	default:
+		break;
+	}
+}
+
+static void vfe_7x_ops(void *driver_data, unsigned id, size_t len,
+		void (*getevent)(void *ptr, size_t len))
+{
+	uint32_t evt_buf[3];
+	struct msm_vfe_resp *rp;
+	void *data;
+	CDBG("%s:id=%d\n", __func__, id);
+
+	len = (id == VFE_ADSP_EVENT) ? 0 : len;
+	data = resp->vfe_alloc(sizeof(struct msm_vfe_resp) + len,
+		vfe_syncdata,  GFP_ATOMIC);
+
+	if (!data) {
+		pr_err("%s: rp: cannot allocate buffer\n", __func__);
+		return;
+	}
+	rp = data;
+	rp->evt_msg.len = len;
+
+	if (id == VFE_ADSP_EVENT) {
+		/* event */
+		rp->type           = VFE_EVENT;
+		rp->evt_msg.type   = MSM_CAMERA_EVT;
+		getevent(evt_buf, sizeof(evt_buf));
+		rp->evt_msg.msg_id = evt_buf[0];
+		CDBG("%s:event:msg_id=%d\n", __func__, rp->evt_msg.msg_id);
+		resp->vfe_resp(rp, MSM_CAM_Q_VFE_EVT, vfe_syncdata,
+		GFP_ATOMIC);
+	} else {
+		/* messages */
+		rp->evt_msg.type   = MSM_CAMERA_MSG;
+		rp->evt_msg.msg_id = id;
+		rp->evt_msg.data = rp + 1;
+		getevent(rp->evt_msg.data, len);
+		CDBG("%s:messages:msg_id=%d\n", __func__, rp->evt_msg.msg_id);
+
+		switch (rp->evt_msg.msg_id) {
+		case MSG_SNAPSHOT:
+			msm_camio_set_perf_lvl(S_PREVIEW);
+			vfe_7x_ops(driver_data, MSG_OUTPUT_S, len, getevent);
+			vfe_7x_ops(driver_data, MSG_OUTPUT_T, len, getevent);
+			rp->type = VFE_MSG_SNAPSHOT;
+			break;
+		case MSG_OUTPUT_S:
+			rp->type = VFE_MSG_OUTPUT_S;
+			vfe_7x_convert(&(rp->phy), VFE_MSG_OUTPUT_S,
+					rp->evt_msg.data, &(rp->extdata),
+					&(rp->extlen));
+			break;
+		case MSG_OUTPUT_T:
+			rp->type = VFE_MSG_OUTPUT_T;
+			vfe_7x_convert(&(rp->phy), VFE_MSG_OUTPUT_T,
+					rp->evt_msg.data, &(rp->extdata),
+					&(rp->extlen));
+			break;
+		case MSG_OUTPUT1:
+		case MSG_OUTPUT2:
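+			/*
+			 * Preview frames delivered while a snapshot is in
+			 * progress are dropped rather than forwarded.
+			 */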
+			if (op_mode & SNAPSHOT_MASK_MODE) {
+				resp->vfe_free(data);
+				return;
+			}
+			rp->type = VFE_MSG_OUTPUT_P;
+			vfe_7x_convert(&(rp->phy), VFE_MSG_OUTPUT_P,
+				rp->evt_msg.data, &(rp->extdata),
+				&(rp->extlen));
+			break;
+		case MSG_STATS_AF:
+			rp->type = VFE_MSG_STATS_AF;
+			vfe_7x_convert(&(rp->phy), VFE_MSG_STATS_AF,
+					rp->evt_msg.data, NULL, NULL);
+			break;
+		case MSG_STATS_WE:
+			rp->type = VFE_MSG_STATS_WE;
+			vfe_7x_convert(&(rp->phy), VFE_MSG_STATS_WE,
+					rp->evt_msg.data, NULL, NULL);
+
+			CDBG("MSG_STATS_WE: phy = 0x%x\n", rp->phy.sbuf_phy);
+			break;
+		case MSG_STOP_ACK:
+			rp->type = VFE_MSG_GENERAL;
+			stopevent.state = 1;
+			wake_up(&stopevent.wait);
+			break;
+		default:
+			rp->type = VFE_MSG_GENERAL;
+			break;
+		}
+		if (id != MSG_SOF)
+			resp->vfe_resp(rp, MSM_CAM_Q_VFE_MSG,
+					vfe_syncdata, GFP_ATOMIC);
+	}
+}
+
+static struct msm_adsp_ops vfe_7x_sync = {
+	.event = vfe_7x_ops,
+};
+
+static int vfe_7x_enable(struct camera_enable_cmd *enable)
+{
+	int rc = -EFAULT;
+	static int cnt;
+
+	if (!strcmp(enable->name, "QCAMTASK"))
+		rc = msm_adsp_enable(qcam_mod);
+	else if (!strcmp(enable->name, "VFETASK"))
+		rc = msm_adsp_enable(vfe_mod);
+
+	if (!cnt) {
+		msm_camio_set_perf_lvl(S_INIT);
+		cnt++;
+	}
+	return rc;
+}
+
+static int vfe_7x_disable(struct camera_enable_cmd *enable,
+		struct platform_device *dev __attribute__((unused)))
+{
+	int rc = -EFAULT;
+
+	if (!strcmp(enable->name, "QCAMTASK"))
+		rc = msm_adsp_disable(qcam_mod);
+	else if (!strcmp(enable->name, "VFETASK"))
+		rc = msm_adsp_disable(vfe_mod);
+
+	return rc;
+}
+
+static int vfe_7x_stop(void)
+{
+	int rc = 0;
+	uint32_t stopcmd = VFE_STOP_CMD;
+	rc = msm_adsp_write(vfe_mod, QDSP_CMDQUEUE,
+				&stopcmd, sizeof(uint32_t));
+	if (rc < 0) {
+		CDBG("%s:%d: failed rc = %d\n", __func__, __LINE__, rc);
+		return rc;
+	}
+
+	stopevent.state = 0;
+	rc = wait_event_timeout(stopevent.wait,
+		stopevent.state != 0,
+		msecs_to_jiffies(stopevent.timeout));
+
+	return rc;
+}
+
+static void vfe_7x_release(struct platform_device *pdev)
+{
+	mutex_lock(&vfe_lock);
+	vfe_syncdata = NULL;
+	mutex_unlock(&vfe_lock);
+
+	if (!vfestopped) {
+		CDBG("%s:%d:Calling vfe_7x_stop()\n", __func__, __LINE__);
+		vfe_7x_stop();
+	} else
+		vfestopped = 0;
+
+	msm_adsp_disable(qcam_mod);
+	msm_adsp_disable(vfe_mod);
+
+	msm_adsp_put(qcam_mod);
+	msm_adsp_put(vfe_mod);
+
+	msm_camio_disable(pdev);
+
+	kfree(extdata);
+	extlen = 0;
+
+	/* set back the AXI frequency to default */
+	/* TODO msm_camio_set_perf_lvl(S_DEFAULT); */
+}
+
+static int vfe_7x_init(struct msm_vfe_callback *presp,
+	struct platform_device *dev)
+{
+	int rc = 0;
+
+	init_waitqueue_head(&stopevent.wait);
+	stopevent.timeout = 200;
+	stopevent.state = 0;
+
+	if (presp && presp->vfe_resp)
+		resp = presp;
+	else
+		return -EFAULT;
+
+	/* Bring up all the required GPIOs and Clocks */
+	rc = msm_camio_enable(dev);
+	if (rc < 0)
+		return rc;
+
+	extlen = sizeof(struct vfe_frame_extra);
+
+	extdata = kmalloc(extlen, GFP_ATOMIC);
+	if (!extdata) {
+		rc = -ENOMEM;
+		goto init_fail;
+	}
+
+	rc = msm_adsp_get("QCAMTASK", &qcam_mod, &vfe_7x_sync, NULL);
+	if (rc) {
+		rc = -EBUSY;
+		goto get_qcam_fail;
+	}
+
+	rc = msm_adsp_get("VFETASK", &vfe_mod, &vfe_7x_sync, NULL);
+	if (rc) {
+		rc = -EBUSY;
+		goto get_vfe_fail;
+	}
+
+	return 0;
+
+get_vfe_fail:
+	msm_adsp_put(qcam_mod);
+get_qcam_fail:
+	kfree(extdata);
+init_fail:
+	extlen = 0;
+	return rc;
+}
+
+static int vfe_7x_config_axi(int mode,
+	struct axidata *ad, struct axiout *ao)
+{
+	struct msm_pmem_region *regptr;
+	unsigned long *bptr;
+	int    cnt;
+
+	int rc = 0;
+
+	if (mode == OUTPUT_1 || mode == OUTPUT_1_AND_2) {
+		regptr = ad->region;
+
+		CDBG("bufnum1 = %d\n", ad->bufnum1);
+		if (mode == OUTPUT_1_AND_2) {
+			paddr_t_y = regptr->paddr + regptr->info.y_off;
+			paddr_t_cbcr = regptr->paddr +  regptr->info.cbcr_off;
+		}
+
+		CDBG("config_axi1: O1, phy = 0x%lx, y_off = %d, cbcr_off =%d\n",
+			regptr->paddr, regptr->info.y_off,
+			regptr->info.cbcr_off);
+
+		bptr = &ao->output1buffer1_y_phy;
+		for (cnt = 0; cnt < ad->bufnum1; cnt++) {
+			*bptr = regptr->paddr + regptr->info.y_off;
+			bptr++;
+			*bptr = regptr->paddr + regptr->info.cbcr_off;
+
+			bptr++;
+			regptr++;
+		}
+
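+		/* pad the unused slots (8 per output) with the last buffer */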
+		regptr--;
+		for (cnt = 0; cnt < (8 - ad->bufnum1); cnt++) {
+			*bptr = regptr->paddr + regptr->info.y_off;
+			bptr++;
+			*bptr = regptr->paddr + regptr->info.cbcr_off;
+			bptr++;
+		}
+	}
+
+	if (mode == OUTPUT_2 || mode == OUTPUT_1_AND_2) {
+		regptr = &(ad->region[ad->bufnum1]);
+
+		CDBG("bufnum2 = %d\n", ad->bufnum2);
+		paddr_s_y = regptr->paddr +  regptr->info.y_off;
+		paddr_s_cbcr = regptr->paddr +  regptr->info.cbcr_off;
+
+		CDBG("config_axi2: O2, phy = 0x%lx, y_off = %d, cbcr_off =%d\n",
+		     regptr->paddr, regptr->info.y_off, regptr->info.cbcr_off);
+
+		bptr = &ao->output2buffer1_y_phy;
+		for (cnt = 0; cnt < ad->bufnum2; cnt++) {
+			*bptr = regptr->paddr + regptr->info.y_off;
+			bptr++;
+			*bptr = regptr->paddr + regptr->info.cbcr_off;
+
+			bptr++;
+			regptr++;
+		}
+
+		regptr--;
+		for (cnt = 0; cnt < (8 - ad->bufnum2); cnt++) {
+			*bptr = regptr->paddr + regptr->info.y_off;
+			bptr++;
+			*bptr = regptr->paddr + regptr->info.cbcr_off;
+			bptr++;
+		}
+	}
+
+	return rc;
+}
+
+static int vfe_7x_config(struct msm_vfe_cfg_cmd *cmd, void *data)
+{
+	struct msm_pmem_region *regptr;
+	unsigned char buf[256];
+
+	struct vfe_stats_ack sack;
+	struct axidata *axid;
+	uint32_t i;
+	uint32_t *_mode;
+
+	struct vfe_stats_we_cfg *scfg = NULL;
+	struct vfe_stats_af_cfg *sfcfg = NULL;
+
+	struct axiout *axio = NULL;
+	void   *cmd_data = NULL;
+	void   *cmd_data_alloc = NULL;
+	long rc = 0;
+	struct msm_vfe_command_7k *vfecmd;
+
+	vfecmd = kmalloc(sizeof(struct msm_vfe_command_7k), GFP_ATOMIC);
+	if (!vfecmd) {
+		pr_err("vfecmd alloc failed!\n");
+		return -ENOMEM;
+	}
+
+	if (cmd->cmd_type != CMD_FRAME_BUF_RELEASE &&
+	    cmd->cmd_type != CMD_STATS_BUF_RELEASE &&
+	    cmd->cmd_type != CMD_STATS_AF_BUF_RELEASE) {
+		if (copy_from_user(vfecmd,
+				(void __user *)(cmd->value),
+				sizeof(struct msm_vfe_command_7k))) {
+			rc = -EFAULT;
+			goto config_failure;
+		}
+	}
+
+	switch (cmd->cmd_type) {
+	case CMD_STATS_AEC_AWB_ENABLE:
+	case CMD_STATS_AXI_CFG: {
+		axid = data;
+		if (!axid) {
+			rc = -EFAULT;
+			goto config_failure;
+		}
+
+		scfg =
+			kmalloc(sizeof(struct vfe_stats_we_cfg),
+				GFP_ATOMIC);
+		if (!scfg) {
+			rc = -ENOMEM;
+			goto config_failure;
+		}
+
+		if (copy_from_user(scfg,
+					(void __user *)(vfecmd->value),
+					vfecmd->length)) {
+
+			rc = -EFAULT;
+			goto config_done;
+		}
+
+		CDBG("STATS_ENABLE: bufnum = %d, enabling = %d\n",
+			axid->bufnum1, scfg->wb_expstatsenable);
+
+		if (axid->bufnum1 > 0) {
+			regptr = axid->region;
+
+			for (i = 0; i < axid->bufnum1; i++) {
+
+				CDBG("STATS_ENABLE, phy = 0x%lx\n",
+					regptr->paddr);
+
+				scfg->wb_expstatoutputbuffer[i] =
+					(void *)regptr->paddr;
+				regptr++;
+			}
+
+			cmd_data = scfg;
+
+		} else {
+			rc = -EINVAL;
+			goto config_done;
+		}
+	}
+		break;
+	case CMD_STATS_AF_ENABLE:
+	case CMD_STATS_AF_AXI_CFG: {
+		axid = data;
+		if (!axid) {
+			rc = -EFAULT;
+			goto config_failure;
+		}
+
+		sfcfg =
+			kmalloc(sizeof(struct vfe_stats_af_cfg),
+				GFP_ATOMIC);
+
+		if (!sfcfg) {
+			rc = -ENOMEM;
+			goto config_failure;
+		}
+
+		if (copy_from_user(sfcfg,
+					(void __user *)(vfecmd->value),
+					vfecmd->length)) {
+
+			rc = -EFAULT;
+			goto config_done;
+		}
+
+		CDBG("AF_ENABLE: bufnum = %d, enabling = %d\n",
+			axid->bufnum1, sfcfg->af_enable);
+
+		if (axid->bufnum1 > 0) {
+			regptr = &axid->region[0];
+
+			for (i = 0; i < axid->bufnum1; i++) {
+
+				CDBG("STATS_ENABLE, phy = 0x%lx\n",
+					regptr->paddr);
+
+				sfcfg->af_outbuf[i] =
+					(void *)regptr->paddr;
+
+				regptr++;
+			}
+
+			cmd_data = sfcfg;
+
+		} else {
+			rc = -EINVAL;
+			goto config_done;
+		}
+	}
+		break;
+	case CMD_FRAME_BUF_RELEASE: {
+		struct msm_frame *b;
+		unsigned long p;
+		struct vfe_outputack fack;
+		if (!data)  {
+			rc = -EFAULT;
+			goto config_failure;
+		}
+
+		b = (struct msm_frame *)(cmd->value);
+		p = *(unsigned long *)data;
+
+		fack.header = VFE_FRAME_ACK;
+
+		fack.output2newybufferaddress =
+			(void *)(p + b->y_off);
+
+		fack.output2newcbcrbufferaddress =
+			(void *)(p + b->cbcr_off);
+
+		vfecmd->queue = QDSP_CMDQUEUE;
+		vfecmd->length = sizeof(struct vfe_outputack);
+		cmd_data = &fack;
+	}
+		break;
+	case CMD_SNAP_BUF_RELEASE:
+		break;
+	case CMD_STATS_BUF_RELEASE: {
+		CDBG("vfe_7x_config: CMD_STATS_BUF_RELEASE\n");
+		if (!data) {
+			rc = -EFAULT;
+			goto config_failure;
+		}
+
+		sack.header = STATS_WE_ACK;
+		sack.bufaddr = (void *)*(uint32_t *)data;
+
+		vfecmd->queue  = QDSP_CMDQUEUE;
+		vfecmd->length = sizeof(struct vfe_stats_ack);
+		cmd_data = &sack;
+	}
+		break;
+	case CMD_STATS_AF_BUF_RELEASE: {
+		CDBG("vfe_7x_config: CMD_STATS_AF_BUF_RELEASE\n");
+		if (!data) {
+			rc = -EFAULT;
+			goto config_failure;
+		}
+
+		sack.header = STATS_AF_ACK;
+		sack.bufaddr = (void *)*(uint32_t *)data;
+
+		vfecmd->queue  = QDSP_CMDQUEUE;
+		vfecmd->length = sizeof(struct vfe_stats_ack);
+		cmd_data = &sack;
+	}
+		break;
+	case CMD_GENERAL:
+	case CMD_STATS_DISABLE: {
+		if (vfecmd->length > 256) {
+			cmd_data_alloc =
+			cmd_data = kmalloc(vfecmd->length, GFP_ATOMIC);
+			if (!cmd_data) {
+				rc = -ENOMEM;
+				goto config_failure;
+			}
+		} else
+			cmd_data = buf;
+
+		if (copy_from_user(cmd_data,
+					(void __user *)(vfecmd->value),
+					vfecmd->length)) {
+
+			rc = -EFAULT;
+			goto config_done;
+		}
+
+		if (vfecmd->queue == QDSP_CMDQUEUE) {
+			switch (*(uint32_t *)cmd_data) {
+			case VFE_RESET_CMD:
+				msm_camio_vfe_blk_reset();
+				vfestopped = 0;
+				break;
+			case VFE_START_CMD:
+				_mode = (uint32_t *)cmd_data;
+				op_mode = *(++_mode);
+				if (op_mode & SNAPSHOT_MASK_MODE)
+					msm_camio_set_perf_lvl(S_CAPTURE);
+				else
+					msm_camio_set_perf_lvl(S_PREVIEW);
+				vfestopped = 0;
+				break;
+			case VFE_STOP_CMD:
+				vfestopped = 1;
+				goto config_send;
+
+			default:
+				break;
+			}
+		} /* QDSP_CMDQUEUE */
+	}
+		break;
+	case CMD_AXI_CFG_PREVIEW:
+	case CMD_RAW_PICT_AXI_CFG: {
+		axid = data;
+		if (!axid) {
+			rc = -EFAULT;
+			goto config_failure;
+		}
+
+		axio = kmalloc(sizeof(struct axiout), GFP_ATOMIC);
+		if (!axio) {
+			rc = -ENOMEM;
+			goto config_failure;
+		}
+
+		if (copy_from_user(axio, (void __user *)(vfecmd->value),
+					sizeof(struct axiout))) {
+			rc = -EFAULT;
+			goto config_done;
+		}
+
+		vfe_7x_config_axi(OUTPUT_2, axid, axio);
+		cmd_data = axio;
+	}
+		break;
+	case CMD_AXI_CFG_SNAP: {
+		axid = data;
+		if (!axid) {
+			rc = -EFAULT;
+			goto config_failure;
+		}
+
+		axio = kmalloc(sizeof(struct axiout), GFP_ATOMIC);
+		if (!axio) {
+			rc = -ENOMEM;
+			goto config_failure;
+		}
+
+		if (copy_from_user(axio, (void __user *)(vfecmd->value),
+					sizeof(struct axiout))) {
+			rc = -EFAULT;
+			goto config_done;
+		}
+
+		vfe_7x_config_axi(OUTPUT_1_AND_2, axid, axio);
+
+		cmd_data = axio;
+	}
+		break;
+	default:
+		break;
+	}
+
+	if (vfestopped)
+		goto config_done;
+
+config_send:
+	CDBG("send adsp command = %d\n", *(uint32_t *)cmd_data);
+	rc = msm_adsp_write(vfe_mod, vfecmd->queue,
+				cmd_data, vfecmd->length);
+
+config_done:
+	kfree(cmd_data_alloc);
+
+config_failure:
+	kfree(scfg);
+	kfree(sfcfg);
+	kfree(axio);
+	kfree(vfecmd);
+	return rc;
+}
+
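+/* Export the 7x VFE operations through the generic msm camera function table. */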
+void msm_camvfe_fn_init(struct msm_camvfe_fn *fptr, void *data)
+{
+	mutex_init(&vfe_lock);
+	fptr->vfe_init    = vfe_7x_init;
+	fptr->vfe_enable  = vfe_7x_enable;
+	fptr->vfe_config  = vfe_7x_config;
+	fptr->vfe_disable = vfe_7x_disable;
+	fptr->vfe_release = vfe_7x_release;
+	vfe_syncdata = data;
+}
+
+void msm_camvpe_fn_init(struct msm_camvpe_fn *fptr, void *data)
+{
+	fptr->vpe_reg		= NULL;
+	fptr->send_frame_to_vpe	= NULL;
+	fptr->vpe_config	= NULL;
+	fptr->vpe_cfg_update	= NULL;
+	fptr->dis		= NULL;
+}
diff --git a/drivers/media/video/msm/msm_vfe7x27a.h b/drivers/media/video/msm/msm_vfe7x27a.h
new file mode 100644
index 0000000..a488206
--- /dev/null
+++ b/drivers/media/video/msm/msm_vfe7x27a.h
@@ -0,0 +1,300 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_VFE7X_H__
+#define __MSM_VFE7X_H__
+#include <media/msm_camera.h>
+#include <mach/camera.h>
+
+struct vfe_frame_extra {
+	uint32_t	bl_evencol:23;
+	uint32_t	rvd1:9;
+	uint32_t	bl_oddcol:23;
+	uint32_t	rvd2:9;
+
+	uint32_t	d_dbpc_stats_hot:16;
+	uint32_t	d_dbpc_stats_cold:16;
+
+	uint32_t	d_dbpc_stats_0_hot:10;
+	uint32_t	rvd3:6;
+	uint32_t	d_dbpc_stats_0_cold:10;
+	uint32_t	rvd4:6;
+	uint32_t	d_dbpc_stats_1_hot:10;
+	uint32_t	rvd5:6;
+	uint32_t	d_dbpc_stats_1_cold:10;
+	uint32_t	rvd6:6;
+
+	uint32_t	asf_max_edge;
+
+	uint32_t	e_y_wm_pm_stats_0:21;
+	uint32_t	rvd7:11;
+	uint32_t	e_y_wm_pm_stats_1_bl:8;
+	uint32_t	rvd8:8;
+	uint32_t	e_y_wm_pm_stats_1_nl:12;
+	uint32_t	rvd9:4;
+
+	uint32_t	e_cbcr_wm_pm_stats_0:21;
+	uint32_t	rvd10:11;
+	uint32_t	e_cbcr_wm_pm_stats_1_bl:8;
+	uint32_t	rvd11:8;
+	uint32_t	e_cbcr_wm_pm_stats_1_nl:12;
+	uint32_t	rvd12:4;
+
+	uint32_t	v_y_wm_pm_stats_0:21;
+	uint32_t	rvd13:11;
+	uint32_t	v_y_wm_pm_stats_1_bl:8;
+	uint32_t	rvd14:8;
+	uint32_t	v_y_wm_pm_stats_1_nl:12;
+	uint32_t	rvd15:4;
+
+	uint32_t	v_cbcr_wm_pm_stats_0:21;
+	uint32_t	rvd16:11;
+	uint32_t	v_cbcr_wm_pm_stats_1_bl:8;
+	uint32_t	rvd17:8;
+	uint32_t	v_cbcr_wm_pm_stats_1_nl:12;
+	uint32_t	rvd18:4;
+
+	uint32_t      frame_id;
+};
+
+struct vfe_endframe {
+	uint32_t      y_address;
+	uint32_t      cbcr_address;
+
+	struct vfe_frame_extra extra;
+} __packed;
+
+struct vfe_outputack {
+	uint32_t  header;
+	void      *output2newybufferaddress;
+	void      *output2newcbcrbufferaddress;
+} __packed;
+
+struct vfe_stats_ack {
+	uint32_t header;
+	/* MUST BE 64 bit ALIGNED */
+	void     *bufaddr;
+} __packed;
+
+/* AXI Output Config Command sent to DSP */
+struct axiout {
+	uint32_t            cmdheader:32;
+	int                 outputmode:3;
+	uint8_t             format:2;
+	uint32_t            /* reserved */ : 27;
+
+	/* AXI Output 1 Y Configuration, Part 1 */
+	uint32_t            out1yimageheight:12;
+	uint32_t            /* reserved */ : 4;
+	uint32_t            out1yimagewidthin64bitwords:10;
+	uint32_t            /* reserved */ : 6;
+
+	/* AXI Output 1 Y Configuration, Part 2 */
+	uint8_t             out1yburstlen:2;
+	uint32_t            out1ynumrows:12;
+	uint32_t            out1yrowincin64bitincs:12;
+	uint32_t            /* reserved */ : 6;
+
+	/* AXI Output 1 CbCr Configuration, Part 1 */
+	uint32_t            out1cbcrimageheight:12;
+	uint32_t            /* reserved */ : 4;
+	uint32_t            out1cbcrimagewidthin64bitwords:10;
+	uint32_t            /* reserved */ : 6;
+
+	/* AXI Output 1 CbCr Configuration, Part 2 */
+	uint8_t             out1cbcrburstlen:2;
+	uint32_t            out1cbcrnumrows:12;
+	uint32_t            out1cbcrrowincin64bitincs:12;
+	uint32_t            /* reserved */ : 6;
+
+	/* AXI Output 2 Y Configuration, Part 1 */
+	uint32_t            out2yimageheight:12;
+	uint32_t            /* reserved */ : 4;
+	uint32_t            out2yimagewidthin64bitwords:10;
+	uint32_t            /* reserved */ : 6;
+
+	/* AXI Output 2 Y Configuration, Part 2 */
+	uint8_t             out2yburstlen:2;
+	uint32_t            out2ynumrows:12;
+	uint32_t            out2yrowincin64bitincs:12;
+	uint32_t            /* reserved */ : 6;
+
+	/* AXI Output 2 CbCr Configuration, Part 1 */
+	uint32_t            out2cbcrimageheight:12;
+	uint32_t            /* reserved */ : 4;
+	uint32_t            out2cbcrimagewidtein64bitwords:10;
+	uint32_t            /* reserved */ : 6;
+
+	/* AXI Output 2 CbCr Configuration, Part 2 */
+	uint8_t             out2cbcrburstlen:2;
+	uint32_t            out2cbcrnumrows:12;
+	uint32_t            out2cbcrrowincin64bitincs:12;
+	uint32_t            /* reserved */ : 6;
+
+	/* Address configuration:
+	 * output1 physical address */
+	unsigned long   output1buffer1_y_phy;
+	unsigned long   output1buffer1_cbcr_phy;
+	unsigned long   output1buffer2_y_phy;
+	unsigned long   output1buffer2_cbcr_phy;
+	unsigned long   output1buffer3_y_phy;
+	unsigned long   output1buffer3_cbcr_phy;
+	unsigned long   output1buffer4_y_phy;
+	unsigned long   output1buffer4_cbcr_phy;
+	unsigned long   output1buffer5_y_phy;
+	unsigned long   output1buffer5_cbcr_phy;
+	unsigned long   output1buffer6_y_phy;
+	unsigned long   output1buffer6_cbcr_phy;
+	unsigned long   output1buffer7_y_phy;
+	unsigned long   output1buffer7_cbcr_phy;
+	unsigned long   output1buffer8_y_phy;
+	unsigned long   output1buffer8_cbcr_phy;
+
+	/* output2 physical address */
+	unsigned long   output2buffer1_y_phy;
+	unsigned long   output2buffer1_cbcr_phy;
+	unsigned long   output2buffer2_y_phy;
+	unsigned long   output2buffer2_cbcr_phy;
+	unsigned long   output2buffer3_y_phy;
+	unsigned long   output2buffer3_cbcr_phy;
+	unsigned long   output2buffer4_y_phy;
+	unsigned long   output2buffer4_cbcr_phy;
+	unsigned long   output2buffer5_y_phy;
+	unsigned long   output2buffer5_cbcr_phy;
+	unsigned long   output2buffer6_y_phy;
+	unsigned long   output2buffer6_cbcr_phy;
+	unsigned long   output2buffer7_y_phy;
+	unsigned long   output2buffer7_cbcr_phy;
+	unsigned long   output2buffer8_y_phy;
+	unsigned long   output2buffer8_cbcr_phy;
+} __packed;
+
+struct vfe_stats_we_cfg {
+	uint32_t       header;
+
+	/* White Balance/Exposure Statistic Selection */
+	uint8_t        wb_expstatsenable:1;
+	uint8_t        wb_expstatbuspriorityselection:1;
+	unsigned int   wb_expstatbuspriorityvalue:4;
+	unsigned int   /* reserved */ : 26;
+
+	/* White Balance/Exposure Statistic Configuration, Part 1 */
+	uint8_t        exposurestatregions:1;
+	uint8_t        exposurestatsubregions:1;
+	unsigned int   /* reserved */ : 14;
+
+	unsigned int   whitebalanceminimumy:8;
+	unsigned int   whitebalancemaximumy:8;
+
+	/* White Balance/Exposure Statistic Configuration, Part 2 */
+	uint8_t wb_expstatslopeofneutralregionline[
+		NUM_WB_EXP_NEUTRAL_REGION_LINES];
+
+	/* White Balance/Exposure Statistic Configuration, Part 3 */
+	unsigned int   wb_expstatcrinterceptofneutralregionline2:12;
+	unsigned int   /* reserved */ : 4;
+	unsigned int   wb_expstatcbinterceptofneutralreginnline1:12;
+	unsigned int    /* reserved */ : 4;
+
+	/* White Balance/Exposure Statistic Configuration, Part 4 */
+	unsigned int   wb_expstatcrinterceptofneutralregionline4:12;
+	unsigned int   /* reserved */ : 4;
+	unsigned int   wb_expstatcbinterceptofneutralregionline3:12;
+	unsigned int   /* reserved */ : 4;
+
+	/* White Balance/Exposure Statistic Output Buffer Header */
+	unsigned int   wb_expmetricheaderpattern:8;
+	unsigned int   /* reserved */ : 24;
+
+	/* White Balance/Exposure Statistic Output Buffers - MUST
+	 * BE 64 bit ALIGNED */
+	void  *wb_expstatoutputbuffer[NUM_WB_EXP_STAT_OUTPUT_BUFFERS];
+} __packed;
+
+struct vfe_stats_af_cfg {
+	uint32_t header;
+
+	/* Autofocus Statistic Selection */
+	uint8_t       af_enable:1;
+	uint8_t       af_busprioritysel:1;
+	unsigned int  af_buspriorityval:4;
+	unsigned int  /* reserved */ : 26;
+
+	/* Autofocus Statistic Configuration, Part 1 */
+	unsigned int  af_singlewinvoffset:12;
+	unsigned int  /* reserved */ : 4;
+	unsigned int  af_singlewinhoffset:12;
+	unsigned int  /* reserved */ : 3;
+	uint8_t       af_winmode:1;
+
+	/* Autofocus Statistic Configuration, Part 2 */
+	unsigned int  af_singglewinvh:11;
+	unsigned int  /* reserved */ : 5;
+	unsigned int  af_singlewinhw:11;
+	unsigned int  /* reserved */ : 5;
+
+	/* Autofocus Statistic Configuration, Parts 3-6 */
+	uint8_t       af_multiwingrid[NUM_AUTOFOCUS_MULTI_WINDOW_GRIDS];
+
+	/* Autofocus Statistic Configuration, Part 7 */
+	signed int    af_metrichpfcoefa00:5;
+	signed int    af_metrichpfcoefa04:5;
+	unsigned int  af_metricmaxval:11;
+	uint8_t       af_metricsel:1;
+	unsigned int  /* reserved */ : 10;
+
+	/* Autofocus Statistic Configuration, Part 8 */
+	signed int    af_metrichpfcoefa20:5;
+	signed int    af_metrichpfcoefa21:5;
+	signed int    af_metrichpfcoefa22:5;
+	signed int    af_metrichpfcoefa23:5;
+	signed int    af_metrichpfcoefa24:5;
+	unsigned int  /* reserved */ : 7;
+
+	/* Autofocus Statistic Output Buffer Header */
+	unsigned int  af_metrichp:8;
+	unsigned int  /* reserved */ : 24;
+
+	/* Autofocus Statistic Output Buffers - MUST BE 64 bit ALIGNED!!! */
+	void *af_outbuf[NUM_AF_STAT_OUTPUT_BUFFERS];
+} __packed; /* VFE_StatsAutofocusConfigCmdType */
+
+struct msm_camera_frame_msg {
+	unsigned long   output_y_address;
+	unsigned long   output_cbcr_address;
+
+	unsigned int    blacklevelevenColumn:23;
+	uint16_t        reserved1:9;
+	unsigned int    blackleveloddColumn:23;
+	uint16_t        reserved2:9;
+
+	uint16_t        greendefectpixelcount:8;
+	uint16_t        reserved3:8;
+	uint16_t        redbluedefectpixelcount:8;
+	uint16_t        reserved4:8;
+} __packed;
+
+/* New one for 7k */
+struct msm_vfe_command_7k {
+	uint16_t queue;
+	uint16_t length;
+	void     *value;
+};
+
+struct stop_event {
+	wait_queue_head_t wait;
+	int state;
+	int timeout;
+};
+
+
+#endif /* __MSM_VFE7X_H__ */
diff --git a/drivers/media/video/msm/msm_vfe8x.c b/drivers/media/video/msm/msm_vfe8x.c
new file mode 100644
index 0000000..0bf1785
--- /dev/null
+++ b/drivers/media/video/msm/msm_vfe8x.c
@@ -0,0 +1,842 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <mach/irqs.h>
+#include "msm_vfe8x_proc.h"
+#include <linux/pm_qos_params.h>
+
+#define ON  1
+#define OFF 0
+
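+/* Command names indexed by enum vfe_cmd_id, used for debug logging below. */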
+static const char *vfe_general_cmd[] = {
+	"START",  /* 0 */
+	"RESET",
+	"AXI_INPUT_CONFIG",
+	"CAMIF_CONFIG",
+	"AXI_OUTPUT_CONFIG",
+	"BLACK_LEVEL_CONFIG",  /* 5 */
+	"ROLL_OFF_CONFIG",
+	"DEMUX_CHANNEL_GAIN_CONFIG",
+	"DEMOSAIC_CONFIG",
+	"FOV_CROP_CONFIG",
+	"MAIN_SCALER_CONFIG",  /* 10 */
+	"WHITE_BALANCE_CONFIG",
+	"COLOR_CORRECTION_CONFIG",
+	"LA_CONFIG",
+	"RGB_GAMMA_CONFIG",
+	"CHROMA_ENHAN_CONFIG",  /* 15 */
+	"CHROMA_SUPPRESSION_CONFIG",
+	"ASF_CONFIG",
+	"SCALER2Y_CONFIG",
+	"SCALER2CbCr_CONFIG",
+	"CHROMA_SUBSAMPLE_CONFIG",  /* 20 */
+	"FRAME_SKIP_CONFIG",
+	"OUTPUT_CLAMP_CONFIG",
+	"TEST_GEN_START",
+	"UPDATE",
+	"OUTPUT1_ACK",  /* 25 */
+	"OUTPUT2_ACK",
+	"EPOCH1_ACK",
+	"EPOCH2_ACK",
+	"STATS_AUTOFOCUS_ACK",
+	"STATS_WB_EXP_ACK",  /* 30 */
+	"BLACK_LEVEL_UPDATE",
+	"DEMUX_CHANNEL_GAIN_UPDATE",
+	"DEMOSAIC_BPC_UPDATE",
+	"DEMOSAIC_ABF_UPDATE",
+	"FOV_CROP_UPDATE",  /* 35 */
+	"WHITE_BALANCE_UPDATE",
+	"COLOR_CORRECTION_UPDATE",
+	"LA_UPDATE",
+	"RGB_GAMMA_UPDATE",
+	"CHROMA_ENHAN_UPDATE",  /* 40 */
+	"CHROMA_SUPPRESSION_UPDATE",
+	"MAIN_SCALER_UPDATE",
+	"SCALER2CbCr_UPDATE",
+	"SCALER2Y_UPDATE",
+	"ASF_UPDATE",  /* 45 */
+	"FRAME_SKIP_UPDATE",
+	"CAMIF_FRAME_UPDATE",
+	"STATS_AUTOFOCUS_UPDATE",
+	"STATS_WB_EXP_UPDATE",
+	"STOP",  /* 50 */
+	"GET_HW_VERSION",
+	"STATS_SETTING",
+	"STATS_AUTOFOCUS_START",
+	"STATS_AUTOFOCUS_STOP",
+	"STATS_WB_EXP_START",  /* 55 */
+	"STATS_WB_EXP_STOP",
+	"ASYNC_TIMER_SETTING",
+};
+
+static void     *vfe_syncdata;
+
+static int vfe_enable(struct camera_enable_cmd *enable)
+{
+	return 0;
+}
+
+static int vfe_disable(struct camera_enable_cmd *enable,
+	struct platform_device *dev)
+{
+	vfe_stop();
+	msm_camio_disable(dev);
+	return 0;
+}
+
+static void vfe_release(struct platform_device *dev)
+{
+	msm_camio_disable(dev);
+	vfe_cmd_release(dev);
+	update_axi_qos(PM_QOS_DEFAULT_VALUE);
+	vfe_syncdata = NULL;
+}
+
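+/*
+ * Program the per-fragment Y/CbCr output address tables for the requested
+ * path(s).  For OUTPUT_1_AND_3 the first bufnum1 regions feed the preview
+ * path (output1) and the following bufnum2 regions feed the video path
+ * (output2).
+ */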
+static void vfe_config_axi(int mode,
+			   struct axidata *ad,
+			   struct vfe_cmd_axi_output_config *ao)
+{
+	struct msm_pmem_region *regptr, *regptr1;
+	int i, j;
+	uint32_t *p1, *p2;
+
+	if (mode == OUTPUT_1 || mode == OUTPUT_1_AND_2) {
+		regptr = ad->region;
+		for (i = 0; i < ad->bufnum1; i++) {
+
+			p1 = &(ao->output1.outputY.outFragments[i][0]);
+			p2 = &(ao->output1.outputCbcr.outFragments[i][0]);
+
+			for (j = 0; j < ao->output1.fragmentCount; j++) {
+
+				*p1 = regptr->paddr + regptr->info.y_off;
+				p1++;
+
+				*p2 = regptr->paddr + regptr->info.cbcr_off;
+				p2++;
+			}
+			regptr++;
+		}
+	} /* if OUTPUT1 or Both */
+
+	if (mode == OUTPUT_2 || mode == OUTPUT_1_AND_2) {
+
+		regptr = &(ad->region[ad->bufnum1]);
+		CDBG("bufnum2 = %d\n", ad->bufnum2);
+
+		for (i = 0; i < ad->bufnum2; i++) {
+
+			p1 = &(ao->output2.outputY.outFragments[i][0]);
+			p2 = &(ao->output2.outputCbcr.outFragments[i][0]);
+
+			CDBG("config_axi: O2, phy = 0x%lx, y_off = %d, "\
+			     "cbcr_off = %d\n", regptr->paddr,
+			     regptr->info.y_off, regptr->info.cbcr_off);
+
+			for (j = 0; j < ao->output2.fragmentCount; j++) {
+
+				*p1 = regptr->paddr + regptr->info.y_off;
+				CDBG("vfe_config_axi: p1 = 0x%x\n", *p1);
+				p1++;
+
+				*p2 = regptr->paddr + regptr->info.cbcr_off;
+				CDBG("vfe_config_axi: p2 = 0x%x\n", *p2);
+				p2++;
+			}
+			regptr++;
+		}
+	}
+	/* For video configuration */
+	if (mode == OUTPUT_1_AND_3) {
+		/* this is preview buffer. */
+		regptr =  &(ad->region[0]);
+		/* this is video buffer. */
+		regptr1 = &(ad->region[ad->bufnum1]);
+		CDBG("bufnum1 = %d\n", ad->bufnum1);
+		CDBG("bufnum2 = %d\n", ad->bufnum2);
+
+		for (i = 0; i < ad->bufnum1; i++) {
+			p1 = &(ao->output1.outputY.outFragments[i][0]);
+			p2 = &(ao->output1.outputCbcr.outFragments[i][0]);
+
+			CDBG("config_axi: O1, phy = 0x%lx, y_off = %d, "\
+			     "cbcr_off = %d\n", regptr->paddr,
+			     regptr->info.y_off, regptr->info.cbcr_off);
+
+			for (j = 0; j < ao->output1.fragmentCount; j++) {
+
+				*p1 = regptr->paddr + regptr->info.y_off;
+				CDBG("vfe_config_axi: p1 = 0x%x\n", *p1);
+				p1++;
+
+				*p2 = regptr->paddr + regptr->info.cbcr_off;
+				CDBG("vfe_config_axi: p2 = 0x%x\n", *p2);
+				p2++;
+			}
+			regptr++;
+		}
+		for (i = 0; i < ad->bufnum2; i++) {
+			p1 = &(ao->output2.outputY.outFragments[i][0]);
+			p2 = &(ao->output2.outputCbcr.outFragments[i][0]);
+
+			CDBG("config_axi: O2, phy = 0x%lx, y_off = %d, "\
+			     "cbcr_off = %d\n", regptr1->paddr,
+			     regptr1->info.y_off, regptr1->info.cbcr_off);
+
+			for (j = 0; j < ao->output2.fragmentCount; j++) {
+
+				*p1 = regptr1->paddr + regptr1->info.y_off;
+				CDBG("vfe_config_axi: p1 = 0x%x\n", *p1);
+				p1++;
+
+				*p2 = regptr1->paddr + regptr1->info.cbcr_off;
+				CDBG("vfe_config_axi: p2 = 0x%x\n", *p2);
+				p2++;
+			}
+			regptr1++;
+		}
+	}
+
+}
+
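+/*
+ * Copy a fixed-size command payload from userspace, but only after checking
+ * that the length reported by userspace matches the kernel structure size.
+ * On mismatch or copy failure this sets rc and breaks out of the enclosing
+ * switch case.
+ */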
+#define CHECKED_COPY_FROM_USER(in) {					\
+	if (cmd->length != sizeof(*(in))) {				\
+		pr_err("msm_camera: %s:%d cmd %d: user data size %d "	\
+			"!= kernel data size %zu\n",			\
+			__func__, __LINE__,				\
+			cmd->id, cmd->length, sizeof(*(in)));		\
+		rc = -EIO;						\
+		break;							\
+	}								\
+	if (copy_from_user((in), (void __user *)cmd->value,		\
+			sizeof(*(in)))) {				\
+		rc = -EFAULT;						\
+		break;							\
+	}								\
+}
+
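+/* Decode one general VFE command and call the matching vfe_* helper. */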
+static int vfe_proc_general(struct msm_vfe_command_8k *cmd)
+{
+	int rc = 0;
+
+	CDBG("%s: cmdID = %s\n", __func__, vfe_general_cmd[cmd->id]);
+
+	switch (cmd->id) {
+	case VFE_CMD_ID_RESET:
+		msm_camio_vfe_blk_reset();
+		msm_camio_camif_pad_reg_reset_2();
+		vfe_reset();
+		break;
+
+	case VFE_CMD_ID_START: {
+		struct vfe_cmd_start start;
+			CHECKED_COPY_FROM_USER(&start);
+
+		/* msm_camio_camif_pad_reg_reset_2(); */
+		msm_camio_camif_pad_reg_reset();
+		vfe_start(&start);
+	}
+		break;
+
+	case VFE_CMD_ID_CAMIF_CONFIG: {
+		struct vfe_cmd_camif_config camif;
+			CHECKED_COPY_FROM_USER(&camif);
+
+		vfe_camif_config(&camif);
+	}
+		break;
+
+	case VFE_CMD_ID_BLACK_LEVEL_CONFIG: {
+		struct vfe_cmd_black_level_config bl;
+			CHECKED_COPY_FROM_USER(&bl);
+
+		vfe_black_level_config(&bl);
+	}
+		break;
+
+	case VFE_CMD_ID_ROLL_OFF_CONFIG:{
+			/* rolloff is too big to be on the stack */
+			struct vfe_cmd_roll_off_config *rolloff =
+			    kmalloc(sizeof(struct vfe_cmd_roll_off_config),
+				    GFP_KERNEL);
+			if (!rolloff) {
+				pr_err("%s: out of memory\n", __func__);
+				rc = -ENOMEM;
+				break;
+			}
+			/* Wrap CHECKED_COPY_FROM_USER() in a do-while(0) loop
+			 * to make sure we free rolloff when copy_from_user()
+			 * fails.
+			 */
+			do {
+				CHECKED_COPY_FROM_USER(rolloff);
+				vfe_roll_off_config(rolloff);
+			} while (0);
+			kfree(rolloff);
+	}
+		break;
+
+	case VFE_CMD_ID_DEMUX_CHANNEL_GAIN_CONFIG: {
+		struct vfe_cmd_demux_channel_gain_config demuxc;
+			CHECKED_COPY_FROM_USER(&demuxc);
+
+		/* demux is always enabled.  */
+		vfe_demux_channel_gain_config(&demuxc);
+	}
+		break;
+
+	case VFE_CMD_ID_DEMOSAIC_CONFIG: {
+		struct vfe_cmd_demosaic_config demosaic;
+			CHECKED_COPY_FROM_USER(&demosaic);
+
+		vfe_demosaic_config(&demosaic);
+	}
+		break;
+
+	case VFE_CMD_ID_FOV_CROP_CONFIG:
+	case VFE_CMD_ID_FOV_CROP_UPDATE: {
+		struct vfe_cmd_fov_crop_config fov;
+			CHECKED_COPY_FROM_USER(&fov);
+
+		vfe_fov_crop_config(&fov);
+	}
+		break;
+
+	case VFE_CMD_ID_MAIN_SCALER_CONFIG:
+	case VFE_CMD_ID_MAIN_SCALER_UPDATE: {
+		struct vfe_cmd_main_scaler_config mainds;
+			CHECKED_COPY_FROM_USER(&mainds);
+
+		vfe_main_scaler_config(&mainds);
+	}
+		break;
+
+	case VFE_CMD_ID_WHITE_BALANCE_CONFIG:
+	case VFE_CMD_ID_WHITE_BALANCE_UPDATE: {
+		struct vfe_cmd_white_balance_config wb;
+			CHECKED_COPY_FROM_USER(&wb);
+
+		vfe_white_balance_config(&wb);
+	}
+		break;
+
+	case VFE_CMD_ID_COLOR_CORRECTION_CONFIG:
+	case VFE_CMD_ID_COLOR_CORRECTION_UPDATE: {
+		struct vfe_cmd_color_correction_config cc;
+			CHECKED_COPY_FROM_USER(&cc);
+
+		vfe_color_correction_config(&cc);
+	}
+		break;
+
+	case VFE_CMD_ID_LA_CONFIG: {
+		struct vfe_cmd_la_config la;
+			CHECKED_COPY_FROM_USER(&la);
+
+		vfe_la_config(&la);
+	}
+		break;
+
+	case VFE_CMD_ID_RGB_GAMMA_CONFIG: {
+		struct vfe_cmd_rgb_gamma_config rgb;
+			CHECKED_COPY_FROM_USER(&rgb);
+
+		rc = vfe_rgb_gamma_config(&rgb);
+	}
+		break;
+
+	case VFE_CMD_ID_CHROMA_ENHAN_CONFIG:
+	case VFE_CMD_ID_CHROMA_ENHAN_UPDATE: {
+		struct vfe_cmd_chroma_enhan_config chrom;
+			CHECKED_COPY_FROM_USER(&chrom);
+
+		vfe_chroma_enhan_config(&chrom);
+	}
+		break;
+
+	case VFE_CMD_ID_CHROMA_SUPPRESSION_CONFIG:
+	case VFE_CMD_ID_CHROMA_SUPPRESSION_UPDATE: {
+		struct vfe_cmd_chroma_suppression_config chromsup;
+			CHECKED_COPY_FROM_USER(&chromsup);
+
+		vfe_chroma_sup_config(&chromsup);
+	}
+		break;
+
+	case VFE_CMD_ID_ASF_CONFIG: {
+		struct vfe_cmd_asf_config asf;
+			CHECKED_COPY_FROM_USER(&asf);
+
+		vfe_asf_config(&asf);
+	}
+		break;
+
+	case VFE_CMD_ID_SCALER2Y_CONFIG:
+	case VFE_CMD_ID_SCALER2Y_UPDATE: {
+		struct vfe_cmd_scaler2_config ds2y;
+			CHECKED_COPY_FROM_USER(&ds2y);
+
+		vfe_scaler2y_config(&ds2y);
+	}
+		break;
+
+	case VFE_CMD_ID_SCALER2CbCr_CONFIG:
+	case VFE_CMD_ID_SCALER2CbCr_UPDATE: {
+		struct vfe_cmd_scaler2_config ds2cbcr;
+			CHECKED_COPY_FROM_USER(&ds2cbcr);
+
+		vfe_scaler2cbcr_config(&ds2cbcr);
+	}
+		break;
+
+	case VFE_CMD_ID_CHROMA_SUBSAMPLE_CONFIG: {
+		struct vfe_cmd_chroma_subsample_config sub;
+			CHECKED_COPY_FROM_USER(&sub);
+
+		vfe_chroma_subsample_config(&sub);
+	}
+		break;
+
+	case VFE_CMD_ID_FRAME_SKIP_CONFIG: {
+		struct vfe_cmd_frame_skip_config fskip;
+			CHECKED_COPY_FROM_USER(&fskip);
+
+		vfe_frame_skip_config(&fskip);
+	}
+		break;
+
+	case VFE_CMD_ID_OUTPUT_CLAMP_CONFIG: {
+		struct vfe_cmd_output_clamp_config clamp;
+			CHECKED_COPY_FROM_USER(&clamp);
+
+		vfe_output_clamp_config(&clamp);
+	}
+		break;
+
+	/* module update commands */
+	case VFE_CMD_ID_BLACK_LEVEL_UPDATE: {
+		struct vfe_cmd_black_level_config blk;
+			CHECKED_COPY_FROM_USER(&blk);
+
+		vfe_black_level_update(&blk);
+	}
+		break;
+
+	case VFE_CMD_ID_DEMUX_CHANNEL_GAIN_UPDATE: {
+		struct vfe_cmd_demux_channel_gain_config dmu;
+			CHECKED_COPY_FROM_USER(&dmu);
+
+		vfe_demux_channel_gain_update(&dmu);
+	}
+		break;
+
+	case VFE_CMD_ID_DEMOSAIC_BPC_UPDATE: {
+		struct vfe_cmd_demosaic_bpc_update demo_bpc;
+			CHECKED_COPY_FROM_USER(&demo_bpc);
+
+		vfe_demosaic_bpc_update(&demo_bpc);
+	}
+		break;
+
+	case VFE_CMD_ID_DEMOSAIC_ABF_UPDATE: {
+		struct vfe_cmd_demosaic_abf_update demo_abf;
+			CHECKED_COPY_FROM_USER(&demo_abf);
+
+		vfe_demosaic_abf_update(&demo_abf);
+	}
+		break;
+
+	case VFE_CMD_ID_LA_UPDATE: {
+		struct vfe_cmd_la_config la;
+			CHECKED_COPY_FROM_USER(&la);
+
+		vfe_la_update(&la);
+	}
+		break;
+
+	case VFE_CMD_ID_RGB_GAMMA_UPDATE: {
+		struct vfe_cmd_rgb_gamma_config rgb;
+			CHECKED_COPY_FROM_USER(&rgb);
+
+		rc = vfe_rgb_gamma_update(&rgb);
+	}
+		break;
+
+	case VFE_CMD_ID_ASF_UPDATE: {
+		struct vfe_cmd_asf_update asf;
+			CHECKED_COPY_FROM_USER(&asf);
+
+		vfe_asf_update(&asf);
+	}
+		break;
+
+	case VFE_CMD_ID_FRAME_SKIP_UPDATE: {
+		struct vfe_cmd_frame_skip_update fskip;
+			CHECKED_COPY_FROM_USER(&fskip);
+			/* output2Pattern == 0xffffffff starts recording;
+			 * 0 drops the AXI QoS back to preview. */
+			if (fskip.output2Pattern == 0xffffffff)
+				update_axi_qos(MSM_AXI_QOS_RECORDING);
+			else if (fskip.output2Pattern == 0)
+				update_axi_qos(MSM_AXI_QOS_PREVIEW);
+
+		vfe_frame_skip_update(&fskip);
+	}
+		break;
+
+	case VFE_CMD_ID_CAMIF_FRAME_UPDATE: {
+		struct vfe_cmds_camif_frame fup;
+			CHECKED_COPY_FROM_USER(&fup);
+
+		vfe_camif_frame_update(&fup);
+	}
+		break;
+
+	/* stats update commands */
+	case VFE_CMD_ID_STATS_AUTOFOCUS_UPDATE: {
+		struct vfe_cmd_stats_af_update afup;
+			CHECKED_COPY_FROM_USER(&afup);
+
+		vfe_stats_update_af(&afup);
+	}
+		break;
+
+	case VFE_CMD_ID_STATS_WB_EXP_UPDATE: {
+		struct vfe_cmd_stats_wb_exp_update wbexp;
+			CHECKED_COPY_FROM_USER(&wbexp);
+
+		vfe_stats_update_wb_exp(&wbexp);
+	}
+		break;
+
+	/* control of start, stop, update, etc... */
+	case VFE_CMD_ID_STOP:
+		vfe_stop();
+		break;
+
+	case VFE_CMD_ID_GET_HW_VERSION:
+		break;
+
+	/* stats */
+	case VFE_CMD_ID_STATS_SETTING: {
+		struct vfe_cmd_stats_setting stats;
+			CHECKED_COPY_FROM_USER(&stats);
+
+		vfe_stats_setting(&stats);
+	}
+		break;
+
+	case VFE_CMD_ID_STATS_AUTOFOCUS_START: {
+		struct vfe_cmd_stats_af_start af;
+			CHECKED_COPY_FROM_USER(&af);
+
+		vfe_stats_start_af(&af);
+	}
+		break;
+
+	case VFE_CMD_ID_STATS_AUTOFOCUS_STOP:
+		vfe_stats_af_stop();
+		break;
+
+	case VFE_CMD_ID_STATS_WB_EXP_START: {
+		struct vfe_cmd_stats_wb_exp_start awexp;
+			CHECKED_COPY_FROM_USER(&awexp);
+
+		vfe_stats_start_wb_exp(&awexp);
+	}
+		break;
+
+	case VFE_CMD_ID_STATS_WB_EXP_STOP:
+		vfe_stats_wb_exp_stop();
+		break;
+
+	case VFE_CMD_ID_ASYNC_TIMER_SETTING:
+		break;
+
+	case VFE_CMD_ID_UPDATE:
+		vfe_update();
+		break;
+
+	/* test gen */
+	case VFE_CMD_ID_TEST_GEN_START:
+		break;
+
+/*
+  Acknowledgements from the upper layer;
+  these are not handled as general commands.
+
+	case VFE_CMD_ID_OUTPUT1_ACK:
+		break;
+	case VFE_CMD_ID_OUTPUT2_ACK:
+		break;
+	case VFE_CMD_ID_EPOCH1_ACK:
+		break;
+	case VFE_CMD_ID_EPOCH2_ACK:
+		break;
+	case VFE_CMD_ID_STATS_AUTOFOCUS_ACK:
+		break;
+	case VFE_CMD_ID_STATS_WB_EXP_ACK:
+		break;
+*/
+
+	default:
+		pr_err("%s: invalid cmd id %d\n", __func__, cmd->id);
+		rc = -EINVAL;
+		break;
+	} /* switch */
+
+	return rc;
+}
+
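+/*
+ * Top-level 8x config handler: CMD_GENERAL is routed to vfe_proc_general(),
+ * buffer/stats release commands are acknowledged directly, and the AXI
+ * output configurations are range-checked before being programmed.
+ */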
+static int vfe_config(struct msm_vfe_cfg_cmd *cmd, void *data)
+{
+	struct msm_pmem_region *regptr;
+	struct msm_vfe_command_8k vfecmd;
+	struct vfe_cmd_axi_output_config axio;
+	struct axidata *axid = data;
+
+	int rc = 0;
+
+
+	if (cmd->cmd_type != CMD_FRAME_BUF_RELEASE &&
+		cmd->cmd_type != CMD_STATS_BUF_RELEASE &&
+		cmd->cmd_type != CMD_STATS_AF_BUF_RELEASE) {
+
+		if (copy_from_user(&vfecmd,
+			(void __user *)(cmd->value), sizeof(vfecmd))) {
+			pr_err("%s %d: copy_from_user failed\n",
+				__func__, __LINE__);
+			return -EFAULT;
+		}
+	}
+
+	CDBG("%s: cmdType = %d\n", __func__, cmd->cmd_type);
+
+	switch (cmd->cmd_type) {
+	case CMD_GENERAL:
+		rc = vfe_proc_general(&vfecmd);
+		break;
+
+	case CMD_STATS_ENABLE:
+	case CMD_STATS_AXI_CFG: {
+		int i;
+		struct vfe_cmd_stats_setting scfg;
+
+		BUG_ON(!axid);
+
+		if (vfecmd.length != sizeof(scfg)) {
+			pr_err("msm_camera: %s: cmd %d: user-space "
+				"data size %d != kernel data size %zu\n",
+				__func__, cmd->cmd_type,
+				vfecmd.length, sizeof(scfg));
+			return -EIO;
+		}
+
+		if (copy_from_user(&scfg,
+				(void __user *)(vfecmd.value),
+				sizeof(scfg))) {
+			pr_err("%s %d: copy_from_user failed\n",
+				__func__, __LINE__);
+			return -EFAULT;
+		}
+
+		regptr = axid->region;
+		if (axid->bufnum1 > 0) {
+			for (i = 0; i < axid->bufnum1; i++) {
+				scfg.awbBuffer[i] =
+					(uint32_t)(regptr->paddr);
+				regptr++;
+			}
+		}
+
+		if (axid->bufnum2 > 0) {
+			for (i = 0; i < axid->bufnum2; i++) {
+				scfg.afBuffer[i] =
+					(uint32_t)(regptr->paddr);
+				regptr++;
+			}
+		}
+
+		vfe_stats_setting(&scfg);
+	}
+		break;
+
+	case CMD_STATS_AF_AXI_CFG:
+		break;
+
+	case CMD_FRAME_BUF_RELEASE: {
+		/* preview buffer release */
+		struct msm_frame *b;
+		unsigned long p;
+		struct vfe_cmd_output_ack fack;
+
+			BUG_ON(!data);
+
+		b = (struct msm_frame *)(cmd->value);
+		p = *(unsigned long *)data;
+
+			fack.ybufaddr[0] = (uint32_t) (p + b->y_off);
+
+			fack.chromabufaddr[0] = (uint32_t) (p + b->cbcr_off);
+
+		if (b->path == OUTPUT_TYPE_P)
+			vfe_output_p_ack(&fack);
+
+		if ((b->path == OUTPUT_TYPE_V)
+			 || (b->path == OUTPUT_TYPE_S))
+			vfe_output_v_ack(&fack);
+	}
+		break;
+
+	case CMD_SNAP_BUF_RELEASE:
+		break;
+
+	case CMD_STATS_BUF_RELEASE: {
+		struct vfe_cmd_stats_wb_exp_ack sack;
+
+			BUG_ON(!data);
+
+		sack.nextWbExpOutputBufferAddr = *(uint32_t *)data;
+		vfe_stats_wb_exp_ack(&sack);
+	}
+		break;
+
+	case CMD_STATS_AF_BUF_RELEASE: {
+		struct vfe_cmd_stats_af_ack ack;
+
+			BUG_ON(!data);
+
+		ack.nextAFOutputBufferAddr = *(uint32_t *)data;
+		vfe_stats_af_ack(&ack);
+	}
+		break;
+
+	case CMD_AXI_CFG_PREVIEW:
+	case CMD_RAW_PICT_AXI_CFG: {
+
+		BUG_ON(!axid);
+
+		if (copy_from_user(&axio, (void __user *)(vfecmd.value),
+				sizeof(axio))) {
+			pr_err("%s %d: copy_from_user failed\n",
+				__func__, __LINE__);
+			return -EFAULT;
+		}
+		/* Validate the data from user space */
+		if (axio.output2.fragmentCount <
+		    VFE_MIN_NUM_FRAGMENTS_PER_FRAME ||
+		    axio.output2.fragmentCount >
+		    VFE_MAX_NUM_FRAGMENTS_PER_FRAME)
+			return -EINVAL;
+
+		vfe_config_axi(OUTPUT_2, axid, &axio);
+		axio.outputDataSize = 0;
+		vfe_axi_output_config(&axio);
+	}
+		break;
+
+	case CMD_AXI_CFG_SNAP: {
+
+		BUG_ON(!axid);
+
+		if (copy_from_user(&axio, (void __user *)(vfecmd.value),
+				sizeof(axio))) {
+			pr_err("%s %d: copy_from_user failed\n",
+				__func__, __LINE__);
+			return -EFAULT;
+		}
+		/* Validate the data from user space */
+		if (axio.output1.fragmentCount <
+		    VFE_MIN_NUM_FRAGMENTS_PER_FRAME ||
+		    axio.output1.fragmentCount >
+		    VFE_MAX_NUM_FRAGMENTS_PER_FRAME ||
+		    axio.output2.fragmentCount <
+		    VFE_MIN_NUM_FRAGMENTS_PER_FRAME ||
+		    axio.output2.fragmentCount >
+		    VFE_MAX_NUM_FRAGMENTS_PER_FRAME)
+			return -EINVAL;
+
+		vfe_config_axi(OUTPUT_1_AND_2, axid, &axio);
+		vfe_axi_output_config(&axio);
+	}
+		break;
+
+	case CMD_AXI_CFG_VIDEO: {
+		BUG_ON(!axid);
+
+		if (copy_from_user(&axio, (void __user *)(vfecmd.value),
+				sizeof(axio))) {
+			pr_err("%s %d: copy_from_user failed\n",
+				__func__, __LINE__);
+			return -EFAULT;
+		}
+		/* Validate the data from user space */
+		if (axio.output1.fragmentCount <
+		    VFE_MIN_NUM_FRAGMENTS_PER_FRAME ||
+		    axio.output1.fragmentCount >
+		    VFE_MAX_NUM_FRAGMENTS_PER_FRAME ||
+		    axio.output2.fragmentCount <
+		    VFE_MIN_NUM_FRAGMENTS_PER_FRAME ||
+		    axio.output2.fragmentCount >
+		    VFE_MAX_NUM_FRAGMENTS_PER_FRAME)
+			return -EINVAL;
+
+		vfe_config_axi(OUTPUT_1_AND_3, axid, &axio);
+		axio.outputDataSize = 0;
+		vfe_axi_output_config(&axio);
+	}
+		break;
+
+	default:
+		break;
+	} /* switch */
+
+	return rc;
+}
+
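+/*
+ * Register the response callback with the VFE core, then bring up the
+ * camera GPIOs and clocks.
+ */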
+static int vfe_init(struct msm_vfe_callback *presp, struct platform_device *dev)
+{
+	int rc = 0;
+
+	rc = vfe_cmd_init(presp, dev, vfe_syncdata);
+	if (rc < 0)
+		return rc;
+
+	/* Bring up all the required GPIOs and Clocks */
+	rc = msm_camio_enable(dev);
+
+	return rc;
+}
+
+void msm_camvfe_fn_init(struct msm_camvfe_fn *fptr, void *data)
+{
+	fptr->vfe_init    = vfe_init;
+	fptr->vfe_enable  = vfe_enable;
+	fptr->vfe_config  = vfe_config;
+	fptr->vfe_disable = vfe_disable;
+	fptr->vfe_release = vfe_release;
+	vfe_syncdata = data;
+}
+
+void msm_camvpe_fn_init(struct msm_camvpe_fn *fptr, void *data)
+{
+	fptr->vpe_reg		= NULL;
+	fptr->send_frame_to_vpe	= NULL;
+	fptr->vpe_config	= NULL;
+	fptr->vpe_cfg_update	= NULL;
+	fptr->dis		= NULL;
+}
diff --git a/drivers/media/video/msm/msm_vfe8x.h b/drivers/media/video/msm/msm_vfe8x.h
new file mode 100644
index 0000000..1b3148f
--- /dev/null
+++ b/drivers/media/video/msm/msm_vfe8x.h
@@ -0,0 +1,909 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MSM_VFE8X_H__
+#define __MSM_VFE8X_H__
+
+#define TRUE  1
+#define FALSE 0
+#define boolean uint8_t
+
+enum  VFE_STATE {
+	VFE_STATE_IDLE,
+	VFE_STATE_ACTIVE
+};
+
+enum vfe_cmd_id {
+	/*
+	 * Important! Command IDs are arranged in order.  Don't change!
+	 */
+	VFE_CMD_ID_START,
+	VFE_CMD_ID_RESET,
+
+	/* bus and camif config */
+	VFE_CMD_ID_AXI_INPUT_CONFIG,
+	VFE_CMD_ID_CAMIF_CONFIG,
+	VFE_CMD_ID_AXI_OUTPUT_CONFIG,
+
+	/* module config  */
+	VFE_CMD_ID_BLACK_LEVEL_CONFIG,
+	VFE_CMD_ID_ROLL_OFF_CONFIG,
+	VFE_CMD_ID_DEMUX_CHANNEL_GAIN_CONFIG,
+	VFE_CMD_ID_DEMOSAIC_CONFIG,
+	VFE_CMD_ID_FOV_CROP_CONFIG,
+	VFE_CMD_ID_MAIN_SCALER_CONFIG,
+	VFE_CMD_ID_WHITE_BALANCE_CONFIG,
+	VFE_CMD_ID_COLOR_CORRECTION_CONFIG,
+	VFE_CMD_ID_LA_CONFIG,
+	VFE_CMD_ID_RGB_GAMMA_CONFIG,
+	VFE_CMD_ID_CHROMA_ENHAN_CONFIG,
+	VFE_CMD_ID_CHROMA_SUPPRESSION_CONFIG,
+	VFE_CMD_ID_ASF_CONFIG,
+	VFE_CMD_ID_SCALER2Y_CONFIG,
+	VFE_CMD_ID_SCALER2CbCr_CONFIG,
+	VFE_CMD_ID_CHROMA_SUBSAMPLE_CONFIG,
+	VFE_CMD_ID_FRAME_SKIP_CONFIG,
+	VFE_CMD_ID_OUTPUT_CLAMP_CONFIG,
+
+	/* test gen */
+	VFE_CMD_ID_TEST_GEN_START,
+
+	VFE_CMD_ID_UPDATE,
+
+	/* acknowledge from upper layer */
+	VFE_CMD_ID_OUTPUT1_ACK,
+	VFE_CMD_ID_OUTPUT2_ACK,
+	VFE_CMD_ID_EPOCH1_ACK,
+	VFE_CMD_ID_EPOCH2_ACK,
+	VFE_CMD_ID_STATS_AUTOFOCUS_ACK,
+	VFE_CMD_ID_STATS_WB_EXP_ACK,
+
+	/* module update commands */
+	VFE_CMD_ID_BLACK_LEVEL_UPDATE,
+	VFE_CMD_ID_DEMUX_CHANNEL_GAIN_UPDATE,
+	VFE_CMD_ID_DEMOSAIC_BPC_UPDATE,
+	VFE_CMD_ID_DEMOSAIC_ABF_UPDATE,
+	VFE_CMD_ID_FOV_CROP_UPDATE,
+	VFE_CMD_ID_WHITE_BALANCE_UPDATE,
+	VFE_CMD_ID_COLOR_CORRECTION_UPDATE,
+	VFE_CMD_ID_LA_UPDATE,
+	VFE_CMD_ID_RGB_GAMMA_UPDATE,
+	VFE_CMD_ID_CHROMA_ENHAN_UPDATE,
+	VFE_CMD_ID_CHROMA_SUPPRESSION_UPDATE,
+	VFE_CMD_ID_MAIN_SCALER_UPDATE,
+	VFE_CMD_ID_SCALER2CbCr_UPDATE,
+	VFE_CMD_ID_SCALER2Y_UPDATE,
+	VFE_CMD_ID_ASF_UPDATE,
+	VFE_CMD_ID_FRAME_SKIP_UPDATE,
+	VFE_CMD_ID_CAMIF_FRAME_UPDATE,
+
+	/* stats update commands */
+	VFE_CMD_ID_STATS_AUTOFOCUS_UPDATE,
+	VFE_CMD_ID_STATS_WB_EXP_UPDATE,
+
+	/* control of start, stop, update, etc... */
+	VFE_CMD_ID_STOP,
+	VFE_CMD_ID_GET_HW_VERSION,
+
+	/* stats */
+	VFE_CMD_ID_STATS_SETTING,
+	VFE_CMD_ID_STATS_AUTOFOCUS_START,
+	VFE_CMD_ID_STATS_AUTOFOCUS_STOP,
+	VFE_CMD_ID_STATS_WB_EXP_START,
+	VFE_CMD_ID_STATS_WB_EXP_STOP,
+
+	VFE_CMD_ID_ASYNC_TIMER_SETTING,
+
+	/* max id  */
+	VFE_CMD_ID_MAX
+};
+
+struct vfe_cmd_hw_version {
+	uint32_t minorVersion;
+	uint32_t majorVersion;
+	uint32_t coreVersion;
+};
+
+enum VFE_CAMIF_SYNC_EDGE {
+	VFE_CAMIF_SYNC_EDGE_ActiveHigh,
+	VFE_CAMIF_SYNC_EDGE_ActiveLow
+};
+
+enum VFE_CAMIF_SYNC_MODE {
+	VFE_CAMIF_SYNC_MODE_APS,
+	VFE_CAMIF_SYNC_MODE_EFS,
+	VFE_CAMIF_SYNC_MODE_ELS,
+	VFE_CAMIF_SYNC_MODE_ILLEGAL
+};
+
+struct vfe_cmds_camif_efs {
+	uint8_t efsendofline;
+	uint8_t efsstartofline;
+	uint8_t efsendofframe;
+	uint8_t efsstartofframe;
+};
+
+struct vfe_cmds_camif_frame {
+	uint16_t pixelsPerLine;
+	uint16_t linesPerFrame;
+};
+
+struct vfe_cmds_camif_window {
+	uint16_t firstpixel;
+	uint16_t lastpixel;
+	uint16_t firstline;
+	uint16_t lastline;
+};
+
+enum CAMIF_SUBSAMPLE_FRAME_SKIP {
+	CAMIF_SUBSAMPLE_FRAME_SKIP_0,
+	CAMIF_SUBSAMPLE_FRAME_SKIP_AllFrames,
+	CAMIF_SUBSAMPLE_FRAME_SKIP_ONE_OUT_OF_EVERY_2Frame,
+	CAMIF_SUBSAMPLE_FRAME_SKIP_ONE_OUT_OF_EVERY_3Frame,
+	CAMIF_SUBSAMPLE_FRAME_SKIP_ONE_OUT_OF_EVERY_4Frame,
+	CAMIF_SUBSAMPLE_FRAME_SKIP_ONE_OUT_OF_EVERY_5Frame,
+	CAMIF_SUBSAMPLE_FRAME_SKIP_ONE_OUT_OF_EVERY_6Frame,
+	CAMIF_SUBSAMPLE_FRAME_SKIP_ONE_OUT_OF_EVERY_7Frame,
+	CAMIF_SUBSAMPLE_FRAME_SKIP_ONE_OUT_OF_EVERY_8Frame,
+	CAMIF_SUBSAMPLE_FRAME_SKIP_ONE_OUT_OF_EVERY_9Frame,
+	CAMIF_SUBSAMPLE_FRAME_SKIP_ONE_OUT_OF_EVERY_10Frame,
+	CAMIF_SUBSAMPLE_FRAME_SKIP_ONE_OUT_OF_EVERY_11Frame,
+	CAMIF_SUBSAMPLE_FRAME_SKIP_ONE_OUT_OF_EVERY_12Frame,
+	CAMIF_SUBSAMPLE_FRAME_SKIP_ONE_OUT_OF_EVERY_13Frame,
+	CAMIF_SUBSAMPLE_FRAME_SKIP_ONE_OUT_OF_EVERY_14Frame,
+	CAMIF_SUBSAMPLE_FRAME_SKIP_ONE_OUT_OF_EVERY_15Frame
+};
+
+struct vfe_cmds_camif_subsample {
+	uint16_t pixelskipmask;
+	uint16_t lineskipmask;
+	enum CAMIF_SUBSAMPLE_FRAME_SKIP frameskip;
+	uint8_t frameskipmode;
+	uint8_t pixelskipwrap;
+};
+
+struct vfe_cmds_camif_epoch {
+	uint8_t  enable;
+	uint16_t lineindex;
+};
+
+struct vfe_cmds_camif_cfg {
+	enum VFE_CAMIF_SYNC_EDGE  vSyncEdge;
+	enum VFE_CAMIF_SYNC_EDGE  hSyncEdge;
+	enum VFE_CAMIF_SYNC_MODE  syncMode;
+	uint8_t vfeSubSampleEnable;
+	uint8_t busSubSampleEnable;
+	uint8_t irqSubSampleEnable;
+	uint8_t binningEnable;
+	uint8_t misrEnable;
+};
+
+struct vfe_cmd_camif_config {
+	struct vfe_cmds_camif_cfg camifConfig;
+	struct vfe_cmds_camif_efs EFS;
+	struct vfe_cmds_camif_frame     frame;
+	struct vfe_cmds_camif_window    window;
+	struct vfe_cmds_camif_subsample subsample;
+	struct vfe_cmds_camif_epoch     epoch1;
+	struct vfe_cmds_camif_epoch     epoch2;
+};
+
+enum VFE_AXI_OUTPUT_MODE {
+	VFE_AXI_OUTPUT_MODE_Output1,
+	VFE_AXI_OUTPUT_MODE_Output2,
+	VFE_AXI_OUTPUT_MODE_Output1AndOutput2,
+	VFE_AXI_OUTPUT_MODE_CAMIFToAXIViaOutput2,
+	VFE_AXI_OUTPUT_MODE_Output2AndCAMIFToAXIViaOutput1,
+	VFE_AXI_OUTPUT_MODE_Output1AndCAMIFToAXIViaOutput2,
+	VFE_AXI_LAST_OUTPUT_MODE_ENUM
+};
+
+enum VFE_RAW_WR_PATH_SEL {
+	VFE_RAW_OUTPUT_DISABLED,
+	VFE_RAW_OUTPUT_ENC_CBCR_PATH,
+	VFE_RAW_OUTPUT_VIEW_CBCR_PATH,
+	VFE_RAW_OUTPUT_PATH_INVALID
+};
+
+enum VFE_RAW_PIXEL_DATA_SIZE {
+	VFE_RAW_PIXEL_DATA_SIZE_8BIT,
+	VFE_RAW_PIXEL_DATA_SIZE_10BIT,
+	VFE_RAW_PIXEL_DATA_SIZE_12BIT,
+};
+
+#define VFE_AXI_OUTPUT_BURST_LENGTH     4
+#define VFE_MAX_NUM_FRAGMENTS_PER_FRAME 4
+#define VFE_MIN_NUM_FRAGMENTS_PER_FRAME 1
+#define VFE_AXI_OUTPUT_CFG_FRAME_COUNT  3
+
+struct vfe_cmds_axi_out_per_component {
+	uint16_t imageWidth;
+	uint16_t imageHeight;
+	uint16_t outRowCount;
+	uint16_t outRowIncrement;
+	uint32_t outFragments[VFE_AXI_OUTPUT_CFG_FRAME_COUNT]
+		[VFE_MAX_NUM_FRAGMENTS_PER_FRAME];
+};
+
+struct vfe_cmds_axi_per_output_path {
+	uint8_t fragmentCount;
+	struct vfe_cmds_axi_out_per_component outputY;
+	struct vfe_cmds_axi_out_per_component outputCbcr;
+};
+
+enum VFE_AXI_BURST_LENGTH {
+	VFE_AXI_BURST_LENGTH_IS_2  = 2,
+	VFE_AXI_BURST_LENGTH_IS_4  = 4,
+	VFE_AXI_BURST_LENGTH_IS_8  = 8,
+	VFE_AXI_BURST_LENGTH_IS_16 = 16
+};
+
+struct vfe_cmd_axi_output_config {
+	enum VFE_AXI_BURST_LENGTH burstLength;
+	enum VFE_AXI_OUTPUT_MODE outputMode;
+	enum VFE_RAW_PIXEL_DATA_SIZE outputDataSize;
+	struct vfe_cmds_axi_per_output_path output1;
+	struct vfe_cmds_axi_per_output_path output2;
+};
+
+struct vfe_cmd_fov_crop_config {
+	uint8_t enable;
+	uint16_t firstPixel;
+	uint16_t lastPixel;
+	uint16_t firstLine;
+	uint16_t lastLine;
+};
+
+struct vfe_cmds_main_scaler_stripe_init {
+	uint16_t MNCounterInit;
+	uint16_t phaseInit;
+};
+
+struct vfe_cmds_scaler_one_dimension {
+	uint8_t  enable;
+	uint16_t inputSize;
+	uint16_t outputSize;
+	uint32_t phaseMultiplicationFactor;
+	uint8_t  interpolationResolution;
+};
+
+struct vfe_cmd_main_scaler_config {
+	uint8_t enable;
+	struct vfe_cmds_scaler_one_dimension    hconfig;
+	struct vfe_cmds_scaler_one_dimension    vconfig;
+	struct vfe_cmds_main_scaler_stripe_init MNInitH;
+	struct vfe_cmds_main_scaler_stripe_init MNInitV;
+};
+
+struct vfe_cmd_scaler2_config {
+	uint8_t enable;
+	struct vfe_cmds_scaler_one_dimension hconfig;
+	struct vfe_cmds_scaler_one_dimension vconfig;
+};
+
+struct vfe_cmd_frame_skip_config {
+	uint8_t output1Period;
+	uint32_t output1Pattern;
+	uint8_t output2Period;
+	uint32_t output2Pattern;
+};
+
+struct vfe_cmd_frame_skip_update {
+	uint32_t output1Pattern;
+	uint32_t output2Pattern;
+};
+
+struct vfe_cmd_output_clamp_config {
+	uint8_t minCh0;
+	uint8_t minCh1;
+	uint8_t minCh2;
+	uint8_t maxCh0;
+	uint8_t maxCh1;
+	uint8_t maxCh2;
+};
+
+struct vfe_cmd_chroma_subsample_config {
+	uint8_t enable;
+	uint8_t cropEnable;
+	uint8_t vsubSampleEnable;
+	uint8_t hsubSampleEnable;
+	uint8_t vCosited;
+	uint8_t hCosited;
+	uint8_t vCositedPhase;
+	uint8_t hCositedPhase;
+	uint16_t cropWidthFirstPixel;
+	uint16_t cropWidthLastPixel;
+	uint16_t cropHeightFirstLine;
+	uint16_t cropHeightLastLine;
+};
+
+enum VFE_START_INPUT_SOURCE {
+	VFE_START_INPUT_SOURCE_CAMIF,
+	VFE_START_INPUT_SOURCE_TESTGEN,
+	VFE_START_INPUT_SOURCE_AXI,
+	VFE_START_INPUT_SOURCE_INVALID
+};
+
+enum VFE_START_OPERATION_MODE {
+	VFE_START_OPERATION_MODE_CONTINUOUS,
+	VFE_START_OPERATION_MODE_SNAPSHOT
+};
+
+enum VFE_START_PIXEL_PATTERN {
+	VFE_BAYER_RGRGRG,
+	VFE_BAYER_GRGRGR,
+	VFE_BAYER_BGBGBG,
+	VFE_BAYER_GBGBGB,
+	VFE_YUV_YCbYCr,
+	VFE_YUV_YCrYCb,
+	VFE_YUV_CbYCrY,
+	VFE_YUV_CrYCbY
+};
+
+enum VFE_BUS_RD_INPUT_PIXEL_PATTERN {
+	VFE_BAYER_RAW,
+	VFE_YUV_INTERLEAVED,
+	VFE_YUV_PSEUDO_PLANAR_Y,
+	VFE_YUV_PSEUDO_PLANAR_CBCR
+};
+
+enum VFE_YUV_INPUT_COSITING_MODE {
+	VFE_YUV_COSITED,
+	VFE_YUV_INTERPOLATED
+};
+
+struct vfe_cmd_start {
+	enum VFE_START_INPUT_SOURCE inputSource;
+	enum VFE_START_OPERATION_MODE operationMode;
+	uint8_t     snapshotCount;
+	enum VFE_START_PIXEL_PATTERN pixel;
+	enum VFE_YUV_INPUT_COSITING_MODE yuvInputCositingMode;
+};
+
+struct vfe_cmd_output_ack {
+	uint32_t ybufaddr[VFE_MAX_NUM_FRAGMENTS_PER_FRAME];
+	uint32_t chromabufaddr[VFE_MAX_NUM_FRAGMENTS_PER_FRAME];
+};
+
+#define VFE_STATS_BUFFER_COUNT 3
+
+struct vfe_cmd_stats_setting {
+	uint16_t frameHDimension;
+	uint16_t frameVDimension;
+	uint8_t  afBusPrioritySelection;
+	uint8_t  afBusPriority;
+	uint8_t  awbBusPrioritySelection;
+	uint8_t  awbBusPriority;
+	uint8_t  histBusPrioritySelection;
+	uint8_t  histBusPriority;
+	uint32_t afBuffer[VFE_STATS_BUFFER_COUNT];
+	uint32_t awbBuffer[VFE_STATS_BUFFER_COUNT];
+	uint32_t histBuffer[VFE_STATS_BUFFER_COUNT];
+};
+
+struct vfe_cmd_stats_af_start {
+	uint8_t  enable;
+	uint8_t  windowMode;
+	uint16_t windowHOffset;
+	uint16_t windowVOffset;
+	uint16_t windowWidth;
+	uint16_t windowHeight;
+	uint8_t  gridForMultiWindows[16];
+	uint8_t     metricSelection;
+	int16_t  metricMax;
+	int8_t   highPassCoef[7];
+	int8_t   bufferHeader;
+};
+
+struct vfe_cmd_stats_af_update {
+	uint8_t  windowMode;
+	uint16_t windowHOffset;
+	uint16_t windowVOffset;
+	uint16_t windowWidth;
+	uint16_t windowHeight;
+};
+
+struct vfe_cmd_stats_wb_exp_start {
+	uint8_t   enable;
+	uint8_t   wbExpRegions;
+	uint8_t   wbExpSubRegion;
+	uint8_t   awbYMin;
+	uint8_t   awbYMax;
+	int8_t    awbMCFG[4];
+	int16_t   awbCCFG[4];
+	int8_t    axwHeader;
+};
+
+struct vfe_cmd_stats_wb_exp_update {
+	uint8_t wbExpRegions;
+	uint8_t wbExpSubRegion;
+	int8_t  awbYMin;
+	int8_t  awbYMax;
+	int8_t  awbMCFG[4];
+	int16_t awbCCFG[4];
+};
+
+struct vfe_cmd_stats_af_ack {
+	uint32_t nextAFOutputBufferAddr;
+};
+
+struct vfe_cmd_stats_wb_exp_ack {
+	uint32_t  nextWbExpOutputBufferAddr;
+};
+
+struct vfe_cmd_black_level_config {
+	uint8_t  enable;
+	uint16_t evenEvenAdjustment;
+	uint16_t evenOddAdjustment;
+	uint16_t oddEvenAdjustment;
+	uint16_t oddOddAdjustment;
+};
+
+/* 13*1  */
+#define  VFE_ROLL_OFF_INIT_TABLE_SIZE  13
+/* 13*16 */
+#define  VFE_ROLL_OFF_DELTA_TABLE_SIZE 208
+
+struct vfe_cmd_roll_off_config {
+	uint8_t  enable;
+	uint16_t gridWidth;
+	uint16_t gridHeight;
+	uint16_t  yDelta;
+	uint8_t  gridXIndex;
+	uint8_t  gridYIndex;
+	uint16_t gridPixelXIndex;
+	uint16_t gridPixelYIndex;
+	uint16_t yDeltaAccum;
+	uint16_t initTableR[VFE_ROLL_OFF_INIT_TABLE_SIZE];
+	uint16_t initTableGr[VFE_ROLL_OFF_INIT_TABLE_SIZE];
+	uint16_t initTableB[VFE_ROLL_OFF_INIT_TABLE_SIZE];
+	uint16_t initTableGb[VFE_ROLL_OFF_INIT_TABLE_SIZE];
+	int16_t  deltaTableR[VFE_ROLL_OFF_DELTA_TABLE_SIZE];
+	int16_t  deltaTableGr[VFE_ROLL_OFF_DELTA_TABLE_SIZE];
+	int16_t  deltaTableB[VFE_ROLL_OFF_DELTA_TABLE_SIZE];
+	int16_t  deltaTableGb[VFE_ROLL_OFF_DELTA_TABLE_SIZE];
+};
+
+struct vfe_cmd_demux_channel_gain_config {
+	uint16_t ch0EvenGain;
+	uint16_t ch0OddGain;
+	uint16_t ch1Gain;
+	uint16_t ch2Gain;
+};
+
+struct vfe_cmds_demosaic_abf {
+	uint8_t   enable;
+	uint8_t   forceOn;
+	uint8_t   shift;
+	uint16_t  lpThreshold;
+	uint16_t  max;
+	uint16_t  min;
+	uint8_t   ratio;
+};
+
+struct vfe_cmds_demosaic_bpc {
+	uint8_t   enable;
+	uint16_t  fmaxThreshold;
+	uint16_t  fminThreshold;
+	uint16_t  redDiffThreshold;
+	uint16_t  blueDiffThreshold;
+	uint16_t  greenDiffThreshold;
+};
+
+struct vfe_cmd_demosaic_config {
+	uint8_t   enable;
+	uint8_t   slopeShift;
+	struct vfe_cmds_demosaic_abf abfConfig;
+	struct vfe_cmds_demosaic_bpc bpcConfig;
+};
+
+struct vfe_cmd_demosaic_bpc_update {
+	struct vfe_cmds_demosaic_bpc bpcUpdate;
+};
+
+struct vfe_cmd_demosaic_abf_update {
+	struct vfe_cmds_demosaic_abf abfUpdate;
+};
+
+struct vfe_cmd_white_balance_config {
+	uint8_t  enable;
+	uint16_t ch2Gain;
+	uint16_t ch1Gain;
+	uint16_t ch0Gain;
+};
+
+enum VFE_COLOR_CORRECTION_COEF_QFACTOR {
+	COEF_IS_Q7_SIGNED,
+	COEF_IS_Q8_SIGNED,
+	COEF_IS_Q9_SIGNED,
+	COEF_IS_Q10_SIGNED
+};
+
+struct vfe_cmd_color_correction_config {
+	uint8_t     enable;
+	enum VFE_COLOR_CORRECTION_COEF_QFACTOR coefQFactor;
+	int16_t  C0;
+	int16_t  C1;
+	int16_t  C2;
+	int16_t  C3;
+	int16_t  C4;
+	int16_t  C5;
+	int16_t  C6;
+	int16_t  C7;
+	int16_t  C8;
+	int16_t  K0;
+	int16_t  K1;
+	int16_t  K2;
+};
+
+#define VFE_LA_TABLE_LENGTH 256
+struct vfe_cmd_la_config {
+	uint8_t enable;
+	int16_t table[VFE_LA_TABLE_LENGTH];
+};
+
+#define VFE_GAMMA_TABLE_LENGTH 256
+enum VFE_RGB_GAMMA_TABLE_SELECT {
+	RGB_GAMMA_CH0_SELECTED,
+	RGB_GAMMA_CH1_SELECTED,
+	RGB_GAMMA_CH2_SELECTED,
+	RGB_GAMMA_CH0_CH1_SELECTED,
+	RGB_GAMMA_CH0_CH2_SELECTED,
+	RGB_GAMMA_CH1_CH2_SELECTED,
+	RGB_GAMMA_CH0_CH1_CH2_SELECTED
+};
+
+struct vfe_cmd_rgb_gamma_config {
+	uint8_t enable;
+	enum VFE_RGB_GAMMA_TABLE_SELECT channelSelect;
+	int16_t table[VFE_GAMMA_TABLE_LENGTH];
+};
+
+struct vfe_cmd_chroma_enhan_config {
+	uint8_t  enable;
+	int16_t am;
+	int16_t ap;
+	int16_t bm;
+	int16_t bp;
+	int16_t cm;
+	int16_t cp;
+	int16_t dm;
+	int16_t dp;
+	int16_t kcr;
+	int16_t kcb;
+	int16_t RGBtoYConversionV0;
+	int16_t RGBtoYConversionV1;
+	int16_t RGBtoYConversionV2;
+	uint8_t RGBtoYConversionOffset;
+};
+
+struct vfe_cmd_chroma_suppression_config {
+	uint8_t enable;
+	uint8_t m1;
+	uint8_t m3;
+	uint8_t n1;
+	uint8_t n3;
+	uint8_t nn1;
+	uint8_t mm1;
+};
+
+struct vfe_cmd_asf_config {
+	uint8_t enable;
+	uint8_t smoothFilterEnabled;
+	uint8_t sharpMode;
+	uint8_t smoothCoefCenter;
+	uint8_t smoothCoefSurr;
+	uint8_t normalizeFactor;
+	uint8_t sharpK1;
+	uint8_t sharpK2;
+	uint8_t sharpThreshE1;
+	int8_t sharpThreshE2;
+	int8_t sharpThreshE3;
+	int8_t sharpThreshE4;
+	int8_t sharpThreshE5;
+	int8_t filter1Coefficients[9];
+	int8_t filter2Coefficients[9];
+	uint8_t  cropEnable;
+	uint16_t cropFirstPixel;
+	uint16_t cropLastPixel;
+	uint16_t cropFirstLine;
+	uint16_t cropLastLine;
+};
+
+struct vfe_cmd_asf_update {
+	uint8_t enable;
+	uint8_t smoothFilterEnabled;
+	uint8_t sharpMode;
+	uint8_t smoothCoefCenter;
+	uint8_t smoothCoefSurr;
+	uint8_t normalizeFactor;
+	uint8_t sharpK1;
+	uint8_t sharpK2;
+	uint8_t sharpThreshE1;
+	int8_t  sharpThreshE2;
+	int8_t  sharpThreshE3;
+	int8_t  sharpThreshE4;
+	int8_t  sharpThreshE5;
+	int8_t  filter1Coefficients[9];
+	int8_t  filter2Coefficients[9];
+	uint8_t cropEnable;
+};
+
+enum VFE_TEST_GEN_SYNC_EDGE {
+	VFE_TEST_GEN_SYNC_EDGE_ActiveHigh,
+	VFE_TEST_GEN_SYNC_EDGE_ActiveLow
+};
+
+struct vfe_cmd_test_gen_start {
+	uint8_t pixelDataSelect;
+	uint8_t systematicDataSelect;
+	enum VFE_TEST_GEN_SYNC_EDGE  hsyncEdge;
+	enum VFE_TEST_GEN_SYNC_EDGE  vsyncEdge;
+	uint16_t numFrame;
+	enum VFE_RAW_PIXEL_DATA_SIZE pixelDataSize;
+	uint16_t imageWidth;
+	uint16_t imageHeight;
+	uint32_t startOfFrameOffset;
+	uint32_t endOfFrameNOffset;
+	uint16_t startOfLineOffset;
+	uint16_t endOfLineNOffset;
+	uint16_t hbi;
+	uint8_t  vblEnable;
+	uint16_t vbl;
+	uint8_t  startOfFrameDummyLine;
+	uint8_t  endOfFrameDummyLine;
+	uint8_t  unicolorBarEnable;
+	uint8_t  colorBarsSplitEnable;
+	uint8_t  unicolorBarSelect;
+	enum VFE_START_PIXEL_PATTERN  colorBarsPixelPattern;
+	uint8_t  colorBarsRotatePeriod;
+	uint16_t testGenRandomSeed;
+};
+
+struct vfe_cmd_bus_pm_start {
+	uint8_t output2YWrPmEnable;
+	uint8_t output2CbcrWrPmEnable;
+	uint8_t output1YWrPmEnable;
+	uint8_t output1CbcrWrPmEnable;
+};
+
+struct vfe_cmd_camif_frame_update {
+	struct vfe_cmds_camif_frame camifFrame;
+};
+
+struct vfe_cmd_sync_timer_setting {
+	uint8_t  whichSyncTimer;
+	uint8_t  operation;
+	uint8_t  polarity;
+	uint16_t repeatCount;
+	uint16_t hsyncCount;
+	uint32_t pclkCount;
+	uint32_t outputDuration;
+};
+
+struct vfe_cmd_async_timer_setting {
+	uint8_t  whichAsyncTimer;
+	uint8_t  operation;
+	uint8_t  polarity;
+	uint16_t repeatCount;
+	uint16_t inactiveCount;
+	uint32_t activeCount;
+};
+
+struct  vfe_frame_skip_counts {
+	uint32_t  totalFrameCount;
+	uint32_t  output1Count;
+	uint32_t  output2Count;
+};
+
+enum VFE_AXI_RD_UNPACK_HBI_SEL {
+	VFE_AXI_RD_HBI_32_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_64_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_128_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_256_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_512_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_1024_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_2048_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_4096_CLOCK_CYCLES
+};
+
+struct vfe_cmd_axi_input_config {
+	uint32_t  fragAddr[4];
+	uint8_t   totalFragmentCount;
+	uint16_t  ySize;
+	uint16_t  xOffset;
+	uint16_t  xSize;
+	uint16_t  rowIncrement;
+	uint16_t  numOfRows;
+	enum VFE_AXI_BURST_LENGTH burstLength;
+	uint8_t   unpackPhase;
+	enum VFE_AXI_RD_UNPACK_HBI_SEL unpackHbi;
+	enum VFE_RAW_PIXEL_DATA_SIZE   pixelSize;
+	uint8_t   padRepeatCountLeft;
+	uint8_t   padRepeatCountRight;
+	uint8_t   padRepeatCountTop;
+	uint8_t   padRepeatCountBottom;
+	uint8_t   padLeftComponentSelectCycle0;
+	uint8_t   padLeftComponentSelectCycle1;
+	uint8_t   padLeftComponentSelectCycle2;
+	uint8_t   padLeftComponentSelectCycle3;
+	uint8_t   padLeftStopCycle0;
+	uint8_t   padLeftStopCycle1;
+	uint8_t   padLeftStopCycle2;
+	uint8_t   padLeftStopCycle3;
+	uint8_t   padRightComponentSelectCycle0;
+	uint8_t   padRightComponentSelectCycle1;
+	uint8_t   padRightComponentSelectCycle2;
+	uint8_t   padRightComponentSelectCycle3;
+	uint8_t   padRightStopCycle0;
+	uint8_t   padRightStopCycle1;
+	uint8_t   padRightStopCycle2;
+	uint8_t   padRightStopCycle3;
+	uint8_t   padTopLineCount;
+	uint8_t   padBottomLineCount;
+};
+
+struct vfe_interrupt_status {
+	uint8_t camifErrorIrq;
+	uint8_t camifSofIrq;
+	uint8_t camifEolIrq;
+	uint8_t camifEofIrq;
+	uint8_t camifEpoch1Irq;
+	uint8_t camifEpoch2Irq;
+	uint8_t camifOverflowIrq;
+	uint8_t ceIrq;
+	uint8_t regUpdateIrq;
+	uint8_t resetAckIrq;
+	uint8_t encYPingpongIrq;
+	uint8_t encCbcrPingpongIrq;
+	uint8_t viewYPingpongIrq;
+	uint8_t viewCbcrPingpongIrq;
+	uint8_t rdPingpongIrq;
+	uint8_t afPingpongIrq;
+	uint8_t awbPingpongIrq;
+	uint8_t histPingpongIrq;
+	uint8_t encIrq;
+	uint8_t viewIrq;
+	uint8_t busOverflowIrq;
+	uint8_t afOverflowIrq;
+	uint8_t awbOverflowIrq;
+	uint8_t syncTimer0Irq;
+	uint8_t syncTimer1Irq;
+	uint8_t syncTimer2Irq;
+	uint8_t asyncTimer0Irq;
+	uint8_t asyncTimer1Irq;
+	uint8_t asyncTimer2Irq;
+	uint8_t asyncTimer3Irq;
+	uint8_t axiErrorIrq;
+	uint8_t violationIrq;
+	uint8_t anyErrorIrqs;
+	uint8_t anyOutput1PathIrqs;
+	uint8_t anyOutput2PathIrqs;
+	uint8_t anyOutputPathIrqs;
+	uint8_t anyAsyncTimerIrqs;
+	uint8_t anySyncTimerIrqs;
+	uint8_t anyIrqForActiveStatesOnly;
+};
+
+enum VFE_MESSAGE_ID {
+	VFE_MSG_ID_RESET_ACK,
+	VFE_MSG_ID_START_ACK,
+	VFE_MSG_ID_STOP_ACK,
+	VFE_MSG_ID_UPDATE_ACK,
+	VFE_MSG_ID_OUTPUT_P,
+	VFE_MSG_ID_OUTPUT_V,
+	VFE_MSG_ID_OUTPUT_S,
+	VFE_MSG_ID_OUTPUT_T,
+	VFE_MSG_ID_SNAPSHOT_DONE,
+	VFE_MSG_ID_STATS_AUTOFOCUS,
+	VFE_MSG_ID_STATS_WB_EXP,
+	VFE_MSG_ID_EPOCH1,
+	VFE_MSG_ID_EPOCH2,
+	VFE_MSG_ID_SYNC_TIMER0_DONE,
+	VFE_MSG_ID_SYNC_TIMER1_DONE,
+	VFE_MSG_ID_SYNC_TIMER2_DONE,
+	VFE_MSG_ID_ASYNC_TIMER0_DONE,
+	VFE_MSG_ID_ASYNC_TIMER1_DONE,
+	VFE_MSG_ID_ASYNC_TIMER2_DONE,
+	VFE_MSG_ID_ASYNC_TIMER3_DONE,
+	VFE_MSG_ID_AF_OVERFLOW,
+	VFE_MSG_ID_AWB_OVERFLOW,
+	VFE_MSG_ID_AXI_ERROR,
+	VFE_MSG_ID_CAMIF_OVERFLOW,
+	VFE_MSG_ID_VIOLATION,
+	VFE_MSG_ID_CAMIF_ERROR,
+	VFE_MSG_ID_BUS_OVERFLOW,
+	VFE_MSG_ID_SOF_ACK,
+};
+
+struct vfe_msg_stats_autofocus {
+	uint32_t    afBuffer;
+	uint32_t    frameCounter;
+};
+
+struct vfe_msg_stats_wb_exp {
+	uint32_t awbBuffer;
+	uint32_t frameCounter;
+};
+
+struct vfe_frame_bpc_info {
+	uint32_t greenDefectPixelCount;
+	uint32_t redBlueDefectPixelCount;
+};
+
+struct vfe_frame_asf_info {
+	uint32_t  asfMaxEdge;
+	uint32_t  asfHbiCount;
+};
+
+struct vfe_msg_camif_status {
+	uint8_t  camifState;
+	uint32_t pixelCount;
+	uint32_t lineCount;
+};
+
+struct vfe_bus_pm_per_path {
+	uint32_t yWrPmStats0;
+	uint32_t yWrPmStats1;
+	uint32_t cbcrWrPmStats0;
+	uint32_t cbcrWrPmStats1;
+};
+
+struct vfe_bus_performance_monitor {
+	struct vfe_bus_pm_per_path encPathPmInfo;
+	struct vfe_bus_pm_per_path viewPathPmInfo;
+};
+
+struct vfe_irq_thread_msg {
+	uint32_t  vfeIrqStatus;
+	uint32_t  camifStatus;
+	uint32_t  demosaicStatus;
+	uint32_t  asfMaxEdge;
+	struct vfe_bus_performance_monitor pmInfo;
+};
+
+struct vfe_msg_output {
+	uint32_t  yBuffer;
+	uint32_t  cbcrBuffer;
+	struct vfe_frame_bpc_info bpcInfo;
+	struct vfe_frame_asf_info asfInfo;
+	uint32_t  frameCounter;
+	struct vfe_bus_pm_per_path pmData;
+};
+
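+/* Tagged union: _d selects which member of _u is valid for this message. */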
+struct vfe_message {
+	enum VFE_MESSAGE_ID _d;
+	union {
+		struct vfe_msg_output              msgOutput1;
+		struct vfe_msg_output              msgOutput2;
+		struct vfe_msg_stats_autofocus     msgStatsAf;
+		struct vfe_msg_stats_wb_exp        msgStatsWbExp;
+		struct vfe_msg_camif_status        msgCamifError;
+		struct vfe_bus_performance_monitor msgBusOverflow;
+	} _u;
+};
+
+/* New one for 8k */
+struct msm_vfe_command_8k {
+	int id;
+	uint16_t length;
+	void     *value;
+};
+
+struct vfe_frame_extra {
+	struct vfe_frame_bpc_info bpcInfo;
+	struct vfe_frame_asf_info asfInfo;
+	uint32_t  frameCounter;
+	struct vfe_bus_pm_per_path pmData;
+};
+#endif /* __MSM_VFE8X_H__ */
diff --git a/drivers/media/video/msm/msm_vfe8x_proc.c b/drivers/media/video/msm/msm_vfe8x_proc.c
new file mode 100644
index 0000000..9764557
--- /dev/null
+++ b/drivers/media/video/msm/msm_vfe8x_proc.c
@@ -0,0 +1,3888 @@
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include "msm_vfe8x_proc.h"
+#include <media/msm_camera.h>
+#include <mach/board.h>
+
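+/* One queue entry per serviced VFE interrupt: vfe_parse_irq() fills an entry
+ * from the hardware status registers and vfe_do_tasklet() drains it later in
+ * tasklet context. */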
+struct isr_queue_cmd {
+	struct list_head list;
+	struct vfe_interrupt_status vfeInterruptStatus;
+	struct vfe_frame_asf_info vfeAsfFrameInfo;
+	struct vfe_frame_bpc_info vfeBpcFrameInfo;
+	struct vfe_msg_camif_status vfeCamifStatusLocal;
+	struct vfe_bus_performance_monitor vfePmData;
+};
+
+struct msm_vfe8x_ctrl {
+	/* bits 1:0 ENC_IRQ_MASK = 0x11:
+	 * generate IRQ when both the y and cbcr frames are ready. */
+
+	/* bits 1:0 VIEW_IRQ_MASK = 0x11:
+	 * generate IRQ when both the y and cbcr frames are ready. */
+	struct vfe_irq_composite_mask_config vfeIrqCompositeMaskLocal;
+	struct vfe_module_enable vfeModuleEnableLocal;
+	struct vfe_camif_cfg_data   vfeCamifConfigLocal;
+	struct vfe_interrupt_mask   vfeImaskLocal;
+	struct vfe_stats_cmd_data   vfeStatsCmdLocal;
+	struct vfe_bus_cfg_data     vfeBusConfigLocal;
+	struct vfe_cmd_bus_pm_start vfeBusPmConfigLocal;
+	struct vfe_bus_cmd_data     vfeBusCmdLocal;
+	enum vfe_interrupt_name     vfeInterruptNameLocal;
+	uint32_t vfeLaBankSel;
+	struct vfe_gamma_lut_sel  vfeGammaLutSel;
+
+	boolean vfeStartAckPendingFlag;
+	boolean vfeStopAckPending;
+	boolean vfeResetAckPending;
+	boolean vfeUpdateAckPending;
+
+	enum VFE_AXI_OUTPUT_MODE        axiOutputMode;
+	enum VFE_START_OPERATION_MODE   vfeOperationMode;
+
+	atomic_t vfe_serv_interrupt;
+
+	uint32_t            vfeSnapShotCount;
+	uint32_t            vfeRequestedSnapShotCount;
+	boolean             vfeStatsPingPongReloadFlag;
+	uint32_t            vfeFrameId;
+
+	struct vfe_cmd_frame_skip_config vfeFrameSkip;
+	uint32_t vfeFrameSkipPattern;
+	uint8_t  vfeFrameSkipCount;
+	uint8_t  vfeFrameSkipPeriod;
+
+	boolean  vfeTestGenStartFlag;
+	uint32_t vfeImaskPacked;
+	uint32_t vfeImaskCompositePacked;
+	enum VFE_RAW_PIXEL_DATA_SIZE       axiInputDataSize;
+	struct vfe_irq_thread_msg          vfeIrqThreadMsgLocal;
+
+	struct vfe_output_path_combo  viewPath;
+	struct vfe_output_path_combo  encPath;
+	struct vfe_frame_skip_counts vfeDroppedFrameCounts;
+	struct vfe_stats_control afStatsControl;
+	struct vfe_stats_control awbStatsControl;
+
+	enum VFE_STATE  vstate;
+
+	struct msm_vfe_callback *resp;
+	struct vfe_frame_extra extdata;
+
+	struct isr_queue_cmd irqs[10];
+	spinlock_t irqs_lock;
+	int irq_get;
+	int irq_put;
+
+	int vfeirq;
+	void __iomem *vfebase;
+
+	void *syncdata;
+};
+
+static struct msm_vfe8x_ctrl *ctrl;
+static spinlock_t msm_vfe_ctrl_lock;
+
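+/* Copy a block of 32-bit words into consecutive VFE registers starting at
+ * hwreg; regcnt is given in bytes, hence the (regcnt >> 2) word count. */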
+static void vfe_prog_hw(uint8_t *hwreg, uint32_t *inptr, uint32_t regcnt)
+{
+	/* unsigned long flags; */
+	uint32_t i;
+	uint32_t *p;
+
+	/* @todo This is causing issues, needs further investigation. */
+	/* spin_lock_irqsave(&ctrl->io_lock, flags); */
+
+	p = (uint32_t *)(hwreg);
+	for (i = 0; i < (regcnt >> 2); i++)
+		writel(*inptr++, p++);
+		/* *p++ = *inptr++; */
+
+	/* spin_unlock_irqrestore(&ctrl->io_lock, flags); */
+}
+
+static void
+vfe_set_bus_pipo_addr(struct vfe_output_path_combo *vpath,
+	struct vfe_output_path_combo *epath)
+{
+	vpath->yPath.hwRegPingAddress = (uint8_t *)
+		(ctrl->vfebase + VFE_BUS_VIEW_Y_WR_PING_ADDR);
+	vpath->yPath.hwRegPongAddress = (uint8_t *)
+		(ctrl->vfebase + VFE_BUS_VIEW_Y_WR_PONG_ADDR);
+	vpath->cbcrPath.hwRegPingAddress = (uint8_t *)
+		(ctrl->vfebase + VFE_BUS_VIEW_CBCR_WR_PING_ADDR);
+	vpath->cbcrPath.hwRegPongAddress = (uint8_t *)
+		(ctrl->vfebase + VFE_BUS_VIEW_CBCR_WR_PONG_ADDR);
+
+	epath->yPath.hwRegPingAddress = (uint8_t *)
+		(ctrl->vfebase + VFE_BUS_ENC_Y_WR_PING_ADDR);
+	epath->yPath.hwRegPongAddress = (uint8_t *)
+		(ctrl->vfebase + VFE_BUS_ENC_Y_WR_PONG_ADDR);
+	epath->cbcrPath.hwRegPingAddress = (uint8_t *)
+		(ctrl->vfebase + VFE_BUS_ENC_CBCR_WR_PING_ADDR);
+	epath->cbcrPath.hwRegPongAddress = (uint8_t *)
+		(ctrl->vfebase + VFE_BUS_ENC_CBCR_WR_PONG_ADDR);
+}
+
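+/* Program the AXI bus output configuration for output path 1 (view) and
+ * output path 2 (encoder).  The 'out' argument is the number of bytes per
+ * transfer unit (8 for 64-bit): widths and row increments are rounded up to
+ * whole units, e.g. a 644-byte width with out = 8 becomes 81 units. */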
+static void vfe_axi_output(struct vfe_cmd_axi_output_config *in,
+	struct vfe_output_path_combo *out1,
+	struct vfe_output_path_combo *out2, uint16_t out)
+{
+	struct vfe_axi_out_cfg cmd;
+
+	uint16_t temp;
+	uint32_t burstLength;
+
+	memset(&cmd, 0, sizeof(cmd));
+	/* the hardware does not support burst length 4, so force it to 1. */
+	burstLength = 1;
+
+	/* AXI Output 2 Y Configuration*/
+	/* VFE_BUS_ENC_Y_WR_PING_ADDR  */
+	cmd.out2YPingAddr = out2->yPath.addressBuffer[0];
+
+	/* VFE_BUS_ENC_Y_WR_PONG_ADDR  */
+	cmd.out2YPongAddr = out2->yPath.addressBuffer[1];
+
+	/* VFE_BUS_ENC_Y_WR_IMAGE_SIZE */
+	cmd.out2YImageHeight = in->output2.outputY.imageHeight;
+	/* convert the image width and row increment to be in
+	 * unit of 64bit (8 bytes) */
+	temp = (in->output2.outputY.imageWidth + (out - 1)) / out;
+	cmd.out2YImageWidthin64bit = temp;
+
+	/* VFE_BUS_ENC_Y_WR_BUFFER_CFG */
+	cmd.out2YBurstLength = burstLength;
+	cmd.out2YNumRows = in->output2.outputY.outRowCount;
+	temp = (in->output2.outputY.outRowIncrement + (out - 1)) / out;
+	cmd.out2YRowIncrementIn64bit = temp;
+
+	/* AXI Output 2 Cbcr Configuration*/
+	/* VFE_BUS_ENC_Cbcr_WR_PING_ADDR  */
+	cmd.out2CbcrPingAddr = out2->cbcrPath.addressBuffer[0];
+
+	/* VFE_BUS_ENC_Cbcr_WR_PONG_ADDR  */
+	cmd.out2CbcrPongAddr = out2->cbcrPath.addressBuffer[1];
+
+	/* VFE_BUS_ENC_Cbcr_WR_IMAGE_SIZE */
+	cmd.out2CbcrImageHeight = in->output2.outputCbcr.imageHeight;
+	temp = (in->output2.outputCbcr.imageWidth + (out - 1)) / out;
+	cmd.out2CbcrImageWidthIn64bit = temp;
+
+	/* VFE_BUS_ENC_Cbcr_WR_BUFFER_CFG */
+	cmd.out2CbcrBurstLength = burstLength;
+	cmd.out2CbcrNumRows = in->output2.outputCbcr.outRowCount;
+	temp = (in->output2.outputCbcr.outRowIncrement + (out - 1)) / out;
+	cmd.out2CbcrRowIncrementIn64bit = temp;
+
+	/* AXI Output 1 Y Configuration */
+	/* VFE_BUS_VIEW_Y_WR_PING_ADDR  */
+	cmd.out1YPingAddr = out1->yPath.addressBuffer[0];
+
+	/* VFE_BUS_VIEW_Y_WR_PONG_ADDR */
+	cmd.out1YPongAddr = out1->yPath.addressBuffer[1];
+
+	/* VFE_BUS_VIEW_Y_WR_IMAGE_SIZE */
+	cmd.out1YImageHeight = in->output1.outputY.imageHeight;
+	temp = (in->output1.outputY.imageWidth + (out - 1)) / out;
+	cmd.out1YImageWidthin64bit = temp;
+
+	/* VFE_BUS_VIEW_Y_WR_BUFFER_CFG     */
+	cmd.out1YBurstLength = burstLength;
+	cmd.out1YNumRows = in->output1.outputY.outRowCount;
+
+	temp = (in->output1.outputY.outRowIncrement + (out - 1)) / out;
+	cmd.out1YRowIncrementIn64bit = temp;
+
+	/* AXI Output 1 Cbcr Configuration*/
+	cmd.out1CbcrPingAddr = out1->cbcrPath.addressBuffer[0];
+
+	/* VFE_BUS_VIEW_Cbcr_WR_PONG_ADDR  */
+	cmd.out1CbcrPongAddr = out1->cbcrPath.addressBuffer[1];
+
+	/* VFE_BUS_VIEW_Cbcr_WR_IMAGE_SIZE */
+	cmd.out1CbcrImageHeight = in->output1.outputCbcr.imageHeight;
+	temp = (in->output1.outputCbcr.imageWidth + (out - 1)) / out;
+	cmd.out1CbcrImageWidthIn64bit = temp;
+
+	cmd.out1CbcrBurstLength = burstLength;
+	cmd.out1CbcrNumRows = in->output1.outputCbcr.outRowCount;
+	temp = (in->output1.outputCbcr.outRowIncrement + (out - 1)) / out;
+
+	cmd.out1CbcrRowIncrementIn64bit = temp;
+
+	vfe_prog_hw(ctrl->vfebase + VFE_BUS_ENC_Y_WR_PING_ADDR,
+		(uint32_t *)&cmd, sizeof(cmd));
+}
+
+static void vfe_reg_bus_cfg(struct vfe_bus_cfg_data *in)
+{
+	struct vfe_axi_bus_cfg cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.stripeRdPathEn      = in->stripeRdPathEn;
+	cmd.encYWrPathEn        = in->encYWrPathEn;
+	cmd.encCbcrWrPathEn     = in->encCbcrWrPathEn;
+	cmd.viewYWrPathEn       = in->viewYWrPathEn;
+	cmd.viewCbcrWrPathEn    = in->viewCbcrWrPathEn;
+	cmd.rawPixelDataSize    = (uint32_t)in->rawPixelDataSize;
+	cmd.rawWritePathSelect  = (uint32_t)in->rawWritePathSelect;
+
+	/*  program vfe_bus_cfg */
+	writel(*((uint32_t *)&cmd), ctrl->vfebase + VFE_BUS_CFG);
+}
+
+static void vfe_reg_camif_config(struct vfe_camif_cfg_data *in)
+{
+	struct VFE_CAMIFConfigType cfg;
+
+	memset(&cfg, 0, sizeof(cfg));
+
+	cfg.VSyncEdge = in->camifCfgFromCmd.vSyncEdge;
+
+	cfg.HSyncEdge = in->camifCfgFromCmd.hSyncEdge;
+
+	cfg.syncMode = in->camifCfgFromCmd.syncMode;
+
+	cfg.vfeSubsampleEnable = in->camifCfgFromCmd.vfeSubSampleEnable;
+
+	cfg.busSubsampleEnable = in->camifCfgFromCmd.busSubSampleEnable;
+
+	cfg.camif2vfeEnable = in->camif2OutputEnable;
+
+	cfg.camif2busEnable = in->camif2BusEnable;
+
+	cfg.irqSubsampleEnable = in->camifCfgFromCmd.irqSubSampleEnable;
+
+	cfg.binningEnable = in->camifCfgFromCmd.binningEnable;
+
+	cfg.misrEnable = in->camifCfgFromCmd.misrEnable;
+
+	/*  program camif_config */
+	writel(*((uint32_t *)&cfg), ctrl->vfebase + CAMIF_CONFIG);
+}
+
+static void vfe_reg_bus_cmd(struct vfe_bus_cmd_data *in)
+{
+	struct vfe_buscmd cmd;
+	memset(&cmd, 0, sizeof(cmd));
+
+	cmd.stripeReload        = in->stripeReload;
+	cmd.busPingpongReload   = in->busPingpongReload;
+	cmd.statsPingpongReload = in->statsPingpongReload;
+
+	writel(*((uint32_t *)&cmd), ctrl->vfebase + VFE_BUS_CMD);
+
+	CDBG("bus command = 0x%x\n", (*((uint32_t *)&cmd)));
+
+	/* this is needed, as the control bits are pulse based.
+	 * Don't want to reload bus pingpong again. */
+	in->busPingpongReload = 0;
+	in->statsPingpongReload = 0;
+	in->stripeReload = 0;
+}
+
+static void vfe_reg_module_cfg(struct vfe_module_enable *in)
+{
+	struct vfe_mod_enable ena;
+
+	memset(&ena, 0, sizeof(ena));
+
+	ena.blackLevelCorrectionEnable = in->blackLevelCorrectionEnable;
+	ena.lensRollOffEnable          = in->lensRollOffEnable;
+	ena.demuxEnable                = in->demuxEnable;
+	ena.chromaUpsampleEnable       = in->chromaUpsampleEnable;
+	ena.demosaicEnable             = in->demosaicEnable;
+	ena.statsEnable                = in->statsEnable;
+	ena.cropEnable                 = in->cropEnable;
+	ena.mainScalerEnable           = in->mainScalerEnable;
+	ena.whiteBalanceEnable         = in->whiteBalanceEnable;
+	ena.colorCorrectionEnable      = in->colorCorrectionEnable;
+	ena.yHistEnable                = in->yHistEnable;
+	ena.skinToneEnable             = in->skinToneEnable;
+	ena.lumaAdaptationEnable       = in->lumaAdaptationEnable;
+	ena.rgbLUTEnable               = in->rgbLUTEnable;
+	ena.chromaEnhanEnable          = in->chromaEnhanEnable;
+	ena.asfEnable                  = in->asfEnable;
+	ena.chromaSuppressionEnable    = in->chromaSuppressionEnable;
+	ena.chromaSubsampleEnable      = in->chromaSubsampleEnable;
+	ena.scaler2YEnable             = in->scaler2YEnable;
+	ena.scaler2CbcrEnable          = in->scaler2CbcrEnable;
+
+	writel(*((uint32_t *)&ena), ctrl->vfebase + VFE_MODULE_CFG);
+}
+
+static void vfe_program_dmi_cfg(enum VFE_DMI_RAM_SEL bankSel)
+{
+	/* set bit 8 for auto increment. */
+	uint32_t value = VFE_DMI_CFG_DEFAULT;
+
+	value += (uint32_t)bankSel;
+	/* CDBG("dmi cfg input bank is  0x%x\n", bankSel); */
+
+	writel(value, ctrl->vfebase + VFE_DMI_CFG);
+	writel(0, ctrl->vfebase + VFE_DMI_ADDR);
+}
+
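+/* Load the lens roll-off correction tables into ROLLOFF_RAM through the DMI
+ * port.  Each 32-bit DMI word packs two 16-bit entries: R (low)/Gr (high)
+ * for one word, then B (low)/Gb (high) for the next. */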
+static void vfe_write_lens_roll_off_table(struct vfe_cmd_roll_off_config *in)
+{
+	uint16_t i;
+	uint32_t data;
+
+	uint16_t *initGr = in->initTableGr;
+	uint16_t *initGb = in->initTableGb;
+	uint16_t *initB =  in->initTableB;
+	uint16_t *initR =  in->initTableR;
+
+	int16_t *pDeltaGr = in->deltaTableGr;
+	int16_t *pDeltaGb = in->deltaTableGb;
+	int16_t *pDeltaB =  in->deltaTableB;
+	int16_t *pDeltaR =  in->deltaTableR;
+
+	vfe_program_dmi_cfg(ROLLOFF_RAM);
+
+	/* first pack and write init table */
+	for (i = 0; i < VFE_ROLL_OFF_INIT_TABLE_SIZE; i++) {
+		data = (((uint32_t)(*initR)) & 0x0000FFFF) |
+			(((uint32_t)(*initGr)) << 16);
+		initR++;
+		initGr++;
+
+		writel(data, ctrl->vfebase + VFE_DMI_DATA_LO);
+
+		data = (((uint32_t)(*initB)) & 0x0000FFFF) |
+			(((uint32_t)(*initGb))<<16);
+		initB++;
+		initGb++;
+
+		writel(data, ctrl->vfebase + VFE_DMI_DATA_LO);
+	}
+
+	/* there are gaps between the init table and delta table,
+	 * set the offset for delta table. */
+	writel(LENS_ROLL_OFF_DELTA_TABLE_OFFSET, ctrl->vfebase + VFE_DMI_ADDR);
+
+	/* pack and write delta table */
+	for (i = 0; i < VFE_ROLL_OFF_DELTA_TABLE_SIZE; i++) {
+		data = (((int)(*pDeltaR)) & 0x0000FFFF) |
+			(((int)(*pDeltaGr))<<16);
+		pDeltaR++;
+		pDeltaGr++;
+
+		writel(data, ctrl->vfebase + VFE_DMI_DATA_LO);
+
+		data = (((int)(*pDeltaB)) & 0x0000FFFF) |
+			(((int)(*pDeltaGb))<<16);
+		pDeltaB++;
+		pDeltaGb++;
+
+		writel(data, ctrl->vfebase + VFE_DMI_DATA_LO);
+	}
+
+	/* After the DMI transfer, to make it safe, set DMI_CFG back to its
+	 * default value so that no SRAM bank remains selected.
+	 */
+	writel(VFE_DMI_CFG_DEFAULT, ctrl->vfebase + VFE_DMI_CFG);
+}
+
+static void vfe_set_default_reg_values(void)
+{
+	writel(0x800080, ctrl->vfebase + VFE_DEMUX_GAIN_0);
+	writel(0x800080, ctrl->vfebase + VFE_DEMUX_GAIN_1);
+	writel(0xFFFFF, ctrl->vfebase + VFE_CGC_OVERRIDE);
+
+	/* default frame drop period and pattern */
+	writel(0x1f, ctrl->vfebase + VFE_FRAMEDROP_ENC_Y_CFG);
+	writel(0x1f, ctrl->vfebase + VFE_FRAMEDROP_ENC_CBCR_CFG);
+	writel(0xFFFFFFFF, ctrl->vfebase + VFE_FRAMEDROP_ENC_Y_PATTERN);
+	writel(0xFFFFFFFF, ctrl->vfebase + VFE_FRAMEDROP_ENC_CBCR_PATTERN);
+	writel(0x1f, ctrl->vfebase + VFE_FRAMEDROP_VIEW_Y_CFG);
+	writel(0x1f, ctrl->vfebase + VFE_FRAMEDROP_VIEW_CBCR_CFG);
+	writel(0xFFFFFFFF, ctrl->vfebase + VFE_FRAMEDROP_VIEW_Y_PATTERN);
+	writel(0xFFFFFFFF, ctrl->vfebase + VFE_FRAMEDROP_VIEW_CBCR_PATTERN);
+	writel(0, ctrl->vfebase + VFE_CLAMP_MIN_CFG);
+	writel(0xFFFFFF, ctrl->vfebase + VFE_CLAMP_MAX_CFG);
+}
+
+static void vfe_config_demux(uint32_t period, uint32_t even, uint32_t odd)
+{
+	writel(period, ctrl->vfebase + VFE_DEMUX_CFG);
+	writel(even, ctrl->vfebase + VFE_DEMUX_EVEN_CFG);
+	writel(odd, ctrl->vfebase + VFE_DEMUX_ODD_CFG);
+}
+
+static void vfe_pm_stop(void)
+{
+	writel(VFE_PERFORMANCE_MONITOR_STOP, ctrl->vfebase + VFE_BUS_PM_CMD);
+}
+
+static void vfe_camif_stop_immediately(void)
+{
+	writel(CAMIF_COMMAND_STOP_IMMEDIATELY, ctrl->vfebase + CAMIF_COMMAND);
+	writel(0, ctrl->vfebase + VFE_CGC_OVERRIDE);
+}
+
+static void vfe_program_reg_update_cmd(uint32_t value)
+{
+	writel(value, ctrl->vfebase + VFE_REG_UPDATE_CMD);
+}
+
+static void vfe_program_global_reset_cmd(uint32_t value)
+{
+	writel(value, ctrl->vfebase + VFE_GLOBAL_RESET_CMD);
+}
+
+static void vfe_program_axi_cmd(uint32_t value)
+{
+	writel(value, ctrl->vfebase + VFE_AXI_CMD);
+}
+
+static void vfe_program_irq_composite_mask(uint32_t value)
+{
+	writel(value, ctrl->vfebase + VFE_IRQ_COMPOSITE_MASK);
+}
+
+static inline void vfe_program_irq_mask(uint32_t value)
+{
+	writel(value, ctrl->vfebase + VFE_IRQ_MASK);
+}
+
+static uint32_t vfe_read_axi_status(void)
+{
+	return readl(ctrl->vfebase + VFE_AXI_STATUS);
+}
+
+static void
+vfe_set_stats_pingpong_address(struct vfe_stats_control *afControl,
+	struct vfe_stats_control *awbControl)
+{
+	afControl->hwRegPingAddress = (uint8_t *)
+		(ctrl->vfebase + VFE_BUS_STATS_AF_WR_PING_ADDR);
+	afControl->hwRegPongAddress = (uint8_t *)
+		(ctrl->vfebase + VFE_BUS_STATS_AF_WR_PONG_ADDR);
+
+	awbControl->hwRegPingAddress = (uint8_t *)
+		(ctrl->vfebase + VFE_BUS_STATS_AWB_WR_PING_ADDR);
+	awbControl->hwRegPongAddress = (uint8_t *)
+		(ctrl->vfebase + VFE_BUS_STATS_AWB_WR_PONG_ADDR);
+}
+
+static void vfe_program_lut_bank_sel(struct vfe_gamma_lut_sel *in)
+{
+	struct VFE_GammaLutSelect_ConfigCmdType cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	cmd.ch0BankSelect = in->ch0BankSelect;
+	cmd.ch1BankSelect = in->ch1BankSelect;
+	cmd.ch2BankSelect = in->ch2BankSelect;
+	CDBG("VFE gamma lut bank selection is 0x%x\n", *((uint32_t *)&cmd));
+	vfe_prog_hw(ctrl->vfebase + VFE_LUT_BANK_SEL,
+		(uint32_t *)&cmd, sizeof(cmd));
+}
+
+static void vfe_program_stats_cmd(struct vfe_stats_cmd_data *in)
+{
+	struct VFE_StatsCmdType stats;
+	memset(&stats, 0, sizeof(stats));
+
+	stats.autoFocusEnable        = in->autoFocusEnable;
+	stats.axwEnable              = in->axwEnable;
+	stats.histEnable             = in->histEnable;
+	stats.clearHistEnable        = in->clearHistEnable;
+	stats.histAutoClearEnable    = in->histAutoClearEnable;
+	stats.colorConversionEnable  = in->colorConversionEnable;
+
+	writel(*((uint32_t *)&stats), ctrl->vfebase + VFE_STATS_CMD);
+}
+
+static void vfe_pm_start(struct vfe_cmd_bus_pm_start *in)
+{
+	struct VFE_Bus_Pm_ConfigCmdType cmd;
+	memset(&cmd, 0, sizeof(struct VFE_Bus_Pm_ConfigCmdType));
+
+	cmd.output2YWrPmEnable     = in->output2YWrPmEnable;
+	cmd.output2CbcrWrPmEnable  = in->output2CbcrWrPmEnable;
+	cmd.output1YWrPmEnable     = in->output1YWrPmEnable;
+	cmd.output1CbcrWrPmEnable  = in->output1CbcrWrPmEnable;
+
+	vfe_prog_hw(ctrl->vfebase + VFE_BUS_PM_CFG,
+		(uint32_t *)&cmd, sizeof(cmd));
+}
+
+static void vfe_8k_pm_start(struct vfe_cmd_bus_pm_start *in)
+{
+	in->output1CbcrWrPmEnable = ctrl->vfeBusConfigLocal.viewCbcrWrPathEn;
+	in->output1YWrPmEnable    = ctrl->vfeBusConfigLocal.viewYWrPathEn;
+	in->output2CbcrWrPmEnable = ctrl->vfeBusConfigLocal.encCbcrWrPathEn;
+	in->output2YWrPmEnable    = ctrl->vfeBusConfigLocal.encYWrPathEn;
+
+	if (in->output1CbcrWrPmEnable || in->output1YWrPmEnable)
+		ctrl->viewPath.pmEnabled = TRUE;
+
+	if (in->output2CbcrWrPmEnable || in->output2YWrPmEnable)
+		ctrl->encPath.pmEnabled = TRUE;
+
+	vfe_pm_start(in);
+
+	writel(VFE_PERFORMANCE_MONITOR_GO, ctrl->vfebase + VFE_BUS_PM_CMD);
+}
+
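+/* Pack the per-irq boolean flags into the 32-bit bit-field layout shared by
+ * the VFE irq mask and status registers. */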
+static uint32_t vfe_irq_pack(struct vfe_interrupt_mask data)
+{
+	struct vfe_irqenable packedData;
+
+	memset(&packedData, 0, sizeof(packedData));
+
+	packedData.camifErrorIrq          = data.camifErrorIrq;
+	packedData.camifSofIrq            = data.camifSofIrq;
+	packedData.camifEolIrq            = data.camifEolIrq;
+	packedData.camifEofIrq            = data.camifEofIrq;
+	packedData.camifEpoch1Irq         = data.camifEpoch1Irq;
+	packedData.camifEpoch2Irq         = data.camifEpoch2Irq;
+	packedData.camifOverflowIrq       = data.camifOverflowIrq;
+	packedData.ceIrq                  = data.ceIrq;
+	packedData.regUpdateIrq           = data.regUpdateIrq;
+	packedData.resetAckIrq            = data.resetAckIrq;
+	packedData.encYPingpongIrq        = data.encYPingpongIrq;
+	packedData.encCbcrPingpongIrq     = data.encCbcrPingpongIrq;
+	packedData.viewYPingpongIrq       = data.viewYPingpongIrq;
+	packedData.viewCbcrPingpongIrq    = data.viewCbcrPingpongIrq;
+	packedData.rdPingpongIrq          = data.rdPingpongIrq;
+	packedData.afPingpongIrq          = data.afPingpongIrq;
+	packedData.awbPingpongIrq         = data.awbPingpongIrq;
+	packedData.histPingpongIrq        = data.histPingpongIrq;
+	packedData.encIrq                 = data.encIrq;
+	packedData.viewIrq                = data.viewIrq;
+	packedData.busOverflowIrq         = data.busOverflowIrq;
+	packedData.afOverflowIrq          = data.afOverflowIrq;
+	packedData.awbOverflowIrq         = data.awbOverflowIrq;
+	packedData.syncTimer0Irq          = data.syncTimer0Irq;
+	packedData.syncTimer1Irq          = data.syncTimer1Irq;
+	packedData.syncTimer2Irq          = data.syncTimer2Irq;
+	packedData.asyncTimer0Irq         = data.asyncTimer0Irq;
+	packedData.asyncTimer1Irq         = data.asyncTimer1Irq;
+	packedData.asyncTimer2Irq         = data.asyncTimer2Irq;
+	packedData.asyncTimer3Irq         = data.asyncTimer3Irq;
+	packedData.axiErrorIrq            = data.axiErrorIrq;
+	packedData.violationIrq           = data.violationIrq;
+
+	return *((uint32_t *)&packedData);
+}
+
+static uint32_t
+vfe_irq_composite_pack(struct vfe_irq_composite_mask_config data)
+{
+	struct VFE_Irq_Composite_MaskType packedData;
+
+	memset(&packedData, 0, sizeof(packedData));
+
+	packedData.encIrqComMaskBits   = data.encIrqComMask;
+	packedData.viewIrqComMaskBits  = data.viewIrqComMask;
+	packedData.ceDoneSelBits       = data.ceDoneSel;
+
+	return *((uint32_t *)&packedData);
+}
+
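+/* Translate the buffer addresses carried in a vfe_message into the
+ * msm_vfe_phy_info expected by the upper layer; for output frames, also
+ * export the per-frame extra data (bpc/asf/frame counter/pm) through *ext. */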
+static void vfe_addr_convert(struct msm_vfe_phy_info *pinfo,
+				enum vfe_resp_msg type, void *data, void **ext,
+				int *elen)
+{
+	switch (type) {
+	case VFE_MSG_OUTPUT_P:
+	case VFE_MSG_OUTPUT_V:{
+		pinfo->y_phy =
+			((struct vfe_message *)data)->_u.msgOutput2.yBuffer;
+		pinfo->cbcr_phy =
+			((struct vfe_message *)data)->_u.msgOutput2.
+			cbcrBuffer;
+		ctrl->extdata.bpcInfo =
+			((struct vfe_message *)data)->_u.msgOutput2.bpcInfo;
+		ctrl->extdata.asfInfo =
+			((struct vfe_message *)data)->_u.msgOutput2.asfInfo;
+		ctrl->extdata.frameCounter =
+			((struct vfe_message *)data)->_u.msgOutput2.
+			frameCounter;
+		ctrl->extdata.pmData =
+		((struct vfe_message *)data)->_u.msgOutput2.pmData;
+		*ext = &ctrl->extdata;
+		*elen = sizeof(ctrl->extdata);
+	}
+		break;
+
+	case VFE_MSG_STATS_AF:
+		pinfo->sbuf_phy =
+		((struct vfe_message *)data)->_u.msgStatsAf.afBuffer;
+		break;
+
+	case VFE_MSG_STATS_WE:
+		pinfo->sbuf_phy =
+		((struct vfe_message *)data)->_u.msgStatsWbExp.awbBuffer;
+		break;
+
+	default:
+		break;
+	} /* switch */
+}
+
+static boolean vfe_send_preview_msg(struct msm_vfe_resp *rp,
+			struct vfe_message *msg, void *data);
+static boolean vfe_send_video_msg(struct msm_vfe_resp *rp,
+			struct vfe_message *msg, void *data);
+static boolean vfe_send_mainimage_msg(struct msm_vfe_resp *rp,
+			struct vfe_message *msg, void *data);
+static boolean vfe_send_thumbnail_msg(struct msm_vfe_resp *rp,
+			struct vfe_message *msg, void *data);
+static boolean vfe_send_af_stats_msg(struct msm_vfe_resp *rp,
+			struct vfe_message *msg, void *data);
+static boolean vfe_send_awb_stats_msg(struct msm_vfe_resp *rp,
+			struct vfe_message *msg, void *data);
+static boolean vfe_send_camif_error_msg(struct msm_vfe_resp *rp,
+			struct vfe_message *msg, void *data);
+static boolean vfe_send_bus_overflow_msg(struct msm_vfe_resp *rp,
+			struct vfe_message *msg, void *data);
+static boolean vfe_send_sof_msg(struct msm_vfe_resp *rp,
+			struct vfe_message *msg, void *data);
+
+static boolean invalid(struct msm_vfe_resp *rp,
+		struct vfe_message *_m, void *_d)
+{
+	BUG_ON(1); /* this function should not be called. */
+	return FALSE;
+}
+
+static struct {
+	boolean (*fn)(struct msm_vfe_resp *rp, struct vfe_message *msg,
+		void *data);
+	enum vfe_resp_msg rt; /* response type */
+} vfe_funcs[] = {
+	[VFE_MSG_ID_RESET_ACK] = { NULL, VFE_MSG_GENERAL },
+	[VFE_MSG_ID_START_ACK] = { NULL, VFE_MSG_GENERAL },
+	[VFE_MSG_ID_STOP_ACK] = { NULL, VFE_MSG_GENERAL },
+	[VFE_MSG_ID_UPDATE_ACK] = { NULL, VFE_MSG_GENERAL },
+	[VFE_MSG_ID_OUTPUT_P] = { vfe_send_preview_msg, VFE_MSG_OUTPUT_P },
+	[VFE_MSG_ID_OUTPUT_V] = { vfe_send_video_msg, VFE_MSG_OUTPUT_V },
+	[VFE_MSG_ID_OUTPUT_S] = { vfe_send_mainimage_msg, VFE_MSG_OUTPUT_S },
+	[VFE_MSG_ID_OUTPUT_T] = { vfe_send_thumbnail_msg, VFE_MSG_OUTPUT_T },
+	[VFE_MSG_ID_SNAPSHOT_DONE] = { NULL, VFE_MSG_SNAPSHOT },
+	[VFE_MSG_ID_STATS_AUTOFOCUS] = { vfe_send_af_stats_msg,
+		VFE_MSG_STATS_AF },
+	[VFE_MSG_ID_STATS_WB_EXP] = { vfe_send_awb_stats_msg,
+		VFE_MSG_STATS_WE },
+	[VFE_MSG_ID_EPOCH1] = { NULL, VFE_MSG_GENERAL },
+	[VFE_MSG_ID_EPOCH2] = { NULL, VFE_MSG_GENERAL },
+	[VFE_MSG_ID_SYNC_TIMER0_DONE] = { invalid },
+	[VFE_MSG_ID_SYNC_TIMER1_DONE] = { invalid },
+	[VFE_MSG_ID_SYNC_TIMER2_DONE] = { invalid },
+	[VFE_MSG_ID_ASYNC_TIMER0_DONE] = { invalid },
+	[VFE_MSG_ID_ASYNC_TIMER1_DONE] = { invalid },
+	[VFE_MSG_ID_ASYNC_TIMER2_DONE] = { invalid },
+	[VFE_MSG_ID_ASYNC_TIMER3_DONE] = { invalid },
+	[VFE_MSG_ID_AF_OVERFLOW] = { NULL, VFE_MSG_GENERAL },
+	[VFE_MSG_ID_AWB_OVERFLOW] = { NULL, VFE_MSG_GENERAL },
+	[VFE_MSG_ID_AXI_ERROR] = { NULL, VFE_MSG_GENERAL },
+	[VFE_MSG_ID_CAMIF_OVERFLOW] = { NULL, VFE_MSG_GENERAL },
+	[VFE_MSG_ID_VIOLATION] = { invalid },
+	[VFE_MSG_ID_CAMIF_ERROR] = { vfe_send_camif_error_msg,
+		VFE_MSG_GENERAL },
+	[VFE_MSG_ID_BUS_OVERFLOW] = { vfe_send_bus_overflow_msg,
+		VFE_MSG_GENERAL },
+	[VFE_MSG_ID_SOF_ACK] = { vfe_send_sof_msg,
+		VFE_MSG_GENERAL },
+};
+
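+/* Look up the handler for a VFE message id in vfe_funcs[], allocate a
+ * msm_vfe_resp (plus an embedded vfe_message when a handler exists), let the
+ * handler fill it in, then pass it to the upper layer via resp->vfe_resp(). */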
+static void vfe_proc_ops(enum VFE_MESSAGE_ID id, void *data)
+{
+	struct msm_vfe_resp *rp;
+	struct vfe_message *msg;
+
+	if (id >= ARRAY_SIZE(vfe_funcs) || vfe_funcs[id].fn == invalid) {
+		pr_err("%s: invalid VFE message id %d\n", __func__, id);
+		return;
+	}
+
+	/* In 8k, OUTPUT1 & OUTPUT2 messages arrive before SNAPSHOT_DONE.
+	 * We don't send such messages to the user.  Note that we can do
+	 * this in the vfe_funcs[] callback, but that would cause us to
+	 * allocate and then immediately free the msm_vfe_resp structure,
+	 * which is wasteful.
+	 */
+	if ((ctrl->vfeOperationMode == VFE_START_OPERATION_MODE_SNAPSHOT) &&
+			(id == VFE_MSG_ID_OUTPUT_T ||
+			 id == VFE_MSG_ID_OUTPUT_S))
+		return;
+
+	rp = ctrl->resp->vfe_alloc(sizeof(*rp) +
+					(vfe_funcs[id].fn ? sizeof(*msg) : 0),
+					ctrl->syncdata,
+					GFP_ATOMIC);
+	if (!rp) {
+		pr_err("%s: out of memory\n", __func__);
+		return;
+	}
+
+	rp->type = vfe_funcs[id].rt;
+	rp->evt_msg.type = MSM_CAMERA_MSG;
+	rp->evt_msg.msg_id = id;
+
+	if (!vfe_funcs[id].fn) {
+		rp->evt_msg.len = 0;
+		rp->evt_msg.data = 0;
+	} else {
+		/* populate the message accordingly */
+		if (vfe_funcs[id].fn)
+			rp->evt_msg.data = msg =
+				(struct vfe_message *)(rp + 1);
+		else
+			rp->evt_msg.data = msg = 0;
+		rp->evt_msg.len = sizeof(*msg);
+		msg->_d = id;
+		if (vfe_funcs[id].fn(rp, msg, data) == FALSE) {
+			pr_warning("%s: freeing memory: handler for %d "
+				"returned false\n", __func__, id);
+			ctrl->resp->vfe_free(rp);
+			return;
+		}
+	}
+
+	ctrl->resp->vfe_resp(rp, MSM_CAM_Q_VFE_MSG, ctrl->syncdata, GFP_KERNEL);
+}
+
+static boolean vfe_send_bus_overflow_msg(struct msm_vfe_resp *rp,
+			struct vfe_message *msg,
+			void *data)
+{
+#if 0
+	memcpy(&(msg->_u.msgBusOverflow),
+		&ctrl->vfePmData, sizeof(ctrl->vfePmData));
+#endif
+	return TRUE;
+}
+
+static boolean vfe_send_sof_msg(struct msm_vfe_resp *rp,
+			struct vfe_message *msg,
+			void *data)
+{
+	return TRUE;
+}
+static boolean vfe_send_camif_error_msg(struct msm_vfe_resp *rp,
+			struct vfe_message *msg,
+			void *data)
+{
+#if 0
+	memcpy(&(msg->_u.msgCamifError),
+	       &ctrl->vfeCamifStatusLocal, sizeof(ctrl->vfeCamifStatusLocal));
+#endif
+	return TRUE;
+}
+
+static void vfe_process_error_irq(struct vfe_interrupt_status *irqstatus)
+{
+	/* All possible error irqs.  Note that error irqs are not enabled;
+	 * they are checked only when other interrupts are present. */
+	if (irqstatus->afOverflowIrq)
+		vfe_proc_ops(VFE_MSG_ID_AF_OVERFLOW, NULL);
+
+	if (irqstatus->awbOverflowIrq)
+		vfe_proc_ops(VFE_MSG_ID_AWB_OVERFLOW, NULL);
+
+	if (irqstatus->axiErrorIrq)
+		vfe_proc_ops(VFE_MSG_ID_AXI_ERROR, NULL);
+
+	if (irqstatus->busOverflowIrq)
+		vfe_proc_ops(VFE_MSG_ID_BUS_OVERFLOW, NULL);
+
+	if (irqstatus->camifErrorIrq) {
+		CDBG("vfe_irq: camif errors\n");
+		vfe_proc_ops(VFE_MSG_ID_CAMIF_ERROR, NULL);
+	}
+
+	if (irqstatus->camifOverflowIrq)
+		vfe_proc_ops(VFE_MSG_ID_CAMIF_OVERFLOW, NULL);
+
+	if (irqstatus->violationIrq)
+		pr_err("%s: violation irq\n", __func__);
+}
+
+static void vfe_process_camif_sof_irq(void)
+{
+	/* increment the frame id number. */
+	ctrl->vfeFrameId++;
+
+	CDBG("camif_sof_irq, frameId = %d\n", ctrl->vfeFrameId);
+
+	/* In snapshot mode, if frame skip is programmed, check it here so
+	 * that camif is stopped at the correct frame boundary.  Dropped
+	 * frames generate no output path irqs, but they still generate the
+	 * SOF irq, which lets us determine when to stop the camif.
+	 */
+	if (ctrl->vfeOperationMode) {
+		if ((1 << ctrl->vfeFrameSkipCount) & ctrl->vfeFrameSkipPattern) {
+
+			ctrl->vfeSnapShotCount--;
+			if (ctrl->vfeSnapShotCount == 0)
+				/* terminate vfe pipeline at frame boundary. */
+				writel(CAMIF_COMMAND_STOP_AT_FRAME_BOUNDARY,
+					ctrl->vfebase + CAMIF_COMMAND);
+		}
+
+		/* update frame skip counter for bit checking. */
+		ctrl->vfeFrameSkipCount++;
+		if (ctrl->vfeFrameSkipCount == (ctrl->vfeFrameSkipPeriod + 1))
+			ctrl->vfeFrameSkipCount = 0;
+	}
+	vfe_proc_ops(VFE_MSG_ID_SOF_ACK, NULL);
+}
+
+static boolean vfe_get_af_pingpong_status(void)
+{
+	uint32_t busPingPongStatus =
+		readl(ctrl->vfebase + VFE_BUS_PINGPONG_STATUS);
+	return !!(busPingPongStatus & VFE_AF_PINGPONG_STATUS_BIT);
+}
+
+static uint32_t vfe_read_af_buf_addr(boolean pipo)
+{
+	if (pipo == FALSE)
+		return readl(ctrl->vfebase + VFE_BUS_STATS_AF_WR_PING_ADDR);
+	else
+		return readl(ctrl->vfebase + VFE_BUS_STATS_AF_WR_PONG_ADDR);
+}
+
+static void vfe_update_af_buf_addr(boolean pipo, uint32_t addr)
+{
+	if (pipo == FALSE)
+		writel(addr, ctrl->vfebase + VFE_BUS_STATS_AF_WR_PING_ADDR);
+	else
+		writel(addr, ctrl->vfebase + VFE_BUS_STATS_AF_WR_PONG_ADDR);
+}
+
+static boolean vfe_send_af_stats_msg(struct msm_vfe_resp *rp,
+		struct vfe_message *msg, void *data)
+{
+	uint32_t afBufAddress = (uint32_t)data;
+
+	/* fill message with right content. */
+	/* @todo This is causing issues, needs further investigation. */
+	/* spin_lock_irqsave(&ctrl->state_lock, flags); */
+	if (ctrl->vstate != VFE_STATE_ACTIVE)
+		return FALSE;
+
+	msg->_u.msgStatsAf.afBuffer = afBufAddress;
+	msg->_u.msgStatsAf.frameCounter = ctrl->vfeFrameId;
+
+	ctrl->afStatsControl.ackPending = TRUE;
+
+	vfe_addr_convert(&(rp->phy), rp->type, msg, NULL, NULL);
+	/* spin_unlock_irqrestore(&ctrl->state_lock, flags); */
+	return TRUE;
+}
+
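+/* AF stats buffer ready: unless the previous buffer is still awaiting an
+ * ack, use the ping/pong status to pick the buffer to hand to the upper
+ * layer and re-arm that slot with the next buffer address; otherwise count
+ * the frame as a dropped stats frame. */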
+static void vfe_process_stats_af_irq(void)
+{
+	boolean bufferAvailable;
+
+	if (!(ctrl->afStatsControl.ackPending)) {
+
+		/* read hardware status. */
+		ctrl->afStatsControl.pingPongStatus =
+			vfe_get_af_pingpong_status();
+
+		bufferAvailable = (ctrl->afStatsControl.pingPongStatus) ^ 1;
+
+		ctrl->afStatsControl.bufToRender =
+			vfe_read_af_buf_addr(bufferAvailable);
+
+		/* update the same buffer address (ping or pong) */
+		vfe_update_af_buf_addr(bufferAvailable,
+			ctrl->afStatsControl.nextFrameAddrBuf);
+
+		vfe_proc_ops(VFE_MSG_ID_STATS_AUTOFOCUS,
+			(void *)ctrl->afStatsControl.bufToRender);
+	} else
+		ctrl->afStatsControl.droppedStatsFrameCount++;
+}
+
+static boolean vfe_get_awb_pingpong_status(void)
+{
+	uint32_t busPingPongStatus =
+		readl(ctrl->vfebase + VFE_BUS_PINGPONG_STATUS);
+
+	return !!(busPingPongStatus & VFE_AWB_PINGPONG_STATUS_BIT);
+}
+
+static uint32_t vfe_read_awb_buf_addr(boolean pingpong)
+{
+	if (pingpong == FALSE)
+		return readl(ctrl->vfebase + VFE_BUS_STATS_AWB_WR_PING_ADDR);
+	else
+		return readl(ctrl->vfebase + VFE_BUS_STATS_AWB_WR_PONG_ADDR);
+}
+
+static void vfe_update_awb_buf_addr(boolean pingpong, uint32_t addr)
+{
+	if (pingpong == FALSE)
+		writel(addr, ctrl->vfebase + VFE_BUS_STATS_AWB_WR_PING_ADDR);
+	else
+		writel(addr, ctrl->vfebase + VFE_BUS_STATS_AWB_WR_PONG_ADDR);
+}
+
+static boolean vfe_send_awb_stats_msg(struct msm_vfe_resp *rp,
+		struct vfe_message *msg, void *data)
+{
+	uint32_t awbBufAddress = (uint32_t)data;
+
+	/* fill message with right content. */
+	/* @todo This is causing issues, needs further investigation. */
+	/* spin_lock_irqsave(&ctrl->state_lock, flags); */
+	if (ctrl->vstate != VFE_STATE_ACTIVE)
+		return FALSE;
+
+	msg->_u.msgStatsWbExp.awbBuffer = awbBufAddress;
+	msg->_u.msgStatsWbExp.frameCounter = ctrl->vfeFrameId;
+
+
+	ctrl->awbStatsControl.ackPending = TRUE;
+
+	vfe_addr_convert(&(rp->phy),
+			rp->type, msg,
+			NULL, NULL);
+
+	return TRUE;
+}
+
+static void vfe_process_stats_awb_irq(void)
+{
+	boolean bufferAvailable;
+
+	if (!(ctrl->awbStatsControl.ackPending)) {
+
+		ctrl->awbStatsControl.pingPongStatus =
+			vfe_get_awb_pingpong_status();
+
+		bufferAvailable = (ctrl->awbStatsControl.pingPongStatus) ^ 1;
+
+		ctrl->awbStatsControl.bufToRender =
+			vfe_read_awb_buf_addr(bufferAvailable);
+
+		vfe_update_awb_buf_addr(bufferAvailable,
+			ctrl->awbStatsControl.nextFrameAddrBuf);
+
+		vfe_proc_ops(VFE_MSG_ID_STATS_WB_EXP,
+			(void *)ctrl->awbStatsControl.bufToRender);
+
+	} else
+		ctrl->awbStatsControl.droppedStatsFrameCount++;
+}
+
+static void vfe_write_gamma_table(uint8_t channel,
+	boolean bank, int16_t *pTable)
+{
+	uint16_t i;
+
+	enum VFE_DMI_RAM_SEL dmiRamSel = NO_MEM_SELECTED;
+
+	switch (channel) {
+	case 0:
+		if (bank == 0)
+			dmiRamSel = RGBLUT_RAM_CH0_BANK0;
+		else
+			dmiRamSel = RGBLUT_RAM_CH0_BANK1;
+		break;
+
+	case 1:
+		if (bank == 0)
+			dmiRamSel = RGBLUT_RAM_CH1_BANK0;
+		else
+			dmiRamSel = RGBLUT_RAM_CH1_BANK1;
+		break;
+
+	case 2:
+		if (bank == 0)
+			dmiRamSel = RGBLUT_RAM_CH2_BANK0;
+		else
+			dmiRamSel = RGBLUT_RAM_CH2_BANK1;
+		break;
+
+	default:
+		break;
+	}
+
+	vfe_program_dmi_cfg(dmiRamSel);
+
+	for (i = 0; i < VFE_GAMMA_TABLE_LENGTH; i++) {
+		writel((uint32_t)(*pTable), ctrl->vfebase + VFE_DMI_DATA_LO);
+		pTable++;
+	}
+
+	/* After the DMI transfer, set DMI_CFG back to its default value to
+	 * deselect the SRAM bank. */
+	writel(VFE_DMI_CFG_DEFAULT, ctrl->vfebase + VFE_DMI_CFG);
+}
+
+static void vfe_prog_hw_testgen_cmd(uint32_t value)
+{
+	writel(value, ctrl->vfebase + VFE_HW_TESTGEN_CMD);
+}
+
+static inline void vfe_read_irq_status(struct vfe_irq_thread_msg *out)
+{
+	uint32_t *temp;
+
+	memset(out, 0, sizeof(struct vfe_irq_thread_msg));
+
+	temp = (uint32_t *)(ctrl->vfebase + VFE_IRQ_STATUS);
+	out->vfeIrqStatus = readl(temp);
+
+	temp = (uint32_t *)(ctrl->vfebase + CAMIF_STATUS);
+	out->camifStatus = readl(temp);
+
+/*	this is for YUV performance tuning
+	writel(0x7, ctrl->vfebase + CAMIF_COMMAND);
+	writel(0x3, ctrl->vfebase + CAMIF_COMMAND);
+	CDBG("camifStatus  = 0x%x\n", out->camifStatus);
+*/
+/*
+	temp = (uint32_t *)(ctrl->vfebase + VFE_DEMOSAIC_STATUS);
+	out->demosaicStatus = readl(temp);
+
+	temp = (uint32_t *)(ctrl->vfebase + VFE_ASF_MAX_EDGE);
+	out->asfMaxEdge = readl(temp);
+
+	temp = (uint32_t *)(ctrl->vfebase + VFE_BUS_ENC_Y_WR_PM_STATS_0);
+*/
+
+#if 0
+	out->pmInfo.encPathPmInfo.yWrPmStats0      = readl(temp++);
+	out->pmInfo.encPathPmInfo.yWrPmStats1      = readl(temp++);
+	out->pmInfo.encPathPmInfo.cbcrWrPmStats0   = readl(temp++);
+	out->pmInfo.encPathPmInfo.cbcrWrPmStats1   = readl(temp++);
+	out->pmInfo.viewPathPmInfo.yWrPmStats0     = readl(temp++);
+	out->pmInfo.viewPathPmInfo.yWrPmStats1     = readl(temp++);
+	out->pmInfo.viewPathPmInfo.cbcrWrPmStats0  = readl(temp++);
+	out->pmInfo.viewPathPmInfo.cbcrWrPmStats1  = readl(temp);
+#endif /* if 0 Jeff */
+}
+
+static void
+vfe_parse_interrupt_status(struct vfe_interrupt_status *ret,
+uint32_t irqStatusIn)
+{
+	struct vfe_irqenable hwstat;
+	boolean temp;
+
+	memset(&hwstat, 0, sizeof(hwstat));
+	memset(ret, 0, sizeof(*ret));
+
+	hwstat = *((struct vfe_irqenable *)(&irqStatusIn));
+
+	ret->camifErrorIrq = hwstat.camifErrorIrq;
+	ret->camifSofIrq = hwstat.camifSofIrq;
+	ret->camifEolIrq = hwstat.camifEolIrq;
+	ret->camifEofIrq = hwstat.camifEofIrq;
+	ret->camifEpoch1Irq = hwstat.camifEpoch1Irq;
+	ret->camifEpoch2Irq = hwstat.camifEpoch2Irq;
+	ret->camifOverflowIrq = hwstat.camifOverflowIrq;
+	ret->ceIrq = hwstat.ceIrq;
+	ret->regUpdateIrq = hwstat.regUpdateIrq;
+	ret->resetAckIrq = hwstat.resetAckIrq;
+	ret->encYPingpongIrq = hwstat.encYPingpongIrq;
+	ret->encCbcrPingpongIrq = hwstat.encCbcrPingpongIrq;
+	ret->viewYPingpongIrq = hwstat.viewYPingpongIrq;
+	ret->viewCbcrPingpongIrq = hwstat.viewCbcrPingpongIrq;
+	ret->rdPingpongIrq = hwstat.rdPingpongIrq;
+	ret->afPingpongIrq = hwstat.afPingpongIrq;
+	ret->awbPingpongIrq = hwstat.awbPingpongIrq;
+	ret->histPingpongIrq = hwstat.histPingpongIrq;
+	ret->encIrq = hwstat.encIrq;
+	ret->viewIrq = hwstat.viewIrq;
+	ret->busOverflowIrq = hwstat.busOverflowIrq;
+	ret->afOverflowIrq = hwstat.afOverflowIrq;
+	ret->awbOverflowIrq = hwstat.awbOverflowIrq;
+	ret->syncTimer0Irq = hwstat.syncTimer0Irq;
+	ret->syncTimer1Irq = hwstat.syncTimer1Irq;
+	ret->syncTimer2Irq = hwstat.syncTimer2Irq;
+	ret->asyncTimer0Irq = hwstat.asyncTimer0Irq;
+	ret->asyncTimer1Irq = hwstat.asyncTimer1Irq;
+	ret->asyncTimer2Irq = hwstat.asyncTimer2Irq;
+	ret->asyncTimer3Irq = hwstat.asyncTimer3Irq;
+	ret->axiErrorIrq = hwstat.axiErrorIrq;
+	ret->violationIrq = hwstat.violationIrq;
+
+	/* Logical OR of all error bits.  Although each irq corresponds to a
+	 * hardware bit, the fields here are already booleans, hence the
+	 * logical operators.
+	 */
+	temp =
+	    ret->camifErrorIrq ||
+	    ret->camifOverflowIrq ||
+	    ret->afOverflowIrq ||
+	    ret->awbOverflowIrq ||
+	    ret->awbPingpongIrq ||
+	    ret->afPingpongIrq ||
+	    ret->busOverflowIrq || ret->axiErrorIrq || ret->violationIrq;
+
+	ret->anyErrorIrqs = temp;
+
+	/* logical OR of any output path bits */
+	temp = ret->encYPingpongIrq || ret->encCbcrPingpongIrq || ret->encIrq;
+
+	ret->anyOutput2PathIrqs = temp;
+
+	temp = ret->viewYPingpongIrq || ret->viewCbcrPingpongIrq ||
+		ret->viewIrq;
+
+	ret->anyOutput1PathIrqs = temp;
+
+	ret->anyOutputPathIrqs =
+	    ret->anyOutput1PathIrqs || ret->anyOutput2PathIrqs;
+
+	/* logical OR of any sync timer bits */
+	temp = ret->syncTimer0Irq || ret->syncTimer1Irq || ret->syncTimer2Irq;
+
+	ret->anySyncTimerIrqs = temp;
+
+	/* logical OR of any async timer bits */
+	temp =
+	    ret->asyncTimer0Irq ||
+	    ret->asyncTimer1Irq || ret->asyncTimer2Irq || ret->asyncTimer3Irq;
+
+	ret->anyAsyncTimerIrqs = temp;
+
+	/* bool for all interrupts that are not allowed in idle state */
+	temp =
+	    ret->anyErrorIrqs ||
+	    ret->anyOutputPathIrqs ||
+	    ret->anySyncTimerIrqs ||
+	    ret->regUpdateIrq ||
+	    ret->awbPingpongIrq ||
+	    ret->afPingpongIrq ||
+	    ret->camifSofIrq || ret->camifEpoch2Irq || ret->camifEpoch1Irq;
+
+	ret->anyIrqForActiveStatesOnly = temp;
+}
+
+static void
+vfe_get_asf_frame_info(struct vfe_frame_asf_info *rc,
+struct vfe_irq_thread_msg *in)
+{
+	struct vfe_asf_info     asfInfoTemp;
+
+	memset(rc, 0, sizeof(*rc));
+	memset(&asfInfoTemp, 0, sizeof(asfInfoTemp));
+
+	asfInfoTemp = *((struct vfe_asf_info *)(&(in->asfMaxEdge)));
+
+	rc->asfHbiCount = asfInfoTemp.HBICount;
+	rc->asfMaxEdge = asfInfoTemp.maxEdge;
+}
+
+static void
+vfe_get_demosaic_frame_info(struct vfe_frame_bpc_info *rc,
+struct vfe_irq_thread_msg *in)
+{
+	struct vfe_bps_info     bpcInfoTemp;
+
+	memset(rc, 0, sizeof(*rc));
+	memset(&bpcInfoTemp, 0, sizeof(bpcInfoTemp));
+
+	bpcInfoTemp = *((struct vfe_bps_info *)(&(in->demosaicStatus)));
+
+	rc->greenDefectPixelCount = bpcInfoTemp.greenBadPixelCount;
+
+	rc->redBlueDefectPixelCount = bpcInfoTemp.RedBlueBadPixelCount;
+}
+
+static void
+vfe_get_camif_status(struct vfe_msg_camif_status *rc,
+struct vfe_irq_thread_msg *in)
+{
+	struct vfe_camif_stats camifStatusTemp;
+
+	memset(rc, 0, sizeof(*rc));
+	memset(&camifStatusTemp, 0, sizeof(camifStatusTemp));
+
+	camifStatusTemp = *((struct vfe_camif_stats *)(&(in->camifStatus)));
+
+	rc->camifState = (boolean) camifStatusTemp.camifHalt;
+	rc->lineCount = camifStatusTemp.lineCount;
+	rc->pixelCount = camifStatusTemp.pixelCount;
+}
+
+static void
+vfe_get_performance_monitor_data(struct vfe_bus_performance_monitor *rc,
+		struct vfe_irq_thread_msg *in)
+{
+	memset(rc, 0, sizeof(*rc));
+
+	rc->encPathPmInfo.yWrPmStats0 = in->pmInfo.encPathPmInfo.yWrPmStats0;
+	rc->encPathPmInfo.yWrPmStats1 = in->pmInfo.encPathPmInfo.yWrPmStats1;
+	rc->encPathPmInfo.cbcrWrPmStats0 =
+		in->pmInfo.encPathPmInfo.cbcrWrPmStats0;
+	rc->encPathPmInfo.cbcrWrPmStats1 =
+		in->pmInfo.encPathPmInfo.cbcrWrPmStats1;
+	rc->viewPathPmInfo.yWrPmStats0 = in->pmInfo.viewPathPmInfo.yWrPmStats0;
+	rc->viewPathPmInfo.yWrPmStats1 = in->pmInfo.viewPathPmInfo.yWrPmStats1;
+	rc->viewPathPmInfo.cbcrWrPmStats0 =
+		in->pmInfo.viewPathPmInfo.cbcrWrPmStats0;
+	rc->viewPathPmInfo.cbcrWrPmStats1 =
+	    in->pmInfo.viewPathPmInfo.cbcrWrPmStats1;
+}
+
+static void vfe_process_reg_update_irq(void)
+{
+	CDBG("vfe_process_reg_update_irq: ackPendingFlag is %d\n",
+	ctrl->vfeStartAckPendingFlag);
+	if (ctrl->vfeStartAckPendingFlag == TRUE) {
+		vfe_proc_ops(VFE_MSG_ID_START_ACK, NULL);
+		ctrl->vfeStartAckPendingFlag = FALSE;
+	} else
+		vfe_proc_ops(VFE_MSG_ID_UPDATE_ACK, NULL);
+}
+
+static void vfe_process_reset_irq(void)
+{
+	/* unsigned long flags; */
+
+	/* @todo This is causing issues, need further investigate */
+	/* spin_lock_irqsave(&ctrl->state_lock, flags); */
+	ctrl->vstate = VFE_STATE_IDLE;
+	/* spin_unlock_irqrestore(&ctrl->state_lock, flags); */
+
+	if (ctrl->vfeStopAckPending == TRUE) {
+		ctrl->vfeStopAckPending = FALSE;
+		vfe_proc_ops(VFE_MSG_ID_STOP_ACK, NULL);
+	} else {
+		vfe_set_default_reg_values();
+		vfe_proc_ops(VFE_MSG_ID_RESET_ACK, NULL);
+	}
+}
+
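+/* On a ping/pong irq, fetch the next fragment address from the circular
+ * addressBuffer and reprogram the ping or pong address register, alternating
+ * between the two on successive calls. */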
+static void vfe_process_pingpong_irq(struct vfe_output_path *in,
+	uint8_t fragmentCount)
+{
+	uint16_t circularIndex;
+	uint32_t nextFragmentAddr;
+
+	/* get next fragment address from circular buffer */
+	circularIndex    = (in->fragIndex) % (2 * fragmentCount);
+	nextFragmentAddr = in->addressBuffer[circularIndex];
+
+	in->fragIndex = circularIndex + 1;
+
+	/* use next fragment to program hardware ping/pong address. */
+	if (in->hwCurrentFlag == ping) {
+		writel(nextFragmentAddr, in->hwRegPingAddress);
+		in->hwCurrentFlag = pong;
+
+	} else {
+		writel(nextFragmentAddr, in->hwRegPongAddress);
+		in->hwCurrentFlag = ping;
+	}
+}
+
+static boolean vfe_send_video_msg(struct msm_vfe_resp *rp,
+		struct vfe_message *msg, void *data)
+{
+	struct vfe_msg_output *pPayload = data;
+
+	if (ctrl->vstate != VFE_STATE_ACTIVE)
+		return FALSE;
+	memcpy(&(msg->_u),
+		(void *)pPayload, sizeof(struct vfe_msg_output));
+
+	rp->phy.output_id = OUTPUT_TYPE_V;
+	CDBG("vfe_send_video_msg rp->type= %d\n", rp->type);
+
+	vfe_addr_convert(&(rp->phy),
+			rp->type, msg,
+			&(rp->extdata), &(rp->extlen));
+	return TRUE;
+}
+
+static boolean vfe_send_preview_msg(struct msm_vfe_resp *rp,
+		struct vfe_message *msg, void *data)
+{
+	struct vfe_msg_output *pPayload = data;
+
+	if (ctrl->vstate != VFE_STATE_ACTIVE)
+		return FALSE;
+
+	memcpy(&(msg->_u), (void *)pPayload, sizeof(struct vfe_msg_output));
+
+	rp->phy.output_id = OUTPUT_TYPE_P;
+	CDBG("vfe_send_preview_msg rp->type= %d\n", rp->type);
+
+	vfe_addr_convert(&(rp->phy),
+			rp->type, msg,
+			&(rp->extdata), &(rp->extlen));
+
+	return TRUE;
+}
+
+
+static boolean vfe_send_thumbnail_msg(struct msm_vfe_resp *rp,
+		struct vfe_message *msg, void *data)
+{
+	struct vfe_msg_output *pPayload = data;
+
+	if (ctrl->vstate != VFE_STATE_ACTIVE)
+		return FALSE;
+
+	memcpy(&(msg->_u), (void *)pPayload, sizeof(struct vfe_msg_output));
+
+	rp->phy.output_id = OUTPUT_TYPE_T;
+	CDBG("vfe_send_thumbnail_msg rp->type= %d\n", rp->type);
+
+	if (ctrl->viewPath.snapshotPendingCount <= 1)
+		ctrl->viewPath.ackPending = FALSE;
+
+	vfe_addr_convert(&(rp->phy),
+			rp->type, msg,
+			&(rp->extdata), &(rp->extlen));
+	return TRUE;
+}
+
+static boolean vfe_send_mainimage_msg(struct msm_vfe_resp *rp,
+		struct vfe_message *msg, void *data)
+{
+	struct vfe_msg_output *pPayload = data;
+
+	if (ctrl->vstate != VFE_STATE_ACTIVE)
+		return FALSE;
+
+	memcpy(&(msg->_u), (void *)pPayload, sizeof(struct vfe_msg_output));
+
+	rp->phy.output_id = OUTPUT_TYPE_S;
+	CDBG("vfe_send_mainimage_msg rp->type= %d\n", rp->type);
+
+	if (ctrl->encPath.snapshotPendingCount <= 1) {
+		ctrl->encPath.ackPending = FALSE;
+	}
+
+	vfe_addr_convert(&(rp->phy),
+			rp->type, msg,
+			&(rp->extdata), &(rp->extlen));
+
+	return TRUE;
+}
+
+static void vfe_send_output_msg(boolean whichOutputPath,
+	uint32_t yPathAddr, uint32_t cbcrPathAddr)
+{
+	struct vfe_msg_output msgPayload;
+
+	msgPayload.yBuffer = yPathAddr;
+	msgPayload.cbcrBuffer = cbcrPathAddr;
+
+	/* asf info is common for both output1 and output2 */
+#if 0
+	msgPayload.asfInfo.asfHbiCount = ctrl->vfeAsfFrameInfo.asfHbiCount;
+	msgPayload.asfInfo.asfMaxEdge = ctrl->vfeAsfFrameInfo.asfMaxEdge;
+
+	/* demosaic info is common for both output1 and output2 */
+	msgPayload.bpcInfo.greenDefectPixelCount =
+		ctrl->vfeBpcFrameInfo.greenDefectPixelCount;
+	msgPayload.bpcInfo.redBlueDefectPixelCount =
+		ctrl->vfeBpcFrameInfo.redBlueDefectPixelCount;
+#endif /* if 0 */
+
+	/* frame ID is common for both paths. */
+	msgPayload.frameCounter = ctrl->vfeFrameId;
+
+	if (whichOutputPath) {
+		/* msgPayload.pmData = ctrl->vfePmData.encPathPmInfo; */
+		ctrl->encPath.ackPending = TRUE;
+
+		if (ctrl->vfeOperationMode == 0) {
+			if (ctrl->axiOutputMode ==
+				VFE_AXI_OUTPUT_MODE_Output1AndOutput2) {
+				/* video mode */
+				vfe_proc_ops(VFE_MSG_ID_OUTPUT_V, &msgPayload);
+			} else{
+				/* preview mode */
+				vfe_proc_ops(VFE_MSG_ID_OUTPUT_P, &msgPayload);
+			}
+		} else {
+			vfe_proc_ops(VFE_MSG_ID_OUTPUT_S, &msgPayload);
+		}
+
+	} else {
+		/* physical output1 path from vfe */
+		ctrl->viewPath.ackPending = TRUE;
+
+		if (ctrl->vfeOperationMode == 0) {
+			vfe_proc_ops(VFE_MSG_ID_OUTPUT_P, &msgPayload);
+			CDBG(" video mode display output.\n");
+
+		} else{
+			vfe_proc_ops(VFE_MSG_ID_OUTPUT_T, &msgPayload);
+			CDBG(" snapshot mode thumbnail output.\n");
+		}
+	}
+}
+
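+/* Frame-done handling for the multi-fragment case: addressBuffer holds two
+ * frames' worth of fragment addresses, so report the completed frame and
+ * refill its slots from nextFrameAddrBuf before toggling currentFrame. */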
+static void vfe_process_frame_done_irq_multi_frag(struct vfe_output_path_combo
+						  *in)
+{
+	uint32_t yAddress, cbcrAddress;
+	uint16_t idx;
+	uint32_t *ptrY;
+	uint32_t *ptrCbcr;
+	const uint32_t *ptrSrc;
+	uint8_t i;
+
+	if (!in->ackPending) {
+
+		idx = (in->currentFrame) * (in->fragCount);
+
+		/* Send output message. */
+		yAddress = in->yPath.addressBuffer[idx];
+		cbcrAddress = in->cbcrPath.addressBuffer[idx];
+
+		/* copy next frame to current frame. */
+		ptrSrc  = in->nextFrameAddrBuf;
+		ptrY = (uint32_t *)&in->yPath.addressBuffer[idx];
+		ptrCbcr = (uint32_t *)&in->cbcrPath.addressBuffer[idx];
+
+		/* Copy Y address */
+		for (i = 0; i < in->fragCount; i++)
+			*ptrY++ = *ptrSrc++;
+
+		/* Copy Cbcr address */
+		for (i = 0; i < in->fragCount; i++)
+			*ptrCbcr++ = *ptrSrc++;
+
+		vfe_send_output_msg(in->whichOutputPath, yAddress, cbcrAddress);
+
+	} else {
+		if (in->whichOutputPath == 0)
+			ctrl->vfeDroppedFrameCounts.output1Count++;
+
+		if (in->whichOutputPath == 1)
+			ctrl->vfeDroppedFrameCounts.output2Count++;
+	}
+
+	/* toggle current frame. */
+	in->currentFrame = in->currentFrame^1;
+
+	if (ctrl->vfeOperationMode)
+		in->snapshotPendingCount--;
+}
+
+static void vfe_process_frame_done_irq_no_frag_io(
+		struct vfe_output_path_combo *in,
+		uint32_t *pNextAddr,
+	uint32_t *pdestRenderAddr)
+{
+	uint32_t busPingPongStatus;
+	uint32_t tempAddress;
+
+	/* 1. read hw status register. */
+	busPingPongStatus = readl(ctrl->vfebase + VFE_BUS_PINGPONG_STATUS);
+
+	CDBG("hardware status is 0x%x\n", busPingPongStatus);
+
+	/* 2. determine ping or pong */
+	/* use cbcr status */
+	busPingPongStatus = busPingPongStatus & (1<<(in->cbcrStatusBit));
+
+	/* 3. read out address and update address */
+	if (busPingPongStatus == 0) {
+		/* hw is working on ping, render pong buffer */
+		/* a. read out pong address */
+		/* read out y address. */
+		tempAddress = readl(in->yPath.hwRegPongAddress);
+
+		CDBG("pong 1 addr = 0x%x\n", tempAddress);
+		*pdestRenderAddr++ = tempAddress;
+		/* read out cbcr address. */
+		tempAddress = readl(in->cbcrPath.hwRegPongAddress);
+
+		CDBG("pong 2 addr = 0x%x\n", tempAddress);
+		*pdestRenderAddr = tempAddress;
+
+		/* b. update pong address */
+		writel(*pNextAddr++, in->yPath.hwRegPongAddress);
+		writel(*pNextAddr, in->cbcrPath.hwRegPongAddress);
+	} else {
+		/* hw is working on pong, render ping buffer */
+
+		/* a. read out ping address */
+		tempAddress = readl(in->yPath.hwRegPingAddress);
+		CDBG("ping 1 addr = 0x%x\n", tempAddress);
+		*pdestRenderAddr++ = tempAddress;
+		tempAddress = readl(in->cbcrPath.hwRegPingAddress);
+
+		CDBG("ping 2 addr = 0x%x\n", tempAddress);
+		*pdestRenderAddr = tempAddress;
+
+		/* b. update ping address */
+		writel(*pNextAddr++, in->yPath.hwRegPingAddress);
+		CDBG("NextAddress = 0x%x\n", *pNextAddr);
+		writel(*pNextAddr, in->cbcrPath.hwRegPingAddress);
+	}
+}
+
+static void vfe_process_frame_done_irq_no_frag(struct vfe_output_path_combo *in)
+{
+	uint32_t addressToRender[2];
+
+	if (!in->ackPending) {
+		vfe_process_frame_done_irq_no_frag_io(in,
+						      in->nextFrameAddrBuf,
+						      addressToRender);
+
+		/* use addressToRender to send out message. */
+		vfe_send_output_msg(in->whichOutputPath,
+				addressToRender[0], addressToRender[1]);
+
+	} else {
+		/* ackPending is still set, so accumulate the dropped frame
+		 * count.  These counts can be read through an ioctl command. */
+		CDBG("waiting frame ACK\n");
+
+		if (in->whichOutputPath == 0)
+			ctrl->vfeDroppedFrameCounts.output1Count++;
+
+		if (in->whichOutputPath == 1)
+			ctrl->vfeDroppedFrameCounts.output2Count++;
+	}
+
+	/* In multishot mode, if the upper layer did not ack, a snapshot done
+	 * msg is still sent out, even though the number of frames sent out
+	 * may be less than the desired number of frames.  The snapshot done
+	 * msg is helpful to indicate that the vfe pipeline has stopped and
+	 * is in a known good state.
+	 */
+	if (ctrl->vfeOperationMode)
+		in->snapshotPendingCount--;
+}
+
+static void vfe_process_output_path_irq(struct vfe_interrupt_status *irqstatus)
+{
+	/* unsigned long flags; */
+
+	/* process the view path interrupts */
+	if (irqstatus->anyOutput1PathIrqs) {
+		if (ctrl->viewPath.multiFrag) {
+			if (irqstatus->viewCbcrPingpongIrq)
+				vfe_process_pingpong_irq(
+					&ctrl->viewPath.cbcrPath,
+					ctrl->viewPath.fragCount);
+
+			if (irqstatus->viewYPingpongIrq)
+				vfe_process_pingpong_irq(
+					&ctrl->viewPath.yPath,
+					ctrl->viewPath.fragCount);
+
+			if (irqstatus->viewIrq)
+				vfe_process_frame_done_irq_multi_frag(
+					&ctrl->viewPath);
+		} else {
+			/* typical case for no fragment,
+			 * only frame done irq is enabled. */
+			if (irqstatus->viewIrq)
+				vfe_process_frame_done_irq_no_frag(
+					&ctrl->viewPath);
+		}
+	}
+
+	/* process the encoder path interrupts */
+	if (irqstatus->anyOutput2PathIrqs) {
+		if (ctrl->encPath.multiFrag) {
+			if (irqstatus->encCbcrPingpongIrq)
+				vfe_process_pingpong_irq(
+					&ctrl->encPath.cbcrPath,
+					ctrl->encPath.fragCount);
+
+			if (irqstatus->encYPingpongIrq)
+				vfe_process_pingpong_irq(
+					&ctrl->encPath.yPath,
+					ctrl->encPath.fragCount);
+
+			if (irqstatus->encIrq)
+				vfe_process_frame_done_irq_multi_frag(
+					&ctrl->encPath);
+		} else {
+			if (irqstatus->encIrq)
+				vfe_process_frame_done_irq_no_frag(
+					&ctrl->encPath);
+		}
+	}
+
+	if (ctrl->vfeOperationMode) {
+		if ((ctrl->encPath.snapshotPendingCount == 0) &&
+				(ctrl->viewPath.snapshotPendingCount == 0)) {
+
+			/* @todo This is causing issues, needs further investigation. */
+			/* spin_lock_irqsave(&ctrl->state_lock, flags); */
+			ctrl->vstate = VFE_STATE_IDLE;
+			/* spin_unlock_irqrestore(&ctrl->state_lock, flags); */
+
+			vfe_proc_ops(VFE_MSG_ID_SNAPSHOT_DONE, NULL);
+			vfe_camif_stop_immediately();
+			vfe_prog_hw_testgen_cmd(VFE_TEST_GEN_STOP);
+			vfe_pm_stop();
+		}
+	}
+}
+
+static void __vfe_do_tasklet(struct isr_queue_cmd *qcmd)
+{
+	if (qcmd->vfeInterruptStatus.regUpdateIrq) {
+		CDBG("irq regUpdateIrq\n");
+		vfe_process_reg_update_irq();
+	}
+
+	if (qcmd->vfeInterruptStatus.resetAckIrq) {
+		CDBG("%s: process resetAckIrq\n", __func__);
+		vfe_process_reset_irq();
+	}
+
+	if (ctrl->vstate != VFE_STATE_ACTIVE)
+		return;
+
+#if 0
+	if (qcmd->vfeInterruptStatus.camifEpoch1Irq)
+		vfe_proc_ops(VFE_MSG_ID_EPOCH1);
+
+	if (qcmd->vfeInterruptStatus.camifEpoch2Irq)
+		vfe_proc_ops(VFE_MSG_ID_EPOCH2);
+#endif /* Jeff */
+
+	/* next, check output path related interrupts. */
+	if (qcmd->vfeInterruptStatus.anyOutputPathIrqs) {
+		CDBG("irq: anyOutputPathIrqs\n");
+		vfe_process_output_path_irq(&qcmd->vfeInterruptStatus);
+	}
+
+	if (qcmd->vfeInterruptStatus.afPingpongIrq)
+		vfe_process_stats_af_irq();
+
+	if (qcmd->vfeInterruptStatus.awbPingpongIrq)
+		vfe_process_stats_awb_irq();
+
+	/* any error irqs*/
+	if (qcmd->vfeInterruptStatus.anyErrorIrqs)
+		vfe_process_error_irq(&qcmd->vfeInterruptStatus);
+
+#if 0
+	if (qcmd->vfeInterruptStatus.anySyncTimerIrqs)
+		vfe_process_sync_timer_irq();
+
+	if (qcmd->vfeInterruptStatus.anyAsyncTimerIrqs)
+		vfe_process_async_timer_irq();
+#endif /* Jeff */
+
+	if (qcmd->vfeInterruptStatus.camifSofIrq) {
+		CDBG("irq: camifSofIrq\n");
+		vfe_process_camif_sof_irq();
+	}
+}
+
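+/* ctrl->irqs[] is used as a small ring buffer of interrupt snapshots:
+ * irq_get is the producer index advanced in the ISR, irq_put the consumer
+ * index advanced by the tasklet. */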
+static struct isr_queue_cmd *get_irq_cmd_nosync(void)
+{
+	int old_get = ctrl->irq_get++;
+	ctrl->irq_get = ctrl->irq_get % ARRAY_SIZE(ctrl->irqs);
+	if (ctrl->irq_get == ctrl->irq_put) {
+		pr_err("%s: out of irq command packets\n", __func__);
+		ctrl->irq_get = old_get;
+		return NULL;
+	}
+
+	return ctrl->irqs + old_get;
+}
+
+static struct isr_queue_cmd *next_irq_cmd(void)
+{
+	unsigned long flags;
+	struct isr_queue_cmd *cmd;
+	spin_lock_irqsave(&ctrl->irqs_lock, flags);
+	if (ctrl->irq_get == ctrl->irq_put) {
+		spin_unlock_irqrestore(&ctrl->irqs_lock, flags);
+		return NULL; /* already empty */
+	}
+	cmd = ctrl->irqs + ctrl->irq_put;
+	spin_unlock_irqrestore(&ctrl->irqs_lock, flags);
+	return cmd;
+}
+
+static void put_irq_cmd(void)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&ctrl->irqs_lock, flags);
+	if (ctrl->irq_get == ctrl->irq_put) {
+		spin_unlock_irqrestore(&ctrl->irqs_lock, flags);
+		return; /* already empty */
+	}
+	ctrl->irq_put++;
+	ctrl->irq_put %= ARRAY_SIZE(ctrl->irqs);
+	spin_unlock_irqrestore(&ctrl->irqs_lock, flags);
+}
+
+static void vfe_do_tasklet(unsigned long data)
+{
+	int cnt = 0;
+	unsigned long flags;
+	struct isr_queue_cmd *qcmd = NULL;
+
+	spin_lock_irqsave(&msm_vfe_ctrl_lock, flags);
+	if (!ctrl) {
+		spin_unlock_irqrestore(&msm_vfe_ctrl_lock, flags);
+		return;
+	}
+
+	CDBG("%s\n", __func__);
+
+	while ((qcmd = next_irq_cmd())) {
+		__vfe_do_tasklet(qcmd);
+		put_irq_cmd();
+		cnt++;
+	}
+
+	if (cnt > ARRAY_SIZE(ctrl->irqs)/2)
+		CDBG("%s: serviced %d vfe interrupts\n", __func__, cnt);
+
+	spin_unlock_irqrestore(&msm_vfe_ctrl_lock, flags);
+}
+
+DECLARE_TASKLET(vfe_tasklet, vfe_do_tasklet, 0);
+
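+/* Top-half interrupt handler: read and clear the VFE irq status, snapshot it
+ * into an isr_queue_cmd and schedule the tasklet to do the real work. */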
+static irqreturn_t vfe_parse_irq(int irq_num, void *data)
+{
+	unsigned long flags;
+	uint32_t irqStatusLocal;
+	struct vfe_irq_thread_msg irq;
+	struct isr_queue_cmd *qcmd;
+
+	CDBG("vfe_parse_irq\n");
+
+	if (!atomic_read(&ctrl->vfe_serv_interrupt))
+		return IRQ_HANDLED;
+
+	vfe_read_irq_status(&irq);
+
+	if (irq.vfeIrqStatus == 0) {
+		CDBG("vfe_parse_irq: irq.vfeIrqStatus is 0\n");
+		return IRQ_HANDLED;
+	}
+
+	if (ctrl->vfeStopAckPending)
+		irqStatusLocal = (VFE_IMASK_WHILE_STOPPING & irq.vfeIrqStatus);
+	else
+		irqStatusLocal =
+			((ctrl->vfeImaskPacked | VFE_IMASK_ERROR_ONLY) &
+				irq.vfeIrqStatus);
+
+	spin_lock_irqsave(&ctrl->irqs_lock, flags);
+	qcmd = get_irq_cmd_nosync();
+	if (!qcmd) {
+		spin_unlock_irqrestore(&ctrl->irqs_lock, flags);
+		goto done;
+	}
+	/* first parse the interrupt status to local data structures. */
+	vfe_parse_interrupt_status(&qcmd->vfeInterruptStatus, irqStatusLocal);
+	vfe_get_asf_frame_info(&qcmd->vfeAsfFrameInfo, &irq);
+	vfe_get_demosaic_frame_info(&qcmd->vfeBpcFrameInfo, &irq);
+	vfe_get_camif_status(&qcmd->vfeCamifStatusLocal, &irq);
+	vfe_get_performance_monitor_data(&qcmd->vfePmData, &irq);
+	spin_unlock_irqrestore(&ctrl->irqs_lock, flags);
+	tasklet_schedule(&vfe_tasklet);
+
+done:
+	/* clear the pending interrupt of the same kind.*/
+	writel(irq.vfeIrqStatus, ctrl->vfebase + VFE_IRQ_CLEAR);
+
+	return IRQ_HANDLED;
+}
+
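+/* Probe-time initialisation: claim the VFE register region described by the
+ * sensor platform data, map it, hook up the interrupt line and remember the
+ * upper-layer callback table and sync data. */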
+int vfe_cmd_init(struct msm_vfe_callback *presp,
+	struct platform_device *pdev, void *sdata)
+{
+	struct resource	*vfemem, *vfeirq, *vfeio;
+	int rc;
+	struct msm_camera_sensor_info *s_info;
+	s_info = pdev->dev.platform_data;
+
+	pdev->resource = s_info->resource;
+	pdev->num_resources = s_info->num_resources;
+
+	vfemem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!vfemem) {
+		pr_err("%s: no mem resource\n", __func__);
+		return -ENODEV;
+	}
+
+	vfeirq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!vfeirq) {
+		pr_err("%s: no irq resource\n", __func__);
+		return -ENODEV;
+	}
+
+	vfeio = request_mem_region(vfemem->start,
+		resource_size(vfemem), pdev->name);
+	if (!vfeio) {
+		pr_err("%s: VFE region already claimed\n", __func__);
+		return -EBUSY;
+	}
+
+	ctrl = kzalloc(sizeof(struct msm_vfe8x_ctrl), GFP_KERNEL);
+	if (!ctrl) {
+		pr_err("%s: out of memory\n", __func__);
+		rc = -ENOMEM;
+		goto cmd_init_failed1;
+	}
+	atomic_set(&ctrl->vfe_serv_interrupt, 0);
+	ctrl->vfeirq  = vfeirq->start;
+
+	ctrl->vfebase =
+		ioremap(vfemem->start, resource_size(vfemem));
+	if (!ctrl->vfebase) {
+		pr_err("%s: ioremap failed\n", __func__);
+		rc = -ENOMEM;
+		goto cmd_init_failed2;
+	}
+
+	rc = request_irq(ctrl->vfeirq, vfe_parse_irq,
+		IRQF_TRIGGER_RISING, "vfe", NULL);
+	if (rc < 0) {
+		pr_err("%s: request_irq(%d) failed\n", __func__, ctrl->vfeirq);
+		goto cmd_init_failed2;
+	}
+
+	if (presp && presp->vfe_resp)
+		ctrl->resp = presp;
+	else {
+		pr_err("%s: no vfe_resp function\n", __func__);
+
+		rc = -EIO;
+		goto cmd_init_failed3;
+	}
+
+	ctrl->syncdata = sdata;
+	return 0;
+
+cmd_init_failed3:
+	disable_irq(ctrl->vfeirq);
+	free_irq(ctrl->vfeirq, NULL);
+	iounmap(ctrl->vfebase);
+cmd_init_failed2:
+	kfree(ctrl);
+cmd_init_failed1:
+	release_mem_region(vfemem->start, resource_size(vfemem));
+	return rc;
+}
+
+void vfe_cmd_release(struct platform_device *dev)
+{
+	struct resource	*mem;
+	unsigned long flags;
+	atomic_set(&ctrl->vfe_serv_interrupt, 0);
+	disable_irq(ctrl->vfeirq);
+	free_irq(ctrl->vfeirq, NULL);
+
+	iounmap(ctrl->vfebase);
+	mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	release_mem_region(mem->start, resource_size(mem));
+
+	spin_lock_irqsave(&msm_vfe_ctrl_lock, flags);
+	kfree(ctrl);
+	ctrl = NULL;
+	spin_unlock_irqrestore(&msm_vfe_ctrl_lock, flags);
+}
+
+void vfe_stats_af_stop(void)
+{
+	ctrl->vfeStatsCmdLocal.autoFocusEnable = FALSE;
+	ctrl->vfeImaskLocal.afPingpongIrq = FALSE;
+}
+
+void vfe_stop(void)
+{
+	int spin_cnt = 0;
+	uint32_t vfeAxiStatus;
+
+	/* Reset the hardware modules; the stop ack message is sent when
+	 * the reset IRQ arrives. */
+	ctrl->vfeStopAckPending = TRUE;
+
+	ctrl->vfeStatsPingPongReloadFlag = FALSE;
+	vfe_pm_stop();
+
+	/* disable all interrupts.  */
+	vfe_program_irq_mask(VFE_DISABLE_ALL_IRQS);
+
+	/* In either continuous or snapshot mode, the stop command can be
+	 * issued at any time.
+	 */
+	vfe_camif_stop_immediately();
+	vfe_program_axi_cmd(AXI_HALT);
+	vfe_prog_hw_testgen_cmd(VFE_TEST_GEN_STOP);
+
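+	/* Wait for the AXI halt command to be reflected in the AXI status. */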
+	do {
+		vfeAxiStatus = vfe_read_axi_status();
+		spin_cnt++;
+	} while (!(vfeAxiStatus & AXI_STATUS_BUSY_MASK));
+	if (spin_cnt > 1)
+		pr_warning("%s: spin_cnt %d\n", __func__, spin_cnt);
+
+	vfe_program_axi_cmd(AXI_HALT_CLEAR);
+
+	/* clear all pending interrupts */
+	writel(VFE_CLEAR_ALL_IRQS, ctrl->vfebase + VFE_IRQ_CLEAR);
+
+	/* enable reset_ack and async timer interrupt only while stopping
+	 * the pipeline.
+	 */
+	vfe_program_irq_mask(VFE_IMASK_WHILE_STOPPING);
+
+	vfe_program_global_reset_cmd(VFE_RESET_UPON_STOP_CMD);
+}
+
+void vfe_update(void)
+{
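+	/* Recompute the stats master enable from the individual auto-focus
+	 * and white-balance/exposure (axw) enables, then push the updated
+	 * module, stats and interrupt mask configuration to the hardware. */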
+	ctrl->vfeModuleEnableLocal.statsEnable =
+		ctrl->vfeStatsCmdLocal.autoFocusEnable |
+		ctrl->vfeStatsCmdLocal.axwEnable;
+
+	vfe_reg_module_cfg(&ctrl->vfeModuleEnableLocal);
+
+	vfe_program_stats_cmd(&ctrl->vfeStatsCmdLocal);
+
+	ctrl->vfeImaskPacked = vfe_irq_pack(ctrl->vfeImaskLocal);
+	vfe_program_irq_mask(ctrl->vfeImaskPacked);
+
+	if ((ctrl->vfeModuleEnableLocal.statsEnable == TRUE) &&
+			(ctrl->vfeStatsPingPongReloadFlag == FALSE)) {
+		ctrl->vfeStatsPingPongReloadFlag = TRUE;
+
+		ctrl->vfeBusCmdLocal.statsPingpongReload = TRUE;
+		vfe_reg_bus_cmd(&ctrl->vfeBusCmdLocal);
+	}
+
+	vfe_program_reg_update_cmd(VFE_REG_UPDATE_TRIGGER);
+}
+
+int vfe_rgb_gamma_update(struct vfe_cmd_rgb_gamma_config *in)
+{
+	int rc = 0;
+
+	ctrl->vfeModuleEnableLocal.rgbLUTEnable = in->enable;
+
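+	/*
+	 * The RGB gamma LUT is double banked per channel: toggle the bank
+	 * select for each affected channel, write the new table into the now
+	 * inactive bank, and switch banks via the LUT bank select register
+	 * programmed at the end of this function.
+	 */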
+	switch (in->channelSelect) {
+	case RGB_GAMMA_CH0_SELECTED:
+		ctrl->vfeGammaLutSel.ch0BankSelect ^= 1;
+		vfe_write_gamma_table(0,
+				      ctrl->vfeGammaLutSel.ch0BankSelect,
+				      in->table);
+		break;
+
+	case RGB_GAMMA_CH1_SELECTED:
+		ctrl->vfeGammaLutSel.ch1BankSelect ^= 1;
+		vfe_write_gamma_table(1,
+				      ctrl->vfeGammaLutSel.ch1BankSelect,
+				      in->table);
+		break;
+
+	case RGB_GAMMA_CH2_SELECTED:
+		ctrl->vfeGammaLutSel.ch2BankSelect ^= 1;
+		vfe_write_gamma_table(2,
+				      ctrl->vfeGammaLutSel.ch2BankSelect,
+				      in->table);
+		break;
+
+	case RGB_GAMMA_CH0_CH1_SELECTED:
+		ctrl->vfeGammaLutSel.ch0BankSelect ^= 1;
+		ctrl->vfeGammaLutSel.ch1BankSelect ^= 1;
+		vfe_write_gamma_table(0, ctrl->vfeGammaLutSel.ch0BankSelect,
+			in->table);
+		vfe_write_gamma_table(1, ctrl->vfeGammaLutSel.ch1BankSelect,
+			in->table);
+		break;
+
+	case RGB_GAMMA_CH0_CH2_SELECTED:
+		ctrl->vfeGammaLutSel.ch0BankSelect ^= 1;
+		ctrl->vfeGammaLutSel.ch2BankSelect ^= 1;
+		vfe_write_gamma_table(0, ctrl->vfeGammaLutSel.ch0BankSelect,
+			in->table);
+		vfe_write_gamma_table(2, ctrl->vfeGammaLutSel.ch2BankSelect,
+			in->table);
+		break;
+
+	case RGB_GAMMA_CH1_CH2_SELECTED:
+		ctrl->vfeGammaLutSel.ch1BankSelect ^= 1;
+		ctrl->vfeGammaLutSel.ch2BankSelect ^= 1;
+		vfe_write_gamma_table(1, ctrl->vfeGammaLutSel.ch1BankSelect,
+			in->table);
+		vfe_write_gamma_table(2, ctrl->vfeGammaLutSel.ch2BankSelect,
+			in->table);
+		break;
+
+	case RGB_GAMMA_CH0_CH1_CH2_SELECTED:
+		ctrl->vfeGammaLutSel.ch0BankSelect ^= 1;
+		ctrl->vfeGammaLutSel.ch1BankSelect ^= 1;
+		ctrl->vfeGammaLutSel.ch2BankSelect ^= 1;
+		vfe_write_gamma_table(0, ctrl->vfeGammaLutSel.ch0BankSelect,
+			in->table);
+		vfe_write_gamma_table(1, ctrl->vfeGammaLutSel.ch1BankSelect,
+			in->table);
+		vfe_write_gamma_table(2, ctrl->vfeGammaLutSel.ch2BankSelect,
+			in->table);
+		break;
+
+	default:
+		pr_err("%s: invalid gamma channel %d\n", __func__,
+			in->channelSelect);
+		return -EINVAL;
+	} /* switch */
+
+	/* update the gammaLutSel register. */
+	vfe_program_lut_bank_sel(&ctrl->vfeGammaLutSel);
+
+	return rc;
+}
+
+int vfe_rgb_gamma_config(struct vfe_cmd_rgb_gamma_config *in)
+{
+	int rc = 0;
+
+	ctrl->vfeModuleEnableLocal.rgbLUTEnable = in->enable;
+
+	switch (in->channelSelect) {
+	case RGB_GAMMA_CH0_SELECTED:
+		vfe_write_gamma_table(0, 0, in->table);
+		break;
+
+	case RGB_GAMMA_CH1_SELECTED:
+		vfe_write_gamma_table(1, 0, in->table);
+		break;
+
+	case RGB_GAMMA_CH2_SELECTED:
+		vfe_write_gamma_table(2, 0, in->table);
+		break;
+
+	case RGB_GAMMA_CH0_CH1_SELECTED:
+		vfe_write_gamma_table(0, 0, in->table);
+		vfe_write_gamma_table(1, 0, in->table);
+		break;
+
+	case RGB_GAMMA_CH0_CH2_SELECTED:
+		vfe_write_gamma_table(0, 0, in->table);
+		vfe_write_gamma_table(2, 0, in->table);
+		break;
+
+	case RGB_GAMMA_CH1_CH2_SELECTED:
+		vfe_write_gamma_table(1, 0, in->table);
+		vfe_write_gamma_table(2, 0, in->table);
+		break;
+
+	case RGB_GAMMA_CH0_CH1_CH2_SELECTED:
+		vfe_write_gamma_table(0, 0, in->table);
+		vfe_write_gamma_table(1, 0, in->table);
+		vfe_write_gamma_table(2, 0, in->table);
+		break;
+
+	default:
+		pr_err("%s: invalid gamma channel %d\n", __func__,
+			in->channelSelect);
+		rc = -EINVAL;
+		break;
+	} /* switch */
+
+	return rc;
+}
+
+void vfe_stats_af_ack(struct vfe_cmd_stats_af_ack *in)
+{
+	ctrl->afStatsControl.nextFrameAddrBuf = in->nextAFOutputBufferAddr;
+	ctrl->afStatsControl.ackPending = FALSE;
+}
+
+void vfe_stats_wb_exp_ack(struct vfe_cmd_stats_wb_exp_ack *in)
+{
+	ctrl->awbStatsControl.nextFrameAddrBuf = in->nextWbExpOutputBufferAddr;
+	ctrl->awbStatsControl.ackPending = FALSE;
+}
+
+
+void vfe_output_v_ack(struct vfe_cmd_output_ack *in)
+{
+	const uint32_t *psrc;
+	uint32_t *pdest;
+	uint8_t i;
+
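+	/* The ACK supplies the Y and chroma fragment addresses to be used
+	 * for the next frame on the encoder (video) path. */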
+	pdest = ctrl->encPath.nextFrameAddrBuf;
+
+	CDBG("video_frame_ack: ack addr = 0x%x\n", in->ybufaddr[0]);
+
+	psrc = in->ybufaddr;
+	for (i = 0; i < ctrl->encPath.fragCount; i++)
+		*pdest++ = *psrc++;
+
+	psrc = in->chromabufaddr;
+	for (i = 0; i < ctrl->encPath.fragCount; i++)
+		*pdest++ = *psrc++;
+
+	ctrl->encPath.ackPending = FALSE;
+}
+
+void vfe_output_p_ack(struct vfe_cmd_output_ack *in)
+{
+	const uint32_t *psrc;
+	uint32_t *pdest;
+	uint8_t i;
+
+	if (ctrl->axiOutputMode == VFE_AXI_OUTPUT_MODE_Output1AndOutput2) {
+		/* video mode, preview comes from output1 path */
+
+		pdest = ctrl->viewPath.nextFrameAddrBuf;
+
+		psrc = in->ybufaddr;
+		for (i = 0; i < ctrl->viewPath.fragCount; i++)
+			*pdest++ = *psrc++;
+
+		psrc = in->chromabufaddr;
+		for (i = 0; i < ctrl->viewPath.fragCount; i++)
+			*pdest++ = *psrc++;
+
+		ctrl->viewPath.ackPending = FALSE;
+
+	} else { /* preview mode, preview comes from output2 path. */
+		pdest = ctrl->encPath.nextFrameAddrBuf;
+
+		psrc = in->ybufaddr;
+		for (i = 0; i < ctrl->encPath.fragCount; i++)
+			*pdest++ = *psrc++;
+
+		psrc = in->chromabufaddr;
+		for (i = 0; i < ctrl->encPath.fragCount; i++)
+			*pdest++ = *psrc++;
+
+		ctrl->encPath.ackPending = FALSE;
+
+	}
+}
+
+void vfe_start(struct vfe_cmd_start *in)
+{
+	uint32_t  pmstatus = 0;
+	boolean rawmode;
+	uint32_t  demperiod = 0;
+	uint32_t  demeven = 0;
+	uint32_t  demodd = 0;
+
+	/* derived from other commands (camif config, axi output config,
+	 * etc.)
+	 */
+	struct vfe_cfg hwcfg;
+	struct vfe_upsample_cfg chromupcfg;
+
+	CDBG("vfe_start operationMode = %d\n", in->operationMode);
+
+	memset(&hwcfg, 0, sizeof(hwcfg));
+	memset(&chromupcfg, 0, sizeof(chromupcfg));
+
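+	/* Pick the demux period and even/odd channel extraction patterns that
+	 * match the sensor's Bayer or YUV pixel ordering. */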
+	switch (in->pixel) {
+	case VFE_BAYER_RGRGRG:
+		demperiod = 1;
+		demeven = 0xC9;
+		demodd = 0xAC;
+		break;
+
+	case VFE_BAYER_GRGRGR:
+		demperiod = 1;
+		demeven = 0x9C;
+		demodd = 0xCA;
+		break;
+
+	case VFE_BAYER_BGBGBG:
+		demperiod = 1;
+		demeven = 0xCA;
+		demodd = 0x9C;
+		break;
+
+	case VFE_BAYER_GBGBGB:
+		demperiod = 1;
+		demeven = 0xAC;
+		demodd = 0xC9;
+		break;
+
+	case VFE_YUV_YCbYCr:
+		demperiod = 3;
+		demeven = 0x9CAC;
+		demodd = 0x9CAC;
+		break;
+
+	case VFE_YUV_YCrYCb:
+		demperiod = 3;
+		demeven = 0xAC9C;
+		demodd = 0xAC9C;
+		break;
+
+	case VFE_YUV_CbYCrY:
+		demperiod = 3;
+		demeven = 0xC9CA;
+		demodd = 0xC9CA;
+		break;
+
+	case VFE_YUV_CrYCbY:
+		demperiod = 3;
+		demeven = 0xCAC9;
+		demodd = 0xCAC9;
+		break;
+
+	default:
+		return;
+	}
+
+	vfe_config_demux(demperiod, demeven, demodd);
+
+	vfe_program_lut_bank_sel(&ctrl->vfeGammaLutSel);
+
+	/* save variables to local. */
+	ctrl->vfeOperationMode = in->operationMode;
+	if (ctrl->vfeOperationMode == VFE_START_OPERATION_MODE_SNAPSHOT) {
+
+		update_axi_qos(MSM_AXI_QOS_SNAPSHOT);
+		/* in snapshot mode, initialize snapshot count*/
+		ctrl->vfeSnapShotCount = in->snapshotCount;
+
+		/* Save the requested count; this is a temporary measure to
+		 * help with HJR / multishot. */
+		ctrl->vfeRequestedSnapShotCount = ctrl->vfeSnapShotCount;
+
+		CDBG("requested snapshot count = %d\n", ctrl->vfeSnapShotCount);
+
+		/* Assume the same skip pattern and period for both paths
+		 * when both paths are used. */
+		if (ctrl->viewPath.pathEnabled) {
+			ctrl->viewPath.snapshotPendingCount = in->snapshotCount;
+
+			ctrl->vfeFrameSkipPattern =
+				ctrl->vfeFrameSkip.output1Pattern;
+			ctrl->vfeFrameSkipPeriod =
+				ctrl->vfeFrameSkip.output1Period;
+		}
+
+		if (ctrl->encPath.pathEnabled) {
+			ctrl->encPath.snapshotPendingCount = in->snapshotCount;
+
+			ctrl->vfeFrameSkipPattern =
+				ctrl->vfeFrameSkip.output2Pattern;
+			ctrl->vfeFrameSkipPeriod =
+				ctrl->vfeFrameSkip.output2Period;
+		}
+	} else
+		update_axi_qos(MSM_AXI_QOS_PREVIEW);
+
+	/* For a Bayer sensor, enable color conversion: when stats are
+	 * enabled, the stats modules need color-converted data. */
+	if (in->pixel <= VFE_BAYER_GBGBGB)
+		ctrl->vfeStatsCmdLocal.colorConversionEnable = TRUE;
+
+	vfe_program_stats_cmd(&ctrl->vfeStatsCmdLocal);
+
+	if (in->pixel >= VFE_YUV_YCbYCr)
+		ctrl->vfeModuleEnableLocal.chromaUpsampleEnable = TRUE;
+
+	ctrl->vfeModuleEnableLocal.demuxEnable = TRUE;
+
+	/* if any stats module is enabled, the main bit is enabled. */
+	ctrl->vfeModuleEnableLocal.statsEnable =
+		ctrl->vfeStatsCmdLocal.autoFocusEnable |
+		ctrl->vfeStatsCmdLocal.axwEnable;
+
+	vfe_reg_module_cfg(&ctrl->vfeModuleEnableLocal);
+
+	/* For offline processing there is no need to configure CAMIF; leaving
+	 * bus output enabled in the camif_config register might confuse the
+	 * hardware.
+	 */
+	if (in->inputSource != VFE_START_INPUT_SOURCE_AXI) {
+		vfe_reg_camif_config(&ctrl->vfeCamifConfigLocal);
+	} else {
+		/* offline processing, enable axi read */
+		ctrl->vfeBusConfigLocal.stripeRdPathEn = TRUE;
+		ctrl->vfeBusCmdLocal.stripeReload = TRUE;
+		ctrl->vfeBusConfigLocal.rawPixelDataSize =
+			ctrl->axiInputDataSize;
+	}
+
+	vfe_reg_bus_cfg(&ctrl->vfeBusConfigLocal);
+
+	/* directly from start command */
+	hwcfg.pixelPattern = in->pixel;
+	hwcfg.inputSource = in->inputSource;
+	writel(*(uint32_t *)&hwcfg, ctrl->vfebase + VFE_CFG);
+
+	/* Regardless of whether the module is enabled, it does not hurt
+	 * to program the cositing mode. */
+	chromupcfg.chromaCositingForYCbCrInputs = in->yuvInputCositingMode;
+
+	writel(*(uint32_t *)&chromupcfg,
+		ctrl->vfebase + VFE_CHROMA_UPSAMPLE_CFG);
+
+	/* MISR to monitor the axi read. */
+	writel(0xd8, ctrl->vfebase + VFE_BUS_MISR_MAST_CFG_0);
+
+	/* clear all pending interrupts. */
+	writel(VFE_CLEAR_ALL_IRQS, ctrl->vfebase + VFE_IRQ_CLEAR);
+
+	/* Define how the composite interrupts work. */
+	ctrl->vfeImaskCompositePacked =
+		vfe_irq_composite_pack(ctrl->vfeIrqCompositeMaskLocal);
+
+	vfe_program_irq_composite_mask(ctrl->vfeImaskCompositePacked);
+
+	/*  enable all necessary interrupts.      */
+	ctrl->vfeImaskLocal.camifSofIrq  = TRUE;
+	ctrl->vfeImaskLocal.regUpdateIrq = TRUE;
+	ctrl->vfeImaskLocal.resetAckIrq  = TRUE;
+
+	ctrl->vfeImaskPacked = vfe_irq_pack(ctrl->vfeImaskLocal);
+	vfe_program_irq_mask(ctrl->vfeImaskPacked);
+
+	/* enable bus performance monitor */
+	vfe_8k_pm_start(&ctrl->vfeBusPmConfigLocal);
+
+	/* trigger vfe reg update */
+	ctrl->vfeStartAckPendingFlag = TRUE;
+
+	/* write bus command to trigger reload of ping pong buffer. */
+	ctrl->vfeBusCmdLocal.busPingpongReload = TRUE;
+
+	if (ctrl->vfeModuleEnableLocal.statsEnable == TRUE) {
+		ctrl->vfeBusCmdLocal.statsPingpongReload = TRUE;
+		ctrl->vfeStatsPingPongReloadFlag = TRUE;
+	}
+
+	writel(VFE_REG_UPDATE_TRIGGER, ctrl->vfebase + VFE_REG_UPDATE_CMD);
+
+	/* This must be programmed after the reg update trigger. */
+	vfe_reg_bus_cmd(&ctrl->vfeBusCmdLocal);
+
+	if ((in->inputSource ==
+			 VFE_START_INPUT_SOURCE_CAMIF) ||
+	    (in->inputSource == VFE_START_INPUT_SOURCE_TESTGEN))
+		writel(CAMIF_COMMAND_START, ctrl->vfebase + CAMIF_COMMAND);
+
+	/* start test gen if it is enabled */
+	if (ctrl->vfeTestGenStartFlag == TRUE) {
+		ctrl->vfeTestGenStartFlag = FALSE;
+		vfe_prog_hw_testgen_cmd(VFE_TEST_GEN_GO);
+	}
+
+	CDBG("ctrl->axiOutputMode = %d\n", ctrl->axiOutputMode);
+	if (ctrl->axiOutputMode == VFE_AXI_OUTPUT_MODE_CAMIFToAXIViaOutput2) {
+		/* raw dump mode */
+		rawmode = TRUE;
+
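+		/*
+		 * Busy-wait on the encoder CbCr write-master performance
+		 * counter until its buffer count becomes non-zero, then send
+		 * the start ack immediately.
+		 */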
+		while (rawmode) {
+			pmstatus =
+				readl(ctrl->vfebase +
+					VFE_BUS_ENC_CBCR_WR_PM_STATS_1);
+
+			if ((pmstatus & VFE_PM_BUF_MAX_CNT_MASK) != 0)
+				rawmode = FALSE;
+		}
+
+		vfe_proc_ops(VFE_MSG_ID_START_ACK, NULL);
+		ctrl->vfeStartAckPendingFlag = FALSE;
+	}
+
+	ctrl->vstate = VFE_STATE_ACTIVE;
+}
+
+void vfe_la_update(struct vfe_cmd_la_config *in)
+{
+	int16_t *pTable;
+	enum VFE_DMI_RAM_SEL dmiRamSel;
+	int i;
+
+	pTable = in->table;
+	ctrl->vfeModuleEnableLocal.lumaAdaptationEnable = in->enable;
+
+	/* toggle the bank to be used. */
+	ctrl->vfeLaBankSel ^= 1;
+
+	if (ctrl->vfeLaBankSel == 0)
+		dmiRamSel = LUMA_ADAPT_LUT_RAM_BANK0;
+	else
+		dmiRamSel = LUMA_ADAPT_LUT_RAM_BANK1;
+
+	/* configure the DMI_CFG to select right sram */
+	vfe_program_dmi_cfg(dmiRamSel);
+
+	for (i = 0; i < VFE_LA_TABLE_LENGTH; i++) {
+		writel((uint32_t)(*pTable), ctrl->vfebase + VFE_DMI_DATA_LO);
+		pTable++;
+	}
+
+	/* After the DMI transfer, deselect all SRAM banks in DMI_CFG
+	 * to be safe. */
+	writel(VFE_DMI_CFG_DEFAULT, ctrl->vfebase + VFE_DMI_CFG);
+	writel(ctrl->vfeLaBankSel, ctrl->vfebase + VFE_LA_CFG);
+}
+
+void vfe_la_config(struct vfe_cmd_la_config *in)
+{
+	uint16_t i;
+	int16_t  *pTable;
+	enum VFE_DMI_RAM_SEL dmiRamSel;
+
+	pTable = in->table;
+	ctrl->vfeModuleEnableLocal.lumaAdaptationEnable = in->enable;
+
+	if (ctrl->vfeLaBankSel == 0)
+		dmiRamSel = LUMA_ADAPT_LUT_RAM_BANK0;
+	else
+		dmiRamSel = LUMA_ADAPT_LUT_RAM_BANK1;
+
+	/* configure the DMI_CFG to select right sram */
+	vfe_program_dmi_cfg(dmiRamSel);
+
+	for (i = 0; i < VFE_LA_TABLE_LENGTH; i++) {
+		writel((uint32_t)(*pTable), ctrl->vfebase + VFE_DMI_DATA_LO);
+		pTable++;
+	}
+
+	/* After the DMI transfer, deselect all SRAM banks in DMI_CFG
+	 * to be safe. */
+	writel(VFE_DMI_CFG_DEFAULT, ctrl->vfebase + VFE_DMI_CFG);
+
+	/* can only be bank 0 or bank 1 for now. */
+	writel(ctrl->vfeLaBankSel, ctrl->vfebase + VFE_LA_CFG);
+	CDBG("VFE Luma adaptation bank selection is 0x%x\n",
+			 *(uint32_t *)&ctrl->vfeLaBankSel);
+}
+
+void vfe_test_gen_start(struct vfe_cmd_test_gen_start *in)
+{
+	struct VFE_TestGen_ConfigCmdType cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	cmd.numFrame              = in->numFrame;
+	cmd.pixelDataSelect       = in->pixelDataSelect;
+	cmd.systematicDataSelect  = in->systematicDataSelect;
+	cmd.pixelDataSize         = (uint32_t)in->pixelDataSize;
+	cmd.hsyncEdge             = (uint32_t)in->hsyncEdge;
+	cmd.vsyncEdge             = (uint32_t)in->vsyncEdge;
+	cmd.imageWidth            = in->imageWidth;
+	cmd.imageHeight           = in->imageHeight;
+	cmd.sofOffset             = in->startOfFrameOffset;
+	cmd.eofNOffset            = in->endOfFrameNOffset;
+	cmd.solOffset             = in->startOfLineOffset;
+	cmd.eolNOffset            = in->endOfLineNOffset;
+	cmd.hBlankInterval        = in->hbi;
+	cmd.vBlankInterval        = in->vbl;
+	cmd.vBlankIntervalEnable  = in->vblEnable;
+	cmd.sofDummy              = in->startOfFrameDummyLine;
+	cmd.eofDummy              = in->endOfFrameDummyLine;
+	cmd.unicolorBarSelect     = in->unicolorBarSelect;
+	cmd.unicolorBarEnable     = in->unicolorBarEnable;
+	cmd.splitEnable           = in->colorBarsSplitEnable;
+	cmd.pixelPattern          = (uint32_t)in->colorBarsPixelPattern;
+	cmd.rotatePeriod          = in->colorBarsRotatePeriod;
+	cmd.randomSeed            = in->testGenRandomSeed;
+
+	vfe_prog_hw(ctrl->vfebase + VFE_HW_TESTGEN_CFG,
+		(uint32_t *) &cmd, sizeof(cmd));
+}
+
+void vfe_frame_skip_update(struct vfe_cmd_frame_skip_update *in)
+{
+	struct VFE_FRAME_SKIP_UpdateCmdType cmd;
+
+	cmd.yPattern    = in->output1Pattern;
+	cmd.cbcrPattern = in->output1Pattern;
+	vfe_prog_hw(ctrl->vfebase + VFE_FRAMEDROP_VIEW_Y_PATTERN,
+		(uint32_t *)&cmd, sizeof(cmd));
+
+	cmd.yPattern    = in->output2Pattern;
+	cmd.cbcrPattern = in->output2Pattern;
+	vfe_prog_hw(ctrl->vfebase + VFE_FRAMEDROP_ENC_Y_PATTERN,
+		(uint32_t *)&cmd, sizeof(cmd));
+}
+
+void vfe_frame_skip_config(struct vfe_cmd_frame_skip_config *in)
+{
+	struct vfe_frame_skip_cfg cmd;
+	memset(&cmd, 0, sizeof(cmd));
+
+	ctrl->vfeFrameSkip = *in;
+
+	cmd.output2YPeriod     = in->output2Period;
+	cmd.output2CbCrPeriod  = in->output2Period;
+	cmd.output2YPattern    = in->output2Pattern;
+	cmd.output2CbCrPattern = in->output2Pattern;
+	cmd.output1YPeriod     = in->output1Period;
+	cmd.output1CbCrPeriod  = in->output1Period;
+	cmd.output1YPattern    = in->output1Pattern;
+	cmd.output1CbCrPattern = in->output1Pattern;
+
+	vfe_prog_hw(ctrl->vfebase + VFE_FRAMEDROP_ENC_Y_CFG,
+		(uint32_t *)&cmd, sizeof(cmd));
+}
+
+void vfe_output_clamp_config(struct vfe_cmd_output_clamp_config *in)
+{
+	struct vfe_output_clamp_cfg cmd;
+	memset(&cmd, 0, sizeof(cmd));
+
+	cmd.yChanMax  = in->maxCh0;
+	cmd.cbChanMax = in->maxCh1;
+	cmd.crChanMax = in->maxCh2;
+
+	cmd.yChanMin  = in->minCh0;
+	cmd.cbChanMin = in->minCh1;
+	cmd.crChanMin = in->minCh2;
+
+	vfe_prog_hw(ctrl->vfebase + VFE_CLAMP_MAX_CFG, (uint32_t *)&cmd,
+		sizeof(cmd));
+}
+
+void vfe_camif_frame_update(struct vfe_cmds_camif_frame *in)
+{
+	struct vfe_camifframe_update cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	cmd.pixelsPerLine = in->pixelsPerLine;
+	cmd.linesPerFrame = in->linesPerFrame;
+
+	vfe_prog_hw(ctrl->vfebase + CAMIF_FRAME_CONFIG, (uint32_t *)&cmd,
+		sizeof(cmd));
+}
+
+void vfe_color_correction_config(struct vfe_cmd_color_correction_config *in)
+{
+	struct vfe_color_correction_cfg cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	ctrl->vfeModuleEnableLocal.colorCorrectionEnable = in->enable;
+
+	cmd.c0 = in->C0;
+	cmd.c1 = in->C1;
+	cmd.c2 = in->C2;
+	cmd.c3 = in->C3;
+	cmd.c4 = in->C4;
+	cmd.c5 = in->C5;
+	cmd.c6 = in->C6;
+	cmd.c7 = in->C7;
+	cmd.c8 = in->C8;
+
+	cmd.k0 = in->K0;
+	cmd.k1 = in->K1;
+	cmd.k2 = in->K2;
+
+	cmd.coefQFactor = in->coefQFactor;
+
+	vfe_prog_hw(ctrl->vfebase + VFE_COLOR_CORRECT_COEFF_0,
+		(uint32_t *)&cmd, sizeof(cmd));
+}
+
+void vfe_demosaic_abf_update(struct vfe_cmd_demosaic_abf_update *in)
+{
+	struct vfe_demosaic_cfg cmd;
+	struct vfe_demosaic_abf_cfg cmdabf;
+	uint32_t temp;
+
+	memset(&cmd, 0, sizeof(cmd));
+	temp = readl(ctrl->vfebase + VFE_DEMOSAIC_CFG);
+
+	cmd = *((struct vfe_demosaic_cfg *)(&temp));
+	cmd.abfEnable       = in->abfUpdate.enable;
+	cmd.forceAbfOn      = in->abfUpdate.forceOn;
+	cmd.abfShift        = in->abfUpdate.shift;
+	vfe_prog_hw(ctrl->vfebase + VFE_DEMOSAIC_CFG,
+		(uint32_t *)&cmd, sizeof(cmd));
+
+	cmdabf.lpThreshold  = in->abfUpdate.lpThreshold;
+	cmdabf.ratio        = in->abfUpdate.ratio;
+	cmdabf.minValue     = in->abfUpdate.min;
+	cmdabf.maxValue     = in->abfUpdate.max;
+	vfe_prog_hw(ctrl->vfebase + VFE_DEMOSAIC_ABF_CFG_0,
+		(uint32_t *)&cmdabf, sizeof(cmdabf));
+}
+
+void vfe_demosaic_bpc_update(struct vfe_cmd_demosaic_bpc_update *in)
+{
+	struct vfe_demosaic_cfg cmd;
+	struct vfe_demosaic_bpc_cfg cmdbpc;
+	uint32_t temp;
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	temp = readl(ctrl->vfebase + VFE_DEMOSAIC_CFG);
+
+	cmd = *((struct vfe_demosaic_cfg *)(&temp));
+	cmd.badPixelCorrEnable = in->bpcUpdate.enable;
+	cmd.fminThreshold      = in->bpcUpdate.fminThreshold;
+	cmd.fmaxThreshold      = in->bpcUpdate.fmaxThreshold;
+
+	vfe_prog_hw(ctrl->vfebase + VFE_DEMOSAIC_CFG,
+		(uint32_t *)&cmd, sizeof(cmd));
+
+	cmdbpc.blueDiffThreshold  = in->bpcUpdate.blueDiffThreshold;
+	cmdbpc.redDiffThreshold   = in->bpcUpdate.redDiffThreshold;
+	cmdbpc.greenDiffThreshold = in->bpcUpdate.greenDiffThreshold;
+
+	vfe_prog_hw(ctrl->vfebase + VFE_DEMOSAIC_BPC_CFG_0,
+		(uint32_t *)&cmdbpc, sizeof(cmdbpc));
+}
+
+void vfe_demosaic_config(struct vfe_cmd_demosaic_config *in)
+{
+	struct vfe_demosaic_cfg cmd;
+	struct vfe_demosaic_bpc_cfg cmd_bpc;
+	struct vfe_demosaic_abf_cfg cmd_abf;
+
+	memset(&cmd, 0, sizeof(cmd));
+	memset(&cmd_bpc, 0, sizeof(cmd_bpc));
+	memset(&cmd_abf, 0, sizeof(cmd_abf));
+
+	ctrl->vfeModuleEnableLocal.demosaicEnable = in->enable;
+
+	cmd.abfEnable          = in->abfConfig.enable;
+	cmd.badPixelCorrEnable = in->bpcConfig.enable;
+	cmd.forceAbfOn         = in->abfConfig.forceOn;
+	cmd.abfShift           = in->abfConfig.shift;
+	cmd.fminThreshold      = in->bpcConfig.fminThreshold;
+	cmd.fmaxThreshold      = in->bpcConfig.fmaxThreshold;
+	cmd.slopeShift         = in->slopeShift;
+
+	vfe_prog_hw(ctrl->vfebase + VFE_DEMOSAIC_CFG,
+		(uint32_t *)&cmd, sizeof(cmd));
+
+	cmd_abf.lpThreshold = in->abfConfig.lpThreshold;
+	cmd_abf.ratio       = in->abfConfig.ratio;
+	cmd_abf.minValue    = in->abfConfig.min;
+	cmd_abf.maxValue    = in->abfConfig.max;
+
+	vfe_prog_hw(ctrl->vfebase + VFE_DEMOSAIC_ABF_CFG_0,
+		(uint32_t *)&cmd_abf, sizeof(cmd_abf));
+
+	cmd_bpc.blueDiffThreshold   = in->bpcConfig.blueDiffThreshold;
+	cmd_bpc.redDiffThreshold    = in->bpcConfig.redDiffThreshold;
+	cmd_bpc.greenDiffThreshold  = in->bpcConfig.greenDiffThreshold;
+
+	vfe_prog_hw(ctrl->vfebase + VFE_DEMOSAIC_BPC_CFG_0,
+		(uint32_t *)&cmd_bpc, sizeof(cmd_bpc));
+}
+
+void vfe_demux_channel_gain_update(struct vfe_cmd_demux_channel_gain_config *in)
+{
+	struct vfe_demux_cfg cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	cmd.ch0EvenGain  = in->ch0EvenGain;
+	cmd.ch0OddGain   = in->ch0OddGain;
+	cmd.ch1Gain      = in->ch1Gain;
+	cmd.ch2Gain      = in->ch2Gain;
+
+	vfe_prog_hw(ctrl->vfebase + VFE_DEMUX_GAIN_0,
+		(uint32_t *)&cmd, sizeof(cmd));
+}
+
+void vfe_demux_channel_gain_config(struct vfe_cmd_demux_channel_gain_config *in)
+{
+	struct vfe_demux_cfg cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	cmd.ch0EvenGain = in->ch0EvenGain;
+	cmd.ch0OddGain  = in->ch0OddGain;
+	cmd.ch1Gain     = in->ch1Gain;
+	cmd.ch2Gain     = in->ch2Gain;
+
+	vfe_prog_hw(ctrl->vfebase + VFE_DEMUX_GAIN_0,
+		(uint32_t *)&cmd, sizeof(cmd));
+}
+
+void vfe_black_level_update(struct vfe_cmd_black_level_config *in)
+{
+	struct vfe_blacklevel_cfg cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	ctrl->vfeModuleEnableLocal.blackLevelCorrectionEnable = in->enable;
+
+	cmd.evenEvenAdjustment = in->evenEvenAdjustment;
+	cmd.evenOddAdjustment  = in->evenOddAdjustment;
+	cmd.oddEvenAdjustment  = in->oddEvenAdjustment;
+	cmd.oddOddAdjustment   = in->oddOddAdjustment;
+
+	vfe_prog_hw(ctrl->vfebase + VFE_BLACK_EVEN_EVEN_VALUE,
+		(uint32_t *)&cmd, sizeof(cmd));
+}
+
+void vfe_black_level_config(struct vfe_cmd_black_level_config *in)
+{
+	struct vfe_blacklevel_cfg cmd;
+	memset(&cmd, 0, sizeof(cmd));
+
+	ctrl->vfeModuleEnableLocal.blackLevelCorrectionEnable = in->enable;
+
+	cmd.evenEvenAdjustment = in->evenEvenAdjustment;
+	cmd.evenOddAdjustment  = in->evenOddAdjustment;
+	cmd.oddEvenAdjustment  = in->oddEvenAdjustment;
+	cmd.oddOddAdjustment   = in->oddOddAdjustment;
+
+	vfe_prog_hw(ctrl->vfebase + VFE_BLACK_EVEN_EVEN_VALUE,
+		(uint32_t *)&cmd, sizeof(cmd));
+}
+
+void vfe_asf_update(struct vfe_cmd_asf_update *in)
+{
+	struct vfe_asf_update cmd;
+	memset(&cmd, 0, sizeof(cmd));
+
+	ctrl->vfeModuleEnableLocal.asfEnable = in->enable;
+
+	cmd.smoothEnable     = in->smoothFilterEnabled;
+	cmd.sharpMode        = in->sharpMode;
+	cmd.smoothCoeff0     = in->smoothCoefCenter;
+	cmd.smoothCoeff1     = in->smoothCoefSurr;
+	cmd.cropEnable       = in->cropEnable;
+	cmd.sharpThresholdE1 = in->sharpThreshE1;
+	cmd.sharpDegreeK1    = in->sharpK1;
+	cmd.sharpDegreeK2    = in->sharpK2;
+	cmd.normalizeFactor  = in->normalizeFactor;
+	cmd.sharpThresholdE2 = in->sharpThreshE2;
+	cmd.sharpThresholdE3 = in->sharpThreshE3;
+	cmd.sharpThresholdE4 = in->sharpThreshE4;
+	cmd.sharpThresholdE5 = in->sharpThreshE5;
+	cmd.F1Coeff0         = in->filter1Coefficients[0];
+	cmd.F1Coeff1         = in->filter1Coefficients[1];
+	cmd.F1Coeff2         = in->filter1Coefficients[2];
+	cmd.F1Coeff3         = in->filter1Coefficients[3];
+	cmd.F1Coeff4         = in->filter1Coefficients[4];
+	cmd.F1Coeff5         = in->filter1Coefficients[5];
+	cmd.F1Coeff6         = in->filter1Coefficients[6];
+	cmd.F1Coeff7         = in->filter1Coefficients[7];
+	cmd.F1Coeff8         = in->filter1Coefficients[8];
+	cmd.F2Coeff0         = in->filter2Coefficients[0];
+	cmd.F2Coeff1         = in->filter2Coefficients[1];
+	cmd.F2Coeff2         = in->filter2Coefficients[2];
+	cmd.F2Coeff3         = in->filter2Coefficients[3];
+	cmd.F2Coeff4         = in->filter2Coefficients[4];
+	cmd.F2Coeff5         = in->filter2Coefficients[5];
+	cmd.F2Coeff6         = in->filter2Coefficients[6];
+	cmd.F2Coeff7         = in->filter2Coefficients[7];
+	cmd.F2Coeff8         = in->filter2Coefficients[8];
+
+	vfe_prog_hw(ctrl->vfebase + VFE_ASF_CFG,
+		(uint32_t *)&cmd, sizeof(cmd));
+}
+
+void vfe_asf_config(struct vfe_cmd_asf_config *in)
+{
+	struct vfe_asf_update     cmd;
+	struct vfe_asfcrop_cfg cmd2;
+
+	memset(&cmd, 0, sizeof(cmd));
+	memset(&cmd2, 0, sizeof(cmd2));
+
+	ctrl->vfeModuleEnableLocal.asfEnable = in->enable;
+
+	cmd.smoothEnable       = in->smoothFilterEnabled;
+	cmd.sharpMode          = in->sharpMode;
+	cmd.smoothCoeff0       = in->smoothCoefCenter;
+	cmd.smoothCoeff1       = in->smoothCoefSurr;
+	cmd.cropEnable         = in->cropEnable;
+	cmd.sharpThresholdE1   = in->sharpThreshE1;
+	cmd.sharpDegreeK1      = in->sharpK1;
+	cmd.sharpDegreeK2      = in->sharpK2;
+	cmd.normalizeFactor    = in->normalizeFactor;
+	cmd.sharpThresholdE2   = in->sharpThreshE2;
+	cmd.sharpThresholdE3   = in->sharpThreshE3;
+	cmd.sharpThresholdE4   = in->sharpThreshE4;
+	cmd.sharpThresholdE5   = in->sharpThreshE5;
+	cmd.F1Coeff0           = in->filter1Coefficients[0];
+	cmd.F1Coeff1           = in->filter1Coefficients[1];
+	cmd.F1Coeff2           = in->filter1Coefficients[2];
+	cmd.F1Coeff3           = in->filter1Coefficients[3];
+	cmd.F1Coeff4           = in->filter1Coefficients[4];
+	cmd.F1Coeff5           = in->filter1Coefficients[5];
+	cmd.F1Coeff6           = in->filter1Coefficients[6];
+	cmd.F1Coeff7           = in->filter1Coefficients[7];
+	cmd.F1Coeff8           = in->filter1Coefficients[8];
+	cmd.F2Coeff0           = in->filter2Coefficients[0];
+	cmd.F2Coeff1           = in->filter2Coefficients[1];
+	cmd.F2Coeff2           = in->filter2Coefficients[2];
+	cmd.F2Coeff3           = in->filter2Coefficients[3];
+	cmd.F2Coeff4           = in->filter2Coefficients[4];
+	cmd.F2Coeff5           = in->filter2Coefficients[5];
+	cmd.F2Coeff6           = in->filter2Coefficients[6];
+	cmd.F2Coeff7           = in->filter2Coefficients[7];
+	cmd.F2Coeff8           = in->filter2Coefficients[8];
+
+	vfe_prog_hw(ctrl->vfebase + VFE_ASF_CFG,
+		(uint32_t *)&cmd, sizeof(cmd));
+
+	cmd2.firstLine  = in->cropFirstLine;
+	cmd2.lastLine   = in->cropLastLine;
+	cmd2.firstPixel = in->cropFirstPixel;
+	cmd2.lastPixel  = in->cropLastPixel;
+
+	vfe_prog_hw(ctrl->vfebase + VFE_ASF_CROP_WIDTH_CFG,
+		(uint32_t *)&cmd2, sizeof(cmd2));
+}
+
+void vfe_white_balance_config(struct vfe_cmd_white_balance_config *in)
+{
+	struct vfe_wb_cfg cmd;
+	memset(&cmd, 0, sizeof(cmd));
+
+	ctrl->vfeModuleEnableLocal.whiteBalanceEnable = in->enable;
+
+	cmd.ch0Gain = in->ch0Gain;
+	cmd.ch1Gain = in->ch1Gain;
+	cmd.ch2Gain = in->ch2Gain;
+
+	vfe_prog_hw(ctrl->vfebase + VFE_WB_CFG,
+		(uint32_t *)&cmd, sizeof(cmd));
+}
+
+void vfe_chroma_sup_config(struct vfe_cmd_chroma_suppression_config *in)
+{
+	struct vfe_chroma_suppress_cfg cmd;
+	memset(&cmd, 0, sizeof(cmd));
+
+	ctrl->vfeModuleEnableLocal.chromaSuppressionEnable = in->enable;
+
+	cmd.m1  = in->m1;
+	cmd.m3  = in->m3;
+	cmd.n1  = in->n1;
+	cmd.n3  = in->n3;
+	cmd.mm1 = in->mm1;
+	cmd.nn1 = in->nn1;
+
+	vfe_prog_hw(ctrl->vfebase + VFE_CHROMA_SUPPRESS_CFG_0,
+		(uint32_t *)&cmd, sizeof(cmd));
+}
+
+void vfe_roll_off_config(struct vfe_cmd_roll_off_config *in)
+{
+	struct vfe_rolloff_cfg cmd;
+	memset(&cmd, 0, sizeof(cmd));
+
+	ctrl->vfeModuleEnableLocal.lensRollOffEnable = in->enable;
+
+	cmd.gridWidth   = in->gridWidth;
+	cmd.gridHeight  = in->gridHeight;
+	cmd.yDelta      = in->yDelta;
+	cmd.gridX       = in->gridXIndex;
+	cmd.gridY       = in->gridYIndex;
+	cmd.pixelX      = in->gridPixelXIndex;
+	cmd.pixelY      = in->gridPixelYIndex;
+	cmd.yDeltaAccum = in->yDeltaAccum;
+
+	vfe_prog_hw(ctrl->vfebase + VFE_ROLLOFF_CFG_0,
+		(uint32_t *)&cmd, sizeof(cmd));
+
+	vfe_write_lens_roll_off_table(in);
+}
+
+void vfe_chroma_subsample_config(struct vfe_cmd_chroma_subsample_config *in)
+{
+	struct vfe_chromasubsample_cfg cmd;
+	memset(&cmd, 0, sizeof(cmd));
+
+	ctrl->vfeModuleEnableLocal.chromaSubsampleEnable = in->enable;
+
+	cmd.hCositedPhase       = in->hCositedPhase;
+	cmd.vCositedPhase       = in->vCositedPhase;
+	cmd.hCosited            = in->hCosited;
+	cmd.vCosited            = in->vCosited;
+	cmd.hsubSampleEnable    = in->hsubSampleEnable;
+	cmd.vsubSampleEnable    = in->vsubSampleEnable;
+	cmd.cropEnable          = in->cropEnable;
+	cmd.cropWidthLastPixel  = in->cropWidthLastPixel;
+	cmd.cropWidthFirstPixel = in->cropWidthFirstPixel;
+	cmd.cropHeightLastLine  = in->cropHeightLastLine;
+	cmd.cropHeightFirstLine = in->cropHeightFirstLine;
+
+	vfe_prog_hw(ctrl->vfebase + VFE_CHROMA_SUBSAMPLE_CFG,
+		(uint32_t *)&cmd, sizeof(cmd));
+}
+
+void vfe_chroma_enhan_config(struct vfe_cmd_chroma_enhan_config *in)
+{
+	struct vfe_chroma_enhance_cfg cmd;
+	struct vfe_color_convert_cfg cmd2;
+
+	memset(&cmd, 0, sizeof(cmd));
+	memset(&cmd2, 0, sizeof(cmd2));
+
+	ctrl->vfeModuleEnableLocal.chromaEnhanEnable = in->enable;
+
+	cmd.ap             = in->ap;
+	cmd.am             = in->am;
+	cmd.bp             = in->bp;
+	cmd.bm             = in->bm;
+	cmd.cp             = in->cp;
+	cmd.cm             = in->cm;
+	cmd.dp             = in->dp;
+	cmd.dm             = in->dm;
+	cmd.kcb            = in->kcb;
+	cmd.kcr            = in->kcr;
+
+	cmd2.v0            = in->RGBtoYConversionV0;
+	cmd2.v1            = in->RGBtoYConversionV1;
+	cmd2.v2            = in->RGBtoYConversionV2;
+	cmd2.ConvertOffset = in->RGBtoYConversionOffset;
+
+	vfe_prog_hw(ctrl->vfebase + VFE_CHROMA_ENHAN_A,
+		(uint32_t *)&cmd, sizeof(cmd));
+
+	vfe_prog_hw(ctrl->vfebase + VFE_COLOR_CONVERT_COEFF_0,
+		(uint32_t *)&cmd2, sizeof(cmd2));
+}
+
+void vfe_scaler2cbcr_config(struct vfe_cmd_scaler2_config *in)
+{
+	struct vfe_scaler2_cfg cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	ctrl->vfeModuleEnableLocal.scaler2CbcrEnable = in->enable;
+
+	cmd.hEnable              = in->hconfig.enable;
+	cmd.vEnable              = in->vconfig.enable;
+	cmd.inWidth              = in->hconfig.inputSize;
+	cmd.outWidth             = in->hconfig.outputSize;
+	cmd.horizPhaseMult       = in->hconfig.phaseMultiplicationFactor;
+	cmd.horizInterResolution = in->hconfig.interpolationResolution;
+	cmd.inHeight             = in->vconfig.inputSize;
+	cmd.outHeight            = in->vconfig.outputSize;
+	cmd.vertPhaseMult        = in->vconfig.phaseMultiplicationFactor;
+	cmd.vertInterResolution  = in->vconfig.interpolationResolution;
+
+	vfe_prog_hw(ctrl->vfebase + VFE_SCALE_CBCR_CFG,
+		(uint32_t *)&cmd, sizeof(cmd));
+}
+
+void vfe_scaler2y_config(struct vfe_cmd_scaler2_config *in)
+{
+	struct vfe_scaler2_cfg cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	ctrl->vfeModuleEnableLocal.scaler2YEnable = in->enable;
+
+	cmd.hEnable               = in->hconfig.enable;
+	cmd.vEnable               = in->vconfig.enable;
+	cmd.inWidth               = in->hconfig.inputSize;
+	cmd.outWidth              = in->hconfig.outputSize;
+	cmd.horizPhaseMult        = in->hconfig.phaseMultiplicationFactor;
+	cmd.horizInterResolution  = in->hconfig.interpolationResolution;
+	cmd.inHeight              = in->vconfig.inputSize;
+	cmd.outHeight             = in->vconfig.outputSize;
+	cmd.vertPhaseMult         = in->vconfig.phaseMultiplicationFactor;
+	cmd.vertInterResolution   = in->vconfig.interpolationResolution;
+
+	vfe_prog_hw(ctrl->vfebase + VFE_SCALE_Y_CFG,
+		(uint32_t *)&cmd, sizeof(cmd));
+}
+
+void vfe_main_scaler_config(struct vfe_cmd_main_scaler_config *in)
+{
+	struct vfe_main_scaler_cfg cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	ctrl->vfeModuleEnableLocal.mainScalerEnable = in->enable;
+
+	cmd.hEnable              = in->hconfig.enable;
+	cmd.vEnable              = in->vconfig.enable;
+	cmd.inWidth              = in->hconfig.inputSize;
+	cmd.outWidth             = in->hconfig.outputSize;
+	cmd.horizPhaseMult       = in->hconfig.phaseMultiplicationFactor;
+	cmd.horizInterResolution = in->hconfig.interpolationResolution;
+	cmd.horizMNInit          = in->MNInitH.MNCounterInit;
+	cmd.horizPhaseInit       = in->MNInitH.phaseInit;
+	cmd.inHeight             = in->vconfig.inputSize;
+	cmd.outHeight            = in->vconfig.outputSize;
+	cmd.vertPhaseMult        = in->vconfig.phaseMultiplicationFactor;
+	cmd.vertInterResolution  = in->vconfig.interpolationResolution;
+	cmd.vertMNInit           = in->MNInitV.MNCounterInit;
+	cmd.vertPhaseInit        = in->MNInitV.phaseInit;
+
+	vfe_prog_hw(ctrl->vfebase + VFE_SCALE_CFG,
+		(uint32_t *)&cmd, sizeof(cmd));
+}
+
+void vfe_stats_wb_exp_stop(void)
+{
+	ctrl->vfeStatsCmdLocal.axwEnable = FALSE;
+	ctrl->vfeImaskLocal.awbPingpongIrq = FALSE;
+}
+
+void vfe_stats_update_wb_exp(struct vfe_cmd_stats_wb_exp_update *in)
+{
+	struct vfe_statsawb_update   cmd;
+	struct vfe_statsawbae_update cmd2;
+
+	memset(&cmd, 0, sizeof(cmd));
+	memset(&cmd2, 0, sizeof(cmd2));
+
+	cmd.m1  = in->awbMCFG[0];
+	cmd.m2  = in->awbMCFG[1];
+	cmd.m3  = in->awbMCFG[2];
+	cmd.m4  = in->awbMCFG[3];
+	cmd.c1  = in->awbCCFG[0];
+	cmd.c2  = in->awbCCFG[1];
+	cmd.c3  = in->awbCCFG[2];
+	cmd.c4  = in->awbCCFG[3];
+	vfe_prog_hw(ctrl->vfebase + VFE_STATS_AWB_MCFG,
+		(uint32_t *)&cmd, sizeof(cmd));
+
+	cmd2.aeRegionCfg    = in->wbExpRegions;
+	cmd2.aeSubregionCfg = in->wbExpSubRegion;
+	cmd2.awbYMin        = in->awbYMin;
+	cmd2.awbYMax        = in->awbYMax;
+	vfe_prog_hw(ctrl->vfebase + VFE_STATS_AWBAE_CFG,
+		(uint32_t *)&cmd2, sizeof(cmd2));
+}
+
+void vfe_stats_update_af(struct vfe_cmd_stats_af_update *in)
+{
+	struct vfe_statsaf_update cmd;
+	memset(&cmd, 0, sizeof(cmd));
+
+	cmd.windowVOffset = in->windowVOffset;
+	cmd.windowHOffset = in->windowHOffset;
+	cmd.windowMode    = in->windowMode;
+	cmd.windowHeight  = in->windowHeight;
+	cmd.windowWidth   = in->windowWidth;
+
+	vfe_prog_hw(ctrl->vfebase + VFE_STATS_AF_CFG,
+		(uint32_t *)&cmd, sizeof(cmd));
+}
+
+void vfe_stats_start_wb_exp(struct vfe_cmd_stats_wb_exp_start *in)
+{
+	struct vfe_statsawb_update   cmd;
+	struct vfe_statsawbae_update cmd2;
+	struct vfe_statsaxw_hdr_cfg  cmd3;
+
+	ctrl->vfeStatsCmdLocal.axwEnable   =  in->enable;
+	ctrl->vfeImaskLocal.awbPingpongIrq = TRUE;
+
+	memset(&cmd, 0, sizeof(cmd));
+	memset(&cmd2, 0, sizeof(cmd2));
+	memset(&cmd3, 0, sizeof(cmd3));
+
+	cmd.m1  = in->awbMCFG[0];
+	cmd.m2  = in->awbMCFG[1];
+	cmd.m3  = in->awbMCFG[2];
+	cmd.m4  = in->awbMCFG[3];
+	cmd.c1  = in->awbCCFG[0];
+	cmd.c2  = in->awbCCFG[1];
+	cmd.c3  = in->awbCCFG[2];
+	cmd.c4  = in->awbCCFG[3];
+	vfe_prog_hw(ctrl->vfebase + VFE_STATS_AWB_MCFG,
+		(uint32_t *)&cmd, sizeof(cmd));
+
+	cmd2.aeRegionCfg     = in->wbExpRegions;
+	cmd2.aeSubregionCfg  = in->wbExpSubRegion;
+	cmd2.awbYMin         = in->awbYMin;
+	cmd2.awbYMax         = in->awbYMax;
+	vfe_prog_hw(ctrl->vfebase + VFE_STATS_AWBAE_CFG,
+		(uint32_t *)&cmd2, sizeof(cmd2));
+
+	cmd3.axwHeader       = in->axwHeader;
+	vfe_prog_hw(ctrl->vfebase + VFE_STATS_AXW_HEADER,
+		(uint32_t *)&cmd3, sizeof(cmd3));
+}
+
+void vfe_stats_start_af(struct vfe_cmd_stats_af_start *in)
+{
+	struct vfe_statsaf_update cmd;
+	struct vfe_statsaf_cfg    cmd2;
+
+	memset(&cmd, 0, sizeof(cmd));
+	memset(&cmd2, 0, sizeof(cmd2));
+
+	ctrl->vfeStatsCmdLocal.autoFocusEnable = in->enable;
+	ctrl->vfeImaskLocal.afPingpongIrq = TRUE;
+
+	cmd.windowVOffset = in->windowVOffset;
+	cmd.windowHOffset = in->windowHOffset;
+	cmd.windowMode    = in->windowMode;
+	cmd.windowHeight  = in->windowHeight;
+	cmd.windowWidth   = in->windowWidth;
+
+	vfe_prog_hw(ctrl->vfebase + VFE_STATS_AF_CFG,
+		(uint32_t *)&cmd, sizeof(cmd));
+
+	cmd2.a00       = in->highPassCoef[0];
+	cmd2.a04       = in->highPassCoef[1];
+	cmd2.a20       = in->highPassCoef[2];
+	cmd2.a21       = in->highPassCoef[3];
+	cmd2.a22       = in->highPassCoef[4];
+	cmd2.a23       = in->highPassCoef[5];
+	cmd2.a24       = in->highPassCoef[6];
+	cmd2.fvMax     = in->metricMax;
+	cmd2.fvMetric  = in->metricSelection;
+	cmd2.afHeader  = in->bufferHeader;
+	cmd2.entry00   = in->gridForMultiWindows[0];
+	cmd2.entry01   = in->gridForMultiWindows[1];
+	cmd2.entry02   = in->gridForMultiWindows[2];
+	cmd2.entry03   = in->gridForMultiWindows[3];
+	cmd2.entry10   = in->gridForMultiWindows[4];
+	cmd2.entry11   = in->gridForMultiWindows[5];
+	cmd2.entry12   = in->gridForMultiWindows[6];
+	cmd2.entry13   = in->gridForMultiWindows[7];
+	cmd2.entry20   = in->gridForMultiWindows[8];
+	cmd2.entry21   = in->gridForMultiWindows[9];
+	cmd2.entry22   = in->gridForMultiWindows[10];
+	cmd2.entry23   = in->gridForMultiWindows[11];
+	cmd2.entry30   = in->gridForMultiWindows[12];
+	cmd2.entry31   = in->gridForMultiWindows[13];
+	cmd2.entry32   = in->gridForMultiWindows[14];
+	cmd2.entry33   = in->gridForMultiWindows[15];
+
+	vfe_prog_hw(ctrl->vfebase + VFE_STATS_AF_GRID_0,
+		(uint32_t *)&cmd2, sizeof(cmd2));
+}
+
+void vfe_stats_setting(struct vfe_cmd_stats_setting *in)
+{
+	struct vfe_statsframe cmd1;
+	struct vfe_busstats_wrprio cmd2;
+
+	memset(&cmd1, 0, sizeof(cmd1));
+	memset(&cmd2, 0, sizeof(cmd2));
+
+	ctrl->afStatsControl.addressBuffer[0] = in->afBuffer[0];
+	ctrl->afStatsControl.addressBuffer[1] = in->afBuffer[1];
+	ctrl->afStatsControl.nextFrameAddrBuf = in->afBuffer[2];
+
+	ctrl->awbStatsControl.addressBuffer[0] = in->awbBuffer[0];
+	ctrl->awbStatsControl.addressBuffer[1] = in->awbBuffer[1];
+	ctrl->awbStatsControl.nextFrameAddrBuf = in->awbBuffer[2];
+
+	cmd1.lastPixel = in->frameHDimension;
+	cmd1.lastLine  = in->frameVDimension;
+	vfe_prog_hw(ctrl->vfebase + VFE_STATS_FRAME_SIZE,
+		(uint32_t *)&cmd1, sizeof(cmd1));
+
+	cmd2.afBusPriority    = in->afBusPriority;
+	cmd2.awbBusPriority   = in->awbBusPriority;
+	cmd2.histBusPriority  = in->histBusPriority;
+	cmd2.afBusPriorityEn  = in->afBusPrioritySelection;
+	cmd2.awbBusPriorityEn = in->awbBusPrioritySelection;
+	cmd2.histBusPriorityEn = in->histBusPrioritySelection;
+
+	vfe_prog_hw(ctrl->vfebase + VFE_BUS_STATS_WR_PRIORITY,
+		(uint32_t *)&cmd2, sizeof(cmd2));
+
+	/* Program the bus ping pong address for statistics modules. */
+	writel(in->afBuffer[0], ctrl->vfebase + VFE_BUS_STATS_AF_WR_PING_ADDR);
+	writel(in->afBuffer[1], ctrl->vfebase + VFE_BUS_STATS_AF_WR_PONG_ADDR);
+	writel(in->awbBuffer[0],
+		ctrl->vfebase + VFE_BUS_STATS_AWB_WR_PING_ADDR);
+	writel(in->awbBuffer[1],
+		ctrl->vfebase + VFE_BUS_STATS_AWB_WR_PONG_ADDR);
+	writel(in->histBuffer[0],
+		ctrl->vfebase + VFE_BUS_STATS_HIST_WR_PING_ADDR);
+	writel(in->histBuffer[1],
+		ctrl->vfebase + VFE_BUS_STATS_HIST_WR_PONG_ADDR);
+}
+
+void vfe_axi_input_config(struct vfe_cmd_axi_input_config *in)
+{
+	struct VFE_AxiInputCmdType cmd;
+	uint32_t xSizeWord, axiRdUnpackPattern;
+	uint8_t  axiInputPpw;
+	uint32_t busPingpongRdIrqEnable;
+
+	ctrl->vfeImaskLocal.rdPingpongIrq = TRUE;
+
+	switch (in->pixelSize) {
+	case VFE_RAW_PIXEL_DATA_SIZE_10BIT:
+		ctrl->axiInputDataSize = VFE_RAW_PIXEL_DATA_SIZE_10BIT;
+		break;
+
+	case VFE_RAW_PIXEL_DATA_SIZE_12BIT:
+		ctrl->axiInputDataSize = VFE_RAW_PIXEL_DATA_SIZE_12BIT;
+		break;
+
+	case VFE_RAW_PIXEL_DATA_SIZE_8BIT:
+	default:
+		ctrl->axiInputDataSize = VFE_RAW_PIXEL_DATA_SIZE_8BIT;
+		break;
+	}
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	switch (in->pixelSize) {
+	case VFE_RAW_PIXEL_DATA_SIZE_10BIT:
+		axiInputPpw = 6;
+		axiRdUnpackPattern = 0xD43210;
+		break;
+
+	case VFE_RAW_PIXEL_DATA_SIZE_12BIT:
+		axiInputPpw = 5;
+		axiRdUnpackPattern = 0xC3210;
+		break;
+
+	case VFE_RAW_PIXEL_DATA_SIZE_8BIT:
+	default:
+		axiInputPpw = 8;
+		axiRdUnpackPattern = 0xF6543210;
+		break;
+	}
+
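+	/*
+	 * Convert the stripe width from pixels to AXI bus words: account for
+	 * the pixel offset within the first word, round up to a whole number
+	 * of words, and program the result as (words - 1).
+	 */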
+	xSizeWord =
+		((((in->xOffset % axiInputPpw) + in->xSize) +
+			(axiInputPpw-1)) / axiInputPpw) - 1;
+
+	cmd.stripeStartAddr0  = in->fragAddr[0];
+	cmd.stripeStartAddr1  = in->fragAddr[1];
+	cmd.stripeStartAddr2  = in->fragAddr[2];
+	cmd.stripeStartAddr3  = in->fragAddr[3];
+	cmd.ySize             = in->ySize;
+	cmd.yOffsetDelta      = 0;
+	cmd.xSizeWord         = xSizeWord;
+	cmd.burstLength       = 1;
+	cmd.NumOfRows         = in->numOfRows;
+	cmd.RowIncrement = (in->rowIncrement + (axiInputPpw - 1)) / axiInputPpw;
+	cmd.mainUnpackHeight  = in->ySize;
+	cmd.mainUnpackWidth   = in->xSize - 1;
+	cmd.mainUnpackHbiSel  = (uint32_t)in->unpackHbi;
+	cmd.mainUnpackPhase   = in->unpackPhase;
+	cmd.unpackPattern     = axiRdUnpackPattern;
+	cmd.padLeft           = in->padRepeatCountLeft;
+	cmd.padRight          = in->padRepeatCountRight;
+	cmd.padTop            = in->padRepeatCountTop;
+	cmd.padBottom         = in->padRepeatCountBottom;
+	cmd.leftUnpackPattern0   = in->padLeftComponentSelectCycle0;
+	cmd.leftUnpackPattern1   = in->padLeftComponentSelectCycle1;
+	cmd.leftUnpackPattern2   = in->padLeftComponentSelectCycle2;
+	cmd.leftUnpackPattern3   = in->padLeftComponentSelectCycle3;
+	cmd.leftUnpackStop0      = in->padLeftStopCycle0;
+	cmd.leftUnpackStop1      = in->padLeftStopCycle1;
+	cmd.leftUnpackStop2      = in->padLeftStopCycle2;
+	cmd.leftUnpackStop3      = in->padLeftStopCycle3;
+	cmd.rightUnpackPattern0  = in->padRightComponentSelectCycle0;
+	cmd.rightUnpackPattern1  = in->padRightComponentSelectCycle1;
+	cmd.rightUnpackPattern2  = in->padRightComponentSelectCycle2;
+	cmd.rightUnpackPattern3  = in->padRightComponentSelectCycle3;
+	cmd.rightUnpackStop0     = in->padRightStopCycle0;
+	cmd.rightUnpackStop1     = in->padRightStopCycle1;
+	cmd.rightUnpackStop2     = in->padRightStopCycle2;
+	cmd.rightUnpackStop3     = in->padRightStopCycle3;
+	cmd.topUnapckPattern     = in->padTopLineCount;
+	cmd.bottomUnapckPattern  = in->padBottomLineCount;
+
+	/*  program vfe_bus_cfg */
+	vfe_prog_hw(ctrl->vfebase + VFE_BUS_STRIPE_RD_ADDR_0,
+		(uint32_t *)&cmd, sizeof(cmd));
+
+	/* Workaround: leave this at the default value for now. */
+	busPingpongRdIrqEnable = 0xf;
+
+	writel(busPingpongRdIrqEnable, ctrl->vfebase + VFE_BUS_PINGPONG_IRQ_EN);
+}
+
+void vfe_axi_output_config(struct vfe_cmd_axi_output_config *in)
+{
+	/* local variable  */
+	uint32_t *pcircle;
+	uint32_t *pdest;
+	uint32_t *psrc;
+	uint8_t  i;
+	uint8_t  fcnt;
+	uint16_t axioutpw = 8;
+
+	/* Parameter, condition and usage-mode checks. */
+	ctrl->encPath.fragCount = in->output2.fragmentCount;
+	if (ctrl->encPath.fragCount > 1)
+		ctrl->encPath.multiFrag = TRUE;
+
+	ctrl->viewPath.fragCount = in->output1.fragmentCount;
+	if (ctrl->viewPath.fragCount > 1)
+		ctrl->viewPath.multiFrag = TRUE;
+
+	/* VFE_BUS_CFG.  raw data size */
+	ctrl->vfeBusConfigLocal.rawPixelDataSize = in->outputDataSize;
+
+	switch (in->outputDataSize) {
+	case VFE_RAW_PIXEL_DATA_SIZE_8BIT:
+		axioutpw = 8;
+		break;
+
+	case VFE_RAW_PIXEL_DATA_SIZE_10BIT:
+		axioutpw = 6;
+		break;
+
+	case VFE_RAW_PIXEL_DATA_SIZE_12BIT:
+		axioutpw = 5;
+		break;
+	}
+
+	ctrl->axiOutputMode = in->outputMode;
+
+	CDBG("axiOutputMode = %d\n", ctrl->axiOutputMode);
+
+	switch (ctrl->axiOutputMode) {
+	case VFE_AXI_OUTPUT_MODE_Output1: {
+		ctrl->vfeCamifConfigLocal.camif2BusEnable   = FALSE;
+		ctrl->vfeCamifConfigLocal.camif2OutputEnable = TRUE;
+		ctrl->vfeBusConfigLocal.rawWritePathSelect  =
+			VFE_RAW_OUTPUT_DISABLED;
+
+		ctrl->encPath.pathEnabled                   = FALSE;
+		ctrl->vfeImaskLocal.encIrq                  = FALSE;
+		ctrl->vfeIrqCompositeMaskLocal.encIrqComMask =
+			VFE_COMP_IRQ_BOTH_Y_CBCR;
+
+		ctrl->vfeBusConfigLocal.encYWrPathEn          = FALSE;
+		ctrl->vfeBusConfigLocal.encCbcrWrPathEn       = FALSE;
+		ctrl->viewPath.pathEnabled                    = TRUE;
+		ctrl->vfeImaskLocal.viewIrq                   = TRUE;
+		ctrl->vfeIrqCompositeMaskLocal.viewIrqComMask =
+			VFE_COMP_IRQ_BOTH_Y_CBCR;
+
+		ctrl->vfeBusConfigLocal.viewYWrPathEn    = TRUE;
+		ctrl->vfeBusConfigLocal.viewCbcrWrPathEn = TRUE;
+
+		if (ctrl->vfeBusConfigLocal.encYWrPathEn &&
+				ctrl->encPath.multiFrag)
+			ctrl->vfeImaskLocal.encYPingpongIrq    = TRUE;
+
+		if (ctrl->vfeBusConfigLocal.encCbcrWrPathEn &&
+				ctrl->encPath.multiFrag)
+			ctrl->vfeImaskLocal.encCbcrPingpongIrq = TRUE;
+
+		if (ctrl->vfeBusConfigLocal.viewYWrPathEn &&
+				ctrl->viewPath.multiFrag)
+			ctrl->vfeImaskLocal.viewYPingpongIrq   = TRUE;
+
+		if (ctrl->vfeBusConfigLocal.viewCbcrWrPathEn &&
+				ctrl->viewPath.multiFrag)
+			ctrl->vfeImaskLocal.viewCbcrPingpongIrq = TRUE;
+	} /* VFE_AXI_OUTPUT_MODE_Output1 */
+		break;
+
+	case VFE_AXI_OUTPUT_MODE_Output2: {
+		ctrl->vfeCamifConfigLocal.camif2BusEnable   = FALSE;
+		ctrl->vfeCamifConfigLocal.camif2OutputEnable = TRUE;
+		ctrl->vfeBusConfigLocal.rawWritePathSelect  =
+			VFE_RAW_OUTPUT_DISABLED;
+
+		ctrl->encPath.pathEnabled                   = TRUE;
+		ctrl->vfeImaskLocal.encIrq                  = TRUE;
+		ctrl->vfeIrqCompositeMaskLocal.encIrqComMask =
+			VFE_COMP_IRQ_BOTH_Y_CBCR;
+
+		ctrl->vfeBusConfigLocal.encYWrPathEn        = TRUE;
+		ctrl->vfeBusConfigLocal.encCbcrWrPathEn     = TRUE;
+
+		ctrl->viewPath.pathEnabled                   = FALSE;
+		ctrl->vfeImaskLocal.viewIrq                  = FALSE;
+		ctrl->vfeIrqCompositeMaskLocal.viewIrqComMask =
+			VFE_COMP_IRQ_BOTH_Y_CBCR;
+
+		ctrl->vfeBusConfigLocal.viewYWrPathEn        = FALSE;
+		ctrl->vfeBusConfigLocal.viewCbcrWrPathEn     = FALSE;
+
+		if (ctrl->vfeBusConfigLocal.encYWrPathEn &&
+				ctrl->encPath.multiFrag)
+			ctrl->vfeImaskLocal.encYPingpongIrq    = TRUE;
+
+		if (ctrl->vfeBusConfigLocal.encCbcrWrPathEn &&
+				ctrl->encPath.multiFrag)
+			ctrl->vfeImaskLocal.encCbcrPingpongIrq = TRUE;
+
+		if (ctrl->vfeBusConfigLocal.viewYWrPathEn &&
+				ctrl->viewPath.multiFrag)
+			ctrl->vfeImaskLocal.viewYPingpongIrq   = TRUE;
+
+		if (ctrl->vfeBusConfigLocal.viewCbcrWrPathEn &&
+				ctrl->viewPath.multiFrag)
+			ctrl->vfeImaskLocal.viewCbcrPingpongIrq = TRUE;
+	} /* VFE_AXI_OUTPUT_MODE_Output2 */
+			break;
+
+	case VFE_AXI_OUTPUT_MODE_Output1AndOutput2: {
+		ctrl->vfeCamifConfigLocal.camif2BusEnable    = FALSE;
+		ctrl->vfeCamifConfigLocal.camif2OutputEnable = TRUE;
+		ctrl->vfeBusConfigLocal.rawWritePathSelect   =
+			VFE_RAW_OUTPUT_DISABLED;
+
+		ctrl->encPath.pathEnabled                    = TRUE;
+		ctrl->vfeImaskLocal.encIrq                   = TRUE;
+		ctrl->vfeIrqCompositeMaskLocal.encIrqComMask =
+			VFE_COMP_IRQ_BOTH_Y_CBCR;
+
+		ctrl->vfeBusConfigLocal.encYWrPathEn         = TRUE;
+		ctrl->vfeBusConfigLocal.encCbcrWrPathEn      = TRUE;
+		ctrl->viewPath.pathEnabled                   = TRUE;
+		ctrl->vfeImaskLocal.viewIrq                  = TRUE;
+		ctrl->vfeIrqCompositeMaskLocal.viewIrqComMask =
+			VFE_COMP_IRQ_BOTH_Y_CBCR;
+
+		ctrl->vfeBusConfigLocal.viewYWrPathEn        = TRUE;
+		ctrl->vfeBusConfigLocal.viewCbcrWrPathEn     = TRUE;
+
+		if (ctrl->vfeBusConfigLocal.encYWrPathEn &&
+				ctrl->encPath.multiFrag)
+			ctrl->vfeImaskLocal.encYPingpongIrq    = TRUE;
+
+		if (ctrl->vfeBusConfigLocal.encCbcrWrPathEn &&
+				ctrl->encPath.multiFrag)
+			ctrl->vfeImaskLocal.encCbcrPingpongIrq = TRUE;
+
+		if (ctrl->vfeBusConfigLocal.viewYWrPathEn &&
+				ctrl->viewPath.multiFrag)
+			ctrl->vfeImaskLocal.viewYPingpongIrq   = TRUE;
+
+		if (ctrl->vfeBusConfigLocal.viewCbcrWrPathEn &&
+				ctrl->viewPath.multiFrag)
+			ctrl->vfeImaskLocal.viewCbcrPingpongIrq = TRUE;
+	} /* VFE_AXI_OUTPUT_MODE_Output1AndOutput2 */
+		break;
+
+	case VFE_AXI_OUTPUT_MODE_CAMIFToAXIViaOutput2: {
+		/* For raw snapshot, we need both ping and pong buffer
+		 * initialized to the same address. Otherwise, if we
+		 * leave the pong buffer to NULL, there will be axi_error.
+		 * Note that ideally we should deal with this at upper layer,
+		 * which is in msm_vfe8x.c */
+		if (!in->output2.outputCbcr.outFragments[1][0]) {
+			in->output2.outputCbcr.outFragments[1][0] =
+				in->output2.outputCbcr.outFragments[0][0];
+		}
+
+		ctrl->vfeCamifConfigLocal.camif2BusEnable   = TRUE;
+		ctrl->vfeCamifConfigLocal.camif2OutputEnable = FALSE;
+		ctrl->vfeBusConfigLocal.rawWritePathSelect  =
+			VFE_RAW_OUTPUT_ENC_CBCR_PATH;
+
+		ctrl->encPath.pathEnabled                   = TRUE;
+		ctrl->vfeImaskLocal.encIrq                  = TRUE;
+		ctrl->vfeIrqCompositeMaskLocal.encIrqComMask =
+			VFE_COMP_IRQ_CBCR_ONLY;
+
+		ctrl->vfeBusConfigLocal.encYWrPathEn        = FALSE;
+		ctrl->vfeBusConfigLocal.encCbcrWrPathEn     = TRUE;
+
+		ctrl->viewPath.pathEnabled                   = FALSE;
+		ctrl->vfeImaskLocal.viewIrq                  = FALSE;
+		ctrl->vfeIrqCompositeMaskLocal.viewIrqComMask =
+			VFE_COMP_IRQ_BOTH_Y_CBCR;
+
+		ctrl->vfeBusConfigLocal.viewYWrPathEn        = FALSE;
+		ctrl->vfeBusConfigLocal.viewCbcrWrPathEn     = FALSE;
+
+		if (ctrl->vfeBusConfigLocal.encYWrPathEn &&
+				ctrl->encPath.multiFrag)
+			ctrl->vfeImaskLocal.encYPingpongIrq    = TRUE;
+
+		if (ctrl->vfeBusConfigLocal.encCbcrWrPathEn &&
+				ctrl->encPath.multiFrag)
+			ctrl->vfeImaskLocal.encCbcrPingpongIrq = TRUE;
+
+		if (ctrl->vfeBusConfigLocal.viewYWrPathEn &&
+				ctrl->viewPath.multiFrag)
+			ctrl->vfeImaskLocal.viewYPingpongIrq   = TRUE;
+
+		if (ctrl->vfeBusConfigLocal.viewCbcrWrPathEn &&
+				ctrl->viewPath.multiFrag)
+			ctrl->vfeImaskLocal.viewCbcrPingpongIrq = TRUE;
+	} /* VFE_AXI_OUTPUT_MODE_CAMIFToAXIViaOutput2 */
+		break;
+
+	case VFE_AXI_OUTPUT_MODE_Output2AndCAMIFToAXIViaOutput1: {
+		ctrl->vfeCamifConfigLocal.camif2BusEnable   = TRUE;
+		ctrl->vfeCamifConfigLocal.camif2OutputEnable = TRUE;
+		ctrl->vfeBusConfigLocal.rawWritePathSelect  =
+			VFE_RAW_OUTPUT_VIEW_CBCR_PATH;
+
+		ctrl->encPath.pathEnabled                   = TRUE;
+		ctrl->vfeImaskLocal.encIrq                  = TRUE;
+		ctrl->vfeIrqCompositeMaskLocal.encIrqComMask =
+			VFE_COMP_IRQ_BOTH_Y_CBCR;
+
+		ctrl->vfeBusConfigLocal.encYWrPathEn        = TRUE;
+		ctrl->vfeBusConfigLocal.encCbcrWrPathEn     = TRUE;
+
+		ctrl->viewPath.pathEnabled                   = TRUE;
+		ctrl->vfeImaskLocal.viewIrq                  = TRUE;
+		ctrl->vfeIrqCompositeMaskLocal.viewIrqComMask =
+			VFE_COMP_IRQ_CBCR_ONLY;
+
+		ctrl->vfeBusConfigLocal.viewYWrPathEn        = FALSE;
+		ctrl->vfeBusConfigLocal.viewCbcrWrPathEn     = TRUE;
+
+		if (ctrl->vfeBusConfigLocal.encYWrPathEn &&
+				ctrl->encPath.multiFrag)
+			ctrl->vfeImaskLocal.encYPingpongIrq    = TRUE;
+
+		if (ctrl->vfeBusConfigLocal.encCbcrWrPathEn &&
+				ctrl->encPath.multiFrag)
+			ctrl->vfeImaskLocal.encCbcrPingpongIrq = TRUE;
+
+		if (ctrl->vfeBusConfigLocal.viewYWrPathEn &&
+				ctrl->viewPath.multiFrag)
+			ctrl->vfeImaskLocal.viewYPingpongIrq   = TRUE;
+
+		if (ctrl->vfeBusConfigLocal.viewCbcrWrPathEn &&
+				ctrl->viewPath.multiFrag)
+			ctrl->vfeImaskLocal.viewCbcrPingpongIrq = TRUE;
+	} /* VFE_AXI_OUTPUT_MODE_Output2AndCAMIFToAXIViaOutput1 */
+		break;
+
+	case VFE_AXI_OUTPUT_MODE_Output1AndCAMIFToAXIViaOutput2: {
+		ctrl->vfeCamifConfigLocal.camif2BusEnable   = TRUE;
+		ctrl->vfeCamifConfigLocal.camif2OutputEnable = TRUE;
+		ctrl->vfeBusConfigLocal.rawWritePathSelect  =
+			VFE_RAW_OUTPUT_ENC_CBCR_PATH;
+
+		ctrl->encPath.pathEnabled                     = TRUE;
+		ctrl->vfeImaskLocal.encIrq                    = TRUE;
+		ctrl->vfeIrqCompositeMaskLocal.encIrqComMask  =
+			VFE_COMP_IRQ_CBCR_ONLY;
+
+		ctrl->vfeBusConfigLocal.encYWrPathEn          = FALSE;
+		ctrl->vfeBusConfigLocal.encCbcrWrPathEn       = TRUE;
+
+		ctrl->viewPath.pathEnabled                    = TRUE;
+		ctrl->vfeImaskLocal.viewIrq                   = TRUE;
+
+		ctrl->vfeIrqCompositeMaskLocal.viewIrqComMask =
+			VFE_COMP_IRQ_BOTH_Y_CBCR;
+
+		ctrl->vfeBusConfigLocal.viewYWrPathEn         = TRUE;
+		ctrl->vfeBusConfigLocal.viewCbcrWrPathEn      = TRUE;
+
+		if (ctrl->vfeBusConfigLocal.encYWrPathEn &&
+				ctrl->encPath.multiFrag)
+			ctrl->vfeImaskLocal.encYPingpongIrq       = TRUE;
+
+		if (ctrl->vfeBusConfigLocal.encCbcrWrPathEn &&
+				ctrl->encPath.multiFrag)
+			ctrl->vfeImaskLocal.encCbcrPingpongIrq    = TRUE;
+
+		if (ctrl->vfeBusConfigLocal.viewYWrPathEn &&
+				ctrl->viewPath.multiFrag)
+			ctrl->vfeImaskLocal.viewYPingpongIrq      = TRUE;
+
+		if (ctrl->vfeBusConfigLocal.viewCbcrWrPathEn &&
+				ctrl->viewPath.multiFrag)
+			ctrl->vfeImaskLocal.viewCbcrPingpongIrq   = TRUE;
+	} /* VFE_AXI_OUTPUT_MODE_Output1AndCAMIFToAXIViaOutput2 */
+		break;
+
+	case VFE_AXI_LAST_OUTPUT_MODE_ENUM:
+		break;
+	} /* switch */
+
+	/* Save the addresses for each path. */
+	/* output2 path */
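+	/*
+	 * outFragments[0] and [1] hold the ping and pong buffer addresses and
+	 * are copied into the circular address buffer; outFragments[2] holds
+	 * the addresses for the next frame and goes into nextFrameAddrBuf.
+	 */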
+	fcnt = ctrl->encPath.fragCount;
+
+	pcircle = ctrl->encPath.yPath.addressBuffer;
+	pdest = ctrl->encPath.nextFrameAddrBuf;
+
+	psrc = &(in->output2.outputY.outFragments[0][0]);
+	for (i = 0; i < fcnt; i++)
+		*pcircle++ = *psrc++;
+
+	psrc = &(in->output2.outputY.outFragments[1][0]);
+	for (i = 0; i < fcnt; i++)
+		*pcircle++ = *psrc++;
+
+	psrc = &(in->output2.outputY.outFragments[2][0]);
+	for (i = 0; i < fcnt; i++)
+		*pdest++ = *psrc++;
+
+	pcircle = ctrl->encPath.cbcrPath.addressBuffer;
+
+	psrc = &(in->output2.outputCbcr.outFragments[0][0]);
+	for (i = 0; i < fcnt; i++)
+		*pcircle++ = *psrc++;
+
+	psrc = &(in->output2.outputCbcr.outFragments[1][0]);
+	for (i = 0; i < fcnt; i++)
+		*pcircle++ = *psrc++;
+
+	psrc = &(in->output2.outputCbcr.outFragments[2][0]);
+	for (i = 0; i < fcnt; i++)
+		*pdest++ = *psrc++;
+
+	vfe_set_bus_pipo_addr(&ctrl->viewPath, &ctrl->encPath);
+
+	ctrl->encPath.ackPending = FALSE;
+	ctrl->encPath.currentFrame = ping;
+	ctrl->encPath.whichOutputPath = 1;
+	ctrl->encPath.yPath.fragIndex = 2;
+	ctrl->encPath.cbcrPath.fragIndex = 2;
+	ctrl->encPath.yPath.hwCurrentFlag = ping;
+	ctrl->encPath.cbcrPath.hwCurrentFlag = ping;
+
+	/* output1 path */
+	pcircle = ctrl->viewPath.yPath.addressBuffer;
+	pdest = ctrl->viewPath.nextFrameAddrBuf;
+	fcnt = ctrl->viewPath.fragCount;
+
+	psrc = &(in->output1.outputY.outFragments[0][0]);
+	for (i = 0; i < fcnt; i++)
+		*pcircle++ = *psrc++;
+
+	psrc = &(in->output1.outputY.outFragments[1][0]);
+	for (i = 0; i < fcnt; i++)
+		*pcircle++ = *psrc++;
+
+	psrc = &(in->output1.outputY.outFragments[2][0]);
+	for (i = 0; i < fcnt; i++)
+		*pdest++ = *psrc++;
+
+	pcircle = ctrl->viewPath.cbcrPath.addressBuffer;
+
+	psrc = &(in->output1.outputCbcr.outFragments[0][0]);
+	for (i = 0; i < fcnt; i++)
+		*pcircle++ = *psrc++;
+
+	psrc = &(in->output1.outputCbcr.outFragments[1][0]);
+	for (i = 0; i < fcnt; i++)
+		*pcircle++ = *psrc++;
+
+	psrc = &(in->output1.outputCbcr.outFragments[2][0]);
+	for (i = 0; i < fcnt; i++)
+		*pdest++ = *psrc++;
+
+	ctrl->viewPath.ackPending = FALSE;
+	ctrl->viewPath.currentFrame = ping;
+	ctrl->viewPath.whichOutputPath = 0;
+	ctrl->viewPath.yPath.fragIndex = 2;
+	ctrl->viewPath.cbcrPath.fragIndex = 2;
+	ctrl->viewPath.yPath.hwCurrentFlag = ping;
+	ctrl->viewPath.cbcrPath.hwCurrentFlag = ping;
+
+	/* call to program the registers. */
+	vfe_axi_output(in, &ctrl->viewPath, &ctrl->encPath, axioutpw);
+}
+
+void vfe_camif_config(struct vfe_cmd_camif_config *in)
+{
+	struct vfe_camifcfg cmd;
+	memset(&cmd, 0, sizeof(cmd));
+
+	CDBG("camif.frame pixelsPerLine = %d\n", in->frame.pixelsPerLine);
+	CDBG("camif.frame linesPerFrame = %d\n", in->frame.linesPerFrame);
+	CDBG("camif.window firstpixel = %d\n", in->window.firstpixel);
+	CDBG("camif.window lastpixel = %d\n",  in->window.lastpixel);
+	CDBG("camif.window firstline = %d\n",  in->window.firstline);
+	CDBG("camif.window lastline = %d\n",   in->window.lastline);
+
+	/* determine whether the epoch interrupts need to be enabled. */
+	if ((in->epoch1.enable == TRUE) &&
+	    (in->epoch1.lineindex <= in->frame.linesPerFrame))
+		ctrl->vfeImaskLocal.camifEpoch1Irq = 1;
+
+	if ((in->epoch2.enable == TRUE) &&
+	    (in->epoch2.lineindex <= in->frame.linesPerFrame))
+		ctrl->vfeImaskLocal.camifEpoch2Irq = 1;
+
+	/* save the content to program CAMIF_CONFIG separately. */
+	ctrl->vfeCamifConfigLocal.camifCfgFromCmd = in->camifConfig;
+
+	/* EFS_Config */
+	cmd.efsEndOfLine     = in->EFS.efsendofline;
+	cmd.efsStartOfLine   = in->EFS.efsstartofline;
+	cmd.efsEndOfFrame    = in->EFS.efsendofframe;
+	cmd.efsStartOfFrame  = in->EFS.efsstartofframe;
+
+	/* Frame Config */
+	cmd.frameConfigPixelsPerLine = in->frame.pixelsPerLine;
+	cmd.frameConfigLinesPerFrame = in->frame.linesPerFrame;
+
+	/* Window Width Config */
+	cmd.windowWidthCfgLastPixel  = in->window.lastpixel;
+	cmd.windowWidthCfgFirstPixel = in->window.firstpixel;
+
+	/* Window Height Config */
+	cmd.windowHeightCfglastLine   = in->window.lastline;
+	cmd.windowHeightCfgfirstLine  = in->window.firstline;
+
+	/* Subsample 1 Config */
+	cmd.subsample1CfgPixelSkip = in->subsample.pixelskipmask;
+	cmd.subsample1CfgLineSkip  = in->subsample.lineskipmask;
+
+	/* Subsample 2 Config */
+	cmd.subsample2CfgFrameSkip      = in->subsample.frameskip;
+	cmd.subsample2CfgFrameSkipMode  = in->subsample.frameskipmode;
+	cmd.subsample2CfgPixelSkipWrap  = in->subsample.pixelskipwrap;
+
+	/* Epoch Interrupt */
+	cmd.epoch1Line = in->epoch1.lineindex;
+	cmd.epoch2Line = in->epoch2.lineindex;
+
+	vfe_prog_hw(ctrl->vfebase + CAMIF_EFS_CONFIG,
+		(uint32_t *)&cmd, sizeof(cmd));
+}
+
+void vfe_fov_crop_config(struct vfe_cmd_fov_crop_config *in)
+{
+	struct vfe_fov_crop_cfg cmd;
+	memset(&cmd, 0, sizeof(cmd));
+
+	ctrl->vfeModuleEnableLocal.cropEnable = in->enable;
+
+	/* FOV Crop, Part 1 */
+	cmd.lastPixel  = in->lastPixel;
+	cmd.firstPixel = in->firstPixel;
+
+	/* FOV Crop, Part 2 */
+	cmd.lastLine   = in->lastLine;
+	cmd.firstLine  = in->firstLine;
+
+	vfe_prog_hw(ctrl->vfebase + VFE_CROP_WIDTH_CFG,
+		(uint32_t *)&cmd, sizeof(cmd));
+}
+
+void vfe_get_hw_version(struct vfe_cmd_hw_version *out)
+{
+	uint32_t vfeHwVersionPacked;
+	struct vfe_hw_ver ver;
+
+	vfeHwVersionPacked = readl(ctrl->vfebase + VFE_HW_VERSION);
+
+	ver = *((struct vfe_hw_ver *)&vfeHwVersionPacked);
+
+	out->coreVersion  = ver.coreVersion;
+	out->minorVersion = ver.minorVersion;
+	out->majorVersion = ver.majorVersion;
+}
+
+static void vfe_reset_internal_variables(void)
+{
+	/* local variables to program the hardware. */
+	ctrl->vfeImaskPacked = 0;
+	ctrl->vfeImaskCompositePacked = 0;
+
+	/* FALSE = disable, TRUE = enable. */
+	memset(&ctrl->vfeModuleEnableLocal, 0,
+		sizeof(ctrl->vfeModuleEnableLocal));
+
+	/* 0 = disable, 1 = enable */
+	memset(&ctrl->vfeCamifConfigLocal, 0,
+		sizeof(ctrl->vfeCamifConfigLocal));
+	/* 0 = disable, 1 = enable */
+	memset(&ctrl->vfeImaskLocal, 0, sizeof(ctrl->vfeImaskLocal));
+	memset(&ctrl->vfeStatsCmdLocal, 0, sizeof(ctrl->vfeStatsCmdLocal));
+	memset(&ctrl->vfeBusConfigLocal, 0, sizeof(ctrl->vfeBusConfigLocal));
+	memset(&ctrl->vfeBusPmConfigLocal, 0,
+		sizeof(ctrl->vfeBusPmConfigLocal));
+	memset(&ctrl->vfeBusCmdLocal, 0, sizeof(ctrl->vfeBusCmdLocal));
+	memset(&ctrl->vfeInterruptNameLocal, 0,
+		sizeof(ctrl->vfeInterruptNameLocal));
+	memset(&ctrl->vfeDroppedFrameCounts, 0,
+		sizeof(ctrl->vfeDroppedFrameCounts));
+	memset(&ctrl->vfeIrqThreadMsgLocal, 0,
+		sizeof(ctrl->vfeIrqThreadMsgLocal));
+
+	/* state control variables */
+	ctrl->vfeStartAckPendingFlag = FALSE;
+	ctrl->vfeStopAckPending = FALSE;
+	ctrl->vfeIrqCompositeMaskLocal.ceDoneSel = 0;
+	ctrl->vfeIrqCompositeMaskLocal.encIrqComMask = VFE_COMP_IRQ_BOTH_Y_CBCR;
+	ctrl->vfeIrqCompositeMaskLocal.viewIrqComMask =
+		VFE_COMP_IRQ_BOTH_Y_CBCR;
+
+	ctrl->vstate = VFE_STATE_IDLE;
+
+	ctrl->axiOutputMode = VFE_AXI_LAST_OUTPUT_MODE_ENUM;
+	/* 0 for continuous mode, 1 for snapshot mode */
+	ctrl->vfeOperationMode = VFE_START_OPERATION_MODE_CONTINUOUS;
+	ctrl->vfeSnapShotCount = 0;
+	ctrl->vfeStatsPingPongReloadFlag = FALSE;
+	/* this is an unsigned 32-bit integer. */
+	ctrl->vfeFrameId = 0;
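+	/* default frame-skip: all-ones patterns over a 31-frame period,
+	 * i.e. every frame is written out. */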
+	ctrl->vfeFrameSkip.output1Pattern = 0xffffffff;
+	ctrl->vfeFrameSkip.output1Period  = 31;
+	ctrl->vfeFrameSkip.output2Pattern = 0xffffffff;
+	ctrl->vfeFrameSkip.output2Period  = 31;
+	ctrl->vfeFrameSkipPattern = 0xffffffff;
+	ctrl->vfeFrameSkipCount   = 0;
+	ctrl->vfeFrameSkipPeriod  = 31;
+
+	memset((void *)&ctrl->encPath, 0, sizeof(ctrl->encPath));
+	memset((void *)&ctrl->viewPath, 0, sizeof(ctrl->viewPath));
+
+	ctrl->encPath.whichOutputPath  = 1;
+	ctrl->encPath.cbcrStatusBit    = 5;
+	ctrl->viewPath.whichOutputPath = 0;
+	ctrl->viewPath.cbcrStatusBit   = 7;
+
+	ctrl->vfeTestGenStartFlag = FALSE;
+
+	/* default to bank 0. */
+	ctrl->vfeLaBankSel = 0;
+
+	/* default to bank 0 for all channels. */
+	memset(&ctrl->vfeGammaLutSel, 0, sizeof(ctrl->vfeGammaLutSel));
+
+	/* Stats control variables. */
+	memset(&ctrl->afStatsControl, 0, sizeof(ctrl->afStatsControl));
+	memset(&ctrl->awbStatsControl, 0, sizeof(ctrl->awbStatsControl));
+	vfe_set_stats_pingpong_address(&ctrl->afStatsControl,
+		&ctrl->awbStatsControl);
+}
+
+void vfe_reset(void)
+{
+	spin_lock_init(&msm_vfe_ctrl_lock);
+	vfe_reset_internal_variables();
+
+	atomic_set(&ctrl->vfe_serv_interrupt, 1);
+	ctrl->vfeImaskLocal.resetAckIrq = TRUE;
+	ctrl->vfeImaskPacked = vfe_irq_pack(ctrl->vfeImaskLocal);
+
+	/* disable all interrupts. */
+	writel(VFE_DISABLE_ALL_IRQS, ctrl->vfebase + VFE_IRQ_COMPOSITE_MASK);
+
+	/* clear all pending interrupts. */
+	writel(VFE_CLEAR_ALL_IRQS, ctrl->vfebase + VFE_IRQ_CLEAR);
+
+	/* enable reset_ack interrupt. */
+	writel(ctrl->vfeImaskPacked, ctrl->vfebase + VFE_IRQ_MASK);
+
+	writel(VFE_RESET_UPON_RESET_CMD, ctrl->vfebase + VFE_GLOBAL_RESET_CMD);
+}
diff --git a/drivers/media/video/msm/msm_vfe8x_proc.h b/drivers/media/video/msm/msm_vfe8x_proc.h
new file mode 100644
index 0000000..da00e8f
--- /dev/null
+++ b/drivers/media/video/msm/msm_vfe8x_proc.h
@@ -0,0 +1,1563 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MSM_VFE8X_REG_H__
+#define __MSM_VFE8X_REG_H__
+
+#include <mach/msm_iomap.h>
+#include <mach/camera.h>
+#include "msm_vfe8x.h"
+
+#define MSM_AXI_QOS_PREVIEW		128000
+#define MSM_AXI_QOS_SNAPSHOT	128000
+#define MSM_AXI_QOS_RECORDING	128000
+
+/* at start of camif, bits 1:0 = 0x01: enable
+ * image data capture at frame boundary. */
+#define CAMIF_COMMAND_START  0x00000005
+
+/* bit 2= 0x1:clear the CAMIF_STATUS register
+ * value. */
+#define CAMIF_COMMAND_CLEAR  0x00000004
+
+/* at stop of vfe pipeline, for now it is assumed
+ * that camif will stop at any time. Bits 1:0 = 0b10:
+ * disable image data capture immediately. */
+#define CAMIF_COMMAND_STOP_IMMEDIATELY  0x00000002
+
+/* at stop of vfe pipeline, for now it is assumed
+ * that camif will stop at any time. Bits 1:0 = 0b00:
+ * disable image data capture at frame boundary */
+#define CAMIF_COMMAND_STOP_AT_FRAME_BOUNDARY  0x00000000
+
+/* to halt axi bridge */
+#define AXI_HALT  0x00000001
+
+/* clear the halt bit. */
+#define AXI_HALT_CLEAR  0x00000000
+
+/* reset the pipeline when stop command is issued.
+ * (without reset the register.) bit 26-31 = 0,
+ * domain reset, bit 0-9 = 1 for module reset, except
+ * register module. */
+#define VFE_RESET_UPON_STOP_CMD  0x000003ef
+
+/* reset the pipeline when reset command.
+ * bit 26-31 = 0, domain reset, bit 0-9 = 1 for module reset. */
+#define VFE_RESET_UPON_RESET_CMD  0x000003ff
+
+/* bit 5 is for axi status idle or busy.
+ * 1 = halted, 0 = busy */
+#define AXI_STATUS_BUSY_MASK 0x00000020
+
+/* bit 0 & bit 1 = 1, both y and cbcr irqs need to be present
+ * for frame done interrupt */
+#define VFE_COMP_IRQ_BOTH_Y_CBCR 3
+
+/* bit 1 = 1, only cbcr irq triggers frame done interrupt */
+#define VFE_COMP_IRQ_CBCR_ONLY 2
+
+/* bit 0 = 1, only y irq triggers frame done interrupt */
+#define VFE_COMP_IRQ_Y_ONLY 1
+
+/* bit 0 = 1, PM go;   bit1 = 1, PM stop */
+#define VFE_PERFORMANCE_MONITOR_GO   0x00000001
+#define VFE_PERFORMANCE_MONITOR_STOP 0x00000002
+
+/* bit 0 = 1, test gen go;   bit1 = 1, test gen stop */
+#define VFE_TEST_GEN_GO   0x00000001
+#define VFE_TEST_GEN_STOP 0x00000002
+
+/* the chroma is assumed to be interpolated between
+ * the luma samples.  JPEG 4:2:2 */
+#define VFE_CHROMA_UPSAMPLE_INTERPOLATED 0
+
+/* constants for irq registers */
+#define VFE_DISABLE_ALL_IRQS 0
+/* bit =1 is to clear the corresponding bit in VFE_IRQ_STATUS.  */
+#define VFE_CLEAR_ALL_IRQS   0xffffffff
+/* interrupt mask while waiting for the stop ack: the driver has
+ * already requested a stop and is waiting for the reset irq.
+ * bits 29,28,27,26 are for the async timers, bit 9 for reset */
+#define VFE_IMASK_WHILE_STOPPING  0x3c000200
+
+/* in the normal case we still want the error status reported:
+ * bits 0, 6, 20, 21, 22, 30, 31 */
+#define VFE_IMASK_ERROR_ONLY             0xC0700041
+#define VFE_REG_UPDATE_TRIGGER           1
+#define VFE_PM_BUF_MAX_CNT_MASK          0xFF
+#define VFE_DMI_CFG_DEFAULT              0x00000100
+#define LENS_ROLL_OFF_DELTA_TABLE_OFFSET 32
+#define VFE_AF_PINGPONG_STATUS_BIT       0x100
+#define VFE_AWB_PINGPONG_STATUS_BIT      0x200
+
+/* VFE I/O registers */
+enum {
+	VFE_HW_VERSION                    = 0x00000000,
+	VFE_GLOBAL_RESET_CMD              = 0x00000004,
+	VFE_MODULE_RESET                  = 0x00000008,
+	VFE_CGC_OVERRIDE                  = 0x0000000C,
+	VFE_MODULE_CFG                    = 0x00000010,
+	VFE_CFG                           = 0x00000014,
+	VFE_IRQ_MASK                      = 0x00000018,
+	VFE_IRQ_CLEAR                     = 0x0000001C,
+	VFE_IRQ_STATUS                    = 0x00000020,
+	VFE_IRQ_COMPOSITE_MASK            = 0x00000024,
+	VFE_BUS_CMD                       = 0x00000028,
+	VFE_BUS_CFG                       = 0x0000002C,
+	VFE_BUS_ENC_Y_WR_PING_ADDR        = 0x00000030,
+	VFE_BUS_ENC_Y_WR_PONG_ADDR        = 0x00000034,
+	VFE_BUS_ENC_Y_WR_IMAGE_SIZE       = 0x00000038,
+	VFE_BUS_ENC_Y_WR_BUFFER_CFG       = 0x0000003C,
+	VFE_BUS_ENC_CBCR_WR_PING_ADDR     = 0x00000040,
+	VFE_BUS_ENC_CBCR_WR_PONG_ADDR     = 0x00000044,
+	VFE_BUS_ENC_CBCR_WR_IMAGE_SIZE    = 0x00000048,
+	VFE_BUS_ENC_CBCR_WR_BUFFER_CFG    = 0x0000004C,
+	VFE_BUS_VIEW_Y_WR_PING_ADDR       = 0x00000050,
+	VFE_BUS_VIEW_Y_WR_PONG_ADDR       = 0x00000054,
+	VFE_BUS_VIEW_Y_WR_IMAGE_SIZE      = 0x00000058,
+	VFE_BUS_VIEW_Y_WR_BUFFER_CFG      = 0x0000005C,
+	VFE_BUS_VIEW_CBCR_WR_PING_ADDR    = 0x00000060,
+	VFE_BUS_VIEW_CBCR_WR_PONG_ADDR    = 0x00000064,
+	VFE_BUS_VIEW_CBCR_WR_IMAGE_SIZE   = 0x00000068,
+	VFE_BUS_VIEW_CBCR_WR_BUFFER_CFG   = 0x0000006C,
+	VFE_BUS_STATS_AF_WR_PING_ADDR     = 0x00000070,
+	VFE_BUS_STATS_AF_WR_PONG_ADDR     = 0x00000074,
+	VFE_BUS_STATS_AWB_WR_PING_ADDR    = 0x00000078,
+	VFE_BUS_STATS_AWB_WR_PONG_ADDR    = 0x0000007C,
+	VFE_BUS_STATS_HIST_WR_PING_ADDR   = 0x00000080,
+	VFE_BUS_STATS_HIST_WR_PONG_ADDR   = 0x00000084,
+	VFE_BUS_STATS_WR_PRIORITY         = 0x00000088,
+	VFE_BUS_STRIPE_RD_ADDR_0          = 0x0000008C,
+	VFE_BUS_STRIPE_RD_ADDR_1          = 0x00000090,
+	VFE_BUS_STRIPE_RD_ADDR_2          = 0x00000094,
+	VFE_BUS_STRIPE_RD_ADDR_3          = 0x00000098,
+	VFE_BUS_STRIPE_RD_VSIZE           = 0x0000009C,
+	VFE_BUS_STRIPE_RD_HSIZE           = 0x000000A0,
+	VFE_BUS_STRIPE_RD_BUFFER_CFG      = 0x000000A4,
+	VFE_BUS_STRIPE_RD_UNPACK_CFG      = 0x000000A8,
+	VFE_BUS_STRIPE_RD_UNPACK          = 0x000000AC,
+	VFE_BUS_STRIPE_RD_PAD_SIZE        = 0x000000B0,
+	VFE_BUS_STRIPE_RD_PAD_L_UNPACK    = 0x000000B4,
+	VFE_BUS_STRIPE_RD_PAD_R_UNPACK    = 0x000000B8,
+	VFE_BUS_STRIPE_RD_PAD_TB_UNPACK   = 0x000000BC,
+	VFE_BUS_PINGPONG_IRQ_EN           = 0x000000C0,
+	VFE_BUS_PINGPONG_STATUS           = 0x000000C4,
+	VFE_BUS_PM_CMD                    = 0x000000C8,
+	VFE_BUS_PM_CFG                    = 0x000000CC,
+	VFE_BUS_ENC_Y_WR_PM_STATS_0       = 0x000000D0,
+	VFE_BUS_ENC_Y_WR_PM_STATS_1       = 0x000000D4,
+	VFE_BUS_ENC_CBCR_WR_PM_STATS_0    = 0x000000D8,
+	VFE_BUS_ENC_CBCR_WR_PM_STATS_1    = 0x000000DC,
+	VFE_BUS_VIEW_Y_WR_PM_STATS_0      = 0x000000E0,
+	VFE_BUS_VIEW_Y_WR_PM_STATS_1      = 0x000000E4,
+	VFE_BUS_VIEW_CBCR_WR_PM_STATS_0   = 0x000000E8,
+	VFE_BUS_VIEW_CBCR_WR_PM_STATS_1   = 0x000000EC,
+	VFE_BUS_MISR_CFG                  = 0x000000F4,
+	VFE_BUS_MISR_MAST_CFG_0           = 0x000000F8,
+	VFE_BUS_MISR_MAST_CFG_1           = 0x000000FC,
+	VFE_BUS_MISR_RD_VAL               = 0x00000100,
+	VFE_AXI_CMD                       = 0x00000104,
+	VFE_AXI_CFG                       = 0x00000108,
+	VFE_AXI_STATUS                    = 0x0000010C,
+	CAMIF_COMMAND                     = 0x00000110,
+	CAMIF_CONFIG                      = 0x00000114,
+	CAMIF_EFS_CONFIG                  = 0x00000118,
+	CAMIF_FRAME_CONFIG                = 0x0000011C,
+	CAMIF_WINDOW_WIDTH_CONFIG         = 0x00000120,
+	CAMIF_WINDOW_HEIGHT_CONFIG        = 0x00000124,
+	CAMIF_SUBSAMPLE1_CONFIG           = 0x00000128,
+	CAMIF_SUBSAMPLE2_CONFIG           = 0x0000012C,
+	CAMIF_EPOCH_IRQ                   = 0x00000130,
+	CAMIF_STATUS                      = 0x00000134,
+	CAMIF_MISR                        = 0x00000138,
+	VFE_SYNC_TIMER_CMD                = 0x0000013C,
+	VFE_SYNC_TIMER0_LINE_START        = 0x00000140,
+	VFE_SYNC_TIMER0_PIXEL_START       = 0x00000144,
+	VFE_SYNC_TIMER0_PIXEL_DURATION    = 0x00000148,
+	VFE_SYNC_TIMER1_LINE_START        = 0x0000014C,
+	VFE_SYNC_TIMER1_PIXEL_START       = 0x00000150,
+	VFE_SYNC_TIMER1_PIXEL_DURATION    = 0x00000154,
+	VFE_SYNC_TIMER2_LINE_START        = 0x00000158,
+	VFE_SYNC_TIMER2_PIXEL_START       = 0x0000015C,
+	VFE_SYNC_TIMER2_PIXEL_DURATION    = 0x00000160,
+	VFE_SYNC_TIMER_POLARITY           = 0x00000164,
+	VFE_ASYNC_TIMER_CMD               = 0x00000168,
+	VFE_ASYNC_TIMER0_CFG_0            = 0x0000016C,
+	VFE_ASYNC_TIMER0_CFG_1            = 0x00000170,
+	VFE_ASYNC_TIMER1_CFG_0            = 0x00000174,
+	VFE_ASYNC_TIMER1_CFG_1            = 0x00000178,
+	VFE_ASYNC_TIMER2_CFG_0            = 0x0000017C,
+	VFE_ASYNC_TIMER2_CFG_1            = 0x00000180,
+	VFE_ASYNC_TIMER3_CFG_0            = 0x00000184,
+	VFE_ASYNC_TIMER3_CFG_1            = 0x00000188,
+	VFE_TIMER_SEL                     = 0x0000018C,
+	VFE_REG_UPDATE_CMD                = 0x00000190,
+	VFE_BLACK_EVEN_EVEN_VALUE         = 0x00000194,
+	VFE_BLACK_EVEN_ODD_VALUE          = 0x00000198,
+	VFE_BLACK_ODD_EVEN_VALUE          = 0x0000019C,
+	VFE_BLACK_ODD_ODD_VALUE           = 0x000001A0,
+	VFE_ROLLOFF_CFG_0                 = 0x000001A4,
+	VFE_ROLLOFF_CFG_1                 = 0x000001A8,
+	VFE_ROLLOFF_CFG_2                 = 0x000001AC,
+	VFE_DEMUX_CFG                     = 0x000001B0,
+	VFE_DEMUX_GAIN_0                  = 0x000001B4,
+	VFE_DEMUX_GAIN_1                  = 0x000001B8,
+	VFE_DEMUX_EVEN_CFG                = 0x000001BC,
+	VFE_DEMUX_ODD_CFG                 = 0x000001C0,
+	VFE_DEMOSAIC_CFG                  = 0x000001C4,
+	VFE_DEMOSAIC_ABF_CFG_0            = 0x000001C8,
+	VFE_DEMOSAIC_ABF_CFG_1            = 0x000001CC,
+	VFE_DEMOSAIC_BPC_CFG_0            = 0x000001D0,
+	VFE_DEMOSAIC_BPC_CFG_1            = 0x000001D4,
+	VFE_DEMOSAIC_STATUS               = 0x000001D8,
+	VFE_CHROMA_UPSAMPLE_CFG           = 0x000001DC,
+	VFE_CROP_WIDTH_CFG                = 0x000001E0,
+	VFE_CROP_HEIGHT_CFG               = 0x000001E4,
+	VFE_COLOR_CORRECT_COEFF_0         = 0x000001E8,
+	VFE_COLOR_CORRECT_COEFF_1         = 0x000001EC,
+	VFE_COLOR_CORRECT_COEFF_2         = 0x000001F0,
+	VFE_COLOR_CORRECT_COEFF_3         = 0x000001F4,
+	VFE_COLOR_CORRECT_COEFF_4         = 0x000001F8,
+	VFE_COLOR_CORRECT_COEFF_5         = 0x000001FC,
+	VFE_COLOR_CORRECT_COEFF_6         = 0x00000200,
+	VFE_COLOR_CORRECT_COEFF_7         = 0x00000204,
+	VFE_COLOR_CORRECT_COEFF_8         = 0x00000208,
+	VFE_COLOR_CORRECT_OFFSET_0        = 0x0000020C,
+	VFE_COLOR_CORRECT_OFFSET_1        = 0x00000210,
+	VFE_COLOR_CORRECT_OFFSET_2        = 0x00000214,
+	VFE_COLOR_CORRECT_COEFF_Q         = 0x00000218,
+	VFE_LA_CFG                        = 0x0000021C,
+	VFE_LUT_BANK_SEL                  = 0x00000220,
+	VFE_CHROMA_ENHAN_A                = 0x00000224,
+	VFE_CHROMA_ENHAN_B                = 0x00000228,
+	VFE_CHROMA_ENHAN_C                = 0x0000022C,
+	VFE_CHROMA_ENHAN_D                = 0x00000230,
+	VFE_CHROMA_ENHAN_K                = 0x00000234,
+	VFE_COLOR_CONVERT_COEFF_0         = 0x00000238,
+	VFE_COLOR_CONVERT_COEFF_1         = 0x0000023C,
+	VFE_COLOR_CONVERT_COEFF_2         = 0x00000240,
+	VFE_COLOR_CONVERT_OFFSET          = 0x00000244,
+	VFE_ASF_CFG                       = 0x00000248,
+	VFE_ASF_SHARP_CFG_0               = 0x0000024C,
+	VFE_ASF_SHARP_CFG_1               = 0x00000250,
+	VFE_ASF_SHARP_COEFF_0             = 0x00000254,
+	VFE_ASF_SHARP_COEFF_1             = 0x00000258,
+	VFE_ASF_SHARP_COEFF_2             = 0x0000025C,
+	VFE_ASF_SHARP_COEFF_3             = 0x00000260,
+	VFE_ASF_MAX_EDGE                  = 0x00000264,
+	VFE_ASF_CROP_WIDTH_CFG            = 0x00000268,
+	VFE_ASF_CROP_HEIGHT_CFG           = 0x0000026C,
+	VFE_SCALE_CFG                     = 0x00000270,
+	VFE_SCALE_H_IMAGE_SIZE_CFG        = 0x00000274,
+	VFE_SCALE_H_PHASE_CFG             = 0x00000278,
+	VFE_SCALE_H_STRIPE_CFG            = 0x0000027C,
+	VFE_SCALE_V_IMAGE_SIZE_CFG        = 0x00000280,
+	VFE_SCALE_V_PHASE_CFG             = 0x00000284,
+	VFE_SCALE_V_STRIPE_CFG            = 0x00000288,
+	VFE_SCALE_Y_CFG                   = 0x0000028C,
+	VFE_SCALE_Y_H_IMAGE_SIZE_CFG      = 0x00000290,
+	VFE_SCALE_Y_H_PHASE_CFG           = 0x00000294,
+	VFE_SCALE_Y_V_IMAGE_SIZE_CFG      = 0x00000298,
+	VFE_SCALE_Y_V_PHASE_CFG           = 0x0000029C,
+	VFE_SCALE_CBCR_CFG                = 0x000002A0,
+	VFE_SCALE_CBCR_H_IMAGE_SIZE_CFG   = 0x000002A4,
+	VFE_SCALE_CBCR_H_PHASE_CFG        = 0x000002A8,
+	VFE_SCALE_CBCR_V_IMAGE_SIZE_CFG   = 0x000002AC,
+	VFE_SCALE_CBCR_V_PHASE_CFG        = 0x000002B0,
+	VFE_WB_CFG                        = 0x000002B4,
+	VFE_CHROMA_SUPPRESS_CFG_0         = 0x000002B8,
+	VFE_CHROMA_SUPPRESS_CFG_1         = 0x000002BC,
+	VFE_CHROMA_SUBSAMPLE_CFG          = 0x000002C0,
+	VFE_CHROMA_SUB_CROP_WIDTH_CFG     = 0x000002C4,
+	VFE_CHROMA_SUB_CROP_HEIGHT_CFG    = 0x000002C8,
+	VFE_FRAMEDROP_ENC_Y_CFG           = 0x000002CC,
+	VFE_FRAMEDROP_ENC_CBCR_CFG        = 0x000002D0,
+	VFE_FRAMEDROP_ENC_Y_PATTERN       = 0x000002D4,
+	VFE_FRAMEDROP_ENC_CBCR_PATTERN    = 0x000002D8,
+	VFE_FRAMEDROP_VIEW_Y_CFG          = 0x000002DC,
+	VFE_FRAMEDROP_VIEW_CBCR_CFG       = 0x000002E0,
+	VFE_FRAMEDROP_VIEW_Y_PATTERN      = 0x000002E4,
+	VFE_FRAMEDROP_VIEW_CBCR_PATTERN   = 0x000002E8,
+	VFE_CLAMP_MAX_CFG                 = 0x000002EC,
+	VFE_CLAMP_MIN_CFG                 = 0x000002F0,
+	VFE_STATS_CMD                     = 0x000002F4,
+	VFE_STATS_AF_CFG                  = 0x000002F8,
+	VFE_STATS_AF_DIM                  = 0x000002FC,
+	VFE_STATS_AF_GRID_0               = 0x00000300,
+	VFE_STATS_AF_GRID_1               = 0x00000304,
+	VFE_STATS_AF_GRID_2               = 0x00000308,
+	VFE_STATS_AF_GRID_3               = 0x0000030C,
+	VFE_STATS_AF_HEADER               = 0x00000310,
+	VFE_STATS_AF_COEF0                = 0x00000314,
+	VFE_STATS_AF_COEF1                = 0x00000318,
+	VFE_STATS_AWBAE_CFG               = 0x0000031C,
+	VFE_STATS_AXW_HEADER              = 0x00000320,
+	VFE_STATS_AWB_MCFG                = 0x00000324,
+	VFE_STATS_AWB_CCFG1               = 0x00000328,
+	VFE_STATS_AWB_CCFG2               = 0x0000032C,
+	VFE_STATS_HIST_HEADER             = 0x00000330,
+	VFE_STATS_HIST_INNER_OFFSET       = 0x00000334,
+	VFE_STATS_HIST_INNER_DIM          = 0x00000338,
+	VFE_STATS_FRAME_SIZE              = 0x0000033C,
+	VFE_DMI_CFG                       = 0x00000340,
+	VFE_DMI_ADDR                      = 0x00000344,
+	VFE_DMI_DATA_HI                   = 0x00000348,
+	VFE_DMI_DATA_LO                   = 0x0000034C,
+	VFE_DMI_RAM_AUTO_LOAD_CMD         = 0x00000350,
+	VFE_DMI_RAM_AUTO_LOAD_STATUS      = 0x00000354,
+	VFE_DMI_RAM_AUTO_LOAD_CFG         = 0x00000358,
+	VFE_DMI_RAM_AUTO_LOAD_SEED        = 0x0000035C,
+	VFE_TESTBUS_SEL                   = 0x00000360,
+	VFE_TESTGEN_CFG                   = 0x00000364,
+	VFE_SW_TESTGEN_CMD                = 0x00000368,
+	VFE_HW_TESTGEN_CMD                = 0x0000036C,
+	VFE_HW_TESTGEN_CFG                = 0x00000370,
+	VFE_HW_TESTGEN_IMAGE_CFG          = 0x00000374,
+	VFE_HW_TESTGEN_SOF_OFFSET_CFG     = 0x00000378,
+	VFE_HW_TESTGEN_EOF_NOFFSET_CFG    = 0x0000037C,
+	VFE_HW_TESTGEN_SOL_OFFSET_CFG     = 0x00000380,
+	VFE_HW_TESTGEN_EOL_NOFFSET_CFG    = 0x00000384,
+	VFE_HW_TESTGEN_HBI_CFG            = 0x00000388,
+	VFE_HW_TESTGEN_VBL_CFG            = 0x0000038C,
+	VFE_HW_TESTGEN_SOF_DUMMY_LINE_CFG2 = 0x00000390,
+	VFE_HW_TESTGEN_EOF_DUMMY_LINE_CFG2 = 0x00000394,
+	VFE_HW_TESTGEN_COLOR_BARS_CFG     = 0x00000398,
+	VFE_HW_TESTGEN_RANDOM_CFG         = 0x0000039C,
+	VFE_SPARE                         = 0x000003A0,
+};
+
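+/* ping/pong buffer selectors used for currentFrame and hwCurrentFlag */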
+#define ping 0x0
+#define pong 0x1
+
+struct vfe_bus_cfg_data {
+	boolean                  stripeRdPathEn;
+	boolean                  encYWrPathEn;
+	boolean                  encCbcrWrPathEn;
+	boolean                  viewYWrPathEn;
+	boolean                  viewCbcrWrPathEn;
+	enum VFE_RAW_PIXEL_DATA_SIZE rawPixelDataSize;
+	enum VFE_RAW_WR_PATH_SEL     rawWritePathSelect;
+};
+
+struct vfe_camif_cfg_data {
+	boolean camif2OutputEnable;
+	boolean camif2BusEnable;
+	struct vfe_cmds_camif_cfg camifCfgFromCmd;
+};
+
+struct vfe_irq_composite_mask_config {
+	uint8_t encIrqComMask;
+	uint8_t viewIrqComMask;
+	uint8_t ceDoneSel;
+};
+
+/* define a structure for each output path. */
+struct vfe_output_path {
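+	/* ping and pong fragment addresses, up to four fragments each */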
+	uint32_t addressBuffer[8];
+	uint16_t fragIndex;
+	boolean  hwCurrentFlag;
+	uint8_t  *hwRegPingAddress;
+	uint8_t  *hwRegPongAddress;
+};
+
+struct vfe_output_path_combo {
+	boolean           whichOutputPath;
+	boolean           pathEnabled;
+	boolean           multiFrag;
+	uint8_t           fragCount;
+	boolean           ackPending;
+	uint8_t           currentFrame;
+	uint32_t          nextFrameAddrBuf[8];
+	struct vfe_output_path   yPath;
+	struct vfe_output_path   cbcrPath;
+	uint8_t           snapshotPendingCount;
+	boolean           pmEnabled;
+	uint8_t           cbcrStatusBit;
+};
+
+struct vfe_stats_control {
+	boolean  ackPending;
+	uint32_t addressBuffer[2];
+	uint32_t nextFrameAddrBuf;
+	boolean  pingPongStatus;
+	uint8_t  *hwRegPingAddress;
+	uint8_t  *hwRegPongAddress;
+	uint32_t droppedStatsFrameCount;
+	uint32_t bufToRender;
+};
+
+struct vfe_gamma_lut_sel {
+	boolean  ch0BankSelect;
+	boolean  ch1BankSelect;
+	boolean  ch2BankSelect;
+};
+
+struct vfe_interrupt_mask {
+	boolean  camifErrorIrq;
+	boolean  camifSofIrq;
+	boolean  camifEolIrq;
+	boolean  camifEofIrq;
+	boolean  camifEpoch1Irq;
+	boolean  camifEpoch2Irq;
+	boolean  camifOverflowIrq;
+	boolean  ceIrq;
+	boolean  regUpdateIrq;
+	boolean  resetAckIrq;
+	boolean  encYPingpongIrq;
+	boolean  encCbcrPingpongIrq;
+	boolean  viewYPingpongIrq;
+	boolean  viewCbcrPingpongIrq;
+	boolean  rdPingpongIrq;
+	boolean  afPingpongIrq;
+	boolean  awbPingpongIrq;
+	boolean  histPingpongIrq;
+	boolean  encIrq;
+	boolean  viewIrq;
+	boolean  busOverflowIrq;
+	boolean  afOverflowIrq;
+	boolean  awbOverflowIrq;
+	boolean  syncTimer0Irq;
+	boolean  syncTimer1Irq;
+	boolean  syncTimer2Irq;
+	boolean  asyncTimer0Irq;
+	boolean  asyncTimer1Irq;
+	boolean  asyncTimer2Irq;
+	boolean  asyncTimer3Irq;
+	boolean  axiErrorIrq;
+	boolean  violationIrq;
+};
+
+enum vfe_interrupt_name {
+	CAMIF_ERROR_IRQ,
+	CAMIF_SOF_IRQ,
+	CAMIF_EOL_IRQ,
+	CAMIF_EOF_IRQ,
+	CAMIF_EPOCH1_IRQ,
+	CAMIF_EPOCH2_IRQ,
+	CAMIF_OVERFLOW_IRQ,
+	CE_IRQ,
+	REG_UPDATE_IRQ,
+	RESET_ACK_IRQ,
+	ENC_Y_PINGPONG_IRQ,
+	ENC_CBCR_PINGPONG_IRQ,
+	VIEW_Y_PINGPONG_IRQ,
+	VIEW_CBCR_PINGPONG_IRQ,
+	RD_PINGPONG_IRQ,
+	AF_PINGPONG_IRQ,
+	AWB_PINGPONG_IRQ,
+	HIST_PINGPONG_IRQ,
+	ENC_IRQ,
+	VIEW_IRQ,
+	BUS_OVERFLOW_IRQ,
+	AF_OVERFLOW_IRQ,
+	AWB_OVERFLOW_IRQ,
+	SYNC_TIMER0_IRQ,
+	SYNC_TIMER1_IRQ,
+	SYNC_TIMER2_IRQ,
+	ASYNC_TIMER0_IRQ,
+	ASYNC_TIMER1_IRQ,
+	ASYNC_TIMER2_IRQ,
+	ASYNC_TIMER3_IRQ,
+	AXI_ERROR_IRQ,
+	VIOLATION_IRQ
+};
+
+enum VFE_DMI_RAM_SEL {
+	NO_MEM_SELECTED          = 0,
+	ROLLOFF_RAM              = 0x1,
+	RGBLUT_RAM_CH0_BANK0     = 0x2,
+	RGBLUT_RAM_CH0_BANK1     = 0x3,
+	RGBLUT_RAM_CH1_BANK0     = 0x4,
+	RGBLUT_RAM_CH1_BANK1     = 0x5,
+	RGBLUT_RAM_CH2_BANK0     = 0x6,
+	RGBLUT_RAM_CH2_BANK1     = 0x7,
+	STATS_HIST_CB_EVEN_RAM   = 0x8,
+	STATS_HIST_CB_ODD_RAM    = 0x9,
+	STATS_HIST_CR_EVEN_RAM   = 0xa,
+	STATS_HIST_CR_ODD_RAM    = 0xb,
+	RGBLUT_CHX_BANK0         = 0xc,
+	RGBLUT_CHX_BANK1         = 0xd,
+	LUMA_ADAPT_LUT_RAM_BANK0 = 0xe,
+	LUMA_ADAPT_LUT_RAM_BANK1 = 0xf
+};
+
+struct vfe_module_enable {
+	boolean  blackLevelCorrectionEnable;
+	boolean  lensRollOffEnable;
+	boolean  demuxEnable;
+	boolean  chromaUpsampleEnable;
+	boolean  demosaicEnable;
+	boolean  statsEnable;
+	boolean  cropEnable;
+	boolean  mainScalerEnable;
+	boolean  whiteBalanceEnable;
+	boolean  colorCorrectionEnable;
+	boolean  yHistEnable;
+	boolean  skinToneEnable;
+	boolean  lumaAdaptationEnable;
+	boolean  rgbLUTEnable;
+	boolean  chromaEnhanEnable;
+	boolean  asfEnable;
+	boolean  chromaSuppressionEnable;
+	boolean  chromaSubsampleEnable;
+	boolean  scaler2YEnable;
+	boolean  scaler2CbcrEnable;
+};
+
+struct vfe_bus_cmd_data {
+	boolean  stripeReload;
+	boolean  busPingpongReload;
+	boolean  statsPingpongReload;
+};
+
+struct vfe_stats_cmd_data {
+	boolean  autoFocusEnable;
+	boolean  axwEnable;
+	boolean  histEnable;
+	boolean  clearHistEnable;
+	boolean  histAutoClearEnable;
+	boolean  colorConversionEnable;
+};
+
+struct vfe_hw_ver {
+	uint32_t minorVersion:8;
+	uint32_t majorVersion:8;
+	uint32_t coreVersion:4;
+	uint32_t /* reserved */ : 12;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_cfg {
+	uint32_t pixelPattern:3;
+	uint32_t /* reserved */ : 13;
+	uint32_t inputSource:2;
+	uint32_t /* reserved */ : 14;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_buscmd {
+	uint32_t  stripeReload:1;
+	uint32_t  /* reserved */ : 3;
+	uint32_t  busPingpongReload:1;
+	uint32_t  statsPingpongReload:1;
+	uint32_t  /* reserved */ : 26;
+} __attribute__((packed, aligned(4)));
+
+struct VFE_Irq_Composite_MaskType {
+	uint32_t  encIrqComMaskBits:2;
+	uint32_t  viewIrqComMaskBits:2;
+	uint32_t  ceDoneSelBits:5;
+	uint32_t  /* reserved */ : 23;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_mod_enable {
+	uint32_t blackLevelCorrectionEnable:1;
+	uint32_t lensRollOffEnable:1;
+	uint32_t demuxEnable:1;
+	uint32_t chromaUpsampleEnable:1;
+	uint32_t demosaicEnable:1;
+	uint32_t statsEnable:1;
+	uint32_t cropEnable:1;
+	uint32_t mainScalerEnable:1;
+	uint32_t whiteBalanceEnable:1;
+	uint32_t colorCorrectionEnable:1;
+	uint32_t yHistEnable:1;
+	uint32_t skinToneEnable:1;
+	uint32_t lumaAdaptationEnable:1;
+	uint32_t rgbLUTEnable:1;
+	uint32_t chromaEnhanEnable:1;
+	uint32_t asfEnable:1;
+	uint32_t chromaSuppressionEnable:1;
+	uint32_t chromaSubsampleEnable:1;
+	uint32_t scaler2YEnable:1;
+	uint32_t scaler2CbcrEnable:1;
+	uint32_t /* reserved */ : 14;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_irqenable {
+	uint32_t camifErrorIrq:1;
+	uint32_t camifSofIrq:1;
+	uint32_t camifEolIrq:1;
+	uint32_t camifEofIrq:1;
+	uint32_t camifEpoch1Irq:1;
+	uint32_t camifEpoch2Irq:1;
+	uint32_t camifOverflowIrq:1;
+	uint32_t ceIrq:1;
+	uint32_t regUpdateIrq:1;
+	uint32_t resetAckIrq:1;
+	uint32_t encYPingpongIrq:1;
+	uint32_t encCbcrPingpongIrq:1;
+	uint32_t viewYPingpongIrq:1;
+	uint32_t viewCbcrPingpongIrq:1;
+	uint32_t rdPingpongIrq:1;
+	uint32_t afPingpongIrq:1;
+	uint32_t awbPingpongIrq:1;
+	uint32_t histPingpongIrq:1;
+	uint32_t encIrq:1;
+	uint32_t viewIrq:1;
+	uint32_t busOverflowIrq:1;
+	uint32_t afOverflowIrq:1;
+	uint32_t awbOverflowIrq:1;
+	uint32_t syncTimer0Irq:1;
+	uint32_t syncTimer1Irq:1;
+	uint32_t syncTimer2Irq:1;
+	uint32_t asyncTimer0Irq:1;
+	uint32_t asyncTimer1Irq:1;
+	uint32_t asyncTimer2Irq:1;
+	uint32_t asyncTimer3Irq:1;
+	uint32_t axiErrorIrq:1;
+	uint32_t violationIrq:1;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_upsample_cfg {
+	uint32_t chromaCositingForYCbCrInputs:1;
+	uint32_t /* reserved */ : 31;
+} __attribute__((packed, aligned(4)));
+
+struct VFE_CAMIFConfigType {
+	/* CAMIF Config */
+	uint32_t  /* reserved */ : 1;
+	uint32_t  VSyncEdge:1;
+	uint32_t  HSyncEdge:1;
+	uint32_t  syncMode:2;
+	uint32_t  vfeSubsampleEnable:1;
+	uint32_t  /* reserved */ : 1;
+	uint32_t  busSubsampleEnable:1;
+	uint32_t  camif2vfeEnable:1;
+	uint32_t  /* reserved */ : 1;
+	uint32_t  camif2busEnable:1;
+	uint32_t  irqSubsampleEnable:1;
+	uint32_t  binningEnable:1;
+	uint32_t  /* reserved */ : 18;
+	uint32_t  misrEnable:1;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_camifcfg {
+	/* EFS_Config */
+	uint32_t efsEndOfLine:8;
+	uint32_t efsStartOfLine:8;
+	uint32_t efsEndOfFrame:8;
+	uint32_t efsStartOfFrame:8;
+	/* Frame Config */
+	uint32_t frameConfigPixelsPerLine:14;
+	uint32_t /* reserved */ : 2;
+	uint32_t frameConfigLinesPerFrame:14;
+	uint32_t /* reserved */ : 2;
+	/* Window Width Config */
+	uint32_t windowWidthCfgLastPixel:14;
+	uint32_t /* reserved */ : 2;
+	uint32_t windowWidthCfgFirstPixel:14;
+	uint32_t /* reserved */ : 2;
+	/* Window Height Config */
+	uint32_t windowHeightCfglastLine:14;
+	uint32_t /* reserved */ : 2;
+	uint32_t windowHeightCfgfirstLine:14;
+	uint32_t /* reserved */ : 2;
+	/* Subsample 1 Config */
+	uint32_t subsample1CfgPixelSkip:16;
+	uint32_t subsample1CfgLineSkip:16;
+	/* Subsample 2 Config */
+	uint32_t subsample2CfgFrameSkip:4;
+	uint32_t subsample2CfgFrameSkipMode:1;
+	uint32_t subsample2CfgPixelSkipWrap:1;
+	uint32_t /* reserved */ : 26;
+	/* Epoch Interrupt */
+	uint32_t epoch1Line:14;
+	uint32_t /* reserved */ : 2;
+	uint32_t epoch2Line:14;
+	uint32_t /* reserved */ : 2;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_camifframe_update {
+	uint32_t pixelsPerLine:14;
+	uint32_t /* reserved */ : 2;
+	uint32_t linesPerFrame:14;
+	uint32_t /* reserved */ : 2;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_axi_bus_cfg {
+	uint32_t  stripeRdPathEn:1;
+	uint32_t  /* reserved */ : 3;
+	uint32_t  encYWrPathEn:1;
+	uint32_t  encCbcrWrPathEn:1;
+	uint32_t  viewYWrPathEn:1;
+	uint32_t  viewCbcrWrPathEn:1;
+	uint32_t  rawPixelDataSize:2;
+	uint32_t  rawWritePathSelect:2;
+	uint32_t  /* reserved */ : 20;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_axi_out_cfg {
+	uint32_t  out2YPingAddr:32;
+	uint32_t  out2YPongAddr:32;
+	uint32_t  out2YImageHeight:12;
+	uint32_t  /* reserved */ : 4;
+	uint32_t  out2YImageWidthin64bit:10;
+	uint32_t  /* reserved */ : 6;
+	uint32_t  out2YBurstLength:2;
+	uint32_t  /* reserved */ : 2;
+	uint32_t  out2YNumRows:12;
+	uint32_t  out2YRowIncrementIn64bit:12;
+	uint32_t  /* reserved */ : 4;
+	uint32_t  out2CbcrPingAddr:32;
+	uint32_t  out2CbcrPongAddr:32;
+	uint32_t  out2CbcrImageHeight:12;
+	uint32_t  /* reserved */ : 4;
+	uint32_t  out2CbcrImageWidthIn64bit:10;
+	uint32_t  /* reserved */ : 6;
+	uint32_t  out2CbcrBurstLength:2;
+	uint32_t  /* reserved */ : 2;
+	uint32_t  out2CbcrNumRows:12;
+	uint32_t  out2CbcrRowIncrementIn64bit:12;
+	uint32_t  /* reserved */ : 4;
+	uint32_t  out1YPingAddr:32;
+	uint32_t  out1YPongAddr:32;
+	uint32_t  out1YImageHeight:12;
+	uint32_t  /* reserved */ : 4;
+	uint32_t  out1YImageWidthin64bit:10;
+	uint32_t  /* reserved */ : 6;
+	uint32_t  out1YBurstLength:2;
+	uint32_t  /* reserved */ : 2;
+	uint32_t  out1YNumRows:12;
+	uint32_t  out1YRowIncrementIn64bit:12;
+	uint32_t  /* reserved */ : 4;
+	uint32_t  out1CbcrPingAddr:32;
+	uint32_t  out1CbcrPongAddr:32;
+	uint32_t  out1CbcrImageHeight:12;
+	uint32_t  /* reserved */ : 4;
+	uint32_t  out1CbcrImageWidthIn64bit:10;
+	uint32_t  /* reserved */ : 6;
+	uint32_t  out1CbcrBurstLength:2;
+	uint32_t  /* reserved */ : 2;
+	uint32_t  out1CbcrNumRows:12;
+	uint32_t  out1CbcrRowIncrementIn64bit:12;
+	uint32_t  /* reserved */ : 4;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_output_clamp_cfg {
+	/* Output Clamp Maximums */
+	uint32_t yChanMax:8;
+	uint32_t cbChanMax:8;
+	uint32_t crChanMax:8;
+	uint32_t /* reserved */ : 8;
+	/* Output Clamp Minimums */
+	uint32_t yChanMin:8;
+	uint32_t cbChanMin:8;
+	uint32_t crChanMin:8;
+	uint32_t /* reserved */ : 8;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_fov_crop_cfg {
+	uint32_t lastPixel:12;
+	uint32_t /* reserved */ : 4;
+	uint32_t firstPixel:12;
+	uint32_t /* reserved */ : 4;
+
+	/* FOV Crop, Part 2 */
+	uint32_t lastLine:12;
+	uint32_t /* reserved */ : 4;
+	uint32_t firstLine:12;
+	uint32_t /* reserved */ : 4;
+} __attribute__((packed, aligned(4)));
+
+struct VFE_FRAME_SKIP_UpdateCmdType {
+	uint32_t  yPattern:32;
+	uint32_t  cbcrPattern:32;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_frame_skip_cfg {
+	/* Frame Drop Enc (output2) */
+	uint32_t output2YPeriod:5;
+	uint32_t /* reserved */	: 27;
+	uint32_t output2CbCrPeriod:5;
+	uint32_t /* reserved */ : 27;
+	uint32_t output2YPattern:32;
+	uint32_t output2CbCrPattern:32;
+	/* Frame Drop View (output1) */
+	uint32_t output1YPeriod:5;
+	uint32_t /* reserved */ : 27;
+	uint32_t output1CbCrPeriod:5;
+	uint32_t /* reserved */ : 27;
+	uint32_t output1YPattern:32;
+	uint32_t output1CbCrPattern:32;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_main_scaler_cfg {
+	/* Scaler Enable Config */
+	uint32_t hEnable:1;
+	uint32_t vEnable:1;
+	uint32_t /* reserved */ : 30;
+	/* Scale H Image Size Config */
+	uint32_t inWidth:12;
+	uint32_t /* reserved */ : 4;
+	uint32_t outWidth:12;
+	uint32_t /* reserved */ : 4;
+	/* Scale H Phase Config */
+	uint32_t horizPhaseMult:18;
+	uint32_t /* reserved */ : 2;
+	uint32_t horizInterResolution:2;
+	uint32_t /* reserved */ : 10;
+	/* Scale H Stripe Config */
+	uint32_t horizMNInit:12;
+	uint32_t /* reserved */ : 4;
+	uint32_t horizPhaseInit:15;
+	uint32_t /* reserved */ : 1;
+	/* Scale V Image Size Config */
+	uint32_t inHeight:12;
+	uint32_t /* reserved */ : 4;
+	uint32_t outHeight:12;
+	uint32_t /* reserved */ : 4;
+	/* Scale V Phase Config */
+	uint32_t vertPhaseMult:18;
+	uint32_t /* reserved */ : 2;
+	uint32_t vertInterResolution:2;
+	uint32_t /* reserved */ : 10;
+	/* Scale V Stripe Config */
+	uint32_t vertMNInit:12;
+	uint32_t /* reserved */ : 4;
+	uint32_t vertPhaseInit:15;
+	uint32_t /* reserved */ : 1;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_scaler2_cfg {
+	/* Scaler   Enable Config */
+	uint32_t  hEnable:1;
+	uint32_t  vEnable:1;
+	uint32_t  /* reserved */ : 30;
+	/* Scaler   H Image Size Config */
+	uint32_t  inWidth:12;
+	uint32_t  /* reserved */ : 4;
+	uint32_t  outWidth:12;
+	uint32_t  /* reserved */ : 4;
+	/* Scaler   H Phase Config */
+	uint32_t  horizPhaseMult:18;
+	uint32_t  /* reserved */ : 2;
+	uint32_t  horizInterResolution:2;
+	uint32_t  /* reserved */ : 10;
+	/* Scaler   V Image Size Config */
+	uint32_t  inHeight:12;
+	uint32_t  /* reserved */ : 4;
+	uint32_t  outHeight:12;
+	uint32_t  /* reserved */ : 4;
+	/* Scaler   V Phase Config */
+	uint32_t  vertPhaseMult:18;
+	uint32_t  /* reserved */ : 2;
+	uint32_t  vertInterResolution:2;
+	uint32_t  /* reserved */ : 10;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_rolloff_cfg {
+	/* Rolloff 0 Config */
+	uint32_t  gridWidth:9;
+	uint32_t  gridHeight:9;
+	uint32_t  yDelta:9;
+	uint32_t  /* reserved */ : 5;
+	/* Rolloff 1 Config*/
+	uint32_t  gridX:4;
+	uint32_t  gridY:4;
+	uint32_t  pixelX:9;
+	uint32_t  /* reserved */ : 3;
+	uint32_t  pixelY:9;
+	uint32_t  /* reserved */ : 3;
+	/* Rolloff 2 Config */
+	uint32_t  yDeltaAccum:12;
+	uint32_t  /* reserved */ : 20;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_asf_update {
+	/* ASF Config Command */
+	uint32_t smoothEnable:1;
+	uint32_t sharpMode:2;
+	uint32_t /* reserved */ : 1;
+	uint32_t smoothCoeff1:4;
+	uint32_t smoothCoeff0:8;
+	uint32_t pipeFlushCount:12;
+	uint32_t pipeFlushOvd:1;
+	uint32_t flushHaltOvd:1;
+	uint32_t cropEnable:1;
+	uint32_t /* reserved */ : 1;
+	/* Sharpening Config 0 */
+	uint32_t sharpThresholdE1:7;
+	uint32_t /* reserved */ : 1;
+	uint32_t sharpDegreeK1:5;
+	uint32_t /* reserved */ : 3;
+	uint32_t sharpDegreeK2:5;
+	uint32_t /* reserved */ : 3;
+	uint32_t normalizeFactor:7;
+	uint32_t /* reserved */ : 1;
+	/* Sharpening Config 1 */
+	uint32_t sharpThresholdE2:8;
+	uint32_t sharpThresholdE3:8;
+	uint32_t sharpThresholdE4:8;
+	uint32_t sharpThresholdE5:8;
+	/* Sharpening Coefficients 0 */
+	uint32_t F1Coeff0:6;
+	uint32_t F1Coeff1:6;
+	uint32_t F1Coeff2:6;
+	uint32_t F1Coeff3:6;
+	uint32_t F1Coeff4:6;
+	uint32_t /* reserved */ : 2;
+	/* Sharpening Coefficients 1 */
+	uint32_t F1Coeff5:6;
+	uint32_t F1Coeff6:6;
+	uint32_t F1Coeff7:6;
+	uint32_t F1Coeff8:7;
+	uint32_t /* reserved */ : 7;
+	/* Sharpening Coefficients 2 */
+	uint32_t F2Coeff0:6;
+	uint32_t F2Coeff1:6;
+	uint32_t F2Coeff2:6;
+	uint32_t F2Coeff3:6;
+	uint32_t F2Coeff4:6;
+	uint32_t /* reserved */ : 2;
+	/* Sharpening Coefficients 3 */
+	uint32_t F2Coeff5:6;
+	uint32_t F2Coeff6:6;
+	uint32_t F2Coeff7:6;
+	uint32_t F2Coeff8:7;
+	uint32_t /* reserved */ : 7;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_asfcrop_cfg {
+	/* ASF Crop Width Config */
+	uint32_t lastPixel:12;
+	uint32_t /* reserved */ : 4;
+	uint32_t firstPixel:12;
+	uint32_t /* reserved */ : 4;
+	/* ASF Crop Height Config */
+	uint32_t lastLine:12;
+	uint32_t /* reserved */ : 4;
+	uint32_t firstLine:12;
+	uint32_t /* reserved */ : 4;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_chroma_suppress_cfg {
+	/* Chroma Suppress 0 Config */
+	uint32_t m1:8;
+	uint32_t m3:8;
+	uint32_t n1:3;
+	uint32_t /* reserved */ : 1;
+	uint32_t n3:3;
+	uint32_t /* reserved */ : 9;
+	/* Chroma Suppress 1 Config */
+	uint32_t mm1:8;
+	uint32_t nn1:3;
+	uint32_t /* reserved */ : 21;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_chromasubsample_cfg {
+	/* Chroma Subsample Selection */
+	uint32_t  hCositedPhase:1;
+	uint32_t  vCositedPhase:1;
+	uint32_t  hCosited:1;
+	uint32_t  vCosited:1;
+	uint32_t  hsubSampleEnable:1;
+	uint32_t  vsubSampleEnable:1;
+	uint32_t  cropEnable:1;
+	uint32_t  /* reserved */ : 25;
+	uint32_t  cropWidthLastPixel:12;
+	uint32_t  /* reserved */ : 4;
+	uint32_t  cropWidthFirstPixel:12;
+	uint32_t  /* reserved */ : 4;
+	uint32_t  cropHeightLastLine:12;
+	uint32_t  /* reserved */ : 4;
+	uint32_t  cropHeightFirstLine:12;
+	uint32_t  /* reserved */ : 4;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_blacklevel_cfg {
+	/* Black Even-Even Value Config */
+	uint32_t    evenEvenAdjustment:9;
+	uint32_t   /* reserved */ : 23;
+	/* Black Even-Odd Value Config */
+	uint32_t    evenOddAdjustment:9;
+	uint32_t   /* reserved */ : 23;
+	/* Black Odd-Even Value Config */
+	uint32_t    oddEvenAdjustment:9;
+	uint32_t   /* reserved */ : 23;
+	/* Black Odd-Odd Value Config */
+	uint32_t    oddOddAdjustment:9;
+	uint32_t   /* reserved */ : 23;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_demux_cfg {
+	/* Demux Gain 0 Config */
+	uint32_t  ch0EvenGain:10;
+	uint32_t  /* reserved */ : 6;
+	uint32_t  ch0OddGain:10;
+	uint32_t  /* reserved */ : 6;
+	/* Demux Gain 1 Config */
+	uint32_t  ch1Gain:10;
+	uint32_t  /* reserved */ : 6;
+	uint32_t  ch2Gain:10;
+	uint32_t  /* reserved */ : 6;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_bps_info {
+	uint32_t greenBadPixelCount:8;
+	uint32_t /* reserved */ : 8;
+	uint32_t RedBlueBadPixelCount:8;
+	uint32_t /* reserved */ : 8;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_demosaic_cfg {
+	/* Demosaic Config */
+	uint32_t abfEnable:1;
+	uint32_t badPixelCorrEnable:1;
+	uint32_t forceAbfOn:1;
+	uint32_t /* reserved */ : 1;
+	uint32_t abfShift:4;
+	uint32_t fminThreshold:7;
+	uint32_t /* reserved */ : 1;
+	uint32_t fmaxThreshold:7;
+	uint32_t /* reserved */ : 5;
+	uint32_t slopeShift:3;
+	uint32_t /* reserved */ : 1;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_demosaic_bpc_cfg {
+	/* Demosaic BPC Config 0 */
+	uint32_t blueDiffThreshold:12;
+	uint32_t redDiffThreshold:12;
+	uint32_t /* reserved */ : 8;
+	/* Demosaic BPC Config 1 */
+	uint32_t greenDiffThreshold:12;
+	uint32_t /* reserved */ : 20;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_demosaic_abf_cfg {
+	/* Demosaic ABF Config 0 */
+	uint32_t lpThreshold:10;
+	uint32_t /* reserved */ : 22;
+	/* Demosaic ABF Config 1 */
+	uint32_t ratio:4;
+	uint32_t minValue:10;
+	uint32_t /* reserved */ : 2;
+	uint32_t maxValue:10;
+	uint32_t /* reserved */ : 6;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_color_correction_cfg {
+	/* Color Corr. Coefficient 0 Config */
+	uint32_t   c0:12;
+	uint32_t   /* reserved */ : 20;
+	/* Color Corr. Coefficient 1 Config */
+	uint32_t   c1:12;
+	uint32_t   /* reserved */ : 20;
+	/* Color Corr. Coefficient 2 Config */
+	uint32_t   c2:12;
+	uint32_t   /* reserved */ : 20;
+	/* Color Corr. Coefficient 3 Config */
+	uint32_t   c3:12;
+	uint32_t   /* reserved */ : 20;
+	/* Color Corr. Coefficient 4 Config */
+	uint32_t   c4:12;
+	uint32_t   /* reserved */ : 20;
+	/* Color Corr. Coefficient 5 Config */
+	uint32_t   c5:12;
+	uint32_t   /* reserved */ : 20;
+	/* Color Corr. Coefficient 6 Config */
+	uint32_t   c6:12;
+	uint32_t   /* reserved */ : 20;
+	/* Color Corr. Coefficient 7 Config */
+	uint32_t   c7:12;
+	uint32_t   /* reserved */ : 20;
+	/* Color Corr. Coefficient 8 Config */
+	uint32_t   c8:12;
+	uint32_t   /* reserved */ : 20;
+	/* Color Corr. Offset 0 Config */
+	uint32_t   k0:11;
+	uint32_t   /* reserved */ : 21;
+	/* Color Corr. Offset 1 Config */
+	uint32_t   k1:11;
+	uint32_t   /* reserved */ : 21;
+	/* Color Corr. Offset 2 Config */
+	uint32_t   k2:11;
+	uint32_t   /* reserved */ : 21;
+	/* Color Corr. Coefficient Q Config */
+	uint32_t   coefQFactor:2;
+	uint32_t   /* reserved */ : 30;
+} __attribute__((packed, aligned(4)));
+
+struct VFE_LumaAdaptation_ConfigCmdType {
+	/* LA Config */
+	uint32_t   lutBankSelect:1;
+	uint32_t   /* reserved */ : 31;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_wb_cfg {
+	/* WB Config */
+	uint32_t ch0Gain:9;
+	uint32_t ch1Gain:9;
+	uint32_t ch2Gain:9;
+	uint32_t /* reserved */ : 5;
+} __attribute__((packed, aligned(4)));
+
+struct VFE_GammaLutSelect_ConfigCmdType {
+	/* LUT Bank Select Config */
+	uint32_t   ch0BankSelect:1;
+	uint32_t   ch1BankSelect:1;
+	uint32_t   ch2BankSelect:1;
+	uint32_t   /* reserved */ : 29;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_chroma_enhance_cfg {
+	/* Chroma Enhance A Config */
+	uint32_t ap:11;
+	uint32_t /* reserved */ : 5;
+	uint32_t am:11;
+	uint32_t /* reserved */ : 5;
+	/* Chroma Enhance B Config */
+	uint32_t bp:11;
+	uint32_t /* reserved */ : 5;
+	uint32_t bm:11;
+	uint32_t /* reserved */ : 5;
+	/* Chroma Enhance C Config */
+	uint32_t cp:11;
+	uint32_t /* reserved */ : 5;
+	uint32_t cm:11;
+	uint32_t /* reserved */ : 5;
+	/* Chroma Enhance D Config */
+	uint32_t dp:11;
+	uint32_t /* reserved */ : 5;
+	uint32_t dm:11;
+	uint32_t /* reserved */ : 5;
+	/* Chroma Enhance K Config */
+	uint32_t kcb:11;
+	uint32_t /* reserved */ : 5;
+	uint32_t kcr:11;
+	uint32_t /* reserved */ : 5;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_color_convert_cfg {
+	/* Conversion Coefficient 0 */
+	uint32_t v0:12;
+	uint32_t /* reserved */ : 20;
+	/* Conversion Coefficient 1 */
+	uint32_t v1:12;
+	uint32_t /* reserved */ : 20;
+	/* Conversion Coefficient 2 */
+	uint32_t v2:12;
+	uint32_t /* reserved */ : 20;
+	/* Conversion Offset */
+	uint32_t ConvertOffset:8;
+	uint32_t /* reserved */ : 24;
+} __attribute__((packed, aligned(4)));
+
+struct VFE_SyncTimer_ConfigCmdType {
+	/* Timer Line Start Config */
+	uint32_t       timerLineStart:12;
+	uint32_t       /* reserved */ : 20;
+	/* Timer Pixel Start Config */
+	uint32_t       timerPixelStart:18;
+	uint32_t       /* reserved */ : 14;
+	/* Timer Pixel Duration Config */
+	uint32_t       timerPixelDuration:28;
+	uint32_t       /* reserved */ : 4;
+	/* Sync Timer Polarity Config */
+	uint32_t       timer0Polarity:1;
+	uint32_t       timer1Polarity:1;
+	uint32_t       timer2Polarity:1;
+	uint32_t       /* reserved */ : 29;
+} __attribute__((packed, aligned(4)));
+
+struct VFE_AsyncTimer_ConfigCmdType {
+	/* Async Timer Config 0 */
+	uint32_t     inactiveLength:20;
+	uint32_t     numRepetition:10;
+	uint32_t     /* reserved */ : 1;
+	uint32_t     polarity:1;
+	/* Async Timer Config 1 */
+	uint32_t     activeLength:20;
+	uint32_t     /* reserved */ : 12;
+} __attribute__((packed, aligned(4)));
+
+struct VFE_AWBAEStatistics_ConfigCmdType {
+	/* AWB autoexposure Config */
+	uint32_t    aeRegionConfig:1;
+	uint32_t    aeSubregionConfig:1;
+	uint32_t    /* reserved */ : 14;
+	uint32_t    awbYMin:8;
+	uint32_t    awbYMax:8;
+	/* AXW Header */
+	uint32_t    axwHeader:8;
+	uint32_t    /* reserved */ : 24;
+	/* AWB Mconfig */
+	uint32_t    m4:8;
+	uint32_t    m3:8;
+	uint32_t    m2:8;
+	uint32_t    m1:8;
+	/* AWB Cconfig */
+	uint32_t    c2:12;
+	uint32_t    /* reserved */ : 4;
+	uint32_t    c1:12;
+	uint32_t    /* reserved */ : 4;
+	/* AWB Cconfig 2 */
+	uint32_t    c4:12;
+	uint32_t    /* reserved */ : 4;
+	uint32_t    c3:12;
+	uint32_t    /* reserved */ : 4;
+} __attribute__((packed, aligned(4)));
+
+struct VFE_TestGen_ConfigCmdType {
+	/* HW Test Gen Config */
+	uint32_t   numFrame:10;
+	uint32_t   /* reserved */ : 2;
+	uint32_t   pixelDataSelect:1;
+	uint32_t   systematicDataSelect:1;
+	uint32_t   /* reserved */ : 2;
+	uint32_t   pixelDataSize:2;
+	uint32_t   hsyncEdge:1;
+	uint32_t   vsyncEdge:1;
+	uint32_t   /* reserved */ : 12;
+	/* HW Test Gen Image Config */
+	uint32_t   imageWidth:14;
+	uint32_t   /* reserved */ : 2;
+	uint32_t   imageHeight:14;
+	uint32_t   /* reserved */ : 2;
+	/* SOF Offset Config */
+	uint32_t   sofOffset:24;
+	uint32_t   /* reserved */ : 8;
+	/* EOF NOffset Config */
+	uint32_t   eofNOffset:24;
+	uint32_t   /* reserved */ : 8;
+	/* SOL Offset Config */
+	uint32_t   solOffset:9;
+	uint32_t   /* reserved */ : 23;
+	/* EOL NOffset Config */
+	uint32_t   eolNOffset:9;
+	uint32_t   /* reserved */ : 23;
+	/* HBI Config */
+	uint32_t   hBlankInterval:14;
+	uint32_t   /* reserved */ : 18;
+	/* VBL Config */
+	uint32_t   vBlankInterval:14;
+	uint32_t   /* reserved */ : 2;
+	uint32_t   vBlankIntervalEnable:1;
+	uint32_t   /* reserved */ : 15;
+	/* SOF Dummy Line Config */
+	uint32_t   sofDummy:8;
+	uint32_t   /* reserved */ : 24;
+	/* EOF Dummy Line Config */
+	uint32_t   eofDummy:8;
+	uint32_t   /* reserved */ : 24;
+	/* Color Bars Config */
+	uint32_t   unicolorBarSelect:3;
+	uint32_t   /* reserved */ : 1;
+	uint32_t   unicolorBarEnable:1;
+	uint32_t   splitEnable:1;
+	uint32_t   pixelPattern:2;
+	uint32_t   rotatePeriod:6;
+	uint32_t   /* reserved */ : 18;
+	/* Random Config */
+	uint32_t   randomSeed:16;
+	uint32_t   /* reserved */ : 16;
+} __attribute__((packed, aligned(4)));
+
+struct VFE_Bus_Pm_ConfigCmdType {
+	/* VFE Bus Performance Monitor Config */
+	uint32_t  output2YWrPmEnable:1;
+	uint32_t  output2CbcrWrPmEnable:1;
+	uint32_t  output1YWrPmEnable:1;
+	uint32_t  output1CbcrWrPmEnable:1;
+	uint32_t  /* reserved */ : 28;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_asf_info {
+	/* asf max edge  */
+	uint32_t maxEdge:13;
+	uint32_t /* reserved */ : 3;
+	/* HBi count  */
+	uint32_t HBICount:12;
+	uint32_t /* reserved */ : 4;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_camif_stats {
+	uint32_t  pixelCount:14;
+	uint32_t  /* reserved */ : 2;
+	uint32_t  lineCount:14;
+	uint32_t  /* reserved */ : 1;
+	uint32_t  camifHalt:1;
+} __attribute__((packed, aligned(4)));
+
+struct VFE_StatsCmdType {
+	uint32_t  autoFocusEnable:1;
+	uint32_t  axwEnable:1;
+	uint32_t  histEnable:1;
+	uint32_t  clearHistEnable:1;
+	uint32_t  histAutoClearEnable:1;
+	uint32_t  colorConversionEnable:1;
+	uint32_t  /* reserved */ : 26;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_statsframe {
+	uint32_t lastPixel:12;
+	uint32_t /* reserved */ : 4;
+	uint32_t lastLine:12;
+	uint32_t /* reserved */ : 4;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_busstats_wrprio {
+	uint32_t afBusPriority:4;
+	uint32_t awbBusPriority:4;
+	uint32_t histBusPriority:4;
+	uint32_t afBusPriorityEn:1;
+	uint32_t awbBusPriorityEn:1;
+	uint32_t histBusPriorityEn:1;
+	uint32_t /* reserved */ : 17;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_statsaf_update {
+	/* VFE_STATS_AF_CFG */
+	uint32_t windowVOffset:12;
+	uint32_t /* reserved */ : 4;
+	uint32_t windowHOffset:12;
+	uint32_t /* reserved */ : 3;
+	uint32_t windowMode:1;
+
+	/* VFE_STATS_AF_DIM */
+	uint32_t windowHeight:12;
+	uint32_t /* reserved */ : 4;
+	uint32_t windowWidth:12;
+	uint32_t /* reserved */ : 4;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_statsaf_cfg {
+	/* VFE_STATS_AF_GRID_0 */
+	uint32_t  entry00:8;
+	uint32_t  entry01:8;
+	uint32_t  entry02:8;
+	uint32_t  entry03:8;
+
+	/* VFE_STATS_AF_GRID_1 */
+	uint32_t  entry10:8;
+	uint32_t  entry11:8;
+	uint32_t  entry12:8;
+	uint32_t  entry13:8;
+
+	/* VFE_STATS_AF_GRID_2 */
+	uint32_t  entry20:8;
+	uint32_t  entry21:8;
+	uint32_t  entry22:8;
+	uint32_t  entry23:8;
+
+	/* VFE_STATS_AF_GRID_3 */
+	uint32_t  entry30:8;
+	uint32_t  entry31:8;
+	uint32_t  entry32:8;
+	uint32_t  entry33:8;
+
+	/* VFE_STATS_AF_HEADER */
+	uint32_t  afHeader:8;
+	uint32_t  /* reserved */ : 24;
+	/*  VFE_STATS_AF_COEF0 */
+	uint32_t  a00:5;
+	uint32_t  a04:5;
+	uint32_t  fvMax:11;
+	uint32_t  fvMetric:1;
+	uint32_t  /* reserved */ : 10;
+
+	/* VFE_STATS_AF_COEF1 */
+	uint32_t  a20:5;
+	uint32_t  a21:5;
+	uint32_t  a22:5;
+	uint32_t  a23:5;
+	uint32_t  a24:5;
+	uint32_t  /* reserved */ : 7;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_statsawbae_update {
+	uint32_t  aeRegionCfg:1;
+	uint32_t  aeSubregionCfg:1;
+	uint32_t  /* reserved */ : 14;
+	uint32_t  awbYMin:8;
+	uint32_t  awbYMax:8;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_statsaxw_hdr_cfg {
+	/* Stats AXW Header Config */
+	uint32_t axwHeader:8;
+	uint32_t /* reserved */ : 24;
+} __attribute__((packed, aligned(4)));
+
+struct vfe_statsawb_update {
+	/* AWB MConfig */
+	uint32_t  m4:8;
+	uint32_t  m3:8;
+	uint32_t  m2:8;
+	uint32_t  m1:8;
+
+	/* AWB CConfig1 */
+	uint32_t  c2:12;
+	uint32_t  /* reserved */ : 4;
+	uint32_t  c1:12;
+	uint32_t  /* reserved */ : 4;
+
+	/* AWB CConfig2 */
+	uint32_t  c4:12;
+	uint32_t  /* reserved */ : 4;
+	uint32_t  c3:12;
+	uint32_t  /* reserved */ : 4;
+} __attribute__((packed, aligned(4)));
+
+struct VFE_SyncTimerCmdType {
+	uint32_t  hsyncCount:12;
+	uint32_t  /* reserved */ : 20;
+	uint32_t  pclkCount:18;
+	uint32_t  /* reserved */ : 14;
+	uint32_t  outputDuration:28;
+	uint32_t  /* reserved */ : 4;
+} __attribute__((packed, aligned(4)));
+
+struct VFE_AsyncTimerCmdType {
+	/*  config 0 */
+	uint32_t    inactiveCount:20;
+	uint32_t    repeatCount:10;
+	uint32_t    /* reserved */ : 1;
+	uint32_t    polarity:1;
+	/*  config 1 */
+	uint32_t    activeCount:20;
+	uint32_t    /* reserved */ : 12;
+} __attribute__((packed, aligned(4)));
+
+struct VFE_AxiInputCmdType {
+	uint32_t   stripeStartAddr0:32;
+	uint32_t   stripeStartAddr1:32;
+	uint32_t   stripeStartAddr2:32;
+	uint32_t   stripeStartAddr3:32;
+
+	uint32_t   ySize:12;
+	uint32_t   yOffsetDelta:12;
+	uint32_t   /* reserved */ : 8;
+
+	/* bus_stripe_rd_hSize */
+	uint32_t   /* reserved */ : 16;
+	uint32_t   xSizeWord:10;
+	uint32_t   /* reserved */ : 6;
+
+	/* bus_stripe_rd_buffer_cfg */
+	uint32_t   burstLength:2;
+	uint32_t   /* reserved */ : 2;
+	uint32_t   NumOfRows:12;
+	uint32_t   RowIncrement:12;
+	uint32_t   /* reserved */ : 4;
+
+	/* bus_stripe_rd_unpack_cfg */
+	uint32_t   mainUnpackHeight:12;
+	uint32_t   mainUnpackWidth:13;
+	uint32_t   mainUnpackHbiSel:3;
+	uint32_t   mainUnpackPhase:3;
+	uint32_t   /* reserved */ : 1;
+
+	/* bus_stripe_rd_unpack */
+	uint32_t   unpackPattern:32;
+
+	/* bus_stripe_rd_pad_size */
+	uint32_t   padLeft:7;
+	uint32_t   /* reserved */ : 1;
+	uint32_t   padRight:7;
+	uint32_t   /* reserved */ : 1;
+	uint32_t   padTop:7;
+	uint32_t   /* reserved */ : 1;
+	uint32_t   padBottom:7;
+	uint32_t   /* reserved */ : 1;
+
+	/* bus_stripe_rd_pad_L_unpack */
+	uint32_t   leftUnpackPattern0:4;
+	uint32_t   leftUnpackPattern1:4;
+	uint32_t   leftUnpackPattern2:4;
+	uint32_t   leftUnpackPattern3:4;
+	uint32_t   leftUnpackStop0:1;
+	uint32_t   leftUnpackStop1:1;
+	uint32_t   leftUnpackStop2:1;
+	uint32_t   leftUnpackStop3:1;
+	uint32_t   /* reserved */ : 12;
+
+	/* bus_stripe_rd_pad_R_unpack */
+	uint32_t   rightUnpackPattern0:4;
+	uint32_t   rightUnpackPattern1:4;
+	uint32_t   rightUnpackPattern2:4;
+	uint32_t   rightUnpackPattern3:4;
+	uint32_t   rightUnpackStop0:1;
+	uint32_t   rightUnpackStop1:1;
+	uint32_t   rightUnpackStop2:1;
+	uint32_t   rightUnpackStop3:1;
+	uint32_t   /* reserved */ : 12;
+
+	/* bus_stripe_rd_pad_tb_unpack */
+	uint32_t   topUnapckPattern:4;
+	uint32_t   /* reserved */ : 12;
+	uint32_t   bottomUnapckPattern:4;
+	uint32_t   /* reserved */ : 12;
+} __attribute__((packed, aligned(4)));
+
+struct VFE_AxiRdFragIrqEnable {
+	uint32_t stripeRdFragirq0Enable:1;
+	uint32_t stripeRdFragirq1Enable:1;
+	uint32_t stripeRdFragirq2Enable:1;
+	uint32_t stripeRdFragirq3Enable:1;
+	uint32_t   /* reserved */ : 28;
+} __attribute__((packed, aligned(4)));
+
+int vfe_cmd_init(struct msm_vfe_callback *, struct platform_device *, void *);
+void vfe_stats_af_stop(void);
+void vfe_stop(void);
+void vfe_update(void);
+int vfe_rgb_gamma_update(struct vfe_cmd_rgb_gamma_config *);
+int vfe_rgb_gamma_config(struct vfe_cmd_rgb_gamma_config *);
+void vfe_stats_wb_exp_ack(struct vfe_cmd_stats_wb_exp_ack *);
+void vfe_stats_af_ack(struct vfe_cmd_stats_af_ack *);
+void vfe_start(struct vfe_cmd_start *);
+void vfe_la_update(struct vfe_cmd_la_config *);
+void vfe_la_config(struct vfe_cmd_la_config *);
+void vfe_test_gen_start(struct vfe_cmd_test_gen_start *);
+void vfe_frame_skip_update(struct vfe_cmd_frame_skip_update *);
+void vfe_frame_skip_config(struct vfe_cmd_frame_skip_config *);
+void vfe_output_clamp_config(struct vfe_cmd_output_clamp_config *);
+void vfe_camif_frame_update(struct vfe_cmds_camif_frame *);
+void vfe_color_correction_config(struct vfe_cmd_color_correction_config *);
+void vfe_demosaic_abf_update(struct vfe_cmd_demosaic_abf_update *);
+void vfe_demosaic_bpc_update(struct vfe_cmd_demosaic_bpc_update *);
+void vfe_demosaic_config(struct vfe_cmd_demosaic_config *);
+void vfe_demux_channel_gain_update(struct vfe_cmd_demux_channel_gain_config *);
+void vfe_demux_channel_gain_config(struct vfe_cmd_demux_channel_gain_config *);
+void vfe_black_level_update(struct vfe_cmd_black_level_config *);
+void vfe_black_level_config(struct vfe_cmd_black_level_config *);
+void vfe_asf_update(struct vfe_cmd_asf_update *);
+void vfe_asf_config(struct vfe_cmd_asf_config *);
+void vfe_white_balance_config(struct vfe_cmd_white_balance_config *);
+void vfe_chroma_sup_config(struct vfe_cmd_chroma_suppression_config *);
+void vfe_roll_off_config(struct vfe_cmd_roll_off_config *);
+void vfe_chroma_subsample_config(struct vfe_cmd_chroma_subsample_config *);
+void vfe_chroma_enhan_config(struct vfe_cmd_chroma_enhan_config *);
+void vfe_scaler2cbcr_config(struct vfe_cmd_scaler2_config *);
+void vfe_scaler2y_config(struct vfe_cmd_scaler2_config *);
+void vfe_main_scaler_config(struct vfe_cmd_main_scaler_config *);
+void vfe_stats_wb_exp_stop(void);
+void vfe_stats_update_wb_exp(struct vfe_cmd_stats_wb_exp_update *);
+void vfe_stats_update_af(struct vfe_cmd_stats_af_update *);
+void vfe_stats_start_wb_exp(struct vfe_cmd_stats_wb_exp_start *);
+void vfe_stats_start_af(struct vfe_cmd_stats_af_start *);
+void vfe_stats_setting(struct vfe_cmd_stats_setting *);
+void vfe_axi_input_config(struct vfe_cmd_axi_input_config *);
+void vfe_axi_output_config(struct vfe_cmd_axi_output_config *);
+void vfe_camif_config(struct vfe_cmd_camif_config *);
+void vfe_fov_crop_config(struct vfe_cmd_fov_crop_config *);
+void vfe_get_hw_version(struct vfe_cmd_hw_version *);
+void vfe_reset(void);
+void vfe_cmd_release(struct platform_device *);
+void vfe_output_p_ack(struct vfe_cmd_output_ack *);
+void vfe_output_v_ack(struct vfe_cmd_output_ack *);
+#endif /* __MSM_VFE8X_REG_H__ */
diff --git a/drivers/media/video/msm/msm_vpe1.c b/drivers/media/video/msm/msm_vpe1.c
new file mode 100644
index 0000000..70b9448
--- /dev/null
+++ b/drivers/media/video/msm/msm_vpe1.c
@@ -0,0 +1,1446 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <mach/irqs.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "msm_vpe1.h"
+#include <mach/msm_reqs.h>
+#include <linux/pm_qos_params.h>
+#include <linux/clk.h>
+#include <mach/clk.h>
+#include <asm/div64.h>
+
+static int vpe_enable(uint32_t);
+static int vpe_disable(void);
+static int vpe_update_scaler(struct video_crop_t *pcrop);
+static struct vpe_device_type  vpe_device_data;
+static struct vpe_device_type  *vpe_device;
+struct vpe_ctrl_type    *vpe_ctrl;
+char *vpe_general_cmd[] = {
+	"VPE_DUMMY_0",  /* 0 */
+	"VPE_SET_CLK",
+	"VPE_RESET",
+	"VPE_START",
+	"VPE_ABORT",
+	"VPE_OPERATION_MODE_CFG",  /* 5 */
+	"VPE_INPUT_PLANE_CFG",
+	"VPE_OUTPUT_PLANE_CFG",
+	"VPE_INPUT_PLANE_UPDATE",
+	"VPE_SCALE_CFG_TYPE",
+	"VPE_ROTATION_CFG_TYPE",  /* 10 */
+	"VPE_AXI_OUT_CFG",
+	"VPE_CMD_DIS_OFFSET_CFG",
+	"VPE_ENABLE",
+	"VPE_DISABLE",
+};
+static uint32_t orig_src_y, orig_src_cbcr;
+
+#define CHECKED_COPY_FROM_USER(in) {					\
+	if (copy_from_user((in), (void __user *)cmd->value,		\
+			cmd->length)) {					\
+		rc = -EFAULT;						\
+		break;							\
+	}								\
+}
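+
+/*
+ * Note: CHECKED_COPY_FROM_USER() is meant to be expanded directly inside a
+ * switch case of a command handler -- it relies on local variables 'rc'
+ * and 'cmd' being in scope, and its trailing 'break' exits the enclosing
+ * case when the copy from user space fails.
+ */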
+
+#define msm_dequeue_vpe(queue, member) ({			\
+	unsigned long flags;					\
+	struct msm_device_queue *__q = (queue);			\
+	struct msm_queue_cmd *qcmd = 0;				\
+	spin_lock_irqsave(&__q->lock, flags);			\
+	if (!list_empty(&__q->list)) {				\
+		__q->len--;					\
+		qcmd = list_first_entry(&__q->list,		\
+				struct msm_queue_cmd, member);	\
+		list_del_init(&qcmd->member);			\
+	}							\
+	spin_unlock_irqrestore(&__q->lock, flags);		\
+	qcmd;							\
+})
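+
+/*
+ * msm_dequeue_vpe() is a GCC statement expression that atomically pops the
+ * oldest msm_queue_cmd linked through 'member', or evaluates to NULL when
+ * the queue is empty.  vpe_proc_general() below uses it as
+ *	qcmd = msm_dequeue_vpe(&sync->vpe_q, list_vpe_frame);
+ * to fetch the queued video frame that goes with an incoming DIS offset.
+ */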
+
+/*
+static   struct vpe_cmd_type vpe_cmd[] = {
+		{VPE_DUMMY_0, 0},
+		{VPE_SET_CLK, 0},
+		{VPE_RESET, 0},
+		{VPE_START, 0},
+		{VPE_ABORT, 0},
+		{VPE_OPERATION_MODE_CFG, VPE_OPERATION_MODE_CFG_LEN},
+		{VPE_INPUT_PLANE_CFG, VPE_INPUT_PLANE_CFG_LEN},
+		{VPE_OUTPUT_PLANE_CFG, VPE_OUTPUT_PLANE_CFG_LEN},
+		{VPE_INPUT_PLANE_UPDATE, VPE_INPUT_PLANE_UPDATE_LEN},
+		{VPE_SCALE_CFG_TYPE, VPE_SCALER_CONFIG_LEN},
+		{VPE_ROTATION_CFG_TYPE, 0},
+		{VPE_AXI_OUT_CFG, 0},
+		{VPE_CMD_DIS_OFFSET_CFG, VPE_DIS_OFFSET_CFG_LEN},
+};
+*/
+
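+/*
+ * do_div() divides the 64-bit dividend in place (num becomes the quotient)
+ * and returns the remainder; this wrapper exists so the u3.29 phase
+ * computations below can use the quotient as an ordinary return value.
+ */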
+static long long vpe_do_div(long long num, long long den)
+{
+	do_div(num, den);
+	return num;
+}
+
+static int vpe_start(void)
+{
+	/*  enable the frame irq, bit 0 = Display list 0 ROI done */
+	msm_io_w(1, vpe_device->vpebase + VPE_INTR_ENABLE_OFFSET);
+	msm_io_dump(vpe_device->vpebase + 0x10000, 0x250);
+	/* this triggers the operation. */
+	msm_io_w(1, vpe_device->vpebase + VPE_DL0_START_OFFSET);
+
+	return 0;
+}
+
+void vpe_reset_state_variables(void)
+{
+	/* initialize local variables for state control, etc.*/
+	vpe_ctrl->op_mode = 0;
+	vpe_ctrl->state = VPE_STATE_INIT;
+	spin_lock_init(&vpe_ctrl->tasklet_lock);
+	spin_lock_init(&vpe_ctrl->state_lock);
+	INIT_LIST_HEAD(&vpe_ctrl->tasklet_q);
+}
+
+static void vpe_config_axi_default(void)
+{
+	msm_io_w(0x25, vpe_device->vpebase + VPE_AXI_ARB_2_OFFSET);
+
+	CDBG("%s: yaddr %ld cbcraddr %ld", __func__,
+		 vpe_ctrl->out_y_addr, vpe_ctrl->out_cbcr_addr);
+
+	if (!vpe_ctrl->out_y_addr || !vpe_ctrl->out_cbcr_addr)
+		return;
+
+	msm_io_w(vpe_ctrl->out_y_addr,
+		vpe_device->vpebase + VPE_OUTP0_ADDR_OFFSET);
+	/* for video  CbCr address */
+	msm_io_w(vpe_ctrl->out_cbcr_addr,
+		vpe_device->vpebase + VPE_OUTP1_ADDR_OFFSET);
+
+}
+
+static int vpe_reset(void)
+{
+	uint32_t vpe_version;
+	uint32_t rc;
+
+	vpe_reset_state_variables();
+	vpe_version = msm_io_r(vpe_device->vpebase + VPE_HW_VERSION_OFFSET);
+	CDBG("vpe_version = 0x%x\n", vpe_version);
+
+	/* disable all interrupts.*/
+	msm_io_w(0, vpe_device->vpebase + VPE_INTR_ENABLE_OFFSET);
+	/* clear all pending interrupts*/
+	msm_io_w(0x1fffff, vpe_device->vpebase + VPE_INTR_CLEAR_OFFSET);
+
+	/* write sw_reset to reset the core. */
+	msm_io_w(0x10, vpe_device->vpebase + VPE_SW_RESET_OFFSET);
+
+	/* then poll the reset bit, it should be self-cleared. */
+	while (1) {
+		rc =
+		msm_io_r(vpe_device->vpebase + VPE_SW_RESET_OFFSET) & 0x10;
+		if (rc == 0)
+			break;
+		cpu_relax();
+	}
+
+	/*  at this point, hardware is reset. Then program the default
+		values. */
+	msm_io_w(VPE_AXI_RD_ARB_CONFIG_VALUE,
+			vpe_device->vpebase + VPE_AXI_RD_ARB_CONFIG_OFFSET);
+
+	msm_io_w(VPE_CGC_ENABLE_VALUE,
+			vpe_device->vpebase + VPE_CGC_EN_OFFSET);
+
+	msm_io_w(1, vpe_device->vpebase + VPE_CMD_MODE_OFFSET);
+
+	msm_io_w(VPE_DEFAULT_OP_MODE_VALUE,
+			vpe_device->vpebase + VPE_OP_MODE_OFFSET);
+
+	msm_io_w(VPE_DEFAULT_SCALE_CONFIG,
+			vpe_device->vpebase + VPE_SCALE_CONFIG_OFFSET);
+
+	vpe_config_axi_default();
+	return 0;
+}
+
+int msm_vpe_cfg_update(void *pinfo)
+{
+	uint32_t  rot_flag, rc = 0;
+	struct video_crop_t *pcrop = (struct video_crop_t *)pinfo;
+
+	rot_flag = msm_io_r(vpe_device->vpebase +
+						VPE_OP_MODE_OFFSET) & 0xE00;
+	if (pinfo != NULL) {
+		CDBG("Crop info in2_w = %d, in2_h = %d "
+			"out2_h = %d out2_w = %d \n", pcrop->in2_w,
+			pcrop->in2_h,
+			pcrop->out2_h, pcrop->out2_w);
+		rc = vpe_update_scaler(pcrop);
+	}
+	CDBG("return rc = %d rot_flag = %d\n", rc, rot_flag);
+	rc |= rot_flag;
+
+	return rc;
+}
+
+void vpe_update_scale_coef(uint32_t *p)
+{
+	uint32_t i, offset;
+	offset = *p;
+	for (i = offset; i < (VPE_SCALE_COEFF_NUM + offset); i++) {
+		msm_io_w(*(++p), vpe_device->vpebase + VPE_SCALE_COEFF_LSBn(i));
+		msm_io_w(*(++p), vpe_device->vpebase + VPE_SCALE_COEFF_MSBn(i));
+	}
+}
+
+void vpe_input_plane_config(uint32_t *p)
+{
+	msm_io_w(*p, vpe_device->vpebase + VPE_SRC_FORMAT_OFFSET);
+	msm_io_w(*(++p), vpe_device->vpebase + VPE_SRC_UNPACK_PATTERN1_OFFSET);
+	msm_io_w(*(++p), vpe_device->vpebase + VPE_SRC_IMAGE_SIZE_OFFSET);
+	msm_io_w(*(++p), vpe_device->vpebase + VPE_SRC_YSTRIDE1_OFFSET);
+	msm_io_w(*(++p), vpe_device->vpebase + VPE_SRC_SIZE_OFFSET);
+	vpe_ctrl->in_h_w = *p;
+	msm_io_w(*(++p), vpe_device->vpebase + VPE_SRC_XY_OFFSET);
+}
+
+void vpe_output_plane_config(uint32_t *p)
+{
+	msm_io_w(*p, vpe_device->vpebase + VPE_OUT_FORMAT_OFFSET);
+	msm_io_w(*(++p), vpe_device->vpebase + VPE_OUT_PACK_PATTERN1_OFFSET);
+	msm_io_w(*(++p), vpe_device->vpebase + VPE_OUT_YSTRIDE1_OFFSET);
+	msm_io_w(*(++p), vpe_device->vpebase + VPE_OUT_SIZE_OFFSET);
+	msm_io_w(*(++p), vpe_device->vpebase + VPE_OUT_XY_OFFSET);
+	vpe_ctrl->pcbcr_dis_offset = *(++p);
+}
+
+static int vpe_operation_config(uint32_t *p)
+{
+	uint32_t  outw, outh, temp;
+	msm_io_w(*p, vpe_device->vpebase + VPE_OP_MODE_OFFSET);
+
+	temp = msm_io_r(vpe_device->vpebase + VPE_OUT_SIZE_OFFSET);
+	outw = temp & 0xFFF;
+	outh = (temp & 0xFFF0000) >> 16;
+
+	if (*p++ & 0xE00) {
+		/* rotation enabled. */
+		vpe_ctrl->out_w = outh;
+		vpe_ctrl->out_h = outw;
+	} else {
+		vpe_ctrl->out_w = outw;
+		vpe_ctrl->out_h = outh;
+	}
+	vpe_ctrl->dis_en = *p;
+	return 0;
+}
+
+/* Later we can separate the rotation and scaler calc. If
+*  rotation is enabled, simply swap the destination dimension.
+*  And then pass the already swapped output size to this
+*  function. */
+static int vpe_update_scaler(struct video_crop_t *pcrop)
+{
+	uint32_t out_ROI_width, out_ROI_height;
+	uint32_t src_ROI_width, src_ROI_height;
+
+	uint32_t rc = 0;  /* default to no zoom. */
+	/*
+	* phase_step_x, phase_step_y, phase_init_x and phase_init_y
+	* are represented in fixed-point, unsigned 3.29 format
+	*/
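+	/*
+	 * Illustrative example (not taken from the hardware docs): 1.0 is
+	 * represented as 1 << SCALER_PHASE_BITS (0x20000000), so cropping
+	 * 641 source pixels up to 1281 output pixels on the FIR path gives
+	 * phase_step_x = ((641 - 1) << 29) / (1281 - 1) = 0x10000000,
+	 * i.e. 0.5: the source phase advances half an input pixel per
+	 * output pixel.
+	 */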
+	uint32_t phase_step_x = 0;
+	uint32_t phase_step_y = 0;
+	uint32_t phase_init_x = 0;
+	uint32_t phase_init_y = 0;
+
+	uint32_t src_roi, src_x, src_y, src_xy, temp;
+	uint32_t yscale_filter_sel, xscale_filter_sel;
+	uint32_t scale_unit_sel_x, scale_unit_sel_y;
+	uint64_t numerator, denominator;
+
+	if ((pcrop->in2_w >= pcrop->out2_w) &&
+		(pcrop->in2_h >= pcrop->out2_h)) {
+		CDBG(" =======VPE no zoom needed.\n");
+
+		temp = msm_io_r(vpe_device->vpebase + VPE_OP_MODE_OFFSET)
+		& 0xfffffffc;
+		msm_io_w(temp, vpe_device->vpebase + VPE_OP_MODE_OFFSET);
+
+
+		msm_io_w(0, vpe_device->vpebase + VPE_SRC_XY_OFFSET);
+
+		CDBG("vpe_ctrl->in_h_w = %d \n", vpe_ctrl->in_h_w);
+		msm_io_w(vpe_ctrl->in_h_w , vpe_device->vpebase +
+				VPE_SRC_SIZE_OFFSET);
+
+		return rc;
+	}
+	/* If fall through then scaler is needed.*/
+
+	CDBG("========VPE zoom needed.\n");
+	/* assumption is both direction need zoom. this can be
+	improved. */
+	temp =
+		msm_io_r(vpe_device->vpebase + VPE_OP_MODE_OFFSET) | 0x3;
+	msm_io_w(temp, vpe_device->vpebase + VPE_OP_MODE_OFFSET);
+
+	src_ROI_width = pcrop->in2_w;
+	src_ROI_height = pcrop->in2_h;
+	out_ROI_width = pcrop->out2_w;
+	out_ROI_height = pcrop->out2_h;
+
+	CDBG("src w = 0x%x, h=0x%x, dst w = 0x%x, h =0x%x.\n",
+		src_ROI_width, src_ROI_height, out_ROI_width,
+		out_ROI_height);
+	src_roi = (src_ROI_height << 16) + src_ROI_width;
+
+	msm_io_w(src_roi, vpe_device->vpebase + VPE_SRC_SIZE_OFFSET);
+
+	src_x = (out_ROI_width - src_ROI_width)/2;
+	src_y = (out_ROI_height - src_ROI_height)/2;
+
+	CDBG("src_x = %d, src_y=%d.\n", src_x, src_y);
+
+	src_xy = src_y*(1<<16) + src_x;
+	msm_io_w(src_xy, vpe_device->vpebase +
+			VPE_SRC_XY_OFFSET);
+	CDBG("src_xy = %d, src_roi=%d.\n", src_xy, src_roi);
+
+	/* decide whether to use FIR or M/N for scaling */
+	if ((out_ROI_width == 1 && src_ROI_width < 4) ||
+		(src_ROI_width < 4 * out_ROI_width - 3))
+		scale_unit_sel_x = 0;/* use FIR scalar */
+	else
+		scale_unit_sel_x = 1;/* use M/N scalar */
+
+	if ((out_ROI_height == 1 && src_ROI_height < 4) ||
+		(src_ROI_height < 4 * out_ROI_height - 3))
+		scale_unit_sel_y = 0;/* use FIR scalar */
+	else
+		scale_unit_sel_y = 1;/* use M/N scalar */
+
+	/* calculate phase step for the x direction */
+
+	/* if destination is only 1 pixel wide, the value of phase_step_x
+	is unimportant. Assigning phase_step_x to src ROI width as an
+	arbitrary value. */
+	if (out_ROI_width == 1)
+		phase_step_x = (uint32_t) ((src_ROI_width) <<
+						SCALER_PHASE_BITS);
+	/* if using FIR scalar */
+	else if (scale_unit_sel_x == 0) {
+
+		/* Calculate the quotient ( src_ROI_width - 1 )
+		/ ( out_ROI_width - 1)
+		with u3.29 precision. Quotient is rounded up to
+		the larger 29th decimal point. */
+		numerator = (uint64_t)(src_ROI_width - 1) <<
+			SCALER_PHASE_BITS;
+		/* never zero because the (out_ROI_width == 1)
+		case is handled above */
+		denominator = (uint64_t)(out_ROI_width - 1);
+		/* divide and round up to the larger 29th
+		decimal point. */
+		phase_step_x = (uint32_t) vpe_do_div((numerator +
+					denominator - 1), denominator);
+	} else if (scale_unit_sel_x == 1) { /* if M/N scalar */
+		/* Calculate the quotient ( src_ROI_width ) /
+		( out_ROI_width)
+		with u3.29 precision. Quotient is rounded down to the
+		smaller 29th decimal point. */
+		numerator = (uint64_t)(src_ROI_width) <<
+			SCALER_PHASE_BITS;
+		denominator = (uint64_t)(out_ROI_width);
+		phase_step_x =
+			(uint32_t) vpe_do_div(numerator, denominator);
+	}
+	/* calculate phase step for the y direction */
+
+	/* if destination is only 1 pixel tall, the value of
+		phase_step_y is unimportant. Assigning phase_step_y
+		to src ROI height as an arbitrary value. */
+	if (out_ROI_height == 1)
+		phase_step_y =
+		(uint32_t) ((src_ROI_height) << SCALER_PHASE_BITS);
+
+	/* if FIR scalar */
+	else if (scale_unit_sel_y == 0) {
+		/* Calculate the quotient ( src_ROI_height - 1 ) /
+		( out_ROI_height - 1)
+		with u3.29 precision. Quotient is rounded up to the
+		larger 29th decimal point. */
+		numerator = (uint64_t)(src_ROI_height - 1) <<
+			SCALER_PHASE_BITS;
+		/* never equals to 0 because of the "
+		( out_ROI_height == 1 )" case */
+		denominator = (uint64_t)(out_ROI_height - 1);
+		/* Quotient is rounded up to the larger
+		29th decimal point. */
+		phase_step_y =
+		(uint32_t) vpe_do_div(
+			(numerator + denominator - 1), denominator);
+	} else if (scale_unit_sel_y == 1) { /* if M/N scalar */
+		/* Calculate the quotient ( src_ROI_height )
+		/ ( out_ROI_height)
+		with u3.29 precision. Quotient is rounded down
+		to the smaller 29th decimal point. */
+		numerator = (uint64_t)(src_ROI_height) <<
+			SCALER_PHASE_BITS;
+		denominator = (uint64_t)(out_ROI_height);
+		phase_step_y = (uint32_t) vpe_do_div(
+			numerator, denominator);
+	}
+
+	/* decide which set of FIR coefficients to use */
+	if (phase_step_x > HAL_MDP_PHASE_STEP_2P50)
+		xscale_filter_sel = 0;
+	else if (phase_step_x > HAL_MDP_PHASE_STEP_1P66)
+		xscale_filter_sel = 1;
+	else if (phase_step_x > HAL_MDP_PHASE_STEP_1P25)
+		xscale_filter_sel = 2;
+	else
+		xscale_filter_sel = 3;
+
+	if (phase_step_y > HAL_MDP_PHASE_STEP_2P50)
+		yscale_filter_sel = 0;
+	else if (phase_step_y > HAL_MDP_PHASE_STEP_1P66)
+		yscale_filter_sel = 1;
+	else if (phase_step_y > HAL_MDP_PHASE_STEP_1P25)
+		yscale_filter_sel = 2;
+	else
+		yscale_filter_sel = 3;
+
+	/* calculate phase init for the x direction */
+
+	/* if using FIR scalar */
+	if (scale_unit_sel_x == 0) {
+		if (out_ROI_width == 1)
+			phase_init_x =
+				(uint32_t) ((src_ROI_width - 1) <<
+							SCALER_PHASE_BITS);
+		else
+			phase_init_x = 0;
+	} else if (scale_unit_sel_x == 1) /* M over N scalar  */
+		phase_init_x = 0;
+
+	/* calculate phase init for the y direction
+	if using FIR scalar */
+	if (scale_unit_sel_y == 0) {
+		if (out_ROI_height == 1)
+			phase_init_y =
+			(uint32_t) ((src_ROI_height -
+						1) << SCALER_PHASE_BITS);
+		else
+			phase_init_y = 0;
+	} else if (scale_unit_sel_y == 1) /* M over N scalar   */
+		phase_init_y = 0;
+
+	CDBG("phase step x = %d, step y = %d.\n",
+		 phase_step_x, phase_step_y);
+	CDBG("phase init x = %d, init y = %d.\n",
+		 phase_init_x, phase_init_y);
+
+	msm_io_w(phase_step_x, vpe_device->vpebase +
+			VPE_SCALE_PHASEX_STEP_OFFSET);
+	msm_io_w(phase_step_y, vpe_device->vpebase +
+			VPE_SCALE_PHASEY_STEP_OFFSET);
+
+	msm_io_w(phase_init_x, vpe_device->vpebase +
+			VPE_SCALE_PHASEX_INIT_OFFSET);
+
+	msm_io_w(phase_init_y, vpe_device->vpebase +
+			VPE_SCALE_PHASEY_INIT_OFFSET);
+
+	return 1;
+}
+
+static int vpe_update_scaler_with_dis(struct video_crop_t *pcrop,
+				struct dis_offset_type *dis_offset)
+{
+	uint32_t out_ROI_width, out_ROI_height;
+	uint32_t src_ROI_width, src_ROI_height;
+
+	uint32_t rc = 0;  /* default to no zoom. */
+	/*
+	* phase_step_x, phase_step_y, phase_init_x and phase_init_y
+	* are represented in fixed-point, unsigned 3.29 format
+	*/
+	uint32_t phase_step_x = 0;
+	uint32_t phase_step_y = 0;
+	uint32_t phase_init_x = 0;
+	uint32_t phase_init_y = 0;
+
+	uint32_t src_roi, temp;
+	int32_t  src_x, src_y, src_xy;
+	uint32_t yscale_filter_sel, xscale_filter_sel;
+	uint32_t scale_unit_sel_x, scale_unit_sel_y;
+	uint64_t numerator, denominator;
+	int32_t  zoom_dis_x, zoom_dis_y;
+
+	CDBG("%s: pcrop->in2_w = %d, pcrop->in2_h = %d\n", __func__,
+		 pcrop->in2_w, pcrop->in2_h);
+	CDBG("%s: pcrop->out2_w = %d, pcrop->out2_h = %d\n", __func__,
+		 pcrop->out2_w, pcrop->out2_h);
+
+	if ((pcrop->in2_w >= pcrop->out2_w) &&
+		(pcrop->in2_h >= pcrop->out2_h)) {
+		CDBG(" =======VPE no zoom needed, DIS is still enabled. \n");
+
+		temp = msm_io_r(vpe_device->vpebase + VPE_OP_MODE_OFFSET)
+		& 0xfffffffc;
+		msm_io_w(temp, vpe_device->vpebase + VPE_OP_MODE_OFFSET);
+
+		/* no zoom, use dis offset directly. */
+		src_xy = dis_offset->dis_offset_y * (1<<16) +
+			dis_offset->dis_offset_x;
+
+		msm_io_w(src_xy, vpe_device->vpebase + VPE_SRC_XY_OFFSET);
+
+		CDBG("vpe_ctrl->in_h_w = 0x%x \n", vpe_ctrl->in_h_w);
+		msm_io_w(vpe_ctrl->in_h_w, vpe_device->vpebase +
+				 VPE_SRC_SIZE_OFFSET);
+		return rc;
+	}
+	/* If fall through then scaler is needed.*/
+
+	CDBG("========VPE zoom needed + DIS enabled.\n");
+	/* assumption is both direction need zoom. this can be
+	 improved. */
+	temp = msm_io_r(vpe_device->vpebase +
+					VPE_OP_MODE_OFFSET) | 0x3;
+	msm_io_w(temp, vpe_device->vpebase +
+			VPE_OP_MODE_OFFSET);
+	zoom_dis_x = dis_offset->dis_offset_x *
+		pcrop->in2_w / pcrop->out2_w;
+	zoom_dis_y = dis_offset->dis_offset_y *
+		pcrop->in2_h / pcrop->out2_h;
+
+	src_x = zoom_dis_x + (pcrop->out2_w-pcrop->in2_w)/2;
+	src_y = zoom_dis_y + (pcrop->out2_h-pcrop->in2_h)/2;
+
+	out_ROI_width = vpe_ctrl->out_w;
+	out_ROI_height = vpe_ctrl->out_h;
+
+	src_ROI_width = out_ROI_width * pcrop->in2_w / pcrop->out2_w;
+	src_ROI_height = out_ROI_height * pcrop->in2_h / pcrop->out2_h;
+
+	/* Clamp to the output size.  The processing path mostly
+	truncates, so dis_offset tends toward smaller values; the
+	intent is to keep the offset within the margin.  But in that
+	case src_roi can come out larger than intended, because a
+	smaller value is subtracted. */
+	CDBG("src w = 0x%x, h=0x%x, dst w = 0x%x, h =0x%x.\n",
+		src_ROI_width, src_ROI_height, out_ROI_width,
+		out_ROI_height);
+
+	src_roi = (src_ROI_height << 16) + src_ROI_width;
+
+	msm_io_w(src_roi, vpe_device->vpebase + VPE_SRC_SIZE_OFFSET);
+
+	CDBG("src_x = %d, src_y=%d.\n", src_x, src_y);
+
+	src_xy = src_y*(1<<16) + src_x;
+	msm_io_w(src_xy, vpe_device->vpebase +
+			VPE_SRC_XY_OFFSET);
+	CDBG("src_xy = 0x%x, src_roi=0x%x.\n", src_xy, src_roi);
+
+	/* decide whether to use FIR or M/N for scaling */
+	if ((out_ROI_width == 1 && src_ROI_width < 4) ||
+		(src_ROI_width < 4 * out_ROI_width - 3))
+		scale_unit_sel_x = 0;/* use FIR scalar */
+	else
+		scale_unit_sel_x = 1;/* use M/N scalar */
+
+	if ((out_ROI_height == 1 && src_ROI_height < 4) ||
+		(src_ROI_height < 4 * out_ROI_height - 3))
+		scale_unit_sel_y = 0;/* use FIR scalar */
+	else
+		scale_unit_sel_y = 1;/* use M/N scalar */
+	/* calculate phase step for the x direction */
+
+	/* if destination is only 1 pixel wide, the value of
+	phase_step_x is unimportant. Assigning phase_step_x
+	to src ROI width as an arbitrary value. */
+	if (out_ROI_width == 1)
+		phase_step_x = (uint32_t) ((src_ROI_width) <<
+							SCALER_PHASE_BITS);
+	else if (scale_unit_sel_x == 0) { /* if using FIR scalar */
+		/* Calculate the quotient ( src_ROI_width - 1 )
+		/ ( out_ROI_width - 1)with u3.29 precision.
+		Quotient is rounded up to the larger
+		29th decimal point. */
+		numerator =
+			(uint64_t)(src_ROI_width - 1) <<
+			SCALER_PHASE_BITS;
+		/* never equals to 0 because of the "
+		(out_ROI_width == 1 )"*/
+		denominator = (uint64_t)(out_ROI_width - 1);
+		/* divide and round up to the larger 29th
+		decimal point. */
+		phase_step_x = (uint32_t) vpe_do_div(
+			(numerator + denominator - 1), denominator);
+	} else if (scale_unit_sel_x == 1) { /* if M/N scalar */
+		/* Calculate the quotient
+		( src_ROI_width ) / ( out_ROI_width)
+		with u3.29 precision. Quotient is rounded
+		down to the smaller 29th decimal point. */
+		numerator = (uint64_t)(src_ROI_width) <<
+			SCALER_PHASE_BITS;
+		denominator = (uint64_t)(out_ROI_width);
+		phase_step_x =
+			(uint32_t) vpe_do_div(numerator, denominator);
+	}
+	/* calculate phase step for the y direction */
+
+	/* if destination is only 1 pixel tall, the value of
+		phase_step_y is unimportant. Assigning phase_step_y
+		to src ROI height as an arbitrary value. */
+	if (out_ROI_height == 1)
+		phase_step_y =
+		(uint32_t) ((src_ROI_height) << SCALER_PHASE_BITS);
+	else if (scale_unit_sel_y == 0) { /* if FIR scalar */
+		/* Calculate the quotient
+		( src_ROI_height - 1 ) / ( out_ROI_height - 1)
+		with u3.29 precision. Quotient is rounded up to the
+		larger 29th decimal point. */
+		numerator = (uint64_t)(src_ROI_height - 1) <<
+			SCALER_PHASE_BITS;
+		/* never zero because the
+		(out_ROI_height == 1) case is handled above */
+		denominator = (uint64_t)(out_ROI_height - 1);
+		/* Quotient is rounded up to the larger 29th
+		decimal point. */
+		phase_step_y =
+		(uint32_t) vpe_do_div(
+		(numerator + denominator - 1), denominator);
+	} else if (scale_unit_sel_y == 1) { /* if M/N scalar */
+		/* Calculate the quotient ( src_ROI_height ) / ( out_ROI_height)
+		with u3.29 precision. Quotient is rounded down to the smaller
+		29th decimal point. */
+		numerator = (uint64_t)(src_ROI_height) <<
+			SCALER_PHASE_BITS;
+		denominator = (uint64_t)(out_ROI_height);
+		phase_step_y = (uint32_t) vpe_do_div(
+			numerator, denominator);
+	}
+
+	/* decide which set of FIR coefficients to use */
+	if (phase_step_x > HAL_MDP_PHASE_STEP_2P50)
+		xscale_filter_sel = 0;
+	else if (phase_step_x > HAL_MDP_PHASE_STEP_1P66)
+		xscale_filter_sel = 1;
+	else if (phase_step_x > HAL_MDP_PHASE_STEP_1P25)
+		xscale_filter_sel = 2;
+	else
+		xscale_filter_sel = 3;
+
+	if (phase_step_y > HAL_MDP_PHASE_STEP_2P50)
+		yscale_filter_sel = 0;
+	else if (phase_step_y > HAL_MDP_PHASE_STEP_1P66)
+		yscale_filter_sel = 1;
+	else if (phase_step_y > HAL_MDP_PHASE_STEP_1P25)
+		yscale_filter_sel = 2;
+	else
+		yscale_filter_sel = 3;
+
+	/* calculate phase init for the x direction */
+
+	/* if using FIR scalar */
+	if (scale_unit_sel_x == 0) {
+		if (out_ROI_width == 1)
+			phase_init_x =
+			(uint32_t) ((src_ROI_width - 1) <<
+						SCALER_PHASE_BITS);
+		else
+			phase_init_x = 0;
+
+	} else if (scale_unit_sel_x == 1) /* M over N scalar  */
+		phase_init_x = 0;
+
+	/* calculate phase init for the y direction
+	if using FIR scalar */
+	if (scale_unit_sel_y == 0) {
+		if (out_ROI_height == 1)
+			phase_init_y =
+			(uint32_t) ((src_ROI_height -
+						1) << SCALER_PHASE_BITS);
+		else
+			phase_init_y = 0;
+
+	} else if (scale_unit_sel_y == 1) /* M over N scalar   */
+		phase_init_y = 0;
+
+	CDBG("phase step x = %d, step y = %d.\n",
+		phase_step_x, phase_step_y);
+	CDBG("phase init x = %d, init y = %d.\n",
+		phase_init_x, phase_init_y);
+
+	msm_io_w(phase_step_x, vpe_device->vpebase +
+			VPE_SCALE_PHASEX_STEP_OFFSET);
+
+	msm_io_w(phase_step_y, vpe_device->vpebase +
+			VPE_SCALE_PHASEY_STEP_OFFSET);
+
+	msm_io_w(phase_init_x, vpe_device->vpebase +
+			VPE_SCALE_PHASEX_INIT_OFFSET);
+
+	msm_io_w(phase_init_y, vpe_device->vpebase +
+			VPE_SCALE_PHASEY_INIT_OFFSET);
+
+	return 1;
+}
+
+void msm_send_frame_to_vpe(uint32_t pyaddr, uint32_t pcbcraddr,
+		struct timespec *ts, int output_type)
+{
+	uint32_t temp_pyaddr = 0, temp_pcbcraddr = 0;
+
+	CDBG("vpe input, pyaddr = 0x%x, pcbcraddr = 0x%x\n",
+		pyaddr, pcbcraddr);
+	msm_io_w(pyaddr, vpe_device->vpebase + VPE_SRCP0_ADDR_OFFSET);
+	msm_io_w(pcbcraddr, vpe_device->vpebase + VPE_SRCP1_ADDR_OFFSET);
+
+	if (vpe_ctrl->state == VPE_STATE_ACTIVE)
+		CDBG(" =====VPE is busy!!!  Wrong!========\n");
+
+	if (output_type != OUTPUT_TYPE_ST_R)
+		vpe_ctrl->ts = *ts;
+
+	if (output_type == OUTPUT_TYPE_ST_L) {
+		vpe_ctrl->pcbcr_before_dis = msm_io_r(vpe_device->vpebase +
+			VPE_OUTP1_ADDR_OFFSET);
+		temp_pyaddr = msm_io_r(vpe_device->vpebase +
+			VPE_OUTP0_ADDR_OFFSET);
+		temp_pcbcraddr = temp_pyaddr + PAD_TO_2K(vpe_ctrl->out_w *
+			vpe_ctrl->out_h * 2, vpe_ctrl->pad_2k_bool);
+		msm_io_w(temp_pcbcraddr, vpe_device->vpebase +
+			VPE_OUTP1_ADDR_OFFSET);
+	}
+
+	if (vpe_ctrl->dis_en) {
+		/* Changing the VPE output CBCR address,
+		to make Y/CBCR continuous */
+		vpe_ctrl->pcbcr_before_dis = msm_io_r(vpe_device->vpebase +
+			VPE_OUTP1_ADDR_OFFSET);
+		temp_pyaddr = msm_io_r(vpe_device->vpebase +
+			VPE_OUTP0_ADDR_OFFSET);
+		temp_pcbcraddr = temp_pyaddr + vpe_ctrl->pcbcr_dis_offset;
+		msm_io_w(temp_pcbcraddr, vpe_device->vpebase +
+			VPE_OUTP1_ADDR_OFFSET);
+	}
+
+	vpe_ctrl->output_type = output_type;
+	vpe_ctrl->state = VPE_STATE_ACTIVE;
+	vpe_start();
+}
+
+static int vpe_proc_general(struct msm_vpe_cmd *cmd)
+{
+	int rc = 0;
+	uint32_t *cmdp = NULL;
+	struct msm_queue_cmd *qcmd = NULL;
+	struct msm_vpe_buf_info *vpe_buf;
+	int turbo_mode = 0;
+	struct msm_sync *sync = (struct msm_sync *)vpe_ctrl->syncdata;
+
+	/* cmd->id comes from user space; index vpe_general_cmd[] only
+	   after checking that it is in range. */
+	if (cmd->id >= 0 && cmd->id < (int32_t)ARRAY_SIZE(vpe_general_cmd))
+		CDBG("vpe_proc_general: cmdID = %s, length = %d\n",
+			vpe_general_cmd[cmd->id], cmd->length);
+	switch (cmd->id) {
+	case VPE_ENABLE:
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto vpe_proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto vpe_proc_general_done;
+		}
+		/* use the kernel copy, not the raw user pointer */
+		turbo_mode = *((int *)cmdp);
+		rc = turbo_mode ? vpe_enable(VPE_TURBO_MODE_CLOCK_RATE)
+			: vpe_enable(VPE_NORMAL_MODE_CLOCK_RATE);
+		break;
+	case VPE_DISABLE:
+		rc = vpe_disable();
+		break;
+	case VPE_RESET:
+	case VPE_ABORT:
+		rc = vpe_reset();
+		break;
+	case VPE_START:
+		rc = vpe_start();
+		break;
+
+	case VPE_INPUT_PLANE_CFG:
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto vpe_proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto vpe_proc_general_done;
+		}
+		vpe_input_plane_config(cmdp);
+		break;
+
+	case VPE_OPERATION_MODE_CFG:
+		CDBG("cmd->length = %d \n", cmd->length);
+		if (cmd->length != VPE_OPERATION_MODE_CFG_LEN) {
+			rc = -EINVAL;
+			goto vpe_proc_general_done;
+		}
+		cmdp = kmalloc(VPE_OPERATION_MODE_CFG_LEN,
+					GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto vpe_proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			VPE_OPERATION_MODE_CFG_LEN)) {
+			rc = -EFAULT;
+			goto vpe_proc_general_done;
+		}
+		rc = vpe_operation_config(cmdp);
+		CDBG("rc = %d \n", rc);
+		break;
+
+	case VPE_OUTPUT_PLANE_CFG:
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto vpe_proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto vpe_proc_general_done;
+		}
+		vpe_output_plane_config(cmdp);
+		break;
+
+	case VPE_SCALE_CFG_TYPE:
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto vpe_proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto vpe_proc_general_done;
+		}
+		vpe_update_scale_coef(cmdp);
+		break;
+
+	case VPE_CMD_DIS_OFFSET_CFG: {
+		struct msm_vfe_resp *vdata;
+		/* first get the dis offset and frame id. */
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto vpe_proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto vpe_proc_general_done;
+		}
+		/* get the offset. */
+		vpe_ctrl->dis_offset = *(struct dis_offset_type *)cmdp;
+		qcmd = msm_dequeue_vpe(&sync->vpe_q, list_vpe_frame);
+		if (!qcmd) {
+			pr_err("%s: no video frame.\n", __func__);
+			kfree(cmdp);
+			return -EAGAIN;
+		}
+		vdata = (struct msm_vfe_resp *)(qcmd->command);
+		vpe_buf = &vdata->vpe_bf;
+		vpe_update_scaler_with_dis(&(vpe_buf->vpe_crop),
+					&(vpe_ctrl->dis_offset));
+
+		msm_send_frame_to_vpe(vpe_buf->y_phy, vpe_buf->cbcr_phy,
+						&(vpe_buf->ts), OUTPUT_TYPE_V);
+
+		if (!qcmd || !atomic_read(&qcmd->on_heap)) {
+			kfree(cmdp);
+			return -EAGAIN;
+		}
+		if (!atomic_sub_return(1, &qcmd->on_heap))
+			kfree(qcmd);
+		break;
+	}
+
+	default:
+		break;
+	}
+vpe_proc_general_done:
+	kfree(cmdp);
+	return rc;
+}
+
+static void vpe_addr_convert(struct msm_vpe_phy_info *pinfo,
+	enum vpe_resp_msg type, void *data, void **ext, int32_t *elen)
+{
+	CDBG("In vpe_addr_convert type = %d\n", type);
+	switch (type) {
+	case VPE_MSG_OUTPUT_V:
+		pinfo->output_id = OUTPUT_TYPE_V;
+		break;
+	case VPE_MSG_OUTPUT_ST_R:
+		/* output_id will be used by user space only. */
+		pinfo->output_id = OUTPUT_TYPE_V;
+		break;
+	default:
+		break;
+	} /* switch */
+
+	CDBG("In vpe_addr_convert output_id = %d\n", pinfo->output_id);
+
+	pinfo->y_phy =
+		((struct vpe_message *)data)->_u.msgOut.yBuffer;
+	pinfo->cbcr_phy =
+		((struct vpe_message *)data)->_u.msgOut.cbcrBuffer;
+	*ext  = vpe_ctrl->extdata;
+	*elen = vpe_ctrl->extlen;
+}
+
+void vpe_proc_ops(uint8_t id, void *msg, size_t len)
+{
+	struct msm_vpe_resp *rp;
+
+	rp = vpe_ctrl->resp->vpe_alloc(sizeof(struct msm_vpe_resp),
+		vpe_ctrl->syncdata, GFP_ATOMIC);
+	if (!rp) {
+		CDBG("rp: cannot allocate buffer\n");
+		return;
+	}
+
+	CDBG("vpe_proc_ops, msgId = %d rp->evt_msg.msg_id = %d\n",
+		id, rp->evt_msg.msg_id);
+	rp->evt_msg.type   = MSM_CAMERA_MSG;
+	rp->evt_msg.msg_id = id;
+	rp->evt_msg.len    = len;
+	rp->evt_msg.data   = msg;
+
+	switch (rp->evt_msg.msg_id) {
+	case MSG_ID_VPE_OUTPUT_V:
+		rp->type = VPE_MSG_OUTPUT_V;
+		vpe_addr_convert(&(rp->phy), VPE_MSG_OUTPUT_V,
+			rp->evt_msg.data, &(rp->extdata),
+			&(rp->extlen));
+		break;
+
+	case MSG_ID_VPE_OUTPUT_ST_R:
+		rp->type = VPE_MSG_OUTPUT_ST_R;
+		vpe_addr_convert(&(rp->phy), VPE_MSG_OUTPUT_ST_R,
+			rp->evt_msg.data, &(rp->extdata),
+			&(rp->extlen));
+		break;
+
+	case MSG_ID_VPE_OUTPUT_ST_L:
+		rp->type = VPE_MSG_OUTPUT_ST_L;
+		break;
+
+	default:
+		rp->type = VPE_MSG_GENERAL;
+		break;
+	}
+	CDBG("%s: time = %ld\n",
+			__func__, vpe_ctrl->ts.tv_nsec);
+
+	vpe_ctrl->resp->vpe_resp(rp, MSM_CAM_Q_VPE_MSG,
+					vpe_ctrl->syncdata,
+					&(vpe_ctrl->ts), GFP_ATOMIC);
+}
+
+int vpe_config_axi(struct axidata *ad)
+{
+	uint32_t p1;
+	struct msm_pmem_region *regp1 = NULL;
+	CDBG("vpe_config_axi:bufnum1 = %d.\n", ad->bufnum1);
+
+	if (ad->bufnum1 != 1)
+		return -EINVAL;
+
+	regp1 = &(ad->region[0]);
+	/* for video  Y address */
+	p1 = (regp1->paddr + regp1->info.y_off);
+	msm_io_w(p1, vpe_device->vpebase + VPE_OUTP0_ADDR_OFFSET);
+	/* for video  CbCr address */
+	p1 = (regp1->paddr + regp1->info.cbcr_off);
+	msm_io_w(p1, vpe_device->vpebase + VPE_OUTP1_ADDR_OFFSET);
+
+	return 0;
+}
+
+int msm_vpe_config(struct msm_vpe_cfg_cmd *cmd, void *data)
+{
+	struct msm_vpe_cmd vpecmd;
+	int rc = 0;
+	if (copy_from_user(&vpecmd,
+			(void __user *)(cmd->value),
+			sizeof(vpecmd))) {
+		pr_err("%s %d: copy_from_user failed\n", __func__,
+				__LINE__);
+		return -EFAULT;
+	}
+	CDBG("%s: cmd_type %d\n", __func__, cmd->cmd_type);
+	switch (cmd->cmd_type) {
+	case CMD_VPE:
+		rc = vpe_proc_general(&vpecmd);
+		CDBG(" rc = %d\n", rc);
+		break;
+
+	case CMD_AXI_CFG_VPE:
+	case CMD_AXI_CFG_SNAP_VPE:
+	case CMD_AXI_CFG_SNAP_THUMB_VPE: {
+		struct axidata *axid;
+		axid = data;
+		if (!axid)
+			return -EFAULT;
+		vpe_config_axi(axid);
+		break;
+	}
+	default:
+		break;
+	}
+	CDBG("%s: rc = %d\n", __func__, rc);
+	return rc;
+}
+
+void msm_vpe_offset_update(int frame_pack, uint32_t pyaddr, uint32_t pcbcraddr,
+	struct timespec *ts, int output_id, struct msm_st_half st_half,
+	int frameid)
+{
+	struct msm_vpe_buf_info vpe_buf;
+	uint32_t input_stride;
+
+	vpe_buf.vpe_crop.in2_w = st_half.stCropInfo.in_w;
+	vpe_buf.vpe_crop.in2_h = st_half.stCropInfo.in_h;
+	vpe_buf.vpe_crop.out2_w = st_half.stCropInfo.out_w;
+	vpe_buf.vpe_crop.out2_h = st_half.stCropInfo.out_h;
+	vpe_ctrl->dis_offset.dis_offset_x = st_half.pix_x_off;
+	vpe_ctrl->dis_offset.dis_offset_y = st_half.pix_y_off;
+	vpe_ctrl->dis_offset.frame_id = frameid;
+	vpe_ctrl->frame_pack = frame_pack;
+	vpe_ctrl->output_type = output_id;
+
+	input_stride = (st_half.buf_cbcr_stride * (1<<16)) +
+		st_half.buf_y_stride;
+
+	msm_io_w(input_stride, vpe_device->vpebase + VPE_SRC_YSTRIDE1_OFFSET);
+
+	vpe_update_scaler_with_dis(&(vpe_buf.vpe_crop),
+		&(vpe_ctrl->dis_offset));
+
+	msm_send_frame_to_vpe(pyaddr, pcbcraddr, ts, output_id);
+}
+
+static void vpe_send_outmsg(uint8_t msgid, uint32_t pyaddr,
+	uint32_t pcbcraddr)
+{
+	struct vpe_message msg;
+	uint8_t outid;
+	msg._d = outid = msgid;
+	msg._u.msgOut.output_id   = msgid;
+	msg._u.msgOut.yBuffer     = pyaddr;
+	msg._u.msgOut.cbcrBuffer  = pcbcraddr;
+	vpe_proc_ops(outid, &msg, sizeof(struct vpe_message));
+	return;
+}
+
+int msm_vpe_reg(struct msm_vpe_callback *presp)
+{
+	if (presp && presp->vpe_resp)
+		vpe_ctrl->resp = presp;
+
+	return 0;
+}
+
+static void vpe_send_msg_no_payload(enum VPE_MESSAGE_ID id)
+{
+	struct vpe_message msg;
+
+	CDBG("vfe31_send_msg_no_payload\n");
+	msg._d = id;
+	vpe_proc_ops(id, &msg, 0);
+}
+
+static void vpe_do_tasklet(unsigned long data)
+{
+	unsigned long flags;
+	uint32_t pyaddr = 0, pcbcraddr = 0;
+	uint32_t src_y, src_cbcr, temp;
+
+	struct vpe_isr_queue_cmd_type *qcmd = NULL;
+
+	CDBG("=== vpe_do_tasklet start === \n");
+
+	spin_lock_irqsave(&vpe_ctrl->tasklet_lock, flags);
+	if (list_empty(&vpe_ctrl->tasklet_q)) {
+		/* list_first_entry() is not valid on an empty list, so
+		   check for emptiness explicitly instead of testing the
+		   returned pointer. */
+		spin_unlock_irqrestore(&vpe_ctrl->tasklet_lock, flags);
+		return;
+	}
+	qcmd = list_first_entry(&vpe_ctrl->tasklet_q,
+		struct vpe_isr_queue_cmd_type, list);
+
+	list_del(&qcmd->list);
+	spin_unlock_irqrestore(&vpe_ctrl->tasklet_lock, flags);
+
+	/* interrupt to be processed,  *qcmd has the payload.  */
+	if (qcmd->irq_status & 0x1) {
+		if (vpe_ctrl->output_type == OUTPUT_TYPE_ST_L) {
+			CDBG("vpe left frame done.\n");
+			vpe_ctrl->output_type = 0;
+			CDBG("vpe send out msg.\n");
+			orig_src_y = msm_io_r(vpe_device->vpebase +
+				VPE_SRCP0_ADDR_OFFSET);
+			orig_src_cbcr = msm_io_r(vpe_device->vpebase +
+				VPE_SRCP1_ADDR_OFFSET);
+
+			pyaddr = msm_io_r(vpe_device->vpebase +
+				VPE_OUTP0_ADDR_OFFSET);
+			pcbcraddr = msm_io_r(vpe_device->vpebase +
+				VPE_OUTP1_ADDR_OFFSET);
+			CDBG("%s: out_w = %d, out_h = %d\n", __func__,
+				vpe_ctrl->out_w, vpe_ctrl->out_h);
+
+			if ((vpe_ctrl->frame_pack == TOP_DOWN_FULL) ||
+				(vpe_ctrl->frame_pack == TOP_DOWN_HALF)) {
+				msm_io_w(pyaddr + (vpe_ctrl->out_w *
+					vpe_ctrl->out_h), vpe_device->vpebase +
+					VPE_OUTP0_ADDR_OFFSET);
+				msm_io_w(pcbcraddr + (vpe_ctrl->out_w *
+					vpe_ctrl->out_h/2),
+					vpe_device->vpebase +
+					VPE_OUTP1_ADDR_OFFSET);
+			} else if ((vpe_ctrl->frame_pack ==
+				SIDE_BY_SIDE_HALF) || (vpe_ctrl->frame_pack ==
+				SIDE_BY_SIDE_FULL)) {
+				msm_io_w(pyaddr + vpe_ctrl->out_w,
+					vpe_device->vpebase +
+					VPE_OUTP0_ADDR_OFFSET);
+				msm_io_w(pcbcraddr + vpe_ctrl->out_w,
+					vpe_device->vpebase +
+					VPE_OUTP1_ADDR_OFFSET);
+			} else
+				CDBG("%s: Invalid packing = %d\n", __func__,
+					vpe_ctrl->frame_pack);
+
+			vpe_send_msg_no_payload(MSG_ID_VPE_OUTPUT_ST_L);
+			vpe_ctrl->state = VPE_STATE_INIT;
+			kfree(qcmd);
+			return;
+		} else if (vpe_ctrl->output_type == OUTPUT_TYPE_ST_R) {
+			src_y = orig_src_y;
+			src_cbcr = orig_src_cbcr;
+			CDBG("%s: out_w = %d, out_h = %d\n", __func__,
+				vpe_ctrl->out_w, vpe_ctrl->out_h);
+
+			if ((vpe_ctrl->frame_pack == TOP_DOWN_FULL) ||
+				(vpe_ctrl->frame_pack == TOP_DOWN_HALF)) {
+				pyaddr = msm_io_r(vpe_device->vpebase +
+					VPE_OUTP0_ADDR_OFFSET) -
+					(vpe_ctrl->out_w * vpe_ctrl->out_h);
+			} else if ((vpe_ctrl->frame_pack ==
+				SIDE_BY_SIDE_HALF) || (vpe_ctrl->frame_pack ==
+				SIDE_BY_SIDE_FULL)) {
+				pyaddr = msm_io_r(vpe_device->vpebase +
+				VPE_OUTP0_ADDR_OFFSET) - vpe_ctrl->out_w;
+			} else
+				CDBG("%s: Invalid packing = %d\n", __func__,
+					vpe_ctrl->frame_pack);
+
+			pcbcraddr = vpe_ctrl->pcbcr_before_dis;
+		} else {
+			src_y =	msm_io_r(vpe_device->vpebase +
+				VPE_SRCP0_ADDR_OFFSET);
+			src_cbcr = msm_io_r(vpe_device->vpebase +
+				VPE_SRCP1_ADDR_OFFSET);
+			pyaddr = msm_io_r(vpe_device->vpebase +
+				VPE_OUTP0_ADDR_OFFSET);
+			pcbcraddr = msm_io_r(vpe_device->vpebase +
+				VPE_OUTP1_ADDR_OFFSET);
+		}
+
+		if (vpe_ctrl->dis_en)
+			pcbcraddr = vpe_ctrl->pcbcr_before_dis;
+
+		msm_io_w(src_y,
+				vpe_device->vpebase + VPE_OUTP0_ADDR_OFFSET);
+		msm_io_w(src_cbcr,
+				vpe_device->vpebase + VPE_OUTP1_ADDR_OFFSET);
+
+		temp = msm_io_r(vpe_device->vpebase + VPE_OP_MODE_OFFSET) &
+			0xFFFFFFFC;
+		msm_io_w(temp, vpe_device->vpebase + VPE_OP_MODE_OFFSET);
+
+		/*  now pass this frame to msm_camera.c. */
+		if (vpe_ctrl->output_type == OUTPUT_TYPE_ST_R) {
+			CDBG("vpe send out R msg.\n");
+			vpe_send_outmsg(MSG_ID_VPE_OUTPUT_ST_R, pyaddr,
+				pcbcraddr);
+		} else if (vpe_ctrl->output_type == OUTPUT_TYPE_V) {
+			CDBG("vpe send out V msg.\n");
+			vpe_send_outmsg(MSG_ID_VPE_OUTPUT_V, pyaddr, pcbcraddr);
+		}
+
+		vpe_ctrl->output_type = 0;
+		vpe_ctrl->state = VPE_STATE_INIT;   /* put it back to idle. */
+
+	}
+	kfree(qcmd);
+}
+DECLARE_TASKLET(vpe_tasklet, vpe_do_tasklet, 0);
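+
+/*
+ * Interrupt flow: vpe_parse_irq() below is the top half -- it reads and
+ * clears VPE_INTR_STATUS, queues a vpe_isr_queue_cmd_type on tasklet_q and
+ * schedules vpe_tasklet; the frame-done bookkeeping and the outgoing
+ * message are handled by vpe_do_tasklet() above, in softirq context.
+ */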
+
+static irqreturn_t vpe_parse_irq(int irq_num, void *data)
+{
+	unsigned long flags;
+	uint32_t irq_status = 0;
+	struct vpe_isr_queue_cmd_type *qcmd;
+
+	CDBG("vpe_parse_irq.\n");
+	/* read and clear back-to-back. */
+	irq_status = msm_io_r_mb(vpe_device->vpebase +
+							VPE_INTR_STATUS_OFFSET);
+	msm_io_w_mb(irq_status, vpe_device->vpebase +
+				VPE_INTR_CLEAR_OFFSET);
+
+	msm_io_w(0, vpe_device->vpebase + VPE_INTR_ENABLE_OFFSET);
+
+	if (irq_status == 0) {
+		pr_err("%s: irq_status = 0,Something is wrong!\n", __func__);
+		return IRQ_HANDLED;
+	}
+	irq_status &= 0x1;
+	/* apply mask. only interested in bit 0.  */
+	if (irq_status) {
+		qcmd = kzalloc(sizeof(struct vpe_isr_queue_cmd_type),
+			GFP_ATOMIC);
+		if (!qcmd) {
+			pr_err("%s: qcmd malloc failed!\n", __func__);
+			return IRQ_HANDLED;
+		}
+		/* must be 0x1 now. so in bottom half we don't really
+		need to check. */
+		qcmd->irq_status = irq_status & 0x1;
+		spin_lock_irqsave(&vpe_ctrl->tasklet_lock, flags);
+		list_add_tail(&qcmd->list, &vpe_ctrl->tasklet_q);
+		spin_unlock_irqrestore(&vpe_ctrl->tasklet_lock, flags);
+		tasklet_schedule(&vpe_tasklet);
+	}
+	return IRQ_HANDLED;
+}
+
+static int vpe_enable_irq(void)
+{
+	uint32_t   rc = 0;
+	rc = request_irq(vpe_device->vpeirq,
+				vpe_parse_irq,
+				IRQF_TRIGGER_HIGH, "vpe", 0);
+	return rc;
+}
+
+int msm_vpe_open(void)
+{
+	int rc = 0;
+
+	CDBG("%s: In \n", __func__);
+
+	vpe_ctrl = kzalloc(sizeof(struct vpe_ctrl_type), GFP_KERNEL);
+	if (!vpe_ctrl) {
+		pr_err("%s: no memory!\n", __func__);
+		return -ENOMEM;
+	}
+
+	spin_lock_init(&vpe_ctrl->ops_lock);
+	CDBG("%s: Out\n", __func__);
+
+	return rc;
+}
+
+int msm_vpe_release(void)
+{
+	/* clean up....*/
+	int rc = 0;
+	CDBG("%s: state %d\n", __func__, vpe_ctrl->state);
+	if (vpe_ctrl->state != VPE_STATE_IDLE)
+		rc = vpe_disable();
+
+	kfree(vpe_ctrl);
+	return rc;
+}
+
+
+int vpe_enable(uint32_t clk_rate)
+{
+	int rc = 0;
+	unsigned long flags = 0;
+	/* don't change the order of clock and irq.*/
+	CDBG("%s: enable_clock rate %u\n", __func__, clk_rate);
+	spin_lock_irqsave(&vpe_ctrl->ops_lock, flags);
+	if (vpe_ctrl->state != VPE_STATE_IDLE) {
+		CDBG("%s: VPE already enabled", __func__);
+		spin_unlock_irqrestore(&vpe_ctrl->ops_lock, flags);
+		return 0;
+	}
+	vpe_ctrl->state = VPE_STATE_INIT;
+	spin_unlock_irqrestore(&vpe_ctrl->ops_lock, flags);
+
+	rc = msm_camio_vpe_clk_enable(clk_rate);
+	if (rc < 0) {
+		pr_err("%s: msm_camio_vpe_clk_enable failed", __func__);
+		vpe_ctrl->state = VPE_STATE_IDLE;
+		return rc;
+	}
+
+	CDBG("%s: enable_irq\n", __func__);
+	vpe_enable_irq();
+
+	/* initialize the data structure - lock, queue etc. */
+	spin_lock_init(&vpe_ctrl->tasklet_lock);
+	INIT_LIST_HEAD(&vpe_ctrl->tasklet_q);
+
+	return rc;
+}
+
+int vpe_disable(void)
+{
+	int rc = 0;
+	unsigned long flags = 0;
+	CDBG("%s: called", __func__);
+	spin_lock_irqsave(&vpe_ctrl->ops_lock, flags);
+	if (vpe_ctrl->state == VPE_STATE_IDLE) {
+		CDBG("%s: VPE already disabled", __func__);
+		spin_unlock_irqrestore(&vpe_ctrl->ops_lock, flags);
+		return 0;
+	}
+	vpe_ctrl->state = VPE_STATE_IDLE;
+	spin_unlock_irqrestore(&vpe_ctrl->ops_lock, flags);
+	vpe_ctrl->out_y_addr = msm_io_r(vpe_device->vpebase +
+		VPE_OUTP0_ADDR_OFFSET);
+	vpe_ctrl->out_cbcr_addr = msm_io_r(vpe_device->vpebase +
+		VPE_OUTP1_ADDR_OFFSET);
+	free_irq(vpe_device->vpeirq, 0);
+	tasklet_kill(&vpe_tasklet);
+	rc = msm_camio_vpe_clk_disable();
+	return rc;
+}
+
+static int __msm_vpe_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct resource   *vpemem, *vpeirq, *vpeio;
+	void __iomem      *vpebase;
+
+	/* first allocate */
+
+	vpe_device = &vpe_device_data;
+	memset(vpe_device, 0, sizeof(struct vpe_device_type));
+
+	/* does the device exist? */
+	vpeirq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!vpeirq) {
+		pr_err("%s: no vpe irq resource.\n", __func__);
+		rc = -ENODEV;
+		goto vpe_free_device;
+	}
+	vpemem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!vpemem) {
+		pr_err("%s: no vpe mem resource!\n", __func__);
+		rc = -ENODEV;
+		goto vpe_free_device;
+	}
+	vpeio = request_mem_region(vpemem->start,
+			resource_size(vpemem), pdev->name);
+	if (!vpeio) {
+		pr_err("%s: VPE region already claimed.\n", __func__);
+		rc = -EBUSY;
+		goto vpe_free_device;
+	}
+
+	vpebase =
+		ioremap(vpemem->start,
+				(vpemem->end - vpemem->start) + 1);
+	if (!vpebase) {
+		pr_err("%s: vpe ioremap failed.\n", __func__);
+		rc = -ENOMEM;
+		goto vpe_release_mem_region;
+	}
+
+	/* Fall through, _probe is successful. */
+	vpe_device->vpeirq = vpeirq->start;
+	vpe_device->vpemem = vpemem;
+	vpe_device->vpeio = vpeio;
+	vpe_device->vpebase = vpebase;
+	return rc;  /* this rc should be zero. */
+
+/* from this part it is error handling. */
+vpe_release_mem_region:
+	release_mem_region(vpemem->start, (vpemem->end - vpemem->start) + 1);
+vpe_free_device:
+	return rc;  /* this rc should have error code. */
+}
+
+static int __msm_vpe_remove(struct platform_device *pdev)
+{
+	struct resource	*vpemem;
+	vpemem = vpe_device->vpemem;
+
+	iounmap(vpe_device->vpebase);
+	release_mem_region(vpemem->start,
+					(vpemem->end - vpemem->start) + 1);
+	return 0;
+}
+
+static struct platform_driver msm_vpe_driver = {
+	.probe = __msm_vpe_probe,
+	.remove = __msm_vpe_remove,
+	.driver = {
+		.name = "msm_vpe",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init msm_vpe_init(void)
+{
+	return platform_driver_register(&msm_vpe_driver);
+}
+module_init(msm_vpe_init);
+
+static void __exit msm_vpe_exit(void)
+{
+	platform_driver_unregister(&msm_vpe_driver);
+}
+module_exit(msm_vpe_exit);
+
+MODULE_DESCRIPTION("msm vpe 1.0 driver");
+MODULE_VERSION("msm vpe driver 1.0");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/msm/msm_vpe1.h b/drivers/media/video/msm/msm_vpe1.h
new file mode 100644
index 0000000..ed7112e
--- /dev/null
+++ b/drivers/media/video/msm/msm_vpe1.h
@@ -0,0 +1,253 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _msm_vpe1_h_
+#define _msm_vpe1_h_
+
+#include <mach/camera.h>
+
+/***********  start of register offset *********************/
+#define VPE_INTR_ENABLE_OFFSET                0x0020
+#define VPE_INTR_STATUS_OFFSET                0x0024
+#define VPE_INTR_CLEAR_OFFSET                 0x0028
+#define VPE_DL0_START_OFFSET                  0x0030
+#define VPE_HW_VERSION_OFFSET                 0x0070
+#define VPE_SW_RESET_OFFSET                   0x0074
+#define VPE_AXI_RD_ARB_CONFIG_OFFSET          0x0078
+#define VPE_SEL_CLK_OR_HCLK_TEST_BUS_OFFSET   0x007C
+#define VPE_CGC_EN_OFFSET                     0x0100
+#define VPE_CMD_STATUS_OFFSET                 0x10008
+#define VPE_PROFILE_EN_OFFSET                 0x10010
+#define VPE_PROFILE_COUNT_OFFSET              0x10014
+#define VPE_CMD_MODE_OFFSET                   0x10060
+#define VPE_SRC_SIZE_OFFSET                   0x10108
+#define VPE_SRCP0_ADDR_OFFSET                 0x1010C
+#define VPE_SRCP1_ADDR_OFFSET                 0x10110
+#define VPE_SRC_YSTRIDE1_OFFSET               0x1011C
+#define VPE_SRC_FORMAT_OFFSET                 0x10124
+#define VPE_SRC_UNPACK_PATTERN1_OFFSET        0x10128
+#define VPE_OP_MODE_OFFSET                    0x10138
+#define VPE_SCALE_PHASEX_INIT_OFFSET          0x1013C
+#define VPE_SCALE_PHASEY_INIT_OFFSET          0x10140
+#define VPE_SCALE_PHASEX_STEP_OFFSET          0x10144
+#define VPE_SCALE_PHASEY_STEP_OFFSET          0x10148
+#define VPE_OUT_FORMAT_OFFSET                 0x10150
+#define VPE_OUT_PACK_PATTERN1_OFFSET          0x10154
+#define VPE_OUT_SIZE_OFFSET                   0x10164
+#define VPE_OUTP0_ADDR_OFFSET                 0x10168
+#define VPE_OUTP1_ADDR_OFFSET                 0x1016C
+#define VPE_OUT_YSTRIDE1_OFFSET               0x10178
+#define VPE_OUT_XY_OFFSET                     0x1019C
+#define VPE_SRC_XY_OFFSET                     0x10200
+#define VPE_SRC_IMAGE_SIZE_OFFSET             0x10208
+#define VPE_SCALE_CONFIG_OFFSET               0x10230
+#define VPE_DEINT_STATUS_OFFSET               0x30000
+#define VPE_DEINT_DECISION_OFFSET             0x30004
+#define VPE_DEINT_COEFF0_OFFSET               0x30010
+#define VPE_SCALE_STATUS_OFFSET               0x50000
+#define VPE_SCALE_SVI_PARAM_OFFSET            0x50010
+#define VPE_SCALE_SHARPEN_CFG_OFFSET          0x50020
+#define VPE_SCALE_COEFF_LSP_0_OFFSET          0x50400
+#define VPE_SCALE_COEFF_MSP_0_OFFSET          0x50404
+
+#define VPE_AXI_ARB_2_OFFSET                  0x004C
+
+#define VPE_SCALE_COEFF_LSBn(n)	(0x50400 + 8 * (n))
+#define VPE_SCALE_COEFF_MSBn(n)	(0x50404 + 8 * (n))
+#define VPE_SCALE_COEFF_NUM			32
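+
+/*
+ * Each scale coefficient n occupies an LSB/MSB register pair 8 bytes
+ * apart, so VPE_SCALE_COEFF_LSBn(0) == VPE_SCALE_COEFF_LSP_0_OFFSET
+ * (0x50400) and VPE_SCALE_COEFF_LSBn(1) == 0x50408.
+ * vpe_update_scale_coef() walks VPE_SCALE_COEFF_NUM such pairs starting
+ * at the table offset carried in the command payload.
+ */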
+
+/*********** end of register offset ********************/
+
+
+#define VPE_HARDWARE_VERSION          0x00080308
+#define VPE_SW_RESET_VALUE            0x00000010  /* bit 4 for PPP*/
+#define VPE_AXI_RD_ARB_CONFIG_VALUE   0x124924
+#define VPE_CMD_MODE_VALUE        0x1
+#define VPE_DEFAULT_OP_MODE_VALUE     0x40FC0004
+#define VPE_CGC_ENABLE_VALUE          0xffff
+#define VPE_DEFAULT_SCALE_CONFIG      0x3c
+
+#define VPE_NORMAL_MODE_CLOCK_RATE   150000000
+#define VPE_TURBO_MODE_CLOCK_RATE   200000000
+/**************************************************/
+/*********** Start of command id ******************/
+/**************************************************/
+enum VPE_CMD_ID_ENUM {
+	VPE_DUMMY_0 = 0,
+	VPE_SET_CLK,
+	VPE_RESET,
+	VPE_START,
+	VPE_ABORT,
+	VPE_OPERATION_MODE_CFG, /* 5 */
+	VPE_INPUT_PLANE_CFG,
+	VPE_OUTPUT_PLANE_CFG,
+	VPE_INPUT_PLANE_UPDATE,
+	VPE_SCALE_CFG_TYPE,
+	VPE_ROTATION_CFG_TYPE, /* 10 */
+	VPE_AXI_OUT_CFG,
+	VPE_CMD_DIS_OFFSET_CFG,
+	VPE_ENABLE,
+	VPE_DISABLE,
+};
+
+/* Length of each command.  In bytes.  (payload only) */
+#define VPE_OPERATION_MODE_CFG_LEN 8
+#define VPE_INPUT_PLANE_CFG_LEN    24
+#define VPE_OUTPUT_PLANE_CFG_LEN   20
+#define VPE_INPUT_PLANE_UPDATE_LEN 12
+#define VPE_SCALER_CONFIG_LEN      260
+#define VPE_DIS_OFFSET_CFG_LEN     12
+/**************************************************/
+/*********** End of command id ********************/
+/**************************************************/
+
+struct msm_vpe_cmd {
+	int32_t  id;
+	uint16_t length;
+	void     *value;
+};
+
+struct vpe_cmd_type {
+	uint16_t id;
+	uint32_t length;
+};
+
+struct vpe_isr_queue_cmd_type {
+	struct list_head            list;
+	uint32_t                    irq_status;
+};
+
+enum VPE_MESSAGE_ID {
+	MSG_ID_VPE_OUTPUT_V = 7, /* To match with that of VFE */
+	MSG_ID_VPE_OUTPUT_ST_L,
+	MSG_ID_VPE_OUTPUT_ST_R,
+};
+
+enum vpe_state {
+	VPE_STATE_IDLE,
+	VPE_STATE_INIT,
+	VPE_STATE_ACTIVE,
+};
+
+struct vpe_device_type {
+	/* device related. */
+	int   vpeirq;
+	void __iomem      *vpebase;
+	struct resource	  *vpemem;
+	struct resource   *vpeio;
+	void        *device_extdata;
+};
+
+struct dis_offset_type {
+	int32_t dis_offset_x;
+	int32_t dis_offset_y;
+	uint32_t frame_id;
+};
+
+struct vpe_ctrl_type {
+	spinlock_t        tasklet_lock;
+	spinlock_t        state_lock;
+	spinlock_t        ops_lock;
+
+	struct list_head  tasklet_q;
+	void              *syncdata;
+	uint16_t          op_mode;
+	void              *extdata;
+	uint32_t          extlen;
+	struct msm_vpe_callback *resp;
+	uint32_t          in_h_w;
+	uint32_t          out_h;  /* this is BEFORE rotation. */
+	uint32_t          out_w;  /* this is BEFORE rotation. */
+	uint32_t          dis_en;
+	struct timespec   ts;
+	struct dis_offset_type   dis_offset;
+	uint32_t          pcbcr_before_dis;
+	uint32_t          pcbcr_dis_offset;
+	int               output_type;
+	int               frame_pack;
+	uint8_t           pad_2k_bool;
+	enum vpe_state    state;
+	unsigned long     out_y_addr;
+	unsigned long     out_cbcr_addr;
+};
+
+/*
+* vpe_input_update
+*
+* Define the parameters for output plane
+*/
+/* this is the dimension of ROI.  width / height. */
+struct vpe_src_size_packed {
+	uint32_t        src_w;
+	uint32_t        src_h;
+};
+
+struct vpe_src_xy_packed {
+	uint32_t        src_x;
+	uint32_t        src_y;
+};
+
+struct vpe_input_plane_update_type {
+	struct vpe_src_size_packed             src_roi_size;
+	/* DIS updates this set. */
+	struct vpe_src_xy_packed               src_roi_offset;
+	/* input address*/
+	uint8_t                         *src_p0_addr;
+	uint8_t                         *src_p1_addr;
+};
+
+struct vpe_msg_stats{
+	uint32_t    buffer;
+	uint32_t    frameCounter;
+};
+
+struct vpe_msg_output {
+	uint8_t   output_id;
+	uint32_t  yBuffer;
+	uint32_t  cbcrBuffer;
+	uint32_t  frameCounter;
+};
+
+struct vpe_message {
+	uint8_t  _d;
+	union {
+		struct vpe_msg_output              msgOut;
+		struct vpe_msg_stats               msgStats;
+	} _u;
+};
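+
+/*
+ * _d is the discriminator for the _u union: the senders in msm_vpe1.c
+ * (vpe_send_outmsg(), vpe_send_msg_no_payload()) set it to the same
+ * VPE_MESSAGE_ID value that is passed to vpe_proc_ops().
+ */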
+
+#define SCALER_PHASE_BITS 29
+#define HAL_MDP_PHASE_STEP_2P50    0x50000000
+#define HAL_MDP_PHASE_STEP_1P66    0x35555555
+#define HAL_MDP_PHASE_STEP_1P25    0x28000000
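+
+/*
+ * The HAL_MDP_PHASE_STEP_* values are downscale-ratio thresholds in the
+ * same u3.29 fixed point used for the phase steps (1.0 == 1 << 29 ==
+ * 0x20000000), e.g. 2.5 * 0x20000000 == 0x50000000.  vpe_update_scaler()
+ * compares the computed phase step against them to pick one of four FIR
+ * coefficient sets.
+ */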
+
+struct phase_val_t {
+	int32_t phase_init_x;
+	int32_t phase_init_y;
+	int32_t phase_step_x;
+	int32_t phase_step_y;
+};
+
+extern struct vpe_ctrl_type *vpe_ctrl;
+
+int msm_vpe_open(void);
+int msm_vpe_release(void);
+int msm_vpe_reg(struct msm_vpe_callback *presp);
+void msm_send_frame_to_vpe(uint32_t pyaddr, uint32_t pcbcraddr,
+	struct timespec *ts, int output_id);
+int msm_vpe_config(struct msm_vpe_cfg_cmd *cmd, void *data);
+int msm_vpe_cfg_update(void *pinfo);
+void msm_vpe_offset_update(int frame_pack, uint32_t pyaddr, uint32_t pcbcraddr,
+	struct timespec *ts, int output_id, struct msm_st_half st_half,
+	int frameid);
+#endif /*_msm_vpe1_h_*/
+
diff --git a/drivers/media/video/msm/mt9d112.c b/drivers/media/video/msm/mt9d112.c
new file mode 100644
index 0000000..a7b5156
--- /dev/null
+++ b/drivers/media/video/msm/mt9d112.c
@@ -0,0 +1,845 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <media/msm_camera.h>
+#include <mach/gpio.h>
+#include "mt9d112.h"
+
+/* Micron MT9D112 Registers and their values */
+/* Sensor Core Registers */
+#define  REG_MT9D112_MODEL_ID 0x3000
+#define  MT9D112_MODEL_ID     0x1580
+
+/*  SOC Registers Page 1  */
+#define  REG_MT9D112_SENSOR_RESET     0x301A
+#define  REG_MT9D112_STANDBY_CONTROL  0x3202
+#define  REG_MT9D112_MCU_BOOT         0x3386
+
+#define SENSOR_DEBUG 0
+
+struct mt9d112_work {
+	struct work_struct work;
+};
+
+static struct  mt9d112_work *mt9d112_sensorw;
+static struct  i2c_client *mt9d112_client;
+
+struct mt9d112_ctrl {
+	const struct msm_camera_sensor_info *sensordata;
+};
+
+
+static struct mt9d112_ctrl *mt9d112_ctrl;
+
+static DECLARE_WAIT_QUEUE_HEAD(mt9d112_wait_queue);
+DEFINE_SEMAPHORE(mt9d112_sem);
+static int16_t mt9d112_effect = CAMERA_EFFECT_OFF;
+
+/*=============================================================
+	EXTERNAL DECLARATIONS
+==============================================================*/
+extern struct mt9d112_reg mt9d112_regs;
+
+
+/*=============================================================*/
+
+static int mt9d112_reset(const struct msm_camera_sensor_info *dev)
+{
+	int rc = 0;
+
+	rc = gpio_request(dev->sensor_reset, "mt9d112");
+
+	if (!rc) {
+		rc = gpio_direction_output(dev->sensor_reset, 0);
+		msleep(20);
+		gpio_set_value_cansleep(dev->sensor_reset, 1);
+		msleep(20);
+	}
+
+	return rc;
+}
+
+static int32_t mt9d112_i2c_txdata(unsigned short saddr,
+	unsigned char *txdata, int length)
+{
+	struct i2c_msg msg[] = {
+		{
+			.addr = saddr,
+			.flags = 0,
+			.len = length,
+			.buf = txdata,
+		},
+	};
+
+#if SENSOR_DEBUG
+	if (length == 2)
+		CDBG("msm_io_i2c_w: 0x%04x 0x%04x\n",
+			*(u16 *) txdata, *(u16 *) (txdata + 2));
+	else if (length == 4)
+		CDBG("msm_io_i2c_w: 0x%04x\n", *(u16 *) txdata);
+	else
+		CDBG("msm_io_i2c_w: length = %d\n", length);
+#endif
+	if (i2c_transfer(mt9d112_client->adapter, msg, 1) < 0) {
+		CDBG("mt9d112_i2c_txdata failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int32_t mt9d112_i2c_write(unsigned short saddr,
+	unsigned short waddr, unsigned short wdata, enum mt9d112_width width)
+{
+	int32_t rc = -EIO;
+	unsigned char buf[4];
+
+	memset(buf, 0, sizeof(buf));
+	switch (width) {
+	case WORD_LEN: {
+		buf[0] = (waddr & 0xFF00)>>8;
+		buf[1] = (waddr & 0x00FF);
+		buf[2] = (wdata & 0xFF00)>>8;
+		buf[3] = (wdata & 0x00FF);
+
+		rc = mt9d112_i2c_txdata(saddr, buf, 4);
+	}
+		break;
+
+	case BYTE_LEN: {
+		buf[0] = waddr;
+		buf[1] = wdata;
+		rc = mt9d112_i2c_txdata(saddr, buf, 2);
+	}
+		break;
+
+	default:
+		break;
+	}
+
+	if (rc < 0)
+		CDBG("i2c_write failed, addr = 0x%x, val = 0x%x!\n",
+			waddr, wdata);
+
+	return rc;
+}
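+
+/*
+ * Wire format illustration: a WORD_LEN write packs the 16-bit register
+ * address and 16-bit data big-endian into one 4-byte I2C message, so
+ * writing 0xA103 to register 0x338C puts {0x33, 0x8C, 0xA1, 0x03} on the
+ * bus.  BYTE_LEN writes send just {waddr, wdata}.
+ */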
+
+static int32_t mt9d112_i2c_write_table(
+	struct mt9d112_i2c_reg_conf const *reg_conf_tbl,
+	int num_of_items_in_table)
+{
+	int i;
+	int32_t rc = -EIO;
+
+	for (i = 0; i < num_of_items_in_table; i++) {
+		rc = mt9d112_i2c_write(mt9d112_client->addr,
+			reg_conf_tbl->waddr, reg_conf_tbl->wdata,
+			reg_conf_tbl->width);
+		if (rc < 0)
+			break;
+		if (reg_conf_tbl->mdelay_time != 0)
+			mdelay(reg_conf_tbl->mdelay_time);
+		reg_conf_tbl++;
+	}
+
+	return rc;
+}
+
+static int mt9d112_i2c_rxdata(unsigned short saddr,
+	unsigned char *rxdata, int length)
+{
+	struct i2c_msg msgs[] = {
+	{
+		.addr   = saddr,
+		.flags = 0,
+		.len   = 2,
+		.buf   = rxdata,
+	},
+	{
+		.addr   = saddr,
+		.flags = I2C_M_RD,
+		.len   = length,
+		.buf   = rxdata,
+	},
+	};
+
+#if SENSOR_DEBUG
+	if (length == 2)
+		CDBG("msm_io_i2c_r: 0x%04x 0x%04x\n",
+			*(u16 *) rxdata, *(u16 *) (rxdata + 2));
+	else if (length == 4)
+		CDBG("msm_io_i2c_r: 0x%04x\n", *(u16 *) rxdata);
+	else
+		CDBG("msm_io_i2c_r: length = %d\n", length);
+#endif
+
+	if (i2c_transfer(mt9d112_client->adapter, msgs, 2) < 0) {
+		CDBG("mt9d112_i2c_rxdata failed!\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int32_t mt9d112_i2c_read(unsigned short   saddr,
+	unsigned short raddr, unsigned short *rdata, enum mt9d112_width width)
+{
+	int32_t rc = 0;
+	unsigned char buf[4];
+
+	if (!rdata)
+		return -EIO;
+
+	memset(buf, 0, sizeof(buf));
+
+	switch (width) {
+	case WORD_LEN: {
+		buf[0] = (raddr & 0xFF00)>>8;
+		buf[1] = (raddr & 0x00FF);
+
+		rc = mt9d112_i2c_rxdata(saddr, buf, 2);
+		if (rc < 0)
+			return rc;
+
+		*rdata = buf[0] << 8 | buf[1];
+	}
+		break;
+
+	default:
+		break;
+	}
+
+	if (rc < 0)
+		CDBG("mt9d112_i2c_read failed!\n");
+
+	return rc;
+}
+
+static int32_t mt9d112_set_lens_roll_off(void)
+{
+	int32_t rc = 0;
+	rc = mt9d112_i2c_write_table(&mt9d112_regs.rftbl[0],
+				mt9d112_regs.rftbl_size);
+	return rc;
+}
+
+static long mt9d112_reg_init(void)
+{
+	int32_t array_length;
+	int32_t i;
+	long rc;
+
+	/* PLL Setup Start */
+	rc = mt9d112_i2c_write_table(&mt9d112_regs.plltbl[0],
+					mt9d112_regs.plltbl_size);
+
+	if (rc < 0)
+		return rc;
+	/* PLL Setup End   */
+
+	array_length = mt9d112_regs.prev_snap_reg_settings_size;
+
+	/* Configure sensor for Preview mode and Snapshot mode */
+	for (i = 0; i < array_length; i++) {
+		rc = mt9d112_i2c_write(mt9d112_client->addr,
+		  mt9d112_regs.prev_snap_reg_settings[i].register_address,
+		  mt9d112_regs.prev_snap_reg_settings[i].register_value,
+		  WORD_LEN);
+
+		if (rc < 0)
+			return rc;
+	}
+
+	/* Configure for Noise Reduction, Saturation and Aperture Correction */
+	array_length = mt9d112_regs.noise_reduction_reg_settings_size;
+
+	for (i = 0; i < array_length; i++) {
+		rc = mt9d112_i2c_write(mt9d112_client->addr,
+			mt9d112_regs.noise_reduction_reg_settings[i].register_address,
+			mt9d112_regs.noise_reduction_reg_settings[i].register_value,
+			WORD_LEN);
+
+		if (rc < 0)
+			return rc;
+	}
+
+	/* Set Color Kill Saturation point to optimum value */
+	rc = mt9d112_i2c_write(mt9d112_client->addr,
+		0x35A4, 0x0593, WORD_LEN);
+	if (rc < 0)
+		return rc;
+
+	rc = mt9d112_i2c_write_table(&mt9d112_regs.stbl[0],
+					mt9d112_regs.stbl_size);
+	if (rc < 0)
+		return rc;
+
+	rc = mt9d112_set_lens_roll_off();
+	if (rc < 0)
+		return rc;
+
+	return 0;
+}
+
+static long mt9d112_set_effect(int mode, int effect)
+{
+	uint16_t reg_addr;
+	uint16_t reg_val;
+	long rc = 0;
+
+	switch (mode) {
+	case SENSOR_PREVIEW_MODE:
+		/* Context A Special Effects */
+		reg_addr = 0x2799;
+		break;
+
+	case SENSOR_RAW_SNAPSHOT_MODE:
+	case SENSOR_SNAPSHOT_MODE:
+		/* Context B Special Effects */
+		reg_addr = 0x279B;
+		break;
+
+	default:
+		reg_addr = 0x2799;
+		break;
+	}
+
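+	/* The writes below use the sensor's indirect MCU access pair:
+	 * 0x338C selects the MCU variable address (reg_addr), 0x3390
+	 * carries the value (reg_val).
+	 */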
+	switch (effect) {
+	case CAMERA_EFFECT_OFF: {
+		reg_val = 0x6440;
+
+		rc = mt9d112_i2c_write(mt9d112_client->addr,
+			0x338C, reg_addr, WORD_LEN);
+		if (rc < 0)
+			return rc;
+
+		rc = mt9d112_i2c_write(mt9d112_client->addr,
+			0x3390, reg_val, WORD_LEN);
+		if (rc < 0)
+			return rc;
+	}
+		break;
+
+	case CAMERA_EFFECT_MONO: {
+		reg_val = 0x6441;
+		rc = mt9d112_i2c_write(mt9d112_client->addr,
+			0x338C, reg_addr, WORD_LEN);
+		if (rc < 0)
+			return rc;
+
+		rc = mt9d112_i2c_write(mt9d112_client->addr,
+			0x3390, reg_val, WORD_LEN);
+		if (rc < 0)
+			return rc;
+	}
+		break;
+
+	case CAMERA_EFFECT_NEGATIVE: {
+		reg_val = 0x6443;
+		rc = mt9d112_i2c_write(mt9d112_client->addr,
+			0x338C, reg_addr, WORD_LEN);
+		if (rc < 0)
+			return rc;
+
+		rc = mt9d112_i2c_write(mt9d112_client->addr,
+			0x3390, reg_val, WORD_LEN);
+		if (rc < 0)
+			return rc;
+	}
+		break;
+
+	case CAMERA_EFFECT_SOLARIZE: {
+		reg_val = 0x6445;
+		rc = mt9d112_i2c_write(mt9d112_client->addr,
+			0x338C, reg_addr, WORD_LEN);
+		if (rc < 0)
+			return rc;
+
+		rc = mt9d112_i2c_write(mt9d112_client->addr,
+			0x3390, reg_val, WORD_LEN);
+		if (rc < 0)
+			return rc;
+	}
+		break;
+
+	case CAMERA_EFFECT_SEPIA: {
+		reg_val = 0x6442;
+		rc = mt9d112_i2c_write(mt9d112_client->addr,
+			0x338C, reg_addr, WORD_LEN);
+		if (rc < 0)
+			return rc;
+
+		rc = mt9d112_i2c_write(mt9d112_client->addr,
+			0x3390, reg_val, WORD_LEN);
+		if (rc < 0)
+			return rc;
+	}
+		break;
+
+	default: {
+		reg_val = 0x6440;
+		rc = mt9d112_i2c_write(mt9d112_client->addr,
+			0x338C, reg_addr, WORD_LEN);
+		if (rc < 0)
+			return rc;
+
+		rc = mt9d112_i2c_write(mt9d112_client->addr,
+			0x3390, reg_val, WORD_LEN);
+		if (rc < 0)
+			return rc;
+
+		return -EINVAL;
+	}
+	}
+	mt9d112_effect = effect;
+	/* Refresh Sequencer */
+	rc = mt9d112_i2c_write(mt9d112_client->addr,
+		0x338C, 0xA103, WORD_LEN);
+	if (rc < 0)
+		return rc;
+
+	rc = mt9d112_i2c_write(mt9d112_client->addr,
+		0x3390, 0x0005, WORD_LEN);
+
+	return rc;
+}
+
+static long mt9d112_set_sensor_mode(int mode)
+{
+	uint16_t clock;
+	long rc = 0;
+
+	switch (mode) {
+	case SENSOR_PREVIEW_MODE:
+		rc =
+			mt9d112_i2c_write(mt9d112_client->addr,
+				0x338C, 0xA20C, WORD_LEN);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9d112_i2c_write(mt9d112_client->addr,
+				0x3390, 0x0004, WORD_LEN);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9d112_i2c_write(mt9d112_client->addr,
+				0x338C, 0xA215, WORD_LEN);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9d112_i2c_write(mt9d112_client->addr,
+				0x3390, 0x0004, WORD_LEN);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9d112_i2c_write(mt9d112_client->addr,
+				0x338C, 0xA20B, WORD_LEN);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9d112_i2c_write(mt9d112_client->addr,
+				0x3390, 0x0000, WORD_LEN);
+		if (rc < 0)
+			return rc;
+
+		clock = 0x23C;
+
+		rc =
+			mt9d112_i2c_write(mt9d112_client->addr,
+				0x341C, clock, WORD_LEN);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9d112_i2c_write(mt9d112_client->addr,
+				0x338C, 0xA103, WORD_LEN);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9d112_i2c_write(mt9d112_client->addr,
+				0x3390, 0x0001, WORD_LEN);
+		if (rc < 0)
+			return rc;
+		mdelay(5);
+
+		break;
+
+	case SENSOR_SNAPSHOT_MODE:
+		/* Switch to lower fps for Snapshot */
+		rc =
+			mt9d112_i2c_write(mt9d112_client->addr,
+				0x341C, 0x0120, WORD_LEN);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9d112_i2c_write(mt9d112_client->addr,
+				0x338C, 0xA120, WORD_LEN);
+		if (rc < 0)
+			return rc;
+
+		msleep(40);/*waiting for the delay of one frame*/
+		rc =
+			mt9d112_i2c_write(mt9d112_client->addr,
+				0x3390, 0x0002, WORD_LEN);
+		if (rc < 0)
+			return rc;
+
+		msleep(80);/*waiting for the delay of two frames*/
+
+		rc =
+			mt9d112_i2c_write(mt9d112_client->addr,
+				0x338C, 0xA103, WORD_LEN);
+		if (rc < 0)
+			return rc;
+
+		msleep(40);/*waiting for the delay of one frame*/
+		rc =
+			mt9d112_i2c_write(mt9d112_client->addr,
+				0x3390, 0x0002, WORD_LEN);
+		if (rc < 0)
+			return rc;
+		break;
+
+	case SENSOR_RAW_SNAPSHOT_MODE:
+		/* Setting the effect to CAMERA_EFFECT_OFF */
+		rc =
+			mt9d112_i2c_write(mt9d112_client->addr,
+				0x338C, 0x279B, WORD_LEN);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9d112_i2c_write(mt9d112_client->addr,
+			0x3390, 0x6440, WORD_LEN);
+		if (rc < 0)
+			return rc;
+		msleep(40);/*waiting for the delay of one frame*/
+		/* Switch to lower fps for Snapshot */
+		rc =
+			mt9d112_i2c_write(mt9d112_client->addr,
+				0x341C, 0x0120, WORD_LEN);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9d112_i2c_write(mt9d112_client->addr,
+				0x338C, 0xA120, WORD_LEN);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9d112_i2c_write(mt9d112_client->addr,
+				0x3390, 0x0002, WORD_LEN);
+		if (rc < 0)
+			return rc;
+		msleep(80);/*waiting for the delay of two frames*/
+		rc =
+			mt9d112_i2c_write(mt9d112_client->addr,
+				0x338C, 0xA103, WORD_LEN);
+		if (rc < 0)
+			return rc;
+		msleep(40);/*waiting for the delay of one frame*/
+		rc =
+			mt9d112_i2c_write(mt9d112_client->addr,
+				0x3390, 0x0002, WORD_LEN);
+		if (rc < 0)
+			return rc;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int mt9d112_sensor_init_probe(const struct msm_camera_sensor_info *data)
+{
+	uint16_t model_id = 0;
+	int rc = 0;
+
+	CDBG("init entry\n");
+	rc = mt9d112_reset(data);
+	if (rc < 0) {
+		CDBG("reset failed!\n");
+		goto init_probe_fail;
+	}
+
+	msm_camio_clk_rate_set(24000000);
+	msleep(20);
+
+	/* Micron suggested Power up block Start:
+	 * Put MCU into Reset - Stop MCU */
+	rc = mt9d112_i2c_write(mt9d112_client->addr,
+		REG_MT9D112_MCU_BOOT, 0x0501, WORD_LEN);
+	if (rc < 0)
+		goto init_probe_fail;
+
+	/* Pull MCU from Reset - Start MCU */
+	rc = mt9d112_i2c_write(mt9d112_client->addr,
+		REG_MT9D112_MCU_BOOT, 0x0500, WORD_LEN);
+	if (rc < 0)
+		goto init_probe_fail;
+
+	mdelay(5);
+
+	/* Micron Suggested - Power up block */
+	rc = mt9d112_i2c_write(mt9d112_client->addr,
+		REG_MT9D112_SENSOR_RESET, 0x0ACC, WORD_LEN);
+	if (rc < 0)
+		goto init_probe_fail;
+
+	rc = mt9d112_i2c_write(mt9d112_client->addr,
+		REG_MT9D112_STANDBY_CONTROL, 0x0008, WORD_LEN);
+	if (rc < 0)
+		goto init_probe_fail;
+
+	/* FUSED_DEFECT_CORRECTION */
+	rc = mt9d112_i2c_write(mt9d112_client->addr,
+		0x33F4, 0x031D, WORD_LEN);
+	if (rc < 0)
+		goto init_probe_fail;
+
+	mdelay(5);
+
+	/* Micron suggested Power up block End */
+	/* Read the Model ID of the sensor */
+	rc = mt9d112_i2c_read(mt9d112_client->addr,
+		REG_MT9D112_MODEL_ID, &model_id, WORD_LEN);
+	if (rc < 0)
+		goto init_probe_fail;
+
+	CDBG("mt9d112 model_id = 0x%x\n", model_id);
+
+	/* Check if it matches the value in the datasheet */
+	if (model_id != MT9D112_MODEL_ID) {
+		rc = -EINVAL;
+		goto init_probe_fail;
+	}
+
+	rc = mt9d112_reg_init();
+	if (rc < 0)
+		goto init_probe_fail;
+
+	return rc;
+
+init_probe_fail:
+	return rc;
+}
+
+int mt9d112_sensor_init(const struct msm_camera_sensor_info *data)
+{
+	int rc = 0;
+
+	mt9d112_ctrl = kzalloc(sizeof(struct mt9d112_ctrl), GFP_KERNEL);
+	if (!mt9d112_ctrl) {
+		CDBG("mt9d112_init failed!\n");
+		rc = -ENOMEM;
+		goto init_done;
+	}
+
+	if (data)
+		mt9d112_ctrl->sensordata = data;
+
+	/* Input MCLK = 24MHz */
+	msm_camio_clk_rate_set(24000000);
+	mdelay(5);
+
+	msm_camio_camif_pad_reg_reset();
+
+	rc = mt9d112_sensor_init_probe(data);
+	if (rc < 0) {
+		CDBG("mt9d112_sensor_init failed!\n");
+		goto init_fail;
+	}
+
+init_done:
+	return rc;
+
+init_fail:
+	kfree(mt9d112_ctrl);
+	return rc;
+}
+
+static int mt9d112_init_client(struct i2c_client *client)
+{
+	/* Initialize the MSM_CAMI2C Chip */
+	init_waitqueue_head(&mt9d112_wait_queue);
+	return 0;
+}
+
+int mt9d112_sensor_config(void __user *argp)
+{
+	struct sensor_cfg_data cfg_data;
+	long   rc = 0;
+
+	if (copy_from_user(&cfg_data,
+			(void *)argp,
+			sizeof(struct sensor_cfg_data)))
+		return -EFAULT;
+
+	/* down(&mt9d112_sem); */
+
+	CDBG("mt9d112_ioctl, cfgtype = %d, mode = %d\n",
+		cfg_data.cfgtype, cfg_data.mode);
+
+	switch (cfg_data.cfgtype) {
+	case CFG_SET_MODE:
+		rc = mt9d112_set_sensor_mode(cfg_data.mode);
+		break;
+
+	case CFG_SET_EFFECT:
+		rc = mt9d112_set_effect(cfg_data.mode,
+					cfg_data.cfg.effect);
+		break;
+
+	case CFG_GET_AF_MAX_STEPS:
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	/* up(&mt9d112_sem); */
+
+	return rc;
+}
+
+int mt9d112_sensor_release(void)
+{
+	int rc = 0;
+
+	/* down(&mt9d112_sem); */
+	gpio_set_value_cansleep(mt9d112_ctrl->sensordata->sensor_reset, 0);
+	msleep(20);
+	gpio_free(mt9d112_ctrl->sensordata->sensor_reset);
+	kfree(mt9d112_ctrl);
+	/* up(&mt9d112_sem); */
+
+	return rc;
+}
+
+static int mt9d112_i2c_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	int rc = 0;
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		rc = -ENOTSUPP;
+		goto probe_failure;
+	}
+
+	mt9d112_sensorw =
+		kzalloc(sizeof(struct mt9d112_work), GFP_KERNEL);
+
+	if (!mt9d112_sensorw) {
+		rc = -ENOMEM;
+		goto probe_failure;
+	}
+
+	i2c_set_clientdata(client, mt9d112_sensorw);
+	mt9d112_init_client(client);
+	mt9d112_client = client;
+
+	CDBG("mt9d112_probe succeeded!\n");
+
+	return 0;
+
+probe_failure:
+	kfree(mt9d112_sensorw);
+	mt9d112_sensorw = NULL;
+	CDBG("mt9d112_probe failed!\n");
+	return rc;
+}
+
+static const struct i2c_device_id mt9d112_i2c_id[] = {
+	{ "mt9d112", 0},
+	{ },
+};
+
+static struct i2c_driver mt9d112_i2c_driver = {
+	.id_table = mt9d112_i2c_id,
+	.probe  = mt9d112_i2c_probe,
+	.remove = __exit_p(mt9d112_i2c_remove),
+	.driver = {
+		.name = "mt9d112",
+	},
+};
+
+static int mt9d112_sensor_probe(const struct msm_camera_sensor_info *info,
+				struct msm_sensor_ctrl *s)
+{
+	int rc = i2c_add_driver(&mt9d112_i2c_driver);
+	if (rc < 0 || mt9d112_client == NULL) {
+		rc = -ENOTSUPP;
+		goto probe_done;
+	}
+
+	/* Input MCLK = 24MHz */
+	msm_camio_clk_rate_set(24000000);
+	mdelay(5);
+
+	rc = mt9d112_sensor_init_probe(info);
+	if (rc < 0) {
+		gpio_free(info->sensor_reset);
+		goto probe_done;
+	}
+	s->s_init = mt9d112_sensor_init;
+	s->s_release = mt9d112_sensor_release;
+	s->s_config  = mt9d112_sensor_config;
+	s->s_camera_type = FRONT_CAMERA_2D;
+	s->s_mount_angle  = 0;
+	gpio_set_value_cansleep(info->sensor_reset, 0);
+	msleep(20);
+	gpio_free(info->sensor_reset);
+
+probe_done:
+	CDBG("%s %s:%d\n", __FILE__, __func__, __LINE__);
+	return rc;
+}
+
+static int __mt9d112_probe(struct platform_device *pdev)
+{
+	return msm_camera_drv_start(pdev, mt9d112_sensor_probe);
+}
+
+static struct platform_driver msm_camera_driver = {
+	.probe = __mt9d112_probe,
+	.driver = {
+		.name = "msm_camera_mt9d112",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init mt9d112_init(void)
+{
+	return platform_driver_register(&msm_camera_driver);
+}
+
+module_init(mt9d112_init);
diff --git a/drivers/media/video/msm/mt9d112.h b/drivers/media/video/msm/mt9d112.h
new file mode 100644
index 0000000..309fcec
--- /dev/null
+++ b/drivers/media/video/msm/mt9d112.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MT9D112_H
+#define MT9D112_H
+
+#include <linux/types.h>
+#include <mach/camera.h>
+
+extern struct mt9d112_reg mt9d112_regs;
+
+enum mt9d112_width {
+	WORD_LEN,
+	BYTE_LEN
+};
+
+struct mt9d112_i2c_reg_conf {
+	unsigned short waddr;
+	unsigned short wdata;
+	enum mt9d112_width width;
+	unsigned short mdelay_time;
+};
+
+struct mt9d112_reg {
+	const struct register_address_value_pair *prev_snap_reg_settings;
+	uint16_t prev_snap_reg_settings_size;
+	const struct register_address_value_pair *noise_reduction_reg_settings;
+	uint16_t noise_reduction_reg_settings_size;
+	const struct mt9d112_i2c_reg_conf *plltbl;
+	uint16_t plltbl_size;
+	const struct mt9d112_i2c_reg_conf *stbl;
+	uint16_t stbl_size;
+	const struct mt9d112_i2c_reg_conf *rftbl;
+	uint16_t rftbl_size;
+};
+
+#endif /* MT9D112_H */
diff --git a/drivers/media/video/msm/mt9d112_reg.c b/drivers/media/video/msm/mt9d112_reg.c
new file mode 100644
index 0000000..24edaf2
--- /dev/null
+++ b/drivers/media/video/msm/mt9d112_reg.c
@@ -0,0 +1,319 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "mt9d112.h"
+
+
+struct register_address_value_pair const
+preview_snapshot_mode_reg_settings_array[] = {
+	{0x338C, 0x2703},
+	{0x3390, 800},    /* Output Width (P) = 800 */
+	{0x338C, 0x2705},
+	{0x3390, 600},    /* Output Height (P) = 600 */
+	{0x338C, 0x2707},
+	{0x3390, 0x0640}, /* Output Width (S) = 1600 */
+	{0x338C, 0x2709},
+	{0x3390, 0x04B0}, /* Output Height (S) = 1200 */
+	{0x338C, 0x270D},
+	{0x3390, 0x0000}, /* Row Start (P) = 0 */
+	{0x338C, 0x270F},
+	{0x3390, 0x0000}, /* Column Start (P) = 0 */
+	{0x338C, 0x2711},
+	{0x3390, 0x04BD}, /* Row End (P) = 1213 */
+	{0x338C, 0x2713},
+	{0x3390, 0x064D}, /* Column End (P) = 1613 */
+	{0x338C, 0x2715},
+	{0x3390, 0x0000}, /* Extra Delay (P) = 0 */
+	{0x338C, 0x2717},
+	{0x3390, 0x2111}, /* Row Speed (P) = 8465 */
+	{0x338C, 0x2719},
+	{0x3390, 0x046C}, /* Read Mode (P) = 1132 */
+	{0x338C, 0x271B},
+	{0x3390, 0x024F}, /* Sensor_Sample_Time_pck(P) = 591 */
+	{0x338C, 0x271D},
+	{0x3390, 0x0102}, /* Sensor_Fine_Correction(P) = 258 */
+	{0x338C, 0x271F},
+	{0x3390, 0x0279}, /* Sensor_Fine_IT_min(P) = 633 */
+	{0x338C, 0x2721},
+	{0x3390, 0x0155}, /* Sensor_Fine_IT_max_margin(P) = 341 */
+	{0x338C, 0x2723},
+	{0x3390, 659},    /* Frame Lines (P) = 659 */
+	{0x338C, 0x2725},
+	{0x3390, 0x061B}, /* Line Length (P) = 1563 */
+	{0x338C, 0x2727},
+	{0x3390, 0x2020},
+	{0x338C, 0x2729},
+	{0x3390, 0x2020},
+	{0x338C, 0x272B},
+	{0x3390, 0x1020},
+	{0x338C, 0x272D},
+	{0x3390, 0x2007},
+	{0x338C, 0x272F},
+	{0x3390, 0x0004}, /* Row Start(S) = 4 */
+	{0x338C, 0x2731},
+	{0x3390, 0x0004}, /* Column Start(S) = 4 */
+	{0x338C, 0x2733},
+	{0x3390, 0x04BB}, /* Row End(S) = 1211 */
+	{0x338C, 0x2735},
+	{0x3390, 0x064B}, /* Column End(S) = 1611 */
+	{0x338C, 0x2737},
+	{0x3390, 0x04CE}, /* Extra Delay(S) = 1230 */
+	{0x338C, 0x2739},
+	{0x3390, 0x2111}, /* Row Speed(S) = 8465 */
+	{0x338C, 0x273B},
+	{0x3390, 0x0024}, /* Read Mode(S) = 36 */
+	{0x338C, 0x273D},
+	{0x3390, 0x0120}, /* Sensor sample time pck(S) = 288 */
+	{0x338C, 0x2741},
+	{0x3390, 0x0169}, /* Sensor_Fine_IT_min(P) = 361 */
+	{0x338C, 0x2745},
+	{0x3390, 0x04FF}, /* Frame Lines(S) = 1279 */
+	{0x338C, 0x2747},
+	{0x3390, 0x0824}, /* Line Length(S) = 2084 */
+	{0x338C, 0x2751},
+	{0x3390, 0x0000}, /* Crop_X0(P) = 0 */
+	{0x338C, 0x2753},
+	{0x3390, 0x0320}, /* Crop_X1(P) = 800 */
+	{0x338C, 0x2755},
+	{0x3390, 0x0000}, /* Crop_Y0(P) = 0 */
+	{0x338C, 0x2757},
+	{0x3390, 0x0258}, /* Crop_Y1(P) = 600 */
+	{0x338C, 0x275F},
+	{0x3390, 0x0000}, /* Crop_X0(S) = 0 */
+	{0x338C, 0x2761},
+	{0x3390, 0x0640}, /* Crop_X1(S) = 1600 */
+	{0x338C, 0x2763},
+	{0x3390, 0x0000}, /* Crop_Y0(S) = 0 */
+	{0x338C, 0x2765},
+	{0x3390, 0x04B0}, /* Crop_Y1(S) = 1200 */
+	{0x338C, 0x222E},
+	{0x3390, 0x00A0}, /* R9 Step = 160 */
+	{0x338C, 0xA408},
+	{0x3390, 0x001F},
+	{0x338C, 0xA409},
+	{0x3390, 0x0021},
+	{0x338C, 0xA40A},
+	{0x3390, 0x0025},
+	{0x338C, 0xA40B},
+	{0x3390, 0x0027},
+	{0x338C, 0x2411},
+	{0x3390, 0x00A0},
+	{0x338C, 0x2413},
+	{0x3390, 0x00C0},
+	{0x338C, 0x2415},
+	{0x3390, 0x00A0},
+	{0x338C, 0x2417},
+	{0x3390, 0x00C0},
+	{0x338C, 0x2799},
+	{0x3390, 0x6408}, /* MODE_SPEC_EFFECTS(P) */
+	{0x338C, 0x279B},
+	{0x3390, 0x6408}, /* MODE_SPEC_EFFECTS(S) */
+};
+
+static struct register_address_value_pair const
+noise_reduction_reg_settings_array[] = {
+	{0x338C, 0xA76D},
+	{0x3390, 0x0003},
+	{0x338C, 0xA76E},
+	{0x3390, 0x0003},
+	{0x338C, 0xA76F},
+	{0x3390, 0},
+	{0x338C, 0xA770},
+	{0x3390, 21},
+	{0x338C, 0xA771},
+	{0x3390, 37},
+	{0x338C, 0xA772},
+	{0x3390, 63},
+	{0x338C, 0xA773},
+	{0x3390, 100},
+	{0x338C, 0xA774},
+	{0x3390, 128},
+	{0x338C, 0xA775},
+	{0x3390, 151},
+	{0x338C, 0xA776},
+	{0x3390, 169},
+	{0x338C, 0xA777},
+	{0x3390, 186},
+	{0x338C, 0xA778},
+	{0x3390, 199},
+	{0x338C, 0xA779},
+	{0x3390, 210},
+	{0x338C, 0xA77A},
+	{0x3390, 220},
+	{0x338C, 0xA77B},
+	{0x3390, 228},
+	{0x338C, 0xA77C},
+	{0x3390, 234},
+	{0x338C, 0xA77D},
+	{0x3390, 240},
+	{0x338C, 0xA77E},
+	{0x3390, 244},
+	{0x338C, 0xA77F},
+	{0x3390, 248},
+	{0x338C, 0xA780},
+	{0x3390, 252},
+	{0x338C, 0xA781},
+	{0x3390, 255},
+	{0x338C, 0xA782},
+	{0x3390, 0},
+	{0x338C, 0xA783},
+	{0x3390, 21},
+	{0x338C, 0xA784},
+	{0x3390, 37},
+	{0x338C, 0xA785},
+	{0x3390, 63},
+	{0x338C, 0xA786},
+	{0x3390, 100},
+	{0x338C, 0xA787},
+	{0x3390, 128},
+	{0x338C, 0xA788},
+	{0x3390, 151},
+	{0x338C, 0xA789},
+	{0x3390, 169},
+	{0x338C, 0xA78A},
+	{0x3390, 186},
+	{0x338C, 0xA78B},
+	{0x3390, 199},
+	{0x338C, 0xA78C},
+	{0x3390, 210},
+	{0x338C, 0xA78D},
+	{0x3390, 220},
+	{0x338C, 0xA78E},
+	{0x3390, 228},
+	{0x338C, 0xA78F},
+	{0x3390, 234},
+	{0x338C, 0xA790},
+	{0x3390, 240},
+	{0x338C, 0xA791},
+	{0x3390, 244},
+	{0x338C, 0xA793},
+	{0x3390, 252},
+	{0x338C, 0xA794},
+	{0x3390, 255},
+	{0x338C, 0xA103},
+	{0x3390, 6},
+};
+
+static const struct mt9d112_i2c_reg_conf lens_roll_off_tbl[] = {
+	{ 0x34CE, 0x81A0, WORD_LEN, 0 },
+	{ 0x34D0, 0x6331, WORD_LEN, 0 },
+	{ 0x34D2, 0x3394, WORD_LEN, 0 },
+	{ 0x34D4, 0x9966, WORD_LEN, 0 },
+	{ 0x34D6, 0x4B25, WORD_LEN, 0 },
+	{ 0x34D8, 0x2670, WORD_LEN, 0 },
+	{ 0x34DA, 0x724C, WORD_LEN, 0 },
+	{ 0x34DC, 0xFFFD, WORD_LEN, 0 },
+	{ 0x34DE, 0x00CA, WORD_LEN, 0 },
+	{ 0x34E6, 0x00AC, WORD_LEN, 0 },
+	{ 0x34EE, 0x0EE1, WORD_LEN, 0 },
+	{ 0x34F6, 0x0D87, WORD_LEN, 0 },
+	{ 0x3500, 0xE1F7, WORD_LEN, 0 },
+	{ 0x3508, 0x1CF4, WORD_LEN, 0 },
+	{ 0x3510, 0x1D28, WORD_LEN, 0 },
+	{ 0x3518, 0x1F26, WORD_LEN, 0 },
+	{ 0x3520, 0x2220, WORD_LEN, 0 },
+	{ 0x3528, 0x333D, WORD_LEN, 0 },
+	{ 0x3530, 0x15D9, WORD_LEN, 0 },
+	{ 0x3538, 0xCFB8, WORD_LEN, 0 },
+	{ 0x354C, 0x05FE, WORD_LEN, 0 },
+	{ 0x3544, 0x05F8, WORD_LEN, 0 },
+	{ 0x355C, 0x0596, WORD_LEN, 0 },
+	{ 0x3554, 0x0611, WORD_LEN, 0 },
+	{ 0x34E0, 0x00F2, WORD_LEN, 0 },
+	{ 0x34E8, 0x00A8, WORD_LEN, 0 },
+	{ 0x34F0, 0x0F7B, WORD_LEN, 0 },
+	{ 0x34F8, 0x0CD7, WORD_LEN, 0 },
+	{ 0x3502, 0xFEDB, WORD_LEN, 0 },
+	{ 0x350A, 0x13E4, WORD_LEN, 0 },
+	{ 0x3512, 0x1F2C, WORD_LEN, 0 },
+	{ 0x351A, 0x1D20, WORD_LEN, 0 },
+	{ 0x3522, 0x2422, WORD_LEN, 0 },
+	{ 0x352A, 0x2925, WORD_LEN, 0 },
+	{ 0x3532, 0x1D04, WORD_LEN, 0 },
+	{ 0x353A, 0xFBF2, WORD_LEN, 0 },
+	{ 0x354E, 0x0616, WORD_LEN, 0 },
+	{ 0x3546, 0x0597, WORD_LEN, 0 },
+	{ 0x355E, 0x05CD, WORD_LEN, 0 },
+	{ 0x3556, 0x0529, WORD_LEN, 0 },
+	{ 0x34E4, 0x00B2, WORD_LEN, 0 },
+	{ 0x34EC, 0x005E, WORD_LEN, 0 },
+	{ 0x34F4, 0x0F43, WORD_LEN, 0 },
+	{ 0x34FC, 0x0E2F, WORD_LEN, 0 },
+	{ 0x3506, 0xF9FC, WORD_LEN, 0 },
+	{ 0x350E, 0x0CE4, WORD_LEN, 0 },
+	{ 0x3516, 0x1E1E, WORD_LEN, 0 },
+	{ 0x351E, 0x1B19, WORD_LEN, 0 },
+	{ 0x3526, 0x151B, WORD_LEN, 0 },
+	{ 0x352E, 0x1416, WORD_LEN, 0 },
+	{ 0x3536, 0x10FC, WORD_LEN, 0 },
+	{ 0x353E, 0xC018, WORD_LEN, 0 },
+	{ 0x3552, 0x06B4, WORD_LEN, 0 },
+	{ 0x354A, 0x0506, WORD_LEN, 0 },
+	{ 0x3562, 0x06AB, WORD_LEN, 0 },
+	{ 0x355A, 0x063A, WORD_LEN, 0 },
+	{ 0x34E2, 0x00E5, WORD_LEN, 0 },
+	{ 0x34EA, 0x008B, WORD_LEN, 0 },
+	{ 0x34F2, 0x0E4C, WORD_LEN, 0 },
+	{ 0x34FA, 0x0CA3, WORD_LEN, 0 },
+	{ 0x3504, 0x0907, WORD_LEN, 0 },
+	{ 0x350C, 0x1DFD, WORD_LEN, 0 },
+	{ 0x3514, 0x1E24, WORD_LEN, 0 },
+	{ 0x351C, 0x2529, WORD_LEN, 0 },
+	{ 0x3524, 0x1D20, WORD_LEN, 0 },
+	{ 0x352C, 0x2332, WORD_LEN, 0 },
+	{ 0x3534, 0x10E9, WORD_LEN, 0 },
+	{ 0x353C, 0x0BCB, WORD_LEN, 0 },
+	{ 0x3550, 0x04EF, WORD_LEN, 0 },
+	{ 0x3548, 0x0609, WORD_LEN, 0 },
+	{ 0x3560, 0x0580, WORD_LEN, 0 },
+	{ 0x3558, 0x05DD, WORD_LEN, 0 },
+	{ 0x3540, 0x0000, WORD_LEN, 0 },
+	{ 0x3542, 0x0000, WORD_LEN, 0 }
+};
+
+static const struct mt9d112_i2c_reg_conf pll_setup_tbl[] = {
+	{ 0x341E, 0x8F09, WORD_LEN, 0 },
+	{ 0x341C, 0x0250, WORD_LEN, 0 },
+	{ 0x341E, 0x8F09, WORD_LEN, 5 },
+	{ 0x341E, 0x8F08, WORD_LEN, 0 }
+};
+
+/* Refresh Sequencer */
+static const struct mt9d112_i2c_reg_conf sequencer_tbl[] = {
+	{ 0x338C, 0x2799, WORD_LEN, 0},
+	{ 0x3390, 0x6440, WORD_LEN, 5},
+	{ 0x338C, 0x279B, WORD_LEN, 0},
+	{ 0x3390, 0x6440, WORD_LEN, 5},
+	{ 0x338C, 0xA103, WORD_LEN, 0},
+	{ 0x3390, 0x0005, WORD_LEN, 5},
+	{ 0x338C, 0xA103, WORD_LEN, 0},
+	{ 0x3390, 0x0006, WORD_LEN, 5}
+};
+
+struct mt9d112_reg mt9d112_regs = {
+	.prev_snap_reg_settings = &preview_snapshot_mode_reg_settings_array[0],
+	.prev_snap_reg_settings_size = ARRAY_SIZE(
+		preview_snapshot_mode_reg_settings_array),
+	.noise_reduction_reg_settings = &noise_reduction_reg_settings_array[0],
+	.noise_reduction_reg_settings_size = ARRAY_SIZE(
+		noise_reduction_reg_settings_array),
+	.plltbl = pll_setup_tbl,
+	.plltbl_size = ARRAY_SIZE(pll_setup_tbl),
+	.stbl = sequencer_tbl,
+	.stbl_size = ARRAY_SIZE(sequencer_tbl),
+	.rftbl = lens_roll_off_tbl,
+	.rftbl_size = ARRAY_SIZE(lens_roll_off_tbl)
+};
+
+
+
diff --git a/drivers/media/video/msm/mt9d113.c b/drivers/media/video/msm/mt9d113.c
new file mode 100644
index 0000000..a6b6a28
--- /dev/null
+++ b/drivers/media/video/msm/mt9d113.c
@@ -0,0 +1,664 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <media/msm_camera.h>
+#include <mach/gpio.h>
+#include "mt9d113.h"
+
+/* Micron MT9D113 Registers and their values */
+#define  REG_MT9D113_MODEL_ID	0x0000
+#define  MT9D113_MODEL_ID		0x2580
+#define Q8						0x00000100
+
+struct mt9d113_work {
+	struct work_struct work;
+};
+
+static struct  mt9d113_work *mt9d113_sensorw;
+static struct  i2c_client *mt9d113_client;
+
+struct mt9d113_ctrl {
+	const struct msm_camera_sensor_info *sensordata;
+	uint32_t sensormode;
+	uint32_t fps_divider;/* init to 1 * 0x00000400 */
+	uint32_t pict_fps_divider;/* init to 1 * 0x00000400 */
+	uint16_t fps;
+	uint16_t curr_step_pos;
+	uint16_t my_reg_gain;
+	uint32_t my_reg_line_count;
+	uint16_t total_lines_per_frame;
+	uint16_t config_csi;
+	enum mt9d113_resolution_t prev_res;
+	enum mt9d113_resolution_t pict_res;
+	enum mt9d113_resolution_t curr_res;
+	enum mt9d113_test_mode_t  set_test;
+};
+
+static struct mt9d113_ctrl *mt9d113_ctrl;
+
+static DECLARE_WAIT_QUEUE_HEAD(mt9d113_wait_queue);
+DEFINE_MUTEX(mt9d113_mut);
+
+static int mt9d113_i2c_rxdata(unsigned short saddr,
+				unsigned char *rxdata, int length)
+{
+	struct i2c_msg msgs[] = {
+		{
+			.addr   = saddr,
+			.flags = 0,
+			.len   = 2,
+			.buf   = rxdata,
+		},
+		{
+			.addr   = saddr,
+			.flags = I2C_M_RD,
+			.len   = length,
+			.buf   = rxdata,
+		},
+	};
+	if (i2c_transfer(mt9d113_client->adapter, msgs, 2) < 0) {
+		CDBG("mt9d113_i2c_rxdata failed!\n");
+		return -EIO;
+	}
+	return 0;
+}
+
+static int32_t mt9d113_i2c_read(unsigned short   saddr,
+				unsigned short raddr,
+				unsigned short *rdata,
+				enum mt9d113_width width)
+{
+	int32_t rc = 0;
+	unsigned char buf[4];
+	if (!rdata)
+		return -EIO;
+	memset(buf, 0, sizeof(buf));
+	switch (width) {
+	case WORD_LEN: {
+			buf[0] = (raddr & 0xFF00)>>8;
+			buf[1] = (raddr & 0x00FF);
+			rc = mt9d113_i2c_rxdata(saddr, buf, 2);
+			if (rc < 0)
+				return rc;
+			*rdata = buf[0] << 8 | buf[1];
+		}
+		break;
+	default:
+		break;
+	}
+	if (rc < 0)
+		CDBG("mt9d113_i2c_read failed !\n");
+	return rc;
+}
+
+static int32_t mt9d113_i2c_txdata(unsigned short saddr,
+				unsigned char *txdata, int length)
+{
+	struct i2c_msg msg[] = {
+		{
+			.addr = saddr,
+			.flags = 0,
+			.len = length,
+			.buf = txdata,
+		},
+	};
+	if (i2c_transfer(mt9d113_client->adapter, msg, 1) < 0) {
+		CDBG("mt9d113_i2c_txdata failed\n");
+		return -EIO;
+	}
+	return 0;
+}
+
+static int32_t mt9d113_i2c_write(unsigned short saddr,
+				unsigned short waddr,
+				unsigned short wdata,
+				enum mt9d113_width width)
+{
+	int32_t rc = -EIO;
+	unsigned char buf[4];
+	memset(buf, 0, sizeof(buf));
+	switch (width) {
+	case WORD_LEN: {
+			buf[0] = (waddr & 0xFF00)>>8;
+			buf[1] = (waddr & 0x00FF);
+			buf[2] = (wdata & 0xFF00)>>8;
+			buf[3] = (wdata & 0x00FF);
+			rc = mt9d113_i2c_txdata(saddr, buf, 4);
+		}
+		break;
+	case BYTE_LEN: {
+			buf[0] = waddr;
+			buf[1] = wdata;
+			rc = mt9d113_i2c_txdata(saddr, buf, 2);
+		}
+		break;
+	default:
+		break;
+	}
+	if (rc < 0)
+		printk(KERN_ERR
+			"i2c_write failed, addr = 0x%x, val = 0x%x!\n",
+			waddr, wdata);
+	return rc;
+}
+
+static int32_t mt9d113_i2c_write_table(
+				struct mt9d113_i2c_reg_conf
+				const *reg_conf_tbl,
+				int num_of_items_in_table)
+{
+	int i;
+	int32_t rc = -EIO;
+	for (i = 0; i < num_of_items_in_table; i++) {
+		rc = mt9d113_i2c_write(mt9d113_client->addr,
+				reg_conf_tbl->waddr, reg_conf_tbl->wdata,
+				WORD_LEN);
+		if (rc < 0)
+			break;
+		reg_conf_tbl++;
+	}
+	return rc;
+}
+
+static long mt9d113_reg_init(void)
+{
+	uint16_t data = 0;
+	int32_t rc = 0;
+	int count = 0;
+	struct msm_camera_csi_params mt9d113_csi_params;
+	if (!mt9d113_ctrl->config_csi) {
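+		/* One-time MIPI CSI receiver setup: a single data lane,
+		 * 8-bit data format and the default lane mapping (0xe4).
+		 */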
+		mt9d113_csi_params.lane_cnt = 1;
+		mt9d113_csi_params.data_format = CSI_8BIT;
+		mt9d113_csi_params.lane_assign = 0xe4;
+		mt9d113_csi_params.dpcm_scheme = 0;
+		mt9d113_csi_params.settle_cnt = 0x14;
+		rc = msm_camio_csi_config(&mt9d113_csi_params);
+		mt9d113_ctrl->config_csi = 1;
+		msleep(50);
+	}
+	/* Disable parallel and enable mipi*/
+	rc = mt9d113_i2c_write(mt9d113_client->addr,
+				0x001A,
+				0x0051, WORD_LEN);
+	rc = mt9d113_i2c_write(mt9d113_client->addr,
+				0x001A,
+				0x0050,
+				WORD_LEN);
+	msleep(20);
+	rc = mt9d113_i2c_write(mt9d113_client->addr,
+				0x001A,
+				0x0058,
+				WORD_LEN);
+
+	/* Preset pll settings begin*/
+	rc = mt9d113_i2c_write_table(&mt9d113_regs.pll_tbl[0],
+				mt9d113_regs.pll_tbl_size);
+	if (rc < 0)
+		return rc;
+	rc = mt9d113_i2c_read(mt9d113_client->addr,
+				0x0014, &data, WORD_LEN);
+	data = data & 0x8000;
+	/* Poll until bit 15 of register 0x0014 is set (PLL locked),
+	 * or give up after 100 iterations.
+	 */
+	while (data == 0x0000) {
+		data = 0;
+		rc = mt9d113_i2c_read(mt9d113_client->addr,
+				0x0014, &data, WORD_LEN);
+		data = data & 0x8000;
+		usleep_range(11000, 12000);
+		count++;
+		if (count == 100) {
+			CDBG(" Timeout:1\n");
+			break;
+		}
+	}
+	rc = mt9d113_i2c_write(mt9d113_client->addr,
+				0x0014,
+				0x20FA,
+				WORD_LEN);
+
+	/*Preset pll Ends*/
+	mt9d113_i2c_write(mt9d113_client->addr,
+				0x0018,
+				0x402D,
+				WORD_LEN);
+
+	mt9d113_i2c_write(mt9d113_client->addr,
+				0x0018,
+				0x402C,
+				WORD_LEN);
+	/*POLL_REG=0x0018,0x4000,!=0x0000,DELAY=10,TIMEOUT=100*/
+	data = 0;
+	rc = mt9d113_i2c_read(mt9d113_client->addr,
+		0x0018, &data, WORD_LEN);
+	data = data & 0x4000;
+	count = 0;
+	while (data != 0x0000) {
+		rc = mt9d113_i2c_read(mt9d113_client->addr,
+			0x0018, &data, WORD_LEN);
+		data = data & 0x4000;
+		CDBG(" data is %d\n" , data);
+		usleep_range(11000, 12000);
+		count++;
+		if (count == 100) {
+			CDBG(" Loop2 timeout: MT9D113\n");
+			break;
+		}
+		CDBG(" Not streaming\n");
+	}
+	CDBG("MT9D113: Start stream\n");
+	/*Preset Register Wizard Conf*/
+	rc = mt9d113_i2c_write_table(&mt9d113_regs.register_tbl[0],
+				mt9d113_regs.register_tbl_size);
+	if (rc < 0)
+		return rc;
+	rc = mt9d113_i2c_write_table(&mt9d113_regs.err_tbl[0],
+				mt9d113_regs.err_tbl_size);
+	if (rc < 0)
+		return rc;
+	rc = mt9d113_i2c_write_table(&mt9d113_regs.eeprom_tbl[0],
+				mt9d113_regs.eeprom_tbl_size);
+	if (rc < 0)
+		return rc;
+
+	rc = mt9d113_i2c_write_table(&mt9d113_regs.low_light_tbl[0],
+				mt9d113_regs.low_light_tbl_size);
+	if (rc < 0)
+		return rc;
+
+	rc = mt9d113_i2c_write_table(&mt9d113_regs.awb_tbl[0],
+				mt9d113_regs.awb_tbl_size);
+	if (rc < 0)
+		return rc;
+
+	rc = mt9d113_i2c_write_table(&mt9d113_regs.patch_tbl[0],
+				mt9d113_regs.patch_tbl_size);
+	if (rc < 0)
+		return rc;
+
+	/*check patch load*/
+	mt9d113_i2c_write(mt9d113_client->addr,
+				0x098C,
+				0xA024,
+				WORD_LEN);
+	count = 0;
+	/* To check that the patch loaded properly, poll register 0x0990
+	 * until it reads non-zero or until the timeout expires.
+	 */
+	data = 0;
+	rc = mt9d113_i2c_read(mt9d113_client->addr,
+				0x0990, &data, WORD_LEN);
+	while (data == 0) {
+		data = 0;
+		rc = mt9d113_i2c_read(mt9d113_client->addr,
+				0x0990, &data, WORD_LEN);
+		usleep_range(11000, 12000);
+		count++;
+		if (count == 100) {
+			CDBG("Timeout in patch loading\n");
+			break;
+		}
+	}
+	/* BITFIELD=0x0018, 0x0004, 0 */
+	/*Preset continue begin */
+	rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0018, 0x0028,
+				WORD_LEN);
+	CDBG(" mt9d113 wait for seq done\n");
+	/* Synchronize the FW with the sensor:
+	 * MCU_ADDRESS [SEQ_CMD] */
+	rc = mt9d113_i2c_write(mt9d113_client->addr,
+				0x098C, 0xA103, WORD_LEN);
+	rc = mt9d113_i2c_write(mt9d113_client->addr,
+				0x0990, 0x0006, WORD_LEN);
+	/* mt9d113 wait for seq done,
+	 * synchronize the FW with the sensor */
+	msleep(20);
+	/*Preset continue end */
+	CDBG(" MT9D113: Preset continue end\n");
+	rc = mt9d113_i2c_write(mt9d113_client->addr,
+				0x0012,
+				0x00F5,
+				WORD_LEN);
+	/*continue begin */
+	CDBG(" MT9D113: Preset continue begin\n");
+	rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0018, 0x0028 ,
+				WORD_LEN);
+	/* mt9d113 wait for seq done,
+	 * synchronize the FW with the sensor
+	 * MCU_ADDRESS [SEQ_CMD] */
+	msleep(20);
+	rc = mt9d113_i2c_write(mt9d113_client->addr,
+				0x098C, 0xA103, WORD_LEN);
+	/* MCU DATA */
+	rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990,
+				0x0006, WORD_LEN);
+	/* mt9d113 wait for seq done,
+	 * synchronize the FW with the sensor */
+	/* MCU_ADDRESS [SEQ_CMD] */
+	msleep(20);
+	/*Preset continue end*/
+	return rc;
+
+}
+
+static long mt9d113_set_sensor_mode(int mode)
+{
+	long rc = 0;
+	switch (mode) {
+	case SENSOR_PREVIEW_MODE:
+		rc = mt9d113_reg_init();
+		CDBG("MT9D113: configure to preview begin\n");
+		rc =
+		mt9d113_i2c_write(mt9d113_client->addr,
+						0x098C, 0xA115, WORD_LEN);
+		if (rc < 0)
+			return rc;
+		rc =
+		mt9d113_i2c_write(mt9d113_client->addr,
+						0x0990, 0x0000, WORD_LEN);
+		if (rc < 0)
+			return rc;
+		rc =
+		mt9d113_i2c_write(mt9d113_client->addr,
+						0x098C, 0xA103, WORD_LEN);
+		if (rc < 0)
+			return rc;
+		rc =
+		mt9d113_i2c_write(mt9d113_client->addr,
+						0x098C, 0x0001, WORD_LEN);
+		if (rc < 0)
+			return rc;
+		break;
+	case SENSOR_SNAPSHOT_MODE:
+	case SENSOR_RAW_SNAPSHOT_MODE:
+		rc =
+		mt9d113_i2c_write(mt9d113_client->addr,
+						0x098C, 0xA115, WORD_LEN);
+		rc =
+		mt9d113_i2c_write(mt9d113_client->addr,
+						0x098C, 0x0002, WORD_LEN);
+		rc =
+		mt9d113_i2c_write(mt9d113_client->addr,
+						0x098C, 0xA103, WORD_LEN);
+		rc =
+		mt9d113_i2c_write(mt9d113_client->addr,
+						0x098C, 0x0002, WORD_LEN);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int mt9d113_sensor_init_probe(const struct msm_camera_sensor_info
+				*data)
+{
+	uint16_t model_id = 0;
+	int rc = 0;
+	/* Read the Model ID of the sensor */
+	rc = mt9d113_i2c_read(mt9d113_client->addr,
+						REG_MT9D113_MODEL_ID,
+						&model_id, WORD_LEN);
+	if (rc < 0)
+		goto init_probe_fail;
+	/* Check if it matches the value in the datasheet */
+	if (model_id != MT9D113_MODEL_ID)
+		printk(KERN_INFO "mt9d113 model_id = 0x%x\n", model_id);
+	if (rc < 0)
+		goto init_probe_fail;
+	return rc;
+init_probe_fail:
+	printk(KERN_INFO "probe fail\n");
+	return rc;
+}
+
+static int mt9d113_init_client(struct i2c_client *client)
+{
+	/* Initialize the MSM_CAMI2C Chip */
+	init_waitqueue_head(&mt9d113_wait_queue);
+	return 0;
+}
+
+int mt9d113_sensor_config(void __user *argp)
+{
+	struct sensor_cfg_data cfg_data;
+	long rc = 0;
+
+	if (copy_from_user(&cfg_data,
+					(void *)argp,
+					(sizeof(struct sensor_cfg_data))))
+		return -EFAULT;
+	mutex_lock(&mt9d113_mut);
+	CDBG("mt9d113_ioctl, cfgtype = %d, mode = %d\n",
+		 cfg_data.cfgtype, cfg_data.mode);
+	switch (cfg_data.cfgtype) {
+	case CFG_SET_MODE:
+		rc = mt9d113_set_sensor_mode(
+						cfg_data.mode);
+		break;
+	case CFG_SET_EFFECT:
+		return rc;
+	case CFG_GET_AF_MAX_STEPS:
+	default:
+		rc = -EINVAL;
+		break;
+	}
+	mutex_unlock(&mt9d113_mut);
+	return rc;
+}
+
+int mt9d113_sensor_release(void)
+{
+	int rc = 0;
+
+	mutex_lock(&mt9d113_mut);
+	gpio_set_value_cansleep(mt9d113_ctrl->sensordata->sensor_reset, 0);
+	msleep(20);
+	gpio_free(mt9d113_ctrl->sensordata->sensor_reset);
+	kfree(mt9d113_ctrl);
+	mutex_unlock(&mt9d113_mut);
+
+	return rc;
+}
+
+static int mt9d113_probe_init_done(const struct msm_camera_sensor_info
+				*data)
+{
+	gpio_free(data->sensor_reset);
+	return 0;
+}
+
+static int mt9d113_probe_init_sensor(const struct msm_camera_sensor_info
+				*data)
+{
+	int32_t rc = 0;
+	uint16_t chipid = 0;
+	rc = gpio_request(data->sensor_pwd, "mt9d113");
+	if (!rc) {
+		printk(KERN_INFO "sensor_pwd = %d\n", rc);
+		gpio_direction_output(data->sensor_pwd, 0);
+		usleep_range(11000, 12000);
+	} else {
+		goto init_probe_done;
+	}
+	msleep(20);
+	rc = gpio_request(data->sensor_reset, "mt9d113");
+	printk(KERN_INFO " mt9d113_probe_init_sensor\n");
+	if (!rc) {
+		printk(KERN_INFO "sensor_reset = %d\n", rc);
+		gpio_direction_output(data->sensor_reset, 0);
+		usleep_range(11000, 12000);
+		gpio_set_value_cansleep(data->sensor_reset, 1);
+		usleep_range(11000, 12000);
+	} else
+		goto init_probe_done;
+	printk(KERN_INFO " mt9d113_probe_init_sensor called\n");
+	rc = mt9d113_i2c_read(mt9d113_client->addr, REG_MT9D113_MODEL_ID,
+						&chipid, 2);
+	if (rc < 0)
+		goto init_probe_fail;
+	/*Compare sensor ID to MT9D113 ID: */
+	if (chipid != MT9D113_MODEL_ID) {
+		printk(KERN_INFO "mt9d113_probe_init_sensor chip id is %d\n",
+			chipid);
+	}
+	CDBG("mt9d113_probe_init_sensor Success\n");
+	goto init_probe_done;
+init_probe_fail:
+	CDBG(" mt9d113_probe_init_sensor fails\n");
+	gpio_set_value_cansleep(data->sensor_reset, 0);
+	mt9d113_probe_init_done(data);
+init_probe_done:
+	printk(KERN_INFO " mt9d113_probe_init_sensor finishes\n");
+	return rc;
+}
+
+static int mt9d113_i2c_probe(struct i2c_client *client,
+				const struct i2c_device_id *id)
+{
+	int rc = 0;
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		rc = -ENOTSUPP;
+		goto probe_failure;
+	}
+	mt9d113_sensorw =
+	kzalloc(sizeof(struct mt9d113_work), GFP_KERNEL);
+	if (!mt9d113_sensorw) {
+		rc = -ENOMEM;
+		goto probe_failure;
+	}
+	i2c_set_clientdata(client, mt9d113_sensorw);
+	mt9d113_init_client(client);
+	mt9d113_client = client;
+	CDBG("mt9d113_probe succeeded!\n");
+	return 0;
+probe_failure:
+	kfree(mt9d113_sensorw);
+	mt9d113_sensorw = NULL;
+	CDBG("mt9d113_probe failed!\n");
+	return rc;
+}
+
+static const struct i2c_device_id mt9d113_i2c_id[] = {
+	{ "mt9d113", 0},
+	{},
+};
+
+static struct i2c_driver mt9d113_i2c_driver = {
+	.id_table = mt9d113_i2c_id,
+	.probe  = mt9d113_i2c_probe,
+	.remove = __exit_p(mt9d113_i2c_remove),
+	.driver = {
+		.name = "mt9d113",
+	},
+};
+
+int mt9d113_sensor_open_init(const struct msm_camera_sensor_info *data)
+{
+	int32_t rc = 0;
+	mt9d113_ctrl = kzalloc(sizeof(struct mt9d113_ctrl), GFP_KERNEL);
+	if (!mt9d113_ctrl) {
+		printk(KERN_INFO "mt9d113_init failed!\n");
+		rc = -ENOMEM;
+		goto init_done;
+	}
+	mt9d113_ctrl->fps_divider = 1 * 0x00000400;
+	mt9d113_ctrl->pict_fps_divider = 1 * 0x00000400;
+	mt9d113_ctrl->set_test = TEST_OFF;
+	mt9d113_ctrl->config_csi = 0;
+	mt9d113_ctrl->prev_res = QTR_SIZE;
+	mt9d113_ctrl->pict_res = FULL_SIZE;
+	mt9d113_ctrl->curr_res = INVALID_SIZE;
+	if (data)
+		mt9d113_ctrl->sensordata = data;
+	if (rc < 0) {
+		printk(KERN_INFO "mt9d113_sensor_open_init fail\n");
+		return rc;
+	}
+	/* enable mclk first */
+	msm_camio_clk_rate_set(24000000);
+	msleep(20);
+	rc = mt9d113_probe_init_sensor(data);
+	if (rc < 0)
+		goto init_fail;
+	mt9d113_ctrl->fps = 30 * Q8;
+	rc = mt9d113_sensor_init_probe(data);
+	if (rc < 0) {
+		gpio_set_value_cansleep(data->sensor_reset, 0);
+		goto init_fail;
+	} else
+		printk(KERN_ERR "%s: %d\n", __func__, __LINE__);
+	goto init_done;
+init_fail:
+	printk(KERN_INFO "init_fail\n");
+	mt9d113_probe_init_done(data);
+init_done:
+	CDBG("init_done\n");
+	return rc;
+}
+
+static int mt9d113_sensor_probe(const struct msm_camera_sensor_info
+				*info,
+				struct msm_sensor_ctrl *s)
+{
+	int rc = 0;
+	rc = i2c_add_driver(&mt9d113_i2c_driver);
+	if (rc < 0 || mt9d113_client == NULL) {
+		rc = -ENOTSUPP;
+		goto probe_fail;
+	}
+	msm_camio_clk_rate_set(24000000);
+	usleep_range(5000, 6000);
+	rc = mt9d113_probe_init_sensor(info);
+	if (rc < 0)
+		goto probe_fail;
+	s->s_init = mt9d113_sensor_open_init;
+	s->s_release = mt9d113_sensor_release;
+	s->s_config  = mt9d113_sensor_config;
+	s->s_camera_type = FRONT_CAMERA_2D;
+	s->s_mount_angle  = 0;
+	gpio_set_value_cansleep(info->sensor_reset, 0);
+	mt9d113_probe_init_done(info);
+	return rc;
+probe_fail:
+	printk(KERN_INFO "mt9d113_sensor_probe: SENSOR PROBE FAILS!\n");
+	return rc;
+}
+
+static int __mt9d113_probe(struct platform_device *pdev)
+{
+	return msm_camera_drv_start(pdev, mt9d113_sensor_probe);
+}
+
+static struct platform_driver msm_camera_driver = {
+	.probe = __mt9d113_probe,
+	.driver = {
+		.name = "msm_camera_mt9d113",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init mt9d113_init(void)
+{
+	return platform_driver_register(&msm_camera_driver);
+}
+
+module_init(mt9d113_init);
+
+MODULE_DESCRIPTION("Micron 2MP YUV sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/msm/mt9d113.h b/drivers/media/video/msm/mt9d113.h
new file mode 100644
index 0000000..f22f16c
--- /dev/null
+++ b/drivers/media/video/msm/mt9d113.h
@@ -0,0 +1,66 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MT9D113_H
+#define MT9D113_H
+
+#include <linux/types.h>
+#include <mach/camera.h>
+
+extern struct mt9d113_reg mt9d113_regs;
+
+enum mt9d113_width {
+	WORD_LEN,
+	BYTE_LEN
+};
+
+struct mt9d113_i2c_reg_conf {
+	unsigned short waddr;
+	unsigned short wdata;
+};
+
+struct mt9d113_reg {
+	const struct mt9d113_i2c_reg_conf *pll_tbl;
+	uint16_t pll_tbl_size;
+	const struct mt9d113_i2c_reg_conf *register_tbl;
+	uint16_t register_tbl_size;
+	const struct mt9d113_i2c_reg_conf *err_tbl;
+	uint16_t err_tbl_size;
+	const struct mt9d113_i2c_reg_conf *low_light_tbl;
+	uint16_t low_light_tbl_size;
+	const struct mt9d113_i2c_reg_conf *awb_tbl;
+	uint16_t awb_tbl_size;
+	const struct mt9d113_i2c_reg_conf *patch_tbl;
+	uint16_t patch_tbl_size;
+	const struct mt9d113_i2c_reg_conf *eeprom_tbl;
+	uint16_t eeprom_tbl_size;
+};
+
+enum mt9d113_test_mode_t {
+	TEST_OFF,
+	TEST_1,
+	TEST_2,
+	TEST_3
+};
+
+enum mt9d113_resolution_t {
+	QTR_SIZE,
+	FULL_SIZE,
+	INVALID_SIZE
+};
+
+enum mt9d113_setting {
+	RES_PREVIEW,
+	RES_CAPTURE
+};
+#endif /* MT9D113_H */
diff --git a/drivers/media/video/msm/mt9d113_reg.c b/drivers/media/video/msm/mt9d113_reg.c
new file mode 100644
index 0000000..cd5be0f
--- /dev/null
+++ b/drivers/media/video/msm/mt9d113_reg.c
@@ -0,0 +1,455 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "mt9d113.h"
+
+struct mt9d113_i2c_reg_conf const
+	pll_tbl_settings[] = {
+		{0x0014, 0x21F9 }, /*PLL control: BYPASS PLL = 8697*/
+		{0x0010, 0x0115 }, /*PLL Dividers = 277*/
+		{0x0012, 0x0F5  }, /*PLL P Dividers = 245*/
+		{0x0014, 0x21FB }, /*PLL control: PLL_ENABLE on = 8699*/
+		{0x0014, 0x20FB }, /*PLL control: SEL_LOCK_DET on = 8443*/
+};
+
+struct mt9d113_i2c_reg_conf const
+	register_wizard_settings[] = {
+		{0x098C, 0x2719},
+		{0x0990, 0x005A},
+		{0x098C, 0x271B},
+		{0x0990, 0x01BE},
+		{0x098C, 0x271D},
+		{0x0990, 0x0131},
+		{0x098C, 0x271F},
+		{0x0990, 0x02BB},
+		{0x098C, 0x2721},
+		{0x0990, 0x0888},
+		{0x098C, 0x272F},
+		{0x0990, 0x003A},
+		{0x098C, 0x2731},
+		{0x0990, 0x00F6},
+		{0x098C, 0x2733},
+		{0x0990, 0x008B},
+		{0x098C, 0x2735},
+		{0x0990, 0x0521},
+		{0x098C, 0x2737},
+		{0x0990, 0x0888},
+		{0x098C, 0x275F},
+		{0x0990, 0x0194},
+		{0x098C, 0x2761},
+		{0x0990, 0x0014},
+		{0x098C, 0xA765},
+		{0x0990, 0x0044},
+		{0x098C, 0xA24F},
+		{0x0990, 0x0028},
+		{0x098C, 0xA20E},
+		{0x0990, 0x00A0},
+		{0x098C, 0xA20C},
+		{0x0990, 0x000E},
+		{0x098C, 0x2222},
+		{0x0990, 0x00A0},
+		{0x098C, 0x2212},
+		{0x0990, 0x01EE},
+		{0x098C, 0xA408},
+		{0x0990, 0x0026},
+		{0x098C, 0xA409},
+		{0x0990, 0x0029},
+		{0x098C, 0xA40A},
+		{0x0990, 0x002E},
+		{0x098C, 0xA40B},
+		{0x0990, 0x0031},
+		{0x098C, 0x2411},
+		{0x0990, 0x00A0},
+		{0x098C, 0x2413},
+		{0x0990, 0x00C0},
+		{0x098C, 0x2415},
+		{0x0990, 0x00A0},
+		{0x098C, 0x2417},
+		{0x0990, 0x00C0},
+};
+
+struct mt9d113_i2c_reg_conf const
+	err_settings[] = {
+		{0x3084, 0x240C},
+		{0x3092, 0x0A4C},
+		{0x3094, 0x4C4C},
+		{0x3096, 0x4C54},
+};
+
+struct mt9d113_i2c_reg_conf const
+	patch_settings[] = {
+		{0x098C, 0x0415},    /* MCU_ADDRESS*/
+		{0x0990, 0xF601},
+		{0x0992, 0x42C1},
+		{0x0994, 0x0326},
+		{0x0996, 0x11F6},
+		{0x0998, 0x0143},
+		{0x099A, 0xC104},
+		{0x099C, 0x260A},
+		{0x099E, 0xCC04},
+		{0x098C, 0x0425},
+		{0x0990, 0x33BD},
+		{0x0992, 0xA362},
+		{0x0994, 0xBD04},
+		{0x0996, 0x3339},
+		{0x0998, 0xC6FF},
+		{0x099A, 0xF701},
+		{0x099C, 0x6439},
+		{0x099E, 0xFE01},
+		{0x098C, 0x0435},
+		{0x0990, 0x6918},
+		{0x0992, 0xCE03},
+		{0x0994, 0x25CC},
+		{0x0996, 0x0013},
+		{0x0998, 0xBDC2},
+		{0x099A, 0xB8CC},
+		{0x099C, 0x0489},
+		{0x099E, 0xFD03},
+		{0x098C, 0x0445},
+		{0x0990, 0x27CC},
+		{0x0992, 0x0325},
+		{0x0994, 0xFD01},
+		{0x0996, 0x69FE},
+		{0x0998, 0x02BD},
+		{0x099A, 0x18CE},
+		{0x099C, 0x0339},
+		{0x099E, 0xCC00},
+		{0x098C, 0x0455},
+		{0x0990, 0x11BD},
+		{0x0992, 0xC2B8},
+		{0x0994, 0xCC04},
+		{0x0996, 0xC8FD},
+		{0x0998, 0x0347},
+		{0x099A, 0xCC03},
+		{0x099C, 0x39FD},
+		{0x099E, 0x02BD},
+		{0x098C, 0x0465},
+		{0x0990, 0xDE00},
+		{0x0992, 0x18CE},
+		{0x0994, 0x00C2},
+		{0x0996, 0xCC00},
+		{0x0998, 0x37BD},
+		{0x099A, 0xC2B8},
+		{0x099C, 0xCC04},
+		{0x099E, 0xEFDD},
+		{0x098C, 0x0475},
+		{0x0990, 0xE6CC},
+		{0x0992, 0x00C2},
+		{0x0994, 0xDD00},
+		{0x0996, 0xC601},
+		{0x0998, 0xF701},
+		{0x099A, 0x64C6},
+		{0x099C, 0x03F7},
+		{0x099E, 0x0165},
+		{0x098C, 0x0485},
+		{0x0990, 0x7F01},
+		{0x0992, 0x6639},
+		{0x0994, 0x3C3C},
+		{0x0996, 0x3C34},
+		{0x0998, 0xCC32},
+		{0x099A, 0x3EBD},
+		{0x099C, 0xA558},
+		{0x099E, 0x30ED},
+		{0x098C, 0x0495},
+		{0x0990, 0x04BD},
+		{0x0992, 0xB2D7},
+		{0x0994, 0x30E7},
+		{0x0996, 0x06CC},
+		{0x0998, 0x323E},
+		{0x099A, 0xED00},
+		{0x099C, 0xEC04},
+		{0x099E, 0xBDA5},
+		{0x098C, 0x04A5},
+		{0x0990, 0x44CC},
+		{0x0992, 0x3244},
+		{0x0994, 0xBDA5},
+		{0x0996, 0x585F},
+		{0x0998, 0x30ED},
+		{0x099A, 0x02CC},
+		{0x099C, 0x3244},
+		{0x099E, 0xED00},
+		{0x098C, 0x04B5},
+		{0x0990, 0xF601},
+		{0x0992, 0xD54F},
+		{0x0994, 0xEA03},
+		{0x0996, 0xAA02},
+		{0x0998, 0xBDA5},
+		{0x099A, 0x4430},
+		{0x099C, 0xE606},
+		{0x099E, 0x3838},
+		{0x098C, 0x04C5},
+		{0x0990, 0x3831},
+		{0x0992, 0x39BD},
+		{0x0994, 0xD661},
+		{0x0996, 0xF602},
+		{0x0998, 0xF4C1},
+		{0x099A, 0x0126},
+		{0x099C, 0x0BFE},
+		{0x099E, 0x02BD},
+		{0x098C, 0x04D5},
+		{0x0990, 0xEE10},
+		{0x0992, 0xFC02},
+		{0x0994, 0xF5AD},
+		{0x0996, 0x0039},
+		{0x0998, 0xF602},
+		{0x099A, 0xF4C1},
+		{0x099C, 0x0226},
+		{0x099E, 0x0AFE},
+		{0x098C, 0x04E5},
+		{0x0990, 0x02BD},
+		{0x0992, 0xEE10},
+		{0x0994, 0xFC02},
+		{0x0996, 0xF7AD},
+		{0x0998, 0x0039},
+		{0x099A, 0x3CBD},
+		{0x099C, 0xB059},
+		{0x099E, 0xCC00},
+		{0x098C, 0x04F5},
+		{0x0990, 0x28BD},
+		{0x0992, 0xA558},
+		{0x0994, 0x8300},
+		{0x0996, 0x0027},
+		{0x0998, 0x0BCC},
+		{0x099A, 0x0026},
+		{0x099C, 0x30ED},
+		{0x099E, 0x00C6},
+		{0x098C, 0x0505},
+		{0x0990, 0x03BD},
+		{0x0992, 0xA544},
+		{0x0994, 0x3839},
+		{0x098C, 0x2006},
+		{0x0990, 0x0415},
+		{0x098C, 0xA005},
+		{0x0990, 0x0001},
+};
+
+struct mt9d113_i2c_reg_conf const
+	eeprom_settings[] = {
+		{0x3658, 0x0110},
+		{0x365A, 0x1B6D},
+		{0x365C, 0x01F2},
+		{0x365E, 0xFBCD},
+		{0x3660, 0x8C91},
+		{0x3680, 0xB9ED},
+		{0x3682, 0x0EE},
+		{0x3684, 0x256F},
+		{0x3686, 0x824F},
+		{0x3688, 0xD293},
+		{0x36A8, 0x5BF2},
+		{0x36AA, 0x1711},
+		{0x36AC, 0xA095},
+		{0x36AE, 0x642C},
+		{0x36B0, 0x0E38},
+		{0x36D0, 0x88B0},
+		{0x36D2, 0x2EB2},
+		{0x36D4, 0x4C74},
+		{0x36D6, 0x9F96},
+		{0x36D8, 0x9557},
+		{0x36F8, 0xCE51},
+		{0x36FA, 0xB354},
+		{0x36FC, 0x2817},
+		{0x36FE, 0x14B8},
+		{0x3700, 0xB019},
+		{0x364E, 0x0710},
+		{0x3650, 0x30ED},
+		{0x3652, 0x03F2},
+		{0x3654, 0xF12E},
+		{0x3656, 0x8492},
+		{0x3676, 0xD9AD},
+		{0x3678, 0x88D0},
+		{0x367A, 0x7DED},
+		{0x367C, 0x3E31},
+		{0x367E, 0x91B3},
+		{0x369E, 0x7032},
+		{0x36A0, 0x2791},
+		{0x36A2, 0xBB55},
+		{0x36A4, 0xAB32},
+		{0x36A6, 0x1A58},
+		{0x36C6, 0xB50F},
+		{0x36C8, 0x0011},
+		{0x36CA, 0x6DB4},
+		{0x36CC, 0x96F5},
+		{0x36CE, 0x9BB7},
+		{0x36EE, 0x9353},
+		{0x36F0, 0xDF74},
+		{0x36F2, 0x04F8},
+		{0x36F4, 0x0FD8},
+		{0x36F6, 0xA87A},
+		{0x3662, 0x0170},
+		{0x3664, 0x6F0C},
+		{0x3666, 0x0112},
+		{0x3668, 0xCBAB},
+		{0x366A, 0x9111},
+		{0x368A, 0xB38D},
+		{0x368C, 0xE96F},
+		{0x368E, 0xCC0F},
+		{0x3690, 0x5851},
+		{0x3692, 0xFDD2},
+		{0x36B2, 0x5F92},
+		{0x36B4, 0x33B2},
+		{0x36B6, 0x9815},
+		{0x36B8, 0x86F5},
+		{0x36BA, 0x0578},
+		{0x36DA, 0xCD90},
+		{0x36DC, 0x1131},
+		{0x36DE, 0x5275},
+		{0x36E0, 0xE855},
+		{0x36E2, 0xD037},
+		{0x3702, 0xAAD1},
+		{0x3704, 0xEB75},
+		{0x3706, 0x0CD7},
+		{0x3708, 0x2C79},
+		{0x370A, 0xE0B9},
+		{0x366C, 0x0190},
+		{0x366E, 0x1C8D},
+		{0x3670, 0x0052},
+		{0x3672, 0xD66E},
+		{0x3674, 0xF511},
+		{0x3694, 0xB54D},
+		{0x3696, 0x6E4E},
+		{0x3698, 0x142E},
+		{0x369A, 0xC190},
+		{0x369C, 0xA753},
+		{0x36BC, 0x70F2},
+		{0x36BE, 0x04F1},
+		{0x36C0, 0xBD95},
+		{0x36C2, 0x0CEE},
+		{0x36C4, 0x1BF8},
+		{0x36E4, 0x806F},
+		{0x36E6, 0x1672},
+		{0x36E8, 0x2DF4},
+		{0x36EA, 0x8F16},
+		{0x36EC, 0xF776},
+		{0x370C, 0xAD73},
+		{0x370E, 0xB534},
+		{0x3710, 0x0D18},
+		{0x3712, 0x6057},
+		{0x3714, 0xBD1A},
+		{0x3644, 0x0354},
+		{0x3642, 0x0234},
+		{0x3210, 0x01B8},
+};
+
+struct mt9d113_i2c_reg_conf const
+	awb_settings[] = {
+		{0x098C, 0x2306},
+		{0x0990, 0x0180},
+		{0x098C, 0x2308},
+		{0x0990, 0xFF00},
+		{0x098C, 0x230A},
+		{0x0990, 0x0080},
+		{0x098C, 0x230C},
+		{0x0990, 0xFF66},
+		{0x098C, 0x230E},
+		{0x0990, 0x0180},
+		{0x098C, 0x2310},
+		{0x0990, 0xFFEE},
+		{0x098C, 0x2312},
+		{0x0990, 0xFFCD},
+		{0x098C, 0x2314},
+		{0x0990, 0xFECD},
+		{0x098C, 0x2316},
+		{0x0990, 0x019A},
+		{0x098C, 0x2318},
+		{0x0990, 0x0020},
+		{0x098C, 0x231A},
+		{0x0990, 0x0033},
+		{0x098C, 0x231C},
+		{0x0990, 0x0100},
+		{0x098C, 0x231E},
+		{0x0990, 0xFF9A},
+		{0x098C, 0x2320},
+		{0x0990, 0x0000},
+		{0x098C, 0x2322},
+		{0x0990, 0x004D},
+		{0x098C, 0x2324},
+		{0x0990, 0xFFCD},
+		{0x098C, 0x2326},
+		{0x0990, 0xFFB8},
+		{0x098C, 0x2328},
+		{0x0990, 0x004D},
+		{0x098C, 0x232A},
+		{0x0990, 0x0080},
+		{0x098C, 0x232C},
+		{0x0990, 0xFF66},
+		{0x098C, 0x232E},
+		{0x0990, 0x0008},
+		{0x098C, 0x2330},
+		{0x0990, 0xFFF7},
+		{0x098C, 0xA363},
+		{0x0990, 0x00D2},
+		{0x098C, 0xA364},
+		{0x0990, 0x00EE},
+		{0x3244, 0x0328},
+		{0x323E, 0xC22C},
+};
+
+struct mt9d113_i2c_reg_conf const
+	low_light_setting[] = {
+		{0x098C, 0x2B28},
+		{0x0990, 0x35E8},
+		{0x098C, 0x2B2A},
+		{0x0990, 0xB3B0},
+		{0x098C, 0xAB20},
+		{0x0990, 0x004B},
+		{0x098C, 0xAB24},
+		{0x0990, 0x0000},
+		{0x098C, 0xAB25},
+		{0x0990, 0x00FF},
+		{0x098C, 0xAB30},
+		{0x0990, 0x00FF},
+		{0x098C, 0xAB31},
+		{0x0990, 0x00FF},
+		{0x098C, 0xAB32},
+		{0x0990, 0x00FF},
+		{0x098C, 0xAB33},
+		{0x0990, 0x0057},
+		{0x098C, 0xAB34},
+		{0x0990, 0x0080},
+		{0x098C, 0xAB35},
+		{0x0990, 0x00FF},
+		{0x098C, 0xAB36},
+		{0x0990, 0x0014},
+		{0x098C, 0xAB37},
+		{0x0990, 0x0003},
+		{0x098C, 0x2B38},
+		{0x0990, 0x32C8},
+		{0x098C, 0x2B3A},
+		{0x0990, 0x7918},
+		{0x098C, 0x2B62},
+		{0x0990, 0xFFFE},
+		{0x098C, 0x2B64},
+		{0x0990, 0xFFFF},
+};
+
+struct mt9d113_reg mt9d113_regs = {
+		.pll_tbl = pll_tbl_settings,
+		.pll_tbl_size = ARRAY_SIZE(
+			pll_tbl_settings),
+		.register_tbl = register_wizard_settings,
+		.register_tbl_size = ARRAY_SIZE(
+			register_wizard_settings),
+		.err_tbl = err_settings,
+		.err_tbl_size = ARRAY_SIZE(err_settings),
+		.low_light_tbl = low_light_setting,
+		.low_light_tbl_size = ARRAY_SIZE(low_light_setting),
+		.awb_tbl = awb_settings,
+		.awb_tbl_size = ARRAY_SIZE(awb_settings),
+		.patch_tbl = patch_settings,
+		.patch_tbl_size = ARRAY_SIZE(patch_settings),
+		.eeprom_tbl = eeprom_settings,
+		.eeprom_tbl_size = ARRAY_SIZE(eeprom_settings),
+};
+
+
+
diff --git a/drivers/media/video/msm/mt9e013.c b/drivers/media/video/msm/mt9e013.c
new file mode 100644
index 0000000..94546f4
--- /dev/null
+++ b/drivers/media/video/msm/mt9e013.c
@@ -0,0 +1,1140 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <media/msm_camera.h>
+#include <mach/gpio.h>
+#include <mach/camera.h>
+#include "mt9e013.h"
+/*=============================================================
+	SENSOR REGISTER DEFINES
+==============================================================*/
+#define REG_GROUPED_PARAMETER_HOLD		0x0104
+#define GROUPED_PARAMETER_HOLD_OFF		0x00
+#define GROUPED_PARAMETER_HOLD			0x01
+/* Integration Time */
+#define REG_COARSE_INTEGRATION_TIME		0x3012
+/* Gain */
+#define REG_GLOBAL_GAIN	0x305E
+/* PLL registers */
+#define REG_FRAME_LENGTH_LINES		0x0340
+/* Test Pattern */
+#define REG_TEST_PATTERN_MODE			0x0601
+#define REG_VCM_NEW_CODE			0x30F2
+
+/*============================================================================
+							 TYPE DECLARATIONS
+============================================================================*/
+
+/* 16bit address - 8 bit context register structure */
+#define Q8	0x00000100
+#define Q10	0x00000400
+#define MT9E013_MASTER_CLK_RATE 24000000
+
+/* AF Total steps parameters */
+#define MT9E013_TOTAL_STEPS_NEAR_TO_FAR    32
+
+uint16_t mt9e013_step_position_table[MT9E013_TOTAL_STEPS_NEAR_TO_FAR+1];
+uint16_t mt9e013_nl_region_boundary1;
+uint16_t mt9e013_nl_region_code_per_step1;
+uint16_t mt9e013_l_region_code_per_step = 4;
+uint16_t mt9e013_damping_threshold = 10;
+uint16_t mt9e013_sw_damping_time_wait = 1;
+
+struct mt9e013_work_t {
+	struct work_struct work;
+};
+
+static struct mt9e013_work_t *mt9e013_sensorw;
+static struct i2c_client *mt9e013_client;
+
+struct mt9e013_ctrl_t {
+	const struct  msm_camera_sensor_info *sensordata;
+
+	uint32_t sensormode;
+	uint32_t fps_divider;/* init to 1 * 0x00000400 */
+	uint32_t pict_fps_divider;/* init to 1 * 0x00000400 */
+	uint16_t fps;
+
+	uint16_t curr_lens_pos;
+	uint16_t curr_step_pos;
+	uint16_t my_reg_gain;
+	uint32_t my_reg_line_count;
+	uint16_t total_lines_per_frame;
+
+	enum mt9e013_resolution_t prev_res;
+	enum mt9e013_resolution_t pict_res;
+	enum mt9e013_resolution_t curr_res;
+	enum mt9e013_test_mode_t  set_test;
+};
+
+
+static bool CSI_CONFIG;
+static struct mt9e013_ctrl_t *mt9e013_ctrl;
+static DECLARE_WAIT_QUEUE_HEAD(mt9e013_wait_queue);
+DEFINE_MUTEX(mt9e013_mut);
+
+static int cam_debug_init(void);
+static struct dentry *debugfs_base;
+/*=============================================================*/
+
+static int mt9e013_i2c_rxdata(unsigned short saddr,
+	unsigned char *rxdata, int length)
+{
+	struct i2c_msg msgs[] = {
+		{
+			.addr  = saddr,
+			.flags = 0,
+			.len   = 2,
+			.buf   = rxdata,
+		},
+		{
+			.addr  = saddr,
+			.flags = I2C_M_RD,
+			.len   = 2,
+			.buf   = rxdata,
+		},
+	};
+	if (i2c_transfer(mt9e013_client->adapter, msgs, 2) < 0) {
+		CDBG("mt9e013_i2c_rxdata failed 0x%x\n", saddr);
+		return -EIO;
+	}
+	return 0;
+}
+
+static int32_t mt9e013_i2c_txdata(unsigned short saddr,
+				unsigned char *txdata, int length)
+{
+	struct i2c_msg msg[] = {
+		{
+			.addr = saddr,
+			.flags = 0,
+			.len = length,
+			.buf = txdata,
+		 },
+	};
+	if (i2c_transfer(mt9e013_client->adapter, msg, 1) < 0) {
+		CDBG("mt9e013_i2c_txdata failed 0x%x\n", saddr);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int32_t mt9e013_i2c_read(unsigned short raddr,
+	unsigned short *rdata, int rlen)
+{
+	int32_t rc = 0;
+	unsigned char buf[2];
+	if (!rdata)
+		return -EIO;
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (raddr & 0xFF00) >> 8;
+	buf[1] = (raddr & 0x00FF);
+	rc = mt9e013_i2c_rxdata(mt9e013_client->addr<<1, buf, rlen);
+	if (rc < 0) {
+		CDBG("mt9e013_i2c_read 0x%x failed!\n", raddr);
+		return rc;
+	}
+	*rdata = (rlen == 2 ? buf[0] << 8 | buf[1] : buf[0]);
+	CDBG("mt9e013_i2c_read 0x%x val = 0x%x!\n", raddr, *rdata);
+	return rc;
+}
+
+static int32_t mt9e013_i2c_write_w_sensor(unsigned short waddr, uint16_t wdata)
+{
+	int32_t rc = -EFAULT;
+	unsigned char buf[4];
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (waddr & 0xFF00) >> 8;
+	buf[1] = (waddr & 0x00FF);
+	buf[2] = (wdata & 0xFF00) >> 8;
+	buf[3] = (wdata & 0x00FF);
+	CDBG("i2c_write_w addr = 0x%x, val = 0x%x\n", waddr, wdata);
+	rc = mt9e013_i2c_txdata(mt9e013_client->addr<<1, buf, 4);
+	if (rc < 0) {
+		CDBG("i2c_write_w failed, addr = 0x%x, val = 0x%x!\n",
+			waddr, wdata);
+	}
+	return rc;
+}
+
+static int32_t mt9e013_i2c_write_b_sensor(unsigned short waddr, uint8_t bdata)
+{
+	int32_t rc = -EFAULT;
+	unsigned char buf[3];
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (waddr & 0xFF00) >> 8;
+	buf[1] = (waddr & 0x00FF);
+	buf[2] = bdata;
+	CDBG("i2c_write_b addr = 0x%x, val = 0x%x\n", waddr, bdata);
+	rc = mt9e013_i2c_txdata(mt9e013_client->addr<<1, buf, 3);
+	if (rc < 0) {
+		CDBG("i2c_write_b failed, addr = 0x%x, val = 0x%x!\n",
+			waddr, bdata);
+	}
+	return rc;
+}
+
+static int32_t mt9e013_i2c_write_w_table(struct mt9e013_i2c_reg_conf const
+					 *reg_conf_tbl, int num)
+{
+	int i;
+	int32_t rc = -EIO;
+	for (i = 0; i < num; i++) {
+		rc = mt9e013_i2c_write_w_sensor(reg_conf_tbl->waddr,
+			reg_conf_tbl->wdata);
+		if (rc < 0)
+			break;
+		reg_conf_tbl++;
+	}
+	return rc;
+}
+
+static void mt9e013_group_hold_on(void)
+{
+	mt9e013_i2c_write_b_sensor(REG_GROUPED_PARAMETER_HOLD,
+						GROUPED_PARAMETER_HOLD);
+}
+
+static void mt9e013_group_hold_off(void)
+{
+	mt9e013_i2c_write_b_sensor(REG_GROUPED_PARAMETER_HOLD,
+						GROUPED_PARAMETER_HOLD_OFF);
+}
+
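+/* Stream control: 0x301A is the sensor reset/control register (the mt9p012
+ * driver later in this patch defines the same address as
+ * MT9P012_REG_RESET_REGISTER), and 0x0104 appears to be the
+ * grouped-parameter-hold register, released on start and asserted on stop.
+ * The 0x8250/0x8650/0x8658/0x065C values are taken as-is from the vendor
+ * bring-up sequence. */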
+static void mt9e013_start_stream(void)
+{
+	mt9e013_i2c_write_w_sensor(0x301A, 0x8250);
+	mt9e013_i2c_write_w_sensor(0x301A, 0x8650);
+	mt9e013_i2c_write_w_sensor(0x301A, 0x8658);
+	mt9e013_i2c_write_b_sensor(0x0104, 0x00);
+	mt9e013_i2c_write_w_sensor(0x301A, 0x065C);
+}
+
+static void mt9e013_stop_stream(void)
+{
+	mt9e013_i2c_write_w_sensor(0x301A, 0x0058);
+	mt9e013_i2c_write_w_sensor(0x301A, 0x0050);
+	mt9e013_i2c_write_b_sensor(0x0104, 0x01);
+}
+
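+/* mt9e013_get_pict_fps: scales the preview fps (Q8) by the ratio of the
+ * preview and snapshot frame geometries, all in Q10 fixed point.  As an
+ * illustration, with the tables defined in mt9e013_reg.c below
+ * (prev FLL 0x055B, snap FLL 0x0A2F, prev LLP 0x1018, snap LLP 0x13F8)
+ * the divider works out to roughly 433/1024, so a 30 fps preview maps to
+ * a snapshot rate of about 12.7 fps. */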
+static void mt9e013_get_pict_fps(uint16_t fps, uint16_t *pfps)
+{
+	/* input fps is preview fps in Q8 format */
+	uint32_t divider, d1, d2;
+
+	d1 = mt9e013_regs.reg_prev[E013_FRAME_LENGTH_LINES].wdata
+		* 0x00000400/
+		mt9e013_regs.reg_snap[E013_FRAME_LENGTH_LINES].wdata;
+	d2 = mt9e013_regs.reg_prev[E013_LINE_LENGTH_PCK].wdata
+		* 0x00000400/
+		mt9e013_regs.reg_snap[E013_LINE_LENGTH_PCK].wdata;
+	divider = d1 * d2 / 0x400;
+
+	/*Verify PCLK settings and frame sizes.*/
+	*pfps = (uint16_t) (fps * divider / 0x400);
+	/* 2 is the ratio of the number of snapshot channels
+	   to the number of preview channels */
+}
+
+static uint16_t mt9e013_get_prev_lines_pf(void)
+{
+	if (mt9e013_ctrl->prev_res == QTR_SIZE)
+		return mt9e013_regs.reg_prev[E013_FRAME_LENGTH_LINES].wdata;
+	else if (mt9e013_ctrl->prev_res == FULL_SIZE)
+		return mt9e013_regs.reg_snap[E013_FRAME_LENGTH_LINES].wdata;
+	else if (mt9e013_ctrl->prev_res == HFR_60FPS)
+		return mt9e013_regs.reg_60fps[E013_FRAME_LENGTH_LINES].wdata;
+	else if (mt9e013_ctrl->prev_res == HFR_90FPS)
+		return mt9e013_regs.reg_120fps[E013_FRAME_LENGTH_LINES].wdata;
+	else
+		return mt9e013_regs.reg_120fps[E013_FRAME_LENGTH_LINES].wdata;
+}
+
+static uint16_t mt9e013_get_prev_pixels_pl(void)
+{
+	if (mt9e013_ctrl->prev_res == QTR_SIZE)
+		return mt9e013_regs.reg_prev[E013_LINE_LENGTH_PCK].wdata;
+	else if (mt9e013_ctrl->prev_res == FULL_SIZE)
+		return mt9e013_regs.reg_snap[E013_LINE_LENGTH_PCK].wdata;
+	else if (mt9e013_ctrl->prev_res == HFR_60FPS)
+		return mt9e013_regs.reg_60fps[E013_LINE_LENGTH_PCK].wdata;
+	else if (mt9e013_ctrl->prev_res == HFR_90FPS)
+		return mt9e013_regs.reg_120fps[E013_LINE_LENGTH_PCK].wdata;
+	else
+		return mt9e013_regs.reg_120fps[E013_LINE_LENGTH_PCK].wdata;
+}
+
+static uint16_t mt9e013_get_pict_lines_pf(void)
+{
+	if (mt9e013_ctrl->pict_res == QTR_SIZE)
+		return mt9e013_regs.reg_prev[E013_FRAME_LENGTH_LINES].wdata;
+	else if (mt9e013_ctrl->pict_res == FULL_SIZE)
+		return mt9e013_regs.reg_snap[E013_FRAME_LENGTH_LINES].wdata;
+	else if (mt9e013_ctrl->pict_res == HFR_60FPS)
+		return mt9e013_regs.reg_60fps[E013_FRAME_LENGTH_LINES].wdata;
+	else if (mt9e013_ctrl->pict_res == HFR_90FPS)
+		return mt9e013_regs.reg_120fps[E013_FRAME_LENGTH_LINES].wdata;
+	else
+		return mt9e013_regs.reg_120fps[E013_FRAME_LENGTH_LINES].wdata;
+}
+
+static uint16_t mt9e013_get_pict_pixels_pl(void)
+{
+	if (mt9e013_ctrl->pict_res == QTR_SIZE)
+		return mt9e013_regs.reg_prev[E013_LINE_LENGTH_PCK].wdata;
+	else if (mt9e013_ctrl->pict_res == FULL_SIZE)
+		return mt9e013_regs.reg_snap[E013_LINE_LENGTH_PCK].wdata;
+	else if (mt9e013_ctrl->pict_res == HFR_60FPS)
+		return mt9e013_regs.reg_60fps[E013_LINE_LENGTH_PCK].wdata;
+	else if (mt9e013_ctrl->pict_res == HFR_90FPS)
+		return mt9e013_regs.reg_120fps[E013_LINE_LENGTH_PCK].wdata;
+	else
+		return mt9e013_regs.reg_120fps[E013_LINE_LENGTH_PCK].wdata;
+}
+
+static uint32_t mt9e013_get_pict_max_exp_lc(void)
+{
+	if (mt9e013_ctrl->pict_res == QTR_SIZE)
+		return mt9e013_regs.reg_prev[E013_FRAME_LENGTH_LINES].wdata
+			* 24;
+	else if (mt9e013_ctrl->pict_res == FULL_SIZE)
+		return mt9e013_regs.reg_snap[E013_FRAME_LENGTH_LINES].wdata
+			* 24;
+	else if (mt9e013_ctrl->pict_res == HFR_60FPS)
+		return mt9e013_regs.reg_60fps[E013_FRAME_LENGTH_LINES].wdata
+			* 24;
+	else if (mt9e013_ctrl->pict_res == HFR_90FPS)
+		return mt9e013_regs.reg_120fps[E013_FRAME_LENGTH_LINES].wdata
+			* 24;
+	else
+		return mt9e013_regs.reg_120fps[E013_FRAME_LENGTH_LINES].wdata
+			* 24;
+}
+
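+/* mt9e013_set_fps: fps_div/pict_fps_div are Q10 ratios; 0x400 keeps the
+ * nominal frame length from the mode table, while larger values stretch
+ * FRAME_LENGTH_LINES proportionally and therefore lower the frame rate. */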
+static int32_t mt9e013_set_fps(struct fps_cfg   *fps)
+{
+	uint16_t total_lines_per_frame;
+	int32_t rc = 0;
+	if (mt9e013_ctrl->curr_res == QTR_SIZE)
+		total_lines_per_frame =
+		mt9e013_regs.reg_prev[E013_FRAME_LENGTH_LINES].wdata;
+	else if (mt9e013_ctrl->curr_res == FULL_SIZE)
+		total_lines_per_frame =
+		mt9e013_regs.reg_snap[E013_FRAME_LENGTH_LINES].wdata;
+	else if (mt9e013_ctrl->curr_res == HFR_60FPS)
+		total_lines_per_frame =
+		mt9e013_regs.reg_60fps[E013_FRAME_LENGTH_LINES].wdata;
+	else if (mt9e013_ctrl->curr_res == HFR_90FPS)
+		total_lines_per_frame =
+		mt9e013_regs.reg_120fps[E013_FRAME_LENGTH_LINES].wdata;
+	else
+		total_lines_per_frame =
+		mt9e013_regs.reg_120fps[E013_FRAME_LENGTH_LINES].wdata;
+
+	mt9e013_ctrl->fps_divider = fps->fps_div;
+	mt9e013_ctrl->pict_fps_divider = fps->pict_fps_div;
+
+	if (mt9e013_ctrl->curr_res == FULL_SIZE) {
+		total_lines_per_frame = (uint16_t)
+		(total_lines_per_frame * mt9e013_ctrl->pict_fps_divider/0x400);
+	} else {
+		total_lines_per_frame = (uint16_t)
+		(total_lines_per_frame * mt9e013_ctrl->fps_divider/0x400);
+	}
+
+	mt9e013_group_hold_on();
+	rc = mt9e013_i2c_write_w_sensor(REG_FRAME_LENGTH_LINES,
+							total_lines_per_frame);
+	mt9e013_group_hold_off();
+	return rc;
+}
+
+static int32_t mt9e013_write_exp_gain(uint16_t gain, uint32_t line)
+{
+	uint16_t max_legal_gain = 0xE7F;
+	int32_t rc = 0;
+	if (gain > max_legal_gain) {
+		CDBG("Max legal gain Line:%d\n", __LINE__);
+		gain = max_legal_gain;
+	}
+
+	if (mt9e013_ctrl->curr_res != FULL_SIZE) {
+		mt9e013_ctrl->my_reg_gain = gain;
+		mt9e013_ctrl->my_reg_line_count = (uint16_t) line;
+		line = (uint32_t) (line * mt9e013_ctrl->fps_divider /
+						   0x00000400);
+	} else {
+		line = (uint32_t) (line * mt9e013_ctrl->pict_fps_divider /
+						   0x00000400);
+	}
+
+	gain |= 0x1000;
+
+	mt9e013_group_hold_on();
+	rc = mt9e013_i2c_write_w_sensor(REG_GLOBAL_GAIN, gain);
+	rc = mt9e013_i2c_write_w_sensor(REG_COARSE_INTEGRATION_TIME, line);
+	mt9e013_group_hold_off();
+	return rc;
+}
+
+static int32_t mt9e013_set_pict_exp_gain(uint16_t gain, uint32_t line)
+{
+	int32_t rc = 0;
+	rc = mt9e013_write_exp_gain(gain, line);
+	mt9e013_i2c_write_w_sensor(0x301A, 0x065C|0x2);
+	return rc;
+}
+
+#define DIV_CEIL(x, y) (((x) / (y)) + (((x) % (y)) ? 1 : 0))
+
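+/* mt9e013_move_focus: steps the VCM from the current lens position to the
+ * table entry for the requested step.  Moves toward the far end whose
+ * distance exceeds the damping threshold are split into ~10 sub-steps with
+ * a 500 us settle per write; other moves use ~4 sub-steps with a 200 us
+ * settle, to damp lens ringing. */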
+static int32_t mt9e013_move_focus(int direction,
+	int32_t num_steps)
+{
+	int16_t step_direction, dest_lens_position, dest_step_position;
+	int16_t target_dist, small_step, next_lens_position;
+	if (direction == MOVE_NEAR)
+		step_direction = 1;
+	else
+		step_direction = -1;
+
+	dest_step_position = mt9e013_ctrl->curr_step_pos
+						+ (step_direction * num_steps);
+
+	if (dest_step_position < 0)
+		dest_step_position = 0;
+	else if (dest_step_position > MT9E013_TOTAL_STEPS_NEAR_TO_FAR)
+		dest_step_position = MT9E013_TOTAL_STEPS_NEAR_TO_FAR;
+
+	if (dest_step_position == mt9e013_ctrl->curr_step_pos)
+		return 0;
+
+	dest_lens_position = mt9e013_step_position_table[dest_step_position];
+	target_dist = step_direction *
+		(dest_lens_position - mt9e013_ctrl->curr_lens_pos);
+
+	if (step_direction < 0 && (target_dist >=
+		mt9e013_step_position_table[mt9e013_damping_threshold])) {
+		small_step = DIV_CEIL(target_dist, 10);
+		mt9e013_sw_damping_time_wait = 10;
+	} else {
+		small_step = DIV_CEIL(target_dist, 4);
+		mt9e013_sw_damping_time_wait = 4;
+	}
+
+	for (next_lens_position = mt9e013_ctrl->curr_lens_pos
+		+ (step_direction * small_step);
+		(step_direction * next_lens_position) <=
+		(step_direction * dest_lens_position);
+		next_lens_position += (step_direction * small_step)) {
+		mt9e013_i2c_write_w_sensor(REG_VCM_NEW_CODE,
+		next_lens_position);
+		mt9e013_ctrl->curr_lens_pos = next_lens_position;
+		usleep(mt9e013_sw_damping_time_wait*50);
+	}
+
+	if (mt9e013_ctrl->curr_lens_pos != dest_lens_position) {
+		mt9e013_i2c_write_w_sensor(REG_VCM_NEW_CODE,
+		dest_lens_position);
+		usleep(mt9e013_sw_damping_time_wait*50);
+	}
+	mt9e013_ctrl->curr_lens_pos = dest_lens_position;
+	mt9e013_ctrl->curr_step_pos = dest_step_position;
+	return 0;
+}
+
+static int32_t mt9e013_set_default_focus(uint8_t af_step)
+{
+	int32_t rc = 0;
+	if (mt9e013_ctrl->curr_step_pos != 0) {
+		rc = mt9e013_move_focus(MOVE_FAR,
+		mt9e013_ctrl->curr_step_pos);
+	} else {
+		mt9e013_i2c_write_w_sensor(REG_VCM_NEW_CODE, 0x00);
+	}
+
+	mt9e013_ctrl->curr_lens_pos = 0;
+	mt9e013_ctrl->curr_step_pos = 0;
+
+	return rc;
+}
+
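+/* mt9e013_init_focus: builds the VCM DAC code for each AF step.  With the
+ * defaults above (nl_region_boundary1 = 0, l_region_code_per_step = 4)
+ * this is simply step_position_table[i] = 4 * i, i.e. codes 0..128 across
+ * the 32 steps, well inside the 255 clamp. */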
+static void mt9e013_init_focus(void)
+{
+	uint8_t i;
+	mt9e013_step_position_table[0] = 0;
+	for (i = 1; i <= MT9E013_TOTAL_STEPS_NEAR_TO_FAR; i++) {
+		if (i <= mt9e013_nl_region_boundary1) {
+			mt9e013_step_position_table[i] =
+				mt9e013_step_position_table[i-1]
+				+ mt9e013_nl_region_code_per_step1;
+		} else {
+			mt9e013_step_position_table[i] =
+				mt9e013_step_position_table[i-1]
+				+ mt9e013_l_region_code_per_step;
+		}
+
+		if (mt9e013_step_position_table[i] > 255)
+			mt9e013_step_position_table[i] = 255;
+	}
+}
+
+static int32_t mt9e013_test(enum mt9e013_test_mode_t mo)
+{
+	int32_t rc = 0;
+	if (mo == TEST_OFF)
+		return rc;
+	else {
+		/* REG_0x30D8[4] is TESBYPEN: 0: Normal Operation,
+		1: Bypass Signal Processing
+		REG_0x30D8[5] is EBDMASK: 0:
+		Output Embedded data, 1: No output embedded data */
+		rc = mt9e013_i2c_write_b_sensor(REG_TEST_PATTERN_MODE,
+			(uint8_t) mo);
+		if (rc < 0)
+			return rc;
+	}
+	return rc;
+}
+
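+/* mt9e013_sensor_setting: REG_INIT writes the MIPI and recommended
+ * settings once and clears the CSI flag; UPDATE_PERIODIC loads the PLL and
+ * mode tables for the requested resolution, configures the CSI receiver the
+ * first time through, restores the saved AF step and restarts streaming. */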
+static int32_t mt9e013_sensor_setting(int update_type, int rt)
+{
+
+	int32_t rc = 0;
+	struct msm_camera_csi_params mt9e013_csi_params;
+	uint8_t stored_af_step = 0;
+	CDBG("sensor_settings\n");
+	stored_af_step = mt9e013_ctrl->curr_step_pos;
+	mt9e013_set_default_focus(0);
+	mt9e013_stop_stream();
+	msleep(15);
+	if (update_type == REG_INIT) {
+		mt9e013_i2c_write_w_table(mt9e013_regs.reg_mipi,
+			mt9e013_regs.reg_mipi_size);
+		mt9e013_i2c_write_w_table(mt9e013_regs.rec_settings,
+			mt9e013_regs.rec_size);
+		cam_debug_init();
+		CSI_CONFIG = 0;
+	} else if (update_type == UPDATE_PERIODIC) {
+		if (rt == QTR_SIZE) {
+			mt9e013_i2c_write_w_table(mt9e013_regs.reg_pll,
+				mt9e013_regs.reg_pll_size);
+			mt9e013_i2c_write_w_table(mt9e013_regs.reg_prev,
+				mt9e013_regs.reg_prev_size);
+		} else if (rt == FULL_SIZE) {
+			mt9e013_i2c_write_w_table(mt9e013_regs.reg_pll,
+				mt9e013_regs.reg_pll_size);
+			mt9e013_i2c_write_w_table(mt9e013_regs.reg_snap,
+				mt9e013_regs.reg_snap_size);
+		} else if (rt == HFR_60FPS) {
+			mt9e013_i2c_write_w_table(mt9e013_regs.reg_pll_120fps,
+				mt9e013_regs.reg_pll_120fps_size);
+			mt9e013_i2c_write_w_sensor(0x0306, 0x0029);
+			mt9e013_i2c_write_w_table(mt9e013_regs.reg_120fps,
+				mt9e013_regs.reg_120fps_size);
+		} else if (rt == HFR_90FPS) {
+			mt9e013_i2c_write_w_table(mt9e013_regs.reg_pll_120fps,
+				mt9e013_regs.reg_pll_120fps_size);
+			mt9e013_i2c_write_w_sensor(0x0306, 0x003D);
+			mt9e013_i2c_write_w_table(mt9e013_regs.reg_120fps,
+				mt9e013_regs.reg_120fps_size);
+		} else if (rt == HFR_120FPS) {
+			msm_camio_vfe_clk_rate_set(266667000);
+			mt9e013_i2c_write_w_table(mt9e013_regs.reg_pll_120fps,
+				mt9e013_regs.reg_pll_120fps_size);
+			mt9e013_i2c_write_w_table(mt9e013_regs.reg_120fps,
+				mt9e013_regs.reg_120fps_size);
+		}
+		if (!CSI_CONFIG) {
+			msm_camio_vfe_clk_rate_set(192000000);
+			mt9e013_csi_params.data_format = CSI_10BIT;
+			mt9e013_csi_params.lane_cnt = 2;
+			mt9e013_csi_params.lane_assign = 0xe4;
+			mt9e013_csi_params.dpcm_scheme = 0;
+			mt9e013_csi_params.settle_cnt = 0x18;
+			rc = msm_camio_csi_config(&mt9e013_csi_params);
+			msleep(10);
+			CSI_CONFIG = 1;
+		}
+		mt9e013_move_focus(MOVE_NEAR, stored_af_step);
+		mt9e013_start_stream();
+	}
+	return rc;
+}
+
+static int32_t mt9e013_video_config(int mode)
+{
+
+	int32_t rc = 0;
+
+	CDBG("video config\n");
+	/* change sensor resolution if needed */
+	if (mt9e013_sensor_setting(UPDATE_PERIODIC,
+			mt9e013_ctrl->prev_res) < 0)
+		return rc;
+	if (mt9e013_ctrl->set_test) {
+		if (mt9e013_test(mt9e013_ctrl->set_test) < 0)
+			return  rc;
+	}
+
+	mt9e013_ctrl->curr_res = mt9e013_ctrl->prev_res;
+	mt9e013_ctrl->sensormode = mode;
+	return rc;
+}
+
+static int32_t mt9e013_snapshot_config(int mode)
+{
+	int32_t rc = 0;
+	/*change sensor resolution if needed */
+	if (mt9e013_ctrl->curr_res != mt9e013_ctrl->pict_res) {
+		if (mt9e013_sensor_setting(UPDATE_PERIODIC,
+				mt9e013_ctrl->pict_res) < 0)
+			return rc;
+	}
+
+	mt9e013_ctrl->curr_res = mt9e013_ctrl->pict_res;
+	mt9e013_ctrl->sensormode = mode;
+	return rc;
+} /*end of mt9e013_snapshot_config*/
+
+static int32_t mt9e013_raw_snapshot_config(int mode)
+{
+	int32_t rc = 0;
+	/* change sensor resolution if needed */
+	if (mt9e013_ctrl->curr_res != mt9e013_ctrl->pict_res) {
+		if (mt9e013_sensor_setting(UPDATE_PERIODIC,
+				mt9e013_ctrl->pict_res) < 0)
+			return rc;
+	}
+
+	mt9e013_ctrl->curr_res = mt9e013_ctrl->pict_res;
+	mt9e013_ctrl->sensormode = mode;
+	return rc;
+} /*end of mt9e013_raw_snapshot_config*/
+
+static int32_t mt9e013_set_sensor_mode(int mode,
+	int res)
+{
+	int32_t rc = 0;
+	switch (mode) {
+	case SENSOR_PREVIEW_MODE:
+	case SENSOR_HFR_60FPS_MODE:
+	case SENSOR_HFR_90FPS_MODE:
+	case SENSOR_HFR_120FPS_MODE:
+		mt9e013_ctrl->prev_res = res;
+		rc = mt9e013_video_config(mode);
+		break;
+	case SENSOR_SNAPSHOT_MODE:
+		mt9e013_ctrl->pict_res = res;
+		rc = mt9e013_snapshot_config(mode);
+		break;
+	case SENSOR_RAW_SNAPSHOT_MODE:
+		mt9e013_ctrl->pict_res = res;
+		rc = mt9e013_raw_snapshot_config(mode);
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+static int32_t mt9e013_power_down(void)
+{
+	return 0;
+}
+
+static int mt9e013_probe_init_done(const struct msm_camera_sensor_info *data)
+{
+	CDBG("probe done\n");
+	gpio_free(data->sensor_reset);
+	return 0;
+}
+
+static int mt9e013_probe_init_sensor(const struct msm_camera_sensor_info *data)
+{
+	int32_t rc = 0;
+	uint16_t chipid = 0;
+	CDBG("%s: %d\n", __func__, __LINE__);
+	rc = gpio_request(data->sensor_reset, "mt9e013");
+	CDBG(" mt9e013_probe_init_sensor\n");
+	if (!rc) {
+		CDBG("sensor_reset = %d\n", rc);
+		gpio_direction_output(data->sensor_reset, 0);
+		msleep(10);
+		gpio_set_value_cansleep(data->sensor_reset, 1);
+		msleep(10);
+	} else {
+		goto init_probe_done;
+	}
+
+	CDBG(" mt9e013_probe_init_sensor is called\n");
+	rc = mt9e013_i2c_read(0x0000, &chipid, 2);
+	CDBG("ID: %d\n", chipid);
+	/* 4. Compare sensor ID to MT9E013 ID: */
+	if (chipid != 0x4B00) {
+		rc = -ENODEV;
+		CDBG("mt9e013_probe_init_sensor failed: chip id does not match\n");
+		goto init_probe_fail;
+	}
+
+	mt9e013_ctrl = kzalloc(sizeof(struct mt9e013_ctrl_t), GFP_KERNEL);
+	if (!mt9e013_ctrl) {
+		CDBG("mt9e013_init failed!\n");
+		rc = -ENOMEM;
+		goto init_probe_fail;
+	}
+	mt9e013_ctrl->fps_divider = 1 * 0x00000400;
+	mt9e013_ctrl->pict_fps_divider = 1 * 0x00000400;
+	mt9e013_ctrl->set_test = TEST_OFF;
+	mt9e013_ctrl->prev_res = QTR_SIZE;
+	mt9e013_ctrl->pict_res = FULL_SIZE;
+
+	if (data)
+		mt9e013_ctrl->sensordata = data;
+
+	goto init_probe_done;
+init_probe_fail:
+	CDBG(" mt9e013_probe_init_sensor fails\n");
+	gpio_set_value_cansleep(data->sensor_reset, 0);
+	mt9e013_probe_init_done(data);
+init_probe_done:
+	CDBG(" mt9e013_probe_init_sensor finishes\n");
+	return rc;
+}
+/* camsensor_mt9e013_reset */
+
+int mt9e013_sensor_open_init(const struct msm_camera_sensor_info *data)
+{
+	int32_t rc = 0;
+
+	CDBG("%s: %d\n", __func__, __LINE__);
+	CDBG("Calling mt9e013_sensor_open_init\n");
+
+	mt9e013_ctrl = kzalloc(sizeof(struct mt9e013_ctrl_t), GFP_KERNEL);
+	if (!mt9e013_ctrl) {
+		CDBG("mt9e013_init failed!\n");
+		rc = -ENOMEM;
+		goto init_done;
+	}
+	mt9e013_ctrl->fps_divider = 1 * 0x00000400;
+	mt9e013_ctrl->pict_fps_divider = 1 * 0x00000400;
+	mt9e013_ctrl->set_test = TEST_OFF;
+	mt9e013_ctrl->prev_res = QTR_SIZE;
+	mt9e013_ctrl->pict_res = FULL_SIZE;
+
+	if (data)
+		mt9e013_ctrl->sensordata = data;
+	if (rc < 0) {
+		CDBG("Calling mt9e013_sensor_open_init fail1\n");
+		return rc;
+	}
+	CDBG("%s: %d\n", __func__, __LINE__);
+	/* enable mclk first */
+	msm_camio_clk_rate_set(MT9E013_MASTER_CLK_RATE);
+	rc = mt9e013_probe_init_sensor(data);
+	if (rc < 0)
+		goto init_fail;
+
+	CDBG("init settings\n");
+	rc = mt9e013_sensor_setting(REG_INIT, mt9e013_ctrl->prev_res);
+	mt9e013_ctrl->fps = 30*Q8;
+	mt9e013_init_focus();
+	if (rc < 0) {
+		gpio_set_value_cansleep(data->sensor_reset, 0);
+		goto init_fail;
+	} else
+		goto init_done;
+init_fail:
+	CDBG("init_fail\n");
+	mt9e013_probe_init_done(data);
+init_done:
+	CDBG("init_done\n");
+	return rc;
+} /*end of mt9e013_sensor_open_init*/
+
+static int mt9e013_init_client(struct i2c_client *client)
+{
+	/* Initialize the MSM_CAMI2C Chip */
+	init_waitqueue_head(&mt9e013_wait_queue);
+	return 0;
+}
+
+static const struct i2c_device_id mt9e013_i2c_id[] = {
+	{"mt9e013", 0},
+	{ }
+};
+
+static int mt9e013_i2c_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	int rc = 0;
+	CDBG("mt9e013_probe called!\n");
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		CDBG("i2c_check_functionality failed\n");
+		goto probe_failure;
+	}
+
+	mt9e013_sensorw = kzalloc(sizeof(struct mt9e013_work_t), GFP_KERNEL);
+	if (!mt9e013_sensorw) {
+		CDBG("kzalloc failed.\n");
+		rc = -ENOMEM;
+		goto probe_failure;
+	}
+
+	i2c_set_clientdata(client, mt9e013_sensorw);
+	mt9e013_init_client(client);
+	mt9e013_client = client;
+
+
+	CDBG("mt9e013_probe succeeded! rc = %d\n", rc);
+	return 0;
+
+probe_failure:
+	CDBG("mt9e013_probe failed! rc = %d\n", rc);
+	return rc;
+}
+
+static int mt9e013_send_wb_info(struct wb_info_cfg *wb)
+{
+	return 0;
+
+} /*end of mt9e013_send_wb_info*/
+
+static int __exit mt9e013_remove(struct i2c_client *client)
+{
+	struct mt9e013_work_t *sensorw = i2c_get_clientdata(client);
+	free_irq(client->irq, sensorw);
+	mt9e013_client = NULL;
+	kfree(sensorw);
+	return 0;
+}
+
+static struct i2c_driver mt9e013_i2c_driver = {
+	.id_table = mt9e013_i2c_id,
+	.probe  = mt9e013_i2c_probe,
+	.remove = __exit_p(mt9e013_remove),
+	.driver = {
+		.name = "mt9e013",
+	},
+};
+
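+/* mt9e013_sensor_config: ioctl-style entry point.  It copies a
+ * sensor_cfg_data block from user space, dispatches on cfgtype under
+ * mt9e013_mut, and copies the block back for the CFG_GET_* queries. */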
+int mt9e013_sensor_config(void __user *argp)
+{
+	struct sensor_cfg_data cdata;
+	long   rc = 0;
+	if (copy_from_user(&cdata,
+		(void *)argp,
+		sizeof(struct sensor_cfg_data)))
+		return -EFAULT;
+	mutex_lock(&mt9e013_mut);
+	CDBG("mt9e013_sensor_config: cfgtype = %d\n",
+	cdata.cfgtype);
+		switch (cdata.cfgtype) {
+		case CFG_GET_PICT_FPS:
+			mt9e013_get_pict_fps(
+				cdata.cfg.gfps.prevfps,
+				&(cdata.cfg.gfps.pictfps));
+
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_GET_PREV_L_PF:
+			cdata.cfg.prevl_pf =
+			mt9e013_get_prev_lines_pf();
+
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_GET_PREV_P_PL:
+			cdata.cfg.prevp_pl =
+				mt9e013_get_prev_pixels_pl();
+
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_GET_PICT_L_PF:
+			cdata.cfg.pictl_pf =
+				mt9e013_get_pict_lines_pf();
+
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_GET_PICT_P_PL:
+			cdata.cfg.pictp_pl =
+				mt9e013_get_pict_pixels_pl();
+
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_GET_PICT_MAX_EXP_LC:
+			cdata.cfg.pict_max_exp_lc =
+				mt9e013_get_pict_max_exp_lc();
+
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_SET_FPS:
+		case CFG_SET_PICT_FPS:
+			rc = mt9e013_set_fps(&(cdata.cfg.fps));
+			break;
+
+		case CFG_SET_EXP_GAIN:
+			rc =
+				mt9e013_write_exp_gain(
+					cdata.cfg.exp_gain.gain,
+					cdata.cfg.exp_gain.line);
+			break;
+
+		case CFG_SET_PICT_EXP_GAIN:
+			rc =
+				mt9e013_set_pict_exp_gain(
+				cdata.cfg.exp_gain.gain,
+				cdata.cfg.exp_gain.line);
+			break;
+
+		case CFG_SET_MODE:
+			rc = mt9e013_set_sensor_mode(cdata.mode,
+					cdata.rs);
+			break;
+
+		case CFG_PWR_DOWN:
+			rc = mt9e013_power_down();
+			break;
+
+		case CFG_MOVE_FOCUS:
+			rc =
+				mt9e013_move_focus(
+				cdata.cfg.focus.dir,
+				cdata.cfg.focus.steps);
+			break;
+
+		case CFG_SET_DEFAULT_FOCUS:
+			rc =
+				mt9e013_set_default_focus(
+				cdata.cfg.focus.steps);
+			break;
+
+		case CFG_GET_AF_MAX_STEPS:
+			cdata.max_steps = MT9E013_TOTAL_STEPS_NEAR_TO_FAR;
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_SET_EFFECT:
+			rc = mt9e013_set_default_focus(
+				cdata.cfg.effect);
+			break;
+
+
+		case CFG_SEND_WB_INFO:
+			rc = mt9e013_send_wb_info(
+				&(cdata.cfg.wb_info));
+			break;
+
+		default:
+			rc = -EFAULT;
+			break;
+		}
+
+	mutex_unlock(&mt9e013_mut);
+
+	return rc;
+}
+
+static int mt9e013_sensor_release(void)
+{
+	int rc = 0;
+	mutex_lock(&mt9e013_mut);
+	mt9e013_power_down();
+	gpio_set_value_cansleep(mt9e013_ctrl->sensordata->sensor_reset, 0);
+	msleep(5);
+	gpio_free(mt9e013_ctrl->sensordata->sensor_reset);
+	kfree(mt9e013_ctrl);
+	mt9e013_ctrl = NULL;
+	CDBG("mt9e013_release completed\n");
+	mutex_unlock(&mt9e013_mut);
+
+	return rc;
+}
+
+static int mt9e013_sensor_probe(const struct msm_camera_sensor_info *info,
+		struct msm_sensor_ctrl *s)
+{
+	int rc = 0;
+	rc = i2c_add_driver(&mt9e013_i2c_driver);
+	if (rc < 0 || mt9e013_client == NULL) {
+		rc = -ENOTSUPP;
+		CDBG("I2C add driver failed\n");
+		goto probe_fail;
+	}
+	msm_camio_clk_rate_set(MT9E013_MASTER_CLK_RATE);
+	rc = mt9e013_probe_init_sensor(info);
+	if (rc < 0)
+		goto probe_fail;
+	s->s_init = mt9e013_sensor_open_init;
+	s->s_release = mt9e013_sensor_release;
+	s->s_config  = mt9e013_sensor_config;
+	s->s_mount_angle = info->sensor_platform_info->mount_angle;
+	gpio_set_value_cansleep(info->sensor_reset, 0);
+	mt9e013_probe_init_done(info);
+	return rc;
+
+probe_fail:
+	CDBG("mt9e013_sensor_probe: SENSOR PROBE FAILS!\n");
+	return rc;
+}
+
+static int __mt9e013_probe(struct platform_device *pdev)
+{
+	return msm_camera_drv_start(pdev, mt9e013_sensor_probe);
+}
+
+static struct platform_driver msm_camera_driver = {
+	.probe = __mt9e013_probe,
+	.driver = {
+		.name = "msm_camera_mt9e013",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init mt9e013_init(void)
+{
+	return platform_driver_register(&msm_camera_driver);
+}
+
+module_init(mt9e013_init);
+void mt9e013_exit(void)
+{
+	i2c_del_driver(&mt9e013_i2c_driver);
+}
+MODULE_DESCRIPTION("Aptina 8 MP Bayer sensor driver");
+MODULE_LICENSE("GPL v2");
+
+static bool streaming = true;
+
+static int mt9e013_focus_test(void *data, u64 *val)
+{
+	int i = 0;
+	mt9e013_set_default_focus(0);
+
+	for (i = 90; i < 256; i++) {
+		mt9e013_i2c_write_w_sensor(REG_VCM_NEW_CODE, i);
+		msleep(5000);
+	}
+	msleep(5000);
+	for (i = 255; i > 90; i--) {
+		mt9e013_i2c_write_w_sensor(REG_VCM_NEW_CODE, i);
+		msleep(5000);
+	}
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cam_focus, mt9e013_focus_test,
+			NULL, "%lld\n");
+
+static int mt9e013_step_test(void *data, u64 *val)
+{
+	int i = 0;
+	mt9e013_set_default_focus(0);
+
+	for (i = 0; i < MT9E013_TOTAL_STEPS_NEAR_TO_FAR; i++) {
+		mt9e013_move_focus(MOVE_NEAR, 1);
+		msleep(5000);
+	}
+
+	mt9e013_move_focus(MOVE_FAR, MT9E013_TOTAL_STEPS_NEAR_TO_FAR);
+	msleep(5000);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cam_step, mt9e013_step_test,
+			NULL, "%lld\n");
+
+static int cam_debug_stream_set(void *data, u64 val)
+{
+	int rc = 0;
+
+	if (val) {
+		mt9e013_start_stream();
+		streaming = 1;
+	} else {
+		mt9e013_stop_stream();
+		streaming = 0;
+	}
+
+	return rc;
+}
+
+static int cam_debug_stream_get(void *data, u64 *val)
+{
+	*val = streaming;
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cam_stream, cam_debug_stream_get,
+			cam_debug_stream_set, "%llu\n");
+
+
+static int cam_debug_init(void)
+{
+	struct dentry *cam_dir;
+	debugfs_base = debugfs_create_dir("sensor", NULL);
+	if (!debugfs_base)
+		return -ENOMEM;
+
+	cam_dir = debugfs_create_dir("mt9e013", debugfs_base);
+	if (!cam_dir)
+		return -ENOMEM;
+
+	if (!debugfs_create_file("focus", S_IRUGO | S_IWUSR, cam_dir,
+							 NULL, &cam_focus))
+		return -ENOMEM;
+	if (!debugfs_create_file("step", S_IRUGO | S_IWUSR, cam_dir,
+							 NULL, &cam_step))
+		return -ENOMEM;
+	if (!debugfs_create_file("stream", S_IRUGO | S_IWUSR, cam_dir,
+							 NULL, &cam_stream))
+		return -ENOMEM;
+
+	return 0;
+}
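+
+/* The debugfs nodes above live under <debugfs>/sensor/mt9e013/.  For
+ * example, assuming debugfs is mounted at the usual /sys/kernel/debug:
+ *   echo 0 > /sys/kernel/debug/sensor/mt9e013/stream   (stop streaming)
+ *   cat /sys/kernel/debug/sensor/mt9e013/focus         (run the focus sweep)
+ * The mount point is a convention of the platform, not set by this driver. */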
+
+
+
diff --git a/drivers/media/video/msm/mt9e013.h b/drivers/media/video/msm/mt9e013.h
new file mode 100644
index 0000000..9052a35
--- /dev/null
+++ b/drivers/media/video/msm/mt9e013.h
@@ -0,0 +1,174 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MT9E013_H
+#define MT9E013_H
+#include <linux/types.h>
+#include <mach/board.h>
+extern struct mt9e013_reg mt9e013_regs;
+struct reg_struct_init {
+	uint8_t reg_0x0112;      /* 0x0112*/
+	uint8_t reg_0x0113;      /* 0x0113*/
+	uint8_t vt_pix_clk_div;  /* 0x0301*/
+	uint8_t pre_pll_clk_div; /* 0x0305*/
+	uint8_t pll_multiplier;  /* 0x0307*/
+	uint8_t op_pix_clk_div;  /* 0x0309*/
+	uint8_t reg_0x3030;      /*0x3030*/
+	uint8_t reg_0x0111;      /*0x0111*/
+	uint8_t reg_0x0b00;      /*0x0b00*/
+	uint8_t reg_0x3001;      /*0x3001*/
+	uint8_t reg_0x3004;      /*0x3004*/
+	uint8_t reg_0x3007;      /*0x3007*/
+	uint8_t reg_0x3016;      /*0x3016*/
+	uint8_t reg_0x301d;      /*0x301d*/
+	uint8_t reg_0x317e;      /*0x317E*/
+	uint8_t reg_0x317f;      /*0x317F*/
+	uint8_t reg_0x3400;      /*0x3400*/
+	uint8_t reg_0x0b06;      /*0x0b06*/
+	uint8_t reg_0x0b07;      /*0x0b07*/
+	uint8_t reg_0x0b08;      /*0x0b08*/
+	uint8_t reg_0x0b09;      /*0x0b09*/
+	uint8_t reg_0x0136;
+	uint8_t reg_0x0137;
+	/* Edof */
+	uint8_t reg_0x0b83;      /*0x0b83*/
+	uint8_t reg_0x0b84;      /*0x0b84*/
+	uint8_t reg_0x0b85;      /*0x0b85*/
+	uint8_t reg_0x0b88;      /*0x0b88*/
+	uint8_t reg_0x0b89;      /*0x0b89*/
+	uint8_t reg_0x0b8a;      /*0x0b8a*/
+	};
+struct reg_struct {
+	uint8_t coarse_integration_time_hi; /*REG_COARSE_INTEGRATION_TIME_HI*/
+	uint8_t coarse_integration_time_lo; /*REG_COARSE_INTEGRATION_TIME_LO*/
+	uint8_t analogue_gain_code_global;
+	uint8_t frame_length_lines_hi; /* 0x0340*/
+	uint8_t frame_length_lines_lo; /* 0x0341*/
+	uint8_t line_length_pck_hi;    /* 0x0342*/
+	uint8_t line_length_pck_lo;    /* 0x0343*/
+	uint8_t reg_0x3005;   /* 0x3005*/
+	uint8_t reg_0x3010;  /* 0x3010*/
+	uint8_t reg_0x3011;  /* 0x3011*/
+	uint8_t reg_0x301a;  /* 0x301a*/
+	uint8_t reg_0x3035;  /* 0x3035*/
+	uint8_t reg_0x3036;   /* 0x3036*/
+	uint8_t reg_0x3041;  /*0x3041*/
+	uint8_t reg_0x3042;  /*0x3042*/
+	uint8_t reg_0x3045;  /*0x3045*/
+	uint8_t reg_0x0b80;   /* 0x0b80*/
+	uint8_t reg_0x0900;   /*0x0900*/
+	uint8_t reg_0x0901;   /* 0x0901*/
+	uint8_t reg_0x0902;   /*0x0902*/
+	uint8_t reg_0x0383;   /*0x0383*/
+	uint8_t reg_0x0387;   /* 0x0387*/
+	uint8_t reg_0x034c;   /* 0x034c*/
+	uint8_t reg_0x034d;   /*0x034d*/
+	uint8_t reg_0x034e;   /* 0x034e*/
+	uint8_t reg_0x034f;   /* 0x034f*/
+	uint8_t reg_0x1716; /*0x1716*/
+	uint8_t reg_0x1717; /*0x1717*/
+	uint8_t reg_0x1718; /*0x1718*/
+	uint8_t reg_0x1719; /*0x1719*/
+	uint8_t reg_0x3210;/*0x3210*/
+	uint8_t reg_0x111; /*0x111*/
+	uint8_t reg_0x3410;  /*0x3410*/
+	uint8_t reg_0x3098;
+	uint8_t reg_0x309D;
+	uint8_t reg_0x0200;
+	uint8_t reg_0x0201;
+	};
+struct mt9e013_i2c_reg_conf {
+	unsigned short waddr;
+	unsigned short wdata;
+};
+
+enum mt9e013_test_mode_t {
+	TEST_OFF,
+	TEST_1,
+	TEST_2,
+	TEST_3
+};
+
+enum mt9e013_resolution_t {
+	QTR_SIZE,
+	FULL_SIZE,
+	HFR_60FPS,
+	HFR_90FPS,
+	HFR_120FPS,
+	INVALID_SIZE
+};
+enum mt9e013_setting {
+	RES_PREVIEW,
+	RES_CAPTURE
+};
+enum mt9e013_reg_update {
+	/* Sensor registers that need to be updated during initialization */
+	REG_INIT,
+	/* Sensor registers that need periodic I2C writes */
+	UPDATE_PERIODIC,
+	/* All the sensor registers will be updated */
+	UPDATE_ALL,
+	/* Not valid update */
+	UPDATE_INVALID
+};
+
+enum mt9e013_reg_pll {
+	E013_VT_PIX_CLK_DIV,
+	E013_VT_SYS_CLK_DIV,
+	E013_PRE_PLL_CLK_DIV,
+	E013_PLL_MULTIPLIER,
+	E013_OP_PIX_CLK_DIV,
+	E013_OP_SYS_CLK_DIV
+};
+
+enum mt9e013_reg_mode {
+	E013_X_ADDR_START,
+	E013_X_ADDR_END,
+	E013_Y_ADDR_START,
+	E013_Y_ADDR_END,
+	E013_X_OUTPUT_SIZE,
+	E013_Y_OUTPUT_SIZE,
+	E013_DATAPATH_SELECT,
+	E013_READ_MODE,
+	E013_ANALOG_CONTROL5,
+	E013_DAC_LD_4_5,
+	E013_SCALING_MODE,
+	E013_SCALE_M,
+	E013_LINE_LENGTH_PCK,
+	E013_FRAME_LENGTH_LINES,
+	E013_COARSE_INTEGRATION_TIME,
+	E013_FINE_INTEGRATION_TIME,
+	E013_FINE_CORRECTION
+};
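+
+/* The enum values above are array indices into the per-mode register
+ * tables (reg_prev, reg_snap, reg_60fps, reg_120fps); they must stay in
+ * the same order as the table entries in mt9e013_reg.c. */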
+
+struct mt9e013_reg {
+	const struct mt9e013_i2c_reg_conf *reg_mipi;
+	const unsigned short reg_mipi_size;
+	const struct mt9e013_i2c_reg_conf *rec_settings;
+	const unsigned short rec_size;
+	const struct mt9e013_i2c_reg_conf *reg_pll;
+	const unsigned short reg_pll_size;
+	const struct mt9e013_i2c_reg_conf *reg_pll_60fps;
+	const unsigned short reg_pll_60fps_size;
+	const struct mt9e013_i2c_reg_conf *reg_pll_120fps;
+	const unsigned short reg_pll_120fps_size;
+	const struct mt9e013_i2c_reg_conf *reg_prev;
+	const unsigned short reg_prev_size;
+	const struct mt9e013_i2c_reg_conf *reg_snap;
+	const unsigned short reg_snap_size;
+	const struct mt9e013_i2c_reg_conf *reg_60fps;
+	const unsigned short reg_60fps_size;
+	const struct mt9e013_i2c_reg_conf *reg_120fps;
+	const unsigned short reg_120fps_size;
+};
+#endif /* MT9E013_H */
diff --git a/drivers/media/video/msm/mt9e013_reg.c b/drivers/media/video/msm/mt9e013_reg.c
new file mode 100644
index 0000000..9a4bd7e
--- /dev/null
+++ b/drivers/media/video/msm/mt9e013_reg.c
@@ -0,0 +1,234 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+
+#include "mt9e013.h"
+
+static struct mt9e013_i2c_reg_conf mipi_settings[] = {
+	/*Disable embedded data*/
+	{0x3064, 0x7800},/*SMIA_TEST*/
+	/*configure 2-lane MIPI*/
+	{0x31AE, 0x0202},/*SERIAL_FORMAT*/
+	{0x31B8, 0x0E3F},/*MIPI_TIMING_2*/
+	/*set data to RAW10 format*/
+	{0x0112, 0x0A0A},/*CCP_DATA_FORMAT*/
+	{0x30F0, 0x8000},/*VCM CONTROL*/
+};
+
+/*PLL Configuration
+(Ext=24MHz, vt_pix_clk=174MHz, op_pix_clk=69.6MHz)*/
+static struct mt9e013_i2c_reg_conf pll_settings[] = {
+	{0x0300, 0x0004},/*VT_PIX_CLK_DIV*/
+	{0x0302, 0x0001},/*VT_SYS_CLK_DIV*/
+	{0x0304, 0x0002},/*PRE_PLL_CLK_DIV*/
+	{0x0306, 0x003A},/*PLL_MULTIPLIER*/
+	{0x0308, 0x000A},/*OP_PIX_CLK_DIV*/
+	{0x030A, 0x0001},/*OP_SYS_CLK_DIV*/
+};
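+
+/* Sanity check on the header comment above: 24 MHz / PRE_PLL_CLK_DIV(2)
+ * * PLL_MULTIPLIER(0x3A = 58) = 696 MHz; / VT_SYS_CLK_DIV(1)
+ * / VT_PIX_CLK_DIV(4) = 174 MHz vt_pix_clk, and / OP_SYS_CLK_DIV(1)
+ * / OP_PIX_CLK_DIV(10) = 69.6 MHz op_pix_clk. */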
+
+static struct mt9e013_i2c_reg_conf prev_settings[] = {
+	/*Output Size (1632x1224)*/
+	{0x0344, 0x0008},/*X_ADDR_START*/
+	{0x0348, 0x0CC9},/*X_ADDR_END*/
+	{0x0346, 0x0008},/*Y_ADDR_START*/
+	{0x034A, 0x0999},/*Y_ADDR_END*/
+	{0x034C, 0x0660},/*X_OUTPUT_SIZE*/
+	{0x034E, 0x04C8},/*Y_OUTPUT_SIZE*/
+	{0x306E, 0xFCB0},/*DATAPATH_SELECT*/
+	{0x3040, 0x04C3},/*READ_MODE*/
+	{0x3178, 0x0000},/*ANALOG_CONTROL5*/
+	{0x3ED0, 0x1E24},/*DAC_LD_4_5*/
+	{0x0400, 0x0002},/*SCALING_MODE*/
+	{0x0404, 0x0010},/*SCALE_M*/
+	/*Timing configuration*/
+	{0x0342, 0x1018},/*LINE_LENGTH_PCK*/
+	{0x0340, 0x055B},/*FRAME_LENGTH_LINES*/
+	{0x0202, 0x0557},/*COARSE_INTEGRATION_TIME*/
+	{0x3014, 0x0846},/*FINE_INTEGRATION_TIME_*/
+	{0x3010, 0x0130},/*FINE_CORRECTION*/
+};
+
+static struct mt9e013_i2c_reg_conf snap_settings[] = {
+	/*Output Size (3264x2448)*/
+	{0x0344, 0x0008},/*X_ADDR_START */
+	{0x0348, 0x0CD7},/*X_ADDR_END*/
+	{0x0346, 0x0008},/*Y_ADDR_START */
+	{0x034A, 0x09A7},/*Y_ADDR_END*/
+	{0x034C, 0x0CD0},/*X_OUTPUT_SIZE*/
+	{0x034E, 0x09A0},/*Y_OUTPUT_SIZE*/
+	{0x306E, 0xFC80},/*DATAPATH_SELECT*/
+	{0x3040, 0x0041},/*READ_MODE*/
+	{0x3178, 0x0000},/*ANALOG_CONTROL5*/
+	{0x3ED0, 0x1E24},/*DAC_LD_4_5*/
+	{0x0400, 0x0000},/*SCALING_MODE*/
+	{0x0404, 0x0010},/*SCALE_M*/
+	/*Timing configuration*/
+	{0x0342, 0x13F8},/*LINE_LENGTH_PCK*/
+	{0x0340, 0x0A2F},/*FRAME_LENGTH_LINES*/
+	{0x0202, 0x0A1F},/*COARSE_INTEGRATION_TIME*/
+	{0x3014, 0x03F6},/*FINE_INTEGRATION_TIME_ */
+	{0x3010, 0x0078},/*FINE_CORRECTION*/
+};
+
+static struct mt9e013_i2c_reg_conf pll_settings_60fps[] = {
+	{0x0300, 0x0004},/*VT_PIX_CLK_DIV*/
+	{0x0302, 0x0001},/*VT_SYS_CLK_DIV*/
+	{0x0304, 0x0002},/*PRE_PLL_CLK_DIV*/
+	{0x0306, 0x0042},/*PLL_MULTIPLIER*/
+	{0x0308, 0x000A},/*OP_PIX_CLK_DIV*/
+	{0x030A, 0x0001},/*OP_SYS_CLK_DIV*/
+};
+
+static struct mt9e013_i2c_reg_conf prev_settings_60fps[] = {
+	/*Output Size (1632x1224)*/
+	{0x0344, 0x0008},/*X_ADDR_START*/
+	{0x0348, 0x0CC5},/*X_ADDR_END*/
+	{0x0346, 0x013a},/*Y_ADDR_START*/
+	{0x034A, 0x0863},/*Y_ADDR_END*/
+	{0x034C, 0x0660},/*X_OUTPUT_SIZE*/
+	{0x034E, 0x0396},/*Y_OUTPUT_SIZE*/
+	{0x306E, 0xFC80},/*DATAPATH_SELECT*/
+	{0x3040, 0x00C3},/*READ_MODE*/
+	{0x3178, 0x0000},/*ANALOG_CONTROL5*/
+	{0x3ED0, 0x1E24},/*DAC_LD_4_5*/
+	{0x0400, 0x0000},/*SCALING_MODE*/
+	{0x0404, 0x0010},/*SCALE_M*/
+	/*Timing configuration*/
+	{0x0342, 0x0BE8},/*LINE_LENGTH_PCK*/
+	{0x0340, 0x0425},/*FRAME_LENGTH_LINES*/
+	{0x0202, 0x0425},/*COARSE_INTEGRATION_TIME*/
+	{0x3014, 0x03F6},/*FINE_INTEGRATION_TIME_*/
+	{0x3010, 0x0078},/*FINE_CORRECTION*/
+};
+
+static struct mt9e013_i2c_reg_conf pll_settings_120fps[] = {
+	{0x0300, 0x0005},/*VT_PIX_CLK_DIV*/
+	{0x0302, 0x0001},/*VT_SYS_CLK_DIV*/
+	{0x0304, 0x0002},/*PRE_PLL_CLK_DIV*/
+	{0x0306, 0x0052},/*PLL_MULTIPLIER*/
+	{0x0308, 0x000A},/*OP_PIX_CLK_DIV*/
+	{0x030A, 0x0001},/*OP_SYS_CLK_DIV*/
+};
+
+static struct mt9e013_i2c_reg_conf prev_settings_120fps[] = {
+	{0x0344, 0x0008},/*X_ADDR_START*/
+	{0x0348, 0x0685},/*X_ADDR_END*/
+	{0x0346, 0x013a},/*Y_ADDR_START*/
+	{0x034A, 0x055B},/*Y_ADDR_END*/
+	{0x034C, 0x0340},/*X_OUTPUT_SIZE*/
+	{0x034E, 0x0212},/*Y_OUTPUT_SIZE*/
+	{0x306E, 0xFC80},/*DATAPATH_SELECT*/
+	{0x3040, 0x00C3},/*READ_MODE*/
+	{0x3178, 0x0000},/*ANALOG_CONTROL5*/
+	{0x3ED0, 0x1E24},/*DAC_LD_4_5*/
+	{0x0400, 0x0000},/*SCALING_MODE*/
+	{0x0404, 0x0010},/*SCALE_M*/
+	/*Timing configuration*/
+	{0x0342, 0x0970},/*LINE_LENGTH_PCK*/
+	{0x0340, 0x02A1},/*FRAME_LENGTH_LINES*/
+	{0x0202, 0x02A1},/*COARSE_INTEGRATION_TIME*/
+	{0x3014, 0x03F6},/*FINE_INTEGRATION_TIME_*/
+	{0x3010, 0x0078},/*FINE_CORRECTION*/
+};
+
+static struct mt9e013_i2c_reg_conf recommend_settings[] = {
+	{0x3044, 0x0590},
+	{0x306E, 0xFC80},
+	{0x30B2, 0xC000},
+	{0x30D6, 0x0800},
+	{0x316C, 0xB42F},
+	{0x316E, 0x869C},
+	{0x3170, 0x210E},
+	{0x317A, 0x010E},
+	{0x31E0, 0x1FB9},
+	{0x31E6, 0x07FC},
+	{0x37C0, 0x0000},
+	{0x37C2, 0x0000},
+	{0x37C4, 0x0000},
+	{0x37C6, 0x0000},
+	{0x3E02, 0x8801},
+	{0x3E04, 0x2301},
+	{0x3E06, 0x8449},
+	{0x3E08, 0x6841},
+	{0x3E0A, 0x400C},
+	{0x3E0C, 0x1001},
+	{0x3E0E, 0x2103},
+	{0x3E10, 0x4B41},
+	{0x3E12, 0x4B26},
+	{0x3E16, 0x8802},
+	{0x3E18, 0x84FF},
+	{0x3E1A, 0x8601},
+	{0x3E1C, 0x8401},
+	{0x3E1E, 0x840A},
+	{0x3E20, 0xFF00},
+	{0x3E22, 0x8401},
+	{0x3E24, 0x00FF},
+	{0x3E26, 0x0088},
+	{0x3E28, 0x2E8A},
+	{0x3E32, 0x8801},
+	{0x3E34, 0x4024},
+	{0x3E38, 0x8469},
+	{0x3E3C, 0x2301},
+	{0x3E3E, 0x3E25},
+	{0x3E40, 0x1C01},
+	{0x3E42, 0x8486},
+	{0x3E44, 0x8401},
+	{0x3E46, 0x00FF},
+	{0x3E48, 0x8401},
+	{0x3E4A, 0x8601},
+	{0x3E4C, 0x8402},
+	{0x3E4E, 0x00FF},
+	{0x3E50, 0x6623},
+	{0x3E52, 0x8340},
+	{0x3E54, 0x00FF},
+	{0x3E56, 0x4A42},
+	{0x3E58, 0x2203},
+	{0x3E5A, 0x674D},
+	{0x3E5C, 0x3F25},
+	{0x3E5E, 0x846A},
+	{0x3E60, 0x4C01},
+	{0x3E62, 0x8401},
+	{0x3E66, 0x3901},
+	{0x3ECC, 0x00EB},
+	{0x3ED0, 0x1E24},
+	{0x3ED4, 0xAFC4},
+	{0x3ED6, 0x909B},
+	{0x3ED8, 0x0006},
+	{0x3EDA, 0xCFC6},
+	{0x3EDC, 0x4FE4},
+	{0x3EE0, 0x2424},
+	{0x3EE2, 0x9797},
+	{0x3EE4, 0xC100},
+	{0x3EE6, 0x0540}
+};
+
+struct mt9e013_reg mt9e013_regs = {
+	.reg_mipi = &mipi_settings[0],
+	.reg_mipi_size = ARRAY_SIZE(mipi_settings),
+	.rec_settings = &recommend_settings[0],
+	.rec_size = ARRAY_SIZE(recommend_settings),
+	.reg_pll = &pll_settings[0],
+	.reg_pll_size = ARRAY_SIZE(pll_settings),
+	.reg_prev = &prev_settings[0],
+	.reg_pll_60fps = &pll_settings_60fps[0],
+	.reg_pll_60fps_size = ARRAY_SIZE(pll_settings_60fps),
+	.reg_pll_120fps = &pll_settings_120fps[0],
+	.reg_pll_120fps_size = ARRAY_SIZE(pll_settings_120fps),
+	.reg_prev_size = ARRAY_SIZE(prev_settings),
+	.reg_snap = &snap_settings[0],
+	.reg_snap_size = ARRAY_SIZE(snap_settings),
+	.reg_60fps = &prev_settings_60fps[0],
+	.reg_60fps_size = ARRAY_SIZE(prev_settings_60fps),
+	.reg_120fps = &prev_settings_120fps[0],
+	.reg_120fps_size = ARRAY_SIZE(prev_settings_120fps),
+};
diff --git a/drivers/media/video/msm/mt9p012.h b/drivers/media/video/msm/mt9p012.h
new file mode 100644
index 0000000..0579813
--- /dev/null
+++ b/drivers/media/video/msm/mt9p012.h
@@ -0,0 +1,59 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MT9P012_H
+#define MT9P012_H
+
+#include <linux/types.h>
+
+extern struct mt9p012_reg mt9p012_regs;	/* from mt9p012_reg.c */
+
+struct reg_struct {
+	uint16_t vt_pix_clk_div;     /* 0x0300 */
+	uint16_t vt_sys_clk_div;     /* 0x0302 */
+	uint16_t pre_pll_clk_div;    /* 0x0304 */
+	uint16_t pll_multiplier;     /* 0x0306 */
+	uint16_t op_pix_clk_div;     /* 0x0308 */
+	uint16_t op_sys_clk_div;     /* 0x030A */
+	uint16_t scale_m;            /* 0x0404 */
+	uint16_t row_speed;          /* 0x3016 */
+	uint16_t x_addr_start;       /* 0x3004 */
+	uint16_t x_addr_end;         /* 0x3008 */
+	uint16_t y_addr_start;       /* 0x3002 */
+	uint16_t y_addr_end;         /* 0x3006 */
+	uint16_t read_mode;          /* 0x3040 */
+	uint16_t x_output_size;      /* 0x034C */
+	uint16_t y_output_size;      /* 0x034E */
+	uint16_t line_length_pck;    /* 0x300C */
+	uint16_t frame_length_lines; /* 0x300A */
+	uint16_t coarse_int_time;    /* 0x3012 */
+	uint16_t fine_int_time;      /* 0x3014 */
+};
+
+
+struct mt9p012_i2c_reg_conf {
+	unsigned short waddr;
+	unsigned short wdata;
+};
+
+
+struct mt9p012_reg {
+	struct reg_struct const *reg_pat;
+	uint16_t reg_pat_size;
+	struct mt9p012_i2c_reg_conf const *ttbl;
+	uint16_t ttbl_size;
+	struct mt9p012_i2c_reg_conf const *rftbl;
+	uint16_t rftbl_size;
+};
+
+#endif /* MT9P012_H */
diff --git a/drivers/media/video/msm/mt9p012_bam.c b/drivers/media/video/msm/mt9p012_bam.c
new file mode 100644
index 0000000..9197380
--- /dev/null
+++ b/drivers/media/video/msm/mt9p012_bam.c
@@ -0,0 +1,1426 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/kernel.h>
+#include <media/msm_camera.h>
+#include <mach/gpio.h>
+#include <mach/camera.h>
+#include "mt9p012.h"
+
+/*=============================================================
+    SENSOR REGISTER DEFINES
+==============================================================*/
+#define MT9P012_REG_MODEL_ID         0x0000
+#define MT9P012_MODEL_ID             0x2801
+#define REG_GROUPED_PARAMETER_HOLD   0x0104
+#define GROUPED_PARAMETER_HOLD       0x0100
+#define GROUPED_PARAMETER_UPDATE     0x0000
+#define REG_COARSE_INT_TIME          0x3012
+#define REG_VT_PIX_CLK_DIV           0x0300
+#define REG_VT_SYS_CLK_DIV           0x0302
+#define REG_PRE_PLL_CLK_DIV          0x0304
+#define REG_PLL_MULTIPLIER           0x0306
+#define REG_OP_PIX_CLK_DIV           0x0308
+#define REG_OP_SYS_CLK_DIV           0x030A
+#define REG_SCALE_M                  0x0404
+#define REG_FRAME_LENGTH_LINES       0x300A
+#define REG_LINE_LENGTH_PCK          0x300C
+#define REG_X_ADDR_START             0x3004
+#define REG_Y_ADDR_START             0x3002
+#define REG_X_ADDR_END               0x3008
+#define REG_Y_ADDR_END               0x3006
+#define REG_X_OUTPUT_SIZE            0x034C
+#define REG_Y_OUTPUT_SIZE            0x034E
+#define REG_FINE_INTEGRATION_TIME    0x3014
+#define REG_ROW_SPEED                0x3016
+#define MT9P012_REG_RESET_REGISTER   0x301A
+#define MT9P012_RESET_REGISTER_PWON  0x10CC
+#define MT9P012_RESET_REGISTER_PWOFF 0x10C8
+#define REG_READ_MODE                0x3040
+#define REG_GLOBAL_GAIN              0x305E
+#define REG_TEST_PATTERN_MODE        0x3070
+
+#define MT9P012_REV_7
+
+enum mt9p012_test_mode {
+	TEST_OFF,
+	TEST_1,
+	TEST_2,
+	TEST_3
+};
+
+enum mt9p012_resolution {
+	QTR_SIZE,
+	FULL_SIZE,
+	INVALID_SIZE
+};
+
+enum mt9p012_reg_update {
+	/* Sensor registers that need to be updated during initialization */
+	REG_INIT,
+	/* Sensor registers that need periodic I2C writes */
+	UPDATE_PERIODIC,
+	/* All the sensor registers will be updated */
+	UPDATE_ALL,
+	/* Not valid update */
+	UPDATE_INVALID
+};
+
+enum mt9p012_setting {
+	RES_PREVIEW,
+	RES_CAPTURE
+};
+
+/* actuator's Slave Address */
+#define MT9P012_AF_I2C_ADDR   0x0A
+
+/* AF Total steps parameters */
+#define MT9P012_STEPS_NEAR_TO_CLOSEST_INF  20
+#define MT9P012_TOTAL_STEPS_NEAR_TO_FAR    20
+
+#define MT9P012_MU5M0_PREVIEW_DUMMY_PIXELS 0
+#define MT9P012_MU5M0_PREVIEW_DUMMY_LINES  0
+
+/* Time in milliseconds to wait for the sensor to reset. */
+#define MT9P012_RESET_DELAY_MSECS   66
+
+/* for 20 fps preview */
+#define MT9P012_DEFAULT_CLOCK_RATE  24000000
+#define MT9P012_DEFAULT_MAX_FPS     26	/* ???? */
+
+struct mt9p012_work {
+	struct work_struct work;
+};
+static struct mt9p012_work *mt9p012_sensorw;
+static struct i2c_client *mt9p012_client;
+
+struct mt9p012_ctrl {
+	const struct msm_camera_sensor_info *sensordata;
+
+	int sensormode;
+	uint32_t fps_divider;	/* init to 1 * 0x00000400 */
+	uint32_t pict_fps_divider;	/* init to 1 * 0x00000400 */
+
+	uint16_t curr_lens_pos;
+	uint16_t init_curr_lens_pos;
+	uint16_t my_reg_gain;
+	uint32_t my_reg_line_count;
+
+	enum mt9p012_resolution prev_res;
+	enum mt9p012_resolution pict_res;
+	enum mt9p012_resolution curr_res;
+	enum mt9p012_test_mode set_test;
+};
+
+static uint16_t bam_macro, bam_infinite;
+static uint16_t bam_step_lookup_table[MT9P012_TOTAL_STEPS_NEAR_TO_FAR + 1];
+static uint16_t update_type = UPDATE_PERIODIC;
+static struct mt9p012_ctrl *mt9p012_ctrl;
+static DECLARE_WAIT_QUEUE_HEAD(mt9p012_wait_queue);
+DEFINE_MUTEX(mt9p012_mut);
+
+/*=============================================================*/
+
+static int mt9p012_i2c_rxdata(unsigned short saddr, int slength,
+			      unsigned char *rxdata, int rxlength)
+{
+	struct i2c_msg msgs[] = {
+		{
+			.addr = saddr,
+			.flags = 0,
+			.len = slength,
+			.buf = rxdata,
+		},
+		{
+			.addr = saddr,
+			.flags = I2C_M_RD,
+			.len = rxlength,
+			.buf = rxdata,
+		},
+	};
+
+	if (i2c_transfer(mt9p012_client->adapter, msgs, 2) < 0) {
+		CDBG("mt9p012_i2c_rxdata failed!\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+static int32_t mt9p012_i2c_read_b(unsigned short saddr, unsigned char raddr,
+				  unsigned short *rdata)
+{
+	int32_t rc = 0;
+	if (!rdata)
+		return -EIO;
+	rc = mt9p012_i2c_rxdata(saddr, 1, &raddr, 1);
+	if (rc < 0)
+		return rc;
+	*rdata = raddr;
+	if (rc < 0)
+		CDBG("mt9p012_i2c_read_b failed!\n");
+	return rc;
+}
+
+static int32_t mt9p012_i2c_read_w(unsigned short saddr, unsigned short raddr,
+				  unsigned short *rdata)
+{
+	int32_t rc = 0;
+	unsigned char buf[4];
+
+	if (!rdata)
+		return -EIO;
+
+	memset(buf, 0, sizeof(buf));
+
+	buf[0] = (raddr & 0xFF00) >> 8;
+	buf[1] = (raddr & 0x00FF);
+
+	rc = mt9p012_i2c_rxdata(saddr, 2, buf, 2);
+	if (rc < 0)
+		return rc;
+
+	*rdata = buf[0] << 8 | buf[1];
+
+	if (rc < 0)
+		CDBG("mt9p012_i2c_read failed!\n");
+
+	return rc;
+}
+
+static int32_t mt9p012_i2c_txdata(unsigned short saddr, unsigned char *txdata,
+				  int length)
+{
+	struct i2c_msg msg[] = {
+		{
+		 .addr = saddr,
+		 .flags = 0,
+		 .len = length,
+		 .buf = txdata,
+		 },
+	};
+
+	if (i2c_transfer(mt9p012_client->adapter, msg, 1) < 0) {
+		CDBG("mt9p012_i2c_txdata failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int32_t mt9p012_i2c_write_b(unsigned short saddr, unsigned short baddr,
+				   unsigned short bdata)
+{
+	int32_t rc = -EIO;
+	unsigned char buf[2];
+
+	memset(buf, 0, sizeof(buf));
+	buf[0] = baddr;
+	buf[1] = bdata;
+	rc = mt9p012_i2c_txdata(saddr, buf, 2);
+
+	if (rc < 0)
+		CDBG("i2c_write failed, saddr = 0x%x addr = 0x%x, val =0x%x!\n",
+		     saddr, baddr, bdata);
+
+	return rc;
+}
+
+static int32_t mt9p012_i2c_write_w(unsigned short saddr, unsigned short waddr,
+				   unsigned short wdata)
+{
+	int32_t rc = -EIO;
+	unsigned char buf[4];
+
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (waddr & 0xFF00) >> 8;
+	buf[1] = (waddr & 0x00FF);
+	buf[2] = (wdata & 0xFF00) >> 8;
+	buf[3] = (wdata & 0x00FF);
+
+	rc = mt9p012_i2c_txdata(saddr, buf, 4);
+
+	if (rc < 0)
+		CDBG("i2c_write_w failed, addr = 0x%x, val = 0x%x!\n",
+		     waddr, wdata);
+
+	return rc;
+}
+
+static int32_t mt9p012_i2c_write_w_table(struct mt9p012_i2c_reg_conf const
+					 *reg_conf_tbl, int num)
+{
+	int i;
+	int32_t rc = -EIO;
+
+	for (i = 0; i < num; i++) {
+		rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+					 reg_conf_tbl->waddr,
+					 reg_conf_tbl->wdata);
+		if (rc < 0)
+			break;
+		reg_conf_tbl++;
+	}
+
+	return rc;
+}
+
+static int32_t mt9p012_test(enum mt9p012_test_mode mo)
+{
+	int32_t rc = 0;
+
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+				 REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_HOLD);
+	if (rc < 0)
+		return rc;
+
+	if (mo == TEST_OFF)
+		return 0;
+	else {
+		rc = mt9p012_i2c_write_w_table(mt9p012_regs.ttbl,
+					       mt9p012_regs.ttbl_size);
+		if (rc < 0)
+			return rc;
+
+		rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+					 REG_TEST_PATTERN_MODE, (uint16_t) mo);
+		if (rc < 0)
+			return rc;
+	}
+
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+				 REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_UPDATE);
+	if (rc < 0)
+		return rc;
+
+	return rc;
+}
+
+static int32_t mt9p012_lens_shading_enable(uint8_t is_enable)
+{
+	int32_t rc = 0;
+
+	CDBG("%s: entered. enable = %d\n", __func__, is_enable);
+
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+				 REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_HOLD);
+	if (rc < 0)
+		return rc;
+
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr, 0x3780,
+				 ((uint16_t) is_enable) << 15);
+	if (rc < 0)
+		return rc;
+
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+				 REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_UPDATE);
+
+	CDBG("%s: exiting. rc = %d\n", __func__, rc);
+	return rc;
+}
+
+static int32_t mt9p012_set_lc(void)
+{
+	int32_t rc;
+
+	rc = mt9p012_i2c_write_w_table(mt9p012_regs.rftbl,
+				       mt9p012_regs.rftbl_size);
+
+	return rc;
+}
+
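+/* mt9p012_get_pict_fps: when previewing at quarter size, the snapshot fps
+ * is preview_fps * (prev_FLL * prev_LLP) / (snap_FLL * snap_LLP)
+ * * (snap_pll_multiplier / prev_pll_multiplier), computed in Q10 steps to
+ * stay in integer arithmetic. */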
+static void mt9p012_get_pict_fps(uint16_t fps, uint16_t *pfps)
+{
+	/* input fps is preview fps in Q8 format */
+	uint32_t divider;	/*Q10 */
+	uint32_t pclk_mult;	/*Q10 */
+
+	if (mt9p012_ctrl->prev_res == QTR_SIZE) {
+		divider = (uint32_t)
+		    (((mt9p012_regs.reg_pat[RES_PREVIEW].frame_length_lines *
+		       mt9p012_regs.reg_pat[RES_PREVIEW].line_length_pck) *
+		      0x00000400) /
+		     (mt9p012_regs.reg_pat[RES_CAPTURE].frame_length_lines *
+		      mt9p012_regs.reg_pat[RES_CAPTURE].line_length_pck));
+
+		pclk_mult =
+		    (uint32_t) ((mt9p012_regs.reg_pat[RES_CAPTURE].
+				 pll_multiplier * 0x00000400) /
+				(mt9p012_regs.reg_pat[RES_PREVIEW].
+				 pll_multiplier));
+	} else {
+		/* full size resolution used for preview. */
+		divider = 0x00000400;	/*1.0 */
+		pclk_mult = 0x00000400;	/*1.0 */
+	}
+
+	/* Verify PCLK settings and frame sizes. */
+	*pfps = (uint16_t) (fps * divider * pclk_mult / 0x00000400 /
+			    0x00000400);
+}
+
+static uint16_t mt9p012_get_prev_lines_pf(void)
+{
+	if (mt9p012_ctrl->prev_res == QTR_SIZE)
+		return mt9p012_regs.reg_pat[RES_PREVIEW].frame_length_lines;
+	else
+		return mt9p012_regs.reg_pat[RES_CAPTURE].frame_length_lines;
+}
+
+static uint16_t mt9p012_get_prev_pixels_pl(void)
+{
+	if (mt9p012_ctrl->prev_res == QTR_SIZE)
+		return mt9p012_regs.reg_pat[RES_PREVIEW].line_length_pck;
+	else
+		return mt9p012_regs.reg_pat[RES_CAPTURE].line_length_pck;
+}
+
+static uint16_t mt9p012_get_pict_lines_pf(void)
+{
+	return mt9p012_regs.reg_pat[RES_CAPTURE].frame_length_lines;
+}
+
+static uint16_t mt9p012_get_pict_pixels_pl(void)
+{
+	return mt9p012_regs.reg_pat[RES_CAPTURE].line_length_pck;
+}
+
+static uint32_t mt9p012_get_pict_max_exp_lc(void)
+{
+	uint16_t snapshot_lines_per_frame;
+
+	if (mt9p012_ctrl->pict_res == QTR_SIZE)
+		snapshot_lines_per_frame =
+		    mt9p012_regs.reg_pat[RES_PREVIEW].frame_length_lines - 1;
+	else
+		snapshot_lines_per_frame =
+		    mt9p012_regs.reg_pat[RES_CAPTURE].frame_length_lines - 1;
+
+	return snapshot_lines_per_frame * 24;
+}
+
+static int32_t mt9p012_set_fps(struct fps_cfg *fps)
+{
+	/* input is new fps in Q10 format */
+	int32_t rc = 0;
+	enum mt9p012_setting setting;
+
+	mt9p012_ctrl->fps_divider = fps->fps_div;
+	mt9p012_ctrl->pict_fps_divider = fps->pict_fps_div;
+
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+				 REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_HOLD);
+	if (rc < 0)
+		return -EBUSY;
+
+	if (mt9p012_ctrl->sensormode == SENSOR_PREVIEW_MODE)
+		setting = RES_PREVIEW;
+	else
+		setting = RES_CAPTURE;
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+			REG_FRAME_LENGTH_LINES,
+			(mt9p012_regs.reg_pat[setting].frame_length_lines *
+			fps->fps_div / 0x00000400));
+	if (rc < 0)
+		return rc;
+
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+				 REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_UPDATE);
+
+	return rc;
+}
+
+static int32_t mt9p012_write_exp_gain(uint16_t gain, uint32_t line)
+{
+	uint16_t max_legal_gain = 0x01FF;
+	uint32_t line_length_ratio = 0x00000400;
+	enum mt9p012_setting setting;
+	int32_t rc = 0;
+
+	CDBG("Line:%d mt9p012_write_exp_gain \n", __LINE__);
+
+	if (mt9p012_ctrl->sensormode == SENSOR_PREVIEW_MODE) {
+		mt9p012_ctrl->my_reg_gain = gain;
+		mt9p012_ctrl->my_reg_line_count = (uint16_t) line;
+	}
+
+	if (gain > max_legal_gain) {
+		CDBG("Max legal gain Line:%d \n", __LINE__);
+		gain = max_legal_gain;
+	}
+
+	/* Verify no overflow */
+	if (mt9p012_ctrl->sensormode == SENSOR_PREVIEW_MODE) {
+		line = (uint32_t) (line * mt9p012_ctrl->fps_divider /
+				   0x00000400);
+		setting = RES_PREVIEW;
+	} else {
+		line = (uint32_t) (line * mt9p012_ctrl->pict_fps_divider /
+				   0x00000400);
+		setting = RES_CAPTURE;
+	}
+
+	/* Set digital gain to 1 */
+#ifdef MT9P012_REV_7
+	gain |= 0x1000;
+#else
+	gain |= 0x0200;
+#endif
+
+	if ((mt9p012_regs.reg_pat[setting].frame_length_lines - 1) < line) {
+		line_length_ratio = (uint32_t) (line * 0x00000400) /
+		    (mt9p012_regs.reg_pat[setting].frame_length_lines - 1);
+	} else
+		line_length_ratio = 0x00000400;
+
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr, REG_GLOBAL_GAIN, gain);
+	if (rc < 0) {
+		CDBG("mt9p012_i2c_write_w failed... Line:%d \n", __LINE__);
+		return rc;
+	}
+
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+				 REG_COARSE_INT_TIME, line);
+	if (rc < 0) {
+		CDBG("mt9p012_i2c_write_w failed... Line:%d \n", __LINE__);
+		return rc;
+	}
+
+	CDBG("mt9p012_write_exp_gain: gain = %d, line = %d\n", gain, line);
+
+	return rc;
+}
+
+static int32_t mt9p012_set_pict_exp_gain(uint16_t gain, uint32_t line)
+{
+	int32_t rc = 0;
+
+	CDBG("Line:%d mt9p012_set_pict_exp_gain \n", __LINE__);
+
+	rc = mt9p012_write_exp_gain(gain, line);
+	if (rc < 0) {
+		CDBG("Line:%d mt9p012_set_pict_exp_gain failed... \n",
+		     __LINE__);
+		return rc;
+	}
+
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+				 MT9P012_REG_RESET_REGISTER, 0x10CC | 0x0002);
+	if (rc < 0) {
+		CDBG("mt9p012_i2c_write_w failed... Line:%d \n", __LINE__);
+		return rc;
+	}
+
+	mdelay(5);
+
+	/* camera_timed_wait(snapshot_wait*exposure_ratio); */
+	return rc;
+}
+
+static int32_t mt9p012_setting(enum mt9p012_reg_update rupdate,
+			       enum mt9p012_setting rt)
+{
+	int32_t rc = 0;
+
+	switch (rupdate) {
+	case UPDATE_PERIODIC:
+		if (rt == RES_PREVIEW || rt == RES_CAPTURE) {
+			struct mt9p012_i2c_reg_conf ppc_tbl[] = {
+				{REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_HOLD},
+				{REG_ROW_SPEED,
+				 mt9p012_regs.reg_pat[rt].row_speed},
+				{REG_X_ADDR_START,
+				 mt9p012_regs.reg_pat[rt].x_addr_start},
+				{REG_X_ADDR_END,
+				 mt9p012_regs.reg_pat[rt].x_addr_end},
+				{REG_Y_ADDR_START,
+				 mt9p012_regs.reg_pat[rt].y_addr_start},
+				{REG_Y_ADDR_END,
+				 mt9p012_regs.reg_pat[rt].y_addr_end},
+				{REG_READ_MODE,
+				 mt9p012_regs.reg_pat[rt].read_mode},
+				{REG_SCALE_M, mt9p012_regs.reg_pat[rt].scale_m},
+				{REG_X_OUTPUT_SIZE,
+				 mt9p012_regs.reg_pat[rt].x_output_size},
+				{REG_Y_OUTPUT_SIZE,
+				 mt9p012_regs.reg_pat[rt].y_output_size},
+
+				{REG_LINE_LENGTH_PCK,
+				 mt9p012_regs.reg_pat[rt].line_length_pck},
+				{REG_FRAME_LENGTH_LINES,
+				 (mt9p012_regs.reg_pat[rt].frame_length_lines *
+				  mt9p012_ctrl->fps_divider / 0x00000400)},
+				{REG_COARSE_INT_TIME,
+				 mt9p012_regs.reg_pat[rt].coarse_int_time},
+				{REG_FINE_INTEGRATION_TIME,
+				 mt9p012_regs.reg_pat[rt].fine_int_time},
+				{REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_UPDATE},
+			};
+			if (update_type == REG_INIT) {
+				update_type = rupdate;
+				return rc;
+			}
+			rc = mt9p012_i2c_write_w_table(&ppc_tbl[0],
+						ARRAY_SIZE(ppc_tbl));
+			if (rc < 0)
+				return rc;
+
+			rc = mt9p012_test(mt9p012_ctrl->set_test);
+			if (rc < 0)
+				return rc;
+
+			rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+						 MT9P012_REG_RESET_REGISTER,
+						 MT9P012_RESET_REGISTER_PWON |
+						 0x0002);
+			if (rc < 0)
+				return rc;
+
+			mdelay(5);	/* 15? wait for sensor to transition */
+
+			return rc;
+		}
+		break;		/* UPDATE_PERIODIC */
+
+	case REG_INIT:
+		if (rt == RES_PREVIEW || rt == RES_CAPTURE) {
+			struct mt9p012_i2c_reg_conf ipc_tbl1[] = {
+				{MT9P012_REG_RESET_REGISTER,
+				 MT9P012_RESET_REGISTER_PWOFF},
+				{REG_VT_PIX_CLK_DIV,
+				 mt9p012_regs.reg_pat[rt].vt_pix_clk_div},
+				{REG_VT_SYS_CLK_DIV,
+				 mt9p012_regs.reg_pat[rt].vt_sys_clk_div},
+				{REG_PRE_PLL_CLK_DIV,
+				 mt9p012_regs.reg_pat[rt].pre_pll_clk_div},
+				{REG_PLL_MULTIPLIER,
+				 mt9p012_regs.reg_pat[rt].pll_multiplier},
+				{REG_OP_PIX_CLK_DIV,
+				 mt9p012_regs.reg_pat[rt].op_pix_clk_div},
+				{REG_OP_SYS_CLK_DIV,
+				 mt9p012_regs.reg_pat[rt].op_sys_clk_div},
+#ifdef MT9P012_REV_7
+				{0x30B0, 0x0001},
+				{0x308E, 0xE060},
+				{0x3092, 0x0A52},
+				{0x3094, 0x4656},
+				{0x3096, 0x5652},
+				{0x30CA, 0x8006},
+				{0x312A, 0xDD02},
+				{0x312C, 0x00E4},
+				{0x3170, 0x299A},
+#endif
+				/* optimized settings for noise */
+				{0x3088, 0x6FF6},
+				{0x3154, 0x0282},
+				{0x3156, 0x0381},
+				{0x3162, 0x04CE},
+				{0x0204, 0x0010},
+				{0x0206, 0x0010},
+				{0x0208, 0x0010},
+				{0x020A, 0x0010},
+				{0x020C, 0x0010},
+				{MT9P012_REG_RESET_REGISTER,
+				 MT9P012_RESET_REGISTER_PWON},
+			};
+
+			struct mt9p012_i2c_reg_conf ipc_tbl2[] = {
+				{MT9P012_REG_RESET_REGISTER,
+				 MT9P012_RESET_REGISTER_PWOFF},
+				{REG_VT_PIX_CLK_DIV,
+				 mt9p012_regs.reg_pat[rt].vt_pix_clk_div},
+				{REG_VT_SYS_CLK_DIV,
+				 mt9p012_regs.reg_pat[rt].vt_sys_clk_div},
+				{REG_PRE_PLL_CLK_DIV,
+				 mt9p012_regs.reg_pat[rt].pre_pll_clk_div},
+				{REG_PLL_MULTIPLIER,
+				 mt9p012_regs.reg_pat[rt].pll_multiplier},
+				{REG_OP_PIX_CLK_DIV,
+				 mt9p012_regs.reg_pat[rt].op_pix_clk_div},
+				{REG_OP_SYS_CLK_DIV,
+				 mt9p012_regs.reg_pat[rt].op_sys_clk_div},
+#ifdef MT9P012_REV_7
+				{0x30B0, 0x0001},
+				{0x308E, 0xE060},
+				{0x3092, 0x0A52},
+				{0x3094, 0x4656},
+				{0x3096, 0x5652},
+				{0x30CA, 0x8006},
+				{0x312A, 0xDD02},
+				{0x312C, 0x00E4},
+				{0x3170, 0x299A},
+#endif
+				/* optimized settings for noise */
+				{0x3088, 0x6FF6},
+				{0x3154, 0x0282},
+				{0x3156, 0x0381},
+				{0x3162, 0x04CE},
+				{0x0204, 0x0010},
+				{0x0206, 0x0010},
+				{0x0208, 0x0010},
+				{0x020A, 0x0010},
+				{0x020C, 0x0010},
+				{MT9P012_REG_RESET_REGISTER,
+				 MT9P012_RESET_REGISTER_PWON},
+			};
+
+			struct mt9p012_i2c_reg_conf ipc_tbl3[] = {
+				{REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_HOLD},
+				/* Set preview or snapshot mode */
+				{REG_ROW_SPEED,
+				 mt9p012_regs.reg_pat[rt].row_speed},
+				{REG_X_ADDR_START,
+				 mt9p012_regs.reg_pat[rt].x_addr_start},
+				{REG_X_ADDR_END,
+				 mt9p012_regs.reg_pat[rt].x_addr_end},
+				{REG_Y_ADDR_START,
+				 mt9p012_regs.reg_pat[rt].y_addr_start},
+				{REG_Y_ADDR_END,
+				 mt9p012_regs.reg_pat[rt].y_addr_end},
+				{REG_READ_MODE,
+				 mt9p012_regs.reg_pat[rt].read_mode},
+				{REG_SCALE_M, mt9p012_regs.reg_pat[rt].scale_m},
+				{REG_X_OUTPUT_SIZE,
+				 mt9p012_regs.reg_pat[rt].x_output_size},
+				{REG_Y_OUTPUT_SIZE,
+				 mt9p012_regs.reg_pat[rt].y_output_size},
+				{REG_LINE_LENGTH_PCK,
+				 mt9p012_regs.reg_pat[rt].line_length_pck},
+				{REG_FRAME_LENGTH_LINES,
+				 mt9p012_regs.reg_pat[rt].frame_length_lines},
+				{REG_COARSE_INT_TIME,
+				 mt9p012_regs.reg_pat[rt].coarse_int_time},
+				{REG_FINE_INTEGRATION_TIME,
+				 mt9p012_regs.reg_pat[rt].fine_int_time},
+				{REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_UPDATE},
+			};
+
+			/* reset fps_divider */
+			mt9p012_ctrl->fps_divider = 1 * 0x0400;
+
+			rc = mt9p012_i2c_write_w_table(&ipc_tbl1[0],
+						       ARRAY_SIZE(ipc_tbl1));
+			if (rc < 0)
+				return rc;
+
+			rc = mt9p012_i2c_write_w_table(&ipc_tbl2[0],
+						       ARRAY_SIZE(ipc_tbl2));
+			if (rc < 0)
+				return rc;
+
+			mdelay(5);
+
+			rc = mt9p012_i2c_write_w_table(&ipc_tbl3[0],
+						       ARRAY_SIZE(ipc_tbl3));
+			if (rc < 0)
+				return rc;
+
+			/* load lens shading */
+			rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+						 REG_GROUPED_PARAMETER_HOLD,
+						 GROUPED_PARAMETER_HOLD);
+			if (rc < 0)
+				return rc;
+
+			rc = mt9p012_set_lc();
+			if (rc < 0)
+				return rc;
+
+			rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+						 REG_GROUPED_PARAMETER_HOLD,
+						 GROUPED_PARAMETER_UPDATE);
+
+			if (rc < 0)
+				return rc;
+		}
+		update_type = rupdate;
+		break;		/* case REG_INIT: */
+
+	default:
+		rc = -EINVAL;
+		break;
+	}			/* switch (rupdate) */
+
+	return rc;
+}
+
+static int32_t mt9p012_video_config(int mode, int res)
+{
+	int32_t rc;
+
+	switch (res) {
+	case QTR_SIZE:
+		rc = mt9p012_setting(UPDATE_PERIODIC, RES_PREVIEW);
+		if (rc < 0)
+			return rc;
+
+		CDBG("mt9p012 sensor configuration done!\n");
+		break;
+
+	case FULL_SIZE:
+		rc = mt9p012_setting(UPDATE_PERIODIC, RES_CAPTURE);
+		if (rc < 0)
+			return rc;
+
+		break;
+
+	default:
+		return 0;
+	}			/* switch */
+
+	mt9p012_ctrl->prev_res = res;
+	mt9p012_ctrl->curr_res = res;
+	mt9p012_ctrl->sensormode = mode;
+
+	rc = mt9p012_write_exp_gain(mt9p012_ctrl->my_reg_gain,
+				    mt9p012_ctrl->my_reg_line_count);
+
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+				 MT9P012_REG_RESET_REGISTER, 0x10cc | 0x0002);
+
+	return rc;
+}
+
+static int32_t mt9p012_snapshot_config(int mode)
+{
+	int32_t rc = 0;
+
+	rc = mt9p012_setting(UPDATE_PERIODIC, RES_CAPTURE);
+	if (rc < 0)
+		return rc;
+
+	mt9p012_ctrl->curr_res = mt9p012_ctrl->pict_res;
+
+	mt9p012_ctrl->sensormode = mode;
+
+	return rc;
+}
+
+static int32_t mt9p012_raw_snapshot_config(int mode)
+{
+	int32_t rc = 0;
+
+	rc = mt9p012_setting(UPDATE_PERIODIC, RES_CAPTURE);
+	if (rc < 0)
+		return rc;
+
+	mt9p012_ctrl->curr_res = mt9p012_ctrl->pict_res;
+
+	mt9p012_ctrl->sensormode = mode;
+
+	return rc;
+}
+
+static int32_t mt9p012_power_down(void)
+{
+	int32_t rc = 0;
+
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+				 MT9P012_REG_RESET_REGISTER,
+				 MT9P012_RESET_REGISTER_PWOFF);
+
+	mdelay(5);
+	return rc;
+}
+
+static int32_t mt9p012_move_focus(int direction, int32_t num_steps)
+{
+	int32_t rc;
+	int16_t step_direction;
+	int16_t actual_step;
+	int16_t next_position;
+	uint8_t code_val;
+	uint8_t time_out;
+	uint8_t temp_pos;
+
+	uint16_t actual_position_target;
+	if (num_steps > MT9P012_TOTAL_STEPS_NEAR_TO_FAR)
+		num_steps = MT9P012_TOTAL_STEPS_NEAR_TO_FAR;
+	else if (num_steps == 0) {
+		CDBG("mt9p012_move_focus failed at line %d ...\n", __LINE__);
+		return -EINVAL;
+	}
+
+	if (direction == MOVE_NEAR)
+		step_direction = -1;
+	else if (direction == MOVE_FAR)
+		step_direction = 1;
+	else {
+		CDBG("mt9p012_move_focus failed at line %d ...\n", __LINE__);
+		return -EINVAL;
+	}
+
+	if (mt9p012_ctrl->curr_lens_pos < mt9p012_ctrl->init_curr_lens_pos)
+		mt9p012_ctrl->curr_lens_pos = mt9p012_ctrl->init_curr_lens_pos;
+
+	actual_step = (int16_t) (step_direction * (int16_t) num_steps);
+	next_position = (int16_t) (mt9p012_ctrl->curr_lens_pos + actual_step);
+
+	if (next_position > MT9P012_TOTAL_STEPS_NEAR_TO_FAR)
+		next_position = MT9P012_TOTAL_STEPS_NEAR_TO_FAR;
+	else if (next_position < 0)
+		next_position = 0;
+
+	if (num_steps >= 10)
+		time_out = 100;
+	else
+		time_out = 30;
+	code_val = next_position;
+	actual_position_target = bam_step_lookup_table[code_val];
+	rc = mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x01, 0x29);
+	if (rc < 0)
+		return rc;
+	temp_pos = (uint8_t) (actual_position_target >> 8);
+	rc = mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x05, temp_pos);
+	if (rc < 0)
+		return rc;
+	temp_pos = (uint8_t) (actual_position_target & 0x00FF);
+	/* code_val_lsb |= mode_mask; */
+	rc = mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x06, temp_pos);
+	if (rc < 0)
+		return rc;
+
+	rc = mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x0B, time_out);
+	if (rc < 0)
+		return rc;
+	rc = mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x07, 0x27);
+	if (rc < 0)
+		return rc;
+
+	mdelay(time_out);
+
+	/* Storing the current lens Position */
+	mt9p012_ctrl->curr_lens_pos = next_position;
+
+	return rc;
+}
+
+static int32_t mt9p012_set_default_focus(void)
+{
+	int32_t rc = 0;
+
+	uint8_t temp_pos;
+
+	/* Write the digital code for current to the actuator */
+	rc = mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x01, 0x29);
+	if (rc < 0)
+		return rc;
+	temp_pos = (uint8_t) (bam_infinite >> 8);
+
+	rc = mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x05, temp_pos);
+	if (rc < 0)
+		return rc;
+	temp_pos = (uint8_t) (bam_infinite & 0x00FF);
+	rc = mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x06, temp_pos);
+	if (rc < 0)
+		return rc;
+
+	rc = mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x0B, 0x64);
+	if (rc < 0)
+		return rc;
+
+	rc = mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x07, 0x27);
+	if (rc < 0)
+		return rc;
+
+	mdelay(140);
+
+	mt9p012_ctrl->curr_lens_pos = MT9P012_TOTAL_STEPS_NEAR_TO_FAR;
+
+	return rc;
+}
+
+static int mt9p012_probe_init_done(const struct msm_camera_sensor_info *data)
+{
+	gpio_direction_output(data->sensor_reset, 0);
+	gpio_free(data->sensor_reset);
+	return 0;
+}
+
+static int mt9p012_probe_init_sensor(const struct msm_camera_sensor_info *data)
+{
+	int32_t rc;
+	uint16_t chipid;
+
+	rc = gpio_request(data->sensor_reset, "mt9p012");
+	if (!rc)
+		gpio_direction_output(data->sensor_reset, 1);
+	else
+		goto init_probe_done;
+
+	msleep(20);
+
+	/* RESET the sensor image part via I2C command */
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+				 MT9P012_REG_RESET_REGISTER, 0x10CC | 0x0001);
+	if (rc < 0) {
+		CDBG("sensor reset failed. rc = %d\n", rc);
+		goto init_probe_fail;
+	}
+
+	msleep(MT9P012_RESET_DELAY_MSECS);
+
+	/* 3. Read sensor Model ID: */
+	rc = mt9p012_i2c_read_w(mt9p012_client->addr,
+				MT9P012_REG_MODEL_ID, &chipid);
+	if (rc < 0)
+		goto init_probe_fail;
+
+	/* 4. Compare sensor ID to MT9P012 ID: */
+	if (chipid != MT9P012_MODEL_ID) {
+		rc = -ENODEV;
+		goto init_probe_fail;
+	}
+
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr, 0x306E, 0x9000);
+	if (rc < 0) {
+		CDBG("REV_7 write failed. rc = %d\n", rc);
+		goto init_probe_fail;
+	}
+
+	/* RESET_REGISTER, enable parallel interface and disable serialiser */
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr, 0x301A, 0x10CC);
+	if (rc < 0) {
+		CDBG("enable parallel interface failed. rc = %d\n", rc);
+		goto init_probe_fail;
+	}
+
+	/* To disable the 2 extra lines */
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr, 0x3064, 0x0805);
+
+	if (rc < 0) {
+		CDBG("disable the 2 extra lines failed. rc = %d\n", rc);
+		goto init_probe_fail;
+	}
+
+	goto init_probe_done;
+
+init_probe_fail:
+	mt9p012_probe_init_done(data);
+init_probe_done:
+	return rc;
+}
+
+static int mt9p012_sensor_open_init(const struct msm_camera_sensor_info *data)
+{
+	int32_t rc;
+	unsigned short temp_pos;
+	uint8_t i;
+	uint16_t temp;
+
+	mt9p012_ctrl = kzalloc(sizeof(struct mt9p012_ctrl), GFP_KERNEL);
+	if (!mt9p012_ctrl) {
+		CDBG("mt9p012_init failed!\n");
+		rc = -ENOMEM;
+		goto init_done;
+	}
+
+	mt9p012_ctrl->fps_divider = 1 * 0x00000400;
+	mt9p012_ctrl->pict_fps_divider = 1 * 0x00000400;
+	mt9p012_ctrl->set_test = TEST_OFF;
+	mt9p012_ctrl->prev_res = QTR_SIZE;
+	mt9p012_ctrl->pict_res = FULL_SIZE;
+
+	if (data)
+		mt9p012_ctrl->sensordata = data;
+
+	msm_camio_camif_pad_reg_reset();
+	mdelay(20);
+
+	rc = mt9p012_probe_init_sensor(data);
+	if (rc < 0)
+		goto init_fail1;
+
+	if (mt9p012_ctrl->prev_res == QTR_SIZE)
+		rc = mt9p012_setting(REG_INIT, RES_PREVIEW);
+	else
+		rc = mt9p012_setting(REG_INIT, RES_CAPTURE);
+
+	if (rc < 0) {
+		CDBG("mt9p012_setting failed. rc = %d\n", rc);
+		goto init_fail1;
+	}
+
+	/* sensor : output enable */
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+				 MT9P012_REG_RESET_REGISTER,
+				 MT9P012_RESET_REGISTER_PWON);
+	if (rc < 0) {
+		CDBG("sensor output enable failed. rc = %d\n", rc);
+		goto init_fail1;
+	}
+
+	/* enable AF actuator */
+	rc = gpio_request(mt9p012_ctrl->sensordata->vcm_pwd, "mt9p012");
+	if (!rc)
+		gpio_direction_output(mt9p012_ctrl->sensordata->vcm_pwd, 1);
+	else {
+		CDBG("mt9p012_ctrl gpio request failed!\n");
+		goto init_fail1;
+	}
+
+	mdelay(20);
+
+	bam_infinite = 0;
+	bam_macro = 0;
+	/* initialize AF actuator */
+	mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x01, 0x09);
+	mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x07, 0x2E);
+	mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x0A, 0x01);
+	mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x17, 0x06);
+	mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x16, 0x0A);
+
+	mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x01, 0x29);
+	mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x05, 0x00);
+	mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x06, 0x00);
+	mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x0B, 0x64);
+	mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x07, 0x27);
+	mdelay(140);
+	mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x01, 0x29);
+	mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x05, 0x03);
+	mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x06, 0xFF);
+	mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x0B, 0x64);
+	mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, 0x07, 0x27);
+	mdelay(140);
+
+	if (mt9p012_i2c_read_b(MT9P012_AF_I2C_ADDR >> 1, 0x12, &temp_pos)
+	    >= 0) {
+		bam_infinite = (uint16_t) temp_pos;
+		if (mt9p012_i2c_read_b
+		    (MT9P012_AF_I2C_ADDR >> 1, 0x13, &temp_pos) >= 0)
+			bam_infinite =
+			    (bam_infinite << 8) | ((uint16_t) temp_pos);
+	} else {
+		bam_infinite = 100;
+	}
+
+	if (mt9p012_i2c_read_b(MT9P012_AF_I2C_ADDR >> 1, 0x14, &temp_pos)
+	    >= 0) {
+		bam_macro = (uint16_t) temp_pos;
+		if (mt9p012_i2c_read_b
+		    (MT9P012_AF_I2C_ADDR >> 1, 0x15, &temp_pos) >= 0)
+			bam_macro = (bam_macro << 8) | ((uint16_t) temp_pos);
+	}
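+
+	/*
+	 * bam_infinite and bam_macro are the actuator's calibrated DAC codes
+	 * for the infinity and macro end stops (read back from registers
+	 * 0x12-0x15 above); interpolate linearly between them so every focus
+	 * step maps to an evenly spaced lens position.
+	 */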
+	temp = (bam_infinite - bam_macro) / MT9P012_TOTAL_STEPS_NEAR_TO_FAR;
+	for (i = 0; i < MT9P012_TOTAL_STEPS_NEAR_TO_FAR; i++)
+		bam_step_lookup_table[i] = bam_macro + temp * i;
+
+	bam_step_lookup_table[MT9P012_TOTAL_STEPS_NEAR_TO_FAR] = bam_infinite;
+
+	rc = mt9p012_set_default_focus();
+	if (rc >= 0)
+		goto init_done;
+
+init_fail1:
+	mt9p012_probe_init_done(data);
+	kfree(mt9p012_ctrl);
+init_done:
+	return rc;
+}
+
+static int mt9p012_init_client(struct i2c_client *client)
+{
+	/* Initialize the MSM_CAMI2C Chip */
+	init_waitqueue_head(&mt9p012_wait_queue);
+	return 0;
+}
+
+static int32_t mt9p012_set_sensor_mode(int mode, int res)
+{
+	int32_t rc = 0;
+
+	switch (mode) {
+	case SENSOR_PREVIEW_MODE:
+		rc = mt9p012_video_config(mode, res);
+		break;
+
+	case SENSOR_SNAPSHOT_MODE:
+		rc = mt9p012_snapshot_config(mode);
+		break;
+
+	case SENSOR_RAW_SNAPSHOT_MODE:
+		rc = mt9p012_raw_snapshot_config(mode);
+		break;
+
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+int mt9p012_sensor_config(void __user *argp)
+{
+	struct sensor_cfg_data cdata;
+	int rc = 0;
+
+	if (copy_from_user(&cdata,
+			   (void *)argp, sizeof(struct sensor_cfg_data)))
+		return -EFAULT;
+
+	mutex_lock(&mt9p012_mut);
+
+	CDBG("%s: cfgtype = %d\n", __func__, cdata.cfgtype);
+	switch (cdata.cfgtype) {
+	case CFG_GET_PICT_FPS:
+		mt9p012_get_pict_fps(cdata.cfg.gfps.prevfps,
+				     &(cdata.cfg.gfps.pictfps));
+
+		if (copy_to_user((void *)argp, &cdata,
+				 sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PREV_L_PF:
+		cdata.cfg.prevl_pf = mt9p012_get_prev_lines_pf();
+
+		if (copy_to_user((void *)argp,
+				 &cdata, sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PREV_P_PL:
+		cdata.cfg.prevp_pl = mt9p012_get_prev_pixels_pl();
+
+		if (copy_to_user((void *)argp,
+				 &cdata, sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PICT_L_PF:
+		cdata.cfg.pictl_pf = mt9p012_get_pict_lines_pf();
+
+		if (copy_to_user((void *)argp,
+				 &cdata, sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PICT_P_PL:
+		cdata.cfg.pictp_pl = mt9p012_get_pict_pixels_pl();
+
+		if (copy_to_user((void *)argp,
+				 &cdata, sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PICT_MAX_EXP_LC:
+		cdata.cfg.pict_max_exp_lc = mt9p012_get_pict_max_exp_lc();
+
+		if (copy_to_user((void *)argp,
+				 &cdata, sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_SET_FPS:
+	case CFG_SET_PICT_FPS:
+		rc = mt9p012_set_fps(&(cdata.cfg.fps));
+		break;
+
+	case CFG_SET_EXP_GAIN:
+		rc = mt9p012_write_exp_gain(cdata.cfg.exp_gain.gain,
+					    cdata.cfg.exp_gain.line);
+		break;
+
+	case CFG_SET_PICT_EXP_GAIN:
+		CDBG("Line:%d CFG_SET_PICT_EXP_GAIN \n", __LINE__);
+		rc = mt9p012_set_pict_exp_gain(cdata.cfg.exp_gain.gain,
+					       cdata.cfg.exp_gain.line);
+		break;
+
+	case CFG_SET_MODE:
+		rc = mt9p012_set_sensor_mode(cdata.mode, cdata.rs);
+		break;
+
+	case CFG_PWR_DOWN:
+		rc = mt9p012_power_down();
+		break;
+
+	case CFG_MOVE_FOCUS:
+		CDBG("mt9p012_ioctl: CFG_MOVE_FOCUS: dir=%d steps=%d\n",
+		     cdata.cfg.focus.dir, cdata.cfg.focus.steps);
+		rc = mt9p012_move_focus(cdata.cfg.focus.dir,
+					cdata.cfg.focus.steps);
+		break;
+
+	case CFG_SET_DEFAULT_FOCUS:
+		rc = mt9p012_set_default_focus();
+
+		break;
+
+	case CFG_SET_EFFECT:
+		rc = mt9p012_set_default_focus();
+		break;
+
+	case CFG_SET_LENS_SHADING:
+		CDBG("%s: CFG_SET_LENS_SHADING\n", __func__);
+		rc = mt9p012_lens_shading_enable(cdata.cfg.lens_shading);
+		break;
+
+	case CFG_GET_AF_MAX_STEPS:
+		cdata.max_steps = MT9P012_STEPS_NEAR_TO_CLOSEST_INF;
+		if (copy_to_user((void *)argp,
+				 &cdata, sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	mutex_unlock(&mt9p012_mut);
+	return rc;
+}
+
+int mt9p012_sensor_release(void)
+{
+	int rc = -EBADF;
+
+	mutex_lock(&mt9p012_mut);
+
+	mt9p012_power_down();
+
+	gpio_direction_output(mt9p012_ctrl->sensordata->sensor_reset, 0);
+	gpio_free(mt9p012_ctrl->sensordata->sensor_reset);
+
+	gpio_direction_output(mt9p012_ctrl->sensordata->vcm_pwd, 0);
+	gpio_free(mt9p012_ctrl->sensordata->vcm_pwd);
+
+	kfree(mt9p012_ctrl);
+	mt9p012_ctrl = NULL;
+
+	CDBG("mt9p012_release completed\n");
+
+	mutex_unlock(&mt9p012_mut);
+	return rc;
+}
+
+static int mt9p012_i2c_probe(struct i2c_client *client,
+			     const struct i2c_device_id *id)
+{
+	int rc = 0;
+	CDBG("mt9p012_probe called!\n");
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		CDBG("i2c_check_functionality failed\n");
+		goto probe_failure;
+	}
+
+	mt9p012_sensorw = kzalloc(sizeof(struct mt9p012_work), GFP_KERNEL);
+	if (!mt9p012_sensorw) {
+		CDBG("kzalloc failed.\n");
+		rc = -ENOMEM;
+		goto probe_failure;
+	}
+
+	i2c_set_clientdata(client, mt9p012_sensorw);
+	mt9p012_init_client(client);
+	mt9p012_client = client;
+
+	mdelay(50);
+
+	CDBG("mt9p012_probe succeeded! rc = %d\n", rc);
+	return 0;
+
+probe_failure:
+	CDBG("mt9p012_probe failed! rc = %d\n", rc);
+	return rc;
+}
+
+static int __exit mt9p012_remove(struct i2c_client *client)
+{
+	struct mt9p012_work_t *sensorw = i2c_get_clientdata(client);
+	free_irq(client->irq, sensorw);
+	mt9p012_client = NULL;
+	kfree(sensorw);
+	return 0;
+}
+
+static const struct i2c_device_id mt9p012_i2c_id[] = {
+	{"mt9p012", 0},
+	{}
+};
+
+static struct i2c_driver mt9p012_i2c_driver = {
+	.id_table = mt9p012_i2c_id,
+	.probe = mt9p012_i2c_probe,
+	.remove = __exit_p(mt9p012_remove),
+	.driver = {
+		.name = "mt9p012",
+	},
+};
+
+static int mt9p012_sensor_probe(const struct msm_camera_sensor_info *info,
+				struct msm_sensor_ctrl *s)
+{
+	int rc = i2c_add_driver(&mt9p012_i2c_driver);
+	if (rc < 0 || mt9p012_client == NULL) {
+		rc = -ENOTSUPP;
+		goto probe_done;
+	}
+
+	msm_camio_clk_rate_set(MT9P012_DEFAULT_CLOCK_RATE);
+	mdelay(20);
+
+	rc = mt9p012_probe_init_sensor(info);
+	if (rc < 0)
+		goto probe_done;
+
+	s->s_init = mt9p012_sensor_open_init;
+	s->s_release = mt9p012_sensor_release;
+	s->s_config = mt9p012_sensor_config;
+	s->s_mount_angle  = 0;
+	mt9p012_probe_init_done(info);
+
+probe_done:
+	CDBG("%s %s:%d\n", __FILE__, __func__, __LINE__);
+	return rc;
+}
+
+static int __mt9p012_probe(struct platform_device *pdev)
+{
+	return msm_camera_drv_start(pdev, mt9p012_sensor_probe);
+}
+
+static struct platform_driver msm_camera_driver = {
+	.probe = __mt9p012_probe,
+	.driver = {
+		.name = "msm_camera_mt9p012",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init mt9p012_init(void)
+{
+	return platform_driver_register(&msm_camera_driver);
+}
+
+module_init(mt9p012_init);
+
+void mt9p012_exit(void)
+{
+	i2c_del_driver(&mt9p012_i2c_driver);
+}
diff --git a/drivers/media/video/msm/mt9p012_fox.c b/drivers/media/video/msm/mt9p012_fox.c
new file mode 100644
index 0000000..4a732f3
--- /dev/null
+++ b/drivers/media/video/msm/mt9p012_fox.c
@@ -0,0 +1,1345 @@
+/* Copyright (c) 2009, 2011 Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/kernel.h>
+#include <media/msm_camera.h>
+#include <mach/gpio.h>
+#include <mach/camera.h>
+#include "mt9p012.h"
+
+/*=============================================================
+    SENSOR REGISTER DEFINES
+==============================================================*/
+#define MT9P012_REG_MODEL_ID         0x0000
+#define MT9P012_MODEL_ID             0x2801
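+/*
+ * Writes bracketed by GROUPED_PARAMETER_HOLD (0x0100) and
+ * GROUPED_PARAMETER_UPDATE (0x0000) to register 0x0104 are latched
+ * together at the next frame boundary, so a multi-register mode change
+ * is never applied half-way through a frame.
+ */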
+#define REG_GROUPED_PARAMETER_HOLD   0x0104
+#define GROUPED_PARAMETER_HOLD       0x0100
+#define GROUPED_PARAMETER_UPDATE     0x0000
+#define REG_COARSE_INT_TIME          0x3012
+#define REG_VT_PIX_CLK_DIV           0x0300
+#define REG_VT_SYS_CLK_DIV           0x0302
+#define REG_PRE_PLL_CLK_DIV          0x0304
+#define REG_PLL_MULTIPLIER           0x0306
+#define REG_OP_PIX_CLK_DIV           0x0308
+#define REG_OP_SYS_CLK_DIV           0x030A
+#define REG_SCALE_M                  0x0404
+#define REG_FRAME_LENGTH_LINES       0x300A
+#define REG_LINE_LENGTH_PCK          0x300C
+#define REG_X_ADDR_START             0x3004
+#define REG_Y_ADDR_START             0x3002
+#define REG_X_ADDR_END               0x3008
+#define REG_Y_ADDR_END               0x3006
+#define REG_X_OUTPUT_SIZE            0x034C
+#define REG_Y_OUTPUT_SIZE            0x034E
+#define REG_FINE_INTEGRATION_TIME    0x3014
+#define REG_ROW_SPEED                0x3016
+#define MT9P012_REG_RESET_REGISTER   0x301A
+#define MT9P012_RESET_REGISTER_PWON  0x10CC
+#define MT9P012_RESET_REGISTER_PWOFF 0x10C8
+#define REG_READ_MODE                0x3040
+#define REG_GLOBAL_GAIN              0x305E
+#define REG_TEST_PATTERN_MODE        0x3070
+
+#define MT9P012_REV_7
+
+enum mt9p012_test_mode {
+	TEST_OFF,
+	TEST_1,
+	TEST_2,
+	TEST_3
+};
+
+enum mt9p012_resolution {
+	QTR_SIZE,
+	FULL_SIZE,
+	INVALID_SIZE
+};
+
+enum mt9p012_reg_update {
+	/* Sensor registers that need to be updated during initialization */
+	REG_INIT,
+	/* Sensor registers that need periodic I2C writes */
+	UPDATE_PERIODIC,
+	/* All the sensor Registers will be updated */
+	UPDATE_ALL,
+	/* Not valid update */
+	UPDATE_INVALID
+};
+
+enum mt9p012_setting {
+	RES_PREVIEW,
+	RES_CAPTURE
+};
+
+/* actuator's Slave Address */
+#define MT9P012_AF_I2C_ADDR   0x18
+
+/* AF Total steps parameters */
+#define MT9P012_STEPS_NEAR_TO_CLOSEST_INF  32
+#define MT9P012_TOTAL_STEPS_NEAR_TO_FAR    32
+
+#define MT9P012_MU5M0_PREVIEW_DUMMY_PIXELS 0
+#define MT9P012_MU5M0_PREVIEW_DUMMY_LINES  0
+
+/* Time in milliseconds to wait for the sensor to reset. */
+#define MT9P012_RESET_DELAY_MSECS   66
+
+/* for 20 fps preview */
+#define MT9P012_DEFAULT_CLOCK_RATE  24000000
+#define MT9P012_DEFAULT_MAX_FPS     26	/* ???? */
+
+struct mt9p012_work {
+	struct work_struct work;
+};
+static struct mt9p012_work *mt9p012_sensorw;
+static struct i2c_client *mt9p012_client;
+
+struct mt9p012_ctrl {
+	const struct msm_camera_sensor_info *sensordata;
+
+	int sensormode;
+	uint32_t fps_divider;	/* init to 1 * 0x00000400 */
+	uint32_t pict_fps_divider;	/* init to 1 * 0x00000400 */
+
+	uint16_t curr_lens_pos;
+	uint16_t init_curr_lens_pos;
+	uint16_t my_reg_gain;
+	uint32_t my_reg_line_count;
+
+	enum mt9p012_resolution prev_res;
+	enum mt9p012_resolution pict_res;
+	enum mt9p012_resolution curr_res;
+	enum mt9p012_test_mode set_test;
+};
+static uint16_t update_type = UPDATE_PERIODIC;
+static struct mt9p012_ctrl *mt9p012_ctrl;
+static DECLARE_WAIT_QUEUE_HEAD(mt9p012_wait_queue);
+DEFINE_MUTEX(mt9p012_mut);
+
+
+/*=============================================================*/
+
+static int mt9p012_i2c_rxdata(unsigned short saddr, unsigned char *rxdata,
+			      int length)
+{
+	int retry_cnt = 0;
+	int rc;
+
+	struct i2c_msg msgs[] = {
+		{
+		 .addr = saddr,
+		 .flags = 0,
+		 .len = 2,
+		 .buf = rxdata,
+		 },
+		{
+		 .addr = saddr,
+		 .flags = I2C_M_RD,
+		 .len = length,
+		 .buf = rxdata,
+		 },
+	};
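+	/*
+	 * Two-message register read: the first message writes the 16-bit
+	 * register address, the second reads the data back into the same
+	 * buffer.  The transfer is retried up to three times before giving
+	 * up.
+	 */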
+
+	do {
+		rc = i2c_transfer(mt9p012_client->adapter, msgs, 2);
+		if (rc > 0)
+			break;
+		retry_cnt++;
+	} while (retry_cnt < 3);
+
+	if (rc < 0) {
+		pr_err("mt9p012_i2c_rxdata failed!:%d %d\n", rc, retry_cnt);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int32_t mt9p012_i2c_read_w(unsigned short saddr, unsigned short raddr,
+				  unsigned short *rdata)
+{
+	int32_t rc = 0;
+	unsigned char buf[4];
+
+	if (!rdata)
+		return -EIO;
+
+	memset(buf, 0, sizeof(buf));
+
+	buf[0] = (raddr & 0xFF00) >> 8;
+	buf[1] = (raddr & 0x00FF);
+
+	rc = mt9p012_i2c_rxdata(saddr, buf, 2);
+	if (rc < 0)
+		return rc;
+
+	*rdata = buf[0] << 8 | buf[1];
+
+	if (rc < 0)
+		CDBG("mt9p012_i2c_read failed!\n");
+
+	return rc;
+}
+
+static int32_t mt9p012_i2c_txdata(unsigned short saddr, unsigned char *txdata,
+				  int length)
+{
+	int retry_cnt = 0;
+	int rc;
+
+	struct i2c_msg msg[] = {
+		{
+		 .addr = saddr,
+		 .flags = 0,
+		 .len = length,
+		 .buf = txdata,
+		 },
+	};
+
+	do {
+		rc = i2c_transfer(mt9p012_client->adapter, msg, 1);
+		if (rc > 0)
+			break;
+		retry_cnt++;
+	} while (retry_cnt < 3);
+
+	if (rc < 0) {
+		pr_err("mt9p012_i2c_txdata failed: %d %d\n", rc, retry_cnt);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int32_t mt9p012_i2c_write_b(unsigned short saddr, unsigned short baddr,
+				   unsigned short bdata)
+{
+	int32_t rc = -EIO;
+	unsigned char buf[2];
+
+	memset(buf, 0, sizeof(buf));
+	buf[0] = baddr;
+	buf[1] = bdata;
+	rc = mt9p012_i2c_txdata(saddr, buf, 2);
+
+	if (rc < 0)
+		CDBG("i2c_write failed, saddr = 0x%x addr = 0x%x, val =0x%x!\n",
+		     saddr, baddr, bdata);
+
+	return rc;
+}
+
+static int32_t mt9p012_i2c_write_w(unsigned short saddr, unsigned short waddr,
+				   unsigned short wdata)
+{
+	int32_t rc = -EIO;
+	unsigned char buf[4];
+
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (waddr & 0xFF00) >> 8;
+	buf[1] = (waddr & 0x00FF);
+	buf[2] = (wdata & 0xFF00) >> 8;
+	buf[3] = (wdata & 0x00FF);
+
+	rc = mt9p012_i2c_txdata(saddr, buf, 4);
+
+	if (rc < 0)
+		CDBG("i2c_write_w failed, addr = 0x%x, val = 0x%x!\n",
+		     waddr, wdata);
+
+	return rc;
+}
+
+static int32_t mt9p012_i2c_write_w_table(struct mt9p012_i2c_reg_conf const
+					 *reg_conf_tbl, int num)
+{
+	int i;
+	int32_t rc = -EIO;
+
+	for (i = 0; i < num; i++) {
+		rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+					 reg_conf_tbl->waddr,
+					 reg_conf_tbl->wdata);
+		if (rc < 0)
+			break;
+		reg_conf_tbl++;
+	}
+
+	return rc;
+}
+
+static int32_t mt9p012_test(enum mt9p012_test_mode mo)
+{
+	int32_t rc = 0;
+
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+				 REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_HOLD);
+	if (rc < 0)
+		return rc;
+
+	if (mo == TEST_OFF)
+		return 0;
+	else {
+		rc = mt9p012_i2c_write_w_table(mt9p012_regs.ttbl,
+					       mt9p012_regs.ttbl_size);
+		if (rc < 0)
+			return rc;
+
+		rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+					 REG_TEST_PATTERN_MODE, (uint16_t) mo);
+		if (rc < 0)
+			return rc;
+	}
+
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+				 REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_UPDATE);
+	if (rc < 0)
+		return rc;
+
+	return rc;
+}
+
+static int32_t mt9p012_lens_shading_enable(uint8_t is_enable)
+{
+	int32_t rc = 0;
+
+	CDBG("%s: entered. enable = %d\n", __func__, is_enable);
+
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+				 REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_HOLD);
+	if (rc < 0)
+		return rc;
+
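+	/* Bit 15 of register 0x3780 appears to gate the lens-shading
+	 * correction block, so shift the enable flag up into that bit. */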
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr, 0x3780,
+				 ((uint16_t) is_enable) << 15);
+	if (rc < 0)
+		return rc;
+
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+				 REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_UPDATE);
+
+	CDBG("%s: exiting. rc = %d\n", __func__, rc);
+	return rc;
+}
+
+static int32_t mt9p012_set_lc(void)
+{
+	int32_t rc;
+
+	rc = mt9p012_i2c_write_w_table(mt9p012_regs.rftbl,
+				       mt9p012_regs.rftbl_size);
+
+	return rc;
+}
+
+static void mt9p012_get_pict_fps(uint16_t fps, uint16_t *pfps)
+{
+	/* input fps is preview fps in Q8 format */
+	uint32_t divider;	/*Q10 */
+	uint32_t pclk_mult;	/*Q10 */
+	uint32_t d1;
+	uint32_t d2;
+
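+	/*
+	 * All ratios below are Q10 fixed point (0x400 == 1.0): d1 and d2
+	 * compare preview vs. snapshot frame and line lengths, pclk_mult
+	 * accounts for the different PLL multiplier, and the two extra
+	 * 0x400 scale factors are divided back out of the final result.
+	 */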
+	d1 =
+		(uint32_t)(
+		(mt9p012_regs.reg_pat[RES_PREVIEW].frame_length_lines *
+		0x00000400) /
+		mt9p012_regs.reg_pat[RES_CAPTURE].frame_length_lines);
+
+	d2 =
+		(uint32_t)(
+		(mt9p012_regs.reg_pat[RES_PREVIEW].line_length_pck *
+		0x00000400) /
+		mt9p012_regs.reg_pat[RES_CAPTURE].line_length_pck);
+
+	divider = (uint32_t) (d1 * d2) / 0x00000400;
+
+	pclk_mult =
+		(uint32_t) ((mt9p012_regs.reg_pat[RES_CAPTURE].pll_multiplier *
+		0x00000400) /
+		(mt9p012_regs.reg_pat[RES_PREVIEW].pll_multiplier));
+
+	/* Verify PCLK settings and frame sizes. */
+	*pfps = (uint16_t) (fps * divider * pclk_mult / 0x00000400 /
+			    0x00000400);
+}
+
+static uint16_t mt9p012_get_prev_lines_pf(void)
+{
+	if (mt9p012_ctrl->prev_res == QTR_SIZE)
+		return mt9p012_regs.reg_pat[RES_PREVIEW].frame_length_lines;
+	else
+		return mt9p012_regs.reg_pat[RES_CAPTURE].frame_length_lines;
+}
+
+static uint16_t mt9p012_get_prev_pixels_pl(void)
+{
+	if (mt9p012_ctrl->prev_res == QTR_SIZE)
+		return mt9p012_regs.reg_pat[RES_PREVIEW].line_length_pck;
+	else
+		return mt9p012_regs.reg_pat[RES_CAPTURE].line_length_pck;
+}
+
+static uint16_t mt9p012_get_pict_lines_pf(void)
+{
+	return mt9p012_regs.reg_pat[RES_CAPTURE].frame_length_lines;
+}
+
+static uint16_t mt9p012_get_pict_pixels_pl(void)
+{
+	return mt9p012_regs.reg_pat[RES_CAPTURE].line_length_pck;
+}
+
+static uint32_t mt9p012_get_pict_max_exp_lc(void)
+{
+	uint16_t snapshot_lines_per_frame;
+
+	if (mt9p012_ctrl->pict_res == QTR_SIZE)
+		snapshot_lines_per_frame =
+		    mt9p012_regs.reg_pat[RES_PREVIEW].frame_length_lines - 1;
+	else
+		snapshot_lines_per_frame =
+		    mt9p012_regs.reg_pat[RES_CAPTURE].frame_length_lines - 1;
+
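+	/* Report the maximum exposure as 24 snapshot frame-lengths worth
+	 * of lines. */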
+	return snapshot_lines_per_frame * 24;
+}
+
+static int32_t mt9p012_set_fps(struct fps_cfg *fps)
+{
+	/* input is new fps in Q10 format */
+	int32_t rc = 0;
+	enum mt9p012_setting setting;
+
+	mt9p012_ctrl->fps_divider = fps->fps_div;
+	mt9p012_ctrl->pict_fps_divider = fps->pict_fps_div;
+
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+				 REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_HOLD);
+	if (rc < 0)
+		return -EBUSY;
+
+	if (mt9p012_ctrl->sensormode == SENSOR_PREVIEW_MODE)
+		setting = RES_PREVIEW;
+	else
+		setting = RES_CAPTURE;
+
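+	/*
+	 * A lower frame rate is produced by stretching the frame: scale
+	 * frame_length_lines by the Q10 fps divider while the pixel clock
+	 * and line length stay unchanged.
+	 */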
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+		REG_FRAME_LENGTH_LINES,
+		(mt9p012_regs.reg_pat[setting].frame_length_lines *
+		fps->fps_div / 0x00000400));
+	if (rc < 0)
+		return rc;
+
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+				 REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_UPDATE);
+
+	return rc;
+}
+
+static int32_t mt9p012_write_exp_gain(uint16_t gain, uint32_t line)
+{
+	uint16_t max_legal_gain = 0x01FF;
+	uint32_t line_length_ratio = 0x00000400;
+	enum mt9p012_setting setting;
+	int32_t rc = 0;
+
+	CDBG("Line:%d mt9p012_write_exp_gain \n", __LINE__);
+
+	if (mt9p012_ctrl->sensormode == SENSOR_PREVIEW_MODE) {
+		mt9p012_ctrl->my_reg_gain = gain;
+		mt9p012_ctrl->my_reg_line_count = (uint16_t) line;
+	}
+
+	if (gain > max_legal_gain) {
+		CDBG("Max legal gain Line:%d \n", __LINE__);
+		gain = max_legal_gain;
+	}
+
+	/* Verify no overflow */
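+	/* The requested integration time (in lines) is scaled by the
+	 * active Q10 fps divider before it is programmed. */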
+	if (mt9p012_ctrl->sensormode == SENSOR_PREVIEW_MODE) {
+		line = (uint32_t) (line * mt9p012_ctrl->fps_divider /
+				   0x00000400);
+		setting = RES_PREVIEW;
+	} else {
+		line = (uint32_t) (line * mt9p012_ctrl->pict_fps_divider /
+				   0x00000400);
+		setting = RES_CAPTURE;
+	}
+
+	/* Set digital gain to 1 */
+#ifdef MT9P012_REV_7
+	gain |= 0x1000;
+#else
+	gain |= 0x0200;
+#endif
+
+	if ((mt9p012_regs.reg_pat[setting].frame_length_lines - 1) < line) {
+		line_length_ratio = (uint32_t) (line * 0x00000400) /
+		    (mt9p012_regs.reg_pat[setting].frame_length_lines - 1);
+	} else
+		line_length_ratio = 0x00000400;
+
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr, REG_GLOBAL_GAIN, gain);
+	if (rc < 0) {
+		CDBG("mt9p012_i2c_write_w failed... Line:%d \n", __LINE__);
+		return rc;
+	}
+
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+				 REG_COARSE_INT_TIME, line);
+	if (rc < 0) {
+		CDBG("mt9p012_i2c_write_w failed... Line:%d \n", __LINE__);
+		return rc;
+	}
+
+	CDBG("mt9p012_write_exp_gain: gain = %d, line = %d\n", gain, line);
+
+	return rc;
+}
+
+static int32_t mt9p012_set_pict_exp_gain(uint16_t gain, uint32_t line)
+{
+	int32_t rc = 0;
+
+	CDBG("Line:%d mt9p012_set_pict_exp_gain \n", __LINE__);
+
+	rc = mt9p012_write_exp_gain(gain, line);
+	if (rc < 0) {
+		CDBG("Line:%d mt9p012_set_pict_exp_gain failed... \n",
+		     __LINE__);
+		return rc;
+	}
+
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+				 MT9P012_REG_RESET_REGISTER, 0x10CC | 0x0002);
+	if (rc < 0) {
+		CDBG("mt9p012_i2c_write_w failed... Line:%d \n", __LINE__);
+		return rc;
+	}
+
+	mdelay(5);
+
+	/* camera_timed_wait(snapshot_wait*exposure_ratio); */
+	return rc;
+}
+
+static int32_t mt9p012_setting(enum mt9p012_reg_update rupdate,
+			       enum mt9p012_setting rt)
+{
+	int32_t rc = 0;
+	switch (rupdate) {
+	case UPDATE_PERIODIC:
+		if (rt == RES_PREVIEW || rt == RES_CAPTURE) {
+			struct mt9p012_i2c_reg_conf ppc_tbl[] = {
+				{REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_HOLD},
+				{REG_ROW_SPEED,
+				 mt9p012_regs.reg_pat[rt].row_speed},
+				{REG_X_ADDR_START,
+				 mt9p012_regs.reg_pat[rt].x_addr_start},
+				{REG_X_ADDR_END,
+				 mt9p012_regs.reg_pat[rt].x_addr_end},
+				{REG_Y_ADDR_START,
+				 mt9p012_regs.reg_pat[rt].y_addr_start},
+				{REG_Y_ADDR_END,
+				 mt9p012_regs.reg_pat[rt].y_addr_end},
+				{REG_READ_MODE,
+				 mt9p012_regs.reg_pat[rt].read_mode},
+				{REG_SCALE_M, mt9p012_regs.reg_pat[rt].scale_m},
+				{REG_X_OUTPUT_SIZE,
+				 mt9p012_regs.reg_pat[rt].x_output_size},
+				{REG_Y_OUTPUT_SIZE,
+				 mt9p012_regs.reg_pat[rt].y_output_size},
+
+				{REG_LINE_LENGTH_PCK,
+				 mt9p012_regs.reg_pat[rt].line_length_pck},
+				{REG_FRAME_LENGTH_LINES,
+				 (mt9p012_regs.reg_pat[rt].frame_length_lines *
+				  mt9p012_ctrl->fps_divider / 0x00000400)},
+				{REG_COARSE_INT_TIME,
+				 mt9p012_regs.reg_pat[rt].coarse_int_time},
+				{REG_FINE_INTEGRATION_TIME,
+				 mt9p012_regs.reg_pat[rt].fine_int_time},
+				{REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_UPDATE},
+			};
+			if (update_type == REG_INIT) {
+				update_type = rupdate;
+				return rc;
+			}
+			rc = mt9p012_i2c_write_w_table(&ppc_tbl[0],
+						ARRAY_SIZE(ppc_tbl));
+			if (rc < 0)
+				return rc;
+
+			rc = mt9p012_test(mt9p012_ctrl->set_test);
+			if (rc < 0)
+				return rc;
+
+			rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+						 MT9P012_REG_RESET_REGISTER,
+						 MT9P012_RESET_REGISTER_PWON |
+						 0x0002);
+			if (rc < 0)
+				return rc;
+
+			mdelay(5);	/* 15? wait for sensor to transition */
+
+			return rc;
+		}
+		break;		/* UPDATE_PERIODIC */
+
+	case REG_INIT:
+		if (rt == RES_PREVIEW || rt == RES_CAPTURE) {
+			struct mt9p012_i2c_reg_conf ipc_tbl1[] = {
+				{MT9P012_REG_RESET_REGISTER,
+				 MT9P012_RESET_REGISTER_PWOFF},
+				{REG_VT_PIX_CLK_DIV,
+				 mt9p012_regs.reg_pat[rt].vt_pix_clk_div},
+				{REG_VT_SYS_CLK_DIV,
+				 mt9p012_regs.reg_pat[rt].vt_sys_clk_div},
+				{REG_PRE_PLL_CLK_DIV,
+				 mt9p012_regs.reg_pat[rt].pre_pll_clk_div},
+				{REG_PLL_MULTIPLIER,
+				 mt9p012_regs.reg_pat[rt].pll_multiplier},
+				{REG_OP_PIX_CLK_DIV,
+				 mt9p012_regs.reg_pat[rt].op_pix_clk_div},
+				{REG_OP_SYS_CLK_DIV,
+				 mt9p012_regs.reg_pat[rt].op_sys_clk_div},
+#ifdef MT9P012_REV_7
+				{0x30B0, 0x0001},
+				{0x308E, 0xE060},
+				{0x3092, 0x0A52},
+				{0x3094, 0x4656},
+				{0x3096, 0x5652},
+				{0x30CA, 0x8006},
+				{0x312A, 0xDD02},
+				{0x312C, 0x00E4},
+				{0x3170, 0x299A},
+#endif
+				/* optimized settings for noise */
+				{0x3088, 0x6FF6},
+				{0x3154, 0x0282},
+				{0x3156, 0x0381},
+				{0x3162, 0x04CE},
+				{0x0204, 0x0010},
+				{0x0206, 0x0010},
+				{0x0208, 0x0010},
+				{0x020A, 0x0010},
+				{0x020C, 0x0010},
+				{MT9P012_REG_RESET_REGISTER,
+				 MT9P012_RESET_REGISTER_PWON},
+			};
+
+			struct mt9p012_i2c_reg_conf ipc_tbl2[] = {
+				{MT9P012_REG_RESET_REGISTER,
+				 MT9P012_RESET_REGISTER_PWOFF},
+				{REG_VT_PIX_CLK_DIV,
+				 mt9p012_regs.reg_pat[rt].vt_pix_clk_div},
+				{REG_VT_SYS_CLK_DIV,
+				 mt9p012_regs.reg_pat[rt].vt_sys_clk_div},
+				{REG_PRE_PLL_CLK_DIV,
+				 mt9p012_regs.reg_pat[rt].pre_pll_clk_div},
+				{REG_PLL_MULTIPLIER,
+				 mt9p012_regs.reg_pat[rt].pll_multiplier},
+				{REG_OP_PIX_CLK_DIV,
+				 mt9p012_regs.reg_pat[rt].op_pix_clk_div},
+				{REG_OP_SYS_CLK_DIV,
+				 mt9p012_regs.reg_pat[rt].op_sys_clk_div},
+#ifdef MT9P012_REV_7
+				{0x30B0, 0x0001},
+				{0x308E, 0xE060},
+				{0x3092, 0x0A52},
+				{0x3094, 0x4656},
+				{0x3096, 0x5652},
+				{0x30CA, 0x8006},
+				{0x312A, 0xDD02},
+				{0x312C, 0x00E4},
+				{0x3170, 0x299A},
+#endif
+				/* optimized settings for noise */
+				{0x3088, 0x6FF6},
+				{0x3154, 0x0282},
+				{0x3156, 0x0381},
+				{0x3162, 0x04CE},
+				{0x0204, 0x0010},
+				{0x0206, 0x0010},
+				{0x0208, 0x0010},
+				{0x020A, 0x0010},
+				{0x020C, 0x0010},
+				{MT9P012_REG_RESET_REGISTER,
+				 MT9P012_RESET_REGISTER_PWON},
+			};
+
+			struct mt9p012_i2c_reg_conf ipc_tbl3[] = {
+				{REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_HOLD},
+				/* Set preview or snapshot mode */
+				{REG_ROW_SPEED,
+				 mt9p012_regs.reg_pat[rt].row_speed},
+				{REG_X_ADDR_START,
+				 mt9p012_regs.reg_pat[rt].x_addr_start},
+				{REG_X_ADDR_END,
+				 mt9p012_regs.reg_pat[rt].x_addr_end},
+				{REG_Y_ADDR_START,
+				 mt9p012_regs.reg_pat[rt].y_addr_start},
+				{REG_Y_ADDR_END,
+				 mt9p012_regs.reg_pat[rt].y_addr_end},
+				{REG_READ_MODE,
+				 mt9p012_regs.reg_pat[rt].read_mode},
+				{REG_SCALE_M, mt9p012_regs.reg_pat[rt].scale_m},
+				{REG_X_OUTPUT_SIZE,
+				 mt9p012_regs.reg_pat[rt].x_output_size},
+				{REG_Y_OUTPUT_SIZE,
+				 mt9p012_regs.reg_pat[rt].y_output_size},
+				{REG_LINE_LENGTH_PCK,
+				 mt9p012_regs.reg_pat[rt].line_length_pck},
+				{REG_FRAME_LENGTH_LINES,
+				 mt9p012_regs.reg_pat[rt].frame_length_lines},
+				{REG_COARSE_INT_TIME,
+				 mt9p012_regs.reg_pat[rt].coarse_int_time},
+				{REG_FINE_INTEGRATION_TIME,
+				 mt9p012_regs.reg_pat[rt].fine_int_time},
+				{REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_UPDATE},
+			};
+
+			/* reset fps_divider */
+			mt9p012_ctrl->fps_divider = 1 * 0x0400;
+
+			rc = mt9p012_i2c_write_w_table(&ipc_tbl1[0],
+						       ARRAY_SIZE(ipc_tbl1));
+			if (rc < 0)
+				return rc;
+
+			rc = mt9p012_i2c_write_w_table(&ipc_tbl2[0],
+						       ARRAY_SIZE(ipc_tbl2));
+			if (rc < 0)
+				return rc;
+
+			mdelay(5);
+
+			rc = mt9p012_i2c_write_w_table(&ipc_tbl3[0],
+						       ARRAY_SIZE(ipc_tbl3));
+			if (rc < 0)
+				return rc;
+
+			/* load lens shading */
+			rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+						 REG_GROUPED_PARAMETER_HOLD,
+						 GROUPED_PARAMETER_HOLD);
+			if (rc < 0)
+				return rc;
+
+			rc = mt9p012_set_lc();
+			if (rc < 0)
+				return rc;
+
+			rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+						 REG_GROUPED_PARAMETER_HOLD,
+						 GROUPED_PARAMETER_UPDATE);
+
+			if (rc < 0)
+				return rc;
+		}
+		update_type = rupdate;
+		break;		/* case REG_INIT: */
+
+	default:
+		rc = -EINVAL;
+		break;
+	}			/* switch (rupdate) */
+
+	return rc;
+}
+
+static int32_t mt9p012_video_config(int mode, int res)
+{
+	int32_t rc;
+
+	switch (res) {
+	case QTR_SIZE:
+		rc = mt9p012_setting(UPDATE_PERIODIC, RES_PREVIEW);
+		if (rc < 0)
+			return rc;
+
+		CDBG("mt9p012 sensor configuration done!\n");
+		break;
+
+	case FULL_SIZE:
+		rc = mt9p012_setting(UPDATE_PERIODIC, RES_CAPTURE);
+		if (rc < 0)
+			return rc;
+
+		break;
+
+	default:
+		return 0;
+	}			/* switch */
+
+	mt9p012_ctrl->prev_res = res;
+	mt9p012_ctrl->curr_res = res;
+	mt9p012_ctrl->sensormode = mode;
+
+	rc = mt9p012_write_exp_gain(mt9p012_ctrl->my_reg_gain,
+				    mt9p012_ctrl->my_reg_line_count);
+
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+				 MT9P012_REG_RESET_REGISTER, 0x10cc | 0x0002);
+
+	return rc;
+}
+
+static int32_t mt9p012_snapshot_config(int mode)
+{
+	int32_t rc = 0;
+
+	rc = mt9p012_setting(UPDATE_PERIODIC, RES_CAPTURE);
+	if (rc < 0)
+		return rc;
+
+	mt9p012_ctrl->curr_res = mt9p012_ctrl->pict_res;
+
+	mt9p012_ctrl->sensormode = mode;
+
+	return rc;
+}
+
+static int32_t mt9p012_raw_snapshot_config(int mode)
+{
+	int32_t rc = 0;
+
+	rc = mt9p012_setting(UPDATE_PERIODIC, RES_CAPTURE);
+	if (rc < 0)
+		return rc;
+
+	mt9p012_ctrl->curr_res = mt9p012_ctrl->pict_res;
+
+	mt9p012_ctrl->sensormode = mode;
+
+	return rc;
+}
+
+static int32_t mt9p012_power_down(void)
+{
+	int32_t rc = 0;
+
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+				 MT9P012_REG_RESET_REGISTER,
+				 MT9P012_RESET_REGISTER_PWOFF);
+
+	mdelay(5);
+	return rc;
+}
+
+static int32_t mt9p012_move_focus(int direction, int32_t num_steps)
+{
+	int16_t step_direction;
+	int16_t actual_step;
+	int16_t next_position;
+	uint8_t code_val_msb, code_val_lsb;
+
+	if (num_steps > MT9P012_TOTAL_STEPS_NEAR_TO_FAR)
+		num_steps = MT9P012_TOTAL_STEPS_NEAR_TO_FAR;
+	else if (num_steps == 0) {
+		CDBG("mt9p012_move_focus failed at line %d ...\n", __LINE__);
+		return -EINVAL;
+	}
+
+	if (direction == MOVE_NEAR)
+		step_direction = 16;	/* 10bit */
+	else if (direction == MOVE_FAR)
+		step_direction = -16;	/* 10 bit */
+	else {
+		CDBG("mt9p012_move_focus failed at line %d ...\n", __LINE__);
+		return -EINVAL;
+	}
+
+	if (mt9p012_ctrl->curr_lens_pos < mt9p012_ctrl->init_curr_lens_pos)
+		mt9p012_ctrl->curr_lens_pos = mt9p012_ctrl->init_curr_lens_pos;
+
+	actual_step = (int16_t) (step_direction * (int16_t) num_steps);
+	next_position = (int16_t) (mt9p012_ctrl->curr_lens_pos + actual_step);
+
+	if (next_position > 1023)
+		next_position = 1023;
+	else if (next_position < 0)
+		next_position = 0;
+
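+	/*
+	 * Split the 10-bit DAC position across the two-byte write: the upper
+	 * six bits form the first byte and the lower four bits sit in the
+	 * top nibble of the second byte, whose low nibble is apparently
+	 * reserved for a mode mask (see the commented-out OR below).
+	 */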
+	code_val_msb = next_position >> 4;
+	code_val_lsb = (next_position & 0x000F) << 4;
+	/* code_val_lsb |= mode_mask; */
+
+	/* Writing the digital code for current to the actuator */
+	if (mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1,
+				code_val_msb, code_val_lsb) < 0) {
+		CDBG("mt9p012_move_focus failed at line %d ...\n", __LINE__);
+		return -EBUSY;
+	}
+
+	/* Storing the current lens Position */
+	mt9p012_ctrl->curr_lens_pos = next_position;
+
+	return 0;
+}
+
+static int32_t mt9p012_set_default_focus(void)
+{
+	int32_t rc = 0;
+	uint8_t code_val_msb, code_val_lsb;
+
+	code_val_msb = 0x00;
+	code_val_lsb = 0x00;
+
+	/* Write the digital code for current to the actuator */
+	rc = mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1,
+				 code_val_msb, code_val_lsb);
+
+	mt9p012_ctrl->curr_lens_pos = 0;
+	mt9p012_ctrl->init_curr_lens_pos = 0;
+
+	return rc;
+}
+
+static int mt9p012_probe_init_done(const struct msm_camera_sensor_info *data)
+{
+	gpio_direction_output(data->sensor_reset, 0);
+	gpio_free(data->sensor_reset);
+	return 0;
+}
+
+static int mt9p012_probe_init_sensor(const struct msm_camera_sensor_info *data)
+{
+	int32_t rc;
+	uint16_t chipid;
+
+	rc = gpio_request(data->sensor_reset, "mt9p012");
+	if (!rc)
+		gpio_direction_output(data->sensor_reset, 1);
+	else
+		goto init_probe_done;
+
+	msleep(20);
+
+	/* RESET the sensor image part via I2C command */
+	CDBG("mt9p012_sensor_init(): resetting sensor.\n");
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+				 MT9P012_REG_RESET_REGISTER, 0x10CC | 0x0001);
+	if (rc < 0) {
+		CDBG("sensor reset failed. rc = %d\n", rc);
+		goto init_probe_fail;
+	}
+
+	msleep(MT9P012_RESET_DELAY_MSECS);
+
+	/* 3. Read sensor Model ID: */
+	rc = mt9p012_i2c_read_w(mt9p012_client->addr,
+				MT9P012_REG_MODEL_ID, &chipid);
+	if (rc < 0)
+		goto init_probe_fail;
+
+	/* 4. Compare sensor ID to MT9P012 ID: */
+	if (chipid != MT9P012_MODEL_ID) {
+		CDBG("mt9p012 wrong model_id = 0x%x\n", chipid);
+		rc = -ENODEV;
+		goto init_probe_fail;
+	}
+
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr, 0x306E, 0x9000);
+	if (rc < 0) {
+		CDBG("REV_7 write failed. rc = %d\n", rc);
+		goto init_probe_fail;
+	}
+
+	/* RESET_REGISTER, enable parallel interface and disable serialiser */
+	CDBG("mt9p012_sensor_init(): enabling parallel interface.\n");
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr, 0x301A, 0x10CC);
+	if (rc < 0) {
+		CDBG("enable parallel interface failed. rc = %d\n", rc);
+		goto init_probe_fail;
+	}
+
+	/* To disable the 2 extra lines */
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr, 0x3064, 0x0805);
+
+	if (rc < 0) {
+		CDBG("disable the 2 extra lines failed. rc = %d\n", rc);
+		goto init_probe_fail;
+	}
+	goto init_probe_done;
+
+init_probe_fail:
+	mt9p012_probe_init_done(data);
+init_probe_done:
+	return rc;
+}
+
+static int mt9p012_sensor_open_init(const struct msm_camera_sensor_info *data)
+{
+	int32_t rc;
+
+	mt9p012_ctrl = kzalloc(sizeof(struct mt9p012_ctrl), GFP_KERNEL);
+	if (!mt9p012_ctrl) {
+		CDBG("mt9p012_init failed!\n");
+		rc = -ENOMEM;
+		goto init_done;
+	}
+
+	mt9p012_ctrl->fps_divider = 1 * 0x00000400;
+	mt9p012_ctrl->pict_fps_divider = 1 * 0x00000400;
+	mt9p012_ctrl->set_test = TEST_OFF;
+	mt9p012_ctrl->prev_res = QTR_SIZE;
+	mt9p012_ctrl->pict_res = FULL_SIZE;
+
+	if (data)
+		mt9p012_ctrl->sensordata = data;
+
+	msm_camio_camif_pad_reg_reset();
+	mdelay(20);
+
+	rc = mt9p012_probe_init_sensor(data);
+	if (rc < 0)
+		goto init_fail1;
+
+	if (mt9p012_ctrl->prev_res == QTR_SIZE)
+		rc = mt9p012_setting(REG_INIT, RES_PREVIEW);
+	else
+		rc = mt9p012_setting(REG_INIT, RES_CAPTURE);
+
+	if (rc < 0) {
+		CDBG("mt9p012_setting failed. rc = %d\n", rc);
+		goto init_fail1;
+	}
+
+	/* sensor : output enable */
+	CDBG("mt9p012_sensor_open_init(): enabling output.\n");
+	rc = mt9p012_i2c_write_w(mt9p012_client->addr,
+				 MT9P012_REG_RESET_REGISTER,
+				 MT9P012_RESET_REGISTER_PWON);
+	if (rc < 0) {
+		CDBG("sensor output enable failed. rc = %d\n", rc);
+		goto init_fail1;
+	}
+
+	/* enable AF actuator */
+	if (mt9p012_ctrl->sensordata->vcm_enable) {
+		CDBG("enable AF actuator, gpio = %d\n",
+			 mt9p012_ctrl->sensordata->vcm_pwd);
+		rc = gpio_request(mt9p012_ctrl->sensordata->vcm_pwd,
+						"mt9p012");
+		if (!rc)
+			gpio_direction_output(
+				mt9p012_ctrl->sensordata->vcm_pwd,
+				 1);
+		else {
+			CDBG("mt9p012_ctrl gpio request failed!\n");
+			goto init_fail1;
+		}
+		msleep(20);
+		rc = mt9p012_set_default_focus();
+		if (rc < 0) {
+			gpio_direction_output(mt9p012_ctrl->sensordata->vcm_pwd,
+								0);
+			gpio_free(mt9p012_ctrl->sensordata->vcm_pwd);
+		}
+	}
+	if (rc >= 0)
+		goto init_done;
+init_fail1:
+	mt9p012_probe_init_done(data);
+	kfree(mt9p012_ctrl);
+init_done:
+	return rc;
+}
+
+static int mt9p012_init_client(struct i2c_client *client)
+{
+	/* Initialize the MSM_CAMI2C Chip */
+	init_waitqueue_head(&mt9p012_wait_queue);
+	return 0;
+}
+
+static int32_t mt9p012_set_sensor_mode(int mode, int res)
+{
+	int32_t rc = 0;
+
+	switch (mode) {
+	case SENSOR_PREVIEW_MODE:
+		rc = mt9p012_video_config(mode, res);
+		break;
+
+	case SENSOR_SNAPSHOT_MODE:
+		rc = mt9p012_snapshot_config(mode);
+		break;
+
+	case SENSOR_RAW_SNAPSHOT_MODE:
+		rc = mt9p012_raw_snapshot_config(mode);
+		break;
+
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+int mt9p012_sensor_config(void __user *argp)
+{
+	struct sensor_cfg_data cdata;
+	int rc = 0;
+
+	if (copy_from_user(&cdata,
+			   (void *)argp, sizeof(struct sensor_cfg_data)))
+		return -EFAULT;
+
+	mutex_lock(&mt9p012_mut);
+
+	CDBG("%s: cfgtype = %d\n", __func__, cdata.cfgtype);
+	switch (cdata.cfgtype) {
+	case CFG_GET_PICT_FPS:
+		mt9p012_get_pict_fps(cdata.cfg.gfps.prevfps,
+				     &(cdata.cfg.gfps.pictfps));
+
+		if (copy_to_user((void *)argp, &cdata,
+				 sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PREV_L_PF:
+		cdata.cfg.prevl_pf = mt9p012_get_prev_lines_pf();
+
+		if (copy_to_user((void *)argp,
+				 &cdata, sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PREV_P_PL:
+		cdata.cfg.prevp_pl = mt9p012_get_prev_pixels_pl();
+
+		if (copy_to_user((void *)argp,
+				 &cdata, sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PICT_L_PF:
+		cdata.cfg.pictl_pf = mt9p012_get_pict_lines_pf();
+
+		if (copy_to_user((void *)argp,
+				 &cdata, sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PICT_P_PL:
+		cdata.cfg.pictp_pl = mt9p012_get_pict_pixels_pl();
+
+		if (copy_to_user((void *)argp,
+				 &cdata, sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PICT_MAX_EXP_LC:
+		cdata.cfg.pict_max_exp_lc = mt9p012_get_pict_max_exp_lc();
+
+		if (copy_to_user((void *)argp,
+				 &cdata, sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_SET_FPS:
+	case CFG_SET_PICT_FPS:
+		rc = mt9p012_set_fps(&(cdata.cfg.fps));
+		break;
+
+	case CFG_SET_EXP_GAIN:
+		rc = mt9p012_write_exp_gain(cdata.cfg.exp_gain.gain,
+					    cdata.cfg.exp_gain.line);
+		break;
+
+	case CFG_SET_PICT_EXP_GAIN:
+		CDBG("Line:%d CFG_SET_PICT_EXP_GAIN \n", __LINE__);
+		rc = mt9p012_set_pict_exp_gain(cdata.cfg.exp_gain.gain,
+					       cdata.cfg.exp_gain.line);
+		break;
+
+	case CFG_SET_MODE:
+		rc = mt9p012_set_sensor_mode(cdata.mode, cdata.rs);
+		break;
+
+	case CFG_PWR_DOWN:
+		rc = mt9p012_power_down();
+		break;
+
+	case CFG_MOVE_FOCUS:
+		CDBG("mt9p012_ioctl: CFG_MOVE_FOCUS: dir=%d steps=%d\n",
+		     cdata.cfg.focus.dir, cdata.cfg.focus.steps);
+		rc = mt9p012_move_focus(cdata.cfg.focus.dir,
+					cdata.cfg.focus.steps);
+		break;
+
+	case CFG_SET_DEFAULT_FOCUS:
+		rc = mt9p012_set_default_focus();
+		break;
+
+	case CFG_SET_LENS_SHADING:
+		CDBG("%s: CFG_SET_LENS_SHADING\n", __func__);
+		rc = mt9p012_lens_shading_enable(cdata.cfg.lens_shading);
+		break;
+
+	case CFG_GET_AF_MAX_STEPS:
+		cdata.max_steps = MT9P012_STEPS_NEAR_TO_CLOSEST_INF;
+		if (copy_to_user((void *)argp,
+				 &cdata, sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_SET_EFFECT:
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	mutex_unlock(&mt9p012_mut);
+	return rc;
+}
+
+int mt9p012_sensor_release(void)
+{
+	int rc = -EBADF;
+
+	mutex_lock(&mt9p012_mut);
+
+	mt9p012_power_down();
+
+	gpio_direction_output(mt9p012_ctrl->sensordata->sensor_reset, 0);
+	gpio_free(mt9p012_ctrl->sensordata->sensor_reset);
+
+	if (mt9p012_ctrl->sensordata->vcm_enable) {
+		gpio_direction_output(mt9p012_ctrl->sensordata->vcm_pwd, 0);
+		gpio_free(mt9p012_ctrl->sensordata->vcm_pwd);
+	}
+
+	kfree(mt9p012_ctrl);
+	mt9p012_ctrl = NULL;
+
+	CDBG("mt9p012_release completed\n");
+
+	mutex_unlock(&mt9p012_mut);
+	return rc;
+}
+
+static int mt9p012_i2c_probe(struct i2c_client *client,
+			     const struct i2c_device_id *id)
+{
+	int rc = 0;
+	CDBG("mt9p012_probe called!\n");
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		CDBG("i2c_check_functionality failed\n");
+		goto probe_failure;
+	}
+
+	mt9p012_sensorw = kzalloc(sizeof(struct mt9p012_work), GFP_KERNEL);
+	if (!mt9p012_sensorw) {
+		CDBG("kzalloc failed.\n");
+		rc = -ENOMEM;
+		goto probe_failure;
+	}
+
+	i2c_set_clientdata(client, mt9p012_sensorw);
+	mt9p012_init_client(client);
+	mt9p012_client = client;
+
+	mdelay(50);
+
+	CDBG("mt9p012_probe succeeded! rc = %d\n", rc);
+	return 0;
+
+probe_failure:
+	CDBG("mt9p012_probe failed! rc = %d\n", rc);
+	return rc;
+}
+
+static const struct i2c_device_id mt9p012_i2c_id[] = {
+	{"mt9p012", 0},
+	{}
+};
+
+static struct i2c_driver mt9p012_i2c_driver = {
+	.id_table = mt9p012_i2c_id,
+	.probe = mt9p012_i2c_probe,
+	.remove = __exit_p(mt9p012_i2c_remove),
+	.driver = {
+		   .name = "mt9p012",
+		   },
+};
+
+static int mt9p012_sensor_probe(const struct msm_camera_sensor_info *info,
+				struct msm_sensor_ctrl *s)
+{
+	int rc = i2c_add_driver(&mt9p012_i2c_driver);
+	if (rc < 0 || mt9p012_client == NULL) {
+		rc = -ENOTSUPP;
+		goto probe_done;
+	}
+
+	msm_camio_clk_rate_set(MT9P012_DEFAULT_CLOCK_RATE);
+	mdelay(20);
+
+	rc = mt9p012_probe_init_sensor(info);
+	if (rc < 0)
+		goto probe_done;
+
+	s->s_init = mt9p012_sensor_open_init;
+	s->s_release = mt9p012_sensor_release;
+	s->s_config = mt9p012_sensor_config;
+	s->s_mount_angle  = 0;
+	mt9p012_probe_init_done(info);
+
+probe_done:
+	CDBG("%s %s:%d\n", __FILE__, __func__, __LINE__);
+	return rc;
+}
+
+static int __mt9p012_probe(struct platform_device *pdev)
+{
+	return msm_camera_drv_start(pdev, mt9p012_sensor_probe);
+}
+
+static struct platform_driver msm_camera_driver = {
+	.probe = __mt9p012_probe,
+	.driver = {
+		   .name = "msm_camera_mt9p012",
+		   .owner = THIS_MODULE,
+		   },
+};
+
+static int __init mt9p012_init(void)
+{
+	return platform_driver_register(&msm_camera_driver);
+}
+
+module_init(mt9p012_init);
diff --git a/drivers/media/video/msm/mt9p012_km.c b/drivers/media/video/msm/mt9p012_km.c
new file mode 100644
index 0000000..c20064c
--- /dev/null
+++ b/drivers/media/video/msm/mt9p012_km.c
@@ -0,0 +1,1295 @@
+/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/kernel.h>
+#include <media/msm_camera.h>
+#include <mach/gpio.h>
+#include <mach/camera.h>
+#include "mt9p012_km.h"
+
+/*=============================================================
+    SENSOR REGISTER DEFINES
+==============================================================*/
+
+#define MT9P012_KM_REG_MODEL_ID      0x0000
+#define MT9P012_KM_MODEL_ID          0x2800
+#define REG_GROUPED_PARAMETER_HOLD   0x0104
+#define GROUPED_PARAMETER_HOLD       0x0100
+#define GROUPED_PARAMETER_UPDATE     0x0000
+#define REG_COARSE_INT_TIME          0x3012
+#define REG_VT_PIX_CLK_DIV           0x0300
+#define REG_VT_SYS_CLK_DIV           0x0302
+#define REG_PRE_PLL_CLK_DIV          0x0304
+#define REG_PLL_MULTIPLIER           0x0306
+#define REG_OP_PIX_CLK_DIV           0x0308
+#define REG_OP_SYS_CLK_DIV           0x030A
+#define REG_SCALE_M                  0x0404
+#define REG_FRAME_LENGTH_LINES       0x300A
+#define REG_LINE_LENGTH_PCK          0x300C
+#define REG_X_ADDR_START             0x3004
+#define REG_Y_ADDR_START             0x3002
+#define REG_X_ADDR_END               0x3008
+#define REG_Y_ADDR_END               0x3006
+#define REG_X_OUTPUT_SIZE            0x034C
+#define REG_Y_OUTPUT_SIZE            0x034E
+#define REG_FINE_INTEGRATION_TIME    0x3014
+#define REG_ROW_SPEED                0x3016
+#define MT9P012_KM_REG_RESET_REGISTER   0x301A
+#define MT9P012_KM_RESET_REGISTER_PWON  0x10CC
+#define MT9P012_KM_RESET_REGISTER_PWOFF 0x10C8
+#define REG_READ_MODE                0x3040
+#define REG_GLOBAL_GAIN              0x305E
+#define REG_TEST_PATTERN_MODE        0x3070
+
+enum mt9p012_km_test_mode {
+	TEST_OFF,
+	TEST_1,
+	TEST_2,
+	TEST_3
+};
+
+enum mt9p012_km_resolution {
+	QTR_SIZE,
+	FULL_SIZE,
+	INVALID_SIZE
+};
+
+enum mt9p012_km_reg_update {
+	/* Sensor registers that need to be updated during initialization */
+	REG_INIT,
+	/* Sensor registers that need periodic I2C writes */
+	UPDATE_PERIODIC,
+	/* All the sensor Registers will be updated */
+	UPDATE_ALL,
+	/* Not valid update */
+	UPDATE_INVALID
+};
+
+enum mt9p012_km_setting {
+	RES_PREVIEW,
+	RES_CAPTURE
+};
+
+uint8_t mode_mask = 0x04;
+
+/* actuator's Slave Address */
+#define MT9P012_KM_AF_I2C_ADDR   (0x18 >> 1)
+
+/* AF Total steps parameters */
+#define MT9P012_KM_STEPS_NEAR_TO_CLOSEST_INF  30
+#define MT9P012_KM_TOTAL_STEPS_NEAR_TO_FAR    30
+
+/* Time in milliseconds to wait for the sensor to reset. */
+#define MT9P012_KM_RESET_DELAY_MSECS   66
+
+/* for 20 fps preview */
+#define MT9P012_KM_DEFAULT_CLOCK_RATE  24000000
+
+struct mt9p012_km_work {
+	struct work_struct work;
+};
+static struct mt9p012_km_work *mt9p012_km_sensorw;
+static struct i2c_client *mt9p012_km_client;
+
+struct mt9p012_km_ctrl {
+	const struct msm_camera_sensor_info *sensordata;
+
+	int sensormode;
+	uint32_t fps_divider;	/* init to 1 * 0x00000400 */
+	uint32_t pict_fps_divider;	/* init to 1 * 0x00000400 */
+
+	uint16_t curr_lens_pos;
+	uint16_t init_curr_lens_pos;
+	uint16_t my_reg_gain;
+	uint32_t my_reg_line_count;
+
+	enum mt9p012_km_resolution prev_res;
+	enum mt9p012_km_resolution pict_res;
+	enum mt9p012_km_resolution curr_res;
+	enum mt9p012_km_test_mode set_test;
+};
+static uint16_t update_type = UPDATE_PERIODIC;
+static struct mt9p012_km_ctrl *mt9p012_km_ctrl;
+static DECLARE_WAIT_QUEUE_HEAD(mt9p012_km_wait_queue);
+DEFINE_MUTEX(mt9p012_km_mut);
+
+/*=============================================================*/
+
+static int mt9p012_km_i2c_rxdata(unsigned short saddr, unsigned char *rxdata,
+			int length)
+{
+	struct i2c_msg msgs[] = {
+		{
+			.addr = saddr << 1,
+			.flags = 0,
+			.len = 2,
+			.buf = rxdata,
+		},
+		{
+			.addr = saddr << 1,
+			.flags = I2C_M_RD,
+			.len = length,
+			.buf = rxdata,
+		},
+	};
+
+	if (i2c_transfer(mt9p012_km_client->adapter, msgs, 2) < 0) {
+		CDBG("mt9p012_km_i2c_rxdata failed!\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
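+/*
+ * 16-bit register access helpers: a read is a two-message transfer
+ * (big-endian register address write followed by a repeated-start read
+ * into the same buffer); a write packs the address and data into a
+ * single 4-byte message.
+ */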
+static int32_t mt9p012_km_i2c_read_w(unsigned short saddr, unsigned short raddr,
+				  unsigned short *rdata)
+{
+	int32_t rc = 0;
+	unsigned char buf[4];
+
+	if (!rdata)
+		return -EIO;
+
+	memset(buf, 0, sizeof(buf));
+
+	buf[0] = (raddr & 0xFF00) >> 8;
+	buf[1] = (raddr & 0x00FF);
+
+	rc = mt9p012_km_i2c_rxdata(saddr, buf, 2);
+	if (rc < 0)
+		return rc;
+
+	*rdata = buf[0] << 8 | buf[1];
+
+	if (rc < 0)
+		CDBG("mt9p012_km_i2c_read failed!\n");
+
+	return rc;
+}
+
+static int32_t mt9p012_km_i2c_txdata(unsigned short saddr,
+				  unsigned char *txdata,
+				  int length)
+{
+	struct i2c_msg msg[] = {
+		{
+		 .addr = saddr << 1,
+		 .flags = 0,
+		 .len = length,
+		 .buf = txdata,
+		 },
+	};
+
+	if (i2c_transfer(mt9p012_km_client->adapter, msg, 1) < 0) {
+		CDBG("mt9p012_km_i2c_txdata failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int32_t mt9p012_km_i2c_write_b(unsigned short saddr,
+				   unsigned short baddr,
+				   unsigned short bdata)
+{
+	int32_t rc = -EIO;
+	unsigned char buf[2];
+
+	memset(buf, 0, sizeof(buf));
+	buf[0] = baddr;
+	buf[1] = bdata;
+	rc = mt9p012_km_i2c_txdata(saddr, buf, 2);
+
+	if (rc < 0)
+		CDBG("i2c_write failed, saddr = 0x%x addr = 0x%x, val =0x%x!\n",
+		     saddr, baddr, bdata);
+
+	return rc;
+}
+
+static int32_t mt9p012_km_i2c_write_w(unsigned short saddr,
+				   unsigned short waddr,
+				   unsigned short wdata)
+{
+	int32_t rc = -EIO;
+	unsigned char buf[4];
+
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (waddr & 0xFF00) >> 8;
+	buf[1] = (waddr & 0x00FF);
+	buf[2] = (wdata & 0xFF00) >> 8;
+	buf[3] = (wdata & 0x00FF);
+
+	rc = mt9p012_km_i2c_txdata(saddr, buf, 4);
+
+	if (rc < 0)
+		CDBG("i2c_write_w failed, addr = 0x%x, val = 0x%x!\n",
+		     waddr, wdata);
+
+	return rc;
+}
+
+static int32_t mt9p012_km_i2c_write_w_table(struct mt9p012_km_i2c_reg_conf const
+					 *reg_conf_tbl, int num)
+{
+	int i;
+	int32_t rc = -EIO;
+
+	for (i = 0; i < num; i++) {
+		rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr,
+					 reg_conf_tbl->waddr,
+					 reg_conf_tbl->wdata);
+		if (rc < 0)
+			break;
+		reg_conf_tbl++;
+	}
+
+	return rc;
+}
+
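+/*
+ * Select one of the sensor test patterns: the test-mode register table
+ * is loaded under a grouped-parameter hold and REG_TEST_PATTERN_MODE
+ * picks the pattern; TEST_OFF returns early without loading the table.
+ */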
+static int32_t mt9p012_km_test(enum mt9p012_km_test_mode mo)
+{
+	int32_t rc = 0;
+
+	rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr,
+				 REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_HOLD);
+	if (rc < 0)
+		return rc;
+
+	if (mo == TEST_OFF)
+		return 0;
+	else {
+		rc = mt9p012_km_i2c_write_w_table(mt9p012_km_regs.ttbl,
+					 mt9p012_km_regs.ttbl_size);
+		if (rc < 0)
+			return rc;
+
+		rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr,
+					 REG_TEST_PATTERN_MODE, (uint16_t) mo);
+		if (rc < 0)
+			return rc;
+	}
+
+	rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr,
+				 REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_UPDATE);
+	if (rc < 0)
+		return rc;
+
+	return rc;
+}
+
+static int32_t mt9p012_km_lens_shading_enable(uint8_t is_enable)
+{
+	int32_t rc = 0;
+
+	CDBG("%s: entered. enable = %d\n", __func__, is_enable);
+
+	rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr,
+				 REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_HOLD);
+	if (rc < 0)
+		return rc;
+
+	rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr, 0x3780,
+				 ((uint16_t) is_enable) << 15);
+	if (rc < 0)
+		return rc;
+
+	rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr,
+				 REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_UPDATE);
+
+	CDBG("%s: exiting. rc = %d\n", __func__, rc);
+	return rc;
+}
+
+static int32_t mt9p012_km_set_lc(void)
+{
+	int32_t rc;
+
+	rc = mt9p012_km_i2c_write_w_table(mt9p012_km_regs.lctbl,
+				       mt9p012_km_regs.lctbl_size);
+
+	return rc;
+}
+
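+/*
+ * Derive the snapshot fps that corresponds to a given preview fps.
+ * All ratios are kept in Q10 fixed point (1.0 == 0x400), so roughly:
+ *
+ *   pict_fps = prev_fps * (prev_fll / capt_fll) * (prev_llpck / capt_llpck)
+ *                       * (capt_pll_mult / prev_pll_mult)
+ *
+ * where fll is frame_length_lines and llpck is line_length_pck.
+ */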
+static void mt9p012_km_get_pict_fps(uint16_t fps, uint16_t *pfps)
+{
+
+	/* input fps is preview fps in Q8 format */
+	uint32_t divider;   /*Q10 */
+	uint32_t pclk_mult; /*Q10 */
+	uint32_t d1;
+	uint32_t d2;
+
+	d1 =
+		(uint32_t)(
+		(mt9p012_km_regs.reg_pat[RES_PREVIEW].frame_length_lines *
+		0x00000400) /
+		mt9p012_km_regs.reg_pat[RES_CAPTURE].frame_length_lines);
+
+	d2 =
+		(uint32_t)(
+		(mt9p012_km_regs.reg_pat[RES_PREVIEW].line_length_pck *
+		0x00000400) /
+		mt9p012_km_regs.reg_pat[RES_CAPTURE].line_length_pck);
+
+	divider = (uint32_t) (d1 * d2) / 0x00000400;
+
+	pclk_mult =
+		(uint32_t) ((mt9p012_km_regs.reg_pat[RES_CAPTURE].
+		pll_multiplier * 0x00000400) /
+		(mt9p012_km_regs.reg_pat[RES_PREVIEW].pll_multiplier));
+
+
+	/* Verify PCLK settings and frame sizes. */
+	*pfps = (uint16_t)((((fps * pclk_mult) / 0x00000400) * divider)/
+				0x00000400);
+}
+
+static uint16_t mt9p012_km_get_prev_lines_pf(void)
+{
+	if (mt9p012_km_ctrl->prev_res == QTR_SIZE)
+		return  mt9p012_km_regs.reg_pat[RES_PREVIEW].frame_length_lines;
+	else
+		return  mt9p012_km_regs.reg_pat[RES_CAPTURE].frame_length_lines;
+}
+
+static uint16_t mt9p012_km_get_prev_pixels_pl(void)
+{
+	if (mt9p012_km_ctrl->prev_res == QTR_SIZE)
+		return  mt9p012_km_regs.reg_pat[RES_PREVIEW].line_length_pck;
+	else
+		return  mt9p012_km_regs.reg_pat[RES_CAPTURE].line_length_pck;
+}
+
+static uint16_t mt9p012_km_get_pict_lines_pf(void)
+{
+	return  mt9p012_km_regs.reg_pat[RES_CAPTURE].frame_length_lines;
+}
+
+static uint16_t mt9p012_km_get_pict_pixels_pl(void)
+{
+	return  mt9p012_km_regs.reg_pat[RES_CAPTURE].line_length_pck;
+}
+
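+/*
+ * Maximum snapshot exposure in line counts, reported to user space via
+ * CFG_GET_PICT_MAX_EXP_LC: up to 24 frames worth of lines
+ * ((frame_length_lines - 1) * 24) for the configured snapshot geometry.
+ */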
+static uint32_t mt9p012_km_get_pict_max_exp_lc(void)
+{
+	uint16_t snapshot_lines_per_frame;
+
+	if (mt9p012_km_ctrl->pict_res == QTR_SIZE)
+		snapshot_lines_per_frame =
+	    mt9p012_km_regs.reg_pat[RES_PREVIEW].frame_length_lines - 1;
+	else
+		snapshot_lines_per_frame =
+	    mt9p012_km_regs.reg_pat[RES_CAPTURE].frame_length_lines - 1;
+
+	return snapshot_lines_per_frame * 24;
+}
+
+static int32_t mt9p012_km_set_fps(struct fps_cfg *fps)
+{
+	int32_t rc = 0;
+
+	mt9p012_km_ctrl->fps_divider = fps->fps_div;
+	mt9p012_km_ctrl->pict_fps_divider = fps->pict_fps_div;
+
+	rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr,
+				 REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_HOLD);
+	if (rc < 0)
+		return -EBUSY;
+
+	rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr,
+			REG_FRAME_LENGTH_LINES,
+			mt9p012_km_regs.reg_pat[mt9p012_km_ctrl->sensormode].
+			frame_length_lines *
+			mt9p012_km_ctrl->fps_divider / 0x00000400);
+	if (rc < 0)
+		return rc;
+
+	rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr,
+				 REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_UPDATE);
+
+	return rc;
+}
+
+
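+/*
+ * Program analog gain and integration time under a grouped-parameter
+ * hold.  Gain is clamped to max_legal_gain and bit 9 is set to keep
+ * digital gain at 1x.  If the requested integration time exceeds
+ * frame_length_lines - 1, line_length_pck is stretched by a Q10
+ * line_length_ratio and the coarse integration time is scaled back by
+ * the same ratio, so the effective exposure is preserved.
+ */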
+static int32_t mt9p012_km_write_exp_gain(uint16_t gain, uint32_t line)
+{
+	uint16_t max_legal_gain = 0x01FF;
+	uint32_t line_length_ratio = 0x00000400;
+	enum mt9p012_km_setting setting;
+	int32_t rc = 0;
+
+	CDBG("Line:%d mt9p012_km_write_exp_gain \n", __LINE__);
+
+	if (mt9p012_km_ctrl->sensormode == SENSOR_PREVIEW_MODE) {
+		mt9p012_km_ctrl->my_reg_gain = gain;
+		mt9p012_km_ctrl->my_reg_line_count = (uint16_t) line;
+	}
+
+	if (gain > max_legal_gain) {
+		CDBG("Max legal gain Line:%d \n", __LINE__);
+		gain = max_legal_gain;
+	}
+
+	/* Verify no overflow */
+	if (mt9p012_km_ctrl->sensormode == SENSOR_PREVIEW_MODE) {
+		line = (uint32_t) (line * mt9p012_km_ctrl->fps_divider /
+				   0x00000400);
+		setting = RES_PREVIEW;
+	} else {
+		line = (uint32_t) (line * mt9p012_km_ctrl->pict_fps_divider /
+				   0x00000400);
+		setting = RES_CAPTURE;
+	}
+
+	gain |= 0x0200;
+
+	if ((mt9p012_km_regs.reg_pat[setting].frame_length_lines - 1) < line) {
+		line_length_ratio = (uint32_t) (line * 0x00000400) /
+		    (mt9p012_km_regs.reg_pat[setting].frame_length_lines - 1);
+	} else
+		line_length_ratio = 0x00000400;
+
+	rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr,
+				REG_GROUPED_PARAMETER_HOLD,
+				GROUPED_PARAMETER_HOLD);
+	if (rc < 0) {
+		CDBG("mt9p012_km_i2c_write_w failed... Line:%d \n", __LINE__);
+		return rc;
+	}
+
+	rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr,
+				 REG_GLOBAL_GAIN, gain);
+	if (rc < 0) {
+		CDBG("mt9p012_km_i2c_write_w failed... Line:%d \n", __LINE__);
+		return rc;
+	}
+
+	rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr,
+				REG_LINE_LENGTH_PCK,
+			       (uint16_t) (mt9p012_km_regs.reg_pat[setting].
+			    line_length_pck * line_length_ratio / 0x00000400));
+	if (rc < 0)
+		return rc;
+
+	rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr,
+				 REG_COARSE_INT_TIME,
+				 (uint16_t) ((line * 0x00000400)/
+				 line_length_ratio));
+	if (rc < 0) {
+		CDBG("mt9p012_km_i2c_write_w failed... Line:%d \n", __LINE__);
+		return rc;
+	}
+
+	rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr,
+				 REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_UPDATE);
+	if (rc < 0) {
+		CDBG("mt9p012_km_i2c_write_w failed... Line:%d \n", __LINE__);
+		return rc;
+	}
+
+	CDBG("mt9p012_km_write_exp_gain: gain = %d, line = %d\n", gain, line);
+
+	return rc;
+}
+
+static int32_t mt9p012_km_set_pict_exp_gain(uint16_t gain, uint32_t line)
+{
+	int32_t rc = 0;
+
+	CDBG("Line:%d mt9p012_km_set_pict_exp_gain \n", __LINE__);
+
+	rc = mt9p012_km_write_exp_gain(gain, line);
+	if (rc < 0) {
+		CDBG("Line:%d mt9p012_km_set_pict_exp_gain failed... \n",
+		     __LINE__);
+		return rc;
+	}
+
+	rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr,
+				 MT9P012_KM_REG_RESET_REGISTER,
+				 0x10CC | 0x0002);
+	if (rc < 0) {
+		CDBG("mt9p012_km_i2c_write_w failed... Line:%d \n", __LINE__);
+		return rc;
+	}
+
+	mdelay(5);
+
+	/* camera_timed_wait(snapshot_wait*exposure_ratio); */
+	return rc;
+}
+
+static int32_t mt9p012_km_setting(enum mt9p012_km_reg_update rupdate,
+				enum mt9p012_km_setting rt)
+{
+	int32_t rc = 0;
+
+	switch (rupdate) {
+	case UPDATE_PERIODIC:
+		if (rt == RES_PREVIEW || rt == RES_CAPTURE) {
+
+			struct mt9p012_km_i2c_reg_conf ppc_tbl[] = {
+				{REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_HOLD},
+				{REG_ROW_SPEED,
+				 mt9p012_km_regs.reg_pat[rt].row_speed},
+				{REG_X_ADDR_START,
+				 mt9p012_km_regs.reg_pat[rt].x_addr_start},
+				{REG_X_ADDR_END,
+				 mt9p012_km_regs.reg_pat[rt].x_addr_end},
+				{REG_Y_ADDR_START,
+				 mt9p012_km_regs.reg_pat[rt].y_addr_start},
+				{REG_Y_ADDR_END,
+				 mt9p012_km_regs.reg_pat[rt].y_addr_end},
+				{REG_READ_MODE,
+				 mt9p012_km_regs.reg_pat[rt].read_mode},
+				{REG_SCALE_M,
+				 mt9p012_km_regs.reg_pat[rt].scale_m},
+				{REG_X_OUTPUT_SIZE,
+				 mt9p012_km_regs.reg_pat[rt].x_output_size},
+				{REG_Y_OUTPUT_SIZE,
+				 mt9p012_km_regs.reg_pat[rt].y_output_size},
+				{REG_LINE_LENGTH_PCK,
+				 mt9p012_km_regs.reg_pat[rt].line_length_pck},
+				{REG_FRAME_LENGTH_LINES,
+			       (mt9p012_km_regs.reg_pat[rt].frame_length_lines *
+				mt9p012_km_ctrl->fps_divider / 0x00000400)},
+				{REG_COARSE_INT_TIME,
+				 mt9p012_km_regs.reg_pat[rt].coarse_int_time},
+				{REG_FINE_INTEGRATION_TIME,
+				 mt9p012_km_regs.reg_pat[rt].fine_int_time},
+				{REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_UPDATE},
+			};
+
+			if (update_type == REG_INIT) {
+				update_type = rupdate;
+				return rc;
+			}
+
+			rc = mt9p012_km_i2c_write_w_table(&ppc_tbl[0],
+						ARRAY_SIZE(ppc_tbl));
+			if (rc < 0)
+				return rc;
+
+			rc = mt9p012_km_test(mt9p012_km_ctrl->set_test);
+			if (rc < 0)
+				return rc;
+
+			rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr,
+						 MT9P012_KM_REG_RESET_REGISTER,
+						 0x10cc |
+						 0x0002);
+			if (rc < 0)
+				return rc;
+
+			mdelay(15);	/* wait ~15 ms for the sensor mode transition */
+
+			return rc;
+		}
+		break;	/* UPDATE_PERIODIC */
+
+	case REG_INIT:
+		if (rt == RES_PREVIEW || rt == RES_CAPTURE) {
+			struct mt9p012_km_i2c_reg_conf ipc_tbl1[] = {
+				{MT9P012_KM_REG_RESET_REGISTER,
+				 MT9P012_KM_RESET_REGISTER_PWOFF},
+				{REG_VT_PIX_CLK_DIV,
+				 mt9p012_km_regs.reg_pat[rt].vt_pix_clk_div},
+				{REG_VT_SYS_CLK_DIV,
+				 mt9p012_km_regs.reg_pat[rt].vt_sys_clk_div},
+				{REG_PRE_PLL_CLK_DIV,
+				 mt9p012_km_regs.reg_pat[rt].pre_pll_clk_div},
+				{REG_PLL_MULTIPLIER,
+				 mt9p012_km_regs.reg_pat[rt].pll_multiplier},
+				{REG_OP_PIX_CLK_DIV,
+				 mt9p012_km_regs.reg_pat[rt].op_pix_clk_div},
+				{REG_OP_SYS_CLK_DIV,
+				 mt9p012_km_regs.reg_pat[rt].op_sys_clk_div},
+				{MT9P012_KM_REG_RESET_REGISTER,
+				 MT9P012_KM_RESET_REGISTER_PWON},
+			};
+
+			struct mt9p012_km_i2c_reg_conf ipc_tbl2[] = {
+				{REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_HOLD},
+				/* Optimized register settings for
+				   Rev3 Silicon */
+				{0x308A, 0x6424},
+				{0x3092, 0x0A52},
+				{0x3094, 0x4656},
+				{0x3096, 0x5652},
+				{0x0204, 0x0010},
+				{0x0206, 0x0010},
+				{0x0208, 0x0010},
+				{0x020A, 0x0010},
+				{0x020C, 0x0010},
+				{0x3088, 0x6FF6},
+				{0x3154, 0x0282},
+				{0x3156, 0x0381},
+				{0x3162, 0x04CE},
+			};
+
+			struct mt9p012_km_i2c_reg_conf ipc_tbl3[] = {
+				/* Set preview or snapshot mode */
+				{REG_ROW_SPEED,
+				 mt9p012_km_regs.reg_pat[rt].row_speed},
+				{REG_X_ADDR_START,
+				 mt9p012_km_regs.reg_pat[rt].x_addr_start},
+				{REG_X_ADDR_END,
+				 mt9p012_km_regs.reg_pat[rt].x_addr_end},
+				{REG_Y_ADDR_START,
+				 mt9p012_km_regs.reg_pat[rt].y_addr_start},
+				{REG_Y_ADDR_END,
+				 mt9p012_km_regs.reg_pat[rt].y_addr_end},
+				{REG_READ_MODE,
+				 mt9p012_km_regs.reg_pat[rt].read_mode},
+				{REG_SCALE_M,
+				 mt9p012_km_regs.reg_pat[rt].scale_m},
+				{REG_X_OUTPUT_SIZE,
+				 mt9p012_km_regs.reg_pat[rt].x_output_size},
+				{REG_Y_OUTPUT_SIZE,
+				 mt9p012_km_regs.reg_pat[rt].y_output_size},
+				{REG_LINE_LENGTH_PCK,
+				 mt9p012_km_regs.reg_pat[rt].line_length_pck},
+				{REG_FRAME_LENGTH_LINES,
+				 mt9p012_km_regs.reg_pat[rt].
+				 frame_length_lines},
+				{REG_COARSE_INT_TIME,
+				 mt9p012_km_regs.reg_pat[rt].coarse_int_time},
+				{REG_FINE_INTEGRATION_TIME,
+				 mt9p012_km_regs.reg_pat[rt].fine_int_time},
+				{REG_GROUPED_PARAMETER_HOLD,
+				 GROUPED_PARAMETER_UPDATE},
+			};
+
+			/* reset fps_divider */
+			mt9p012_km_ctrl->fps_divider = 1 * 0x0400;
+
+			rc = mt9p012_km_i2c_write_w_table(&ipc_tbl1[0],
+							ARRAY_SIZE(ipc_tbl1));
+			if (rc < 0)
+				return rc;
+
+			mdelay(15);
+
+			rc = mt9p012_km_i2c_write_w_table(&ipc_tbl2[0],
+							ARRAY_SIZE(ipc_tbl2));
+			if (rc < 0)
+				return rc;
+
+			mdelay(5);
+
+			rc = mt9p012_km_i2c_write_w_table(&ipc_tbl3[0],
+						       ARRAY_SIZE(ipc_tbl3));
+			if (rc < 0)
+				return rc;
+
+			/* load lens shading */
+			rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr,
+						 REG_GROUPED_PARAMETER_HOLD,
+						 GROUPED_PARAMETER_HOLD);
+			if (rc < 0)
+				return rc;
+
+			rc = mt9p012_km_set_lc();
+			if (rc < 0)
+				return rc;
+
+			rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr,
+						 REG_GROUPED_PARAMETER_HOLD,
+						 GROUPED_PARAMETER_UPDATE);
+
+			if (rc < 0)
+				return rc;
+		}
+		update_type = rupdate;
+		break;		/* case REG_INIT: */
+
+	default:
+		rc = -EINVAL;
+		break;
+	}			/* switch (rupdate) */
+
+	return rc;
+}
+
+static int32_t mt9p012_km_video_config(int mode, int res)
+{
+	int32_t rc;
+
+	switch (res) {
+	case QTR_SIZE:
+		rc = mt9p012_km_setting(UPDATE_PERIODIC, RES_PREVIEW);
+		if (rc < 0)
+			return rc;
+
+		CDBG("mt9p012_km sensor configuration done!\n");
+		break;
+
+	case FULL_SIZE:
+		rc = mt9p012_km_setting(UPDATE_PERIODIC, RES_CAPTURE);
+		if (rc < 0)
+			return rc;
+
+		break;
+
+	default:
+		return 0;
+	}			/* switch */
+
+	mt9p012_km_ctrl->prev_res = res;
+	mt9p012_km_ctrl->curr_res = res;
+	mt9p012_km_ctrl->sensormode = mode;
+
+	rc = mt9p012_km_write_exp_gain(mt9p012_km_ctrl->my_reg_gain,
+				    mt9p012_km_ctrl->my_reg_line_count);
+
+	rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr,
+				 MT9P012_KM_REG_RESET_REGISTER,
+				 0x10cc | 0x0002);
+
+	mdelay(15);
+	return rc;
+}
+
+static int32_t mt9p012_km_snapshot_config(int mode)
+{
+	int32_t rc = 0;
+
+	rc = mt9p012_km_setting(UPDATE_PERIODIC, RES_CAPTURE);
+	if (rc < 0)
+		return rc;
+
+	mt9p012_km_ctrl->curr_res = mt9p012_km_ctrl->pict_res;
+
+	mt9p012_km_ctrl->sensormode = mode;
+
+	return rc;
+}
+
+static int32_t mt9p012_km_raw_snapshot_config(int mode)
+{
+	int32_t rc = 0;
+
+	rc = mt9p012_km_setting(UPDATE_PERIODIC, RES_CAPTURE);
+	if (rc < 0)
+		return rc;
+
+	mt9p012_km_ctrl->curr_res = mt9p012_km_ctrl->pict_res;
+
+	mt9p012_km_ctrl->sensormode = mode;
+
+	return rc;
+}
+
+static int32_t mt9p012_km_power_down(void)
+{
+	int32_t rc = 0;
+
+	rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr,
+				 MT9P012_KM_REG_RESET_REGISTER,
+				 MT9P012_KM_RESET_REGISTER_PWOFF);
+
+	mdelay(5);
+	return rc;
+}
+
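+/*
+ * Step the VCM focus actuator: the lens position is a 10-bit code that
+ * moves in increments of 16 codes per requested step (MOVE_NEAR is
+ * positive, MOVE_FAR negative) and is clamped to [0, 1023].  The code
+ * is split into an MSB byte (position >> 4) and an LSB nibble shifted
+ * into the high bits and OR'ed with mode_mask before being written to
+ * the actuator over I2C.
+ */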
+static int32_t mt9p012_km_move_focus(int direction, int32_t num_steps)
+{
+	int16_t step_direction;
+	int16_t actual_step;
+	int16_t next_position;
+	uint8_t code_val_msb, code_val_lsb;
+
+	if (num_steps > MT9P012_KM_TOTAL_STEPS_NEAR_TO_FAR)
+		num_steps = MT9P012_KM_TOTAL_STEPS_NEAR_TO_FAR;
+	else if (num_steps == 0) {
+		CDBG("mt9p012_km_move_focus failed at line %d ...\n", __LINE__);
+		return -EINVAL;
+	}
+
+	if (direction == MOVE_NEAR)
+		step_direction = 16;	/* 10bit */
+	else if (direction == MOVE_FAR)
+		step_direction = -16;	/* 10 bit */
+	else {
+		CDBG("mt9p012_km_move_focus failed at line %d ...\n", __LINE__);
+		return -EINVAL;
+	}
+
+	if (mt9p012_km_ctrl->curr_lens_pos <
+				mt9p012_km_ctrl->init_curr_lens_pos)
+		mt9p012_km_ctrl->curr_lens_pos =
+				mt9p012_km_ctrl->init_curr_lens_pos;
+
+	actual_step = (int16_t) (step_direction * (int16_t) num_steps);
+	next_position = (int16_t) (mt9p012_km_ctrl->curr_lens_pos +
+							actual_step);
+
+	if (next_position > 1023)
+		next_position = 1023;
+	else if (next_position < 0)
+		next_position = 0;
+
+	code_val_msb = next_position >> 4;
+	code_val_lsb = (next_position & 0x000F) << 4;
+	code_val_lsb |= mode_mask;
+
+	/* Writing the digital code for current to the actuator */
+	if (mt9p012_km_i2c_write_b(MT9P012_KM_AF_I2C_ADDR >> 1,
+				code_val_msb, code_val_lsb) < 0) {
+		CDBG("mt9p012_km_move_focus failed at line %d ...\n", __LINE__);
+		return -EBUSY;
+	}
+
+	/* Storing the current lens Position */
+	mt9p012_km_ctrl->curr_lens_pos = next_position;
+
+	return 0;
+}
+
+static int32_t mt9p012_km_set_default_focus(void)
+{
+	int32_t rc = 0;
+	uint8_t code_val_msb, code_val_lsb;
+
+	code_val_msb = 0x00;
+	code_val_lsb = 0x04;
+
+	/* Write the digital code for current to the actuator */
+	rc = mt9p012_km_i2c_write_b(MT9P012_KM_AF_I2C_ADDR >> 1,
+				 code_val_msb, code_val_lsb);
+
+	mt9p012_km_ctrl->curr_lens_pos = 0;
+	mt9p012_km_ctrl->init_curr_lens_pos = 0;
+
+	return rc;
+}
+
+static int mt9p012_km_probe_init_done(const struct msm_camera_sensor_info *data)
+{
+	gpio_direction_output(data->sensor_reset, 0);
+	gpio_free(data->sensor_reset);
+	return 0;
+}
+
+static int
+	mt9p012_km_probe_init_sensor(const struct msm_camera_sensor_info *data)
+{
+	int32_t rc;
+	uint16_t chipid;
+
+	rc = gpio_request(data->sensor_reset, "mt9p012_km");
+	if (!rc)
+		gpio_direction_output(data->sensor_reset, 1);
+	else
+		goto init_probe_done;
+
+	msleep(20);
+
+	/* RESET the sensor image part via I2C command */
+	CDBG("mt9p012_km_sensor_init(): reseting sensor.\n");
+	rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr,
+				 MT9P012_KM_REG_RESET_REGISTER,
+				 0x10CC | 0x0001);
+	if (rc < 0) {
+		CDBG("sensor reset failed. rc = %d\n", rc);
+		goto init_probe_fail;
+	}
+
+	msleep(MT9P012_KM_RESET_DELAY_MSECS);
+
+	/* Read sensor Model ID */
+	rc = mt9p012_km_i2c_read_w(mt9p012_km_client->addr,
+				MT9P012_KM_REG_MODEL_ID, &chipid);
+	if (rc < 0)
+		goto init_probe_fail;
+
+	/* Compare the chip ID to the MT9P012 KM model ID */
+	if (chipid != MT9P012_KM_MODEL_ID) {
+		CDBG("mt9p012_km wrong model_id = 0x%x\n", chipid);
+		rc = -ENODEV;
+		goto init_probe_fail;
+	}
+
+	rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr, 0x306E, 0x9080);
+	if (rc < 0) {
+		CDBG("REV_7 write failed. rc = %d\n", rc);
+		goto init_probe_fail;
+	}
+
+	/* RESET_REGISTER, enable parallel interface and disable serialiser */
+	CDBG("mt9p012_km_sensor_init(): enabling parallel interface.\n");
+	rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr, 0x301A, 0x10CC);
+	if (rc < 0) {
+		CDBG("enable parallel interface failed. rc = %d\n", rc);
+		goto init_probe_fail;
+	}
+
+	/* To disable the 2 extra lines */
+	rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr, 0x3064, 0x0805);
+
+	if (rc < 0) {
+		CDBG("disable the 2 extra lines failed. rc = %d\n", rc);
+		goto init_probe_fail;
+	}
+
+	goto init_probe_done;
+
+init_probe_fail:
+	mt9p012_km_probe_init_done(data);
+init_probe_done:
+	return rc;
+}
+
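+/*
+ * Open path: allocate the control block with default Q10 fps dividers,
+ * reset the CAMIF pad registers, run the probe/reset sequence, apply
+ * the REG_INIT settings for the default preview resolution and finally
+ * switch the reset register to the power-on (streaming) value.
+ */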
+static int
+	mt9p012_km_sensor_open_init(const struct msm_camera_sensor_info *data)
+{
+	int32_t rc;
+
+	mt9p012_km_ctrl = kzalloc(sizeof(struct mt9p012_km_ctrl), GFP_KERNEL);
+	if (!mt9p012_km_ctrl) {
+		CDBG("mt9p012_km_init failed!\n");
+		rc = -ENOMEM;
+		goto init_done;
+	}
+
+	mt9p012_km_ctrl->fps_divider = 1 * 0x00000400;
+	mt9p012_km_ctrl->pict_fps_divider = 1 * 0x00000400;
+	mt9p012_km_ctrl->set_test = TEST_OFF;
+	mt9p012_km_ctrl->prev_res = QTR_SIZE;
+	mt9p012_km_ctrl->pict_res = FULL_SIZE;
+
+	if (data)
+		mt9p012_km_ctrl->sensordata = data;
+
+	msm_camio_camif_pad_reg_reset();
+	mdelay(20);
+
+	rc = mt9p012_km_probe_init_sensor(data);
+	if (rc < 0)
+		goto init_fail1;
+
+	if (mt9p012_km_ctrl->prev_res == QTR_SIZE)
+		rc = mt9p012_km_setting(REG_INIT, RES_PREVIEW);
+	else
+		rc = mt9p012_km_setting(REG_INIT, RES_CAPTURE);
+
+	if (rc < 0) {
+		CDBG("mt9p012_km_setting failed. rc = %d\n", rc);
+		goto init_fail1;
+	}
+
+	/* sensor : output enable */
+	CDBG("mt9p012_km_sensor_open_init(): enabling output.\n");
+	rc = mt9p012_km_i2c_write_w(mt9p012_km_client->addr,
+				 MT9P012_KM_REG_RESET_REGISTER,
+				 MT9P012_KM_RESET_REGISTER_PWON);
+	if (rc < 0) {
+		CDBG("sensor output enable failed. rc = %d\n", rc);
+		goto init_fail1;
+	}
+
+	if (rc >= 0)
+		goto init_done;
+
+init_fail1:
+	mt9p012_km_probe_init_done(data);
+	kfree(mt9p012_km_ctrl);
+init_done:
+	return rc;
+}
+
+static int mt9p012_km_init_client(struct i2c_client *client)
+{
+	/* Initialize the MSM_CAMI2C Chip */
+	init_waitqueue_head(&mt9p012_km_wait_queue);
+	return 0;
+}
+
+static int32_t mt9p012_km_set_sensor_mode(int mode, int res)
+{
+	int32_t rc = 0;
+
+	switch (mode) {
+	case SENSOR_PREVIEW_MODE:
+		rc = mt9p012_km_video_config(mode, res);
+		break;
+
+	case SENSOR_SNAPSHOT_MODE:
+		rc = mt9p012_km_snapshot_config(mode);
+		break;
+
+	case SENSOR_RAW_SNAPSHOT_MODE:
+		rc = mt9p012_km_raw_snapshot_config(mode);
+		break;
+
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
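+/*
+ * ioctl backend for the msm camera stack: copy a sensor_cfg_data block
+ * from user space, dispatch on cfgtype under mt9p012_km_mut, and copy
+ * the result back for the "get" style requests.
+ */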
+int mt9p012_km_sensor_config(void __user *argp)
+{
+	struct sensor_cfg_data cdata;
+	int rc = 0;
+
+	if (copy_from_user(&cdata,
+			   (void *)argp, sizeof(struct sensor_cfg_data)))
+		return -EFAULT;
+
+	mutex_lock(&mt9p012_km_mut);
+
+	CDBG("%s: cfgtype = %d\n", __func__, cdata.cfgtype);
+	switch (cdata.cfgtype) {
+	case CFG_GET_PICT_FPS:
+		mt9p012_km_get_pict_fps(cdata.cfg.gfps.prevfps,
+				     &(cdata.cfg.gfps.pictfps));
+
+		if (copy_to_user((void *)argp, &cdata,
+				 sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PREV_L_PF:
+		cdata.cfg.prevl_pf = mt9p012_km_get_prev_lines_pf();
+
+		if (copy_to_user((void *)argp,
+				 &cdata, sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PREV_P_PL:
+		cdata.cfg.prevp_pl = mt9p012_km_get_prev_pixels_pl();
+
+		if (copy_to_user((void *)argp,
+				 &cdata, sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PICT_L_PF:
+		cdata.cfg.pictl_pf = mt9p012_km_get_pict_lines_pf();
+
+		if (copy_to_user((void *)argp,
+				 &cdata, sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PICT_P_PL:
+		cdata.cfg.pictp_pl = mt9p012_km_get_pict_pixels_pl();
+
+		if (copy_to_user((void *)argp,
+				 &cdata, sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PICT_MAX_EXP_LC:
+		cdata.cfg.pict_max_exp_lc = mt9p012_km_get_pict_max_exp_lc();
+
+		if (copy_to_user((void *)argp,
+				 &cdata, sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_SET_FPS:
+	case CFG_SET_PICT_FPS:
+		rc = mt9p012_km_set_fps(&(cdata.cfg.fps));
+		break;
+
+	case CFG_SET_EXP_GAIN:
+		rc = mt9p012_km_write_exp_gain(cdata.cfg.exp_gain.gain,
+					    cdata.cfg.exp_gain.line);
+		break;
+
+	case CFG_SET_PICT_EXP_GAIN:
+		CDBG("Line:%d CFG_SET_PICT_EXP_GAIN \n", __LINE__);
+		rc = mt9p012_km_set_pict_exp_gain(cdata.cfg.exp_gain.gain,
+					       cdata.cfg.exp_gain.line);
+		break;
+
+	case CFG_SET_MODE:
+		rc = mt9p012_km_set_sensor_mode(cdata.mode, cdata.rs);
+		break;
+
+	case CFG_PWR_DOWN:
+		rc = mt9p012_km_power_down();
+		break;
+
+	case CFG_MOVE_FOCUS:
+		CDBG("mt9p012_km_ioctl: CFG_MOVE_FOCUS: cdata.cfg.focus.dir=%d \
+				cdata.cfg.focus.steps=%d\n",
+				cdata.cfg.focus.dir, cdata.cfg.focus.steps);
+		rc = mt9p012_km_move_focus(cdata.cfg.focus.dir,
+					cdata.cfg.focus.steps);
+		break;
+
+	case CFG_SET_DEFAULT_FOCUS:
+		rc = mt9p012_km_set_default_focus();
+		break;
+
+	case CFG_SET_LENS_SHADING:
+		CDBG("%s: CFG_SET_LENS_SHADING\n", __func__);
+		rc = mt9p012_km_lens_shading_enable(cdata.cfg.lens_shading);
+		break;
+
+	case CFG_GET_AF_MAX_STEPS:
+		cdata.max_steps = MT9P012_KM_STEPS_NEAR_TO_CLOSEST_INF;
+		if (copy_to_user((void *)argp,
+				 &cdata, sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_SET_EFFECT:
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	mutex_unlock(&mt9p012_km_mut);
+	return rc;
+}
+
+int mt9p012_km_sensor_release(void)
+{
+	int rc = -EBADF;
+
+	mutex_lock(&mt9p012_km_mut);
+
+	mt9p012_km_power_down();
+
+	gpio_direction_output(mt9p012_km_ctrl->sensordata->sensor_reset, 0);
+	gpio_free(mt9p012_km_ctrl->sensordata->sensor_reset);
+
+	gpio_direction_output(mt9p012_km_ctrl->sensordata->vcm_pwd, 0);
+	gpio_free(mt9p012_km_ctrl->sensordata->vcm_pwd);
+
+	kfree(mt9p012_km_ctrl);
+	mt9p012_km_ctrl = NULL;
+
+	CDBG("mt9p012_km_release completed\n");
+
+	mutex_unlock(&mt9p012_km_mut);
+	return rc;
+}
+
+static int mt9p012_km_i2c_probe(struct i2c_client *client,
+			     const struct i2c_device_id *id)
+{
+	int rc = 0;
+	CDBG("mt9p012_km_probe called!\n");
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		CDBG("i2c_check_functionality failed\n");
+		goto probe_failure;
+	}
+
+	mt9p012_km_sensorw = kzalloc(sizeof(struct mt9p012_km_work),
+							GFP_KERNEL);
+	if (!mt9p012_km_sensorw) {
+		CDBG("kzalloc failed.\n");
+		rc = -ENOMEM;
+		goto probe_failure;
+	}
+
+	i2c_set_clientdata(client, mt9p012_km_sensorw);
+	mt9p012_km_init_client(client);
+	mt9p012_km_client = client;
+
+	mdelay(50);
+
+	CDBG("mt9p012_km_probe successed! rc = %d\n", rc);
+	return 0;
+
+probe_failure:
+	CDBG("mt9p012_km_probe failed! rc = %d\n", rc);
+	return rc;
+}
+
+static const struct i2c_device_id mt9p012_km_i2c_id[] = {
+	{"mt9p012_km", 0},
+	{}
+};
+
+static struct i2c_driver mt9p012_km_i2c_driver = {
+	.id_table = mt9p012_km_i2c_id,
+	.probe = mt9p012_km_i2c_probe,
+	.remove = __exit_p(mt9p012_km_i2c_remove),
+	.driver = {
+		   .name = "mt9p012_km",
+		   },
+};
+
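+/*
+ * Platform probe path: register the I2C driver, set the sensor clock
+ * rate, verify the model ID over I2C, then hand the open/release/config
+ * entry points back to the msm camera framework through msm_sensor_ctrl.
+ */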
+static int mt9p012_km_sensor_probe(const struct msm_camera_sensor_info *info,
+				struct msm_sensor_ctrl *s)
+{
+	int rc = i2c_add_driver(&mt9p012_km_i2c_driver);
+	if (rc < 0 || mt9p012_km_client == NULL) {
+		rc = -ENOTSUPP;
+		goto probe_done;
+	}
+
+	msm_camio_clk_rate_set(MT9P012_KM_DEFAULT_CLOCK_RATE);
+	mdelay(20);
+
+	rc = mt9p012_km_probe_init_sensor(info);
+	if (rc < 0)
+		goto probe_done;
+
+	s->s_init = mt9p012_km_sensor_open_init;
+	s->s_release = mt9p012_km_sensor_release;
+	s->s_config = mt9p012_km_sensor_config;
+	s->s_mount_angle  = 0;
+	mt9p012_km_probe_init_done(info);
+
+probe_done:
+	CDBG("%s %s:%d\n", __FILE__, __func__, __LINE__);
+	return rc;
+}
+
+static int __mt9p012_km_probe(struct platform_device *pdev)
+{
+	return msm_camera_drv_start(pdev, mt9p012_km_sensor_probe);
+}
+
+static struct platform_driver msm_camera_driver = {
+	.probe = __mt9p012_km_probe,
+	.driver = {
+		   .name = "msm_camera_mt9p012_km",
+		   .owner = THIS_MODULE,
+		   },
+};
+
+static int __init mt9p012_km_init(void)
+{
+	return platform_driver_register(&msm_camera_driver);
+}
+
+module_init(mt9p012_km_init);
diff --git a/drivers/media/video/msm/mt9p012_km.h b/drivers/media/video/msm/mt9p012_km.h
new file mode 100644
index 0000000..aefabd4
--- /dev/null
+++ b/drivers/media/video/msm/mt9p012_km.h
@@ -0,0 +1,61 @@
+/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MT9P012_KM_H
+#define MT9P012_KM_H
+
+#include <linux/types.h>
+
+extern struct mt9p012_km_reg mt9p012_km_regs;	/* from mt9p012_km_reg.c */
+
+struct reg_struct {
+	uint16_t vt_pix_clk_div;     /* 0x0300 */
+	uint16_t vt_sys_clk_div;     /* 0x0302 */
+	uint16_t pre_pll_clk_div;    /* 0x0304 */
+	uint16_t pll_multiplier;     /* 0x0306 */
+	uint16_t op_pix_clk_div;     /* 0x0308 */
+	uint16_t op_sys_clk_div;     /* 0x030A */
+	uint16_t scale_m;            /* 0x0404 */
+	uint16_t row_speed;          /* 0x3016 */
+	uint16_t x_addr_start;       /* 0x3004 */
+	uint16_t x_addr_end;         /* 0x3008 */
+	uint16_t y_addr_start;       /* 0x3002 */
+	uint16_t y_addr_end;         /* 0x3006 */
+	uint16_t read_mode;          /* 0x3040 */
+	uint16_t x_output_size;      /* 0x034C */
+	uint16_t y_output_size;      /* 0x034E */
+	uint16_t line_length_pck;    /* 0x300C */
+	uint16_t frame_length_lines; /* 0x300A */
+	uint16_t coarse_int_time;    /* 0x3012 */
+	uint16_t fine_int_time;      /* 0x3014 */
+};
+
+
+struct mt9p012_km_i2c_reg_conf {
+	unsigned short waddr;
+	unsigned short wdata;
+};
+
+
+struct mt9p012_km_reg {
+	struct reg_struct const *reg_pat;
+	uint16_t reg_pat_size;
+	struct mt9p012_km_i2c_reg_conf const *ttbl;
+	uint16_t ttbl_size;
+	struct mt9p012_km_i2c_reg_conf const *lctbl;
+	uint16_t lctbl_size;
+	struct mt9p012_km_i2c_reg_conf const *rftbl;
+	uint16_t rftbl_size;
+};
+
+#endif /* MT9P012_KM_H */
diff --git a/drivers/media/video/msm/mt9p012_km_reg.c b/drivers/media/video/msm/mt9p012_km_reg.c
new file mode 100644
index 0000000..109930b
--- /dev/null
+++ b/drivers/media/video/msm/mt9p012_km_reg.c
@@ -0,0 +1,375 @@
+/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "mt9p012_km.h"
+#include <linux/kernel.h>
+
+/* Micron settings from Applications for lower power consumption. */
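+/*
+ * reg_pat[] is indexed by enum mt9p012_km_setting: entry 0 carries the
+ * 2x2-skipped 1296x972 preview timings, entry 1 the full-resolution
+ * 2608x1960 snapshot timings.
+ */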
+struct reg_struct const mt9p012_km_reg_pat[2] = {
+	{ /* Preview */
+		/* vt_pix_clk_div          REG=0x0300 */
+		6,  /* 5 */
+
+		/* vt_sys_clk_div          REG=0x0302 */
+		1,
+
+		/* pre_pll_clk_div         REG=0x0304 */
+		2,
+
+		/* pll_multiplier          REG=0x0306 */
+		60,
+
+		/* op_pix_clk_div          REG=0x0308 */
+		8,  /* 10 */
+
+		/* op_sys_clk_div          REG=0x030A */
+		1,
+
+		/* scale_m                 REG=0x0404 */
+		16,
+
+		/* row_speed               REG=0x3016 */
+		0x0111,
+
+		/* x_addr_start            REG=0x3004 */
+		8,
+
+		/* x_addr_end              REG=0x3008 */
+		2597,
+
+		/* y_addr_start            REG=0x3002 */
+		8,
+
+		/* y_addr_end              REG=0x3006 */
+		1949,
+
+		/* read_mode               REG=0x3040
+		 * Preview 2x2 skipping */
+		0x006C,
+
+		/* x_output_size           REG=0x034C */
+		1296,
+
+		/* y_output_size           REG=0x034E */
+		972,
+
+		/* line_length_pck         REG=0x300C */
+		3783,
+
+		/* frame_length_lines      REG=0x300A */
+		1074,
+
+		/* coarse_integration_time REG=0x3012 */
+		16,
+
+		/* fine_integration_time   REG=0x3014 */
+		1764
+	},
+	{ /* Snapshot */
+		/* vt_pix_clk_div          REG=0x0300 */
+		6,
+
+		/* vt_sys_clk_div          REG=0x0302 */
+		1,
+
+		/* pre_pll_clk_div         REG=0x0304 */
+		2,
+
+		/* pll_multiplier          REG=0x0306
+		 * 39 for 10fps snapshot */
+		39,
+
+		/* op_pix_clk_div          REG=0x0308 */
+		8,
+
+		/* op_sys_clk_div          REG=0x030A */
+		1,
+
+		/* scale_m                 REG=0x0404 */
+		16,
+
+		/* row_speed               REG=0x3016 */
+		0x0111,
+
+		/* x_addr_start            REG=0x3004 */
+		8,
+
+		/* x_addr_end              REG=0x3008 */
+		2615,
+
+		/* y_addr_start            REG=0x3002 */
+		8,
+
+		/* y_addr_end              REG=0x3006 */
+		1967,
+
+		/* read_mode               REG=0x3040 */
+		0x0024,
+
+		/* x_output_size           REG=0x034C */
+		2608,
+
+		/* y_output_size           REG=0x034E */
+		1960,
+
+		/* line_length_pck         REG=0x300C */
+		3788,
+
+		/* frame_length_lines      REG=0x300A 10 fps snapshot */
+		2045,
+
+		/* coarse_integration_time REG=0x3012 */
+		16,
+
+		/* fine_integration_time   REG=0x3014 */
+		882
+	}
+};
+
+struct mt9p012_km_i2c_reg_conf const mt9p012_km_test_tbl[] = {
+	{0x3044, 0x0544 & 0xFBFF},
+	{0x30CA, 0x0004 | 0x0001},
+	{0x30D4, 0x9020 & 0x7FFF},
+	{0x31E0, 0x0003 & 0xFFFE},
+	{0x3180, 0x91FF & 0x7FFF},
+	{0x301A, (0x10CC | 0x8000) & 0xFFF7},
+	{0x301E, 0x0000},
+	{0x3780, 0x0000},
+};
+
+
+struct mt9p012_km_i2c_reg_conf const mt9p012_km_lc_tbl[] = {
+	{0x360A, 0x00F0},
+	{0x360C, 0x0B29},
+	{0x360E, 0x5ED1},
+	{0x3610, 0x890D},
+	{0x3612, 0x9871},
+	{0x364A, 0xAD2C},
+	{0x364C, 0x0A8C},
+	{0x364E, 0x91EC},
+	{0x3650, 0x94EC},
+	{0x3652, 0xC76B},
+	{0x368A, 0x5931},
+	{0x368C, 0x4FED},
+	{0x368E, 0x8A50},
+	{0x3690, 0x5C0F},
+	{0x3692, 0x8393},
+	{0x36CA, 0xDB8E},
+	{0x36CC, 0xCA4D},
+	{0x36CE, 0x146F},
+	{0x36D0, 0x618F},
+	{0x36D2, 0x014F},
+	{0x370A, 0x1FEE},
+	{0x370C, 0xDD50},
+	{0x370E, 0xDB54},
+	{0x3710, 0xCA92},
+	{0x3712, 0x1896},
+	{0x3600, 0x00F0},
+	{0x3602, 0xA04C},
+	{0x3604, 0x5711},
+	{0x3606, 0x5E6D},
+	{0x3608, 0xA971},
+	{0x3640, 0xDCCC},
+	{0x3642, 0x0529},
+	{0x3644, 0x96ED},
+	{0x3646, 0xF447},
+	{0x3648, 0x4AEE},
+	{0x3680, 0x2171},
+	{0x3682, 0x634F},
+	{0x3684, 0xCC91},
+	{0x3686, 0xA9CE},
+	{0x3688, 0x8751},
+	{0x36C0, 0x8B6D},
+	{0x36C2, 0xE20E},
+	{0x36C4, 0x750F},
+	{0x36C6, 0x0090},
+	{0x36C8, 0x9E91},
+	{0x3700, 0xEAAF},
+	{0x3702, 0xB8AF},
+	{0x3704, 0xE293},
+	{0x3706, 0xAB33},
+	{0x3708, 0x4595},
+	{0x3614, 0x00D0},
+	{0x3616, 0x8AAB},
+	{0x3618, 0x18B1},
+	{0x361A, 0x54AD},
+	{0x361C, 0x9DB0},
+	{0x3654, 0x11EB},
+	{0x3656, 0x332C},
+	{0x3658, 0x316D},
+	{0x365A, 0xF0EB},
+	{0x365C, 0xB4ED},
+	{0x3694, 0x0F31},
+	{0x3696, 0x08D0},
+	{0x3698, 0xA52F},
+	{0x369A, 0xE64F},
+	{0x369C, 0xC9D2},
+	{0x36D4, 0x8C2D},
+	{0x36D6, 0xAD6E},
+	{0x36D8, 0xE1CE},
+	{0x36DA, 0x1750},
+	{0x36DC, 0x8CAD},
+	{0x3714, 0x8CAF},
+	{0x3716, 0x8C11},
+	{0x3718, 0xE453},
+	{0x371A, 0x9693},
+	{0x371C, 0x38B5},
+	{0x361E, 0x00D0},
+	{0x3620, 0xB6CB},
+	{0x3622, 0x4811},
+	{0x3624, 0xB70C},
+	{0x3626, 0xA771},
+	{0x365E, 0xB5A9},
+	{0x3660, 0x05AA},
+	{0x3662, 0x00CF},
+	{0x3664, 0xB86B},
+	{0x3666, 0xA4AF},
+	{0x369E, 0x3E31},
+	{0x36A0, 0x902B},
+	{0x36A2, 0xD251},
+	{0x36A4, 0x5C2F},
+	{0x36A6, 0x8471},
+	{0x36DE, 0x2C6D},
+	{0x36E0, 0xECEE},
+	{0x36E2, 0xB650},
+	{0x36E4, 0x0210},
+	{0x36E6, 0xACAE},
+	{0x371E, 0xAC30},
+	{0x3720, 0x394E},
+	{0x3722, 0xFDD3},
+	{0x3724, 0xBCB2},
+	{0x3726, 0x5AD5},
+	{0x3782, 0x0508},
+	{0x3784, 0x03B4},
+	{0x3780, 0x8000},
+};
+
+struct mt9p012_km_i2c_reg_conf const mt9p012_km_rolloff_tbl[] = {
+	{0x360A, 0x00F0},
+	{0x360C, 0x0B29},
+	{0x360E, 0x5ED1},
+	{0x3610, 0x890D},
+	{0x3612, 0x9871},
+	{0x364A, 0xAD2C},
+	{0x364C, 0x0A8C},
+	{0x364E, 0x91EC},
+	{0x3650, 0x94EC},
+	{0x3652, 0xC76B},
+	{0x368A, 0x5931},
+	{0x368C, 0x4FED},
+	{0x368E, 0x8A50},
+	{0x3690, 0x5C0F},
+	{0x3692, 0x8393},
+	{0x36CA, 0xDB8E},
+	{0x36CC, 0xCA4D},
+	{0x36CE, 0x146F},
+	{0x36D0, 0x618F},
+	{0x36D2, 0x014F},
+	{0x370A, 0x1FEE},
+	{0x370C, 0xDD50},
+	{0x370E, 0xDB54},
+	{0x3710, 0xCA92},
+	{0x3712, 0x1896},
+	{0x3600, 0x00F0},
+	{0x3602, 0xA04C},
+	{0x3604, 0x5711},
+	{0x3606, 0x5E6D},
+	{0x3608, 0xA971},
+	{0x3640, 0xDCCC},
+	{0x3642, 0x0529},
+	{0x3644, 0x96ED},
+	{0x3646, 0xF447},
+	{0x3648, 0x4AEE},
+	{0x3680, 0x2171},
+	{0x3682, 0x634F},
+	{0x3684, 0xCC91},
+	{0x3686, 0xA9CE},
+	{0x3688, 0x8751},
+	{0x36C0, 0x8B6D},
+	{0x36C2, 0xE20E},
+	{0x36C4, 0x750F},
+	{0x36C6, 0x0090},
+	{0x36C8, 0x9E91},
+	{0x3700, 0xEAAF},
+	{0x3702, 0xB8AF},
+	{0x3704, 0xE293},
+	{0x3706, 0xAB33},
+	{0x3708, 0x4595},
+	{0x3614, 0x00D0},
+	{0x3616, 0x8AAB},
+	{0x3618, 0x18B1},
+	{0x361A, 0x54AD},
+	{0x361C, 0x9DB0},
+	{0x3654, 0x11EB},
+	{0x3656, 0x332C},
+	{0x3658, 0x316D},
+	{0x365A, 0xF0EB},
+	{0x365C, 0xB4ED},
+	{0x3694, 0x0F31},
+	{0x3696, 0x08D0},
+	{0x3698, 0xA52F},
+	{0x369A, 0xE64F},
+	{0x369C, 0xC9D2},
+	{0x36D4, 0x8C2D},
+	{0x36D6, 0xAD6E},
+	{0x36D8, 0xE1CE},
+	{0x36DA, 0x1750},
+	{0x36DC, 0x8CAD},
+	{0x3714, 0x8CAF},
+	{0x3716, 0x8C11},
+	{0x3718, 0xE453},
+	{0x371A, 0x9693},
+	{0x371C, 0x38B5},
+	{0x361E, 0x00D0},
+	{0x3620, 0xB6CB},
+	{0x3622, 0x4811},
+	{0x3624, 0xB70C},
+	{0x3626, 0xA771},
+	{0x365E, 0xB5A9},
+	{0x3660, 0x05AA},
+	{0x3662, 0x00CF},
+	{0x3664, 0xB86B},
+	{0x3666, 0xA4AF},
+	{0x369E, 0x3E31},
+	{0x36A0, 0x902B},
+	{0x36A2, 0xD251},
+	{0x36A4, 0x5C2F},
+	{0x36A6, 0x8471},
+	{0x36DE, 0x2C6D},
+	{0x36E0, 0xECEE},
+	{0x36E2, 0xB650},
+	{0x36E4, 0x0210},
+	{0x36E6, 0xACAE},
+	{0x371E, 0xAC30},
+	{0x3720, 0x394E},
+	{0x3722, 0xFDD3},
+	{0x3724, 0xBCB2},
+	{0x3726, 0x5AD5},
+	{0x3782, 0x0508},
+	{0x3784, 0x03B4},
+	{0x3780, 0x8000},
+};
+
+
+struct mt9p012_km_reg mt9p012_km_regs = {
+	.reg_pat = &mt9p012_km_reg_pat[0],
+	.reg_pat_size = ARRAY_SIZE(mt9p012_km_reg_pat),
+	.ttbl = &mt9p012_km_test_tbl[0],
+	.ttbl_size = ARRAY_SIZE(mt9p012_km_test_tbl),
+	.lctbl = &mt9p012_km_lc_tbl[0],
+	.lctbl_size = ARRAY_SIZE(mt9p012_km_lc_tbl),
+	.rftbl = &mt9p012_km_rolloff_tbl[0],
+	.rftbl_size = ARRAY_SIZE(mt9p012_km_rolloff_tbl)
+};
+
+
diff --git a/drivers/media/video/msm/mt9p012_reg.c b/drivers/media/video/msm/mt9p012_reg.c
new file mode 100644
index 0000000..06fe419
--- /dev/null
+++ b/drivers/media/video/msm/mt9p012_reg.c
@@ -0,0 +1,263 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "mt9p012.h"
+#include <linux/kernel.h>
+
+/* Micron settings from Applications for lower power consumption. */
+struct reg_struct const mt9p012_reg_pat[2] = {
+	{ /* Preview */
+		/* vt_pix_clk_div          REG=0x0300 */
+		6,  /* 5 */
+
+		/* vt_sys_clk_div          REG=0x0302 */
+		1,
+		/* pre_pll_clk_div         REG=0x0304 */
+		2,
+		/* pll_multiplier          REG=0x0306 */
+		60,
+
+		/* op_pix_clk_div          REG=0x0308 */
+		8,  /* 10 */
+
+		/* op_sys_clk_div          REG=0x030A */
+		1,
+
+		/* scale_m                 REG=0x0404 */
+		16,
+
+		/* row_speed               REG=0x3016 */
+		0x0111,
+
+		/* x_addr_start            REG=0x3004 */
+		8,
+
+		/* x_addr_end              REG=0x3008 */
+		2597,
+
+		/* y_addr_start            REG=0x3002 */
+		8,
+
+		/* y_addr_end              REG=0x3006 */
+		1949,
+
+		/* read_mode               REG=0x3040
+		 * Preview 2x2 skipping */
+		0x00C3,
+
+		/* x_output_size           REG=0x034C */
+		1296,
+
+		/* y_output_size           REG=0x034E */
+		972,
+
+		/* line_length_pck         REG=0x300C */
+		3659,
+
+		/* frame_length_lines      REG=0x300A */
+		1074,
+
+		/* coarse_integration_time REG=0x3012 */
+		16,
+
+		/* fine_integration_time   REG=0x3014 */
+		1764
+	},
+	{ /* Snapshot */
+		/* vt_pix_clk_div          REG=0x0300 */
+		6,
+
+		/* vt_sys_clk_div          REG=0x0302 */
+		1,
+
+		/* pre_pll_clk_div         REG=0x0304 */
+		2,
+
+		/* pll_multiplier          REG=0x0306
+		 * 60 for 10fps snapshot */
+		60,
+
+		/* op_pix_clk_div          REG=0x0308 */
+		8,
+
+		/* op_sys_clk_div          REG=0x030A */
+		1,
+
+		/* scale_m                 REG=0x0404 */
+		16,
+
+		/* row_speed               REG=0x3016 */
+		0x0111,
+
+		/* x_addr_start            REG=0x3004 */
+		8,
+
+		/* x_addr_end              REG=0x3008 */
+		2615,
+
+		/* y_addr_start            REG=0x3002 */
+		8,
+
+		/* y_addr_end              REG=0x3006 */
+		1967,
+
+		/* read_mode               REG=0x3040 */
+		0x0041,
+
+		/* x_output_size           REG=0x034C */
+		2608,
+
+		/* y_output_size           REG=0x034E */
+		1960,
+
+		/* line_length_pck         REG=0x300C */
+		3911,
+
+		/* frame_length_lines      REG=0x300A 10 fps snapshot */
+		2045,
+
+		/* coarse_integration_time REG=0x3012 */
+		16,
+
+		/* fine_integration_time   REG=0x3014 */
+		882
+	}
+};
+
+
+struct mt9p012_i2c_reg_conf const mt9p012_test_tbl[] = {
+	{0x3044, 0x0544 & 0xFBFF},
+	{0x30CA, 0x0004 | 0x0001},
+	{0x30D4, 0x9020 & 0x7FFF},
+	{0x31E0, 0x0003 & 0xFFFE},
+	{0x3180, 0x91FF & 0x7FFF},
+	{0x301A, (0x10CC | 0x8000) & 0xFFF7},
+	{0x301E, 0x0000},
+	{0x3780, 0x0000},
+};
+struct mt9p012_i2c_reg_conf const mt9p012_rolloff_tbl[] = {
+	{0x360A, 0x0110},
+	{0x360C, 0x270D},
+	{0x360E, 0x0071},
+	{0x3610, 0xA38D},
+	{0x3612, 0xA610},
+	{0x364A, 0x8F49},
+	{0x364C, 0x696A},
+	{0x364E, 0x0FCD},
+	{0x3650, 0x20ED},
+	{0x3652, 0x81ED},
+	{0x368A, 0x1031},
+	{0x368C, 0xBCAD},
+	{0x368E, 0x77AA},
+	{0x3690, 0xD10E},
+	{0x3692, 0xC133},
+	{0x36CA, 0x4F8D},
+	{0x36CC, 0xAC4D},
+	{0x36CE, 0xC8CE},
+	{0x36D0, 0x73AD},
+	{0x36D2, 0xC150},
+	{0x370A, 0xB590},
+	{0x370C, 0x9010},
+	{0x370E, 0xAC52},
+	{0x3710, 0x4D51},
+	{0x3712, 0x5670},
+	{0x3600, 0x00F0},
+	{0x3602, 0xCE4B},
+	{0x3604, 0x4270},
+	{0x3606, 0x8BC9},
+	{0x3608, 0xFA2F},
+	{0x3640, 0x9A09},
+	{0x3642, 0xB40C},
+	{0x3644, 0x4ECD},
+	{0x3646, 0x1BCC},
+	{0x3648, 0xD68E},
+	{0x3680, 0x1BF0},
+	{0x3682, 0xC94D},
+	{0x3684, 0x714F},
+	{0x3686, 0x1491},
+	{0x3688, 0xB8D3},
+	{0x36C0, 0x3E49},
+	{0x36C2, 0x7A6C},
+	{0x36C4, 0xEF2E},
+	{0x36C6, 0xE0EE},
+	{0x36C8, 0x570F},
+	{0x3700, 0xD6AF},
+	{0x3702, 0x2251},
+	{0x3704, 0x8A33},
+	{0x3706, 0xEFB3},
+	{0x3708, 0x1174},
+	{0x3614, 0x0150},
+	{0x3616, 0xA9AB},
+	{0x3618, 0x1770},
+	{0x361A, 0x8809},
+	{0x361C, 0xE3AE},
+	{0x3654, 0x5ACC},
+	{0x3656, 0x35EA},
+	{0x3658, 0x2DEC},
+	{0x365A, 0xB90B},
+	{0x365C, 0x250C},
+	{0x3694, 0x1630},
+	{0x3696, 0xD88C},
+	{0x3698, 0xBD0E},
+	{0x369A, 0x16D1},
+	{0x369C, 0xE492},
+	{0x36D4, 0x5D6D},
+	{0x36D6, 0x906E},
+	{0x36D8, 0x10AE},
+	{0x36DA, 0x7A8E},
+	{0x36DC, 0x9672},
+	{0x3714, 0x8D90},
+	{0x3716, 0x04F1},
+	{0x3718, 0x23F1},
+	{0x371A, 0xF313},
+	{0x371C, 0xE833},
+	{0x361E, 0x0490},
+	{0x3620, 0x14CD},
+	{0x3622, 0x38F0},
+	{0x3624, 0xBAED},
+	{0x3626, 0xFF6F},
+	{0x365E, 0x358C},
+	{0x3660, 0xA9E9},
+	{0x3662, 0x4A4E},
+	{0x3664, 0x398D},
+	{0x3666, 0x890F},
+	{0x369E, 0x2DF0},
+	{0x36A0, 0xF7CE},
+	{0x36A2, 0xB3CC},
+	{0x36A4, 0x118D},
+	{0x36A6, 0x9CB3},
+	{0x36DE, 0x462D},
+	{0x36E0, 0x74AA},
+	{0x36E2, 0xC8CF},
+	{0x36E4, 0x8DEF},
+	{0x36E6, 0xF130},
+	{0x371E, 0x9250},
+	{0x3720, 0x19CC},
+	{0x3722, 0xDFD1},
+	{0x3724, 0x5B70},
+	{0x3726, 0x34D2},
+	{0x3782, 0x0530},
+	{0x3784, 0x03C8},
+	{0x3780, 0x8000},
+};
+
+struct mt9p012_reg mt9p012_regs = {
+	.reg_pat = &mt9p012_reg_pat[0],
+	.reg_pat_size = ARRAY_SIZE(mt9p012_reg_pat),
+	.ttbl = &mt9p012_test_tbl[0],
+	.ttbl_size = ARRAY_SIZE(mt9p012_test_tbl),
+	.rftbl = &mt9p012_rolloff_tbl[0],
+	.rftbl_size = ARRAY_SIZE(mt9p012_rolloff_tbl)
+};
+
+
diff --git a/drivers/media/video/msm/mt9t013.c b/drivers/media/video/msm/mt9t013.c
new file mode 100644
index 0000000..e1f6167
--- /dev/null
+++ b/drivers/media/video/msm/mt9t013.c
@@ -0,0 +1,1503 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/kernel.h>
+#include <media/msm_camera.h>
+#include <mach/gpio.h>
+#include <mach/camera.h>
+#include <asm/mach-types.h>
+#include "mt9t013.h"
+
+/*=============================================================
+	SENSOR REGISTER DEFINES
+==============================================================*/
+#define MT9T013_REG_MODEL_ID 		 0x0000
+#define MT9T013_MODEL_ID     		 0x2600
+#define REG_GROUPED_PARAMETER_HOLD   0x0104
+#define GROUPED_PARAMETER_HOLD       0x0100
+#define GROUPED_PARAMETER_UPDATE     0x0000
+#define REG_COARSE_INT_TIME          0x3012
+#define REG_VT_PIX_CLK_DIV           0x0300
+#define REG_VT_SYS_CLK_DIV           0x0302
+#define REG_PRE_PLL_CLK_DIV          0x0304
+#define REG_PLL_MULTIPLIER           0x0306
+#define REG_OP_PIX_CLK_DIV           0x0308
+#define REG_OP_SYS_CLK_DIV           0x030A
+#define REG_SCALE_M                  0x0404
+#define REG_FRAME_LENGTH_LINES       0x300A
+#define REG_LINE_LENGTH_PCK          0x300C
+#define REG_X_ADDR_START             0x3004
+#define REG_Y_ADDR_START             0x3002
+#define REG_X_ADDR_END               0x3008
+#define REG_Y_ADDR_END               0x3006
+#define REG_X_OUTPUT_SIZE            0x034C
+#define REG_Y_OUTPUT_SIZE            0x034E
+#define REG_FINE_INT_TIME            0x3014
+#define REG_ROW_SPEED                0x3016
+#define MT9T013_REG_RESET_REGISTER   0x301A
+#define MT9T013_RESET_REGISTER_PWON  0x10CC
+#define MT9T013_RESET_REGISTER_PWOFF 0x1008 /* 0x10C8 stop streaming*/
+#define MT9T013_RESET_FAST_TRANSITION 0x0002
+#define REG_READ_MODE                0x3040
+#define REG_GLOBAL_GAIN              0x305E
+#define REG_TEST_PATTERN_MODE        0x3070
+
+
+enum mt9t013_test_mode {
+	TEST_OFF,
+	TEST_1,
+	TEST_2,
+	TEST_3
+};
+
+enum mt9t013_resolution {
+	QTR_SIZE,
+	FULL_SIZE,
+	INVALID_SIZE
+};
+
+enum mt9t013_reg_update {
+	REG_INIT, /* registers that need to be updated during initialization */
+	UPDATE_PERIODIC, /* registers that need periodic I2C writes */
+	UPDATE_ALL, /* all registers will be updated */
+	UPDATE_INVALID
+};
+
+enum mt9t013_setting {
+	RES_PREVIEW,
+	RES_CAPTURE
+};
+
+/* actuator's Slave Address */
+#define MT9T013_AF_I2C_ADDR   0x18
+
+/* AF Total steps parameters */
+#define MT9T013_TOTAL_STEPS_NEAR_TO_FAR    30
+
+/*
+ * Time in milliseconds to wait for the sensor to reset.
+ */
+#define MT9T013_RESET_DELAY_MSECS   66
+
+/* for 30 fps preview */
+#define MT9T013_DEFAULT_CLOCK_RATE  24000000
+#define MT9T013_DEFAULT_MAX_FPS     26
+
+
+/* FIXME: Changes from here */
+struct mt9t013_work {
+	struct work_struct work;
+};
+
+static struct  mt9t013_work *mt9t013_sensorw;
+static struct  i2c_client *mt9t013_client;
+
+struct mt9t013_ctrl {
+	const struct msm_camera_sensor_info *sensordata;
+
+	int sensormode;
+	uint32_t fps_divider; 		/* init to 1 * 0x00000400 */
+	uint32_t pict_fps_divider; 	/* init to 1 * 0x00000400 */
+
+	uint16_t curr_lens_pos;
+	uint16_t init_curr_lens_pos;
+	uint16_t my_reg_gain;
+	uint32_t my_reg_line_count;
+
+	enum mt9t013_resolution prev_res;
+	enum mt9t013_resolution pict_res;
+	enum mt9t013_resolution curr_res;
+	enum mt9t013_test_mode  set_test;
+
+	unsigned short imgaddr;
+};
+
+
+static struct mt9t013_ctrl *mt9t013_ctrl;
+static DECLARE_WAIT_QUEUE_HEAD(mt9t013_wait_queue);
+DEFINE_SEMAPHORE(mt9t013_sem);
+
+static int mt9t013_i2c_rxdata(unsigned short saddr,
+	unsigned char *rxdata, int length)
+{
+	struct i2c_msg msgs[] = {
+	{
+		.addr   = saddr,
+		.flags = 0,
+		.len   = 2,
+		.buf   = rxdata,
+	},
+	{
+		.addr  = saddr,
+		.flags = I2C_M_RD,
+		.len   = length,
+		.buf   = rxdata,
+	},
+	};
+
+	if (i2c_transfer(mt9t013_client->adapter, msgs, 2) < 0) {
+		pr_err("mt9t013_i2c_rxdata failed!\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int32_t mt9t013_i2c_read_w(unsigned short saddr,
+	unsigned short raddr, unsigned short *rdata)
+{
+	int32_t rc = 0;
+	unsigned char buf[4];
+
+	if (!rdata)
+		return -EIO;
+
+	memset(buf, 0, sizeof(buf));
+
+	buf[0] = (raddr & 0xFF00)>>8;
+	buf[1] = (raddr & 0x00FF);
+
+	rc = mt9t013_i2c_rxdata(saddr, buf, 2);
+	if (rc < 0)
+		return rc;
+
+	*rdata = buf[0] << 8 | buf[1];
+
+	if (rc < 0)
+		pr_err("mt9t013_i2c_read failed!\n");
+
+	return rc;
+}
+
+static int32_t mt9t013_i2c_txdata(unsigned short saddr,
+	unsigned char *txdata, int length)
+{
+	struct i2c_msg msg[] = {
+	{
+		.addr = saddr,
+		.flags = 0,
+		.len = length,
+		.buf = txdata,
+	},
+	};
+
+	if (i2c_transfer(mt9t013_client->adapter, msg, 1) < 0) {
+		pr_err("mt9t013_i2c_txdata failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int32_t mt9t013_i2c_write_b(unsigned short saddr,
+	unsigned short waddr, unsigned short wdata)
+{
+	int32_t rc = -EIO;
+	unsigned char buf[2];
+
+	memset(buf, 0, sizeof(buf));
+	buf[0] = waddr;
+	buf[1] = wdata;
+	rc = mt9t013_i2c_txdata(saddr, buf, 2);
+
+	if (rc < 0)
+		pr_err("i2c_write failed, addr = 0x%x, val = 0x%x!\n",
+		waddr, wdata);
+
+	return rc;
+}
+
+static int32_t mt9t013_i2c_write_w(unsigned short saddr,
+	unsigned short waddr, unsigned short wdata)
+{
+	int32_t rc = -EIO;
+	unsigned char buf[4];
+
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (waddr & 0xFF00)>>8;
+	buf[1] = (waddr & 0x00FF);
+	buf[2] = (wdata & 0xFF00)>>8;
+	buf[3] = (wdata & 0x00FF);
+
+	rc = mt9t013_i2c_txdata(saddr, buf, 4);
+
+	if (rc < 0)
+		pr_err("i2c_write_w failed, addr = 0x%x, val = 0x%x!\n",
+		waddr, wdata);
+
+	return rc;
+}
+
+static int32_t mt9t013_i2c_write_w_table(
+	struct mt9t013_i2c_reg_conf const *reg_conf_tbl,
+	int num_of_items_in_table)
+{
+	int i;
+	int32_t rc = -EIO;
+
+	for (i = 0; i < num_of_items_in_table; i++) {
+		rc = mt9t013_i2c_write_w(mt9t013_client->addr,
+			reg_conf_tbl->waddr, reg_conf_tbl->wdata);
+		if (rc < 0)
+			break;
+		reg_conf_tbl++;
+	}
+
+	return rc;
+}
+
+static int32_t mt9t013_test(enum mt9t013_test_mode mo)
+{
+	int32_t rc = 0;
+
+	rc = mt9t013_i2c_write_w(mt9t013_client->addr,
+			REG_GROUPED_PARAMETER_HOLD,
+			GROUPED_PARAMETER_HOLD);
+	if (rc < 0)
+		return rc;
+
+	if (mo == TEST_OFF)
+		return 0;
+	else {
+		rc = mt9t013_i2c_write_w_table(mt9t013_regs.ttbl,
+				mt9t013_regs.ttbl_size);
+		if (rc < 0)
+			return rc;
+		rc = mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_TEST_PATTERN_MODE, (uint16_t)mo);
+		if (rc < 0)
+			return rc;
+	}
+
+	rc = mt9t013_i2c_write_w(mt9t013_client->addr,
+			REG_GROUPED_PARAMETER_HOLD,
+			GROUPED_PARAMETER_UPDATE);
+	if (rc < 0)
+		return rc;
+
+	return rc;
+}
+
+static int32_t mt9t013_set_lc(void)
+{
+	int32_t rc;
+
+	rc = mt9t013_i2c_write_w_table(mt9t013_regs.lctbl,
+		mt9t013_regs.lctbl_size);
+	if (rc < 0)
+		return rc;
+
+	return rc;
+}
+
+static int32_t mt9t013_set_default_focus(uint8_t af_step)
+{
+	int32_t rc = 0;
+	uint8_t code_val_msb, code_val_lsb;
+	code_val_msb = 0x01;
+	code_val_lsb = af_step;
+
+	/* Write the digital code for current to the actuator */
+	rc = mt9t013_i2c_write_b(MT9T013_AF_I2C_ADDR>>1,
+			code_val_msb, code_val_lsb);
+
+	mt9t013_ctrl->curr_lens_pos = 0;
+	mt9t013_ctrl->init_curr_lens_pos = 0;
+	return rc;
+}
+
+static void mt9t013_get_pict_fps(uint16_t fps, uint16_t *pfps)
+{
+	/* input fps is preview fps in Q8 format */
+	uint32_t divider;   /*Q10 */
+	uint32_t pclk_mult; /*Q10 */
+	uint32_t d1;
+	uint32_t d2;
+
+	d1 =
+		(uint32_t)(
+		(mt9t013_regs.reg_pat[RES_PREVIEW].frame_length_lines *
+		0x00000400) /
+		mt9t013_regs.reg_pat[RES_CAPTURE].frame_length_lines);
+
+	d2 =
+		(uint32_t)(
+		(mt9t013_regs.reg_pat[RES_PREVIEW].line_length_pck *
+		0x00000400) /
+		mt9t013_regs.reg_pat[RES_CAPTURE].line_length_pck);
+
+	divider = (uint32_t) (d1 * d2) / 0x00000400;
+
+	pclk_mult =
+		(uint32_t) ((mt9t013_regs.reg_pat[RES_CAPTURE].pll_multiplier *
+		0x00000400) /
+		(mt9t013_regs.reg_pat[RES_PREVIEW].pll_multiplier));
+
+
+	/* Verify PCLK settings and frame sizes. */
+	*pfps =
+		(uint16_t) (fps * divider * pclk_mult /
+		0x00000400 / 0x00000400);
+}
+
+static uint16_t mt9t013_get_prev_lines_pf(void)
+{
+	if (mt9t013_ctrl->prev_res == QTR_SIZE)
+		return mt9t013_regs.reg_pat[RES_PREVIEW].frame_length_lines;
+	else
+		return mt9t013_regs.reg_pat[RES_CAPTURE].frame_length_lines;
+}
+
+static uint16_t mt9t013_get_prev_pixels_pl(void)
+{
+	if (mt9t013_ctrl->prev_res == QTR_SIZE)
+		return mt9t013_regs.reg_pat[RES_PREVIEW].line_length_pck;
+	else
+		return mt9t013_regs.reg_pat[RES_CAPTURE].line_length_pck;
+}
+
+static uint16_t mt9t013_get_pict_lines_pf(void)
+{
+	return mt9t013_regs.reg_pat[RES_CAPTURE].frame_length_lines;
+}
+
+static uint16_t mt9t013_get_pict_pixels_pl(void)
+{
+	return mt9t013_regs.reg_pat[RES_CAPTURE].line_length_pck;
+}
+
+static uint32_t mt9t013_get_pict_max_exp_lc(void)
+{
+	uint16_t snapshot_lines_per_frame;
+
+	if (mt9t013_ctrl->pict_res == QTR_SIZE) {
+		snapshot_lines_per_frame =
+		mt9t013_regs.reg_pat[RES_PREVIEW].frame_length_lines - 1;
+	} else  {
+		snapshot_lines_per_frame =
+		mt9t013_regs.reg_pat[RES_CAPTURE].frame_length_lines - 1;
+	}
+
+	return snapshot_lines_per_frame * 24;
+}
+
+static int32_t mt9t013_set_fps(struct fps_cfg *fps)
+{
+	/* input is new fps in Q8 format */
+	int32_t rc = 0;
+	enum mt9t013_setting setting;
+
+	mt9t013_ctrl->fps_divider = fps->fps_div;
+	mt9t013_ctrl->pict_fps_divider = fps->pict_fps_div;
+
+	rc = mt9t013_i2c_write_w(mt9t013_client->addr,
+			REG_GROUPED_PARAMETER_HOLD,
+			GROUPED_PARAMETER_HOLD);
+	if (rc < 0)
+		return -EBUSY;
+
+	CDBG("mt9t013_set_fps: fps_div is %d, f_mult is %d\n",
+			fps->fps_div, fps->f_mult);
+
+	if (mt9t013_ctrl->sensormode == SENSOR_PREVIEW_MODE)
+		setting = RES_PREVIEW;
+	else
+		setting = RES_CAPTURE;
+	rc = mt9t013_i2c_write_w(mt9t013_client->addr,
+			REG_FRAME_LENGTH_LINES,
+			(uint16_t) (
+			mt9t013_regs.reg_pat[setting].frame_length_lines *
+			fps->fps_div / 0x00000400));
+
+	if (rc < 0)
+		return rc;
+
+	rc = mt9t013_i2c_write_w(mt9t013_client->addr,
+			REG_GROUPED_PARAMETER_HOLD,
+			GROUPED_PARAMETER_UPDATE);
+	if (rc < 0)
+		return rc;
+
+	return rc;
+}
+
+static int32_t mt9t013_write_exp_gain(uint16_t gain, uint32_t line)
+{
+	uint16_t max_legal_gain = 0x01FF;
+	int32_t rc = 0;
+
+	if (mt9t013_ctrl->sensormode == SENSOR_PREVIEW_MODE) {
+		mt9t013_ctrl->my_reg_gain = gain;
+		mt9t013_ctrl->my_reg_line_count = (uint16_t) line;
+	}
+
+	if (gain > max_legal_gain)
+		gain = max_legal_gain;
+
+	if (mt9t013_ctrl->sensormode != SENSOR_SNAPSHOT_MODE)
+		line = (uint32_t) (line * mt9t013_ctrl->fps_divider /
+				   0x00000400);
+	else
+		line = (uint32_t) (line * mt9t013_ctrl->pict_fps_divider /
+				   0x00000400);
+
+	/*Set digital gain to 1 */
+	gain |= 0x0200;
+
+	/* There used to be PARAMETER_HOLD register write before and
+	 * after REG_GLOBAL_GAIN & REG_COARSE_INIT_TIME. This causes
+	 * aec oscillation. Hence removed. */
+
+	rc = mt9t013_i2c_write_w(mt9t013_client->addr, REG_GLOBAL_GAIN, gain);
+	if (rc < 0)
+		return rc;
+
+	rc = mt9t013_i2c_write_w(mt9t013_client->addr,
+			REG_COARSE_INT_TIME, line);
+	if (rc < 0)
+		return rc;
+
+	return rc;
+}
+
+static int32_t mt9t013_set_pict_exp_gain(uint16_t gain, uint32_t line)
+{
+	int32_t rc = 0;
+
+	rc = mt9t013_write_exp_gain(gain, line);
+	if (rc < 0)
+		return rc;
+
+	rc = mt9t013_i2c_write_w(mt9t013_client->addr,
+			MT9T013_REG_RESET_REGISTER,
+			0x10CC | 0x0002);
+
+	mdelay(5);
+
+	return rc;
+}
+
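+/*
+ * UPDATE_PERIODIC reprograms the PLL dividers and frame geometry for a
+ * preview/snapshot switch; REG_INIT additionally stops streaming,
+ * writes the power-saving registers, loads the lens-shading table and
+ * then powers the array back on.  Both paths bracket the geometry
+ * writes with GROUPED_PARAMETER_HOLD/UPDATE so the sensor latches them
+ * on a frame boundary.
+ */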
+static int32_t mt9t013_setting(enum mt9t013_reg_update rupdate,
+	enum mt9t013_setting rt)
+{
+	int32_t rc = 0;
+
+	switch (rupdate) {
+	case UPDATE_PERIODIC: {
+
+	if (rt == RES_PREVIEW || rt == RES_CAPTURE) {
+#if 0
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				MT9T013_REG_RESET_REGISTER,
+				MT9T013_RESET_REGISTER_PWOFF);
+		if (rc < 0)
+			return rc;
+#endif
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_VT_PIX_CLK_DIV,
+				mt9t013_regs.reg_pat[rt].vt_pix_clk_div);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_VT_SYS_CLK_DIV,
+				mt9t013_regs.reg_pat[rt].vt_sys_clk_div);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_PRE_PLL_CLK_DIV,
+				mt9t013_regs.reg_pat[rt].pre_pll_clk_div);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_PLL_MULTIPLIER,
+				mt9t013_regs.reg_pat[rt].pll_multiplier);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_OP_PIX_CLK_DIV,
+				mt9t013_regs.reg_pat[rt].op_pix_clk_div);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_OP_SYS_CLK_DIV,
+				mt9t013_regs.reg_pat[rt].op_sys_clk_div);
+		if (rc < 0)
+			return rc;
+
+		mdelay(5);
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_GROUPED_PARAMETER_HOLD,
+				GROUPED_PARAMETER_HOLD);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_ROW_SPEED,
+				mt9t013_regs.reg_pat[rt].row_speed);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_X_ADDR_START,
+				mt9t013_regs.reg_pat[rt].x_addr_start);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_X_ADDR_END,
+				mt9t013_regs.reg_pat[rt].x_addr_end);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_Y_ADDR_START,
+				mt9t013_regs.reg_pat[rt].y_addr_start);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_Y_ADDR_END,
+				mt9t013_regs.reg_pat[rt].y_addr_end);
+		if (rc < 0)
+			return rc;
+
+		if (machine_is_sapphire()) {
+			if (rt == 0)
+				rc = mt9t013_i2c_write_w(mt9t013_client->addr,
+					REG_READ_MODE,
+					0x046F);
+			else
+				rc = mt9t013_i2c_write_w(mt9t013_client->addr,
+					REG_READ_MODE,
+					0x0027);
+		} else
+			rc = mt9t013_i2c_write_w(mt9t013_client->addr,
+					REG_READ_MODE,
+					mt9t013_regs.reg_pat[rt].read_mode);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_SCALE_M,
+				mt9t013_regs.reg_pat[rt].scale_m);
+		if (rc < 0)
+			return rc;
+
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_X_OUTPUT_SIZE,
+				mt9t013_regs.reg_pat[rt].x_output_size);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_Y_OUTPUT_SIZE,
+				mt9t013_regs.reg_pat[rt].y_output_size);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_LINE_LENGTH_PCK,
+				mt9t013_regs.reg_pat[rt].line_length_pck);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+			REG_FRAME_LENGTH_LINES,
+			(mt9t013_regs.reg_pat[rt].frame_length_lines *
+			mt9t013_ctrl->fps_divider / 0x00000400));
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+			REG_COARSE_INT_TIME,
+			mt9t013_regs.reg_pat[rt].coarse_int_time);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+			REG_FINE_INT_TIME,
+			mt9t013_regs.reg_pat[rt].fine_int_time);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+			REG_GROUPED_PARAMETER_HOLD,
+			GROUPED_PARAMETER_UPDATE);
+		if (rc < 0)
+			return rc;
+
+		rc = mt9t013_test(mt9t013_ctrl->set_test);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+			MT9T013_REG_RESET_REGISTER,
+			MT9T013_RESET_REGISTER_PWON|
+			MT9T013_RESET_FAST_TRANSITION);
+		if (rc < 0)
+			return rc;
+
+		mdelay(5);
+
+		return rc;
+	}
+	}
+		break;
+
+	/*CAMSENSOR_REG_UPDATE_PERIODIC */
+	case REG_INIT: {
+	if (rt == RES_PREVIEW || rt == RES_CAPTURE) {
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				MT9T013_REG_RESET_REGISTER,
+				MT9T013_RESET_REGISTER_PWOFF);
+		if (rc < 0)
+			/* MODE_SELECT, stop streaming */
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_VT_PIX_CLK_DIV,
+				mt9t013_regs.reg_pat[rt].vt_pix_clk_div);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_VT_SYS_CLK_DIV,
+				mt9t013_regs.reg_pat[rt].vt_sys_clk_div);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_PRE_PLL_CLK_DIV,
+				mt9t013_regs.reg_pat[rt].pre_pll_clk_div);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_PLL_MULTIPLIER,
+				mt9t013_regs.reg_pat[rt].pll_multiplier);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_OP_PIX_CLK_DIV,
+				mt9t013_regs.reg_pat[rt].op_pix_clk_div);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_OP_SYS_CLK_DIV,
+				mt9t013_regs.reg_pat[rt].op_sys_clk_div);
+		if (rc < 0)
+			return rc;
+
+		mdelay(5);
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_GROUPED_PARAMETER_HOLD,
+				GROUPED_PARAMETER_HOLD);
+		if (rc < 0)
+			return rc;
+
+		/* additional power saving mode ok around 38.2MHz */
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				0x3084, 0x2409);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				0x3092, 0x0A49);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				0x3094, 0x4949);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				0x3096, 0x4949);
+		if (rc < 0)
+			return rc;
+
+		/* Set preview or snapshot mode */
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_ROW_SPEED,
+				mt9t013_regs.reg_pat[rt].row_speed);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_X_ADDR_START,
+				mt9t013_regs.reg_pat[rt].x_addr_start);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_X_ADDR_END,
+				mt9t013_regs.reg_pat[rt].x_addr_end);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_Y_ADDR_START,
+				mt9t013_regs.reg_pat[rt].y_addr_start);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_Y_ADDR_END,
+				mt9t013_regs.reg_pat[rt].y_addr_end);
+		if (rc < 0)
+			return rc;
+
+		if (machine_is_sapphire()) {
+			if (rt == 0)
+				rc = mt9t013_i2c_write_w(mt9t013_client->addr,
+					REG_READ_MODE,
+					0x046F);
+			else
+				rc = mt9t013_i2c_write_w(mt9t013_client->addr,
+					REG_READ_MODE,
+					0x0027);
+		} else
+			rc = mt9t013_i2c_write_w(mt9t013_client->addr,
+					REG_READ_MODE,
+					mt9t013_regs.reg_pat[rt].read_mode);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_SCALE_M,
+				mt9t013_regs.reg_pat[rt].scale_m);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_X_OUTPUT_SIZE,
+				mt9t013_regs.reg_pat[rt].x_output_size);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_Y_OUTPUT_SIZE,
+				mt9t013_regs.reg_pat[rt].y_output_size);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_LINE_LENGTH_PCK,
+				mt9t013_regs.reg_pat[rt].line_length_pck);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_FRAME_LENGTH_LINES,
+				mt9t013_regs.reg_pat[rt].frame_length_lines);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_COARSE_INT_TIME,
+				mt9t013_regs.reg_pat[rt].coarse_int_time);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_FINE_INT_TIME,
+				mt9t013_regs.reg_pat[rt].fine_int_time);
+		if (rc < 0)
+			return rc;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_GROUPED_PARAMETER_HOLD,
+				GROUPED_PARAMETER_UPDATE);
+		if (rc < 0)
+			return rc;
+
+		/* load lens shading */
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_GROUPED_PARAMETER_HOLD,
+				GROUPED_PARAMETER_HOLD);
+		if (rc < 0)
+			return rc;
+
+		/* most likely needs to be written only once. */
+		rc = mt9t013_set_lc();
+		if (rc < 0)
+			return -EBUSY;
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_GROUPED_PARAMETER_HOLD,
+				GROUPED_PARAMETER_UPDATE);
+		if (rc < 0)
+			return rc;
+
+		rc = mt9t013_test(mt9t013_ctrl->set_test);
+		if (rc < 0)
+			return rc;
+
+		mdelay(5);
+
+		rc =
+			mt9t013_i2c_write_w(mt9t013_client->addr,
+				MT9T013_REG_RESET_REGISTER,
+				MT9T013_RESET_REGISTER_PWON);
+		if (rc < 0)
+			/* MODE_SELECT, stop streaming */
+			return rc;
+
+		CDBG("!!! mt9t013 !!! PowerOn is done!\n");
+		mdelay(5);
+		return rc;
+	}
+	} /* case CAMSENSOR_REG_INIT: */
+	break;
+
+	/*CAMSENSOR_REG_INIT */
+	default:
+		rc = -EINVAL;
+		break;
+	} /* switch (rupdate) */
+
+	return rc;
+}
+
+static int32_t mt9t013_video_config(int mode, int res)
+{
+	int32_t rc;
+
+	switch (res) {
+	case QTR_SIZE:
+		rc = mt9t013_setting(UPDATE_PERIODIC, RES_PREVIEW);
+		if (rc < 0)
+			return rc;
+		CDBG("sensor configuration done!\n");
+		break;
+
+	case FULL_SIZE:
+		rc = mt9t013_setting(UPDATE_PERIODIC, RES_CAPTURE);
+		if (rc < 0)
+			return rc;
+		break;
+
+	default:
+		return -EINVAL;
+	} /* switch */
+
+	mt9t013_ctrl->prev_res = res;
+	mt9t013_ctrl->curr_res = res;
+	mt9t013_ctrl->sensormode = mode;
+
+	rc = mt9t013_write_exp_gain(mt9t013_ctrl->my_reg_gain,
+			mt9t013_ctrl->my_reg_line_count);
+	if (rc < 0)
+		return rc;
+
+	rc = mt9t013_i2c_write_w(mt9t013_client->addr,
+		MT9T013_REG_RESET_REGISTER,
+		MT9T013_RESET_REGISTER_PWON|MT9T013_RESET_FAST_TRANSITION);
+	if (rc < 0)
+		return rc;
+
+	msleep(5);
+	return rc;
+}
+
+static int32_t mt9t013_snapshot_config(int mode)
+{
+	int32_t rc = 0;
+
+	rc = mt9t013_setting(UPDATE_PERIODIC, RES_CAPTURE);
+	if (rc < 0)
+		return rc;
+
+	mt9t013_ctrl->curr_res = mt9t013_ctrl->pict_res;
+	mt9t013_ctrl->sensormode = mode;
+	return rc;
+}
+
+static int32_t mt9t013_raw_snapshot_config(int mode)
+{
+	int32_t rc = 0;
+
+	rc = mt9t013_setting(UPDATE_PERIODIC, RES_CAPTURE);
+	if (rc < 0)
+		return rc;
+
+	mt9t013_ctrl->curr_res = mt9t013_ctrl->pict_res;
+	mt9t013_ctrl->sensormode = mode;
+	return rc;
+}
+
+static int32_t mt9t013_power_down(void)
+{
+	int32_t rc = 0;
+
+	rc = mt9t013_i2c_write_w(mt9t013_client->addr,
+			MT9T013_REG_RESET_REGISTER,
+			MT9T013_RESET_REGISTER_PWOFF);
+	if (rc >= 0)
+		mdelay(5);
+	return rc;
+}
+
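+/*
+ * A focus move of num_steps is translated into +/-4 actuator codes per
+ * step and issued as four equal sub-moves with a ~1 ms settle time
+ * between them.  The 8-bit target position is split across the two
+ * bytes of the actuator write; its low two bits end up in bits [7:6]
+ * of the second byte.
+ */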
+static int32_t mt9t013_move_focus(int direction, int32_t num_steps)
+{
+	int16_t step_direction;
+	int16_t actual_step;
+	int16_t next_position;
+	int16_t break_steps[4];
+	uint8_t code_val_msb, code_val_lsb;
+	int16_t i;
+
+	if (num_steps > MT9T013_TOTAL_STEPS_NEAR_TO_FAR)
+		num_steps = MT9T013_TOTAL_STEPS_NEAR_TO_FAR;
+	else if (num_steps == 0)
+		return -EINVAL;
+
+	if (direction == MOVE_NEAR)
+		step_direction = 4;
+	else if (direction == MOVE_FAR)
+		step_direction = -4;
+	else
+		return -EINVAL;
+
+	if (mt9t013_ctrl->curr_lens_pos < mt9t013_ctrl->init_curr_lens_pos)
+		mt9t013_ctrl->curr_lens_pos = mt9t013_ctrl->init_curr_lens_pos;
+
+	actual_step =
+		(int16_t) (step_direction *
+		(int16_t) num_steps);
+
+	for (i = 0; i < 4; i++)
+		break_steps[i] =
+			actual_step / 4 * (i + 1) - actual_step / 4 * i;
+
+	for (i = 0; i < 4; i++) {
+		next_position =
+		(int16_t)
+		(mt9t013_ctrl->curr_lens_pos + break_steps[i]);
+
+		if (next_position > 255)
+			next_position = 255;
+		else if (next_position < 0)
+			next_position = 0;
+
+		code_val_msb =
+		((next_position >> 4) << 2) |
+		((next_position << 4) >> 6);
+
+		code_val_lsb =
+		((next_position & 0x03) << 6);
+
+		/* Writing the digital code for current to the actuator */
+		if (mt9t013_i2c_write_b(MT9T013_AF_I2C_ADDR>>1,
+				code_val_msb, code_val_lsb) < 0)
+			return -EBUSY;
+
+		/* Storing the current lens Position */
+		mt9t013_ctrl->curr_lens_pos = next_position;
+
+		if (i < 3)
+			mdelay(1);
+	} /* for */
+
+	return 0;
+}
+
+static int mt9t013_sensor_init_done(const struct msm_camera_sensor_info *data)
+{
+	gpio_direction_output(data->sensor_reset, 0);
+	gpio_free(data->sensor_reset);
+	return 0;
+}
+
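+/*
+ * Probe sequence: drive the reset GPIO high, soft-reset the imaging
+ * core through MT9T013_REG_RESET_REGISTER, then read MODEL_ID and
+ * refuse the sensor if it does not match MT9T013_MODEL_ID.
+ */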
+static int mt9t013_probe_init_sensor(const struct msm_camera_sensor_info *data)
+{
+	int rc;
+	uint16_t chipid;
+
+	rc = gpio_request(data->sensor_reset, "mt9t013");
+	if (!rc)
+		gpio_direction_output(data->sensor_reset, 1);
+	else
+		goto init_probe_done;
+
+	mdelay(20);
+
+	/* RESET the sensor image part via I2C command */
+	rc = mt9t013_i2c_write_w(mt9t013_client->addr,
+		MT9T013_REG_RESET_REGISTER, 0x1009);
+	if (rc < 0)
+		goto init_probe_fail;
+
+	msleep(10);
+
+	/* 3. Read sensor Model ID: */
+	rc = mt9t013_i2c_read_w(mt9t013_client->addr,
+		MT9T013_REG_MODEL_ID, &chipid);
+
+	if (rc < 0)
+		goto init_probe_fail;
+
+	CDBG("mt9t013 model_id = 0x%x\n", chipid);
+
+	/* 4. Compare sensor ID to MT9T012VC ID: */
+	if (chipid != MT9T013_MODEL_ID) {
+		rc = -ENODEV;
+		goto init_probe_fail;
+	}
+
+	rc = mt9t013_i2c_write_w(mt9t013_client->addr,
+		0x3064, 0x0805);
+	if (rc < 0)
+		goto init_probe_fail;
+
+	mdelay(MT9T013_RESET_DELAY_MSECS);
+
+	goto init_probe_done;
+
+	/* sensor: output enable */
+#if 0
+	rc = mt9t013_i2c_write_w(mt9t013_client->addr,
+		MT9T013_REG_RESET_REGISTER,
+		MT9T013_RESET_REGISTER_PWON);
+
+	/* if this fails, the sensor is not the MT9T013 */
+	rc = mt9t013_set_default_focus(0);
+#endif
+
+init_probe_fail:
+	gpio_direction_output(data->sensor_reset, 0);
+	gpio_free(data->sensor_reset);
+init_probe_done:
+	return rc;
+}
+
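+/*
+ * vcm_pwd looks like an active-high power-down line for the AF
+ * actuator: it is driven low here to enable the actuator and high
+ * again in mt9t013_poweroff_af().
+ */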
+static int32_t mt9t013_poweron_af(void)
+{
+	int32_t rc = 0;
+
+	/* enable AF actuator */
+	CDBG("enable AF actuator, gpio = %d\n",
+			mt9t013_ctrl->sensordata->vcm_pwd);
+	rc = gpio_request(mt9t013_ctrl->sensordata->vcm_pwd, "mt9t013");
+	if (!rc) {
+		gpio_direction_output(mt9t013_ctrl->sensordata->vcm_pwd, 0);
+		mdelay(20);
+		rc = mt9t013_set_default_focus(0);
+	} else
+		pr_err("%s, gpio_request failed (%d)!\n", __func__, rc);
+	return rc;
+}
+
+static void mt9t013_poweroff_af(void)
+{
+	gpio_direction_output(mt9t013_ctrl->sensordata->vcm_pwd, 1);
+	gpio_free(mt9t013_ctrl->sensordata->vcm_pwd);
+}
+
+int mt9t013_sensor_open_init(const struct msm_camera_sensor_info *data)
+{
+	int32_t  rc;
+
+	mt9t013_ctrl = kzalloc(sizeof(struct mt9t013_ctrl), GFP_KERNEL);
+	if (!mt9t013_ctrl) {
+		pr_err("mt9t013_init failed!\n");
+		rc = -ENOMEM;
+		goto init_done;
+	}
+
+	mt9t013_ctrl->fps_divider = 1 * 0x00000400;
+	mt9t013_ctrl->pict_fps_divider = 1 * 0x00000400;
+	mt9t013_ctrl->set_test = TEST_OFF;
+	mt9t013_ctrl->prev_res = QTR_SIZE;
+	mt9t013_ctrl->pict_res = FULL_SIZE;
+
+	if (data)
+		mt9t013_ctrl->sensordata = data;
+
+	/* enable mclk first */
+	msm_camio_clk_rate_set(MT9T013_DEFAULT_CLOCK_RATE);
+	mdelay(20);
+
+	msm_camio_camif_pad_reg_reset();
+	mdelay(20);
+
+	rc = mt9t013_probe_init_sensor(data);
+	if (rc < 0)
+		goto init_fail;
+
+	if (mt9t013_ctrl->prev_res == QTR_SIZE)
+		rc = mt9t013_setting(REG_INIT, RES_PREVIEW);
+	else
+		rc = mt9t013_setting(REG_INIT, RES_CAPTURE);
+
+	if (rc >= 0)
+		if (machine_is_sapphire())
+			rc = mt9t013_poweron_af();
+
+	if (rc < 0)
+		goto init_fail;
+	else
+		goto init_done;
+
+init_fail:
+	kfree(mt9t013_ctrl);
+init_done:
+	return rc;
+}
+
+static int mt9t013_init_client(struct i2c_client *client)
+{
+	/* Initialize the MSM_CAMI2C Chip */
+	init_waitqueue_head(&mt9t013_wait_queue);
+	return 0;
+}
+
+
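+/*
+ * Every mode change is wrapped in GROUPED_PARAMETER_HOLD /
+ * GROUPED_PARAMETER_UPDATE so the new geometry, fps divider and
+ * exposure settings all take effect on the same frame boundary.
+ */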
+static int32_t mt9t013_set_sensor_mode(int mode, int res)
+{
+	int32_t rc = 0;
+	rc = mt9t013_i2c_write_w(mt9t013_client->addr,
+			REG_GROUPED_PARAMETER_HOLD,
+			GROUPED_PARAMETER_HOLD);
+	if (rc < 0)
+		return rc;
+
+	switch (mode) {
+	case SENSOR_PREVIEW_MODE:
+		rc = mt9t013_video_config(mode, res);
+		break;
+
+	case SENSOR_SNAPSHOT_MODE:
+		rc = mt9t013_snapshot_config(mode);
+		break;
+
+	case SENSOR_RAW_SNAPSHOT_MODE:
+		rc = mt9t013_raw_snapshot_config(mode);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	/* FIXME: what should we do if rc < 0? */
+	if (rc >= 0)
+		return mt9t013_i2c_write_w(mt9t013_client->addr,
+				REG_GROUPED_PARAMETER_HOLD,
+				GROUPED_PARAMETER_UPDATE);
+	return rc;
+}
+
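+/*
+ * ioctl-style entry point: the sensor_cfg_data block is copied in from
+ * user space, the request is dispatched under mt9t013_sem, and the
+ * CFG_GET_* cases copy the updated block back to the caller.
+ */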
+int mt9t013_sensor_config(void __user *argp)
+{
+	struct sensor_cfg_data cdata;
+	long   rc = 0;
+
+	if (copy_from_user(&cdata, (void *)argp,
+			sizeof(struct sensor_cfg_data)))
+		return -EFAULT;
+
+	down(&mt9t013_sem);
+
+	CDBG("mt9t013_sensor_config: cfgtype = %d\n", cdata.cfgtype);
+	switch (cdata.cfgtype) {
+	case CFG_GET_PICT_FPS:
+		mt9t013_get_pict_fps(cdata.cfg.gfps.prevfps,
+				&(cdata.cfg.gfps.pictfps));
+		if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PREV_L_PF:
+		cdata.cfg.prevl_pf = mt9t013_get_prev_lines_pf();
+		if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PREV_P_PL:
+		cdata.cfg.prevp_pl = mt9t013_get_prev_pixels_pl();
+		if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PICT_L_PF:
+		cdata.cfg.pictl_pf = mt9t013_get_pict_lines_pf();
+		if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PICT_P_PL:
+		cdata.cfg.pictp_pl =
+			mt9t013_get_pict_pixels_pl();
+
+		if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PICT_MAX_EXP_LC:
+		cdata.cfg.pict_max_exp_lc =
+			mt9t013_get_pict_max_exp_lc();
+
+		if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_SET_FPS:
+	case CFG_SET_PICT_FPS:
+		rc = mt9t013_set_fps(&(cdata.cfg.fps));
+		break;
+
+	case CFG_SET_EXP_GAIN:
+		rc = mt9t013_write_exp_gain(cdata.cfg.exp_gain.gain,
+				cdata.cfg.exp_gain.line);
+		break;
+
+	case CFG_SET_PICT_EXP_GAIN:
+		rc = mt9t013_set_pict_exp_gain(cdata.cfg.exp_gain.gain,
+				cdata.cfg.exp_gain.line);
+		break;
+
+	case CFG_SET_MODE:
+		rc = mt9t013_set_sensor_mode(cdata.mode, cdata.rs);
+		break;
+
+	case CFG_PWR_DOWN:
+		rc = mt9t013_power_down();
+		break;
+
+	case CFG_MOVE_FOCUS:
+		rc = mt9t013_move_focus(cdata.cfg.focus.dir,
+				cdata.cfg.focus.steps);
+		break;
+
+	case CFG_SET_DEFAULT_FOCUS:
+		rc = mt9t013_set_default_focus(cdata.cfg.focus.steps);
+		break;
+
+	case CFG_GET_AF_MAX_STEPS:
+		cdata.max_steps = MT9T013_TOTAL_STEPS_NEAR_TO_FAR;
+		if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_SET_EFFECT:
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	up(&mt9t013_sem);
+	return rc;
+}
+
+int mt9t013_sensor_release(void)
+{
+	int rc = -EBADF;
+
+	down(&mt9t013_sem);
+
+	if (machine_is_sapphire())
+		mt9t013_poweroff_af();
+	mt9t013_power_down();
+
+	gpio_direction_output(mt9t013_ctrl->sensordata->sensor_reset,
+			0);
+	gpio_free(mt9t013_ctrl->sensordata->sensor_reset);
+
+	kfree(mt9t013_ctrl);
+
+	up(&mt9t013_sem);
+	CDBG("mt9t013_release completed!\n");
+	return rc;
+}
+
+static int mt9t013_i2c_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	int rc = 0;
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		rc = -ENOTSUPP;
+		goto probe_failure;
+	}
+
+	mt9t013_sensorw =
+		kzalloc(sizeof(struct mt9t013_work), GFP_KERNEL);
+
+	if (!mt9t013_sensorw) {
+		rc = -ENOMEM;
+		goto probe_failure;
+	}
+
+	i2c_set_clientdata(client, mt9t013_sensorw);
+	mt9t013_init_client(client);
+	mt9t013_client = client;
+	mt9t013_client->addr = mt9t013_client->addr >> 1;
+	mdelay(50);
+
+	CDBG("i2c probe ok\n");
+	return 0;
+
+probe_failure:
+	kfree(mt9t013_sensorw);
+	mt9t013_sensorw = NULL;
+	pr_err("i2c probe failure %d\n", rc);
+	return rc;
+}
+
+static const struct i2c_device_id mt9t013_i2c_id[] = {
+	{ "mt9t013", 0},
+	{ }
+};
+
+static struct i2c_driver mt9t013_i2c_driver = {
+	.id_table = mt9t013_i2c_id,
+	.probe  = mt9t013_i2c_probe,
+	.remove = __exit_p(mt9t013_i2c_remove),
+	.driver = {
+		.name = "mt9t013",
+	},
+};
+
+static int mt9t013_sensor_probe(
+		const struct msm_camera_sensor_info *info,
+		struct msm_sensor_ctrl *s)
+{
+	/* We expect this driver to match with the i2c device registered
+	 * in the board file immediately. */
+	int rc = i2c_add_driver(&mt9t013_i2c_driver);
+	if (rc < 0 || mt9t013_client == NULL) {
+		rc = -ENOTSUPP;
+		goto probe_done;
+	}
+
+	/* enable mclk first */
+	msm_camio_clk_rate_set(MT9T013_DEFAULT_CLOCK_RATE);
+	mdelay(20);
+
+	rc = mt9t013_probe_init_sensor(info);
+	if (rc < 0) {
+		i2c_del_driver(&mt9t013_i2c_driver);
+		goto probe_done;
+	}
+
+	s->s_init = mt9t013_sensor_open_init;
+	s->s_release = mt9t013_sensor_release;
+	s->s_config  = mt9t013_sensor_config;
+	s->s_mount_angle = 0;
+	mt9t013_sensor_init_done(info);
+
+probe_done:
+	return rc;
+}
+
+static int __mt9t013_probe(struct platform_device *pdev)
+{
+	return msm_camera_drv_start(pdev, mt9t013_sensor_probe);
+}
+
+static struct platform_driver msm_camera_driver = {
+	.probe = __mt9t013_probe,
+	.driver = {
+		.name = "msm_camera_mt9t013",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init mt9t013_init(void)
+{
+	return platform_driver_register(&msm_camera_driver);
+}
+
+module_init(mt9t013_init);
diff --git a/drivers/media/video/msm/mt9t013.h b/drivers/media/video/msm/mt9t013.h
new file mode 100644
index 0000000..f6b7c28
--- /dev/null
+++ b/drivers/media/video/msm/mt9t013.h
@@ -0,0 +1,59 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MT9T013_H
+#define MT9T013_H
+
+#include <linux/types.h>
+
+extern struct mt9t013_reg mt9t013_regs; /* from mt9t013_reg.c */
+
+struct reg_struct {
+	uint16_t vt_pix_clk_div;        /*  0x0300 */
+	uint16_t vt_sys_clk_div;        /*  0x0302 */
+	uint16_t pre_pll_clk_div;       /*  0x0304 */
+	uint16_t pll_multiplier;        /*  0x0306 */
+	uint16_t op_pix_clk_div;        /*  0x0308 */
+	uint16_t op_sys_clk_div;        /*  0x030A */
+	uint16_t scale_m;               /*  0x0404 */
+	uint16_t row_speed;             /*  0x3016 */
+	uint16_t x_addr_start;          /*  0x3004 */
+	uint16_t x_addr_end;            /*  0x3008 */
+	uint16_t y_addr_start;        	/*  0x3002 */
+	uint16_t y_addr_end;            /*  0x3006 */
+	uint16_t read_mode;             /*  0x3040 */
+	uint16_t x_output_size;         /*  0x034C */
+	uint16_t y_output_size;         /*  0x034E */
+	uint16_t line_length_pck;       /*  0x300C */
+	uint16_t frame_length_lines;	/*  0x300A */
+	uint16_t coarse_int_time; 		/*  0x3012 */
+	uint16_t fine_int_time;   		/*  0x3014 */
+};
+
+struct mt9t013_i2c_reg_conf {
+	unsigned short waddr;
+	unsigned short wdata;
+};
+
+struct mt9t013_reg {
+	struct reg_struct const *reg_pat;
+	uint16_t reg_pat_size;
+	struct mt9t013_i2c_reg_conf const *ttbl;
+	uint16_t ttbl_size;
+	struct mt9t013_i2c_reg_conf const *lctbl;
+	uint16_t lctbl_size;
+	struct mt9t013_i2c_reg_conf const *rftbl;
+	uint16_t rftbl_size;
+};
+
+#endif /* #define MT9T013_H */
diff --git a/drivers/media/video/msm/mt9t013_reg.c b/drivers/media/video/msm/mt9t013_reg.c
new file mode 100644
index 0000000..9a9867e
--- /dev/null
+++ b/drivers/media/video/msm/mt9t013_reg.c
@@ -0,0 +1,275 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "mt9t013.h"
+#include <linux/kernel.h>
+
+struct reg_struct const mt9t013_reg_pat[2] = {
+	{ /* Preview 2x2 binning 20fps, pclk MHz, MCLK 24MHz */
+	/* vt_pix_clk_div:REG=0x0300 update get_snapshot_fps
+	* if this change */
+	8,
+
+	/* vt_sys_clk_div: REG=0x0302  update get_snapshot_fps
+	* if this change */
+	1,
+
+	/* pre_pll_clk_div REG=0x0304  update get_snapshot_fps
+	* if this change */
+	2,
+
+	/* pll_multiplier  REG=0x0306 60 for 30fps preview, 40
+	 * for 20fps preview
+	 * 46 for 30fps preview, try 47/48 to increase further */
+	46,
+
+	/* op_pix_clk_div        REG=0x0308 */
+	8,
+
+	/* op_sys_clk_div        REG=0x030A */
+	1,
+
+	/* scale_m       REG=0x0404 */
+	16,
+
+	/* row_speed     REG=0x3016 */
+	0x0111,
+
+	/* x_addr_start  REG=0x3004 */
+	8,
+
+	/* x_addr_end    REG=0x3008 */
+	2053,
+
+	/* y_addr_start  REG=0x3002 */
+	8,
+
+	/* y_addr_end    REG=0x3006 */
+	1541,
+
+	/* read_mode     REG=0x3040 */
+	0x046C,
+
+	/* x_output_size REG=0x034C */
+	1024,
+
+	/* y_output_size REG=0x034E */
+	768,
+
+	/* line_length_pck    REG=0x300C */
+	2616,
+
+	/* frame_length_lines REG=0x300A */
+	916,
+
+	/* coarse_int_time REG=0x3012 */
+	16,
+
+	/* fine_int_time   REG=0x3014 */
+	1461
+	},
+	{ /*Snapshot */
+	/* vt_pix_clk_div  REG=0x0300 update get_snapshot_fps
+	* if this change */
+	8,
+
+	/* vt_sys_clk_div  REG=0x0302 update get_snapshot_fps
+	* if this change */
+	1,
+
+	/* pre_pll_clk_div REG=0x0304 update get_snapshot_fps
+	 * if this change */
+	2,
+
+	/* pll_multiplier REG=0x0306 50 for 15fps snapshot,
+	 * 40 for 10fps snapshot
+	 * 46 for 30fps snapshot, try 47/48 to increase further */
+	46,
+
+	/* op_pix_clk_div        REG=0x0308 */
+	8,
+
+	/* op_sys_clk_div        REG=0x030A */
+	1,
+
+	/* scale_m       REG=0x0404 */
+	16,
+
+	/* row_speed     REG=0x3016 */
+	0x0111,
+
+	/* x_addr_start  REG=0x3004 */
+	8,
+
+	/* x_addr_end    REG=0x3008 */
+	2071,
+
+	/* y_addr_start  REG=0x3002 */
+	8,
+
+	/* y_addr_end    REG=0x3006 */
+	1551,
+
+	/* read_mode     REG=0x3040 */
+	0x0024,
+
+	/* x_output_size REG=0x034C */
+	2064,
+
+	/* y_output_size REG=0x034E */
+	1544,
+
+	/* line_length_pck REG=0x300C */
+	2952,
+
+	/* frame_length_lines    REG=0x300A */
+	1629,
+
+	/* coarse_int_time REG=0x3012 */
+	16,
+
+	/* fine_int_time REG=0x3014   */
+	733
+	}
+};
+
+struct mt9t013_i2c_reg_conf const mt9t013_test_tbl[] = {
+	{ 0x3044, 0x0544 & 0xFBFF },
+	{ 0x30CA, 0x0004 | 0x0001 },
+	{ 0x30D4, 0x9020 & 0x7FFF },
+	{ 0x31E0, 0x0003 & 0xFFFE },
+	{ 0x3180, 0x91FF & 0x7FFF },
+	{ 0x301A, (0x10CC | 0x8000) & 0xFFF7 },
+	{ 0x301E, 0x0000 },
+	{ 0x3780, 0x0000 },
+};
+
+/* [Lens shading 85 Percent TL84] */
+struct mt9t013_i2c_reg_conf const mt9t013_lc_tbl[] = {
+	{ 0x360A, 0x0290 }, /* P_RD_P0Q0 */
+	{ 0x360C, 0xC92D }, /* P_RD_P0Q1 */
+	{ 0x360E, 0x0771 }, /* P_RD_P0Q2 */
+	{ 0x3610, 0xE38C }, /* P_RD_P0Q3 */
+	{ 0x3612, 0xD74F }, /* P_RD_P0Q4 */
+	{ 0x364A, 0x168C }, /* P_RD_P1Q0 */
+	{ 0x364C, 0xCACB }, /* P_RD_P1Q1 */
+	{ 0x364E, 0x8C4C }, /* P_RD_P1Q2 */
+	{ 0x3650, 0x0BEA }, /* P_RD_P1Q3 */
+	{ 0x3652, 0xDC0F }, /* P_RD_P1Q4 */
+	{ 0x368A, 0x70B0 }, /* P_RD_P2Q0 */
+	{ 0x368C, 0x200B }, /* P_RD_P2Q1 */
+	{ 0x368E, 0x30B2 }, /* P_RD_P2Q2 */
+	{ 0x3690, 0xD04F }, /* P_RD_P2Q3 */
+	{ 0x3692, 0xACF5 }, /* P_RD_P2Q4 */
+	{ 0x36CA, 0xF7C9 }, /* P_RD_P3Q0 */
+	{ 0x36CC, 0x2AED }, /* P_RD_P3Q1 */
+	{ 0x36CE, 0xA652 }, /* P_RD_P3Q2 */
+	{ 0x36D0, 0x8192 }, /* P_RD_P3Q3 */
+	{ 0x36D2, 0x3A15 }, /* P_RD_P3Q4 */
+	{ 0x370A, 0xDA30 }, /* P_RD_P4Q0 */
+	{ 0x370C, 0x2E2F }, /* P_RD_P4Q1 */
+	{ 0x370E, 0xBB56 }, /* P_RD_P4Q2 */
+	{ 0x3710, 0x8195 }, /* P_RD_P4Q3 */
+	{ 0x3712, 0x02F9 }, /* P_RD_P4Q4 */
+	{ 0x3600, 0x0230 }, /* P_GR_P0Q0 */
+	{ 0x3602, 0x58AD }, /* P_GR_P0Q1 */
+	{ 0x3604, 0x18D1 }, /* P_GR_P0Q2 */
+	{ 0x3606, 0x260D }, /* P_GR_P0Q3 */
+	{ 0x3608, 0xF530 }, /* P_GR_P0Q4 */
+	{ 0x3640, 0x17EB }, /* P_GR_P1Q0 */
+	{ 0x3642, 0x3CAB }, /* P_GR_P1Q1 */
+	{ 0x3644, 0x87CE }, /* P_GR_P1Q2 */
+	{ 0x3646, 0xC02E }, /* P_GR_P1Q3 */
+	{ 0x3648, 0xF48F }, /* P_GR_P1Q4 */
+	{ 0x3680, 0x5350 }, /* P_GR_P2Q0 */
+	{ 0x3682, 0x7EAF }, /* P_GR_P2Q1 */
+	{ 0x3684, 0x4312 }, /* P_GR_P2Q2 */
+	{ 0x3686, 0xC652 }, /* P_GR_P2Q3 */
+	{ 0x3688, 0xBC15 }, /* P_GR_P2Q4 */
+	{ 0x36C0, 0xB8AD }, /* P_GR_P3Q0 */
+	{ 0x36C2, 0xBDCD }, /* P_GR_P3Q1 */
+	{ 0x36C4, 0xE4B2 }, /* P_GR_P3Q2 */
+	{ 0x36C6, 0xB50F }, /* P_GR_P3Q3 */
+	{ 0x36C8, 0x5B95 }, /* P_GR_P3Q4 */
+	{ 0x3700, 0xFC90 }, /* P_GR_P4Q0 */
+	{ 0x3702, 0x8C51 }, /* P_GR_P4Q1 */
+	{ 0x3704, 0xCED6 }, /* P_GR_P4Q2 */
+	{ 0x3706, 0xB594 }, /* P_GR_P4Q3 */
+	{ 0x3708, 0x0A39 }, /* P_GR_P4Q4 */
+	{ 0x3614, 0x0230 }, /* P_BL_P0Q0 */
+	{ 0x3616, 0x160D }, /* P_BL_P0Q1 */
+	{ 0x3618, 0x08D1 }, /* P_BL_P0Q2 */
+	{ 0x361A, 0x98AB }, /* P_BL_P0Q3 */
+	{ 0x361C, 0xEA50 }, /* P_BL_P0Q4 */
+	{ 0x3654, 0xB4EA }, /* P_BL_P1Q0 */
+	{ 0x3656, 0xEA6C }, /* P_BL_P1Q1 */
+	{ 0x3658, 0xFE08 }, /* P_BL_P1Q2 */
+	{ 0x365A, 0x2C6E }, /* P_BL_P1Q3 */
+	{ 0x365C, 0xEB0E }, /* P_BL_P1Q4 */
+	{ 0x3694, 0x6DF0 }, /* P_BL_P2Q0 */
+	{ 0x3696, 0x3ACF }, /* P_BL_P2Q1 */
+	{ 0x3698, 0x3E0F }, /* P_BL_P2Q2 */
+	{ 0x369A, 0xB2B1 }, /* P_BL_P2Q3 */
+	{ 0x369C, 0xC374 }, /* P_BL_P2Q4 */
+	{ 0x36D4, 0xF2AA }, /* P_BL_P3Q0 */
+	{ 0x36D6, 0x8CCC }, /* P_BL_P3Q1 */
+	{ 0x36D8, 0xDEF2 }, /* P_BL_P3Q2 */
+	{ 0x36DA, 0xFA11 }, /* P_BL_P3Q3 */
+	{ 0x36DC, 0x42F5 }, /* P_BL_P3Q4 */
+	{ 0x3714, 0xF4F1 }, /* P_BL_P4Q0 */
+	{ 0x3716, 0xF6F0 }, /* P_BL_P4Q1 */
+	{ 0x3718, 0x8FD6 }, /* P_BL_P4Q2 */
+	{ 0x371A, 0xEA14 }, /* P_BL_P4Q3 */
+	{ 0x371C, 0x6338 }, /* P_BL_P4Q4 */
+	{ 0x361E, 0x0350 }, /* P_GB_P0Q0 */
+	{ 0x3620, 0x91AE }, /* P_GB_P0Q1 */
+	{ 0x3622, 0x0571 }, /* P_GB_P0Q2 */
+	{ 0x3624, 0x100D }, /* P_GB_P0Q3 */
+	{ 0x3626, 0xCA70 }, /* P_GB_P0Q4 */
+	{ 0x365E, 0xE6CB }, /* P_GB_P1Q0 */
+	{ 0x3660, 0x50ED }, /* P_GB_P1Q1 */
+	{ 0x3662, 0x3DAE }, /* P_GB_P1Q2 */
+	{ 0x3664, 0xAA4F }, /* P_GB_P1Q3 */
+	{ 0x3666, 0xDC50 }, /* P_GB_P1Q4 */
+	{ 0x369E, 0x5470 }, /* P_GB_P2Q0 */
+	{ 0x36A0, 0x1F6E }, /* P_GB_P2Q1 */
+	{ 0x36A2, 0x6671 }, /* P_GB_P2Q2 */
+	{ 0x36A4, 0xC010 }, /* P_GB_P2Q3 */
+	{ 0x36A6, 0x8DF5 }, /* P_GB_P2Q4 */
+	{ 0x36DE, 0x0B0C }, /* P_GB_P3Q0 */
+	{ 0x36E0, 0x84CE }, /* P_GB_P3Q1 */
+	{ 0x36E2, 0x8493 }, /* P_GB_P3Q2 */
+	{ 0x36E4, 0xA610 }, /* P_GB_P3Q3 */
+	{ 0x36E6, 0x50B5 }, /* P_GB_P3Q4 */
+	{ 0x371E, 0x9651 }, /* P_GB_P4Q0 */
+	{ 0x3720, 0x1EAB }, /* P_GB_P4Q1 */
+	{ 0x3722, 0xAF76 }, /* P_GB_P4Q2 */
+	{ 0x3724, 0xE4F4 }, /* P_GB_P4Q3 */
+	{ 0x3726, 0x79F8 }, /* P_GB_P4Q4 */
+	{ 0x3782, 0x0410 }, /* POLY_ORIGIN_C */
+	{ 0x3784, 0x0320 }, /* POLY_ORIGIN_R  */
+	{ 0x3780, 0x8000 } /* POLY_SC_ENABLE */
+};
+
+struct mt9t013_reg mt9t013_regs = {
+	.reg_pat = &mt9t013_reg_pat[0],
+	.reg_pat_size = ARRAY_SIZE(mt9t013_reg_pat),
+	.ttbl = &mt9t013_test_tbl[0],
+	.ttbl_size = ARRAY_SIZE(mt9t013_test_tbl),
+	.lctbl = &mt9t013_lc_tbl[0],
+	.lctbl_size = ARRAY_SIZE(mt9t013_lc_tbl),
+	.rftbl = &mt9t013_lc_tbl[0],	/* &mt9t013_rolloff_tbl[0], */
+	.rftbl_size = ARRAY_SIZE(mt9t013_lc_tbl)
+};
diff --git a/drivers/media/video/msm/ov2720.c b/drivers/media/video/msm/ov2720.c
new file mode 100644
index 0000000..cfd6efa
--- /dev/null
+++ b/drivers/media/video/msm/ov2720.c
@@ -0,0 +1,598 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/types.h>
+#include <mach/camera.h>
+#include <mach/gpio.h>
+#include <media/msm_camera.h>
+#include "msm_sensor.h"
+#include "ov2720.h"
+#include "msm.h"
+#define SENSOR_NAME "ov2720"
+#define PLATFORM_DRIVER_NAME "msm_camera_ov2720"
+#define ov2720_obj ov2720_##obj
+
+DEFINE_MUTEX(ov2720_mut);
+static struct msm_sensor_ctrl_t ov2720_s_ctrl;
+
+struct msm_sensor_i2c_reg_conf ov2720_start_settings[] = {
+	{0x0100, 0x01},
+};
+
+struct msm_sensor_i2c_reg_conf ov2720_stop_settings[] = {
+	{0x0100, 0x00},
+};
+
+struct msm_sensor_i2c_reg_conf ov2720_groupon_settings[] = {
+	{0x3208, 0x00},
+};
+
+struct msm_sensor_i2c_reg_conf ov2720_groupoff_settings[] = {
+	{0x3208, 0x10},
+	{0x3208, 0xA0},
+};
+
+static struct msm_sensor_i2c_reg_conf ov2720_prev_settings[] = {
+	{0x3800, 0x00},
+	{0x3801, 0x0c},
+	{0x3802, 0x00},
+	{0x3803, 0x02},
+	{0x3804, 0x07},
+	{0x3805, 0x97},
+	{0x3806, 0x04},
+	{0x3807, 0x45},
+	{0x3808, 0x07},
+	{0x3809, 0x80},
+	{0x380a, 0x04},
+	{0x380b, 0x38},
+	{0x380c, 0x08},/*Line Length Pclk Hi*/
+	{0x380d, 0x5c},/*Line Length Pclk Lo*/
+	{0x380e, 0x04},/*Frame Length Line Hi*/
+	{0x380f, 0x60},/*Frame Length Line Lo*/
+	{0x3810, 0x00},
+	{0x3811, 0x05},
+	{0x3812, 0x00},
+	{0x3813, 0x06},
+	{0x3820, 0x80},
+	{0x3821, 0x06},
+	{0x3814, 0x11},
+	{0x3815, 0x11},
+	{0x3612, 0x0b},
+	{0x3618, 0x04},
+	{0x3a08, 0x01},
+	{0x3a09, 0x50},
+	{0x3a0a, 0x01},
+	{0x3a0b, 0x18},
+	{0x3a0d, 0x03},
+	{0x3a0e, 0x03},
+	{0x4520, 0x00},
+	{0x4837, 0x1b},
+	{0x3000, 0xff},
+	{0x3001, 0xff},
+	{0x3002, 0xf0},
+	{0x3600, 0x08},
+	{0x3621, 0xc0},
+	{0x3632, 0xd2},
+	{0x3633, 0x23},
+	{0x3634, 0x54},
+	{0x3f01, 0x0c},
+	{0x5001, 0xc1},
+	{0x3614, 0xf0},
+	{0x3630, 0x2d},
+	{0x370b, 0x62},
+	{0x3706, 0x61},
+	{0x4000, 0x02},
+	{0x4002, 0xc5},
+	{0x4005, 0x08},
+	{0x404f, 0x84},
+	{0x4051, 0x00},
+	{0x5000, 0xff},
+	{0x3a18, 0x00},
+	{0x3a19, 0x80},
+	{0x3503, 0x13},
+	{0x4521, 0x00},
+	{0x5183, 0xb0},
+	{0x5184, 0xb0},
+	{0x5185, 0xb0},
+	{0x370c, 0x0c},
+	{0x3035, 0x10},
+	{0x3036, 0x1e},
+	{0x3037, 0x21},
+	{0x303e, 0x19},
+	{0x3038, 0x06},
+	{0x3018, 0x04},
+	{0x3000, 0x00},
+	{0x3001, 0x00},
+	{0x3002, 0x00},
+	{0x3a0f, 0x40},
+	{0x3a10, 0x38},
+	{0x3a1b, 0x48},
+	{0x3a1e, 0x30},
+	{0x3a11, 0x90},
+	{0x3a1f, 0x10},
+	{0x4800, 0x24},
+};
+
+static struct msm_sensor_i2c_reg_conf ov2720_720_settings[] = {
+	{0x3800, 0x01},
+	{0x3801, 0x4a},
+	{0x3802, 0x00},
+	{0x3803, 0xba},
+	{0x3804, 0x06},
+	{0x3805, 0x51+32},
+	{0x3806, 0x03},
+	{0x3807, 0x8d+24},
+	{0x3808, 0x05},
+	{0x3809, 0x00+16},
+	{0x380a, 0x02},
+	{0x380b, 0x78},
+	{0x380c, 0x08},/*Line Length Pclk Hi*/
+	{0x380d, 0x5e},/*Line Length Pclk Lo*/
+	{0x380e, 0x04},/*Frame Length Line Hi*/
+	{0x380f, 0x60},/*Frame Length Line Lo*/
+	{0x3810, 0x00},
+	{0x3811, 0x05},
+	{0x3812, 0x00},
+	{0x3813, 0x02},
+	{0x3820, 0x80},
+	{0x3821, 0x06},
+	{0x3814, 0x11},
+	{0x3815, 0x11},
+	{0x3612, 0x0b},
+	{0x3618, 0x04},
+	{0x3a08, 0x01},
+	{0x3a09, 0x50},
+	{0x3a0a, 0x01},
+	{0x3a0b, 0x18},
+	{0x3a0d, 0x03},
+	{0x3a0e, 0x03},
+	{0x4520, 0x00},
+	{0x4837, 0x1b},
+	{0x3000, 0xff},
+	{0x3001, 0xff},
+	{0x3002, 0xf0},
+	{0x3600, 0x08},
+	{0x3621, 0xc0},
+	{0x3632, 0xd2},
+	{0x3633, 0x23},
+	{0x3634, 0x54},
+	{0x3f01, 0x0c},
+	{0x5001, 0xc1},
+	{0x3614, 0xf0},
+	{0x3630, 0x2d},
+	{0x370b, 0x62},
+	{0x3706, 0x61},
+	{0x4000, 0x02},
+	{0x4002, 0xc5},
+	{0x4005, 0x08},
+	{0x404f, 0x84},
+	{0x4051, 0x00},
+	{0x5000, 0xff},
+	{0x3a18, 0x00},
+	{0x3a19, 0x80},
+	{0x3503, 0x13},
+	{0x4521, 0x00},
+	{0x5183, 0xb0},
+	{0x5184, 0xb0},
+	{0x5185, 0xb0},
+	{0x370c, 0x0c},
+	{0x3035, 0x10},
+	{0x3036, 0x04},
+	{0x3037, 0x61},
+	{0x303e, 0x19},
+	{0x3038, 0x06},
+	{0x3018, 0x04},
+	{0x3000, 0x00},
+	{0x3001, 0x00},
+	{0x3002, 0x00},
+	{0x3a0f, 0x40},
+	{0x3a10, 0x38},
+	{0x3a1b, 0x48},
+	{0x3a1e, 0x30},
+	{0x3a11, 0x90},
+	{0x3a1f, 0x10},
+	{0x4800, 0x24},
+};
+
+static struct msm_sensor_i2c_reg_conf ov2720_vga_settings[] = {
+	{0x3800, 0x00},
+	{0x3801, 0x0c},
+	{0x3802, 0x00},
+	{0x3803, 0x02},
+	{0x3804, 0x07},
+	{0x3805, 0x97+32},
+	{0x3806, 0x04},
+	{0x3807, 0x45+24},
+	{0x3808, 0x02},
+	{0x3809, 0x88+16},
+	{0x380a, 0x01},
+	{0x380b, 0xe6+12},
+	{0x380c, 0x08},/*Line Length Pclk Hi*/
+	{0x380d, 0x5e},/*Line Length Pclk Lo*/
+	{0x380e, 0x04},/*Frame Length Line Hi*/
+	{0x380f, 0x68},/*Frame Length Line Lo*/
+	{0x3810, 0x00},
+	{0x3811, 0x03},
+	{0x3812, 0x00},
+	{0x3813, 0x03},
+	{0x3820, 0x80},
+	{0x3821, 0x06},
+	{0x3814, 0x11},
+	{0x3815, 0x11},
+	{0x3612, 0x0b},
+	{0x3618, 0x04},
+	{0x3a08, 0x01},
+	{0x3a09, 0x50},
+	{0x3a0a, 0x01},
+	{0x3a0b, 0x18},
+	{0x3a0d, 0x03},
+	{0x3a0e, 0x03},
+	{0x4520, 0x00},
+	{0x4837, 0x1b},
+	{0x3000, 0xff},
+	{0x3001, 0xff},
+	{0x3002, 0xf0},
+	{0x3600, 0x08},
+	{0x3621, 0xc0},
+	{0x3632, 0xd2},
+	{0x3633, 0x23},
+	{0x3634, 0x54},
+	{0x3f01, 0x0c},
+	{0x5001, 0xc1},
+	{0x3614, 0xf0},
+	{0x3630, 0x2d},
+	{0x370b, 0x62},
+	{0x3706, 0x61},
+	{0x4000, 0x02},
+	{0x4002, 0xc5},
+	{0x4005, 0x08},
+	{0x404f, 0x84},
+	{0x4051, 0x00},
+	{0x5000, 0xff},
+	{0x3a18, 0x00},
+	{0x3a19, 0x80},
+	{0x3503, 0x13},
+	{0x4521, 0x00},
+	{0x5183, 0xb0},
+	{0x5184, 0xb0},
+	{0x5185, 0xb0},
+	{0x370c, 0x0c},
+	{0x3035, 0x10},
+	{0x3036, 0x04},
+	{0x3037, 0x61},
+	{0x303e, 0x19},
+	{0x3038, 0x06},
+	{0x3018, 0x04},
+	{0x3000, 0x00},
+	{0x3001, 0x00},
+	{0x3002, 0x00},
+	{0x3a0f, 0x40},
+	{0x3a10, 0x38},
+	{0x3a1b, 0x48},
+	{0x3a1e, 0x30},
+	{0x3a11, 0x90},
+	{0x3a1f, 0x10},
+	{0x4800, 0x24},
+	{0x3500, 0x00},
+	{0x3501, 0x17},
+	{0x3502, 0xf0},
+	{0x3508, 0x00},
+	{0x3509, 0x20},
+};
+
+static struct msm_sensor_i2c_reg_conf ov2720_recommend_settings[] = {
+	{0x0103, 0x01},
+	{0x3718, 0x10},
+	{0x3702, 0x24},
+	{0x373a, 0x60},
+	{0x3715, 0x01},
+	{0x3703, 0x2e},
+	{0x3705, 0x10},
+	{0x3730, 0x30},
+	{0x3704, 0x62},
+	{0x3f06, 0x3a},
+	{0x371c, 0x00},
+	{0x371d, 0xc4},
+	{0x371e, 0x01},
+	{0x371f, 0x0d},
+	{0x3708, 0x61},
+	{0x3709, 0x12},
+};
+
+static struct msm_camera_csi_params ov2720_csi_params = {
+	.lane_cnt = 2,
+	.data_format = CSI_10BIT,
+	.lane_assign = 0xe4,
+	.dpcm_scheme = 0,
+	.settle_cnt = 0x18,
+};
+
+static struct v4l2_subdev_info ov2720_subdev_info[] = {
+	{
+	.code   = V4L2_MBUS_FMT_SBGGR10_1X10,
+	.colorspace = V4L2_COLORSPACE_JPEG,
+	.fmt    = 1,
+	.order    = 0,
+	},
+	/* more can be supported, to be added later */
+};
+
+static struct msm_sensor_i2c_conf_array ov2720_init_conf[] = {
+	{&ov2720_recommend_settings[0],
+	ARRAY_SIZE(ov2720_recommend_settings), 0}
+};
+
+static struct msm_sensor_i2c_conf_array ov2720_confs[] = {
+	{&ov2720_prev_settings[0], ARRAY_SIZE(ov2720_prev_settings), 0},
+	{&ov2720_vga_settings[0], ARRAY_SIZE(ov2720_vga_settings), 0},
+	{&ov2720_720_settings[0], ARRAY_SIZE(ov2720_720_settings), 0},
+};
+
+static int32_t ov2720_write_exp_gain(struct msm_sensor_ctrl_t *s_ctrl,
+		uint16_t gain, uint32_t line)
+{
+	uint32_t fl_lines, offset;
+	fl_lines =
+		(s_ctrl->curr_frame_length_lines * s_ctrl->fps_divider) / Q10;
+	offset = s_ctrl->vert_offset;
+	if (line > (fl_lines - offset))
+		fl_lines = line + offset;
+
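+	/*
+	 * The coarse integration time appears to be kept in 1/16-line
+	 * units spread across three registers starting one byte below
+	 * coarse_int_time_addr, hence the line >> 12 byte write followed
+	 * by the (line << 4) word write.
+	 */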
+	pr_err("LINE: 0x%x\n", line);
+	s_ctrl->func_tbl.sensor_group_hold_on(s_ctrl);
+	msm_sensor_i2c_waddr_write_w(s_ctrl,
+			s_ctrl->frame_length_lines_addr, fl_lines);
+	msm_sensor_i2c_waddr_write_b(s_ctrl,
+			s_ctrl->coarse_int_time_addr-1, line >> 12);
+	msm_sensor_i2c_waddr_write_w(s_ctrl,
+			s_ctrl->coarse_int_time_addr, ((line << 4) & 0xFFFF));
+	msm_sensor_i2c_waddr_write_w(s_ctrl,
+			s_ctrl->global_gain_addr, gain);
+	s_ctrl->func_tbl.sensor_group_hold_off(s_ctrl);
+	return 0;
+}
+
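+/*
+ * Streaming is stopped before any register update.  REG_INIT pushes
+ * the recommended settings; UPDATE_PERIODIC writes the per-resolution
+ * table and, on the first pass only, configures the CSID/CSIPHY for
+ * two RAW10 MIPI lanes before restarting the stream.
+ */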
+static int32_t ov2720_sensor_setting(struct msm_sensor_ctrl_t *s_ctrl,
+				int update_type, int rt)
+{
+	struct msm_camera_csid_params ov2720_csid_params;
+	struct msm_camera_csiphy_params ov2720_csiphy_params;
+	int32_t rc = 0;
+	s_ctrl->func_tbl.sensor_stop_stream(s_ctrl);
+	msleep(30);
+	if (update_type == MSM_SENSOR_REG_INIT) {
+		s_ctrl->config_csi_flag = 1;
+		msm_sensor_enable_debugfs(s_ctrl);
+		msm_sensor_write_b_init_settings(s_ctrl);
+	} else if (update_type == MSM_SENSOR_UPDATE_PERIODIC) {
+		msm_sensor_write_b_res_settings(s_ctrl, rt);
+		if (s_ctrl->config_csi_flag) {
+			struct msm_camera_csid_vc_cfg ov2720_vccfg[] = {
+				{0, CSI_RAW10, CSI_DECODE_10BIT},
+				{1, CSI_EMBED_DATA, CSI_DECODE_8BIT},
+			};
+			ov2720_csid_params.lane_cnt = 2;
+			ov2720_csid_params.lane_assign = 0xe4;
+			ov2720_csid_params.lut_params.num_cid =
+				ARRAY_SIZE(ov2720_vccfg);
+			ov2720_csid_params.lut_params.vc_cfg =
+				&ov2720_vccfg[0];
+			ov2720_csiphy_params.lane_cnt = 2;
+			ov2720_csiphy_params.settle_cnt = 0x1B;
+			rc = msm_camio_csid_config(&ov2720_csid_params);
+			v4l2_subdev_notify(s_ctrl->sensor_v4l2_subdev,
+						NOTIFY_CID_CHANGE, NULL);
+			mb();
+			rc = msm_camio_csiphy_config(&ov2720_csiphy_params);
+			mb();
+			msleep(20);
+			s_ctrl->config_csi_flag = 0;
+		}
+		s_ctrl->func_tbl.sensor_start_stream(s_ctrl);
+		msleep(30);
+	}
+	return rc;
+}
+
+static int ov2720_sensor_config(void __user *argp)
+{
+	return (int) msm_sensor_config(&ov2720_s_ctrl, argp);
+}
+
+static int ov2720_power_down(const struct msm_camera_sensor_info *data)
+{
+	pr_err("%s\n", __func__);
+	gpio_set_value_cansleep(data->sensor_reset, 0);
+	gpio_free(data->sensor_reset);
+	return 0;
+}
+
+static int ov2720_power_up(const struct msm_camera_sensor_info *data)
+{
+	int32_t rc = 0;
+	pr_err("%s: %d\n", __func__, __LINE__);
+	msm_camio_clk_rate_set(MSM_SENSOR_MCLK_24HZ);
+	rc = gpio_request(data->sensor_reset, SENSOR_NAME);
+	if (rc < 0)
+		goto gpio_request_fail;
+
+	pr_err("%s: reset sensor\n", __func__);
+	gpio_direction_output(data->sensor_reset, 0);
+	msleep(50);
+	gpio_set_value_cansleep(data->sensor_reset, 1);
+	msleep(50);
+
+	rc = msm_sensor_match_id(&ov2720_s_ctrl);
+	if (rc < 0)
+		goto init_probe_fail;
+
+	goto init_probe_done;
+gpio_request_fail:
+	pr_err("%s: gpio request fail\n", __func__);
+	return rc;
+init_probe_fail:
+	pr_err(" %s fails\n", __func__);
+	ov2720_power_down(data);
+	return rc;
+init_probe_done:
+	pr_err("%s finishes\n", __func__);
+	return rc;
+}
+
+static int ov2720_sensor_open_init(const struct msm_camera_sensor_info *data)
+{
+	int32_t rc = 0;
+	pr_err("%s: %d\n", __func__, __LINE__);
+	ov2720_s_ctrl.fps = 30*Q8;
+	ov2720_s_ctrl.fps_divider = 1 * 0x00000400;
+	ov2720_s_ctrl.cam_mode = MSM_SENSOR_MODE_INVALID;
+
+	if (data)
+		ov2720_s_ctrl.sensordata = data;
+
+	rc = ov2720_power_up(data);
+
+	pr_err("%s finishes\n", __func__);
+	return rc;
+}
+
+static int ov2720_sensor_release(void)
+{
+	mutex_lock(ov2720_s_ctrl.msm_sensor_mutex);
+	gpio_set_value_cansleep(ov2720_s_ctrl.sensordata->sensor_reset, 0);
+	msleep(20);
+	gpio_free(ov2720_s_ctrl.sensordata->sensor_reset);
+	mutex_unlock(ov2720_s_ctrl.msm_sensor_mutex);
+	pr_err("%s completed\n", __func__);
+	return 0;
+}
+
+static const struct i2c_device_id ov2720_i2c_id[] = {
+	{SENSOR_NAME, (kernel_ulong_t)&ov2720_s_ctrl},
+	{ }
+};
+
+static struct i2c_driver ov2720_i2c_driver = {
+	.id_table = ov2720_i2c_id,
+	.probe  = msm_sensor_i2c_probe,
+	.driver = {
+		.name = SENSOR_NAME,
+	},
+};
+
+static int ov2720_sensor_v4l2_probe(const struct msm_camera_sensor_info *info,
+	struct v4l2_subdev *sdev, struct msm_sensor_ctrl *s)
+{
+	return msm_sensor_v4l2_probe(&ov2720_s_ctrl, info, sdev, s);
+}
+
+static int ov2720_probe(struct platform_device *pdev)
+{
+	return msm_sensor_register(pdev, ov2720_sensor_v4l2_probe);
+}
+
+struct platform_driver ov2720_driver = {
+	.probe = ov2720_probe,
+	.driver = {
+		.name = PLATFORM_DRIVER_NAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init msm_sensor_init_module(void)
+{
+	return platform_driver_register(&ov2720_driver);
+}
+
+static struct v4l2_subdev_core_ops ov2720_subdev_core_ops;
+static struct v4l2_subdev_video_ops ov2720_subdev_video_ops = {
+	.enum_mbus_fmt = msm_sensor_v4l2_enum_fmt,
+};
+
+static struct v4l2_subdev_ops ov2720_subdev_ops = {
+	.core = &ov2720_subdev_core_ops,
+	.video  = &ov2720_subdev_video_ops,
+};
+
+static struct msm_sensor_ctrl_t ov2720_s_ctrl = {
+	.msm_sensor_reg = {
+		.start_stream_conf = ov2720_start_settings,
+		.start_stream_conf_size = ARRAY_SIZE(ov2720_start_settings),
+		.stop_stream_conf = ov2720_stop_settings,
+		.stop_stream_conf_size = ARRAY_SIZE(ov2720_stop_settings),
+		.group_hold_on_conf = ov2720_groupon_settings,
+		.group_hold_on_conf_size = ARRAY_SIZE(ov2720_groupon_settings),
+		.group_hold_off_conf = ov2720_groupoff_settings,
+		.group_hold_off_conf_size =
+			ARRAY_SIZE(ov2720_groupoff_settings),
+		.init_settings = &ov2720_init_conf[0],
+		.init_size = ARRAY_SIZE(ov2720_init_conf),
+		.res_settings = &ov2720_confs[0],
+		.num_conf = ARRAY_SIZE(ov2720_confs),
+	},
+	.sensor_id_addr = 0x300A,
+	.sensor_id = 0x2720,
+	.frame_length_lines_addr = 0x380e,
+	.coarse_int_time_addr = 0x3501,
+	.global_gain_addr = 0x3508,
+	.line_length_pck_addr = 0x380c,
+	.frame_length_lines_array_addr = 14,
+	.line_length_pck_array_addr = 12,
+	.vert_offset = 6,
+	.cam_mode = MSM_SENSOR_MODE_INVALID,
+	.camera_type = FRONT_CAMERA_2D,
+	.config_csi_flag = 1,
+	.csi_params = &ov2720_csi_params,
+	.msm_sensor_mutex = &ov2720_mut,
+	.msm_sensor_i2c_driver = &ov2720_i2c_driver,
+	.sensor_v4l2_subdev_info = ov2720_subdev_info,
+	.sensor_v4l2_subdev_info_size = ARRAY_SIZE(ov2720_subdev_info),
+	.sensor_v4l2_subdev_ops = &ov2720_subdev_ops,
+
+	.func_tbl = {
+		.sensor_start_stream = msm_sensor_start_stream,
+		.sensor_stop_stream = msm_sensor_stop_stream,
+		.sensor_group_hold_on = msm_sensor_group_hold_on,
+		.sensor_group_hold_off = msm_sensor_group_hold_off,
+		.sensor_get_prev_lines_pf = msm_sensor_get_prev_lines_pf,
+		.sensor_get_prev_pixels_pl = msm_sensor_get_prev_pixels_pl,
+		.sensor_get_pict_lines_pf = msm_sensor_get_pict_lines_pf,
+		.sensor_get_pict_pixels_pl = msm_sensor_get_pict_pixels_pl,
+		.sensor_get_pict_max_exp_lc = msm_sensor_get_pict_max_exp_lc,
+		.sensor_get_pict_fps = msm_sensor_get_pict_fps,
+		.sensor_set_fps = msm_sensor_set_fps,
+		.sensor_write_exp_gain = ov2720_write_exp_gain,
+		.sensor_setting = ov2720_sensor_setting,
+		.sensor_set_sensor_mode = msm_sensor_set_sensor_mode_b,
+		.sensor_mode_init = msm_sensor_mode_init_bdata,
+		.sensor_config = ov2720_sensor_config,
+		.sensor_open_init = ov2720_sensor_open_init,
+		.sensor_release = ov2720_sensor_release,
+		.sensor_power_up = ov2720_power_up,
+		.sensor_power_down = ov2720_power_down,
+		.sensor_probe = msm_sensor_probe,
+	},
+};
+
+module_init(msm_sensor_init_module);
+MODULE_DESCRIPTION("Omnivision 2MP Bayer sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/msm/ov2720.h b/drivers/media/video/msm/ov2720.h
new file mode 100644
index 0000000..7077a7d
--- /dev/null
+++ b/drivers/media/video/msm/ov2720.h
@@ -0,0 +1,16 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <mach/board.h>
+extern struct platform_driver ov2720_driver;
+
diff --git a/drivers/media/video/msm/ov2720_reg.c b/drivers/media/video/msm/ov2720_reg.c
new file mode 100644
index 0000000..bf094a5
--- /dev/null
+++ b/drivers/media/video/msm/ov2720_reg.c
@@ -0,0 +1,123 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "ov2720.h"
+
+struct ov2720_i2c_reg_conf ov2720_prev_settings[] = {
+	{0x3800, 0x00},
+	{0x3801, 0x0c},
+	{0x3802, 0x00},
+	{0x3803, 0x02},
+	{0x3804, 0x07},
+	{0x3805, 0x97},
+	{0x3806, 0x04},
+	{0x3807, 0x45},
+	{0x3808, 0x07},
+	{0x3809, 0x80},
+	{0x380a, 0x04},
+	{0x380b, 0x38},
+	{0x380c, 0x08},/*Line Length Pclk Hi*/
+	{0x380d, 0x5c},/*Line Length Pclk Lo*/
+	{0x380e, 0x04},/*Frame Length Line Hi*/
+	{0x380f, 0x60},/*Frame Length Line Lo*/
+	{0x3810, 0x00},
+	{0x3811, 0x05},
+	{0x3812, 0x00},
+	{0x3813, 0x06},
+	{0x3820, 0x80},
+	{0x3821, 0x06},
+	{0x3814, 0x11},
+	{0x3815, 0x11},
+	{0x3612, 0x0b},
+	{0x3618, 0x04},
+	{0x3a08, 0x01},
+	{0x3a09, 0x50},
+	{0x3a0a, 0x01},
+	{0x3a0b, 0x18},
+	{0x3a0d, 0x03},
+	{0x3a0e, 0x03},
+	{0x4520, 0x00},
+	{0x4837, 0x1b},
+	{0x3000, 0xff},
+	{0x3001, 0xff},
+	{0x3002, 0xf0},
+	{0x3600, 0x08},
+	{0x3621, 0xc0},
+	{0x3632, 0xd2},
+	{0x3633, 0x23},
+	{0x3634, 0x54},
+	{0x3f01, 0x0c},
+	{0x5001, 0xc1},
+	{0x3614, 0xf0},
+	{0x3630, 0x2d},
+	{0x370b, 0x62},
+	{0x3706, 0x61},
+	{0x4000, 0x02},
+	{0x4002, 0xc5},
+	{0x4005, 0x08},
+	{0x404f, 0x84},
+	{0x4051, 0x00},
+	{0x5000, 0xff},
+	{0x3a18, 0x00},
+	{0x3a19, 0x80},
+	{0x3503, 0x00},
+	{0x4521, 0x00},
+	{0x5183, 0xb0},
+	{0x5184, 0xb0},
+	{0x5185, 0xb0},
+	{0x370c, 0x0c},
+};
+
+struct ov2720_i2c_reg_conf ov2720_recommend_settings[] = {
+	{0x0103, 0x01},
+	{0x3718, 0x10},
+	{0x3702, 0x24},
+	{0x373a, 0x60},
+	{0x3715, 0x01},
+	{0x3703, 0x2e},
+	{0x3705, 0x10},
+	{0x3730, 0x30},
+	{0x3704, 0x62},
+	{0x3f06, 0x3a},
+	{0x371c, 0x00},
+	{0x371d, 0xc4},
+	{0x371e, 0x01},
+	{0x371f, 0x0d},
+	{0x3708, 0x61},
+	{0x3709, 0x12},
+	{0x3035, 0x10},
+	{0x3036, 0x1e},
+	{0x3037, 0x21},
+	{0x303e, 0x19},
+	{0x3038, 0x06},
+	{0x3018, 0x04},
+	{0x3000, 0x00},
+	{0x3001, 0x00},
+	{0x3002, 0x00},
+	{0x3a0f, 0x40},
+	{0x3a10, 0x38},
+	{0x3a1b, 0x48},
+	{0x3a1e, 0x30},
+	{0x3a11, 0x90},
+	{0x3a1f, 0x10},
+};
+
+struct ov2720_i2c_conf_array ov2720_confs[] = {
+	{&ov2720_prev_settings[0], ARRAY_SIZE(ov2720_prev_settings)},
+};
+
+struct ov2720_reg ov2720_regs = {
+	.rec_settings = &ov2720_recommend_settings[0],
+	.rec_size = ARRAY_SIZE(ov2720_recommend_settings),
+	.conf_array = &ov2720_confs[0],
+};
diff --git a/drivers/media/video/msm/ov7692.c b/drivers/media/video/msm/ov7692.c
new file mode 100644
index 0000000..7372156
--- /dev/null
+++ b/drivers/media/video/msm/ov7692.c
@@ -0,0 +1,595 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <media/msm_camera.h>
+#include <mach/camera.h>
+#include "ov7692.h"
+
+/*=============================================================
+	SENSOR REGISTER DEFINES
+==============================================================*/
+#define Q8    0x00000100
+
+/* OV7692 product ID register addresses */
+#define REG_OV7692_MODEL_ID_MSB                       0x0A
+#define REG_OV7692_MODEL_ID_LSB                       0x0B
+
+#define OV7692_MODEL_ID                       0x7692
+/* OV7692 product ID */
+
+/* Time in milliseconds to wait for the sensor to reset */
+#define OV7692_RESET_DELAY_MSECS    66
+#define OV7692_DEFAULT_CLOCK_RATE   24000000
+/* Registers*/
+
+/* Color bar pattern selection */
+#define OV7692_COLOR_BAR_PATTERN_SEL_REG     0x82
+/* Color bar enabling control */
+#define OV7692_COLOR_BAR_ENABLE_REG           0x601
+
+/*============================================================================
+							DATA DECLARATIONS
+============================================================================*/
+/*  96MHz PCLK @ 24MHz MCLK */
+struct reg_addr_val_pair_struct ov7692_init_settings_array[] = {
+    {0x12, 0x80},
+    {0x0e, 0x08},
+    {0x69, 0x52},
+    {0x1e, 0xb3},
+    {0x48, 0x42},
+    {0xff, 0x01},
+    {0xae, 0xa0},
+    {0xa8, 0x26},
+    {0xb4, 0xc0},
+    {0xb5, 0x40},
+    {0xff, 0x00},
+    {0x0c, 0x00},
+    {0x62, 0x10},
+    {0x12, 0x00},
+    {0x17, 0x65},
+    {0x18, 0xa4},
+    {0x19, 0x0a},
+    {0x1a, 0xf6},
+    {0x3e, 0x30},
+    {0x64, 0x0a},
+    {0xff, 0x01},
+    {0xb4, 0xc0},
+    {0xff, 0x00},
+    {0x67, 0x20},
+    {0x81, 0x3f},
+    {0xcc, 0x02},
+    {0xcd, 0x80},
+    {0xce, 0x01},
+    {0xcf, 0xe0},
+    {0xc8, 0x02},
+    {0xc9, 0x80},
+    {0xca, 0x01},
+    {0xcb, 0xe0},
+    {0xd0, 0x48},
+    {0x82, 0x03},
+    {0x0e, 0x00},
+    {0x70, 0x00},
+    {0x71, 0x34},
+    {0x74, 0x28},
+    {0x75, 0x98},
+    {0x76, 0x00},
+    {0x77, 0x64},
+    {0x78, 0x01},
+    {0x79, 0xc2},
+    {0x7a, 0x4e},
+    {0x7b, 0x1f},
+    {0x7c, 0x00},
+    {0x11, 0x00},
+    {0x20, 0x00},
+    {0x21, 0x23},
+    {0x50, 0x9a},
+    {0x51, 0x80},
+    {0x4c, 0x7d},
+    {0x0e, 0x00},
+    {0x80, 0x7f},
+    {0x85, 0x10},
+    {0x86, 0x00},
+    {0x87, 0x00},
+    {0x88, 0x00},
+    {0x89, 0x2a},
+    {0x8a, 0x26},
+    {0x8b, 0x22},
+    {0xbb, 0x7a},
+    {0xbc, 0x69},
+    {0xbd, 0x11},
+    {0xbe, 0x13},
+    {0xbf, 0x81},
+    {0xc0, 0x96},
+    {0xc1, 0x1e},
+    {0xb7, 0x05},
+    {0xb8, 0x09},
+    {0xb9, 0x00},
+    {0xba, 0x18},
+    {0x5a, 0x1f},
+    {0x5b, 0x9f},
+    {0x5c, 0x6a},
+    {0x5d, 0x42},
+    {0x24, 0x78},
+    {0x25, 0x68},
+    {0x26, 0xb3},
+    {0xa3, 0x0b},
+    {0xa4, 0x15},
+    {0xa5, 0x2a},
+    {0xa6, 0x51},
+    {0xa7, 0x63},
+    {0xa8, 0x74},
+    {0xa9, 0x83},
+    {0xaa, 0x91},
+    {0xab, 0x9e},
+    {0xac, 0xaa},
+    {0xad, 0xbe},
+    {0xae, 0xce},
+    {0xaf, 0xe5},
+    {0xb0, 0xf3},
+    {0xb1, 0xfb},
+    {0xb2, 0x06},
+    {0x8c, 0x5c},
+    {0x8d, 0x11},
+    {0x8e, 0x12},
+    {0x8f, 0x19},
+    {0x90, 0x50},
+    {0x91, 0x20},
+    {0x92, 0x96},
+    {0x93, 0x80},
+    {0x94, 0x13},
+    {0x95, 0x1b},
+    {0x96, 0xff},
+    {0x97, 0x00},
+    {0x98, 0x3d},
+    {0x99, 0x36},
+    {0x9a, 0x51},
+    {0x9b, 0x43},
+    {0x9c, 0xf0},
+    {0x9d, 0xf0},
+    {0x9e, 0xf0},
+    {0x9f, 0xff},
+    {0xa0, 0x68},
+    {0xa1, 0x62},
+    {0xa2, 0x0e},
+};
+
+static bool OV7692_CSI_CONFIG;
+/* VGA (640x480), 24MHz MCLK, 96MHz PCLK */
+uint32_t OV7692_FULL_SIZE_WIDTH        = 640;
+uint32_t OV7692_FULL_SIZE_HEIGHT       = 480;
+
+uint32_t OV7692_QTR_SIZE_WIDTH         = 640;
+uint32_t OV7692_QTR_SIZE_HEIGHT        = 480;
+
+uint32_t OV7692_HRZ_FULL_BLK_PIXELS    = 16;
+uint32_t OV7692_VER_FULL_BLK_LINES     = 12;
+uint32_t OV7692_HRZ_QTR_BLK_PIXELS     = 16;
+uint32_t OV7692_VER_QTR_BLK_LINES      = 12;
+
+struct ov7692_work_t {
+	struct work_struct work;
+};
+static struct  ov7692_work_t *ov7692_sensorw;
+static struct  i2c_client *ov7692_client;
+struct ov7692_ctrl_t {
+	const struct  msm_camera_sensor_info *sensordata;
+	uint32_t sensormode;
+	uint32_t fps_divider;		/* init to 1 * 0x00000400 */
+	uint32_t pict_fps_divider;	/* init to 1 * 0x00000400 */
+	uint32_t fps;
+	int32_t  curr_lens_pos;
+	uint32_t curr_step_pos;
+	uint32_t my_reg_gain;
+	uint32_t my_reg_line_count;
+	uint32_t total_lines_per_frame;
+	enum ov7692_resolution_t prev_res;
+	enum ov7692_resolution_t pict_res;
+	enum ov7692_resolution_t curr_res;
+	enum ov7692_test_mode_t  set_test;
+	unsigned short imgaddr;
+};
+static struct ov7692_ctrl_t *ov7692_ctrl;
+static DECLARE_WAIT_QUEUE_HEAD(ov7692_wait_queue);
+DEFINE_MUTEX(ov7692_mut);
+
+/*=============================================================*/
+
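+/*
+ * SCCB-style single-byte register read: the first i2c_msg writes the one-byte
+ * register address, the second reads one byte back into the same buffer (the
+ * length argument is effectively ignored).  Callers pass the 7-bit slave
+ * address, i.e. ov7692_client->addr >> 1.
+ */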
+static int ov7692_i2c_rxdata(unsigned short saddr,
+	unsigned char *rxdata, int length)
+{
+	struct i2c_msg msgs[] = {
+		{
+			.addr  = saddr,
+			.flags = 0,
+			.len   = 1,
+			.buf   = rxdata,
+		},
+		{
+			.addr  = saddr,
+			.flags = I2C_M_RD,
+			.len   = 1,
+			.buf   = rxdata,
+		},
+	};
+	if (i2c_transfer(ov7692_client->adapter, msgs, 2) < 0) {
+		CDBG("ov7692_i2c_rxdata failed!\n");
+		return -EIO;
+	}
+	return 0;
+}
+static int32_t ov7692_i2c_txdata(unsigned short saddr,
+				unsigned char *txdata, int length)
+{
+	struct i2c_msg msg[] = {
+		{
+			.addr = saddr,
+			.flags = 0,
+			.len = 2,
+			.buf = txdata,
+		 },
+	};
+	if (i2c_transfer(ov7692_client->adapter, msg, 1) < 0) {
+		CDBG("ov7692_i2c_txdata faild 0x%x\n", ov7692_client->addr);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int32_t ov7692_i2c_read(uint8_t raddr,
+	uint8_t *rdata, int rlen)
+{
+	int32_t rc = 0;
+	unsigned char buf[1];
+	if (!rdata)
+		return -EIO;
+	memset(buf, 0, sizeof(buf));
+	buf[0] = raddr;
+	rc = ov7692_i2c_rxdata(ov7692_client->addr >> 1, buf, rlen);
+	if (rc < 0) {
+		CDBG("ov7692_i2c_read 0x%x failed!\n", raddr);
+		return rc;
+	}
+	*rdata = buf[0];
+	return rc;
+}
+static int32_t ov7692_i2c_write_b_sensor(uint8_t waddr, uint8_t bdata)
+{
+	int32_t rc = -EFAULT;
+	unsigned char buf[2];
+	memset(buf, 0, sizeof(buf));
+	buf[0] = waddr;
+	buf[1] = bdata;
+	CDBG("i2c_write_b addr = 0x%x, val = 0x%x\n", waddr, bdata);
+	rc = ov7692_i2c_txdata(ov7692_client->addr >> 1, buf, 2);
+	if (rc < 0)
+		CDBG("i2c_write_b failed, addr = 0x%x, val = 0x%x!\n",
+			waddr, bdata);
+	return rc;
+}
+
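+/*
+ * Two-stage sensor programming: REG_INIT only clears the one-shot CSI flag
+ * and writes register 0x0e (presumably the sensor's sleep/standby control),
+ * while UPDATE_PERIODIC configures the MIPI CSI receiver for a single 8-bit
+ * lane and then downloads the whole init table.  Later UPDATE_PERIODIC calls
+ * are no-ops while OV7692_CSI_CONFIG stays set.
+ */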
+static int32_t ov7692_sensor_setting(int update_type, int rt)
+{
+	int32_t i, array_length;
+	int32_t rc = 0;
+	struct msm_camera_csi_params ov7692_csi_params;
+	switch (update_type) {
+	case REG_INIT:
+		OV7692_CSI_CONFIG = 0;
+		ov7692_i2c_write_b_sensor(0x0e, 0x08);
+		return rc;
+		break;
+	case UPDATE_PERIODIC:
+		if (!OV7692_CSI_CONFIG) {
+			ov7692_csi_params.lane_cnt = 1;
+			ov7692_csi_params.data_format = CSI_8BIT;
+			ov7692_csi_params.lane_assign = 0xe4;
+			ov7692_csi_params.dpcm_scheme = 0;
+			ov7692_csi_params.settle_cnt = 0x14;
+
+			rc = msm_camio_csi_config(&ov7692_csi_params);
+			msleep(10);
+			array_length = sizeof(ov7692_init_settings_array) /
+				sizeof(ov7692_init_settings_array[0]);
+			for (i = 0; i < array_length; i++) {
+				rc = ov7692_i2c_write_b_sensor(
+					ov7692_init_settings_array[i].reg_addr,
+					ov7692_init_settings_array[i].reg_val);
+				if (rc < 0)
+					return rc;
+			}
+			OV7692_CSI_CONFIG = 1;
+			msleep(20);
+			return rc;
+		}
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+static int32_t ov7692_video_config(int mode)
+{
+	int32_t rc = 0;
+	int rt;
+	/* change sensor resolution if needed */
+	rt = RES_PREVIEW;
+
+	rc = ov7692_sensor_setting(UPDATE_PERIODIC, rt);
+	if (rc < 0)
+		return rc;
+	ov7692_ctrl->curr_res = ov7692_ctrl->prev_res;
+	ov7692_ctrl->sensormode = mode;
+	return rc;
+}
+
+static int32_t ov7692_set_sensor_mode(int mode,
+	int res)
+{
+	int32_t rc = 0;
+	switch (mode) {
+	case SENSOR_PREVIEW_MODE:
+		rc = ov7692_video_config(mode);
+		break;
+	case SENSOR_SNAPSHOT_MODE:
+	case SENSOR_RAW_SNAPSHOT_MODE:
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+static int32_t ov7692_power_down(void)
+{
+	return 0;
+}
+
+static int ov7692_probe_init_sensor(const struct msm_camera_sensor_info *data)
+{
+	uint8_t model_id_msb, model_id_lsb = 0;
+	uint16_t model_id;
+	int32_t rc = 0;
+	/*The reset pin is not physically connected to the sensor.
+	The standby pin will do the reset hence there is no need
+	to request the gpio reset*/
+
+	/* Read sensor Model ID: */
+	rc = ov7692_i2c_read(REG_OV7692_MODEL_ID_MSB, &model_id_msb, 1);
+	if (rc < 0)
+		goto init_probe_fail;
+	rc = ov7692_i2c_read(REG_OV7692_MODEL_ID_LSB, &model_id_lsb, 1);
+	if (rc < 0)
+		goto init_probe_fail;
+	model_id = (model_id_msb << 8) | (model_id_lsb & 0x00FF);
+	CDBG("ov7692 model_id = 0x%x, 0x%x, 0x%x\n",
+		 model_id, model_id_msb, model_id_lsb);
+	/* 4. Compare sensor ID to OV7692 ID: */
+	if (model_id != OV7692_MODEL_ID) {
+		rc = -ENODEV;
+		goto init_probe_fail;
+	}
+	goto init_probe_done;
+init_probe_fail:
+	pr_warning(" ov7692_probe_init_sensor fails\n");
+init_probe_done:
+	CDBG(" ov7692_probe_init_sensor finishes\n");
+	return rc;
+}
+
+int ov7692_sensor_open_init(const struct msm_camera_sensor_info *data)
+{
+	int32_t rc = 0;
+
+	CDBG("%s: %d\n", __func__, __LINE__);
+	CDBG("Calling ov7692_sensor_open_init\n");
+	ov7692_ctrl = kzalloc(sizeof(struct ov7692_ctrl_t), GFP_KERNEL);
+	if (!ov7692_ctrl) {
+		CDBG("ov7692_init failed!\n");
+		rc = -ENOMEM;
+		goto init_done;
+	}
+	ov7692_ctrl->fps_divider = 1 * 0x00000400;
+	ov7692_ctrl->pict_fps_divider = 1 * 0x00000400;
+	ov7692_ctrl->fps = 30 * Q8;
+	ov7692_ctrl->set_test = TEST_OFF;
+	ov7692_ctrl->prev_res = QTR_SIZE;
+	ov7692_ctrl->pict_res = FULL_SIZE;
+	ov7692_ctrl->curr_res = INVALID_SIZE;
+
+	if (data)
+		ov7692_ctrl->sensordata = data;
+
+	/* enable mclk first */
+
+	msm_camio_clk_rate_set(24000000);
+	msleep(20);
+
+	rc = ov7692_probe_init_sensor(data);
+	if (rc < 0) {
+		CDBG("Calling ov7692_sensor_open_init fail\n");
+		goto init_fail;
+	}
+
+	rc = ov7692_sensor_setting(REG_INIT, RES_PREVIEW);
+	if (rc < 0)
+		goto init_fail;
+	else
+		goto init_done;
+
+init_fail:
+	CDBG(" ov7692_sensor_open_init fail\n");
+	kfree(ov7692_ctrl);
+init_done:
+	CDBG("ov7692_sensor_open_init done\n");
+	return rc;
+}
+
+static int ov7692_init_client(struct i2c_client *client)
+{
+	/* Initialize the MSM_CAMI2C Chip */
+	init_waitqueue_head(&ov7692_wait_queue);
+	return 0;
+}
+
+static const struct i2c_device_id ov7692_i2c_id[] = {
+	{"ov7692", 0},
+	{ }
+};
+
+static int ov7692_i2c_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	int rc = 0;
+	CDBG("ov7692_i2c_probe called!\n");
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		CDBG("i2c_check_functionality failed\n");
+		goto probe_failure;
+	}
+
+	ov7692_sensorw = kzalloc(sizeof(struct ov7692_work_t), GFP_KERNEL);
+	if (!ov7692_sensorw) {
+		CDBG("kzalloc failed.\n");
+		rc = -ENOMEM;
+		goto probe_failure;
+	}
+
+	i2c_set_clientdata(client, ov7692_sensorw);
+	ov7692_init_client(client);
+	ov7692_client = client;
+
+	CDBG("ov7692_i2c_probe success! rc = %d\n", rc);
+	return 0;
+
+probe_failure:
+	CDBG("ov7692_i2c_probe failed! rc = %d\n", rc);
+	return rc;
+}
+
+static int __exit ov7692_i2c_remove(struct i2c_client *client)
+{
+	struct ov7692_work_t *sensorw = i2c_get_clientdata(client);
+	free_irq(client->irq, sensorw);
+	ov7692_client = NULL;
+	kfree(sensorw);
+	return 0;
+}
+
+static struct i2c_driver ov7692_i2c_driver = {
+	.id_table = ov7692_i2c_id,
+	.probe  = ov7692_i2c_probe,
+	.remove = __exit_p(ov7692_i2c_remove),
+	.driver = {
+		.name = "ov7692",
+	},
+};
+
+int ov7692_sensor_config(void __user *argp)
+{
+	struct sensor_cfg_data cdata;
+	long   rc = 0;
+	if (copy_from_user(&cdata,
+		(void *)argp,
+		sizeof(struct sensor_cfg_data)))
+		return -EFAULT;
+	mutex_lock(&ov7692_mut);
+	CDBG("ov7692_sensor_config: cfgtype = %d\n",
+	cdata.cfgtype);
+	switch (cdata.cfgtype) {
+	case CFG_SET_MODE:
+		rc = ov7692_set_sensor_mode(cdata.mode,
+			cdata.rs);
+		break;
+	case CFG_PWR_DOWN:
+		rc = ov7692_power_down();
+		break;
+	default:
+		rc = -EFAULT;
+		break;
+	}
+
+	mutex_unlock(&ov7692_mut);
+
+	return rc;
+}
+static int ov7692_sensor_release(void)
+{
+	int rc = -EBADF;
+	mutex_lock(&ov7692_mut);
+	ov7692_power_down();
+	kfree(ov7692_ctrl);
+	ov7692_ctrl = NULL;
+	CDBG("ov7692_release completed\n");
+	mutex_unlock(&ov7692_mut);
+
+	return rc;
+}
+
+static int ov7692_sensor_probe(const struct msm_camera_sensor_info *info,
+		struct msm_sensor_ctrl *s)
+{
+	int rc = 0;
+	rc = i2c_add_driver(&ov7692_i2c_driver);
+	if (rc < 0 || ov7692_client == NULL) {
+		rc = -ENOTSUPP;
+		goto probe_fail;
+	}
+	msm_camio_clk_rate_set(24000000);
+	rc = ov7692_probe_init_sensor(info);
+	if (rc < 0)
+		goto probe_fail;
+	s->s_init = ov7692_sensor_open_init;
+	s->s_release = ov7692_sensor_release;
+	s->s_config  = ov7692_sensor_config;
+	s->s_camera_type = FRONT_CAMERA_2D;
+	s->s_mount_angle = 0;
+	return rc;
+
+probe_fail:
+	CDBG("ov7692_sensor_probe: SENSOR PROBE FAILS!\n");
+	i2c_del_driver(&ov7692_i2c_driver);
+	return rc;
+}
+
+static int __ov7692_probe(struct platform_device *pdev)
+{
+
+	return msm_camera_drv_start(pdev, ov7692_sensor_probe);
+}
+
+static struct platform_driver msm_camera_driver = {
+	.probe = __ov7692_probe,
+	.driver = {
+		.name = "msm_camera_ov7692",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init ov7692_init(void)
+{
+	return platform_driver_register(&msm_camera_driver);
+}
+
+module_init(ov7692_init);
+
+MODULE_DESCRIPTION("OMNI VGA YUV sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/msm/ov7692.h b/drivers/media/video/msm/ov7692.h
new file mode 100644
index 0000000..e43a17d
--- /dev/null
+++ b/drivers/media/video/msm/ov7692.h
@@ -0,0 +1,50 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef OV7692_H
+#define OV7692_H
+#include <linux/types.h>
+#include <mach/board.h>
+
+struct reg_addr_val_pair_struct {
+	uint8_t	reg_addr;
+	uint8_t	reg_val;
+};
+
+enum ov7692_test_mode_t {
+	TEST_OFF,
+	TEST_1,
+	TEST_2,
+	TEST_3
+};
+
+enum ov7692_resolution_t {
+	QTR_SIZE,
+	FULL_SIZE,
+	INVALID_SIZE
+};
+
+enum ov7692_setting {
+	RES_PREVIEW,
+	RES_CAPTURE
+};
+enum ov7692_reg_update {
+	/* Sensor registers that need to be updated during initialization */
+	REG_INIT,
+	/* Sensor registers that need periodic I2C writes */
+	UPDATE_PERIODIC,
+	/* All the sensor Registers will be updated */
+	UPDATE_ALL,
+	/* Not valid update */
+	UPDATE_INVALID
+};
+#endif
+
diff --git a/drivers/media/video/msm/ov9726.c b/drivers/media/video/msm/ov9726.c
new file mode 100644
index 0000000..fc04558
--- /dev/null
+++ b/drivers/media/video/msm/ov9726.c
@@ -0,0 +1,792 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <media/msm_camera.h>
+#include <mach/gpio.h>
+#include <mach/camera.h>
+#include "ov9726.h"
+
+/*=============================================================
+	SENSOR REGISTER DEFINES
+==============================================================*/
+#define OV9726_Q8				0x00000100
+#define OV9726_Q8Shift				8
+#define OV9726_Q10				0x00000400
+#define OV9726_Q10Shift				10
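+
+/*
+ * Fixed-point scaling used throughout this driver: Q8 means 1.0 == 1 << 8
+ * (0x100), Q10 means 1.0 == 1 << 10 (0x400).  For example, 30 fps is stored
+ * as 30 << OV9726_Q8Shift == 0x1e00, and an fps divider of exactly 1.0 is
+ * 1 << OV9726_Q10Shift == 0x400.
+ */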
+
+/* OV9726 product ID register address */
+#define	OV9726_PIDH_REG				0x0000
+#define	OV9726_PIDL_REG				0x0001
+/* OV9726 product ID */
+#define	OV9726_PID				0x97
+/* OV9726 version */
+#define	OV9726_VER				0x26
+/* Time in millisecs for waiting for the sensor to reset */
+#define	OV9726_RESET_DELAY_MSECS		66
+#define	OV9726_DEFAULT_CLOCK_RATE		24000000
+/* Registers*/
+#define	OV9726_GAIN				0x3000
+#define	OV9726_AEC_MSB				0x3002
+#define	OV9726_AEC_LSB				0x3003
+
+/* Color bar pattern selection */
+#define OV9726_COLOR_BAR_PATTERN_SEL_REG	0x600
+/* Color bar enabling control */
+#define OV9726_COLOR_BAR_ENABLE_REG		0x601
+/* I2C Address of the Sensor */
+/*============================================================================
+		DATA DECLARATIONS
+============================================================================*/
+#define OV9726_FULL_SIZE_DUMMY_PIXELS		0
+#define OV9726_FULL_SIZE_DUMMY_LINES		0
+#define OV9726_QTR_SIZE_DUMMY_PIXELS		0
+#define OV9726_QTR_SIZE_DUMMY_LINES		0
+
+#define OV9726_FULL_SIZE_WIDTH			1296
+#define OV9726_FULL_SIZE_HEIGHT			808
+
+#define OV9726_QTR_SIZE_WIDTH			1296
+#define OV9726_QTR_SIZE_HEIGHT			808
+
+#define OV9726_HRZ_FULL_BLK_PIXELS		368
+#define OV9726_VER_FULL_BLK_LINES		32
+#define OV9726_HRZ_QTR_BLK_PIXELS		368
+#define OV9726_VER_QTR_BLK_LINES		32
+
+#define OV9726_MSB_MASK			0xFF00
+#define OV9726_LSB_MASK			0x00FF
+
+struct ov9726_work_t {
+	struct work_struct work;
+};
+static struct ov9726_work_t *ov9726_sensorw;
+static struct i2c_client *ov9726_client;
+struct ov9726_ctrl_t {
+	const struct  msm_camera_sensor_info *sensordata;
+	uint32_t sensormode;
+	uint32_t fps_divider;		/* init to 1 * 0x00000400 */
+	uint32_t pict_fps_divider;	/* init to 1 * 0x00000400 */
+	uint16_t fps;
+	int16_t curr_lens_pos;
+	uint16_t curr_step_pos;
+	uint16_t my_reg_gain;
+	uint32_t my_reg_line_count;
+	uint16_t total_lines_per_frame;
+	enum ov9726_resolution_t prev_res;
+	enum ov9726_resolution_t pict_res;
+	enum ov9726_resolution_t curr_res;
+	enum ov9726_test_mode_t  set_test;
+	unsigned short imgaddr;
+};
+static struct ov9726_ctrl_t *ov9726_ctrl;
+static int8_t config_not_set = 1;
+static DECLARE_WAIT_QUEUE_HEAD(ov9726_wait_queue);
+DEFINE_MUTEX(ov9726_mut);
+
+/*=============================================================*/
+static int ov9726_i2c_rxdata(unsigned short saddr,
+	unsigned char *rxdata, int length)
+{
+	struct i2c_msg msgs[] = {
+	{
+		.addr  = saddr,
+		.flags = 0,
+		.len   = 2,
+		.buf   = rxdata,
+	},
+	{
+		.addr  = saddr,
+		.flags = I2C_M_RD,
+		.len   = length,
+		.buf   = rxdata,
+	},
+	};
+
+	if (i2c_transfer(ov9726_client->adapter, msgs, 2) < 0) {
+		CDBG("ov9726_i2c_rxdata failed!\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int32_t ov9726_i2c_txdata(unsigned short saddr,
+				unsigned char *txdata, int length)
+{
+	struct i2c_msg msg[] = {
+		{
+		 .addr = saddr ,
+		 .flags = 0,
+		 .len = length,
+		 .buf = txdata,
+		 },
+	};
+
+	if (i2c_transfer(ov9726_client->adapter, msg, 1) < 0) {
+		CDBG("ov9726_i2c_txdata faild 0x%x\n", ov9726_client->addr);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int32_t ov9726_i2c_read(unsigned short raddr,
+				unsigned short *rdata, int rlen)
+{
+	int32_t rc = 0;
+	unsigned char buf[2];
+
+	if (!rdata)
+		return -EIO;
+
+	buf[0] = (raddr & OV9726_MSB_MASK) >> 8;
+	buf[1] = (raddr & OV9726_LSB_MASK);
+
+	rc = ov9726_i2c_rxdata(ov9726_client->addr, buf, rlen);
+
+	if (rc < 0) {
+		CDBG("ov9726_i2c_read 0x%x failed!\n", raddr);
+		return rc;
+	}
+
+	*rdata = (rlen == 2 ? buf[0] << 8 | buf[1] : buf[0]);
+	return rc;
+}
+
+static int32_t ov9726_i2c_write_b(unsigned short saddr,
+	unsigned short waddr, uint8_t bdata)
+{
+	int32_t rc = -EFAULT;
+	unsigned char buf[3];
+
+	buf[0] = (waddr & OV9726_MSB_MASK) >> 8;
+	buf[1] = (waddr & OV9726_LSB_MASK);
+	buf[2] = bdata;
+
+	CDBG("i2c_write_b addr = 0x%x, val = 0x%xd\n", waddr, bdata);
+	rc = ov9726_i2c_txdata(saddr, buf, 3);
+
+	if (rc < 0) {
+		CDBG("i2c_write_b failed, addr = 0x%x, val = 0x%x!\n",
+			 waddr, bdata);
+	}
+
+	return rc;
+}
+
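+/*
+ * Scale the preview fps (Q8) down to the achievable snapshot fps using the
+ * ratio of total (active + blanking) frame dimensions in Q10:
+ *   divider  = (prev_height / snap_height) * (prev_width / snap_width)
+ *   pict_fps = (fps * divider) >> 10
+ * With the sizes defined above, QTR and FULL frames are identical
+ * ((1296 + 368) x (808 + 32)), so divider is 1 << 10 and the fps is returned
+ * unchanged.
+ */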
+static void ov9726_get_pict_fps(uint16_t fps, uint16_t *pfps)
+{
+	uint32_t divider;	/*Q10 */
+	uint32_t d1;
+	uint32_t d2;
+	uint16_t snapshot_height, preview_height, preview_width, snapshot_width;
+	if (ov9726_ctrl->prev_res == QTR_SIZE) {
+		preview_width = OV9726_QTR_SIZE_WIDTH +
+			OV9726_HRZ_QTR_BLK_PIXELS ;
+		preview_height = OV9726_QTR_SIZE_HEIGHT +
+			OV9726_VER_QTR_BLK_LINES ;
+	} else {
+		/* full size resolution used for preview. */
+		preview_width = OV9726_FULL_SIZE_WIDTH +
+			OV9726_HRZ_FULL_BLK_PIXELS ;
+		preview_height = OV9726_FULL_SIZE_HEIGHT +
+			OV9726_VER_FULL_BLK_LINES ;
+	}
+	if (ov9726_ctrl->pict_res == QTR_SIZE) {
+		snapshot_width  = OV9726_QTR_SIZE_WIDTH +
+			OV9726_HRZ_QTR_BLK_PIXELS ;
+		snapshot_height = OV9726_QTR_SIZE_HEIGHT +
+			OV9726_VER_QTR_BLK_LINES ;
+	} else {
+		snapshot_width  = OV9726_FULL_SIZE_WIDTH +
+			OV9726_HRZ_FULL_BLK_PIXELS;
+		snapshot_height = OV9726_FULL_SIZE_HEIGHT +
+			OV9726_VER_FULL_BLK_LINES;
+	}
+
+	d1 = (uint32_t)(((uint32_t)preview_height <<
+		OV9726_Q10Shift) /
+		snapshot_height);
+
+	d2 = (uint32_t)(((uint32_t)preview_width <<
+		OV9726_Q10Shift) /
+		 snapshot_width);
+
+	divider = (uint32_t) (d1 * d2) >> OV9726_Q10Shift;
+	*pfps = (uint16_t)((uint32_t)(fps * divider) >> OV9726_Q10Shift);
+}
+
+static uint16_t ov9726_get_prev_lines_pf(void)
+{
+	if (ov9726_ctrl->prev_res == QTR_SIZE)
+		return OV9726_QTR_SIZE_HEIGHT + OV9726_VER_QTR_BLK_LINES;
+	else
+		return OV9726_FULL_SIZE_HEIGHT + OV9726_VER_FULL_BLK_LINES;
+}
+
+static uint16_t ov9726_get_prev_pixels_pl(void)
+{
+	if (ov9726_ctrl->prev_res == QTR_SIZE)
+		return OV9726_QTR_SIZE_WIDTH + OV9726_HRZ_QTR_BLK_PIXELS;
+	else
+		return OV9726_FULL_SIZE_WIDTH + OV9726_HRZ_FULL_BLK_PIXELS;
+}
+
+static uint16_t ov9726_get_pict_lines_pf(void)
+{
+	if (ov9726_ctrl->pict_res == QTR_SIZE)
+		return OV9726_QTR_SIZE_HEIGHT + OV9726_VER_QTR_BLK_LINES;
+	else
+		return OV9726_FULL_SIZE_HEIGHT + OV9726_VER_FULL_BLK_LINES;
+}
+
+static uint16_t ov9726_get_pict_pixels_pl(void)
+{
+	if (ov9726_ctrl->pict_res == QTR_SIZE)
+		return OV9726_QTR_SIZE_WIDTH + OV9726_HRZ_QTR_BLK_PIXELS;
+	else
+		return OV9726_FULL_SIZE_WIDTH + OV9726_HRZ_FULL_BLK_PIXELS;
+}
+
+static uint32_t ov9726_get_pict_max_exp_lc(void)
+{
+	if (ov9726_ctrl->pict_res == QTR_SIZE)
+		return (OV9726_QTR_SIZE_HEIGHT + OV9726_VER_QTR_BLK_LINES)*24;
+	else
+		return (OV9726_FULL_SIZE_HEIGHT + OV9726_VER_FULL_BLK_LINES)*24;
+}
+
+static int32_t ov9726_set_fps(struct fps_cfg	*fps)
+{
+	int32_t rc = 0;
+	CDBG("%s: fps->fps_div = %d\n", __func__, fps->fps_div);
+	/* TODO: Passing of fps_divider from user space has issues. */
+	/* ov9726_ctrl->fps_divider = fps->fps_div; */
+	ov9726_ctrl->fps_divider = 1 * 0x400;
+	CDBG("%s: ov9726_ctrl->fps_divider = %d\n", __func__,
+		ov9726_ctrl->fps_divider);
+	ov9726_ctrl->pict_fps_divider = fps->pict_fps_div;
+	ov9726_ctrl->fps = fps->f_mult;
+	return rc;
+}
+
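+/*
+ * Program analog gain and coarse integration time.  If the requested line
+ * count exceeds frame_length_lines - 6, the line length (pixel clocks per
+ * line) is stretched by line_length_ratio (Q8) instead of overrunning the
+ * frame.  All writes are bracketed by register 0x0104, which appears to be
+ * the SMIA-style grouped parameter hold, so gain and exposure take effect in
+ * the same frame.
+ */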
+static int32_t ov9726_write_exp_gain(uint16_t gain, uint32_t line)
+{
+	static uint16_t max_legal_gain = 0x00FF;
+	uint8_t gain_msb, gain_lsb;
+	uint8_t intg_time_msb, intg_time_lsb;
+	uint8_t ov9726_offset = 6;
+	uint8_t line_length_pck_msb, line_length_pck_lsb;
+	uint16_t line_length_pck, frame_length_lines;
+	uint32_t line_length_ratio = 1 << OV9726_Q8Shift;
+	int32_t rc = -1;
+	CDBG("%s: gain = %d	line = %d", __func__, gain, line);
+
+	if (ov9726_ctrl->sensormode != SENSOR_SNAPSHOT_MODE) {
+		if (ov9726_ctrl->curr_res == QTR_SIZE) {
+			frame_length_lines = OV9726_QTR_SIZE_HEIGHT +
+			 OV9726_VER_QTR_BLK_LINES;
+			line_length_pck = OV9726_QTR_SIZE_WIDTH	+
+			 OV9726_HRZ_QTR_BLK_PIXELS;
+		} else {
+			frame_length_lines = OV9726_FULL_SIZE_HEIGHT +
+				OV9726_VER_FULL_BLK_LINES;
+			line_length_pck = OV9726_FULL_SIZE_WIDTH +
+				OV9726_HRZ_FULL_BLK_PIXELS;
+		}
+		if (line > (frame_length_lines - ov9726_offset))
+			ov9726_ctrl->fps = (uint16_t) (((uint32_t)30 <<
+				OV9726_Q8Shift) *
+				(frame_length_lines - ov9726_offset) / line);
+		else
+			ov9726_ctrl->fps = (uint16_t) ((uint32_t)30 <<
+				OV9726_Q8Shift);
+	} else {
+		frame_length_lines = OV9726_FULL_SIZE_HEIGHT +
+			OV9726_VER_FULL_BLK_LINES;
+		line_length_pck = OV9726_FULL_SIZE_WIDTH +
+			OV9726_HRZ_FULL_BLK_PIXELS;
+	}
+
+	if (ov9726_ctrl->sensormode != SENSOR_SNAPSHOT_MODE) {
+		line = (uint32_t) (line * ov9726_ctrl->fps_divider) >>
+			OV9726_Q10Shift;
+	} else {
+		line = (uint32_t) (line * ov9726_ctrl->pict_fps_divider) >>
+			OV9726_Q10Shift;
+	}
+
+	/* calculate line_length_ratio */
+	if (line > (frame_length_lines - ov9726_offset)) {
+		line_length_ratio = (line << OV9726_Q8Shift) /
+			(frame_length_lines - ov9726_offset);
+		line = frame_length_lines - ov9726_offset;
+	} else
+		line_length_ratio = (uint32_t)1 << OV9726_Q8Shift;
+
+	if (gain > max_legal_gain) {
+		/* range:	0	to 224 */
+		gain = max_legal_gain;
+	}
+	/* update	gain registers */
+	gain_msb = (uint8_t) ((gain & 0xFF00) >> 8);
+	gain_lsb = (uint8_t) (gain & 0x00FF);
+	/* linear	AFR	horizontal stretch */
+	line_length_pck = (uint16_t) ((line_length_pck *
+		line_length_ratio) >> OV9726_Q8Shift);
+	line_length_pck_msb = (uint8_t) ((line_length_pck & 0xFF00) >> 8);
+	line_length_pck_lsb = (uint8_t) (line_length_pck & 0x00FF);
+	/* update	line count registers */
+	intg_time_msb = (uint8_t) ((line & 0xFF00) >> 8);
+	intg_time_lsb = (uint8_t) (line	& 0x00FF);
+
+	rc = ov9726_i2c_write_b(ov9726_client->addr, 0x104, 0x1);
+	if (rc < 0)
+		return rc;
+
+	rc = ov9726_i2c_write_b(ov9726_client->addr, 0x204, gain_msb);
+	if (rc < 0)
+		return rc;
+
+	rc = ov9726_i2c_write_b(ov9726_client->addr, 0x205, gain_lsb);
+	if (rc < 0)
+		return rc;
+
+	rc = ov9726_i2c_write_b(ov9726_client->addr, 0x342,
+		line_length_pck_msb);
+	if (rc < 0)
+		return rc;
+
+	rc = ov9726_i2c_write_b(ov9726_client->addr, 0x343,
+		line_length_pck_lsb);
+	if (rc < 0)
+		return rc;
+
+	rc = ov9726_i2c_write_b(ov9726_client->addr, 0x0202, intg_time_msb);
+	if (rc < 0)
+		return rc;
+
+	rc = ov9726_i2c_write_b(ov9726_client->addr, 0x0203, intg_time_lsb);
+	if (rc < 0)
+		return rc;
+
+	rc = ov9726_i2c_write_b(ov9726_client->addr, 0x104, 0x0);
+	if (rc < 0)
+		return rc;
+
+	return rc;
+}
+
+static int32_t ov9726_set_pict_exp_gain(uint16_t gain, uint32_t line)
+{
+	int32_t rc = 0;
+	rc = ov9726_write_exp_gain(gain, line);
+	return rc;
+}
+
+static int32_t initialize_ov9726_registers(void)
+{
+	int32_t i;
+	int32_t rc = 0;
+	ov9726_ctrl->sensormode = SENSOR_PREVIEW_MODE;
+	/* Configure sensor for Preview mode and Snapshot mode */
+	CDBG("Initialize_ov9726_registers\n");
+	for (i = 0; i < ov9726_array_length; i++) {
+		rc = ov9726_i2c_write_b(ov9726_client->addr,
+			ov9726_init_settings_array[i].reg_addr,
+			ov9726_init_settings_array[i].reg_val);
+		if (rc < 0)
+			return rc;
+	}
+	return rc;
+}
+
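+/*
+ * Preview configuration is a one-shot: the first call places the sensor in
+ * standby (register 0x0100 = 0), programs the CSI receiver for one 10-bit
+ * lane and downloads the init register table; config_not_set guards against
+ * repeating this on every mode switch.
+ */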
+static int32_t ov9726_video_config(int mode)
+{
+	int32_t rc = 0;
+
+	ov9726_ctrl->sensormode = mode;
+
+	if (config_not_set) {
+		struct msm_camera_csi_params ov9726_csi_params;
+
+		/* sensor in standby */
+		ov9726_i2c_write_b(ov9726_client->addr, 0x100, 0);
+		msleep(5);
+		/* Initialize Sensor registers */
+		ov9726_csi_params.data_format = CSI_10BIT;
+		ov9726_csi_params.lane_cnt = 1;
+		ov9726_csi_params.lane_assign = 0xe4;
+		ov9726_csi_params.dpcm_scheme = 0;
+		ov9726_csi_params.settle_cnt = 7;
+
+		rc = msm_camio_csi_config(&ov9726_csi_params);
+		rc = initialize_ov9726_registers();
+		config_not_set = 0;
+	}
+	return rc;
+}
+
+static int32_t ov9726_snapshot_config(int mode)
+{
+	int32_t rc = 0;
+	ov9726_ctrl->sensormode = mode;
+	return rc;
+}
+
+static int32_t ov9726_raw_snapshot_config(int mode)
+{
+	int32_t rc = 0;
+	ov9726_ctrl->sensormode = mode;
+	return rc;
+}
+
+static int32_t ov9726_set_sensor_mode(int  mode,
+			int  res)
+{
+	int32_t rc = 0;
+	switch (mode) {
+	case SENSOR_PREVIEW_MODE:
+		rc = ov9726_video_config(mode);
+		break;
+	case SENSOR_SNAPSHOT_MODE:
+		rc = ov9726_snapshot_config(mode);
+		break;
+	case SENSOR_RAW_SNAPSHOT_MODE:
+		rc = ov9726_raw_snapshot_config(mode);
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+static int ov9726_probe_init_sensor(const struct msm_camera_sensor_info *data)
+{
+	int32_t rc = 0;
+	uint16_t  chipidl, chipidh;
+
+	if (data->sensor_reset_enable) {
+		rc = gpio_request(data->sensor_reset, "ov9726");
+		if (!rc) {
+			gpio_direction_output(data->sensor_reset, 0);
+			gpio_set_value_cansleep(data->sensor_reset, 1);
+			msleep(20);
+		} else
+			goto init_probe_done;
+	}
+	/* 3. Read sensor Model ID: */
+	rc = ov9726_i2c_read(OV9726_PIDH_REG, &chipidh, 1);
+	if (rc < 0)
+		goto init_probe_fail;
+	rc = ov9726_i2c_read(OV9726_PIDL_REG, &chipidl, 1);
+	if (rc < 0)
+		goto init_probe_fail;
+	CDBG("kov9726 model_id = 0x%x  0x%x\n", chipidh, chipidl);
+	/* 4. Compare sensor ID to OV9726 ID: */
+	if (chipidh != OV9726_PID) {
+		rc = -ENODEV;
+		printk(KERN_INFO "Probeinit fail\n");
+		goto init_probe_fail;
+	}
+	CDBG("chipidh == OV9726_PID\n");
+	msleep(OV9726_RESET_DELAY_MSECS);
+	CDBG("after delay\n");
+	goto init_probe_done;
+
+init_probe_fail:
+	if (data->sensor_reset_enable) {
+		gpio_direction_output(data->sensor_reset, 0);
+		gpio_free(data->sensor_reset);
+	}
+init_probe_done:
+	printk(KERN_INFO " ov9726_probe_init_sensor finishes\n");
+	return rc;
+}
+
+int ov9726_sensor_open_init(const struct msm_camera_sensor_info *data)
+{
+	int32_t  rc;
+
+	CDBG("Calling ov9726_sensor_open_init\n");
+	ov9726_ctrl = kzalloc(sizeof(struct ov9726_ctrl_t), GFP_KERNEL);
+	if (!ov9726_ctrl) {
+		CDBG("ov9726_init failed!\n");
+		rc = -ENOMEM;
+		goto init_done;
+	}
+	ov9726_ctrl->curr_lens_pos = -1;
+	ov9726_ctrl->fps_divider = 1 << OV9726_Q10Shift;
+	ov9726_ctrl->pict_fps_divider = 1 << OV9726_Q10Shift;
+	ov9726_ctrl->set_test = TEST_OFF;
+	ov9726_ctrl->prev_res = FULL_SIZE;
+	ov9726_ctrl->pict_res = FULL_SIZE;
+	ov9726_ctrl->curr_res = INVALID_SIZE;
+	config_not_set = 1;
+	if (data)
+		ov9726_ctrl->sensordata = data;
+	/* enable mclk first */
+	msm_camio_clk_rate_set(OV9726_DEFAULT_CLOCK_RATE);
+	msleep(20);
+	rc = ov9726_probe_init_sensor(data);
+	if (rc < 0)
+		goto init_fail;
+
+	ov9726_ctrl->fps = (uint16_t)(30 << OV9726_Q8Shift);
+	/* generate test pattern */
+	if (rc < 0)
+		goto init_fail;
+	else
+		goto init_done;
+	/* reset the driver state */
+init_fail:
+	CDBG(" init_fail\n");
+	kfree(ov9726_ctrl);
+init_done:
+	CDBG("init_done\n");
+	return rc;
+}
+
+static int ov9726_init_client(struct i2c_client *client)
+{
+	/* Initialize the MSM_CAMI2C Chip */
+	init_waitqueue_head(&ov9726_wait_queue);
+	return 0;
+}
+
+static const struct i2c_device_id ov9726_i2c_id[] = {
+	{ "ov9726", 0},
+	{ }
+};
+
+static int ov9726_i2c_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	int rc = 0;
+	CDBG("ov9726_probe called!\n");
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		CDBG("i2c_check_functionality failed\n");
+		goto probe_failure;
+	}
+	ov9726_sensorw = kzalloc(sizeof(struct ov9726_work_t), GFP_KERNEL);
+	if (!ov9726_sensorw) {
+		CDBG("kzalloc failed.\n");
+		rc = -ENOMEM;
+		goto probe_failure;
+	}
+	i2c_set_clientdata(client, ov9726_sensorw);
+	ov9726_init_client(client);
+	ov9726_client = client;
+	msleep(50);
+	CDBG("ov9726_probe successed! rc = %d\n", rc);
+	return 0;
+probe_failure:
+	CDBG("ov9726_probe failed! rc = %d\n", rc);
+	return rc;
+}
+
+static int __exit ov9726_i2c_remove(struct i2c_client *client)
+{
+	struct ov9726_work_t *sensorw = i2c_get_clientdata(client);
+	free_irq(client->irq, sensorw);
+	ov9726_client = NULL;
+	kfree(sensorw);
+	return 0;
+}
+
+static struct i2c_driver ov9726_i2c_driver = {
+	.id_table = ov9726_i2c_id,
+	.probe	= ov9726_i2c_probe,
+	.remove = __exit_p(ov9726_i2c_remove),
+	.driver = {
+		.name = "ov9726",
+	},
+};
+
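+/*
+ * ioctl-style entry point: a struct sensor_cfg_data is copied in from user
+ * space, dispatched on cfgtype under ov9726_mut, and copied back out for the
+ * CFG_GET_* queries.
+ */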
+int ov9726_sensor_config(void __user *argp)
+{
+	struct sensor_cfg_data cdata;
+	long   rc = 0;
+
+	if (copy_from_user(&cdata,
+				(void *)argp,
+				sizeof(struct sensor_cfg_data)))
+		return -EFAULT;
+	mutex_lock(&ov9726_mut);
+	CDBG("ov9726_sensor_config: cfgtype = %d\n",
+		cdata.cfgtype);
+	switch (cdata.cfgtype) {
+	case CFG_GET_PICT_FPS:
+		ov9726_get_pict_fps(cdata.cfg.gfps.prevfps,
+				&(cdata.cfg.gfps.pictfps));
+		if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+	case CFG_GET_PREV_L_PF:
+		cdata.cfg.prevl_pf = ov9726_get_prev_lines_pf();
+		if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+	case CFG_GET_PREV_P_PL:
+		cdata.cfg.prevp_pl = ov9726_get_prev_pixels_pl();
+		if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+	case CFG_GET_PICT_L_PF:
+		cdata.cfg.pictl_pf = ov9726_get_pict_lines_pf();
+		if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+	case CFG_GET_PICT_P_PL:
+		cdata.cfg.pictp_pl =
+				ov9726_get_pict_pixels_pl();
+		if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+	case CFG_GET_PICT_MAX_EXP_LC:
+		cdata.cfg.pict_max_exp_lc = ov9726_get_pict_max_exp_lc();
+		if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+	case CFG_SET_FPS:
+	case CFG_SET_PICT_FPS:
+		rc = ov9726_set_fps(&(cdata.cfg.fps));
+		break;
+	case CFG_SET_EXP_GAIN:
+		rc = ov9726_write_exp_gain(
+					cdata.cfg.exp_gain.gain,
+					cdata.cfg.exp_gain.line);
+		break;
+	case CFG_SET_PICT_EXP_GAIN:
+		rc = ov9726_set_pict_exp_gain(
+					cdata.cfg.exp_gain.gain,
+					cdata.cfg.exp_gain.line);
+		break;
+	case CFG_SET_MODE:
+		rc = ov9726_set_sensor_mode(cdata.mode,
+						cdata.rs);
+		break;
+	case CFG_PWR_DOWN:
+	case CFG_MOVE_FOCUS:
+	case CFG_SET_DEFAULT_FOCUS:
+		rc = 0;
+		break;
+	case CFG_SET_EFFECT:
+	default:
+		rc = -EFAULT;
+		break;
+	}
+	mutex_unlock(&ov9726_mut);
+	return rc;
+}
+
+static int ov9726_probe_init_done(const struct msm_camera_sensor_info *data)
+{
+	if (data->sensor_reset_enable) {
+		gpio_direction_output(data->sensor_reset, 0);
+		gpio_free(data->sensor_reset);
+	}
+	return 0;
+}
+
+static int ov9726_sensor_release(void)
+{
+	int rc = -EBADF;
+	mutex_lock(&ov9726_mut);
+	gpio_direction_output(ov9726_ctrl->sensordata->sensor_reset,
+		0);
+	gpio_free(ov9726_ctrl->sensordata->sensor_reset);
+	kfree(ov9726_ctrl);
+	ov9726_ctrl = NULL;
+	CDBG("ov9726_release completed\n");
+	mutex_unlock(&ov9726_mut);
+	return rc;
+}
+
+static int ov9726_sensor_probe(const struct msm_camera_sensor_info *info,
+		struct msm_sensor_ctrl *s)
+{
+	int rc = 0;
+
+	rc = i2c_add_driver(&ov9726_i2c_driver);
+	if (rc < 0 || ov9726_client == NULL) {
+		rc = -ENOTSUPP;
+		goto probe_fail;
+	}
+	msm_camio_clk_rate_set(24000000);
+	msleep(20);
+	rc = ov9726_probe_init_sensor(info);
+	if (rc < 0)
+		goto probe_fail;
+
+	s->s_init = ov9726_sensor_open_init;
+	s->s_release = ov9726_sensor_release;
+	s->s_config  = ov9726_sensor_config;
+	s->s_camera_type = FRONT_CAMERA_2D;
+	s->s_mount_angle = info->sensor_platform_info->mount_angle;
+	ov9726_probe_init_done(info);
+
+	return rc;
+
+probe_fail:
+	CDBG("SENSOR PROBE FAILS!\n");
+	return rc;
+}
+
+static int __ov9726_probe(struct platform_device *pdev)
+{
+	return msm_camera_drv_start(pdev, ov9726_sensor_probe);
+}
+
+static struct platform_driver msm_camera_driver = {
+	.probe = __ov9726_probe,
+	.driver = {
+		.name = "msm_camera_ov9726",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init ov9726_init(void)
+{
+	return platform_driver_register(&msm_camera_driver);
+}
+
+module_init(ov9726_init);
+void ov9726_exit(void)
+{
+	i2c_del_driver(&ov9726_i2c_driver);
+}
+
+MODULE_DESCRIPTION("OMNI VGA Bayer sensor driver");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/media/video/msm/ov9726.h b/drivers/media/video/msm/ov9726.h
new file mode 100644
index 0000000..56d3da6
--- /dev/null
+++ b/drivers/media/video/msm/ov9726.h
@@ -0,0 +1,39 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef OV9726_H
+#define OV9726_H
+#include <linux/types.h>
+#include <mach/board.h>
+
+/* 16bit address - 8 bit context register structure */
+struct reg_struct_type {
+	uint16_t	reg_addr;
+	unsigned char	reg_val;
+};
+
+enum ov9726_test_mode_t {
+	TEST_OFF,
+	TEST_1,
+	TEST_2,
+	TEST_3
+};
+
+enum ov9726_resolution_t {
+	QTR_SIZE,
+	FULL_SIZE,
+	INVALID_SIZE
+};
+extern struct reg_struct_type ov9726_init_settings_array[];
+extern int32_t ov9726_array_length;
+#endif
+
diff --git a/drivers/media/video/msm/ov9726_reg.c b/drivers/media/video/msm/ov9726_reg.c
new file mode 100644
index 0000000..54afbe8
--- /dev/null
+++ b/drivers/media/video/msm/ov9726_reg.c
@@ -0,0 +1,101 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "ov9726.h"
+struct reg_struct_type ov9726_init_settings_array[] = {
+	{0x0103, 0x01}, /* SOFTWARE_RESET */
+	{0x3026, 0x00}, /* OUTPUT_SELECT01 */
+	{0x3027, 0x00}, /* OUTPUT_SELECT02 */
+	{0x3002, 0xe8}, /* IO_CTRL00 */
+	{0x3004, 0x03}, /* IO_CTRL01 */
+	{0x3005, 0xff}, /* IO_CTRL02 */
+	{0x3703, 0x42},
+	{0x3704, 0x10},
+	{0x3705, 0x45},
+	{0x3603, 0xaa},
+	{0x3632, 0x2f},
+	{0x3620, 0x66},
+	{0x3621, 0xc0},
+	{0x0340, 0x03}, /* FRAME_LENGTH_LINES_HI */
+	{0x0341, 0xC1}, /* FRAME_LENGTH_LINES_LO */
+	{0x0342, 0x06}, /* LINE_LENGTH_PCK_HI */
+	{0x0343, 0x80}, /* LINE_LENGTH_PCK_LO */
+	{0x0202, 0x03}, /* COARSE_INTEGRATION_TIME_HI */
+	{0x0203, 0x43}, /* COARSE_INTEGRATION_TIME_LO */
+	{0x3833, 0x04},
+	{0x3835, 0x02},
+	{0x4702, 0x04},
+	{0x4704, 0x00}, /* DVP_CTRL01 */
+	{0x4706, 0x08},
+	{0x5052, 0x01},
+	{0x3819, 0x6e},
+	{0x3817, 0x94},
+	{0x3a18, 0x00}, /* AEC_GAIN_CEILING_HI */
+	{0x3a19, 0x7f}, /* AEC_GAIN_CEILING_LO */
+	{0x404e, 0x7e},
+	{0x3631, 0x52},
+	{0x3633, 0x50},
+	{0x3630, 0xd2},
+	{0x3604, 0x08},
+	{0x3601, 0x40},
+	{0x3602, 0x14},
+	{0x3610, 0xa0},
+	{0x3612, 0x20},
+	{0x034c, 0x05}, /* X_OUTPUT_SIZE_HI */
+	{0x034d, 0x10}, /* X_OUTPUT_SIZE_LO */
+	{0x034e, 0x03}, /* Y_OUTPUT_SIZE_HI */
+	{0x034f, 0x28}, /* Y_OUTPUT_SIZE_LO */
+	{0x0340, 0x03}, /* FRAME_LENGTH_LINES_HI */
+	{0x0341, 0xC1}, /* FRAME_LENGTH_LINES_LO */
+	{0x0342, 0x06}, /* LINE_LENGTH_PCK_HI */
+	{0x0343, 0x80}, /* LINE_LENGTH_PCK_LO */
+	{0x0202, 0x03}, /* COARSE_INTEGRATION_TIME_HI */
+	{0x0203, 0x43}, /* COARSE_INTEGRATION_TIME_LO */
+	{0x0303, 0x01}, /* VT_SYS_CLK_DIV_LO */
+	{0x3002, 0x00}, /* IO_CTRL00 */
+	{0x3004, 0x00}, /* IO_CTRL01 */
+	{0x3005, 0x00}, /* IO_CTRL02 */
+	{0x4801, 0x0f}, /* MIPI_CTRL01 */
+	{0x4803, 0x05}, /* MIPI_CTRL03 */
+	{0x4601, 0x16}, /* VFIFO_READ_CONTROL */
+	{0x3014, 0x05}, /* SC_CMMN_MIPI / SC_CTRL00 */
+	{0x3104, 0x80},
+	{0x0305, 0x04}, /* PRE_PLL_CLK_DIV_LO */
+	{0x0307, 0x64}, /* PLL_MULTIPLIER_LO */
+	{0x300c, 0x02},
+	{0x300d, 0x20},
+	{0x300e, 0x01},
+	{0x3010, 0x01},
+	{0x460e, 0x81}, /* VFIFO_CONTROL00 */
+	{0x0101, 0x01}, /* IMAGE_ORIENTATION */
+	{0x3707, 0x14},
+	{0x3622, 0x9f},
+	{0x5047, 0x3D}, /* ISP_CTRL47 */
+	{0x4002, 0x45}, /* BLC_CTRL02 */
+	{0x5000, 0x06}, /* ISP_CTRL0 */
+	{0x5001, 0x00}, /* ISP_CTRL1 */
+	{0x3406, 0x00}, /* AWB_MANUAL_CTRL */
+	{0x3503, 0x13}, /* AEC_ENABLE */
+	{0x4005, 0x18}, /* BLC_CTRL05 */
+	{0x4837, 0x21},
+	{0x0100, 0x01}, /* MODE_SELECT */
+	{0x3a0f, 0x64}, /* AEC_CTRL0F */
+	{0x3a10, 0x54}, /* AEC_CTRL10 */
+	{0x3a11, 0xc2}, /* AEC_CTRL11 */
+	{0x3a1b, 0x64}, /* AEC_CTRL1B */
+	{0x3a1e, 0x54}, /* AEC_CTRL1E */
+	{0x3a1a, 0x05}, /* AEC_DIFF_MAX */
+};
+int32_t ov9726_array_length = sizeof(ov9726_init_settings_array) /
+	sizeof(ov9726_init_settings_array[0]);
+
diff --git a/drivers/media/video/msm/qs_s5k4e1.c b/drivers/media/video/msm/qs_s5k4e1.c
new file mode 100644
index 0000000..d6bb51d
--- /dev/null
+++ b/drivers/media/video/msm/qs_s5k4e1.c
@@ -0,0 +1,1663 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <media/msm_camera.h>
+#include <mach/gpio.h>
+#include <mach/camera.h>
+#include "qs_s5k4e1.h"
+/*=============================================================
+	SENSOR REGISTER DEFINES
+==============================================================*/
+#define REG_GROUPED_PARAMETER_HOLD		0x0104
+#define GROUPED_PARAMETER_HOLD_OFF		0x00
+#define GROUPED_PARAMETER_HOLD			0x01
+/* Integration Time */
+#define REG_COARSE_INTEGRATION_TIME		0x0202
+/* Gain */
+#define REG_GLOBAL_GAIN					0x0204
+#define REG_GR_GAIN					0x020E
+#define REG_R_GAIN					0x0210
+#define REG_B_GAIN					0x0212
+#define REG_GB_GAIN					0x0214
+/* PLL registers */
+#define REG_FRAME_LENGTH_LINES			0x0340
+#define REG_LINE_LENGTH_PCK				0x0342
+/* Test Pattern */
+#define REG_TEST_PATTERN_MODE			0x0601
+#define REG_VCM_NEW_CODE				0x30F2
+#define AF_ADDR							0x18
+#define BRIDGE_ADDR						0x80
+/*============================================================================
+			 TYPE DECLARATIONS
+============================================================================*/
+
+/* 16bit address - 8 bit context register structure */
+#define Q8  0x00000100
+#define Q10 0x00000400
+#define QS_S5K4E1_MASTER_CLK_RATE 24000000
+#define QS_S5K4E1_OFFSET			8
+
+/* AF Total steps parameters */
+#define QS_S5K4E1_TOTAL_STEPS_NEAR_TO_FAR    32
+
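+/*
+ * AF tuning parameters.  qs_s5k4e1_step_position_table is presumably filled
+ * elsewhere from qs_s5k4e1_af_initial_code plus the per-step code increments
+ * of the non-linear and linear regions; qs_s5k4e1_af_right_adjust is added to
+ * the right-hand lens code (see qs_s5k4e1_write_focus_value) so both
+ * actuators of the 3D module focus at the same distance.
+ */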
+uint16_t qs_s5k4e1_step_position_table[QS_S5K4E1_TOTAL_STEPS_NEAR_TO_FAR+1];
+uint16_t qs_s5k4e1_nl_region_boundary1;
+uint16_t qs_s5k4e1_nl_region_code_per_step1 = 190;
+uint16_t qs_s5k4e1_l_region_code_per_step = 8;
+uint16_t qs_s5k4e1_damping_threshold = 10;
+uint16_t qs_s5k4e1_sw_damping_time_wait = 8;
+uint16_t qs_s5k4e1_af_mode = 4;
+int16_t qs_s5k4e1_af_initial_code = 190;
+int16_t qs_s5k4e1_af_right_adjust;
+
+struct qs_s5k4e1_work_t {
+	struct work_struct work;
+};
+
+static struct qs_s5k4e1_work_t *qs_s5k4e1_sensorw;
+static struct i2c_client *qs_s5k4e1_client;
+static char lens_eeprom_data[864];
+static bool cali_data_status;
+struct qs_s5k4e1_ctrl_t {
+	const struct  msm_camera_sensor_info *sensordata;
+
+	uint32_t sensormode;
+	uint32_t fps_divider;/* init to 1 * 0x00000400 */
+	uint32_t pict_fps_divider;/* init to 1 * 0x00000400 */
+	uint16_t fps;
+
+	uint16_t curr_lens_pos;
+	uint16_t curr_step_pos;
+	uint16_t my_reg_gain;
+	uint32_t my_reg_line_count;
+	uint16_t total_lines_per_frame;
+
+	enum qs_s5k4e1_resolution_t prev_res;
+	enum qs_s5k4e1_resolution_t pict_res;
+	enum qs_s5k4e1_resolution_t curr_res;
+	enum qs_s5k4e1_test_mode_t  set_test;
+	enum qs_s5k4e1_cam_mode_t cam_mode;
+};
+
+static uint16_t prev_line_length_pck;
+static uint16_t prev_frame_length_lines;
+static uint16_t snap_line_length_pck;
+static uint16_t snap_frame_length_lines;
+
+static bool CSI_CONFIG, LENS_SHADE_CONFIG, default_lens_shade;
+static struct qs_s5k4e1_ctrl_t *qs_s5k4e1_ctrl;
+static DECLARE_WAIT_QUEUE_HEAD(qs_s5k4e1_wait_queue);
+DEFINE_MUTEX(qs_s5k4e1_mut);
+
+static int cam_debug_init(void);
+static struct dentry *debugfs_base;
+/*=============================================================*/
+
+static int qs_s5k4e1_i2c_rxdata(unsigned short saddr,
+	unsigned char *rxdata, int length)
+{
+	struct i2c_msg msgs[] = {
+		{
+			.addr  = saddr,
+			.flags = 0,
+			.len   = 2,
+			.buf   = rxdata,
+		},
+		{
+			.addr  = saddr,
+			.flags = I2C_M_RD,
+			.len   = length,
+			.buf   = rxdata,
+		},
+	};
+	if (i2c_transfer(qs_s5k4e1_client->adapter, msgs, 2) < 0) {
+		CDBG("qs_s5k4e1_i2c_rxdata faild 0x%x\n", saddr);
+		return -EIO;
+	}
+	return 0;
+}
+
+static int32_t qs_s5k4e1_i2c_txdata(unsigned short saddr,
+				unsigned char *txdata, int length)
+{
+	struct i2c_msg msg[] = {
+		{
+			.addr = saddr,
+			.flags = 0,
+			.len = length,
+			.buf = txdata,
+		 },
+	};
+	if (i2c_transfer(qs_s5k4e1_client->adapter, msg, 1) < 0) {
+		CDBG("qs_s5k4e1_i2c_txdata faild 0x%x\n", saddr);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int32_t qs_s5k4e1_i2c_read(unsigned short raddr,
+	unsigned short *rdata, int rlen)
+{
+	int32_t rc = 0;
+	unsigned char buf[2];
+	if (!rdata)
+		return -EIO;
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (raddr & 0xFF00) >> 8;
+	buf[1] = (raddr & 0x00FF);
+	rc = qs_s5k4e1_i2c_rxdata(qs_s5k4e1_client->addr>>1, buf, rlen);
+	if (rc < 0) {
+		CDBG("qs_s5k4e1_i2c_read 0x%x failed!\n", raddr);
+		return rc;
+	}
+	*rdata = (rlen == 2 ? buf[0] << 8 | buf[1] : buf[0]);
+	CDBG("qs_s5k4e1_i2c_read 0x%x val = 0x%x!\n", raddr, *rdata);
+	return rc;
+}
+
+static int32_t qs_s5k4e1_i2c_write_w_sensor(unsigned short waddr,
+	 uint16_t wdata)
+{
+	int32_t rc = -EFAULT;
+	unsigned char buf[4];
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (waddr & 0xFF00) >> 8;
+	buf[1] = (waddr & 0x00FF);
+	buf[2] = (wdata & 0xFF00) >> 8;
+	buf[3] = (wdata & 0x00FF);
+	CDBG("i2c_write_b addr = 0x%x, val = 0x%x\n", waddr, wdata);
+	rc = qs_s5k4e1_i2c_txdata(qs_s5k4e1_client->addr>>1, buf, 4);
+	if (rc < 0) {
+		CDBG("i2c_write_b failed, addr = 0x%x, val = 0x%x!\n",
+			waddr, wdata);
+	}
+	return rc;
+}
+
+static int32_t qs_s5k4e1_i2c_write_b_sensor(unsigned short waddr, uint8_t bdata)
+{
+	int32_t rc = -EFAULT;
+	unsigned char buf[3];
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (waddr & 0xFF00) >> 8;
+	buf[1] = (waddr & 0x00FF);
+	buf[2] = bdata;
+	CDBG("i2c_write_b addr = 0x%x, val = 0x%x\n", waddr, bdata);
+	rc = qs_s5k4e1_i2c_txdata(qs_s5k4e1_client->addr>>1, buf, 3);
+	if (rc < 0) {
+		CDBG("i2c_write_b failed, addr = 0x%x, val = 0x%x!\n",
+			waddr, bdata);
+	}
+	return rc;
+}
+
+static int32_t qs_s5k4e1_i2c_write_b_table(struct qs_s5k4e1_i2c_reg_conf const
+					 *reg_conf_tbl, int num)
+{
+	int i;
+	int32_t rc = -EIO;
+	for (i = 0; i < num; i++) {
+		rc = qs_s5k4e1_i2c_write_b_sensor(reg_conf_tbl->waddr,
+			reg_conf_tbl->wdata);
+		if (rc < 0)
+			break;
+		reg_conf_tbl++;
+	}
+	return rc;
+}
+
+static int32_t qs_s5k4e1_i2c_write_seq_sensor(unsigned short waddr,
+		unsigned char *seq_data, int len)
+{
+	int32_t rc = -EFAULT;
+	unsigned char buf[len+2];
+	int i = 0;
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (waddr & 0xFF00) >> 8;
+	buf[1] = (waddr & 0x00FF);
+	for (i = 0; i < len; i++)
+		buf[i+2] = seq_data[i];
+	rc = qs_s5k4e1_i2c_txdata(qs_s5k4e1_client->addr>>1, buf, len+2);
+	return rc;
+}
+
+static int32_t af_i2c_write_b_sensor(unsigned short baddr, uint8_t bdata)
+{
+	int32_t rc = -EFAULT;
+	unsigned char buf[2];
+	memset(buf, 0, sizeof(buf));
+	buf[0] = baddr;
+	buf[1] = bdata;
+	CDBG("i2c_write_b addr = 0x%x, val = 0x%x\n", baddr, bdata);
+	rc = qs_s5k4e1_i2c_txdata(AF_ADDR>>1, buf, 2);
+	if (rc < 0) {
+		CDBG("i2c_write_b failed, addr = 0x%x, val = 0x%x!\n",
+			baddr, bdata);
+	}
+	return rc;
+}
+
+static int32_t bridge_i2c_write_w(unsigned short waddr, uint16_t wdata)
+{
+	int32_t rc = -EFAULT;
+	unsigned char buf[4];
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (waddr & 0xFF00) >> 8;
+	buf[1] = (waddr & 0x00FF);
+	buf[2] = (wdata & 0xFF00) >> 8;
+	buf[3] = (wdata & 0x00FF);
+	CDBG("bridge_i2c_write_w addr = 0x%x, val = 0x%x\n", waddr, wdata);
+	rc = qs_s5k4e1_i2c_txdata(BRIDGE_ADDR>>1, buf, 4);
+	if (rc < 0) {
+		CDBG("bridge_i2c_write_w failed, addr = 0x%x, val = 0x%x!\n",
+			waddr, wdata);
+	}
+	return rc;
+}
+
+static int32_t bridge_i2c_read(unsigned short raddr,
+	unsigned short *rdata, int rlen)
+{
+	int32_t rc = 0;
+	unsigned char buf[2];
+	if (!rdata)
+		return -EIO;
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (raddr & 0xFF00) >> 8;
+	buf[1] = (raddr & 0x00FF);
+	rc = qs_s5k4e1_i2c_rxdata(BRIDGE_ADDR>>1, buf, rlen);
+	if (rc < 0) {
+		CDBG("bridge_i2c_read 0x%x failed!\n", raddr);
+		return rc;
+	}
+	*rdata = (rlen == 2 ? buf[0] << 8 | buf[1] : buf[0]);
+	CDBG("bridge_i2c_read 0x%x val = 0x%x!\n", raddr, *rdata);
+	return rc;
+}
+
+static int32_t qs_s5k4e1_eeprom_i2c_read(unsigned short raddr,
+	unsigned char *rdata, int rlen)
+{
+	int32_t rc = 0;
+	unsigned short i2caddr = 0xA0 >> 1;
+	unsigned char buf[rlen];
+	int i = 0;
+	if (!rdata)
+		return -EIO;
+
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (raddr & 0xFF00) >> 8;
+	buf[1] = (raddr & 0x00FF);
+	rc = qs_s5k4e1_i2c_rxdata(i2caddr, buf, rlen);
+	if (rc < 0) {
+		CDBG("qs_s5k4e1_eeprom_i2c_read 0x%x failed!\n", raddr);
+		return rc;
+	}
+	for (i = 0; i < rlen; i++) {
+		rdata[i] = buf[i];
+		CDBG("qs_s5k4e1_eeprom_i2c_read 0x%x index: %d val = 0x%x!\n",
+			raddr, i, buf[i]);
+	}
+	return rc;
+}
+
+static int32_t qs_s5k4e1_eeprom_i2c_read_b(unsigned short raddr,
+	unsigned short *rdata, int rlen)
+{
+	int32_t rc = 0;
+	unsigned char buf[2];
+	rc = qs_s5k4e1_eeprom_i2c_read(raddr, &buf[0], rlen);
+	*rdata = (rlen == 2 ? buf[0] << 8 | buf[1] : buf[0]);
+	CDBG("qs_s5k4e1_eeprom_i2c_read 0x%x val = 0x%x!\n", raddr, *rdata);
+	return rc;
+}
+
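+/*
+ * Read the factory 3D calibration block from the module EEPROM (8-bit device
+ * at 0xA0): the left/right P matrices live at offsets 0x00 and 0x60, the
+ * per-channel AWB values at 0x100-0x105 and the per-distance AF codes from
+ * 0x110 onwards.  Any failed read clears cali_data_status so the data is
+ * ignored later.
+ */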
+static int32_t qs_s5k4e1_get_calibration_data(
+	struct sensor_3d_cali_data_t *cdata)
+{
+	int32_t rc = 0;
+	cali_data_status = 1;
+	rc = qs_s5k4e1_eeprom_i2c_read(0x0,
+		&(cdata->left_p_matrix[0][0][0]), 96);
+	if (rc < 0)
+		goto fail;
+	rc = qs_s5k4e1_eeprom_i2c_read(0x60,
+		&(cdata->right_p_matrix[0][0][0]), 96);
+	if (rc < 0)
+		goto fail;
+	rc = qs_s5k4e1_eeprom_i2c_read(0xC0, &(cdata->square_len[0]), 8);
+	if (rc < 0)
+		goto fail;
+	rc = qs_s5k4e1_eeprom_i2c_read(0xC8, &(cdata->focal_len[0]), 8);
+	if (rc < 0)
+		goto fail;
+	rc = qs_s5k4e1_eeprom_i2c_read(0xD0, &(cdata->pixel_pitch[0]), 8);
+	if (rc < 0)
+		goto fail;
+	rc = qs_s5k4e1_eeprom_i2c_read_b(0x100, &(cdata->left_r), 1);
+	if (rc < 0)
+		goto fail;
+	rc = qs_s5k4e1_eeprom_i2c_read_b(0x101, &(cdata->right_r), 1);
+	if (rc < 0)
+		goto fail;
+	rc = qs_s5k4e1_eeprom_i2c_read_b(0x102, &(cdata->left_b), 1);
+	if (rc < 0)
+		goto fail;
+	rc = qs_s5k4e1_eeprom_i2c_read_b(0x103, &(cdata->right_b), 1);
+	if (rc < 0)
+		goto fail;
+	rc = qs_s5k4e1_eeprom_i2c_read_b(0x104, &(cdata->left_gb), 1);
+	if (rc < 0)
+		goto fail;
+	rc = qs_s5k4e1_eeprom_i2c_read_b(0x105, &(cdata->right_gb), 1);
+	if (rc < 0)
+		goto fail;
+	rc = qs_s5k4e1_eeprom_i2c_read_b(0x110, &(cdata->left_af_far), 2);
+	if (rc < 0)
+		goto fail;
+	rc = qs_s5k4e1_eeprom_i2c_read_b(0x112, &(cdata->right_af_far), 2);
+	if (rc < 0)
+		goto fail;
+	rc = qs_s5k4e1_eeprom_i2c_read_b(0x114, &(cdata->left_af_mid), 2);
+	if (rc < 0)
+		goto fail;
+	rc = qs_s5k4e1_eeprom_i2c_read_b(0x116, &(cdata->right_af_mid), 2);
+	if (rc < 0)
+		goto fail;
+	rc = qs_s5k4e1_eeprom_i2c_read_b(0x118, &(cdata->left_af_short), 2);
+	if (rc < 0)
+		goto fail;
+	rc = qs_s5k4e1_eeprom_i2c_read_b(0x11A, &(cdata->right_af_short), 2);
+	if (rc < 0)
+		goto fail;
+	rc = qs_s5k4e1_eeprom_i2c_read_b(0x11C, &(cdata->left_af_5um), 2);
+	if (rc < 0)
+		goto fail;
+	rc = qs_s5k4e1_eeprom_i2c_read_b(0x11E, &(cdata->right_af_5um), 2);
+	if (rc < 0)
+		goto fail;
+	rc = qs_s5k4e1_eeprom_i2c_read_b(0x120, &(cdata->left_af_50up), 2);
+	if (rc < 0)
+		goto fail;
+	rc = qs_s5k4e1_eeprom_i2c_read_b(0x122, &(cdata->right_af_50up), 2);
+	if (rc < 0)
+		goto fail;
+	rc = qs_s5k4e1_eeprom_i2c_read_b(0x124, &(cdata->left_af_50down), 2);
+	if (rc < 0)
+		goto fail;
+	rc = qs_s5k4e1_eeprom_i2c_read_b(0x126, &(cdata->right_af_50down), 2);
+	if (rc < 0)
+		goto fail;
+
+	return 0;
+
+fail:
+	cali_data_status = 0;
+	return -EIO;
+
+}
+static int32_t qs_s5k4e1_write_left_lsc(char *left_lsc, int rt)
+{
+	struct qs_s5k4e1_i2c_reg_conf *ptr = (struct qs_s5k4e1_i2c_reg_conf *)
+		(qs_s5k4e1_regs.reg_lens + rt);
+	bridge_i2c_write_w(0x06, 0x02);
+	if (!LENS_SHADE_CONFIG) {
+		qs_s5k4e1_i2c_write_b_sensor(0x3096, 0x40);
+		qs_s5k4e1_i2c_write_b_table(ptr, qs_s5k4e1_regs.reg_lens_size);
+		if (default_lens_shade)
+			qs_s5k4e1_i2c_write_b_table(qs_s5k4e1_regs.
+			reg_default_lens, qs_s5k4e1_regs.reg_default_lens_size);
+		else {
+			qs_s5k4e1_i2c_write_seq_sensor(0x3200,
+				&left_lsc[0], 216);
+			qs_s5k4e1_i2c_write_seq_sensor(0x32D8,
+				&left_lsc[216], 216);
+		}
+		qs_s5k4e1_i2c_write_b_sensor(0x3096, 0x60);
+		qs_s5k4e1_i2c_write_b_sensor(0x3096, 0x40);
+	} else
+		qs_s5k4e1_i2c_write_b_table(ptr, qs_s5k4e1_regs.reg_lens_size);
+	return 0;
+}
+
+static int32_t qs_s5k4e1_write_right_lsc(char *right_lsc, int rt)
+{
+	struct qs_s5k4e1_i2c_reg_conf *ptr = (struct qs_s5k4e1_i2c_reg_conf *)
+		(qs_s5k4e1_regs.reg_lens + rt);
+	bridge_i2c_write_w(0x06, 0x01);
+	if (!LENS_SHADE_CONFIG) {
+		qs_s5k4e1_i2c_write_b_sensor(0x3096, 0x40);
+		qs_s5k4e1_i2c_write_b_table(ptr, qs_s5k4e1_regs.reg_lens_size);
+		if (default_lens_shade)
+			qs_s5k4e1_i2c_write_b_table(qs_s5k4e1_regs.
+			reg_default_lens, qs_s5k4e1_regs.reg_default_lens_size);
+		else {
+			qs_s5k4e1_i2c_write_seq_sensor(0x3200,
+				&right_lsc[0], 216);
+			qs_s5k4e1_i2c_write_seq_sensor(0x32D8,
+				&right_lsc[216], 216);
+		}
+		qs_s5k4e1_i2c_write_b_sensor(0x3096, 0x60);
+		qs_s5k4e1_i2c_write_b_sensor(0x3096, 0x40);
+	} else
+		qs_s5k4e1_i2c_write_b_table(ptr, qs_s5k4e1_regs.reg_lens_size);
+	return 0;
+}
+
+static int32_t qs_s5k4e1_write_lsc(char *lsc, int rt)
+{
+	if (qs_s5k4e1_ctrl->cam_mode == MODE_3D) {
+		qs_s5k4e1_write_left_lsc(&lsc[0], rt);
+		qs_s5k4e1_write_right_lsc(&lsc[432], rt);
+		bridge_i2c_write_w(0x06, 0x03);
+	} else if (qs_s5k4e1_ctrl->cam_mode == MODE_2D_LEFT)
+		qs_s5k4e1_write_left_lsc(&lsc[0], rt);
+	else if (qs_s5k4e1_ctrl->cam_mode == MODE_2D_RIGHT)
+		qs_s5k4e1_write_right_lsc(&lsc[432], rt);
+	return 0;
+}
+
+static int32_t qs_s5k4e1_read_left_lsc(char *left_lsc)
+{
+	qs_s5k4e1_eeprom_i2c_read(0x200, &left_lsc[0], 216);
+	qs_s5k4e1_eeprom_i2c_read(0x2D8, &left_lsc[216], 216);
+	return 0;
+}
+
+static int32_t qs_s5k4e1_read_right_lsc(char *right_lsc)
+{
+	qs_s5k4e1_eeprom_i2c_read(0x3B0, &right_lsc[0], 216);
+	qs_s5k4e1_eeprom_i2c_read(0x488, &right_lsc[216], 216);
+	return 0;
+}
+
+static int32_t qs_s5k4e1_read_lsc(char *lsc)
+{
+	qs_s5k4e1_read_left_lsc(&lsc[0]);
+	qs_s5k4e1_read_right_lsc(&lsc[432]);
+	return 0;
+}
+
+static int32_t qs_s5k4e1_bridge_reset(void)
+{
+	unsigned short RegData = 0, GPIOInState = 0;
+	int32_t rc = 0;
+	rc = bridge_i2c_write_w(0x50, 0x00);
+	if (rc < 0)
+		goto bridge_fail;
+	rc = bridge_i2c_write_w(0x53, 0x00);
+	if (rc < 0)
+		goto bridge_fail;
+	msleep(30);
+	rc = bridge_i2c_write_w(0x53, 0x01);
+	if (rc < 0)
+		goto bridge_fail;
+	msleep(30);
+	rc = bridge_i2c_write_w(0x0E, 0xFFFF);
+	if (rc < 0)
+		goto err;
+	rc = bridge_i2c_read(0x54, &RegData, 2);
+	if (rc < 0)
+		goto err;
+	rc = bridge_i2c_write_w(0x54, (RegData | 0x1));
+	if (rc < 0)
+		goto err;
+	msleep(30);
+	rc = bridge_i2c_read(0x54, &RegData, 2);
+	if (rc < 0)
+		goto err;
+	rc = bridge_i2c_write_w(0x54, (RegData | 0x4));
+	if (rc < 0)
+		goto err;
+	rc = bridge_i2c_read(0x55, &GPIOInState, 2);
+	if (rc < 0)
+		goto err;
+	rc = bridge_i2c_write_w(0x55, (GPIOInState | 0x1));
+	if (rc < 0)
+		goto err;
+	msleep(30);
+	rc = bridge_i2c_read(0x55, &GPIOInState, 2);
+	if (rc < 0)
+		goto err;
+	rc = bridge_i2c_write_w(0x55, (GPIOInState | 0x4));
+	if (rc < 0)
+		goto err;
+	msleep(30);
+	rc = bridge_i2c_read(0x55, &GPIOInState, 2);
+	if (rc < 0)
+		goto err;
+	GPIOInState = ((GPIOInState >> 4) & 0x1);
+
+	rc = bridge_i2c_read(0x08, &GPIOInState, 2);
+	if (rc < 0)
+		goto err;
+	rc = bridge_i2c_write_w(0x08, GPIOInState | 0x4000);
+	if (rc < 0)
+		goto err;
+	return rc;
+
+err:
+	bridge_i2c_write_w(0x53, 0x00);
+	msleep(30);
+
+bridge_fail:
+	return rc;
+
+}
+
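+/*
+ * The two sensors of the 3D module sit behind what appears to be an I2C/CSI
+ * bridge at BRIDGE_ADDR.  Bridge register 0x06 selects which sensor later I2C
+ * writes are routed to (0x01 = right, 0x02 = left, 0x03 = both); this is how
+ * the per-eye lens shading, gain and focus writes in this file are steered.
+ */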
+static void qs_s5k4e1_bridge_config(int mode, int rt)
+{
+	unsigned short RegData = 0;
+	if (mode == MODE_3D) {
+		bridge_i2c_read(0x54, &RegData, 2);
+		bridge_i2c_write_w(0x54, (RegData | 0x2));
+		bridge_i2c_write_w(0x54, (RegData | 0xa));
+		bridge_i2c_read(0x55, &RegData, 2);
+		bridge_i2c_write_w(0x55, (RegData | 0x2));
+		bridge_i2c_write_w(0x55, (RegData | 0xa));
+		bridge_i2c_write_w(0x14, 0x0C);
+		msleep(20);
+		bridge_i2c_write_w(0x16, 0x00);
+		bridge_i2c_write_w(0x51, 0x3);
+		bridge_i2c_write_w(0x52, 0x1);
+		bridge_i2c_write_w(0x06, 0x03);
+		bridge_i2c_write_w(0x04, 0x2018);
+		bridge_i2c_write_w(0x50, 0x00);
+	} else if (mode == MODE_2D_RIGHT) {
+		bridge_i2c_read(0x54, &RegData, 2);
+		RegData |= 0x2;
+		bridge_i2c_write_w(0x54, RegData);
+		bridge_i2c_write_w(0x54, (RegData & ~(0x8)));
+		bridge_i2c_read(0x55, &RegData, 2);
+		RegData |= 0x2;
+		bridge_i2c_write_w(0x55, RegData);
+		bridge_i2c_write_w(0x55, (RegData & ~(0x8)));
+		bridge_i2c_write_w(0x14, 0x04);
+		msleep(20);
+		bridge_i2c_write_w(0x51, 0x3);
+		bridge_i2c_write_w(0x06, 0x01);
+		bridge_i2c_write_w(0x04, 0x2018);
+		bridge_i2c_write_w(0x50, 0x01);
+	} else if (mode == MODE_2D_LEFT) {
+		bridge_i2c_read(0x54, &RegData, 2);
+		RegData |= 0x8;
+		bridge_i2c_write_w(0x54, RegData);
+		bridge_i2c_write_w(0x54, (RegData & ~(0x2)));
+		bridge_i2c_read(0x55, &RegData, 2);
+		RegData |= 0x8;
+		bridge_i2c_write_w(0x55, RegData);
+		bridge_i2c_write_w(0x55, (RegData & ~(0x2)));
+		bridge_i2c_write_w(0x14, 0x08);
+		msleep(20);
+		bridge_i2c_write_w(0x51, 0x3);
+		bridge_i2c_write_w(0x06, 0x02);
+		bridge_i2c_write_w(0x04, 0x2018);
+		bridge_i2c_write_w(0x50, 0x02);
+	}
+}
+
+static void qs_s5k4e1_group_hold_on(void)
+{
+	qs_s5k4e1_i2c_write_b_sensor(REG_GROUPED_PARAMETER_HOLD,
+						GROUPED_PARAMETER_HOLD);
+}
+
+static void qs_s5k4e1_group_hold_off(void)
+{
+	qs_s5k4e1_i2c_write_b_sensor(REG_GROUPED_PARAMETER_HOLD,
+						GROUPED_PARAMETER_HOLD_OFF);
+}
+
+static void qs_s5k4e1_start_stream(void)
+{
+	qs_s5k4e1_i2c_write_b_sensor(0x0100, 0x01);
+}
+
+static void qs_s5k4e1_stop_stream(void)
+{
+	qs_s5k4e1_i2c_write_b_sensor(0x0100, 0x00);
+}
+
+static void qs_s5k4e1_get_pict_fps(uint16_t fps, uint16_t *pfps)
+{
+	/* input fps is preview fps in Q8 format */
+	uint32_t divider, d1, d2;
+
+	d1 = prev_frame_length_lines * 0x00000400 / snap_frame_length_lines;
+	d2 = prev_line_length_pck * 0x00000400 / snap_line_length_pck;
+	divider = d1 * d2 / 0x400;
+
+	/*Verify PCLK settings and frame sizes.*/
+	*pfps = (uint16_t) (fps * divider / 0x400);
+	/* 2 is the ratio of no.of snapshot channels
+	to number of preview channels */
+}
+
+static uint16_t qs_s5k4e1_get_prev_lines_pf(void)
+{
+
+	return prev_frame_length_lines;
+
+}
+
+static uint16_t qs_s5k4e1_get_prev_pixels_pl(void)
+{
+	return prev_line_length_pck;
+
+}
+
+static uint16_t qs_s5k4e1_get_pict_lines_pf(void)
+{
+	return snap_frame_length_lines;
+}
+
+static uint16_t qs_s5k4e1_get_pict_pixels_pl(void)
+{
+	return snap_line_length_pck;
+}
+
+
+static uint32_t qs_s5k4e1_get_pict_max_exp_lc(void)
+{
+	return snap_frame_length_lines  * 24;
+}
+
+static int32_t qs_s5k4e1_set_fps(struct fps_cfg   *fps)
+{
+	uint16_t total_line_length_pclk;
+	int32_t rc = 0;
+	qs_s5k4e1_ctrl->fps_divider = fps->fps_div;
+	qs_s5k4e1_ctrl->pict_fps_divider = fps->pict_fps_div;
+	if (qs_s5k4e1_ctrl->sensormode == SENSOR_PREVIEW_MODE) {
+		total_line_length_pclk = (uint16_t)
+		((prev_line_length_pck) * qs_s5k4e1_ctrl->fps_divider/0x400);
+	} else {
+		total_line_length_pclk = (uint16_t)
+		((snap_line_length_pck) *
+			qs_s5k4e1_ctrl->pict_fps_divider/0x400);
+	}
+	qs_s5k4e1_group_hold_on();
+	rc = qs_s5k4e1_i2c_write_w_sensor(REG_LINE_LENGTH_PCK,
+							total_line_length_pclk);
+	qs_s5k4e1_group_hold_off();
+	return rc;
+}
+
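+/*
+ * Same Q10 fixed-point scheme as the fps divider (1.0 == 0x400): if the
+ * requested integration lines do not fit in the frame, ll_ratio stretches
+ * line_length_pck instead, clamped to min_ll_pck.  In 3D mode with valid
+ * calibration data, the bridge is then pointed at the right sensor only
+ * (0x06 = 0x01) so its gain and per-channel white balance can be overridden
+ * with the matched values from exp_cfg, all inside one grouped parameter
+ * hold.
+ */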
+static int32_t qs_s5k4e1_write_exp_gain(struct sensor_3d_exp_cfg exp_cfg)
+{
+	uint16_t max_legal_gain = 0x0200;
+	uint16_t min_ll_pck = 0x0AB2;
+	uint32_t ll_pck, fl_lines;
+	uint16_t gain = exp_cfg.gain;
+	uint32_t line = exp_cfg.line;
+	uint32_t ll_ratio;
+	int32_t rc = 0;
+	if (gain > max_legal_gain) {
+		CDBG("Max legal gain Line:%d\n", __LINE__);
+		gain = max_legal_gain;
+	}
+	CDBG("qs_s5k4e1_write_exp_gain : gain = %d line = %d\n", gain, line);
+
+	if (qs_s5k4e1_ctrl->sensormode == SENSOR_PREVIEW_MODE) {
+		qs_s5k4e1_ctrl->my_reg_gain = gain;
+		qs_s5k4e1_ctrl->my_reg_line_count = (uint16_t) line;
+		ll_ratio = (uint32_t)(qs_s5k4e1_ctrl->fps_divider);
+		fl_lines = prev_frame_length_lines;
+		ll_pck = prev_line_length_pck;
+	} else {
+		ll_ratio = (uint32_t)(qs_s5k4e1_ctrl->pict_fps_divider);
+		fl_lines = snap_frame_length_lines;
+		ll_pck = snap_line_length_pck;
+	}
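+	/* If the requested integration lines exceed the frame length (minus
+	 * the readout offset), stretch the line length instead and clamp the
+	 * line count; ll_ratio is Q10. */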
+	if (((fl_lines * ll_ratio / 0x400) - QS_S5K4E1_OFFSET) < line) {
+		ll_ratio = ll_ratio * line / (fl_lines - QS_S5K4E1_OFFSET);
+		line = fl_lines - QS_S5K4E1_OFFSET;
+	}
+	ll_pck = ll_pck * ll_ratio / 0x400;
+	if (ll_pck < min_ll_pck)
+		ll_pck = min_ll_pck;
+	qs_s5k4e1_group_hold_on();
+	rc = qs_s5k4e1_i2c_write_w_sensor(REG_GLOBAL_GAIN, gain);
+	rc = qs_s5k4e1_i2c_write_w_sensor(REG_LINE_LENGTH_PCK, ll_pck);
+	rc = qs_s5k4e1_i2c_write_w_sensor(REG_COARSE_INTEGRATION_TIME, line);
+	if ((qs_s5k4e1_ctrl->cam_mode == MODE_3D) && (cali_data_status == 1)) {
+		bridge_i2c_write_w(0x06, 0x01);
+		rc = qs_s5k4e1_i2c_write_w_sensor(REG_GLOBAL_GAIN,
+			 exp_cfg.gain_adjust);
+		rc = qs_s5k4e1_i2c_write_w_sensor(REG_GR_GAIN, exp_cfg.gr_gain);
+		rc = qs_s5k4e1_i2c_write_w_sensor(REG_R_GAIN,
+				exp_cfg.r_gain);
+		rc = qs_s5k4e1_i2c_write_w_sensor(REG_B_GAIN,
+				exp_cfg.b_gain);
+		rc = qs_s5k4e1_i2c_write_w_sensor(REG_GB_GAIN,
+				exp_cfg.gb_gain);
+		bridge_i2c_write_w(0x06, 0x03);
+	}
+	qs_s5k4e1_group_hold_off();
+	return rc;
+}
+
+static int32_t qs_s5k4e1_set_pict_exp_gain(struct sensor_3d_exp_cfg exp_cfg)
+{
+	int32_t rc = 0;
+	rc = qs_s5k4e1_write_exp_gain(exp_cfg);
+	return rc;
+}
+
+static int32_t qs_s5k4e1_write_focus_value(uint16_t code_value)
+{
+	uint8_t code_val_msb, code_val_lsb;
+	if ((qs_s5k4e1_ctrl->cam_mode == MODE_2D_LEFT) ||
+		(qs_s5k4e1_ctrl->cam_mode == MODE_3D)) {
+		/* Left */
+		bridge_i2c_write_w(0x06, 0x02);
+		CDBG("%s: Left Lens Position: %d\n", __func__,
+			code_value);
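+		/* Split the 10-bit lens code: bits [9:4] go to the MSB byte,
+		 * bits [3:0] to the LSB high nibble; the AF step mode
+		 * occupies the LSB low nibble. */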
+		code_val_msb = code_value >> 4;
+		code_val_lsb = (code_value & 0x000F) << 4;
+		code_val_lsb |= qs_s5k4e1_af_mode;
+		if (af_i2c_write_b_sensor(code_val_msb, code_val_lsb) < 0) {
+			CDBG("move_focus failed at line %d ...\n", __LINE__);
+			return -EBUSY;
+		}
+	}
+
+	if ((qs_s5k4e1_ctrl->cam_mode == MODE_2D_RIGHT) ||
+		(qs_s5k4e1_ctrl->cam_mode == MODE_3D)) {
+		/* Right */
+		bridge_i2c_write_w(0x06, 0x01);
+		code_value += qs_s5k4e1_af_right_adjust;
+		CDBG("%s: Right Lens Position: %d\n", __func__,
+			code_value);
+		code_val_msb = code_value >> 4;
+		code_val_lsb = (code_value & 0x000F) << 4;
+		code_val_lsb |= qs_s5k4e1_af_mode;
+		if (af_i2c_write_b_sensor(code_val_msb, code_val_lsb) < 0) {
+			CDBG("move_focus failed at line %d ...\n", __LINE__);
+			return -EBUSY;
+		}
+	}
+
+	if (qs_s5k4e1_ctrl->cam_mode == MODE_3D) {
+		/* 3D Mode */
+		bridge_i2c_write_w(0x06, 0x03);
+	}
+	usleep(qs_s5k4e1_sw_damping_time_wait*50);
+	return 0;
+}
+
+static int32_t qs_s5k4e1_move_focus(int direction,
+	int32_t num_steps)
+{
+	int16_t step_direction, actual_step, dest_lens_position,
+		dest_step_position;
+	CDBG("Inside %s\n", __func__);
+	if (direction == MOVE_NEAR)
+		step_direction = 1;
+	else
+		step_direction = -1;
+
+	actual_step = (int16_t) (step_direction * (int16_t) num_steps);
+	dest_step_position = (int16_t) (qs_s5k4e1_ctrl->curr_step_pos +
+		actual_step);
+
+	if (dest_step_position > QS_S5K4E1_TOTAL_STEPS_NEAR_TO_FAR)
+		dest_step_position = QS_S5K4E1_TOTAL_STEPS_NEAR_TO_FAR;
+	else if (dest_step_position < 0)
+		dest_step_position = 0;
+
+	if (dest_step_position == qs_s5k4e1_ctrl->curr_step_pos) {
+		CDBG("%s cur and dest pos are same\n", __func__);
+		CDBG("%s cur_step_pos:%d\n", __func__,
+			qs_s5k4e1_ctrl->curr_step_pos);
+		return 0;
+	}
+
+	dest_lens_position = qs_s5k4e1_step_position_table[dest_step_position];
+	CDBG("%s: Step Position: %d\n", __func__, dest_step_position);
+
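+	/* Pick the actuator slew mode and the software damping delay from
+	 * the travel direction and the requested number of steps. */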
+	if (step_direction < 0) {
+		if (num_steps >= 20) {
+			/* sweeping towards all the way in infinity direction */
+			qs_s5k4e1_af_mode = 2;
+			qs_s5k4e1_sw_damping_time_wait = 8;
+		} else if (num_steps <= 4) {
+			/* reverse search during macro mode */
+			qs_s5k4e1_af_mode = 4;
+			qs_s5k4e1_sw_damping_time_wait = 16;
+		} else {
+			qs_s5k4e1_af_mode = 3;
+			qs_s5k4e1_sw_damping_time_wait = 12;
+		}
+	} else {
+		/* coarse search towards macro direction */
+		qs_s5k4e1_af_mode = 4;
+		qs_s5k4e1_sw_damping_time_wait = 16;
+	}
+
+	if (qs_s5k4e1_ctrl->curr_lens_pos != dest_lens_position) {
+		if (qs_s5k4e1_write_focus_value(dest_lens_position) < 0) {
+			CDBG("move_focus failed at line %d ...\n", __LINE__);
+			return -EBUSY;
+		}
+	}
+
+	qs_s5k4e1_ctrl->curr_step_pos = dest_step_position;
+	qs_s5k4e1_ctrl->curr_lens_pos = dest_lens_position;
+	return 0;
+}
+
+static int32_t qs_s5k4e1_set_default_focus(uint8_t af_step)
+{
+	int32_t rc = 0;
+	if (qs_s5k4e1_ctrl->curr_step_pos) {
+		rc = qs_s5k4e1_move_focus(MOVE_FAR,
+			qs_s5k4e1_ctrl->curr_step_pos);
+		if (rc < 0)
+			return rc;
+	} else {
+		rc = qs_s5k4e1_write_focus_value(
+			qs_s5k4e1_step_position_table[0]);
+		if (rc < 0)
+			return rc;
+		qs_s5k4e1_ctrl->curr_lens_pos =
+			qs_s5k4e1_step_position_table[0];
+	}
+	CDBG("%s\n", __func__);
+	return 0;
+}
+
+static void qs_s5k4e1_init_focus(void)
+{
+	uint8_t i;
+	int32_t rc = 0;
+	int16_t af_far_data = 0;
+	qs_s5k4e1_af_initial_code = 190;
+	/* Read the calibration data from left and right sensors if available */
+	rc = qs_s5k4e1_eeprom_i2c_read_b(0x110, &af_far_data, 2);
+	if (rc == 0) {
+		CDBG("%s: Left Far data - %d\n", __func__, af_far_data);
+		qs_s5k4e1_af_initial_code = af_far_data;
+	}
+
+	rc = qs_s5k4e1_eeprom_i2c_read_b(0x112, &af_far_data, 2);
+	if (rc == 0) {
+		CDBG("%s: Right Far data - %d\n", __func__, af_far_data);
+		qs_s5k4e1_af_right_adjust = af_far_data -
+			qs_s5k4e1_af_initial_code;
+	}
+
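+	/* Build the lens position table from the calibrated far-end code:
+	 * a non-linear region for the first boundary1 steps, then linear
+	 * steps, clamped to the 10-bit maximum (1023). */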
+	qs_s5k4e1_step_position_table[0] = qs_s5k4e1_af_initial_code;
+	for (i = 1; i <= QS_S5K4E1_TOTAL_STEPS_NEAR_TO_FAR; i++) {
+		if (i <= qs_s5k4e1_nl_region_boundary1) {
+			qs_s5k4e1_step_position_table[i] =
+				qs_s5k4e1_step_position_table[i-1]
+				+ qs_s5k4e1_nl_region_code_per_step1;
+		} else {
+			qs_s5k4e1_step_position_table[i] =
+				qs_s5k4e1_step_position_table[i-1]
+				+ qs_s5k4e1_l_region_code_per_step;
+		}
+
+		if (qs_s5k4e1_step_position_table[i] > 1023)
+			qs_s5k4e1_step_position_table[i] = 1023;
+	}
+	qs_s5k4e1_ctrl->curr_step_pos = 0;
+}
+
+static int32_t qs_s5k4e1_test(enum qs_s5k4e1_test_mode_t mo)
+{
+	int32_t rc = 0;
+	if (mo == TEST_OFF)
+		return rc;
+	else {
+		/* REG_0x30D8[4] is TESBYPEN:
+		 * 0: Normal Operation, 1: Bypass Signal Processing.
+		 * REG_0x30D8[5] is EBDMASK:
+		 * 0: Output embedded data, 1: No output embedded data. */
+		if (qs_s5k4e1_i2c_write_b_sensor(REG_TEST_PATTERN_MODE,
+			(uint8_t) mo) < 0) {
+			return rc;
+		}
+	}
+	return rc;
+}
+
+static int32_t qs_s5k4e1_sensor_setting(int update_type, int rt)
+{
+	int32_t rc = 0;
+	struct msm_camera_csi_params qs_s5k4e1_csi_params;
+
+	qs_s5k4e1_stop_stream();
+	msleep(80);
+	bridge_i2c_write_w(0x53, 0x00);
+	msleep(80);
+	if (update_type == REG_INIT) {
+		CSI_CONFIG = 0;
+		LENS_SHADE_CONFIG = 0;
+		default_lens_shade = 1;
+		bridge_i2c_write_w(0x53, 0x01);
+		msleep(30);
+		qs_s5k4e1_bridge_config(qs_s5k4e1_ctrl->cam_mode, rt);
+		msleep(30);
+		qs_s5k4e1_i2c_write_b_table(qs_s5k4e1_regs.rec_settings,
+				qs_s5k4e1_regs.rec_size);
+		msleep(30);
+	} else if (update_type == UPDATE_PERIODIC) {
+		qs_s5k4e1_write_lsc(lens_eeprom_data, rt);
+		msleep(100);
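+		/* Configure the MIPI CSI receiver once: 3D mode uses 4 lanes
+		 * of 8-bit Bayer, 2D mode uses 2 lanes of 10-bit Bayer. */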
+		if (!CSI_CONFIG) {
+			if (qs_s5k4e1_ctrl->cam_mode == MODE_3D) {
+				qs_s5k4e1_csi_params.lane_cnt = 4;
+				qs_s5k4e1_csi_params.data_format = CSI_8BIT;
+			} else {
+				qs_s5k4e1_csi_params.lane_cnt = 2;
+				qs_s5k4e1_csi_params.data_format = CSI_10BIT;
+			}
+			qs_s5k4e1_csi_params.lane_assign = 0xe4;
+			qs_s5k4e1_csi_params.dpcm_scheme = 0;
+			qs_s5k4e1_csi_params.settle_cnt = 24;
+			rc = msm_camio_csi_config(&qs_s5k4e1_csi_params);
+			msleep(10);
+			cam_debug_init();
+			CSI_CONFIG = 1;
+		}
+		bridge_i2c_write_w(0x53, 0x01);
+		msleep(50);
+		qs_s5k4e1_i2c_write_b_table(qs_s5k4e1_regs.conf_array[rt].conf,
+			qs_s5k4e1_regs.conf_array[rt].size);
+		msleep(50);
+		qs_s5k4e1_start_stream();
+		msleep(80);
+	}
+	return rc;
+}
+
+static int32_t qs_s5k4e1_video_config(int mode)
+{
+	int32_t rc = 0;
+	/* change sensor resolution if needed */
+	if (qs_s5k4e1_sensor_setting(UPDATE_PERIODIC,
+			qs_s5k4e1_ctrl->prev_res) < 0)
+		return rc;
+	if (qs_s5k4e1_ctrl->set_test) {
+		if (qs_s5k4e1_test(qs_s5k4e1_ctrl->set_test) < 0)
+			return rc;
+	}
+
+	qs_s5k4e1_ctrl->curr_res = qs_s5k4e1_ctrl->prev_res;
+	qs_s5k4e1_ctrl->sensormode = mode;
+	return rc;
+}
+
+static int32_t qs_s5k4e1_snapshot_config(int mode)
+{
+	int32_t rc = 0;
+	/*change sensor resolution if needed */
+	if (qs_s5k4e1_ctrl->curr_res != qs_s5k4e1_ctrl->pict_res) {
+		if (qs_s5k4e1_sensor_setting(UPDATE_PERIODIC,
+				qs_s5k4e1_ctrl->pict_res) < 0)
+			return rc;
+	}
+
+	qs_s5k4e1_ctrl->curr_res = qs_s5k4e1_ctrl->pict_res;
+	qs_s5k4e1_ctrl->sensormode = mode;
+	return rc;
+} /*end of qs_s5k4e1_snapshot_config*/
+
+static int32_t qs_s5k4e1_raw_snapshot_config(int mode)
+{
+	int32_t rc = 0;
+	/* change sensor resolution if needed */
+	if (qs_s5k4e1_ctrl->curr_res != qs_s5k4e1_ctrl->pict_res) {
+		if (qs_s5k4e1_sensor_setting(UPDATE_PERIODIC,
+				qs_s5k4e1_ctrl->pict_res) < 0)
+			return rc;
+	}
+
+	qs_s5k4e1_ctrl->curr_res = qs_s5k4e1_ctrl->pict_res;
+	qs_s5k4e1_ctrl->sensormode = mode;
+	return rc;
+} /*end of qs_s5k4e1_raw_snapshot_config*/
+
+static int32_t qs_s5k4e1_mode_init(int mode, struct sensor_init_cfg init_info)
+{
+	int32_t rc = 0;
+	if (mode != qs_s5k4e1_ctrl->cam_mode) {
+		qs_s5k4e1_ctrl->prev_res = init_info.prev_res;
+		qs_s5k4e1_ctrl->pict_res = init_info.pict_res;
+		qs_s5k4e1_ctrl->cam_mode = mode;
+
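+		/* Assemble the 16-bit frame length and line length from the
+		 * H/L register entries of the selected resolution tables. */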
+		prev_frame_length_lines =
+		((qs_s5k4e1_regs.conf_array[qs_s5k4e1_ctrl->prev_res]
+			.conf[QS_S5K4E1_FRAME_LENGTH_LINES_H].wdata << 8)
+			| qs_s5k4e1_regs.conf_array[qs_s5k4e1_ctrl->prev_res]
+			.conf[QS_S5K4E1_FRAME_LENGTH_LINES_L].wdata);
+		prev_line_length_pck =
+		(qs_s5k4e1_regs.conf_array[qs_s5k4e1_ctrl->prev_res]
+			.conf[QS_S5K4E1_LINE_LENGTH_PCK_H].wdata << 8)
+			| qs_s5k4e1_regs.conf_array[qs_s5k4e1_ctrl->prev_res]
+			.conf[QS_S5K4E1_LINE_LENGTH_PCK_L].wdata;
+		snap_frame_length_lines =
+		(qs_s5k4e1_regs.conf_array[qs_s5k4e1_ctrl->pict_res]
+			.conf[QS_S5K4E1_FRAME_LENGTH_LINES_H].wdata << 8)
+			| qs_s5k4e1_regs.conf_array[qs_s5k4e1_ctrl->pict_res]
+			.conf[QS_S5K4E1_FRAME_LENGTH_LINES_L].wdata;
+		snap_line_length_pck =
+		(qs_s5k4e1_regs.conf_array[qs_s5k4e1_ctrl->pict_res]
+			.conf[QS_S5K4E1_LINE_LENGTH_PCK_H].wdata << 8)
+			| qs_s5k4e1_regs.conf_array[qs_s5k4e1_ctrl->pict_res]
+			.conf[QS_S5K4E1_LINE_LENGTH_PCK_L].wdata;
+
+		rc = qs_s5k4e1_sensor_setting(REG_INIT,
+			qs_s5k4e1_ctrl->prev_res);
+	}
+	return rc;
+}
+static int32_t qs_s5k4e1_set_sensor_mode(int mode,
+	int res)
+{
+	int32_t rc = 0;
+	switch (mode) {
+	case SENSOR_PREVIEW_MODE:
+		qs_s5k4e1_ctrl->prev_res = res;
+		rc = qs_s5k4e1_video_config(mode);
+		break;
+	case SENSOR_SNAPSHOT_MODE:
+		qs_s5k4e1_ctrl->pict_res = res;
+		rc = qs_s5k4e1_snapshot_config(mode);
+		break;
+	case SENSOR_RAW_SNAPSHOT_MODE:
+		qs_s5k4e1_ctrl->pict_res = res;
+		rc = qs_s5k4e1_raw_snapshot_config(mode);
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+static int32_t qs_s5k4e1_power_down(void)
+{
+	qs_s5k4e1_stop_stream();
+	msleep(30);
+	qs_s5k4e1_af_mode = 2;
+	qs_s5k4e1_af_right_adjust = 0;
+	qs_s5k4e1_write_focus_value(0);
+	msleep(100);
+	/* Set the AF actuator to power-down mode */
+	af_i2c_write_b_sensor(0x80, 0x00);
+	return 0;
+}
+
+static int qs_s5k4e1_probe_init_done(const struct msm_camera_sensor_info *data)
+{
+	CDBG("probe done\n");
+	gpio_free(data->sensor_reset);
+	return 0;
+}
+
+static int
+	qs_s5k4e1_probe_init_sensor(const struct msm_camera_sensor_info *data)
+{
+	int32_t rc = 0;
+	uint16_t chipid = 0;
+	CDBG("%s: %d\n", __func__, __LINE__);
+	rc = gpio_request(data->sensor_reset, "qs_s5k4e1");
+	CDBG(" qs_s5k4e1_probe_init_sensor\n");
+	if (!rc) {
+		CDBG("sensor_reset = %d\n", rc);
+		gpio_direction_output(data->sensor_reset, 0);
+		msleep(50);
+		gpio_set_value_cansleep(data->sensor_reset, 1);
+		msleep(13);
+	} else {
+		goto init_probe_done;
+	}
+	msleep(70);
+	rc = qs_s5k4e1_bridge_reset();
+	if (rc < 0)
+		goto init_probe_fail;
+	qs_s5k4e1_bridge_config(MODE_3D, RES_PREVIEW);
+	msleep(30);
+
+	CDBG(" qs_s5k4e1_probe_init_sensor is called\n");
+	rc = qs_s5k4e1_i2c_read(0x0000, &chipid, 2);
+	CDBG("ID: %d\n", chipid);
+	/* Compare the sensor ID against the expected QS_S5K4E1 ID */
+	if (chipid != 0x4e10) {
+		rc = -ENODEV;
+		CDBG("qs_s5k4e1_probe_init_sensor fail chip id mismatch\n");
+		goto init_probe_fail;
+	}
+	goto init_probe_done;
+init_probe_fail:
+	CDBG(" qs_s5k4e1_probe_init_sensor fails\n");
+	gpio_set_value_cansleep(data->sensor_reset, 0);
+	qs_s5k4e1_probe_init_done(data);
+init_probe_done:
+	CDBG(" qs_s5k4e1_probe_init_sensor finishes\n");
+	return rc;
+}
+/* camsensor_qs_s5k4e1_reset */
+
+int qs_s5k4e1_sensor_open_init(const struct msm_camera_sensor_info *data)
+{
+	int32_t rc = 0;
+	CDBG("%s: %d\n", __func__, __LINE__);
+	CDBG("Calling qs_s5k4e1_sensor_open_init\n");
+
+	qs_s5k4e1_ctrl = kzalloc(sizeof(struct qs_s5k4e1_ctrl_t), GFP_KERNEL);
+	if (!qs_s5k4e1_ctrl) {
+		CDBG("qs_s5k4e1_init failed!\n");
+		rc = -ENOMEM;
+		goto init_done;
+	}
+	qs_s5k4e1_ctrl->fps_divider = 1 * 0x00000400;
+	qs_s5k4e1_ctrl->pict_fps_divider = 1 * 0x00000400;
+	qs_s5k4e1_ctrl->set_test = TEST_OFF;
+	qs_s5k4e1_ctrl->cam_mode = MODE_INVALID;
+
+	if (data)
+		qs_s5k4e1_ctrl->sensordata = data;
+	if (rc < 0) {
+		CDBG("Calling qs_s5k4e1_sensor_open_init fail1\n");
+		return rc;
+	}
+	CDBG("%s: %d\n", __func__, __LINE__);
+	/* enable mclk first */
+	msm_camio_clk_rate_set(QS_S5K4E1_MASTER_CLK_RATE);
+	rc = qs_s5k4e1_probe_init_sensor(data);
+	if (rc < 0)
+		goto init_fail;
+	/* Default mode is 3D: copy the full lens-shading EEPROM block */
+	memcpy(lens_eeprom_data, data->eeprom_data, 864);
+	qs_s5k4e1_ctrl->fps = 30*Q8;
+	qs_s5k4e1_init_focus();
+	if (rc < 0) {
+		gpio_set_value_cansleep(data->sensor_reset, 0);
+		goto init_fail;
+	} else
+		goto init_done;
+init_fail:
+	CDBG("init_fail\n");
+	qs_s5k4e1_probe_init_done(data);
+init_done:
+	CDBG("init_done\n");
+	return rc;
+} /*endof qs_s5k4e1_sensor_open_init*/
+
+static int qs_s5k4e1_init_client(struct i2c_client *client)
+{
+	/* Initialize the MSM_CAMI2C Chip */
+	init_waitqueue_head(&qs_s5k4e1_wait_queue);
+	return 0;
+}
+
+static const struct i2c_device_id qs_s5k4e1_i2c_id[] = {
+	{"qs_s5k4e1", 0},
+	{ }
+};
+
+static int qs_s5k4e1_i2c_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	int rc = 0;
+	CDBG("qs_s5k4e1_probe called!\n");
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		CDBG("i2c_check_functionality failed\n");
+		rc = -ENOTSUPP;
+		goto probe_failure;
+	}
+
+	qs_s5k4e1_sensorw = kzalloc(sizeof(struct qs_s5k4e1_work_t),
+		 GFP_KERNEL);
+	if (!qs_s5k4e1_sensorw) {
+		CDBG("kzalloc failed.\n");
+		rc = -ENOMEM;
+		goto probe_failure;
+	}
+
+	i2c_set_clientdata(client, qs_s5k4e1_sensorw);
+	qs_s5k4e1_init_client(client);
+	qs_s5k4e1_client = client;
+
+	msleep(50);
+
+	CDBG("qs_s5k4e1_probe successed! rc = %d\n", rc);
+	return 0;
+
+probe_failure:
+	CDBG("qs_s5k4e1_probe failed! rc = %d\n", rc);
+	return rc;
+}
+
+static int qs_s5k4e1_send_wb_info(struct wb_info_cfg *wb)
+{
+	return 0;
+} /*end of qs_s5k4e1_send_wb_info*/
+
+static int __exit qs_s5k4e1_remove(struct i2c_client *client)
+{
+	struct qs_s5k4e1_work_t *sensorw = i2c_get_clientdata(client);
+	free_irq(client->irq, sensorw);
+	qs_s5k4e1_client = NULL;
+	kfree(sensorw);
+	return 0;
+}
+
+static struct i2c_driver qs_s5k4e1_i2c_driver = {
+	.id_table = qs_s5k4e1_i2c_id,
+	.probe  = qs_s5k4e1_i2c_probe,
+	.remove = __exit_p(qs_s5k4e1_remove),
+	.driver = {
+		.name = "qs_s5k4e1",
+	},
+};
+
+int qs_s5k4e1_3D_sensor_config(void __user *argp)
+{
+	struct sensor_large_data cdata;
+	long rc;
+	if (copy_from_user(&cdata,
+		(void *)argp,
+		sizeof(struct sensor_large_data)))
+		return -EFAULT;
+	mutex_lock(&qs_s5k4e1_mut);
+	rc = qs_s5k4e1_get_calibration_data
+		(&cdata.data.sensor_3d_cali_data);
+	if (rc < 0)
+		goto fail;
+	if (copy_to_user((void *)argp,
+		&cdata,
+		sizeof(struct sensor_large_data)))
+		rc = -EFAULT;
+fail:
+	mutex_unlock(&qs_s5k4e1_mut);
+	return rc;
+}
+
+int qs_s5k4e1_2D_sensor_config(void __user *argp)
+{
+	struct sensor_cfg_data cdata;
+	long rc = 0;
+	if (copy_from_user(&cdata,
+		(void *)argp,
+		sizeof(struct sensor_cfg_data)))
+		return -EFAULT;
+	mutex_lock(&qs_s5k4e1_mut);
+	CDBG("qs_s5k4e1_sensor_config: cfgtype = %d\n",
+	cdata.cfgtype);
+		switch (cdata.cfgtype) {
+		case CFG_GET_PICT_FPS:
+			qs_s5k4e1_get_pict_fps(
+				cdata.cfg.gfps.prevfps,
+				&(cdata.cfg.gfps.pictfps));
+
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_GET_PREV_L_PF:
+			cdata.cfg.prevl_pf =
+			qs_s5k4e1_get_prev_lines_pf();
+
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_GET_PREV_P_PL:
+			cdata.cfg.prevp_pl =
+				qs_s5k4e1_get_prev_pixels_pl();
+
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_GET_PICT_L_PF:
+			cdata.cfg.pictl_pf =
+				qs_s5k4e1_get_pict_lines_pf();
+
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_GET_PICT_P_PL:
+			cdata.cfg.pictp_pl =
+				qs_s5k4e1_get_pict_pixels_pl();
+
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_GET_PICT_MAX_EXP_LC:
+			cdata.cfg.pict_max_exp_lc =
+				qs_s5k4e1_get_pict_max_exp_lc();
+
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_SET_FPS:
+		case CFG_SET_PICT_FPS:
+			rc = qs_s5k4e1_set_fps(&(cdata.cfg.fps));
+			break;
+
+		case CFG_SET_EXP_GAIN:
+			rc =
+				qs_s5k4e1_write_exp_gain(
+					cdata.cfg.sensor_3d_exp);
+			break;
+
+		case CFG_SET_PICT_EXP_GAIN:
+			rc =
+				qs_s5k4e1_set_pict_exp_gain(
+				cdata.cfg.sensor_3d_exp);
+			break;
+
+		case CFG_SET_MODE:
+			rc = qs_s5k4e1_set_sensor_mode(cdata.mode,
+					cdata.rs);
+			break;
+
+		case CFG_PWR_DOWN:
+			rc = qs_s5k4e1_power_down();
+			break;
+
+		case CFG_MOVE_FOCUS:
+			rc =
+				qs_s5k4e1_move_focus(
+				cdata.cfg.focus.dir,
+				cdata.cfg.focus.steps);
+			break;
+
+		case CFG_SET_DEFAULT_FOCUS:
+			rc =
+				qs_s5k4e1_set_default_focus(
+				cdata.cfg.focus.steps);
+			break;
+
+		case CFG_GET_AF_MAX_STEPS:
+			cdata.max_steps = QS_S5K4E1_TOTAL_STEPS_NEAR_TO_FAR;
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_SET_EFFECT:
+			rc = qs_s5k4e1_set_default_focus(
+				cdata.cfg.effect);
+			break;
+
+		case CFG_SEND_WB_INFO:
+			rc = qs_s5k4e1_send_wb_info(
+				&(cdata.cfg.wb_info));
+			break;
+
+		case CFG_SENSOR_INIT:
+			rc = qs_s5k4e1_mode_init(cdata.mode,
+					cdata.cfg.init_info);
+			break;
+
+		default:
+			rc = -EFAULT;
+			break;
+		}
+
+	mutex_unlock(&qs_s5k4e1_mut);
+
+	return rc;
+}
+
+int qs_s5k4e1_sensor_config(void __user *argp)
+{
+	int cfgtype;
+	long rc;
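+	/* Peek at the first word of the user buffer to route 3D calibration
+	 * requests to the large-data path; everything else goes through the
+	 * regular sensor_cfg_data handling. */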
+	if (copy_from_user(&cfgtype,
+		(void *)argp,
+		sizeof(int)))
+		return -EFAULT;
+	if (cfgtype != CFG_GET_3D_CALI_DATA)
+		rc = qs_s5k4e1_2D_sensor_config(argp);
+	else
+		rc = qs_s5k4e1_3D_sensor_config(argp);
+	return rc;
+}
+
+static int qs_s5k4e1_sensor_release(void)
+{
+	int rc = -EBADF;
+	mutex_lock(&qs_s5k4e1_mut);
+	qs_s5k4e1_power_down();
+	bridge_i2c_write_w(0x53, 0x00);
+	msleep(20);
+	gpio_set_value_cansleep(qs_s5k4e1_ctrl->sensordata->sensor_reset, 0);
+	msleep(5);
+	gpio_free(qs_s5k4e1_ctrl->sensordata->sensor_reset);
+	kfree(qs_s5k4e1_ctrl);
+	qs_s5k4e1_ctrl = NULL;
+	CDBG("qs_s5k4e1_release completed\n");
+	mutex_unlock(&qs_s5k4e1_mut);
+
+	return rc;
+}
+
+static int qs_s5k4e1_sensor_probe(const struct msm_camera_sensor_info *info,
+		struct msm_sensor_ctrl *s)
+{
+	int rc = 0;
+	rc = i2c_add_driver(&qs_s5k4e1_i2c_driver);
+	if (rc < 0 || qs_s5k4e1_client == NULL) {
+		rc = -ENOTSUPP;
+		CDBG("I2C add driver failed");
+		goto probe_fail;
+	}
+	msm_camio_clk_rate_set(QS_S5K4E1_MASTER_CLK_RATE);
+	rc = qs_s5k4e1_probe_init_sensor(info);
+	if (rc < 0)
+		goto probe_fail;
+	qs_s5k4e1_read_lsc(info->eeprom_data); /*Default mode is 3D*/
+	s->s_init = qs_s5k4e1_sensor_open_init;
+	s->s_release = qs_s5k4e1_sensor_release;
+	s->s_config  = qs_s5k4e1_sensor_config;
+	s->s_mount_angle = 0;
+	s->s_camera_type = BACK_CAMERA_3D;
+	s->s_video_packing = SIDE_BY_SIDE_HALF;
+	s->s_snap_packing = SIDE_BY_SIDE_FULL;
+	bridge_i2c_write_w(0x53, 0x00);
+	msleep(20);
+	gpio_set_value_cansleep(info->sensor_reset, 0);
+	qs_s5k4e1_probe_init_done(info);
+	return rc;
+
+probe_fail:
+	CDBG("qs_s5k4e1_sensor_probe: SENSOR PROBE FAILS!\n");
+	return rc;
+}
+
+static bool streaming = true;
+
+static int qs_s5k4e1_focus_test(void *data, u64 *val)
+{
+	int i = 0;
+	qs_s5k4e1_set_default_focus(0);
+
+	for (i = 0; i < QS_S5K4E1_TOTAL_STEPS_NEAR_TO_FAR; i++) {
+		qs_s5k4e1_move_focus(MOVE_NEAR, 1);
+		msleep(2000);
+	}
+	msleep(5000);
+	for ( ; i > 0; i--) {
+		qs_s5k4e1_move_focus(MOVE_FAR, 1);
+		msleep(2000);
+	}
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cam_focus, qs_s5k4e1_focus_test,
+			NULL, "%lld\n");
+
+static int qs_s5k4e1_step_test(void *data, u64 *val)
+{
+	int rc = 0;
+	struct sensor_large_data cdata;
+	rc = qs_s5k4e1_get_calibration_data
+		(&cdata.data.sensor_3d_cali_data);
+	if (rc < 0)
+		CDBG("%s: Calibration data read fail.\n", __func__);
+
+	return 0;
+}
+
+static int qs_s5k4e1_set_step(void *data, u64 val)
+{
+	qs_s5k4e1_l_region_code_per_step = val & 0xFF;
+	qs_s5k4e1_af_mode = (val >> 8) & 0xFF;
+	qs_s5k4e1_nl_region_code_per_step1 = (val >> 16) & 0xFFFF;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cam_step, qs_s5k4e1_step_test,
+			qs_s5k4e1_set_step, "%lld\n");
+
+static int cam_debug_stream_set(void *data, u64 val)
+{
+	int rc = 0;
+
+	if (val) {
+		qs_s5k4e1_start_stream();
+		streaming = 1;
+	} else {
+		qs_s5k4e1_stop_stream();
+		streaming = 0;
+	}
+
+	return rc;
+}
+
+static int cam_debug_stream_get(void *data, u64 *val)
+{
+	*val = streaming;
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cam_stream, cam_debug_stream_get,
+			cam_debug_stream_set, "%llu\n");
+
+static uint16_t qs_s5k4e1_step_val = QS_S5K4E1_TOTAL_STEPS_NEAR_TO_FAR;
+static uint8_t qs_s5k4e1_step_dir = MOVE_NEAR;
+static int qs_s5k4e1_af_step_config(void *data, u64 val)
+{
+	qs_s5k4e1_step_val = val & 0xFFFF;
+	qs_s5k4e1_step_dir = (val >> 16) & 0x1;
+	CDBG("%s\n", __func__);
+	return 0;
+}
+
+static int qs_s5k4e1_af_step(void *data, u64 *val)
+{
+	int i = 0;
+	int dir = MOVE_NEAR;
+	CDBG("%s\n", __func__);
+	qs_s5k4e1_set_default_focus(0);
+	msleep(5000);
+	if (qs_s5k4e1_step_dir == 1)
+		dir = MOVE_FAR;
+
+	for (i = 0; i < qs_s5k4e1_step_val; i += 4) {
+		qs_s5k4e1_move_focus(dir, 4);
+		msleep(1000);
+	}
+	qs_s5k4e1_set_default_focus(0);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(af_step, qs_s5k4e1_af_step,
+			qs_s5k4e1_af_step_config, "%llu\n");
+
+static int cam_debug_init(void)
+{
+	struct dentry *cam_dir;
+	debugfs_base = debugfs_create_dir("sensor", NULL);
+	if (!debugfs_base)
+		return -ENOMEM;
+
+	cam_dir = debugfs_create_dir("qs_s5k4e1", debugfs_base);
+	if (!cam_dir)
+		return -ENOMEM;
+
+	if (!debugfs_create_file("focus", S_IRUGO | S_IWUSR, cam_dir,
+							 NULL, &cam_focus))
+		return -ENOMEM;
+	if (!debugfs_create_file("step", S_IRUGO | S_IWUSR, cam_dir,
+							 NULL, &cam_step))
+		return -ENOMEM;
+	if (!debugfs_create_file("stream", S_IRUGO | S_IWUSR, cam_dir,
+							 NULL, &cam_stream))
+		return -ENOMEM;
+	if (!debugfs_create_file("af_step", S_IRUGO | S_IWUSR, cam_dir,
+							 NULL, &af_step))
+		return -ENOMEM;
+	return 0;
+}
+
+static int __qs_s5k4e1_probe(struct platform_device *pdev)
+{
+	return msm_camera_drv_start(pdev, qs_s5k4e1_sensor_probe);
+}
+
+static struct platform_driver msm_camera_driver = {
+	.probe = __qs_s5k4e1_probe,
+	.driver = {
+		.name = "msm_camera_qs_s5k4e1",
+	.owner = THIS_MODULE,
+	},
+};
+
+static int __init qs_s5k4e1_init(void)
+{
+	return platform_driver_register(&msm_camera_driver);
+}
+
+module_init(qs_s5k4e1_init);
+void qs_s5k4e1_exit(void)
+{
+	i2c_del_driver(&qs_s5k4e1_i2c_driver);
+}
+MODULE_DESCRIPTION("Samsung 5MP Bayer sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/msm/qs_s5k4e1.h b/drivers/media/video/msm/qs_s5k4e1.h
new file mode 100644
index 0000000..f9c4c3f
--- /dev/null
+++ b/drivers/media/video/msm/qs_s5k4e1.h
@@ -0,0 +1,89 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef QS_S5K4E1_H
+#define QS_S5K4E1_H
+#include <linux/types.h>
+#include <mach/board.h>
+extern struct qs_s5k4e1_reg qs_s5k4e1_regs;
+
+#define LENS_SHADE_TABLE 16
+
+struct qs_s5k4e1_i2c_reg_conf {
+	unsigned short waddr;
+	unsigned short wdata;
+};
+
+struct qs_s5k4e1_i2c_conf_array {
+	struct qs_s5k4e1_i2c_reg_conf *conf;
+	unsigned short size;
+};
+
+enum qs_s5k4e1_test_mode_t {
+	TEST_OFF,
+	TEST_1,
+	TEST_2,
+	TEST_3
+};
+
+enum qs_s5k4e1_resolution_t {
+	QTR_2D_SIZE,
+	FULL_2D_SIZE,
+	QTR_3D_SIZE,
+	FULL_3D_SIZE,
+	INVALID_SIZE
+};
+enum qs_s5k4e1_setting {
+	RES_PREVIEW,
+	RES_CAPTURE,
+	RES_3D_PREVIEW,
+	RES_3D_CAPTURE
+};
+enum qs_s5k4e1_cam_mode_t {
+	MODE_2D_RIGHT,
+	MODE_2D_LEFT,
+	MODE_3D,
+	MODE_INVALID
+};
+enum qs_s5k4e1_reg_update {
+	/* Sensor registers that need to be updated during initialization */
+	REG_INIT,
+	/* Sensor registers that need periodic I2C writes */
+	UPDATE_PERIODIC,
+	/* All the sensor Registers will be updated */
+	UPDATE_ALL,
+	/* Not valid update */
+	UPDATE_INVALID
+};
+
+enum qs_s5k4e1_reg_mode {
+	QS_S5K4E1_FRAME_LENGTH_LINES_H = 1,
+	QS_S5K4E1_FRAME_LENGTH_LINES_L,
+	QS_S5K4E1_LINE_LENGTH_PCK_H,
+	QS_S5K4E1_LINE_LENGTH_PCK_L,
+};
+
+struct qs_s5k4e1_reg {
+	const struct qs_s5k4e1_i2c_reg_conf *rec_settings;
+	const unsigned short rec_size;
+	const struct qs_s5k4e1_i2c_reg_conf *reg_prev;
+	const unsigned short reg_prev_size;
+	const struct qs_s5k4e1_i2c_reg_conf *reg_snap;
+	const unsigned short reg_snap_size;
+	const struct qs_s5k4e1_i2c_reg_conf (*reg_lens)[LENS_SHADE_TABLE];
+	const unsigned short reg_lens_size;
+	const struct qs_s5k4e1_i2c_reg_conf *reg_default_lens;
+	const unsigned short reg_default_lens_size;
+	const struct qs_s5k4e1_i2c_conf_array *conf_array;
+};
+#endif /* QS_S5K4E1_H */
diff --git a/drivers/media/video/msm/qs_s5k4e1_reg.c b/drivers/media/video/msm/qs_s5k4e1_reg.c
new file mode 100644
index 0000000..22876de
--- /dev/null
+++ b/drivers/media/video/msm/qs_s5k4e1_reg.c
@@ -0,0 +1,799 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+
+#include "qs_s5k4e1.h"
+
+struct qs_s5k4e1_i2c_reg_conf qs_s5k4e1_prev_settings_3d[] = {
+	{0x0100, 0x00},
+	/*Frame Length*/
+	{0x0340, 0x04},
+	{0x0341, 0x90},
+	/*Line Length*/
+	{0x0342, 0x0A},
+	{0x0343, 0xB2},
+	{0x3030, 0x06},
+	{0x3017, 0xA4},
+	{0x301B, 0x88},
+	{0x30BC, 0x90},
+	{0x301C, 0x04},
+	{0x0202, 0x04},
+	{0x0203, 0x12},
+	{0x0204, 0x00},
+	{0x0205, 0x80},
+	{0x0306, 0x00},
+	{0x0307, 0x60},
+	{0x30F1, 0x70},
+/*MIPI Size Setting*/
+	{0x30A9, 0x02},
+	{0x300E, 0xE8},
+	{0x0387, 0x01},
+	{0x0344, 0x01},
+	{0x0345, 0x18},
+	{0x0348, 0x09},
+	{0x0349, 0x17},
+	{0x0346, 0x01},
+	{0x0347, 0x94},
+	{0x034A, 0x06},
+	{0x034B, 0x13},
+	{0x0380, 0x00},
+	{0x0381, 0x01},
+	{0x0382, 0x00},
+	{0x0383, 0x01},
+	{0x0384, 0x00},
+	{0x0385, 0x01},
+	{0x0386, 0x00},
+	{0x0387, 0x01},
+	{0x034C, 0x04},
+	{0x034D, 0x00},
+	{0x034E, 0x04},
+	{0x034F, 0x80},
+	{0x30BF, 0xAA},
+	{0x30C0, 0x40},
+	{0x30C8, 0x04},
+	{0x30C9, 0x00},
+};
+
+struct qs_s5k4e1_i2c_reg_conf qs_s5k4e1_prev_settings_2d[] = {
+	{0x0100, 0x00},
+	{0x0340, 0x03},
+	{0x0341, 0xe0},
+	{0x0342, 0x0A},
+	{0x0343, 0xB2},
+	{0x3030, 0x06},
+	{0x301B, 0x83},
+	{0x30BC, 0x98},
+	{0x301C, 0x04},
+	{0x0202, 0x01},
+	{0x0203, 0xFD},
+	{0x0204, 0x00},
+	{0x0205, 0x80},
+	{0x0306, 0x00},
+	{0x0307, 0x64},
+	{0x30F1, 0xa0},
+	{0x30A9, 0x02},
+	{0x300E, 0xEB},
+	{0x0387, 0x03},
+	{0x0344, 0x00},
+	{0x0345, 0x00},
+	{0x0348, 0x0A},
+	{0x0349, 0x2F},
+	{0x0346, 0x00},
+	{0x0347, 0x00},
+	{0x034A, 0x07},
+	{0x034B, 0xA7},
+	{0x0380, 0x00},
+	{0x0381, 0x01},
+	{0x0382, 0x00},
+	{0x0383, 0x01},
+	{0x0384, 0x00},
+	{0x0385, 0x01},
+	{0x0386, 0x00},
+	{0x0387, 0x03},
+	{0x034C, 0x05},
+	{0x034D, 0x10},
+	{0x034E, 0x03},
+	{0x034F, 0xd4},
+	{0x30BF, 0xAB},
+	{0x30C0, 0xc0},
+	{0x30C8, 0x06},
+	{0x30C9, 0x54},
+};
+
+struct qs_s5k4e1_i2c_reg_conf qs_s5k4e1_snap_settings_2d[] = {
+	{0x0100, 0x00},
+	{0x0340, 0x07},
+	{0x0341, 0xb4},
+	{0x0342, 0x0A},
+	{0x0343, 0xB2},
+	{0x3030, 0x06}, /*shut streaming off*/
+	{0x300E, 0xE8},
+	{0x301B, 0x75},
+	{0x301C, 0x04},
+	{0x30BC, 0x98},
+	{0x0202, 0x04},
+	{0x0203, 0x12},
+	{0x0204, 0x00},
+	{0x0205, 0x80},
+	{0x0306, 0x00},
+	{0x0307, 0x64},
+	{0x30F1, 0xa0},
+	{0x30A9, 0x03},/*Horizontal Binning Off*/
+	{0x300E, 0xE8},/*Vertical Binning Off*/
+	{0x0387, 0x01},/*y_odd_inc*/
+	{0x034C, 0x0A},/*x_output size*/
+	{0x034D, 0x30},
+	{0x034E, 0x07},/*y_output size*/
+	{0x034F, 0xA8},
+	{0x30BF, 0xAB},/*outif_enable[7], data_type[5:0](2Bh = bayer 10bit)*/
+	{0x30C0, 0x86},/*video_offset[7:4] 3260%12*/
+	{0x30C8, 0x0C},/*video_data_length 3260 = 2608 * 1.25*/
+	{0x30C9, 0xBC},
+
+};
+
+struct qs_s5k4e1_i2c_reg_conf qs_s5k4e1_snap_settings_3d[] = {
+	{0x0100, 0x00},
+
+/* Frame Length*/
+	{0x0340, 0x09},
+	{0x0341, 0x20},
+/* Line Length*/
+	{0x0342, 0x0A},
+	{0x0343, 0xB2},
+	{0x3030, 0x06},/*shut streaming off*/
+/*Analog Setting*/
+	{0x3017, 0xA4},
+	{0x301B, 0x88},
+	{0x30BC, 0x90},
+	{0x301C, 0x04},
+/*Integration setting ... */
+	{0x0202, 0x04},
+	{0x0203, 0x12},
+	{0x0204, 0x00},
+	{0x0205, 0x80},
+/*PLL setting ...*/
+	{0x0306, 0x00},
+	{0x0307, 0x60},
+	{0x30F1, 0x70},
+/*MIPI Size Setting*/
+	{0x30A9, 0x01},
+	{0x300E, 0xE8},
+	{0x0387, 0x01},
+	{0x0344, 0x01},/*x_addr_start*/
+	{0x0345, 0x14},
+	{0x0348, 0x09},/*x_addr_end*/
+	{0x0349, 0x17},
+	{0x0346, 0x01},/*y_addr_start*/
+	{0x0347, 0x94},
+	{0x034A, 0x06},/*y_addr_end*/
+	{0x034B, 0x13},
+	{0x0380, 0x00},/*x_even_inc 1*/
+	{0x0381, 0x01},
+	{0x0382, 0x00},/*x_odd_inc 1*/
+	{0x0383, 0x01},
+	{0x0384, 0x00},/*y_even_inc 1*/
+	{0x0385, 0x01},
+	{0x0386, 0x00},/*y_odd_inc 1*/
+	{0x0387, 0x01},
+	{0x034C, 0x08},/*x_output size*/
+	{0x034D, 0x00},
+	{0x034E, 0x04},/*y_output size*/
+	{0x034F, 0x80},
+	{0x30BF, 0xAA},/*outif_enable[7], data_type[5:0](2Bh = bayer 8bit)*/
+	{0x30C0, 0x80},/*video_offset[7:4]*/
+	{0x30C8, 0x08},/*video_data_length*/
+	{0x30C9, 0x00},
+
+};
+
+struct qs_s5k4e1_i2c_reg_conf qs_s5k4e1_recommend_settings[] = {
+	{0x0100, 0x00},
+
+	{0x3030, 0x06},/*shut streaming*/
+/*Analog Setting*/
+	{0x3000, 0x05},
+	{0x3001, 0x03},
+	{0x3002, 0x08},
+	{0x3003, 0x09},
+	{0x3004, 0x2E},
+	{0x3005, 0x06},
+	{0x3006, 0x34},
+	{0x3007, 0x00},
+	{0x3008, 0x3C},
+	{0x3009, 0x3C},
+	{0x300A, 0x28},
+	{0x300B, 0x04},
+	{0x300C, 0x0A},
+	{0x300D, 0x02},
+	{0x300F, 0x82},
+	{0x3010, 0x00},
+	{0x3011, 0x4C},
+	{0x3012, 0x30},
+	{0x3013, 0xC0},
+	{0x3014, 0x00},
+	{0x3015, 0x00},
+	{0x3016, 0x2C},
+	{0x3017, 0x94},
+	{0x3018, 0x78},
+	{0x301D, 0xD4},
+	{0x3021, 0x02},
+	{0x3022, 0x24},
+	{0x3024, 0x40},
+	{0x3027, 0x08},
+	{0x3029, 0xC6},
+	{0x302B, 0x01},
+	{0x30D8, 0x3F},
+/* ADLC setting ...*/
+	{0x3070, 0x5F},
+	{0x3071, 0x00},
+	{0x3080, 0x04},
+	{0x3081, 0x38},
+
+/*MIPI setting*/
+	{0x30BD, 0x00},/*SEL_CCP[0]*/
+	{0x3084, 0x15},/*SYNC Mode*/
+	{0x30BE, 0x1A},/*M_PCLKDIV_AUTO[4], M_DIV_PCLK[3:0]*/
+	{0x30C1, 0x01},/*pack video enable [0]*/
+	{0x30EE, 0x02},/*DPHY enable [1]*/
+	{0x3111, 0x86},/*Embedded data off [5]*/
+/*For MIPI T8 T9*/
+	{0x30E3, 0x38},
+	{0x30E4, 0x40},
+	{0x3113, 0x70},
+	{0x3114, 0x80},
+	{0x3115, 0x7B},
+	{0x3116, 0xC0},
+	{0x30EE, 0x12},
+
+/*PLL setting ...*/
+	{0x0305, 0x06},
+	{0x30B5, 0x01},
+	{0x30E2, 0x02},/*num lanes[1:0] = 1*/
+
+};
+static struct qs_s5k4e1_i2c_reg_conf qs_s5k4e1_default_lenshading_settings[] = {
+
+	{0x3200, 0x00},
+	{0x3201, 0x9a},
+	{0x3202, 0x56},
+	{0x3203, 0xf },
+	{0x3204, 0xd8},
+	{0x3205, 0x94},
+	{0x3206, 0x0 },
+	{0x3207, 0x10},
+	{0x3208, 0x71},
+	{0x3209, 0x0 },
+	{0x320a, 0x9 },
+	{0x320b, 0xc1},
+	{0x320c, 0xf },
+	{0x320d, 0xf1},
+	{0x320e, 0x3d},
+	{0x320f, 0x0 },
+	{0x3210, 0xa },
+	{0x3211, 0x93},
+	{0x3212, 0xf },
+	{0x3213, 0xc9},
+	{0x3214, 0xa1},
+	{0x3215, 0x0 },
+	{0x3216, 0x10},
+	{0x3217, 0x89},
+	{0x3218, 0xf },
+	{0x3219, 0xfb},
+	{0x321a, 0xf3},
+	{0x321b, 0xf },
+	{0x321c, 0xf8},
+	{0x321d, 0xfc},
+	{0x321e, 0x0 },
+	{0x321f, 0x4 },
+	{0x3220, 0xe3},
+	{0x3221, 0xf },
+	{0x3222, 0xfe},
+	{0x3223, 0x94},
+	{0x3224, 0x0 },
+	{0x3225, 0x24},
+	{0x3226, 0x59},
+	{0x3227, 0xf },
+	{0x3228, 0xe9},
+	{0x3229, 0x68},
+	{0x322a, 0xf },
+	{0x322b, 0xfa},
+	{0x322c, 0x7f},
+	{0x322d, 0x0 },
+	{0x322e, 0x13},
+	{0x322f, 0xe1},
+	{0x3230, 0x0 },
+	{0x3231, 0x3 },
+	{0x3232, 0xbc},
+	{0x3233, 0xf },
+	{0x3234, 0xf0},
+	{0x3235, 0xa1},
+	{0x3236, 0xf },
+	{0x3237, 0xf4},
+	{0x3238, 0xc9},
+	{0x3239, 0x0 },
+	{0x323a, 0x11},
+	{0x323b, 0x4b},
+	{0x323c, 0x0 },
+	{0x323d, 0x12},
+	{0x323e, 0xc5},
+	{0x323f, 0xf },
+	{0x3240, 0xe3},
+	{0x3241, 0xb },
+	{0x3242, 0xf },
+	{0x3243, 0xf8},
+	{0x3244, 0x4f},
+	{0x3245, 0x0 },
+	{0x3246, 0x13},
+	{0x3247, 0xac},
+	{0x3248, 0x0 },
+	{0x3249, 0x0 },
+	{0x324a, 0x7c},
+	{0x324b, 0xf },
+	{0x324c, 0xfe},
+	{0x324d, 0xdd},
+	{0x324e, 0xf },
+	{0x324f, 0xf2},
+	{0x3250, 0x96},
+	{0x3251, 0x0 },
+	{0x3252, 0x8 },
+	{0x3253, 0xef},
+	{0x3254, 0x0 },
+	{0x3255, 0x6 },
+	{0x3256, 0xa4},
+	{0x3257, 0x0 },
+	{0x3258, 0x2 },
+	{0x3259, 0x4b},
+	{0x325a, 0x0 },
+	{0x325b, 0x6 },
+	{0x325c, 0x85},
+	{0x325d, 0xf },
+	{0x325e, 0xf8},
+	{0x325f, 0x6a},
+	{0x3260, 0xf },
+	{0x3261, 0xfd},
+	{0x3262, 0x70},
+	{0x3263, 0x0 },
+	{0x3264, 0xd },
+	{0x3265, 0xa9},
+	{0x3266, 0xf },
+	{0x3267, 0xfd},
+	{0x3268, 0xf8},
+	{0x3269, 0xf },
+	{0x326a, 0xec},
+	{0x326b, 0xfc},
+	{0x326c, 0x0 },
+	{0x326d, 0xa7},
+	{0x326e, 0x5 },
+	{0x326f, 0xf },
+	{0x3270, 0xd6},
+	{0x3271, 0x19},
+	{0x3272, 0x0 },
+	{0x3273, 0xa },
+	{0x3274, 0xe8},
+	{0x3275, 0x0 },
+	{0x3276, 0x17},
+	{0x3277, 0x1 },
+	{0x3278, 0xf },
+	{0x3279, 0xe7},
+	{0x327a, 0xa0},
+	{0x327b, 0x0 },
+	{0x327c, 0xb },
+	{0x327d, 0xc3},
+	{0x327e, 0xf },
+	{0x327f, 0xc0},
+	{0x3280, 0xe3},
+	{0x3281, 0x0 },
+	{0x3282, 0x15},
+	{0x3283, 0x5a},
+	{0x3284, 0xf },
+	{0x3285, 0xf9},
+	{0x3286, 0xa0},
+	{0x3287, 0xf },
+	{0x3288, 0xf4},
+	{0x3289, 0xce},
+	{0x328a, 0x0 },
+	{0x328b, 0xb },
+	{0x328c, 0x72},
+	{0x328d, 0xf },
+	{0x328e, 0xfb},
+	{0x328f, 0xb5},
+	{0x3290, 0x0 },
+	{0x3291, 0x2f},
+	{0x3292, 0xb },
+	{0x3293, 0xf },
+	{0x3294, 0xde},
+	{0x3295, 0xc0},
+	{0x3296, 0x0 },
+	{0x3297, 0x0 },
+	{0x3298, 0x58},
+	{0x3299, 0x0 },
+	{0x329a, 0x1b},
+	{0x329b, 0x5 },
+	{0x329c, 0xf },
+	{0x329d, 0xf9},
+	{0x329e, 0x23},
+	{0x329f, 0xf },
+	{0x32a0, 0xf3},
+	{0x32a1, 0x94},
+	{0x32a2, 0xf },
+	{0x32a3, 0xe7},
+	{0x32a4, 0xc2},
+	{0x32a5, 0x0 },
+	{0x32a6, 0x1d},
+	{0x32a7, 0xe5},
+	{0x32a8, 0x0 },
+	{0x32a9, 0x5 },
+	{0x32aa, 0xaf},
+	{0x32ab, 0xf },
+	{0x32ac, 0xe3},
+	{0x32ad, 0xb7},
+	{0x32ae, 0xf },
+	{0x32af, 0xf8},
+	{0x32b0, 0x34},
+	{0x32b1, 0x0 },
+	{0x32b2, 0x1c},
+	{0x32b3, 0x3d},
+	{0x32b4, 0x0 },
+	{0x32b5, 0x10},
+	{0x32b6, 0x4a},
+	{0x32b7, 0xf },
+	{0x32b8, 0xfa},
+	{0x32b9, 0x7 },
+	{0x32ba, 0xf },
+	{0x32bb, 0xff},
+	{0x32bc, 0x16},
+	{0x32bd, 0x0 },
+	{0x32be, 0x5 },
+	{0x32bf, 0x4e},
+	{0x32c0, 0x0 },
+	{0x32c1, 0xc },
+	{0x32c2, 0x1b},
+	{0x32c3, 0xf },
+	{0x32c4, 0xf1},
+	{0x32c5, 0xdb},
+	{0x32c6, 0xf },
+	{0x32c7, 0xfc},
+	{0x32c8, 0xf8},
+	{0x32c9, 0xf },
+	{0x32ca, 0xf4},
+	{0x32cb, 0xad},
+	{0x32cc, 0xf },
+	{0x32cd, 0xfb},
+	{0x32ce, 0x59},
+	{0x32cf, 0x0 },
+	{0x32d0, 0x9 },
+	{0x32d1, 0xf7},
+	{0x32d2, 0x0 },
+	{0x32d3, 0x0 },
+	{0x32d4, 0xc1},
+	{0x32d5, 0xf },
+	{0x32d6, 0xf5},
+	{0x32d7, 0x30},
+	{0x32d8, 0x0 },
+	{0x32d9, 0x83},
+	{0x32da, 0x1d},
+	{0x32db, 0xf },
+	{0x32dc, 0xe3},
+	{0x32dd, 0x3c},
+	{0x32de, 0x0 },
+	{0x32df, 0xa },
+	{0x32e0, 0x10},
+	{0x32e1, 0x0 },
+	{0x32e2, 0x7 },
+	{0x32e3, 0x65},
+	{0x32e4, 0xf },
+	{0x32e5, 0xfe},
+	{0x32e6, 0x79},
+	{0x32e7, 0xf },
+	{0x32e8, 0xfd},
+	{0x32e9, 0x57},
+	{0x32ea, 0xf },
+	{0x32eb, 0xd6},
+	{0x32ec, 0x8f},
+	{0x32ed, 0x0 },
+	{0x32ee, 0x3 },
+	{0x32ef, 0x93},
+	{0x32f0, 0x0 },
+	{0x32f1, 0x6 },
+	{0x32f2, 0xa },
+	{0x32f3, 0xf },
+	{0x32f4, 0xfa},
+	{0x32f5, 0x6c},
+	{0x32f6, 0xf },
+	{0x32f7, 0xf1},
+	{0x32f8, 0x1e},
+	{0x32f9, 0x0 },
+	{0x32fa, 0x14},
+	{0x32fb, 0xe7},
+	{0x32fc, 0x0 },
+	{0x32fd, 0x1f},
+	{0x32fe, 0x2d},
+	{0x32ff, 0x0 },
+	{0x3300, 0x7 },
+	{0x3301, 0x5e},
+	{0x3302, 0xf },
+	{0x3303, 0xe0},
+	{0x3304, 0x55},
+	{0x3305, 0x0 },
+	{0x3306, 0x20},
+	{0x3307, 0x93},
+	{0x3308, 0x0 },
+	{0x3309, 0xf },
+	{0x330a, 0x20},
+	{0x330b, 0xf },
+	{0x330c, 0xd7},
+	{0x330d, 0xf5},
+	{0x330e, 0xf },
+	{0x330f, 0xef},
+	{0x3310, 0xb8},
+	{0x3311, 0xf },
+	{0x3312, 0xf0},
+	{0x3313, 0x29},
+	{0x3314, 0x0 },
+	{0x3315, 0x27},
+	{0x3316, 0x5e},
+	{0x3317, 0xf },
+	{0x3318, 0xda},
+	{0x3319, 0x14},
+	{0x331a, 0xf },
+	{0x331b, 0xef},
+	{0x331c, 0x93},
+	{0x331d, 0x0 },
+	{0x331e, 0x2c},
+	{0x331f, 0xdc},
+	{0x3320, 0x0 },
+	{0x3321, 0xe },
+	{0x3322, 0x2d},
+	{0x3323, 0x0 },
+	{0x3324, 0x6 },
+	{0x3325, 0xcf},
+	{0x3326, 0xf },
+	{0x3327, 0xfb},
+	{0x3328, 0x26},
+	{0x3329, 0x0 },
+	{0x332a, 0x3 },
+	{0x332b, 0x5 },
+	{0x332c, 0x0 },
+	{0x332d, 0x6 },
+	{0x332e, 0xa6},
+	{0x332f, 0xf },
+	{0x3330, 0xf7},
+	{0x3331, 0x7b},
+	{0x3332, 0xf },
+	{0x3333, 0xf9},
+	{0x3334, 0xb },
+	{0x3335, 0x0 },
+	{0x3336, 0x7 },
+	{0x3337, 0x5a},
+	{0x3338, 0xf },
+	{0x3339, 0xe4},
+	{0x333a, 0x7a},
+	{0x333b, 0x0 },
+	{0x333c, 0x1b},
+	{0x333d, 0xb0},
+	{0x333e, 0x0 },
+	{0x333f, 0x2 },
+	{0x3340, 0xa7},
+	{0x3341, 0xf },
+	{0x3342, 0xe9},
+	{0x3343, 0x3a},
+	{0x3344, 0x0 },
+	{0x3345, 0x95},
+	{0x3346, 0x42},
+	{0x3347, 0xf },
+	{0x3348, 0xda},
+	{0x3349, 0x45},
+	{0x334a, 0x0 },
+	{0x334b, 0x16},
+	{0x334c, 0x7a},
+	{0x334d, 0xf },
+	{0x334e, 0xfb},
+	{0x334f, 0x32},
+	{0x3350, 0x0 },
+	{0x3351, 0x6 },
+	{0x3352, 0x35},
+	{0x3353, 0xf },
+	{0x3354, 0xfc},
+	{0x3355, 0x8f},
+	{0x3356, 0xf },
+	{0x3357, 0xca},
+	{0x3358, 0xd5},
+	{0x3359, 0x0 },
+	{0x335a, 0x11},
+	{0x335b, 0x59},
+	{0x335c, 0xf },
+	{0x335d, 0xfa},
+	{0x335e, 0xaa},
+	{0x335f, 0xf },
+	{0x3360, 0xfe},
+	{0x3361, 0x84},
+	{0x3362, 0xf },
+	{0x3363, 0xf6},
+	{0x3364, 0x8f},
+	{0x3365, 0x0 },
+	{0x3366, 0xb },
+	{0x3367, 0x70},
+	{0x3368, 0x0 },
+	{0x3369, 0x25},
+	{0x336a, 0x83},
+	{0x336b, 0xf },
+	{0x336c, 0xe7},
+	{0x336d, 0x27},
+	{0x336e, 0xf },
+	{0x336f, 0xf1},
+	{0x3370, 0x72},
+	{0x3371, 0x0 },
+	{0x3372, 0x21},
+	{0x3373, 0x6d},
+	{0x3374, 0x0 },
+	{0x3375, 0x2 },
+	{0x3376, 0xc3},
+	{0x3377, 0xf },
+	{0x3378, 0xe8},
+	{0x3379, 0x5a},
+	{0x337a, 0xf },
+	{0x337b, 0xf2},
+	{0x337c, 0x73},
+	{0x337d, 0x0 },
+	{0x337e, 0x19},
+	{0x337f, 0xa5},
+	{0x3380, 0x0 },
+	{0x3381, 0x1a},
+	{0x3382, 0x81},
+	{0x3383, 0xf },
+	{0x3384, 0xd0},
+	{0x3385, 0x31},
+	{0x3386, 0xf },
+	{0x3387, 0xfb},
+	{0x3388, 0xff},
+	{0x3389, 0x0 },
+	{0x338a, 0x1e},
+	{0x338b, 0xe1},
+	{0x338c, 0x0 },
+	{0x338d, 0x5 },
+	{0x338e, 0xe1},
+	{0x338f, 0xf },
+	{0x3390, 0xee},
+	{0x3391, 0xe2},
+	{0x3392, 0xf },
+	{0x3393, 0xf6},
+	{0x3394, 0xcf},
+	{0x3395, 0x0 },
+	{0x3396, 0x13},
+	{0x3397, 0x8f},
+	{0x3398, 0x0 },
+	{0x3399, 0x3 },
+	{0x339a, 0x61},
+	{0x339b, 0xf },
+	{0x339c, 0xf8},
+	{0x339d, 0xf7},
+	{0x339e, 0x0 },
+	{0x339f, 0x0 },
+	{0x33a0, 0xb5},
+	{0x33a1, 0x0 },
+	{0x33a2, 0x5 },
+	{0x33a3, 0x78},
+	{0x33a4, 0xf },
+	{0x33a5, 0xf4},
+	{0x33a6, 0x5 },
+	{0x33a7, 0x0 },
+	{0x33a8, 0xc },
+	{0x33a9, 0xe },
+	{0x33aa, 0x0 },
+	{0x33ab, 0x3 },
+	{0x33ac, 0x53},
+	{0x33ad, 0xf },
+	{0x33ae, 0xec},
+	{0x33af, 0xbd},
+};
+
+const struct
+qs_s5k4e1_i2c_reg_conf qs_s5k4e1_lenshading_settings[4][LENS_SHADE_TABLE] = {
+	{/*2D Preview*/
+		{0x3097, 0x52},/*sh4ch_blk_width = 82*/
+		{0x3098, 0x3e},/*sh4ch_blk_height = 62*/
+		{0x3099, 0x03},/*sh4ch_step_x msb (sh4ch_step_x = 799)*/
+		{0x309a, 0x1f},/*sh4ch_step_x lsb*/
+		{0x309b, 0x04},/*sh4ch_step_y msb (sh4ch_step_y = 1057)*/
+		{0x309c, 0x21},/*sh4ch_step_y lsb*/
+		{0x309d, 0x00},/*sh4ch_start_blk_cnt_x = 0*/
+		{0x309e, 0x00},/*sh4ch_start_int_cnt_x = 0*/
+		{0x309f, 0x00},/*sh4ch_start_frac_cnt_x msb (0)*/
+		{0x30a0, 0x00},/*sh4ch_start_frac_cnt_x lsb*/
+		{0x30a1, 0x00},/*sh4ch_start_blk_cnt_y = 0*/
+		{0x30a2, 0x00},/*sh4ch_start_int_cnt_y = 0*/
+		{0x30a3, 0x00},/*sh4ch_start_frac_cnt_y msb (0)*/
+		{0x30a4, 0x00},/*sh4ch_start_frac_cnt_y lsb*/
+		{0x30a5, 0x01},
+		{0x30a6, 0x00},/*gs_pedestal	= 64*/
+	},
+	{/*2D Snapshot*/
+		{0x3097, 0x52},/*sh4ch_blk_width = 82*/
+		{0x3098, 0x7b},/*sh4ch_blk_height = 123*/
+		{0x3099, 0x03},/*sh4ch_step_x msb (sh4ch_step_x = 799)*/
+		{0x309a, 0x1f},/*sh4ch_step_x lsb*/
+		{0x309b, 0x02},/*sh4ch_step_y msb (sh4ch_step_y = 533)*/
+		{0x309c, 0x15},/*sh4ch_step_y lsb*/
+		{0x309d, 0x00},/*sh4ch_start_blk_cnt_x = 0*/
+		{0x309e, 0x00},/*sh4ch_start_int_cnt_x = 0*/
+		{0x309f, 0x00},/*sh4ch_start_frac_cnt_x msb (0)*/
+		{0x30a0, 0x00},/*sh4ch_start_frac_cnt_x lsb*/
+		{0x30a1, 0x00},/*sh4ch_start_blk_cnt_y = 0*/
+		{0x30a2, 0x00},/*sh4ch_start_int_cnt_y = 0*/
+		{0x30a3, 0x00},/*sh4ch_start_frac_cnt_y msb (0)*/
+		{0x30a4, 0x00},/*sh4ch_start_frac_cnt_y lsb*/
+		{0x30a5, 0x01},
+		{0x30a6, 0x00},/*gs_pedestal	= 64*/
+	},
+
+	{/*3D Preview*/
+		{0x3097, 0x52},/*sh4ch_blk_width = 82*/
+		{0x3098, 0x7b},/*sh4ch_blk_height = 123*/
+		{0x3099, 0x03},/*sh4ch_step_x msb (sh4ch_step_x = 799)*/
+		{0x309a, 0x1f},/*sh4ch_step_x lsb*/
+		{0x309b, 0x02},/*sh4ch_step_y msb (sh4ch_step_y = 533)*/
+		{0x309c, 0x15},/*sh4ch_step_y lsb*/
+		{0x309d, 0x3a},/*sh4ch_start_blk_cnt_x = 58*/
+		{0x309e, 0x01},/*sh4ch_start_int_cnt_x = 1*/
+		{0x309f, 0xb5},/*sh4ch_start_frac_cnt_x msb (46342)*/
+		{0x30a0, 0x06},/*sh4ch_start_frac_cnt_x lsb*/
+		{0x30a1, 0x23},/*sh4ch_start_blk_cnt_y = 35*/
+		{0x30a2, 0x03},/*sh4ch_start_int_cnt_y = 3*/
+		{0x30a3, 0x48},/*sh4ch_start_frac_cnt_y msb (46342)*/
+		{0x30a4, 0xdf},/*sh4ch_start_frac_cnt_y lsb*/
+		{0x30a5, 0x01},
+		{0x30a6, 0x00},/*gs_pedestal	= 64*/
+	},
+
+	{/*3D Snapshot*/
+		{0x3097, 0x52},/*sh4ch_blk_width = 82*/
+		{0x3098, 0x7b},/*sh4ch_blk_height = 123*/
+		{0x3099, 0x03},/*sh4ch_step_x msb (sh4ch_step_x = 799)*/
+		{0x309a, 0x1f},/*sh4ch_step_x lsb*/
+		{0x309b, 0x02},/*sh4ch_step_y msb (sh4ch_step_y = 533)*/
+		{0x309c, 0x15},/*sh4ch_step_y lsb*/
+		{0x309d, 0x38},/*sh4ch_start_blk_cnt_x = 56*/
+		{0x309e, 0x01},/*sh4ch_start_int_cnt_x = 1*/
+		{0x309f, 0xae},/*sh4ch_start_frac_cnt_x msb (44744)*/
+		{0x30a0, 0xc8},/*sh4ch_start_frac_cnt_x lsb*/
+		{0x30a1, 0x23},/*sh4ch_start_blk_cnt_y = 35*/
+		{0x30a2, 0x03},/*sh4ch_start_int_cnt_y = 3*/
+		{0x30a3, 0x48},/*sh4ch_start_frac_cnt_y msb (44744)*/
+		{0x30a4, 0xdf},/*sh4ch_start_frac_cnt_y lsb*/
+		{0x30a5, 0x01},
+		{0x30a6, 0x00},/*gs_pedestal	= 64*/
+	},
+
+};
+
+struct qs_s5k4e1_i2c_conf_array qs_s5k4e1_confs[] = {
+	{&qs_s5k4e1_prev_settings_2d[0], \
+		ARRAY_SIZE(qs_s5k4e1_prev_settings_2d)},
+	{&qs_s5k4e1_snap_settings_2d[0], \
+		ARRAY_SIZE(qs_s5k4e1_snap_settings_2d)},
+	{&qs_s5k4e1_prev_settings_3d[0], \
+		ARRAY_SIZE(qs_s5k4e1_prev_settings_3d)},
+	{&qs_s5k4e1_snap_settings_3d[0], \
+		ARRAY_SIZE(qs_s5k4e1_snap_settings_3d)},
+};
+struct qs_s5k4e1_reg qs_s5k4e1_regs = {
+	.rec_settings = &qs_s5k4e1_recommend_settings[0],
+	.rec_size = ARRAY_SIZE(qs_s5k4e1_recommend_settings),
+	.reg_lens = &qs_s5k4e1_lenshading_settings[0],
+	.reg_lens_size = ARRAY_SIZE(qs_s5k4e1_lenshading_settings[0]),
+	.reg_default_lens = &qs_s5k4e1_default_lenshading_settings[0],
+	.reg_default_lens_size =
+		ARRAY_SIZE(qs_s5k4e1_default_lenshading_settings),
+	.conf_array = &qs_s5k4e1_confs[0],
+};
diff --git a/drivers/media/video/msm/s5k3e2fx.c b/drivers/media/video/msm/s5k3e2fx.c
new file mode 100644
index 0000000..178a080
--- /dev/null
+++ b/drivers/media/video/msm/s5k3e2fx.c
@@ -0,0 +1,1386 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <media/msm_camera.h>
+#include <mach/gpio.h>
+#include <mach/camera.h>
+#include "s5k3e2fx.h"
+
+#define S5K3E2FX_REG_MODEL_ID   0x0000
+#define S5K3E2FX_MODEL_ID       0x3E2F
+
+/* PLL Registers */
+#define REG_PRE_PLL_CLK_DIV           0x0305
+#define REG_PLL_MULTIPLIER_MSB        0x0306
+#define REG_PLL_MULTIPLIER_LSB        0x0307
+#define REG_VT_PIX_CLK_DIV            0x0301
+#define REG_VT_SYS_CLK_DIV            0x0303
+#define REG_OP_PIX_CLK_DIV            0x0309
+#define REG_OP_SYS_CLK_DIV            0x030B
+
+/* Data Format Registers */
+#define REG_CCP_DATA_FORMAT_MSB       0x0112
+#define REG_CCP_DATA_FORMAT_LSB       0x0113
+
+/* Output Size */
+#define REG_X_OUTPUT_SIZE_MSB         0x034C
+#define REG_X_OUTPUT_SIZE_LSB         0x034D
+#define REG_Y_OUTPUT_SIZE_MSB         0x034E
+#define REG_Y_OUTPUT_SIZE_LSB         0x034F
+
+/* Binning */
+#define REG_X_EVEN_INC                0x0381
+#define REG_X_ODD_INC                 0x0383
+#define REG_Y_EVEN_INC                0x0385
+#define REG_Y_ODD_INC                 0x0387
+/*Reserved register */
+#define REG_BINNING_ENABLE            0x3014
+
+/* Frame Format */
+#define REG_FRAME_LENGTH_LINES_MSB    0x0340
+#define REG_FRAME_LENGTH_LINES_LSB    0x0341
+#define REG_LINE_LENGTH_PCK_MSB       0x0342
+#define REG_LINE_LENGTH_PCK_LSB       0x0343
+
+/* MSR setting */
+/* Reserved registers */
+#define REG_SHADE_CLK_ENABLE          0x30AC
+#define REG_SEL_CCP                   0x30C4
+#define REG_VPIX                      0x3024
+#define REG_CLAMP_ON                  0x3015
+#define REG_OFFSET                    0x307E
+
+/* CDS timing settings */
+/* Reserved registers */
+#define REG_LD_START                  0x3000
+#define REG_LD_END                    0x3001
+#define REG_SL_START                  0x3002
+#define REG_SL_END                    0x3003
+#define REG_RX_START                  0x3004
+#define REG_S1_START                  0x3005
+#define REG_S1_END                    0x3006
+#define REG_S1S_START                 0x3007
+#define REG_S1S_END                   0x3008
+#define REG_S3_START                  0x3009
+#define REG_S3_END                    0x300A
+#define REG_CMP_EN_START              0x300B
+#define REG_CLP_SL_START              0x300C
+#define REG_CLP_SL_END                0x300D
+#define REG_OFF_START                 0x300E
+#define REG_RMP_EN_START              0x300F
+#define REG_TX_START                  0x3010
+#define REG_TX_END                    0x3011
+#define REG_STX_WIDTH                 0x3012
+#define REG_TYPE1_AF_ENABLE           0x3130
+#define DRIVER_ENABLED                0x0001
+#define AUTO_START_ENABLED            0x0010
+#define REG_NEW_POSITION              0x3131
+#define REG_3152_RESERVED             0x3152
+#define REG_315A_RESERVED             0x315A
+#define REG_ANALOGUE_GAIN_CODE_GLOBAL_MSB 0x0204
+#define REG_ANALOGUE_GAIN_CODE_GLOBAL_LSB 0x0205
+#define REG_FINE_INTEGRATION_TIME         0x0200
+#define REG_COARSE_INTEGRATION_TIME       0x0202
+#define REG_COARSE_INTEGRATION_TIME_LSB   0x0203
+
+/* Mode select register */
+#define S5K3E2FX_REG_MODE_SELECT      0x0100
+#define S5K3E2FX_MODE_SELECT_STREAM     0x01   /* start streaming */
+#define S5K3E2FX_MODE_SELECT_SW_STANDBY 0x00   /* software standby */
+#define S5K3E2FX_REG_SOFTWARE_RESET   0x0103
+#define S5K3E2FX_SOFTWARE_RESET         0x01
+#define REG_TEST_PATTERN_MODE         0x0601
+
+struct reg_struct {
+  uint8_t pre_pll_clk_div;               /* 0x0305 */
+  uint8_t pll_multiplier_msb;            /* 0x0306 */
+  uint8_t pll_multiplier_lsb;            /* 0x0307 */
+  uint8_t vt_pix_clk_div;                /* 0x0301 */
+  uint8_t vt_sys_clk_div;                /* 0x0303 */
+  uint8_t op_pix_clk_div;                /* 0x0309 */
+  uint8_t op_sys_clk_div;                /* 0x030B */
+  uint8_t ccp_data_format_msb;           /* 0x0112 */
+  uint8_t ccp_data_format_lsb;           /* 0x0113 */
+  uint8_t x_output_size_msb;             /* 0x034C */
+  uint8_t x_output_size_lsb;             /* 0x034D */
+  uint8_t y_output_size_msb;             /* 0x034E */
+  uint8_t y_output_size_lsb;             /* 0x034F */
+  uint8_t x_even_inc;                    /* 0x0381 */
+  uint8_t x_odd_inc;                     /* 0x0383 */
+  uint8_t y_even_inc;                    /* 0x0385 */
+  uint8_t y_odd_inc;                     /* 0x0387 */
+  uint8_t binning_enable;                /* 0x3014 */
+  uint8_t frame_length_lines_msb;        /* 0x0340 */
+  uint8_t frame_length_lines_lsb;        /* 0x0341 */
+  uint8_t line_length_pck_msb;           /* 0x0342 */
+  uint8_t line_length_pck_lsb;           /* 0x0343 */
+  uint8_t shade_clk_enable;              /* 0x30AC */
+  uint8_t sel_ccp;                       /* 0x30C4 */
+  uint8_t vpix;                          /* 0x3024 */
+  uint8_t clamp_on;                      /* 0x3015 */
+  uint8_t offset;                        /* 0x307E */
+  uint8_t ld_start;                      /* 0x3000 */
+  uint8_t ld_end;                        /* 0x3001 */
+  uint8_t sl_start;                      /* 0x3002 */
+  uint8_t sl_end;                        /* 0x3003 */
+  uint8_t rx_start;                      /* 0x3004 */
+  uint8_t s1_start;                      /* 0x3005 */
+  uint8_t s1_end;                        /* 0x3006 */
+  uint8_t s1s_start;                     /* 0x3007 */
+  uint8_t s1s_end;                       /* 0x3008 */
+  uint8_t s3_start;                      /* 0x3009 */
+  uint8_t s3_end;                        /* 0x300A */
+  uint8_t cmp_en_start;                  /* 0x300B */
+  uint8_t clp_sl_start;                  /* 0x300C */
+  uint8_t clp_sl_end;                    /* 0x300D */
+  uint8_t off_start;                     /* 0x300E */
+  uint8_t rmp_en_start;                  /* 0x300F */
+  uint8_t tx_start;                      /* 0x3010 */
+  uint8_t tx_end;                        /* 0x3011 */
+  uint8_t stx_width;                     /* 0x3012 */
+  uint8_t reg_3152_reserved;             /* 0x3152 */
+  uint8_t reg_315A_reserved;             /* 0x315A */
+  uint8_t analogue_gain_code_global_msb; /* 0x0204 */
+  uint8_t analogue_gain_code_global_lsb; /* 0x0205 */
+  uint8_t fine_integration_time;         /* 0x0200 */
+  uint8_t coarse_integration_time;       /* 0x0202 */
+  uint32_t  size_h;
+  uint32_t  blk_l;
+  uint32_t  size_w;
+  uint32_t  blk_p;
+};
+
+struct reg_struct s5k3e2fx_reg_pat[2] = {
+  {	/* Preview */
+    0x06,  /* pre_pll_clk_div       REG=0x0305 */
+    0x00,  /* pll_multiplier_msb    REG=0x0306 */
+    0x88,  /* pll_multiplier_lsb    REG=0x0307 */
+    0x0a,  /* vt_pix_clk_div        REG=0x0301 */
+    0x01,  /* vt_sys_clk_div        REG=0x0303 */
+    0x0a,  /* op_pix_clk_div        REG=0x0309 */
+    0x01,  /* op_sys_clk_div        REG=0x030B */
+    0x0a,  /* ccp_data_format_msb   REG=0x0112 */
+    0x0a,  /* ccp_data_format_lsb   REG=0x0113 */
+    0x05,  /* x_output_size_msb     REG=0x034C */
+    0x10,  /* x_output_size_lsb     REG=0x034D */
+    0x03,  /* y_output_size_msb     REG=0x034E */
+    0xcc,  /* y_output_size_lsb     REG=0x034F */
+
+    /* enable binning for preview */
+    0x01,  /* x_even_inc             REG=0x0381 */
+    0x01,  /* x_odd_inc              REG=0x0383 */
+    0x01,  /* y_even_inc             REG=0x0385 */
+    0x03,  /* y_odd_inc              REG=0x0387 */
+    0x06,  /* binning_enable         REG=0x3014 */
+
+    0x03,  /* frame_length_lines_msb        REG=0x0340 */
+    0xde,  /* frame_length_lines_lsb        REG=0x0341 */
+    0x0a,  /* line_length_pck_msb           REG=0x0342 */
+    0xac,  /* line_length_pck_lsb           REG=0x0343 */
+    0x81,  /* shade_clk_enable              REG=0x30AC */
+    0x01,  /* sel_ccp                       REG=0x30C4 */
+    0x04,  /* vpix                          REG=0x3024 */
+    0x00,  /* clamp_on                      REG=0x3015 */
+    0x02,  /* offset                        REG=0x307E */
+    0x03,  /* ld_start                      REG=0x3000 */
+    0x9c,  /* ld_end                        REG=0x3001 */
+    0x02,  /* sl_start                      REG=0x3002 */
+    0x9e,  /* sl_end                        REG=0x3003 */
+    0x05,  /* rx_start                      REG=0x3004 */
+    0x0f,  /* s1_start                      REG=0x3005 */
+    0x24,  /* s1_end                        REG=0x3006 */
+    0x7c,  /* s1s_start                     REG=0x3007 */
+    0x9a,  /* s1s_end                       REG=0x3008 */
+    0x10,  /* s3_start                      REG=0x3009 */
+    0x14,  /* s3_end                        REG=0x300A */
+    0x10,  /* cmp_en_start                  REG=0x300B */
+    0x04,  /* clp_sl_start                  REG=0x300C */
+    0x26,  /* clp_sl_end                    REG=0x300D */
+    0x02,  /* off_start                     REG=0x300E */
+    0x0e,  /* rmp_en_start                  REG=0x300F */
+    0x30,  /* tx_start                      REG=0x3010 */
+    0x4e,  /* tx_end                        REG=0x3011 */
+    0x1E,  /* stx_width                     REG=0x3012 */
+    0x08,  /* reg_3152_reserved             REG=0x3152 */
+    0x10,  /* reg_315A_reserved             REG=0x315A */
+    0x00,  /* analogue_gain_code_global_msb REG=0x0204 */
+    0x80,  /* analogue_gain_code_global_lsb REG=0x0205 */
+    0x02,  /* fine_integration_time         REG=0x0200 */
+    0x03,  /* coarse_integration_time       REG=0x0202 */
+		972,	/* size_h */
+		18,	/* blk_l */
+		1296,	/* size_w */
+		1436	/* blk_p */
+  },
+  { /* Snapshot */
+    0x06,  /* pre_pll_clk_div               REG=0x0305 */
+    0x00,  /* pll_multiplier_msb            REG=0x0306 */
+    0x88,  /* pll_multiplier_lsb            REG=0x0307 */
+    0x0a,  /* vt_pix_clk_div                REG=0x0301 */
+    0x01,  /* vt_sys_clk_div                REG=0x0303 */
+    0x0a,  /* op_pix_clk_div                REG=0x0309 */
+    0x01,  /* op_sys_clk_div                REG=0x030B */
+    0x0a,  /* ccp_data_format_msb           REG=0x0112 */
+    0x0a,  /* ccp_data_format_lsb           REG=0x0113 */
+    0x0a,  /* x_output_size_msb             REG=0x034C */
+    0x30,  /* x_output_size_lsb             REG=0x034D */
+    0x07,  /* y_output_size_msb             REG=0x034E */
+    0xa8,  /* y_output_size_lsb             REG=0x034F */
+
+    /* disable binning for snapshot */
+    0x01,  /* x_even_inc                    REG=0x0381 */
+    0x01,  /* x_odd_inc                     REG=0x0383 */
+    0x01,  /* y_even_inc                    REG=0x0385 */
+    0x01,  /* y_odd_inc                     REG=0x0387 */
+    0x00,  /* binning_enable                REG=0x3014 */
+
+    0x07,  /* frame_length_lines_msb        REG=0x0340 */
+    0xb6,  /* frame_length_lines_lsb        REG=0x0341 */
+    0x0a,  /* line_length_pck_msb           REG=0x0342 */
+    0xac,  /* line_length_pck_lsb           REG=0x0343 */
+    0x81,  /* shade_clk_enable              REG=0x30AC */
+    0x01,  /* sel_ccp                       REG=0x30C4 */
+    0x04,  /* vpix                          REG=0x3024 */
+    0x00,  /* clamp_on                      REG=0x3015 */
+    0x02,  /* offset                        REG=0x307E */
+    0x03,  /* ld_start                      REG=0x3000 */
+    0x9c,  /* ld_end                        REG=0x3001 */
+    0x02,  /* sl_start                      REG=0x3002 */
+    0x9e,  /* sl_end                        REG=0x3003 */
+    0x05,  /* rx_start                      REG=0x3004 */
+    0x0f,  /* s1_start                      REG=0x3005 */
+    0x24,  /* s1_end                        REG=0x3006 */
+    0x7c,  /* s1s_start                     REG=0x3007 */
+    0x9a,  /* s1s_end                       REG=0x3008 */
+    0x10,  /* s3_start                      REG=0x3009 */
+    0x14,  /* s3_end                        REG=0x300A */
+    0x10,  /* cmp_en_start                  REG=0x300B */
+    0x04,  /* clp_sl_start                  REG=0x300C */
+    0x26,  /* clp_sl_end                    REG=0x300D */
+    0x02,  /* off_start                     REG=0x300E */
+    0x0e,  /* rmp_en_start                  REG=0x300F */
+    0x30,  /* tx_start                      REG=0x3010 */
+    0x4e,  /* tx_end                        REG=0x3011 */
+    0x1E,  /* stx_width                     REG=0x3012 */
+    0x08,  /* reg_3152_reserved             REG=0x3152 */
+    0x10,  /* reg_315A_reserved             REG=0x315A */
+    0x00,  /* analogue_gain_code_global_msb REG=0x0204 */
+    0x80,  /* analogue_gain_code_global_lsb REG=0x0205 */
+    0x02,  /* fine_integration_time         REG=0x0200 */
+    0x03,  /* coarse_integration_time       REG=0x0202 */
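+    /* size_h, blk_l, size_w, blk_p */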
+    1960,
+    14,
+    2608,
+    124
+	}
+};
+
+struct s5k3e2fx_work {
+	struct work_struct work;
+};
+static struct s5k3e2fx_work *s5k3e2fx_sensorw;
+static struct i2c_client *s5k3e2fx_client;
+
+struct s5k3e2fx_ctrl {
+	const struct msm_camera_sensor_info *sensordata;
+
+	int sensormode;
+	uint32_t fps_divider; /* init to 1 * 0x00000400 */
+	uint32_t pict_fps_divider; /* init to 1 * 0x00000400 */
+
+	uint16_t curr_lens_pos;
+	uint16_t init_curr_lens_pos;
+	uint16_t my_reg_gain;
+	uint32_t my_reg_line_count;
+
+	enum msm_s_resolution prev_res;
+	enum msm_s_resolution pict_res;
+	enum msm_s_resolution curr_res;
+	enum msm_s_test_mode  set_test;
+};
+
+struct s5k3e2fx_i2c_reg_conf {
+	unsigned short waddr;
+	unsigned char  bdata;
+};
+
+static struct s5k3e2fx_ctrl *s5k3e2fx_ctrl;
+static DECLARE_WAIT_QUEUE_HEAD(s5k3e2fx_wait_queue);
+DEFINE_MUTEX(s5k3e2fx_mutex);
+
+static int s5k3e2fx_i2c_rxdata(unsigned short saddr, unsigned char *rxdata,
+	int length)
+{
+	struct i2c_msg msgs[] = {
+		{
+			.addr   = saddr,
+			.flags = 0,
+			.len   = 2,
+			.buf   = rxdata,
+		},
+		{
+			.addr   = saddr,
+			.flags = I2C_M_RD,
+			.len   = length,
+			.buf   = rxdata,
+		},
+	};
+
+	if (i2c_transfer(s5k3e2fx_client->adapter, msgs, 2) < 0) {
+		CDBG("s5k3e2fx_i2c_rxdata failed!\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int32_t s5k3e2fx_i2c_txdata(unsigned short saddr,
+	unsigned char *txdata, int length)
+{
+	struct i2c_msg msg[] = {
+		{
+		.addr  = saddr,
+		.flags = 0,
+		.len = length,
+		.buf = txdata,
+		},
+	};
+
+	if (i2c_transfer(s5k3e2fx_client->adapter, msg, 1) < 0) {
+		CDBG("s5k3e2fx_i2c_txdata failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int32_t s5k3e2fx_i2c_write_b(unsigned short saddr, unsigned short waddr,
+	unsigned char bdata)
+{
+	int32_t rc = -EIO;
+	unsigned char buf[4];
+
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (waddr & 0xFF00)>>8;
+	buf[1] = (waddr & 0x00FF);
+	buf[2] = bdata;
+
+	rc = s5k3e2fx_i2c_txdata(saddr, buf, 3);
+
+	if (rc < 0)
+		CDBG("s5k3e2fx_i2c_write_b failed, addr = 0x%x, val = 0x%x!\n",
+			waddr, bdata);
+
+	return rc;
+}
+
+static int32_t s5k3e2fx_i2c_write_table(
+	struct s5k3e2fx_i2c_reg_conf *reg_cfg_tbl, int num)
+{
+	int i;
+	int32_t rc = -EIO;
+	for (i = 0; i < num; i++) {
+		rc = s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr,
+			reg_cfg_tbl->waddr, reg_cfg_tbl->bdata);
+		if (rc < 0)
+			break;
+		reg_cfg_tbl++;
+	}
+
+	return rc;
+}
+
+static int32_t s5k3e2fx_i2c_read_w(unsigned short saddr, unsigned short raddr,
+	unsigned short *rdata)
+{
+	int32_t rc = 0;
+	unsigned char buf[4];
+
+	if (!rdata)
+		return -EIO;
+
+	memset(buf, 0, sizeof(buf));
+
+	buf[0] = (raddr & 0xFF00)>>8;
+	buf[1] = (raddr & 0x00FF);
+
+	rc = s5k3e2fx_i2c_rxdata(saddr, buf, 2);
+	if (rc < 0)
+		return rc;
+
+	*rdata = buf[0] << 8 | buf[1];
+
+	return rc;
+}
+
+static int s5k3e2fx_probe_init_done(const struct msm_camera_sensor_info *data)
+{
+	gpio_direction_output(data->sensor_reset, 0);
+	gpio_free(data->sensor_reset);
+	return 0;
+}
+
+static int s5k3e2fx_probe_init_sensor(const struct msm_camera_sensor_info *data)
+{
+	int32_t  rc;
+	uint16_t chipid = 0;
+
+	rc = gpio_request(data->sensor_reset, "s5k3e2fx");
+	if (!rc)
+		gpio_direction_output(data->sensor_reset, 1);
+	else
+		goto init_probe_done;
+
+	mdelay(20);
+
+	CDBG("s5k3e2fx_sensor_init(): resetting sensor.\n");
+
+	rc = s5k3e2fx_i2c_read_w(s5k3e2fx_client->addr,
+		S5K3E2FX_REG_MODEL_ID, &chipid);
+	if (rc < 0)
+		goto init_probe_fail;
+
+	if (chipid != S5K3E2FX_MODEL_ID) {
+		CDBG("S5K3E2FX wrong model_id = 0x%x\n", chipid);
+		rc = -ENODEV;
+		goto init_probe_fail;
+	}
+
+	goto init_probe_done;
+
+init_probe_fail:
+	s5k3e2fx_probe_init_done(data);
+init_probe_done:
+	return rc;
+}
+
+static int s5k3e2fx_init_client(struct i2c_client *client)
+{
+	/* Initialize the MSM_CAMI2C Chip */
+	init_waitqueue_head(&s5k3e2fx_wait_queue);
+	return 0;
+}
+
+static const struct i2c_device_id s5k3e2fx_i2c_id[] = {
+	{ "s5k3e2fx", 0},
+	{ }
+};
+
+static int s5k3e2fx_i2c_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	int rc = 0;
+	CDBG("s5k3e2fx_probe called!\n");
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		CDBG("i2c_check_functionality failed\n");
+		goto probe_failure;
+	}
+
+	s5k3e2fx_sensorw = kzalloc(sizeof(struct s5k3e2fx_work), GFP_KERNEL);
+	if (!s5k3e2fx_sensorw) {
+		CDBG("kzalloc failed.\n");
+		rc = -ENOMEM;
+		goto probe_failure;
+	}
+
+	i2c_set_clientdata(client, s5k3e2fx_sensorw);
+	s5k3e2fx_init_client(client);
+	s5k3e2fx_client = client;
+
+	mdelay(50);
+
+	CDBG("s5k3e2fx_probe succeeded! rc = %d\n", rc);
+	return 0;
+
+probe_failure:
+	CDBG("s5k3e2fx_probe failed! rc = %d\n", rc);
+	return rc;
+}
+
+static struct i2c_driver s5k3e2fx_i2c_driver = {
+	.id_table = s5k3e2fx_i2c_id,
+	.probe  = s5k3e2fx_i2c_probe,
+	.remove = __exit_p(s5k3e2fx_i2c_remove),
+	.driver = {
+		.name = "s5k3e2fx",
+	},
+};
+
+static int32_t s5k3e2fx_test(enum msm_s_test_mode mo)
+{
+	int32_t rc = 0;
+
+	if (mo == S_TEST_OFF)
+		rc = 0;
+	else
+		rc = s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr,
+			REG_TEST_PATTERN_MODE, (uint16_t)mo);
+
+	return rc;
+}
+
+static int32_t s5k3e2fx_setting(enum msm_s_reg_update rupdate,
+	enum msm_s_setting rt)
+{
+	int32_t rc = 0;
+	uint16_t num_lperf;
+
+	switch (rupdate) {
+	case S_UPDATE_PERIODIC:
+	if (rt == S_RES_PREVIEW || rt == S_RES_CAPTURE) {
+
+		struct s5k3e2fx_i2c_reg_conf tbl_1[] = {
+			{REG_CCP_DATA_FORMAT_MSB,
+				s5k3e2fx_reg_pat[rt].ccp_data_format_msb},
+			{REG_CCP_DATA_FORMAT_LSB,
+				s5k3e2fx_reg_pat[rt].ccp_data_format_lsb},
+			{REG_X_OUTPUT_SIZE_MSB,
+				s5k3e2fx_reg_pat[rt].x_output_size_msb},
+			{REG_X_OUTPUT_SIZE_LSB,
+				s5k3e2fx_reg_pat[rt].x_output_size_lsb},
+			{REG_Y_OUTPUT_SIZE_MSB,
+				s5k3e2fx_reg_pat[rt].y_output_size_msb},
+			{REG_Y_OUTPUT_SIZE_LSB,
+				s5k3e2fx_reg_pat[rt].y_output_size_lsb},
+			{REG_X_EVEN_INC,
+				s5k3e2fx_reg_pat[rt].x_even_inc},
+			{REG_X_ODD_INC,
+				s5k3e2fx_reg_pat[rt].x_odd_inc},
+			{REG_Y_EVEN_INC,
+				s5k3e2fx_reg_pat[rt].y_even_inc},
+			{REG_Y_ODD_INC,
+				s5k3e2fx_reg_pat[rt].y_odd_inc},
+			{REG_BINNING_ENABLE,
+				s5k3e2fx_reg_pat[rt].binning_enable},
+		};
+
+		struct s5k3e2fx_i2c_reg_conf tbl_2[] = {
+			{REG_FRAME_LENGTH_LINES_MSB, 0},
+			{REG_FRAME_LENGTH_LINES_LSB, 0},
+			{REG_LINE_LENGTH_PCK_MSB,
+				s5k3e2fx_reg_pat[rt].line_length_pck_msb},
+			{REG_LINE_LENGTH_PCK_LSB,
+				s5k3e2fx_reg_pat[rt].line_length_pck_lsb},
+			{REG_SHADE_CLK_ENABLE,
+				s5k3e2fx_reg_pat[rt].shade_clk_enable},
+			{REG_SEL_CCP, s5k3e2fx_reg_pat[rt].sel_ccp},
+			{REG_VPIX, s5k3e2fx_reg_pat[rt].vpix},
+			{REG_CLAMP_ON, s5k3e2fx_reg_pat[rt].clamp_on},
+			{REG_OFFSET, s5k3e2fx_reg_pat[rt].offset},
+			{REG_LD_START, s5k3e2fx_reg_pat[rt].ld_start},
+			{REG_LD_END, s5k3e2fx_reg_pat[rt].ld_end},
+			{REG_SL_START, s5k3e2fx_reg_pat[rt].sl_start},
+			{REG_SL_END, s5k3e2fx_reg_pat[rt].sl_end},
+			{REG_RX_START, s5k3e2fx_reg_pat[rt].rx_start},
+			{REG_S1_START, s5k3e2fx_reg_pat[rt].s1_start},
+			{REG_S1_END, s5k3e2fx_reg_pat[rt].s1_end},
+			{REG_S1S_START, s5k3e2fx_reg_pat[rt].s1s_start},
+			{REG_S1S_END, s5k3e2fx_reg_pat[rt].s1s_end},
+			{REG_S3_START, s5k3e2fx_reg_pat[rt].s3_start},
+			{REG_S3_END, s5k3e2fx_reg_pat[rt].s3_end},
+			{REG_CMP_EN_START, s5k3e2fx_reg_pat[rt].cmp_en_start},
+			{REG_CLP_SL_START, s5k3e2fx_reg_pat[rt].clp_sl_start},
+			{REG_CLP_SL_END, s5k3e2fx_reg_pat[rt].clp_sl_end},
+			{REG_OFF_START, s5k3e2fx_reg_pat[rt].off_start},
+			{REG_RMP_EN_START, s5k3e2fx_reg_pat[rt].rmp_en_start},
+			{REG_TX_START, s5k3e2fx_reg_pat[rt].tx_start},
+			{REG_TX_END, s5k3e2fx_reg_pat[rt].tx_end},
+			{REG_STX_WIDTH, s5k3e2fx_reg_pat[rt].stx_width},
+			{REG_3152_RESERVED,
+				s5k3e2fx_reg_pat[rt].reg_3152_reserved},
+			{REG_315A_RESERVED,
+				s5k3e2fx_reg_pat[rt].reg_315A_reserved},
+			{REG_ANALOGUE_GAIN_CODE_GLOBAL_MSB,
+				s5k3e2fx_reg_pat[rt].
+				analogue_gain_code_global_msb},
+			{REG_ANALOGUE_GAIN_CODE_GLOBAL_LSB,
+				s5k3e2fx_reg_pat[rt].
+				analogue_gain_code_global_lsb},
+			{REG_FINE_INTEGRATION_TIME,
+				s5k3e2fx_reg_pat[rt].fine_integration_time},
+			{REG_COARSE_INTEGRATION_TIME,
+				s5k3e2fx_reg_pat[rt].coarse_integration_time},
+			{S5K3E2FX_REG_MODE_SELECT, S5K3E2FX_MODE_SELECT_STREAM},
+		};
+
+		rc = s5k3e2fx_i2c_write_table(&tbl_1[0],
+			ARRAY_SIZE(tbl_1));
+		if (rc < 0)
+			return rc;
+
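+		/*
+		 * Scale the nominal frame length (in lines) by the Q10 fps
+		 * divider before programming the frame length registers.
+		 */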
+		num_lperf = (uint16_t)
+			((s5k3e2fx_reg_pat[rt].frame_length_lines_msb << 8)
+			& 0xFF00)
+			+ s5k3e2fx_reg_pat[rt].frame_length_lines_lsb;
+
+		num_lperf = num_lperf * s5k3e2fx_ctrl->fps_divider / 0x0400;
+
+		tbl_2[0] = (struct s5k3e2fx_i2c_reg_conf)
+			{REG_FRAME_LENGTH_LINES_MSB, (num_lperf & 0xFF00) >> 8};
+		tbl_2[1] = (struct s5k3e2fx_i2c_reg_conf)
+			{REG_FRAME_LENGTH_LINES_LSB, (num_lperf & 0x00FF)};
+
+		rc = s5k3e2fx_i2c_write_table(&tbl_2[0],
+			ARRAY_SIZE(tbl_2));
+		if (rc < 0)
+			return rc;
+
+		mdelay(5);
+
+		rc = s5k3e2fx_test(s5k3e2fx_ctrl->set_test);
+		if (rc < 0)
+			return rc;
+	}
+	break; /* UPDATE_PERIODIC */
+
+	case S_REG_INIT:
+	if (rt == S_RES_PREVIEW || rt == S_RES_CAPTURE) {
+
+		struct s5k3e2fx_i2c_reg_conf tbl_3[] = {
+			{S5K3E2FX_REG_SOFTWARE_RESET, S5K3E2FX_SOFTWARE_RESET},
+			{S5K3E2FX_REG_MODE_SELECT,
+				S5K3E2FX_MODE_SELECT_SW_STANDBY},
+			/* PLL setting */
+			{REG_PRE_PLL_CLK_DIV,
+				s5k3e2fx_reg_pat[rt].pre_pll_clk_div},
+			{REG_PLL_MULTIPLIER_MSB,
+				s5k3e2fx_reg_pat[rt].pll_multiplier_msb},
+			{REG_PLL_MULTIPLIER_LSB,
+				s5k3e2fx_reg_pat[rt].pll_multiplier_lsb},
+			{REG_VT_PIX_CLK_DIV,
+				s5k3e2fx_reg_pat[rt].vt_pix_clk_div},
+			{REG_VT_SYS_CLK_DIV,
+				s5k3e2fx_reg_pat[rt].vt_sys_clk_div},
+			{REG_OP_PIX_CLK_DIV,
+				s5k3e2fx_reg_pat[rt].op_pix_clk_div},
+			{REG_OP_SYS_CLK_DIV,
+				s5k3e2fx_reg_pat[rt].op_sys_clk_div},
+			/*Data Format */
+			{REG_CCP_DATA_FORMAT_MSB,
+				s5k3e2fx_reg_pat[rt].ccp_data_format_msb},
+			{REG_CCP_DATA_FORMAT_LSB,
+				s5k3e2fx_reg_pat[rt].ccp_data_format_lsb},
+			/*Output Size */
+			{REG_X_OUTPUT_SIZE_MSB,
+				s5k3e2fx_reg_pat[rt].x_output_size_msb},
+			{REG_X_OUTPUT_SIZE_LSB,
+				s5k3e2fx_reg_pat[rt].x_output_size_lsb},
+			{REG_Y_OUTPUT_SIZE_MSB,
+				s5k3e2fx_reg_pat[rt].y_output_size_msb},
+			{REG_Y_OUTPUT_SIZE_LSB,
+				s5k3e2fx_reg_pat[rt].y_output_size_lsb},
+			/* Binning */
+			{REG_X_EVEN_INC, s5k3e2fx_reg_pat[rt].x_even_inc},
+			{REG_X_ODD_INC, s5k3e2fx_reg_pat[rt].x_odd_inc },
+			{REG_Y_EVEN_INC, s5k3e2fx_reg_pat[rt].y_even_inc},
+			{REG_Y_ODD_INC, s5k3e2fx_reg_pat[rt].y_odd_inc},
+			{REG_BINNING_ENABLE,
+				s5k3e2fx_reg_pat[rt].binning_enable},
+			/* Frame format */
+			{REG_FRAME_LENGTH_LINES_MSB,
+				s5k3e2fx_reg_pat[rt].frame_length_lines_msb},
+			{REG_FRAME_LENGTH_LINES_LSB,
+				s5k3e2fx_reg_pat[rt].frame_length_lines_lsb},
+			{REG_LINE_LENGTH_PCK_MSB,
+				s5k3e2fx_reg_pat[rt].line_length_pck_msb},
+			{REG_LINE_LENGTH_PCK_LSB,
+				s5k3e2fx_reg_pat[rt].line_length_pck_lsb},
+			/* MSR setting */
+			{REG_SHADE_CLK_ENABLE,
+				s5k3e2fx_reg_pat[rt].shade_clk_enable},
+			{REG_SEL_CCP, s5k3e2fx_reg_pat[rt].sel_ccp},
+			{REG_VPIX, s5k3e2fx_reg_pat[rt].vpix},
+			{REG_CLAMP_ON, s5k3e2fx_reg_pat[rt].clamp_on},
+			{REG_OFFSET, s5k3e2fx_reg_pat[rt].offset},
+			/* CDS timing setting */
+			{REG_LD_START, s5k3e2fx_reg_pat[rt].ld_start},
+			{REG_LD_END, s5k3e2fx_reg_pat[rt].ld_end},
+			{REG_SL_START, s5k3e2fx_reg_pat[rt].sl_start},
+			{REG_SL_END, s5k3e2fx_reg_pat[rt].sl_end},
+			{REG_RX_START, s5k3e2fx_reg_pat[rt].rx_start},
+			{REG_S1_START, s5k3e2fx_reg_pat[rt].s1_start},
+			{REG_S1_END, s5k3e2fx_reg_pat[rt].s1_end},
+			{REG_S1S_START, s5k3e2fx_reg_pat[rt].s1s_start},
+			{REG_S1S_END, s5k3e2fx_reg_pat[rt].s1s_end},
+			{REG_S3_START, s5k3e2fx_reg_pat[rt].s3_start},
+			{REG_S3_END, s5k3e2fx_reg_pat[rt].s3_end},
+			{REG_CMP_EN_START, s5k3e2fx_reg_pat[rt].cmp_en_start},
+			{REG_CLP_SL_START, s5k3e2fx_reg_pat[rt].clp_sl_start},
+			{REG_CLP_SL_END, s5k3e2fx_reg_pat[rt].clp_sl_end},
+			{REG_OFF_START, s5k3e2fx_reg_pat[rt].off_start},
+			{REG_RMP_EN_START, s5k3e2fx_reg_pat[rt].rmp_en_start},
+			{REG_TX_START, s5k3e2fx_reg_pat[rt].tx_start},
+			{REG_TX_END, s5k3e2fx_reg_pat[rt].tx_end},
+			{REG_STX_WIDTH, s5k3e2fx_reg_pat[rt].stx_width},
+			{REG_3152_RESERVED,
+				s5k3e2fx_reg_pat[rt].reg_3152_reserved},
+			{REG_315A_RESERVED,
+				s5k3e2fx_reg_pat[rt].reg_315A_reserved},
+			{REG_ANALOGUE_GAIN_CODE_GLOBAL_MSB,
+				s5k3e2fx_reg_pat[rt].
+				analogue_gain_code_global_msb},
+			{REG_ANALOGUE_GAIN_CODE_GLOBAL_LSB,
+				s5k3e2fx_reg_pat[rt].
+				analogue_gain_code_global_lsb},
+			{REG_FINE_INTEGRATION_TIME,
+				s5k3e2fx_reg_pat[rt].fine_integration_time},
+			{REG_COARSE_INTEGRATION_TIME,
+				s5k3e2fx_reg_pat[rt].coarse_integration_time},
+			{S5K3E2FX_REG_MODE_SELECT, S5K3E2FX_MODE_SELECT_STREAM},
+		};
+
+		/* reset fps_divider */
+		s5k3e2fx_ctrl->fps_divider = 1 * 0x0400;
+		rc = s5k3e2fx_i2c_write_table(&tbl_3[0],
+			ARRAY_SIZE(tbl_3));
+		if (rc < 0)
+			return rc;
+		}
+		break; /* case REG_INIT: */
+
+	default:
+		rc = -EINVAL;
+		break;
+	} /* switch (rupdate) */
+
+	return rc;
+}
+
+static int s5k3e2fx_sensor_open_init(const struct msm_camera_sensor_info *data)
+{
+	int32_t  rc;
+
+	s5k3e2fx_ctrl = kzalloc(sizeof(struct s5k3e2fx_ctrl), GFP_KERNEL);
+	if (!s5k3e2fx_ctrl) {
+		CDBG("s5k3e2fx_init failed!\n");
+		rc = -ENOMEM;
+		goto init_done;
+	}
+
+	s5k3e2fx_ctrl->fps_divider = 1 * 0x00000400;
+	s5k3e2fx_ctrl->pict_fps_divider = 1 * 0x00000400;
+	s5k3e2fx_ctrl->set_test = S_TEST_OFF;
+	s5k3e2fx_ctrl->prev_res = S_QTR_SIZE;
+	s5k3e2fx_ctrl->pict_res = S_FULL_SIZE;
+
+	if (data)
+		s5k3e2fx_ctrl->sensordata = data;
+
+	/* enable mclk first */
+	msm_camio_clk_rate_set(24000000);
+	mdelay(20);
+
+	msm_camio_camif_pad_reg_reset();
+	mdelay(20);
+
+	rc = s5k3e2fx_probe_init_sensor(data);
+	if (rc < 0)
+		goto init_fail1;
+
+	if (s5k3e2fx_ctrl->prev_res == S_QTR_SIZE)
+		rc = s5k3e2fx_setting(S_REG_INIT, S_RES_PREVIEW);
+	else
+		rc = s5k3e2fx_setting(S_REG_INIT, S_RES_CAPTURE);
+
+	if (rc < 0) {
+		CDBG("s5k3e2fx_setting failed. rc = %d\n", rc);
+		goto init_fail1;
+	}
+
+	/* initialize AF */
+	rc = s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr,
+			0x3146, 0x3A);
+	if (rc < 0)
+		goto init_fail1;
+
+	rc = s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr,
+			0x3130, 0x03);
+	if (rc < 0)
+		goto init_fail1;
+
+	goto init_done;
+
+init_fail1:
+	s5k3e2fx_probe_init_done(data);
+	kfree(s5k3e2fx_ctrl);
+init_done:
+	return rc;
+}
+
+static int32_t s5k3e2fx_power_down(void)
+{
+	int32_t rc = 0;
+	return rc;
+}
+
+static int s5k3e2fx_sensor_release(void)
+{
+	int rc = -EBADF;
+
+	mutex_lock(&s5k3e2fx_mutex);
+
+	s5k3e2fx_power_down();
+
+	gpio_direction_output(s5k3e2fx_ctrl->sensordata->sensor_reset,
+		0);
+	gpio_free(s5k3e2fx_ctrl->sensordata->sensor_reset);
+
+	kfree(s5k3e2fx_ctrl);
+	s5k3e2fx_ctrl = NULL;
+
+	CDBG("s5k3e2fx_release completed\n");
+
+	mutex_unlock(&s5k3e2fx_mutex);
+	return rc;
+}
+
+static void s5k3e2fx_get_pict_fps(uint16_t fps, uint16_t *pfps)
+{
+	/* input fps is preview fps in Q8 format */
+	uint32_t divider;   /* Q10 */
+
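+	/*
+	 * divider is the ratio of the total preview frame size to the
+	 * total capture frame size (active lines/pixels plus blanking).
+	 */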
+	divider = (uint32_t)
+		((s5k3e2fx_reg_pat[S_RES_PREVIEW].size_h +
+			s5k3e2fx_reg_pat[S_RES_PREVIEW].blk_l) *
+		 (s5k3e2fx_reg_pat[S_RES_PREVIEW].size_w +
+			s5k3e2fx_reg_pat[S_RES_PREVIEW].blk_p)) * 0x00000400 /
+		((s5k3e2fx_reg_pat[S_RES_CAPTURE].size_h +
+			s5k3e2fx_reg_pat[S_RES_CAPTURE].blk_l) *
+		 (s5k3e2fx_reg_pat[S_RES_CAPTURE].size_w +
+			s5k3e2fx_reg_pat[S_RES_CAPTURE].blk_p));
+
+	/* Verify PCLK settings and frame sizes. */
+	*pfps = (uint16_t)(fps * divider / 0x00000400);
+}
+
+static uint16_t s5k3e2fx_get_prev_lines_pf(void)
+{
+	return s5k3e2fx_reg_pat[S_RES_PREVIEW].size_h +
+		s5k3e2fx_reg_pat[S_RES_PREVIEW].blk_l;
+}
+
+static uint16_t s5k3e2fx_get_prev_pixels_pl(void)
+{
+	return s5k3e2fx_reg_pat[S_RES_PREVIEW].size_w +
+		s5k3e2fx_reg_pat[S_RES_PREVIEW].blk_p;
+}
+
+static uint16_t s5k3e2fx_get_pict_lines_pf(void)
+{
+	return s5k3e2fx_reg_pat[S_RES_CAPTURE].size_h +
+		s5k3e2fx_reg_pat[S_RES_CAPTURE].blk_l;
+}
+
+static uint16_t s5k3e2fx_get_pict_pixels_pl(void)
+{
+	return s5k3e2fx_reg_pat[S_RES_CAPTURE].size_w +
+		s5k3e2fx_reg_pat[S_RES_CAPTURE].blk_p;
+}
+
+static uint32_t s5k3e2fx_get_pict_max_exp_lc(void)
+{
+	uint32_t snapshot_lines_per_frame;
+
+	if (s5k3e2fx_ctrl->pict_res == S_QTR_SIZE)
+		snapshot_lines_per_frame =
+		s5k3e2fx_reg_pat[S_RES_PREVIEW].size_h +
+		s5k3e2fx_reg_pat[S_RES_PREVIEW].blk_l;
+	else
+		snapshot_lines_per_frame = 3961 * 3;
+
+	return snapshot_lines_per_frame;
+}
+
+static int32_t s5k3e2fx_set_fps(struct fps_cfg *fps)
+{
+	/* input is new fps in Q10 format */
+	int32_t rc = 0;
+	enum msm_s_setting setting;
+
+	s5k3e2fx_ctrl->fps_divider = fps->fps_div;
+
+	if (s5k3e2fx_ctrl->sensormode == SENSOR_PREVIEW_MODE)
+		setting = S_RES_PREVIEW;
+	else
+		setting = S_RES_CAPTURE;
+
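+	/*
+	 * Program frame_length_lines as the nominal lines per frame scaled
+	 * by the Q10 fps divider, written as separate MSB/LSB bytes.
+	 */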
+	rc = s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr,
+		REG_FRAME_LENGTH_LINES_MSB,
+		(((s5k3e2fx_reg_pat[setting].size_h +
+			s5k3e2fx_reg_pat[setting].blk_l) *
+			s5k3e2fx_ctrl->fps_divider / 0x400) & 0xFF00) >> 8);
+	if (rc < 0)
+		goto set_fps_done;
+
+	rc = s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr,
+		REG_FRAME_LENGTH_LINES_LSB,
+		(((s5k3e2fx_reg_pat[setting].size_h +
+			s5k3e2fx_reg_pat[setting].blk_l) *
+			s5k3e2fx_ctrl->fps_divider / 0x400) & 0x00FF));
+
+set_fps_done:
+	return rc;
+}
+
+static int32_t s5k3e2fx_write_exp_gain(uint16_t gain, uint32_t line)
+{
+	int32_t rc = 0;
+
+	uint16_t max_legal_gain = 0x0200;
+	uint32_t ll_ratio; /* Q10 */
+	uint32_t ll_pck, fl_lines;
+	uint16_t offset = 4;
+	uint32_t  gain_msb, gain_lsb;
+	uint32_t  intg_t_msb, intg_t_lsb;
+	uint32_t  ll_pck_msb, ll_pck_lsb;
+
+	struct s5k3e2fx_i2c_reg_conf tbl[2];
+
+	CDBG("Line:%d s5k3e2fx_write_exp_gain \n", __LINE__);
+
+	if (s5k3e2fx_ctrl->sensormode == SENSOR_PREVIEW_MODE) {
+
+		s5k3e2fx_ctrl->my_reg_gain = gain;
+		s5k3e2fx_ctrl->my_reg_line_count = (uint16_t)line;
+
+		fl_lines = s5k3e2fx_reg_pat[S_RES_PREVIEW].size_h +
+			s5k3e2fx_reg_pat[S_RES_PREVIEW].blk_l;
+
+		ll_pck = s5k3e2fx_reg_pat[S_RES_PREVIEW].size_w +
+			s5k3e2fx_reg_pat[S_RES_PREVIEW].blk_p;
+
+	} else {
+
+		fl_lines = s5k3e2fx_reg_pat[S_RES_CAPTURE].size_h +
+			s5k3e2fx_reg_pat[S_RES_CAPTURE].blk_l;
+
+		ll_pck = s5k3e2fx_reg_pat[S_RES_CAPTURE].size_w +
+			s5k3e2fx_reg_pat[S_RES_CAPTURE].blk_p;
+	}
+
+	if (gain > max_legal_gain)
+		gain = max_legal_gain;
+
+	/* in Q10 */
+	line = (line * s5k3e2fx_ctrl->fps_divider);
+
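+	/*
+	 * If the requested exposure does not fit in the frame, stretch the
+	 * line length by a Q10 ratio instead of changing the frame length.
+	 */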
+	if (fl_lines < (line / 0x400))
+		ll_ratio = (line / (fl_lines - offset));
+	else
+		ll_ratio = 0x400;
+
+	/* update gain registers */
+	gain_msb = (gain & 0xFF00) >> 8;
+	gain_lsb = gain & 0x00FF;
+	tbl[0].waddr = REG_ANALOGUE_GAIN_CODE_GLOBAL_MSB;
+	tbl[0].bdata = gain_msb;
+	tbl[1].waddr = REG_ANALOGUE_GAIN_CODE_GLOBAL_LSB;
+	tbl[1].bdata = gain_lsb;
+	rc = s5k3e2fx_i2c_write_table(&tbl[0], ARRAY_SIZE(tbl));
+	if (rc < 0)
+		goto write_gain_done;
+
+	ll_pck = ll_pck * ll_ratio;
+	ll_pck_msb = ((ll_pck / 0x400) & 0xFF00) >> 8;
+	ll_pck_lsb = (ll_pck / 0x400) & 0x00FF;
+	tbl[0].waddr = REG_LINE_LENGTH_PCK_MSB;
+	tbl[0].bdata = ll_pck_msb;
+	tbl[1].waddr = REG_LINE_LENGTH_PCK_LSB;
+	tbl[1].bdata = ll_pck_lsb;
+	rc = s5k3e2fx_i2c_write_table(&tbl[0], ARRAY_SIZE(tbl));
+	if (rc < 0)
+		goto write_gain_done;
+
+	line = line / ll_ratio;
+	intg_t_msb = (line & 0xFF00) >> 8;
+	intg_t_lsb = (line & 0x00FF);
+	tbl[0].waddr = REG_COARSE_INTEGRATION_TIME;
+	tbl[0].bdata = intg_t_msb;
+	tbl[1].waddr = REG_COARSE_INTEGRATION_TIME_LSB;
+	tbl[1].bdata = intg_t_lsb;
+	rc = s5k3e2fx_i2c_write_table(&tbl[0], ARRAY_SIZE(tbl));
+
+write_gain_done:
+	return rc;
+}
+
+static int32_t s5k3e2fx_set_pict_exp_gain(uint16_t gain, uint32_t line)
+{
+	int32_t rc = 0;
+
+	CDBG("Line:%d s5k3e2fx_set_pict_exp_gain \n", __LINE__);
+
+	rc =
+		s5k3e2fx_write_exp_gain(gain, line);
+
+	return rc;
+}
+
+static int32_t s5k3e2fx_video_config(int mode, int res)
+{
+	int32_t rc;
+
+	switch (res) {
+	case S_QTR_SIZE:
+		rc = s5k3e2fx_setting(S_UPDATE_PERIODIC, S_RES_PREVIEW);
+		if (rc < 0)
+			return rc;
+
+		CDBG("s5k3e2fx sensor configuration done!\n");
+		break;
+
+	case S_FULL_SIZE:
+		rc = s5k3e2fx_setting(S_UPDATE_PERIODIC, S_RES_CAPTURE);
+		if (rc < 0)
+			return rc;
+
+		break;
+
+	default:
+		return 0;
+	} /* switch */
+
+	s5k3e2fx_ctrl->prev_res = res;
+	s5k3e2fx_ctrl->curr_res = res;
+	s5k3e2fx_ctrl->sensormode = mode;
+
+	rc =
+		s5k3e2fx_write_exp_gain(s5k3e2fx_ctrl->my_reg_gain,
+			s5k3e2fx_ctrl->my_reg_line_count);
+
+	return rc;
+}
+
+static int32_t s5k3e2fx_snapshot_config(int mode)
+{
+	int32_t rc = 0;
+
+	rc = s5k3e2fx_setting(S_UPDATE_PERIODIC, S_RES_CAPTURE);
+	if (rc < 0)
+		return rc;
+
+	s5k3e2fx_ctrl->curr_res = s5k3e2fx_ctrl->pict_res;
+	s5k3e2fx_ctrl->sensormode = mode;
+
+	return rc;
+}
+
+static int32_t s5k3e2fx_raw_snapshot_config(int mode)
+{
+	int32_t rc = 0;
+
+	rc = s5k3e2fx_setting(S_UPDATE_PERIODIC, S_RES_CAPTURE);
+	if (rc < 0)
+		return rc;
+
+	s5k3e2fx_ctrl->curr_res = s5k3e2fx_ctrl->pict_res;
+	s5k3e2fx_ctrl->sensormode = mode;
+
+	return rc;
+}
+
+static int32_t s5k3e2fx_set_sensor_mode(int mode, int res)
+{
+	int32_t rc = 0;
+
+	switch (mode) {
+	case SENSOR_PREVIEW_MODE:
+		rc = s5k3e2fx_video_config(mode, res);
+		break;
+
+	case SENSOR_SNAPSHOT_MODE:
+		rc = s5k3e2fx_snapshot_config(mode);
+		break;
+
+	case SENSOR_RAW_SNAPSHOT_MODE:
+		rc = s5k3e2fx_raw_snapshot_config(mode);
+		break;
+
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static int32_t s5k3e2fx_set_default_focus(void)
+{
+	int32_t rc = 0;
+
+	rc = s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr,
+		0x3131, 0);
+	if (rc < 0)
+		return rc;
+
+	rc = s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr,
+		0x3132, 0);
+	if (rc < 0)
+		return rc;
+
+	s5k3e2fx_ctrl->curr_lens_pos = 0;
+
+	return rc;
+}
+
+static int32_t s5k3e2fx_move_focus(int direction, int32_t num_steps)
+{
+	int32_t rc = 0;
+	int32_t i;
+	int16_t step_direction;
+	int16_t actual_step;
+	int16_t next_pos, pos_offset;
+	int16_t init_code = 50;
+	uint8_t next_pos_msb, next_pos_lsb;
+	int16_t s_move[5];
+	uint32_t gain; /* Q10 format */
+
+	if (direction == MOVE_NEAR)
+		step_direction = 20;
+	else if (direction == MOVE_FAR)
+		step_direction = -20;
+	else {
+		CDBG("s5k3e2fx_move_focus failed at line %d ...\n", __LINE__);
+		return -EINVAL;
+	}
+
+	actual_step = step_direction * (int16_t)num_steps;
+	pos_offset = init_code + s5k3e2fx_ctrl->curr_lens_pos;
+	gain = actual_step * 0x400 / 5;
+
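+	/* Split the total travel into five roughly equal Q10 sub-steps. */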
+	for (i = 0; i <= 4; i++) {
+		if (actual_step >= 0)
+			s_move[i] = (((i+1)*gain+0x200)-(i*gain+0x200))/0x400;
+		else
+			s_move[i] = (((i+1)*gain-0x200)-(i*gain-0x200))/0x400;
+	}
+
+	/* Ring Damping Code */
+	for (i = 0; i <= 4; i++) {
+		next_pos = (int16_t)(pos_offset + s_move[i]);
+
+		if (next_pos > (738 + init_code))
+			next_pos = 738 + init_code;
+		else if (next_pos < 0)
+			next_pos = 0;
+
+		CDBG("next_position in damping mode = %d\n", next_pos);
+		/* Writing the Values to the actuator */
+		if (next_pos == init_code)
+			next_pos = 0x00;
+
+		next_pos_msb = next_pos >> 8;
+		next_pos_lsb = next_pos & 0x00FF;
+
+		rc = s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr,
+			0x3131, next_pos_msb);
+		if (rc < 0)
+			break;
+
+		rc = s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr,
+			0x3132, next_pos_lsb);
+		if (rc < 0)
+			break;
+
+		pos_offset = next_pos;
+		s5k3e2fx_ctrl->curr_lens_pos = pos_offset - init_code;
+		if (i < 4)
+			mdelay(3);
+	}
+
+	return rc;
+}
+
+static int s5k3e2fx_sensor_config(void __user *argp)
+{
+	struct sensor_cfg_data cdata;
+	long   rc = 0;
+
+	if (copy_from_user(&cdata,
+			(void *)argp,
+			sizeof(struct sensor_cfg_data)))
+		return -EFAULT;
+
+	mutex_lock(&s5k3e2fx_mutex);
+
+	CDBG("%s: cfgtype = %d\n", __func__, cdata.cfgtype);
+	switch (cdata.cfgtype) {
+	case CFG_GET_PICT_FPS:
+		s5k3e2fx_get_pict_fps(cdata.cfg.gfps.prevfps,
+			&(cdata.cfg.gfps.pictfps));
+
+		if (copy_to_user((void *)argp, &cdata,
+				sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PREV_L_PF:
+		cdata.cfg.prevl_pf = s5k3e2fx_get_prev_lines_pf();
+
+		if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PREV_P_PL:
+		cdata.cfg.prevp_pl = s5k3e2fx_get_prev_pixels_pl();
+
+		if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PICT_L_PF:
+		cdata.cfg.pictl_pf = s5k3e2fx_get_pict_lines_pf();
+
+		if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PICT_P_PL:
+		cdata.cfg.pictp_pl = s5k3e2fx_get_pict_pixels_pl();
+
+		if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PICT_MAX_EXP_LC:
+		cdata.cfg.pict_max_exp_lc =
+			s5k3e2fx_get_pict_max_exp_lc();
+
+		if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_SET_FPS:
+	case CFG_SET_PICT_FPS:
+		rc = s5k3e2fx_set_fps(&(cdata.cfg.fps));
+		break;
+
+	case CFG_SET_EXP_GAIN:
+		rc =
+			s5k3e2fx_write_exp_gain(cdata.cfg.exp_gain.gain,
+				cdata.cfg.exp_gain.line);
+		break;
+
+	case CFG_SET_PICT_EXP_GAIN:
+		CDBG("Line:%d CFG_SET_PICT_EXP_GAIN \n", __LINE__);
+		rc =
+			s5k3e2fx_set_pict_exp_gain(
+				cdata.cfg.exp_gain.gain,
+				cdata.cfg.exp_gain.line);
+		break;
+
+	case CFG_SET_MODE:
+		rc =
+			s5k3e2fx_set_sensor_mode(
+			cdata.mode, cdata.rs);
+		break;
+
+	case CFG_PWR_DOWN:
+		rc = s5k3e2fx_power_down();
+		break;
+
+	case CFG_MOVE_FOCUS:
+		rc =
+			s5k3e2fx_move_focus(
+			cdata.cfg.focus.dir,
+			cdata.cfg.focus.steps);
+		break;
+
+	case CFG_SET_DEFAULT_FOCUS:
+		rc =
+			s5k3e2fx_set_default_focus();
+		break;
+
+	case CFG_GET_AF_MAX_STEPS:
+	case CFG_SET_EFFECT:
+	case CFG_SET_LENS_SHADING:
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	mutex_unlock(&s5k3e2fx_mutex);
+	return rc;
+}
+
+static int s5k3e2fx_sensor_probe(const struct msm_camera_sensor_info *info,
+		struct msm_sensor_ctrl *s)
+{
+	int rc = 0;
+
+	rc = i2c_add_driver(&s5k3e2fx_i2c_driver);
+	if (rc < 0 || s5k3e2fx_client == NULL) {
+		rc = -ENOTSUPP;
+		goto probe_fail;
+	}
+
+	msm_camio_clk_rate_set(24000000);
+	mdelay(20);
+
+	rc = s5k3e2fx_probe_init_sensor(info);
+	if (rc < 0)
+		goto probe_fail;
+
+	s->s_init = s5k3e2fx_sensor_open_init;
+	s->s_release = s5k3e2fx_sensor_release;
+	s->s_config  = s5k3e2fx_sensor_config;
+	s->s_mount_angle  = 0;
+	s5k3e2fx_probe_init_done(info);
+
+	return rc;
+
+probe_fail:
+	CDBG("SENSOR PROBE FAILS!\n");
+	return rc;
+}
+
+static int __s5k3e2fx_probe(struct platform_device *pdev)
+{
+	return msm_camera_drv_start(pdev, s5k3e2fx_sensor_probe);
+}
+
+static struct platform_driver msm_camera_driver = {
+	.probe = __s5k3e2fx_probe,
+	.driver = {
+		.name = "msm_camera_s5k3e2fx",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init s5k3e2fx_init(void)
+{
+	return platform_driver_register(&msm_camera_driver);
+}
+
+module_init(s5k3e2fx_init);
+
diff --git a/drivers/media/video/msm/s5k3e2fx.h b/drivers/media/video/msm/s5k3e2fx.h
new file mode 100644
index 0000000..cf3f881
--- /dev/null
+++ b/drivers/media/video/msm/s5k3e2fx.h
@@ -0,0 +1,18 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef CAMSENSOR_S5K3E2FX
+#define CAMSENSOR_S5K3E2FX
+
+#include <mach/board.h>
+#endif /* CAMSENSOR_S5K3E2FX */
diff --git a/drivers/media/video/msm/s5k4e1.c b/drivers/media/video/msm/s5k4e1.c
new file mode 100644
index 0000000..9cdd44c
--- /dev/null
+++ b/drivers/media/video/msm/s5k4e1.c
@@ -0,0 +1,1086 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/bitops.h>
+#include <mach/camera.h>
+#include <media/msm_camera.h>
+#include "s5k4e1.h"
+
+/* 16bit address - 8 bit context register structure */
+#define Q8	0x00000100
+#define Q10	0x00000400
+
+/* MCLK */
+#define S5K4E1_MASTER_CLK_RATE 24000000
+
+/* AF Total steps parameters */
+#define S5K4E1_TOTAL_STEPS_NEAR_TO_FAR	32
+
+#define S5K4E1_REG_PREV_FRAME_LEN_1	31
+#define S5K4E1_REG_PREV_FRAME_LEN_2	32
+#define S5K4E1_REG_PREV_LINE_LEN_1	33
+#define S5K4E1_REG_PREV_LINE_LEN_2	34
+
+#define S5K4E1_REG_SNAP_FRAME_LEN_1	15
+#define S5K4E1_REG_SNAP_FRAME_LEN_2	16
+#define S5K4E1_REG_SNAP_LINE_LEN_1	17
+#define S5K4E1_REG_SNAP_LINE_LEN_2	18
+#define MSB                             1
+#define LSB                             0
+
+struct s5k4e1_work_t {
+	struct work_struct work;
+};
+
+static struct s5k4e1_work_t *s5k4e1_sensorw;
+static struct s5k4e1_work_t *s5k4e1_af_sensorw;
+static struct i2c_client *s5k4e1_af_client;
+static struct i2c_client *s5k4e1_client;
+
+struct s5k4e1_ctrl_t {
+	const struct  msm_camera_sensor_info *sensordata;
+
+	uint32_t sensormode;
+	uint32_t fps_divider;/* init to 1 * 0x00000400 */
+	uint32_t pict_fps_divider;/* init to 1 * 0x00000400 */
+	uint16_t fps;
+
+	uint16_t curr_lens_pos;
+	uint16_t curr_step_pos;
+	uint16_t my_reg_gain;
+	uint32_t my_reg_line_count;
+	uint16_t total_lines_per_frame;
+
+	enum s5k4e1_resolution_t prev_res;
+	enum s5k4e1_resolution_t pict_res;
+	enum s5k4e1_resolution_t curr_res;
+	enum s5k4e1_test_mode_t  set_test;
+};
+
+static bool CSI_CONFIG;
+static struct s5k4e1_ctrl_t *s5k4e1_ctrl;
+
+static DECLARE_WAIT_QUEUE_HEAD(s5k4e1_wait_queue);
+static DECLARE_WAIT_QUEUE_HEAD(s5k4e1_af_wait_queue);
+DEFINE_MUTEX(s5k4e1_mut);
+
+static uint16_t prev_line_length_pck;
+static uint16_t prev_frame_length_lines;
+static uint16_t snap_line_length_pck;
+static uint16_t snap_frame_length_lines;
+
+static int s5k4e1_i2c_rxdata(unsigned short saddr,
+		unsigned char *rxdata, int length)
+{
+	struct i2c_msg msgs[] = {
+		{
+			.addr  = saddr,
+			.flags = 0,
+			.len   = 2,
+			.buf   = rxdata,
+		},
+		{
+			.addr  = saddr,
+			.flags = I2C_M_RD,
+			.len   = length,
+			.buf   = rxdata,
+		},
+	};
+	if (i2c_transfer(s5k4e1_client->adapter, msgs, 2) < 0) {
+		CDBG("s5k4e1_i2c_rxdata failed 0x%x\n", saddr);
+		return -EIO;
+	}
+	return 0;
+}
+
+static int32_t s5k4e1_i2c_txdata(unsigned short saddr,
+		unsigned char *txdata, int length)
+{
+	struct i2c_msg msg[] = {
+		{
+			.addr = saddr,
+			.flags = 0,
+			.len = length,
+			.buf = txdata,
+		},
+	};
+	if (i2c_transfer(s5k4e1_client->adapter, msg, 1) < 0) {
+		CDBG("s5k4e1_i2c_txdata failed 0x%x\n", saddr);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int32_t s5k4e1_i2c_read(unsigned short raddr,
+		unsigned short *rdata, int rlen)
+{
+	int32_t rc = 0;
+	unsigned char buf[2];
+
+	if (!rdata)
+		return -EIO;
+
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (raddr & 0xFF00) >> 8;
+	buf[1] = (raddr & 0x00FF);
+	rc = s5k4e1_i2c_rxdata(s5k4e1_client->addr, buf, rlen);
+	if (rc < 0) {
+		CDBG("s5k4e1_i2c_read 0x%x failed!\n", raddr);
+		return rc;
+	}
+	*rdata = (rlen == 2 ? buf[0] << 8 | buf[1] : buf[0]);
+	CDBG("s5k4e1_i2c_read 0x%x val = 0x%x!\n", raddr, *rdata);
+
+	return rc;
+}
+
+static int32_t s5k4e1_i2c_write_b_sensor(unsigned short waddr, uint8_t bdata)
+{
+	int32_t rc = -EFAULT;
+	unsigned char buf[3];
+
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (waddr & 0xFF00) >> 8;
+	buf[1] = (waddr & 0x00FF);
+	buf[2] = bdata;
+	CDBG("i2c_write_b addr = 0x%x, val = 0x%x\n", waddr, bdata);
+	rc = s5k4e1_i2c_txdata(s5k4e1_client->addr, buf, 3);
+	if (rc < 0) {
+		CDBG("i2c_write_b failed, addr = 0x%x, val = 0x%x!\n",
+				waddr, bdata);
+	}
+	return rc;
+}
+
+static int32_t s5k4e1_i2c_write_b_table(struct s5k4e1_i2c_reg_conf const
+		*reg_conf_tbl, int num)
+{
+	int i;
+	int32_t rc = -EIO;
+
+	for (i = 0; i < num; i++) {
+		rc = s5k4e1_i2c_write_b_sensor(reg_conf_tbl->waddr,
+				reg_conf_tbl->wdata);
+		if (rc < 0)
+			break;
+		reg_conf_tbl++;
+	}
+	return rc;
+}
+
+static int32_t s5k4e1_af_i2c_txdata(unsigned short saddr,
+		unsigned char *txdata, int length)
+{
+	struct i2c_msg msg[] = {
+		{
+			.addr = saddr,
+			.flags = 0,
+			.len = length,
+			.buf = txdata,
+		},
+	};
+	if (i2c_transfer(s5k4e1_af_client->adapter, msg, 1) < 0) {
+		pr_err("s5k4e1_af_i2c_txdata failed 0x%x\n", saddr);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int32_t s5k4e1_af_i2c_write_b_sensor(uint8_t waddr, uint8_t bdata)
+{
+	int32_t rc = -EFAULT;
+	unsigned char buf[2];
+
+	memset(buf, 0, sizeof(buf));
+	buf[0] = waddr;
+	buf[1] = bdata;
+	CDBG("i2c_write_b addr = 0x%x, val = 0x%x\n", waddr, bdata);
+	rc = s5k4e1_af_i2c_txdata(s5k4e1_af_client->addr << 1, buf, 2);
+	if (rc < 0) {
+		pr_err("i2c_write_b failed, addr = 0x%x, val = 0x%x!\n",
+				waddr, bdata);
+	}
+	return rc;
+}
+
+static void s5k4e1_start_stream(void)
+{
+	s5k4e1_i2c_write_b_sensor(0x0100, 0x01);/* streaming on */
+}
+
+static void s5k4e1_stop_stream(void)
+{
+	s5k4e1_i2c_write_b_sensor(0x0100, 0x00);/* streaming off */
+}
+
+static void s5k4e1_group_hold_on(void)
+{
+	s5k4e1_i2c_write_b_sensor(0x0104, 0x01);
+}
+
+static void s5k4e1_group_hold_off(void)
+{
+	s5k4e1_i2c_write_b_sensor(0x0104, 0x0);
+}
+
+static void s5k4e1_get_pict_fps(uint16_t fps, uint16_t *pfps)
+{
+	/* input fps is preview fps in Q8 format */
+	uint32_t divider, d1, d2;
+
+	d1 = (prev_frame_length_lines * 0x00000400) / snap_frame_length_lines;
+	d2 = (prev_line_length_pck * 0x00000400) / snap_line_length_pck;
+	divider = (d1 * d2) / 0x400;
+
+	/*Verify PCLK settings and frame sizes.*/
+	*pfps = (uint16_t) (fps * divider / 0x400);
+}
+
+static uint16_t s5k4e1_get_prev_lines_pf(void)
+{
+	if (s5k4e1_ctrl->prev_res == QTR_SIZE)
+		return prev_frame_length_lines;
+	else
+		return snap_frame_length_lines;
+}
+
+static uint16_t s5k4e1_get_prev_pixels_pl(void)
+{
+	if (s5k4e1_ctrl->prev_res == QTR_SIZE)
+		return prev_line_length_pck;
+	else
+		return snap_line_length_pck;
+}
+
+static uint16_t s5k4e1_get_pict_lines_pf(void)
+{
+	if (s5k4e1_ctrl->pict_res == QTR_SIZE)
+		return prev_frame_length_lines;
+	else
+		return snap_frame_length_lines;
+}
+
+static uint16_t s5k4e1_get_pict_pixels_pl(void)
+{
+	if (s5k4e1_ctrl->pict_res == QTR_SIZE)
+		return prev_line_length_pck;
+	else
+		return snap_line_length_pck;
+}
+
+static uint32_t s5k4e1_get_pict_max_exp_lc(void)
+{
+	return snap_frame_length_lines * 24;
+}
+
+static int32_t s5k4e1_set_fps(struct fps_cfg   *fps)
+{
+	uint16_t total_lines_per_frame;
+	int32_t rc = 0;
+
+	s5k4e1_ctrl->fps_divider = fps->fps_div;
+	s5k4e1_ctrl->pict_fps_divider = fps->pict_fps_div;
+
+	if (s5k4e1_ctrl->sensormode == SENSOR_PREVIEW_MODE) {
+		total_lines_per_frame = (uint16_t)
+		((prev_frame_length_lines * s5k4e1_ctrl->fps_divider) / 0x400);
+	} else {
+		total_lines_per_frame = (uint16_t)
+		((snap_frame_length_lines * s5k4e1_ctrl->fps_divider) / 0x400);
+	}
+
+	s5k4e1_group_hold_on();
+	rc = s5k4e1_i2c_write_b_sensor(0x0340,
+			((total_lines_per_frame & 0xFF00) >> 8));
+	rc = s5k4e1_i2c_write_b_sensor(0x0341,
+			(total_lines_per_frame & 0x00FF));
+	s5k4e1_group_hold_off();
+
+	return rc;
+}
+
+static inline uint8_t s5k4e1_byte(uint16_t word, uint8_t offset)
+{
+	return word >> (offset * BITS_PER_BYTE);
+}
+
+static int32_t s5k4e1_write_exp_gain(uint16_t gain, uint32_t line)
+{
+	uint16_t max_legal_gain = 0x0200;
+	int32_t rc = 0;
+	static uint32_t fl_lines;
+
+	if (gain > max_legal_gain) {
+		pr_debug("Max legal gain Line:%d\n", __LINE__);
+		gain = max_legal_gain;
+	}
+	/* Analogue Gain */
+	s5k4e1_i2c_write_b_sensor(0x0204, s5k4e1_byte(gain, MSB));
+	s5k4e1_i2c_write_b_sensor(0x0205, s5k4e1_byte(gain, LSB));
+
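+	/*
+	 * Keep frame_length_lines at least 4 lines longer than the coarse
+	 * integration time: grow it when the new exposure does not fit,
+	 * shrink it (never below the nominal preview frame length) when it
+	 * does, otherwise update only the integration time.
+	 */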
+	if (line > (prev_frame_length_lines - 4)) {
+		fl_lines = line+4;
+		s5k4e1_group_hold_on();
+		s5k4e1_i2c_write_b_sensor(0x0340, s5k4e1_byte(fl_lines, MSB));
+		s5k4e1_i2c_write_b_sensor(0x0341, s5k4e1_byte(fl_lines, LSB));
+		/* Coarse Integration Time */
+		s5k4e1_i2c_write_b_sensor(0x0202, s5k4e1_byte(line, MSB));
+		s5k4e1_i2c_write_b_sensor(0x0203, s5k4e1_byte(line, LSB));
+		s5k4e1_group_hold_off();
+	} else if (line < (fl_lines - 4)) {
+		fl_lines = line+4;
+		if (fl_lines < prev_frame_length_lines)
+			fl_lines = prev_frame_length_lines;
+
+		s5k4e1_group_hold_on();
+		/* Coarse Integration Time */
+		s5k4e1_i2c_write_b_sensor(0x0202, s5k4e1_byte(line, MSB));
+		s5k4e1_i2c_write_b_sensor(0x0203, s5k4e1_byte(line, LSB));
+		s5k4e1_i2c_write_b_sensor(0x0340, s5k4e1_byte(fl_lines, MSB));
+		s5k4e1_i2c_write_b_sensor(0x0341, s5k4e1_byte(fl_lines, LSB));
+		s5k4e1_group_hold_off();
+	} else {
+		fl_lines = line+4;
+		s5k4e1_group_hold_on();
+		/* Coarse Integration Time */
+		s5k4e1_i2c_write_b_sensor(0x0202, s5k4e1_byte(line, MSB));
+		s5k4e1_i2c_write_b_sensor(0x0203, s5k4e1_byte(line, LSB));
+		s5k4e1_group_hold_off();
+	}
+	return rc;
+}
+
+static int32_t s5k4e1_set_pict_exp_gain(uint16_t gain, uint32_t line)
+{
+	uint16_t max_legal_gain = 0x0200;
+	uint16_t min_ll_pck = 0x0AB2;
+	uint32_t ll_pck, fl_lines;
+	uint32_t ll_ratio;
+	int32_t rc = 0;
+	uint8_t gain_msb, gain_lsb;
+	uint8_t intg_time_msb, intg_time_lsb;
+	uint8_t ll_pck_msb, ll_pck_lsb;
+
+	if (gain > max_legal_gain) {
+		pr_debug("Max legal gain Line:%d\n", __LINE__);
+		gain = max_legal_gain;
+	}
+
+	pr_debug("s5k4e1_write_exp_gain : gain = %d line = %d\n", gain, line);
+	line = (uint32_t) (line * s5k4e1_ctrl->pict_fps_divider);
+	fl_lines = snap_frame_length_lines;
+	ll_pck = snap_line_length_pck;
+
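+	/*
+	 * If the exposure (Q10) exceeds the snapshot frame, stretch the
+	 * line length by the same ratio and shorten the line count so the
+	 * integration time still fits within one frame.
+	 */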
+	if (fl_lines < (line / 0x400))
+		ll_ratio = (line / (fl_lines - 4));
+	else
+		ll_ratio = 0x400;
+
+	ll_pck = ll_pck * ll_ratio / 0x400;
+	line = line / ll_ratio;
+	if (ll_pck < min_ll_pck)
+		ll_pck = min_ll_pck;
+
+	gain_msb = (uint8_t) ((gain & 0xFF00) >> 8);
+	gain_lsb = (uint8_t) (gain & 0x00FF);
+
+	intg_time_msb = (uint8_t) ((line & 0xFF00) >> 8);
+	intg_time_lsb = (uint8_t) (line & 0x00FF);
+
+	ll_pck_msb = (uint8_t) ((ll_pck & 0xFF00) >> 8);
+	ll_pck_lsb = (uint8_t) (ll_pck & 0x00FF);
+
+	s5k4e1_group_hold_on();
+	s5k4e1_i2c_write_b_sensor(0x0204, gain_msb); /* Analogue Gain */
+	s5k4e1_i2c_write_b_sensor(0x0205, gain_lsb);
+
+	s5k4e1_i2c_write_b_sensor(0x0342, ll_pck_msb);
+	s5k4e1_i2c_write_b_sensor(0x0343, ll_pck_lsb);
+
+	/* Coarse Integration Time */
+	s5k4e1_i2c_write_b_sensor(0x0202, intg_time_msb);
+	s5k4e1_i2c_write_b_sensor(0x0203, intg_time_lsb);
+	s5k4e1_group_hold_off();
+
+	return rc;
+}
+
+static int32_t s5k4e1_move_focus(int direction,
+		int32_t num_steps)
+{
+	int16_t step_direction, actual_step, next_position;
+	uint8_t code_val_msb, code_val_lsb;
+
+	if (direction == MOVE_NEAR)
+		step_direction = 16;
+	else
+		step_direction = -16;
+
+	actual_step = (int16_t) (step_direction * num_steps);
+	next_position = (int16_t) (s5k4e1_ctrl->curr_lens_pos + actual_step);
+
+	if (next_position > 1023)
+		next_position = 1023;
+	else if (next_position < 0)
+		next_position = 0;
+
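+	/*
+	 * The AF driver IC takes a 10-bit position code split across two
+	 * bytes: bits [9:4] in the first byte, bits [3:0] in the upper
+	 * nibble of the second byte.
+	 */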
+	code_val_msb = next_position >> 4;
+	code_val_lsb = (next_position & 0x000F) << 4;
+
+	if (s5k4e1_af_i2c_write_b_sensor(code_val_msb, code_val_lsb) < 0) {
+		pr_err("move_focus failed at line %d ...\n", __LINE__);
+		return -EBUSY;
+	}
+
+	s5k4e1_ctrl->curr_lens_pos = next_position;
+	return 0;
+}
+
+static int32_t s5k4e1_set_default_focus(uint8_t af_step)
+{
+	int32_t rc = 0;
+
+	if (s5k4e1_ctrl->curr_step_pos != 0) {
+		rc = s5k4e1_move_focus(MOVE_FAR,
+				s5k4e1_ctrl->curr_step_pos);
+	} else {
+		s5k4e1_af_i2c_write_b_sensor(0x00, 0x00);
+	}
+
+	s5k4e1_ctrl->curr_lens_pos = 0;
+	s5k4e1_ctrl->curr_step_pos = 0;
+
+	return rc;
+}
+
+static int32_t s5k4e1_test(enum s5k4e1_test_mode_t mo)
+{
+	int32_t rc = 0;
+
+	if (mo != TEST_OFF)
+		rc = s5k4e1_i2c_write_b_sensor(0x0601, (uint8_t) mo);
+
+	return rc;
+}
+
+static void s5k4e1_reset_sensor(void)
+{
+	s5k4e1_i2c_write_b_sensor(0x103, 0x1);
+}
+
+static int32_t s5k4e1_sensor_setting(int update_type, int rt)
+{
+
+	int32_t rc = 0;
+	struct msm_camera_csi_params s5k4e1_csi_params;
+
+	s5k4e1_stop_stream();
+	msleep(30);
+
+	if (update_type == REG_INIT) {
+		s5k4e1_reset_sensor();
+		s5k4e1_i2c_write_b_table(s5k4e1_regs.reg_mipi,
+				s5k4e1_regs.reg_mipi_size);
+		s5k4e1_i2c_write_b_table(s5k4e1_regs.rec_settings,
+				s5k4e1_regs.rec_size);
+		s5k4e1_i2c_write_b_table(s5k4e1_regs.reg_pll_p,
+				s5k4e1_regs.reg_pll_p_size);
+		CSI_CONFIG = 0;
+	} else if (update_type == UPDATE_PERIODIC) {
+		if (rt == RES_PREVIEW)
+			s5k4e1_i2c_write_b_table(s5k4e1_regs.reg_prev,
+					s5k4e1_regs.reg_prev_size);
+		else
+			s5k4e1_i2c_write_b_table(s5k4e1_regs.reg_snap,
+					s5k4e1_regs.reg_snap_size);
+		msleep(20);
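+		/* Configure the MIPI CSI receiver once per streaming session. */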
+		if (!CSI_CONFIG) {
+			msm_camio_vfe_clk_rate_set(192000000);
+			s5k4e1_csi_params.data_format = CSI_10BIT;
+			s5k4e1_csi_params.lane_cnt = 1;
+			s5k4e1_csi_params.lane_assign = 0xe4;
+			s5k4e1_csi_params.dpcm_scheme = 0;
+			s5k4e1_csi_params.settle_cnt = 24;
+			rc = msm_camio_csi_config(&s5k4e1_csi_params);
+			msleep(20);
+			CSI_CONFIG = 1;
+		}
+		s5k4e1_start_stream();
+		msleep(30);
+	}
+	return rc;
+}
+
+static int32_t s5k4e1_video_config(int mode)
+{
+
+	int32_t rc = 0;
+	int rt;
+	CDBG("video config\n");
+	/* change sensor resolution if needed */
+	if (s5k4e1_ctrl->prev_res == QTR_SIZE)
+		rt = RES_PREVIEW;
+	else
+		rt = RES_CAPTURE;
+	if (s5k4e1_sensor_setting(UPDATE_PERIODIC, rt) < 0)
+		return rc;
+	if (s5k4e1_ctrl->set_test) {
+		if (s5k4e1_test(s5k4e1_ctrl->set_test) < 0)
+			return  rc;
+	}
+
+	s5k4e1_ctrl->curr_res = s5k4e1_ctrl->prev_res;
+	s5k4e1_ctrl->sensormode = mode;
+	return rc;
+}
+
+static int32_t s5k4e1_snapshot_config(int mode)
+{
+	int32_t rc = 0;
+	int rt;
+
+	/*change sensor resolution if needed */
+	if (s5k4e1_ctrl->curr_res != s5k4e1_ctrl->pict_res) {
+		if (s5k4e1_ctrl->pict_res == QTR_SIZE)
+			rt = RES_PREVIEW;
+		else
+			rt = RES_CAPTURE;
+		if (s5k4e1_sensor_setting(UPDATE_PERIODIC, rt) < 0)
+			return rc;
+	}
+
+	s5k4e1_ctrl->curr_res = s5k4e1_ctrl->pict_res;
+	s5k4e1_ctrl->sensormode = mode;
+	return rc;
+}
+
+static int32_t s5k4e1_raw_snapshot_config(int mode)
+{
+	int32_t rc = 0;
+	int rt;
+
+	/* change sensor resolution if needed */
+	if (s5k4e1_ctrl->curr_res != s5k4e1_ctrl->pict_res) {
+		if (s5k4e1_ctrl->pict_res == QTR_SIZE)
+			rt = RES_PREVIEW;
+		else
+			rt = RES_CAPTURE;
+		if (s5k4e1_sensor_setting(UPDATE_PERIODIC, rt) < 0)
+			return rc;
+	}
+
+	s5k4e1_ctrl->curr_res = s5k4e1_ctrl->pict_res;
+	s5k4e1_ctrl->sensormode = mode;
+	return rc;
+}
+
+static int32_t s5k4e1_set_sensor_mode(int mode,
+		int res)
+{
+	int32_t rc = 0;
+
+	switch (mode) {
+	case SENSOR_PREVIEW_MODE:
+		rc = s5k4e1_video_config(mode);
+		break;
+	case SENSOR_SNAPSHOT_MODE:
+		rc = s5k4e1_snapshot_config(mode);
+		break;
+	case SENSOR_RAW_SNAPSHOT_MODE:
+		rc = s5k4e1_raw_snapshot_config(mode);
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+static int32_t s5k4e1_power_down(void)
+{
+	s5k4e1_stop_stream();
+	return 0;
+}
+
+static int s5k4e1_probe_init_done(const struct msm_camera_sensor_info *data)
+{
+	CDBG("probe done\n");
+	gpio_free(data->sensor_reset);
+	return 0;
+}
+
+static int s5k4e1_probe_init_sensor(const struct msm_camera_sensor_info *data)
+{
+	int32_t rc = 0;
+	uint16_t regaddress1 = 0x0000;
+	uint16_t regaddress2 = 0x0001;
+	uint16_t chipid1 = 0;
+	uint16_t chipid2 = 0;
+
+	CDBG("%s: %d\n", __func__, __LINE__);
+	CDBG(" s5k4e1_probe_init_sensor is called\n");
+
+	rc = gpio_request(data->sensor_reset, "s5k4e1");
+	CDBG(" s5k4e1_probe_init_sensor\n");
+	if (!rc) {
+		CDBG("sensor_reset = %d\n", rc);
+		gpio_direction_output(data->sensor_reset, 0);
+		msleep(50);
+		gpio_set_value_cansleep(data->sensor_reset, 1);
+		msleep(20);
+	} else {
+		goto init_probe_done;
+	}
+	msleep(20);
+
+	s5k4e1_i2c_read(regaddress1, &chipid1, 1);
+	if (chipid1 != 0x4E) {
+		rc = -ENODEV;
+		CDBG("s5k4e1_probe_init_sensor: chip id does not match\n");
+		goto init_probe_fail;
+	}
+
+	s5k4e1_i2c_read(regaddress2, &chipid2 , 1);
+	if (chipid2 != 0x10) {
+		rc = -ENODEV;
+		CDBG("s5k4e1_probe_init_sensor: chip id does not match\n");
+		goto init_probe_fail;
+	}
+
+	CDBG("ID: %d\n", chipid1);
+	CDBG("ID: %d\n", chipid2);
+
+	goto init_probe_done;
+init_probe_fail:
+	CDBG(" s5k4e1_probe_init_sensor fails\n");
+	s5k4e1_probe_init_done(data);
+init_probe_done:
+	CDBG(" s5k4e1_probe_init_sensor finishes\n");
+	return rc;
+}
+
+int s5k4e1_sensor_open_init(const struct msm_camera_sensor_info *data)
+{
+	int32_t rc = 0;
+
+	CDBG("%s: %d\n", __func__, __LINE__);
+	CDBG("Calling s5k4e1_sensor_open_init\n");
+
+	s5k4e1_ctrl = kzalloc(sizeof(struct s5k4e1_ctrl_t), GFP_KERNEL);
+	if (!s5k4e1_ctrl) {
+		CDBG("s5k4e1_init failed!\n");
+		rc = -ENOMEM;
+		goto init_done;
+	}
+	s5k4e1_ctrl->fps_divider = 1 * 0x00000400;
+	s5k4e1_ctrl->pict_fps_divider = 1 * 0x00000400;
+	s5k4e1_ctrl->set_test = TEST_OFF;
+	s5k4e1_ctrl->prev_res = QTR_SIZE;
+	s5k4e1_ctrl->pict_res = FULL_SIZE;
+
+	if (data)
+		s5k4e1_ctrl->sensordata = data;
+
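+	/*
+	 * Cache the preview and snapshot frame/line lengths from the
+	 * register tables; they drive the fps and exposure calculations.
+	 */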
+	prev_frame_length_lines =
+	((s5k4e1_regs.reg_prev[S5K4E1_REG_PREV_FRAME_LEN_1].wdata << 8) |
+		s5k4e1_regs.reg_prev[S5K4E1_REG_PREV_FRAME_LEN_2].wdata);
+
+	prev_line_length_pck =
+	(s5k4e1_regs.reg_prev[S5K4E1_REG_PREV_LINE_LEN_1].wdata << 8) |
+		s5k4e1_regs.reg_prev[S5K4E1_REG_PREV_LINE_LEN_2].wdata;
+
+	snap_frame_length_lines =
+	(s5k4e1_regs.reg_snap[S5K4E1_REG_SNAP_FRAME_LEN_1].wdata << 8) |
+		s5k4e1_regs.reg_snap[S5K4E1_REG_SNAP_FRAME_LEN_2].wdata;
+
+	snap_line_length_pck =
+	(s5k4e1_regs.reg_snap[S5K4E1_REG_SNAP_LINE_LEN_1].wdata << 8) |
+		s5k4e1_regs.reg_snap[S5K4E1_REG_SNAP_LINE_LEN_2].wdata;
+
+	/* enable mclk first */
+	msm_camio_clk_rate_set(S5K4E1_MASTER_CLK_RATE);
+	rc = s5k4e1_probe_init_sensor(data);
+	if (rc < 0)
+		goto init_fail;
+
+	CDBG("init settings\n");
+	if (s5k4e1_ctrl->prev_res == QTR_SIZE)
+		rc = s5k4e1_sensor_setting(REG_INIT, RES_PREVIEW);
+	else
+		rc = s5k4e1_sensor_setting(REG_INIT, RES_CAPTURE);
+	s5k4e1_ctrl->fps = 30 * Q8;
+
+	/* enable AF actuator */
+	if (s5k4e1_ctrl->sensordata->vcm_enable) {
+		CDBG("enable AF actuator, gpio = %d\n",
+			 s5k4e1_ctrl->sensordata->vcm_pwd);
+		rc = gpio_request(s5k4e1_ctrl->sensordata->vcm_pwd,
+						"s5k4e1_af");
+		if (!rc)
+			gpio_direction_output(
+				s5k4e1_ctrl->sensordata->vcm_pwd,
+				 1);
+		else {
+			pr_err("s5k4e1_ctrl gpio request failed!\n");
+			goto init_fail;
+		}
+		msleep(20);
+		rc = s5k4e1_set_default_focus(0);
+		if (rc < 0) {
+			gpio_direction_output(s5k4e1_ctrl->sensordata->vcm_pwd,
+								0);
+			gpio_free(s5k4e1_ctrl->sensordata->vcm_pwd);
+		}
+	}
+	if (rc < 0)
+		goto init_fail;
+	else
+		goto init_done;
+init_fail:
+	CDBG("init_fail\n");
+	s5k4e1_probe_init_done(data);
+init_done:
+	CDBG("init_done\n");
+	return rc;
+}
+
+static int s5k4e1_init_client(struct i2c_client *client)
+{
+	/* Initialize the MSM_CAMI2C Chip */
+	init_waitqueue_head(&s5k4e1_wait_queue);
+	return 0;
+}
+
+static int s5k4e1_af_init_client(struct i2c_client *client)
+{
+	/* Initialize the MSM_CAMI2C Chip */
+	init_waitqueue_head(&s5k4e1_af_wait_queue);
+	return 0;
+}
+
+static const struct i2c_device_id s5k4e1_af_i2c_id[] = {
+	{"s5k4e1_af", 0},
+	{ }
+};
+
+static int s5k4e1_af_i2c_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	int rc = 0;
+	CDBG("s5k4e1_af_probe called!\n");
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		CDBG("i2c_check_functionality failed\n");
+		goto probe_failure;
+	}
+
+	s5k4e1_af_sensorw = kzalloc(sizeof(struct s5k4e1_work_t), GFP_KERNEL);
+	if (!s5k4e1_af_sensorw) {
+		CDBG("kzalloc failed.\n");
+		rc = -ENOMEM;
+		goto probe_failure;
+	}
+
+	i2c_set_clientdata(client, s5k4e1_af_sensorw);
+	s5k4e1_af_init_client(client);
+	s5k4e1_af_client = client;
+
+	msleep(50);
+
+	CDBG("s5k4e1_af_probe succeeded! rc = %d\n", rc);
+	return 0;
+
+probe_failure:
+	CDBG("s5k4e1_af_probe failed! rc = %d\n", rc);
+	return rc;
+}
+
+static const struct i2c_device_id s5k4e1_i2c_id[] = {
+	{"s5k4e1", 0},
+	{ }
+};
+
+static int s5k4e1_i2c_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	int rc = 0;
+	CDBG("s5k4e1_probe called!\n");
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		CDBG("i2c_check_functionality failed\n");
+		goto probe_failure;
+	}
+
+	s5k4e1_sensorw = kzalloc(sizeof(struct s5k4e1_work_t), GFP_KERNEL);
+	if (!s5k4e1_sensorw) {
+		CDBG("kzalloc failed.\n");
+		rc = -ENOMEM;
+		goto probe_failure;
+	}
+
+	i2c_set_clientdata(client, s5k4e1_sensorw);
+	s5k4e1_init_client(client);
+	s5k4e1_client = client;
+
+	msleep(50);
+
+	CDBG("s5k4e1_probe succeeded! rc = %d\n", rc);
+	return 0;
+
+probe_failure:
+	CDBG("s5k4e1_probe failed! rc = %d\n", rc);
+	return rc;
+}
+
+static int __devexit s5k4e1_remove(struct i2c_client *client)
+{
+	struct s5k4e1_work_t *sensorw = i2c_get_clientdata(client);
+	free_irq(client->irq, sensorw);
+	s5k4e1_client = NULL;
+	kfree(sensorw);
+	return 0;
+}
+
+static int __devexit s5k4e1_af_remove(struct i2c_client *client)
+{
+	struct s5k4e1_work_t *s5k4e1_af = i2c_get_clientdata(client);
+	free_irq(client->irq, s5k4e1_af);
+	s5k4e1_af_client = NULL;
+	kfree(s5k4e1_af);
+	return 0;
+}
+
+static struct i2c_driver s5k4e1_i2c_driver = {
+	.id_table = s5k4e1_i2c_id,
+	.probe  = s5k4e1_i2c_probe,
+	.remove = __devexit_p(s5k4e1_remove),
+	.driver = {
+		.name = "s5k4e1",
+	},
+};
+
+static struct i2c_driver s5k4e1_af_i2c_driver = {
+	.id_table = s5k4e1_af_i2c_id,
+	.probe  = s5k4e1_af_i2c_probe,
+	.remove = __devexit_p(s5k4e1_af_remove),
+	.driver = {
+		.name = "s5k4e1_af",
+	},
+};
+
+int s5k4e1_sensor_config(void __user *argp)
+{
+	struct sensor_cfg_data cdata;
+	long   rc = 0;
+	if (copy_from_user(&cdata,
+				(void *)argp,
+				sizeof(struct sensor_cfg_data)))
+		return -EFAULT;
+	mutex_lock(&s5k4e1_mut);
+	CDBG("s5k4e1_sensor_config: cfgtype = %d\n",
+			cdata.cfgtype);
+	switch (cdata.cfgtype) {
+	case CFG_GET_PICT_FPS:
+		s5k4e1_get_pict_fps(
+			cdata.cfg.gfps.prevfps,
+			&(cdata.cfg.gfps.pictfps));
+
+		if (copy_to_user((void *)argp,
+			&cdata,
+			sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+	case CFG_GET_PREV_L_PF:
+		cdata.cfg.prevl_pf =
+			s5k4e1_get_prev_lines_pf();
+
+		if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+	case CFG_GET_PREV_P_PL:
+		cdata.cfg.prevp_pl =
+			s5k4e1_get_prev_pixels_pl();
+
+		if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+	case CFG_GET_PICT_L_PF:
+		cdata.cfg.pictl_pf =
+			s5k4e1_get_pict_lines_pf();
+
+		if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+	case CFG_GET_PICT_P_PL:
+		cdata.cfg.pictp_pl =
+			s5k4e1_get_pict_pixels_pl();
+		if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+	case CFG_GET_PICT_MAX_EXP_LC:
+		cdata.cfg.pict_max_exp_lc =
+			s5k4e1_get_pict_max_exp_lc();
+
+		if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+	case CFG_SET_FPS:
+	case CFG_SET_PICT_FPS:
+		rc = s5k4e1_set_fps(&(cdata.cfg.fps));
+		break;
+	case CFG_SET_EXP_GAIN:
+		rc = s5k4e1_write_exp_gain(cdata.cfg.exp_gain.gain,
+				cdata.cfg.exp_gain.line);
+		break;
+	case CFG_SET_PICT_EXP_GAIN:
+		rc = s5k4e1_set_pict_exp_gain(cdata.cfg.exp_gain.gain,
+				cdata.cfg.exp_gain.line);
+		break;
+	case CFG_SET_MODE:
+		rc = s5k4e1_set_sensor_mode(cdata.mode, cdata.rs);
+		break;
+	case CFG_PWR_DOWN:
+		rc = s5k4e1_power_down();
+		break;
+	case CFG_MOVE_FOCUS:
+		rc = s5k4e1_move_focus(cdata.cfg.focus.dir,
+				cdata.cfg.focus.steps);
+		break;
+	case CFG_SET_DEFAULT_FOCUS:
+		rc = s5k4e1_set_default_focus(cdata.cfg.focus.steps);
+		break;
+	case CFG_GET_AF_MAX_STEPS:
+		cdata.max_steps = S5K4E1_TOTAL_STEPS_NEAR_TO_FAR;
+		if (copy_to_user((void *)argp,
+					&cdata,
+				sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+	case CFG_SET_EFFECT:
+		rc = s5k4e1_set_default_focus(cdata.cfg.effect);
+		break;
+	default:
+		rc = -EFAULT;
+		break;
+	}
+	mutex_unlock(&s5k4e1_mut);
+
+	return rc;
+}
+
+static int s5k4e1_sensor_release(void)
+{
+	int rc = -EBADF;
+
+	mutex_lock(&s5k4e1_mut);
+	s5k4e1_power_down();
+	msleep(20);
+	gpio_set_value_cansleep(s5k4e1_ctrl->sensordata->sensor_reset, 0);
+	usleep_range(5000, 5100);
+	gpio_free(s5k4e1_ctrl->sensordata->sensor_reset);
+	if (s5k4e1_ctrl->sensordata->vcm_enable) {
+		gpio_direction_output(s5k4e1_ctrl->sensordata->vcm_pwd, 0);
+		gpio_free(s5k4e1_ctrl->sensordata->vcm_pwd);
+	}
+	kfree(s5k4e1_ctrl);
+	s5k4e1_ctrl = NULL;
+	CDBG("s5k4e1_release completed\n");
+	mutex_unlock(&s5k4e1_mut);
+
+	return rc;
+}
+
+static int s5k4e1_sensor_probe(const struct msm_camera_sensor_info *info,
+		struct msm_sensor_ctrl *s)
+{
+	int rc = 0;
+
+	rc = i2c_add_driver(&s5k4e1_i2c_driver);
+	if (rc < 0 || s5k4e1_client == NULL) {
+		rc = -ENOTSUPP;
+		CDBG("I2C add driver failed");
+		goto probe_fail_1;
+	}
+
+	rc = i2c_add_driver(&s5k4e1_af_i2c_driver);
+	if (rc < 0 || s5k4e1_af_client == NULL) {
+		rc = -ENOTSUPP;
+		CDBG("I2C add driver failed");
+		goto probe_fail_2;
+	}
+
+	msm_camio_clk_rate_set(S5K4E1_MASTER_CLK_RATE);
+
+	rc = s5k4e1_probe_init_sensor(info);
+	if (rc < 0)
+		goto probe_fail_3;
+
+	s->s_init = s5k4e1_sensor_open_init;
+	s->s_release = s5k4e1_sensor_release;
+	s->s_config  = s5k4e1_sensor_config;
+	s->s_mount_angle = info->sensor_platform_info->mount_angle;
+	gpio_set_value_cansleep(info->sensor_reset, 0);
+	s5k4e1_probe_init_done(info);
+
+	return rc;
+
+probe_fail_3:
+	i2c_del_driver(&s5k4e1_af_i2c_driver);
+probe_fail_2:
+	i2c_del_driver(&s5k4e1_i2c_driver);
+probe_fail_1:
+	CDBG("s5k4e1_sensor_probe: SENSOR PROBE FAILS!\n");
+	return rc;
+}
+
+static int __devinit s5k4e1_probe(struct platform_device *pdev)
+{
+	return msm_camera_drv_start(pdev, s5k4e1_sensor_probe);
+}
+
+static struct platform_driver msm_camera_driver = {
+	.probe = s5k4e1_probe,
+	.driver = {
+		.name = "msm_camera_s5k4e1",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init s5k4e1_init(void)
+{
+	return platform_driver_register(&msm_camera_driver);
+}
+
+module_init(s5k4e1_init);
+MODULE_DESCRIPTION("Samsung 5 MP Bayer sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/msm/s5k4e1.h b/drivers/media/video/msm/s5k4e1.h
new file mode 100644
index 0000000..7f60332
--- /dev/null
+++ b/drivers/media/video/msm/s5k4e1.h
@@ -0,0 +1,94 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef S5K4E1_H
+#define S5K4E1_H
+#include <linux/types.h>
+#include <mach/board.h>
+extern struct s5k4e1_reg s5k4e1_regs;
+
+struct s5k4e1_i2c_reg_conf {
+	unsigned short waddr;
+	unsigned short wdata;
+};
+
+enum s5k4e1_test_mode_t {
+	TEST_OFF,
+	TEST_1,
+	TEST_2,
+	TEST_3
+};
+
+enum s5k4e1_resolution_t {
+	QTR_SIZE,
+	FULL_SIZE,
+	INVALID_SIZE
+};
+enum s5k4e1_setting {
+	RES_PREVIEW,
+	RES_CAPTURE
+};
+enum s5k4e1_reg_update {
+	/* Sensor registers that need to be updated during initialization */
+	REG_INIT,
+	/* Sensor registers that need periodic I2C writes */
+	UPDATE_PERIODIC,
+	/* All sensor registers will be updated */
+	UPDATE_ALL,
+	/* Not valid update */
+	UPDATE_INVALID
+};
+
+enum s5k4e1_reg_pll {
+	E013_VT_PIX_CLK_DIV,
+	E013_VT_SYS_CLK_DIV,
+	E013_PRE_PLL_CLK_DIV,
+	E013_PLL_MULTIPLIER,
+	E013_OP_PIX_CLK_DIV,
+	E013_OP_SYS_CLK_DIV
+};
+
+enum s5k4e1_reg_mode {
+	E013_X_ADDR_START,
+	E013_X_ADDR_END,
+	E013_Y_ADDR_START,
+	E013_Y_ADDR_END,
+	E013_X_OUTPUT_SIZE,
+	E013_Y_OUTPUT_SIZE,
+	E013_DATAPATH_SELECT,
+	E013_READ_MODE,
+	E013_ANALOG_CONTROL5,
+	E013_DAC_LD_4_5,
+	E013_SCALING_MODE,
+	E013_SCALE_M,
+	E013_LINE_LENGTH_PCK,
+	E013_FRAME_LENGTH_LINES,
+	E013_COARSE_INTEGRATION_TIME,
+	E013_FINE_INTEGRATION_TIME,
+	E013_FINE_CORRECTION
+};
+
+struct s5k4e1_reg {
+	const struct s5k4e1_i2c_reg_conf *reg_mipi;
+	const unsigned short reg_mipi_size;
+	const struct s5k4e1_i2c_reg_conf *rec_settings;
+	const unsigned short rec_size;
+	const struct s5k4e1_i2c_reg_conf *reg_pll_p;
+	const unsigned short reg_pll_p_size;
+	const struct s5k4e1_i2c_reg_conf *reg_pll_s;
+	const unsigned short reg_pll_s_size;
+	const struct s5k4e1_i2c_reg_conf *reg_prev;
+	const unsigned short reg_prev_size;
+	const struct s5k4e1_i2c_reg_conf *reg_snap;
+	const unsigned short reg_snap_size;
+};
+#endif /* S5K4E1_H */
diff --git a/drivers/media/video/msm/s5k4e1_reg.c b/drivers/media/video/msm/s5k4e1_reg.c
new file mode 100644
index 0000000..59bb1c8
--- /dev/null
+++ b/drivers/media/video/msm/s5k4e1_reg.c
@@ -0,0 +1,169 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+#include "s5k4e1.h"
+
+struct s5k4e1_i2c_reg_conf s5k4e1_mipi_settings[] = {
+	{0x30BD, 0x00},/* SEL_CCP[0] */
+	{0x3084, 0x15},/* SYNC Mode */
+	{0x30BE, 0x1A},/* M_PCLKDIV_AUTO[4], M_DIV_PCLK[3:0] */
+	{0x30C1, 0x01},/* pack video enable [0] */
+	{0x30EE, 0x02},/* DPHY enable [ 1] */
+	{0x3111, 0x86},/* Embedded data off [5] */
+};
+
+/* PLL Configuration */
+struct s5k4e1_i2c_reg_conf s5k4e1_pll_preview_settings[] = {
+	{0x0305, 0x04},
+	{0x0306, 0x00},
+	{0x0307, 0x44},
+	{0x30B5, 0x00},
+	{0x30E2, 0x01},/* num lanes[1:0] = 2 */
+	{0x30F1, 0xB0},
+};
+
+struct s5k4e1_i2c_reg_conf s5k4e1_pll_snap_settings[] = {
+	{0x0305, 0x04},
+	{0x0306, 0x00},
+	{0x0307, 0x44},
+	{0x30B5, 0x00},
+	{0x30E2, 0x01},/* num lanes[1:0] = 2 */
+	{0x30F1, 0xB0},
+};
+
+struct s5k4e1_i2c_reg_conf s5k4e1_prev_settings[] = {
+	/* output size (1304 x 980) */
+	{0x30A9, 0x02},/* Horizontal Binning On */
+	{0x300E, 0xEB},/* Vertical Binning On */
+	{0x0387, 0x03},/* y_odd_inc 03(10b AVG) */
+	{0x0344, 0x00},/* x_addr_start 0 */
+	{0x0345, 0x00},
+	{0x0348, 0x0A},/* x_addr_end 2607 */
+	{0x0349, 0x2F},
+	{0x0346, 0x00},/* y_addr_start 0 */
+	{0x0347, 0x00},
+	{0x034A, 0x07},/* y_addr_end 1959 */
+	{0x034B, 0xA7},
+	{0x0380, 0x00},/* x_even_inc 1 */
+	{0x0381, 0x01},
+	{0x0382, 0x00},/* x_odd_inc 1 */
+	{0x0383, 0x01},
+	{0x0384, 0x00},/* y_even_inc 1 */
+	{0x0385, 0x01},
+	{0x0386, 0x00},/* y_odd_inc 3 */
+	{0x0387, 0x03},
+	{0x034C, 0x05},/* x_output_size 1304 */
+	{0x034D, 0x18},
+	{0x034E, 0x03},/* y_output_size 980 */
+	{0x034F, 0xd4},
+	{0x30BF, 0xAB},/* outif_enable[7], data_type[5:0] (2Bh = bayer 10bit) */
+	{0x30C0, 0xA0},/* video_offset[7:4] 3260%12 */
+	{0x30C8, 0x06},/* video_data_length 1600 = 1304 * 1.25 */
+	{0x30C9, 0x5E},
+	/* Timing Configuration */
+	{0x0202, 0x03},
+	{0x0203, 0x14},
+	{0x0204, 0x00},
+	{0x0205, 0x80},
+	{0x0340, 0x03},/* Frame Length */
+	{0x0341, 0xE0},
+	{0x0342, 0x0A},/* 2738  Line Length */
+	{0x0343, 0xB2},
+};
+
+struct s5k4e1_i2c_reg_conf s5k4e1_snap_settings[] = {
+	/*Output Size (2608x1960)*/
+	{0x30A9, 0x03},/* Horizontal Binning Off */
+	{0x300E, 0xE8},/* Vertical Binning Off */
+	{0x0387, 0x01},/* y_odd_inc */
+	{0x034C, 0x0A},/* x_output size */
+	{0x034D, 0x30},
+	{0x034E, 0x07},/* y_output size */
+	{0x034F, 0xA8},
+	{0x30BF, 0xAB},/* outif_enable[7], data_type[5:0] (2Bh = bayer 10bit) */
+	{0x30C0, 0x80},/* video_offset[7:4] 3260%12 */
+	{0x30C8, 0x0C},/* video_data_length 3260 = 2608 * 1.25 */
+	{0x30C9, 0xBC},
+	/*Timing configuration*/
+	{0x0202, 0x06},
+	{0x0203, 0x28},
+	{0x0204, 0x00},
+	{0x0205, 0x80},
+	{0x0340, 0x07},/* Frame Length */
+	{0x0341, 0xB4},
+	{0x0342, 0x0A},/* 2738 Line Length */
+	{0x0343, 0xB2},
+};
+
+struct s5k4e1_i2c_reg_conf s5k4e1_recommend_settings[] = {
+	/*CDS timing setting ... */
+	{0x3000, 0x05},
+	{0x3001, 0x03},
+	{0x3002, 0x08},
+	{0x3003, 0x0A},
+	{0x3004, 0x50},
+	{0x3005, 0x0E},
+	{0x3006, 0x5E},
+	{0x3007, 0x00},
+	{0x3008, 0x78},
+	{0x3009, 0x78},
+	{0x300A, 0x50},
+	{0x300B, 0x08},
+	{0x300C, 0x14},
+	{0x300D, 0x00},
+	{0x300E, 0xE8},
+	{0x300F, 0x82},
+	{0x301B, 0x77},
+
+	/* CDS option setting ... */
+	{0x3010, 0x00},
+	{0x3011, 0x3A},
+	{0x3029, 0x04},
+	{0x3012, 0x30},
+	{0x3013, 0xA0},
+	{0x3014, 0x00},
+	{0x3015, 0x00},
+	{0x3016, 0x30},
+	{0x3017, 0x94},
+	{0x3018, 0x70},
+	{0x301D, 0xD4},
+	{0x3021, 0x02},
+	{0x3022, 0x24},
+	{0x3024, 0x40},
+	{0x3027, 0x08},
+
+	/* Pixel option setting ...   */
+	{0x301C, 0x04},
+	{0x30D8, 0x3F},
+	{0x302B, 0x01},
+
+	{0x3070, 0x5F},
+	{0x3071, 0x00},
+	{0x3080, 0x04},
+	{0x3081, 0x38},
+};
+
+struct s5k4e1_reg s5k4e1_regs = {
+	.reg_mipi = &s5k4e1_mipi_settings[0],
+	.reg_mipi_size = ARRAY_SIZE(s5k4e1_mipi_settings),
+	.rec_settings = &s5k4e1_recommend_settings[0],
+	.rec_size = ARRAY_SIZE(s5k4e1_recommend_settings),
+	.reg_pll_p = &s5k4e1_pll_preview_settings[0],
+	.reg_pll_p_size = ARRAY_SIZE(s5k4e1_pll_preview_settings),
+	.reg_pll_s = &s5k4e1_pll_snap_settings[0],
+	.reg_pll_s_size = ARRAY_SIZE(s5k4e1_pll_snap_settings),
+	.reg_prev = &s5k4e1_prev_settings[0],
+	.reg_prev_size = ARRAY_SIZE(s5k4e1_prev_settings),
+	.reg_snap = &s5k4e1_snap_settings[0],
+	.reg_snap_size = ARRAY_SIZE(s5k4e1_snap_settings),
+};
diff --git a/drivers/media/video/msm/sn12m0pz.c b/drivers/media/video/msm/sn12m0pz.c
new file mode 100644
index 0000000..affa581
--- /dev/null
+++ b/drivers/media/video/msm/sn12m0pz.c
@@ -0,0 +1,1850 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/kernel.h>
+#include <media/msm_camera.h>
+#include <mach/gpio.h>
+#include <mach/camera.h>
+#include <linux/slab.h>
+#include "sn12m0pz.h"
+
+
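+/* Q8 is an 8.8 fixed-point scale: 1.0 == 0x100 */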
+#define	Q8					0x00000100
+#define	REG_GROUPED_PARAMETER_HOLD		0x0104
+#define	GROUPED_PARAMETER_HOLD_OFF		0x00
+#define	GROUPED_PARAMETER_HOLD			0x01
+#define	REG_MODE_SELECT				0x0100
+#define	MODE_SELECT_STANDBY_MODE		0x00
+#define	MODE_SELECT_STREAM			0x01
+
+/* Integration Time */
+#define	REG_COARSE_INTEGRATION_TIME_MSB		0x0202
+#define	REG_COARSE_INTEGRATION_TIME_LSB		0x0203
+
+/* Gain */
+#define	REG_ANALOGUE_GAIN_CODE_GLOBAL_MSB	0x0204
+#define	REG_ANALOGUE_GAIN_CODE_GLOBAL_LSB	0x0205
+
+/* PLL Register Defines */
+#define	REG_PLL_MULTIPLIER			0x0307
+#define	REG_0x302B				0x302B
+
+/* MIPI Enable Settings */
+#define	REG_0x30E5				0x30E5
+#define	REG_0x3300				0x3300
+
+/* Global Setting */
+#define	REG_IMAGE_ORIENTATION			0x0101
+
+#define	REG_0x300A				0x300A
+#define	REG_0x3014				0x3014
+#define	REG_0x3015				0x3015
+#define	REG_0x3017				0x3017
+#define	REG_0x301C				0x301C
+#define	REG_0x3031				0x3031
+#define	REG_0x3040				0x3040
+#define	REG_0x3041				0x3041
+#define	REG_0x3051				0x3051
+#define	REG_0x3053				0x3053
+#define	REG_0x3055				0x3055
+#define	REG_0x3057				0x3057
+#define	REG_0x3060				0x3060
+#define	REG_0x3065				0x3065
+#define	REG_0x30AA				0x30AA
+#define	REG_0x30AB				0x30AB
+#define	REG_0x30B0				0x30B0
+#define	REG_0x30B2				0x30B2
+#define	REG_0x30D3				0x30D3
+
+#define	REG_0x3106				0x3106
+#define	REG_0x3108				0x3108
+#define	REG_0x310A				0x310A
+#define	REG_0x310C				0x310C
+#define	REG_0x310E				0x310E
+#define	REG_0x3126				0x3126
+#define	REG_0x312E				0x312E
+#define	REG_0x313C				0x313C
+#define	REG_0x313E				0x313E
+#define	REG_0x3140				0x3140
+#define	REG_0x3142				0x3142
+#define	REG_0x3144				0x3144
+#define	REG_0x3148				0x3148
+#define	REG_0x314A				0x314A
+#define	REG_0x3166				0x3166
+#define	REG_0x3168				0x3168
+#define	REG_0x316F				0x316F
+#define	REG_0x3171				0x3171
+#define	REG_0x3173				0x3173
+#define	REG_0x3175				0x3175
+#define	REG_0x3177				0x3177
+#define	REG_0x3179				0x3179
+#define	REG_0x317B				0x317B
+#define	REG_0x317D				0x317D
+#define	REG_0x317F			0x317F
+#define	REG_0x3181			0x3181
+#define	REG_0x3184			0x3184
+#define	REG_0x3185			0x3185
+#define	REG_0x3187			0x3187
+
+#define	REG_0x31A4			0x31A4
+#define	REG_0x31A6			0x31A6
+#define	REG_0x31AC			0x31AC
+#define	REG_0x31AE			0x31AE
+#define	REG_0x31B4			0x31B4
+#define	REG_0x31B6			0x31B6
+
+#define	REG_0x3254			0x3254
+#define	REG_0x3256			0x3256
+#define	REG_0x3258			0x3258
+#define	REG_0x325A			0x325A
+#define	REG_0x3260			0x3260
+#define	REG_0x3262			0x3262
+
+
+#define	REG_0x3304			0x3304
+#define	REG_0x3305			0x3305
+#define	REG_0x3306			0x3306
+#define	REG_0x3307			0x3307
+#define	REG_0x3308			0x3308
+#define	REG_0x3309			0x3309
+#define	REG_0x330A			0x330A
+#define	REG_0x330B			0x330B
+#define	REG_0x330C			0x330C
+#define	REG_0x330D			0x330D
+
+/* Mode Setting */
+#define	REG_FRAME_LENGTH_LINES_MSB	0x0340
+#define	REG_FRAME_LENGTH_LINES_LSB	0x0341
+#define	REG_LINE_LENGTH_PCK_MSB		0x0342
+#define	REG_LINE_LENGTH_PCK_LSB		0x0343
+#define	REG_X_OUTPUT_SIZE_MSB		0x034C
+#define	REG_X_OUTPUT_SIZE_LSB		0x034D
+#define	REG_Y_OUTPUT_SIZE_MSB		0x034E
+#define	REG_Y_OUTPUT_SIZE_LSB		0x034F
+#define	REG_X_EVEN_INC_LSB		0x0381
+#define	REG_X_ODD_INC_LSB		0x0383
+#define	REG_Y_EVEN_INC_LSB		0x0385
+#define	REG_Y_ODD_INC_LSB		0x0387
+#define	REG_0x3016			0x3016
+#define	REG_0x30E8			0x30E8
+#define	REG_0x3301			0x3301
+/* for 120fps support */
+#define	REG_0x0344			0x0344
+#define	REG_0x0345			0x0345
+#define	REG_0x0346			0x0346
+#define	REG_0x0347			0x0347
+#define	REG_0x0348			0x0348
+#define	REG_0x0349			0x0349
+#define	REG_0x034A			0x034A
+#define	REG_0x034B			0x034B
+
+/* Test Pattern */
+#define	REG_0x30D8			0x30D8
+#define	REG_TEST_PATTERN_MODE		0x0601
+
+/* Solid Color Test Pattern */
+#define	REG_TEST_DATA_RED_MSB		0x0602
+#define	REG_TEST_DATA_RED_LSB		0x0603
+#define	REG_TEST_DATA_GREENR_MSB	0x0604
+#define	REG_TEST_DATA_GREENR_LSB	0x0605
+#define	REG_TEST_DATA_BLUE_MSB		0x0606
+#define	REG_TEST_DATA_BLUE_LSB		0x0607
+#define	REG_TEST_DATA_GREENB_MSB	0x0608
+#define	REG_TEST_DATA_GREENB_LSB	0x0609
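+/* 8-bit AF slave address; shifted right by one below to form the 7-bit address i2c_transfer expects */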
+#define	SN12M0PZ_AF_I2C_SLAVE_ID	0xE4
+#define	SN12M0PZ_STEPS_NEAR_TO_CLOSEST_INF	42
+#define	SN12M0PZ_TOTAL_STEPS_NEAR_TO_FAR	42
+
+
+/* TYPE DECLARATIONS */
+
+
+enum mipi_config_type {
+	IU060F_SN12M0PZ_STMIPID01,
+	IU060F_SN12M0PZ_STMIPID02
+};
+
+enum sn12m0pz_test_mode_t {
+	TEST_OFF,
+	TEST_1,
+	TEST_2,
+	TEST_3
+};
+
+enum sn12m0pz_resolution_t {
+	QTR_SIZE,
+	FULL_SIZE,
+	INVALID_SIZE,
+	QVGA_SIZE,
+};
+
+enum sn12m0pz_setting {
+	RES_PREVIEW,
+	RES_CAPTURE,
+	RES_VIDEO_120FPS,
+};
+
+enum mt9p012_reg_update {
+	/* Sensor registers that need to be updated during initialization */
+	REG_INIT,
+	/* Sensor registers that need periodic I2C writes */
+	UPDATE_PERIODIC,
+	/* All sensor registers will be updated */
+	UPDATE_ALL,
+	/* Not valid update */
+	UPDATE_INVALID
+};
+
+/* 816x612, 24MHz MCLK 96MHz PCLK */
+#define	IU060F_SN12M0PZ_OFFSET			3
+/* Time in milliseconds to wait for the sensor to reset. */
+#define	SN12M0PZ_RESET_DELAY_MSECS		66
+#define	SN12M0PZ_WIDTH				4032
+#define	SN12M0PZ_HEIGHT				3024
+#define	SN12M0PZ_FULL_SIZE_WIDTH		4032
+#define	SN12M0PZ_FULL_SIZE_HEIGHT		3024
+#define	SN12M0PZ_HRZ_FULL_BLK_PIXELS		176
+#define	SN12M0PZ_VER_FULL_BLK_LINES		50
+#define	SN12M0PZ_QTR_SIZE_WIDTH			2016
+#define	SN12M0PZ_QTR_SIZE_HEIGHT		1512
+#define	SN12M0PZ_HRZ_QTR_BLK_PIXELS		2192
+#define	SN12M0PZ_VER_QTR_BLK_LINES		26
+
+/* 120fps mode */
+#define	SN12M0PZ_QVGA_SIZE_WIDTH		4032
+#define	SN12M0PZ_QVGA_SIZE_HEIGHT		249
+#define	SN12M0PZ_HRZ_QVGA_BLK_PIXELS		176
+#define	SN12M0PZ_VER_QVGA_BLK_LINES		9
+#define	SN12M0PZ_DEFAULT_CLOCK_RATE		24000000
+
+static uint32_t IU060F_SN12M0PZ_DELAY_MSECS = 30;
+static enum mipi_config_type mipi_config = IU060F_SN12M0PZ_STMIPID02;
+/* MIPI bridge lane and sensor crop configuration flags */
+static int16_t enable_single_D02_lane;
+static int16_t fullsize_cropped_at_8mp;
+
+struct sn12m0pz_work_t {
+	struct work_struct work;
+};
+
+static struct sn12m0pz_work_t *sn12m0pz_sensorw;
+static struct i2c_client *sn12m0pz_client;
+
+struct sn12m0pz_ctrl_t {
+	const struct msm_camera_sensor_info *sensordata;
+	uint32_t sensormode;
+	uint32_t fps_divider;/* init to 1 * 0x00000400 */
+	uint32_t pict_fps_divider;/* init to 1 * 0x00000400 */
+	uint16_t fps;
+	int16_t curr_lens_pos;
+	uint16_t curr_step_pos;
+	uint16_t my_reg_gain;
+	uint32_t my_reg_line_count;
+	uint16_t total_lines_per_frame;
+	enum sn12m0pz_resolution_t prev_res;
+	enum sn12m0pz_resolution_t pict_res;
+	enum sn12m0pz_resolution_t curr_res;
+	enum sn12m0pz_test_mode_t  set_test;
+	unsigned short imgaddr;
+};
+
+static struct sn12m0pz_ctrl_t *sn12m0pz_ctrl;
+static DECLARE_WAIT_QUEUE_HEAD(sn12m0pz_wait_queue);
+DEFINE_MUTEX(sn12m0pz_mut);
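+/* serializes sn12m0pz_sensor_config and sn12m0pz_sensor_release */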
+
+
+static int sn12m0pz_i2c_rxdata(unsigned short saddr,
+	unsigned char *rxdata, int length)
+{
+	struct i2c_msg msgs[] = {
+		{
+			.addr  = saddr,
+			.flags = 0,
+			.len   = 2,
+			.buf   = rxdata,
+		},
+		{
+			.addr  = saddr,
+			.flags = I2C_M_RD,
+			.len   = 2,
+			.buf   = rxdata,
+		},
+	};
+
+	if (i2c_transfer(sn12m0pz_client->adapter, msgs, 2) < 0) {
+		CDBG("sn12m0pz_i2c_rxdata failed!");
+		return -EIO;
+	}
+
+	return 0;
+}
+static int32_t sn12m0pz_i2c_txdata(unsigned short saddr,
+				unsigned char *txdata, int length)
+{
+
+	struct i2c_msg msg[] = {
+		{
+			.addr  = saddr,
+			.flags = 0,
+			.len	 = length,
+			.buf	 = txdata,
+		},
+	};
+
+	if (i2c_transfer(sn12m0pz_client->adapter, msg, 1) < 0) {
+		CDBG("sn12m0pz_i2c_txdata failed 0x%x", sn12m0pz_client->addr);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int32_t sn12m0pz_i2c_read(unsigned short raddr,
+				unsigned short *rdata, int rlen)
+{
+	int32_t rc;
+	unsigned char buf[2];
+	if (!rdata)
+		return -EIO;
+
+	memset(buf, 0, sizeof(buf));
+
+	buf[0] = (raddr & 0xFF00) >> 8;
+	buf[1] = (raddr & 0x00FF);
+
+	rc = sn12m0pz_i2c_rxdata(sn12m0pz_client->addr, buf, rlen);
+
+	if (rc < 0) {
+		CDBG("sn12m0pz_i2c_read 0x%x failed!", raddr);
+		return rc;
+	}
+
+	*rdata = (rlen == 2 ? buf[0] << 8 | buf[1] : buf[0]);
+
+	return rc;
+}
+
+static int32_t sn12m0pz_i2c_write_b_sensor(unsigned short waddr, uint8_t bdata)
+{
+	int32_t rc;
+	unsigned char buf[3];
+
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (waddr & 0xFF00) >> 8;
+	buf[1] = (waddr & 0x00FF);
+	buf[2] = bdata;
+	udelay(90);
+	CDBG("i2c_write_b addr = %x, val = %x\n", waddr, bdata);
+	rc = sn12m0pz_i2c_txdata(sn12m0pz_client->addr, buf, 3);
+
+	if (rc < 0) {
+		CDBG("i2c_write_b failed, addr = 0x%x, val = 0x%x!",
+			waddr, bdata);
+	}
+
+	return rc;
+}
+
+static int16_t sn12m0pz_i2c_write_b_af(unsigned short saddr,
+				unsigned short baddr, unsigned short bdata)
+{
+	int16_t rc;
+	unsigned char buf[2];
+
+	memset(buf, 0, sizeof(buf));
+	buf[0] = baddr;
+	buf[1] = bdata;
+	rc = sn12m0pz_i2c_txdata(saddr, buf, 2);
+
+	if (rc < 0)
+		CDBG("i2c_write failed, saddr = 0x%x addr = 0x%x, val =0x%x!",
+			saddr, baddr, bdata);
+
+	return rc;
+}
+
+static int32_t sn12m0pz_i2c_write_byte_bridge(unsigned short saddr,
+				unsigned short waddr, uint8_t bdata)
+{
+	int32_t rc;
+	unsigned char buf[3];
+
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (waddr & 0xFF00) >> 8;
+	buf[1] = (waddr & 0x00FF);
+	buf[2] = bdata;
+
+	CDBG("i2c_write_b addr = %x, val = %x", waddr, bdata);
+	rc = sn12m0pz_i2c_txdata(saddr, buf, 3);
+
+	if (rc < 0)
+		CDBG("i2c_write_b failed, addr = 0x%x, val = 0x%x!",
+			waddr, bdata);
+
+	return rc;
+}
+
+static int32_t sn12m0pz_stmipid01_config(void)
+{
+	int32_t rc = 0;
+	/* Initiate I2C for D01: */
+	/* MIPI Bridge configuration */
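+	/* the bridge answers at 8-bit address 0x28, so 0x28>>1 is its 7-bit address */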
+	if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x0002, 0x19) < 0)
+		return rc; /* enable clock lane*/
+	if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x0003, 0x00) < 0)
+		return rc;
+	if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x0004, 0x3E) < 0)
+		return rc; /* mipi mode clock*/
+	if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x0005, 0x01) < 0)
+		return rc; /* enable data line*/
+	if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x0006, 0x0F) < 0)
+		return rc; /* mipi mode data 0x01*/
+	if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x0009, 0x00) < 0)
+		return rc; /* Data_Lane1_Reg1*/
+	if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x000D, 0x92) < 0)
+		return rc; /* CCPRxRegisters*/
+	if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x000E, 0x28) < 0)
+		return rc; /* 10 bits for pixel width input for CCP rx.*/
+	if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x0014, 0xC0) < 0)
+		return rc; /* no bypass, no decomp, 1Lane System,CSIstreaming*/
+	if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x0015, 0x48) < 0)
+		return rc; /* ModeControlRegisters-- Don't reset error flag*/
+	if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x0017, 0x2B) < 0)
+		return rc; /* Data_ID_Rreg*/
+	if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x0018, 0x2B) < 0)
+		return rc; /* Data_ID_Rreg_emb*/
+	if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x0019, 0x0C) < 0)
+		return rc;
+	if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x001E, 0x0A) < 0)
+		return rc;
+	if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x001F, 0x0A) < 0)
+		return rc;
+
+	return rc;
+}
+static int32_t sn12m0pz_stmipid02_config(void)
+{
+	int32_t rc = 0;
+
+	/* Main Camera Clock Lane 1 (CLHP1, CLKN1)*/
+	/* Enable Clock Lane 1 (CLHP1, CLKN1), 0x15 for 400MHz */
+	if (enable_single_D02_lane) {
+		if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x0002, 0x19) < 0)
+			return rc;
+		/* Main Camera Data Lane 1.1 (DATA2P1, DATA2N1) */
+		if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x0009, 0x00) < 0)
+			return rc;/* Enable Data Lane 1.2 (DATA2P1, DATA2N1) */
+		if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x000A, 0x00) < 0)
+			return rc; /*CSIMode on Data Lane1.2(DATA2P1,DATA2N1)*/
+		/* Mode Control */
+		/* Enable single lane for qtr preview */
+		if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x0014, 0xC0) < 0)
+			return rc; /*set 0xC0 - left justified on upper bits)*/
+		/* bit 1 set to 0 i.e. 1 lane system for qtr size preview */
+	} else {
+		if (sn12m0pz_ctrl->prev_res == QVGA_SIZE) {
+			if (sn12m0pz_i2c_write_byte_bridge(0x28>>1,
+				0x0002, 0x19) < 0)
+				return rc;
+		} else {
+			if (sn12m0pz_i2c_write_byte_bridge(0x28>>1,
+				0x0002, 0x21) < 0)
+				return rc;
+		}
+		/* Main Camera Data Lane 1.1 (DATA2P1, DATA2N1) */
+		if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x0009, 0x01) < 0)
+			return rc; /* Enable Data Lane 1.2 (DATA2P1, DATA2N1) */
+		if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x000A, 0x01) < 0)
+			return rc; /* CSI Mode Data Lane1.2(DATA2P1, DATA2N1)*/
+
+		/* Mode Control */
+		/* Enable two lanes for full size preview/ snapshot */
+		if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x0014, 0xC2) < 0)
+			return rc; /* No decompression, CSI dual lane */
+	}
+
+	if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x0004, 0x1E) < 0)
+		return rc;
+
+	/* Main Camera Data Lane 1.1 (DATA1P1, DATA1N1) */
+	if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x0005, 0x03) < 0)
+		return rc; /* Enable Data Lane 1.1 (DATA1P1, DATA1N1) */
+	if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x0006, 0x0f) < 0)
+		return rc; /* CSI Mode on Data Lane 1.1 (DATA1P1, DATA1N1) */
+
+	/* Tristated Output, continuous clock, */
+	/*polarity of clock is inverted and sync signals not inverted*/
+	if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x0015, 0x08) < 0)
+		return rc;
+	if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x0036, 0x20) < 0)
+		return rc; /* Enable compensation macro, main camera */
+
+	/* Data type: 0x2B Raw 10 */
+	if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x0017, 0x2B) < 0)
+		return rc;
+	if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x0018, 0x2B) < 0)
+		return rc; /* Data type of embedded data: 0x2B Raw 10 */
+	if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x0019, 0x0C) < 0)
+		return rc; /* Data type and pixel width programmed 0x0C*/
+
+	/* Decompression Mode */
+
+	/* Pixel Width and Decompression ON/OFF */
+	if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x001E, 0x0A) < 0)
+		return rc; /* Image data not compressed: 0x0A for 10 bits */
+	if (sn12m0pz_i2c_write_byte_bridge(0x28>>1, 0x001F, 0x0A) < 0)
+		return rc; /* Embedded data not compressed: 0x0A for 10 bits */
+	return rc;
+}
+
+static int16_t sn12m0pz_af_init(void)
+{
+	int16_t rc;
+	/* Initialize waveform */
+	rc = sn12m0pz_i2c_write_b_af(SN12M0PZ_AF_I2C_SLAVE_ID >> 1, 0x01, 0xA9);
+
+	rc = sn12m0pz_i2c_write_b_af(SN12M0PZ_AF_I2C_SLAVE_ID >> 1, 0x02, 0xD2);
+
+	rc = sn12m0pz_i2c_write_b_af(SN12M0PZ_AF_I2C_SLAVE_ID >> 1, 0x03, 0x0C);
+
+	rc = sn12m0pz_i2c_write_b_af(SN12M0PZ_AF_I2C_SLAVE_ID >> 1, 0x04, 0x14);
+
+	rc = sn12m0pz_i2c_write_b_af(SN12M0PZ_AF_I2C_SLAVE_ID >> 1, 0x05, 0xB6);
+
+	rc = sn12m0pz_i2c_write_b_af(SN12M0PZ_AF_I2C_SLAVE_ID >> 1, 0x06, 0x4F);
+
+	rc = sn12m0pz_i2c_write_b_af(SN12M0PZ_AF_I2C_SLAVE_ID >> 1, 0x07, 0x00);
+
+	return rc;
+}
+
+static int32_t sn12m0pz_move_focus(int direction,
+	int32_t num_steps)
+{
+	int8_t step_direction, dest_step_position, bit_mask;
+	int32_t rc = 0;
+	uint16_t sn12m0pz_l_region_code_per_step = 3;
+
+	if (num_steps == 0)
+		return rc;
+
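+	/* bit 7 of the AF step command encodes direction: 0x80 for MOVE_NEAR, 0x00 for MOVE_FAR */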
+	if (direction == MOVE_NEAR) {
+		step_direction = 1;
+		bit_mask = 0x80;
+	} else if (direction == MOVE_FAR) {
+		step_direction = -1;
+		bit_mask = 0x00;
+	} else {
+		CDBG("sn12m0pz_move_focus: Illegal focus direction");
+		return -EINVAL;
+	}
+
+	dest_step_position = sn12m0pz_ctrl->curr_step_pos +
+		(step_direction * num_steps);
+
+	if (dest_step_position < 0)
+		dest_step_position = 0;
+	else if (dest_step_position > SN12M0PZ_TOTAL_STEPS_NEAR_TO_FAR)
+		dest_step_position = SN12M0PZ_TOTAL_STEPS_NEAR_TO_FAR;
+
+	rc = sn12m0pz_i2c_write_b_af(SN12M0PZ_AF_I2C_SLAVE_ID >> 1, 0x00,
+		((num_steps * sn12m0pz_l_region_code_per_step) | bit_mask));
+
+	sn12m0pz_ctrl->curr_step_pos = dest_step_position;
+
+	return rc;
+}
+static int32_t sn12m0pz_set_default_focus(uint8_t af_step)
+{
+	int32_t rc;
+
+	/* Initialize to infinity */
+
+	rc = sn12m0pz_i2c_write_b_af(SN12M0PZ_AF_I2C_SLAVE_ID >> 1, 0x00, 0x7F);
+
+	rc = sn12m0pz_i2c_write_b_af(SN12M0PZ_AF_I2C_SLAVE_ID >> 1, 0x00, 0x7F);
+
+	sn12m0pz_ctrl->curr_step_pos = 0;
+
+	return rc;
+}
+static void sn12m0pz_get_pict_fps(uint16_t fps, uint16_t *pfps)
+{
+	/* input fps is preview fps in Q8 format */
+	uint16_t preview_frame_length_lines, snapshot_frame_length_lines;
+	uint16_t preview_line_length_pck, snapshot_line_length_pck;
+	uint32_t divider, pclk_mult, d1, d2;
+
+	/* Total frame_length_lines and line_length_pck for preview */
+	CDBG("sn12m0pz_get_pict_fps prev_res %d", sn12m0pz_ctrl->prev_res);
+	if (sn12m0pz_ctrl->prev_res == QVGA_SIZE) {
+		preview_frame_length_lines = SN12M0PZ_QVGA_SIZE_HEIGHT +
+			SN12M0PZ_VER_QVGA_BLK_LINES;
+		preview_line_length_pck = SN12M0PZ_QVGA_SIZE_WIDTH +
+			SN12M0PZ_HRZ_QVGA_BLK_PIXELS;
+	} else {
+		preview_frame_length_lines = SN12M0PZ_QTR_SIZE_HEIGHT +
+			SN12M0PZ_VER_QTR_BLK_LINES;
+		preview_line_length_pck = SN12M0PZ_QTR_SIZE_WIDTH +
+			SN12M0PZ_HRZ_QTR_BLK_PIXELS;
+	}
+	/* Total frame_length_lines and line_length_pck for snapshot */
+	snapshot_frame_length_lines = SN12M0PZ_FULL_SIZE_HEIGHT
+				+ SN12M0PZ_VER_FULL_BLK_LINES;
+	snapshot_line_length_pck = SN12M0PZ_FULL_SIZE_WIDTH
+				+ SN12M0PZ_HRZ_FULL_BLK_PIXELS;
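+	/* the ratios below are kept in Q10 fixed point: 0x400 == 1.0 */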
+	d1 = preview_frame_length_lines *
+				0x00000400 / snapshot_frame_length_lines;
+	d2 = preview_line_length_pck *
+				0x00000400/snapshot_line_length_pck;
+	divider = d1 * d2 / 0x400;
+	pclk_mult =
+		(uint32_t)
+		(sn12m0pz_regs.reg_pat[RES_CAPTURE].pll_multiplier_lsb *
+		0x400) / (uint32_t)
+		sn12m0pz_regs.reg_pat[RES_PREVIEW].pll_multiplier_lsb;
+	*pfps = (uint16_t) (((fps * divider) / 0x400 * pclk_mult) / 0x400);
+}
+
+static uint16_t sn12m0pz_get_prev_lines_pf(void)
+{
+	if (sn12m0pz_ctrl->prev_res == QTR_SIZE)
+		return SN12M0PZ_QTR_SIZE_HEIGHT +
+			SN12M0PZ_VER_QTR_BLK_LINES;
+	else if (sn12m0pz_ctrl->prev_res == QVGA_SIZE)
+		return SN12M0PZ_QVGA_SIZE_HEIGHT +
+			SN12M0PZ_VER_QVGA_BLK_LINES;
+
+	else
+		return SN12M0PZ_FULL_SIZE_HEIGHT +
+			SN12M0PZ_VER_FULL_BLK_LINES;
+}
+
+static uint16_t sn12m0pz_get_prev_pixels_pl(void)
+{
+	if (sn12m0pz_ctrl->prev_res == QTR_SIZE)
+		return SN12M0PZ_QTR_SIZE_WIDTH +
+			SN12M0PZ_HRZ_QTR_BLK_PIXELS;
+	else
+		return SN12M0PZ_FULL_SIZE_WIDTH +
+			SN12M0PZ_HRZ_FULL_BLK_PIXELS;
+}
+
+static uint16_t sn12m0pz_get_pict_lines_pf(void)
+{
+	if (sn12m0pz_ctrl->pict_res == QTR_SIZE)
+		return SN12M0PZ_QTR_SIZE_HEIGHT +
+			SN12M0PZ_VER_QTR_BLK_LINES;
+	else
+		return SN12M0PZ_FULL_SIZE_HEIGHT +
+			SN12M0PZ_VER_FULL_BLK_LINES;
+}
+
+static uint16_t sn12m0pz_get_pict_pixels_pl(void)
+{
+	if (sn12m0pz_ctrl->pict_res == QTR_SIZE)
+		return SN12M0PZ_QTR_SIZE_WIDTH +
+			SN12M0PZ_HRZ_QTR_BLK_PIXELS;
+	else
+		return SN12M0PZ_FULL_SIZE_WIDTH +
+			SN12M0PZ_HRZ_FULL_BLK_PIXELS;
+}
+
+static uint32_t sn12m0pz_get_pict_max_exp_lc(void)
+{
+	if (sn12m0pz_ctrl->pict_res == QTR_SIZE)
+		return (SN12M0PZ_QTR_SIZE_HEIGHT +
+			SN12M0PZ_VER_QTR_BLK_LINES) * 24;
+	else
+		return (SN12M0PZ_FULL_SIZE_HEIGHT +
+			SN12M0PZ_VER_FULL_BLK_LINES) * 24;
+}
+
+static int32_t sn12m0pz_set_fps(struct fps_cfg	*fps)
+{
+	uint16_t total_lines_per_frame;
+	int32_t rc = 0;
+
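+	/* fps_divider is Q10 (0x400 == 1.0); stretch the quarter-size frame length by it */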
+	total_lines_per_frame = (uint16_t)((SN12M0PZ_QTR_SIZE_HEIGHT +
+				SN12M0PZ_VER_QTR_BLK_LINES) *
+				sn12m0pz_ctrl->fps_divider / 0x400);
+
+	if (sn12m0pz_i2c_write_b_sensor(REG_FRAME_LENGTH_LINES_MSB,
+				((total_lines_per_frame & 0xFF00) >> 8)) < 0)
+		return rc;
+
+	if (sn12m0pz_i2c_write_b_sensor(REG_FRAME_LENGTH_LINES_LSB,
+				(total_lines_per_frame & 0x00FF)) < 0)
+		return rc;
+
+	return rc;
+}
+
+static int32_t sn12m0pz_write_exp_gain(uint16_t gain, uint32_t line)
+{
+	static uint16_t max_legal_gain = 0x00E0;
+	uint8_t gain_msb, gain_lsb;
+	uint8_t intg_time_msb, intg_time_lsb;
+	uint8_t line_length_pck_msb, line_length_pck_lsb;
+	uint16_t line_length_pck, frame_length_lines, temp_lines;
+	uint32_t line_length_ratio = 1 * Q8;
+	int32_t rc = 0;
+	CDBG("sn12m0pz_write_exp_gain : gain = %d line = %d", gain, line);
+
+	if (sn12m0pz_ctrl->sensormode != SENSOR_SNAPSHOT_MODE) {
+		if (sn12m0pz_ctrl->prev_res == QVGA_SIZE) {
+			frame_length_lines = SN12M0PZ_QVGA_SIZE_HEIGHT +
+						SN12M0PZ_VER_QVGA_BLK_LINES;
+			line_length_pck = SN12M0PZ_QVGA_SIZE_WIDTH +
+						SN12M0PZ_HRZ_QVGA_BLK_PIXELS;
+			if (line > (frame_length_lines -
+					IU060F_SN12M0PZ_OFFSET))
+				line = frame_length_lines -
+						IU060F_SN12M0PZ_OFFSET;
+			sn12m0pz_ctrl->fps = (uint16_t) (120 * Q8);
+		} else {
+			if (sn12m0pz_ctrl->curr_res  == QTR_SIZE) {
+				frame_length_lines = SN12M0PZ_QTR_SIZE_HEIGHT +
+						SN12M0PZ_VER_QTR_BLK_LINES;
+				line_length_pck = SN12M0PZ_QTR_SIZE_WIDTH +
+						SN12M0PZ_HRZ_QTR_BLK_PIXELS;
+			} else {
+				frame_length_lines = SN12M0PZ_HEIGHT +
+						SN12M0PZ_VER_FULL_BLK_LINES;
+				line_length_pck = SN12M0PZ_WIDTH +
+						SN12M0PZ_HRZ_FULL_BLK_PIXELS;
+			}
+			if (line > (frame_length_lines -
+						IU060F_SN12M0PZ_OFFSET))
+				sn12m0pz_ctrl->fps = (uint16_t) (30 * Q8 *
+			(frame_length_lines - IU060F_SN12M0PZ_OFFSET) / line);
+			else
+				sn12m0pz_ctrl->fps = (uint16_t) (30 * Q8);
+		}
+	} else {
+		if (sn12m0pz_ctrl->curr_res  == QTR_SIZE) {
+			frame_length_lines = SN12M0PZ_QTR_SIZE_HEIGHT +
+						SN12M0PZ_VER_QTR_BLK_LINES;
+			line_length_pck = SN12M0PZ_QTR_SIZE_WIDTH +
+						SN12M0PZ_HRZ_QTR_BLK_PIXELS;
+		} else {
+			frame_length_lines = SN12M0PZ_HEIGHT +
+						SN12M0PZ_VER_FULL_BLK_LINES;
+			line_length_pck = SN12M0PZ_WIDTH +
+						SN12M0PZ_HRZ_FULL_BLK_PIXELS;
+		}
+	}
+	if (gain > max_legal_gain)
+		/* range: 0 to 224 */
+		gain = max_legal_gain;
+	temp_lines = line;
+	/* calculate line_length_ratio */
+	if (line > (frame_length_lines - IU060F_SN12M0PZ_OFFSET)) {
+		line_length_ratio = (line * Q8) / (frame_length_lines -
+					IU060F_SN12M0PZ_OFFSET);
+		temp_lines = frame_length_lines - IU060F_SN12M0PZ_OFFSET;
+		if (line_length_ratio == 0)
+			line_length_ratio = 1 * Q8;
+	} else
+		line_length_ratio = 1 * Q8;
+
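+	/* exposures longer than one frame are absorbed by widening line_length_pck (AFR) rather than adding lines */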
+	line = (uint32_t) (line * sn12m0pz_ctrl->fps_divider/0x400);
+
+	/* update gain registers */
+	gain_msb = (uint8_t) ((gain & 0xFF00) >> 8);
+	gain_lsb = (uint8_t) (gain & 0x00FF);
+
+	/* linear AFR horizontal stretch */
+	line_length_pck = (uint16_t) (line_length_pck * line_length_ratio / Q8);
+	line_length_pck_msb = (uint8_t) ((line_length_pck & 0xFF00) >> 8);
+	line_length_pck_lsb = (uint8_t) (line_length_pck & 0x00FF);
+
+	/* update line count registers */
+	intg_time_msb = (uint8_t) ((temp_lines & 0xFF00) >> 8);
+	intg_time_lsb = (uint8_t) (temp_lines & 0x00FF);
+
+
+	if (sn12m0pz_i2c_write_b_sensor(REG_GROUPED_PARAMETER_HOLD,
+			GROUPED_PARAMETER_HOLD) < 0)
+		return rc;
+
+	if (sn12m0pz_i2c_write_b_sensor(REG_ANALOGUE_GAIN_CODE_GLOBAL_MSB,
+			gain_msb) < 0)
+		return rc;
+
+	if (sn12m0pz_i2c_write_b_sensor(REG_ANALOGUE_GAIN_CODE_GLOBAL_LSB,
+			gain_lsb) < 0)
+		return rc;
+
+	if (sn12m0pz_i2c_write_b_sensor(REG_LINE_LENGTH_PCK_MSB,
+			line_length_pck_msb) < 0)
+		return rc;
+
+	if (sn12m0pz_i2c_write_b_sensor(REG_LINE_LENGTH_PCK_LSB,
+			line_length_pck_lsb) < 0)
+		return rc;
+
+	if (sn12m0pz_i2c_write_b_sensor(REG_COARSE_INTEGRATION_TIME_MSB,
+			intg_time_msb) < 0)
+		return rc;
+
+	if (sn12m0pz_i2c_write_b_sensor(REG_COARSE_INTEGRATION_TIME_LSB,
+			intg_time_lsb) < 0)
+		return rc;
+
+	if (sn12m0pz_i2c_write_b_sensor(REG_GROUPED_PARAMETER_HOLD,
+			GROUPED_PARAMETER_HOLD_OFF) < 0)
+		return rc;
+
+	return rc;
+}
+
+
+static int32_t sn12m0pz_set_pict_exp_gain(uint16_t gain, uint32_t line)
+{
+	int32_t rc;
+	rc = sn12m0pz_write_exp_gain(gain, line);
+	return rc;
+}
+
+static int32_t sn12m0pz_test(enum sn12m0pz_test_mode_t mo)
+{
+	uint8_t test_data_val_msb = 0x07;
+	uint8_t test_data_val_lsb = 0xFF;
+	int32_t rc = 0;
+	if (mo == TEST_OFF)
+		return rc;
+	else {
+		/* REG_0x30D8[4] is TESBYPEN: 0: Normal Operation,
+		 1: Bypass Signal Processing. REG_0x30D8[5] is EBDMASK:
+		 0: Output Embedded data, 1: No output embedded data */
+
+		if (sn12m0pz_i2c_write_b_sensor(REG_0x30D8, 0x10) < 0)
+			return rc;
+
+		if (sn12m0pz_i2c_write_b_sensor(REG_TEST_PATTERN_MODE,
+			(uint8_t) mo) < 0)
+			return rc;
+
+		/* Solid Color Test Pattern */
+
+		if (mo == TEST_1) {
+			if (sn12m0pz_i2c_write_b_sensor(REG_TEST_DATA_RED_MSB,
+				test_data_val_msb) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_TEST_DATA_RED_LSB,
+				test_data_val_lsb) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(
+						REG_TEST_DATA_GREENR_MSB,
+						test_data_val_msb) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(
+						REG_TEST_DATA_GREENR_LSB,
+						test_data_val_lsb) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_TEST_DATA_BLUE_MSB,
+				test_data_val_msb) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_TEST_DATA_BLUE_LSB,
+				test_data_val_lsb) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(
+						REG_TEST_DATA_GREENB_MSB,
+						test_data_val_msb) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(
+						REG_TEST_DATA_GREENB_LSB,
+						test_data_val_lsb) < 0)
+				return rc;
+		}
+
+	}
+
+	return rc;
+}
+
+static int32_t sn12m0pz_reset(void)
+{
+	int32_t rc = 0;
+	/* register 0x0002 is Port 2, CAM_XCLRO */
+	gpio_direction_output(sn12m0pz_ctrl->
+		sensordata->sensor_reset,
+		0);
+	msleep(50);
+	gpio_direction_output(sn12m0pz_ctrl->
+		sensordata->sensor_reset,
+		1);
+	msleep(13);
+	return rc;
+}
+
+static int32_t sn12m0pz_sensor_setting(int update_type, int rt)
+{
+	uint16_t total_lines_per_frame;
+	int32_t rc = 0;
+
+	switch (update_type) {
+	case UPDATE_PERIODIC:
+		/* Put sensor into software standby mode */
+		if (sn12m0pz_i2c_write_b_sensor(REG_MODE_SELECT,
+				MODE_SELECT_STANDBY_MODE) <  0)
+			return rc;
+		msleep(5);
+		/* Hardware reset D02, lane config between full size/qtr size*/
+		rc = sn12m0pz_reset();
+		if (rc < 0)
+			return rc;
+
+		if (sn12m0pz_stmipid02_config() < 0)
+			return rc;
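+		/* fall through: REG_INIT then reloads the full register set */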
+	case REG_INIT:
+		if (rt == RES_PREVIEW || rt == RES_CAPTURE
+				|| rt == RES_VIDEO_120FPS) {
+			/* reset fps_divider */
+			sn12m0pz_ctrl->fps_divider = 1 * 0x400;
+
+			/* PLL settings */
+			if (sn12m0pz_i2c_write_b_sensor(REG_PLL_MULTIPLIER,
+			sn12m0pz_regs.reg_pat[rt].pll_multiplier_lsb) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x302B,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x302B) < 0)
+				return rc;
+
+			/* MIPI Enable Settings */
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x30E5,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x30E5) < 0)
+				return rc;
+
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3300,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3300) < 0)
+				return rc;
+
+			/* Global Setting */
+			if (
+				sn12m0pz_i2c_write_b_sensor(
+				REG_IMAGE_ORIENTATION,
+				sn12m0pz_regs.reg_pat_init[0].image_orient) < 0)
+				return rc;
+			if (
+				sn12m0pz_i2c_write_b_sensor(
+				REG_COARSE_INTEGRATION_TIME_MSB,
+				sn12m0pz_regs.reg_pat[rt].coarse_integ_time_msb)
+				< 0)
+				return rc;
+			if (
+				sn12m0pz_i2c_write_b_sensor(
+				REG_COARSE_INTEGRATION_TIME_LSB,
+				sn12m0pz_regs.reg_pat[rt].coarse_integ_time_lsb)
+				 < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x300A,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x300A) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3014,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3014) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3015,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3015) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3017,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3017) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x301C,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x301C) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3031,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3031) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3040,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3040) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3041,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3041) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3051,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3051) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3053,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3053) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3055,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3055) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3057,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3057) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3060,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3060) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3065,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3065) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x30AA,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x30AA) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x30AB,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x30AB) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x30B0,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x30B0) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x30B2,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x30B2) < 0)
+				return rc;
+
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x30D3,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x30D3) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x30D8,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x30D8) < 0)
+				return rc;
+
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3106,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3106) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3108,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3108) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x310A,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x310A) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x310C,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x310C) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x310E,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x310E) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3126,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3126) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x312E,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x312E) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x313C,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x313C) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x313E,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x313E) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3140,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3140) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3142,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3142) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3144,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3144) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3148,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3148) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x314A,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x314A) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3166,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3166) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3168,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3168) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x316F,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x316F) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3171,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3171) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3173,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3173) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3175,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3175) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3177,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3177) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3179,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3179) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x317B,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x317B) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x317D,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x317D) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x317F,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x317F) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3181,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3181) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3184,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3184) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3185,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3185) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3187,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3187) < 0)
+				return rc;
+
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x31A4,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x31A4) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x31A6,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x31A6) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x31AC,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x31AC) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x31AE,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x31AE) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x31B4,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x31B4) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x31B6,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x31B6) < 0)
+				return rc;
+
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3254,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3254) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3256,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3256) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3258,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3258) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x325A,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x325A) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3260,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3260) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3262,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3262) < 0)
+				return rc;
+
+
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3304,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3304) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3305,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3305) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3306,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3306) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3307,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3307) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3308,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3308) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3309,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x3309) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x330A,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x330A) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x330B,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x330B) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x330C,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x330C) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x330D,
+				sn12m0pz_regs.reg_pat_init[0].reg_0x330D) < 0)
+				return rc;
+
+			/* Mode setting */
+			/* Update registers with correct
+				 frame_length_line value for AFR */
+			total_lines_per_frame = (uint16_t)(
+			(sn12m0pz_regs.reg_pat[rt].frame_length_lines_msb << 8)
+			& 0xFF00) +
+			sn12m0pz_regs.reg_pat[rt].frame_length_lines_lsb;
+			total_lines_per_frame = total_lines_per_frame *
+					sn12m0pz_ctrl->fps_divider / 0x400;
+
+			if (sn12m0pz_i2c_write_b_sensor(
+					REG_FRAME_LENGTH_LINES_MSB,
+					(total_lines_per_frame & 0xFF00) >> 8)
+					< 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(
+					REG_FRAME_LENGTH_LINES_LSB,
+					(total_lines_per_frame & 0x00FF)) < 0)
+				return rc;
+
+			if (sn12m0pz_i2c_write_b_sensor(REG_LINE_LENGTH_PCK_MSB,
+				sn12m0pz_regs.reg_pat[rt].line_length_pck_msb) <
+				0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_LINE_LENGTH_PCK_LSB,
+				sn12m0pz_regs.reg_pat[rt].line_length_pck_lsb) <
+				0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_X_OUTPUT_SIZE_MSB,
+				sn12m0pz_regs.reg_pat[rt].x_output_size_msb) <
+				0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_X_OUTPUT_SIZE_LSB,
+				sn12m0pz_regs.reg_pat[rt].x_output_size_lsb) <
+				0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_Y_OUTPUT_SIZE_MSB,
+				sn12m0pz_regs.reg_pat[rt].y_output_size_msb) <
+				0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_Y_OUTPUT_SIZE_LSB,
+				sn12m0pz_regs.reg_pat[rt].y_output_size_lsb) <
+				0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_X_EVEN_INC_LSB,
+				sn12m0pz_regs.reg_pat[rt].x_even_inc_lsb) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_X_ODD_INC_LSB,
+				sn12m0pz_regs.reg_pat[rt].x_odd_inc_lsb) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_Y_EVEN_INC_LSB,
+				sn12m0pz_regs.reg_pat[rt].y_even_inc_lsb) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_Y_ODD_INC_LSB,
+				sn12m0pz_regs.reg_pat[rt].y_odd_inc_lsb) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3016,
+				sn12m0pz_regs.reg_pat[rt].reg_0x3016) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x30E8,
+				sn12m0pz_regs.reg_pat[rt].reg_0x30E8) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x3301,
+				sn12m0pz_regs.reg_pat[rt].reg_0x3301) < 0)
+				return rc;
+
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x0344,
+				sn12m0pz_regs.reg_pat[rt].reg_0x0344) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x0345,
+				sn12m0pz_regs.reg_pat[rt].reg_0x0345) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x0346,
+				sn12m0pz_regs.reg_pat[rt].reg_0x0346) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x0347,
+				sn12m0pz_regs.reg_pat[rt].reg_0x0347) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x0348,
+				sn12m0pz_regs.reg_pat[rt].reg_0x0348) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x0349,
+				sn12m0pz_regs.reg_pat[rt].reg_0x0349) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x034A,
+				sn12m0pz_regs.reg_pat[rt].reg_0x034A) < 0)
+				return rc;
+			if (sn12m0pz_i2c_write_b_sensor(REG_0x034B,
+				sn12m0pz_regs.reg_pat[rt].reg_0x034B) < 0)
+				return rc;
+
+			if ((rt == RES_CAPTURE) && fullsize_cropped_at_8mp) {
+				/* x address end */
+				if (sn12m0pz_i2c_write_b_sensor(0x0348,
+								0x0C) < 0)
+					return rc;
+				if (sn12m0pz_i2c_write_b_sensor(0x0349,
+								0xCF) < 0)
+					return rc;
+				/* y address end */
+				if (sn12m0pz_i2c_write_b_sensor(0x034A,
+								0x09) < 0)
+					return rc;
+				if (sn12m0pz_i2c_write_b_sensor(0x034B,
+								0x9F) < 0)
+					return rc;
+			}
+
+			if (mipi_config == IU060F_SN12M0PZ_STMIPID01) {
+				if (sn12m0pz_i2c_write_b_sensor(
+						REG_PLL_MULTIPLIER, 0x43) < 0)
+					return rc;
+				if (rt == RES_CAPTURE) {
+					if (sn12m0pz_i2c_write_b_sensor(
+						REG_0x3301, 0x01) < 0)
+						return rc;
+				if (sn12m0pz_i2c_write_b_sensor(
+						REG_0x3017, 0xE0) < 0)
+					return rc;
+				}
+			}
+
+			if (sn12m0pz_i2c_write_b_sensor(REG_MODE_SELECT,
+						MODE_SELECT_STREAM) < 0)
+				return rc;
+
+			msleep(IU060F_SN12M0PZ_DELAY_MSECS);
+
+			if (sn12m0pz_test(sn12m0pz_ctrl->set_test) < 0)
+				return rc;
+
+			if (mipi_config == IU060F_SN12M0PZ_STMIPID02)
+				CDBG("%s,%d", __func__, __LINE__);
+			return rc;
+		}
+	default:
+		return rc;
+	}
+}
+
+
+static int32_t sn12m0pz_video_config(int mode)
+{
+
+	int32_t rc = 0;
+	int rt;
+
+
+	if (mode == SENSOR_HFR_120FPS_MODE)
+		sn12m0pz_ctrl->prev_res = QVGA_SIZE;
+
+	/* change sensor resolution if needed */
+	if (sn12m0pz_ctrl->curr_res != sn12m0pz_ctrl->prev_res) {
+		if (sn12m0pz_ctrl->prev_res == QTR_SIZE) {
+			rt = RES_PREVIEW;
+			IU060F_SN12M0PZ_DELAY_MSECS = 35; /*measured on scope*/
+			enable_single_D02_lane = 1;
+		} else if (sn12m0pz_ctrl->prev_res == QVGA_SIZE) {
+			rt = RES_VIDEO_120FPS;
+			IU060F_SN12M0PZ_DELAY_MSECS = 35; /*measured on scope*/
+			enable_single_D02_lane = 0;
+		} else {
+			rt = RES_CAPTURE;
+			enable_single_D02_lane = 0;
+		}
+
+		if (sn12m0pz_sensor_setting(UPDATE_PERIODIC, rt) < 0)
+			return rc;
+	}
+
+	sn12m0pz_ctrl->curr_res = sn12m0pz_ctrl->prev_res;
+	sn12m0pz_ctrl->sensormode = mode;
+
+	return rc;
+}
+static int32_t sn12m0pz_snapshot_config(int mode)
+{
+	int32_t rc = 0;
+	int rt;
+	/* change sensor resolution if needed */
+	if (sn12m0pz_ctrl->curr_res != sn12m0pz_ctrl->pict_res) {
+		if (sn12m0pz_ctrl->pict_res == QTR_SIZE) {
+			rt = RES_PREVIEW;
+			enable_single_D02_lane = 1;
+		} else {
+			rt = RES_CAPTURE;
+			IU060F_SN12M0PZ_DELAY_MSECS = 100;/*measured on scope*/
+			enable_single_D02_lane = 0;
+		}
+
+		if (sn12m0pz_sensor_setting(UPDATE_PERIODIC, rt) < 0)
+			return rc;
+	}
+
+	sn12m0pz_ctrl->curr_res = sn12m0pz_ctrl->pict_res;
+	sn12m0pz_ctrl->sensormode = mode;
+	return rc;
+}
+
+static int32_t sn12m0pz_raw_snapshot_config(int mode)
+{
+	int32_t rc = 0;
+	int rt;
+	/* change sensor resolution if needed */
+	if (sn12m0pz_ctrl->curr_res != sn12m0pz_ctrl->pict_res) {
+		if (sn12m0pz_ctrl->pict_res == QTR_SIZE) {
+			rt = RES_PREVIEW;
+			enable_single_D02_lane = 1;
+		} else {
+			rt = RES_CAPTURE;
+			IU060F_SN12M0PZ_DELAY_MSECS = 100;/*measured on scope*/
+			enable_single_D02_lane = 0;
+		}
+		if (sn12m0pz_sensor_setting(UPDATE_PERIODIC, rt) < 0)
+			return rc;
+		}
+	sn12m0pz_ctrl->curr_res = sn12m0pz_ctrl->pict_res;
+	sn12m0pz_ctrl->sensormode = mode;
+	return rc;
+}
+static int32_t sn12m0pz_set_sensor_mode(int  mode,
+	int  res)
+{
+	int32_t rc;
+
+	switch (mode) {
+	case SENSOR_PREVIEW_MODE:
+	case SENSOR_HFR_120FPS_MODE:
+		rc = sn12m0pz_video_config(mode);
+		break;
+
+	case SENSOR_SNAPSHOT_MODE:
+		rc = sn12m0pz_snapshot_config(mode);
+		break;
+
+	case SENSOR_RAW_SNAPSHOT_MODE:
+		rc = sn12m0pz_raw_snapshot_config(mode);
+		break;
+
+	default:
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+static int32_t sn12m0pz_power_down(void)
+{
+	return 0;
+}
+
+
+static int sn12m0pz_probe_init_done(const struct msm_camera_sensor_info *data)
+{
+
+	gpio_direction_output(data->sensor_reset, 0);
+	gpio_free(data->sensor_reset);
+	gpio_direction_output(data->vcm_pwd, 0);
+	gpio_free(data->vcm_pwd);
+	return 0;
+}
+
+static int sn12m0pz_probe_init_sensor(const struct msm_camera_sensor_info *data)
+{
+	int32_t rc;
+	unsigned short chipidl, chipidh;
+	CDBG("Requesting gpio");
+	rc = gpio_request(data->sensor_reset, "sn12m0pz");
+	CDBG(" sn12m0pz_probe_init_sensor");
+	if (!rc) {
+		gpio_direction_output(data->sensor_reset, 0);
+		msleep(20);
+		gpio_direction_output(data->sensor_reset, 1);
+		msleep(13);
+	} else {
+		goto init_probe_done;
+	}
+	CDBG("Requesting gpio");
+	rc = gpio_request(data->vcm_pwd, "sn12m0pz");
+	CDBG(" sn12m0pz_probe_init_sensor");
+
+	if (!rc) {
+		gpio_direction_output(data->vcm_pwd, 0);
+		msleep(20);
+		gpio_direction_output(data->vcm_pwd, 1);
+		msleep(13);
+	} else {
+		gpio_direction_output(data->sensor_reset, 0);
+		gpio_free(data->sensor_reset);
+		goto init_probe_done;
+	}
+
+	msleep(20);
+
+	/* 3. Read sensor Model ID: */
+	rc = sn12m0pz_i2c_read(0x0000, &chipidh, 1);
+	if (rc < 0) {
+		CDBG(" sn12m0pz_probe_init_sensor3");
+		goto init_probe_fail;
+	}
+	rc = sn12m0pz_i2c_read(0x0001, &chipidl, 1);
+	if (rc < 0) {
+		CDBG(" sn12m0pz_probe_init_sensor4");
+		goto init_probe_fail;
+	}
+
+	/* 4. Compare sensor ID to SN12M0PZ ID: */
+	if (chipidh != 0x00 || chipidl != 0x60) {
+		rc = -ENODEV;
+		CDBG("sn12m0pz_probe_init_sensor fails: chip id does not match");
+		goto init_probe_fail;
+	}
+
+	msleep(SN12M0PZ_RESET_DELAY_MSECS);
+
+	goto init_probe_done;
+
+init_probe_fail:
+	CDBG(" sn12m0pz_probe_init_sensor fails");
+	sn12m0pz_probe_init_done(data);
+
+init_probe_done:
+	CDBG(" sn12m0pz_probe_init_sensor finishes");
+	return rc;
+}
+
+int sn12m0pz_sensor_open_init(const struct msm_camera_sensor_info *data)
+{
+	int32_t rc = 0;
+	CDBG("Calling sn12m0pz_sensor_open_init");
+
+	sn12m0pz_ctrl = kzalloc(sizeof(struct sn12m0pz_ctrl_t), GFP_KERNEL);
+	if (!sn12m0pz_ctrl) {
+		CDBG("sn12m0pz_init failed!");
+		rc = -ENOMEM;
+		goto init_done;
+	}
+
+	sn12m0pz_ctrl->fps_divider      = 1 * 0x00000400;
+	sn12m0pz_ctrl->pict_fps_divider = 1 * 0x00000400;
+	sn12m0pz_ctrl->set_test = TEST_OFF;
+	sn12m0pz_ctrl->prev_res = QTR_SIZE;
+	sn12m0pz_ctrl->pict_res = FULL_SIZE;
+	sn12m0pz_ctrl->curr_res = INVALID_SIZE;
+	if (data)
+		sn12m0pz_ctrl->sensordata = data;
+
+	if (rc < 0)
+		return rc;
+
+	/* enable mclk first */
+	msm_camio_clk_rate_set(SN12M0PZ_DEFAULT_CLOCK_RATE);
+	msleep(20);
+	msm_camio_camif_pad_reg_reset();
+	msleep(20);
+	CDBG("Calling sn12m0pz_sensor_open_init");
+	rc = sn12m0pz_probe_init_sensor(data);
+
+	if (rc < 0)
+		goto init_fail;
+	/* send reset signal */
+	if (mipi_config == IU060F_SN12M0PZ_STMIPID01) {
+		if (sn12m0pz_stmipid01_config() < 0) {
+			CDBG("Calling sn12m0pz_sensor_open_init fail");
+			return rc;
+		}
+	} else {
+		if (sn12m0pz_ctrl->prev_res  == QTR_SIZE)
+			enable_single_D02_lane = 1;
+		else /* FULL_SIZE */
+			enable_single_D02_lane = 0;
+
+		if (sn12m0pz_stmipid02_config() < 0) {
+			CDBG("Calling sn12m0pz_sensor_open_init fail");
+			return rc;
+		}
+	}
+
+
+	if (sn12m0pz_ctrl->prev_res == QTR_SIZE) {
+		if (sn12m0pz_sensor_setting(REG_INIT, RES_PREVIEW) < 0)
+			return rc;
+	} else if (sn12m0pz_ctrl->prev_res == QVGA_SIZE) {
+		if (sn12m0pz_sensor_setting(REG_INIT, RES_VIDEO_120FPS) < 0)
+			return rc;
+	} else {
+		if (sn12m0pz_sensor_setting(REG_INIT, RES_CAPTURE) < 0)
+			return rc;
+	}
+
+	if (sn12m0pz_af_init() < 0)
+		return rc;
+	sn12m0pz_ctrl->fps = 30*Q8;
+	if (rc < 0)
+		goto init_fail;
+	else
+		goto init_done;
+init_fail:
+	CDBG(" init_fail");
+	sn12m0pz_probe_init_done(data);
+	kfree(sn12m0pz_ctrl);
+init_done:
+	CDBG("init_done");
+	return rc;
+}
+static int __init sn12m0pz_init_client(struct i2c_client *client)
+{
+	/* Initialize the MSM_CAMI2C Chip */
+	init_waitqueue_head(&sn12m0pz_wait_queue);
+	return 0;
+}
+
+static const struct i2c_device_id sn12m0pz_i2c_id[] = {
+	{ "sn12m0pz", 0},
+	{ }
+};
+
+static int sn12m0pz_i2c_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	int rc = 0;
+	CDBG("sn12m0pz_probe called!");
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		CDBG("i2c_check_functionality failed");
+		goto probe_failure;
+	}
+
+	sn12m0pz_sensorw = kzalloc(sizeof(struct sn12m0pz_work_t), GFP_KERNEL);
+	if (!sn12m0pz_sensorw) {
+		CDBG("kzalloc failed");
+		rc = -ENOMEM;
+		goto probe_failure;
+	}
+
+	i2c_set_clientdata(client, sn12m0pz_sensorw);
+	sn12m0pz_init_client(client);
+	sn12m0pz_client = client;
+
+	msleep(50);
+
+	CDBG("sn12m0pz_probe succeeded! rc = %d", rc);
+	return 0;
+
+probe_failure:
+	CDBG("sn12m0pz_probe failed! rc = %d", rc);
+	return rc;
+}
+
+static int __exit sn12m0pz_remove(struct i2c_client *client)
+{
+	struct sn12m0pz_work_t *sensorw = i2c_get_clientdata(client);
+	free_irq(client->irq, sensorw);
+	sn12m0pz_client = NULL;
+	kfree(sensorw);
+	return 0;
+}
+
+static struct i2c_driver sn12m0pz_i2c_driver = {
+	.id_table = sn12m0pz_i2c_id,
+	.probe	= sn12m0pz_i2c_probe,
+	.remove = __exit_p(sn12m0pz_remove),
+	.driver = {
+		.name = "sn12m0pz",
+	},
+};
+
+int sn12m0pz_sensor_config(void __user *argp)
+{
+	struct sensor_cfg_data cdata;
+	int32_t rc = 0;
+	if (copy_from_user(&cdata,
+				(void *)argp,
+				sizeof(struct sensor_cfg_data)))
+		return -EFAULT;
+
+	mutex_lock(&sn12m0pz_mut);
+
+	CDBG("sn12m0pz_sensor_config: cfgtype = %d",
+		cdata.cfgtype);
+	switch (cdata.cfgtype) {
+	case CFG_GET_PICT_FPS:
+		sn12m0pz_get_pict_fps(cdata.cfg.gfps.prevfps,
+					&(cdata.cfg.gfps.pictfps));
+
+		if (copy_to_user((void *)argp,
+			&cdata,
+			sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PREV_L_PF:
+		cdata.cfg.prevl_pf =
+			sn12m0pz_get_prev_lines_pf();
+
+		if (copy_to_user((void *)argp,
+			&cdata,
+			sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PREV_P_PL:
+		cdata.cfg.prevp_pl =
+			sn12m0pz_get_prev_pixels_pl();
+
+		if (copy_to_user((void *)argp,
+			&cdata,
+			sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PICT_L_PF:
+		cdata.cfg.pictl_pf =
+			sn12m0pz_get_pict_lines_pf();
+
+		if (copy_to_user((void *)argp,
+			&cdata,
+			sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PICT_P_PL:
+		cdata.cfg.pictp_pl =
+			sn12m0pz_get_pict_pixels_pl();
+
+		if (copy_to_user((void *)argp,
+			&cdata,
+			sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PICT_MAX_EXP_LC:
+		cdata.cfg.pict_max_exp_lc =
+			sn12m0pz_get_pict_max_exp_lc();
+
+		if (copy_to_user((void *)argp,
+			&cdata,
+			sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_SET_FPS:
+	case CFG_SET_PICT_FPS:
+		rc = sn12m0pz_set_fps(&(cdata.cfg.fps));
+		break;
+
+	case CFG_SET_EXP_GAIN:
+		rc =
+			sn12m0pz_write_exp_gain(
+				cdata.cfg.exp_gain.gain,
+				cdata.cfg.exp_gain.line);
+		break;
+	case CFG_SET_PICT_EXP_GAIN:
+		rc =
+			sn12m0pz_set_pict_exp_gain(
+				cdata.cfg.exp_gain.gain,
+				cdata.cfg.exp_gain.line);
+		break;
+
+	case CFG_SET_MODE:
+		rc = sn12m0pz_set_sensor_mode(cdata.mode,
+					cdata.rs);
+		break;
+
+	case CFG_PWR_DOWN:
+		rc = sn12m0pz_power_down();
+		break;
+
+	case CFG_MOVE_FOCUS:
+		rc = sn12m0pz_move_focus(cdata.cfg.focus.dir,
+					cdata.cfg.focus.steps);
+		break;
+
+	case CFG_SET_DEFAULT_FOCUS:
+		rc = sn12m0pz_set_default_focus(cdata.cfg.focus.steps);
+		break;
+
+	case CFG_SET_EFFECT:
+		rc = 0;
+		break;
+	case CFG_SET_LENS_SHADING:
+		rc = 0;
+		break;
+	default:
+		rc = -EFAULT;
+		break;
+	}
+
+	mutex_unlock(&sn12m0pz_mut);
+
+	return rc;
+}
+
+static int sn12m0pz_sensor_release(void)
+{
+	int rc = -EBADF;
+
+	mutex_lock(&sn12m0pz_mut);
+
+	sn12m0pz_power_down();
+
+	gpio_direction_output(sn12m0pz_ctrl->sensordata->sensor_reset,
+		0);
+	gpio_free(sn12m0pz_ctrl->sensordata->sensor_reset);
+
+	gpio_direction_output(sn12m0pz_ctrl->sensordata->vcm_pwd,
+		0);
+	gpio_free(sn12m0pz_ctrl->sensordata->vcm_pwd);
+
+	kfree(sn12m0pz_ctrl);
+	sn12m0pz_ctrl = NULL;
+
+	CDBG("sn12m0pz_release completed");
+
+
+	mutex_unlock(&sn12m0pz_mut);
+
+	return rc;
+}
+
+static int sn12m0pz_sensor_probe(const struct msm_camera_sensor_info *info,
+		struct msm_sensor_ctrl *s)
+{
+	int rc;
+
+	rc = i2c_add_driver(&sn12m0pz_i2c_driver);
+	if (rc < 0 || sn12m0pz_client == NULL) {
+		rc = -ENOTSUPP;
+		goto probe_fail;
+	}
+
+	msm_camio_clk_rate_set(SN12M0PZ_DEFAULT_CLOCK_RATE);
+	msleep(20);
+
+	rc = sn12m0pz_probe_init_sensor(info);
+	if (rc < 0)
+		goto probe_fail;
+
+	s->s_init = sn12m0pz_sensor_open_init;
+	s->s_release = sn12m0pz_sensor_release;
+	s->s_config  = sn12m0pz_sensor_config;
+	s->s_mount_angle  = 0;
+	sn12m0pz_probe_init_done(info);
+
+	return rc;
+
+probe_fail:
+	CDBG("SENSOR PROBE FAILS!");
+	i2c_del_driver(&sn12m0pz_i2c_driver);
+	return rc;
+}
+
+static int __sn12m0pz_probe(struct platform_device *pdev)
+{
+	return msm_camera_drv_start(pdev, sn12m0pz_sensor_probe);
+}
+
+static struct platform_driver msm_camera_driver = {
+	.probe = __sn12m0pz_probe,
+	.driver = {
+		.name = "msm_camera_sn12m0pz",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init sn12m0pz_init(void)
+{
+	return platform_driver_register(&msm_camera_driver);
+}
+
+module_init(sn12m0pz_init);
+
+MODULE_DESCRIPTION("Sony 12 MP Bayer sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/msm/sn12m0pz.h b/drivers/media/video/msm/sn12m0pz.h
new file mode 100644
index 0000000..f2abc47
--- /dev/null
+++ b/drivers/media/video/msm/sn12m0pz.h
@@ -0,0 +1,138 @@
+
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef SN12M0PZ_H
+#define SN12M0PZ_H
+
+#include <linux/types.h>
+extern struct sn12m0pz_reg sn12m0pz_regs; /* from sn12m0pz_reg.c */
+struct reg_struct {
+	uint8_t pll_multiplier_lsb;            /* 0x0307*/
+	uint8_t coarse_integ_time_msb;   /* 0x0202*/
+	uint8_t coarse_integ_time_lsb;   /* 0x0203*/
+	uint8_t frame_length_lines_msb;        /* 0x0340*/
+	uint8_t frame_length_lines_lsb;        /* 0x0341*/
+	uint8_t line_length_pck_msb;           /* 0x0342*/
+	uint8_t line_length_pck_lsb;           /* 0x0343*/
+	uint8_t x_output_size_msb;             /* 0x034C*/
+	uint8_t x_output_size_lsb;             /* 0x034D*/
+	uint8_t y_output_size_msb;             /* 0x034E*/
+	uint8_t y_output_size_lsb;             /* 0x034F*/
+	uint8_t x_even_inc_lsb;                /* 0x0381*/
+	uint8_t x_odd_inc_lsb;                 /* 0x0383*/
+	uint8_t y_even_inc_lsb;                /* 0x0385*/
+	uint8_t y_odd_inc_lsb;                 /* 0x0387*/
+	uint8_t reg_0x3016;                    /* 0x3016 VMODEADD*/
+	uint8_t reg_0x30E8;                    /* 0x30E8 HADDAVE*/
+	uint8_t reg_0x3301;                    /* 0x3301 RGLANESEL*/
+	/*added for 120fps support */
+	uint8_t reg_0x0344;
+	uint8_t reg_0x0345;
+	uint8_t reg_0x0346;
+	uint8_t reg_0x0347;
+	uint8_t reg_0x0348;
+	uint8_t reg_0x0349;
+	uint8_t reg_0x034A;
+	uint8_t reg_0x034B;
+};
+struct reg_struct_init {
+	uint8_t reg_0x302B;/* 0x302B*/
+
+	uint8_t reg_0x30E5;/* 0x30E5*/
+	uint8_t reg_0x3300;   /* 0x3300*/
+
+	uint8_t image_orient;   /* 0x0101*/
+
+	uint8_t reg_0x300A;   /* 0x300A*/
+	uint8_t reg_0x3014;   /* 0x3014*/
+	uint8_t reg_0x3015;   /* 0x3015*/
+	uint8_t reg_0x3017;   /* 0x3017*/
+	uint8_t reg_0x301C;   /* 0x301C*/
+	uint8_t reg_0x3031;   /* 0x3031*/
+	uint8_t reg_0x3040;   /* 0x3040*/
+	uint8_t reg_0x3041;   /* 0x3041*/
+	uint8_t reg_0x3051;   /* 0x3051*/
+	uint8_t reg_0x3053;   /* 0x3053*/
+	uint8_t reg_0x3055;   /* 0x3055*/
+	uint8_t reg_0x3057;   /* 0x3057*/
+	uint8_t reg_0x3060;   /* 0x3060*/
+	uint8_t reg_0x3065;   /* 0x3065*/
+	uint8_t reg_0x30AA;   /* 0x30AA*/
+	uint8_t reg_0x30AB;   /* 0x30AB*/
+	uint8_t reg_0x30B0;   /* 0x30B0*/
+	uint8_t reg_0x30B2;   /* 0x30B2*/
+
+	uint8_t reg_0x30D3;   /* 0X30D3*/
+	uint8_t reg_0x30D8;   /* 0X30D8*/
+
+	uint8_t reg_0x3106;   /* 0x3106*/
+	uint8_t reg_0x3108;   /* 0x3108*/
+	uint8_t reg_0x310A;   /* 0x310A*/
+	uint8_t reg_0x310C;   /* 0x310C*/
+	uint8_t reg_0x310E;   /* 0x310E*/
+	uint8_t reg_0x3126;   /* 0x3126*/
+	uint8_t reg_0x312E;   /* 0x312E*/
+	uint8_t reg_0x313C;   /* 0x313C*/
+	uint8_t reg_0x313E;   /* 0x313E*/
+	uint8_t reg_0x3140;   /* 0x3140*/
+	uint8_t reg_0x3142;   /* 0x3142*/
+	uint8_t reg_0x3144;   /* 0x3144*/
+	uint8_t reg_0x3148;   /* 0x3148*/
+	uint8_t reg_0x314A;   /* 0x314A*/
+	uint8_t reg_0x3166;   /* 0x3166*/
+	uint8_t reg_0x3168;   /* 0x3168*/
+	uint8_t reg_0x316F;   /* 0x316F*/
+	uint8_t reg_0x3171;   /* 0x3171*/
+	uint8_t reg_0x3173;   /* 0x3173*/
+	uint8_t reg_0x3175;   /* 0x3175*/
+	uint8_t reg_0x3177;   /* 0x3177*/
+	uint8_t reg_0x3179;   /* 0x3179*/
+	uint8_t reg_0x317B;   /* 0x317B*/
+	uint8_t reg_0x317D;   /* 0x317D*/
+	uint8_t reg_0x317F;   /* 0x317F*/
+	uint8_t reg_0x3181;   /* 0x3181*/
+	uint8_t reg_0x3184;   /* 0x3184*/
+	uint8_t reg_0x3185;   /* 0x3185*/
+	uint8_t reg_0x3187;   /* 0x3187*/
+
+	uint8_t reg_0x31A4;   /* 0x31A4*/
+	uint8_t reg_0x31A6;   /* 0x31A6*/
+	uint8_t reg_0x31AC;   /* 0x31AC*/
+	uint8_t reg_0x31AE;   /* 0x31AE*/
+	uint8_t reg_0x31B4;   /* 0x31B4*/
+	uint8_t reg_0x31B6;   /* 0x31B6*/
+
+	uint8_t reg_0x3254;   /* 0x3254*/
+	uint8_t reg_0x3256;   /* 0x3256*/
+	uint8_t reg_0x3258;   /* 0x3258*/
+	uint8_t reg_0x325A;   /* 0x325A*/
+	uint8_t reg_0x3260;   /* 0x3260*/
+	uint8_t reg_0x3262;   /* 0x3262*/
+
+	uint8_t reg_0x3304;   /* 0x3304*/
+	uint8_t reg_0x3305;   /* 0x3305*/
+	uint8_t reg_0x3306;   /* 0x3306*/
+	uint8_t reg_0x3307;   /* 0x3307*/
+	uint8_t reg_0x3308;   /* 0x3308*/
+	uint8_t reg_0x3309;   /* 0x3309*/
+	uint8_t reg_0x330A;   /* 0x330A*/
+	uint8_t reg_0x330B;   /* 0x330B*/
+	uint8_t reg_0x330C;   /* 0x330C*/
+	uint8_t reg_0x330D;   /* 0x330D*/
+
+};
+struct sn12m0pz_reg {
+	const struct reg_struct  *reg_pat;
+	const struct reg_struct_init  *reg_pat_init;
+};
+#endif
diff --git a/drivers/media/video/msm/sn12m0pz_reg.c b/drivers/media/video/msm/sn12m0pz_reg.c
new file mode 100644
index 0000000..d21eac1
--- /dev/null
+++ b/drivers/media/video/msm/sn12m0pz_reg.c
@@ -0,0 +1,213 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "sn12m0pz.h"
+/* Initialisation settings */
+
+const struct reg_struct_init iu060f_reg_pat_init[1] = {{
+	/* PLL setting */
+	0x4B, /* reg 0x302B*/
+	/* MIPI Enable Setting */
+	0x04, /* reg 0x30E5*/
+	0x00, /* reg 0x3300*/
+	/* Global Setting */
+	0x00, /* image_orientation*/
+	0x80, /* reg 0x300A*/
+	0x08, /* reg 0x3014*/
+	0x37, /* reg 0x3015*/
+	0x60, /* reg 0x3017*/
+	0x01, /* reg 0x301C*/
+	0x28, /* reg 0x3031*/
+	0x00, /* reg 0x3040*/
+	0x60, /* reg 0x3041*/
+	0x24, /* reg 0x3051*/
+	0x34, /* reg 0x3053*/
+	0x3B, /* reg 0x3055*/
+	0xC0, /* reg 0x3057*/
+	0x30, /* reg 0x3060*/
+	0x00, /* reg 0x3065*/
+	0x88, /* reg 0x30AA*/
+	0x1C, /* reg 0x30AB*/
+	0x32, /* reg 0x30B0*/
+	0x83, /* reg 0x30B2*/
+	0x04, /* reg 0x30D3*/
+	0xC0, /* reg 0x30D8*/
+	0x50, /* reg 0x3106*/
+	0xA5, /* reg 0x3108*/
+	0xA9, /* reg 0x310A*/
+	0x0C, /* reg 0x310C*/
+	0x55, /* reg 0x310E*/
+	0xCC, /* reg 0x3126*/
+	0x83, /* reg 0x312E*/
+	0xC7, /* reg 0x313C*/
+	0x07, /* reg 0x313E*/
+	0x32, /* reg 0x3140*/
+	0x35, /* reg 0x3142*/
+	0x35, /* reg 0x3144*/
+	0x73, /* reg 0x3148*/
+	0x80, /* reg 0x314A*/
+	0xBE, /* reg 0x3166*/
+	0xBD, /* reg 0x3168*/
+	0x82, /* reg 0x316F*/
+	0xBC, /* reg 0x3171*/
+	0x82, /* reg 0x3173*/
+	0xBC, /* reg 0x3175*/
+	0x0C, /* reg 0x3177*/
+	0x2C, /* reg 0x3179*/
+	0x83, /* reg 0x317B*/
+	0xAF, /* reg 0x317D*/
+	0x83, /* reg 0x317F*/
+	0xAF, /* reg 0x3181*/
+	0x06, /* reg 0x3184*/
+	0xBA, /* reg 0x3185*/
+	0xBE, /* reg 0x3187*/
+	0xD8, /* reg 0x31A4*/
+	0x17, /* reg 0x31A6*/
+	0xCF, /* reg 0x31AC*/
+	0xF1, /* reg 0x31AE*/
+	0xD8, /* reg 0x31B4*/
+	0x17, /* reg 0x31B6*/
+	0x09, /* reg 0x3254 */
+	0xC5, /* reg 0x3256 */
+	0x84, /* reg 0x3258 */
+	0x6C, /* reg 0x325A */
+	0x0B, /* reg 0x3260 */
+	0x09, /* reg 0x3262 */
+	0x05, /* reg 0x3304*/
+	0x04, /* reg 0x3305*/
+	0x15, /* reg 0x3306*/
+	0x03, /* reg 0x3307*/
+	0x13, /* reg 0x3308*/
+	0x05, /* reg 0x3309*/
+	0x0B, /* reg 0x330A*/
+	0x04, /* reg 0x330B*/
+	0x0B, /* reg 0x330C*/
+	0x06  /* reg 0x330D*/
+}
+};
+
+/* Preview / Snapshot register settings */
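+/* Indexed as [0] = preview, [1] = snapshot, [2] = 120 fps video; the
+ * selection is assumed to follow RES_PREVIEW / RES_CAPTURE /
+ * RES_VIDEO_120FPS in sn12m0pz.c. */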
+const struct reg_struct iu060f_reg_pat[3] = {
+	{ /* Preview */
+		0x22, /*0x1b*/ /* fps*/
+
+		/* Global Setting */
+		0x01, /* coarse_integration_time_msb*/
+		0xFF, /* coarse_integration_time_lsb*/
+
+		/* Mode Setting */
+		/* V: 1/2 V-addition (1,3),
+		H: 1/2 H-averaging (1,3) */
+
+		0x06, /* frame_length_lines_msb     0x0340*/
+		0x02, /* frame_length_lines_lsb     0x0341*/
+		0x10, /* line_length_pck_msb        0x0342*/
+		0x70, /* line_length_pck_lsb        0x0343*/
+		0x07, /* x_output_size_msb          0x034C*/
+		0xe0, /* x_output_size_lsb          0x034D*/
+		0x05, /* y_output_size_msb          0x034E*/
+		0xe8, /* y_output_size_lsb          0x034F*/
+		0x01, /* x_even_inc_lsb             0x0381*/
+		0x03, /* x_odd_inc_lsb              0x0383*/
+		0x01, /* y_even_inc_lsb             0x0385*/
+		0x03, /* y_odd_inc_lsb              0x0387*/
+		0x46, /* reg 0x3016 VMODEADD        0x3016*/
+		0x86, /* reg 0x30E8 HADDAVE         0x30E8*/
+		0x01, /* reg 0x3301 RGLANESEL       0x3301*/
+
+		0x00,  /* 0x0344 */
+		0x00,  /* 0x0345 */
+		0x00,  /* 0x0346 */
+		0x00,  /* 0x0347 */
+		0x0F,  /* 0x0348 */
+		0xBF,  /* 0x0349 */
+		0x0B,  /* 0x034A */
+		0xCF,  /* 0x034B */
+	},
+	{ /* Snapshot */
+		0x14, /* pll_multiplier_lsb    // 20/10 fps*/
+		/* 0x14 for pclk 96MHz at 7.5 fps */
+
+		/* Global Setting */
+		0x0B, /* coarse_integration_time_msb*/
+		0xFF, /* coarse_integration_time_lsb*/
+
+		/* Mode Setting */
+		/* Full */
+		0x0C,/*frame_length_lines_msb 0x0340*/
+		0x02,/*frame_length_lines_lsb 0x0341*/
+		0x10,/*line_length_pck_msb 0x0342*/
+		0x70,/* line_length_pck_lsb 0x0343*/
+		0x0F,/* x_output_size_msb   0x034C*/
+		0xC0, /* x_output_size_lsb  0x034D*/
+		0x0B, /* y_output_size_msb  0x034E*/
+		0xD0, /* y_output_size_lsb  0x034F*/
+		0x01, /* x_even_inc_lsb     0x0381*/
+		0x01, /* x_odd_inc_lsb      0x0383*/
+		0x01, /* y_even_inc_lsb                     0x0385*/
+		0x01, /* y_odd_inc_lsb                      0x0387*/
+		0x06, /* reg 0x3016 VMODEADD                0x3016*/
+		0x06, /* reg 0x30E8 HADDAVE                 0x30E8*/
+		0x00, /* reg 0x3301 RGLANESEL               0x3301*/
+
+		0x00,  /* 0x0344 */
+		0x00,  /* 0x0345 */
+		0x00,  /* 0x0346 */
+		0x00,  /* 0x0347 */
+		0x0F,  /* 0x0348 */
+		0xBF,  /* 0x0349 */
+		0x0B,  /* 0x034A */
+		0xCF,  /* 0x034B */
+	},
+	/* 120 fps settings */
+	{
+		0x1B, /*0x1B fps*/
+		/* Global Setting */
+		0x00, /* coarse_integration_time_msb*/
+		0xFE, /* coarse_integration_time_lsb*/
+
+		/* Mode Setting */
+		/* V: 1/8 V-addition (9,7),
+		H: Full */
+
+		0x01, /* frame_length_lines_msb     0x0340*/
+		0x01, /* frame_length_lines_lsb     0x0341*/
+		0x10, /* line_length_pck_msb        0x0342*/
+		0x70, /* line_length_pck_lsb        0x0343*/
+		0x0f, /* x_output_size_msb          0x034C*/
+		0xc0, /* x_output_size_lsb          0x034D*/
+		0x00, /* y_output_size_msb          0x034E*/
+		0xF8, /* y_output_size_lsb          0x034F*/
+		0x01, /* x_even_inc_lsb             0x0381*/
+		0x01, /* x_odd_inc_lsb              0x0383*/
+		0x09, /* y_even_inc_lsb             0x0385*/
+		0x07, /* y_odd_inc_lsb              0x0387*/
+		0x46, /* reg 0x3016 VMODEADD        0x3016*/
+		0x86, /* reg 0x30E8 HADDAVE         0x30E8*/
+		0x00, /* reg 0x3301 RGLANESEL       0x3301*/
+		/* add for 120fps support */
+		0x00, /* 0x0344*/
+		0x00, /* 0x0345*/
+		0x02, /* 0x0346*/
+		0x10, /* 0x0347*/
+		0x0F, /* 0x0348*/
+		0xBF, /* 0x0349*/
+		0x09, /* 0x034A*/
+		0xCF, /* 0x034B*/
+	}
+};
+struct sn12m0pz_reg sn12m0pz_regs = {
+	.reg_pat = &iu060f_reg_pat[0],
+	.reg_pat_init = &iu060f_reg_pat_init[0],
+};
+
diff --git a/drivers/media/video/msm/vb6801.c b/drivers/media/video/msm/vb6801.c
new file mode 100644
index 0000000..fa82570
--- /dev/null
+++ b/drivers/media/video/msm/vb6801.c
@@ -0,0 +1,1616 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <media/msm_camera.h>
+#include <mach/gpio.h>
+#include <mach/camera.h>
+#include "vb6801.h"
+
+/*=============================================================
+	SENSOR REGISTER DEFINES
+==============================================================*/
+enum {
+	REG_HOLD = 0x0104,
+	RELEASE_HOLD = 0x0000,
+	HOLD = 0x0001,
+	STANDBY_MODE = 0x0000,
+	REG_COARSE_INTEGRATION_TIME = 0x0202,
+	REG_ANALOGUE_GAIN_CODE_GLOBAL = 0x0204,
+	REG_RAMP_SCALE = 0x3116,
+	REG_POWER_MAN_ENABLE_3 = 0x3142,
+	REG_POWER_MAN_ENABLE_4 = 0x3143,
+	REG_POWER_MAN_ENABLE_5 = 0x3144,
+	REG_CCP2_DATA_FORMAT = 0x0112,
+	REG_PRE_PLL_CLK_DIV = 0x0304,
+	REG_PLL_MULTIPLIER = 0x0306,
+	REG_VT_SYS_CLK_DIV = 0x0302,
+	REG_VT_PIX_CLK_DIV = 0x0300,
+	REG_OP_SYS_CLK_DIV = 0x030A,
+	REG_OP_PIX_CLK_DIV = 0x0308,
+	REG_VT_LINE_LENGTH_PCK = 0x0342,
+	REG_X_OUTPUT_SIZE = 0x034C,
+	REG_Y_OUTPUT_SIZE = 0x034E,
+	REG_X_ODD_INC = 0x0382,
+	REG_Y_ODD_INC = 0x0386,
+	REG_VT_FRAME_LENGTH_LINES = 0x0340,
+	REG_ANALOG_TIMING_MODES_2 = 0x3113,
+	REG_BRUCE_ENABLE = 0x37B0,
+	REG_OP_CODER_SYNC_CLK_SETUP = 0x3400,
+	REG_OP_CODER_ENABLE = 0x3401,
+	REG_OP_CODER_SLOW_PAD_EN = 0x3402,
+	REG_OP_CODER_AUTO_STARTUP = 0x3414,
+	REG_SCYTHE_ENABLE = 0x3204,
+	REG_SCYTHE_WEIGHT = 0x3206,
+	REG_FRAME_COUNT = 0x0005,
+	REG_MODE_SELECT = 0x0100,
+	REG_CCP2_CHANNEL_IDENTIFIER = 0x0110,
+	REG_CCP2_SIGNALLING_MODE = 0x0111,
+	REG_BTL_LEVEL_SETUP = 0x311B,
+	REG_OP_CODER_AUTOMATIC_MODE_ENABLE = 0x3403,
+	REG_PLL_CTRL = 0x3801,
+	REG_VCM_DAC_CODE = 0x3860,
+	REG_VCM_DAC_STROBE = 0x3868,
+	REG_VCM_DAC_ENABLE = 0x386C,
+	REG_NVM_T1_ADDR_00 = 0x3600,
+	REG_NVM_T1_ADDR_01 = 0x3601,
+	REG_NVM_T1_ADDR_02 = 0x3602,
+	REG_NVM_T1_ADDR_03 = 0x3603,
+	REG_NVM_T1_ADDR_04 = 0x3604,
+	REG_NVM_T1_ADDR_05 = 0x3605,
+	REG_NVM_T1_ADDR_06 = 0x3606,
+	REG_NVM_T1_ADDR_07 = 0x3607,
+	REG_NVM_T1_ADDR_08 = 0x3608,
+	REG_NVM_T1_ADDR_09 = 0x3609,
+	REG_NVM_T1_ADDR_0A = 0x360A,
+	REG_NVM_T1_ADDR_0B = 0x360B,
+	REG_NVM_T1_ADDR_0C = 0x360C,
+	REG_NVM_T1_ADDR_0D = 0x360D,
+	REG_NVM_T1_ADDR_0E = 0x360E,
+	REG_NVM_T1_ADDR_0F = 0x360F,
+	REG_NVM_T1_ADDR_10 = 0x3610,
+	REG_NVM_T1_ADDR_11 = 0x3611,
+	REG_NVM_T1_ADDR_12 = 0x3612,
+	REG_NVM_T1_ADDR_13 = 0x3613,
+	REG_NVM_CTRL = 0x3680,
+	REG_NVM_PDN = 0x3681,
+	REG_NVM_PULSE_WIDTH = 0x368B,
+};
+
+#define VB6801_LINES_PER_FRAME_PREVIEW   800
+#define VB6801_LINES_PER_FRAME_SNAPSHOT 1600
+#define VB6801_PIXELS_PER_LINE_PREVIEW  2500
+#define VB6801_PIXELS_PER_LINE_SNAPSHOT 2500
+
+/* AF constant */
+#define VB6801_TOTAL_STEPS_NEAR_TO_FAR    25
+#define VB6801_STEPS_NEAR_TO_CLOSEST_INF  25
+
+/* for 30 fps preview */
+#define VB6801_DEFAULT_CLOCK_RATE    12000000
+
+enum vb6801_test_mode_t {
+	TEST_OFF,
+	TEST_1,
+	TEST_2,
+	TEST_3
+};
+
+enum vb6801_resolution_t {
+	QTR_SIZE,
+	FULL_SIZE,
+	INVALID_SIZE
+};
+
+enum vb6801_setting_t {
+	RES_PREVIEW,
+	RES_CAPTURE
+};
+
+struct vb6801_work_t {
+	struct work_struct work;
+};
+
+struct sensor_dynamic_params_t {
+	uint16_t preview_pixelsPerLine;
+	uint16_t preview_linesPerFrame;
+	uint16_t snapshot_pixelsPerLine;
+	uint16_t snapshot_linesPerFrame;
+	uint8_t snapshot_changed_fps;
+	uint32_t pclk;
+};
+
+struct vb6801_sensor_info {
+	/* Sensor Configuration Input Parameters */
+	uint32_t ext_clk_freq_mhz;
+	uint32_t target_frame_rate_fps;
+	uint32_t target_vt_pix_clk_freq_mhz;
+	uint32_t sub_sampling_factor;
+	uint32_t analog_binning_allowed;
+	uint32_t raw_mode;
+	uint32_t capture_mode;
+
+	/* Image Readout Registers */
+	uint32_t x_odd_inc;	/* x pixel array addressing odd increment */
+	uint32_t y_odd_inc;	/* y pixel array addressing odd increment */
+	uint32_t x_output_size;	/* width of output image  */
+	uint32_t y_output_size;	/* height of output image */
+
+	/* Declare data format */
+	uint32_t ccp2_data_format;
+
+	/* Clock Tree Registers */
+	uint32_t pre_pll_clk_div;
+	uint32_t pll_multiplier;
+	uint32_t vt_sys_clk_div;
+	uint32_t vt_pix_clk_div;
+	uint32_t op_sys_clk_div;
+	uint32_t op_pix_clk_div;
+
+	/* Video Timing Registers */
+	uint32_t vt_line_length_pck;
+	uint32_t vt_frame_length_lines;
+
+	/* Analogue Binning Registers */
+	uint8_t vtiming_major;
+	uint8_t analog_timing_modes_4;
+
+	/* Fine (pixel) Integration Time Registers */
+	uint32_t fine_integration_time;
+
+	/* Coarse (lines) Integration Time Limit Registers */
+	uint32_t coarse_integration_time_max;
+
+	/* Coarse (lines) Integration Time Register (16-bit) */
+	uint32_t coarse_integration_time;
+
+	/* Analogue Gain Code Global Registers */
+	uint32_t analogue_gain_code_global;
+
+	/* Digital Gain Code Registers */
+	uint32_t digital_gain_code;
+
+	/* Overall gain (analogue & digital) code
+	 * Note that this is not a real register but just
+	 * an abstraction for the combination of analogue
+	 * and digital gain */
+	uint32_t gain_code;
+
+	/* FMT Test Information */
+	uint32_t pass_fail;
+	uint32_t day;
+	uint32_t month;
+	uint32_t year;
+	uint32_t tester;
+	uint32_t part_number;
+
+	/* Autofocus controls */
+	uint32_t vcm_dac_code;
+	int vcm_max_dac_code_step;
+	int vcm_proportional_factor;
+	int vcm_dac_code_spacing_ms;
+
+	/* VCM NVM Characterisation Information */
+	uint32_t vcm_dac_code_infinity_dn;
+	uint32_t vcm_dac_code_macro_up;
+	uint32_t vcm_dac_code_up_dn_delta;
+
+	/* Internal Variables */
+	uint32_t min_vt_frame_length_lines;
+};
+
+struct vb6801_work_t *vb6801_sensorw;
+struct i2c_client *vb6801_client;
+
+struct vb6801_ctrl_t {
+	const struct msm_camera_sensor_info *sensordata;
+
+	int sensormode;
+	uint32_t factor_fps;	/* frame rate reduction factor, init to 1 */
+	uint16_t curr_fps;
+	uint16_t max_fps;
+	int8_t pict_exp_update;
+	int8_t reducel;
+	uint16_t curr_lens_pos;
+	uint16_t init_curr_lens_pos;
+	enum vb6801_resolution_t prev_res;
+	enum vb6801_resolution_t pict_res;
+	enum vb6801_resolution_t curr_res;
+	enum vb6801_test_mode_t set_test;
+
+	struct vb6801_sensor_info s_info;
+	struct sensor_dynamic_params_t s_dynamic_params;
+};
+
+static struct vb6801_ctrl_t *vb6801_ctrl;
+static DECLARE_WAIT_QUEUE_HEAD(vb6801_wait_queue);
+DEFINE_MUTEX(vb6801_mut);
+
+static int vb6801_i2c_rxdata(unsigned short saddr,
+			     unsigned char *rxdata, int length)
+{
+	struct i2c_msg msgs[] = {
+		{
+			.addr = saddr,
+			.flags = 0,
+			.len = 2,
+			.buf = rxdata,
+		},
+		{
+			.addr = saddr,
+			.flags = I2C_M_RD,
+			.len = 2,
+			.buf = rxdata,
+		},
+	};
+
+	if (i2c_transfer(vb6801_client->adapter, msgs, 2) < 0) {
+		CDBG("vb6801_i2c_rxdata failed!\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int32_t vb6801_i2c_read(unsigned short raddr,
+			       unsigned short *rdata, int rlen)
+{
+	int32_t rc = 0;
+	unsigned char buf[2];
+
+	if (!rdata)
+		return -EIO;
+
+	memset(buf, 0, sizeof(buf));
+
+	buf[0] = (raddr & 0xFF00) >> 8;
+	buf[1] = (raddr & 0x00FF);
+
+	rc = vb6801_i2c_rxdata(vb6801_client->addr, buf, rlen);
+
+	if (rc < 0) {
+		CDBG("vb6801_i2c_read 0x%x failed!\n", raddr);
+		return rc;
+	}
+
+	*rdata = (rlen == 2 ? buf[0] << 8 | buf[1] : buf[0]);
+
+	return rc;
+}
+
+static int32_t vb6801_i2c_read_table(struct vb6801_i2c_reg_conf_t *regs,
+				     int items)
+{
+	int i;
+	int32_t rc = -EFAULT;
+
+	for (i = 0; i < items; i++) {
+		unsigned short *buf =
+		    regs->dlen == D_LEN_BYTE ?
+		    (unsigned short *)&regs->bdata :
+		    (unsigned short *)&regs->wdata;
+		rc = vb6801_i2c_read(regs->waddr, buf, regs->dlen + 1);
+
+		if (rc < 0) {
+			CDBG("vb6801_i2c_read_table Failed!!!\n");
+			break;
+		}
+
+		regs++;
+	}
+
+	return rc;
+}
+
+static int32_t vb6801_i2c_txdata(unsigned short saddr,
+				 unsigned char *txdata, int length)
+{
+	struct i2c_msg msg[] = {
+		{
+			.addr = saddr,
+			.flags = 0,
+			.len = length,
+			.buf = txdata,
+		},
+	};
+
+	if (i2c_transfer(vb6801_client->adapter, msg, 1) < 0) {
+		CDBG("vb6801_i2c_txdata faild 0x%x\n", vb6801_client->addr);
+		CDBG("vb6801_i2c_txdata failed 0x%x\n", vb6801_client->addr);
+	}
+
+	return 0;
+}
+
+static int32_t vb6801_i2c_write_b(unsigned short waddr, uint8_t bdata)
+{
+	int32_t rc = -EFAULT;
+	unsigned char buf[3];
+
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (waddr & 0xFF00) >> 8;
+	buf[1] = (waddr & 0x00FF);
+	buf[2] = bdata;
+
+	CDBG("i2c_write_b addr = %d, val = %d\n", waddr, bdata);
+	rc = vb6801_i2c_txdata(vb6801_client->addr, buf, 3);
+
+	if (rc < 0) {
+		CDBG("i2c_write_b failed, addr = 0x%x, val = 0x%x!\n",
+		     waddr, bdata);
+	}
+
+	return rc;
+}
+
+static int32_t vb6801_i2c_write_w(unsigned short waddr, unsigned short wdata)
+{
+	int32_t rc = -EFAULT;
+	unsigned char buf[4];
+
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (waddr & 0xFF00) >> 8;
+	buf[1] = (waddr & 0x00FF);
+	buf[2] = (wdata & 0xFF00) >> 8;
+	buf[3] = (wdata & 0x00FF);
+
+	CDBG("i2c_write_w addr = %d, val = %d, buf[2] = 0x%x, buf[3] = 0x%x\n",
+	     waddr, wdata, buf[2], buf[3]);
+
+	rc = vb6801_i2c_txdata(vb6801_client->addr, buf, 4);
+	if (rc < 0) {
+		CDBG("i2c_write_w failed, addr = 0x%x, val = 0x%x!\n",
+		     waddr, wdata);
+	}
+
+	return rc;
+}
+
+static int32_t vb6801_i2c_write_table(struct vb6801_i2c_reg_conf_t *regs,
+				      int items)
+{
+	int i;
+	int32_t rc = -EFAULT;
+
+	for (i = 0; i < items; i++) {
+		rc = ((regs->dlen == D_LEN_BYTE) ?
+		      vb6801_i2c_write_b(regs->waddr, regs->bdata) :
+		      vb6801_i2c_write_w(regs->waddr, regs->wdata));
+
+		if (rc < 0) {
+			CDBG("vb6801_i2c_write_table Failed!!!\n");
+			break;
+		}
+
+		regs++;
+	}
+
+	return rc;
+}
+
+static int32_t vb6801_reset(const struct msm_camera_sensor_info *data)
+{
+	int rc;
+
+	rc = gpio_request(data->sensor_reset, "vb6801");
+	if (!rc) {
+		CDBG("sensor_reset succeeded\n");
+		gpio_direction_output(data->sensor_reset, 0);
+		mdelay(50);
+		gpio_direction_output(data->sensor_reset, 1);
+		mdelay(13);
+	} else
+		CDBG("sensor_reset failed\n");
+
+	return rc;
+}
+
+static int32_t vb6801_set_default_focus(void)
+{
+	int32_t rc = 0;
+
+	/* FIXME: Default focus not supported */
+
+	return rc;
+}
+
+static void vb6801_get_pict_fps(uint16_t fps, uint16_t *pfps)
+{
+	/* input fps is preview fps in Q8 format */
+	uint32_t divider; /*Q10 */
+	uint32_t pclk_mult; /*Q10 */
+	uint32_t d1;
+	uint32_t d2;
+
+	d1 =
+		(uint32_t)(
+		(vb6801_ctrl->s_dynamic_params.preview_linesPerFrame *
+		0x00000400) /
+		vb6801_ctrl->s_dynamic_params.snapshot_linesPerFrame);
+
+	d2 =
+		(uint32_t)(
+		(vb6801_ctrl->s_dynamic_params.preview_pixelsPerLine *
+		0x00000400) /
+		vb6801_ctrl->s_dynamic_params.snapshot_pixelsPerLine);
+
+
+	divider = (uint32_t) (d1 * d2) / 0x00000400;
+
+	pclk_mult = (48 * 0x400) / 60;
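+	/* 48/60 in Q10: ratio of the snapshot (48 MHz) to preview (60 MHz)
+	 * target vt_pix_clk passed to vb6801_config_sensor below. */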
+
+	/* Verify PCLK settings and frame sizes. */
+	*pfps = (uint16_t)((((fps * pclk_mult) / 0x00000400) * divider)/
+				0x00000400);
+}
+
+static uint16_t vb6801_get_prev_lines_pf(void)
+{
+	if (vb6801_ctrl->prev_res == QTR_SIZE)
+		return vb6801_ctrl->s_dynamic_params.preview_linesPerFrame;
+	else
+		return vb6801_ctrl->s_dynamic_params.snapshot_linesPerFrame;
+}
+
+static uint16_t vb6801_get_prev_pixels_pl(void)
+{
+	if (vb6801_ctrl->prev_res == QTR_SIZE)
+		return vb6801_ctrl->s_dynamic_params.preview_pixelsPerLine;
+	else
+		return vb6801_ctrl->s_dynamic_params.snapshot_pixelsPerLine;
+}
+
+static uint16_t vb6801_get_pict_lines_pf(void)
+{
+	return vb6801_ctrl->s_dynamic_params.snapshot_linesPerFrame;
+}
+
+static uint16_t vb6801_get_pict_pixels_pl(void)
+{
+	return vb6801_ctrl->s_dynamic_params.snapshot_pixelsPerLine;
+}
+
+static uint32_t vb6801_get_pict_max_exp_lc(void)
+{
+	uint16_t snapshot_lines_per_frame;
+
+	if (vb6801_ctrl->pict_res == QTR_SIZE) {
+		snapshot_lines_per_frame =
+		    vb6801_ctrl->s_dynamic_params.preview_linesPerFrame - 3;
+	} else {
+		snapshot_lines_per_frame =
+		    vb6801_ctrl->s_dynamic_params.snapshot_linesPerFrame - 3;
+	}
+
+	return snapshot_lines_per_frame;
+}
+
+static int32_t vb6801_set_fps(struct fps_cfg *fps)
+{
+	int32_t rc = 0;
+
+	/* input is new fps in Q8 format */
+	switch (fps->fps_div) {
+	case 7680:		/* 30 * Q8 */
+		vb6801_ctrl->factor_fps = 1;
+		break;
+
+	case 3840:		/* 15 * Q8 */
+		vb6801_ctrl->factor_fps = 2;
+		break;
+
+	case 2560:		/* 10 * Q8 */
+		vb6801_ctrl->factor_fps = 3;
+		break;
+
+	case 1920:		/* 7.5 * Q8 */
+		vb6801_ctrl->factor_fps = 4;
+		break;
+
+	default:
+		rc = -ENODEV;
+		break;
+	}
+
+	return rc;
+}
+
+static int32_t vb6801_write_exp_gain(uint16_t gain, uint32_t line)
+{
+	int32_t rc = 0;
+	uint16_t lpf;
+
+	if (vb6801_ctrl->curr_res == SENSOR_FULL_SIZE)
+		lpf = VB6801_LINES_PER_FRAME_SNAPSHOT;
+	else
+		lpf = VB6801_LINES_PER_FRAME_PREVIEW;
+
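+	/* factor_fps > 1 means a reduced frame rate was requested via
+	 * vb6801_set_fps; frame length and coarse integration are then
+	 * stretched by that factor across successive calls, tracked by
+	 * the reducel flag. */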
+	/* hold */
+	rc = vb6801_i2c_write_w(REG_HOLD, HOLD);
+	if (rc < 0)
+		goto exp_gain_done;
+
+	if ((vb6801_ctrl->curr_fps <
+	     vb6801_ctrl->max_fps / vb6801_ctrl->factor_fps) &&
+	    (!vb6801_ctrl->pict_exp_update)) {
+
+		if (vb6801_ctrl->reducel) {
+
+			rc = vb6801_i2c_write_w(REG_VT_FRAME_LENGTH_LINES,
+						lpf * vb6801_ctrl->factor_fps);
+
+			vb6801_ctrl->curr_fps =
+			    vb6801_ctrl->max_fps / vb6801_ctrl->factor_fps;
+
+		} else if (!vb6801_ctrl->reducel) {
+
+			rc = vb6801_i2c_write_w(REG_COARSE_INTEGRATION_TIME,
+						line * vb6801_ctrl->factor_fps);
+
+			vb6801_ctrl->reducel = 1;
+		}
+	} else if ((vb6801_ctrl->curr_fps >
+		    vb6801_ctrl->max_fps / vb6801_ctrl->factor_fps) &&
+		   (!vb6801_ctrl->pict_exp_update)) {
+
+		rc = vb6801_i2c_write_w(REG_VT_FRAME_LENGTH_LINES,
+					lpf * vb6801_ctrl->factor_fps);
+
+		vb6801_ctrl->curr_fps =
+		    vb6801_ctrl->max_fps / vb6801_ctrl->factor_fps;
+
+	} else {
+		/* analogue_gain_code_global */
+		rc = vb6801_i2c_write_w(REG_ANALOGUE_GAIN_CODE_GLOBAL, gain);
+		if (rc < 0)
+			goto exp_gain_done;
+
+		/* coarse_integration_time */
+		rc = vb6801_i2c_write_w(REG_COARSE_INTEGRATION_TIME,
+					line * vb6801_ctrl->factor_fps);
+		if (rc < 0)
+			goto exp_gain_done;
+
+		vb6801_ctrl->pict_exp_update = 1;
+	}
+
+	rc = vb6801_i2c_write_w(REG_HOLD, RELEASE_HOLD);
+
+exp_gain_done:
+	return rc;
+}
+
+static int32_t vb6801_set_pict_exp_gain(uint16_t gain, uint32_t line)
+{
+	vb6801_ctrl->pict_exp_update = 1;
+	return vb6801_write_exp_gain(gain, line);
+}
+
+static int32_t vb6801_power_down(void)
+{
+	int32_t rc = 0;
+	rc = vb6801_i2c_write_b(REG_NVM_PDN, 0);
+
+	mdelay(5);
+	return rc;
+}
+
+static int32_t vb6801_go_to_position(uint32_t target_vcm_dac_code,
+				     struct vb6801_sensor_info *ps)
+{
+	/* Prior to running this function the following values must
+	 * be initialised in the sensor data structure, PS
+	 * ps->vcm_dac_code
+	 * ps->vcm_max_dac_code_step
+	 * ps->vcm_dac_code_spacing_ms */
+
+	int32_t rc = 0;
+
+	ps->vcm_dac_code = target_vcm_dac_code;
+
+	/* Restore Strobe to zero state */
+	rc = vb6801_i2c_write_b(REG_VCM_DAC_STROBE, 0x00);
+	if (rc < 0)
+		return rc;
+
+	/* Write 9-bit VCM DAC Code */
+	rc = vb6801_i2c_write_w(REG_VCM_DAC_CODE, ps->vcm_dac_code);
+	if (rc < 0)
+		return rc;
+
+	/* Generate a rising edge on the dac_strobe to latch
+	 * new DAC value */
+
+	rc = vb6801_i2c_write_w(REG_VCM_DAC_STROBE, 0x01);
+
+	return rc;
+}
+
+static int32_t vb6801_move_focus(int direction, int32_t num_steps)
+{
+	int16_t step_direction;
+	int16_t actual_step;
+	int16_t next_position;
+	uint32_t step_size;
+	int16_t small_move[4];
+	uint16_t i;
+	int32_t rc = 0;
+
+	step_size = (vb6801_ctrl->s_info.vcm_dac_code_macro_up -
+		     vb6801_ctrl->s_info.vcm_dac_code_infinity_dn) /
+	    VB6801_TOTAL_STEPS_NEAR_TO_FAR;
+
+	if (num_steps > VB6801_TOTAL_STEPS_NEAR_TO_FAR)
+		num_steps = VB6801_TOTAL_STEPS_NEAR_TO_FAR;
+	else if (num_steps == 0)
+		return -EINVAL;
+
+	if (direction == MOVE_NEAR)
+		step_direction = 4;
+	else if (direction == MOVE_FAR)
+		step_direction = -4;
+	else
+		return -EINVAL;
+
+	/* need to decide about default position and power supplied
+	 * at start up and reset */
+	if (vb6801_ctrl->curr_lens_pos < vb6801_ctrl->init_curr_lens_pos)
+		vb6801_ctrl->curr_lens_pos = vb6801_ctrl->init_curr_lens_pos;
+
+	actual_step = (step_direction * num_steps);
+
+	next_position = vb6801_ctrl->curr_lens_pos;
+
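+	/* Split the requested move into four roughly equal sub-steps so
+	 * the lens is driven gradually (see the damping loop below). */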
+	for (i = 0; i < 4; i++)
+		small_move[i] =
+		    (i + 1) * actual_step / 4 - i * actual_step / 4;
+
+	if (next_position > 511)
+		next_position = 511;
+	else if (next_position < 0)
+		next_position = 0;
+
+	/* for damping */
+	for (i = 0; i < 4; i++) {
+		next_position =
+		    (int16_t) (vb6801_ctrl->curr_lens_pos + small_move[i]);
+
+		/* Writing the digital code for current to the actuator */
+		CDBG("next_position in damping mode = %d\n", next_position);
+
+		rc = vb6801_go_to_position(next_position, &vb6801_ctrl->s_info);
+		if (rc < 0) {
+			CDBG("go_to_position Failed!!!\n");
+			return rc;
+		}
+
+		vb6801_ctrl->curr_lens_pos = next_position;
+		if (i < 3)
+			mdelay(5);
+	}
+
+	return rc;
+}
+
+static int vb6801_read_nvm_data(struct vb6801_sensor_info *ps)
+{
+	/* +--------+------+------+----------------+---------------+
+	 * | Index | NVM | NVM | Name | Description |
+	 * | | Addr | Byte | | |
+	 * +--------+------+------+----------------+---------------+
+	 * | 0x3600 | 0 | 3 | nvm_t1_addr_00 | {PF[2:0]:Day[4:0]} |
+	 * | 0x3601 | 0 | 2 | nvm_t1_addr_01 | {Month[3:0]:Year[3:0]} |
+	 * | 0x3602 | 0 | 1 | nvm_t1_addr_02 | Tester[7:0] |
+	 * | 0x3603 | 0 | 0 | nvm_t1_addr_03 | Part[15:8] |
+	 * +--------+------+------+----------------+---------------+
+	 * | 0x3604 | 1 | 3 | nvm_t1_addr_04 | Part[7:0] |
+	 * | 0x3605 | 1 | 2 | nvm_t1_addr_05 | StartWPM[7:0] |
+	 * | 0x3606 | 1 | 1 | nvm_t1_addr_06 | Infinity[7:0] |
+	 * | 0x3607 | 1 | 0 | nvm_t1_addr_07 | Macro[7:0] |
+	 * +--------+------+------+----------------+---------------+
+	 * | 0x3608 | 2 | 3 | nvm_t1_addr_08 | Reserved |
+	 * | 0x3609 | 2 | 2 | nvm_t1_addr_09 | Reserved |
+	 * | 0x360A | 2 | 1 | nvm_t1_addr_0A | UpDown[7:0] |
+	 * | 0x360B | 2 | 0 | nvm_t1_addr_0B | Reserved |
+	 * +--------+------+------+----------------+---------------+
+	 * | 0x360C | 3 | 3 | nvm_t1_addr_0C | Reserved |
+	 * | 0x360D | 3 | 2 | nvm_t1_addr_0D | Reserved |
+	 * | 0x360E | 3 | 1 | nvm_t1_addr_0E | Reserved |
+	 * | 0x360F | 3 | 0 | nvm_t1_addr_0F | Reserved |
+	 * +--------+------+------+----------------+---------------+
+	 * | 0x3610 | 4 | 3 | nvm_t1_addr_10 | Reserved |
+	 * | 0x3611 | 4 | 2 | nvm_t1_addr_11 | Reserved |
+	 * | 0x3612 | 4 | 1 | nvm_t1_addr_12 | Reserved |
+	 * | 0x3613 | 4 | 0 | nvm_t1_addr_13 | Reserved |
+	 * +--------+------+------+----------------+---------------+*/
+
+	int32_t rc;
+	struct vb6801_i2c_reg_conf_t rreg[] = {
+		{REG_NVM_T1_ADDR_00, 0, 0, D_LEN_BYTE},
+		{REG_NVM_T1_ADDR_01, 0, 0, D_LEN_BYTE},
+		{REG_NVM_T1_ADDR_02, 0, 0, D_LEN_BYTE},
+		{REG_NVM_T1_ADDR_03, 0, 0, D_LEN_BYTE},
+		{REG_NVM_T1_ADDR_04, 0, 0, D_LEN_BYTE},
+		{REG_NVM_T1_ADDR_05, 0, 0, D_LEN_BYTE},
+		{REG_NVM_T1_ADDR_06, 0, 0, D_LEN_BYTE},
+		{REG_NVM_T1_ADDR_07, 0, 0, D_LEN_BYTE},
+		{REG_NVM_T1_ADDR_08, 0, 0, D_LEN_BYTE},
+		{REG_NVM_T1_ADDR_09, 0, 0, D_LEN_BYTE},
+		{REG_NVM_T1_ADDR_0A, 0, 0, D_LEN_BYTE},
+		{REG_NVM_T1_ADDR_0B, 0, 0, D_LEN_BYTE},
+		{REG_NVM_T1_ADDR_0C, 0, 0, D_LEN_BYTE},
+		{REG_NVM_T1_ADDR_0D, 0, 0, D_LEN_BYTE},
+		{REG_NVM_T1_ADDR_0E, 0, 0, D_LEN_BYTE},
+		{REG_NVM_T1_ADDR_0F, 0, 0, D_LEN_BYTE},
+		{REG_NVM_T1_ADDR_10, 0, 0, D_LEN_BYTE},
+		{REG_NVM_T1_ADDR_11, 0, 0, D_LEN_BYTE},
+		{REG_NVM_T1_ADDR_12, 0, 0, D_LEN_BYTE},
+		{REG_NVM_T1_ADDR_13, 0, 0, D_LEN_BYTE},
+	};
+
+	struct vb6801_i2c_reg_conf_t wreg[] = {
+		/* Enable NVM for Direct Reading */
+		{REG_NVM_CTRL, 0, 2, D_LEN_BYTE},
+
+		/* Power up NVM */
+		{REG_NVM_PDN, 0, 1, D_LEN_BYTE},
+	};
+
+	rc = vb6801_i2c_write_table(wreg, ARRAY_SIZE(wreg));
+	if (rc < 0) {
+		CDBG("I2C Write Table FAILED!!!\n");
+		return rc;
+	}
+
+	/* NVM Read Pulse Width
+	 * ====================
+	 * nvm_pulse_width_us = nvm_pulse_width_ext_clk / ext_clk_freq_mhz
+	 * Valid Range for Read Pulse Width = 400ns -> 3.0us
+	 * Min ext_clk_freq_mhz = 6MHz  => 3.0 *  6  = 18
+	 * Max ext_clk_freq_mhz = 27MHz => 0.4 * 27 = 10.8
+	 * Choose 15 as a common value
+	 *  - 15 /  6.0 = 2.5000us
+	 *  - 15 / 12.0 = 1.2500us
+	 *  - 15 / 27.0 = 0.5555us */
+	rc = vb6801_i2c_write_w(REG_NVM_PULSE_WIDTH, 15);
+	if (rc < 0) {
+		rc = -EBUSY;
+		goto nv_shutdown;
+	}
+
+	rc = vb6801_i2c_read_table(rreg, ARRAY_SIZE(rreg));
+	if (rc < 0) {
+		CDBG("I2C Read Table FAILED!!!\n");
+		rc = -EBUSY;
+		goto nv_shutdown;
+	}
+
+	/* Decode and Save FMT Info */
+	ps->pass_fail = (rreg[0].bdata & 0x00E0) >> 5;
+	ps->day = (rreg[0].bdata & 0x001F);
+	ps->month = (rreg[1].bdata & 0x00F0) >> 4;
+	ps->year = (rreg[1].bdata & 0x000F) + 2000;
+	ps->tester = rreg[2].bdata;
+	ps->part_number = (rreg[3].bdata << 8) + rreg[4].bdata;
+
+	/* Decode and Save VCM Dac Values in data structure */
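+	/* The macro position is presumably stored halved in NVM, hence the
+	 * left shift to restore a 9-bit DAC code. */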
+	ps->vcm_dac_code_infinity_dn = rreg[6].bdata;
+	ps->vcm_dac_code_macro_up = rreg[7].bdata << 1;
+	ps->vcm_dac_code_up_dn_delta = rreg[10].bdata;
+
+nv_shutdown:
+	/* Power Down NVM to extend life time */
+	rc = vb6801_i2c_write_b(REG_NVM_PDN, 0);
+
+	return rc;
+}
+
+static int vb6801_config_sensor(int32_t ext_clk_freq_mhz,
+				int32_t target_frame_rate_fps,
+				int32_t target_vt_pix_clk_freq_mhz,
+				uint32_t sub_sampling_factor,
+				uint32_t analog_binning_allowed,
+				uint32_t raw_mode, int capture_mode,
+				enum vb6801_resolution_t res)
+{
+	int32_t rc;
+	/* ext_clk_freq_mhz      = 6.0 -> 27.0 MHz
+	 * target_frame_rate_fps  = 15 fps
+	 * target_vt_pix_clk_freq_mhz = 24.0 -> 64.0MHz
+	 * sub_sampling factor   = 1, 2, 3, or 4
+	 * raw_mode factor       = 10
+	 *
+	 * capture_mode, 0 = CCP1
+	 * capture_mode, 1 = CCP2
+	 * capture_mode, 2 = 10-bit parallel + hsync + vsync */
+
+	/* Declare data format */
+	uint32_t ccp2_data_format = 0x0A0A;
+
+	/*  Declare clock tree variables */
+	int32_t min_pll_ip_freq_mhz = 6;
+	int32_t max_pll_op_freq_mhz = 640;
+	uint32_t pre_pll_clk_div = 1;
+	int32_t pll_ip_freq_mhz = 6;
+	uint32_t pll_multiplier = 100;
+	int32_t pll_op_freq_mhz = 600;
+	uint32_t vt_sys_clk_div = 1;
+	int32_t vt_sys_clk_freq_mhz = 600;
+	uint32_t vt_pix_clk_div = 10;
+	int32_t vt_pix_clk_freq_mhz = 60;
+	uint32_t op_sys_clk_div = 1;
+	int32_t op_sys_clk_freq_mhz = 60;
+	uint32_t op_pix_clk_div = 10;
+	int32_t op_pix_clk_freq_mhz = 60;
+
+	/* Declare pixel array and frame timing variables */
+	uint32_t x_pixel_array = 2064;
+	uint32_t y_pixel_array = 1544;
+	uint32_t x_even_inc = 1;
+	uint32_t x_odd_inc = 1;
+	uint32_t y_even_inc = 1;
+	uint32_t y_odd_inc = 1;
+	uint32_t x_output_size = 2064;
+	uint32_t y_output_size = 1544;
+	uint32_t additional_rows = 2;
+	uint32_t min_vt_frame_blanking_lines = 16;
+	uint32_t vt_line_length_pck = 2500;
+	uint32_t vt_line_length_us = 0;
+	uint32_t min_vt_frame_length_lines = 1562;
+	uint32_t vt_frame_length_lines = 1600;
+	uint32_t target_vt_frame_length_ms;	/* 200 * 0x0001000 / 3; */
+	uint32_t vt_frame_length_ms;	/* 200 * 0x0001000 / 3; */
+	uint32_t frame_rate_fps = 15;
+
+	/* Coarse integration time */
+	uint32_t coarse_integration_time = 1597;
+	uint32_t coarse_integration_time_max_margin = 3;
+	uint16_t frame_count;
+	int timeout;
+
+	struct vb6801_sensor_info *pinfo = &vb6801_ctrl->s_info;
+
+	struct vb6801_i2c_reg_conf_t rreg[] = {
+		{REG_PRE_PLL_CLK_DIV, 0, 0, D_LEN_WORD},
+		{REG_PLL_MULTIPLIER, 0, 0, D_LEN_WORD},
+		{REG_VT_SYS_CLK_DIV, 0, 0, D_LEN_WORD},
+		{REG_VT_PIX_CLK_DIV, 0, 0, D_LEN_WORD},
+		{REG_OP_SYS_CLK_DIV, 0, 0, D_LEN_WORD},
+		{REG_OP_PIX_CLK_DIV, 0, 0, D_LEN_WORD},
+		{REG_FRAME_COUNT, 0, 0, D_LEN_BYTE},
+	};
+
+	struct vb6801_i2c_reg_conf_t wreg2[] = {
+		{REG_POWER_MAN_ENABLE_3, 0, 95, D_LEN_BYTE},
+		{REG_POWER_MAN_ENABLE_4, 0, 142, D_LEN_BYTE},
+		{REG_POWER_MAN_ENABLE_5, 0, 7, D_LEN_BYTE},
+	};
+
+	/* VIDEO TIMING CALCULATIONS
+	 * ========================= */
+
+	/* Pixel Array Size */
+	x_pixel_array = 2064;
+	y_pixel_array = 1544;
+
+	/* set current resolution */
+	vb6801_ctrl->curr_res = res;
+
+	/* Analogue binning setup */
+	if (pinfo->analog_binning_allowed > 0 &&
+	    pinfo->sub_sampling_factor == 4) {
+
+		pinfo->vtiming_major = 1;
+		pinfo->analog_timing_modes_4 = 32;
+	} else if (pinfo->analog_binning_allowed > 0 &&
+		   pinfo->sub_sampling_factor == 2) {
+
+		pinfo->vtiming_major = 1;
+		pinfo->analog_timing_modes_4 = 0;
+	} else {
+
+		pinfo->vtiming_major = 0;
+		pinfo->analog_timing_modes_4 = 0;
+	}
+
+	/* Sub-Sampling X & Y Odd Increments: valid values 1, 3, 5, 7 */
+	x_even_inc = 1;
+	y_even_inc = 1;
+	x_odd_inc = (sub_sampling_factor << 1) - x_even_inc;
+	y_odd_inc = (sub_sampling_factor << 1) - y_even_inc;
+
+	/* Output image size
+	 * Must always be a multiple of 2 - round down */
+	x_output_size = ((x_pixel_array / sub_sampling_factor) >> 1) << 1;
+	y_output_size = ((y_pixel_array / sub_sampling_factor) >> 1) << 1;
+
+	/* Output data format */
+	ccp2_data_format = (raw_mode << 8) + raw_mode;
+
+	/* Pre PLL clock divider : valid values 1, 2 or 4
+	 * The 1st step is to ensure that PLL input frequency is as close
+	 * as possible to the min allowed PLL input frequency.
+	 * This yields the smallest step size in the PLL output frequency. */
+	pre_pll_clk_div =
+	    ((int)(ext_clk_freq_mhz / min_pll_ip_freq_mhz) >> 1) << 1;
+	if (pre_pll_clk_div < 2)
+		pre_pll_clk_div = 1;
+
+	pll_ip_freq_mhz = ext_clk_freq_mhz / pre_pll_clk_div;
+
+	/* Video Timing System Clock divider: valid values 1, 2, 4
+	 * Now need to work backwards through the clock tree to determine the
+	 * 1st pass estimates for vt_sys_clk_freq_mhz and then the PLL output
+	 * frequency.*/
+	vt_sys_clk_freq_mhz = vt_pix_clk_div * target_vt_pix_clk_freq_mhz;
+	vt_sys_clk_div = max_pll_op_freq_mhz / vt_sys_clk_freq_mhz;
+	if (vt_sys_clk_div < 2)
+		vt_sys_clk_div = 1;
+
+	/* PLL Multiplier: min, max 106 */
+	pll_op_freq_mhz = vt_sys_clk_div * vt_sys_clk_freq_mhz;
+	pll_multiplier = (pll_op_freq_mhz * 0x0001000) / pll_ip_freq_mhz;
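+	/* pll_multiplier is held in Q12 (scaled by 0x1000) for the
+	 * intermediate arithmetic; it is not written to the sensor in
+	 * this function. */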
+
+	/* Calculate the actual pll output frequency
+	 * - the pll_multiplier calculation introduces a quantisation error
+	 *   due to the integer nature of the pll multiplier */
+	pll_op_freq_mhz = (pll_ip_freq_mhz * pll_multiplier) / 0x0001000;
+
+	/* Re-calculate video timing clock frequencies based
+	 * on actual PLL freq */
+	vt_sys_clk_freq_mhz = pll_op_freq_mhz / vt_sys_clk_div;
+	vt_pix_clk_freq_mhz = ((vt_sys_clk_freq_mhz * 0x0001000) /
+				vt_pix_clk_div)/0x0001000;
+
+	/* Output System Clock Divider: valid value 1, 2, 4, 6, 8
+	 * op_sys_clk_div = vt_sys_clk_div;*/
+	op_sys_clk_div = (vt_sys_clk_div * sub_sampling_factor);
+	if (op_sys_clk_div < 2)
+		op_sys_clk_div = 1;
+
+	/* Calculate output timing clock frequencies */
+	op_sys_clk_freq_mhz = pll_op_freq_mhz / op_sys_clk_div;
+	op_pix_clk_freq_mhz =
+	    (op_sys_clk_freq_mhz * 0x0001000) / (op_pix_clk_div * 0x0001000);
+
+	/* Line length in pixels and us */
+	vt_line_length_pck = 2500;
+	vt_line_length_us =
+	    vt_line_length_pck * 0x0001000 / vt_pix_clk_freq_mhz;
+
+	/* Target vt_frame_length_ms */
+	target_vt_frame_length_ms = (1000 * 0x0001000 / target_frame_rate_fps);
+
+	/* Frame length in lines */
+	min_vt_frame_length_lines =
+	    additional_rows + y_output_size + min_vt_frame_blanking_lines;
+
+	vt_frame_length_lines =
+	    ((1000 * target_vt_frame_length_ms) / vt_line_length_us);
+
+	if (vt_frame_length_lines <= min_vt_frame_length_lines)
+		vt_frame_length_lines = min_vt_frame_length_lines;
+
+	/* Calculate the actual frame length in ms */
+	vt_frame_length_ms = (vt_frame_length_lines * vt_line_length_us / 1000);
+
+	/* Frame Rate in fps */
+	frame_rate_fps = (1000 * 0x0001000 / vt_frame_length_ms);
+
+	/* Set coarse integration to max */
+	coarse_integration_time =
+	    vt_frame_length_lines - coarse_integration_time_max_margin;
+
+	CDBG("SENSOR VIDEO TIMING SUMMARY:\n");
+	CDBG(" ============================\n");
+	CDBG("ext_clk_freq_mhz      = %d\n", ext_clk_freq_mhz);
+	CDBG("pre_pll_clk_div       = %d\n", pre_pll_clk_div);
+	CDBG("pll_ip_freq_mhz       = %d\n", pll_ip_freq_mhz);
+	CDBG("pll_multiplier        = %d\n", pll_multiplier);
+	CDBG("pll_op_freq_mhz       = %d\n", pll_op_freq_mhz);
+	CDBG("vt_sys_clk_div        = %d\n", vt_sys_clk_div);
+	CDBG("vt_sys_clk_freq_mhz   = %d\n", vt_sys_clk_freq_mhz);
+	CDBG("vt_pix_clk_div        = %d\n", vt_pix_clk_div);
+	CDBG("vt_pix_clk_freq_mhz   = %d\n", vt_pix_clk_freq_mhz);
+	CDBG("op_sys_clk_div        = %d\n", op_sys_clk_div);
+	CDBG("op_sys_clk_freq_mhz   = %d\n", op_sys_clk_freq_mhz);
+	CDBG("op_pix_clk_div        = %d\n", op_pix_clk_div);
+	CDBG("op_pix_clk_freq_mhz   = %d\n", op_pix_clk_freq_mhz);
+	CDBG("vt_line_length_pck    = %d\n", vt_line_length_pck);
+	CDBG("vt_line_length_us     = %d\n", vt_line_length_us/0x0001000);
+	CDBG("vt_frame_length_lines = %d\n", vt_frame_length_lines);
+	CDBG("vt_frame_length_ms    = %d\n", vt_frame_length_ms/0x0001000);
+	CDBG("frame_rate_fps        = %d\n", frame_rate_fps);
+	CDBG("ccp2_data_format = %d\n", ccp2_data_format);
+	CDBG("x_output_size = %d\n", x_output_size);
+	CDBG("y_output_size = %d\n", y_output_size);
+	CDBG("x_odd_inc = %d\n", x_odd_inc);
+	CDBG("y_odd_inc = %d\n", y_odd_inc);
+	CDBG("(vt_frame_length_lines * frame_rate_factor ) = %d\n",
+	    (vt_frame_length_lines * vb6801_ctrl->factor_fps));
+	CDBG("coarse_integration_time = %d\n", coarse_integration_time);
+	CDBG("pinfo->vcm_dac_code = %d\n", pinfo->vcm_dac_code);
+	CDBG("capture_mode = %d\n", capture_mode);
+
+	/* RE-CONFIGURE SENSOR WITH NEW TIMINGS
+	 * ====================================
+	 * Enter Software Standby Mode */
+	rc = vb6801_i2c_write_b(REG_MODE_SELECT, 0);
+	if (rc < 0) {
+		CDBG("I2C vb6801_i2c_write_b FAILED!!!\n");
+		return rc;
+	}
+
+	/* Wait 100ms */
+	mdelay(100);
+
+	if (capture_mode == 0) {
+
+		rc = vb6801_i2c_write_b(REG_CCP2_CHANNEL_IDENTIFIER, 0);
+		rc = vb6801_i2c_write_b(REG_CCP2_SIGNALLING_MODE, 0);
+	} else if (capture_mode == 1) {
+
+		rc = vb6801_i2c_write_b(REG_CCP2_CHANNEL_IDENTIFIER, 0);
+		rc = vb6801_i2c_write_b(REG_CCP2_SIGNALLING_MODE, 1);
+	}
+
+	{
+		struct vb6801_i2c_reg_conf_t wreg[] = {
+			/* Re-configure Sensor */
+			{REG_CCP2_DATA_FORMAT, ccp2_data_format, 0,
+			 D_LEN_WORD},
+			{REG_ANALOGUE_GAIN_CODE_GLOBAL, 128, 0, D_LEN_WORD},
+			{REG_PRE_PLL_CLK_DIV, pre_pll_clk_div, 0, D_LEN_WORD},
+			{REG_VT_SYS_CLK_DIV, vt_sys_clk_div, 0, D_LEN_WORD},
+			{REG_VT_PIX_CLK_DIV, vt_pix_clk_div, 0, D_LEN_WORD},
+			{REG_OP_SYS_CLK_DIV, vt_sys_clk_div, 0, D_LEN_WORD},
+			{REG_OP_PIX_CLK_DIV, vt_pix_clk_div, 0, D_LEN_WORD},
+			{REG_VT_LINE_LENGTH_PCK, vt_line_length_pck, 0,
+			 D_LEN_WORD},
+			{REG_X_OUTPUT_SIZE, x_output_size, 0, D_LEN_WORD},
+			{REG_Y_OUTPUT_SIZE, y_output_size, 0, D_LEN_WORD},
+			{REG_X_ODD_INC, x_odd_inc, 0, D_LEN_WORD},
+			{REG_Y_ODD_INC, y_odd_inc, 0, D_LEN_WORD},
+			{REG_VT_FRAME_LENGTH_LINES,
+			 vt_frame_length_lines * vb6801_ctrl->factor_fps, 0,
+			 D_LEN_WORD},
+			{REG_COARSE_INTEGRATION_TIME,
+			 coarse_integration_time, 0, D_LEN_WORD},
+			/* Analogue Settings */
+			{REG_ANALOG_TIMING_MODES_2, 0, 132, D_LEN_BYTE},
+			{REG_RAMP_SCALE, 0, 5, D_LEN_BYTE},
+			{REG_BTL_LEVEL_SETUP, 0, 11, D_LEN_BYTE},
+			/* Enable Defect Correction */
+			{REG_SCYTHE_ENABLE, 0, 1, D_LEN_BYTE},
+			{REG_SCYTHE_WEIGHT, 0, 16, D_LEN_BYTE},
+			{REG_BRUCE_ENABLE, 0, 1, D_LEN_BYTE},
+			/* Auto Focus Configuration
+			 * Please note that the DAC Code is written as a
+			 * 16-bit value; 0 = infinity (no DAC current) */
+			{REG_VCM_DAC_CODE, pinfo->vcm_dac_code, 0, D_LEN_WORD},
+			{REG_VCM_DAC_STROBE, 0, 0, D_LEN_BYTE},
+			{REG_VCM_DAC_ENABLE, 0, 1, D_LEN_BYTE},
+		};
+
+		rc = vb6801_i2c_write_table(wreg, ARRAY_SIZE(wreg));
+		if (rc < 0) {
+			CDBG("I2C Write Table FAILED!!!\n");
+			return rc;
+		}
+	}
+	/* Parallel Interface Configuration */
+	if (capture_mode >= 2) {
+		struct vb6801_i2c_reg_conf_t wreg1[] = {
+			{REG_OP_CODER_SYNC_CLK_SETUP, 0, 15, D_LEN_BYTE},
+			{REG_OP_CODER_ENABLE, 0, 3, D_LEN_BYTE},
+			{REG_OP_CODER_SLOW_PAD_EN, 0, 1, D_LEN_BYTE},
+			{REG_OP_CODER_AUTOMATIC_MODE_ENABLE, 0, 3, D_LEN_BYTE},
+			{REG_OP_CODER_AUTO_STARTUP, 0, 2, D_LEN_BYTE},
+		};
+
+		rc = vb6801_i2c_write_table(wreg1, ARRAY_SIZE(wreg1));
+		if (rc < 0) {
+			CDBG("I2C Write Table FAILED!!!\n");
+			return rc;
+		}
+	}
+
+	/* Enter Streaming Mode */
+	rc = vb6801_i2c_write_b(REG_MODE_SELECT, 1);
+	if (rc < 0) {
+		CDBG("I2C Write Table FAILED!!!\n");
+		return rc;
+	}
+
+	/* Wait until the sensor starts streaming
+	 * Poll until the reported frame_count value is != 0xFF */
+	frame_count = 0xFF;
+	timeout = 2000;
+	while (frame_count == 0xFF && timeout > 0) {
+		rc = vb6801_i2c_read(REG_FRAME_COUNT, &frame_count, 1);
+		if (rc < 0)
+			return rc;
+
+		CDBG("REG_FRAME_COUNT  = 0x%x\n", frame_count);
+		timeout--;
+	}
+
+	/* Post Streaming Configuration */
+
+	rc = vb6801_i2c_write_table(wreg2, ARRAY_SIZE(wreg2));
+	if (rc < 0) {
+		CDBG("I2C Write Table FAILED!!!\n");
+		return rc;
+	}
+
+	rc = vb6801_i2c_read_table(rreg, ARRAY_SIZE(rreg));
+	if (rc < 0) {
+		CDBG("I2C Read Table FAILED!!!\n");
+		return rc;
+	}
+
+	CDBG("REG_PRE_PLL_CLK_DIV = 0x%x\n", rreg[0].wdata);
+	CDBG("REG_PLL_MULTIPLIER  = 0x%x\n", rreg[1].wdata);
+	CDBG("REG_VT_SYS_CLK_DIV  = 0x%x\n", rreg[2].wdata);
+	CDBG("REG_VT_PIX_CLK_DIV  = 0x%x\n", rreg[3].wdata);
+	CDBG("REG_OP_SYS_CLK_DIV  = 0x%x\n", rreg[4].wdata);
+	CDBG("REG_OP_PIX_CLK_DIV  = 0x%x\n", rreg[5].wdata);
+	CDBG("REG_FRAME_COUNT  = 0x%x\n", rreg[6].bdata);
+
+	mdelay(50);
+	frame_count = 0;
+	rc = vb6801_i2c_read(REG_FRAME_COUNT, &frame_count, 1);
+	CDBG("REG_FRAME_COUNT1  = 0x%x\n", frame_count);
+
+	mdelay(150);
+	frame_count = 0;
+	rc = vb6801_i2c_read(REG_FRAME_COUNT, &frame_count, 1);
+	CDBG("REG_FRAME_COUNT2  = 0x%x\n", frame_count);
+
+	mdelay(100);
+	frame_count = 0;
+	rc = vb6801_i2c_read(REG_FRAME_COUNT, &frame_count, 1);
+	CDBG("REG_FRAME_COUNT3  = 0x%x\n", frame_count);
+
+	mdelay(250);
+	frame_count = 0;
+	rc = vb6801_i2c_read(REG_FRAME_COUNT, &frame_count, 1);
+	CDBG("REG_FRAME_COUNT4  = 0x%x\n", frame_count);
+
+	return rc;
+}
+
+static int vb6801_sensor_init_done(const struct msm_camera_sensor_info *data)
+{
+	gpio_direction_output(data->sensor_reset, 0);
+	gpio_free(data->sensor_reset);
+	return 0;
+}
+
+static int vb6801_init_client(struct i2c_client *client)
+{
+	/* Initialize the MSM_CAMI2C Chip */
+	init_waitqueue_head(&vb6801_wait_queue);
+	return 0;
+}
+
+static int32_t vb6801_video_config(int mode, int res)
+{
+	int32_t rc = 0;
+
+	vb6801_ctrl->prev_res = res;
+	vb6801_ctrl->curr_res = res;
+	vb6801_ctrl->sensormode = mode;
+
+	rc = vb6801_config_sensor(12, 30, 60, 2, 1, 10, 2, RES_PREVIEW);
+	if (rc < 0)
+		return rc;
+
+	rc = vb6801_i2c_read(REG_VT_LINE_LENGTH_PCK,
+			     &vb6801_ctrl->s_dynamic_params.
+			     preview_pixelsPerLine, 2);
+	if (rc < 0)
+		return rc;
+
+	rc = vb6801_i2c_read(REG_VT_LINE_LENGTH_PCK,
+			     &vb6801_ctrl->s_dynamic_params.
+			     preview_linesPerFrame, 2);
+
+	return rc;
+}
+
+static int32_t vb6801_snapshot_config(int mode, int res)
+{
+	int32_t rc = 0;
+
+	vb6801_ctrl->curr_res = vb6801_ctrl->pict_res;
+	vb6801_ctrl->sensormode = mode;
+
+	rc = vb6801_config_sensor(12, 12, 48, 1, 1, 10, 2, RES_CAPTURE);
+	if (rc < 0)
+		return rc;
+
+	rc = vb6801_i2c_read(REG_VT_LINE_LENGTH_PCK,
+			     &vb6801_ctrl->s_dynamic_params.
+			     snapshot_pixelsPerLine, 2);
+	if (rc < 0)
+		return rc;
+
+	rc = vb6801_i2c_read(REG_VT_LINE_LENGTH_PCK,
+			     &vb6801_ctrl->s_dynamic_params.
+			     snapshot_linesPerFrame, 2);
+
+	return rc;
+}
+
+static int32_t vb6801_set_sensor_mode(int mode, int res)
+{
+	int32_t rc = 0;
+
+	switch (mode) {
+	case SENSOR_PREVIEW_MODE:
+		rc = vb6801_video_config(mode, res);
+		break;
+
+	case SENSOR_SNAPSHOT_MODE:
+	case SENSOR_RAW_SNAPSHOT_MODE:
+		rc = vb6801_snapshot_config(mode, res);
+		break;
+
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+int vb6801_sensor_config(void __user *argp)
+{
+	struct sensor_cfg_data cdata;
+	long rc = 0;
+
+	if (copy_from_user(&cdata,
+			   (void *)argp, sizeof(struct sensor_cfg_data)))
+		return -EFAULT;
+
+	mutex_lock(&vb6801_mut);
+
+	CDBG("vb6801_sensor_config, cfgtype = %d\n", cdata.cfgtype);
+
+	switch (cdata.cfgtype) {
+	case CFG_GET_PICT_FPS:
+		vb6801_get_pict_fps(cdata.cfg.gfps.prevfps,
+				    &(cdata.cfg.gfps.pictfps));
+
+		if (copy_to_user((void *)argp,
+				 &cdata, sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PREV_L_PF:
+		cdata.cfg.prevl_pf = vb6801_get_prev_lines_pf();
+
+		if (copy_to_user((void *)argp,
+				 &cdata, sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PREV_P_PL:
+		cdata.cfg.prevp_pl = vb6801_get_prev_pixels_pl();
+
+		if (copy_to_user((void *)argp,
+				 &cdata, sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PICT_L_PF:
+		cdata.cfg.pictl_pf = vb6801_get_pict_lines_pf();
+
+		if (copy_to_user((void *)argp,
+				 &cdata, sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PICT_P_PL:
+		cdata.cfg.pictp_pl = vb6801_get_pict_pixels_pl();
+
+		if (copy_to_user((void *)argp,
+				 &cdata, sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_GET_PICT_MAX_EXP_LC:
+		cdata.cfg.pict_max_exp_lc = vb6801_get_pict_max_exp_lc();
+
+		if (copy_to_user((void *)argp,
+				 &cdata, sizeof(struct sensor_cfg_data)))
+			rc = -EFAULT;
+		break;
+
+	case CFG_SET_FPS:
+	case CFG_SET_PICT_FPS:
+		rc = vb6801_set_fps(&(cdata.cfg.fps));
+		break;
+
+	case CFG_SET_EXP_GAIN:
+		rc = vb6801_write_exp_gain(cdata.cfg.exp_gain.gain,
+					   cdata.cfg.exp_gain.line);
+		break;
+
+	case CFG_SET_PICT_EXP_GAIN:
+		rc = vb6801_set_pict_exp_gain(cdata.cfg.exp_gain.gain,
+					      cdata.cfg.exp_gain.line);
+		break;
+
+	case CFG_SET_MODE:
+		rc = vb6801_set_sensor_mode(cdata.mode, cdata.rs);
+		break;
+
+	case CFG_PWR_DOWN:
+		rc = vb6801_power_down();
+		break;
+
+	case CFG_MOVE_FOCUS:
+		rc = vb6801_move_focus(cdata.cfg.focus.dir,
+				       cdata.cfg.focus.steps);
+		break;
+
+	case CFG_SET_DEFAULT_FOCUS:
+		rc = vb6801_set_default_focus();
+		break;
+
+	default:
+		rc = -EFAULT;
+		break;
+	}
+
+	mutex_unlock(&vb6801_mut);
+
+	return rc;
+}
+
+static int vb6801_sensor_release(void)
+{
+	int rc = -EBADF;
+
+	mutex_lock(&vb6801_mut);
+
+	vb6801_power_down();
+	vb6801_sensor_init_done(vb6801_ctrl->sensordata);
+	kfree(vb6801_ctrl);
+	mutex_unlock(&vb6801_mut);
+
+	return rc;
+}
+
+static int vb6801_i2c_probe(struct i2c_client *client,
+			    const struct i2c_device_id *id)
+{
+	int rc = 0;
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		rc = -ENOTSUPP;
+		goto probe_failure;
+	}
+
+	vb6801_sensorw = kzalloc(sizeof(struct vb6801_work_t), GFP_KERNEL);
+
+	if (!vb6801_sensorw) {
+		rc = -ENOMEM;
+		goto probe_failure;
+	}
+
+	i2c_set_clientdata(client, vb6801_sensorw);
+	vb6801_init_client(client);
+	vb6801_client = client;
+	vb6801_client->addr = vb6801_client->addr >> 1;
+
+	return 0;
+
+probe_failure:
+	if (vb6801_sensorw != NULL) {
+		kfree(vb6801_sensorw);
+		vb6801_sensorw = NULL;
+	}
+	return rc;
+}
+
+static int __exit vb6801_i2c_remove(struct i2c_client *client)
+{
+	struct vb6801_work_t *sensorw = i2c_get_clientdata(client);
+	free_irq(client->irq, sensorw);
+	vb6801_client = NULL;
+	kfree(sensorw);
+	return 0;
+}
+
+static const struct i2c_device_id vb6801_i2c_id[] = {
+	{"vb6801", 0},
+	{}
+};
+
+static struct i2c_driver vb6801_i2c_driver = {
+	.id_table = vb6801_i2c_id,
+	.probe = vb6801_i2c_probe,
+	.remove = __exit_p(vb6801_i2c_remove),
+	.driver = {
+		   .name = "vb6801",
+		   },
+};
+
+static int vb6801_probe_init_sensor(const struct msm_camera_sensor_info *data)
+{
+	int rc;
+
+	struct vb6801_i2c_reg_conf_t rreg[] = {
+		{0x0000, 0, 0, D_LEN_BYTE},
+		{0x0001, 0, 0, D_LEN_BYTE},
+	};
+
+	rc = vb6801_reset(data);
+	if (rc < 0)
+		goto init_probe_done;
+
+	mdelay(20);
+
+	rc = vb6801_i2c_read_table(rreg, ARRAY_SIZE(rreg));
+	if (rc < 0) {
+		CDBG("I2C Read Table FAILED!!!\n");
+		goto init_probe_fail;
+	}
+
+	/* 4. Compare sensor ID to VB6801 ID: */
+	if (rreg[0].bdata != 0x03 || rreg[1].bdata != 0x53) {
+		CDBG("vb6801_sensor_init: sensor ID doesn't match!\n");
+		goto init_probe_fail;
+	}
+
+	goto init_probe_done;
+
+init_probe_fail:
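+	/* Leave a 3-line margin below the frame length, matching
+	 * coarse_integration_time_max_margin in vb6801_config_sensor. */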
+	vb6801_sensor_init_done(data);
+init_probe_done:
+	return rc;
+}
+
+int vb6801_sensor_open_init(const struct msm_camera_sensor_info *data)
+{
+	int32_t rc;
+	struct vb6801_i2c_reg_conf_t wreg[] = {
+		{REG_MODE_SELECT, 0, STANDBY_MODE, D_LEN_BYTE},
+		{0x0113, 0, 0x0A, D_LEN_BYTE},
+	};
+
+	vb6801_ctrl = kzalloc(sizeof(struct vb6801_ctrl_t), GFP_KERNEL);
+	if (!vb6801_ctrl) {
+		rc = -ENOMEM;
+		goto open_init_fail1;
+	}
+
+	vb6801_ctrl->factor_fps = 1;
+	vb6801_ctrl->curr_fps = 7680;	/* 30 * Q8 */
+	vb6801_ctrl->max_fps = 7680;	/* 30 * Q8 */
+	vb6801_ctrl->pict_exp_update = 0;
+	vb6801_ctrl->reducel = 0;
+
+	vb6801_ctrl->set_test = TEST_OFF;
+	vb6801_ctrl->prev_res = QTR_SIZE;
+	vb6801_ctrl->pict_res = FULL_SIZE;
+
+	vb6801_ctrl->s_dynamic_params.preview_linesPerFrame =
+	    VB6801_LINES_PER_FRAME_PREVIEW;
+	vb6801_ctrl->s_dynamic_params.preview_pixelsPerLine =
+	    VB6801_PIXELS_PER_LINE_PREVIEW;
+	vb6801_ctrl->s_dynamic_params.snapshot_linesPerFrame =
+	    VB6801_LINES_PER_FRAME_SNAPSHOT;
+	vb6801_ctrl->s_dynamic_params.snapshot_pixelsPerLine =
+	    VB6801_PIXELS_PER_LINE_SNAPSHOT;
+
+	if (data)
+		vb6801_ctrl->sensordata = data;
+
+	/* enable mclk first */
+	msm_camio_clk_rate_set(VB6801_DEFAULT_CLOCK_RATE);
+	mdelay(20);
+
+	rc = vb6801_reset(data);
+	if (rc < 0)
+		goto open_init_fail1;
+
+	rc = vb6801_i2c_write_table(wreg, ARRAY_SIZE(wreg));
+	if (rc < 0) {
+		CDBG("I2C Write Table FAILED!!!\n");
+		goto open_init_fail2;
+	}
+
+	rc = vb6801_read_nvm_data(&vb6801_ctrl->s_info);
+	if (rc < 0) {
+		CDBG("vb6801_read_nvm_data FAILED!!!\n");
+		goto open_init_fail2;
+	}
+	mdelay(66);
+
+	rc = vb6801_config_sensor(12, 30, 60, 2, 1, 10, 2, RES_PREVIEW);
+	if (rc < 0)
+		goto open_init_fail2;
+
+	goto open_init_done;
+
+open_init_fail2:
+	vb6801_sensor_init_done(data);
+open_init_fail1:
+	kfree(vb6801_ctrl);
+open_init_done:
+	return rc;
+}
+
+static int vb6801_sensor_probe(const struct msm_camera_sensor_info *info,
+			       struct msm_sensor_ctrl *s)
+{
+	int rc = i2c_add_driver(&vb6801_i2c_driver);
+	if (rc < 0 || vb6801_client == NULL) {
+		rc = -ENOTSUPP;
+		goto probe_done;
+	}
+
+	/* enable mclk first */
+	msm_camio_clk_rate_set(VB6801_DEFAULT_CLOCK_RATE);
+	mdelay(20);
+
+	rc = vb6801_probe_init_sensor(info);
+	if (rc < 0)
+		goto probe_done;
+
+	s->s_init = vb6801_sensor_open_init;
+	s->s_release = vb6801_sensor_release;
+	s->s_config = vb6801_sensor_config;
+	s->s_mount_angle = 0;
+	vb6801_sensor_init_done(info);
+
+probe_done:
+	return rc;
+}
+
+static int __vb6801_probe(struct platform_device *pdev)
+{
+	return msm_camera_drv_start(pdev, vb6801_sensor_probe);
+}
+
+static struct platform_driver msm_camera_driver = {
+	.probe = __vb6801_probe,
+	.driver = {
+		   .name = "msm_camera_vb6801",
+		   .owner = THIS_MODULE,
+		   },
+};
+
+static int __init vb6801_init(void)
+{
+	return platform_driver_register(&msm_camera_driver);
+}
+
+module_init(vb6801_init);
+
+static void __exit vb6801_exit(void)
+{
+	platform_driver_unregister(&msm_camera_driver);
+	i2c_del_driver(&vb6801_i2c_driver);
+}
+module_exit(vb6801_exit);
diff --git a/drivers/media/video/msm/vb6801.h b/drivers/media/video/msm/vb6801.h
new file mode 100644
index 0000000..8248f8d
--- /dev/null
+++ b/drivers/media/video/msm/vb6801.h
@@ -0,0 +1,66 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef VB6801_H
+#define VB6801_H
+
+#include <mach/board.h>
+
+extern struct vb6801_reg_t vb6801_regs;	/* from vb6801_reg.c */
+
+struct reg_struct {
+	uint16_t vt_pix_clk_div;	/*  0x0300 */
+	uint16_t vt_sys_clk_div;	/*  0x0302 */
+	uint16_t pre_pll_clk_div;	/*  0x0304 */
+	uint16_t pll_multiplier;	/*  0x0306 */
+	uint16_t op_pix_clk_div;	/*  0x0308 */
+	uint16_t op_sys_clk_div;	/*  0x030A */
+	uint16_t scale_m;	/*  0x0404 */
+	uint16_t row_speed;	/*  0x3016 */
+	uint16_t x_addr_start;	/*  0x3004 */
+	uint16_t x_addr_end;	/*  0x3008 */
+	uint16_t y_addr_start;	/*  0x3002 */
+	uint16_t y_addr_end;	/*  0x3006 */
+	uint16_t read_mode;	/*  0x3040 */
+	uint16_t x_output_size;	/*  0x034C */
+	uint16_t y_output_size;	/*  0x034E */
+	uint16_t line_length_pck;	/*  0x300C */
+	uint16_t frame_length_lines;	/*  0x300A */
+	uint16_t coarse_int_time;	/*  0x3012 */
+	uint16_t fine_int_time;	/*  0x3014 */
+};
+
+enum i2c_data_len {
+	D_LEN_BYTE,
+	D_LEN_WORD
+};
+
+struct vb6801_i2c_reg_conf_t {
+	unsigned short waddr;
+	unsigned short wdata;
+	uint8_t bdata;
+	enum i2c_data_len dlen;
+};
+
+struct vb6801_reg_t {
+	struct reg_struct const *reg_pat;
+	uint16_t reg_pat_size;
+	struct vb6801_i2c_reg_conf_t const *ttbl;
+	uint16_t ttbl_size;
+	struct vb6801_i2c_reg_conf_t const *lctbl;
+	uint16_t lctbl_size;
+	struct vb6801_i2c_reg_conf_t const *rftbl;
+	uint16_t rftbl_size;
+};
+
+#endif /* VB6801_H */
diff --git a/drivers/media/video/msm/vx6953.c b/drivers/media/video/msm/vx6953.c
new file mode 100644
index 0000000..17e5e2e
--- /dev/null
+++ b/drivers/media/video/msm/vx6953.c
@@ -0,0 +1,3666 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <media/msm_camera.h>
+#include <mach/gpio.h>
+#include <mach/camera.h>
+#include <linux/slab.h>
+#include "vx6953.h"
+
+/*=============================================================
+	SENSOR REGISTER DEFINES
+==============================================================*/
+
+#define REG_GROUPED_PARAMETER_HOLD			0x0104
+#define GROUPED_PARAMETER_HOLD_OFF			0x00
+#define GROUPED_PARAMETER_HOLD				0x01
+#define REG_MODE_SELECT					0x0100
+#define MODE_SELECT_STANDBY_MODE			0x00
+#define MODE_SELECT_STREAM				0x01
+/* Integration Time */
+#define REG_COARSE_INTEGRATION_TIME_HI			0x0202
+#define REG_COARSE_INTEGRATION_TIME_LO			0x0203
+/* Gain */
+#define REG_ANALOGUE_GAIN_CODE_GLOBAL_HI		0x0204
+#define REG_ANALOGUE_GAIN_CODE_GLOBAL_LO		0x0205
+/* Digital Gain */
+#define REG_DIGITAL_GAIN_GREEN_R_HI			0x020E
+#define REG_DIGITAL_GAIN_GREEN_R_LO			0x020F
+#define REG_DIGITAL_GAIN_RED_HI				0x0210
+#define REG_DIGITAL_GAIN_RED_LO				0x0211
+#define REG_DIGITAL_GAIN_BLUE_HI			0x0212
+#define REG_DIGITAL_GAIN_BLUE_LO			0x0213
+#define REG_DIGITAL_GAIN_GREEN_B_HI			0x0214
+#define REG_DIGITAL_GAIN_GREEN_B_LO			0x0215
+/* output bits setting */
+#define REG_0x0112					0x0112
+#define REG_0x0113					0x0113
+/* PLL registers */
+#define REG_VT_PIX_CLK_DIV				0x0301
+#define REG_PRE_PLL_CLK_DIV				0x0305
+#define REG_PLL_MULTIPLIER				0x0307
+#define REG_OP_PIX_CLK_DIV				0x0309
+#define REG_0x034c					0x034c
+#define REG_0x034d					0x034d
+#define REG_0x034e					0x034e
+#define REG_0x034f					0x034f
+#define REG_0x0387					0x0387
+#define REG_0x0383					0x0383
+#define REG_FRAME_LENGTH_LINES_HI			0x0340
+#define REG_FRAME_LENGTH_LINES_LO			0x0341
+#define REG_LINE_LENGTH_PCK_HI				0x0342
+#define REG_LINE_LENGTH_PCK_LO				0x0343
+#define REG_0x3030					0x3030
+#define REG_0x0111					0x0111
+#define REG_0x0136					0x0136
+#define REG_0x0137					0x0137
+#define REG_0x0b00					0x0b00
+#define REG_0x3001					0x3001
+#define REG_0x3004					0x3004
+#define REG_0x3007					0x3007
+#define REG_0x301a					0x301a
+#define REG_0x3101					0x3101
+#define REG_0x3364					0x3364
+#define REG_0x3365					0x3365
+#define REG_0x0b83					0x0b83
+#define REG_0x0b84					0x0b84
+#define REG_0x0b85					0x0b85
+#define REG_0x0b88					0x0b88
+#define REG_0x0b89					0x0b89
+#define REG_0x0b8a					0x0b8a
+#define REG_0x3005					0x3005
+#define REG_0x3010					0x3010
+#define REG_0x3036					0x3036
+#define REG_0x3041					0x3041
+#define REG_0x0b80					0x0b80
+#define REG_0x0900					0x0900
+#define REG_0x0901					0x0901
+#define REG_0x0902					0x0902
+#define REG_0x3016					0x3016
+#define REG_0x301d					0x301d
+#define REG_0x317e					0x317e
+#define REG_0x317f					0x317f
+#define REG_0x3400					0x3400
+#define REG_0x303a					0x303a
+#define REG_0x1716					0x1716
+#define REG_0x1717					0x1717
+#define REG_0x1718					0x1718
+#define REG_0x1719					0x1719
+#define REG_0x3006					0x3006
+#define REG_0x301b					0x301b
+#define REG_0x3098					0x3098
+#define REG_0x309d					0x309d
+#define REG_0x3011					0x3011
+#define REG_0x3035					0x3035
+#define REG_0x3045					0x3045
+#define REG_0x3210					0x3210
+#define REG_0x3410					0x3410
+#define REG_0x0b06					0x0b06
+#define REG_0x0b07					0x0b07
+#define REG_0x0b08					0x0b08
+#define REG_0x0b09					0x0b09
+#define REG_0x3640					0x3640
+/* Test Pattern */
+#define REG_TEST_PATTERN_MODE				0x0601
+
+/*============================================================================
+							 TYPE DECLARATIONS
+============================================================================*/
+
+/* 16bit address - 8 bit context register structure */
+#define	VX6953_STM5M0EDOF_OFFSET	9
+#define	Q8		0x00000100
+#define	Q10		0x00000400
+#define	VX6953_STM5M0EDOF_MAX_SNAPSHOT_EXPOSURE_LINE_COUNT	2922
+#define	VX6953_STM5M0EDOF_DEFAULT_MASTER_CLK_RATE	24000000
+#define	VX6953_STM5M0EDOF_OP_PIXEL_CLOCK_RATE	79800000
+#define	VX6953_STM5M0EDOF_VT_PIXEL_CLOCK_RATE	88670000
+/* Full	Size */
+#define	VX6953_FULL_SIZE_WIDTH	2608
+#define	VX6953_FULL_SIZE_HEIGHT		1960
+#define	VX6953_FULL_SIZE_DUMMY_PIXELS	1
+#define	VX6953_FULL_SIZE_DUMMY_LINES	0
+/* Quarter Size	*/
+#define	VX6953_QTR_SIZE_WIDTH	1304
+#define	VX6953_QTR_SIZE_HEIGHT		980
+#define	VX6953_QTR_SIZE_DUMMY_PIXELS	1
+#define	VX6953_QTR_SIZE_DUMMY_LINES		0
+/* Blanking	as measured	on the scope */
+/* Full	Size */
+#define	VX6953_HRZ_FULL_BLK_PIXELS	348
+#define	VX6953_VER_FULL_BLK_LINES	40
+/* Quarter Size	*/
+#define	VX6953_HRZ_QTR_BLK_PIXELS	1628
+#define	VX6953_VER_QTR_BLK_LINES	28
+#define	MAX_LINE_LENGTH_PCK		8190
+#define	MAX_FRAME_LENGTH_LINES	16383
+#define	VX6953_REVISION_NUMBER_CUT2	0x10	/* revision number for Cut 2.0 */
+#define	VX6953_REVISION_NUMBER_CUT3	0x20	/* revision number for Cut 3.0 */
+/* FIXME: Changes from here */
+struct vx6953_work_t {
+	struct work_struct work;
+};
+
+static struct vx6953_work_t *vx6953_sensorw;
+static struct i2c_client *vx6953_client;
+
+struct vx6953_ctrl_t {
+	const struct  msm_camera_sensor_info *sensordata;
+
+	uint32_t sensormode;
+	uint32_t fps_divider;   	/* init to 1 * 0x00000400 */
+	uint32_t pict_fps_divider;  /* init to 1 * 0x00000400 */
+	uint16_t fps;
+
+	int16_t curr_lens_pos;
+	uint16_t curr_step_pos;
+	uint16_t my_reg_gain;
+	uint32_t my_reg_line_count;
+	uint16_t total_lines_per_frame;
+
+	enum vx6953_resolution_t prev_res;
+	enum vx6953_resolution_t pict_res;
+	enum vx6953_resolution_t curr_res;
+	enum vx6953_test_mode_t  set_test;
+	enum sensor_revision_t sensor_type;
+
+	enum edof_mode_t edof_mode;
+
+	unsigned short imgaddr;
+};
+
+
+static uint8_t vx6953_stm5m0edof_delay_msecs_stdby;
+static uint16_t vx6953_stm5m0edof_delay_msecs_stream = 20;
+static uint8_t count;
+static struct vx6953_ctrl_t *vx6953_ctrl;
+static DECLARE_WAIT_QUEUE_HEAD(vx6953_wait_queue);
+DEFINE_MUTEX(vx6953_mut);
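+/*
+ * Cut 2.0 patch table: gain-dependent entries (0xFB94-0xFBA7) followed by
+ * what appears to be patch code for the sensor's embedded microcontroller
+ * (0xF800 onwards), the patch offsets, and the jump/data/enable registers
+ * listed at the end of the table.
+ */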
+static struct vx6953_i2c_reg_conf patch_tbl_cut2[] = {
+	{0xFB94, 0},	/* initialise Data Xfer Status reg */
+	{0xFB95, 0},	/*gain 1	  (0x00)*/
+	{0xFB96, 0},	/*gain 1.07   (0x10)*/
+	{0xFB97, 0},	/*gain 1.14   (0x20)*/
+	{0xFB98, 0},	/*gain 1.23   (0x30)*/
+	{0xFB99, 0},	/*gain 1.33   (0x40)*/
+	{0xFB9A, 0},	/*gain 1.45   (0x50)*/
+	{0xFB9B, 0},	/*gain 1.6    (0x60)*/
+	{0xFB9C, 0},	/*gain 1.78   (0x70)*/
+	{0xFB9D, 2},	/*gain 2	  (0x80)*/
+	{0xFB9E, 2},	/*gain 2.29   (0x90)*/
+	{0xFB9F, 3},	/*gain 2.67   (0xA0)*/
+	{0xFBA0, 3},	/*gain 3.2    (0xB0)*/
+	{0xFBA1, 4},	/*gain 4	  (0xC0)*/
+	{0xFBA2, 7},	/*gain 5.33   (0xD0)*/
+	{0xFBA3, 10},	/*gain 8	  (0xE0)*/
+	{0xFBA4, 11},	/*gain 9.14   (0xE4)*/
+	{0xFBA5, 13},	/*gain 10.67  (0xE8)*/
+	{0xFBA6, 15},	/*gain 12.8   (0xEC)*/
+	{0xFBA7, 19},	/*gain 16     (0xF0)*/
+	{0xF800, 0x12},
+	{0xF801, 0x06},
+	{0xF802, 0xf7},
+	{0xF803, 0x90},
+	{0xF804, 0x02},
+	{0xF805, 0x05},
+	{0xF806, 0xe0},
+	{0xF807, 0xff},
+	{0xF808, 0x65},
+	{0xF809, 0x7d},
+	{0xF80A, 0x70},
+	{0xF80B, 0x03},
+	{0xF80C, 0x02},
+	{0xF80D, 0xf9},
+	{0xF80E, 0x1c},
+	{0xF80F, 0x8f},
+	{0xF810, 0x7d},
+	{0xF811, 0xe4},
+	{0xF812, 0xf5},
+	{0xF813, 0x7a},
+	{0xF814, 0x75},
+	{0xF815, 0x78},
+	{0xF816, 0x30},
+	{0xF817, 0x75},
+	{0xF818, 0x79},
+	{0xF819, 0x53},
+	{0xF81A, 0x85},
+	{0xF81B, 0x79},
+	{0xF81C, 0x82},
+	{0xF81D, 0x85},
+	{0xF81E, 0x78},
+	{0xF81F, 0x83},
+	{0xF820, 0xe0},
+	{0xF821, 0xc3},
+	{0xF822, 0x95},
+	{0xF823, 0x7b},
+	{0xF824, 0xf0},
+	{0xF825, 0x74},
+	{0xF826, 0x02},
+	{0xF827, 0x25},
+	{0xF828, 0x79},
+	{0xF829, 0xf5},
+	{0xF82A, 0x79},
+	{0xF82B, 0xe4},
+	{0xF82C, 0x35},
+	{0xF82D, 0x78},
+	{0xF82E, 0xf5},
+	{0xF82F, 0x78},
+	{0xF830, 0x05},
+	{0xF831, 0x7a},
+	{0xF832, 0xe5},
+	{0xF833, 0x7a},
+	{0xF834, 0xb4},
+	{0xF835, 0x08},
+	{0xF836, 0xe3},
+	{0xF837, 0xe5},
+	{0xF838, 0x7d},
+	{0xF839, 0x70},
+	{0xF83A, 0x04},
+	{0xF83B, 0xff},
+	{0xF83C, 0x02},
+	{0xF83D, 0xf8},
+	{0xF83E, 0xe4},
+	{0xF83F, 0xe5},
+	{0xF840, 0x7d},
+	{0xF841, 0xb4},
+	{0xF842, 0x10},
+	{0xF843, 0x05},
+	{0xF844, 0x7f},
+	{0xF845, 0x01},
+	{0xF846, 0x02},
+	{0xF847, 0xf8},
+	{0xF848, 0xe4},
+	{0xF849, 0xe5},
+	{0xF84A, 0x7d},
+	{0xF84B, 0xb4},
+	{0xF84C, 0x20},
+	{0xF84D, 0x05},
+	{0xF84E, 0x7f},
+	{0xF84F, 0x02},
+	{0xF850, 0x02},
+	{0xF851, 0xf8},
+	{0xF852, 0xe4},
+	{0xF853, 0xe5},
+	{0xF854, 0x7d},
+	{0xF855, 0xb4},
+	{0xF856, 0x30},
+	{0xF857, 0x05},
+	{0xF858, 0x7f},
+	{0xF859, 0x03},
+	{0xF85A, 0x02},
+	{0xF85B, 0xf8},
+	{0xF85C, 0xe4},
+	{0xF85D, 0xe5},
+	{0xF85E, 0x7d},
+	{0xF85F, 0xb4},
+	{0xF860, 0x40},
+	{0xF861, 0x04},
+	{0xF862, 0x7f},
+	{0xF863, 0x04},
+	{0xF864, 0x80},
+	{0xF865, 0x7e},
+	{0xF866, 0xe5},
+	{0xF867, 0x7d},
+	{0xF868, 0xb4},
+	{0xF869, 0x50},
+	{0xF86A, 0x04},
+	{0xF86B, 0x7f},
+	{0xF86C, 0x05},
+	{0xF86D, 0x80},
+	{0xF86E, 0x75},
+	{0xF86F, 0xe5},
+	{0xF870, 0x7d},
+	{0xF871, 0xb4},
+	{0xF872, 0x60},
+	{0xF873, 0x04},
+	{0xF874, 0x7f},
+	{0xF875, 0x06},
+	{0xF876, 0x80},
+	{0xF877, 0x6c},
+	{0xF878, 0xe5},
+	{0xF879, 0x7d},
+	{0xF87A, 0xb4},
+	{0xF87B, 0x70},
+	{0xF87C, 0x04},
+	{0xF87D, 0x7f},
+	{0xF87E, 0x07},
+	{0xF87F, 0x80},
+	{0xF880, 0x63},
+	{0xF881, 0xe5},
+	{0xF882, 0x7d},
+	{0xF883, 0xb4},
+	{0xF884, 0x80},
+	{0xF885, 0x04},
+	{0xF886, 0x7f},
+	{0xF887, 0x08},
+	{0xF888, 0x80},
+	{0xF889, 0x5a},
+	{0xF88A, 0xe5},
+	{0xF88B, 0x7d},
+	{0xF88C, 0xb4},
+	{0xF88D, 0x90},
+	{0xF88E, 0x04},
+	{0xF88F, 0x7f},
+	{0xF890, 0x09},
+	{0xF891, 0x80},
+	{0xF892, 0x51},
+	{0xF893, 0xe5},
+	{0xF894, 0x7d},
+	{0xF895, 0xb4},
+	{0xF896, 0xa0},
+	{0xF897, 0x04},
+	{0xF898, 0x7f},
+	{0xF899, 0x0a},
+	{0xF89A, 0x80},
+	{0xF89B, 0x48},
+	{0xF89C, 0xe5},
+	{0xF89D, 0x7d},
+	{0xF89E, 0xb4},
+	{0xF89F, 0xb0},
+	{0xF8A0, 0x04},
+	{0xF8A1, 0x7f},
+	{0xF8A2, 0x0b},
+	{0xF8A3, 0x80},
+	{0xF8A4, 0x3f},
+	{0xF8A5, 0xe5},
+	{0xF8A6, 0x7d},
+	{0xF8A7, 0xb4},
+	{0xF8A8, 0xc0},
+	{0xF8A9, 0x04},
+	{0xF8AA, 0x7f},
+	{0xF8AB, 0x0c},
+	{0xF8AC, 0x80},
+	{0xF8AD, 0x36},
+	{0xF8AE, 0xe5},
+	{0xF8AF, 0x7d},
+	{0xF8B0, 0xb4},
+	{0xF8B1, 0xd0},
+	{0xF8B2, 0x04},
+	{0xF8B3, 0x7f},
+	{0xF8B4, 0x0d},
+	{0xF8B5, 0x80},
+	{0xF8B6, 0x2d},
+	{0xF8B7, 0xe5},
+	{0xF8B8, 0x7d},
+	{0xF8B9, 0xb4},
+	{0xF8BA, 0xe0},
+	{0xF8BB, 0x04},
+	{0xF8BC, 0x7f},
+	{0xF8BD, 0x0e},
+	{0xF8BE, 0x80},
+	{0xF8BF, 0x24},
+	{0xF8C0, 0xe5},
+	{0xF8C1, 0x7d},
+	{0xF8C2, 0xb4},
+	{0xF8C3, 0xe4},
+	{0xF8C4, 0x04},
+	{0xF8C5, 0x7f},
+	{0xF8C6, 0x0f},
+	{0xF8C7, 0x80},
+	{0xF8C8, 0x1b},
+	{0xF8C9, 0xe5},
+	{0xF8CA, 0x7d},
+	{0xF8CB, 0xb4},
+	{0xF8CC, 0xe8},
+	{0xF8CD, 0x04},
+	{0xF8CE, 0x7f},
+	{0xF8CF, 0x10},
+	{0xF8D0, 0x80},
+	{0xF8D1, 0x12},
+	{0xF8D2, 0xe5},
+	{0xF8D3, 0x7d},
+	{0xF8D4, 0xb4},
+	{0xF8D5, 0xec},
+	{0xF8D6, 0x04},
+	{0xF8D7, 0x7f},
+	{0xF8D8, 0x11},
+	{0xF8D9, 0x80},
+	{0xF8DA, 0x09},
+	{0xF8DB, 0xe5},
+	{0xF8DC, 0x7d},
+	{0xF8DD, 0x7f},
+	{0xF8DE, 0x00},
+	{0xF8DF, 0xb4},
+	{0xF8E0, 0xf0},
+	{0xF8E1, 0x02},
+	{0xF8E2, 0x7f},
+	{0xF8E3, 0x12},
+	{0xF8E4, 0x8f},
+	{0xF8E5, 0x7c},
+	{0xF8E6, 0xef},
+	{0xF8E7, 0x24},
+	{0xF8E8, 0x95},
+	{0xF8E9, 0xff},
+	{0xF8EA, 0xe4},
+	{0xF8EB, 0x34},
+	{0xF8EC, 0xfb},
+	{0xF8ED, 0x8f},
+	{0xF8EE, 0x82},
+	{0xF8EF, 0xf5},
+	{0xF8F0, 0x83},
+	{0xF8F1, 0xe4},
+	{0xF8F2, 0x93},
+	{0xF8F3, 0xf5},
+	{0xF8F4, 0x7c},
+	{0xF8F5, 0xf5},
+	{0xF8F6, 0x7b},
+	{0xF8F7, 0xe4},
+	{0xF8F8, 0xf5},
+	{0xF8F9, 0x7a},
+	{0xF8FA, 0x75},
+	{0xF8FB, 0x78},
+	{0xF8FC, 0x30},
+	{0xF8FD, 0x75},
+	{0xF8FE, 0x79},
+	{0xF8FF, 0x53},
+	{0xF900, 0x85},
+	{0xF901, 0x79},
+	{0xF902, 0x82},
+	{0xF903, 0x85},
+	{0xF904, 0x78},
+	{0xF905, 0x83},
+	{0xF906, 0xe0},
+	{0xF907, 0x25},
+	{0xF908, 0x7c},
+	{0xF909, 0xf0},
+	{0xF90A, 0x74},
+	{0xF90B, 0x02},
+	{0xF90C, 0x25},
+	{0xF90D, 0x79},
+	{0xF90E, 0xf5},
+	{0xF90F, 0x79},
+	{0xF910, 0xe4},
+	{0xF911, 0x35},
+	{0xF912, 0x78},
+	{0xF913, 0xf5},
+	{0xF914, 0x78},
+	{0xF915, 0x05},
+	{0xF916, 0x7a},
+	{0xF917, 0xe5},
+	{0xF918, 0x7a},
+	{0xF919, 0xb4},
+	{0xF91A, 0x08},
+	{0xF91B, 0xe4},
+	{0xF91C, 0x02},
+	{0xF91D, 0x18},
+	{0xF91E, 0x32},
+	{0xF91F, 0x22},
+	{0xF920, 0xf0},
+	{0xF921, 0x90},
+	{0xF922, 0xa0},
+	{0xF923, 0xf8},
+	{0xF924, 0xe0},
+	{0xF925, 0x70},
+	{0xF926, 0x02},
+	{0xF927, 0xa3},
+	{0xF928, 0xe0},
+	{0xF929, 0x70},
+	{0xF92A, 0x0a},
+	{0xF92B, 0x90},
+	{0xF92C, 0xa1},
+	{0xF92D, 0x10},
+	{0xF92E, 0xe0},
+	{0xF92F, 0xfe},
+	{0xF930, 0xa3},
+	{0xF931, 0xe0},
+	{0xF932, 0xff},
+	{0xF933, 0x80},
+	{0xF934, 0x04},
+	{0xF935, 0x7e},
+	{0xF936, 0x00},
+	{0xF937, 0x7f},
+	{0xF938, 0x00},
+	{0xF939, 0x8e},
+	{0xF93A, 0x7e},
+	{0xF93B, 0x8f},
+	{0xF93C, 0x7f},
+	{0xF93D, 0x90},
+	{0xF93E, 0x36},
+	{0xF93F, 0x0d},
+	{0xF940, 0xe0},
+	{0xF941, 0x44},
+	{0xF942, 0x02},
+	{0xF943, 0xf0},
+	{0xF944, 0x90},
+	{0xF945, 0x36},
+	{0xF946, 0x0e},
+	{0xF947, 0xe5},
+	{0xF948, 0x7e},
+	{0xF949, 0xf0},
+	{0xF94A, 0xa3},
+	{0xF94B, 0xe5},
+	{0xF94C, 0x7f},
+	{0xF94D, 0xf0},
+	{0xF94E, 0xe5},
+	{0xF94F, 0x3a},
+	{0xF950, 0x60},
+	{0xF951, 0x0c},
+	{0xF952, 0x90},
+	{0xF953, 0x36},
+	{0xF954, 0x09},
+	{0xF955, 0xe0},
+	{0xF956, 0x70},
+	{0xF957, 0x06},
+	{0xF958, 0x90},
+	{0xF959, 0x36},
+	{0xF95A, 0x08},
+	{0xF95B, 0xf0},
+	{0xF95C, 0xf5},
+	{0xF95D, 0x3a},
+	{0xF95E, 0x02},
+	{0xF95F, 0x03},
+	{0xF960, 0x94},
+	{0xF961, 0x22},
+	{0xF962, 0x78},
+	{0xF963, 0x07},
+	{0xF964, 0xe6},
+	{0xF965, 0xd3},
+	{0xF966, 0x94},
+	{0xF967, 0x00},
+	{0xF968, 0x40},
+	{0xF969, 0x16},
+	{0xF96A, 0x16},
+	{0xF96B, 0xe6},
+	{0xF96C, 0x90},
+	{0xF96D, 0x30},
+	{0xF96E, 0xa1},
+	{0xF96F, 0xf0},
+	{0xF970, 0x90},
+	{0xF971, 0x43},
+	{0xF972, 0x83},
+	{0xF973, 0xe0},
+	{0xF974, 0xb4},
+	{0xF975, 0x01},
+	{0xF976, 0x0f},
+	{0xF977, 0x90},
+	{0xF978, 0x43},
+	{0xF979, 0x87},
+	{0xF97A, 0xe0},
+	{0xF97B, 0xb4},
+	{0xF97C, 0x01},
+	{0xF97D, 0x08},
+	{0xF97E, 0x80},
+	{0xF97F, 0x00},
+	{0xF980, 0x90},
+	{0xF981, 0x30},
+	{0xF982, 0xa0},
+	{0xF983, 0x74},
+	{0xF984, 0x01},
+	{0xF985, 0xf0},
+	{0xF986, 0x22},
+	{0xF987, 0xf0},
+	{0xF988, 0x90},
+	{0xF989, 0x35},
+	{0xF98A, 0xba},
+	{0xF98B, 0xe0},
+	{0xF98C, 0xb4},
+	{0xF98D, 0x0a},
+	{0xF98E, 0x0d},
+	{0xF98F, 0xa3},
+	{0xF990, 0xe0},
+	{0xF991, 0xb4},
+	{0xF992, 0x01},
+	{0xF993, 0x08},
+	{0xF994, 0x90},
+	{0xF995, 0xfb},
+	{0xF996, 0x94},
+	{0xF997, 0xe0},
+	{0xF998, 0x90},
+	{0xF999, 0x35},
+	{0xF99A, 0xb8},
+	{0xF99B, 0xf0},
+	{0xF99C, 0xd0},
+	{0xF99D, 0xd0},
+	{0xF99E, 0xd0},
+	{0xF99F, 0x82},
+	{0xF9A0, 0xd0},
+	{0xF9A1, 0x83},
+	{0xF9A2, 0xd0},
+	{0xF9A3, 0xe0},
+	{0xF9A4, 0x32},
+	{0xF9A5, 0x22},
+	{0xF9A6, 0xe5},
+	{0xF9A7, 0x7f},
+	{0xF9A8, 0x45},
+	{0xF9A9, 0x7e},
+	{0xF9AA, 0x60},
+	{0xF9AB, 0x15},
+	{0xF9AC, 0x90},
+	{0xF9AD, 0x01},
+	{0xF9AE, 0x00},
+	{0xF9AF, 0xe0},
+	{0xF9B0, 0x70},
+	{0xF9B1, 0x0f},
+	{0xF9B2, 0x90},
+	{0xF9B3, 0xa0},
+	{0xF9B4, 0xf8},
+	{0xF9B5, 0xe5},
+	{0xF9B6, 0x7e},
+	{0xF9B7, 0xf0},
+	{0xF9B8, 0xa3},
+	{0xF9B9, 0xe5},
+	{0xF9BA, 0x7f},
+	{0xF9BB, 0xf0},
+	{0xF9BC, 0xe4},
+	{0xF9BD, 0xf5},
+	{0xF9BE, 0x7e},
+	{0xF9BF, 0xf5},
+	{0xF9C0, 0x7f},
+	{0xF9C1, 0x22},
+	{0xF9C2, 0x02},
+	{0xF9C3, 0x0e},
+	{0xF9C4, 0x79},
+	{0xF9C5, 0x22},
+	/* Offsets:*/
+	{0x35C6, 0x00},/* FIDDLEDARKCAL*/
+	{0x35C7, 0x00},
+	{0x35C8, 0x01},/*STOREDISTANCEATSTOPSTREAMING*/
+	{0x35C9, 0x20},
+	{0x35CA, 0x01},/*BRUCEFIX*/
+	{0x35CB, 0x62},
+	{0x35CC, 0x01},/*FIXDATAXFERSTATUSREG*/
+	{0x35CD, 0x87},
+	{0x35CE, 0x01},/*FOCUSDISTANCEUPDATE*/
+	{0x35CF, 0xA6},
+	{0x35D0, 0x01},/*SKIPEDOFRESET*/
+	{0x35D1, 0xC2},
+	{0x35D2, 0x00},
+	{0x35D3, 0xFB},
+	{0x35D4, 0x00},
+	{0x35D5, 0x94},
+	{0x35D6, 0x00},
+	{0x35D7, 0xFB},
+	{0x35D8, 0x00},
+	{0x35D9, 0x94},
+	{0x35DA, 0x00},
+	{0x35DB, 0xFB},
+	{0x35DC, 0x00},
+	{0x35DD, 0x94},
+	{0x35DE, 0x00},
+	{0x35DF, 0xFB},
+	{0x35E0, 0x00},
+	{0x35E1, 0x94},
+	{0x35E6, 0x18},/* FIDDLEDARKCAL*/
+	{0x35E7, 0x2F},
+	{0x35E8, 0x03},/* STOREDISTANCEATSTOPSTREAMING*/
+	{0x35E9, 0x93},
+	{0x35EA, 0x18},/* BRUCEFIX*/
+	{0x35EB, 0x99},
+	{0x35EC, 0x00},/* FIXDATAXFERSTATUSREG*/
+	{0x35ED, 0xA3},
+	{0x35EE, 0x21},/* FOCUSDISTANCEUPDATE*/
+	{0x35EF, 0x5B},
+	{0x35F0, 0x0E},/* SKIPEDOFRESET*/
+	{0x35F1, 0x74},
+	{0x35F2, 0x04},
+	{0x35F3, 0x64},
+	{0x35F4, 0x04},
+	{0x35F5, 0x65},
+	{0x35F6, 0x04},
+	{0x35F7, 0x7B},
+	{0x35F8, 0x04},
+	{0x35F9, 0x7C},
+	{0x35FA, 0x04},
+	{0x35FB, 0xDD},
+	{0x35FC, 0x04},
+	{0x35FD, 0xDE},
+	{0x35FE, 0x04},
+	{0x35FF, 0xEF},
+	{0x3600, 0x04},
+	{0x3601, 0xF0},
+	/*Jump/Data:*/
+	{0x35C2, 0x3F},/* Jump Reg*/
+	{0x35C3, 0xFF},/* Jump Reg*/
+	{0x35C4, 0x3F},/* Data Reg*/
+	{0x35C5, 0xC0},/* Data Reg*/
+	{0x35C0, 0x01},/* Enable*/
+
+};
+
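+/*
+ * Cut 3.0 calibration table: microcontroller patch code (0xF800 onwards),
+ * patch offsets and jump/data/enable registers (0x35xx), followed by what
+ * appear to be EDOF/lens-shading tuning values (0xa098 onwards).
+ */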
+static struct vx6953_i2c_reg_conf cut3_cali_data[] = {
+		{0x360A, 0x07 },
+		{0x3530, 0x07 },
+		{0x35B5, 0x00 },
+		{0x35BC, 0x00 },
+		{0xAFF8, 0x00 },
+		{0xAFF9, 0x01 },
+		{0xF800, 0x90 },
+		{0xF801, 0x30 },
+		{0xF802, 0x31 },
+		{0xF803, 0xe0 },
+		{0xF804, 0xf5 },
+		{0xF805, 0x7d },
+		{0xF806, 0xb4 },
+		{0xF807, 0x01 },
+		{0xF808, 0x06 },
+		{0xF809, 0x75 },
+		{0xF80A, 0x7d },
+		{0xF80B, 0x03 },
+		{0xF80C, 0x74 },
+		{0xF80D, 0x03 },
+		{0xF80E, 0xf0 },
+		{0xF80F, 0x90 },
+		{0xF810, 0x30 },
+		{0xF811, 0x04 },
+		{0xF812, 0x74 },
+		{0xF813, 0x33 },
+		{0xF814, 0xf0 },
+		{0xF815, 0x90 },
+		{0xF816, 0x30 },
+		{0xF817, 0x06 },
+		{0xF818, 0xe4 },
+		{0xF819, 0xf0 },
+		{0xF81A, 0xa3 },
+		{0xF81B, 0x74 },
+		{0xF81C, 0x08 },
+		{0xF81D, 0xf0 },
+		{0xF81E, 0x90 },
+		{0xF81F, 0x30 },
+		{0xF820, 0x10 },
+		{0xF821, 0xe4 },
+		{0xF822, 0xf0 },
+		{0xF823, 0xa3 },
+		{0xF824, 0xf0 },
+		{0xF825, 0x90 },
+		{0xF826, 0x30 },
+		{0xF827, 0x16 },
+		{0xF828, 0x74 },
+		{0xF829, 0x1e },
+		{0xF82A, 0xf0 },
+		{0xF82B, 0x90 },
+		{0xF82C, 0x30 },
+		{0xF82D, 0x1a },
+		{0xF82E, 0x74 },
+		{0xF82F, 0x6a },
+		{0xF830, 0xf0 },
+		{0xF831, 0x90 },
+		{0xF832, 0x30 },
+		{0xF833, 0x30 },
+		{0xF834, 0x74 },
+		{0xF835, 0x08 },
+		{0xF836, 0xf0 },
+		{0xF837, 0x90 },
+		{0xF838, 0x30 },
+		{0xF839, 0x36 },
+		{0xF83A, 0x74 },
+		{0xF83B, 0x2c },
+		{0xF83C, 0xf0 },
+		{0xF83D, 0x90 },
+		{0xF83E, 0x30 },
+		{0xF83F, 0x41 },
+		{0xF840, 0xe4 },
+		{0xF841, 0xf0 },
+		{0xF842, 0xa3 },
+		{0xF843, 0x74 },
+		{0xF844, 0x24 },
+		{0xF845, 0xf0 },
+		{0xF846, 0x90 },
+		{0xF847, 0x30 },
+		{0xF848, 0x45 },
+		{0xF849, 0x74 },
+		{0xF84A, 0x81 },
+		{0xF84B, 0xf0 },
+		{0xF84C, 0x90 },
+		{0xF84D, 0x30 },
+		{0xF84E, 0x98 },
+		{0xF84F, 0x74 },
+		{0xF850, 0x01 },
+		{0xF851, 0xf0 },
+		{0xF852, 0x90 },
+		{0xF853, 0x30 },
+		{0xF854, 0x9d },
+		{0xF855, 0x74 },
+		{0xF856, 0x05 },
+		{0xF857, 0xf0 },
+		{0xF858, 0xe5 },
+		{0xF859, 0x7d },
+		{0xF85A, 0x70 },
+		{0xF85B, 0x22 },
+		{0xF85C, 0x90 },
+		{0xF85D, 0x02 },
+		{0xF85E, 0x00 },
+		{0xF85F, 0x74 },
+		{0xF860, 0x02 },
+		{0xF861, 0xf0 },
+		{0xF862, 0xa3 },
+		{0xF863, 0x74 },
+		{0xF864, 0x54 },
+		{0xF865, 0xf0 },
+		{0xF866, 0x90 },
+		{0xF867, 0x30 },
+		{0xF868, 0x05 },
+		{0xF869, 0x74 },
+		{0xF86A, 0x01 },
+		{0xF86B, 0xf0 },
+		{0xF86C, 0x90 },
+		{0xF86D, 0x30 },
+		{0xF86E, 0x1b },
+		{0xF86F, 0x74 },
+		{0xF870, 0x29 },
+		{0xF871, 0xf0 },
+		{0xF872, 0x90 },
+		{0xF873, 0x30 },
+		{0xF874, 0x30 },
+		{0xF875, 0xe4 },
+		{0xF876, 0xf0 },
+		{0xF877, 0x90 },
+		{0xF878, 0x30 },
+		{0xF879, 0x35 },
+		{0xF87A, 0x04 },
+		{0xF87B, 0xf0 },
+		{0xF87C, 0x80 },
+		{0xF87D, 0x69 },
+		{0xF87E, 0xe5 },
+		{0xF87F, 0x7d },
+		{0xF880, 0x64 },
+		{0xF881, 0x02 },
+		{0xF882, 0x70 },
+		{0xF883, 0x3c },
+		{0xF884, 0x90 },
+		{0xF885, 0x02 },
+		{0xF886, 0x00 },
+		{0xF887, 0x74 },
+		{0xF888, 0x04 },
+		{0xF889, 0xf0 },
+		{0xF88A, 0xa3 },
+		{0xF88B, 0x74 },
+		{0xF88C, 0x10 },
+		{0xF88D, 0xf0 },
+		{0xF88E, 0x90 },
+		{0xF88F, 0x30 },
+		{0xF890, 0x04 },
+		{0xF891, 0x74 },
+		{0xF892, 0x34 },
+		{0xF893, 0xf0 },
+		{0xF894, 0xa3 },
+		{0xF895, 0x74 },
+		{0xF896, 0x07 },
+		{0xF897, 0xf0 },
+		{0xF898, 0x90 },
+		{0xF899, 0x30 },
+		{0xF89A, 0x10 },
+		{0xF89B, 0x74 },
+		{0xF89C, 0x10 },
+		{0xF89D, 0xf0 },
+		{0xF89E, 0x90 },
+		{0xF89F, 0x30 },
+		{0xF8A0, 0x16 },
+		{0xF8A1, 0x74 },
+		{0xF8A2, 0x1f },
+		{0xF8A3, 0xf0 },
+		{0xF8A4, 0x90 },
+		{0xF8A5, 0x30 },
+		{0xF8A6, 0x1a },
+		{0xF8A7, 0x74 },
+		{0xF8A8, 0x62 },
+		{0xF8A9, 0xf0 },
+		{0xF8AA, 0xa3 },
+		{0xF8AB, 0x74 },
+		{0xF8AC, 0x2a },
+		{0xF8AD, 0xf0 },
+		{0xF8AE, 0x90 },
+		{0xF8AF, 0x30 },
+		{0xF8B0, 0x35 },
+		{0xF8B1, 0x74 },
+		{0xF8B2, 0x04 },
+		{0xF8B3, 0xf0 },
+		{0xF8B4, 0x90 },
+		{0xF8B5, 0x30 },
+		{0xF8B6, 0x41 },
+		{0xF8B7, 0x74 },
+		{0xF8B8, 0x60 },
+		{0xF8B9, 0xf0 },
+		{0xF8BA, 0xa3 },
+		{0xF8BB, 0x74 },
+		{0xF8BC, 0x64 },
+		{0xF8BD, 0xf0 },
+		{0xF8BE, 0x80 },
+		{0xF8BF, 0x27 },
+		{0xF8C0, 0xe5 },
+		{0xF8C1, 0x7d },
+		{0xF8C2, 0xb4 },
+		{0xF8C3, 0x03 },
+		{0xF8C4, 0x22 },
+		{0xF8C5, 0x90 },
+		{0xF8C6, 0x02 },
+		{0xF8C7, 0x00 },
+		{0xF8C8, 0x74 },
+		{0xF8C9, 0x02 },
+		{0xF8CA, 0xf0 },
+		{0xF8CB, 0xa3 },
+		{0xF8CC, 0x74 },
+		{0xF8CD, 0x26 },
+		{0xF8CE, 0xf0 },
+		{0xF8CF, 0x90 },
+		{0xF8D0, 0x30 },
+		{0xF8D1, 0x05 },
+		{0xF8D2, 0x74 },
+		{0xF8D3, 0x03 },
+		{0xF8D4, 0xf0 },
+		{0xF8D5, 0x90 },
+		{0xF8D6, 0x30 },
+		{0xF8D7, 0x11 },
+		{0xF8D8, 0x74 },
+		{0xF8D9, 0x01 },
+		{0xF8DA, 0xf0 },
+		{0xF8DB, 0x90 },
+		{0xF8DC, 0x30 },
+		{0xF8DD, 0x1b },
+		{0xF8DE, 0x74 },
+		{0xF8DF, 0x2a },
+		{0xF8E0, 0xf0 },
+		{0xF8E1, 0x90 },
+		{0xF8E2, 0x30 },
+		{0xF8E3, 0x35 },
+		{0xF8E4, 0x74 },
+		{0xF8E5, 0x03 },
+		{0xF8E6, 0xf0 },
+		{0xF8E7, 0x90 },
+		{0xF8E8, 0x41 },
+		{0xF8E9, 0x01 },
+		{0xF8EA, 0xe0 },
+		{0xF8EB, 0xf5 },
+		{0xF8EC, 0x79 },
+		{0xF8ED, 0x90 },
+		{0xF8EE, 0x43 },
+		{0xF8EF, 0x87 },
+		{0xF8F0, 0xe0 },
+		{0xF8F1, 0xf5 },
+		{0xF8F2, 0x7a },
+		{0xF8F3, 0x90 },
+		{0xF8F4, 0x42 },
+		{0xF8F5, 0x05 },
+		{0xF8F6, 0xe0 },
+		{0xF8F7, 0xf5 },
+		{0xF8F8, 0x7b },
+		{0xF8F9, 0x22 },
+		{0xF8FA, 0x78 },
+		{0xF8FB, 0x07 },
+		{0xF8FC, 0xe6 },
+		{0xF8FD, 0xf5 },
+		{0xF8FE, 0x7c },
+		{0xF8FF, 0xe5 },
+		{0xF900, 0x7c },
+		{0xF901, 0x60 },
+		{0xF902, 0x1e },
+		{0xF903, 0x90 },
+		{0xF904, 0x43 },
+		{0xF905, 0x83 },
+		{0xF906, 0xe0 },
+		{0xF907, 0xb4 },
+		{0xF908, 0x01 },
+		{0xF909, 0x17 },
+		{0xF90A, 0x90 },
+		{0xF90B, 0x43 },
+		{0xF90C, 0x87 },
+		{0xF90D, 0xe0 },
+		{0xF90E, 0xb4 },
+		{0xF90F, 0x01 },
+		{0xF910, 0x10 },
+		{0xF911, 0x15 },
+		{0xF912, 0x7c },
+		{0xF913, 0x90 },
+		{0xF914, 0x30 },
+		{0xF915, 0xa1 },
+		{0xF916, 0xe5 },
+		{0xF917, 0x7c },
+		{0xF918, 0xf0 },
+		{0xF919, 0x90 },
+		{0xF91A, 0x30 },
+		{0xF91B, 0xa0 },
+		{0xF91C, 0x74 },
+		{0xF91D, 0x01 },
+		{0xF91E, 0xf0 },
+		{0xF91F, 0x80 },
+		{0xF920, 0x05 },
+		{0xF921, 0xe4 },
+		{0xF922, 0x90 },
+		{0xF923, 0x30 },
+		{0xF924, 0xa0 },
+		{0xF925, 0xf0 },
+		{0xF926, 0x90 },
+		{0xF927, 0x41 },
+		{0xF928, 0x01 },
+		{0xF929, 0xe0 },
+		{0xF92A, 0xfc },
+		{0xF92B, 0x54 },
+		{0xF92C, 0x02 },
+		{0xF92D, 0xfe },
+		{0xF92E, 0xe5 },
+		{0xF92F, 0x79 },
+		{0xF930, 0x54 },
+		{0xF931, 0x02 },
+		{0xF932, 0xb5 },
+		{0xF933, 0x06 },
+		{0xF934, 0x0f },
+		{0xF935, 0x90 },
+		{0xF936, 0x43 },
+		{0xF937, 0x87 },
+		{0xF938, 0xe0 },
+		{0xF939, 0xb5 },
+		{0xF93A, 0x7a },
+		{0xF93B, 0x08 },
+		{0xF93C, 0x90 },
+		{0xF93D, 0x42 },
+		{0xF93E, 0x05 },
+		{0xF93F, 0xe0 },
+		{0xF940, 0x65 },
+		{0xF941, 0x7b },
+		{0xF942, 0x60 },
+		{0xF943, 0x0b },
+		{0xF944, 0x90 },
+		{0xF945, 0x30 },
+		{0xF946, 0x50 },
+		{0xF947, 0xe0 },
+		{0xF948, 0x54 },
+		{0xF949, 0xf9 },
+		{0xF94A, 0x44 },
+		{0xF94B, 0x02 },
+		{0xF94C, 0xf0 },
+		{0xF94D, 0x80 },
+		{0xF94E, 0x09 },
+		{0xF94F, 0x90 },
+		{0xF950, 0x30 },
+		{0xF951, 0x50 },
+		{0xF952, 0xe0 },
+		{0xF953, 0x54 },
+		{0xF954, 0xf9 },
+		{0xF955, 0x44 },
+		{0xF956, 0x04 },
+		{0xF957, 0xf0 },
+		{0xF958, 0x8c },
+		{0xF959, 0x79 },
+		{0xF95A, 0x90 },
+		{0xF95B, 0x43 },
+		{0xF95C, 0x87 },
+		{0xF95D, 0xe0 },
+		{0xF95E, 0xf5 },
+		{0xF95F, 0x7a },
+		{0xF960, 0x90 },
+		{0xF961, 0x42 },
+		{0xF962, 0x05 },
+		{0xF963, 0xe0 },
+		{0xF964, 0xf5 },
+		{0xF965, 0x7b },
+		{0xF966, 0x22 },
+		{0xF967, 0xc3 },
+		{0xF968, 0x90 },
+		{0xF969, 0x0b },
+		{0xF96A, 0x89 },
+		{0xF96B, 0xe0 },
+		{0xF96C, 0x94 },
+		{0xF96D, 0x1e },
+		{0xF96E, 0x90 },
+		{0xF96F, 0x0b },
+		{0xF970, 0x88 },
+		{0xF971, 0xe0 },
+		{0xF972, 0x94 },
+		{0xF973, 0x00 },
+		{0xF974, 0x50 },
+		{0xF975, 0x06 },
+		{0xF976, 0x7e },
+		{0xF977, 0x00 },
+		{0xF978, 0x7f },
+		{0xF979, 0x01 },
+		{0xF97A, 0x80 },
+		{0xF97B, 0x3d },
+		{0xF97C, 0xc3 },
+		{0xF97D, 0x90 },
+		{0xF97E, 0x0b },
+		{0xF97F, 0x89 },
+		{0xF980, 0xe0 },
+		{0xF981, 0x94 },
+		{0xF982, 0x3c },
+		{0xF983, 0x90 },
+		{0xF984, 0x0b },
+		{0xF985, 0x88 },
+		{0xF986, 0xe0 },
+		{0xF987, 0x94 },
+		{0xF988, 0x00 },
+		{0xF989, 0x50 },
+		{0xF98A, 0x06 },
+		{0xF98B, 0x7e },
+		{0xF98C, 0x00 },
+		{0xF98D, 0x7f },
+		{0xF98E, 0x02 },
+		{0xF98F, 0x80 },
+		{0xF990, 0x28 },
+		{0xF991, 0xc3 },
+		{0xF992, 0x90 },
+		{0xF993, 0x0b },
+		{0xF994, 0x89 },
+		{0xF995, 0xe0 },
+		{0xF996, 0x94 },
+		{0xF997, 0xfa },
+		{0xF998, 0x90 },
+		{0xF999, 0x0b },
+		{0xF99A, 0x88 },
+		{0xF99B, 0xe0 },
+		{0xF99C, 0x94 },
+		{0xF99D, 0x00 },
+		{0xF99E, 0x50 },
+		{0xF99F, 0x06 },
+		{0xF9A0, 0x7e },
+		{0xF9A1, 0x00 },
+		{0xF9A2, 0x7f },
+		{0xF9A3, 0x03 },
+		{0xF9A4, 0x80 },
+		{0xF9A5, 0x13 },
+		{0xF9A6, 0xc3 },
+		{0xF9A7, 0x90 },
+		{0xF9A8, 0x0b },
+		{0xF9A9, 0x88 },
+		{0xF9AA, 0xe0 },
+		{0xF9AB, 0x94 },
+		{0xF9AC, 0x80 },
+		{0xF9AD, 0x50 },
+		{0xF9AE, 0x06 },
+		{0xF9AF, 0x7e },
+		{0xF9B0, 0x00 },
+		{0xF9B1, 0x7f },
+		{0xF9B2, 0x04 },
+		{0xF9B3, 0x80 },
+		{0xF9B4, 0x04 },
+		{0xF9B5, 0xae },
+		{0xF9B6, 0x7e },
+		{0xF9B7, 0xaf },
+		{0xF9B8, 0x7f },
+		{0xF9B9, 0x90 },
+		{0xF9BA, 0xa0 },
+		{0xF9BB, 0xf8 },
+		{0xF9BC, 0xee },
+		{0xF9BD, 0xf0 },
+		{0xF9BE, 0xa3 },
+		{0xF9BF, 0xef },
+		{0xF9C0, 0xf0 },
+		{0xF9C1, 0x22 },
+		{0xF9C2, 0x90 },
+		{0xF9C3, 0x33 },
+		{0xF9C4, 0x82 },
+		{0xF9C5, 0xe0 },
+		{0xF9C6, 0xff },
+		{0xF9C7, 0x64 },
+		{0xF9C8, 0x01 },
+		{0xF9C9, 0x70 },
+		{0xF9CA, 0x30 },
+		{0xF9CB, 0xe5 },
+		{0xF9CC, 0x7f },
+		{0xF9CD, 0x64 },
+		{0xF9CE, 0x02 },
+		{0xF9CF, 0x45 },
+		{0xF9D0, 0x7e },
+		{0xF9D1, 0x70 },
+		{0xF9D2, 0x04 },
+		{0xF9D3, 0x7d },
+		{0xF9D4, 0x1e },
+		{0xF9D5, 0x80 },
+		{0xF9D6, 0x1d },
+		{0xF9D7, 0xe5 },
+		{0xF9D8, 0x7f },
+		{0xF9D9, 0x64 },
+		{0xF9DA, 0x03 },
+		{0xF9DB, 0x45 },
+		{0xF9DC, 0x7e },
+		{0xF9DD, 0x70 },
+		{0xF9DE, 0x04 },
+		{0xF9DF, 0x7d },
+		{0xF9E0, 0x3c },
+		{0xF9E1, 0x80 },
+		{0xF9E2, 0x11 },
+		{0xF9E3, 0xe5 },
+		{0xF9E4, 0x7f },
+		{0xF9E5, 0x64 },
+		{0xF9E6, 0x04 },
+		{0xF9E7, 0x45 },
+		{0xF9E8, 0x7e },
+		{0xF9E9, 0x70 },
+		{0xF9EA, 0x04 },
+		{0xF9EB, 0x7d },
+		{0xF9EC, 0xfa },
+		{0xF9ED, 0x80 },
+		{0xF9EE, 0x05 },
+		{0xF9EF, 0x90 },
+		{0xF9F0, 0x33 },
+		{0xF9F1, 0x81 },
+		{0xF9F2, 0xe0 },
+		{0xF9F3, 0xfd },
+		{0xF9F4, 0xae },
+		{0xF9F5, 0x05 },
+		{0xF9F6, 0x90 },
+		{0xF9F7, 0x33 },
+		{0xF9F8, 0x81 },
+		{0xF9F9, 0xed },
+		{0xF9FA, 0xf0 },
+		{0xF9FB, 0xef },
+		{0xF9FC, 0xb4 },
+		{0xF9FD, 0x01 },
+		{0xF9FE, 0x10 },
+		{0xF9FF, 0x90 },
+		{0xFA00, 0x01 },
+		{0xFA01, 0x00 },
+		{0xFA02, 0xe0 },
+		{0xFA03, 0x60 },
+		{0xFA04, 0x0a },
+		{0xFA05, 0x90 },
+		{0xFA06, 0xa1 },
+		{0xFA07, 0x10 },
+		{0xFA08, 0xe0 },
+		{0xFA09, 0xf5 },
+		{0xFA0A, 0x7e },
+		{0xFA0B, 0xa3 },
+		{0xFA0C, 0xe0 },
+		{0xFA0D, 0xf5 },
+		{0xFA0E, 0x7f },
+		{0xFA0F, 0x22 },
+		{0xFA10, 0x12 },
+		{0xFA11, 0x2f },
+		{0xFA12, 0x4d },
+		{0xFA13, 0x90 },
+		{0xFA14, 0x35 },
+		{0xFA15, 0x38 },
+		{0xFA16, 0xe0 },
+		{0xFA17, 0x70 },
+		{0xFA18, 0x05 },
+		{0xFA19, 0x12 },
+		{0xFA1A, 0x00 },
+		{0xFA1B, 0x0e },
+		{0xFA1C, 0x80 },
+		{0xFA1D, 0x03 },
+		{0xFA1E, 0x12 },
+		{0xFA1F, 0x07 },
+		{0xFA20, 0xc9 },
+		{0xFA21, 0x90 },
+		{0xFA22, 0x40 },
+		{0xFA23, 0x06 },
+		{0xFA24, 0xe0 },
+		{0xFA25, 0xf4 },
+		{0xFA26, 0x54 },
+		{0xFA27, 0x02 },
+		{0xFA28, 0xff },
+		{0xFA29, 0xe0 },
+		{0xFA2A, 0x54 },
+		{0xFA2B, 0x01 },
+		{0xFA2C, 0x4f },
+		{0xFA2D, 0x90 },
+		{0xFA2E, 0x31 },
+		{0xFA2F, 0x32 },
+		{0xFA30, 0xf0 },
+		{0xFA31, 0x90 },
+		{0xFA32, 0xfa },
+		{0xFA33, 0x9d },
+		{0xFA34, 0xe0 },
+		{0xFA35, 0x70 },
+		{0xFA36, 0x03 },
+		{0xFA37, 0x12 },
+		{0xFA38, 0x27 },
+		{0xFA39, 0x27 },
+		{0xFA3A, 0x02 },
+		{0xFA3B, 0x05 },
+		{0xFA3C, 0xac },
+		{0xFA3D, 0x22 },
+		{0xFA3E, 0xf0 },
+		{0xFA3F, 0xe5 },
+		{0xFA40, 0x3a },
+		{0xFA41, 0xb4 },
+		{0xFA42, 0x06 },
+		{0xFA43, 0x06 },
+		{0xFA44, 0x63 },
+		{0xFA45, 0x3e },
+		{0xFA46, 0x02 },
+		{0xFA47, 0x12 },
+		{0xFA48, 0x03 },
+		{0xFA49, 0xea },
+		{0xFA4A, 0x02 },
+		{0xFA4B, 0x17 },
+		{0xFA4C, 0x4a },
+		{0xFA4D, 0x22 },
+		{0x35C9, 0xFA },
+		{0x35CA, 0x01 },
+		{0x35CB, 0x67 },
+		{0x35CC, 0x01 },
+		{0x35CD, 0xC2 },
+		{0x35CE, 0x02 },
+		{0x35CF, 0x10 },
+		{0x35D0, 0x02 },
+		{0x35D1, 0x3E },
+		{0x35D3, 0xF6 },
+		{0x35D5, 0x07 },
+		{0x35D7, 0xA3 },
+		{0x35DB, 0x02 },
+		{0x35DD, 0x06 },
+		{0x35DF, 0x27 },
+		{0x35E6, 0x28 },
+		{0x35E7, 0x76 },
+		{0x35E8, 0x2A },
+		{0x35E9, 0x15 },
+		{0x35EA, 0x2D },
+		{0x35EB, 0x07 },
+		{0x35EC, 0x04 },
+		{0x35ED, 0x43 },
+		{0x35EE, 0x05 },
+		{0x35EF, 0xA9 },
+		{0x35F0, 0x17 },
+		{0x35F1, 0x41 },
+		{0x35F2, 0x24 },
+		{0x35F3, 0x88 },
+		{0x35F4, 0x01 },
+		{0x35F5, 0x54 },
+		{0x35F6, 0x01 },
+		{0x35F7, 0x55 },
+		{0x35F8, 0x2E },
+		{0x35F9, 0xF2 },
+		{0x35FA, 0x06 },
+		{0x35FB, 0x02 },
+		{0x35FC, 0x06 },
+		{0x35FD, 0x03 },
+		{0x35FE, 0x06 },
+		{0x35FF, 0x04 },
+		{0x3600, 0x0F },
+		{0x3601, 0x48 },
+		{0x3602, 0x0F },
+		{0x3603, 0x49 },
+		{0x3604, 0x0F },
+		{0x3605, 0x4A },
+		{0x35C2, 0xFF },
+		{0x35C3, 0xFF },
+		{0x35C4, 0xFF },
+		{0x35C5, 0xC0 },
+		{0x35C0, 0x01 },
+
+
+		{0xa098, 0x02 },
+		{0xa099, 0x87 },
+		{0xa09c, 0x00 },
+		{0xa09d, 0xc5 },
+		{0xa4ec, 0x05 },
+		{0xa4ed, 0x05 },
+		{0xa4f0, 0x04 },
+		{0xa4f1, 0x04 },
+		{0xa4f4, 0x04 },
+		{0xa4f5, 0x05 },
+		{0xa4f8, 0x05 },
+		{0xa4f9, 0x07 },
+		{0xa4fc, 0x07 },
+		{0xa4fd, 0x07 },
+		{0xa500, 0x07 },
+		{0xa501, 0x07 },
+		{0xa504, 0x08 },
+		{0xa505, 0x08 },
+		{0xa518, 0x01 },
+		{0xa519, 0x02 },
+		{0xa51c, 0x01 },
+		{0xa51d, 0x00 },
+		{0xa534, 0x00 },
+		{0xa535, 0x04 },
+		{0xa538, 0x04 },
+		{0xa539, 0x03 },
+		{0xa53c, 0x05 },
+		{0xa53d, 0x07 },
+		{0xa540, 0x07 },
+		{0xa541, 0x06 },
+		{0xa544, 0x07 },
+		{0xa545, 0x06 },
+		{0xa548, 0x05 },
+		{0xa549, 0x06 },
+		{0xa54c, 0x06 },
+		{0xa54d, 0x07 },
+		{0xa550, 0x07 },
+		{0xa551, 0x04 },
+		{0xa554, 0x04 },
+		{0xa555, 0x04 },
+		{0xa558, 0x05 },
+		{0xa559, 0x06 },
+		{0xa55c, 0x07 },
+		{0xa55d, 0x07 },
+		{0xa56c, 0x00 },
+		{0xa56d, 0x0a },
+		{0xa570, 0x08 },
+		{0xa571, 0x05 },
+		{0xa574, 0x04 },
+		{0xa575, 0x03 },
+		{0xa578, 0x04 },
+		{0xa579, 0x04 },
+		{0xa58c, 0x1f },
+		{0xa58d, 0x1b },
+		{0xa590, 0x17 },
+		{0xa591, 0x13 },
+		{0xa594, 0x10 },
+		{0xa595, 0x0d },
+		{0xa598, 0x0f },
+		{0xa599, 0x11 },
+		{0xa59c, 0x03 },
+		{0xa59d, 0x03 },
+		{0xa5a0, 0x03 },
+		{0xa5a1, 0x03 },
+		{0xa5a4, 0x03 },
+		{0xa5a5, 0x04 },
+		{0xa5a8, 0x05 },
+		{0xa5a9, 0x00 },
+		{0xa5ac, 0x00 },
+		{0xa5ad, 0x00 },
+		{0xa5b0, 0x00 },
+		{0xa5b1, 0x00 },
+		{0xa5b4, 0x00 },
+		{0xa5b5, 0x00 },
+		{0xa5c4, 0x1f },
+		{0xa5c5, 0x13 },
+		{0xa5c8, 0x14 },
+		{0xa5c9, 0x14 },
+		{0xa5cc, 0x14 },
+		{0xa5cd, 0x13 },
+		{0xa5d0, 0x17 },
+		{0xa5d1, 0x1a },
+		{0xa5f4, 0x05 },
+		{0xa5f5, 0x05 },
+		{0xa5f8, 0x05 },
+		{0xa5f9, 0x06 },
+		{0xa5fc, 0x06 },
+		{0xa5fd, 0x06 },
+		{0xa600, 0x06 },
+		{0xa601, 0x06 },
+		{0xa608, 0x07 },
+		{0xa609, 0x08 },
+		{0xa60c, 0x08 },
+		{0xa60d, 0x07 },
+		{0xa63c, 0x00 },
+		{0xa63d, 0x02 },
+		{0xa640, 0x02 },
+		{0xa641, 0x02 },
+		{0xa644, 0x02 },
+		{0xa645, 0x02 },
+		{0xa648, 0x03 },
+		{0xa649, 0x04 },
+		{0xa64c, 0x0a },
+		{0xa64d, 0x09 },
+		{0xa650, 0x08 },
+		{0xa651, 0x09 },
+		{0xa654, 0x09 },
+		{0xa655, 0x0a },
+		{0xa658, 0x0a },
+		{0xa659, 0x0a },
+		{0xa65c, 0x0a },
+		{0xa65d, 0x09 },
+		{0xa660, 0x09 },
+		{0xa661, 0x09 },
+		{0xa664, 0x09 },
+		{0xa665, 0x08 },
+		{0xa680, 0x01 },
+		{0xa681, 0x02 },
+		{0xa694, 0x1f },
+		{0xa695, 0x10 },
+		{0xa698, 0x0e },
+		{0xa699, 0x0c },
+		{0xa69c, 0x0d },
+		{0xa69d, 0x0d },
+		{0xa6a0, 0x0f },
+		{0xa6a1, 0x11 },
+		{0xa6a4, 0x00 },
+		{0xa6a5, 0x00 },
+		{0xa6a8, 0x00 },
+		{0xa6a9, 0x00 },
+		{0xa6ac, 0x00 },
+		{0xa6ad, 0x00 },
+		{0xa6b0, 0x00 },
+		{0xa6b1, 0x04 },
+		{0xa6b4, 0x04 },
+		{0xa6b5, 0x04 },
+		{0xa6b8, 0x04 },
+		{0xa6b9, 0x04 },
+		{0xa6bc, 0x05 },
+		{0xa6bd, 0x05 },
+		{0xa6c0, 0x1f },
+		{0xa6c1, 0x1f },
+		{0xa6c4, 0x1f },
+		{0xa6c5, 0x1f },
+		{0xa6c8, 0x1f },
+		{0xa6c9, 0x1f },
+		{0xa6cc, 0x1f },
+		{0xa6cd, 0x0b },
+		{0xa6d0, 0x0c },
+		{0xa6d1, 0x0d },
+		{0xa6d4, 0x0d },
+		{0xa6d5, 0x0d },
+		{0xa6d8, 0x11 },
+		{0xa6d9, 0x14 },
+		{0xa6fc, 0x02 },
+		{0xa6fd, 0x03 },
+		{0xa700, 0x03 },
+		{0xa701, 0x03 },
+		{0xa704, 0x03 },
+		{0xa705, 0x04 },
+		{0xa708, 0x05 },
+		{0xa709, 0x02 },
+		{0xa70c, 0x02 },
+		{0xa70d, 0x02 },
+		{0xa710, 0x03 },
+		{0xa711, 0x04 },
+		{0xa714, 0x04 },
+		{0xa715, 0x04 },
+		{0xa744, 0x00 },
+		{0xa745, 0x03 },
+		{0xa748, 0x04 },
+		{0xa749, 0x04 },
+		{0xa74c, 0x05 },
+		{0xa74d, 0x06 },
+		{0xa750, 0x07 },
+		{0xa751, 0x07 },
+		{0xa754, 0x05 },
+		{0xa755, 0x05 },
+		{0xa758, 0x05 },
+		{0xa759, 0x05 },
+		{0xa75c, 0x05 },
+		{0xa75d, 0x06 },
+		{0xa760, 0x07 },
+		{0xa761, 0x07 },
+		{0xa764, 0x06 },
+		{0xa765, 0x05 },
+		{0xa768, 0x05 },
+		{0xa769, 0x05 },
+		{0xa76c, 0x06 },
+		{0xa76d, 0x07 },
+		{0xa77c, 0x00 },
+		{0xa77d, 0x05 },
+		{0xa780, 0x05 },
+		{0xa781, 0x05 },
+		{0xa784, 0x05 },
+		{0xa785, 0x04 },
+		{0xa788, 0x05 },
+		{0xa789, 0x06 },
+		{0xa79c, 0x1f },
+		{0xa79d, 0x15 },
+		{0xa7a0, 0x13 },
+		{0xa7a1, 0x10 },
+		{0xa7a4, 0x0f },
+		{0xa7a5, 0x0d },
+		{0xa7a8, 0x11 },
+		{0xa7a9, 0x14 },
+		{0xa7ac, 0x02 },
+		{0xa7ad, 0x02 },
+		{0xa7b0, 0x02 },
+		{0xa7b1, 0x02 },
+		{0xa7b4, 0x02 },
+		{0xa7b5, 0x03 },
+		{0xa7b8, 0x03 },
+		{0xa7b9, 0x00 },
+		{0xa7bc, 0x00 },
+		{0xa7bd, 0x00 },
+		{0xa7c0, 0x00 },
+		{0xa7c1, 0x00 },
+		{0xa7c4, 0x00 },
+		{0xa7c5, 0x00 },
+		{0xa7d4, 0x1f },
+		{0xa7d5, 0x0d },
+		{0xa7d8, 0x0f },
+		{0xa7d9, 0x10 },
+		{0xa7dc, 0x10 },
+		{0xa7dd, 0x10 },
+		{0xa7e0, 0x13 },
+		{0xa7e1, 0x16 },
+		{0xa7f4, 0x00 },
+		{0xa7f5, 0x03 },
+		{0xa7f8, 0x04 },
+		{0xa7f9, 0x04 },
+		{0xa7fc, 0x04 },
+		{0xa7fd, 0x03 },
+		{0xa800, 0x03 },
+		{0xa801, 0x03 },
+		{0xa804, 0x03 },
+		{0xa805, 0x03 },
+		{0xa808, 0x03 },
+		{0xa809, 0x03 },
+		{0xa80c, 0x03 },
+		{0xa80d, 0x04 },
+		{0xa810, 0x04 },
+		{0xa811, 0x0a },
+		{0xa814, 0x0a },
+		{0xa815, 0x0a },
+		{0xa818, 0x0f },
+		{0xa819, 0x14 },
+		{0xa81c, 0x14 },
+		{0xa81d, 0x14 },
+		{0xa82c, 0x00 },
+		{0xa82d, 0x04 },
+		{0xa830, 0x02 },
+		{0xa831, 0x00 },
+		{0xa834, 0x00 },
+		{0xa835, 0x00 },
+		{0xa838, 0x00 },
+		{0xa839, 0x00 },
+		{0xa840, 0x1f },
+		{0xa841, 0x1f },
+		{0xa848, 0x1f },
+		{0xa849, 0x1f },
+		{0xa84c, 0x1f },
+		{0xa84d, 0x0c },
+		{0xa850, 0x0c },
+		{0xa851, 0x0c },
+		{0xa854, 0x0c },
+		{0xa855, 0x0c },
+		{0xa858, 0x0c },
+		{0xa859, 0x0c },
+		{0xa85c, 0x0c },
+		{0xa85d, 0x0c },
+		{0xa860, 0x0c },
+		{0xa861, 0x0c },
+		{0xa864, 0x0c },
+		{0xa865, 0x0c },
+		{0xa868, 0x0c },
+		{0xa869, 0x0c },
+		{0xa86c, 0x0c },
+		{0xa86d, 0x0c },
+		{0xa870, 0x0c },
+		{0xa871, 0x0c },
+		{0xa874, 0x0c },
+		{0xa875, 0x0c },
+		{0xa878, 0x1f },
+		{0xa879, 0x1f },
+		{0xa87c, 0x1f },
+		{0xa87d, 0x1f },
+		{0xa880, 0x1f },
+		{0xa881, 0x1f },
+		{0xa884, 0x1f },
+		{0xa885, 0x0c },
+		{0xa888, 0x0c },
+		{0xa889, 0x0c },
+		{0xa88c, 0x0c },
+		{0xa88d, 0x0c },
+		{0xa890, 0x0c },
+		{0xa891, 0x0c },
+		{0xa898, 0x1f },
+		{0xa899, 0x1f },
+		{0xa8a0, 0x1f },
+		{0xa8a1, 0x1f },
+		{0xa8a4, 0x1f },
+		{0xa8a5, 0x0c },
+		{0xa8a8, 0x0c },
+		{0xa8a9, 0x0c },
+		{0xa8ac, 0x0c },
+		{0xa8ad, 0x0c },
+		{0xa8b0, 0x0c },
+		{0xa8b1, 0x0c },
+		{0xa8b4, 0x0c },
+		{0xa8b5, 0x0c },
+		{0xa8b8, 0x0c },
+		{0xa8b9, 0x0c },
+		{0xa8bc, 0x0c },
+		{0xa8bd, 0x0c },
+		{0xa8c0, 0x0c },
+		{0xa8c1, 0x0c },
+		{0xa8c4, 0x0c },
+		{0xa8c5, 0x0c },
+		{0xa8c8, 0x0c },
+		{0xa8c9, 0x0c },
+		{0xa8cc, 0x0c },
+		{0xa8cd, 0x0c },
+		{0xa8d0, 0x1f },
+		{0xa8d1, 0x1f },
+		{0xa8d4, 0x1f },
+		{0xa8d5, 0x1f },
+		{0xa8d8, 0x1f },
+		{0xa8d9, 0x1f },
+		{0xa8dc, 0x1f },
+		{0xa8dd, 0x0c },
+		{0xa8e0, 0x0c },
+		{0xa8e1, 0x0c },
+		{0xa8e4, 0x0c },
+		{0xa8e5, 0x0c },
+		{0xa8e8, 0x0c },
+		{0xa8e9, 0x0c },
+		{0xa8f0, 0x1f },
+		{0xa8f1, 0x1f },
+		{0xa8f8, 0x1f },
+		{0xa8f9, 0x1f },
+		{0xa8fc, 0x1f },
+		{0xa8fd, 0x0c },
+		{0xa900, 0x0c },
+		{0xa901, 0x0c },
+		{0xa904, 0x0c },
+		{0xa905, 0x0c },
+		{0xa908, 0x0c },
+		{0xa909, 0x0c },
+		{0xa90c, 0x0c },
+		{0xa90d, 0x0c },
+		{0xa910, 0x0c },
+		{0xa911, 0x0c },
+		{0xa914, 0x0c },
+		{0xa915, 0x0c },
+		{0xa918, 0x0c },
+		{0xa919, 0x0c },
+		{0xa91c, 0x0c },
+		{0xa91d, 0x0c },
+		{0xa920, 0x0c },
+		{0xa921, 0x0c },
+		{0xa924, 0x0c },
+		{0xa925, 0x0c },
+		{0xa928, 0x1f },
+		{0xa929, 0x1f },
+		{0xa92c, 0x1f },
+		{0xa92d, 0x1f },
+		{0xa930, 0x1f },
+		{0xa931, 0x1f },
+		{0xa934, 0x1f },
+		{0xa935, 0x0c },
+		{0xa938, 0x0c },
+		{0xa939, 0x0c },
+		{0xa93c, 0x0c },
+		{0xa93d, 0x0c },
+		{0xa940, 0x0c },
+		{0xa941, 0x0c },
+		{0xa96c, 0x0d },
+		{0xa96d, 0x16 },
+		{0xa970, 0x19 },
+		{0xa971, 0x0e },
+		{0xa974, 0x16 },
+		{0xa975, 0x1a },
+		{0xa978, 0x0d },
+		{0xa979, 0x15 },
+		{0xa97c, 0x19 },
+		{0xa97d, 0x0d },
+		{0xa980, 0x15 },
+		{0xa981, 0x1a },
+		{0xa984, 0x0d },
+		{0xa985, 0x15 },
+		{0xa988, 0x1a },
+		{0xa989, 0x0d },
+		{0xa98c, 0x15 },
+		{0xa98d, 0x1a },
+		{0xa990, 0x0b },
+		{0xa991, 0x11 },
+		{0xa994, 0x02 },
+		{0xa995, 0x0e },
+		{0xa998, 0x16 },
+		{0xa999, 0x02 },
+		{0xa99c, 0x0c },
+		{0xa99d, 0x13 },
+		{0xa9a0, 0x02 },
+		{0xa9a1, 0x0c },
+		{0xa9a4, 0x12 },
+		{0xa9a5, 0x02 },
+		{0xa9a8, 0x0c },
+		{0xa9a9, 0x12 },
+		{0xa9ac, 0x02 },
+		{0xa9ad, 0x0c },
+		{0xa9b0, 0x12 },
+		{0xa9b1, 0x02 },
+		{0xa9b4, 0x10 },
+		{0xa9b5, 0x1e },
+		{0xa9b8, 0x0f },
+		{0xa9b9, 0x13 },
+		{0xa9bc, 0x20 },
+		{0xa9bd, 0x10 },
+		{0xa9c0, 0x11 },
+		{0xa9c1, 0x1e },
+		{0xa9c4, 0x10 },
+		{0xa9c5, 0x11 },
+		{0xa9c8, 0x1e },
+		{0xa9c9, 0x10 },
+		{0xa9cc, 0x11 },
+		{0xa9cd, 0x20 },
+		{0xa9d0, 0x10 },
+		{0xa9d1, 0x13 },
+		{0xa9d4, 0x24 },
+		{0xa9d5, 0x10 },
+		{0xa9f0, 0x02 },
+		{0xa9f1, 0x01 },
+		{0xa9f8, 0x19 },
+		{0xa9f9, 0x0b },
+		{0xa9fc, 0x0a },
+		{0xa9fd, 0x07 },
+		{0xaa00, 0x0c },
+		{0xaa01, 0x0e },
+		{0xaa08, 0x0c },
+		{0xaa09, 0x06 },
+		{0xaa0c, 0x0c },
+		{0xaa0d, 0x0a },
+		{0xaa24, 0x10 },
+		{0xaa25, 0x12 },
+		{0xaa28, 0x0b },
+		{0xaa29, 0x07 },
+		{0xaa2c, 0x10 },
+		{0xaa2d, 0x14 },
+		{0xaa34, 0x0e },
+		{0xaa35, 0x0e },
+		{0xaa38, 0x07 },
+		{0xaa39, 0x07 },
+		{0xaa3c, 0x0e },
+		{0xaa3d, 0x0c },
+		{0xaa48, 0x09 },
+		{0xaa49, 0x0c },
+		{0xaa4c, 0x0c },
+		{0xaa4d, 0x07 },
+		{0xaa54, 0x08 },
+		{0xaa55, 0x06 },
+		{0xaa58, 0x04 },
+		{0xaa59, 0x05 },
+		{0xaa5c, 0x06 },
+		{0xaa5d, 0x06 },
+		{0xaa68, 0x05 },
+		{0xaa69, 0x05 },
+		{0xaa6c, 0x04 },
+		{0xaa6d, 0x05 },
+		{0xaa74, 0x06 },
+		{0xaa75, 0x04 },
+		{0xaa78, 0x05 },
+		{0xaa79, 0x05 },
+		{0xaa7c, 0x04 },
+		{0xaa7d, 0x06 },
+		{0xac18, 0x14 },
+		{0xac19, 0x00 },
+		{0xac1c, 0x14 },
+		{0xac1d, 0x00 },
+		{0xac20, 0x14 },
+		{0xac21, 0x00 },
+		{0xac24, 0x14 },
+		{0xac25, 0x00 },
+		{0xac28, 0x14 },
+		{0xac29, 0x00 },
+		{0xac2c, 0x14 },
+		{0xac2d, 0x00 },
+		{0xac34, 0x16 },
+		{0xac35, 0x00 },
+		{0xac38, 0x16 },
+		{0xac39, 0x00 },
+		{0xac3c, 0x16 },
+		{0xac3d, 0x00 },
+		{0xac40, 0x16 },
+		{0xac41, 0x00 },
+		{0xac44, 0x16 },
+		{0xac45, 0x00 },
+		{0xac48, 0x16 },
+		{0xac49, 0x00 },
+		{0xac50, 0x1b },
+		{0xac51, 0x00 },
+		{0xac54, 0x1b },
+		{0xac55, 0x00 },
+		{0xac58, 0x1b },
+		{0xac59, 0x00 },
+		{0xac5c, 0x1b },
+		{0xac5d, 0x00 },
+		{0xac60, 0x1b },
+		{0xac61, 0x00 },
+		{0xac64, 0x1b },
+		{0xac65, 0x00 },
+		{0xac74, 0x09 },
+		{0xac75, 0x0c },
+		{0xac78, 0x0f },
+		{0xac79, 0x11 },
+		{0xac7c, 0x12 },
+		{0xac7d, 0x14 },
+		{0xac80, 0x09 },
+		{0xac81, 0x0c },
+		{0xac84, 0x0f },
+		{0xac85, 0x11 },
+		{0xac88, 0x12 },
+		{0xac89, 0x14 },
+		{0xac8c, 0x09 },
+		{0xac8d, 0x0c },
+		{0xac90, 0x0f },
+		{0xac91, 0x11 },
+		{0xac94, 0x12 },
+		{0xac95, 0x14 },
+		{0xac98, 0x09 },
+		{0xac99, 0x0c },
+		{0xac9c, 0x0f },
+		{0xac9d, 0x11 },
+		{0xaca0, 0x12 },
+		{0xaca1, 0x14 },
+		{0xaca4, 0x09 },
+		{0xaca5, 0x0c },
+		{0xaca8, 0x0f },
+		{0xaca9, 0x11 },
+		{0xacac, 0x12 },
+		{0xacad, 0x14 },
+		{0xacb0, 0x07 },
+		{0xacb1, 0x09 },
+		{0xacb4, 0x0c },
+		{0xacb5, 0x0d },
+		{0xacb8, 0x0d },
+		{0xacb9, 0x0e },
+		{0xacbc, 0x05 },
+		{0xacbd, 0x07 },
+		{0xacc0, 0x0a },
+		{0xacc1, 0x0b },
+		{0xacc4, 0x0b },
+		{0xacc5, 0x0c },
+		{0xacc8, 0x03 },
+		{0xacc9, 0x04 },
+		{0xaccc, 0x07 },
+		{0xaccd, 0x08 },
+		{0xacd0, 0x09 },
+		{0xacd1, 0x09 },
+		{0x35B5, 0x01 },
+		{0x35BC, 0x01 },
+		{0x360A, 0x02 },
+		{0xFA9B, 0x01 },
+};
+
+#define NUM_LSC_CAST_REGS      33
+
+enum LSC_Cast_t {
+	cast_H = 0,
+	cast_U30,
+	cast_CW,
+	cast_D,
+	cast_MAX
+};
+
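+/*
+ * Per-cast lens-shading correction deltas: vx6953_lsc_patch() reads the
+ * D65 base table, adds the delta for each cast, and writes the result to
+ * that cast's register block (see LSC_CastRegs below).
+ */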
+static short int LSC_CorrectionForCast[cast_MAX][NUM_LSC_CAST_REGS] = {
+	{-30, -20,  8, 11, -16, -26, -35, -53, -9, -10, 44, 57, -39,
+		-14, 50, -173, -38, -32, -1, 9, 39, 51, -33, -49, -28,
+		-22, 7, 11, -21, 17, -62, -56, 0},
+	{-29, -18,  6,  1,  17, -35, -77, 0, 5, -17, -6, -22, -41, -1,
+		-37, 83, -38, -32, 1, -2, 15, 25, -67, 19, -28, -22, 5,
+		2, -18, 21, -86, 0, 0},
+	{-10, -15, -4, -6,  -8,  -3, -63, 8, 25, -9, -39, -51, -9,
+		0, -21, 112, -10, -23, -7, -9, 10, 18, -11, 23, -10,
+		-15, -4, -6, -10, -3, -52, 7, 0},
+	{  5,   3, -4, -5,  -1,   3,   4, 8, 12, 3, -22, -21, 7, 17,
+		2, 35, 8, 2, -3, -2, -9, -5, 10, 4, 9, 2, -4, -5,
+		-2, 0, -6, 9, 0}
+};
+
+static unsigned short LSC_CastRegs[] = {
+	0xFB7E,			/* H   */
+	0xFB3C,			/* U30 */
+	0xFAFA,			/* CW  */
+	0xFAB8			/* D65 */
+};
+
+/*=============================================================*/
+
+static int vx6953_i2c_rxdata(unsigned short saddr,
+	unsigned char *rxdata, int length)
+{
+	struct i2c_msg msgs[] = {
+		{
+			.addr  = saddr,
+			.flags = 0,
+			.len   = 2,
+			.buf   = rxdata,
+		},
+		{
+			.addr  = saddr,
+			.flags = I2C_M_RD,
+			.len   = length,
+			.buf   = rxdata,
+		},
+	};
+	if (i2c_transfer(vx6953_client->adapter, msgs, 2) < 0) {
+		CDBG("vx6953_i2c_rxdata failed!\n");
+		return -EIO;
+	}
+	return 0;
+}
+static int32_t vx6953_i2c_txdata(unsigned short saddr,
+				unsigned char *txdata, int length)
+{
+	struct i2c_msg msg[] = {
+		{
+			.addr = saddr,
+			.flags = 0,
+			.len = length,
+			.buf = txdata,
+		 },
+	};
+	if (i2c_transfer(vx6953_client->adapter, msg, 1) < 0) {
+		CDBG("vx6953_i2c_txdata faild 0x%x\n", vx6953_client->addr);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+
+static int32_t vx6953_i2c_read(unsigned short raddr,
+	unsigned short *rdata, int rlen)
+{
+	int32_t rc = 0;
+	unsigned char buf[2];
+	if (!rdata)
+		return -EIO;
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (raddr & 0xFF00) >> 8;
+	buf[1] = (raddr & 0x00FF);
+	rc = vx6953_i2c_rxdata(vx6953_client->addr>>1, buf, rlen);
+	if (rc < 0) {
+		CDBG("vx6953_i2c_read 0x%x failed!\n", raddr);
+		return rc;
+	}
+	*rdata = (rlen == 2 ? buf[0] << 8 | buf[1] : buf[0]);
+	return rc;
+}
+static int32_t vx6953_i2c_write_b_sensor(unsigned short waddr, uint8_t bdata)
+{
+	int32_t rc = -EFAULT;
+	unsigned char buf[3];
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (waddr & 0xFF00) >> 8;
+	buf[1] = (waddr & 0x00FF);
+	buf[2] = bdata;
+	CDBG("i2c_write_b addr = 0x%x, val = 0x%x\n", waddr, bdata);
+	rc = vx6953_i2c_txdata(vx6953_client->addr>>1, buf, 3);
+	if (rc < 0) {
+		CDBG("i2c_write_b failed, addr = 0x%x, val = 0x%x!\n",
+			waddr, bdata);
+	}
+	return rc;
+}
+static int32_t vx6953_i2c_write_w_sensor(unsigned short waddr, uint16_t wdata)
+{
+	int32_t rc = -EFAULT;
+	unsigned char buf[4];
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (waddr & 0xFF00) >> 8;
+	buf[1] = (waddr & 0x00FF);
+	buf[2] = (wdata & 0xFF00) >> 8;
+	buf[3] = (wdata & 0x00FF);
+	CDBG("i2c_write_b addr = 0x%x, val = 0x%x\n", waddr, wdata);
+	rc = vx6953_i2c_txdata(vx6953_client->addr>>1, buf, 4);
+	if (rc < 0) {
+		CDBG("i2c_write_b failed, addr = 0x%x, val = 0x%x!\n",
+			waddr, wdata);
+	}
+	return rc;
+}
+static int32_t vx6953_i2c_write_seq_sensor(unsigned short waddr,
+	uint8_t *bdata, uint16_t len)
+{
+	int32_t rc = -EFAULT;
+	unsigned char buf[len+2];
+	int i;
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (waddr & 0xFF00) >> 8;
+	buf[1] = (waddr & 0x00FF);
+	for (i = 2; i < len+2; i++)
+		buf[i] = *bdata++;
+	rc = vx6953_i2c_txdata(vx6953_client->addr>>1, buf, len+2);
+	if (rc < 0) {
+		CDBG("i2c_write_b failed, addr = 0x%x, val = 0x%x!\n",
+			 waddr, bdata[0]);
+	}
+	return rc;
+}
+
+static int32_t vx6953_i2c_write_w_table(struct vx6953_i2c_reg_conf const
+					 *reg_conf_tbl, int num)
+{
+	int i;
+	int32_t rc = -EIO;
+	for (i = 0; i < num; i++) {
+		rc = vx6953_i2c_write_b_sensor(reg_conf_tbl->waddr,
+			reg_conf_tbl->wdata);
+		if (rc < 0)
+			break;
+		reg_conf_tbl++;
+	}
+	return rc;
+}
+
+static void vx6953_get_pict_fps(uint16_t fps, uint16_t *pfps)
+{
+	/* input fps is preview fps in Q8 format */
+	uint16_t preview_frame_length_lines, snapshot_frame_length_lines;
+	uint16_t preview_line_length_pck, snapshot_line_length_pck;
+	uint32_t divider, d1, d2;
+	/* Total frame_length_lines and line_length_pck for preview */
+	preview_frame_length_lines = VX6953_QTR_SIZE_HEIGHT +
+		VX6953_VER_QTR_BLK_LINES;
+	preview_line_length_pck = VX6953_QTR_SIZE_WIDTH +
+		VX6953_HRZ_QTR_BLK_PIXELS;
+	/* Total frame_length_lines and line_length_pck for snapshot */
+	snapshot_frame_length_lines = VX6953_FULL_SIZE_HEIGHT +
+		VX6953_VER_FULL_BLK_LINES;
+	snapshot_line_length_pck = VX6953_FULL_SIZE_WIDTH +
+		VX6953_HRZ_FULL_BLK_PIXELS;
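+	/* Q10 ratio of preview frame period to snapshot frame period; the
+	 * snapshot fps is the preview fps scaled by this ratio. */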
+	d1 = preview_frame_length_lines * 0x00000400/
+		snapshot_frame_length_lines;
+	d2 = preview_line_length_pck * 0x00000400/
+		snapshot_line_length_pck;
+	divider = d1 * d2 / 0x400;
+	/*Verify PCLK settings and frame sizes.*/
+	*pfps = (uint16_t) (fps * divider / 0x400);
+
+}
+
+static uint16_t vx6953_get_prev_lines_pf(void)
+{
+	if (vx6953_ctrl->prev_res == QTR_SIZE)
+		return VX6953_QTR_SIZE_HEIGHT + VX6953_VER_QTR_BLK_LINES;
+	else
+		return VX6953_FULL_SIZE_HEIGHT + VX6953_VER_FULL_BLK_LINES;
+
+}
+
+static uint16_t vx6953_get_prev_pixels_pl(void)
+{
+	if (vx6953_ctrl->prev_res == QTR_SIZE)
+		return VX6953_QTR_SIZE_WIDTH + VX6953_HRZ_QTR_BLK_PIXELS;
+	else
+		return VX6953_FULL_SIZE_WIDTH + VX6953_HRZ_FULL_BLK_PIXELS;
+}
+
+static uint16_t vx6953_get_pict_lines_pf(void)
+{
+		if (vx6953_ctrl->pict_res == QTR_SIZE)
+			return VX6953_QTR_SIZE_HEIGHT +
+				VX6953_VER_QTR_BLK_LINES;
+		else
+			return VX6953_FULL_SIZE_HEIGHT +
+				VX6953_VER_FULL_BLK_LINES;
+}
+
+static uint16_t vx6953_get_pict_pixels_pl(void)
+{
+	if (vx6953_ctrl->pict_res == QTR_SIZE)
+		return VX6953_QTR_SIZE_WIDTH +
+			VX6953_HRZ_QTR_BLK_PIXELS;
+	else
+		return VX6953_FULL_SIZE_WIDTH +
+			VX6953_HRZ_FULL_BLK_PIXELS;
+}
+
+static uint32_t vx6953_get_pict_max_exp_lc(void)
+{
+	if (vx6953_ctrl->pict_res == QTR_SIZE)
+		return (VX6953_QTR_SIZE_HEIGHT +
+			VX6953_VER_QTR_BLK_LINES)*24;
+	else
+		return (VX6953_FULL_SIZE_HEIGHT +
+			VX6953_VER_FULL_BLK_LINES)*24;
+}
+
+static int32_t vx6953_set_fps(struct fps_cfg *fps)
+{
+	uint16_t total_lines_per_frame;
+	int32_t rc = 0;
+	total_lines_per_frame = (uint16_t)((VX6953_QTR_SIZE_HEIGHT +
+		VX6953_VER_QTR_BLK_LINES) * vx6953_ctrl->fps_divider/0x400);
+
+	vx6953_i2c_write_b_sensor(REG_GROUPED_PARAMETER_HOLD,
+		GROUPED_PARAMETER_HOLD);
+	if (vx6953_i2c_write_b_sensor(REG_FRAME_LENGTH_LINES_HI,
+		((total_lines_per_frame & 0xFF00) >> 8)) < 0)
+		return rc;
+	if (vx6953_i2c_write_b_sensor(REG_FRAME_LENGTH_LINES_LO,
+		(total_lines_per_frame & 0x00FF)) < 0)
+		return rc;
+	vx6953_i2c_write_b_sensor(REG_GROUPED_PARAMETER_HOLD,
+		GROUPED_PARAMETER_HOLD_OFF);
+	return rc;
+}
+
+static int32_t vx6953_write_exp_gain(uint16_t gain, uint32_t line)
+{
+	uint16_t line_length_pck, frame_length_lines;
+	uint8_t gain_hi, gain_lo;
+	uint8_t intg_time_hi, intg_time_lo;
+	uint8_t frame_length_lines_hi = 0, frame_length_lines_lo = 0;
+	int32_t rc = 0;
+	if (vx6953_ctrl->sensormode != SENSOR_SNAPSHOT_MODE) {
+		frame_length_lines = VX6953_QTR_SIZE_HEIGHT +
+		VX6953_VER_QTR_BLK_LINES;
+		line_length_pck = VX6953_QTR_SIZE_WIDTH +
+			VX6953_HRZ_QTR_BLK_PIXELS;
+		if (line > (frame_length_lines -
+			VX6953_STM5M0EDOF_OFFSET)) {
+			vx6953_ctrl->fps = (uint16_t) (30 * Q8 *
+			(frame_length_lines - VX6953_STM5M0EDOF_OFFSET)/
+			line);
+		} else {
+			vx6953_ctrl->fps = (uint16_t) (30 * Q8);
+		}
+	} else {
+		frame_length_lines = VX6953_FULL_SIZE_HEIGHT +
+				VX6953_VER_FULL_BLK_LINES;
+		line_length_pck = VX6953_FULL_SIZE_WIDTH +
+				VX6953_HRZ_FULL_BLK_PIXELS;
+	}
+
+	vx6953_i2c_write_b_sensor(REG_GROUPED_PARAMETER_HOLD,
+		GROUPED_PARAMETER_HOLD);
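+	/* Grow frame_length_lines if the requested integration time will not
+	 * fit, clamping both to the sensor maximum. */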
+	if ((line + VX6953_STM5M0EDOF_OFFSET) > MAX_FRAME_LENGTH_LINES) {
+		frame_length_lines = MAX_FRAME_LENGTH_LINES;
+		line = MAX_FRAME_LENGTH_LINES - VX6953_STM5M0EDOF_OFFSET;
+	} else if ((line + VX6953_STM5M0EDOF_OFFSET) > frame_length_lines) {
+		frame_length_lines = line + VX6953_STM5M0EDOF_OFFSET;
+		line = frame_length_lines;
+	}
+
+	frame_length_lines_hi = (uint8_t) ((frame_length_lines &
+		0xFF00) >> 8);
+	frame_length_lines_lo = (uint8_t) (frame_length_lines &
+		0x00FF);
+	vx6953_i2c_write_b_sensor(REG_FRAME_LENGTH_LINES_HI,
+		frame_length_lines_hi);
+	vx6953_i2c_write_b_sensor(REG_FRAME_LENGTH_LINES_LO,
+		frame_length_lines_lo);
+
+	/* update analogue gain registers */
+	gain_hi = (uint8_t) ((gain & 0xFF00) >> 8);
+	gain_lo = (uint8_t) (gain & 0x00FF);
+	vx6953_i2c_write_b_sensor(REG_ANALOGUE_GAIN_CODE_GLOBAL_LO,
+		gain_lo);
+	vx6953_i2c_write_b_sensor(REG_DIGITAL_GAIN_GREEN_R_LO, gain_hi);
+	vx6953_i2c_write_b_sensor(REG_DIGITAL_GAIN_RED_LO, gain_hi);
+	vx6953_i2c_write_b_sensor(REG_DIGITAL_GAIN_BLUE_LO, gain_hi);
+	vx6953_i2c_write_b_sensor(REG_DIGITAL_GAIN_GREEN_B_LO, gain_hi);
+	CDBG("%s, gain_hi 0x%x, gain_lo 0x%x\n", __func__,
+		gain_hi, gain_lo);
+	/* update line count registers */
+	intg_time_hi = (uint8_t) (((uint16_t)line & 0xFF00) >> 8);
+	intg_time_lo = (uint8_t) ((uint16_t)line & 0x00FF);
+	vx6953_i2c_write_b_sensor(REG_COARSE_INTEGRATION_TIME_HI,
+		intg_time_hi);
+	vx6953_i2c_write_b_sensor(REG_COARSE_INTEGRATION_TIME_LO,
+		intg_time_lo);
+	vx6953_i2c_write_b_sensor(REG_GROUPED_PARAMETER_HOLD,
+		GROUPED_PARAMETER_HOLD_OFF);
+
+	return rc;
+}
+
+static int32_t vx6953_set_pict_exp_gain(uint16_t gain, uint32_t line)
+{
+	int32_t rc = 0;
+	rc = vx6953_write_exp_gain(gain, line);
+	return rc;
+} /* endof vx6953_set_pict_exp_gain*/
+
+static int32_t vx6953_move_focus(int direction,
+	int32_t num_steps)
+{
+	return 0;
+}
+
+
+static int32_t vx6953_set_default_focus(uint8_t af_step)
+{
+	return 0;
+}
+
+static int32_t vx6953_test(enum vx6953_test_mode_t mo)
+{
+	int32_t rc = 0;
+	if (mo == TEST_OFF)
+		return rc;
+	else {
+		/* REG_0x30D8[4] is TESBYPEN: 0: Normal Operation,
+		 * 1: Bypass Signal Processing.
+		 * REG_0x30D8[5] is EBDMASK:
+		 * 0: Output Embedded data, 1: No output embedded data */
+		if (vx6953_i2c_write_b_sensor(REG_TEST_PATTERN_MODE,
+			(uint8_t) mo) < 0) {
+			return rc;
+		}
+	}
+	return rc;
+}
+
+static int vx6953_enable_edof(enum edof_mode_t edof_mode)
+{
+	int rc = 0;
+	if (edof_mode == VX6953_EDOF_ESTIMATION) {
+		/* EDof Estimation mode for preview */
+		if (vx6953_i2c_write_b_sensor(REG_0x0b80, 0x02) < 0)
+			return rc;
+		CDBG("VX6953_EDOF_ESTIMATION");
+	} else if (edof_mode == VX6953_EDOF_APPLICATION) {
+		/* EDof Application mode for Capture */
+		if (vx6953_i2c_write_b_sensor(REG_0x0b80, 0x01) < 0)
+			return rc;
+		CDBG("VX6953_EDOF_APPLICATION");
+	} else {
+		/* EDOF disabled */
+		if (vx6953_i2c_write_b_sensor(REG_0x0b80, 0x00) < 0)
+			return rc;
+		CDBG("VX6953_EDOF_DISABLE");
+	}
+	return rc;
+}
+
+static int32_t vx6953_patch_for_cut2(void)
+{
+	int32_t rc = 0;
+	rc = vx6953_i2c_write_w_table(patch_tbl_cut2,
+		ARRAY_SIZE(patch_tbl_cut2));
+	if (rc < 0)
+		return rc;
+
+	return rc;
+}
+
+static int32_t vx6953_lsc_patch(void)
+{
+	int32_t rc = 0;
+	int i, j;
+	short int  v;
+	unsigned short version = 0;
+	unsigned short LSC_Raw[NUM_LSC_CAST_REGS];
+	unsigned short LSC_Fixed[NUM_LSC_CAST_REGS];
+
+	vx6953_i2c_read(0x10, &version, 1);
+	CDBG("Cut 3 Version %d\n", version);
+	if (version != 1)
+		return 0;
+
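+	/* For each illuminant cast, read back the daylight (cast_D) lens
+	shading values, apply the per-cast correction offsets and write the
+	result into that cast's LSC register bank. */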
+	vx6953_i2c_write_b_sensor(0x3640, 0x00);
+	for (j = cast_H; j < cast_MAX; j++) {
+		for (i = 0; i < NUM_LSC_CAST_REGS; i++) {
+			rc = vx6953_i2c_read(LSC_CastRegs[cast_D]+(2*i),
+								&LSC_Raw[i], 2);
+			if (rc < 0)
+				return rc;
+			v = LSC_Raw[i];
+			v +=  LSC_CorrectionForCast[j][i];
+			LSC_Fixed[i] = (unsigned short) v;
+		}
+		for (i = 0; i < NUM_LSC_CAST_REGS; i++) {
+			rc = vx6953_i2c_write_w_sensor(LSC_CastRegs[j]+(2*i),
+								LSC_Fixed[i]);
+			if (rc < 0)
+				return rc;
+		}
+	}
+	CDBG("vx6953_lsc_patch done\n");
+	return rc;
+}
+
+static int32_t vx6953_sensor_setting(int update_type, int rt)
+{
+
+	int32_t rc = 0;
+	unsigned short frame_cnt;
+	struct msm_camera_csi_params vx6953_csi_params;
+	if (vx6953_ctrl->sensor_type != VX6953_STM5M0EDOF_CUT_2) {
+		switch (update_type) {
+		case REG_INIT:
+		if (rt == RES_PREVIEW || rt == RES_CAPTURE) {
+			struct vx6953_i2c_reg_conf init_tbl[] = {
+				{REG_0x0112,
+					vx6953_regs.reg_pat_init[0].
+					reg_0x0112},
+				{REG_0x0113,
+					vx6953_regs.reg_pat_init[0].
+					reg_0x0113},
+				{REG_VT_PIX_CLK_DIV,
+					vx6953_regs.reg_pat_init[0].
+					vt_pix_clk_div},
+				{0x303, 0x01},
+				{0x30b, 0x01},
+				{REG_PRE_PLL_CLK_DIV,
+					vx6953_regs.reg_pat_init[0].
+					pre_pll_clk_div},
+				{REG_PLL_MULTIPLIER,
+					vx6953_regs.reg_pat_init[0].
+					pll_multiplier},
+				{REG_OP_PIX_CLK_DIV,
+					vx6953_regs.reg_pat_init[0].
+					op_pix_clk_div},
+				{REG_0x3210, 0x01},
+				{REG_0x0111,
+					vx6953_regs.reg_pat_init[0].
+					reg_0x0111},
+				{REG_0x0b00,
+					vx6953_regs.reg_pat_init[0].
+					reg_0x0b00},
+				{REG_0x0136,
+					vx6953_regs.reg_pat_init[0].
+					reg_0x0136},
+				{REG_0x0137,
+					vx6953_regs.reg_pat_init[0].
+					reg_0x0137},
+				{REG_0x0b06,
+					vx6953_regs.reg_pat_init[0].
+					reg_0x0b06},
+				{REG_0x0b07,
+					vx6953_regs.reg_pat_init[0].
+					reg_0x0b07},
+				{REG_0x0b08,
+					vx6953_regs.reg_pat_init[0].
+					reg_0x0b08},
+				{REG_0x0b09,
+					vx6953_regs.reg_pat_init[0].
+					reg_0x0b09},
+				{REG_0x0b83,
+					vx6953_regs.reg_pat_init[0].
+					reg_0x0b83},
+				{REG_0x0b84,
+					vx6953_regs.reg_pat_init[0].
+					reg_0x0b84},
+				{REG_0x0b85,
+					vx6953_regs.reg_pat_init[0].
+					reg_0x0b85},
+				{REG_0x0b88,
+					vx6953_regs.reg_pat_init[0].
+					reg_0x0b88},
+				{REG_0x0b89,
+					vx6953_regs.reg_pat_init[0].
+					reg_0x0b89},
+				{REG_0x0b8a,
+					vx6953_regs.reg_pat_init[0].
+					reg_0x0b8a},
+				{0x3393, 0x06},
+				{0x3394, 0x07},
+				{0x338d, 0x08},
+				{0x338e, 0x08},
+				{0x338f, 0x00},
+			};
+			/* reset fps_divider */
+			vx6953_ctrl->fps = 30 * Q8;
+			/* stop streaming */
+
+			count = 0;
+			CDBG("Init vx6953_sensor_setting standby\n");
+			if (vx6953_i2c_write_b_sensor(REG_MODE_SELECT,
+				MODE_SELECT_STANDBY_MODE) < 0)
+				return rc;
+			msleep(vx6953_stm5m0edof_delay_msecs_stdby);
+			vx6953_i2c_write_b_sensor(REG_GROUPED_PARAMETER_HOLD,
+			GROUPED_PARAMETER_HOLD);
+
+			rc = vx6953_i2c_write_w_table(cut3_cali_data,
+				ARRAY_SIZE(cut3_cali_data));
+
+			vx6953_lsc_patch();
+
+			vx6953_i2c_write_w_sensor(0x100A, 0x07A3);
+			vx6953_i2c_write_w_sensor(0x114A, 0x002A);
+			vx6953_i2c_write_w_sensor(0x1716, 0x0204);
+			vx6953_i2c_write_w_sensor(0x1718, 0x0880);
+
+			rc = vx6953_i2c_write_w_table(&init_tbl[0],
+				ARRAY_SIZE(init_tbl));
+			if (rc < 0)
+				return rc;
+
+			msleep(10);
+
+		}
+		return rc;
+		case UPDATE_PERIODIC:
+		if (rt == RES_PREVIEW || rt == RES_CAPTURE) {
+			struct vx6953_i2c_reg_conf preview_mode_tbl[] = {
+				{0x200, 0x02},
+				{0x201, 0x26},
+				{REG_COARSE_INTEGRATION_TIME_HI,
+					vx6953_regs.reg_pat[rt].
+					coarse_integration_time_hi},
+				{REG_COARSE_INTEGRATION_TIME_LO,
+					vx6953_regs.reg_pat[rt].
+					coarse_integration_time_lo},
+				{REG_ANALOGUE_GAIN_CODE_GLOBAL_LO,
+					vx6953_regs.reg_pat[rt].
+					analogue_gain_code_global},
+				{REG_FRAME_LENGTH_LINES_HI,
+					vx6953_regs.reg_pat[rt].
+					frame_length_lines_hi},
+				{REG_FRAME_LENGTH_LINES_LO,
+					vx6953_regs.reg_pat[rt].
+					frame_length_lines_lo},
+				{REG_LINE_LENGTH_PCK_HI,
+					vx6953_regs.reg_pat[rt].
+					line_length_pck_hi},
+				{REG_LINE_LENGTH_PCK_LO,
+					vx6953_regs.reg_pat[rt].
+					line_length_pck_lo},
+				{REG_0x0b80,
+					vx6953_regs.reg_pat[rt].
+					reg_0x0b80},
+				{REG_0x0900,
+					vx6953_regs.reg_pat[rt].
+					reg_0x0900},
+				{REG_0x0901,
+					vx6953_regs.reg_pat[rt].
+					reg_0x0901},
+				{REG_0x0902,
+					vx6953_regs.reg_pat[rt].
+					reg_0x0902},
+				{REG_0x0383,
+					vx6953_regs.reg_pat[rt].
+					reg_0x0383},
+				{REG_0x0387,
+					vx6953_regs.reg_pat[rt].
+					reg_0x0387},
+				{REG_0x034c,
+					vx6953_regs.reg_pat[rt].
+					reg_0x034c},
+				{REG_0x034d,
+					vx6953_regs.reg_pat[rt].
+					reg_0x034d},
+				{REG_0x034e,
+					vx6953_regs.reg_pat[rt].
+					reg_0x034e},
+				{REG_0x034f,
+					vx6953_regs.reg_pat[rt].
+					reg_0x034f},
+				{REG_0x3640, 0x00},
+			};
+
+			struct vx6953_i2c_reg_conf snapshot_mode_tbl[] = {
+				{0x0200, 0x02},
+				{0x0201, 0x54},
+				{REG_COARSE_INTEGRATION_TIME_HI,
+					vx6953_regs.reg_pat[rt].
+					coarse_integration_time_hi},
+				{REG_COARSE_INTEGRATION_TIME_LO,
+					vx6953_regs.reg_pat[rt].
+					coarse_integration_time_lo},
+				{REG_ANALOGUE_GAIN_CODE_GLOBAL_LO,
+					vx6953_regs.reg_pat[rt].
+					analogue_gain_code_global},
+				{REG_FRAME_LENGTH_LINES_HI,
+					vx6953_regs.reg_pat[rt].
+					frame_length_lines_hi},
+				{REG_FRAME_LENGTH_LINES_LO,
+					vx6953_regs.reg_pat[rt].
+					frame_length_lines_lo},
+				{REG_LINE_LENGTH_PCK_HI,
+					vx6953_regs.reg_pat[rt].
+					line_length_pck_hi},
+				{REG_LINE_LENGTH_PCK_LO,
+					vx6953_regs.reg_pat[rt].
+					line_length_pck_lo},
+				{REG_0x0b80,
+					vx6953_regs.reg_pat[rt].
+					reg_0x0b80},
+				{REG_0x0900,
+					vx6953_regs.reg_pat[rt].
+					reg_0x0900},
+				{REG_0x0901,
+					vx6953_regs.reg_pat[rt].
+					reg_0x0901},
+				{REG_0x0902,
+					vx6953_regs.reg_pat[rt].
+					reg_0x0902},
+				{REG_0x0383,
+					vx6953_regs.reg_pat[rt].
+					reg_0x0383},
+				{REG_0x0387,
+					vx6953_regs.reg_pat[rt].
+					reg_0x0387},
+				{REG_0x034c,
+					vx6953_regs.reg_pat[rt].
+					reg_0x034c},
+				{REG_0x034d,
+					vx6953_regs.reg_pat[rt].
+					reg_0x034d},
+				{REG_0x034e,
+					vx6953_regs.reg_pat[rt].
+					reg_0x034e},
+				{REG_0x034f,
+					vx6953_regs.reg_pat[rt].
+					reg_0x034f},
+				{0x3140, 0x01},
+				{REG_0x3640, 0x00},
+			};
+			/* stop streaming */
+
+			if (vx6953_i2c_write_b_sensor(REG_MODE_SELECT,
+				MODE_SELECT_STANDBY_MODE) < 0)
+				return rc;
+			msleep(vx6953_stm5m0edof_delay_msecs_stdby);
+
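+			/* Configure the MIPI CSI receiver only on the first
+			mode switch after init: single data lane, 8-bit data
+			format, default lane mapping (0xe4). */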
+			if (count == 0) {
+				vx6953_csi_params.data_format = CSI_8BIT;
+				vx6953_csi_params.lane_cnt = 1;
+				vx6953_csi_params.lane_assign = 0xe4;
+				vx6953_csi_params.dpcm_scheme = 0;
+				vx6953_csi_params.settle_cnt = 7;
+				rc = msm_camio_csi_config(&vx6953_csi_params);
+				if (rc < 0)
+					CDBG("config csi controller failed\n");
+
+				msleep(20);
+				count = 1;
+			}
+
+			vx6953_i2c_write_b_sensor(REG_GROUPED_PARAMETER_HOLD,
+			GROUPED_PARAMETER_HOLD);
+
+			if (rt == RES_PREVIEW) {
+				rc = vx6953_i2c_write_w_table(
+					&preview_mode_tbl[0],
+					ARRAY_SIZE(preview_mode_tbl));
+				if (rc < 0)
+					return rc;
+			}
+			if (rt == RES_CAPTURE) {
+				rc = vx6953_i2c_write_w_table(
+					&snapshot_mode_tbl[0],
+					ARRAY_SIZE(snapshot_mode_tbl));
+				if (rc < 0)
+					return rc;
+			}
+
+			vx6953_i2c_write_b_sensor(REG_GROUPED_PARAMETER_HOLD,
+			GROUPED_PARAMETER_HOLD_OFF);
+
+			/* Start sensor streaming */
+			if (vx6953_i2c_write_b_sensor(REG_MODE_SELECT,
+				MODE_SELECT_STREAM) < 0)
+				return rc;
+			msleep(10);
+
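+			/* The frame count register (0x0005) reads 0xFF while
+			the sensor is still in standby; poll it until a valid
+			count appears, i.e. streaming has actually started. */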
+			if (vx6953_i2c_read(0x0005, &frame_cnt, 1) < 0)
+				return rc;
+
+			while (frame_cnt == 0xFF) {
+				if (vx6953_i2c_read(0x0005, &frame_cnt, 1) < 0)
+					return rc;
+				CDBG("frame_cnt=%d\n", frame_cnt);
+				msleep(2);
+			}
+		}
+		return rc;
+		default:
+			return rc;
+		}
+	} else {
+		switch (update_type) {
+		case REG_INIT:
+		if (rt == RES_PREVIEW || rt == RES_CAPTURE) {
+			struct vx6953_i2c_reg_conf init_tbl[] = {
+			{REG_0x0112,
+				vx6953_regs.reg_pat_init[0].reg_0x0112},
+			{REG_0x0113,
+				vx6953_regs.reg_pat_init[0].reg_0x0113},
+			{REG_VT_PIX_CLK_DIV,
+				vx6953_regs.reg_pat_init[0].
+				vt_pix_clk_div},
+			{REG_PRE_PLL_CLK_DIV,
+				vx6953_regs.reg_pat_init[0].
+				pre_pll_clk_div},
+			{REG_PLL_MULTIPLIER,
+				vx6953_regs.reg_pat_init[0].
+				pll_multiplier},
+			{REG_OP_PIX_CLK_DIV,
+				vx6953_regs.reg_pat_init[0].
+				op_pix_clk_div},
+			{REG_COARSE_INTEGRATION_TIME_HI,
+				vx6953_regs.reg_pat[rt].
+				coarse_integration_time_hi},
+			{REG_COARSE_INTEGRATION_TIME_LO,
+				vx6953_regs.reg_pat[rt].
+				coarse_integration_time_lo},
+			{REG_ANALOGUE_GAIN_CODE_GLOBAL_LO,
+				vx6953_regs.reg_pat[rt].
+				analogue_gain_code_global},
+			{REG_0x3030,
+				vx6953_regs.reg_pat_init[0].reg_0x3030},
+			/* 953 specific registers */
+			{REG_0x0111,
+				vx6953_regs.reg_pat_init[0].reg_0x0111},
+			{REG_0x0b00,
+				vx6953_regs.reg_pat_init[0].reg_0x0b00},
+			{REG_0x3001,
+				vx6953_regs.reg_pat_init[0].reg_0x3001},
+			{REG_0x3004,
+				vx6953_regs.reg_pat_init[0].reg_0x3004},
+			{REG_0x3007,
+				vx6953_regs.reg_pat_init[0].reg_0x3007},
+			{REG_0x3016,
+				vx6953_regs.reg_pat_init[0].reg_0x3016},
+			{REG_0x301d,
+				vx6953_regs.reg_pat_init[0].reg_0x301d},
+			{REG_0x317e,
+				vx6953_regs.reg_pat_init[0].reg_0x317e},
+			{REG_0x317f,
+				vx6953_regs.reg_pat_init[0].reg_0x317f},
+			{REG_0x3400,
+				vx6953_regs.reg_pat_init[0].reg_0x3400},
+			/* DEFCOR settings */
+			/*Single Defect Correction Weight DISABLE*/
+			{0x0b06,
+				vx6953_regs.reg_pat_init[0].reg_0x0b06},
+			/*Single_defect_correct_weight = auto*/
+			{0x0b07,
+				vx6953_regs.reg_pat_init[0].reg_0x0b07},
+			/*Dynamic couplet correction ENABLED*/
+			{0x0b08,
+				vx6953_regs.reg_pat_init[0].reg_0x0b08},
+			/*Dynamic couplet correction weight*/
+			{0x0b09,
+				vx6953_regs.reg_pat_init[0].reg_0x0b09},
+			/* Clock Setup */
+			/* Tell sensor ext clk is 24MHz*/
+			{0x0136,
+				vx6953_regs.reg_pat_init[0].reg_0x0136},
+			{0x0137,
+				vx6953_regs.reg_pat_init[0].reg_0x0137},
+			/* The white balance gains must be written
+			to the sensor every frame. */
+			/* Edof */
+			{REG_0x0b83,
+				vx6953_regs.reg_pat_init[0].reg_0x0b83},
+			{REG_0x0b84,
+				vx6953_regs.reg_pat_init[0].reg_0x0b84},
+			{0x0b85,
+				vx6953_regs.reg_pat_init[0].reg_0x0b85},
+			{0x0b88,
+				vx6953_regs.reg_pat_init[0].reg_0x0b88},
+			{0x0b89,
+				vx6953_regs.reg_pat_init[0].reg_0x0b89},
+			{REG_0x0b8a,
+				vx6953_regs.reg_pat_init[0].reg_0x0b8a},
+			/* Mode specific registers */
+			{REG_FRAME_LENGTH_LINES_HI,
+				vx6953_regs.reg_pat[rt].
+				frame_length_lines_hi},
+			{REG_FRAME_LENGTH_LINES_LO,
+				vx6953_regs.reg_pat[rt].
+				frame_length_lines_lo},
+			{REG_LINE_LENGTH_PCK_HI,
+				vx6953_regs.reg_pat[rt].
+				line_length_pck_hi},
+			{REG_LINE_LENGTH_PCK_LO,
+				vx6953_regs.reg_pat[rt].
+				line_length_pck_lo},
+			{REG_0x3005,
+				vx6953_regs.reg_pat[rt].reg_0x3005},
+			{0x3010,
+				vx6953_regs.reg_pat[rt].reg_0x3010},
+			{REG_0x3011,
+				vx6953_regs.reg_pat[rt].reg_0x3011},
+			{REG_0x301a,
+				vx6953_regs.reg_pat[rt].reg_0x301a},
+			{REG_0x3035,
+				vx6953_regs.reg_pat[rt].reg_0x3035},
+			{REG_0x3036,
+				vx6953_regs.reg_pat[rt].reg_0x3036},
+			{REG_0x3041,
+				vx6953_regs.reg_pat[rt].reg_0x3041},
+			{0x3042,
+				vx6953_regs.reg_pat[rt].reg_0x3042},
+			{REG_0x3045,
+				vx6953_regs.reg_pat[rt].reg_0x3045},
+			/*EDOF: Estimation settings for Preview mode
+			Application settings for capture mode
+			(standard settings - Not tuned) */
+			{REG_0x0b80,
+				vx6953_regs.reg_pat[rt].reg_0x0b80},
+			{REG_0x0900,
+				vx6953_regs.reg_pat[rt].reg_0x0900},
+			{REG_0x0901,
+				vx6953_regs.reg_pat[rt].reg_0x0901},
+			{REG_0x0902,
+				vx6953_regs.reg_pat[rt].reg_0x0902},
+			{REG_0x0383,
+				vx6953_regs.reg_pat[rt].reg_0x0383},
+			{REG_0x0387,
+				vx6953_regs.reg_pat[rt].reg_0x0387},
+			/* Change output size / frame rate */
+			{REG_0x034c,
+				vx6953_regs.reg_pat[rt].reg_0x034c},
+			{REG_0x034d,
+				vx6953_regs.reg_pat[rt].reg_0x034d},
+			{REG_0x034e,
+				vx6953_regs.reg_pat[rt].reg_0x034e},
+			{REG_0x034f,
+				vx6953_regs.reg_pat[rt].reg_0x034f},
+			{REG_0x1716,
+				vx6953_regs.reg_pat[rt].reg_0x1716},
+			{REG_0x1717,
+				vx6953_regs.reg_pat[rt].reg_0x1717},
+			{REG_0x1718,
+				vx6953_regs.reg_pat[rt].reg_0x1718},
+			{REG_0x1719,
+				vx6953_regs.reg_pat[rt].reg_0x1719},
+			};
+						/* reset fps_divider */
+			vx6953_ctrl->fps = 30 * Q8;
+			/* stop streaming */
+
+			/* Reset everything first */
+			if (vx6953_i2c_write_b_sensor(0x103, 0x01) < 0) {
+				CDBG("S/W reset failed\n");
+				return rc;
+			} else
+				CDBG("S/W reset successful\n");
+
+			msleep(10);
+
+			CDBG("Init vx6953_sensor_setting standby\n");
+			if (vx6953_i2c_write_b_sensor(REG_MODE_SELECT,
+				MODE_SELECT_STANDBY_MODE) < 0)
+				return rc;
+				/*vx6953_stm5m0edof_delay_msecs_stdby*/
+			msleep(vx6953_stm5m0edof_delay_msecs_stdby);
+			vx6953_patch_for_cut2();
+			rc = vx6953_i2c_write_w_table(&init_tbl[0],
+				ARRAY_SIZE(init_tbl));
+			if (rc < 0)
+				return rc;
+			msleep(vx6953_stm5m0edof_delay_msecs_stdby);
+		}
+	return rc;
+	case UPDATE_PERIODIC:
+		if (rt == RES_PREVIEW || rt == RES_CAPTURE) {
+			struct vx6953_i2c_reg_conf init_mode_tbl[] =  {
+			{REG_0x0112,
+				vx6953_regs.reg_pat_init[0].reg_0x0112},
+			{REG_0x0113,
+				vx6953_regs.reg_pat_init[0].reg_0x0113},
+			{REG_VT_PIX_CLK_DIV,
+				vx6953_regs.reg_pat_init[0].
+				vt_pix_clk_div},
+			{REG_PRE_PLL_CLK_DIV,
+				vx6953_regs.reg_pat_init[0].
+				pre_pll_clk_div},
+			{REG_PLL_MULTIPLIER,
+				vx6953_regs.reg_pat_init[0].
+				pll_multiplier},
+			{REG_OP_PIX_CLK_DIV,
+				vx6953_regs.reg_pat_init[0].
+				op_pix_clk_div},
+			{REG_COARSE_INTEGRATION_TIME_HI,
+				vx6953_regs.reg_pat[rt].
+				coarse_integration_time_hi},
+			{REG_COARSE_INTEGRATION_TIME_LO,
+				vx6953_regs.reg_pat[rt].
+				coarse_integration_time_lo},
+			{REG_ANALOGUE_GAIN_CODE_GLOBAL_LO,
+				vx6953_regs.reg_pat[rt].
+				analogue_gain_code_global},
+			{REG_0x3030,
+				vx6953_regs.reg_pat_init[0].reg_0x3030},
+			/* 953 specific registers */
+			{REG_0x0111,
+				vx6953_regs.reg_pat_init[0].reg_0x0111},
+			{REG_0x0b00,
+				vx6953_regs.reg_pat_init[0].reg_0x0b00},
+			{REG_0x3001,
+				vx6953_regs.reg_pat_init[0].reg_0x3001},
+			{REG_0x3004,
+				vx6953_regs.reg_pat_init[0].reg_0x3004},
+			{REG_0x3007,
+				vx6953_regs.reg_pat_init[0].reg_0x3007},
+			{REG_0x3016,
+				vx6953_regs.reg_pat_init[0].reg_0x3016},
+			{REG_0x301d,
+				vx6953_regs.reg_pat_init[0].reg_0x301d},
+			{REG_0x317e,
+				vx6953_regs.reg_pat_init[0].reg_0x317e},
+			{REG_0x317f,
+				vx6953_regs.reg_pat_init[0].reg_0x317f},
+			{REG_0x3400,
+				vx6953_regs.reg_pat_init[0].reg_0x3400},
+			{0x0b06,
+				vx6953_regs.reg_pat_init[0].reg_0x0b06},
+			/*Single_defect_correct_weight = auto*/
+			{0x0b07,
+				vx6953_regs.reg_pat_init[0].reg_0x0b07},
+			/*Dynamic couplet correction ENABLED*/
+			{0x0b08,
+				vx6953_regs.reg_pat_init[0].reg_0x0b08},
+			/*Dynamic couplet correction weight*/
+			{0x0b09,
+				vx6953_regs.reg_pat_init[0].reg_0x0b09},
+			/* Clock Setup */
+			/* Tell sensor ext clk is 24MHz*/
+			{0x0136,
+				vx6953_regs.reg_pat_init[0].reg_0x0136},
+			{0x0137,
+				vx6953_regs.reg_pat_init[0].reg_0x0137},
+			/* The white balance gains must be written
+			to the sensor every frame. */
+			/* Edof */
+			{REG_0x0b83,
+				vx6953_regs.reg_pat_init[0].reg_0x0b83},
+			{REG_0x0b84,
+				vx6953_regs.reg_pat_init[0].reg_0x0b84},
+			{0x0b85,
+				vx6953_regs.reg_pat_init[0].reg_0x0b85},
+			{0x0b88,
+				vx6953_regs.reg_pat_init[0].reg_0x0b88},
+			{0x0b89,
+				vx6953_regs.reg_pat_init[0].reg_0x0b89},
+			{REG_0x0b8a,
+				vx6953_regs.reg_pat_init[0].reg_0x0b8a},
+			/* Mode specific registers */
+			{REG_FRAME_LENGTH_LINES_HI,
+				vx6953_regs.reg_pat[rt].
+				frame_length_lines_hi},
+			{REG_FRAME_LENGTH_LINES_LO,
+				vx6953_regs.reg_pat[rt].
+				frame_length_lines_lo},
+			{REG_LINE_LENGTH_PCK_HI,
+				vx6953_regs.reg_pat[rt].
+				line_length_pck_hi},
+			{REG_LINE_LENGTH_PCK_LO,
+				vx6953_regs.reg_pat[rt].
+				line_length_pck_lo},
+			{REG_0x3005,
+				vx6953_regs.reg_pat[rt].reg_0x3005},
+			{0x3010,
+				vx6953_regs.reg_pat[rt].reg_0x3010},
+			{REG_0x3011,
+				vx6953_regs.reg_pat[rt].reg_0x3011},
+			{REG_0x301a,
+				vx6953_regs.reg_pat[rt].reg_0x301a},
+			{REG_0x3035,
+				vx6953_regs.reg_pat[rt].reg_0x3035},
+			{REG_0x3036,
+				vx6953_regs.reg_pat[rt].reg_0x3036},
+			{REG_0x3041,
+				vx6953_regs.reg_pat[rt].reg_0x3041},
+			{0x3042,
+				vx6953_regs.reg_pat[rt].reg_0x3042},
+			{REG_0x3045,
+				vx6953_regs.reg_pat[rt].reg_0x3045},
+			/*EDOF: Estimation settings for Preview mode
+			Application settings for capture mode
+			(standard settings - Not tuned) */
+			{REG_0x0b80,
+				vx6953_regs.reg_pat[rt].reg_0x0b80},
+			{REG_0x0900,
+				vx6953_regs.reg_pat[rt].reg_0x0900},
+			{REG_0x0901,
+				vx6953_regs.reg_pat[rt].reg_0x0901},
+			{REG_0x0902,
+				vx6953_regs.reg_pat[rt].reg_0x0902},
+			{REG_0x0383,
+				vx6953_regs.reg_pat[rt].reg_0x0383},
+			{REG_0x0387,
+				vx6953_regs.reg_pat[rt].reg_0x0387},
+			/* Change output size / frame rate */
+			{REG_0x034c,
+				vx6953_regs.reg_pat[rt].reg_0x034c},
+			{REG_0x034d,
+				vx6953_regs.reg_pat[rt].reg_0x034d},
+			{REG_0x034e,
+				vx6953_regs.reg_pat[rt].reg_0x034e},
+			{REG_0x034f,
+				vx6953_regs.reg_pat[rt].reg_0x034f},
+			{REG_0x1716,
+				vx6953_regs.reg_pat[rt].reg_0x1716},
+			{REG_0x1717,
+				vx6953_regs.reg_pat[rt].reg_0x1717},
+			{REG_0x1718,
+				vx6953_regs.reg_pat[rt].reg_0x1718},
+			{REG_0x1719,
+				vx6953_regs.reg_pat[rt].reg_0x1719},
+			};
+			struct vx6953_i2c_reg_conf mode_tbl[] = {
+			{REG_0x0112,
+				vx6953_regs.reg_pat_init[0].reg_0x0112},
+			{REG_0x0113,
+				vx6953_regs.reg_pat_init[0].reg_0x0113},
+			{REG_VT_PIX_CLK_DIV,
+				vx6953_regs.reg_pat_init[0].
+				vt_pix_clk_div},
+			{REG_PRE_PLL_CLK_DIV,
+				vx6953_regs.reg_pat_init[0].
+				pre_pll_clk_div},
+			{REG_PLL_MULTIPLIER,
+				vx6953_regs.reg_pat_init[0].
+				pll_multiplier},
+			{REG_OP_PIX_CLK_DIV,
+				vx6953_regs.reg_pat_init[0].
+				op_pix_clk_div},
+			/* Mode specific registers */
+			{REG_FRAME_LENGTH_LINES_HI,
+				vx6953_regs.reg_pat[rt].frame_length_lines_hi},
+			{REG_FRAME_LENGTH_LINES_LO,
+				vx6953_regs.reg_pat[rt].frame_length_lines_lo},
+			{REG_LINE_LENGTH_PCK_HI,
+				vx6953_regs.reg_pat[rt].line_length_pck_hi},
+			{REG_LINE_LENGTH_PCK_LO,
+				vx6953_regs.reg_pat[rt].line_length_pck_lo},
+			{REG_0x3005, vx6953_regs.reg_pat[rt].reg_0x3005},
+			{0x3010, vx6953_regs.reg_pat[rt].reg_0x3010},
+			{REG_0x3011, vx6953_regs.reg_pat[rt].reg_0x3011},
+			{REG_0x301a, vx6953_regs.reg_pat[rt].reg_0x301a},
+			{REG_0x3035, vx6953_regs.reg_pat[rt].reg_0x3035},
+			{REG_0x3036, vx6953_regs.reg_pat[rt].reg_0x3036},
+			{REG_0x3041, vx6953_regs.reg_pat[rt].reg_0x3041},
+			{0x3042, vx6953_regs.reg_pat[rt].reg_0x3042},
+			{REG_0x3045, vx6953_regs.reg_pat[rt].reg_0x3045},
+			/*EDOF: Estimation settings for Preview mode
+			Application settings for capture
+			mode(standard settings - Not tuned) */
+			{REG_0x0b80, vx6953_regs.reg_pat[rt].reg_0x0b80},
+			{REG_0x0900, vx6953_regs.reg_pat[rt].reg_0x0900},
+			{REG_0x0901, vx6953_regs.reg_pat[rt].reg_0x0901},
+			{REG_0x0902, vx6953_regs.reg_pat[rt].reg_0x0902},
+			{REG_0x0383, vx6953_regs.reg_pat[rt].reg_0x0383},
+			{REG_0x0387, vx6953_regs.reg_pat[rt].reg_0x0387},
+			/* Change output size / frame rate */
+			{REG_0x034c, vx6953_regs.reg_pat[rt].reg_0x034c},
+			{REG_0x034d, vx6953_regs.reg_pat[rt].reg_0x034d},
+			{REG_0x034e, vx6953_regs.reg_pat[rt].reg_0x034e},
+			{REG_0x034f, vx6953_regs.reg_pat[rt].reg_0x034f},
+			/*{0x200, vx6953_regs.reg_pat[rt].reg_0x0200},
+			{0x201, vx6953_regs.reg_pat[rt].reg_0x0201},*/
+			{REG_0x1716, vx6953_regs.reg_pat[rt].reg_0x1716},
+			{REG_0x1717, vx6953_regs.reg_pat[rt].reg_0x1717},
+			{REG_0x1718, vx6953_regs.reg_pat[rt].reg_0x1718},
+			{REG_0x1719, vx6953_regs.reg_pat[rt].reg_0x1719},
+			};
+			/* stop streaming */
+			msleep(5);
+
+			/* Reset everything first */
+			if (vx6953_i2c_write_b_sensor(0x103, 0x01) < 0) {
+				CDBG("S/W reset failed\n");
+				return rc;
+			} else
+				CDBG("S/W reset successful\n");
+
+			msleep(10);
+
+			if (vx6953_i2c_write_b_sensor(REG_MODE_SELECT,
+				MODE_SELECT_STANDBY_MODE) < 0)
+				return rc;
+			/*vx6953_stm5m0edof_delay_msecs_stdby*/
+			msleep(vx6953_stm5m0edof_delay_msecs_stdby);
+
+
+			vx6953_csi_params.data_format = CSI_8BIT;
+			vx6953_csi_params.lane_cnt = 1;
+			vx6953_csi_params.lane_assign = 0xe4;
+			vx6953_csi_params.dpcm_scheme = 0;
+			vx6953_csi_params.settle_cnt = 7;
+			rc = msm_camio_csi_config(&vx6953_csi_params);
+			if (rc < 0)
+				CDBG(" config csi controller failed \n");
+
+			msleep(vx6953_stm5m0edof_delay_msecs_stdby);
+
+			vx6953_patch_for_cut2();
+			rc = vx6953_i2c_write_w_table(&init_mode_tbl[0],
+				ARRAY_SIZE(init_mode_tbl));
+			if (rc < 0)
+				return rc;
+
+			msleep(vx6953_stm5m0edof_delay_msecs_stdby);
+
+			rc = vx6953_i2c_write_w_table(&mode_tbl[0],
+				ARRAY_SIZE(mode_tbl));
+			if (rc < 0)
+				return rc;
+
+			msleep(vx6953_stm5m0edof_delay_msecs_stdby);
+
+			/* Start sensor streaming */
+			if (vx6953_i2c_write_b_sensor(REG_MODE_SELECT,
+				MODE_SELECT_STREAM) < 0)
+				return rc;
+			msleep(vx6953_stm5m0edof_delay_msecs_stream);
+
+			if (vx6953_i2c_read(0x0005, &frame_cnt, 1) < 0)
+				return rc;
+
+			while (frame_cnt == 0xFF) {
+				if (vx6953_i2c_read(0x0005, &frame_cnt, 1) < 0)
+					return rc;
+				CDBG("frame_cnt=%d", frame_cnt);
+				msleep(10);
+			}
+		}
+		return rc;
+	default:
+		return rc;
+	}
+	}
+	return rc;
+}
+
+
+static int32_t vx6953_video_config(int mode)
+{
+
+	int32_t	rc = 0;
+	int	rt;
+	/* change sensor resolution if needed */
+	if (vx6953_ctrl->curr_res != vx6953_ctrl->prev_res) {
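+		/* The standby delay approximates two frame periods when
+		switching to preview and one when switching to capture,
+		computed from fps (Q8) and fps_divider (Q10) fixed point. */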
+		if (vx6953_ctrl->prev_res == QTR_SIZE) {
+			rt = RES_PREVIEW;
+			vx6953_stm5m0edof_delay_msecs_stdby	=
+				((((2 * 1000 * vx6953_ctrl->fps_divider) /
+				   vx6953_ctrl->fps) * Q8) / Q10) + 1;
+		} else {
+			rt = RES_CAPTURE;
+			vx6953_stm5m0edof_delay_msecs_stdby	=
+				((((1000 * vx6953_ctrl->fps_divider) /
+				   vx6953_ctrl->fps) * Q8) / Q10) + 1;
+		}
+		if (vx6953_sensor_setting(UPDATE_PERIODIC, rt) < 0)
+			return rc;
+	}
+	if (vx6953_ctrl->set_test) {
+		if (vx6953_test(vx6953_ctrl->set_test) < 0)
+			return	rc;
+	}
+	vx6953_ctrl->edof_mode = VX6953_EDOF_ESTIMATION;
+	rc = vx6953_enable_edof(vx6953_ctrl->edof_mode);
+	if (rc < 0)
+		return rc;
+	vx6953_ctrl->curr_res = vx6953_ctrl->prev_res;
+	vx6953_ctrl->sensormode = mode;
+	return rc;
+}
+
+static int32_t vx6953_snapshot_config(int mode)
+{
+	int32_t rc = 0;
+	int rt;
+	/*change sensor resolution if needed */
+	if (vx6953_ctrl->curr_res != vx6953_ctrl->pict_res) {
+		if (vx6953_ctrl->pict_res == QTR_SIZE) {
+			rt = RES_PREVIEW;
+			vx6953_stm5m0edof_delay_msecs_stdby =
+				((((2 * 1000 * vx6953_ctrl->fps_divider) /
+				vx6953_ctrl->fps) * Q8) / Q10) + 1;
+		} else {
+			rt = RES_CAPTURE;
+			vx6953_stm5m0edof_delay_msecs_stdby =
+				((((1000 * vx6953_ctrl->fps_divider) /
+				vx6953_ctrl->fps) * Q8) / Q10) + 1;
+		}
+	if (vx6953_sensor_setting(UPDATE_PERIODIC, rt) < 0)
+		return rc;
+	}
+
+	vx6953_ctrl->edof_mode = VX6953_EDOF_APPLICATION;
+	if (vx6953_enable_edof(vx6953_ctrl->edof_mode) < 0)
+		return rc;
+	vx6953_ctrl->curr_res = vx6953_ctrl->pict_res;
+	vx6953_ctrl->sensormode = mode;
+	return rc;
+} /*end of vx6953_snapshot_config*/
+
+static int32_t vx6953_raw_snapshot_config(int mode)
+{
+	int32_t rc = 0;
+	int rt;
+	/* change sensor resolution if needed */
+	if (vx6953_ctrl->curr_res != vx6953_ctrl->pict_res) {
+		if (vx6953_ctrl->pict_res == QTR_SIZE) {
+			rt = RES_PREVIEW;
+			vx6953_stm5m0edof_delay_msecs_stdby =
+				((((2 * 1000 * vx6953_ctrl->fps_divider)/
+				vx6953_ctrl->fps) * Q8) / Q10) + 1;
+		} else {
+			rt = RES_CAPTURE;
+			vx6953_stm5m0edof_delay_msecs_stdby =
+				((((1000 * vx6953_ctrl->fps_divider)/
+				vx6953_ctrl->fps) * Q8) / Q10) + 1;
+		}
+		if (vx6953_sensor_setting(UPDATE_PERIODIC, rt) < 0)
+			return rc;
+	}
+	vx6953_ctrl->edof_mode = VX6953_EDOF_APPLICATION;
+	if (vx6953_enable_edof(vx6953_ctrl->edof_mode) < 0)
+		return rc;
+	vx6953_ctrl->curr_res = vx6953_ctrl->pict_res;
+	vx6953_ctrl->sensormode = mode;
+	return rc;
+} /*end of vx6953_raw_snapshot_config*/
+static int32_t vx6953_set_sensor_mode(int mode,
+	int res)
+{
+	int32_t rc = 0;
+	switch (mode) {
+	case SENSOR_PREVIEW_MODE:
+		rc = vx6953_video_config(mode);
+		break;
+	case SENSOR_SNAPSHOT_MODE:
+		rc = vx6953_snapshot_config(mode);
+		break;
+	case SENSOR_RAW_SNAPSHOT_MODE:
+		rc = vx6953_raw_snapshot_config(mode);
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+static int32_t vx6953_power_down(void)
+{
+	return 0;
+}
+
+
+static int vx6953_probe_init_done(const struct msm_camera_sensor_info *data)
+{
+	gpio_set_value_cansleep(data->sensor_reset, 0);
+	gpio_free(data->sensor_reset);
+	return 0;
+}
+static int vx6953_probe_init_sensor(const struct msm_camera_sensor_info *data)
+{
+	int32_t rc = 0;
+	unsigned short chipidl, chipidh;
+	CDBG("%s: %d\n", __func__, __LINE__);
+	rc = gpio_request(data->sensor_reset, "vx6953");
+	CDBG(" vx6953_probe_init_sensor \n");
+	if (!rc) {
+		CDBG("sensor_reset = %d\n", rc);
+		CDBG(" vx6953_probe_init_sensor 1\n");
+		gpio_direction_output(data->sensor_reset, 0);
+		msleep(10);
+		CDBG(" vx6953_probe_init_sensor 1\n");
+		gpio_set_value_cansleep(data->sensor_reset, 1);
+	} else {
+		CDBG(" vx6953_probe_init_sensor 2\n");
+		goto init_probe_done;
+	}
+	msleep(20);
+	CDBG(" vx6953_probe_init_sensor is called\n");
+	/* 3. Read sensor Model ID: */
+	rc = vx6953_i2c_read(0x0000, &chipidh, 1);
+	if (rc < 0) {
+		CDBG(" vx6953_probe_init_sensor 3\n");
+		goto init_probe_fail;
+	}
+	rc = vx6953_i2c_read(0x0001, &chipidl, 1);
+	if (rc < 0) {
+		CDBG(" vx6953_probe_init_sensor4\n");
+		goto init_probe_fail;
+	}
+	CDBG("vx6953 model_id = 0x%x  0x%x\n", chipidh, chipidl);
+	/* 4. Compare sensor ID to VX6953 ID: */
+	if (chipidh != 0x03 || chipidl != 0xB9) {
+		rc = -ENODEV;
+		CDBG("vx6953_probe_init_sensor fail chip id doesnot match\n");
+		goto init_probe_fail;
+	}
+	goto init_probe_done;
+init_probe_fail:
+	CDBG(" vx6953_probe_init_sensor fails\n");
+	vx6953_probe_init_done(data);
+init_probe_done:
+	CDBG(" vx6953_probe_init_sensor finishes\n");
+	return rc;
+}
+/* camsensor_iu060f_vx6953_reset */
+int vx6953_sensor_open_init(const struct msm_camera_sensor_info *data)
+{
+	unsigned short revision_number;
+	int32_t rc = 0;
+
+	CDBG("%s: %d\n", __func__, __LINE__);
+	CDBG("Calling vx6953_sensor_open_init\n");
+	vx6953_ctrl = kzalloc(sizeof(struct vx6953_ctrl_t), GFP_KERNEL);
+	if (!vx6953_ctrl) {
+		CDBG("vx6953_init failed!\n");
+		rc = -ENOMEM;
+		goto init_done;
+	}
+	vx6953_ctrl->fps_divider = 1 * 0x00000400;
+	vx6953_ctrl->pict_fps_divider = 1 * 0x00000400;
+	vx6953_ctrl->set_test = TEST_OFF;
+	vx6953_ctrl->prev_res = QTR_SIZE;
+	vx6953_ctrl->pict_res = FULL_SIZE;
+	vx6953_ctrl->curr_res = INVALID_SIZE;
+	vx6953_ctrl->sensor_type = VX6953_STM5M0EDOF_CUT_2;
+	vx6953_ctrl->edof_mode = VX6953_EDOF_ESTIMATION;
+	if (data)
+		vx6953_ctrl->sensordata = data;
+	if (rc < 0) {
+		CDBG("Calling vx6953_sensor_open_init fail1\n");
+		return rc;
+	}
+	CDBG("%s: %d\n", __func__, __LINE__);
+	/* enable mclk first */
+	msm_camio_clk_rate_set(VX6953_STM5M0EDOF_DEFAULT_MASTER_CLK_RATE);
+	CDBG("%s: %d\n", __func__, __LINE__);
+	rc = vx6953_probe_init_sensor(data);
+	if (rc < 0) {
+		CDBG("Calling vx6953_sensor_open_init fail3\n");
+		goto init_fail;
+	}
+	if (vx6953_i2c_read(0x0002, &revision_number, 1) < 0)
+		return rc;
+	CDBG("sensor revision number major = 0x%x\n", revision_number);
+	if (vx6953_i2c_read(0x0018, &revision_number, 1) < 0)
+		return rc;
+	CDBG("sensor revision number = 0x%x\n", revision_number);
+	if (revision_number == VX6953_REVISION_NUMBER_CUT3) {
+		vx6953_ctrl->sensor_type = VX6953_STM5M0EDOF_CUT_3;
+		CDBG("VX6953 EDof Cut 3.0 sensor\n ");
+	} else if (revision_number == VX6953_REVISION_NUMBER_CUT2) {
+		vx6953_ctrl->sensor_type = VX6953_STM5M0EDOF_CUT_2;
+		CDBG("VX6953 EDof Cut 2.0 sensor\n ");
+	} else {/* Cut1.0 reads 0x00 for register 0x0018*/
+		vx6953_ctrl->sensor_type = VX6953_STM5M0EDOF_CUT_1;
+		CDBG("VX6953 EDof Cut 1.0 sensor\n ");
+	}
+	if (vx6953_ctrl->prev_res == QTR_SIZE) {
+		if (vx6953_sensor_setting(REG_INIT, RES_PREVIEW) < 0)
+			return rc;
+	} else {
+		if (vx6953_sensor_setting(REG_INIT, RES_CAPTURE) < 0)
+			return rc;
+	}
+	vx6953_ctrl->fps = 30*Q8;
+	if (rc < 0)
+		goto init_fail;
+	else
+		goto init_done;
+init_fail:
+	CDBG("init_fail\n");
+	vx6953_probe_init_done(data);
+	kfree(vx6953_ctrl);
+init_done:
+	CDBG("init_done\n");
+	return rc;
+} /*endof vx6953_sensor_open_init*/
+
+static int vx6953_init_client(struct i2c_client *client)
+{
+	/* Initialize the MSM_CAMI2C Chip */
+	init_waitqueue_head(&vx6953_wait_queue);
+	return 0;
+}
+
+static const struct i2c_device_id vx6953_i2c_id[] = {
+	{"vx6953", 0},
+	{ }
+};
+
+static int vx6953_i2c_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	int rc = 0;
+	CDBG("vx6953_probe called!\n");
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		CDBG("i2c_check_functionality failed\n");
+		goto probe_failure;
+	}
+
+	vx6953_sensorw = kzalloc(sizeof(struct vx6953_work_t), GFP_KERNEL);
+	if (!vx6953_sensorw) {
+		CDBG("kzalloc failed.\n");
+		rc = -ENOMEM;
+		goto probe_failure;
+	}
+
+	i2c_set_clientdata(client, vx6953_sensorw);
+	vx6953_init_client(client);
+	vx6953_client = client;
+
+	msleep(50);
+
+	CDBG("vx6953_probe successed! rc = %d\n", rc);
+	return 0;
+
+probe_failure:
+	CDBG("vx6953_probe failed! rc = %d\n", rc);
+	return rc;
+}
+
+static int vx6953_send_wb_info(struct wb_info_cfg *wb)
+{
+	unsigned short read_data;
+	uint8_t temp[8];
+	int rc = 0;
+	int i = 0;
+
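+	/* Pack the AWB gains as big-endian 16-bit values (green, red, blue,
+	green) and push them to the sensor in one sequential write starting
+	at 0x0B8E; the reads below only log the values back for debugging. */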
+	/* red_gain */
+	temp[2] = wb->red_gain >> 8;
+	temp[3] = wb->red_gain & 0xFF;
+
+	/* green_gain */
+	temp[0] = wb->green_gain >> 8;
+	temp[1] = wb->green_gain & 0xFF;
+	temp[6] = temp[0];
+	temp[7] = temp[1];
+
+	/* blue_gain */
+	temp[4] = wb->blue_gain >> 8;
+	temp[5] = wb->blue_gain & 0xFF;
+	rc = vx6953_i2c_write_seq_sensor(0x0B8E, &temp[0], 8);
+
+	for (i = 0; i < 6; i++) {
+		rc = vx6953_i2c_read(0x0B8E + i, &read_data, 1);
+		CDBG("%s addr 0x%x val %d\n", __func__, 0x0B8E + i, read_data);
+	}
+	rc = vx6953_i2c_read(0x0B82, &read_data, 1);
+	CDBG("%s addr 0x%x val %d\n", __func__, 0x0B82, read_data);
+	if (rc < 0)
+		return rc;
+	return rc;
+} /* end of vx6953_send_wb_info */
+
+static int __exit vx6953_remove(struct i2c_client *client)
+{
+	struct vx6953_work_t *sensorw = i2c_get_clientdata(client);
+	free_irq(client->irq, sensorw);
+	vx6953_client = NULL;
+	kfree(sensorw);
+	return 0;
+}
+
+static struct i2c_driver vx6953_i2c_driver = {
+	.id_table = vx6953_i2c_id,
+	.probe  = vx6953_i2c_probe,
+	.remove = __exit_p(vx6953_remove),
+	.driver = {
+		.name = "vx6953",
+	},
+};
+
+int vx6953_sensor_config(void __user *argp)
+{
+	struct sensor_cfg_data cdata;
+	long   rc = 0;
+	if (copy_from_user(&cdata,
+		(void *)argp,
+		sizeof(struct sensor_cfg_data)))
+		return -EFAULT;
+	mutex_lock(&vx6953_mut);
+	CDBG("vx6953_sensor_config: cfgtype = %d\n",
+	cdata.cfgtype);
+		switch (cdata.cfgtype) {
+		case CFG_GET_PICT_FPS:
+			vx6953_get_pict_fps(
+				cdata.cfg.gfps.prevfps,
+				&(cdata.cfg.gfps.pictfps));
+
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_GET_PREV_L_PF:
+			cdata.cfg.prevl_pf =
+			vx6953_get_prev_lines_pf();
+
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_GET_PREV_P_PL:
+			cdata.cfg.prevp_pl =
+				vx6953_get_prev_pixels_pl();
+
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_GET_PICT_L_PF:
+			cdata.cfg.pictl_pf =
+				vx6953_get_pict_lines_pf();
+
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_GET_PICT_P_PL:
+			cdata.cfg.pictp_pl =
+				vx6953_get_pict_pixels_pl();
+
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_GET_PICT_MAX_EXP_LC:
+			cdata.cfg.pict_max_exp_lc =
+				vx6953_get_pict_max_exp_lc();
+
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_SET_FPS:
+		case CFG_SET_PICT_FPS:
+			rc = vx6953_set_fps(&(cdata.cfg.fps));
+			break;
+
+		case CFG_SET_EXP_GAIN:
+			rc =
+				vx6953_write_exp_gain(
+					cdata.cfg.exp_gain.gain,
+					cdata.cfg.exp_gain.line);
+			break;
+
+		case CFG_SET_PICT_EXP_GAIN:
+			rc =
+				vx6953_set_pict_exp_gain(
+				cdata.cfg.exp_gain.gain,
+				cdata.cfg.exp_gain.line);
+			break;
+
+		case CFG_SET_MODE:
+			rc = vx6953_set_sensor_mode(cdata.mode,
+					cdata.rs);
+			break;
+
+		case CFG_PWR_DOWN:
+			rc = vx6953_power_down();
+			break;
+
+		case CFG_MOVE_FOCUS:
+			rc =
+				vx6953_move_focus(
+				cdata.cfg.focus.dir,
+				cdata.cfg.focus.steps);
+			break;
+
+		case CFG_SET_DEFAULT_FOCUS:
+			rc =
+				vx6953_set_default_focus(
+				cdata.cfg.focus.steps);
+			break;
+
+		case CFG_SET_EFFECT:
+			rc = vx6953_set_default_focus(
+				cdata.cfg.effect);
+			break;
+
+
+		case CFG_SEND_WB_INFO:
+			rc = vx6953_send_wb_info(
+				&(cdata.cfg.wb_info));
+			break;
+
+		default:
+			rc = -EFAULT;
+			break;
+		}
+
+	mutex_unlock(&vx6953_mut);
+
+	return rc;
+}
+
+static int vx6953_sensor_release(void)
+{
+	int rc = -EBADF;
+	mutex_lock(&vx6953_mut);
+	vx6953_power_down();
+	gpio_direction_output(vx6953_ctrl->sensordata->sensor_reset, 0);
+	gpio_free(vx6953_ctrl->sensordata->sensor_reset);
+	kfree(vx6953_ctrl);
+	vx6953_ctrl = NULL;
+	CDBG("vx6953_release completed\n");
+	mutex_unlock(&vx6953_mut);
+
+	return rc;
+}
+
+static int vx6953_sensor_probe(const struct msm_camera_sensor_info *info,
+		struct msm_sensor_ctrl *s)
+{
+	int rc = 0;
+	rc = i2c_add_driver(&vx6953_i2c_driver);
+	if (rc < 0 || vx6953_client == NULL) {
+		rc = -ENOTSUPP;
+		goto probe_fail;
+	}
+	msm_camio_clk_rate_set(24000000);
+	rc = vx6953_probe_init_sensor(info);
+	if (rc < 0)
+		goto probe_fail;
+	s->s_init = vx6953_sensor_open_init;
+	s->s_release = vx6953_sensor_release;
+	s->s_config  = vx6953_sensor_config;
+	s->s_mount_angle  = 0;
+	vx6953_probe_init_done(info);
+	return rc;
+
+probe_fail:
+	CDBG("vx6953_sensor_probe: SENSOR PROBE FAILS!\n");
+	return rc;
+}
+
+static int __vx6953_probe(struct platform_device *pdev)
+{
+	return msm_camera_drv_start(pdev, vx6953_sensor_probe);
+}
+
+static struct platform_driver msm_camera_driver = {
+	.probe = __vx6953_probe,
+	.driver = {
+		.name = "msm_camera_vx6953",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init vx6953_init(void)
+{
+	return platform_driver_register(&msm_camera_driver);
+}
+
+module_init(vx6953_init);
+
+static void __exit vx6953_exit(void)
+{
+	platform_driver_unregister(&msm_camera_driver);
+	i2c_del_driver(&vx6953_i2c_driver);
+}
+module_exit(vx6953_exit);
+
+
diff --git a/drivers/media/video/msm/vx6953.h b/drivers/media/video/msm/vx6953.h
new file mode 100644
index 0000000..0e12063
--- /dev/null
+++ b/drivers/media/video/msm/vx6953.h
@@ -0,0 +1,136 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef VX6953_H
+#define VX6953_H
+#include <linux/types.h>
+#include <mach/board.h>
+extern struct vx6953_reg vx6953_regs;
+struct reg_struct_init {
+	uint8_t reg_0x0112;      /* 0x0112*/
+	uint8_t reg_0x0113;      /* 0x0113*/
+	uint8_t vt_pix_clk_div;  /* 0x0301*/
+	uint8_t pre_pll_clk_div; /* 0x0305*/
+	uint8_t pll_multiplier;  /* 0x0307*/
+	uint8_t op_pix_clk_div;  /* 0x0309*/
+	uint8_t reg_0x3030;      /*0x3030*/
+	uint8_t reg_0x0111;      /*0x0111*/
+	uint8_t reg_0x0b00;      /*0x0b00*/
+	uint8_t reg_0x3001;      /*0x3001*/
+	uint8_t reg_0x3004;      /*0x3004*/
+	uint8_t reg_0x3007;      /*0x3007*/
+	uint8_t reg_0x3016;      /*0x3016*/
+	uint8_t reg_0x301d;      /*0x301d*/
+	uint8_t reg_0x317e;      /*0x317E*/
+	uint8_t reg_0x317f;      /*0x317F*/
+	uint8_t reg_0x3400;      /*0x3400*/
+	uint8_t reg_0x0b06;      /*0x0b06*/
+	uint8_t reg_0x0b07;      /*0x0b07*/
+	uint8_t reg_0x0b08;      /*0x0b08*/
+	uint8_t reg_0x0b09;      /*0x0b09*/
+	uint8_t reg_0x0136;
+	uint8_t reg_0x0137;
+	/* Edof */
+	uint8_t reg_0x0b83;      /*0x0b83*/
+	uint8_t reg_0x0b84;      /*0x0b84*/
+	uint8_t reg_0x0b85;      /*0x0b85*/
+	uint8_t reg_0x0b88;      /*0x0b88*/
+	uint8_t reg_0x0b89;      /*0x0b89*/
+	uint8_t reg_0x0b8a;      /*0x0b8a*/
+	};
+struct reg_struct {
+	uint8_t coarse_integration_time_hi; /*REG_COARSE_INTEGRATION_TIME_HI*/
+	uint8_t coarse_integration_time_lo; /*REG_COARSE_INTEGRATION_TIME_LO*/
+	uint8_t analogue_gain_code_global;
+	uint8_t frame_length_lines_hi; /* 0x0340*/
+	uint8_t frame_length_lines_lo; /* 0x0341*/
+	uint8_t line_length_pck_hi;    /* 0x0342*/
+	uint8_t line_length_pck_lo;    /* 0x0343*/
+	uint8_t reg_0x3005;   /* 0x3005*/
+	uint8_t reg_0x3010;  /* 0x3010*/
+	uint8_t reg_0x3011;  /* 0x3011*/
+	uint8_t reg_0x301a;  /* 0x301a*/
+	uint8_t reg_0x3035;  /* 0x3035*/
+	uint8_t reg_0x3036;   /* 0x3036*/
+	uint8_t reg_0x3041;  /*0x3041*/
+	uint8_t reg_0x3042;  /*0x3042*/
+	uint8_t reg_0x3045;  /*0x3045*/
+	uint8_t reg_0x0b80;   /* 0x0b80*/
+	uint8_t reg_0x0900;   /*0x0900*/
+	uint8_t reg_0x0901;   /* 0x0901*/
+	uint8_t reg_0x0902;   /*0x0902*/
+	uint8_t reg_0x0383;   /*0x0383*/
+	uint8_t reg_0x0387;   /* 0x0387*/
+	uint8_t reg_0x034c;   /* 0x034c*/
+	uint8_t reg_0x034d;   /*0x034d*/
+	uint8_t reg_0x034e;   /* 0x034e*/
+	uint8_t reg_0x034f;   /* 0x034f*/
+	uint8_t reg_0x1716; /*0x1716*/
+	uint8_t reg_0x1717; /*0x1717*/
+	uint8_t reg_0x1718; /*0x1718*/
+	uint8_t reg_0x1719; /*0x1719*/
+	uint8_t reg_0x3210;/*0x3210*/
+	uint8_t reg_0x111; /*0x111*/
+	uint8_t reg_0x3410;  /*0x3410*/
+	uint8_t reg_0x3098;
+	uint8_t reg_0x309D;
+	uint8_t reg_0x0200;
+	uint8_t reg_0x0201;
+	};
+struct vx6953_i2c_reg_conf {
+	unsigned short waddr;
+	unsigned short wdata;
+};
+
+enum vx6953_test_mode_t {
+	TEST_OFF,
+	TEST_1,
+	TEST_2,
+	TEST_3
+};
+
+enum vx6953_resolution_t {
+	QTR_SIZE,
+	FULL_SIZE,
+	INVALID_SIZE
+};
+enum vx6953_setting {
+	RES_PREVIEW,
+	RES_CAPTURE
+};
+enum mt9p012_reg_update {
+	/* Sensor registers that need to be updated during initialization */
+	REG_INIT,
+	/* Sensor registers that need periodic I2C writes */
+	UPDATE_PERIODIC,
+	/* All the sensor Registers will be updated */
+	UPDATE_ALL,
+	/* Not valid update */
+	UPDATE_INVALID
+};
+
+enum sensor_revision_t {
+	VX6953_STM5M0EDOF_CUT_1,
+	VX6953_STM5M0EDOF_CUT_2,
+	VX6953_STM5M0EDOF_CUT_3
+};
+enum edof_mode_t {
+	VX6953_EDOF_DISABLE,       /* 0x00 */
+	VX6953_EDOF_APPLICATION,   /* 0x01 */
+	VX6953_EDOF_ESTIMATION     /* 0x02 */
+};
+struct vx6953_reg {
+	const struct reg_struct_init  *reg_pat_init;
+	const struct reg_struct  *reg_pat;
+};
+#endif /* VX6953_H */
diff --git a/drivers/media/video/msm/vx6953_reg.c b/drivers/media/video/msm/vx6953_reg.c
new file mode 100644
index 0000000..48fc71f
--- /dev/null
+++ b/drivers/media/video/msm/vx6953_reg.c
@@ -0,0 +1,135 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+
+#include "vx6953.h"
+const struct reg_struct_init vx6953_reg_init[1] = {
+	{
+		10,			/*REG = 0x0112 , 10 bit */
+		10,			/*REG = 0x0113*/
+		9,			/*REG = 0x0301 vt_pix_clk_div*/
+		4,		/*REG = 0x0305 pre_pll_clk_div*/
+		133,		/*REG = 0x0307 pll_multiplier*/
+		10,		/*REG = 0x0309 op_pix_clk_div*/
+		0x08,		/*REG = 0x3030*/
+		0x02,		/*REG = 0x0111*/
+		0x01,		/*REG = 0x0b00 ,lens shading off */
+		0x30,		/*REG = 0x3001*/
+		0x33,		/*REG = 0x3004*/
+		0x09,		/*REG = 0x3007*/
+		0x1F,		/*REG = 0x3016*/
+		0x03,		/*REG = 0x301d*/
+		0x11,		/*REG = 0x317E*/
+		0x09,		/*REG = 0x317F*/
+		0x38,		/*REG = 0x3400*/
+		0x00,		/*REG_0x0b06*/
+		0x80,		/*REG_0x0b07*/
+		0x01,		/*REG_0x0b08*/
+		0x4F,		/*REG_0x0b09*/
+		0x18,		/*REG_0x0136*/
+		0x00,		/*REG_0x0137*/
+		0x20,		/*REG = 0x0b83*/
+		0x90,		/*REG = 0x0b84*/
+		0x20,		/*REG = 0x0b85*/
+		0x80,		/*REG = 0x0b88*/
+		0x00,		/*REG = 0x0b89*/
+		0x00,		/*REG = 0x0b8a*/
+	}
+};
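+/* Indexed by enum vx6953_setting: [RES_PREVIEW] holds the quarter-size
+settings, [RES_CAPTURE] the full-size snapshot settings. */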
+const struct reg_struct vx6953_reg_pat[2] = {
+	{/* Preview */
+		0x03,	/*REG = 0x0202 coarse integration_time_hi*/
+		0xd0,	/*REG = 0x0203 coarse_integration_time_lo*/
+		0xc0,	/*REG = 0x0205 analogue_gain_code_global*/
+		0x03,	/*REG = 0x0340 frame_length_lines_hi*/
+		0xf0,	/*REG = 0x0341 frame_length_lines_lo*/
+		0x0b,	/*REG = 0x0342  line_length_pck_hi*/
+		0x74,	/*REG = 0x0343  line_length_pck_lo*/
+		0x03,	/*REG = 0x3005*/
+		0x00,	/*REG = 0x3010*/
+		0x01,	/*REG = 0x3011*/
+		0x6a,	/*REG = 0x301a*/
+		0x03,	/*REG = 0x3035*/
+		0x2c,	/*REG = 0x3036*/
+		0x00,	/*REG = 0x3041*/
+		0x24,	/*REG = 0x3042*/
+		0x81,	/*REG = 0x3045*/
+		0x02,	/*REG = 0x0b80 edof estimate*/
+		0x01,	/*REG = 0x0900*/
+		0x22,	/*REG = 0x0901*/
+		0x04,	/*REG = 0x0902*/
+		0x03,	/*REG = 0x0383*/
+		0x03,	/*REG = 0x0387*/
+		0x05,	/*REG = 0x034c*/
+		0x18,	/*REG = 0x034d*/
+		0x03,	/*REG = 0x034e*/
+		0xd4,	/*REG = 0x034f*/
+		0x02,	/*0x1716*/
+		0x04,	/*0x1717*/
+		0x08,	/*0x1718*/
+		0x2c,	/*0x1719*/
+		0x01,   /*0x3210*/
+		0x02,   /*0x111*/
+		0x01,   /*0x3410*/
+		0x01,   /*0x3098*/
+		0x05,   /*0x309D*/
+		0x02,
+		0x04,
+	},
+	{ /* Snapshot */
+		0x07,/*REG = 0x0202 coarse_integration_time_hi*/
+		0x00,/*REG = 0x0203 coarse_integration_time_lo*/
+		0xc0,/*REG = 0x0205 analogue_gain_code_global*/
+		0x07,/*REG = 0x0340 frame_length_lines_hi*/
+		0xd0,/*REG = 0x0341 frame_length_lines_lo*/
+		0x0b,/*REG = 0x0342 line_length_pck_hi*/
+		0x8c,/*REG = 0x0343 line_length_pck_lo*/
+		0x01,/*REG = 0x3005*/
+		0x00,/*REG = 0x3010*/
+		0x00,/*REG = 0x3011*/
+		0x55,/*REG = 0x301a*/
+		0x01,/*REG = 0x3035*/
+		0x23,/*REG = 0x3036*/
+		0x00,/*REG = 0x3041*/
+		0x24,/*REG = 0x3042*/
+		0xb7,/*REG = 0x3045*/
+		0x01,/*REG = 0x0b80 edof application*/
+		0x00,/*REG = 0x0900*/
+		0x00,/*REG = 0x0901*/
+		0x00,/*REG = 0x0902*/
+		0x01,/*REG = 0x0383*/
+		0x01,/*REG = 0x0387*/
+		0x0A,/*REG = 0x034c*/
+		0x30,/*REG = 0x034d*/
+		0x07,/*REG = 0x034e*/
+		0xA8,/*REG = 0x034f*/
+		0x02,/*0x1716*/
+		0x0d,/*0x1717*/
+		0x07,/*0x1718*/
+		0x7d,/*0x1719*/
+		0x01,/*0x3210*/
+		0x02,/*0x111*/
+		0x01,/*0x3410*/
+		0x01,/*0x3098*/
+		0x05, /*0x309D*/
+		0x02,
+		0x00,
+	}
+};
+
+
+
+struct vx6953_reg vx6953_regs = {
+	.reg_pat_init = &vx6953_reg_init[0],
+	.reg_pat = &vx6953_reg_pat[0],
+};
diff --git a/drivers/media/video/msm/vx6953_reg_v4l2.c b/drivers/media/video/msm/vx6953_reg_v4l2.c
new file mode 100644
index 0000000..f16054b
--- /dev/null
+++ b/drivers/media/video/msm/vx6953_reg_v4l2.c
@@ -0,0 +1,135 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+
+#include "vx6953_v4l2.h"
+const struct reg_struct_init vx6953_reg_init[1] = {
+	{
+		10,			/*REG = 0x0112 , 10 bit */
+		10,			/*REG = 0x0113*/
+		9,			/*REG = 0x0301 vt_pix_clk_div*/
+		4,		/*REG = 0x0305 pre_pll_clk_div*/
+		133,		/*REG = 0x0307 pll_multiplier*/
+		10,		/*REG = 0x0309 op_pix_clk_div*/
+		0x08,		/*REG = 0x3030*/
+		0x02,		/*REG = 0x0111*/
+		0x01,		/*REG = 0x0b00 ,lens shading off */
+		0x30,		/*REG = 0x3001*/
+		0x33,		/*REG = 0x3004*/
+		0x09,		/*REG = 0x3007*/
+		0x1F,		/*REG = 0x3016*/
+		0x03,		/*REG = 0x301d*/
+		0x11,		/*REG = 0x317E*/
+		0x09,		/*REG = 0x317F*/
+		0x38,		/*REG = 0x3400*/
+		0x00,		/*REG_0x0b06*/
+		0x80,		/*REG_0x0b07*/
+		0x01,		/*REG_0x0b08*/
+		0x4F,		/*REG_0x0b09*/
+		0x18,		/*REG_0x0136*/
+		0x00,		/*REG_0x0137*/
+		0x20,		/*REG = 0x0b83*/
+		0x90,		/*REG = 0x0b84*/
+		0x20,		/*REG = 0x0b85*/
+		0x80,		/*REG = 0x0b88*/
+		0x00,		/*REG = 0x0b89*/
+		0x00,		/*REG = 0x0b8a*/
+	}
+};
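+/* Indexed by enum vx6953_setting: [RES_PREVIEW] holds the quarter-size
+settings, [RES_CAPTURE] the full-size snapshot settings. */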
+const struct reg_struct vx6953_reg_pat[2] = {
+	{/* Preview */
+		0x03,	/*REG = 0x0202 coarse integration_time_hi*/
+		0xd0,	/*REG = 0x0203 coarse_integration_time_lo*/
+		0xc0,	/*REG = 0x0205 analogue_gain_code_global*/
+		0x03,	/*REG = 0x0340 frame_length_lines_hi*/
+		0xf0,	/*REG = 0x0341 frame_length_lines_lo*/
+		0x0b,	/*REG = 0x0342  line_length_pck_hi*/
+		0xa5,	/*REG = 0x0343  line_length_pck_lo*/
+		0x03,	/*REG = 0x3005*/
+		0x00,	/*REG = 0x3010*/
+		0x01,	/*REG = 0x3011*/
+		0x6a,	/*REG = 0x301a*/
+		0x03,	/*REG = 0x3035*/
+		0x2c,	/*REG = 0x3036*/
+		0x00,	/*REG = 0x3041*/
+		0x24,	/*REG = 0x3042*/
+		0x81,	/*REG = 0x3045*/
+		0x02,	/*REG = 0x0b80 edof estimate*/
+		0x01,	/*REG = 0x0900*/
+		0x22,	/*REG = 0x0901*/
+		0x04,	/*REG = 0x0902*/
+		0x03,	/*REG = 0x0383*/
+		0x03,	/*REG = 0x0387*/
+		0x05,	/*REG = 0x034c*/
+		0x18,	/*REG = 0x034d*/
+		0x03,	/*REG = 0x034e*/
+		0xd4,	/*REG = 0x034f*/
+		0x02,	/*0x1716*/
+		0x04,	/*0x1717*/
+		0x08,	/*0x1718*/
+		0x80,	/*0x1719*/
+		0x01,   /*0x3210*/
+		0x02,   /*0x111*/
+		0x01,   /*0x3410*/
+		0x01,   /*0x3098*/
+		0x05,   /*0x309D*/
+		0x02,
+		0x04,
+	},
+	{ /* Snapshot */
+		0x07,/*REG = 0x0202 coarse_integration_time_hi*/
+		0x00,/*REG = 0x0203 coarse_integration_time_lo*/
+		0xc0,/*REG = 0x0205 analogue_gain_code_global*/
+		0x07,/*REG = 0x0340 frame_length_lines_hi*/
+		0xd0,/*REG = 0x0341 frame_length_lines_lo*/
+		0x0b,/*REG = 0x0342 line_length_pck_hi*/
+		0x8c,/*REG = 0x0343 line_length_pck_lo*/
+		0x01,/*REG = 0x3005*/
+		0x00,/*REG = 0x3010*/
+		0x00,/*REG = 0x3011*/
+		0x55,/*REG = 0x301a*/
+		0x01,/*REG = 0x3035*/
+		0x23,/*REG = 0x3036*/
+		0x00,/*REG = 0x3041*/
+		0x24,/*REG = 0x3042*/
+		0xb7,/*REG = 0x3045*/
+		0x01,/*REG = 0x0b80 edof application*/
+		0x00,/*REG = 0x0900*/
+		0x00,/*REG = 0x0901*/
+		0x00,/*REG = 0x0902*/
+		0x01,/*REG = 0x0383*/
+		0x01,/*REG = 0x0387*/
+		0x0A,/*REG = 0x034c*/
+		0x30,/*REG = 0x034d*/
+		0x07,/*REG = 0x034e*/
+		0xA8,/*REG = 0x034f*/
+		0x02,/*0x1716*/
+		0x0d,/*0x1717*/
+		0x07,/*0x1718*/
+		0x7d,/*0x1719*/
+		0x01,/*0x3210*/
+		0x02,/*0x111*/
+		0x01,/*0x3410*/
+		0x01,/*0x3098*/
+		0x05, /*0x309D*/
+		0x02,
+		0x00,
+	}
+};
+
+
+
+struct vx6953_reg vx6953_regs = {
+	.reg_pat_init = &vx6953_reg_init[0],
+	.reg_pat = &vx6953_reg_pat[0],
+};
diff --git a/drivers/media/video/msm/vx6953_v4l2.c b/drivers/media/video/msm/vx6953_v4l2.c
new file mode 100644
index 0000000..2e5e39b
--- /dev/null
+++ b/drivers/media/video/msm/vx6953_v4l2.c
@@ -0,0 +1,4149 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <media/v4l2-subdev.h>
+#include <media/msm_camera.h>
+#include <mach/gpio.h>
+#include <mach/camera.h>
+#include <linux/slab.h>
+#include "vx6953_v4l2.h"
+#include "msm.h"
+
+#define V4L2_IDENT_VX6953  50000
+
+/*=============================================================
+	SENSOR REGISTER DEFINES
+==============================================================*/
+
+#define REG_GROUPED_PARAMETER_HOLD			0x0104
+#define GROUPED_PARAMETER_HOLD_OFF			0x00
+#define GROUPED_PARAMETER_HOLD				0x01
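+/* Register writes issued while GROUPED_PARAMETER_HOLD is set are latched
+together once the hold is released, so exposure and gain update coherently
+within a single frame. */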
+#define REG_MODE_SELECT					0x0100
+#define MODE_SELECT_STANDBY_MODE			0x00
+#define MODE_SELECT_STREAM				0x01
+/* Integration Time */
+#define REG_COARSE_INTEGRATION_TIME_HI			0x0202
+#define REG_COARSE_INTEGRATION_TIME_LO			0x0203
+/* Gain */
+#define REG_ANALOGUE_GAIN_CODE_GLOBAL_HI		0x0204
+#define REG_ANALOGUE_GAIN_CODE_GLOBAL_LO		0x0205
+/* Digital Gain */
+#define REG_DIGITAL_GAIN_GREEN_R_HI			0x020E
+#define REG_DIGITAL_GAIN_GREEN_R_LO			0x020F
+#define REG_DIGITAL_GAIN_RED_HI				0x0210
+#define REG_DIGITAL_GAIN_RED_LO				0x0211
+#define REG_DIGITAL_GAIN_BLUE_HI			0x0212
+#define REG_DIGITAL_GAIN_BLUE_LO			0x0213
+#define REG_DIGITAL_GAIN_GREEN_B_HI			0x0214
+#define REG_DIGITAL_GAIN_GREEN_B_LO			0x0215
+/* output bits setting */
+#define REG_0x0112					0x0112
+#define REG_0x0113					0x0113
+/* PLL registers */
+#define REG_VT_PIX_CLK_DIV				0x0301
+#define REG_PRE_PLL_CLK_DIV				0x0305
+#define REG_PLL_MULTIPLIER				0x0307
+#define REG_OP_PIX_CLK_DIV				0x0309
+#define REG_0x034c					0x034c
+#define REG_0x034d					0x034d
+#define REG_0x034e					0x034e
+#define REG_0x034f					0x034f
+#define REG_0x0387					0x0387
+#define REG_0x0383					0x0383
+#define REG_FRAME_LENGTH_LINES_HI			0x0340
+#define REG_FRAME_LENGTH_LINES_LO			0x0341
+#define REG_LINE_LENGTH_PCK_HI				0x0342
+#define REG_LINE_LENGTH_PCK_LO				0x0343
+#define REG_0x3030					0x3030
+#define REG_0x0111					0x0111
+#define REG_0x0136					0x0136
+#define REG_0x0137					0x0137
+#define REG_0x0b00					0x0b00
+#define REG_0x3001					0x3001
+#define REG_0x3004					0x3004
+#define REG_0x3007					0x3007
+#define REG_0x301a					0x301a
+#define REG_0x3101					0x3101
+#define REG_0x3364					0x3364
+#define REG_0x3365					0x3365
+#define REG_0x0b83					0x0b83
+#define REG_0x0b84					0x0b84
+#define REG_0x0b85					0x0b85
+#define REG_0x0b88					0x0b88
+#define REG_0x0b89					0x0b89
+#define REG_0x0b8a					0x0b8a
+#define REG_0x3005					0x3005
+#define REG_0x3010					0x3010
+#define REG_0x3036					0x3036
+#define REG_0x3041					0x3041
+#define REG_0x0b80					0x0b80
+#define REG_0x0900					0x0900
+#define REG_0x0901					0x0901
+#define REG_0x0902					0x0902
+#define REG_0x3016					0x3016
+#define REG_0x301d					0x301d
+#define REG_0x317e					0x317e
+#define REG_0x317f					0x317f
+#define REG_0x3400					0x3400
+#define REG_0x303a					0x303a
+#define REG_0x1716					0x1716
+#define REG_0x1717					0x1717
+#define REG_0x1718					0x1718
+#define REG_0x1719					0x1719
+#define REG_0x3006					0x3006
+#define REG_0x301b					0x301b
+#define REG_0x3098					0x3098
+#define REG_0x309d					0x309d
+#define REG_0x3011					0x3011
+#define REG_0x3035					0x3035
+#define REG_0x3045					0x3045
+#define REG_0x3210					0x3210
+#define	REG_0x0111					0x0111
+#define REG_0x3410					0x3410
+/* Test Pattern */
+#define REG_TEST_PATTERN_MODE				0x0601
+
+/*============================================================================
+							 TYPE DECLARATIONS
+============================================================================*/
+
+/* 16bit address - 8 bit context register structure */
+#define	VX6953_STM5M0EDOF_OFFSET	9
+#define	Q8		0x00000100
+#define	Q10		0x00000400
+#define	VX6953_STM5M0EDOF_MAX_SNAPSHOT_EXPOSURE_LINE_COUNT	2922
+#define	VX6953_STM5M0EDOF_DEFAULT_MASTER_CLK_RATE	24000000
+#define	VX6953_STM5M0EDOF_OP_PIXEL_CLOCK_RATE	79800000
+#define	VX6953_STM5M0EDOF_VT_PIXEL_CLOCK_RATE	88670000
+/* Full	Size */
+#define	VX6953_FULL_SIZE_WIDTH	2608
+#define	VX6953_FULL_SIZE_HEIGHT		1960
+#define	VX6953_FULL_SIZE_DUMMY_PIXELS	1
+#define	VX6953_FULL_SIZE_DUMMY_LINES	0
+/* Quarter Size	*/
+#define	VX6953_QTR_SIZE_WIDTH	1304
+#define	VX6953_QTR_SIZE_HEIGHT		980
+#define	VX6953_QTR_SIZE_DUMMY_PIXELS	1
+#define	VX6953_QTR_SIZE_DUMMY_LINES		0
+/* Blanking	as measured	on the scope */
+/* Full	Size */
+#define	VX6953_HRZ_FULL_BLK_PIXELS	348
+#define	VX6953_VER_FULL_BLK_LINES	40
+/* Quarter Size	*/
+#define	VX6953_HRZ_QTR_BLK_PIXELS	1628
+#define	VX6953_VER_QTR_BLK_LINES	28
+#define	MAX_LINE_LENGTH_PCK		8190
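+/* For each mode, line_length_pck = active width + horizontal blanking and
+frame_length_lines = active height + vertical blanking. */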
+#define	VX6953_REVISION_NUMBER_CUT2	0x10	/* revision number for Cut 2.0 */
+#define	VX6953_REVISION_NUMBER_CUT3	0x20	/* revision number for Cut 3.0 */
+/* FIXME: Changes from here */
+struct vx6953_work_t {
+	struct work_struct work;
+};
+
+static struct vx6953_work_t *vx6953_sensorw;
+static struct i2c_client *vx6953_client;
+
+struct vx6953_ctrl_t {
+	const struct  msm_camera_sensor_info *sensordata;
+
+	uint32_t sensormode;
+	uint32_t fps_divider;  /* init to 1 * 0x00000400 */
+	uint32_t pict_fps_divider;  /* init to 1 * 0x00000400 */
+	uint16_t fps;
+
+	int16_t curr_lens_pos;
+	uint16_t curr_step_pos;
+	uint16_t my_reg_gain;
+	uint32_t my_reg_line_count;
+	uint16_t total_lines_per_frame;
+
+	enum vx6953_resolution_t prev_res;
+	enum vx6953_resolution_t pict_res;
+	enum vx6953_resolution_t curr_res;
+	enum vx6953_test_mode_t  set_test;
+	enum sensor_revision_t sensor_type;
+
+	enum edof_mode_t edof_mode;
+
+	unsigned short imgaddr;
+
+	struct v4l2_subdev *sensor_dev;
+	struct vx6953_format *fmt;
+};
+
+
+static uint8_t vx6953_stm5m0edof_delay_msecs_stdby;
+static uint16_t vx6953_stm5m0edof_delay_msecs_stream = 20;
+
+static struct vx6953_ctrl_t *vx6953_ctrl;
+static DECLARE_WAIT_QUEUE_HEAD(vx6953_wait_queue);
+DEFINE_MUTEX(vx6953_mut);
+static struct vx6953_i2c_reg_conf patch_tbl_cut2[] = {
+	{0xFB94, 0},	/*initialise Data Xfer Status reg*/
+	{0xFB95, 0},	/*gain 1	  (0x00)*/
+	{0xFB96, 0},	/*gain 1.07   (0x10)*/
+	{0xFB97, 0},	/*gain 1.14   (0x20)*/
+	{0xFB98, 0},	/*gain 1.23   (0x30)*/
+	{0xFB99, 0},	/*gain 1.33   (0x40)*/
+	{0xFB9A, 0},	/*gain 1.45   (0x50)*/
+	{0xFB9B, 0},	/*gain 1.6    (0x60)*/
+	{0xFB9C, 0},	/*gain 1.78   (0x70)*/
+	{0xFB9D, 2},	/*gain 2	  (0x80)*/
+	{0xFB9E, 2},	/*gain 2.29   (0x90)*/
+	{0xFB9F, 3},	/*gain 2.67   (0xA0)*/
+	{0xFBA0, 3},	/*gain 3.2    (0xB0)*/
+	{0xFBA1, 4},	/*gain 4	  (0xC0)*/
+	{0xFBA2, 7},	/*gain 5.33   (0xD0)*/
+	{0xFBA3, 10},	/*gain 8	  (0xE0)*/
+	{0xFBA4, 11},	/*gain 9.14   (0xE4)*/
+	{0xFBA5, 13},	/*gain 10.67  (0xE8)*/
+	{0xFBA6, 15},	/*gain 12.8   (0xEC)*/
+	{0xFBA7, 19},	/*gain 16     (0xF0)*/
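+	/* The 0xF8xx writes below hold the cut 2.0 patch code that the
+	cut 2 patch routine downloads to the sensor before streaming. */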
+	{0xF800, 0x12},
+	{0xF801, 0x06},
+	{0xF802, 0xf7},
+	{0xF803, 0x90},
+	{0xF804, 0x02},
+	{0xF805, 0x05},
+	{0xF806, 0xe0},
+	{0xF807, 0xff},
+	{0xF808, 0x65},
+	{0xF809, 0x7d},
+	{0xF80A, 0x70},
+	{0xF80B, 0x03},
+	{0xF80C, 0x02},
+	{0xF80D, 0xf9},
+	{0xF80E, 0x1c},
+	{0xF80F, 0x8f},
+	{0xF810, 0x7d},
+	{0xF811, 0xe4},
+	{0xF812, 0xf5},
+	{0xF813, 0x7a},
+	{0xF814, 0x75},
+	{0xF815, 0x78},
+	{0xF816, 0x30},
+	{0xF817, 0x75},
+	{0xF818, 0x79},
+	{0xF819, 0x53},
+	{0xF81A, 0x85},
+	{0xF81B, 0x79},
+	{0xF81C, 0x82},
+	{0xF81D, 0x85},
+	{0xF81E, 0x78},
+	{0xF81F, 0x83},
+	{0xF820, 0xe0},
+	{0xF821, 0xc3},
+	{0xF822, 0x95},
+	{0xF823, 0x7b},
+	{0xF824, 0xf0},
+	{0xF825, 0x74},
+	{0xF826, 0x02},
+	{0xF827, 0x25},
+	{0xF828, 0x79},
+	{0xF829, 0xf5},
+	{0xF82A, 0x79},
+	{0xF82B, 0xe4},
+	{0xF82C, 0x35},
+	{0xF82D, 0x78},
+	{0xF82E, 0xf5},
+	{0xF82F, 0x78},
+	{0xF830, 0x05},
+	{0xF831, 0x7a},
+	{0xF832, 0xe5},
+	{0xF833, 0x7a},
+	{0xF834, 0xb4},
+	{0xF835, 0x08},
+	{0xF836, 0xe3},
+	{0xF837, 0xe5},
+	{0xF838, 0x7d},
+	{0xF839, 0x70},
+	{0xF83A, 0x04},
+	{0xF83B, 0xff},
+	{0xF83C, 0x02},
+	{0xF83D, 0xf8},
+	{0xF83E, 0xe4},
+	{0xF83F, 0xe5},
+	{0xF840, 0x7d},
+	{0xF841, 0xb4},
+	{0xF842, 0x10},
+	{0xF843, 0x05},
+	{0xF844, 0x7f},
+	{0xF845, 0x01},
+	{0xF846, 0x02},
+	{0xF847, 0xf8},
+	{0xF848, 0xe4},
+	{0xF849, 0xe5},
+	{0xF84A, 0x7d},
+	{0xF84B, 0xb4},
+	{0xF84C, 0x20},
+	{0xF84D, 0x05},
+	{0xF84E, 0x7f},
+	{0xF84F, 0x02},
+	{0xF850, 0x02},
+	{0xF851, 0xf8},
+	{0xF852, 0xe4},
+	{0xF853, 0xe5},
+	{0xF854, 0x7d},
+	{0xF855, 0xb4},
+	{0xF856, 0x30},
+	{0xF857, 0x05},
+	{0xF858, 0x7f},
+	{0xF859, 0x03},
+	{0xF85A, 0x02},
+	{0xF85B, 0xf8},
+	{0xF85C, 0xe4},
+	{0xF85D, 0xe5},
+	{0xF85E, 0x7d},
+	{0xF85F, 0xb4},
+	{0xF860, 0x40},
+	{0xF861, 0x04},
+	{0xF862, 0x7f},
+	{0xF863, 0x04},
+	{0xF864, 0x80},
+	{0xF865, 0x7e},
+	{0xF866, 0xe5},
+	{0xF867, 0x7d},
+	{0xF868, 0xb4},
+	{0xF869, 0x50},
+	{0xF86A, 0x04},
+	{0xF86B, 0x7f},
+	{0xF86C, 0x05},
+	{0xF86D, 0x80},
+	{0xF86E, 0x75},
+	{0xF86F, 0xe5},
+	{0xF870, 0x7d},
+	{0xF871, 0xb4},
+	{0xF872, 0x60},
+	{0xF873, 0x04},
+	{0xF874, 0x7f},
+	{0xF875, 0x06},
+	{0xF876, 0x80},
+	{0xF877, 0x6c},
+	{0xF878, 0xe5},
+	{0xF879, 0x7d},
+	{0xF87A, 0xb4},
+	{0xF87B, 0x70},
+	{0xF87C, 0x04},
+	{0xF87D, 0x7f},
+	{0xF87E, 0x07},
+	{0xF87F, 0x80},
+	{0xF880, 0x63},
+	{0xF881, 0xe5},
+	{0xF882, 0x7d},
+	{0xF883, 0xb4},
+	{0xF884, 0x80},
+	{0xF885, 0x04},
+	{0xF886, 0x7f},
+	{0xF887, 0x08},
+	{0xF888, 0x80},
+	{0xF889, 0x5a},
+	{0xF88A, 0xe5},
+	{0xF88B, 0x7d},
+	{0xF88C, 0xb4},
+	{0xF88D, 0x90},
+	{0xF88E, 0x04},
+	{0xF88F, 0x7f},
+	{0xF890, 0x09},
+	{0xF891, 0x80},
+	{0xF892, 0x51},
+	{0xF893, 0xe5},
+	{0xF894, 0x7d},
+	{0xF895, 0xb4},
+	{0xF896, 0xa0},
+	{0xF897, 0x04},
+	{0xF898, 0x7f},
+	{0xF899, 0x0a},
+	{0xF89A, 0x80},
+	{0xF89B, 0x48},
+	{0xF89C, 0xe5},
+	{0xF89D, 0x7d},
+	{0xF89E, 0xb4},
+	{0xF89F, 0xb0},
+	{0xF8A0, 0x04},
+	{0xF8A1, 0x7f},
+	{0xF8A2, 0x0b},
+	{0xF8A3, 0x80},
+	{0xF8A4, 0x3f},
+	{0xF8A5, 0xe5},
+	{0xF8A6, 0x7d},
+	{0xF8A7, 0xb4},
+	{0xF8A8, 0xc0},
+	{0xF8A9, 0x04},
+	{0xF8AA, 0x7f},
+	{0xF8AB, 0x0c},
+	{0xF8AC, 0x80},
+	{0xF8AD, 0x36},
+	{0xF8AE, 0xe5},
+	{0xF8AF, 0x7d},
+	{0xF8B0, 0xb4},
+	{0xF8B1, 0xd0},
+	{0xF8B2, 0x04},
+	{0xF8B3, 0x7f},
+	{0xF8B4, 0x0d},
+	{0xF8B5, 0x80},
+	{0xF8B6, 0x2d},
+	{0xF8B7, 0xe5},
+	{0xF8B8, 0x7d},
+	{0xF8B9, 0xb4},
+	{0xF8BA, 0xe0},
+	{0xF8BB, 0x04},
+	{0xF8BC, 0x7f},
+	{0xF8BD, 0x0e},
+	{0xF8BE, 0x80},
+	{0xF8BF, 0x24},
+	{0xF8C0, 0xe5},
+	{0xF8C1, 0x7d},
+	{0xF8C2, 0xb4},
+	{0xF8C3, 0xe4},
+	{0xF8C4, 0x04},
+	{0xF8C5, 0x7f},
+	{0xF8C6, 0x0f},
+	{0xF8C7, 0x80},
+	{0xF8C8, 0x1b},
+	{0xF8C9, 0xe5},
+	{0xF8CA, 0x7d},
+	{0xF8CB, 0xb4},
+	{0xF8CC, 0xe8},
+	{0xF8CD, 0x04},
+	{0xF8CE, 0x7f},
+	{0xF8CF, 0x10},
+	{0xF8D0, 0x80},
+	{0xF8D1, 0x12},
+	{0xF8D2, 0xe5},
+	{0xF8D3, 0x7d},
+	{0xF8D4, 0xb4},
+	{0xF8D5, 0xec},
+	{0xF8D6, 0x04},
+	{0xF8D7, 0x7f},
+	{0xF8D8, 0x11},
+	{0xF8D9, 0x80},
+	{0xF8DA, 0x09},
+	{0xF8DB, 0xe5},
+	{0xF8DC, 0x7d},
+	{0xF8DD, 0x7f},
+	{0xF8DE, 0x00},
+	{0xF8DF, 0xb4},
+	{0xF8E0, 0xf0},
+	{0xF8E1, 0x02},
+	{0xF8E2, 0x7f},
+	{0xF8E3, 0x12},
+	{0xF8E4, 0x8f},
+	{0xF8E5, 0x7c},
+	{0xF8E6, 0xef},
+	{0xF8E7, 0x24},
+	{0xF8E8, 0x95},
+	{0xF8E9, 0xff},
+	{0xF8EA, 0xe4},
+	{0xF8EB, 0x34},
+	{0xF8EC, 0xfb},
+	{0xF8ED, 0x8f},
+	{0xF8EE, 0x82},
+	{0xF8EF, 0xf5},
+	{0xF8F0, 0x83},
+	{0xF8F1, 0xe4},
+	{0xF8F2, 0x93},
+	{0xF8F3, 0xf5},
+	{0xF8F4, 0x7c},
+	{0xF8F5, 0xf5},
+	{0xF8F6, 0x7b},
+	{0xF8F7, 0xe4},
+	{0xF8F8, 0xf5},
+	{0xF8F9, 0x7a},
+	{0xF8FA, 0x75},
+	{0xF8FB, 0x78},
+	{0xF8FC, 0x30},
+	{0xF8FD, 0x75},
+	{0xF8FE, 0x79},
+	{0xF8FF, 0x53},
+	{0xF900, 0x85},
+	{0xF901, 0x79},
+	{0xF902, 0x82},
+	{0xF903, 0x85},
+	{0xF904, 0x78},
+	{0xF905, 0x83},
+	{0xF906, 0xe0},
+	{0xF907, 0x25},
+	{0xF908, 0x7c},
+	{0xF909, 0xf0},
+	{0xF90A, 0x74},
+	{0xF90B, 0x02},
+	{0xF90C, 0x25},
+	{0xF90D, 0x79},
+	{0xF90E, 0xf5},
+	{0xF90F, 0x79},
+	{0xF910, 0xe4},
+	{0xF911, 0x35},
+	{0xF912, 0x78},
+	{0xF913, 0xf5},
+	{0xF914, 0x78},
+	{0xF915, 0x05},
+	{0xF916, 0x7a},
+	{0xF917, 0xe5},
+	{0xF918, 0x7a},
+	{0xF919, 0xb4},
+	{0xF91A, 0x08},
+	{0xF91B, 0xe4},
+	{0xF91C, 0x02},
+	{0xF91D, 0x18},
+	{0xF91E, 0x32},
+	{0xF91F, 0x22},
+	{0xF920, 0xf0},
+	{0xF921, 0x90},
+	{0xF922, 0xa0},
+	{0xF923, 0xf8},
+	{0xF924, 0xe0},
+	{0xF925, 0x70},
+	{0xF926, 0x02},
+	{0xF927, 0xa3},
+	{0xF928, 0xe0},
+	{0xF929, 0x70},
+	{0xF92A, 0x0a},
+	{0xF92B, 0x90},
+	{0xF92C, 0xa1},
+	{0xF92D, 0x10},
+	{0xF92E, 0xe0},
+	{0xF92F, 0xfe},
+	{0xF930, 0xa3},
+	{0xF931, 0xe0},
+	{0xF932, 0xff},
+	{0xF933, 0x80},
+	{0xF934, 0x04},
+	{0xF935, 0x7e},
+	{0xF936, 0x00},
+	{0xF937, 0x7f},
+	{0xF938, 0x00},
+	{0xF939, 0x8e},
+	{0xF93A, 0x7e},
+	{0xF93B, 0x8f},
+	{0xF93C, 0x7f},
+	{0xF93D, 0x90},
+	{0xF93E, 0x36},
+	{0xF93F, 0x0d},
+	{0xF940, 0xe0},
+	{0xF941, 0x44},
+	{0xF942, 0x02},
+	{0xF943, 0xf0},
+	{0xF944, 0x90},
+	{0xF945, 0x36},
+	{0xF946, 0x0e},
+	{0xF947, 0xe5},
+	{0xF948, 0x7e},
+	{0xF949, 0xf0},
+	{0xF94A, 0xa3},
+	{0xF94B, 0xe5},
+	{0xF94C, 0x7f},
+	{0xF94D, 0xf0},
+	{0xF94E, 0xe5},
+	{0xF94F, 0x3a},
+	{0xF950, 0x60},
+	{0xF951, 0x0c},
+	{0xF952, 0x90},
+	{0xF953, 0x36},
+	{0xF954, 0x09},
+	{0xF955, 0xe0},
+	{0xF956, 0x70},
+	{0xF957, 0x06},
+	{0xF958, 0x90},
+	{0xF959, 0x36},
+	{0xF95A, 0x08},
+	{0xF95B, 0xf0},
+	{0xF95C, 0xf5},
+	{0xF95D, 0x3a},
+	{0xF95E, 0x02},
+	{0xF95F, 0x03},
+	{0xF960, 0x94},
+	{0xF961, 0x22},
+	{0xF962, 0x78},
+	{0xF963, 0x07},
+	{0xF964, 0xe6},
+	{0xF965, 0xd3},
+	{0xF966, 0x94},
+	{0xF967, 0x00},
+	{0xF968, 0x40},
+	{0xF969, 0x16},
+	{0xF96A, 0x16},
+	{0xF96B, 0xe6},
+	{0xF96C, 0x90},
+	{0xF96D, 0x30},
+	{0xF96E, 0xa1},
+	{0xF96F, 0xf0},
+	{0xF970, 0x90},
+	{0xF971, 0x43},
+	{0xF972, 0x83},
+	{0xF973, 0xe0},
+	{0xF974, 0xb4},
+	{0xF975, 0x01},
+	{0xF976, 0x0f},
+	{0xF977, 0x90},
+	{0xF978, 0x43},
+	{0xF979, 0x87},
+	{0xF97A, 0xe0},
+	{0xF97B, 0xb4},
+	{0xF97C, 0x01},
+	{0xF97D, 0x08},
+	{0xF97E, 0x80},
+	{0xF97F, 0x00},
+	{0xF980, 0x90},
+	{0xF981, 0x30},
+	{0xF982, 0xa0},
+	{0xF983, 0x74},
+	{0xF984, 0x01},
+	{0xF985, 0xf0},
+	{0xF986, 0x22},
+	{0xF987, 0xf0},
+	{0xF988, 0x90},
+	{0xF989, 0x35},
+	{0xF98A, 0xba},
+	{0xF98B, 0xe0},
+	{0xF98C, 0xb4},
+	{0xF98D, 0x0a},
+	{0xF98E, 0x0d},
+	{0xF98F, 0xa3},
+	{0xF990, 0xe0},
+	{0xF991, 0xb4},
+	{0xF992, 0x01},
+	{0xF993, 0x08},
+	{0xF994, 0x90},
+	{0xF995, 0xfb},
+	{0xF996, 0x94},
+	{0xF997, 0xe0},
+	{0xF998, 0x90},
+	{0xF999, 0x35},
+	{0xF99A, 0xb8},
+	{0xF99B, 0xf0},
+	{0xF99C, 0xd0},
+	{0xF99D, 0xd0},
+	{0xF99E, 0xd0},
+	{0xF99F, 0x82},
+	{0xF9A0, 0xd0},
+	{0xF9A1, 0x83},
+	{0xF9A2, 0xd0},
+	{0xF9A3, 0xe0},
+	{0xF9A4, 0x32},
+	{0xF9A5, 0x22},
+	{0xF9A6, 0xe5},
+	{0xF9A7, 0x7f},
+	{0xF9A8, 0x45},
+	{0xF9A9, 0x7e},
+	{0xF9AA, 0x60},
+	{0xF9AB, 0x15},
+	{0xF9AC, 0x90},
+	{0xF9AD, 0x01},
+	{0xF9AE, 0x00},
+	{0xF9AF, 0xe0},
+	{0xF9B0, 0x70},
+	{0xF9B1, 0x0f},
+	{0xF9B2, 0x90},
+	{0xF9B3, 0xa0},
+	{0xF9B4, 0xf8},
+	{0xF9B5, 0xe5},
+	{0xF9B6, 0x7e},
+	{0xF9B7, 0xf0},
+	{0xF9B8, 0xa3},
+	{0xF9B9, 0xe5},
+	{0xF9BA, 0x7f},
+	{0xF9BB, 0xf0},
+	{0xF9BC, 0xe4},
+	{0xF9BD, 0xf5},
+	{0xF9BE, 0x7e},
+	{0xF9BF, 0xf5},
+	{0xF9C0, 0x7f},
+	{0xF9C1, 0x22},
+	{0xF9C2, 0x02},
+	{0xF9C3, 0x0e},
+	{0xF9C4, 0x79},
+	{0xF9C5, 0x22},
+	/* Offsets:*/
+	{0x35C6, 0x00},/* FIDDLEDARKCAL*/
+	{0x35C7, 0x00},
+	{0x35C8, 0x01},/*STOREDISTANCEATSTOPSTREAMING*/
+	{0x35C9, 0x20},
+	{0x35CA, 0x01},/*BRUCEFIX*/
+	{0x35CB, 0x62},
+	{0x35CC, 0x01},/*FIXDATAXFERSTATUSREG*/
+	{0x35CD, 0x87},
+	{0x35CE, 0x01},/*FOCUSDISTANCEUPDATE*/
+	{0x35CF, 0xA6},
+	{0x35D0, 0x01},/*SKIPEDOFRESET*/
+	{0x35D1, 0xC2},
+	{0x35D2, 0x00},
+	{0x35D3, 0xFB},
+	{0x35D4, 0x00},
+	{0x35D5, 0x94},
+	{0x35D6, 0x00},
+	{0x35D7, 0xFB},
+	{0x35D8, 0x00},
+	{0x35D9, 0x94},
+	{0x35DA, 0x00},
+	{0x35DB, 0xFB},
+	{0x35DC, 0x00},
+	{0x35DD, 0x94},
+	{0x35DE, 0x00},
+	{0x35DF, 0xFB},
+	{0x35E0, 0x00},
+	{0x35E1, 0x94},
+	{0x35E6, 0x18},/* FIDDLEDARKCAL*/
+	{0x35E7, 0x2F},
+	{0x35E8, 0x03},/* STOREDISTANCEATSTOPSTREAMING*/
+	{0x35E9, 0x93},
+	{0x35EA, 0x18},/* BRUCEFIX*/
+	{0x35EB, 0x99},
+	{0x35EC, 0x00},/* FIXDATAXFERSTATUSREG*/
+	{0x35ED, 0xA3},
+	{0x35EE, 0x21},/* FOCUSDISTANCEUPDATE*/
+	{0x35EF, 0x5B},
+	{0x35F0, 0x0E},/* SKIPEDOFRESET*/
+	{0x35F1, 0x74},
+	{0x35F2, 0x04},
+	{0x35F3, 0x64},
+	{0x35F4, 0x04},
+	{0x35F5, 0x65},
+	{0x35F6, 0x04},
+	{0x35F7, 0x7B},
+	{0x35F8, 0x04},
+	{0x35F9, 0x7C},
+	{0x35FA, 0x04},
+	{0x35FB, 0xDD},
+	{0x35FC, 0x04},
+	{0x35FD, 0xDE},
+	{0x35FE, 0x04},
+	{0x35FF, 0xEF},
+	{0x3600, 0x04},
+	{0x3601, 0xF0},
+	/*Jump/Data:*/
+	{0x35C2, 0x3F},/* Jump Reg*/
+	{0x35C3, 0xFF},/* Jump Reg*/
+	{0x35C4, 0x3F},/* Data Reg*/
+	{0x35C5, 0xC0},/* Data Reg*/
+	{0x35C0, 0x01},/* Enable*/
+
+};
+
+static struct vx6953_i2c_reg_conf edof_tbl[] = {
+	{0xa098, 0x02},
+	{0xa099, 0x87},
+	{0xa09c, 0x00},
+	{0xa09d, 0xc5},
+	{0xa4ec, 0x05},
+	{0xa4ed, 0x05},
+	{0xa4f0, 0x04},
+	{0xa4f1, 0x04},
+	{0xa4f4, 0x04},
+	{0xa4f5, 0x05},
+	{0xa4f8, 0x05},
+	{0xa4f9, 0x07},
+	{0xa4fc, 0x07},
+	{0xa4fd, 0x07},
+	{0xa500, 0x07},
+	{0xa501, 0x07},
+	{0xa504, 0x08},
+	{0xa505, 0x08},
+	{0xa518, 0x01},
+	{0xa519, 0x02},
+	{0xa51c, 0x01},
+	{0xa51d, 0x00},
+	{0xa534, 0x00},
+	{0xa535, 0x04},
+	{0xa538, 0x04},
+	{0xa539, 0x03},
+	{0xa53c, 0x05},
+	{0xa53d, 0x07},
+	{0xa540, 0x07},
+	{0xa541, 0x06},
+	{0xa544, 0x07},
+	{0xa545, 0x06},
+	{0xa548, 0x05},
+	{0xa549, 0x06},
+	{0xa54c, 0x06},
+	{0xa54d, 0x07},
+	{0xa550, 0x07},
+	{0xa551, 0x04},
+	{0xa554, 0x04},
+	{0xa555, 0x04},
+	{0xa558, 0x05},
+	{0xa559, 0x06},
+	{0xa55c, 0x07},
+	{0xa55d, 0x07},
+	{0xa56c, 0x00},
+	{0xa56d, 0x0a},
+	{0xa570, 0x08},
+	{0xa571, 0x05},
+	{0xa574, 0x04},
+	{0xa575, 0x03},
+	{0xa578, 0x04},
+	{0xa579, 0x04},
+	{0xa58c, 0x1f},
+	{0xa58d, 0x1b},
+	{0xa590, 0x17},
+	{0xa591, 0x13},
+	{0xa594, 0x10},
+	{0xa595, 0x0d},
+	{0xa598, 0x0f},
+	{0xa599, 0x11},
+	{0xa59c, 0x03},
+	{0xa59d, 0x03},
+	{0xa5a0, 0x03},
+	{0xa5a1, 0x03},
+	{0xa5a4, 0x03},
+	{0xa5a5, 0x04},
+	{0xa5a8, 0x05},
+	{0xa5a9, 0x00},
+	{0xa5ac, 0x00},
+	{0xa5ad, 0x00},
+	{0xa5b0, 0x00},
+	{0xa5b1, 0x00},
+	{0xa5b4, 0x00},
+	{0xa5b5, 0x00},
+	{0xa5c4, 0x1f},
+	{0xa5c5, 0x13},
+	{0xa5c8, 0x14},
+	{0xa5c9, 0x14},
+	{0xa5cc, 0x14},
+	{0xa5cd, 0x13},
+	{0xa5d0, 0x17},
+	{0xa5d1, 0x1a},
+	{0xa5f4, 0x05},
+	{0xa5f5, 0x05},
+	{0xa5f8, 0x05},
+	{0xa5f9, 0x06},
+	{0xa5fc, 0x06},
+	{0xa5fd, 0x06},
+	{0xa600, 0x06},
+	{0xa601, 0x06},
+	{0xa608, 0x07},
+	{0xa609, 0x08},
+	{0xa60c, 0x08},
+	{0xa60d, 0x07},
+	{0xa63c, 0x00},
+	{0xa63d, 0x02},
+	{0xa640, 0x02},
+	{0xa641, 0x02},
+	{0xa644, 0x02},
+	{0xa645, 0x02},
+	{0xa648, 0x03},
+	{0xa649, 0x04},
+	{0xa64c, 0x0a},
+	{0xa64d, 0x09},
+	{0xa650, 0x08},
+	{0xa651, 0x09},
+	{0xa654, 0x09},
+	{0xa655, 0x0a},
+	{0xa658, 0x0a},
+	{0xa659, 0x0a},
+	{0xa65c, 0x0a},
+	{0xa65d, 0x09},
+	{0xa660, 0x09},
+	{0xa661, 0x09},
+	{0xa664, 0x09},
+	{0xa665, 0x08},
+	{0xa680, 0x01},
+	{0xa681, 0x02},
+	{0xa694, 0x1f},
+	{0xa695, 0x10},
+	{0xa698, 0x0e},
+	{0xa699, 0x0c},
+	{0xa69c, 0x0d},
+	{0xa69d, 0x0d},
+	{0xa6a0, 0x0f},
+	{0xa6a1, 0x11},
+	{0xa6a4, 0x00},
+	{0xa6a5, 0x00},
+	{0xa6a8, 0x00},
+	{0xa6a9, 0x00},
+	{0xa6ac, 0x00},
+	{0xa6ad, 0x00},
+	{0xa6b0, 0x00},
+	{0xa6b1, 0x04},
+	{0xa6b4, 0x04},
+	{0xa6b5, 0x04},
+	{0xa6b8, 0x04},
+	{0xa6b9, 0x04},
+	{0xa6bc, 0x05},
+	{0xa6bd, 0x05},
+	{0xa6c0, 0x1f},
+	{0xa6c1, 0x1f},
+	{0xa6c4, 0x1f},
+	{0xa6c5, 0x1f},
+	{0xa6c8, 0x1f},
+	{0xa6c9, 0x1f},
+	{0xa6cc, 0x1f},
+	{0xa6cd, 0x0b},
+	{0xa6d0, 0x0c},
+	{0xa6d1, 0x0d},
+	{0xa6d4, 0x0d},
+	{0xa6d5, 0x0d},
+	{0xa6d8, 0x11},
+	{0xa6d9, 0x14},
+	{0xa6fc, 0x02},
+	{0xa6fd, 0x03},
+	{0xa700, 0x03},
+	{0xa701, 0x03},
+	{0xa704, 0x03},
+	{0xa705, 0x04},
+	{0xa708, 0x05},
+	{0xa709, 0x02},
+	{0xa70c, 0x02},
+	{0xa70d, 0x02},
+	{0xa710, 0x03},
+	{0xa711, 0x04},
+	{0xa714, 0x04},
+	{0xa715, 0x04},
+	{0xa744, 0x00},
+	{0xa745, 0x03},
+	{0xa748, 0x04},
+	{0xa749, 0x04},
+	{0xa74c, 0x05},
+	{0xa74d, 0x06},
+	{0xa750, 0x07},
+	{0xa751, 0x07},
+	{0xa754, 0x05},
+	{0xa755, 0x05},
+	{0xa758, 0x05},
+	{0xa759, 0x05},
+	{0xa75c, 0x05},
+	{0xa75d, 0x06},
+	{0xa760, 0x07},
+	{0xa761, 0x07},
+	{0xa764, 0x06},
+	{0xa765, 0x05},
+	{0xa768, 0x05},
+	{0xa769, 0x05},
+	{0xa76c, 0x06},
+	{0xa76d, 0x07},
+	{0xa77c, 0x00},
+	{0xa77d, 0x05},
+	{0xa780, 0x05},
+	{0xa781, 0x05},
+	{0xa784, 0x05},
+	{0xa785, 0x04},
+	{0xa788, 0x05},
+	{0xa789, 0x06},
+	{0xa79c, 0x1f},
+	{0xa79d, 0x15},
+	{0xa7a0, 0x13},
+	{0xa7a1, 0x10},
+	{0xa7a4, 0x0f},
+	{0xa7a5, 0x0d},
+	{0xa7a8, 0x11},
+	{0xa7a9, 0x14},
+	{0xa7ac, 0x02},
+	{0xa7ad, 0x02},
+	{0xa7b0, 0x02},
+	{0xa7b1, 0x02},
+	{0xa7b4, 0x02},
+	{0xa7b5, 0x03},
+	{0xa7b8, 0x03},
+	{0xa7b9, 0x00},
+	{0xa7bc, 0x00},
+	{0xa7bd, 0x00},
+	{0xa7c0, 0x00},
+	{0xa7c1, 0x00},
+	{0xa7c4, 0x00},
+	{0xa7c5, 0x00},
+	{0xa7d4, 0x1f},
+	{0xa7d5, 0x0d},
+	{0xa7d8, 0x0f},
+	{0xa7d9, 0x10},
+	{0xa7dc, 0x10},
+	{0xa7dd, 0x10},
+	{0xa7e0, 0x13},
+	{0xa7e1, 0x16},
+	{0xa7f4, 0x00},
+	{0xa7f5, 0x03},
+	{0xa7f8, 0x04},
+	{0xa7f9, 0x04},
+	{0xa7fc, 0x04},
+	{0xa7fd, 0x03},
+	{0xa800, 0x03},
+	{0xa801, 0x03},
+	{0xa804, 0x03},
+	{0xa805, 0x03},
+	{0xa808, 0x03},
+	{0xa809, 0x03},
+	{0xa80c, 0x03},
+	{0xa80d, 0x04},
+	{0xa810, 0x04},
+	{0xa811, 0x0a},
+	{0xa814, 0x0a},
+	{0xa815, 0x0a},
+	{0xa818, 0x0f},
+	{0xa819, 0x14},
+	{0xa81c, 0x14},
+	{0xa81d, 0x14},
+	{0xa82c, 0x00},
+	{0xa82d, 0x04},
+	{0xa830, 0x02},
+	{0xa831, 0x00},
+	{0xa834, 0x00},
+	{0xa835, 0x00},
+	{0xa838, 0x00},
+	{0xa839, 0x00},
+	{0xa840, 0x1f},
+	{0xa841, 0x1f},
+	{0xa848, 0x1f},
+	{0xa849, 0x1f},
+	{0xa84c, 0x1f},
+	{0xa84d, 0x0c},
+	{0xa850, 0x0c},
+	{0xa851, 0x0c},
+	{0xa854, 0x0c},
+	{0xa855, 0x0c},
+	{0xa858, 0x0c},
+	{0xa859, 0x0c},
+	{0xa85c, 0x0c},
+	{0xa85d, 0x0c},
+	{0xa860, 0x0c},
+	{0xa861, 0x0c},
+	{0xa864, 0x0c},
+	{0xa865, 0x0c},
+	{0xa868, 0x0c},
+	{0xa869, 0x0c},
+	{0xa86c, 0x0c},
+	{0xa86d, 0x0c},
+	{0xa870, 0x0c},
+	{0xa871, 0x0c},
+	{0xa874, 0x0c},
+	{0xa875, 0x0c},
+	{0xa878, 0x1f},
+	{0xa879, 0x1f},
+	{0xa87c, 0x1f},
+	{0xa87d, 0x1f},
+	{0xa880, 0x1f},
+	{0xa881, 0x1f},
+	{0xa884, 0x1f},
+	{0xa885, 0x0c},
+	{0xa888, 0x0c},
+	{0xa889, 0x0c},
+	{0xa88c, 0x0c},
+	{0xa88d, 0x0c},
+	{0xa890, 0x0c},
+	{0xa891, 0x0c},
+	{0xa898, 0x1f},
+	{0xa899, 0x1f},
+	{0xa8a0, 0x1f},
+	{0xa8a1, 0x1f},
+	{0xa8a4, 0x1f},
+	{0xa8a5, 0x0c},
+	{0xa8a8, 0x0c},
+	{0xa8a9, 0x0c},
+	{0xa8ac, 0x0c},
+	{0xa8ad, 0x0c},
+	{0xa8b0, 0x0c},
+	{0xa8b1, 0x0c},
+	{0xa8b4, 0x0c},
+	{0xa8b5, 0x0c},
+	{0xa8b8, 0x0c},
+	{0xa8b9, 0x0c},
+	{0xa8bc, 0x0c},
+	{0xa8bd, 0x0c},
+	{0xa8c0, 0x0c},
+	{0xa8c1, 0x0c},
+	{0xa8c4, 0x0c},
+	{0xa8c5, 0x0c},
+	{0xa8c8, 0x0c},
+	{0xa8c9, 0x0c},
+	{0xa8cc, 0x0c},
+	{0xa8cd, 0x0c},
+	{0xa8d0, 0x1f},
+	{0xa8d1, 0x1f},
+	{0xa8d4, 0x1f},
+	{0xa8d5, 0x1f},
+	{0xa8d8, 0x1f},
+	{0xa8d9, 0x1f},
+	{0xa8dc, 0x1f},
+	{0xa8dd, 0x0c},
+	{0xa8e0, 0x0c},
+	{0xa8e1, 0x0c},
+	{0xa8e4, 0x0c},
+	{0xa8e5, 0x0c},
+	{0xa8e8, 0x0c},
+	{0xa8e9, 0x0c},
+	{0xa8f0, 0x1f},
+	{0xa8f1, 0x1f},
+	{0xa8f8, 0x1f},
+	{0xa8f9, 0x1f},
+	{0xa8fc, 0x1f},
+	{0xa8fd, 0x0c},
+	{0xa900, 0x0c},
+	{0xa901, 0x0c},
+	{0xa904, 0x0c},
+	{0xa905, 0x0c},
+	{0xa908, 0x0c},
+	{0xa909, 0x0c},
+	{0xa90c, 0x0c},
+	{0xa90d, 0x0c},
+	{0xa910, 0x0c},
+	{0xa911, 0x0c},
+	{0xa914, 0x0c},
+	{0xa915, 0x0c},
+	{0xa918, 0x0c},
+	{0xa919, 0x0c},
+	{0xa91c, 0x0c},
+	{0xa91d, 0x0c},
+	{0xa920, 0x0c},
+	{0xa921, 0x0c},
+	{0xa924, 0x0c},
+	{0xa925, 0x0c},
+	{0xa928, 0x1f},
+	{0xa929, 0x1f},
+	{0xa92c, 0x1f},
+	{0xa92d, 0x1f},
+	{0xa930, 0x1f},
+	{0xa931, 0x1f},
+	{0xa934, 0x1f},
+	{0xa935, 0x0c},
+	{0xa938, 0x0c},
+	{0xa939, 0x0c},
+	{0xa93c, 0x0c},
+	{0xa93d, 0x0c},
+	{0xa940, 0x0c},
+	{0xa941, 0x0c},
+	{0xa96c, 0x0d},
+	{0xa96d, 0x16},
+	{0xa970, 0x19},
+	{0xa971, 0x0e},
+	{0xa974, 0x16},
+	{0xa975, 0x1a},
+	{0xa978, 0x0d},
+	{0xa979, 0x15},
+	{0xa97c, 0x19},
+	{0xa97d, 0x0d},
+	{0xa980, 0x15},
+	{0xa981, 0x1a},
+	{0xa984, 0x0d},
+	{0xa985, 0x15},
+	{0xa988, 0x1a},
+	{0xa989, 0x0d},
+	{0xa98c, 0x15},
+	{0xa98d, 0x1a},
+	{0xa990, 0x0b},
+	{0xa991, 0x11},
+	{0xa994, 0x02},
+	{0xa995, 0x0e},
+	{0xa998, 0x16},
+	{0xa999, 0x02},
+	{0xa99c, 0x0c},
+	{0xa99d, 0x13},
+	{0xa9a0, 0x02},
+	{0xa9a1, 0x0c},
+	{0xa9a4, 0x12},
+	{0xa9a5, 0x02},
+	{0xa9a8, 0x0c},
+	{0xa9a9, 0x12},
+	{0xa9ac, 0x02},
+	{0xa9ad, 0x0c},
+	{0xa9b0, 0x12},
+	{0xa9b1, 0x02},
+	{0xa9b4, 0x10},
+	{0xa9b5, 0x1e},
+	{0xa9b8, 0x0f},
+	{0xa9b9, 0x13},
+	{0xa9bc, 0x20},
+	{0xa9bd, 0x10},
+	{0xa9c0, 0x11},
+	{0xa9c1, 0x1e},
+	{0xa9c4, 0x10},
+	{0xa9c5, 0x11},
+	{0xa9c8, 0x1e},
+	{0xa9c9, 0x10},
+	{0xa9cc, 0x11},
+	{0xa9cd, 0x20},
+	{0xa9d0, 0x10},
+	{0xa9d1, 0x13},
+	{0xa9d4, 0x24},
+	{0xa9d5, 0x10},
+	{0xa9f0, 0x02},
+	{0xa9f1, 0x01},
+	{0xa9f8, 0x19},
+	{0xa9f9, 0x0b},
+	{0xa9fc, 0x0a},
+	{0xa9fd, 0x07},
+	{0xaa00, 0x0c},
+	{0xaa01, 0x0e},
+	{0xaa08, 0x0c},
+	{0xaa09, 0x06},
+	{0xaa0c, 0x0c},
+	{0xaa0d, 0x0a},
+	{0xaa24, 0x10},
+	{0xaa25, 0x12},
+	{0xaa28, 0x0b},
+	{0xaa29, 0x07},
+	{0xaa2c, 0x10},
+	{0xaa2d, 0x14},
+	{0xaa34, 0x0e},
+	{0xaa35, 0x0e},
+	{0xaa38, 0x07},
+	{0xaa39, 0x07},
+	{0xaa3c, 0x0e},
+	{0xaa3d, 0x0c},
+	{0xaa48, 0x09},
+	{0xaa49, 0x0c},
+	{0xaa4c, 0x0c},
+	{0xaa4d, 0x07},
+	{0xaa54, 0x08},
+	{0xaa55, 0x06},
+	{0xaa58, 0x04},
+	{0xaa59, 0x05},
+	{0xaa5c, 0x06},
+	{0xaa5d, 0x06},
+	{0xaa68, 0x05},
+	{0xaa69, 0x05},
+	{0xaa6c, 0x04},
+	{0xaa6d, 0x05},
+	{0xaa74, 0x06},
+	{0xaa75, 0x04},
+	{0xaa78, 0x05},
+	{0xaa79, 0x05},
+	{0xaa7c, 0x04},
+	{0xaa7d, 0x06},
+	{0xac18, 0x14},
+	{0xac19, 0x00},
+	{0xac1c, 0x14},
+	{0xac1d, 0x00},
+	{0xac20, 0x14},
+	{0xac21, 0x00},
+	{0xac24, 0x14},
+	{0xac25, 0x00},
+	{0xac28, 0x14},
+	{0xac29, 0x00},
+	{0xac2c, 0x14},
+	{0xac2d, 0x00},
+	{0xac34, 0x16},
+	{0xac35, 0x00},
+	{0xac38, 0x16},
+	{0xac39, 0x00},
+	{0xac3c, 0x16},
+	{0xac3d, 0x00},
+	{0xac40, 0x16},
+	{0xac41, 0x00},
+	{0xac44, 0x16},
+	{0xac45, 0x00},
+	{0xac48, 0x16},
+	{0xac49, 0x00},
+	{0xac50, 0x1b},
+	{0xac51, 0x00},
+	{0xac54, 0x1b},
+	{0xac55, 0x00},
+	{0xac58, 0x1b},
+	{0xac59, 0x00},
+	{0xac5c, 0x1b},
+	{0xac5d, 0x00},
+	{0xac60, 0x1b},
+	{0xac61, 0x00},
+	{0xac64, 0x1b},
+	{0xac65, 0x00},
+	{0xac74, 0x09},
+	{0xac75, 0x0c},
+	{0xac78, 0x0f},
+	{0xac79, 0x11},
+	{0xac7c, 0x12},
+	{0xac7d, 0x14},
+	{0xac80, 0x09},
+	{0xac81, 0x0c},
+	{0xac84, 0x0f},
+	{0xac85, 0x11},
+	{0xac88, 0x12},
+	{0xac89, 0x14},
+	{0xac8c, 0x09},
+	{0xac8d, 0x0c},
+	{0xac90, 0x0f},
+	{0xac91, 0x11},
+	{0xac94, 0x12},
+	{0xac95, 0x14},
+	{0xac98, 0x09},
+	{0xac99, 0x0c},
+	{0xac9c, 0x0f},
+	{0xac9d, 0x11},
+	{0xaca0, 0x12},
+	{0xaca1, 0x14},
+	{0xaca4, 0x09},
+	{0xaca5, 0x0c},
+	{0xaca8, 0x0f},
+	{0xaca9, 0x11},
+	{0xacac, 0x12},
+	{0xacad, 0x14},
+	{0xacb0, 0x07},
+	{0xacb1, 0x09},
+	{0xacb4, 0x0c},
+	{0xacb5, 0x0d},
+	{0xacb8, 0x0d},
+	{0xacb9, 0x0e},
+	{0xacbc, 0x05},
+	{0xacbd, 0x07},
+	{0xacc0, 0x0a},
+	{0xacc1, 0x0b},
+	{0xacc4, 0x0b},
+	{0xacc5, 0x0c},
+	{0xacc8, 0x03},
+	{0xacc9, 0x04},
+	{0xaccc, 0x07},
+	{0xaccd, 0x08},
+	{0xacd0, 0x09},
+	{0xacd1, 0x09}
+};
+
+static struct vx6953_i2c_reg_conf patch_tbl_cut3[] = {
+	{0xF800, 0x90},
+	{0xF801, 0x30},
+	{0xF802, 0x31},
+	{0xF803, 0xe0},
+	{0xF804, 0xf5},
+	{0xF805, 0x7d},
+	{0xF806, 0xb4},
+	{0xF807, 0x01},
+	{0xF808, 0x06},
+	{0xF809, 0x75},
+	{0xF80A, 0x7d},
+	{0xF80B, 0x03},
+	{0xF80C, 0x74},
+	{0xF80D, 0x03},
+	{0xF80E, 0xf0},
+	{0xF80F, 0x90},
+	{0xF810, 0x30},
+	{0xF811, 0x04},
+	{0xF812, 0x74},
+	{0xF813, 0x33},
+	{0xF814, 0xf0},
+	{0xF815, 0x90},
+	{0xF816, 0x30},
+	{0xF817, 0x06},
+	{0xF818, 0xe4},
+	{0xF819, 0xf0},
+	{0xF81A, 0xa3},
+	{0xF81B, 0x74},
+	{0xF81C, 0x09},
+	{0xF81D, 0xf0},
+	{0xF81E, 0x90},
+	{0xF81F, 0x30},
+	{0xF820, 0x10},
+	{0xF821, 0xe4},
+	{0xF822, 0xf0},
+	{0xF823, 0xa3},
+	{0xF824, 0xf0},
+	{0xF825, 0x90},
+	{0xF826, 0x30},
+	{0xF827, 0x16},
+	{0xF828, 0x74},
+	{0xF829, 0x1e},
+	{0xF82A, 0xf0},
+	{0xF82B, 0x90},
+	{0xF82C, 0x30},
+	{0xF82D, 0x1a},
+	{0xF82E, 0x74},
+	{0xF82F, 0x6a},
+	{0xF830, 0xf0},
+	{0xF831, 0xa3},
+	{0xF832, 0x74},
+	{0xF833, 0x29},
+	{0xF834, 0xf0},
+	{0xF835, 0x90},
+	{0xF836, 0x30},
+	{0xF837, 0x30},
+	{0xF838, 0x74},
+	{0xF839, 0x08},
+	{0xF83A, 0xf0},
+	{0xF83B, 0x90},
+	{0xF83C, 0x30},
+	{0xF83D, 0x36},
+	{0xF83E, 0x74},
+	{0xF83F, 0x2c},
+	{0xF840, 0xf0},
+	{0xF841, 0x90},
+	{0xF842, 0x30},
+	{0xF843, 0x41},
+	{0xF844, 0xe4},
+	{0xF845, 0xf0},
+	{0xF846, 0xa3},
+	{0xF847, 0x74},
+	{0xF848, 0x24},
+	{0xF849, 0xf0},
+	{0xF84A, 0x90},
+	{0xF84B, 0x30},
+	{0xF84C, 0x45},
+	{0xF84D, 0x74},
+	{0xF84E, 0x81},
+	{0xF84F, 0xf0},
+	{0xF850, 0x90},
+	{0xF851, 0x30},
+	{0xF852, 0x98},
+	{0xF853, 0x74},
+	{0xF854, 0x01},
+	{0xF855, 0xf0},
+	{0xF856, 0x90},
+	{0xF857, 0x30},
+	{0xF858, 0x9d},
+	{0xF859, 0x74},
+	{0xF85A, 0x05},
+	{0xF85B, 0xf0},
+	{0xF85C, 0xe5},
+	{0xF85D, 0x7d},
+	{0xF85E, 0x70},
+	{0xF85F, 0x10},
+	{0xF860, 0x90},
+	{0xF861, 0x30},
+	{0xF862, 0x05},
+	{0xF863, 0x04},
+	{0xF864, 0xf0},
+	{0xF865, 0x90},
+	{0xF866, 0x30},
+	{0xF867, 0x30},
+	{0xF868, 0xe4},
+	{0xF869, 0xf0},
+	{0xF86A, 0x90},
+	{0xF86B, 0x30},
+	{0xF86C, 0x35},
+	{0xF86D, 0x04},
+	{0xF86E, 0xf0},
+	{0xF86F, 0x22},
+	{0xF870, 0xe5},
+	{0xF871, 0x7d},
+	{0xF872, 0x64},
+	{0xF873, 0x02},
+	{0xF874, 0x70},
+	{0xF875, 0x2d},
+	{0xF876, 0x90},
+	{0xF877, 0x30},
+	{0xF878, 0x04},
+	{0xF879, 0x74},
+	{0xF87A, 0x34},
+	{0xF87B, 0xf0},
+	{0xF87C, 0xa3},
+	{0xF87D, 0x74},
+	{0xF87E, 0x07},
+	{0xF87F, 0xf0},
+	{0xF880, 0x90},
+	{0xF881, 0x30},
+	{0xF882, 0x10},
+	{0xF883, 0x74},
+	{0xF884, 0x10},
+	{0xF885, 0xf0},
+	{0xF886, 0x90},
+	{0xF887, 0x30},
+	{0xF888, 0x16},
+	{0xF889, 0x74},
+	{0xF88A, 0x1f},
+	{0xF88B, 0xf0},
+	{0xF88C, 0x90},
+	{0xF88D, 0x30},
+	{0xF88E, 0x1a},
+	{0xF88F, 0x74},
+	{0xF890, 0x62},
+	{0xF891, 0xf0},
+	{0xF892, 0x90},
+	{0xF893, 0x30},
+	{0xF894, 0x35},
+	{0xF895, 0x74},
+	{0xF896, 0x04},
+	{0xF897, 0xf0},
+	{0xF898, 0x90},
+	{0xF899, 0x30},
+	{0xF89A, 0x41},
+	{0xF89B, 0x74},
+	{0xF89C, 0x60},
+	{0xF89D, 0xf0},
+	{0xF89E, 0xa3},
+	{0xF89F, 0x74},
+	{0xF8A0, 0x64},
+	{0xF8A1, 0xf0},
+	{0xF8A2, 0x22},
+	{0xF8A3, 0xe5},
+	{0xF8A4, 0x7d},
+	{0xF8A5, 0xb4},
+	{0xF8A6, 0x03},
+	{0xF8A7, 0x12},
+	{0xF8A8, 0x90},
+	{0xF8A9, 0x30},
+	{0xF8AA, 0x05},
+	{0xF8AB, 0x74},
+	{0xF8AC, 0x03},
+	{0xF8AD, 0xf0},
+	{0xF8AE, 0x90},
+	{0xF8AF, 0x30},
+	{0xF8B0, 0x11},
+	{0xF8B1, 0x74},
+	{0xF8B2, 0x01},
+	{0xF8B3, 0xf0},
+	{0xF8B4, 0x90},
+	{0xF8B5, 0x30},
+	{0xF8B6, 0x35},
+	{0xF8B7, 0x74},
+	{0xF8B8, 0x03},
+	{0xF8B9, 0xf0},
+	{0xF8BA, 0x22},
+	{0xF8BB, 0xc3},
+	{0xF8BC, 0x90},
+	{0xF8BD, 0x0b},
+	{0xF8BE, 0x89},
+	{0xF8BF, 0xe0},
+	{0xF8C0, 0x94},
+	{0xF8C1, 0x1e},
+	{0xF8C2, 0x90},
+	{0xF8C3, 0x0b},
+	{0xF8C4, 0x88},
+	{0xF8C5, 0xe0},
+	{0xF8C6, 0x94},
+	{0xF8C7, 0x00},
+	{0xF8C8, 0x50},
+	{0xF8C9, 0x06},
+	{0xF8CA, 0x7e},
+	{0xF8CB, 0x00},
+	{0xF8CC, 0x7f},
+	{0xF8CD, 0x01},
+	{0xF8CE, 0x80},
+	{0xF8CF, 0x3d},
+	{0xF8D0, 0xc3},
+	{0xF8D1, 0x90},
+	{0xF8D2, 0x0b},
+	{0xF8D3, 0x89},
+	{0xF8D4, 0xe0},
+	{0xF8D5, 0x94},
+	{0xF8D6, 0x3c},
+	{0xF8D7, 0x90},
+	{0xF8D8, 0x0b},
+	{0xF8D9, 0x88},
+	{0xF8DA, 0xe0},
+	{0xF8DB, 0x94},
+	{0xF8DC, 0x00},
+	{0xF8DD, 0x50},
+	{0xF8DE, 0x06},
+	{0xF8DF, 0x7e},
+	{0xF8E0, 0x00},
+	{0xF8E1, 0x7f},
+	{0xF8E2, 0x02},
+	{0xF8E3, 0x80},
+	{0xF8E4, 0x28},
+	{0xF8E5, 0xc3},
+	{0xF8E6, 0x90},
+	{0xF8E7, 0x0b},
+	{0xF8E8, 0x89},
+	{0xF8E9, 0xe0},
+	{0xF8EA, 0x94},
+	{0xF8EB, 0xfa},
+	{0xF8EC, 0x90},
+	{0xF8ED, 0x0b},
+	{0xF8EE, 0x88},
+	{0xF8EF, 0xe0},
+	{0xF8F0, 0x94},
+	{0xF8F1, 0x00},
+	{0xF8F2, 0x50},
+	{0xF8F3, 0x06},
+	{0xF8F4, 0x7e},
+	{0xF8F5, 0x00},
+	{0xF8F6, 0x7f},
+	{0xF8F7, 0x03},
+	{0xF8F8, 0x80},
+	{0xF8F9, 0x13},
+	{0xF8FA, 0xc3},
+	{0xF8FB, 0x90},
+	{0xF8FC, 0x0b},
+	{0xF8FD, 0x88},
+	{0xF8FE, 0xe0},
+	{0xF8FF, 0x94},
+	{0xF900, 0x80},
+	{0xF901, 0x50},
+	{0xF902, 0x06},
+	{0xF903, 0x7e},
+	{0xF904, 0x00},
+	{0xF905, 0x7f},
+	{0xF906, 0x04},
+	{0xF907, 0x80},
+	{0xF908, 0x04},
+	{0xF909, 0xae},
+	{0xF90A, 0x7e},
+	{0xF90B, 0xaf},
+	{0xF90C, 0x7f},
+	{0xF90D, 0x90},
+	{0xF90E, 0xa0},
+	{0xF90F, 0xf8},
+	{0xF910, 0xee},
+	{0xF911, 0xf0},
+	{0xF912, 0xa3},
+	{0xF913, 0xef},
+	{0xF914, 0xf0},
+	{0xF915, 0x22},
+	{0xF916, 0x90},
+	{0xF917, 0x33},
+	{0xF918, 0x82},
+	{0xF919, 0xe0},
+	{0xF91A, 0xff},
+	{0xF91B, 0x64},
+	{0xF91C, 0x01},
+	{0xF91D, 0x70},
+	{0xF91E, 0x30},
+	{0xF91F, 0xe5},
+	{0xF920, 0x7f},
+	{0xF921, 0x64},
+	{0xF922, 0x02},
+	{0xF923, 0x45},
+	{0xF924, 0x7e},
+	{0xF925, 0x70},
+	{0xF926, 0x04},
+	{0xF927, 0x7d},
+	{0xF928, 0x1e},
+	{0xF929, 0x80},
+	{0xF92A, 0x1d},
+	{0xF92B, 0xe5},
+	{0xF92C, 0x7f},
+	{0xF92D, 0x64},
+	{0xF92E, 0x03},
+	{0xF92F, 0x45},
+	{0xF930, 0x7e},
+	{0xF931, 0x70},
+	{0xF932, 0x04},
+	{0xF933, 0x7d},
+	{0xF934, 0x3c},
+	{0xF935, 0x80},
+	{0xF936, 0x11},
+	{0xF937, 0xe5},
+	{0xF938, 0x7f},
+	{0xF939, 0x64},
+	{0xF93A, 0x04},
+	{0xF93B, 0x45},
+	{0xF93C, 0x7e},
+	{0xF93D, 0x70},
+	{0xF93E, 0x04},
+	{0xF93F, 0x7d},
+	{0xF940, 0xfa},
+	{0xF941, 0x80},
+	{0xF942, 0x05},
+	{0xF943, 0x90},
+	{0xF944, 0x33},
+	{0xF945, 0x81},
+	{0xF946, 0xe0},
+	{0xF947, 0xfd},
+	{0xF948, 0xae},
+	{0xF949, 0x05},
+	{0xF94A, 0x90},
+	{0xF94B, 0x33},
+	{0xF94C, 0x81},
+	{0xF94D, 0xed},
+	{0xF94E, 0xf0},
+	{0xF94F, 0xef},
+	{0xF950, 0xb4},
+	{0xF951, 0x01},
+	{0xF952, 0x10},
+	{0xF953, 0x90},
+	{0xF954, 0x01},
+	{0xF955, 0x00},
+	{0xF956, 0xe0},
+	{0xF957, 0x60},
+	{0xF958, 0x0a},
+	{0xF959, 0x90},
+	{0xF95A, 0xa1},
+	{0xF95B, 0x10},
+	{0xF95C, 0xe0},
+	{0xF95D, 0xf5},
+	{0xF95E, 0x7e},
+	{0xF95F, 0xa3},
+	{0xF960, 0xe0},
+	{0xF961, 0xf5},
+	{0xF962, 0x7f},
+	{0xF963, 0x22},
+	{0xF964, 0x12},
+	{0xF965, 0x2f},
+	{0xF966, 0x4d},
+	{0xF967, 0x90},
+	{0xF968, 0x35},
+	{0xF969, 0x38},
+	{0xF96A, 0xe0},
+	{0xF96B, 0x70},
+	{0xF96C, 0x05},
+	{0xF96D, 0x12},
+	{0xF96E, 0x00},
+	{0xF96F, 0x0e},
+	{0xF970, 0x80},
+	{0xF971, 0x03},
+	{0xF972, 0x12},
+	{0xF973, 0x07},
+	{0xF974, 0xc9},
+	{0xF975, 0x90},
+	{0xF976, 0x40},
+	{0xF977, 0x06},
+	{0xF978, 0xe0},
+	{0xF979, 0xf4},
+	{0xF97A, 0x54},
+	{0xF97B, 0x02},
+	{0xF97C, 0xff},
+	{0xF97D, 0xe0},
+	{0xF97E, 0x54},
+	{0xF97F, 0x01},
+	{0xF980, 0x4f},
+	{0xF981, 0x90},
+	{0xF982, 0x31},
+	{0xF983, 0x32},
+	{0xF984, 0xf0},
+	{0xF985, 0x90},
+	{0xF986, 0xfa},
+	{0xF987, 0x9d},
+	{0xF988, 0xe0},
+	{0xF989, 0x70},
+	{0xF98A, 0x03},
+	{0xF98B, 0x12},
+	{0xF98C, 0x27},
+	{0xF98D, 0x27},
+	{0xF98E, 0x02},
+	{0xF98F, 0x05},
+	{0xF990, 0xac},
+	{0xF991, 0x22},
+	{0xF992, 0x78},
+	{0xF993, 0x07},
+	{0xF994, 0xe6},
+	{0xF995, 0xf5},
+	{0xF996, 0x7c},
+	{0xF997, 0xe5},
+	{0xF998, 0x7c},
+	{0xF999, 0x60},
+	{0xF99A, 0x1d},
+	{0xF99B, 0x90},
+	{0xF99C, 0x43},
+	{0xF99D, 0x83},
+	{0xF99E, 0xe0},
+	{0xF99F, 0xb4},
+	{0xF9A0, 0x01},
+	{0xF9A1, 0x16},
+	{0xF9A2, 0x90},
+	{0xF9A3, 0x43},
+	{0xF9A4, 0x87},
+	{0xF9A5, 0xe0},
+	{0xF9A6, 0xb4},
+	{0xF9A7, 0x01},
+	{0xF9A8, 0x0f},
+	{0xF9A9, 0x15},
+	{0xF9AA, 0x7c},
+	{0xF9AB, 0x90},
+	{0xF9AC, 0x30},
+	{0xF9AD, 0xa1},
+	{0xF9AE, 0xe5},
+	{0xF9AF, 0x7c},
+	{0xF9B0, 0xf0},
+	{0xF9B1, 0x90},
+	{0xF9B2, 0x30},
+	{0xF9B3, 0xa0},
+	{0xF9B4, 0x74},
+	{0xF9B5, 0x01},
+	{0xF9B6, 0xf0},
+	{0xF9B7, 0x22},
+	{0xF9B8, 0xe4},
+	{0xF9B9, 0x90},
+	{0xF9BA, 0x30},
+	{0xF9BB, 0xa0},
+	{0xF9BC, 0xf0},
+	{0xF9BD, 0x22},
+	{0xF9BE, 0xf0},
+	{0xF9BF, 0xe5},
+	{0xF9C0, 0x3a},
+	{0xF9C1, 0xb4},
+	{0xF9C2, 0x06},
+	{0xF9C3, 0x06},
+	{0xF9C4, 0x63},
+	{0xF9C5, 0x3e},
+	{0xF9C6, 0x02},
+	{0xF9C7, 0x12},
+	{0xF9C8, 0x03},
+	{0xF9C9, 0xea},
+	{0xF9CA, 0x02},
+	{0xF9CB, 0x17},
+	{0xF9CC, 0x4a},
+	{0xF9CD, 0x22},
+	{0x35C9, 0xBB},
+	{0x35CA, 0x01},
+	{0x35CB, 0x16},
+	{0x35CC, 0x01},
+	{0x35CD, 0x64},
+	{0x35CE, 0x01},
+	{0x35CF, 0x92},
+	{0x35D0, 0x01},
+	{0x35D1, 0xBE},
+	{0x35D3, 0xF6},
+	{0x35D5, 0x07},
+	{0x35D7, 0xA3},
+	{0x35DB, 0x02},
+	{0x35DD, 0x06},
+	{0x35DF, 0x1B},
+	{0x35E6, 0x28},
+	{0x35E7, 0x76},
+	{0x35E8, 0x2D},
+	{0x35E9, 0x07},
+	{0x35EA, 0x04},
+	{0x35EB, 0x43},
+	{0x35EC, 0x05},
+	{0x35ED, 0xA9},
+	{0x35EE, 0x2A},
+	{0x35EF, 0x15},
+	{0x35F0, 0x17},
+	{0x35F1, 0x41},
+	{0x35F2, 0x24},
+	{0x35F3, 0x88},
+	{0x35F4, 0x01},
+	{0x35F5, 0x54},
+	{0x35F6, 0x01},
+	{0x35F7, 0x55},
+	{0x35F8, 0x2E},
+	{0x35F9, 0xF2},
+	{0x35FA, 0x06},
+	{0x35FB, 0x02},
+	{0x35FC, 0x06},
+	{0x35FD, 0x03},
+	{0x35FE, 0x06},
+	{0x35FF, 0x04},
+	{0x35C2, 0x1F},
+	{0x35C3, 0xFF},
+	{0x35C4, 0x1F},
+	{0x35C5, 0xC0},
+	{0x35C0, 0x01},
+};
+
+struct vx6953_format {
+	enum v4l2_mbus_pixelcode code;
+	enum v4l2_colorspace colorspace;
+	u16 fmt;
+	u16 order;
+};
+
+static const struct vx6953_format vx6953_cfmts[] = {
+	{
+	.code   = V4L2_MBUS_FMT_YUYV8_2X8,
+	.colorspace = V4L2_COLORSPACE_JPEG,
+	.fmt    = 1,
+	.order    = 0,
+	}
+	/* more can be supported, to be added later */
+};
+
+
+/*=============================================================*/
+
+static int vx6953_i2c_rxdata(unsigned short saddr,
+	unsigned char *rxdata, int length)
+{
+	struct i2c_msg msgs[] = {
+		{
+			.addr  = saddr,
+			.flags = 0,
+			.len   = 2,
+			.buf   = rxdata,
+		},
+		{
+			.addr  = saddr,
+			.flags = I2C_M_RD,
+			.len   = 2,
+			.buf   = rxdata,
+		},
+	};
+	if (i2c_transfer(vx6953_client->adapter, msgs, 2) < 0) {
+		CDBG("vx6953_i2c_rxdata failed!\n");
+		return -EIO;
+	}
+	return 0;
+}
+static int32_t vx6953_i2c_txdata(unsigned short saddr,
+				unsigned char *txdata, int length)
+{
+	struct i2c_msg msg[] = {
+		{
+			.addr = saddr,
+			.flags = 0,
+			.len = length,
+			.buf = txdata,
+		 },
+	};
+	if (i2c_transfer(vx6953_client->adapter, msg, 1) < 0) {
+		CDBG("vx6953_i2c_txdata faild 0x%x\n", vx6953_client->addr);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+
+static int32_t vx6953_i2c_read(unsigned short raddr,
+	unsigned short *rdata, int rlen)
+{
+	int32_t rc = 0;
+	unsigned char buf[2];
+	if (!rdata)
+		return -EIO;
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (raddr & 0xFF00) >> 8;
+	buf[1] = (raddr & 0x00FF);
+	rc = vx6953_i2c_rxdata(vx6953_client->addr>>1, buf, rlen);
+	if (rc < 0) {
+		CDBG("vx6953_i2c_read 0x%x failed!\n", raddr);
+		return rc;
+	}
+	*rdata = (rlen == 2 ? buf[0] << 8 | buf[1] : buf[0]);
+	return rc;
+}
+static int32_t vx6953_i2c_write_b_sensor(unsigned short waddr, uint8_t bdata)
+{
+	int32_t rc = -EFAULT;
+	unsigned char buf[3];
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (waddr & 0xFF00) >> 8;
+	buf[1] = (waddr & 0x00FF);
+	buf[2] = bdata;
+	CDBG("i2c_write_b addr = 0x%x, val = 0x%x\n", waddr, bdata);
+	rc = vx6953_i2c_txdata(vx6953_client->addr>>1, buf, 3);
+	if (rc < 0) {
+		CDBG("i2c_write_b failed, addr = 0x%x, val = 0x%x!\n",
+			waddr, bdata);
+	}
+	return rc;
+}
+static int32_t vx6953_i2c_write_seq_sensor(unsigned short waddr,
+	uint8_t *bdata, uint16_t len)
+{
+	int32_t rc = -EFAULT;
+	unsigned char buf[len+2];
+	int i;
+	memset(buf, 0, sizeof(buf));
+	buf[0] = (waddr & 0xFF00) >> 8;
+	buf[1] = (waddr & 0x00FF);
+	for (i = 2; i < len+2; i++)
+		buf[i] = *bdata++;
+	rc = vx6953_i2c_txdata(vx6953_client->addr>>1, buf, len+2);
+	if (rc < 0) {
+		CDBG("i2c_write_b failed, addr = 0x%x, val = 0x%x!\n",
+			 waddr, bdata[0]);
+	}
+	return rc;
+}
+
+static int32_t vx6953_i2c_write_w_table(struct vx6953_i2c_reg_conf const
+					 *reg_conf_tbl, int num)
+{
+	int i;
+	int32_t rc = -EIO;
+	for (i = 0; i < num; i++) {
+		rc = vx6953_i2c_write_b_sensor(reg_conf_tbl->waddr,
+			reg_conf_tbl->wdata);
+		if (rc < 0)
+			break;
+		reg_conf_tbl++;
+	}
+	return rc;
+}
+
+static void vx6953_get_pict_fps(uint16_t fps, uint16_t *pfps)
+{
+	/* input fps is preview fps in Q8 format */
+	uint16_t preview_frame_length_lines, snapshot_frame_length_lines;
+	uint16_t preview_line_length_pck, snapshot_line_length_pck;
+	uint32_t divider, d1, d2;
+	/* Total frame_length_lines and line_length_pck for preview */
+	preview_frame_length_lines = VX6953_QTR_SIZE_HEIGHT +
+		VX6953_VER_QTR_BLK_LINES;
+	preview_line_length_pck = VX6953_QTR_SIZE_WIDTH +
+		VX6953_HRZ_QTR_BLK_PIXELS;
+	/* Total frame_length_lines and line_length_pck for snapshot */
+	snapshot_frame_length_lines = VX6953_FULL_SIZE_HEIGHT +
+		VX6953_VER_FULL_BLK_LINES;
+	snapshot_line_length_pck = VX6953_FULL_SIZE_WIDTH +
+		VX6953_HRZ_FULL_BLK_PIXELS;
+	d1 = preview_frame_length_lines * 0x00000400/
+		snapshot_frame_length_lines;
+	d2 = preview_line_length_pck * 0x00000400/
+		snapshot_line_length_pck;
+	divider = d1 * d2 / 0x400;
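+	/*
+	 * d1, d2 and divider are Q10 fixed point (0x400 == 1.0).
+	 * Illustrative example (assumed geometry, not measured): if the
+	 * preview frame is half the snapshot height and half its width,
+	 * d1 and d2 are each about 0x200 (0.5), so divider is about 0x100
+	 * (0.25) and a 30 fps preview rate maps to roughly 7.5 fps below.
+	 */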
+	/*Verify PCLK settings and frame sizes.*/
+	*pfps = (uint16_t) (fps * divider / 0x400);
+	/* 2 is the ratio of the number of snapshot channels
+	to the number of preview channels */
+
+}
+
+static uint16_t vx6953_get_prev_lines_pf(void)
+{
+	if (vx6953_ctrl->prev_res == QTR_SIZE)
+		return VX6953_QTR_SIZE_HEIGHT + VX6953_VER_QTR_BLK_LINES;
+	else
+		return VX6953_FULL_SIZE_HEIGHT + VX6953_VER_FULL_BLK_LINES;
+
+}
+
+static uint16_t vx6953_get_prev_pixels_pl(void)
+{
+	if (vx6953_ctrl->prev_res == QTR_SIZE)
+		return VX6953_QTR_SIZE_WIDTH + VX6953_HRZ_QTR_BLK_PIXELS;
+	else
+		return VX6953_FULL_SIZE_WIDTH + VX6953_HRZ_FULL_BLK_PIXELS;
+}
+
+static uint16_t vx6953_get_pict_lines_pf(void)
+{
+		if (vx6953_ctrl->pict_res == QTR_SIZE)
+			return VX6953_QTR_SIZE_HEIGHT +
+				VX6953_VER_QTR_BLK_LINES;
+		else
+			return VX6953_FULL_SIZE_HEIGHT +
+				VX6953_VER_FULL_BLK_LINES;
+}
+
+static uint16_t vx6953_get_pict_pixels_pl(void)
+{
+	if (vx6953_ctrl->pict_res == QTR_SIZE)
+		return VX6953_QTR_SIZE_WIDTH +
+			VX6953_HRZ_QTR_BLK_PIXELS;
+	else
+		return VX6953_FULL_SIZE_WIDTH +
+			VX6953_HRZ_FULL_BLK_PIXELS;
+}
+
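+/*
+ * Maximum exposure expressed in line counts. The factor of 24 presumably
+ * allows coarse integration of up to roughly 24 frame periods; this is an
+ * assumption inferred from the code, not confirmed against a datasheet.
+ */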
+static uint32_t vx6953_get_pict_max_exp_lc(void)
+{
+	if (vx6953_ctrl->pict_res == QTR_SIZE)
+		return (VX6953_QTR_SIZE_HEIGHT +
+			VX6953_VER_QTR_BLK_LINES)*24;
+	else
+		return (VX6953_FULL_SIZE_HEIGHT +
+			VX6953_VER_FULL_BLK_LINES)*24;
+}
+
+static int32_t vx6953_set_fps(struct fps_cfg *fps)
+{
+	uint16_t total_lines_per_frame;
+	int32_t rc = 0;
+	total_lines_per_frame = (uint16_t)((VX6953_QTR_SIZE_HEIGHT +
+		VX6953_VER_QTR_BLK_LINES) * vx6953_ctrl->fps_divider/0x400);
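+	/*
+	 * fps_divider is Q10 (0x400 == 1.0). Illustrative example: a
+	 * divider of 0x800 (2.0) doubles frame_length_lines and therefore
+	 * halves the frame rate; 0x400 leaves it unchanged.
+	 */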
+	if (vx6953_i2c_write_b_sensor(REG_FRAME_LENGTH_LINES_HI,
+		((total_lines_per_frame & 0xFF00) >> 8)) < 0)
+		return rc;
+	if (vx6953_i2c_write_b_sensor(REG_FRAME_LENGTH_LINES_LO,
+		(total_lines_per_frame & 0x00FF)) < 0)
+		return rc;
+	return rc;
+}
+
+static int32_t vx6953_write_exp_gain(uint16_t gain, uint32_t line)
+{
+	uint16_t line_length_pck, frame_length_lines;
+	uint8_t gain_hi, gain_lo;
+	uint8_t intg_time_hi, intg_time_lo;
+	uint8_t line_length_pck_hi = 0, line_length_pck_lo = 0;
+	uint16_t line_length_ratio = 1 * Q8;
+	int32_t rc = 0;
+	if (vx6953_ctrl->sensormode != SENSOR_SNAPSHOT_MODE) {
+		frame_length_lines = VX6953_QTR_SIZE_HEIGHT +
+		VX6953_VER_QTR_BLK_LINES;
+		line_length_pck = VX6953_QTR_SIZE_WIDTH +
+			VX6953_HRZ_QTR_BLK_PIXELS;
+		if (line > (frame_length_lines -
+			VX6953_STM5M0EDOF_OFFSET)) {
+			vx6953_ctrl->fps = (uint16_t) (30 * Q8 *
+			(frame_length_lines - VX6953_STM5M0EDOF_OFFSET)/
+			line);
+		} else {
+			vx6953_ctrl->fps = (uint16_t) (30 * Q8);
+		}
+	} else {
+		frame_length_lines = VX6953_FULL_SIZE_HEIGHT +
+				VX6953_VER_FULL_BLK_LINES;
+		line_length_pck = VX6953_FULL_SIZE_WIDTH +
+				VX6953_HRZ_FULL_BLK_PIXELS;
+	}
+	/* calculate line_length_ratio */
+	if ((frame_length_lines - VX6953_STM5M0EDOF_OFFSET) < line) {
+		line_length_ratio = (line*Q8) /
+			(frame_length_lines - VX6953_STM5M0EDOF_OFFSET);
+		line = frame_length_lines - VX6953_STM5M0EDOF_OFFSET;
+	} else {
+		line_length_ratio = 1*Q8;
+	}
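+	/*
+	 * Illustrative example: with line == 2 * (frame_length_lines -
+	 * VX6953_STM5M0EDOF_OFFSET), line_length_ratio becomes 2 * Q8 and
+	 * line is clamped above, so the extra exposure is obtained by
+	 * stretching line_length_pck instead of the frame length.
+	 */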
+	vx6953_i2c_write_b_sensor(REG_GROUPED_PARAMETER_HOLD,
+		GROUPED_PARAMETER_HOLD);
+	line_length_pck = (line_length_pck >
+		MAX_LINE_LENGTH_PCK) ?
+		MAX_LINE_LENGTH_PCK : line_length_pck;
+	line_length_pck = (uint16_t) (line_length_pck *
+		line_length_ratio/Q8);
+	line_length_pck_hi = (uint8_t) ((line_length_pck &
+		0xFF00) >> 8);
+	line_length_pck_lo = (uint8_t) (line_length_pck &
+		0x00FF);
+	vx6953_i2c_write_b_sensor(REG_LINE_LENGTH_PCK_HI,
+		line_length_pck_hi);
+	vx6953_i2c_write_b_sensor(REG_LINE_LENGTH_PCK_LO,
+		line_length_pck_lo);
+	/* update analogue gain registers */
+	gain_hi = (uint8_t) ((gain & 0xFF00) >> 8);
+	gain_lo = (uint8_t) (gain & 0x00FF);
+	vx6953_i2c_write_b_sensor(REG_ANALOGUE_GAIN_CODE_GLOBAL_LO,
+		gain_lo);
+	vx6953_i2c_write_b_sensor(REG_DIGITAL_GAIN_GREEN_R_LO, gain_hi);
+	vx6953_i2c_write_b_sensor(REG_DIGITAL_GAIN_RED_LO, gain_hi);
+	vx6953_i2c_write_b_sensor(REG_DIGITAL_GAIN_BLUE_LO, gain_hi);
+	vx6953_i2c_write_b_sensor(REG_DIGITAL_GAIN_GREEN_B_LO, gain_hi);
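+	/*
+	 * The 16-bit gain word is split here: the low byte programs the
+	 * global analogue gain and the high byte is replicated into the
+	 * per-channel digital gain registers (inferred from the register
+	 * names used above, not from sensor documentation).
+	 */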
+	CDBG("%s, gain_hi 0x%x, gain_lo 0x%x\n", __func__,
+		gain_hi, gain_lo);
+	/* update line count registers */
+	intg_time_hi = (uint8_t) (((uint16_t)line & 0xFF00) >> 8);
+	intg_time_lo = (uint8_t) ((uint16_t)line & 0x00FF);
+	vx6953_i2c_write_b_sensor(REG_COARSE_INTEGRATION_TIME_HI,
+		intg_time_hi);
+	vx6953_i2c_write_b_sensor(REG_COARSE_INTEGRATION_TIME_LO,
+		intg_time_lo);
+	vx6953_i2c_write_b_sensor(REG_GROUPED_PARAMETER_HOLD,
+		GROUPED_PARAMETER_HOLD_OFF);
+
+	return rc;
+}
+
+static int32_t vx6953_set_pict_exp_gain(uint16_t gain, uint32_t line)
+{
+	int32_t rc = 0;
+	rc = vx6953_write_exp_gain(gain, line);
+	return rc;
+} /* end of vx6953_set_pict_exp_gain */
+
+static int32_t vx6953_move_focus(int direction,
+	int32_t num_steps)
+{
+	return 0;
+}
+
+
+static int32_t vx6953_set_default_focus(uint8_t af_step)
+{
+	return 0;
+}
+
+static int32_t vx6953_test(enum vx6953_test_mode_t mo)
+{
+	int32_t rc = 0;
+	if (mo == TEST_OFF)
+		return rc;
+	else {
+		/* REG_0x30D8[4] is TESBYPEN: 0: Normal Operation,
+		   1: Bypass Signal Processing.
+		   REG_0x30D8[5] is EBDMASK: 0: Output Embedded data,
+		   1: No output embedded data */
+		if (vx6953_i2c_write_b_sensor(REG_TEST_PATTERN_MODE,
+			(uint8_t) mo) < 0) {
+			return rc;
+		}
+	}
+	return rc;
+}
+
+static int vx6953_enable_edof(enum edof_mode_t edof_mode)
+{
+	int rc = 0;
+	if (edof_mode == VX6953_EDOF_ESTIMATION) {
+		/* EDof Estimation mode for preview */
+		if (vx6953_i2c_write_b_sensor(REG_0x0b80, 0x02) < 0)
+			return rc;
+		CDBG("VX6953_EDOF_ESTIMATION");
+	} else if (edof_mode == VX6953_EDOF_APPLICATION) {
+		/* EDof Application mode for Capture */
+		if (vx6953_i2c_write_b_sensor(REG_0x0b80, 0x01) < 0)
+			return rc;
+		CDBG("VX6953_EDOF_APPLICATION");
+	} else {
+		/* EDOF disabled */
+		if (vx6953_i2c_write_b_sensor(REG_0x0b80, 0x00) < 0)
+			return rc;
+		CDBG("VX6953_EDOF_DISABLE");
+	}
+	return rc;
+}
+
+static int32_t vx6953_patch_for_cut2(void)
+{
+	int32_t rc = 0;
+	rc = vx6953_i2c_write_w_table(patch_tbl_cut2,
+		ARRAY_SIZE(patch_tbl_cut2));
+	if (rc < 0)
+		return rc;
+
+	return rc;
+}
+static int32_t vx6953_patch_for_cut3(void)
+{
+	int32_t rc = 0;
+	rc = vx6953_i2c_write_w_table(patch_tbl_cut3,
+		ARRAY_SIZE(patch_tbl_cut3));
+	if (rc < 0)
+		return rc;
+
+	return rc;
+}
+static int32_t vx6953_sensor_setting(int update_type, int rt)
+{
+
+	int32_t rc = 0;
+	unsigned short frame_cnt;
+	struct msm_camera_csi_params vx6953_csi_params;
+	if (vx6953_ctrl->sensor_type != VX6953_STM5M0EDOF_CUT_2) {
+		switch (update_type) {
+		case REG_INIT:
+		if (rt == RES_PREVIEW || rt == RES_CAPTURE) {
+			struct vx6953_i2c_reg_conf init_tbl[] = {
+			{REG_0x0112,
+				vx6953_regs.reg_pat_init[0].reg_0x0112},
+			{0x6003, 0x01},
+			{REG_0x0113,
+				vx6953_regs.reg_pat_init[0].reg_0x0113},
+			{REG_VT_PIX_CLK_DIV,
+				vx6953_regs.reg_pat_init[0].
+				vt_pix_clk_div},
+			{REG_PRE_PLL_CLK_DIV,
+				vx6953_regs.reg_pat_init[0].
+				pre_pll_clk_div},
+			{REG_PLL_MULTIPLIER,
+				vx6953_regs.reg_pat_init[0].
+				pll_multiplier},
+			{REG_OP_PIX_CLK_DIV,
+				vx6953_regs.reg_pat_init[0].
+				op_pix_clk_div},
+			{REG_COARSE_INTEGRATION_TIME_HI,
+				vx6953_regs.reg_pat[rt].
+				coarse_integration_time_hi},
+			{REG_COARSE_INTEGRATION_TIME_LO,
+				vx6953_regs.reg_pat[rt].
+				coarse_integration_time_lo},
+			{REG_ANALOGUE_GAIN_CODE_GLOBAL_LO,
+				vx6953_regs.reg_pat[rt].
+				analogue_gain_code_global},
+			{REG_0x3030,
+				vx6953_regs.reg_pat_init[0].reg_0x3030},
+			/* 953 specific registers */
+			{REG_0x0111,
+				vx6953_regs.reg_pat_init[0].reg_0x0111},
+			{REG_0x0b00,
+				vx6953_regs.reg_pat_init[0].reg_0x0b00},
+			{REG_0x3001,
+				vx6953_regs.reg_pat_init[0].reg_0x3001},
+			{REG_0x3004,
+				vx6953_regs.reg_pat_init[0].reg_0x3004},
+			{0x3006, 0x00},
+			{REG_0x3007,
+				vx6953_regs.reg_pat_init[0].reg_0x3007},
+			{0x301b, 0x29},
+			/* DEFCOR settings */
+			/*Single Defect Correction Weight DISABLE*/
+			{0x0b06,
+				vx6953_regs.reg_pat_init[0].reg_0x0b06},
+			/*Single_defect_correct_weight = auto*/
+			{0x0b07,
+				vx6953_regs.reg_pat_init[0].reg_0x0b07},
+			/*Dynamic couplet correction ENABLED*/
+			{0x0b08,
+				vx6953_regs.reg_pat_init[0].reg_0x0b08},
+			/*Dynamic couplet correction weight*/
+			{0x0b09,
+				vx6953_regs.reg_pat_init[0].reg_0x0b09},
+			/* Clock Setup */
+			/* Tell sensor ext clk is 24MHz*/
+			{REG_0x0136,
+				vx6953_regs.reg_pat_init[0].reg_0x0136},
+			{REG_0x0137,
+				vx6953_regs.reg_pat_init[0].reg_0x0137},
+			/* The white balance gains must be written
+			to the sensor every frame. */
+			/* Edof */
+			{REG_0x0b83,
+				vx6953_regs.reg_pat_init[0].reg_0x0b83},
+			{REG_0x0b84,
+				vx6953_regs.reg_pat_init[0].reg_0x0b84},
+			{REG_0x0b85,
+				vx6953_regs.reg_pat_init[0].reg_0x0b85},
+			{REG_0x0b88,
+				vx6953_regs.reg_pat_init[0].reg_0x0b88},
+			{REG_0x0b89,
+				vx6953_regs.reg_pat_init[0].reg_0x0b89},
+			{REG_0x0b8a,
+				vx6953_regs.reg_pat_init[0].reg_0x0b8a},
+			/* Mode specific registers */
+			{REG_FRAME_LENGTH_LINES_HI,
+				vx6953_regs.reg_pat[rt].
+				frame_length_lines_hi},
+			{REG_FRAME_LENGTH_LINES_LO,
+				vx6953_regs.reg_pat[rt].
+				frame_length_lines_lo},
+			{REG_LINE_LENGTH_PCK_HI,
+				vx6953_regs.reg_pat[rt].
+				line_length_pck_hi},
+			{REG_LINE_LENGTH_PCK_LO,
+				vx6953_regs.reg_pat[rt].
+				line_length_pck_lo},
+			{REG_0x3005,
+				vx6953_regs.reg_pat[rt].reg_0x3005},
+			{0x3010,
+				vx6953_regs.reg_pat[rt].reg_0x3010},
+			{REG_0x3011,
+				vx6953_regs.reg_pat[rt].reg_0x3011},
+			{REG_0x301a,
+				vx6953_regs.reg_pat[rt].reg_0x301a},
+			{REG_0x3035,
+				vx6953_regs.reg_pat[rt].reg_0x3035},
+			{REG_0x3036,
+				vx6953_regs.reg_pat[rt].reg_0x3036},
+			{REG_0x3041,
+				vx6953_regs.reg_pat[rt].reg_0x3041},
+			{0x3042,
+				vx6953_regs.reg_pat[rt].reg_0x3042},
+			{REG_0x3045,
+				vx6953_regs.reg_pat[rt].reg_0x3045},
+			/*EDOF: Estimation settings for Preview mode
+			Application settings for capture mode
+			(standard settings - Not tuned) */
+			{REG_0x0b80,
+				vx6953_regs.reg_pat[rt].reg_0x0b80},
+			{REG_0x0900,
+				vx6953_regs.reg_pat[rt].reg_0x0900},
+			{REG_0x0901,
+				vx6953_regs.reg_pat[rt].reg_0x0901},
+			{REG_0x0902,
+				vx6953_regs.reg_pat[rt].reg_0x0902},
+			{REG_0x0383,
+				vx6953_regs.reg_pat[rt].reg_0x0383},
+			{REG_0x0387,
+				vx6953_regs.reg_pat[rt].reg_0x0387},
+			/* Change output size / frame rate */
+			{REG_0x034c,
+				vx6953_regs.reg_pat[rt].reg_0x034c},
+			{REG_0x034d,
+				vx6953_regs.reg_pat[rt].reg_0x034d},
+			{REG_0x034e,
+				vx6953_regs.reg_pat[rt].reg_0x034e},
+			{REG_0x034f,
+				vx6953_regs.reg_pat[rt].reg_0x034f},
+			};
+			/* reset fps_divider */
+			vx6953_ctrl->fps = 30 * Q8;
+			/* stop streaming */
+
+			/* Reset everything first */
+			if (vx6953_i2c_write_b_sensor(0x103, 0x01) < 0) {
+				CDBG("S/W reset failed\n");
+				return rc;
+			} else
+				CDBG("S/W reset successful\n");
+
+			msleep(10);
+
+			CDBG("Init vx6953_sensor_setting standby\n");
+			if (vx6953_i2c_write_b_sensor(REG_MODE_SELECT,
+				MODE_SELECT_STANDBY_MODE) < 0)
+				return rc;
+			/*vx6953_stm5m0edof_delay_msecs_stdby*/
+			msleep(vx6953_stm5m0edof_delay_msecs_stdby);
+
+
+
+			vx6953_patch_for_cut3();
+			rc = vx6953_i2c_write_w_table(&init_tbl[0],
+				ARRAY_SIZE(init_tbl));
+			if (rc < 0)
+				return rc;
+
+			msleep(vx6953_stm5m0edof_delay_msecs_stdby);
+
+			vx6953_i2c_write_b_sensor(0x0b80, 0x00);
+			vx6953_i2c_write_b_sensor(0x3388, 0x03);
+			vx6953_i2c_write_b_sensor(0x3640, 0x00);
+
+			rc = vx6953_i2c_write_w_table(&edof_tbl[0],
+				ARRAY_SIZE(edof_tbl));
+			vx6953_i2c_write_b_sensor(0x3388, 0x00);
+
+		}
+		return rc;
+		case UPDATE_PERIODIC:
+		if (rt == RES_PREVIEW || rt == RES_CAPTURE) {
+			struct vx6953_i2c_reg_conf preview_mode_tbl[] = {
+			{REG_0x0112,
+				vx6953_regs.reg_pat_init[0].reg_0x0112},
+			{0x6003, 0x01},
+			{REG_0x0113,
+				vx6953_regs.reg_pat_init[0].reg_0x0113},
+			{REG_VT_PIX_CLK_DIV,
+				vx6953_regs.reg_pat_init[0].
+				vt_pix_clk_div},
+			{REG_PRE_PLL_CLK_DIV,
+				vx6953_regs.reg_pat_init[0].
+				pre_pll_clk_div},
+			{REG_PLL_MULTIPLIER,
+				vx6953_regs.reg_pat_init[0].
+				pll_multiplier},
+			{REG_OP_PIX_CLK_DIV,
+				vx6953_regs.reg_pat_init[0].
+				op_pix_clk_div},
+
+			{REG_COARSE_INTEGRATION_TIME_HI,
+				vx6953_regs.reg_pat[rt].
+				coarse_integration_time_hi},
+			{REG_COARSE_INTEGRATION_TIME_LO,
+				vx6953_regs.reg_pat[rt].
+				coarse_integration_time_lo},
+			{REG_ANALOGUE_GAIN_CODE_GLOBAL_LO,
+				vx6953_regs.reg_pat[rt].
+				analogue_gain_code_global},
+
+			{REG_0x3210, vx6953_regs.reg_pat[rt].reg_0x3210},
+			{REG_0x0111, vx6953_regs.reg_pat[rt].reg_0x111},
+			{REG_0x3410, vx6953_regs.reg_pat[rt].reg_0x3410},
+
+			{REG_0x3004,
+				vx6953_regs.reg_pat_init[0].reg_0x3004},
+			{REG_0x3006, 0x00},
+			{REG_0x3007,
+				vx6953_regs.reg_pat_init[0].reg_0x3007},
+			{REG_0x301b, 0x29},
+			{REG_0x3036,
+				vx6953_regs.reg_pat[rt].reg_0x3036},
+			{REG_0x3045, vx6953_regs.reg_pat[rt].reg_0x3045},
+			{REG_0x3098, vx6953_regs.reg_pat[rt].reg_0x3098},
+			{REG_0x309d, vx6953_regs.reg_pat[rt].reg_0x309D},
+
+			{REG_0x0900, vx6953_regs.reg_pat[rt].reg_0x0900},
+			{REG_0x0901, vx6953_regs.reg_pat[rt].reg_0x0901},
+			{REG_0x0902, vx6953_regs.reg_pat[rt].reg_0x0902},
+			{REG_0x0383, vx6953_regs.reg_pat[rt].reg_0x0383},
+			{REG_0x0387, vx6953_regs.reg_pat[rt].reg_0x0387},
+
+			{REG_FRAME_LENGTH_LINES_HI,
+				vx6953_regs.reg_pat[rt].
+				frame_length_lines_hi},
+			{REG_FRAME_LENGTH_LINES_LO,
+				vx6953_regs.reg_pat[rt].
+				frame_length_lines_lo},
+			{REG_LINE_LENGTH_PCK_HI,
+				vx6953_regs.reg_pat[rt].
+				line_length_pck_hi},
+			{REG_LINE_LENGTH_PCK_LO,
+				vx6953_regs.reg_pat[rt].
+				line_length_pck_lo},
+			{REG_0x034c,
+				vx6953_regs.reg_pat[rt].reg_0x034c},
+			{REG_0x034d,
+				vx6953_regs.reg_pat[rt].reg_0x034d},
+			{REG_0x034e,
+				vx6953_regs.reg_pat[rt].reg_0x034e},
+			{REG_0x034f,
+				vx6953_regs.reg_pat[rt].reg_0x034f},
+
+			{REG_0x3005, vx6953_regs.reg_pat[rt].reg_0x3005},
+			{REG_0x3010, vx6953_regs.reg_pat[rt].reg_0x3010},
+			{REG_0x3011, vx6953_regs.reg_pat[rt].reg_0x3011},
+			{REG_0x301a, vx6953_regs.reg_pat[rt].reg_0x301a},
+			{REG_0x3030, 0x08},
+			{REG_0x3035, vx6953_regs.reg_pat[rt].reg_0x3035},
+			{REG_0x3041, vx6953_regs.reg_pat[rt].reg_0x3041},
+			{0x3042, vx6953_regs.reg_pat[rt].reg_0x3042},
+
+			{0x200, vx6953_regs.reg_pat[rt].reg_0x0200},
+			{0x201, vx6953_regs.reg_pat[rt].reg_0x0201},
+
+			{0x0b06,
+				vx6953_regs.reg_pat_init[0].reg_0x0b06},
+			/*Single_defect_correct_weight = auto*/
+			{0x0b07,
+				vx6953_regs.reg_pat_init[0].reg_0x0b07},
+			/*Dynamic couplet correction ENABLED*/
+			{0x0b08,
+				vx6953_regs.reg_pat_init[0].reg_0x0b08},
+			/*Dynamic couplet correction weight*/
+			{0x0b09,
+				vx6953_regs.reg_pat_init[0].reg_0x0b09},
+
+			{REG_0x0136,
+				vx6953_regs.reg_pat_init[0].reg_0x0136},
+			{REG_0x0137,
+				vx6953_regs.reg_pat_init[0].reg_0x0137},
+
+			/* EDOF: Estimation settings for Preview mode,
+			Application settings for Capture mode
+			(standard settings - Not tuned) */
+			{REG_0x0b80, vx6953_regs.reg_pat[rt].reg_0x0b80},
+			{REG_0x0b83,
+				vx6953_regs.reg_pat_init[0].reg_0x0b83},
+			{REG_0x0b84,
+				vx6953_regs.reg_pat_init[0].reg_0x0b84},
+			{REG_0x0b85,
+				vx6953_regs.reg_pat_init[0].reg_0x0b85},
+			{REG_0x0b88,
+				vx6953_regs.reg_pat_init[0].reg_0x0b88},
+			{REG_0x0b89,
+				vx6953_regs.reg_pat_init[0].reg_0x0b89},
+			{REG_0x0b8a,
+				vx6953_regs.reg_pat_init[0].reg_0x0b8a},
+			{0x3393, 0x06}, /* man_spec_edof_ctrl_edof*/
+			{0x3394, 0x07}, /* man_spec_edof_ctrl_edof*/
+			};
+
+			struct vx6953_i2c_reg_conf snapshot_mode_tbl[] = {
+			{REG_MODE_SELECT,	MODE_SELECT_STANDBY_MODE},
+			{REG_0x0112,
+				vx6953_regs.reg_pat_init[0].reg_0x0112},
+			{0x6003, 0x01},
+			{REG_0x0113,
+				vx6953_regs.reg_pat_init[0].reg_0x0113},
+			{REG_VT_PIX_CLK_DIV,
+				vx6953_regs.reg_pat_init[0].
+				vt_pix_clk_div},
+			{0x303,	1}, /* VT_SYS_CLK_DIV */
+			{REG_PRE_PLL_CLK_DIV,
+				vx6953_regs.reg_pat_init[0].
+				pre_pll_clk_div},
+			{REG_PLL_MULTIPLIER,
+				vx6953_regs.reg_pat_init[0].
+				pll_multiplier},
+			{REG_OP_PIX_CLK_DIV,
+				vx6953_regs.reg_pat_init[0].
+				op_pix_clk_div},
+			{0x30b,	1},
+			{REG_COARSE_INTEGRATION_TIME_HI,
+				vx6953_regs.reg_pat[rt].
+				coarse_integration_time_hi},
+			{REG_COARSE_INTEGRATION_TIME_LO,
+				vx6953_regs.reg_pat[rt].
+				coarse_integration_time_lo},
+			{REG_ANALOGUE_GAIN_CODE_GLOBAL_LO,
+				vx6953_regs.reg_pat[rt].
+				analogue_gain_code_global},
+			{REG_LINE_LENGTH_PCK_HI,
+				vx6953_regs.reg_pat[rt].
+				line_length_pck_hi},
+			{REG_LINE_LENGTH_PCK_LO,
+				vx6953_regs.reg_pat[rt].
+				line_length_pck_lo},
+			{REG_FRAME_LENGTH_LINES_HI,
+				vx6953_regs.reg_pat[rt].
+				frame_length_lines_hi},
+			{REG_FRAME_LENGTH_LINES_LO,
+				vx6953_regs.reg_pat[rt].
+				frame_length_lines_lo},
+			{REG_0x3210, vx6953_regs.reg_pat[rt].reg_0x3210},
+			{REG_0x0111, vx6953_regs.reg_pat[rt].reg_0x111},
+
+			{REG_0x0b00,
+				vx6953_regs.reg_pat_init[0].reg_0x0b00},
+			{0x3140, 0x01},  /* AV2X2 block enabled */
+			{REG_0x3410, vx6953_regs.reg_pat[rt].reg_0x3410},
+			{0x0b06,
+				vx6953_regs.reg_pat_init[0].reg_0x0b06},
+			/*Single_defect_correct_weight = auto*/
+			{0x0b07,
+				vx6953_regs.reg_pat_init[0].reg_0x0b07},
+			/*Dynamic couplet correction ENABLED*/
+			{0x0b08,
+				vx6953_regs.reg_pat_init[0].reg_0x0b08},
+			/*Dynamic couplet correction weight*/
+			{0x0b09,
+				vx6953_regs.reg_pat_init[0].reg_0x0b09},
+
+
+			{REG_0x3004,
+				vx6953_regs.reg_pat_init[0].reg_0x3004},
+			{REG_0x3006, 0x00},
+			{REG_0x3007,
+				vx6953_regs.reg_pat_init[0].reg_0x3007},
+			{0x301A, 0x6A},
+			{REG_0x301b, 0x29},
+			{REG_0x3036,
+				vx6953_regs.reg_pat[rt].reg_0x3036},
+			{REG_0x3045, vx6953_regs.reg_pat[rt].reg_0x3045},
+			{REG_0x3098, vx6953_regs.reg_pat[rt].reg_0x3098},
+			{REG_0x309d, vx6953_regs.reg_pat[rt].reg_0x309D},
+
+			{REG_0x0136,
+				vx6953_regs.reg_pat_init[0].reg_0x0136},
+			{REG_0x0137,
+				vx6953_regs.reg_pat_init[0].reg_0x0137},
+
+			{REG_0x0b80, vx6953_regs.reg_pat[rt].reg_0x0b80},
+			{REG_0x0b83,
+				vx6953_regs.reg_pat_init[0].reg_0x0b83},
+			{REG_0x0b84,
+				vx6953_regs.reg_pat_init[0].reg_0x0b84},
+			{REG_0x0b85,
+				vx6953_regs.reg_pat_init[0].reg_0x0b85},
+			{REG_0x0b88,
+				vx6953_regs.reg_pat_init[0].reg_0x0b88},
+			{REG_0x0b89,
+				vx6953_regs.reg_pat_init[0].reg_0x0b89},
+			{REG_0x0b8a,
+				vx6953_regs.reg_pat_init[0].reg_0x0b8a},
+			{0x3393, 0x06}, /* man_spec_edof_ctrl*/
+			{0x3394, 0x07}, /* man_spec_edof_ctrl*/
+			};
+			/* stop streaming */
+			msleep(5);
+
+			/* Reset everything first */
+
+			if (vx6953_i2c_write_b_sensor(0x103, 0x01) < 0) {
+				CDBG("S/W reset failed\n");
+				return rc;
+			} else
+				CDBG("S/W reset successful\n");
+
+			msleep(10);
+
+			if (vx6953_i2c_write_b_sensor(REG_MODE_SELECT,
+				MODE_SELECT_STANDBY_MODE) < 0)
+				return rc;
+			/*vx6953_stm5m0edof_delay_msecs_stdby*/
+			msleep(vx6953_stm5m0edof_delay_msecs_stdby);
+
+			vx6953_csi_params.data_format = CSI_8BIT;
+			vx6953_csi_params.lane_cnt = 1;
+			vx6953_csi_params.lane_assign = 0xe4;
+			vx6953_csi_params.dpcm_scheme = 0;
+			vx6953_csi_params.settle_cnt = 7;
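+			/*
+			 * CSI receiver set up for a single data lane carrying
+			 * 8-bit data; lane_assign and settle_cnt are kept as
+			 * provided by the original bring-up values (board
+			 * timing assumptions, not retuned here).
+			 */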
+			rc = msm_camio_csi_config(&vx6953_csi_params);
+			if (rc < 0)
+				CDBG(" config csi controller failed\n");
+
+			msleep(vx6953_stm5m0edof_delay_msecs_stdby);
+
+			vx6953_patch_for_cut3();
+
+			msleep(vx6953_stm5m0edof_delay_msecs_stdby);
+
+			if (rt == RES_PREVIEW) {
+				rc = vx6953_i2c_write_w_table(
+					&preview_mode_tbl[0],
+					ARRAY_SIZE(preview_mode_tbl));
+				if (rc < 0)
+					return rc;
+			}
+			if (rt == RES_CAPTURE) {
+				rc = vx6953_i2c_write_w_table(
+					&snapshot_mode_tbl[0],
+					ARRAY_SIZE(snapshot_mode_tbl));
+				if (rc < 0)
+					return rc;
+			}
+			msleep(vx6953_stm5m0edof_delay_msecs_stdby);
+
+			/* Start sensor streaming */
+			if (vx6953_i2c_write_b_sensor(REG_MODE_SELECT,
+				MODE_SELECT_STREAM) < 0)
+				return rc;
+			msleep(vx6953_stm5m0edof_delay_msecs_stream);
+			/* man_spec_edof_ctrl_tune_smooth_lowlight*/
+			vx6953_i2c_write_b_sensor(0x338d, 0x08);
+			/* man_spec_edof_ctrl_tune_smooth_indoor*/
+			vx6953_i2c_write_b_sensor(0x338e, 0x08);
+			/* man_spec_edof_ctrl_tune_smooth_outdoor*/
+			vx6953_i2c_write_b_sensor(0x338f, 0x00);
+			/*Apply Capture FPGA state machine reset*/
+			vx6953_i2c_write_b_sensor(0x16, 0x00);
+			msleep(100);
+			vx6953_i2c_write_b_sensor(0x16, 0x01);
+
+			if (vx6953_i2c_read(0x0005, &frame_cnt, 1) < 0)
+				return rc;
+
+			while (frame_cnt == 0xFF) {
+				if (vx6953_i2c_read(0x0005, &frame_cnt, 1) < 0)
+					return rc;
+				CDBG("frame_cnt=%d", frame_cnt);
+				msleep(10);
+			}
+		}
+		return rc;
+		default:
+			return rc;
+		}
+	} else {
+		switch (update_type) {
+		case REG_INIT:
+		if (rt == RES_PREVIEW || rt == RES_CAPTURE) {
+			struct vx6953_i2c_reg_conf init_tbl[] = {
+			{REG_0x0112,
+				vx6953_regs.reg_pat_init[0].reg_0x0112},
+			{REG_0x0113,
+				vx6953_regs.reg_pat_init[0].reg_0x0113},
+			{REG_VT_PIX_CLK_DIV,
+				vx6953_regs.reg_pat_init[0].
+				vt_pix_clk_div},
+			{REG_PRE_PLL_CLK_DIV,
+				vx6953_regs.reg_pat_init[0].
+				pre_pll_clk_div},
+			{REG_PLL_MULTIPLIER,
+				vx6953_regs.reg_pat_init[0].
+				pll_multiplier},
+			{REG_OP_PIX_CLK_DIV,
+				vx6953_regs.reg_pat_init[0].
+				op_pix_clk_div},
+			{REG_COARSE_INTEGRATION_TIME_HI,
+				vx6953_regs.reg_pat[rt].
+				coarse_integration_time_hi},
+			{REG_COARSE_INTEGRATION_TIME_LO,
+				vx6953_regs.reg_pat[rt].
+				coarse_integration_time_lo},
+			{REG_ANALOGUE_GAIN_CODE_GLOBAL_LO,
+				vx6953_regs.reg_pat[rt].
+				analogue_gain_code_global},
+			{REG_0x3030,
+				vx6953_regs.reg_pat_init[0].reg_0x3030},
+			/* 953 specific registers */
+			{REG_0x0111,
+				vx6953_regs.reg_pat_init[0].reg_0x0111},
+			{REG_0x0b00,
+				vx6953_regs.reg_pat_init[0].reg_0x0b00},
+			{REG_0x3001,
+				vx6953_regs.reg_pat_init[0].reg_0x3001},
+			{REG_0x3004,
+				vx6953_regs.reg_pat_init[0].reg_0x3004},
+			{REG_0x3007,
+				vx6953_regs.reg_pat_init[0].reg_0x3007},
+			{REG_0x3016,
+				vx6953_regs.reg_pat_init[0].reg_0x3016},
+			{REG_0x301d,
+				vx6953_regs.reg_pat_init[0].reg_0x301d},
+			{REG_0x317e,
+				vx6953_regs.reg_pat_init[0].reg_0x317e},
+			{REG_0x317f,
+				vx6953_regs.reg_pat_init[0].reg_0x317f},
+			{REG_0x3400,
+				vx6953_regs.reg_pat_init[0].reg_0x3400},
+			/* DEFCOR settings */
+			/*Single Defect Correction Weight DISABLE*/
+			{0x0b06,
+				vx6953_regs.reg_pat_init[0].reg_0x0b06},
+			/*Single_defect_correct_weight = auto*/
+			{0x0b07,
+				vx6953_regs.reg_pat_init[0].reg_0x0b07},
+			/*Dynamic couplet correction ENABLED*/
+			{0x0b08,
+				vx6953_regs.reg_pat_init[0].reg_0x0b08},
+			/*Dynamic couplet correction weight*/
+			{0x0b09,
+				vx6953_regs.reg_pat_init[0].reg_0x0b09},
+			/* Clock Setup */
+			/* Tell sensor ext clk is 24MHz*/
+			{0x0136,
+				vx6953_regs.reg_pat_init[0].reg_0x0136},
+			{0x0137,
+				vx6953_regs.reg_pat_init[0].reg_0x0137},
+			/* The white balance gains must be written
+			to the sensor every frame. */
+			/* Edof */
+			{REG_0x0b83,
+				vx6953_regs.reg_pat_init[0].reg_0x0b83},
+			{REG_0x0b84,
+				vx6953_regs.reg_pat_init[0].reg_0x0b84},
+			{0x0b85,
+				vx6953_regs.reg_pat_init[0].reg_0x0b85},
+			{0x0b88,
+				vx6953_regs.reg_pat_init[0].reg_0x0b88},
+			{0x0b89,
+				vx6953_regs.reg_pat_init[0].reg_0x0b89},
+			{REG_0x0b8a,
+				vx6953_regs.reg_pat_init[0].reg_0x0b8a},
+			/* Mode specific registers */
+			{REG_FRAME_LENGTH_LINES_HI,
+				vx6953_regs.reg_pat[rt].
+				frame_length_lines_hi},
+			{REG_FRAME_LENGTH_LINES_LO,
+				vx6953_regs.reg_pat[rt].
+				frame_length_lines_lo},
+			{REG_LINE_LENGTH_PCK_HI,
+				vx6953_regs.reg_pat[rt].
+				line_length_pck_hi},
+			{REG_LINE_LENGTH_PCK_LO,
+				vx6953_regs.reg_pat[rt].
+				line_length_pck_lo},
+			{REG_0x3005,
+				vx6953_regs.reg_pat[rt].reg_0x3005},
+			{0x3010,
+				vx6953_regs.reg_pat[rt].reg_0x3010},
+			{REG_0x3011,
+				vx6953_regs.reg_pat[rt].reg_0x3011},
+			{REG_0x301a,
+				vx6953_regs.reg_pat[rt].reg_0x301a},
+			{REG_0x3035,
+				vx6953_regs.reg_pat[rt].reg_0x3035},
+			{REG_0x3036,
+				vx6953_regs.reg_pat[rt].reg_0x3036},
+			{REG_0x3041,
+				vx6953_regs.reg_pat[rt].reg_0x3041},
+			{0x3042,
+				vx6953_regs.reg_pat[rt].reg_0x3042},
+			{REG_0x3045,
+				vx6953_regs.reg_pat[rt].reg_0x3045},
+			/*EDOF: Estimation settings for Preview mode
+			Application settings for capture mode
+			(standard settings - Not tuned) */
+			{REG_0x0b80,
+				vx6953_regs.reg_pat[rt].reg_0x0b80},
+			{REG_0x0900,
+				vx6953_regs.reg_pat[rt].reg_0x0900},
+			{REG_0x0901,
+				vx6953_regs.reg_pat[rt].reg_0x0901},
+			{REG_0x0902,
+				vx6953_regs.reg_pat[rt].reg_0x0902},
+			{REG_0x0383,
+				vx6953_regs.reg_pat[rt].reg_0x0383},
+			{REG_0x0387,
+				vx6953_regs.reg_pat[rt].reg_0x0387},
+			/* Change output size / frame rate */
+			{REG_0x034c,
+				vx6953_regs.reg_pat[rt].reg_0x034c},
+			{REG_0x034d,
+				vx6953_regs.reg_pat[rt].reg_0x034d},
+			{REG_0x034e,
+				vx6953_regs.reg_pat[rt].reg_0x034e},
+			{REG_0x034f,
+				vx6953_regs.reg_pat[rt].reg_0x034f},
+			{REG_0x1716,
+				vx6953_regs.reg_pat[rt].reg_0x1716},
+			{REG_0x1717,
+				vx6953_regs.reg_pat[rt].reg_0x1717},
+			{REG_0x1718,
+				vx6953_regs.reg_pat[rt].reg_0x1718},
+			{REG_0x1719,
+				vx6953_regs.reg_pat[rt].reg_0x1719},
+			};
+			/* reset fps_divider */
+			vx6953_ctrl->fps = 30 * Q8;
+			/* stop streaming */
+
+			/* Reset everything first */
+			if (vx6953_i2c_write_b_sensor(0x103, 0x01) < 0) {
+				CDBG("S/W reset failed\n");
+				return rc;
+			} else
+				CDBG("S/W reset successful\n");
+
+			msleep(10);
+
+			CDBG("Init vx6953_sensor_setting standby\n");
+			if (vx6953_i2c_write_b_sensor(REG_MODE_SELECT,
+				MODE_SELECT_STANDBY_MODE) < 0)
+				return rc;
+				/*vx6953_stm5m0edof_delay_msecs_stdby*/
+			msleep(vx6953_stm5m0edof_delay_msecs_stdby);
+			vx6953_patch_for_cut2();
+			rc = vx6953_i2c_write_w_table(&init_tbl[0],
+				ARRAY_SIZE(init_tbl));
+			if (rc < 0)
+				return rc;
+			msleep(vx6953_stm5m0edof_delay_msecs_stdby);
+		}
+		return rc;
+		case UPDATE_PERIODIC:
+		if (rt == RES_PREVIEW || rt == RES_CAPTURE) {
+			struct vx6953_i2c_reg_conf init_mode_tbl[] =  {
+			{REG_0x0112,
+				vx6953_regs.reg_pat_init[0].reg_0x0112},
+			{REG_0x0113,
+				vx6953_regs.reg_pat_init[0].reg_0x0113},
+			{REG_VT_PIX_CLK_DIV,
+				vx6953_regs.reg_pat_init[0].
+				vt_pix_clk_div},
+			{REG_PRE_PLL_CLK_DIV,
+				vx6953_regs.reg_pat_init[0].
+				pre_pll_clk_div},
+			{REG_PLL_MULTIPLIER,
+				vx6953_regs.reg_pat_init[0].
+				pll_multiplier},
+			{REG_OP_PIX_CLK_DIV,
+				vx6953_regs.reg_pat_init[0].
+				op_pix_clk_div},
+			{REG_COARSE_INTEGRATION_TIME_HI,
+				vx6953_regs.reg_pat[rt].
+				coarse_integration_time_hi},
+			{REG_COARSE_INTEGRATION_TIME_LO,
+				vx6953_regs.reg_pat[rt].
+				coarse_integration_time_lo},
+			{REG_ANALOGUE_GAIN_CODE_GLOBAL_LO,
+				vx6953_regs.reg_pat[rt].
+				analogue_gain_code_global},
+			{REG_0x3030,
+				vx6953_regs.reg_pat_init[0].reg_0x3030},
+			/* 953 specific registers */
+			{REG_0x0111,
+				vx6953_regs.reg_pat_init[0].reg_0x0111},
+			{REG_0x0b00,
+				vx6953_regs.reg_pat_init[0].reg_0x0b00},
+			{REG_0x3001,
+				vx6953_regs.reg_pat_init[0].reg_0x3001},
+			{REG_0x3004,
+				vx6953_regs.reg_pat_init[0].reg_0x3004},
+			{REG_0x3007,
+				vx6953_regs.reg_pat_init[0].reg_0x3007},
+			{REG_0x3016,
+				vx6953_regs.reg_pat_init[0].reg_0x3016},
+			{REG_0x301d,
+				vx6953_regs.reg_pat_init[0].reg_0x301d},
+			{REG_0x317e,
+				vx6953_regs.reg_pat_init[0].reg_0x317e},
+			{REG_0x317f,
+				vx6953_regs.reg_pat_init[0].reg_0x317f},
+			{REG_0x3400,
+				vx6953_regs.reg_pat_init[0].reg_0x3400},
+			{0x0b06,
+				vx6953_regs.reg_pat_init[0].reg_0x0b06},
+			/*Single_defect_correct_weight = auto*/
+			{0x0b07,
+				vx6953_regs.reg_pat_init[0].reg_0x0b07},
+			/*Dynamic couplet correction ENABLED*/
+			{0x0b08,
+				vx6953_regs.reg_pat_init[0].reg_0x0b08},
+			/*Dynamic couplet correction weight*/
+			{0x0b09,
+				vx6953_regs.reg_pat_init[0].reg_0x0b09},
+			/* Clock Setup */
+			/* Tell sensor ext clk is 24MHz*/
+			{0x0136,
+				vx6953_regs.reg_pat_init[0].reg_0x0136},
+			{0x0137,
+				vx6953_regs.reg_pat_init[0].reg_0x0137},
+			/* The white balance gains must be written
+			to the sensor every frame. */
+			/* Edof */
+			{REG_0x0b83,
+				vx6953_regs.reg_pat_init[0].reg_0x0b83},
+			{REG_0x0b84,
+				vx6953_regs.reg_pat_init[0].reg_0x0b84},
+			{0x0b85,
+				vx6953_regs.reg_pat_init[0].reg_0x0b85},
+			{0x0b88,
+				vx6953_regs.reg_pat_init[0].reg_0x0b88},
+			{0x0b89,
+				vx6953_regs.reg_pat_init[0].reg_0x0b89},
+			{REG_0x0b8a,
+				vx6953_regs.reg_pat_init[0].reg_0x0b8a},
+			/* Mode specific registers */
+			{REG_FRAME_LENGTH_LINES_HI,
+				vx6953_regs.reg_pat[rt].
+				frame_length_lines_hi},
+			{REG_FRAME_LENGTH_LINES_LO,
+				vx6953_regs.reg_pat[rt].
+				frame_length_lines_lo},
+			{REG_LINE_LENGTH_PCK_HI,
+				vx6953_regs.reg_pat[rt].
+				line_length_pck_hi},
+			{REG_LINE_LENGTH_PCK_LO,
+				vx6953_regs.reg_pat[rt].
+				line_length_pck_lo},
+			{REG_0x3005,
+				vx6953_regs.reg_pat[rt].reg_0x3005},
+			{0x3010,
+				vx6953_regs.reg_pat[rt].reg_0x3010},
+			{REG_0x3011,
+				vx6953_regs.reg_pat[rt].reg_0x3011},
+			{REG_0x301a,
+				vx6953_regs.reg_pat[rt].reg_0x301a},
+			{REG_0x3035,
+				vx6953_regs.reg_pat[rt].reg_0x3035},
+			{REG_0x3036,
+				vx6953_regs.reg_pat[rt].reg_0x3036},
+			{REG_0x3041,
+				vx6953_regs.reg_pat[rt].reg_0x3041},
+			{0x3042,
+				vx6953_regs.reg_pat[rt].reg_0x3042},
+			{REG_0x3045,
+				vx6953_regs.reg_pat[rt].reg_0x3045},
+			/*EDOF: Estimation settings for Preview mode
+			Application settings for capture mode
+			(standard settings - Not tuned) */
+			{REG_0x0b80,
+				vx6953_regs.reg_pat[rt].reg_0x0b80},
+			{REG_0x0900,
+				vx6953_regs.reg_pat[rt].reg_0x0900},
+			{REG_0x0901,
+				vx6953_regs.reg_pat[rt].reg_0x0901},
+			{REG_0x0902,
+				vx6953_regs.reg_pat[rt].reg_0x0902},
+			{REG_0x0383,
+				vx6953_regs.reg_pat[rt].reg_0x0383},
+			{REG_0x0387,
+				vx6953_regs.reg_pat[rt].reg_0x0387},
+			/* Change output size / frame rate */
+			{REG_0x034c,
+				vx6953_regs.reg_pat[rt].reg_0x034c},
+			{REG_0x034d,
+				vx6953_regs.reg_pat[rt].reg_0x034d},
+			{REG_0x034e,
+				vx6953_regs.reg_pat[rt].reg_0x034e},
+			{REG_0x034f,
+				vx6953_regs.reg_pat[rt].reg_0x034f},
+			{REG_0x1716,
+				vx6953_regs.reg_pat[rt].reg_0x1716},
+			{REG_0x1717,
+				vx6953_regs.reg_pat[rt].reg_0x1717},
+			{REG_0x1718,
+				vx6953_regs.reg_pat[rt].reg_0x1718},
+			{REG_0x1719,
+				vx6953_regs.reg_pat[rt].reg_0x1719},
+			};
+			struct vx6953_i2c_reg_conf mode_tbl[] = {
+			{REG_0x0112,
+				vx6953_regs.reg_pat_init[0].reg_0x0112},
+			{REG_0x0113,
+				vx6953_regs.reg_pat_init[0].reg_0x0113},
+			{REG_VT_PIX_CLK_DIV,
+				vx6953_regs.reg_pat_init[0].
+				vt_pix_clk_div},
+			{REG_PRE_PLL_CLK_DIV,
+				vx6953_regs.reg_pat_init[0].
+				pre_pll_clk_div},
+			{REG_PLL_MULTIPLIER,
+				vx6953_regs.reg_pat_init[0].
+				pll_multiplier},
+			{REG_OP_PIX_CLK_DIV,
+				vx6953_regs.reg_pat_init[0].
+				op_pix_clk_div},
+			/* Mode specific registers */
+			{REG_FRAME_LENGTH_LINES_HI,
+				vx6953_regs.reg_pat[rt].frame_length_lines_hi},
+			{REG_FRAME_LENGTH_LINES_LO,
+				vx6953_regs.reg_pat[rt].frame_length_lines_lo},
+			{REG_LINE_LENGTH_PCK_HI,
+				vx6953_regs.reg_pat[rt].line_length_pck_hi},
+			{REG_LINE_LENGTH_PCK_LO,
+				vx6953_regs.reg_pat[rt].line_length_pck_lo},
+			{REG_0x3005, vx6953_regs.reg_pat[rt].reg_0x3005},
+			{0x3010, vx6953_regs.reg_pat[rt].reg_0x3010},
+			{REG_0x3011, vx6953_regs.reg_pat[rt].reg_0x3011},
+			{REG_0x301a, vx6953_regs.reg_pat[rt].reg_0x301a},
+			{REG_0x3035, vx6953_regs.reg_pat[rt].reg_0x3035},
+			{REG_0x3036, vx6953_regs.reg_pat[rt].reg_0x3036},
+			{REG_0x3041, vx6953_regs.reg_pat[rt].reg_0x3041},
+			{0x3042, vx6953_regs.reg_pat[rt].reg_0x3042},
+			{REG_0x3045, vx6953_regs.reg_pat[rt].reg_0x3045},
+			/*EDOF: Estimation settings for Preview mode
+			Application settings for capture
+			mode(standard settings - Not tuned) */
+			{REG_0x0b80, vx6953_regs.reg_pat[rt].reg_0x0b80},
+			{REG_0x0900, vx6953_regs.reg_pat[rt].reg_0x0900},
+			{REG_0x0901, vx6953_regs.reg_pat[rt].reg_0x0901},
+			{REG_0x0902, vx6953_regs.reg_pat[rt].reg_0x0902},
+			{REG_0x0383, vx6953_regs.reg_pat[rt].reg_0x0383},
+			{REG_0x0387, vx6953_regs.reg_pat[rt].reg_0x0387},
+			/* Change output size / frame rate */
+			{REG_0x034c, vx6953_regs.reg_pat[rt].reg_0x034c},
+			{REG_0x034d, vx6953_regs.reg_pat[rt].reg_0x034d},
+			{REG_0x034e, vx6953_regs.reg_pat[rt].reg_0x034e},
+			{REG_0x034f, vx6953_regs.reg_pat[rt].reg_0x034f},
+			/*{0x200, vx6953_regs.reg_pat[rt].reg_0x0200},
+			{0x201, vx6953_regs.reg_pat[rt].reg_0x0201},*/
+			{REG_0x1716, vx6953_regs.reg_pat[rt].reg_0x1716},
+			{REG_0x1717, vx6953_regs.reg_pat[rt].reg_0x1717},
+			{REG_0x1718, vx6953_regs.reg_pat[rt].reg_0x1718},
+			{REG_0x1719, vx6953_regs.reg_pat[rt].reg_0x1719},
+			};
+			/* stop streaming */
+			msleep(5);
+
+			/* Reset everything first */
+			if (vx6953_i2c_write_b_sensor(0x103, 0x01) < 0) {
+				CDBG("S/W reset failed\n");
+				return rc;
+			} else
+				CDBG("S/W reset successful\n");
+
+			msleep(10);
+
+			if (vx6953_i2c_write_b_sensor(REG_MODE_SELECT,
+				MODE_SELECT_STANDBY_MODE) < 0)
+				return rc;
+			/*vx6953_stm5m0edof_delay_msecs_stdby*/
+			msleep(vx6953_stm5m0edof_delay_msecs_stdby);
+
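+			/*
+			 * Configure the MIPI CSI receiver before loading the
+			 * mode tables: 8-bit data format, a single data lane,
+			 * lane map 0xe4 (presumably the default/identity lane
+			 * order), DPCM compression off, settle count 7.
+			 */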
+			vx6953_csi_params.data_format = CSI_8BIT;
+			vx6953_csi_params.lane_cnt = 1;
+			vx6953_csi_params.lane_assign = 0xe4;
+			vx6953_csi_params.dpcm_scheme = 0;
+			vx6953_csi_params.settle_cnt = 7;
+			rc = msm_camio_csi_config(&vx6953_csi_params);
+			if (rc < 0)
+				CDBG(" config csi controller failed\n");
+
+			msleep(vx6953_stm5m0edof_delay_msecs_stdby);
+
+			vx6953_patch_for_cut2();
+			rc = vx6953_i2c_write_w_table(&init_mode_tbl[0],
+				ARRAY_SIZE(init_mode_tbl));
+			if (rc < 0)
+				return rc;
+
+			msleep(vx6953_stm5m0edof_delay_msecs_stdby);
+
+			rc = vx6953_i2c_write_w_table(&mode_tbl[0],
+				ARRAY_SIZE(mode_tbl));
+			if (rc < 0)
+				return rc;
+
+			msleep(vx6953_stm5m0edof_delay_msecs_stdby);
+
+			/* Start sensor streaming */
+			if (vx6953_i2c_write_b_sensor(REG_MODE_SELECT,
+				MODE_SELECT_STREAM) < 0)
+				return rc;
+			msleep(vx6953_stm5m0edof_delay_msecs_stream);
+
+			if (vx6953_i2c_read(0x0005, &frame_cnt, 1) < 0)
+				return rc;
+
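+			/*
+			 * Poll the frame counter (register 0x0005) until it
+			 * moves off 0xFF, which the driver treats as "no frame
+			 * produced yet", to confirm that streaming has really
+			 * started.
+			 */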
+			while (frame_cnt == 0xFF) {
+				if (vx6953_i2c_read(0x0005, &frame_cnt, 1) < 0)
+					return rc;
+				CDBG("frame_cnt=%d", frame_cnt);
+				msleep(10);
+			}
+		}
+		return rc;
+		default:
+		return rc;
+	}
+	}
+	return rc;
+}
+
+
+static int32_t vx6953_video_config(int mode)
+{
+
+	int32_t	rc = 0;
+	int	rt;
+	/* change sensor resolution	if needed */
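+	/*
+	 * The standby delay below works out to roughly two frame periods in
+	 * milliseconds for preview and one frame period for capture: fps is
+	 * stored in Q8 fixed point and fps_divider in Q10, hence the Q8/Q10
+	 * scaling.
+	 */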
+	if (vx6953_ctrl->prev_res == QTR_SIZE) {
+		rt = RES_PREVIEW;
+		vx6953_stm5m0edof_delay_msecs_stdby	=
+			((((2 * 1000 * vx6953_ctrl->fps_divider) /
+			vx6953_ctrl->fps) * Q8) / Q10) + 1;
+	} else {
+		rt = RES_CAPTURE;
+		vx6953_stm5m0edof_delay_msecs_stdby	=
+			((((1000 * vx6953_ctrl->fps_divider) /
+			vx6953_ctrl->fps) * Q8) / Q10) + 1;
+	}
+	if (vx6953_sensor_setting(UPDATE_PERIODIC, rt) < 0)
+		return rc;
+	if (vx6953_ctrl->set_test) {
+		if (vx6953_test(vx6953_ctrl->set_test) < 0)
+			return	rc;
+	}
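+	/*
+	 * Preview runs the EDoF block in estimation mode; the snapshot and
+	 * raw-snapshot paths switch it to application mode instead.
+	 */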
+	vx6953_ctrl->edof_mode = VX6953_EDOF_ESTIMATION;
+	rc = vx6953_enable_edof(vx6953_ctrl->edof_mode);
+	if (rc < 0)
+		return rc;
+	vx6953_ctrl->curr_res = vx6953_ctrl->prev_res;
+	vx6953_ctrl->sensormode = mode;
+	return rc;
+}
+
+static int32_t vx6953_snapshot_config(int mode)
+{
+	int32_t rc = 0;
+	int rt;
+	/*change sensor resolution if needed */
+	if (vx6953_ctrl->curr_res != vx6953_ctrl->pict_res) {
+		if (vx6953_ctrl->pict_res == QTR_SIZE) {
+			rt = RES_PREVIEW;
+			vx6953_stm5m0edof_delay_msecs_stdby =
+				((((2 * 1000 * vx6953_ctrl->fps_divider) /
+				vx6953_ctrl->fps) * Q8) / Q10) + 1;
+		} else {
+			rt = RES_CAPTURE;
+			vx6953_stm5m0edof_delay_msecs_stdby =
+				((((1000 * vx6953_ctrl->fps_divider) /
+				vx6953_ctrl->fps) * Q8) / Q10) + 1;
+		}
+		if (vx6953_sensor_setting(UPDATE_PERIODIC, rt) < 0)
+			return rc;
+	}
+
+	vx6953_ctrl->edof_mode = VX6953_EDOF_APPLICATION;
+	if (vx6953_enable_edof(vx6953_ctrl->edof_mode) < 0)
+		return rc;
+	vx6953_ctrl->curr_res = vx6953_ctrl->pict_res;
+	vx6953_ctrl->sensormode = mode;
+	return rc;
+} /*end of vx6953_snapshot_config*/
+
+static int32_t vx6953_raw_snapshot_config(int mode)
+{
+	int32_t rc = 0;
+	int rt;
+	/* change sensor resolution if needed */
+	if (vx6953_ctrl->curr_res != vx6953_ctrl->pict_res) {
+		if (vx6953_ctrl->pict_res == QTR_SIZE) {
+			rt = RES_PREVIEW;
+			vx6953_stm5m0edof_delay_msecs_stdby =
+				((((2 * 1000 * vx6953_ctrl->fps_divider)/
+				vx6953_ctrl->fps) * Q8) / Q10) + 1;
+		} else {
+			rt = RES_CAPTURE;
+			vx6953_stm5m0edof_delay_msecs_stdby =
+				((((1000 * vx6953_ctrl->fps_divider)/
+				vx6953_ctrl->fps) * Q8) / Q10) + 1;
+		}
+		if (vx6953_sensor_setting(UPDATE_PERIODIC, rt) < 0)
+			return rc;
+	}
+	vx6953_ctrl->edof_mode = VX6953_EDOF_APPLICATION;
+	if (vx6953_enable_edof(vx6953_ctrl->edof_mode) < 0)
+		return rc;
+	vx6953_ctrl->curr_res = vx6953_ctrl->pict_res;
+	vx6953_ctrl->sensormode = mode;
+	return rc;
+} /*end of vx6953_raw_snapshot_config*/
+static int32_t vx6953_set_sensor_mode(int mode,
+	int res)
+{
+	int32_t rc = 0;
+	switch (mode) {
+	case SENSOR_PREVIEW_MODE:
+		rc = vx6953_video_config(mode);
+		break;
+	case SENSOR_SNAPSHOT_MODE:
+		rc = vx6953_snapshot_config(mode);
+		break;
+	case SENSOR_RAW_SNAPSHOT_MODE:
+		rc = vx6953_raw_snapshot_config(mode);
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+static int32_t vx6953_power_down(void)
+{
+	vx6953_i2c_write_b_sensor(REG_MODE_SELECT,
+	MODE_SELECT_STANDBY_MODE);
+	return 0;
+}
+
+
+static int vx6953_probe_init_done(const struct msm_camera_sensor_info *data)
+{
+	gpio_free(data->sensor_reset);
+	kfree(vx6953_ctrl);
+	vx6953_ctrl = NULL;
+	return 0;
+}
+static int vx6953_probe_init_sensor(const struct msm_camera_sensor_info *data)
+{
+	unsigned short revision_number;
+	int32_t rc = 0;
+	unsigned short chipidl, chipidh;
+	CDBG("%s: %d\n", __func__, __LINE__);
+	rc = gpio_request(data->sensor_reset, "vx6953");
+	CDBG(" vx6953_probe_init_sensor\n");
+	if (!rc) {
+		CDBG("sensor_reset = %d\n", rc);
+		CDBG(" vx6953_probe_init_sensor 1\n");
+		gpio_direction_output(data->sensor_reset, 0);
+		msleep(50);
+		CDBG(" vx6953_probe_init_sensor 1\n");
+		gpio_direction_output(data->sensor_reset, 1);
+		msleep(13);
+	} else {
+		CDBG(" vx6953_probe_init_sensor 2\n");
+		goto init_probe_done;
+	}
+	msleep(20);
+	CDBG(" vx6953_probe_init_sensor is called\n");
+	/* 3. Read sensor Model ID: */
+	rc = vx6953_i2c_read(0x0000, &chipidh, 1);
+	if (rc < 0) {
+		CDBG(" vx6953_probe_init_sensor 3\n");
+		goto init_probe_fail;
+	}
+	rc = vx6953_i2c_read(0x0001, &chipidl, 1);
+	if (rc < 0) {
+		CDBG(" vx6953_probe_init_sensor4\n");
+		goto init_probe_fail;
+	}
+	CDBG("vx6953 model_id = 0x%x  0x%x\n", chipidh, chipidl);
+	/* 4. Compare sensor ID to VX6953 ID: */
+	if (chipidh != 0x03 || chipidl != 0xB9) {
+		rc = -ENODEV;
+		CDBG("vx6953_probe_init_sensor fail chip id does not match\n");
+		goto init_probe_fail;
+	}
+
+	vx6953_ctrl = kzalloc(sizeof(struct vx6953_ctrl_t), GFP_KERNEL);
+	if (!vx6953_ctrl) {
+		CDBG("vx6953_init failed!\n");
+		rc = -ENOMEM;
+		goto init_probe_fail;
+	}
+	vx6953_ctrl->fps_divider = 1 * 0x00000400;
+	vx6953_ctrl->pict_fps_divider = 1 * 0x00000400;
+	vx6953_ctrl->set_test = TEST_OFF;
+	vx6953_ctrl->prev_res = QTR_SIZE;
+	vx6953_ctrl->pict_res = FULL_SIZE;
+	vx6953_ctrl->curr_res = INVALID_SIZE;
+	vx6953_ctrl->sensor_type = VX6953_STM5M0EDOF_CUT_2;
+	vx6953_ctrl->edof_mode = VX6953_EDOF_ESTIMATION;
+
+	if (data)
+		vx6953_ctrl->sensordata = data;
+
+	if (vx6953_i2c_read(0x0002, &revision_number, 1) < 0)
+		return rc;
+	CDBG("sensor revision number major = 0x%x\n", revision_number);
+	if (vx6953_i2c_read(0x0018, &revision_number, 1) < 0)
+		return rc;
+	CDBG("sensor revision number = 0x%x\n", revision_number);
+	if (revision_number == VX6953_REVISION_NUMBER_CUT3) {
+		vx6953_ctrl->sensor_type = VX6953_STM5M0EDOF_CUT_3;
+		CDBG("VX6953 EDof Cut 3.0 sensor\n ");
+	} else if (revision_number == VX6953_REVISION_NUMBER_CUT2) {
+		vx6953_ctrl->sensor_type = VX6953_STM5M0EDOF_CUT_2;
+		CDBG("VX6953 EDof Cut 2.0 sensor\n ");
+	} else {/* Cut1.0 reads 0x00 for register 0x0018*/
+		vx6953_ctrl->sensor_type = VX6953_STM5M0EDOF_CUT_1;
+		CDBG("VX6953 EDof Cut 1.0 sensor\n ");
+	}
+
+	if (vx6953_ctrl->prev_res == QTR_SIZE) {
+		if (vx6953_sensor_setting(REG_INIT, RES_PREVIEW) < 0)
+			goto init_probe_fail;
+	} else {
+		if (vx6953_sensor_setting(REG_INIT, RES_CAPTURE) < 0)
+			goto init_probe_fail;
+	}
+
+	goto init_probe_done;
+init_probe_fail:
+	CDBG(" vx6953_probe_init_sensor fails\n");
+	gpio_direction_output(data->sensor_reset, 0);
+	vx6953_probe_init_done(data);
+init_probe_done:
+	CDBG(" vx6953_probe_init_sensor finishes\n");
+	return rc;
+}
+/* camsensor_iu060f_vx6953_reset */
+int vx6953_sensor_open_init(const struct msm_camera_sensor_info *data)
+{
+	unsigned short revision_number;
+	int32_t rc = 0;
+
+	CDBG("%s: %d\n", __func__, __LINE__);
+	CDBG("Calling vx6953_sensor_open_init\n");
+	rc = gpio_request(data->sensor_reset, "vx6953");
+	if (rc)
+		CDBG("vx6953 gpio_request fail\n");
+
+	vx6953_ctrl = kzalloc(sizeof(struct vx6953_ctrl_t), GFP_KERNEL);
+	if (!vx6953_ctrl) {
+		CDBG("vx6953_init failed!\n");
+		rc = -ENOMEM;
+		goto init_done;
+	}
+	vx6953_ctrl->fps_divider = 1 * 0x00000400;
+	vx6953_ctrl->pict_fps_divider = 1 * 0x00000400;
+	vx6953_ctrl->set_test = TEST_OFF;
+	vx6953_ctrl->prev_res = QTR_SIZE;
+	vx6953_ctrl->pict_res = FULL_SIZE;
+	vx6953_ctrl->curr_res = INVALID_SIZE;
+	vx6953_ctrl->sensor_type = VX6953_STM5M0EDOF_CUT_2;
+	vx6953_ctrl->edof_mode = VX6953_EDOF_ESTIMATION;
+	if (data)
+		vx6953_ctrl->sensordata = data;
+	if (rc < 0) {
+		CDBG("Calling vx6953_sensor_open_init fail1\n");
+		return rc;
+	}
+	CDBG("%s: %d\n", __func__, __LINE__);
+	/* enable mclk first */
+	msm_camio_clk_rate_set(VX6953_STM5M0EDOF_DEFAULT_MASTER_CLK_RATE);
+	CDBG("%s: %d\n", __func__, __LINE__);
+	if (vx6953_i2c_read(0x0002, &revision_number, 1) < 0)
+		return rc;
+	CDBG("sensor revision number major = 0x%x\n", revision_number);
+	if (vx6953_i2c_read(0x0018, &revision_number, 1) < 0)
+		return rc;
+	CDBG("sensor revision number = 0x%x\n", revision_number);
+	if (revision_number == VX6953_REVISION_NUMBER_CUT3) {
+		vx6953_ctrl->sensor_type = VX6953_STM5M0EDOF_CUT_3;
+		CDBG("VX6953 EDof Cut 3.0 sensor\n ");
+	} else if (revision_number == VX6953_REVISION_NUMBER_CUT2) {
+		vx6953_ctrl->sensor_type = VX6953_STM5M0EDOF_CUT_2;
+		CDBG("VX6953 EDof Cut 2.0 sensor\n ");
+	} else {/* Cut1.0 reads 0x00 for register 0x0018*/
+		vx6953_ctrl->sensor_type = VX6953_STM5M0EDOF_CUT_1;
+		CDBG("VX6953 EDof Cut 1.0 sensor\n ");
+	}
+
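+	/* Default frame rate: 30 fps, stored in Q8 fixed point. */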
+	vx6953_ctrl->fps = 30*Q8;
+	if (rc < 0)
+		goto init_fail;
+	else
+		goto init_done;
+init_fail:
+	CDBG("init_fail\n");
+	gpio_direction_output(data->sensor_reset, 0);
+	vx6953_probe_init_done(data);
+init_done:
+	CDBG("init_done\n");
+	return rc;
+} /* end of vx6953_sensor_open_init */
+
+static int vx6953_init_client(struct i2c_client *client)
+{
+	/* Initialize the MSM_CAMI2C Chip */
+	init_waitqueue_head(&vx6953_wait_queue);
+	return 0;
+}
+
+static const struct i2c_device_id vx6953_i2c_id[] = {
+	{"vx6953", 0},
+	{ }
+};
+
+static int vx6953_i2c_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	int rc = 0;
+	CDBG("vx6953_probe called!\n");
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		CDBG("i2c_check_functionality failed\n");
+		goto probe_failure;
+	}
+
+	vx6953_sensorw = kzalloc(sizeof(struct vx6953_work_t), GFP_KERNEL);
+	if (!vx6953_sensorw) {
+		CDBG("kzalloc failed.\n");
+		rc = -ENOMEM;
+		goto probe_failure;
+	}
+
+	i2c_set_clientdata(client, vx6953_sensorw);
+	vx6953_init_client(client);
+	vx6953_client = client;
+
+	msleep(50);
+
+	CDBG("vx6953_probe succeeded! rc = %d\n", rc);
+	return 0;
+
+probe_failure:
+	CDBG("vx6953_probe failed! rc = %d\n", rc);
+	return rc;
+}
+
+static int vx6953_send_wb_info(struct wb_info_cfg *wb)
+{
+	unsigned short read_data;
+	uint8_t temp[8];
+	int rc = 0;
+	int i = 0;
+
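+	/*
+	 * Pack the white-balance gains as 16-bit big-endian values in the
+	 * order green, red, blue, green and write all eight bytes to the
+	 * gain block starting at register 0x0B8E.
+	 */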
+	/* red_gain */
+	temp[2] = wb->red_gain >> 8;
+	temp[3] = wb->red_gain & 0xFF;
+
+	/* green_gain */
+	temp[0] = wb->green_gain >> 8;
+	temp[1] = wb->green_gain & 0xFF;
+	temp[6] = temp[0];
+	temp[7] = temp[1];
+
+	/* blue_gain */
+	temp[4] = wb->blue_gain >> 8;
+	temp[5] = wb->blue_gain & 0xFF;
+	rc = vx6953_i2c_write_seq_sensor(0x0B8E, &temp[0], 8);
+
+	for (i = 0; i < 6; i++) {
+		rc = vx6953_i2c_read(0x0B8E + i, &read_data, 1);
+		CDBG("%s addr 0x%x val %d\n", __func__, 0x0B8E + i, read_data);
+	}
+	rc = vx6953_i2c_read(0x0B82, &read_data, 1);
+	CDBG("%s addr 0x%x val %d\n", __func__, 0x0B82, read_data);
+	if (rc < 0)
+		return rc;
+	return rc;
+} /* end of vx6953_send_wb_info */
+
+static int __exit vx6953_remove(struct i2c_client *client)
+{
+	struct vx6953_work_t *sensorw = i2c_get_clientdata(client);
+	free_irq(client->irq, sensorw);
+	vx6953_client = NULL;
+	kfree(sensorw);
+	return 0;
+}
+
+static struct i2c_driver vx6953_i2c_driver = {
+	.id_table = vx6953_i2c_id,
+	.probe  = vx6953_i2c_probe,
+	.remove = __exit_p(vx6953_remove),
+	.driver = {
+		.name = "vx6953",
+	},
+};
+
+static int vx6953_sensor_config(void __user *argp)
+{
+	struct sensor_cfg_data cdata;
+	long   rc = 0;
+	if (copy_from_user(&cdata,
+		(void *)argp,
+		sizeof(struct sensor_cfg_data)))
+		return -EFAULT;
+	mutex_lock(&vx6953_mut);
+	CDBG("vx6953_sensor_config: cfgtype = %d\n",
+	cdata.cfgtype);
+		switch (cdata.cfgtype) {
+		case CFG_GET_PICT_FPS:
+			vx6953_get_pict_fps(
+				cdata.cfg.gfps.prevfps,
+				&(cdata.cfg.gfps.pictfps));
+
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_GET_PREV_L_PF:
+			cdata.cfg.prevl_pf =
+			vx6953_get_prev_lines_pf();
+
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_GET_PREV_P_PL:
+			cdata.cfg.prevp_pl =
+				vx6953_get_prev_pixels_pl();
+
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_GET_PICT_L_PF:
+			cdata.cfg.pictl_pf =
+				vx6953_get_pict_lines_pf();
+
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_GET_PICT_P_PL:
+			cdata.cfg.pictp_pl =
+				vx6953_get_pict_pixels_pl();
+
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_GET_PICT_MAX_EXP_LC:
+			cdata.cfg.pict_max_exp_lc =
+				vx6953_get_pict_max_exp_lc();
+
+			if (copy_to_user((void *)argp,
+				&cdata,
+				sizeof(struct sensor_cfg_data)))
+				rc = -EFAULT;
+			break;
+
+		case CFG_SET_FPS:
+		case CFG_SET_PICT_FPS:
+			rc = vx6953_set_fps(&(cdata.cfg.fps));
+			break;
+
+		case CFG_SET_EXP_GAIN:
+			rc =
+				vx6953_write_exp_gain(
+					cdata.cfg.exp_gain.gain,
+					cdata.cfg.exp_gain.line);
+			break;
+
+		case CFG_SET_PICT_EXP_GAIN:
+			rc =
+				vx6953_set_pict_exp_gain(
+				cdata.cfg.exp_gain.gain,
+				cdata.cfg.exp_gain.line);
+			break;
+
+		case CFG_SET_MODE:
+			rc = vx6953_set_sensor_mode(cdata.mode,
+					cdata.rs);
+			break;
+
+		case CFG_PWR_DOWN:
+			rc = vx6953_power_down();
+			break;
+
+		case CFG_MOVE_FOCUS:
+			rc =
+				vx6953_move_focus(
+				cdata.cfg.focus.dir,
+				cdata.cfg.focus.steps);
+			break;
+
+		case CFG_SET_DEFAULT_FOCUS:
+			rc =
+				vx6953_set_default_focus(
+				cdata.cfg.focus.steps);
+			break;
+
+		case CFG_SET_EFFECT:
+			rc = vx6953_set_default_focus(
+				cdata.cfg.effect);
+			break;
+
+
+		case CFG_SEND_WB_INFO:
+			rc = vx6953_send_wb_info(
+				&(cdata.cfg.wb_info));
+			break;
+
+		default:
+			rc = -EFAULT;
+			break;
+		}
+
+	mutex_unlock(&vx6953_mut);
+
+	return rc;
+}
+
+
+
+
+static int vx6953_sensor_release(void)
+{
+	int rc = -EBADF;
+	mutex_lock(&vx6953_mut);
+	vx6953_power_down();
+	gpio_free(vx6953_ctrl->sensordata->sensor_reset);
+	kfree(vx6953_ctrl);
+	vx6953_ctrl = NULL;
+	CDBG("vx6953_release completed\n");
+	mutex_unlock(&vx6953_mut);
+
+	return rc;
+}
+
+static int vx6953_g_chip_ident(struct v4l2_subdev *sd,
+			struct v4l2_dbg_chip_ident *id)
+{
+	/* TODO: Need to add this ID in v4l2-chip-ident.h */
+	id->ident    = V4L2_IDENT_VX6953;
+	id->revision = 0;
+
+	return 0;
+}
+
+static int vx6953_g_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *param)
+{
+	int ret = 0;
+	/* return current mode value */
+	param->parm.capture.capturemode = vx6953_ctrl->sensormode;
+	return ret;
+}
+
+static int vx6953_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *param)
+{
+	/* set the desired mode */
+	/* right now, the only purpose is to set the desired mode -
+	 preview or snapshot */
+	vx6953_ctrl->sensormode = param->parm.capture.capturemode;
+	return 0;
+}
+
+static int vx6953_s_stream(struct v4l2_subdev *sd, int enable)
+{
+	long rc = 0;
+	int mode = vx6953_ctrl->sensormode;
+	int rt = RES_PREVIEW;
+	unsigned short frame_cnt;
+	struct msm_camera_csi_params vx6953_csi_params;
+
+	CDBG("mode = %d, enable = %d\n", mode, enable);
+
+	if (!enable) {
+		/* turn off streaming */
+		/* TODO: Make call to I2C write to turn streaming off */
+		/* rc = vx6953_i2c_write_b_sensor(); */
+
+		struct vx6953_i2c_reg_conf init_tbl[] = {
+			{REG_0x0112,
+			vx6953_regs.reg_pat_init[0].reg_0x0112},
+			{0x6003, 0x01},
+			{REG_0x0113,
+			vx6953_regs.reg_pat_init[0].reg_0x0113},
+			{REG_VT_PIX_CLK_DIV,
+			vx6953_regs.reg_pat_init[0].
+			vt_pix_clk_div},
+			{REG_PRE_PLL_CLK_DIV,
+			vx6953_regs.reg_pat_init[0].
+			pre_pll_clk_div},
+			{REG_PLL_MULTIPLIER,
+			vx6953_regs.reg_pat_init[0].
+			pll_multiplier},
+			{REG_OP_PIX_CLK_DIV,
+			vx6953_regs.reg_pat_init[0].
+			op_pix_clk_div},
+			{REG_COARSE_INTEGRATION_TIME_HI,
+			vx6953_regs.reg_pat[rt].
+			coarse_integration_time_hi},
+			{REG_COARSE_INTEGRATION_TIME_LO,
+			vx6953_regs.reg_pat[rt].
+			coarse_integration_time_lo},
+			{REG_ANALOGUE_GAIN_CODE_GLOBAL_LO,
+			vx6953_regs.reg_pat[rt].
+			analogue_gain_code_global},
+			{REG_0x3030,
+			vx6953_regs.reg_pat_init[0].reg_0x3030},
+			/* 953 specific registers */
+			{REG_0x0111,
+			vx6953_regs.reg_pat_init[0].reg_0x0111},
+			{REG_0x0b00,
+			vx6953_regs.reg_pat_init[0].reg_0x0b00},
+			{REG_0x3001,
+			vx6953_regs.reg_pat_init[0].reg_0x3001},
+			{REG_0x3004,
+			vx6953_regs.reg_pat_init[0].reg_0x3004},
+			{0x3006, 0x00},
+			{REG_0x3007,
+			vx6953_regs.reg_pat_init[0].reg_0x3007},
+			{0x301b, 0x29},
+			/* DEFCOR settings */
+			/*Single Defect Correction Weight DISABLE*/
+			{0x0b06,
+			vx6953_regs.reg_pat_init[0].reg_0x0b06},
+			/*Single_defect_correct_weight = auto*/
+			{0x0b07,
+			vx6953_regs.reg_pat_init[0].reg_0x0b07},
+			/*Dynamic couplet correction ENABLED*/
+			{0x0b08,
+			vx6953_regs.reg_pat_init[0].reg_0x0b08},
+			/*Dynamic couplet correction weight*/
+			{0x0b09,
+			vx6953_regs.reg_pat_init[0].reg_0x0b09},
+			/* Clock Setup */
+			/* Tell sensor ext clk is 24MHz*/
+			{REG_0x0136,
+			vx6953_regs.reg_pat_init[0].reg_0x0136},
+			{REG_0x0137,
+			vx6953_regs.reg_pat_init[0].reg_0x0137},
+			/* The white balance gains must be written
+			 to the sensor every frame. */
+			/* Edof */
+			{REG_0x0b83,
+			vx6953_regs.reg_pat_init[0].reg_0x0b83},
+			{REG_0x0b84,
+			vx6953_regs.reg_pat_init[0].reg_0x0b84},
+			{REG_0x0b85,
+			vx6953_regs.reg_pat_init[0].reg_0x0b85},
+			{REG_0x0b88,
+			vx6953_regs.reg_pat_init[0].reg_0x0b88},
+			{REG_0x0b89,
+			vx6953_regs.reg_pat_init[0].reg_0x0b89},
+			{REG_0x0b8a,
+			vx6953_regs.reg_pat_init[0].reg_0x0b8a},
+			/* Mode specific registers */
+			{REG_FRAME_LENGTH_LINES_HI,
+			vx6953_regs.reg_pat[rt].
+			frame_length_lines_hi},
+			{REG_FRAME_LENGTH_LINES_LO,
+			vx6953_regs.reg_pat[rt].
+			frame_length_lines_lo},
+			{REG_LINE_LENGTH_PCK_HI,
+			vx6953_regs.reg_pat[rt].
+			line_length_pck_hi},
+			{REG_LINE_LENGTH_PCK_LO,
+			vx6953_regs.reg_pat[rt].
+			line_length_pck_lo},
+			{REG_0x3005,
+			vx6953_regs.reg_pat[rt].reg_0x3005},
+			{0x3010,
+			vx6953_regs.reg_pat[rt].reg_0x3010},
+			{REG_0x3011,
+			vx6953_regs.reg_pat[rt].reg_0x3011},
+			{REG_0x301a,
+			vx6953_regs.reg_pat[rt].reg_0x301a},
+			{REG_0x3035,
+			vx6953_regs.reg_pat[rt].reg_0x3035},
+			{REG_0x3036,
+			vx6953_regs.reg_pat[rt].reg_0x3036},
+			{REG_0x3041,
+			vx6953_regs.reg_pat[rt].reg_0x3041},
+			{0x3042,
+			vx6953_regs.reg_pat[rt].reg_0x3042},
+			{REG_0x3045,
+			vx6953_regs.reg_pat[rt].reg_0x3045},
+			/*EDOF: Estimation settings for Preview mode
+			  Application settings for capture mode
+			  (standard settings - Not tuned) */
+			{REG_0x0b80,
+			vx6953_regs.reg_pat[rt].reg_0x0b80},
+			{REG_0x0900,
+			vx6953_regs.reg_pat[rt].reg_0x0900},
+			{REG_0x0901,
+			vx6953_regs.reg_pat[rt].reg_0x0901},
+			{REG_0x0902,
+			vx6953_regs.reg_pat[rt].reg_0x0902},
+			{REG_0x0383,
+			vx6953_regs.reg_pat[rt].reg_0x0383},
+			{REG_0x0387,
+			vx6953_regs.reg_pat[rt].reg_0x0387},
+			/* Change output size / frame rate */
+			{REG_0x034c,
+			vx6953_regs.reg_pat[rt].reg_0x034c},
+			{REG_0x034d,
+			vx6953_regs.reg_pat[rt].reg_0x034d},
+			{REG_0x034e,
+			vx6953_regs.reg_pat[rt].reg_0x034e},
+			{REG_0x034f,
+			vx6953_regs.reg_pat[rt].reg_0x034f},
+		};
+		/* reset fps_divider */
+		vx6953_ctrl->fps = 30 * Q8;
+		/* stop streaming */
+
+		/* Reset everything first */
+		if (vx6953_i2c_write_b_sensor(0x103, 0x01) < 0) {
+			CDBG("S/W reset failed\n");
+			return rc;
+		} else
+			CDBG("S/W reset successful\n");
+
+		msleep(10);
+
+		CDBG("Init vx6953_sensor_setting standby\n");
+		if (vx6953_i2c_write_b_sensor(REG_MODE_SELECT,
+				    MODE_SELECT_STANDBY_MODE) < 0)
+			return rc;
+
+		/*vx6953_stm5m0edof_delay_msecs_stdby*/
+		msleep(vx6953_stm5m0edof_delay_msecs_stdby);
+
+		vx6953_csi_params.data_format = CSI_8BIT;
+		vx6953_csi_params.lane_cnt = 1;
+		vx6953_csi_params.lane_assign = 0xe4;
+		vx6953_csi_params.dpcm_scheme = 0;
+		vx6953_csi_params.settle_cnt = 7;
+		rc = msm_camio_csi_config(&vx6953_csi_params);
+		if (rc < 0)
+			CDBG(" config csi controller failed\n");
+		msleep(vx6953_stm5m0edof_delay_msecs_stdby);
+
+		vx6953_patch_for_cut3();
+		rc = vx6953_i2c_write_w_table(&init_tbl[0],
+					    ARRAY_SIZE(init_tbl));
+		if (rc < 0)
+			return rc;
+
+		msleep(vx6953_stm5m0edof_delay_msecs_stdby);
+
+		vx6953_i2c_write_b_sensor(0x0b80, 0x00);
+		vx6953_i2c_write_b_sensor(0x3388, 0x03);
+		vx6953_i2c_write_b_sensor(0x3640, 0x00);
+		return rc;
+	} else {
+		/* Start sensor streaming */
+		if (vx6953_i2c_write_b_sensor(REG_MODE_SELECT,
+					    MODE_SELECT_STREAM) < 0)
+			return rc;
+		CDBG("Init vx6953_sensor_setting stream\n");
+		msleep(vx6953_stm5m0edof_delay_msecs_stream);
+		if (vx6953_i2c_read(0x0005, &frame_cnt, 1) < 0)
+			return rc;
+
+		rc = vx6953_i2c_write_w_table(&edof_tbl[0],
+					    ARRAY_SIZE(edof_tbl));
+		vx6953_i2c_write_b_sensor(0x3388, 0x00);
+
+		while (frame_cnt == 0xFF) {
+			if (vx6953_i2c_read(0x0005, &frame_cnt, 1) < 0)
+				return rc;
+			CDBG("frame_cnt=%d", frame_cnt);
+			msleep(10);
+		}
+
+		/* set desired mode */
+		switch (mode) {
+		case SENSOR_PREVIEW_MODE:
+			CDBG("SENSOR_PREVIEW_MODE\n");
+			rc = vx6953_video_config(mode);
+			break;
+		case SENSOR_SNAPSHOT_MODE:
+			CDBG("SENSOR_SNAPSHOT_MODE\n");
+			rc = vx6953_snapshot_config(mode);
+			break;
+		case SENSOR_RAW_SNAPSHOT_MODE:
+			CDBG("SENSOR_RAW_SNAPSHOT_MODE\n");
+			rc = vx6953_raw_snapshot_config(mode);
+			break;
+		default:
+			CDBG("default\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void vx6953_frame_check(u32 *width, u32 *height)
+{
+	/* get mode first */
+	int mode = vx6953_ctrl->sensormode;
+
+	switch (mode) {
+	case SENSOR_PREVIEW_MODE:
+		if (*width > VX6953_QTR_SIZE_WIDTH)
+			*width = VX6953_QTR_SIZE_WIDTH;
+
+		if (*height > VX6953_QTR_SIZE_HEIGHT)
+			*height = VX6953_QTR_SIZE_HEIGHT;
+		break;
+	case SENSOR_SNAPSHOT_MODE:
+	case SENSOR_RAW_SNAPSHOT_MODE:
+		if (*width > VX6953_HRZ_FULL_BLK_PIXELS)
+			*width = VX6953_HRZ_FULL_BLK_PIXELS;
+
+		if (*height > VX6953_VER_FULL_BLK_LINES)
+			*height = VX6953_VER_FULL_BLK_LINES;
+		break;
+	default:
+		break;
+	}
+}
+
+
+static int vx6953_set_params(struct i2c_client *client, u32 width, u32 height,
+			     enum v4l2_mbus_pixelcode code)
+{
+	int i;
+	vx6953_ctrl->fmt = NULL;
+
+	/*
+	 * frame size check
+	 */
+	vx6953_frame_check(&width, &height);
+
+	/*
+	 * get color format
+	 */
+	for (i = 0; i < ARRAY_SIZE(vx6953_cfmts); i++)
+		if (vx6953_cfmts[i].code == code)
+			break;
+	if (i == ARRAY_SIZE(vx6953_cfmts))
+		return -EINVAL;
+
+	/* sensor supports one fixed size depending upon the mode */
+	switch (vx6953_ctrl->sensormode) {
+	case SENSOR_PREVIEW_MODE:
+		vx6953_video_config(vx6953_ctrl->sensormode);
+		break;
+	case SENSOR_SNAPSHOT_MODE:
+		vx6953_snapshot_config(vx6953_ctrl->sensormode);
+		break;
+	case SENSOR_RAW_SNAPSHOT_MODE:
+		vx6953_raw_snapshot_config(vx6953_ctrl->sensormode);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* remember the negotiated format; vx6953_g_fmt() reports fmt->code */
+	vx6953_ctrl->fmt = &(vx6953_cfmts[i]);
+
+	return 0;
+}
+
+static int vx6953_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
+{
+	/* right now we are not supporting, probably vfe can take care */
+	return -EINVAL;
+}
+
+static int vx6953_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+{
+	return -EINVAL;
+}
+
+static int vx6953_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+{
+	return -EINVAL;
+}
+
+static int vx6953_g_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
+{
+	/* by this time vx6953_client should already be set */
+	struct i2c_client *client = vx6953_client;
+
+	/* currently sensor supports fixed dimensions only
+	 * depending upon the mode*/
+	if (!vx6953_ctrl->fmt) {
+		int ret = vx6953_set_params(client, VX6953_QTR_SIZE_WIDTH,
+						VX6953_QTR_SIZE_HEIGHT,
+						V4L2_MBUS_FMT_YUYV8_2X8);
+		if (ret < 0)
+			return ret;
+	}
+
+	mf->width = vx6953_get_pict_pixels_pl();
+	mf->height  = vx6953_get_pict_lines_pf();
+	/* TODO: set colorspace */
+	mf->code  = vx6953_ctrl->fmt->code;
+	mf->field = V4L2_FIELD_NONE;
+
+	return 0;
+}
+
+static int vx6953_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
+{
+	/* by this time vx6953_client should already be set */
+	struct i2c_client *client = vx6953_client;
+
+	/* TODO: We need to define this function */
+	/* TODO: set colorspace */
+	return vx6953_set_params(client, mf->width, mf->height, mf->code);
+}
+
+static int vx6953_try_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(vx6953_cfmts); i++)
+		if (mf->code == vx6953_cfmts[i].code)
+			break;
+
+	if (i == ARRAY_SIZE(vx6953_cfmts))
+		return -EINVAL;
+
+	/* check that frame is within max sensor supported frame size */
+	vx6953_frame_check(&mf->width, &mf->height);
+
+	/* TODO: set colorspace */
+	mf->field = V4L2_FIELD_NONE;
+
+	return 0;
+}
+
+static int vx6953_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
+			   enum v4l2_mbus_pixelcode *code)
+{
+	printk(KERN_DEBUG "Index is %d\n", index);
+	if ((unsigned int)index >= ARRAY_SIZE(vx6953_cfmts))
+		return -EINVAL;
+
+	*code = vx6953_cfmts[index].code;
+	return 0;
+}
+
+static struct v4l2_subdev_core_ops vx6953_subdev_core_ops = {
+	.g_chip_ident = vx6953_g_chip_ident,
+};
+
+static struct v4l2_subdev_video_ops vx6953_subdev_video_ops = {
+	.g_parm			   = vx6953_g_parm,
+	.s_parm			   = vx6953_s_parm,
+	.s_stream = vx6953_s_stream,
+	.g_mbus_fmt = vx6953_g_fmt,
+	.s_mbus_fmt = vx6953_s_fmt,
+	.try_mbus_fmt = vx6953_try_fmt,
+	.cropcap  = vx6953_cropcap,
+	.g_crop   = vx6953_g_crop,
+	.s_crop   = vx6953_s_crop,
+	.enum_mbus_fmt  = vx6953_enum_fmt,
+};
+
+static struct v4l2_subdev_ops vx6953_subdev_ops = {
+	.core = &vx6953_subdev_core_ops,
+	.video  = &vx6953_subdev_video_ops,
+};
+
+static int vx6953_sensor_probe(const struct msm_camera_sensor_info *info,
+		struct msm_sensor_ctrl *s)
+{
+	int rc = 0;
+	rc = i2c_add_driver(&vx6953_i2c_driver);
+	if (rc < 0 || vx6953_client == NULL) {
+		rc = -ENOTSUPP;
+		goto probe_fail;
+	}
+	msm_camio_clk_rate_set(24000000);
+	rc = vx6953_probe_init_sensor(info);
+	if (rc < 0)
+		goto probe_fail;
+	s->s_init = vx6953_sensor_open_init;
+	s->s_release = vx6953_sensor_release;
+	s->s_config  = vx6953_sensor_config;
+	vx6953_probe_init_done(info);
+	return rc;
+
+probe_fail:
+	CDBG("vx6953_sensor_probe: SENSOR PROBE FAILS!\n");
+	return rc;
+}
+
+
+static int vx6953_sensor_probe_cb(const struct msm_camera_sensor_info *info,
+	struct v4l2_subdev *sdev, struct msm_sensor_ctrl *s)
+{
+	int rc = 0;
+	rc = vx6953_sensor_probe(info, s);
+	if (rc < 0)
+		return rc;
+
+	vx6953_ctrl = kzalloc(sizeof(struct vx6953_ctrl_t), GFP_KERNEL);
+	if (!vx6953_ctrl) {
+		CDBG("vx6953_sensor_probe failed!\n");
+		return -ENOMEM;
+	}
+
+	/* probe is successful, init a v4l2 subdevice */
+	printk(KERN_DEBUG "going into v4l2_i2c_subdev_init\n");
+	if (sdev) {
+		v4l2_i2c_subdev_init(sdev, vx6953_client,
+						&vx6953_subdev_ops);
+		vx6953_ctrl->sensor_dev = sdev;
+	}
+	return rc;
+}
+
+static int __vx6953_probe(struct platform_device *pdev)
+{
+	return msm_sensor_register(pdev, vx6953_sensor_probe_cb);
+}
+
+static struct platform_driver msm_camera_driver = {
+	.probe = __vx6953_probe,
+	.driver = {
+		.name = "msm_camera_vx6953",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init vx6953_init(void)
+{
+	return platform_driver_register(&msm_camera_driver);
+}
+
+module_init(vx6953_init);
+void vx6953_exit(void)
+{
+	i2c_del_driver(&vx6953_i2c_driver);
+}
+
+
diff --git a/drivers/media/video/msm/vx6953_v4l2.h b/drivers/media/video/msm/vx6953_v4l2.h
new file mode 100644
index 0000000..e5428e9
--- /dev/null
+++ b/drivers/media/video/msm/vx6953_v4l2.h
@@ -0,0 +1,136 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef VX6953_V4L2_H
+#define VX6953_V4L2_H
+#include <linux/types.h>
+#include <mach/board.h>
+extern struct vx6953_reg vx6953_regs;
+struct reg_struct_init {
+	uint8_t reg_0x0112;      /* 0x0112*/
+	uint8_t reg_0x0113;      /* 0x0113*/
+	uint8_t vt_pix_clk_div;  /* 0x0301*/
+	uint8_t pre_pll_clk_div; /* 0x0305*/
+	uint8_t pll_multiplier;  /* 0x0307*/
+	uint8_t op_pix_clk_div;  /* 0x0309*/
+	uint8_t reg_0x3030;      /*0x3030*/
+	uint8_t reg_0x0111;      /*0x0111*/
+	uint8_t reg_0x0b00;      /*0x0b00*/
+	uint8_t reg_0x3001;      /*0x3001*/
+	uint8_t reg_0x3004;      /*0x3004*/
+	uint8_t reg_0x3007;      /*0x3007*/
+	uint8_t reg_0x3016;      /*0x3016*/
+	uint8_t reg_0x301d;      /*0x301d*/
+	uint8_t reg_0x317e;      /*0x317E*/
+	uint8_t reg_0x317f;      /*0x317F*/
+	uint8_t reg_0x3400;      /*0x3400*/
+	uint8_t reg_0x0b06;      /*0x0b06*/
+	uint8_t reg_0x0b07;      /*0x0b07*/
+	uint8_t reg_0x0b08;      /*0x0b08*/
+	uint8_t reg_0x0b09;      /*0x0b09*/
+	uint8_t reg_0x0136;
+	uint8_t reg_0x0137;
+	/* Edof */
+	uint8_t reg_0x0b83;      /*0x0b83*/
+	uint8_t reg_0x0b84;      /*0x0b84*/
+	uint8_t reg_0x0b85;      /*0x0b85*/
+	uint8_t reg_0x0b88;      /*0x0b88*/
+	uint8_t reg_0x0b89;      /*0x0b89*/
+	uint8_t reg_0x0b8a;      /*0x0b8a*/
+	};
+struct reg_struct {
+	uint8_t coarse_integration_time_hi; /*REG_COARSE_INTEGRATION_TIME_HI*/
+	uint8_t coarse_integration_time_lo; /*REG_COARSE_INTEGRATION_TIME_LO*/
+	uint8_t analogue_gain_code_global;
+	uint8_t frame_length_lines_hi; /* 0x0340*/
+	uint8_t frame_length_lines_lo; /* 0x0341*/
+	uint8_t line_length_pck_hi;    /* 0x0342*/
+	uint8_t line_length_pck_lo;    /* 0x0343*/
+	uint8_t reg_0x3005;   /* 0x3005*/
+	uint8_t reg_0x3010;  /* 0x3010*/
+	uint8_t reg_0x3011;  /* 0x3011*/
+	uint8_t reg_0x301a;  /* 0x301a*/
+	uint8_t reg_0x3035;  /* 0x3035*/
+	uint8_t reg_0x3036;   /* 0x3036*/
+	uint8_t reg_0x3041;  /*0x3041*/
+	uint8_t reg_0x3042;  /*0x3042*/
+	uint8_t reg_0x3045;  /*0x3045*/
+	uint8_t reg_0x0b80;   /* 0x0b80*/
+	uint8_t reg_0x0900;   /*0x0900*/
+	uint8_t reg_0x0901;   /* 0x0901*/
+	uint8_t reg_0x0902;   /*0x0902*/
+	uint8_t reg_0x0383;   /*0x0383*/
+	uint8_t reg_0x0387;   /* 0x0387*/
+	uint8_t reg_0x034c;   /* 0x034c*/
+	uint8_t reg_0x034d;   /*0x034d*/
+	uint8_t reg_0x034e;   /* 0x034e*/
+	uint8_t reg_0x034f;   /* 0x034f*/
+	uint8_t reg_0x1716; /*0x1716*/
+	uint8_t reg_0x1717; /*0x1717*/
+	uint8_t reg_0x1718; /*0x1718*/
+	uint8_t reg_0x1719; /*0x1719*/
+	uint8_t reg_0x3210;/*0x3210*/
+	uint8_t reg_0x111; /*0x111*/
+	uint8_t reg_0x3410;  /*0x3410*/
+	uint8_t reg_0x3098;
+	uint8_t reg_0x309D;
+	uint8_t reg_0x0200;
+	uint8_t reg_0x0201;
+	};
+struct vx6953_i2c_reg_conf {
+	unsigned short waddr;
+	unsigned short wdata;
+};
+
+enum vx6953_test_mode_t {
+	TEST_OFF,
+	TEST_1,
+	TEST_2,
+	TEST_3
+};
+
+enum vx6953_resolution_t {
+	QTR_SIZE,
+	FULL_SIZE,
+	INVALID_SIZE
+};
+enum vx6953_setting {
+	RES_PREVIEW,
+	RES_CAPTURE
+};
+enum mt9p012_reg_update {
+	/* Sensor registers that need to be updated during initialization */
+	REG_INIT,
+	/* Sensor registers that need periodic I2C writes */
+	UPDATE_PERIODIC,
+	/* All the sensor Registers will be updated */
+	UPDATE_ALL,
+	/* Not valid update */
+	UPDATE_INVALID
+};
+
+enum sensor_revision_t {
+	VX6953_STM5M0EDOF_CUT_1,
+	VX6953_STM5M0EDOF_CUT_2,
+	VX6953_STM5M0EDOF_CUT_3
+};
+enum edof_mode_t {
+	VX6953_EDOF_DISABLE,       /* 0x00 */
+	VX6953_EDOF_APPLICATION,   /* 0x01 */
+	VX6953_EDOF_ESTIMATION     /* 0x02 */
+};
+struct vx6953_reg {
+	const struct reg_struct_init  *reg_pat_init;
+	const struct reg_struct  *reg_pat;
+};
+#endif /* VX6953_V4L2_H */
diff --git a/drivers/media/video/videobuf-core.c b/drivers/media/video/videobuf-core.c
index de4fa4e..ac48afd 100644
--- a/drivers/media/video/videobuf-core.c
+++ b/drivers/media/video/videobuf-core.c
@@ -330,6 +330,7 @@
 		break;
 	case V4L2_MEMORY_USERPTR:
 		b->m.userptr = vb->baddr;
+		b->reserved = vb->boff;
 		b->length    = vb->bsize;
 		break;
 	case V4L2_MEMORY_OVERLAY:
@@ -600,6 +601,7 @@
 		    buf->baddr != b->m.userptr)
 			q->ops->buf_release(q, buf);
 		buf->baddr = b->m.userptr;
+		buf->boff = b->reserved;
 		break;
 	case V4L2_MEMORY_OVERLAY:
 		buf->boff = b->m.offset;
@@ -1138,8 +1140,6 @@
 			buf = list_entry(q->stream.next,
 					 struct videobuf_buffer, stream);
 	} else {
-		if (!q->reading)
-			__videobuf_read_start(q);
 		if (!q->reading) {
 			rc = POLLERR;
 		} else if (NULL == q->read_buf) {
diff --git a/drivers/media/video/videobuf-msm-mem.c b/drivers/media/video/videobuf-msm-mem.c
new file mode 100644
index 0000000..6f2cf9f
--- /dev/null
+++ b/drivers/media/video/videobuf-msm-mem.c
@@ -0,0 +1,394 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * Based on videobuf-dma-contig.c,
+ * (c) 2008 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * helper functions for physically contiguous pmem capture buffers
+ * The functions support contiguous memory allocations using the pmem
+ * kernel API.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+#include <linux/android_pmem.h>
+#include <linux/memory_alloc.h>
+#include <media/videobuf-msm-mem.h>
+#include <media/msm_camera.h>
+#include <mach/memory.h>
+
+#define MAGIC_PMEM 0x0733ac64
+#define MAGIC_CHECK(is, should)               \
+	if (unlikely((is) != (should))) {           \
+		pr_err("magic mismatch: %x expected %x\n", (is), (should)); \
+		BUG();                  \
+	}
+
+#ifdef CONFIG_MSM_CAMERA_DEBUG
+#define D(fmt, args...) printk(KERN_DEBUG "videobuf-msm-mem: " fmt, ##args)
+#else
+#define D(fmt, args...) do {} while (0)
+#endif
+
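+/*
+ * Allocate a physically contiguous buffer from EBI memory with 4 KB
+ * alignment and return its physical address; callers treat the result
+ * as an error value on failure.
+ */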
+static int32_t msm_mem_allocate(const size_t size)
+{
+	int32_t phyaddr;
+	phyaddr = allocate_contiguous_ebi_nomap(size, SZ_4K);
+	return phyaddr;
+}
+
+static int32_t msm_mem_free(const int32_t phyaddr)
+{
+	int32_t rc = 0;
+	free_contiguous_memory_by_paddr(phyaddr);
+	return rc;
+}
+
+static void
+videobuf_vm_open(struct vm_area_struct *vma)
+{
+	struct videobuf_mapping *map = vma->vm_private_data;
+
+	D("vm_open %p [count=%u,vma=%08lx-%08lx]\n",
+		map, map->count, vma->vm_start, vma->vm_end);
+
+	map->count++;
+}
+
+static void videobuf_vm_close(struct vm_area_struct *vma)
+{
+	struct videobuf_mapping *map = vma->vm_private_data;
+	struct videobuf_queue *q = map->q;
+	int i, rc;
+
+	D("vm_close %p [count=%u,vma=%08lx-%08lx]\n",
+		map, map->count, vma->vm_start, vma->vm_end);
+
+	map->count--;
+	if (0 == map->count) {
+		struct videobuf_contig_pmem *mem;
+
+		D("munmap %p q=%p\n", map, q);
+		mutex_lock(&q->vb_lock);
+
+		/* We need first to cancel streams, before unmapping */
+		if (q->streaming)
+			videobuf_queue_cancel(q);
+
+		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+			if (NULL == q->bufs[i])
+				continue;
+
+			if (q->bufs[i]->map != map)
+				continue;
+
+			mem = q->bufs[i]->priv;
+			if (mem) {
+				/* This callback is called only if kernel has
+				 * allocated memory and this memory is mmapped.
+				 * In this case, memory should be freed,
+				 * in order to do memory unmap.
+				 */
+
+				MAGIC_CHECK(mem->magic, MAGIC_PMEM);
+
+				/* freeing the buffer is not atomic - it
+				 can't be done with IRQs disabled
+				 */
+				D("buf[%d] freeing physical %d\n",
+					i, mem->phyaddr);
+
+				rc = msm_mem_free(mem->phyaddr);
+				if (rc < 0)
+					D("%s: Invalid memory location\n",
+								__func__);
+				else {
+					mem->phyaddr = 0;
+				}
+			}
+
+			q->bufs[i]->map   = NULL;
+			q->bufs[i]->baddr = 0;
+		}
+
+		kfree(map);
+
+		mutex_unlock(&q->vb_lock);
+
+		/* Deallocating the q->bufs[i] structures here is not ideal,
+		 since it causes unnecessary iterations, but right now it
+		 looks like the cleanest way. */
+		videobuf_mmap_free(q);
+	}
+}
+
+static const struct vm_operations_struct videobuf_vm_ops = {
+	.open     = videobuf_vm_open,
+	.close    = videobuf_vm_close,
+};
+
+/**
+ * videobuf_pmem_contig_user_put() - reset pointer to user space buffer
+ * @mem: per-buffer private videobuf-contig-pmem data
+ *
+ * This function resets the user space pointer
+ */
+static void videobuf_pmem_contig_user_put(struct videobuf_contig_pmem *mem)
+{
+	put_pmem_file(mem->file);
+	mem->is_userptr = 0;
+	mem->phyaddr = 0;
+	mem->size = 0;
+}
+
+/**
+ * videobuf_pmem_contig_user_get() - setup user space memory pointer
+ * @mem: per-buffer private videobuf-contig-pmem data
+ * @vb: video buffer to map
+ *
+ * This function validates and sets up a pointer to user space memory.
+ * Only physically contiguous pfn-mapped memory is accepted.
+ *
+ * Returns 0 if successful.
+ */
+static int videobuf_pmem_contig_user_get(struct videobuf_contig_pmem *mem,
+					struct videobuf_buffer *vb)
+{
+	unsigned long kvstart;
+	unsigned long len;
+	int rc;
+
+	mem->size = PAGE_ALIGN(vb->size);
+	rc = get_pmem_file(vb->baddr, (unsigned long *)&mem->phyaddr,
+					&kvstart, &len, &mem->file);
+	if (rc < 0) {
+		pr_err("%s: get_pmem_file fd %lu error %d\n",
+					__func__, vb->baddr,
+							rc);
+		return rc;
+	}
+	mem->phyaddr += vb->boff;
+	mem->y_off = 0;
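+	/*
+	 * Assume a YUV 4:2:0 semi-planar layout: luma occupies the first
+	 * two thirds of the buffer and chroma the remaining third.
+	 */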
+	mem->cbcr_off = (vb->size)*2/3;
+	mem->is_userptr = 1;
+	return rc;
+}
+
+static struct videobuf_buffer *__videobuf_alloc(size_t size)
+{
+	struct videobuf_contig_pmem *mem;
+	struct videobuf_buffer *vb;
+
+	vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
+	if (vb) {
+		mem = vb->priv = ((char *)vb) + size;
+		mem->magic = MAGIC_PMEM;
+	}
+
+	return vb;
+}
+
+static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
+{
+	struct videobuf_contig_pmem *mem = buf->priv;
+
+	BUG_ON(!mem);
+	MAGIC_CHECK(mem->magic, MAGIC_PMEM);
+
+	return mem->vaddr;
+}
+
+static int __videobuf_iolock(struct videobuf_queue *q,
+				struct videobuf_buffer *vb,
+				struct v4l2_framebuffer *fbuf)
+{
+	int rc = 0;
+	struct videobuf_contig_pmem *mem = vb->priv;
+
+	BUG_ON(!mem);
+	MAGIC_CHECK(mem->magic, MAGIC_PMEM);
+
+	switch (vb->memory) {
+	case V4L2_MEMORY_MMAP:
+		D("%s memory method MMAP\n", __func__);
+
+		/* All handling should be done by __videobuf_mmap_mapper() */
+		break;
+	case V4L2_MEMORY_USERPTR:
+		D("%s memory method USERPTR\n", __func__);
+
+		/* handle pointer from user space */
+		rc = videobuf_pmem_contig_user_get(mem, vb);
+		break;
+	case V4L2_MEMORY_OVERLAY:
+	default:
+		pr_err("%s memory method OVERLAY/unknown\n", __func__);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static int __videobuf_mmap_mapper(struct videobuf_queue *q,
+		struct videobuf_buffer *buf,
+		struct vm_area_struct *vma)
+{
+	struct videobuf_contig_pmem *mem;
+	struct videobuf_mapping *map;
+	int retval;
+	unsigned long size;
+
+	D("%s\n", __func__);
+
+	/* create mapping + update buffer list */
+	map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
+	if (!map) {
+		pr_err("%s: kzalloc failed.\n", __func__);
+		return -ENOMEM;
+	}
+
+	buf->map = map;
+	map->q = q;
+
+	buf->baddr = vma->vm_start;
+
+	mem = buf->priv;
+	D("mem = 0x%x\n", (u32)mem);
+	D("buf = 0x%x\n", (u32)buf);
+	BUG_ON(!mem);
+	MAGIC_CHECK(mem->magic, MAGIC_PMEM);
+
+	mem->size = PAGE_ALIGN(buf->bsize);
+	mem->y_off = 0;
+	mem->cbcr_off = (buf->bsize)*2/3;
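+	/*
+	 * Buffers 0-3 are tagged OUTPUT_TYPE_P and any later buffers
+	 * OUTPUT_TYPE_V (presumably preview vs. video frames).
+	 */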
+	if (buf->i >= 0 && buf->i <= 3)
+		mem->buffer_type = OUTPUT_TYPE_P;
+	else
+		mem->buffer_type = OUTPUT_TYPE_V;
+
+	buf->bsize = mem->size;
+	mem->phyaddr = msm_mem_allocate(mem->size);
+
+	if (IS_ERR((void *)mem->phyaddr)) {
+		pr_err("%s : pmem memory allocation failed\n", __func__);
+		goto error;
+	}
+
+	/* Try to remap memory */
+	size = vma->vm_end - vma->vm_start;
+	size = (size < mem->size) ? size : mem->size;
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	retval = remap_pfn_range(vma, vma->vm_start,
+		mem->phyaddr >> PAGE_SHIFT,
+		size, vma->vm_page_prot);
+	if (retval) {
+		pr_err("mmap: remap failed with error %d. ", retval);
+		retval = msm_mem_free(mem->phyaddr);
+		if (retval < 0)
+			printk(KERN_ERR "%s: Invalid memory location\n",
+								__func__);
+		else {
+			mem->phyaddr = 0;
+		}
+		goto error;
+	}
+
+	vma->vm_ops          = &videobuf_vm_ops;
+	vma->vm_flags       |= VM_DONTEXPAND;
+	vma->vm_private_data = map;
+
+	D("mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
+		map, q, vma->vm_start, vma->vm_end,
+		(long int)buf->bsize,
+		vma->vm_pgoff, buf->i);
+
+	videobuf_vm_open(vma);
+
+	return 0;
+
+error:
+	kfree(map);
+	return -ENOMEM;
+}
+
+static struct videobuf_qtype_ops qops = {
+	.magic        = MAGIC_QTYPE_OPS,
+
+	.alloc_vb     = __videobuf_alloc,
+	.iolock       = __videobuf_iolock,
+	.mmap_mapper  = __videobuf_mmap_mapper,
+	.vaddr        = __videobuf_to_vaddr,
+};
+
+void videobuf_queue_pmem_contig_init(struct videobuf_queue *q,
+	const struct videobuf_queue_ops *ops,
+	struct device *dev,
+	spinlock_t *irqlock,
+	enum v4l2_buf_type type,
+	enum v4l2_field field,
+	unsigned int msize,
+	void *priv,
+	struct mutex *ext_lock)
+{
+	videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
+							priv, &qops, ext_lock);
+}
+EXPORT_SYMBOL_GPL(videobuf_queue_pmem_contig_init);
+
+int videobuf_to_pmem_contig(struct videobuf_buffer *buf)
+{
+	struct videobuf_contig_pmem *mem = buf->priv;
+
+	BUG_ON(!mem);
+	MAGIC_CHECK(mem->magic, MAGIC_PMEM);
+
+	return mem->phyaddr;
+}
+EXPORT_SYMBOL_GPL(videobuf_to_pmem_contig);
+
+int videobuf_pmem_contig_free(struct videobuf_queue *q,
+				struct videobuf_buffer *buf)
+{
+	struct videobuf_contig_pmem *mem = buf->priv;
+
+	/* mmapped memory can't be freed here, otherwise the mmapped region
+	 would be released while still needed. In that case the memory
+	 release should happen inside videobuf_vm_close().
+	 So memory should be freed here only if it was allocated for the
+	 read() operation.
+	*/
+	if (buf->memory != V4L2_MEMORY_USERPTR)
+		return -EINVAL;
+
+	if (!mem)
+		return -ENOMEM;
+
+	MAGIC_CHECK(mem->magic, MAGIC_PMEM);
+
+	/* handle user space pointer case */
+	if (buf->baddr) {
+		videobuf_pmem_contig_user_put(mem);
+		return 0;
+	} else {
+		/* don't support read() method */
+		return -EINVAL;
+	}
+}
+EXPORT_SYMBOL_GPL(videobuf_pmem_contig_free);
+
+MODULE_DESCRIPTION("helper module to manage video4linux PMEM contig buffers");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 37b83eb..8f2c9ac 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -145,6 +145,39 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called tps65010.
 
+config TPS65023
+	tristate "TPS65023 Power Management chip"
+	depends on I2C && ARCH_MSM_SCORPION && !MSM_SMP
+	default y if I2C && ARCH_MSM_SCORPION && !MSM_SMP
+	help
+	  Say yes here for Qualcomm QSD chips. The TI PMIC is used by the
+	  QSD8x50 series of chips for power management.
+
+config PMIC8058
+	tristate "PMIC8058 Power Management chip"
+	depends on I2C_SSBI && (ARCH_MSM7X30 || ARCH_MSM8X60 || ARCH_FSM9XXX)
+	default y if I2C_SSBI && (ARCH_MSM7X30 || ARCH_MSM8X60 || ARCH_FSM9XXX)
+	select MFD_CORE
+	select MSM_SHOW_RESUME_IRQ
+	help
+	  Say yes here for Qualcomm PM8058 chip.
+
+config PMIC8901
+	tristate "PMIC8901 Power Management chip"
+	depends on I2C_SSBI && ARCH_MSM8X60
+	default y if I2C_SSBI && ARCH_MSM8X60
+	select MFD_CORE
+	help
+	  Say yes here for Qualcomm PM8901 chip.
+
+config MARIMBA_TSADC
+	tristate "Support for Marimba Touchscreen ADC"
+	depends on MARIMBA_CORE && ARCH_MSM7X30
+	default y if MARIMBA_CORE
+	help
+	  Say yes here if you want to include support for TSADC in the
+	  Qualcomm Marimba chip.
+
 config TPS6507X
 	tristate "TPS6507x Power Management / Touch Screen chips"
 	select MFD_CORE
@@ -181,6 +214,33 @@
 	  and other features that are often used in portable devices like
 	  cell phones and PDAs.
 
+config MARIMBA_CORE
+	tristate "Marimba Core"
+	depends on I2C && (ARCH_MSM7X30 || ARCH_MSM8X60 || ARCH_MSM7X27A)
+	default n
+	help
+	  Enables the Marimba Core driver. The core driver provides
+	  read/write capability to registers which are part of the
+	  marimba core.
+	  This driver dynamically detects the SoC and works for both
+	  Marimba and Bahama Chip.
+
+config MARIMBA_CODEC
+	tristate "Marimba Codec"
+	depends on MARIMBA_CORE
+	default n
+	help
+	 This driver programs Marimba Wideband Codec for input/output of
+	 audio signal.
+
+config TIMPANI_CODEC
+	tristate "Timpani Codec"
+	depends on MARIMBA_CORE
+	default n
+	help
+	 This driver programs Timpani Wideband Codec for input/output of
+	 audio signal.
+
 config TWL4030_CORE
 	bool "Texas Instruments TWL4030/TWL5030/TWL6030/TPS659x0 Support"
 	depends on I2C=y && GENERIC_HARDIRQS
@@ -737,6 +797,63 @@
 config TPS65911_COMPARATOR
 	tristate
 
+config MFD_PM8921_ADC
+	tristate "Support for Qualcomm PM8921 ADC"
+	depends on MFD_PM8921_CORE
+	help
+	  This is the ADC arbiter driver for Qualcomm PM8921 Chip.
+
+	  The driver supports reading the HKADC and XOADC, and can set and
+	  receive temperature threshold notifications using the battery
+	  temperature module.
+
+config MFD_PM8XXX_DEBUG
+	tristate "Qualcomm PM8xxx debugfs support"
+	depends on MFD_PM8XXX && DEBUG_FS
+	default y if MFD_PM8XXX
+	help
+	  This driver provides a debugfs interface to the SSBI registers on
+	  Qualcomm PM 8xxx PMIC chips.  It allows for reads and writes to
+	  arbitrary addresses.  Writes are blocking so values are guaranteed to
+	  be set into hardware registers upon return.
+
+config MFD_PM8XXX_PWM
+	tristate "Support for Qualcomm PM8xxx PWM feature"
+	depends on MFD_PM8XXX
+	default y if MFD_PM8XXX
+	help
+	  This is the Pulse Width Modulation (PWM) driver for Qualcomm
+	  PM 8xxx PMIC chips. It can drive 8 channels of PWM output, and
+	  has a 64-entry lookup table that can be shared by any of the
+	  8 channels.
+
+config MFD_PM8XXX_MISC
+	tristate "Support for Qualcomm PM8xxx miscellaneous APIs"
+	depends on MFD_PM8XXX
+	default y if MFD_PM8XXX
+	help
+	  This driver implements several miscellaneous APIs that may be needed
+	  in order to control the PM8XXX PMIC chip.
+
+config MFD_PM8XXX_BATT_ALARM
+	tristate "Support for Qualcomm PM8xxx battery voltage alarm"
+	depends on MFD_PM8XXX
+	help
+	  This driver provides a means to monitor battery under- and
+	  over-voltage conditions.  An upper and/or lower threshold can be
+	  specified for normal operation.  A wakeable interrupt is triggered
+	  when the battery voltage leaves the acceptable range, which then
+	  calls a notifier call chain.
+
+config WCD9310_CODEC
+	tristate "WCD9310 Codec"
+	depends on SLIMBUS
+	select MFD_CORE
+	default n
+	help
+	  Enables the WCD9310 core driver. The core driver provides
+	  read/write capability to registers which are part of the
+	  WCD9310 core and gives the ability to use the WCD9310 codec.
+
 endif # MFD_SUPPORT
 
 menu "Multimedia Capabilities Port drivers"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 22a280f..f66fc76 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -35,12 +35,26 @@
 obj-$(CONFIG_TPS6105X)		+= tps6105x.o
 obj-$(CONFIG_TPS65010)		+= tps65010.o
 obj-$(CONFIG_TPS6507X)		+= tps6507x.o
+obj-$(CONFIG_MARIMBA_CODEC)     += marimba-codec.o
 obj-$(CONFIG_MENELAUS)		+= menelaus.o
 
 obj-$(CONFIG_TWL4030_CORE)	+= twl-core.o twl4030-irq.o twl6030-irq.o
 obj-$(CONFIG_TWL4030_MADC)      += twl4030-madc.o
 obj-$(CONFIG_TWL4030_POWER)    += twl4030-power.o
 obj-$(CONFIG_MFD_TWL4030_AUDIO)	+= twl4030-audio.o
+
+obj-$(CONFIG_MARIMBA_CORE)	+= marimba-core.o
+obj-$(CONFIG_MARIMBA_TSADC)	+= marimba-tsadc.o
+obj-$(CONFIG_TPS65023)		+= tps65023.o
+
+obj-$(CONFIG_TIMPANI_CODEC)	+= timpani-codec.o
+
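+# msm-adie-codec.o provides the shared adie codec interface; link it in
+# exactly once, whichever of the two codec drivers is enabled.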
+ifdef CONFIG_TIMPANI_CODEC
+obj-$(CONFIG_TIMPANI_CODEC) += msm-adie-codec.o
+else ifdef CONFIG_MARIMBA_CODEC
+obj-$(CONFIG_MARIMBA_CODEC) += msm-adie-codec.o
+endif
+
 obj-$(CONFIG_TWL6030_PWM)	+= twl6030-pwm.o
 obj-$(CONFIG_TWL6040_CORE)	+= twl6040-core.o twl6040-irq.o
 
@@ -55,6 +69,8 @@
 obj-$(CONFIG_MCP_UCB1200)	+= ucb1x00-core.o
 obj-$(CONFIG_MCP_UCB1200_TS)	+= ucb1x00-ts.o
 
+obj-$(CONFIG_WCD9310_CODEC)	+= wcd9310-core.o wcd9310-irq.o
+
 ifeq ($(CONFIG_SA1100_ASSABET),y)
 obj-$(CONFIG_MCP_UCB1200)	+= ucb1x00-assabet.o
 endif
@@ -96,3 +112,12 @@
 obj-$(CONFIG_MFD_PM8XXX_IRQ) 	+= pm8xxx-irq.o
 obj-$(CONFIG_MFD_TPS65910)	+= tps65910.o tps65910-irq.o
 obj-$(CONFIG_TPS65911_COMPARATOR)	+= tps65911-comparator.o
+obj-$(CONFIG_PMIC8058)		+= pmic8058.o
+obj-$(CONFIG_PMIC8901)		+= pmic8901.o
+obj-$(CONFIG_MFD_PM8921_CORE) 	+= pm8921-core.o
+obj-$(CONFIG_MFD_PM8XXX_IRQ) 	+= pm8xxx-irq.o
+obj-$(CONFIG_MFD_PM8XXX_DEBUG) 	+= pm8xxx-debug.o
+obj-$(CONFIG_MFD_PM8XXX_PWM) 	+= pm8xxx-pwm.o
+obj-$(CONFIG_MFD_PM8XXX_MISC) 	+= pm8xxx-misc.o
+obj-$(CONFIG_MFD_PM8XXX_BATT_ALARM) 	+= pm8xxx-batt-alarm.o
+obj-$(CONFIG_MFD_PM8921_ADC)	+= pm8921-adc.o msmproc_adc.o
diff --git a/drivers/mfd/marimba-codec.c b/drivers/mfd/marimba-codec.c
new file mode 100644
index 0000000..6416e0a
--- /dev/null
+++ b/drivers/mfd/marimba-codec.c
@@ -0,0 +1,963 @@
+/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/msm-adie-codec.h>
+#include <linux/mfd/marimba.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/string.h>
+
+#define MARIMBA_CDC_RX_CTL 0x81
+#define MARIMBA_CDC_RX_CTL_ST_EN_MASK 0x20
+#define MARIMBA_CDC_RX_CTL_ST_EN_SHFT 0x5
+#define MARIMBA_CODEC_CDC_LRXG     0x84
+#define MARIMBA_CODEC_CDC_RRXG     0x85
+#define MARIMBA_CODEC_CDC_LTXG     0x86
+#define MARIMBA_CODEC_CDC_RTXG     0x87
+
+#define MAX_MDELAY_US 2000
+#define MIN_MDELAY_US 1000
+
+struct adie_codec_path {
+	struct adie_codec_dev_profile *profile;
+	struct adie_codec_register_image img;
+	u32 hwsetting_idx;
+	u32 stage_idx;
+	u32 curr_stage;
+};
+
+static struct adie_codec_register adie_codec_tx_regs[] = {
+	{ 0x04, 0xc0, 0x8C },
+	{ 0x0D, 0xFF, 0x00 },
+	{ 0x0E, 0xFF, 0x00 },
+	{ 0x0F, 0xFF, 0x00 },
+	{ 0x10, 0xF8, 0x68 },
+	{ 0x11, 0xFE, 0x00 },
+	{ 0x12, 0xFE, 0x00 },
+	{ 0x13, 0xFF, 0x58 },
+	{ 0x14, 0xFF, 0x00 },
+	{ 0x15, 0xFE, 0x00 },
+	{ 0x16, 0xFF, 0x00 },
+	{ 0x1A, 0xFF, 0x00 },
+	{ 0x80, 0x01, 0x00 },
+	{ 0x82, 0x7F, 0x18 },
+	{ 0x83, 0x1C, 0x00 },
+	{ 0x86, 0xFF, 0xAC },
+	{ 0x87, 0xFF, 0xAC },
+	{ 0x89, 0xFF, 0xFF },
+	{ 0x8A, 0xF0, 0x30 }
+};
+
+static struct adie_codec_register adie_codec_rx_regs[] = {
+	{ 0x23, 0xF8, 0x00 },
+	{ 0x24, 0x6F, 0x00 },
+	{ 0x25, 0x7F, 0x00 },
+	{ 0x26, 0xFC, 0x00 },
+	{ 0x28, 0xFE, 0x00 },
+	{ 0x29, 0xFE, 0x00 },
+	{ 0x33, 0xFF, 0x00 },
+	{ 0x34, 0xFF, 0x00 },
+	{ 0x35, 0xFC, 0x00 },
+	{ 0x36, 0xFE, 0x00 },
+	{ 0x37, 0xFE, 0x00 },
+	{ 0x38, 0xFE, 0x00 },
+	{ 0x39, 0xF0, 0x00 },
+	{ 0x3A, 0xFF, 0x0A },
+	{ 0x3B, 0xFC, 0xAC },
+	{ 0x3C, 0xFC, 0xAC },
+	{ 0x3D, 0xFF, 0x55 },
+	{ 0x3E, 0xFF, 0x55 },
+	{ 0x3F, 0xCF, 0x00 },
+	{ 0x40, 0x3F, 0x00 },
+	{ 0x41, 0x3F, 0x00 },
+	{ 0x42, 0xFF, 0x00 },
+	{ 0x43, 0xF7, 0x00 },
+	{ 0x43, 0xF7, 0x00 },
+	{ 0x43, 0xF7, 0x00 },
+	{ 0x43, 0xF7, 0x00 },
+	{ 0x44, 0xF7, 0x00 },
+	{ 0x45, 0xFF, 0x00 },
+	{ 0x46, 0xFF, 0x00 },
+	{ 0x47, 0xF7, 0x00 },
+	{ 0x48, 0xF7, 0x00 },
+	{ 0x49, 0xFF, 0x00 },
+	{ 0x4A, 0xFF, 0x00 },
+	{ 0x80, 0x02, 0x00 },
+	{ 0x81, 0xFF, 0x4C },
+	{ 0x83, 0x23, 0x00 },
+	{ 0x84, 0xFF, 0xAC },
+	{ 0x85, 0xFF, 0xAC },
+	{ 0x88, 0xFF, 0xFF },
+	{ 0x8A, 0x0F, 0x03 },
+	{ 0x8B, 0xFF, 0xAC },
+	{ 0x8C, 0x03, 0x01 },
+	{ 0x8D, 0xFF, 0x00 },
+	{ 0x8E, 0xFF, 0x00 }
+};
+
+static struct adie_codec_register adie_codec_lb_regs[] = {
+	{ 0x2B, 0x8F, 0x02 },
+	{ 0x2C, 0x8F, 0x02 }
+};
+
+struct adie_codec_state {
+	struct adie_codec_path path[ADIE_CODEC_MAX];
+	u32 ref_cnt;
+	struct marimba *pdrv_ptr;
+	struct marimba_codec_platform_data *codec_pdata;
+	struct mutex lock;
+};
+
+static struct adie_codec_state adie_codec;
+
+/*
+ * Register values for the Tx and Rx digital volume controls. Tx and Rx,
+ * and both the left and right channels, share the same table.
+ */
+u8 adie_codec_rx_tx_dig_vol_data[] = {
+	0x81, 0x82, 0x83, 0x84,
+	0x85, 0x86, 0x87, 0x88,
+	0x89, 0x8a, 0x8b, 0x8c,
+	0x8d, 0x8e, 0x8f, 0x90,
+	0x91, 0x92, 0x93, 0x94,
+	0x95, 0x96, 0x97, 0x98,
+	0x99, 0x9a, 0x9b, 0x9c,
+	0x9d, 0x9e, 0x9f, 0xa0,
+	0xa1, 0xa2, 0xa3, 0xa4,
+	0xa5, 0xa6, 0xa7, 0xa8,
+	0xa9, 0xaa, 0xab, 0xac,
+	0xad, 0xae, 0xaf, 0xb0,
+	0xb1, 0xb2, 0xb3, 0xb4,
+	0xb5, 0xb6, 0xb7, 0xb8,
+	0xb9, 0xba, 0xbb, 0xbc,
+	0xbd, 0xbe, 0xbf, 0xc0,
+	0xc1, 0xc2, 0xc3, 0xc4,
+	0xc5, 0xc6, 0xc7, 0xc8,
+	0xc9, 0xca, 0xcb, 0xcc,
+	0xcd, 0xce, 0xcf, 0xd0,
+	0xd1, 0xd2, 0xd3, 0xd4,
+	0xd5, 0xd6, 0xd7, 0xd8,
+	0xd9, 0xda, 0xdb, 0xdc,
+	0xdd, 0xde, 0xdf, 0xe0,
+	0xe1, 0xe2, 0xe3, 0xe4,
+	0xe5, 0xe6, 0xe7, 0xe8,
+	0xe9, 0xea, 0xeb, 0xec,
+	0xed, 0xee, 0xf0, 0xf1,
+	0xf2, 0xf3, 0xf4, 0xf5,
+	0xf6, 0xf7, 0xf8, 0xf9,
+	0xfa, 0xfb, 0xfc, 0xfd,
+	0xfe, 0xff, 0x00, 0x01,
+	0x02, 0x03, 0x04, 0x05,
+	0x06, 0x07, 0x08, 0x09,
+	0x0a, 0x0b, 0x0c, 0x0d,
+	0x0e, 0x0f, 0x10, 0x11,
+	0x12, 0x13, 0x14, 0x15,
+	0x16, 0x17, 0x18, 0x19,
+	0x1a, 0x1b, 0x1c, 0x1d,
+	0x1e, 0x1f, 0x20, 0x21,
+	0x22, 0x23, 0x24, 0x25,
+	0x26, 0x27, 0x28, 0x29,
+	0x2a, 0x2b, 0x2c, 0x2d,
+	0x2e, 0x2f, 0x30, 0x31,
+	0x32, 0x33, 0x34, 0x35,
+	0x36, 0x37, 0x38, 0x39,
+	0x3a, 0x3b, 0x3c, 0x3d,
+	0x3e, 0x3f, 0x40, 0x41,
+	0x42, 0x43, 0x44, 0x45,
+	0x46, 0x47, 0x48, 0x49,
+	0x4a, 0x4b, 0x4c, 0x4d,
+	0x4e, 0x4f, 0x50, 0x51,
+	0x52, 0x53, 0x54, 0x55,
+	0x56, 0x57, 0x58, 0x59,
+	0x5a, 0x5b, 0x5c, 0x5d,
+	0x5e, 0x5f, 0x60, 0x61,
+	0x62, 0x63, 0x64, 0x65,
+	0x66, 0x67, 0x68, 0x69,
+	0x6a, 0x6b, 0x6c, 0x6d,
+	0x6e, 0x6f, 0x70, 0x71,
+	0x72, 0x73, 0x74, 0x75,
+	0x76, 0x77, 0x78, 0x79,
+	0x7a, 0x7b, 0x7c, 0x7d,
+	0x7e, 0x7f
+};
+
+enum adie_vol_type {
+	ADIE_CODEC_RX_DIG_VOL,
+	ADIE_CODEC_TX_DIG_VOL,
+	ADIE_CODEC_VOL_TYPE_MAX
+};
+
+struct adie_codec_ch_vol_cntrl {
+	u8 codec_reg;
+	u8 codec_mask;
+	u8 *vol_cntrl_data;
+};
+
+struct adie_codec_vol_cntrl_data {
+
+	enum adie_vol_type vol_type;
+
+	/* Jump length used while doing writes in incremental fashion */
+	u32 jump_length;
+	s32 min_mb;		/* Minimum gain in millibels for this control */
+	s32 max_mb;		/* Maximum gain in millibels for this control */
+	u32 step_in_mb;
+	u32 steps;		/* No of steps allowed for this vol type */
+
+	struct adie_codec_ch_vol_cntrl *ch_vol_cntrl_info;
+};
+
+static struct adie_codec_ch_vol_cntrl adie_codec_rx_vol_cntrl[] = {
+	{MARIMBA_CODEC_CDC_LRXG, 0xff, adie_codec_rx_tx_dig_vol_data},
+	{MARIMBA_CODEC_CDC_RRXG, 0xff, adie_codec_rx_tx_dig_vol_data}
+};
+
+static struct adie_codec_ch_vol_cntrl adie_codec_tx_vol_cntrl[] = {
+	{MARIMBA_CODEC_CDC_LTXG, 0xff, adie_codec_rx_tx_dig_vol_data},
+	{MARIMBA_CODEC_CDC_RTXG, 0xff, adie_codec_rx_tx_dig_vol_data}
+};
+
+static struct adie_codec_vol_cntrl_data adie_codec_vol_cntrl[] = {
+	{ADIE_CODEC_RX_DIG_VOL, 5100, -12700, 12700, 100, 255,
+	 adie_codec_rx_vol_cntrl},
+
+	{ADIE_CODEC_TX_DIG_VOL, 5100, -12700, 12700, 100, 255,
+	 adie_codec_tx_vol_cntrl}
+};
+
+static int adie_codec_write(u8 reg, u8 mask, u8 val)
+{
+	int rc;
+
+	rc = marimba_write_bit_mask(adie_codec.pdrv_ptr, reg,  &val, 1, mask);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("%s: fail to write reg %x\n", __func__, reg);
+		return -EIO;
+	}
+
+	pr_debug("%s: write reg %x val %x\n", __func__, reg, val);
+
+	return 0;
+}
+
+static int adie_codec_read(u8 reg, u8 *val)
+{
+	return marimba_read(adie_codec.pdrv_ptr, reg, val, 1);
+}
+
+static int adie_codec_read_dig_vol(enum adie_vol_type vol_type, u32 chan_index,
+				   u32 *cur_index)
+{
+	u32 counter;
+	u32 size;
+	u8 reg, mask, cur_val;
+	int rc;
+
+	reg =
+	    adie_codec_vol_cntrl[vol_type].
+	    ch_vol_cntrl_info[chan_index].codec_reg;
+
+	mask =
+	    adie_codec_vol_cntrl[vol_type].
+	    ch_vol_cntrl_info[chan_index].codec_mask;
+
+	rc = marimba_read(adie_codec.pdrv_ptr, reg, &cur_val, 1);
+
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("%s: fail to read reg %x\n", __func__, reg);
+		return -EIO;
+	}
+
+	cur_val = cur_val & mask;
+
+	pr_debug("%s: reg 0x%x  mask 0x%x  reg_value = 0x%x"
+		"vol_type = %d\n", __func__, reg, mask, cur_val, vol_type);
+
+	size = adie_codec_vol_cntrl[vol_type].steps;
+
+	for (counter = 0; counter <= size; counter++) {
+
+		if (adie_codec_vol_cntrl[vol_type].ch_vol_cntrl_info
+		    [chan_index].vol_cntrl_data[counter] == cur_val) {
+			*cur_index = counter;
+			return 0;
+		}
+	}
+
+	pr_err("%s: could not find 0x%x in reg 0x%x values array\n",
+			__func__, cur_val, reg);
+
+	return -EINVAL;
+}
+
+static int adie_codec_set_dig_vol(enum adie_vol_type vol_type, u32 chan_index,
+				  u32 cur_index, u32 target_index)
+{
+	u32 count;
+	u8 reg, mask, val;
+	u32 i;
+	u32 index;
+	u32 index_jump;
+
+	int rc;
+
+	index_jump = adie_codec_vol_cntrl[vol_type].jump_length;
+
+	reg =
+	    adie_codec_vol_cntrl[vol_type].
+	    ch_vol_cntrl_info[chan_index].codec_reg;
+
+	mask =
+	    adie_codec_vol_cntrl[vol_type].
+	    ch_vol_cntrl_info[chan_index].codec_mask;
+
+	/* compare the target index with current index */
+	if (cur_index < target_index) {
+
+		/* Volume is being increased; ramp up towards the target in
+		 * a few intermediate steps.
+		 */
+		count = ((target_index - cur_index) * 100 / index_jump);
+		index = cur_index;
+
+		for (i = 1; i <= count; i++) {
+			index = index + (int)(index_jump / 100);
+
+			val =
+			    adie_codec_vol_cntrl[vol_type].ch_vol_cntrl_info
+			    [chan_index].vol_cntrl_data[index];
+
+			pr_debug("%s: write reg %x val 0x%x\n",
+					__func__, reg, val);
+
+			rc = adie_codec_write(reg, mask, val);
+			if (rc < 0) {
+				pr_err("%s: write reg %x val 0x%x failed\n",
+					__func__, reg, val);
+				return rc;
+			}
+		}
+
+		/*do one final write to take it to the target index level */
+		val =
+		    adie_codec_vol_cntrl[vol_type].ch_vol_cntrl_info
+		    [chan_index].vol_cntrl_data[target_index];
+
+		pr_debug("%s: write reg %x val 0x%x\n", __func__, reg, val);
+
+		rc = adie_codec_write(reg, mask, val);
+
+		if (rc < 0) {
+			pr_err("%s: write reg %x val 0x%x failed\n",
+					__func__, reg, val);
+			return rc;
+		}
+
+	} else {
+
+		/* Volume is being decreased from the current setting */
+		index = cur_index;
+		/* loop and decrease it in 4-5 steps */
+		count = ((cur_index - target_index) * 100 / index_jump);
+
+		for (i = 1; i <= count; i++) {
+			index = index - (int)(index_jump / 100);
+
+			val =
+			    adie_codec_vol_cntrl[vol_type].ch_vol_cntrl_info
+			    [chan_index].vol_cntrl_data[index];
+
+			pr_debug("%s: write reg %x val 0x%x\n",
+					__func__, reg, val);
+
+			rc = adie_codec_write(reg, mask, val);
+			if (rc < 0) {
+				pr_err("%s: write reg %x val 0x%x failed\n",
+					__func__, reg, val);
+				return rc;
+			}
+		}
+
+		/* do one final write to take it to the target index level */
+		val =
+		    adie_codec_vol_cntrl[vol_type].ch_vol_cntrl_info
+		    [chan_index].vol_cntrl_data[target_index];
+
+		pr_debug("%s: write reg %x val 0x%x\n", __func__, reg, val);
+
+		rc = adie_codec_write(reg, mask, val);
+
+		if (rc < 0) {
+			pr_err("%s: write reg %x val 0x%x failed\n",
+					__func__, reg, val);
+			return rc;
+		}
+	}
+	return 0;
+}
+
+static int marimba_adie_codec_set_device_digital_volume(
+		struct adie_codec_path *path_ptr,
+		u32 num_channels, u32 vol_percentage /* in percentage */)
+{
+	enum adie_vol_type vol_type;
+	s32 milli_bel;
+	u32 chan_index;
+	u32 step_index;
+	u32 cur_step_index = 0;
+
+	if (!path_ptr  || (path_ptr->curr_stage !=
+				ADIE_CODEC_DIGITAL_ANALOG_READY)) {
+		pr_info("%s: Marimba codec not ready for volume control\n",
+		       __func__);
+		return  -EPERM;
+	}
+
+	if (num_channels > 2) {
+		pr_err("%s: Marimba codec only supports max two channels\n",
+		       __func__);
+		return -EINVAL;
+	}
+
+	if (path_ptr->profile->path_type == ADIE_CODEC_RX)
+		vol_type = ADIE_CODEC_RX_DIG_VOL;
+	else if (path_ptr->profile->path_type == ADIE_CODEC_TX)
+		vol_type = ADIE_CODEC_TX_DIG_VOL;
+	else {
+		pr_err("%s: invalid device data neither RX nor TX\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	milli_bel = ((adie_codec_vol_cntrl[vol_type].max_mb -
+			adie_codec_vol_cntrl[vol_type].min_mb) *
+			vol_percentage) / 100;
+
+	milli_bel = adie_codec_vol_cntrl[vol_type].min_mb + milli_bel;
+
+	pr_debug("%s: millibel = %d vol_type = %d vol_percentage = %d"
+		 " num_chan = %d\n",
+		 __func__, milli_bel, vol_type, vol_percentage, num_channels);
+
+
+	step_index = ((milli_bel
+		       - adie_codec_vol_cntrl[vol_type].min_mb
+		       + (adie_codec_vol_cntrl[vol_type].step_in_mb / 2))
+		      / adie_codec_vol_cntrl[vol_type].step_in_mb);
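+	/*
+	 * Example of the computation above (constants from
+	 * adie_codec_vol_cntrl): vol_percentage = 50 gives
+	 * milli_bel = -12700 + (25400 * 50) / 100 = 0 mB, and
+	 * step_index = (0 + 12700 + 50) / 100 = 127, roughly the midpoint
+	 * of the volume table.
+	 */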
+
+
+	for (chan_index = 0; chan_index < num_channels; chan_index++) {
+		adie_codec_read_dig_vol(vol_type, chan_index, &cur_step_index);
+
+		pr_debug("%s: cur_step_index = %u  current vol = 0x%x\n",
+				__func__, cur_step_index,
+			adie_codec_vol_cntrl[vol_type].ch_vol_cntrl_info
+			[chan_index].vol_cntrl_data[cur_step_index]);
+
+		pr_debug("%s: step index = %u  new volume = 0x%x\n",
+		 __func__, step_index,
+		 adie_codec_vol_cntrl[vol_type].ch_vol_cntrl_info
+		 [chan_index].vol_cntrl_data[step_index]);
+
+		adie_codec_set_dig_vol(vol_type, chan_index, cur_step_index,
+				       step_index);
+
+	}
+	return 0;
+}
+
+static int marimba_adie_codec_setpath(struct adie_codec_path *path_ptr,
+					u32 freq_plan, u32 osr)
+{
+	int rc = 0;
+	u32 i, freq_idx = 0, freq = 0;
+
+	if ((path_ptr->curr_stage != ADIE_CODEC_DIGITAL_OFF) &&
+		(path_ptr->curr_stage != ADIE_CODEC_FLASH_IMAGE)) {
+		rc = -EBUSY;
+		goto error;
+	}
+
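+	/* Among the profile settings with a matching OSR, pick the one with
+	 * the smallest freq_plan that still satisfies the requested plan.
+	 */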
+	for (i = 0; i < path_ptr->profile->setting_sz; i++) {
+		if (path_ptr->profile->settings[i].osr == osr) {
+			if (path_ptr->profile->settings[i].freq_plan >=
+				freq_plan) {
+				if (freq == 0) {
+					freq = path_ptr->profile->settings[i].
+								freq_plan;
+					freq_idx = i;
+				} else if (path_ptr->profile->settings[i].
+					freq_plan < freq) {
+					freq = path_ptr->profile->settings[i].
+								freq_plan;
+					freq_idx = i;
+				}
+			}
+		}
+	}
+
+	if (freq_idx >= path_ptr->profile->setting_sz)
+		rc = -ENODEV;
+	else {
+		path_ptr->hwsetting_idx = freq_idx;
+		path_ptr->stage_idx = 0;
+	}
+
+error:
+	return rc;
+}
+
+static u32 marimba_adie_codec_freq_supported(
+				struct adie_codec_dev_profile *profile,
+				u32 requested_freq)
+{
+	u32 i, rc = -EINVAL;
+
+	for (i = 0; i < profile->setting_sz; i++) {
+		if (profile->settings[i].freq_plan >= requested_freq) {
+			rc = 0;
+			break;
+		}
+	}
+	return rc;
+}
+
+static int marimba_adie_codec_enable_sidetone(
+				struct adie_codec_path *rx_path_ptr,
+				u32 enable)
+{
+	int rc = 0;
+
+	pr_debug("%s()\n", __func__);
+
+	mutex_lock(&adie_codec.lock);
+
+	if (!rx_path_ptr || &adie_codec.path[ADIE_CODEC_RX] != rx_path_ptr) {
+		pr_err("%s: invalid path pointer\n", __func__);
+		rc = -EINVAL;
+		goto error;
+	} else if (rx_path_ptr->curr_stage !=
+		ADIE_CODEC_DIGITAL_ANALOG_READY) {
+		pr_err("%s: bad state\n", __func__);
+		rc = -EPERM;
+		goto error;
+	}
+
+	if (enable)
+		rc = adie_codec_write(MARIMBA_CDC_RX_CTL,
+		MARIMBA_CDC_RX_CTL_ST_EN_MASK,
+		(0x1 << MARIMBA_CDC_RX_CTL_ST_EN_SHFT));
+	else
+		rc = adie_codec_write(MARIMBA_CDC_RX_CTL,
+		MARIMBA_CDC_RX_CTL_ST_EN_MASK, 0);
+
+error:
+	mutex_unlock(&adie_codec.lock);
+	return rc;
+}
+
+static void adie_codec_reach_stage_action(struct adie_codec_path *path_ptr,
+						u32 stage)
+{
+	u32 iter;
+	struct adie_codec_register *reg_info;
+
+	if (stage == ADIE_CODEC_FLASH_IMAGE) {
+		/* perform reimage */
+		for (iter = 0; iter < path_ptr->img.img_sz; iter++) {
+			reg_info = &path_ptr->img.regs[iter];
+			adie_codec_write(reg_info->reg,
+			reg_info->mask, reg_info->val);
+		}
+	}
+}
+
+static int marimba_adie_codec_proceed_stage(struct adie_codec_path *path_ptr,
+						u32 state)
+{
+	int rc = 0, loop_exit = 0;
+	struct adie_codec_action_unit *curr_action;
+	struct adie_codec_hwsetting_entry *setting;
+	u8 reg, mask, val;
+
+	mutex_lock(&adie_codec.lock);
+	setting = &path_ptr->profile->settings[path_ptr->hwsetting_idx];
+	while (!loop_exit) {
+		curr_action = &setting->actions[path_ptr->stage_idx];
+		switch (curr_action->type) {
+		case ADIE_CODEC_ACTION_ENTRY:
+			ADIE_CODEC_UNPACK_ENTRY(curr_action->action,
+			reg, mask, val);
+			adie_codec_write(reg, mask, val);
+			break;
+		case ADIE_CODEC_ACTION_DELAY_WAIT:
+			if (curr_action->action > MAX_MDELAY_US)
+				msleep(curr_action->action/1000);
+			else if (curr_action->action < MIN_MDELAY_US)
+				udelay(curr_action->action);
+			else
+				mdelay(curr_action->action/1000);
+			break;
+		case ADIE_CODEC_ACTION_STAGE_REACHED:
+			adie_codec_reach_stage_action(path_ptr,
+				curr_action->action);
+			if (curr_action->action == state) {
+				path_ptr->curr_stage = state;
+				loop_exit = 1;
+			}
+			break;
+		default:
+			BUG();
+		}
+
+		path_ptr->stage_idx++;
+		if (path_ptr->stage_idx == setting->action_sz)
+			path_ptr->stage_idx = 0;
+	}
+	mutex_unlock(&adie_codec.lock);
+	return rc;
+}
+
+static void marimba_codec_bring_up(void)
+{
+	/* bring up sequence for Marimba codec core
+	 * ensure RESET_N = 0 and GDFS_CLAMP_EN=1 -
+	 * set GDFS_EN_FEW=1 then GDFS_EN_REST=1 then
+	 * GDFS_CLAMP_EN = 0 and finally RESET_N = 1
+	 * Marimba codec bring up should use the Marimba
+	 * slave address after which the codec slave
+	 * address can be used
+	 */
+
+	/* Bring up codec */
+	adie_codec_write(0xFF, 0xFF, 0x08);
+
+	/* set GDFS_EN_FEW=1 */
+	adie_codec_write(0xFF, 0xFF, 0x0a);
+
+	/* set GDFS_EN_REST=1 */
+	adie_codec_write(0xFF, 0xFF, 0x0e);
+
+	/* set RESET_N=1 */
+	adie_codec_write(0xFF, 0xFF, 0x07);
+
+	adie_codec_write(0xFF, 0xFF, 0x17);
+
+	/* enable band gap */
+	adie_codec_write(0x03, 0xFF, 0x04);
+
+	/* dither delay selected and dmic gain stage bypassed */
+	adie_codec_write(0x8F, 0xFF, 0x44);
+}
+
+static void marimba_codec_bring_down(void)
+{
+	adie_codec_write(0xFF, 0xFF, 0x07);
+	adie_codec_write(0xFF, 0xFF, 0x06);
+	adie_codec_write(0xFF, 0xFF, 0x0e);
+	adie_codec_write(0xFF, 0xFF, 0x08);
+	adie_codec_write(0x03, 0xFF, 0x00);
+}
+
+static int marimba_adie_codec_open(struct adie_codec_dev_profile *profile,
+	struct adie_codec_path **path_pptr)
+{
+	int rc = 0;
+
+	mutex_lock(&adie_codec.lock);
+
+	if (!profile || !path_pptr) {
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (adie_codec.path[profile->path_type].profile) {
+		rc = -EBUSY;
+		goto error;
+	}
+
+	if (!adie_codec.ref_cnt) {
+
+		if (adie_codec.codec_pdata &&
+				adie_codec.codec_pdata->marimba_codec_power) {
+
+			rc = adie_codec.codec_pdata->marimba_codec_power(1);
+			if (rc) {
+				pr_err("%s: could not power up marimba "
+						"codec\n", __func__);
+				goto error;
+			}
+		}
+		marimba_codec_bring_up();
+	}
+
+	adie_codec.path[profile->path_type].profile = profile;
+	*path_pptr = (void *) &adie_codec.path[profile->path_type];
+	adie_codec.ref_cnt++;
+	adie_codec.path[profile->path_type].hwsetting_idx = 0;
+	adie_codec.path[profile->path_type].curr_stage = ADIE_CODEC_FLASH_IMAGE;
+	adie_codec.path[profile->path_type].stage_idx = 0;
+
+
+error:
+
+	mutex_unlock(&adie_codec.lock);
+	return rc;
+}
+
+static int marimba_adie_codec_close(struct adie_codec_path *path_ptr)
+{
+	int rc = 0;
+
+	mutex_lock(&adie_codec.lock);
+
+	if (!path_ptr) {
+		rc = -EINVAL;
+		goto error;
+	}
+	if (path_ptr->curr_stage != ADIE_CODEC_DIGITAL_OFF)
+		adie_codec_proceed_stage(path_ptr, ADIE_CODEC_DIGITAL_OFF);
+
+	BUG_ON(!adie_codec.ref_cnt);
+
+	path_ptr->profile = NULL;
+	adie_codec.ref_cnt--;
+
+	if (!adie_codec.ref_cnt) {
+
+		marimba_codec_bring_down();
+
+		if (adie_codec.codec_pdata &&
+				adie_codec.codec_pdata->marimba_codec_power) {
+
+			rc = adie_codec.codec_pdata->marimba_codec_power(0);
+			if (rc) {
+				pr_err("%s: could not power down marimba "
+						"codec\n", __func__);
+				goto error;
+			}
+		}
+	}
+
+error:
+	mutex_unlock(&adie_codec.lock);
+	return rc;
+}
+
+static const struct adie_codec_operations marimba_adie_ops = {
+	.codec_id = MARIMBA_ID,
+	.codec_open = marimba_adie_codec_open,
+	.codec_close = marimba_adie_codec_close,
+	.codec_setpath = marimba_adie_codec_setpath,
+	.codec_proceed_stage = marimba_adie_codec_proceed_stage,
+	.codec_freq_supported = marimba_adie_codec_freq_supported,
+	.codec_enable_sidetone = marimba_adie_codec_enable_sidetone,
+	.codec_set_device_digital_volume =
+			marimba_adie_codec_set_device_digital_volume,
+};
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *debugfs_marimba_dent;
+static struct dentry *debugfs_peek;
+static struct dentry *debugfs_poke;
+static struct dentry *debugfs_power;
+
+static unsigned char read_data;
+
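+/*
+ * Illustrative debugfs usage (sketch only; assumes debugfs is mounted at
+ * /sys/kernel/debug and the caller is root -- the register numbers below
+ * are placeholders, not recommended values):
+ *
+ *   echo "0x84 0xac" > /sys/kernel/debug/msm_adie_codec/poke   # write reg
+ *   echo "0x84"      > /sys/kernel/debug/msm_adie_codec/peek   # latch a read
+ *   cat /sys/kernel/debug/msm_adie_codec/peek                  # print value
+ *   echo "1"         > /sys/kernel/debug/msm_adie_codec/power  # power codec up
+ */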
+static int codec_debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static int get_parameters(char *buf, long int *param1, int num_of_par)
+{
+	char *token;
+	int base, cnt;
+
+	token = strsep(&buf, " ");
+
+	for (cnt = 0; cnt < num_of_par; cnt++) {
+		if (token != NULL) {
+			if ((token[1] == 'x') || (token[1] == 'X'))
+				base = 16;
+			else
+				base = 10;
+
+			if (strict_strtoul(token, base, &param1[cnt]) != 0)
+				return -EINVAL;
+
+			token = strsep(&buf, " ");
+		} else {
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static ssize_t codec_debug_read(struct file *file, char __user *ubuf,
+				size_t count, loff_t *ppos)
+{
+	char lbuf[8];
+
+	snprintf(lbuf, sizeof(lbuf), "0x%x\n", read_data);
+	return simple_read_from_buffer(ubuf, count, ppos, lbuf, strlen(lbuf));
+}
+
+static ssize_t codec_debug_write(struct file *filp,
+	const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	char *access_str = filp->private_data;
+	char lbuf[32];
+	int rc;
+	long int param[5];
+
+	if (cnt > sizeof(lbuf) - 1)
+		return -EINVAL;
+
+	rc = copy_from_user(lbuf, ubuf, cnt);
+	if (rc)
+		return -EFAULT;
+
+	lbuf[cnt] = '\0';
+
+	if (!strcmp(access_str, "power")) {
+		if (get_parameters(lbuf, param, 1) == 0) {
+			switch (param[0]) {
+			case 1:
+				adie_codec.codec_pdata->marimba_codec_power(1);
+				marimba_codec_bring_up();
+				break;
+			case 0:
+				marimba_codec_bring_down();
+				adie_codec.codec_pdata->marimba_codec_power(0);
+				break;
+			default:
+				rc = -EINVAL;
+				break;
+			}
+		} else
+			rc = -EINVAL;
+	} else if (!strcmp(access_str, "poke")) {
+		/* write */
+		rc = get_parameters(lbuf, param, 2);
+		if ((param[0] <= 0xFF) && (param[1] <= 0xFF) &&
+			(rc == 0))
+			adie_codec_write(param[0], 0xFF, param[1]);
+		else
+			rc = -EINVAL;
+	} else if (!strcmp(access_str, "peek")) {
+		/* read */
+		rc = get_parameters(lbuf, param, 1);
+		if ((param[0] <= 0xFF) && (rc == 0))
+			adie_codec_read(param[0], &read_data);
+		else
+			rc = -EINVAL;
+	}
+
+	if (rc == 0)
+		rc = cnt;
+	else
+		pr_err("%s: rc = %d\n", __func__, rc);
+
+	return rc;
+}
+
+static const struct file_operations codec_debug_ops = {
+	.open = codec_debug_open,
+	.write = codec_debug_write,
+	.read = codec_debug_read
+};
+#endif
+
+static int marimba_codec_probe(struct platform_device *pdev)
+{
+	int rc;
+
+	adie_codec.pdrv_ptr = platform_get_drvdata(pdev);
+	adie_codec.codec_pdata = pdev->dev.platform_data;
+
+	if (adie_codec.codec_pdata->snddev_profile_init)
+		adie_codec.codec_pdata->snddev_profile_init();
+
+	/* Register the marimba ADIE operations */
+	rc = adie_codec_register_codec_operations(&marimba_adie_ops);
+
+#ifdef CONFIG_DEBUG_FS
+	debugfs_marimba_dent = debugfs_create_dir("msm_adie_codec", 0);
+	if (!IS_ERR(debugfs_marimba_dent)) {
+		debugfs_peek = debugfs_create_file("peek",
+		S_IFREG | S_IRUGO, debugfs_marimba_dent,
+		(void *) "peek", &codec_debug_ops);
+
+		debugfs_poke = debugfs_create_file("poke",
+		S_IFREG | S_IRUGO, debugfs_marimba_dent,
+		(void *) "poke", &codec_debug_ops);
+
+		debugfs_power = debugfs_create_file("power",
+		S_IFREG | S_IRUGO, debugfs_marimba_dent,
+		(void *) "power", &codec_debug_ops);
+	}
+#endif
+	return rc;
+}
+
+static struct platform_driver marimba_codec_driver = {
+	.probe = marimba_codec_probe,
+	.driver = {
+		.name = "marimba_codec",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init marimba_codec_init(void)
+{
+	s32 rc;
+
+	rc = platform_driver_register(&marimba_codec_driver);
+	if (IS_ERR_VALUE(rc))
+		goto error;
+
+	adie_codec.path[ADIE_CODEC_TX].img.regs = adie_codec_tx_regs;
+	adie_codec.path[ADIE_CODEC_TX].img.img_sz =
+	ARRAY_SIZE(adie_codec_tx_regs);
+	adie_codec.path[ADIE_CODEC_RX].img.regs = adie_codec_rx_regs;
+	adie_codec.path[ADIE_CODEC_RX].img.img_sz =
+	ARRAY_SIZE(adie_codec_rx_regs);
+	adie_codec.path[ADIE_CODEC_LB].img.regs = adie_codec_lb_regs;
+	adie_codec.path[ADIE_CODEC_LB].img.img_sz =
+	ARRAY_SIZE(adie_codec_lb_regs);
+	mutex_init(&adie_codec.lock);
+
+error:
+	return rc;
+}
+
+static void __exit marimba_codec_exit(void)
+{
+#ifdef CONFIG_DEBUG_FS
+	debugfs_remove(debugfs_peek);
+	debugfs_remove(debugfs_poke);
+	debugfs_remove(debugfs_power);
+	debugfs_remove(debugfs_marimba_dent);
+#endif
+	platform_driver_unregister(&marimba_codec_driver);
+}
+
+module_init(marimba_codec_init);
+module_exit(marimba_codec_exit);
+
+MODULE_DESCRIPTION("Marimba codec driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/marimba-core.c b/drivers/mfd/marimba-core.c
new file mode 100644
index 0000000..fec9ef4
--- /dev/null
+++ b/drivers/mfd/marimba-core.c
@@ -0,0 +1,699 @@
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * Qualcomm Marimba Core Driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+
+#include <linux/i2c.h>
+#include <linux/mfd/marimba.h>
+
+#define MARIMBA_MODE				0x00
+
+#define ADIE_ARRY_SIZE  (CHIP_ID_MAX * MARIMBA_NUM_CHILD)
+
+static int marimba_shadow[ADIE_ARRY_SIZE][0xff];
+
+struct marimba marimba_modules[ADIE_ARRY_SIZE];
+
+#define MARIMBA_VERSION_REG		0x11
+#define MARIMBA_MODE_REG		0x00
+
+struct marimba_platform_data *marimba_pdata;
+
+static uint32_t marimba_gpio_count;
+static bool fm_status;
+static bool bt_status;
+
+#ifdef CONFIG_I2C_SSBI
+#define NUM_ADD	MARIMBA_NUM_CHILD
+#else
+#define NUM_ADD	(MARIMBA_NUM_CHILD - 1)
+#endif
+
+
+/**
+ * marimba_read_bahama_ver - Reads Bahama version.
+ * @param marimba: marimba structure pointer passed by client
+ * @returns result of the operation.
+ */
+int marimba_read_bahama_ver(struct marimba *marimba)
+{
+	int rc;
+	u8 bahama_version;
+
+	rc = marimba_read_bit_mask(marimba, 0x00,  &bahama_version, 1, 0x1F);
+	if (rc < 0)
+		return rc;
+	switch (bahama_version) {
+	case 0x08: /* variant of bahama v1 */
+	case 0x10:
+	case 0x00:
+		return BAHAMA_VER_1_0;
+	case 0x09: /* variant of bahama v2 */
+		return BAHAMA_VER_2_0;
+	default:
+		return BAHAMA_VER_UNSUPPORTED;
+	}
+}
+EXPORT_SYMBOL(marimba_read_bahama_ver);
+/**
+ * marimba_ssbi_write - Writes n bytes to a TSADC register in Marimba
+ * @param marimba: marimba structure pointer passed by client
+ * @param reg: register address
+ * @param value: buffer to be written
+ * @param len: num of bytes
+ * @returns result of the operation.
+ */
+int marimba_ssbi_write(struct marimba *marimba, u16 reg , u8 *value, int len)
+{
+	struct i2c_msg *msg;
+	int ret;
+
+	marimba = &marimba_modules[marimba->mod_id];
+
+	mutex_lock(&marimba->xfer_lock);
+
+	msg = &marimba->xfer_msg[0];
+	msg->addr = reg;
+	msg->flags = 0x0;
+	msg->buf = value;
+	msg->len = len;
+
+	ret = i2c_transfer(marimba->client->adapter, marimba->xfer_msg, 1);
+
+	mutex_unlock(&marimba->xfer_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(marimba_ssbi_write);
+
+/**
+ * marimba_ssbi_read - Reads n bytes from a TSADC register in Marimba
+ * @param marimba: marimba structure pointer passed by client
+ * @param reg: register address
+ * @param value: ssbi read of the register to be stored
+ * @param len: num of bytes
+ *
+ * @returns result of the operation.
+*/
+int marimba_ssbi_read(struct marimba *marimba, u16 reg, u8 *value, int len)
+{
+	struct i2c_msg *msg;
+	int ret;
+
+	marimba = &marimba_modules[marimba->mod_id];
+
+	mutex_lock(&marimba->xfer_lock);
+
+	msg = &marimba->xfer_msg[0];
+	msg->addr = reg;
+	msg->flags = I2C_M_RD;
+	msg->buf = value;
+	msg->len = len;
+
+	ret = i2c_transfer(marimba->client->adapter, marimba->xfer_msg, 1);
+
+	mutex_unlock(&marimba->xfer_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(marimba_ssbi_read);
+
+/**
+ * marimba_write_bit_mask - Writes n bytes to Marimba registers using a bit mask
+ * @param marimba: marimba structure pointer passed by client
+ * @param reg: register address
+ * @param value: buffer to be written to the registers
+ * @param num_bytes: n bytes to write
+ * @param mask: bit mask corresponding to the registers
+ *
+ * @returns result of the operation.
+ */
+int marimba_write_bit_mask(struct marimba *marimba, u8 reg, u8 *value,
+						unsigned num_bytes, u8 mask)
+{
+	int ret, i;
+	struct i2c_msg *msg;
+	u8 data[num_bytes + 1];
+	u8 mask_value[num_bytes];
+
+	marimba = &marimba_modules[marimba->mod_id];
+
+	mutex_lock(&marimba->xfer_lock);
+
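+	/* Merge the new bits into the shadow copy of each register so that
+	 * bits outside the mask keep their last known value.
+	 */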
+	for (i = 0; i < num_bytes; i++)
+		mask_value[i] = (marimba_shadow[marimba->mod_id][reg + i]
+					& ~mask) | (value[i] & mask);
+
+	msg = &marimba->xfer_msg[0];
+	msg->addr = marimba->client->addr;
+	msg->flags = 0;
+	msg->len = num_bytes + 1;
+	msg->buf = data;
+	data[0] = reg;
+	memcpy(data+1, mask_value, num_bytes);
+
+	ret = i2c_transfer(marimba->client->adapter, marimba->xfer_msg, 1);
+
+	/* Try again if the write fails */
+	if (ret != 1)
+		ret = i2c_transfer(marimba->client->adapter,
+						marimba->xfer_msg, 1);
+
+	if (ret == 1) {
+		for (i = 0; i < num_bytes; i++)
+			marimba_shadow[marimba->mod_id][reg + i]
+							= mask_value[i];
+	} else
+		dev_err(&marimba->client->dev, "i2c write failed\n");
+
+	mutex_unlock(&marimba->xfer_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(marimba_write_bit_mask);
+
+/**
+ * marimba_write - Writes n bytes to Marimba registers
+ * @param marimba: marimba structure pointer passed by client
+ * @param reg: register address
+ * @param value: buffer values to be written
+ * @param num_bytes: n bytes to write
+ *
+ * @returns result of the operation.
+ */
+int marimba_write(struct marimba *marimba, u8 reg, u8 *value,
+							unsigned num_bytes)
+{
+	return marimba_write_bit_mask(marimba, reg, value, num_bytes, 0xff);
+}
+EXPORT_SYMBOL(marimba_write);
+
+/**
+ * marimba_read_bit_mask - Reads n bytes from Marimba registers, applying a bit mask
+ * @param marimba: marimba structure pointer passed by client
+ * @param reg: register address
+ * @param value: i2c read of the register to be stored
+ * @param num_bytes: n bytes to be read.
+ * @param mask: bit mask applied to each register value read
+ *
+ * @returns result of the operation.
+*/
+int marimba_read_bit_mask(struct marimba *marimba, u8 reg, u8 *value,
+						unsigned num_bytes, u8 mask)
+{
+	int ret, i;
+
+	struct i2c_msg *msg;
+
+	marimba = &marimba_modules[marimba->mod_id];
+
+	mutex_lock(&marimba->xfer_lock);
+
+	msg = &marimba->xfer_msg[0];
+	msg->addr = marimba->client->addr;
+	msg->len = 1;
+	msg->flags = 0;
+	msg->buf = &reg;
+
+	msg = &marimba->xfer_msg[1];
+	msg->addr = marimba->client->addr;
+	msg->len = num_bytes;
+	msg->flags = I2C_M_RD;
+	msg->buf = value;
+
+	ret = i2c_transfer(marimba->client->adapter, marimba->xfer_msg, 2);
+
+	/* Try again if read fails first time */
+	if (ret != 2)
+		ret = i2c_transfer(marimba->client->adapter,
+						marimba->xfer_msg, 2);
+
+	if (ret == 2) {
+		for (i = 0; i < num_bytes; i++) {
+			marimba_shadow[marimba->mod_id][reg + i] = value[i];
+			value[i] &= mask;
+		}
+	} else
+		dev_err(&marimba->client->dev, "i2c read failed\n");
+
+	mutex_unlock(&marimba->xfer_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(marimba_read_bit_mask);
+
+/**
+ * marimba_read - Reads n bytes from Marimba registers
+ * @param marimba: marimba structure pointer passed by client
+ * @param reg: register address
+ * @param value: buffer to store the values read over i2c
+ * @param num_bytes: n bytes to read.
+ *
+ * @returns result of the operation.
+*/
+int marimba_read(struct marimba *marimba, u8 reg, u8 *value, unsigned num_bytes)
+{
+	return marimba_read_bit_mask(marimba, reg, value, num_bytes, 0xff);
+}
+EXPORT_SYMBOL(marimba_read);
+
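+/*
+ * Illustrative client usage (sketch only; assumes the client was handed a
+ * struct marimba pointer through platform_set_drvdata() by this core
+ * driver, as done in add_numbered_child() below):
+ *
+ *	struct marimba *marimba = platform_get_drvdata(pdev);
+ *	u8 ver;
+ *
+ *	marimba->mod_id = MARIMBA_SLAVE_ID_MARIMBA;
+ *	if (marimba_read(marimba, 0x11 /+ version reg, example only +/, &ver, 1) < 0)
+ *		dev_err(&pdev->dev, "version read failed\n");
+ */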
+int timpani_read(struct marimba *marimba, u8 reg, u8 *value, unsigned num_bytes)
+{
+	return marimba_read_bit_mask(marimba, reg, value, num_bytes, 0xff);
+}
+EXPORT_SYMBOL(timpani_read);
+
+int timpani_write(struct marimba *marimba, u8 reg,
+					u8 *value, unsigned num_bytes)
+{
+	return marimba_write_bit_mask(marimba, reg, value, num_bytes, 0xff);
+}
+EXPORT_SYMBOL(timpani_write);
+
+static int cur_codec_type = -1, cur_adie_type = -1, cur_connv_type = -1;
+static int adie_arry_idx;
+
+int adie_get_detected_codec_type(void)
+{
+	return cur_codec_type;
+}
+EXPORT_SYMBOL(adie_get_detected_codec_type);
+
+int adie_get_detected_connectivity_type(void)
+{
+	return cur_connv_type;
+}
+EXPORT_SYMBOL(adie_get_detected_connectivity_type);
+
+static struct device *
+add_numbered_child(unsigned chip, const char *name, int num, u8 driver_data,
+					void *pdata, unsigned pdata_len)
+{
+	struct platform_device *pdev;
+	struct marimba  *marimba = &marimba_modules[chip + adie_arry_idx];
+	int status = 0;
+
+	pdev = platform_device_alloc(name, num);
+	if (!pdev) {
+		status = -ENOMEM;
+		return ERR_PTR(status);
+	}
+
+	pdev->dev.parent = &marimba->client->dev;
+
+	marimba->mod_id = chip + adie_arry_idx;
+
+	platform_set_drvdata(pdev, marimba);
+
+	if (pdata) {
+		status = platform_device_add_data(pdev, pdata, pdata_len);
+		if (status < 0)
+			goto err;
+	}
+
+	status = platform_device_add(pdev);
+	if (status < 0)
+		goto err;
+
+err:
+	if (status < 0) {
+		platform_set_drvdata(pdev, NULL);
+		platform_device_put(pdev);
+		dev_err(&marimba->client->dev, "can't add %s dev\n", name);
+		return ERR_PTR(status);
+	}
+	return &pdev->dev;
+}
+
+static inline struct device *add_child(unsigned chip, const char *name,
+		u8 driver_data, void *pdata, unsigned pdata_len)
+{
+	return add_numbered_child(chip, name, -1, driver_data, pdata,
+								pdata_len);
+}
+
+static int marimba_add_child(struct marimba_platform_data *pdata,
+					u8 driver_data)
+{
+	struct device	*child;
+
+	if (cur_adie_type == MARIMBA_ID) {
+		child = add_child(MARIMBA_SLAVE_ID_FM, "marimba_fm",
+			driver_data, pdata->fm, sizeof(*pdata->fm));
+		if (IS_ERR(child))
+			return PTR_ERR(child);
+	} else if ((cur_adie_type == BAHAMA_ID) &&
+			(cur_connv_type == BAHAMA_ID)) {
+		child = add_child(BAHAMA_SLAVE_ID_FM_ID, "marimba_fm",
+			driver_data, pdata->fm, sizeof(*pdata->fm));
+		if (IS_ERR(child))
+			return PTR_ERR(child);
+	}
+
+	/* Add Codec for Marimba and Timpani */
+	if (cur_adie_type == MARIMBA_ID) {
+		child = add_child(MARIMBA_SLAVE_ID_CDC, "marimba_codec",
+			driver_data, pdata->codec, sizeof(*pdata->codec));
+		if (IS_ERR(child))
+			return PTR_ERR(child);
+	} else if (cur_adie_type == TIMPANI_ID) {
+		child = add_child(MARIMBA_SLAVE_ID_CDC, "timpani_codec",
+			driver_data, pdata->codec, sizeof(*pdata->codec));
+		if (IS_ERR(child))
+			return PTR_ERR(child);
+	}
+
+#if defined(CONFIG_I2C_SSBI)
+	if ((pdata->tsadc != NULL) && (cur_adie_type != BAHAMA_ID)) {
+		child = add_child(MARIMBA_ID_TSADC, "marimba_tsadc",
+			driver_data, pdata->tsadc, sizeof(*pdata->tsadc));
+		if (IS_ERR(child))
+			return PTR_ERR(child);
+	}
+#endif
+	return 0;
+}
+
+int marimba_gpio_config(int gpio_value)
+{
+	struct marimba *marimba = &marimba_modules[MARIMBA_SLAVE_ID_MARIMBA];
+	struct marimba_platform_data *pdata = marimba_pdata;
+	int rc = 0;
+
+	/* Clients BT/FM need to manage GPIO 34 on Fusion for its clocks */
+
+	mutex_lock(&marimba->xfer_lock);
+
+	if (gpio_value) {
+		marimba_gpio_count++;
+		if (marimba_gpio_count == 1)
+			rc = pdata->marimba_gpio_config(1);
+	} else {
+		marimba_gpio_count--;
+		if (marimba_gpio_count == 0)
+			rc = pdata->marimba_gpio_config(0);
+	}
+
+	mutex_unlock(&marimba->xfer_lock);
+
+	return rc;
+
+}
+EXPORT_SYMBOL(marimba_gpio_config);
+
+bool marimba_get_fm_status(struct marimba *marimba)
+{
+	bool ret;
+
+	marimba = &marimba_modules[marimba->mod_id];
+
+	mutex_lock(&marimba->xfer_lock);
+
+	ret = fm_status;
+
+	mutex_unlock(&marimba->xfer_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(marimba_get_fm_status);
+
+void marimba_set_fm_status(struct marimba *marimba, bool value)
+{
+	marimba = &marimba_modules[marimba->mod_id];
+
+	mutex_lock(&marimba->xfer_lock);
+
+	fm_status = value;
+
+	mutex_unlock(&marimba->xfer_lock);
+}
+EXPORT_SYMBOL(marimba_set_fm_status);
+
+bool marimba_get_bt_status(struct marimba *marimba)
+{
+	bool ret;
+
+	marimba = &marimba_modules[marimba->mod_id];
+
+	mutex_lock(&marimba->xfer_lock);
+
+	ret = bt_status;
+
+	mutex_unlock(&marimba->xfer_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(marimba_get_bt_status);
+
+void marimba_set_bt_status(struct marimba *marimba, bool value)
+{
+	marimba = &marimba_modules[marimba->mod_id];
+
+	mutex_lock(&marimba->xfer_lock);
+
+	bt_status = value;
+
+	mutex_unlock(&marimba->xfer_lock);
+}
+EXPORT_SYMBOL(marimba_set_bt_status);
+
+static int get_adie_type(void)
+{
+	u8 rd_val;
+	int ret;
+
+	struct marimba *marimba = &marimba_modules[ADIE_ARRY_SIZE - 1];
+
+	marimba->mod_id = ADIE_ARRY_SIZE - 1;
+	/* Enable the Mode for Marimba/Timpani */
+	ret = marimba_read(marimba, MARIMBA_MODE_REG, &rd_val, 1);
+
+	if (ret >= 0) {
+		if (rd_val & 0x80) {
+			cur_adie_type = BAHAMA_ID;
+			return cur_adie_type;
+		} else {
+			ret = marimba_read(marimba,
+				MARIMBA_VERSION_REG, &rd_val, 1);
+			if ((ret >= 0) && (rd_val & 0x20)) {
+				cur_adie_type = TIMPANI_ID;
+				return cur_adie_type;
+			} else if (ret >= 0) {
+				cur_adie_type = MARIMBA_ID;
+				return cur_adie_type;
+			}
+		}
+	}
+
+	return ret;
+}
+
+static void marimba_init_reg(struct i2c_client *client, u8 driver_data)
+{
+	struct marimba_platform_data *pdata = client->dev.platform_data;
+	struct marimba *marimba =
+		&marimba_modules[MARIMBA_SLAVE_ID_MARIMBA + adie_arry_idx];
+
+	u8 buf[1];
+
+	buf[0] = 0x10;
+
+	if (cur_adie_type != BAHAMA_ID) {
+		marimba->mod_id = MARIMBA_SLAVE_ID_MARIMBA + adie_arry_idx;
+		/* Enable the Mode for Marimba/Timpani */
+		marimba_write(marimba, MARIMBA_MODE, buf, 1);
+	} else if ((cur_adie_type == BAHAMA_ID) &&
+				(cur_connv_type == BAHAMA_ID)) {
+		marimba->mod_id = MARIMBA_SLAVE_ID_MARIMBA + adie_arry_idx;
+		marimba_write(marimba, BAHAMA_SLAVE_ID_FM_ID,
+				&pdata->slave_id[SLAVE_ID_BAHAMA_FM], 1);
+		/* Configure Bahama core registers (AREG & DREG) */
+		/* with optimal values to eliminate power leakage */
+		if (pdata->bahama_core_config != NULL)
+			pdata->bahama_core_config(cur_adie_type);
+	}
+}
+
+static int marimba_probe(struct i2c_client *client,
+				const struct i2c_device_id *id)
+{
+	struct marimba_platform_data *pdata = client->dev.platform_data;
+	struct i2c_adapter *ssbi_adap;
+	struct marimba *marimba;
+	int i, status, rc, client_loop, adie_slave_idx_offset;
+	int rc_bahama = 0, rc_marimba = 0;
+
+	if (!pdata) {
+		dev_dbg(&client->dev, "no platform data?\n");
+		return -EINVAL;
+	}
+
+	if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C) == 0) {
+		dev_dbg(&client->dev, "can't talk I2C?\n");
+		return -EIO;
+	}
+
+	/* First, identify the codec type */
+	if (pdata->marimba_setup != NULL) {
+		rc_marimba = pdata->marimba_setup();
+		if (rc_marimba)
+			pdata->marimba_shutdown();
+	}
+
+	if (pdata->bahama_setup != NULL &&
+		cur_connv_type != BAHAMA_ID) {
+		rc_bahama = pdata->bahama_setup();
+		if (rc_bahama)
+			pdata->bahama_shutdown(cur_connv_type);
+	}
+
+	if (rc_marimba & rc_bahama)
+		return -EAGAIN;
+
+	marimba = &marimba_modules[ADIE_ARRY_SIZE - 1];
+	marimba->client = client;
+	mutex_init(&marimba->xfer_lock);
+
+	rc = get_adie_type();
+
+	mutex_destroy(&marimba->xfer_lock);
+
+	if (rc < 0) {
+		if (pdata->bahama_setup != NULL)
+			pdata->bahama_shutdown(cur_adie_type);
+		if (pdata->marimba_shutdown != NULL)
+			pdata->marimba_shutdown();
+		return -ENODEV;
+	}
+
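+	/*
+	 * rc now holds the detected adie type: Marimba and Timpani use the
+	 * first bank of marimba_modules[] (index 0), while Bahama is placed
+	 * in a second bank starting at index 5 so that an audio adie and a
+	 * Bahama connectivity chip can coexist.
+	 */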
+	if (rc < 2) {
+		adie_arry_idx = 0;
+		adie_slave_idx_offset = 0;
+		client_loop = 0;
+		cur_codec_type = rc;
+		if (cur_connv_type < 0)
+			cur_connv_type = rc;
+		if (pdata->bahama_shutdown != NULL)
+			pdata->bahama_shutdown(cur_connv_type);
+	} else {
+		adie_arry_idx = 5;
+		adie_slave_idx_offset = 5;
+		client_loop = 1;
+		cur_connv_type = rc;
+	}
+
+	marimba = &marimba_modules[adie_arry_idx];
+	marimba->client = client;
+	mutex_init(&marimba->xfer_lock);
+
+	for (i = 1; i <= (NUM_ADD - client_loop); i++) {
+		/* Skip adding BT/FM for Timpani */
+		if (i == 1 && rc >= 1)
+			i++;
+		marimba = &marimba_modules[i + adie_arry_idx];
+		if (i != MARIMBA_ID_TSADC)
+			marimba->client = i2c_new_dummy(client->adapter,
+				pdata->slave_id[i + adie_slave_idx_offset]);
+		else if (pdata->tsadc_ssbi_adap) {
+			ssbi_adap = i2c_get_adapter(pdata->tsadc_ssbi_adap);
+			marimba->client = i2c_new_dummy(ssbi_adap,
+						0x55);
+		} else
+			ssbi_adap = NULL;
+
+		if (!marimba->client) {
+			dev_err(&marimba->client->dev,
+				"can't attach client %d\n", i);
+			status = -ENOMEM;
+			goto fail;
+		}
+		strlcpy(marimba->client->name, id->name,
+			sizeof(marimba->client->name));
+
+		mutex_init(&marimba->xfer_lock);
+	}
+
+	marimba_init_reg(client, id->driver_data);
+
+	status = marimba_add_child(pdata, id->driver_data);
+
+	marimba_pdata = pdata;
+
+	return 0;
+
+fail:
+	return status;
+}
+
+static int __devexit marimba_remove(struct i2c_client *client)
+{
+	int i;
+	struct marimba_platform_data *pdata;
+
+	pdata = client->dev.platform_data;
+	for (i = 0; i < ADIE_ARRY_SIZE; i++) {
+		struct marimba *marimba = &marimba_modules[i];
+
+		if (marimba->client && marimba->client != client)
+			i2c_unregister_device(marimba->client);
+
+		marimba_modules[i].client = NULL;
+	}
+
+	if (pdata->marimba_shutdown != NULL)
+		pdata->marimba_shutdown();
+
+	return 0;
+}
+
+static struct i2c_device_id marimba_id_table[] = {
+	{"marimba", MARIMBA_ID},
+	{"timpani", TIMPANI_ID},
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, marimba_id_table);
+
+static struct i2c_driver marimba_driver = {
+		.driver			= {
+			.owner		=	THIS_MODULE,
+			.name		=	"marimba-core",
+		},
+		.id_table		=	marimba_id_table,
+		.probe			=	marimba_probe,
+		.remove			=	__devexit_p(marimba_remove),
+};
+
+static int __init marimba_init(void)
+{
+	return i2c_add_driver(&marimba_driver);
+}
+module_init(marimba_init);
+
+static void __exit marimba_exit(void)
+{
+	i2c_del_driver(&marimba_driver);
+}
+module_exit(marimba_exit);
+
+MODULE_DESCRIPTION("Marimba Top level Driver");
+MODULE_ALIAS("platform:marimba-core");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.1");
diff --git a/drivers/mfd/marimba-tsadc.c b/drivers/mfd/marimba-tsadc.c
new file mode 100644
index 0000000..8a7b781
--- /dev/null
+++ b/drivers/mfd/marimba-tsadc.c
@@ -0,0 +1,696 @@
+/*
+ * Marimba TSADC driver.
+ *
+ * Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/mfd/marimba.h>
+#include <linux/mfd/marimba-tsadc.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+#include <linux/earlysuspend.h>
+#endif
+
+/* marimba configuration block: TS_CTL0 */
+#define TS_CTL0			0xFF
+#define TS_CTL0_RESET		BIT(0)
+#define TS_CTL0_CLK_EN		BIT(1)
+#define TS_CTL0_XO_EN		BIT(2)
+#define TS_CTL0_EOC_EN		BIT(3)
+#define TS_CTL0_PENIRQ_EN	BIT(4)
+
+/* TSADC registers */
+#define SSBI_PRESET		0x00
+#define TSHK_DIG_CONFIG		0x4F
+#define TSHK_INTF_CONFIG	0x50
+#define TSHK_SETUP		0x51
+	#define TSHK_SETUP_EN_ADC  BIT(0)
+	#define TSHK_SETUP_EN_PIRQ BIT(7)
+#define TSHK_PARAM		0x52
+#define TSHK_DATA_RD		0x53
+#define TSHK_STATUS		0x54
+#define TSHK_SETUP2		0x55
+#define TSHK_RSV1		0x56
+	#define TSHK_RSV1_PRECHARGE_EN	BIT(0)
+#define TSHK_COMMAND		0x57
+#define TSHK_PARAM2		0x58
+	#define TSHK_INPUT_CLK_MASK	0x3F
+	#define TSHK_SAMPLE_PRD_MASK	0xC7
+	#define TSHK_INPUT_CLK_SHIFT	0x6
+	#define TSHK_SAMPLE_PRD_SHIFT	0x3
+#define TSHK_PARAM3		0x59
+	#define TSHK_PARAM3_MODE_MASK	0xFC
+	#define TSHK_PARAM3_PRE_CHG_SHIFT (5)
+	#define TSHK_PARAM3_STABIZ_SHIFT (2)
+	#define TSHK_STABLE_TIME_MASK	0xE3
+	#define TSHK_PRECHG_TIME_MASK	0x1F
+#define TSHK_PARAM4		0x5A
+#define TSHK_RSV2		0x5B
+#define TSHK_RSV3		0x5C
+#define TSHK_RSV4		0x5D
+#define TSHK_RSV5		0x5E
+
+struct marimba_tsadc_client {
+	unsigned int is_ts;
+	struct platform_device *pdev;
+};
+
+struct marimba_tsadc {
+	struct marimba *marimba;
+	struct device *dev;
+	struct marimba_tsadc_platform_data *pdata;
+	struct clk	*codec_ssbi;
+	struct device *child_tssc;
+	bool clk_enabled;
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+	struct early_suspend		early_suspend;
+#endif
+};
+
+static struct marimba_tsadc *tsadc_dev;
+
+static int marimba_write_u8(struct marimba_tsadc *tsadc, u8 reg, u8 data)
+{
+	int rc;
+
+	tsadc->marimba->mod_id = MARIMBA_SLAVE_ID_MARIMBA;
+	rc = marimba_write(tsadc->marimba, reg, &data, 1);
+
+	if (!rc)
+		dev_warn(tsadc->dev, "Error writing marimba reg %X - ret %X\n",
+				reg, data);
+	return 0;
+}
+
+static int marimba_tsadc_write(struct marimba_tsadc *tsadc, u8 reg, u8 data)
+{
+	int rc;
+
+	tsadc->marimba->mod_id = MARIMBA_ID_TSADC;
+
+	rc = marimba_ssbi_write(tsadc->marimba, reg, &data, 1);
+	if (!rc)
+		dev_warn(tsadc->dev, "Error writing marimba reg %X - ret %X\n",
+				reg, data);
+	return rc;
+}
+
+static int marimba_tsadc_shutdown(struct marimba_tsadc *tsadc)
+{
+	u8 val;
+	int rc;
+
+	/* force reset */
+	val = TS_CTL0_XO_EN | TS_CTL0_EOC_EN | TS_CTL0_PENIRQ_EN |
+				TS_CTL0_CLK_EN;
+	rc = marimba_write_u8(tsadc, TS_CTL0, val);
+	if (rc < 0)
+		return rc;
+
+	/* disable xo, clock */
+	val = TS_CTL0_PENIRQ_EN | TS_CTL0_EOC_EN;
+	rc = marimba_write_u8(tsadc, TS_CTL0, val);
+	if (rc < 0)
+		return rc;
+
+	/* de-vote S2 1.3v */
+	if (tsadc->pdata->level_vote)
+		/* REVISIT: Ignore error for level_vote(0) for now*/
+		tsadc->pdata->level_vote(0);
+
+	return 0;
+}
+
+static int marimba_tsadc_startup(struct marimba_tsadc *tsadc)
+{
+	u8 val;
+	int rc = 0;
+
+	/* vote for S2 1.3v */
+	if (tsadc->pdata->level_vote) {
+		rc = tsadc->pdata->level_vote(1);
+		if (rc < 0)
+			return rc;
+	}
+
+	/* disable XO, clock and output enables */
+	rc = marimba_write_u8(tsadc, TS_CTL0, 0x00);
+	if (rc < 0)
+		goto fail_marimba_write;
+
+	/* Enable output enables */
+	val = TS_CTL0_XO_EN | TS_CTL0_EOC_EN | TS_CTL0_PENIRQ_EN;
+	rc = marimba_write_u8(tsadc, TS_CTL0, val);
+	if (rc < 0)
+		goto fail_marimba_write;
+
+	/* Enable clock */
+	val = val | TS_CTL0_CLK_EN;
+	rc = marimba_write_u8(tsadc, TS_CTL0, val);
+	if (rc < 0)
+		goto fail_marimba_write;
+
+	/* remove reset */
+	val = val | TS_CTL0_RESET;
+	rc = marimba_write_u8(tsadc, TS_CTL0, val);
+	if (rc < 0)
+		goto fail_marimba_write;
+
+	return 0;
+
+fail_marimba_write:
+	if (tsadc->pdata->level_vote)
+		/* REVISIT: Ignore error for level_vote(0) for now*/
+		tsadc->pdata->level_vote(0);
+	return rc;
+}
+
+
+static int marimba_tsadc_configure(struct marimba_tsadc *tsadc)
+{
+	u8 rsv1 = 0,  setup = 0, i, count = 0;
+	u8 param2 = 0,  param3 = 0;
+	unsigned long val;
+	int rc;
+
+	rc = marimba_tsadc_write(tsadc, SSBI_PRESET, 0x00);
+	if (rc < 0)
+		return rc;
+
+	if (!tsadc->pdata)
+		return -EINVAL;
+
+	/* Configure RSV1 register*/
+	if (tsadc->pdata->tsadc_prechg_en == true)
+		rsv1 |= TSHK_RSV1_PRECHARGE_EN;
+	else
+		rsv1 &= ~TSHK_RSV1_PRECHARGE_EN;
+
+	/*  Set RSV1 register*/
+	rc = marimba_tsadc_write(tsadc, TSHK_RSV1, rsv1);
+	if (rc < 0)
+		return rc;
+
+	/* Configure PARAM2 register */
+	/* Input clk */
+	val = tsadc->pdata->params2.input_clk_khz;
+	param2 &= TSHK_INPUT_CLK_MASK;
+	val /= 600;
+	if (val >= 1 && val <= 8 && !(val & (val - 1))) {
+		/* Input clk can be .6, 1.2, 2.4, 4.8Mhz */
+		if (val % 4 != 0)
+			param2 = (4 - (val % 4)) << TSHK_INPUT_CLK_SHIFT;
+		else
+			param2 = ((val / 4) - 1) << TSHK_INPUT_CLK_SHIFT;
+	} else /* Configure the default clk 2.4Mhz */
+		param2 = 0x00 << TSHK_INPUT_CLK_SHIFT;
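+	/* As computed above, the two-bit clock field ends up encoding the
+	 * input clock as 0 = 2.4 MHz (default), 1 = 4.8 MHz, 2 = 1.2 MHz
+	 * and 3 = 0.6 MHz.
+	 */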
+
+	/* Sample period */
+	param2 &= TSHK_SAMPLE_PRD_MASK;
+	param2 |=  tsadc->pdata->params2.sample_prd << TSHK_SAMPLE_PRD_SHIFT;
+
+	/* Write PARAM2 register */
+	rc = marimba_tsadc_write(tsadc, TSHK_PARAM2, param2);
+	if (rc < 0)
+		return rc;
+
+	/* REVISIT: If Precharge time, stabilization time  > 409.6us */
+	/* Configure PARAM3 register */
+	val = tsadc->pdata->params3.prechg_time_nsecs;
+	param3 &= TSHK_PRECHG_TIME_MASK;
+	val /= 6400;
+	if (val >= 1 && val <= 64  && !(val & (val - 1))) {
+		count = 0;
+		while ((val = val >> 1) != 0)
+			count++;
+		param3 |= count << TSHK_PARAM3_PRE_CHG_SHIFT;
+	} else	/* Set default value if the input is wrong */
+		param3 |= 0x00 << TSHK_PARAM3_PRE_CHG_SHIFT;
+
+	val = tsadc->pdata->params3.stable_time_nsecs;
+	param3 &= TSHK_STABLE_TIME_MASK;
+	val /= 6400;
+	if (val >= 1 && val <= 64 && !(val & (val - 1))) {
+		count = 0;
+		while ((val = val >> 1) != 0)
+			count++;
+		param3 |= count << TSHK_PARAM3_STABIZ_SHIFT;
+	} else /* Set default value if the input is wrong */
+		param3 |=  0x00 << TSHK_PARAM3_STABIZ_SHIFT;
+
+	/* Get TSADC mode */
+	val = tsadc->pdata->params3.tsadc_test_mode;
+	param3 &= TSHK_PARAM3_MODE_MASK;
+	if (val == 0)
+		param3 |= 0x00;
+	else
+		for (i = 0; i < 3 ; i++) {
+			if (((val + i) % 39322) == 0) {
+				param3 |= (i + 1);
+				break;
+			}
+		}
+	if (i == 3) /* Set to normal mode if input is wrong */
+		param3 |= 0x00;
+
+	rc = marimba_tsadc_write(tsadc, TSHK_PARAM3, param3);
+	if (rc < 0)
+		return rc;
+
+	/* Configure TSHK SETUP Register */
+	if (tsadc->pdata->setup.pen_irq_en == true)
+		setup |= TSHK_SETUP_EN_PIRQ;
+	else
+		setup &= ~TSHK_SETUP_EN_PIRQ;
+
+	if (tsadc->pdata->setup.tsadc_en == true)
+		setup |= TSHK_SETUP_EN_ADC;
+	else
+		setup &= ~TSHK_SETUP_EN_ADC;
+
+	/* Enable signals to ADC, pen irq assertion */
+	rc = marimba_tsadc_write(tsadc, TSHK_SETUP, setup);
+	if (rc < 0)
+		return rc;
+
+	return 0;
+}
+
+int marimba_tsadc_start(struct marimba_tsadc_client *client)
+{
+	int rc = 0;
+
+	if (!client) {
+		pr_err("%s: Not a valid client\n", __func__);
+		return -ENODEV;
+	}
+
+	if (!tsadc_dev) {
+		dev_err(&client->pdev->dev,
+			"%s: No tsadc device available\n", __func__);
+		return -ENODEV;
+	}
+
+	/* REVISIT - add locks */
+	if (client->is_ts) {
+		rc = marimba_tsadc_startup(tsadc_dev);
+		if (rc < 0)
+			goto fail_tsadc_startup;
+		rc = marimba_tsadc_configure(tsadc_dev);
+		if (rc < 0)
+			goto fail_tsadc_conf;
+	}
+
+	return 0;
+fail_tsadc_conf:
+	marimba_tsadc_shutdown(tsadc_dev);
+fail_tsadc_startup:
+	return rc;
+}
+EXPORT_SYMBOL(marimba_tsadc_start);
+
+struct marimba_tsadc_client *
+marimba_tsadc_register(struct platform_device *pdev, unsigned int is_ts)
+{
+	struct marimba_tsadc_client *client;
+
+	if (!pdev) {
+		pr_err("%s: valid platform device pointer please\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (!is_ts) {
+		dev_err(&pdev->dev, "%s: only TS right now\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (!tsadc_dev) {
+		dev_err(&pdev->dev,
+			"%s: No tsadc device available\n", __func__);
+		return ERR_PTR(-ENODEV);
+	}
+
+	client = kzalloc(sizeof *client, GFP_KERNEL);
+	if (!client)
+		return ERR_PTR(-ENOMEM);
+
+	client->pdev = pdev;
+	client->is_ts = is_ts;
+
+	return client;
+}
+EXPORT_SYMBOL(marimba_tsadc_register);
+
+void marimba_tsadc_unregister(struct marimba_tsadc_client *client)
+{
+	if (client->is_ts)
+		marimba_tsadc_shutdown(tsadc_dev);
+	kfree(client);
+}
+EXPORT_SYMBOL(marimba_tsadc_unregister);
+
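+/*
+ * Illustrative client sequence (sketch only; a touchscreen driver would
+ * typically do this from its probe path, with pdev being its own
+ * platform device):
+ *
+ *	struct marimba_tsadc_client *tc;
+ *
+ *	tc = marimba_tsadc_register(pdev, 1);
+ *	if (IS_ERR(tc))
+ *		return PTR_ERR(tc);
+ *	if (marimba_tsadc_start(tc) < 0) {
+ *		marimba_tsadc_unregister(tc);
+ *		return -ENODEV;
+ *	}
+ */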
+static struct resource resources_tssc[] = {
+	{
+		.start	= 0xAD300000,
+		.end	= 0xAD300000 + SZ_4K - 1,
+		.name	= "tssc",
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= 55,
+		.end	= 55,
+		.name	= "tssc1",
+		.flags	= IORESOURCE_IRQ | IRQF_TRIGGER_RISING,
+	},
+	{
+		.start	= 56,
+		.end	= 56,
+		.name	= "tssc2",
+		.flags	= IORESOURCE_IRQ | IRQF_TRIGGER_RISING,
+	},
+};
+
+static struct device *
+marimba_add_tssc_subdev(struct device *parent, const char *name, int num,
+			 struct resource *resources, int num_resources,
+			 void *pdata, int pdata_len)
+{
+	struct platform_device	*pdev;
+	int			status;
+
+	pdev = platform_device_alloc(name, num);
+	if (!pdev) {
+		dev_dbg(parent, "can't alloc dev\n");
+		status = -ENOMEM;
+		goto err;
+	}
+
+	pdev->dev.parent = parent;
+
+	if (pdata) {
+		status = platform_device_add_data(pdev, pdata, pdata_len);
+		if (status < 0) {
+			dev_dbg(&pdev->dev, "can't add platform_data\n");
+			goto err;
+		}
+	}
+
+	status = platform_device_add_resources(pdev, resources, num_resources);
+	if (status < 0) {
+		dev_dbg(&pdev->dev, "can't add resources\n");
+		goto err;
+	}
+
+	status = platform_device_add(pdev);
+
+err:
+	if (status < 0) {
+		platform_device_put(pdev);
+		dev_err(parent, "can't add %s dev\n", name);
+		return ERR_PTR(status);
+	}
+	return &pdev->dev;
+}
+
+#ifdef CONFIG_PM
+static int
+marimba_tsadc_suspend(struct device *dev)
+{
+	int rc = 0, ret = 0;
+	struct marimba_tsadc *tsadc = dev_get_drvdata(dev);
+
+	if (tsadc->clk_enabled == true) {
+		clk_disable(tsadc->codec_ssbi);
+		tsadc->clk_enabled = false;
+	}
+
+	if (!(device_may_wakeup(dev) &&
+			device_may_wakeup(tsadc->child_tssc))) {
+		rc = marimba_tsadc_shutdown(tsadc);
+		if (rc < 0) {
+			pr_err("%s: Unable to shutdown TSADC\n", __func__);
+			goto fail_shutdown;
+		}
+
+		if (tsadc->pdata->marimba_tsadc_power) {
+			rc = tsadc->pdata->marimba_tsadc_power(0);
+			if (rc < 0)
+				goto fail_tsadc_power;
+		}
+	}
+	return rc;
+
+fail_tsadc_power:
+	marimba_tsadc_startup(tsadc_dev);
+	marimba_tsadc_configure(tsadc_dev);
+fail_shutdown:
+	if (tsadc->clk_enabled == false) {
+		ret = clk_enable(tsadc->codec_ssbi);
+		if (ret == 0)
+			tsadc->clk_enabled = true;
+	}
+	return rc;
+}
+
+static int marimba_tsadc_resume(struct device *dev)
+{
+	int rc = 0;
+	struct marimba_tsadc *tsadc = dev_get_drvdata(dev);
+
+	if (tsadc->clk_enabled == false) {
+		rc = clk_enable(tsadc->codec_ssbi);
+		if (rc != 0) {
+			pr_err("%s: Clk enable failed\n", __func__);
+			return rc;
+		}
+		tsadc->clk_enabled = true;
+	}
+
+	if (!(device_may_wakeup(dev) &&
+			device_may_wakeup(tsadc->child_tssc))) {
+		if (tsadc->pdata->marimba_tsadc_power) {
+			rc = tsadc->pdata->marimba_tsadc_power(1);
+			if (rc) {
+				pr_err("%s: Unable to power on TSADC \n",
+						__func__);
+				goto fail_tsadc_power;
+			}
+		}
+
+		rc = marimba_tsadc_startup(tsadc_dev);
+		if (rc < 0) {
+			pr_err("%s: Unable to startup TSADC\n", __func__);
+			goto fail_tsadc_startup;
+		}
+
+		rc = marimba_tsadc_configure(tsadc_dev);
+		if (rc < 0) {
+			pr_err("%s: Unable to configure TSADC\n", __func__);
+			goto fail_tsadc_configure;
+		}
+	}
+	return rc;
+
+fail_tsadc_configure:
+	marimba_tsadc_shutdown(tsadc_dev);
+fail_tsadc_startup:
+	if (tsadc->pdata->marimba_tsadc_power)
+		tsadc->pdata->marimba_tsadc_power(0);
+fail_tsadc_power:
+	if (tsadc->clk_enabled == true) {
+		clk_disable(tsadc->codec_ssbi);
+		tsadc->clk_enabled = false;
+	}
+	return rc;
+}
+
+static struct dev_pm_ops tsadc_pm_ops = {
+#ifndef CONFIG_HAS_EARLYSUSPEND
+	.suspend = marimba_tsadc_suspend,
+	.resume = marimba_tsadc_resume,
+#endif
+};
+#endif
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void marimba_tsadc_early_suspend(struct early_suspend *h)
+{
+	struct marimba_tsadc *tsadc = container_of(h, struct marimba_tsadc,
+						 early_suspend);
+
+	marimba_tsadc_suspend(tsadc->dev);
+}
+
+static void marimba_tsadc_late_resume(struct early_suspend *h)
+{
+	struct marimba_tsadc *tsadc = container_of(h, struct marimba_tsadc,
+						 early_suspend);
+
+	marimba_tsadc_resume(tsadc->dev);
+}
+#endif
+
+static int __devinit marimba_tsadc_probe(struct platform_device *pdev)
+{
+	struct marimba *marimba = platform_get_drvdata(pdev);
+	struct marimba_tsadc *tsadc;
+	struct marimba_tsadc_platform_data *pdata = pdev->dev.platform_data;
+	int rc = 0;
+	struct device *child;
+
+	dev_dbg(&pdev->dev, "%s\n", __func__);
+
+	if (!pdata) {
+		dev_dbg(&pdev->dev, "no tsadc platform data?\n");
+		return -EINVAL;
+	}
+
+	tsadc = kzalloc(sizeof *tsadc, GFP_KERNEL);
+	if (!tsadc)
+		return -ENOMEM;
+
+	tsadc->marimba	= marimba;
+	tsadc->dev	= &pdev->dev;
+	tsadc->pdata	= pdata;
+
+	platform_set_drvdata(pdev, tsadc);
+
+	if (tsadc->pdata->init) {
+		rc = tsadc->pdata->init();
+		if (rc < 0)
+			goto fail_tsadc_init;
+	}
+
+	if (tsadc->pdata->marimba_tsadc_power) {
+		rc = tsadc->pdata->marimba_tsadc_power(1);
+		if (rc) {
+			pr_err("%s: Unable to power up TSADC\n", __func__);
+			goto fail_tsadc_power;
+		}
+	}
+
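+	/*
+	 * The codec SSBI clock is assumed to be needed for TSADC register
+	 * access; grab and enable it before creating the TSSC child.
+	 */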
+	tsadc->codec_ssbi = clk_get(NULL, "codec_ssbi_clk");
+	if (IS_ERR(tsadc->codec_ssbi)) {
+		rc = PTR_ERR(tsadc->codec_ssbi);
+		goto fail_clk_get;
+	}
+	rc = clk_enable(tsadc->codec_ssbi);
+	if (rc != 0)
+		goto fail_clk_enable;
+
+	tsadc->clk_enabled = true;
+
+	child = marimba_add_tssc_subdev(&pdev->dev, "msm_touchscreen", -1,
+			 resources_tssc, ARRAY_SIZE(resources_tssc),
+			 pdata->tssc_data, sizeof(*pdata->tssc_data));
+
+	if (IS_ERR(child)) {
+		rc = PTR_ERR(child);
+		goto fail_add_subdev;
+	}
+
+	tsadc->child_tssc = child;
+	platform_set_drvdata(pdev, tsadc);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	tsadc->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN +
+						 TSADC_SUSPEND_LEVEL;
+	tsadc->early_suspend.suspend = marimba_tsadc_early_suspend;
+	tsadc->early_suspend.resume = marimba_tsadc_late_resume;
+	register_early_suspend(&tsadc->early_suspend);
+#endif
+
+	tsadc_dev = tsadc;
+	device_init_wakeup(&pdev->dev, pdata->can_wakeup);
+
+	return rc;
+
+fail_add_subdev:
+	clk_disable(tsadc->codec_ssbi);
+
+fail_clk_enable:
+	clk_put(tsadc->codec_ssbi);
+
+fail_clk_get:
+	/* Don't let cleanup calls clobber the original error in rc */
+	if (tsadc->pdata->marimba_tsadc_power)
+		tsadc->pdata->marimba_tsadc_power(0);
+fail_tsadc_power:
+	if (tsadc->pdata->exit)
+		tsadc->pdata->exit();
+fail_tsadc_init:
+	kfree(tsadc);
+	return rc;
+}
+
+static int __devexit marimba_tsadc_remove(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct marimba_tsadc *tsadc = platform_get_drvdata(pdev);
+
+	device_init_wakeup(&pdev->dev, 0);
+
+	if (tsadc->clk_enabled == true)
+		clk_disable(tsadc->codec_ssbi);
+
+	clk_put(tsadc->codec_ssbi);
+
+	if (tsadc->pdata->exit)
+		rc = tsadc->pdata->exit();
+
+	if (tsadc->pdata->marimba_tsadc_power)
+		rc = tsadc->pdata->marimba_tsadc_power(0);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	unregister_early_suspend(&tsadc->early_suspend);
+#endif
+
+	platform_set_drvdata(pdev, NULL);
+	kfree(tsadc);
+	return rc;
+}
+
+static struct platform_driver tsadc_driver = {
+	.probe	= marimba_tsadc_probe,
+	.remove	= __devexit_p(marimba_tsadc_remove),
+	.driver	= {
+		.name = "marimba_tsadc",
+		.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm = &tsadc_pm_ops,
+#endif
+	},
+};
+
+static int __init marimba_tsadc_init(void)
+{
+	return platform_driver_register(&tsadc_driver);
+}
+device_initcall(marimba_tsadc_init);
+
+static void __exit marimba_tsadc_exit(void)
+{
+	platform_driver_unregister(&tsadc_driver);
+}
+module_exit(marimba_tsadc_exit);
+
+MODULE_DESCRIPTION("Marimba TSADC driver");
+MODULE_VERSION("0.1");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:marimba_tsadc");
diff --git a/drivers/mfd/msm-adie-codec.c b/drivers/mfd/msm-adie-codec.c
new file mode 100644
index 0000000..d9414ed
--- /dev/null
+++ b/drivers/mfd/msm-adie-codec.c
@@ -0,0 +1,196 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/msm-adie-codec.h>
+#include <linux/mfd/marimba.h>
+
+static const struct adie_codec_operations *cur_adie_ops;
+
+int adie_codec_register_codec_operations(
+			const struct adie_codec_operations *adie_ops)
+{
+	if (adie_ops == NULL)
+		return -EINVAL;
+
+	if (adie_ops->codec_id != adie_get_detected_codec_type())
+		return -EINVAL;
+
+	cur_adie_ops = adie_ops;
+	pr_info("%s: codec type %d\n", __func__, adie_ops->codec_id);
+	return 0;
+}
+
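+/*
+ * The adie_codec_* entry points below are thin wrappers that dispatch to the
+ * registered codec operations, falling back to -ENODEV / -EPERM when no
+ * codec or no handler for the operation is available.
+ */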
+int adie_codec_open(struct adie_codec_dev_profile *profile,
+	struct adie_codec_path **path_pptr)
+{
+	int rc = -EPERM;
+
+	if (cur_adie_ops != NULL) {
+		if (cur_adie_ops->codec_open != NULL)
+			rc = cur_adie_ops->codec_open(profile, path_pptr);
+	} else
+		rc = -ENODEV;
+
+	return rc;
+}
+EXPORT_SYMBOL(adie_codec_open);
+
+int adie_codec_close(struct adie_codec_path *path_ptr)
+{
+	int rc = -EPERM;
+
+	if (cur_adie_ops != NULL) {
+		if (cur_adie_ops->codec_close != NULL)
+			rc = cur_adie_ops->codec_close(path_ptr);
+	} else
+		rc = -ENODEV;
+
+	return rc;
+}
+EXPORT_SYMBOL(adie_codec_close);
+
+int adie_codec_set_device_digital_volume(struct adie_codec_path *path_ptr,
+		u32 num_channels, u32 vol_percentage /* in percentage */)
+{
+	int rc = -EPERM;
+
+	if (cur_adie_ops != NULL) {
+		if (cur_adie_ops->codec_set_device_digital_volume != NULL) {
+			rc = cur_adie_ops->codec_set_device_digital_volume(
+							path_ptr,
+							num_channels,
+							vol_percentage);
+		}
+	} else
+		rc = -ENODEV;
+
+	return rc;
+}
+EXPORT_SYMBOL(adie_codec_set_device_digital_volume);
+
+int adie_codec_set_device_analog_volume(struct adie_codec_path *path_ptr,
+		u32 num_channels, u32 volume /* in percentage */)
+{
+	int rc = -EPERM;
+
+	if (cur_adie_ops != NULL) {
+		if (cur_adie_ops->codec_set_device_analog_volume != NULL) {
+			rc = cur_adie_ops->codec_set_device_analog_volume(
+							path_ptr,
+							num_channels,
+							volume);
+		}
+	} else
+		rc = -ENODEV;
+
+	return rc;
+}
+EXPORT_SYMBOL(adie_codec_set_device_analog_volume);
+
+int adie_codec_setpath(struct adie_codec_path *path_ptr, u32 freq_plan, u32 osr)
+{
+	int rc = -EPERM;
+
+	if (cur_adie_ops != NULL) {
+		if (cur_adie_ops->codec_setpath != NULL) {
+			rc = cur_adie_ops->codec_setpath(path_ptr,
+							freq_plan,
+							osr);
+		}
+	} else
+		rc = -ENODEV;
+
+	return rc;
+}
+EXPORT_SYMBOL(adie_codec_setpath);
+
+u32 adie_codec_freq_supported(struct adie_codec_dev_profile *profile,
+	u32 requested_freq)
+{
+	int rc = -EPERM;
+
+	if (cur_adie_ops != NULL) {
+		if (cur_adie_ops->codec_freq_supported != NULL)
+			rc = cur_adie_ops->codec_freq_supported(profile,
+							requested_freq);
+	} else
+		rc = -ENODEV;
+
+	return rc;
+}
+EXPORT_SYMBOL(adie_codec_freq_supported);
+
+int adie_codec_enable_sidetone(struct adie_codec_path *rx_path_ptr,
+	u32 enable)
+{
+	int rc = -EPERM;
+
+	if (cur_adie_ops != NULL) {
+		if (cur_adie_ops->codec_enable_sidetone != NULL)
+			rc = cur_adie_ops->codec_enable_sidetone(rx_path_ptr,
+								enable);
+	} else
+		rc = -ENODEV;
+
+	return rc;
+}
+EXPORT_SYMBOL(adie_codec_enable_sidetone);
+
+int adie_codec_enable_anc(struct adie_codec_path *rx_path_ptr,
+	u32 enable, struct adie_codec_anc_data *calibration_writes)
+{
+	int rc = -EPERM;
+
+	if (cur_adie_ops != NULL) {
+		if (cur_adie_ops->codec_enable_anc != NULL)
+			rc = cur_adie_ops->codec_enable_anc(rx_path_ptr,
+				enable, calibration_writes);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(adie_codec_enable_anc);
+
+int adie_codec_proceed_stage(struct adie_codec_path *path_ptr, u32 state)
+{
+	int rc = -EPERM;
+
+	if (cur_adie_ops != NULL) {
+		if (cur_adie_ops->codec_proceed_stage != NULL)
+			rc = cur_adie_ops->codec_proceed_stage(path_ptr,
+								state);
+	} else
+		rc = -ENODEV;
+
+	return rc;
+}
+EXPORT_SYMBOL(adie_codec_proceed_stage);
+
+int adie_codec_set_master_mode(struct adie_codec_path *path_ptr, u8 master)
+{
+	int rc = -EPERM;
+
+	if (cur_adie_ops != NULL) {
+		if (cur_adie_ops->codec_set_master_mode != NULL)
+			rc = cur_adie_ops->codec_set_master_mode(path_ptr,
+					master);
+	} else
+		rc = -ENODEV;
+
+	return rc;
+}
+EXPORT_SYMBOL(adie_codec_set_master_mode);
diff --git a/drivers/mfd/msmproc_adc.c b/drivers/mfd/msmproc_adc.c
new file mode 100644
index 0000000..a47486a
--- /dev/null
+++ b/drivers/mfd/msmproc_adc.c
@@ -0,0 +1,468 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/mfd/pm8921-adc.h>
+#define KELVINMIL_DEGMIL	273160
+#define PM8921_ADC_SLOPE	10
+#define PM8921_ADC_CODE_SCALE	24576
+
+static const struct pm8921_adc_map_pt adcmap_batttherm[] = {
+	{2020,	-30},
+	{1923,	-20},
+	{1796,	-10},
+	{1640,	  0},
+	{1459,	 10},
+	{1260,	 20},
+	{1159,	 25},
+	{1059,	 30},
+	{871,	 40},
+	{706,	 50},
+	{567,	 60},
+	{453,	 70},
+	{364,	 80}
+};
+
+static const struct pm8921_adc_map_pt adcmap_ntcg_104ef_104fb[] = {
+	{696483,	-40960},
+	{649148,	-39936},
+	{605368,	-38912},
+	{564809,	-37888},
+	{527215,	-36864},
+	{492322,	-35840},
+	{460007,	-34816},
+	{429982,	-33792},
+	{402099,	-32768},
+	{376192,	-31744},
+	{352075,	-30720},
+	{329714,	-29696},
+	{308876,	-28672},
+	{289480,	-27648},
+	{271417,	-26624},
+	{254574,	-25600},
+	{238903,	-24576},
+	{224276,	-23552},
+	{210631,	-22528},
+	{197896,	-21504},
+	{186007,	-20480},
+	{174899,	-19456},
+	{164521,	-18432},
+	{154818,	-17408},
+	{145744,	-16384},
+	{137265,	-15360},
+	{129307,	-14336},
+	{121866,	-13312},
+	{114896,	-12288},
+	{108365,	-11264},
+	{102252,	-10240},
+	{96499,		-9216},
+	{91111,		-8192},
+	{86055,		-7168},
+	{81308,		-6144},
+	{76857,		-5120},
+	{72660,		-4096},
+	{68722,		-3072},
+	{65020,		-2048},
+	{61538,		-1024},
+	{58261,		0},
+	{55177,		1024},
+	{52274,		2048},
+	{49538,		3072},
+	{46962,		4096},
+	{44531,		5120},
+	{42243,		6144},
+	{40083,		7168},
+	{38045,		8192},
+	{36122,		9216},
+	{34308,		10240},
+	{32592,		11264},
+	{30972,		12288},
+	{29442,		13312},
+	{27995,		14336},
+	{26624,		15360},
+	{25333,		16384},
+	{24109,		17408},
+	{22951,		18432},
+	{21854,		19456},
+	{20807,		20480},
+	{19831,		21504},
+	{18899,		22528},
+	{18016,		23552},
+	{17178,		24576},
+	{16384,		25600},
+	{15631,		26624},
+	{14916,		27648},
+	{14237,		28672},
+	{13593,		29696},
+	{12976,		30720},
+	{12400,		31744},
+	{11848,		32768},
+	{11324,		33792},
+	{10825,		34816},
+	{10354,		35840},
+	{9900,		36864},
+	{9471,		37888},
+	{9062,		38912},
+	{8674,		39936},
+	{8306,		40960},
+	{7951,		41984},
+	{7616,		43008},
+	{7296,		44032},
+	{6991,		45056},
+	{6701,		46080},
+	{6424,		47104},
+	{6160,		48128},
+	{5908,		49152},
+	{5667,		50176},
+	{5439,		51200},
+	{5219,		52224},
+	{5010,		53248},
+	{4810,		54272},
+	{4619,		55296},
+	{4440,		56320},
+	{4263,		57344},
+	{4097,		58368},
+	{3938,		59392},
+	{3785,		60416},
+	{3637,		61440},
+	{3501,		62464},
+	{3368,		63488},
+	{3240,		64512},
+	{3118,		65536},
+	{2998,		66560},
+	{2889,		67584},
+	{2782,		68608},
+	{2680,		69632},
+	{2581,		70656},
+	{2490,		71680},
+	{2397,		72704},
+	{2310,		73728},
+	{2227,		74752},
+	{2147,		75776},
+	{2064,		76800},
+	{1998,		77824},
+	{1927,		78848},
+	{1860,		79872},
+	{1795,		80896},
+	{1736,		81920},
+	{1673,		82944},
+	{1615,		83968},
+	{1560,		84992},
+	{1507,		86016},
+	{1456,		87040},
+	{1407,		88064},
+	{1360,		89088},
+	{1314,		90112},
+	{1271,		91136},
+	{1228,		92160},
+	{1189,		93184},
+	{1150,		94208},
+	{1112,		95232},
+	{1076,		96256},
+	{1042,		97280},
+	{1008,		98304},
+	{976,		99328},
+	{945,		100352},
+	{915,		101376},
+	{886,		102400},
+	{859,		103424},
+	{832,		104448},
+	{807,		105472},
+	{782,		106496},
+	{756,		107520},
+	{735,		108544},
+	{712,		109568},
+	{691,		110592},
+	{670,		111616},
+	{650,		112640},
+	{631,		113664},
+	{612,		114688},
+	{594,		115712},
+	{577,		116736},
+	{560,		117760},
+	{544,		118784},
+	{528,		119808},
+	{513,		120832},
+	{498,		121856},
+	{483,		122880},
+	{470,		123904},
+	{457,		124928},
+	{444,		125952},
+	{431,		126976},
+	{419,		128000}
+};
+
+static int32_t pm8921_adc_map_linear(const struct pm8921_adc_map_pt *pts,
+		uint32_t tablesize, int32_t input, int64_t *output)
+{
+	bool descending = 1;
+	uint32_t i = 0;
+
+	if ((pts == NULL) || (output == NULL))
+		return -EINVAL;
+
+	/* Check if table is descending or ascending */
+	if (tablesize > 1) {
+		if (pts[0].x < pts[1].x)
+			descending = 0;
+	}
+
+	while (i < tablesize) {
+		if ((descending == 1) && (pts[i].x < input)) {
+			/* table entry is less than measured
+				value and table is descending, stop */
+			break;
+		} else if ((descending == 0) &&
+				(pts[i].x > input)) {
+			/* table entry is greater than measured
+				value and table is ascending, stop */
+			break;
+		} else {
+			i++;
+		}
+	}
+
+	if (i == 0)
+		*output = pts[0].y;
+	else if (i == tablesize)
+		*output = pts[tablesize-1].y;
+	else {
+		/* result is between search_index and search_index-1 */
+		/* interpolate linearly */
+		*output = (((int32_t) ((pts[i].y - pts[i-1].y)*
+			(input - pts[i-1].x))/
+			(pts[i].x - pts[i-1].x))+
+			pts[i-1].y);
+	}
+
+	return 0;
+}
+
+int32_t pm8921_adc_scale_default(int32_t adc_code,
+		const struct pm8921_adc_properties *adc_properties,
+		const struct pm8921_adc_chan_properties *chan_properties,
+		struct pm8921_adc_chan_result *adc_chan_result)
+{
+	bool negative_rawfromoffset = 0;
+	int32_t rawfromoffset = 0;
+
+	if (!chan_properties || !chan_properties->offset_gain_numerator ||
+		!chan_properties->offset_gain_denominator || !adc_properties
+		|| !adc_chan_result)
+		return -EINVAL;
+
+	rawfromoffset = adc_code -
+			chan_properties->adc_graph[ADC_CALIB_ABSOLUTE].offset;
+
+	adc_chan_result->adc_code = adc_code;
+	if (rawfromoffset < 0) {
+		if (adc_properties->bipolar) {
+			rawfromoffset = -rawfromoffset;
+			negative_rawfromoffset = 1;
+		} else {
+			rawfromoffset = 0;
+		}
+	}
+
+	if (rawfromoffset >= 1 << adc_properties->bitresolution)
+		rawfromoffset = (1 << adc_properties->bitresolution) - 1;
+
+	adc_chan_result->measurement = (int64_t)rawfromoffset *
+		chan_properties->adc_graph[ADC_CALIB_ABSOLUTE].dx *
+				chan_properties->offset_gain_denominator;
+
+	/* do_div only performs positive integer division */
+	do_div(adc_chan_result->measurement,
+		chan_properties->adc_graph[ADC_CALIB_ABSOLUTE].dy *
+				chan_properties->offset_gain_numerator);
+
+	if (negative_rawfromoffset)
+		adc_chan_result->measurement = -adc_chan_result->measurement;
+
+	/* Note: adc_chan_result->measurement is in the unit of
+	 * adc_properties.adc_reference. For generic channel processing,
+	 * channel measurement is a scale/ratio relative to the adc
+	 * reference input */
+	adc_chan_result->physical = (int32_t) adc_chan_result->measurement;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pm8921_adc_scale_default);
+
+int32_t pm8921_adc_scale_batt_therm(int32_t adc_code,
+		const struct pm8921_adc_properties *adc_properties,
+		const struct pm8921_adc_chan_properties *chan_properties,
+		struct pm8921_adc_chan_result *adc_chan_result)
+{
+	int rc;
+
+	rc = pm8921_adc_scale_default(adc_code, adc_properties, chan_properties,
+			adc_chan_result);
+	if (rc < 0) {
+		pr_debug("PM8921 ADC scale default error with %d\n", rc);
+		return rc;
+	}
+	/* convert mV ---> degC using the table */
+	return pm8921_adc_map_linear(
+			adcmap_batttherm,
+			ARRAY_SIZE(adcmap_batttherm),
+			adc_chan_result->physical,
+			&adc_chan_result->physical);
+}
+EXPORT_SYMBOL_GPL(pm8921_adc_scale_batt_therm);
+
+int32_t pm8921_adc_scale_pmic_therm(int32_t adc_code,
+		const struct pm8921_adc_properties *adc_properties,
+		const struct pm8921_adc_chan_properties *chan_properties,
+		struct pm8921_adc_chan_result *adc_chan_result)
+{
+	int32_t rawfromoffset;
+
+	if (!chan_properties || !chan_properties->offset_gain_numerator ||
+		!chan_properties->offset_gain_denominator || !adc_properties
+		|| !adc_chan_result)
+		return -EINVAL;
+
+	adc_chan_result->adc_code = adc_code;
+	rawfromoffset = adc_code -
+			chan_properties->adc_graph[ADC_CALIB_ABSOLUTE].offset;
+	if (rawfromoffset > 0) {
+		if (rawfromoffset >= 1 << adc_properties->bitresolution)
+			rawfromoffset = (1 << adc_properties->bitresolution)
+									- 1;
+		/* 2mV/K */
+		adc_chan_result->measurement = (int64_t)rawfromoffset*
+			chan_properties->adc_graph[ADC_CALIB_ABSOLUTE].dx *
+			chan_properties->offset_gain_denominator * 1000;
+
+		do_div(adc_chan_result->measurement,
+			chan_properties->adc_graph[ADC_CALIB_ABSOLUTE].dy *
+			chan_properties->offset_gain_numerator*2);
+	} else {
+		adc_chan_result->measurement = 0;
+	}
+	/* Note: adc_chan_result->measurement is in the unit of
+		adc_properties.adc_reference */
+	adc_chan_result->physical = (int32_t)adc_chan_result->measurement;
+	/* Change to .001 deg C */
+	adc_chan_result->physical -= KELVINMIL_DEGMIL;
+	adc_chan_result->measurement <<= 1;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pm8921_adc_scale_pmic_therm);
+
+/* Scales the ADC code to 0.001 degrees C using the map
+ * table for the XO thermistor.
+ */
+int32_t pm8921_adc_tdkntcg_therm(int32_t adc_code,
+		const struct pm8921_adc_properties *adc_properties,
+		const struct pm8921_adc_chan_properties *chan_properties,
+		struct pm8921_adc_chan_result *adc_chan_result)
+{
+	uint32_t num1, num2, denom, rt_r25;
+	int32_t offset = chan_properties->adc_graph->offset,
+		dy = chan_properties->adc_graph->dy,
+		dx = chan_properties->adc_graph->dx,
+		fullscale_calibrated_adc_code;
+
+	adc_chan_result->adc_code = adc_code;
+	fullscale_calibrated_adc_code = dy + offset;
+	/* The above is a shortcut that saves a fair amount of computation;
+	   the more general expression
+		(adc_properties->adc_reference*dy+dx*offset+(dx>>1))/dx
+	   applies when the two reference voltages differ from 0 and the
+	   full-scale voltage. */
+
+	if ((dy == 0) || (dx == 0) ||
+			(offset >= fullscale_calibrated_adc_code)) {
+		return -EINVAL;
+	} else {
+		if (adc_code >= fullscale_calibrated_adc_code) {
+			rt_r25 = (uint32_t)-1;
+		} else if (adc_code <= offset) {
+			rt_r25 = 0;
+		} else {
+			/* The formula used is (adc_code of current reading -
+			 * offset) / (the calibrated fullscale adc code -
+			 * adc_code of current reading). For this channel, at
+			 * this time, chan_properties->offset_gain_numerator =
+			 * chan_properties->offset_gain_denominator = 1, so
+			 * there is no need to fold them into the formula; it
+			 * would only multiply/divide by 1 for the same result
+			 * at extra computational cost. */
+			num1 = (adc_code - offset) << 14;
+			num2 = (fullscale_calibrated_adc_code - adc_code) >> 1;
+			denom = fullscale_calibrated_adc_code - adc_code;
+
+			if ((int)denom <= 0)
+				rt_r25 = 0x7FFFFFFF;
+			else
+				rt_r25 = (num1 + num2) / denom;
+		}
+
+		if (rt_r25 > 0x7FFFFFFF)
+			rt_r25 = 0x7FFFFFFF;
+
+		pm8921_adc_map_linear(adcmap_ntcg_104ef_104fb,
+			ARRAY_SIZE(adcmap_ntcg_104ef_104fb),
+			(int32_t)rt_r25, &adc_chan_result->physical);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pm8921_adc_tdkntcg_therm);
+
+int32_t pm8921_adc_scale_xtern_chgr_cur(int32_t adc_code,
+		const struct pm8921_adc_properties *adc_properties,
+		const struct pm8921_adc_chan_properties *chan_properties,
+		struct pm8921_adc_chan_result *adc_chan_result)
+{
+	int32_t rawfromoffset = (adc_code - PM8921_ADC_CODE_SCALE)
+						/PM8921_ADC_SLOPE;
+
+	if (!chan_properties || !chan_properties->offset_gain_numerator ||
+		!chan_properties->offset_gain_denominator || !adc_properties
+		|| !adc_chan_result)
+		return -EINVAL;
+
+	adc_chan_result->adc_code = adc_code;
+	if (rawfromoffset > 0) {
+		if (rawfromoffset >= 1 << adc_properties->bitresolution)
+			rawfromoffset = (1 << adc_properties->bitresolution)
+									- 1;
+		adc_chan_result->measurement = ((int64_t)rawfromoffset * 5)*
+				chan_properties->offset_gain_denominator;
+		do_div(adc_chan_result->measurement,
+					chan_properties->offset_gain_numerator);
+	} else {
+		adc_chan_result->measurement = 0;
+	}
+	adc_chan_result->physical = (int32_t) adc_chan_result->measurement;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pm8921_adc_scale_xtern_chgr_cur);
+
+int32_t pm8921_adc_batt_scaler(struct pm8921_adc_arb_btm_param *btm_param)
+{
+	/* TODO based on the schematics for the batt thermistor
+	parameters and the HW/SW doc for the device. This is the
+	external batt therm */
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pm8921_adc_batt_scaler);
diff --git a/drivers/mfd/pm8921-adc.c b/drivers/mfd/pm8921-adc.c
new file mode 100644
index 0000000..b48b033
--- /dev/null
+++ b/drivers/mfd/pm8921-adc.c
@@ -0,0 +1,1022 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Qualcomm's PM8921 ADC Arbiter driver
+ */
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/pm8xxx/core.h>
+#include <linux/mfd/pm8xxx/mpp.h>
+#include <linux/mfd/pm8921-adc.h>
+#include <linux/debugfs.h>
+
+/* User Bank register set */
+#define PM8921_ADC_ARB_USRP_CNTRL1			0x197
+#define PM8921_ADC_ARB_USRP_CNTRL1_EN_ARB		BIT(0)
+#define PM8921_ADC_ARB_USRP_CNTRL1_RSV1			BIT(1)
+#define PM8921_ADC_ARB_USRP_CNTRL1_RSV2			BIT(2)
+#define PM8921_ADC_ARB_USRP_CNTRL1_RSV3			BIT(3)
+#define PM8921_ADC_ARB_USRP_CNTRL1_RSV4			BIT(4)
+#define PM8921_ADC_ARB_USRP_CNTRL1_RSV5			BIT(5)
+#define PM8921_ADC_ARB_USRP_CNTRL1_EOC			BIT(6)
+#define PM8921_ADC_ARB_USRP_CNTRL1_REQ			BIT(7)
+
+#define PM8921_ADC_ARB_USRP_AMUX_CNTRL			0x198
+#define PM8921_ADC_ARB_USRP_AMUX_CNTRL_RSV0		BIT(0)
+#define PM8921_ADC_ARB_USRP_AMUX_CNTRL_RSV1		BIT(1)
+#define PM8921_ADC_ARB_USRP_AMUX_CNTRL_PREMUX0		BIT(2)
+#define PM8921_ADC_ARB_USRP_AMUX_CNTRL_PREMUX1		BIT(3)
+#define PM8921_ADC_ARB_USRP_AMUX_CNTRL_SEL0		BIT(4)
+#define PM8921_ADC_ARB_USRP_AMUX_CNTRL_SEL1		BIT(5)
+#define PM8921_ADC_ARB_USRP_AMUX_CNTRL_SEL2		BIT(6)
+#define PM8921_ADC_ARB_USRP_AMUX_CNTRL_SEL3		BIT(7)
+
+#define PM8921_ADC_ARB_USRP_ANA_PARAM			0x199
+#define PM8921_ADC_ARB_USRP_DIG_PARAM			0x19A
+#define PM8921_ADC_ARB_USRP_DIG_PARAM_SEL_SHIFT0	BIT(0)
+#define PM8921_ADC_ARB_USRP_DIG_PARAM_SEL_SHIFT1	BIT(1)
+#define PM8921_ADC_ARB_USRP_DIG_PARAM_CLK_RATE0		BIT(2)
+#define PM8921_ADC_ARB_USRP_DIG_PARAM_CLK_RATE1		BIT(3)
+#define PM8921_ADC_ARB_USRP_DIG_PARAM_EOC		BIT(4)
+#define PM8921_ADC_ARB_USRP_DIG_PARAM_DEC_RATE0		BIT(5)
+#define PM8921_ADC_ARB_USRP_DIG_PARAM_DEC_RATE1		BIT(6)
+#define PM8921_ADC_ARB_USRP_DIG_PARAM_EN		BIT(7)
+
+#define PM8921_ADC_ARB_USRP_RSV				0x19B
+#define PM8921_ADC_ARB_USRP_RSV_RST			BIT(0)
+#define PM8921_ADC_ARB_USRP_RSV_DTEST0			BIT(1)
+#define PM8921_ADC_ARB_USRP_RSV_DTEST1			BIT(2)
+#define PM8921_ADC_ARB_USRP_RSV_OP			BIT(3)
+#define PM8921_ADC_ARB_USRP_RSV_IP_SEL0			BIT(4)
+#define PM8921_ADC_ARB_USRP_RSV_IP_SEL1			BIT(5)
+#define PM8921_ADC_ARB_USRP_RSV_IP_SEL2			BIT(6)
+#define PM8921_ADC_ARB_USRP_RSV_TRM			BIT(7)
+
+#define PM8921_ADC_ARB_USRP_DATA0			0x19D
+#define PM8921_ADC_ARB_USRP_DATA1			0x19C
+
+#define PM8921_ADC_ARB_BTM_CNTRL1			0x17e
+#define PM8921_ADC_ARB_BTM_CNTRL1_EN_BTM		BIT(0)
+#define PM8921_ADC_ARB_BTM_CNTRL1_SEL_OP_MODE		BIT(1)
+#define PM8921_ADC_ARB_BTM_CNTRL1_MEAS_INTERVAL1	BIT(2)
+#define PM8921_ADC_ARB_BTM_CNTRL1_MEAS_INTERVAL2	BIT(3)
+#define PM8921_ADC_ARB_BTM_CNTRL1_MEAS_INTERVAL3	BIT(4)
+#define PM8921_ADC_ARB_BTM_CNTRL1_MEAS_INTERVAL4	BIT(5)
+#define PM8921_ADC_ARB_BTM_CNTRL1_EOC			BIT(6)
+#define PM8921_ADC_ARB_BTM_CNTRL1_REQ			BIT(7)
+
+#define PM8921_ADC_ARB_BTM_CNTRL2			0x18c
+#define PM8921_ADC_ARB_BTM_AMUX_CNTRL			0x17f
+#define PM8921_ADC_ARB_BTM_ANA_PARAM			0x180
+#define PM8921_ADC_ARB_BTM_DIG_PARAM			0x181
+#define PM8921_ADC_ARB_BTM_RSV				0x182
+#define PM8921_ADC_ARB_BTM_DATA1			0x183
+#define PM8921_ADC_ARB_BTM_DATA0			0x184
+#define PM8921_ADC_ARB_BTM_BAT_COOL_THR1		0x185
+#define PM8921_ADC_ARB_BTM_BAT_COOL_THR0		0x186
+#define PM8921_ADC_ARB_BTM_BAT_WARM_THR1		0x187
+#define PM8921_ADC_ARB_BTM_BAT_WARM_THR0		0x188
+
+#define PM8921_ADC_ARB_SECP_CNTRL			0x190
+#define PM8921_ADC_ARB_IRQ_BLOCK_SEL_SEC		0x1ac
+#define PM8921_ADC_ARB_IRQ_CONFIG_SEC			0x1ae
+#define PM8921_ADC_ARB_IRQ_BIT_PERM_USR			0x1a6
+#define PM8921_ADC_ARB_IRQ_BLOCK_SEL_USR		0x1c0
+#define PM8921_ADC_ARB_IRQ_CONFIG_USR			0x1c2
+
+#define PM8921_ADC_ARB_IRQ_BLOCK_SEL_DATA		0x09
+#define PM8921_ADC_ARB_IRQ_CONFIG_SEC_DATA		0xe0
+#define PM8921_ADC_ARB_IRQ_BIT_PERM_USR_DATA		0x40
+#define PM8921_ADC_ARB_IRQ_BLOCK_SEL_USR_DATA		0x09
+#define PM8921_ADC_ARB_IRQ_CONFIG_USR_DATA		0xe0
+
+#define PM8921_ADC_ARB_ANA_DIG				0xa0
+#define PM8921_ADC_ARB_SECP_CNTRL_WR			0x31
+
+#define PM8921_ADC_AMUX_MPP_SEL				2
+#define PM8921_ADC_AMUX_SEL				4
+#define PM8921_ADC_RSV_IP_SEL				4
+#define PM8921_ADC_BTM_CHANNEL_SEL			4
+#define PM8921_MAX_CHANNEL_PROPERTIES			2
+#define PM8921_ADC_IRQ_0				0
+#define PM8921_ADC_IRQ_1				1
+#define PM8921_ADC_IRQ_2				2
+#define PM8921_ADC_BTM_INTERVAL_SEL			5
+#define PM8921_ADC_BTM_DECIMATION_SEL			5
+#define PM8921_ADC_MUL					10
+#define PM8921_ADC_CONV_TIME_MIN			2000
+#define PM8921_ADC_CONV_TIME_MAX			2100
+
+struct pm8921_adc {
+	struct device				*dev;
+	struct pm8921_adc_properties		*adc_prop;
+	int					adc_irq;
+	struct mutex				adc_lock;
+	struct mutex				btm_lock;
+	uint32_t				adc_num_channel;
+	struct completion			adc_rslt_completion;
+	struct pm8921_adc_amux			*adc_channel;
+	struct pm8921_adc_amux_properties	*conv;
+	struct pm8921_adc_arb_btm		*batt;
+	int					btm_warm_irq;
+	int					btm_cold_irq;
+	struct dentry				*dent;
+};
+
+struct pm8921_adc_amux_properties {
+	uint32_t				amux_channel;
+	uint32_t				decimation;
+	uint32_t				amux_ip_rsv;
+	uint32_t				amux_mpp_channel;
+	struct pm8921_adc_chan_properties	*chan_prop;
+};
+
+static const struct pm8921_adc_scaling_ratio pm8921_amux_scaling_ratio[] = {
+	{1, 1},
+	{1, 3},
+	{1, 4},
+	{1, 6}
+};
+
+static struct pm8921_adc *pmic_adc;
+
+static struct pm8921_adc_scale_fn adc_scale_fn[] = {
+	[ADC_SCALE_DEFAULT] = {pm8921_adc_scale_default},
+	[ADC_SCALE_BATT_THERM] = {pm8921_adc_scale_batt_therm},
+	[ADC_SCALE_PMIC_THERM] = {pm8921_adc_scale_pmic_therm},
+	[ADC_SCALE_XTERN_CHGR_CUR] = {pm8921_adc_scale_xtern_chgr_cur},
+};
+
+static bool pm8921_adc_calib_first_adc, pm8921_btm_calib_first_adc;
+static bool pm8921_adc_initialized, pm8921_adc_calib_device_init;
+
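+/*
+ * Enable or disable the user-processor ADC arbiter by setting or clearing
+ * the REQ and EN_ARB bits in the arbiter control register.
+ */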
+static int32_t pm8921_adc_arb_cntrl(uint32_t arb_cntrl)
+{
+	struct pm8921_adc *adc_pmic = pmic_adc;
+	int i, rc;
+	u8 data_arb_cntrl = 0;
+
+	if (arb_cntrl)
+		data_arb_cntrl |= (PM8921_ADC_ARB_USRP_CNTRL1_REQ |
+				PM8921_ADC_ARB_USRP_CNTRL1_EN_ARB);
+
+	/* Write twice to the CNTRL register for the arbiter settings
+	   to take effect */
+	for (i = 0; i < 2; i++) {
+		rc = pm8xxx_writeb(adc_pmic->dev->parent,
+				PM8921_ADC_ARB_USRP_CNTRL1, data_arb_cntrl);
+		if (rc < 0) {
+			pr_err("PM8921 arb cntrl write failed with %d\n", rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static uint32_t pm8921_adc_read_reg(uint32_t reg, u8 *data)
+{
+	struct pm8921_adc *adc_pmic = pmic_adc;
+	int rc;
+
+	rc = pm8xxx_readb(adc_pmic->dev->parent, reg, data);
+	if (rc < 0) {
+		pr_err("PM8921 adc read reg %d failed with %d\n", reg, rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static uint32_t pm8921_adc_write_reg(uint32_t reg, u8 data)
+{
+	struct pm8921_adc *adc_pmic = pmic_adc;
+	int rc;
+
+	rc = pm8xxx_writeb(adc_pmic->dev->parent, reg, data);
+	if (rc < 0) {
+		pr_err("PM8921 adc write reg %d failed with %d\n", reg, rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int32_t pm8921_adc_configure(
+				struct pm8921_adc_amux_properties *chan_prop)
+{
+	struct pm8921_adc *adc_pmic = pmic_adc;
+	u8 data_amux_chan = 0, data_arb_rsv = 0, data_dig_param = 0;
+	int rc, i;
+
+	for (i = 0; i < 2; i++) {
+		rc = pm8921_adc_write_reg(PM8921_ADC_ARB_SECP_CNTRL,
+					PM8921_ADC_ARB_SECP_CNTRL_WR);
+		if (rc < 0)
+			return rc;
+	}
+
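+	/* Select the AMUX channel; a non-zero MPP pre-mux selection is
+	   programmed into the same register */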
+	data_amux_chan |= chan_prop->amux_channel << PM8921_ADC_AMUX_SEL;
+
+	if (chan_prop->amux_mpp_channel)
+		data_amux_chan |= chan_prop->amux_mpp_channel <<
+					PM8921_ADC_AMUX_MPP_SEL;
+
+	rc = pm8921_adc_write_reg(PM8921_ADC_ARB_USRP_AMUX_CNTRL,
+							data_amux_chan);
+	if (rc < 0)
+		return rc;
+
+	data_arb_rsv &= (PM8921_ADC_ARB_USRP_RSV_RST |
+		PM8921_ADC_ARB_USRP_RSV_DTEST0 |
+		PM8921_ADC_ARB_USRP_RSV_DTEST1 |
+		PM8921_ADC_ARB_USRP_RSV_OP |
+		PM8921_ADC_ARB_USRP_RSV_TRM);
+	data_arb_rsv |= chan_prop->amux_ip_rsv << PM8921_ADC_RSV_IP_SEL;
+
+	rc = pm8921_adc_write_reg(PM8921_ADC_ARB_USRP_RSV, data_arb_rsv);
+	if (rc < 0)
+		return rc;
+
+	rc = pm8921_adc_read_reg(PM8921_ADC_ARB_USRP_DIG_PARAM,
+							&data_dig_param);
+	if (rc < 0)
+		return rc;
+
+	/* Default 2.4Mhz clock rate */
+	/* Client chooses the decimation */
+	switch (chan_prop->decimation) {
+	case ADC_DECIMATION_TYPE1:
+		data_dig_param |= PM8921_ADC_ARB_USRP_DIG_PARAM_DEC_RATE0;
+		break;
+	case ADC_DECIMATION_TYPE2:
+		data_dig_param |= (PM8921_ADC_ARB_USRP_DIG_PARAM_DEC_RATE0
+				| PM8921_ADC_ARB_USRP_DIG_PARAM_DEC_RATE1);
+		break;
+	default:
+		data_dig_param |= PM8921_ADC_ARB_USRP_DIG_PARAM_DEC_RATE0;
+		break;
+	}
+	rc = pm8921_adc_write_reg(PM8921_ADC_ARB_USRP_DIG_PARAM,
+						PM8921_ADC_ARB_ANA_DIG);
+	if (rc < 0)
+		return rc;
+
+	rc = pm8921_adc_write_reg(PM8921_ADC_ARB_USRP_ANA_PARAM,
+						PM8921_ADC_ARB_ANA_DIG);
+	if (rc < 0)
+		return rc;
+
+	if (!pm8921_adc_calib_first_adc)
+		enable_irq(adc_pmic->adc_irq);
+
+	rc = pm8921_adc_write_reg(PM8921_ADC_ARB_IRQ_BLOCK_SEL_SEC,
+					PM8921_ADC_ARB_IRQ_BLOCK_SEL_DATA);
+	if (rc < 0)
+		return rc;
+
+	rc = pm8921_adc_write_reg(PM8921_ADC_ARB_IRQ_CONFIG_SEC,
+					PM8921_ADC_ARB_IRQ_CONFIG_SEC_DATA);
+	if (rc < 0)
+		return rc;
+
+	rc = pm8921_adc_write_reg(PM8921_ADC_ARB_IRQ_BIT_PERM_USR,
+					PM8921_ADC_ARB_IRQ_BIT_PERM_USR_DATA);
+	if (rc < 0)
+		return rc;
+
+	rc = pm8921_adc_write_reg(PM8921_ADC_ARB_IRQ_BLOCK_SEL_USR,
+					PM8921_ADC_ARB_IRQ_BLOCK_SEL_USR_DATA);
+	if (rc < 0)
+		return rc;
+
+	rc = pm8921_adc_write_reg(PM8921_ADC_ARB_IRQ_CONFIG_USR,
+					PM8921_ADC_ARB_IRQ_CONFIG_USR_DATA);
+	if (rc < 0)
+		return rc;
+
+	rc = pm8921_adc_arb_cntrl(1);
+	if (rc < 0) {
+		pr_err("Configuring ADC Arbiter enable failed with %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static uint32_t pm8921_adc_read_adc_code(int32_t *data)
+{
+	struct pm8921_adc *adc_pmic = pmic_adc;
+	uint8_t rslt_lsb, rslt_msb;
+	int32_t rc, max_ideal_adc_code = 1 << adc_pmic->adc_prop->bitresolution;
+
+	rc = pm8xxx_readb(adc_pmic->dev->parent,
+				PM8921_ADC_ARB_USRP_DATA0, &rslt_lsb);
+	if (rc < 0) {
+		pr_err("PM8921 adc result read failed with %d\n", rc);
+		return rc;
+	}
+
+	rc = pm8xxx_readb(adc_pmic->dev->parent,
+				PM8921_ADC_ARB_USRP_DATA1, &rslt_msb);
+	if (rc < 0) {
+		pr_err("PM8921 adc result read failed with %d\n", rc);
+		return rc;
+	}
+
+	*data = (rslt_msb << 8) | rslt_lsb;
+
+	/* Use the midpoint to determine underflow or overflow */
+	if (*data > max_ideal_adc_code + (max_ideal_adc_code >> 1))
+		*data |= ((1 << (8 * sizeof(*data) -
+			adc_pmic->adc_prop->bitresolution)) - 1) <<
+			adc_pmic->adc_prop->bitresolution;
+
+	/* Default value for switching off the arbiter after reading
+	   the ADC value. Bit 0 set to 0. */
+	rc = pm8921_adc_arb_cntrl(0);
+	if (rc < 0) {
+		pr_err("%s: Configuring ADC Arbiter disable failed\n",
+					__func__);
+		return rc;
+	}
+
+	return 0;
+}
+
+static irqreturn_t pm8921_adc_isr(int irq, void *dev_id)
+{
+	struct pm8921_adc *adc_8921 = dev_id;
+
+	disable_irq_nosync(adc_8921->adc_irq);
+
+	if (pm8921_adc_calib_first_adc)
+		return IRQ_HANDLED;
+	/* TODO: handle spurious interrupt condition */
+	complete(&adc_8921->adc_rslt_completion);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t pm8921_btm_warm_isr(int irq, void *dev_id)
+{
+	struct pm8921_adc *btm_8921 = dev_id;
+
+	disable_irq_nosync(btm_8921->btm_warm_irq);
+
+	if (pm8921_btm_calib_first_adc)
+		return IRQ_HANDLED;
+
+	if (btm_8921->batt->btm_param->btm_warm_fn != NULL)
+		btm_8921->batt->btm_param->btm_warm_fn();
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t pm8921_btm_cold_isr(int irq, void *dev_id)
+{
+	struct pm8921_adc *btm_8921 = dev_id;
+
+	disable_irq_nosync(btm_8921->btm_cold_irq);
+
+	if (pm8921_btm_calib_first_adc)
+		return IRQ_HANDLED;
+
+	if (btm_8921->batt->btm_param->btm_cold_fn != NULL)
+		btm_8921->batt->btm_param->btm_cold_fn();
+
+	return IRQ_HANDLED;
+}
+
+static uint32_t pm8921_adc_calib_device(void)
+{
+	struct pm8921_adc *adc_pmic = pmic_adc;
+	struct pm8921_adc_amux_properties conv;
+	int rc, offset_adc, slope_adc, calib_read_1, calib_read_2;
+	u8 data_arb_usrp_cntrl1 = 0;
+
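+	/*
+	 * Absolute calibration: convert the internal 1.25 V and 625 mV
+	 * references and derive the ADC slope and offset from the two
+	 * readings.
+	 */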
+	conv.amux_channel = CHANNEL_125V;
+	conv.decimation = ADC_DECIMATION_TYPE2;
+	conv.amux_ip_rsv = AMUX_RSV1;
+	conv.amux_mpp_channel = PREMUX_MPP_SCALE_0;
+	pm8921_adc_calib_first_adc = true;
+	rc = pm8921_adc_configure(&conv);
+	if (rc) {
+		pr_err("pm8921_adc configure failed with %d\n", rc);
+		goto calib_fail;
+	}
+
+	while (data_arb_usrp_cntrl1 != (PM8921_ADC_ARB_USRP_CNTRL1_EOC |
+					PM8921_ADC_ARB_USRP_CNTRL1_EN_ARB)) {
+		rc = pm8921_adc_read_reg(PM8921_ADC_ARB_USRP_CNTRL1,
+					&data_arb_usrp_cntrl1);
+		if (rc < 0)
+			return rc;
+		usleep_range(PM8921_ADC_CONV_TIME_MIN,
+					PM8921_ADC_CONV_TIME_MAX);
+	}
+	data_arb_usrp_cntrl1 = 0;
+
+	rc = pm8921_adc_read_adc_code(&calib_read_1);
+	if (rc) {
+		pr_err("pm8921_adc read adc failed with %d\n", rc);
+		pm8921_adc_calib_first_adc = false;
+		goto calib_fail;
+	}
+	pm8921_adc_calib_first_adc = false;
+
+	conv.amux_channel = CHANNEL_625MV;
+	conv.decimation = ADC_DECIMATION_TYPE2;
+	conv.amux_ip_rsv = AMUX_RSV1;
+	conv.amux_mpp_channel = PREMUX_MPP_SCALE_0;
+	pm8921_adc_calib_first_adc = true;
+	rc = pm8921_adc_configure(&conv);
+	if (rc) {
+		pr_err("pm8921_adc configure failed with %d\n", rc);
+		goto calib_fail;
+	}
+
+	while (data_arb_usrp_cntrl1 != (PM8921_ADC_ARB_USRP_CNTRL1_EOC |
+					PM8921_ADC_ARB_USRP_CNTRL1_EN_ARB)) {
+		rc = pm8921_adc_read_reg(PM8921_ADC_ARB_USRP_CNTRL1,
+					&data_arb_usrp_cntrl1);
+		if (rc < 0)
+			return rc;
+		usleep_range(PM8921_ADC_CONV_TIME_MIN,
+					PM8921_ADC_CONV_TIME_MAX);
+	}
+	data_arb_usrp_cntrl1 = 0;
+
+	rc = pm8921_adc_read_adc_code(&calib_read_2);
+	if (rc) {
+		pr_err("pm8921_adc read adc failed with %d\n", rc);
+		pm8921_adc_calib_first_adc = false;
+		goto calib_fail;
+	}
+	pm8921_adc_calib_first_adc = false;
+
+	slope_adc = (((calib_read_1 - calib_read_2) << PM8921_ADC_MUL)/
+					PM8921_CHANNEL_ADC_625_MV);
+	offset_adc = calib_read_2 -
+			((slope_adc * PM8921_CHANNEL_ADC_625_MV) >>
+							PM8921_ADC_MUL);
+
+	adc_pmic->conv->chan_prop->adc_graph[ADC_CALIB_ABSOLUTE].offset
+								= offset_adc;
+	adc_pmic->conv->chan_prop->adc_graph[ADC_CALIB_ABSOLUTE].dy =
+					(calib_read_1 - calib_read_2);
+	adc_pmic->conv->chan_prop->adc_graph[ADC_CALIB_ABSOLUTE].dx
+						= PM8921_CHANNEL_ADC_625_MV;
+	rc = pm8921_adc_arb_cntrl(0);
+	if (rc < 0) {
+		pr_err("%s: Configuring ADC Arbiter disable failed\n",
+					__func__);
+		return rc;
+	}
+	/* Ratiometric Calibration */
+	conv.amux_channel = CHANNEL_MUXOFF;
+	conv.decimation = ADC_DECIMATION_TYPE2;
+	conv.amux_ip_rsv = AMUX_RSV5;
+	conv.amux_mpp_channel = PREMUX_MPP_SCALE_0;
+	pm8921_adc_calib_first_adc = true;
+	rc = pm8921_adc_configure(&conv);
+	if (rc) {
+		pr_err("pm8921_adc configure failed with %d\n", rc);
+		goto calib_fail;
+	}
+
+	while (data_arb_usrp_cntrl1 != (PM8921_ADC_ARB_USRP_CNTRL1_EOC |
+					PM8921_ADC_ARB_USRP_CNTRL1_EN_ARB)) {
+		rc = pm8921_adc_read_reg(PM8921_ADC_ARB_USRP_CNTRL1,
+					&data_arb_usrp_cntrl1);
+		if (rc < 0)
+			return rc;
+		usleep_range(PM8921_ADC_CONV_TIME_MIN,
+					PM8921_ADC_CONV_TIME_MAX);
+	}
+	data_arb_usrp_cntrl1 = 0;
+
+	rc = pm8921_adc_read_adc_code(&calib_read_1);
+	if (rc) {
+		pr_err("pm8921_adc read adc failed with %d\n", rc);
+		pm8921_adc_calib_first_adc = false;
+		goto calib_fail;
+	}
+	pm8921_adc_calib_first_adc = false;
+
+	conv.amux_channel = CHANNEL_MUXOFF;
+	conv.decimation = ADC_DECIMATION_TYPE2;
+	conv.amux_ip_rsv = AMUX_RSV4;
+	conv.amux_mpp_channel = PREMUX_MPP_SCALE_0;
+	pm8921_adc_calib_first_adc = true;
+	rc = pm8921_adc_configure(&conv);
+	if (rc) {
+		pr_err("pm8921_adc configure failed with %d\n", rc);
+		goto calib_fail;
+	}
+
+	while (data_arb_usrp_cntrl1 != (PM8921_ADC_ARB_USRP_CNTRL1_EOC |
+					PM8921_ADC_ARB_USRP_CNTRL1_EN_ARB)) {
+		rc = pm8921_adc_read_reg(PM8921_ADC_ARB_USRP_CNTRL1,
+					&data_arb_usrp_cntrl1);
+		if (rc < 0)
+			return rc;
+		usleep_range(PM8921_ADC_CONV_TIME_MIN,
+					PM8921_ADC_CONV_TIME_MAX);
+	}
+	data_arb_usrp_cntrl1 = 0;
+
+	rc = pm8921_adc_read_adc_code(&calib_read_2);
+	if (rc) {
+		pr_err("pm8921_adc read adc failed with %d\n", rc);
+		pm8921_adc_calib_first_adc = false;
+		goto calib_fail;
+	}
+	pm8921_adc_calib_first_adc = false;
+
+	slope_adc = (((calib_read_1 - calib_read_2) << PM8921_ADC_MUL)/
+				adc_pmic->adc_prop->adc_vdd_reference);
+	offset_adc = calib_read_2 -
+			((slope_adc * adc_pmic->adc_prop->adc_vdd_reference)
+							>> PM8921_ADC_MUL);
+
+	adc_pmic->conv->chan_prop->adc_graph[ADC_CALIB_RATIOMETRIC].offset
+								= offset_adc;
+	adc_pmic->conv->chan_prop->adc_graph[ADC_CALIB_RATIOMETRIC].dy =
+					(calib_read_1 - calib_read_2);
+	adc_pmic->conv->chan_prop->adc_graph[ADC_CALIB_RATIOMETRIC].dx =
+					adc_pmic->adc_prop->adc_vdd_reference;
+calib_fail:
+	rc = pm8921_adc_arb_cntrl(0);
+	if (rc < 0) {
+		pr_err("%s: Configuring ADC Arbiter disable failed\n",
+					__func__);
+	}
+
+	return rc;
+}
+
+uint32_t pm8921_adc_read(enum pm8921_adc_channels channel,
+				struct pm8921_adc_chan_result *result)
+{
+	return pm8921_adc_mpp_read(channel, result, PREMUX_MPP_SCALE_0);
+}
+EXPORT_SYMBOL_GPL(pm8921_adc_read);
+
+uint32_t pm8921_adc_mpp_read(enum pm8921_adc_mpp_channels channel,
+				struct pm8921_adc_chan_result *result,
+				enum pm8921_adc_premux_mpp_scale_type mpp_scale)
+{
+	struct pm8921_adc *adc_pmic = pmic_adc;
+	int i = 0, rc, amux_prescaling, scale_type;
+
+	if (!pm8921_adc_initialized)
+		return -ENODEV;
+
+	if (!pm8921_adc_calib_device_init) {
+		if (pm8921_adc_calib_device() == 0)
+			pm8921_adc_calib_device_init = true;
+	}
+
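+	/*
+	 * Look up the requested channel, program its prescale ratio and
+	 * decimation, start a conversion and block until the EOC interrupt
+	 * completes it, then run the channel's scaling function on the raw
+	 * code.
+	 */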
+	mutex_lock(&adc_pmic->adc_lock);
+
+	for (i = 0; i < adc_pmic->adc_num_channel; i++) {
+		if (channel == adc_pmic->adc_channel[i].channel_name)
+			break;
+	}
+
+	if (i == adc_pmic->adc_num_channel) {
+		mutex_unlock(&adc_pmic->adc_lock);
+		return -EBADF; /* unknown channel */
+	}
+
+	adc_pmic->conv->amux_channel = i;
+	adc_pmic->conv->amux_mpp_channel = mpp_scale;
+
+	adc_pmic->conv->amux_ip_rsv = adc_pmic->adc_channel[i].adc_rsv;
+	adc_pmic->conv->decimation = adc_pmic->adc_channel[i].adc_decimation;
+	amux_prescaling = adc_pmic->adc_channel[i].chan_path_prescaling;
+
+	adc_pmic->conv->chan_prop->offset_gain_numerator =
+		pm8921_amux_scaling_ratio[amux_prescaling].num;
+	adc_pmic->conv->chan_prop->offset_gain_denominator =
+		 pm8921_amux_scaling_ratio[amux_prescaling].den;
+
+	rc = pm8921_adc_configure(adc_pmic->conv);
+	if (rc) {
+		mutex_unlock(&adc_pmic->adc_lock);
+		return -EINVAL;
+	}
+
+	wait_for_completion(&adc_pmic->adc_rslt_completion);
+
+	rc = pm8921_adc_read_adc_code(&result->adc_code);
+	if (rc) {
+		mutex_unlock(&adc_pmic->adc_lock);
+		return -EINVAL;
+	}
+
+	scale_type = adc_pmic->adc_channel[i].adc_scale_fn;
+	if (scale_type >= ADC_SCALE_NONE) {
+		mutex_unlock(&adc_pmic->adc_lock);
+		return -EBADF;
+	}
+
+	adc_scale_fn[scale_type].chan(result->adc_code,
+			adc_pmic->adc_prop, adc_pmic->conv->chan_prop, result);
+
+	mutex_unlock(&adc_pmic->adc_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pm8921_adc_mpp_read);
+
+uint32_t pm8921_adc_btm_configure(struct pm8921_adc_arb_btm_param *btm_param)
+{
+	struct pm8921_adc *adc_pmic = pmic_adc;
+	u8 data_btm_cool_thr0, data_btm_cool_thr1;
+	u8 data_btm_warm_thr0, data_btm_warm_thr1;
+	u8 arb_btm_cntrl1;
+	int rc;
+
+	mutex_lock(&adc_pmic->btm_lock);
+
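+	/* Split the 16-bit cool/warm thresholds into the low/high byte
+	   threshold registers */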
+	data_btm_cool_thr0 = ((btm_param->low_thr_voltage << 24) >> 24);
+	data_btm_cool_thr1 = ((btm_param->low_thr_voltage << 16) >> 24);
+	data_btm_warm_thr0 = ((btm_param->high_thr_voltage << 24) >> 24);
+	data_btm_warm_thr1 = ((btm_param->high_thr_voltage << 16) >> 24);
+
+	rc = pm8921_adc_write_reg(PM8921_ADC_ARB_BTM_BAT_COOL_THR0,
+						data_btm_cool_thr0);
+	if (rc < 0)
+		goto write_err;
+
+	rc = pm8921_adc_write_reg(PM8921_ADC_ARB_BTM_BAT_COOL_THR1,
+						data_btm_cool_thr1);
+	if (rc < 0)
+		goto write_err;
+
+	rc = pm8921_adc_write_reg(PM8921_ADC_ARB_BTM_BAT_WARM_THR0,
+						data_btm_warm_thr0);
+	if (rc < 0)
+		goto write_err;
+
+	rc = pm8921_adc_write_reg(PM8921_ADC_ARB_BTM_BAT_WARM_THR1,
+						data_btm_warm_thr1);
+	if (rc < 0)
+		goto write_err;
+
+	arb_btm_cntrl1 = btm_param->interval << PM8921_ADC_BTM_INTERVAL_SEL;
+
+	rc = pm8921_adc_write_reg(PM8921_ADC_ARB_BTM_CNTRL1, arb_btm_cntrl1);
+	if (rc < 0)
+		goto write_err;
+
+	adc_pmic->batt->btm_param->btm_warm_fn = btm_param->btm_warm_fn;
+	adc_pmic->batt->btm_param->btm_cold_fn = btm_param->btm_cold_fn;
+
+	mutex_unlock(&adc_pmic->btm_lock);
+
+	return rc;
+
+write_err:
+	mutex_unlock(&adc_pmic->btm_lock);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(pm8921_adc_btm_configure);
+
+static uint32_t pm8921_adc_btm_read(uint32_t channel)
+{
+	struct pm8921_adc *adc_pmic = pmic_adc;
+	int rc, i;
+	u8 arb_btm_dig_param, arb_btm_ana_param, arb_btm_rsv;
+	u8 arb_btm_amux_cntrl, arb_btm_decimation, data_arb_btm_cntrl;
+
+	arb_btm_amux_cntrl = channel << PM8921_ADC_BTM_CHANNEL_SEL;
+	arb_btm_rsv = adc_pmic->adc_channel[channel].adc_rsv;
+	arb_btm_decimation =
+		adc_pmic->adc_channel[channel].adc_decimation;
+	arb_btm_ana_param = PM8921_ADC_ARB_ANA_DIG;
+
+	mutex_lock(&adc_pmic->btm_lock);
+
+	rc = pm8921_adc_write_reg(PM8921_ADC_ARB_BTM_AMUX_CNTRL,
+						arb_btm_amux_cntrl);
+	if (rc < 0)
+		goto write_err;
+
+	rc = pm8921_adc_write_reg(PM8921_ADC_ARB_BTM_RSV, arb_btm_rsv);
+	if (rc < 0)
+		goto write_err;
+
+	arb_btm_dig_param = arb_btm_decimation <<
+				PM8921_ADC_BTM_DECIMATION_SEL;
+	rc = pm8921_adc_write_reg(PM8921_ADC_ARB_BTM_DIG_PARAM,
+						arb_btm_dig_param);
+	if (rc < 0)
+		goto write_err;
+
+	rc = pm8921_adc_write_reg(PM8921_ADC_ARB_BTM_ANA_PARAM,
+						arb_btm_ana_param);
+	if (rc < 0)
+		goto write_err;
+
+	data_arb_btm_cntrl = PM8921_ADC_ARB_BTM_CNTRL1_EOC |
+				PM8921_ADC_ARB_BTM_CNTRL1_EN_BTM;
+
+	/* Write twice to the CNTRL register for the arbiter settings
+	   to take effect */
+	for (i = 0; i < 2; i++) {
+		rc = pm8921_adc_write_reg(PM8921_ADC_ARB_BTM_CNTRL1,
+						data_arb_btm_cntrl);
+		if (rc < 0)
+			goto write_err;
+	}
+
+	mutex_unlock(&adc_pmic->btm_lock);
+
+	return 0;
+
+write_err:
+	mutex_unlock(&adc_pmic->btm_lock);
+	return rc;
+}
+
+uint32_t pm8921_adc_btm_start(void)
+{
+	int rc;
+
+	rc = pm8921_adc_btm_read(CHANNEL_BATT_THERM);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(pm8921_adc_btm_start);
+
+uint32_t pm8921_adc_btm_end(void)
+{
+	struct pm8921_adc *adc_pmic = pmic_adc;
+	int i, rc;
+	u8 data_arb_btm_cntrl;
+
+	/* Set BTM registers to Disable mode */
+	data_arb_btm_cntrl = PM8921_ADC_ARB_BTM_CNTRL1_EOC;
+
+	mutex_lock(&adc_pmic->btm_lock);
+	/* Write twice to the CNTRL register for the arbiter settings
+	   to take effect */
+	for (i = 0; i < 2; i++) {
+		rc = pm8921_adc_write_reg(PM8921_ADC_ARB_BTM_CNTRL1,
+							data_arb_btm_cntrl);
+		if (rc < 0) {
+			mutex_unlock(&adc_pmic->btm_lock);
+			return rc;
+		}
+	}
+	mutex_unlock(&adc_pmic->btm_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(pm8921_adc_btm_end);
+
+static int get_adc(void *data, u64 *val)
+{
+	struct pm8921_adc_chan_result result;
+	int i = (int)data;
+	int rc;
+
+	rc = pm8921_adc_read(i, &result);
+	if (rc)
+		return rc;
+
+	pr_info("ADC value raw:%x physical:%lld\n",
+			result.adc_code, result.physical);
+	*val = result.physical;
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(reg_fops, get_adc, NULL, "%llu\n");
+
+#ifdef CONFIG_DEBUG_FS
+static void create_debugfs_entries(void)
+{
+	pmic_adc->dent = debugfs_create_dir("pm8921_adc", NULL);
+
+	if (IS_ERR(pmic_adc->dent)) {
+		pr_err("pmic adc debugfs dir not created\n");
+		return;
+	}
+
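+	/* Each file passes its ADC channel as the private data pointer,
+	   recovered as an int in get_adc() */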
+	debugfs_create_file("vbat", 0644, pmic_adc->dent,
+			    (void *)CHANNEL_VBAT, &reg_fops);
+	debugfs_create_file("625mv", 0644, pmic_adc->dent,
+			    (void *)CHANNEL_625MV, &reg_fops);
+	debugfs_create_file("125v", 0644, pmic_adc->dent,
+			    (void *)CHANNEL_125V, &reg_fops);
+	debugfs_create_file("die_temp", 0644, pmic_adc->dent,
+			    (void *)CHANNEL_DIE_TEMP, &reg_fops);
+	debugfs_create_file("vcoin", 0644, pmic_adc->dent,
+			    (void *)CHANNEL_VCOIN, &reg_fops);
+	debugfs_create_file("dc_in", 0644, pmic_adc->dent,
+			    (void *)CHANNEL_DCIN, &reg_fops);
+	debugfs_create_file("vph_pwr", 0644, pmic_adc->dent,
+			    (void *)CHANNEL_VPH_PWR, &reg_fops);
+	debugfs_create_file("usb_in", 0644, pmic_adc->dent,
+			    (void *)CHANNEL_USBIN, &reg_fops);
+	debugfs_create_file("batt_therm", 0644, pmic_adc->dent,
+			    (void *)CHANNEL_BATT_THERM, &reg_fops);
+	debugfs_create_file("batt_id", 0644, pmic_adc->dent,
+			    (void *)CHANNEL_BATT_ID, &reg_fops);
+	debugfs_create_file("chg_temp", 0644, pmic_adc->dent,
+			    (void *)CHANNEL_CHG_TEMP, &reg_fops);
+	debugfs_create_file("charger_current", 0644, pmic_adc->dent,
+			    (void *)CHANNEL_ICHG, &reg_fops);
+	debugfs_create_file("ibat", 0644, pmic_adc->dent,
+			    (void *)CHANNEL_IBAT, &reg_fops);
+}
+#else
+static inline void create_debugfs_entries(void)
+{
+}
+#endif
+
+static int __devexit pm8921_adc_teardown(struct platform_device *pdev)
+{
+	struct pm8921_adc *adc_pmic = pmic_adc;
+
+	device_init_wakeup(&pdev->dev, 0);
+	free_irq(adc_pmic->adc_irq, adc_pmic);
+	free_irq(adc_pmic->btm_warm_irq, adc_pmic);
+	free_irq(adc_pmic->btm_cold_irq, adc_pmic);
+	platform_set_drvdata(pdev, NULL);
+	pmic_adc = NULL;
+	kfree(adc_pmic->conv->chan_prop);
+	/* adc_channel points at platform data and is not owned here */
+	kfree(adc_pmic->conv);
+	kfree(adc_pmic);
+	pm8921_adc_initialized = false;
+
+	return 0;
+}
+
+static int __devinit pm8921_adc_probe(struct platform_device *pdev)
+{
+	const struct pm8921_adc_platform_data *pdata = pdev->dev.platform_data;
+	struct pm8921_adc *adc_pmic;
+	struct pm8921_adc_amux_properties *adc_amux_prop;
+	struct pm8921_adc_chan_properties *adc_pmic_chanprop;
+	struct pm8921_adc_amux *adc_amux;
+	int rc = 0;
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "no platform data?\n");
+		return -EINVAL;
+	}
+
+	adc_pmic = kzalloc(sizeof(struct pm8921_adc),
+						GFP_KERNEL);
+	if (!adc_pmic) {
+		dev_err(&pdev->dev, "Unable to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	adc_amux_prop = kzalloc(sizeof(struct pm8921_adc_amux_properties),
+						GFP_KERNEL);
+	if (!adc_amux_prop) {
+		dev_err(&pdev->dev, "Unable to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	adc_pmic_chanprop = kzalloc(sizeof(struct pm8921_adc_chan_properties),
+						GFP_KERNEL);
+	if (!adc_pmic_chanprop) {
+		dev_err(&pdev->dev, "Unable to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	adc_pmic->dev = &pdev->dev;
+	adc_pmic->adc_prop = pdata->adc_prop;
+	adc_pmic->conv = adc_amux_prop;
+	adc_pmic->conv->chan_prop = adc_pmic_chanprop;
+
+	init_completion(&adc_pmic->adc_rslt_completion);
+	adc_amux = pdata->adc_channel;
+	adc_pmic->adc_channel = adc_amux;
+	adc_pmic->adc_num_channel = pdata->adc_num_channel;
+
+	mutex_init(&adc_pmic->adc_lock);
+	mutex_init(&adc_pmic->btm_lock);
+
+	adc_pmic->adc_irq = platform_get_irq(pdev, PM8921_ADC_IRQ_0);
+	if (adc_pmic->adc_irq < 0) {
+		rc = -ENXIO;
+		goto err_cleanup;
+	}
+
+	rc = request_irq(adc_pmic->adc_irq,
+				pm8921_adc_isr,
+		IRQF_TRIGGER_RISING, "pm8921_adc_interrupt", adc_pmic);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to request adc irq "
+						"with error %d\n", rc);
+		goto err_cleanup;
+	}
+
+	disable_irq_nosync(adc_pmic->adc_irq);
+
+	adc_pmic->btm_warm_irq = platform_get_irq(pdev, PM8921_ADC_IRQ_1);
+	if (adc_pmic->btm_warm_irq < 0) {
+		rc = -ENXIO;
+		goto err_cleanup;
+	}
+
+	rc = request_irq(adc_pmic->btm_warm_irq,
+				pm8921_btm_warm_isr,
+		IRQF_TRIGGER_RISING, "pm8921_btm_warm_interrupt", adc_pmic);
+	if (rc) {
+		pr_err("btm warm irq failed %d with interrupt number %d\n",
+						rc, adc_pmic->btm_warm_irq);
+		dev_err(&pdev->dev, "failed to request btm irq\n");
+		goto err_cleanup;
+	}
+
+	disable_irq_nosync(adc_pmic->btm_warm_irq);
+
+	adc_pmic->btm_cold_irq = platform_get_irq(pdev, PM8921_ADC_IRQ_2);
+	if (adc_pmic->btm_cold_irq < 0) {
+		rc = -ENXIO;
+		goto err_cleanup;
+	}
+
+	rc = request_irq(adc_pmic->btm_cold_irq,
+				pm8921_btm_cold_isr,
+		IRQF_TRIGGER_RISING, "pm8921_btm_cold_interrupt", adc_pmic);
+	if (rc) {
+		pr_err("btm cold irq failed with return %d and number %d\n",
+						rc, adc_pmic->btm_cold_irq);
+		dev_err(&pdev->dev, "failed to request btm irq\n");
+		goto err_cleanup;
+	}
+
+	disable_irq_nosync(adc_pmic->btm_cold_irq);
+	device_init_wakeup(&pdev->dev, pdata->adc_wakeup);
+	platform_set_drvdata(pdev, adc_pmic);
+	pmic_adc = adc_pmic;
+
+	create_debugfs_entries();
+	pm8921_adc_calib_first_adc = false;
+	pm8921_btm_calib_first_adc = false;
+	pm8921_adc_calib_device_init = false;
+	pm8921_adc_initialized = true;
+	return 0;
+
+err_cleanup:
+	pm8921_adc_teardown(pdev);
+	return rc;
+}
+
+static struct platform_driver pm8921_adc_driver = {
+	.probe	= pm8921_adc_probe,
+	.remove	= __devexit_p(pm8921_adc_teardown),
+	.driver	= {
+		.name	= PM8921_ADC_DEV_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init pm8921_adc_init(void)
+{
+	return platform_driver_register(&pm8921_adc_driver);
+}
+module_init(pm8921_adc_init);
+
+static void __exit pm8921_adc_exit(void)
+{
+	platform_driver_unregister(&pm8921_adc_driver);
+}
+module_exit(pm8921_adc_exit);
+
+MODULE_ALIAS("platform:" PM8921_ADC_DEV_NAME);
+MODULE_DESCRIPTION("PMIC8921 ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/pm8921-core.c b/drivers/mfd/pm8921-core.c
index e873b15..f088dc1 100644
--- a/drivers/mfd/pm8921-core.c
+++ b/drivers/mfd/pm8921-core.c
@@ -21,13 +21,38 @@
 #include <linux/mfd/core.h>
 #include <linux/mfd/pm8xxx/pm8921.h>
 #include <linux/mfd/pm8xxx/core.h>
+#include <linux/leds-pm8xxx.h>
 
 #define REG_HWREV		0x002  /* PMIC4 revision */
 #define REG_HWREV_2		0x0E8  /* PMIC4 revision 2 */
 
+#define REG_MPP_BASE		0x050
+
+#define REG_TEMP_ALARM_CTRL	0x1B
+#define REG_TEMP_ALARM_PWM	0x9B
+
+#define REG_BATT_ALARM_THRESH	0x023
+#define REG_BATT_ALARM_CTRL1	0x024
+#define REG_BATT_ALARM_CTRL2	0x0AA
+#define REG_BATT_ALARM_PWM_CTRL	0x0A3
+
+#define PM8921_VERSION_MASK	0xFFF0
+#define PM8921_VERSION_VALUE	0x06F0
+#define PM8921_REVISION_MASK	0x000F
+
+#define SINGLE_IRQ_RESOURCE(_name, _irq) \
+{ \
+	.name	= _name, \
+	.start	= _irq, \
+	.end	= _irq, \
+	.flags	= IORESOURCE_IRQ, \
+}
+
 struct pm8921 {
 	struct device			*dev;
 	struct pm_irq_chip		*irq_chip;
+	struct mfd_cell                 *mfd_regulators;
+	u32				rev_registers;
 };
 
 static int pm8921_readb(const struct device *dev, u16 addr, u8 *val)
@@ -72,25 +97,258 @@
 	return pm8xxx_get_irq_stat(pmic->irq_chip, irq);
 }
 
+static enum pm8xxx_version pm8921_get_version(const struct device *dev)
+{
+	const struct pm8xxx_drvdata *pm8921_drvdata = dev_get_drvdata(dev);
+	const struct pm8921 *pmic = pm8921_drvdata->pm_chip_data;
+	enum pm8xxx_version version = -ENODEV;
+
+	if ((pmic->rev_registers & PM8921_VERSION_MASK) == PM8921_VERSION_VALUE)
+		version = PM8XXX_VERSION_8921;
+
+	return version;
+}
+
+static int pm8921_get_revision(const struct device *dev)
+{
+	const struct pm8xxx_drvdata *pm8921_drvdata = dev_get_drvdata(dev);
+	const struct pm8921 *pmic = pm8921_drvdata->pm_chip_data;
+
+	return pmic->rev_registers & PM8921_REVISION_MASK;
+}
+
 static struct pm8xxx_drvdata pm8921_drvdata = {
 	.pmic_readb		= pm8921_readb,
 	.pmic_writeb		= pm8921_writeb,
 	.pmic_read_buf		= pm8921_read_buf,
 	.pmic_write_buf		= pm8921_write_buf,
 	.pmic_read_irq_stat	= pm8921_read_irq_stat,
+	.pmic_get_version	= pm8921_get_version,
+	.pmic_get_revision	= pm8921_get_revision,
 };
 
-static int __devinit pm8921_add_subdevices(const struct pm8921_platform_data
-					   *pdata,
-					   struct pm8921 *pmic,
-					   u32 rev)
+static const struct resource gpio_cell_resources[] __devinitconst = {
+	[0] = {
+		.start = PM8921_IRQ_BLOCK_BIT(PM8921_GPIO_BLOCK_START, 0),
+		.end   = PM8921_IRQ_BLOCK_BIT(PM8921_GPIO_BLOCK_START, 0)
+			+ PM8921_NR_GPIOS - 1,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct mfd_cell gpio_cell __devinitdata = {
+	.name		= PM8XXX_GPIO_DEV_NAME,
+	.id		= -1,
+	.resources	= gpio_cell_resources,
+	.num_resources	= ARRAY_SIZE(gpio_cell_resources),
+};
+
+static const struct resource adc_cell_resources[] __devinitconst = {
+	SINGLE_IRQ_RESOURCE(NULL, PM8921_ADC_EOC_USR_IRQ),
+	SINGLE_IRQ_RESOURCE(NULL, PM8921_ADC_BATT_TEMP_WARM_IRQ),
+	SINGLE_IRQ_RESOURCE(NULL, PM8921_ADC_BATT_TEMP_COLD_IRQ),
+};
+
+static struct mfd_cell adc_cell __devinitdata = {
+	.name		= PM8921_ADC_DEV_NAME,
+	.id		= -1,
+	.resources	= adc_cell_resources,
+	.num_resources	= ARRAY_SIZE(adc_cell_resources),
+};
+
+static const struct resource mpp_cell_resources[] __devinitconst = {
+	{
+		.start	= PM8921_IRQ_BLOCK_BIT(PM8921_MPP_BLOCK_START, 0),
+		.end	= PM8921_IRQ_BLOCK_BIT(PM8921_MPP_BLOCK_START, 0)
+			  + PM8921_NR_MPPS - 1,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct mfd_cell mpp_cell __devinitdata = {
+	.name		= PM8XXX_MPP_DEV_NAME,
+	.id		= -1,
+	.resources	= mpp_cell_resources,
+	.num_resources	= ARRAY_SIZE(mpp_cell_resources),
+};
+
+static const struct resource rtc_cell_resources[] __devinitconst = {
+	[0] = SINGLE_IRQ_RESOURCE(NULL, PM8921_RTC_ALARM_IRQ),
+	[1] = {
+		.name   = "pmic_rtc_base",
+		.start  = PM8921_RTC_BASE,
+		.end    = PM8921_RTC_BASE,
+		.flags  = IORESOURCE_IO,
+	},
+};
+
+static struct mfd_cell rtc_cell __devinitdata = {
+	.name           = PM8XXX_RTC_DEV_NAME,
+	.id             = -1,
+	.resources      = rtc_cell_resources,
+	.num_resources  = ARRAY_SIZE(rtc_cell_resources),
+};
+
+static const struct resource resources_pwrkey[] __devinitconst = {
+	SINGLE_IRQ_RESOURCE(NULL, PM8921_PWRKEY_REL_IRQ),
+	SINGLE_IRQ_RESOURCE(NULL, PM8921_PWRKEY_PRESS_IRQ),
+};
+
+static struct mfd_cell pwrkey_cell __devinitdata = {
+	.name		= PM8XXX_PWRKEY_DEV_NAME,
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(resources_pwrkey),
+	.resources	= resources_pwrkey,
+};
+
+static const struct resource resources_keypad[] = {
+	SINGLE_IRQ_RESOURCE(NULL, PM8921_KEYPAD_IRQ),
+	SINGLE_IRQ_RESOURCE(NULL, PM8921_KEYSTUCK_IRQ),
+};
+
+static struct mfd_cell keypad_cell __devinitdata = {
+	.name		= PM8XXX_KEYPAD_DEV_NAME,
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(resources_keypad),
+	.resources	= resources_keypad,
+};
+
+static struct mfd_cell debugfs_cell __devinitdata = {
+	.name		= "pm8xxx-debug",
+	.id		= -1,
+	.platform_data	= "pm8921-dbg",
+	.pdata_size	= sizeof("pm8921-dbg"),
+};
+
+static struct mfd_cell pwm_cell __devinitdata = {
+	.name           = PM8XXX_PWM_DEV_NAME,
+	.id             = -1,
+};
+
+static const struct resource charger_cell_resources[] __devinitconst = {
+	SINGLE_IRQ_RESOURCE("USBIN_VALID_IRQ", PM8921_USBIN_VALID_IRQ),
+	SINGLE_IRQ_RESOURCE("USBIN_OV_IRQ", PM8921_USBIN_OV_IRQ),
+	SINGLE_IRQ_RESOURCE("BATT_INSERTED_IRQ", PM8921_BATT_INSERTED_IRQ),
+	SINGLE_IRQ_RESOURCE("VBATDET_LOW_IRQ", PM8921_VBATDET_LOW_IRQ),
+	SINGLE_IRQ_RESOURCE("USBIN_UV_IRQ", PM8921_USBIN_UV_IRQ),
+	SINGLE_IRQ_RESOURCE("VBAT_OV_IRQ", PM8921_VBAT_OV_IRQ),
+	SINGLE_IRQ_RESOURCE("CHGWDOG_IRQ", PM8921_CHGWDOG_IRQ),
+	SINGLE_IRQ_RESOURCE("VCP_IRQ", PM8921_VCP_IRQ),
+	SINGLE_IRQ_RESOURCE("ATCDONE_IRQ", PM8921_ATCDONE_IRQ),
+	SINGLE_IRQ_RESOURCE("ATCFAIL_IRQ", PM8921_ATCFAIL_IRQ),
+	SINGLE_IRQ_RESOURCE("CHGDONE_IRQ", PM8921_CHGDONE_IRQ),
+	SINGLE_IRQ_RESOURCE("CHGFAIL_IRQ", PM8921_CHGFAIL_IRQ),
+	SINGLE_IRQ_RESOURCE("CHGSTATE_IRQ", PM8921_CHGSTATE_IRQ),
+	SINGLE_IRQ_RESOURCE("LOOP_CHANGE_IRQ", PM8921_LOOP_CHANGE_IRQ),
+	SINGLE_IRQ_RESOURCE("FASTCHG_IRQ", PM8921_FASTCHG_IRQ),
+	SINGLE_IRQ_RESOURCE("TRKLCHG_IRQ", PM8921_TRKLCHG_IRQ),
+	SINGLE_IRQ_RESOURCE("BATT_REMOVED_IRQ", PM8921_BATT_REMOVED_IRQ),
+	SINGLE_IRQ_RESOURCE("BATTTEMP_HOT_IRQ", PM8921_BATTTEMP_HOT_IRQ),
+	SINGLE_IRQ_RESOURCE("CHGHOT_IRQ", PM8921_CHGHOT_IRQ),
+	SINGLE_IRQ_RESOURCE("BATTTEMP_COLD_IRQ", PM8921_BATTTEMP_COLD_IRQ),
+	SINGLE_IRQ_RESOURCE("CHG_GONE_IRQ", PM8921_CHG_GONE_IRQ),
+	SINGLE_IRQ_RESOURCE("BAT_TEMP_OK_IRQ", PM8921_BAT_TEMP_OK_IRQ),
+	SINGLE_IRQ_RESOURCE("COARSE_DET_LOW_IRQ", PM8921_COARSE_DET_LOW_IRQ),
+	SINGLE_IRQ_RESOURCE("VDD_LOOP_IRQ", PM8921_VDD_LOOP_IRQ),
+	SINGLE_IRQ_RESOURCE("VREG_OV_IRQ", PM8921_VREG_OV_IRQ),
+	SINGLE_IRQ_RESOURCE("VBATDET_IRQ", PM8921_VBATDET_IRQ),
+	SINGLE_IRQ_RESOURCE("BATFET_IRQ", PM8921_BATFET_IRQ),
+	SINGLE_IRQ_RESOURCE("PSI_IRQ", PM8921_PSI_IRQ),
+	SINGLE_IRQ_RESOURCE("DCIN_VALID_IRQ", PM8921_DCIN_VALID_IRQ),
+	SINGLE_IRQ_RESOURCE("DCIN_OV_IRQ", PM8921_DCIN_OV_IRQ),
+	SINGLE_IRQ_RESOURCE("DCIN_UV_IRQ", PM8921_DCIN_UV_IRQ),
+};
+
+static const struct resource bms_cell_resources[] __devinitconst = {
+	SINGLE_IRQ_RESOURCE("PM8921_BMS_SBI_WRITE_OK", PM8921_BMS_SBI_WRITE_OK),
+	SINGLE_IRQ_RESOURCE("PM8921_BMS_CC_THR", PM8921_BMS_CC_THR),
+	SINGLE_IRQ_RESOURCE("PM8921_BMS_VSENSE_THR", PM8921_BMS_VSENSE_THR),
+	SINGLE_IRQ_RESOURCE("PM8921_BMS_VSENSE_FOR_R", PM8921_BMS_VSENSE_FOR_R),
+	SINGLE_IRQ_RESOURCE("PM8921_BMS_OCV_FOR_R", PM8921_BMS_OCV_FOR_R),
+	SINGLE_IRQ_RESOURCE("PM8921_BMS_GOOD_OCV", PM8921_BMS_GOOD_OCV),
+	SINGLE_IRQ_RESOURCE("PM8921_BMS_VSENSE_AVG", PM8921_BMS_VSENSE_AVG),
+};
+
+static struct mfd_cell charger_cell __devinitdata = {
+	.name		= PM8921_CHARGER_DEV_NAME,
+	.id		= -1,
+	.resources	= charger_cell_resources,
+	.num_resources	= ARRAY_SIZE(charger_cell_resources),
+};
+
+static struct mfd_cell bms_cell __devinitdata = {
+	.name		= PM8921_BMS_DEV_NAME,
+	.id		= -1,
+	.resources	= bms_cell_resources,
+	.num_resources	= ARRAY_SIZE(bms_cell_resources),
+};
+
+static struct mfd_cell misc_cell __devinitdata = {
+	.name           = PM8XXX_MISC_DEV_NAME,
+	.id             = -1,
+};
+
+static struct mfd_cell leds_cell __devinitdata = {
+	.name		= PM8XXX_LEDS_DEV_NAME,
+	.id		= -1,
+};
+
+static const struct resource thermal_alarm_cell_resources[] __devinitconst = {
+	SINGLE_IRQ_RESOURCE("pm8921_tempstat_irq", PM8921_TEMPSTAT_IRQ),
+	SINGLE_IRQ_RESOURCE("pm8921_overtemp_irq", PM8921_OVERTEMP_IRQ),
+};
+
+static struct pm8xxx_tm_core_data thermal_alarm_cdata = {
+	.adc_channel =			CHANNEL_DIE_TEMP,
+	.adc_type =			PM8XXX_TM_ADC_PM8921_ADC,
+	.reg_addr_temp_alarm_ctrl =	REG_TEMP_ALARM_CTRL,
+	.reg_addr_temp_alarm_pwm =	REG_TEMP_ALARM_PWM,
+	.tm_name =			"pm8921_tz",
+	.irq_name_temp_stat =		"pm8921_tempstat_irq",
+	.irq_name_over_temp =		"pm8921_overtemp_irq",
+};
+
+static struct mfd_cell thermal_alarm_cell __devinitdata = {
+	.name		= PM8XXX_TM_DEV_NAME,
+	.id		= -1,
+	.resources	= thermal_alarm_cell_resources,
+	.num_resources	= ARRAY_SIZE(thermal_alarm_cell_resources),
+	.platform_data	= &thermal_alarm_cdata,
+	.pdata_size	= sizeof(struct pm8xxx_tm_core_data),
+};
+
+static const struct resource batt_alarm_cell_resources[] __devinitconst = {
+	SINGLE_IRQ_RESOURCE("pm8921_batt_alarm_irq", PM8921_BATT_ALARM_IRQ),
+};
+
+static struct pm8xxx_batt_alarm_core_data batt_alarm_cdata = {
+	.irq_name		= "pm8921_batt_alarm_irq",
+	.reg_addr_threshold	= REG_BATT_ALARM_THRESH,
+	.reg_addr_ctrl1		= REG_BATT_ALARM_CTRL1,
+	.reg_addr_ctrl2		= REG_BATT_ALARM_CTRL2,
+	.reg_addr_pwm_ctrl	= REG_BATT_ALARM_PWM_CTRL,
+};
+
+static struct mfd_cell batt_alarm_cell __devinitdata = {
+	.name		= PM8XXX_BATT_ALARM_DEV_NAME,
+	.id		= -1,
+	.resources	= batt_alarm_cell_resources,
+	.num_resources	= ARRAY_SIZE(batt_alarm_cell_resources),
+	.platform_data	= &batt_alarm_cdata,
+	.pdata_size	= sizeof(struct pm8xxx_batt_alarm_core_data),
+};
+
+static int __devinit
+pm8921_add_subdevices(const struct pm8921_platform_data *pdata,
+		      struct pm8921 *pmic)
 {
 	int ret = 0, irq_base = 0;
 	struct pm_irq_chip *irq_chip;
+	static struct mfd_cell *mfd_regulators;
+	int i;
 
 	if (pdata->irq_pdata) {
 		pdata->irq_pdata->irq_cdata.nirqs = PM8921_NR_IRQS;
-		pdata->irq_pdata->irq_cdata.rev = rev;
 		irq_base = pdata->irq_pdata->irq_base;
 		irq_chip = pm8xxx_irq_init(pmic->dev, pdata->irq_pdata);
 
@@ -101,16 +359,217 @@
 		}
 		pmic->irq_chip = irq_chip;
 	}
+
+	if (pdata->gpio_pdata) {
+		pdata->gpio_pdata->gpio_cdata.ngpios = PM8921_NR_GPIOS;
+		gpio_cell.platform_data = pdata->gpio_pdata;
+		gpio_cell.pdata_size = sizeof(struct pm8xxx_gpio_platform_data);
+		ret = mfd_add_devices(pmic->dev, 0, &gpio_cell, 1,
+					NULL, irq_base);
+		if (ret) {
+			pr_err("Failed to add gpio subdevice ret=%d\n", ret);
+			goto bail;
+		}
+	}
+
+	if (pdata->mpp_pdata) {
+		pdata->mpp_pdata->core_data.nmpps = PM8921_NR_MPPS;
+		pdata->mpp_pdata->core_data.base_addr = REG_MPP_BASE;
+		mpp_cell.platform_data = pdata->mpp_pdata;
+		mpp_cell.pdata_size = sizeof(struct pm8xxx_mpp_platform_data);
+		ret = mfd_add_devices(pmic->dev, 0, &mpp_cell, 1, NULL,
+					irq_base);
+		if (ret) {
+			pr_err("Failed to add mpp subdevice ret=%d\n", ret);
+			goto bail;
+		}
+	}
+
+	if (pdata->rtc_pdata) {
+		rtc_cell.platform_data = pdata->rtc_pdata;
+		rtc_cell.pdata_size = sizeof(struct pm8xxx_rtc_platform_data);
+		ret = mfd_add_devices(pmic->dev, 0, &rtc_cell, 1, NULL,
+				irq_base);
+		if (ret) {
+			pr_err("Failed to add rtc subdevice ret=%d\n", ret);
+			goto bail;
+		}
+	}
+
+	if (pdata->pwrkey_pdata) {
+		pwrkey_cell.platform_data = pdata->pwrkey_pdata;
+		pwrkey_cell.pdata_size =
+			sizeof(struct pm8xxx_pwrkey_platform_data);
+		ret = mfd_add_devices(pmic->dev, 0, &pwrkey_cell, 1, NULL,
+					irq_base);
+		if (ret) {
+			pr_err("Failed to add pwrkey subdevice ret=%d\n", ret);
+			goto bail;
+		}
+	}
+
+	if (pdata->keypad_pdata) {
+		keypad_cell.platform_data = pdata->keypad_pdata;
+		keypad_cell.pdata_size =
+			sizeof(struct pm8xxx_keypad_platform_data);
+		ret = mfd_add_devices(pmic->dev, 0, &keypad_cell, 1, NULL,
+					irq_base);
+		if (ret) {
+			pr_err("Failed to add keypad subdevice ret=%d\n", ret);
+			goto bail;
+		}
+	}
+
+	if (pdata->charger_pdata) {
+		pdata->charger_pdata->charger_cdata.vbat_channel = CHANNEL_VBAT;
+		charger_cell.platform_data = pdata->charger_pdata;
+		charger_cell.pdata_size =
+				sizeof(struct pm8921_charger_platform_data);
+		ret = mfd_add_devices(pmic->dev, 0, &charger_cell, 1, NULL,
+					irq_base);
+		if (ret) {
+			pr_err("Failed to add charger subdevice ret=%d\n", ret);
+			goto bail;
+		}
+	}
+
+	if (pdata->adc_pdata) {
+		adc_cell.platform_data = pdata->adc_pdata;
+		adc_cell.pdata_size =
+			sizeof(struct pm8921_adc_platform_data);
+		ret = mfd_add_devices(pmic->dev, 0, &adc_cell, 1, NULL,
+					irq_base);
+		if (ret) {
+			pr_err("Failed to add adc subdevice ret=%d\n", ret);
+			goto bail;
+		}
+	}
+
+	if (pdata->bms_pdata) {
+		bms_cell.platform_data = pdata->bms_pdata;
+		bms_cell.pdata_size =
+				sizeof(struct pm8921_bms_platform_data);
+		ret = mfd_add_devices(pmic->dev, 0, &bms_cell, 1, NULL,
+					irq_base);
+		if (ret) {
+			pr_err("Failed to add bms subdevice ret=%d\n", ret);
+			goto bail;
+		}
+	}
+
+	/* Add one device for each regulator used by the board. */
+	if (pdata->num_regulators > 0 && pdata->regulator_pdatas) {
+		mfd_regulators = kzalloc(sizeof(struct mfd_cell)
+					 * (pdata->num_regulators), GFP_KERNEL);
+		if (!mfd_regulators) {
+			pr_err("Cannot allocate %zu bytes for pm8921 regulator "
+				"mfd cells\n", sizeof(struct mfd_cell)
+						* (pdata->num_regulators));
+			ret = -ENOMEM;
+			goto bail;
+		}
+		for (i = 0; i < pdata->num_regulators; i++) {
+			mfd_regulators[i].name = PM8921_REGULATOR_DEV_NAME;
+			mfd_regulators[i].id = pdata->regulator_pdatas[i].id;
+			mfd_regulators[i].platform_data =
+				&(pdata->regulator_pdatas[i]);
+			mfd_regulators[i].pdata_size =
+				sizeof(struct pm8921_regulator_platform_data);
+		}
+		ret = mfd_add_devices(pmic->dev, 0, mfd_regulators,
+				pdata->num_regulators, NULL, irq_base);
+		if (ret) {
+			pr_err("Failed to add regulator subdevices ret=%d\n",
+				ret);
+			kfree(mfd_regulators);
+			goto bail;
+		}
+		pmic->mfd_regulators = mfd_regulators;
+	}
+
+	ret = mfd_add_devices(pmic->dev, 0, &debugfs_cell, 1, NULL, irq_base);
+	if (ret) {
+		pr_err("Failed to add debugfs subdevice ret=%d\n", ret);
+		goto bail;
+	}
+
+	ret = mfd_add_devices(pmic->dev, 0, &pwm_cell, 1, NULL, 0);
+	if (ret) {
+		pr_err("Failed to add pwm subdevice ret=%d\n", ret);
+		goto bail;
+	}
+
+	if (pdata->misc_pdata) {
+		misc_cell.platform_data = pdata->misc_pdata;
+		misc_cell.pdata_size = sizeof(struct pm8xxx_misc_platform_data);
+		ret = mfd_add_devices(pmic->dev, 0, &misc_cell, 1, NULL,
+				      irq_base);
+		if (ret) {
+			pr_err("Failed to add misc subdevice ret=%d\n", ret);
+			goto bail;
+		}
+	}
+
+	if (pdata->leds_pdata) {
+		/* PM8921 supports only 4 LED DRVs */
+		for (i = 0; i < pdata->leds_pdata->num_leds; i++) {
+			if (pdata->leds_pdata->leds[i].flags >
+						PM8XXX_ID_LED_2) {
+				pr_err("%s: LED %d not supported\n", __func__,
+					pdata->leds_pdata->leds[i].flags);
+				ret = -EINVAL;
+				goto bail;
+			}
+		}
+		leds_cell.platform_data = pdata->leds_pdata;
+		leds_cell.pdata_size = sizeof(struct led_platform_data);
+		ret = mfd_add_devices(pmic->dev, 0, &leds_cell, 1, NULL, 0);
+		if (ret) {
+			pr_err("Failed to add leds subdevice ret=%d\n", ret);
+			goto bail;
+		}
+	}
+
+	ret = mfd_add_devices(pmic->dev, 0, &thermal_alarm_cell, 1, NULL,
+				irq_base);
+	if (ret) {
+		pr_err("Failed to add thermal alarm subdevice ret=%d\n",
+			ret);
+		goto bail;
+	}
+
+	ret = mfd_add_devices(pmic->dev, 0, &batt_alarm_cell, 1, NULL,
+				irq_base);
+	if (ret) {
+		pr_err("Failed to add battery alarm subdevice ret=%d\n",
+			ret);
+		goto bail;
+	}
+
+	return 0;
+bail:
+	if (pmic->irq_chip) {
+		pm8xxx_irq_exit(pmic->irq_chip);
+		pmic->irq_chip = NULL;
+	}
 	return ret;
 }
 
+static const char * const pm8921_rev_names[] = {
+	[PM8XXX_REVISION_8921_TEST]	= "test",
+	[PM8XXX_REVISION_8921_1p0]	= "1.0",
+	[PM8XXX_REVISION_8921_1p1]	= "1.1",
+	[PM8XXX_REVISION_8921_2p0]	= "2.0",
+};
+
 static int __devinit pm8921_probe(struct platform_device *pdev)
 {
 	const struct pm8921_platform_data *pdata = pdev->dev.platform_data;
+	const char *revision_name = "unknown";
 	struct pm8921 *pmic;
+	enum pm8xxx_version version;
+	int revision;
 	int rc;
 	u8 val;
-	u32 rev;
 
 	if (!pdata) {
 		pr_err("missing platform data\n");
@@ -130,7 +589,7 @@
 		goto err_read_rev;
 	}
 	pr_info("PMIC revision 1: %02X\n", val);
-	rev = val;
+	pmic->rev_registers = val;
 
 	/* Read PMIC chip revision 2 */
 	rc = msm_ssbi_read(pdev->dev.parent, REG_HWREV_2, &val, sizeof(val));
@@ -140,13 +599,24 @@
 		goto err_read_rev;
 	}
 	pr_info("PMIC revision 2: %02X\n", val);
-	rev |= val << BITS_PER_BYTE;
+	pmic->rev_registers |= val << BITS_PER_BYTE;
 
 	pmic->dev = &pdev->dev;
 	pm8921_drvdata.pm_chip_data = pmic;
 	platform_set_drvdata(pdev, &pm8921_drvdata);
 
-	rc = pm8921_add_subdevices(pdata, pmic, rev);
+	/* Print out human readable version and revision names. */
+	version = pm8xxx_get_version(pmic->dev);
+	if (version == PM8XXX_VERSION_8921) {
+		revision = pm8xxx_get_revision(pmic->dev);
+		if (revision >= 0 && revision < ARRAY_SIZE(pm8921_rev_names))
+			revision_name = pm8921_rev_names[revision];
+		pr_info("PMIC version: PM8921 rev %s\n", revision_name);
+	} else {
+		WARN_ON(version != PM8XXX_VERSION_8921);
+	}
+
+	rc = pm8921_add_subdevices(pdata, pmic);
 	if (rc) {
 		pr_err("Cannot add subdevices rc=%d\n", rc);
 		goto err;
@@ -180,6 +650,7 @@
 		pmic->irq_chip = NULL;
 	}
 	platform_set_drvdata(pdev, NULL);
+	kfree(pmic->mfd_regulators);
 	kfree(pmic);
 
 	return 0;
@@ -198,7 +669,7 @@
 {
 	return platform_driver_register(&pm8921_driver);
 }
-subsys_initcall(pm8921_init);
+postcore_initcall(pm8921_init);
 
 static void __exit pm8921_exit(void)
 {
diff --git a/drivers/mfd/pm8xxx-batt-alarm.c b/drivers/mfd/pm8xxx-batt-alarm.c
new file mode 100644
index 0000000..92ade1c
--- /dev/null
+++ b/drivers/mfd/pm8xxx-batt-alarm.c
@@ -0,0 +1,805 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Qualcomm PMIC PM8xxx Battery Alarm driver
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mfd/pm8xxx/core.h>
+#include <linux/mfd/pm8xxx/batt-alarm.h>
+
+/* Available voltage threshold values */
+#define THRESHOLD_MIN_MV		2500
+#define THRESHOLD_MAX_MV		5675
+#define THRESHOLD_STEP_MV		25
+
+/* Register bit definitions */
+
+/* Threshold register */
+#define THRESHOLD_UPPER_MASK		0xF0
+#define THRESHOLD_LOWER_MASK		0x0F
+#define THRESHOLD_UPPER_SHIFT		4
+#define THRESHOLD_LOWER_SHIFT		0
+
+/* CTRL 1 register */
+#define CTRL1_BATT_ALARM_ENABLE_MASK	0x80
+#define CTRL1_BATT_ALARM_ENABLE		0x80
+#define CTRL1_BATT_ALARM_DISABLE	0x00
+#define CTRL1_HOLD_TIME_MASK		0x70
+#define CTRL1_STATUS_UPPER_MASK		0x02
+#define CTRL1_STATUS_LOWER_MASK		0x01
+#define CTRL1_HOLD_TIME_SHIFT		4
+#define CTRL1_HOLD_TIME_MIN		0
+#define CTRL1_HOLD_TIME_MAX		7
+
+/* CTRL 2 register */
+#define CTRL2_COMP_UPPER_DISABLE_MASK	0x80
+#define CTRL2_COMP_UPPER_ENABLE		0x00
+#define CTRL2_COMP_UPPER_DISABLE	0x80
+#define CTRL2_COMP_LOWER_DISABLE_MASK	0x40
+#define CTRL2_COMP_LOWER_ENABLE		0x00
+#define CTRL2_COMP_LOWER_DISABLE	0x40
+#define CTRL2_FINE_STEP_UPPER_MASK	0x30
+#define CTRL2_RANGE_EXT_UPPER_MASK	0x08
+#define CTRL2_FINE_STEP_LOWER_MASK	0x06
+#define CTRL2_RANGE_EXT_LOWER_MASK	0x01
+#define CTRL2_FINE_STEP_UPPER_SHIFT	4
+#define CTRL2_FINE_STEP_LOWER_SHIFT	1
+
+/* PWM control register */
+#define PWM_CTRL_ALARM_EN_MASK		0xC0
+#define PWM_CTRL_ALARM_EN_NEVER		0x00
+#define PWM_CTRL_ALARM_EN_TCXO		0x40
+#define PWM_CTRL_ALARM_EN_PWM		0x80
+#define PWM_CTRL_ALARM_EN_ALWAYS	0xC0
+#define PWM_CTRL_PRE_MASK		0x38
+#define PWM_CTRL_DIV_MASK		0x07
+#define PWM_CTRL_PRE_SHIFT		3
+#define PWM_CTRL_DIV_SHIFT		0
+#define PWM_CTRL_PRE_MIN		0
+#define PWM_CTRL_PRE_MAX		7
+#define PWM_CTRL_DIV_MIN		1
+#define PWM_CTRL_DIV_MAX		7
+
+/* PWM control input range */
+#define PWM_CTRL_PRE_INPUT_MIN		2
+#define PWM_CTRL_PRE_INPUT_MAX		9
+#define PWM_CTRL_DIV_INPUT_MIN		2
+#define PWM_CTRL_DIV_INPUT_MAX		8
+
+/* Available voltage threshold values */
+#define THRESHOLD_BASIC_MIN_MV		2800
+#define THRESHOLD_EXT_MIN_MV		4400
+
+/*
+ * Default values used during initialization:
+ * Slowest PWM rate to ensure minimal status jittering when crossing thresholds.
+ * Largest hold time also helps reduce status value jittering.  Comparators
+ * are disabled by default and must be turned on by calling
+ * pm8xxx_batt_alarm_enable.
+ */
+#define DEFAULT_THRESHOLD_LOWER		3200
+#define DEFAULT_THRESHOLD_UPPER		4300
+#define DEFAULT_HOLD_TIME		PM8XXX_BATT_ALARM_HOLD_TIME_16_MS
+#define DEFAULT_USE_PWM			1
+#define DEFAULT_PWM_SCALER		9
+#define DEFAULT_PWM_DIVIDER		8
+#define DEFAULT_LOWER_ENABLE		0
+#define DEFAULT_UPPER_ENABLE		0
+
+struct pm8xxx_batt_alarm_chip {
+	struct pm8xxx_batt_alarm_core_data	cdata;
+	struct srcu_notifier_head		irq_notifier_list;
+	struct work_struct			irq_work;
+	struct device				*dev;
+	struct mutex				lock;
+	unsigned int				irq;
+	int					notifier_count;
+	u8					reg_threshold;
+	u8					reg_ctrl1;
+	u8					reg_ctrl2;
+	u8					reg_pwm_ctrl;
+};
+static struct pm8xxx_batt_alarm_chip *the_battalarm;
+
+static int pm8xxx_reg_write(struct pm8xxx_batt_alarm_chip *chip, u16 addr,
+				u8 val, u8 mask, u8 *reg_save)
+{
+	int rc = 0;
+	u8 reg;
+
+	reg = (*reg_save & ~mask) | (val & mask);
+	if (reg != *reg_save)
+		rc = pm8xxx_writeb(chip->dev->parent, addr, reg);
+	if (rc)
+		pr_err("pm8xxx_writeb failed; addr=%03X, rc=%d\n", addr, rc);
+	else
+		*reg_save = reg;
+	return rc;
+}
+
+/**
+ * pm8xxx_batt_alarm_enable - enable one of the battery voltage threshold
+ *			      comparators
+ * @comparator:	selects which comparator to enable
+ *
+ * RETURNS: an appropriate -ERRNO error value on error, or zero for success.
+ */
+int pm8xxx_batt_alarm_enable(enum pm8xxx_batt_alarm_comparator comparator)
+{
+	struct pm8xxx_batt_alarm_chip *chip = the_battalarm;
+	int rc;
+	u8 val_ctrl2 = 0, mask_ctrl2 = 0;
+
+	if (!chip) {
+		pr_err("no battery alarm device found.\n");
+		return -ENODEV;
+	}
+
+	if (comparator < 0 || comparator > PM8XXX_BATT_ALARM_UPPER_COMPARATOR) {
+		pr_err("invalid comparator ID number: %d\n", comparator);
+		return -EINVAL;
+	}
+
+	if (comparator == PM8XXX_BATT_ALARM_LOWER_COMPARATOR) {
+		val_ctrl2 = CTRL2_COMP_LOWER_ENABLE;
+		mask_ctrl2 = CTRL2_COMP_LOWER_DISABLE_MASK;
+	} else {
+		val_ctrl2 = CTRL2_COMP_UPPER_ENABLE;
+		mask_ctrl2 = CTRL2_COMP_UPPER_DISABLE_MASK;
+	}
+
+	mutex_lock(&chip->lock);
+
+	/* Enable the battery alarm block. */
+	rc = pm8xxx_reg_write(chip, chip->cdata.reg_addr_ctrl1,
+				CTRL1_BATT_ALARM_ENABLE,
+				CTRL1_BATT_ALARM_ENABLE_MASK, &chip->reg_ctrl1);
+	if (rc)
+		goto bail;
+
+	/* Enable the individual comparators. */
+	rc = pm8xxx_reg_write(chip, chip->cdata.reg_addr_ctrl2, val_ctrl2,
+				mask_ctrl2, &chip->reg_ctrl2);
+
+bail:
+	mutex_unlock(&chip->lock);
+	return rc;
+}
+EXPORT_SYMBOL(pm8xxx_batt_alarm_enable);
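+
+/*
+ * Illustrative call sequence (not part of this driver): a client such as a
+ * charger driver would typically program a threshold first and then enable
+ * the matching comparator.  The 3500 mV value below is only an example.
+ *
+ *	rc = pm8xxx_batt_alarm_threshold_set(
+ *			PM8XXX_BATT_ALARM_LOWER_COMPARATOR, 3500);
+ *	if (!rc)
+ *		rc = pm8xxx_batt_alarm_enable(
+ *			PM8XXX_BATT_ALARM_LOWER_COMPARATOR);
+ */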
+
+/**
+ * pm8xxx_batt_alarm_disable - disable one of the battery voltage threshold
+ *			       comparators
+ * @comparator:	selects which comparator to disable
+ *
+ * RETURNS: an appropriate -ERRNO error value on error, or zero for success.
+ */
+int pm8xxx_batt_alarm_disable(enum pm8xxx_batt_alarm_comparator comparator)
+{
+	struct pm8xxx_batt_alarm_chip *chip = the_battalarm;
+	int rc;
+	u8 val_ctrl1 = 0, val_ctrl2 = 0, mask_ctrl2 = 0;
+
+	if (!chip) {
+		pr_err("no battery alarm device found.\n");
+		return -ENODEV;
+	}
+
+	if (comparator < 0 || comparator > PM8XXX_BATT_ALARM_UPPER_COMPARATOR) {
+		pr_err("invalid comparator ID number: %d\n", comparator);
+		return -EINVAL;
+	}
+
+	if (comparator == PM8XXX_BATT_ALARM_LOWER_COMPARATOR) {
+		val_ctrl2 = CTRL2_COMP_LOWER_DISABLE;
+		mask_ctrl2 = CTRL2_COMP_LOWER_DISABLE_MASK;
+	} else {
+		val_ctrl2 = CTRL2_COMP_UPPER_DISABLE;
+		mask_ctrl2 = CTRL2_COMP_UPPER_DISABLE_MASK;
+	}
+
+	mutex_lock(&chip->lock);
+
+	/* Disable the specified comparator. */
+	rc = pm8xxx_reg_write(chip, chip->cdata.reg_addr_ctrl2, val_ctrl2,
+				mask_ctrl2, &chip->reg_ctrl2);
+	if (rc)
+		goto bail;
+
+	/* Disable the battery alarm block if both comparators are disabled. */
+	val_ctrl2 = chip->reg_ctrl2
+	      & (CTRL2_COMP_LOWER_DISABLE_MASK | CTRL2_COMP_UPPER_DISABLE_MASK);
+	if (val_ctrl2 == (CTRL2_COMP_LOWER_DISABLE | CTRL2_COMP_UPPER_DISABLE))
+		val_ctrl1 = CTRL1_BATT_ALARM_DISABLE;
+	else
+		val_ctrl1 = CTRL1_BATT_ALARM_ENABLE;
+
+	rc = pm8xxx_reg_write(chip, chip->cdata.reg_addr_ctrl1, val_ctrl1,
+				CTRL1_BATT_ALARM_ENABLE_MASK, &chip->reg_ctrl1);
+
+bail:
+	mutex_unlock(&chip->lock);
+	return rc;
+}
+EXPORT_SYMBOL(pm8xxx_batt_alarm_disable);
+
+/**
+ * pm8xxx_batt_alarm_threshold_set - set a battery alarm voltage threshold
+ * @comparator:		selects which comparator to set the threshold of
+ * @threshold_mV:	battery voltage threshold in millivolts
+ *			set points = 2500-5675 mV in 25 mV steps
+ *
+ * RETURNS: an appropriate -ERRNO error value on error, or zero for success.
+ */
+int pm8xxx_batt_alarm_threshold_set(
+	enum pm8xxx_batt_alarm_comparator comparator, int threshold_mV)
+{
+	struct pm8xxx_batt_alarm_chip *chip = the_battalarm;
+	int step, fine_step, rc;
+	u8 val_threshold = 0, val_ctrl2 = 0;
+	int threshold_mask, threshold_shift, range_ext_mask, fine_step_mask;
+	int fine_step_shift;
+
+	if (!chip) {
+		pr_err("no battery alarm device found.\n");
+		return -ENXIO;
+	}
+
+	if (comparator < 0 || comparator > PM8XXX_BATT_ALARM_UPPER_COMPARATOR) {
+		pr_err("invalid comparator ID number: %d\n", comparator);
+		return -EINVAL;
+	}
+
+	if (threshold_mV < THRESHOLD_MIN_MV
+	    || threshold_mV > THRESHOLD_MAX_MV) {
+		pr_err("threshold value, %d mV, is outside of allowable "
+			"range: [%d, %d] mV\n", threshold_mV,
+			THRESHOLD_MIN_MV, THRESHOLD_MAX_MV);
+		return -EINVAL;
+	}
+
+	if (comparator == PM8XXX_BATT_ALARM_LOWER_COMPARATOR) {
+		threshold_mask = THRESHOLD_LOWER_MASK;
+		threshold_shift = THRESHOLD_LOWER_SHIFT;
+		range_ext_mask = CTRL2_RANGE_EXT_LOWER_MASK;
+		fine_step_mask = CTRL2_FINE_STEP_LOWER_MASK;
+		fine_step_shift = CTRL2_FINE_STEP_LOWER_SHIFT;
+	} else {
+		threshold_mask = THRESHOLD_UPPER_MASK;
+		threshold_shift = THRESHOLD_UPPER_SHIFT;
+		range_ext_mask = CTRL2_RANGE_EXT_UPPER_MASK;
+		fine_step_mask = CTRL2_FINE_STEP_UPPER_MASK;
+		fine_step_shift = CTRL2_FINE_STEP_UPPER_SHIFT;
+	}
+
+	/* Determine register settings to achieve the threshold. */
+	if (threshold_mV < THRESHOLD_BASIC_MIN_MV) {
+		/* Extended low range */
+		val_ctrl2 |= range_ext_mask;
+
+		step = (threshold_mV - THRESHOLD_MIN_MV) / THRESHOLD_STEP_MV;
+
+		fine_step = step & 0x3;
+		/* Extended low range is for steps 0 to 2 */
+		step >>= 2;
+	} else if (threshold_mV >= THRESHOLD_EXT_MIN_MV) {
+		/* Extended high range */
+		val_ctrl2 |= range_ext_mask;
+
+		step = (threshold_mV - THRESHOLD_EXT_MIN_MV)
+			/ THRESHOLD_STEP_MV;
+
+		fine_step = step & 0x3;
+		/* Extended high range is for steps 3 to 15 */
+		step = (step >> 2) + 3;
+	} else {
+		/* Basic range */
+		step = (threshold_mV - THRESHOLD_BASIC_MIN_MV)
+			/ THRESHOLD_STEP_MV;
+
+		fine_step = step & 0x3;
+		step >>= 2;
+	}
+	val_threshold |= step << threshold_shift;
+	val_ctrl2 |= (fine_step << fine_step_shift) & fine_step_mask;
+
+	mutex_lock(&chip->lock);
+	rc = pm8xxx_reg_write(chip, chip->cdata.reg_addr_threshold,
+			val_threshold, threshold_mask, &chip->reg_threshold);
+	if (rc)
+		goto bail;
+
+	rc = pm8xxx_reg_write(chip, chip->cdata.reg_addr_ctrl2, val_ctrl2,
+			range_ext_mask | fine_step_mask, &chip->reg_ctrl2);
+
+bail:
+	mutex_unlock(&chip->lock);
+	return rc;
+}
+EXPORT_SYMBOL(pm8xxx_batt_alarm_threshold_set);
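+
+/*
+ * Worked example of the encoding above (informational only): a threshold of
+ * 3200 mV falls in the basic range, so
+ *	step      = (3200 - 2800) / 25 = 16
+ *	fine_step = 16 & 0x3           = 0
+ *	coarse    = 16 >> 2            = 4
+ * i.e. coarse step 4 is written to the threshold field, fine step 0 to
+ * CTRL 2, and the range extension bit is left clear.
+ */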
+
+/**
+ * pm8xxx_batt_alarm_status_read - get status of both threshold comparators
+ *
+ * RETURNS:	< 0	   = error
+ *		  0	   = battery voltage ok
+ *		BIT(0) set = battery voltage below lower threshold
+ *		BIT(1) set = battery voltage above upper threshold
+ */
+int pm8xxx_batt_alarm_status_read(void)
+{
+	struct pm8xxx_batt_alarm_chip *chip = the_battalarm;
+	int status, rc;
+
+	if (!chip) {
+		pr_err("no battery alarm device found.\n");
+		return -ENXIO;
+	}
+
+	mutex_lock(&chip->lock);
+	rc = pm8xxx_readb(chip->dev->parent, chip->cdata.reg_addr_ctrl1,
+			  &chip->reg_ctrl1);
+
+	status = ((chip->reg_ctrl1 & CTRL1_STATUS_LOWER_MASK)
+			? PM8XXX_BATT_ALARM_STATUS_BELOW_LOWER : 0)
+		| ((chip->reg_ctrl1 & CTRL1_STATUS_UPPER_MASK)
+			? PM8XXX_BATT_ALARM_STATUS_ABOVE_UPPER : 0);
+	mutex_unlock(&chip->lock);
+
+	if (rc) {
+		pr_err("pm8xxx_readb failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	return status;
+}
+EXPORT_SYMBOL(pm8xxx_batt_alarm_status_read);
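+
+/*
+ * Illustrative caller sketch (not part of this driver); the handlers named
+ * below are hypothetical:
+ *
+ *	int status = pm8xxx_batt_alarm_status_read();
+ *
+ *	if (status < 0)
+ *		pr_err("status read failed, rc=%d\n", status);
+ *	else if (status & PM8XXX_BATT_ALARM_STATUS_BELOW_LOWER)
+ *		handle_low_battery();
+ *	else if (status & PM8XXX_BATT_ALARM_STATUS_ABOVE_UPPER)
+ *		handle_high_battery();
+ */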
+
+/**
+ * pm8xxx_batt_alarm_hold_time_set - set hold time of interrupt output
+ * @hold_time:	amount of time that battery voltage must remain outside of the
+ *		threshold range before the battery alarm interrupt triggers
+ *
+ * RETURNS: an appropriate -ERRNO error value on error, or zero for success.
+ */
+int pm8xxx_batt_alarm_hold_time_set(enum pm8xxx_batt_alarm_hold_time hold_time)
+{
+	struct pm8xxx_batt_alarm_chip *chip = the_battalarm;
+	int rc;
+	u8 reg_ctrl1 = 0;
+
+	if (!chip) {
+		pr_err("no battery alarm device found.\n");
+		return -ENXIO;
+	}
+
+	if (hold_time < CTRL1_HOLD_TIME_MIN
+	    || hold_time > CTRL1_HOLD_TIME_MAX) {
+
+		pr_err("hold time, %d, is outside of allowable range: "
+			"[%d, %d]\n", hold_time, CTRL1_HOLD_TIME_MIN,
+			CTRL1_HOLD_TIME_MAX);
+		return -EINVAL;
+	}
+
+	reg_ctrl1 = hold_time << CTRL1_HOLD_TIME_SHIFT;
+
+	mutex_lock(&chip->lock);
+	rc = pm8xxx_reg_write(chip, chip->cdata.reg_addr_ctrl1, reg_ctrl1,
+			      CTRL1_HOLD_TIME_MASK, &chip->reg_ctrl1);
+	mutex_unlock(&chip->lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(pm8xxx_batt_alarm_hold_time_set);
+
+/**
+ * pm8xxx_batt_alarm_pwm_rate_set - set battery alarm update rate
+ * @use_pwm:		1 = use PWM update rate, 0 = comparators always active
+ * @clock_scaler:	PWM clock scaler = 2 to 9
+ * @clock_divider:	PWM clock divider = 2 to 8
+ *
+ * This function sets the rate at which the battery alarm module enables
+ * the threshold comparators.  The rate is determined by the following equation:
+ *
+ * f_update = (1024 Hz) / (clock_divider * (2 ^ clock_scaler))
+ *
+ * Thus, the update rate can range from 0.25 Hz to 128 Hz.
+ *
+ * RETURNS: an appropriate -ERRNO error value on error, or zero for success.
+ */
+int pm8xxx_batt_alarm_pwm_rate_set(int use_pwm, int clock_scaler,
+				   int clock_divider)
+{
+	struct pm8xxx_batt_alarm_chip *chip = the_battalarm;
+	int rc;
+	u8 reg_pwm_ctrl = 0, mask = 0;
+
+	if (!chip) {
+		pr_err("no battery alarm device found.\n");
+		return -ENXIO;
+	}
+
+	if (use_pwm && (clock_scaler < PWM_CTRL_PRE_INPUT_MIN
+	    || clock_scaler > PWM_CTRL_PRE_INPUT_MAX)) {
+		pr_err("PWM clock scaler, %d, is outside of allowable range: "
+			"[%d, %d]\n", clock_scaler, PWM_CTRL_PRE_INPUT_MIN,
+			PWM_CTRL_PRE_INPUT_MAX);
+		return -EINVAL;
+	}
+
+	if (use_pwm && (clock_divider < PWM_CTRL_DIV_INPUT_MIN
+	    || clock_divider > PWM_CTRL_DIV_INPUT_MAX)) {
+		pr_err("PWM clock divider, %d, is outside of allowable range: "
+			"[%d, %d]\n", clock_divider, PWM_CTRL_DIV_INPUT_MIN,
+			PWM_CTRL_DIV_INPUT_MAX);
+		return -EINVAL;
+	}
+
+	if (!use_pwm) {
+		/* Turn off PWM control and always enable. */
+		reg_pwm_ctrl = PWM_CTRL_ALARM_EN_ALWAYS;
+		mask = PWM_CTRL_ALARM_EN_MASK;
+	} else {
+		/* Use PWM control. */
+		reg_pwm_ctrl = PWM_CTRL_ALARM_EN_PWM;
+		mask = PWM_CTRL_ALARM_EN_MASK | PWM_CTRL_PRE_MASK
+			| PWM_CTRL_DIV_MASK;
+
+		clock_scaler -= PWM_CTRL_PRE_INPUT_MIN - PWM_CTRL_PRE_MIN;
+		clock_divider -= PWM_CTRL_DIV_INPUT_MIN - PWM_CTRL_DIV_MIN;
+
+		reg_pwm_ctrl |= (clock_scaler << PWM_CTRL_PRE_SHIFT)
+				& PWM_CTRL_PRE_MASK;
+		reg_pwm_ctrl |= (clock_divider << PWM_CTRL_DIV_SHIFT)
+				& PWM_CTRL_DIV_MASK;
+	}
+
+	mutex_lock(&chip->lock);
+	rc = pm8xxx_reg_write(chip, chip->cdata.reg_addr_pwm_ctrl, reg_pwm_ctrl,
+			      mask, &chip->reg_pwm_ctrl);
+	mutex_unlock(&chip->lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(pm8xxx_batt_alarm_pwm_rate_set);
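+
+/*
+ * Example of the update rate equation above (informational only): the driver
+ * defaults of clock_scaler = 9 and clock_divider = 8 give
+ *	f_update = 1024 / (8 * 2^9) = 0.25 Hz
+ * while the fastest allowed setting, clock_scaler = 2 and clock_divider = 2,
+ * gives
+ *	f_update = 1024 / (2 * 2^2) = 128 Hz
+ */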
+
+/*
+ * Handle the BATT_ALARM interrupt:
+ * Battery voltage is above or below threshold range.
+ */
+static irqreturn_t pm8xxx_batt_alarm_isr(int irq, void *data)
+{
+	struct pm8xxx_batt_alarm_chip *chip = data;
+
+	disable_irq_nosync(chip->irq);
+	schedule_work(&chip->irq_work);
+
+	return IRQ_HANDLED;
+}
+
+static void pm8xxx_batt_alarm_isr_work(struct work_struct *work)
+{
+	struct pm8xxx_batt_alarm_chip *chip
+		= container_of(work, struct pm8xxx_batt_alarm_chip, irq_work);
+	int status;
+
+	if (chip) {
+		status = pm8xxx_batt_alarm_status_read();
+
+		if (status < 0)
+			pr_err("failed to read status, rc=%d\n", status);
+		else
+			srcu_notifier_call_chain(&chip->irq_notifier_list,
+						 status, NULL);
+	}
+
+	enable_irq(chip->irq);
+}
+
+/**
+ * pm8xxx_batt_alarm_register_notifier - register a notifier to run when a
+ *	battery voltage change interrupt fires
+ * @nb:	notifier block containing callback function to register
+ *
+ * nb->notifier_call must point to a function of this form -
+ * int (*notifier_call)(struct notifier_block *nb, unsigned long status,
+ *			void *unused);
+ * "status" will receive the battery alarm status; "unused" will be NULL.
+ *
+ * RETURNS: an appropriate -ERRNO error value on error, or zero for success.
+ */
+int pm8xxx_batt_alarm_register_notifier(struct notifier_block *nb)
+{
+	struct pm8xxx_batt_alarm_chip *chip = the_battalarm;
+	int rc;
+
+	if (!chip) {
+		pr_err("no battery alarm device found.\n");
+		return -ENXIO;
+	}
+
+	rc = srcu_notifier_chain_register(&chip->irq_notifier_list, nb);
+	mutex_lock(&chip->lock);
+	if (rc == 0) {
+		if (chip->notifier_count == 0) {
+			enable_irq(chip->irq);
+			rc = irq_set_irq_wake(chip->irq, 1);
+		}
+
+		chip->notifier_count++;
+	}
+
+	mutex_unlock(&chip->lock);
+	return rc;
+}
+EXPORT_SYMBOL(pm8xxx_batt_alarm_register_notifier);
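+
+/*
+ * Illustrative notifier registration (not part of this driver); the callback
+ * and notifier block names below are hypothetical:
+ *
+ *	static int batt_alarm_notify(struct notifier_block *nb,
+ *				     unsigned long status, void *unused)
+ *	{
+ *		if (status & PM8XXX_BATT_ALARM_STATUS_BELOW_LOWER)
+ *			handle_low_battery();
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block batt_alarm_nb = {
+ *		.notifier_call = batt_alarm_notify,
+ *	};
+ *
+ *	rc = pm8xxx_batt_alarm_register_notifier(&batt_alarm_nb);
+ */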
+
+/**
+ * pm8xxx_batt_alarm_unregister_notifier - unregister a notifier that is run
+ *	when a battery voltage change interrupt fires
+ * @nb:	notifier block containing callback function to unregister
+ *
+ * RETURNS: an appropriate -ERRNO error value on error, or zero for success.
+ */
+int pm8xxx_batt_alarm_unregister_notifier(struct notifier_block *nb)
+{
+	struct pm8xxx_batt_alarm_chip *chip = the_battalarm;
+	int rc;
+
+	if (!chip) {
+		pr_err("no battery alarm device found.\n");
+		return -ENXIO;
+	}
+
+	rc = srcu_notifier_chain_unregister(&chip->irq_notifier_list, nb);
+	if (rc == 0) {
+		mutex_lock(&chip->lock);
+
+		chip->notifier_count--;
+
+		if (chip->notifier_count == 0) {
+			rc = irq_set_irq_wake(chip->irq, 0);
+			disable_irq(chip->irq);
+		}
+
+		WARN_ON(chip->notifier_count < 0);
+
+		mutex_unlock(&chip->lock);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(pm8xxx_batt_alarm_unregister_notifier);
+
+static int pm8xxx_batt_alarm_reg_init(struct pm8xxx_batt_alarm_chip *chip)
+{
+	int rc = 0;
+
+	/* save the current register states */
+	rc = pm8xxx_readb(chip->dev->parent, chip->cdata.reg_addr_threshold,
+			  &chip->reg_threshold);
+	if (rc)
+		goto bail;
+
+	rc = pm8xxx_readb(chip->dev->parent, chip->cdata.reg_addr_ctrl1,
+			  &chip->reg_ctrl1);
+	if (rc)
+		goto bail;
+
+	rc = pm8xxx_readb(chip->dev->parent, chip->cdata.reg_addr_ctrl2,
+			  &chip->reg_ctrl2);
+	if (rc)
+		goto bail;
+
+	rc = pm8xxx_readb(chip->dev->parent, chip->cdata.reg_addr_pwm_ctrl,
+			  &chip->reg_pwm_ctrl);
+	if (rc)
+		goto bail;
+
+bail:
+	if (rc)
+		pr_err("pm8xxx_readb failed; initial register states "
+			"unknown, rc=%d\n", rc);
+	return rc;
+}
+
+/* TODO: should this default setting function be removed? */
+static int pm8xxx_batt_alarm_config_defaults(void)
+{
+	int rc = 0;
+
+	/* Use default values when no platform data is provided. */
+	rc = pm8xxx_batt_alarm_threshold_set(PM8XXX_BATT_ALARM_LOWER_COMPARATOR,
+		DEFAULT_THRESHOLD_LOWER);
+	if (rc) {
+		pr_err("threshold_set failed, rc=%d\n", rc);
+		goto done;
+	}
+
+	rc = pm8xxx_batt_alarm_threshold_set(PM8XXX_BATT_ALARM_UPPER_COMPARATOR,
+		DEFAULT_THRESHOLD_UPPER);
+	if (rc) {
+		pr_err("threshold_set failed, rc=%d\n", rc);
+		goto done;
+	}
+
+	rc = pm8xxx_batt_alarm_hold_time_set(DEFAULT_HOLD_TIME);
+	if (rc) {
+		pr_err("hold_time_set failed, rc=%d\n", rc);
+		goto done;
+	}
+
+	rc = pm8xxx_batt_alarm_pwm_rate_set(DEFAULT_USE_PWM,
+			DEFAULT_PWM_SCALER, DEFAULT_PWM_DIVIDER);
+	if (rc) {
+		pr_err("pwm_rate_set failed, rc=%d\n", rc);
+		goto done;
+	}
+
+	rc = pm8xxx_batt_alarm_disable(PM8XXX_BATT_ALARM_LOWER_COMPARATOR);
+	if (rc) {
+		pr_err("disable lower failed, rc=%d\n", rc);
+		goto done;
+	}
+
+	rc = pm8xxx_batt_alarm_disable(PM8XXX_BATT_ALARM_UPPER_COMPARATOR);
+	if (rc) {
+		pr_err("disable upper failed, rc=%d\n", rc);
+		goto done;
+	}
+
+done:
+	return rc;
+}
+
+static int __devinit pm8xxx_batt_alarm_probe(struct platform_device *pdev)
+{
+	const struct pm8xxx_batt_alarm_core_data *cdata
+			= pdev->dev.platform_data;
+	struct pm8xxx_batt_alarm_chip *chip;
+	struct resource *res;
+	int rc;
+
+	if (the_battalarm) {
+		pr_err("A PMIC battery alarm device has already been probed.\n");
+		return -ENODEV;
+	}
+
+	if (!cdata) {
+		pr_err("missing core data\n");
+		return -EINVAL;
+	}
+
+	if (!cdata->irq_name) {
+		pr_err("missing IRQ name\n");
+		return -EINVAL;
+	}
+
+	chip = kzalloc(sizeof(struct pm8xxx_batt_alarm_chip), GFP_KERNEL);
+	if (chip == NULL) {
+		pr_err("kzalloc() failed.\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+		cdata->irq_name);
+	if (res) {
+		chip->irq = res->start;
+	} else {
+		pr_err("Battery alarm IRQ not specified\n");
+		rc = -EINVAL;
+		goto err_free_chip;
+	}
+
+	chip->dev = &pdev->dev;
+	memcpy(&(chip->cdata), cdata,
+		sizeof(struct pm8xxx_batt_alarm_core_data));
+
+	srcu_init_notifier_head(&chip->irq_notifier_list);
+
+	chip->notifier_count = 0;
+	mutex_init(&chip->lock);
+
+	the_battalarm = chip;
+
+	rc = pm8xxx_batt_alarm_reg_init(chip);
+	if (rc)
+		goto err_free_mutex;
+
+	rc = pm8xxx_batt_alarm_config_defaults();
+	if (rc)
+		goto err_free_mutex;
+
+	INIT_WORK(&chip->irq_work, pm8xxx_batt_alarm_isr_work);
+
+/* TODO: Is it best to trigger on both edges? Should this be configurable? */
+	rc = request_irq(chip->irq, pm8xxx_batt_alarm_isr,
+		IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, cdata->irq_name,
+		chip);
+	if (rc < 0) {
+		pr_err("request_irq(%d) failed, rc=%d\n", chip->irq, rc);
+		goto err_cancel_work;
+	}
+
+	/* Disable the IRQ until a notifier is registered. */
+	disable_irq(chip->irq);
+
+	platform_set_drvdata(pdev, chip);
+
+	return 0;
+
+err_cancel_work:
+	cancel_work_sync(&chip->irq_work);
+err_free_mutex:
+	mutex_destroy(&chip->lock);
+	srcu_cleanup_notifier_head(&chip->irq_notifier_list);
+err_free_chip:
+	kfree(chip);
+	the_battalarm = NULL;
+
+	return rc;
+}
+
+static int __devexit pm8xxx_batt_alarm_remove(struct platform_device *pdev)
+{
+	struct pm8xxx_batt_alarm_chip *chip = platform_get_drvdata(pdev);
+
+	if (chip) {
+		platform_set_drvdata(pdev, NULL);
+		irq_set_irq_wake(chip->irq, 0);
+		free_irq(chip->irq, chip);
+		cancel_work_sync(&chip->irq_work);
+		srcu_cleanup_notifier_head(&chip->irq_notifier_list);
+		mutex_destroy(&chip->lock);
+		kfree(chip);
+		the_battalarm = NULL;
+	}
+
+	return 0;
+}
+
+static struct platform_driver pm8xxx_batt_alarm_driver = {
+	.probe	= pm8xxx_batt_alarm_probe,
+	.remove	= __devexit_p(pm8xxx_batt_alarm_remove),
+	.driver	= {
+		.name = PM8XXX_BATT_ALARM_DEV_NAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init pm8xxx_batt_alarm_init(void)
+{
+	return platform_driver_register(&pm8xxx_batt_alarm_driver);
+}
+
+static void __exit pm8xxx_batt_alarm_exit(void)
+{
+	platform_driver_unregister(&pm8xxx_batt_alarm_driver);
+}
+
+module_init(pm8xxx_batt_alarm_init);
+module_exit(pm8xxx_batt_alarm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC PM8xxx Battery Alarm");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:" PM8XXX_BATT_ALARM_DEV_NAME);
diff --git a/drivers/mfd/pm8xxx-debug.c b/drivers/mfd/pm8xxx-debug.c
new file mode 100644
index 0000000..3b69121
--- /dev/null
+++ b/drivers/mfd/pm8xxx-debug.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/mfd/pm8xxx/core.h>
+#include <linux/debugfs.h>
+
+#define PM8XXX_DEBUG_DEV_NAME "pm8xxx-debug"
+
+struct pm8xxx_debug_device {
+	struct mutex		debug_mutex;
+	struct device		*parent;
+	struct dentry		*dir;
+	int			addr;
+};
+
+static bool pm8xxx_debug_addr_is_valid(int addr)
+{
+	if (addr < 0 || addr > 0x3FF) {
+		pr_err("PMIC register address is invalid: %d\n", addr);
+		return false;
+	}
+	return true;
+}
+
+static int pm8xxx_debug_data_set(void *data, u64 val)
+{
+	struct pm8xxx_debug_device *debugdev = data;
+	u8 reg = val;
+	int rc;
+
+	mutex_lock(&debugdev->debug_mutex);
+
+	if (pm8xxx_debug_addr_is_valid(debugdev->addr)) {
+		rc = pm8xxx_writeb(debugdev->parent, debugdev->addr, reg);
+
+		if (rc)
+			pr_err("pm8xxx_writeb(0x%03X)=0x%02X failed: rc=%d\n",
+				debugdev->addr, reg, rc);
+	}
+
+	mutex_unlock(&debugdev->debug_mutex);
+	return 0;
+}
+
+static int pm8xxx_debug_data_get(void *data, u64 *val)
+{
+	struct pm8xxx_debug_device *debugdev = data;
+	int rc;
+	u8 reg;
+
+	mutex_lock(&debugdev->debug_mutex);
+
+	if (pm8xxx_debug_addr_is_valid(debugdev->addr)) {
+		rc = pm8xxx_readb(debugdev->parent, debugdev->addr, &reg);
+
+		if (rc)
+			pr_err("pm8xxx_readb(0x%03X) failed: rc=%d\n",
+				debugdev->addr, rc);
+		else
+			*val = reg;
+	}
+
+	mutex_unlock(&debugdev->debug_mutex);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_data_fops, pm8xxx_debug_data_get,
+			pm8xxx_debug_data_set, "0x%02llX\n");
+
+static int pm8xxx_debug_addr_set(void *data, u64 val)
+{
+	struct pm8xxx_debug_device *debugdev = data;
+
+	if (pm8xxx_debug_addr_is_valid(val)) {
+		mutex_lock(&debugdev->debug_mutex);
+		debugdev->addr = val;
+		mutex_unlock(&debugdev->debug_mutex);
+	}
+
+	return 0;
+}
+
+static int pm8xxx_debug_addr_get(void *data, u64 *val)
+{
+	struct pm8xxx_debug_device *debugdev = data;
+
+	mutex_lock(&debugdev->debug_mutex);
+
+	if (pm8xxx_debug_addr_is_valid(debugdev->addr))
+		*val = debugdev->addr;
+
+	mutex_unlock(&debugdev->debug_mutex);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_addr_fops, pm8xxx_debug_addr_get,
+			pm8xxx_debug_addr_set, "0x%03llX\n");
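+
+/*
+ * Typical usage from user space (assuming debugfs is mounted at
+ * /sys/kernel/debug and that the parent driver passed "pm8921-dbg" as the
+ * directory name; the register address and value are only examples):
+ *
+ *	echo 0x1C8 > /sys/kernel/debug/pm8921-dbg/addr
+ *	cat /sys/kernel/debug/pm8921-dbg/data
+ *	echo 0x55 > /sys/kernel/debug/pm8921-dbg/data
+ */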
+
+static int __devinit pm8xxx_debug_probe(struct platform_device *pdev)
+{
+	char *name = pdev->dev.platform_data;
+	struct pm8xxx_debug_device *debugdev;
+	struct dentry *dir;
+	struct dentry *temp;
+	int rc;
+
+	if (name == NULL) {
+		pr_err("debugfs directory name must be specified in "
+			"platform_data pointer\n");
+		return -EINVAL;
+	}
+
+	debugdev = kzalloc(sizeof(struct pm8xxx_debug_device), GFP_KERNEL);
+	if (debugdev == NULL) {
+		pr_err("kzalloc failed\n");
+		return -ENOMEM;
+	}
+
+	mutex_init(&debugdev->debug_mutex);
+
+	debugdev->parent = pdev->dev.parent;
+	debugdev->addr = -1;
+
+	dir = debugfs_create_dir(name, NULL);
+	if (dir == NULL || IS_ERR(dir)) {
+		rc = dir ? PTR_ERR(dir) : -ENOMEM;
+		pr_err("debugfs_create_dir failed: rc=%d\n", rc);
+		goto dir_error;
+	}
+
+	temp = debugfs_create_file("addr", S_IRUSR | S_IWUSR, dir, debugdev,
+				   &debug_addr_fops);
+	if (temp == NULL || IS_ERR(temp)) {
+		rc = temp ? PTR_ERR(temp) : -ENOMEM;
+		pr_err("debugfs_create_file failed: rc=%d\n", rc);
+		goto file_error;
+	}
+
+	temp = debugfs_create_file("data", S_IRUSR | S_IWUSR, dir, debugdev,
+				   &debug_data_fops);
+	if (temp == NULL || IS_ERR(temp)) {
+		rc = temp ? PTR_ERR(temp) : -ENOMEM;
+		pr_err("debugfs_create_file failed: rc=%d\n", rc);
+		goto file_error;
+	}
+
+	debugdev->dir = dir;
+	platform_set_drvdata(pdev, debugdev);
+
+	return 0;
+
+file_error:
+	debugfs_remove_recursive(dir);
+dir_error:
+	kfree(debugdev);
+
+	return rc;
+}
+
+static int __devexit pm8xxx_debug_remove(struct platform_device *pdev)
+{
+	struct pm8xxx_debug_device *debugdev = platform_get_drvdata(pdev);
+
+	if (debugdev) {
+		debugfs_remove_recursive(debugdev->dir);
+		kfree(debugdev);
+	}
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static struct platform_driver pm8xxx_debug_driver = {
+	.probe		= pm8xxx_debug_probe,
+	.remove		= __devexit_p(pm8xxx_debug_remove),
+	.driver		= {
+		.name	= PM8XXX_DEBUG_DEV_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init pm8xxx_debug_init(void)
+{
+	return platform_driver_register(&pm8xxx_debug_driver);
+}
+subsys_initcall(pm8xxx_debug_init);
+
+static void __exit pm8xxx_debug_exit(void)
+{
+	platform_driver_unregister(&pm8xxx_debug_driver);
+}
+module_exit(pm8xxx_debug_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PM8XXX Debug driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:" PM8XXX_DEBUG_DEV_NAME);
diff --git a/drivers/mfd/pm8xxx-irq.c b/drivers/mfd/pm8xxx-irq.c
index d452dd0..c04d0d1 100644
--- a/drivers/mfd/pm8xxx-irq.c
+++ b/drivers/mfd/pm8xxx-irq.c
@@ -247,12 +247,20 @@
 	return 0;
 }
 
+static int pm8xxx_irq_read_line(struct irq_data *d)
+{
+	struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
+
+	return pm8xxx_get_irq_stat(chip, d->irq);
+}
+
 static struct irq_chip pm8xxx_irq_chip = {
 	.name		= "pm8xxx",
 	.irq_mask_ack	= pm8xxx_irq_mask_ack,
 	.irq_unmask	= pm8xxx_irq_unmask,
 	.irq_set_type	= pm8xxx_irq_set_type,
 	.irq_set_wake	= pm8xxx_irq_set_wake,
+	.irq_read_line	= pm8xxx_irq_read_line,
 	.flags		= IRQCHIP_MASK_ON_SUSPEND,
 };
 
@@ -358,7 +366,7 @@
 	irq_set_irq_type(devirq, pdata->irq_trigger_flag);
 	irq_set_handler_data(devirq, chip);
 	irq_set_chained_handler(devirq, pm8xxx_irq_handler);
-	set_irq_wake(devirq, 1);
+	irq_set_irq_wake(devirq, 1);
 
 	return chip;
 }
diff --git a/drivers/mfd/pm8xxx-misc.c b/drivers/mfd/pm8xxx-misc.c
new file mode 100644
index 0000000..cd5624f
--- /dev/null
+++ b/drivers/mfd/pm8xxx-misc.c
@@ -0,0 +1,310 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/pm8xxx/core.h>
+#include <linux/mfd/pm8xxx/misc.h>
+
+/* PON CTRL 1 register */
+#define REG_PM8058_PON_CTRL_1			0x01C
+#define REG_PM8921_PON_CTRL_1			0x01C
+
+#define PON_CTRL_1_PULL_UP_MASK			0xE0
+#define PON_CTRL_1_USB_PWR_EN			0x10
+
+#define PON_CTRL_1_WD_EN_MASK			0x08
+#define PON_CTRL_1_WD_EN_RESET			0x08
+#define PON_CTRL_1_WD_EN_PWR_OFF		0x00
+
+/* Regulator L22 control register */
+#define REG_PM8058_L22_CTRL			0x121
+
+/* SLEEP CTRL register */
+#define REG_PM8058_SLEEP_CTRL			0x02B
+#define REG_PM8921_SLEEP_CTRL			0x10A
+
+#define SLEEP_CTRL_SMPL_EN_MASK			0x04
+#define SLEEP_CTRL_SMPL_EN_RESET		0x04
+#define SLEEP_CTRL_SMPL_EN_PWR_OFF		0x00
+
+/* FTS regulator PMR registers */
+#define REG_PM8901_REGULATOR_S1_PMR		0xA7
+#define REG_PM8901_REGULATOR_S2_PMR		0xA8
+#define REG_PM8901_REGULATOR_S3_PMR		0xA9
+#define REG_PM8901_REGULATOR_S4_PMR		0xAA
+
+#define PM8901_REGULATOR_PMR_STATE_MASK		0x60
+#define PM8901_REGULATOR_PMR_STATE_OFF		0x20
+
+struct pm8xxx_misc_chip {
+	struct list_head			link;
+	struct pm8xxx_misc_platform_data	pdata;
+	struct device				*dev;
+	enum pm8xxx_version			version;
+};
+
+static LIST_HEAD(pm8xxx_misc_chips);
+static DEFINE_SPINLOCK(pm8xxx_misc_chips_lock);
+
+static int pm8xxx_misc_masked_write(struct pm8xxx_misc_chip *chip, u16 addr,
+				    u8 mask, u8 val)
+{
+	int rc;
+	u8 reg;
+
+	rc = pm8xxx_readb(chip->dev->parent, addr, &reg);
+	if (rc) {
+		pr_err("pm8xxx_readb(0x%03X) failed, rc=%d\n", addr, rc);
+		return rc;
+	}
+	reg &= ~mask;
+	reg |= val & mask;
+	rc = pm8xxx_writeb(chip->dev->parent, addr, reg);
+	if (rc)
+		pr_err("pm8xxx_writeb(0x%03X)=0x%02X failed, rc=%d\n", addr,
+			reg, rc);
+	return rc;
+}
+
+static int __pm8058_reset_pwr_off(struct pm8xxx_misc_chip *chip, int reset)
+{
+	int rc;
+
+	/*
+	 * Fix-up: Set regulator LDO22 to 1.225 V in high power mode. Leave its
+	 * pull-down state intact. This ensures a safe shutdown.
+	 */
+	rc = pm8xxx_misc_masked_write(chip, REG_PM8058_L22_CTRL, 0xBF, 0x93);
+	if (rc) {
+		pr_err("pm8xxx_misc_masked_write failed, rc=%d\n", rc);
+		goto read_write_err;
+	}
+
+	/* Enable SMPL if resetting is desired. */
+	rc = pm8xxx_misc_masked_write(chip, REG_PM8058_SLEEP_CTRL,
+	       SLEEP_CTRL_SMPL_EN_MASK,
+	       (reset ? SLEEP_CTRL_SMPL_EN_RESET : SLEEP_CTRL_SMPL_EN_PWR_OFF));
+	if (rc) {
+		pr_err("pm8xxx_misc_masked_write failed, rc=%d\n", rc);
+		goto read_write_err;
+	}
+
+	/*
+	 * Select action to perform (reset or shutdown) when PS_HOLD goes low.
+	 * Also ensure that KPD, CBL0, and CBL1 pull ups are enabled and that
+	 * USB charging is enabled.
+	 */
+	rc = pm8xxx_misc_masked_write(chip, REG_PM8058_PON_CTRL_1,
+		PON_CTRL_1_PULL_UP_MASK | PON_CTRL_1_USB_PWR_EN
+		| PON_CTRL_1_WD_EN_MASK,
+		PON_CTRL_1_PULL_UP_MASK | PON_CTRL_1_USB_PWR_EN
+		| (reset ? PON_CTRL_1_WD_EN_RESET : PON_CTRL_1_WD_EN_PWR_OFF));
+	if (rc) {
+		pr_err("pm8xxx_misc_masked_write failed, rc=%d\n", rc);
+		goto read_write_err;
+	}
+
+read_write_err:
+	return rc;
+}
+
+static int __pm8901_reset_pwr_off(struct pm8xxx_misc_chip *chip, int reset)
+{
+	int rc = 0, i;
+	u8 pmr_addr[4] = {
+		REG_PM8901_REGULATOR_S2_PMR,
+		REG_PM8901_REGULATOR_S3_PMR,
+		REG_PM8901_REGULATOR_S4_PMR,
+		REG_PM8901_REGULATOR_S1_PMR,
+	};
+
+	/* Fix-up: Turn off regulators S1, S2, S3, S4 when shutting down. */
+	if (!reset) {
+		for (i = 0; i < 4; i++) {
+			rc = pm8xxx_misc_masked_write(chip, pmr_addr[i],
+				PM8901_REGULATOR_PMR_STATE_MASK,
+				PM8901_REGULATOR_PMR_STATE_OFF);
+			if (rc) {
+				pr_err("pm8xxx_misc_masked_write failed, "
+					"rc=%d\n", rc);
+				goto read_write_err;
+			}
+		}
+	}
+
+read_write_err:
+	return rc;
+}
+
+static int __pm8921_reset_pwr_off(struct pm8xxx_misc_chip *chip, int reset)
+{
+	int rc;
+
+	/* Enable SMPL if resetting is desired. */
+	rc = pm8xxx_misc_masked_write(chip, REG_PM8921_SLEEP_CTRL,
+	       SLEEP_CTRL_SMPL_EN_MASK,
+	       (reset ? SLEEP_CTRL_SMPL_EN_RESET : SLEEP_CTRL_SMPL_EN_PWR_OFF));
+	if (rc) {
+		pr_err("pm8xxx_misc_masked_write failed, rc=%d\n", rc);
+		goto read_write_err;
+	}
+
+	/*
+	 * Select action to perform (reset or shutdown) when PS_HOLD goes low.
+	 * Also ensure that KPD, CBL0, and CBL1 pull ups are enabled and that
+	 * USB charging is enabled.
+	 */
+	rc = pm8xxx_misc_masked_write(chip, REG_PM8921_PON_CTRL_1,
+		PON_CTRL_1_PULL_UP_MASK | PON_CTRL_1_USB_PWR_EN
+		| PON_CTRL_1_WD_EN_MASK,
+		PON_CTRL_1_PULL_UP_MASK | PON_CTRL_1_USB_PWR_EN
+		| (reset ? PON_CTRL_1_WD_EN_RESET : PON_CTRL_1_WD_EN_PWR_OFF));
+	if (rc) {
+		pr_err("pm8xxx_misc_masked_write failed, rc=%d\n", rc);
+		goto read_write_err;
+	}
+
+read_write_err:
+	return rc;
+}
+
+/**
+ * pm8xxx_reset_pwr_off - switch all PM8XXX PMIC chips attached to the system to
+ *			  either reset or shutdown when they are turned off
+ * @reset: 0 = shut down the PMICs, 1 = shut down and then restart the PMICs
+ *
+ * RETURNS: an appropriate -ERRNO error value on error, or zero for success.
+ */
+int pm8xxx_reset_pwr_off(int reset)
+{
+	struct pm8xxx_misc_chip *chip;
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&pm8xxx_misc_chips_lock, flags);
+
+	/* Loop over all attached PMICs and call specific functions for them. */
+	list_for_each_entry(chip, &pm8xxx_misc_chips, link) {
+		switch (chip->version) {
+		case PM8XXX_VERSION_8058:
+			rc = __pm8058_reset_pwr_off(chip, reset);
+			break;
+		case PM8XXX_VERSION_8901:
+			rc = __pm8901_reset_pwr_off(chip, reset);
+			break;
+		case PM8XXX_VERSION_8921:
+			rc = __pm8921_reset_pwr_off(chip, reset);
+			break;
+		default:
+			/* PMIC doesn't have reset_pwr_off; do nothing. */
+			break;
+		}
+		if (rc) {
+			pr_err("reset_pwr_off failed, rc=%d\n", rc);
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&pm8xxx_misc_chips_lock, flags);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(pm8xxx_reset_pwr_off);
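+
+/*
+ * Illustrative board-level usage (not part of this driver): machine code
+ * would typically call this from its power-off and restart hooks, for
+ * example
+ *
+ *	pm8xxx_reset_pwr_off(0);	before letting PS_HOLD fall to shut down
+ *	pm8xxx_reset_pwr_off(1);	before letting PS_HOLD fall to restart
+ */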
+
+static int __devinit pm8xxx_misc_probe(struct platform_device *pdev)
+{
+	const struct pm8xxx_misc_platform_data *pdata = pdev->dev.platform_data;
+	struct pm8xxx_misc_chip *chip;
+	struct pm8xxx_misc_chip *sibling;
+	struct list_head *prev;
+	unsigned long flags;
+	int rc = 0;
+
+	if (!pdata) {
+		pr_err("missing platform data\n");
+		return -EINVAL;
+	}
+
+	chip = kzalloc(sizeof(struct pm8xxx_misc_chip), GFP_KERNEL);
+	if (!chip) {
+		pr_err("Cannot allocate %zu bytes\n",
+			sizeof(struct pm8xxx_misc_chip));
+		return -ENOMEM;
+	}
+
+	chip->dev = &pdev->dev;
+	chip->version = pm8xxx_get_version(chip->dev->parent);
+	memcpy(&(chip->pdata), pdata, sizeof(struct pm8xxx_misc_platform_data));
+
+	/* Insert PMICs in priority order (lowest value first). */
+	spin_lock_irqsave(&pm8xxx_misc_chips_lock, flags);
+	prev = &pm8xxx_misc_chips;
+	list_for_each_entry(sibling, &pm8xxx_misc_chips, link) {
+		if (chip->pdata.priority < sibling->pdata.priority)
+			break;
+		else
+			prev = &sibling->link;
+	}
+	list_add(&chip->link, prev);
+	spin_unlock_irqrestore(&pm8xxx_misc_chips_lock, flags);
+
+	platform_set_drvdata(pdev, chip);
+
+	return rc;
+}
+
+static int __devexit pm8xxx_misc_remove(struct platform_device *pdev)
+{
+	struct pm8xxx_misc_chip *chip = platform_get_drvdata(pdev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&pm8xxx_misc_chips_lock, flags);
+	list_del(&chip->link);
+	spin_unlock_irqrestore(&pm8xxx_misc_chips_lock, flags);
+
+	platform_set_drvdata(pdev, NULL);
+	kfree(chip);
+
+	return 0;
+}
+
+static struct platform_driver pm8xxx_misc_driver = {
+	.probe	= pm8xxx_misc_probe,
+	.remove	= __devexit_p(pm8xxx_misc_remove),
+	.driver	= {
+		.name	= PM8XXX_MISC_DEV_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init pm8xxx_misc_init(void)
+{
+	return platform_driver_register(&pm8xxx_misc_driver);
+}
+postcore_initcall(pm8xxx_misc_init);
+
+static void __exit pm8xxx_misc_exit(void)
+{
+	platform_driver_unregister(&pm8xxx_misc_driver);
+}
+module_exit(pm8xxx_misc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC 8XXX misc driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:" PM8XXX_MISC_DEV_NAME);
diff --git a/drivers/mfd/pm8xxx-pwm.c b/drivers/mfd/pm8xxx-pwm.c
new file mode 100644
index 0000000..cdddd98
--- /dev/null
+++ b/drivers/mfd/pm8xxx-pwm.c
@@ -0,0 +1,1082 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Qualcomm PM8XXX Pulse Width Modulation (PWM) driver
+ *
+ * The HW module is also called LPG (Light Pulse Generator).
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/debugfs.h>
+#include <linux/mfd/pm8xxx/core.h>
+#include <linux/mfd/pm8xxx/pwm.h>
+
+#define PM8XXX_LPG_BANKS		8
+#define PM8XXX_PWM_CHANNELS		PM8XXX_LPG_BANKS
+
+#define PM8XXX_LPG_CTL_REGS		7
+
+/* PM8XXX PWM */
+#define SSBI_REG_ADDR_LPG_CTL_BASE	0x13C
+#define SSBI_REG_ADDR_LPG_CTL(n)	(SSBI_REG_ADDR_LPG_CTL_BASE + (n))
+#define SSBI_REG_ADDR_LPG_BANK_SEL	0x143
+#define SSBI_REG_ADDR_LPG_BANK_EN	0x144
+#define SSBI_REG_ADDR_LPG_LUT_CFG0	0x145
+#define SSBI_REG_ADDR_LPG_LUT_CFG1	0x146
+
+/* Control 0 */
+#define PM8XXX_PWM_1KHZ_COUNT_MASK	0xF0
+#define PM8XXX_PWM_1KHZ_COUNT_SHIFT	4
+
+#define PM8XXX_PWM_1KHZ_COUNT_MAX	15
+
+#define PM8XXX_PWM_OUTPUT_EN		0x08
+#define PM8XXX_PWM_PWM_EN		0x04
+#define PM8XXX_PWM_RAMP_GEN_EN		0x02
+#define PM8XXX_PWM_RAMP_START		0x01
+
+#define PM8XXX_PWM_PWM_START		(PM8XXX_PWM_OUTPUT_EN \
+					| PM8XXX_PWM_PWM_EN)
+#define PM8XXX_PWM_RAMP_GEN_START	(PM8XXX_PWM_RAMP_GEN_EN \
+					| PM8XXX_PWM_RAMP_START)
+
+/* Control 1 */
+#define PM8XXX_PWM_REVERSE_EN		0x80
+#define PM8XXX_PWM_BYPASS_LUT		0x40
+#define PM8XXX_PWM_HIGH_INDEX_MASK	0x3F
+
+/* Control 2 */
+#define PM8XXX_PWM_LOOP_EN		0x80
+#define PM8XXX_PWM_RAMP_UP		0x40
+#define PM8XXX_PWM_LOW_INDEX_MASK	0x3F
+
+/* Control 3 */
+#define PM8XXX_PWM_VALUE_BIT7_0		0xFF
+#define PM8XXX_PWM_VALUE_BIT5_0		0x3F
+
+/* Control 4 */
+#define PM8XXX_PWM_VALUE_BIT8		0x80
+
+#define PM8XXX_PWM_CLK_SEL_MASK		0x60
+#define PM8XXX_PWM_CLK_SEL_SHIFT	5
+
+#define PM8XXX_PWM_CLK_SEL_NO		0
+#define PM8XXX_PWM_CLK_SEL_1KHZ		1
+#define PM8XXX_PWM_CLK_SEL_32KHZ	2
+#define PM8XXX_PWM_CLK_SEL_19P2MHZ	3
+
+#define PM8XXX_PWM_PREDIVIDE_MASK	0x18
+#define PM8XXX_PWM_PREDIVIDE_SHIFT	3
+
+#define PM8XXX_PWM_PREDIVIDE_2		0
+#define PM8XXX_PWM_PREDIVIDE_3		1
+#define PM8XXX_PWM_PREDIVIDE_5		2
+#define PM8XXX_PWM_PREDIVIDE_6		3
+
+#define PM8XXX_PWM_M_MASK		0x07
+#define PM8XXX_PWM_M_MIN		0
+#define PM8XXX_PWM_M_MAX		7
+
+/* Control 5 */
+#define PM8XXX_PWM_PAUSE_COUNT_HI_MASK		0xFC
+#define PM8XXX_PWM_PAUSE_COUNT_HI_SHIFT		2
+
+#define PM8XXX_PWM_PAUSE_ENABLE_HIGH		0x02
+#define PM8XXX_PWM_SIZE_9_BIT			0x01
+
+/* Control 6 */
+#define PM8XXX_PWM_PAUSE_COUNT_LO_MASK		0xFC
+#define PM8XXX_PWM_PAUSE_COUNT_LO_SHIFT		2
+
+#define PM8XXX_PWM_PAUSE_ENABLE_LOW		0x02
+#define PM8XXX_PWM_RESERVED			0x01
+
+#define PM8XXX_PWM_PAUSE_COUNT_MAX		56 /* < 2^6 = 64 */
+
+/* LUT_CFG1 */
+#define PM8XXX_PWM_LUT_READ			0x40
+
+/*
+ * PWM Frequency = Clock Frequency / (N * T)
+ *	or
+ * PWM Period = Clock Period * (N * T)
+ *	where
+ * N = 2^9 or 2^6 for 9-bit or 6-bit PWM size
+ * T = Pre-divide * 2^m, where m = 0..7 (exponent)
+ *
+ * This is the formula to figure out m for the best pre-divide and clock:
+ * (PWM Period / N) / 2^m = (Pre-divide * Clock Period)
+ */
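+/*
+ * Example with illustrative numbers: a 19.2 MHz clock, pre-divide 5 and
+ * m = 6 give T = 5 * 2^6 = 320; with 6-bit PWM size, N = 64, so
+ *	PWM Frequency = 19,200,000 / (64 * 320) = 937.5 Hz
+ * which corresponds to a PWM period of roughly 1.07 ms.
+ */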
+#define NUM_CLOCKS	3
+
+#define NSEC_1000HZ	(NSEC_PER_SEC / 1000)
+#define NSEC_32768HZ	(NSEC_PER_SEC / 32768)
+#define NSEC_19P2MHZ	(NSEC_PER_SEC / 19200000)
+
+#define CLK_PERIOD_MIN	NSEC_19P2MHZ
+#define CLK_PERIOD_MAX	NSEC_1000HZ
+
+#define NUM_PRE_DIVIDE	3	/* No default support for pre-divide = 6 */
+
+#define PRE_DIVIDE_0		2
+#define PRE_DIVIDE_1		3
+#define PRE_DIVIDE_2		5
+
+#define PRE_DIVIDE_MIN		PRE_DIVIDE_0
+#define PRE_DIVIDE_MAX		PRE_DIVIDE_2
+
+static unsigned int pt_t[NUM_PRE_DIVIDE][NUM_CLOCKS] = {
+	{	PRE_DIVIDE_0 * NSEC_1000HZ,
+		PRE_DIVIDE_0 * NSEC_32768HZ,
+		PRE_DIVIDE_0 * NSEC_19P2MHZ,
+	},
+	{	PRE_DIVIDE_1 * NSEC_1000HZ,
+		PRE_DIVIDE_1 * NSEC_32768HZ,
+		PRE_DIVIDE_1 * NSEC_19P2MHZ,
+	},
+	{	PRE_DIVIDE_2 * NSEC_1000HZ,
+		PRE_DIVIDE_2 * NSEC_32768HZ,
+		PRE_DIVIDE_2 * NSEC_19P2MHZ,
+	},
+};
+
+#define MIN_MPT	((PRE_DIVIDE_MIN * CLK_PERIOD_MIN) << PM8XXX_PWM_M_MIN)
+#define MAX_MPT	((PRE_DIVIDE_MAX * CLK_PERIOD_MAX) << PM8XXX_PWM_M_MAX)
+
+/* Private data */
+struct pm8xxx_pwm_chip;
+
+struct pwm_device {
+	int			pwm_id;		/* = bank/channel id */
+	int			in_use;
+	const char		*label;
+	int			pwm_period;
+	int			pwm_duty;
+	u8			pwm_ctl[PM8XXX_LPG_CTL_REGS];
+	int			irq;
+	struct pm8xxx_pwm_chip	*chip;
+};
+
+struct pm8xxx_pwm_chip {
+	struct pwm_device		pwm_dev[PM8XXX_PWM_CHANNELS];
+	u8				bank_mask;
+	struct mutex			pwm_mutex;
+	struct device			*dev;
+};
+
+static struct pm8xxx_pwm_chip	*pwm_chip;
+
+struct pm8xxx_pwm_config {
+	int	pwm_size;	/* round up to 6 or 9 for 6/9-bit PWM SIZE */
+	int	clk;
+	int	pre_div;
+	int	pre_div_exp;
+	int	pwm_value;
+	int	bypass_lut;
+
+	/* LUT parameters when bypass_lut is 0 */
+	int	lut_duty_ms;
+	int	lut_lo_index;
+	int	lut_hi_index;
+	int	lut_pause_hi;
+	int	lut_pause_lo;
+	int	flags;
+};
+
+static const u16 duty_msec[PM8XXX_PWM_1KHZ_COUNT_MAX + 1] = {
+	0, 1, 2, 3, 4, 6, 8, 16, 18, 24, 32, 36, 64, 128, 256, 512
+};
+
+static const u16 pause_count[PM8XXX_PWM_PAUSE_COUNT_MAX + 1] = {
+	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+	23, 28, 31, 42, 47, 56, 63, 83, 94, 111, 125, 167, 188, 222, 250, 333,
+	375, 500, 667, 750, 800, 900, 1000, 1100,
+	1200, 1300, 1400, 1500, 1600, 1800, 2000, 2500,
+	3000, 3500, 4000, 4500, 5000, 5500, 6000, 6500,
+	7000
+};
+
+/* Internal functions */
+static int pm8xxx_pwm_bank_enable(struct pwm_device *pwm, int enable)
+{
+	int	rc;
+	u8	reg;
+	struct pm8xxx_pwm_chip	*chip;
+
+	chip = pwm->chip;
+
+	if (enable)
+		reg = chip->bank_mask | (1 << pwm->pwm_id);
+	else
+		reg = chip->bank_mask & ~(1 << pwm->pwm_id);
+
+	rc = pm8xxx_writeb(chip->dev->parent, SSBI_REG_ADDR_LPG_BANK_EN, reg);
+	if (rc) {
+		pr_err("pm8xxx_write(): rc=%d (Enable LPG Bank)\n", rc);
+		return rc;
+	}
+	chip->bank_mask = reg;
+
+	return 0;
+}
+
+static int pm8xxx_pwm_bank_sel(struct pwm_device *pwm)
+{
+	int	rc;
+
+	rc = pm8xxx_writeb(pwm->chip->dev->parent, SSBI_REG_ADDR_LPG_BANK_SEL,
+			   pwm->pwm_id);
+	if (rc)
+		pr_err("pm8xxx_write(): rc=%d (Select PWM Bank)\n", rc);
+	return rc;
+}
+
+static int pm8xxx_pwm_start(struct pwm_device *pwm, int start, int ramp_start)
+{
+	int	rc;
+	u8	reg;
+
+	if (start) {
+		reg = pwm->pwm_ctl[0] | PM8XXX_PWM_PWM_START;
+		if (ramp_start)
+			reg |= PM8XXX_PWM_RAMP_GEN_START;
+		else
+			reg &= ~PM8XXX_PWM_RAMP_GEN_START;
+	} else {
+		reg = pwm->pwm_ctl[0] & ~PM8XXX_PWM_PWM_START;
+		reg &= ~PM8XXX_PWM_RAMP_GEN_START;
+	}
+
+	rc = pm8xxx_writeb(pwm->chip->dev->parent, SSBI_REG_ADDR_LPG_CTL(0),
+			   reg);
+	if (rc)
+		pr_err("pm8xxx_write(): rc=%d (Enable PWM Ctl 0)\n", rc);
+	else
+		pwm->pwm_ctl[0] = reg;
+	return rc;
+}
+
+static void pm8xxx_pwm_calc_period(unsigned int period_us,
+					   struct pm8xxx_pwm_config *pwm_conf)
+{
+	int	n, m, clk, div;
+	int	best_m, best_div, best_clk;
+	int	last_err, cur_err, better_err, better_m;
+	unsigned int	tmp_p, last_p, min_err, period_n;
+
+	/* PWM Period / N */
+	if (period_us < (40 * USEC_PER_SEC)) {  /* ~6-bit max */
+		period_n = (period_us * NSEC_PER_USEC) >> 6;
+		n = 6;
+	} else if (period_us < (274 * USEC_PER_SEC)) { /* overflow threshold */
+		period_n = (period_us >> 6) * NSEC_PER_USEC;
+		if (period_n >= MAX_MPT) {
+			n = 9;
+			period_n >>= 3;
+		} else {
+			n = 6;
+		}
+	} else {
+		period_n = (period_us >> 9) * NSEC_PER_USEC;
+		n = 9;
+	}
+
+	min_err = MAX_MPT;
+	best_m = 0;
+	best_clk = 0;
+	best_div = 0;
+	for (clk = 0; clk < NUM_CLOCKS; clk++) {
+		for (div = 0; div < NUM_PRE_DIVIDE; div++) {
+			tmp_p = period_n;
+			last_p = tmp_p;
+			for (m = 0; m <= PM8XXX_PWM_M_MAX; m++) {
+				if (tmp_p <= pt_t[div][clk]) {
+					/* Found local best */
+					if (!m) {
+						better_err = pt_t[div][clk] -
+							tmp_p;
+						better_m = m;
+					} else {
+						last_err = last_p -
+							pt_t[div][clk];
+						cur_err = pt_t[div][clk] -
+							tmp_p;
+
+						if (cur_err < last_err) {
+							better_err = cur_err;
+							better_m = m;
+						} else {
+							better_err = last_err;
+							better_m = m - 1;
+						}
+					}
+
+					if (better_err < min_err) {
+						min_err = better_err;
+						best_m = better_m;
+						best_clk = clk;
+						best_div = div;
+					}
+					break;
+				} else {
+					last_p = tmp_p;
+					tmp_p >>= 1;
+				}
+			}
+		}
+	}
+
+	pwm_conf->pwm_size = n;
+	pwm_conf->clk = best_clk;
+	pwm_conf->pre_div = best_div;
+	pwm_conf->pre_div_exp = best_m;
+}
+
+static int pm8xxx_pwm_configure(struct pwm_device *pwm,
+			 struct pm8xxx_pwm_config *pwm_conf)
+{
+	int	i, rc, len;
+	u8	reg, ramp_enabled = 0;
+
+	reg = (pwm_conf->pwm_size > 6) ? PM8XXX_PWM_SIZE_9_BIT : 0;
+	pwm->pwm_ctl[5] = reg;
+
+	reg = ((pwm_conf->clk + 1) << PM8XXX_PWM_CLK_SEL_SHIFT)
+		& PM8XXX_PWM_CLK_SEL_MASK;
+	reg |= (pwm_conf->pre_div << PM8XXX_PWM_PREDIVIDE_SHIFT)
+		& PM8XXX_PWM_PREDIVIDE_MASK;
+	reg |= pwm_conf->pre_div_exp & PM8XXX_PWM_M_MASK;
+	pwm->pwm_ctl[4] = reg;
+
+	if (pwm_conf->bypass_lut) {
+		pwm->pwm_ctl[0] &= PM8XXX_PWM_PWM_START; /* keep enabled */
+		pwm->pwm_ctl[1] = PM8XXX_PWM_BYPASS_LUT;
+		pwm->pwm_ctl[2] = 0;
+
+		if (pwm_conf->pwm_size > 6) {
+			pwm->pwm_ctl[3] = pwm_conf->pwm_value
+						& PM8XXX_PWM_VALUE_BIT7_0;
+			pwm->pwm_ctl[4] |= (pwm_conf->pwm_value >> 1)
+						& PM8XXX_PWM_VALUE_BIT8;
+		} else {
+			pwm->pwm_ctl[3] = pwm_conf->pwm_value
+						& PM8XXX_PWM_VALUE_BIT5_0;
+		}
+
+		len = 6;
+	} else {
+		int	pause_cnt, j;
+
+		/* Linear search for duty time */
+		for (i = 0; i < PM8XXX_PWM_1KHZ_COUNT_MAX; i++) {
+			if (duty_msec[i] >= pwm_conf->lut_duty_ms)
+				break;
+		}
+
+		ramp_enabled = pwm->pwm_ctl[0] & PM8XXX_PWM_RAMP_GEN_START;
+		pwm->pwm_ctl[0] &= PM8XXX_PWM_PWM_START; /* keep enabled */
+		pwm->pwm_ctl[0] |= (i << PM8XXX_PWM_1KHZ_COUNT_SHIFT) &
+					PM8XXX_PWM_1KHZ_COUNT_MASK;
+		pwm->pwm_ctl[1] = pwm_conf->lut_hi_index &
+					PM8XXX_PWM_HIGH_INDEX_MASK;
+		pwm->pwm_ctl[2] = pwm_conf->lut_lo_index &
+					PM8XXX_PWM_LOW_INDEX_MASK;
+
+		if (pwm_conf->flags & PM_PWM_LUT_REVERSE)
+			pwm->pwm_ctl[1] |= PM8XXX_PWM_REVERSE_EN;
+		if (pwm_conf->flags & PM_PWM_LUT_RAMP_UP)
+			pwm->pwm_ctl[2] |= PM8XXX_PWM_RAMP_UP;
+		if (pwm_conf->flags & PM_PWM_LUT_LOOP)
+			pwm->pwm_ctl[2] |= PM8XXX_PWM_LOOP_EN;
+
+		/* Pause time */
+		if (pwm_conf->flags & PM_PWM_LUT_PAUSE_HI_EN) {
+			/* Linear search for pause time */
+			pause_cnt = (pwm_conf->lut_pause_hi + duty_msec[i] / 2)
+					/ duty_msec[i];
+			for (j = 0; j < PM8XXX_PWM_PAUSE_COUNT_MAX; j++) {
+				if (pause_count[j] >= pause_cnt)
+					break;
+			}
+			pwm->pwm_ctl[5] |= (j <<
+					   PM8XXX_PWM_PAUSE_COUNT_HI_SHIFT) &
+						PM8XXX_PWM_PAUSE_COUNT_HI_MASK;
+			pwm->pwm_ctl[5] |= PM8XXX_PWM_PAUSE_ENABLE_HIGH;
+		}
+
+		if (pwm_conf->flags & PM_PWM_LUT_PAUSE_LO_EN) {
+			/* Linear search for pause time */
+			pause_cnt = (pwm_conf->lut_pause_lo + duty_msec[i] / 2)
+					/ duty_msec[i];
+			for (j = 0; j < PM8XXX_PWM_PAUSE_COUNT_MAX; j++) {
+				if (pause_count[j] >= pause_cnt)
+					break;
+			}
+			pwm->pwm_ctl[6] = (j <<
+					   PM8XXX_PWM_PAUSE_COUNT_LO_SHIFT) &
+						PM8XXX_PWM_PAUSE_COUNT_LO_MASK;
+			pwm->pwm_ctl[6] |= PM8XXX_PWM_PAUSE_ENABLE_LOW;
+		} else {
+			pwm->pwm_ctl[6] = 0;
+		}
+
+		len = 7;
+	}
+
+	pm8xxx_pwm_bank_sel(pwm);
+
+	for (i = 0; i < len; i++) {
+		rc = pm8xxx_writeb(pwm->chip->dev->parent,
+				   SSBI_REG_ADDR_LPG_CTL(i),
+				   pwm->pwm_ctl[i]);
+		if (rc) {
+			pr_err("pm8xxx_write(): rc=%d (PWM Ctl[%d])\n", rc, i);
+			break;
+		}
+	}
+
+	if (ramp_enabled) {
+		pwm->pwm_ctl[0] |= ramp_enabled;
+		pm8xxx_writeb(pwm->chip->dev->parent,
+			      SSBI_REG_ADDR_LPG_CTL(0),
+			      pwm->pwm_ctl[0]);
+	}
+
+	return rc;
+}
+
+/* APIs */
+/**
+ * pwm_request - request a PWM device
+ * @pwm_id: PWM id or channel
+ * @label: the label to identify the user
+ */
+struct pwm_device *pwm_request(int pwm_id, const char *label)
+{
+	struct pwm_device	*pwm;
+
+	if (pwm_id >= PM8XXX_PWM_CHANNELS || pwm_id < 0) {
+		pr_err("Invalid pwm_id: %d with %s\n",
+		       pwm_id, label ? label : ".");
+		return ERR_PTR(-EINVAL);
+	}
+	if (pwm_chip == NULL) {
+		pr_err("No pwm_chip\n");
+		return ERR_PTR(-ENODEV);
+	}
+
+	mutex_lock(&pwm_chip->pwm_mutex);
+	pwm = &pwm_chip->pwm_dev[pwm_id];
+	if (!pwm->in_use) {
+		pwm->in_use = 1;
+		pwm->label = label;
+	} else {
+		pwm = ERR_PTR(-EBUSY);
+	}
+	mutex_unlock(&pwm_chip->pwm_mutex);
+
+	return pwm;
+}
+EXPORT_SYMBOL_GPL(pwm_request);
+
+/**
+ * pwm_free - free a PWM device
+ * @pwm: the PWM device
+ */
+void pwm_free(struct pwm_device *pwm)
+{
+	if (pwm == NULL || IS_ERR(pwm) || pwm->chip == NULL) {
+		pr_err("Invalid pwm handle\n");
+		return;
+	}
+
+	mutex_lock(&pwm->chip->pwm_mutex);
+	if (pwm->in_use) {
+		pm8xxx_pwm_bank_sel(pwm);
+		pm8xxx_pwm_start(pwm, 0, 0);
+
+		pwm->in_use = 0;
+		pwm->label = NULL;
+	}
+	pm8xxx_pwm_bank_enable(pwm, 0);
+	mutex_unlock(&pwm->chip->pwm_mutex);
+}
+EXPORT_SYMBOL_GPL(pwm_free);
+
+/**
+ * pwm_config - change a PWM device configuration
+ * @pwm: the PWM device
+ * @duty_us: duty cycle in microseconds
+ * @period_us: period in microseconds
+ */
+int pwm_config(struct pwm_device *pwm, int duty_us, int period_us)
+{
+	struct pm8xxx_pwm_config	pwm_conf;
+	unsigned int max_pwm_value, tmp;
+	int	rc;
+
+	if (pwm == NULL || IS_ERR(pwm) ||
+		duty_us > period_us ||
+		(unsigned)period_us > PM8XXX_PWM_PERIOD_MAX ||
+		(unsigned)period_us < PM8XXX_PWM_PERIOD_MIN) {
+		pr_err("Invalid pwm handle or parameters\n");
+		return -EINVAL;
+	}
+	if (pwm->chip == NULL) {
+		pr_err("No pwm_chip\n");
+		return -ENODEV;
+	}
+
+	mutex_lock(&pwm->chip->pwm_mutex);
+
+	if (!pwm->in_use) {
+		pr_err("pwm_id: %d: stale handle?\n", pwm->pwm_id);
+		rc = -EINVAL;
+		goto out_unlock;
+	}
+
+	pm8xxx_pwm_calc_period(period_us, &pwm_conf);
+
+	/* Figure out pwm_value with overflow handling */
+	if ((unsigned)period_us > (1 << pwm_conf.pwm_size)) {
+		tmp = period_us;
+		tmp >>= pwm_conf.pwm_size;
+		pwm_conf.pwm_value = (unsigned)duty_us / tmp;
+	} else {
+		tmp = duty_us;
+		tmp <<= pwm_conf.pwm_size;
+		pwm_conf.pwm_value = tmp / (unsigned)period_us;
+	}
+	max_pwm_value = (1 << pwm_conf.pwm_size) - 1;
+	if (pwm_conf.pwm_value > max_pwm_value)
+		pwm_conf.pwm_value = max_pwm_value;
+
+	pwm_conf.bypass_lut = 1;
+
+	rc = pm8xxx_pwm_configure(pwm, &pwm_conf);
+
+out_unlock:
+	mutex_unlock(&pwm->chip->pwm_mutex);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(pwm_config);
+
+/**
+ * pwm_enable - start a PWM output toggling
+ * @pwm: the PWM device
+ */
+int pwm_enable(struct pwm_device *pwm)
+{
+	int	rc;
+
+	if (pwm == NULL || IS_ERR(pwm)) {
+		pr_err("Invalid pwm handle\n");
+		return -EINVAL;
+	}
+	if (pwm->chip == NULL) {
+		pr_err("No pwm_chip\n");
+		return -ENODEV;
+	}
+
+	mutex_lock(&pwm->chip->pwm_mutex);
+	if (!pwm->in_use) {
+		pr_err("pwm_id: %d: stale handle?\n", pwm->pwm_id);
+		rc = -EINVAL;
+	} else {
+		rc = pm8xxx_pwm_bank_enable(pwm, 1);
+
+		pm8xxx_pwm_bank_sel(pwm);
+		pm8xxx_pwm_start(pwm, 1, 0);
+	}
+	mutex_unlock(&pwm->chip->pwm_mutex);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(pwm_enable);
+
+/**
+ * pwm_disable - stop a PWM output toggling
+ * @pwm: the PWM device
+ */
+void pwm_disable(struct pwm_device *pwm)
+{
+	if (pwm == NULL || IS_ERR(pwm) || pwm->chip == NULL) {
+		pr_err("Invalid pwm handle or no pwm_chip\n");
+		return;
+	}
+
+	mutex_lock(&pwm->chip->pwm_mutex);
+	if (pwm->in_use) {
+		pm8xxx_pwm_bank_sel(pwm);
+		pm8xxx_pwm_start(pwm, 0, 0);
+
+		pm8xxx_pwm_bank_enable(pwm, 0);
+	}
+	mutex_unlock(&pwm->chip->pwm_mutex);
+}
+EXPORT_SYMBOL_GPL(pwm_disable);
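+
+/*
+ * Typical consumer sequence (an illustrative sketch only, not a user added
+ * by this patch): request channel 0, program a 1 ms period at 50% duty,
+ * run it, then release it.  Assumes 1 ms lies within
+ * PM8XXX_PWM_PERIOD_MIN/MAX.
+ *
+ *	struct pwm_device *pwm = pwm_request(0, "backlight");
+ *
+ *	if (!IS_ERR(pwm)) {
+ *		pwm_config(pwm, 500, 1000);
+ *		pwm_enable(pwm);
+ *		...
+ *		pwm_disable(pwm);
+ *		pwm_free(pwm);
+ *	}
+ */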
+
+/**
+ * pm8xxx_pwm_lut_config - change a PWM device configuration to use LUT
+ * @pwm: the PWM device
+ * @period_us: period in microseconds
+ * @duty_pct: array of duty cycles in percent, e.g. 20, 50.
+ * @duty_time_ms: time for each duty cycle in milliseconds
+ * @start_idx: start index in lookup table from 0 to MAX-1
+ * @idx_len: number of LUT entries to use, starting at start_idx
+ * @pause_lo: pause time in milliseconds at low index
+ * @pause_hi: pause time in milliseconds at high index
+ * @flags: control flags
+ */
+int pm8xxx_pwm_lut_config(struct pwm_device *pwm, int period_us,
+			  int duty_pct[], int duty_time_ms, int start_idx,
+			  int idx_len, int pause_lo, int pause_hi, int flags)
+{
+	struct pm8xxx_pwm_config	pwm_conf;
+	unsigned int pwm_value, max_pwm_value;
+	u8	cfg0, cfg1;
+	int	i, len;
+	int	rc;
+
+	if (pwm == NULL || IS_ERR(pwm) || !idx_len) {
+		pr_err("Invalid pwm handle or idx_len=0\n");
+		return -EINVAL;
+	}
+	if (duty_pct == NULL && !(flags & PM_PWM_LUT_NO_TABLE)) {
+		pr_err("Invalid duty_pct with flag\n");
+		return -EINVAL;
+	}
+	if (pwm->chip == NULL) {
+		pr_err("No pwm_chip\n");
+		return -ENODEV;
+	}
+	if (idx_len >= PM_PWM_LUT_SIZE && start_idx) {
+		pr_err("Wrong LUT size or index\n");
+		return -EINVAL;
+	}
+	if ((start_idx + idx_len) > PM_PWM_LUT_SIZE) {
+		pr_err("Exceed LUT limit\n");
+		return -EINVAL;
+	}
+	if ((unsigned)period_us > PM8XXX_PWM_PERIOD_MAX ||
+		(unsigned)period_us < PM8XXX_PWM_PERIOD_MIN) {
+		pr_err("Period out of range\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&pwm->chip->pwm_mutex);
+
+	if (!pwm->in_use) {
+		pr_err("pwm_id: %d: stale handle?\n", pwm->pwm_id);
+		rc = -EINVAL;
+		goto out_unlock;
+	}
+
+	pm8xxx_pwm_calc_period(period_us, &pwm_conf);
+
+	len = (idx_len > PM_PWM_LUT_SIZE) ? PM_PWM_LUT_SIZE : idx_len;
+
+	if (flags & PM_PWM_LUT_NO_TABLE)
+		goto after_table_write;
+
+	max_pwm_value = (1 << pwm_conf.pwm_size) - 1;
+	for (i = 0; i < len; i++) {
+		pwm_value = (duty_pct[i] << pwm_conf.pwm_size) / 100;
+		/* Avoid overflow */
+		if (pwm_value > max_pwm_value)
+			pwm_value = max_pwm_value;
+		cfg0 = pwm_value & 0xff;
+		cfg1 = (pwm_value >> 1) & 0x80;
+		cfg1 |= start_idx + i;
+
+		pm8xxx_writeb(pwm->chip->dev->parent,
+			      SSBI_REG_ADDR_LPG_LUT_CFG0, cfg0);
+		pm8xxx_writeb(pwm->chip->dev->parent,
+			      SSBI_REG_ADDR_LPG_LUT_CFG1, cfg1);
+	}
+
+after_table_write:
+	pwm_conf.lut_duty_ms = duty_time_ms;
+	pwm_conf.lut_lo_index = start_idx;
+	pwm_conf.lut_hi_index = start_idx + len - 1;
+	pwm_conf.lut_pause_lo = pause_lo;
+	pwm_conf.lut_pause_hi = pause_hi;
+	pwm_conf.flags = flags;
+	pwm_conf.bypass_lut = 0;
+
+	rc = pm8xxx_pwm_configure(pwm, &pwm_conf);
+
+out_unlock:
+	mutex_unlock(&pwm->chip->pwm_mutex);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(pm8xxx_pwm_lut_config);
+
+/**
+ * pm8xxx_pwm_lut_enable - control a PWM device to start/stop LUT ramp
+ * @pwm: the PWM device
+ * @start: to start (1), or stop (0)
+ */
+int pm8xxx_pwm_lut_enable(struct pwm_device *pwm, int start)
+{
+	if (pwm == NULL || IS_ERR(pwm)) {
+		pr_err("Invalid pwm handle\n");
+		return -EINVAL;
+	}
+	if (pwm->chip == NULL) {
+		pr_err("No pwm_chip\n");
+		return -ENODEV;
+	}
+
+	mutex_lock(&pwm->chip->pwm_mutex);
+	if (start) {
+		pm8xxx_pwm_bank_enable(pwm, 1);
+
+		pm8xxx_pwm_bank_sel(pwm);
+		pm8xxx_pwm_start(pwm, 1, 1);
+	} else {
+		pm8xxx_pwm_bank_sel(pwm);
+		pm8xxx_pwm_start(pwm, 0, 0);
+
+		pm8xxx_pwm_bank_enable(pwm, 0);
+	}
+	mutex_unlock(&pwm->chip->pwm_mutex);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pm8xxx_pwm_lut_enable);
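+
+/*
+ * Illustrative LUT usage (a sketch only, not a consumer in this patch):
+ * ramp an LED through four brightness levels of a 1 ms PWM period,
+ * roughly 100 ms per step, looping from the lowest entry upwards:
+ *
+ *	int duty_pct[] = {0, 25, 50, 100};
+ *
+ *	pm8xxx_pwm_lut_config(pwm, 1000, duty_pct, 100, 0, 4, 0, 0,
+ *			      PM_PWM_LUT_LOOP | PM_PWM_LUT_RAMP_UP);
+ *	pm8xxx_pwm_lut_enable(pwm, 1);
+ */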
+
+#if defined(CONFIG_DEBUG_FS)
+
+struct pm8xxx_pwm_dbg_device;
+
+struct pm8xxx_pwm_user {
+	int				pwm_id;
+	struct pwm_device		*pwm;
+	int				period;
+	int				duty_cycle;
+	int				enable;
+	struct pm8xxx_pwm_dbg_device	*dbgdev;
+};
+
+struct pm8xxx_pwm_dbg_device {
+	struct mutex		dbg_mutex;
+	struct device		*dev;
+	struct dentry		*dent;
+
+	struct pm8xxx_pwm_user	user[PM8XXX_PWM_CHANNELS];
+};
+
+static struct pm8xxx_pwm_dbg_device *pmic_dbg_device;
+
+static int dbg_pwm_check_period(int period)
+{
+	if (period < PM8XXX_PWM_PERIOD_MIN || period > PM8XXX_PWM_PERIOD_MAX) {
+		pr_err("period is invalid: %d\n", period);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int dbg_pwm_check_duty_cycle(int duty_cycle, const char *func_name)
+{
+	if (duty_cycle <= 0 || duty_cycle > 100) {
+		pr_err("%s: duty_cycle is invalid: %d\n",
+		      func_name, duty_cycle);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void dbg_pwm_check_handle(struct pm8xxx_pwm_user *puser)
+{
+	struct pwm_device *tmp;
+
+	if (puser->pwm == NULL) {
+		tmp = pwm_request(puser->pwm_id, "pwm-dbg");
+		if (IS_ERR(tmp)) {
+			pr_err("pwm_request: err=%ld\n", PTR_ERR(tmp));
+			puser->pwm = NULL;
+		} else {
+			puser->pwm = tmp;
+		}
+	}
+}
+
+static int dbg_pwm_enable_set(void *data, u64 val)
+{
+	struct pm8xxx_pwm_user	  *puser = data;
+	struct pm8xxx_pwm_dbg_device    *dbgdev = puser->dbgdev;
+	int     rc;
+
+	mutex_lock(&dbgdev->dbg_mutex);
+	rc = dbg_pwm_check_duty_cycle(puser->duty_cycle, __func__);
+	if (!rc) {
+		puser->enable = val;
+		dbg_pwm_check_handle(puser);
+		if (puser->pwm) {
+			if (puser->enable)
+				pwm_enable(puser->pwm);
+			else
+				pwm_disable(puser->pwm);
+		}
+	}
+	mutex_unlock(&dbgdev->dbg_mutex);
+	return 0;
+}
+
+static int dbg_pwm_enable_get(void *data, u64 *val)
+{
+	struct pm8xxx_pwm_user	  *puser = data;
+	struct pm8xxx_pwm_dbg_device    *dbgdev = puser->dbgdev;
+
+	mutex_lock(&dbgdev->dbg_mutex);
+	*val = puser->enable;
+	mutex_unlock(&dbgdev->dbg_mutex);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(dbg_pwm_enable_fops,
+			dbg_pwm_enable_get, dbg_pwm_enable_set,
+			"%lld\n");
+
+static int dbg_pwm_duty_cycle_set(void *data, u64 val)
+{
+	struct pm8xxx_pwm_user	  *puser = data;
+	struct pm8xxx_pwm_dbg_device    *dbgdev = puser->dbgdev;
+	int     rc;
+
+	mutex_lock(&dbgdev->dbg_mutex);
+	rc = dbg_pwm_check_duty_cycle(val, __func__);
+	if (!rc) {
+		puser->duty_cycle = val;
+		dbg_pwm_check_handle(puser);
+		if (puser->pwm) {
+			int     duty_us;
+
+			/* duty_cycle is a percentage of the period */
+			duty_us = puser->duty_cycle * puser->period / 100;
+			pwm_config(puser->pwm, duty_us, puser->period);
+		}
+	}
+	mutex_unlock(&dbgdev->dbg_mutex);
+	return 0;
+}
+
+static int dbg_pwm_duty_cycle_get(void *data, u64 *val)
+{
+	struct pm8xxx_pwm_user	  *puser = data;
+	struct pm8xxx_pwm_dbg_device    *dbgdev = puser->dbgdev;
+
+	mutex_lock(&dbgdev->dbg_mutex);
+	*val = puser->duty_cycle;
+	mutex_unlock(&dbgdev->dbg_mutex);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(dbg_pwm_duty_cycle_fops,
+			dbg_pwm_duty_cycle_get, dbg_pwm_duty_cycle_set,
+			"%lld\n");
+
+static int dbg_pwm_period_set(void *data, u64 val)
+{
+	struct pm8xxx_pwm_user	  *puser = data;
+	struct pm8xxx_pwm_dbg_device    *dbgdev = puser->dbgdev;
+	int     rc;
+
+	mutex_lock(&dbgdev->dbg_mutex);
+	rc = dbg_pwm_check_period(val);
+	if (!rc)
+		puser->period = val;
+	mutex_unlock(&dbgdev->dbg_mutex);
+	return 0;
+}
+
+static int dbg_pwm_period_get(void *data, u64 *val)
+{
+	struct pm8xxx_pwm_user	  *puser = data;
+	struct pm8xxx_pwm_dbg_device    *dbgdev = puser->dbgdev;
+
+	mutex_lock(&dbgdev->dbg_mutex);
+	*val = puser->period;
+	mutex_unlock(&dbgdev->dbg_mutex);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(dbg_pwm_period_fops,
+			dbg_pwm_period_get, dbg_pwm_period_set, "%lld\n");
+
+static int __devinit pm8xxx_pwm_dbg_probe(struct device *dev)
+{
+	struct pm8xxx_pwm_dbg_device    *dbgdev;
+	struct dentry		   *dent;
+	struct dentry		   *temp;
+	struct pm8xxx_pwm_user	  *puser;
+	int			     i;
+
+	if (dev == NULL) {
+		pr_err("no parent data passed in.\n");
+		return -EINVAL;
+	}
+
+	dbgdev = kzalloc(sizeof *dbgdev, GFP_KERNEL);
+	if (dbgdev == NULL) {
+		pr_err("kzalloc() failed.\n");
+		return -ENOMEM;
+	}
+
+	mutex_init(&dbgdev->dbg_mutex);
+
+	dbgdev->dev = dev;
+
+	dent = debugfs_create_dir("pm8xxx-pwm-dbg", NULL);
+	if (dent == NULL || IS_ERR(dent)) {
+		pr_err("ERR debugfs_create_dir: dent=%p\n", dent);
+		kfree(dbgdev);
+		return -ENOMEM;
+	}
+
+	dbgdev->dent = dent;
+
+	for (i = 0; i < PM8XXX_PWM_CHANNELS; i++) {
+		char pwm_ch[] = "0";
+
+		pwm_ch[0] = '0' + i;
+		dent = debugfs_create_dir(pwm_ch, dbgdev->dent);
+		if (dent == NULL || IS_ERR(dent)) {
+			pr_err("ERR: pwm=%d: dir: dent=%p\n", i, dent);
+			goto debug_error;
+		}
+
+		puser = &dbgdev->user[i];
+		puser->dbgdev = dbgdev;
+		puser->pwm_id = i;
+		temp = debugfs_create_file("period", S_IRUGO | S_IWUSR,
+				dent, puser, &dbg_pwm_period_fops);
+		if (temp == NULL || IS_ERR(temp)) {
+			pr_err("ERR: pwm=%d: period: dent=%p\n", i, dent);
+			goto debug_error;
+		}
+
+		temp = debugfs_create_file("duty-cycle", S_IRUGO | S_IWUSR,
+				dent, puser, &dbg_pwm_duty_cycle_fops);
+		if (temp == NULL || IS_ERR(temp)) {
+			pr_err("ERR: pwm=%d: duty-cycle: dent=%p\n", i, dent);
+			goto debug_error;
+		}
+
+		temp = debugfs_create_file("enable", S_IRUGO | S_IWUSR,
+				dent, puser, &dbg_pwm_enable_fops);
+		if (temp == NULL || IS_ERR(temp)) {
+			pr_err("ERR: pwm=%d: enable: dent=%p\n", i, dent);
+			goto debug_error;
+		}
+	}
+
+	pmic_dbg_device = dbgdev;
+
+	return 0;
+
+debug_error:
+	debugfs_remove_recursive(dbgdev->dent);
+	kfree(dbgdev);
+	return -ENOMEM;
+}
+
+static int __devexit pm8xxx_pwm_dbg_remove(void)
+{
+	if (pmic_dbg_device) {
+		debugfs_remove_recursive(pmic_dbg_device->dent);
+		kfree(pmic_dbg_device);
+	}
+	return 0;
+}
+
+#else
+
+static int __devinit pm8xxx_pwm_dbg_probe(struct device *dev)
+{
+	return 0;
+}
+
+static int __devexit pm8xxx_pwm_dbg_remove(void)
+{
+	return 0;
+}
+
+#endif
+
+static int __devinit pm8xxx_pwm_probe(struct platform_device *pdev)
+{
+	struct pm8xxx_pwm_chip	*chip;
+	int	i;
+
+	chip = kzalloc(sizeof *chip, GFP_KERNEL);
+	if (chip == NULL) {
+		pr_err("kzalloc() failed.\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < PM8XXX_PWM_CHANNELS; i++) {
+		chip->pwm_dev[i].pwm_id = i;
+		chip->pwm_dev[i].chip = chip;
+	}
+
+	mutex_init(&chip->pwm_mutex);
+
+	chip->dev = &pdev->dev;
+	pwm_chip = chip;
+	platform_set_drvdata(pdev, chip);
+
+	if (pm8xxx_pwm_dbg_probe(&pdev->dev) < 0)
+		pr_err("could not set up debugfs\n");
+
+	pr_notice("OK\n");
+	return 0;
+}
+
+static int __devexit pm8xxx_pwm_remove(struct platform_device *pdev)
+{
+	struct pm8xxx_pwm_chip	*chip = platform_get_drvdata(pdev);
+
+	pm8xxx_pwm_dbg_remove();
+	mutex_destroy(&chip->pwm_mutex);
+	platform_set_drvdata(pdev, NULL);
+	kfree(chip);
+	return 0;
+}
+
+static struct platform_driver pm8xxx_pwm_driver = {
+	.probe		= pm8xxx_pwm_probe,
+	.remove		= __devexit_p(pm8xxx_pwm_remove),
+	.driver		= {
+		.name = PM8XXX_PWM_DEV_NAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init pm8xxx_pwm_init(void)
+{
+	return platform_driver_register(&pm8xxx_pwm_driver);
+}
+
+static void __exit pm8xxx_pwm_exit(void)
+{
+	platform_driver_unregister(&pm8xxx_pwm_driver);
+}
+
+subsys_initcall(pm8xxx_pwm_init);
+module_exit(pm8xxx_pwm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PM8XXX PWM driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:" PM8XXX_PWM_DEV_NAME);
diff --git a/drivers/mfd/pmic8058.c b/drivers/mfd/pmic8058.c
new file mode 100644
index 0000000..87d4475
--- /dev/null
+++ b/drivers/mfd/pmic8058.c
@@ -0,0 +1,1308 @@
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * Qualcomm PMIC8058 driver
+ *
+ */
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/ratelimit.h>
+#include <linux/kthread.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/pmic8058.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/irq.h>
+
+/* PMIC8058 Revision */
+#define SSBI_REG_REV			0x002  /* PMIC4 revision */
+
+/* PMIC8058 IRQ */
+#define	SSBI_REG_ADDR_IRQ_BASE		0x1BB
+
+#define	SSBI_REG_ADDR_IRQ_ROOT		(SSBI_REG_ADDR_IRQ_BASE + 0)
+#define	SSBI_REG_ADDR_IRQ_M_STATUS1	(SSBI_REG_ADDR_IRQ_BASE + 1)
+#define	SSBI_REG_ADDR_IRQ_M_STATUS2	(SSBI_REG_ADDR_IRQ_BASE + 2)
+#define	SSBI_REG_ADDR_IRQ_M_STATUS3	(SSBI_REG_ADDR_IRQ_BASE + 3)
+#define	SSBI_REG_ADDR_IRQ_M_STATUS4	(SSBI_REG_ADDR_IRQ_BASE + 4)
+#define	SSBI_REG_ADDR_IRQ_BLK_SEL	(SSBI_REG_ADDR_IRQ_BASE + 5)
+#define	SSBI_REG_ADDR_IRQ_IT_STATUS	(SSBI_REG_ADDR_IRQ_BASE + 6)
+#define	SSBI_REG_ADDR_IRQ_CONFIG	(SSBI_REG_ADDR_IRQ_BASE + 7)
+#define	SSBI_REG_ADDR_IRQ_RT_STATUS	(SSBI_REG_ADDR_IRQ_BASE + 8)
+
+#define	PM8058_IRQF_LVL_SEL		0x01	/* level select */
+#define	PM8058_IRQF_MASK_FE		0x02	/* mask falling edge */
+#define	PM8058_IRQF_MASK_RE		0x04	/* mask rising edge */
+#define	PM8058_IRQF_CLR			0x08	/* clear interrupt */
+#define	PM8058_IRQF_BITS_MASK		0x70
+#define	PM8058_IRQF_BITS_SHIFT		4
+#define	PM8058_IRQF_WRITE		0x80
+
+#define	PM8058_IRQF_MASK_ALL		(PM8058_IRQF_MASK_FE | \
+					PM8058_IRQF_MASK_RE)
+#define PM8058_IRQF_W_C_M		(PM8058_IRQF_WRITE |	\
+					PM8058_IRQF_CLR |	\
+					PM8058_IRQF_MASK_ALL)
+
+/* MISC register */
+#define	SSBI_REG_ADDR_MISC		0x1CC
+
+/* PON CNTL 1 register */
+#define SSBI_REG_ADDR_PON_CNTL_1	0x01C
+
+#define PM8058_PON_PUP_MASK		0xF0
+
+#define PM8058_PON_WD_EN_MASK		0x08
+#define PM8058_PON_WD_EN_RESET		0x08
+#define PM8058_PON_WD_EN_PWR_OFF	0x00
+
+/* PON CNTL 4 register */
+#define SSBI_REG_ADDR_PON_CNTL_4 0x98
+#define PM8058_PON_RESET_EN_MASK 0x01
+
+/* PON CNTL 5 register */
+#define SSBI_REG_ADDR_PON_CNTL_5 0x7B
+#define PM8058_HARD_RESET_EN_MASK 0x08
+
+/* Regulator L22 control register */
+#define SSBI_REG_ADDR_L22_CTRL		0x121
+
+/* SLEEP CNTL register */
+#define SSBI_REG_ADDR_SLEEP_CNTL	0x02B
+
+#define PM8058_SLEEP_SMPL_EN_MASK	0x04
+#define PM8058_SLEEP_SMPL_EN_RESET	0x04
+#define PM8058_SLEEP_SMPL_EN_PWR_OFF	0x00
+
+#define PM8058_SLEEP_SMPL_SEL_MASK	0x03
+#define PM8058_SLEEP_SMPL_SEL_MIN	0
+#define PM8058_SLEEP_SMPL_SEL_MAX	3
+
+#define	MAX_PM_IRQ		256
+#define	MAX_PM_BLOCKS		(MAX_PM_IRQ / 8 + 1)
+#define	MAX_PM_MASTERS		(MAX_PM_BLOCKS / 8 + 1)
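+
+/*
+ * Interrupts are grouped 8 IRQ bits per block and 8 blocks per master.
+ * Worked example (illustrative): PMIC IRQ 100 lives in block 100 / 8 = 12,
+ * bit 100 % 8 = 4, and block 12 belongs to master 12 / 8 = 1.  The ISR
+ * below walks root -> master -> block -> bit in exactly that order.
+ */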
+
+struct pm8058_chip {
+	struct pm8058_platform_data	pdata;
+
+	struct i2c_client		*dev;
+
+	u8	irqs_allowed[MAX_PM_BLOCKS];
+	u8	blocks_allowed[MAX_PM_MASTERS];
+	u8	masters_allowed;
+	int	pm_max_irq;
+	int	pm_max_blocks;
+	int	pm_max_masters;
+
+	u8	config[MAX_PM_IRQ];
+	u8	bus_unlock_config[MAX_PM_IRQ];
+	u8	wake_enable[MAX_PM_IRQ];
+	u16	count_wakeable;
+
+	u8	revision;
+
+	struct mutex	pm_lock;
+};
+
+#if defined(CONFIG_DEBUG_FS)
+struct pm8058_dbg_device {
+	struct mutex		dbg_mutex;
+	struct pm8058_chip	*pm_chip;
+	struct dentry		*dent;
+	int			addr;
+};
+
+static struct pm8058_dbg_device *pmic_dbg_device;
+#endif
+
+static struct pm8058_chip *pmic_chip;
+
+/* Helper Functions */
+DEFINE_RATELIMIT_STATE(pm8058_msg_ratelimit, 60 * HZ, 10);
+
+static inline int pm8058_can_print(void)
+{
+	return __ratelimit(&pm8058_msg_ratelimit);
+}
+
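+/*
+ * These helpers are assumed to ride on the MSM SSBI controller, which is
+ * registered as an I2C adapter: the i2c_msg.addr field carries the PMIC
+ * register address rather than a bus slave address.
+ */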
+static inline int
+ssbi_write(struct i2c_client *client, u16 addr, const u8 *buf, size_t len)
+{
+	int	rc;
+	struct	i2c_msg msg = {
+		.addr           = addr,
+		.flags          = 0x0,
+		.buf            = (u8 *)buf,
+		.len            = len,
+	};
+
+	rc = i2c_transfer(client->adapter, &msg, 1);
+	return (rc == 1) ? 0 : rc;
+}
+
+static inline int
+ssbi_read(struct i2c_client *client, u16 addr, u8 *buf, size_t len)
+{
+	int	rc;
+	struct	i2c_msg msg = {
+		.addr           = addr,
+		.flags          = I2C_M_RD,
+		.buf            = buf,
+		.len            = len,
+	};
+
+	rc = i2c_transfer(client->adapter, &msg, 1);
+	return (rc == 1) ? 0 : rc;
+}
+
+static int pm8058_masked_write(u16 addr, u8 val, u8 mask)
+{
+	int rc;
+	u8 reg;
+
+	if (pmic_chip == NULL)
+		return -ENODEV;
+
+	mutex_lock(&pmic_chip->pm_lock);
+
+	rc = ssbi_read(pmic_chip->dev, addr, &reg, 1);
+	if (rc) {
+		pr_err("%s: ssbi_read(0x%03X) failed: rc=%d\n", __func__, addr,
+			rc);
+		goto done;
+	}
+
+	reg &= ~mask;
+	reg |= val & mask;
+
+	rc = ssbi_write(pmic_chip->dev, addr, &reg, 1);
+	if (rc)
+		pr_err("%s: ssbi_write(0x%03X)=0x%02X failed: rc=%d\n",
+			__func__, addr, reg, rc);
+done:
+	mutex_unlock(&pmic_chip->pm_lock);
+
+	return rc;
+}
+
+/* External APIs */
+int pm8058_rev(struct pm8058_chip *chip)
+{
+	if (chip == NULL)
+		return -EINVAL;
+
+	return chip->revision;
+}
+EXPORT_SYMBOL(pm8058_rev);
+
+int pm8058_irq_get_rt_status(struct pm8058_chip *chip, int irq)
+{
+	int     rc;
+	u8      block, bits, bit;
+
+	if (chip == NULL || irq < chip->pdata.irq_base ||
+			irq >= chip->pdata.irq_base + MAX_PM_IRQ)
+		return -EINVAL;
+
+	irq -= chip->pdata.irq_base;
+
+	block = irq / 8;
+	bit = irq % 8;
+
+	mutex_lock(&chip->pm_lock);
+
+	rc = ssbi_write(chip->dev, SSBI_REG_ADDR_IRQ_BLK_SEL, &block, 1);
+	if (rc) {
+		pr_err("%s: FAIL ssbi_write(): rc=%d (Select Block)\n",
+				__func__, rc);
+		goto bail_out;
+	}
+
+	rc = ssbi_read(chip->dev, SSBI_REG_ADDR_IRQ_RT_STATUS, &bits, 1);
+	if (rc) {
+		pr_err("%s: FAIL ssbi_read(): rc=%d (Read RT Status)\n",
+				__func__, rc);
+		goto bail_out;
+	}
+
+	rc = (bits & (1 << bit)) ? 1 : 0;
+
+bail_out:
+	mutex_unlock(&chip->pm_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(pm8058_irq_get_rt_status);
+
+int pm8058_read(struct pm8058_chip *chip, u16 addr, u8 *values,
+		unsigned int len)
+{
+	if (chip == NULL)
+		return -EINVAL;
+
+	return ssbi_read(chip->dev, addr, values, len);
+}
+EXPORT_SYMBOL(pm8058_read);
+
+int pm8058_write(struct pm8058_chip *chip, u16 addr, u8 *values,
+		 unsigned int len)
+{
+	if (chip == NULL)
+		return -EINVAL;
+
+	return ssbi_write(chip->dev, addr, values, len);
+}
+EXPORT_SYMBOL(pm8058_write);
+
+int pm8058_misc_control(struct pm8058_chip *chip, int mask, int flag)
+{
+	int		rc;
+	u8		misc;
+
+	if (chip == NULL)
+		chip = pmic_chip;	/* for calls from non child */
+	if (chip == NULL)
+		return -ENODEV;
+
+	mutex_lock(&chip->pm_lock);
+
+	rc = ssbi_read(chip->dev, SSBI_REG_ADDR_MISC, &misc, 1);
+	if (rc) {
+		pr_err("%s: FAIL ssbi_read(0x%x): rc=%d\n",
+		       __func__, SSBI_REG_ADDR_MISC, rc);
+		goto get_out;
+	}
+
+	misc &= ~mask;
+	misc |= flag;
+
+	rc = ssbi_write(chip->dev, SSBI_REG_ADDR_MISC, &misc, 1);
+	if (rc) {
+		pr_err("%s: FAIL ssbi_write(0x%x)=0x%x: rc=%d\n",
+		       __func__, SSBI_REG_ADDR_MISC, misc, rc);
+		goto get_out;
+	}
+
+get_out:
+	mutex_unlock(&chip->pm_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(pm8058_misc_control);
+
+/**
+ * pm8058_smpl_control - enables/disables SMPL detection
+ * @enable: 0 = shutdown PMIC on power loss, 1 = reset PMIC on power loss
+ *
+ * This function enables or disables the Sudden Momentary Power Loss detection
+ * module.  If SMPL detection is enabled, then when a sufficiently long power
+ * loss event occurs, the PMIC will automatically reset itself.  If SMPL
+ * detection is disabled, then the PMIC will shutdown when power loss occurs.
+ *
+ * RETURNS: an appropriate -ERRNO error value on error, or zero for success.
+ */
+int pm8058_smpl_control(int enable)
+{
+	return pm8058_masked_write(SSBI_REG_ADDR_SLEEP_CNTL,
+				   (enable ? PM8058_SLEEP_SMPL_EN_RESET
+					   : PM8058_SLEEP_SMPL_EN_PWR_OFF),
+				   PM8058_SLEEP_SMPL_EN_MASK);
+}
+EXPORT_SYMBOL(pm8058_smpl_control);
+
+/**
+ * pm8058_smpl_set_delay - sets the SMPL detection time delay
+ * @delay: enum value corresponding to delay time
+ *
+ * This function sets the time delay of the SMPL detection module.  If power
+ * is reapplied within this interval, then the PMIC will reset automatically.  The
+ * SMPL detection module must be enabled for this delay time to take effect.
+ *
+ * RETURNS: an appropriate -ERRNO error value on error, or zero for success.
+ */
+int pm8058_smpl_set_delay(enum pm8058_smpl_delay delay)
+{
+	if (delay < PM8058_SLEEP_SMPL_SEL_MIN
+	    || delay > PM8058_SLEEP_SMPL_SEL_MAX) {
+		pr_err("%s: invalid delay specified: %d\n", __func__, delay);
+		return -EINVAL;
+	}
+
+	return pm8058_masked_write(SSBI_REG_ADDR_SLEEP_CNTL, delay,
+				   PM8058_SLEEP_SMPL_SEL_MASK);
+}
+EXPORT_SYMBOL(pm8058_smpl_set_delay);
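+
+/*
+ * Illustrative board-code usage (a sketch only): arm SMPL so a brief power
+ * loss restarts the PMIC instead of shutting it down, where 'delay' is one
+ * of the enum pm8058_smpl_delay values from the pmic8058 header:
+ *
+ *	pm8058_smpl_set_delay(delay);
+ *	pm8058_smpl_control(1);
+ */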
+
+/**
+ * pm8058_watchdog_reset_control - enables/disables watchdog reset detection
+ * @enable: 0 = shutdown when PS_HOLD goes low, 1 = reset when PS_HOLD goes low
+ *
+ * This function enables or disables the PMIC watchdog reset detection feature.
+ * If watchdog reset detection is enabled, then the PMIC will reset itself
+ * when PS_HOLD goes low.  If it is not enabled, then the PMIC will shutdown
+ * when PS_HOLD goes low.
+ *
+ * RETURNS: an appropriate -ERRNO error value on error, or zero for success.
+ */
+int pm8058_watchdog_reset_control(int enable)
+{
+	return pm8058_masked_write(SSBI_REG_ADDR_PON_CNTL_1,
+				   (enable ? PM8058_PON_WD_EN_RESET
+					   : PM8058_PON_WD_EN_PWR_OFF),
+				   PM8058_PON_WD_EN_MASK);
+}
+EXPORT_SYMBOL(pm8058_watchdog_reset_control);
+
+int pm8058_reset_pwr_off(int reset)
+{
+	int		rc;
+	u8		pon;
+	u8		ctrl;
+	u8		smpl;
+
+	if (pmic_chip == NULL)
+		return -ENODEV;
+
+	/* Set regulator L22 to 1.225V in high power mode. */
+	rc = ssbi_read(pmic_chip->dev, SSBI_REG_ADDR_L22_CTRL, &ctrl, 1);
+	if (rc) {
+		pr_err("%s: FAIL ssbi_read(0x%x): rc=%d\n", __func__,
+			SSBI_REG_ADDR_L22_CTRL, rc);
+		goto get_out3;
+	}
+	/* Leave pull-down state intact. */
+	ctrl &= 0x40;
+	ctrl |= 0x93;
+	rc = ssbi_write(pmic_chip->dev, SSBI_REG_ADDR_L22_CTRL, &ctrl, 1);
+	if (rc)
+		pr_err("%s: FAIL ssbi_write(0x%x)=0x%x: rc=%d\n", __func__,
+			SSBI_REG_ADDR_L22_CTRL, ctrl, rc);
+
+get_out3:
+	if (!reset) {
+		/* Only modify the SLEEP_CNTL reg if shutdown is desired. */
+		rc = ssbi_read(pmic_chip->dev, SSBI_REG_ADDR_SLEEP_CNTL,
+			       &smpl, 1);
+		if (rc) {
+			pr_err("%s: FAIL ssbi_read(0x%x): rc=%d\n",
+			       __func__, SSBI_REG_ADDR_SLEEP_CNTL, rc);
+			goto get_out2;
+		}
+
+		smpl &= ~PM8058_SLEEP_SMPL_EN_MASK;
+		smpl |= PM8058_SLEEP_SMPL_EN_PWR_OFF;
+
+		rc = ssbi_write(pmic_chip->dev, SSBI_REG_ADDR_SLEEP_CNTL,
+				&smpl, 1);
+		if (rc)
+			pr_err("%s: FAIL ssbi_write(0x%x)=0x%x: rc=%d\n",
+			       __func__, SSBI_REG_ADDR_SLEEP_CNTL, smpl, rc);
+	}
+
+get_out2:
+	rc = ssbi_read(pmic_chip->dev, SSBI_REG_ADDR_PON_CNTL_1, &pon, 1);
+	if (rc) {
+		pr_err("%s: FAIL ssbi_read(0x%x): rc=%d\n",
+		       __func__, SSBI_REG_ADDR_PON_CNTL_1, rc);
+		goto get_out;
+	}
+
+	pon &= ~PM8058_PON_WD_EN_MASK;
+	pon |= reset ? PM8058_PON_WD_EN_RESET : PM8058_PON_WD_EN_PWR_OFF;
+
+	/* Enable all pullups */
+	pon |= PM8058_PON_PUP_MASK;
+
+	rc = ssbi_write(pmic_chip->dev, SSBI_REG_ADDR_PON_CNTL_1, &pon, 1);
+	if (rc) {
+		pr_err("%s: FAIL ssbi_write(0x%x)=0x%x: rc=%d\n",
+		       __func__, SSBI_REG_ADDR_PON_CNTL_1, pon, rc);
+		goto get_out;
+	}
+
+get_out:
+	return rc;
+}
+EXPORT_SYMBOL(pm8058_reset_pwr_off);
+
+/*
+ * Power-on hard reset configuration:
+ *   config = DISABLE_HARD_RESET to disable hard reset
+ *	    = SHUTDOWN_ON_HARD_RESET to turn off the system on hard reset
+ *	    = RESTART_ON_HARD_RESET to restart the system on hard reset
+ */
+int pm8058_hard_reset_config(enum pon_config config)
+{
+	int rc, ret;
+	u8 pon, pon_5;
+
+	if (config >= MAX_PON_CONFIG)
+		return -EINVAL;
+
+	if (pmic_chip == NULL)
+		return -ENODEV;
+
+	mutex_lock(&pmic_chip->pm_lock);
+
+	rc = ssbi_read(pmic_chip->dev, SSBI_REG_ADDR_PON_CNTL_5, &pon, 1);
+	if (rc) {
+		pr_err("%s: FAIL ssbi_read(0x%x): rc=%d\n",
+		       __func__, SSBI_REG_ADDR_PON_CNTL_5, rc);
+		mutex_unlock(&pmic_chip->pm_lock);
+		return rc;
+	}
+
+	pon_5 = pon;
+	if (config != DISABLE_HARD_RESET)
+		pon |= PM8058_HARD_RESET_EN_MASK;
+	else
+		pon &= ~PM8058_HARD_RESET_EN_MASK;
+
+	rc = ssbi_write(pmic_chip->dev, SSBI_REG_ADDR_PON_CNTL_5, &pon, 1);
+	if (rc) {
+		pr_err("%s: FAIL ssbi_write(0x%x)=0x%x: rc=%d\n",
+		       __func__, SSBI_REG_ADDR_PON_CNTL_5, pon, rc);
+		mutex_unlock(&pmic_chip->pm_lock);
+		return rc;
+	}
+
+	if (config == DISABLE_HARD_RESET) {
+		mutex_unlock(&pmic_chip->pm_lock);
+		return 0;
+	}
+
+	rc = ssbi_read(pmic_chip->dev, SSBI_REG_ADDR_PON_CNTL_4, &pon, 1);
+	if (rc) {
+		pr_err("%s: FAIL ssbi_read(0x%x): rc=%d\n",
+		       __func__, SSBI_REG_ADDR_PON_CNTL_4, rc);
+		goto err_restore_pon_5;
+	}
+
+	if (config == RESTART_ON_HARD_RESET)
+		pon |= PM8058_PON_RESET_EN_MASK;
+	else
+		pon &= ~PM8058_PON_RESET_EN_MASK;
+
+	rc = ssbi_write(pmic_chip->dev, SSBI_REG_ADDR_PON_CNTL_4, &pon, 1);
+	if (rc) {
+		pr_err("%s: FAIL ssbi_write(0x%x)=0x%x: rc=%d\n",
+		       __func__, SSBI_REG_ADDR_PON_CNTL_4, pon, rc);
+		goto err_restore_pon_5;
+	}
+	mutex_unlock(&pmic_chip->pm_lock);
+	return 0;
+
+err_restore_pon_5:
+	ret = ssbi_write(pmic_chip->dev, SSBI_REG_ADDR_PON_CNTL_5, &pon_5, 1);
+	if (ret)
+		pr_err("%s: FAIL ssbi_write(0x%x)=0x%x: rc=%d\n",
+		       __func__, SSBI_REG_ADDR_PON_CNTL_5, pon_5, ret);
+	mutex_unlock(&pmic_chip->pm_lock);
+	return rc;
+}
+EXPORT_SYMBOL(pm8058_hard_reset_config);
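+
+/*
+ * Illustrative usage (sketch only): board reset code would pick one of the
+ * three behaviours documented above, e.g.
+ *
+ *	pm8058_hard_reset_config(RESTART_ON_HARD_RESET);
+ *
+ * to restart rather than power off the system on a hard reset event.
+ */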
+
+/* Internal functions */
+static inline int
+pm8058_config_irq(struct pm8058_chip *chip, u8 *bp, u8 *cp)
+{
+	int	rc;
+
+	rc = ssbi_write(chip->dev, SSBI_REG_ADDR_IRQ_BLK_SEL, bp, 1);
+	if (rc) {
+		pr_err("%s: ssbi_write: rc=%d (Select block)\n",
+			__func__, rc);
+		goto bail_out;
+	}
+
+	rc = ssbi_write(chip->dev, SSBI_REG_ADDR_IRQ_CONFIG, cp, 1);
+	if (rc)
+		pr_err("%s: ssbi_write: rc=%d (Configure IRQ)\n",
+			__func__, rc);
+
+bail_out:
+	return rc;
+}
+
+static void pm8058_irq_mask(struct irq_data *data)
+{
+	int	master, irq_bit;
+	struct	pm8058_chip *chip = irq_data_get_irq_chip_data(data);
+	u8	block, config;
+	unsigned int irq = data->irq;
+
+	irq -= chip->pdata.irq_base;
+	block = irq / 8;
+	master = block / 8;
+	irq_bit = irq % 8;
+
+	chip->irqs_allowed[block] &= ~(1 << irq_bit);
+	if (!chip->irqs_allowed[block]) {
+		chip->blocks_allowed[master] &= ~(1 << (block % 8));
+
+		if (!chip->blocks_allowed[master])
+			chip->masters_allowed &= ~(1 << master);
+	}
+
+	config = PM8058_IRQF_WRITE | chip->config[irq] |
+		PM8058_IRQF_MASK_FE | PM8058_IRQF_MASK_RE;
+	chip->bus_unlock_config[irq] = config;
+}
+
+static void pm8058_irq_unmask(struct irq_data *data)
+{
+	int	master, irq_bit;
+	struct	pm8058_chip *chip = irq_data_get_irq_chip_data(data);
+	u8	block, config, old_irqs_allowed, old_blocks_allowed;
+	unsigned int irq = data->irq;
+
+	irq -= chip->pdata.irq_base;
+	block = irq / 8;
+	master = block / 8;
+	irq_bit = irq % 8;
+
+	old_irqs_allowed = chip->irqs_allowed[block];
+	chip->irqs_allowed[block] |= 1 << irq_bit;
+	if (!old_irqs_allowed) {
+		master = block / 8;
+
+		old_blocks_allowed = chip->blocks_allowed[master];
+		chip->blocks_allowed[master] |= 1 << (block % 8);
+
+		if (!old_blocks_allowed)
+			chip->masters_allowed |= 1 << master;
+	}
+
+	config = PM8058_IRQF_WRITE | chip->config[irq];
+	chip->bus_unlock_config[irq] = config;
+}
+
+static void pm8058_irq_ack(struct irq_data *data)
+{
+	struct	pm8058_chip *chip = irq_data_get_irq_chip_data(data);
+	u8	block, config;
+	unsigned int irq = data->irq;
+
+	irq -= chip->pdata.irq_base;
+	block = irq / 8;
+
+	config = PM8058_IRQF_WRITE | chip->config[irq] | PM8058_IRQF_CLR;
+	/* Keep the mask */
+	if (!(chip->irqs_allowed[block] & (1 << (irq % 8))))
+		config |= PM8058_IRQF_MASK_FE | PM8058_IRQF_MASK_RE;
+	chip->bus_unlock_config[irq] = config;
+}
+
+static int pm8058_irq_set_type(struct irq_data *data, unsigned int flow_type)
+{
+	int	master, irq_bit;
+	struct	pm8058_chip *chip = irq_data_get_irq_chip_data(data);
+	u8	block, config;
+	unsigned int irq = data->irq;
+
+	irq -= chip->pdata.irq_base;
+	if (irq > chip->pm_max_irq) {
+		chip->pm_max_irq = irq;
+		chip->pm_max_blocks =
+			chip->pm_max_irq / 8 + 1;
+		chip->pm_max_masters =
+			chip->pm_max_blocks / 8 + 1;
+	}
+	block = irq / 8;
+	master = block / 8;
+	irq_bit = irq % 8;
+
+	chip->config[irq] = (irq_bit << PM8058_IRQF_BITS_SHIFT) |
+			PM8058_IRQF_MASK_RE | PM8058_IRQF_MASK_FE;
+	if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
+		if (flow_type & IRQF_TRIGGER_RISING)
+			chip->config[irq] &= ~PM8058_IRQF_MASK_RE;
+		if (flow_type & IRQF_TRIGGER_FALLING)
+			chip->config[irq] &= ~PM8058_IRQF_MASK_FE;
+	} else {
+		chip->config[irq] |= PM8058_IRQF_LVL_SEL;
+
+		if (flow_type & IRQF_TRIGGER_HIGH)
+			chip->config[irq] &= ~PM8058_IRQF_MASK_RE;
+		else
+			chip->config[irq] &= ~PM8058_IRQF_MASK_FE;
+	}
+
+	config = PM8058_IRQF_WRITE | chip->config[irq] | PM8058_IRQF_CLR;
+	chip->bus_unlock_config[irq] = config;
+	return 0;
+}
+
+static int pm8058_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+	struct	pm8058_chip *chip = irq_data_get_irq_chip_data(data);
+	unsigned int irq = data->irq;
+
+	irq -= chip->pdata.irq_base;
+	if (on) {
+		if (!chip->wake_enable[irq]) {
+			chip->wake_enable[irq] = 1;
+			chip->count_wakeable++;
+		}
+	} else {
+		if (chip->wake_enable[irq]) {
+			chip->wake_enable[irq] = 0;
+			chip->count_wakeable--;
+		}
+	}
+
+	return 0;
+}
+
+static void pm8058_irq_bus_lock(struct irq_data *data)
+{
+	u8	block;
+	struct	pm8058_chip *chip = irq_data_get_irq_chip_data(data);
+	unsigned int irq = data->irq;
+
+	irq -= chip->pdata.irq_base;
+	block = irq / 8;
+	chip->bus_unlock_config[irq] = 0;
+
+	mutex_lock(&chip->pm_lock);
+}
+
+static void pm8058_irq_bus_sync_unlock(struct irq_data *data)
+{
+	u8	block, config;
+	struct	pm8058_chip *chip = irq_data_get_irq_chip_data(data);
+	unsigned int irq = data->irq;
+
+	irq -= chip->pdata.irq_base;
+	block = irq / 8;
+	config = chip->bus_unlock_config[irq];
+	/* don't waste CPU cycles if we don't have data to write */
+	if (config)
+		pm8058_config_irq(chip, &block, &config);
+	mutex_unlock(&chip->pm_lock);
+}
+
+static inline int
+pm8058_read_root(struct pm8058_chip *chip, u8 *rp)
+{
+	int	rc;
+
+	rc = ssbi_read(chip->dev, SSBI_REG_ADDR_IRQ_ROOT, rp, 1);
+	if (rc) {
+		pr_err("%s: FAIL ssbi_read(): rc=%d (Read Root)\n",
+			__func__, rc);
+		*rp = 0;
+	}
+
+	return rc;
+}
+
+static inline int
+pm8058_read_master(struct pm8058_chip *chip, u8 m, u8 *bp)
+{
+	int	rc;
+
+	rc = ssbi_read(chip->dev, SSBI_REG_ADDR_IRQ_M_STATUS1 + m, bp, 1);
+	if (rc) {
+		pr_err("%s: FAIL ssbi_read(): rc=%d (Read Master)\n",
+			__func__, rc);
+		*bp = 0;
+	}
+
+	return rc;
+}
+
+static inline int
+pm8058_read_block(struct pm8058_chip *chip, u8 *bp, u8 *ip)
+{
+	int	rc;
+
+	rc = ssbi_write(chip->dev, SSBI_REG_ADDR_IRQ_BLK_SEL, bp, 1);
+	if (rc) {
+		pr_err("%s: FAIL ssbi_write(): rc=%d (Select Block)\n",
+		       __func__, rc);
+		*bp = 0;
+		goto bail_out;
+	}
+
+	rc = ssbi_read(chip->dev, SSBI_REG_ADDR_IRQ_IT_STATUS, ip, 1);
+	if (rc)
+		pr_err("%s: FAIL ssbi_read(): rc=%d (Read Status)\n",
+		       __func__, rc);
+
+bail_out:
+	return rc;
+}
+
+static irqreturn_t pm8058_isr_thread(int irq_requested, void *data)
+{
+	struct pm8058_chip *chip = data;
+	int	i, j, k;
+	u8	root, block, config, bits;
+	u8	blocks[MAX_PM_MASTERS];
+	int	masters = 0, irq, handled = 0, spurious = 0;
+	u16     irqs_to_handle[MAX_PM_IRQ];
+
+	mutex_lock(&chip->pm_lock);
+
+	/* Read root for masters */
+	if (pm8058_read_root(chip, &root))
+		goto bail_out;
+
+	masters = root >> 1;
+
+	if (!(masters & chip->masters_allowed) ||
+	    (masters & ~chip->masters_allowed)) {
+		spurious = 1000000;
+	}
+
+	/* Read allowed masters for blocks. */
+	for (i = 0; i < chip->pm_max_masters; i++) {
+		if (masters & (1 << i)) {
+			if (pm8058_read_master(chip, i, &blocks[i]))
+				goto bail_out;
+
+			if (!blocks[i]) {
+				if (pm8058_can_print())
+					pr_err("%s: Spurious master: %d "
+					       "(blocks=0)\n", __func__, i);
+				spurious += 10000;
+			}
+		} else
+			blocks[i] = 0;
+	}
+
+	/* Select block, read status and call isr */
+	for (i = 0; i < chip->pm_max_masters; i++) {
+		if (!blocks[i])
+			continue;
+
+		for (j = 0; j < 8; j++) {
+			if (!(blocks[i] & (1 << j)))
+				continue;
+
+			block = i * 8 + j;	/* block # */
+			if (pm8058_read_block(chip, &block, &bits))
+				goto bail_out;
+
+			if (!bits) {
+				if (pm8058_can_print())
+					pr_err("%s: Spurious block: "
+					       "[master, block]=[%d, %d] "
+					       "(bits=0)\n", __func__, i, j);
+				spurious += 100;
+				continue;
+			}
+
+			/* Check IRQ bits */
+			for (k = 0; k < 8; k++) {
+				if (!(bits & (1 << k)))
+					continue;
+
+				/* Check spurious interrupts */
+				if (((1 << i) & chip->masters_allowed) &&
+				    (blocks[i] & chip->blocks_allowed[i]) &&
+				    (bits & chip->irqs_allowed[block])) {
+
+					/* Found one */
+					irq = block * 8 + k;
+					irqs_to_handle[handled] = irq +
+						chip->pdata.irq_base;
+					handled++;
+				} else {
+					/* Clear and mask wrong one */
+					config = PM8058_IRQF_W_C_M |
+						(k << PM8058_IRQF_BITS_SHIFT);
+
+					pm8058_config_irq(chip,
+							  &block, &config);
+
+					if (pm8058_can_print())
+						pr_err("%s: Spurious IRQ: "
+						       "[master, block, bit]="
+						       "[%d, %d (%d), %d]\n",
+							__func__,
+						       i, j, block, k);
+					spurious++;
+				}
+			}
+		}
+
+	}
+
+bail_out:
+
+	mutex_unlock(&chip->pm_lock);
+
+	for (i = 0; i < handled; i++)
+		handle_nested_irq(irqs_to_handle[i]);
+
+	for (i = 0; i < handled; i++) {
+		irqs_to_handle[i] -= chip->pdata.irq_base;
+		block  = irqs_to_handle[i] / 8;
+		config = PM8058_IRQF_WRITE | chip->config[irqs_to_handle[i]]
+				| PM8058_IRQF_CLR;
+		pm8058_config_irq(chip, &block, &config);
+	}
+
+	if (spurious) {
+		if (!pm8058_can_print())
+			return IRQ_HANDLED;
+
+		pr_err("%s: spurious = %d (handled = %d)\n",
+		       __func__, spurious, handled);
+		pr_err("   root = 0x%x (masters_allowed<<1 = 0x%x)\n",
+		       root, chip->masters_allowed << 1);
+		for (i = 0; i < chip->pm_max_masters; i++) {
+			if (masters & (1 << i))
+				pr_err("   blocks[%d]=0x%x, "
+				       "allowed[%d]=0x%x\n",
+				       i, blocks[i],
+				       i, chip->blocks_allowed[i]);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int check_addr(int addr, const char *func_name)
+{
+	if (addr < 0 || addr > 0x3FF) {
+		pr_err("%s: PMIC 8058 register address is invalid: %d\n",
+			func_name, addr);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int data_set(void *data, u64 val)
+{
+	struct pm8058_dbg_device *dbgdev = data;
+	u8 reg = val;
+	int rc;
+
+	mutex_lock(&dbgdev->dbg_mutex);
+
+	rc = check_addr(dbgdev->addr, __func__);
+	if (rc)
+		goto done;
+
+	rc = pm8058_write(dbgdev->pm_chip, dbgdev->addr, &reg, 1);
+
+	if (rc)
+		pr_err("%s: FAIL pm8058_write(0x%03X)=0x%02X: rc=%d\n",
+			__func__, dbgdev->addr, reg, rc);
+done:
+	mutex_unlock(&dbgdev->dbg_mutex);
+	return rc;
+}
+
+static int data_get(void *data, u64 *val)
+{
+	struct pm8058_dbg_device *dbgdev = data;
+	int rc;
+	u8 reg;
+
+	mutex_lock(&dbgdev->dbg_mutex);
+
+	rc = check_addr(dbgdev->addr, __func__);
+	if (rc)
+		goto done;
+
+	rc = pm8058_read(dbgdev->pm_chip, dbgdev->addr, &reg, 1);
+
+	if (rc) {
+		pr_err("%s: FAIL pm8058_read(0x%03X)=0x%02X: rc=%d\n",
+			__func__, dbgdev->addr, reg, rc);
+		goto done;
+	}
+
+	*val = reg;
+done:
+	mutex_unlock(&dbgdev->dbg_mutex);
+	return rc;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(dbg_data_fops, data_get, data_set, "0x%02llX\n");
+
+static int addr_set(void *data, u64 val)
+{
+	struct pm8058_dbg_device *dbgdev = data;
+	int rc;
+
+	rc = check_addr(val, __func__);
+	if (rc)
+		return rc;
+
+	mutex_lock(&dbgdev->dbg_mutex);
+	dbgdev->addr = val;
+	mutex_unlock(&dbgdev->dbg_mutex);
+
+	return 0;
+}
+
+static int addr_get(void *data, u64 *val)
+{
+	struct pm8058_dbg_device *dbgdev = data;
+	int rc;
+
+	mutex_lock(&dbgdev->dbg_mutex);
+
+	rc = check_addr(dbgdev->addr, __func__);
+	if (rc) {
+		mutex_unlock(&dbgdev->dbg_mutex);
+		return rc;
+	}
+	*val = dbgdev->addr;
+
+	mutex_unlock(&dbgdev->dbg_mutex);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(dbg_addr_fops, addr_get, addr_set, "0x%03llX\n");
+
+static int __devinit pmic8058_dbg_probe(struct pm8058_chip *chip)
+{
+	struct pm8058_dbg_device *dbgdev;
+	struct dentry *dent;
+	struct dentry *temp;
+
+	if (chip == NULL) {
+		pr_err("%s: no parent data passed in.\n", __func__);
+		return -EINVAL;
+	}
+
+	dbgdev = kzalloc(sizeof *dbgdev, GFP_KERNEL);
+	if (dbgdev == NULL) {
+		pr_err("%s: kzalloc() failed.\n", __func__);
+		return -ENOMEM;
+	}
+
+	mutex_init(&dbgdev->dbg_mutex);
+
+	dbgdev->pm_chip = chip;
+	dbgdev->addr = -1;
+
+	dent = debugfs_create_dir("pm8058-dbg", NULL);
+	if (dent == NULL || IS_ERR(dent)) {
+		pr_err("%s: ERR debugfs_create_dir: dent=%p\n",
+					__func__, dent);
+		kfree(dbgdev);
+		return -ENOMEM;
+	}
+
+	temp = debugfs_create_file("addr", S_IRUSR | S_IWUSR, dent,
+					dbgdev, &dbg_addr_fops);
+	if (temp == NULL || IS_ERR(temp)) {
+		pr_err("%s: ERR debugfs_create_file: temp=%p\n",
+					__func__, temp);
+		goto debug_error;
+	}
+
+	temp = debugfs_create_file("data", S_IRUSR | S_IWUSR, dent,
+					dbgdev, &dbg_data_fops);
+	if (temp == NULL || IS_ERR(temp)) {
+		pr_err("%s: ERR debugfs_create_file: temp=%p\n",
+					__func__, temp);
+		goto debug_error;
+	}
+
+	dbgdev->dent = dent;
+
+	pmic_dbg_device = dbgdev;
+
+	return 0;
+
+debug_error:
+	debugfs_remove_recursive(dent);
+	kfree(dbgdev);
+	return -ENOMEM;
+}
+
+static int __devexit pmic8058_dbg_remove(void)
+{
+	if (pmic_dbg_device) {
+		debugfs_remove_recursive(pmic_dbg_device->dent);
+		kfree(pmic_dbg_device);
+	}
+	return 0;
+}
+
+#else
+
+static int __devinit pmic8058_dbg_probe(struct pm8058_chip *chip)
+{
+	return 0;
+}
+
+static int __devexit pmic8058_dbg_remove(void)
+{
+	return 0;
+}
+
+#endif
+
+static struct irq_chip pm8058_irq_chip = {
+	.name      = "pm8058",
+	.irq_ack	= pm8058_irq_ack,
+	.irq_mask	= pm8058_irq_mask,
+	.irq_unmask	= pm8058_irq_unmask,
+	.irq_set_type	= pm8058_irq_set_type,
+	.irq_set_wake	= pm8058_irq_set_wake,
+	.irq_bus_lock	= pm8058_irq_bus_lock,
+	.irq_bus_sync_unlock	= pm8058_irq_bus_sync_unlock,
+};
+
+static int pm8058_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	int	i, rc;
+	struct	pm8058_platform_data *pdata = client->dev.platform_data;
+	struct	pm8058_chip *chip;
+
+	if (pdata == NULL || !client->irq) {
+		pr_err("%s: No platform_data or IRQ.\n", __func__);
+		return -ENODEV;
+	}
+
+	if (pdata->num_subdevs == 0) {
+		pr_err("%s: No sub devices to support.\n", __func__);
+		return -ENODEV;
+	}
+
+	if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C) == 0) {
+		pr_err("%s: i2c_check_functionality failed.\n", __func__);
+		return -ENODEV;
+	}
+
+	chip = kzalloc(sizeof *chip, GFP_KERNEL);
+	if (chip == NULL) {
+		pr_err("%s: kzalloc() failed.\n", __func__);
+		return -ENOMEM;
+	}
+
+	chip->dev = client;
+
+	/* Read PMIC chip revision */
+	rc = ssbi_read(chip->dev, SSBI_REG_REV, &chip->revision, 1);
+	if (rc)
+		pr_err("%s: Failed on ssbi_read for revision: rc=%d.\n",
+			__func__, rc);
+	pr_info("%s: PMIC revision: %X\n", __func__, chip->revision);
+
+	(void) memcpy((void *)&chip->pdata, (const void *)pdata,
+		      sizeof(chip->pdata));
+
+	mutex_init(&chip->pm_lock);
+	irq_set_handler_data(chip->dev->irq, (void *)chip);
+	irq_set_irq_wake(chip->dev->irq, 1);
+
+	chip->pm_max_irq = 0;
+	chip->pm_max_blocks = 0;
+	chip->pm_max_masters = 0;
+
+	i2c_set_clientdata(client, chip);
+
+	pmic_chip = chip;
+
+	/* Register for all reserved IRQs */
+	for (i = pdata->irq_base; i < (pdata->irq_base + MAX_PM_IRQ); i++) {
+		irq_set_chip(i, &pm8058_irq_chip);
+		irq_set_chip_data(i, (void *)chip);
+		irq_set_handler(i, handle_edge_irq);
+		set_irq_flags(i, IRQF_VALID);
+		irq_set_nested_thread(i, 1);
+	}
+
+	rc = mfd_add_devices(&chip->dev->dev, 0, pdata->sub_devices,
+			     pdata->num_subdevs, NULL, 0);
+
+	/* Add charger sub device with the chip parameter as driver data */
+	if (pdata->charger_sub_device) {
+		rc = mfd_add_devices(&chip->dev->dev, 0,
+					pdata->charger_sub_device,
+					1, NULL, 0);
+	}
+
+	if (pdata->init) {
+		rc = pdata->init(chip);
+		if (rc != 0) {
+			pr_err("%s: board init failed\n", __func__);
+			chip->dev = NULL;
+			kfree(chip);
+			return -ENODEV;
+		}
+	}
+
+	rc = request_threaded_irq(chip->dev->irq, NULL, pm8058_isr_thread,
+			IRQF_ONESHOT | IRQF_DISABLED | pdata->irq_trigger_flags,
+			"pm8058-irq", chip);
+	if (rc < 0)
+		pr_err("%s: could not request irq %d: %d\n", __func__,
+				chip->dev->irq, rc);
+
+	rc = pmic8058_dbg_probe(chip);
+	if (rc < 0)
+		pr_err("%s: could not set up debugfs: %d\n", __func__, rc);
+
+	return 0;
+}
+
+static int __devexit pm8058_remove(struct i2c_client *client)
+{
+	struct	pm8058_chip *chip;
+
+	chip = i2c_get_clientdata(client);
+	if (chip) {
+		if (chip->pm_max_irq) {
+			irq_set_irq_wake(chip->dev->irq, 0);
+			free_irq(chip->dev->irq, chip);
+		}
+		mutex_destroy(&chip->pm_lock);
+		chip->dev = NULL;
+
+		kfree(chip);
+	}
+
+	pmic8058_dbg_remove();
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int pm8058_suspend(struct device *dev)
+{
+	struct i2c_client *client;
+	struct	pm8058_chip *chip;
+	struct irq_data *data;
+	int	i;
+
+	client = to_i2c_client(dev);
+	chip = i2c_get_clientdata(client);
+
+	for (i = 0; i < MAX_PM_IRQ; i++) {
+		if (chip->config[i] && !chip->wake_enable[i]) {
+			if (!((chip->config[i] & PM8058_IRQF_MASK_ALL)
+			      == PM8058_IRQF_MASK_ALL)) {
+				data = irq_get_irq_data(i +
+						chip->pdata.irq_base);
+				pm8058_irq_bus_lock(data);
+				pm8058_irq_mask(data);
+				pm8058_irq_bus_sync_unlock(data);
+			}
+		}
+	}
+
+	if (!chip->count_wakeable)
+		disable_irq(chip->dev->irq);
+
+	return 0;
+}
+
+extern int msm_show_resume_irq_mask;
+
+static void pm8058_show_resume_irq(void)
+{
+	u8	block, bits;
+	int i;
+	struct pm8058_chip *chip = pmic_chip;
+
+	if (!msm_show_resume_irq_mask)
+		return;
+
+	for (i = 0; i < MAX_PM_IRQ; i++) {
+		if (chip->wake_enable[i]) {
+			block = i / 8;
+			if (!pm8058_read_block(chip, &block, &bits)) {
+				if (bits & (1 << (i & 0x7)))
+					pr_warning("%s:%d triggered\n",
+					__func__, i + chip->pdata.irq_base);
+			}
+		}
+	}
+}
+
+static int pm8058_resume(struct device *dev)
+{
+	struct i2c_client *client;
+	struct	pm8058_chip *chip;
+	struct irq_data *data;
+	int	i;
+
+	pm8058_show_resume_irq();
+
+	client = to_i2c_client(dev);
+	chip = i2c_get_clientdata(client);
+
+	for (i = 0; i < MAX_PM_IRQ; i++) {
+		if (chip->config[i] && !chip->wake_enable[i]) {
+			if (!((chip->config[i] & PM8058_IRQF_MASK_ALL)
+			      == PM8058_IRQF_MASK_ALL)) {
+				data = irq_get_irq_data(i +
+						chip->pdata.irq_base);
+				pm8058_irq_bus_lock(data);
+				pm8058_irq_unmask(data);
+				pm8058_irq_bus_sync_unlock(data);
+			}
+		}
+	}
+
+	if (!chip->count_wakeable)
+		enable_irq(chip->dev->irq);
+
+	return 0;
+}
+#else
+#define	pm8058_suspend		NULL
+#define	pm8058_resume		NULL
+#endif
+
+static const struct i2c_device_id pm8058_ids[] = {
+	{ "pm8058-core", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(i2c, pm8058_ids);
+
+static struct dev_pm_ops pm8058_pm = {
+	.suspend = pm8058_suspend,
+	.resume = pm8058_resume,
+};
+
+static struct i2c_driver pm8058_driver = {
+	.driver.name	= "pm8058-core",
+	.driver.pm      = &pm8058_pm,
+	.id_table	= pm8058_ids,
+	.probe		= pm8058_probe,
+	.remove		= __devexit_p(pm8058_remove),
+};
+
+static int __init pm8058_init(void)
+{
+	int rc = i2c_add_driver(&pm8058_driver);
+	pr_notice("%s: i2c_add_driver: rc = %d\n", __func__, rc);
+
+	return rc;
+}
+
+static void __exit pm8058_exit(void)
+{
+	i2c_del_driver(&pm8058_driver);
+}
+
+arch_initcall(pm8058_init);
+module_exit(pm8058_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC8058 core driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:pmic8058-core");
diff --git a/drivers/mfd/pmic8901.c b/drivers/mfd/pmic8901.c
new file mode 100644
index 0000000..da46656
--- /dev/null
+++ b/drivers/mfd/pmic8901.c
@@ -0,0 +1,953 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/ratelimit.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/pmic8901.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+
+/* PMIC8901 Revision */
+#define SSBI_REG_REV			0x002  /* PMIC4 revision */
+
+/* PMIC8901 IRQ */
+#define	SSBI_REG_ADDR_IRQ_BASE		0xD5
+
+#define	SSBI_REG_ADDR_IRQ_ROOT		(SSBI_REG_ADDR_IRQ_BASE + 0)
+#define	SSBI_REG_ADDR_IRQ_M_STATUS1	(SSBI_REG_ADDR_IRQ_BASE + 1)
+#define	SSBI_REG_ADDR_IRQ_M_STATUS2	(SSBI_REG_ADDR_IRQ_BASE + 2)
+#define	SSBI_REG_ADDR_IRQ_M_STATUS3	(SSBI_REG_ADDR_IRQ_BASE + 3)
+#define	SSBI_REG_ADDR_IRQ_M_STATUS4	(SSBI_REG_ADDR_IRQ_BASE + 4)
+#define	SSBI_REG_ADDR_IRQ_BLK_SEL	(SSBI_REG_ADDR_IRQ_BASE + 5)
+#define	SSBI_REG_ADDR_IRQ_IT_STATUS	(SSBI_REG_ADDR_IRQ_BASE + 6)
+#define	SSBI_REG_ADDR_IRQ_CONFIG	(SSBI_REG_ADDR_IRQ_BASE + 7)
+#define	SSBI_REG_ADDR_IRQ_RT_STATUS	(SSBI_REG_ADDR_IRQ_BASE + 8)
+
+#define	PM8901_IRQF_LVL_SEL		0x01	/* level select */
+#define	PM8901_IRQF_MASK_FE		0x02	/* mask falling edge */
+#define	PM8901_IRQF_MASK_RE		0x04	/* mask rising edge */
+#define	PM8901_IRQF_CLR			0x08	/* clear interrupt */
+#define	PM8901_IRQF_BITS_MASK		0x70
+#define	PM8901_IRQF_BITS_SHIFT		4
+#define	PM8901_IRQF_WRITE		0x80
+
+#define	PM8901_IRQF_MASK_ALL		(PM8901_IRQF_MASK_FE | \
+					PM8901_IRQF_MASK_RE)
+#define PM8901_IRQF_W_C_M		(PM8901_IRQF_WRITE |	\
+					PM8901_IRQF_CLR |	\
+					PM8901_IRQF_MASK_ALL)
+
+#define	MAX_PM_IRQ			72
+#define	MAX_PM_BLOCKS			(MAX_PM_IRQ / 8 + 1)
+#define	MAX_PM_MASTERS			(MAX_PM_BLOCKS / 8 + 1)
+
+#define MPP_IRQ_BLOCK			1
+
+/* FTS regulator PMR registers */
+#define SSBI_REG_ADDR_S1_PMR		(0xA7)
+#define SSBI_REG_ADDR_S2_PMR		(0xA8)
+#define SSBI_REG_ADDR_S3_PMR		(0xA9)
+#define SSBI_REG_ADDR_S4_PMR		(0xAA)
+
+#define REGULATOR_PMR_STATE_MASK	0x60
+#define REGULATOR_PMR_STATE_OFF		0x20
+
+struct pm8901_chip {
+	struct pm8901_platform_data	pdata;
+
+	struct i2c_client		*dev;
+
+	u8	irqs_allowed[MAX_PM_BLOCKS];
+	u8	blocks_allowed[MAX_PM_MASTERS];
+	u8	masters_allowed;
+	int	pm_max_irq;
+	int	pm_max_blocks;
+	int	pm_max_masters;
+
+	u8	config[MAX_PM_IRQ];
+	u8	wake_enable[MAX_PM_IRQ];
+	u16	count_wakeable;
+
+	u8	revision;
+
+	spinlock_t	pm_lock;
+};
+
+#if defined(CONFIG_DEBUG_FS)
+struct pm8901_dbg_device {
+	struct mutex		dbg_mutex;
+	struct pm8901_chip	*pm_chip;
+	struct dentry		*dent;
+	int			addr;
+};
+
+static struct pm8901_dbg_device *pmic_dbg_device;
+#endif
+
+static struct pm8901_chip *pmic_chip;
+
+/* Helper Functions */
+DEFINE_RATELIMIT_STATE(pm8901_msg_ratelimit, 60 * HZ, 10);
+
+static inline int pm8901_can_print(void)
+{
+	return __ratelimit(&pm8901_msg_ratelimit);
+}
+
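+/*
+ * SSBI accesses are tunneled through the i2c framework here: the SSBI
+ * register address is carried in the i2c_msg addr field, and the underlying
+ * adapter driver is expected to turn the single-message transfer into an
+ * SSBI read or write.
+ */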
+static inline int
+ssbi_write(struct i2c_client *client, u16 addr, const u8 *buf, size_t len)
+{
+	int	rc;
+	struct	i2c_msg msg = {
+		.addr           = addr,
+		.flags          = 0x0,
+		.buf            = (u8 *)buf,
+		.len            = len,
+	};
+
+	rc = i2c_transfer(client->adapter, &msg, 1);
+	return (rc == 1) ? 0 : rc;
+}
+
+static inline int
+ssbi_read(struct i2c_client *client, u16 addr, u8 *buf, size_t len)
+{
+	int	rc;
+	struct	i2c_msg msg = {
+		.addr           = addr,
+		.flags          = I2C_M_RD,
+		.buf            = buf,
+		.len            = len,
+	};
+
+	rc = i2c_transfer(client->adapter, &msg, 1);
+	return (rc == 1) ? 0 : rc;
+}
+
+/* External APIs */
+int pm8901_rev(struct pm8901_chip *chip)
+{
+	if (chip == NULL) {
+		if (pmic_chip != NULL)
+			return pmic_chip->revision;
+		else
+			return -EINVAL;
+	}
+
+	return chip->revision;
+}
+EXPORT_SYMBOL(pm8901_rev);
+
+int pm8901_read(struct pm8901_chip *chip, u16 addr, u8 *values,
+		unsigned int len)
+{
+	if (chip == NULL)
+		return -EINVAL;
+
+	return ssbi_read(chip->dev, addr, values, len);
+}
+EXPORT_SYMBOL(pm8901_read);
+
+int pm8901_write(struct pm8901_chip *chip, u16 addr, u8 *values,
+		 unsigned int len)
+{
+	if (chip == NULL)
+		return -EINVAL;
+
+	return ssbi_write(chip->dev, addr, values, len);
+}
+EXPORT_SYMBOL(pm8901_write);
+
+int pm8901_irq_get_rt_status(struct pm8901_chip *chip, int irq)
+{
+	int     rc;
+	u8      block, bits, bit;
+	unsigned long   irqsave;
+
+	if (chip == NULL || irq < chip->pdata.irq_base ||
+			irq >= chip->pdata.irq_base + MAX_PM_IRQ)
+		return -EINVAL;
+
+	irq -= chip->pdata.irq_base;
+
+	block = irq / 8;
+	bit = irq % 8;
+
+	spin_lock_irqsave(&chip->pm_lock, irqsave);
+
+	rc = ssbi_write(chip->dev, SSBI_REG_ADDR_IRQ_BLK_SEL, &block, 1);
+	if (rc) {
+		pr_err("%s: FAIL ssbi_write(): rc=%d (Select Block)\n",
+				__func__, rc);
+		goto bail_out;
+	}
+
+	rc = ssbi_read(chip->dev, SSBI_REG_ADDR_IRQ_RT_STATUS, &bits, 1);
+	if (rc) {
+		pr_err("%s: FAIL ssbi_read(): rc=%d (Read RT Status)\n",
+				__func__, rc);
+		goto bail_out;
+	}
+
+	rc = (bits & (1 << bit)) ? 1 : 0;
+
+bail_out:
+	spin_unlock_irqrestore(&chip->pm_lock, irqsave);
+
+	return rc;
+}
+EXPORT_SYMBOL(pm8901_irq_get_rt_status);
+
+int pm8901_reset_pwr_off(int reset)
+{
+	int rc = 0, i;
+	u8 pmr;
+	u8 pmr_addr[4] = {
+		SSBI_REG_ADDR_S2_PMR,
+		SSBI_REG_ADDR_S3_PMR,
+		SSBI_REG_ADDR_S4_PMR,
+		SSBI_REG_ADDR_S1_PMR,
+	};
+
+	if (pmic_chip == NULL)
+		return -ENODEV;
+
+	/* Turn off regulators S1, S2, S3, S4 when shutting down. */
+	if (!reset) {
+		for (i = 0; i < 4; i++) {
+			rc = ssbi_read(pmic_chip->dev, pmr_addr[i], &pmr, 1);
+			if (rc) {
+				pr_err("%s: FAIL ssbi_read(0x%x): rc=%d\n",
+				       __func__, pmr_addr[i], rc);
+				goto get_out;
+			}
+
+			pmr &= ~REGULATOR_PMR_STATE_MASK;
+			pmr |= REGULATOR_PMR_STATE_OFF;
+
+			rc = ssbi_write(pmic_chip->dev, pmr_addr[i], &pmr, 1);
+			if (rc) {
+				pr_err("%s: FAIL ssbi_write(0x%x)=0x%x: rc=%d\n",
+				       __func__, pmr_addr[i], pmr, rc);
+				goto get_out;
+			}
+		}
+	}
+
+get_out:
+	return rc;
+}
+EXPORT_SYMBOL(pm8901_reset_pwr_off);
+
+/* Internal functions */
+static inline int
+pm8901_config_irq(struct pm8901_chip *chip, u8 *bp, u8 *cp)
+{
+	int	rc;
+
+	rc = ssbi_write(chip->dev, SSBI_REG_ADDR_IRQ_BLK_SEL, bp, 1);
+	if (rc) {
+		pr_err("%s: ssbi_write: rc=%d (Select block)\n",
+			__func__, rc);
+		goto bail_out;
+	}
+
+	rc = ssbi_write(chip->dev, SSBI_REG_ADDR_IRQ_CONFIG, cp, 1);
+	if (rc)
+		pr_err("%s: ssbi_write: rc=%d (Configure IRQ)\n",
+			__func__, rc);
+
+bail_out:
+	return rc;
+}
+
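+/*
+ * irqs_allowed/blocks_allowed/masters_allowed mirror the unmasked interrupt
+ * state per block, per master and for the whole chip; the ISR uses them to
+ * tell genuine interrupts apart from spurious ones.
+ */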
+static void pm8901_irq_mask(struct irq_data *d)
+{
+	int	master, irq_bit;
+	struct	pm8901_chip *chip = irq_data_get_irq_handler_data(d);
+	u8	block, config;
+	unsigned int irq = d->irq;
+
+	irq -= chip->pdata.irq_base;
+	block = irq / 8;
+	master = block / 8;
+	irq_bit = irq % 8;
+
+	chip->irqs_allowed[block] &= ~(1 << irq_bit);
+	if (!chip->irqs_allowed[block]) {
+		chip->blocks_allowed[master] &= ~(1 << (block % 8));
+
+		if (!chip->blocks_allowed[master])
+			chip->masters_allowed &= ~(1 << master);
+	}
+
+	config = PM8901_IRQF_WRITE | chip->config[irq] |
+		PM8901_IRQF_MASK_FE | PM8901_IRQF_MASK_RE;
+	pm8901_config_irq(chip, &block, &config);
+}
+
+static void pm8901_irq_unmask(struct irq_data *d)
+{
+	int	master, irq_bit;
+	struct	pm8901_chip *chip = irq_data_get_irq_handler_data(d);
+	u8	block, config, old_irqs_allowed, old_blocks_allowed;
+	unsigned int irq = d->irq;
+
+	irq -= chip->pdata.irq_base;
+	block = irq / 8;
+	master = block / 8;
+	irq_bit = irq % 8;
+
+	old_irqs_allowed = chip->irqs_allowed[block];
+	chip->irqs_allowed[block] |= 1 << irq_bit;
+	if (!old_irqs_allowed) {
+		master = block / 8;
+
+		old_blocks_allowed = chip->blocks_allowed[master];
+		chip->blocks_allowed[master] |= 1 << (block % 8);
+
+		if (!old_blocks_allowed)
+			chip->masters_allowed |= 1 << master;
+	}
+
+	config = PM8901_IRQF_WRITE | chip->config[irq];
+	pm8901_config_irq(chip, &block, &config);
+}
+
+static void pm8901_irq_ack(struct irq_data *d)
+{
+	struct	pm8901_chip *chip = irq_data_get_irq_handler_data(d);
+	u8	block, config;
+	unsigned int irq = d->irq;
+
+	irq -= chip->pdata.irq_base;
+	block = irq / 8;
+
+	config = PM8901_IRQF_WRITE | chip->config[irq] | PM8901_IRQF_CLR;
+	pm8901_config_irq(chip, &block, &config);
+}
+
+static int pm8901_irq_set_type(struct irq_data *d, unsigned int flow_type)
+{
+	int	master, irq_bit;
+	struct	pm8901_chip *chip = irq_data_get_irq_handler_data(d);
+	u8	block, config;
+	unsigned int irq = d->irq;
+
+	irq -= chip->pdata.irq_base;
+	if (irq > chip->pm_max_irq) {
+		chip->pm_max_irq = irq;
+		chip->pm_max_blocks =
+			chip->pm_max_irq / 8 + 1;
+		chip->pm_max_masters =
+			chip->pm_max_blocks / 8 + 1;
+	}
+	block = irq / 8;
+	master = block / 8;
+	irq_bit = irq % 8;
+
+	chip->config[irq] = (irq_bit << PM8901_IRQF_BITS_SHIFT) |
+			PM8901_IRQF_MASK_RE | PM8901_IRQF_MASK_FE;
+	if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
+		if (flow_type & IRQF_TRIGGER_RISING)
+			chip->config[irq] &= ~PM8901_IRQF_MASK_RE;
+		if (flow_type & IRQF_TRIGGER_FALLING)
+			chip->config[irq] &= ~PM8901_IRQF_MASK_FE;
+	} else {
+		chip->config[irq] |= PM8901_IRQF_LVL_SEL;
+
+		if (flow_type & IRQF_TRIGGER_HIGH)
+			chip->config[irq] &= ~PM8901_IRQF_MASK_RE;
+		else
+			chip->config[irq] &= ~PM8901_IRQF_MASK_FE;
+	}
+
+	config = PM8901_IRQF_WRITE | chip->config[irq] | PM8901_IRQF_CLR;
+	return pm8901_config_irq(chip, &block, &config);
+}
+
+static int pm8901_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+	struct	pm8901_chip *chip = irq_data_get_irq_handler_data(d);
+	unsigned int irq = d->irq;
+
+	irq -= chip->pdata.irq_base;
+	if (on) {
+		if (!chip->wake_enable[irq]) {
+			chip->wake_enable[irq] = 1;
+			chip->count_wakeable++;
+		}
+	} else {
+		if (chip->wake_enable[irq]) {
+			chip->wake_enable[irq] = 0;
+			chip->count_wakeable--;
+		}
+	}
+
+	return 0;
+}
+
+static inline int
+pm8901_read_root(struct pm8901_chip *chip, u8 *rp)
+{
+	int	rc;
+
+	rc = ssbi_read(chip->dev, SSBI_REG_ADDR_IRQ_ROOT, rp, 1);
+	if (rc) {
+		pr_err("%s: FAIL ssbi_read(): rc=%d (Read Root)\n",
+			__func__, rc);
+		*rp = 0;
+	}
+
+	return rc;
+}
+
+static inline int
+pm8901_read_master(struct pm8901_chip *chip, u8 m, u8 *bp)
+{
+	int	rc;
+
+	rc = ssbi_read(chip->dev, SSBI_REG_ADDR_IRQ_M_STATUS1 + m, bp, 1);
+	if (rc) {
+		pr_err("%s: FAIL ssbi_read(): rc=%d (Read Master)\n",
+			__func__, rc);
+		*bp = 0;
+	}
+
+	return rc;
+}
+
+static inline int
+pm8901_read_block(struct pm8901_chip *chip, u8 *bp, u8 *ip)
+{
+	int	rc;
+
+	rc = ssbi_write(chip->dev, SSBI_REG_ADDR_IRQ_BLK_SEL, bp, 1);
+	if (rc) {
+		pr_err("%s: FAIL ssbi_write(): rc=%d (Select Block)\n",
+		       __func__, rc);
+		*bp = 0;
+		goto bail_out;
+	}
+
+	rc = ssbi_read(chip->dev, SSBI_REG_ADDR_IRQ_IT_STATUS, ip, 1);
+	if (rc)
+		pr_err("%s: FAIL ssbi_read(): rc=%d (Read Status)\n",
+		       __func__, rc);
+
+bail_out:
+	return rc;
+}
+
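+/*
+ * Threaded handler for the summary IRQ: walk root -> masters -> blocks ->
+ * bits under the spinlock, collect the interrupts that are actually enabled,
+ * then dispatch them with generic_handle_irq() after dropping the lock.
+ * Anything not allowed is cleared, masked and counted as spurious.
+ */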
+static irqreturn_t pm8901_isr_thread(int irq_requested, void *data)
+{
+	struct pm8901_chip *chip = data;
+	int	i, j, k;
+	u8	root, block, config, bits;
+	u8	blocks[MAX_PM_MASTERS];
+	int	masters = 0, irq, handled = 0, spurious = 0;
+	u16     irqs_to_handle[MAX_PM_IRQ];
+	unsigned long	irqsave;
+
+	spin_lock_irqsave(&chip->pm_lock, irqsave);
+
+	/* Read root for masters */
+	if (pm8901_read_root(chip, &root))
+		goto bail_out;
+
+	masters = root >> 1;
+
+	if (!(masters & chip->masters_allowed) ||
+	    (masters & ~chip->masters_allowed)) {
+		spurious = 1000000;
+	}
+
+	/* Read allowed masters for blocks. */
+	for (i = 0; i < chip->pm_max_masters; i++) {
+		if (masters & (1 << i)) {
+			if (pm8901_read_master(chip, i, &blocks[i]))
+				goto bail_out;
+
+			if (!blocks[i]) {
+				if (pm8901_can_print())
+					pr_err("%s: Spurious master: %d "
+					       "(blocks=0)\n", __func__, i);
+				spurious += 10000;
+			}
+		} else
+			blocks[i] = 0;
+	}
+
+	/* Select block, read status and call isr */
+	for (i = 0; i < chip->pm_max_masters; i++) {
+		if (!blocks[i])
+			continue;
+
+		for (j = 0; j < 8; j++) {
+			if (!(blocks[i] & (1 << j)))
+				continue;
+
+			block = i * 8 + j;	/* block # */
+			if (pm8901_read_block(chip, &block, &bits))
+				goto bail_out;
+
+			if (!bits) {
+				if (pm8901_can_print())
+					pr_err("%s: Spurious block: "
+					       "[master, block]=[%d, %d] "
+					       "(bits=0)\n", __func__, i, j);
+				spurious += 100;
+				continue;
+			}
+
+			/* Check IRQ bits */
+			for (k = 0; k < 8; k++) {
+				if (!(bits & (1 << k)))
+					continue;
+
+				/* Check spurious interrupts */
+				if (((1 << i) & chip->masters_allowed) &&
+				    (blocks[i] & chip->blocks_allowed[i]) &&
+				    (bits & chip->irqs_allowed[block])) {
+
+					/* Found one */
+					irq = block * 8 + k;
+					irqs_to_handle[handled] = irq +
+						chip->pdata.irq_base;
+					handled++;
+				} else {
+					/* Clear and mask wrong one */
+					config = PM8901_IRQF_W_C_M |
+						(k << PM8901_IRQF_BITS_SHIFT);
+
+					pm8901_config_irq(chip,
+							  &block, &config);
+
+					if (pm8901_can_print())
+						pr_err("%s: Spurious IRQ: "
+						       "[master, block, bit]="
+						       "[%d, %d (%d), %d]\n",
+							__func__,
+						       i, j, block, k);
+					spurious++;
+				}
+			}
+		}
+
+	}
+
+bail_out:
+
+	spin_unlock_irqrestore(&chip->pm_lock, irqsave);
+
+	for (i = 0; i < handled; i++)
+		generic_handle_irq(irqs_to_handle[i]);
+
+	if (spurious) {
+		if (!pm8901_can_print())
+			return IRQ_HANDLED;
+
+		pr_err("%s: spurious = %d (handled = %d)\n",
+		       __func__, spurious, handled);
+		pr_err("   root = 0x%x (masters_allowed<<1 = 0x%x)\n",
+		       root, chip->masters_allowed << 1);
+		for (i = 0; i < chip->pm_max_masters; i++) {
+			if (masters & (1 << i))
+				pr_err("   blocks[%d]=0x%x, "
+				       "allowed[%d]=0x%x\n",
+				       i, blocks[i],
+				       i, chip->blocks_allowed[i]);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int check_addr(int addr, const char *func_name)
+{
+	if (addr < 0 || addr > 0x3FF) {
+		pr_err("%s: PMIC 8901 register address is invalid: %d\n",
+			func_name, addr);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int data_set(void *data, u64 val)
+{
+	struct pm8901_dbg_device *dbgdev = data;
+	u8 reg = val;
+	int rc;
+
+	mutex_lock(&dbgdev->dbg_mutex);
+
+	rc = check_addr(dbgdev->addr, __func__);
+	if (rc)
+		goto done;
+
+	rc = pm8901_write(dbgdev->pm_chip, dbgdev->addr, &reg, 1);
+
+	if (rc)
+		pr_err("%s: FAIL pm8901_write(0x%03X)=0x%02X: rc=%d\n",
+			__func__, dbgdev->addr, reg, rc);
+done:
+	mutex_unlock(&dbgdev->dbg_mutex);
+	return rc;
+}
+
+static int data_get(void *data, u64 *val)
+{
+	struct pm8901_dbg_device *dbgdev = data;
+	int rc;
+	u8 reg;
+
+	mutex_lock(&dbgdev->dbg_mutex);
+
+	rc = check_addr(dbgdev->addr, __func__);
+	if (rc)
+		goto done;
+
+	rc = pm8901_read(dbgdev->pm_chip, dbgdev->addr, &reg, 1);
+
+	if (rc) {
+		pr_err("%s: FAIL pm8901_read(0x%03X)=0x%02X: rc=%d\n",
+			__func__, dbgdev->addr, reg, rc);
+		goto done;
+	}
+
+	*val = reg;
+done:
+	mutex_unlock(&dbgdev->dbg_mutex);
+	return rc;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(dbg_data_fops, data_get, data_set, "0x%02llX\n");
+
+static int addr_set(void *data, u64 val)
+{
+	struct pm8901_dbg_device *dbgdev = data;
+	int rc;
+
+	rc = check_addr(val, __func__);
+	if (rc)
+		return rc;
+
+	mutex_lock(&dbgdev->dbg_mutex);
+	dbgdev->addr = val;
+	mutex_unlock(&dbgdev->dbg_mutex);
+
+	return 0;
+}
+
+static int addr_get(void *data, u64 *val)
+{
+	struct pm8901_dbg_device *dbgdev = data;
+	int rc;
+
+	mutex_lock(&dbgdev->dbg_mutex);
+
+	rc = check_addr(dbgdev->addr, __func__);
+	if (rc) {
+		mutex_unlock(&dbgdev->dbg_mutex);
+		return rc;
+	}
+	*val = dbgdev->addr;
+
+	mutex_unlock(&dbgdev->dbg_mutex);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(dbg_addr_fops, addr_get, addr_set, "0x%03llX\n");
+
+static int __devinit pmic8901_dbg_probe(struct pm8901_chip *chip)
+{
+	struct pm8901_dbg_device *dbgdev;
+	struct dentry *dent;
+	struct dentry *temp;
+
+	if (chip == NULL) {
+		pr_err("%s: no parent data passed in.\n", __func__);
+		return -EINVAL;
+	}
+
+	dbgdev = kzalloc(sizeof *dbgdev, GFP_KERNEL);
+	if (dbgdev == NULL) {
+		pr_err("%s: kzalloc() failed.\n", __func__);
+		return -ENOMEM;
+	}
+
+	mutex_init(&dbgdev->dbg_mutex);
+
+	dbgdev->pm_chip = chip;
+	dbgdev->addr = -1;
+
+	dent = debugfs_create_dir("pm8901-dbg", NULL);
+	if (dent == NULL || IS_ERR(dent)) {
+		pr_err("%s: ERR debugfs_create_dir: dent=0x%X\n",
+					__func__, (unsigned)dent);
+		return -ENOMEM;
+	}
+
+	temp = debugfs_create_file("addr", S_IRUSR | S_IWUSR, dent,
+					dbgdev, &dbg_addr_fops);
+	if (temp == NULL || IS_ERR(temp)) {
+		pr_err("%s: ERR debugfs_create_file: dent=0x%X\n",
+					__func__, (unsigned)temp);
+		goto debug_error;
+	}
+
+	temp = debugfs_create_file("data", S_IRUSR | S_IWUSR, dent,
+					dbgdev, &dbg_data_fops);
+	if (temp == NULL || IS_ERR(temp)) {
+		pr_err("%s: ERR debugfs_create_file: dent=0x%X\n",
+					__func__, (unsigned)temp);
+		goto debug_error;
+	}
+
+	dbgdev->dent = dent;
+
+	pmic_dbg_device = dbgdev;
+
+	return 0;
+
+debug_error:
+	debugfs_remove_recursive(dent);
+	return -ENOMEM;
+}
+
+static int __devexit pmic8901_dbg_remove(void)
+{
+	if (pmic_dbg_device) {
+		debugfs_remove_recursive(pmic_dbg_device->dent);
+		kfree(pmic_dbg_device);
+	}
+	return 0;
+}
+
+#else
+
+static int __devinit pmic8901_dbg_probe(struct pm8901_chip *chip)
+{
+	return 0;
+}
+
+static int __devexit pmic8901_dbg_remove(void)
+{
+	return 0;
+}
+
+#endif
+
+static struct irq_chip pm8901_irq_chip = {
+	.name      = "pm8901",
+	.irq_ack       = pm8901_irq_ack,
+	.irq_mask      = pm8901_irq_mask,
+	.irq_unmask    = pm8901_irq_unmask,
+	.irq_set_type  = pm8901_irq_set_type,
+	.irq_set_wake  = pm8901_irq_set_wake,
+};
+
+static int pm8901_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	int	i, rc;
+	struct	pm8901_platform_data *pdata = client->dev.platform_data;
+	struct	pm8901_chip *chip;
+
+	if (pdata == NULL || !client->irq) {
+		pr_err("%s: No platform_data or IRQ.\n", __func__);
+		return -ENODEV;
+	}
+
+	if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C) == 0) {
+		pr_err("%s: i2c_check_functionality failed.\n", __func__);
+		return -ENODEV;
+	}
+
+	chip = kzalloc(sizeof *chip, GFP_KERNEL);
+	if (chip == NULL) {
+		pr_err("%s: kzalloc() failed.\n", __func__);
+		return -ENOMEM;
+	}
+
+	chip->dev = client;
+
+	/* Read PMIC chip revision */
+	rc = ssbi_read(chip->dev, SSBI_REG_REV, &chip->revision, 1);
+	if (rc)
+		pr_err("%s: Failed on ssbi_read for revision: rc=%d.\n",
+			__func__, rc);
+	pr_info("%s: PMIC revision: %X\n", __func__, chip->revision);
+
+	(void) memcpy((void *)&chip->pdata, (const void *)pdata,
+		      sizeof(chip->pdata));
+
+	irq_set_handler_data(chip->dev->irq, (void *)chip);
+	irq_set_irq_wake(chip->dev->irq, 1);
+
+	chip->pm_max_irq = 0;
+	chip->pm_max_blocks = 0;
+	chip->pm_max_masters = 0;
+
+	i2c_set_clientdata(client, chip);
+
+	pmic_chip = chip;
+	spin_lock_init(&chip->pm_lock);
+
+	/* Register for all reserved IRQs */
+	for (i = pdata->irq_base; i < (pdata->irq_base + MAX_PM_IRQ); i++) {
+		irq_set_chip(i, &pm8901_irq_chip);
+		irq_set_handler(i, handle_edge_irq);
+		set_irq_flags(i, IRQF_VALID);
+		irq_set_handler_data(i, (void *)chip);
+	}
+
+	rc = mfd_add_devices(&chip->dev->dev, 0, pdata->sub_devices,
+			     pdata->num_subdevs, NULL, 0);
+	if (rc) {
+		pr_err("%s: could not add devices %d\n", __func__, rc);
+		return rc;
+	}
+
+	rc = request_threaded_irq(chip->dev->irq, NULL, pm8901_isr_thread,
+			IRQF_ONESHOT | IRQF_DISABLED | pdata->irq_trigger_flags,
+			"pm8901-irq", chip);
+	if (rc)
+		pr_err("%s: could not request irq %d: %d\n", __func__,
+				chip->dev->irq, rc);
+
+	rc = pmic8901_dbg_probe(chip);
+	if (rc < 0)
+		pr_err("%s: could not set up debugfs: %d\n", __func__, rc);
+
+	return rc;
+}
+
+static int __devexit pm8901_remove(struct i2c_client *client)
+{
+	struct	pm8901_chip *chip;
+
+	chip = i2c_get_clientdata(client);
+	if (chip) {
+		if (chip->pm_max_irq) {
+			irq_set_irq_wake(chip->dev->irq, 0);
+			free_irq(chip->dev->irq, chip);
+		}
+
+		mfd_remove_devices(&chip->dev->dev);
+
+		chip->dev = NULL;
+
+		kfree(chip);
+	}
+
+	pmic8901_dbg_remove();
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int pm8901_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+	struct	pm8901_chip *chip;
+	int	i;
+	unsigned long	irqsave;
+
+	chip = i2c_get_clientdata(client);
+
+	for (i = 0; i < MAX_PM_IRQ; i++) {
+		spin_lock_irqsave(&chip->pm_lock, irqsave);
+		if (chip->config[i] && !chip->wake_enable[i]) {
+			if (!((chip->config[i] & PM8901_IRQF_MASK_ALL)
+			      == PM8901_IRQF_MASK_ALL))
+				pm8901_irq_mask(irq_get_irq_data(i +
+							chip->pdata.irq_base));
+		}
+		spin_unlock_irqrestore(&chip->pm_lock, irqsave);
+	}
+
+	if (!chip->count_wakeable)
+		disable_irq(chip->dev->irq);
+
+	return 0;
+}
+
+static int pm8901_resume(struct i2c_client *client)
+{
+	struct	pm8901_chip *chip;
+	int	i;
+	unsigned long	irqsave;
+
+	chip = i2c_get_clientdata(client);
+
+	for (i = 0; i < MAX_PM_IRQ; i++) {
+		spin_lock_irqsave(&chip->pm_lock, irqsave);
+		if (chip->config[i] && !chip->wake_enable[i]) {
+			if (!((chip->config[i] & PM8901_IRQF_MASK_ALL)
+			      == PM8901_IRQF_MASK_ALL))
+				pm8901_irq_unmask(irq_get_irq_data(i +
+							chip->pdata.irq_base));
+		}
+		spin_unlock_irqrestore(&chip->pm_lock, irqsave);
+	}
+
+	if (!chip->count_wakeable)
+		enable_irq(chip->dev->irq);
+
+	return 0;
+}
+#else
+#define	pm8901_suspend		NULL
+#define	pm8901_resume		NULL
+#endif
+
+static const struct i2c_device_id pm8901_ids[] = {
+	{ "pm8901-core", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(i2c, pm8901_ids);
+
+static struct i2c_driver pm8901_driver = {
+	.driver.name	= "pm8901-core",
+	.id_table	= pm8901_ids,
+	.probe		= pm8901_probe,
+	.remove		= __devexit_p(pm8901_remove),
+	.suspend	= pm8901_suspend,
+	.resume		= pm8901_resume,
+};
+
+static int __init pm8901_init(void)
+{
+	int rc = i2c_add_driver(&pm8901_driver);
+	pr_notice("%s: i2c_add_driver: rc = %d\n", __func__, rc);
+
+	return rc;
+}
+
+static void __exit pm8901_exit(void)
+{
+	i2c_del_driver(&pm8901_driver);
+}
+
+arch_initcall(pm8901_init);
+module_exit(pm8901_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC8901 core driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:pmic8901-core");
diff --git a/drivers/mfd/timpani-codec.c b/drivers/mfd/timpani-codec.c
new file mode 100644
index 0000000..364670e
--- /dev/null
+++ b/drivers/mfd/timpani-codec.c
@@ -0,0 +1,3645 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/msm-adie-codec.h>
+#include <linux/mfd/marimba.h>
+#include <linux/mfd/timpani-audio.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/string.h>
+
+/* Timpani codec driver is activated through Marimba core driver */
+
+#define MAX_MDELAY_US 20000
+
+#define TIMPANI_PATH_MASK(x) (1 << (x))
+
+#define TIMPANI_CODEC_AUXPGA_GAIN_RANGE (0x0F)
+
+#define TIMPANI_RX1_ST_MASK (TIMPANI_CDC_RX1_CTL_SIDETONE_EN1_L_M |\
+		TIMPANI_CDC_RX1_CTL_SIDETONE_EN1_R_M)
+#define TIMPANI_RX1_ST_ENABLE ((1 << TIMPANI_CDC_RX1_CTL_SIDETONE_EN1_L_S) |\
+		(1 << TIMPANI_CDC_RX1_CTL_SIDETONE_EN1_R_S))
+#define TIMPANI_CDC_ST_MIXING_TX1_MASK (TIMPANI_CDC_ST_MIXING_TX1_L_M |\
+		TIMPANI_CDC_ST_MIXING_TX1_R_M)
+#define TIMPANI_CDC_ST_MIXING_TX1_ENABLE ((1 << TIMPANI_CDC_ST_MIXING_TX1_L_S)\
+		| (1 << TIMPANI_CDC_ST_MIXING_TX1_R_S))
+#define TIMPANI_CDC_ST_MIXING_TX2_MASK (TIMPANI_CDC_ST_MIXING_TX2_L_M |\
+		TIMPANI_CDC_ST_MIXING_TX2_R_M)
+#define TIMPANI_CDC_ST_MIXING_TX2_ENABLE ((1 << TIMPANI_CDC_ST_MIXING_TX2_L_S)\
+		| (1 << TIMPANI_CDC_ST_MIXING_TX2_R_S))
+
+enum refcnt {
+	DEC = 0,
+	INC = 1,
+	IGNORE = 2,
+};
+#define TIMPANI_ARRAY_SIZE	(TIMPANI_A_CDC_COMP_HALT + 1)
+
+static u8 timpani_shadow[TIMPANI_ARRAY_SIZE];
+
+struct adie_codec_path {
+	struct adie_codec_dev_profile *profile;
+	struct adie_codec_register_image img;
+	u32 hwsetting_idx;
+	u32 stage_idx;
+	u32 curr_stage;
+	u32 reg_owner;
+};
+
+enum /* regaccess blk id */
+{
+	RA_BLOCK_RX1 = 0,
+	RA_BLOCK_RX2,
+	RA_BLOCK_TX1,
+	RA_BLOCK_TX2,
+	RA_BLOCK_LB,
+	RA_BLOCK_SHARED_RX_LB,
+	RA_BLOCK_SHARED_TX,
+	RA_BLOCK_TXFE1,
+	RA_BLOCK_TXFE2,
+	RA_BLOCK_PA_COMMON,
+	RA_BLOCK_PA_EAR,
+	RA_BLOCK_PA_HPH,
+	RA_BLOCK_PA_LINE,
+	RA_BLOCK_PA_AUX,
+	RA_BLOCK_ADC,
+	RA_BLOCK_DMIC,
+	RA_BLOCK_TX_I2S,
+	RA_BLOCK_DRV,
+	RA_BLOCK_TEST,
+	RA_BLOCK_RESERVED,
+	RA_BLOCK_NUM,
+};
+
+enum /* regaccess owner ID */
+{
+	RA_OWNER_NONE = 0,
+	RA_OWNER_PATH_RX1,
+	RA_OWNER_PATH_RX2,
+	RA_OWNER_PATH_TX1,
+	RA_OWNER_PATH_TX2,
+	RA_OWNER_PATH_LB,
+	RA_OWNER_DRV,
+	RA_OWNER_NUM,
+};
+
+struct reg_acc_blk_cfg {
+	u8 valid_owners[RA_OWNER_NUM];
+};
+
+struct reg_ref_cnt {
+	u8 mask;
+	u8 path_mask;
+};
+
+#define TIMPANI_MAX_FIELDS	5
+
+struct timpani_regaccess {
+	u8 reg_addr;
+	u8 blk_mask[RA_BLOCK_NUM];
+	u8 reg_mask;
+	u8 reg_default;
+	struct reg_ref_cnt fld_ref_cnt[TIMPANI_MAX_FIELDS];
+};
+
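+/*
+ * Register access table: each entry gives the per-block ownership masks for
+ * the register's bit fields, the register's writable mask and power-on
+ * default, and reference-count slots for up to TIMPANI_MAX_FIELDS fields.
+ */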
+struct timpani_regaccess timpani_regset[] = {
+	{
+		TIMPANI_A_MREF,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0xFC, 0x0, 0x3},
+		TIMPANI_MREF_M,
+		TIMPANI_MREF_POR,
+		{
+			{ .mask = 0xFC, .path_mask = 0},
+			{ .mask = 0x03, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDAC_IDAC_REF_CUR,
+		{0xFC, 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDAC_IDAC_REF_CUR_M,
+		TIMPANI_CDAC_IDAC_REF_CUR_POR,
+		{
+			{ .mask = 0xFC, .path_mask = 0},
+			{ .mask = 0x03, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_TXADC12_REF_CURR,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF},
+		TIMPANI_TXADC12_REF_CURR_M,
+		TIMPANI_TXADC12_REF_CURR_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_TXADC3_EN,
+		{ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFE, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
+		TIMPANI_TXADC3_EN_M,
+		TIMPANI_TXADC3_EN_POR,
+		{
+			{ .mask = 0xFE, .path_mask = 0},
+			{ .mask = 0x01, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_TXADC4_EN,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFE, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
+		TIMPANI_TXADC4_EN_M,
+		TIMPANI_TXADC4_EN_POR,
+		{
+			{ .mask = 0xFE, .path_mask = 0},
+			{ .mask = 0x01, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CODEC_TXADC_STATUS_REGISTER_1,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xC0, 0x30, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xF},
+		TIMPANI_CODEC_TXADC_STATUS_REGISTER_1_M,
+		TIMPANI_CODEC_TXADC_STATUS_REGISTER_1_POR,
+		{
+			{ .mask = 0xC0, .path_mask = 0},
+			{ .mask = 0x30, .path_mask = 0},
+			{ .mask = 0x0F, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_TXFE1,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_TXFE1_M,
+		TIMPANI_TXFE1_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_TXFE2,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_TXFE2_M,
+		TIMPANI_TXFE2_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_TXFE12_ATEST,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_TXFE12_ATEST_M,
+		TIMPANI_TXFE12_ATEST_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_TXFE_CLT,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xF8, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7},
+		TIMPANI_TXFE_CLT_M,
+		TIMPANI_TXFE_CLT_POR,
+		{
+			{ .mask = 0xF8, .path_mask = 0},
+			{ .mask = 0x07, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_TXADC1_EN,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFE, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
+		TIMPANI_TXADC1_EN_M,
+		TIMPANI_TXADC1_EN_POR,
+		{
+			{ .mask = 0xFE, .path_mask = 0},
+			{ .mask = 0x01, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_TXADC2_EN,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFE, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
+		TIMPANI_TXADC2_EN_M,
+		TIMPANI_TXADC2_EN_POR,
+		{
+			{ .mask = 0xFE, .path_mask = 0},
+			{ .mask = 0x01, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_TXADC_CTL,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_TXADC_CTL_M,
+		TIMPANI_TXADC_CTL_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_TXADC_CTL2,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_TXADC_CTL2_M,
+		TIMPANI_TXADC_CTL2_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_TXADC_CTL3,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0xFE, 0x0, 0x0, 0x0, 0x0, 0x1},
+		TIMPANI_TXADC_CTL3_M,
+		TIMPANI_TXADC_CTL3_POR,
+		{
+			{ .mask = 0xFE, .path_mask = 0},
+			{ .mask = 0x01, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_TXADC_CHOP_CTL,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0xFC, 0x0, 0x0, 0x0, 0x0, 0x3},
+		TIMPANI_TXADC_CHOP_CTL_M,
+		TIMPANI_TXADC_CHOP_CTL_POR,
+		{
+			{ .mask = 0xFC, .path_mask = 0},
+			{ .mask = 0x03, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_TXFE3,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xE2, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1D},
+		TIMPANI_TXFE3_M,
+		TIMPANI_TXFE3_POR,
+		{
+			{ .mask = 0xE2, .path_mask = 0},
+			{ .mask = 0x1D, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_TXFE4,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xE2, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1D},
+		TIMPANI_TXFE4_M,
+		TIMPANI_TXFE4_POR,
+		{
+			{ .mask = 0xE2, .path_mask = 0},
+			{ .mask = 0x1D, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_TXFE3_ATEST,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_TXFE3_ATEST_M,
+		TIMPANI_TXFE3_ATEST_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_TXFE_DIFF_SE,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0xC, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xF0},
+		TIMPANI_TXFE_DIFF_SE_M,
+		TIMPANI_TXFE_DIFF_SE_POR,
+		{
+			{ .mask = 0x03, .path_mask = 0},
+			{ .mask = 0x0C, .path_mask = 0},
+			{ .mask = 0xF0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDAC_RX_CLK_CTL,
+		{0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDAC_RX_CLK_CTL_M,
+		TIMPANI_CDAC_RX_CLK_CTL_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDAC_BUFF_CTL,
+		{0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDAC_BUFF_CTL_M,
+		TIMPANI_CDAC_BUFF_CTL_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDAC_REF_CTL1,
+		{0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDAC_REF_CTL1_M,
+		TIMPANI_CDAC_REF_CTL1_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_IDAC_DWA_FIR_CTL,
+		{0xF8, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7},
+		TIMPANI_IDAC_DWA_FIR_CTL_M,
+		TIMPANI_IDAC_DWA_FIR_CTL_POR,
+		{
+			{ .mask = 0xF8, .path_mask = 0},
+			{ .mask = 0x07, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDAC_REF_CTL2,
+		{0x6F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x90},
+		TIMPANI_CDAC_REF_CTL2_M,
+		TIMPANI_CDAC_REF_CTL2_POR,
+		{
+			{ .mask = 0x6F, .path_mask = 0},
+			{ .mask = 0x90, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDAC_CTL1,
+		{0x7F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80},
+		TIMPANI_CDAC_CTL1_M,
+		TIMPANI_CDAC_CTL1_POR,
+		{
+			{ .mask = 0x7F, .path_mask = 0},
+			{ .mask = 0x80, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDAC_CTL2,
+		{0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDAC_CTL2_M,
+		TIMPANI_CDAC_CTL2_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_IDAC_L_CTL,
+		{0x0, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_IDAC_L_CTL_M,
+		TIMPANI_IDAC_L_CTL_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_IDAC_R_CTL,
+		{0x0, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_IDAC_R_CTL_M,
+		TIMPANI_IDAC_R_CTL_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_MASTER_BIAS,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1F,
+		0xE0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_PA_MASTER_BIAS_M,
+		TIMPANI_PA_MASTER_BIAS_POR,
+		{
+			{ .mask = 0x1F, .path_mask = 0},
+			{ .mask = 0xE0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_CLASSD_BIAS,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_PA_CLASSD_BIAS_M,
+		TIMPANI_PA_CLASSD_BIAS_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_AUXPGA_CUR,
+		{0x0, 0x0, 0x0, 0x0, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_AUXPGA_CUR_M,
+		TIMPANI_AUXPGA_CUR_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_AUXPGA_CM,
+		{0x0, 0x0, 0x0, 0x0, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_AUXPGA_CM_M,
+		TIMPANI_AUXPGA_CM_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_HPH_EARPA_MSTB_EN,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x2, 0xFC,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_PA_HPH_EARPA_MSTB_EN_M,
+		TIMPANI_PA_HPH_EARPA_MSTB_EN_POR,
+		{
+			{ .mask = 0x01, .path_mask = 0},
+			{ .mask = 0x02, .path_mask = 0},
+			{ .mask = 0xFC, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_LINE_AUXO_EN,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0xF8, 0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_PA_LINE_AUXO_EN_M,
+		TIMPANI_PA_LINE_AUXO_EN_POR,
+		{
+			{ .mask = 0xF8, .path_mask = 0},
+			{ .mask = 0x07, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_CLASSD_AUXPGA_EN,
+		{0x0, 0x0, 0x0, 0x0, 0x30, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xF,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xC0},
+		TIMPANI_PA_CLASSD_AUXPGA_EN_M,
+		TIMPANI_PA_CLASSD_AUXPGA_EN_POR,
+		{
+			{ .mask = 0x30, .path_mask = 0},
+			{ .mask = 0x0F, .path_mask = 0},
+			{ .mask = 0xC0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_LINE_L_GAIN,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0xFC, 0x0, 0x3},
+		TIMPANI_PA_LINE_L_GAIN_M,
+		TIMPANI_PA_LINE_L_GAIN_POR,
+		{
+			{ .mask = 0xFC, .path_mask = 0},
+			{ .mask = 0x03, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_LINE_R_GAIN,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0xFC, 0x0, 0x3},
+		TIMPANI_PA_LINE_R_GAIN_M,
+		TIMPANI_PA_LINE_R_GAIN_POR,
+		{
+			{ .mask = 0xFC, .path_mask = 0},
+			{ .mask = 0x03, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_HPH_L_GAIN,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0xFE, 0x0, 0x1},
+		TIMPANI_PA_HPH_L_GAIN_M,
+		TIMPANI_PA_HPH_L_GAIN_POR,
+		{
+			{ .mask = 0xFE, .path_mask = 0},
+			{ .mask = 0x01, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_HPH_R_GAIN,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0xFE, 0x0, 0x1},
+		TIMPANI_PA_HPH_R_GAIN_M,
+		TIMPANI_PA_HPH_R_GAIN_POR,
+		{
+			{ .mask = 0xFE, .path_mask = 0},
+			{ .mask = 0x01, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_AUXPGA_LR_GAIN,
+		{0x0, 0x0, 0x0, 0x0, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_AUXPGA_LR_GAIN_M,
+		TIMPANI_AUXPGA_LR_GAIN_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_AUXO_EARPA_CONN,
+		{0x21, 0x42, 0x0, 0x0, 0x84, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x18},
+		TIMPANI_PA_AUXO_EARPA_CONN_M,
+		TIMPANI_PA_AUXO_EARPA_CONN_POR,
+		{
+			{ .mask = 0x21, .path_mask = 0},
+			{ .mask = 0x42, .path_mask = 0},
+			{ .mask = 0x84, .path_mask = 0},
+			{ .mask = 0x18, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_LINE_ST_CONN,
+		{0x24, 0x48, 0x0, 0x0, 0x93, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_PA_LINE_ST_CONN_M,
+		TIMPANI_PA_LINE_ST_CONN_POR,
+		{
+			{ .mask = 0x24, .path_mask = 0},
+			{ .mask = 0x48, .path_mask = 0},
+			{ .mask = 0x93, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_LINE_MONO_CONN,
+		{0x24, 0x48, 0x0, 0x0, 0x93, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_PA_LINE_MONO_CONN_M,
+		TIMPANI_PA_LINE_MONO_CONN_POR,
+		{
+			{ .mask = 0x24, .path_mask = 0},
+			{ .mask = 0x48, .path_mask = 0},
+			{ .mask = 0x93, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_HPH_ST_CONN,
+		{0x24, 0x48, 0x0, 0x0, 0x90, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_PA_HPH_ST_CONN_M,
+		TIMPANI_PA_HPH_ST_CONN_POR,
+		{
+			{ .mask = 0x24, .path_mask = 0},
+			{ .mask = 0x48, .path_mask = 0},
+			{ .mask = 0x90, .path_mask = 0},
+			{ .mask = 0x03, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_HPH_MONO_CONN,
+		{0x24, 0x48, 0x0, 0x0, 0x90, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
+		TIMPANI_PA_HPH_MONO_CONN_M,
+		TIMPANI_PA_HPH_MONO_CONN_POR,
+		{
+			{ .mask = 0x24, .path_mask = 0},
+			{ .mask = 0x48, .path_mask = 0},
+			{ .mask = 0x90, .path_mask = 0},
+			{ .mask = 0x03, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_CLASSD_CONN,
+		{0x80, 0x40, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xF},
+		TIMPANI_PA_CLASSD_CONN_M,
+		TIMPANI_PA_CLASSD_CONN_POR,
+		{
+			{ .mask = 0x80, .path_mask = 0},
+			{ .mask = 0x40, .path_mask = 0},
+			{ .mask = 0x20, .path_mask = 0},
+			{ .mask = 0x10, .path_mask = 0},
+			{ .mask = 0x0F, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_CNP_CTL,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xCF,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x30},
+		TIMPANI_PA_CNP_CTL_M,
+		TIMPANI_PA_CNP_CTL_POR,
+		{
+			{ .mask = 0xCF, .path_mask = 0},
+			{ .mask = 0x30, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_CLASSD_L_CTL,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3F,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xC0},
+		TIMPANI_PA_CLASSD_L_CTL_M,
+		TIMPANI_PA_CLASSD_L_CTL_POR,
+		{
+			{ .mask = 0x3F, .path_mask = 0},
+			{ .mask = 0xC0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_CLASSD_R_CTL,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3F,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xC0},
+		TIMPANI_PA_CLASSD_R_CTL_M,
+		TIMPANI_PA_CLASSD_R_CTL_POR,
+		{
+			{ .mask = 0x3F, .path_mask = 0},
+			{ .mask = 0xC0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_CLASSD_INT2_CTL,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_PA_CLASSD_INT2_CTL_M,
+		TIMPANI_PA_CLASSD_INT2_CTL_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_HPH_L_OCP_CLK_CTL,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_PA_HPH_L_OCP_CLK_CTL_M,
+		TIMPANI_PA_HPH_L_OCP_CLK_CTL_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_CLASSD_L_SW_CTL,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xF7,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8},
+		TIMPANI_PA_CLASSD_L_SW_CTL_M,
+		TIMPANI_PA_CLASSD_L_SW_CTL_POR,
+		{
+			{ .mask = 0xF7, .path_mask = 0},
+			{ .mask = 0x08, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_CLASSD_L_OCP1,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_PA_CLASSD_L_OCP1_M,
+		TIMPANI_PA_CLASSD_L_OCP1_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_CLASSD_L_OCP2,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_PA_CLASSD_L_OCP2_M,
+		TIMPANI_PA_CLASSD_L_OCP2_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_HPH_R_OCP_CLK_CTL,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_PA_HPH_R_OCP_CLK_CTL_M,
+		TIMPANI_PA_HPH_R_OCP_CLK_CTL_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_CLASSD_R_SW_CTL,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xF7,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8},
+		TIMPANI_PA_CLASSD_R_SW_CTL_M,
+		TIMPANI_PA_CLASSD_R_SW_CTL_POR,
+		{
+			{ .mask = 0xF7, .path_mask = 0},
+			{ .mask = 0x08, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_CLASSD_R_OCP1,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_PA_CLASSD_R_OCP1_M,
+		TIMPANI_PA_CLASSD_R_OCP1_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_CLASSD_R_OCP2,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_PA_CLASSD_R_OCP2_M,
+		TIMPANI_PA_CLASSD_R_OCP2_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_HPH_CTL1,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF},
+		TIMPANI_PA_HPH_CTL1_M,
+		TIMPANI_PA_HPH_CTL1_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_HPH_CTL2,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFE,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
+		TIMPANI_PA_HPH_CTL2_M,
+		TIMPANI_PA_HPH_CTL2_POR,
+		{
+			{ .mask = 0xFE, .path_mask = 0},
+			{ .mask = 0x01, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_LINE_AUXO_CTL,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0xC3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x3C, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_PA_LINE_AUXO_CTL_M,
+		TIMPANI_PA_LINE_AUXO_CTL_POR,
+		{
+			{ .mask = 0xC3, .path_mask = 0},
+			{ .mask = 0x3C, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_AUXO_EARPA_CTL,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7, 0x0,
+		0x0, 0x38, 0x0, 0x0, 0x0, 0x0, 0x0, 0xC0},
+		TIMPANI_PA_AUXO_EARPA_CTL_M,
+		TIMPANI_PA_AUXO_EARPA_CTL_POR,
+		{
+			{ .mask = 0x07, .path_mask = 0},
+			{ .mask = 0x38, .path_mask = 0},
+			{ .mask = 0xC0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_EARO_CTL,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_PA_EARO_CTL_M,
+		TIMPANI_PA_EARO_CTL_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_MASTER_BIAS_CUR,
+		{0x0, 0x0, 0x0, 0x0, 0x60, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x18,
+		0x6, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
+		TIMPANI_PA_MASTER_BIAS_CUR_M,
+		TIMPANI_PA_MASTER_BIAS_CUR_POR,
+		{
+			{ .mask = 0x60, .path_mask = 0},
+			{ .mask = 0x80, .path_mask = 0},
+			{ .mask = 0x18, .path_mask = 0},
+			{ .mask = 0x06, .path_mask = 0},
+			{ .mask = 0x01, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_CLASSD_SC_STATUS,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xCC,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x33},
+		TIMPANI_PA_CLASSD_SC_STATUS_M,
+		TIMPANI_PA_CLASSD_SC_STATUS_POR,
+		{
+			{ .mask = 0xCC, .path_mask = 0},
+			{ .mask = 0x33, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_PA_HPH_SC_STATUS,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x88,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x77},
+		TIMPANI_PA_HPH_SC_STATUS_M,
+		TIMPANI_PA_HPH_SC_STATUS_POR,
+		{
+			{ .mask = 0x88, .path_mask = 0},
+			{ .mask = 0x77, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_ATEST_EN,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x7F},
+		TIMPANI_ATEST_EN_M,
+		TIMPANI_ATEST_EN_POR,
+		{
+			{ .mask = 0x80, .path_mask = 0},
+			{ .mask = 0x7F, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_ATEST_TSHKADC,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xF, 0xF0},
+		TIMPANI_ATEST_TSHKADC_M,
+		TIMPANI_ATEST_TSHKADC_POR,
+		{
+			{ .mask = 0x0F, .path_mask = 0},
+			{ .mask = 0xF0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_ATEST_TXADC13,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7F, 0x80},
+		TIMPANI_ATEST_TXADC13_M,
+		TIMPANI_ATEST_TXADC13_POR,
+		{
+			{ .mask = 0x7F, .path_mask = 0},
+			{ .mask = 0x80, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_ATEST_TXADC24,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7F, 0x80},
+		TIMPANI_ATEST_TXADC24_M,
+		TIMPANI_ATEST_TXADC24_POR,
+		{
+			{ .mask = 0x7F, .path_mask = 0},
+			{ .mask = 0x80, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_ATEST_AUXPGA,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xF8, 0x7},
+		TIMPANI_ATEST_AUXPGA_M,
+		TIMPANI_ATEST_AUXPGA_POR,
+		{
+			{ .mask = 0xF8, .path_mask = 0},
+			{ .mask = 0x07, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_ATEST_CDAC,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0x0},
+		TIMPANI_ATEST_CDAC_M,
+		TIMPANI_ATEST_CDAC_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_ATEST_IDAC,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0x0},
+		TIMPANI_ATEST_IDAC_M,
+		TIMPANI_ATEST_IDAC_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_ATEST_PA1,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0x0},
+		TIMPANI_ATEST_PA1_M,
+		TIMPANI_ATEST_PA1_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_ATEST_CLASSD,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0x0},
+		TIMPANI_ATEST_CLASSD_M,
+		TIMPANI_ATEST_CLASSD_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_ATEST_LINEO_AUXO,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0x0},
+		TIMPANI_ATEST_LINEO_AUXO_M,
+		TIMPANI_ATEST_LINEO_AUXO_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_RESET_CTL,
+		{0x2, 0x8, 0x5, 0x30, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xC0},
+		TIMPANI_CDC_RESET_CTL_M,
+		TIMPANI_CDC_RESET_CTL_POR,
+		{
+			{ .mask = 0x02, .path_mask = 0},
+			{ .mask = 0x08, .path_mask = 0},
+			{ .mask = 0x05, .path_mask = 0},
+			{ .mask = 0x30, .path_mask = 0},
+			{ .mask = 0xC0, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_RX1_CTL,
+		{0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDC_RX1_CTL_M,
+		TIMPANI_CDC_RX1_CTL_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_TX_I2S_CTL,
+		{0x0, 0x0, 0x10, 0x20, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0xF, 0x0, 0x0, 0xC0},
+		TIMPANI_CDC_TX_I2S_CTL_M,
+		TIMPANI_CDC_TX_I2S_CTL_POR,
+		{
+			{ .mask = 0x10, .path_mask = 0},
+			{ .mask = 0x20, .path_mask = 0},
+			{ .mask = 0x0F, .path_mask = 0},
+			{ .mask = 0xC0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_CH_CTL,
+		{0x3, 0x30, 0xC, 0xC0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDC_CH_CTL_M,
+		TIMPANI_CDC_CH_CTL_POR,
+		{
+			{ .mask = 0x03, .path_mask = 0},
+			{ .mask = 0x30, .path_mask = 0},
+			{ .mask = 0x0C, .path_mask = 0},
+			{ .mask = 0xC0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_RX1LG,
+		{0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDC_RX1LG_M,
+		TIMPANI_CDC_RX1LG_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_RX1RG,
+		{0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDC_RX1RG_M,
+		TIMPANI_CDC_RX1RG_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_TX1LG,
+		{0x0, 0x0, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDC_TX1LG_M,
+		TIMPANI_CDC_TX1LG_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_TX1RG,
+		{0x0, 0x0, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDC_TX1RG_M,
+		TIMPANI_CDC_TX1RG_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_RX_PGA_TIMER,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDC_RX_PGA_TIMER_M,
+		TIMPANI_CDC_RX_PGA_TIMER_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_TX_PGA_TIMER,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDC_TX_PGA_TIMER_M,
+		TIMPANI_CDC_TX_PGA_TIMER_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_GCTL1,
+		{0xF, 0x0, 0xF0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDC_GCTL1_M,
+		TIMPANI_CDC_GCTL1_POR,
+		{
+			{ .mask = 0x0F, .path_mask = 0},
+			{ .mask = 0xF0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_TX1L_STG,
+		{0x0, 0x0, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDC_TX1L_STG_M,
+		TIMPANI_CDC_TX1L_STG_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ST_CTL,
+		{0x0, 0xF, 0x0, 0xF0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDC_ST_CTL_M,
+		TIMPANI_CDC_ST_CTL_POR,
+		{
+			{ .mask = 0x0F, .path_mask = 0},
+			{ .mask = 0xF0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_RX1L_DCOFFSET,
+		{0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDC_RX1L_DCOFFSET_M,
+		TIMPANI_CDC_RX1L_DCOFFSET_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_RX1R_DCOFFSET,
+		{0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDC_RX1R_DCOFFSET_M,
+		TIMPANI_CDC_RX1R_DCOFFSET_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_BYPASS_CTL1,
+		{0xF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xF0},
+		TIMPANI_CDC_BYPASS_CTL1_M,
+		TIMPANI_CDC_BYPASS_CTL1_POR,
+		{
+			{ .mask = 0x0F, .path_mask = 0},
+			{ .mask = 0xF0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_PDM_CONFIG,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xF, 0xF0},
+		TIMPANI_CDC_PDM_CONFIG_M,
+		TIMPANI_CDC_PDM_CONFIG_POR,
+		{
+			{ .mask = 0x0F, .path_mask = 0},
+			{ .mask = 0xF0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_TESTMODE1,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3F, 0xC0},
+		TIMPANI_CDC_TESTMODE1_M,
+		TIMPANI_CDC_TESTMODE1_POR,
+		{
+			{ .mask = 0x3F, .path_mask = 0},
+			{ .mask = 0xC0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_DMIC_CLK_CTL,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x3F, 0x0, 0x0, 0x0, 0xC0},
+		TIMPANI_CDC_DMIC_CLK_CTL_M,
+		TIMPANI_CDC_DMIC_CLK_CTL_POR,
+		{
+			{ .mask = 0x3F, .path_mask = 0},
+			{ .mask = 0xC0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ADC12_CLK_CTL,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDC_ADC12_CLK_CTL_M,
+		TIMPANI_CDC_ADC12_CLK_CTL_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_TX1_CTL,
+		{0x0, 0x0, 0x3F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xC0},
+		TIMPANI_CDC_TX1_CTL_M,
+		TIMPANI_CDC_TX1_CTL_POR,
+		{
+			{ .mask = 0x3F, .path_mask = 0},
+			{ .mask = 0xC0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ADC34_CLK_CTL,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDC_ADC34_CLK_CTL_M,
+		TIMPANI_CDC_ADC34_CLK_CTL_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_TX2_CTL,
+		{0x0, 0x0, 0x0, 0x3F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xC0},
+		TIMPANI_CDC_TX2_CTL_M,
+		TIMPANI_CDC_TX2_CTL_POR,
+		{
+			{ .mask = 0x3F, .path_mask = 0},
+			{ .mask = 0xC0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_RX1_CLK_CTL,
+		{0x1F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xE0},
+		TIMPANI_CDC_RX1_CLK_CTL_M,
+		TIMPANI_CDC_RX1_CLK_CTL_POR,
+		{
+			{ .mask = 0x1F, .path_mask = 0},
+			{ .mask = 0xE0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_RX2_CLK_CTL,
+		{0x1F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xE0},
+		TIMPANI_CDC_RX2_CLK_CTL_M,
+		TIMPANI_CDC_RX2_CLK_CTL_POR,
+		{
+			{ .mask = 0x1F, .path_mask = 0},
+			{ .mask = 0xE0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_DEC_ADC_SEL,
+		{0x0, 0x0, 0xF, 0xF0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDC_DEC_ADC_SEL_M,
+		TIMPANI_CDC_DEC_ADC_SEL_POR,
+		{
+			{ .mask = 0x0F, .path_mask = 0},
+			{ .mask = 0xF0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC_INPUT_MUX,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x3F, 0x0, 0xC0},
+		TIMPANI_CDC_ANC_INPUT_MUX_M,
+		TIMPANI_CDC_ANC_INPUT_MUX_POR,
+		{
+			{ .mask = 0x3F, .path_mask = 0},
+			{ .mask = 0xC0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC_RX_CLK_NS_SEL,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0xFE},
+		TIMPANI_CDC_ANC_RX_CLK_NS_SEL_M,
+		TIMPANI_CDC_ANC_RX_CLK_NS_SEL_POR,
+		{
+			{ .mask = 0x01, .path_mask = 0},
+			{ .mask = 0xFE, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC_FB_TUNE_SEL,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF},
+		TIMPANI_CDC_ANC_FB_TUNE_SEL_M,
+		TIMPANI_CDC_ANC_FB_TUNE_SEL_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CLK_DIV_SYNC_CTL,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x0, 0xFC},
+		TIMPANI_CLK_DIV_SYNC_CTL_M,
+		TIMPANI_CLK_DIV_SYNC_CTL_POR,
+		{
+			{ .mask = 0x03, .path_mask = 0},
+			{ .mask = 0xFC, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ADC_CLK_EN,
+		{0x0, 0x0, 0x3, 0xC, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xF0},
+		TIMPANI_CDC_ADC_CLK_EN_M,
+		TIMPANI_CDC_ADC_CLK_EN_POR,
+		{
+			{ .mask = 0x03, .path_mask = 0},
+			{ .mask = 0x0C, .path_mask = 0},
+			{ .mask = 0xF0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ST_MIXING,
+		{0x0, 0x0, 0x3, 0xC, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xF0},
+		TIMPANI_CDC_ST_MIXING_M,
+		TIMPANI_CDC_ST_MIXING_POR,
+		{
+			{ .mask = 0x03, .path_mask = 0},
+			{ .mask = 0x0C, .path_mask = 0},
+			{ .mask = 0xF0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_RX2_CTL,
+		{0x0, 0x7F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80},
+		TIMPANI_CDC_RX2_CTL_M,
+		TIMPANI_CDC_RX2_CTL_POR,
+		{
+			{ .mask = 0x7F, .path_mask = 0},
+			{ .mask = 0x80, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ARB_CLK_EN,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF},
+		TIMPANI_CDC_ARB_CLK_EN_M,
+		TIMPANI_CDC_ARB_CLK_EN_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_I2S_CTL2,
+		{0x2, 0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x39, 0x0, 0x0, 0xC0},
+		TIMPANI_CDC_I2S_CTL2_M,
+		TIMPANI_CDC_I2S_CTL2_POR,
+		{
+			{ .mask = 0x02, .path_mask = 0},
+			{ .mask = 0x04, .path_mask = 0},
+			{ .mask = 0x39, .path_mask = 0},
+			{ .mask = 0xC0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_RX2LG,
+		{0x0, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDC_RX2LG_M,
+		TIMPANI_CDC_RX2LG_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_RX2RG,
+		{0x0, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDC_RX2RG_M,
+		TIMPANI_CDC_RX2RG_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_TX2LG,
+		{0x0, 0x0, 0x0, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDC_TX2LG_M,
+		TIMPANI_CDC_TX2LG_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_TX2RG,
+		{0x0, 0x0, 0x0, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDC_TX2RG_M,
+		TIMPANI_CDC_TX2RG_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_DMIC_MUX,
+		{0x0, 0x0, 0xF, 0xF0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDC_DMIC_MUX_M,
+		TIMPANI_CDC_DMIC_MUX_POR,
+		{
+			{ .mask = 0x0F, .path_mask = 0},
+			{ .mask = 0xF0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ARB_CLK_CTL,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x0, 0xFC},
+		TIMPANI_CDC_ARB_CLK_CTL_M,
+		TIMPANI_CDC_ARB_CLK_CTL_POR,
+		{
+			{ .mask = 0x03, .path_mask = 0},
+			{ .mask = 0xFC, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_GCTL2,
+		{0x0, 0xF, 0x0, 0xF0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDC_GCTL2_M,
+		TIMPANI_CDC_GCTL2_POR,
+		{
+			{ .mask = 0x0F, .path_mask = 0},
+			{ .mask = 0xF0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_BYPASS_CTL2,
+		{0x0, 0x0, 0x3F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xC0},
+		TIMPANI_CDC_BYPASS_CTL2_M,
+		TIMPANI_CDC_BYPASS_CTL2_POR,
+		{
+			{ .mask = 0x3F, .path_mask = 0},
+			{ .mask = 0xC0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_BYPASS_CTL3,
+		{0x0, 0x0, 0x0, 0x3F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xC0},
+		TIMPANI_CDC_BYPASS_CTL3_M,
+		TIMPANI_CDC_BYPASS_CTL3_POR,
+		{
+			{ .mask = 0x3F, .path_mask = 0},
+			{ .mask = 0xC0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_BYPASS_CTL4,
+		{0x0, 0xF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xF0},
+		TIMPANI_CDC_BYPASS_CTL4_M,
+		TIMPANI_CDC_BYPASS_CTL4_POR,
+		{
+			{ .mask = 0x0F, .path_mask = 0},
+			{ .mask = 0xF0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_RX2L_DCOFFSET,
+		{0x0, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDC_RX2L_DCOFFSET_M,
+		TIMPANI_CDC_RX2L_DCOFFSET_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_RX2R_DCOFFSET,
+		{0x0, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDC_RX2R_DCOFFSET_M,
+		TIMPANI_CDC_RX2R_DCOFFSET_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_RX_MIX_CTL,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x0, 0xFC},
+		TIMPANI_CDC_RX_MIX_CTL_M,
+		TIMPANI_CDC_RX_MIX_CTL_POR,
+		{
+			{ .mask = 0x03, .path_mask = 0},
+			{ .mask = 0xFC, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_SPARE_CTL,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0xFE},
+		TIMPANI_CDC_SPARE_CTL_M,
+		TIMPANI_CDC_SPARE_CTL_POR,
+		{
+			{ .mask = 0x01, .path_mask = 0},
+			{ .mask = 0xFE, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_TESTMODE2,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1F, 0xE0},
+		TIMPANI_CDC_TESTMODE2_M,
+		TIMPANI_CDC_TESTMODE2_POR,
+		{
+			{ .mask = 0x1F, .path_mask = 0},
+			{ .mask = 0xE0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_PDM_OE,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0x0},
+		TIMPANI_CDC_PDM_OE_M,
+		TIMPANI_CDC_PDM_OE_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_TX1R_STG,
+		{0x0, 0x0, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDC_TX1R_STG_M,
+		TIMPANI_CDC_TX1R_STG_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_TX2L_STG,
+		{0x0, 0x0, 0x0, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDC_TX2L_STG_M,
+		TIMPANI_CDC_TX2L_STG_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_TX2R_STG,
+		{0x0, 0x0, 0x0, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+		TIMPANI_CDC_TX2R_STG_M,
+		TIMPANI_CDC_TX2R_STG_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ARB_BYPASS_CTL,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF},
+		TIMPANI_CDC_ARB_BYPASS_CTL_M,
+		TIMPANI_CDC_ARB_BYPASS_CTL_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC1_CTL1,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x1F, 0x0, 0xE0},
+		TIMPANI_CDC_ANC1_CTL1_M,
+		TIMPANI_CDC_ANC1_CTL1_POR,
+		{
+			{ .mask = 0x1F, .path_mask = 0},
+			{ .mask = 0xE0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC1_CTL2,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x3F, 0x0, 0xC0},
+		TIMPANI_CDC_ANC1_CTL2_M,
+		TIMPANI_CDC_ANC1_CTL2_POR,
+		{
+			{ .mask = 0x3F, .path_mask = 0},
+			{ .mask = 0xC0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC1_FF_FB_SHIFT,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0x0, 0x0},
+		TIMPANI_CDC_ANC1_FF_FB_SHIFT_M,
+		TIMPANI_CDC_ANC1_FF_FB_SHIFT_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC1_RX_NS,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x7, 0x0, 0xF8},
+		TIMPANI_CDC_ANC1_RX_NS_M,
+		TIMPANI_CDC_ANC1_RX_NS_POR,
+		{
+			{ .mask = 0x07, .path_mask = 0},
+			{ .mask = 0xF8, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC1_SPARE,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0x0, 0x0},
+		TIMPANI_CDC_ANC1_SPARE_M,
+		TIMPANI_CDC_ANC1_SPARE_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC1_IIR_COEFF_PTR,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x1F, 0x0, 0xE0},
+		TIMPANI_CDC_ANC1_IIR_COEFF_PTR_M,
+		TIMPANI_CDC_ANC1_IIR_COEFF_PTR_POR,
+		{
+			{ .mask = 0x1F, .path_mask = 0},
+			{ .mask = 0xE0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC1_IIR_COEFF_MSB,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0xFE},
+		TIMPANI_CDC_ANC1_IIR_COEFF_MSB_M,
+		TIMPANI_CDC_ANC1_IIR_COEFF_MSB_POR,
+		{
+			{ .mask = 0x01, .path_mask = 0},
+			{ .mask = 0xFE, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC1_IIR_COEFF_LSB,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0x0, 0x0},
+		TIMPANI_CDC_ANC1_IIR_COEFF_LSB_M,
+		TIMPANI_CDC_ANC1_IIR_COEFF_LSB_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC1_IIR_COEFF_CTL,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x0, 0xFC},
+		TIMPANI_CDC_ANC1_IIR_COEFF_CTL_M,
+		TIMPANI_CDC_ANC1_IIR_COEFF_CTL_POR,
+		{
+			{ .mask = 0x03, .path_mask = 0},
+			{ .mask = 0xFC, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC1_LPF_COEFF_PTR,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0xF, 0x0, 0xF0},
+		TIMPANI_CDC_ANC1_LPF_COEFF_PTR_M,
+		TIMPANI_CDC_ANC1_LPF_COEFF_PTR_POR,
+		{
+			{ .mask = 0x0F, .path_mask = 0},
+			{ .mask = 0xF0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC1_LPF_COEFF_MSB,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0xF, 0x0, 0xF0},
+		TIMPANI_CDC_ANC1_LPF_COEFF_MSB_M,
+		TIMPANI_CDC_ANC1_LPF_COEFF_MSB_POR,
+		{
+			{ .mask = 0x0F, .path_mask = 0},
+			{ .mask = 0xF0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC1_LPF_COEFF_LSB,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0x0, 0x0},
+		TIMPANI_CDC_ANC1_LPF_COEFF_LSB_M,
+		TIMPANI_CDC_ANC1_LPF_COEFF_LSB_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC1_SCALE_PTR,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0x0, 0x0},
+		TIMPANI_CDC_ANC1_SCALE_PTR_M,
+		TIMPANI_CDC_ANC1_SCALE_PTR_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC1_SCALE,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0x0, 0x0},
+		TIMPANI_CDC_ANC1_SCALE_M,
+		TIMPANI_CDC_ANC1_SCALE_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC1_DEBUG,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0xF, 0x0, 0xF0},
+		TIMPANI_CDC_ANC1_DEBUG_M,
+		TIMPANI_CDC_ANC1_DEBUG_POR,
+		{
+			{ .mask = 0x0F, .path_mask = 0},
+			{ .mask = 0xF0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC2_CTL1,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x1F, 0x0, 0xE0},
+		TIMPANI_CDC_ANC2_CTL1_M,
+		TIMPANI_CDC_ANC2_CTL1_POR,
+		{
+			{ .mask = 0x1F, .path_mask = 0},
+			{ .mask = 0xE0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC2_CTL2,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x3F, 0x0, 0xC0},
+		TIMPANI_CDC_ANC2_CTL2_M,
+		TIMPANI_CDC_ANC2_CTL2_POR,
+		{
+			{ .mask = 0x3F, .path_mask = 0},
+			{ .mask = 0xC0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC2_FF_FB_SHIFT,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0x0, 0x0},
+		TIMPANI_CDC_ANC2_FF_FB_SHIFT_M,
+		TIMPANI_CDC_ANC2_FF_FB_SHIFT_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC2_RX_NS,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x7, 0x0, 0xF8},
+		TIMPANI_CDC_ANC2_RX_NS_M,
+		TIMPANI_CDC_ANC2_RX_NS_POR,
+		{
+			{ .mask = 0x07, .path_mask = 0},
+			{ .mask = 0xF8, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC2_SPARE,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0x0, 0x0},
+		TIMPANI_CDC_ANC2_SPARE_M,
+		TIMPANI_CDC_ANC2_SPARE_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC2_IIR_COEFF_PTR,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0xF, 0x0, 0xF0},
+		TIMPANI_CDC_ANC2_IIR_COEFF_PTR_M,
+		TIMPANI_CDC_ANC2_IIR_COEFF_PTR_POR,
+		{
+			{ .mask = 0x0F, .path_mask = 0},
+			{ .mask = 0xF0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC2_IIR_COEFF_MSB,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0xFE},
+		TIMPANI_CDC_ANC2_IIR_COEFF_MSB_M,
+		TIMPANI_CDC_ANC2_IIR_COEFF_MSB_POR,
+		{
+			{ .mask = 0x01, .path_mask = 0},
+			{ .mask = 0xFE, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC2_IIR_COEFF_LSB,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0x0, 0x0},
+		TIMPANI_CDC_ANC2_IIR_COEFF_LSB_M,
+		TIMPANI_CDC_ANC2_IIR_COEFF_LSB_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC2_IIR_COEFF_CTL,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x0, 0xFC},
+		TIMPANI_CDC_ANC2_IIR_COEFF_CTL_M,
+		TIMPANI_CDC_ANC2_IIR_COEFF_CTL_POR,
+		{
+			{ .mask = 0x03, .path_mask = 0},
+			{ .mask = 0xFC, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC2_LPF_COEFF_PTR,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0xF, 0x0, 0xF0},
+		TIMPANI_CDC_ANC2_LPF_COEFF_PTR_M,
+		TIMPANI_CDC_ANC2_LPF_COEFF_PTR_POR,
+		{
+			{ .mask = 0x0F, .path_mask = 0},
+			{ .mask = 0xF0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC2_LPF_COEFF_MSB,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0xF, 0x0, 0xF0},
+		TIMPANI_CDC_ANC2_LPF_COEFF_MSB_M,
+		TIMPANI_CDC_ANC2_LPF_COEFF_MSB_POR,
+		{
+			{ .mask = 0x0F, .path_mask = 0},
+			{ .mask = 0xF0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC2_LPF_COEFF_LSB,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0x0, 0x0},
+		TIMPANI_CDC_ANC2_LPF_COEFF_LSB_M,
+		TIMPANI_CDC_ANC2_LPF_COEFF_LSB_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC2_SCALE_PTR,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0x0, 0x0},
+		TIMPANI_CDC_ANC2_SCALE_PTR_M,
+		TIMPANI_CDC_ANC2_SCALE_PTR_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC2_SCALE,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0x0, 0x0},
+		TIMPANI_CDC_ANC2_SCALE_M,
+		TIMPANI_CDC_ANC2_SCALE_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_ANC2_DEBUG,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0xF, 0x0, 0xF0},
+		TIMPANI_CDC_ANC2_DEBUG_M,
+		TIMPANI_CDC_ANC2_DEBUG_POR,
+		{
+			{ .mask = 0x0F, .path_mask = 0},
+			{ .mask = 0xF0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_LINE_L_AVOL,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0xFC, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
+		TIMPANI_CDC_LINE_L_AVOL_M,
+		TIMPANI_CDC_LINE_L_AVOL_POR,
+		{
+			{ .mask = 0xFC, .path_mask = 0},
+			{ .mask = 0x03, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_LINE_R_AVOL,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0xFC, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
+		TIMPANI_CDC_LINE_R_AVOL_M,
+		TIMPANI_CDC_LINE_R_AVOL_POR,
+		{
+			{ .mask = 0xFC, .path_mask = 0},
+			{ .mask = 0x03, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_HPH_L_AVOL,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFE,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
+		TIMPANI_CDC_HPH_L_AVOL_M,
+		TIMPANI_CDC_HPH_L_AVOL_POR,
+		{
+			{ .mask = 0xFE, .path_mask = 0},
+			{ .mask = 0x01, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_HPH_R_AVOL,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFE,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
+		TIMPANI_CDC_HPH_R_AVOL_M,
+		TIMPANI_CDC_HPH_R_AVOL_POR,
+		{
+			{ .mask = 0xFE, .path_mask = 0},
+			{ .mask = 0x01, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_COMP_CTL1,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x3F, 0x0, 0xC0},
+		TIMPANI_CDC_COMP_CTL1_M,
+		TIMPANI_CDC_COMP_CTL1_POR,
+		{
+			{ .mask = 0x3F, .path_mask = 0},
+			{ .mask = 0xC0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_COMP_CTL2,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0xF, 0x0, 0xF0},
+		TIMPANI_CDC_COMP_CTL2_M,
+		TIMPANI_CDC_COMP_CTL2_POR,
+		{
+			{ .mask = 0x0F, .path_mask = 0},
+			{ .mask = 0xF0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_COMP_PEAK_METER,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0xF, 0x0, 0xF0},
+		TIMPANI_CDC_COMP_PEAK_METER_M,
+		TIMPANI_CDC_COMP_PEAK_METER_POR,
+		{
+			{ .mask = 0x0F, .path_mask = 0},
+			{ .mask = 0xF0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_COMP_LEVEL_METER_CTL1,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0xF, 0x0, 0xF0},
+		TIMPANI_CDC_COMP_LEVEL_METER_CTL1_M,
+		TIMPANI_CDC_COMP_LEVEL_METER_CTL1_POR,
+		{
+			{ .mask = 0x0F, .path_mask = 0},
+			{ .mask = 0xF0, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_COMP_LEVEL_METER_CTL2,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0x0, 0x0},
+		TIMPANI_CDC_COMP_LEVEL_METER_CTL2_M,
+		TIMPANI_CDC_COMP_LEVEL_METER_CTL2_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_COMP_ZONE_SELECT,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x7F, 0x0, 0x80},
+		TIMPANI_CDC_COMP_ZONE_SELECT_M,
+		TIMPANI_CDC_COMP_ZONE_SELECT_POR,
+		{
+			{ .mask = 0x7F, .path_mask = 0},
+			{ .mask = 0x80, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_COMP_ZC_MSB,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0x0, 0x0},
+		TIMPANI_CDC_COMP_ZC_MSB_M,
+		TIMPANI_CDC_COMP_ZC_MSB_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_COMP_ZC_LSB,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0xFF, 0x0, 0x0},
+		TIMPANI_CDC_COMP_ZC_LSB_M,
+		TIMPANI_CDC_COMP_ZC_LSB_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_COMP_SHUT_DOWN,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF},
+		TIMPANI_CDC_COMP_SHUT_DOWN_M,
+		TIMPANI_CDC_COMP_SHUT_DOWN_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_COMP_SHUT_DOWN_STATUS,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF},
+		TIMPANI_CDC_COMP_SHUT_DOWN_STATUS_M,
+		TIMPANI_CDC_COMP_SHUT_DOWN_STATUS_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	},
+	{
+		TIMPANI_A_CDC_COMP_HALT,
+		{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xFF},
+		TIMPANI_CDC_COMP_HALT_M,
+		TIMPANI_CDC_COMP_HALT_POR,
+		{
+			{ .mask = 0xFF, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+			{ .mask = 0x00, .path_mask = 0},
+		}
+	}
+};
+
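+/*
+ * Register access block configuration. For each RA_BLOCK_* block this lists
+ * the owners (RX/TX/loopback paths or the driver itself) that may claim the
+ * block; each entry is labelled by the trailing comment.
+ */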
+struct reg_acc_blk_cfg timpani_blkcfg[RA_BLOCK_NUM] = {
+	{
+		.valid_owners = {RA_OWNER_NONE, RA_OWNER_PATH_RX1,
+		0, 0, 0, 0, RA_OWNER_DRV}
+	},
+	/* RA_BLOCK_RX1 */
+	{
+		.valid_owners = {RA_OWNER_NONE, 0, RA_OWNER_PATH_RX2,
+		0, 0, 0, RA_OWNER_DRV}
+	},
+	/* RA_BLOCK_RX2 */
+	{
+		.valid_owners = {RA_OWNER_NONE, 0, 0, RA_OWNER_PATH_TX1,
+		0, 0, RA_OWNER_DRV}
+	},
+	/* RA_BLOCK_TX1 */
+	{
+		.valid_owners = {RA_OWNER_NONE, 0, 0, 0, RA_OWNER_PATH_TX2,
+		0, RA_OWNER_DRV}
+	},
+	/* RA_BLOCK_TX2 */
+	{
+		.valid_owners = {RA_OWNER_NONE, 0, 0, 0, 0,
+		RA_OWNER_PATH_LB, RA_OWNER_DRV}
+	},
+	/* RA_BLOCK_LB */
+	{
+		.valid_owners = {RA_OWNER_NONE, RA_OWNER_PATH_RX1,
+		RA_OWNER_PATH_RX2, 0, 0, RA_OWNER_PATH_LB, RA_OWNER_DRV}
+	},
+	/* RA_BLOCK_SHARED_RX_LB */
+	{
+		.valid_owners = {RA_OWNER_NONE, 0, 0, RA_OWNER_PATH_TX1,
+		RA_OWNER_PATH_TX2, 0, RA_OWNER_DRV}
+	},
+	/* RA_BLOCK_SHARED_TX */
+	{
+		.valid_owners = {RA_OWNER_NONE, 0, 0, RA_OWNER_PATH_TX1,
+		RA_OWNER_PATH_TX2, 0, RA_OWNER_DRV}
+	},
+	/* RA_BLOCK_TXFE1 */
+	{
+		.valid_owners = {RA_OWNER_NONE, 0, 0, RA_OWNER_PATH_TX1,
+		RA_OWNER_PATH_TX2, 0, RA_OWNER_DRV}
+	},
+	/* RA_BLOCK_TXFE2 */
+	{
+		.valid_owners = {RA_OWNER_NONE, RA_OWNER_PATH_RX1,
+		RA_OWNER_PATH_RX2, 0, 0, RA_OWNER_PATH_LB, RA_OWNER_DRV}
+	},
+	/* RA_BLOCK_PA_COMMON */
+	{
+		.valid_owners = {RA_OWNER_NONE, RA_OWNER_PATH_RX1,
+		RA_OWNER_PATH_RX2, 0, 0, RA_OWNER_PATH_LB, RA_OWNER_DRV}
+	},
+	/* RA_BLOCK_PA_EAR */
+	{
+		.valid_owners = {RA_OWNER_NONE, RA_OWNER_PATH_RX1,
+		RA_OWNER_PATH_RX2, 0, 0, RA_OWNER_PATH_LB, RA_OWNER_DRV}
+	},
+	/* RA_BLOCK_PA_HPH */
+	{
+		.valid_owners = {RA_OWNER_NONE, RA_OWNER_PATH_RX1,
+		RA_OWNER_PATH_RX2, 0, 0, RA_OWNER_PATH_LB, RA_OWNER_DRV}
+	},
+	/* RA_BLOCK_PA_LINE */
+	{
+		.valid_owners = {RA_OWNER_NONE, RA_OWNER_PATH_RX1,
+		RA_OWNER_PATH_RX2, 0, 0, RA_OWNER_PATH_LB, RA_OWNER_DRV}
+	},
+	/* RA_BLOCK_PA_AUX */
+	{
+		.valid_owners = {RA_OWNER_NONE, 0, 0, RA_OWNER_PATH_TX1,
+		RA_OWNER_PATH_TX2, 0, RA_OWNER_DRV}
+	},
+	/* RA_BLOCK_ADC */
+	{
+		.valid_owners = {RA_OWNER_NONE, 0, 0, RA_OWNER_PATH_TX1,
+		RA_OWNER_PATH_TX2, 0, RA_OWNER_DRV}
+	},
+	/* RA_BLOCK_DMIC */
+	{
+		.valid_owners = {RA_OWNER_NONE, 0, 0, RA_OWNER_PATH_TX1,
+		RA_OWNER_PATH_TX2, 0, RA_OWNER_DRV}
+	},
+	/* RA_BLOCK_TX_I2S */
+	{
+		.valid_owners = {RA_OWNER_NONE, 0, 0, 0, 0, 0, RA_OWNER_DRV}
+	},
+	/* RA_BLOCK_DRV */
+	{
+		.valid_owners = {RA_OWNER_NONE, 0, 0, 0, 0, 0, RA_OWNER_DRV}
+	},
+	/* RA_BLOCK_TEST */
+	{
+		.valid_owners = {RA_OWNER_NONE, 0, 0, 0, 0, 0, RA_OWNER_DRV}
+	},
+	/* RA_BLOCK_RESERVED */
+};
+
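+/*
+ * Driver-wide codec state: per-path state, a count of open paths, the
+ * Marimba core handle and codec platform data, all protected by one mutex.
+ */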
+struct adie_codec_state {
+	struct adie_codec_path path[ADIE_CODEC_MAX];
+	u32 ref_cnt;
+	struct marimba *pdrv_ptr;
+	struct marimba_codec_platform_data *codec_pdata;
+	struct mutex lock;
+};
+
+static struct adie_codec_state adie_codec;
+
+/* A cacheable register is one for which a write of the register's current
+ * value may safely be skipped, because rewriting the same value does not
+ * change the state of the hardware register.
+ *
+ * Some registers are uncacheable: even when they are written again with
+ * their current value, the write has a side effect and must go through.
+ *
+ * Knowing the codec's uncacheable registers lets the driver avoid
+ * unnecessary register writes while making sure important writes are not
+ * skipped.
+ */
+
+static bool timpani_register_is_cacheable(u8 reg)
+{
+	switch (reg) {
+	case TIMPANI_A_PA_LINE_L_GAIN:
+	case TIMPANI_A_PA_LINE_R_GAIN:
+	case TIMPANI_A_PA_HPH_L_GAIN:
+	case TIMPANI_A_PA_HPH_R_GAIN:
+	case TIMPANI_A_CDC_GCTL1:
+	case TIMPANI_A_CDC_ST_CTL:
+	case TIMPANI_A_CDC_GCTL2:
+	case TIMPANI_A_CDC_ARB_BYPASS_CTL:
+	case TIMPANI_A_CDC_CH_CTL:
+	case TIMPANI_A_CDC_ANC1_IIR_COEFF_PTR:
+	case TIMPANI_A_CDC_ANC1_IIR_COEFF_MSB:
+	case TIMPANI_A_CDC_ANC1_IIR_COEFF_LSB:
+	case TIMPANI_A_CDC_ANC1_LPF_COEFF_PTR:
+	case TIMPANI_A_CDC_ANC1_LPF_COEFF_MSB:
+	case TIMPANI_A_CDC_ANC1_LPF_COEFF_LSB:
+	case TIMPANI_A_CDC_ANC1_SCALE_PTR:
+	case TIMPANI_A_CDC_ANC1_SCALE:
+	case TIMPANI_A_CDC_ANC2_IIR_COEFF_PTR:
+	case TIMPANI_A_CDC_ANC2_IIR_COEFF_MSB:
+	case TIMPANI_A_CDC_ANC2_IIR_COEFF_LSB:
+	case TIMPANI_A_CDC_ANC2_LPF_COEFF_PTR:
+	case TIMPANI_A_CDC_ANC2_LPF_COEFF_MSB:
+	case TIMPANI_A_CDC_ANC2_LPF_COEFF_LSB:
+	case TIMPANI_A_CDC_ANC2_SCALE_PTR:
+	case TIMPANI_A_CDC_ANC2_SCALE:
+	case TIMPANI_A_CDC_ANC1_CTL1:
+	case TIMPANI_A_CDC_ANC1_CTL2:
+	case TIMPANI_A_CDC_ANC1_FF_FB_SHIFT:
+	case TIMPANI_A_CDC_ANC2_CTL1:
+	case TIMPANI_A_CDC_ANC2_CTL2:
+	case TIMPANI_A_CDC_ANC2_FF_FB_SHIFT:
+		return false;
+	default:
+		return true;
+	}
+}
+
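+/*
+ * Masked register write through the Marimba core. A shadow copy of every
+ * register is kept; the write is skipped when the register is cacheable and
+ * the new value equals the shadowed one.
+ */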
+static int adie_codec_write(u8 reg, u8 mask, u8 val)
+{
+	int rc = 0;
+	u8 new_val;
+
+	new_val = (val & mask) | (timpani_shadow[reg] & ~mask);
+	if (!(timpani_register_is_cacheable(reg) &&
+		(new_val == timpani_shadow[reg]))) {
+
+		rc = marimba_write_bit_mask(adie_codec.pdrv_ptr, reg,  &new_val,
+			1, 0xFF);
+		if (IS_ERR_VALUE(rc)) {
+			pr_err("%s: fail to write reg %x\n", __func__, reg);
+			rc = -EIO;
+			goto error;
+		}
+		timpani_shadow[reg] = new_val;
+		pr_debug("%s: write reg %x val %x new value %x\n", __func__,
+			reg, val, new_val);
+	}
+
+error:
+	return rc;
+}
+
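+/* Nonzero if a path other than 'path_type' has a reference in 'reg_ref'. */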
+static int reg_in_use(u8 reg_ref, u8 path_type)
+{
+	if ((reg_ref & ~path_type) == 0)
+		return 0;
+	else
+		return 1;
+}
+
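+/*
+ * Reference-counted register write for fields shared between paths. Each
+ * field's path_mask records which paths use it; a field is only written when
+ * no other path holds it, and the calling path's bit is added (INC) or
+ * removed (DEC) as requested.
+ */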
+static int adie_codec_refcnt_write(u8 reg, u8 mask, u8 val, enum refcnt cnt,
+		u8 path_type)
+{
+	u8 i;
+	int j;
+	u8 fld_mask;
+	u8 path_mask;
+	u8 reg_mask = 0;
+	int rc = 0;
+
+	for (i = 0; i < 0xEF; i++) {
+		if (timpani_regset[i].reg_addr == reg) {
+			for (j = 0; j < TIMPANI_MAX_FIELDS; j++) {
+				fld_mask = timpani_regset[i].fld_ref_cnt[j].mask
+					& mask;
+				path_mask = timpani_regset[i].fld_ref_cnt[j]
+							.path_mask;
+				if (fld_mask) {
+					if (!reg_in_use(path_mask, path_type))
+						reg_mask |= fld_mask;
+					if (cnt == INC)
+						timpani_regset[i].fld_ref_cnt[j]
+							.path_mask |= path_type;
+					else if (cnt == DEC)
+						timpani_regset[i].fld_ref_cnt[j]
+							.path_mask &=
+								~path_type;
+				}
+			}
+
+			if (reg_mask)
+				rc = adie_codec_write(reg, reg_mask, val);
+			reg_mask = 0;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+static int adie_codec_read(u8 reg, u8 *val)
+{
+	return marimba_read(adie_codec.pdrv_ptr, reg, val, 1);
+}
+
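+/*
+ * Pick the hardware setting for a path: among the profile settings with the
+ * requested OSR, choose the smallest freq_plan that still satisfies the
+ * requested plan. Only allowed while the path is fully off.
+ */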
+static int timpani_adie_codec_setpath(struct adie_codec_path *path_ptr,
+					u32 freq_plan, u32 osr)
+{
+	int rc = 0;
+	u32 i, freq_idx = 0, freq = 0;
+
+	if (path_ptr == NULL)
+		return -EINVAL;
+
+	if (path_ptr->curr_stage != ADIE_CODEC_DIGITAL_OFF) {
+		rc = -EBUSY;
+		goto error;
+	}
+
+	for (i = 0; i < path_ptr->profile->setting_sz; i++) {
+		if (path_ptr->profile->settings[i].osr == osr) {
+			if (path_ptr->profile->settings[i].freq_plan >=
+				freq_plan) {
+				if (freq == 0) {
+					freq = path_ptr->profile->settings[i].
+								freq_plan;
+					freq_idx = i;
+				} else if (path_ptr->profile->settings[i].
+					freq_plan < freq) {
+					freq = path_ptr->profile->settings[i].
+								freq_plan;
+					freq_idx = i;
+				}
+			}
+		}
+	}
+
+	if (freq_idx >= path_ptr->profile->setting_sz)
+		rc = -ENODEV;
+	else {
+		path_ptr->hwsetting_idx = freq_idx;
+		path_ptr->stage_idx = 0;
+	}
+
+error:
+	return rc;
+}
+
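+/* Return 0 if some profile setting can satisfy the requested frequency plan. */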
+static u32 timpani_adie_codec_freq_supported(
+				struct adie_codec_dev_profile *profile,
+				u32 requested_freq)
+{
+	u32 i, rc = -EINVAL;
+
+	for (i = 0; i < profile->setting_sz; i++) {
+		if (profile->settings[i].freq_plan >= requested_freq) {
+			rc = 0;
+			break;
+		}
+	}
+	return rc;
+}
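+
+/*
+ * Enable or disable the sidetone feed: the RX1 sidetone bit plus the
+ * matching TX1/TX2 sidetone-mixing bit for the RX path that owns the
+ * registers. Requires the RX path to be in DIGITAL_ANALOG_READY.
+ */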
+int timpani_adie_codec_enable_sidetone(struct adie_codec_path *rx_path_ptr,
+	u32 enable)
+{
+	int rc = 0;
+
+	pr_debug("%s()\n", __func__);
+
+	mutex_lock(&adie_codec.lock);
+
+	if (!rx_path_ptr || &adie_codec.path[ADIE_CODEC_RX] != rx_path_ptr) {
+		pr_err("%s: invalid path pointer\n", __func__);
+		rc = -EINVAL;
+		goto error;
+	} else if (rx_path_ptr->curr_stage !=
+		ADIE_CODEC_DIGITAL_ANALOG_READY) {
+		pr_err("%s: bad state\n", __func__);
+		rc = -EPERM;
+		goto error;
+	}
+
+	if (enable) {
+		rc = adie_codec_write(TIMPANI_A_CDC_RX1_CTL,
+			TIMPANI_RX1_ST_MASK, TIMPANI_RX1_ST_ENABLE);
+
+		if (rx_path_ptr->reg_owner == RA_OWNER_PATH_RX1)
+			adie_codec_write(TIMPANI_A_CDC_ST_MIXING,
+				TIMPANI_CDC_ST_MIXING_TX1_MASK,
+				TIMPANI_CDC_ST_MIXING_TX1_ENABLE);
+		else if (rx_path_ptr->reg_owner == RA_OWNER_PATH_RX2)
+			adie_codec_write(TIMPANI_A_CDC_ST_MIXING,
+				TIMPANI_CDC_ST_MIXING_TX2_MASK,
+				TIMPANI_CDC_ST_MIXING_TX2_ENABLE);
+	} else {
+		rc = adie_codec_write(TIMPANI_A_CDC_RX1_CTL,
+			TIMPANI_RX1_ST_MASK, 0);
+
+		if (rx_path_ptr->reg_owner == RA_OWNER_PATH_RX1)
+			adie_codec_write(TIMPANI_A_CDC_ST_MIXING,
+				TIMPANI_CDC_ST_MIXING_TX1_MASK, 0);
+		else if (rx_path_ptr->reg_owner == RA_OWNER_PATH_RX2)
+			adie_codec_write(TIMPANI_A_CDC_ST_MIXING,
+				TIMPANI_CDC_ST_MIXING_TX2_MASK, 0);
+	}
+
+error:
+	mutex_unlock(&adie_codec.lock);
+	return rc;
+}
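+
+/*
+ * Enable ANC by replaying the caller-supplied calibration register writes,
+ * or disable both ANC blocks when 'enable' is zero. Requires the RX path to
+ * be in DIGITAL_ANALOG_READY.
+ */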
+static int timpani_adie_codec_enable_anc(struct adie_codec_path *rx_path_ptr,
+	u32 enable, struct adie_codec_anc_data *calibration_writes)
+{
+	int index = 0;
+	int rc = 0;
+	u8 reg, mask, val;
+	pr_debug("%s: enable = %d\n", __func__, enable);
+
+	mutex_lock(&adie_codec.lock);
+
+	if (!rx_path_ptr || &adie_codec.path[ADIE_CODEC_RX] != rx_path_ptr) {
+		pr_err("%s: invalid path pointer\n", __func__);
+		rc = -EINVAL;
+		goto error;
+	} else if (rx_path_ptr->curr_stage !=
+		ADIE_CODEC_DIGITAL_ANALOG_READY) {
+		pr_err("%s: bad state\n", __func__);
+		rc = -EPERM;
+		goto error;
+	}
+	if (enable) {
+		if (!calibration_writes || !calibration_writes->writes) {
+			pr_err("%s: No ANC calibration data\n", __func__);
+			rc = -EPERM;
+			goto error;
+		}
+		while (index < calibration_writes->size) {
+			ADIE_CODEC_UNPACK_ENTRY(calibration_writes->
+				writes[index], reg, mask, val);
+			adie_codec_write(reg, mask, val);
+			index++;
+		}
+	} else {
+		adie_codec_write(TIMPANI_A_CDC_ANC1_CTL1,
+		TIMPANI_CDC_ANC1_CTL1_ANC1_EN_M,
+		TIMPANI_CDC_ANC1_CTL1_ANC1_EN_ANC_DIS <<
+		TIMPANI_CDC_ANC1_CTL1_ANC1_EN_S);
+
+		adie_codec_write(TIMPANI_A_CDC_ANC2_CTL1,
+		TIMPANI_CDC_ANC2_CTL1_ANC2_EN_M,
+		TIMPANI_CDC_ANC2_CTL1_ANC2_EN_ANC_DIS <<
+		TIMPANI_CDC_ANC2_CTL1_ANC2_EN_S);
+	}
+
+error:
+	mutex_unlock(&adie_codec.lock);
+	return rc;
+}
+
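+/*
+ * Restore every register field belonging to block 'blk' to its power-on
+ * default, via the reference-counted write so fields still held by another
+ * path are left untouched.
+ */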
+static void adie_codec_restore_regdefault(u8 path_mask, u32 blk)
+{
+	u32 ireg;
+	u32 regset_sz = ARRAY_SIZE(timpani_regset);
+
+	for (ireg = 0; ireg < regset_sz; ireg++) {
+		if (timpani_regset[ireg].blk_mask[blk]) {
+			/* only process registers belonging to this block */
+			u8 reg = timpani_regset[ireg].reg_addr;
+			u8 mask = timpani_regset[ireg].blk_mask[blk];
+			u8 val = timpani_regset[ireg].reg_default;
+			adie_codec_refcnt_write(reg, mask, val, IGNORE,
+				path_mask);
+		}
+	}
+}
+
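+/*
+ * Called when a path reaches a new stage. Only the transition to
+ * ADIE_CODEC_DIGITAL_OFF is acted on: every block this path may own is
+ * restored to its power-on defaults.
+ */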
+static void adie_codec_reach_stage_action(struct adie_codec_path *path_ptr,
+	u32 stage)
+{
+	u32 iblk, iowner; /* iterators */
+	u8 path_mask;
+
+	if (path_ptr == NULL)
+		return;
+
+	path_mask = TIMPANI_PATH_MASK(path_ptr->reg_owner);
+
+	if (stage != ADIE_CODEC_DIGITAL_OFF)
+		return;
+
+	for (iblk = 0 ; iblk <= RA_BLOCK_RESERVED ; iblk++) {
+		for (iowner = 0; iowner < RA_OWNER_NUM; iowner++) {
+			if (timpani_blkcfg[iblk].valid_owners[iowner] ==
+					path_ptr->reg_owner) {
+				adie_codec_restore_regdefault(path_mask, iblk);
+				break; /* This path owns this block */
+			}
+		}
+	}
+}
+
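+/*
+ * Walk the action list of the current hardware setting (register writes,
+ * delays, stage markers) until the requested stage is reached. Writes drop
+ * this path's field references when moving to DIGITAL_OFF and take them
+ * otherwise.
+ */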
+static int timpani_adie_codec_proceed_stage(struct adie_codec_path *path_ptr,
+						u32 state)
+{
+	int rc = 0, loop_exit = 0;
+	struct adie_codec_action_unit *curr_action;
+	struct adie_codec_hwsetting_entry *setting;
+	u8 reg, mask, val;
+	u8 path_mask;
+
+	if (path_ptr == NULL)
+		return -EINVAL;
+
+	path_mask = TIMPANI_PATH_MASK(path_ptr->reg_owner);
+
+	mutex_lock(&adie_codec.lock);
+	setting = &path_ptr->profile->settings[path_ptr->hwsetting_idx];
+	while (!loop_exit) {
+
+		curr_action = &setting->actions[path_ptr->stage_idx];
+
+		switch (curr_action->type) {
+		case ADIE_CODEC_ACTION_ENTRY:
+			ADIE_CODEC_UNPACK_ENTRY(curr_action->action,
+			reg, mask, val);
+			if (state == ADIE_CODEC_DIGITAL_OFF)
+				adie_codec_refcnt_write(reg, mask, val, DEC,
+					path_mask);
+			else
+				adie_codec_refcnt_write(reg, mask, val, INC,
+					path_mask);
+			break;
+		case ADIE_CODEC_ACTION_DELAY_WAIT:
+			if (curr_action->action > MAX_MDELAY_US)
+				msleep(curr_action->action/1000);
+			else
+				usleep_range(curr_action->action,
+				curr_action->action);
+			break;
+		case ADIE_CODEC_ACTION_STAGE_REACHED:
+			adie_codec_reach_stage_action(path_ptr,
+				curr_action->action);
+			if (curr_action->action == state) {
+				path_ptr->curr_stage = state;
+				loop_exit = 1;
+			}
+			break;
+		default:
+			BUG();
+		}
+
+		path_ptr->stage_idx++;
+		if (path_ptr->stage_idx == setting->action_sz)
+			path_ptr->stage_idx = 0;
+	}
+	mutex_unlock(&adie_codec.lock);
+
+	return rc;
+}
+
+static void timpani_codec_bring_up(void)
+{
+	/* Codec power up sequence */
+	adie_codec_write(0xFF, 0xFF, 0x08);
+	adie_codec_write(0xFF, 0xFF, 0x0A);
+	adie_codec_write(0xFF, 0xFF, 0x0E);
+	adie_codec_write(0xFF, 0xFF, 0x07);
+	adie_codec_write(0xFF, 0xFF, 0x17);
+	adie_codec_write(TIMPANI_A_MREF, 0xFF, 0xF2);
+	msleep(15);
+	adie_codec_write(TIMPANI_A_MREF, 0xFF, 0x22);
+
+	/* Bypass TX HPFs to prevent pops */
+	adie_codec_write(TIMPANI_A_CDC_BYPASS_CTL2, TIMPANI_CDC_BYPASS_CTL2_M,
+		TIMPANI_CDC_BYPASS_CTL2_POR);
+	adie_codec_write(TIMPANI_A_CDC_BYPASS_CTL3, TIMPANI_CDC_BYPASS_CTL3_M,
+		TIMPANI_CDC_BYPASS_CTL3_POR);
+}
+
+static void timpani_codec_bring_down(void)
+{
+	adie_codec_write(TIMPANI_A_MREF, 0xFF, TIMPANI_MREF_POR);
+	adie_codec_write(0xFF, 0xFF, 0x07);
+	adie_codec_write(0xFF, 0xFF, 0x06);
+	adie_codec_write(0xFF, 0xFF, 0x0E);
+	adie_codec_write(0xFF, 0xFF, 0x08);
+}
+
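+/*
+ * Claim a codec path for the given profile. The first open powers the codec
+ * up via the platform callback and runs the bring-up sequence; later opens
+ * only take another reference.
+ */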
+static int timpani_adie_codec_open(struct adie_codec_dev_profile *profile,
+	struct adie_codec_path **path_pptr)
+{
+	int rc = 0;
+
+	mutex_lock(&adie_codec.lock);
+
+	if (!profile || !path_pptr) {
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (adie_codec.path[profile->path_type].profile) {
+		rc = -EBUSY;
+		goto error;
+	}
+
+	if (!adie_codec.ref_cnt) {
+
+		if (adie_codec.codec_pdata &&
+				adie_codec.codec_pdata->marimba_codec_power) {
+
+			rc = adie_codec.codec_pdata->marimba_codec_power(1);
+			if (rc) {
+				pr_err("%s: could not power up timpani "
+						"codec\n", __func__);
+				goto error;
+			}
+			timpani_codec_bring_up();
+		} else {
+			pr_err("%s: couldn't detect timpani codec\n", __func__);
+			rc = -ENODEV;
+			goto error;
+		}
+
+	}
+
+	adie_codec.path[profile->path_type].profile = profile;
+	*path_pptr = (void *) &adie_codec.path[profile->path_type];
+	adie_codec.ref_cnt++;
+	adie_codec.path[profile->path_type].hwsetting_idx = 0;
+	adie_codec.path[profile->path_type].curr_stage = ADIE_CODEC_DIGITAL_OFF;
+	adie_codec.path[profile->path_type].stage_idx = 0;
+
+error:
+	mutex_unlock(&adie_codec.lock);
+	return rc;
+}
+
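+/*
+ * Release a codec path: force it to DIGITAL_OFF if still active, drop the
+ * reference, and power the codec down when the last path is closed.
+ */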
+static int timpani_adie_codec_close(struct adie_codec_path *path_ptr)
+{
+	int rc = 0;
+
+	mutex_lock(&adie_codec.lock);
+
+	if (!path_ptr) {
+		rc = -EINVAL;
+		goto error;
+	}
+	if (path_ptr->curr_stage != ADIE_CODEC_DIGITAL_OFF)
+		adie_codec_proceed_stage(path_ptr, ADIE_CODEC_DIGITAL_OFF);
+
+	BUG_ON(!adie_codec.ref_cnt);
+
+	path_ptr->profile = NULL;
+	adie_codec.ref_cnt--;
+
+	if (!adie_codec.ref_cnt) {
+		/* Timpani CDC power down sequence */
+		timpani_codec_bring_down();
+
+		if (adie_codec.codec_pdata &&
+				adie_codec.codec_pdata->marimba_codec_power) {
+
+			rc = adie_codec.codec_pdata->marimba_codec_power(0);
+			if (rc) {
+				pr_err("%s: could not power down timpani "
+						"codec\n", __func__);
+				goto error;
+			}
+		}
+	}
+
+error:
+	mutex_unlock(&adie_codec.lock);
+	return rc;
+}
+
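+/* Select I2S master/slave mode via bit 0 of the RX1 or TX I2S control reg. */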
+static int timpani_adie_codec_set_master_mode(struct adie_codec_path *path_ptr,
+			u8 master)
+{
+	u8 val = master ? 1 : 0;
+
+	if (!path_ptr)
+		return -EINVAL;
+
+	if (path_ptr->reg_owner == RA_OWNER_PATH_RX1)
+		adie_codec_write(TIMPANI_A_CDC_RX1_CTL, 0x01, val);
+	else if (path_ptr->reg_owner == RA_OWNER_PATH_TX1)
+		adie_codec_write(TIMPANI_A_CDC_TX_I2S_CTL, 0x01, val);
+	else
+		return -EINVAL;
+
+	return 0;
+}
+
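+/*
+ * Set the AUXPGA analog gain from a 0-100 volume percentage. Left and right
+ * gains share one register (upper/lower nibble) and are ramped one code at a
+ * time (steps of 0x11 move both nibbles) instead of jumping to the target.
+ */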
+int timpani_adie_codec_set_device_analog_volume(
+		struct adie_codec_path *path_ptr,
+		u32 num_channels, u32 volume)
+{
+	u8 val;
+	u8 curr_val;
+	u8 i;
+
+	adie_codec_read(TIMPANI_A_AUXPGA_LR_GAIN, &curr_val);
+
+	/* Volume is expressed as a percentage. */
+	/* The upper nibble is the left channel, the lower nibble the right. */
+	val = (u8)((volume * TIMPANI_CODEC_AUXPGA_GAIN_RANGE) / 100);
+	val |= val << 4;
+
+	if ((curr_val & 0x0F) < (val & 0x0F)) {
+		for (i = curr_val; i < val; i += 0x11)
+			adie_codec_write(TIMPANI_A_AUXPGA_LR_GAIN, 0xFF, i);
+	} else if ((curr_val & 0x0F) > (val & 0x0F)) {
+		for (i = curr_val; i > val; i -= 0x11)
+			adie_codec_write(TIMPANI_A_AUXPGA_LR_GAIN, 0xFF, i);
+	}
+
+	return 0;
+}
+
+enum adie_vol_type {
+	ADIE_CODEC_RX_DIG_VOL,
+	ADIE_CODEC_TX_DIG_VOL,
+	ADIE_CODEC_VOL_TYPE_MAX
+};
+
+#define CDC_RX1LG		0x84
+#define CDC_RX1RG		0x85
+#define CDC_TX1LG		0x86
+#define CDC_TX1RG		0x87
+#define	DIG_VOL_MASK		0xFF
+
+#define CDC_GCTL1		0x8A
+#define RX1_PGA_UPDATE_L	0x04
+#define RX1_PGA_UPDATE_R	0x08
+#define TX1_PGA_UPDATE_L	0x40
+#define TX1_PGA_UPDATE_R	0x80
+#define CDC_GCTL1_RX_MASK	0x0F
+#define CDC_GCTL1_TX_MASK	0xF0
+
+enum {
+	TIMPANI_MIN_DIG_VOL	= -84,	/* in dB */
+	TIMPANI_MAX_DIG_VOL	=  16,	/* in dB */
+	TIMPANI_DIG_VOL_STEP	=  3	/* in dB */
+};
+
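+/*
+ * Map a 0-100 volume percentage onto the -84..+16 dB digital gain range and
+ * walk the gain register towards the target in 3 dB steps, latching each
+ * step through the CDC_GCTL1 PGA update bits.
+ */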
+static int timpani_adie_codec_set_dig_vol(enum adie_vol_type vol_type,
+	u32 num_chan, u32 vol_per)
+{
+	u8 reg_left, reg_right;
+	u8 gain_reg_val, gain_reg_mask;
+	s8 new_reg_val, cur_reg_val;
+	s8 step_size;
+
+	adie_codec_read(CDC_GCTL1, &gain_reg_val);
+
+	if (vol_type == ADIE_CODEC_RX_DIG_VOL) {
+
+		pr_debug("%s : RX DIG VOL. num_chan = %u\n", __func__,
+				num_chan);
+		reg_left =  CDC_RX1LG;
+		reg_right = CDC_RX1RG;
+
+		if (num_chan == 1)
+			gain_reg_val |=  RX1_PGA_UPDATE_L;
+		else
+			gain_reg_val |= (RX1_PGA_UPDATE_L | RX1_PGA_UPDATE_R);
+
+		gain_reg_mask = CDC_GCTL1_RX_MASK;
+	} else {
+
+		pr_debug("%s : TX DIG VOL. num_chan = %u\n", __func__,
+				num_chan);
+		reg_left = CDC_TX1LG;
+		reg_right = CDC_TX1RG;
+
+		if (num_chan == 1)
+			gain_reg_val |=  TX1_PGA_UPDATE_L;
+		else
+			gain_reg_val |= (TX1_PGA_UPDATE_L | TX1_PGA_UPDATE_R);
+
+		gain_reg_mask = CDC_GCTL1_TX_MASK;
+	}
+
+	adie_codec_read(reg_left, &cur_reg_val);
+
+	pr_debug("%s: vol_per = %d cur_reg_val = %d 0x%x\n", __func__, vol_per,
+			cur_reg_val, cur_reg_val);
+
+	new_reg_val =  TIMPANI_MIN_DIG_VOL +
+		(((TIMPANI_MAX_DIG_VOL - TIMPANI_MIN_DIG_VOL) * vol_per) / 100);
+
+	pr_debug("new_reg_val = %d 0x%x\n", new_reg_val, new_reg_val);
+
+	if (new_reg_val > cur_reg_val) {
+		step_size = TIMPANI_DIG_VOL_STEP;
+	} else if (new_reg_val < cur_reg_val) {
+		step_size = -TIMPANI_DIG_VOL_STEP;
+	} else {
+		pr_debug("new_reg_val and cur_reg_val are same 0x%x\n",
+				new_reg_val);
+		return 0;
+	}
+
+	while (cur_reg_val != new_reg_val) {
+
+		if (((new_reg_val > cur_reg_val) &&
+			((new_reg_val - cur_reg_val) < TIMPANI_DIG_VOL_STEP)) ||
+			((cur_reg_val > new_reg_val) &&
+			((cur_reg_val - new_reg_val)
+			 < TIMPANI_DIG_VOL_STEP))) {
+
+			cur_reg_val = new_reg_val;
+
+			pr_debug("diff less than step. write new_reg_val = %d"
+				" 0x%x\n", new_reg_val, new_reg_val);
+
+		 } else {
+			cur_reg_val = cur_reg_val + step_size;
+
+			pr_debug("cur_reg_val = %d 0x%x\n",
+					cur_reg_val, cur_reg_val);
+		 }
+
+		adie_codec_write(reg_left, DIG_VOL_MASK, cur_reg_val);
+
+		if (num_chan == 2)
+			adie_codec_write(reg_right, DIG_VOL_MASK, cur_reg_val);
+
+		adie_codec_write(CDC_GCTL1, gain_reg_mask, gain_reg_val);
+	}
+	return 0;
+}
+
+static int timpani_adie_codec_set_device_digital_volume(
+		struct adie_codec_path *path_ptr,
+		u32 num_channels, u32 vol_percentage /* in percentage */)
+{
+	enum adie_vol_type vol_type;
+
+	if (!path_ptr  || (path_ptr->curr_stage !=
+				ADIE_CODEC_DIGITAL_ANALOG_READY)) {
+		pr_info("%s: timpani codec not ready for volume control\n",
+		       __func__);
+		return  -EPERM;
+	}
+
+	if (num_channels > 2) {
+		pr_err("%s: timpani odec only supports max two channels\n",
+		       __func__);
+		return -EINVAL;
+	}
+
+	if (path_ptr->profile->path_type == ADIE_CODEC_RX) {
+		vol_type = ADIE_CODEC_RX_DIG_VOL;
+	} else if (path_ptr->profile->path_type == ADIE_CODEC_TX) {
+		vol_type = ADIE_CODEC_TX_DIG_VOL;
+	} else {
+		pr_err("%s: invalid device data neither RX nor TX\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	timpani_adie_codec_set_dig_vol(vol_type, num_channels, vol_percentage);
+
+	return 0;
+}
+
+static const struct adie_codec_operations timpani_adie_ops = {
+	.codec_id = TIMPANI_ID,
+	.codec_open = timpani_adie_codec_open,
+	.codec_close = timpani_adie_codec_close,
+	.codec_setpath = timpani_adie_codec_setpath,
+	.codec_proceed_stage = timpani_adie_codec_proceed_stage,
+	.codec_freq_supported = timpani_adie_codec_freq_supported,
+	.codec_enable_sidetone = timpani_adie_codec_enable_sidetone,
+	.codec_set_master_mode = timpani_adie_codec_set_master_mode,
+	.codec_enable_anc = timpani_adie_codec_enable_anc,
+	.codec_set_device_analog_volume =
+		timpani_adie_codec_set_device_analog_volume,
+	.codec_set_device_digital_volume =
+		timpani_adie_codec_set_device_digital_volume,
+};
+
+static void timpani_codec_populate_shadow_registers(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(timpani_regset); i++) {
+		if (timpani_regset[i].reg_addr < TIMPANI_ARRAY_SIZE) {
+			timpani_shadow[timpani_regset[i].reg_addr] =
+				timpani_regset[i].reg_default;
+		}
+	}
+}
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *debugfs_timpani_dent;
+static struct dentry *debugfs_peek;
+static struct dentry *debugfs_poke;
+static struct dentry *debugfs_power;
+static struct dentry *debugfs_dump;
+
+static unsigned char read_data;
+
+static int codec_debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
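+/* Parse up to num_of_par space-separated integers from buf into param1;
+ * tokens with 'x'/'X' in the second position are parsed as hex.
+ */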
+static int get_parameters(char *buf, long int *param1, int num_of_par)
+{
+	char *token;
+	int base, cnt;
+
+	token = strsep(&buf, " ");
+
+	for (cnt = 0; cnt < num_of_par; cnt++) {
+		if (token != NULL) {
+			if ((token[1] == 'x') || (token[1] == 'X'))
+				base = 16;
+			else
+				base = 10;
+
+			if (strict_strtoul(token, base, &param1[cnt]) != 0)
+				return -EINVAL;
+
+			token = strsep(&buf, " ");
+		} else
+			return -EINVAL;
+	}
+	return 0;
+}
+
+static ssize_t codec_debug_read(struct file *file, char __user *ubuf,
+				size_t count, loff_t *ppos)
+{
+	char lbuf[8];
+
+	snprintf(lbuf, sizeof(lbuf), "0x%x\n", read_data);
+	return simple_read_from_buffer(ubuf, count, ppos, lbuf, strlen(lbuf));
+}
+
+static ssize_t codec_debug_write(struct file *filp,
+	const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	char *access_str = filp->private_data;
+	char lbuf[32];
+	int rc;
+	int i;
+	int read_result;
+	u8 reg_val;
+	long int param[5];
+
+	if (cnt > sizeof(lbuf) - 1)
+		return -EINVAL;
+
+	rc = copy_from_user(lbuf, ubuf, cnt);
+	if (rc)
+		return -EFAULT;
+
+	lbuf[cnt] = '\0';
+
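+	/* The debugfs file name selects the command: "power" takes 0|1,
+	 * "poke" takes <reg> <val>, "peek" takes <reg>, and "dump" prints
+	 * all codec registers. */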
+	if (!strcmp(access_str, "power")) {
+		if (get_parameters(lbuf, param, 1) == 0) {
+			switch (param[0]) {
+			case 1:
+				adie_codec.codec_pdata->marimba_codec_power(1);
+				timpani_codec_bring_up();
+				break;
+			case 0:
+				timpani_codec_bring_down();
+				adie_codec.codec_pdata->marimba_codec_power(0);
+				break;
+			default:
+				rc = -EINVAL;
+				break;
+			}
+		} else
+			rc = -EINVAL;
+	} else if (!strcmp(access_str, "poke")) {
+		/* write */
+		rc = get_parameters(lbuf, param, 2);
+		if ((param[0] <= 0xFF) && (param[1] <= 0xFF) &&
+			(rc == 0))
+			adie_codec_write(param[0], 0xFF, param[1]);
+		else
+			rc = -EINVAL;
+	} else if (!strcmp(access_str, "peek")) {
+		/* read */
+		rc = get_parameters(lbuf, param, 1);
+		if ((param[0] <= 0xFF) && (rc == 0))
+			adie_codec_read(param[0], &read_data);
+		else
+			rc = -EINVAL;
+	} else if (!strcmp(access_str, "dump")) {
+		pr_info("************** timpani regs *************\n");
+		for (i = 0; i < 0xFF; i++) {
+			read_result = adie_codec_read(i, &reg_val);
+			if (read_result < 0) {
+				pr_info("failed to read codec register\n");
+				break;
+			} else
+				pr_info("reg 0x%02X val 0x%02X\n", i, reg_val);
+		}
+		pr_info("*****************************************\n");
+	}
+
+	if (rc == 0)
+		rc = cnt;
+	else
+		pr_err("%s: rc = %d\n", __func__, rc);
+
+	return rc;
+}
+
+static const struct file_operations codec_debug_ops = {
+	.open = codec_debug_open,
+	.write = codec_debug_write,
+	.read = codec_debug_read
+};
+#endif
+
+static int timpani_codec_probe(struct platform_device *pdev)
+{
+	int rc;
+
+	adie_codec.pdrv_ptr = platform_get_drvdata(pdev);
+	adie_codec.codec_pdata = pdev->dev.platform_data;
+
+	if (adie_codec.codec_pdata->snddev_profile_init)
+		adie_codec.codec_pdata->snddev_profile_init();
+
+	timpani_codec_populate_shadow_registers();
+
+	/* Register the timpani ADIE operations */
+	rc = adie_codec_register_codec_operations(&timpani_adie_ops);
+
+#ifdef CONFIG_DEBUG_FS
+	debugfs_timpani_dent = debugfs_create_dir("msm_adie_codec", 0);
+	if (!IS_ERR(debugfs_timpani_dent)) {
+		debugfs_peek = debugfs_create_file("peek",
+		S_IFREG | S_IRUGO, debugfs_timpani_dent,
+		(void *) "peek", &codec_debug_ops);
+
+		debugfs_poke = debugfs_create_file("poke",
+		S_IFREG | S_IRUGO, debugfs_timpani_dent,
+		(void *) "poke", &codec_debug_ops);
+
+		debugfs_power = debugfs_create_file("power",
+		S_IFREG | S_IRUGO, debugfs_timpani_dent,
+		(void *) "power", &codec_debug_ops);
+
+		debugfs_dump = debugfs_create_file("dump",
+		S_IFREG | S_IRUGO, debugfs_timpani_dent,
+		(void *) "dump", &codec_debug_ops);
+
+	}
+#endif
+
+	return rc;
+}
+
+static struct platform_driver timpani_codec_driver = {
+	.probe = timpani_codec_probe,
+	.driver = {
+		.name = "timpani_codec",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init timpani_codec_init(void)
+{
+	s32 rc;
+
+	rc = platform_driver_register(&timpani_codec_driver);
+	if (IS_ERR_VALUE(rc))
+		goto error;
+
+	adie_codec.path[ADIE_CODEC_TX].reg_owner = RA_OWNER_PATH_TX1;
+	adie_codec.path[ADIE_CODEC_RX].reg_owner = RA_OWNER_PATH_RX1;
+	adie_codec.path[ADIE_CODEC_LB].reg_owner = RA_OWNER_PATH_LB;
+	mutex_init(&adie_codec.lock);
+error:
+	return rc;
+}
+
+static void __exit timpani_codec_exit(void)
+{
+#ifdef CONFIG_DEBUG_FS
+	debugfs_remove(debugfs_peek);
+	debugfs_remove(debugfs_poke);
+	debugfs_remove(debugfs_power);
+	debugfs_remove(debugfs_dump);
+	debugfs_remove(debugfs_timpani_dent);
+#endif
+	platform_driver_unregister(&timpani_codec_driver);
+}
+
+module_init(timpani_codec_init);
+module_exit(timpani_codec_exit);
+
+MODULE_DESCRIPTION("Timpani codec driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/tps65023.c b/drivers/mfd/tps65023.c
new file mode 100644
index 0000000..e67997c
--- /dev/null
+++ b/drivers/mfd/tps65023.c
@@ -0,0 +1,122 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/i2c.h>
+#include <linux/mfd/tps65023.h>
+
+/* TPS65023_registers */
+#define TPS65023_VERSION	0
+#define TPS65023_PGOODZ		1
+#define TPS65023_MASK		2
+#define TPS65023_REG_CTRL	3
+#define TPS65023_CON_CTRL	4
+#define TPS65023_CON_CTRL2	5
+#define TPS65023_DEFCORE	6
+#define TPS65023_DEFSLEW	7
+#define TPS65023_LDO_CTRL	8
+#define TPS65023_MAX		9
+
+static struct i2c_client *tpsclient;
+
+int tps65023_set_dcdc1_level(int mvolts)
+{
+	int val;
+	int ret;
+
+	if (!tpsclient)
+		return -ENODEV;
+
+	if (mvolts < 800 || mvolts > 1600)
+		return -EINVAL;
+
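+	/* DCDC1 is programmed in 25 mV steps above 800 mV;
+	 * 0x1F selects the 1600 mV maximum. */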
+	if (mvolts == 1600)
+		val = 0x1F;
+	else
+		val = ((mvolts - 800)/25) & 0x1F;
+
+	ret = i2c_smbus_write_byte_data(tpsclient, TPS65023_DEFCORE, val);
+
+	if (!ret)
+		ret = i2c_smbus_write_byte_data(tpsclient,
+				TPS65023_CON_CTRL2, 0x80);
+
+	return ret;
+}
+EXPORT_SYMBOL(tps65023_set_dcdc1_level);
+
+int tps65023_get_dcdc1_level(int *mvolts)
+{
+	int val;
+
+	if (!tpsclient)
+		return -ENODEV;
+
+	val = i2c_smbus_read_byte_data(tpsclient, TPS65023_DEFCORE) & 0x1F;
+
+	if (val == 0x1F)
+		*mvolts = 1600;
+	else
+		*mvolts = (val * 25) + 800;
+	return 0;
+}
+EXPORT_SYMBOL(tps65023_get_dcdc1_level);
+
+static int tps65023_probe(struct i2c_client *client,
+		const struct i2c_device_id *dev_id)
+{
+	if (!i2c_check_functionality(client->adapter,
+				I2C_FUNC_SMBUS_BYTE_DATA)) {
+		printk(KERN_ERR "TPS65023 does not support SMBUS_BYTE_DATA.\n");
+		return -EINVAL;
+	}
+
+	tpsclient = client;
+	printk(KERN_INFO "TPS65023: PMIC probed.\n");
+	return 0;
+}
+
+static int __devexit tps65023_remove(struct i2c_client *client)
+{
+	tpsclient = NULL;
+	return 0;
+}
+
+static const struct i2c_device_id tps65023_id[] = {
+	{ "tps65023", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, tps65023_id);
+
+static struct i2c_driver tps65023_driver = {
+	.driver = {
+		.name   = "tps65023",
+		.owner  = THIS_MODULE,
+	},
+	.probe  = tps65023_probe,
+	.remove = __devexit_p(tps65023_remove),
+	.id_table = tps65023_id,
+};
+
+static int __init tps65023_init(void)
+{
+	return i2c_add_driver(&tps65023_driver);
+}
+
+
+static void __exit tps65023_exit(void)
+{
+	i2c_del_driver(&tps65023_driver);
+}
+
+module_init(tps65023_init);
+module_exit(tps65023_exit);
diff --git a/drivers/mfd/wcd9310-core.c b/drivers/mfd/wcd9310-core.c
new file mode 100644
index 0000000..5a77785
--- /dev/null
+++ b/drivers/mfd/wcd9310-core.c
@@ -0,0 +1,734 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/pm8xxx/pm8921.h>
+#include <linux/mfd/wcd9310/core.h>
+#include <linux/mfd/wcd9310/pdata.h>
+#include <linux/mfd/wcd9310/registers.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/debugfs.h>
+#include <linux/regulator/consumer.h>
+#include <sound/soc.h>
+
+#define TABLA_REGISTER_START_OFFSET 0x800
+static int tabla_read(struct tabla *tabla, unsigned short reg,
+		       int bytes, void *dest, bool interface_reg)
+{
+	int ret;
+	u8 *buf = dest;
+
+	if (bytes <= 0) {
+		dev_err(tabla->dev, "Invalid byte read length %d\n", bytes);
+		return -EINVAL;
+	}
+
+	ret = tabla->read_dev(tabla, reg, bytes, dest, interface_reg);
+	if (ret < 0) {
+		dev_err(tabla->dev, "Tabla read failed\n");
+		return ret;
+	} else
+		dev_dbg(tabla->dev, "Read 0x%02x from R%d(0x%x)\n",
+			 *buf, reg, reg);
+
+	return 0;
+}
+int tabla_reg_read(struct tabla *tabla, unsigned short reg)
+{
+	u8 val;
+	int ret;
+
+	mutex_lock(&tabla->io_lock);
+	ret = tabla_read(tabla, reg, 1, &val, false);
+	mutex_unlock(&tabla->io_lock);
+
+	if (ret < 0)
+		return ret;
+	else
+		return val;
+}
+EXPORT_SYMBOL_GPL(tabla_reg_read);
+
+static int tabla_write(struct tabla *tabla, unsigned short reg,
+			int bytes, void *src, bool interface_reg)
+{
+	u8 *buf = src;
+
+	if (bytes <= 0) {
+		pr_err("%s: Error, invalid write length\n", __func__);
+		return -EINVAL;
+	}
+
+	dev_dbg(tabla->dev, "Write %02x to R%d(0x%x)\n",
+		 *buf, reg, reg);
+
+	return tabla->write_dev(tabla, reg, bytes, src, interface_reg);
+}
+
+int tabla_reg_write(struct tabla *tabla, unsigned short reg,
+		     u8 val)
+{
+	int ret;
+
+	mutex_lock(&tabla->io_lock);
+	ret = tabla_write(tabla, reg, 1, &val, false);
+	mutex_unlock(&tabla->io_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(tabla_reg_write);
+
+static u8 tabla_pgd_la;
+static u8 tabla_inf_la;
+
+int tabla_get_logical_addresses(u8 *pgd_la, u8 *inf_la)
+{
+	*pgd_la = tabla_pgd_la;
+	*inf_la = tabla_inf_la;
+	return 0;
+
+}
+EXPORT_SYMBOL_GPL(tabla_get_logical_addresses);
+
+int tabla_interface_reg_read(struct tabla *tabla, unsigned short reg)
+{
+	u8 val;
+	int ret;
+
+	mutex_lock(&tabla->io_lock);
+	ret = tabla_read(tabla, reg, 1, &val, true);
+	mutex_unlock(&tabla->io_lock);
+
+	if (ret < 0)
+		return ret;
+	else
+		return val;
+}
+EXPORT_SYMBOL_GPL(tabla_interface_reg_read);
+
+int tabla_interface_reg_write(struct tabla *tabla, unsigned short reg,
+		     u8 val)
+{
+	int ret;
+
+	mutex_lock(&tabla->io_lock);
+	ret = tabla_write(tabla, reg, 1, &val, true);
+	mutex_unlock(&tabla->io_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(tabla_interface_reg_write);
+
+int tabla_bulk_read(struct tabla *tabla, unsigned short reg,
+		     int count, u8 *buf)
+{
+	int ret;
+
+	mutex_lock(&tabla->io_lock);
+
+	ret = tabla_read(tabla, reg, count, buf, false);
+
+	mutex_unlock(&tabla->io_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(tabla_bulk_read);
+
+int tabla_bulk_write(struct tabla *tabla, unsigned short reg,
+		     int count, u8 *buf)
+{
+	int ret;
+
+	mutex_lock(&tabla->io_lock);
+
+	ret = tabla_write(tabla, reg, count, buf, false);
+
+	mutex_unlock(&tabla->io_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(tabla_bulk_write);
+
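+/* Read over SLIMbus from either the interface device or the codec's main
+ * (pgd) device, depending on the interface flag.
+ */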
+static int tabla_slim_read_device(struct tabla *tabla, unsigned short reg,
+				int bytes, void *dest, bool interface)
+{
+	int ret;
+	struct slim_ele_access msg;
+	msg.start_offset = TABLA_REGISTER_START_OFFSET + reg;
+	msg.num_bytes = bytes;
+	msg.comp = NULL;
+
+	mutex_lock(&tabla->xfer_lock);
+	if (interface)
+		ret = slim_request_val_element(tabla->slim_slave, &msg, dest,
+			bytes);
+	else
+		ret = slim_request_val_element(tabla->slim, &msg, dest, bytes);
+
+	if (ret)
+		pr_err("%s: Error, Tabla read failed\n", __func__);
+
+	mutex_unlock(&tabla->xfer_lock);
+	return ret;
+}
+/* Interface specifies whether the write is to the interface or general
+ * registers.
+ */
+static int tabla_slim_write_device(struct tabla *tabla, unsigned short reg,
+				   int bytes, void *src, bool interface)
+{
+	int ret;
+	struct slim_ele_access msg;
+	msg.start_offset = TABLA_REGISTER_START_OFFSET + reg;
+	msg.num_bytes = bytes;
+	msg.comp = NULL;
+
+	mutex_lock(&tabla->xfer_lock);
+	if (interface)
+		ret = slim_change_val_element(tabla->slim_slave, &msg, src,
+			bytes);
+	else
+		ret = slim_change_val_element(tabla->slim, &msg, src, bytes);
+	if (ret)
+		pr_err("%s: Error, Tabla write failed\n", __func__);
+
+	mutex_unlock(&tabla->xfer_lock);
+	return ret;
+}
+
+static struct mfd_cell tabla_devs[] = {
+	{
+		.name = "tabla_codec",
+	},
+};
+
+static void tabla_bring_up(struct tabla *tabla)
+{
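+	/* Bring-up sequence: program leakage control, toggle CDC_CTL
+	 * (0 -> 3) with a short delay, then set leakage control to its
+	 * run value. */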
+	tabla_reg_write(tabla, TABLA_A_LEAKAGE_CTL, 0x4);
+	tabla_reg_write(tabla, TABLA_A_CDC_CTL, 0);
+	usleep_range(5000, 5000);
+	tabla_reg_write(tabla, TABLA_A_CDC_CTL, 3);
+	tabla_reg_write(tabla, TABLA_A_LEAKAGE_CTL, 3);
+}
+
+static void tabla_bring_down(struct tabla *tabla)
+{
+	tabla_reg_write(tabla, TABLA_A_LEAKAGE_CTL, 0x7);
+	tabla_reg_write(tabla, TABLA_A_LEAKAGE_CTL, 0x6);
+	tabla_reg_write(tabla, TABLA_A_LEAKAGE_CTL, 0xe);
+	tabla_reg_write(tabla, TABLA_A_LEAKAGE_CTL, 0x8);
+}
+
+static int tabla_reset(struct tabla *tabla)
+{
+	int ret;
+	struct pm_gpio param = {
+		.direction      = PM_GPIO_DIR_OUT,
+		.output_buffer  = PM_GPIO_OUT_BUF_CMOS,
+		.output_value   = 1,
+		.pull	   = PM_GPIO_PULL_NO,
+		.vin_sel	= PM_GPIO_VIN_S4,
+		.out_strength   = PM_GPIO_STRENGTH_MED,
+		.function       = PM_GPIO_FUNC_NORMAL,
+	};
+
+	if (tabla->reset_gpio) {
+		ret = gpio_request(tabla->reset_gpio, "CDC_RESET");
+		if (ret) {
+			pr_err("%s: Failed to request gpio %d\n", __func__,
+				tabla->reset_gpio);
+			tabla->reset_gpio = 0;
+			return ret;
+		}
+
+		ret = pm8xxx_gpio_config(tabla->reset_gpio, &param);
+		if (ret)
+			pr_err("%s: Failed to configure gpio\n", __func__);
+
+		gpio_direction_output(tabla->reset_gpio, 1);
+		msleep(20);
+		gpio_direction_output(tabla->reset_gpio, 0);
+		msleep(20);
+		gpio_direction_output(tabla->reset_gpio, 1);
+		msleep(20);
+	}
+	return 0;
+}
+
+static void tabla_free_reset(struct tabla *tabla)
+{
+	if (tabla->reset_gpio) {
+		gpio_free(tabla->reset_gpio);
+		tabla->reset_gpio = 0;
+	}
+}
+
+struct tabla_regulator {
+	const char *name;
+	int min_uV;
+	int max_uV;
+	int optimum_uA;
+	struct regulator *regulator;
+};
+
+
+/*
+ *	format : TABLA_<POWER_SUPPLY_PIN_NAME>_CUR_MAX
+ *
+ *	<POWER_SUPPLY_PIN_NAME> from Tabla objective spec
+ */
+
+#define  TABLA_CDC_VDDA_CP_CUR_MAX	500000
+#define  TABLA_CDC_VDDA_RX_CUR_MAX	20000
+#define  TABLA_CDC_VDDA_TX_CUR_MAX	20000
+#define  TABLA_VDDIO_CDC_CUR_MAX	5000
+
+#define  TABLA_VDDD_CDC_D_CUR_MAX	5000
+#define  TABLA_VDDD_CDC_A_CUR_MAX	5000
+
+static struct tabla_regulator tabla_regulators[] = {
+	{
+		.name = "CDC_VDD_CP",
+		.min_uV = 1800000,
+		.max_uV = 1800000,
+		.optimum_uA = TABLA_CDC_VDDA_CP_CUR_MAX,
+	},
+	{
+		.name = "CDC_VDDA_RX",
+		.min_uV = 1800000,
+		.max_uV = 1800000,
+		.optimum_uA = TABLA_CDC_VDDA_RX_CUR_MAX,
+	},
+	{
+		.name = "CDC_VDDA_TX",
+		.min_uV = 1800000,
+		.max_uV = 1800000,
+		.optimum_uA = TABLA_CDC_VDDA_TX_CUR_MAX,
+	},
+	{
+		.name = "VDDIO_CDC",
+		.min_uV = 1800000,
+		.max_uV = 1800000,
+		.optimum_uA = TABLA_VDDIO_CDC_CUR_MAX,
+	},
+	{
+		.name = "VDDD_CDC_D",
+		.min_uV = 1225000,
+		.max_uV = 1225000,
+		.optimum_uA = TABLA_VDDD_CDC_D_CUR_MAX,
+	},
+	{
+		.name = "CDC_VDDA_A_1P2V",
+		.min_uV = 1225000,
+		.max_uV = 1225000,
+		.optimum_uA = TABLA_VDDD_CDC_A_CUR_MAX,
+	},
+};
+
+static int tabla_device_init(struct tabla *tabla, int irq)
+{
+	int ret;
+
+	mutex_init(&tabla->io_lock);
+	mutex_init(&tabla->xfer_lock);
+	dev_set_drvdata(tabla->dev, tabla);
+
+	tabla_bring_up(tabla);
+
+	ret = tabla_irq_init(tabla);
+	if (ret) {
+		pr_err("IRQ initialization failed\n");
+		goto err;
+	}
+
+	ret = mfd_add_devices(tabla->dev, -1,
+			      tabla_devs, ARRAY_SIZE(tabla_devs),
+			      NULL, 0);
+	if (ret != 0) {
+		dev_err(tabla->dev, "Failed to add children: %d\n", ret);
+		goto err_irq;
+	}
+
+	return ret;
+err_irq:
+	tabla_irq_exit(tabla);
+err:
+	tabla_bring_down(tabla);
+	mutex_destroy(&tabla->io_lock);
+	mutex_destroy(&tabla->xfer_lock);
+	return ret;
+}
+static void tabla_device_exit(struct tabla *tabla)
+{
+	tabla_irq_exit(tabla);
+	tabla_bring_down(tabla);
+	tabla_free_reset(tabla);
+	mutex_destroy(&tabla->io_lock);
+	mutex_destroy(&tabla->xfer_lock);
+	slim_remove_device(tabla->slim_slave);
+	kfree(tabla);
+}
+
+
+#ifdef CONFIG_DEBUG_FS
+struct tabla *debugTabla;
+
+static struct dentry *debugfs_tabla_dent;
+static struct dentry *debugfs_peek;
+static struct dentry *debugfs_poke;
+
+static unsigned char read_data;
+
+static int codec_debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static int get_parameters(char *buf, long int *param1, int num_of_par)
+{
+	char *token;
+	int base, cnt;
+
+	token = strsep(&buf, " ");
+
+	for (cnt = 0; cnt < num_of_par; cnt++) {
+		if (token != NULL) {
+			if ((token[1] == 'x') || (token[1] == 'X'))
+				base = 16;
+			else
+				base = 10;
+
+			if (strict_strtoul(token, base, &param1[cnt]) != 0)
+				return -EINVAL;
+
+			token = strsep(&buf, " ");
+		} else
+			return -EINVAL;
+	}
+	return 0;
+}
+
+static ssize_t codec_debug_read(struct file *file, char __user *ubuf,
+				size_t count, loff_t *ppos)
+{
+	char lbuf[8];
+
+	snprintf(lbuf, sizeof(lbuf), "0x%x\n", read_data);
+	return simple_read_from_buffer(ubuf, count, ppos, lbuf,
+		strnlen(lbuf, 7));
+}
+
+
+static ssize_t codec_debug_write(struct file *filp,
+	const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	char *access_str = filp->private_data;
+	char lbuf[32];
+	int rc;
+	long int param[5];
+
+	if (cnt > sizeof(lbuf) - 1)
+		return -EINVAL;
+
+	rc = copy_from_user(lbuf, ubuf, cnt);
+	if (rc)
+		return -EFAULT;
+
+	lbuf[cnt] = '\0';
+
+	if (!strncmp(access_str, "poke", 6)) {
+		/* write */
+		rc = get_parameters(lbuf, param, 2);
+		if ((param[0] <= 0x3FF) && (param[1] <= 0xFF) &&
+			(rc == 0))
+			tabla_interface_reg_write(debugTabla, param[0],
+				param[1]);
+		else
+			rc = -EINVAL;
+	} else if (!strncmp(access_str, "peek", 6)) {
+		/* read */
+		rc = get_parameters(lbuf, param, 1);
+		if ((param[0] <= 0x3FF) && (rc == 0))
+			read_data = tabla_interface_reg_read(debugTabla,
+				param[0]);
+		else
+			rc = -EINVAL;
+	}
+
+	if (rc == 0)
+		rc = cnt;
+	else
+		pr_err("%s: rc = %d\n", __func__, rc);
+
+	return rc;
+}
+
+static const struct file_operations codec_debug_ops = {
+	.open = codec_debug_open,
+	.write = codec_debug_write,
+	.read = codec_debug_read
+};
+#endif
+
+static int tabla_enable_supplies(struct tabla *tabla)
+{
+	int ret;
+	int i;
+
+	tabla->supplies = kzalloc(sizeof(struct regulator_bulk_data) *
+				   ARRAY_SIZE(tabla_regulators),
+				   GFP_KERNEL);
+	if (!tabla->supplies) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(tabla_regulators); i++)
+		tabla->supplies[i].supply = tabla_regulators[i].name;
+
+	ret = regulator_bulk_get(tabla->dev, ARRAY_SIZE(tabla_regulators),
+				 tabla->supplies);
+	if (ret != 0) {
+		dev_err(tabla->dev, "Failed to get supplies: err = %d\n", ret);
+		goto err_supplies;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(tabla_regulators); i++) {
+		ret = regulator_set_voltage(tabla->supplies[i].consumer,
+			tabla_regulators[i].min_uV, tabla_regulators[i].max_uV);
+		if (ret) {
+			pr_err("%s: Setting regulator voltage failed for "
+				"regulator %s err = %d\n", __func__,
+				tabla->supplies[i].supply, ret);
+			goto err_get;
+		}
+
+		ret = regulator_set_optimum_mode(tabla->supplies[i].consumer,
+			tabla_regulators[i].optimum_uA);
+		if (ret < 0) {
+			pr_err("%s: Setting regulator optimum mode failed for "
+				"regulator %s err = %d\n", __func__,
+				tabla->supplies[i].supply, ret);
+			goto err_get;
+		}
+	}
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(tabla_regulators),
+				    tabla->supplies);
+	if (ret != 0) {
+		dev_err(tabla->dev, "Failed to enable supplies: err = %d\n",
+				ret);
+		goto err_configure;
+	}
+	return ret;
+
+err_configure:
+	for (i = 0; i < ARRAY_SIZE(tabla_regulators); i++) {
+		regulator_set_voltage(tabla->supplies[i].consumer, 0,
+			tabla_regulators[i].max_uV);
+		regulator_set_optimum_mode(tabla->supplies[i].consumer, 0);
+	}
+err_get:
+	regulator_bulk_free(ARRAY_SIZE(tabla_regulators), tabla->supplies);
+err_supplies:
+	kfree(tabla->supplies);
+err:
+	return ret;
+}
+
+static void tabla_disable_supplies(struct tabla *tabla)
+{
+	int i;
+
+	regulator_bulk_disable(ARRAY_SIZE(tabla_regulators),
+				    tabla->supplies);
+	for (i = 0; i < ARRAY_SIZE(tabla_regulators); i++) {
+		regulator_set_voltage(tabla->supplies[i].consumer, 0,
+			tabla_regulators[i].max_uV);
+		regulator_set_optimum_mode(tabla->supplies[i].consumer, 0);
+	}
+	regulator_bulk_free(ARRAY_SIZE(tabla_regulators), tabla->supplies);
+	kfree(tabla->supplies);
+}
+
+static int tabla_slim_probe(struct slim_device *slim)
+{
+	struct tabla *tabla;
+	struct tabla_pdata *pdata;
+	int ret = 0;
+
+	pdata = slim->dev.platform_data;
+
+	if (!pdata) {
+		dev_err(&slim->dev, "Error, no platform data\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	tabla = kzalloc(sizeof(struct tabla), GFP_KERNEL);
+	if (tabla == NULL) {
+		pr_err("%s: error, allocation failed\n", __func__);
+		ret = -ENOMEM;
+		goto err;
+	}
+	if (!slim->ctrl) {
+		pr_err("Error, no SLIMBUS control data\n");
+		ret = -EINVAL;
+		goto err_tabla;
+	}
+	tabla->slim = slim;
+	slim_set_clientdata(slim, tabla);
+	tabla->reset_gpio = pdata->reset_gpio;
+	tabla->dev = &slim->dev;
+
+	ret = tabla_enable_supplies(tabla);
+	if (ret) {
+		pr_info("%s: Fail to enable Tabla supplies\n", __func__);
+		goto err_tabla;
+	}
+	usleep_range(5, 5);
+
+	ret = tabla_reset(tabla);
+	if (ret) {
+		pr_err("%s: Resetting Tabla failed\n", __func__);
+		goto err_supplies;
+	}
+
+	ret = slim_get_logical_addr(tabla->slim, tabla->slim->e_addr,
+		ARRAY_SIZE(tabla->slim->e_addr), &tabla->slim->laddr);
+	if (ret) {
+		pr_err("fail to get slimbus logical address %d\n", ret);
+		goto err_reset;
+	}
+	tabla->read_dev = tabla_slim_read_device;
+	tabla->write_dev = tabla_slim_write_device;
+	tabla->irq = pdata->irq;
+	tabla->irq_base = pdata->irq_base;
+	tabla_pgd_la = tabla->slim->laddr;
+
+	if (pdata->num_irqs < TABLA_NUM_IRQS) {
+		pr_err("%s: Error, not enough interrupt lines allocated\n",
+			__func__);
+		ret = -EINVAL;
+		goto err_reset;
+	}
+
+	tabla->slim_slave = &pdata->slimbus_slave_device;
+
+	ret = slim_add_device(slim->ctrl, tabla->slim_slave);
+	if (ret) {
+		pr_err("%s: error, adding SLIMBUS device failed\n", __func__);
+		goto err_reset;
+	}
+
+	ret = slim_get_logical_addr(tabla->slim_slave,
+		tabla->slim_slave->e_addr,
+		ARRAY_SIZE(tabla->slim_slave->e_addr),
+			&tabla->slim_slave->laddr);
+	if (ret) {
+		pr_err("fail to get slimbus slave logical address %d\n", ret);
+		goto err_slim_add;
+	}
+	tabla_inf_la = tabla->slim_slave->laddr;
+
+	ret = tabla_device_init(tabla, tabla->irq);
+	if (ret) {
+		pr_err("%s: error, initializing device failed\n", __func__);
+		goto err_slim_add;
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	debugTabla = tabla;
+
+	debugfs_tabla_dent =
+		debugfs_create_dir("wcd9310_slimbus_interface_device", 0);
+	if (!IS_ERR(debugfs_tabla_dent)) {
+		debugfs_peek = debugfs_create_file("peek",
+		S_IFREG | S_IRUGO, debugfs_tabla_dent,
+		(void *) "peek", &codec_debug_ops);
+
+		debugfs_poke = debugfs_create_file("poke",
+		S_IFREG | S_IRUGO, debugfs_tabla_dent,
+		(void *) "poke", &codec_debug_ops);
+	}
+#endif
+
+
+	return ret;
+
+err_slim_add:
+	slim_remove_device(tabla->slim_slave);
+err_reset:
+	tabla_free_reset(tabla);
+err_supplies:
+	tabla_disable_supplies(tabla);
+err_tabla:
+	kfree(tabla);
+err:
+	return ret;
+}
+static int tabla_slim_remove(struct slim_device *pdev)
+{
+	struct tabla *tabla;
+
+#ifdef CONFIG_DEBUG_FS
+	debugfs_remove(debugfs_peek);
+	debugfs_remove(debugfs_poke);
+	debugfs_remove(debugfs_tabla_dent);
+#endif
+
+	tabla = slim_get_devicedata(pdev);
+	tabla_device_exit(tabla);
+	tabla_disable_supplies(tabla);
+	kfree(tabla);
+
+	return 0;
+}
+static const struct slim_device_id slimtest_id[] = {
+	{"tabla-slim", 0},
+	{}
+};
+static struct slim_driver tabla_slim_driver = {
+	.driver = {
+		.name = "tabla-slim",
+		.owner = THIS_MODULE,
+	},
+	.probe = tabla_slim_probe,
+	.remove = tabla_slim_remove,
+	.id_table = slimtest_id,
+};
+static int __init tabla_init(void)
+{
+	int ret;
+
+	ret = slim_driver_register(&tabla_slim_driver);
+	if (ret != 0) {
+		pr_err("Failed to register tabla SB driver: %d\n", ret);
+		goto err;
+	}
+err:
+	return ret;
+}
+module_init(tabla_init);
+
+static void __exit tabla_exit(void)
+{
+}
+module_exit(tabla_exit);
+
+MODULE_DESCRIPTION("Tabla core driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/wcd9310-irq.c b/drivers/mfd/wcd9310-irq.c
new file mode 100644
index 0000000..bc7841e
--- /dev/null
+++ b/drivers/mfd/wcd9310-irq.c
@@ -0,0 +1,197 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/wcd9310/core.h>
+#include <linux/mfd/wcd9310/registers.h>
+#include <linux/interrupt.h>
+
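+/* Locate an interrupt's bit within the byte-wide status/mask registers. */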
+#define BYTE_BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_BYTE))
+#define BIT_BYTE(nr)			((nr) / BITS_PER_BYTE)
+
+struct tabla_irq {
+	bool level;
+};
+
+static struct tabla_irq tabla_irqs[TABLA_NUM_IRQS] = {
+	[0] = { .level = 1},
+/* All other tabla interrupts are edge triggered */
+};
+
+static inline int irq_to_tabla_irq(struct tabla *tabla, int irq)
+{
+	return irq - tabla->irq_base;
+}
+
+static void tabla_irq_lock(struct irq_data *data)
+{
+	struct tabla *tabla = irq_data_get_irq_chip_data(data);
+	mutex_lock(&tabla->irq_lock);
+}
+
+static void tabla_irq_sync_unlock(struct irq_data *data)
+{
+	struct tabla *tabla = irq_data_get_irq_chip_data(data);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(tabla->irq_masks_cur); i++) {
+		/* If there's been a change in the mask write it back
+		 * to the hardware.
+		 */
+		if (tabla->irq_masks_cur[i] != tabla->irq_masks_cache[i]) {
+			tabla->irq_masks_cache[i] = tabla->irq_masks_cur[i];
+			tabla_reg_write(tabla, TABLA_A_INTR_MASK0+i,
+				tabla->irq_masks_cur[i]);
+		}
+	}
+
+	mutex_unlock(&tabla->irq_lock);
+}
+
+static void tabla_irq_enable(struct irq_data *data)
+{
+	struct tabla *tabla = irq_data_get_irq_chip_data(data);
+	int tabla_irq = irq_to_tabla_irq(tabla, data->irq);
+	tabla->irq_masks_cur[BIT_BYTE(tabla_irq)] &=
+		~(BYTE_BIT_MASK(tabla_irq));
+}
+
+static void tabla_irq_disable(struct irq_data *data)
+{
+	struct tabla *tabla = irq_data_get_irq_chip_data(data);
+	int tabla_irq = irq_to_tabla_irq(tabla, data->irq);
+	tabla->irq_masks_cur[BIT_BYTE(tabla_irq)] |= BYTE_BIT_MASK(tabla_irq);
+}
+
+static struct irq_chip tabla_irq_chip = {
+	.name = "tabla",
+	.irq_bus_lock = tabla_irq_lock,
+	.irq_bus_sync_unlock = tabla_irq_sync_unlock,
+	.irq_disable = tabla_irq_disable,
+	.irq_enable = tabla_irq_enable,
+};
+
+static irqreturn_t tabla_irq_thread(int irq, void *data)
+{
+	int ret;
+	struct tabla *tabla = data;
+	u8 status[TABLA_NUM_IRQ_REGS];
+	unsigned int i;
+
+	ret = tabla_bulk_read(tabla, TABLA_A_INTR_STATUS0,
+			       TABLA_NUM_IRQ_REGS, status);
+	if (ret < 0) {
+		dev_err(tabla->dev, "Failed to read interrupt status: %d\n",
+			ret);
+		return IRQ_NONE;
+	}
+	/* Apply masking */
+	for (i = 0; i < TABLA_NUM_IRQ_REGS; i++)
+		status[i] &= ~tabla->irq_masks_cur[i];
+
+	/* Find out which interrupt was triggered and call that interrupt's
+	 * handler function
+	 */
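+	/* MBHC insertion/removal interrupts are cleared before the nested
+	 * handler runs; all other sources are cleared after it returns. */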
+	for (i = 0; i < TABLA_NUM_IRQS; i++) {
+		if (status[BIT_BYTE(i)] & BYTE_BIT_MASK(i)) {
+			if ((i <= TABLA_IRQ_MBHC_INSERTION) &&
+				(i >= TABLA_IRQ_MBHC_REMOVAL)) {
+				tabla_reg_write(tabla, TABLA_A_INTR_CLEAR0 +
+					BIT_BYTE(i), BYTE_BIT_MASK(i));
+				handle_nested_irq(tabla->irq_base + i);
+			} else {
+				handle_nested_irq(tabla->irq_base + i);
+				tabla_reg_write(tabla, TABLA_A_INTR_CLEAR0 +
+					BIT_BYTE(i), BYTE_BIT_MASK(i));
+			}
+			break;
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+int tabla_irq_init(struct tabla *tabla)
+{
+	int ret;
+	unsigned int i, cur_irq;
+
+	mutex_init(&tabla->irq_lock);
+
+	if (!tabla->irq) {
+		dev_warn(tabla->dev,
+			 "No interrupt specified, no interrupts\n");
+		tabla->irq_base = 0;
+		return 0;
+	}
+
+	if (!tabla->irq_base) {
+		dev_err(tabla->dev,
+			"No interrupt base specified, no interrupts\n");
+		return 0;
+	}
+	/* Mask the individual interrupt sources */
+	for (i = 0, cur_irq = tabla->irq_base; i < TABLA_NUM_IRQS; i++,
+		cur_irq++) {
+
+		irq_set_chip_data(cur_irq, tabla);
+
+		if (tabla_irqs[i].level)
+			irq_set_chip_and_handler(cur_irq, &tabla_irq_chip,
+					 handle_level_irq);
+		else
+			irq_set_chip_and_handler(cur_irq, &tabla_irq_chip,
+					 handle_edge_irq);
+
+		irq_set_nested_thread(cur_irq, 1);
+
+		/* ARM needs us to explicitly flag the IRQ as valid
+		 * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+		set_irq_flags(cur_irq, IRQF_VALID);
+#else
+		set_irq_noprobe(cur_irq);
+#endif
+
+		tabla->irq_masks_cur[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
+		tabla->irq_masks_cache[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
+		tabla->irq_level[BIT_BYTE(i)] |= tabla_irqs[i].level <<
+			(i % BITS_PER_BYTE);
+	}
+	for (i = 0; i < TABLA_NUM_IRQ_REGS; i++) {
+		/* Initialize interrupt mask and level registers */
+		tabla_reg_write(tabla, TABLA_A_INTR_LEVEL0 + i,
+			tabla->irq_level[i]);
+		tabla_reg_write(tabla, TABLA_A_INTR_MASK0 + i,
+			tabla->irq_masks_cur[i]);
+	}
+
+	ret = request_threaded_irq(tabla->irq, NULL, tabla_irq_thread,
+				   IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+				   "tabla", tabla);
+
+	if (ret != 0) {
+		dev_err(tabla->dev, "Failed to request IRQ %d: %d\n",
+			tabla->irq, ret);
+		return ret;
+	}
+	return 0;
+}
+void tabla_irq_exit(struct tabla *tabla)
+{
+	if (tabla->irq)
+		free_irq(tabla->irq, tabla);
+	mutex_destroy(&tabla->irq_lock);
+}
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 68f3671..1b1c6e6 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -535,6 +535,124 @@
 	 If your platform uses a different flash partition label for storing
  	 crashdumps, enter it here.
 
+config TSIF
+	depends on ARCH_MSM
+	tristate "TSIF (Transport Stream InterFace) support"
+	default n
+	---help---
+	  This driver supports the low level TSIF interface. It provides an
+	  API for upper layer drivers. If you have TSIF hardware, say
+	  Y here and read <file:Documentation/arm/msm/tsif.txt>.
+
+	  To compile this driver as module, choose M here: the
+	  module will be called msm_tsif.
+
+config TSIF_CHRDEV
+	tristate "TSIF character device"
+	depends on TSIF
+	default n
+	---help---
+	  This driver uses the low level TSIF interface. It provides a
+	  character device usable from user space programs: one can read
+	  the TSIF stream from this device.
+
+	  This driver may be used as an example of TSIF API usage.
+
+	  To compile this driver as module, choose M here: the
+	  module will be called tsif_chrdev.
+
+config TSIF_DEBUG
+	bool "Turn on debugging information for tsif driver"
+	depends on TSIF
+	default n
+	---help---
+	  This turns on debugging information for the tsif driver
+
+config HAPTIC_ISA1200
+	tristate "ISA1200 haptic support"
+	depends on I2C
+	default n
+	help
+	  The ISA1200 is a high performance enhanced haptic driver.
+
+config PMIC8058_PWM
+	tristate "Qualcomm PM8058 PWM support"
+	depends on PMIC8058
+	default y
+	help
+	  This option enables device driver support for the PWM channels
+	  on Qualcomm PM8058 chip. Pulse Width Modulation is used for
+	  purposes including software-controlled backlight brightness,
+	  motor control, and waveform generation.
+
+config PMIC8058_VIBRATOR
+	tristate "Qualcomm PM8058 vibrator support"
+	depends on PMIC8058 && ANDROID_TIMED_OUTPUT
+	default n
+	help
+	  This option enables device driver support for the vibrator
+	  on Qualcomm PM8058 chip.
+
+config PMIC8058_NFC
+	tristate "Qualcomm PM8058 support for Near Field Communication"
+	depends on PMIC8058
+	default y
+	help
+	  Qualcomm PM8058 chip has a module to support NFC (Near Field
+	  Communication). This option enables the driver to support it.
+
+config PMIC8058_UPL
+	tristate "Qualcomm PM8058 support for User Programmable Logic"
+	depends on PMIC8058
+	default n
+	help
+	  This option enables device driver support for User Programmable Logic
+	  on Qualcomm PM8058 chip.  The UPL module provides a means to implement
+	  simple truth table based logic via a set of control registers. I/O may
+	  be routed in and out of the UPL module via GPIO or DTEST pins.
+
+config PMIC8058_XOADC
+	tristate "Qualcomm PM8058 XOADC driver"
+	depends on PMIC8058
+	default n
+	help
+	  Enables user processor ADC reads over the XOADC module of Qualcomm's
+	  PMIC8058. The driver provides an interface to program the ADC
+	  registers over AMUX channels, devices on programmable MPPs, and
+	  xotherm.
+
+config PMIC8058_MISC
+	tristate "Qualcomm PM8058 Misc Device driver"
+	depends on PMIC8058
+	default n
+	help
+	  Provides functionality for various small drivers utilizing the
+	  Qualcomm PM8058 chip.  Examples include signalling when the 32 kHz
+	  oscillator malfunctions.
+
+config PMIC8058_BATTALARM
+	tristate "Qualcomm PM8058 Battery Alarm Device driver"
+	depends on PMIC8058
+	help
+	  This option enables support for the battery alarm module on the
+	  Qualcomm PM8058 PMIC chip.  This support allows for configuration of
+	  the alarm module as well as interrupt handling.
+
+config TZCOM
+	tristate "Trustzone Communicator driver"
+	default n
+	help
+	  Provides a communication interface between userspace and
+	  TrustZone Operating Environment (TZBSP) using Secure Channel
+	  Manager (SCM) interface.
+
+config QFP_FUSE
+	tristate "QFPROM Fuse Read/Write support"
+	help
+	  This option enables a device driver to read/write QFPROM
+	  fuses. The ioctls provide the necessary interface
+	  to the fuse block. Currently this is supported only
+	  on FSM targets.
+
 source "drivers/misc/c2port/Kconfig"
 source "drivers/misc/eeprom/Kconfig"
 source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 2d43048..1795ecf 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -52,3 +52,17 @@
 obj-$(CONFIG_WL127X_RFKILL)	+= wl127x-rfkill.o
 obj-$(CONFIG_APANIC)		+= apanic.o
 obj-$(CONFIG_SENSORS_AK8975)	+= akm8975.o
+obj-$(CONFIG_TSIF) += msm_tsif.o
+msm_tsif-objs := tsif.o
+obj-$(CONFIG_TSIF_CHRDEV) += tsif_chrdev.o
+obj-$(CONFIG_HAPTIC_ISA1200)		+= isa1200.o
+obj-$(CONFIG_PMIC8058_PWM) += pmic8058-pwm.o
+obj-$(CONFIG_PMIC8058_VIBRATOR) += pmic8058-vibrator.o
+obj-$(CONFIG_PMIC8058_NFC) += pmic8058-nfc.o
+obj-$(CONFIG_PMIC8058_UPL) += pmic8058-upl.o
+obj-$(CONFIG_MSM_MEMORY_LOW_POWER_MODE_SUSPEND_DEEP_POWER_DOWN) \
+	+= msm_migrate_pages.o
+obj-$(CONFIG_PMIC8058_XOADC) += pmic8058-xoadc.o
+obj-$(CONFIG_PMIC8058_MISC) += pmic8058-misc.o
+obj-$(CONFIG_PMIC8058_BATTALARM) += pmic8058-batt-alarm.o
+obj-$(CONFIG_TZCOM) += tzcom.o
diff --git a/drivers/misc/isa1200.c b/drivers/misc/isa1200.c
new file mode 100644
index 0000000..bb3f9a8
--- /dev/null
+++ b/drivers/misc/isa1200.c
@@ -0,0 +1,440 @@
+/*
+ *  isa1200.c - Haptic Motor
+ *
+ *  Copyright (C) 2009 Samsung Electronics
+ *  Kyungmin Park <kyungmin.park@samsung.com>
+ *  Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/pwm.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/i2c/isa1200.h>
+#include "../staging/android/timed_output.h"
+
+#define ISA1200_HCTRL0		0x30
+#define ISA1200_HCTRL1		0x31
+#define ISA1200_HCTRL5		0x35
+
+#define ISA1200_HCTRL0_RESET	0x01
+#define ISA1200_HCTRL1_RESET	0x4B
+
+#define ISA1200_HCTRL5_VIB_STRT	0xD5
+#define ISA1200_HCTRL5_VIB_STOP	0x6B
+
+struct isa1200_chip {
+	struct i2c_client *client;
+	struct isa1200_platform_data *pdata;
+	struct pwm_device *pwm;
+	struct hrtimer timer;
+	struct timed_output_dev dev;
+	struct work_struct work;
+	spinlock_t lock;
+	unsigned int enable;
+	unsigned int period_ns;
+};
+
+static int isa1200_read_reg(struct i2c_client *client, int reg)
+{
+	int ret;
+
+	ret = i2c_smbus_read_byte_data(client, reg);
+	if (ret < 0)
+		dev_err(&client->dev, "%s: err %d\n", __func__, ret);
+
+	return ret;
+}
+
+static int isa1200_write_reg(struct i2c_client *client, int reg, u8 value)
+{
+	int ret;
+
+	ret = i2c_smbus_write_byte_data(client, reg, value);
+	if (ret < 0)
+		dev_err(&client->dev, "%s: err %d\n", __func__, ret);
+
+	return ret;
+}
+
+static void isa1200_vib_set(struct isa1200_chip *haptic, int enable)
+{
+	int rc = 0;
+
+	if (enable) {
+		if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE) {
+			int period_us = haptic->period_ns / 1000;
+			rc = pwm_config(haptic->pwm,
+				(period_us * haptic->pdata->duty) / 100,
+				period_us);
+			if (rc < 0)
+				pr_err("%s: pwm_config fail\n", __func__);
+			rc = pwm_enable(haptic->pwm);
+			if (rc < 0)
+				pr_err("%s: pwm_enable fail\n", __func__);
+		} else if (haptic->pdata->mode_ctrl == PWM_GEN_MODE) {
+			rc = isa1200_write_reg(haptic->client,
+						ISA1200_HCTRL5,
+						ISA1200_HCTRL5_VIB_STRT);
+			if (rc < 0)
+				pr_err("%s: start vibartion fail\n", __func__);
+		}
+	} else {
+		if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE)
+			pwm_disable(haptic->pwm);
+		else if (haptic->pdata->mode_ctrl == PWM_GEN_MODE) {
+			rc = isa1200_write_reg(haptic->client,
+						ISA1200_HCTRL5,
+						ISA1200_HCTRL5_VIB_STOP);
+			if (rc < 0)
+				pr_err("%s: stop vibartion fail\n", __func__);
+		}
+	}
+}
+
+static void isa1200_chip_work(struct work_struct *work)
+{
+	struct isa1200_chip *haptic;
+
+	haptic = container_of(work, struct isa1200_chip, work);
+	isa1200_vib_set(haptic, haptic->enable);
+}
+
+static void isa1200_chip_enable(struct timed_output_dev *dev, int value)
+{
+	struct isa1200_chip *haptic = container_of(dev, struct isa1200_chip,
+					dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&haptic->lock, flags);
+	hrtimer_cancel(&haptic->timer);
+	if (value == 0)
+		haptic->enable = 0;
+	else {
+		value = (value > haptic->pdata->max_timeout ?
+				haptic->pdata->max_timeout : value);
+		haptic->enable = 1;
+		hrtimer_start(&haptic->timer,
+			ktime_set(value / 1000, (value % 1000) * 1000000),
+			HRTIMER_MODE_REL);
+	}
+	spin_unlock_irqrestore(&haptic->lock, flags);
+	schedule_work(&haptic->work);
+}
+
+static int isa1200_chip_get_time(struct timed_output_dev *dev)
+{
+	struct isa1200_chip *haptic = container_of(dev, struct isa1200_chip,
+					dev);
+
+	if (hrtimer_active(&haptic->timer)) {
+		ktime_t r = hrtimer_get_remaining(&haptic->timer);
+		struct timeval t = ktime_to_timeval(r);
+		return t.tv_sec * 1000 + t.tv_usec / 1000;
+	} else
+		return 0;
+}
+
+static enum hrtimer_restart isa1200_vib_timer_func(struct hrtimer *timer)
+{
+	struct isa1200_chip *haptic = container_of(timer, struct isa1200_chip,
+					timer);
+	haptic->enable = 0;
+	schedule_work(&haptic->work);
+
+	return HRTIMER_NORESTART;
+}
+
+static void dump_isa1200_reg(char *str, struct i2c_client *client)
+{
+	pr_debug("%s reg0x%x=0x%x, reg0x%x=0x%x, reg0x%x=0x%x\n", str,
+		ISA1200_HCTRL0, isa1200_read_reg(client, ISA1200_HCTRL0),
+		ISA1200_HCTRL1, isa1200_read_reg(client, ISA1200_HCTRL1),
+		ISA1200_HCTRL5, isa1200_read_reg(client, ISA1200_HCTRL5));
+}
+
+static int isa1200_setup(struct i2c_client *client)
+{
+	struct isa1200_chip *haptic = i2c_get_clientdata(client);
+	int value, temp, rc;
+
+	gpio_set_value_cansleep(haptic->pdata->hap_en_gpio, 0);
+	udelay(250);
+	gpio_set_value_cansleep(haptic->pdata->hap_en_gpio, 1);
+
+	value =	(haptic->pdata->smart_en << 3) |
+		(haptic->pdata->is_erm << 5) |
+		(haptic->pdata->ext_clk_en << 7);
+
+	rc = isa1200_write_reg(client, ISA1200_HCTRL1, value);
+	if (rc < 0) {
+		pr_err("%s: i2c write failure\n", __func__);
+		return rc;
+	}
+
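+	/* HCTRL0 encodes the PWM divider as (div / 128) - 1, with div a
+	 * multiple of 128 in [128, 1024], and the PWM input frequency as
+	 * (freq / 22400) - 1, with freq a multiple of 22400 Hz. */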
+	if (haptic->pdata->mode_ctrl == PWM_GEN_MODE) {
+		temp = haptic->pdata->pwm_fd.pwm_div;
+		if (temp < 128 || temp > 1024 || temp % 128) {
+			pr_err("%s: Invalid divider\n", __func__);
+			rc = -EINVAL;
+			goto reset_hctrl1;
+		}
+		value = ((temp >> 7) - 1);
+	} else if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE) {
+		temp = haptic->pdata->pwm_fd.pwm_freq;
+		if (temp < 22400 || temp > 172600 || temp % 22400) {
+			pr_err("%s: Invalid frequency\n", __func__);
+			rc = -EINVAL;
+			goto reset_hctrl1;
+		}
+		value = ((temp / 22400) - 1);
+		haptic->period_ns = NSEC_PER_SEC / temp;
+	}
+
+	value |= (haptic->pdata->mode_ctrl << 3) |
+		(haptic->pdata->overdrive_high << 5) |
+		(haptic->pdata->overdrive_en << 5) |
+		(haptic->pdata->chip_en << 7);
+
+	rc = isa1200_write_reg(client, ISA1200_HCTRL0, value);
+	if (rc < 0) {
+		pr_err("%s: i2c write failure\n", __func__);
+		goto reset_hctrl1;
+	}
+
+	dump_isa1200_reg("new:", client);
+	return 0;
+
+reset_hctrl1:
+	i2c_smbus_write_byte_data(client, ISA1200_HCTRL1,
+				ISA1200_HCTRL1_RESET);
+	return rc;
+}
+
+static int __devinit isa1200_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct isa1200_chip *haptic;
+	struct isa1200_platform_data *pdata;
+	int ret;
+
+	if (!i2c_check_functionality(client->adapter,
+			I2C_FUNC_SMBUS_BYTE_DATA)) {
+		dev_err(&client->dev, "%s: no support for i2c read/write"
+				"byte data\n", __func__);
+		return -EIO;
+	}
+
+	pdata = client->dev.platform_data;
+	if (!pdata) {
+		dev_err(&client->dev, "%s: no platform data\n", __func__);
+		return -EINVAL;
+	}
+
+	if (pdata->dev_setup) {
+		ret = pdata->dev_setup(true);
+		if (ret < 0) {
+			dev_err(&client->dev, "dev setup failed\n");
+			return -EINVAL;
+		}
+	}
+
+	haptic = kzalloc(sizeof(struct isa1200_chip), GFP_KERNEL);
+	if (!haptic) {
+		ret = -ENOMEM;
+		goto mem_alloc_fail;
+	}
+	haptic->client = client;
+	haptic->enable = 0;
+	haptic->pdata = pdata;
+
+	if (pdata->power_on) {
+		ret = pdata->power_on(1);
+		if (ret) {
+			dev_err(&client->dev, "%s: power-up failed\n",
+							__func__);
+			goto pwr_up_fail;
+		}
+	}
+
+	spin_lock_init(&haptic->lock);
+	INIT_WORK(&haptic->work, isa1200_chip_work);
+
+	hrtimer_init(&haptic->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	haptic->timer.function = isa1200_vib_timer_func;
+
+	/*register with timed output class*/
+	haptic->dev.name = pdata->name;
+	haptic->dev.get_time = isa1200_chip_get_time;
+	haptic->dev.enable = isa1200_chip_enable;
+	ret = timed_output_dev_register(&haptic->dev);
+	if (ret < 0)
+		goto timed_reg_fail;
+
+	i2c_set_clientdata(client, haptic);
+
+	ret = gpio_is_valid(pdata->hap_en_gpio);
+	if (ret) {
+		ret = gpio_request(pdata->hap_en_gpio, "haptic_gpio");
+		if (ret) {
+			dev_err(&client->dev, "%s: gpio %d request failed\n",
+					__func__, pdata->hap_en_gpio);
+			goto gpio_fail;
+		}
+	} else {
+		dev_err(&client->dev, "%s: Invalid gpio %d\n", __func__,
+					pdata->hap_en_gpio);
+		ret = -EINVAL;
+		goto gpio_fail;
+	}
+
+	ret = isa1200_setup(client);
+	if (ret) {
+		dev_err(&client->dev, "%s: setup fail %d\n", __func__, ret);
+		goto gpio_fail;
+	}
+
+	if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE) {
+		haptic->pwm = pwm_request(pdata->pwm_ch_id, id->name);
+		if (IS_ERR(haptic->pwm)) {
+			dev_err(&client->dev, "%s: pwm request failed\n",
+							__func__);
+			ret = PTR_ERR(haptic->pwm);
+			goto reset_hctrl0;
+		}
+	}
+
+	printk(KERN_INFO "%s: %s registered\n", __func__, id->name);
+	return 0;
+
+reset_hctrl0:
+	i2c_smbus_write_byte_data(client, ISA1200_HCTRL0,
+					ISA1200_HCTRL0_RESET);
+gpio_fail:
+	timed_output_dev_unregister(&haptic->dev);
+timed_reg_fail:
+	if (pdata->power_on)
+		pdata->power_on(0);
+pwr_up_fail:
+	kfree(haptic);
+mem_alloc_fail:
+	if (pdata->dev_setup)
+		pdata->dev_setup(false);
+	return ret;
+}
+
+static int __devexit isa1200_remove(struct i2c_client *client)
+{
+	struct isa1200_chip *haptic = i2c_get_clientdata(client);
+
+	hrtimer_cancel(&haptic->timer);
+	cancel_work_sync(&haptic->work);
+
+	/* turn-off current vibration */
+	isa1200_vib_set(haptic, 0);
+
+	if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE)
+		pwm_free(haptic->pwm);
+
+	timed_output_dev_unregister(&haptic->dev);
+	gpio_free(haptic->pdata->hap_en_gpio);
+
+	/* reset hardware registers */
+	i2c_smbus_write_byte_data(client, ISA1200_HCTRL0,
+				ISA1200_HCTRL0_RESET);
+	i2c_smbus_write_byte_data(client, ISA1200_HCTRL1,
+				ISA1200_HCTRL1_RESET);
+
+	if (haptic->pdata->dev_setup)
+		haptic->pdata->dev_setup(false);
+
+	/* power-off the chip */
+	if (haptic->pdata->power_on)
+		haptic->pdata->power_on(0);
+
+	kfree(haptic);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int isa1200_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+	struct isa1200_chip *haptic = i2c_get_clientdata(client);
+	int ret;
+
+	hrtimer_cancel(&haptic->timer);
+	cancel_work_sync(&haptic->work);
+	/* turn-off current vibration */
+	isa1200_vib_set(haptic, 0);
+
+	if (haptic->pdata->power_on) {
+		ret = haptic->pdata->power_on(0);
+		if (ret) {
+			dev_err(&client->dev, "power-down failed\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int isa1200_resume(struct i2c_client *client)
+{
+	struct isa1200_chip *haptic = i2c_get_clientdata(client);
+	int ret;
+
+	if (haptic->pdata->power_on) {
+		ret = haptic->pdata->power_on(1);
+		if (ret) {
+			dev_err(&client->dev, "power-up failed\n");
+			return ret;
+		}
+	}
+
+	isa1200_setup(client);
+	return 0;
+}
+#else
+#define isa1200_suspend		NULL
+#define isa1200_resume		NULL
+#endif
+
+static const struct i2c_device_id isa1200_id[] = {
+	{ "isa1200_1", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(i2c, isa1200_id);
+
+static struct i2c_driver isa1200_driver = {
+	.driver	= {
+		.name	= "isa1200",
+	},
+	.probe		= isa1200_probe,
+	.remove		= __devexit_p(isa1200_remove),
+	.suspend	= isa1200_suspend,
+	.resume		= isa1200_resume,
+	.id_table	= isa1200_id,
+};
+
+static int __init isa1200_init(void)
+{
+	return i2c_add_driver(&isa1200_driver);
+}
+
+static void __exit isa1200_exit(void)
+{
+	i2c_del_driver(&isa1200_driver);
+}
+
+module_init(isa1200_init);
+module_exit(isa1200_exit);
+
+MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
+MODULE_DESCRIPTION("ISA1200 Haptic Motor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/msm_migrate_pages.c b/drivers/misc/msm_migrate_pages.c
new file mode 100644
index 0000000..df7af5f
--- /dev/null
+++ b/drivers/misc/msm_migrate_pages.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/memory.h>
+#include <linux/memory_hotplug.h>
+#include <mach/msm_migrate_pages.h>
+
+static unsigned long unstable_memory_state;
+
+unsigned long get_msm_migrate_pages_status(void)
+{
+	return unstable_memory_state;
+}
+EXPORT_SYMBOL(get_msm_migrate_pages_status);
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+static int migrate_pages_callback(struct notifier_block *self,
+				unsigned long action, void *arg)
+{
+	int ret = 0;
+
+	switch (action) {
+	case MEM_ONLINE:
+		unstable_memory_state = action;
+		break;
+	case MEM_OFFLINE:
+		unstable_memory_state = action;
+		break;
+	case MEM_GOING_OFFLINE:
+	case MEM_GOING_ONLINE:
+	case MEM_CANCEL_ONLINE:
+	case MEM_CANCEL_OFFLINE:
+		break;
+	}
+	return ret;
+}
+#endif
+
+static int __devinit msm_migrate_pages_probe(struct platform_device *pdev)
+{
+#ifdef CONFIG_MEMORY_HOTPLUG
+	hotplug_memory_notifier(migrate_pages_callback, 0);
+#endif
+	unstable_memory_state = 0;
+	return 0;
+}
+
+static struct platform_driver msm_migrate_pages_driver = {
+	.probe = msm_migrate_pages_probe,
+	.driver = {
+		.name = "msm_migrate_pages",
+	},
+};
+
+static int __init msm_migrate_pages_init(void)
+{
+	return platform_driver_register(&msm_migrate_pages_driver);
+}
+
+static void __exit msm_migrate_pages_exit(void)
+{
+	platform_driver_unregister(&msm_migrate_pages_driver);
+}
+
+module_init(msm_migrate_pages_init);
+module_exit(msm_migrate_pages_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Get Status of Unstable Memory Region");
diff --git a/drivers/misc/pmem.c b/drivers/misc/pmem.c
index abb73c1..5063551 100644
--- a/drivers/misc/pmem.c
+++ b/drivers/misc/pmem.c
@@ -1,6 +1,7 @@
 /* drivers/android/pmem.c
  *
  * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -19,20 +20,34 @@
 #include <linux/file.h>
 #include <linux/mm.h>
 #include <linux/list.h>
-#include <linux/mutex.h>
 #include <linux/debugfs.h>
 #include <linux/android_pmem.h>
 #include <linux/mempolicy.h>
-#include <linux/sched.h>
+#include <linux/kobject.h>
 #include <asm/io.h>
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
+#include <asm/sizes.h>
+#include <linux/pm_runtime.h>
+#include <linux/memory_alloc.h>
 
-#define PMEM_MAX_DEVICES 10
-#define PMEM_MAX_ORDER 128
+#define PMEM_MAX_DEVICES (10)
+
+#define PMEM_MAX_ORDER (128)
 #define PMEM_MIN_ALLOC PAGE_SIZE
 
+#define PMEM_INITIAL_NUM_BITMAP_ALLOCATIONS (64)
+
+#define PMEM_32BIT_WORD_ORDER (5)
+#define PMEM_BITS_PER_WORD_MASK (BITS_PER_LONG - 1)
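+/* 1 << PMEM_32BIT_WORD_ORDER = 32 bits per word of the allocation bitmap */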
+
+#ifdef CONFIG_ANDROID_PMEM_DEBUG
 #define PMEM_DEBUG 1
+#else
+#define PMEM_DEBUG 0
+#endif
+
+#define SYSTEM_ALLOC_RETRY 10
 
 /* indicates that a reference to this file has been taken via get_pmem_file,
  * the file should not be released until put_pmem_file is called */
@@ -50,7 +65,6 @@
 #define PMEM_FLAGS_SUBMAP 0x1 << 3
 #define PMEM_FLAGS_UNSUBMAP 0x1 << 4
 
-
 struct pmem_data {
 	/* in alloc mode: an index into the bitmap
 	 * in no_alloc mode: the size of the allocation */
@@ -93,13 +107,28 @@
 #define PMEM_DEBUG_MSGS 0
 #if PMEM_DEBUG_MSGS
 #define DLOG(fmt,args...) \
-	do { printk(KERN_INFO "[%s:%s:%d] "fmt, __FILE__, __func__, __LINE__, \
+	do { pr_debug("[%s:%s:%d] "fmt, __FILE__, __func__, __LINE__, \
 		    ##args); } \
 	while (0)
 #else
 #define DLOG(x...) do {} while (0)
 #endif
 
+enum pmem_align {
+	PMEM_ALIGN_4K,
+	PMEM_ALIGN_1M,
+};
+
+#define PMEM_NAME_SIZE 16
+
+struct alloc_list {
+	void *addr;                  /* physical addr of allocation */
+	void *aaddr;                 /* aligned physical addr       */
+	unsigned int size;           /* total size of allocation    */
+	unsigned char __iomem *vaddr; /* Virtual addr                */
+	struct list_head allocs;
+};
+
 struct pmem_info {
 	struct miscdevice dev;
 	/* physical start address of the remapped pmem space */
@@ -112,62 +141,113 @@
 	unsigned long num_entries;
 	/* pfn of the garbage page in memory */
 	unsigned long garbage_pfn;
+	/* which memory type (i.e. SMI, EBI1) this PMEM device is backed by */
+	unsigned memory_type;
+
+	char name[PMEM_NAME_SIZE];
+
 	/* index of the garbage page in the pmem space */
 	int garbage_index;
-	/* the bitmap for the region indicating which entries are allocated
-	 * and which are free */
-	struct pmem_bits *bitmap;
-	/* indicates the region should not be managed with an allocator */
-	unsigned no_allocator;
+
+	enum pmem_allocator_type allocator_type;
+
+	int (*allocate)(const int,
+			const unsigned long,
+			const unsigned int);
+	int (*free)(int, int);
+	int (*free_space)(int, struct pmem_freespace *);
+	unsigned long (*len)(int, struct pmem_data *);
+	unsigned long (*start_addr)(int, struct pmem_data *);
+
+	/* actual size of memory element, e.g.: (4 << 10) is 4K */
+	unsigned int quantum;
+
 	/* indicates maps of this region should be cached, if a mix of
 	 * cached and uncached is desired, set this and open the device with
 	 * O_SYNC to get an uncached region */
 	unsigned cached;
 	unsigned buffered;
-	/* in no_allocator mode the first mapper gets the whole space and sets
-	 * this flag */
-	unsigned allocated;
+	union {
+		struct {
+			/* in all_or_nothing allocator mode the first mapper
+			 * gets the whole space and sets this flag */
+			unsigned allocated;
+		} all_or_nothing;
+
+		struct {
+			/* the buddy allocator bitmap for the region
+			 * indicating which entries are allocated and which
+			 * are free.
+			 */
+
+			struct pmem_bits *buddy_bitmap;
+		} buddy_bestfit;
+
+		struct {
+			unsigned int bitmap_free; /* # of zero bits/quanta */
+			uint32_t *bitmap;
+			int32_t bitmap_allocs;
+			struct {
+				short bit;
+				unsigned short quanta;
+			} *bitm_alloc;
+		} bitmap;
+
+		struct {
+			unsigned long used;      /* Bytes currently allocated */
+			struct list_head alist;  /* List of allocations       */
+		} system_mem;
+	} allocator;
+
+	int id;
+	struct kobject kobj;
+
 	/* for debugging, creates a list of pmem file structs, the
-	 * data_list_lock should be taken before pmem_data->sem if both are
+	 * data_list_mutex should be taken before pmem_data->sem if both are
 	 * needed */
-	struct mutex data_list_lock;
+	struct mutex data_list_mutex;
 	struct list_head data_list;
-	/* pmem_sem protects the bitmap array
-	 * a write lock should be held when modifying entries in bitmap
-	 * a read lock should be held when reading data from bits or
-	 * dereferencing a pointer into bitmap
-	 *
-	 * pmem_data->sem protects the pmem data of a particular file
-	 * Many of the function that require the pmem_data->sem have a non-
-	 * locking version for when the caller is already holding that sem.
+	/* arena_mutex protects the global allocation arena
 	 *
 	 * IF YOU TAKE BOTH LOCKS TAKE THEM IN THIS ORDER:
-	 * down(pmem_data->sem) => down(bitmap_sem)
+	 * down(pmem_data->sem) => mutex_lock(arena_mutex)
 	 */
-	struct rw_semaphore bitmap_sem;
+	struct mutex arena_mutex;
 
 	long (*ioctl)(struct file *, unsigned int, unsigned long);
 	int (*release)(struct inode *, struct file *);
 };
+#define to_pmem_info_id(a) (container_of(a, struct pmem_info, kobj)->id)
 
 static struct pmem_info pmem[PMEM_MAX_DEVICES];
 static int id_count;
 
-#define PMEM_IS_FREE(id, index) !(pmem[id].bitmap[index].allocated)
-#define PMEM_ORDER(id, index) pmem[id].bitmap[index].order
-#define PMEM_BUDDY_INDEX(id, index) (index ^ (1 << PMEM_ORDER(id, index)))
-#define PMEM_NEXT_INDEX(id, index) (index + (1 << PMEM_ORDER(id, index)))
-#define PMEM_OFFSET(index) (index * PMEM_MIN_ALLOC)
-#define PMEM_START_ADDR(id, index) (PMEM_OFFSET(index) + pmem[id].base)
-#define PMEM_LEN(id, index) ((1 << PMEM_ORDER(id, index)) * PMEM_MIN_ALLOC)
-#define PMEM_END_ADDR(id, index) (PMEM_START_ADDR(id, index) + \
-	PMEM_LEN(id, index))
-#define PMEM_START_VADDR(id, index) (PMEM_OFFSET(id, index) + pmem[id].vbase)
-#define PMEM_END_VADDR(id, index) (PMEM_START_VADDR(id, index) + \
-	PMEM_LEN(id, index))
+#define PMEM_SYSFS_DIR_NAME "pmem_regions" /* under /sys/kernel/ */
+static struct kset *pmem_kset;
+
+#define PMEM_IS_FREE_BUDDY(id, index) \
+	(!(pmem[id].allocator.buddy_bestfit.buddy_bitmap[index].allocated))
+#define PMEM_BUDDY_ORDER(id, index) \
+	(pmem[id].allocator.buddy_bestfit.buddy_bitmap[index].order)
+#define PMEM_BUDDY_INDEX(id, index) \
+	(index ^ (1 << PMEM_BUDDY_ORDER(id, index)))
+#define PMEM_BUDDY_NEXT_INDEX(id, index) \
+	(index + (1 << PMEM_BUDDY_ORDER(id, index)))
+#define PMEM_OFFSET(index) (index * pmem[id].quantum)
+#define PMEM_START_ADDR(id, index) \
+	(PMEM_OFFSET(index) + pmem[id].base)
+#define PMEM_BUDDY_LEN(id, index) \
+	((1 << PMEM_BUDDY_ORDER(id, index)) * pmem[id].quantum)
+#define PMEM_END_ADDR(id, index) \
+	(PMEM_START_ADDR(id, index) + PMEM_BUDDY_LEN(id, index))
+#define PMEM_START_VADDR(id, index) \
+	(PMEM_OFFSET(index) + pmem[id].vbase)
+#define PMEM_END_VADDR(id, index) \
+	(PMEM_START_VADDR(id, index) + PMEM_BUDDY_LEN(id, index))
 #define PMEM_REVOKED(data) (data->flags & PMEM_FLAGS_REVOKED)
 #define PMEM_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK)))
-#define PMEM_IS_SUBMAP(data) ((data->flags & PMEM_FLAGS_SUBMAP) && \
+#define PMEM_IS_SUBMAP(data) \
+	((data->flags & PMEM_FLAGS_SUBMAP) && \
 	(!(data->flags & PMEM_FLAGS_UNSUBMAP)))
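The index macros above turn a bitmap index and buddy order into a physical byte range. A minimal standalone sketch of that arithmetic, using made-up region parameters (quantum, base, index and order are illustrative, not driver state):

#include <stdio.h>

int main(void)
{
	unsigned long base = 0x10000000, quantum = 4096;
	unsigned int index = 8, order = 3;

	unsigned long start = base + index * quantum;    /* PMEM_START_ADDR */
	unsigned long len   = (1UL << order) * quantum;  /* PMEM_BUDDY_LEN  */

	/* prints: start 0x10008000 len 32768 end 0x10010000 */
	printf("start %#lx len %lu end %#lx\n", start, len, start + len);
	return 0;
}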
 
 static int pmem_release(struct inode *, struct file *);
@@ -182,79 +262,361 @@
 	.unlocked_ioctl = pmem_ioctl,
 };
 
+#define PMEM_ATTR(_name, _mode, _show, _store) {            \
+	.attr = {.name = __stringify(_name), .mode = _mode }, \
+	.show = _show,                                        \
+	.store = _store,                                      \
+}
+
+struct pmem_attr {
+	struct attribute attr;
+	ssize_t(*show) (const int id, char * const);
+	ssize_t(*store) (const int id, const char * const, const size_t count);
+};
+#define to_pmem_attr(a) container_of(a, struct pmem_attr, attr)
+
+#define RW_PMEM_ATTR(name)  \
+static struct pmem_attr pmem_attr_## name = \
+ PMEM_ATTR(name, S_IRUGO | S_IWUSR, show_pmem_## name, store_pmem_## name)
+
+#define RO_PMEM_ATTR(name)  \
+static struct pmem_attr pmem_attr_## name = \
+ PMEM_ATTR(name, S_IRUGO, show_pmem_## name, NULL)
+
+#define WO_PMEM_ATTR(name)  \
+static struct pmem_attr pmem_attr_## name = \
+ PMEM_ATTR(name, S_IWUSR, NULL, store_pmem_## name)
+
+static ssize_t show_pmem(struct kobject *kobj,
+			struct attribute *attr,
+			char *buf)
+{
+	struct pmem_attr *a = to_pmem_attr(attr);
+	return a->show ? a->show(to_pmem_info_id(kobj), buf) : -EIO;
+}
+
+static ssize_t store_pmem(struct kobject *kobj, struct attribute *attr,
+		     const char *buf, size_t count)
+{
+	struct pmem_attr *a = to_pmem_attr(attr);
+	return a->store ? a->store(to_pmem_info_id(kobj), buf, count) : -EIO;
+}
+
+static struct sysfs_ops pmem_ops = {
+	.show = show_pmem,
+	.store = store_pmem,
+};
+
+static ssize_t show_pmem_base(int id, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%lu(%#lx)\n",
+		pmem[id].base, pmem[id].base);
+}
+RO_PMEM_ATTR(base);
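For reference, the RO_PMEM_ATTR(base) line above expands to roughly the following definition; this is a sketch of the macro expansion, not literal preprocessor output:

static struct pmem_attr pmem_attr_base = {
	.attr  = { .name = "base", .mode = S_IRUGO },
	.show  = show_pmem_base,
	.store = NULL,
};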
+
+static ssize_t show_pmem_size(int id, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%lu(%#lx)\n",
+		pmem[id].size, pmem[id].size);
+}
+RO_PMEM_ATTR(size);
+
+static ssize_t show_pmem_allocator_type(int id, char *buf)
+{
+	switch (pmem[id].allocator_type) {
+	case  PMEM_ALLOCATORTYPE_ALLORNOTHING:
+		return scnprintf(buf, PAGE_SIZE, "%s\n", "All or Nothing");
+	case  PMEM_ALLOCATORTYPE_BUDDYBESTFIT:
+		return scnprintf(buf, PAGE_SIZE, "%s\n", "Buddy Bestfit");
+	case  PMEM_ALLOCATORTYPE_BITMAP:
+		return scnprintf(buf, PAGE_SIZE, "%s\n", "Bitmap");
+	case PMEM_ALLOCATORTYPE_SYSTEM:
+		return scnprintf(buf, PAGE_SIZE, "%s\n", "System heap");
+	default:
+		return scnprintf(buf, PAGE_SIZE,
+			"??? Invalid allocator type (%d) for this region! "
+			"Something isn't right.\n",
+			pmem[id].allocator_type);
+	}
+}
+RO_PMEM_ATTR(allocator_type);
+
+static ssize_t show_pmem_mapped_regions(int id, char *buf)
+{
+	struct list_head *elt;
+	int ret;
+
+	ret = scnprintf(buf, PAGE_SIZE,
+		      "pid #: mapped regions (offset, len) (offset, len)...\n");
+
+	mutex_lock(&pmem[id].data_list_mutex);
+	list_for_each(elt, &pmem[id].data_list) {
+		struct pmem_data *data =
+			list_entry(elt, struct pmem_data, list);
+		struct list_head *elt2;
+
+		down_read(&data->sem);
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "pid %u:",
+				data->pid);
+		list_for_each(elt2, &data->region_list) {
+			struct pmem_region_node *region_node = list_entry(elt2,
+					struct pmem_region_node,
+					list);
+			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+					"(%lx,%lx) ",
+					region_node->region.offset,
+					region_node->region.len);
+		}
+		up_read(&data->sem);
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
+	}
+	mutex_unlock(&pmem[id].data_list_mutex);
+	return ret;
+}
+RO_PMEM_ATTR(mapped_regions);
+
+#define PMEM_COMMON_SYSFS_ATTRS \
+	&pmem_attr_base.attr, \
+	&pmem_attr_size.attr, \
+	&pmem_attr_allocator_type.attr, \
+	&pmem_attr_mapped_regions.attr
+
+
+static ssize_t show_pmem_allocated(int id, char *buf)
+{
+	ssize_t ret;
+
+	mutex_lock(&pmem[id].arena_mutex);
+	ret = scnprintf(buf, PAGE_SIZE, "%s\n",
+		pmem[id].allocator.all_or_nothing.allocated ?
+		"is allocated" : "is NOT allocated");
+	mutex_unlock(&pmem[id].arena_mutex);
+	return ret;
+}
+RO_PMEM_ATTR(allocated);
+
+static struct attribute *pmem_allornothing_attrs[] = {
+	PMEM_COMMON_SYSFS_ATTRS,
+
+	&pmem_attr_allocated.attr,
+
+	NULL
+};
+
+static struct kobj_type pmem_allornothing_ktype = {
+	.sysfs_ops = &pmem_ops,
+	.default_attrs = pmem_allornothing_attrs,
+};
+
+static ssize_t show_pmem_total_entries(int id, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%lu\n", pmem[id].num_entries);
+}
+RO_PMEM_ATTR(total_entries);
+
+static ssize_t show_pmem_quantum_size(int id, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%u (%#x)\n",
+		pmem[id].quantum, pmem[id].quantum);
+}
+RO_PMEM_ATTR(quantum_size);
+
+static ssize_t show_pmem_buddy_bitmap_dump(int id, char *buf)
+{
+	int ret, i;
+
+	mutex_lock(&pmem[id].data_list_mutex);
+	ret = scnprintf(buf, PAGE_SIZE, "index\torder\tlength\tallocated\n");
+
+	for (i = 0; i < pmem[id].num_entries && (PAGE_SIZE - ret);
+			i = PMEM_BUDDY_NEXT_INDEX(id, i))
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%d\t%d\t%d\t%d\n",
+			i, PMEM_BUDDY_ORDER(id, i),
+			PMEM_BUDDY_LEN(id, i),
+			!PMEM_IS_FREE_BUDDY(id, i));
+
+	mutex_unlock(&pmem[id].data_list_mutex);
+	return ret;
+}
+RO_PMEM_ATTR(buddy_bitmap_dump);
+
+#define PMEM_BITMAP_BUDDY_BESTFIT_COMMON_SYSFS_ATTRS \
+	&pmem_attr_quantum_size.attr, \
+	&pmem_attr_total_entries.attr
+
+static struct attribute *pmem_buddy_bestfit_attrs[] = {
+	PMEM_COMMON_SYSFS_ATTRS,
+
+	PMEM_BITMAP_BUDDY_BESTFIT_COMMON_SYSFS_ATTRS,
+
+	&pmem_attr_buddy_bitmap_dump.attr,
+
+	NULL
+};
+
+static struct kobj_type pmem_buddy_bestfit_ktype = {
+	.sysfs_ops = &pmem_ops,
+	.default_attrs = pmem_buddy_bestfit_attrs,
+};
+
+static ssize_t show_pmem_free_quanta(int id, char *buf)
+{
+	ssize_t ret;
+
+	mutex_lock(&pmem[id].arena_mutex);
+	ret = scnprintf(buf, PAGE_SIZE, "%u\n",
+		pmem[id].allocator.bitmap.bitmap_free);
+	mutex_unlock(&pmem[id].arena_mutex);
+	return ret;
+}
+RO_PMEM_ATTR(free_quanta);
+
+static ssize_t show_pmem_bits_allocated(int id, char *buf)
+{
+	ssize_t ret;
+	unsigned int i;
+
+	mutex_lock(&pmem[id].arena_mutex);
+
+	ret = scnprintf(buf, PAGE_SIZE,
+		"id: %d\nbitnum\tindex\tquanta allocated\n", id);
+
+	for (i = 0; i < pmem[id].allocator.bitmap.bitmap_allocs; i++)
+		if (pmem[id].allocator.bitmap.bitm_alloc[i].bit != -1)
+			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+				"%u\t%u\t%u\n",
+				i,
+				pmem[id].allocator.bitmap.bitm_alloc[i].bit,
+				pmem[id].allocator.bitmap.bitm_alloc[i].quanta
+				);
+
+	mutex_unlock(&pmem[id].arena_mutex);
+	return ret;
+}
+RO_PMEM_ATTR(bits_allocated);
+
+static struct attribute *pmem_bitmap_attrs[] = {
+	PMEM_COMMON_SYSFS_ATTRS,
+
+	PMEM_BITMAP_BUDDY_BESTFIT_COMMON_SYSFS_ATTRS,
+
+	&pmem_attr_free_quanta.attr,
+	&pmem_attr_bits_allocated.attr,
+
+	NULL
+};
+
+static struct attribute *pmem_system_attrs[] = {
+	PMEM_COMMON_SYSFS_ATTRS,
+
+	NULL
+};
+
+static struct kobj_type pmem_bitmap_ktype = {
+	.sysfs_ops = &pmem_ops,
+	.default_attrs = pmem_bitmap_attrs,
+};
+
+static struct kobj_type pmem_system_ktype = {
+	.sysfs_ops = &pmem_ops,
+	.default_attrs = pmem_system_attrs,
+};
+
 static int get_id(struct file *file)
 {
 	return MINOR(file->f_dentry->d_inode->i_rdev);
 }
 
-int is_pmem_file(struct file *file)
+static char *get_name(struct file *file)
+{
+	int id = get_id(file);
+	return pmem[id].name;
+}
+
+static int is_pmem_file(struct file *file)
 {
 	int id;
 
 	if (unlikely(!file || !file->f_dentry || !file->f_dentry->d_inode))
 		return 0;
+
 	id = get_id(file);
-	if (unlikely(id >= PMEM_MAX_DEVICES))
-		return 0;
-	if (unlikely(file->f_dentry->d_inode->i_rdev !=
-	     MKDEV(MISC_MAJOR, pmem[id].dev.minor)))
-		return 0;
-	return 1;
+	return (unlikely(id >= PMEM_MAX_DEVICES ||
+		file->f_dentry->d_inode->i_rdev !=
+		     MKDEV(MISC_MAJOR, pmem[id].dev.minor))) ? 0 : 1;
 }
 
 static int has_allocation(struct file *file)
 {
-	struct pmem_data *data;
-	/* check is_pmem_file first if not accessed via pmem_file_ops */
-
-	if (unlikely(!file->private_data))
-		return 0;
-	data = (struct pmem_data *)file->private_data;
-	if (unlikely(data->index < 0))
-		return 0;
-	return 1;
+	/* must be called with at least a read lock held on
+	 * ((struct pmem_data *)(file->private_data))->sem, which
+	 * means file is guaranteed to be non-NULL on entry;
+	 * check is_pmem_file first if not accessed via pmem_file_ops */
+	struct pmem_data *pdata = file->private_data;
+	return pdata && pdata->index != -1;
 }
 
 static int is_master_owner(struct file *file)
 {
 	struct file *master_file;
-	struct pmem_data *data;
+	struct pmem_data *data = file->private_data;
 	int put_needed, ret = 0;
 
-	if (!is_pmem_file(file) || !has_allocation(file))
+	if (!has_allocation(file))
 		return 0;
-	data = (struct pmem_data *)file->private_data;
 	if (PMEM_FLAGS_MASTERMAP & data->flags)
 		return 1;
 	master_file = fget_light(data->master_fd, &put_needed);
 	if (master_file && data->master_file == master_file)
 		ret = 1;
-	fput_light(master_file, put_needed);
+	if (master_file)
+		fput_light(master_file, put_needed);
 	return ret;
 }
 
-static int pmem_free(int id, int index)
+static int pmem_free_all_or_nothing(int id, int index)
 {
-	/* caller should hold the write lock on pmem_sem! */
-	int buddy, curr = index;
+	/* caller should hold the lock on arena_mutex! */
 	DLOG("index %d\n", index);
 
-	if (pmem[id].no_allocator) {
-		pmem[id].allocated = 0;
-		return 0;
-	}
+	pmem[id].allocator.all_or_nothing.allocated = 0;
+	return 0;
+}
+
+static int pmem_free_space_all_or_nothing(int id,
+		struct pmem_freespace *fs)
+{
+	/* caller should hold the lock on arena_mutex! */
+	fs->total = pmem[id].allocator.all_or_nothing.allocated == 0 ?
+		pmem[id].size : 0;
+
+	fs->largest = fs->total;
+	return 0;
+}
+
+
+static int pmem_free_buddy_bestfit(int id, int index)
+{
+	/* caller should hold the lock on arena_mutex! */
+	int curr = index;
+	DLOG("index %d\n", index);
+
+
 	/* clean up the bitmap, merging any buddies */
-	pmem[id].bitmap[curr].allocated = 0;
+	pmem[id].allocator.buddy_bestfit.buddy_bitmap[curr].allocated = 0;
 	/* find a slot's buddy: Buddy# = Slot# ^ (1 << order)
 	 * if the buddy is also free merge them
 	 * repeat until the buddy is not free or end of the bitmap is reached
 	 */
 	do {
-		buddy = PMEM_BUDDY_INDEX(id, curr);
-		if (PMEM_IS_FREE(id, buddy) &&
-				PMEM_ORDER(id, buddy) == PMEM_ORDER(id, curr)) {
-			PMEM_ORDER(id, buddy)++;
-			PMEM_ORDER(id, curr)++;
+		int buddy = PMEM_BUDDY_INDEX(id, curr);
+		if (buddy < pmem[id].num_entries &&
+		    PMEM_IS_FREE_BUDDY(id, buddy) &&
+		    PMEM_BUDDY_ORDER(id, buddy) ==
+				PMEM_BUDDY_ORDER(id, curr)) {
+			PMEM_BUDDY_ORDER(id, buddy)++;
+			PMEM_BUDDY_ORDER(id, curr)++;
 			curr = min(buddy, curr);
 		} else {
 			break;
@@ -264,43 +626,222 @@
 	return 0;
 }
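The merge loop above walks buddies with index ^ (1 << order). A small standalone sketch of that relation, with hypothetical values rather than driver state:

#include <stdio.h>

int main(void)
{
	unsigned int order = 2;            /* block spans 1 << 2 = 4 quanta */
	unsigned int index = 4;            /* block starts at quantum 4     */
	unsigned int buddy = index ^ (1u << order);

	/* a free buddy of equal order merges into one block of order 3 */
	printf("buddy of %u at order %u is %u\n", index, order, buddy);
	return 0;
}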
 
+
+static int pmem_free_space_buddy_bestfit(int id,
+		struct pmem_freespace *fs)
+{
+	/* caller should hold the lock on arena_mutex! */
+	int curr;
+	unsigned long size;
+	fs->total = 0;
+	fs->largest = 0;
+
+	for (curr = 0; curr < pmem[id].num_entries;
+	     curr = PMEM_BUDDY_NEXT_INDEX(id, curr)) {
+		if (PMEM_IS_FREE_BUDDY(id, curr)) {
+			size = PMEM_BUDDY_LEN(id, curr);
+			if (size > fs->largest)
+				fs->largest = size;
+			fs->total += size;
+		}
+	}
+	return 0;
+}
+
+
+static inline uint32_t start_mask(int bit_start)
+{
+	return (uint32_t)(~0) << (bit_start & PMEM_BITS_PER_WORD_MASK);
+}
+
+static inline uint32_t end_mask(int bit_end)
+{
+	return (uint32_t)(~0) >>
+		((BITS_PER_LONG - bit_end) & PMEM_BITS_PER_WORD_MASK);
+}
+
+static inline int compute_total_words(int bit_end, int word_index)
+{
+	return ((bit_end + BITS_PER_LONG - 1) >>
+			PMEM_32BIT_WORD_ORDER) - word_index;
+}
+
+static void bitmap_bits_clear_all(uint32_t *bitp, int bit_start, int bit_end)
+{
+	int word_index = bit_start >> PMEM_32BIT_WORD_ORDER, total_words;
+
+	total_words = compute_total_words(bit_end, word_index);
+	if (total_words > 0) {
+		if (total_words == 1) {
+			bitp[word_index] &=
+				~(start_mask(bit_start) & end_mask(bit_end));
+		} else {
+			bitp[word_index++] &= ~start_mask(bit_start);
+			if (total_words > 2) {
+				int total_bytes;
+
+				total_words -= 2;
+				total_bytes = total_words << 2;
+
+				memset(&bitp[word_index], 0, total_bytes);
+				word_index += total_words;
+			}
+			bitp[word_index] &= ~end_mask(bit_end);
+		}
+	}
+}
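start_mask() and end_mask() above select the half-open bit range [bit_start, bit_end) within 32-bit words. A quick standalone check of that behaviour (userspace sketch, not driver code):

#include <stdint.h>
#include <stdio.h>

static uint32_t start_mask(int bit_start)
{
	return (uint32_t)~0u << (bit_start & 31);
}

static uint32_t end_mask(int bit_end)
{
	return (uint32_t)~0u >> ((32 - bit_end) & 31);
}

int main(void)
{
	/* bits 3..9 of one word: prints 0x3f8 */
	printf("%#x\n", start_mask(3) & end_mask(10));
	return 0;
}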
+
+static int pmem_free_bitmap(int id, int bitnum)
+{
+	/* caller should hold the lock on arena_mutex! */
+	int i;
+	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
+
+	DLOG("bitnum %d\n", bitnum);
+
+	for (i = 0; i < pmem[id].allocator.bitmap.bitmap_allocs; i++) {
+		const int curr_bit =
+			pmem[id].allocator.bitmap.bitm_alloc[i].bit;
+
+		if (curr_bit == bitnum) {
+			const int curr_quanta =
+				pmem[id].allocator.bitmap.bitm_alloc[i].quanta;
+
+			bitmap_bits_clear_all(pmem[id].allocator.bitmap.bitmap,
+				curr_bit, curr_bit + curr_quanta);
+			pmem[id].allocator.bitmap.bitmap_free += curr_quanta;
+			pmem[id].allocator.bitmap.bitm_alloc[i].bit = -1;
+			pmem[id].allocator.bitmap.bitm_alloc[i].quanta = 0;
+			return 0;
+		}
+	}
+	printk(KERN_ALERT "pmem: %s: Attempt to free unallocated index %d, id"
+		" %d, pid %d(%s)\n", __func__, bitnum, id,  current->pid,
+		get_task_comm(currtask_name, current));
+
+	return -1;
+}
+
+static int pmem_free_system(int id, int index)
+{
+	/* caller should hold the lock on arena_mutex! */
+	struct alloc_list *item;
+
+	DLOG("index %d\n", index);
+	if (index == 0)
+		return 0;
+
+	item = (struct alloc_list *)index;
+
+	if (item->vaddr != NULL) {
+		iounmap(item->vaddr);
+		kfree(__va(item->addr));
+		list_del(&item->allocs);
+		kfree(item);
+	}
+
+	return 0;
+}
+
+static int pmem_free_space_bitmap(int id, struct pmem_freespace *fs)
+{
+	int i, j;
+	int max_allocs = pmem[id].allocator.bitmap.bitmap_allocs;
+	int alloc_start = 0;
+	int next_alloc;
+	unsigned long size = 0;
+
+	fs->total = 0;
+	fs->largest = 0;
+
+	for (i = 0; i < max_allocs; i++) {
+
+		int alloc_quanta = 0;
+		int alloc_idx = 0;
+		next_alloc = pmem[id].num_entries;
+
+		/* Look for the lowest bit where next allocation starts */
+		for (j = 0; j < max_allocs; j++) {
+			const int curr_alloc = pmem[id].allocator.
+						bitmap.bitm_alloc[j].bit;
+			if (curr_alloc != -1) {
+				if (alloc_start == curr_alloc)
+					alloc_idx = j;
+				if (alloc_start >= curr_alloc)
+					continue;
+				if (curr_alloc < next_alloc)
+					next_alloc = curr_alloc;
+			}
+		}
+		alloc_quanta = pmem[id].allocator.bitmap.
+				bitm_alloc[alloc_idx].quanta;
+		size = (next_alloc - (alloc_start + alloc_quanta)) *
+				pmem[id].quantum;
+
+		if (size > fs->largest)
+			fs->largest = size;
+		fs->total += size;
+
+		if (next_alloc == pmem[id].num_entries)
+			break;
+		else
+			alloc_start = next_alloc;
+	}
+
+	return 0;
+}
+
+static int pmem_free_space_system(int id, struct pmem_freespace *fs)
+{
+	fs->total = pmem[id].size;
+	fs->largest = pmem[id].size;
+
+	return 0;
+}
+
 static void pmem_revoke(struct file *file, struct pmem_data *data);
 
 static int pmem_release(struct inode *inode, struct file *file)
 {
-	struct pmem_data *data = (struct pmem_data *)file->private_data;
+	struct pmem_data *data = file->private_data;
 	struct pmem_region_node *region_node;
 	struct list_head *elt, *elt2;
 	int id = get_id(file), ret = 0;
 
-
-	mutex_lock(&pmem[id].data_list_lock);
+#if PMEM_DEBUG_MSGS
+	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
+#endif
+	DLOG("releasing memory pid %u(%s) file %p(%ld) dev %s(id: %d)\n",
+		current->pid, get_task_comm(currtask_name, current),
+		file, file_count(file), get_name(file), id);
+	mutex_lock(&pmem[id].data_list_mutex);
 	/* if this file is a master, revoke all the memory in the connected
 	 *  files */
 	if (PMEM_FLAGS_MASTERMAP & data->flags) {
-		struct pmem_data *sub_data;
 		list_for_each(elt, &pmem[id].data_list) {
-			sub_data = list_entry(elt, struct pmem_data, list);
+			struct pmem_data *sub_data =
+				list_entry(elt, struct pmem_data, list);
+			int is_master;
+
 			down_read(&sub_data->sem);
-			if (PMEM_IS_SUBMAP(sub_data) &&
-			    file == sub_data->master_file) {
-				up_read(&sub_data->sem);
+			is_master = (PMEM_IS_SUBMAP(sub_data) &&
+				file == sub_data->master_file);
+			up_read(&sub_data->sem);
+
+			if (is_master)
 				pmem_revoke(file, sub_data);
-			}  else
-				up_read(&sub_data->sem);
 		}
 	}
 	list_del(&data->list);
-	mutex_unlock(&pmem[id].data_list_lock);
-
+	mutex_unlock(&pmem[id].data_list_mutex);
 
 	down_write(&data->sem);
 
-	/* if its not a conencted file and it has an allocation, free it */
+	/* if it is not a connected file and it has an allocation, free it */
 	if (!(PMEM_FLAGS_CONNECTED & data->flags) && has_allocation(file)) {
-		down_write(&pmem[id].bitmap_sem);
-		ret = pmem_free(id, data->index);
-		up_write(&pmem[id].bitmap_sem);
+		mutex_lock(&pmem[id].arena_mutex);
+		ret = pmem[id].free(id, data->index);
+		mutex_unlock(&pmem[id].arena_mutex);
 	}
 
 	/* if this file is a submap (mapped, connected file), downref the
@@ -333,15 +874,17 @@
 	struct pmem_data *data;
 	int id = get_id(file);
 	int ret = 0;
+#if PMEM_DEBUG_MSGS
+	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
+#endif
 
-	DLOG("current %u file %p(%d)\n", current->pid, file, file_count(file));
-	/* setup file->private_data to indicate its unmapped */
-	/*  you can only open a pmem device one time */
-	if (file->private_data != NULL)
-		return -1;
+	DLOG("pid %u(%s) file %p(%ld) dev %s(id: %d)\n",
+		current->pid, get_task_comm(currtask_name, current),
+		file, file_count(file), get_name(file), id);
 	data = kmalloc(sizeof(struct pmem_data), GFP_KERNEL);
 	if (!data) {
-		printk("pmem: unable to allocate memory for pmem metadata.");
+		printk(KERN_ALERT "pmem: %s: unable to allocate memory for "
+				"pmem metadata.\n", __func__);
 		return -1;
 	}
 	data->flags = 0;
@@ -359,17 +902,17 @@
 	file->private_data = data;
 	INIT_LIST_HEAD(&data->list);
 
-	mutex_lock(&pmem[id].data_list_lock);
+	mutex_lock(&pmem[id].data_list_mutex);
 	list_add(&data->list, &pmem[id].data_list);
-	mutex_unlock(&pmem[id].data_list_lock);
+	mutex_unlock(&pmem[id].data_list_mutex);
 	return ret;
 }
 
-static unsigned long pmem_order(unsigned long len)
+static unsigned long pmem_order(unsigned long len, int id)
 {
 	int i;
 
-	len = (len + PMEM_MIN_ALLOC - 1)/PMEM_MIN_ALLOC;
+	len = (len + pmem[id].quantum - 1)/pmem[id].quantum;
 	len--;
 	for (i = 0; i < sizeof(len)*8; i++)
 		if (len >> i == 0)
@@ -377,74 +920,385 @@
 	return i;
 }
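pmem_order() rounds the request up to whole quanta and then takes a ceiling log2. A standalone check of the same arithmetic, with the quantum passed explicitly and hypothetical sizes:

#include <stdio.h>

static unsigned long order_of(unsigned long len, unsigned long quantum)
{
	unsigned long i;

	len = (len + quantum - 1) / quantum;   /* round up to whole quanta */
	len--;
	for (i = 0; i < sizeof(len) * 8; i++)  /* ceiling log2 */
		if (len >> i == 0)
			break;
	return i;
}

int main(void)
{
	/* 12 KB with a 4 KB quantum needs 3 quanta, so order 2 (4 quanta) */
	printf("%lu\n", order_of(12 * 1024, 4096));
	return 0;
}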
 
-static int pmem_allocate(int id, unsigned long len)
+static int pmem_allocator_all_or_nothing(const int id,
+		const unsigned long len,
+		const unsigned int align)
 {
-	/* caller should hold the write lock on pmem_sem! */
-	/* return the corresponding pdata[] entry */
-	int curr = 0;
-	int end = pmem[id].num_entries;
-	int best_fit = -1;
-	unsigned long order = pmem_order(len);
-
-	if (pmem[id].no_allocator) {
-		DLOG("no allocator");
-		if ((len > pmem[id].size) || pmem[id].allocated)
-			return -1;
-		pmem[id].allocated = 1;
-		return len;
-	}
-
-	if (order > PMEM_MAX_ORDER)
+	/* caller should hold the lock on arena_mutex! */
+	DLOG("all or nothing\n");
+	if ((len > pmem[id].size) ||
+		pmem[id].allocator.all_or_nothing.allocated)
 		return -1;
+	pmem[id].allocator.all_or_nothing.allocated = 1;
+	return len;
+}
+
+static int pmem_allocator_buddy_bestfit(const int id,
+		const unsigned long len,
+		unsigned int align)
+{
+	/* caller should hold the lock on arena_mutex! */
+	int curr;
+	int best_fit = -1;
+	unsigned long order;
+
+	DLOG("buddy bestfit\n");
+	order = pmem_order(len, id);
+	if (order > PMEM_MAX_ORDER)
+		goto out;
+
 	DLOG("order %lx\n", order);
 
-	/* look through the bitmap:
-	 * 	if you find a free slot of the correct order use it
-	 * 	otherwise, use the best fit (smallest with size > order) slot
+	/* Look through the bitmap.
+	 * 	If a free slot of the correct order is found, use it.
+	 * 	Otherwise, use the best fit (smallest with size > order) slot.
 	 */
-	while (curr < end) {
-		if (PMEM_IS_FREE(id, curr)) {
-			if (PMEM_ORDER(id, curr) == (unsigned char)order) {
+	for (curr = 0;
+	     curr < pmem[id].num_entries;
+	     curr = PMEM_BUDDY_NEXT_INDEX(id, curr))
+		if (PMEM_IS_FREE_BUDDY(id, curr)) {
+			if (PMEM_BUDDY_ORDER(id, curr) ==
+					(unsigned char)order) {
 				/* set the not free bit and clear others */
 				best_fit = curr;
 				break;
 			}
-			if (PMEM_ORDER(id, curr) > (unsigned char)order &&
+			if (PMEM_BUDDY_ORDER(id, curr) >
+					(unsigned char)order &&
 			    (best_fit < 0 ||
-			     PMEM_ORDER(id, curr) < PMEM_ORDER(id, best_fit)))
+			     PMEM_BUDDY_ORDER(id, curr) <
+					PMEM_BUDDY_ORDER(id, best_fit)))
 				best_fit = curr;
 		}
-		curr = PMEM_NEXT_INDEX(id, curr);
-	}
 
-	/* if best_fit < 0, there are no suitable slots,
-	 * return an error
-	 */
+	/* if best_fit < 0, there are no suitable slots; return an error */
 	if (best_fit < 0) {
-		printk("pmem: no space left to allocate!\n");
-		return -1;
+#if PMEM_DEBUG
+		printk(KERN_ALERT "pmem: %s: no space left to allocate!\n",
+			__func__);
+#endif
+		goto out;
 	}
 
 	/* now partition the best fit:
 	 * 	split the slot into 2 buddies of order - 1
 	 * 	repeat until the slot is of the correct order
 	 */
-	while (PMEM_ORDER(id, best_fit) > (unsigned char)order) {
+	while (PMEM_BUDDY_ORDER(id, best_fit) > (unsigned char)order) {
 		int buddy;
-		PMEM_ORDER(id, best_fit) -= 1;
+		PMEM_BUDDY_ORDER(id, best_fit) -= 1;
 		buddy = PMEM_BUDDY_INDEX(id, best_fit);
-		PMEM_ORDER(id, buddy) = PMEM_ORDER(id, best_fit);
+		PMEM_BUDDY_ORDER(id, buddy) = PMEM_BUDDY_ORDER(id, best_fit);
 	}
-	pmem[id].bitmap[best_fit].allocated = 1;
+	pmem[id].allocator.buddy_bestfit.buddy_bitmap[best_fit].allocated = 1;
+out:
 	return best_fit;
 }
 
-static pgprot_t pmem_access_prot(struct file *file, pgprot_t vma_prot)
+
+static inline unsigned long paddr_from_bit(const int id, const int bitnum)
+{
+	return pmem[id].base + pmem[id].quantum * bitnum;
+}
+
+static inline unsigned long bit_from_paddr(const int id,
+		const unsigned long paddr)
+{
+	return (paddr - pmem[id].base) / pmem[id].quantum;
+}
+
+static void bitmap_bits_set_all(uint32_t *bitp, int bit_start, int bit_end)
+{
+	int word_index = bit_start >> PMEM_32BIT_WORD_ORDER, total_words;
+
+	total_words = compute_total_words(bit_end, word_index);
+	if (total_words > 0) {
+		if (total_words == 1) {
+			bitp[word_index] |=
+				(start_mask(bit_start) & end_mask(bit_end));
+		} else {
+			bitp[word_index++] |= start_mask(bit_start);
+			if (total_words > 2) {
+				int total_bytes;
+
+				total_words -= 2;
+				total_bytes = total_words << 2;
+
+				memset(&bitp[word_index], ~0, total_bytes);
+				word_index += total_words;
+			}
+			bitp[word_index] |= end_mask(bit_end);
+		}
+	}
+}
+
+static int
+bitmap_allocate_contiguous(uint32_t *bitp, int num_bits_to_alloc,
+		int total_bits, int spacing)
+{
+	int bit_start, last_bit, word_index;
+
+	if (num_bits_to_alloc <= 0)
+		return -1;
+
+	for (bit_start = 0; ;
+		bit_start = (last_bit +
+			(word_index << PMEM_32BIT_WORD_ORDER) + spacing - 1)
+			& ~(spacing - 1)) {
+		int bit_end = bit_start + num_bits_to_alloc, total_words;
+
+		if (bit_end > total_bits)
+			return -1; /* out of contiguous memory */
+
+		word_index = bit_start >> PMEM_32BIT_WORD_ORDER;
+		total_words = compute_total_words(bit_end, word_index);
+
+		if (total_words <= 0)
+			return -1;
+
+		if (total_words == 1) {
+			last_bit = fls(bitp[word_index] &
+					(start_mask(bit_start) &
+						end_mask(bit_end)));
+			if (last_bit)
+				continue;
+		} else {
+			int end_word = word_index + (total_words - 1);
+			last_bit =
+				fls(bitp[word_index] & start_mask(bit_start));
+			if (last_bit)
+				continue;
+
+			for (word_index++;
+					word_index < end_word;
+					word_index++) {
+				last_bit = fls(bitp[word_index]);
+				if (last_bit)
+					break;
+			}
+			if (last_bit)
+				continue;
+
+			last_bit = fls(bitp[word_index] & end_mask(bit_end));
+			if (last_bit)
+				continue;
+		}
+		bitmap_bits_set_all(bitp, bit_start, bit_end);
+		return bit_start;
+	}
+	return -1;
+}
+
+static int reserve_quanta(const unsigned int quanta_needed,
+		const int id,
+		unsigned int align)
+{
+	/* alignment should be a valid power of 2 */
+	int ret = -1, start_bit = 0, spacing = 1;
+
+	/* Sanity check */
+	if (quanta_needed > pmem[id].allocator.bitmap.bitmap_free) {
+#if PMEM_DEBUG
+		printk(KERN_ALERT "pmem: %s: request (%d) too big for"
+			" available free (%d)\n", __func__, quanta_needed,
+			pmem[id].allocator.bitmap.bitmap_free);
+#endif
+		return -1;
+	}
+
+	start_bit = bit_from_paddr(id,
+		(pmem[id].base + align - 1) & ~(align - 1));
+	if (start_bit <= -1) {
+#if PMEM_DEBUG
+		printk(KERN_ALERT
+			"pmem: %s: bit_from_paddr fails for"
+			" %u alignment.\n", __func__, align);
+#endif
+		return -1;
+	}
+	spacing = align / pmem[id].quantum;
+	spacing = spacing > 1 ? spacing : 1;
+
+	ret = bitmap_allocate_contiguous(pmem[id].allocator.bitmap.bitmap,
+		quanta_needed,
+		(pmem[id].size + pmem[id].quantum - 1) / pmem[id].quantum,
+		spacing);
+
+#if PMEM_DEBUG
+	if (ret < 0)
+		printk(KERN_ALERT "pmem: %s: not enough contiguous bits free "
+			"in bitmap! Region memory is either too fragmented or"
+			" request is too large for available memory.\n",
+			__func__);
+#endif
+
+	return ret;
+}
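reserve_quanta() converts the caller's byte alignment into a starting bit and a search spacing measured in quanta before scanning the bitmap. A standalone sketch of that conversion with made-up region parameters:

#include <stdio.h>

int main(void)
{
	unsigned long base = 0x40000000;   /* hypothetical region start */
	unsigned long quantum = 4096;      /* hypothetical quantum size */
	unsigned long align = 1UL << 20;   /* caller asks for 1 MB alignment */

	/* first bit satisfying the alignment, as in bit_from_paddr() */
	unsigned long start_bit =
		(((base + align - 1) & ~(align - 1)) - base) / quantum;
	/* candidate allocations are then tried every 'spacing' bits */
	unsigned long spacing = align / quantum ? align / quantum : 1;

	/* prints: start_bit 0 spacing 256 */
	printf("start_bit %lu spacing %lu\n", start_bit, spacing);
	return 0;
}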
+
+static int pmem_allocator_bitmap(const int id,
+		const unsigned long len,
+		const unsigned int align)
+{
+	/* caller should hold the lock on arena_mutex! */
+	int bitnum, i;
+	unsigned int quanta_needed;
+
+	DLOG("bitmap id %d, len %ld, align %u\n", id, len, align);
+	if (!pmem[id].allocator.bitmap.bitm_alloc) {
+#if PMEM_DEBUG
+		printk(KERN_ALERT "pmem: bitm_alloc not present! id: %d\n",
+			id);
+#endif
+		return -1;
+	}
+
+	quanta_needed = (len + pmem[id].quantum - 1) / pmem[id].quantum;
+	DLOG("quantum size %u quanta needed %u free %u id %d\n",
+		pmem[id].quantum, quanta_needed,
+		pmem[id].allocator.bitmap.bitmap_free, id);
+
+	if (pmem[id].allocator.bitmap.bitmap_free < quanta_needed) {
+#if PMEM_DEBUG
+		printk(KERN_ALERT "pmem: memory allocation failure. "
+			"PMEM memory region exhausted, id %d."
+			" Unable to comply with allocation request.\n", id);
+#endif
+		return -1;
+	}
+
+	bitnum = reserve_quanta(quanta_needed, id, align);
+	if (bitnum == -1)
+		goto leave;
+
+	for (i = 0;
+		i < pmem[id].allocator.bitmap.bitmap_allocs &&
+			pmem[id].allocator.bitmap.bitm_alloc[i].bit != -1;
+		i++)
+		;
+
+	if (i >= pmem[id].allocator.bitmap.bitmap_allocs) {
+		void *temp;
+		int32_t new_bitmap_allocs =
+			pmem[id].allocator.bitmap.bitmap_allocs << 1;
+		int j;
+
+		if (!new_bitmap_allocs) { /* failed sanity check!! */
+#if PMEM_DEBUG
+			pr_alert("pmem: bitmap_allocs number"
+				" wrapped around to zero! Something "
+				"is VERY wrong.\n");
+#endif
+			return -1;
+		}
+
+		if (new_bitmap_allocs > pmem[id].num_entries) {
+			/* failed sanity check!! */
+#if PMEM_DEBUG
+			pr_alert("pmem: required bitmap_allocs"
+				" number exceeds maximum entries possible"
+				" for current quanta\n");
+#endif
+			return -1;
+		}
+
+		temp = krealloc(pmem[id].allocator.bitmap.bitm_alloc,
+				new_bitmap_allocs *
+				sizeof(*pmem[id].allocator.bitmap.bitm_alloc),
+				GFP_KERNEL);
+		if (!temp) {
+#if PMEM_DEBUG
+			pr_alert("pmem: can't realloc bitmap_allocs,"
+				" id %d, current num bitmap allocs %d\n",
+				id, pmem[id].allocator.bitmap.bitmap_allocs);
+#endif
+			return -1;
+		}
+		pmem[id].allocator.bitmap.bitmap_allocs = new_bitmap_allocs;
+		pmem[id].allocator.bitmap.bitm_alloc = temp;
+
+		for (j = i; j < new_bitmap_allocs; j++) {
+			pmem[id].allocator.bitmap.bitm_alloc[j].bit = -1;
+			pmem[id].allocator.bitmap.bitm_alloc[j].quanta = 0;
+		}
+
+		DLOG("increased # of allocated regions to %d for id %d\n",
+			pmem[id].allocator.bitmap.bitmap_allocs, id);
+	}
+
+	DLOG("bitnum %d, bitm_alloc index %d\n", bitnum, i);
+
+	pmem[id].allocator.bitmap.bitmap_free -= quanta_needed;
+	pmem[id].allocator.bitmap.bitm_alloc[i].bit = bitnum;
+	pmem[id].allocator.bitmap.bitm_alloc[i].quanta = quanta_needed;
+leave:
+	return bitnum;
+}
+
+static int pmem_allocator_system(const int id,
+		const unsigned long len,
+		const unsigned int align)
+{
+	/* caller should hold the lock on arena_mutex! */
+	struct alloc_list *list;
+	unsigned long aligned_len;
+	int count = SYSTEM_ALLOC_RETRY;
+	void *buf;
+
+	DLOG("system id %d, len %ld, align %u\n", id, len, align);
+
+	if ((pmem[id].allocator.system_mem.used + len) > pmem[id].size) {
+		DLOG("requested size would be larger than quota\n");
+		return -1;
+	}
+
+	/* Handle alignment */
+	aligned_len = len + align;
+
+	/* Attempt allocation */
+	list = kmalloc(sizeof(struct alloc_list), GFP_KERNEL);
+	if (list == NULL) {
+		printk(KERN_ERR "pmem: failed to allocate system metadata\n");
+		return -1;
+	}
+	list->vaddr = NULL;
+
+	buf = NULL;
+	while ((buf == NULL) && count--) {
+		buf = kmalloc((aligned_len), GFP_KERNEL);
+		if (buf == NULL) {
+			DLOG("pmem: kmalloc %d temporarily failed len= %ld\n",
+				count, aligned_len);
+		}
+	}
+	if (!buf) {
+		printk(KERN_CRIT "pmem: kmalloc failed for id= %d len= %ld\n",
+			id, aligned_len);
+		kfree(list);
+		return -1;
+	}
+	list->size = aligned_len;
+	list->addr = (void *)__pa(buf);
+	list->aaddr = (void *)(((unsigned int)(list->addr) + (align - 1)) &
+			~(align - 1));
+
+	if (!pmem[id].cached)
+		list->vaddr = ioremap(__pa(buf), aligned_len);
+	else
+		list->vaddr = ioremap_cached(__pa(buf), aligned_len);
+
+	INIT_LIST_HEAD(&list->allocs);
+	list_add(&list->allocs, &pmem[id].allocator.system_mem.alist);
+
+	return (int)list;
+}
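The aligned-address computation above, (addr + align - 1) & ~(align - 1), is the usual power-of-two round-up. A standalone example with hypothetical values:

#include <stdio.h>

int main(void)
{
	unsigned long addr = 0x12345678;   /* hypothetical physical address */
	unsigned long align = 4096;        /* requested alignment */
	unsigned long aligned = (addr + align - 1) & ~(align - 1);

	/* prints: 0x12345678 -> 0x12346000 */
	printf("%#lx -> %#lx\n", addr, aligned);
	return 0;
}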
+
+static pgprot_t pmem_phys_mem_access_prot(struct file *file, pgprot_t vma_prot)
 {
 	int id = get_id(file);
-#ifdef pgprot_noncached
+#ifdef pgprot_writecombine
 	if (pmem[id].cached == 0 || file->f_flags & O_SYNC)
-		return pgprot_noncached(vma_prot);
+		/* on ARMv6 and ARMv7 this expands to Normal Noncached */
+		return pgprot_writecombine(vma_prot);
 #endif
 #ifdef pgprot_ext_buffered
 	else if (pmem[id].buffered)
@@ -453,26 +1307,80 @@
 	return vma_prot;
 }
 
-static unsigned long pmem_start_addr(int id, struct pmem_data *data)
+static unsigned long pmem_start_addr_all_or_nothing(int id,
+		struct pmem_data *data)
 {
-	if (pmem[id].no_allocator)
-		return PMEM_START_ADDR(id, 0);
-	else
-		return PMEM_START_ADDR(id, data->index);
+	return PMEM_START_ADDR(id, 0);
+}
 
+static unsigned long pmem_start_addr_buddy_bestfit(int id,
+		struct pmem_data *data)
+{
+	return PMEM_START_ADDR(id, data->index);
+}
+
+static unsigned long pmem_start_addr_bitmap(int id, struct pmem_data *data)
+{
+	return data->index * pmem[id].quantum + pmem[id].base;
+}
+
+static unsigned long pmem_start_addr_system(int id, struct pmem_data *data)
+{
+	return (unsigned long)(((struct alloc_list *)(data->index))->aaddr);
 }
 
 static void *pmem_start_vaddr(int id, struct pmem_data *data)
 {
-	return pmem_start_addr(id, data) - pmem[id].base + pmem[id].vbase;
+	if (pmem[id].allocator_type == PMEM_ALLOCATORTYPE_SYSTEM)
+		return ((struct alloc_list *)(data->index))->vaddr;
+	else
+		return pmem[id].start_addr(id, data) - pmem[id].base +
+			pmem[id].vbase;
 }
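For the non-system allocators, pmem_start_vaddr() rebases the physical start address from base into the ioremapped kernel mapping at vbase. A standalone sketch of that translation with hypothetical addresses:

#include <stdio.h>

int main(void)
{
	unsigned long base  = 0x10000000;  /* physical region start */
	unsigned long vbase = 0xd0000000;  /* ioremapped kernel base */
	unsigned long paddr = 0x10040000;  /* allocation start */

	/* prints: vaddr 0xd0040000 */
	printf("vaddr %#lx\n", paddr - base + vbase);
	return 0;
}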
 
-static unsigned long pmem_len(int id, struct pmem_data *data)
+static unsigned long pmem_len_all_or_nothing(int id, struct pmem_data *data)
 {
-	if (pmem[id].no_allocator)
-		return data->index;
-	else
-		return PMEM_LEN(id, data->index);
+	return data->index;
+}
+
+static unsigned long pmem_len_buddy_bestfit(int id, struct pmem_data *data)
+{
+	return PMEM_BUDDY_LEN(id, data->index);
+}
+
+static unsigned long pmem_len_bitmap(int id, struct pmem_data *data)
+{
+	int i;
+	unsigned long ret = 0;
+
+	mutex_lock(&pmem[id].arena_mutex);
+
+	for (i = 0; i < pmem[id].allocator.bitmap.bitmap_allocs; i++)
+		if (pmem[id].allocator.bitmap.bitm_alloc[i].bit ==
+				data->index) {
+			ret = pmem[id].allocator.bitmap.bitm_alloc[i].quanta *
+				pmem[id].quantum;
+			break;
+		}
+
+	mutex_unlock(&pmem[id].arena_mutex);
+#if PMEM_DEBUG
+	if (i >= pmem[id].allocator.bitmap.bitmap_allocs)
+		pr_alert("pmem: %s: can't find bitnum %d in "
+			"alloc'd array!\n", __func__, data->index);
+#endif
+	return ret;
+}
+
+static unsigned long pmem_len_system(int id, struct pmem_data *data)
+{
+	unsigned long ret = 0;
+
+	mutex_lock(&pmem[id].arena_mutex);
+
+	ret = ((struct alloc_list *)data->index)->size;
+	mutex_unlock(&pmem[id].arena_mutex);
+
+	return ret;
 }
 
 static int pmem_map_garbage(int id, struct vm_area_struct *vma,
@@ -509,18 +1417,25 @@
 			      struct pmem_data *data, unsigned long offset,
 			      unsigned long len)
 {
+	int ret;
 	DLOG("map offset %lx len %lx\n", offset, len);
 	BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_start));
 	BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_end));
 	BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));
 	BUG_ON(!PMEM_IS_PAGE_ALIGNED(offset));
 
-	if (io_remap_pfn_range(vma, vma->vm_start + offset,
-		(pmem_start_addr(id, data) + offset) >> PAGE_SHIFT,
-		len, vma->vm_page_prot)) {
-		return -EAGAIN;
+	ret = io_remap_pfn_range(vma, vma->vm_start + offset,
+		(pmem[id].start_addr(id, data) + offset) >> PAGE_SHIFT,
+		len, vma->vm_page_prot);
+	if (ret) {
+#if PMEM_DEBUG
+		pr_alert("pmem: %s: io_remap_pfn_range fails with "
+			"return value: %d!\n",	__func__, ret);
+#endif
+
+		ret = -EAGAIN;
 	}
-	return 0;
+	return ret;
 }
 
 static int pmem_remap_pfn_range(int id, struct vm_area_struct *vma,
@@ -538,13 +1453,21 @@
 	struct file *file = vma->vm_file;
 	struct pmem_data *data = file->private_data;
 	int id = get_id(file);
+
+#if PMEM_DEBUG_MSGS
+	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
+#endif
+	DLOG("Dev %s(id: %d) pid %u(%s) ppid %u file %p count %ld\n",
+		get_name(file), id, current->pid,
+		get_task_comm(currtask_name, current),
+		current->parent->pid, file, file_count(file));
 	/* this should never be called as we don't support copying pmem
 	 * ranges via fork */
+	down_read(&data->sem);
 	BUG_ON(!has_allocation(file));
-	down_write(&data->sem);
 	/* remap the garbage pages, forkers don't get access to the data */
 	pmem_unmap_pfn_range(id, vma, data, 0, vma->vm_end - vma->vm_start);
-	up_write(&data->sem);
+	up_read(&data->sem);
 }
 
 static void pmem_vma_close(struct vm_area_struct *vma)
@@ -552,15 +1475,29 @@
 	struct file *file = vma->vm_file;
 	struct pmem_data *data = file->private_data;
 
-	DLOG("current %u ppid %u file %p count %d\n", current->pid,
-	     current->parent->pid, file, file_count(file));
-	if (unlikely(!is_pmem_file(file) || !has_allocation(file))) {
-		printk(KERN_WARNING "pmem: something is very wrong, you are "
+#if PMEM_DEBUG_MSGS
+	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
+#endif
+	DLOG("Dev %s(id: %d) pid %u(%s) ppid %u file %p count %ld\n",
+		get_name(file), get_id(file), current->pid,
+		get_task_comm(currtask_name, current),
+		current->parent->pid, file, file_count(file));
+
+	if (unlikely(!is_pmem_file(file))) {
+		pr_warning("pmem: something is very wrong, you are "
 		       "closing a vm backing an allocation that doesn't "
 		       "exist!\n");
 		return;
 	}
+
 	down_write(&data->sem);
+	if (unlikely(!has_allocation(file))) {
+		up_write(&data->sem);
+		pr_warning("pmem: something is very wrong, you are "
+		       "closing a vm backing an allocation that doesn't "
+		       "exist!\n");
+		return;
+	}
 	if (data->vma == vma) {
 		data->vma = NULL;
 		if ((data->flags & PMEM_FLAGS_CONNECTED) &&
@@ -578,64 +1515,78 @@
 
 static int pmem_mmap(struct file *file, struct vm_area_struct *vma)
 {
-	struct pmem_data *data;
+	struct pmem_data *data = file->private_data;
 	int index;
 	unsigned long vma_size =  vma->vm_end - vma->vm_start;
 	int ret = 0, id = get_id(file);
+#if PMEM_DEBUG_MSGS
+	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
+#endif
 
+	if (!data) {
+		pr_err("pmem: Invalid file descriptor, no private data\n");
+		return -EINVAL;
+	}
+	DLOG("pid %u(%s) mmap vma_size %lu on dev %s(id: %d)\n", current->pid,
+		get_task_comm(currtask_name, current), vma_size,
+		get_name(file), id);
 	if (vma->vm_pgoff || !PMEM_IS_PAGE_ALIGNED(vma_size)) {
 #if PMEM_DEBUG
-		printk(KERN_ERR "pmem: mmaps must be at offset zero, aligned"
+		pr_err("pmem: mmaps must be at offset zero, aligned"
 				" and a multiple of pages_size.\n");
 #endif
 		return -EINVAL;
 	}
 
-	data = (struct pmem_data *)file->private_data;
 	down_write(&data->sem);
 	/* check this file isn't already mmaped, for submaps check this file
 	 * has never been mmaped */
 	if ((data->flags & PMEM_FLAGS_SUBMAP) ||
 	    (data->flags & PMEM_FLAGS_UNSUBMAP)) {
 #if PMEM_DEBUG
-		printk(KERN_ERR "pmem: you can only mmap a pmem file once, "
+		pr_err("pmem: you can only mmap a pmem file once, "
 		       "this file is already mmaped. %x\n", data->flags);
 #endif
 		ret = -EINVAL;
 		goto error;
 	}
 	/* if file->private_data == unalloced, alloc*/
-	if (data && data->index == -1) {
-		down_write(&pmem[id].bitmap_sem);
-		index = pmem_allocate(id, vma->vm_end - vma->vm_start);
-		up_write(&pmem[id].bitmap_sem);
+	if (data->index == -1) {
+		mutex_lock(&pmem[id].arena_mutex);
+		index = pmem[id].allocate(id,
+				vma->vm_end - vma->vm_start,
+				SZ_4K);
+		mutex_unlock(&pmem[id].arena_mutex);
+		/* either no space was available or an error occurred */
+		if (index == -1) {
+			pr_err("pmem: mmap unable to allocate memory"
+				" on %s\n", get_name(file));
+			ret = -ENOMEM;
+			goto error;
+		}
+		/* store the index of a successful allocation */
 		data->index = index;
 	}
-	/* either no space was available or an error occured */
-	if (!has_allocation(file)) {
-		ret = -EINVAL;
-		printk("pmem: could not find allocation for map.\n");
-		goto error;
-	}
 
-	if (pmem_len(id, data) < vma_size) {
+	if (pmem[id].len(id, data) < vma_size) {
 #if PMEM_DEBUG
-		printk(KERN_WARNING "pmem: mmap size [%lu] does not match"
-		       "size of backing region [%lu].\n", vma_size,
-		       pmem_len(id, data));
+		pr_err("pmem: mmap size [%lu] does not match"
+		       " size of backing region [%lu].\n", vma_size,
+		       pmem[id].len(id, data));
 #endif
 		ret = -EINVAL;
 		goto error;
 	}
 
-	vma->vm_pgoff = pmem_start_addr(id, data) >> PAGE_SHIFT;
-	vma->vm_page_prot = pmem_access_prot(file, vma->vm_page_prot);
+	vma->vm_pgoff = pmem[id].start_addr(id, data) >> PAGE_SHIFT;
+
+	vma->vm_page_prot = pmem_phys_mem_access_prot(file, vma->vm_page_prot);
 
 	if (data->flags & PMEM_FLAGS_CONNECTED) {
 		struct pmem_region_node *region_node;
 		struct list_head *elt;
 		if (pmem_map_garbage(id, vma, data, 0, vma_size)) {
-			printk("pmem: mmap failed in kernel!\n");
+			pr_alert("pmem: mmap failed in kernel!\n");
 			ret = -EAGAIN;
 			goto error;
 		}
@@ -663,7 +1614,7 @@
 		     current->pid);
 	} else {
 		if (pmem_map_pfn_range(id, vma, data, 0, vma_size)) {
-			printk(KERN_INFO "pmem: mmap failed in kernel!\n");
+			pr_err("pmem: mmap failed in kernel!\n");
 			ret = -EAGAIN;
 			goto error;
 		}
@@ -681,103 +1632,155 @@
 int get_pmem_user_addr(struct file *file, unsigned long *start,
 		   unsigned long *len)
 {
-	struct pmem_data *data;
-	if (!is_pmem_file(file) || !has_allocation(file)) {
+	int ret = -1;
+
+	if (is_pmem_file(file)) {
+		struct pmem_data *data = file->private_data;
+
+		down_read(&data->sem);
+		if (has_allocation(file)) {
+			if (data->vma) {
+				*start = data->vma->vm_start;
+				*len = data->vma->vm_end - data->vma->vm_start;
+			} else {
+				*start = *len = 0;
 #if PMEM_DEBUG
-		printk(KERN_INFO "pmem: requested pmem data from invalid"
-				  "file.\n");
+				pr_err("pmem: %s: no vma present.\n",
+					__func__);
 #endif
-		return -1;
+			}
+			ret = 0;
+		}
+		up_read(&data->sem);
 	}
-	data = (struct pmem_data *)file->private_data;
-	down_read(&data->sem);
-	if (data->vma) {
-		*start = data->vma->vm_start;
-		*len = data->vma->vm_end - data->vma->vm_start;
-	} else {
-		*start = 0;
-		*len = 0;
-	}
-	up_read(&data->sem);
-	return 0;
+
+#if PMEM_DEBUG
+	if (ret)
+		pr_err("pmem: %s: requested pmem data from invalid"
+			" file.\n", __func__);
+#endif
+	return ret;
 }
 
 int get_pmem_addr(struct file *file, unsigned long *start,
 		  unsigned long *vstart, unsigned long *len)
 {
-	struct pmem_data *data;
-	int id;
+	int ret = -1;
 
-	if (!is_pmem_file(file) || !has_allocation(file)) {
-		return -1;
-	}
+	if (is_pmem_file(file)) {
+		struct pmem_data *data = file->private_data;
 
-	data = (struct pmem_data *)file->private_data;
-	if (data->index == -1) {
+		down_read(&data->sem);
+		if (has_allocation(file)) {
+			int id = get_id(file);
+
+			*start = pmem[id].start_addr(id, data);
+			*len = pmem[id].len(id, data);
+			*vstart = (unsigned long)
+				pmem_start_vaddr(id, data);
+			up_read(&data->sem);
 #if PMEM_DEBUG
-		printk(KERN_INFO "pmem: requested pmem data from file with no "
-		       "allocation.\n");
-		return -1;
+			down_write(&data->sem);
+			data->ref++;
+			up_write(&data->sem);
 #endif
+			DLOG("returning start %#lx len %lu "
+				"vstart %#lx\n",
+				*start, *len, *vstart);
+			ret = 0;
+		} else {
+			up_read(&data->sem);
+		}
 	}
-	id = get_id(file);
-
-	down_read(&data->sem);
-	*start = pmem_start_addr(id, data);
-	*len = pmem_len(id, data);
-	*vstart = (unsigned long)pmem_start_vaddr(id, data);
-	up_read(&data->sem);
-#if PMEM_DEBUG
-	down_write(&data->sem);
-	data->ref++;
-	up_write(&data->sem);
-#endif
-	return 0;
+	return ret;
 }
 
-int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart,
+int get_pmem_file(unsigned int fd, unsigned long *start, unsigned long *vstart,
 		  unsigned long *len, struct file **filp)
 {
-	struct file *file;
+	int ret = -1;
+	struct file *file = fget(fd);
 
-	file = fget(fd);
 	if (unlikely(file == NULL)) {
-		printk(KERN_INFO "pmem: requested data from file descriptor "
-		       "that doesn't exist.");
-		return -1;
+		pr_err("pmem: %s: requested data from file "
+			"descriptor that doesn't exist.\n", __func__);
+	} else {
+#if PMEM_DEBUG_MSGS
+		char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
+#endif
+		DLOG("filp %p rdev %d pid %u(%s) file %p(%ld)"
+			" dev %s(id: %d)\n", filp,
+			file->f_dentry->d_inode->i_rdev,
+			current->pid, get_task_comm(currtask_name, current),
+			file, file_count(file), get_name(file), get_id(file));
+
+		if (!get_pmem_addr(file, start, vstart, len)) {
+			if (filp)
+				*filp = file;
+			ret = 0;
+		} else {
+			fput(file);
+		}
 	}
-
-	if (get_pmem_addr(file, start, vstart, len))
-		goto end;
-
-	if (filp)
-		*filp = file;
-	return 0;
-end:
-	fput(file);
-	return -1;
+	return ret;
 }
+EXPORT_SYMBOL(get_pmem_file);
+
+int get_pmem_fd(int fd, unsigned long *start, unsigned long *len)
+{
+	unsigned long vstart;
+	return get_pmem_file(fd, start, &vstart, len, NULL);
+}
+EXPORT_SYMBOL(get_pmem_fd);
 
 void put_pmem_file(struct file *file)
 {
-	struct pmem_data *data;
-	int id;
-
-	if (!is_pmem_file(file))
-		return;
-	id = get_id(file);
-	data = (struct pmem_data *)file->private_data;
-#if PMEM_DEBUG
-	down_write(&data->sem);
-	if (data->ref == 0) {
-		printk("pmem: pmem_put > pmem_get %s (pid %d)\n",
-		       pmem[id].dev.name, data->pid);
-		BUG();
-	}
-	data->ref--;
-	up_write(&data->sem);
+#if PMEM_DEBUG_MSGS
+	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
 #endif
-	fput(file);
+	DLOG("rdev %d pid %u(%s) file %p(%ld) dev %s(id: %d)\n",
+		file->f_dentry->d_inode->i_rdev, current->pid,
+		get_task_comm(currtask_name, current), file,
+		file_count(file), get_name(file), get_id(file));
+	if (is_pmem_file(file)) {
+#if PMEM_DEBUG
+		struct pmem_data *data = file->private_data;
+
+		down_write(&data->sem);
+		if (!data->ref--) {
+			data->ref++;
+			pr_alert("pmem: pmem_put > pmem_get %s "
+				"(pid %d)\n",
+			       pmem[get_id(file)].dev.name, data->pid);
+			BUG();
+		}
+		up_write(&data->sem);
+#endif
+		fput(file);
+	}
+}
+EXPORT_SYMBOL(put_pmem_file);
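A minimal sketch of how a kernel client might pair these helpers around use of the physical range; the example_use_pmem name and the trimmed error handling are illustrative, not part of this driver:

static int example_use_pmem(unsigned int fd)
{
	unsigned long start, vstart, len;
	struct file *filp;
	int ret;

	ret = get_pmem_file(fd, &start, &vstart, &len, &filp);
	if (ret)
		return ret;

	/* ... program hardware with the physical range [start, start + len) ... */

	put_pmem_file(filp);
	return 0;
}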
+
+void put_pmem_fd(int fd)
+{
+	int put_needed;
+	struct file *file = fget_light(fd, &put_needed);
+
+	if (file) {
+		put_pmem_file(file);
+		fput_light(file, put_needed);
+	}
+}
+
+void flush_pmem_fd(int fd, unsigned long offset, unsigned long len)
+{
+	int fput_needed;
+	struct file *file = fget_light(fd, &fput_needed);
+
+	if (file) {
+		flush_pmem_file(file, offset, len);
+		fput_light(file, fput_needed);
+	}
 }
 
 void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len)
@@ -788,21 +1791,50 @@
 	struct pmem_region_node *region_node;
 	struct list_head *elt;
 	void *flush_start, *flush_end;
-
-	if (!is_pmem_file(file) || !has_allocation(file)) {
+#ifdef CONFIG_OUTER_CACHE
+	unsigned long phy_start, phy_end;
+#endif
+	if (!is_pmem_file(file))
 		return;
-	}
 
 	id = get_id(file);
-	data = (struct pmem_data *)file->private_data;
-	if (!pmem[id].cached || file->f_flags & O_SYNC)
+	if (!pmem[id].cached)
 		return;
 
+	/* is_pmem_file fails if !file */
+	data = file->private_data;
+
 	down_read(&data->sem);
+	if (!has_allocation(file))
+		goto end;
+
 	vaddr = pmem_start_vaddr(id, data);
+
+	if (pmem[id].allocator_type == PMEM_ALLOCATORTYPE_SYSTEM) {
+		dmac_flush_range(vaddr,
+			(void *)((unsigned long)vaddr +
+				 ((struct alloc_list *)(data->index))->size));
+#ifdef CONFIG_OUTER_CACHE
+		phy_start = pmem_start_addr_system(id, data);
+
+		phy_end = phy_start +
+			((struct alloc_list *)(data->index))->size;
+
+		outer_flush_range(phy_start, phy_end);
+#endif
+		goto end;
+	}
 	/* if this isn't a submmapped file, flush the whole thing */
 	if (unlikely(!(data->flags & PMEM_FLAGS_CONNECTED))) {
-		dmac_flush_range(vaddr, vaddr + pmem_len(id, data));
+		dmac_flush_range(vaddr, vaddr + pmem[id].len(id, data));
+#ifdef CONFIG_OUTER_CACHE
+		phy_start = (unsigned long)vaddr -
+				(unsigned long)pmem[id].vbase + pmem[id].base;
+
+		phy_end  =  phy_start + pmem[id].len(id, data);
+
+		outer_flush_range(phy_start, phy_end);
+#endif
 		goto end;
 	}
 	/* otherwise, flush the region of the file we are drawing */
@@ -814,6 +1846,15 @@
 			flush_start = vaddr + region_node->region.offset;
 			flush_end = flush_start + region_node->region.len;
 			dmac_flush_range(flush_start, flush_end);
+#ifdef CONFIG_OUTER_CACHE
+
+			phy_start = (unsigned long)flush_start -
+				(unsigned long)pmem[id].vbase + pmem[id].base;
+
+			phy_end  =  phy_start + region_node->region.len;
+
+			outer_flush_range(phy_start, phy_end);
+#endif
 			break;
 		}
 	}
@@ -821,45 +1862,145 @@
 	up_read(&data->sem);
 }
 
+int pmem_cache_maint(struct file *file, unsigned int cmd,
+		struct pmem_addr *pmem_addr)
+{
+	struct pmem_data *data;
+	int id;
+	unsigned long vaddr, paddr, length, offset,
+		      pmem_len, pmem_start_addr;
+
+	/* Called from kernel-space so file may be NULL */
+	if (!file)
+		return -EBADF;
+
+	data = file->private_data;
+	id = get_id(file);
+
+	if (!pmem[id].cached)
+		return 0;
+
+	offset = pmem_addr->offset;
+	length = pmem_addr->length;
+
+	down_read(&data->sem);
+	if (!has_allocation(file)) {
+		up_read(&data->sem);
+		return -EINVAL;
+	}
+	pmem_len = pmem[id].len(id, data);
+	pmem_start_addr = pmem[id].start_addr(id, data);
+	up_read(&data->sem);
+
+	if (offset + length > pmem_len)
+		return -EINVAL;
+
+	vaddr = pmem_addr->vaddr;
+	paddr = pmem_start_addr + offset;
+
+	DLOG("pmem cache maint on dev %s(id: %d)"
+		"(vaddr %lx paddr %lx len %lu bytes)\n",
+		get_name(file), id, vaddr, paddr, length);
+	if (cmd == PMEM_CLEAN_INV_CACHES)
+		clean_and_invalidate_caches(vaddr,
+				length, paddr);
+	else if (cmd == PMEM_CLEAN_CACHES)
+		clean_caches(vaddr, length, paddr);
+	else if (cmd == PMEM_INV_CACHES)
+		invalidate_caches(vaddr, length, paddr);
+
+	return 0;
+}
+EXPORT_SYMBOL(pmem_cache_maint);
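A hedged sketch of a kernel-side caller filling struct pmem_addr and requesting a clean-and-invalidate over a sub-range; the wrapper name and its arguments are illustrative, not part of this driver:

static int example_clean_inv(struct file *pmem_file, unsigned long user_vaddr,
			     unsigned long offset, unsigned long bytes)
{
	struct pmem_addr addr;

	addr.vaddr = user_vaddr;
	addr.offset = offset;
	addr.length = bytes;

	return pmem_cache_maint(pmem_file, PMEM_CLEAN_INV_CACHES, &addr);
}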
+
 static int pmem_connect(unsigned long connect, struct file *file)
 {
-	struct pmem_data *data = (struct pmem_data *)file->private_data;
-	struct pmem_data *src_data;
-	struct file *src_file;
 	int ret = 0, put_needed;
+	struct file *src_file;
 
-	down_write(&data->sem);
-	/* retrieve the src file and check it is a pmem file with an alloc */
+	if (!file) {
+		pr_err("pmem: %s: NULL file pointer passed in, "
+			"bailing out!\n", __func__);
+		ret = -EINVAL;
+		goto leave;
+	}
+
 	src_file = fget_light(connect, &put_needed);
-	DLOG("connect %p to %p\n", file, src_file);
+
 	if (!src_file) {
-		printk("pmem: src file not found!\n");
-		ret = -EINVAL;
-		goto err_no_file;
+		pr_err("pmem: %s: src file not found!\n", __func__);
+		ret = -EBADF;
+		goto leave;
 	}
-	if (unlikely(!is_pmem_file(src_file) || !has_allocation(src_file))) {
-		printk(KERN_INFO "pmem: src file is not a pmem file or has no "
-		       "alloc!\n");
-		ret = -EINVAL;
-		goto err_bad_file;
-	}
-	src_data = (struct pmem_data *)src_file->private_data;
 
-	if (has_allocation(file) && (data->index != src_data->index)) {
-		printk("pmem: file is already mapped but doesn't match this"
-		       " src_file!\n");
+	if (src_file == file) { /* degenerate case, operator error */
+		pr_err("pmem: %s: src_file and passed in file are "
+			"the same; refusing to connect to self!\n", __func__);
 		ret = -EINVAL;
-		goto err_bad_file;
+		goto put_src_file;
 	}
-	data->index = src_data->index;
-	data->flags |= PMEM_FLAGS_CONNECTED;
-	data->master_fd = connect;
-	data->master_file = src_file;
 
-err_bad_file:
+	if (unlikely(!is_pmem_file(src_file))) {
+		pr_err("pmem: %s: src file is not a pmem file!\n",
+			__func__);
+		ret = -EINVAL;
+		goto put_src_file;
+	} else {
+		struct pmem_data *src_data = src_file->private_data;
+
+		if (!src_data) {
+			pr_err("pmem: %s: src file pointer has no"
+				" private data, bailing out!\n", __func__);
+			ret = -EINVAL;
+			goto put_src_file;
+		}
+
+		down_read(&src_data->sem);
+
+		if (unlikely(!has_allocation(src_file))) {
+			up_read(&src_data->sem);
+			pr_err("pmem: %s: src file has no allocation!\n",
+				__func__);
+			ret = -EINVAL;
+		} else {
+			struct pmem_data *data;
+			int src_index = src_data->index;
+
+			up_read(&src_data->sem);
+
+			data = file->private_data;
+			if (!data) {
+				pr_err("pmem: %s: passed in file "
+					"pointer has no private data, bailing"
+					" out!\n", __func__);
+				ret = -EINVAL;
+				goto put_src_file;
+			}
+
+			down_write(&data->sem);
+			if (has_allocation(file) &&
+					(data->index != src_index)) {
+				up_write(&data->sem);
+
+				pr_err("pmem: %s: file is already "
+					"mapped but doesn't match this "
+					"src_file!\n", __func__);
+				ret = -EINVAL;
+			} else {
+				data->index = src_index;
+				data->flags |= PMEM_FLAGS_CONNECTED;
+				data->master_fd = connect;
+				data->master_file = src_file;
+
+				up_write(&data->sem);
+
+				DLOG("connect %p to %p\n", file, src_file);
+			}
+		}
+	}
+put_src_file:
 	fput_light(src_file, put_needed);
-err_no_file:
-	up_write(&data->sem);
+leave:
 	return ret;
 }
 
@@ -878,16 +2019,23 @@
 {
 	int ret = 0;
 	struct mm_struct *mm = NULL;
+#if PMEM_DEBUG_MSGS
+	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
+#endif
+	DLOG("pid %u(%s) file %p(%ld)\n",
+		current->pid, get_task_comm(currtask_name, current),
+		file, file_count(file));
+
 	*locked_mm = NULL;
 lock_mm:
 	down_read(&data->sem);
 	if (PMEM_IS_SUBMAP(data)) {
 		mm = get_task_mm(data->task);
 		if (!mm) {
-#if PMEM_DEBUG
-			printk("pmem: can't remap task is gone!\n");
-#endif
 			up_read(&data->sem);
+#if PMEM_DEBUG
+			pr_alert("pmem: can't remap - task is gone!\n");
+#endif
 			return -1;
 		}
 	}
@@ -902,7 +2050,7 @@
 	 * once */
 	if (PMEM_IS_SUBMAP(data) && !mm) {
 		pmem_unlock_data_and_mm(data, mm);
-		up_write(&data->sem);
+		DLOG("mapping contention, repeating mmap op\n");
 		goto lock_mm;
 	}
 	/* now check that vma.mm is still there, it could have been
@@ -916,6 +2064,9 @@
 			data->flags &= ~(PMEM_FLAGS_SUBMAP);
 		}
 		pmem_unlock_data_and_mm(data, mm);
+#if PMEM_DEBUG
+		pr_alert("pmem: vma.mm went away!\n");
+#endif
 		return -1;
 	}
 	*locked_mm = mm;
@@ -930,14 +2081,28 @@
 	struct mm_struct *mm = NULL;
 	struct list_head *elt, *elt2;
 	int id = get_id(file);
-	struct pmem_data *data = (struct pmem_data *)file->private_data;
+	struct pmem_data *data;
+
+	DLOG("operation %#x, region offset %ld, region len %ld\n",
+		operation, region->offset, region->len);
+
+	if (!is_pmem_file(file)) {
+#if PMEM_DEBUG
+		pr_err("pmem: remap request for non-pmem file descriptor\n");
+#endif
+		return -EINVAL;
+	}
+
+	/* is_pmem_file fails if !file */
+	data = file->private_data;
 
 	/* pmem region must be aligned on a page boundary */
 	if (unlikely(!PMEM_IS_PAGE_ALIGNED(region->offset) ||
 		 !PMEM_IS_PAGE_ALIGNED(region->len))) {
 #if PMEM_DEBUG
-		printk("pmem: request for unaligned pmem suballocation "
-		       "%lx %lx\n", region->offset, region->len);
+		pr_err("pmem: request for unaligned pmem"
+			" suballocation %lx %lx\n",
+			region->offset, region->len);
 #endif
 		return -EINVAL;
 	}
@@ -955,18 +2120,18 @@
 	 * that back in it */
 	if (!is_master_owner(file)) {
 #if PMEM_DEBUG
-		printk("pmem: remap requested from non-master process\n");
+		pr_err("pmem: remap requested from non-master process\n");
 #endif
 		ret = -EINVAL;
 		goto err;
 	}
 
 	/* check that the requested range is within the src allocation */
-	if (unlikely((region->offset > pmem_len(id, data)) ||
-		     (region->len > pmem_len(id, data)) ||
-		     (region->offset + region->len > pmem_len(id, data)))) {
+	if (unlikely((region->offset > pmem[id].len(id, data)) ||
+		     (region->len > pmem[id].len(id, data)) ||
+		     (region->offset + region->len > pmem[id].len(id, data)))) {
 #if PMEM_DEBUG
-		printk(KERN_INFO "pmem: suballoc doesn't fit in src_file!\n");
+		pr_err("pmem: suballoc doesn't fit in src_file!\n");
 #endif
 		ret = -EINVAL;
 		goto err;
@@ -978,7 +2143,7 @@
 		if (!region_node) {
 			ret = -ENOMEM;
 #if PMEM_DEBUG
-			printk(KERN_INFO "No space to allocate metadata!");
+			pr_alert("pmem: No space to allocate remap metadata!");
 #endif
 			goto err;
 		}
@@ -999,8 +2164,8 @@
 		}
 		if (!found) {
 #if PMEM_DEBUG
-			printk("pmem: Unmap region does not map any mapped "
-				"region!");
+			pr_err("pmem: Unmap region does not map any"
+				" mapped region!");
 #endif
 			ret = -EINVAL;
 			goto err;
@@ -1010,10 +2175,10 @@
 	if (data->vma && PMEM_IS_SUBMAP(data)) {
 		if (operation == PMEM_MAP)
 			ret = pmem_remap_pfn_range(id, data->vma, data,
-						   region->offset, region->len);
+				   region->offset, region->len);
 		else if (operation == PMEM_UNMAP)
 			ret = pmem_unmap_pfn_range(id, data->vma, data,
-						   region->offset, region->len);
+				   region->offset, region->len);
 	}
 
 err:
@@ -1054,63 +2219,83 @@
 
 static void pmem_get_size(struct pmem_region *region, struct file *file)
 {
-	struct pmem_data *data = (struct pmem_data *)file->private_data;
+	/* called via ioctl file op, so file guaranteed to be not NULL */
+	struct pmem_data *data = file->private_data;
 	int id = get_id(file);
 
+	down_read(&data->sem);
 	if (!has_allocation(file)) {
 		region->offset = 0;
 		region->len = 0;
-		return;
 	} else {
-		region->offset = pmem_start_addr(id, data);
-		region->len = pmem_len(id, data);
+		region->offset = pmem[id].start_addr(id, data);
+		region->len = pmem[id].len(id, data);
 	}
-	DLOG("offset %lx len %lx\n", region->offset, region->len);
+	up_read(&data->sem);
+	DLOG("offset 0x%lx len 0x%lx\n", region->offset, region->len);
 }
 
 
 static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
-	struct pmem_data *data;
+	/* called from user space as file op, so file guaranteed to be not
+	 * NULL
+	 */
+	struct pmem_data *data = file->private_data;
 	int id = get_id(file);
+#if PMEM_DEBUG_MSGS
+	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
+#endif
+
+	DLOG("pid %u(%s) file %p(%ld) cmd %#x, dev %s(id: %d)\n",
+		current->pid, get_task_comm(currtask_name, current),
+		file, file_count(file), cmd, get_name(file), id);
 
 	switch (cmd) {
 	case PMEM_GET_PHYS:
 		{
 			struct pmem_region region;
+
 			DLOG("get_phys\n");
+			down_read(&data->sem);
 			if (!has_allocation(file)) {
 				region.offset = 0;
 				region.len = 0;
 			} else {
-				data = (struct pmem_data *)file->private_data;
-				region.offset = pmem_start_addr(id, data);
-				region.len = pmem_len(id, data);
+				region.offset = pmem[id].start_addr(id, data);
+				region.len = pmem[id].len(id, data);
 			}
-			printk(KERN_INFO "pmem: request for physical address of pmem region "
-					"from process %d.\n", current->pid);
+			up_read(&data->sem);
+
 			if (copy_to_user((void __user *)arg, &region,
 						sizeof(struct pmem_region)))
 				return -EFAULT;
+
+			DLOG("pmem: successful request for "
+				"physical address of pmem region id %d, "
+				"offset 0x%lx, len 0x%lx\n",
+				id, region.offset, region.len);
+
 			break;
 		}
 	case PMEM_MAP:
 		{
 			struct pmem_region region;
+			DLOG("map\n");
 			if (copy_from_user(&region, (void __user *)arg,
 						sizeof(struct pmem_region)))
 				return -EFAULT;
-			data = (struct pmem_data *)file->private_data;
 			return pmem_remap(&region, file, PMEM_MAP);
 		}
 		break;
 	case PMEM_UNMAP:
 		{
 			struct pmem_region region;
+			DLOG("unmap\n");
 			if (copy_from_user(&region, (void __user *)arg,
 						sizeof(struct pmem_region)))
 				return -EFAULT;
-			data = (struct pmem_data *)file->private_data;
 			return pmem_remap(&region, file, PMEM_UNMAP);
 			break;
 		}
@@ -1136,169 +2321,369 @@
 				return -EFAULT;
 			break;
 		}
+	case PMEM_GET_FREE_SPACE:
+		{
+			struct pmem_freespace fs;
+			DLOG("get freespace on %s(id: %d)\n",
+				get_name(file), id);
+
+			mutex_lock(&pmem[id].arena_mutex);
+			pmem[id].free_space(id, &fs);
+			mutex_unlock(&pmem[id].arena_mutex);
+
+			DLOG("%s(id: %d) total free %lu, largest %lu\n",
+				get_name(file), id, fs.total, fs.largest);
+
+			if (copy_to_user((void __user *)arg, &fs,
+				sizeof(struct pmem_freespace)))
+				return -EFAULT;
+			break;
+		}
+
 	case PMEM_ALLOCATE:
 		{
-			if (has_allocation(file))
+			int ret = 0;
+			DLOG("allocate, id %d\n", id);
+			down_write(&data->sem);
+			if (has_allocation(file)) {
+				pr_err("pmem: Existing allocation found on "
+					"this file descriptor\n");
+				up_write(&data->sem);
 				return -EINVAL;
-			data = (struct pmem_data *)file->private_data;
-			data->index = pmem_allocate(id, arg);
-			break;
+			}
+
+			mutex_lock(&pmem[id].arena_mutex);
+			data->index = pmem[id].allocate(id,
+					arg,
+					SZ_4K);
+			mutex_unlock(&pmem[id].arena_mutex);
+			ret = data->index == -1 ? -ENOMEM :
+				data->index;
+			up_write(&data->sem);
+			return ret;
+		}
+	case PMEM_ALLOCATE_ALIGNED:
+		{
+			struct pmem_allocation alloc;
+			int ret = 0;
+
+			if (copy_from_user(&alloc, (void __user *)arg,
+						sizeof(struct pmem_allocation)))
+				return -EFAULT;
+			DLOG("allocate id align %d %u\n", id, alloc.align);
+			down_write(&data->sem);
+			if (has_allocation(file)) {
+				pr_err("pmem: Existing allocation found on "
+					"this file descriptor\n");
+				up_write(&data->sem);
+				return -EINVAL;
+			}
+
+			if (alloc.align & (alloc.align - 1)) {
+				pr_err("pmem: Alignment is not a power of 2\n");
+				up_write(&data->sem);
+				return -EINVAL;
+			}
+
+			if (alloc.align != SZ_4K &&
+					(pmem[id].allocator_type !=
+						PMEM_ALLOCATORTYPE_BITMAP)) {
+				pr_err("pmem: Non 4k alignment requires bitmap"
+					" allocator on %s\n", pmem[id].name);
+				up_write(&data->sem);
+				return -EINVAL;
+			}
+
+			if (alloc.align > SZ_1M ||
+				alloc.align < SZ_4K) {
+				pr_err("pmem: Invalid Alignment (%u) "
+					"specified\n", alloc.align);
+				up_write(&data->sem);
+				return -EINVAL;
+			}
+
+			mutex_lock(&pmem[id].arena_mutex);
+			data->index = pmem[id].allocate(id,
+					alloc.size,
+					alloc.align);
+			mutex_unlock(&pmem[id].arena_mutex);
+			ret = data->index == -1 ? -ENOMEM :
+				data->index;
+			up_write(&data->sem);
+			return ret;
 		}
 	case PMEM_CONNECT:
 		DLOG("connect\n");
 		return pmem_connect(arg, file);
-		break;
-	case PMEM_CACHE_FLUSH:
+	case PMEM_CLEAN_INV_CACHES:
+	case PMEM_CLEAN_CACHES:
+	case PMEM_INV_CACHES:
 		{
-			struct pmem_region region;
-			DLOG("flush\n");
-			if (copy_from_user(&region, (void __user *)arg,
-					   sizeof(struct pmem_region)))
+			struct pmem_addr pmem_addr;
+
+			if (copy_from_user(&pmem_addr, (void __user *)arg,
+						sizeof(struct pmem_addr)))
 				return -EFAULT;
-			flush_pmem_file(file, region.offset, region.len);
-			break;
+
+			return pmem_cache_maint(file, cmd, &pmem_addr);
 		}
 	default:
 		if (pmem[id].ioctl)
 			return pmem[id].ioctl(file, cmd, arg);
+
+		DLOG("ioctl invalid (%#x)\n", cmd);
 		return -EINVAL;
 	}
 	return 0;
 }
 
-#if PMEM_DEBUG
-static ssize_t debug_open(struct inode *inode, struct file *file)
+static void ioremap_pmem(int id)
 {
-	file->private_data = inode->i_private;
-	return 0;
-}
-
-static ssize_t debug_read(struct file *file, char __user *buf, size_t count,
-			  loff_t *ppos)
-{
-	struct list_head *elt, *elt2;
-	struct pmem_data *data;
-	struct pmem_region_node *region_node;
-	int id = (int)file->private_data;
-	const int debug_bufmax = 4096;
-	static char buffer[4096];
-	int n = 0;
-
-	DLOG("debug open\n");
-	n = scnprintf(buffer, debug_bufmax,
-		      "pid #: mapped regions (offset, len) (offset,len)...\n");
-
-	mutex_lock(&pmem[id].data_list_lock);
-	list_for_each(elt, &pmem[id].data_list) {
-		data = list_entry(elt, struct pmem_data, list);
-		down_read(&data->sem);
-		n += scnprintf(buffer + n, debug_bufmax - n, "pid %u:",
-				data->pid);
-		list_for_each(elt2, &data->region_list) {
-			region_node = list_entry(elt2, struct pmem_region_node,
-				      list);
-			n += scnprintf(buffer + n, debug_bufmax - n,
-					"(%lx,%lx) ",
-					region_node->region.offset,
-					region_node->region.len);
-		}
-		n += scnprintf(buffer + n, debug_bufmax - n, "\n");
-		up_read(&data->sem);
-	}
-	mutex_unlock(&pmem[id].data_list_lock);
-
-	n++;
-	buffer[n] = 0;
-	return simple_read_from_buffer(buf, count, ppos, buffer, n);
-}
-
-static struct file_operations debug_fops = {
-	.read = debug_read,
-	.open = debug_open,
-};
+	if (pmem[id].cached)
+		pmem[id].vbase = ioremap_cached(pmem[id].base, pmem[id].size);
+#ifdef ioremap_ext_buffered
+	else if (pmem[id].buffered)
+		pmem[id].vbase = ioremap_ext_buffered(pmem[id].base,
+					pmem[id].size);
 #endif
-
-#if 0
-static struct miscdevice pmem_dev = {
-	.name = "pmem",
-	.fops = &pmem_fops,
-};
-#endif
+	else
+		pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size);
+}
 
 int pmem_setup(struct android_pmem_platform_data *pdata,
 	       long (*ioctl)(struct file *, unsigned int, unsigned long),
 	       int (*release)(struct inode *, struct file *))
 {
-	int err = 0;
-	int i, index = 0;
-	int id = id_count;
-	id_count++;
+	int i, index = 0, id;
 
-	pmem[id].no_allocator = pdata->no_allocator;
+	if (id_count >= PMEM_MAX_DEVICES) {
+		pr_alert("pmem: %s: unable to register driver(%s) - no more "
+			"devices available!\n", __func__, pdata->name);
+		goto err_no_mem;
+	}
+
+	if (!pdata->size) {
+		pr_alert("pmem: %s: unable to register pmem driver(%s) - zero "
+			"size passed in!\n", __func__, pdata->name);
+		goto err_no_mem;
+	}
+
+	id = id_count++;
+
+	pmem[id].id = id;
+
+	if (pmem[id].allocate) {
+		pr_alert("pmem: %s: unable to register pmem driver - "
+			"duplicate registration of %s!\n",
+			__func__, pdata->name);
+		goto err_no_mem;
+	}
+
+	pmem[id].allocator_type = pdata->allocator_type;
+
+	/* 'quantum' is a "hidden" variable that defaults to 0 in the board
+	 * files */
+	pmem[id].quantum = pdata->quantum ?: PMEM_MIN_ALLOC;
+	if (pmem[id].quantum < PMEM_MIN_ALLOC ||
+		!is_power_of_2(pmem[id].quantum)) {
+		pr_alert("pmem: %s: unable to register pmem driver %s - "
+			"invalid quantum value (%#x)!\n",
+			__func__, pdata->name, pmem[id].quantum);
+		goto err_reset_pmem_info;
+	}
+
+	if (pdata->size % pmem[id].quantum) {
+		/* bad alignment for size! */
+		pr_alert("pmem: %s: Unable to register driver %s - "
+			"memory region size (%#lx) is not a multiple of "
+			"quantum size(%#x)!\n", __func__, pdata->name,
+			pdata->size, pmem[id].quantum);
+		goto err_reset_pmem_info;
+	}
+
 	pmem[id].cached = pdata->cached;
 	pmem[id].buffered = pdata->buffered;
-	pmem[id].base = pdata->start;
 	pmem[id].size = pdata->size;
+	pmem[id].memory_type = pdata->memory_type;
+	strlcpy(pmem[id].name, pdata->name, PMEM_NAME_SIZE);
+
+	pmem[id].num_entries = pmem[id].size / pmem[id].quantum;
+
+	memset(&pmem[id].kobj, 0, sizeof(pmem[0].kobj));
+	pmem[id].kobj.kset = pmem_kset;
+
+	switch (pmem[id].allocator_type) {
+	case PMEM_ALLOCATORTYPE_ALLORNOTHING:
+		pmem[id].allocate = pmem_allocator_all_or_nothing;
+		pmem[id].free = pmem_free_all_or_nothing;
+		pmem[id].free_space = pmem_free_space_all_or_nothing;
+		pmem[id].len = pmem_len_all_or_nothing;
+		pmem[id].start_addr = pmem_start_addr_all_or_nothing;
+		pmem[id].num_entries = 1;
+		pmem[id].quantum = pmem[id].size;
+		pmem[id].allocator.all_or_nothing.allocated = 0;
+
+		if (kobject_init_and_add(&pmem[id].kobj,
+				&pmem_allornothing_ktype, NULL,
+				"%s", pdata->name))
+			goto out_put_kobj;
+
+		break;
+
+	case PMEM_ALLOCATORTYPE_BUDDYBESTFIT:
+		pmem[id].allocator.buddy_bestfit.buddy_bitmap = kmalloc(
+			pmem[id].num_entries * sizeof(struct pmem_bits),
+			GFP_KERNEL);
+		if (!pmem[id].allocator.buddy_bestfit.buddy_bitmap)
+			goto err_reset_pmem_info;
+
+		memset(pmem[id].allocator.buddy_bestfit.buddy_bitmap, 0,
+			sizeof(struct pmem_bits) * pmem[id].num_entries);
+
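+		/*
+		 * Seed the buddy metadata: each bit set in num_entries
+		 * becomes one initial free block of that order, with
+		 * PMEM_BUDDY_NEXT_INDEX advancing to where the next block
+		 * starts.
+		 */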
+		for (i = sizeof(pmem[id].num_entries) * 8 - 1; i >= 0; i--) {
+			if (pmem[id].num_entries & (1 << i)) {
+				PMEM_BUDDY_ORDER(id, index) = i;
+				index = PMEM_BUDDY_NEXT_INDEX(id, index);
+			}
+		}
+		pmem[id].allocate = pmem_allocator_buddy_bestfit;
+		pmem[id].free = pmem_free_buddy_bestfit;
+		pmem[id].free_space = pmem_free_space_buddy_bestfit;
+		pmem[id].len = pmem_len_buddy_bestfit;
+		pmem[id].start_addr = pmem_start_addr_buddy_bestfit;
+		if (kobject_init_and_add(&pmem[id].kobj,
+				&pmem_buddy_bestfit_ktype, NULL,
+				"%s", pdata->name))
+			goto out_put_kobj;
+
+		break;
+
+	case PMEM_ALLOCATORTYPE_BITMAP: /* 0, default if not explicit */
+		pmem[id].allocator.bitmap.bitm_alloc = kmalloc(
+			PMEM_INITIAL_NUM_BITMAP_ALLOCATIONS *
+				sizeof(*pmem[id].allocator.bitmap.bitm_alloc),
+			GFP_KERNEL);
+		if (!pmem[id].allocator.bitmap.bitm_alloc) {
+			pr_alert("pmem: %s: Unable to register pmem "
+					"driver %s - can't allocate "
+					"bitm_alloc!\n",
+					__func__, pdata->name);
+			goto err_reset_pmem_info;
+		}
+
+		if (kobject_init_and_add(&pmem[id].kobj,
+				&pmem_bitmap_ktype, NULL,
+				"%s", pdata->name))
+			goto out_put_kobj;
+
+		for (i = 0; i < PMEM_INITIAL_NUM_BITMAP_ALLOCATIONS; i++) {
+			pmem[id].allocator.bitmap.bitm_alloc[i].bit = -1;
+			pmem[id].allocator.bitmap.bitm_alloc[i].quanta = 0;
+		}
+
+		pmem[id].allocator.bitmap.bitmap_allocs =
+			PMEM_INITIAL_NUM_BITMAP_ALLOCATIONS;
+
+		pmem[id].allocator.bitmap.bitmap =
+			kcalloc((pmem[id].num_entries + 31) / 32,
+				sizeof(unsigned int), GFP_KERNEL);
+		if (!pmem[id].allocator.bitmap.bitmap) {
+			pr_alert("pmem: %s: Unable to register pmem "
+				"driver - can't allocate bitmap!\n",
+				__func__);
+			goto err_cant_register_device;
+		}
+		pmem[id].allocator.bitmap.bitmap_free = pmem[id].num_entries;
+
+		pmem[id].allocate = pmem_allocator_bitmap;
+		pmem[id].free = pmem_free_bitmap;
+		pmem[id].free_space = pmem_free_space_bitmap;
+		pmem[id].len = pmem_len_bitmap;
+		pmem[id].start_addr = pmem_start_addr_bitmap;
+
+		DLOG("bitmap allocator id %d (%s), num_entries %u, raw size "
+			"%lu, quanta size %u\n",
+			id, pdata->name, pmem[id].allocator.bitmap.bitmap_free,
+			pmem[id].size, pmem[id].quantum);
+		break;
+
+	case PMEM_ALLOCATORTYPE_SYSTEM:
+
+		INIT_LIST_HEAD(&pmem[id].allocator.system_mem.alist);
+
+		pmem[id].allocator.system_mem.used = 0;
+		pmem[id].vbase = NULL;
+
+		if (kobject_init_and_add(&pmem[id].kobj,
+				&pmem_system_ktype, NULL,
+				"%s", pdata->name))
+			goto out_put_kobj;
+
+		pmem[id].allocate = pmem_allocator_system;
+		pmem[id].free = pmem_free_system;
+		pmem[id].free_space = pmem_free_space_system;
+		pmem[id].len = pmem_len_system;
+		pmem[id].start_addr = pmem_start_addr_system;
+		pmem[id].num_entries = 0;
+		pmem[id].quantum = PAGE_SIZE;
+
+		DLOG("system allocator id %d (%s), raw size %lu\n",
+			id, pdata->name, pmem[id].size);
+		break;
+
+	default:
+		pr_alert("Invalid allocator type (%d) for pmem driver\n",
+			pdata->allocator_type);
+		goto err_reset_pmem_info;
+	}
+
 	pmem[id].ioctl = ioctl;
 	pmem[id].release = release;
-	init_rwsem(&pmem[id].bitmap_sem);
-	mutex_init(&pmem[id].data_list_lock);
+	mutex_init(&pmem[id].arena_mutex);
+	mutex_init(&pmem[id].data_list_mutex);
 	INIT_LIST_HEAD(&pmem[id].data_list);
+
 	pmem[id].dev.name = pdata->name;
 	pmem[id].dev.minor = id;
 	pmem[id].dev.fops = &pmem_fops;
-	printk(KERN_INFO "%s: %d init\n", pdata->name, pdata->cached);
+	pr_info("pmem: Initializing %s (user-space) as %s\n",
+		pdata->name, pdata->cached ? "cached" : "non-cached");
 
-	err = misc_register(&pmem[id].dev);
-	if (err) {
-		printk(KERN_ALERT "Unable to register pmem driver!\n");
+	if (misc_register(&pmem[id].dev)) {
+		pr_alert("Unable to register pmem driver!\n");
 		goto err_cant_register_device;
 	}
-	pmem[id].num_entries = pmem[id].size / PMEM_MIN_ALLOC;
 
-	pmem[id].bitmap = kmalloc(pmem[id].num_entries *
-				  sizeof(struct pmem_bits), GFP_KERNEL);
-	if (!pmem[id].bitmap)
-		goto err_no_mem_for_metadata;
+	pmem[id].base = allocate_contiguous_memory_nomap(pmem[id].size,
+		pmem[id].memory_type, PAGE_SIZE);
 
-	memset(pmem[id].bitmap, 0, sizeof(struct pmem_bits) *
-					  pmem[id].num_entries);
-
-	for (i = sizeof(pmem[id].num_entries) * 8 - 1; i >= 0; i--) {
-		if ((pmem[id].num_entries) &  1<<i) {
-			PMEM_ORDER(id, index) = i;
-			index = PMEM_NEXT_INDEX(id, index);
+	if (pmem[id].allocator_type != PMEM_ALLOCATORTYPE_SYSTEM) {
+		ioremap_pmem(id);
+		if (pmem[id].vbase == 0) {
+			pr_err("pmem: ioremap failed for device %s\n",
+				pmem[id].name);
+			goto error_cant_remap;
 		}
 	}
 
-	if (pmem[id].cached)
-		pmem[id].vbase = ioremap_cached(pmem[id].base,
-						pmem[id].size);
-#ifdef ioremap_ext_buffered
-	else if (pmem[id].buffered)
-		pmem[id].vbase = ioremap_ext_buffered(pmem[id].base,
-						      pmem[id].size);
-#endif
-	else
-		pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size);
-
-	if (pmem[id].vbase == 0)
-		goto error_cant_remap;
+	pr_info("allocating %lu bytes at %p (%lx physical) for %s\n",
+		pmem[id].size, pmem[id].vbase, pmem[id].base, pmem[id].name);
 
 	pmem[id].garbage_pfn = page_to_pfn(alloc_page(GFP_KERNEL));
-	if (pmem[id].no_allocator)
-		pmem[id].allocated = 0;
 
-#if PMEM_DEBUG
-	debugfs_create_file(pdata->name, S_IFREG | S_IRUGO, NULL, (void *)id,
-			    &debug_fops);
-#endif
 	return 0;
+
 error_cant_remap:
-	kfree(pmem[id].bitmap);
-err_no_mem_for_metadata:
 	misc_deregister(&pmem[id].dev);
 err_cant_register_device:
+out_put_kobj:
+	kobject_put(&pmem[id].kobj);
+	if (pmem[id].allocator_type == PMEM_ALLOCATORTYPE_BUDDYBESTFIT)
+		kfree(pmem[id].allocator.buddy_bestfit.buddy_bitmap);
+	else if (pmem[id].allocator_type == PMEM_ALLOCATORTYPE_BITMAP) {
+		kfree(pmem[id].allocator.bitmap.bitmap);
+		kfree(pmem[id].allocator.bitmap.bitm_alloc);
+	}
+err_reset_pmem_info:
+	pmem[id].allocate = 0;
+	pmem[id].dev.minor = -1;
+err_no_mem:
 	return -1;
 }
 
@@ -1307,31 +2692,62 @@
 	struct android_pmem_platform_data *pdata;
 
 	if (!pdev || !pdev->dev.platform_data) {
-		printk(KERN_ALERT "Unable to probe pmem!\n");
+		pr_alert("Unable to probe pmem!\n");
 		return -1;
 	}
 	pdata = pdev->dev.platform_data;
+
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
 	return pmem_setup(pdata, NULL, NULL);
 }
 
-
 static int pmem_remove(struct platform_device *pdev)
 {
 	int id = pdev->id;
 	__free_page(pfn_to_page(pmem[id].garbage_pfn));
+	pm_runtime_disable(&pdev->dev);
 	misc_deregister(&pmem[id].dev);
 	return 0;
 }
 
+static int pmem_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: suspending...\n");
+	return 0;
+}
+
+static int pmem_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: resuming...\n");
+	return 0;
+}
+
+static const struct dev_pm_ops pmem_dev_pm_ops = {
+	.runtime_suspend = pmem_runtime_suspend,
+	.runtime_resume = pmem_runtime_resume,
+};
+
 static struct platform_driver pmem_driver = {
 	.probe = pmem_probe,
 	.remove = pmem_remove,
-	.driver = { .name = "android_pmem" }
+	.driver = {
+		.name = "android_pmem",
+		.pm = &pmem_dev_pm_ops,
+	}
 };
 
 
 static int __init pmem_init(void)
 {
+	/* create /sys/kernel/<PMEM_SYSFS_DIR_NAME> directory */
+	pmem_kset = kset_create_and_add(PMEM_SYSFS_DIR_NAME,
+		NULL, kernel_kobj);
+	if (!pmem_kset) {
+		pr_err("pmem: %s: kset_create_and_add failed\n", __func__);
+		return -ENOMEM;
+	}
+
 	return platform_driver_register(&pmem_driver);
 }
 
diff --git a/drivers/misc/pmic8058-batt-alarm.c b/drivers/misc/pmic8058-batt-alarm.c
new file mode 100644
index 0000000..bff0720
--- /dev/null
+++ b/drivers/misc/pmic8058-batt-alarm.c
@@ -0,0 +1,753 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Qualcomm PMIC 8058 Battery Alarm Device driver
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/pmic8058-batt-alarm.h>
+#include <linux/mfd/pmic8058.h>
+
+/* PMIC 8058 Battery Alarm SSBI registers */
+#define	REG_THRESHOLD			0x023
+#define	REG_CTRL1			0x024
+#define	REG_CTRL2			0x0AA
+#define	REG_PWM_CTRL			0x0A3
+
+/* Available voltage threshold values */
+#define THRESHOLD_MIN_MV		2500
+#define THRESHOLD_MAX_MV		5675
+#define THRESHOLD_STEP_MV		25
+
+/* Register bit definitions */
+
+/* Threshold register */
+#define THRESHOLD_UPPER_MASK		0xF0
+#define THRESHOLD_LOWER_MASK		0x0F
+#define THRESHOLD_UPPER_SHIFT		4
+#define THRESHOLD_LOWER_SHIFT		0
+
+/* CTRL 1 register */
+#define CTRL1_BATT_ALARM_EN_MASK	0x80
+#define CTRL1_HOLD_TIME_MASK		0x70
+#define CTRL1_STATUS_UPPER_MASK		0x02
+#define CTRL1_STATUS_LOWER_MASK		0x01
+#define CTRL1_HOLD_TIME_SHIFT		4
+#define CTRL1_HOLD_TIME_MIN		0
+#define CTRL1_HOLD_TIME_MAX		7
+
+/* CTRL 2 register */
+#define CTRL2_COMP_UPPER_DISABLE_MASK	0x80
+#define CTRL2_COMP_LOWER_DISABLE_MASK	0x40
+#define CTRL2_FINE_STEP_UPPER_MASK	0x30
+#define CTRL2_RANGE_EXT_UPPER_MASK	0x08
+#define CTRL2_FINE_STEP_LOWER_MASK	0x06
+#define CTRL2_RANGE_EXT_LOWER_MASK	0x01
+#define CTRL2_FINE_STEP_UPPER_SHIFT	4
+#define CTRL2_FINE_STEP_LOWER_SHIFT	1
+
+/* PWM control register */
+#define PWM_CTRL_ALARM_EN_MASK		0xC0
+#define PWM_CTRL_ALARM_EN_NEVER		0x00
+#define PWM_CTRL_ALARM_EN_TCXO		0x40
+#define PWM_CTRL_ALARM_EN_PWM		0x80
+#define PWM_CTRL_ALARM_EN_ALWAYS	0xC0
+#define PWM_CTRL_PRE_MASK		0x38
+#define PWM_CTRL_DIV_MASK		0x07
+#define PWM_CTRL_PRE_SHIFT		3
+#define PWM_CTRL_DIV_SHIFT		0
+#define PWM_CTRL_PRE_MIN		0
+#define PWM_CTRL_PRE_MAX		7
+#define PWM_CTRL_DIV_MIN		1
+#define PWM_CTRL_DIV_MAX		7
+
+/* PWM control input range */
+#define PWM_CTRL_PRE_INPUT_MIN		2
+#define PWM_CTRL_PRE_INPUT_MAX		9
+#define PWM_CTRL_DIV_INPUT_MIN		2
+#define PWM_CTRL_DIV_INPUT_MAX		8
+
+/* Available voltage threshold values */
+#define THRESHOLD_BASIC_MIN_MV		2800
+#define THRESHOLD_EXT_MIN_MV		4400
+
+/*
+ * Default values used during initialization:
+ * Slowest PWM rate to ensure minimal status jittering when crossing thresholds.
+ * Largest hold time also helps reduce status value jittering.  Comparators
+ * are disabled by default and must be turned on by calling
+ * pm8058_batt_alarm_state_set.
+ */
+#define DEFAULT_THRESHOLD_LOWER		3200
+#define DEFAULT_THRESHOLD_UPPER		4300
+#define DEFAULT_HOLD_TIME		PM8058_BATT_ALARM_HOLD_TIME_16_MS
+#define DEFAULT_USE_PWM			1
+#define DEFAULT_PWM_SCALER		9
+#define DEFAULT_PWM_DIVIDER		8
+#define DEFAULT_LOWER_ENABLE		0
+#define DEFAULT_UPPER_ENABLE		0
+
+struct pm8058_batt_alarm_device {
+	struct srcu_notifier_head		irq_notifier_list;
+	struct pm8058_chip			*pm_chip;
+	struct mutex				batt_mutex;
+	unsigned int				irq;
+	int					notifier_count;
+	u8					reg_threshold;
+	u8					reg_ctrl1;
+	u8					reg_ctrl2;
+	u8					reg_pwm_ctrl;
+};
+static struct pm8058_batt_alarm_device *the_battalarm;
+
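+/*
+ * Masked read-modify-write against a cached register copy: only the bits
+ * selected by @mask are changed, and the SSBI write is skipped when the
+ * resulting value matches what is already cached in *reg_save.
+ */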
+static int pm8058_reg_write(struct pm8058_chip *chip, u16 addr, u8 val, u8 mask,
+			    u8 *reg_save)
+{
+	int rc = 0;
+	u8 reg;
+
+	reg = (*reg_save & ~mask) | (val & mask);
+	if (reg != *reg_save)
+		rc = pm8058_write(chip, addr, &reg, 1);
+	if (rc)
+		pr_err("pm8058_write failed; addr=%03X, rc=%d\n", addr, rc);
+	else
+		*reg_save = reg;
+	return rc;
+}
+
+/**
+ * pm8058_batt_alarm_state_set - enable or disable the threshold comparators
+ * @enable_lower_comparator: 1 = enable comparator, 0 = disable comparator
+ * @enable_upper_comparator: 1 = enable comparator, 0 = disable comparator
+ *
+ * RETURNS: an appropriate -ERRNO error value on error, or zero for success.
+ */
+int pm8058_batt_alarm_state_set(int enable_lower_comparator,
+				int enable_upper_comparator)
+{
+	struct pm8058_batt_alarm_device *battdev = the_battalarm;
+	int rc;
+	u8 reg_ctrl1 = 0, reg_ctrl2 = 0;
+
+	if (!battdev) {
+		pr_err("no battery alarm device found.\n");
+		return -ENXIO;
+	}
+
+	if (!enable_lower_comparator)
+		reg_ctrl2 |= CTRL2_COMP_LOWER_DISABLE_MASK;
+	if (!enable_upper_comparator)
+		reg_ctrl2 |= CTRL2_COMP_UPPER_DISABLE_MASK;
+
+	if (enable_lower_comparator || enable_upper_comparator)
+		reg_ctrl1 = CTRL1_BATT_ALARM_EN_MASK;
+
+	mutex_lock(&battdev->batt_mutex);
+	rc = pm8058_reg_write(battdev->pm_chip, REG_CTRL1, reg_ctrl1,
+				CTRL1_BATT_ALARM_EN_MASK, &battdev->reg_ctrl1);
+	if (rc)
+		goto bail;
+
+	rc = pm8058_reg_write(battdev->pm_chip, REG_CTRL2, reg_ctrl2,
+		CTRL2_COMP_LOWER_DISABLE_MASK | CTRL2_COMP_UPPER_DISABLE_MASK,
+		&battdev->reg_ctrl2);
+
+bail:
+	mutex_unlock(&battdev->batt_mutex);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(pm8058_batt_alarm_state_set);
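+
+/*
+ * Illustrative usage sketch (values are hypothetical, not taken from a
+ * board file): enable only the lower-voltage comparator so that just
+ * undervoltage events raise the alarm.
+ *
+ *	rc = pm8058_batt_alarm_state_set(1, 0);
+ *	if (rc)
+ *		pr_err("state_set failed, rc=%d\n", rc);
+ */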
+
+/**
+ * pm8058_batt_alarm_threshold_set - set the lower and upper alarm thresholds
+ * @lower_threshold_mV: battery undervoltage threshold in millivolts
+ * @upper_threshold_mV: battery overvoltage threshold in millivolts
+ *
+ * RETURNS: an appropriate -ERRNO error value on error, or zero for success.
+ */
+int pm8058_batt_alarm_threshold_set(int lower_threshold_mV,
+				    int upper_threshold_mV)
+{
+	struct pm8058_batt_alarm_device *battdev = the_battalarm;
+	int step, fine_step, rc;
+	u8 reg_threshold = 0, reg_ctrl2 = 0;
+
+	if (!battdev) {
+		pr_err("no battery alarm device found.\n");
+		return -ENXIO;
+	}
+
+	if (lower_threshold_mV < THRESHOLD_MIN_MV
+	    || lower_threshold_mV > THRESHOLD_MAX_MV) {
+		pr_err("lower threshold value, %d mV, is outside of allowable "
+			"range: [%d, %d] mV\n", lower_threshold_mV,
+			THRESHOLD_MIN_MV, THRESHOLD_MAX_MV);
+		return -EINVAL;
+	}
+
+	if (upper_threshold_mV < THRESHOLD_MIN_MV
+	    || upper_threshold_mV > THRESHOLD_MAX_MV) {
+		pr_err("upper threshold value, %d mV, is outside of allowable "
+			"range: [%d, %d] mV\n", upper_threshold_mV,
+			THRESHOLD_MIN_MV, THRESHOLD_MAX_MV);
+		return -EINVAL;
+	}
+
+	if (upper_threshold_mV < lower_threshold_mV) {
+		pr_err("lower threshold value, %d mV, must be <= upper "
+			"threshold value, %d mV\n", lower_threshold_mV,
+			upper_threshold_mV);
+		return -EINVAL;
+	}
+
+	/* Determine register settings for lower threshold. */
+	if (lower_threshold_mV < THRESHOLD_BASIC_MIN_MV) {
+		/* Extended low range */
+		reg_ctrl2 |= CTRL2_RANGE_EXT_LOWER_MASK;
+
+		step = (lower_threshold_mV - THRESHOLD_MIN_MV)
+			/ THRESHOLD_STEP_MV;
+
+		fine_step = step & 0x3;
+		/* Extended low range is for steps 0 to 2 */
+		step >>= 2;
+
+		reg_threshold |= (step << THRESHOLD_LOWER_SHIFT)
+				 & THRESHOLD_LOWER_MASK;
+		reg_ctrl2 |= (fine_step << CTRL2_FINE_STEP_LOWER_SHIFT)
+			     & CTRL2_FINE_STEP_LOWER_MASK;
+	} else if (lower_threshold_mV >= THRESHOLD_EXT_MIN_MV) {
+		/* Extended high range */
+		reg_ctrl2 |= CTRL2_RANGE_EXT_LOWER_MASK;
+
+		step = (lower_threshold_mV - THRESHOLD_EXT_MIN_MV)
+			/ THRESHOLD_STEP_MV;
+
+		fine_step = step & 0x3;
+		/* Extended high range is for steps 3 to 15 */
+		step = (step >> 2) + 3;
+
+		reg_threshold |= (step << THRESHOLD_LOWER_SHIFT)
+				 & THRESHOLD_LOWER_MASK;
+		reg_ctrl2 |= (fine_step << CTRL2_FINE_STEP_LOWER_SHIFT)
+			     & CTRL2_FINE_STEP_LOWER_MASK;
+	} else {
+		/* Basic range */
+		step = (lower_threshold_mV - THRESHOLD_BASIC_MIN_MV)
+			/ THRESHOLD_STEP_MV;
+
+		fine_step = step & 0x3;
+		step >>= 2;
+
+		reg_threshold |= (step << THRESHOLD_LOWER_SHIFT)
+				 & THRESHOLD_LOWER_MASK;
+		reg_ctrl2 |= (fine_step << CTRL2_FINE_STEP_LOWER_SHIFT)
+			     & CTRL2_FINE_STEP_LOWER_MASK;
+	}
+
+	/* Determine register settings for upper threshold. */
+	if (upper_threshold_mV < THRESHOLD_BASIC_MIN_MV) {
+		/* Extended low range */
+		reg_ctrl2 |= CTRL2_RANGE_EXT_UPPER_MASK;
+
+		step = (upper_threshold_mV - THRESHOLD_MIN_MV)
+			/ THRESHOLD_STEP_MV;
+
+		fine_step = step & 0x3;
+		/* Extended low range is for steps 0 to 2 */
+		step >>= 2;
+
+		reg_threshold |= (step << THRESHOLD_UPPER_SHIFT)
+				 & THRESHOLD_UPPER_MASK;
+		reg_ctrl2 |= (fine_step << CTRL2_FINE_STEP_UPPER_SHIFT)
+			     & CTRL2_FINE_STEP_UPPER_MASK;
+	} else if (upper_threshold_mV >= THRESHOLD_EXT_MIN_MV) {
+		/* Extended high range */
+		reg_ctrl2 |= CTRL2_RANGE_EXT_UPPER_MASK;
+
+		step = (upper_threshold_mV - THRESHOLD_EXT_MIN_MV)
+			/ THRESHOLD_STEP_MV;
+
+		fine_step = step & 0x3;
+		/* Extended high range is for steps 3 to 15 */
+		step = (step >> 2) + 3;
+
+		reg_threshold |= (step << THRESHOLD_UPPER_SHIFT)
+				 & THRESHOLD_UPPER_MASK;
+		reg_ctrl2 |= (fine_step << CTRL2_FINE_STEP_UPPER_SHIFT)
+			     & CTRL2_FINE_STEP_UPPER_MASK;
+	} else {
+		/* Basic range */
+		step = (upper_threshold_mV - THRESHOLD_BASIC_MIN_MV)
+			/ THRESHOLD_STEP_MV;
+
+		fine_step = step & 0x3;
+		step >>= 2;
+
+		reg_threshold |= (step << THRESHOLD_UPPER_SHIFT)
+				 & THRESHOLD_UPPER_MASK;
+		reg_ctrl2 |= (fine_step << CTRL2_FINE_STEP_UPPER_SHIFT)
+			     & CTRL2_FINE_STEP_UPPER_MASK;
+	}
+
+	mutex_lock(&battdev->batt_mutex);
+	rc = pm8058_reg_write(battdev->pm_chip, REG_THRESHOLD, reg_threshold,
+				THRESHOLD_LOWER_MASK | THRESHOLD_UPPER_MASK,
+				&battdev->reg_threshold);
+	if (rc)
+		goto bail;
+
+	rc = pm8058_reg_write(battdev->pm_chip, REG_CTRL2, reg_ctrl2,
+		CTRL2_FINE_STEP_LOWER_MASK | CTRL2_FINE_STEP_UPPER_MASK
+		  | CTRL2_RANGE_EXT_LOWER_MASK | CTRL2_RANGE_EXT_UPPER_MASK,
+		&battdev->reg_ctrl2);
+
+bail:
+	mutex_unlock(&battdev->batt_mutex);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(pm8058_batt_alarm_threshold_set);
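+
+/*
+ * Worked example of the threshold encoding above (illustrative only):
+ * a 3200 mV lower threshold falls in the basic range, so
+ * step = (3200 - 2800) / 25 = 16, fine_step = 16 & 0x3 = 0 and
+ * step >>= 2 gives 4; the lower nibble of REG_THRESHOLD becomes 4 with a
+ * fine step of 0.  A 4300 mV upper threshold gives step = 60 -> 15, so
+ * the driver defaults (3200/4300 mV) program REG_THRESHOLD = 0xF4.
+ */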
+
+/**
+ * pm8058_batt_alarm_status_read - get status of both threshold comparators
+ *
+ * RETURNS:	< 0	   = error
+ *		  0	   = battery voltage ok
+ *		BIT(0) set = battery voltage below lower threshold
+ *		BIT(1) set = battery voltage above upper threshold
+ */
+int pm8058_batt_alarm_status_read(void)
+{
+	struct pm8058_batt_alarm_device *battdev = the_battalarm;
+	int status, rc;
+
+	if (!battdev) {
+		pr_err("no battery alarm device found.\n");
+		return -ENXIO;
+	}
+
+	mutex_lock(&battdev->batt_mutex);
+	rc = pm8058_read(battdev->pm_chip, REG_CTRL1, &battdev->reg_ctrl1, 1);
+
+	status = ((battdev->reg_ctrl1 & CTRL1_STATUS_LOWER_MASK)
+			? PM8058_BATT_ALARM_STATUS_BELOW_LOWER : 0)
+		| ((battdev->reg_ctrl1 & CTRL1_STATUS_UPPER_MASK)
+			? PM8058_BATT_ALARM_STATUS_ABOVE_UPPER : 0);
+	mutex_unlock(&battdev->batt_mutex);
+
+	if (rc) {
+		pr_err("pm8058_read failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	return status;
+}
+EXPORT_SYMBOL_GPL(pm8058_batt_alarm_status_read);
+
+/**
+ * pm8058_batt_alarm_hold_time_set - set hold time of interrupt output
+ * @hold_time:	amount of time that battery voltage must remain outside of the
+ *		threshold range before the battery alarm interrupt triggers
+ *
+ * RETURNS: an appropriate -ERRNO error value on error, or zero for success.
+ */
+int pm8058_batt_alarm_hold_time_set(enum pm8058_batt_alarm_hold_time hold_time)
+{
+	struct pm8058_batt_alarm_device *battdev = the_battalarm;
+	int rc;
+	u8 reg_ctrl1 = 0;
+
+	if (!battdev) {
+		pr_err("no battery alarm device found.\n");
+		return -ENXIO;
+	}
+
+	if (hold_time < CTRL1_HOLD_TIME_MIN
+	    || hold_time > CTRL1_HOLD_TIME_MAX) {
+
+		pr_err("hold time, %d, is outside of allowable range: "
+			"[%d, %d]\n", hold_time, CTRL1_HOLD_TIME_MIN,
+			CTRL1_HOLD_TIME_MAX);
+		return -EINVAL;
+	}
+
+	reg_ctrl1 = hold_time << CTRL1_HOLD_TIME_SHIFT;
+
+	mutex_lock(&battdev->batt_mutex);
+	rc = pm8058_reg_write(battdev->pm_chip, REG_CTRL1, reg_ctrl1,
+			      CTRL1_HOLD_TIME_MASK, &battdev->reg_ctrl1);
+	mutex_unlock(&battdev->batt_mutex);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(pm8058_batt_alarm_hold_time_set);
+
+/**
+ * pm8058_batt_alarm_pwm_rate_set - set battery alarm update rate
+ * @use_pwm:		1 = use PWM update rate, 0 = comparators always active
+ * @clock_scaler:	PWM clock scaler = 2 to 9
+ * @clock_divider:	PWM clock divider = 2 to 8
+ *
+ * This function sets the rate at which the battery alarm module enables
+ * the threshold comparators.  The rate is determined by the following equation:
+ *
+ * f_update = (1024 Hz) / (clock_divider * (2 ^ clock_scaler))
+ *
+ * Thus, the update rate can range from 0.25 Hz to 128 Hz.
+ *
+ * RETURNS: an appropriate -ERRNO error value on error, or zero for success.
+ */
+int pm8058_batt_alarm_pwm_rate_set(int use_pwm, int clock_scaler,
+				   int clock_divider)
+{
+	struct pm8058_batt_alarm_device *battdev = the_battalarm;
+	int rc;
+	u8 reg_pwm_ctrl = 0, mask = 0;
+
+	if (!battdev) {
+		pr_err("no battery alarm device found.\n");
+		return -ENXIO;
+	}
+
+	if (use_pwm && (clock_scaler < PWM_CTRL_PRE_INPUT_MIN
+	    || clock_scaler > PWM_CTRL_PRE_INPUT_MAX)) {
+		pr_err("PWM clock scaler, %d, is outside of allowable range: "
+			"[%d, %d]\n", clock_scaler, PWM_CTRL_PRE_INPUT_MIN,
+			PWM_CTRL_PRE_INPUT_MAX);
+		return -EINVAL;
+	}
+
+	if (use_pwm && (clock_divider < PWM_CTRL_DIV_INPUT_MIN
+	    || clock_divider > PWM_CTRL_DIV_INPUT_MAX)) {
+		pr_err("PWM clock divider, %d, is outside of allowable range: "
+			"[%d, %d]\n", clock_divider, PWM_CTRL_DIV_INPUT_MIN,
+			PWM_CTRL_DIV_INPUT_MAX);
+		return -EINVAL;
+	}
+
+	if (!use_pwm) {
+		/* Turn off PWM control and always enable. */
+		reg_pwm_ctrl = PWM_CTRL_ALARM_EN_ALWAYS;
+		mask = PWM_CTRL_ALARM_EN_MASK;
+	} else {
+		/* Use PWM control. */
+		reg_pwm_ctrl = PWM_CTRL_ALARM_EN_PWM;
+		mask = PWM_CTRL_ALARM_EN_MASK | PWM_CTRL_PRE_MASK
+			| PWM_CTRL_DIV_MASK;
+
+		clock_scaler -= PWM_CTRL_PRE_INPUT_MIN - PWM_CTRL_PRE_MIN;
+		clock_divider -= PWM_CTRL_DIV_INPUT_MIN - PWM_CTRL_DIV_MIN;
+
+		reg_pwm_ctrl |= (clock_scaler << PWM_CTRL_PRE_SHIFT)
+				& PWM_CTRL_PRE_MASK;
+		reg_pwm_ctrl |= (clock_divider << PWM_CTRL_DIV_SHIFT)
+				& PWM_CTRL_DIV_MASK;
+	}
+
+	mutex_lock(&battdev->batt_mutex);
+	rc = pm8058_reg_write(battdev->pm_chip, REG_PWM_CTRL, reg_pwm_ctrl,
+			      mask, &battdev->reg_pwm_ctrl);
+	mutex_unlock(&battdev->batt_mutex);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(pm8058_batt_alarm_pwm_rate_set);
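+
+/*
+ * Worked example of the update-rate formula above (illustrative only):
+ * with the driver defaults use_pwm = 1, clock_scaler = 9 and
+ * clock_divider = 8, f_update = 1024 / (8 * 2^9) = 0.25 Hz, i.e. the
+ * comparators are enabled once every four seconds.
+ */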
+
+/*
+ * Handle the BATT_ALARM interrupt:
+ * Battery voltage is above or below threshold range.
+ */
+static irqreturn_t pm8058_batt_alarm_isr(int irq, void *data)
+{
+	struct pm8058_batt_alarm_device *battdev = data;
+	int status;
+
+	if (battdev) {
+		status = pm8058_batt_alarm_status_read();
+
+		if (status < 0)
+			pr_err("failed to read status, rc=%d\n", status);
+		else
+			srcu_notifier_call_chain(&battdev->irq_notifier_list,
+						 status, NULL);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * pm8058_batt_alarm_register_notifier - register a notifier to run when a
+ *	battery voltage change interrupt fires
+ * @nb:	notifier block containing callback function to register
+ *
+ * nb->notifier_call must point to a function of this form -
+ * int (*notifier_call)(struct notifier_block *nb, unsigned long status,
+ *			void *unused);
+ * "status" will receive the battery alarm status; "unused" will be NULL.
+ *
+ * RETURNS: an appropriate -ERRNO error value on error, or zero for success.
+ */
+int pm8058_batt_alarm_register_notifier(struct notifier_block *nb)
+{
+	int rc;
+
+	if (!the_battalarm) {
+		pr_err("no battery alarm device found.\n");
+		return -ENXIO;
+	}
+
+	rc = srcu_notifier_chain_register(&the_battalarm->irq_notifier_list,
+					  nb);
+	mutex_lock(&the_battalarm->batt_mutex);
+	if (rc == 0) {
+		if (the_battalarm->notifier_count == 0) {
+			/* request the irq */
+			rc = request_threaded_irq(the_battalarm->irq, NULL,
+				pm8058_batt_alarm_isr,
+				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+				"pm8058-batt_alarm-irq", the_battalarm);
+			if (rc < 0) {
+				pr_err("request_irq(%d) failed, rc=%d\n",
+					the_battalarm->irq, rc);
+				goto done;
+			}
+
+			rc = irq_set_irq_wake(the_battalarm->irq, 1);
+			if (rc < 0) {
+				pr_err("irq_set_irq_wake(%d,1) failed, rc=%d\n",
+					the_battalarm->irq, rc);
+				goto done;
+			}
+		}
+
+		the_battalarm->notifier_count++;
+	}
+done:
+	mutex_unlock(&the_battalarm->batt_mutex);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(pm8058_batt_alarm_register_notifier);
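+
+/*
+ * Illustrative client sketch (not part of this driver; the callback and
+ * notifier_block names below are hypothetical):
+ *
+ *	static int batt_alarm_notify(struct notifier_block *nb,
+ *				     unsigned long status, void *unused)
+ *	{
+ *		if (status & PM8058_BATT_ALARM_STATUS_BELOW_LOWER)
+ *			pr_info("battery below lower threshold\n");
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block batt_alarm_nb = {
+ *		.notifier_call = batt_alarm_notify,
+ *	};
+ *
+ *	rc = pm8058_batt_alarm_register_notifier(&batt_alarm_nb);
+ */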
+
+/**
+ * pm8058_batt_alarm_unregister_notifier - unregister a notifier that is run
+ *	when a battery voltage change interrupt fires
+ * @nb:	notifier block containing callback function to unregister
+ *
+ * RETURNS: an appropriate -ERRNO error value on error, or zero for success.
+ */
+int pm8058_batt_alarm_unregister_notifier(struct notifier_block *nb)
+{
+	int rc;
+
+	if (!the_battalarm) {
+		pr_err("no battery alarm device found.\n");
+		return -ENXIO;
+	}
+
+	rc = srcu_notifier_chain_unregister(&the_battalarm->irq_notifier_list,
+					    nb);
+	if (rc == 0) {
+		mutex_lock(&the_battalarm->batt_mutex);
+
+		the_battalarm->notifier_count--;
+
+		if (the_battalarm->notifier_count == 0)
+			free_irq(the_battalarm->irq, the_battalarm);
+
+		WARN_ON(the_battalarm->notifier_count < 0);
+
+		mutex_unlock(&the_battalarm->batt_mutex);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(pm8058_batt_alarm_unregister_notifier);
+
+static int pm8058_batt_alarm_reg_init(struct pm8058_batt_alarm_device *battdev)
+{
+	int rc = 0;
+
+	/* save the current register states */
+	rc = pm8058_read(battdev->pm_chip, REG_THRESHOLD,
+			 &battdev->reg_threshold, 1);
+	if (rc)
+		goto bail;
+
+	rc = pm8058_read(battdev->pm_chip, REG_CTRL1,
+			 &battdev->reg_ctrl1, 1);
+	if (rc)
+		goto bail;
+
+	rc = pm8058_read(battdev->pm_chip, REG_CTRL2,
+			 &battdev->reg_ctrl2, 1);
+	if (rc)
+		goto bail;
+
+	rc = pm8058_read(battdev->pm_chip, REG_PWM_CTRL,
+			 &battdev->reg_pwm_ctrl, 1);
+	if (rc)
+		goto bail;
+
+bail:
+	if (rc)
+		pr_err("pm8058_read failed; initial register states "
+			"unknown, rc=%d\n", rc);
+	return rc;
+}
+
+static int pm8058_batt_alarm_config(void)
+{
+	int rc = 0;
+
+	/* Use default values when no platform data is provided. */
+	rc = pm8058_batt_alarm_threshold_set(DEFAULT_THRESHOLD_LOWER,
+		DEFAULT_THRESHOLD_UPPER);
+	if (rc) {
+		pr_err("threshold_set failed, rc=%d\n", rc);
+		goto done;
+	}
+
+	rc = pm8058_batt_alarm_hold_time_set(DEFAULT_HOLD_TIME);
+	if (rc) {
+		pr_err("hold_time_set failed, rc=%d\n", rc);
+		goto done;
+	}
+
+	rc = pm8058_batt_alarm_pwm_rate_set(DEFAULT_USE_PWM,
+			DEFAULT_PWM_SCALER, DEFAULT_PWM_DIVIDER);
+	if (rc) {
+		pr_err("pwm_rate_set failed, rc=%d\n", rc);
+		goto done;
+	}
+
+	rc = pm8058_batt_alarm_state_set(DEFAULT_LOWER_ENABLE,
+			DEFAULT_UPPER_ENABLE);
+	if (rc) {
+		pr_err("state_set failed, rc=%d\n", rc);
+		goto done;
+	}
+
+done:
+	return rc;
+}
+
+static int __devinit pm8058_batt_alarm_probe(struct platform_device *pdev)
+{
+	struct pm8058_batt_alarm_device *battdev;
+	struct pm8058_chip *pm_chip;
+	unsigned int irq;
+	int rc;
+
+	pm_chip = dev_get_drvdata(pdev->dev.parent);
+	if (pm_chip == NULL) {
+		pr_err("no driver data passed in.\n");
+		rc = -EFAULT;
+		goto exit_input;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (!irq) {
+		pr_err("no IRQ passed in.\n");
+		rc = -EFAULT;
+		goto exit_input;
+	}
+
+	battdev = kzalloc(sizeof *battdev, GFP_KERNEL);
+	if (battdev == NULL) {
+		pr_err("kzalloc() failed.\n");
+		rc = -ENOMEM;
+		goto exit_input;
+	}
+
+	battdev->pm_chip = pm_chip;
+	platform_set_drvdata(pdev, battdev);
+
+	srcu_init_notifier_head(&battdev->irq_notifier_list);
+
+	battdev->irq = irq;
+	battdev->notifier_count = 0;
+	mutex_init(&battdev->batt_mutex);
+
+	rc = pm8058_batt_alarm_reg_init(battdev);
+	if (rc)
+		goto exit_free_dev;
+
+	the_battalarm = battdev;
+
+	rc = pm8058_batt_alarm_config();
+	if (rc)
+		goto exit_free_dev;
+
+	pr_notice("OK\n");
+	return 0;
+
+exit_free_dev:
+	mutex_destroy(&battdev->batt_mutex);
+	srcu_cleanup_notifier_head(&battdev->irq_notifier_list);
+	platform_set_drvdata(pdev, battdev->pm_chip);
+	the_battalarm = NULL;
+	kfree(battdev);
+exit_input:
+	return rc;
+}
+
+static int __devexit pm8058_batt_alarm_remove(struct platform_device *pdev)
+{
+	struct pm8058_batt_alarm_device *battdev = platform_get_drvdata(pdev);
+
+	mutex_destroy(&battdev->batt_mutex);
+	srcu_cleanup_notifier_head(&battdev->irq_notifier_list);
+	platform_set_drvdata(pdev, battdev->pm_chip);
+	free_irq(battdev->irq, battdev);
+	kfree(battdev);
+
+	the_battalarm = NULL;
+
+	return 0;
+}
+
+static struct platform_driver pm8058_batt_alarm_driver = {
+	.probe	= pm8058_batt_alarm_probe,
+	.remove	= __devexit_p(pm8058_batt_alarm_remove),
+	.driver	= {
+		.name = "pm8058-batt-alarm",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init pm8058_batt_alarm_init(void)
+{
+	return platform_driver_register(&pm8058_batt_alarm_driver);
+}
+
+static void __exit pm8058_batt_alarm_exit(void)
+{
+	platform_driver_unregister(&pm8058_batt_alarm_driver);
+}
+
+module_init(pm8058_batt_alarm_init);
+module_exit(pm8058_batt_alarm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC8058 Battery Alarm Device driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:pm8058-batt-alarm");
diff --git a/drivers/misc/pmic8058-misc.c b/drivers/misc/pmic8058-misc.c
new file mode 100644
index 0000000..77a2f47
--- /dev/null
+++ b/drivers/misc/pmic8058-misc.c
@@ -0,0 +1,335 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * Qualcomm PMIC8058 Misc Device driver
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mfd/pmic8058.h>
+#include <linux/pmic8058-misc.h>
+
+/* VIB_DRV register */
+#define SSBI_REG_ADDR_DRV_VIB		0x4A
+
+#define PM8058_VIB_DRIVE_SHIFT		3
+#define PM8058_VIB_LOGIC_SHIFT		2
+#define PM8058_VIB_MIN_LEVEL_mV		1200
+#define PM8058_VIB_MAX_LEVEL_mV		3100
+
+/* COINCELL_CHG register */
+#define SSBI_REG_ADDR_COINCELL_CHG	(0x2F)
+#define PM8058_COINCELL_RESISTOR_SHIFT	(2)
+
+/* Resource offsets. */
+enum PM8058_MISC_IRQ {
+	PM8058_MISC_IRQ_OSC_HALT = 0
+};
+
+struct pm8058_misc_device {
+	struct pm8058_chip	*pm_chip;
+	struct dentry		*dgb_dir;
+	unsigned int		osc_halt_irq;
+	u64			osc_halt_count;
+};
+
+static struct pm8058_misc_device *misc_dev;
+
+int pm8058_vibrator_config(struct pm8058_vib_config *vib_config)
+{
+	u8 reg = 0;
+	int rc;
+
+	if (misc_dev == NULL) {
+		pr_err("misc_device is NULL\n");
+		return -EINVAL;
+	}
+
+	if (vib_config->drive_mV) {
+		if (vib_config->drive_mV < PM8058_VIB_MIN_LEVEL_mV ||
+			vib_config->drive_mV > PM8058_VIB_MAX_LEVEL_mV) {
+			pr_err("Invalid vibrator drive strength\n");
+			return -EINVAL;
+		}
+	}
+
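+	/*
+	 * Drive strength is programmed in 100 mV steps: the 1200..3100 mV
+	 * range checked above maps to raw values 12..31, placed in the
+	 * upper bits of VIB_DRV by PM8058_VIB_DRIVE_SHIFT.
+	 */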
+	reg = (vib_config->drive_mV / 100) << PM8058_VIB_DRIVE_SHIFT;
+
+	reg |= (!!vib_config->active_low) << PM8058_VIB_LOGIC_SHIFT;
+
+	reg |= vib_config->enable_mode;
+
+	rc = pm8058_write(misc_dev->pm_chip, SSBI_REG_ADDR_DRV_VIB, &reg, 1);
+	if (rc)
+		pr_err("%s: pm8058 write failed: rc=%d\n", __func__, rc);
+
+	return rc;
+}
+EXPORT_SYMBOL(pm8058_vibrator_config);
+
+/**
+ * pm8058_coincell_chg_config - Disables or enables the coincell charger, and
+ *				configures its voltage and resistor settings.
+ * @chg_config:			Holds both voltage and resistor values, and a
+ *				switch to change the state of charger.
+ *				If state is to disable the charger then
+ *				both voltage and resistor are disregarded.
+ *
+ * RETURNS: an appropriate -ERRNO error value on error, or zero for success.
+ */
+int pm8058_coincell_chg_config(struct pm8058_coincell_chg_config *chg_config)
+{
+	u8 reg, voltage, resistor;
+	int rc;
+
+	reg = 0;
+	voltage = 0;
+	resistor = 0;
+	rc = 0;
+
+	if (misc_dev == NULL) {
+		pr_err("misc_device is NULL\n");
+		return -EINVAL;
+	}
+
+	if (chg_config == NULL) {
+		pr_err("chg_config is NULL\n");
+		return -EINVAL;
+	}
+
+	if (chg_config->state == PM8058_COINCELL_CHG_DISABLE) {
+		rc = pm8058_write(misc_dev->pm_chip,
+				SSBI_REG_ADDR_COINCELL_CHG, &reg, 1);
+		if (rc)
+			pr_err("%s: pm8058 write failed: rc=%d\n",
+							__func__, rc);
+		return rc;
+	}
+
+	voltage = chg_config->voltage;
+	resistor = chg_config->resistor;
+
+	if (voltage < PM8058_COINCELL_VOLTAGE_3p2V ||
+			(voltage > PM8058_COINCELL_VOLTAGE_3p0V &&
+				voltage != PM8058_COINCELL_VOLTAGE_2p5V)) {
+		pr_err("Invalid voltage value provided\n");
+		return -EINVAL;
+	}
+
+	if (resistor < PM8058_COINCELL_RESISTOR_2100_OHMS ||
+			resistor > PM8058_COINCELL_RESISTOR_800_OHMS) {
+		pr_err("Invalid resistor value provided\n");
+		return -EINVAL;
+	}
+
+	reg |= voltage;
+
+	reg |= (resistor << PM8058_COINCELL_RESISTOR_SHIFT);
+
+	rc = pm8058_write(misc_dev->pm_chip,
+			SSBI_REG_ADDR_COINCELL_CHG, &reg, 1);
+
+	if (rc)
+		pr_err("%s: pm8058 write failed: rc=%d\n", __func__, rc);
+
+	return rc;
+}
+EXPORT_SYMBOL(pm8058_coincell_chg_config);
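+
+/*
+ * Illustrative usage sketch: disable the coincell charger (voltage and
+ * resistor are ignored when disabling).  Enabling instead takes one of the
+ * PM8058_COINCELL_VOLTAGE_* and PM8058_COINCELL_RESISTOR_* values checked
+ * above.
+ *
+ *	struct pm8058_coincell_chg_config cfg = {
+ *		.state = PM8058_COINCELL_CHG_DISABLE,
+ *	};
+ *	rc = pm8058_coincell_chg_config(&cfg);
+ */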
+
+/* Handle the OSC_HALT interrupt: 32 kHz XTAL oscillator has stopped. */
+static irqreturn_t pm8058_osc_halt_isr(int irq, void *data)
+{
+	struct pm8058_misc_device *miscdev = data;
+	u64 count = 0;
+
+	if (miscdev) {
+		miscdev->osc_halt_count++;
+		count = miscdev->osc_halt_count;
+	}
+
+	pr_crit("%s: OSC_HALT interrupt has triggered, 32 kHz XTAL oscillator"
+		" has halted (%llu)!\n", __func__, count);
+
+	return IRQ_HANDLED;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int osc_halt_count_get(void *data, u64 *val)
+{
+	struct pm8058_misc_device *miscdev = data;
+
+	if (miscdev == NULL) {
+		pr_err("%s: null pointer input.\n", __func__);
+		return -EINVAL;
+	}
+
+	*val = miscdev->osc_halt_count;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(dbg_osc_halt_fops, osc_halt_count_get, NULL, "%llu\n");
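+
+/*
+ * With debugfs mounted at /sys/kernel/debug, the counter created below is
+ * readable (root only) as:
+ *
+ *	# cat /sys/kernel/debug/pm8058-misc/osc_halt_count
+ */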
+
+static int __devinit pmic8058_misc_dbg_probe(struct pm8058_misc_device *miscdev)
+{
+	struct dentry *dent;
+	struct dentry *temp;
+
+	if (miscdev == NULL) {
+		pr_err("%s: no parent data passed in.\n", __func__);
+		return -EINVAL;
+	}
+
+	dent = debugfs_create_dir("pm8058-misc", NULL);
+	if (dent == NULL || IS_ERR(dent)) {
+		pr_err("%s: ERR debugfs_create_dir: dent=0x%X\n",
+					__func__, (unsigned)dent);
+		return -ENOMEM;
+	}
+
+	temp = debugfs_create_file("osc_halt_count", S_IRUSR, dent,
+					miscdev, &dbg_osc_halt_fops);
+	if (temp == NULL || IS_ERR(temp)) {
+		pr_err("%s: ERR debugfs_create_file: dent=0x%X\n",
+					__func__, (unsigned)temp);
+		goto debug_error;
+	}
+
+	miscdev->dgb_dir = dent;
+	return 0;
+
+debug_error:
+	debugfs_remove_recursive(dent);
+	return -ENOMEM;
+}
+
+static int __devexit pmic8058_misc_dbg_remove(
+		struct pm8058_misc_device *miscdev)
+{
+	if (miscdev->dgb_dir)
+		debugfs_remove_recursive(miscdev->dgb_dir);
+
+	return 0;
+}
+
+#else
+
+static int __devinit pmic8058_misc_dbg_probe(struct pm8058_misc_device *miscdev)
+{
+	return 0;
+}
+
+static int __devexit pmic8058_misc_dbg_remove(
+		struct pm8058_misc_device *miscdev)
+{
+	return 0;
+}
+
+#endif
+
+
+static int __devinit pmic8058_misc_probe(struct platform_device *pdev)
+{
+	struct pm8058_misc_device *miscdev;
+	struct pm8058_chip *pm_chip;
+	unsigned int irq;
+	int rc;
+
+	pm_chip = dev_get_drvdata(pdev->dev.parent);
+	if (pm_chip == NULL) {
+		pr_err("%s: no driver data passed in.\n", __func__);
+		return -EFAULT;
+	}
+
+	irq = platform_get_irq(pdev, PM8058_MISC_IRQ_OSC_HALT);
+	if (!irq) {
+		pr_err("%s: no IRQ passed in.\n", __func__);
+		return -EFAULT;
+	}
+
+	miscdev = kzalloc(sizeof *miscdev, GFP_KERNEL);
+	if (miscdev == NULL) {
+		pr_err("%s: kzalloc() failed.\n", __func__);
+		return -ENOMEM;
+	}
+
+	miscdev->pm_chip = pm_chip;
+	platform_set_drvdata(pdev, miscdev);
+
+	rc = request_threaded_irq(irq, NULL, pm8058_osc_halt_isr,
+			 IRQF_TRIGGER_RISING | IRQF_DISABLED,
+			 "pm8058-osc_halt-irq", miscdev);
+	if (rc < 0) {
+		pr_err("%s: request_irq(%d) FAIL: %d\n", __func__, irq, rc);
+		platform_set_drvdata(pdev, miscdev->pm_chip);
+		kfree(miscdev);
+		return rc;
+	}
+	miscdev->osc_halt_irq = irq;
+	miscdev->osc_halt_count = 0;
+
+	rc = pmic8058_misc_dbg_probe(miscdev);
+	if (rc) {
+		free_irq(irq, miscdev);
+		platform_set_drvdata(pdev, miscdev->pm_chip);
+		kfree(miscdev);
+		return rc;
+	}
+
+	misc_dev = miscdev;
+
+	pr_notice("%s: OK\n", __func__);
+	return 0;
+}
+
+static int __devexit pmic8058_misc_remove(struct platform_device *pdev)
+{
+	struct pm8058_misc_device *miscdev = platform_get_drvdata(pdev);
+
+	pmic8058_misc_dbg_remove(miscdev);
+
+	platform_set_drvdata(pdev, miscdev->pm_chip);
+	free_irq(miscdev->osc_halt_irq, miscdev);
+	kfree(miscdev);
+
+	return 0;
+}
+
+static struct platform_driver pmic8058_misc_driver = {
+	.probe	= pmic8058_misc_probe,
+	.remove	= __devexit_p(pmic8058_misc_remove),
+	.driver	= {
+		.name = "pm8058-misc",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init pm8058_misc_init(void)
+{
+	return platform_driver_register(&pmic8058_misc_driver);
+}
+
+static void __exit pm8058_misc_exit(void)
+{
+	platform_driver_unregister(&pmic8058_misc_driver);
+}
+
+module_init(pm8058_misc_init);
+module_exit(pm8058_misc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC8058 Misc Device driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:pmic8058-misc");
diff --git a/drivers/misc/pmic8058-nfc.c b/drivers/misc/pmic8058-nfc.c
new file mode 100644
index 0000000..76a19f4
--- /dev/null
+++ b/drivers/misc/pmic8058-nfc.c
@@ -0,0 +1,322 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * Qualcomm PMIC8058 NFC driver
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/mfd/pmic8058.h>
+#include <linux/pmic8058-nfc.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+
+/* PMIC8058 NFC */
+#define	SSBI_REG_NFC_CTRL	0x14D
+#define	SSBI_REG_NFC_TEST	0x14E
+
+/* NFC_CTRL */
+#define	PM8058_NFC_SUPPORT_EN		0x80
+#define	PM8058_NFC_LDO_EN		0x40
+#define	PM8058_NFC_EN			0x20
+#define	PM8058_NFC_EXT_VDDLDO_EN	0x10
+#define	PM8058_NFC_VPH_PWR_EN		0x08
+#define	PM8058_NFC_RESERVED		0x04
+#define	PM8058_NFC_VDDLDO_LEVEL		0x03
+
+/* NFC_TEST */
+#define	PM8058_NFC_VDDLDO_MON_EN	0x80
+#define	PM8058_NFC_ATEST_EN		0x40
+#define	PM8058_NFC_DTEST1_EN		0x20
+#define	PM8058_NFC_RESERVED2		0x18
+#define	PM8058_NFC_VDDLDO_OK_S		0x04
+#define	PM8058_NFC_MBG_EN_S		0x02
+#define	PM8058_NFC_EXT_EN_S		0x01
+
+struct pm8058_nfc_device {
+	struct mutex		nfc_mutex;
+	struct pm8058_chip	*pm_chip;
+#if defined(CONFIG_DEBUG_FS)
+	struct dentry 		*dent;
+#endif
+};
+static struct pm8058_nfc_device	*nfc_dev;
+
+/* APIs */
+/*
+ * pm8058_nfc_request - request a handle to access NFC device
+ */
+struct pm8058_nfc_device *pm8058_nfc_request(void)
+{
+	return nfc_dev;
+}
+EXPORT_SYMBOL(pm8058_nfc_request);
+
+/*
+ * pm8058_nfc_config - configure NFC signals
+ *
+ * @nfcdev: the NFC device
+ * @mask: signal mask to configure
+ * @flags: control flags
+ */
+int pm8058_nfc_config(struct pm8058_nfc_device *nfcdev, u32 mask, u32 flags)
+{
+	u8	nfc_ctrl, nfc_test, m, f;
+	int	rc;
+
+	if (nfcdev == NULL || IS_ERR(nfcdev) || !mask)
+		return -EINVAL;
+	if (nfcdev->pm_chip == NULL)
+		return -ENODEV;
+
+	mutex_lock(&nfcdev->nfc_mutex);
+
+	if (!(mask & PM_NFC_CTRL_REQ))
+		goto config_test;
+
+	rc = pm8058_read(nfcdev->pm_chip, SSBI_REG_NFC_CTRL, &nfc_ctrl, 1);
+	if (rc) {
+		pr_err("%s: FAIL pm8058_read(): rc=%d (nfc_ctrl=0x%x)\n",
+		       __func__, rc, nfc_ctrl);
+		goto config_done;
+	}
+
+	m = mask & 0x00ff;
+	f = flags & 0x00ff;
+	nfc_ctrl &= ~m;
+	nfc_ctrl |= m & f;
+
+	rc = pm8058_write(nfcdev->pm_chip, SSBI_REG_NFC_CTRL, &nfc_ctrl, 1);
+	if (rc) {
+		pr_err("%s: FAIL pm8058_write(): rc=%d (nfc_ctrl=0x%x)\n",
+		       __func__, rc, nfc_ctrl);
+		goto config_done;
+	}
+
+config_test:
+	if (!(mask & PM_NFC_TEST_REQ))
+		goto config_done;
+
+	rc = pm8058_read(nfcdev->pm_chip, SSBI_REG_NFC_TEST, &nfc_test, 1);
+	if (rc) {
+		pr_err("%s: FAIL pm8058_read(): rc=%d (nfc_test=0x%x)\n",
+		       __func__, rc, nfc_test);
+		goto config_done;
+	}
+
+	m = (mask >> 8) & 0x00ff;
+	f = (flags >> 8) & 0x00ff;
+	nfc_test &= ~m;
+	nfc_test |= m & f;
+
+	rc = pm8058_write(nfcdev->pm_chip, SSBI_REG_NFC_TEST, &nfc_test, 1);
+	if (rc) {
+		pr_err("%s: FAIL pm8058_write(): rc=%d (nfc_test=0x%x)\n",
+		       __func__, rc, nfc_test);
+		goto config_done;
+	}
+
+config_done:
+	mutex_unlock(&nfcdev->nfc_mutex);
+	return 0;
+}
+EXPORT_SYMBOL(pm8058_nfc_config);
+
+/*
+ * pm8058_nfc_get_status - get NFC status
+ *
+ * @nfcdev: the NFC device
+ * @mask: of status mask to read
+ * @status: pointer to the status variable
+ */
+int pm8058_nfc_get_status(struct pm8058_nfc_device *nfcdev,
+			  u32 mask, u32 *status)
+{
+	u8	nfc_ctrl = 0, nfc_test = 0;
+	u32	st;
+	int	rc;
+
+	if (nfcdev == NULL || IS_ERR(nfcdev) || status == NULL)
+		return -EINVAL;
+	if (nfcdev->pm_chip == NULL)
+		return -ENODEV;
+
+	st = 0;
+	mutex_lock(&nfcdev->nfc_mutex);
+
+	if (!(mask & PM_NFC_CTRL_REQ))
+		goto read_test;
+
+	rc = pm8058_read(nfcdev->pm_chip, SSBI_REG_NFC_CTRL, &nfc_ctrl, 1);
+	if (rc) {
+		pr_err("%s: FAIL pm8058_read(): rc=%d (nfc_ctrl=0x%x)\n",
+		       __func__, rc, nfc_ctrl);
+		goto get_status_done;
+	}
+
+read_test:
+	if (!(mask & (PM_NFC_TEST_REQ | PM_NFC_TEST_STATUS)))
+		goto get_status_done;
+
+	rc = pm8058_read(nfcdev->pm_chip, SSBI_REG_NFC_TEST, &nfc_test, 1);
+	if (rc)
+		pr_err("%s: FAIL pm8058_read(): rc=%d (nfc_test=0x%x)\n",
+		       __func__, rc, nfc_test);
+
+get_status_done:
+	st = nfc_ctrl;
+	st |= nfc_test << 8;
+	*status = st;
+
+	mutex_unlock(&nfcdev->nfc_mutex);
+	return 0;
+}
+EXPORT_SYMBOL(pm8058_nfc_get_status);
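+
+/*
+ * Illustrative usage sketch: read back the NFC_CTRL byte.  The status word
+ * packs NFC_CTRL in bits 7..0 and NFC_TEST in bits 15..8, as assembled
+ * above.
+ *
+ *	struct pm8058_nfc_device *nfc = pm8058_nfc_request();
+ *	u32 status;
+ *
+ *	if (nfc && !pm8058_nfc_get_status(nfc, PM_NFC_CTRL_REQ, &status))
+ *		pr_info("NFC_CTRL = 0x%02x\n", status & 0xff);
+ */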
+
+/*
+ * pm8058_nfc_free - free the NFC device
+ */
+void pm8058_nfc_free(struct pm8058_nfc_device *nfcdev)
+{
+	/* Disable all signals */
+	pm8058_nfc_config(nfcdev, PM_NFC_CTRL_REQ, 0);
+}
+EXPORT_SYMBOL(pm8058_nfc_free);
+
+#if defined(CONFIG_DEBUG_FS)
+static int pm8058_nfc_debug_set(void *data, u64 val)
+{
+	struct pm8058_nfc_device *nfcdev;
+	u32	mask, control;
+	int	rc;
+
+	nfcdev = (struct pm8058_nfc_device *)data;
+	control = (u32)val & 0xffff;
+	mask = ((u32)val >> 16) & 0xffff;
+	rc = pm8058_nfc_config(nfcdev, mask, control);
+	if (rc)
+		pr_err("%s: ERR pm8058_nfc_config: rc=%d, "
+		       "[mask, control]=[0x%x, 0x%x]\n",
+		       __func__, rc, mask, control);
+
+	return 0;
+}
+
+static int pm8058_nfc_debug_get(void *data, u64 *val)
+{
+	struct pm8058_nfc_device *nfcdev;
+	u32	status;
+	int	rc;
+
+	nfcdev = (struct pm8058_nfc_device *)data;
+	rc = pm8058_nfc_get_status(nfcdev, (u32)-1, &status);
+	if (rc)
+		pr_err("%s: ERR pm8058_nfc_get_status: rc=%d, status=0x%x\n",
+		       __func__, rc, status);
+
+	if (val)
+		*val = (u64)status;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(pm8058_nfc_fops, pm8058_nfc_debug_get,
+			pm8058_nfc_debug_set, "%llu\n");
+
+static int pm8058_nfc_debug_init(struct pm8058_nfc_device *nfcdev)
+{
+	struct dentry *dent;
+
+	dent = debugfs_create_file("pm8058-nfc", 0644, NULL,
+				   (void *)nfcdev, &pm8058_nfc_fops);
+
+	if (dent == NULL || IS_ERR(dent))
+		pr_err("%s: ERR debugfs_create_file: dent=0x%x\n",
+		       __func__, (unsigned)dent);
+
+	nfcdev->dent = dent;
+	return 0;
+}
+#endif
+
+static int __devinit pmic8058_nfc_probe(struct platform_device *pdev)
+{
+	struct pm8058_chip		*pm_chip;
+	struct pm8058_nfc_device	*nfcdev;
+
+	pm_chip = dev_get_drvdata(pdev->dev.parent);
+	if (pm_chip == NULL) {
+		pr_err("%s: no parent data passed in.\n", __func__);
+		return -EFAULT;
+	}
+
+	nfcdev = kzalloc(sizeof *nfcdev, GFP_KERNEL);
+	if (nfcdev == NULL) {
+		pr_err("%s: kzalloc() failed.\n", __func__);
+		return -ENOMEM;
+	}
+
+	mutex_init(&nfcdev->nfc_mutex);
+
+	nfcdev->pm_chip = pm_chip;
+	nfc_dev = nfcdev;
+	platform_set_drvdata(pdev, nfcdev);
+
+#if defined(CONFIG_DEBUG_FS)
+	pm8058_nfc_debug_init(nfc_dev);
+#endif
+
+	pr_notice("%s: OK\n", __func__);
+	return 0;
+}
+
+static int __devexit pmic8058_nfc_remove(struct platform_device *pdev)
+{
+	struct pm8058_nfc_device *nfcdev = platform_get_drvdata(pdev);
+
+#if defined(CONFIG_DEBUG_FS)
+	debugfs_remove(nfcdev->dent);
+#endif
+
+	platform_set_drvdata(pdev, nfcdev->pm_chip);
+	kfree(nfcdev);
+	return 0;
+}
+
+static struct platform_driver pmic8058_nfc_driver = {
+	.probe		= pmic8058_nfc_probe,
+	.remove		= __devexit_p(pmic8058_nfc_remove),
+	.driver		= {
+		.name = "pm8058-nfc",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init pm8058_nfc_init(void)
+{
+	return platform_driver_register(&pmic8058_nfc_driver);
+}
+
+static void __exit pm8058_nfc_exit(void)
+{
+	platform_driver_unregister(&pmic8058_nfc_driver);
+}
+
+module_init(pm8058_nfc_init);
+module_exit(pm8058_nfc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC8058 NFC driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:pmic8058-nfc");
diff --git a/drivers/misc/pmic8058-pwm.c b/drivers/misc/pmic8058-pwm.c
new file mode 100644
index 0000000..2c04bdc
--- /dev/null
+++ b/drivers/misc/pmic8058-pwm.c
@@ -0,0 +1,926 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * Qualcomm PMIC8058 PWM driver
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/pwm.h>
+#include <linux/mfd/pmic8058.h>
+#include <linux/pmic8058-pwm.h>
+#include <linux/slab.h>
+
+#define	PM8058_LPG_BANKS		8
+#define	PM8058_PWM_CHANNELS		PM8058_LPG_BANKS	/* MAX=8 */
+
+#define	PM8058_LPG_CTL_REGS		7
+
+/* PMIC8058 LPG/PWM */
+#define	SSBI_REG_ADDR_LPG_CTL_BASE	0x13C
+#define	SSBI_REG_ADDR_LPG_CTL(n)	(SSBI_REG_ADDR_LPG_CTL_BASE + (n))
+#define	SSBI_REG_ADDR_LPG_BANK_SEL	0x143
+#define	SSBI_REG_ADDR_LPG_BANK_EN	0x144
+#define	SSBI_REG_ADDR_LPG_LUT_CFG0	0x145
+#define	SSBI_REG_ADDR_LPG_LUT_CFG1	0x146
+#define	SSBI_REG_ADDR_LPG_TEST		0x147
+
+/* Control 0 */
+#define	PM8058_PWM_1KHZ_COUNT_MASK	0xF0
+#define	PM8058_PWM_1KHZ_COUNT_SHIFT	4
+
+#define	PM8058_PWM_1KHZ_COUNT_MAX	15
+
+#define	PM8058_PWM_OUTPUT_EN		0x08
+#define	PM8058_PWM_PWM_EN		0x04
+#define	PM8058_PWM_RAMP_GEN_EN		0x02
+#define	PM8058_PWM_RAMP_START		0x01
+
+#define	PM8058_PWM_PWM_START		(PM8058_PWM_OUTPUT_EN \
+					| PM8058_PWM_PWM_EN)
+#define	PM8058_PWM_RAMP_GEN_START	(PM8058_PWM_RAMP_GEN_EN \
+					| PM8058_PWM_RAMP_START)
+
+/* Control 1 */
+#define	PM8058_PWM_REVERSE_EN		0x80
+#define	PM8058_PWM_BYPASS_LUT		0x40
+#define	PM8058_PWM_HIGH_INDEX_MASK	0x3F
+
+/* Control 2 */
+#define	PM8058_PWM_LOOP_EN		0x80
+#define	PM8058_PWM_RAMP_UP		0x40
+#define	PM8058_PWM_LOW_INDEX_MASK	0x3F
+
+/* Control 3 */
+#define	PM8058_PWM_VALUE_BIT7_0		0xFF
+#define	PM8058_PWM_VALUE_BIT5_0		0x3F
+
+/* Control 4 */
+#define	PM8058_PWM_VALUE_BIT8		0x80
+
+#define	PM8058_PWM_CLK_SEL_MASK		0x60
+#define	PM8058_PWM_CLK_SEL_SHIFT	5
+
+#define	PM8058_PWM_CLK_SEL_NO		0
+#define	PM8058_PWM_CLK_SEL_1KHZ		1
+#define	PM8058_PWM_CLK_SEL_32KHZ	2
+#define	PM8058_PWM_CLK_SEL_19P2MHZ	3
+
+#define	PM8058_PWM_PREDIVIDE_MASK	0x18
+#define	PM8058_PWM_PREDIVIDE_SHIFT	3
+
+#define	PM8058_PWM_PREDIVIDE_2		0
+#define	PM8058_PWM_PREDIVIDE_3		1
+#define	PM8058_PWM_PREDIVIDE_5		2
+#define	PM8058_PWM_PREDIVIDE_6		3
+
+#define	PM8058_PWM_M_MASK	0x07
+#define	PM8058_PWM_M_MIN	0
+#define	PM8058_PWM_M_MAX	7
+
+/* Control 5 */
+#define	PM8058_PWM_PAUSE_COUNT_HI_MASK		0xFC
+#define	PM8058_PWM_PAUSE_COUNT_HI_SHIFT		2
+
+#define	PM8058_PWM_PAUSE_ENABLE_HIGH		0x02
+#define	PM8058_PWM_SIZE_9_BIT			0x01
+
+/* Control 6 */
+#define	PM8058_PWM_PAUSE_COUNT_LO_MASK		0xFC
+#define	PM8058_PWM_PAUSE_COUNT_LO_SHIFT		2
+
+#define	PM8058_PWM_PAUSE_ENABLE_LOW		0x02
+#define	PM8058_PWM_RESERVED			0x01
+
+#define	PM8058_PWM_PAUSE_COUNT_MAX		56 /* < 2^6 = 64 */
+
+/* LUT_CFG1 */
+#define	PM8058_PWM_LUT_READ			0x40
+
+/* TEST */
+#define	PM8058_PWM_DTEST_MASK		0x38
+#define	PM8058_PWM_DTEST_SHIFT		3
+
+#define	PM8058_PWM_DTEST_BANK_MASK	0x07
+
+/* PWM frequency support
+ *
+ * PWM Frequency = Clock Frequency / (N * T)
+ * 	or
+ * PWM Period = Clock Period * (N * T)
+ * 	where
+ * N = 2^9 or 2^6 for 9-bit or 6-bit PWM size
+ * T = Pre-divide * 2^m, m = 0..7 (exponent)
+ *
+ * We use this formula to figure out m for the best pre-divide and clock:
+ * (PWM Period / N) / 2^m = (Pre-divide * Clock Period)
+ */
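+/*
+ * Worked example (illustrative, using the integer-nanosecond tables below):
+ * for a requested period of 1 ms with 6-bit PWM size,
+ * PWM Period / N = 1,000,000 ns / 64 ~= 15625 ns. With the 19.2 MHz clock
+ * (~52 ns period) and pre-divide 5, 15625 / 2^6 ~= 244 ns, just under
+ * 5 * 52 = 260 ns, so the search in pm8058_pwm_calc_period() would settle
+ * on clk = 19.2 MHz, pre-divide = 5, m = 6.
+ */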
+#define	NUM_CLOCKS	3
+
+#define	NSEC_1000HZ	(NSEC_PER_SEC / 1000)
+#define	NSEC_32768HZ	(NSEC_PER_SEC / 32768)
+#define	NSEC_19P2MHZ	(NSEC_PER_SEC / 19200000)
+
+#define	CLK_PERIOD_MIN	NSEC_19P2MHZ
+#define	CLK_PERIOD_MAX	NSEC_1000HZ
+
+#define	NUM_PRE_DIVIDE	3	/* No default support for pre-divide = 6 */
+
+#define	PRE_DIVIDE_0		2
+#define	PRE_DIVIDE_1		3
+#define	PRE_DIVIDE_2		5
+
+#define	PRE_DIVIDE_MIN		PRE_DIVIDE_0
+#define	PRE_DIVIDE_MAX		PRE_DIVIDE_2
+
+static char *clks[NUM_CLOCKS] = {
+	"1K", "32768", "19.2M"
+};
+
+static unsigned pre_div[NUM_PRE_DIVIDE] = {
+	PRE_DIVIDE_0, PRE_DIVIDE_1, PRE_DIVIDE_2
+};
+
+static unsigned int pt_t[NUM_PRE_DIVIDE][NUM_CLOCKS] = {
+	{	PRE_DIVIDE_0 * NSEC_1000HZ,
+		PRE_DIVIDE_0 * NSEC_32768HZ,
+		PRE_DIVIDE_0 * NSEC_19P2MHZ,
+	},
+	{	PRE_DIVIDE_1 * NSEC_1000HZ,
+		PRE_DIVIDE_1 * NSEC_32768HZ,
+		PRE_DIVIDE_1 * NSEC_19P2MHZ,
+	},
+	{	PRE_DIVIDE_2 * NSEC_1000HZ,
+		PRE_DIVIDE_2 * NSEC_32768HZ,
+		PRE_DIVIDE_2 * NSEC_19P2MHZ,
+	},
+};
+
+#define	MIN_MPT	((PRE_DIVIDE_MIN * CLK_PERIOD_MIN) << PM8058_PWM_M_MIN)
+#define	MAX_MPT	((PRE_DIVIDE_MAX * CLK_PERIOD_MAX) << PM8058_PWM_M_MAX)
+
+/* Private data */
+struct pm8058_pwm_chip;
+
+struct pwm_device {
+	int			pwm_id;		/* = bank/channel id */
+	int			in_use;
+	const char		*label;
+	int			pwm_period;
+	int			pwm_duty;
+	u8			pwm_ctl[PM8058_LPG_CTL_REGS];
+	int			irq;
+	struct pm8058_pwm_chip	*chip;
+};
+
+struct pm8058_pwm_chip {
+	struct pwm_device	pwm_dev[PM8058_PWM_CHANNELS];
+	u8			bank_mask;
+	struct mutex		pwm_mutex;
+	struct pm8058_chip	*pm_chip;
+	struct pm8058_pwm_pdata	*pdata;
+};
+
+static struct pm8058_pwm_chip	*pwm_chip;
+
+struct pw8058_pwm_config {
+	int	pwm_size;	/* round up to 6 or 9 for 6/9-bit PWM SIZE */
+	int	clk;
+	int	pre_div;
+	int	pre_div_exp;
+	int	pwm_value;
+	int	bypass_lut;
+
+	/* LUT parameters when bypass_lut is 0 */
+	int	lut_duty_ms;
+	int	lut_lo_index;
+	int	lut_hi_index;
+	int	lut_pause_hi;
+	int	lut_pause_lo;
+	int	flags;
+};
+
+static u16 duty_msec[PM8058_PWM_1KHZ_COUNT_MAX + 1] = {
+	0, 1, 2, 3, 4, 6, 8, 16, 18, 24, 32, 36, 64, 128, 256, 512
+};
+
+static u16 pause_count[PM8058_PWM_PAUSE_COUNT_MAX + 1] = {
+	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+	23, 28, 31, 42, 47, 56, 63, 83, 94, 111, 125, 167, 188, 222, 250, 333,
+	375, 500, 667, 750, 800, 900, 1000, 1100,
+	1200, 1300, 1400, 1500, 1600, 1800, 2000, 2500,
+	3000, 3500, 4000, 4500, 5000, 5500, 6000, 6500,
+	7000
+};
+
+/* Internal functions */
+static int pm8058_pwm_bank_enable(struct pwm_device *pwm, int enable)
+{
+	int	rc;
+	u8	reg;
+	struct pm8058_pwm_chip	*chip;
+
+	chip = pwm->chip;
+
+	if (enable)
+		reg = chip->bank_mask | (1 << pwm->pwm_id);
+	else
+		reg = chip->bank_mask & ~(1 << pwm->pwm_id);
+
+	rc = pm8058_write(chip->pm_chip, SSBI_REG_ADDR_LPG_BANK_EN, &reg, 1);
+	if (rc) {
+		pr_err("%s: pm8058_write(): rc=%d (Enable LPG Bank)\n",
+		       __func__, rc);
+		goto bail_out;
+	}
+	chip->bank_mask = reg;
+
+bail_out:
+	return rc;
+}
+
+static int pm8058_pwm_bank_sel(struct pwm_device *pwm)
+{
+	int	rc;
+	u8	reg;
+
+	reg = pwm->pwm_id;
+	rc = pm8058_write(pwm->chip->pm_chip, SSBI_REG_ADDR_LPG_BANK_SEL,
+			     &reg, 1);
+	if (rc)
+		pr_err("%s: pm8058_write(): rc=%d (Select PWM Bank)\n",
+		       __func__, rc);
+	return rc;
+}
+
+static int pm8058_pwm_start(struct pwm_device *pwm, int start, int ramp_start)
+{
+	int	rc;
+	u8	reg;
+
+	if (start) {
+		reg = pwm->pwm_ctl[0] | PM8058_PWM_PWM_START;
+		if (ramp_start)
+			reg |= PM8058_PWM_RAMP_GEN_START;
+		else
+			reg &= ~PM8058_PWM_RAMP_GEN_START;
+	} else {
+		reg = pwm->pwm_ctl[0] & ~PM8058_PWM_PWM_START;
+		reg &= ~PM8058_PWM_RAMP_GEN_START;
+	}
+
+	rc = pm8058_write(pwm->chip->pm_chip, SSBI_REG_ADDR_LPG_CTL(0),
+			  &reg, 1);
+	if (rc)
+		pr_err("%s: pm8058_write(): rc=%d (Enable PWM Ctl 0)\n",
+		       __func__, rc);
+	else
+		pwm->pwm_ctl[0] = reg;
+	return rc;
+}
+
+static void pm8058_pwm_calc_period(unsigned int period_us,
+					   struct pw8058_pwm_config *pwm_conf)
+{
+	int	n, m, clk, div;
+	int	best_m, best_div, best_clk;
+	int	last_err, cur_err, better_err, better_m;
+	unsigned int	tmp_p, last_p, min_err, period_n;
+
+	/* PWM Period / N : handle underflow or overflow */
+	if (period_us < (PM_PWM_PERIOD_MAX / NSEC_PER_USEC))
+		period_n = (period_us * NSEC_PER_USEC) >> 6;
+	else
+		period_n = (period_us >> 6) * NSEC_PER_USEC;
+	if (period_n >= MAX_MPT) {
+		n = 9;
+		period_n >>= 3;
+	} else
+		n = 6;
+
+	min_err = MAX_MPT;
+	best_m = 0;
+	best_clk = 0;
+	best_div = 0;
+	for (clk = 0; clk < NUM_CLOCKS; clk++) {
+		for (div = 0; div < NUM_PRE_DIVIDE; div++) {
+			tmp_p = period_n;
+			last_p = tmp_p;
+			for (m = 0; m <= PM8058_PWM_M_MAX; m++) {
+				if (tmp_p <= pt_t[div][clk]) {
+					/* Found local best */
+					if (!m) {
+						better_err = pt_t[div][clk] -
+							tmp_p;
+						better_m = m;
+					} else {
+						last_err = last_p -
+							pt_t[div][clk];
+						cur_err = pt_t[div][clk] -
+							tmp_p;
+
+						if (cur_err < last_err) {
+							better_err = cur_err;
+							better_m = m;
+						} else {
+							better_err = last_err;
+							better_m = m - 1;
+						}
+					}
+
+					if (better_err < min_err) {
+						min_err = better_err;
+						best_m = better_m;
+						best_clk = clk;
+						best_div = div;
+					}
+					break;
+				} else {
+					last_p = tmp_p;
+					tmp_p >>= 1;
+				}
+			}
+		}
+	}
+
+	pwm_conf->pwm_size = n;
+	pwm_conf->clk = best_clk;
+	pwm_conf->pre_div = best_div;
+	pwm_conf->pre_div_exp = best_m;
+
+	pr_debug("%s: period=%u: n=%d, m=%d, clk[%d]=%s, div[%d]=%d\n",
+		 __func__, (unsigned)period_us, n, best_m,
+		 best_clk, clks[best_clk], best_div, pre_div[best_div]);
+}
+
+static int pm8058_pwm_configure(struct pwm_device *pwm,
+			 struct pw8058_pwm_config *pwm_conf)
+{
+	int	i, rc, len;
+	u8	reg, ramp_enabled = 0;
+
+	reg = (pwm_conf->pwm_size > 6) ? PM8058_PWM_SIZE_9_BIT : 0;
+	pwm->pwm_ctl[5] = reg;
+
+	reg = ((pwm_conf->clk + 1) << PM8058_PWM_CLK_SEL_SHIFT)
+		& PM8058_PWM_CLK_SEL_MASK;
+	reg |= (pwm_conf->pre_div << PM8058_PWM_PREDIVIDE_SHIFT)
+		& PM8058_PWM_PREDIVIDE_MASK;
+	reg |= pwm_conf->pre_div_exp & PM8058_PWM_M_MASK;
+	pwm->pwm_ctl[4] = reg;
+
+	if (pwm_conf->bypass_lut) {
+		pwm->pwm_ctl[0] &= PM8058_PWM_PWM_START; /* keep enabled */
+		pwm->pwm_ctl[1] = PM8058_PWM_BYPASS_LUT;
+		pwm->pwm_ctl[2] = 0;
+
+		if (pwm_conf->pwm_size > 6) {
+			pwm->pwm_ctl[3] = pwm_conf->pwm_value
+						& PM8058_PWM_VALUE_BIT7_0;
+			pwm->pwm_ctl[4] |= (pwm_conf->pwm_value >> 1)
+						& PM8058_PWM_VALUE_BIT8;
+		} else {
+			pwm->pwm_ctl[3] = pwm_conf->pwm_value
+						& PM8058_PWM_VALUE_BIT5_0;
+		}
+
+		len = 6;
+	} else {
+		int	pause_cnt, j;
+
+		/* Linear search for duty time */
+		for (i = 0; i < PM8058_PWM_1KHZ_COUNT_MAX; i++) {
+			if (duty_msec[i] >= pwm_conf->lut_duty_ms)
+				break;
+		}
+
+		ramp_enabled = pwm->pwm_ctl[0] & PM8058_PWM_RAMP_GEN_START;
+		pwm->pwm_ctl[0] &= PM8058_PWM_PWM_START; /* keep enabled */
+		pwm->pwm_ctl[0] |= (i << PM8058_PWM_1KHZ_COUNT_SHIFT) &
+					PM8058_PWM_1KHZ_COUNT_MASK;
+		pwm->pwm_ctl[1] = pwm_conf->lut_hi_index &
+					PM8058_PWM_HIGH_INDEX_MASK;
+		pwm->pwm_ctl[2] = pwm_conf->lut_lo_index &
+					PM8058_PWM_LOW_INDEX_MASK;
+
+		if (pwm_conf->flags & PM_PWM_LUT_REVERSE)
+			pwm->pwm_ctl[1] |= PM8058_PWM_REVERSE_EN;
+		if (pwm_conf->flags & PM_PWM_LUT_RAMP_UP)
+			pwm->pwm_ctl[2] |= PM8058_PWM_RAMP_UP;
+		if (pwm_conf->flags & PM_PWM_LUT_LOOP)
+			pwm->pwm_ctl[2] |= PM8058_PWM_LOOP_EN;
+
+		/* Pause time */
+		if (pwm_conf->flags & PM_PWM_LUT_PAUSE_HI_EN) {
+			/* Linear search for pause time */
+			pause_cnt = (pwm_conf->lut_pause_hi + duty_msec[i] / 2)
+					/ duty_msec[i];
+			for (j = 0; j < PM8058_PWM_PAUSE_COUNT_MAX; j++) {
+				if (pause_count[j] >= pause_cnt)
+					break;
+			}
+			pwm->pwm_ctl[5] = (j <<
+					   PM8058_PWM_PAUSE_COUNT_HI_SHIFT) &
+						PM8058_PWM_PAUSE_COUNT_HI_MASK;
+			pwm->pwm_ctl[5] |= PM8058_PWM_PAUSE_ENABLE_HIGH;
+		} else
+			pwm->pwm_ctl[5] = 0;
+
+		if (pwm_conf->flags & PM_PWM_LUT_PAUSE_LO_EN) {
+			/* Linear search for pause time */
+			pause_cnt = (pwm_conf->lut_pause_lo + duty_msec[i] / 2)
+					/ duty_msec[i];
+			for (j = 0; j < PM8058_PWM_PAUSE_COUNT_MAX; j++) {
+				if (pause_count[j] >= pause_cnt)
+					break;
+			}
+			pwm->pwm_ctl[6] = (j <<
+					   PM8058_PWM_PAUSE_COUNT_LO_SHIFT) &
+						PM8058_PWM_PAUSE_COUNT_LO_MASK;
+			pwm->pwm_ctl[6] |= PM8058_PWM_PAUSE_ENABLE_LOW;
+		} else
+			pwm->pwm_ctl[6] = 0;
+
+		len = 7;
+	}
+
+	pm8058_pwm_bank_sel(pwm);
+
+	for (i = 0; i < len; i++) {
+		rc = pm8058_write(pwm->chip->pm_chip,
+				  SSBI_REG_ADDR_LPG_CTL(i),
+				  &pwm->pwm_ctl[i], 1);
+		if (rc) {
+			pr_err("%s: pm8058_write(): rc=%d (PWM Ctl[%d])\n",
+			       __func__, rc, i);
+			break;
+		}
+	}
+
+	if (ramp_enabled) {
+		pwm->pwm_ctl[0] |= ramp_enabled;
+		pm8058_write(pwm->chip->pm_chip, SSBI_REG_ADDR_LPG_CTL(0),
+			     &pwm->pwm_ctl[0], 1);
+	}
+
+	return rc;
+}
+
+/* APIs */
+/*
+ * pwm_request - request a PWM device
+ */
+struct pwm_device *pwm_request(int pwm_id, const char *label)
+{
+	struct pwm_device	*pwm;
+
+	if (pwm_id >= PM8058_PWM_CHANNELS || pwm_id < 0)
+		return ERR_PTR(-EINVAL);
+	if (pwm_chip == NULL)
+		return ERR_PTR(-ENODEV);
+
+	mutex_lock(&pwm_chip->pwm_mutex);
+	pwm = &pwm_chip->pwm_dev[pwm_id];
+	if (!pwm->in_use) {
+		pwm->in_use = 1;
+		pwm->label = label;
+
+		if (pwm_chip->pdata && pwm_chip->pdata->config)
+			pwm_chip->pdata->config(pwm, pwm_id, 1);
+	} else
+		pwm = ERR_PTR(-EBUSY);
+	mutex_unlock(&pwm_chip->pwm_mutex);
+
+	return pwm;
+}
+EXPORT_SYMBOL(pwm_request);
+
+/*
+ * pwm_free - free a PWM device
+ */
+void pwm_free(struct pwm_device *pwm)
+{
+	if (pwm == NULL || IS_ERR(pwm) || pwm->chip == NULL)
+		return;
+
+	mutex_lock(&pwm->chip->pwm_mutex);
+	if (pwm->in_use) {
+		pm8058_pwm_bank_sel(pwm);
+		pm8058_pwm_start(pwm, 0, 0);
+
+		if (pwm->chip->pdata && pwm->chip->pdata->config)
+			pwm->chip->pdata->config(pwm, pwm->pwm_id, 0);
+
+		pwm->in_use = 0;
+		pwm->label = NULL;
+	}
+	pm8058_pwm_bank_enable(pwm, 0);
+	mutex_unlock(&pwm->chip->pwm_mutex);
+}
+EXPORT_SYMBOL(pwm_free);
+
+/*
+ * pwm_config - change a PWM device configuration
+ *
+ * @pwm: the PWM device
+ * @period_us: period in microseconds
+ * @duty_us: duty cycle in microseconds
+ */
+int pwm_config(struct pwm_device *pwm, int duty_us, int period_us)
+{
+	struct pw8058_pwm_config	pwm_conf;
+	unsigned int max_pwm_value, tmp;
+	int	rc;
+
+	if (pwm == NULL || IS_ERR(pwm) ||
+		(unsigned)duty_us > (unsigned)period_us ||
+		(unsigned)period_us > PM_PWM_PERIOD_MAX ||
+		(unsigned)period_us < PM_PWM_PERIOD_MIN)
+		return -EINVAL;
+	if (pwm->chip == NULL)
+		return -ENODEV;
+
+	mutex_lock(&pwm->chip->pwm_mutex);
+
+	if (!pwm->in_use) {
+		rc = -EINVAL;
+		goto out_unlock;
+	}
+
+	pm8058_pwm_calc_period(period_us, &pwm_conf);
+
+	/* Figure out pwm_value with overflow handling */
+	if ((unsigned)period_us > (1 << pwm_conf.pwm_size)) {
+		tmp = period_us;
+		tmp >>= pwm_conf.pwm_size;
+		pwm_conf.pwm_value = (unsigned)duty_us / tmp;
+	} else {
+		tmp = duty_us;
+		tmp <<= pwm_conf.pwm_size;
+		pwm_conf.pwm_value = tmp / (unsigned)period_us;
+	}
+	max_pwm_value = (1 << pwm_conf.pwm_size) - 1;
+	if (pwm_conf.pwm_value > max_pwm_value)
+		pwm_conf.pwm_value = max_pwm_value;
+
+	pwm_conf.bypass_lut = 1;
+
+	pr_debug("%s: duty/period=%u/%u usec: pwm_value=%d (of %d)\n",
+		 __func__, (unsigned)duty_us, (unsigned)period_us,
+		 pwm_conf.pwm_value, 1 << pwm_conf.pwm_size);
+
+	rc = pm8058_pwm_configure(pwm, &pwm_conf);
+
+out_unlock:
+	mutex_unlock(&pwm->chip->pwm_mutex);
+	return rc;
+}
+EXPORT_SYMBOL(pwm_config);
+
+/*
+ * pwm_enable - start a PWM output toggling
+ */
+int pwm_enable(struct pwm_device *pwm)
+{
+	int	rc;
+
+	if (pwm == NULL || IS_ERR(pwm))
+		return -EINVAL;
+	if (pwm->chip == NULL)
+		return -ENODEV;
+
+	mutex_lock(&pwm->chip->pwm_mutex);
+	if (!pwm->in_use)
+		rc = -EINVAL;
+	else {
+		if (pwm->chip->pdata && pwm->chip->pdata->enable)
+			pwm->chip->pdata->enable(pwm, pwm->pwm_id, 1);
+
+		rc = pm8058_pwm_bank_enable(pwm, 1);
+
+		pm8058_pwm_bank_sel(pwm);
+		pm8058_pwm_start(pwm, 1, 0);
+	}
+	mutex_unlock(&pwm->chip->pwm_mutex);
+	return rc;
+}
+EXPORT_SYMBOL(pwm_enable);
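+/*
+ * Illustrative usage of the PWM API above (the channel id, label and
+ * timings are hypothetical, not taken from any board file):
+ *
+ *	struct pwm_device *pwm = pwm_request(0, "sample");
+ *
+ *	if (!IS_ERR(pwm)) {
+ *		pwm_config(pwm, 500, 1000);	(50% duty, 1 ms period)
+ *		pwm_enable(pwm);
+ *	}
+ */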
+
+/*
+ * pwm_disable - stop a PWM output toggling
+ */
+void pwm_disable(struct pwm_device *pwm)
+{
+	if (pwm == NULL || IS_ERR(pwm) || pwm->chip == NULL)
+		return;
+
+	mutex_lock(&pwm->chip->pwm_mutex);
+	if (pwm->in_use) {
+		pm8058_pwm_bank_sel(pwm);
+		pm8058_pwm_start(pwm, 0, 0);
+
+		pm8058_pwm_bank_enable(pwm, 0);
+
+		if (pwm->chip->pdata && pwm->chip->pdata->enable)
+			pwm->chip->pdata->enable(pwm, pwm->pwm_id, 0);
+	}
+	mutex_unlock(&pwm->chip->pwm_mutex);
+}
+EXPORT_SYMBOL(pwm_disable);
+
+/*
+ * pm8058_pwm_lut_config - change a PWM device configuration to use LUT
+ *
+ * @pwm: the PWM device
+ * @period_us: period in microseconds
+ * @duty_pct: array of duty cycles in percent, e.g. 20, 50
+ * @duty_time_ms: time for each duty cycle in milliseconds
+ * @start_idx: start index in lookup table, from 0 to MAX-1
+ * @idx_len: number of LUT entries to use
+ * @pause_lo: pause time in milliseconds at the low index
+ * @pause_hi: pause time in milliseconds at the high index
+ * @flags: control flags
+ *
+ */
+int pm8058_pwm_lut_config(struct pwm_device *pwm, int period_us,
+			  int duty_pct[], int duty_time_ms, int start_idx,
+			  int idx_len, int pause_lo, int pause_hi, int flags)
+{
+	struct pw8058_pwm_config	pwm_conf;
+	unsigned int pwm_value, max_pwm_value;
+	u8	cfg0, cfg1;
+	int	i, len;
+	int	rc;
+
+	if (pwm == NULL || IS_ERR(pwm) || !idx_len)
+		return -EINVAL;
+	if (duty_pct == NULL && !(flags & PM_PWM_LUT_NO_TABLE))
+		return -EINVAL;
+	if (pwm->chip == NULL)
+		return -ENODEV;
+	if (idx_len >= PM_PWM_LUT_SIZE && start_idx)
+		return -EINVAL;
+	if ((start_idx + idx_len) > PM_PWM_LUT_SIZE)
+		return -EINVAL;
+	if ((unsigned)period_us > PM_PWM_PERIOD_MAX ||
+		(unsigned)period_us < PM_PWM_PERIOD_MIN)
+		return -EINVAL;
+
+	mutex_lock(&pwm->chip->pwm_mutex);
+
+	if (!pwm->in_use) {
+		rc = -EINVAL;
+		goto out_unlock;
+	}
+
+	pm8058_pwm_calc_period(period_us, &pwm_conf);
+
+	len = (idx_len > PM_PWM_LUT_SIZE) ? PM_PWM_LUT_SIZE : idx_len;
+
+	if (flags & PM_PWM_LUT_NO_TABLE)
+		goto after_table_write;
+
+	max_pwm_value = (1 << pwm_conf.pwm_size) - 1;
+	for (i = 0; i < len; i++) {
+		pwm_value = (duty_pct[i] << pwm_conf.pwm_size) / 100;
+		/* Avoid overflow */
+		if (pwm_value > max_pwm_value)
+			pwm_value = max_pwm_value;
+		cfg0 = pwm_value & 0xff;
+		cfg1 = (pwm_value >> 1) & 0x80;
+		cfg1 |= start_idx + i;
+
+		pr_debug("%s: %d: pwm=%d\n", __func__, i, pwm_value);
+
+		pm8058_write(pwm->chip->pm_chip,
+			     SSBI_REG_ADDR_LPG_LUT_CFG0,
+			     &cfg0, 1);
+		pm8058_write(pwm->chip->pm_chip,
+			     SSBI_REG_ADDR_LPG_LUT_CFG1,
+			     &cfg1, 1);
+	}
+
+after_table_write:
+	pwm_conf.lut_duty_ms = duty_time_ms;
+	pwm_conf.lut_lo_index = start_idx;
+	pwm_conf.lut_hi_index = start_idx + len - 1;
+	pwm_conf.lut_pause_lo = pause_lo;
+	pwm_conf.lut_pause_hi = pause_hi;
+	pwm_conf.flags = flags;
+	pwm_conf.bypass_lut = 0;
+
+	rc = pm8058_pwm_configure(pwm, &pwm_conf);
+
+out_unlock:
+	mutex_unlock(&pwm->chip->pwm_mutex);
+	return rc;
+}
+EXPORT_SYMBOL(pm8058_pwm_lut_config);
+
+/*
+ * pm8058_pwm_lut_enable - control a PWM device to start/stop LUT ramp
+ *
+ * @pwm: the PWM device
+ * @start: to start (1), or stop (0)
+ */
+int pm8058_pwm_lut_enable(struct pwm_device *pwm, int start)
+{
+	if (pwm == NULL || IS_ERR(pwm))
+		return -EINVAL;
+	if (pwm->chip == NULL)
+		return -ENODEV;
+
+	mutex_lock(&pwm->chip->pwm_mutex);
+	if (start) {
+		pm8058_pwm_bank_enable(pwm, 1);
+
+		pm8058_pwm_bank_sel(pwm);
+		pm8058_pwm_start(pwm, 1, 1);
+	} else {
+		pm8058_pwm_bank_sel(pwm);
+		pm8058_pwm_start(pwm, 0, 0);
+
+		pm8058_pwm_bank_enable(pwm, 0);
+	}
+	mutex_unlock(&pwm->chip->pwm_mutex);
+	return 0;
+}
+EXPORT_SYMBOL(pm8058_pwm_lut_enable);
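+/*
+ * Illustrative LUT usage (duty table, timings and flags are hypothetical):
+ * ramp through four duty steps of 100 ms each, looping over LUT entries
+ * 0..3, then start the ramp generator:
+ *
+ *	int duty_pct[] = {10, 30, 60, 90};
+ *
+ *	pm8058_pwm_lut_config(pwm, 1000, duty_pct, 100, 0, 4, 0, 0,
+ *			      PM_PWM_LUT_LOOP | PM_PWM_LUT_RAMP_UP);
+ *	pm8058_pwm_lut_enable(pwm, 1);
+ */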
+
+#define SSBI_REG_ADDR_LED_BASE		0x131
+#define SSBI_REG_ADDR_LED(n)		(SSBI_REG_ADDR_LED_BASE + (n))
+#define SSBI_REG_ADDR_FLASH_BASE	0x48
+#define SSBI_REG_ADDR_FLASH_DRV_1	0xFB
+#define SSBI_REG_ADDR_FLASH(n)		(((n) < 2 ? \
+					    SSBI_REG_ADDR_FLASH_BASE + (n) : \
+					    SSBI_REG_ADDR_FLASH_DRV_1))
+
+#define PM8058_LED_CURRENT_SHIFT	3
+#define PM8058_LED_MODE_MASK		0x07
+
+#define PM8058_FLASH_CURRENT_SHIFT	4
+#define PM8058_FLASH_MODE_MASK		0x03
+#define PM8058_FLASH_MODE_NONE		0
+#define PM8058_FLASH_MODE_DTEST1	1
+#define PM8058_FLASH_MODE_DTEST2	2
+#define PM8058_FLASH_MODE_PWM		3
+
+int pm8058_pwm_config_led(struct pwm_device *pwm, int id,
+			  int mode, int max_current)
+{
+	int	rc;
+	u8	conf;
+
+	switch (id) {
+	case PM_PWM_LED_0:
+	case PM_PWM_LED_1:
+	case PM_PWM_LED_2:
+		conf = mode & PM8058_LED_MODE_MASK;
+		conf |= (max_current / 2) << PM8058_LED_CURRENT_SHIFT;
+		rc = pm8058_write(pwm->chip->pm_chip,
+				  SSBI_REG_ADDR_LED(id), &conf, 1);
+		break;
+
+	case PM_PWM_LED_KPD:
+	case PM_PWM_LED_FLASH:
+	case PM_PWM_LED_FLASH1:
+		switch (mode) {
+		case PM_PWM_CONF_PWM1:
+		case PM_PWM_CONF_PWM2:
+		case PM_PWM_CONF_PWM3:
+			conf = PM8058_FLASH_MODE_PWM;
+			break;
+		case PM_PWM_CONF_DTEST1:
+			conf = PM8058_FLASH_MODE_DTEST1;
+			break;
+		case PM_PWM_CONF_DTEST2:
+			conf = PM8058_FLASH_MODE_DTEST2;
+			break;
+		default:
+			conf = PM8058_FLASH_MODE_NONE;
+			break;
+		}
+		conf |= (max_current / 20) << PM8058_FLASH_CURRENT_SHIFT;
+		id -= PM_PWM_LED_KPD;
+		rc = pm8058_write(pwm->chip->pm_chip,
+				  SSBI_REG_ADDR_FLASH(id), &conf, 1);
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(pm8058_pwm_config_led);
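+/*
+ * Illustrative call (the current limit is hypothetical): route the keypad
+ * LED driver to PWM with a 40 mA limit. Per the shifts above, KPD/flash
+ * currents are encoded in 20 mA steps and the low-current LED drivers in
+ * 2 mA steps.
+ *
+ *	pm8058_pwm_config_led(pwm, PM_PWM_LED_KPD, PM_PWM_CONF_PWM1, 40);
+ */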
+
+int pm8058_pwm_set_dtest(struct pwm_device *pwm, int enable)
+{
+	int	rc;
+	u8	reg;
+
+	if (pwm == NULL || IS_ERR(pwm))
+		return -EINVAL;
+	if (pwm->chip == NULL)
+		return -ENODEV;
+
+	if (!pwm->in_use)
+		rc = -EINVAL;
+	else {
+		reg = pwm->pwm_id & PM8058_PWM_DTEST_BANK_MASK;
+		if (enable)
+			/* Only Test 1 available */
+			reg |= (1 << PM8058_PWM_DTEST_SHIFT) &
+				PM8058_PWM_DTEST_MASK;
+		rc = pm8058_write(pwm->chip->pm_chip, SSBI_REG_ADDR_LPG_TEST,
+				  &reg, 1);
+		if (rc)
+			pr_err("%s: pm8058_write(DTEST=0x%x): rc=%d\n",
+			       __func__, reg, rc);
+
+	}
+	return rc;
+}
+EXPORT_SYMBOL(pm8058_pwm_set_dtest);
+
+static int __devinit pmic8058_pwm_probe(struct platform_device *pdev)
+{
+	struct pm8058_chip	*pm_chip;
+	struct pm8058_pwm_chip	*chip;
+	int	i;
+
+	pm_chip = dev_get_drvdata(pdev->dev.parent);
+	if (pm_chip == NULL) {
+		pr_err("%s: no parent data passed in.\n", __func__);
+		return -EFAULT;
+	}
+
+	chip = kzalloc(sizeof *chip, GFP_KERNEL);
+	if (chip == NULL) {
+		pr_err("%s: kzalloc() failed.\n", __func__);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < PM8058_PWM_CHANNELS; i++) {
+		chip->pwm_dev[i].pwm_id = i;
+		chip->pwm_dev[i].chip = chip;
+	}
+
+	mutex_init(&chip->pwm_mutex);
+
+	chip->pdata = pdev->dev.platform_data;
+	chip->pm_chip = pm_chip;
+	pwm_chip = chip;
+	platform_set_drvdata(pdev, chip);
+
+	pr_notice("%s: OK\n", __func__);
+	return 0;
+}
+
+static int __devexit pmic8058_pwm_remove(struct platform_device *pdev)
+{
+	struct pm8058_pwm_chip	*chip = platform_get_drvdata(pdev);
+
+	platform_set_drvdata(pdev, NULL);
+	kfree(chip);
+	return 0;
+}
+
+static struct platform_driver pmic8058_pwm_driver = {
+	.probe		= pmic8058_pwm_probe,
+	.remove		= __devexit_p(pmic8058_pwm_remove),
+	.driver		= {
+		.name = "pm8058-pwm",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init pm8058_pwm_init(void)
+{
+	return platform_driver_register(&pmic8058_pwm_driver);
+}
+
+static void __exit pm8058_pwm_exit(void)
+{
+	platform_driver_unregister(&pmic8058_pwm_driver);
+}
+
+subsys_initcall(pm8058_pwm_init);
+module_exit(pm8058_pwm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC8058 PWM driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:pmic8058_pwm");
diff --git a/drivers/misc/pmic8058-upl.c b/drivers/misc/pmic8058-upl.c
new file mode 100644
index 0000000..ae0abd8
--- /dev/null
+++ b/drivers/misc/pmic8058-upl.c
@@ -0,0 +1,363 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * Qualcomm PMIC8058 UPL driver
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/mfd/pmic8058.h>
+#include <linux/pmic8058-upl.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+
+/* PMIC8058 UPL registers */
+#define	SSBI_REG_UPL_CTRL		0x17B
+#define	SSBI_REG_UPL_TRUTHTABLE1	0x17C
+#define	SSBI_REG_UPL_TRUTHTABLE2	0x17D
+
+struct pm8058_upl_device {
+	struct mutex		upl_mutex;
+	struct pm8058_chip	*pm_chip;
+#if defined(CONFIG_DEBUG_FS)
+	struct dentry		*dent;
+#endif
+};
+static struct pm8058_upl_device *upl_dev;
+
+/* APIs */
+
+/*
+ * pm8058_upl_request - request a handle to access UPL device
+ */
+struct pm8058_upl_device *pm8058_upl_request(void)
+{
+	return upl_dev;
+}
+EXPORT_SYMBOL(pm8058_upl_request);
+
+/*
+ * pm8058_upl_read_truthtable - read value currently stored in UPL truth table
+ *
+ * @upldev: the UPL device
+ * @truthtable: value read from UPL truth table
+ */
+int pm8058_upl_read_truthtable(struct pm8058_upl_device *upldev,
+				u16 *truthtable)
+{
+	int rc = 0;
+	u8 table[2] = {0, 0};
+
+	if (upldev == NULL || IS_ERR(upldev))
+		return -EINVAL;
+	if (upldev->pm_chip == NULL)
+		return -ENODEV;
+
+	mutex_lock(&upldev->upl_mutex);
+
+	rc = pm8058_read(upldev->pm_chip, SSBI_REG_UPL_TRUTHTABLE1,
+			&(table[0]), 1);
+	if (rc) {
+		pr_err("%s: FAIL pm8058_read(0x%X)=0x%02X: rc=%d\n",
+			__func__, SSBI_REG_UPL_TRUTHTABLE1, table[0], rc);
+		goto upl_read_done;
+	}
+
+	rc = pm8058_read(upldev->pm_chip, SSBI_REG_UPL_TRUTHTABLE2,
+			&(table[1]), 1);
+	if (rc)
+		pr_err("%s: FAIL pm8058_read(0x%X)=0x%02X: rc=%d\n",
+			__func__, SSBI_REG_UPL_TRUTHTABLE2, table[1], rc);
+upl_read_done:
+	mutex_unlock(&upldev->upl_mutex);
+	*truthtable = (((u16)table[1]) << 8) | table[0];
+	return rc;
+}
+EXPORT_SYMBOL(pm8058_upl_read_truthtable);
+
+/*
+ * pm8058_upl_write_truthtable - write value into UPL truth table
+ *
+ * @upldev: the UPL device
+ * @truthtable: value written to UPL truth table
+ *
+ * Each bit in parameter "truthtable" corresponds to the UPL output for a given
+ * set of input pin values. For example, if the input pins have the following
+ * values: A=1, B=1, C=1, D=0, then the UPL would output the value of bit 14
+ * (0b1110) in parameter "truthtable".
+ */
+int pm8058_upl_write_truthtable(struct pm8058_upl_device *upldev,
+				u16 truthtable)
+{
+	int rc = 0;
+	u8 table[2];
+
+	if (upldev == NULL || IS_ERR(upldev))
+		return -EINVAL;
+	if (upldev->pm_chip == NULL)
+		return -ENODEV;
+
+	table[0] = truthtable & 0xFF;
+	table[1] = (truthtable >> 8) & 0xFF;
+
+	mutex_lock(&upldev->upl_mutex);
+
+	rc = pm8058_write(upldev->pm_chip, SSBI_REG_UPL_TRUTHTABLE1,
+				&(table[0]), 1);
+	if (rc) {
+		pr_err("%s: FAIL pm8058_write(0x%X)=0x%04X: rc=%d\n",
+			__func__, SSBI_REG_UPL_TRUTHTABLE1, table[0], rc);
+		goto upl_write_done;
+	}
+
+	rc = pm8058_write(upldev->pm_chip, SSBI_REG_UPL_TRUTHTABLE2,
+				&(table[1]), 1);
+	if (rc)
+		pr_err("%s: FAIL pm8058_write(0x%X)=0x%04X: rc=%d\n",
+			__func__, SSBI_REG_UPL_TRUTHTABLE2, table[1], rc);
+upl_write_done:
+	mutex_unlock(&upldev->upl_mutex);
+	return rc;
+}
+EXPORT_SYMBOL(pm8058_upl_write_truthtable);
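+/*
+ * Illustrative truth table value, following the bit convention described
+ * above: to make the UPL output A AND B irrespective of C and D, set the
+ * bits for every input combination with A = 1 and B = 1 (indices 12..15),
+ * i.e. truthtable = 0xF000:
+ *
+ *	pm8058_upl_write_truthtable(upldev, 0xF000);
+ */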
+
+/*
+ * pm8058_upl_config - configure UPL I/O settings and UPL enable/disable
+ *
+ * @upldev: the UPL device
+ * @mask: setting mask to configure
+ * @flags: setting flags
+ */
+int pm8058_upl_config(struct pm8058_upl_device *upldev, u32 mask, u32 flags)
+{
+	int rc;
+	u8 upl_ctrl, m, f;
+
+	if (upldev == NULL || IS_ERR(upldev))
+		return -EINVAL;
+	if (upldev->pm_chip == NULL)
+		return -ENODEV;
+
+	mutex_lock(&upldev->upl_mutex);
+
+	rc = pm8058_read(upldev->pm_chip, SSBI_REG_UPL_CTRL, &upl_ctrl, 1);
+	if (rc) {
+		pr_err("%s: FAIL pm8058_read(0x%X)=0x%02X: rc=%d\n",
+			__func__, SSBI_REG_UPL_CTRL, upl_ctrl, rc);
+		goto upl_config_done;
+	}
+
+	m = mask & 0x00ff;
+	f = flags & 0x00ff;
+	upl_ctrl &= ~m;
+	upl_ctrl |= m & f;
+
+	rc = pm8058_write(upldev->pm_chip, SSBI_REG_UPL_CTRL, &upl_ctrl, 1);
+	if (rc)
+		pr_err("%s: FAIL pm8058_write(0x%X)=0x%02X: rc=%d\n",
+			__func__, SSBI_REG_UPL_CTRL, upl_ctrl, rc);
+upl_config_done:
+	mutex_unlock(&upldev->upl_mutex);
+	return rc;
+}
+EXPORT_SYMBOL(pm8058_upl_config);
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int truthtable_set(void *data, u64 val)
+{
+	int rc;
+
+	rc = pm8058_upl_write_truthtable(data, val);
+	if (rc)
+		pr_err("%s: pm8058_upl_write_truthtable: rc=%d, "
+			"truthtable=0x%llX\n", __func__, rc, val);
+	return rc;
+}
+
+static int truthtable_get(void *data, u64 *val)
+{
+	int rc;
+	u16 truthtable = 0;
+
+	rc = pm8058_upl_read_truthtable(data, &truthtable);
+	if (rc)
+		pr_err("%s: pm8058_upl_read_truthtable: rc=%d, "
+			"truthtable=0x%X\n", __func__, rc, truthtable);
+	if (val)
+		*val = truthtable;
+
+	return rc;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(upl_truthtable_fops, truthtable_get,
+			truthtable_set, "0x%04llX\n");
+
+/* enter values as 0xMMMMFFFF where MMMM is the mask and FFFF is the flags */
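+/* e.g., an illustrative write of 0x00FF0041 updates all eight control bits
+ * (mask 0xFF) to the value 0x41; the bit meanings follow the UPL_CTRL
+ * register layout and are not spelled out here. */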
+static int control_set(void *data, u64 val)
+{
+	u8 mask, flags;
+	int rc;
+
+	flags = val & 0xFFFF;
+	mask = (val >> 16) & 0xFFFF;
+
+	rc = pm8058_upl_config(data, mask, flags);
+	if (rc)
+		pr_err("%s: pm8058_upl_config: rc=%d, mask = 0x%X, "
+			"flags = 0x%X\n", __func__, rc, mask, flags);
+	return rc;
+}
+
+static int control_get(void *data, u64 *val)
+{
+	struct pm8058_upl_device *upldev;
+	int rc = 0;
+	u8 ctrl = 0;
+
+	upldev = data;
+
+	mutex_lock(&upldev->upl_mutex);
+
+	rc = pm8058_read(upldev->pm_chip, SSBI_REG_UPL_CTRL, &ctrl, 1);
+	if (rc)
+		pr_err("%s: FAIL pm8058_read(): rc=%d (ctrl=0x%02X)\n",
+		       __func__, rc, ctrl);
+
+	mutex_unlock(&upldev->upl_mutex);
+
+	*val = ctrl;
+
+	return rc;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(upl_control_fops, control_get,
+			control_set, "0x%02llX\n");
+
+static int pm8058_upl_debug_init(struct pm8058_upl_device *upldev)
+{
+	struct dentry *dent;
+	struct dentry *temp;
+
+	dent = debugfs_create_dir("pm8058-upl", NULL);
+	if (dent == NULL || IS_ERR(dent)) {
+		pr_err("%s: ERR debugfs_create_dir: dent=0x%X\n",
+					__func__, (unsigned)dent);
+		return -ENOMEM;
+	}
+
+	temp = debugfs_create_file("truthtable", S_IRUSR | S_IWUSR, dent,
+					upldev, &upl_truthtable_fops);
+	if (temp == NULL || IS_ERR(temp)) {
+		pr_err("%s: ERR debugfs_create_file: dent=0x%X\n",
+					__func__, (unsigned)dent);
+		goto debug_error;
+	}
+
+	temp = debugfs_create_file("control", S_IRUSR | S_IWUSR, dent,
+					upldev, &upl_control_fops);
+	if (temp == NULL || IS_ERR(temp)) {
+		pr_err("%s: ERR debugfs_create_file: dent=0x%X\n",
+					__func__, (unsigned)dent);
+		goto debug_error;
+	}
+
+	upldev->dent = dent;
+	return 0;
+
+debug_error:
+	debugfs_remove_recursive(dent);
+	return -ENOMEM;
+}
+
+static int __devexit pm8058_upl_debug_remove(struct pm8058_upl_device *upldev)
+{
+	debugfs_remove_recursive(upldev->dent);
+	return 0;
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
+static int __devinit pmic8058_upl_probe(struct platform_device *pdev)
+{
+	struct pm8058_chip		*pm_chip;
+	struct pm8058_upl_device	*upldev;
+
+	pm_chip = dev_get_drvdata(pdev->dev.parent);
+	if (pm_chip == NULL) {
+		pr_err("%s: no parent data passed in.\n", __func__);
+		return -EFAULT;
+	}
+
+	upldev = kzalloc(sizeof *upldev, GFP_KERNEL);
+	if (upldev == NULL) {
+		pr_err("%s: kzalloc() failed.\n", __func__);
+		return -ENOMEM;
+	}
+
+	mutex_init(&upldev->upl_mutex);
+
+	upldev->pm_chip = pm_chip;
+	upl_dev = upldev;
+	platform_set_drvdata(pdev, upldev);
+
+#if defined(CONFIG_DEBUG_FS)
+	pm8058_upl_debug_init(upl_dev);
+#endif
+	pr_notice("%s: OK\n", __func__);
+	return 0;
+}
+
+static int __devexit pmic8058_upl_remove(struct platform_device *pdev)
+{
+	struct pm8058_upl_device *upldev = platform_get_drvdata(pdev);
+
+#if defined(CONFIG_DEBUG_FS)
+	pm8058_upl_debug_remove(upldev);
+#endif
+
+	platform_set_drvdata(pdev, upldev->pm_chip);
+	kfree(upldev);
+	pr_notice("%s: OK\n", __func__);
+
+	return 0;
+}
+
+static struct platform_driver pmic8058_upl_driver = {
+	.probe		= pmic8058_upl_probe,
+	.remove		= __devexit_p(pmic8058_upl_remove),
+	.driver		= {
+		.name = "pm8058-upl",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init pm8058_upl_init(void)
+{
+	return platform_driver_register(&pmic8058_upl_driver);
+}
+
+static void __exit pm8058_upl_exit(void)
+{
+	platform_driver_unregister(&pmic8058_upl_driver);
+}
+
+module_init(pm8058_upl_init);
+module_exit(pm8058_upl_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC8058 UPL driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:pmic8058-upl");
diff --git a/drivers/misc/pmic8058-vibrator.c b/drivers/misc/pmic8058-vibrator.c
new file mode 100644
index 0000000..1b15b18
--- /dev/null
+++ b/drivers/misc/pmic8058-vibrator.c
@@ -0,0 +1,307 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/pmic8058-vibrator.h>
+#include <linux/mfd/pmic8058.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include "../staging/android/timed_output.h"
+
+#define VIB_DRV			0x4A
+
+#define VIB_DRV_SEL_MASK	0xf8
+#define VIB_DRV_SEL_SHIFT	0x03
+#define VIB_DRV_EN_MANUAL_MASK	0xfc
+
+#define VIB_MAX_LEVEL_mV	3100
+#define VIB_MIN_LEVEL_mV	1200
+
+struct pmic8058_vib {
+	struct hrtimer vib_timer;
+	struct timed_output_dev timed_dev;
+	spinlock_t lock;
+	struct work_struct work;
+
+	struct device *dev;
+	struct pmic8058_vibrator_pdata *pdata;
+	int state;
+	int level;
+	u8  reg_vib_drv;
+
+	struct pm8058_chip	*pm_chip;
+};
+
+/* REVISIT: just for debugging, will be removed in final working version */
+static void __dump_vib_regs(struct pmic8058_vib *vib, char *msg)
+{
+	u8 temp;
+
+	dev_dbg(vib->dev, "%s\n", msg);
+
+	pm8058_read(vib->pm_chip, VIB_DRV, &temp, 1);
+	dev_dbg(vib->dev, "VIB_DRV - %X\n", temp);
+}
+
+static int pmic8058_vib_read_u8(struct pmic8058_vib *vib,
+				 u8 *data, u16 reg)
+{
+	int rc;
+
+	rc = pm8058_read(vib->pm_chip, reg, data, 1);
+	if (rc < 0)
+		dev_warn(vib->dev, "Error reading pmic8058: %X - ret %X\n",
+				reg, rc);
+
+	return rc;
+}
+
+static int pmic8058_vib_write_u8(struct pmic8058_vib *vib,
+				 u8 data, u16 reg)
+{
+	int rc;
+
+	rc = pm8058_write(vib->pm_chip, reg, &data, 1);
+	if (rc < 0)
+		dev_warn(vib->dev, "Error writing pmic8058: %X - ret %X\n",
+				reg, rc);
+	return rc;
+}
+
+static int pmic8058_vib_set(struct pmic8058_vib *vib, int on)
+{
+	int rc;
+	u8 val;
+
+	if (on) {
+		rc = pm_runtime_resume(vib->dev);
+		if (rc < 0)
+			dev_dbg(vib->dev, "pm_runtime_resume failed\n");
+
+		val = vib->reg_vib_drv;
+		val |= ((vib->level << VIB_DRV_SEL_SHIFT) & VIB_DRV_SEL_MASK);
+		rc = pmic8058_vib_write_u8(vib, val, VIB_DRV);
+		if (rc < 0)
+			return rc;
+		vib->reg_vib_drv = val;
+	} else {
+		val = vib->reg_vib_drv;
+		val &= ~VIB_DRV_SEL_MASK;
+		rc = pmic8058_vib_write_u8(vib, val, VIB_DRV);
+		if (rc < 0)
+			return rc;
+		vib->reg_vib_drv = val;
+
+		rc = pm_runtime_suspend(vib->dev);
+		if (rc < 0)
+			dev_dbg(vib->dev, "pm_runtime_suspend failed\n");
+	}
+	__dump_vib_regs(vib, "vib_set_end");
+
+	return rc;
+}
+
+static void pmic8058_vib_enable(struct timed_output_dev *dev, int value)
+{
+	struct pmic8058_vib *vib = container_of(dev, struct pmic8058_vib,
+					 timed_dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&vib->lock, flags);
+	hrtimer_cancel(&vib->vib_timer);
+
+	if (value == 0)
+		vib->state = 0;
+	else {
+		value = (value > vib->pdata->max_timeout_ms ?
+				 vib->pdata->max_timeout_ms : value);
+		vib->state = 1;
+		hrtimer_start(&vib->vib_timer,
+			      ktime_set(value / 1000, (value % 1000) * 1000000),
+			      HRTIMER_MODE_REL);
+	}
+	spin_unlock_irqrestore(&vib->lock, flags);
+	schedule_work(&vib->work);
+}
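+/*
+ * Note: via the Android timed_output class this is normally driven from
+ * userspace by writing a duration in milliseconds to
+ * /sys/class/timed_output/vibrator/enable (0 stops the vibrator).
+ */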
+
+static void pmic8058_vib_update(struct work_struct *work)
+{
+	struct pmic8058_vib *vib = container_of(work, struct pmic8058_vib,
+					 work);
+
+	pmic8058_vib_set(vib, vib->state);
+}
+
+static int pmic8058_vib_get_time(struct timed_output_dev *dev)
+{
+	struct pmic8058_vib *vib = container_of(dev, struct pmic8058_vib,
+					 timed_dev);
+
+	if (hrtimer_active(&vib->vib_timer)) {
+		ktime_t r = hrtimer_get_remaining(&vib->vib_timer);
+		return (int) ktime_to_us(r);
+	} else
+		return 0;
+}
+
+static enum hrtimer_restart pmic8058_vib_timer_func(struct hrtimer *timer)
+{
+	struct pmic8058_vib *vib = container_of(timer, struct pmic8058_vib,
+					 vib_timer);
+	vib->state = 0;
+	schedule_work(&vib->work);
+	return HRTIMER_NORESTART;
+}
+
+#ifdef CONFIG_PM
+static int pmic8058_vib_suspend(struct device *dev)
+{
+	struct pmic8058_vib *vib = dev_get_drvdata(dev);
+
+	hrtimer_cancel(&vib->vib_timer);
+	cancel_work_sync(&vib->work);
+	/* turn-off vibrator */
+	pmic8058_vib_set(vib, 0);
+	return 0;
+}
+
+static struct dev_pm_ops pmic8058_vib_pm_ops = {
+	.suspend = pmic8058_vib_suspend,
+};
+#endif
+
+static int __devinit pmic8058_vib_probe(struct platform_device *pdev)
+{
+	struct pmic8058_vibrator_pdata *pdata = pdev->dev.platform_data;
+	struct pmic8058_vib *vib;
+	u8 val;
+	int rc;
+
+	struct pm8058_chip	*pm_chip;
+
+	pm_chip = dev_get_drvdata(pdev->dev.parent);
+	if (pm_chip == NULL) {
+		dev_err(&pdev->dev, "no parent data passed in\n");
+		return -EFAULT;
+	}
+
+	if (!pdata)
+		return -EINVAL;
+
+	if (pdata->level_mV < VIB_MIN_LEVEL_mV ||
+			 pdata->level_mV > VIB_MAX_LEVEL_mV)
+		return -EINVAL;
+
+	vib = kzalloc(sizeof(*vib), GFP_KERNEL);
+	if (!vib)
+		return -ENOMEM;
+
+	/* Enable runtime PM ops, start in ACTIVE mode */
+	rc = pm_runtime_set_active(&pdev->dev);
+	if (rc < 0)
+		dev_dbg(&pdev->dev, "unable to set runtime pm state\n");
+	pm_runtime_enable(&pdev->dev);
+
+	vib->pm_chip	= pm_chip;
+	vib->pdata	= pdata;
+	vib->level	= pdata->level_mV / 100;
+	vib->dev	= &pdev->dev;
+
+	spin_lock_init(&vib->lock);
+	INIT_WORK(&vib->work, pmic8058_vib_update);
+
+	hrtimer_init(&vib->vib_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	vib->vib_timer.function = pmic8058_vib_timer_func;
+
+	vib->timed_dev.name = "vibrator";
+	vib->timed_dev.get_time = pmic8058_vib_get_time;
+	vib->timed_dev.enable = pmic8058_vib_enable;
+
+	__dump_vib_regs(vib, "boot_vib_default");
+
+	/* operate in manual mode */
+	rc = pmic8058_vib_read_u8(vib, &val, VIB_DRV);
+	if (rc < 0)
+		goto err_read_vib;
+	val &= ~VIB_DRV_EN_MANUAL_MASK;
+	rc = pmic8058_vib_write_u8(vib, val, VIB_DRV);
+	if (rc < 0)
+		goto err_read_vib;
+
+	vib->reg_vib_drv = val;
+
+	rc = timed_output_dev_register(&vib->timed_dev);
+	if (rc < 0)
+		goto err_read_vib;
+
+	pmic8058_vib_enable(&vib->timed_dev, pdata->initial_vibrate_ms);
+
+	platform_set_drvdata(pdev, vib);
+
+	pm_runtime_set_suspended(&pdev->dev);
+	return 0;
+
+err_read_vib:
+	pm_runtime_set_suspended(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	kfree(vib);
+	return rc;
+}
+
+static int __devexit pmic8058_vib_remove(struct platform_device *pdev)
+{
+	struct pmic8058_vib *vib = platform_get_drvdata(pdev);
+
+	pm_runtime_disable(&pdev->dev);
+	cancel_work_sync(&vib->work);
+	hrtimer_cancel(&vib->vib_timer);
+	timed_output_dev_unregister(&vib->timed_dev);
+	kfree(vib);
+
+	return 0;
+}
+
+static struct platform_driver pmic8058_vib_driver = {
+	.probe		= pmic8058_vib_probe,
+	.remove		= __devexit_p(pmic8058_vib_remove),
+	.driver		= {
+		.name	= "pm8058-vib",
+		.owner	= THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm	= &pmic8058_vib_pm_ops,
+#endif
+	},
+};
+
+static int __init pmic8058_vib_init(void)
+{
+	return platform_driver_register(&pmic8058_vib_driver);
+}
+module_init(pmic8058_vib_init);
+
+static void __exit pmic8058_vib_exit(void)
+{
+	platform_driver_unregister(&pmic8058_vib_driver);
+}
+module_exit(pmic8058_vib_exit);
+
+MODULE_ALIAS("platform:pmic8058_vib");
+MODULE_DESCRIPTION("PMIC8058 vibrator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/pmic8058-xoadc.c b/drivers/misc/pmic8058-xoadc.c
new file mode 100644
index 0000000..d2d8cba
--- /dev/null
+++ b/drivers/misc/pmic8058-xoadc.c
@@ -0,0 +1,770 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/msm_adc.h>
+#include <linux/pmic8058-xoadc.h>
+#include <linux/mfd/pmic8058.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/ratelimit.h>
+#include <linux/delay.h>
+
+#include <mach/mpp.h>
+#include <mach/msm_xo.h>
+
+#define ADC_DRIVER_NAME			"pm8058-xoadc"
+
+#define MAX_QUEUE_LENGTH        0X15
+#define MAX_CHANNEL_PROPERTIES_QUEUE    0X7
+#define MAX_QUEUE_SLOT		0x1
+
+/* User Processor */
+#define ADC_ARB_USRP_CNTRL                      0x197
+	#define ADC_ARB_USRP_CNTRL_EN_ARB	BIT(0)
+	#define ADC_ARB_USRP_CNTRL_RSV1		BIT(1)
+	#define ADC_ARB_USRP_CNTRL_RSV2		BIT(2)
+	#define ADC_ARB_USRP_CNTRL_RSV3		BIT(3)
+	#define ADC_ARB_USRP_CNTRL_RSV4		BIT(4)
+	#define ADC_ARB_USRP_CNTRL_RSV5		BIT(5)
+	#define ADC_ARB_USRP_CNTRL_EOC		BIT(6)
+	#define ADC_ARB_USRP_CNTRL_REQ		BIT(7)
+
+#define ADC_ARB_USRP_AMUX_CNTRL         0x198
+#define ADC_ARB_USRP_ANA_PARAM          0x199
+#define ADC_ARB_USRP_DIG_PARAM          0x19A
+#define ADC_ARB_USRP_RSV                        0x19B
+
+#define ADC_ARB_USRP_DATA0                      0x19D
+#define ADC_ARB_USRP_DATA1                      0x19C
+
+struct pmic8058_adc {
+	struct xoadc_platform_data *pdata;
+	struct pm8058_chip *pm_chip;
+	struct adc_properties *adc_prop;
+	struct xoadc_conv_state	conv[2];
+	int xoadc_queue_count;
+	int adc_irq;
+	struct linear_graph *adc_graph;
+	struct xoadc_conv_state *conv_slot_request;
+	struct xoadc_conv_state *conv_queue_list;
+	struct adc_conv_slot conv_queue_elements[MAX_QUEUE_LENGTH];
+	int xoadc_num;
+	struct msm_xo_voter *adc_voter;
+};
+
+static struct pmic8058_adc *pmic_adc[XOADC_PMIC_0 + 1];
+
+static bool xoadc_initialized, xoadc_calib_first_adc;
+
+DEFINE_RATELIMIT_STATE(pm8058_xoadc_msg_ratelimit,
+		DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
+
+static inline int pm8058_xoadc_can_print(void)
+{
+	return __ratelimit(&pm8058_xoadc_msg_ratelimit);
+}
+
+int32_t pm8058_xoadc_registered(void)
+{
+	return xoadc_initialized;
+}
+EXPORT_SYMBOL(pm8058_xoadc_registered);
+
+void pm8058_xoadc_restore_slot(uint32_t adc_instance,
+					struct adc_conv_slot *slot)
+{
+	struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance];
+	struct xoadc_conv_state *slot_state = adc_pmic->conv_slot_request;
+
+	mutex_lock(&slot_state->list_lock);
+	list_add(&slot->list, &slot_state->slots);
+	mutex_unlock(&slot_state->list_lock);
+}
+EXPORT_SYMBOL(pm8058_xoadc_restore_slot);
+
+void pm8058_xoadc_slot_request(uint32_t adc_instance,
+					struct adc_conv_slot **slot)
+{
+	struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance];
+	struct xoadc_conv_state *slot_state = adc_pmic->conv_slot_request;
+
+	mutex_lock(&slot_state->list_lock);
+
+	if (!list_empty(&slot_state->slots)) {
+		*slot = list_first_entry(&slot_state->slots,
+				struct adc_conv_slot, list);
+		list_del(&(*slot)->list);
+	} else
+		*slot = NULL;
+
+	mutex_unlock(&slot_state->list_lock);
+}
+EXPORT_SYMBOL(pm8058_xoadc_slot_request);
+
+static int32_t pm8058_xoadc_arb_cntrl(uint32_t arb_cntrl,
+					uint32_t adc_instance)
+{
+	struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance];
+	int i, rc;
+	u8 data_arb_cntrl;
+
+	data_arb_cntrl = ADC_ARB_USRP_CNTRL_EOC |
+			ADC_ARB_USRP_CNTRL_RSV5 |
+			ADC_ARB_USRP_CNTRL_RSV4;
+
+	if (arb_cntrl) {
+		data_arb_cntrl |= ADC_ARB_USRP_CNTRL_EN_ARB;
+		msm_xo_mode_vote(adc_pmic->adc_voter, MSM_XO_MODE_ON);
+		adc_pmic->pdata->xoadc_mpp_config();
+	}
+
+	/* Write twice to the CNTRL register for the arbiter settings
+	   to take effect */
+	for (i = 0; i < 2; i++) {
+		rc = pm8058_write(adc_pmic->pm_chip, ADC_ARB_USRP_CNTRL,
+					&data_arb_cntrl, 1);
+		if (rc < 0) {
+			pr_debug("%s: PM8058 write failed\n", __func__);
+			return rc;
+		}
+	}
+
+	if (!arb_cntrl)
+		msm_xo_mode_vote(adc_pmic->adc_voter, MSM_XO_MODE_OFF);
+
+	return 0;
+}
+
+static int32_t pm8058_xoadc_configure(uint32_t adc_instance,
+					struct adc_conv_slot *slot)
+{
+
+	struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance];
+	u8 data_arb_cntrl, data_amux_chan, data_arb_rsv, data_ana_param;
+	u8 data_dig_param, data_ana_param2;
+	int rc;
+
+	rc = pm8058_xoadc_arb_cntrl(1, adc_instance);
+	if (rc < 0) {
+		pr_debug("%s: Configuring ADC Arbiter"
+				"enable failed\n", __func__);
+		return rc;
+	}
+
+	switch (slot->chan_path) {
+
+	case CHAN_PATH_TYPE1:
+		data_amux_chan = CHANNEL_VCOIN << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 2;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE2:
+		data_amux_chan = CHANNEL_VBAT << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 3;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE3:
+		data_amux_chan = CHANNEL_VCHG << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 10;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE4:
+		data_amux_chan = CHANNEL_CHG_MONITOR << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 1;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE5:
+		data_amux_chan = CHANNEL_VPH_PWR << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 3;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE6:
+		data_amux_chan = CHANNEL_MPP5 << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 1;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE7:
+		data_amux_chan = CHANNEL_MPP6 << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 1;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE8:
+		data_amux_chan = CHANNEL_MPP7 << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 2;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE9:
+		data_amux_chan = CHANNEL_MPP8 << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 2;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE10:
+		data_amux_chan = CHANNEL_MPP9 << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 3;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE11:
+		data_amux_chan = CHANNEL_USB_VBUS << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 3;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE12:
+		data_amux_chan = CHANNEL_DIE_TEMP << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 1;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE13:
+		data_amux_chan = CHANNEL_125V << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 1;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE14:
+		data_amux_chan = CHANNEL_INTERNAL_2 << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 1;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE_NONE:
+		data_amux_chan = CHANNEL_MUXOFF << 4;
+		data_arb_rsv = 0x10;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 1;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[1];
+		break;
+
+	case CHAN_PATH_TYPE15:
+		data_amux_chan = CHANNEL_INTERNAL << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 1;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+	}
+
+	rc = pm8058_write(adc_pmic->pm_chip,
+			ADC_ARB_USRP_AMUX_CNTRL, &data_amux_chan, 1);
+	if (rc < 0) {
+		pr_debug("%s: PM8058 write failed\n", __func__);
+		return rc;
+	}
+
+	rc = pm8058_write(adc_pmic->pm_chip,
+			ADC_ARB_USRP_RSV, &data_arb_rsv, 1);
+	if (rc < 0) {
+		pr_debug("%s: PM8058 write failed\n", __func__);
+		return rc;
+	}
+
+	/* Set the default XO ADC digital clock rate to 2.4 MHz */
+	switch (slot->chan_adc_config) {
+
+	case ADC_CONFIG_TYPE1:
+		data_ana_param = 0xFE;
+		data_dig_param = 0x23;
+		data_ana_param2 = 0xFF;
+		/* AMUX register data to start the ADC conversion */
+		data_arb_cntrl = 0xF1;
+		break;
+
+	case ADC_CONFIG_TYPE2:
+		data_ana_param = 0xFE;
+		data_dig_param = 0x03;
+		data_ana_param2 = 0xFF;
+		/* AMUX register data to start the ADC conversion */
+		data_arb_cntrl = 0xF1;
+		break;
+	}
+
+	rc = pm8058_write(adc_pmic->pm_chip,
+				ADC_ARB_USRP_ANA_PARAM, &data_ana_param, 1);
+	if (rc < 0) {
+		pr_debug("%s: PM8058 write failed\n", __func__);
+		return rc;
+	}
+
+	rc = pm8058_write(adc_pmic->pm_chip,
+				ADC_ARB_USRP_DIG_PARAM, &data_dig_param, 1);
+	if (rc < 0) {
+		pr_debug("%s: PM8058 write failed\n", __func__);
+		return rc;
+	}
+
+	rc = pm8058_write(adc_pmic->pm_chip,
+				ADC_ARB_USRP_ANA_PARAM, &data_ana_param2, 1);
+	if (rc < 0) {
+		pr_debug("%s: PM8058 write failed\n", __func__);
+		return rc;
+	}
+
+	enable_irq(adc_pmic->adc_irq);
+
+	rc = pm8058_write(adc_pmic->pm_chip,
+				ADC_ARB_USRP_CNTRL, &data_arb_cntrl, 1);
+	if (rc < 0) {
+		pr_debug("%s: PM8058 write failed\n", __func__);
+		return rc;
+	}
+
+	return 0;
+}
+
+int32_t pm8058_xoadc_select_chan_and_start_conv(uint32_t adc_instance,
+					struct adc_conv_slot *slot)
+{
+	struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance];
+	struct xoadc_conv_state *slot_state = adc_pmic->conv_queue_list;
+
+	if (!xoadc_initialized)
+		return -ENODEV;
+
+	mutex_lock(&slot_state->list_lock);
+	list_add_tail(&slot->list, &slot_state->slots);
+	if (adc_pmic->xoadc_queue_count == 0) {
+		if (adc_pmic->pdata->xoadc_vreg_set != NULL)
+			adc_pmic->pdata->xoadc_vreg_set(1);
+		pm8058_xoadc_configure(adc_instance, slot);
+	}
+	adc_pmic->xoadc_queue_count++;
+	mutex_unlock(&slot_state->list_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(pm8058_xoadc_select_chan_and_start_conv);
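+/*
+ * Sketch of how the exported helpers fit together (the actual call sites
+ * live in the msm_adc framework, so names and ordering here are only
+ * illustrative):
+ *
+ *	pm8058_xoadc_slot_request(adc_instance, &slot);
+ *	slot->chan_path = CHAN_PATH_TYPE2;
+ *	slot->chan_adc_config = ADC_CONFIG_TYPE2;
+ *	pm8058_xoadc_select_chan_and_start_conv(adc_instance, slot);
+ *	... EOC interrupt fires, msm_adc_conv_cb() is invoked ...
+ *	pm8058_xoadc_read_adc_code(adc_instance, &code);
+ *	pm8058_xoadc_restore_slot(adc_instance, slot);
+ */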
+
+static int32_t pm8058_xoadc_dequeue_slot_request(uint32_t adc_instance,
+				struct adc_conv_slot **slot)
+{
+	struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance];
+	struct xoadc_conv_state *slot_state = adc_pmic->conv_queue_list;
+	int rc = 0;
+
+	mutex_lock(&slot_state->list_lock);
+	if (adc_pmic->xoadc_queue_count > 0 &&
+			!list_empty(&slot_state->slots)) {
+		*slot = list_first_entry(&slot_state->slots,
+			struct adc_conv_slot, list);
+		list_del(&(*slot)->list);
+	} else
+		rc = -EINVAL;
+	mutex_unlock(&slot_state->list_lock);
+
+	if (rc < 0) {
+		if (pm8058_xoadc_can_print())
+			pr_err("Pmic 8058 xoadc spurious interrupt detected\n");
+		return rc;
+	}
+
+	return 0;
+}
+
+int32_t pm8058_xoadc_read_adc_code(uint32_t adc_instance, int32_t *data)
+{
+	struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance];
+	struct xoadc_conv_state *slot_state = adc_pmic->conv_queue_list;
+	uint8_t rslt_lsb, rslt_msb;
+	struct adc_conv_slot *slot;
+	int32_t rc, max_ideal_adc_code = 1 << adc_pmic->adc_prop->bitresolution;
+
+	if (!xoadc_initialized)
+		return -ENODEV;
+
+	rc = pm8058_read(adc_pmic->pm_chip, ADC_ARB_USRP_DATA0, &rslt_lsb, 1);
+	if (rc < 0) {
+		pr_debug("%s: PM8058 read failed\n", __func__);
+		return rc;
+	}
+
+	rc = pm8058_read(adc_pmic->pm_chip, ADC_ARB_USRP_DATA1, &rslt_msb, 1);
+	if (rc < 0) {
+		pr_debug("%s: PM8058 read failed\n", __func__);
+		return rc;
+	}
+
+	*data = (rslt_msb << 8) | rslt_lsb;
+
+	/* Use the midpoint to determine underflow or overflow */
+	if (*data > max_ideal_adc_code + (max_ideal_adc_code >> 1))
+		*data |= ((1 << (8 * sizeof(*data) -
+			adc_pmic->adc_prop->bitresolution)) - 1) <<
+			adc_pmic->adc_prop->bitresolution;
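+	/*
+	 * The OR above fills every bit at and above the ADC resolution with
+	 * ones, so codes beyond 1.5x the ideal full-scale value come back as
+	 * proper negative two's-complement readings in *data.
+	 */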
+	/* Return if this is a calibration run since there
+	 * is no need to check requests in the waiting queue */
+	if (xoadc_calib_first_adc)
+		return 0;
+
+	mutex_lock(&slot_state->list_lock);
+	adc_pmic->xoadc_queue_count--;
+	if (adc_pmic->xoadc_queue_count > 0) {
+		slot = list_first_entry(&slot_state->slots,
+				struct adc_conv_slot, list);
+		pm8058_xoadc_configure(adc_instance, slot);
+	}
+	mutex_unlock(&slot_state->list_lock);
+
+	mutex_lock(&slot_state->list_lock);
+	/* Default value for switching off the arbiter after reading
+	   the ADC value. Bit 0 set to 0. */
+	if (adc_pmic->xoadc_queue_count == 0) {
+		rc = pm8058_xoadc_arb_cntrl(0, adc_instance);
+		if (rc < 0) {
+			pr_debug("%s: Configuring ADC Arbiter disable"
+						"failed\n", __func__);
+			return rc;
+		}
+		if (adc_pmic->pdata->xoadc_vreg_set != NULL)
+			adc_pmic->pdata->xoadc_vreg_set(0);
+	}
+	mutex_unlock(&slot_state->list_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(pm8058_xoadc_read_adc_code);
+
+static irqreturn_t pm8058_xoadc(int irq, void *dev_id)
+{
+	struct pmic8058_adc *xoadc_8058 = dev_id;
+	struct adc_conv_slot *slot = NULL;
+	int rc;
+
+	disable_irq_nosync(xoadc_8058->adc_irq);
+
+	if (xoadc_calib_first_adc)
+		return IRQ_HANDLED;
+
+	rc = pm8058_xoadc_dequeue_slot_request(xoadc_8058->xoadc_num, &slot);
+
+	if (rc < 0)
+		return IRQ_NONE;
+
+	if (rc == 0)
+		msm_adc_conv_cb(slot, 0, NULL, 0);
+
+	return IRQ_HANDLED;
+}
+
+struct adc_properties *pm8058_xoadc_get_properties(uint32_t dev_instance)
+{
+	struct pmic8058_adc *xoadc_8058 = pmic_adc[dev_instance];
+
+	return xoadc_8058->adc_prop;
+}
+EXPORT_SYMBOL(pm8058_xoadc_get_properties);
+
+int32_t pm8058_xoadc_calib_device(uint32_t adc_instance)
+{
+	struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance];
+	struct adc_conv_slot *slot;
+	int rc, offset_xoadc, slope_xoadc, calib_read_1, calib_read_2;
+
+	if (adc_pmic->pdata->xoadc_vreg_set != NULL)
+		adc_pmic->pdata->xoadc_vreg_set(1);
+
+	pm8058_xoadc_slot_request(adc_instance, &slot);
+	if (slot) {
+		slot->chan_path = CHAN_PATH_TYPE13;
+		slot->chan_adc_config = ADC_CONFIG_TYPE2;
+		slot->chan_adc_calib = ADC_CONFIG_TYPE2;
+		xoadc_calib_first_adc = true;
+		rc = pm8058_xoadc_configure(adc_instance, slot);
+		if (rc) {
+			pr_err("pm8058_xoadc configure failed\n");
+			goto fail;
+		}
+	} else {
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	msleep(3);
+
+	rc = pm8058_xoadc_read_adc_code(adc_instance, &calib_read_1);
+	if (rc) {
+		pr_err("pm8058_xoadc read adc failed\n");
+		xoadc_calib_first_adc = false;
+		goto fail;
+	}
+	xoadc_calib_first_adc = false;
+
+	pm8058_xoadc_slot_request(adc_instance, &slot);
+	if (slot) {
+		slot->chan_path = CHAN_PATH_TYPE15;
+		slot->chan_adc_config = ADC_CONFIG_TYPE2;
+		slot->chan_adc_calib = ADC_CONFIG_TYPE2;
+		xoadc_calib_first_adc = true;
+		rc = pm8058_xoadc_configure(adc_instance, slot);
+		if (rc) {
+			pr_err("pm8058_xoadc configure failed\n");
+			goto fail;
+		}
+	} else {
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	msleep(3);
+
+	rc = pm8058_xoadc_read_adc_code(adc_instance, &calib_read_2);
+	if (rc) {
+		pr_err("pm8058_xoadc read adc failed\n");
+		xoadc_calib_first_adc = false;
+		goto fail;
+	}
+	xoadc_calib_first_adc = false;
+
+	pm8058_xoadc_restore_slot(adc_instance, slot);
+
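+	/* The slope is kept in Q10 fixed point (scaled by 2^10) so that the
+	 * division by the 625 mV reference can be done in integer math; the
+	 * same shift is undone when the offset is derived below. */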
+	slope_xoadc = (((calib_read_1 - calib_read_2) << 10)/
+					CHANNEL_ADC_625_MV);
+	offset_xoadc = calib_read_2 -
+			((slope_xoadc * CHANNEL_ADC_625_MV) >> 10);
+
+	printk(KERN_INFO "pmic8058_xoadc: The offset for AMUX calibration"
+						" was %d\n", offset_xoadc);
+
+	adc_pmic->adc_graph[0].offset = offset_xoadc;
+	adc_pmic->adc_graph[0].dy = (calib_read_1 - calib_read_2);
+	adc_pmic->adc_graph[0].dx = CHANNEL_ADC_625_MV;
+
+	/* Retain ideal calibration settings for therm readings */
+	adc_pmic->adc_graph[1].offset = 0;
+	adc_pmic->adc_graph[1].dy = (1 << 15) - 1;
+	adc_pmic->adc_graph[1].dx = 2200;
+
+	if (adc_pmic->pdata->xoadc_vreg_set != NULL)
+		adc_pmic->pdata->xoadc_vreg_set(0);
+
+	return 0;
+fail:
+	if (adc_pmic->pdata->xoadc_vreg_set != NULL)
+		adc_pmic->pdata->xoadc_vreg_set(0);
+
+	return rc;
+}
+EXPORT_SYMBOL(pm8058_xoadc_calib_device);
+
+int32_t pm8058_xoadc_calibrate(uint32_t dev_instance,
+				struct adc_conv_slot *slot, int *calib_status)
+{
+	*calib_status = CALIB_NOT_REQUIRED;
+
+	return 0;
+}
+EXPORT_SYMBOL(pm8058_xoadc_calibrate);
+
+static int __devexit pm8058_xoadc_teardown(struct platform_device *pdev)
+{
+	struct pmic8058_adc *adc_pmic = platform_get_drvdata(pdev);
+
+	if (adc_pmic->pdata->xoadc_vreg_shutdown != NULL)
+		adc_pmic->pdata->xoadc_vreg_shutdown();
+
+	msm_xo_put(adc_pmic->adc_voter);
+	platform_set_drvdata(pdev, adc_pmic->pm_chip);
+	device_init_wakeup(&pdev->dev, 0);
+	kfree(adc_pmic);
+	xoadc_initialized = false;
+
+	return 0;
+}
+
+static int __devinit pm8058_xoadc_probe(struct platform_device *pdev)
+{
+	struct xoadc_platform_data *pdata = pdev->dev.platform_data;
+	struct pm8058_chip *pm_chip;
+	struct pmic8058_adc *adc_pmic;
+	int i, rc = 0;
+
+	pm_chip = dev_get_drvdata(pdev->dev.parent);
+	if (pm_chip == NULL) {
+		dev_err(&pdev->dev, "no parent data passed in\n");
+		return -EFAULT;
+	}
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "no platform data?\n");
+		return -EINVAL;
+	}
+
+	adc_pmic = kzalloc(sizeof(struct pmic8058_adc), GFP_KERNEL);
+	if (!adc_pmic) {
+		dev_err(&pdev->dev, "Unable to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	adc_pmic->pm_chip = pm_chip;
+	adc_pmic->adc_prop = pdata->xoadc_prop;
+	adc_pmic->xoadc_num = pdata->xoadc_num;
+	adc_pmic->xoadc_queue_count = 0;
+
+	platform_set_drvdata(pdev, adc_pmic);
+
+	if (adc_pmic->xoadc_num > XOADC_PMIC_0) {
+		dev_err(&pdev->dev, "ADC device not supported\n");
+		rc = -EINVAL;
+		goto err_cleanup;
+	}
+
+	adc_pmic->pdata = pdata;
+	adc_pmic->adc_graph = kzalloc(sizeof(struct linear_graph)
+			* MAX_CHANNEL_PROPERTIES_QUEUE, GFP_KERNEL);
+	if (!adc_pmic->adc_graph) {
+		dev_err(&pdev->dev, "Unable to allocate memory\n");
+		rc = -ENOMEM;
+		goto err_cleanup;
+	}
+
+	/* Will be replaced by individual channel calibration */
+	for (i = 0; i < MAX_CHANNEL_PROPERTIES_QUEUE; i++) {
+		adc_pmic->adc_graph[i].offset = 0;
+		adc_pmic->adc_graph[i].dy = (1 << 15) - 1;
+		adc_pmic->adc_graph[i].dx = 2200;
+	}
+
+	if (pdata->xoadc_mpp_config != NULL)
+		pdata->xoadc_mpp_config();
+
+	adc_pmic->conv_slot_request = &adc_pmic->conv[0];
+	adc_pmic->conv_slot_request->context =
+		&adc_pmic->conv_queue_elements[0];
+
+	mutex_init(&adc_pmic->conv_slot_request->list_lock);
+	INIT_LIST_HEAD(&adc_pmic->conv_slot_request->slots);
+
+	/* Put each slot on the request pool and init its work and completion */
+	for (i = 0; i < MAX_QUEUE_LENGTH; i++) {
+		list_add(&adc_pmic->conv_slot_request->context[i].list,
+					&adc_pmic->conv_slot_request->slots);
+		INIT_WORK(&adc_pmic->conv_slot_request->context[i].work,
+							msm_adc_wq_work);
+		init_completion(&adc_pmic->conv_slot_request->context[i].comp);
+		adc_pmic->conv_slot_request->context[i].idx = i;
+	}
+
+	adc_pmic->conv_queue_list = &adc_pmic->conv[1];
+
+	mutex_init(&adc_pmic->conv_queue_list->list_lock);
+	INIT_LIST_HEAD(&adc_pmic->conv_queue_list->slots);
+
+	adc_pmic->adc_irq = platform_get_irq(pdev, 0);
+	if (adc_pmic->adc_irq < 0) {
+		rc = -ENXIO;
+		goto err_cleanup;
+	}
+
+	rc = request_threaded_irq(adc_pmic->adc_irq,
+				NULL, pm8058_xoadc,
+		IRQF_TRIGGER_RISING, "pm8058_adc_interrupt", adc_pmic);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to request adc irq\n");
+		goto err_cleanup;
+	}
+
+	disable_irq(adc_pmic->adc_irq);
+
+	device_init_wakeup(&pdev->dev, pdata->xoadc_wakeup);
+
+	if (adc_pmic->adc_voter == NULL) {
+		adc_pmic->adc_voter = msm_xo_get(MSM_XO_TCXO_D1,
+							"pmic8058_xoadc");
+		if (IS_ERR(adc_pmic->adc_voter)) {
+			dev_err(&pdev->dev, "Failed to get XO vote\n");
+			rc = PTR_ERR(adc_pmic->adc_voter);
+			goto err_cleanup;
+		}
+	}
+
+	pmic_adc[adc_pmic->xoadc_num] = adc_pmic;
+
+	if (pdata->xoadc_vreg_setup != NULL)
+		pdata->xoadc_vreg_setup();
+
+	xoadc_initialized = true;
+	xoadc_calib_first_adc = false;
+
+	return 0;
+
+err_cleanup:
+	pm8058_xoadc_teardown(pdev);
+
+	return rc;
+}
+
+static struct platform_driver pm8058_xoadc_driver = {
+	.probe = pm8058_xoadc_probe,
+	.remove = __devexit_p(pm8058_xoadc_teardown),
+	.driver = {
+		.name = "pm8058-xoadc",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init pm8058_xoadc_init(void)
+{
+	return platform_driver_register(&pm8058_xoadc_driver);
+}
+module_init(pm8058_xoadc_init);
+
+static void __exit pm8058_xoadc_exit(void)
+{
+	platform_driver_unregister(&pm8058_xoadc_driver);
+}
+module_exit(pm8058_xoadc_exit);
+
+MODULE_ALIAS("platform:pmic8058_xoadc");
+MODULE_DESCRIPTION("PMIC8058 XOADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/qfp_fuse.c b/drivers/misc/qfp_fuse.c
new file mode 100644
index 0000000..341e5b2
--- /dev/null
+++ b/drivers/misc/qfp_fuse.c
@@ -0,0 +1,410 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/delay.h>
+#include <linux/qfp_fuse.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/regulator/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+
+/*
+ * Time QFPROM requires to reliably burn a fuse.
+ */
+#define QFPROM_BLOW_TIMEOUT_US      10
+#define QFPROM_BLOW_TIMER_OFFSET    0x2038
+/*
+ * Denotes number of cycles required to blow the fuse.
+ */
+#define QFPROM_BLOW_TIMER_VALUE     (QFPROM_BLOW_TIMEOUT_US * 83)
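+/*
+ * Note: the factor of 83 presumably reflects an ~83 MHz reference clock,
+ * i.e. roughly 83 timer cycles per microsecond of blow time.
+ */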
+
+#define QFPROM_BLOW_STATUS_OFFSET   0x204C
+#define QFPROM_BLOW_STATUS_BUSY     0x01
+#define QFPROM_BLOW_STATUS_ERROR    0x02
+
+#define QFP_FUSE_READY              0x01
+#define QFP_FUSE_OFF                0x00
+
+struct qfp_priv_t {
+	uint32_t base;
+	uint32_t end;
+	struct mutex lock;
+	struct regulator *fuse_vdd;
+	u8 state;
+};
+
+/* We need only one instance of this for the driver */
+static struct qfp_priv_t *qfp_priv;
+
+
+static int qfp_fuse_open(struct inode *inode, struct file *filp)
+{
+	if (qfp_priv == NULL)
+		return -ENODEV;
+
+	filp->private_data = qfp_priv;
+
+	return 0;
+}
+
+static int qfp_fuse_release(struct inode *inode, struct file *filp)
+{
+
+	filp->private_data = NULL;
+
+	return 0;
+}
+
+static inline int qfp_fuse_wait_for_fuse_blow(u32 *status)
+{
+	u32 timeout = QFPROM_BLOW_TIMEOUT_US;
+	/* wait for 400us before checking for the first time */
+	udelay(400);
+	do {
+		*status = readl_relaxed(
+			qfp_priv->base + QFPROM_BLOW_STATUS_OFFSET);
+
+		if (!(*status & QFPROM_BLOW_STATUS_BUSY))
+			return 0;
+
+		timeout--;
+		udelay(1);
+	} while (timeout);
+	pr_err("Timeout waiting for FUSE blow, status = %x\n", *status);
+	return -ETIMEDOUT;
+}
+
+static inline int qfp_fuse_enable_regulator(void)
+{
+	int err;
+	err = regulator_enable(qfp_priv->fuse_vdd);
+	if (err != 0)
+		pr_err("Error (%d) enabling regulator\n", err);
+	return err;
+}
+
+static inline int qfp_fuse_disable_regulator(void)
+{
+	int err;
+	err = regulator_disable(qfp_priv->fuse_vdd);
+	if (err != 0)
+		pr_err("Error (%d) disabling regulator\n", err);
+	return err;
+}
+
+static int qfp_fuse_write_word(u32 *addr, u32 data)
+{
+	u32 blow_status = 0;
+	u32 read_data;
+	int err;
+
+	/* Set QFPROM  blow timer register */
+	writel_relaxed(QFPROM_BLOW_TIMER_VALUE,
+			qfp_priv->base + QFPROM_BLOW_TIMER_OFFSET);
+	mb();
+
+	/* Enable LVS0 regulator */
+	err = qfp_fuse_enable_regulator();
+	if (err != 0)
+		return err;
+
+	/*
+	 * Wait for about 1ms. However msleep(1) can sleep for
+	 * up to 20ms as per Documentation/timers/timers-howto.txt.
+	 * Time is not a constraint here.
+	 */
+
+	msleep(20);
+
+	/* Write data */
+	__raw_writel(data, addr);
+	mb();
+
+	/* blow_status = QFPROM_BLOW_STATUS_BUSY; */
+	err = qfp_fuse_wait_for_fuse_blow(&blow_status);
+	if (err) {
+		qfp_fuse_disable_regulator();
+		return err;
+	}
+
+	/* Check error status */
+	if (blow_status & QFPROM_BLOW_STATUS_ERROR) {
+		pr_err("Fuse blow status error: %d\n", blow_status);
+		qfp_fuse_disable_regulator();
+		return -EFAULT;
+	}
+
+	/* Disable regulator */
+	qfp_fuse_disable_regulator();
+	/*
+	 * Wait for about 1ms. However msleep(1) can sleep for
+	 * up to 20ms as per Documentation/timers/timers-howto.txt.
+	 * Time is not a constraint here.
+	 */
+	msleep(20);
+
+	/* Verify written data */
+	read_data = readl_relaxed(addr);
+	if (read_data != data) {
+		pr_err("Error: read/write data mismatch\n");
+		pr_err("Address = %p written data = %x read data = %x\n",
+			addr, data, read_data);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static long
+qfp_fuse_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	int err = 0;
+	struct qfp_fuse_req req;
+	u32 *buf = NULL;
+	int i;
+
+	/* Verify user arguments. */
+	if (_IOC_TYPE(cmd) != QFP_FUSE_IOC_MAGIC)
+		return -ENOTTY;
+
+	switch (cmd) {
+	case QFP_FUSE_IOC_READ:
+		if (arg == 0) {
+			pr_err("user space arg not supplied\n");
+			err = -EFAULT;
+			break;
+		}
+
+		if (copy_from_user(&req, (void __user *)arg, sizeof(req))) {
+			pr_err("Error copying req from user space\n");
+			err = -EFAULT;
+			break;
+		}
+
+		/* Check for limits */
+		if (!req.size) {
+			pr_err("Request size zero.\n");
+			err = -EFAULT;
+			break;
+		}
+
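+		/*
+		 * req.offset is a byte offset from the QFPROM base and
+		 * req.size a count of 32-bit words; reject requests whose
+		 * last word would fall past the end of the mapped region.
+		 */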
+		if (qfp_priv->base + req.offset + (req.size - 1) * 4 >
+				qfp_priv->end) {
+			pr_err("Req size exceeds QFPROM addr space\n");
+			err = -EFAULT;
+			break;
+		}
+
+		/* Allocate memory for buffer */
+		buf = kzalloc(req.size * 4, GFP_KERNEL);
+		if (buf == NULL) {
+			pr_alert("No memory for data\n");
+			err = -ENOMEM;
+			break;
+		}
+
+		if (mutex_lock_interruptible(&qfp_priv->lock)) {
+			err = -ERESTARTSYS;
+			break;
+		}
+
+		/* Read data */
+		for (i = 0; i < req.size; i++)
+			buf[i] = readl_relaxed(
+				((u32 *) (qfp_priv->base + req.offset)) + i);
+
+		if (copy_to_user((void __user *)req.data, buf, 4*(req.size))) {
+			pr_err("Error copying to user space\n");
+			err = -EFAULT;
+		}
+
+		mutex_unlock(&qfp_priv->lock);
+		break;
+
+	case QFP_FUSE_IOC_WRITE:
+		if (arg == 0) {
+			pr_err("user space arg not supplied\n");
+			err = -EFAULT;
+			break;
+		}
+
+		if (copy_from_user(&req, (void __user *)arg, sizeof(req))) {
+			pr_err("Error copying req from user space\n");
+			err = -EFAULT;
+			break;
+		}
+		/* Check for limits */
+		if (!req.size) {
+			pr_err("Request size zero.\n");
+			err = -EFAULT;
+			break;
+		}
+		if (qfp_priv->base + req.offset + (req.size - 1) * 4 >
+				qfp_priv->end) {
+			pr_err("Req size exceeds QFPROM space\n");
+			err = -EFAULT;
+			break;
+		}
+
+		/* Allocate memory for buffer */
+		buf = kzalloc(4 * (req.size), GFP_KERNEL);
+		if (buf == NULL) {
+			pr_alert("No memory for data\n");
+			err = -ENOMEM;
+			break;
+		}
+
+		/* Copy user data to local buffer */
+		if (copy_from_user(buf, (void __user *)req.data,
+				4 * (req.size))) {
+			pr_err("Error copying data from user space\n");
+			err = -EFAULT;
+			break;
+		}
+
+		if (mutex_lock_interruptible(&qfp_priv->lock)) {
+			err = -ERESTARTSYS;
+			break;
+		}
+
+		/* Write data word at a time */
+		for (i = 0; i < req.size && !err; i++) {
+			err = qfp_fuse_write_word(((u32 *) (
+				qfp_priv->base + req.offset) + i), buf[i]);
+		}
+
+		mutex_unlock(&qfp_priv->lock);
+		break;
+	default:
+		pr_err("Invalid ioctl command.\n");
+		return -ENOTTY;
+	}
+	kfree(buf);
+	return err;
+}
+
+static const struct file_operations qfp_fuse_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = qfp_fuse_ioctl,
+	.open = qfp_fuse_open,
+	.release = qfp_fuse_release
+};
+
+static struct miscdevice qfp_fuse_dev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "qfpfuse",
+	.fops = &qfp_fuse_fops
+};
+
+
+static int qfp_fuse_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct resource *res;
+	const char *regulator_name = pdev->dev.platform_data;
+
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+
+	if (!regulator_name)
+		return -EINVAL;
+
+	/* Initialize */
+	qfp_priv = kzalloc(sizeof(struct qfp_priv_t), GFP_KERNEL);
+
+	if (qfp_priv == NULL) {
+		pr_alert("Not enough memory to initialize device\n");
+		return -ENOMEM;
+	}
+
+	/* The driver is passed ioremapped address */
+	qfp_priv->base = res->start;
+	qfp_priv->end = res->end;
+
+	/* Get regulator for QFPROM writes */
+	qfp_priv->fuse_vdd = regulator_get(NULL, regulator_name);
+	if (IS_ERR(qfp_priv->fuse_vdd)) {
+		ret = PTR_ERR(qfp_priv->fuse_vdd);
+		pr_err("Err (%d) getting %s\n", ret, regulator_name);
+		qfp_priv->fuse_vdd = NULL;
+		goto err;
+	}
+
+	mutex_init(&qfp_priv->lock);
+
+	ret = misc_register(&qfp_fuse_dev);
+	if (ret < 0)
+		goto err;
+
+	pr_info("Fuse driver base:%x end:%x\n", qfp_priv->base, qfp_priv->end);
+	return 0;
+
+err:
+	if (qfp_priv->fuse_vdd)
+		regulator_put(qfp_priv->fuse_vdd);
+
+	kfree(qfp_priv);
+	qfp_priv = NULL;
+
+	return ret;
+
+}
+
+static int __devexit qfp_fuse_remove(struct platform_device *plat)
+{
+	if (qfp_priv && qfp_priv->fuse_vdd)
+		regulator_put(qfp_priv->fuse_vdd);
+
+	kfree(qfp_priv);
+	qfp_priv = NULL;
+
+	misc_deregister(&qfp_fuse_dev);
+	pr_info("Removing Fuse driver\n");
+	return 0;
+}
+
+static struct platform_driver qfp_fuse_driver = {
+	.probe = qfp_fuse_probe,
+	.remove = qfp_fuse_remove,
+	.driver = {
+		.name = "qfp_fuse_driver",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init qfp_fuse_init(void)
+{
+	return platform_driver_register(&qfp_fuse_driver);
+}
+
+static void __exit qfp_fuse_exit(void)
+{
+	platform_driver_unregister(&qfp_fuse_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Rohit Vaswani <rvaswani@codeaurora.org>");
+MODULE_DESCRIPTION("Driver to read/write to QFPROM fuses.");
+MODULE_VERSION("1.01");
+
+module_init(qfp_fuse_init);
+module_exit(qfp_fuse_exit);
diff --git a/drivers/misc/tsif.c b/drivers/misc/tsif.c
new file mode 100644
index 0000000..53d4ef2
--- /dev/null
+++ b/drivers/misc/tsif.c
@@ -0,0 +1,1564 @@
+/*
+ * TSIF Driver
+ *
+ * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>       /* Needed by all modules */
+#include <linux/kernel.h>       /* Needed for KERN_INFO */
+#include <linux/init.h>         /* Needed for the macros */
+#include <linux/err.h>          /* IS_ERR etc. */
+#include <linux/platform_device.h>
+
+#include <linux/ioport.h>       /* XXX_mem_region */
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>  /* dma_XXX */
+#include <linux/delay.h>        /* msleep */
+
+#include <linux/io.h>             /* ioXXX */
+#include <linux/uaccess.h>        /* copy_from_user */
+#include <linux/clk.h>
+#include <linux/wakelock.h>
+#include <linux/tsif_api.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>          /* kfree, kzalloc */
+
+#include <mach/gpio.h>
+#include <mach/dma.h>
+#include <mach/msm_tsif.h>
+
+/*
+ * TSIF register offsets
+ */
+#define TSIF_STS_CTL_OFF               (0x0)
+#define TSIF_TIME_LIMIT_OFF            (0x4)
+#define TSIF_CLK_REF_OFF               (0x8)
+#define TSIF_LPBK_FLAGS_OFF            (0xc)
+#define TSIF_LPBK_DATA_OFF            (0x10)
+#define TSIF_TEST_CTL_OFF             (0x14)
+#define TSIF_TEST_MODE_OFF            (0x18)
+#define TSIF_TEST_RESET_OFF           (0x1c)
+#define TSIF_TEST_EXPORT_OFF          (0x20)
+#define TSIF_TEST_CURRENT_OFF         (0x24)
+
+#define TSIF_DATA_PORT_OFF            (0x100)
+
+/* bits for TSIF_STS_CTL register */
+#define TSIF_STS_CTL_EN_IRQ       (1 << 28)
+#define TSIF_STS_CTL_PACK_AVAIL   (1 << 27)
+#define TSIF_STS_CTL_1ST_PACKET   (1 << 26)
+#define TSIF_STS_CTL_OVERFLOW     (1 << 25)
+#define TSIF_STS_CTL_LOST_SYNC    (1 << 24)
+#define TSIF_STS_CTL_TIMEOUT      (1 << 23)
+#define TSIF_STS_CTL_INV_SYNC     (1 << 21)
+#define TSIF_STS_CTL_INV_NULL     (1 << 20)
+#define TSIF_STS_CTL_INV_ERROR    (1 << 19)
+#define TSIF_STS_CTL_INV_ENABLE   (1 << 18)
+#define TSIF_STS_CTL_INV_DATA     (1 << 17)
+#define TSIF_STS_CTL_INV_CLOCK    (1 << 16)
+#define TSIF_STS_CTL_SPARE        (1 << 15)
+#define TSIF_STS_CTL_EN_NULL      (1 << 11)
+#define TSIF_STS_CTL_EN_ERROR     (1 << 10)
+#define TSIF_STS_CTL_LAST_BIT     (1 <<  9)
+#define TSIF_STS_CTL_EN_TIME_LIM  (1 <<  8)
+#define TSIF_STS_CTL_EN_TCR       (1 <<  7)
+#define TSIF_STS_CTL_TEST_MODE    (3 <<  5)
+#define TSIF_STS_CTL_EN_DM        (1 <<  4)
+#define TSIF_STS_CTL_STOP         (1 <<  3)
+#define TSIF_STS_CTL_START        (1 <<  0)
+
+/*
+ * Data buffering parameters
+ *
 + * Data is stored in a cyclic buffer.
 + *
 + * Data is organized in chunks of packets.
 + * One chunk is processed at a time by the Data Mover.
+ *
+ */
+#define TSIF_PKTS_IN_CHUNK_DEFAULT  (16)  /**< packets in one DM chunk */
+#define TSIF_CHUNKS_IN_BUF_DEFAULT   (8)
+#define TSIF_PKTS_IN_CHUNK        (tsif_device->pkts_per_chunk)
+#define TSIF_CHUNKS_IN_BUF        (tsif_device->chunks_per_buf)
+#define TSIF_PKTS_IN_BUF          (TSIF_PKTS_IN_CHUNK * TSIF_CHUNKS_IN_BUF)
+#define TSIF_BUF_SIZE             (TSIF_PKTS_IN_BUF * TSIF_PKT_SIZE)
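+/*
+ * With the defaults above (16 packets per chunk, 8 chunks per buffer) the
+ * cyclic buffer holds 128 packets of TSIF_PKT_SIZE bytes each
+ * (TSIF_PKT_SIZE is defined in linux/tsif_api.h).
+ */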
+
+#define ROW_RESET                 (MSM_CLK_CTL_BASE + 0x214)
+#define GLBL_CLK_ENA              (MSM_CLK_CTL_BASE + 0x000)
+#define CLK_HALT_STATEB           (MSM_CLK_CTL_BASE + 0x104)
+#define TSIF_NS_REG               (MSM_CLK_CTL_BASE + 0x0b4)
+#define TV_NS_REG                 (MSM_CLK_CTL_BASE + 0x0bc)
+
+/* used to create debugfs entries */
+static const struct {
+	const char *name;
+	mode_t mode;
+	int offset;
+} debugfs_tsif_regs[] = {
+	{"sts_ctl",      S_IRUGO | S_IWUSR, TSIF_STS_CTL_OFF},
+	{"time_limit",   S_IRUGO | S_IWUSR, TSIF_TIME_LIMIT_OFF},
+	{"clk_ref",      S_IRUGO | S_IWUSR, TSIF_CLK_REF_OFF},
+	{"lpbk_flags",   S_IRUGO | S_IWUSR, TSIF_LPBK_FLAGS_OFF},
+	{"lpbk_data",    S_IRUGO | S_IWUSR, TSIF_LPBK_DATA_OFF},
+	{"test_ctl",     S_IRUGO | S_IWUSR, TSIF_TEST_CTL_OFF},
+	{"test_mode",    S_IRUGO | S_IWUSR, TSIF_TEST_MODE_OFF},
+	{"test_reset",             S_IWUSR, TSIF_TEST_RESET_OFF},
+	{"test_export",  S_IRUGO | S_IWUSR, TSIF_TEST_EXPORT_OFF},
+	{"test_current", S_IRUGO,           TSIF_TEST_CURRENT_OFF},
+	{"data_port",    S_IRUSR,           TSIF_DATA_PORT_OFF},
+};
+
+/* structures for Data Mover */
+struct tsif_dmov_cmd {
+	dmov_box box;
+	dma_addr_t box_ptr;
+};
+
+struct msm_tsif_device;
+
+struct tsif_xfer {
+	struct msm_dmov_cmd hdr;
+	struct msm_tsif_device *tsif_device;
+	int busy;
+	int wi;   /**< set device's write index after xfer */
+};
+
+struct msm_tsif_device {
+	struct list_head devlist;
+	struct platform_device *pdev;
+	struct resource *memres;
+	void __iomem *base;
+	unsigned int irq;
+	int mode;
+	u32 time_limit;
+	enum tsif_state state;
+	struct wake_lock wake_lock;
+	/* clocks */
+	struct clk *tsif_clk;
+	struct clk *tsif_pclk;
+	struct clk *tsif_ref_clk;
+	/* debugfs */
+	struct dentry *dent_tsif;
+	struct dentry *debugfs_tsif_regs[ARRAY_SIZE(debugfs_tsif_regs)];
+	struct dentry *debugfs_gpio;
+	struct dentry *debugfs_action;
+	struct dentry *debugfs_dma;
+	struct dentry *debugfs_databuf;
+	struct debugfs_blob_wrapper blob_wrapper_databuf;
+	/* DMA related */
+	int dma;
+	int crci;
+	void *data_buffer;
+	dma_addr_t data_buffer_dma;
+	u32 pkts_per_chunk;
+	u32 chunks_per_buf;
+	int ri;
+	int wi;
+	int dmwi;  /**< DataMover write index */
+	struct tsif_dmov_cmd *dmov_cmd[2];
+	dma_addr_t dmov_cmd_dma[2];
+	struct tsif_xfer xfer[2];
+	struct tasklet_struct dma_refill;
+	/* statistics */
+	u32 stat_rx;
+	u32 stat_overflow;
+	u32 stat_lost_sync;
+	u32 stat_timeout;
+	u32 stat_dmov_err;
+	u32 stat_soft_drop;
+	int stat_ifi; /* inter frame interval */
+	u32 stat0, stat1;
+	/* client */
+	void *client_data;
+	void (*client_notify)(void *client_data);
+};
+
+/* ===clocks begin=== */
+
+static void tsif_put_clocks(struct msm_tsif_device *tsif_device)
+{
+	if (tsif_device->tsif_clk) {
+		clk_put(tsif_device->tsif_clk);
+		tsif_device->tsif_clk = NULL;
+	}
+	if (tsif_device->tsif_pclk) {
+		clk_put(tsif_device->tsif_pclk);
+		tsif_device->tsif_pclk = NULL;
+	}
+
+	if (tsif_device->tsif_ref_clk) {
+		clk_put(tsif_device->tsif_ref_clk);
+		tsif_device->tsif_ref_clk = NULL;
+	}
+}
+
+static int tsif_get_clocks(struct msm_tsif_device *tsif_device)
+{
+	struct msm_tsif_platform_data *pdata =
+		tsif_device->pdev->dev.platform_data;
+	int rc = 0;
+
+	if (pdata->tsif_clk) {
+		tsif_device->tsif_clk = clk_get(NULL, pdata->tsif_clk);
+		if (IS_ERR(tsif_device->tsif_clk)) {
+			dev_err(&tsif_device->pdev->dev, "failed to get %s\n",
+				pdata->tsif_clk);
+			rc = PTR_ERR(tsif_device->tsif_clk);
+			tsif_device->tsif_clk = NULL;
+			goto ret;
+		}
+	}
+	if (pdata->tsif_pclk) {
+		tsif_device->tsif_pclk = clk_get(NULL, pdata->tsif_pclk);
+		if (IS_ERR(tsif_device->tsif_pclk)) {
+			dev_err(&tsif_device->pdev->dev, "failed to get %s\n",
+				pdata->tsif_pclk);
+			rc = PTR_ERR(tsif_device->tsif_pclk);
+			tsif_device->tsif_pclk = NULL;
+			goto ret;
+		}
+	}
+	if (pdata->tsif_ref_clk) {
+		tsif_device->tsif_ref_clk = clk_get(NULL, pdata->tsif_ref_clk);
+		if (IS_ERR(tsif_device->tsif_ref_clk)) {
+			dev_err(&tsif_device->pdev->dev, "failed to get %s\n",
+				pdata->tsif_ref_clk);
+			rc = PTR_ERR(tsif_device->tsif_ref_clk);
+			tsif_device->tsif_ref_clk = NULL;
+			goto ret;
+		}
+	}
+	return 0;
+ret:
+	tsif_put_clocks(tsif_device);
+	return rc;
+}
+
+static void tsif_clock(struct msm_tsif_device *tsif_device, int on)
+{
+	if (on) {
+		if (tsif_device->tsif_clk)
+			clk_enable(tsif_device->tsif_clk);
+		if (tsif_device->tsif_pclk)
+			clk_enable(tsif_device->tsif_pclk);
+		clk_enable(tsif_device->tsif_ref_clk);
+	} else {
+		if (tsif_device->tsif_clk)
+			clk_disable(tsif_device->tsif_clk);
+		if (tsif_device->tsif_pclk)
+			clk_disable(tsif_device->tsif_pclk);
+		clk_disable(tsif_device->tsif_ref_clk);
+	}
+}
+/* ===clocks end=== */
+/* ===gpio begin=== */
+
+static void tsif_gpios_free(const struct msm_gpio *table, int size)
+{
+	int i;
+	const struct msm_gpio *g;
+	for (i = size-1; i >= 0; i--) {
+		g = table + i;
+		gpio_free(GPIO_PIN(g->gpio_cfg));
+	}
+}
+
+static int tsif_gpios_request(const struct msm_gpio *table, int size)
+{
+	int rc;
+	int i;
+	const struct msm_gpio *g;
+	for (i = 0; i < size; i++) {
+		g = table + i;
+		rc = gpio_request(GPIO_PIN(g->gpio_cfg), g->label);
+		if (rc) {
+			pr_err("gpio_request(%d) <%s> failed: %d\n",
+			       GPIO_PIN(g->gpio_cfg), g->label ?: "?", rc);
+			goto err;
+		}
+	}
+	return 0;
+err:
+	tsif_gpios_free(table, i);
+	return rc;
+}
+
+static int tsif_gpios_disable(const struct msm_gpio *table, int size)
+{
+	int rc = 0;
+	int i;
+	const struct msm_gpio *g;
+	for (i = size-1; i >= 0; i--) {
+		int tmp;
+		g = table + i;
+		tmp = gpio_tlmm_config(g->gpio_cfg, GPIO_CFG_DISABLE);
+		if (tmp) {
+			pr_err("gpio_tlmm_config(0x%08x, GPIO_CFG_DISABLE)"
+			       " <%s> failed: %d\n",
+			       g->gpio_cfg, g->label ?: "?", tmp);
+			pr_err("pin %d func %d dir %d pull %d drvstr %d\n",
+			       GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg),
+			       GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg),
+			       GPIO_DRVSTR(g->gpio_cfg));
+			if (!rc)
+				rc = tmp;
+		}
+	}
+
+	return rc;
+}
+
+static int tsif_gpios_enable(const struct msm_gpio *table, int size)
+{
+	int rc;
+	int i;
+	const struct msm_gpio *g;
+	for (i = 0; i < size; i++) {
+		g = table + i;
+		rc = gpio_tlmm_config(g->gpio_cfg, GPIO_CFG_ENABLE);
+		if (rc) {
+			pr_err("gpio_tlmm_config(0x%08x, GPIO_CFG_ENABLE)"
+			       " <%s> failed: %d\n",
+			       g->gpio_cfg, g->label ?: "?", rc);
+			pr_err("pin %d func %d dir %d pull %d drvstr %d\n",
+			       GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg),
+			       GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg),
+			       GPIO_DRVSTR(g->gpio_cfg));
+			goto err;
+		}
+	}
+	return 0;
+err:
+	tsif_gpios_disable(table, i);
+	return rc;
+}
+
+static int tsif_gpios_request_enable(const struct msm_gpio *table, int size)
+{
+	int rc = tsif_gpios_request(table, size);
+	if (rc)
+		return rc;
+	rc = tsif_gpios_enable(table, size);
+	if (rc)
+		tsif_gpios_free(table, size);
+	return rc;
+}
+
+static void tsif_gpios_disable_free(const struct msm_gpio *table, int size)
+{
+	tsif_gpios_disable(table, size);
+	tsif_gpios_free(table, size);
+}
+
+static int tsif_start_gpios(struct msm_tsif_device *tsif_device)
+{
+	struct msm_tsif_platform_data *pdata =
+		tsif_device->pdev->dev.platform_data;
+	return tsif_gpios_request_enable(pdata->gpios, pdata->num_gpios);
+}
+
+static void tsif_stop_gpios(struct msm_tsif_device *tsif_device)
+{
+	struct msm_tsif_platform_data *pdata =
+		tsif_device->pdev->dev.platform_data;
+	tsif_gpios_disable_free(pdata->gpios, pdata->num_gpios);
+}
+
+/* ===gpio end=== */
+
+static int tsif_start_hw(struct msm_tsif_device *tsif_device)
+{
+	u32 ctl = TSIF_STS_CTL_EN_IRQ |
+		  TSIF_STS_CTL_EN_TIME_LIM |
+		  TSIF_STS_CTL_EN_TCR |
+		  TSIF_STS_CTL_EN_DM;
+	dev_info(&tsif_device->pdev->dev, "%s\n", __func__);
+	switch (tsif_device->mode) {
+	case 1: /* mode 1 */
+		ctl |= (0 << 5);
+		break;
+	case 2: /* mode 2 */
+		ctl |= (1 << 5);
+		break;
+	case 3: /* manual - control from debugfs */
+		return 0;
+	default:
+		return -EINVAL;
+	}
+	iowrite32(ctl, tsif_device->base + TSIF_STS_CTL_OFF);
+	iowrite32(tsif_device->time_limit,
+		  tsif_device->base + TSIF_TIME_LIMIT_OFF);
+	wmb();
+	iowrite32(ctl | TSIF_STS_CTL_START,
+		  tsif_device->base + TSIF_STS_CTL_OFF);
+	wmb();
+	ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF);
+	return (ctl & TSIF_STS_CTL_START) ? 0 : -EFAULT;
+}
+
+static void tsif_stop_hw(struct msm_tsif_device *tsif_device)
+{
+	iowrite32(TSIF_STS_CTL_STOP, tsif_device->base + TSIF_STS_CTL_OFF);
+	wmb();
+}
+
+/* ===DMA begin=== */
+/**
+ * TSIF DMA theory of operation
+ *
 + * A circular memory buffer \a tsif_mem_buffer is allocated;
 + * the following indices point into it and are moved forward:
 + * - \a ri index of the first packet ready to be read.
 + *      Updated by the client's call to tsif_reclaim_packets()
 + * - \a wi points to the next packet to be written by the DM.
 + *      Data below it is valid and will not be overwritten by DMA.
 + *      Moved in the DM callback
 + * - \a dmwi points to the next packet not yet scheduled for the DM;
 + *      moved when a packet is scheduled for the DM
 + *
 + * In addition, each DM xfer keeps an internal \a wi - a copy of
 + * \a tsif_device->dmwi taken immediately after scheduling.
 + *
 + * Initially, 2 packets get scheduled for the DM.
 + *
 + * Upon packet reception, the DM writes the packet to the pre-programmed
 + * location and invokes its callback.
 + *
 + * The DM callback sets the \a wi pointer to \a xfer->wi;
 + * then it schedules the next packet for the DM and moves the \a dmwi pointer.
 + *
 + * Buffer overflow handling
 + *
 + * If \a dmwi == \a ri-1, the buffer is full and \a dmwi can't be advanced;
 + * the DMA is re-scheduled to the same index.
 + * The callback checks for this and does not move \a wi onto \a ri.
 + *
 + * On a \a read request, data between the \a ri and \a wi pointers may be
 + * read; the \a ri pointer is moved accordingly.
 + *
 + * It is always guaranteed, modulo sizeof(tsif_mem_buffer), that
 + * \a wi lies within [\a ri, \a dmwi]
+ *
+ * Amount of data available is (wi-ri)*TSIF_PKT_SIZE
+ *
+ * Number of scheduled packets for DM: (dmwi-wi)
+ */
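+/*
+ * Worked example (with the default 16 packets per chunk and 8 chunks per
+ * buffer, i.e. TSIF_PKTS_IN_BUF = 128): if ri = 10, wi = 42 and dmwi = 58,
+ * then (wi - ri) = 32 packets are ready for the client to read and
+ * (dmwi - wi) = 16 packets (one chunk) are still scheduled on the Data
+ * Mover; all differences are taken modulo 128.
+ */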
+
+/**
+ * tsif_dma_schedule - schedule DMA transfers
+ *
+ * @tsif_device: device
+ *
+ * Executed from process context on init, or from tasklet when
+ * re-scheduling upon DMA completion.
 + * This prevents concurrent execution from several CPUs.
+ */
+static void tsif_dma_schedule(struct msm_tsif_device *tsif_device)
+{
+	int i, dmwi0, dmwi1, found = 0;
+	/* find free entry */
+	for (i = 0; i < 2; i++) {
+		struct tsif_xfer *xfer = &tsif_device->xfer[i];
+		if (xfer->busy)
+			continue;
+		found++;
+		xfer->busy = 1;
+		dmwi0 = tsif_device->dmwi;
+		tsif_device->dmov_cmd[i]->box.dst_row_addr =
+			tsif_device->data_buffer_dma + TSIF_PKT_SIZE * dmwi0;
+		/* proposed value for dmwi */
+		dmwi1 = (dmwi0 + TSIF_PKTS_IN_CHUNK) % TSIF_PKTS_IN_BUF;
+		/**
+		 * If dmwi is going to overlap with ri,
+		 * an overflow occurs because data was not read.
+		 * Still receive this packet, so as not to stall the
+		 * TSIF hardware, but do not advance dmwi.
+		 *
+		 * Upon reception, the packet will be dropped.
+		 */
+		if (dmwi1 != tsif_device->ri) {
+			tsif_device->dmwi = dmwi1;
+		} else {
+			dev_info(&tsif_device->pdev->dev,
+				 "Overflow detected\n");
+		}
+		xfer->wi = tsif_device->dmwi;
+#ifdef CONFIG_TSIF_DEBUG
+		dev_info(&tsif_device->pdev->dev,
+			"schedule xfer[%d] -> [%2d]{%2d}\n",
+			i, dmwi0, xfer->wi);
+#endif
+		/* complete all the writes to box */
+		dma_coherent_pre_ops();
+		msm_dmov_enqueue_cmd(tsif_device->dma, &xfer->hdr);
+	}
+	if (!found)
+		dev_info(&tsif_device->pdev->dev,
+			 "All xfer entries are busy\n");
+}
+
+/**
+ * tsif_dmov_complete_func - DataMover completion callback
+ *
+ * @cmd:      original DM command
+ * @result:   DM result
+ * @err:      optional error buffer
+ *
+ * Executed in IRQ context (Data Mover's IRQ)
+ * DataMover's spinlock @msm_dmov_lock held.
+ */
+static void tsif_dmov_complete_func(struct msm_dmov_cmd *cmd,
+				    unsigned int result,
+				    struct msm_dmov_errdata *err)
+{
+	int i;
+	u32 data_offset;
+	struct tsif_xfer *xfer;
+	struct msm_tsif_device *tsif_device;
+	int reschedule = 0;
+	if (!(result & DMOV_RSLT_VALID)) { /* can I trust @cmd? */
+		pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p", result, cmd);
+		return;
+	}
+	/* restore original context */
+	xfer = container_of(cmd, struct tsif_xfer, hdr);
+	tsif_device = xfer->tsif_device;
+	i = xfer - tsif_device->xfer;
+	data_offset = tsif_device->dmov_cmd[i]->box.dst_row_addr -
+		      tsif_device->data_buffer_dma;
+
+	/* order reads from the xferred buffer */
+	dma_coherent_post_ops();
+	if (result & DMOV_RSLT_DONE) {
+		int w = data_offset / TSIF_PKT_SIZE;
+		tsif_device->stat_rx++;
+		/*
+		 * Was there a software overflow when this xfer was scheduled?
+		 *
+		 * @w is where this xfer was actually written to;
+		 * @xfer->wi is where the device's @wi will be set;
+		 *
+		 * if these two are equal, we are short of space and
+		 * going to overwrite this xfer - this is a "soft drop".
+		 */
+		if (w == xfer->wi)
+			tsif_device->stat_soft_drop++;
+		reschedule = (tsif_device->state == tsif_state_running);
+#ifdef CONFIG_TSIF_DEBUG
+		/* IFI calculation */
+		/*
+		 * update stat_ifi (inter frame interval)
+		 *
+		 * Calculate time difference between last and 1-st
+		 * packets in chunk
+		 *
+		 * To be removed after tuning
+		 */
+		if (TSIF_PKTS_IN_CHUNK > 1) {
+			void *ptr = tsif_device->data_buffer + data_offset;
+			u32 *p0 = ptr;
+			u32 *p1 = ptr + (TSIF_PKTS_IN_CHUNK - 1) *
+				TSIF_PKT_SIZE;
+			u32 tts0 = TSIF_STATUS_TTS(tsif_device->stat0 =
+						   tsif_pkt_status(p0));
+			u32 tts1 = TSIF_STATUS_TTS(tsif_device->stat1 =
+						   tsif_pkt_status(p1));
+			tsif_device->stat_ifi = (tts1 - tts0) /
+				(TSIF_PKTS_IN_CHUNK - 1);
+		}
+#endif
+	} else {
+		/**
+		 *  Error or flush
+		 *
+		 *  To recover - re-open TSIF device.
+		 */
+		/* mark status "not valid" in data buffer */
+		int n;
+		void *ptr = tsif_device->data_buffer + data_offset;
+		for (n = 0; n < TSIF_PKTS_IN_CHUNK; n++) {
+			u32 *p = ptr + (n * TSIF_PKT_SIZE);
+			/* last dword is status + TTS */
+			p[TSIF_PKT_SIZE / sizeof(*p) - 1] = 0;
+		}
+		if (result & DMOV_RSLT_ERROR) {
+			dev_err(&tsif_device->pdev->dev,
+				"DMA error (0x%08x)\n", result);
+			tsif_device->stat_dmov_err++;
+			/* force device close */
+			if (tsif_device->state == tsif_state_running) {
+				tsif_stop_hw(tsif_device);
+				/*
+				 * Clocks _may_ be stopped right from IRQ
+				 * context. This is far from optimal w.r.t
+				 * latency.
+				 *
+				 * But this branch is taken only in case of a
+				 * severe hardware problem (I don't even know
+				 * what should happen for DMOV_RSLT_ERROR);
+				 * thus I prefer code simplicity over
+				 * performance.
+				 */
+				tsif_clock(tsif_device, 0);
+				tsif_device->state = tsif_state_flushing;
+			}
+		}
+		if (result & DMOV_RSLT_FLUSH) {
+			/*
+			 * Flushing normally happens in the course of
+			 * @tsif_stop(), when we are waiting for outstanding
+			 * DMA commands to be flushed.
+			 */
+			dev_info(&tsif_device->pdev->dev,
+				 "DMA channel flushed (0x%08x)\n", result);
+			if (tsif_device->state == tsif_state_flushing) {
+				if ((!tsif_device->xfer[0].busy) &&
+				    (!tsif_device->xfer[1].busy)) {
+					tsif_device->state = tsif_state_stopped;
+				}
+			}
+		}
+		if (err)
+			dev_err(&tsif_device->pdev->dev,
+				"Flush data: %08x %08x %08x %08x %08x %08x\n",
+				err->flush[0], err->flush[1], err->flush[2],
+				err->flush[3], err->flush[4], err->flush[5]);
+	}
+	tsif_device->wi = xfer->wi;
+	xfer->busy = 0;
+	if (tsif_device->client_notify)
+		tsif_device->client_notify(tsif_device->client_data);
+	/*
+	 * Can't schedule the next DMA here -
+	 * the DataMover driver still holds its semaphore,
+	 * so a deadlock would occur.
+	 */
+	if (reschedule)
+		tasklet_schedule(&tsif_device->dma_refill);
+}
+
+/**
+ * tsif_dma_refill - tasklet function for tsif_device->dma_refill
+ *
+ * @data:   tsif_device
+ *
+ * Reschedule DMA requests
+ *
+ * Executed in tasklet
+ */
+static void tsif_dma_refill(unsigned long data)
+{
+	struct msm_tsif_device *tsif_device = (struct msm_tsif_device *) data;
+	if (tsif_device->state == tsif_state_running)
+		tsif_dma_schedule(tsif_device);
+}
+
+/**
+ * tsif_dma_flush - flush DMA channel
+ *
+ * @tsif_device:
+ *
 + * Busy-wait until the DMA is flushed
+ */
+static void tsif_dma_flush(struct msm_tsif_device *tsif_device)
+{
+	if (tsif_device->xfer[0].busy || tsif_device->xfer[1].busy) {
+		tsif_device->state = tsif_state_flushing;
+		while (tsif_device->xfer[0].busy ||
+		       tsif_device->xfer[1].busy) {
+			msm_dmov_flush(tsif_device->dma);
+			msleep(10);
+		}
+	}
+	tsif_device->state = tsif_state_stopped;
+	if (tsif_device->client_notify)
+		tsif_device->client_notify(tsif_device->client_data);
+}
+
+static void tsif_dma_exit(struct msm_tsif_device *tsif_device)
+{
+	int i;
+	tsif_device->state = tsif_state_flushing;
+	tasklet_kill(&tsif_device->dma_refill);
+	tsif_dma_flush(tsif_device);
+	for (i = 0; i < 2; i++) {
+		if (tsif_device->dmov_cmd[i]) {
+			dma_free_coherent(NULL, sizeof(struct tsif_dmov_cmd),
+					  tsif_device->dmov_cmd[i],
+					  tsif_device->dmov_cmd_dma[i]);
+			tsif_device->dmov_cmd[i] = NULL;
+		}
+	}
+	if (tsif_device->data_buffer) {
+		tsif_device->blob_wrapper_databuf.data = NULL;
+		tsif_device->blob_wrapper_databuf.size = 0;
+		dma_free_coherent(NULL, TSIF_BUF_SIZE,
+				  tsif_device->data_buffer,
+				  tsif_device->data_buffer_dma);
+		tsif_device->data_buffer = NULL;
+	}
+}
+
+static int tsif_dma_init(struct msm_tsif_device *tsif_device)
+{
+	int i;
+	/* TODO: allocate all DMA memory in one buffer */
+	/* Note: don't pass the device here;
+	   that would require coherent_dma_mask in the device definition */
+	tsif_device->data_buffer = dma_alloc_coherent(NULL, TSIF_BUF_SIZE,
+				&tsif_device->data_buffer_dma, GFP_KERNEL);
+	if (!tsif_device->data_buffer)
+		goto err;
+	dev_info(&tsif_device->pdev->dev, "data_buffer: %p phys 0x%08x\n",
+		 tsif_device->data_buffer, tsif_device->data_buffer_dma);
+	tsif_device->blob_wrapper_databuf.data = tsif_device->data_buffer;
+	tsif_device->blob_wrapper_databuf.size = TSIF_BUF_SIZE;
+	tsif_device->ri = 0;
+	tsif_device->wi = 0;
+	tsif_device->dmwi = 0;
+	for (i = 0; i < 2; i++) {
+		dmov_box *box;
+		struct msm_dmov_cmd *hdr;
+		tsif_device->dmov_cmd[i] = dma_alloc_coherent(NULL,
+			sizeof(struct tsif_dmov_cmd),
+			&tsif_device->dmov_cmd_dma[i], GFP_KERNEL);
+		if (!tsif_device->dmov_cmd[i])
+			goto err;
+		dev_info(&tsif_device->pdev->dev, "dma[%i]: %p phys 0x%08x\n",
+			 i, tsif_device->dmov_cmd[i],
+			 tsif_device->dmov_cmd_dma[i]);
+		/* dst in 16 LSB, src in 16 MSB */
+		box = &(tsif_device->dmov_cmd[i]->box);
+		box->cmd = CMD_MODE_BOX | CMD_LC |
+			   CMD_SRC_CRCI(tsif_device->crci);
+		box->src_row_addr =
+			tsif_device->memres->start + TSIF_DATA_PORT_OFF;
+		box->src_dst_len = (TSIF_PKT_SIZE << 16) | TSIF_PKT_SIZE;
+		box->num_rows = (TSIF_PKTS_IN_CHUNK << 16) | TSIF_PKTS_IN_CHUNK;
+		box->row_offset = (0 << 16) | TSIF_PKT_SIZE;
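+		/*
+		 * Each box row is one TSIF packet: the source row offset is 0
+		 * because every row is read from the same data-port register,
+		 * while the destination advances by TSIF_PKT_SIZE per row so
+		 * a whole chunk lands in consecutive packet slots.
+		 */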
+
+		tsif_device->dmov_cmd[i]->box_ptr = CMD_PTR_LP |
+			DMOV_CMD_ADDR(tsif_device->dmov_cmd_dma[i] +
+				      offsetof(struct tsif_dmov_cmd, box));
+		tsif_device->xfer[i].tsif_device = tsif_device;
+		hdr = &tsif_device->xfer[i].hdr;
+		hdr->cmdptr = DMOV_CMD_ADDR(tsif_device->dmov_cmd_dma[i] +
+			      offsetof(struct tsif_dmov_cmd, box_ptr));
+		hdr->complete_func = tsif_dmov_complete_func;
+	}
+	msm_dmov_flush(tsif_device->dma);
+	return 0;
+err:
+	dev_err(&tsif_device->pdev->dev, "Failed to allocate DMA buffers\n");
+	tsif_dma_exit(tsif_device);
+	return -ENOMEM;
+}
+
+/* ===DMA end=== */
+
+/* ===IRQ begin=== */
+
+static irqreturn_t tsif_irq(int irq, void *dev_id)
+{
+	struct msm_tsif_device *tsif_device = dev_id;
+	u32 sts_ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF);
+	if (!(sts_ctl & (TSIF_STS_CTL_PACK_AVAIL |
+			 TSIF_STS_CTL_OVERFLOW |
+			 TSIF_STS_CTL_LOST_SYNC |
+			 TSIF_STS_CTL_TIMEOUT))) {
+		dev_warn(&tsif_device->pdev->dev, "Spurious interrupt\n");
+		return IRQ_NONE;
+	}
+	if (sts_ctl & TSIF_STS_CTL_PACK_AVAIL) {
+		dev_info(&tsif_device->pdev->dev, "TSIF IRQ: PACK_AVAIL\n");
+		tsif_device->stat_rx++;
+	}
+	if (sts_ctl & TSIF_STS_CTL_OVERFLOW) {
+		dev_info(&tsif_device->pdev->dev, "TSIF IRQ: OVERFLOW\n");
+		tsif_device->stat_overflow++;
+	}
+	if (sts_ctl & TSIF_STS_CTL_LOST_SYNC) {
+		dev_info(&tsif_device->pdev->dev, "TSIF IRQ: LOST SYNC\n");
+		tsif_device->stat_lost_sync++;
+	}
+	if (sts_ctl & TSIF_STS_CTL_TIMEOUT) {
+		dev_info(&tsif_device->pdev->dev, "TSIF IRQ: TIMEOUT\n");
+		tsif_device->stat_timeout++;
+	}
+	iowrite32(sts_ctl, tsif_device->base + TSIF_STS_CTL_OFF);
+	wmb();
+	return IRQ_HANDLED;
+}
+
+/* ===IRQ end=== */
+
+/* ===Device attributes begin=== */
+
+static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
+	char *state_string;
+	switch (tsif_device->state) {
+	case tsif_state_stopped:
+		state_string = "stopped";
+		break;
+	case tsif_state_running:
+		state_string = "running";
+		break;
+	case tsif_state_flushing:
+		state_string = "flushing";
+		break;
+	default:
+		state_string = "???";
+	}
+	return snprintf(buf, PAGE_SIZE,
+			"Device       %s\n"
+			"Mode       = %d\n"
+			"Time limit = %d\n"
+			"State        %s\n"
+			"Client     = %p\n"
+			"Pkt/Buf    = %d\n"
+			"Pkt/chunk  = %d\n"
+			"--statistics--\n"
+			"Rx chunks  = %d\n"
+			"Overflow   = %d\n"
+			"Lost sync  = %d\n"
+			"Timeout    = %d\n"
+			"DMA error  = %d\n"
+			"Soft drop  = %d\n"
+			"IFI        = %d\n"
+			"(0x%08x - 0x%08x) / %d\n"
+			"--debug--\n"
+			"GLBL_CLK_ENA     = 0x%08x\n"
+			"ROW_RESET        = 0x%08x\n"
+			"CLK_HALT_STATEB  = 0x%08x\n"
+			"TV_NS_REG        = 0x%08x\n"
+			"TSIF_NS_REG      = 0x%08x\n",
+			dev_name(dev),
+			tsif_device->mode,
+			tsif_device->time_limit,
+			state_string,
+			tsif_device->client_data,
+			TSIF_PKTS_IN_BUF,
+			TSIF_PKTS_IN_CHUNK,
+			tsif_device->stat_rx,
+			tsif_device->stat_overflow,
+			tsif_device->stat_lost_sync,
+			tsif_device->stat_timeout,
+			tsif_device->stat_dmov_err,
+			tsif_device->stat_soft_drop,
+			tsif_device->stat_ifi,
+			tsif_device->stat1,
+			tsif_device->stat0,
+			TSIF_PKTS_IN_CHUNK - 1,
+			ioread32(GLBL_CLK_ENA),
+			ioread32(ROW_RESET),
+			ioread32(CLK_HALT_STATEB),
+			ioread32(TV_NS_REG),
+			ioread32(TSIF_NS_REG)
+			);
+}
+/**
+ * set_stats - reset statistics on write
+ *
+ * @dev:
+ * @attr:
+ * @buf:
+ * @count:
+ */
+static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
+	tsif_device->stat_rx = 0;
+	tsif_device->stat_overflow = 0;
+	tsif_device->stat_lost_sync = 0;
+	tsif_device->stat_timeout = 0;
+	tsif_device->stat_dmov_err = 0;
+	tsif_device->stat_soft_drop = 0;
+	tsif_device->stat_ifi = 0;
+	return count;
+}
+static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);
+
+static ssize_t show_mode(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
+	return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->mode);
+}
+
+static ssize_t set_mode(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
+	int value;
+	int rc;
+	if (1 != sscanf(buf, "%d", &value)) {
+		dev_err(&tsif_device->pdev->dev,
+			"Failed to parse integer: <%s>\n", buf);
+		return -EINVAL;
+	}
+	rc = tsif_set_mode(tsif_device, value);
+	if (!rc)
+		rc = count;
+	return rc;
+}
+static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, show_mode, set_mode);
+
+static ssize_t show_time_limit(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
+	return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->time_limit);
+}
+
+static ssize_t set_time_limit(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
+	int value;
+	int rc;
+	if (1 != sscanf(buf, "%d", &value)) {
+		dev_err(&tsif_device->pdev->dev,
+			"Failed to parse integer: <%s>\n", buf);
+		return -EINVAL;
+	}
+	rc = tsif_set_time_limit(tsif_device, value);
+	if (!rc)
+		rc = count;
+	return rc;
+}
+static DEVICE_ATTR(time_limit, S_IRUGO | S_IWUSR,
+		   show_time_limit, set_time_limit);
+
+static ssize_t show_buf_config(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
+	return snprintf(buf, PAGE_SIZE, "%d * %d\n",
+			tsif_device->pkts_per_chunk,
+			tsif_device->chunks_per_buf);
+}
+
+static ssize_t set_buf_config(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
+	u32 p, c;
+	int rc;
+	if (2 != sscanf(buf, "%d * %d", &p, &c)) {
+		dev_err(&tsif_device->pdev->dev,
+			"Failed to parse integer: <%s>\n", buf);
+		return -EINVAL;
+	}
+	rc = tsif_set_buf_config(tsif_device, p, c);
+	if (!rc)
+		rc = count;
+	return rc;
+}
+static DEVICE_ATTR(buf_config, S_IRUGO | S_IWUSR,
+		   show_buf_config, set_buf_config);
+
+static struct attribute *dev_attrs[] = {
+	&dev_attr_stats.attr,
+	&dev_attr_mode.attr,
+	&dev_attr_time_limit.attr,
+	&dev_attr_buf_config.attr,
+	NULL,
+};
+static struct attribute_group dev_attr_grp = {
+	.attrs = dev_attrs,
+};
+/* ===Device attributes end=== */
+
+/* ===debugfs begin=== */
+
+static int debugfs_iomem_x32_set(void *data, u64 val)
+{
+	iowrite32(val, data);
+	wmb();
+	return 0;
+}
+
+static int debugfs_iomem_x32_get(void *data, u64 *val)
+{
+	*val = ioread32(data);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
+			debugfs_iomem_x32_set, "0x%08llx\n");
+
+struct dentry *debugfs_create_iomem_x32(const char *name, mode_t mode,
+					struct dentry *parent, u32 *value)
+{
+	return debugfs_create_file(name, mode, parent, value, &fops_iomem_x32);
+}
+
+static int action_open(struct msm_tsif_device *tsif_device)
+{
+	int rc = -EINVAL;
+	int result;
+
+	struct msm_tsif_platform_data *pdata =
+		tsif_device->pdev->dev.platform_data;
+	dev_info(&tsif_device->pdev->dev, "%s\n", __func__);
+	if (tsif_device->state != tsif_state_stopped)
+		return -EAGAIN;
+	rc = tsif_dma_init(tsif_device);
+	if (rc) {
+		dev_err(&tsif_device->pdev->dev, "failed to init DMA\n");
+		return rc;
+	}
+	tsif_device->state = tsif_state_running;
+	/*
+	 * DMA should be scheduled prior to TSIF hardware initialization,
+	 * otherwise "bus error" will be reported by Data Mover
+	 */
+	enable_irq(tsif_device->irq);
+	tsif_clock(tsif_device, 1);
+	tsif_dma_schedule(tsif_device);
+	/*
+	 * init the device if required
+	 */
+	if (pdata->init)
+		pdata->init(pdata);
+	rc = tsif_start_hw(tsif_device);
+	if (rc) {
+		dev_err(&tsif_device->pdev->dev, "Unable to start HW\n");
+		tsif_dma_exit(tsif_device);
+		tsif_clock(tsif_device, 0);
+		return rc;
+	}
+
+	result = pm_runtime_get(&tsif_device->pdev->dev);
+	if (result < 0) {
+		dev_err(&tsif_device->pdev->dev,
+			"Runtime PM: Unable to wake up the device, rc = %d\n",
+			result);
+		return result;
+	}
+
+	wake_lock(&tsif_device->wake_lock);
+	return rc;
+}
+
+static int action_close(struct msm_tsif_device *tsif_device)
+{
+	dev_info(&tsif_device->pdev->dev, "%s, state %d\n", __func__,
+		 (int)tsif_device->state);
+	/*
+	 * DMA should be flushed/stopped prior to TSIF hardware stop,
+	 * otherwise "bus error" will be reported by Data Mover
+	 */
+	tsif_stop_hw(tsif_device);
+	tsif_dma_exit(tsif_device);
+	tsif_clock(tsif_device, 0);
+	disable_irq(tsif_device->irq);
+
+	pm_runtime_put(&tsif_device->pdev->dev);
+	wake_unlock(&tsif_device->wake_lock);
+	return 0;
+}
+
+
+static struct {
+	int (*func)(struct msm_tsif_device *);
+	const char *name;
+} actions[] = {
+	{ action_open,  "open"},
+	{ action_close, "close"},
+};
+
+static ssize_t tsif_debugfs_action_write(struct file *filp,
+					 const char __user *userbuf,
+					 size_t count, loff_t *f_pos)
+{
+	int i;
+	struct msm_tsif_device *tsif_device = filp->private_data;
+	char s[40];
+	int len = min(sizeof(s) - 1, count);
+	if (copy_from_user(s, userbuf, len))
+		return -EFAULT;
+	s[len] = '\0';
+	dev_info(&tsif_device->pdev->dev, "%s:%s\n", __func__, s);
+	for (i = 0; i < ARRAY_SIZE(actions); i++) {
+		if (!strncmp(s, actions[i].name,
+		    min(count, strlen(actions[i].name)))) {
+			int rc = actions[i].func(tsif_device);
+			if (!rc)
+				rc = count;
+			return rc;
+		}
+	}
+	return -EINVAL;
+}
+
+static int tsif_debugfs_generic_open(struct inode *inode, struct file *filp)
+{
+	filp->private_data = inode->i_private;
+	return 0;
+}
+
+static const struct file_operations fops_debugfs_action = {
+	.open  = tsif_debugfs_generic_open,
+	.write = tsif_debugfs_action_write,
+};
+
+static ssize_t tsif_debugfs_dma_read(struct file *filp, char __user *userbuf,
+				     size_t count, loff_t *f_pos)
+{
+	static char bufa[200];
+	static char *buf = bufa;
+	int sz = sizeof(bufa);
+	struct msm_tsif_device *tsif_device = filp->private_data;
+	int len = 0;
+	if (tsif_device) {
+		int i;
+		len += snprintf(buf + len, sz - len,
+				"ri %3d | wi %3d | dmwi %3d |",
+				tsif_device->ri, tsif_device->wi,
+				tsif_device->dmwi);
+		for (i = 0; i < 2; i++) {
+			struct tsif_xfer *xfer = &tsif_device->xfer[i];
+			if (xfer->busy) {
+				u32 dst =
+				    tsif_device->dmov_cmd[i]->box.dst_row_addr;
+				u32 base = tsif_device->data_buffer_dma;
+				int w = (dst - base) / TSIF_PKT_SIZE;
+				len += snprintf(buf + len, sz - len,
+						" [%3d]{%3d}",
+						w, xfer->wi);
+			} else {
+				len += snprintf(buf + len, sz - len,
+						" ---idle---");
+			}
+		}
+		len += snprintf(buf + len, sz - len, "\n");
+	} else {
+		len += snprintf(buf + len, sz - len, "No TSIF device???\n");
+	}
+	return simple_read_from_buffer(userbuf, count, f_pos, buf, len);
+}
+
+static const struct file_operations fops_debugfs_dma = {
+	.open = tsif_debugfs_generic_open,
+	.read = tsif_debugfs_dma_read,
+};
+
+static ssize_t tsif_debugfs_gpios_read(struct file *filp, char __user *userbuf,
+				       size_t count, loff_t *f_pos)
+{
+	static char bufa[300];
+	static char *buf = bufa;
+	int sz = sizeof(bufa);
+	struct msm_tsif_device *tsif_device = filp->private_data;
+	int len = 0;
+	if (tsif_device) {
+		struct msm_tsif_platform_data *pdata =
+			tsif_device->pdev->dev.platform_data;
+		int i;
+		for (i = 0; i < pdata->num_gpios; i++) {
+			if (pdata->gpios[i].gpio_cfg) {
+				int x = !!gpio_get_value(GPIO_PIN(
+					pdata->gpios[i].gpio_cfg));
+				len += snprintf(buf + len, sz - len,
+						"%15s: %d\n",
+						pdata->gpios[i].label, x);
+			}
+		}
+	} else {
+		len += snprintf(buf + len, sz - len, "No TSIF device???\n");
+	}
+	return simple_read_from_buffer(userbuf, count, f_pos, buf, len);
+}
+
+static const struct file_operations fops_debugfs_gpios = {
+	.open = tsif_debugfs_generic_open,
+	.read = tsif_debugfs_gpios_read,
+};
+
+
+static void tsif_debugfs_init(struct msm_tsif_device *tsif_device)
+{
+	tsif_device->dent_tsif = debugfs_create_dir(
+	      dev_name(&tsif_device->pdev->dev), NULL);
+	if (tsif_device->dent_tsif) {
+		int i;
+		void __iomem *base = tsif_device->base;
+		for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++) {
+			tsif_device->debugfs_tsif_regs[i] =
+			   debugfs_create_iomem_x32(
+				debugfs_tsif_regs[i].name,
+				debugfs_tsif_regs[i].mode,
+				tsif_device->dent_tsif,
+				base + debugfs_tsif_regs[i].offset);
+		}
+		tsif_device->debugfs_gpio = debugfs_create_file("gpios",
+		    S_IRUGO,
+		    tsif_device->dent_tsif, tsif_device, &fops_debugfs_gpios);
+		tsif_device->debugfs_action = debugfs_create_file("action",
+		    S_IWUSR,
+		    tsif_device->dent_tsif, tsif_device, &fops_debugfs_action);
+		tsif_device->debugfs_dma = debugfs_create_file("dma",
+		    S_IRUGO,
+		    tsif_device->dent_tsif, tsif_device, &fops_debugfs_dma);
+		tsif_device->debugfs_databuf = debugfs_create_blob("data_buf",
+		    S_IRUGO,
+		    tsif_device->dent_tsif, &tsif_device->blob_wrapper_databuf);
+	}
+}
+
+static void tsif_debugfs_exit(struct msm_tsif_device *tsif_device)
+{
+	if (tsif_device->dent_tsif) {
+		int i;
+		debugfs_remove_recursive(tsif_device->dent_tsif);
+		tsif_device->dent_tsif = NULL;
+		for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++)
+			tsif_device->debugfs_tsif_regs[i] = NULL;
+		tsif_device->debugfs_gpio = NULL;
+		tsif_device->debugfs_action = NULL;
+		tsif_device->debugfs_dma = NULL;
+		tsif_device->debugfs_databuf = NULL;
+	}
+}
+/* ===debugfs end=== */
+
+/* ===module begin=== */
+static LIST_HEAD(tsif_devices);
+
+static struct msm_tsif_device *tsif_find_by_id(int id)
+{
+	struct msm_tsif_device *tsif_device;
+	list_for_each_entry(tsif_device, &tsif_devices, devlist) {
+		if (tsif_device->pdev->id == id)
+			return tsif_device;
+	}
+	return NULL;
+}
+
+static int __devinit msm_tsif_probe(struct platform_device *pdev)
+{
+	int rc = -ENODEV;
+	struct msm_tsif_platform_data *plat = pdev->dev.platform_data;
+	struct msm_tsif_device *tsif_device;
+	struct resource *res;
+	/* check device validity */
+	/* must have platform data */
+	if (!plat) {
+		dev_err(&pdev->dev, "Platform data not available\n");
+		rc = -EINVAL;
+		goto out;
+	}
+/*TODO macro for max. id*/
+	if ((pdev->id < 0) || (pdev->id > 0)) {
+		dev_err(&pdev->dev, "Invalid device ID %d\n", pdev->id);
+		rc = -EINVAL;
+		goto out;
+	}
+	/* OK, we will use this device */
+	tsif_device = kzalloc(sizeof(struct msm_tsif_device), GFP_KERNEL);
+	if (!tsif_device) {
+		dev_err(&pdev->dev, "Failed to allocate memory for device\n");
+		rc = -ENOMEM;
+		goto out;
+	}
+	/* cross links */
+	tsif_device->pdev = pdev;
+	platform_set_drvdata(pdev, tsif_device);
+	tsif_device->mode = 1;
+	tsif_device->pkts_per_chunk = TSIF_PKTS_IN_CHUNK_DEFAULT;
+	tsif_device->chunks_per_buf = TSIF_CHUNKS_IN_BUF_DEFAULT;
+	tasklet_init(&tsif_device->dma_refill, tsif_dma_refill,
+		     (unsigned long)tsif_device);
+	if (tsif_get_clocks(tsif_device))
+		goto err_clocks;
+/* map I/O memory */
+	tsif_device->memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!tsif_device->memres) {
+		dev_err(&pdev->dev, "Missing MEM resource\n");
+		rc = -ENXIO;
+		goto err_rgn;
+	}
+	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "Missing DMA resource\n");
+		rc = -ENXIO;
+		goto err_rgn;
+	}
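+	/* The DMA resource encodes the Data Mover channel number in .start
+	 * and the flow-control CRCI number in .end. */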
+	tsif_device->dma = res->start;
+	tsif_device->crci = res->end;
+	tsif_device->base = ioremap(tsif_device->memres->start,
+				    resource_size(tsif_device->memres));
+	if (!tsif_device->base) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		goto err_ioremap;
+	}
+	dev_info(&pdev->dev, "remapped phys 0x%08x => virt %p\n",
+		 tsif_device->memres->start, tsif_device->base);
+	rc = tsif_start_gpios(tsif_device);
+	if (rc)
+		goto err_gpio;
+
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	tsif_debugfs_init(tsif_device);
+	rc = platform_get_irq(pdev, 0);
+	if (rc > 0) {
+		tsif_device->irq = rc;
+		rc = request_irq(tsif_device->irq, tsif_irq, IRQF_SHARED,
+				 dev_name(&pdev->dev), tsif_device);
+		disable_irq(tsif_device->irq);
+	}
+	if (rc) {
+		dev_err(&pdev->dev, "failed to request IRQ %d : %d\n",
+			tsif_device->irq, rc);
+		goto err_irq;
+	}
+	rc = sysfs_create_group(&pdev->dev.kobj, &dev_attr_grp);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
+		goto err_attrs;
+	}
+	wake_lock_init(&tsif_device->wake_lock, WAKE_LOCK_SUSPEND,
+		       dev_name(&pdev->dev));
+	dev_info(&pdev->dev, "Configured irq %d memory 0x%08x DMA %d CRCI %d\n",
+		 tsif_device->irq, tsif_device->memres->start,
+		 tsif_device->dma, tsif_device->crci);
+	list_add(&tsif_device->devlist, &tsif_devices);
+	return 0;
+/* error path */
+	sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
+err_attrs:
+	free_irq(tsif_device->irq, tsif_device);
+err_irq:
+	tsif_debugfs_exit(tsif_device);
+	tsif_stop_gpios(tsif_device);
+err_gpio:
+	iounmap(tsif_device->base);
+err_ioremap:
+err_rgn:
+	tsif_put_clocks(tsif_device);
+err_clocks:
+	kfree(tsif_device);
+out:
+	return rc;
+}
+
+static int __devexit msm_tsif_remove(struct platform_device *pdev)
+{
+	struct msm_tsif_device *tsif_device = platform_get_drvdata(pdev);
+	dev_info(&pdev->dev, "Unload\n");
+	list_del(&tsif_device->devlist);
+	wake_lock_destroy(&tsif_device->wake_lock);
+	sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
+	free_irq(tsif_device->irq, tsif_device);
+	tsif_debugfs_exit(tsif_device);
+	tsif_dma_exit(tsif_device);
+	tsif_stop_gpios(tsif_device);
+	iounmap(tsif_device->base);
+	tsif_put_clocks(tsif_device);
+
+	pm_runtime_put(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	kfree(tsif_device);
+	return 0;
+}
+
+static int tsif_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: suspending...\n");
+	return 0;
+}
+
+static int tsif_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: resuming...\n");
+	return 0;
+}
+
+static const struct dev_pm_ops tsif_dev_pm_ops = {
+	.runtime_suspend = tsif_runtime_suspend,
+	.runtime_resume = tsif_runtime_resume,
+};
+
+
+static struct platform_driver msm_tsif_driver = {
+	.probe          = msm_tsif_probe,
+	.remove         = __devexit_p(msm_tsif_remove),
+	.driver         = {
+		.name   = "msm_tsif",
+		.pm     = &tsif_dev_pm_ops,
+	},
+};
+
+static int __init mod_init(void)
+{
+	int rc = platform_driver_register(&msm_tsif_driver);
+	if (rc)
+		pr_err("TSIF: platform_driver_register failed: %d\n", rc);
+	return rc;
+}
+
+static void __exit mod_exit(void)
+{
+	platform_driver_unregister(&msm_tsif_driver);
+}
+/* ===module end=== */
+
+/* public API */
+
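+/*
+ * Typical client usage (illustrative sketch only; names such as my_notify
+ * and my_data are hypothetical, the character-device client in
+ * tsif_chrdev.c is the in-tree example):
+ *
+ *	cookie = tsif_attach(0, my_notify, my_data);
+ *	tsif_set_buf_config(cookie, pkts_in_chunk, chunks_in_buf); (optional)
+ *	tsif_start(cookie);
+ *	... in my_notify(): tsif_get_state(cookie, &ri, &wi, &state);
+ *	    consume packets, then tsif_reclaim_packets(cookie, new_ri);
+ *	tsif_stop(cookie);
+ *	tsif_detach(cookie);
+ */
+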
+void *tsif_attach(int id, void (*notify)(void *client_data), void *data)
+{
+	struct msm_tsif_device *tsif_device = tsif_find_by_id(id);
+	if (!tsif_device)
+		return ERR_PTR(-ENODEV);
+	if (tsif_device->client_notify || tsif_device->client_data)
+		return ERR_PTR(-EBUSY);
+	tsif_device->client_notify = notify;
+	tsif_device->client_data = data;
+	/* pin the device so it cannot go away while a client is attached */
+	get_device(&tsif_device->pdev->dev);
+	return tsif_device;
+}
+EXPORT_SYMBOL(tsif_attach);
+
+void tsif_detach(void *cookie)
+{
+	struct msm_tsif_device *tsif_device = cookie;
+	tsif_device->client_notify = NULL;
+	tsif_device->client_data = NULL;
+	put_device(&tsif_device->pdev->dev);
+}
+EXPORT_SYMBOL(tsif_detach);
+
+void tsif_get_info(void *cookie, void **pdata, int *psize)
+{
+	struct msm_tsif_device *tsif_device = cookie;
+	if (pdata)
+		*pdata = tsif_device->data_buffer;
+	if (psize)
+		*psize = TSIF_PKTS_IN_BUF;
+}
+EXPORT_SYMBOL(tsif_get_info);
+
+int tsif_set_mode(void *cookie, int mode)
+{
+	struct msm_tsif_device *tsif_device = cookie;
+	if (tsif_device->state != tsif_state_stopped) {
+		dev_err(&tsif_device->pdev->dev,
+			"Can't change mode while device is active\n");
+		return -EBUSY;
+	}
+	switch (mode) {
+	case 1:
+	case 2:
+	case 3:
+		tsif_device->mode = mode;
+		break;
+	default:
+		dev_err(&tsif_device->pdev->dev, "Invalid mode: %d\n", mode);
+		return -EINVAL;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(tsif_set_mode);
+
+int tsif_set_time_limit(void *cookie, u32 value)
+{
+	struct msm_tsif_device *tsif_device = cookie;
+	if (tsif_device->state != tsif_state_stopped) {
+		dev_err(&tsif_device->pdev->dev,
+			"Can't change time limit while device is active\n");
+		return -EBUSY;
+	}
+	if (value != (value & 0xFFFFFF)) {
+		dev_err(&tsif_device->pdev->dev,
+			"Invalid time limit (should be 24 bit): %#x\n", value);
+		return -EINVAL;
+	}
+	tsif_device->time_limit = value;
+	return 0;
+}
+EXPORT_SYMBOL(tsif_set_time_limit);
+
+int tsif_set_buf_config(void *cookie, u32 pkts_in_chunk, u32 chunks_in_buf)
+{
+	struct msm_tsif_device *tsif_device = cookie;
+	if (tsif_device->data_buffer) {
+		dev_err(&tsif_device->pdev->dev,
+			"Data buffer already allocated: %p\n",
+			tsif_device->data_buffer);
+		return -EBUSY;
+	}
+	/* check for crazy user */
+	if (pkts_in_chunk * chunks_in_buf > 10240) {
+		dev_err(&tsif_device->pdev->dev,
+			"Buffer requested is too large: %d * %d\n",
+			pkts_in_chunk,
+			chunks_in_buf);
+		return -EINVAL;
+	}
+	/* parameters are OK, execute */
+	tsif_device->pkts_per_chunk = pkts_in_chunk;
+	tsif_device->chunks_per_buf = chunks_in_buf;
+	return 0;
+}
+EXPORT_SYMBOL(tsif_set_buf_config);
+
+void tsif_get_state(void *cookie, int *ri, int *wi, enum tsif_state *state)
+{
+	struct msm_tsif_device *tsif_device = cookie;
+	if (ri)
+		*ri    = tsif_device->ri;
+	if (wi)
+		*wi    = tsif_device->wi;
+	if (state)
+		*state = tsif_device->state;
+}
+EXPORT_SYMBOL(tsif_get_state);
+
+int tsif_start(void *cookie)
+{
+	struct msm_tsif_device *tsif_device = cookie;
+	return action_open(tsif_device);
+}
+EXPORT_SYMBOL(tsif_start);
+
+void tsif_stop(void *cookie)
+{
+	struct msm_tsif_device *tsif_device = cookie;
+	action_close(tsif_device);
+}
+EXPORT_SYMBOL(tsif_stop);
+
+void tsif_reclaim_packets(void *cookie, int read_index)
+{
+	struct msm_tsif_device *tsif_device = cookie;
+	tsif_device->ri = read_index;
+}
+EXPORT_SYMBOL(tsif_reclaim_packets);
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_DESCRIPTION("TSIF (Transport Stream Interface)"
+		   " Driver for the MSM chipset");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/misc/tsif_chrdev.c b/drivers/misc/tsif_chrdev.c
new file mode 100644
index 0000000..4068ac3
--- /dev/null
+++ b/drivers/misc/tsif_chrdev.c
@@ -0,0 +1,225 @@
+/**
+ * TSIF driver client
+ *
+ * Character device that, when read, returns a
+ * stream of TSIF packets.
+ *
+ * Copyright (c) 2009-2010, Code Aurora Forum. All rights
+ * reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
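+
+/*
+ * Illustrative userspace usage (a hedged sketch, not part of this driver):
+ * open the node created below (typically /dev/tsif0 for the "tsif%d"
+ * device) and read() from it.  The device is packet oriented, so reading
+ * in multiples of TSIF_PKT_SIZE bytes keeps the ring-buffer indices packet
+ * aligned:
+ *
+ *	int fd = open("/dev/tsif0", O_RDONLY);
+ *	char buf[16 * TSIF_PKT_SIZE];
+ *	ssize_t n = read(fd, buf, sizeof(buf));
+ */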
+
+#include <linux/module.h>       /* Needed by all modules */
+#include <linux/kernel.h>       /* Needed for KERN_INFO */
+#include <linux/cdev.h>
+#include <linux/err.h>          /* IS_ERR etc. */
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/sched.h>        /* TASK_INTERRUPTIBLE */
+
+#include <linux/uaccess.h>        /* copy_to_user */
+
+#include <linux/tsif_api.h>
+
+struct tsif_chrdev {
+	struct cdev cdev;
+	struct device *dev;
+	wait_queue_head_t wq_read;
+	void *cookie;
+	/* mirror for tsif data */
+	void *data_buffer;
+	unsigned buf_size_packets; /**< buffer size in packets */
+	unsigned ri, wi;
+	enum tsif_state state;
+	unsigned rptr;
+};
+
+static int tsif_open(struct inode *inode, struct file *file)
+{
+	int rc;
+	struct tsif_chrdev *the_dev =
+	       container_of(inode->i_cdev, struct tsif_chrdev, cdev);
+	if (!the_dev->cookie)  /* not bound yet */
+		return -ENODEV;
+	file->private_data = the_dev;
+	rc = tsif_start(the_dev->cookie);
+	if (rc)
+		return rc;
+	tsif_get_info(the_dev->cookie, &the_dev->data_buffer,
+		      &the_dev->buf_size_packets);
+	the_dev->rptr = 0;
+	return nonseekable_open(inode, file);
+}
+
+static int tsif_release(struct inode *inode, struct file *filp)
+{
+	struct tsif_chrdev *the_dev = filp->private_data;
+	tsif_stop(the_dev->cookie);
+	return 0;
+}
+
+static ssize_t tsif_read(struct file *filp, char __user *buf, size_t count,
+			 loff_t *f_pos)
+{
+	int avail = 0;
+	int wi;
+	struct tsif_chrdev *the_dev = filp->private_data;
+	tsif_get_state(the_dev->cookie, &the_dev->ri, &the_dev->wi,
+		       &the_dev->state);
+	/* consistency check */
+	if (the_dev->ri != (the_dev->rptr / TSIF_PKT_SIZE)) {
+		dev_err(the_dev->dev,
+			"%s: inconsistent read pointers: ri %d rptr %d\n",
+			__func__, the_dev->ri, the_dev->rptr);
+		the_dev->rptr = the_dev->ri * TSIF_PKT_SIZE;
+	}
+	/* ri == wi if no data */
+	if (the_dev->ri == the_dev->wi) {
+		/* shall I block waiting for data? */
+		if (filp->f_flags & O_NONBLOCK) {
+			if (the_dev->state == tsif_state_running) {
+				return -EAGAIN;
+			} else {
+				/* not running -> EOF */
+				return 0;
+			}
+		}
+		if (wait_event_interruptible(the_dev->wq_read,
+		      (the_dev->ri != the_dev->wi) ||
+		      (the_dev->state != tsif_state_running))) {
+			/* got signal -> tell FS to handle it */
+			return -ERESTARTSYS;
+		}
+		if (the_dev->ri == the_dev->wi) {
+			/* still no data -> EOF */
+			return 0;
+		}
+	}
+	/*
+	 * Return at most one contiguous chunk: up to wi, or up to the end of
+	 * the buffer when the data wraps around.
+	 */
+	wi = (the_dev->wi > the_dev->ri) ?
+		the_dev->wi : the_dev->buf_size_packets;
+	avail = min(wi * TSIF_PKT_SIZE - the_dev->rptr, count);
+	if (copy_to_user(buf, the_dev->data_buffer + the_dev->rptr, avail))
+		return -EFAULT;
+	the_dev->rptr = (the_dev->rptr + avail) %
+		(TSIF_PKT_SIZE * the_dev->buf_size_packets);
+	the_dev->ri = the_dev->rptr / TSIF_PKT_SIZE;
+	*f_pos += avail;
+	tsif_reclaim_packets(the_dev->cookie, the_dev->ri);
+	return avail;
+}
+
+static void tsif_notify(void *data)
+{
+	struct tsif_chrdev *the_dev = data;
+	tsif_get_state(the_dev->cookie, &the_dev->ri, &the_dev->wi,
+		       &the_dev->state);
+	wake_up_interruptible(&the_dev->wq_read);
+}
+
+static const struct file_operations tsif_fops = {
+	.owner   = THIS_MODULE,
+	.read    = tsif_read,
+	.open    = tsif_open,
+	.release = tsif_release,
+};
+
+static struct class *tsif_class;
+static dev_t tsif_dev;  /**< first dev_t from the allocated range */
+static dev_t tsif_dev0; /**< next dev_t not yet assigned */
+
+static int tsif_init_one(struct tsif_chrdev *the_dev, int index)
+{
+	int rc;
+	pr_info("%s[%d]\n", __func__, index);
+	cdev_init(&the_dev->cdev, &tsif_fops);
+	the_dev->cdev.owner = THIS_MODULE;
+	init_waitqueue_head(&the_dev->wq_read);
+	rc = cdev_add(&the_dev->cdev, tsif_dev0++, 1);
+	the_dev->dev = device_create(tsif_class, NULL, the_dev->cdev.dev,
+				     the_dev, "tsif%d", index);
+	if (IS_ERR(the_dev->dev)) {
+		rc = PTR_ERR(the_dev->dev);
+		pr_err("device_create failed: %d\n", rc);
+		goto err_create;
+	}
+	the_dev->cookie = tsif_attach(index, tsif_notify, the_dev);
+	if (IS_ERR(the_dev->cookie)) {
+		rc = PTR_ERR(the_dev->cookie);
+		pr_err("tsif_attach failed: %d\n", rc);
+		goto err_attach;
+	}
+	/* the data buffer has not been allocated yet at this point */
+	tsif_get_info(the_dev->cookie, &the_dev->data_buffer, NULL);
+	dev_info(the_dev->dev,
+		 "Device %d.%d attached to TSIF, buffer size %d\n",
+		 MAJOR(the_dev->cdev.dev), MINOR(the_dev->cdev.dev),
+		 the_dev->buf_size_packets);
+	return 0;
+err_attach:
+	device_destroy(tsif_class, the_dev->cdev.dev);
+err_create:
+	cdev_del(&the_dev->cdev);
+	return rc;
+}
+
+static void tsif_exit_one(struct tsif_chrdev *the_dev)
+{
+	dev_info(the_dev->dev, "%s\n", __func__);
+	tsif_detach(the_dev->cookie);
+	device_destroy(tsif_class, the_dev->cdev.dev);
+	cdev_del(&the_dev->cdev);
+}
+
+#define TSIF_NUM_DEVS 1 /**< support this many devices */
+
+static struct tsif_chrdev the_devices[TSIF_NUM_DEVS];
+
+static int __init mod_init(void)
+{
+	int rc;
+	rc = alloc_chrdev_region(&tsif_dev, 0, TSIF_NUM_DEVS, "tsif");
+	if (rc) {
+		pr_err("alloc_chrdev_region failed: %d\n", rc);
+		goto err_devrgn;
+	}
+	tsif_dev0 = tsif_dev;
+	tsif_class = class_create(THIS_MODULE, "tsif");
+	if (IS_ERR(tsif_class)) {
+		rc = PTR_ERR(tsif_class);
+		pr_err("Error creating tsif class: %d\n", rc);
+		goto err_class;
+	}
+	rc = tsif_init_one(&the_devices[0], 0);
+	if (rc)
+		goto err_init1;
+	return 0;
+err_init1:
+	class_destroy(tsif_class);
+err_class:
+	unregister_chrdev_region(tsif_dev, TSIF_NUM_DEVS);
+err_devrgn:
+	return rc;
+}
+
+static void __exit mod_exit(void)
+{
+	tsif_exit_one(&the_devices[0]);
+	class_destroy(tsif_class);
+	unregister_chrdev_region(tsif_dev, TSIF_NUM_DEVS);
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_DESCRIPTION("TSIF character device interface");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/misc/tzcom.c b/drivers/misc/tzcom.c
new file mode 100644
index 0000000..e947dee
--- /dev/null
+++ b/drivers/misc/tzcom.c
@@ -0,0 +1,910 @@
+/* Qualcomm TrustZone communicator driver
+ *
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define KMSG_COMPONENT "TZCOM"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/android_pmem.h>
+#include <linux/io.h>
+#include <mach/scm.h>
+#include <mach/peripheral-loader.h>
+#include <linux/tzcom.h>
+#include "tzcomi.h"
+
+#define TZCOM_DEV "tzcom"
+
+#define TZSCHEDULER_CMD_ID 1 /* CMD id of the trustzone scheduler */
+
+#undef PDEBUG
+#define PDEBUG(fmt, args...) pr_debug("%s(%i, %s): " fmt "\n", \
+		__func__, current->pid, current->comm, ## args)
+
+#undef PERR
+#define PERR(fmt, args...) pr_err("%s(%i, %s): " fmt "\n", \
+		__func__, current->pid, current->comm, ## args)
+
+
+static struct class *driver_class;
+static dev_t tzcom_device_no;
+static struct cdev tzcom_cdev;
+
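+/*
+ * Statically sized shared buffers used to exchange data with TZ:
+ * sb_in carries commands and their responses into the secure side,
+ * sb_out carries callback requests coming back from it.
+ */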
+static u8 *sb_in_virt;
+static s32 sb_in_phys;
+static size_t sb_in_length = 20 * SZ_1K;
+static u8 *sb_out_virt;
+static s32 sb_out_phys;
+static size_t sb_out_length = 20 * SZ_1K;
+
+static void *pil;
+
+static atomic_t svc_instance_ctr = ATOMIC_INIT(0);
+static DEFINE_MUTEX(sb_in_lock);
+static DEFINE_MUTEX(sb_out_lock);
+static DEFINE_MUTEX(send_cmd_lock);
+
+struct tzcom_callback_list {
+	struct list_head      list;
+	struct tzcom_callback callback;
+};
+
+struct tzcom_registered_svc_list {
+	struct list_head                 list;
+	struct tzcom_register_svc_op_req svc;
+	wait_queue_head_t                next_cmd_wq;
+	int                              next_cmd_flag;
+};
+
+struct tzcom_data_t {
+	struct list_head  callback_list_head;
+	struct mutex      callback_list_lock;
+	struct list_head  registered_svc_list_head;
+	spinlock_t        registered_svc_list_lock;
+	wait_queue_head_t cont_cmd_wq;
+	int               cont_cmd_flag;
+	u32               handled_cmd_svc_instance_id;
+};
+
+static int tzcom_scm_call(const void *cmd_buf, size_t cmd_len,
+		void *resp_buf, size_t resp_len)
+{
+	return scm_call(SCM_SVC_TZSCHEDULER, TZSCHEDULER_CMD_ID,
+			cmd_buf, cmd_len, resp_buf, resp_len);
+}
+
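+/*
+ * Addresses inside the two shared buffers are translated by offset from
+ * their known physical bases; anything else falls back to the kernel
+ * linear-mapping helpers.
+ */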
+static s32 tzcom_virt_to_phys(u8 *virt)
+{
+	if (virt >= sb_in_virt &&
+			virt < (sb_in_virt + sb_in_length)) {
+		return sb_in_phys + (virt - sb_in_virt);
+	} else if (virt >= sb_out_virt &&
+			virt < (sb_out_virt + sb_out_length)) {
+		return sb_out_phys + (virt - sb_out_virt);
+	} else {
+		return virt_to_phys(virt);
+	}
+}
+
+static u8 *tzcom_phys_to_virt(s32 phys)
+{
+	if (phys >= sb_in_phys &&
+			phys < (sb_in_phys + sb_in_length)) {
+		return sb_in_virt + (phys - sb_in_phys);
+	} else if (phys >= sb_out_phys &&
+			phys < (sb_out_phys + sb_out_length)) {
+		return sb_out_virt + (phys - sb_out_phys);
+	} else {
+		return phys_to_virt(phys);
+	}
+}
+
+static int __tzcom_is_svc_unique(struct tzcom_data_t *data,
+		struct tzcom_register_svc_op_req svc)
+{
+	struct tzcom_registered_svc_list *ptr;
+	int unique = 1;
+	unsigned long flags;
+
+	spin_lock_irqsave(&data->registered_svc_list_lock, flags);
+	list_for_each_entry(ptr, &data->registered_svc_list_head, list) {
+		if (ptr->svc.svc_id == svc.svc_id) {
+			PERR("Service id: %u is already registered",
+					ptr->svc.svc_id);
+			unique = 0;
+			break;
+		} else if (svc.cmd_id_low >= ptr->svc.cmd_id_low &&
+				svc.cmd_id_low <= ptr->svc.cmd_id_high) {
+			PERR("Cmd id low falls in the range of another"
+					"registered service");
+			unique = 0;
+			break;
+		} else if (svc.cmd_id_high >= ptr->svc.cmd_id_low &&
+				svc.cmd_id_high <= ptr->svc.cmd_id_high) {
+			PERR("Cmd id high falls in the range of another"
+					"registered service");
+			unique = 0;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&data->registered_svc_list_lock, flags);
+	return unique;
+}
+
+static int tzcom_register_service(struct tzcom_data_t *data, void __user *argp)
+{
+	int ret;
+	unsigned long flags;
+	struct tzcom_register_svc_op_req rcvd_svc;
+	struct tzcom_registered_svc_list *new_entry;
+
+	ret = copy_from_user(&rcvd_svc, argp, sizeof(rcvd_svc));
+
+	if (ret) {
+		PDEBUG("copy_from_user failed");
+		return ret;
+	}
+
+	PDEBUG("svc_id: %u, cmd_id_low: %u, cmd_id_high: %u",
+			rcvd_svc.svc_id, rcvd_svc.cmd_id_low,
+			rcvd_svc.cmd_id_high);
+	if (!__tzcom_is_svc_unique(data, rcvd_svc)) {
+		PDEBUG("Provided service is not unique");
+		return -EINVAL;
+	}
+
+	rcvd_svc.instance_id = atomic_inc_return(&svc_instance_ctr);
+
+	ret = copy_to_user(argp, &rcvd_svc, sizeof(rcvd_svc));
+	if (ret) {
+		PDEBUG("copy_to_user failed");
+		return ret;
+	}
+
+	new_entry = kmalloc(sizeof(*new_entry), GFP_KERNEL);
+	if (!new_entry) {
+		pr_err("%s: kmalloc failed\n", __func__);
+		return -ENOMEM;
+	}
+	memcpy(&new_entry->svc, &rcvd_svc, sizeof(rcvd_svc));
+	new_entry->next_cmd_flag = 0;
+	init_waitqueue_head(&new_entry->next_cmd_wq);
+
+	spin_lock_irqsave(&data->registered_svc_list_lock, flags);
+	list_add_tail(&new_entry->list, &data->registered_svc_list_head);
+	spin_unlock_irqrestore(&data->registered_svc_list_lock, flags);
+
+
+	return ret;
+}
+
+static int tzcom_unregister_service(struct tzcom_data_t *data,
+		void __user *argp)
+{
+	int ret = 0;
+	unsigned long flags;
+	struct tzcom_unregister_svc_op_req req;
+	struct tzcom_registered_svc_list *ptr;
+	ret = copy_from_user(&req, argp, sizeof(req));
+	if (ret) {
+		PDEBUG("copy_from_user failed");
+		return ret;
+	}
+
+	spin_lock_irqsave(&data->registered_svc_list_lock, flags);
+	list_for_each_entry(ptr, &data->registered_svc_list_head, list) {
+		if (req.svc_id == ptr->svc.svc_id &&
+				req.instance_id == ptr->svc.instance_id) {
+			wake_up_all(&ptr->next_cmd_wq);
+			list_del(&ptr->list);
+			kfree(ptr);
+			spin_unlock_irqrestore(&data->registered_svc_list_lock,
+					flags);
+			return 0;
+		}
+	}
+	spin_unlock_irqrestore(&data->registered_svc_list_lock, flags);
+
+	return -EINVAL;
+}
+
+/**
+ *   +---------+                              +-----+       +-----------------+
+ *   |  TZCOM  |                              | SCM |       | TZCOM_SCHEDULER |
+ *   +----+----+                              +--+--+       +--------+--------+
+ *        |                                      |                   |
+ *        |        scm_call                      |                   |
+ *        |------------------------------------->|                   |
+ *        |  cmd_buf = struct tzcom_command {    |                   |
+ *        |              cmd_type,               |------------------>|
+ * +------+------------- sb_in_cmd_addr,         |                   |
+ * |      |              sb_in_cmd_len           |                   |
+ * |      |            }                         |                   |
+ * |      |   resp_buf = struct tzcom_response { |                   |
+ * |                         cmd_status,         |                   |
+ * |             +---------- sb_in_rsp_addr,     |                   |
+ * |             |           sb_in_rsp_len       |<------------------|
+ * |             |         }
+ * |             |                            struct tzcom_callback {---------+
+ * |             |                                uint32_t cmd_id;            |
+ * |             |                                uint32_t sb_out_cb_data_len;|
+ * |             +---------------+                uint32_t sb_out_cb_data_off;|
+ * |                             |            }                               |
+ * |    _________________________|_______________________________             |
+ * |    +-----------------------+| +----------------------+                   |
+ * +--->+ copy from req.cmd_buf |+>| copy to req.resp_buf |                   |
+ *      +-----------------------+  +----------------------+                   |
+ *      _________________________________________________________             |
+ *                               INPUT SHARED BUFFER                          |
+ *   +------------------------------------------------------------------------+
+ *   |  _________________________________________________________
+ *   |  +---------------------------------------------+
+ *   +->| cmd_id | data_len | data_off |   data...    |
+ *      +---------------------------------------------+
+ *                                     |<------------>|copy to next_cmd.req_buf
+ *      _________________________________________________________
+ *                              OUTPUT SHARED BUFFER
+ */
+static int tzcom_send_cmd(struct tzcom_data_t *data, void __user *argp)
+{
+	int ret = 0;
+	unsigned long flags;
+	u32 reqd_len_sb_in = 0;
+	u32 reqd_len_sb_out = 0;
+	struct tzcom_send_cmd_op_req req;
+	struct tzcom_command cmd;
+	struct tzcom_response resp;
+	struct tzcom_callback *next_callback;
+	void *cb_data = NULL;
+	struct tzcom_callback_list *new_entry;
+	struct tzcom_callback *cb;
+	size_t new_entry_len = 0;
+	struct tzcom_registered_svc_list *ptr_svc;
+
+	ret = copy_from_user(&req, argp, sizeof(req));
+	if (ret) {
+		PDEBUG("copy_from_user failed");
+		return ret;
+	}
+
+	if (req.cmd_buf == NULL || req.resp_buf == NULL) {
+		PDEBUG("cmd buffer or response buffer is null");
+		return -EINVAL;
+	}
+
+	if (req.cmd_len <= 0 || req.resp_len <= 0) {
+		PDEBUG("cmd buffer length or "
+				"response buffer length not valid");
+		return -EINVAL;
+	}
+	PDEBUG("received cmd_req.req: 0x%p",
+				req.cmd_buf);
+	PDEBUG("received cmd_req.rsp size: %u, ptr: 0x%p",
+			req.resp_len,
+			req.resp_buf);
+
+	reqd_len_sb_in = req.cmd_len + req.resp_len;
+	if (reqd_len_sb_in > sb_in_length) {
+		PDEBUG("Not enough memory to fit cmd_buf and "
+				"resp_buf. Required: %u, Available: %u",
+				reqd_len_sb_in, sb_in_length);
+		return -ENOMEM;
+	}
+
+	/* Copy req.cmd_buf to SB in and set req.resp_buf to SB in + cmd_len */
+	mutex_lock(&sb_in_lock);
+	PDEBUG("Before memcpy on sb_in");
+	memcpy(sb_in_virt, req.cmd_buf, req.cmd_len);
+	PDEBUG("After memcpy on sb_in");
+
+	/* cmd_type is always NEW here */
+	cmd.cmd_type = TZ_SCHED_CMD_NEW;
+	cmd.sb_in_cmd_addr = (u8 *) tzcom_virt_to_phys(sb_in_virt);
+	cmd.sb_in_cmd_len = req.cmd_len;
+
+	resp.cmd_status = TZ_SCHED_STATUS_INCOMPLETE;
+	resp.sb_in_rsp_addr = (u8 *) tzcom_virt_to_phys(sb_in_virt +
+			req.cmd_len);
+	resp.sb_in_rsp_len = req.resp_len;
+
+	PDEBUG("before call tzcom_scm_call, cmd_id = : %u", req.cmd_id);
+	PDEBUG("before call tzcom_scm_call, sizeof(cmd) = : %u", sizeof(cmd));
+
+	tzcom_scm_call((const void *) &cmd, sizeof(cmd), &resp, sizeof(resp));
+	mutex_unlock(&sb_in_lock);
+
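+	/*
+	 * While TZ reports the command as incomplete, each pass below pulls
+	 * one callback request out of the output shared buffer, queues it on
+	 * the callback list, wakes every registered service, and then waits
+	 * for one of them to issue CONTINUE_CMD before re-entering TZ with
+	 * TZ_SCHED_CMD_PENDING.
+	 */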
+	while (resp.cmd_status != TZ_SCHED_STATUS_COMPLETE) {
+		/*
+		 * If cmd is incomplete, get the callback cmd out from SB out
+		 * and put it on the list
+		 */
+		PDEBUG("cmd_status is incomplete.");
+		next_callback = (struct tzcom_callback *)sb_out_virt;
+
+		mutex_lock(&sb_out_lock);
+		reqd_len_sb_out = sizeof(*next_callback)
+					+ next_callback->sb_out_cb_data_len;
+		if (reqd_len_sb_out > sb_out_length) {
+			PDEBUG("Not enough memory to"
+					" fit tzcom_callback buffer."
+					" Required: %u, Available: %u",
+					reqd_len_sb_out, sb_out_length);
+			mutex_unlock(&sb_out_lock);
+			return -ENOMEM;
+		}
+
+		/* Assumption: cb_data_off equals sizeof(struct tzcom_callback) */
+		new_entry_len = sizeof(*new_entry)
+					+ next_callback->sb_out_cb_data_len;
+		new_entry = kmalloc(new_entry_len, GFP_KERNEL);
+		if (!new_entry) {
+			PERR("kmalloc failed");
+			mutex_unlock(&sb_out_lock);
+			return -ENOMEM;
+		}
+
+		cb = &new_entry->callback;
+		cb->cmd_id = next_callback->cmd_id;
+		cb->sb_out_cb_data_len = next_callback->sb_out_cb_data_len;
+		cb->sb_out_cb_data_off = next_callback->sb_out_cb_data_off;
+
+		cb_data = (u8 *)next_callback
+				+ next_callback->sb_out_cb_data_off;
+		memcpy((u8 *)cb + cb->sb_out_cb_data_off, cb_data,
+				next_callback->sb_out_cb_data_len);
+		mutex_unlock(&sb_out_lock);
+
+		mutex_lock(&data->callback_list_lock);
+		list_add_tail(&new_entry->list, &data->callback_list_head);
+		mutex_unlock(&data->callback_list_lock);
+
+		/*
+		 * We don't know which service can handle the command, so we
+		 * wake up all blocking services and let them figure out if
+		 * they can handle the given command.
+		 */
+		spin_lock_irqsave(&data->registered_svc_list_lock, flags);
+		list_for_each_entry(ptr_svc,
+				&data->registered_svc_list_head, list) {
+			ptr_svc->next_cmd_flag = 1;
+			wake_up_interruptible(&ptr_svc->next_cmd_wq);
+		}
+		spin_unlock_irqrestore(&data->registered_svc_list_lock,
+				flags);
+
+		PDEBUG("waking up next_cmd_wq and "
+				"waiting for cont_cmd_wq");
+		if (wait_event_interruptible(data->cont_cmd_wq,
+					data->cont_cmd_flag != 0)) {
+			PDEBUG("Interrupted: exiting send_cmd loop");
+			return -ERESTARTSYS;
+		}
+		data->cont_cmd_flag = 0;
+		cmd.cmd_type = TZ_SCHED_CMD_PENDING;
+		mutex_lock(&sb_in_lock);
+		tzcom_scm_call((const void *) &cmd, sizeof(cmd), &resp,
+				sizeof(resp));
+		mutex_unlock(&sb_in_lock);
+	}
+
+	mutex_lock(&sb_in_lock);
+	resp.sb_in_rsp_addr = sb_in_virt + cmd.sb_in_cmd_len;
+	resp.sb_in_rsp_len = req.resp_len;
+	mutex_unlock(&sb_in_lock);
+
+	/* Cmd is done now. Copy the response from SB in to user */
+	if (req.resp_len >= resp.sb_in_rsp_len) {
+		PDEBUG("Before memcpy resp_buf");
+		mutex_lock(&sb_in_lock);
+		memcpy(req.resp_buf, resp.sb_in_rsp_addr, resp.sb_in_rsp_len);
+		mutex_unlock(&sb_in_lock);
+	} else {
+		PDEBUG("Provided response buffer is smaller"
+				" than required. Required: %u,"
+				" Provided: %u",
+				resp.sb_in_rsp_len, req.resp_len);
+		ret = -ENOMEM;
+	}
+
+	PDEBUG("sending cmd_req.rsp "
+			"size: %u, ptr: 0x%p", req.resp_len,
+			req.resp_buf);
+	ret = copy_to_user(argp, &req, sizeof(req));
+	if (ret) {
+		PDEBUG("copy_to_user failed");
+		return ret;
+	}
+
+	return ret;
+}
+
+static struct tzcom_registered_svc_list *__tzcom_find_svc(
+		struct tzcom_data_t *data,
+		uint32_t instance_id)
+{
+	struct tzcom_registered_svc_list *entry, *found = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&data->registered_svc_list_lock, flags);
+	list_for_each_entry(entry,
+			&data->registered_svc_list_head, list) {
+		if (entry->svc.instance_id == instance_id) {
+			found = entry;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&data->registered_svc_list_lock, flags);
+
+	/* NULL if no service with this instance_id is registered */
+	return found;
+}
+
+static int __tzcom_copy_cmd(struct tzcom_data_t *data,
+		struct tzcom_next_cmd_op_req *req,
+		struct tzcom_registered_svc_list *ptr_svc)
+{
+	int found = 0;
+	int ret = -EAGAIN;
+	struct tzcom_callback_list *entry;
+	struct tzcom_callback *cb;
+
+	PDEBUG("In here");
+	mutex_lock(&data->callback_list_lock);
+	PDEBUG("Before looping through cmd and svc lists.");
+	list_for_each_entry(entry, &data->callback_list_head, list) {
+		cb = &entry->callback;
+		if (req->svc_id == ptr_svc->svc.svc_id &&
+			req->instance_id == ptr_svc->svc.instance_id &&
+			cb->cmd_id >= ptr_svc->svc.cmd_id_low &&
+			cb->cmd_id <= ptr_svc->svc.cmd_id_high) {
+			PDEBUG("Found matching entry");
+			found = 1;
+			if (cb->sb_out_cb_data_len <= req->req_len) {
+				PDEBUG("copying cmd buffer %p to req "
+					"buffer %p, length: %u",
+					(u8 *)cb + cb->sb_out_cb_data_off,
+					req->req_buf, cb->sb_out_cb_data_len);
+				req->cmd_id = cb->cmd_id;
+				ret = copy_to_user(req->req_buf,
+					(u8 *)cb + cb->sb_out_cb_data_off,
+					cb->sb_out_cb_data_len);
+				if (ret) {
+					PDEBUG("copy_to_user failed");
+					break;
+				}
+				list_del(&entry->list);
+				kfree(entry);
+				ret = 0;
+			} else {
+				PDEBUG("callback data buffer is "
+					"larger than provided buffer."
+					"Required: %u, Provided: %u",
+					cb->sb_out_cb_data_len,
+					req->req_len);
+				ret = -ENOMEM;
+			}
+			break;
+		}
+	}
+	PDEBUG("After looping through cmd and svc lists.");
+	mutex_unlock(&data->callback_list_lock);
+	return ret;
+}
+
+static int tzcom_read_next_cmd(struct tzcom_data_t *data, void __user *argp)
+{
+	int ret = 0;
+	struct tzcom_next_cmd_op_req req;
+	struct tzcom_registered_svc_list *this_svc;
+
+	ret = copy_from_user(&req, argp, sizeof(req));
+	if (ret) {
+		PDEBUG("copy_from_user failed");
+		return ret;
+	}
+
+	if (req.instance_id > atomic_read(&svc_instance_ctr)) {
+		PDEBUG("Invalid instance_id for the request");
+		return -EINVAL;
+	}
+
+	if (!req.req_buf || req.req_len == 0) {
+		PDEBUG("Invalid request buffer or buffer length");
+		return -EINVAL;
+	}
+
+	PDEBUG("Before next_cmd loop");
+	this_svc = __tzcom_find_svc(data, req.instance_id);
+	if (!this_svc) {
+		PDEBUG("No service found for instance_id %u", req.instance_id);
+		return -EINVAL;
+	}
+
+	while (1) {
+		PDEBUG("Before wait_event next_cmd.");
+		if (wait_event_interruptible(this_svc->next_cmd_wq,
+				this_svc->next_cmd_flag != 0)) {
+			PDEBUG("Interrupted: exiting wait_next_cmd loop");
+			/* woken up for different reason */
+			return -ERESTARTSYS;
+		}
+		PDEBUG("After wait_event next_cmd.");
+		this_svc->next_cmd_flag = 0;
+
+		ret = __tzcom_copy_cmd(data, &req, this_svc);
+		if (ret == 0) {
+			PDEBUG("Successfully found svc for cmd");
+			data->handled_cmd_svc_instance_id = req.instance_id;
+			break;
+		} else if (ret == -ENOMEM) {
+			PDEBUG("Not enough memory");
+			return ret;
+		}
+	}
+	ret = copy_to_user(argp, &req, sizeof(req));
+	if (ret) {
+		PDEBUG("copy_to_user failed");
+		return ret;
+	}
+	PDEBUG("copy_to_user is done.");
+	return ret;
+}
+
+static int tzcom_cont_cmd(struct tzcom_data_t *data, void __user *argp)
+{
+	int ret = 0;
+	struct tzcom_cont_cmd_op_req req;
+	ret = copy_from_user(&req, argp, sizeof(req));
+	if (ret) {
+		PDEBUG("copy_from_user failed");
+		return ret;
+	}
+
+	/*
+	 * Only the svc instance that handled the cmd (in read_next_cmd method)
+	 * can call continue cmd
+	 */
+	if (data->handled_cmd_svc_instance_id != req.instance_id) {
+		PDEBUG("Only the service instance that handled the last "
+				"callback can continue cmd. "
+				"Expected: %u, Received: %u",
+				data->handled_cmd_svc_instance_id,
+				req.instance_id);
+		return -EINVAL;
+	}
+
+	if (req.resp_buf) {
+		mutex_lock(&sb_out_lock);
+		memcpy(sb_out_virt, req.resp_buf, req.resp_len);
+		mutex_unlock(&sb_out_lock);
+	}
+
+	data->cont_cmd_flag = 1;
+	wake_up_interruptible(&data->cont_cmd_wq);
+	return ret;
+}
+
+static long tzcom_ioctl(struct file *file, unsigned cmd,
+		unsigned long arg)
+{
+	int ret = 0;
+	struct tzcom_data_t *tzcom_data = file->private_data;
+	void __user *argp = (void __user *) arg;
+	PDEBUG("enter tzcom_ioctl()");
+	switch (cmd) {
+	case TZCOM_IOCTL_REGISTER_SERVICE_REQ: {
+		PDEBUG("ioctl register_service_req()");
+		ret = tzcom_register_service(tzcom_data, argp);
+		if (ret)
+			PDEBUG("failed tzcom_register_service: %d", ret);
+		break;
+	}
+	case TZCOM_IOCTL_UNREGISTER_SERVICE_REQ: {
+		PDEBUG("ioctl unregister_service_req()");
+		ret = tzcom_unregister_service(tzcom_data, argp);
+		if (ret)
+			PDEBUG("failed tzcom_unregister_service: %d", ret);
+		break;
+	}
+	case TZCOM_IOCTL_SEND_CMD_REQ: {
+		PDEBUG("ioctl send_cmd_req()");
+		/* Only one client allowed here at a time */
+		mutex_lock(&send_cmd_lock);
+		ret = tzcom_send_cmd(tzcom_data, argp);
+		mutex_unlock(&send_cmd_lock);
+		if (ret)
+			PDEBUG("failed tzcom_send_cmd: %d", ret);
+		break;
+	}
+	case TZCOM_IOCTL_READ_NEXT_CMD_REQ: {
+		PDEBUG("ioctl read_next_cmd_req()");
+		ret = tzcom_read_next_cmd(tzcom_data, argp);
+		if (ret)
+			PDEBUG("failed tzcom_read_next: %d", ret);
+		break;
+	}
+	case TZCOM_IOCTL_CONTINUE_CMD_REQ: {
+		PDEBUG("ioctl continue_cmd_req()");
+		ret = tzcom_cont_cmd(tzcom_data, argp);
+		if (ret)
+			PDEBUG("failed tzcom_cont_cmd: %d", ret);
+		break;
+	}
+	default:
+		return -EINVAL;
+	}
+	return ret;
+}
+
+static int tzcom_open(struct inode *inode, struct file *file)
+{
+	long pil_error;
+	struct tz_pr_init_sb_req_s sb_out_init_req;
+	struct tz_pr_init_sb_rsp_s sb_out_init_rsp;
+	void *rsp_addr_virt;
+	struct tzcom_command cmd;
+	struct tzcom_response resp;
+	struct tzcom_data_t *tzcom_data;
+
+	PDEBUG("In here");
+	if (pil == NULL) {
+		pil = pil_get("playrdy");
+		if (IS_ERR(pil)) {
+			PERR("Playready PIL image load failed");
+			pil_error = PTR_ERR(pil);
+			pil = NULL;
+			return pil_error;
+		}
+		PDEBUG("playrdy image loaded successfully");
+	}
+
+	sb_out_init_req.pr_cmd = TZ_SCHED_CMD_ID_INIT_SB_OUT;
+	sb_out_init_req.sb_len = sb_out_length;
+	sb_out_init_req.sb_ptr = tzcom_virt_to_phys(sb_out_virt);
+	PDEBUG("sb_out_init_req { pr_cmd: %d, sb_len: %u, "
+			"sb_ptr (phys): 0x%x }",
+			sb_out_init_req.pr_cmd,
+			sb_out_init_req.sb_len,
+			sb_out_init_req.sb_ptr);
+
+	mutex_lock(&sb_in_lock);
+	PDEBUG("Before memcpy on sb_in");
+	memcpy(sb_in_virt, &sb_out_init_req, sizeof(sb_out_init_req));
+	PDEBUG("After memcpy on sb_in");
+
+	/* It will always be a new cmd from this method */
+	cmd.cmd_type = TZ_SCHED_CMD_NEW;
+	cmd.sb_in_cmd_addr = (u8 *) tzcom_virt_to_phys(sb_in_virt);
+	cmd.sb_in_cmd_len = sizeof(sb_out_init_req);
+	PDEBUG("tzcom_command { cmd_type: %u, sb_in_cmd_addr: %p, "
+			"sb_in_cmd_len: %u }",
+			cmd.cmd_type, cmd.sb_in_cmd_addr, cmd.sb_in_cmd_len);
+
+	resp.cmd_status = 0;
+	resp.sb_in_rsp_addr = (u8 *)cmd.sb_in_cmd_addr + cmd.sb_in_cmd_len;
+	resp.sb_in_rsp_len = sizeof(sb_out_init_rsp);
+	PDEBUG("tzcom_response before scm { cmd_status: %u, "
+			"sb_in_rsp_addr: %p, sb_in_rsp_len: %u }",
+			resp.cmd_status, resp.sb_in_rsp_addr,
+			resp.sb_in_rsp_len);
+
+	PDEBUG("Before scm_call for sb_init");
+	tzcom_scm_call(&cmd, sizeof(cmd), &resp, sizeof(resp));
+	PDEBUG("After scm_call for sb_init");
+	PDEBUG("tzcom_response after scm { cmd_status: %u, "
+			"sb_in_rsp_addr: %p, sb_in_rsp_len: %u }",
+			resp.cmd_status, resp.sb_in_rsp_addr,
+			resp.sb_in_rsp_len);
+
+	if (resp.sb_in_rsp_addr) {
+		rsp_addr_virt = tzcom_phys_to_virt((unsigned long)
+				resp.sb_in_rsp_addr);
+		PDEBUG("Received response phys: %p, virt: %p",
+				resp.sb_in_rsp_addr,
+				rsp_addr_virt);
+		memcpy(&sb_out_init_rsp, rsp_addr_virt, resp.sb_in_rsp_len);
+	} else {
+		PERR("Error with SB initialization");
+		mutex_unlock(&sb_in_lock);
+		return -EPERM;
+	}
+	mutex_unlock(&sb_in_lock);
+
+	PDEBUG("sb_out_init_rsp { pr_cmd: %d, ret: %d }",
+			sb_out_init_rsp.pr_cmd, sb_out_init_rsp.ret);
+
+	if (sb_out_init_rsp.ret) {
+		PERR("sb_out_init_req failed: %d", sb_out_init_rsp.ret);
+		return -EPERM;
+	}
+
+	tzcom_data = kmalloc(sizeof(*tzcom_data), GFP_KERNEL);
+	if (!tzcom_data) {
+		PERR("kmalloc failed");
+		return -ENOMEM;
+	}
+	file->private_data = tzcom_data;
+
+	INIT_LIST_HEAD(&tzcom_data->callback_list_head);
+	mutex_init(&tzcom_data->callback_list_lock);
+
+	INIT_LIST_HEAD(&tzcom_data->registered_svc_list_head);
+	spin_lock_init(&tzcom_data->registered_svc_list_lock);
+
+	init_waitqueue_head(&tzcom_data->cont_cmd_wq);
+	tzcom_data->cont_cmd_flag = 0;
+	tzcom_data->handled_cmd_svc_instance_id = 0;
+	return 0;
+}
+
+static int tzcom_release(struct inode *inode, struct file *file)
+{
+	struct tzcom_data_t *tzcom_data = file->private_data;
+	struct tzcom_callback_list *lcb, *ncb;
+	struct tzcom_registered_svc_list *lsvc, *nsvc;
+	PDEBUG("In here");
+
+	wake_up_all(&tzcom_data->cont_cmd_wq);
+
+	list_for_each_entry_safe(lcb, ncb,
+			&tzcom_data->callback_list_head, list) {
+		list_del(&lcb->list);
+		kfree(lcb);
+	}
+
+	list_for_each_entry_safe(lsvc, nsvc,
+			&tzcom_data->registered_svc_list_head, list) {
+		wake_up_all(&lsvc->next_cmd_wq);
+		list_del(&lsvc->list);
+		kfree(lsvc);
+	}
+
+	kfree(tzcom_data);
+	return 0;
+}
+
+static const struct file_operations tzcom_fops = {
+		.owner = THIS_MODULE,
+		.unlocked_ioctl = tzcom_ioctl,
+		.open = tzcom_open,
+		.release = tzcom_release
+};
+
+static int __init tzcom_init(void)
+{
+	int rc;
+	struct device *class_dev;
+
+	PDEBUG("Hello tzcom");
+
+	rc = alloc_chrdev_region(&tzcom_device_no, 0, 1, TZCOM_DEV);
+	if (rc < 0) {
+		PERR("alloc_chrdev_region failed %d", rc);
+		return rc;
+	}
+
+	driver_class = class_create(THIS_MODULE, TZCOM_DEV);
+	if (IS_ERR(driver_class)) {
+		rc = -ENOMEM;
+		PERR("class_create failed %d", rc);
+		goto unregister_chrdev_region;
+	}
+
+	class_dev = device_create(driver_class, NULL, tzcom_device_no, NULL,
+			TZCOM_DEV);
+	if (IS_ERR(class_dev)) {
+		rc = PTR_ERR(class_dev);
+		PERR("device_create failed %d", rc);
+		goto class_destroy;
+	}
+
+	cdev_init(&tzcom_cdev, &tzcom_fops);
+	tzcom_cdev.owner = THIS_MODULE;
+
+	rc = cdev_add(&tzcom_cdev, MKDEV(MAJOR(tzcom_device_no), 0), 1);
+	if (rc < 0) {
+		PERR("cdev_add failed %d", rc);
+		goto class_device_destroy;
+	}
+
+	sb_in_phys = pmem_kalloc(sb_in_length, PMEM_MEMTYPE_EBI1 |
+			PMEM_ALIGNMENT_4K);
+	if (IS_ERR((void *)sb_in_phys)) {
+		PERR("could not allocte in kernel pmem buffers for sb_in");
+		rc = -ENOMEM;
+		goto class_device_destroy;
+	}
+	PDEBUG("physical_addr for sb_in: 0x%x", sb_in_phys);
+
+	sb_in_virt = (u8 *) ioremap((unsigned long)sb_in_phys,
+			sb_in_length);
+	if (!sb_in_virt) {
+		PERR("Shared buffer IN allocation failed.");
+		rc = -ENOMEM;
+		goto class_device_destroy;
+	}
+	PDEBUG("sb_in virt address: %p, phys address: 0x%x",
+			sb_in_virt, tzcom_virt_to_phys(sb_in_virt));
+
+	sb_out_phys = pmem_kalloc(sb_out_length, PMEM_MEMTYPE_EBI1 |
+			PMEM_ALIGNMENT_4K);
+	if (IS_ERR((void *)sb_out_phys)) {
+		PERR("could not allocte in kernel pmem buffers for sb_out");
+		rc = -ENOMEM;
+		goto class_device_destroy;
+	}
+	PDEBUG("physical_addr for sb_out: 0x%x", sb_out_phys);
+
+	sb_out_virt = (u8 *) ioremap((unsigned long)sb_out_phys,
+			sb_out_length);
+	if (!sb_out_virt) {
+		PERR("Shared buffer OUT allocation failed.");
+		rc = -ENOMEM;
+		goto class_device_destroy;
+	}
+	PDEBUG("sb_out virt address: %p, phys address: 0x%x",
+			sb_out_virt, tzcom_virt_to_phys(sb_out_virt));
+
+	/* Initialized in tzcom_open */
+	pil = NULL;
+
+	return 0;
+
+class_device_destroy:
+	if (sb_in_virt)
+		iounmap(sb_in_virt);
+	if (sb_in_phys)
+		pmem_kfree(sb_in_phys);
+	if (sb_out_virt)
+		iounmap(sb_out_virt);
+	if (sb_out_phys)
+		pmem_kfree(sb_out_phys);
+	device_destroy(driver_class, tzcom_device_no);
+class_destroy:
+	class_destroy(driver_class);
+unregister_chrdev_region:
+	unregister_chrdev_region(tzcom_device_no, 1);
+	return rc;
+}
+
+static void __exit tzcom_exit(void)
+{
+	PDEBUG("Goodbye tzcom");
+	if (sb_in_virt)
+		iounmap(sb_in_virt);
+	if (sb_in_phys)
+		pmem_kfree(sb_in_phys);
+	if (sb_out_virt)
+		iounmap(sb_out_virt);
+	if (sb_out_phys)
+		pmem_kfree(sb_out_phys);
+	if (pil != NULL) {
+		pil_put("playrdy");
+		pil = NULL;
+	}
+	device_destroy(driver_class, tzcom_device_no);
+	class_destroy(driver_class);
+	unregister_chrdev_region(tzcom_device_no, 1);
+}
+
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sachin Shah <sachins@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm TrustZone Communicator");
+MODULE_VERSION("1.00");
+
+module_init(tzcom_init);
+module_exit(tzcom_exit);
diff --git a/drivers/misc/tzcomi.h b/drivers/misc/tzcomi.h
new file mode 100644
index 0000000..33634cf
--- /dev/null
+++ b/drivers/misc/tzcomi.h
@@ -0,0 +1,112 @@
+/* Qualcomm TrustZone communicator driver
+ *
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __TZCOMI_H_
+#define __TZCOMI_H_
+
+#include <linux/types.h>
+
+enum tz_sched_cmd_id {
+	TZ_SCHED_CMD_ID_INVALID      = 0,
+	TZ_SCHED_CMD_ID_INIT_SB_OUT,    /**< Initialize the shared buffer */
+	TZ_SCHED_CMD_ID_INIT_SB_LOG,    /**< Initialize the logging shared buf */
+	TZ_SCHED_CMD_ID_UNKNOWN         = 0x7FFFFFFE,
+	TZ_SCHED_CMD_ID_MAX             = 0x7FFFFFFF
+};
+
+enum tz_sched_cmd_type {
+	TZ_SCHED_CMD_INVALID = 0,
+	TZ_SCHED_CMD_NEW,      /** New TZ Scheduler Command */
+	TZ_SCHED_CMD_PENDING,  /** Pending cmd...sched will restore stack */
+	TZ_SCHED_CMD_COMPLETE, /** TZ sched command is complete */
+	TZ_SCHED_CMD_MAX     = 0x7FFFFFFF
+};
+
+enum tz_sched_cmd_status {
+	TZ_SCHED_STATUS_INCOMPLETE = 0,
+	TZ_SCHED_STATUS_COMPLETE,
+	TZ_SCHED_STATUS_MAX  = 0x7FFFFFFF
+};
+
+/** Command structure for initializing shared buffers (SB_OUT
+    and SB_LOG)
+*/
+__packed struct tz_pr_init_sb_req_s {
+	/** First 4 bytes should always be command id
+	 * from enum tz_sched_cmd_id */
+	uint32_t                  pr_cmd;
+	/** Pointer to the physical location of sb_out buffer */
+	uint32_t                  sb_ptr;
+	/** length of shared buffer */
+	uint32_t                  sb_len;
+};
+
+
+__packed struct tz_pr_init_sb_rsp_s {
+	/** First 4 bytes should always be command id
+	 * from enum tz_sched_cmd_id */
+	uint32_t                  pr_cmd;
+	/** Return code: 0 on success, appropriate error code otherwise */
+	int32_t                   ret;
+};
+
+
+/**
+ * struct tzcom_command - tzcom command buffer
+ * @cmd_type: value from enum tz_sched_cmd_type
+ * @sb_in_cmd_addr: points to physical location of command
+ *                buffer
+ * @sb_in_cmd_len: length of command buffer
+ */
+__packed struct tzcom_command {
+	uint32_t               cmd_type;
+	uint8_t                *sb_in_cmd_addr;
+	uint32_t               sb_in_cmd_len;
+};
+
+/**
+ * struct tzcom_response - tzcom response buffer
+ * @cmd_status: value from enum tz_sched_cmd_status
+ * @sb_in_rsp_addr: points to physical location of response
+ *                buffer
+ * @sb_in_rsp_len: length of command response
+ */
+__packed struct tzcom_response {
+	uint32_t                 cmd_status;
+	uint8_t                  *sb_in_rsp_addr;
+	uint32_t                 sb_in_rsp_len;
+};
+
+/**
+ * struct tzcom_callback - tzcom callback buffer
+ * @cmd_id: command to run in registered service
+ * @sb_out_cb_data_len: length of the callback data
+ * @sb_out_cb_data_off: offset of the callback data from the
+ *                start of this header
+ *
+ * A callback buffer would be laid out in sb_out as follows:
+ *
+ *     --------------------- <--- struct tzcom_callback
+ *     | callback header   |
+ *     --------------------- <--- tzcom_callback.sb_out_cb_data_off
+ *     | callback data     |
+ *     ---------------------
+ */
+__packed struct tzcom_callback {
+	uint32_t cmd_id;
+	uint32_t sb_out_cb_data_len;
+	uint32_t sb_out_cb_data_off;
+};
+
+#endif /* __TZCOMI_H_ */
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index f2eeb38..9116551 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -19,6 +19,14 @@
 	  This is an option for use by developers; most people should
 	  say N here.  This enables MMC core and driver debugging.
 
+config MMC_PERF_PROFILING
+	bool "MMC performance profiling"
+	depends on MMC != n
+	default n
+	help
+	  If you say Y here, support will be added for collecting
+	  performance numbers at the MMC Queue and Host layers.
+
 if MMC
 
 source "drivers/mmc/core/Kconfig"
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index c779503..16feada 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1044,7 +1044,7 @@
 	md->disk->fops = &mmc_bdops;
 	md->disk->private_data = md;
 	md->disk->queue = md->queue.queue;
-	md->disk->driverfs_dev = parent;
+	md->disk->driverfs_dev = &card->dev;
 	set_disk_ro(md->disk, md->read_only || default_ro);
 	md->disk->flags = GENHD_FL_EXT_DEVT;
 
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 6413afa..9b64847 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -46,12 +46,20 @@
 {
 	struct mmc_queue *mq = d;
 	struct request_queue *q = mq->queue;
+	struct request *req;
+
+#ifdef CONFIG_MMC_PERF_PROFILING
+	ktime_t start, diff;
+	struct mmc_host *host = mq->card->host;
+	unsigned long bytes_xfer;
+#endif
+
 
 	current->flags |= PF_MEMALLOC;
 
 	down(&mq->thread_sem);
 	do {
-		struct request *req = NULL;
+		req = NULL;	/* Must be set to NULL at each iteration */
 
 		spin_lock_irq(q->queue_lock);
 		set_current_state(TASK_INTERRUPTIBLE);
@@ -71,7 +79,26 @@
 		}
 		set_current_state(TASK_RUNNING);
 
-		mq->issue_fn(mq, req);
+#ifdef CONFIG_MMC_PERF_PROFILING
+		bytes_xfer = blk_rq_bytes(req);
+		if (rq_data_dir(req) == READ) {
+			start = ktime_get();
+			mq->issue_fn(mq, req);
+			diff = ktime_sub(ktime_get(), start);
+			host->perf.rbytes_mmcq += bytes_xfer;
+			host->perf.rtime_mmcq =
+				ktime_add(host->perf.rtime_mmcq, diff);
+		} else {
+			start = ktime_get();
+			mq->issue_fn(mq, req);
+			diff = ktime_sub(ktime_get(), start);
+			host->perf.wbytes_mmcq += bytes_xfer;
+			host->perf.wtime_mmcq =
+				ktime_add(host->perf.wtime_mmcq, diff);
+		}
+#else
+		mq->issue_fn(mq, req);
+#endif
 	} while (1);
 	up(&mq->thread_sem);
 
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 7c3444a..3223110 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -24,6 +24,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/pm_runtime.h>
 #include <linux/wakelock.h>
+#include <linux/pm.h>
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
@@ -97,6 +98,9 @@
 {
 	struct mmc_command *cmd = mrq->cmd;
 	int err = cmd->error;
+#ifdef CONFIG_MMC_PERF_PROFILING
+	ktime_t diff;
+#endif
 
 	if (err && cmd->retries && mmc_host_is_spi(host)) {
 		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
@@ -119,6 +123,20 @@
 			cmd->resp[2], cmd->resp[3]);
 
 		if (mrq->data) {
+#ifdef CONFIG_MMC_PERF_PROFILING
+			diff = ktime_sub(ktime_get(), host->perf.start);
+			if (mrq->data->flags == MMC_DATA_READ) {
+				host->perf.rbytes_drv +=
+						mrq->data->bytes_xfered;
+				host->perf.rtime_drv =
+					ktime_add(host->perf.rtime_drv, diff);
+			} else {
+				host->perf.wbytes_drv +=
+						 mrq->data->bytes_xfered;
+				host->perf.wtime_drv =
+					ktime_add(host->perf.wtime_drv, diff);
+			}
+#endif
 			pr_debug("%s:     %d bytes transferred: %d\n",
 				mmc_hostname(host),
 				mrq->data->bytes_xfered, mrq->data->error);
@@ -193,6 +211,9 @@
 			mrq->stop->error = 0;
 			mrq->stop->mrq = mrq;
 		}
+#ifdef CONFIG_MMC_PERF_PROFILING
+		host->perf.start = ktime_get();
+#endif
 	}
 	mmc_host_clk_ungate(host);
 	led_trigger_event(host->led, LED_FULL);
@@ -222,7 +243,7 @@
 
 	mmc_start_request(host, mrq);
 
-	wait_for_completion(&complete);
+	wait_for_completion_io(&complete);
 }
 
 EXPORT_SYMBOL(mmc_wait_for_req);
@@ -479,6 +500,14 @@
 	might_sleep();
 
 	add_wait_queue(&host->wq, &wait);
+#ifdef CONFIG_PM_RUNTIME
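+	/*
+	 * If the host device is in the middle of a runtime suspend, wait for
+	 * it to settle before proceeding, unless we are the suspending task
+	 * itself (waiting for ourselves would deadlock).
+	 */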
+	while (mmc_dev(host)->power.runtime_status == RPM_SUSPENDING) {
+		if (host->suspend_task == current)
+			break;
+		msleep(15);
+	}
+#endif
+
 	spin_lock_irqsave(&host->lock, flags);
 	while (1) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
@@ -1589,8 +1618,16 @@
 	/* Order's important: probe SDIO, then SD, then MMC */
 	if (!mmc_attach_sdio(host))
 		return 0;
+
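+	/*
+	 * The previous attach attempt may have left VDD off; power the host
+	 * back up before probing the next card type.
+	 */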
+	if (!host->ios.vdd)
+		mmc_power_up(host);
+
 	if (!mmc_attach_sd(host))
 		return 0;
+
+	if (!host->ios.vdd)
+		mmc_power_up(host);
+
 	if (!mmc_attach_mmc(host))
 		return 0;
 
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 84694a9..3dead90 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -94,7 +94,7 @@
 		spin_unlock_irqrestore(&host->clk_lock, flags);
 		return;
 	}
-	mutex_lock(&host->clk_gate_mutex);
+	mmc_claim_host(host);
 	spin_lock_irqsave(&host->clk_lock, flags);
 	if (!host->clk_requests) {
 		spin_unlock_irqrestore(&host->clk_lock, flags);
@@ -104,7 +104,7 @@
 		pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
 	}
 	spin_unlock_irqrestore(&host->clk_lock, flags);
-	mutex_unlock(&host->clk_gate_mutex);
+	mmc_release_host(host);
 }
 
 /*
@@ -130,7 +130,7 @@
 {
 	unsigned long flags;
 
-	mutex_lock(&host->clk_gate_mutex);
+	mmc_claim_host(host);
 	spin_lock_irqsave(&host->clk_lock, flags);
 	if (host->clk_gated) {
 		spin_unlock_irqrestore(&host->clk_lock, flags);
@@ -140,7 +140,7 @@
 	}
 	host->clk_requests++;
 	spin_unlock_irqrestore(&host->clk_lock, flags);
-	mutex_unlock(&host->clk_gate_mutex);
+	mmc_release_host(host);
 }
 
 /**
@@ -215,7 +215,6 @@
 	host->clk_gated = false;
 	INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
 	spin_lock_init(&host->clk_lock);
-	mutex_init(&host->clk_gate_mutex);
 }
 
 /**
@@ -309,6 +308,70 @@
 }
 
 EXPORT_SYMBOL(mmc_alloc_host);
+#ifdef CONFIG_MMC_PERF_PROFILING
+static ssize_t
+show_perf(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct mmc_host *host = dev_get_drvdata(dev);
+	int64_t rtime_mmcq, wtime_mmcq, rtime_drv, wtime_drv;
+	unsigned long rbytes_mmcq, wbytes_mmcq, rbytes_drv, wbytes_drv;
+
+	spin_lock(&host->lock);
+
+	rbytes_mmcq = host->perf.rbytes_mmcq;
+	wbytes_mmcq = host->perf.wbytes_mmcq;
+	rbytes_drv = host->perf.rbytes_drv;
+	wbytes_drv = host->perf.wbytes_drv;
+
+	rtime_mmcq = ktime_to_us(host->perf.rtime_mmcq);
+	wtime_mmcq = ktime_to_us(host->perf.wtime_mmcq);
+	rtime_drv = ktime_to_us(host->perf.rtime_drv);
+	wtime_drv = ktime_to_us(host->perf.wtime_drv);
+
+	spin_unlock(&host->lock);
+
+	return snprintf(buf, PAGE_SIZE, "Write performance at MMCQ Level:"
+					"%lu bytes in %lld microseconds\n"
+					"Read performance at MMCQ Level:"
+					"%lu bytes in %lld microseconds\n"
+					"Write performance at driver Level:"
+					"%lu bytes in %lld microseconds\n"
+					"Read performance at driver Level:"
+					"%lu bytes in %lld microseconds\n",
+					wbytes_mmcq, wtime_mmcq, rbytes_mmcq,
+					rtime_mmcq, wbytes_drv, wtime_drv,
+					rbytes_drv, rtime_drv);
+}
+
+static ssize_t
+set_perf(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int64_t value;
+	struct mmc_host *host = dev_get_drvdata(dev);
+	sscanf(buf, "%lld", &value);
+	if (!value) {
+		spin_lock(&host->lock);
+		memset(&host->perf, 0, sizeof(host->perf));
+		spin_unlock(&host->lock);
+	}
+
+	return count;
+}
+
+static DEVICE_ATTR(perf, S_IRUGO | S_IWUSR,
+		show_perf, set_perf);
+#endif
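+
+/*
+ * When MMC_PERF_PROFILING is enabled, the attribute group below exposes a
+ * "perf" file on the host's parent device: reading it dumps the byte and
+ * time counters, and writing 0 to it resets them (the exact sysfs path
+ * depends on the platform device the host hangs off).
+ */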
+
+static struct attribute *dev_attrs[] = {
+#ifdef CONFIG_MMC_PERF_PROFILING
+	&dev_attr_perf.attr,
+#endif
+	NULL,
+};
+static struct attribute_group dev_attr_grp = {
+	.attrs = dev_attrs,
+};
 
 /**
  *	mmc_add_host - initialise host hardware
@@ -334,6 +397,10 @@
 #ifdef CONFIG_DEBUG_FS
 	mmc_add_host_debugfs(host);
 #endif
+	err = sysfs_create_group(&host->parent->kobj, &dev_attr_grp);
+	if (err)
+		pr_err("%s: failed to create sysfs group with err %d\n",
+							 __func__, err);
 
 	mmc_start_host(host);
 	if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
@@ -362,6 +429,8 @@
 #ifdef CONFIG_DEBUG_FS
 	mmc_remove_host_debugfs(host);
 #endif
+	sysfs_remove_group(&host->parent->kobj, &dev_attr_grp);
+
 
 	device_del(&host->class_dev);
 
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index aa7d1d7..04816e9 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -660,6 +660,9 @@
 
 		/* Erase size depends on CSD and Extended CSD */
 		mmc_set_erase_size(card);
+
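+		/*
+		 * High-capacity parts signal sector (block) addressing via
+		 * the OCR access-mode bits; honour it when EXT_CSD also
+		 * reports a sector count.
+		 */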
+		if (card->ext_csd.sectors && (rocr & MMC_CARD_SECTOR_ADDR))
+			mmc_card_set_blockaddr(card);
 	}
 
 	/*
@@ -864,7 +867,10 @@
 	BUG_ON(!host->card);
 
 	mmc_remove_card(host->card);
+
+	mmc_claim_host(host);
 	host->card = NULL;
+	mmc_release_host(host);
 }
 
 /*
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 845ce7c..2e39d2c 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -398,6 +398,7 @@
 	if (err)
 		return err;
 
+	mmc_delay(1);
 	/* Must check status to be sure of no errors */
 	do {
 		err = mmc_send_status(card, &status);
@@ -506,6 +507,9 @@
 
 	data.sg = &sg;
 	data.sg_len = 1;
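+	/*
+	 * Give the transfer an explicit, conservative data timeout instead
+	 * of leaving the timeout fields unset on this path.
+	 */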
+	data.timeout_ns = 1000000;
+	data.timeout_clks = 0;
+
 	sg_init_one(&sg, data_buf, len);
 	mmc_wait_for_req(host, &mrq);
 	err = 0;
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 5decf49..c549216 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1003,7 +1003,10 @@
 	BUG_ON(!host->card);
 
 	mmc_remove_card(host->card);
+
+	mmc_claim_host(host);
 	host->card = NULL;
+	mmc_release_host(host);
 }
 
 /*
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 7da522e..2024824 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -161,7 +161,7 @@
 	int ret;
 	u8 ctrl;
 
-	if (!(card->host->caps & MMC_CAP_4_BIT_DATA))
+	if (!(card->host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
 		return 0;
 
 	if (card->cccr.low_speed && !card->cccr.wide_bus)
@@ -171,7 +171,10 @@
 	if (ret)
 		return ret;
 
-	ctrl |= SDIO_BUS_WIDTH_4BIT;
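+	/* prefer the widest bus width the host controller supports */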
+	if (card->host->caps & MMC_CAP_8_BIT_DATA)
+		ctrl |= SDIO_BUS_WIDTH_8BIT;
+	else if (card->host->caps & MMC_CAP_4_BIT_DATA)
+		ctrl |= SDIO_BUS_WIDTH_4BIT;
 
 	ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
 	if (ret)
@@ -212,7 +215,7 @@
 	int ret;
 	u8 ctrl;
 
-	if (!(card->host->caps & MMC_CAP_4_BIT_DATA))
+	if (!(card->host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
 		return 0;
 
 	if (card->cccr.low_speed && !card->cccr.wide_bus)
@@ -222,10 +225,10 @@
 	if (ret)
 		return ret;
 
-	if (!(ctrl & SDIO_BUS_WIDTH_4BIT))
+	if (!(ctrl & (SDIO_BUS_WIDTH_4BIT | SDIO_BUS_WIDTH_8BIT)))
 		return 0;
 
-	ctrl &= ~SDIO_BUS_WIDTH_4BIT;
+	ctrl &= ~(SDIO_BUS_WIDTH_4BIT | SDIO_BUS_WIDTH_8BIT);
 	ctrl |= SDIO_BUS_ASYNC_INT;
 
 	ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
@@ -532,8 +535,12 @@
 	 * Switch to wider bus (if supported).
 	 */
 	err = sdio_enable_4bit_bus(card);
-	if (err > 0)
-		mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
+	if (err > 0) {
+		if (card->host->caps & MMC_CAP_8_BIT_DATA)
+			mmc_set_bus_width(card->host, MMC_BUS_WIDTH_8);
+		else if (card->host->caps & MMC_CAP_4_BIT_DATA)
+			mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
+	}
 	else if (err)
 		goto remove;
 
@@ -678,7 +685,10 @@
 		/* We may have switched to 1-bit mode during suspend */
 		err = sdio_enable_4bit_bus(host->card);
 		if (err > 0) {
-			mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
+			if (host->caps & MMC_CAP_8_BIT_DATA)
+				mmc_set_bus_width(host, MMC_BUS_WIDTH_8);
+			else if (host->caps & MMC_CAP_4_BIT_DATA)
+				mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
 			err = 0;
 		}
 	}
@@ -983,8 +993,12 @@
 	mmc_set_clock(host, mmc_sdio_get_max_clock(card));
 
 	err = sdio_enable_4bit_bus(card);
-	if (err > 0)
-		mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
+	if (err > 0) {
+		if (host->caps & MMC_CAP_8_BIT_DATA)
+			mmc_set_bus_width(host, MMC_BUS_WIDTH_8);
+		else if (host->caps & MMC_CAP_4_BIT_DATA)
+			mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
+	}
 	else if (err)
 		goto err;
 
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index 541bdb8..dc94222 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -270,8 +270,16 @@
 			break;
 
 		/* null entries have no link field or data */
-		if (tpl_code == 0x00)
-			continue;
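+		/*
+		 * Some cards (matched below by CIS vendor/device ID) treat a
+		 * null tuple as the effective end of the CIS; stop scanning
+		 * for those instead of skipping it.
+		 */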
+		if (tpl_code == 0x00) {
+			if (card->cis.vendor == 0x70 &&
+				(card->cis.device == 0x2460 ||
+				 card->cis.device == 0x0460 ||
+				 card->cis.device == 0x23F1 ||
+				 card->cis.device == 0x23F0))
+				break;
+			else
+				continue;
+		}
 
 		ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_link);
 		if (ret)
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 56dbf3f..7378c62 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -442,6 +442,148 @@
 config MMC_TMIO_CORE
 	tristate
 
+config MMC_MSM
+	tristate "Qualcomm SDCC Controller Support"
+	depends on MMC && ARCH_MSM
+	help
+	  This provides support for the SD/MMC cell found in the
+	  MSM and QSD SoCs from Qualcomm.
+
+config MMC_MSM_SDIO_SUPPORT
+	boolean "Qualcomm MSM SDIO support"
+	depends on MMC_MSM
+	help
+	  This enables SDIO support in the msm_sdcc driver.
+
+config MMC_MSM_CARD_HW_DETECTION
+	boolean "Qualcomm MMC Hardware detection support"
+	depends on MMC_MSM
+	default n
+	help
+	  Select Y if the hardware supports detecting card insertion/removal.
+
+config MMC_MSM_SDC1_SUPPORT
+	boolean "Qualcomm SDC1 support"
+	depends on MMC_MSM
+	default y
+	help
+	  Select Y to enable Slot 1.
+
+config MMC_MSM_SDC1_8_BIT_SUPPORT
+	boolean "Qualcomm SDC1 8bit support"
+	depends on MMC_MSM_SDC1_SUPPORT
+	default n
+	help
+	  Select Y to enable 8bit support for Slot 1.
+
+config MMC_MSM_SDC1_DUMMY52_REQUIRED
+	boolean "Send dummy 52 read for SDC1"
+	depends on MMC_MSM_SDC1_SUPPORT
+	default n
+	help
+	  Select Y to enable sending dummy 52 reads to complete
+	  all data commands.  Required for some SDIO cards.
+	  If unsure, say N.
+
+config MMC_MSM_SDC2_SUPPORT
+	boolean "Qualcomm SDC2 support"
+	depends on MMC_MSM
+	default y
+	help
+	  Select Y to enable Slot 2.
+
+config MMC_MSM_SDC2_8_BIT_SUPPORT
+	boolean "Qualcomm SDC2 8bit support"
+	depends on MMC_MSM_SDC2_SUPPORT
+	default n
+	help
+	  Select Y to enable 8bit support for Slot 2.
+
+config MMC_MSM_SDC2_DUMMY52_REQUIRED
+	boolean "Send dummy 52 read for SDC2"
+	depends on MMC_MSM_SDC2_SUPPORT
+	default n
+	help
+	  Select Y to enable sending dummy 52 reads to complete
+	  all data commands.  Required for some SDIO cards.
+	  If unsure, say N.
+
+config MMC_MSM_SDC3_SUPPORT
+	boolean "Qualcomm SDC3 support"
+	depends on MMC_MSM
+	default n
+	help
+	  Select Y to enable Slot 3.
+
+config MMC_MSM_SDC3_8_BIT_SUPPORT
+	boolean "Qualcomm SDC3 8bit support"
+	depends on MMC_MSM_SDC3_SUPPORT
+	default n
+	help
+	  Select Y to enable 8bit support for Slot 3.
+
+config MMC_MSM_SDC3_DUMMY52_REQUIRED
+	boolean "Send dummy 52 read for SDC3"
+	depends on MMC_MSM_SDC3_SUPPORT
+	default n
+	help
+	  Select Y to enable sending dummy 52 reads to complete
+	  all data commands.  Required for some SDIO cards.
+	  If unsure, say N.
+
+config MMC_MSM_SDC4_SUPPORT
+	boolean "Qualcomm SDC4 support"
+	depends on MMC_MSM
+	default n
+	help
+	  Select Y to enable Slot 4.
+
+config MMC_MSM_SDC4_8_BIT_SUPPORT
+	boolean "Qualcomm SDC4 8bit support"
+	depends on MMC_MSM_SDC4_SUPPORT
+	default n
+	help
+	  Select Y to enable 8bit support for Slot 4.
+
+config MMC_MSM_SDC4_DUMMY52_REQUIRED
+	boolean "Send dummy 52 read for SDC4"
+	depends on MMC_MSM_SDC4_SUPPORT
+	default n
+	help
+	  Select Y to enable sending dummy 52 reads to complete
+	  all data commands.  Required for some SDIO cards.
+	  If unsure, say N.
+
+config MMC_MSM_SDC5_SUPPORT
+	boolean "Qualcomm SDC5 support"
+	depends on MMC_MSM
+	default n
+	help
+	  Select Y to enable Slot 5.
+
+config MMC_MSM_SDC5_8_BIT_SUPPORT
+	boolean "Qualcomm SDC5 8bit support"
+	depends on MMC_MSM_SDC5_SUPPORT
+	default n
+	help
+	  Select Y to enable 8bit support for Slot 5.
+
+config MMC_MSM_SDC5_DUMMY52_REQUIRED
+	boolean "Send dummy 52 read for SDC5"
+	depends on MMC_MSM_SDC5_SUPPORT
+	default n
+	help
+	  Select Y to enable sending dummy 52 reads to complete
+	  all data commands.  Required for some SDIO cards.
+	  If unsure, say N.
+
+config MMC_MSM_SPS_SUPPORT
+	bool "Use SPS BAM as data mover"
+	depends on MMC_MSM && SPS
+	default n
+	help
+	  Select Y to use the SPS BAM as the data mover.
+
 config MMC_TMIO
 	tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support"
 	depends on MFD_TMIO || MFD_ASIC3
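The per-slot symbols above are normally consumed by board files when filling in the SDCC platform data; a rough sketch of that pattern, assuming the mmc_platform_data layout from this tree's asm/mach/mmc.h (the field names shown here should be checked against that header):

static struct mmc_platform_data msm_sdc1_data = {
#ifdef CONFIG_MMC_MSM_SDC1_8_BIT_SUPPORT
	.mmc_bus_width = MMC_CAP_8_BIT_DATA,
#else
	.mmc_bus_width = MMC_CAP_4_BIT_DATA,
#endif
#ifdef CONFIG_MMC_MSM_SDC1_DUMMY52_REQUIRED
	.dummy52_required = 1,
#endif
};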
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 58a5cf7..e780400 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -36,6 +36,9 @@
 endif
 obj-$(CONFIG_MMC_SDHI)		+= sh_mobile_sdhi.o
 obj-$(CONFIG_MMC_CB710)		+= cb710-mmc.o
+obj-$(CONFIG_MMC_MSM)		+= msm_sdcc.o
+obj-$(CONFIG_MMC_MSM_SPS_SUPPORT) += msm_sdcc_dml.o
 obj-$(CONFIG_MMC_VIA_SDMMC)	+= via-sdmmc.o
 obj-$(CONFIG_SDH_BFIN)		+= bfin_sdh.o
 obj-$(CONFIG_MMC_DW)		+= dw_mmc.o
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index a4c865a..23e8d69 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -3,7 +3,7 @@
  *
  *  Copyright (C) 2007 Google Inc,
  *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
- *  Copyright (C) 2009, Code Aurora Forum. All Rights Reserved.
+ *  Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -21,12 +21,14 @@
 #include <linux/ioport.h>
 #include <linux/device.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/highmem.h>
 #include <linux/log2.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
 #include <linux/mmc/sdio.h>
 #include <linux/clk.h>
 #include <linux/scatterlist.h>
@@ -35,149 +37,251 @@
 #include <linux/debugfs.h>
 #include <linux/io.h>
 #include <linux/memory.h>
-#include <linux/gfp.h>
+#include <linux/pm_runtime.h>
+#include <linux/wakelock.h>
 #include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
 
 #include <asm/cacheflush.h>
 #include <asm/div64.h>
 #include <asm/sizes.h>
 
-#include <mach/mmc.h>
+#include <asm/mach/mmc.h>
 #include <mach/msm_iomap.h>
-#include <mach/dma.h>
 #include <mach/clk.h>
+#include <mach/dma.h>
+#include <mach/htc_pwrsink.h>
+#include <mach/sdio_al.h>
 
 #include "msm_sdcc.h"
+#include "msm_sdcc_dml.h"
 
 #define DRIVER_NAME "msm-sdcc"
 
-#define BUSCLK_PWRSAVE 1
-#define BUSCLK_TIMEOUT (HZ)
-static unsigned int msmsdcc_fmin = 144000;
-static unsigned int msmsdcc_fmax = 50000000;
-static unsigned int msmsdcc_4bit = 1;
+#define DBG(host, fmt, args...)	\
+	pr_debug("%s: %s: " fmt "\n", mmc_hostname(host->mmc), __func__ , args)
+
+#define IRQ_DEBUG 0
+#define SPS_SDCC_PRODUCER_PIPE_INDEX	1
+#define SPS_SDCC_CONSUMER_PIPE_INDEX	2
+#define SPS_CONS_PERIPHERAL		0
+#define SPS_PROD_PERIPHERAL		1
+/* 16 KB */
+#define SPS_MAX_DESC_SIZE		(16 * 1024)
+
+#if defined(CONFIG_DEBUG_FS)
+static void msmsdcc_dbg_createhost(struct msmsdcc_host *);
+static struct dentry *debugfs_dir;
+static struct dentry *debugfs_file;
+static int  msmsdcc_dbg_init(void);
+#endif
+
 static unsigned int msmsdcc_pwrsave = 1;
-static unsigned int msmsdcc_piopoll = 1;
-static unsigned int msmsdcc_sdioirq;
 
-#define PIO_SPINMAX 30
-#define CMD_SPINMAX 20
+#define DUMMY_52_STATE_NONE		0
+#define DUMMY_52_STATE_SENT		1
 
+static struct mmc_command dummy52cmd;
+static struct mmc_request dummy52mrq = {
+	.cmd = &dummy52cmd,
+	.data = NULL,
+	.stop = NULL,
+};
+static struct mmc_command dummy52cmd = {
+	.opcode = SD_IO_RW_DIRECT,
+	.flags = MMC_RSP_PRESENT,
+	.data = NULL,
+	.mrq = &dummy52mrq,
+};
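For context on the dummy command above: with .arg left at zero, SD_IO_RW_DIRECT amounts to a read of function 0, register 0, so the transfer itself is harmless; it is issued only so the controller's PROGDONE/CMDCRCFAIL/CMDTIMEOUT path (see the dummy_52_state handling in msmsdcc_irq() further down) completes before the next real request is started.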
+/*
+ * An array holding the Tuning pattern to compare with when
+ * executing a tuning cycle.
+ */
+static const u32 cmd19_tuning_block[16] = {
+	0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
+	0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
+	0xF0FFF0FF, 0x3CCCFC0F, 0xCFCC33CC, 0xEEFFEFFF,
+	0xFDFFFDFF, 0xFFBFFFDF, 0xFFF7FFBB, 0xDE7B7FF7
+};
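As a hedged illustration of how the pattern above would be used during a CMD19 tuning cycle (the helper name is hypothetical; the actual tuning routine is not shown in this hunk):

static bool msmsdcc_tuning_block_matches(const u32 *rcvd)
{
	/* A candidate clock/DLL phase is accepted only if the block read
	 * back over the bus matches the expected pattern word for word. */
	return !memcmp(rcvd, cmd19_tuning_block,
		       sizeof(cmd19_tuning_block));
}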
 
-static inline void
-msmsdcc_disable_clocks(struct msmsdcc_host *host, int deferr)
+#define VERBOSE_COMMAND_TIMEOUTS	0
+
+#if IRQ_DEBUG == 1
+static char *irq_status_bits[] = { "cmdcrcfail", "datcrcfail", "cmdtimeout",
+				   "dattimeout", "txunderrun", "rxoverrun",
+				   "cmdrespend", "cmdsent", "dataend", NULL,
+				   "datablkend", "cmdactive", "txactive",
+				   "rxactive", "txhalfempty", "rxhalffull",
+				   "txfifofull", "rxfifofull", "txfifoempty",
+				   "rxfifoempty", "txdataavlbl", "rxdataavlbl",
+				   "sdiointr", "progdone", "atacmdcompl",
+				   "sdiointrope", "ccstimeout", NULL, NULL,
+				   NULL, NULL, NULL };
+
+static void
+msmsdcc_print_status(struct msmsdcc_host *host, char *hdr, uint32_t status)
 {
-	WARN_ON(!host->clks_on);
+	int i;
 
-	BUG_ON(host->curr.mrq);
-
-	if (deferr) {
-		mod_timer(&host->busclk_timer, jiffies + BUSCLK_TIMEOUT);
-	} else {
-		del_timer_sync(&host->busclk_timer);
-		/* Need to check clks_on again in case the busclk
-		 * timer fired
-		 */
-		if (host->clks_on) {
-			clk_disable(host->clk);
-			clk_disable(host->pclk);
-			host->clks_on = 0;
-		}
+	pr_debug("%s-%s ", mmc_hostname(host->mmc), hdr);
+	for (i = 0; i < 32; i++) {
+		if (status & (1 << i))
+			pr_debug("%s ", irq_status_bits[i]);
 	}
+	pr_debug("\n");
 }
-
-static inline int
-msmsdcc_enable_clocks(struct msmsdcc_host *host)
-{
-	int rc;
-
-	del_timer_sync(&host->busclk_timer);
-
-	if (!host->clks_on) {
-		rc = clk_enable(host->pclk);
-		if (rc)
-			return rc;
-		rc = clk_enable(host->clk);
-		if (rc) {
-			clk_disable(host->pclk);
-			return rc;
-		}
-		udelay(1 + ((3 * USEC_PER_SEC) /
-		       (host->clk_rate ? host->clk_rate : msmsdcc_fmin)));
-		host->clks_on = 1;
-	}
-	return 0;
-}
-
-static inline unsigned int
-msmsdcc_readl(struct msmsdcc_host *host, unsigned int reg)
-{
-	return readl(host->base + reg);
-}
-
-static inline void
-msmsdcc_writel(struct msmsdcc_host *host, u32 data, unsigned int reg)
-{
-	writel(data, host->base + reg);
-	/* 3 clk delay required! */
-	udelay(1 + ((3 * USEC_PER_SEC) /
-	       (host->clk_rate ? host->clk_rate : msmsdcc_fmin)));
-}
+#endif
 
 static void
 msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd,
 		      u32 c);
 
-static void msmsdcc_reset_and_restore(struct msmsdcc_host *host)
+#ifdef CONFIG_MMC_MSM_SPS_SUPPORT
+static int msmsdcc_sps_reset_ep(struct msmsdcc_host *host,
+				struct msmsdcc_sps_ep_conn_data *ep);
+static int msmsdcc_sps_restore_ep(struct msmsdcc_host *host,
+				struct msmsdcc_sps_ep_conn_data *ep);
+#else
+static inline int msmsdcc_sps_init_ep_conn(struct msmsdcc_host *host,
+				struct msmsdcc_sps_ep_conn_data *ep,
+				bool is_producer) { return 0; }
+static inline void msmsdcc_sps_exit_ep_conn(struct msmsdcc_host *host,
+				struct msmsdcc_sps_ep_conn_data *ep) { }
+static inline int msmsdcc_sps_reset_ep(struct msmsdcc_host *host,
+				struct msmsdcc_sps_ep_conn_data *ep)
 {
-	u32	mci_clk = 0;
-	u32	mci_mask0 = 0;
-	int	ret = 0;
+	return 0;
+}
+static inline int msmsdcc_sps_restore_ep(struct msmsdcc_host *host,
+				struct msmsdcc_sps_ep_conn_data *ep)
+{
+	return 0;
+}
+static inline int msmsdcc_sps_init(struct msmsdcc_host *host) { return 0; }
+static inline void msmsdcc_sps_exit(struct msmsdcc_host *host) {}
+#endif /* CONFIG_MMC_MSM_SPS_SUPPORT */
 
-	/* Save the controller state */
-	mci_clk = readl(host->base + MMCICLOCK);
-	mci_mask0 = readl(host->base + MMCIMASK0);
+/**
+ * Apply soft reset
+ *
+ * This function applies a soft reset to the SDCC core and
+ * to the BAM and DML cores.
+ *
+ * This function should be called to recover from error
+ * conditions encountered during CMD/DATA transfers with the card.
+ *
+ * Soft reset should only be used with SDCC controller v4.
+ *
+ * @host - Pointer to driver's host structure
+ *
+ */
+static void msmsdcc_soft_reset_and_restore(struct msmsdcc_host *host)
+{
+	int rc;
 
-	/* Reset the controller */
-	ret = clk_reset(host->clk, CLK_RESET_ASSERT);
-	if (ret)
-		pr_err("%s: Clock assert failed at %u Hz with err %d\n",
-				mmc_hostname(host->mmc), host->clk_rate, ret);
+	if (host->is_sps_mode) {
+		/* Reset DML first */
+		msmsdcc_dml_reset(host);
+		/* Now reset all BAM pipes connections */
+		rc = msmsdcc_sps_reset_ep(host, &host->sps.prod);
+		if (rc)
+			pr_err("%s:msmsdcc_sps_reset_ep() error=%d\n",
+					mmc_hostname(host->mmc), rc);
+		rc = msmsdcc_sps_reset_ep(host, &host->sps.cons);
+		if (rc)
+			pr_err("%s:msmsdcc_sps_reset_ep() error=%d\n",
+					mmc_hostname(host->mmc), rc);
+	}
+	/*
+	 * Reset the SDCC controller's DPSM (data path state machine)
+	 * and CPSM (command path state machine).
+	 */
+	mb();
+	writel_relaxed(0, host->base + MMCICOMMAND);
+	writel_relaxed(0, host->base + MMCIDATACTRL);
+	mb();
 
-	ret = clk_reset(host->clk, CLK_RESET_DEASSERT);
-	if (ret)
-		pr_err("%s: Clock deassert failed at %u Hz with err %d\n",
-				mmc_hostname(host->mmc), host->clk_rate, ret);
-
-	pr_info("%s: Controller has been re-initialiazed\n",
+	pr_debug("%s: Applied soft reset to Controller\n",
 			mmc_hostname(host->mmc));
 
-	/* Restore the contoller state */
-	writel(host->pwr, host->base + MMCIPOWER);
-	writel(mci_clk, host->base + MMCICLOCK);
-	writel(mci_mask0, host->base + MMCIMASK0);
-	ret = clk_set_rate(host->clk, host->clk_rate);
-	if (ret)
-		pr_err("%s: Failed to set clk rate %u Hz (%d)\n",
-				mmc_hostname(host->mmc), host->clk_rate, ret);
+	if (host->is_sps_mode) {
+		/* Restore all BAM pipes connections */
+		rc = msmsdcc_sps_restore_ep(host, &host->sps.prod);
+		if (rc)
+			pr_err("%s:msmsdcc_sps_restore_ep() error=%d\n",
+					mmc_hostname(host->mmc), rc);
+		rc = msmsdcc_sps_restore_ep(host, &host->sps.cons);
+		if (rc)
+			pr_err("%s:msmsdcc_sps_restore_ep() error=%d\n",
+					mmc_hostname(host->mmc), rc);
+		msmsdcc_dml_init(host);
+	}
 }
 
-static void
+static void msmsdcc_reset_and_restore(struct msmsdcc_host *host)
+{
+	if (host->plat->sdcc_v4_sup) {
+		msmsdcc_soft_reset_and_restore(host);
+	} else {
+		/* Give Clock reset (hard reset) to controller */
+		u32	mci_clk = 0;
+		u32	mci_mask0 = 0;
+		int ret;
+
+		/* Save the controller state */
+		mci_clk = readl_relaxed(host->base + MMCICLOCK);
+		mci_mask0 = readl_relaxed(host->base + MMCIMASK0);
+
+		mb();
+		/* Reset the controller */
+		ret = clk_reset(host->clk, CLK_RESET_ASSERT);
+		if (ret)
+			pr_err("%s: Clock assert failed at %u Hz"
+				" with err %d\n", mmc_hostname(host->mmc),
+					host->clk_rate, ret);
+
+		ret = clk_reset(host->clk, CLK_RESET_DEASSERT);
+		if (ret)
+			pr_err("%s: Clock deassert failed at %u Hz"
+				" with err %d\n", mmc_hostname(host->mmc),
+					host->clk_rate, ret);
+
+		pr_debug("%s: Controller has been reinitialized\n",
+				mmc_hostname(host->mmc));
+
+		mb();
+		/* Restore the controller state */
+		writel_relaxed(host->pwr, host->base + MMCIPOWER);
+		writel_relaxed(mci_clk, host->base + MMCICLOCK);
+		writel_relaxed(mci_mask0, host->base + MMCIMASK0);
+		ret = clk_set_rate(host->clk, host->clk_rate);
+		if (ret)
+			pr_err("%s: Failed to set clk rate %u Hz. err %d\n",
+					mmc_hostname(host->mmc),
+					host->clk_rate, ret);
+		mb();
+	}
+}
+
+static int
 msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq)
 {
+	int retval = 0;
+
 	BUG_ON(host->curr.data);
 
 	host->curr.mrq = NULL;
 	host->curr.cmd = NULL;
 
+	del_timer(&host->req_tout_timer);
+
 	if (mrq->data)
 		mrq->data->bytes_xfered = host->curr.data_xfered;
 	if (mrq->cmd->error == -ETIMEDOUT)
 		mdelay(5);
 
-#if BUSCLK_PWRSAVE
-	msmsdcc_disable_clocks(host, 1);
-#endif
 	/*
 	 * Need to drop the host lock here; mmc_request_done may call
 	 * back into the driver...
@@ -185,6 +289,8 @@
 	spin_unlock(&host->lock);
 	mmc_request_done(host->mmc, mrq);
 	spin_lock(&host->lock);
+
+	return retval;
 }
 
 static void
@@ -194,34 +300,46 @@
 	host->curr.got_dataend = 0;
 }
 
-uint32_t msmsdcc_fifo_addr(struct msmsdcc_host *host)
+static inline uint32_t msmsdcc_fifo_addr(struct msmsdcc_host *host)
 {
-	return host->memres->start + MMCIFIFO;
+	return host->core_memres->start + MMCIFIFO;
+}
+
+static inline unsigned int msmsdcc_get_min_sup_clk_rate(
+					struct msmsdcc_host *host);
+static inline void msmsdcc_delay(struct msmsdcc_host *host)
+{
+	mb();
+	udelay(1 + ((3 * USEC_PER_SEC) /
+		(host->clk_rate ? host->clk_rate :
+			msmsdcc_get_min_sup_clk_rate(host))));
 }
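A quick worked example of the delay above (numbers are illustrative): at the 400 kHz identification clock, three clock periods are 3 * USEC_PER_SEC / 400000 = 7 us after integer division, so the call becomes udelay(8); at a 48 MHz bus clock the quotient is 0 and the expression collapses to udelay(1).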
 
 static inline void
-msmsdcc_start_command_exec(struct msmsdcc_host *host, u32 arg, u32 c) {
-       msmsdcc_writel(host, arg, MMCIARGUMENT);
-       msmsdcc_writel(host, c, MMCICOMMAND);
+msmsdcc_start_command_exec(struct msmsdcc_host *host, u32 arg, u32 c)
+{
+	writel_relaxed(arg, host->base + MMCIARGUMENT);
+	msmsdcc_delay(host);
+	writel_relaxed(c, host->base + MMCICOMMAND);
+	mb();
 }
 
 static void
 msmsdcc_dma_exec_func(struct msm_dmov_cmd *cmd)
 {
-	struct msmsdcc_host *host = (struct msmsdcc_host *)cmd->data;
+	struct msmsdcc_host *host = (struct msmsdcc_host *)cmd->user;
 
-	msmsdcc_writel(host, host->cmd_timeout, MMCIDATATIMER);
-	msmsdcc_writel(host, (unsigned int)host->curr.xfer_size,
-		       MMCIDATALENGTH);
-	msmsdcc_writel(host, host->cmd_pio_irqmask, MMCIMASK1);
-	msmsdcc_writel(host, host->cmd_datactrl, MMCIDATACTRL);
+	writel_relaxed(host->cmd_timeout, host->base + MMCIDATATIMER);
+	writel_relaxed((unsigned int)host->curr.xfer_size,
+			host->base + MMCIDATALENGTH);
+	msmsdcc_delay(host);	/* Allow data parms to be applied */
+	writel_relaxed(host->cmd_datactrl, host->base + MMCIDATACTRL);
+	msmsdcc_delay(host);	/* Force delay prior to ADM or command */
 
 	if (host->cmd_cmd) {
 		msmsdcc_start_command_exec(host,
-					   (u32) host->cmd_cmd->arg,
-					   (u32) host->cmd_c);
+			(u32)host->cmd_cmd->arg, (u32)host->cmd_c);
 	}
-	host->dma.active = 1;
 }
 
 static void
@@ -230,15 +348,10 @@
 	struct msmsdcc_host *host = (struct msmsdcc_host *)data;
 	unsigned long		flags;
 	struct mmc_request	*mrq;
-	struct msm_dmov_errdata err;
 
 	spin_lock_irqsave(&host->lock, flags);
-	host->dma.active = 0;
-
-	err = host->dma.err;
 	mrq = host->curr.mrq;
 	BUG_ON(!mrq);
-	WARN_ON(!mrq->data);
 
 	if (!(host->dma.result & DMOV_RSLT_VALID)) {
 		pr_err("msmsdcc: Invalid DataMover result\n");
@@ -247,6 +360,7 @@
 
 	if (host->dma.result & DMOV_RSLT_DONE) {
 		host->curr.data_xfered = host->curr.xfer_size;
+		host->curr.xfer_remain -= host->curr.xfer_size;
 	} else {
 		/* Error or flush  */
 		if (host->dma.result & DMOV_RSLT_ERROR)
@@ -255,11 +369,11 @@
 		if (host->dma.result & DMOV_RSLT_FLUSH)
 			pr_err("%s: DMA channel flushed (0x%.8x)\n",
 			       mmc_hostname(host->mmc), host->dma.result);
-
 		pr_err("Flush data: %.8x %.8x %.8x %.8x %.8x %.8x\n",
-		       err.flush[0], err.flush[1], err.flush[2],
-		       err.flush[3], err.flush[4], err.flush[5]);
-
+		       host->dma.err.flush[0], host->dma.err.flush[1],
+		       host->dma.err.flush[2], host->dma.err.flush[3],
+		       host->dma.err.flush[4],
+		       host->dma.err.flush[5]);
 		msmsdcc_reset_and_restore(host);
 		if (!mrq->data->error)
 			mrq->data->error = -EIO;
@@ -267,6 +381,14 @@
 	dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,
 		     host->dma.dir);
 
+	if (host->curr.user_pages) {
+		struct scatterlist *sg = host->dma.sg;
+		int i;
+
+		for (i = 0; i < host->dma.num_ents; i++, sg++)
+			flush_dcache_page(sg_page(sg));
+	}
+
 	host->dma.sg = NULL;
 	host->dma.busy = 0;
 
@@ -278,17 +400,17 @@
 		 */
 		msmsdcc_stop_data(host);
 
-		if (!mrq->data->error)
+		if (!mrq->data->error) {
 			host->curr.data_xfered = host->curr.xfer_size;
+			host->curr.xfer_remain -= host->curr.xfer_size;
+		}
 		if (!mrq->data->stop || mrq->cmd->error) {
 			host->curr.mrq = NULL;
 			host->curr.cmd = NULL;
 			mrq->data->bytes_xfered = host->curr.data_xfered;
-
+			del_timer(&host->req_tout_timer);
 			spin_unlock_irqrestore(&host->lock, flags);
-#if BUSCLK_PWRSAVE
-			msmsdcc_disable_clocks(host, 1);
-#endif
+
 			mmc_request_done(host->mmc, mrq);
 			return;
 		} else
@@ -300,6 +422,193 @@
 	return;
 }
 
+#ifdef CONFIG_MMC_MSM_SPS_SUPPORT
+/**
+ * Callback notification from SPS driver
+ *
+ * This callback function gets called from the SPS driver
+ * when the requested SPS data transfer is completed.
+ *
+ * The SPS driver invokes this callback in BAM irq context,
+ * so the SDCC driver schedules a tasklet to process the
+ * notification later in tasklet context and immediately
+ * returns control to the SPS driver.
+ *
+ * @notify - Pointer to sps event notify structure
+ *
+ */
+static void
+msmsdcc_sps_complete_cb(struct sps_event_notify *notify)
+{
+	struct msmsdcc_host *host =
+		(struct msmsdcc_host *)
+		((struct sps_event_notify *)notify)->user;
+
+	host->sps.notify = *notify;
+	pr_debug("%s: %s: sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
+		mmc_hostname(host->mmc), __func__, notify->event_id,
+		notify->data.transfer.iovec.addr,
+		notify->data.transfer.iovec.size,
+		notify->data.transfer.iovec.flags);
+	/* Schedule a tasklet for completing data transfer */
+	tasklet_schedule(&host->sps.tlet);
+}
+
+/**
+ * Tasklet handler for processing SPS callback event
+ *
+ * This function processes the SPS event notification,
+ * checks whether the SPS transfer has completed, and then
+ * notifies the status to the MMC core layer accordingly.
+ *
+ * This function is called in tasklet context.
+ *
+ * @data - Pointer to sdcc driver data
+ *
+ */
+static void msmsdcc_sps_complete_tlet(unsigned long data)
+{
+	unsigned long flags;
+	int i, rc;
+	u32 data_xfered = 0;
+	struct mmc_request *mrq;
+	struct sps_iovec iovec;
+	struct sps_pipe *sps_pipe_handle;
+	struct msmsdcc_host *host = (struct msmsdcc_host *)data;
+	struct sps_event_notify *notify = &host->sps.notify;
+
+	spin_lock_irqsave(&host->lock, flags);
+	if (host->sps.dir == DMA_FROM_DEVICE)
+		sps_pipe_handle = host->sps.prod.pipe_handle;
+	else
+		sps_pipe_handle = host->sps.cons.pipe_handle;
+	mrq = host->curr.mrq;
+
+	if (!mrq) {
+		spin_unlock_irqrestore(&host->lock, flags);
+		return;
+	}
+
+	pr_debug("%s: %s: sps event_id=%d\n",
+		mmc_hostname(host->mmc), __func__,
+		notify->event_id);
+
+	if (msmsdcc_is_dml_busy(host)) {
+		/* oops !!! this should never happen. */
+		pr_err("%s: %s: Received SPS EOT event"
+			" but DML HW is still busy !!!\n",
+			mmc_hostname(host->mmc), __func__);
+	}
+	/*
+	 * Got End of transfer event!!! Check if all of the data
+	 * has been transferred?
+	 */
+	for (i = 0; i < host->sps.xfer_req_cnt; i++) {
+		rc = sps_get_iovec(sps_pipe_handle, &iovec);
+		if (rc) {
+			pr_err("%s: %s: sps_get_iovec() failed rc=%d, i=%d",
+				mmc_hostname(host->mmc), __func__, rc, i);
+			break;
+		}
+		data_xfered += iovec.size;
+	}
+
+	if (data_xfered == host->curr.xfer_size) {
+		host->curr.data_xfered = host->curr.xfer_size;
+		host->curr.xfer_remain -= host->curr.xfer_size;
+		pr_debug("%s: Data xfer success. data_xfered=0x%x\n",
+			mmc_hostname(host->mmc),
+			host->curr.xfer_size);
+	} else {
+		pr_err("%s: Data xfer failed. data_xfered=0x%x,"
+			" xfer_size=%d\n", mmc_hostname(host->mmc),
+			data_xfered, host->curr.xfer_size);
+		msmsdcc_reset_and_restore(host);
+		if (!mrq->data->error)
+			mrq->data->error = -EIO;
+	}
+
+	/* Unmap sg buffers */
+	dma_unmap_sg(mmc_dev(host->mmc), host->sps.sg, host->sps.num_ents,
+			 host->sps.dir);
+
+	host->sps.sg = NULL;
+	host->sps.busy = 0;
+
+	if (host->curr.got_dataend || mrq->data->error) {
+		/*
+		 * If we've already gotten our DATAEND / DATABLKEND
+		 * for this request, then complete it through here.
+		 */
+		msmsdcc_stop_data(host);
+
+		if (!mrq->data->error) {
+			host->curr.data_xfered = host->curr.xfer_size;
+			host->curr.xfer_remain -= host->curr.xfer_size;
+		}
+		if (!mrq->data->stop || mrq->cmd->error) {
+			host->curr.mrq = NULL;
+			host->curr.cmd = NULL;
+			mrq->data->bytes_xfered = host->curr.data_xfered;
+			del_timer(&host->req_tout_timer);
+			spin_unlock_irqrestore(&host->lock, flags);
+
+			mmc_request_done(host->mmc, mrq);
+			return;
+		} else {
+			msmsdcc_start_command(host, mrq->data->stop, 0);
+		}
+	}
+	spin_unlock_irqrestore(&host->lock, flags);
+}
+
+/**
+ * Exit from current SPS data transfer
+ *
+ * This function exits from the current SPS data transfer.
+ *
+ * This function should be called when error condition
+ * is encountered during data transfer.
+ *
+ * @host - Pointer to sdcc host structure
+ *
+ */
+static void msmsdcc_sps_exit_curr_xfer(struct msmsdcc_host *host)
+{
+	struct mmc_request *mrq;
+
+	mrq = host->curr.mrq;
+	BUG_ON(!mrq);
+
+	msmsdcc_reset_and_restore(host);
+	if (!mrq->data->error)
+		mrq->data->error = -EIO;
+
+	/* Unmap sg buffers */
+	dma_unmap_sg(mmc_dev(host->mmc), host->sps.sg, host->sps.num_ents,
+			 host->sps.dir);
+
+	host->sps.sg = NULL;
+	host->sps.busy = 0;
+	if (host->curr.data)
+		msmsdcc_stop_data(host);
+
+	if (!mrq->data->stop || mrq->cmd->error)
+		msmsdcc_request_end(host, mrq);
+	else
+		msmsdcc_start_command(host, mrq->data->stop, 0);
+
+}
+#else
+static inline void msmsdcc_sps_complete_cb(struct sps_event_notify *notify) { }
+static inline void msmsdcc_sps_complete_tlet(unsigned long data) { }
+static inline void msmsdcc_sps_exit_curr_xfer(struct msmsdcc_host *host) { }
+#endif /* CONFIG_MMC_MSM_SPS_SUPPORT */
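The callback/tasklet split documented above relies on the tasklet having been registered once up front. A minimal sketch of that wiring, assuming it lives in the SPS init/probe path (not shown in this hunk):

	/* Defer SPS completion handling out of BAM irq context */
	tasklet_init(&host->sps.tlet, msmsdcc_sps_complete_tlet,
		     (unsigned long)host);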
+
+static void msmsdcc_enable_cdr_cm_sdc4_dll(struct msmsdcc_host *host);
+
 static void
 msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
 			  unsigned int result,
@@ -316,16 +625,13 @@
 	tasklet_schedule(&host->dma_tlet);
 }
 
-static int validate_dma(struct msmsdcc_host *host, struct mmc_data *data)
+static int msmsdcc_check_dma_op_req(struct mmc_data *data)
 {
-	if (host->dma.channel == -1)
-		return -ENOENT;
-
-	if ((data->blksz * data->blocks) < MCI_FIFOSIZE)
+	if (((data->blksz * data->blocks) < MCI_FIFOSIZE) ||
+	     ((data->blksz * data->blocks) % MCI_FIFOSIZE))
 		return -EINVAL;
-	if ((data->blksz * data->blocks) % MCI_FIFOSIZE)
-		return -EINVAL;
-	return 0;
+	else
+		return 0;
 }
 
 static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
@@ -335,34 +641,32 @@
 	uint32_t rows;
 	uint32_t crci;
 	unsigned int n;
-	int i, rc;
+	int i;
 	struct scatterlist *sg = data->sg;
 
-	rc = validate_dma(host, data);
-	if (rc)
-		return rc;
+	if (host->dma.channel == -1)
+		return -ENOENT;
 
 	host->dma.sg = data->sg;
 	host->dma.num_ents = data->sg_len;
 
-       BUG_ON(host->dma.num_ents > NR_SG); /* Prevent memory corruption */
+	BUG_ON(host->dma.num_ents > NR_SG); /* Prevent memory corruption */
 
 	nc = host->dma.nc;
 
-	switch (host->pdev_id) {
-	case 1:
-		crci = MSMSDCC_CRCI_SDC1;
-		break;
-	case 2:
-		crci = MSMSDCC_CRCI_SDC2;
-		break;
-	case 3:
-		crci = MSMSDCC_CRCI_SDC3;
-		break;
-	case 4:
-		crci = MSMSDCC_CRCI_SDC4;
-		break;
-	default:
+	if (host->pdev_id == 1)
+		crci = DMOV_SDC1_CRCI;
+	else if (host->pdev_id == 2)
+		crci = DMOV_SDC2_CRCI;
+	else if (host->pdev_id == 3)
+		crci = DMOV_SDC3_CRCI;
+	else if (host->pdev_id == 4)
+		crci = DMOV_SDC4_CRCI;
+#ifdef DMOV_SDC5_CRCI
+	else if (host->pdev_id == 5)
+		crci = DMOV_SDC5_CRCI;
+#endif
+	else {
 		host->dma.sg = NULL;
 		host->dma.num_ents = 0;
 		return -ENOENT;
@@ -373,33 +677,18 @@
 	else
 		host->dma.dir = DMA_TO_DEVICE;
 
+	/* host->curr.user_pages = (data->flags & MMC_DATA_USERPAGE); */
 	host->curr.user_pages = 0;
-
 	box = &nc->cmd[0];
-
-	/* location of command block must be 64 bit aligned */
-	BUG_ON(host->dma.cmd_busaddr & 0x07);
-
-	nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
-	host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
-			       DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
-	host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
-
-	n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
-			host->dma.num_ents, host->dma.dir);
-	if (n == 0) {
-		printk(KERN_ERR "%s: Unable to map in all sg elements\n",
-			mmc_hostname(host->mmc));
-		host->dma.sg = NULL;
-		host->dma.num_ents = 0;
-		return -ENOMEM;
-	}
-
-	for_each_sg(host->dma.sg, sg, n, i) {
-
+	for (i = 0; i < host->dma.num_ents; i++) {
 		box->cmd = CMD_MODE_BOX;
 
-		if (i == n - 1)
+		/* Initialize sg dma address */
+		sg->dma_address = pfn_to_dma(mmc_dev(host->mmc),
+					      page_to_pfn(sg_page(sg)))
+					      + sg->offset;
+
+		if (i == (host->dma.num_ents - 1))
 			box->cmd |= CMD_LC;
 		rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ?
 			(sg_dma_len(sg) / MCI_FIFOSIZE) + 1 :
@@ -427,25 +716,140 @@
 			box->cmd |= CMD_DST_CRCI(crci);
 		}
 		box++;
+		sg++;
+	}
+
+	/* location of command block must be 64 bit aligned */
+	BUG_ON(host->dma.cmd_busaddr & 0x07);
+
+	nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
+	host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
+			       DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
+	host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
+	host->dma.hdr.crci_mask = msm_dmov_build_crci_mask(1, crci);
+
+	n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
+			host->dma.num_ents, host->dma.dir);
+	/* dsb inside dma_map_sg will write nc out to mem as well */
+
+	if (n != host->dma.num_ents) {
+		pr_err("%s: Unable to map in all sg elements\n",
+		       mmc_hostname(host->mmc));
+		host->dma.sg = NULL;
+		host->dma.num_ents = 0;
+		return -ENOMEM;
 	}
 
 	return 0;
 }
 
-static int
-snoop_cccr_abort(struct mmc_command *cmd)
+#ifdef CONFIG_MMC_MSM_SPS_SUPPORT
+/**
+ * Submits data transfer request to SPS driver
+ *
+ * This function makes the sg (scatter-gather) data buffers
+ * DMA ready and then submits them to the SPS driver for
+ * transfer.
+ *
+ * @host - Pointer to sdcc host structure
+ * @data - Pointer to mmc_data structure
+ *
+ * @return 0 if success else negative value
+ */
+static int msmsdcc_sps_start_xfer(struct msmsdcc_host *host,
+				struct mmc_data *data)
 {
-	if ((cmd->opcode == 52) &&
-	    (cmd->arg & 0x80000000) &&
-	    (((cmd->arg >> 9) & 0x1ffff) == SDIO_CCCR_ABORT))
-		return 1;
-	return 0;
+	int rc = 0;
+	u32 flags;
+	int i;
+	u32 addr, len, data_cnt;
+	struct scatterlist *sg = data->sg;
+	struct sps_pipe *sps_pipe_handle;
+
+	BUG_ON(data->sg_len > NR_SG); /* Prevent memory corruption */
+
+	host->sps.sg = data->sg;
+	host->sps.num_ents = data->sg_len;
+	host->sps.xfer_req_cnt = 0;
+	if (data->flags & MMC_DATA_READ) {
+		host->sps.dir = DMA_FROM_DEVICE;
+		sps_pipe_handle = host->sps.prod.pipe_handle;
+	} else {
+		host->sps.dir = DMA_TO_DEVICE;
+		sps_pipe_handle = host->sps.cons.pipe_handle;
+	}
+
+	/* Make sg buffers DMA ready */
+	rc = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+			host->sps.dir);
+
+	if (rc != data->sg_len) {
+		pr_err("%s: Unable to map in all sg elements, rc=%d\n",
+		       mmc_hostname(host->mmc), rc);
+		host->sps.sg = NULL;
+		host->sps.num_ents = 0;
+		rc = -ENOMEM;
+		goto dma_map_err;
+	}
+
+	pr_debug("%s: %s: %s: pipe=0x%x, total_xfer=0x%x, sg_len=%d\n",
+		mmc_hostname(host->mmc), __func__,
+		host->sps.dir == DMA_FROM_DEVICE ? "READ" : "WRITE",
+		(u32)sps_pipe_handle, host->curr.xfer_size, data->sg_len);
+
+	for (i = 0; i < data->sg_len; i++) {
+		/*
+		 * Check whether this is the last buffer to transfer;
+		 * if so, set the INT and EOT flags.
+		 */
+		len = sg_dma_len(sg);
+		addr = sg_dma_address(sg);
+		flags = 0;
+		while (len > 0) {
+			if (len > SPS_MAX_DESC_SIZE) {
+				data_cnt = SPS_MAX_DESC_SIZE;
+			} else {
+				data_cnt = len;
+				if (i == data->sg_len - 1)
+					flags = SPS_IOVEC_FLAG_INT |
+						SPS_IOVEC_FLAG_EOT;
+			}
+			rc = sps_transfer_one(sps_pipe_handle, addr,
+						data_cnt, host, flags);
+			if (rc) {
+				pr_err("%s: sps_transfer_one() error! rc=%d,"
+					" pipe=0x%x, sg=0x%x, sg_buf_no=%d\n",
+					mmc_hostname(host->mmc), rc,
+					(u32)sps_pipe_handle, (u32)sg, i);
+				goto dma_map_err;
+			}
+			addr += data_cnt;
+			len -= data_cnt;
+			host->sps.xfer_req_cnt++;
+		}
+		sg++;
+	}
+	goto out;
+
+dma_map_err:
+	/* unmap sg buffers */
+	dma_unmap_sg(mmc_dev(host->mmc), host->sps.sg, host->sps.num_ents,
+			host->sps.dir);
+out:
+	return rc;
 }
+#else
+static int msmsdcc_sps_start_xfer(struct msmsdcc_host *host,
+				struct mmc_data *data) { return 0; }
+#endif /* CONFIG_MMC_MSM_SPS_SUPPORT */
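To make the chunking loop above concrete: a single 40 KB sg entry with SPS_MAX_DESC_SIZE of 16 KB is submitted as three descriptors of 16 KB, 16 KB and 8 KB; only the final descriptor of the last sg entry carries SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT, so host->sps.xfer_req_cnt ends up at 3 and exactly one EOT event is expected back.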
 
 static void
 msmsdcc_start_command_deferred(struct msmsdcc_host *host,
 				struct mmc_command *cmd, u32 *c)
 {
+	DBG(host, "op %02x arg %08x flags %08x\n",
+	    cmd->opcode, cmd->arg, cmd->flags);
+
 	*c |= (cmd->opcode | MCI_CPSM_ENABLE);
 
 	if (cmd->flags & MMC_RSP_PRESENT) {
@@ -462,22 +866,33 @@
 	      (cmd->opcode == 53))
 		*c |= MCI_CSPM_DATCMD;
 
+	/* Check if AUTO CMD19 is required or not? */
+	if (((cmd->opcode == 17) || (cmd->opcode == 18)) &&
+		host->tuning_needed) {
+		msmsdcc_enable_cdr_cm_sdc4_dll(host);
+		*c |= MCI_CSPM_AUTO_CMD19;
+	}
+
 	if (host->prog_scan && (cmd->opcode == 12)) {
 		*c |= MCI_CPSM_PROGENA;
-		host->prog_enable = true;
+		host->prog_enable = 1;
 	}
 
 	if (cmd == cmd->mrq->stop)
 		*c |= MCI_CSPM_MCIABORT;
 
-	if (snoop_cccr_abort(cmd))
-		*c |= MCI_CSPM_MCIABORT;
-
 	if (host->curr.cmd != NULL) {
-		printk(KERN_ERR "%s: Overlapping command requests\n",
-			mmc_hostname(host->mmc));
+		pr_err("%s: Overlapping command requests\n",
+		       mmc_hostname(host->mmc));
 	}
 	host->curr.cmd = cmd;
+
+	/*
+	 * Kick the software command timeout timer here.
+	 * Timer expires in 10 secs.
+	 */
+	mod_timer(&host->req_tout_timer,
+			(jiffies + msecs_to_jiffies(MSM_MMC_REQ_TIMEOUT)));
 }
 
 static void
@@ -486,6 +901,7 @@
 {
 	unsigned int datactrl, timeout;
 	unsigned long long clks;
+	void __iomem *base = host->base;
 	unsigned int pio_irqmask = 0;
 
 	host->curr.data = data;
@@ -498,9 +914,35 @@
 
 	datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);
 
-	if (!msmsdcc_config_dma(host, data))
-		datactrl |= MCI_DPSM_DMAENABLE;
-	else {
+	if (!msmsdcc_check_dma_op_req(data)) {
+		if (host->is_dma_mode && !msmsdcc_config_dma(host, data)) {
+			datactrl |= MCI_DPSM_DMAENABLE;
+		} else if (host->is_sps_mode) {
+			if (!msmsdcc_is_dml_busy(host)) {
+				if (!msmsdcc_sps_start_xfer(host, data)) {
+					/* Now kick start DML transfer */
+					mb();
+					msmsdcc_dml_start_xfer(host, data);
+					datactrl |= MCI_DPSM_DMAENABLE;
+					host->sps.busy = 1;
+				}
+			} else {
+				/*
+				 * Can't proceed with a new transfer as the
+				 * previous transfer is still in progress.
+				 * There is no point in falling back to PIO
+				 * mode either. Should this be a kernel panic?
+				 */
+				pr_err("%s: %s: DML HW is busy!!!"
+					" Can't perform new SPS transfers"
+					" now\n", mmc_hostname(host->mmc),
+					__func__);
+			}
+		}
+	}
+
+	/* Is data transfer in PIO mode required? */
+	if (!(datactrl & MCI_DPSM_DMAENABLE)) {
 		host->pio.sg = data->sg;
 		host->pio.sg_len = data->sg_len;
 		host->pio.sg_off = 0;
@@ -510,43 +952,56 @@
 			if (host->curr.xfer_remain < MCI_FIFOSIZE)
 				pio_irqmask |= MCI_RXDATAAVLBLMASK;
 		} else
-			pio_irqmask = MCI_TXFIFOHALFEMPTYMASK;
+			pio_irqmask = MCI_TXFIFOHALFEMPTYMASK |
+					MCI_TXFIFOEMPTYMASK;
 	}
 
 	if (data->flags & MMC_DATA_READ)
 		datactrl |= MCI_DPSM_DIRECTION;
 
 	clks = (unsigned long long)data->timeout_ns * host->clk_rate;
-	do_div(clks, NSEC_PER_SEC);
+	do_div(clks, 1000000000UL);
 	timeout = data->timeout_clks + (unsigned int)clks*2 ;
 
-	if (datactrl & MCI_DPSM_DMAENABLE) {
-		/* Save parameters for the exec function */
+	if (host->is_dma_mode && (datactrl & MCI_DPSM_DMAENABLE)) {
+		/* Use ADM (Application Data Mover) HW for Data transfer */
+		/* Save parameters for the dma exec function */
 		host->cmd_timeout = timeout;
 		host->cmd_pio_irqmask = pio_irqmask;
 		host->cmd_datactrl = datactrl;
 		host->cmd_cmd = cmd;
 
-		host->dma.hdr.execute_func = msmsdcc_dma_exec_func;
-		host->dma.hdr.data = (void *)host;
+		host->dma.hdr.exec_func = msmsdcc_dma_exec_func;
+		host->dma.hdr.user = (void *)host;
 		host->dma.busy = 1;
+		if (data->flags & MMC_DATA_WRITE)
+			host->prog_scan = 1;
 
 		if (cmd) {
 			msmsdcc_start_command_deferred(host, cmd, &c);
 			host->cmd_c = c;
 		}
-		msm_dmov_enqueue_cmd(host->dma.channel, &host->dma.hdr);
-		if (data->flags & MMC_DATA_WRITE)
-			host->prog_scan = true;
+		writel_relaxed((readl_relaxed(host->base + MMCIMASK0) &
+				(~(MCI_IRQ_PIO))) | host->cmd_pio_irqmask,
+				host->base + MMCIMASK0);
+		mb();
+		msm_dmov_enqueue_cmd_ext(host->dma.channel, &host->dma.hdr);
 	} else {
-		msmsdcc_writel(host, timeout, MMCIDATATIMER);
+		/* SPS-BAM mode or PIO mode */
+		if (data->flags & MMC_DATA_WRITE)
+			host->prog_scan = 1;
+		writel_relaxed(timeout, base + MMCIDATATIMER);
 
-		msmsdcc_writel(host, host->curr.xfer_size, MMCIDATALENGTH);
+		writel_relaxed(host->curr.xfer_size, base + MMCIDATALENGTH);
 
-		msmsdcc_writel(host, pio_irqmask, MMCIMASK1);
-		msmsdcc_writel(host, datactrl, MMCIDATACTRL);
+		writel_relaxed((readl_relaxed(host->base + MMCIMASK0) &
+				(~(MCI_IRQ_PIO))) | pio_irqmask,
+				host->base + MMCIMASK0);
+		msmsdcc_delay(host);	/* Allow parms to be applied */
+		writel_relaxed(datactrl, base + MMCIDATACTRL);
 
 		if (cmd) {
+			msmsdcc_delay(host); /* Delay between data/command */
 			/* Daisy-chain the command if requested */
 			msmsdcc_start_command(host, cmd, c);
 		}
@@ -556,11 +1011,6 @@
 static void
 msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd, u32 c)
 {
-	if (cmd == cmd->mrq->stop)
-		c |= MCI_CSPM_MCIABORT;
-
-	host->stats.cmds++;
-
 	msmsdcc_start_command_deferred(host, cmd, &c);
 	msmsdcc_start_command_exec(host, cmd->arg, c);
 }
@@ -570,15 +1020,28 @@
 		 unsigned int status)
 {
 	if (status & MCI_DATACRCFAIL) {
-		pr_err("%s: Data CRC error\n", mmc_hostname(host->mmc));
-		pr_err("%s: opcode 0x%.8x\n", __func__,
-		       data->mrq->cmd->opcode);
-		pr_err("%s: blksz %d, blocks %d\n", __func__,
-		       data->blksz, data->blocks);
-		data->error = -EILSEQ;
+		if (!(data->mrq->cmd->opcode == MMC_BUS_TEST_W
+			|| data->mrq->cmd->opcode == MMC_BUS_TEST_R)) {
+			pr_err("%s: Data CRC error\n",
+			       mmc_hostname(host->mmc));
+			pr_err("%s: opcode 0x%.8x\n", __func__,
+			       data->mrq->cmd->opcode);
+			pr_err("%s: blksz %d, blocks %d\n", __func__,
+			       data->blksz, data->blocks);
+			data->error = -EILSEQ;
+		}
 	} else if (status & MCI_DATATIMEOUT) {
-		pr_err("%s: Data timeout\n", mmc_hostname(host->mmc));
-		data->error = -ETIMEDOUT;
+		/* CRC is optional for the bus test commands; not all
+		 * cards send a CRC back. The controller still waits
+		 * for the CRC and times out, so ignore data timeouts
+		 * during the bus test.
+		 */
+		if (!(data->mrq->cmd->opcode == MMC_BUS_TEST_W
+			|| data->mrq->cmd->opcode == MMC_BUS_TEST_R)) {
+			pr_err("%s: Data timeout\n",
+				 mmc_hostname(host->mmc));
+			data->error = -ETIMEDOUT;
+		}
 	} else if (status & MCI_RXOVERRUN) {
 		pr_err("%s: RX overrun\n", mmc_hostname(host->mmc));
 		data->error = -EIO;
@@ -587,23 +1050,28 @@
 		data->error = -EIO;
 	} else {
 		pr_err("%s: Unknown error (0x%.8x)\n",
-		       mmc_hostname(host->mmc), status);
+		      mmc_hostname(host->mmc), status);
 		data->error = -EIO;
 	}
-}
 
+	/* Dummy CMD52 is not needed when CMD53 has errors */
+	if (host->plat->dummy52_required && host->dummy_52_needed)
+		host->dummy_52_needed = 0;
+}
 
 static int
 msmsdcc_pio_read(struct msmsdcc_host *host, char *buffer, unsigned int remain)
 {
+	void __iomem	*base = host->base;
 	uint32_t	*ptr = (uint32_t *) buffer;
 	int		count = 0;
 
 	if (remain % 4)
 		remain = ((remain >> 2) + 1) << 2;
 
-	while (msmsdcc_readl(host, MMCISTATUS) & MCI_RXDATAAVLBL) {
-		*ptr = msmsdcc_readl(host, MMCIFIFO + (count % MCI_FIFOSIZE));
+	while (readl_relaxed(base + MMCISTATUS) & MCI_RXDATAAVLBL) {
+
+		*ptr = readl_relaxed(base + MMCIFIFO + (count % MCI_FIFOSIZE));
 		ptr++;
 		count += sizeof(uint32_t);
 
@@ -616,16 +1084,16 @@
 
 static int
 msmsdcc_pio_write(struct msmsdcc_host *host, char *buffer,
-		  unsigned int remain, u32 status)
+		  unsigned int remain)
 {
 	void __iomem *base = host->base;
 	char *ptr = buffer;
+	unsigned int maxcnt = MCI_FIFOHALFSIZE;
 
-	do {
-		unsigned int count, maxcnt, sz;
+	while (readl_relaxed(base + MMCISTATUS) &
+		(MCI_TXFIFOEMPTY | MCI_TXFIFOHALFEMPTY)) {
+		unsigned int count, sz;
 
-		maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE :
-						    MCI_FIFOHALFSIZE;
 		count = min(remain, maxcnt);
 
 		sz = count % 4 ? (count >> 2) + 1 : (count >> 2);
@@ -635,49 +1103,38 @@
 
 		if (remain == 0)
 			break;
-
-		status = msmsdcc_readl(host, MMCISTATUS);
-	} while (status & MCI_TXFIFOHALFEMPTY);
+	}
+	mb();
 
 	return ptr - buffer;
 }
 
-static int
-msmsdcc_spin_on_status(struct msmsdcc_host *host, uint32_t mask, int maxspin)
-{
-	while (maxspin) {
-		if ((msmsdcc_readl(host, MMCISTATUS) & mask))
-			return 0;
-		udelay(1);
-		--maxspin;
-	}
-	return -ETIMEDOUT;
-}
-
 static irqreturn_t
 msmsdcc_pio_irq(int irq, void *dev_id)
 {
 	struct msmsdcc_host	*host = dev_id;
+	void __iomem		*base = host->base;
 	uint32_t		status;
 
-	status = msmsdcc_readl(host, MMCISTATUS);
+	status = readl_relaxed(base + MMCISTATUS);
+	if (((readl_relaxed(host->base + MMCIMASK0) & status) &
+				(MCI_IRQ_PIO)) == 0)
+		return IRQ_NONE;
+
+#if IRQ_DEBUG
+	msmsdcc_print_status(host, "irq1-r", status);
+#endif
+
+	spin_lock(&host->lock);
 
 	do {
 		unsigned long flags;
 		unsigned int remain, len;
 		char *buffer;
 
-		if (!(status & (MCI_TXFIFOHALFEMPTY | MCI_RXDATAAVLBL))) {
-			if (host->curr.xfer_remain == 0 || !msmsdcc_piopoll)
-				break;
-
-			if (msmsdcc_spin_on_status(host,
-						   (MCI_TXFIFOHALFEMPTY |
-						   MCI_RXDATAAVLBL),
-						   PIO_SPINMAX)) {
-				break;
-			}
-		}
+		if (!(status & (MCI_TXFIFOHALFEMPTY | MCI_TXFIFOEMPTY
+				| MCI_RXDATAAVLBL)))
+			break;
 
 		/* Map the current scatter buffer */
 		local_irq_save(flags);
@@ -685,11 +1142,12 @@
 				     KM_BIO_SRC_IRQ) + host->pio.sg->offset;
 		buffer += host->pio.sg_off;
 		remain = host->pio.sg->length - host->pio.sg_off;
+
 		len = 0;
 		if (status & MCI_RXACTIVE)
 			len = msmsdcc_pio_read(host, buffer, remain);
 		if (status & MCI_TXACTIVE)
-			len = msmsdcc_pio_write(host, buffer, remain, status);
+			len = msmsdcc_pio_write(host, buffer, remain);
 
 		/* Unmap the buffer */
 		kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
@@ -700,55 +1158,114 @@
 		host->curr.data_xfered += len;
 		remain -= len;
 
-		if (remain == 0) {
-			/* This sg page is full - do some housekeeping */
-			if (status & MCI_RXACTIVE && host->curr.user_pages)
-				flush_dcache_page(sg_page(host->pio.sg));
+		if (remain) /* Done with this page? */
+			break; /* Nope */
 
-			if (!--host->pio.sg_len) {
-				memset(&host->pio, 0, sizeof(host->pio));
-				break;
-			}
+		if (status & MCI_RXACTIVE && host->curr.user_pages)
+			flush_dcache_page(sg_page(host->pio.sg));
 
-			/* Advance to next sg */
-			host->pio.sg++;
-			host->pio.sg_off = 0;
+		if (!--host->pio.sg_len) {
+			memset(&host->pio, 0, sizeof(host->pio));
+			break;
 		}
 
-		status = msmsdcc_readl(host, MMCISTATUS);
+		/* Advance to next sg */
+		host->pio.sg++;
+		host->pio.sg_off = 0;
+
+		status = readl_relaxed(base + MMCISTATUS);
 	} while (1);
 
-	if (status & MCI_RXACTIVE && host->curr.xfer_remain < MCI_FIFOSIZE)
-		msmsdcc_writel(host, MCI_RXDATAAVLBLMASK, MMCIMASK1);
+	if (status & MCI_RXACTIVE && host->curr.xfer_remain < MCI_FIFOSIZE) {
+		writel_relaxed((readl_relaxed(host->base + MMCIMASK0) &
+				(~(MCI_IRQ_PIO))) | MCI_RXDATAAVLBLMASK,
+				host->base + MMCIMASK0);
+		if (!host->curr.xfer_remain) {
+			/* Delay needed (same port was just written) */
+			msmsdcc_delay(host);
+			writel_relaxed((readl_relaxed(host->base + MMCIMASK0) &
+				(~(MCI_IRQ_PIO))) | 0, host->base + MMCIMASK0);
+		}
+		mb();
+	} else if (!host->curr.xfer_remain) {
+		writel_relaxed((readl_relaxed(host->base + MMCIMASK0) &
+				(~(MCI_IRQ_PIO))) | 0, host->base + MMCIMASK0);
+		mb();
+	}
 
-	if (!host->curr.xfer_remain)
-		msmsdcc_writel(host, 0, MMCIMASK1);
+	spin_unlock(&host->lock);
 
 	return IRQ_HANDLED;
 }
 
+static void
+msmsdcc_request_start(struct msmsdcc_host *host, struct mmc_request *mrq);
+
+static void msmsdcc_wait_for_rxdata(struct msmsdcc_host *host,
+					struct mmc_data *data)
+{
+	u32 loop_cnt = 0;
+
+	/*
+	 * For read commands with data smaller than the FIFO size, it is
+	 * possible to get DATAEND first while RXDATA_AVAIL is set later,
+	 * because of the synchronization delay through the asynchronous
+	 * RX FIFO. In such cases, even after the DATAEND interrupt is
+	 * received, software should poll for RXDATA_AVAIL until the
+	 * requested data has been read out of the FIFO. This works around
+	 * an abnormal but sometimes expected behavior of the SDCC3
+	 * controller.
+	 *
+	 * The RXDATAAVAIL bit can be expected to be set within 6 HCLK
+	 * cycles after the data is loaded into the RX FIFO. That amounts
+	 * to less than a microsecond, so looping 1000 times is more than
+	 * enough to cover the delay.
+	 */
+	while (((int)host->curr.xfer_remain > 0) && (++loop_cnt < 1000)) {
+		if (readl_relaxed(host->base + MMCISTATUS) & MCI_RXDATAAVLBL) {
+			spin_unlock(&host->lock);
+			msmsdcc_pio_irq(1, host);
+			spin_lock(&host->lock);
+		}
+	}
+	if (loop_cnt == 1000) {
+		pr_info("%s: Timed out while polling for Rx Data\n",
+				mmc_hostname(host->mmc));
+		data->error = -ETIMEDOUT;
+		msmsdcc_reset_and_restore(host);
+	}
+}
+
 static void msmsdcc_do_cmdirq(struct msmsdcc_host *host, uint32_t status)
 {
 	struct mmc_command *cmd = host->curr.cmd;
 
 	host->curr.cmd = NULL;
-	cmd->resp[0] = msmsdcc_readl(host, MMCIRESPONSE0);
-	cmd->resp[1] = msmsdcc_readl(host, MMCIRESPONSE1);
-	cmd->resp[2] = msmsdcc_readl(host, MMCIRESPONSE2);
-	cmd->resp[3] = msmsdcc_readl(host, MMCIRESPONSE3);
+	cmd->resp[0] = readl_relaxed(host->base + MMCIRESPONSE0);
+	cmd->resp[1] = readl_relaxed(host->base + MMCIRESPONSE1);
+	cmd->resp[2] = readl_relaxed(host->base + MMCIRESPONSE2);
+	cmd->resp[3] = readl_relaxed(host->base + MMCIRESPONSE3);
 
-	if (status & MCI_CMDTIMEOUT) {
+	if (status & (MCI_CMDTIMEOUT | MCI_AUTOCMD19TIMEOUT)) {
+#if VERBOSE_COMMAND_TIMEOUTS
+		pr_err("%s: Command timeout\n", mmc_hostname(host->mmc));
+#endif
 		cmd->error = -ETIMEDOUT;
-	} else if (status & MCI_CMDCRCFAIL &&
-		   cmd->flags & MMC_RSP_CRC) {
+	} else if ((status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) &&
+			!host->cmd19_tuning_in_progress) {
 		pr_err("%s: Command CRC error\n", mmc_hostname(host->mmc));
 		cmd->error = -EILSEQ;
 	}
 
 	if (!cmd->data || cmd->error) {
-		if (host->curr.data && host->dma.sg)
+		if (host->curr.data && host->dma.sg &&
+			host->is_dma_mode)
 			msm_dmov_stop_cmd(host->dma.channel,
 					  &host->dma.hdr, 0);
+		else if (host->curr.data && host->sps.sg &&
+			host->is_sps_mode){
+			/* Stop current SPS transfer */
+			msmsdcc_sps_exit_curr_xfer(host);
+		}
 		else if (host->curr.data) { /* Non DMA */
 			msmsdcc_reset_and_restore(host);
 			msmsdcc_stop_data(host);
@@ -756,87 +1273,28 @@
 		} else { /* host->data == NULL */
 			if (!cmd->error && host->prog_enable) {
 				if (status & MCI_PROGDONE) {
-					host->prog_scan = false;
-					host->prog_enable = false;
-					msmsdcc_request_end(host, cmd->mrq);
-				} else {
+					host->prog_scan = 0;
+					host->prog_enable = 0;
+					msmsdcc_request_end(host, cmd->mrq);
+				} else
 					host->curr.cmd = cmd;
-				}
 			} else {
 				if (host->prog_enable) {
-					host->prog_scan = false;
-					host->prog_enable = false;
+					host->prog_scan = 0;
+					host->prog_enable = 0;
+				}
+				if (cmd->data && cmd->error) {
+					msmsdcc_reset_and_restore(host);
+					if (host->plat->dummy52_required &&
+							host->dummy_52_needed)
+						host->dummy_52_needed = 0;
 				}
 				msmsdcc_request_end(host, cmd->mrq);
 			}
 		}
-	} else if (cmd->data)
+	} else if (cmd->data) {
 		if (!(cmd->data->flags & MMC_DATA_READ))
-			msmsdcc_start_data(host, cmd->data,
-						NULL, 0);
-}
-
-static void
-msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status,
-			void __iomem *base)
-{
-	struct mmc_data *data = host->curr.data;
-
-	if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
-			MCI_CMDTIMEOUT | MCI_PROGDONE) && host->curr.cmd) {
-		msmsdcc_do_cmdirq(host, status);
-	}
-
-	if (!data)
-		return;
-
-	/* Check for data errors */
-	if (status & (MCI_DATACRCFAIL | MCI_DATATIMEOUT |
-		      MCI_TXUNDERRUN | MCI_RXOVERRUN)) {
-		msmsdcc_data_err(host, data, status);
-		host->curr.data_xfered = 0;
-		if (host->dma.sg)
-			msm_dmov_stop_cmd(host->dma.channel,
-					  &host->dma.hdr, 0);
-		else {
-			msmsdcc_reset_and_restore(host);
-			if (host->curr.data)
-				msmsdcc_stop_data(host);
-			if (!data->stop)
-				msmsdcc_request_end(host, data->mrq);
-			else
-				msmsdcc_start_command(host, data->stop, 0);
-		}
-	}
-
-	/* Check for data done */
-	if (!host->curr.got_dataend && (status & MCI_DATAEND))
-		host->curr.got_dataend = 1;
-
-	/*
-	 * If DMA is still in progress, we complete via the completion handler
-	 */
-	if (host->curr.got_dataend && !host->dma.busy) {
-		/*
-		 * There appears to be an issue in the controller where
-		 * if you request a small block transfer (< fifo size),
-		 * you may get your DATAEND/DATABLKEND irq without the
-		 * PIO data irq.
-		 *
-		 * Check to see if there is still data to be read,
-		 * and simulate a PIO irq.
-		 */
-		if (readl(base + MMCISTATUS) & MCI_RXDATAAVLBL)
-			msmsdcc_pio_irq(1, host);
-
-		msmsdcc_stop_data(host);
-		if (!data->error)
-			host->curr.data_xfered = host->curr.xfer_size;
-
-		if (!data->stop)
-			msmsdcc_request_end(host, data->mrq);
-		else
-			msmsdcc_start_command(host, data->stop, 0);
+			msmsdcc_start_data(host, cmd->data, NULL, 0);
 	}
 }
 
@@ -844,57 +1302,197 @@
 msmsdcc_irq(int irq, void *dev_id)
 {
 	struct msmsdcc_host	*host = dev_id;
-	void __iomem		*base = host->base;
 	u32			status;
 	int			ret = 0;
-	int			cardint = 0;
+	int			timer = 0;
 
 	spin_lock(&host->lock);
 
 	do {
-		status = msmsdcc_readl(host, MMCISTATUS);
-		status &= msmsdcc_readl(host, MMCIMASK0);
-		msmsdcc_writel(host, status, MMCICLEAR);
+		struct mmc_command *cmd;
+		struct mmc_data *data;
 
-		if (status & MCI_SDIOINTR)
-			status &= ~MCI_SDIOINTR;
+		if (timer) {
+			timer = 0;
+			msmsdcc_delay(host);
+		}
 
-		if (!status)
+		if (!host->clks_on) {
+			pr_debug("%s: %s: SDIO async irq received\n",
+					mmc_hostname(host->mmc), __func__);
+			host->mmc->ios.clock = host->clk_rate;
+			spin_unlock(&host->lock);
+			host->mmc->ops->set_ios(host->mmc, &host->mmc->ios);
+			spin_lock(&host->lock);
+			if (host->plat->cfg_mpm_sdiowakeup &&
+				(host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ))
+				wake_lock(&host->sdio_wlock);
+			/* only async interrupt can come when clocks are off */
+			writel_relaxed(MCI_SDIOINTMASK, host->base + MMCICLEAR);
+		}
+
+		status = readl_relaxed(host->base + MMCISTATUS);
+
+		if (((readl_relaxed(host->base + MMCIMASK0) & status) &
+						(~(MCI_IRQ_PIO))) == 0)
 			break;
 
-		msmsdcc_handle_irq_data(host, status, base);
-
-		if (status & MCI_SDIOINTOPER) {
-			cardint = 1;
-			status &= ~MCI_SDIOINTOPER;
+#if IRQ_DEBUG
+		msmsdcc_print_status(host, "irq0-r", status);
+#endif
+		status &= readl_relaxed(host->base + MMCIMASK0);
+		writel_relaxed(status, host->base + MMCICLEAR);
+		mb();
+#if IRQ_DEBUG
+		msmsdcc_print_status(host, "irq0-p", status);
+#endif
+#ifdef CONFIG_MMC_MSM_SDIO_SUPPORT
+		if (status & MCI_SDIOINTROPE) {
+			if (host->sdcc_suspending)
+				wake_lock(&host->sdio_suspend_wlock);
+			mmc_signal_sdio_irq(host->mmc);
 		}
+#endif
+		if ((host->plat->dummy52_required) &&
+		    (host->dummy_52_state == DUMMY_52_STATE_SENT)) {
+			if (status & (MCI_PROGDONE | MCI_CMDCRCFAIL |
+					  MCI_CMDTIMEOUT)) {
+				if (status & MCI_CMDTIMEOUT)
+					pr_debug("%s: dummy CMD52 timeout\n",
+						mmc_hostname(host->mmc));
+				if (status & MCI_CMDCRCFAIL)
+					pr_debug("%s: dummy CMD52 CRC failed\n",
+						mmc_hostname(host->mmc));
+				host->dummy_52_state = DUMMY_52_STATE_NONE;
+				host->curr.cmd = NULL;
+				msmsdcc_request_start(host, host->curr.mrq);
+				spin_unlock(&host->lock);
+				return IRQ_HANDLED;
+			}
+			break;
+		}
+
+		data = host->curr.data;
+
+		/*
+		 * Check for proper command response
+		 */
+		cmd = host->curr.cmd;
+		if ((status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
+			MCI_CMDTIMEOUT | MCI_PROGDONE |
+			MCI_AUTOCMD19TIMEOUT)) && host->curr.cmd) {
+			msmsdcc_do_cmdirq(host, status);
+		}
+
+		if (data) {
+			/* Check for data errors */
+			if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|
+				      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+				msmsdcc_data_err(host, data, status);
+				host->curr.data_xfered = 0;
+				if (host->dma.sg && host->is_dma_mode)
+					msm_dmov_stop_cmd(host->dma.channel,
+							  &host->dma.hdr, 0);
+				else if (host->sps.sg && host->is_sps_mode) {
+					/* Stop current SPS transfer */
+					msmsdcc_sps_exit_curr_xfer(host);
+				}
+				else {
+					msmsdcc_reset_and_restore(host);
+					if (host->curr.data)
+						msmsdcc_stop_data(host);
+					if (!data->stop)
+						timer |=
+						 msmsdcc_request_end(host,
+								    data->mrq);
+					else {
+						msmsdcc_start_command(host,
+								     data->stop,
+								     0);
+						timer = 1;
+					}
+				}
+			}
+
+			/* Check for data done */
+			if (!host->curr.got_dataend && (status & MCI_DATAEND))
+				host->curr.got_dataend = 1;
+
+			if (host->curr.got_dataend) {
+				/*
+				 * If DMA is still in progress, we complete
+				 * via the completion handler
+				 */
+				if (!host->dma.busy && !host->sps.busy) {
+					/*
+					 * There appears to be an issue in the
+					 * controller where if you request a
+					 * small block transfer (< fifo size),
+					 * you may get your DATAEND/DATABLKEND
+					 * irq without the PIO data irq.
+					 *
+					 * Check to see if there's still data
+					 * to be read, and simulate a PIO irq.
+					 */
+					if (data->flags & MMC_DATA_READ)
+						msmsdcc_wait_for_rxdata(host,
+								data);
+					msmsdcc_stop_data(host);
+					if (!data->error) {
+						host->curr.data_xfered =
+							host->curr.xfer_size;
+						host->curr.xfer_remain -=
+							host->curr.xfer_size;
+					}
+
+					if (!data->stop)
+						timer |= msmsdcc_request_end(
+							  host, data->mrq);
+					else {
+						msmsdcc_start_command(host,
+							      data->stop, 0);
+						timer = 1;
+					}
+				}
+			}
+		}
+
 		ret = 1;
 	} while (status);
 
 	spin_unlock(&host->lock);
 
-	/*
-	 * We have to delay handling the card interrupt as it calls
-	 * back into the driver.
-	 */
-	if (cardint)
-		mmc_signal_sdio_irq(host->mmc);
-
 	return IRQ_RETVAL(ret);
 }
 
 static void
+msmsdcc_request_start(struct msmsdcc_host *host, struct mmc_request *mrq)
+{
+	if (mrq->data && mrq->data->flags & MMC_DATA_READ) {
+		/* Queue/read data, daisy-chain command when data starts */
+		msmsdcc_start_data(host, mrq->data, mrq->cmd, 0);
+	} else {
+		msmsdcc_start_command(host, mrq->cmd, 0);
+	}
+}
+
+static void
 msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 {
 	struct msmsdcc_host *host = mmc_priv(mmc);
-	unsigned long flags;
+	unsigned long		flags;
 
-	WARN_ON(host->curr.mrq != NULL);
-	WARN_ON(host->pwr == 0);
+	/*
+	 * Get the SDIO AL client out of LPM.
+	 */
+	if (host->plat->is_sdio_al_client)
+		msmsdcc_sdio_al_lpm(mmc, false);
 
 	spin_lock_irqsave(&host->lock, flags);
-
-	host->stats.reqs++;
+	WARN(host->curr.mrq, "Request in progress\n");
+	WARN(!host->pwr, "SDCC power is turned off\n");
+	WARN(!host->clks_on, "SDCC clocks are turned off\n");
+	WARN(host->sdcc_irq_disabled, "SDCC IRQ is disabled\n");
 
 	if (host->eject) {
 		if (mrq->data && !(mrq->data->flags & MMC_DATA_READ)) {
@@ -909,42 +1507,340 @@
 		return;
 	}
 
-	msmsdcc_enable_clocks(host);
-
 	host->curr.mrq = mrq;
 
-	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
-		/* Queue/read data, daisy-chain command when data starts */
-		msmsdcc_start_data(host, mrq->data, mrq->cmd, 0);
-	else
-		msmsdcc_start_command(host, mrq->cmd, 0);
-
-	if (host->cmdpoll && !msmsdcc_spin_on_status(host,
-				MCI_CMDRESPEND|MCI_CMDCRCFAIL|MCI_CMDTIMEOUT,
-				CMD_SPINMAX)) {
-		uint32_t status = msmsdcc_readl(host, MMCISTATUS);
-		msmsdcc_do_cmdirq(host, status);
-		msmsdcc_writel(host,
-			       MCI_CMDRESPEND | MCI_CMDCRCFAIL | MCI_CMDTIMEOUT,
-			       MMCICLEAR);
-		host->stats.cmdpoll_hits++;
-	} else {
-		host->stats.cmdpoll_misses++;
+	if (host->plat->dummy52_required) {
+		if (host->dummy_52_needed) {
+			host->dummy_52_state = DUMMY_52_STATE_SENT;
+			msmsdcc_start_command(host, &dummy52cmd,
+					      MCI_CPSM_PROGENA);
+			spin_unlock_irqrestore(&host->lock, flags);
+			if (mrq->data && mrq->data->flags == MMC_DATA_WRITE) {
+				if (mrq->cmd->opcode == SD_IO_RW_EXTENDED ||
+					mrq->cmd->opcode == 54)
+					host->dummy_52_needed = 1;
+			} else {
+				host->dummy_52_needed = 0;
+			}
+			return;
+		}
+		if (mrq->data && mrq->data->flags == MMC_DATA_WRITE) {
+			if (mrq->cmd->opcode == SD_IO_RW_EXTENDED ||
+				mrq->cmd->opcode == 54)
+				host->dummy_52_needed = 1;
+		}
 	}
+	msmsdcc_request_start(host, mrq);
 	spin_unlock_irqrestore(&host->lock, flags);
 }
 
-static void msmsdcc_setup_gpio(struct msmsdcc_host *host, bool enable)
+static inline int msmsdcc_vreg_set_voltage(struct msm_mmc_reg_data *vreg,
+					int min_uV, int max_uV)
+{
+	int rc = 0;
+
+	if (vreg->set_voltage_sup) {
+		rc = regulator_set_voltage(vreg->reg, min_uV, max_uV);
+		if (rc) {
+			pr_err("%s: regulator_set_voltage(%s) failed."
+				" min_uV=%d, max_uV=%d, rc=%d\n",
+				__func__, vreg->name, min_uV, max_uV, rc);
+		}
+	}
+
+	return rc;
+}
+
+static inline int msmsdcc_vreg_set_optimum_mode(struct msm_mmc_reg_data *vreg,
+						int uA_load)
+{
+	int rc = 0;
+
+	rc = regulator_set_optimum_mode(vreg->reg, uA_load);
+	if (rc < 0)
+		pr_err("%s: regulator_set_optimum_mode(reg=%s, uA_load=%d)"
+			" failed. rc=%d\n", __func__, vreg->name,
+			uA_load, rc);
+	else
+		/* regulator_set_optimum_mode() can return a non-zero value
+		 * even in the success case.
+		 */
+		rc = 0;
+
+	return rc;
+}
+
+static inline int msmsdcc_vreg_init_reg(struct msm_mmc_reg_data *vreg,
+				struct device *dev)
+{
+	int rc = 0;
+
+	/* Check whether the regulator is already initialized */
+	if (vreg->reg)
+		goto out;
+
+	/* Get the regulator handle */
+	vreg->reg = regulator_get(dev, vreg->name);
+	if (IS_ERR(vreg->reg)) {
+		rc = PTR_ERR(vreg->reg);
+		pr_err("%s: regulator_get(%s) failed. rc=%d\n",
+			__func__, vreg->name, rc);
+	}
+out:
+	return rc;
+}
+
+static inline void msmsdcc_vreg_deinit_reg(struct msm_mmc_reg_data *vreg)
+{
+	if (vreg->reg)
+		regulator_put(vreg->reg);
+}
+
+/* This init function should be called only once for each SDCC slot */
+static int msmsdcc_vreg_init(struct msmsdcc_host *host, bool is_init)
+{
+	int rc = 0;
+	struct msm_mmc_slot_reg_data *curr_slot;
+	struct msm_mmc_reg_data *curr_vdd_reg, *curr_vccq_reg, *curr_vddp_reg;
+	struct device *dev = mmc_dev(host->mmc);
+
+	curr_slot = host->plat->vreg_data;
+	if (!curr_slot)
+		goto out;
+
+	curr_vdd_reg = curr_slot->vdd_data;
+	curr_vccq_reg = curr_slot->vccq_data;
+	curr_vddp_reg = curr_slot->vddp_data;
+
+	if (is_init) {
+		/*
+		 * Get the regulator handle from voltage regulator framework
+		 * and then try to set the voltage level for the regulator
+		 */
+		if (curr_vdd_reg) {
+			rc = msmsdcc_vreg_init_reg(curr_vdd_reg, dev);
+			if (rc)
+				goto out;
+		}
+		if (curr_vccq_reg) {
+			rc = msmsdcc_vreg_init_reg(curr_vccq_reg, dev);
+			if (rc)
+				goto vdd_reg_deinit;
+		}
+		if (curr_vddp_reg) {
+			rc = msmsdcc_vreg_init_reg(curr_vddp_reg, dev);
+			if (rc)
+				goto vccq_reg_deinit;
+		}
+		goto out;
+	} else {
+		/* Deregister all regulators from regulator framework */
+		goto vddp_reg_deinit;
+	}
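+	/* Error/deinit path: release regulators in reverse acquisition order */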
+vddp_reg_deinit:
+	if (curr_vddp_reg)
+		msmsdcc_vreg_deinit_reg(curr_vddp_reg);
+vccq_reg_deinit:
+	if (curr_vccq_reg)
+		msmsdcc_vreg_deinit_reg(curr_vccq_reg);
+vdd_reg_deinit:
+	if (curr_vdd_reg)
+		msmsdcc_vreg_deinit_reg(curr_vdd_reg);
+out:
+	return rc;
+}
+
+static int msmsdcc_vreg_enable(struct msm_mmc_reg_data *vreg)
+{
+	int rc = 0;
+
+	if (!vreg->is_enabled) {
+		/* Set voltage level */
+		rc = msmsdcc_vreg_set_voltage(vreg, vreg->level,
+						vreg->level);
+		if (rc)
+			goto out;
+
+		rc = regulator_enable(vreg->reg);
+		if (rc) {
+			pr_err("%s: regulator_enable(%s) failed. rc=%d\n",
+			__func__, vreg->name, rc);
+			goto out;
+		}
+		vreg->is_enabled = true;
+	}
+
+	/* Put regulator in HPM (high power mode) */
+	rc = msmsdcc_vreg_set_optimum_mode(vreg, vreg->hpm_uA);
+	if (rc < 0)
+		goto vreg_disable;
+
+	goto out;
+
+vreg_disable:
+	regulator_disable(vreg->reg);
+	vreg->is_enabled = false;
+out:
+	return rc;
+}
+
+static int msmsdcc_vreg_disable(struct msm_mmc_reg_data *vreg)
+{
+	int rc = 0;
+
+	/* Never disable regulator marked as always_on */
+	if (vreg->is_enabled && !vreg->always_on) {
+		rc = regulator_disable(vreg->reg);
+		if (rc) {
+			pr_err("%s: regulator_disable(%s) failed. rc=%d\n",
+				__func__, vreg->name, rc);
+			goto out;
+		}
+		vreg->is_enabled = false;
+
+		rc = msmsdcc_vreg_set_optimum_mode(vreg, 0);
+		if (rc < 0)
+			goto out;
+
+		/* Set min. voltage level to 0 */
+		rc = msmsdcc_vreg_set_voltage(vreg, 0, vreg->level);
+		if (rc)
+			goto out;
+	} else if (vreg->is_enabled && vreg->always_on && vreg->lpm_sup) {
+		/* Put always_on regulator in LPM (low power mode) */
+		rc = msmsdcc_vreg_set_optimum_mode(vreg, vreg->lpm_uA);
+		if (rc < 0)
+			goto out;
+	}
+out:
+	return rc;
+}
+
+static int msmsdcc_setup_vreg(struct msmsdcc_host *host, bool enable)
+{
+	int rc = 0, i;
+	struct msm_mmc_slot_reg_data *curr_slot;
+	struct msm_mmc_reg_data *curr_vdd_reg, *curr_vccq_reg, *curr_vddp_reg;
+	struct msm_mmc_reg_data *vreg_table[3];
+
+	curr_slot = host->plat->vreg_data;
+	if (!curr_slot)
+		goto out;
+
+	curr_vdd_reg = vreg_table[0] = curr_slot->vdd_data;
+	curr_vccq_reg = vreg_table[1] = curr_slot->vccq_data;
+	curr_vddp_reg = vreg_table[2] = curr_slot->vddp_data;
+
+	for (i = 0; i < ARRAY_SIZE(vreg_table); i++) {
+		if (vreg_table[i]) {
+			if (enable)
+				rc = msmsdcc_vreg_enable(vreg_table[i]);
+			else
+				rc = msmsdcc_vreg_disable(vreg_table[i]);
+			if (rc)
+				goto out;
+		}
+	}
+out:
+	return rc;
+}
+
+static int msmsdcc_tune_vdd_pad_level(struct msmsdcc_host *host, int level)
+{
+	int rc = 0;
+
+	if (host->plat->vreg_data) {
+		struct msm_mmc_reg_data *vddp_reg =
+			host->plat->vreg_data->vddp_data;
+
+		if (vddp_reg && vddp_reg->is_enabled)
+			rc = msmsdcc_vreg_set_voltage(vddp_reg, level, level);
+	}
+
+	return rc;
+}
+
+static inline int msmsdcc_is_pwrsave(struct msmsdcc_host *host)
+{
+	if (host->clk_rate > 400000 && msmsdcc_pwrsave)
+		return 1;
+	return 0;
+}
+
+static inline void msmsdcc_setup_clocks(struct msmsdcc_host *host, bool enable)
+{
+	if (enable) {
+		if (!IS_ERR_OR_NULL(host->dfab_pclk))
+			clk_enable(host->dfab_pclk);
+		if (!IS_ERR(host->pclk))
+			clk_enable(host->pclk);
+		clk_enable(host->clk);
+	} else {
+		clk_disable(host->clk);
+		if (!IS_ERR(host->pclk))
+			clk_disable(host->pclk);
+		if (!IS_ERR_OR_NULL(host->dfab_pclk))
+			clk_disable(host->dfab_pclk);
+	}
+}
+
+static inline unsigned int msmsdcc_get_sup_clk_rate(struct msmsdcc_host *host,
+						unsigned int req_clk)
+{
+	unsigned int sel_clk = -1;
+
+	if (host->plat->sup_clk_table && host->plat->sup_clk_cnt) {
+		unsigned char cnt;
+
+		for (cnt = 0; cnt < host->plat->sup_clk_cnt; cnt++) {
+			if (host->plat->sup_clk_table[cnt] > req_clk)
+				break;
+			else if (host->plat->sup_clk_table[cnt] == req_clk) {
+				sel_clk = host->plat->sup_clk_table[cnt];
+				break;
+			} else
+				sel_clk = host->plat->sup_clk_table[cnt];
+		}
+	} else {
+		if ((req_clk < host->plat->msmsdcc_fmax) &&
+			(req_clk > host->plat->msmsdcc_fmid))
+			sel_clk = host->plat->msmsdcc_fmid;
+		else
+			sel_clk = req_clk;
+	}
+
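+	/*
+	 * sel_clk now holds the largest supported rate that does not exceed
+	 * req_clk; without a clock table it falls back to fmid for rates
+	 * between fmid and fmax, or to req_clk itself otherwise.
+	 */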
+	return sel_clk;
+}
+
+static inline unsigned int msmsdcc_get_min_sup_clk_rate(
+				struct msmsdcc_host *host)
+{
+	if (host->plat->sup_clk_table && host->plat->sup_clk_cnt)
+		return host->plat->sup_clk_table[0];
+	else
+		return host->plat->msmsdcc_fmin;
+}
+
+static inline unsigned int msmsdcc_get_max_sup_clk_rate(
+				struct msmsdcc_host *host)
+{
+	if (host->plat->sup_clk_table && host->plat->sup_clk_cnt)
+		return host->plat->sup_clk_table[host->plat->sup_clk_cnt - 1];
+	else
+		return host->plat->msmsdcc_fmax;
+}
+
+static int msmsdcc_setup_gpio(struct msmsdcc_host *host, bool enable)
 {
 	struct msm_mmc_gpio_data *curr;
 	int i, rc = 0;
 
-	if (!host->plat->gpio_data && host->gpio_config_status == enable)
-		return;
-
-	curr = host->plat->gpio_data;
+	curr = host->plat->pin_data->gpio_data;
 	for (i = 0; i < curr->size; i++) {
 		if (enable) {
+			if (curr->gpio[i].is_always_on &&
+				curr->gpio[i].is_enabled)
+				continue;
 			rc = gpio_request(curr->gpio[i].no,
 						curr->gpio[i].name);
 			if (rc) {
@@ -954,16 +1850,96 @@
 					curr->gpio[i].name, rc);
 				goto free_gpios;
 			}
+			curr->gpio[i].is_enabled = true;
 		} else {
+			if (curr->gpio[i].is_always_on)
+				continue;
 			gpio_free(curr->gpio[i].no);
+			curr->gpio[i].is_enabled = false;
 		}
 	}
-	host->gpio_config_status = enable;
-	return;
+	goto out;
 
 free_gpios:
-	for (; i >= 0; i--)
+	for (; i >= 0; i--) {
 		gpio_free(curr->gpio[i].no);
+		curr->gpio[i].is_enabled = false;
+	}
+out:
+	return rc;
+}
+
+static int msmsdcc_setup_pad(struct msmsdcc_host *host, bool enable)
+{
+	struct msm_mmc_pad_data *curr;
+	int i;
+
+	curr = host->plat->pin_data->pad_data;
+	for (i = 0; i < curr->drv->size; i++) {
+		if (enable)
+			msm_tlmm_set_hdrive(curr->drv->on[i].no,
+				curr->drv->on[i].val);
+		else
+			msm_tlmm_set_hdrive(curr->drv->off[i].no,
+				curr->drv->off[i].val);
+	}
+
+	for (i = 0; i < curr->pull->size; i++) {
+		if (enable)
+			msm_tlmm_set_hdrive(curr->pull->on[i].no,
+				curr->pull->on[i].val);
+		else
+			msm_tlmm_set_hdrive(curr->pull->off[i].no,
+				curr->pull->off[i].val);
+	}
+
+	return 0;
+}
+
+static u32 msmsdcc_setup_pins(struct msmsdcc_host *host, bool enable)
+{
+	int rc = 0;
+
+	if (!host->plat->pin_data || host->plat->pin_data->cfg_sts == enable)
+		return 0;
+
+	if (host->plat->pin_data->is_gpio)
+		rc = msmsdcc_setup_gpio(host, enable);
+	else
+		rc = msmsdcc_setup_pad(host, enable);
+
+	if (!rc)
+		host->plat->pin_data->cfg_sts = enable;
+
+	return rc;
+}
+
+static void msmsdcc_enable_irq_wake(struct msmsdcc_host *host)
+{
+	unsigned int wakeup_irq;
+
+	wakeup_irq = (host->plat->sdiowakeup_irq) ?
+			host->plat->sdiowakeup_irq :
+			host->core_irqres->start;
+
+	if (!host->irq_wake_enabled) {
+		enable_irq_wake(wakeup_irq);
+		host->irq_wake_enabled = true;
+	}
+}
+
+static void msmsdcc_disable_irq_wake(struct msmsdcc_host *host)
+{
+	unsigned int wakeup_irq;
+
+	wakeup_irq = (host->plat->sdiowakeup_irq) ?
+			host->plat->sdiowakeup_irq :
+			host->core_irqres->start;
+
+	if (host->irq_wake_enabled) {
+		disable_irq_wake(wakeup_irq);
+		host->irq_wake_enabled = false;
+	}
 }
 
 static void
@@ -973,118 +1949,718 @@
 	u32 clk = 0, pwr = 0;
 	int rc;
 	unsigned long flags;
+	unsigned int clock;
 
-	spin_lock_irqsave(&host->lock, flags);
-
-	msmsdcc_enable_clocks(host);
-
-	spin_unlock_irqrestore(&host->lock, flags);
+	DBG(host, "ios->clock = %u\n", ios->clock);
 
 	if (ios->clock) {
-		if (ios->clock != host->clk_rate) {
-			rc = clk_set_rate(host->clk, ios->clock);
-			if (rc < 0)
-				pr_err("%s: Error setting clock rate (%d)\n",
-				       mmc_hostname(host->mmc), rc);
-			else
-				host->clk_rate = ios->clock;
+		spin_lock_irqsave(&host->lock, flags);
+		if (!host->clks_on) {
+			msmsdcc_setup_clocks(host, true);
+			host->clks_on = 1;
+			if (mmc->card && mmc->card->type == MMC_TYPE_SDIO) {
+				if (!host->plat->sdiowakeup_irq) {
+					writel_relaxed(host->mci_irqenable,
+							host->base + MMCIMASK0);
+					mb();
+					if (host->plat->cfg_mpm_sdiowakeup &&
+					(mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ))
+						host->plat->cfg_mpm_sdiowakeup(
+						mmc_dev(mmc), SDC_DAT1_DISWAKE);
+					msmsdcc_disable_irq_wake(host);
+				} else if (!(mmc->pm_flags &
+							MMC_PM_WAKE_SDIO_IRQ)) {
+					writel_relaxed(host->mci_irqenable,
+							host->base + MMCIMASK0);
+				}
+			}
 		}
+		spin_unlock_irqrestore(&host->lock, flags);
+
+		clock = msmsdcc_get_sup_clk_rate(host, ios->clock);
+		/*
+		 * For DDR50 mode, the controller clock must run at twice
+		 * the rate required on the SD card CLK pin.
+		 */
+		if (ios->timing == MMC_TIMING_UHS_DDR50) {
+			/*
+			 * Make sure that we don't double the clock if
+			 * doubled clock rate is already set
+			 */
+			if (!host->ddr_doubled_clk_rate ||
+				(host->ddr_doubled_clk_rate &&
+				(host->ddr_doubled_clk_rate != ios->clock))) {
+				host->ddr_doubled_clk_rate =
+					msmsdcc_get_sup_clk_rate(
+						host, (ios->clock * 2));
+				clock = host->ddr_doubled_clk_rate;
+			}
+		} else {
+			host->ddr_doubled_clk_rate = 0;
+		}
+
+		if (clock != host->clk_rate) {
+			rc = clk_set_rate(host->clk, clock);
+			if (rc < 0)
+				pr_debug("%s: failed to set clk rate %u\n",
+						mmc_hostname(mmc), clock);
+			host->clk_rate = clock;
+		}
+		/*
+		 * give at least 2 MCLK cycles of delay for the clocks
+		 * and SDCC core to stabilize
+		 */
+		msmsdcc_delay(host);
 		clk |= MCI_CLK_ENABLE;
 	}
 
-	if (ios->bus_width == MMC_BUS_WIDTH_4)
-		clk |= (2 << 10); /* Set WIDEBUS */
+	if (ios->bus_width == MMC_BUS_WIDTH_8)
+		clk |= MCI_CLK_WIDEBUS_8;
+	else if (ios->bus_width == MMC_BUS_WIDTH_4)
+		clk |= MCI_CLK_WIDEBUS_4;
+	else
+		clk |= MCI_CLK_WIDEBUS_1;
 
-	if (ios->clock > 400000 && msmsdcc_pwrsave)
-		clk |= (1 << 9); /* PWRSAVE */
+	if (msmsdcc_is_pwrsave(host))
+		clk |= MCI_CLK_PWRSAVE;
 
-	clk |= (1 << 12); /* FLOW_ENA */
-	clk |= (1 << 15); /* feedback clock */
+	clk |= MCI_CLK_FLOWENA;
 
-	if (host->plat->translate_vdd)
+	host->tuning_needed = 0;
+	/*
+	 * Select the controller timing mode according
+	 * to current bus speed mode
+	 */
+	if ((ios->timing == MMC_TIMING_UHS_SDR104) ||
+		(ios->timing == MMC_TIMING_UHS_SDR50)) {
+		clk |= (4 << 14);
+		host->tuning_needed = 1;
+	} else if (ios->timing == MMC_TIMING_UHS_DDR50) {
+		clk |= (3 << 14);
+	} else {
+		clk |= (2 << 14); /* feedback clock */
+	}
+
+	/* Select free running MCLK as input clock of cm_dll_sdc4 */
+	clk |= (2 << 23);
+
+	if (host->io_pad_pwr_switch)
+		clk |= IO_PAD_PWR_SWITCH;
+
+	if (host->plat->translate_vdd && !host->sdio_gpio_lpm)
 		pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);
+	else if (!host->plat->translate_vdd && !host->sdio_gpio_lpm)
+		pwr |= msmsdcc_setup_vreg(host, !!ios->vdd);
 
 	switch (ios->power_mode) {
 	case MMC_POWER_OFF:
-		msmsdcc_setup_gpio(host, false);
+		htc_pwrsink_set(PWRSINK_SDCARD, 0);
+		if (!host->sdcc_irq_disabled) {
+			if (host->plat->cfg_mpm_sdiowakeup)
+				host->plat->cfg_mpm_sdiowakeup(
+					mmc_dev(mmc), SDC_DAT1_DISABLE);
+			disable_irq(host->core_irqres->start);
+			host->sdcc_irq_disabled = 1;
+		}
+		msmsdcc_setup_pins(host, false);
 		break;
 	case MMC_POWER_UP:
 		pwr |= MCI_PWR_UP;
-		msmsdcc_setup_gpio(host, true);
+		if (host->sdcc_irq_disabled) {
+			if (host->plat->cfg_mpm_sdiowakeup)
+				host->plat->cfg_mpm_sdiowakeup(
+					mmc_dev(mmc), SDC_DAT1_ENABLE);
+			enable_irq(host->core_irqres->start);
+			host->sdcc_irq_disabled = 0;
+		}
+		msmsdcc_setup_pins(host, true);
 		break;
 	case MMC_POWER_ON:
+		htc_pwrsink_set(PWRSINK_SDCARD, 100);
 		pwr |= MCI_PWR_ON;
 		break;
 	}
 
-	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
-		pwr |= MCI_OD;
-
-	msmsdcc_writel(host, clk, MMCICLOCK);
+	spin_lock_irqsave(&host->lock, flags);
+	if (!host->clks_on) {
+		/* force the clocks to be on */
+		msmsdcc_setup_clocks(host, true);
+		/*
+		 * give at least 2 MCLK cycles of delay for the clocks
+		 * and SDCC core to stabilize
+		 */
+		msmsdcc_delay(host);
+	}
+	writel_relaxed(clk, host->base + MMCICLOCK);
+	msmsdcc_delay(host);
 
 	if (host->pwr != pwr) {
 		host->pwr = pwr;
-		msmsdcc_writel(host, pwr, MMCIPOWER);
+		writel_relaxed(pwr, host->base + MMCIPOWER);
+		mb();
 	}
-#if BUSCLK_PWRSAVE
-	spin_lock_irqsave(&host->lock, flags);
-	msmsdcc_disable_clocks(host, 1);
+	if (!host->clks_on) {
+		/* force the clocks to be off */
+		msmsdcc_setup_clocks(host, false);
+		/*
+		 * give at least 2 MCLK cycles of delay for the clocks
+		 * and SDCC core to stabilize
+		 */
+		msmsdcc_delay(host);
+	}
+
+	if (!(clk & MCI_CLK_ENABLE) && host->clks_on) {
+		if (mmc->card && mmc->card->type == MMC_TYPE_SDIO) {
+			if (!host->plat->sdiowakeup_irq) {
+				writel_relaxed(MCI_SDIOINTMASK,
+						host->base + MMCIMASK0);
+				mb();
+				if (host->plat->cfg_mpm_sdiowakeup &&
+					(mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ))
+					host->plat->cfg_mpm_sdiowakeup(
+						mmc_dev(mmc), SDC_DAT1_ENWAKE);
+				msmsdcc_enable_irq_wake(host);
+			} else if (mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ) {
+				writel_relaxed(0, host->base + MMCIMASK0);
+			} else {
+				writel_relaxed(MCI_SDIOINTMASK,
+						host->base + MMCIMASK0);
+			}
+			msmsdcc_delay(host);
+		}
+		msmsdcc_setup_clocks(host, false);
+		host->clks_on = 0;
+	}
 	spin_unlock_irqrestore(&host->lock, flags);
-#endif
 }
 
+int msmsdcc_set_pwrsave(struct mmc_host *mmc, int pwrsave)
+{
+	struct msmsdcc_host *host = mmc_priv(mmc);
+	u32 clk;
+
+	clk = readl_relaxed(host->base + MMCICLOCK);
+	pr_debug("Changing to pwr_save=%d\n", pwrsave);
+	if (pwrsave && msmsdcc_is_pwrsave(host))
+		clk |= MCI_CLK_PWRSAVE;
+	else
+		clk &= ~MCI_CLK_PWRSAVE;
+	writel_relaxed(clk, host->base + MMCICLOCK);
+	mb();
+
+	return 0;
+}
+
+static int msmsdcc_get_ro(struct mmc_host *mmc)
+{
+	int status = -ENOSYS;
+	struct msmsdcc_host *host = mmc_priv(mmc);
+
+	if (host->plat->wpswitch) {
+		status = host->plat->wpswitch(mmc_dev(mmc));
+	} else if (host->plat->wpswitch_gpio) {
+		status = gpio_request(host->plat->wpswitch_gpio,
+					"SD_WP_Switch");
+		if (status) {
+			pr_err("%s: %s: Failed to request GPIO %d\n",
+				mmc_hostname(mmc), __func__,
+				host->plat->wpswitch_gpio);
+		} else {
+			status = gpio_direction_input(
+					host->plat->wpswitch_gpio);
+			if (!status) {
+				/*
+				 * Wait at least 300 ms of debounce
+				 * time for the GPIO input to stabilize.
+				 */
+				msleep(300);
+				status = gpio_get_value_cansleep(
+						host->plat->wpswitch_gpio);
+				status ^= !host->plat->wpswitch_polarity;
+			}
+			gpio_free(host->plat->wpswitch_gpio);
+		}
+	}
+
+	if (status < 0)
+		status = -ENOSYS;
+	pr_debug("%s: Card read-only status %d\n", __func__, status);
+
+	return status;
+}
+
+#ifdef CONFIG_MMC_MSM_SDIO_SUPPORT
 static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable)
 {
 	struct msmsdcc_host *host = mmc_priv(mmc);
 	unsigned long flags;
-	u32 status;
+
+	if (enable) {
+		spin_lock_irqsave(&host->lock, flags);
+		host->mci_irqenable |= MCI_SDIOINTOPERMASK;
+		writel_relaxed(readl_relaxed(host->base + MMCIMASK0) |
+				MCI_SDIOINTOPERMASK, host->base + MMCIMASK0);
+		spin_unlock_irqrestore(&host->lock, flags);
+	} else {
+		host->mci_irqenable &= ~MCI_SDIOINTOPERMASK;
+		writel_relaxed(readl_relaxed(host->base + MMCIMASK0) &
+				~MCI_SDIOINTOPERMASK, host->base + MMCIMASK0);
+	}
+	mb();
+}
+#endif /* CONFIG_MMC_MSM_SDIO_SUPPORT */
+
+#ifdef CONFIG_PM_RUNTIME
+static int msmsdcc_enable(struct mmc_host *mmc)
+{
+	int rc;
+	struct device *dev = mmc->parent;
+
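+
+	/*
+	 * If the host already holds an active runtime PM reference, just
+	 * bump the usage count instead of forcing a synchronous resume.
+	 */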
+	if (atomic_read(&dev->power.usage_count) > 0) {
+		pm_runtime_get_noresume(dev);
+		goto out;
+	}
+
+	rc = pm_runtime_get_sync(dev);
+
+	if (rc < 0) {
+		pr_info("%s: %s: failed with error %d", mmc_hostname(mmc),
+				__func__, rc);
+		return rc;
+	}
+out:
+	return 0;
+}
+
+static int msmsdcc_disable(struct mmc_host *mmc, int lazy)
+{
+	int rc;
+
+	if (mmc->card && mmc->card->type == MMC_TYPE_SDIO)
+		return -ENOTSUPP;
+
+	rc = pm_runtime_put_sync(mmc->parent);
+
+	if (rc < 0)
+		pr_info("%s: %s: failed with error %d", mmc_hostname(mmc),
+				__func__, rc);
+	return rc;
+}
+#else
+#define msmsdcc_enable NULL
+#define msmsdcc_disable NULL
+#endif
+
+static int msmsdcc_start_signal_voltage_switch(struct mmc_host *mmc,
+						struct mmc_ios *ios)
+{
+	struct msmsdcc_host *host = mmc_priv(mmc);
+	unsigned long flags;
+	int err = 0;
+
+	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
+		/* Change voltage level of VDDPX to high voltage */
+		if (msmsdcc_tune_vdd_pad_level(host, 2950000)) {
+			pr_err("%s: %s: failed to change vddp level to %d",
+				mmc_hostname(mmc), __func__, 2950000);
+		}
+		goto out;
+	} else if (ios->signal_voltage != MMC_SIGNAL_VOLTAGE_180) {
+		/* invalid selection. don't do anything */
+		goto out;
+	}
 
 	spin_lock_irqsave(&host->lock, flags);
-	if (msmsdcc_sdioirq == 1) {
-		status = msmsdcc_readl(host, MMCIMASK0);
-		if (enable)
-			status |= MCI_SDIOINTOPERMASK;
-		else
-			status &= ~MCI_SDIOINTOPERMASK;
-		host->saved_irq0mask = status;
-		msmsdcc_writel(host, status, MMCIMASK0);
+	/*
+	 * Reaching this point means a switch from high voltage to
+	 * low voltage is required.
+	 */
+
+	/*
+	 * Poll on MCIDATIN_3_0 and MCICMDIN bits of MCI_TEST_INPUT
+	 * register until they become all zeros.
+	 */
+	if (readl_relaxed(host->base + MCI_TEST_INPUT) & (0xF << 1)) {
+		err = -EAGAIN;
+		pr_err("%s: %s: MCIDATIN_3_0 is still not all zeros",
+			mmc_hostname(mmc), __func__);
+		goto out_unlock;
 	}
+
+	/* Stop SD CLK output. */
+	writel_relaxed((readl_relaxed(host->base + MMCICLOCK) |
+			MCI_CLK_PWRSAVE), host->base + MMCICLOCK);
+
 	spin_unlock_irqrestore(&host->lock, flags);
+
+	/*
+	 * Switch VDDPX from high voltage to low voltage
+	 * to change the VDD of the SD IO pads.
+	 */
+	if (msmsdcc_tune_vdd_pad_level(host, 1850000)) {
+		pr_err("%s: %s: failed to change vddp level to %d",
+			mmc_hostname(mmc), __func__, 1850000);
+		goto out;
+	}
+
+	spin_lock_irqsave(&host->lock, flags);
+	writel_relaxed((readl_relaxed(host->base + MMCICLOCK) |
+			IO_PAD_PWR_SWITCH), host->base + MMCICLOCK);
+	host->io_pad_pwr_switch = 1;
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	/* Wait 5 ms for the voltage regulator in the card to become stable. */
+	usleep_range(5000, 5500);
+
+	spin_lock_irqsave(&host->lock, flags);
+	/* Start SD CLK output. */
+	writel_relaxed((readl_relaxed(host->base + MMCICLOCK)
+			& ~MCI_CLK_PWRSAVE), host->base + MMCICLOCK);
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	/*
+	 * If MCIDATIN_3_0 and MCICMDIN bits of MCI_TEST_INPUT register
+	 * don't become all ones within 1 ms then a Voltage Switch
+	 * sequence has failed and a power cycle to the card is required.
+	 * Otherwise Voltage Switch sequence is completed successfully.
+	 */
+	usleep_range(1000, 1500);
+
+	spin_lock_irqsave(&host->lock, flags);
+	if ((readl_relaxed(host->base + MCI_TEST_INPUT) & (0xF << 1))
+				!= (0xF << 1)) {
+		pr_err("%s: %s: MCIDATIN_3_0 are still not all ones",
+			mmc_hostname(mmc), __func__);
+		err = -EAGAIN;
+		goto out_unlock;
+	}
+
+out_unlock:
+	spin_unlock_irqrestore(&host->lock, flags);
+out:
+	return err;
+}
+
+static int msmsdcc_config_cm_sdc4_dll_phase(struct msmsdcc_host *host,
+						u8 phase);
+/* Initialize the DLL (Programmable Delay Line) */
+static int msmsdcc_init_cm_sdc4_dll(struct msmsdcc_host *host)
+{
+	int rc = 0;
+	u32 wait_timeout;
+
+	/* Write 0 to DLL_PDN bit of MCI_DLL_CONFIG register */
+	writel_relaxed((readl_relaxed(host->base + MCI_DLL_CONFIG)
+			& ~MCI_DLL_PDN), host->base + MCI_DLL_CONFIG);
+
+	/* Write 1 to DLL_RST bit of MCI_DLL_CONFIG register */
+	writel_relaxed((readl_relaxed(host->base + MCI_DLL_CONFIG)
+			| MCI_DLL_RST), host->base + MCI_DLL_CONFIG);
+
+	msmsdcc_delay(host);
+
+	/* Write 0 to DLL_RST bit of MCI_DLL_CONFIG register */
+	writel_relaxed((readl_relaxed(host->base + MCI_DLL_CONFIG)
+			& ~MCI_DLL_RST), host->base + MCI_DLL_CONFIG);
+
+	/* Initialize the phase to 0 */
+	rc = msmsdcc_config_cm_sdc4_dll_phase(host, 0);
+	if (rc)
+		goto out;
+
+	wait_timeout = 1000;
+	/* Wait until DLL_LOCK bit of MCI_DLL_STATUS register becomes '1' */
+	while (!(readl_relaxed(host->base + MCI_DLL_STATUS) & MCI_DLL_LOCK)) {
+		/* max. wait for 1 sec for LOCK bit to be set */
+		if (--wait_timeout == 0) {
+			pr_err("%s: %s: DLL failed to lock at phase: %d",
+				mmc_hostname(host->mmc), __func__, 0);
+			rc = -1;
+			goto out;
+		}
+		/* wait for 1ms */
+		usleep_range(1000, 1500);
+	}
+out:
+	return rc;
+}
+
+/*
+ * Enable a CDR circuit in CM_SDC4_DLL block to enable automatic
+ * calibration sequence. This function should be called before
+ * enabling AUTO_CMD19 bit in MCI_CMD register for block read
+ * commands (CMD17/CMD18).
+ */
+static void msmsdcc_enable_cdr_cm_sdc4_dll(struct msmsdcc_host *host)
+{
+	/* Set CDR_EN bit to 1. */
+	writel_relaxed((readl_relaxed(host->base + MCI_DLL_CONFIG) |
+			MCI_CDR_EN), host->base + MCI_DLL_CONFIG);
+
+	/* Set CDR_EXT_EN bit to 0. */
+	writel_relaxed((readl_relaxed(host->base + MCI_DLL_CONFIG)
+			& ~MCI_CDR_EXT_EN), host->base + MCI_DLL_CONFIG);
+
+	/* Set CK_OUT_EN bit to 0. */
+	writel_relaxed((readl_relaxed(host->base + MCI_DLL_CONFIG)
+			& ~MCI_CK_OUT_EN), host->base + MCI_DLL_CONFIG);
+
+	/* Wait until CK_OUT_EN bit of MCI_DLL_CONFIG register becomes '0' */
+	while (readl_relaxed(host->base + MCI_DLL_CONFIG) & MCI_CK_OUT_EN)
+		;
+
+	/* Set CK_OUT_EN bit of MCI_DLL_CONFIG register to 1. */
+	writel_relaxed((readl_relaxed(host->base + MCI_DLL_CONFIG)
+			| MCI_CK_OUT_EN), host->base + MCI_DLL_CONFIG);
+
+	/* Wait until CK_OUT_EN bit of MCI_DLL_CONFIG register is 1. */
+	while (!(readl_relaxed(host->base + MCI_DLL_CONFIG) & MCI_CK_OUT_EN))
+		;
+}
+
+static int msmsdcc_config_cm_sdc4_dll_phase(struct msmsdcc_host *host,
+						u8 phase)
+{
+	int rc = 0;
+	u32 mclk_freq = 0;
+	u32 wait_timeout;
+
+	/* Set CDR_EN bit to 0. */
+	writel_relaxed((readl_relaxed(host->base + MCI_DLL_CONFIG)
+			& ~MCI_CDR_EN), host->base + MCI_DLL_CONFIG);
+
+	/* Set CDR_EXT_EN bit to 1. */
+	writel_relaxed((readl_relaxed(host->base + MCI_DLL_CONFIG)
+			| MCI_CDR_EXT_EN), host->base + MCI_DLL_CONFIG);
+
+	/* Program the MCLK value to MCLK_FREQ bit field */
+	if (host->clk_rate <= 112000000)
+		mclk_freq = 0;
+	else if (host->clk_rate <= 125000000)
+		mclk_freq = 1;
+	else if (host->clk_rate <= 137000000)
+		mclk_freq = 2;
+	else if (host->clk_rate <= 150000000)
+		mclk_freq = 3;
+	else if (host->clk_rate <= 162000000)
+		mclk_freq = 4;
+	else if (host->clk_rate <= 175000000)
+		mclk_freq = 5;
+	else if (host->clk_rate <= 187000000)
+		mclk_freq = 6;
+	else if (host->clk_rate <= 200000000)
+		mclk_freq = 7;
+
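+	/*
+	 * MCLK_FREQ is the 3-bit field at bits [26:24] of MCI_DLL_CONFIG;
+	 * values 0..7 cover host clock rates from <= 112 MHz up to 200 MHz
+	 * in roughly 12.5 MHz steps.
+	 */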
+	writel_relaxed(((readl_relaxed(host->base + MCI_DLL_CONFIG)
+			& ~(7 << 24)) | (mclk_freq << 24)),
+			host->base + MCI_DLL_CONFIG);
+
+	/* Set CK_OUT_EN bit to 0. */
+	writel_relaxed((readl_relaxed(host->base + MCI_DLL_CONFIG)
+		& ~MCI_CK_OUT_EN), host->base + MCI_DLL_CONFIG);
+
+	/* Set DLL_EN bit to 1. */
+	writel_relaxed((readl_relaxed(host->base + MCI_DLL_CONFIG)
+			| MCI_DLL_EN), host->base + MCI_DLL_CONFIG);
+
+	wait_timeout = 1000;
+	/* Wait until CK_OUT_EN bit of MCI_DLL_CONFIG register becomes '0' */
+	while (readl_relaxed(host->base + MCI_DLL_CONFIG) & MCI_CK_OUT_EN) {
+		/* max. wait of 1 sec for CK_OUT_EN bit to be cleared */
+		if (--wait_timeout == 0) {
+			pr_err("%s: %s: Failed to set DLL phase: %d, CK_OUT_EN bit is not 0",
+				mmc_hostname(host->mmc), __func__, phase);
+			rc = -1;
+			goto out;
+		}
+		/* wait for 1ms */
+		usleep_range(1000, 1500);
+	}
+
+	/*
+	 * Write the selected DLL clock output phase (0 ... 15)
+	 * to CDR_SELEXT bit field of MCI_DLL_CONFIG register.
+	 */
+	writel_relaxed(((readl_relaxed(host->base + MCI_DLL_CONFIG)
+			& ~(0xF << 20)) | (phase << 20)),
+			host->base + MCI_DLL_CONFIG);
+
+	/* Set CK_OUT_EN bit of MCI_DLL_CONFIG register to 1. */
+	writel_relaxed((readl_relaxed(host->base + MCI_DLL_CONFIG)
+			| MCI_CK_OUT_EN), host->base + MCI_DLL_CONFIG);
+
+	wait_timeout = 1000;
+	/* Wait until CK_OUT_EN bit of MCI_DLL_CONFIG register becomes '1' */
+	while (!(readl_relaxed(host->base + MCI_DLL_CONFIG) & MCI_CK_OUT_EN)) {
+		/* max. wait of 1 sec for CK_OUT_EN bit to be set */
+		if (--wait_timeout == 0) {
+			pr_err("%s: %s: Failed to set DLL phase: %d, CK_OUT_EN bit is not 1",
+				mmc_hostname(host->mmc), __func__, phase);
+			rc = -1;
+			goto out;
+		}
+		/* wait for 1ms */
+		usleep_range(1000, 1500);
+	}
+out:
+	return rc;
+}
+
+static int msmsdcc_execute_tuning(struct mmc_host *mmc)
+{
+	struct msmsdcc_host *host = mmc_priv(mmc);
+	u8 phase;
+	u8 *data_buf;
+	u8 tuned_phases[16], tuned_phase_cnt = 0;
+	int rc = 0;
+
+	/* Tuning is only required for SDR50 & SDR104 modes */
+	if (!host->tuning_needed) {
+		rc = 0;
+		goto out;
+	}
+
+	host->cmd19_tuning_in_progress = 1;
+	/*
+	 * Make sure that clock is always enabled when DLL
+	 * tuning is in progress. Keeping PWRSAVE ON may
+	 * turn off the clock. So let's disable the PWRSAVE
+	 * here and re-enable it once tuning is completed.
+	 */
+	writel_relaxed((readl_relaxed(host->base + MMCICLOCK)
+			& ~MCI_CLK_PWRSAVE), host->base + MMCICLOCK);
+	/* first of all reset the tuning block */
+	rc = msmsdcc_init_cm_sdc4_dll(host);
+	if (rc)
+		goto out;
+
+	data_buf = kmalloc(64, GFP_KERNEL);
+	if (!data_buf) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	phase = 0;
+	do {
+		struct mmc_command cmd = {0};
+		struct mmc_data data = {0};
+		struct mmc_request mrq = {
+			.cmd = &cmd,
+			.data = &data
+		};
+		struct scatterlist sg;
+
+		/* set the phase in delay line hw block */
+		rc = msmsdcc_config_cm_sdc4_dll_phase(host, phase);
+		if (rc)
+			goto kfree;
+
+		cmd.opcode = MMC_SEND_TUNING_BLOCK;
+		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+		data.blksz = 64;
+		data.blocks = 1;
+		data.flags = MMC_DATA_READ;
+		data.timeout_ns = 1000 * 1000 * 1000; /* 1 sec */
+
+		data.sg = &sg;
+		data.sg_len = 1;
+		sg_init_one(&sg, data_buf, 64);
+		memset(data_buf, 0, 64);
+		mmc_wait_for_req(mmc, &mrq);
+
+		if (!cmd.error && !data.error &&
+			!memcmp(data_buf, cmd19_tuning_block, 64)) {
+			/* tuning is successful with this tuning point */
+			tuned_phases[tuned_phase_cnt++] = phase;
+		}
+	} while (++phase < 16);
+
+	kfree(data_buf);
+
+	if (tuned_phase_cnt) {
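+		/*
+		 * Pick a phase roughly 3/4 of the way into the set of phases
+		 * that passed tuning: e.g. with 12 passing phases (indices
+		 * 0..11) this selects tuned_phases[8].
+		 */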
+		tuned_phase_cnt--;
+		tuned_phase_cnt = (tuned_phase_cnt * 3) / 4;
+		phase = tuned_phases[tuned_phase_cnt];
+		/*
+		 * Finally set the selected phase in delay
+		 * line hw block.
+		 */
+		rc = msmsdcc_config_cm_sdc4_dll_phase(host, phase);
+		if (rc)
+			goto out;
+	} else {
+		/* tuning failed */
+		rc = -EAGAIN;
+		pr_err("%s: %s: no tuning point found",
+			mmc_hostname(mmc), __func__);
+	}
+	goto out;
+
+kfree:
+	kfree(data_buf);
+out:
+	/* re-enable PWRSAVE */
+	writel_relaxed((readl_relaxed(host->base + MMCICLOCK) |
+			MCI_CLK_PWRSAVE), host->base + MMCICLOCK);
+	host->cmd19_tuning_in_progress = 0;
+	return rc;
 }
 
 static const struct mmc_host_ops msmsdcc_ops = {
+	.enable		= msmsdcc_enable,
+	.disable	= msmsdcc_disable,
 	.request	= msmsdcc_request,
 	.set_ios	= msmsdcc_set_ios,
+	.get_ro		= msmsdcc_get_ro,
+#ifdef CONFIG_MMC_MSM_SDIO_SUPPORT
 	.enable_sdio_irq = msmsdcc_enable_sdio_irq,
+#endif
+	.start_signal_voltage_switch = msmsdcc_start_signal_voltage_switch,
+	.execute_tuning = msmsdcc_execute_tuning
 };
 
+static unsigned int
+msmsdcc_slot_status(struct msmsdcc_host *host)
+{
+	int status;
+	unsigned int gpio_no = host->plat->status_gpio;
+
+	status = gpio_request(gpio_no, "SD_HW_Detect");
+	if (status) {
+		pr_err("%s: %s: Failed to request GPIO %d\n",
+			mmc_hostname(host->mmc), __func__, gpio_no);
+	} else {
+		status = gpio_direction_input(gpio_no);
+		if (!status)
+			status = !gpio_get_value_cansleep(gpio_no);
+		gpio_free(gpio_no);
+	}
+	return status;
+}
+
 static void
 msmsdcc_check_status(unsigned long data)
 {
 	struct msmsdcc_host *host = (struct msmsdcc_host *)data;
 	unsigned int status;
 
-	if (!host->plat->status) {
-		mmc_detect_change(host->mmc, 0);
-		goto out;
-	}
-
-	status = host->plat->status(mmc_dev(host->mmc));
-	host->eject = !status;
-	if (status ^ host->oldstat) {
-		pr_info("%s: Slot status change detected (%d -> %d)\n",
-			mmc_hostname(host->mmc), host->oldstat, status);
-		if (status)
-			mmc_detect_change(host->mmc, (5 * HZ) / 2);
+	if (host->plat->status || host->plat->status_gpio) {
+		if (host->plat->status)
+			status = host->plat->status(mmc_dev(host->mmc));
 		else
+			status = msmsdcc_slot_status(host);
+
+		host->eject = !status;
+		if (status ^ host->oldstat) {
+			pr_info("%s: Slot status change detected (%d -> %d)\n",
+			       mmc_hostname(host->mmc), host->oldstat, status);
 			mmc_detect_change(host->mmc, 0);
+		}
+		host->oldstat = status;
+	} else {
+		mmc_detect_change(host->mmc, 0);
 	}
-
-	host->oldstat = status;
-
-out:
-	if (host->timer.function)
-		mod_timer(&host->timer, jiffies + HZ);
 }
 
 static irqreturn_t
@@ -1092,30 +2668,55 @@
 {
 	struct msmsdcc_host *host = dev_id;
 
-	printk(KERN_DEBUG "%s: %d\n", __func__, irq);
+	pr_debug("%s: %d\n", __func__, irq);
 	msmsdcc_check_status((unsigned long) host);
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t
+msmsdcc_platform_sdiowakeup_irq(int irq, void *dev_id)
+{
+	struct msmsdcc_host	*host = dev_id;
+
+	pr_debug("%s: SDIO Wake up IRQ : %d\n", mmc_hostname(host->mmc), irq);
+	spin_lock(&host->lock);
+	if (!host->sdio_irq_disabled) {
+		disable_irq_nosync(irq);
+		if (host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ) {
+			wake_lock(&host->sdio_wlock);
+			msmsdcc_disable_irq_wake(host);
+		}
+		host->sdio_irq_disabled = 1;
+	}
+	if (host->plat->is_sdio_al_client) {
+		if (!host->clks_on) {
+			msmsdcc_setup_clocks(host, true);
+			host->clks_on = 1;
+		}
+		if (host->sdcc_irq_disabled) {
+			writel_relaxed(host->mci_irqenable,
+				       host->base + MMCIMASK0);
+			mb();
+			enable_irq(host->core_irqres->start);
+			host->sdcc_irq_disabled = 0;
+		}
+		wake_lock(&host->sdio_wlock);
+	}
+	spin_unlock(&host->lock);
+
+	return IRQ_HANDLED;
+}
+
 static void
 msmsdcc_status_notify_cb(int card_present, void *dev_id)
 {
 	struct msmsdcc_host *host = dev_id;
 
-	printk(KERN_DEBUG "%s: card_present %d\n", mmc_hostname(host->mmc),
+	pr_debug("%s: card_present %d\n", mmc_hostname(host->mmc),
 	       card_present);
 	msmsdcc_check_status((unsigned long) host);
 }
 
-static void
-msmsdcc_busclk_expired(unsigned long _data)
-{
-	struct msmsdcc_host	*host = (struct msmsdcc_host *) _data;
-
-	if (host->clks_on)
-		msmsdcc_disable_clocks(host, 0);
-}
-
 static int
 msmsdcc_init_dma(struct msmsdcc_host *host)
 {
@@ -1143,18 +2744,534 @@
 	return 0;
 }
 
+#ifdef CONFIG_MMC_MSM_SPS_SUPPORT
+/**
+ * Allocate and Connect a SDCC peripheral's SPS endpoint
+ *
+ * This function allocates an endpoint context and
+ * connects it with a memory endpoint by calling the
+ * appropriate SPS driver APIs.
+ *
+ * Also registers a SPS callback function with
+ * SPS driver
+ *
+ * This function should only be called once typically
+ * during driver probe.
+ *
+ * @host - Pointer to sdcc host structure
+ * @ep   - Pointer to sps endpoint data structure
+ * @is_producer - 1 means Producer endpoint
+ *		 0 means Consumer endpoint
+ *
+ * @return - 0 if successful else negative value.
+ *
+ */
+static int msmsdcc_sps_init_ep_conn(struct msmsdcc_host *host,
+				struct msmsdcc_sps_ep_conn_data *ep,
+				bool is_producer)
+{
+	int rc = 0;
+	struct sps_pipe *sps_pipe_handle;
+	struct sps_connect *sps_config = &ep->config;
+	struct sps_register_event *sps_event = &ep->event;
+
+	/* Allocate endpoint context */
+	sps_pipe_handle = sps_alloc_endpoint();
+	if (!sps_pipe_handle) {
+		pr_err("%s: sps_alloc_endpoint() failed!!! is_producer=%d",
+			   mmc_hostname(host->mmc), is_producer);
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/* Get default connection configuration for an endpoint */
+	rc = sps_get_config(sps_pipe_handle, sps_config);
+	if (rc) {
+		pr_err("%s: sps_get_config() failed!!! pipe_handle=0x%x,"
+			" rc=%d", mmc_hostname(host->mmc),
+			(u32)sps_pipe_handle, rc);
+		goto get_config_err;
+	}
+
+	/* Modify the default connection configuration */
+	if (is_producer) {
+		/*
+		 * For an SDCC producer transfer, the source should be
+		 * the SDCC peripheral whereas the destination should
+		 * be system memory.
+		 */
+		sps_config->source = host->sps.bam_handle;
+		sps_config->destination = SPS_DEV_HANDLE_MEM;
+		/* Producer pipe will handle this connection */
+		sps_config->mode = SPS_MODE_SRC;
+		sps_config->options =
+			SPS_O_AUTO_ENABLE | SPS_O_EOT | SPS_O_ACK_TRANSFERS;
+	} else {
+		/*
+		 * For an SDCC consumer transfer, the source should be
+		 * system memory whereas the destination should be
+		 * the SDCC peripheral.
+		 */
+		sps_config->source = SPS_DEV_HANDLE_MEM;
+		sps_config->destination = host->sps.bam_handle;
+		sps_config->mode = SPS_MODE_DEST;
+		sps_config->options =
+			SPS_O_AUTO_ENABLE | SPS_O_EOT | SPS_O_ACK_TRANSFERS;
+	}
+
+	/* Producer pipe index */
+	sps_config->src_pipe_index = host->sps.src_pipe_index;
+	/* Consumer pipe index */
+	sps_config->dest_pipe_index = host->sps.dest_pipe_index;
+	/*
+	 * This event threshold value is only significant for BAM-to-BAM
+	 * transfer. It's ignored for BAM-to-System mode transfer.
+	 */
+	sps_config->event_thresh = 0x10;
+	/*
+	 * Max. no of scatter/gather buffers that can
+	 * be passed by block layer = 32 (NR_SG).
+	 * Each BAM descriptor needs 64 bits (8 bytes).
+	 * One BAM descriptor is required per buffer transfer.
+	 * So we would require a total of 256 (32 * 8) bytes of descriptor FIFO.
+	 * But due to a HW limitation we need to allocate at least one extra
+	 * descriptor entry (256 bytes + 8 bytes). In order to keep the size
+	 * a power of 2, we allocate 512 bytes of memory.
+	 */
+	sps_config->desc.size = 512;
+	sps_config->desc.base = dma_alloc_coherent(mmc_dev(host->mmc),
+						sps_config->desc.size,
+						&sps_config->desc.phys_base,
+						GFP_KERNEL);
+	/* Bail out if the descriptor FIFO could not be allocated */
+	if (!sps_config->desc.base) {
+		rc = -ENOMEM;
+		goto get_config_err;
+	}
+
+	memset(sps_config->desc.base, 0x00, sps_config->desc.size);
+
+	/* Establish connection between peripheral and memory endpoint */
+	rc = sps_connect(sps_pipe_handle, sps_config);
+	if (rc) {
+		pr_err("%s: sps_connect() failed!!! pipe_handle=0x%x,"
+			" rc=%d", mmc_hostname(host->mmc),
+			(u32)sps_pipe_handle, rc);
+		goto sps_connect_err;
+	}
+
+	sps_event->mode = SPS_TRIGGER_CALLBACK;
+	sps_event->options = SPS_O_EOT;
+	sps_event->callback = msmsdcc_sps_complete_cb;
+	sps_event->xfer_done = NULL;
+	sps_event->user = (void *)host;
+
+	/* Register callback event for EOT (End of transfer) event. */
+	rc = sps_register_event(sps_pipe_handle, sps_event);
+	if (rc) {
+		pr_err("%s: sps_register_event() failed!!! pipe_handle=0x%x,"
+			" rc=%d", mmc_hostname(host->mmc),
+			(u32)sps_pipe_handle, rc);
+		goto reg_event_err;
+	}
+	/* Now save the sps pipe handle */
+	ep->pipe_handle = sps_pipe_handle;
+	pr_debug("%s: %s, success !!! %s: pipe_handle=0x%x,"
+		" desc_fifo.phys_base=0x%x\n", mmc_hostname(host->mmc),
+		__func__, is_producer ? "READ" : "WRITE",
+		(u32)sps_pipe_handle, sps_config->desc.phys_base);
+	goto out;
+
+reg_event_err:
+	sps_disconnect(sps_pipe_handle);
+sps_connect_err:
+	dma_free_coherent(mmc_dev(host->mmc),
+			sps_config->desc.size,
+			sps_config->desc.base,
+			sps_config->desc.phys_base);
+get_config_err:
+	sps_free_endpoint(sps_pipe_handle);
+out:
+	return rc;
+}
+
+/**
+ * Disconnect and Deallocate a SDCC peripheral's SPS endpoint
+ *
+ * This function disconnects the endpoint and deallocates
+ * the endpoint context.
+ *
+ * This function should only be called once typically
+ * during driver remove.
+ *
+ * @host - Pointer to sdcc host structure
+ * @ep   - Pointer to sps endpoint data structure
+ *
+ */
+static void msmsdcc_sps_exit_ep_conn(struct msmsdcc_host *host,
+				struct msmsdcc_sps_ep_conn_data *ep)
+{
+	struct sps_pipe *sps_pipe_handle = ep->pipe_handle;
+	struct sps_connect *sps_config = &ep->config;
+	struct sps_register_event *sps_event = &ep->event;
+
+	sps_event->xfer_done = NULL;
+	sps_event->callback = NULL;
+	sps_register_event(sps_pipe_handle, sps_event);
+	sps_disconnect(sps_pipe_handle);
+	dma_free_coherent(mmc_dev(host->mmc),
+			sps_config->desc.size,
+			sps_config->desc.base,
+			sps_config->desc.phys_base);
+	sps_free_endpoint(sps_pipe_handle);
+}
+
+/**
+ * Reset SDCC peripheral's SPS endpoint
+ *
+ * This function disconnects an endpoint.
+ *
+ * This function should be called to reset an
+ * SPS endpoint when a data transfer error is
+ * encountered during data transfer. This
+ * can be considered as soft reset to endpoint.
+ *
+ * This function should only be called if
+ * msmsdcc_sps_init() is already called.
+ *
+ * @host - Pointer to sdcc host structure
+ * @ep   - Pointer to sps endpoint data structure
+ *
+ * @return - 0 if successful else negative value.
+ */
+static int msmsdcc_sps_reset_ep(struct msmsdcc_host *host,
+				struct msmsdcc_sps_ep_conn_data *ep)
+{
+	int rc = 0;
+	struct sps_pipe *sps_pipe_handle = ep->pipe_handle;
+
+	rc = sps_disconnect(sps_pipe_handle);
+	if (rc) {
+		pr_err("%s: %s: sps_disconnect() failed!!! pipe_handle=0x%x,"
+			" rc=%d", mmc_hostname(host->mmc), __func__,
+			(u32)sps_pipe_handle, rc);
+		goto out;
+	}
+ out:
+	return rc;
+}
+
+/**
+ * Restore SDCC peripheral's SPS endpoint
+ *
+ * This function connects an endpoint.
+ *
+ * This function should be called to restore an
+ * SPS endpoint after a data transfer error is
+ * encountered during data transfer. This
+ * can be considered as soft reset to endpoint.
+ *
+ * This function should only be called if
+ * msmsdcc_sps_reset_ep() is called before.
+ *
+ * @host - Pointer to sdcc host structure
+ * @ep   - Pointer to sps endpoint data structure
+ *
+ * @return - 0 if successful else negative value.
+ */
+static int msmsdcc_sps_restore_ep(struct msmsdcc_host *host,
+				struct msmsdcc_sps_ep_conn_data *ep)
+{
+	int rc = 0;
+	struct sps_pipe *sps_pipe_handle = ep->pipe_handle;
+	struct sps_connect *sps_config = &ep->config;
+	struct sps_register_event *sps_event = &ep->event;
+
+	/* Establish connection between peripheral and memory endpoint */
+	rc = sps_connect(sps_pipe_handle, sps_config);
+	if (rc) {
+		pr_err("%s: %s: sps_connect() failed!!! pipe_handle=0x%x,"
+			" rc=%d", mmc_hostname(host->mmc), __func__,
+			(u32)sps_pipe_handle, rc);
+		goto out;
+	}
+
+	/* Register callback event for EOT (End of transfer) event. */
+	rc = sps_register_event(sps_pipe_handle, sps_event);
+	if (rc) {
+		pr_err("%s: %s: sps_register_event() failed!!!"
+			" pipe_handle=0x%x, rc=%d",
+			mmc_hostname(host->mmc), __func__,
+			(u32)sps_pipe_handle, rc);
+		goto reg_event_err;
+	}
+	goto out;
+
+reg_event_err:
+	sps_disconnect(sps_pipe_handle);
+out:
+	return rc;
+}
+
+/**
+ * Initialize SPS HW connected with SDCC core
+ *
+ * This function registers BAM HW resources with the
+ * SPS driver and then initializes 2 SPS endpoints
+ *
+ * This function should only be called once typically
+ * during driver probe.
+ *
+ * @host - Pointer to sdcc host structure
+ *
+ * @return - 0 if successful else negative value.
+ *
+ */
+static int msmsdcc_sps_init(struct msmsdcc_host *host)
+{
+	int rc = 0;
+	struct sps_bam_props bam = {0};
+
+	host->bam_base = ioremap(host->bam_memres->start,
+				resource_size(host->bam_memres));
+	if (!host->bam_base) {
+		pr_err("%s: BAM ioremap() failed!!! phys_addr=0x%x,"
+			" size=0x%x", mmc_hostname(host->mmc),
+			host->bam_memres->start,
+			(host->bam_memres->end -
+			host->bam_memres->start));
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	bam.phys_addr = host->bam_memres->start;
+	bam.virt_addr = host->bam_base;
+	/*
+	 * This event threshold value is only significant for BAM-to-BAM
+	 * transfer. It's ignored for BAM-to-System mode transfer.
+	 */
+	bam.event_threshold = 0x10;	/* Pipe event threshold */
+	/*
+	 * This threshold controls when the BAM publishes
+	 * the descriptor size on the sideband interface.
+	 * SPS HW will only be used when
+	 * data transfer size > MCI_FIFOSIZE (64 bytes).
+	 * PIO mode will be used when
+	 * data transfer size < MCI_FIFOSIZE (64 bytes).
+	 * So set this threshold value to 64 bytes.
+	 */
+	bam.summing_threshold = 64;
+	/* SPS driver will handle the SDCC BAM IRQ */
+	bam.irq = (u32)host->bam_irqres->start;
+	bam.manage = SPS_BAM_MGR_LOCAL;
+
+	pr_info("%s: bam physical base=0x%x\n", mmc_hostname(host->mmc),
+			(u32)bam.phys_addr);
+	pr_info("%s: bam virtual base=0x%x\n", mmc_hostname(host->mmc),
+			(u32)bam.virt_addr);
+
+	/* Register SDCC Peripheral BAM device to SPS driver */
+	rc = sps_register_bam_device(&bam, &host->sps.bam_handle);
+	if (rc) {
+		pr_err("%s: sps_register_bam_device() failed!!! err=%d",
+			   mmc_hostname(host->mmc), rc);
+		goto reg_bam_err;
+	}
+	pr_info("%s: BAM device registered. bam_handle=0x%x",
+		mmc_hostname(host->mmc), host->sps.bam_handle);
+
+	host->sps.src_pipe_index = SPS_SDCC_PRODUCER_PIPE_INDEX;
+	host->sps.dest_pipe_index = SPS_SDCC_CONSUMER_PIPE_INDEX;
+
+	rc = msmsdcc_sps_init_ep_conn(host, &host->sps.prod,
+					SPS_PROD_PERIPHERAL);
+	if (rc)
+		goto sps_reset_err;
+	rc = msmsdcc_sps_init_ep_conn(host, &host->sps.cons,
+					SPS_CONS_PERIPHERAL);
+	if (rc)
+		goto cons_conn_err;
+
+	pr_info("%s: Qualcomm MSM SDCC-BAM at 0x%016llx irq %d\n",
+		mmc_hostname(host->mmc),
+		(unsigned long long)host->bam_memres->start,
+		(unsigned int)host->bam_irqres->start);
+	goto out;
+
+cons_conn_err:
+	msmsdcc_sps_exit_ep_conn(host, &host->sps.prod);
+sps_reset_err:
+	sps_deregister_bam_device(host->sps.bam_handle);
+reg_bam_err:
+	iounmap(host->bam_base);
+out:
+	return rc;
+}
+
+/**
+ * De-initialize SPS HW connected with SDCC core
+ *
+ * This function deinitializes the SPS endpoints and then
+ * deregisters BAM resources from the SPS driver.
+ *
+ * This function should only be called once typically
+ * during driver remove.
+ *
+ * @host - Pointer to sdcc host structure
+ *
+ */
+static void msmsdcc_sps_exit(struct msmsdcc_host *host)
+{
+	msmsdcc_sps_exit_ep_conn(host, &host->sps.cons);
+	msmsdcc_sps_exit_ep_conn(host, &host->sps.prod);
+	sps_deregister_bam_device(host->sps.bam_handle);
+	iounmap(host->bam_base);
+}
+#endif /* CONFIG_MMC_MSM_SPS_SUPPORT */
+
+static ssize_t
+show_polling(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct mmc_host *mmc = dev_get_drvdata(dev);
+	struct msmsdcc_host *host = mmc_priv(mmc);
+	int poll;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+	poll = !!(mmc->caps & MMC_CAP_NEEDS_POLL);
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", poll);
+}
+
+static ssize_t
+set_polling(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct mmc_host *mmc = dev_get_drvdata(dev);
+	struct msmsdcc_host *host = mmc_priv(mmc);
+	int value;
+	unsigned long flags;
+
+	sscanf(buf, "%d", &value);
+
+	spin_lock_irqsave(&host->lock, flags);
+	if (value) {
+		mmc->caps |= MMC_CAP_NEEDS_POLL;
+		mmc_detect_change(host->mmc, 0);
+	} else {
+		mmc->caps &= ~MMC_CAP_NEEDS_POLL;
+	}
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	host->polling_enabled = mmc->caps & MMC_CAP_NEEDS_POLL;
+#endif
+	spin_unlock_irqrestore(&host->lock, flags);
+	return count;
+}
+
+static DEVICE_ATTR(polling, S_IRUGO | S_IWUSR,
+		show_polling, set_polling);
+static struct attribute *dev_attrs[] = {
+	&dev_attr_polling.attr,
+	NULL,
+};
+static struct attribute_group dev_attr_grp = {
+	.attrs = dev_attrs,
+};
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void msmsdcc_early_suspend(struct early_suspend *h)
+{
+	struct msmsdcc_host *host =
+		container_of(h, struct msmsdcc_host, early_suspend);
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+	host->polling_enabled = host->mmc->caps & MMC_CAP_NEEDS_POLL;
+	host->mmc->caps &= ~MMC_CAP_NEEDS_POLL;
+	spin_unlock_irqrestore(&host->lock, flags);
+};
+static void msmsdcc_late_resume(struct early_suspend *h)
+{
+	struct msmsdcc_host *host =
+		container_of(h, struct msmsdcc_host, early_suspend);
+	unsigned long flags;
+
+	if (host->polling_enabled) {
+		spin_lock_irqsave(&host->lock, flags);
+		host->mmc->caps |= MMC_CAP_NEEDS_POLL;
+		mmc_detect_change(host->mmc, 0);
+		spin_unlock_irqrestore(&host->lock, flags);
+	}
+};
+#endif
+
+static void msmsdcc_req_tout_timer_hdlr(unsigned long data)
+{
+	struct msmsdcc_host *host = (struct msmsdcc_host *)data;
+	struct mmc_request *mrq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+	if ((host->plat->dummy52_required) &&
+		(host->dummy_52_state == DUMMY_52_STATE_SENT)) {
+		pr_info("%s: %s: dummy CMD52 timeout\n",
+				mmc_hostname(host->mmc), __func__);
+		host->dummy_52_state = DUMMY_52_STATE_NONE;
+	}
+
+	mrq = host->curr.mrq;
+
+	if (mrq && mrq->cmd) {
+		pr_info("%s: %s CMD%d\n", mmc_hostname(host->mmc),
+				__func__, mrq->cmd->opcode);
+		if (!mrq->cmd->error)
+			mrq->cmd->error = -ETIMEDOUT;
+		if (host->plat->dummy52_required && host->dummy_52_needed)
+			host->dummy_52_needed = 0;
+		if (host->curr.data) {
+			pr_info("%s: %s Request timeout\n",
+					mmc_hostname(host->mmc), __func__);
+			if (mrq->data && !mrq->data->error)
+				mrq->data->error = -ETIMEDOUT;
+			host->curr.data_xfered = 0;
+			if (host->dma.sg && host->is_dma_mode) {
+				msm_dmov_stop_cmd(host->dma.channel,
+						&host->dma.hdr, 0);
+			} else if (host->sps.sg && host->is_sps_mode) {
+				/* Stop current SPS transfer */
+				msmsdcc_sps_exit_curr_xfer(host);
+			} else {
+				msmsdcc_reset_and_restore(host);
+				msmsdcc_stop_data(host);
+				if (mrq->data && mrq->data->stop)
+					msmsdcc_start_command(host,
+							mrq->data->stop, 0);
+				else
+					msmsdcc_request_end(host, mrq);
+			}
+		} else {
+			if (host->prog_enable) {
+				host->prog_scan = 0;
+				host->prog_enable = 0;
+			}
+			msmsdcc_reset_and_restore(host);
+			msmsdcc_request_end(host, mrq);
+		}
+	}
+	spin_unlock_irqrestore(&host->lock, flags);
+}
+
 static int
 msmsdcc_probe(struct platform_device *pdev)
 {
-	struct msm_mmc_platform_data *plat = pdev->dev.platform_data;
+	struct mmc_platform_data *plat = pdev->dev.platform_data;
 	struct msmsdcc_host *host;
 	struct mmc_host *mmc;
-	struct resource *cmd_irqres = NULL;
-	struct resource *pio_irqres = NULL;
-	struct resource *stat_irqres = NULL;
-	struct resource *memres = NULL;
+	unsigned long flags;
+	struct resource *core_irqres = NULL;
+	struct resource *bam_irqres = NULL;
+	struct resource *core_memres = NULL;
+	struct resource *dml_memres = NULL;
+	struct resource *bam_memres = NULL;
 	struct resource *dmares = NULL;
 	int ret;
+	int i;
 
 	/* must have platform data */
 	if (!plat) {
@@ -1163,32 +3280,60 @@
 		goto out;
 	}
 
-	if (pdev->id < 1 || pdev->id > 4)
+	if (pdev->id < 1 || pdev->id > 5)
 		return -EINVAL;
 
+	if (plat->is_sdio_al_client)
+		if (!plat->sdio_lpm_gpio_setup || !plat->sdiowakeup_irq)
+			return -EINVAL;
+
 	if (pdev->resource == NULL || pdev->num_resources < 2) {
 		pr_err("%s: Invalid resource\n", __func__);
 		return -ENXIO;
 	}
 
-	memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
-	cmd_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
-						  "cmd_irq");
-	pio_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
-						  "pio_irq");
-	stat_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
-						   "status_irq");
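+	/*
+	 * Walk the platform resources: memory regions named "sdcc_dml_addr"
+	 * and "sdcc_bam_addr" map the DML and BAM blocks, any other memory
+	 * region is the SDCC core; "sdcc_bam_irq" is the BAM IRQ and any
+	 * other IRQ resource is the core IRQ.
+	 */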
+	for (i = 0; i < pdev->num_resources; i++) {
+		if (pdev->resource[i].flags & IORESOURCE_MEM) {
+			if (!strcmp(pdev->resource[i].name,
+					"sdcc_dml_addr"))
+				dml_memres = &pdev->resource[i];
+			else if (!strcmp(pdev->resource[i].name,
+					"sdcc_bam_addr"))
+				bam_memres = &pdev->resource[i];
+			else
+				core_memres = &pdev->resource[i];
 
-	if (!cmd_irqres || !pio_irqres || !memres) {
-		pr_err("%s: Invalid resource\n", __func__);
+		}
+		if (pdev->resource[i].flags & IORESOURCE_IRQ) {
+			if (!strcmp(pdev->resource[i].name,
+					"sdcc_bam_irq"))
+				bam_irqres = &pdev->resource[i];
+			else
+				core_irqres = &pdev->resource[i];
+		}
+		if (pdev->resource[i].flags & IORESOURCE_DMA)
+			dmares = &pdev->resource[i];
+	}
+
+	if (!core_irqres || !core_memres) {
+		pr_err("%s: Invalid sdcc core resource\n", __func__);
+		return -ENXIO;
+	}
+
+	/*
+	 * Both BAM and DML memory resources should be present.
+	 * The BAM IRQ resource should also be present.
+	 */
+	if ((bam_memres && !dml_memres) ||
+		(!bam_memres && dml_memres) ||
+		((bam_memres && dml_memres) && !bam_irqres)) {
+		pr_err("%s: Invalid sdcc BAM/DML resource\n", __func__);
 		return -ENXIO;
 	}
 
 	/*
 	 * Setup our host structure
 	 */
-
 	mmc = mmc_alloc_host(sizeof(struct msmsdcc_host), &pdev->dev);
 	if (!mmc) {
 		ret = -ENOMEM;
@@ -1200,268 +3345,851 @@
 	host->plat = plat;
 	host->mmc = mmc;
 	host->curr.cmd = NULL;
+	if (bam_memres && dml_memres && bam_irqres)
+		host->is_sps_mode = 1;
+	else if (dmares)
+		host->is_dma_mode = 1;
 
-	host->cmdpoll = 1;
-
-	host->base = ioremap(memres->start, PAGE_SIZE);
+	host->base = ioremap(core_memres->start,
+			resource_size(core_memres));
 	if (!host->base) {
 		ret = -ENOMEM;
-		goto out;
+		goto host_free;
 	}
 
-	host->cmd_irqres = cmd_irqres;
-	host->pio_irqres = pio_irqres;
-	host->memres = memres;
+	host->core_irqres = core_irqres;
+	host->bam_irqres = bam_irqres;
+	host->core_memres = core_memres;
+	host->dml_memres = dml_memres;
+	host->bam_memres = bam_memres;
 	host->dmares = dmares;
 	spin_lock_init(&host->lock);
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+	if (plat->embedded_sdio)
+		mmc_set_embedded_sdio_data(mmc,
+					   &plat->embedded_sdio->cis,
+					   &plat->embedded_sdio->cccr,
+					   plat->embedded_sdio->funcs,
+					   plat->embedded_sdio->num_funcs);
+#endif
+
 	tasklet_init(&host->dma_tlet, msmsdcc_dma_complete_tlet,
 			(unsigned long)host);
 
-	/*
-	 * Setup DMA
-	 */
-	msmsdcc_init_dma(host);
-
-	/* Get our clocks */
-	host->pclk = clk_get(&pdev->dev, "sdc_pclk");
-	if (IS_ERR(host->pclk)) {
-		ret = PTR_ERR(host->pclk);
-		goto host_free;
+	tasklet_init(&host->sps.tlet, msmsdcc_sps_complete_tlet,
+			(unsigned long)host);
+	if (host->is_dma_mode) {
+		/* Setup DMA */
+		ret = msmsdcc_init_dma(host);
+		if (ret)
+			goto ioremap_free;
+	} else {
+		host->dma.channel = -1;
 	}
 
+	/*
+	 * Setup SDCC clock if derived from Daytona
+	 * fabric core clock.
+	 */
+	if (plat->pclk_src_dfab) {
+		host->dfab_pclk = clk_get(&pdev->dev, "dfab_sdc_clk");
+		if (!IS_ERR(host->dfab_pclk)) {
+			/* Set the clock rate to 64MHz for max. performance */
+			ret = clk_set_rate(host->dfab_pclk, 64000000);
+			if (ret)
+				goto dfab_pclk_put;
+			ret = clk_enable(host->dfab_pclk);
+			if (ret)
+				goto dfab_pclk_put;
+		} else
+			goto dma_free;
+	}
+
+	/*
+	 * Setup main peripheral bus clock
+	 */
+	host->pclk = clk_get(&pdev->dev, "sdc_pclk");
+	if (!IS_ERR(host->pclk)) {
+		ret = clk_enable(host->pclk);
+		if (ret)
+			goto pclk_put;
+
+		host->pclk_rate = clk_get_rate(host->pclk);
+	}
+
+	/*
+	 * Setup SDC MMC clock
+	 */
 	host->clk = clk_get(&pdev->dev, "sdc_clk");
 	if (IS_ERR(host->clk)) {
 		ret = PTR_ERR(host->clk);
-		goto pclk_put;
+		goto pclk_disable;
 	}
 
-	/* Enable clocks */
-	ret = msmsdcc_enable_clocks(host);
+	ret = clk_set_rate(host->clk, msmsdcc_get_min_sup_clk_rate(host));
+	if (ret) {
+		pr_err("%s: Clock rate set failed (%d)\n", __func__, ret);
+		goto clk_put;
+	}
+
+	ret = clk_enable(host->clk);
 	if (ret)
 		goto clk_put;
 
-	ret = clk_set_rate(host->clk, msmsdcc_fmin);
+	host->clk_rate = clk_get_rate(host->clk);
+
+	host->clks_on = 1;
+
+	ret = msmsdcc_vreg_init(host, true);
 	if (ret) {
-		pr_err("%s: Clock rate set failed (%d)\n", __func__, ret);
+		pr_err("%s: msmsdcc_vreg_init() failed (%d)\n", __func__, ret);
 		goto clk_disable;
 	}
 
-	host->pclk_rate = clk_get_rate(host->pclk);
-	host->clk_rate = clk_get_rate(host->clk);
+
+	/* Clocks has to be running before accessing SPS/DML HW blocks */
+	if (host->is_sps_mode) {
+		/* Initialize SPS */
+		ret = msmsdcc_sps_init(host);
+		if (ret)
+			goto vreg_deinit;
+		/* Initialize DML */
+		ret = msmsdcc_dml_init(host);
+		if (ret)
+			goto sps_exit;
+	}
 
 	/*
 	 * Setup MMC host structure
 	 */
 	mmc->ops = &msmsdcc_ops;
-	mmc->f_min = msmsdcc_fmin;
-	mmc->f_max = msmsdcc_fmax;
+	mmc->f_min = msmsdcc_get_min_sup_clk_rate(host);
+	mmc->f_max = msmsdcc_get_max_sup_clk_rate(host);
 	mmc->ocr_avail = plat->ocr_mask;
+	mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
+	mmc->caps |= plat->mmc_bus_width;
 
-	if (msmsdcc_4bit)
-		mmc->caps |= MMC_CAP_4_BIT_DATA;
-	if (msmsdcc_sdioirq)
-		mmc->caps |= MMC_CAP_SDIO_IRQ;
 	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
+	mmc->caps |= plat->uhs_caps;
+	/*
+	 * XPC controls the maximum current in the default speed mode of an
+	 * SDXC card. XPC=0 means 100mA (max.) but speed class is not supported.
+	 * XPC=1 means 150mA (max.) and speed class is supported.
+	 */
+	if (plat->xpc_cap)
+		mmc->caps |= (MMC_CAP_SET_XPC_330 | MMC_CAP_SET_XPC_300 |
+				MMC_CAP_SET_XPC_180);
+
+	if (plat->nonremovable)
+		mmc->caps |= MMC_CAP_NONREMOVABLE;
+#ifdef CONFIG_MMC_MSM_SDIO_SUPPORT
+	mmc->caps |= MMC_CAP_SDIO_IRQ;
+#endif
+
+	if (plat->is_sdio_al_client)
+		mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY;
 
 	mmc->max_segs = NR_SG;
 	mmc->max_blk_size = 4096;	/* MCI_DATA_CTL BLOCKSIZE up to 4096 */
-	mmc->max_blk_count = 65536;
+	mmc->max_blk_count = 65535;
 
 	mmc->max_req_size = 33554432;	/* MCI_DATA_LENGTH is 25 bits */
 	mmc->max_seg_size = mmc->max_req_size;
 
-	msmsdcc_writel(host, 0, MMCIMASK0);
-	msmsdcc_writel(host, 0x5e007ff, MMCICLEAR);
+	writel_relaxed(0, host->base + MMCIMASK0);
+	writel_relaxed(MCI_CLEAR_STATIC_MASK, host->base + MMCICLEAR);
 
-	msmsdcc_writel(host, MCI_IRQENABLE, MMCIMASK0);
-	host->saved_irq0mask = MCI_IRQENABLE;
+	/* Delay needed (MMCIMASK0 was just written above) */
+	msmsdcc_delay(host);
+	writel_relaxed(MCI_IRQENABLE, host->base + MMCIMASK0);
+	mb();
+	host->mci_irqenable = MCI_IRQENABLE;
 
+	ret = request_irq(core_irqres->start, msmsdcc_irq, IRQF_SHARED,
+			  DRIVER_NAME " (cmd)", host);
+	if (ret)
+		goto dml_exit;
+
+	ret = request_irq(core_irqres->start, msmsdcc_pio_irq, IRQF_SHARED,
+			  DRIVER_NAME " (pio)", host);
+	if (ret)
+		goto irq_free;
+
+	/*
+	 * Enable SDCC IRQ only when host is powered on. Otherwise, this
+	 * IRQ is unnecessarily monitored by the MPM (Modem power
+	 * management block) during idle-power collapse. The MPM will be
+	 * configured to monitor the DATA1 GPIO line with level-low trigger
+	 * and thus depending on the GPIO status, it prevents TCXO shutdown
+	 * during idle-power collapse.
+	 */
+	disable_irq(core_irqres->start);
+	host->sdcc_irq_disabled = 1;
+
+	if (plat->sdiowakeup_irq) {
+		wake_lock_init(&host->sdio_wlock, WAKE_LOCK_SUSPEND,
+				mmc_hostname(mmc));
+		ret = request_irq(plat->sdiowakeup_irq,
+			msmsdcc_platform_sdiowakeup_irq,
+			IRQF_SHARED | IRQF_TRIGGER_LOW,
+			DRIVER_NAME "sdiowakeup", host);
+		if (ret) {
+			pr_err("Unable to get sdio wakeup IRQ %d (%d)\n",
+				plat->sdiowakeup_irq, ret);
+			goto pio_irq_free;
+		} else {
+			spin_lock_irqsave(&host->lock, flags);
+			if (!host->sdio_irq_disabled) {
+				disable_irq_nosync(plat->sdiowakeup_irq);
+				host->sdio_irq_disabled = 1;
+			}
+			spin_unlock_irqrestore(&host->lock, flags);
+		}
+	}
+
+	if (plat->cfg_mpm_sdiowakeup) {
+		wake_lock_init(&host->sdio_wlock, WAKE_LOCK_SUSPEND,
+				mmc_hostname(mmc));
+	}
+
+	wake_lock_init(&host->sdio_suspend_wlock, WAKE_LOCK_SUSPEND,
+			mmc_hostname(mmc));
 	/*
 	 * Setup card detect change
 	 */
 
-	memset(&host->timer, 0, sizeof(host->timer));
+	if (plat->status || plat->status_gpio) {
+		if (plat->status)
+			host->oldstat = plat->status(mmc_dev(host->mmc));
+		else
+			host->oldstat = msmsdcc_slot_status(host);
+		host->eject = !host->oldstat;
+	}
 
-	if (stat_irqres && !(stat_irqres->flags & IORESOURCE_DISABLED)) {
-		unsigned long irqflags = IRQF_SHARED |
-			(stat_irqres->flags & IRQF_TRIGGER_MASK);
-
-		host->stat_irq = stat_irqres->start;
-		ret = request_irq(host->stat_irq,
+	if (plat->status_irq) {
+		ret = request_threaded_irq(plat->status_irq, NULL,
 				  msmsdcc_platform_status_irq,
-				  irqflags,
+				  plat->irq_flags,
 				  DRIVER_NAME " (slot)",
 				  host);
 		if (ret) {
-			pr_err("%s: Unable to get slot IRQ %d (%d)\n",
-			       mmc_hostname(mmc), host->stat_irq, ret);
-			goto clk_disable;
+			pr_err("Unable to get slot IRQ %d (%d)\n",
+			       plat->status_irq, ret);
+			goto sdiowakeup_irq_free;
 		}
 	} else if (plat->register_status_notify) {
 		plat->register_status_notify(msmsdcc_status_notify_cb, host);
 	} else if (!plat->status)
 		pr_err("%s: No card detect facilities available\n",
 		       mmc_hostname(mmc));
-	else {
-		init_timer(&host->timer);
-		host->timer.data = (unsigned long)host;
-		host->timer.function = msmsdcc_check_status;
-		host->timer.expires = jiffies + HZ;
-		add_timer(&host->timer);
-	}
-
-	if (plat->status) {
-		host->oldstat = host->plat->status(mmc_dev(host->mmc));
-		host->eject = !host->oldstat;
-	}
-
-	init_timer(&host->busclk_timer);
-	host->busclk_timer.data = (unsigned long) host;
-	host->busclk_timer.function = msmsdcc_busclk_expired;
-
-	ret = request_irq(cmd_irqres->start, msmsdcc_irq, IRQF_SHARED,
-			  DRIVER_NAME " (cmd)", host);
-	if (ret)
-		goto stat_irq_free;
-
-	ret = request_irq(pio_irqres->start, msmsdcc_pio_irq, IRQF_SHARED,
-			  DRIVER_NAME " (pio)", host);
-	if (ret)
-		goto cmd_irq_free;
 
 	mmc_set_drvdata(pdev, mmc);
+
+	ret = pm_runtime_set_active(&(pdev)->dev);
+	if (ret < 0)
+		pr_info("%s: %s: failed with error %d\n", mmc_hostname(mmc),
+				__func__, ret);
+	/*
+	 * There is no notion of suspend/resume for SD/MMC/SDIO
+	 * cards. So the host can be suspended/resumed without
+	 * worrying about its children.
+	 */
+	pm_suspend_ignore_children(&(pdev)->dev, true);
+
+	/*
+	 * MMC/SD/SDIO bus suspend/resume operations are defined
+	 * only for the slots that will be used for non-removable
+	 * media or for all slots when CONFIG_MMC_UNSAFE_RESUME is
+	 * defined. Otherwise, they simply become card removal and
+	 * insertion events during suspend and resume respectively.
+	 * Hence, enable run-time PM only for slots for which bus
+	 * suspend/resume operations are defined.
+	 */
+#ifdef CONFIG_MMC_UNSAFE_RESUME
+	/*
+	 * If this capability is set, the MMC core will enable/disable the
+	 * host for every claim/release operation on it. We use this
+	 * notification to increment/decrement the runtime PM usage count.
+	 */
+	mmc->caps |= MMC_CAP_DISABLE;
+	pm_runtime_enable(&(pdev)->dev);
+#else
+	if (mmc->caps & MMC_CAP_NONREMOVABLE) {
+		mmc->caps |= MMC_CAP_DISABLE;
+		pm_runtime_enable(&(pdev)->dev);
+	}
+#endif
+	setup_timer(&host->req_tout_timer, msmsdcc_req_tout_timer_hdlr,
+			(unsigned long)host);
+
 	mmc_add_host(mmc);
 
-	pr_info("%s: Qualcomm MSM SDCC at 0x%016llx irq %d,%d dma %d\n",
-		mmc_hostname(mmc), (unsigned long long)memres->start,
-		(unsigned int) cmd_irqres->start,
-		(unsigned int) host->stat_irq, host->dma.channel);
-	pr_info("%s: 4 bit data mode %s\n", mmc_hostname(mmc),
-		(mmc->caps & MMC_CAP_4_BIT_DATA ? "enabled" : "disabled"));
-	pr_info("%s: MMC clock %u -> %u Hz, PCLK %u Hz\n",
-		mmc_hostname(mmc), msmsdcc_fmin, msmsdcc_fmax, host->pclk_rate);
-	pr_info("%s: Slot eject status = %d\n", mmc_hostname(mmc), host->eject);
-	pr_info("%s: Power save feature enable = %d\n",
-		mmc_hostname(mmc), msmsdcc_pwrsave);
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	host->early_suspend.suspend = msmsdcc_early_suspend;
+	host->early_suspend.resume  = msmsdcc_late_resume;
+	host->early_suspend.level   = EARLY_SUSPEND_LEVEL_DISABLE_FB;
+	register_early_suspend(&host->early_suspend);
+#endif
 
-	if (host->dma.channel != -1) {
+	pr_info("%s: Qualcomm MSM SDCC-core at 0x%016llx irq %d,%d dma %d\n",
+	       mmc_hostname(mmc), (unsigned long long)core_memres->start,
+	       (unsigned int) core_irqres->start,
+	       (unsigned int) plat->status_irq, host->dma.channel);
+
+	pr_info("%s: 8 bit data mode %s\n", mmc_hostname(mmc),
+		(mmc->caps & MMC_CAP_8_BIT_DATA ? "enabled" : "disabled"));
+	pr_info("%s: 4 bit data mode %s\n", mmc_hostname(mmc),
+	       (mmc->caps & MMC_CAP_4_BIT_DATA ? "enabled" : "disabled"));
+	pr_info("%s: polling status mode %s\n", mmc_hostname(mmc),
+	       (mmc->caps & MMC_CAP_NEEDS_POLL ? "enabled" : "disabled"));
+	pr_info("%s: MMC clock %u -> %u Hz, PCLK %u Hz\n",
+	       mmc_hostname(mmc), msmsdcc_get_min_sup_clk_rate(host),
+		msmsdcc_get_max_sup_clk_rate(host), host->pclk_rate);
+	pr_info("%s: Slot eject status = %d\n", mmc_hostname(mmc),
+	       host->eject);
+	pr_info("%s: Power save feature enable = %d\n",
+	       mmc_hostname(mmc), msmsdcc_pwrsave);
+
+	if (host->is_dma_mode && host->dma.channel != -1) {
 		pr_info("%s: DM non-cached buffer at %p, dma_addr 0x%.8x\n",
-			mmc_hostname(mmc), host->dma.nc, host->dma.nc_busaddr);
+		       mmc_hostname(mmc), host->dma.nc, host->dma.nc_busaddr);
 		pr_info("%s: DM cmd busaddr 0x%.8x, cmdptr busaddr 0x%.8x\n",
-			mmc_hostname(mmc), host->dma.cmd_busaddr,
-			host->dma.cmdptr_busaddr);
+		       mmc_hostname(mmc), host->dma.cmd_busaddr,
+		       host->dma.cmdptr_busaddr);
+	} else if (host->is_sps_mode) {
+		pr_info("%s: SPS-BAM data transfer mode available\n",
+			mmc_hostname(mmc));
 	} else
 		pr_info("%s: PIO transfer enabled\n", mmc_hostname(mmc));
-	if (host->timer.function)
-		pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc));
 
+#if defined(CONFIG_DEBUG_FS)
+	msmsdcc_dbg_createhost(host);
+#endif
+	if (!plat->status_irq) {
+		ret = sysfs_create_group(&pdev->dev.kobj, &dev_attr_grp);
+		if (ret)
+			goto platform_irq_free;
+	}
 	return 0;
- cmd_irq_free:
-	free_irq(cmd_irqres->start, host);
- stat_irq_free:
-	if (host->stat_irq)
-		free_irq(host->stat_irq, host);
+
+ platform_irq_free:
+	del_timer_sync(&host->req_tout_timer);
+	pm_runtime_disable(&(pdev)->dev);
+	pm_runtime_set_suspended(&(pdev)->dev);
+
+	if (plat->status_irq)
+		free_irq(plat->status_irq, host);
+ sdiowakeup_irq_free:
+	wake_lock_destroy(&host->sdio_suspend_wlock);
+	if (plat->sdiowakeup_irq)
+		free_irq(plat->sdiowakeup_irq, host);
+ pio_irq_free:
+	if (plat->sdiowakeup_irq)
+		wake_lock_destroy(&host->sdio_wlock);
+	free_irq(core_irqres->start, host);
+ irq_free:
+	free_irq(core_irqres->start, host);
+ dml_exit:
+	if (host->is_sps_mode)
+		msmsdcc_dml_exit(host);
+ sps_exit:
+	if (host->is_sps_mode)
+		msmsdcc_sps_exit(host);
+ vreg_deinit:
+	msmsdcc_vreg_init(host, false);
  clk_disable:
-	msmsdcc_disable_clocks(host, 0);
+	clk_disable(host->clk);
  clk_put:
 	clk_put(host->clk);
+ pclk_disable:
+	if (!IS_ERR(host->pclk))
+		clk_disable(host->pclk);
  pclk_put:
-	clk_put(host->pclk);
+	if (!IS_ERR(host->pclk))
+		clk_put(host->pclk);
+	if (!IS_ERR_OR_NULL(host->dfab_pclk))
+		clk_disable(host->dfab_pclk);
+ dfab_pclk_put:
+	if (!IS_ERR_OR_NULL(host->dfab_pclk))
+		clk_put(host->dfab_pclk);
+ dma_free:
+	if (host->is_dma_mode) {
+		if (host->dmares)
+			dma_free_coherent(NULL,
+				sizeof(struct msmsdcc_nc_dmadata),
+				host->dma.nc, host->dma.nc_busaddr);
+	}
+ ioremap_free:
+	iounmap(host->base);
  host_free:
 	mmc_free_host(mmc);
  out:
 	return ret;
 }
 
-#ifdef CONFIG_PM
-#ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ
-static void
-do_resume_work(struct work_struct *work)
+static int msmsdcc_remove(struct platform_device *pdev)
 {
-	struct msmsdcc_host *host =
-		container_of(work, struct msmsdcc_host, resume_task);
-	struct mmc_host	*mmc = host->mmc;
+	struct mmc_host *mmc = mmc_get_drvdata(pdev);
+	struct mmc_platform_data *plat;
+	struct msmsdcc_host *host;
 
-	if (mmc) {
-		mmc_resume_host(mmc);
-		if (host->stat_irq)
-			enable_irq(host->stat_irq);
+	if (!mmc)
+		return -ENXIO;
+
+	if (pm_runtime_suspended(&(pdev)->dev))
+		pm_runtime_resume(&(pdev)->dev);
+
+	host = mmc_priv(mmc);
+
+	DBG(host, "Removing SDCC device = %d\n", pdev->id);
+	plat = host->plat;
+
+	if (!plat->status_irq)
+		sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
+
+	del_timer_sync(&host->req_tout_timer);
+	tasklet_kill(&host->dma_tlet);
+	tasklet_kill(&host->sps.tlet);
+	mmc_remove_host(mmc);
+
+	if (plat->status_irq)
+		free_irq(plat->status_irq, host);
+
+	wake_lock_destroy(&host->sdio_suspend_wlock);
+	if (plat->sdiowakeup_irq) {
+		wake_lock_destroy(&host->sdio_wlock);
+		irq_set_irq_wake(plat->sdiowakeup_irq, 0);
+		free_irq(plat->sdiowakeup_irq, host);
 	}
+
+	free_irq(host->core_irqres->start, host);
+	free_irq(host->core_irqres->start, host);
+
+	clk_put(host->clk);
+	if (!IS_ERR(host->pclk))
+		clk_put(host->pclk);
+	if (!IS_ERR_OR_NULL(host->dfab_pclk))
+		clk_put(host->dfab_pclk);
+
+	msmsdcc_vreg_init(host, false);
+
+	if (host->is_dma_mode) {
+		if (host->dmares)
+			dma_free_coherent(NULL,
+					sizeof(struct msmsdcc_nc_dmadata),
+					host->dma.nc, host->dma.nc_busaddr);
+	}
+
+	if (host->is_sps_mode) {
+		msmsdcc_dml_exit(host);
+		msmsdcc_sps_exit(host);
+	}
+
+	iounmap(host->base);
+	mmc_free_host(mmc);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	unregister_early_suspend(&host->early_suspend);
+#endif
+	pm_runtime_disable(&(pdev)->dev);
+	pm_runtime_set_suspended(&(pdev)->dev);
+
+	return 0;
+}
+
+#ifdef CONFIG_MSM_SDIO_AL
+int msmsdcc_sdio_al_lpm(struct mmc_host *mmc, bool enable)
+{
+	struct msmsdcc_host *host = mmc_priv(mmc);
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+	pr_debug("%s: %sabling LPM\n", mmc_hostname(mmc),
+			enable ? "En" : "Dis");
+
+	if (enable) {
+		if (!host->sdcc_irq_disabled) {
+			writel_relaxed(0, host->base + MMCIMASK0);
+			disable_irq(host->core_irqres->start);
+			host->sdcc_irq_disabled = 1;
+		}
+
+		if (host->clks_on) {
+			msmsdcc_setup_clocks(host, false);
+			host->clks_on = 0;
+		}
+
+		if (!host->sdio_gpio_lpm) {
+			spin_unlock_irqrestore(&host->lock, flags);
+			host->plat->sdio_lpm_gpio_setup(mmc_dev(mmc), 0);
+			spin_lock_irqsave(&host->lock, flags);
+			host->sdio_gpio_lpm = 1;
+		}
+
+		if (host->sdio_irq_disabled) {
+			msmsdcc_enable_irq_wake(host);
+			enable_irq(host->plat->sdiowakeup_irq);
+			host->sdio_irq_disabled = 0;
+		}
+	} else {
+		if (!host->sdio_irq_disabled) {
+			disable_irq_nosync(host->plat->sdiowakeup_irq);
+			host->sdio_irq_disabled = 1;
+			msmsdcc_disable_irq_wake(host);
+		}
+
+		if (host->sdio_gpio_lpm) {
+			spin_unlock_irqrestore(&host->lock, flags);
+			host->plat->sdio_lpm_gpio_setup(mmc_dev(mmc), 1);
+			spin_lock_irqsave(&host->lock, flags);
+			host->sdio_gpio_lpm = 0;
+		}
+
+		if (!host->clks_on) {
+			msmsdcc_setup_clocks(host, true);
+			host->clks_on = 1;
+		}
+
+		if (host->sdcc_irq_disabled) {
+			writel_relaxed(host->mci_irqenable,
+				       host->base + MMCIMASK0);
+			mb();
+			enable_irq(host->core_irqres->start);
+			host->sdcc_irq_disabled = 0;
+		}
+		wake_lock_timeout(&host->sdio_wlock, 1);
+	}
+	spin_unlock_irqrestore(&host->lock, flags);
+	return 0;
+}
+#else
+int msmsdcc_sdio_al_lpm(struct mmc_host *mmc, bool enable)
+{
+	return 0;
 }
 #endif
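+
+/*
+ * A minimal usage sketch for the LPM hook above (an illustration only,
+ * assuming the sdio_al client holds the struct mmc_host for this slot;
+ * it is not lifted from the sdio_al code itself):
+ *
+ *	msmsdcc_lpm_enable(mmc);	bus idle: SDCC IRQ masked, clocks
+ *					off, GPIOs in LPM, wakeup IRQ armed
+ *	...
+ *	msmsdcc_lpm_disable(mmc);	traffic expected: wakeup IRQ masked,
+ *					GPIOs restored, clocks and SDCC IRQ
+ *					back on
+ *
+ * msmsdcc_lpm_enable()/msmsdcc_lpm_disable() are the inline wrappers
+ * around msmsdcc_sdio_al_lpm() declared in msm_sdcc.h.
+ */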
 
-
+#ifdef CONFIG_PM
 static int
-msmsdcc_suspend(struct platform_device *dev, pm_message_t state)
+msmsdcc_runtime_suspend(struct device *dev)
 {
-	struct mmc_host *mmc = mmc_get_drvdata(dev);
+	struct mmc_host *mmc = dev_get_drvdata(dev);
+	struct msmsdcc_host *host = mmc_priv(mmc);
 	int rc = 0;
 
+	if (host->plat->is_sdio_al_client)
+		return 0;
+
 	if (mmc) {
-		struct msmsdcc_host *host = mmc_priv(mmc);
+		host->sdcc_suspending = 1;
+		mmc->suspend_task = current;
 
-		if (host->stat_irq)
-			disable_irq(host->stat_irq);
+		/*
+		 * If the clocks are already turned off by SDIO clients (as
+		 * part of LPM), then clocks should be turned on before
+		 * calling mmc_suspend_host() because mmc_suspend_host might
+		 * send some commands to the card. The clocks will be turned
+		 * off again after mmc_suspend_host. Thus for SD/MMC/SDIO
+		 * cards, clocks will be turned on before mmc_suspend_host
+		 * and turned off after mmc_suspend_host.
+		 */
+		mmc->ios.clock = host->clk_rate;
+		mmc->ops->set_ios(host->mmc, &host->mmc->ios);
 
-		if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
-			rc = mmc_suspend_host(mmc);
-		if (!rc)
-			msmsdcc_writel(host, 0, MMCIMASK0);
-		if (host->clks_on)
-			msmsdcc_disable_clocks(host, 0);
+		/*
+		 * The MMC core thinks the host is disabled by now, since
+		 * runtime suspend is scheduled after msmsdcc_disable()
+		 * is called. Thus, the MMC core will try to enable the
+		 * host while suspending it. This results in a synchronous
+		 * runtime resume request from within the runtime suspend
+		 * context; in order to complete that resume request it
+		 * would wait for the suspend to finish, but the runtime
+		 * suspend cannot proceed until the host is resumed, so
+		 * the two end up waiting on each other and the system
+		 * hangs. Hence, increase the pm usage count before
+		 * suspending the host so that any resume requests after
+		 * this point simply become pm usage counter increment
+		 * operations (see the sketch after this function).
+		 */
+		pm_runtime_get_noresume(dev);
+		rc = mmc_suspend_host(mmc);
+		pm_runtime_put_noidle(dev);
+
+		if (!rc) {
+			if (mmc->card && (mmc->card->type == MMC_TYPE_SDIO) &&
+				(mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ)) {
+				disable_irq(host->core_irqres->start);
+				host->sdcc_irq_disabled = 1;
+
+				/*
+				 * If MMC core level suspend is not supported,
+				 * turn off clocks to allow deep sleep (TCXO
+				 * shutdown).
+				 */
+				mmc->ios.clock = 0;
+				mmc->ops->set_ios(host->mmc, &host->mmc->ios);
+				enable_irq(host->core_irqres->start);
+				host->sdcc_irq_disabled = 0;
+
+				if (host->plat->sdiowakeup_irq) {
+					host->sdio_irq_disabled = 0;
+					msmsdcc_enable_irq_wake(host);
+					enable_irq(host->plat->sdiowakeup_irq);
+				}
+			}
+		}
+		host->sdcc_suspending = 0;
+		mmc->suspend_task = NULL;
+		if (rc && wake_lock_active(&host->sdio_suspend_wlock))
+			wake_unlock(&host->sdio_suspend_wlock);
 	}
 	return rc;
 }
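+
+/*
+ * The pattern referred to in the comment above, shown in isolation
+ * (a sketch only; "dev" is the SDCC platform device):
+ *
+ *	pm_runtime_get_noresume(dev);	bump usage count, no resume
+ *	rc = mmc_suspend_host(mmc);	may trigger host enable/resume
+ *	pm_runtime_put_noidle(dev);	drop usage count, no idle callback
+ *
+ * Any runtime-resume request raised while mmc_suspend_host() runs now
+ * only increments the usage counter instead of waiting on the suspend
+ * that is already in progress, which is what would otherwise deadlock.
+ */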
 
 static int
-msmsdcc_resume(struct platform_device *dev)
+msmsdcc_runtime_resume(struct device *dev)
 {
-	struct mmc_host *mmc = mmc_get_drvdata(dev);
+	struct mmc_host *mmc = dev_get_drvdata(dev);
+	struct msmsdcc_host *host = mmc_priv(mmc);
+	unsigned long flags;
+
+	if (host->plat->is_sdio_al_client)
+		return 0;
 
 	if (mmc) {
-		struct msmsdcc_host *host = mmc_priv(mmc);
+		if (mmc->card && mmc->card->type == MMC_TYPE_SDIO) {
+			if (host->sdcc_irq_disabled) {
+				enable_irq(host->core_irqres->start);
+				host->sdcc_irq_disabled = 0;
+			}
+		}
+		mmc->ios.clock = host->clk_rate;
+		mmc->ops->set_ios(host->mmc, &host->mmc->ios);
 
-		msmsdcc_enable_clocks(host);
+		spin_lock_irqsave(&host->lock, flags);
+		writel_relaxed(host->mci_irqenable, host->base + MMCIMASK0);
+		mb();
 
-		msmsdcc_writel(host, host->saved_irq0mask, MMCIMASK0);
+		if (mmc->card && (mmc->card->type == MMC_TYPE_SDIO) &&
+				(mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ) &&
+				!host->sdio_irq_disabled) {
+				if (host->plat->sdiowakeup_irq) {
+					disable_irq_nosync(
+						host->plat->sdiowakeup_irq);
+					msmsdcc_disable_irq_wake(host);
+					host->sdio_irq_disabled = 1;
+				}
+		}
 
-		if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
-			mmc_resume_host(mmc);
-		if (host->stat_irq)
-			enable_irq(host->stat_irq);
-#if BUSCLK_PWRSAVE
-		msmsdcc_disable_clocks(host, 1);
-#endif
+		spin_unlock_irqrestore(&host->lock, flags);
+
+		mmc_resume_host(mmc);
+
+		/*
+		 * FIXME: Clearing of these flags should be handled in the
+		 * clients' resume handlers.
+		 */
+		spin_lock_irqsave(&host->lock, flags);
+		mmc->pm_flags = 0;
+		spin_unlock_irqrestore(&host->lock, flags);
+
+		/*
+		 * After resuming the host, wait for some time so that
+		 * the SDIO work gets processed.
+		 */
+		if (mmc->card && (mmc->card->type == MMC_TYPE_SDIO)) {
+			if ((host->plat->cfg_mpm_sdiowakeup ||
+					host->plat->sdiowakeup_irq) &&
+					wake_lock_active(&host->sdio_wlock))
+				wake_lock_timeout(&host->sdio_wlock, 1);
+		}
+
+		wake_unlock(&host->sdio_suspend_wlock);
 	}
 	return 0;
 }
+
+static int msmsdcc_runtime_idle(struct device *dev)
+{
+	struct mmc_host *mmc = dev_get_drvdata(dev);
+	struct msmsdcc_host *host = mmc_priv(mmc);
+
+	if (host->plat->is_sdio_al_client)
+		return 0;
+
+	/* Idle timeout is not configurable for now */
+	pm_schedule_suspend(dev, MSM_MMC_IDLE_TIMEOUT);
+
+	return -EAGAIN;
+}
+
+static int msmsdcc_pm_suspend(struct device *dev)
+{
+	struct mmc_host *mmc = dev_get_drvdata(dev);
+	struct msmsdcc_host *host = mmc_priv(mmc);
+	int rc = 0;
+
+	if (host->plat->is_sdio_al_client)
+		return 0;
+
+	if (host->plat->status_irq)
+		disable_irq(host->plat->status_irq);
+
+	if (!pm_runtime_suspended(dev))
+		rc = msmsdcc_runtime_suspend(dev);
+
+	return rc;
+}
+
+static int msmsdcc_pm_resume(struct device *dev)
+{
+	struct mmc_host *mmc = dev_get_drvdata(dev);
+	struct msmsdcc_host *host = mmc_priv(mmc);
+	int rc = 0;
+
+	if (host->plat->is_sdio_al_client)
+		return 0;
+
+	rc = msmsdcc_runtime_resume(dev);
+	if (host->plat->status_irq) {
+		msmsdcc_check_status((unsigned long)host);
+		enable_irq(host->plat->status_irq);
+	}
+
+	/* Update the run-time PM status */
+	pm_runtime_disable(dev);
+	rc = pm_runtime_set_active(dev);
+	if (rc < 0)
+		pr_info("%s: %s: failed with error %d\n", mmc_hostname(mmc),
+				__func__, rc);
+	pm_runtime_enable(dev);
+
+	return rc;
+}
+
 #else
-#define msmsdcc_suspend	0
-#define msmsdcc_resume 0
+#define msmsdcc_runtime_suspend NULL
+#define msmsdcc_runtime_resume NULL
+#define msmsdcc_runtime_idle NULL
+#define msmsdcc_pm_suspend NULL
+#define msmsdcc_pm_resume NULL
 #endif
 
+static const struct dev_pm_ops msmsdcc_dev_pm_ops = {
+	.runtime_suspend = msmsdcc_runtime_suspend,
+	.runtime_resume  = msmsdcc_runtime_resume,
+	.runtime_idle    = msmsdcc_runtime_idle,
+	.suspend 	 = msmsdcc_pm_suspend,
+	.resume		 = msmsdcc_pm_resume,
+};
+
 static struct platform_driver msmsdcc_driver = {
 	.probe		= msmsdcc_probe,
-	.suspend	= msmsdcc_suspend,
-	.resume		= msmsdcc_resume,
+	.remove		= msmsdcc_remove,
 	.driver		= {
 		.name	= "msm_sdcc",
+		.pm	= &msmsdcc_dev_pm_ops,
 	},
 };
 
 static int __init msmsdcc_init(void)
 {
+#if defined(CONFIG_DEBUG_FS)
+	int ret = 0;
+	ret = msmsdcc_dbg_init();
+	if (ret) {
+		pr_err("Failed to create debugfs dir\n");
+		return ret;
+	}
+#endif
 	return platform_driver_register(&msmsdcc_driver);
 }
 
 static void __exit msmsdcc_exit(void)
 {
 	platform_driver_unregister(&msmsdcc_driver);
+
+#if defined(CONFIG_DEBUG_FS)
+	debugfs_remove(debugfs_file);
+	debugfs_remove(debugfs_dir);
+#endif
 }
 
 module_init(msmsdcc_init);
 module_exit(msmsdcc_exit);
 
-MODULE_DESCRIPTION("Qualcomm MSM 7X00A Multimedia Card Interface driver");
+MODULE_DESCRIPTION("Qualcomm Multimedia Card Interface driver");
 MODULE_LICENSE("GPL");
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int
+msmsdcc_dbg_state_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t
+msmsdcc_dbg_state_read(struct file *file, char __user *ubuf,
+		       size_t count, loff_t *ppos)
+{
+	struct msmsdcc_host *host = (struct msmsdcc_host *) file->private_data;
+	char buf[1024];
+	int max, i;
+
+	i = 0;
+	max = sizeof(buf) - 1;
+
+	i += scnprintf(buf + i, max - i, "STAT: %p %p %p\n", host->curr.mrq,
+		       host->curr.cmd, host->curr.data);
+	if (host->curr.cmd) {
+		struct mmc_command *cmd = host->curr.cmd;
+
+		i += scnprintf(buf + i, max - i, "CMD : %.8x %.8x %.8x\n",
+			      cmd->opcode, cmd->arg, cmd->flags);
+	}
+	if (host->curr.data) {
+		struct mmc_data *data = host->curr.data;
+		i += scnprintf(buf + i, max - i,
+			      "DAT0: %.8x %.8x %.8x %.8x %.8x %.8x\n",
+			      data->timeout_ns, data->timeout_clks,
+			      data->blksz, data->blocks, data->error,
+			      data->flags);
+		i += scnprintf(buf + i, max - i, "DAT1: %.8x %.8x %.8x %p\n",
+			      host->curr.xfer_size, host->curr.xfer_remain,
+			      host->curr.data_xfered, host->dma.sg);
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, buf, i);
+}
+
+static const struct file_operations msmsdcc_dbg_state_ops = {
+	.read	= msmsdcc_dbg_state_read,
+	.open	= msmsdcc_dbg_state_open,
+};
+
+static void msmsdcc_dbg_createhost(struct msmsdcc_host *host)
+{
+	if (debugfs_dir) {
+		debugfs_file = debugfs_create_file(mmc_hostname(host->mmc),
+							0644, debugfs_dir, host,
+							&msmsdcc_dbg_state_ops);
+	}
+}
+
+static int __init msmsdcc_dbg_init(void)
+{
+	int err;
+
+	debugfs_dir = debugfs_create_dir("msmsdcc", 0);
+	if (IS_ERR(debugfs_dir)) {
+		err = PTR_ERR(debugfs_dir);
+		debugfs_dir = NULL;
+		return err;
+	}
+
+	return 0;
+}
+#endif
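+
+/*
+ * Usage sketch for the debugfs interface above (assuming debugfs is
+ * mounted at /sys/kernel/debug): each registered host gets a file named
+ * after mmc_hostname(), e.g.
+ *
+ *	cat /sys/kernel/debug/msmsdcc/mmc0
+ *
+ * which returns a snapshot of the in-flight request: a STAT line with
+ * the current mrq/cmd/data pointers, a CMD line (opcode, argument,
+ * flags) while a command is active, and DAT0/DAT1 lines with the
+ * timeout, block and transfer counters while data is in flight.
+ */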
diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h
index 42d7bbc..c6acea9 100644
--- a/drivers/mmc/host/msm_sdcc.h
+++ b/drivers/mmc/host/msm_sdcc.h
@@ -2,6 +2,7 @@
  *  linux/drivers/mmc/host/msmsdcc.h - QCT MSM7K SDC Controller
  *
  *  Copyright (C) 2008 Google, All Rights Reserved.
+ *  Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -13,10 +14,23 @@
 #ifndef _MSM_SDCC_H
 #define _MSM_SDCC_H
 
-#define MSMSDCC_CRCI_SDC1	6
-#define MSMSDCC_CRCI_SDC2	7
-#define MSMSDCC_CRCI_SDC3	12
-#define MSMSDCC_CRCI_SDC4	13
+#include <linux/types.h>
+
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/wakelock.h>
+#include <linux/earlysuspend.h>
+#include <mach/sps.h>
+
+#include <asm/sizes.h>
+#include <asm/mach/mmc.h>
+#include <mach/dma.h>
 
 #define MMCIPOWER		0x000
 #define MCI_PWR_OFF		0x00
@@ -27,10 +41,13 @@
 #define MMCICLOCK		0x004
 #define MCI_CLK_ENABLE		(1 << 8)
 #define MCI_CLK_PWRSAVE		(1 << 9)
-#define MCI_CLK_WIDEBUS		(1 << 10)
+#define MCI_CLK_WIDEBUS_1	(0 << 10)
+#define MCI_CLK_WIDEBUS_4	(2 << 10)
+#define MCI_CLK_WIDEBUS_8	(3 << 10)
 #define MCI_CLK_FLOWENA		(1 << 12)
 #define MCI_CLK_INVERTOUT	(1 << 13)
-#define MCI_CLK_SELECTIN	(1 << 14)
+#define MCI_CLK_SELECTIN	(1 << 15)
+#define IO_PAD_PWR_SWITCH	(1 << 21)
 
 #define MMCIARGUMENT		0x008
 #define MMCICOMMAND		0x00c
@@ -44,6 +61,7 @@
 #define MCI_CSPM_MCIABORT	(1 << 13)
 #define MCI_CSPM_CCSENABLE	(1 << 14)
 #define MCI_CSPM_CCSDISABLE	(1 << 15)
+#define MCI_CSPM_AUTO_CMD19	(1 << 16)
 
 
 #define MMCIRESPCMD		0x010
@@ -86,8 +104,9 @@
 #define MCI_SDIOINTR		(1 << 22)
 #define MCI_PROGDONE		(1 << 23)
 #define MCI_ATACMDCOMPL		(1 << 24)
-#define MCI_SDIOINTOPER		(1 << 25)
+#define MCI_SDIOINTROPE		(1 << 25)
 #define MCI_CCSTIMEOUT		(1 << 26)
+#define MCI_AUTOCMD19TIMEOUT	(1 << 30)
 
 #define MMCICLEAR		0x038
 #define MCI_CMDCRCFAILCLR	(1 << 0)
@@ -99,8 +118,23 @@
 #define MCI_CMDRESPENDCLR	(1 << 6)
 #define MCI_CMDSENTCLR		(1 << 7)
 #define MCI_DATAENDCLR		(1 << 8)
+#define MCI_STARTBITERRCLR	(1 << 9)
 #define MCI_DATABLOCKENDCLR	(1 << 10)
 
+#define MCI_SDIOINTRCLR		(1 << 22)
+#define MCI_PROGDONECLR		(1 << 23)
+#define MCI_ATACMDCOMPLCLR	(1 << 24)
+#define MCI_SDIOINTROPECLR	(1 << 25)
+#define MCI_CCSTIMEOUTCLR 	(1 << 26)
+
+#define MCI_CLEAR_STATIC_MASK	\
+	(MCI_CMDCRCFAILCLR|MCI_DATACRCFAILCLR|MCI_CMDTIMEOUTCLR|\
+	MCI_DATATIMEOUTCLR|MCI_TXUNDERRUNCLR|MCI_RXOVERRUNCLR|  \
+	MCI_CMDRESPENDCLR|MCI_CMDSENTCLR|MCI_DATAENDCLR|	\
+	MCI_STARTBITERRCLR|MCI_DATABLOCKENDCLR|MCI_SDIOINTRCLR|	\
+	MCI_SDIOINTROPECLR|MCI_PROGDONECLR|MCI_ATACMDCOMPLCLR|	\
+	MCI_CCSTIMEOUTCLR)
+
 #define MMCIMASK0		0x03c
 #define MCI_CMDCRCFAILMASK	(1 << 0)
 #define MCI_DATACRCFAILMASK	(1 << 1)
@@ -128,17 +162,37 @@
 #define MCI_ATACMDCOMPLMASK	(1 << 24)
 #define MCI_SDIOINTOPERMASK	(1 << 25)
 #define MCI_CCSTIMEOUTMASK	(1 << 26)
+#define MCI_AUTOCMD19TIMEOUTMASK (1 << 30)
 
 #define MMCIMASK1		0x040
 #define MMCIFIFOCNT		0x044
 #define MCICCSTIMER		0x058
+#define MCI_DLL_CONFIG		0x060
+#define MCI_DLL_EN		(1 << 16)
+#define MCI_CDR_EN		(1 << 17)
+#define MCI_CK_OUT_EN		(1 << 18)
+#define MCI_CDR_EXT_EN		(1 << 19)
+#define MCI_DLL_PDN		(1 << 29)
+#define MCI_DLL_RST		(1 << 30)
+
+#define MCI_DLL_STATUS		0x068
+#define MCI_DLL_LOCK		(1 << 7)
 
 #define MMCIFIFO		0x080 /* to 0x0bc */
 
+#define MCI_TEST_INPUT		0x0D4
+
 #define MCI_IRQENABLE	\
 	(MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK|	\
 	MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK|	\
-	MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATAENDMASK|MCI_PROGDONEMASK)
+	MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATAENDMASK|		\
+	MCI_PROGDONEMASK|MCI_AUTOCMD19TIMEOUTMASK)
+
+#define MCI_IRQ_PIO 	\
+	(MCI_RXDATAAVLBLMASK | MCI_TXDATAAVLBLMASK | 	\
+	MCI_RXFIFOEMPTYMASK | MCI_TXFIFOEMPTYMASK | MCI_RXFIFOFULLMASK |\
+	MCI_TXFIFOFULLMASK | MCI_RXFIFOHALFFULLMASK |			\
+	MCI_TXFIFOHALFEMPTYMASK | MCI_RXACTIVEMASK | MCI_TXACTIVEMASK)
 
 /*
  * The size of the FIFO in bytes.
@@ -149,6 +203,14 @@
 
 #define NR_SG		32
 
+#define MSM_MMC_IDLE_TIMEOUT	10000 /* msecs */
+
+/*
+ * Set the request timeout to 10secs to allow
+ * bad cards/controller to respond.
+ */
+#define MSM_MMC_REQ_TIMEOUT	10000 /* msecs */
+
 struct clk;
 
 struct msmsdcc_nc_dmadata {
@@ -171,8 +233,7 @@
 	int				channel;
 	struct msmsdcc_host		*host;
 	int				busy; /* Set if DM is busy */
-	int				active;
-	unsigned int			result;
+	unsigned int 			result;
 	struct msm_dmov_errdata		err;
 };
 
@@ -193,29 +254,48 @@
 	int			user_pages;
 };
 
-struct msmsdcc_stats {
-	unsigned int reqs;
-	unsigned int cmds;
-	unsigned int cmdpoll_hits;
-	unsigned int cmdpoll_misses;
+struct msmsdcc_sps_ep_conn_data {
+	struct sps_pipe			*pipe_handle;
+	struct sps_connect		config;
+	struct sps_register_event	event;
+};
+
+struct msmsdcc_sps_data {
+	struct msmsdcc_sps_ep_conn_data	prod;
+	struct msmsdcc_sps_ep_conn_data	cons;
+	struct sps_event_notify		notify;
+	enum dma_data_direction		dir;
+	struct scatterlist		*sg;
+	int				num_ents;
+	u32				bam_handle;
+	unsigned int			src_pipe_index;
+	unsigned int			dest_pipe_index;
+	unsigned int			busy;
+	unsigned int			xfer_req_cnt;
+	struct tasklet_struct		tlet;
+
 };
 
 struct msmsdcc_host {
-	struct resource		*cmd_irqres;
-	struct resource		*pio_irqres;
-	struct resource		*memres;
+	struct resource		*core_irqres;
+	struct resource		*bam_irqres;
+	struct resource		*core_memres;
+	struct resource		*bam_memres;
+	struct resource		*dml_memres;
 	struct resource		*dmares;
 	void __iomem		*base;
+	void __iomem		*dml_base;
+	void __iomem		*bam_base;
+
 	int			pdev_id;
-	unsigned int		stat_irq;
 
 	struct msmsdcc_curr_req	curr;
 
 	struct mmc_host		*mmc;
 	struct clk		*clk;		/* main MMC bus clock */
 	struct clk		*pclk;		/* SDCC peripheral bus clock */
+	struct clk		*dfab_pclk;	/* Daytona Fabric SDCC clock */
 	unsigned int		clks_on;	/* set if clocks are enabled */
-	struct timer_list	busclk_timer;
 
 	unsigned int		eject;		/* eject state */
 
@@ -223,30 +303,67 @@
 
 	unsigned int		clk_rate;	/* Current clock rate */
 	unsigned int		pclk_rate;
+	unsigned int		ddr_doubled_clk_rate;
 
 	u32			pwr;
-	u32			saved_irq0mask;	/* MMCIMASK0 reg value */
-	struct msm_mmc_platform_data *plat;
+	struct mmc_platform_data *plat;
 
-	struct timer_list	timer;
 	unsigned int		oldstat;
 
 	struct msmsdcc_dma_data	dma;
+	struct msmsdcc_sps_data sps;
+	bool			is_dma_mode;
+	bool			is_sps_mode;
 	struct msmsdcc_pio_data	pio;
-	int			cmdpoll;
-	struct msmsdcc_stats	stats;
 
-	struct tasklet_struct	dma_tlet;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	struct early_suspend early_suspend;
+	int polling_enabled;
+#endif
+
+	struct tasklet_struct 	dma_tlet;
+
+	unsigned int prog_scan;
+	unsigned int prog_enable;
+
 	/* Command parameters */
 	unsigned int		cmd_timeout;
 	unsigned int		cmd_pio_irqmask;
 	unsigned int		cmd_datactrl;
 	struct mmc_command	*cmd_cmd;
-	u32			cmd_c;
-	bool			gpio_config_status;
+	u32					cmd_c;
 
-	bool prog_scan;
-	bool prog_enable;
+	unsigned int	mci_irqenable;
+	unsigned int	dummy_52_needed;
+	unsigned int	dummy_52_state;
+	unsigned int	sdio_irq_disabled;
+	struct wake_lock	sdio_wlock;
+	struct wake_lock	sdio_suspend_wlock;
+	unsigned int    sdcc_suspending;
+
+	unsigned int sdcc_irq_disabled;
+	struct timer_list req_tout_timer;
+	bool io_pad_pwr_switch;
+	bool cmd19_tuning_in_progress;
+	bool tuning_needed;
+	bool sdio_gpio_lpm;
+	bool irq_wake_enabled;
 };
 
+int msmsdcc_set_pwrsave(struct mmc_host *mmc, int pwrsave);
+int msmsdcc_sdio_al_lpm(struct mmc_host *mmc, bool enable);
+
+#ifdef CONFIG_MSM_SDIO_AL
+
+static inline int msmsdcc_lpm_enable(struct mmc_host *mmc)
+{
+	return msmsdcc_sdio_al_lpm(mmc, true);
+}
+
+static inline int msmsdcc_lpm_disable(struct mmc_host *mmc)
+{
+	return msmsdcc_sdio_al_lpm(mmc, false);
+}
+#endif
+
 #endif
diff --git a/drivers/mmc/host/msm_sdcc_dml.c b/drivers/mmc/host/msm_sdcc_dml.c
new file mode 100644
index 0000000..320f52e
--- /dev/null
+++ b/drivers/mmc/host/msm_sdcc_dml.c
@@ -0,0 +1,303 @@
+/*
+ * linux/drivers/mmc/host/msm_sdcc_dml.c - Qualcomm MSM SDCC DML Driver
+ *
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/io.h>
+#include <asm/sizes.h>
+#include <mach/msm_iomap.h>
+
+#include "msm_sdcc_dml.h"
+
+/*
+ * DML register definitions
+ */
+
+/* DML config register definition */
+#define DML_CONFIG 0x0000
+#define PRODUCER_CRCI_DIS   0x00
+#define PRODUCER_CRCI_X_SEL 0x01
+#define PRODUCER_CRCI_Y_SEL 0x02
+#define PRODUCER_CRCI_MSK   0x3
+#define CONSUMER_CRCI_DIS   (0x00 << 2)
+#define CONSUMER_CRCI_X_SEL (0x01 << 2)
+#define CONSUMER_CRCI_Y_SEL (0x02 << 2)
+#define CONSUMER_CRCI_MSK   (0x3 << 2)
+#define PRODUCER_TRANS_END_EN (1 << 4)
+#define BYPASS (1 << 16)
+#define DIRECT_MODE (1 << 17)
+#define INFINITE_CONS_TRANS (1 << 18)
+
+/* DML status register definition */
+#define DML_STATUS 0x0004
+#define PRODUCER_IDLE (1 << 0)
+#define CONSUMER_IDLE (1 << 16)
+
+/*
+ * DML SW RESET register definition
+ * NOTE: a write to this register resets the DML core.
+ * All internal state information will be lost and all
+ * register values will be reset as well
+ */
+#define DML_SW_RESET 0x0008
+
+/*
+ * DML PRODUCER START register definition
+ * NOTE: A write to this register triggers the DML
+ * Producer state machine. No SW register values will be
+ * altered.
+ */
+#define DML_PRODUCER_START 0x000C
+
+/*
+ * DML CONSUMER START register definition
+ * NOTE: A write to this register triggers the DML
+ * Consumer state machine. No SW register values will be
+ * altered.
+ */
+#define DML_CONSUMER_START 0x0010
+
+/*
+ * DML producer pipe logical size register definition
+ * NOTE: This register holds the size of the producer pipe
+ * (in units of bytes) to which the peripheral can
+ * keep writing data when it is the PRODUCER.
+ */
+#define DML_PRODUCER_PIPE_LOGICAL_SIZE 0x0014
+
+/*
+ * DML consumer pipe logical size register definition
+ * NOTE: This register holds the size of the consumer pipe
+ * (in units of bytes) from which the peripheral
+ * can keep reading data when it is the CONSUMER.
+ */
+#define DML_CONSUMER_PIPE_LOGICAL_SIZE 0x00018
+
+/*
+ * DML PIPE ID register
+ * This register holds the pipe IDs that service
+ * the producer and consumer sides of the peripheral.
+ */
+#define DML_PIPE_ID 0x0001C
+#define PRODUCER_PIPE_ID_SHFT 0
+#define PRODUCER_PIPE_ID_MSK 0x1f
+#define CONSUMER_PIPE_ID_SHFT 16
+#define CONSUMER_PIPE_ID_MSK (0x1f << 16)
+
+/*
+ * DML Producer trackers register definition.
+ * This register is for debug purposes only. It reflects
+ * the value of the producer block and transaction counters
+ * when read. The values may be changing dynamically while
+ * a transaction is in progress.
+ */
+#define DML_PRODUCER_TRACKERS 0x00020
+#define PROD_BLOCK_CNT_SHFT 0
+#define PROD_BLOCK_CNT_MSK  0xffff
+#define PROD_TRANS_CNT_SHFT 16
+#define PROD_TRANS_CNT_MSK  (0xffff << 16)
+
+/*
+ * DML Producer BAM block size register definition.
+ * This register holds the block size, in units of bytes,
+ * associated with the Producer BAM. The DML asserts the
+ * block_end side band signal to the BAM whenever the producer
+ * side of the peripheral has generated the said amount of data.
+ * This register value should be an integral multiple of the
+ * Producer CRCI Block Size.
+ */
+#define DML_PRODUCER_BAM_BLOCK_SIZE 0x00024
+
+/*
+ * DML Producer BAM Transaction size definition.
+ * This register holds the transaction size, in units of bytes,
+ * associated with the Producer BAM. The DML asserts the transaction_end
+ * side band signal to the BAM whenever the producer side of the peripheral
+ * has generated the said amount of data.
+ */
+#define DML_PRODUCER_BAM_TRANS_SIZE 0x00028
+
+/*
+ * DML Direct mode base address definition
+ * This register is used whenever the DIRECT_MODE bit
+ * in the config register is set.
+ */
+#define DML_DIRECT_MODE_BASE_ADDR 0x002C
+#define PRODUCER_BASE_ADDR_BSHFT 0
+#define PRODUCER_BASE_ADDR_BMSK  0xffff
+#define CONSUMER_BASE_ADDR_BSHFT 16
+#define CONSUMER_BASE_ADDR_BMSK  (0xffff << 16)
+
+/*
+ * DML debug and status register definitions.
+ * These are read-only registers, useful for debugging.
+ */
+#define DML_DEBUG 0x0030
+#define DML_BAM_SIDE_STATUS_1 0x0034
+#define DML_BAM_SIDE_STATUS_2 0x0038
+
+/* other definitions */
+#define PRODUCER_PIPE_LOGICAL_SIZE 4096
+#define CONSUMER_PIPE_LOGICAL_SIZE 4096
+
+#ifdef CONFIG_MMC_MSM_SPS_SUPPORT
+/**
+ * Initialize DML HW connected with SDCC core
+ *
+ */
+int msmsdcc_dml_init(struct msmsdcc_host *host)
+{
+	int rc = 0;
+	u32 config = 0;
+	void __iomem *dml_base;
+
+	if (!host->dml_base) {
+		host->dml_base = ioremap(host->dml_memres->start,
+					resource_size(host->dml_memres));
+		if (!host->dml_base) {
+			pr_err("%s: DML ioremap() failed!!! phys_addr=0x%x,"
+				" size=0x%x\n", mmc_hostname(host->mmc),
+				host->dml_memres->start,
+				(unsigned int)
+				resource_size(host->dml_memres));
+			rc = -ENOMEM;
+			goto out;
+		}
+		pr_info("%s: Qualcomm MSM SDCC-DML at 0x%016llx\n",
+			mmc_hostname(host->mmc),
+			(unsigned long long)host->dml_memres->start);
+	}
+
+	dml_base = host->dml_base;
+	/* Reset the DML block */
+	writel_relaxed(1, (dml_base + DML_SW_RESET));
+
+	/* Disable the producer and consumer CRCI */
+	config = (PRODUCER_CRCI_DIS | CONSUMER_CRCI_DIS);
+	/*
+	 * Disable the bypass mode. Bypass mode will only be used
+	 * if the data transfer is to happen in PIO mode and we don't
+	 * want the BAM interface to connect with the SDCC-DML.
+	 */
+	config &= ~BYPASS;
+	/*
+	 * Disable direct mode as we don't want the DML to MASTER the
+	 * AHB bus. The BAM connected with the DML should MASTER the
+	 * AHB bus.
+	 */
+	config &= ~DIRECT_MODE;
+	/*
+	 * Disable infinite mode transfer as we won't be doing any
+	 * infinite size data transfers. All data transfers will be
+	 * of finite size.
+	 */
+	config &= ~INFINITE_CONS_TRANS;
+	writel_relaxed(config, (dml_base + DML_CONFIG));
+
+	/*
+	 * Initialize the logical BAM pipe size for producer
+	 * and consumer.
+	 */
+	writel_relaxed(PRODUCER_PIPE_LOGICAL_SIZE,
+		(dml_base + DML_PRODUCER_PIPE_LOGICAL_SIZE));
+	writel_relaxed(CONSUMER_PIPE_LOGICAL_SIZE,
+		(dml_base + DML_CONSUMER_PIPE_LOGICAL_SIZE));
+
+	/* Initialize Producer/consumer pipe id */
+	writel_relaxed(host->sps.src_pipe_index |
+		(host->sps.dest_pipe_index << CONSUMER_PIPE_ID_SHFT),
+		(dml_base + DML_PIPE_ID));
+	mb();
+out:
+	return rc;
+}
+
+/**
+ * Soft reset DML HW
+ *
+ */
+void msmsdcc_dml_reset(struct msmsdcc_host *host)
+{
+	/* Reset the DML block */
+	writel_relaxed(1, (host->dml_base + DML_SW_RESET));
+	mb();
+}
+
+/**
+ * Checks whether the DML HW is busy.
+ *
+ */
+bool msmsdcc_is_dml_busy(struct msmsdcc_host *host)
+{
+	return !(readl_relaxed(host->dml_base + DML_STATUS) & PRODUCER_IDLE) ||
+		!(readl_relaxed(host->dml_base + DML_STATUS) & CONSUMER_IDLE);
+}
+
+/**
+ * Start data transfer.
+ *
+ */
+void msmsdcc_dml_start_xfer(struct msmsdcc_host *host, struct mmc_data *data)
+{
+	u32 config;
+	void __iomem *dml_base = host->dml_base;
+
+	if (data->flags & MMC_DATA_READ) {
+		/* Read operation: configure DML for producer operation */
+		/* Set producer CRCI-x and disable consumer CRCI */
+		config = readl_relaxed(dml_base + DML_CONFIG);
+		config = (config & ~PRODUCER_CRCI_MSK) | PRODUCER_CRCI_X_SEL;
+		config = (config & ~CONSUMER_CRCI_MSK) | CONSUMER_CRCI_DIS;
+		writel_relaxed(config, (dml_base + DML_CONFIG));
+
+		/* Set the Producer BAM block size */
+		writel_relaxed(data->blksz, (dml_base +
+					DML_PRODUCER_BAM_BLOCK_SIZE));
+
+		/* Set Producer BAM Transaction size */
+		writel_relaxed(host->curr.xfer_size,
+			(dml_base + DML_PRODUCER_BAM_TRANS_SIZE));
+		/* Set Producer Transaction End bit */
+		writel_relaxed((readl_relaxed(dml_base + DML_CONFIG)
+			| PRODUCER_TRANS_END_EN),
+			(dml_base + DML_CONFIG));
+		/* Trigger producer */
+		writel_relaxed(1, (dml_base + DML_PRODUCER_START));
+	} else {
+		/* Write operation: configure DML for consumer operation */
+		/* Set consumer CRCI-x and disable producer CRCI */
+		config = readl_relaxed(dml_base + DML_CONFIG);
+		config = (config & ~CONSUMER_CRCI_MSK) | CONSUMER_CRCI_X_SEL;
+		config = (config & ~PRODUCER_CRCI_MSK) | PRODUCER_CRCI_DIS;
+		writel_relaxed(config, (dml_base + DML_CONFIG));
+		/* Clear Producer Transaction End bit */
+		writel_relaxed((readl_relaxed(dml_base + DML_CONFIG)
+			& ~PRODUCER_TRANS_END_EN),
+			(dml_base + DML_CONFIG));
+		/* Trigger consumer */
+		writel_relaxed(1, (dml_base + DML_CONSUMER_START));
+	}
+	mb();
+}
+
+/**
+ * Deinitialize DML HW connected with SDCC core
+ *
+ */
+void msmsdcc_dml_exit(struct msmsdcc_host *host)
+{
+	/* Put DML block in reset state before exiting */
+	msmsdcc_dml_reset(host);
+	iounmap(host->dml_base);
+}
+#endif /* CONFIG_MMC_MSM_SPS_SUPPORT */
diff --git a/drivers/mmc/host/msm_sdcc_dml.h b/drivers/mmc/host/msm_sdcc_dml.h
new file mode 100644
index 0000000..f0e1b78
--- /dev/null
+++ b/drivers/mmc/host/msm_sdcc_dml.h
@@ -0,0 +1,105 @@
+/*
+ *  linux/drivers/mmc/host/msm_sdcc_dml.h - Qualcomm SDCC DML driver
+ *					    header file
+ *
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MSM_SDCC_DML_H
+#define _MSM_SDCC_DML_H
+
+#include <linux/types.h>
+#include <linux/mmc/host.h>
+
+#include "msm_sdcc.h"
+
+#ifdef CONFIG_MMC_MSM_SPS_SUPPORT
+/**
+ * Initialize DML HW connected with SDCC core
+ *
+ * This function initializes the DML HW.
+ *
+ * This function should only be called once,
+ * typically during driver probe.
+ *
+ * @host - Pointer to sdcc host structure
+ *
+ * @return - 0 if successful else negative value.
+ *
+ */
+int msmsdcc_dml_init(struct msmsdcc_host *host);
+
+/**
+ * Start data transfer.
+ *
+ * This function configures the DML HW registers with
+ * the data transfer direction and size.
+ *
+ * This function should be called after submitting the
+ * data transfer request to the SPS HW and before kick
+ * starting the data transfer in the SDCC core.
+ *
+ * @host - Pointer to sdcc host structure
+ * @data - Pointer to mmc_data structure
+ *
+ */
+void msmsdcc_dml_start_xfer(struct msmsdcc_host *host, struct mmc_data *data);
+
+/**
+ * Checks whether the DML HW is busy.
+ *
+ * @host - Pointer to sdcc host structure
+ *
+ * @return - 1 if DML HW is busy with data transfer
+ *           0 if DML HW is IDLE.
+ *
+ */
+bool msmsdcc_is_dml_busy(struct msmsdcc_host *host);
+
+/**
+ * Soft reset DML HW
+ *
+ * This function gives a soft reset to the DML HW.
+ *
+ * This function should be called to reset the DML HW
+ * if a data transfer error is detected.
+ *
+ * @host - Pointer to sdcc host structure
+ *
+ */
+void msmsdcc_dml_reset(struct msmsdcc_host *host);
+
+/**
+ * Deinitialize DML HW connected with SDCC core
+ *
+ * This function resets the DML HW and unmaps the DML
+ * register region.
+ *
+ * This function should only be called once,
+ * typically during driver remove.
+ *
+ * @host - Pointer to sdcc host structure
+ *
+ */
+void msmsdcc_dml_exit(struct msmsdcc_host *host);
+#else
+static inline int msmsdcc_dml_init(struct msmsdcc_host *host) { return 0; }
+static inline void msmsdcc_dml_start_xfer(struct msmsdcc_host *host,
+				struct mmc_data *data) { }
+static inline bool msmsdcc_is_dml_busy(
+				struct msmsdcc_host *host) { return false; }
+static inline void msmsdcc_dml_reset(struct msmsdcc_host *host) { }
+static inline void msmsdcc_dml_exit(struct msmsdcc_host *host) { }
+#endif /* CONFIG_MMC_MSM_SPS_SUPPORT */
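+
+/*
+ * Call-order sketch for the API above (a summary of the notes attached
+ * to each prototype, not an additional requirement):
+ *
+ *	msmsdcc_dml_init(host);			once, at probe
+ *	...
+ *	if (!msmsdcc_is_dml_busy(host))
+ *		msmsdcc_dml_start_xfer(host, data);	per data request,
+ *							after the SPS request
+ *							is queued and before
+ *							the SDCC transfer is
+ *							kicked off
+ *	...
+ *	msmsdcc_dml_reset(host);		on data transfer errors
+ *	...
+ *	msmsdcc_dml_exit(host);			once, at remove
+ */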
+
+#endif /* _MSM_SDCC_DML_H */
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 7fc5848..c35a7c7 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2456,6 +2456,22 @@
 	if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
 		host->timeout_clk *= 1000;
 
+	/*
+	 * In case of Host Controller v3.00, find out whether clock
+	 * multiplier is supported.
+	 */
+	host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >>
+		SDHCI_CLOCK_MUL_SHIFT;
+
+	/*
+	 * In case the value in Clock Multiplier is 0, then programmable
+	 * clock mode is not supported, otherwise the actual clock
+	 * multiplier is one more than the value of Clock Multiplier
+	 * in the Capabilities Register.
+	 */
+	if (host->clk_mul)
+		host->clk_mul += 1;
+
 	/*
 	 * In case of Host Controller v3.00, find out whether clock
 	 * multiplier is supported.
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 35081ce..1b48dae 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -49,6 +49,16 @@
 	  say M here and read <file:Documentation/kbuild/modules.txt>.
 	  The module will be called ms02-nv.
 
+config MTD_MSM_NAND
+	tristate "MSM NAND Device Support"
+	depends on MTD && ARCH_MSM
+	select CRC16
+	select BITREVERSE
+	select MTD_NAND_IDS
+	default y
+	help
+	  Support for some NAND chips connected to the MSM NAND controller.
+
 config MTD_DATAFLASH
 	tristate "Support for AT45xxx DataFlash"
 	depends on SPI_MASTER && EXPERIMENTAL
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index f3226b1..fe959e8 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -11,6 +11,7 @@
 obj-$(CONFIG_MTD_PHRAM)		+= phram.o
 obj-$(CONFIG_MTD_PMC551)	+= pmc551.o
 obj-$(CONFIG_MTD_MS02NV)	+= ms02-nv.o
+obj-$(CONFIG_MTD_MSM_NAND)	+= msm_nand.o
 obj-$(CONFIG_MTD_MTDRAM)	+= mtdram.o
 obj-$(CONFIG_MTD_LART)		+= lart.o
 obj-$(CONFIG_MTD_BLOCK2MTD)	+= block2mtd.o
diff --git a/drivers/mtd/devices/msm_nand.c b/drivers/mtd/devices/msm_nand.c
new file mode 100644
index 0000000..d1a1353
--- /dev/null
+++ b/drivers/mtd/devices/msm_nand.c
@@ -0,0 +1,7143 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/crc16.h>
+#include <linux/bitrev.h>
+
+#include <asm/dma.h>
+#include <asm/mach/flash.h>
+
+#include <mach/dma.h>
+
+#include "msm_nand.h"
+
+unsigned long msm_nand_phys;
+unsigned long msm_nandc01_phys;
+unsigned long msm_nandc10_phys;
+unsigned long msm_nandc11_phys;
+unsigned long ebi2_register_base;
+uint32_t dual_nand_ctlr_present;
+uint32_t interleave_enable;
+uint32_t enable_bch_ecc;
+unsigned crci_mask;
+
+#define MSM_NAND_DMA_BUFFER_SIZE SZ_8K
+#define MSM_NAND_DMA_BUFFER_SLOTS \
+	(MSM_NAND_DMA_BUFFER_SIZE / (sizeof(((atomic_t *)0)->counter) * 8))
+
+#define MSM_NAND_CFG0_RAW_ONFI_IDENTIFIER 0x88000800
+#define MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO 0x88040000
+#define MSM_NAND_CFG1_RAW_ONFI_IDENTIFIER 0x0005045d
+#define MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO 0x0005045d
+
+#define ONFI_IDENTIFIER_LENGTH 0x0004
+#define ONFI_PARAM_INFO_LENGTH 0x0200
+#define ONFI_PARAM_PAGE_LENGTH 0x0100
+
+#define ONFI_PARAMETER_PAGE_SIGNATURE 0x49464E4F
+
+#define FLASH_READ_ONFI_IDENTIFIER_COMMAND 0x90
+#define FLASH_READ_ONFI_IDENTIFIER_ADDRESS 0x20
+#define FLASH_READ_ONFI_PARAMETERS_COMMAND 0xEC
+#define FLASH_READ_ONFI_PARAMETERS_ADDRESS 0x00
+
+#define VERBOSE 0
+
+struct msm_nand_chip {
+	struct device *dev;
+	wait_queue_head_t wait_queue;
+	atomic_t dma_buffer_busy;
+	unsigned dma_channel;
+	uint8_t *dma_buffer;
+	dma_addr_t dma_addr;
+	unsigned CFG0, CFG1, CFG0_RAW, CFG1_RAW;
+	uint32_t ecc_buf_cfg;
+	uint32_t ecc_bch_cfg;
+	uint32_t ecc_parity_bytes;
+	unsigned cw_size;
+};
+
+#define CFG1_WIDE_FLASH (1U << 1)
+
+/* TODO: move datamover code out */
+
+#define SRC_CRCI_NAND_CMD  CMD_SRC_CRCI(DMOV_NAND_CRCI_CMD)
+#define DST_CRCI_NAND_CMD  CMD_DST_CRCI(DMOV_NAND_CRCI_CMD)
+#define SRC_CRCI_NAND_DATA CMD_SRC_CRCI(DMOV_NAND_CRCI_DATA)
+#define DST_CRCI_NAND_DATA CMD_DST_CRCI(DMOV_NAND_CRCI_DATA)
+
+#define msm_virt_to_dma(chip, vaddr) \
+	((chip)->dma_addr + \
+	 ((uint8_t *)(vaddr) - (chip)->dma_buffer))
+
+/**
+ * msm_nand_oob_64 - oob info for 2KB page
+ */
+static struct nand_ecclayout msm_nand_oob_64 = {
+	.eccbytes	= 40,
+	.eccpos		= {
+		0,  1,  2,  3,  4,  5,  6,  7,  8,  9,
+		10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+		20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+		46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
+		},
+	.oobavail	= 16,
+	.oobfree	= {
+		{30, 16},
+	}
+};
+
+/**
+ * msm_nand_oob_128 - oob info for 4KB page
+ */
+static struct nand_ecclayout msm_nand_oob_128 = {
+	.eccbytes	= 80,
+	.eccpos		= {
+		  0,   1,   2,   3,   4,   5,   6,   7,   8,   9,
+		 10,  11,  12,  13,  14,  15,  16,  17,  18,  19,
+		 20,  21,  22,  23,  24,  25,  26,  27,  28,  29,
+		 30,  31,  32,  33,  34,  35,  36,  37,  38,  39,
+		 40,  41,  42,  43,  44,  45,  46,  47,  48,  49,
+		 50,  51,  52,  53,  54,  55,  56,  57,  58,  59,
+		 60,  61,  62,  63,  64,  65,  66,  67,  68,  69,
+		102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
+		},
+	.oobavail	= 32,
+	.oobfree	= {
+		{70, 32},
+	}
+};
+
+/**
+ * msm_nand_oob_224_x8 - oob info for 4KB page, 8-bit interface
+ */
+static struct nand_ecclayout msm_nand_oob_224_x8 = {
+	.eccbytes	= 104,
+	.eccpos		= {
+		  0,   1,   2,   3,   4,   5,   6,   7,   8,   9,  10,  11,  12,
+		 13,  14,  15,  16,  17,  18,  19,  20,  21,  22,  23,  24,  25,
+		 26,  27,  28,  29,  30,  31,  32,  33,  34,  35,  36,  37,  38,
+		 39,  40,  41,  42,  43,  44,  45,  46,  47,  48,  49,  50,  51,
+		 52,  53,  54,  55,  56,  57,  58,  59,	 60,  61,  62,  63,  64,
+		 65,  66,  67,  68,  69,  70,  71,  72,  73,  74,  75,  76,  77,
+		 78,  79,  80,  81,  82,  83,  84,  85,  86,  87,  88,  89,  90,
+		123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
+		},
+	.oobavail	= 32,
+	.oobfree	= {
+		{91, 32},
+	}
+};
+
+/**
+ * msm_nand_oob_224_x16 - oob info for 4KB page, 16-bit interface
+ */
+static struct nand_ecclayout msm_nand_oob_224_x16 = {
+	.eccbytes	= 112,
+	.eccpos		= {
+	  0,   1,   2,   3,   4,   5,   6,   7,   8,   9,  10,  11,  12,  13,
+	 14,  15,  16,  17,  18,  19,  20,  21,  22,  23,  24,  25,  26,  27,
+	 28,  29,  30,  31,  32,  33,  34,  35,  36,  37,  38,  39,  40,  41,
+	 42,  43,  44,  45,  46,  47,  48,  49,  50,  51,  52,  53,  54,  55,
+	 56,  57,  58,  59,  60,  61,  62,  63,  64,  65,  66,  67,  68,  69,
+	 70,  71,  72,  73,  74,  75,  76,  77,  78,  79,  80,  81,  82,  83,
+	 84,  85,  86,  87,  88,  89,  90,  91,  92,  93,  94,  95,  96,  97,
+	130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143,
+	},
+	.oobavail	= 32,
+	.oobfree	= {
+		{98, 32},
+	}
+};
+
+/**
+ * msm_nand_oob_256 - oob info for 8KB page
+ */
+static struct nand_ecclayout msm_nand_oob_256 = {
+	.eccbytes 	= 160,
+	.eccpos 	= {
+		  0,   1,   2,   3,   4,   5,   6,   7,   8,   9,
+		 10,  11,  12,  13,  14,  15,  16,  17,  18,  19,
+		 20,  21,  22,  23,  24,  25,  26,  27,  28,  29,
+		 30,  31,  32,  33,  34,  35,  36,  37,  38,  39,
+		 40,  41,  42,  43,  44,  45,  46,  47,  48,  49,
+		 50,  51,  52,  53,  54,  55,  56,  57,  58,  59,
+		 60,  61,  62,  63,  64,  65,  66,  67,  68,  69,
+		 70,  71,  72,  73,  74,  75,  76,  77,  78,  79,
+		 80,  81,  82,  83,  84,  85,  86,  87,  88,  89,
+		 90,  91,  92,  93,  94,  96,  97,  98,  99, 100,
+		101, 102, 103, 104, 105, 106, 107, 108, 109, 110,
+		111, 112, 113, 114, 115, 116, 117, 118, 119, 120,
+		121, 122, 123, 124, 125, 126, 127, 128, 129, 130,
+		131, 132, 133, 134, 135, 136, 137, 138, 139, 140,
+		141, 142, 143, 144, 145, 146, 147, 148, 149, 150,
+		215, 216, 217, 218, 219, 220, 221, 222, 223, 224,
+		},
+	.oobavail	= 64,
+	.oobfree	= {
+		{151, 64},
+	}
+};
+
+/**
+ * msm_onenand_oob_64 - oob info for large (2KB) page
+ */
+static struct nand_ecclayout msm_onenand_oob_64 = {
+	.eccbytes	= 20,
+	.eccpos		= {
+		8, 9, 10, 11, 12,
+		24, 25, 26, 27, 28,
+		40, 41, 42, 43, 44,
+		56, 57, 58, 59, 60,
+		},
+	.oobavail	= 20,
+	.oobfree	= {
+		{2, 3}, {14, 2}, {18, 3}, {30, 2},
+		{34, 3}, {46, 2}, {50, 3}, {62, 2}
+	}
+};
+
+static void *msm_nand_get_dma_buffer(struct msm_nand_chip *chip, size_t size)
+{
+	unsigned int bitmask, free_bitmask, old_bitmask;
+	unsigned int need_mask, current_need_mask;
+	int free_index;
+
+	need_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOTS)) - 1;
+	bitmask = atomic_read(&chip->dma_buffer_busy);
+	free_bitmask = ~bitmask;
+	do {
+		free_index = __ffs(free_bitmask);
+		current_need_mask = need_mask << free_index;
+
+		if (size + free_index * MSM_NAND_DMA_BUFFER_SLOTS >=
+						 MSM_NAND_DMA_BUFFER_SIZE)
+			return NULL;
+
+		if ((bitmask & current_need_mask) == 0) {
+			old_bitmask =
+				atomic_cmpxchg(&chip->dma_buffer_busy,
+					       bitmask,
+					       bitmask | current_need_mask);
+			if (old_bitmask == bitmask)
+				return chip->dma_buffer +
+					free_index * MSM_NAND_DMA_BUFFER_SLOTS;
+			free_bitmask = 0; /* force return */
+		}
+		/* current free range was too small, clear all free bits */
+		/* below the top busy bit within current_need_mask */
+		free_bitmask &=
+			~(~0U >> (32 - fls(bitmask & current_need_mask)));
+	} while (free_bitmask);
+
+	return NULL;
+}
+
+static void msm_nand_release_dma_buffer(struct msm_nand_chip *chip,
+					void *buffer, size_t size)
+{
+	int index;
+	unsigned int used_mask;
+
+	used_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOTS)) - 1;
+	index = ((uint8_t *)buffer - chip->dma_buffer) /
+		MSM_NAND_DMA_BUFFER_SLOTS;
+	atomic_sub(used_mask << index, &chip->dma_buffer_busy);
+
+	wake_up(&chip->wait_queue);
+}
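+
+/*
+ * Sizing sketch for the allocator above: the 8 KB DMA buffer is split
+ * into 32 slots of MSM_NAND_DMA_BUFFER_SLOTS (8192 / (4 * 8) = 256)
+ * bytes each, tracked by the 32-bit dma_buffer_busy bitmask.  A request
+ * of, say, 520 bytes (a hypothetical size) needs
+ * DIV_ROUND_UP(520, 256) = 3 slots, so need_mask = 0x7, and
+ * msm_nand_get_dma_buffer() slides that mask over the bitmask until it
+ * finds a run of three free slots.
+ */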
+
+
+unsigned flash_rd_reg(struct msm_nand_chip *chip, unsigned addr)
+{
+	struct {
+		dmov_s cmd;
+		unsigned cmdptr;
+		unsigned data;
+	} *dma_buffer;
+	unsigned rv;
+
+	wait_event(chip->wait_queue,
+		   (dma_buffer = msm_nand_get_dma_buffer(
+			    chip, sizeof(*dma_buffer))));
+
+	dma_buffer->cmd.cmd = CMD_LC | CMD_OCB | CMD_OCU;
+	dma_buffer->cmd.src = addr;
+	dma_buffer->cmd.dst = msm_virt_to_dma(chip, &dma_buffer->data);
+	dma_buffer->cmd.len = 4;
+
+	dma_buffer->cmdptr =
+		(msm_virt_to_dma(chip, &dma_buffer->cmd) >> 3) | CMD_PTR_LP;
+	dma_buffer->data = 0xeeeeeeee;
+
+	mb();
+	msm_dmov_exec_cmd(
+		chip->dma_channel, crci_mask, DMOV_CMD_PTR_LIST |
+		DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
+	mb();
+
+	rv = dma_buffer->data;
+
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+
+	return rv;
+}
+
+void flash_wr_reg(struct msm_nand_chip *chip, unsigned addr, unsigned val)
+{
+	struct {
+		dmov_s cmd;
+		unsigned cmdptr;
+		unsigned data;
+	} *dma_buffer;
+
+	wait_event(chip->wait_queue,
+		   (dma_buffer = msm_nand_get_dma_buffer(
+			    chip, sizeof(*dma_buffer))));
+
+	dma_buffer->cmd.cmd = CMD_LC | CMD_OCB | CMD_OCU;
+	dma_buffer->cmd.src = msm_virt_to_dma(chip, &dma_buffer->data);
+	dma_buffer->cmd.dst = addr;
+	dma_buffer->cmd.len = 4;
+
+	dma_buffer->cmdptr =
+		(msm_virt_to_dma(chip, &dma_buffer->cmd) >> 3) | CMD_PTR_LP;
+	dma_buffer->data = val;
+
+	mb();
+	msm_dmov_exec_cmd(
+		chip->dma_channel, crci_mask, DMOV_CMD_PTR_LIST |
+		DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
+	mb();
+
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+}
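+
+/*
+ * Usage sketch for the register accessors above (flash_onfi_probe()
+ * below reads MSM_NAND_DEV_CMD_VLD this way; the paired write is only
+ * an illustration):
+ *
+ *	uint32_t vld = flash_rd_reg(chip, MSM_NAND_DEV_CMD_VLD);
+ *	flash_wr_reg(chip, MSM_NAND_DEV_CMD_VLD, vld & 0xFFFFFFFE);
+ *
+ * Both helpers issue a single one-word dmov_s command through the
+ * chip's data mover channel instead of using readl()/writel() on the
+ * controller registers.
+ */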
+
+static dma_addr_t
+msm_nand_dma_map(struct device *dev, void *addr, size_t size,
+		 enum dma_data_direction dir)
+{
+	struct page *page;
+	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+	if (virt_addr_valid(addr))
+		page = virt_to_page(addr);
+	else {
+		if (WARN_ON(size + offset > PAGE_SIZE))
+			return ~0;
+		page = vmalloc_to_page(addr);
+	}
+	return dma_map_page(dev, page, offset, size, dir);
+}
+
+uint32_t flash_read_id(struct msm_nand_chip *chip)
+{
+	struct {
+		dmov_s cmd[7];
+		unsigned cmdptr;
+		unsigned data[7];
+	} *dma_buffer;
+	uint32_t rv;
+
+	wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
+				(chip, sizeof(*dma_buffer))));
+
+	dma_buffer->data[0] = 0 | 4;
+	dma_buffer->data[1] = MSM_NAND_CMD_FETCH_ID;
+	dma_buffer->data[2] = 1;
+	dma_buffer->data[3] = 0xeeeeeeee;
+	dma_buffer->data[4] = 0xeeeeeeee;
+	dma_buffer->data[5] = flash_rd_reg(chip, MSM_NAND_SFLASHC_BURST_CFG);
+	dma_buffer->data[6] = 0x00000000;
+	BUILD_BUG_ON(6 != ARRAY_SIZE(dma_buffer->data) - 1);
+
+	dma_buffer->cmd[0].cmd = 0 | CMD_OCB;
+	dma_buffer->cmd[0].src = msm_virt_to_dma(chip, &dma_buffer->data[6]);
+	dma_buffer->cmd[0].dst = MSM_NAND_SFLASHC_BURST_CFG;
+	dma_buffer->cmd[0].len = 4;
+
+	dma_buffer->cmd[1].cmd = 0;
+	dma_buffer->cmd[1].src = msm_virt_to_dma(chip, &dma_buffer->data[0]);
+	dma_buffer->cmd[1].dst = MSM_NAND_FLASH_CHIP_SELECT;
+	dma_buffer->cmd[1].len = 4;
+
+	dma_buffer->cmd[2].cmd = DST_CRCI_NAND_CMD;
+	dma_buffer->cmd[2].src = msm_virt_to_dma(chip, &dma_buffer->data[1]);
+	dma_buffer->cmd[2].dst = MSM_NAND_FLASH_CMD;
+	dma_buffer->cmd[2].len = 4;
+
+	dma_buffer->cmd[3].cmd = 0;
+	dma_buffer->cmd[3].src = msm_virt_to_dma(chip, &dma_buffer->data[2]);
+	dma_buffer->cmd[3].dst = MSM_NAND_EXEC_CMD;
+	dma_buffer->cmd[3].len = 4;
+
+	dma_buffer->cmd[4].cmd = SRC_CRCI_NAND_DATA;
+	dma_buffer->cmd[4].src = MSM_NAND_FLASH_STATUS;
+	dma_buffer->cmd[4].dst = msm_virt_to_dma(chip, &dma_buffer->data[3]);
+	dma_buffer->cmd[4].len = 4;
+
+	dma_buffer->cmd[5].cmd = 0;
+	dma_buffer->cmd[5].src = MSM_NAND_READ_ID;
+	dma_buffer->cmd[5].dst = msm_virt_to_dma(chip, &dma_buffer->data[4]);
+	dma_buffer->cmd[5].len = 4;
+
+	dma_buffer->cmd[6].cmd = CMD_OCU | CMD_LC;
+	dma_buffer->cmd[6].src = msm_virt_to_dma(chip, &dma_buffer->data[5]);
+	dma_buffer->cmd[6].dst = MSM_NAND_SFLASHC_BURST_CFG;
+	dma_buffer->cmd[6].len = 4;
+
+	BUILD_BUG_ON(6 != ARRAY_SIZE(dma_buffer->cmd) - 1);
+
+	dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3
+			) | CMD_PTR_LP;
+
+	mb();
+	msm_dmov_exec_cmd(chip->dma_channel, crci_mask, DMOV_CMD_PTR_LIST |
+		DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
+	mb();
+
+	pr_info("status: %x\n", dma_buffer->data[3]);
+	pr_info("nandid: %x maker %02x device %02x\n",
+	       dma_buffer->data[4], dma_buffer->data[4] & 0xff,
+	       (dma_buffer->data[4] >> 8) & 0xff);
+	rv = dma_buffer->data[4];
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+	return rv;
+}
+
+struct flash_identification {
+	uint32_t flash_id;
+	uint32_t density;
+	uint32_t widebus;
+	uint32_t pagesize;
+	uint32_t blksize;
+	uint32_t oobsize;
+	uint32_t ecc_correctability;
+} supported_flash;
+
+uint16_t flash_onfi_crc_check(uint8_t *buffer, uint16_t count)
+{
+	int i;
+	uint16_t result;
+
+	for (i = 0; i < count; i++)
+		buffer[i] = bitrev8(buffer[i]);
+
+	result = bitrev16(crc16(bitrev16(0x4f4e), buffer, count));
+
+	for (i = 0; i < count; i++)
+		buffer[i] = bitrev8(buffer[i]);
+
+	return result;
+}
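+
+/*
+ * What the check above computes, spelled out (a sketch): the ONFI
+ * parameter-page CRC-16 uses polynomial 0x8005 with seed 0x4F4E,
+ * processed most-significant-bit first, whereas the kernel's crc16()
+ * helper implements the reflected (least-significant-bit first) form.
+ * Bit-reversing the input bytes, the seed and the result maps one onto
+ * the other:
+ *
+ *	crc_onfi = bitrev16(crc16(bitrev16(0x4F4E), reversed_buf, count));
+ *
+ * The buffer bytes are reversed back afterwards so the caller sees the
+ * parameter page unmodified.
+ */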
+
+
+uint32_t flash_onfi_probe(struct msm_nand_chip *chip)
+{
+	struct onfi_param_page {
+		uint32_t parameter_page_signature;
+		uint16_t revision_number;
+		uint16_t features_supported;
+		uint16_t optional_commands_supported;
+		uint8_t  reserved0[22];
+		uint8_t  device_manufacturer[12];
+		uint8_t  device_model[20];
+		uint8_t  jedec_manufacturer_id;
+		uint16_t date_code;
+		uint8_t  reserved1[13];
+		uint32_t number_of_data_bytes_per_page;
+		uint16_t number_of_spare_bytes_per_page;
+		uint32_t number_of_data_bytes_per_partial_page;
+		uint16_t number_of_spare_bytes_per_partial_page;
+		uint32_t number_of_pages_per_block;
+		uint32_t number_of_blocks_per_logical_unit;
+		uint8_t  number_of_logical_units;
+		uint8_t  number_of_address_cycles;
+		uint8_t  number_of_bits_per_cell;
+		uint16_t maximum_bad_blocks_per_logical_unit;
+		uint16_t block_endurance;
+		uint8_t  guaranteed_valid_begin_blocks;
+		uint16_t guaranteed_valid_begin_blocks_endurance;
+		uint8_t  number_of_programs_per_page;
+		uint8_t  partial_program_attributes;
+		uint8_t  number_of_bits_ecc_correctability;
+		uint8_t  number_of_interleaved_address_bits;
+		uint8_t  interleaved_operation_attributes;
+		uint8_t  reserved2[13];
+		uint8_t  io_pin_capacitance;
+		uint16_t timing_mode_support;
+		uint16_t program_cache_timing_mode_support;
+		uint16_t maximum_page_programming_time;
+		uint16_t maximum_block_erase_time;
+		uint16_t maximum_page_read_time;
+		uint16_t maximum_change_column_setup_time;
+		uint8_t  reserved3[23];
+		uint16_t vendor_specific_revision_number;
+		uint8_t  vendor_specific[88];
+		uint16_t integrity_crc;
+
+	} __attribute__((__packed__));
+
+	struct onfi_param_page *onfi_param_page_ptr;
+	uint8_t *onfi_identifier_buf = NULL;
+	uint8_t *onfi_param_info_buf = NULL;
+
+	struct {
+		dmov_s cmd[11];
+		unsigned cmdptr;
+		struct {
+			uint32_t cmd;
+			uint32_t addr0;
+			uint32_t addr1;
+			uint32_t cfg0;
+			uint32_t cfg1;
+			uint32_t exec;
+			uint32_t flash_status;
+			uint32_t devcmd1_orig;
+			uint32_t devcmdvld_orig;
+			uint32_t devcmd1_mod;
+			uint32_t devcmdvld_mod;
+			uint32_t sflash_bcfg_orig;
+			uint32_t sflash_bcfg_mod;
+		} data;
+	} *dma_buffer;
+	dmov_s *cmd;
+
+	unsigned page_address = 0;
+	int err = 0;
+	dma_addr_t dma_addr_param_info = 0;
+	dma_addr_t dma_addr_identifier = 0;
+	unsigned cmd_set_count = 2;
+	unsigned crc_chk_count = 0;
+
+	if (msm_nand_data.nr_parts) {
+		page_address = ((msm_nand_data.parts[0]).offset << 6);
+	} else {
+		pr_err("flash_onfi_probe: "
+				"No partition info available\n");
+		err = -EIO;
+		return err;
+	}
+
+	wait_event(chip->wait_queue, (onfi_identifier_buf =
+		msm_nand_get_dma_buffer(chip, ONFI_IDENTIFIER_LENGTH)));
+	dma_addr_identifier = msm_virt_to_dma(chip, onfi_identifier_buf);
+
+	wait_event(chip->wait_queue, (onfi_param_info_buf =
+		msm_nand_get_dma_buffer(chip, ONFI_PARAM_INFO_LENGTH)));
+	dma_addr_param_info = msm_virt_to_dma(chip, onfi_param_info_buf);
+
+	wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
+				(chip, sizeof(*dma_buffer))));
+
+	dma_buffer->data.sflash_bcfg_orig = flash_rd_reg
+				(chip, MSM_NAND_SFLASHC_BURST_CFG);
+	dma_buffer->data.devcmd1_orig = flash_rd_reg(chip, MSM_NAND_DEV_CMD1);
+	dma_buffer->data.devcmdvld_orig = flash_rd_reg(chip,
+						 MSM_NAND_DEV_CMD_VLD);
+
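+	/* Two passes over the same command list: the first (cmd_set_count
+	 * still non-zero inside the loop) fetches the ONFI identifier, the
+	 * second fetches the full parameter page.
+	 */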
+	while (cmd_set_count-- > 0) {
+		cmd = dma_buffer->cmd;
+
+		dma_buffer->data.devcmd1_mod = (dma_buffer->data.devcmd1_orig &
+				0xFFFFFF00) | (cmd_set_count
+				? FLASH_READ_ONFI_IDENTIFIER_COMMAND
+				: FLASH_READ_ONFI_PARAMETERS_COMMAND);
+		dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ;
+		dma_buffer->data.addr0 = (page_address << 16) | (cmd_set_count
+				? FLASH_READ_ONFI_IDENTIFIER_ADDRESS
+				: FLASH_READ_ONFI_PARAMETERS_ADDRESS);
+		dma_buffer->data.addr1 = (page_address >> 16) & 0xFF;
+		dma_buffer->data.cfg0 =	(cmd_set_count
+				? MSM_NAND_CFG0_RAW_ONFI_IDENTIFIER
+				: MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO);
+		dma_buffer->data.cfg1 =	(cmd_set_count
+				? MSM_NAND_CFG1_RAW_ONFI_IDENTIFIER
+				: MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO);
+		dma_buffer->data.sflash_bcfg_mod = 0x00000000;
+		dma_buffer->data.devcmdvld_mod = (dma_buffer->
+				data.devcmdvld_orig & 0xFFFFFFFE);
+		dma_buffer->data.exec = 1;
+		dma_buffer->data.flash_status = 0xeeeeeeee;
+
+		/* Put the Nand ctlr in Async mode and disable SFlash ctlr */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip,
+				&dma_buffer->data.sflash_bcfg_mod);
+		cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
+		cmd->len = 4;
+		cmd++;
+
+		/* Block on cmd ready, & write CMD,ADDR0,ADDR1,CHIPSEL regs */
+		cmd->cmd = DST_CRCI_NAND_CMD;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
+		cmd->dst = MSM_NAND_FLASH_CMD;
+		cmd->len = 12;
+		cmd++;
+
+		/* Configure the CFG0 and CFG1 registers */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip,
+				&dma_buffer->data.cfg0);
+		cmd->dst = MSM_NAND_DEV0_CFG0;
+		cmd->len = 8;
+		cmd++;
+
+		/* Configure the DEV_CMD_VLD register */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip,
+				&dma_buffer->data.devcmdvld_mod);
+		cmd->dst = MSM_NAND_DEV_CMD_VLD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Configure the DEV_CMD1 register */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip,
+				&dma_buffer->data.devcmd1_mod);
+		cmd->dst = MSM_NAND_DEV_CMD1;
+		cmd->len = 4;
+		cmd++;
+
+		/* Kick the execute command */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip,
+				&dma_buffer->data.exec);
+		cmd->dst = MSM_NAND_EXEC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Block on data ready, and read the two status registers */
+		cmd->cmd = SRC_CRCI_NAND_DATA;
+		cmd->src = MSM_NAND_FLASH_STATUS;
+		cmd->dst = msm_virt_to_dma(chip,
+				&dma_buffer->data.flash_status);
+		cmd->len = 4;
+		cmd++;
+
+		/* Read data block - valid only if status says success */
+		cmd->cmd = 0;
+		cmd->src = MSM_NAND_FLASH_BUFFER;
+		cmd->dst = (cmd_set_count ? dma_addr_identifier :
+				dma_addr_param_info);
+		cmd->len = (cmd_set_count ? ONFI_IDENTIFIER_LENGTH :
+				ONFI_PARAM_INFO_LENGTH);
+		cmd++;
+
+		/* Restore the DEV_CMD1 register */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip,
+				&dma_buffer->data.devcmd1_orig);
+		cmd->dst = MSM_NAND_DEV_CMD1;
+		cmd->len = 4;
+		cmd++;
+
+		/* Restore the DEV_CMD_VLD register */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip,
+				&dma_buffer->data.devcmdvld_orig);
+		cmd->dst = MSM_NAND_DEV_CMD_VLD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Restore the SFLASH_BURST_CONFIG register */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip,
+				&dma_buffer->data.sflash_bcfg_orig);
+		cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
+		cmd->len = 4;
+		cmd++;
+
+		BUILD_BUG_ON(11 != ARRAY_SIZE(dma_buffer->cmd));
+		BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+		dma_buffer->cmd[0].cmd |= CMD_OCB;
+		cmd[-1].cmd |= CMD_OCU | CMD_LC;
+
+		dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
+				>> 3) | CMD_PTR_LP;
+
+		mb();
+		msm_dmov_exec_cmd(chip->dma_channel, crci_mask,
+			DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
+			&dma_buffer->cmdptr)));
+		mb();
+
+		/* Check for errors, protection violations etc */
+		if (dma_buffer->data.flash_status & 0x110) {
+			pr_info("MPU/OP error (0x%x) during "
+					"ONFI probe\n",
+					dma_buffer->data.flash_status);
+			err = -EIO;
+			break;
+		}
+
+		if (cmd_set_count) {
+			onfi_param_page_ptr = (struct onfi_param_page *)
+				(&(onfi_identifier_buf[0]));
+			if (onfi_param_page_ptr->parameter_page_signature !=
+					ONFI_PARAMETER_PAGE_SIGNATURE) {
+				pr_info("ONFI probe : Found a non-ONFI "
+						"compliant device\n");
+				err = -EIO;
+				break;
+			}
+		} else {
+			for (crc_chk_count = 0; crc_chk_count <
+					ONFI_PARAM_INFO_LENGTH
+					/ ONFI_PARAM_PAGE_LENGTH;
+					crc_chk_count++) {
+				onfi_param_page_ptr =
+					(struct onfi_param_page *)
+					(&(onfi_param_info_buf
+					[ONFI_PARAM_PAGE_LENGTH *
+					crc_chk_count]));
+				if (flash_onfi_crc_check(
+					(uint8_t *)onfi_param_page_ptr,
+					ONFI_PARAM_PAGE_LENGTH - 2) ==
+					onfi_param_page_ptr->integrity_crc) {
+					break;
+				}
+			}
+			if (crc_chk_count >= ONFI_PARAM_INFO_LENGTH
+					/ ONFI_PARAM_PAGE_LENGTH) {
+				pr_info("ONFI probe : CRC Check "
+						"failed on ONFI Parameter "
+						"data\n");
+				err = -EIO;
+				break;
+			} else {
+				supported_flash.flash_id =
+					flash_read_id(chip);
+				supported_flash.widebus  =
+					onfi_param_page_ptr->
+					features_supported & 0x01;
+				supported_flash.pagesize =
+					onfi_param_page_ptr->
+					number_of_data_bytes_per_page;
+				supported_flash.blksize  =
+					onfi_param_page_ptr->
+					number_of_pages_per_block *
+					supported_flash.pagesize;
+				supported_flash.oobsize  =
+					onfi_param_page_ptr->
+					number_of_spare_bytes_per_page;
+				supported_flash.density  =
+					onfi_param_page_ptr->
+					number_of_blocks_per_logical_unit
+					* supported_flash.blksize;
+				supported_flash.ecc_correctability =
+					onfi_param_page_ptr->
+					number_of_bits_ecc_correctability;
+
+				pr_info("ONFI probe : Found an ONFI "
+					"compliant device %s\n",
+					onfi_param_page_ptr->device_model);
+
+				/* Temporary hack for the MT29F4G08ABC device.
+				 * It does not fully adhere to the ONFI
+				 * specification and reports itself as a
+				 * 16-bit device even though it is an
+				 * 8-bit device.
+				 */
+				if (!strncmp(onfi_param_page_ptr->device_model,
+					"MT29F4G08ABC", 12))
+					supported_flash.widebus  = 0;
+			}
+		}
+	}
+
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+	msm_nand_release_dma_buffer(chip, onfi_param_info_buf,
+			ONFI_PARAM_INFO_LENGTH);
+	msm_nand_release_dma_buffer(chip, onfi_identifier_buf,
+			ONFI_IDENTIFIER_LENGTH);
+
+	return err;
+}
+
+static int msm_nand_read_oob(struct mtd_info *mtd, loff_t from,
+			     struct mtd_oob_ops *ops)
+{
+	struct msm_nand_chip *chip = mtd->priv;
+
+	struct {
+		dmov_s cmd[8 * 5 + 2];
+		unsigned cmdptr;
+		struct {
+			uint32_t cmd;
+			uint32_t addr0;
+			uint32_t addr1;
+			uint32_t chipsel;
+			uint32_t cfg0;
+			uint32_t cfg1;
+			uint32_t eccbchcfg;
+			uint32_t exec;
+			uint32_t ecccfg;
+			struct {
+				uint32_t flash_status;
+				uint32_t buffer_status;
+			} result[8];
+		} data;
+	} *dma_buffer;
+	dmov_s *cmd;
+	unsigned n;
+	unsigned page = 0;
+	uint32_t oob_len;
+	uint32_t sectordatasize;
+	uint32_t sectoroobsize;
+	int err, pageerr, rawerr;
+	dma_addr_t data_dma_addr = 0;
+	dma_addr_t oob_dma_addr = 0;
+	dma_addr_t data_dma_addr_curr = 0;
+	dma_addr_t oob_dma_addr_curr = 0;
+	uint32_t oob_col = 0;
+	unsigned page_count;
+	unsigned pages_read = 0;
+	unsigned start_sector = 0;
+	uint32_t ecc_errors;
+	uint32_t total_ecc_errors = 0;
+	unsigned cwperpage;
+#if VERBOSE
+	pr_info("================================================="
+			"================\n");
+	pr_info("%s:\nfrom 0x%llx mode %d\ndatbuf 0x%p datlen 0x%x"
+			"\noobbuf 0x%p ooblen 0x%x\n",
+			__func__, from, ops->mode, ops->datbuf, ops->len,
+			ops->oobbuf, ops->ooblen);
+#endif
+
+	if (mtd->writesize == 2048)
+		page = from >> 11;
+
+	if (mtd->writesize == 4096)
+		page = from >> 12;
+
+	oob_len = ops->ooblen;
+	cwperpage = (mtd->writesize >> 9);
+
+	if (from & (mtd->writesize - 1)) {
+		pr_err("%s: unsupported from, 0x%llx\n",
+		       __func__, from);
+		return -EINVAL;
+	}
+	if (ops->mode != MTD_OOB_RAW) {
+		if (ops->datbuf != NULL && (ops->len % mtd->writesize) != 0) {
+			/* when ops->datbuf is NULL, ops->len can be ooblen */
+			pr_err("%s: unsupported ops->len, %d\n",
+			       __func__, ops->len);
+			return -EINVAL;
+		}
+	} else {
+		if (ops->datbuf != NULL &&
+			(ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
+			pr_err("%s: unsupported ops->len,"
+				" %d for MTD_OOB_RAW\n", __func__, ops->len);
+			return -EINVAL;
+		}
+	}
+
+	if (ops->mode != MTD_OOB_RAW && ops->ooblen != 0 && ops->ooboffs != 0) {
+		pr_err("%s: unsupported ops->ooboffs, %d\n",
+		       __func__, ops->ooboffs);
+		return -EINVAL;
+	}
+
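+	/* For OOB-only reads in AUTO mode only the last codeword needs to be
+	 * read, since it is the one that carries the free OOB bytes.
+	 */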
+	if (ops->oobbuf && !ops->datbuf && ops->mode == MTD_OOB_AUTO)
+		start_sector = cwperpage - 1;
+
+	if (ops->oobbuf && !ops->datbuf) {
+		page_count = ops->ooblen / ((ops->mode == MTD_OOB_AUTO) ?
+			mtd->oobavail : mtd->oobsize);
+		if ((page_count == 0) && (ops->ooblen))
+			page_count = 1;
+	} else if (ops->mode != MTD_OOB_RAW)
+		page_count = ops->len / mtd->writesize;
+	else
+		page_count = ops->len / (mtd->writesize + mtd->oobsize);
+
+	if (ops->datbuf) {
+		data_dma_addr_curr = data_dma_addr =
+			msm_nand_dma_map(chip->dev, ops->datbuf, ops->len,
+				       DMA_FROM_DEVICE);
+		if (dma_mapping_error(chip->dev, data_dma_addr)) {
+			pr_err("msm_nand_read_oob: failed to get dma addr "
+			       "for %p\n", ops->datbuf);
+			return -EIO;
+		}
+	}
+	if (ops->oobbuf) {
+		memset(ops->oobbuf, 0xff, ops->ooblen);
+		oob_dma_addr_curr = oob_dma_addr =
+			msm_nand_dma_map(chip->dev, ops->oobbuf,
+				       ops->ooblen, DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(chip->dev, oob_dma_addr)) {
+			pr_err("msm_nand_read_oob: failed to get dma addr "
+			       "for %p\n", ops->oobbuf);
+			err = -EIO;
+			goto err_dma_map_oobbuf_failed;
+		}
+	}
+
+	wait_event(chip->wait_queue,
+		   (dma_buffer = msm_nand_get_dma_buffer(
+			    chip, sizeof(*dma_buffer))));
+
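+	/* Column address of the first codeword to transfer; halved for
+	 * 16-bit (wide) flash, where the column is counted in words.
+	 */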
+	oob_col = start_sector * chip->cw_size;
+	if (chip->CFG1 & CFG1_WIDE_FLASH)
+		oob_col >>= 1;
+
+	err = 0;
+	while (page_count-- > 0) {
+		cmd = dma_buffer->cmd;
+
+		/* CMD / ADDR0 / ADDR1 / CHIPSEL program values */
+		if (ops->mode != MTD_OOB_RAW) {
+			dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ_ECC;
+			dma_buffer->data.cfg0 =
+			(chip->CFG0 & ~(7U << 6))
+				| (((cwperpage-1) - start_sector) << 6);
+			dma_buffer->data.cfg1 = chip->CFG1;
+			if (enable_bch_ecc)
+				dma_buffer->data.eccbchcfg = chip->ecc_bch_cfg;
+		} else {
+			dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ;
+			dma_buffer->data.cfg0 = (chip->CFG0_RAW
+					& ~(7U << 6)) | ((cwperpage-1) << 6);
+			dma_buffer->data.cfg1 = chip->CFG1_RAW |
+					(chip->CFG1 & CFG1_WIDE_FLASH);
+		}
+
+		dma_buffer->data.addr0 = (page << 16) | oob_col;
+		dma_buffer->data.addr1 = (page >> 16) & 0xff;
+		/* chipsel_0 + enable DM interface */
+		dma_buffer->data.chipsel = 0 | 4;
+
+
+		/* GO bit for the EXEC register */
+		dma_buffer->data.exec = 1;
+
+
+		BUILD_BUG_ON(8 != ARRAY_SIZE(dma_buffer->data.result));
+
+		for (n = start_sector; n < cwperpage; n++) {
+			/* flash + buffer status return words */
+			dma_buffer->data.result[n].flash_status = 0xeeeeeeee;
+			dma_buffer->data.result[n].buffer_status = 0xeeeeeeee;
+
+			/* block on cmd ready, then
+			 * write CMD / ADDR0 / ADDR1 / CHIPSEL
+			 * regs in a burst
+			 */
+			cmd->cmd = DST_CRCI_NAND_CMD;
+			cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
+			cmd->dst = MSM_NAND_FLASH_CMD;
+			if (n == start_sector)
+				cmd->len = 16;
+			else
+				cmd->len = 4;
+			cmd++;
+
+			if (n == start_sector) {
+				cmd->cmd = 0;
+				cmd->src = msm_virt_to_dma(chip,
+							&dma_buffer->data.cfg0);
+				cmd->dst = MSM_NAND_DEV0_CFG0;
+				if (enable_bch_ecc)
+					cmd->len = 12;
+				else
+					cmd->len = 8;
+				cmd++;
+
+				dma_buffer->data.ecccfg = chip->ecc_buf_cfg;
+				cmd->cmd = 0;
+				cmd->src = msm_virt_to_dma(chip,
+						&dma_buffer->data.ecccfg);
+				cmd->dst = MSM_NAND_EBI2_ECC_BUF_CFG;
+				cmd->len = 4;
+				cmd++;
+			}
+
+			/* kick the execute register */
+			cmd->cmd = 0;
+			cmd->src =
+				msm_virt_to_dma(chip, &dma_buffer->data.exec);
+			cmd->dst = MSM_NAND_EXEC_CMD;
+			cmd->len = 4;
+			cmd++;
+
+			/* block on data ready, then
+			 * read the status register
+			 */
+			cmd->cmd = SRC_CRCI_NAND_DATA;
+			cmd->src = MSM_NAND_FLASH_STATUS;
+			cmd->dst = msm_virt_to_dma(chip,
+						   &dma_buffer->data.result[n]);
+			/* MSM_NAND_FLASH_STATUS + MSM_NAND_BUFFER_STATUS */
+			cmd->len = 8;
+			cmd++;
+
+			/* read data block
+			 * (only valid if status says success)
+			 */
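+			/* With ECC on, each codeword carries 516 bytes of
+			 * user data except the last, which carries
+			 * 512 - 4*(cwperpage-1) bytes so the per-page total
+			 * is exactly mtd->writesize; in raw mode the full
+			 * chip->cw_size codeword is copied.
+			 */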
+			if (ops->datbuf) {
+				if (ops->mode != MTD_OOB_RAW)
+					sectordatasize = (n < (cwperpage - 1))
+					? 516 : (512 - ((cwperpage - 1) << 2));
+				else
+					sectordatasize = chip->cw_size;
+
+				cmd->cmd = 0;
+				cmd->src = MSM_NAND_FLASH_BUFFER;
+				cmd->dst = data_dma_addr_curr;
+				data_dma_addr_curr += sectordatasize;
+				cmd->len = sectordatasize;
+				cmd++;
+			}
+
+			if (ops->oobbuf && (n == (cwperpage - 1)
+			     || ops->mode != MTD_OOB_AUTO)) {
+				cmd->cmd = 0;
+				if (n == (cwperpage - 1)) {
+					cmd->src = MSM_NAND_FLASH_BUFFER +
+						(512 - ((cwperpage - 1) << 2));
+					sectoroobsize = (cwperpage << 2);
+					if (ops->mode != MTD_OOB_AUTO)
+						sectoroobsize +=
+							chip->ecc_parity_bytes;
+				} else {
+					cmd->src = MSM_NAND_FLASH_BUFFER + 516;
+					sectoroobsize = chip->ecc_parity_bytes;
+				}
+
+				cmd->dst = oob_dma_addr_curr;
+				if (sectoroobsize < oob_len)
+					cmd->len = sectoroobsize;
+				else
+					cmd->len = oob_len;
+				oob_dma_addr_curr += cmd->len;
+				oob_len -= cmd->len;
+				if (cmd->len > 0)
+					cmd++;
+			}
+		}
+
+		BUILD_BUG_ON(8 * 5 + 2 != ARRAY_SIZE(dma_buffer->cmd));
+		BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+		dma_buffer->cmd[0].cmd |= CMD_OCB;
+		cmd[-1].cmd |= CMD_OCU | CMD_LC;
+
+		dma_buffer->cmdptr =
+			(msm_virt_to_dma(chip, dma_buffer->cmd) >> 3)
+			| CMD_PTR_LP;
+
+		mb();
+		msm_dmov_exec_cmd(chip->dma_channel, crci_mask,
+			DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
+			&dma_buffer->cmdptr)));
+		mb();
+
+		/* if any of the reads failed (0x10), or there
+		 * was a protection violation (0x100), we lose
+		 */
+		pageerr = rawerr = 0;
+		for (n = start_sector; n < cwperpage; n++) {
+			if (dma_buffer->data.result[n].flash_status & 0x110) {
+				rawerr = -EIO;
+				break;
+			}
+		}
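+		/* An operation error may simply mean the page is erased:
+		 * treat it as a real page error only if the data (allowing
+		 * for the 0x54 bytes empty pages return at a couple of fixed
+		 * offsets per codeword) or the OOB contain anything other
+		 * than 0xff.
+		 */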
+		if (rawerr) {
+			if (ops->datbuf && ops->mode != MTD_OOB_RAW) {
+				uint8_t *datbuf = ops->datbuf +
+					pages_read * mtd->writesize;
+
+				dma_sync_single_for_cpu(chip->dev,
+					data_dma_addr_curr-mtd->writesize,
+					mtd->writesize, DMA_BIDIRECTIONAL);
+
+				for (n = 0; n < mtd->writesize; n++) {
+					/* empty blocks read 0x54 at
+					 * these offsets
+					 */
+					if ((n % 516 == 3 || n % 516 == 175)
+							&& datbuf[n] == 0x54)
+						datbuf[n] = 0xff;
+					if (datbuf[n] != 0xff) {
+						pageerr = rawerr;
+						break;
+					}
+				}
+
+				dma_sync_single_for_device(chip->dev,
+					data_dma_addr_curr-mtd->writesize,
+					mtd->writesize, DMA_BIDIRECTIONAL);
+
+			}
+			if (ops->oobbuf) {
+				dma_sync_single_for_cpu(chip->dev,
+				oob_dma_addr_curr - (ops->ooblen - oob_len),
+				ops->ooblen - oob_len, DMA_BIDIRECTIONAL);
+
+				for (n = 0; n < ops->ooblen; n++) {
+					if (ops->oobbuf[n] != 0xff) {
+						pageerr = rawerr;
+						break;
+					}
+				}
+
+				dma_sync_single_for_device(chip->dev,
+				oob_dma_addr_curr - (ops->ooblen - oob_len),
+				ops->ooblen - oob_len, DMA_BIDIRECTIONAL);
+			}
+		}
+		if (pageerr) {
+			for (n = start_sector; n < cwperpage; n++) {
+				if (enable_bch_ecc ?
+			(dma_buffer->data.result[n].buffer_status & 0x10) :
+			(dma_buffer->data.result[n].buffer_status & 0x8)) {
+					/* not thread safe */
+					mtd->ecc_stats.failed++;
+					pageerr = -EBADMSG;
+					break;
+				}
+			}
+		}
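+		/* Codewords that needed more than one bit corrected are
+		 * reported as -EUCLEAN so upper layers can consider moving
+		 * the data.
+		 */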
+		if (!rawerr) { /* check for correctable errors */
+			for (n = start_sector; n < cwperpage; n++) {
+				ecc_errors = enable_bch_ecc ?
+			(dma_buffer->data.result[n].buffer_status & 0xF) :
+			(dma_buffer->data.result[n].buffer_status & 0x7);
+				if (ecc_errors) {
+					total_ecc_errors += ecc_errors;
+					/* not thread safe */
+					mtd->ecc_stats.corrected += ecc_errors;
+					if (ecc_errors > 1)
+						pageerr = -EUCLEAN;
+				}
+			}
+		}
+		if (pageerr && (pageerr != -EUCLEAN || err == 0))
+			err = pageerr;
+
+#if VERBOSE
+		if (rawerr && !pageerr) {
+			pr_err("msm_nand_read_oob %llx %x %x empty page\n",
+			       (loff_t)page * mtd->writesize, ops->len,
+			       ops->ooblen);
+		} else {
+			for (n = start_sector; n < cwperpage; n++)
+				pr_info("flash_status[%d] = %x, "
+					"buffer_status[%d] = %x\n",
+				n, dma_buffer->data.result[n].flash_status,
+				n, dma_buffer->data.result[n].buffer_status);
+		}
+#endif
+		if (err && err != -EUCLEAN && err != -EBADMSG)
+			break;
+		pages_read++;
+		page++;
+	}
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+
+	if (ops->oobbuf) {
+		dma_unmap_page(chip->dev, oob_dma_addr,
+				 ops->ooblen, DMA_FROM_DEVICE);
+	}
+err_dma_map_oobbuf_failed:
+	if (ops->datbuf) {
+		dma_unmap_page(chip->dev, data_dma_addr,
+				 ops->len, DMA_BIDIRECTIONAL);
+	}
+
+	if (ops->mode != MTD_OOB_RAW)
+		ops->retlen = mtd->writesize * pages_read;
+	else
+		ops->retlen = (mtd->writesize +  mtd->oobsize) *
+							pages_read;
+	ops->oobretlen = ops->ooblen - oob_len;
+	if (err)
+		pr_err("msm_nand_read_oob %llx %x %x failed %d, corrected %d\n",
+		       from, ops->datbuf ? ops->len : 0, ops->ooblen, err,
+		       total_ecc_errors);
+#if VERBOSE
+	pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
+			__func__, err, ops->retlen, ops->oobretlen);
+
+	pr_info("==================================================="
+			"==============\n");
+#endif
+	return err;
+}
+
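+/* Dual-NAND-controller read path: codewords of a page are fetched through
+ * both controllers, even-numbered ones via NC01 and odd-numbered ones via
+ * NC10, with the EBI2 ADM mux switched between the two controllers'
+ * command/data handshakes as the command list progresses.
+ */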
+static int msm_nand_read_oob_dualnandc(struct mtd_info *mtd, loff_t from,
+			struct mtd_oob_ops *ops)
+{
+	struct msm_nand_chip *chip = mtd->priv;
+
+	struct {
+		dmov_s cmd[16 * 6 + 20];
+		unsigned cmdptr;
+		struct {
+			uint32_t cmd;
+			uint32_t nandc01_addr0;
+			uint32_t nandc10_addr0;
+			uint32_t nandc11_addr1;
+			uint32_t chipsel_cs0;
+			uint32_t chipsel_cs1;
+			uint32_t cfg0;
+			uint32_t cfg1;
+			uint32_t eccbchcfg;
+			uint32_t exec;
+			uint32_t ecccfg;
+			uint32_t ebi2_chip_select_cfg0;
+			uint32_t adm_mux_data_ack_req_nc01;
+			uint32_t adm_mux_cmd_ack_req_nc01;
+			uint32_t adm_mux_data_ack_req_nc10;
+			uint32_t adm_mux_cmd_ack_req_nc10;
+			uint32_t adm_default_mux;
+			uint32_t default_ebi2_chip_select_cfg0;
+			uint32_t nc10_flash_dev_cmd_vld;
+			uint32_t nc10_flash_dev_cmd1;
+			uint32_t nc10_flash_dev_cmd_vld_default;
+			uint32_t nc10_flash_dev_cmd1_default;
+			struct {
+				uint32_t flash_status;
+				uint32_t buffer_status;
+			} result[16];
+		} data;
+	} *dma_buffer;
+	dmov_s *cmd;
+	unsigned n;
+	unsigned page = 0;
+	uint32_t oob_len;
+	uint32_t sectordatasize;
+	uint32_t sectoroobsize;
+	int err, pageerr, rawerr;
+	dma_addr_t data_dma_addr = 0;
+	dma_addr_t oob_dma_addr = 0;
+	dma_addr_t data_dma_addr_curr = 0;
+	dma_addr_t oob_dma_addr_curr = 0;
+	uint32_t oob_col = 0;
+	unsigned page_count;
+	unsigned pages_read = 0;
+	unsigned start_sector = 0;
+	uint32_t ecc_errors;
+	uint32_t total_ecc_errors = 0;
+	unsigned cwperpage;
+	unsigned cw_offset = chip->cw_size;
+#if VERBOSE
+		pr_info("================================================="
+				"============\n");
+		pr_info("%s:\nfrom 0x%llx mode %d\ndatbuf 0x%p datlen 0x%x"
+				"\noobbuf 0x%p ooblen 0x%x\n\n",
+				__func__, from, ops->mode, ops->datbuf,
+				ops->len, ops->oobbuf, ops->ooblen);
+#endif
+
+	if (mtd->writesize == 2048)
+		page = from >> 11;
+
+	if (mtd->writesize == 4096)
+		page = from >> 12;
+
+	if (interleave_enable)
+		page = (from >> 1) >> 12;
+
+	oob_len = ops->ooblen;
+	cwperpage = (mtd->writesize >> 9);
+
+	if (from & (mtd->writesize - 1)) {
+		pr_err("%s: unsupported from, 0x%llx\n",
+		       __func__, from);
+		return -EINVAL;
+	}
+	if (ops->mode != MTD_OOB_RAW) {
+		if (ops->datbuf != NULL && (ops->len % mtd->writesize) != 0) {
+			pr_err("%s: unsupported ops->len, %d\n",
+			       __func__, ops->len);
+			return -EINVAL;
+		}
+	} else {
+		if (ops->datbuf != NULL &&
+			(ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
+			pr_err("%s: unsupported ops->len,"
+				" %d for MTD_OOB_RAW\n", __func__, ops->len);
+			return -EINVAL;
+		}
+	}
+
+	if (ops->mode != MTD_OOB_RAW && ops->ooblen != 0 && ops->ooboffs != 0) {
+		pr_err("%s: unsupported ops->ooboffs, %d\n",
+		       __func__, ops->ooboffs);
+		return -EINVAL;
+	}
+
+	if (ops->oobbuf && !ops->datbuf && ops->mode == MTD_OOB_AUTO)
+		start_sector = cwperpage - 1;
+
+	if (ops->oobbuf && !ops->datbuf) {
+		page_count = ops->ooblen / ((ops->mode == MTD_OOB_AUTO) ?
+			mtd->oobavail : mtd->oobsize);
+		if ((page_count == 0) && (ops->ooblen))
+			page_count = 1;
+	} else if (ops->mode != MTD_OOB_RAW)
+		page_count = ops->len / mtd->writesize;
+	else
+		page_count = ops->len / (mtd->writesize + mtd->oobsize);
+
+	if (ops->datbuf) {
+		data_dma_addr_curr = data_dma_addr =
+			msm_nand_dma_map(chip->dev, ops->datbuf, ops->len,
+				       DMA_FROM_DEVICE);
+		if (dma_mapping_error(chip->dev, data_dma_addr)) {
+			pr_err("msm_nand_read_oob_dualnandc: "
+				"failed to get dma addr for %p\n",
+				ops->datbuf);
+			return -EIO;
+		}
+	}
+	if (ops->oobbuf) {
+		memset(ops->oobbuf, 0xff, ops->ooblen);
+		oob_dma_addr_curr = oob_dma_addr =
+			msm_nand_dma_map(chip->dev, ops->oobbuf,
+				       ops->ooblen, DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(chip->dev, oob_dma_addr)) {
+			pr_err("msm_nand_read_oob_dualnandc: "
+				"failed to get dma addr for %p\n",
+				ops->oobbuf);
+			err = -EIO;
+			goto err_dma_map_oobbuf_failed;
+		}
+	}
+
+	wait_event(chip->wait_queue,
+		   (dma_buffer = msm_nand_get_dma_buffer(
+			    chip, sizeof(*dma_buffer))));
+
+	oob_col = start_sector * chip->cw_size;
+	if (chip->CFG1 & CFG1_WIDE_FLASH) {
+		oob_col >>= 1;
+		cw_offset >>= 1;
+	}
+
+	err = 0;
+	while (page_count-- > 0) {
+		cmd = dma_buffer->cmd;
+
+		if (ops->mode != MTD_OOB_RAW) {
+			dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ_ECC;
+			if (start_sector == (cwperpage - 1)) {
+				dma_buffer->data.cfg0 = (chip->CFG0 &
+							~(7U << 6));
+			} else {
+				dma_buffer->data.cfg0 = (chip->CFG0 &
+				~(7U << 6))
+				| (((cwperpage >> 1)-1) << 6);
+			}
+			dma_buffer->data.cfg1 = chip->CFG1;
+			if (enable_bch_ecc)
+				dma_buffer->data.eccbchcfg = chip->ecc_bch_cfg;
+		} else {
+			dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ;
+			dma_buffer->data.cfg0 = ((chip->CFG0_RAW &
+				~(7U << 6)) | ((((cwperpage >> 1)-1) << 6)));
+			dma_buffer->data.cfg1 = chip->CFG1_RAW |
+					(chip->CFG1 & CFG1_WIDE_FLASH);
+		}
+
+		if (!interleave_enable) {
+			if (start_sector == (cwperpage - 1)) {
+				dma_buffer->data.nandc10_addr0 =
+							(page << 16) | oob_col;
+				dma_buffer->data.nc10_flash_dev_cmd_vld = 0xD;
+				dma_buffer->data.nc10_flash_dev_cmd1 =
+								0xF00F3000;
+			} else {
+				dma_buffer->data.nandc01_addr0 = page << 16;
+				/* NC10 ADDR0 points to the next code word */
+				dma_buffer->data.nandc10_addr0 = (page << 16) |
+								cw_offset;
+				dma_buffer->data.nc10_flash_dev_cmd_vld = 0x1D;
+				dma_buffer->data.nc10_flash_dev_cmd1 =
+								0xF00FE005;
+			}
+		} else {
+			dma_buffer->data.nandc01_addr0 =
+			dma_buffer->data.nandc10_addr0 =
+						(page << 16) | oob_col;
+		}
+		/* ADDR1 */
+		dma_buffer->data.nandc11_addr1 = (page >> 16) & 0xff;
+
+		dma_buffer->data.adm_mux_data_ack_req_nc01 = 0x00000A3C;
+		dma_buffer->data.adm_mux_cmd_ack_req_nc01  = 0x0000053C;
+		dma_buffer->data.adm_mux_data_ack_req_nc10 = 0x00000F28;
+		dma_buffer->data.adm_mux_cmd_ack_req_nc10  = 0x00000F14;
+		dma_buffer->data.adm_default_mux = 0x00000FC0;
+		dma_buffer->data.nc10_flash_dev_cmd_vld_default = 0x1D;
+		dma_buffer->data.nc10_flash_dev_cmd1_default = 0xF00F3000;
+
+		dma_buffer->data.ebi2_chip_select_cfg0 = 0x00000805;
+		dma_buffer->data.default_ebi2_chip_select_cfg0 = 0x00000801;
+
+		/* chipsel_0 + enable DM interface */
+		dma_buffer->data.chipsel_cs0 = (1<<4) | 4;
+		/* chipsel_1 + enable DM interface */
+		dma_buffer->data.chipsel_cs1 = (1<<4) | 5;
+
+		/* GO bit for the EXEC register */
+		dma_buffer->data.exec = 1;
+
+		BUILD_BUG_ON(16 != ARRAY_SIZE(dma_buffer->data.result));
+
+		for (n = start_sector; n < cwperpage; n++) {
+			/* flash + buffer status return words */
+			dma_buffer->data.result[n].flash_status = 0xeeeeeeee;
+			dma_buffer->data.result[n].buffer_status = 0xeeeeeeee;
+
+			if (n == start_sector) {
+				if (!interleave_enable) {
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->
+						data.nc10_flash_dev_cmd_vld);
+					cmd->dst = NC10(MSM_NAND_DEV_CMD_VLD);
+					cmd->len = 4;
+					cmd++;
+
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->data.nc10_flash_dev_cmd1);
+					cmd->dst = NC10(MSM_NAND_DEV_CMD1);
+					cmd->len = 4;
+					cmd++;
+
+					/* NC01, NC10 --> ADDR1 */
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->data.nandc11_addr1);
+					cmd->dst = NC11(MSM_NAND_ADDR1);
+					cmd->len = 8;
+					cmd++;
+
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+						&dma_buffer->data.cfg0);
+					cmd->dst = NC11(MSM_NAND_DEV0_CFG0);
+					if (enable_bch_ecc)
+						cmd->len = 12;
+					else
+						cmd->len = 8;
+					cmd++;
+				} else {
+					/* enable CS0 & CS1 */
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->
+						data.ebi2_chip_select_cfg0);
+					cmd->dst = EBI2_CHIP_SELECT_CFG0;
+					cmd->len = 4;
+					cmd++;
+
+					/* NC01, NC10 --> ADDR1 */
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->data.nandc11_addr1);
+					cmd->dst = NC11(MSM_NAND_ADDR1);
+					cmd->len = 4;
+					cmd++;
+
+					/* Enable CS0 for NC01 */
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->data.chipsel_cs0);
+					cmd->dst =
+					NC01(MSM_NAND_FLASH_CHIP_SELECT);
+					cmd->len = 4;
+					cmd++;
+
+					/* Enable CS1 for NC10 */
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->data.chipsel_cs1);
+					cmd->dst =
+					NC10(MSM_NAND_FLASH_CHIP_SELECT);
+					cmd->len = 4;
+					cmd++;
+
+					/* config DEV0_CFG0 & CFG1 for CS0 */
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->data.cfg0);
+					cmd->dst = NC01(MSM_NAND_DEV0_CFG0);
+					cmd->len = 8;
+					cmd++;
+
+					/* config DEV1_CFG0 & CFG1 for CS1 */
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->data.cfg0);
+					cmd->dst = NC10(MSM_NAND_DEV1_CFG0);
+					cmd->len = 8;
+					cmd++;
+				}
+
+				dma_buffer->data.ecccfg = chip->ecc_buf_cfg;
+				cmd->cmd = 0;
+				cmd->src = msm_virt_to_dma(chip,
+						&dma_buffer->data.ecccfg);
+				cmd->dst = NC11(MSM_NAND_EBI2_ECC_BUF_CFG);
+				cmd->len = 4;
+				cmd++;
+
+				/* if only the last code word is being read */
+				if (n == cwperpage - 1) {
+					/* MASK CMD ACK/REQ --> NC01 (0x53C)*/
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->
+						data.adm_mux_cmd_ack_req_nc01);
+					cmd->dst = EBI2_NAND_ADM_MUX;
+					cmd->len = 4;
+					cmd++;
+
+					/* CMD */
+					cmd->cmd = DST_CRCI_NAND_CMD;
+					cmd->src = msm_virt_to_dma(chip,
+							&dma_buffer->data.cmd);
+					cmd->dst = NC10(MSM_NAND_FLASH_CMD);
+					cmd->len = 4;
+					cmd++;
+
+					/* NC10 --> ADDR0 ( 0x0 ) */
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->data.nandc10_addr0);
+					cmd->dst = NC10(MSM_NAND_ADDR0);
+					cmd->len = 4;
+					cmd++;
+
+					/* kick the execute reg for NC10 */
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+						&dma_buffer->data.exec);
+					cmd->dst = NC10(MSM_NAND_EXEC_CMD);
+					cmd->len = 4;
+					cmd++;
+
+					/* MASK DATA ACK/REQ --> NC01 (0xA3C)*/
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->
+					data.adm_mux_data_ack_req_nc01);
+					cmd->dst = EBI2_NAND_ADM_MUX;
+					cmd->len = 4;
+					cmd++;
+
+					/* block on data ready from NC10, then
+					 * read the status register
+					 */
+					cmd->cmd = SRC_CRCI_NAND_DATA;
+					cmd->src = NC10(MSM_NAND_FLASH_STATUS);
+					cmd->dst = msm_virt_to_dma(chip,
+						&dma_buffer->data.result[n]);
+					/* MSM_NAND_FLASH_STATUS +
+					 * MSM_NAND_BUFFER_STATUS
+					 */
+					cmd->len = 8;
+					cmd++;
+				} else {
+					/* NC01 --> ADDR0 */
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->data.nandc01_addr0);
+					cmd->dst = NC01(MSM_NAND_ADDR0);
+					cmd->len = 4;
+					cmd++;
+
+					/* NC10 --> ADDR0 */
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->data.nandc10_addr0);
+					cmd->dst = NC10(MSM_NAND_ADDR0);
+					cmd->len = 4;
+					cmd++;
+
+					/* MASK CMD ACK/REQ --> NC10 (0xF14)*/
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->
+						data.adm_mux_cmd_ack_req_nc10);
+					cmd->dst = EBI2_NAND_ADM_MUX;
+					cmd->len = 4;
+					cmd++;
+
+					/* CMD */
+					cmd->cmd = DST_CRCI_NAND_CMD;
+					cmd->src = msm_virt_to_dma(chip,
+							&dma_buffer->data.cmd);
+					cmd->dst = NC01(MSM_NAND_FLASH_CMD);
+					cmd->len = 4;
+					cmd++;
+
+					/* kick the execute register for NC01*/
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+						 &dma_buffer->data.exec);
+					cmd->dst = NC01(MSM_NAND_EXEC_CMD);
+					cmd->len = 4;
+					cmd++;
+				}
+			}
+
+			/* read data block
+			 * (only valid if status says success)
+			 */
+			if (ops->datbuf || (ops->oobbuf &&
+						 ops->mode != MTD_OOB_AUTO)) {
+				if (ops->mode != MTD_OOB_RAW)
+					sectordatasize = (n < (cwperpage - 1))
+					? 516 : (512 - ((cwperpage - 1) << 2));
+				else
+					sectordatasize = chip->cw_size;
+
+				if (n % 2 == 0) {
+					/* MASK DATA ACK/REQ --> NC10 (0xF28)*/
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->
+					data.adm_mux_data_ack_req_nc10);
+					cmd->dst = EBI2_NAND_ADM_MUX;
+					cmd->len = 4;
+					cmd++;
+
+					/* block on data ready from NC01, then
+					 * read the status register
+					 */
+					cmd->cmd = SRC_CRCI_NAND_DATA;
+					cmd->src = NC01(MSM_NAND_FLASH_STATUS);
+					cmd->dst = msm_virt_to_dma(chip,
+						&dma_buffer->data.result[n]);
+					/* MSM_NAND_FLASH_STATUS +
+					 * MSM_NAND_BUFFER_STATUS
+					 */
+					cmd->len = 8;
+					cmd++;
+
+					/* MASK CMD ACK/REQ --> NC01 (0x53C)*/
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->
+						data.adm_mux_cmd_ack_req_nc01);
+					cmd->dst = EBI2_NAND_ADM_MUX;
+					cmd->len = 4;
+					cmd++;
+
+					/* CMD */
+					cmd->cmd = DST_CRCI_NAND_CMD;
+					cmd->src = msm_virt_to_dma(chip,
+							&dma_buffer->data.cmd);
+					cmd->dst = NC10(MSM_NAND_FLASH_CMD);
+					cmd->len = 4;
+					cmd++;
+
+					/* kick the execute register for NC10 */
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+						&dma_buffer->data.exec);
+					cmd->dst = NC10(MSM_NAND_EXEC_CMD);
+					cmd->len = 4;
+					cmd++;
+
+					/* Read only when there is data
+					 * buffer
+					 */
+					if (ops->datbuf) {
+						cmd->cmd = 0;
+						cmd->src =
+						NC01(MSM_NAND_FLASH_BUFFER);
+						cmd->dst = data_dma_addr_curr;
+						data_dma_addr_curr +=
+						sectordatasize;
+						cmd->len = sectordatasize;
+						cmd++;
+					}
+				} else {
+					/* MASK DATA ACK/REQ -->
+					 * NC01 (0xA3C)
+					 */
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->
+					data.adm_mux_data_ack_req_nc01);
+					cmd->dst = EBI2_NAND_ADM_MUX;
+					cmd->len = 4;
+					cmd++;
+
+					/* block on data ready from NC10
+					 * then read the status register
+					 */
+					cmd->cmd = SRC_CRCI_NAND_DATA;
+					cmd->src =
+					NC10(MSM_NAND_FLASH_STATUS);
+					cmd->dst = msm_virt_to_dma(chip,
+					   &dma_buffer->data.result[n]);
+					/* MSM_NAND_FLASH_STATUS +
+					 * MSM_NAND_BUFFER_STATUS
+					 */
+					cmd->len = 8;
+					cmd++;
+					if (n != cwperpage - 1) {
+						/* MASK CMD ACK/REQ -->
+						 * NC10 (0xF14)
+						 */
+						cmd->cmd = 0;
+						cmd->src =
+						msm_virt_to_dma(chip,
+						&dma_buffer->
+						data.adm_mux_cmd_ack_req_nc10);
+						cmd->dst = EBI2_NAND_ADM_MUX;
+						cmd->len = 4;
+						cmd++;
+
+						/* CMD */
+						cmd->cmd = DST_CRCI_NAND_CMD;
+						cmd->src = msm_virt_to_dma(chip,
+							&dma_buffer->data.cmd);
+						cmd->dst =
+						NC01(MSM_NAND_FLASH_CMD);
+						cmd->len = 4;
+						cmd++;
+
+						/* EXEC */
+						cmd->cmd = 0;
+						cmd->src = msm_virt_to_dma(chip,
+							&dma_buffer->data.exec);
+						cmd->dst =
+						NC01(MSM_NAND_EXEC_CMD);
+						cmd->len = 4;
+						cmd++;
+					}
+
+					/* Read only when there is data
+					 * buffer
+					 */
+					if (ops->datbuf) {
+						cmd->cmd = 0;
+						cmd->src =
+						NC10(MSM_NAND_FLASH_BUFFER);
+						cmd->dst = data_dma_addr_curr;
+						data_dma_addr_curr +=
+						sectordatasize;
+						cmd->len = sectordatasize;
+						cmd++;
+					}
+				}
+			}
+
+			if (ops->oobbuf && (n == (cwperpage - 1)
+			     || ops->mode != MTD_OOB_AUTO)) {
+				cmd->cmd = 0;
+				if (n == (cwperpage - 1)) {
+					/* Use NC10 for reading the
+					 * last codeword.
+					 */
+					cmd->src = NC10(MSM_NAND_FLASH_BUFFER) +
+						(512 - ((cwperpage - 1) << 2));
+					sectoroobsize = (cwperpage << 2);
+					if (ops->mode != MTD_OOB_AUTO)
+						sectoroobsize +=
+							chip->ecc_parity_bytes;
+				} else {
+					if (n % 2 == 0)
+						cmd->src =
+						NC01(MSM_NAND_FLASH_BUFFER)
+						+ 516;
+					else
+						cmd->src =
+						NC10(MSM_NAND_FLASH_BUFFER)
+						+ 516;
+					sectoroobsize = chip->ecc_parity_bytes;
+				}
+				cmd->dst = oob_dma_addr_curr;
+				if (sectoroobsize < oob_len)
+					cmd->len = sectoroobsize;
+				else
+					cmd->len = oob_len;
+				oob_dma_addr_curr += cmd->len;
+				oob_len -= cmd->len;
+				if (cmd->len > 0)
+					cmd++;
+			}
+		}
+		/* ADM --> Default mux state (0xFC0) */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip,
+			&dma_buffer->data.adm_default_mux);
+		cmd->dst = EBI2_NAND_ADM_MUX;
+		cmd->len = 4;
+		cmd++;
+
+		if (!interleave_enable) {
+			cmd->cmd = 0;
+			cmd->src = msm_virt_to_dma(chip,
+			&dma_buffer->data.nc10_flash_dev_cmd_vld_default);
+			cmd->dst = NC10(MSM_NAND_DEV_CMD_VLD);
+			cmd->len = 4;
+			cmd++;
+
+			cmd->cmd = 0;
+			cmd->src = msm_virt_to_dma(chip,
+			&dma_buffer->data.nc10_flash_dev_cmd1_default);
+			cmd->dst = NC10(MSM_NAND_DEV_CMD1);
+			cmd->len = 4;
+			cmd++;
+		} else {
+			/* disable CS1 */
+			cmd->cmd = 0;
+			cmd->src = msm_virt_to_dma(chip,
+			&dma_buffer->data.default_ebi2_chip_select_cfg0);
+			cmd->dst = EBI2_CHIP_SELECT_CFG0;
+			cmd->len = 4;
+			cmd++;
+		}
+
+		BUILD_BUG_ON(16 * 6 + 20 != ARRAY_SIZE(dma_buffer->cmd));
+		BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+		dma_buffer->cmd[0].cmd |= CMD_OCB;
+		cmd[-1].cmd |= CMD_OCU | CMD_LC;
+
+		dma_buffer->cmdptr =
+			(msm_virt_to_dma(chip, dma_buffer->cmd) >> 3)
+			| CMD_PTR_LP;
+
+		mb();
+		msm_dmov_exec_cmd(chip->dma_channel, crci_mask,
+			DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
+			&dma_buffer->cmdptr)));
+		mb();
+
+		/* if any of the reads failed (0x10), or there
+		 * was a protection violation (0x100), we lose
+		 */
+		pageerr = rawerr = 0;
+		for (n = start_sector; n < cwperpage; n++) {
+			if (dma_buffer->data.result[n].flash_status & 0x110) {
+				rawerr = -EIO;
+				break;
+			}
+		}
+		if (rawerr) {
+			if (ops->datbuf && ops->mode != MTD_OOB_RAW) {
+				uint8_t *datbuf = ops->datbuf +
+					pages_read * mtd->writesize;
+
+				dma_sync_single_for_cpu(chip->dev,
+					data_dma_addr_curr-mtd->writesize,
+					mtd->writesize, DMA_BIDIRECTIONAL);
+
+				for (n = 0; n < mtd->writesize; n++) {
+					/* empty blocks read 0x54 at
+					 * these offsets
+					 */
+					if ((n % 516 == 3 || n % 516 == 175)
+							&& datbuf[n] == 0x54)
+						datbuf[n] = 0xff;
+					if (datbuf[n] != 0xff) {
+						pageerr = rawerr;
+						break;
+					}
+				}
+
+				dma_sync_single_for_device(chip->dev,
+					data_dma_addr_curr-mtd->writesize,
+					mtd->writesize, DMA_BIDIRECTIONAL);
+
+			}
+			if (ops->oobbuf) {
+				dma_sync_single_for_cpu(chip->dev,
+				oob_dma_addr_curr - (ops->ooblen - oob_len),
+				ops->ooblen - oob_len, DMA_BIDIRECTIONAL);
+
+				for (n = 0; n < ops->ooblen; n++) {
+					if (ops->oobbuf[n] != 0xff) {
+						pageerr = rawerr;
+						break;
+					}
+				}
+
+				dma_sync_single_for_device(chip->dev,
+				oob_dma_addr_curr - (ops->ooblen - oob_len),
+				ops->ooblen - oob_len, DMA_BIDIRECTIONAL);
+			}
+		}
+		if (pageerr) {
+			for (n = start_sector; n < cwperpage; n++) {
+				if (dma_buffer->data.result[n].buffer_status
+					& MSM_NAND_BUF_STAT_UNCRCTBL_ERR) {
+					/* not thread safe */
+					mtd->ecc_stats.failed++;
+					pageerr = -EBADMSG;
+					break;
+				}
+			}
+		}
+		if (!rawerr) { /* check for correctable errors */
+			for (n = start_sector; n < cwperpage; n++) {
+				ecc_errors = dma_buffer->data.
+					result[n].buffer_status
+					& MSM_NAND_BUF_STAT_NUM_ERR_MASK;
+				if (ecc_errors) {
+					total_ecc_errors += ecc_errors;
+					/* not thread safe */
+					mtd->ecc_stats.corrected += ecc_errors;
+					if (ecc_errors > 1)
+						pageerr = -EUCLEAN;
+				}
+			}
+		}
+		if (pageerr && (pageerr != -EUCLEAN || err == 0))
+			err = pageerr;
+
+#if VERBOSE
+		if (rawerr && !pageerr) {
+			pr_err("msm_nand_read_oob_dualnandc "
+				"%llx %x %x empty page\n",
+			       (loff_t)page * mtd->writesize, ops->len,
+			       ops->ooblen);
+		} else {
+			for (n = start_sector; n < cwperpage; n++) {
+				if (n%2) {
+					pr_info("NC10: flash_status[%d] = %x, "
+					 "buffer_status[%d] = %x\n",
+					n, dma_buffer->
+						data.result[n].flash_status,
+					n, dma_buffer->
+						data.result[n].buffer_status);
+				} else {
+					pr_info("NC01: flash_status[%d] = %x, "
+					 "buffer_status[%d] = %x\n",
+					n, dma_buffer->
+						data.result[n].flash_status,
+					n, dma_buffer->
+						data.result[n].buffer_status);
+				}
+			}
+		}
+#endif
+		if (err && err != -EUCLEAN && err != -EBADMSG)
+			break;
+		pages_read++;
+		page++;
+	}
+
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+
+	if (ops->oobbuf) {
+		dma_unmap_page(chip->dev, oob_dma_addr,
+				 ops->ooblen, DMA_FROM_DEVICE);
+	}
+err_dma_map_oobbuf_failed:
+	if (ops->datbuf) {
+		dma_unmap_page(chip->dev, data_dma_addr,
+				 ops->len, DMA_BIDIRECTIONAL);
+	}
+
+	if (ops->mode != MTD_OOB_RAW)
+		ops->retlen = mtd->writesize * pages_read;
+	else
+		ops->retlen = (mtd->writesize +  mtd->oobsize) *
+							pages_read;
+	ops->oobretlen = ops->ooblen - oob_len;
+	if (err)
+		pr_err("msm_nand_read_oob_dualnandc "
+			"%llx %x %x failed %d, corrected %d\n",
+			from, ops->datbuf ? ops->len : 0, ops->ooblen, err,
+			total_ecc_errors);
+#if VERBOSE
+	pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
+			__func__, err, ops->retlen, ops->oobretlen);
+
+	pr_info("==================================================="
+			"==========\n");
+#endif
+	return err;
+}
+
+static int
+msm_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
+	      size_t *retlen, u_char *buf)
+{
+	int ret;
+	struct mtd_oob_ops ops;
+
+	/* printk("msm_nand_read %llx %x\n", from, len); */
+
+	ops.mode = MTD_OOB_PLACE;
+	ops.len = len;
+	ops.retlen = 0;
+	ops.ooblen = 0;
+	ops.datbuf = buf;
+	ops.oobbuf = NULL;
+	if (!dual_nand_ctlr_present)
+		ret =  msm_nand_read_oob(mtd, from, &ops);
+	else
+		ret = msm_nand_read_oob_dualnandc(mtd, from, &ops);
+	*retlen = ops.retlen;
+	return ret;
+}
+
+static int
+msm_nand_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
+{
+	struct msm_nand_chip *chip = mtd->priv;
+	struct {
+		dmov_s cmd[8 * 7 + 2];
+		unsigned cmdptr;
+		struct {
+			uint32_t cmd;
+			uint32_t addr0;
+			uint32_t addr1;
+			uint32_t chipsel;
+			uint32_t cfg0;
+			uint32_t cfg1;
+			uint32_t eccbchcfg;
+			uint32_t exec;
+			uint32_t ecccfg;
+			uint32_t clrfstatus;
+			uint32_t clrrstatus;
+			uint32_t flash_status[8];
+		} data;
+	} *dma_buffer;
+	dmov_s *cmd;
+	unsigned n;
+	unsigned page = 0;
+	uint32_t oob_len;
+	uint32_t sectordatawritesize;
+	int err = 0;
+	dma_addr_t data_dma_addr = 0;
+	dma_addr_t oob_dma_addr = 0;
+	dma_addr_t data_dma_addr_curr = 0;
+	dma_addr_t oob_dma_addr_curr = 0;
+	unsigned page_count;
+	unsigned pages_written = 0;
+	unsigned cwperpage;
+#if VERBOSE
+	pr_info("================================================="
+			"================\n");
+	pr_info("%s:\nto 0x%llx mode %d\ndatbuf 0x%p datlen 0x%x"
+			"\noobbuf 0x%p ooblen 0x%x\n",
+			__func__, to, ops->mode, ops->datbuf, ops->len,
+			ops->oobbuf, ops->ooblen);
+#endif
+
+	if (mtd->writesize == 2048)
+		page = to >> 11;
+
+	if (mtd->writesize == 4096)
+		page = to >> 12;
+
+	oob_len = ops->ooblen;
+	cwperpage = (mtd->writesize >> 9);
+
+	if (to & (mtd->writesize - 1)) {
+		pr_err("%s: unsupported to, 0x%llx\n", __func__, to);
+		return -EINVAL;
+	}
+
+	if (ops->mode != MTD_OOB_RAW) {
+		if (ops->ooblen != 0 && ops->mode != MTD_OOB_AUTO) {
+			pr_err("%s: unsupported ops->mode,%d\n",
+					 __func__, ops->mode);
+			return -EINVAL;
+		}
+		if ((ops->len % mtd->writesize) != 0) {
+			pr_err("%s: unsupported ops->len, %d\n",
+					__func__, ops->len);
+			return -EINVAL;
+		}
+	} else {
+		if ((ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
+			pr_err("%s: unsupported ops->len, "
+				"%d for MTD_OOB_RAW mode\n",
+				 __func__, ops->len);
+			return -EINVAL;
+		}
+	}
+
+	if (ops->datbuf == NULL) {
+		pr_err("%s: unsupported ops->datbuf == NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (ops->mode != MTD_OOB_RAW && ops->ooblen != 0 && ops->ooboffs != 0) {
+		pr_err("%s: unsupported ops->ooboffs, %d\n",
+		       __func__, ops->ooboffs);
+		return -EINVAL;
+	}
+
+	if (ops->datbuf) {
+		data_dma_addr_curr = data_dma_addr =
+			msm_nand_dma_map(chip->dev, ops->datbuf,
+				       ops->len, DMA_TO_DEVICE);
+		if (dma_mapping_error(chip->dev, data_dma_addr)) {
+			pr_err("msm_nand_write_oob: failed to get dma addr "
+			       "for %p\n", ops->datbuf);
+			return -EIO;
+		}
+	}
+	if (ops->oobbuf) {
+		oob_dma_addr_curr = oob_dma_addr =
+			msm_nand_dma_map(chip->dev, ops->oobbuf,
+				       ops->ooblen, DMA_TO_DEVICE);
+		if (dma_mapping_error(chip->dev, oob_dma_addr)) {
+			pr_err("msm_nand_write_oob: failed to get dma addr "
+			       "for %p\n", ops->oobbuf);
+			err = -EIO;
+			goto err_dma_map_oobbuf_failed;
+		}
+	}
+	if (ops->mode != MTD_OOB_RAW)
+		page_count = ops->len / mtd->writesize;
+	else
+		page_count = ops->len / (mtd->writesize + mtd->oobsize);
+
+	wait_event(chip->wait_queue, (dma_buffer =
+			msm_nand_get_dma_buffer(chip, sizeof(*dma_buffer))));
+
+	while (page_count-- > 0) {
+		cmd = dma_buffer->cmd;
+
+		if (ops->mode != MTD_OOB_RAW) {
+			dma_buffer->data.cfg0 = chip->CFG0;
+			dma_buffer->data.cfg1 = chip->CFG1;
+			if (enable_bch_ecc)
+				dma_buffer->data.eccbchcfg = chip->ecc_bch_cfg;
+		} else {
+			dma_buffer->data.cfg0 = (chip->CFG0_RAW &
+					~(7U << 6)) | ((cwperpage-1) << 6);
+			dma_buffer->data.cfg1 = chip->CFG1_RAW |
+						(chip->CFG1 & CFG1_WIDE_FLASH);
+		}
+
+		/* CMD / ADDR0 / ADDR1 / CHIPSEL program values */
+		dma_buffer->data.cmd = MSM_NAND_CMD_PRG_PAGE;
+		dma_buffer->data.addr0 = page << 16;
+		dma_buffer->data.addr1 = (page >> 16) & 0xff;
+		/* chipsel_0 + enable DM interface */
+		dma_buffer->data.chipsel = 0 | 4;
+
+
+		/* GO bit for the EXEC register */
+		dma_buffer->data.exec = 1;
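+		/* clrfstatus/clrrstatus are written back to FLASH_STATUS and
+		 * READ_STATUS after each codeword's status has been read,
+		 * clearing them before the next codeword is programmed.
+		 */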
+		dma_buffer->data.clrfstatus = 0x00000020;
+		dma_buffer->data.clrrstatus = 0x000000C0;
+
+		BUILD_BUG_ON(8 != ARRAY_SIZE(dma_buffer->data.flash_status));
+
+		for (n = 0; n < cwperpage ; n++) {
+			/* status return words */
+			dma_buffer->data.flash_status[n] = 0xeeeeeeee;
+			/* block on cmd ready, then
+			 * write CMD / ADDR0 / ADDR1 / CHIPSEL regs in a burst
+			 */
+			cmd->cmd = DST_CRCI_NAND_CMD;
+			cmd->src =
+				msm_virt_to_dma(chip, &dma_buffer->data.cmd);
+			cmd->dst = MSM_NAND_FLASH_CMD;
+			if (n == 0)
+				cmd->len = 16;
+			else
+				cmd->len = 4;
+			cmd++;
+
+			if (n == 0) {
+				cmd->cmd = 0;
+				cmd->src = msm_virt_to_dma(chip,
+							&dma_buffer->data.cfg0);
+				cmd->dst = MSM_NAND_DEV0_CFG0;
+				if (enable_bch_ecc)
+					cmd->len = 12;
+				else
+					cmd->len = 8;
+				cmd++;
+
+				dma_buffer->data.ecccfg = chip->ecc_buf_cfg;
+				cmd->cmd = 0;
+				cmd->src = msm_virt_to_dma(chip,
+						 &dma_buffer->data.ecccfg);
+				cmd->dst = MSM_NAND_EBI2_ECC_BUF_CFG;
+				cmd->len = 4;
+				cmd++;
+			}
+
+			/* write data block */
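+			/* Same codeword layout as the read path: 516 bytes of
+			 * user data per codeword except the last, which takes
+			 * 512 - 4*(cwperpage-1) bytes; in raw mode the full
+			 * chip->cw_size codeword is written.
+			 */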
+			if (ops->mode != MTD_OOB_RAW)
+				sectordatawritesize = (n < (cwperpage - 1)) ?
+					516 : (512 - ((cwperpage - 1) << 2));
+			else
+				sectordatawritesize = chip->cw_size;
+
+			cmd->cmd = 0;
+			cmd->src = data_dma_addr_curr;
+			data_dma_addr_curr += sectordatawritesize;
+			cmd->dst = MSM_NAND_FLASH_BUFFER;
+			cmd->len = sectordatawritesize;
+			cmd++;
+
+			if (ops->oobbuf) {
+				if (n == (cwperpage - 1)) {
+					cmd->cmd = 0;
+					cmd->src = oob_dma_addr_curr;
+					cmd->dst = MSM_NAND_FLASH_BUFFER +
+						(512 - ((cwperpage - 1) << 2));
+					if ((cwperpage << 2) < oob_len)
+						cmd->len = (cwperpage << 2);
+					else
+						cmd->len = oob_len;
+					oob_dma_addr_curr += cmd->len;
+					oob_len -= cmd->len;
+					if (cmd->len > 0)
+						cmd++;
+				}
+				if (ops->mode != MTD_OOB_AUTO) {
+					/* skip ecc bytes in oobbuf */
+					if (oob_len < chip->ecc_parity_bytes) {
+						oob_dma_addr_curr +=
+							chip->ecc_parity_bytes;
+						oob_len -=
+							chip->ecc_parity_bytes;
+					} else {
+						oob_dma_addr_curr += oob_len;
+						oob_len = 0;
+					}
+				}
+			}
+
+			/* kick the execute register */
+			cmd->cmd = 0;
+			cmd->src =
+				msm_virt_to_dma(chip, &dma_buffer->data.exec);
+			cmd->dst = MSM_NAND_EXEC_CMD;
+			cmd->len = 4;
+			cmd++;
+
+			/* block on data ready, then
+			 * read the status register
+			 */
+			cmd->cmd = SRC_CRCI_NAND_DATA;
+			cmd->src = MSM_NAND_FLASH_STATUS;
+			cmd->dst = msm_virt_to_dma(chip,
+					     &dma_buffer->data.flash_status[n]);
+			cmd->len = 4;
+			cmd++;
+
+			cmd->cmd = 0;
+			cmd->src = msm_virt_to_dma(chip,
+						&dma_buffer->data.clrfstatus);
+			cmd->dst = MSM_NAND_FLASH_STATUS;
+			cmd->len = 4;
+			cmd++;
+
+			cmd->cmd = 0;
+			cmd->src = msm_virt_to_dma(chip,
+						&dma_buffer->data.clrrstatus);
+			cmd->dst = MSM_NAND_READ_STATUS;
+			cmd->len = 4;
+			cmd++;
+
+		}
+
+		dma_buffer->cmd[0].cmd |= CMD_OCB;
+		cmd[-1].cmd |= CMD_OCU | CMD_LC;
+		BUILD_BUG_ON(8 * 7 + 2 != ARRAY_SIZE(dma_buffer->cmd));
+		BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+		dma_buffer->cmdptr =
+			(msm_virt_to_dma(chip, dma_buffer->cmd) >> 3) |
+			CMD_PTR_LP;
+
+		mb();
+		msm_dmov_exec_cmd(chip->dma_channel, crci_mask,
+			DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(
+				msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
+		mb();
+
+		/* if any of the writes failed (0x10), or there was a
+		 * protection violation (0x100), or the program success
+		 * bit (0x80) is unset, we lose
+		 */
+		err = 0;
+		for (n = 0; n < cwperpage; n++) {
+			if (dma_buffer->data.flash_status[n] & 0x110) {
+				err = -EIO;
+				break;
+			}
+			if (!(dma_buffer->data.flash_status[n] & 0x80)) {
+				err = -EIO;
+				break;
+			}
+		}
+
+#if VERBOSE
+		for (n = 0; n < cwperpage; n++)
+			pr_info("write pg %d: flash_status[%d] = %x\n", page,
+				n, dma_buffer->data.flash_status[n]);
+
+#endif
+		if (err)
+			break;
+		pages_written++;
+		page++;
+	}
+	if (ops->mode != MTD_OOB_RAW)
+		ops->retlen = mtd->writesize * pages_written;
+	else
+		ops->retlen = (mtd->writesize + mtd->oobsize) * pages_written;
+
+	ops->oobretlen = ops->ooblen - oob_len;
+
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+
+	if (ops->oobbuf)
+		dma_unmap_page(chip->dev, oob_dma_addr,
+				 ops->ooblen, DMA_TO_DEVICE);
+err_dma_map_oobbuf_failed:
+	if (ops->datbuf)
+		dma_unmap_page(chip->dev, data_dma_addr, ops->len,
+				DMA_TO_DEVICE);
+	if (err)
+		pr_err("msm_nand_write_oob %llx %x %x failed %d\n",
+		       to, ops->len, ops->ooblen, err);
+
+#if VERBOSE
+		pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
+				__func__, err, ops->retlen, ops->oobretlen);
+
+		pr_info("==================================================="
+				"==============\n");
+#endif
+	return err;
+}
+
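+/* Dual-NAND-controller write path: mirrors msm_nand_write_oob but splits the
+ * page programming across NC01 and NC10, switching the EBI2 ADM mux between
+ * the two controllers' command/data handshakes.
+ */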
+static int
+msm_nand_write_oob_dualnandc(struct mtd_info *mtd, loff_t to,
+				struct mtd_oob_ops *ops)
+{
+	struct msm_nand_chip *chip = mtd->priv;
+	struct {
+		dmov_s cmd[16 * 6 + 18];
+		unsigned cmdptr;
+		struct {
+			uint32_t cmd;
+			uint32_t nandc01_addr0;
+			uint32_t nandc10_addr0;
+			uint32_t nandc11_addr1;
+			uint32_t chipsel_cs0;
+			uint32_t chipsel_cs1;
+			uint32_t cfg0;
+			uint32_t cfg1;
+			uint32_t eccbchcfg;
+			uint32_t exec;
+			uint32_t ecccfg;
+			uint32_t cfg0_nc01;
+			uint32_t ebi2_chip_select_cfg0;
+			uint32_t adm_mux_data_ack_req_nc01;
+			uint32_t adm_mux_cmd_ack_req_nc01;
+			uint32_t adm_mux_data_ack_req_nc10;
+			uint32_t adm_mux_cmd_ack_req_nc10;
+			uint32_t adm_default_mux;
+			uint32_t default_ebi2_chip_select_cfg0;
+			uint32_t nc01_flash_dev_cmd_vld;
+			uint32_t nc10_flash_dev_cmd0;
+			uint32_t nc01_flash_dev_cmd_vld_default;
+			uint32_t nc10_flash_dev_cmd0_default;
+			uint32_t flash_status[16];
+			uint32_t clrfstatus;
+			uint32_t clrrstatus;
+		} data;
+	} *dma_buffer;
+	dmov_s *cmd;
+	unsigned n;
+	unsigned page = 0;
+	uint32_t oob_len;
+	uint32_t sectordatawritesize;
+	int err = 0;
+	dma_addr_t data_dma_addr = 0;
+	dma_addr_t oob_dma_addr = 0;
+	dma_addr_t data_dma_addr_curr = 0;
+	dma_addr_t oob_dma_addr_curr = 0;
+	unsigned page_count;
+	unsigned pages_written = 0;
+	unsigned cwperpage;
+	unsigned cw_offset = chip->cw_size;
+#if VERBOSE
+		pr_info("================================================="
+				"============\n");
+		pr_info("%s:\nto 0x%llx mode %d\ndatbuf 0x%p datlen 0x%x"
+				"\noobbuf 0x%p ooblen 0x%x\n\n",
+				__func__, to, ops->mode, ops->datbuf, ops->len,
+				ops->oobbuf, ops->ooblen);
+#endif
+
+	if (mtd->writesize == 2048)
+		page = to >> 11;
+
+	if (mtd->writesize == 4096)
+		page = to >> 12;
+
+	if (interleave_enable)
+		page = (to >> 1) >> 12;
+
+	oob_len = ops->ooblen;
+	cwperpage = (mtd->writesize >> 9);
+
+	if (to & (mtd->writesize - 1)) {
+		pr_err("%s: unsupported to, 0x%llx\n", __func__, to);
+		return -EINVAL;
+	}
+
+	if (ops->mode != MTD_OOB_RAW) {
+		if (ops->ooblen != 0 && ops->mode != MTD_OOB_AUTO) {
+			pr_err("%s: unsupported ops->mode,%d\n",
+					 __func__, ops->mode);
+			return -EINVAL;
+		}
+		if ((ops->len % mtd->writesize) != 0) {
+			pr_err("%s: unsupported ops->len, %d\n",
+					__func__, ops->len);
+			return -EINVAL;
+		}
+	} else {
+		if ((ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
+			pr_err("%s: unsupported ops->len, "
+				"%d for MTD_OOB_RAW mode\n",
+				 __func__, ops->len);
+			return -EINVAL;
+		}
+	}
+
+	if (ops->datbuf == NULL) {
+		pr_err("%s: unsupported ops->datbuf == NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	if (ops->mode != MTD_OOB_RAW && ops->ooblen != 0 && ops->ooboffs != 0) {
+		pr_err("%s: unsupported ops->ooboffs, %d\n",
+		       __func__, ops->ooboffs);
+		return -EINVAL;
+	}
+
+	if (ops->datbuf) {
+		data_dma_addr_curr = data_dma_addr =
+			msm_nand_dma_map(chip->dev, ops->datbuf,
+				       ops->len, DMA_TO_DEVICE);
+		if (dma_mapping_error(chip->dev, data_dma_addr)) {
+			pr_err("msm_nand_write_oob_dualnandc: "
+				"failed to get dma addr "
+			       "for %p\n", ops->datbuf);
+			return -EIO;
+		}
+	}
+	if (ops->oobbuf) {
+		oob_dma_addr_curr = oob_dma_addr =
+			msm_nand_dma_map(chip->dev, ops->oobbuf,
+				       ops->ooblen, DMA_TO_DEVICE);
+		if (dma_mapping_error(chip->dev, oob_dma_addr)) {
+			pr_err("msm_nand_write_oob_dualnandc: "
+				"failed to get dma addr "
+			       "for %p\n", ops->oobbuf);
+			err = -EIO;
+			goto err_dma_map_oobbuf_failed;
+		}
+	}
+	if (ops->mode != MTD_OOB_RAW)
+		page_count = ops->len / mtd->writesize;
+	else
+		page_count = ops->len / (mtd->writesize + mtd->oobsize);
+
+	wait_event(chip->wait_queue, (dma_buffer =
+			msm_nand_get_dma_buffer(chip, sizeof(*dma_buffer))));
+
+	if (chip->CFG1 & CFG1_WIDE_FLASH)
+		cw_offset >>= 1;
+
+	dma_buffer->data.ebi2_chip_select_cfg0 = 0x00000805;
+	dma_buffer->data.adm_mux_data_ack_req_nc01 = 0x00000A3C;
+	dma_buffer->data.adm_mux_cmd_ack_req_nc01  = 0x0000053C;
+	dma_buffer->data.adm_mux_data_ack_req_nc10 = 0x00000F28;
+	dma_buffer->data.adm_mux_cmd_ack_req_nc10  = 0x00000F14;
+	dma_buffer->data.adm_default_mux = 0x00000FC0;
+	dma_buffer->data.default_ebi2_chip_select_cfg0 = 0x00000801;
+	dma_buffer->data.nc01_flash_dev_cmd_vld = 0x9;
+	dma_buffer->data.nc10_flash_dev_cmd0 = 0x1085D060;
+	dma_buffer->data.nc01_flash_dev_cmd_vld_default = 0x1D;
+	dma_buffer->data.nc10_flash_dev_cmd0_default = 0x1080D060;
+	dma_buffer->data.clrfstatus = 0x00000020;
+	dma_buffer->data.clrrstatus = 0x000000C0;
+
+	while (page_count-- > 0) {
+		cmd = dma_buffer->cmd;
+
+		if (ops->mode != MTD_OOB_RAW) {
+			dma_buffer->data.cfg0 = ((chip->CFG0 & ~(7U << 6))
+				& ~(1 << 4)) | ((((cwperpage >> 1)-1)) << 6);
+			dma_buffer->data.cfg1 = chip->CFG1;
+			if (enable_bch_ecc)
+				dma_buffer->data.eccbchcfg = chip->ecc_bch_cfg;
+		} else {
+			dma_buffer->data.cfg0 = ((chip->CFG0_RAW &
+			~(7U << 6)) & ~(1 << 4)) | (((cwperpage >> 1)-1) << 6);
+			dma_buffer->data.cfg1 = chip->CFG1_RAW |
+					(chip->CFG1 & CFG1_WIDE_FLASH);
+		}
+
+		/* Disables the automatic issuing of the read
+		 * status command for first NAND controller.
+		 */
+		if (!interleave_enable)
+			dma_buffer->data.cfg0_nc01 = dma_buffer->data.cfg0
+							| (1 << 4);
+		else
+			dma_buffer->data.cfg0 |= (1 << 4);
+
+		dma_buffer->data.cmd = MSM_NAND_CMD_PRG_PAGE;
+		dma_buffer->data.chipsel_cs0 = (1<<4) | 4;
+		dma_buffer->data.chipsel_cs1 = (1<<4) | 5;
+
+		/* GO bit for the EXEC register */
+		dma_buffer->data.exec = 1;
+
+		if (!interleave_enable) {
+			dma_buffer->data.nandc01_addr0 = (page << 16) | 0x0;
+			/* NC10 ADDR0 points to the next code word */
+			dma_buffer->data.nandc10_addr0 =
+					(page << 16) | cw_offset;
+		} else {
+			dma_buffer->data.nandc01_addr0 =
+			dma_buffer->data.nandc10_addr0 = (page << 16) | 0x0;
+		}
+		/* ADDR1 */
+		dma_buffer->data.nandc11_addr1 = (page >> 16) & 0xff;
+
+		BUILD_BUG_ON(16 != ARRAY_SIZE(dma_buffer->data.flash_status));
+
+		for (n = 0; n < cwperpage; n++) {
+			/* status return words */
+			dma_buffer->data.flash_status[n] = 0xeeeeeeee;
+
+			if (n == 0) {
+				if (!interleave_enable) {
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->
+					data.nc01_flash_dev_cmd_vld);
+					cmd->dst = NC01(MSM_NAND_DEV_CMD_VLD);
+					cmd->len = 4;
+					cmd++;
+
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->data.nc10_flash_dev_cmd0);
+					cmd->dst = NC10(MSM_NAND_DEV_CMD0);
+					cmd->len = 4;
+					cmd++;
+
+					/* common settings for both NC01 & NC10
+					 * NC01, NC10 --> ADDR1 / CHIPSEL
+					 */
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->data.nandc11_addr1);
+					cmd->dst = NC11(MSM_NAND_ADDR1);
+					cmd->len = 8;
+					cmd++;
+
+					/* Disables the automatic issue of the
+					 * read status command after the write
+					 * operation.
+					 */
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+						&dma_buffer->data.cfg0_nc01);
+					cmd->dst = NC01(MSM_NAND_DEV0_CFG0);
+					cmd->len = 4;
+					cmd++;
+
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+						&dma_buffer->data.cfg0);
+					cmd->dst = NC10(MSM_NAND_DEV0_CFG0);
+					cmd->len = 4;
+					cmd++;
+
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+						&dma_buffer->data.cfg1);
+					cmd->dst = NC11(MSM_NAND_DEV0_CFG1);
+					if (enable_bch_ecc)
+						cmd->len = 8;
+					else
+						cmd->len = 4;
+					cmd++;
+				} else {
+					/* enable CS1 */
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->
+					data.ebi2_chip_select_cfg0);
+					cmd->dst = EBI2_CHIP_SELECT_CFG0;
+					cmd->len = 4;
+					cmd++;
+
+					/* NC11 --> ADDR1 */
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->data.nandc11_addr1);
+					cmd->dst = NC11(MSM_NAND_ADDR1);
+					cmd->len = 4;
+					cmd++;
+
+					/* Enable CS0 for NC01 */
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->data.chipsel_cs0);
+					cmd->dst =
+					NC01(MSM_NAND_FLASH_CHIP_SELECT);
+					cmd->len = 4;
+					cmd++;
+
+					/* Enable CS1 for NC10 */
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->data.chipsel_cs1);
+					cmd->dst =
+					NC10(MSM_NAND_FLASH_CHIP_SELECT);
+					cmd->len = 4;
+					cmd++;
+
+					/* config DEV0_CFG0 & CFG1 for CS0 */
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+						&dma_buffer->data.cfg0);
+					cmd->dst = NC01(MSM_NAND_DEV0_CFG0);
+					cmd->len = 8;
+					cmd++;
+
+					/* config DEV1_CFG0 & CFG1 for CS1 */
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->data.cfg0);
+					cmd->dst = NC10(MSM_NAND_DEV1_CFG0);
+					cmd->len = 8;
+					cmd++;
+				}
+
+				dma_buffer->data.ecccfg = chip->ecc_buf_cfg;
+				cmd->cmd = 0;
+				cmd->src = msm_virt_to_dma(chip,
+						&dma_buffer->data.ecccfg);
+				cmd->dst = NC11(MSM_NAND_EBI2_ECC_BUF_CFG);
+				cmd->len = 4;
+				cmd++;
+
+				/* NC01 --> ADDR0 */
+				cmd->cmd = 0;
+				cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->data.nandc01_addr0);
+				cmd->dst = NC01(MSM_NAND_ADDR0);
+				cmd->len = 4;
+				cmd++;
+
+				/* NC10 --> ADDR0 */
+				cmd->cmd = 0;
+				cmd->src = msm_virt_to_dma(chip,
+				&dma_buffer->data.nandc10_addr0);
+				cmd->dst = NC10(MSM_NAND_ADDR0);
+				cmd->len = 4;
+				cmd++;
+			}
+
+			if (n % 2 == 0) {
+				/* MASK CMD ACK/REQ --> NC10 (0xF14)*/
+				cmd->cmd = 0;
+				cmd->src = msm_virt_to_dma(chip,
+				&dma_buffer->data.adm_mux_cmd_ack_req_nc10);
+				cmd->dst = EBI2_NAND_ADM_MUX;
+				cmd->len = 4;
+				cmd++;
+
+				/* CMD */
+				cmd->cmd = DST_CRCI_NAND_CMD;
+				cmd->src = msm_virt_to_dma(chip,
+						&dma_buffer->data.cmd);
+				cmd->dst = NC01(MSM_NAND_FLASH_CMD);
+				cmd->len = 4;
+				cmd++;
+			} else {
+				/* MASK CMD ACK/REQ --> NC01 (0x53C)*/
+				cmd->cmd = 0;
+				cmd->src = msm_virt_to_dma(chip,
+				&dma_buffer->data.adm_mux_cmd_ack_req_nc01);
+				cmd->dst = EBI2_NAND_ADM_MUX;
+				cmd->len = 4;
+				cmd++;
+
+				/* CMD */
+				cmd->cmd = DST_CRCI_NAND_CMD;
+				cmd->src = msm_virt_to_dma(chip,
+						&dma_buffer->data.cmd);
+				cmd->dst = NC10(MSM_NAND_FLASH_CMD);
+				cmd->len = 4;
+				cmd++;
+			}
+
+			if (ops->mode != MTD_OOB_RAW)
+				sectordatawritesize = (n < (cwperpage - 1)) ?
+					516 : (512 - ((cwperpage - 1) << 2));
+			else
+				sectordatawritesize = chip->cw_size;
+
+			cmd->cmd = 0;
+			cmd->src = data_dma_addr_curr;
+			data_dma_addr_curr += sectordatawritesize;
+
+			if (n % 2 == 0)
+				cmd->dst = NC01(MSM_NAND_FLASH_BUFFER);
+			else
+				cmd->dst = NC10(MSM_NAND_FLASH_BUFFER);
+			cmd->len = sectordatawritesize;
+			cmd++;
+
+			if (ops->oobbuf) {
+				if (n == (cwperpage - 1)) {
+					cmd->cmd = 0;
+					cmd->src = oob_dma_addr_curr;
+					cmd->dst = NC10(MSM_NAND_FLASH_BUFFER) +
+						(512 - ((cwperpage - 1) << 2));
+					if ((cwperpage << 2) < oob_len)
+						cmd->len = (cwperpage << 2);
+					else
+						cmd->len = oob_len;
+					oob_dma_addr_curr += cmd->len;
+					oob_len -= cmd->len;
+					if (cmd->len > 0)
+						cmd++;
+				}
+				if (ops->mode != MTD_OOB_AUTO) {
+					/* skip ecc bytes in oobbuf */
+					if (oob_len < chip->ecc_parity_bytes) {
+						oob_dma_addr_curr +=
+							chip->ecc_parity_bytes;
+						oob_len -=
+							chip->ecc_parity_bytes;
+					} else {
+						oob_dma_addr_curr += oob_len;
+						oob_len = 0;
+					}
+				}
+			}
+
+			if (n % 2 == 0) {
+				if (n != 0) {
+					/* MASK DATA ACK/REQ --> NC01 (0xA3C)*/
+					cmd->cmd = 0;
+					cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->
+					data.adm_mux_data_ack_req_nc01);
+					cmd->dst = EBI2_NAND_ADM_MUX;
+					cmd->len = 4;
+					cmd++;
+
+					/* block on data ready from NC10, then
+					* read the status register
+					*/
+					cmd->cmd = SRC_CRCI_NAND_DATA;
+					cmd->src = NC10(MSM_NAND_FLASH_STATUS);
+					cmd->dst = msm_virt_to_dma(chip,
+					&dma_buffer->data.flash_status[n-1]);
+					cmd->len = 4;
+					cmd++;
+				}
+				/* kick the NC01 execute register */
+				cmd->cmd = 0;
+				cmd->src = msm_virt_to_dma(chip,
+						&dma_buffer->data.exec);
+				cmd->dst = NC01(MSM_NAND_EXEC_CMD);
+				cmd->len = 4;
+				cmd++;
+			} else {
+				/* MASK DATA ACK/REQ --> NC10 (0xF28)*/
+				cmd->cmd = 0;
+				cmd->src = msm_virt_to_dma(chip,
+				&dma_buffer->data.adm_mux_data_ack_req_nc10);
+				cmd->dst = EBI2_NAND_ADM_MUX;
+				cmd->len = 4;
+				cmd++;
+
+				/* block on data ready from NC01, then
+				 * read the status register
+				 */
+				cmd->cmd = SRC_CRCI_NAND_DATA;
+				cmd->src = NC01(MSM_NAND_FLASH_STATUS);
+				cmd->dst = msm_virt_to_dma(chip,
+				&dma_buffer->data.flash_status[n-1]);
+				cmd->len = 4;
+				cmd++;
+
+				/* kick the execute register */
+				cmd->cmd = 0;
+				cmd->src =
+				msm_virt_to_dma(chip, &dma_buffer->data.exec);
+				cmd->dst = NC10(MSM_NAND_EXEC_CMD);
+				cmd->len = 4;
+				cmd++;
+			}
+		}
+
+		/* MASK DATA ACK/REQ --> NC01 (0xA3C)*/
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip,
+				&dma_buffer->data.adm_mux_data_ack_req_nc01);
+		cmd->dst = EBI2_NAND_ADM_MUX;
+		cmd->len = 4;
+		cmd++;
+
+		/* process the outstanding request: block on data
+		 * ready, then read the status register
+		 */
+		cmd->cmd = SRC_CRCI_NAND_DATA;
+		cmd->src = NC10(MSM_NAND_FLASH_STATUS);
+		cmd->dst = msm_virt_to_dma(chip,
+			     &dma_buffer->data.flash_status[n-1]);
+		cmd->len = 4;
+		cmd++;
+
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrfstatus);
+		cmd->dst = NC11(MSM_NAND_FLASH_STATUS);
+		cmd->len = 4;
+		cmd++;
+
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrrstatus);
+		cmd->dst = NC11(MSM_NAND_READ_STATUS);
+		cmd->len = 4;
+		cmd++;
+
+		/* restore the default ADM mux (0xFC0) */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip,
+				&dma_buffer->data.adm_default_mux);
+		cmd->dst = EBI2_NAND_ADM_MUX;
+		cmd->len = 4;
+		cmd++;
+
+		if (!interleave_enable) {
+			/* restore the default values */
+			cmd->cmd = 0;
+			cmd->src = msm_virt_to_dma(chip,
+			&dma_buffer->data.nc01_flash_dev_cmd_vld_default);
+			cmd->dst = NC01(MSM_NAND_DEV_CMD_VLD);
+			cmd->len = 4;
+			cmd++;
+
+			cmd->cmd = 0;
+			cmd->src = msm_virt_to_dma(chip,
+			&dma_buffer->data.nc10_flash_dev_cmd0_default);
+			cmd->dst = NC10(MSM_NAND_DEV_CMD0);
+			cmd->len = 4;
+			cmd++;
+		} else {
+			/* disable CS1 */
+			cmd->cmd = 0;
+			cmd->src = msm_virt_to_dma(chip,
+			&dma_buffer->data.default_ebi2_chip_select_cfg0);
+			cmd->dst = EBI2_CHIP_SELECT_CFG0;
+			cmd->len = 4;
+			cmd++;
+		}
+
+		dma_buffer->cmd[0].cmd |= CMD_OCB;
+		cmd[-1].cmd |= CMD_OCU | CMD_LC;
+		BUILD_BUG_ON(16 * 6 + 18 != ARRAY_SIZE(dma_buffer->cmd));
+		BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+		dma_buffer->cmdptr =
+		((msm_virt_to_dma(chip, dma_buffer->cmd) >> 3) | CMD_PTR_LP);
+
+		mb();
+		msm_dmov_exec_cmd(chip->dma_channel, crci_mask,
+			DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(
+				msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
+		mb();
+
+		/* if any of the writes failed (0x10), or there was a
+		 * protection violation (0x100), or the program success
+		 * bit (0x80) is unset, we lose
+		 */
+		err = 0;
+		for (n = 0; n < cwperpage; n++) {
+			if (dma_buffer->data.flash_status[n] & 0x110) {
+				err = -EIO;
+				break;
+			}
+			if (!(dma_buffer->data.flash_status[n] & 0x80)) {
+				err = -EIO;
+				break;
+			}
+		}
+		/* check for flash status busy for the last codeword */
+		if (!interleave_enable)
+			if (!(dma_buffer->data.flash_status[cwperpage - 1]
+								& 0x20)) {
+				err = -EIO;
+				break;
+			}
+#if VERBOSE
+		for (n = 0; n < cwperpage; n++) {
+			if (n % 2) {
+				pr_info("NC10: write pg %d: "
+					"flash_status[%d] = %x\n",
+					page, n,
+					dma_buffer->data.flash_status[n]);
+			} else {
+				pr_info("NC01: write pg %d: "
+					"flash_status[%d] = %x\n",
+					page, n,
+					dma_buffer->data.flash_status[n]);
+			}
+		}
+#endif
+		if (err)
+			break;
+		pages_written++;
+		page++;
+	}
+	if (ops->mode != MTD_OOB_RAW)
+		ops->retlen = mtd->writesize * pages_written;
+	else
+		ops->retlen = (mtd->writesize + mtd->oobsize) * pages_written;
+
+	ops->oobretlen = ops->ooblen - oob_len;
+
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+
+	if (ops->oobbuf)
+		dma_unmap_page(chip->dev, oob_dma_addr,
+				 ops->ooblen, DMA_TO_DEVICE);
+err_dma_map_oobbuf_failed:
+	if (ops->datbuf)
+		dma_unmap_page(chip->dev, data_dma_addr, ops->len,
+				DMA_TO_DEVICE);
+	if (err)
+		pr_err("msm_nand_write_oob_dualnandc %llx %x %x failed %d\n",
+		       to, ops->len, ops->ooblen, err);
+
+#if VERBOSE
+	pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
+			__func__, err, ops->retlen, ops->oobretlen);
+
+	pr_info("==================================================="
+			"==========\n");
+#endif
+	return err;
+}
+
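+/*
+ * Plain data write: wraps the page-oriented OOB write path with no spare
+ * buffer, dispatching to the single- or dual-controller variant depending
+ * on the probed hardware.
+ */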
+static int msm_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
+			  size_t *retlen, const u_char *buf)
+{
+	int ret;
+	struct mtd_oob_ops ops;
+
+	ops.mode = MTD_OOB_PLACE;
+	ops.len = len;
+	ops.retlen = 0;
+	ops.ooblen = 0;
+	ops.datbuf = (uint8_t *)buf;
+	ops.oobbuf = NULL;
+	if (!dual_nand_ctlr_present)
+		ret =  msm_nand_write_oob(mtd, to, &ops);
+	else
+		ret =  msm_nand_write_oob_dualnandc(mtd, to, &ops);
+	*retlen = ops.retlen;
+	return ret;
+}
+
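+/*
+ * Erase one block: a short DMA command list issues the block-erase
+ * command, programs CFG0/CFG1 with CW_PER_PAGE forced to 0, kicks the
+ * EXEC register, then reads back and clears the flash status.
+ */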
+static int
+msm_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+	int err;
+	struct msm_nand_chip *chip = mtd->priv;
+	struct {
+		dmov_s cmd[6];
+		unsigned cmdptr;
+		struct {
+			uint32_t cmd;
+			uint32_t addr0;
+			uint32_t addr1;
+			uint32_t chipsel;
+			uint32_t cfg0;
+			uint32_t cfg1;
+			uint32_t exec;
+			uint32_t flash_status;
+			uint32_t clrfstatus;
+			uint32_t clrrstatus;
+		} data;
+	} *dma_buffer;
+	dmov_s *cmd;
+	unsigned page = 0;
+
+	if (mtd->writesize == 2048)
+		page = instr->addr >> 11;
+
+	if (mtd->writesize == 4096)
+		page = instr->addr >> 12;
+
+	if (instr->addr & (mtd->erasesize - 1)) {
+		pr_err("%s: unsupported erase address, 0x%llx\n",
+		       __func__, instr->addr);
+		return -EINVAL;
+	}
+	if (instr->len != mtd->erasesize) {
+		pr_err("%s: unsupported erase len, %lld\n",
+		       __func__, instr->len);
+		return -EINVAL;
+	}
+
+	wait_event(chip->wait_queue,
+		   (dma_buffer = msm_nand_get_dma_buffer(
+			    chip, sizeof(*dma_buffer))));
+
+	cmd = dma_buffer->cmd;
+
+	dma_buffer->data.cmd = MSM_NAND_CMD_BLOCK_ERASE;
+	dma_buffer->data.addr0 = page;
+	dma_buffer->data.addr1 = 0;
+	dma_buffer->data.chipsel = 0 | 4;
+	dma_buffer->data.exec = 1;
+	dma_buffer->data.flash_status = 0xeeeeeeee;
+	dma_buffer->data.cfg0 = chip->CFG0 & (~(7 << 6));  /* CW_PER_PAGE = 0 */
+	dma_buffer->data.cfg1 = chip->CFG1;
+	dma_buffer->data.clrfstatus = 0x00000020;
+	dma_buffer->data.clrrstatus = 0x000000C0;
+
+	cmd->cmd = DST_CRCI_NAND_CMD | CMD_OCB;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
+	cmd->dst = MSM_NAND_FLASH_CMD;
+	cmd->len = 16;
+	cmd++;
+
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
+	cmd->dst = MSM_NAND_DEV0_CFG0;
+	cmd->len = 8;
+	cmd++;
+
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
+	cmd->dst = MSM_NAND_EXEC_CMD;
+	cmd->len = 4;
+	cmd++;
+
+	cmd->cmd = SRC_CRCI_NAND_DATA;
+	cmd->src = MSM_NAND_FLASH_STATUS;
+	cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.flash_status);
+	cmd->len = 4;
+	cmd++;
+
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrfstatus);
+	cmd->dst = MSM_NAND_FLASH_STATUS;
+	cmd->len = 4;
+	cmd++;
+
+	cmd->cmd = CMD_OCU | CMD_LC;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrrstatus);
+	cmd->dst = MSM_NAND_READ_STATUS;
+	cmd->len = 4;
+	cmd++;
+
+	BUILD_BUG_ON(5 != ARRAY_SIZE(dma_buffer->cmd) - 1);
+	BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+	dma_buffer->cmdptr =
+		(msm_virt_to_dma(chip, dma_buffer->cmd) >> 3) | CMD_PTR_LP;
+
+	mb();
+	msm_dmov_exec_cmd(
+		chip->dma_channel, crci_mask, DMOV_CMD_PTR_LIST |
+		DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
+	mb();
+
+	/* we fail if there was an operation error, an MPU error, or the
+	 * erase success bit was not set.
+	 */
+
+	if (dma_buffer->data.flash_status & 0x110 ||
+			!(dma_buffer->data.flash_status & 0x80))
+		err = -EIO;
+	else
+		err = 0;
+
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+	if (err) {
+		pr_err("%s: erase failed, 0x%llx\n", __func__, instr->addr);
+		instr->fail_addr = instr->addr;
+		instr->state = MTD_ERASE_FAILED;
+	} else {
+		instr->state = MTD_ERASE_DONE;
+		instr->fail_addr = 0xffffffff;
+		mtd_erase_callback(instr);
+	}
+	return err;
+}
+
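+/*
+ * Dual-controller block erase: with CS1 enabled, the block-erase command
+ * is issued on NC01 (CS0) and NC10 (CS1) back to back, and both flash
+ * status words are checked before reporting success.
+ */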
+static int
+msm_nand_erase_dualnandc(struct mtd_info *mtd, struct erase_info *instr)
+{
+	int err;
+	struct msm_nand_chip *chip = mtd->priv;
+	struct {
+		dmov_s cmd[18];
+		unsigned cmdptr;
+		struct {
+			uint32_t cmd;
+			uint32_t addr0;
+			uint32_t addr1;
+			uint32_t chipsel_cs0;
+			uint32_t chipsel_cs1;
+			uint32_t cfg0;
+			uint32_t cfg1;
+			uint32_t exec;
+			uint32_t ecccfg;
+			uint32_t ebi2_chip_select_cfg0;
+			uint32_t adm_mux_data_ack_req_nc01;
+			uint32_t adm_mux_cmd_ack_req_nc01;
+			uint32_t adm_mux_data_ack_req_nc10;
+			uint32_t adm_mux_cmd_ack_req_nc10;
+			uint32_t adm_default_mux;
+			uint32_t default_ebi2_chip_select_cfg0;
+			uint32_t nc01_flash_dev_cmd0;
+			uint32_t nc01_flash_dev_cmd0_default;
+			uint32_t flash_status[2];
+			uint32_t clrfstatus;
+			uint32_t clrrstatus;
+		} data;
+	} *dma_buffer;
+	dmov_s *cmd;
+	unsigned page = 0;
+
+	if (mtd->writesize == 2048)
+		page = instr->addr >> 11;
+
+	if (mtd->writesize == 4096)
+		page = instr->addr >> 12;
+
+	if (mtd->writesize == 8192)
+		page = (instr->addr >> 1) >> 12;
+
+	if (instr->addr & (mtd->erasesize - 1)) {
+		pr_err("%s: unsupported erase address, 0x%llx\n",
+		       __func__, instr->addr);
+		return -EINVAL;
+	}
+	if (instr->len != mtd->erasesize) {
+		pr_err("%s: unsupported erase len, %lld\n",
+		       __func__, instr->len);
+		return -EINVAL;
+	}
+
+	wait_event(chip->wait_queue,
+		   (dma_buffer = msm_nand_get_dma_buffer(
+			    chip, sizeof(*dma_buffer))));
+
+	cmd = dma_buffer->cmd;
+
+	dma_buffer->data.cmd = MSM_NAND_CMD_BLOCK_ERASE;
+	dma_buffer->data.addr0 = page;
+	dma_buffer->data.addr1 = 0;
+	dma_buffer->data.chipsel_cs0 = (1<<4) | 4;
+	dma_buffer->data.chipsel_cs1 = (1<<4) | 5;
+	dma_buffer->data.exec = 1;
+	dma_buffer->data.flash_status[0] = 0xeeeeeeee;
+	dma_buffer->data.flash_status[1] = 0xeeeeeeee;
+	dma_buffer->data.cfg0 = chip->CFG0 & (~(7 << 6));  /* CW_PER_PAGE = 0 */
+	dma_buffer->data.cfg1 = chip->CFG1;
+	dma_buffer->data.clrfstatus = 0x00000020;
+	dma_buffer->data.clrrstatus = 0x000000C0;
+
+	dma_buffer->data.ebi2_chip_select_cfg0 = 0x00000805;
+	dma_buffer->data.adm_mux_data_ack_req_nc01 = 0x00000A3C;
+	dma_buffer->data.adm_mux_cmd_ack_req_nc01  = 0x0000053C;
+	dma_buffer->data.adm_mux_data_ack_req_nc10 = 0x00000F28;
+	dma_buffer->data.adm_mux_cmd_ack_req_nc10  = 0x00000F14;
+	dma_buffer->data.adm_default_mux = 0x00000FC0;
+	dma_buffer->data.default_ebi2_chip_select_cfg0 = 0x00000801;
+
+	/* enable CS1 */
+	cmd->cmd = 0 | CMD_OCB;
+	cmd->src = msm_virt_to_dma(chip,
+			&dma_buffer->data.ebi2_chip_select_cfg0);
+	cmd->dst = EBI2_CHIP_SELECT_CFG0;
+	cmd->len = 4;
+	cmd++;
+
+	/* erase CS0 block now !!! */
+	/* 0xF14 */
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip,
+			&dma_buffer->data.adm_mux_cmd_ack_req_nc10);
+	cmd->dst = EBI2_NAND_ADM_MUX;
+	cmd->len = 4;
+	cmd++;
+
+	cmd->cmd = DST_CRCI_NAND_CMD;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
+	cmd->dst = NC01(MSM_NAND_FLASH_CMD);
+	cmd->len = 16;
+	cmd++;
+
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
+	cmd->dst = NC01(MSM_NAND_DEV0_CFG0);
+	cmd->len = 8;
+	cmd++;
+
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
+	cmd->dst = NC01(MSM_NAND_EXEC_CMD);
+	cmd->len = 4;
+	cmd++;
+
+	/* 0xF28 */
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip,
+			&dma_buffer->data.adm_mux_data_ack_req_nc10);
+	cmd->dst = EBI2_NAND_ADM_MUX;
+	cmd->len = 4;
+	cmd++;
+
+	cmd->cmd = SRC_CRCI_NAND_DATA;
+	cmd->src = NC01(MSM_NAND_FLASH_STATUS);
+	cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.flash_status[0]);
+	cmd->len = 4;
+	cmd++;
+
+	/* erase CS1 block now !!! */
+	/* 0x53C */
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip,
+			       &dma_buffer->data.adm_mux_cmd_ack_req_nc01);
+	cmd->dst = EBI2_NAND_ADM_MUX;
+	cmd->len = 4;
+	cmd++;
+
+	cmd->cmd = DST_CRCI_NAND_CMD;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
+	cmd->dst = NC10(MSM_NAND_FLASH_CMD);
+	cmd->len = 12;
+	cmd++;
+
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.chipsel_cs1);
+	cmd->dst = NC10(MSM_NAND_FLASH_CHIP_SELECT);
+	cmd->len = 4;
+	cmd++;
+
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
+	cmd->dst = NC10(MSM_NAND_DEV1_CFG0);
+	cmd->len = 8;
+	cmd++;
+
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
+	cmd->dst = NC10(MSM_NAND_EXEC_CMD);
+	cmd->len = 4;
+	cmd++;
+
+	/* 0xA3C */
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip,
+			     &dma_buffer->data.adm_mux_data_ack_req_nc01);
+	cmd->dst = EBI2_NAND_ADM_MUX;
+	cmd->len = 4;
+	cmd++;
+
+	cmd->cmd = SRC_CRCI_NAND_DATA;
+	cmd->src = NC10(MSM_NAND_FLASH_STATUS);
+	cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.flash_status[1]);
+	cmd->len = 4;
+	cmd++;
+
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrfstatus);
+	cmd->dst = NC11(MSM_NAND_FLASH_STATUS);
+	cmd->len = 4;
+	cmd++;
+
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrrstatus);
+	cmd->dst = NC11(MSM_NAND_READ_STATUS);
+	cmd->len = 4;
+	cmd++;
+
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip,
+			&dma_buffer->data.adm_default_mux);
+	cmd->dst = EBI2_NAND_ADM_MUX;
+	cmd->len = 4;
+	cmd++;
+
+	/* disable CS1 */
+	cmd->cmd = CMD_OCU | CMD_LC;
+	cmd->src = msm_virt_to_dma(chip,
+			&dma_buffer->data.default_ebi2_chip_select_cfg0);
+	cmd->dst = EBI2_CHIP_SELECT_CFG0;
+	cmd->len = 4;
+	cmd++;
+
+	BUILD_BUG_ON(17 != ARRAY_SIZE(dma_buffer->cmd) - 1);
+	BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+
+	dma_buffer->cmdptr =
+		(msm_virt_to_dma(chip, dma_buffer->cmd) >> 3) | CMD_PTR_LP;
+
+	mb();
+	msm_dmov_exec_cmd(
+		chip->dma_channel, crci_mask, DMOV_CMD_PTR_LIST |
+		DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
+	mb();
+
+	/* we fail if there was an operation error, an MPU error, or the
+	 * erase success bit was not set.
+	 */
+
+	if (dma_buffer->data.flash_status[0] & 0x110 ||
+			!(dma_buffer->data.flash_status[0] & 0x80) ||
+			dma_buffer->data.flash_status[1] & 0x110 ||
+			!(dma_buffer->data.flash_status[1] & 0x80))
+		err = -EIO;
+	else
+		err = 0;
+
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+	if (err) {
+		pr_err("%s: erase failed, 0x%llx\n", __func__, instr->addr);
+		instr->fail_addr = instr->addr;
+		instr->state = MTD_ERASE_FAILED;
+	} else {
+		instr->state = MTD_ERASE_DONE;
+		instr->fail_addr = 0xffffffff;
+		mtd_erase_callback(instr);
+	}
+	return err;
+}
+
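+/*
+ * Bad-block check: read, using the raw CFG0/CFG1 settings, the bytes at
+ * the bad-block marker location of the last codeword of the block's first
+ * page; anything other than 0xFF marks the block bad.
+ */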
+static int
+msm_nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+	struct msm_nand_chip *chip = mtd->priv;
+	int ret;
+	struct {
+		dmov_s cmd[5];
+		unsigned cmdptr;
+		struct {
+			uint32_t cmd;
+			uint32_t addr0;
+			uint32_t addr1;
+			uint32_t chipsel;
+			uint32_t cfg0;
+			uint32_t cfg1;
+			uint32_t eccbchcfg;
+			uint32_t exec;
+			uint32_t ecccfg;
+			struct {
+				uint32_t flash_status;
+				uint32_t buffer_status;
+			} result;
+		} data;
+	} *dma_buffer;
+	dmov_s *cmd;
+	uint8_t *buf;
+	unsigned page = 0;
+	unsigned cwperpage;
+
+	if (mtd->writesize == 2048)
+		page = ofs >> 11;
+
+	if (mtd->writesize == 4096)
+		page = ofs >> 12;
+
+	cwperpage = (mtd->writesize >> 9);
+
+	/* Check for invalid offset */
+	if (ofs > mtd->size)
+		return -EINVAL;
+	if (ofs & (mtd->erasesize - 1)) {
+		pr_err("%s: unsupported block address, 0x%x\n",
+			 __func__, (uint32_t)ofs);
+		return -EINVAL;
+	}
+
+	wait_event(chip->wait_queue,
+		(dma_buffer = msm_nand_get_dma_buffer(chip,
+					 sizeof(*dma_buffer) + 4)));
+	buf = (uint8_t *)dma_buffer + sizeof(*dma_buffer);
+
+	/* Read 4 bytes starting from the bad block marker location
+	 * in the last code word of the page
+	 */
+
+	cmd = dma_buffer->cmd;
+
+	dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ;
+	dma_buffer->data.cfg0 = chip->CFG0_RAW & ~(7U << 6);
+	dma_buffer->data.cfg1 = chip->CFG1_RAW |
+				(chip->CFG1 & CFG1_WIDE_FLASH);
+	if (enable_bch_ecc)
+		dma_buffer->data.eccbchcfg = chip->ecc_bch_cfg;
+
+	if (chip->CFG1 & CFG1_WIDE_FLASH)
+		dma_buffer->data.addr0 = (page << 16) |
+			((chip->cw_size * (cwperpage-1)) >> 1);
+	else
+		dma_buffer->data.addr0 = (page << 16) |
+			(chip->cw_size * (cwperpage-1));
+
+	dma_buffer->data.addr1 = (page >> 16) & 0xff;
+	dma_buffer->data.chipsel = 0 | 4;
+
+	dma_buffer->data.exec = 1;
+
+	dma_buffer->data.result.flash_status = 0xeeeeeeee;
+	dma_buffer->data.result.buffer_status = 0xeeeeeeee;
+
+	cmd->cmd = DST_CRCI_NAND_CMD;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
+	cmd->dst = MSM_NAND_FLASH_CMD;
+	cmd->len = 16;
+	cmd++;
+
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
+	cmd->dst = MSM_NAND_DEV0_CFG0;
+	if (enable_bch_ecc)
+		cmd->len = 12;
+	else
+		cmd->len = 8;
+	cmd++;
+
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
+	cmd->dst = MSM_NAND_EXEC_CMD;
+	cmd->len = 4;
+	cmd++;
+
+	cmd->cmd = SRC_CRCI_NAND_DATA;
+	cmd->src = MSM_NAND_FLASH_STATUS;
+	cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.result);
+	cmd->len = 8;
+	cmd++;
+
+	cmd->cmd = 0;
+	cmd->src = MSM_NAND_FLASH_BUFFER +
+	(mtd->writesize - (chip->cw_size * (cwperpage-1)));
+	cmd->dst = msm_virt_to_dma(chip, buf);
+	cmd->len = 4;
+	cmd++;
+
+	BUILD_BUG_ON(5 != ARRAY_SIZE(dma_buffer->cmd));
+	BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+	dma_buffer->cmd[0].cmd |= CMD_OCB;
+	cmd[-1].cmd |= CMD_OCU | CMD_LC;
+
+	dma_buffer->cmdptr = (msm_virt_to_dma(chip,
+				dma_buffer->cmd) >> 3) | CMD_PTR_LP;
+
+	mb();
+	msm_dmov_exec_cmd(chip->dma_channel, crci_mask, DMOV_CMD_PTR_LIST |
+		DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
+	mb();
+
+	ret = 0;
+	if (dma_buffer->data.result.flash_status & 0x110)
+		ret = -EIO;
+
+	if (!ret) {
+		/* Check for bad block marker byte */
+		if (chip->CFG1 & CFG1_WIDE_FLASH) {
+			if (buf[0] != 0xFF || buf[1] != 0xFF)
+				ret = 1;
+		} else {
+			if (buf[0] != 0xFF)
+				ret = 1;
+		}
+	}
+
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer) + 4);
+	return ret;
+}
+
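+/*
+ * Dual-controller bad-block check: the raw bad-block marker bytes of the
+ * last codeword are read from both the NC01 and NC10 halves of the page,
+ * and the block is reported bad if either half is not 0xFF.
+ */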
+static int
+msm_nand_block_isbad_dualnandc(struct mtd_info *mtd, loff_t ofs)
+{
+	struct msm_nand_chip *chip = mtd->priv;
+	int ret;
+	struct {
+		dmov_s cmd[18];
+		unsigned cmdptr;
+		struct {
+			uint32_t cmd;
+			uint32_t addr0;
+			uint32_t addr1;
+			uint32_t chipsel_cs0;
+			uint32_t chipsel_cs1;
+			uint32_t cfg0;
+			uint32_t cfg1;
+			uint32_t exec;
+			uint32_t ecccfg;
+			uint32_t ebi2_chip_select_cfg0;
+			uint32_t adm_mux_data_ack_req_nc01;
+			uint32_t adm_mux_cmd_ack_req_nc01;
+			uint32_t adm_mux_data_ack_req_nc10;
+			uint32_t adm_mux_cmd_ack_req_nc10;
+			uint32_t adm_default_mux;
+			uint32_t default_ebi2_chip_select_cfg0;
+			struct {
+				uint32_t flash_status;
+				uint32_t buffer_status;
+			} result[2];
+		} data;
+	} *dma_buffer;
+	dmov_s *cmd;
+	uint8_t *buf01;
+	uint8_t *buf10;
+	unsigned page = 0;
+	unsigned cwperpage;
+
+	if (mtd->writesize == 2048)
+		page = ofs >> 11;
+
+	if (mtd->writesize == 4096)
+		page = ofs >> 12;
+
+	if (mtd->writesize == 8192)
+		page = (ofs >> 1) >> 12;
+
+	cwperpage = ((mtd->writesize >> 1) >> 9);
+
+	/* Check for invalid offset */
+	if (ofs > mtd->size)
+		return -EINVAL;
+	if (ofs & (mtd->erasesize - 1)) {
+		pr_err("%s: unsupported block address, 0x%x\n",
+			 __func__, (uint32_t)ofs);
+		return -EINVAL;
+	}
+
+	wait_event(chip->wait_queue,
+		(dma_buffer = msm_nand_get_dma_buffer(chip,
+					 sizeof(*dma_buffer) + 8)));
+	buf01 = (uint8_t *)dma_buffer + sizeof(*dma_buffer);
+	buf10 = buf01 + 4;
+
+	/* Read 4 bytes starting from the bad block marker location
+	 * in the last code word of the page
+	 */
+	cmd = dma_buffer->cmd;
+
+	dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ;
+	dma_buffer->data.cfg0 = chip->CFG0_RAW & ~(7U << 6);
+	dma_buffer->data.cfg1 = chip->CFG1_RAW |
+				(chip->CFG1 & CFG1_WIDE_FLASH);
+
+	if (chip->CFG1 & CFG1_WIDE_FLASH)
+		dma_buffer->data.addr0 = (page << 16) |
+			((528*(cwperpage-1)) >> 1);
+	else
+		dma_buffer->data.addr0 = (page << 16) |
+			(528*(cwperpage-1));
+
+	dma_buffer->data.addr1 = (page >> 16) & 0xff;
+	dma_buffer->data.chipsel_cs0 = (1<<4) | 4;
+	dma_buffer->data.chipsel_cs1 = (1<<4) | 5;
+
+	dma_buffer->data.exec = 1;
+
+	dma_buffer->data.result[0].flash_status = 0xeeeeeeee;
+	dma_buffer->data.result[0].buffer_status = 0xeeeeeeee;
+	dma_buffer->data.result[1].flash_status = 0xeeeeeeee;
+	dma_buffer->data.result[1].buffer_status = 0xeeeeeeee;
+
+	dma_buffer->data.ebi2_chip_select_cfg0 = 0x00000805;
+	dma_buffer->data.adm_mux_data_ack_req_nc01 = 0x00000A3C;
+	dma_buffer->data.adm_mux_cmd_ack_req_nc01  = 0x0000053C;
+	dma_buffer->data.adm_mux_data_ack_req_nc10 = 0x00000F28;
+	dma_buffer->data.adm_mux_cmd_ack_req_nc10  = 0x00000F14;
+	dma_buffer->data.adm_default_mux = 0x00000FC0;
+	dma_buffer->data.default_ebi2_chip_select_cfg0 = 0x00000801;
+
+	/* Reading last code word from NC01 */
+	/* enable CS1 */
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip,
+			&dma_buffer->data.ebi2_chip_select_cfg0);
+	cmd->dst = EBI2_CHIP_SELECT_CFG0;
+	cmd->len = 4;
+	cmd++;
+
+	/* 0xF14 */
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip,
+			&dma_buffer->data.adm_mux_cmd_ack_req_nc10);
+	cmd->dst = EBI2_NAND_ADM_MUX;
+	cmd->len = 4;
+	cmd++;
+
+	cmd->cmd = DST_CRCI_NAND_CMD;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
+	cmd->dst = NC01(MSM_NAND_FLASH_CMD);
+	cmd->len = 16;
+	cmd++;
+
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
+	cmd->dst = NC01(MSM_NAND_DEV0_CFG0);
+	cmd->len = 8;
+	cmd++;
+
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
+	cmd->dst = NC01(MSM_NAND_EXEC_CMD);
+	cmd->len = 4;
+	cmd++;
+
+	/* 0xF28 */
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip,
+			&dma_buffer->data.adm_mux_data_ack_req_nc10);
+	cmd->dst = EBI2_NAND_ADM_MUX;
+	cmd->len = 4;
+	cmd++;
+
+	cmd->cmd = SRC_CRCI_NAND_DATA;
+	cmd->src = NC01(MSM_NAND_FLASH_STATUS);
+	cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.result[0]);
+	cmd->len = 8;
+	cmd++;
+
+	cmd->cmd = 0;
+	cmd->src = NC01(MSM_NAND_FLASH_BUFFER) + ((mtd->writesize >> 1) -
+							(528*(cwperpage-1)));
+	cmd->dst = msm_virt_to_dma(chip, buf01);
+	cmd->len = 4;
+	cmd++;
+
+	/* Reading last code word from NC10 */
+	/* 0x53C */
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip,
+	&dma_buffer->data.adm_mux_cmd_ack_req_nc01);
+	cmd->dst = EBI2_NAND_ADM_MUX;
+	cmd->len = 4;
+	cmd++;
+
+	cmd->cmd = DST_CRCI_NAND_CMD;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
+	cmd->dst = NC10(MSM_NAND_FLASH_CMD);
+	cmd->len = 12;
+	cmd++;
+
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.chipsel_cs1);
+	cmd->dst = NC10(MSM_NAND_FLASH_CHIP_SELECT);
+	cmd->len = 4;
+	cmd++;
+
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
+	cmd->dst = NC10(MSM_NAND_DEV1_CFG0);
+	cmd->len = 8;
+	cmd++;
+
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
+	cmd->dst = NC10(MSM_NAND_EXEC_CMD);
+	cmd->len = 4;
+	cmd++;
+
+	/* A3C */
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip,
+			&dma_buffer->data.adm_mux_data_ack_req_nc01);
+	cmd->dst = EBI2_NAND_ADM_MUX;
+	cmd->len = 4;
+	cmd++;
+
+	cmd->cmd = SRC_CRCI_NAND_DATA;
+	cmd->src = NC10(MSM_NAND_FLASH_STATUS);
+	cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.result[1]);
+	cmd->len = 8;
+	cmd++;
+
+	cmd->cmd = 0;
+	cmd->src = NC10(MSM_NAND_FLASH_BUFFER) + ((mtd->writesize >> 1) -
+							(528*(cwperpage-1)));
+	cmd->dst = msm_virt_to_dma(chip, buf10);
+	cmd->len = 4;
+	cmd++;
+
+	/* FC0 */
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip,
+			&dma_buffer->data.adm_default_mux);
+	cmd->dst = EBI2_NAND_ADM_MUX;
+	cmd->len = 4;
+	cmd++;
+
+	/* disable CS1 */
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip,
+			&dma_buffer->data.ebi2_chip_select_cfg0);
+	cmd->dst = EBI2_CHIP_SELECT_CFG0;
+	cmd->len = 4;
+	cmd++;
+
+	BUILD_BUG_ON(18 != ARRAY_SIZE(dma_buffer->cmd));
+	BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+	dma_buffer->cmd[0].cmd |= CMD_OCB;
+	cmd[-1].cmd |= CMD_OCU | CMD_LC;
+
+	dma_buffer->cmdptr = (msm_virt_to_dma(chip,
+				dma_buffer->cmd) >> 3) | CMD_PTR_LP;
+
+	mb();
+	msm_dmov_exec_cmd(chip->dma_channel, crci_mask, DMOV_CMD_PTR_LIST |
+		DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
+	mb();
+
+	ret = 0;
+	if ((dma_buffer->data.result[0].flash_status & 0x110) ||
+			(dma_buffer->data.result[1].flash_status & 0x110))
+		ret = -EIO;
+
+	if (!ret) {
+		/* Check for bad block marker byte for NC01 & NC10 */
+		if (chip->CFG1 & CFG1_WIDE_FLASH) {
+			if ((buf01[0] != 0xFF || buf01[1] != 0xFF) ||
+				(buf10[0] != 0xFF || buf10[1] != 0xFF))
+				ret = 1;
+		} else {
+			if (buf01[0] != 0xFF || buf10[0] != 0xFF)
+				ret = 1;
+		}
+	}
+
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer) + 8);
+	return ret;
+}
+
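+/*
+ * Mark a block bad by raw-writing a page of zeroes (data plus spare) to
+ * its first page, which clears the bad-block marker bytes.
+ */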
+static int
+msm_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+	struct mtd_oob_ops ops;
+	int ret;
+	uint8_t *buf;
+
+	/* Check for invalid offset */
+	if (ofs > mtd->size)
+		return -EINVAL;
+	if (ofs & (mtd->erasesize - 1)) {
+		pr_err("%s: unsupported block address, 0x%x\n",
+				 __func__, (uint32_t)ofs);
+		return -EINVAL;
+	}
+
+	/*
+	 * Write all 0s to the first page.
+	 * This will set the bad-block marker to 0.
+	 */
+	buf = page_address(ZERO_PAGE());
+
+	ops.mode = MTD_OOB_RAW;
+	ops.len = mtd->writesize + mtd->oobsize;
+	ops.retlen = 0;
+	ops.ooblen = 0;
+	ops.datbuf = buf;
+	ops.oobbuf = NULL;
+	if (!interleave_enable)
+		ret =  msm_nand_write_oob(mtd, ofs, &ops);
+	else
+		ret = msm_nand_write_oob_dualnandc(mtd, ofs, &ops);
+
+	return ret;
+}
+
+/**
+ * msm_nand_suspend - [MTD Interface] Suspend the msm_nand flash
+ * @param mtd		MTD device structure
+ */
+static int msm_nand_suspend(struct mtd_info *mtd)
+{
+	return 0;
+}
+
+/**
+ * msm_nand_resume - [MTD Interface] Resume the msm_nand flash
+ * @param mtd		MTD device structure
+ */
+static void msm_nand_resume(struct mtd_info *mtd)
+{
+}
+
+struct onenand_information {
+	uint16_t manufacturer_id;
+	uint16_t device_id;
+	uint16_t version_id;
+	uint16_t data_buf_size;
+	uint16_t boot_buf_size;
+	uint16_t num_of_buffers;
+	uint16_t technology;
+};
+
+static struct onenand_information onenand_info;
+static uint32_t nand_sfcmd_mode;
+
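+/*
+ * Probe the OneNAND device through the SFLASH controller: read the ID,
+ * version and buffer-geometry registers in one register-read burst and
+ * accept only the expected manufacturer ID and buffer layout, failing
+ * the probe otherwise.
+ */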
+uint32_t flash_onenand_probe(struct msm_nand_chip *chip)
+{
+	struct {
+		dmov_s cmd[7];
+		unsigned cmdptr;
+		struct {
+			uint32_t bcfg;
+			uint32_t cmd;
+			uint32_t exec;
+			uint32_t status;
+			uint32_t addr0;
+			uint32_t addr1;
+			uint32_t addr2;
+			uint32_t addr3;
+			uint32_t addr4;
+			uint32_t addr5;
+			uint32_t addr6;
+			uint32_t data0;
+			uint32_t data1;
+			uint32_t data2;
+			uint32_t data3;
+			uint32_t data4;
+			uint32_t data5;
+			uint32_t data6;
+		} data;
+	} *dma_buffer;
+	dmov_s *cmd;
+
+	int err = 0;
+	uint32_t initialsflashcmd = 0;
+
+	initialsflashcmd = flash_rd_reg(chip, MSM_NAND_SFLASHC_CMD);
+
+	if ((initialsflashcmd & 0x10) == 0x10)
+		nand_sfcmd_mode = MSM_NAND_SFCMD_ASYNC;
+	else
+		nand_sfcmd_mode = MSM_NAND_SFCMD_BURST;
+
+	pr_info("SFLASHC Async Mode bit: %x\n", nand_sfcmd_mode);
+
+	wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
+				(chip, sizeof(*dma_buffer))));
+
+	cmd = dma_buffer->cmd;
+
+	dma_buffer->data.bcfg = SFLASH_BCFG |
+					(nand_sfcmd_mode ? 0 : (1 << 24));
+	dma_buffer->data.cmd = SFLASH_PREPCMD(7, 0, 0,
+						MSM_NAND_SFCMD_DATXS,
+						nand_sfcmd_mode,
+						MSM_NAND_SFCMD_REGRD);
+	dma_buffer->data.exec = 1;
+	dma_buffer->data.status = CLEAN_DATA_32;
+	dma_buffer->data.addr0 = (ONENAND_DEVICE_ID << 16) |
+						(ONENAND_MANUFACTURER_ID);
+	dma_buffer->data.addr1 = (ONENAND_DATA_BUFFER_SIZE << 16) |
+						(ONENAND_VERSION_ID);
+	dma_buffer->data.addr2 = (ONENAND_AMOUNT_OF_BUFFERS << 16) |
+						(ONENAND_BOOT_BUFFER_SIZE);
+	dma_buffer->data.addr3 = (CLEAN_DATA_16 << 16) |
+						(ONENAND_TECHNOLOGY << 0);
+	dma_buffer->data.data0 = CLEAN_DATA_32;
+	dma_buffer->data.data1 = CLEAN_DATA_32;
+	dma_buffer->data.data2 = CLEAN_DATA_32;
+	dma_buffer->data.data3 = CLEAN_DATA_32;
+
+	/* Enable and configure the SFlash controller */
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.bcfg);
+	cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
+	cmd->len = 4;
+	cmd++;
+
+	/* Block on cmd ready and write CMD register */
+	cmd->cmd = DST_CRCI_NAND_CMD;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
+	cmd->dst = MSM_NAND_SFLASHC_CMD;
+	cmd->len = 4;
+	cmd++;
+
+	/* Configure the ADDR0 and ADDR1 registers */
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
+	cmd->dst = MSM_NAND_ADDR0;
+	cmd->len = 8;
+	cmd++;
+
+	/* Configure the ADDR2 and ADDR3 registers */
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
+	cmd->dst = MSM_NAND_ADDR2;
+	cmd->len = 8;
+	cmd++;
+
+	/* Kick the execute command */
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
+	cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
+	cmd->len = 4;
+	cmd++;
+
+	/* Block on data ready, and read the two status registers */
+	cmd->cmd = SRC_CRCI_NAND_DATA;
+	cmd->src = MSM_NAND_SFLASHC_STATUS;
+	cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.status);
+	cmd->len = 4;
+	cmd++;
+
+	/* Read data registers - valid only if status says success */
+	cmd->cmd = 0;
+	cmd->src = MSM_NAND_GENP_REG0;
+	cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data0);
+	cmd->len = 16;
+	cmd++;
+
+	BUILD_BUG_ON(7 != ARRAY_SIZE(dma_buffer->cmd));
+	BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+	dma_buffer->cmd[0].cmd |= CMD_OCB;
+	cmd[-1].cmd |= CMD_OCU | CMD_LC;
+
+	dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
+			>> 3) | CMD_PTR_LP;
+
+	mb();
+	msm_dmov_exec_cmd(chip->dma_channel, crci_mask, DMOV_CMD_PTR_LIST
+			| DMOV_CMD_ADDR(msm_virt_to_dma(chip,
+			&dma_buffer->cmdptr)));
+	mb();
+
+	/* Check for errors, protection violations etc */
+	if (dma_buffer->data.status & 0x110) {
+		pr_info("%s: MPU/OP error (0x%x) during Onenand probe\n",
+				__func__, dma_buffer->data.status);
+		err = -EIO;
+	} else {
+
+		onenand_info.manufacturer_id =
+			(dma_buffer->data.data0 >> 0) & 0x0000FFFF;
+		onenand_info.device_id =
+			(dma_buffer->data.data0 >> 16) & 0x0000FFFF;
+		onenand_info.version_id =
+			(dma_buffer->data.data1 >> 0) & 0x0000FFFF;
+		onenand_info.data_buf_size =
+			(dma_buffer->data.data1 >> 16) & 0x0000FFFF;
+		onenand_info.boot_buf_size =
+			(dma_buffer->data.data2 >> 0) & 0x0000FFFF;
+		onenand_info.num_of_buffers =
+			(dma_buffer->data.data2 >> 16) & 0x0000FFFF;
+		onenand_info.technology =
+			(dma_buffer->data.data3 >> 0) & 0x0000FFFF;
+
+
+		pr_info("======================================="
+				"==========================\n");
+
+		pr_info("%s: manufacturer_id = 0x%x\n"
+				, __func__, onenand_info.manufacturer_id);
+		pr_info("%s: device_id = 0x%x\n"
+				, __func__, onenand_info.device_id);
+		pr_info("%s: version_id = 0x%x\n"
+				, __func__, onenand_info.version_id);
+		pr_info("%s: data_buf_size = 0x%x\n"
+				, __func__, onenand_info.data_buf_size);
+		pr_info("%s: boot_buf_size = 0x%x\n"
+				, __func__, onenand_info.boot_buf_size);
+		pr_info("%s: num_of_buffers = 0x%x\n"
+				, __func__, onenand_info.num_of_buffers);
+		pr_info("%s: technology = 0x%x\n"
+				, __func__, onenand_info.technology);
+
+		pr_info("======================================="
+				"==========================\n");
+
+		if ((onenand_info.manufacturer_id != 0x00EC)
+			|| ((onenand_info.device_id & 0x0040) != 0x0040)
+			|| (onenand_info.data_buf_size != 0x0800)
+			|| (onenand_info.boot_buf_size != 0x0200)
+			|| (onenand_info.num_of_buffers != 0x0201)
+			|| (onenand_info.technology != 0)) {
+
+			pr_info("%s: Detected an unsupported device\n"
+				, __func__);
+			err = -EIO;
+		}
+	}
+
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+
+	return err;
+}
+
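+/*
+ * OneNAND page read (data and/or spare) via the SFLASH controller.  For
+ * each page a fixed command sequence is built: write the load command and
+ * addresses to the device, wait for its interrupt, read back the
+ * controller/interrupt/ECC status, copy the buffer RAM out in 512-byte
+ * chunks (plus the spare area when requested), then restore the register
+ * state.
+ */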
+int msm_onenand_read_oob(struct mtd_info *mtd,
+		loff_t from, struct mtd_oob_ops *ops)
+{
+	struct msm_nand_chip *chip = mtd->priv;
+
+	struct {
+		dmov_s cmd[53];
+		unsigned cmdptr;
+		struct {
+			uint32_t sfbcfg;
+			uint32_t sfcmd[9];
+			uint32_t sfexec;
+			uint32_t sfstat[9];
+			uint32_t addr0;
+			uint32_t addr1;
+			uint32_t addr2;
+			uint32_t addr3;
+			uint32_t addr4;
+			uint32_t addr5;
+			uint32_t addr6;
+			uint32_t data0;
+			uint32_t data1;
+			uint32_t data2;
+			uint32_t data3;
+			uint32_t data4;
+			uint32_t data5;
+			uint32_t data6;
+			uint32_t macro[5];
+		} data;
+	} *dma_buffer;
+	dmov_s *cmd;
+
+	int err = 0;
+	int i;
+	dma_addr_t data_dma_addr = 0;
+	dma_addr_t oob_dma_addr = 0;
+	dma_addr_t data_dma_addr_curr = 0;
+	dma_addr_t oob_dma_addr_curr = 0;
+
+	loff_t from_curr = 0;
+	unsigned page_count;
+	unsigned pages_read = 0;
+
+	uint16_t onenand_startaddr1;
+	uint16_t onenand_startaddr8;
+	uint16_t onenand_startaddr2;
+	uint16_t onenand_startbuffer;
+	uint16_t onenand_sysconfig1;
+	uint16_t controller_status;
+	uint16_t interrupt_status;
+	uint16_t ecc_status;
+#if VERBOSE
+	pr_info("================================================="
+			"================\n");
+	pr_info("%s: from 0x%llx mode %d\ndatbuf 0x%p datlen 0x%x"
+			"\noobbuf 0x%p ooblen 0x%x\n",
+			__func__, from, ops->mode, ops->datbuf, ops->len,
+			ops->oobbuf, ops->ooblen);
+#endif
+	if (!mtd) {
+		pr_err("%s: invalid mtd pointer, 0x%x\n", __func__,
+				(uint32_t)mtd);
+		return -EINVAL;
+	}
+	if (from & (mtd->writesize - 1)) {
+		pr_err("%s: unsupported from, 0x%llx\n", __func__,
+				from);
+		return -EINVAL;
+	}
+
+	if ((ops->mode != MTD_OOB_PLACE) && (ops->mode != MTD_OOB_AUTO) &&
+			(ops->mode != MTD_OOB_RAW)) {
+		pr_err("%s: unsupported ops->mode, %d\n", __func__,
+				ops->mode);
+		return -EINVAL;
+	}
+
+	if (((ops->datbuf == NULL) || (ops->len == 0)) &&
+			((ops->oobbuf == NULL) || (ops->ooblen == 0))) {
+		pr_err("%s: incorrect ops fields - nothing to do\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if ((ops->datbuf != NULL) && (ops->len == 0)) {
+		pr_err("%s: data buffer passed but length 0\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if ((ops->oobbuf != NULL) && (ops->ooblen == 0)) {
+		pr_err("%s: oob buffer passed but length 0\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (ops->mode != MTD_OOB_RAW) {
+		if (ops->datbuf != NULL && (ops->len % mtd->writesize) != 0) {
+			/* when ops->datbuf is NULL, ops->len can be ooblen */
+			pr_err("%s: unsupported ops->len, %d\n", __func__,
+					ops->len);
+			return -EINVAL;
+		}
+	} else {
+		if (ops->datbuf != NULL &&
+			(ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
+			pr_err("%s: unsupported ops->len,"
+				" %d for MTD_OOB_RAW\n", __func__, ops->len);
+			return -EINVAL;
+		}
+	}
+
+	if ((ops->mode == MTD_OOB_RAW) && (ops->oobbuf)) {
+		pr_err("%s: unsupported operation, oobbuf pointer "
+				"passed in for RAW mode, %x\n", __func__,
+				(uint32_t)ops->oobbuf);
+		return -EINVAL;
+	}
+
+	if (ops->oobbuf && !ops->datbuf) {
+		page_count = ops->ooblen / ((ops->mode == MTD_OOB_AUTO) ?
+			mtd->oobavail : mtd->oobsize);
+		if ((page_count == 0) && (ops->ooblen))
+			page_count = 1;
+	} else if (ops->mode != MTD_OOB_RAW)
+		page_count = ops->len / mtd->writesize;
+	else
+		page_count = ops->len / (mtd->writesize + mtd->oobsize);
+
+	if ((ops->mode == MTD_OOB_PLACE) && (ops->oobbuf != NULL)) {
+		if (page_count * mtd->oobsize > ops->ooblen) {
+			pr_err("%s: unsupported ops->ooblen for "
+				"PLACE, %d\n", __func__, ops->ooblen);
+			return -EINVAL;
+		}
+	}
+
+	if ((ops->mode == MTD_OOB_PLACE) && (ops->ooblen != 0) &&
+							(ops->ooboffs != 0)) {
+		pr_err("%s: unsupported ops->ooboffs, %d\n", __func__,
+				ops->ooboffs);
+		return -EINVAL;
+	}
+
+	if (ops->datbuf) {
+		memset(ops->datbuf, 0x55, ops->len);
+		data_dma_addr_curr = data_dma_addr = msm_nand_dma_map(chip->dev,
+				ops->datbuf, ops->len, DMA_FROM_DEVICE);
+		if (dma_mapping_error(chip->dev, data_dma_addr)) {
+			pr_err("%s: failed to get dma addr for %p\n",
+					__func__, ops->datbuf);
+			return -EIO;
+		}
+	}
+	if (ops->oobbuf) {
+		memset(ops->oobbuf, 0x55, ops->ooblen);
+		oob_dma_addr_curr = oob_dma_addr = msm_nand_dma_map(chip->dev,
+				ops->oobbuf, ops->ooblen, DMA_FROM_DEVICE);
+		if (dma_mapping_error(chip->dev, oob_dma_addr)) {
+			pr_err("%s: failed to get dma addr for %p\n",
+					__func__, ops->oobbuf);
+			err = -EIO;
+			goto err_dma_map_oobbuf_failed;
+		}
+	}
+
+	wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
+				(chip, sizeof(*dma_buffer))));
+
+	from_curr = from;
+
+	while (page_count-- > 0) {
+
+		cmd = dma_buffer->cmd;
+
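+		/*
+		 * Dual-die (DDP) parts map the upper half of the address
+		 * space to flash core 1 and its buffer RAM; all other
+		 * addresses use core 0.
+		 */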
+		if ((onenand_info.device_id & ONENAND_DEVICE_IS_DDP)
+			&& (from_curr >= (mtd->size>>1))) { /* DDP Device */
+			onenand_startaddr1 = DEVICE_FLASHCORE_1 |
+				(((uint32_t)(from_curr-(mtd->size>>1))
+				/ mtd->erasesize));
+			onenand_startaddr2 = DEVICE_BUFFERRAM_1;
+		} else {
+			onenand_startaddr1 = DEVICE_FLASHCORE_0 |
+				((uint32_t)from_curr / mtd->erasesize);
+			onenand_startaddr2 = DEVICE_BUFFERRAM_0;
+		}
+
+		onenand_startaddr8 = (((uint32_t)from_curr &
+				(mtd->erasesize - 1)) / mtd->writesize) << 2;
+		onenand_startbuffer = DATARAM0_0 << 8;
+		onenand_sysconfig1 = (ops->mode == MTD_OOB_RAW) ?
+			ONENAND_SYSCFG1_ECCDIS(nand_sfcmd_mode) :
+			ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode);
+
+		dma_buffer->data.sfbcfg = SFLASH_BCFG |
+					(nand_sfcmd_mode ? 0 : (1 << 24));
+		dma_buffer->data.sfcmd[0] =  SFLASH_PREPCMD(7, 0, 0,
+							MSM_NAND_SFCMD_CMDXS,
+							nand_sfcmd_mode,
+							MSM_NAND_SFCMD_REGWR);
+		dma_buffer->data.sfcmd[1] =  SFLASH_PREPCMD(0, 0, 32,
+							MSM_NAND_SFCMD_CMDXS,
+							nand_sfcmd_mode,
+							MSM_NAND_SFCMD_INTHI);
+		dma_buffer->data.sfcmd[2] =  SFLASH_PREPCMD(3, 7, 0,
+							MSM_NAND_SFCMD_DATXS,
+							nand_sfcmd_mode,
+							MSM_NAND_SFCMD_REGRD);
+		dma_buffer->data.sfcmd[3] =  SFLASH_PREPCMD(256, 0, 0,
+							MSM_NAND_SFCMD_DATXS,
+							nand_sfcmd_mode,
+							MSM_NAND_SFCMD_DATRD);
+		dma_buffer->data.sfcmd[4] =  SFLASH_PREPCMD(256, 0, 0,
+							MSM_NAND_SFCMD_DATXS,
+							nand_sfcmd_mode,
+							MSM_NAND_SFCMD_DATRD);
+		dma_buffer->data.sfcmd[5] =  SFLASH_PREPCMD(256, 0, 0,
+							MSM_NAND_SFCMD_DATXS,
+							nand_sfcmd_mode,
+							MSM_NAND_SFCMD_DATRD);
+		dma_buffer->data.sfcmd[6] =  SFLASH_PREPCMD(256, 0, 0,
+							MSM_NAND_SFCMD_DATXS,
+							nand_sfcmd_mode,
+							MSM_NAND_SFCMD_DATRD);
+		dma_buffer->data.sfcmd[7] =  SFLASH_PREPCMD(32, 0, 0,
+							MSM_NAND_SFCMD_DATXS,
+							nand_sfcmd_mode,
+							MSM_NAND_SFCMD_DATRD);
+		dma_buffer->data.sfcmd[8] =  SFLASH_PREPCMD(4, 10, 0,
+							MSM_NAND_SFCMD_CMDXS,
+							nand_sfcmd_mode,
+							MSM_NAND_SFCMD_REGWR);
+		dma_buffer->data.sfexec = 1;
+		dma_buffer->data.sfstat[0] = CLEAN_DATA_32;
+		dma_buffer->data.sfstat[1] = CLEAN_DATA_32;
+		dma_buffer->data.sfstat[2] = CLEAN_DATA_32;
+		dma_buffer->data.sfstat[3] = CLEAN_DATA_32;
+		dma_buffer->data.sfstat[4] = CLEAN_DATA_32;
+		dma_buffer->data.sfstat[5] = CLEAN_DATA_32;
+		dma_buffer->data.sfstat[6] = CLEAN_DATA_32;
+		dma_buffer->data.sfstat[7] = CLEAN_DATA_32;
+		dma_buffer->data.sfstat[8] = CLEAN_DATA_32;
+		dma_buffer->data.addr0 = (ONENAND_INTERRUPT_STATUS << 16) |
+						(ONENAND_SYSTEM_CONFIG_1);
+		dma_buffer->data.addr1 = (ONENAND_START_ADDRESS_8 << 16) |
+						(ONENAND_START_ADDRESS_1);
+		dma_buffer->data.addr2 = (ONENAND_START_BUFFER << 16) |
+						(ONENAND_START_ADDRESS_2);
+		dma_buffer->data.addr3 = (ONENAND_ECC_STATUS << 16) |
+						(ONENAND_COMMAND);
+		dma_buffer->data.addr4 = (ONENAND_CONTROLLER_STATUS << 16) |
+						(ONENAND_INTERRUPT_STATUS);
+		dma_buffer->data.addr5 = (ONENAND_INTERRUPT_STATUS << 16) |
+						(ONENAND_SYSTEM_CONFIG_1);
+		dma_buffer->data.addr6 = (ONENAND_START_ADDRESS_3 << 16) |
+						(ONENAND_START_ADDRESS_1);
+		dma_buffer->data.data0 = (ONENAND_CLRINTR << 16) |
+						(onenand_sysconfig1);
+		dma_buffer->data.data1 = (onenand_startaddr8 << 16) |
+						(onenand_startaddr1);
+		dma_buffer->data.data2 = (onenand_startbuffer << 16) |
+						(onenand_startaddr2);
+		dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
+						(ONENAND_CMDLOADSPARE);
+		dma_buffer->data.data4 = (CLEAN_DATA_16 << 16) |
+						(CLEAN_DATA_16);
+		dma_buffer->data.data5 = (ONENAND_CLRINTR << 16) |
+				(ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
+		dma_buffer->data.data6 = (ONENAND_STARTADDR3_RES << 16) |
+						(ONENAND_STARTADDR1_RES);
+		dma_buffer->data.macro[0] = 0x0200;
+		dma_buffer->data.macro[1] = 0x0300;
+		dma_buffer->data.macro[2] = 0x0400;
+		dma_buffer->data.macro[3] = 0x0500;
+		dma_buffer->data.macro[4] = 0x8010;
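+		/*
+		 * Each macro value is written to MACRO1_REG before the
+		 * corresponding buffer RAM transfer below: 0x0200..0x0500
+		 * for the four 512-byte main-area reads, 0x8010 for the
+		 * spare-area read.
+		 */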
+
+		/*************************************************************/
+		/* Write necessary address registers in the onenand device   */
+		/*************************************************************/
+
+		/* Enable and configure the SFlash controller */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfbcfg);
+		cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
+		cmd->len = 4;
+		cmd++;
+
+		/* Block on cmd ready and write CMD register */
+		cmd->cmd = DST_CRCI_NAND_CMD;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[0]);
+		cmd->dst = MSM_NAND_SFLASHC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Write the ADDR0 and ADDR1 registers */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
+		cmd->dst = MSM_NAND_ADDR0;
+		cmd->len = 8;
+		cmd++;
+
+		/* Write the ADDR2 ADDR3 ADDR4 ADDR5 registers */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
+		cmd->dst = MSM_NAND_ADDR2;
+		cmd->len = 16;
+		cmd++;
+
+		/* Write the ADDR6 registers */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr6);
+		cmd->dst = MSM_NAND_ADDR6;
+		cmd->len = 4;
+		cmd++;
+
+		/* Write the GENP0, GENP1, GENP2, GENP3 registers */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data0);
+		cmd->dst = MSM_NAND_GENP_REG0;
+		cmd->len = 16;
+		cmd++;
+
+		/* Write the FLASH_DEV_CMD4,5,6 registers */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data4);
+		cmd->dst = MSM_NAND_DEV_CMD4;
+		cmd->len = 12;
+		cmd++;
+
+		/* Kick the execute command */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
+		cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Block on data ready, and read the status register */
+		cmd->cmd = SRC_CRCI_NAND_DATA;
+		cmd->src = MSM_NAND_SFLASHC_STATUS;
+		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[0]);
+		cmd->len = 4;
+		cmd++;
+
+		/*************************************************************/
+		/* Wait for the interrupt from the Onenand device controller */
+		/*************************************************************/
+
+		/* Block on cmd ready and write CMD register */
+		cmd->cmd = DST_CRCI_NAND_CMD;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[1]);
+		cmd->dst = MSM_NAND_SFLASHC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Kick the execute command */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
+		cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Block on data ready, and read the status register */
+		cmd->cmd = SRC_CRCI_NAND_DATA;
+		cmd->src = MSM_NAND_SFLASHC_STATUS;
+		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[1]);
+		cmd->len = 4;
+		cmd++;
+
+		/*************************************************************/
+		/* Read necessary status registers from the onenand device   */
+		/*************************************************************/
+
+		/* Block on cmd ready and write CMD register */
+		cmd->cmd = DST_CRCI_NAND_CMD;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[2]);
+		cmd->dst = MSM_NAND_SFLASHC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Kick the execute command */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
+		cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Block on data ready, and read the status register */
+		cmd->cmd = SRC_CRCI_NAND_DATA;
+		cmd->src = MSM_NAND_SFLASHC_STATUS;
+		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[2]);
+		cmd->len = 4;
+		cmd++;
+
+		/* Read the GENP3 register */
+		cmd->cmd = 0;
+		cmd->src = MSM_NAND_GENP_REG3;
+		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data3);
+		cmd->len = 4;
+		cmd++;
+
+		/* Read the DEVCMD4 register */
+		cmd->cmd = 0;
+		cmd->src = MSM_NAND_DEV_CMD4;
+		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data4);
+		cmd->len = 4;
+		cmd++;
+
+		/*************************************************************/
+		/* Read the data ram area from the onenand buffer ram        */
+		/*************************************************************/
+
+		if (ops->datbuf) {
+
+			dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
+							(ONENAND_CMDLOAD);
+
+			for (i = 0; i < 4; i++) {
+
+				/* Block on cmd ready and write CMD register */
+				cmd->cmd = DST_CRCI_NAND_CMD;
+				cmd->src = msm_virt_to_dma(chip,
+						&dma_buffer->data.sfcmd[3+i]);
+				cmd->dst = MSM_NAND_SFLASHC_CMD;
+				cmd->len = 4;
+				cmd++;
+
+				/* Write the MACRO1 register */
+				cmd->cmd = 0;
+				cmd->src = msm_virt_to_dma(chip,
+						&dma_buffer->data.macro[i]);
+				cmd->dst = MSM_NAND_MACRO1_REG;
+				cmd->len = 4;
+				cmd++;
+
+				/* Kick the execute command */
+				cmd->cmd = 0;
+				cmd->src = msm_virt_to_dma(chip,
+						&dma_buffer->data.sfexec);
+				cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
+				cmd->len = 4;
+				cmd++;
+
+				/* Block on data rdy, & read status register */
+				cmd->cmd = SRC_CRCI_NAND_DATA;
+				cmd->src = MSM_NAND_SFLASHC_STATUS;
+				cmd->dst = msm_virt_to_dma(chip,
+						&dma_buffer->data.sfstat[3+i]);
+				cmd->len = 4;
+				cmd++;
+
+				/* Transfer nand ctlr buf contents to usr buf */
+				cmd->cmd = 0;
+				cmd->src = MSM_NAND_FLASH_BUFFER;
+				cmd->dst = data_dma_addr_curr;
+				cmd->len = 512;
+				data_dma_addr_curr += 512;
+				cmd++;
+			}
+		}
+
+		if ((ops->oobbuf) || (ops->mode == MTD_OOB_RAW)) {
+
+			/* Block on cmd ready and write CMD register */
+			cmd->cmd = DST_CRCI_NAND_CMD;
+			cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->data.sfcmd[7]);
+			cmd->dst = MSM_NAND_SFLASHC_CMD;
+			cmd->len = 4;
+			cmd++;
+
+			/* Write the MACRO1 register */
+			cmd->cmd = 0;
+			cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->data.macro[4]);
+			cmd->dst = MSM_NAND_MACRO1_REG;
+			cmd->len = 4;
+			cmd++;
+
+			/* Kick the execute command */
+			cmd->cmd = 0;
+			cmd->src = msm_virt_to_dma(chip,
+					&dma_buffer->data.sfexec);
+			cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
+			cmd->len = 4;
+			cmd++;
+
+			/* Block on data ready, and read status register */
+			cmd->cmd = SRC_CRCI_NAND_DATA;
+			cmd->src = MSM_NAND_SFLASHC_STATUS;
+			cmd->dst = msm_virt_to_dma(chip,
+					&dma_buffer->data.sfstat[7]);
+			cmd->len = 4;
+			cmd++;
+
+			/* Transfer nand ctlr buffer contents into usr buf */
+			if (ops->mode == MTD_OOB_AUTO) {
+				for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
+					cmd->cmd = 0;
+					cmd->src = MSM_NAND_FLASH_BUFFER +
+					mtd->ecclayout->oobfree[i].offset;
+					cmd->dst = oob_dma_addr_curr;
+					cmd->len =
+					mtd->ecclayout->oobfree[i].length;
+					oob_dma_addr_curr +=
+					mtd->ecclayout->oobfree[i].length;
+					cmd++;
+				}
+			}
+			if (ops->mode == MTD_OOB_PLACE) {
+				cmd->cmd = 0;
+				cmd->src = MSM_NAND_FLASH_BUFFER;
+				cmd->dst = oob_dma_addr_curr;
+				cmd->len = mtd->oobsize;
+				oob_dma_addr_curr += mtd->oobsize;
+				cmd++;
+			}
+			if (ops->mode == MTD_OOB_RAW) {
+				cmd->cmd = 0;
+				cmd->src = MSM_NAND_FLASH_BUFFER;
+				cmd->dst = data_dma_addr_curr;
+				cmd->len = mtd->oobsize;
+				data_dma_addr_curr += mtd->oobsize;
+				cmd++;
+			}
+		}
+
+		/*************************************************************/
+		/* Restore the necessary registers to proper values          */
+		/*************************************************************/
+
+		/* Block on cmd ready and write CMD register */
+		cmd->cmd = DST_CRCI_NAND_CMD;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[8]);
+		cmd->dst = MSM_NAND_SFLASHC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Kick the execute command */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
+		cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Block on data ready, and read the status register */
+		cmd->cmd = SRC_CRCI_NAND_DATA;
+		cmd->src = MSM_NAND_SFLASHC_STATUS;
+		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[8]);
+		cmd->len = 4;
+		cmd++;
+
+
+		BUILD_BUG_ON(53 != ARRAY_SIZE(dma_buffer->cmd));
+		BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+		dma_buffer->cmd[0].cmd |= CMD_OCB;
+		cmd[-1].cmd |= CMD_OCU | CMD_LC;
+
+		dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
+				>> 3) | CMD_PTR_LP;
+
+		mb();
+		msm_dmov_exec_cmd(chip->dma_channel, crci_mask,
+			DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
+				&dma_buffer->cmdptr)));
+		mb();
+
+		ecc_status = (dma_buffer->data.data3 >> 16) &
+							0x0000FFFF;
+		interrupt_status = (dma_buffer->data.data4 >> 0) &
+							0x0000FFFF;
+		controller_status = (dma_buffer->data.data4 >> 16) &
+							0x0000FFFF;
+
+#if VERBOSE
+		pr_info("\n%s: sflash status %x %x %x %x %x %x %x "
+				"%x %x\n", __func__,
+					dma_buffer->data.sfstat[0],
+					dma_buffer->data.sfstat[1],
+					dma_buffer->data.sfstat[2],
+					dma_buffer->data.sfstat[3],
+					dma_buffer->data.sfstat[4],
+					dma_buffer->data.sfstat[5],
+					dma_buffer->data.sfstat[6],
+					dma_buffer->data.sfstat[7],
+					dma_buffer->data.sfstat[8]);
+
+		pr_info("%s: controller_status = %x\n", __func__,
+					controller_status);
+		pr_info("%s: interrupt_status = %x\n", __func__,
+					interrupt_status);
+		pr_info("%s: ecc_status = %x\n", __func__,
+					ecc_status);
+#endif
+		/* Check for errors, protection violations etc */
+		if ((controller_status != 0)
+				|| (dma_buffer->data.sfstat[0] & 0x110)
+				|| (dma_buffer->data.sfstat[1] & 0x110)
+				|| (dma_buffer->data.sfstat[2] & 0x110)
+				|| (dma_buffer->data.sfstat[8] & 0x110)
+				|| ((dma_buffer->data.sfstat[3] & 0x110) &&
+								(ops->datbuf))
+				|| ((dma_buffer->data.sfstat[4] & 0x110) &&
+								(ops->datbuf))
+				|| ((dma_buffer->data.sfstat[5] & 0x110) &&
+								(ops->datbuf))
+				|| ((dma_buffer->data.sfstat[6] & 0x110) &&
+								(ops->datbuf))
+				|| ((dma_buffer->data.sfstat[7] & 0x110) &&
+								((ops->oobbuf)
+					|| (ops->mode == MTD_OOB_RAW)))) {
+			pr_info("%s: ECC/MPU/OP error\n", __func__);
+			err = -EIO;
+		}
+
+		if (err)
+			break;
+		pages_read++;
+		from_curr += mtd->writesize;
+	}
+
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+
+	if (ops->oobbuf) {
+		dma_unmap_page(chip->dev, oob_dma_addr, ops->ooblen,
+				DMA_FROM_DEVICE);
+	}
+err_dma_map_oobbuf_failed:
+	if (ops->datbuf) {
+		dma_unmap_page(chip->dev, data_dma_addr, ops->len,
+				DMA_FROM_DEVICE);
+	}
+
+	if (err) {
+		pr_err("%s: %llx %x %x failed\n", __func__, from_curr,
+				ops->datbuf ? ops->len : 0, ops->ooblen);
+	} else {
+		ops->retlen = ops->oobretlen = 0;
+		if (ops->datbuf != NULL) {
+			if (ops->mode != MTD_OOB_RAW)
+				ops->retlen = mtd->writesize * pages_read;
+			else
+				ops->retlen = (mtd->writesize +  mtd->oobsize)
+							* pages_read;
+		}
+		if (ops->oobbuf != NULL) {
+			if (ops->mode == MTD_OOB_AUTO)
+				ops->oobretlen = mtd->oobavail * pages_read;
+			else
+				ops->oobretlen = mtd->oobsize * pages_read;
+		}
+	}
+
+#if VERBOSE
+	pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
+			__func__, err, ops->retlen, ops->oobretlen);
+
+	pr_info("==================================================="
+			"==============\n");
+#endif
+	return err;
+}
+
+int msm_onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
+		size_t *retlen, u_char *buf)
+{
+	int ret;
+	struct mtd_oob_ops ops;
+
+	ops.mode = MTD_OOB_PLACE;
+	ops.datbuf = buf;
+	ops.len = len;
+	ops.retlen = 0;
+	ops.oobbuf = NULL;
+	ops.ooblen = 0;
+	ops.oobretlen = 0;
+	ret = msm_onenand_read_oob(mtd, from, &ops);
+	*retlen = ops.retlen;
+
+	return ret;
+}
+
+static int msm_onenand_write_oob(struct mtd_info *mtd, loff_t to,
+		struct mtd_oob_ops *ops)
+{
+	struct msm_nand_chip *chip = mtd->priv;
+
+	struct {
+		dmov_s cmd[53];
+		unsigned cmdptr;
+		struct {
+			uint32_t sfbcfg;
+			uint32_t sfcmd[10];
+			uint32_t sfexec;
+			uint32_t sfstat[10];
+			uint32_t addr0;
+			uint32_t addr1;
+			uint32_t addr2;
+			uint32_t addr3;
+			uint32_t addr4;
+			uint32_t addr5;
+			uint32_t addr6;
+			uint32_t data0;
+			uint32_t data1;
+			uint32_t data2;
+			uint32_t data3;
+			uint32_t data4;
+			uint32_t data5;
+			uint32_t data6;
+			uint32_t macro[5];
+		} data;
+	} *dma_buffer;
+	dmov_s *cmd;
+
+	int err = 0;
+	int i, j, k;
+	dma_addr_t data_dma_addr = 0;
+	dma_addr_t oob_dma_addr = 0;
+	dma_addr_t init_dma_addr = 0;
+	dma_addr_t data_dma_addr_curr = 0;
+	dma_addr_t oob_dma_addr_curr = 0;
+	uint8_t *init_spare_bytes;
+
+	loff_t to_curr = 0;
+	unsigned page_count;
+	unsigned pages_written = 0;
+
+	uint16_t onenand_startaddr1;
+	uint16_t onenand_startaddr8;
+	uint16_t onenand_startaddr2;
+	uint16_t onenand_startbuffer;
+	uint16_t onenand_sysconfig1;
+
+	uint16_t controller_status;
+	uint16_t interrupt_status;
+	uint16_t ecc_status;
+
+#if VERBOSE
+	pr_info("================================================="
+			"================\n");
+	pr_info("%s: to 0x%llx mode %d \ndatbuf 0x%p datlen 0x%x"
+			"\noobbuf 0x%p ooblen 0x%x\n",
+			__func__, to, ops->mode, ops->datbuf, ops->len,
+			ops->oobbuf, ops->ooblen);
+#endif
+	if (!mtd) {
+		pr_err("%s: invalid mtd pointer, 0x%x\n", __func__,
+				(uint32_t)mtd);
+		return -EINVAL;
+	}
+	if (to & (mtd->writesize - 1)) {
+		pr_err("%s: unsupported to, 0x%llx\n", __func__, to);
+		return -EINVAL;
+	}
+
+	if ((ops->mode != MTD_OOB_PLACE) && (ops->mode != MTD_OOB_AUTO) &&
+			(ops->mode != MTD_OOB_RAW)) {
+		pr_err("%s: unsupported ops->mode, %d\n", __func__,
+				ops->mode);
+		return -EINVAL;
+	}
+
+	if (((ops->datbuf == NULL) || (ops->len == 0)) &&
+			((ops->oobbuf == NULL) || (ops->ooblen == 0))) {
+		pr_err("%s: incorrect ops fields - nothing to do\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if ((ops->datbuf != NULL) && (ops->len == 0)) {
+		pr_err("%s: data buffer passed but length 0\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if ((ops->oobbuf != NULL) && (ops->ooblen == 0)) {
+		pr_err("%s: oob buffer passed but length 0\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (ops->mode != MTD_OOB_RAW) {
+		if (ops->datbuf != NULL && (ops->len % mtd->writesize) != 0) {
+			/* when ops->datbuf is NULL, ops->len can be ooblen */
+			pr_err("%s: unsupported ops->len, %d\n", __func__,
+					ops->len);
+			return -EINVAL;
+		}
+	} else {
+		if (ops->datbuf != NULL &&
+			(ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
+			pr_err("%s: unsupported ops->len,"
+				" %d for MTD_OOB_RAW\n", __func__, ops->len);
+			return -EINVAL;
+		}
+	}
+
+	if ((ops->mode == MTD_OOB_RAW) && (ops->oobbuf)) {
+		pr_err("%s: unsupported operation, oobbuf pointer "
+				"passed in for RAW mode, %x\n", __func__,
+				(uint32_t)ops->oobbuf);
+		return -EINVAL;
+	}
+
+	if (ops->oobbuf && !ops->datbuf) {
+		page_count = ops->ooblen / ((ops->mode == MTD_OOB_AUTO) ?
+			mtd->oobavail : mtd->oobsize);
+		if ((page_count == 0) && (ops->ooblen))
+			page_count = 1;
+	} else if (ops->mode != MTD_OOB_RAW)
+		page_count = ops->len / mtd->writesize;
+	else
+		page_count = ops->len / (mtd->writesize + mtd->oobsize);
+
+	if ((ops->mode == MTD_OOB_AUTO) && (ops->oobbuf != NULL)) {
+		if (page_count > 1) {
+			pr_err("%s: unsupported ops->ooblen for"
+				" AUTO, %d\n", __func__, ops->ooblen);
+			return -EINVAL;
+		}
+	}
+
+	if ((ops->mode == MTD_OOB_PLACE) && (ops->oobbuf != NULL)) {
+		if (page_count * mtd->oobsize > ops->ooblen) {
+			pr_err("%s: unsupported ops->ooblen for"
+				" PLACE, %d\n", __func__, ops->ooblen);
+			return -EINVAL;
+		}
+	}
+
+	if ((ops->mode == MTD_OOB_PLACE) && (ops->ooblen != 0) &&
+						(ops->ooboffs != 0)) {
+		pr_err("%s: unsupported ops->ooboffs, %d\n",
+				__func__, ops->ooboffs);
+		return -EINVAL;
+	}
+
+	init_spare_bytes = kmalloc(64, GFP_KERNEL);
+	if (!init_spare_bytes) {
+		pr_err("%s: failed to alloc init_spare_bytes buffer\n",
+				__func__);
+		return -ENOMEM;
+	}
+	for (i = 0; i < 64; i++)
+		init_spare_bytes[i] = 0xFF;
+
+	if ((ops->oobbuf) && (ops->mode == MTD_OOB_AUTO)) {
+		for (i = 0, k = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++)
+			for (j = 0; j < mtd->ecclayout->oobfree[i].length;
+					j++) {
+				init_spare_bytes[j +
+					mtd->ecclayout->oobfree[i].offset]
+						= (ops->oobbuf)[k];
+				k++;
+			}
+	}
+
+	if (ops->datbuf) {
+		data_dma_addr_curr = data_dma_addr = msm_nand_dma_map(chip->dev,
+				ops->datbuf, ops->len, DMA_TO_DEVICE);
+		if (dma_mapping_error(chip->dev, data_dma_addr)) {
+			pr_err("%s: failed to get dma addr for %p\n",
+					__func__, ops->datbuf);
+			/* Free the spare-area fill buffer allocated above */
+			kfree(init_spare_bytes);
+			return -EIO;
+		}
+	}
+	if (ops->oobbuf) {
+		oob_dma_addr_curr = oob_dma_addr = msm_nand_dma_map(chip->dev,
+				ops->oobbuf, ops->ooblen, DMA_TO_DEVICE);
+		if (dma_mapping_error(chip->dev, oob_dma_addr)) {
+			pr_err("%s: failed to get dma addr for %p\n",
+					__func__, ops->oobbuf);
+			err = -EIO;
+			goto err_dma_map_oobbuf_failed;
+		}
+	}
+
+	init_dma_addr = msm_nand_dma_map(chip->dev, init_spare_bytes, 64,
+			DMA_TO_DEVICE);
+	if (dma_mapping_error(chip->dev, init_dma_addr)) {
+		pr_err("%s: failed to get dma addr for %p\n",
+				__func__, init_spare_bytes);
+		err = -EIO;
+		goto err_dma_map_initbuf_failed;
+	}
+
+
+	wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
+				(chip, sizeof(*dma_buffer))));
+
+	to_curr = to;
+
+	while (page_count-- > 0) {
+		cmd = dma_buffer->cmd;
+
+		if ((onenand_info.device_id & ONENAND_DEVICE_IS_DDP)
+			&& (to_curr >= (mtd->size>>1))) { /* DDP Device */
+				onenand_startaddr1 = DEVICE_FLASHCORE_1 |
+					(((uint32_t)(to_curr-(mtd->size>>1))
+					/ mtd->erasesize));
+				onenand_startaddr2 = DEVICE_BUFFERRAM_1;
+		} else {
+				onenand_startaddr1 = DEVICE_FLASHCORE_0 |
+					((uint32_t)to_curr / mtd->erasesize);
+				onenand_startaddr2 = DEVICE_BUFFERRAM_0;
+		}
+
+		onenand_startaddr8 = (((uint32_t)to_curr &
+				(mtd->erasesize - 1)) / mtd->writesize) << 2;
+		onenand_startbuffer = DATARAM0_0 << 8;
+		onenand_sysconfig1 = (ops->mode == MTD_OOB_RAW) ?
+			ONENAND_SYSCFG1_ECCDIS(nand_sfcmd_mode) :
+			ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode);
+
+		dma_buffer->data.sfbcfg = SFLASH_BCFG |
+					(nand_sfcmd_mode ? 0 : (1 << 24));
+		dma_buffer->data.sfcmd[0] =  SFLASH_PREPCMD(6, 0, 0,
+							MSM_NAND_SFCMD_CMDXS,
+							nand_sfcmd_mode,
+							MSM_NAND_SFCMD_REGWR);
+		dma_buffer->data.sfcmd[1] =  SFLASH_PREPCMD(256, 0, 0,
+							MSM_NAND_SFCMD_CMDXS,
+							nand_sfcmd_mode,
+							MSM_NAND_SFCMD_DATWR);
+		dma_buffer->data.sfcmd[2] =  SFLASH_PREPCMD(256, 0, 0,
+							MSM_NAND_SFCMD_CMDXS,
+							nand_sfcmd_mode,
+							MSM_NAND_SFCMD_DATWR);
+		dma_buffer->data.sfcmd[3] =  SFLASH_PREPCMD(256, 0, 0,
+							MSM_NAND_SFCMD_CMDXS,
+							nand_sfcmd_mode,
+							MSM_NAND_SFCMD_DATWR);
+		dma_buffer->data.sfcmd[4] =  SFLASH_PREPCMD(256, 0, 0,
+							MSM_NAND_SFCMD_CMDXS,
+							nand_sfcmd_mode,
+							MSM_NAND_SFCMD_DATWR);
+		dma_buffer->data.sfcmd[5] =  SFLASH_PREPCMD(32, 0, 0,
+							MSM_NAND_SFCMD_CMDXS,
+							nand_sfcmd_mode,
+							MSM_NAND_SFCMD_DATWR);
+		dma_buffer->data.sfcmd[6] =  SFLASH_PREPCMD(1, 6, 0,
+							MSM_NAND_SFCMD_CMDXS,
+							nand_sfcmd_mode,
+							MSM_NAND_SFCMD_REGWR);
+		dma_buffer->data.sfcmd[7] =  SFLASH_PREPCMD(0, 0, 32,
+							MSM_NAND_SFCMD_CMDXS,
+							nand_sfcmd_mode,
+							MSM_NAND_SFCMD_INTHI);
+		dma_buffer->data.sfcmd[8] =  SFLASH_PREPCMD(3, 7, 0,
+							MSM_NAND_SFCMD_DATXS,
+							nand_sfcmd_mode,
+							MSM_NAND_SFCMD_REGRD);
+		dma_buffer->data.sfcmd[9] =  SFLASH_PREPCMD(4, 10, 0,
+							MSM_NAND_SFCMD_CMDXS,
+							nand_sfcmd_mode,
+							MSM_NAND_SFCMD_REGWR);
+		dma_buffer->data.sfexec = 1;
+		dma_buffer->data.sfstat[0] = CLEAN_DATA_32;
+		dma_buffer->data.sfstat[1] = CLEAN_DATA_32;
+		dma_buffer->data.sfstat[2] = CLEAN_DATA_32;
+		dma_buffer->data.sfstat[3] = CLEAN_DATA_32;
+		dma_buffer->data.sfstat[4] = CLEAN_DATA_32;
+		dma_buffer->data.sfstat[5] = CLEAN_DATA_32;
+		dma_buffer->data.sfstat[6] = CLEAN_DATA_32;
+		dma_buffer->data.sfstat[7] = CLEAN_DATA_32;
+		dma_buffer->data.sfstat[8] = CLEAN_DATA_32;
+		dma_buffer->data.sfstat[9] = CLEAN_DATA_32;
+		dma_buffer->data.addr0 = (ONENAND_INTERRUPT_STATUS << 16) |
+						(ONENAND_SYSTEM_CONFIG_1);
+		dma_buffer->data.addr1 = (ONENAND_START_ADDRESS_8 << 16) |
+						(ONENAND_START_ADDRESS_1);
+		dma_buffer->data.addr2 = (ONENAND_START_BUFFER << 16) |
+						(ONENAND_START_ADDRESS_2);
+		dma_buffer->data.addr3 = (ONENAND_ECC_STATUS << 16) |
+						(ONENAND_COMMAND);
+		dma_buffer->data.addr4 = (ONENAND_CONTROLLER_STATUS << 16) |
+						(ONENAND_INTERRUPT_STATUS);
+		dma_buffer->data.addr5 = (ONENAND_INTERRUPT_STATUS << 16) |
+						(ONENAND_SYSTEM_CONFIG_1);
+		dma_buffer->data.addr6 = (ONENAND_START_ADDRESS_3 << 16) |
+						(ONENAND_START_ADDRESS_1);
+		dma_buffer->data.data0 = (ONENAND_CLRINTR << 16) |
+						(onenand_sysconfig1);
+		dma_buffer->data.data1 = (onenand_startaddr8 << 16) |
+						(onenand_startaddr1);
+		dma_buffer->data.data2 = (onenand_startbuffer << 16) |
+						(onenand_startaddr2);
+		dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
+						(ONENAND_CMDPROGSPARE);
+		dma_buffer->data.data4 = (CLEAN_DATA_16 << 16) |
+						(CLEAN_DATA_16);
+		dma_buffer->data.data5 = (ONENAND_CLRINTR << 16) |
+				(ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
+		dma_buffer->data.data6 = (ONENAND_STARTADDR3_RES << 16) |
+						(ONENAND_STARTADDR1_RES);
+		dma_buffer->data.macro[0] = 0x0200;
+		dma_buffer->data.macro[1] = 0x0300;
+		dma_buffer->data.macro[2] = 0x0400;
+		dma_buffer->data.macro[3] = 0x0500;
+		dma_buffer->data.macro[4] = 0x8010;
+
+
+		/*************************************************************/
+		/* Write necessary address registers in the onenand device   */
+		/*************************************************************/
+
+		/* Enable and configure the SFlash controller */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfbcfg);
+		cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
+		cmd->len = 4;
+		cmd++;
+
+		/* Block on cmd ready and write CMD register */
+		cmd->cmd = DST_CRCI_NAND_CMD;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[0]);
+		cmd->dst = MSM_NAND_SFLASHC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Write the ADDR0 and ADDR1 registers */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
+		cmd->dst = MSM_NAND_ADDR0;
+		cmd->len = 8;
+		cmd++;
+
+		/* Write the ADDR2 ADDR3 ADDR4 ADDR5 registers */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
+		cmd->dst = MSM_NAND_ADDR2;
+		cmd->len = 16;
+		cmd++;
+
+		/* Write the ADDR6 registers */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr6);
+		cmd->dst = MSM_NAND_ADDR6;
+		cmd->len = 4;
+		cmd++;
+
+		/* Write the GENP0, GENP1, GENP2, GENP3 registers */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data0);
+		cmd->dst = MSM_NAND_GENP_REG0;
+		cmd->len = 16;
+		cmd++;
+
+		/* Write the FLASH_DEV_CMD4,5,6 registers */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data4);
+		cmd->dst = MSM_NAND_DEV_CMD4;
+		cmd->len = 12;
+		cmd++;
+
+		/* Kick the execute command */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
+		cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Block on data ready, and read the status register */
+		cmd->cmd = SRC_CRCI_NAND_DATA;
+		cmd->src = MSM_NAND_SFLASHC_STATUS;
+		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[0]);
+		cmd->len = 4;
+		cmd++;
+
+		/*************************************************************/
+		/* Write the data ram area in the onenand buffer ram         */
+		/*************************************************************/
+
+		if (ops->datbuf) {
+			dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
+							(ONENAND_CMDPROG);
+
+			for (i = 0; i < 4; i++) {
+
+				/* Block on cmd ready and write CMD register */
+				cmd->cmd = DST_CRCI_NAND_CMD;
+				cmd->src = msm_virt_to_dma(chip,
+						&dma_buffer->data.sfcmd[1+i]);
+				cmd->dst = MSM_NAND_SFLASHC_CMD;
+				cmd->len = 4;
+				cmd++;
+
+				/* Trnsfr usr buf contents to nand ctlr buf */
+				cmd->cmd = 0;
+				cmd->src = data_dma_addr_curr;
+				cmd->dst = MSM_NAND_FLASH_BUFFER;
+				cmd->len = 512;
+				data_dma_addr_curr += 512;
+				cmd++;
+
+				/* Write the MACRO1 register */
+				cmd->cmd = 0;
+				cmd->src = msm_virt_to_dma(chip,
+						&dma_buffer->data.macro[i]);
+				cmd->dst = MSM_NAND_MACRO1_REG;
+				cmd->len = 4;
+				cmd++;
+
+				/* Kick the execute command */
+				cmd->cmd = 0;
+				cmd->src = msm_virt_to_dma(chip,
+						&dma_buffer->data.sfexec);
+				cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
+				cmd->len = 4;
+				cmd++;
+
+				/* Block on data rdy, & read status register */
+				cmd->cmd = SRC_CRCI_NAND_DATA;
+				cmd->src = MSM_NAND_SFLASHC_STATUS;
+				cmd->dst = msm_virt_to_dma(chip,
+						&dma_buffer->data.sfstat[1+i]);
+				cmd->len = 4;
+				cmd++;
+
+			}
+		}
+
+		/* Block on cmd ready and write CMD register */
+		cmd->cmd = DST_CRCI_NAND_CMD;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[5]);
+		cmd->dst = MSM_NAND_SFLASHC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		if ((ops->oobbuf) || (ops->mode == MTD_OOB_RAW)) {
+
+			/* Transfer user buf contents into nand ctlr buffer */
+			if (ops->mode == MTD_OOB_AUTO) {
+				cmd->cmd = 0;
+				cmd->src = init_dma_addr;
+				cmd->dst = MSM_NAND_FLASH_BUFFER;
+				cmd->len = mtd->oobsize;
+				cmd++;
+			}
+			if (ops->mode == MTD_OOB_PLACE) {
+				cmd->cmd = 0;
+				cmd->src = oob_dma_addr_curr;
+				cmd->dst = MSM_NAND_FLASH_BUFFER;
+				cmd->len = mtd->oobsize;
+				oob_dma_addr_curr += mtd->oobsize;
+				cmd++;
+			}
+			if (ops->mode == MTD_OOB_RAW) {
+				cmd->cmd = 0;
+				cmd->src = data_dma_addr_curr;
+				cmd->dst = MSM_NAND_FLASH_BUFFER;
+				cmd->len = mtd->oobsize;
+				data_dma_addr_curr += mtd->oobsize;
+				cmd++;
+			}
+		} else {
+			cmd->cmd = 0;
+			cmd->src = init_dma_addr;
+			cmd->dst = MSM_NAND_FLASH_BUFFER;
+			cmd->len = mtd->oobsize;
+			cmd++;
+		}
+
+		/* Write the MACRO1 register */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.macro[4]);
+		cmd->dst = MSM_NAND_MACRO1_REG;
+		cmd->len = 4;
+		cmd++;
+
+		/* Kick the execute command */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
+		cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Block on data ready, and read the status register */
+		cmd->cmd = SRC_CRCI_NAND_DATA;
+		cmd->src = MSM_NAND_SFLASHC_STATUS;
+		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[5]);
+		cmd->len = 4;
+		cmd++;
+
+		/*********************************************************/
+		/* Issuing write command                                 */
+		/*********************************************************/
+
+		/* Block on cmd ready and write CMD register */
+		cmd->cmd = DST_CRCI_NAND_CMD;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[6]);
+		cmd->dst = MSM_NAND_SFLASHC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Kick the execute command */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
+		cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Block on data ready, and read the status register */
+		cmd->cmd = SRC_CRCI_NAND_DATA;
+		cmd->src = MSM_NAND_SFLASHC_STATUS;
+		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[6]);
+		cmd->len = 4;
+		cmd++;
+
+		/*************************************************************/
+		/* Wait for the interrupt from the Onenand device controller */
+		/*************************************************************/
+
+		/* Block on cmd ready and write CMD register */
+		cmd->cmd = DST_CRCI_NAND_CMD;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[7]);
+		cmd->dst = MSM_NAND_SFLASHC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Kick the execute command */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
+		cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Block on data ready, and read the status register */
+		cmd->cmd = SRC_CRCI_NAND_DATA;
+		cmd->src = MSM_NAND_SFLASHC_STATUS;
+		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[7]);
+		cmd->len = 4;
+		cmd++;
+
+		/*************************************************************/
+		/* Read necessary status registers from the onenand device   */
+		/*************************************************************/
+
+		/* Block on cmd ready and write CMD register */
+		cmd->cmd = DST_CRCI_NAND_CMD;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[8]);
+		cmd->dst = MSM_NAND_SFLASHC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Kick the execute command */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
+		cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Block on data ready, and read the status register */
+		cmd->cmd = SRC_CRCI_NAND_DATA;
+		cmd->src = MSM_NAND_SFLASHC_STATUS;
+		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[8]);
+		cmd->len = 4;
+		cmd++;
+
+		/* Read the GENP3 register */
+		cmd->cmd = 0;
+		cmd->src = MSM_NAND_GENP_REG3;
+		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data3);
+		cmd->len = 4;
+		cmd++;
+
+		/* Read the DEVCMD4 register */
+		cmd->cmd = 0;
+		cmd->src = MSM_NAND_DEV_CMD4;
+		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data4);
+		cmd->len = 4;
+		cmd++;
+
+		/*************************************************************/
+		/* Restore the necessary registers to proper values          */
+		/*************************************************************/
+
+		/* Block on cmd ready and write CMD register */
+		cmd->cmd = DST_CRCI_NAND_CMD;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[9]);
+		cmd->dst = MSM_NAND_SFLASHC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Kick the execute command */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
+		cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Block on data ready, and read the status register */
+		cmd->cmd = SRC_CRCI_NAND_DATA;
+		cmd->src = MSM_NAND_SFLASHC_STATUS;
+		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[9]);
+		cmd->len = 4;
+		cmd++;
+
+
+		BUILD_BUG_ON(53 != ARRAY_SIZE(dma_buffer->cmd));
+		BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+		dma_buffer->cmd[0].cmd |= CMD_OCB;
+		cmd[-1].cmd |= CMD_OCU | CMD_LC;
+
+		dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
+				>> 3) | CMD_PTR_LP;
+
+		mb();
+		msm_dmov_exec_cmd(chip->dma_channel, crci_mask,
+			DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
+				&dma_buffer->cmdptr)));
+		mb();
+
+		ecc_status = (dma_buffer->data.data3 >> 16) & 0x0000FFFF;
+		interrupt_status = (dma_buffer->data.data4 >> 0) & 0x0000FFFF;
+		controller_status = (dma_buffer->data.data4 >> 16) & 0x0000FFFF;
+
+#if VERBOSE
+		pr_info("\n%s: sflash status %x %x %x %x %x %x %x"
+				" %x %x %x\n", __func__,
+					dma_buffer->data.sfstat[0],
+					dma_buffer->data.sfstat[1],
+					dma_buffer->data.sfstat[2],
+					dma_buffer->data.sfstat[3],
+					dma_buffer->data.sfstat[4],
+					dma_buffer->data.sfstat[5],
+					dma_buffer->data.sfstat[6],
+					dma_buffer->data.sfstat[7],
+					dma_buffer->data.sfstat[8],
+					dma_buffer->data.sfstat[9]);
+
+		pr_info("%s: controller_status = %x\n", __func__,
+					controller_status);
+		pr_info("%s: interrupt_status = %x\n", __func__,
+					interrupt_status);
+		pr_info("%s: ecc_status = %x\n", __func__,
+					ecc_status);
+#endif
+		/* Check for errors, protection violations etc */
+		if ((controller_status != 0)
+				|| (dma_buffer->data.sfstat[0] & 0x110)
+				|| (dma_buffer->data.sfstat[6] & 0x110)
+				|| (dma_buffer->data.sfstat[7] & 0x110)
+				|| (dma_buffer->data.sfstat[8] & 0x110)
+				|| (dma_buffer->data.sfstat[9] & 0x110)
+				|| ((dma_buffer->data.sfstat[1] & 0x110) &&
+								(ops->datbuf))
+				|| ((dma_buffer->data.sfstat[2] & 0x110) &&
+								(ops->datbuf))
+				|| ((dma_buffer->data.sfstat[3] & 0x110) &&
+								(ops->datbuf))
+				|| ((dma_buffer->data.sfstat[4] & 0x110) &&
+								(ops->datbuf))
+				|| ((dma_buffer->data.sfstat[5] & 0x110) &&
+								((ops->oobbuf)
+					|| (ops->mode == MTD_OOB_RAW)))) {
+			pr_info("%s: ECC/MPU/OP error\n", __func__);
+			err = -EIO;
+		}
+
+		if (err)
+			break;
+		pages_written++;
+		to_curr += mtd->writesize;
+	}
+
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+
+	dma_unmap_page(chip->dev, init_dma_addr, 64, DMA_TO_DEVICE);
+
+err_dma_map_initbuf_failed:
+	if (ops->oobbuf) {
+		dma_unmap_page(chip->dev, oob_dma_addr, ops->ooblen,
+							DMA_TO_DEVICE);
+	}
+err_dma_map_oobbuf_failed:
+	if (ops->datbuf) {
+		dma_unmap_page(chip->dev, data_dma_addr, ops->len,
+							DMA_TO_DEVICE);
+	}
+
+	if (err) {
+		pr_err("%s: %llx %x %x failed\n", __func__, to_curr,
+				ops->datbuf ? ops->len : 0, ops->ooblen);
+	} else {
+		ops->retlen = ops->oobretlen = 0;
+		if (ops->datbuf != NULL) {
+			if (ops->mode != MTD_OOB_RAW)
+				ops->retlen = mtd->writesize * pages_written;
+			else
+				ops->retlen = (mtd->writesize +  mtd->oobsize)
+							* pages_written;
+		}
+		if (ops->oobbuf != NULL) {
+			if (ops->mode == MTD_OOB_AUTO)
+				ops->oobretlen = mtd->oobavail * pages_written;
+			else
+				ops->oobretlen = mtd->oobsize * pages_written;
+		}
+	}
+
+#if VERBOSE
+	pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
+			__func__, err, ops->retlen, ops->oobretlen);
+
+	pr_info("================================================="
+			"================\n");
+#endif
+	kfree(init_spare_bytes);
+	return err;
+}
+
+static int msm_onenand_write(struct mtd_info *mtd, loff_t to, size_t len,
+		size_t *retlen, const u_char *buf)
+{
+	int ret;
+	struct mtd_oob_ops ops;
+
+	ops.mode = MTD_OOB_PLACE;
+	ops.datbuf = (uint8_t *)buf;
+	ops.len = len;
+	ops.retlen = 0;
+	ops.oobbuf = NULL;
+	ops.ooblen = 0;
+	ops.oobretlen = 0;
+	ret = msm_onenand_write_oob(mtd, to, &ops);
+	*retlen = ops.retlen;
+
+	return ret;
+}
+
+static int msm_onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+	struct msm_nand_chip *chip = mtd->priv;
+
+	struct {
+		dmov_s cmd[20];
+		unsigned cmdptr;
+		struct {
+			uint32_t sfbcfg;
+			uint32_t sfcmd[4];
+			uint32_t sfexec;
+			uint32_t sfstat[4];
+			uint32_t addr0;
+			uint32_t addr1;
+			uint32_t addr2;
+			uint32_t addr3;
+			uint32_t addr4;
+			uint32_t addr5;
+			uint32_t addr6;
+			uint32_t data0;
+			uint32_t data1;
+			uint32_t data2;
+			uint32_t data3;
+			uint32_t data4;
+			uint32_t data5;
+			uint32_t data6;
+		} data;
+	} *dma_buffer;
+	dmov_s *cmd;
+
+	int err = 0;
+
+	uint16_t onenand_startaddr1;
+	uint16_t onenand_startaddr8;
+	uint16_t onenand_startaddr2;
+	uint16_t onenand_startbuffer;
+
+	uint16_t controller_status;
+	uint16_t interrupt_status;
+	uint16_t ecc_status;
+
+	uint64_t temp;
+
+#if VERBOSE
+	pr_info("================================================="
+			"================\n");
+	pr_info("%s: addr 0x%llx len 0x%llx\n",
+			__func__, instr->addr, instr->len);
+#endif
+	if (instr->addr & (mtd->erasesize - 1)) {
+		pr_err("%s: Unsupported erase address, 0x%llx\n",
+				__func__, instr->addr);
+		return -EINVAL;
+	}
+	if (instr->len != mtd->erasesize) {
+		pr_err("%s: Unsupported erase len, %lld\n",
+				__func__, instr->len);
+		return -EINVAL;
+	}
+
+	wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
+				(chip, sizeof(*dma_buffer))));
+
+	cmd = dma_buffer->cmd;
+
+	temp = instr->addr;
+
+	if ((onenand_info.device_id & ONENAND_DEVICE_IS_DDP)
+		&& (temp >= (mtd->size>>1))) { /* DDP Device */
+			onenand_startaddr1 = DEVICE_FLASHCORE_1 |
+				(((uint32_t)(temp-(mtd->size>>1))
+						/ mtd->erasesize));
+			onenand_startaddr2 = DEVICE_BUFFERRAM_1;
+	} else {
+		onenand_startaddr1 = DEVICE_FLASHCORE_0 |
+			((uint32_t)temp / mtd->erasesize);
+		onenand_startaddr2 = DEVICE_BUFFERRAM_0;
+	}
+
+	onenand_startaddr8 = 0x0000;
+	onenand_startbuffer = DATARAM0_0 << 8;
+
+	dma_buffer->data.sfbcfg = SFLASH_BCFG |
+					(nand_sfcmd_mode ? 0 : (1 << 24));
+	dma_buffer->data.sfcmd[0] = SFLASH_PREPCMD(7, 0, 0,
+						MSM_NAND_SFCMD_CMDXS,
+						nand_sfcmd_mode,
+						MSM_NAND_SFCMD_REGWR);
+	dma_buffer->data.sfcmd[1] = SFLASH_PREPCMD(0, 0, 32,
+						MSM_NAND_SFCMD_CMDXS,
+						nand_sfcmd_mode,
+						MSM_NAND_SFCMD_INTHI);
+	dma_buffer->data.sfcmd[2] = SFLASH_PREPCMD(3, 7, 0,
+						MSM_NAND_SFCMD_DATXS,
+						nand_sfcmd_mode,
+						MSM_NAND_SFCMD_REGRD);
+	dma_buffer->data.sfcmd[3] = SFLASH_PREPCMD(4, 10, 0,
+						MSM_NAND_SFCMD_CMDXS,
+						nand_sfcmd_mode,
+						MSM_NAND_SFCMD_REGWR);
+	dma_buffer->data.sfexec = 1;
+	dma_buffer->data.sfstat[0] = CLEAN_DATA_32;
+	dma_buffer->data.sfstat[1] = CLEAN_DATA_32;
+	dma_buffer->data.sfstat[2] = CLEAN_DATA_32;
+	dma_buffer->data.sfstat[3] = CLEAN_DATA_32;
+	dma_buffer->data.addr0 = (ONENAND_INTERRUPT_STATUS << 16) |
+						(ONENAND_SYSTEM_CONFIG_1);
+	dma_buffer->data.addr1 = (ONENAND_START_ADDRESS_8 << 16) |
+						(ONENAND_START_ADDRESS_1);
+	dma_buffer->data.addr2 = (ONENAND_START_BUFFER << 16) |
+						(ONENAND_START_ADDRESS_2);
+	dma_buffer->data.addr3 = (ONENAND_ECC_STATUS << 16) |
+						(ONENAND_COMMAND);
+	dma_buffer->data.addr4 = (ONENAND_CONTROLLER_STATUS << 16) |
+						(ONENAND_INTERRUPT_STATUS);
+	dma_buffer->data.addr5 = (ONENAND_INTERRUPT_STATUS << 16) |
+						(ONENAND_SYSTEM_CONFIG_1);
+	dma_buffer->data.addr6 = (ONENAND_START_ADDRESS_3 << 16) |
+						(ONENAND_START_ADDRESS_1);
+	dma_buffer->data.data0 = (ONENAND_CLRINTR << 16) |
+				(ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
+	dma_buffer->data.data1 = (onenand_startaddr8 << 16) |
+						(onenand_startaddr1);
+	dma_buffer->data.data2 = (onenand_startbuffer << 16) |
+						(onenand_startaddr2);
+	dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
+						(ONENAND_CMDERAS);
+	dma_buffer->data.data4 = (CLEAN_DATA_16 << 16) |
+						(CLEAN_DATA_16);
+	dma_buffer->data.data5 = (ONENAND_CLRINTR << 16) |
+				(ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
+	dma_buffer->data.data6 = (ONENAND_STARTADDR3_RES << 16) |
+						(ONENAND_STARTADDR1_RES);
+
+	/***************************************************************/
+	/* Write the necessary address registers in the onenand device */
+	/***************************************************************/
+
+	/* Enable and configure the SFlash controller */
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfbcfg);
+	cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
+	cmd->len = 4;
+	cmd++;
+
+	/* Block on cmd ready and write CMD register */
+	cmd->cmd = DST_CRCI_NAND_CMD;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[0]);
+	cmd->dst = MSM_NAND_SFLASHC_CMD;
+	cmd->len = 4;
+	cmd++;
+
+	/* Write the ADDR0 and ADDR1 registers */
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
+	cmd->dst = MSM_NAND_ADDR0;
+	cmd->len = 8;
+	cmd++;
+
+	/* Write the ADDR2 ADDR3 ADDR4 ADDR5 registers */
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
+	cmd->dst = MSM_NAND_ADDR2;
+	cmd->len = 16;
+	cmd++;
+
+	/* Write the ADDR6 registers */
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr6);
+	cmd->dst = MSM_NAND_ADDR6;
+	cmd->len = 4;
+	cmd++;
+
+	/* Write the GENP0, GENP1, GENP2, GENP3, GENP4 registers */
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data0);
+	cmd->dst = MSM_NAND_GENP_REG0;
+	cmd->len = 16;
+	cmd++;
+
+	/* Write the FLASH_DEV_CMD4,5,6 registers */
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data4);
+	cmd->dst = MSM_NAND_DEV_CMD4;
+	cmd->len = 12;
+	cmd++;
+
+	/* Kick the execute command */
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
+	cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
+	cmd->len = 4;
+	cmd++;
+
+	/* Block on data ready, and read the status register */
+	cmd->cmd = SRC_CRCI_NAND_DATA;
+	cmd->src = MSM_NAND_SFLASHC_STATUS;
+	cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[0]);
+	cmd->len = 4;
+	cmd++;
+
+	/***************************************************************/
+	/* Wait for the interrupt from the Onenand device controller   */
+	/***************************************************************/
+
+	/* Block on cmd ready and write CMD register */
+	cmd->cmd = DST_CRCI_NAND_CMD;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[1]);
+	cmd->dst = MSM_NAND_SFLASHC_CMD;
+	cmd->len = 4;
+	cmd++;
+
+	/* Kick the execute command */
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
+	cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
+	cmd->len = 4;
+	cmd++;
+
+	/* Block on data ready, and read the status register */
+	cmd->cmd = SRC_CRCI_NAND_DATA;
+	cmd->src = MSM_NAND_SFLASHC_STATUS;
+	cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[1]);
+	cmd->len = 4;
+	cmd++;
+
+	/***************************************************************/
+	/* Read the necessary status registers from the onenand device */
+	/***************************************************************/
+
+	/* Block on cmd ready and write CMD register */
+	cmd->cmd = DST_CRCI_NAND_CMD;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[2]);
+	cmd->dst = MSM_NAND_SFLASHC_CMD;
+	cmd->len = 4;
+	cmd++;
+
+	/* Kick the execute command */
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
+	cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
+	cmd->len = 4;
+	cmd++;
+
+	/* Block on data ready, and read the status register */
+	cmd->cmd = SRC_CRCI_NAND_DATA;
+	cmd->src = MSM_NAND_SFLASHC_STATUS;
+	cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[2]);
+	cmd->len = 4;
+	cmd++;
+
+	/* Read the GENP3 register */
+	cmd->cmd = 0;
+	cmd->src = MSM_NAND_GENP_REG3;
+	cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data3);
+	cmd->len = 4;
+	cmd++;
+
+	/* Read the DEVCMD4 register */
+	cmd->cmd = 0;
+	cmd->src = MSM_NAND_DEV_CMD4;
+	cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data4);
+	cmd->len = 4;
+	cmd++;
+
+	/***************************************************************/
+	/* Restore the necessary registers to proper values            */
+	/***************************************************************/
+
+	/* Block on cmd ready and write CMD register */
+	cmd->cmd = DST_CRCI_NAND_CMD;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[3]);
+	cmd->dst = MSM_NAND_SFLASHC_CMD;
+	cmd->len = 4;
+	cmd++;
+
+	/* Kick the execute command */
+	cmd->cmd = 0;
+	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
+	cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
+	cmd->len = 4;
+	cmd++;
+
+	/* Block on data ready, and read the status register */
+	cmd->cmd = SRC_CRCI_NAND_DATA;
+	cmd->src = MSM_NAND_SFLASHC_STATUS;
+	cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[3]);
+	cmd->len = 4;
+	cmd++;
+
+
+	BUILD_BUG_ON(20 != ARRAY_SIZE(dma_buffer->cmd));
+	BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+	dma_buffer->cmd[0].cmd |= CMD_OCB;
+	cmd[-1].cmd |= CMD_OCU | CMD_LC;
+
+	dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
+			>> 3) | CMD_PTR_LP;
+
+	mb();
+	msm_dmov_exec_cmd(chip->dma_channel, crci_mask, DMOV_CMD_PTR_LIST
+			| DMOV_CMD_ADDR(msm_virt_to_dma(chip,
+			&dma_buffer->cmdptr)));
+	mb();
+
+	ecc_status = (dma_buffer->data.data3 >> 16) & 0x0000FFFF;
+	interrupt_status = (dma_buffer->data.data4 >> 0) & 0x0000FFFF;
+	controller_status = (dma_buffer->data.data4 >> 16) & 0x0000FFFF;
+
+#if VERBOSE
+	pr_info("\n%s: sflash status %x %x %x %x\n", __func__,
+				dma_buffer->data.sfstat[0],
+				dma_buffer->data.sfstat[1],
+				dma_buffer->data.sfstat[2],
+				dma_buffer->data.sfstat[3]);
+
+	pr_info("%s: controller_status = %x\n", __func__,
+				controller_status);
+	pr_info("%s: interrupt_status = %x\n", __func__,
+				interrupt_status);
+	pr_info("%s: ecc_status = %x\n", __func__,
+				ecc_status);
+#endif
+	/* Check for errors, protection violations etc */
+	if ((controller_status != 0)
+			|| (dma_buffer->data.sfstat[0] & 0x110)
+			|| (dma_buffer->data.sfstat[1] & 0x110)
+			|| (dma_buffer->data.sfstat[2] & 0x110)
+			|| (dma_buffer->data.sfstat[3] & 0x110)) {
+		pr_err("%s: ECC/MPU/OP error\n", __func__);
+		err = -EIO;
+	}
+
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+
+	if (err) {
+		pr_err("%s: Erase failed, 0x%llx\n", __func__,
+				instr->addr);
+		instr->fail_addr = instr->addr;
+		instr->state = MTD_ERASE_FAILED;
+	} else {
+		instr->state = MTD_ERASE_DONE;
+		instr->fail_addr = 0xffffffff;
+		mtd_erase_callback(instr);
+	}
+
+#if VERBOSE
+	pr_info("\n%s: ret %d\n", __func__, err);
+	pr_info("===================================================="
+			"=============\n");
+#endif
+	return err;
+}
+
+static int msm_onenand_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+	struct mtd_oob_ops ops;
+	int rval, i;
+	int ret = 0;
+	uint8_t *buffer;
+	uint8_t *oobptr;
+
+	if ((ofs > mtd->size) || (ofs & (mtd->erasesize - 1))) {
+		pr_err("%s: unsupported block address, 0x%x\n",
+			 __func__, (uint32_t)ofs);
+		return -EINVAL;
+	}
+
+	buffer = kmalloc(2112, GFP_KERNEL|GFP_DMA);
+	if (!buffer) {
+		pr_err("%s: Could not kmalloc for buffer\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	memset(buffer, 0x00, 2112);
+	oobptr = &(buffer[2048]);
+
+	ops.mode = MTD_OOB_RAW;
+	ops.len = 2112;
+	ops.retlen = 0;
+	ops.ooblen = 0;
+	ops.oobretlen = 0;
+	ops.ooboffs = 0;
+	ops.datbuf = buffer;
+	ops.oobbuf = NULL;
+
+	for (i = 0; i < 2; i++) {
+		ofs = ofs + i*mtd->writesize;
+		rval = msm_onenand_read_oob(mtd, ofs, &ops);
+		if (rval) {
+			pr_err("%s: Error in reading bad blk info\n",
+					__func__);
+			ret = rval;
+			break;
+		}
+		if ((oobptr[0] != 0xFF) || (oobptr[1] != 0xFF) ||
+		    (oobptr[16] != 0xFF) || (oobptr[17] != 0xFF) ||
+		    (oobptr[32] != 0xFF) || (oobptr[33] != 0xFF) ||
+		    (oobptr[48] != 0xFF) || (oobptr[49] != 0xFF)
+		   ) {
+			ret = 1;
+			break;
+		}
+	}
+
+	kfree(buffer);
+
+#if VERBOSE
+	if (ret == 1)
+		pr_info("%s: Block containing 0x%x is bad\n",
+				__func__, (unsigned int)ofs);
+#endif
+	return ret;
+}
+
+static int msm_onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+	struct mtd_oob_ops ops;
+	int rval, i;
+	int ret = 0;
+	uint8_t *buffer;
+
+	if ((ofs > mtd->size) || (ofs & (mtd->erasesize - 1))) {
+		pr_err("%s: unsupported block address, 0x%x\n",
+			 __func__, (uint32_t)ofs);
+		return -EINVAL;
+	}
+
+	buffer = page_address(ZERO_PAGE());
+
+	ops.mode = MTD_OOB_RAW;
+	ops.len = 2112;
+	ops.retlen = 0;
+	ops.ooblen = 0;
+	ops.oobretlen = 0;
+	ops.ooboffs = 0;
+	ops.datbuf = buffer;
+	ops.oobbuf = NULL;
+
+	for (i = 0; i < 2; i++) {
+		ofs = ofs + i*mtd->writesize;
+		rval = msm_onenand_write_oob(mtd, ofs, &ops);
+		if (rval) {
+			pr_err("%s: Error in writing bad blk info\n",
+					__func__);
+			ret = rval;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static int msm_onenand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	struct msm_nand_chip *chip = mtd->priv;
+
+	struct {
+		dmov_s cmd[20];
+		unsigned cmdptr;
+		struct {
+			uint32_t sfbcfg;
+			uint32_t sfcmd[4];
+			uint32_t sfexec;
+			uint32_t sfstat[4];
+			uint32_t addr0;
+			uint32_t addr1;
+			uint32_t addr2;
+			uint32_t addr3;
+			uint32_t addr4;
+			uint32_t addr5;
+			uint32_t addr6;
+			uint32_t data0;
+			uint32_t data1;
+			uint32_t data2;
+			uint32_t data3;
+			uint32_t data4;
+			uint32_t data5;
+			uint32_t data6;
+		} data;
+	} *dma_buffer;
+	dmov_s *cmd;
+
+	int err = 0;
+
+	uint16_t onenand_startaddr1;
+	uint16_t onenand_startaddr8;
+	uint16_t onenand_startaddr2;
+	uint16_t onenand_startblock;
+
+	uint16_t controller_status;
+	uint16_t interrupt_status;
+	uint16_t write_prot_status;
+
+	uint64_t start_ofs;
+
+#if VERBOSE
+	pr_info("===================================================="
+			"=============\n");
+	pr_info("%s: ofs 0x%llx len %lld\n", __func__, ofs, len);
+#endif
+	/* 'ofs' & 'len' should align to block size */
+	if (ofs&(mtd->erasesize - 1)) {
+		pr_err("%s: Unsupported ofs address, 0x%llx\n",
+				__func__, ofs);
+		return -EINVAL;
+	}
+
+	if (len&(mtd->erasesize - 1)) {
+		pr_err("%s: Unsupported len, %lld\n",
+				__func__, len);
+		return -EINVAL;
+	}
+
+	if (ofs+len > mtd->size) {
+		pr_err("%s: Maximum chip size exceeded\n", __func__);
+		return -EINVAL;
+	}
+
+	wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
+				(chip, sizeof(*dma_buffer))));
+
+	for (start_ofs = ofs; ofs < start_ofs+len; ofs = ofs+mtd->erasesize) {
+#if VERBOSE
+		pr_info("%s: ofs 0x%llx len %lld\n", __func__, ofs, len);
+#endif
+
+		cmd = dma_buffer->cmd;
+		if ((onenand_info.device_id & ONENAND_DEVICE_IS_DDP)
+			&& (ofs >= (mtd->size>>1))) { /* DDP Device */
+			onenand_startaddr1 = DEVICE_FLASHCORE_1 |
+				(((uint32_t)(ofs - (mtd->size>>1))
+						/ mtd->erasesize));
+			onenand_startaddr2 = DEVICE_BUFFERRAM_1;
+			onenand_startblock = ((uint32_t)(ofs - (mtd->size>>1))
+						/ mtd->erasesize);
+		} else {
+			onenand_startaddr1 = DEVICE_FLASHCORE_0 |
+					((uint32_t)ofs / mtd->erasesize);
+			onenand_startaddr2 = DEVICE_BUFFERRAM_0;
+			onenand_startblock = ((uint32_t)ofs
+						/ mtd->erasesize);
+		}
+
+		onenand_startaddr8 = 0x0000;
+		dma_buffer->data.sfbcfg = SFLASH_BCFG |
+					(nand_sfcmd_mode ? 0 : (1 << 24));
+		dma_buffer->data.sfcmd[0] = SFLASH_PREPCMD(7, 0, 0,
+							MSM_NAND_SFCMD_CMDXS,
+							nand_sfcmd_mode,
+							MSM_NAND_SFCMD_REGWR);
+		dma_buffer->data.sfcmd[1] = SFLASH_PREPCMD(0, 0, 32,
+							MSM_NAND_SFCMD_CMDXS,
+							nand_sfcmd_mode,
+							MSM_NAND_SFCMD_INTHI);
+		dma_buffer->data.sfcmd[2] = SFLASH_PREPCMD(3, 7, 0,
+							MSM_NAND_SFCMD_DATXS,
+							nand_sfcmd_mode,
+							MSM_NAND_SFCMD_REGRD);
+		dma_buffer->data.sfcmd[3] = SFLASH_PREPCMD(4, 10, 0,
+							MSM_NAND_SFCMD_CMDXS,
+							nand_sfcmd_mode,
+							MSM_NAND_SFCMD_REGWR);
+		dma_buffer->data.sfexec = 1;
+		dma_buffer->data.sfstat[0] = CLEAN_DATA_32;
+		dma_buffer->data.sfstat[1] = CLEAN_DATA_32;
+		dma_buffer->data.sfstat[2] = CLEAN_DATA_32;
+		dma_buffer->data.sfstat[3] = CLEAN_DATA_32;
+		dma_buffer->data.addr0 = (ONENAND_INTERRUPT_STATUS << 16) |
+						(ONENAND_SYSTEM_CONFIG_1);
+		dma_buffer->data.addr1 = (ONENAND_START_ADDRESS_8 << 16) |
+						(ONENAND_START_ADDRESS_1);
+		dma_buffer->data.addr2 = (ONENAND_START_BLOCK_ADDRESS << 16) |
+						(ONENAND_START_ADDRESS_2);
+		dma_buffer->data.addr3 = (ONENAND_WRITE_PROT_STATUS << 16) |
+						(ONENAND_COMMAND);
+		dma_buffer->data.addr4 = (ONENAND_CONTROLLER_STATUS << 16) |
+						(ONENAND_INTERRUPT_STATUS);
+		dma_buffer->data.addr5 = (ONENAND_INTERRUPT_STATUS << 16) |
+						(ONENAND_SYSTEM_CONFIG_1);
+		dma_buffer->data.addr6 = (ONENAND_START_ADDRESS_3 << 16) |
+						(ONENAND_START_ADDRESS_1);
+		dma_buffer->data.data0 = (ONENAND_CLRINTR << 16) |
+				(ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
+		dma_buffer->data.data1 = (onenand_startaddr8 << 16) |
+						(onenand_startaddr1);
+		dma_buffer->data.data2 = (onenand_startblock << 16) |
+						(onenand_startaddr2);
+		dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
+						(ONENAND_CMD_UNLOCK);
+		dma_buffer->data.data4 = (CLEAN_DATA_16 << 16) |
+						(CLEAN_DATA_16);
+		dma_buffer->data.data5 = (ONENAND_CLRINTR << 16) |
+				(ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
+		dma_buffer->data.data6 = (ONENAND_STARTADDR3_RES << 16) |
+						(ONENAND_STARTADDR1_RES);
+
+		/*************************************************************/
+		/* Write the necessary address reg in the onenand device     */
+		/*************************************************************/
+
+		/* Enable and configure the SFlash controller */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfbcfg);
+		cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
+		cmd->len = 4;
+		cmd++;
+
+		/* Block on cmd ready and write CMD register */
+		cmd->cmd = DST_CRCI_NAND_CMD;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[0]);
+		cmd->dst = MSM_NAND_SFLASHC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Write the ADDR0 and ADDR1 registers */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
+		cmd->dst = MSM_NAND_ADDR0;
+		cmd->len = 8;
+		cmd++;
+
+		/* Write the ADDR2 ADDR3 ADDR4 ADDR5 registers */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
+		cmd->dst = MSM_NAND_ADDR2;
+		cmd->len = 16;
+		cmd++;
+
+		/* Write the ADDR6 registers */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr6);
+		cmd->dst = MSM_NAND_ADDR6;
+		cmd->len = 4;
+		cmd++;
+
+		/* Write the GENP0, GENP1, GENP2, GENP3, GENP4 registers */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data0);
+		cmd->dst = MSM_NAND_GENP_REG0;
+		cmd->len = 16;
+		cmd++;
+
+		/* Write the FLASH_DEV_CMD4,5,6 registers */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data4);
+		cmd->dst = MSM_NAND_DEV_CMD4;
+		cmd->len = 12;
+		cmd++;
+
+		/* Kick the execute command */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
+		cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Block on data ready, and read the status register */
+		cmd->cmd = SRC_CRCI_NAND_DATA;
+		cmd->src = MSM_NAND_SFLASHC_STATUS;
+		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[0]);
+		cmd->len = 4;
+		cmd++;
+
+		/*************************************************************/
+		/* Wait for the interrupt from the Onenand device controller */
+		/*************************************************************/
+
+		/* Block on cmd ready and write CMD register */
+		cmd->cmd = DST_CRCI_NAND_CMD;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[1]);
+		cmd->dst = MSM_NAND_SFLASHC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Kick the execute command */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
+		cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Block on data ready, and read the status register */
+		cmd->cmd = SRC_CRCI_NAND_DATA;
+		cmd->src = MSM_NAND_SFLASHC_STATUS;
+		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[1]);
+		cmd->len = 4;
+		cmd++;
+
+		/*********************************************************/
+		/* Read the necessary status reg from the onenand device */
+		/*********************************************************/
+
+		/* Block on cmd ready and write CMD register */
+		cmd->cmd = DST_CRCI_NAND_CMD;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[2]);
+		cmd->dst = MSM_NAND_SFLASHC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Kick the execute command */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
+		cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Block on data ready, and read the status register */
+		cmd->cmd = SRC_CRCI_NAND_DATA;
+		cmd->src = MSM_NAND_SFLASHC_STATUS;
+		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[2]);
+		cmd->len = 4;
+		cmd++;
+
+		/* Read the GENP3 register */
+		cmd->cmd = 0;
+		cmd->src = MSM_NAND_GENP_REG3;
+		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data3);
+		cmd->len = 4;
+		cmd++;
+
+		/* Read the DEVCMD4 register */
+		cmd->cmd = 0;
+		cmd->src = MSM_NAND_DEV_CMD4;
+		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data4);
+		cmd->len = 4;
+		cmd++;
+
+		/************************************************************/
+		/* Restore the necessary registers to proper values         */
+		/************************************************************/
+
+		/* Block on cmd ready and write CMD register */
+		cmd->cmd = DST_CRCI_NAND_CMD;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[3]);
+		cmd->dst = MSM_NAND_SFLASHC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Kick the execute command */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
+		cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Block on data ready, and read the status register */
+		cmd->cmd = SRC_CRCI_NAND_DATA;
+		cmd->src = MSM_NAND_SFLASHC_STATUS;
+		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[3]);
+		cmd->len = 4;
+		cmd++;
+
+
+		BUILD_BUG_ON(20 != ARRAY_SIZE(dma_buffer->cmd));
+		BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+		dma_buffer->cmd[0].cmd |= CMD_OCB;
+		cmd[-1].cmd |= CMD_OCU | CMD_LC;
+
+		dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
+				>> 3) | CMD_PTR_LP;
+
+		mb();
+		msm_dmov_exec_cmd(chip->dma_channel, crci_mask,
+			DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
+				&dma_buffer->cmdptr)));
+		mb();
+
+		write_prot_status = (dma_buffer->data.data3 >> 16) & 0x0000FFFF;
+		interrupt_status = (dma_buffer->data.data4 >> 0) & 0x0000FFFF;
+		controller_status = (dma_buffer->data.data4 >> 16) & 0x0000FFFF;
+
+#if VERBOSE
+		pr_info("\n%s: sflash status %x %x %x %x\n", __func__,
+					dma_buffer->data.sfstat[0],
+					dma_buffer->data.sfstat[1],
+					dma_buffer->data.sfstat[2],
+					dma_buffer->data.sfstat[3]);
+
+		pr_info("%s: controller_status = %x\n", __func__,
+					controller_status);
+		pr_info("%s: interrupt_status = %x\n", __func__,
+					interrupt_status);
+		pr_info("%s: write_prot_status = %x\n", __func__,
+					write_prot_status);
+#endif
+		/* Check for errors, protection violations etc */
+		if ((controller_status != 0)
+				|| (dma_buffer->data.sfstat[0] & 0x110)
+				|| (dma_buffer->data.sfstat[1] & 0x110)
+				|| (dma_buffer->data.sfstat[2] & 0x110)
+				|| (dma_buffer->data.sfstat[3] & 0x110)) {
+			pr_err("%s: ECC/MPU/OP error\n", __func__);
+			err = -EIO;
+		}
+
+		if (!(write_prot_status & ONENAND_WP_US)) {
+			pr_err("%s: Unexpected status ofs = 0x%llx, "
+				"wp_status = %x\n",
+				__func__, ofs, write_prot_status);
+			err = -EIO;
+		}
+
+		if (err)
+			break;
+	}
+
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+
+#if VERBOSE
+	pr_info("\n%s: ret %d\n", __func__, err);
+	pr_info("===================================================="
+			"=============\n");
+#endif
+	return err;
+}
+
+static int msm_onenand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	struct msm_nand_chip *chip = mtd->priv;
+
+	struct {
+		dmov_s cmd[20];
+		unsigned cmdptr;
+		struct {
+			uint32_t sfbcfg;
+			uint32_t sfcmd[4];
+			uint32_t sfexec;
+			uint32_t sfstat[4];
+			uint32_t addr0;
+			uint32_t addr1;
+			uint32_t addr2;
+			uint32_t addr3;
+			uint32_t addr4;
+			uint32_t addr5;
+			uint32_t addr6;
+			uint32_t data0;
+			uint32_t data1;
+			uint32_t data2;
+			uint32_t data3;
+			uint32_t data4;
+			uint32_t data5;
+			uint32_t data6;
+		} data;
+	} *dma_buffer;
+	dmov_s *cmd;
+
+	int err = 0;
+
+	uint16_t onenand_startaddr1;
+	uint16_t onenand_startaddr8;
+	uint16_t onenand_startaddr2;
+	uint16_t onenand_startblock;
+
+	uint16_t controller_status;
+	uint16_t interrupt_status;
+	uint16_t write_prot_status;
+
+	uint64_t start_ofs;
+
+#if VERBOSE
+	pr_info("===================================================="
+			"=============\n");
+	pr_info("%s: ofs 0x%llx len %lld\n", __func__, ofs, len);
+#endif
+	/* 'ofs' & 'len' should align to block size */
+	if (ofs&(mtd->erasesize - 1)) {
+		pr_err("%s: Unsupported ofs address, 0x%llx\n",
+				__func__, ofs);
+		return -EINVAL;
+	}
+
+	if (len&(mtd->erasesize - 1)) {
+		pr_err("%s: Unsupported len, %lld\n",
+				__func__, len);
+		return -EINVAL;
+	}
+
+	if (ofs+len > mtd->size) {
+		pr_err("%s: Maximum chip size exceeded\n", __func__);
+		return -EINVAL;
+	}
+
+	wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
+				(chip, sizeof(*dma_buffer))));
+
+	for (start_ofs = ofs; ofs < start_ofs+len; ofs = ofs+mtd->erasesize) {
+#if VERBOSE
+		pr_info("%s: ofs 0x%llx len %lld\n", __func__, ofs, len);
+#endif
+
+		cmd = dma_buffer->cmd;
+		if ((onenand_info.device_id & ONENAND_DEVICE_IS_DDP)
+			&& (ofs >= (mtd->size>>1))) { /* DDP Device */
+			onenand_startaddr1 = DEVICE_FLASHCORE_1 |
+				(((uint32_t)(ofs - (mtd->size>>1))
+						/ mtd->erasesize));
+			onenand_startaddr2 = DEVICE_BUFFERRAM_1;
+			onenand_startblock = ((uint32_t)(ofs - (mtd->size>>1))
+						/ mtd->erasesize);
+		} else {
+			onenand_startaddr1 = DEVICE_FLASHCORE_0 |
+					((uint32_t)ofs / mtd->erasesize);
+			onenand_startaddr2 = DEVICE_BUFFERRAM_0;
+			onenand_startblock = ((uint32_t)ofs
+						/ mtd->erasesize);
+		}
+
+		onenand_startaddr8 = 0x0000;
+		dma_buffer->data.sfbcfg = SFLASH_BCFG |
+					(nand_sfcmd_mode ? 0 : (1 << 24));
+		dma_buffer->data.sfcmd[0] = SFLASH_PREPCMD(7, 0, 0,
+							MSM_NAND_SFCMD_CMDXS,
+							nand_sfcmd_mode,
+							MSM_NAND_SFCMD_REGWR);
+		dma_buffer->data.sfcmd[1] = SFLASH_PREPCMD(0, 0, 32,
+							MSM_NAND_SFCMD_CMDXS,
+							nand_sfcmd_mode,
+							MSM_NAND_SFCMD_INTHI);
+		dma_buffer->data.sfcmd[2] = SFLASH_PREPCMD(3, 7, 0,
+							MSM_NAND_SFCMD_DATXS,
+							nand_sfcmd_mode,
+							MSM_NAND_SFCMD_REGRD);
+		dma_buffer->data.sfcmd[3] = SFLASH_PREPCMD(4, 10, 0,
+							MSM_NAND_SFCMD_CMDXS,
+							nand_sfcmd_mode,
+							MSM_NAND_SFCMD_REGWR);
+		dma_buffer->data.sfexec = 1;
+		dma_buffer->data.sfstat[0] = CLEAN_DATA_32;
+		dma_buffer->data.sfstat[1] = CLEAN_DATA_32;
+		dma_buffer->data.sfstat[2] = CLEAN_DATA_32;
+		dma_buffer->data.sfstat[3] = CLEAN_DATA_32;
+		dma_buffer->data.addr0 = (ONENAND_INTERRUPT_STATUS << 16) |
+						(ONENAND_SYSTEM_CONFIG_1);
+		dma_buffer->data.addr1 = (ONENAND_START_ADDRESS_8 << 16) |
+						(ONENAND_START_ADDRESS_1);
+		dma_buffer->data.addr2 = (ONENAND_START_BLOCK_ADDRESS << 16) |
+						(ONENAND_START_ADDRESS_2);
+		dma_buffer->data.addr3 = (ONENAND_WRITE_PROT_STATUS << 16) |
+						(ONENAND_COMMAND);
+		dma_buffer->data.addr4 = (ONENAND_CONTROLLER_STATUS << 16) |
+						(ONENAND_INTERRUPT_STATUS);
+		dma_buffer->data.addr5 = (ONENAND_INTERRUPT_STATUS << 16) |
+						(ONENAND_SYSTEM_CONFIG_1);
+		dma_buffer->data.addr6 = (ONENAND_START_ADDRESS_3 << 16) |
+						(ONENAND_START_ADDRESS_1);
+		dma_buffer->data.data0 = (ONENAND_CLRINTR << 16) |
+				(ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
+		dma_buffer->data.data1 = (onenand_startaddr8 << 16) |
+						(onenand_startaddr1);
+		dma_buffer->data.data2 = (onenand_startblock << 16) |
+						(onenand_startaddr2);
+		dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
+						(ONENAND_CMD_LOCK);
+		dma_buffer->data.data4 = (CLEAN_DATA_16 << 16) |
+						(CLEAN_DATA_16);
+		dma_buffer->data.data5 = (ONENAND_CLRINTR << 16) |
+				(ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
+		dma_buffer->data.data6 = (ONENAND_STARTADDR3_RES << 16) |
+						(ONENAND_STARTADDR1_RES);
+
+		/*************************************************************/
+		/* Write the necessary address reg in the onenand device     */
+		/*************************************************************/
+
+		/* Enable and configure the SFlash controller */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfbcfg);
+		cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
+		cmd->len = 4;
+		cmd++;
+
+		/* Block on cmd ready and write CMD register */
+		cmd->cmd = DST_CRCI_NAND_CMD;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[0]);
+		cmd->dst = MSM_NAND_SFLASHC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Write the ADDR0 and ADDR1 registers */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
+		cmd->dst = MSM_NAND_ADDR0;
+		cmd->len = 8;
+		cmd++;
+
+		/* Write the ADDR2 ADDR3 ADDR4 ADDR5 registers */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
+		cmd->dst = MSM_NAND_ADDR2;
+		cmd->len = 16;
+		cmd++;
+
+		/* Write the ADDR6 registers */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr6);
+		cmd->dst = MSM_NAND_ADDR6;
+		cmd->len = 4;
+		cmd++;
+
+		/* Write the GENP0, GENP1, GENP2, GENP3, GENP4 registers */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data0);
+		cmd->dst = MSM_NAND_GENP_REG0;
+		cmd->len = 16;
+		cmd++;
+
+		/* Write the FLASH_DEV_CMD4,5,6 registers */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data4);
+		cmd->dst = MSM_NAND_DEV_CMD4;
+		cmd->len = 12;
+		cmd++;
+
+		/* Kick the execute command */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
+		cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Block on data ready, and read the status register */
+		cmd->cmd = SRC_CRCI_NAND_DATA;
+		cmd->src = MSM_NAND_SFLASHC_STATUS;
+		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[0]);
+		cmd->len = 4;
+		cmd++;
+
+		/*************************************************************/
+		/* Wait for the interrupt from the Onenand device controller */
+		/*************************************************************/
+
+		/* Block on cmd ready and write CMD register */
+		cmd->cmd = DST_CRCI_NAND_CMD;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[1]);
+		cmd->dst = MSM_NAND_SFLASHC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Kick the execute command */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
+		cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Block on data ready, and read the status register */
+		cmd->cmd = SRC_CRCI_NAND_DATA;
+		cmd->src = MSM_NAND_SFLASHC_STATUS;
+		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[1]);
+		cmd->len = 4;
+		cmd++;
+
+		/*********************************************************/
+		/* Read the necessary status reg from the onenand device */
+		/*********************************************************/
+
+		/* Block on cmd ready and write CMD register */
+		cmd->cmd = DST_CRCI_NAND_CMD;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[2]);
+		cmd->dst = MSM_NAND_SFLASHC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Kick the execute command */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
+		cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Block on data ready, and read the status register */
+		cmd->cmd = SRC_CRCI_NAND_DATA;
+		cmd->src = MSM_NAND_SFLASHC_STATUS;
+		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[2]);
+		cmd->len = 4;
+		cmd++;
+
+		/* Read the GENP3 register */
+		cmd->cmd = 0;
+		cmd->src = MSM_NAND_GENP_REG3;
+		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data3);
+		cmd->len = 4;
+		cmd++;
+
+		/* Read the DEVCMD4 register */
+		cmd->cmd = 0;
+		cmd->src = MSM_NAND_DEV_CMD4;
+		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data4);
+		cmd->len = 4;
+		cmd++;
+
+		/************************************************************/
+		/* Restore the necessary registers to proper values         */
+		/************************************************************/
+
+		/* Block on cmd ready and write CMD register */
+		cmd->cmd = DST_CRCI_NAND_CMD;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[3]);
+		cmd->dst = MSM_NAND_SFLASHC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Kick the execute command */
+		cmd->cmd = 0;
+		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
+		cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
+		cmd->len = 4;
+		cmd++;
+
+		/* Block on data ready, and read the status register */
+		cmd->cmd = SRC_CRCI_NAND_DATA;
+		cmd->src = MSM_NAND_SFLASHC_STATUS;
+		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[3]);
+		cmd->len = 4;
+		cmd++;
+
+
+		BUILD_BUG_ON(20 != ARRAY_SIZE(dma_buffer->cmd));
+		BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+		dma_buffer->cmd[0].cmd |= CMD_OCB;
+		cmd[-1].cmd |= CMD_OCU | CMD_LC;
+
+		dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
+				>> 3) | CMD_PTR_LP;
+
+		mb();
+		msm_dmov_exec_cmd(chip->dma_channel, crci_mask,
+			DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
+				&dma_buffer->cmdptr)));
+		mb();
+
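+		/*
+		 * A brief note (inferred from the command list above): the
+		 * register-read phase left the OneNAND write-protect status
+		 * in GENP3[31:16] and the interrupt/controller status halves
+		 * in FLASH_DEV_CMD4; unpack them here.
+		 */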
+		write_prot_status = (dma_buffer->data.data3 >> 16) & 0x0000FFFF;
+		interrupt_status = (dma_buffer->data.data4 >> 0) & 0x0000FFFF;
+		controller_status = (dma_buffer->data.data4 >> 16) & 0x0000FFFF;
+
+#if VERBOSE
+		pr_info("\n%s: sflash status %x %x %x %x\n", __func__,
+					dma_buffer->data.sfstat[0],
+					dma_buffer->data.sfstat[1],
+					dma_buffer->data.sfstat[2],
+					dma_buffer->data.sfstat[3]);
+
+		pr_info("%s: controller_status = %x\n", __func__,
+					controller_status);
+		pr_info("%s: interrupt_status = %x\n", __func__,
+					interrupt_status);
+		pr_info("%s: write_prot_status = %x\n", __func__,
+					write_prot_status);
+#endif
+		/* Check for errors, protection violations etc */
+		if ((controller_status != 0)
+				|| (dma_buffer->data.sfstat[0] & 0x110)
+				|| (dma_buffer->data.sfstat[1] & 0x110)
+				|| (dma_buffer->data.sfstat[2] & 0x110)
+				|| (dma_buffer->data.sfstat[3] & 0x110)) {
+			pr_err("%s: ECC/MPU/OP error\n", __func__);
+			err = -EIO;
+		}
+
+		if (!(write_prot_status & ONENAND_WP_LS)) {
+			pr_err("%s: Unexpected status ofs = 0x%llx, "
+				"wp_status = %x\n",
+				__func__, ofs, write_prot_status);
+			err = -EIO;
+		}
+
+		if (err)
+			break;
+	}
+
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+
+#if VERBOSE
+	pr_info("\n%s: ret %d\n", __func__, err);
+	pr_info("===================================================="
+			"=============\n");
+#endif
+	return err;
+}
+
+static int msm_onenand_suspend(struct mtd_info *mtd)
+{
+	return 0;
+}
+
+static void msm_onenand_resume(struct mtd_info *mtd)
+{
+}
+
+int msm_onenand_scan(struct mtd_info *mtd, int maxchips)
+{
+	struct msm_nand_chip *chip = mtd->priv;
+
+	/* Probe and check whether onenand device is present */
+	if (flash_onenand_probe(chip))
+		return -ENODEV;
+
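+	/*
+	 * Derive the geometry from the OneNAND ID registers (assumption based
+	 * on the standard OneNAND ID layout): the density field of the device
+	 * ID selects the size in powers of two starting at 16 MiB, the page
+	 * size equals the data buffer size, and each block holds 64 pages
+	 * with an OOB area of 1/32 of the page size.
+	 */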
+	mtd->size = 0x1000000 << ((onenand_info.device_id & 0xF0) >> 4);
+	mtd->writesize = onenand_info.data_buf_size;
+	mtd->oobsize = mtd->writesize >> 5;
+	mtd->erasesize = mtd->writesize << 6;
+	mtd->oobavail = msm_onenand_oob_64.oobavail;
+	mtd->ecclayout = &msm_onenand_oob_64;
+
+	mtd->type = MTD_NANDFLASH;
+	mtd->flags = MTD_CAP_NANDFLASH;
+	mtd->erase = msm_onenand_erase;
+	mtd->point = NULL;
+	mtd->unpoint = NULL;
+	mtd->read = msm_onenand_read;
+	mtd->write = msm_onenand_write;
+	mtd->read_oob = msm_onenand_read_oob;
+	mtd->write_oob = msm_onenand_write_oob;
+	mtd->lock = msm_onenand_lock;
+	mtd->unlock = msm_onenand_unlock;
+	mtd->suspend = msm_onenand_suspend;
+	mtd->resume = msm_onenand_resume;
+	mtd->block_isbad = msm_onenand_block_isbad;
+	mtd->block_markbad = msm_onenand_block_markbad;
+	mtd->owner = THIS_MODULE;
+
+	pr_info("Found a supported onenand device\n");
+
+	return 0;
+}
+
+/**
+ * msm_nand_scan - [msm_nand Interface] Scan for the msm_nand device
+ * @param mtd		MTD device structure
+ * @param maxchips	Number of chips to scan for
+ *
+ * This fills out all the not initialized function pointers
+ * with the defaults.
+ * The flash ID is read and the mtd/chip structures are
+ * filled with the appropriate values.
+ */
+int msm_nand_scan(struct mtd_info *mtd, int maxchips)
+{
+	struct msm_nand_chip *chip = mtd->priv;
+	uint32_t flash_id = 0, i, mtd_writesize;
+	uint8_t dev_found = 0;
+	uint8_t wide_bus;
+	uint32_t manid;
+	uint32_t devid;
+	uint32_t devcfg;
+	struct nand_flash_dev *flashdev = NULL;
+	struct nand_manufacturers  *flashman = NULL;
+
+	/* Probe the Flash device for ONFI compliance */
+	if (!flash_onfi_probe(chip)) {
+		dev_found = 1;
+	} else {
+		/* Read the Flash ID from the Nand Flash Device */
+		flash_id = flash_read_id(chip);
+		manid = flash_id & 0xFF;
+		devid = (flash_id >> 8) & 0xFF;
+		devcfg = (flash_id >> 24) & 0xFF;
+
+		for (i = 0; !flashman && nand_manuf_ids[i].id; ++i)
+			if (nand_manuf_ids[i].id == manid)
+				flashman = &nand_manuf_ids[i];
+		for (i = 0; !flashdev && nand_flash_ids[i].id; ++i)
+			if (nand_flash_ids[i].id == devid)
+				flashdev = &nand_flash_ids[i];
+		if (!flashdev || !flashman) {
+			pr_err("ERROR: unknown nand device manuf=%x devid=%x\n",
+				manid, devid);
+			return -ENOENT;
+		} else
+			dev_found = 1;
+
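+		/*
+		 * Entries with a zero pagesize in nand_flash_ids describe
+		 * parts whose geometry is encoded in the 4th ID byte
+		 * (devcfg); otherwise take the geometry directly from the
+		 * table entry.
+		 */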
+		if (!flashdev->pagesize) {
+			supported_flash.flash_id = flash_id;
+			supported_flash.density = flashdev->chipsize << 20;
+			supported_flash.widebus = devcfg & (1 << 6) ? 1 : 0;
+			supported_flash.pagesize = 1024 << (devcfg & 0x3);
+			supported_flash.blksize = (64 * 1024) <<
+							((devcfg >> 4) & 0x3);
+			supported_flash.oobsize = (8 << ((devcfg >> 2) & 1)) *
+				(supported_flash.pagesize >> 9);
+		} else {
+			supported_flash.flash_id = flash_id;
+			supported_flash.density = flashdev->chipsize << 20;
+			supported_flash.widebus = flashdev->options &
+					 NAND_BUSWIDTH_16 ? 1 : 0;
+			supported_flash.pagesize = flashdev->pagesize;
+			supported_flash.blksize = flashdev->erasesize;
+			supported_flash.oobsize = flashdev->pagesize >> 5;
+		}
+	}
+
+	if (dev_found) {
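+		/*
+		 * When two controllers interleave, the device appears twice
+		 * as large to MTD, so scale the geometry accordingly.
+		 */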
+		i = (!interleave_enable) ? 1 : 2;
+		wide_bus       = supported_flash.widebus;
+		mtd->size      = supported_flash.density  * i;
+		mtd->writesize = supported_flash.pagesize * i;
+		mtd->oobsize   = supported_flash.oobsize  * i;
+		mtd->erasesize = supported_flash.blksize  * i;
+
+		if (!interleave_enable)
+			mtd_writesize = mtd->writesize;
+		else
+			mtd_writesize = mtd->writesize >> 1;
+
+		/* Check whether controller and NAND device support 8bit ECC*/
+		if ((flash_rd_reg(chip, MSM_NAND_HW_INFO) == 0x307)
+				&& (supported_flash.ecc_correctability >= 8)) {
+			pr_info("Found supported NAND device for %dbit ECC\n",
+					supported_flash.ecc_correctability);
+			enable_bch_ecc = 1;
+		} else {
+			pr_info("Found a supported NAND device\n");
+		}
+		pr_info("NAND Id  : 0x%x\n", supported_flash.flash_id);
+		pr_info("Buswidth : %d Bits\n", (wide_bus) ? 16 : 8);
+		pr_info("Density  : %lld MByte\n", (mtd->size>>20));
+		pr_info("Pagesize : %d Bytes\n", mtd->writesize);
+		pr_info("Erasesize: %d Bytes\n", mtd->erasesize);
+		pr_info("Oobsize  : %d Bytes\n", mtd->oobsize);
+	} else {
+		pr_err("Unsupported NAND, Id: 0x%x\n", flash_id);
+		return -ENODEV;
+	}
+
+	/* Size of each codeword is 532 bytes in case of 8-bit BCH ECC */
+	chip->cw_size = enable_bch_ecc ? 532 : 528;
+	chip->CFG0 = (((mtd_writesize >> 9)-1) << 6) /* 4/8 cw/pg for 2/4k */
+		|  (516 <<  9)  /* 516 user data bytes */
+		|   (10 << 19)  /* 10 parity bytes */
+		|    (5 << 27)  /* 5 address cycles */
+		|    (0 << 30)  /* Do not read status before data */
+		|    (1 << 31)  /* Send read cmd */
+		/* 0 spare bytes for 16 bit nand or 1/2 spare bytes for 8 bit */
+		| (wide_bus ? 0 << 23 : (enable_bch_ecc ? 2 << 23 : 1 << 23));
+
+	chip->CFG1 = (0 <<  0)  /* Enable ecc */
+		|    (7 <<  2)  /* 8 recovery cycles */
+		|    (0 <<  5)  /* Allow CS deassertion */
+		/* Bad block marker location */
+		|  ((mtd_writesize - (chip->cw_size * (
+					(mtd_writesize >> 9) - 1)) + 1) <<  6)
+		|    (0 << 16)  /* Bad block in user data area */
+		|    (2 << 17)  /* 6 cycle tWB/tRB */
+		| ((wide_bus) ? CFG1_WIDE_FLASH : 0); /* Wide flash bit */
+
+	chip->ecc_buf_cfg = 0x203;
+	chip->CFG0_RAW = 0xA80420C0;
+	chip->CFG1_RAW = 0x5045D;
+
+	if (enable_bch_ecc) {
+		chip->CFG1 |= (1 << 27); /* Enable BCH engine */
+		chip->ecc_bch_cfg = (0 << 0) /* Enable ECC*/
+			|   (0 << 1) /* Enable/Disable SW reset of ECC engine */
+			|   (1 << 4) /* 8bit ecc*/
+			|   ((wide_bus) ? (14 << 8) : (13 << 8))/*parity bytes*/
+			|   (516 << 16) /* 516 user data bytes */
+			|   (1 << 30); /* Turn on ECC engine clocks always */
+		chip->CFG0_RAW = 0xA80428C0; /* CW size is increased to 532B */
+	}
+
+	/*
+	 * For 4bit RS ECC (default ECC), parity bytes = 10 (for x8 and x16 I/O)
+	 * For 8bit BCH ECC, parity bytes = 13 (x8) or 14 (x16 I/O).
+	 */
+	chip->ecc_parity_bytes = enable_bch_ecc ? (wide_bus ? 14 : 13) : 10;
+
+	pr_info("CFG0 Init  : 0x%08x\n", chip->CFG0);
+	pr_info("CFG1 Init  : 0x%08x\n", chip->CFG1);
+	pr_info("ECCBUFCFG  : 0x%08x\n", chip->ecc_buf_cfg);
+
+	if (mtd->oobsize == 64) {
+		mtd->oobavail = msm_nand_oob_64.oobavail;
+		mtd->ecclayout = &msm_nand_oob_64;
+	} else if (mtd->oobsize == 128) {
+		mtd->oobavail = msm_nand_oob_128.oobavail;
+		mtd->ecclayout = &msm_nand_oob_128;
+	} else if (mtd->oobsize == 224) {
+		mtd->oobavail = wide_bus ? msm_nand_oob_224_x16.oobavail :
+			msm_nand_oob_224_x8.oobavail;
+		mtd->ecclayout = wide_bus ? &msm_nand_oob_224_x16 :
+			&msm_nand_oob_224_x8;
+	} else if (mtd->oobsize == 256) {
+		mtd->oobavail = msm_nand_oob_256.oobavail;
+		mtd->ecclayout = &msm_nand_oob_256;
+	} else {
+		pr_err("Unsupported NAND, oobsize: 0x%x\n",
+		       mtd->oobsize);
+		return -ENODEV;
+	}
+
+	/* Fill in remaining MTD driver data */
+	mtd->type = MTD_NANDFLASH;
+	mtd->flags = MTD_CAP_NANDFLASH;
+	/* mtd->ecctype = MTD_ECC_SW; */
+	mtd->erase = msm_nand_erase;
+	mtd->block_isbad = msm_nand_block_isbad;
+	mtd->block_markbad = msm_nand_block_markbad;
+	mtd->point = NULL;
+	mtd->unpoint = NULL;
+	mtd->read = msm_nand_read;
+	mtd->write = msm_nand_write;
+	mtd->read_oob  = msm_nand_read_oob;
+	mtd->write_oob = msm_nand_write_oob;
+	if (dual_nand_ctlr_present) {
+		mtd->read_oob = msm_nand_read_oob_dualnandc;
+		mtd->write_oob = msm_nand_write_oob_dualnandc;
+		if (interleave_enable) {
+			mtd->erase = msm_nand_erase_dualnandc;
+			mtd->block_isbad = msm_nand_block_isbad_dualnandc;
+		}
+	}
+
+	/* mtd->sync = msm_nand_sync; */
+	mtd->lock = NULL;
+	/* mtd->unlock = msm_nand_unlock; */
+	mtd->suspend = msm_nand_suspend;
+	mtd->resume = msm_nand_resume;
+	mtd->owner = THIS_MODULE;
+
+	/* Unlock whole block */
+	/* msm_nand_unlock_all(mtd); */
+
+	/* return this->scan_bbt(mtd); */
+	return 0;
+}
+EXPORT_SYMBOL_GPL(msm_nand_scan);
+
+/**
+ * msm_nand_release - [msm_nand Interface] Free resources held by the msm_nand device
+ * @param mtd		MTD device structure
+ */
+void msm_nand_release(struct mtd_info *mtd)
+{
+	/* struct msm_nand_chip *this = mtd->priv; */
+
+#ifdef CONFIG_MTD_PARTITIONS
+	/* Deregister partitions */
+	del_mtd_partitions(mtd);
+#endif
+	/* Deregister the device */
+	mtd_device_unregister(mtd);
+}
+EXPORT_SYMBOL_GPL(msm_nand_release);
+
+#ifdef CONFIG_MTD_PARTITIONS
+static const char *part_probes[] = { "cmdlinepart", NULL,  };
+#endif
+
+struct msm_nand_info {
+	struct mtd_info		mtd;
+	struct mtd_partition	*parts;
+	struct msm_nand_chip	msm_nand;
+};
+
+/* duplicating the NC01 XFR contents to NC10 */
+static int msm_nand_nc10_xfr_settings(struct mtd_info *mtd)
+{
+	struct msm_nand_chip *chip = mtd->priv;
+
+	struct {
+		dmov_s cmd[2];
+		unsigned cmdptr;
+	} *dma_buffer;
+	dmov_s *cmd;
+
+	wait_event(chip->wait_queue,
+		(dma_buffer = msm_nand_get_dma_buffer(
+				chip, sizeof(*dma_buffer))));
+
+	cmd = dma_buffer->cmd;
+
+	/* Copying XFR register contents from NC01 --> NC10 */
+	cmd->cmd = 0;
+	cmd->src = NC01(MSM_NAND_XFR_STEP1);
+	cmd->dst = NC10(MSM_NAND_XFR_STEP1);
+	cmd->len = 28;
+	cmd++;
+
+	BUILD_BUG_ON(2 != ARRAY_SIZE(dma_buffer->cmd));
+	BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+	dma_buffer->cmd[0].cmd |= CMD_OCB;
+	cmd[-1].cmd |= CMD_OCU | CMD_LC;
+	dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3)
+				| CMD_PTR_LP;
+
+	mb();
+	msm_dmov_exec_cmd(chip->dma_channel, crci_mask, DMOV_CMD_PTR_LIST
+			| DMOV_CMD_ADDR(msm_virt_to_dma(chip,
+			&dma_buffer->cmdptr)));
+	mb();
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+	return 0;
+}
+
+#ifdef CONFIG_MTD_PARTITIONS
+static void setup_mtd_device(struct platform_device *pdev,
+			     struct msm_nand_info *info)
+{
+	int i, nr_parts;
+	struct flash_platform_data *pdata = pdev->dev.platform_data;
+
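+	/*
+	 * Platform data specifies partition offsets and sizes in units of
+	 * erase blocks; convert them to bytes for the MTD core.
+	 */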
+	for (i = 0; i < pdata->nr_parts; i++) {
+		pdata->parts[i].offset = pdata->parts[i].offset
+			* info->mtd.erasesize;
+		pdata->parts[i].size = pdata->parts[i].size
+			* info->mtd.erasesize;
+	}
+
+	nr_parts = parse_mtd_partitions(&info->mtd, part_probes, &info->parts,
+					0);
+	if (nr_parts > 0)
+		add_mtd_partitions(&info->mtd, info->parts, nr_parts);
+	else if (nr_parts <= 0 && pdata && pdata->parts)
+		add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts);
+	else
+		mtd_device_register(&info->mtd, NULL, 0);
+}
+#else
+static void setup_mtd_device(struct platform_device *pdev,
+			     struct msm_nand_info *info)
+{
+	mtd_device_register(&info->mtd, NULL, 0);
+}
+#endif
+
+static int __devinit msm_nand_probe(struct platform_device *pdev)
+{
+	struct msm_nand_info *info;
+	struct resource *res;
+	int err;
+	struct flash_platform_data *plat_data;
+
+	plat_data = pdev->dev.platform_data;
+
+	res = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "msm_nand_phys");
+	if (!res || !res->start) {
+		pr_err("%s: msm_nand_phys resource invalid/absent\n",
+				__func__);
+		return -ENODEV;
+	}
+	msm_nand_phys = res->start;
+	pr_info("%s: phys addr 0x%lx\n", __func__, msm_nand_phys);
+
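+	/*
+	 * The NC01/NC10/NC11 and EBI2 resources are optional; if any of
+	 * them is absent, fall back to single NAND controller operation.
+	 */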
+	res = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "msm_nandc01_phys");
+	if (!res || !res->start)
+		goto no_dual_nand_ctlr_support;
+	msm_nandc01_phys = res->start;
+
+	res = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "msm_nandc10_phys");
+	if (!res || !res->start)
+		goto no_dual_nand_ctlr_support;
+	msm_nandc10_phys = res->start;
+
+	res = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "msm_nandc11_phys");
+	if (!res || !res->start)
+		goto no_dual_nand_ctlr_support;
+	msm_nandc11_phys = res->start;
+
+	res = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "ebi2_reg_base");
+	if (!res || !res->start)
+		goto no_dual_nand_ctlr_support;
+	ebi2_register_base = res->start;
+
+	dual_nand_ctlr_present = 1;
+	if (plat_data != NULL)
+		interleave_enable = plat_data->interleave;
+	else
+		interleave_enable = 0;
+
+	if (!interleave_enable)
+		pr_info("%s: Dual Nand Ctrl in ping-pong mode\n", __func__);
+	else
+		pr_info("%s: Dual Nand Ctrl in interleave mode\n", __func__);
+
+no_dual_nand_ctlr_support:
+	res = platform_get_resource_byname(pdev,
+					IORESOURCE_DMA, "msm_nand_dmac");
+	if (!res || !res->start) {
+		pr_err("%s: invalid msm_nand_dmac resource\n", __func__);
+		return -ENODEV;
+	}
+
+	info = kzalloc(sizeof(struct msm_nand_info), GFP_KERNEL);
+	if (!info) {
+		pr_err("%s: No memory for msm_nand_info\n", __func__);
+		return -ENOMEM;
+	}
+
+	info->msm_nand.dev = &pdev->dev;
+
+	init_waitqueue_head(&info->msm_nand.wait_queue);
+
+	info->msm_nand.dma_channel = res->start;
+	pr_info("%s: dmac 0x%x\n", __func__, info->msm_nand.dma_channel);
+
+	/* this currently fails if dev is passed in */
+	info->msm_nand.dma_buffer =
+		dma_alloc_coherent(/*dev*/ NULL, MSM_NAND_DMA_BUFFER_SIZE,
+				&info->msm_nand.dma_addr, GFP_KERNEL);
+	if (info->msm_nand.dma_buffer == NULL) {
+		pr_err("%s: No memory for msm_nand.dma_buffer\n", __func__);
+		err = -ENOMEM;
+		goto out_free_info;
+	}
+
+	pr_info("%s: allocated dma buffer at %p, dma_addr %x\n",
+		__func__, info->msm_nand.dma_buffer, info->msm_nand.dma_addr);
+
+	crci_mask = msm_dmov_build_crci_mask(2,
+			DMOV_NAND_CRCI_DATA, DMOV_NAND_CRCI_CMD);
+
+	info->mtd.name = dev_name(&pdev->dev);
+	info->mtd.priv = &info->msm_nand;
+	info->mtd.owner = THIS_MODULE;
+
+	/* Configure the EBI2_CFG register only for ping-pong mode */
+	if (!interleave_enable && dual_nand_ctlr_present)
+		flash_wr_reg(&info->msm_nand, EBI2_CFG_REG, 0x4010080);
+
+	if (dual_nand_ctlr_present)
+		msm_nand_nc10_xfr_settings(&info->mtd);
+
+	if (msm_nand_scan(&info->mtd, 1))
+		if (msm_onenand_scan(&info->mtd, 1)) {
+			pr_err("%s: No nand device found\n", __func__);
+			err = -ENXIO;
+			goto out_free_dma_buffer;
+		}
+
+	setup_mtd_device(pdev, info);
+	dev_set_drvdata(&pdev->dev, info);
+
+	return 0;
+
+out_free_dma_buffer:
+	dma_free_coherent(NULL, MSM_NAND_DMA_BUFFER_SIZE,
+			info->msm_nand.dma_buffer,
+			info->msm_nand.dma_addr);
+out_free_info:
+	kfree(info);
+
+	return err;
+}
+
+static int __devexit msm_nand_remove(struct platform_device *pdev)
+{
+	struct msm_nand_info *info = dev_get_drvdata(&pdev->dev);
+
+	dev_set_drvdata(&pdev->dev, NULL);
+
+	if (info) {
+#ifdef CONFIG_MTD_PARTITIONS
+		if (info->parts)
+			del_mtd_partitions(&info->mtd);
+		else
+#endif
+			mtd_device_unregister(&info->mtd);
+
+		msm_nand_release(&info->mtd);
+		dma_free_coherent(NULL, MSM_NAND_DMA_BUFFER_SIZE,
+				  info->msm_nand.dma_buffer,
+				  info->msm_nand.dma_addr);
+		kfree(info);
+	}
+
+	return 0;
+}
+
+#define DRIVER_NAME "msm_nand"
+
+static struct platform_driver msm_nand_driver = {
+	.probe		= msm_nand_probe,
+	.remove		= __devexit_p(msm_nand_remove),
+	.driver = {
+		.name		= DRIVER_NAME,
+	}
+};
+
+MODULE_ALIAS(DRIVER_NAME);
+
+static int __init msm_nand_init(void)
+{
+	return platform_driver_register(&msm_nand_driver);
+}
+
+static void __exit msm_nand_exit(void)
+{
+	platform_driver_unregister(&msm_nand_driver);
+}
+
+module_init(msm_nand_init);
+module_exit(msm_nand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("msm_nand flash driver code");
diff --git a/drivers/mtd/devices/msm_nand.h b/drivers/mtd/devices/msm_nand.h
new file mode 100644
index 0000000..2729c6b
--- /dev/null
+++ b/drivers/mtd/devices/msm_nand.h
@@ -0,0 +1,195 @@
+/* drivers/mtd/devices/msm_nand.h
+ *
+ * Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_MTD_DEVICES_MSM_NAND_H
+#define __DRIVERS_MTD_DEVICES_MSM_NAND_H
+
+#include <mach/msm_iomap.h>
+
+extern unsigned long msm_nand_phys;
+extern unsigned long msm_nandc01_phys;
+extern unsigned long msm_nandc10_phys;
+extern unsigned long msm_nandc11_phys;
+extern unsigned long ebi2_register_base;
+
+#define NC01(X) ((X) + msm_nandc01_phys - msm_nand_phys)
+#define NC10(X) ((X) + msm_nandc10_phys - msm_nand_phys)
+#define NC11(X) ((X) + msm_nandc11_phys - msm_nand_phys)
+
+#define MSM_NAND_REG(off) (msm_nand_phys + (off))
+
+#define MSM_NAND_FLASH_CMD            MSM_NAND_REG(0x0000)
+#define MSM_NAND_ADDR0                MSM_NAND_REG(0x0004)
+#define MSM_NAND_ADDR1                MSM_NAND_REG(0x0008)
+#define MSM_NAND_FLASH_CHIP_SELECT    MSM_NAND_REG(0x000C)
+#define MSM_NAND_EXEC_CMD             MSM_NAND_REG(0x0010)
+#define MSM_NAND_FLASH_STATUS         MSM_NAND_REG(0x0014)
+#define MSM_NAND_BUFFER_STATUS        MSM_NAND_REG(0x0018)
+#define MSM_NAND_SFLASHC_STATUS       MSM_NAND_REG(0x001C)
+#define MSM_NAND_DEV0_CFG0            MSM_NAND_REG(0x0020)
+#define MSM_NAND_DEV0_CFG1            MSM_NAND_REG(0x0024)
+#define MSM_NAND_DEV0_ECC_CFG	      MSM_NAND_REG(0x0028)
+#define MSM_NAND_DEV1_ECC_CFG	      MSM_NAND_REG(0x002C)
+#define MSM_NAND_DEV1_CFG0            MSM_NAND_REG(0x0030)
+#define MSM_NAND_DEV1_CFG1            MSM_NAND_REG(0x0034)
+#define MSM_NAND_SFLASHC_CMD          MSM_NAND_REG(0x0038)
+#define MSM_NAND_SFLASHC_EXEC_CMD     MSM_NAND_REG(0x003C)
+#define MSM_NAND_READ_ID              MSM_NAND_REG(0x0040)
+#define MSM_NAND_READ_STATUS          MSM_NAND_REG(0x0044)
+#define MSM_NAND_CONFIG_DATA          MSM_NAND_REG(0x0050)
+#define MSM_NAND_CONFIG               MSM_NAND_REG(0x0054)
+#define MSM_NAND_CONFIG_MODE          MSM_NAND_REG(0x0058)
+#define MSM_NAND_CONFIG_STATUS        MSM_NAND_REG(0x0060)
+#define MSM_NAND_MACRO1_REG           MSM_NAND_REG(0x0064)
+#define MSM_NAND_XFR_STEP1            MSM_NAND_REG(0x0070)
+#define MSM_NAND_XFR_STEP2            MSM_NAND_REG(0x0074)
+#define MSM_NAND_XFR_STEP3            MSM_NAND_REG(0x0078)
+#define MSM_NAND_XFR_STEP4            MSM_NAND_REG(0x007C)
+#define MSM_NAND_XFR_STEP5            MSM_NAND_REG(0x0080)
+#define MSM_NAND_XFR_STEP6            MSM_NAND_REG(0x0084)
+#define MSM_NAND_XFR_STEP7            MSM_NAND_REG(0x0088)
+#define MSM_NAND_GENP_REG0            MSM_NAND_REG(0x0090)
+#define MSM_NAND_GENP_REG1            MSM_NAND_REG(0x0094)
+#define MSM_NAND_GENP_REG2            MSM_NAND_REG(0x0098)
+#define MSM_NAND_GENP_REG3            MSM_NAND_REG(0x009C)
+#define MSM_NAND_DEV_CMD0             MSM_NAND_REG(0x00A0)
+#define MSM_NAND_DEV_CMD1             MSM_NAND_REG(0x00A4)
+#define MSM_NAND_DEV_CMD2             MSM_NAND_REG(0x00A8)
+#define MSM_NAND_DEV_CMD_VLD          MSM_NAND_REG(0x00AC)
+#define MSM_NAND_EBI2_MISR_SIG_REG    MSM_NAND_REG(0x00B0)
+#define MSM_NAND_ADDR2                MSM_NAND_REG(0x00C0)
+#define MSM_NAND_ADDR3                MSM_NAND_REG(0x00C4)
+#define MSM_NAND_ADDR4                MSM_NAND_REG(0x00C8)
+#define MSM_NAND_ADDR5                MSM_NAND_REG(0x00CC)
+#define MSM_NAND_DEV_CMD3             MSM_NAND_REG(0x00D0)
+#define MSM_NAND_DEV_CMD4             MSM_NAND_REG(0x00D4)
+#define MSM_NAND_DEV_CMD5             MSM_NAND_REG(0x00D8)
+#define MSM_NAND_DEV_CMD6             MSM_NAND_REG(0x00DC)
+#define MSM_NAND_SFLASHC_BURST_CFG    MSM_NAND_REG(0x00E0)
+#define MSM_NAND_ADDR6                MSM_NAND_REG(0x00E4)
+#define MSM_NAND_EBI2_ECC_BUF_CFG     MSM_NAND_REG(0x00F0)
+#define MSM_NAND_HW_INFO	      MSM_NAND_REG(0x00FC)
+#define MSM_NAND_FLASH_BUFFER         MSM_NAND_REG(0x0100)
+
+/* device commands */
+
+#define MSM_NAND_CMD_SOFT_RESET         0x01
+#define MSM_NAND_CMD_PAGE_READ          0x32
+#define MSM_NAND_CMD_PAGE_READ_ECC      0x33
+#define MSM_NAND_CMD_PAGE_READ_ALL      0x34
+#define MSM_NAND_CMD_SEQ_PAGE_READ      0x15
+#define MSM_NAND_CMD_PRG_PAGE           0x36
+#define MSM_NAND_CMD_PRG_PAGE_ECC       0x37
+#define MSM_NAND_CMD_PRG_PAGE_ALL       0x39
+#define MSM_NAND_CMD_BLOCK_ERASE        0x3A
+#define MSM_NAND_CMD_FETCH_ID           0x0B
+#define MSM_NAND_CMD_STATUS             0x0C
+#define MSM_NAND_CMD_RESET              0x0D
+
+/* Sflash Commands */
+
+#define MSM_NAND_SFCMD_DATXS            0x0
+#define MSM_NAND_SFCMD_CMDXS            0x1
+#define MSM_NAND_SFCMD_BURST            0x0
+#define MSM_NAND_SFCMD_ASYNC            0x1
+#define MSM_NAND_SFCMD_ABORT            0x1
+#define MSM_NAND_SFCMD_REGRD            0x2
+#define MSM_NAND_SFCMD_REGWR            0x3
+#define MSM_NAND_SFCMD_INTLO            0x4
+#define MSM_NAND_SFCMD_INTHI            0x5
+#define MSM_NAND_SFCMD_DATRD            0x6
+#define MSM_NAND_SFCMD_DATWR            0x7
+
+#define SFLASH_PREPCMD(numxfr, offval, delval, trnstp, mode, opcode) \
+	((numxfr<<20)|(offval<<12)|(delval<<6)|(trnstp<<5)|(mode<<4)|opcode)
+
+#define SFLASH_BCFG			0x20100327
+
+/* Onenand addresses */
+
+#define ONENAND_MANUFACTURER_ID		0xF000
+#define ONENAND_DEVICE_ID		0xF001
+#define ONENAND_VERSION_ID		0xF002
+#define ONENAND_DATA_BUFFER_SIZE	0xF003
+#define ONENAND_BOOT_BUFFER_SIZE	0xF004
+#define ONENAND_AMOUNT_OF_BUFFERS	0xF005
+#define ONENAND_TECHNOLOGY		0xF006
+#define ONENAND_START_ADDRESS_1		0xF100
+#define ONENAND_START_ADDRESS_2		0xF101
+#define ONENAND_START_ADDRESS_3		0xF102
+#define ONENAND_START_ADDRESS_4		0xF103
+#define ONENAND_START_ADDRESS_5		0xF104
+#define ONENAND_START_ADDRESS_6		0xF105
+#define ONENAND_START_ADDRESS_7		0xF106
+#define ONENAND_START_ADDRESS_8		0xF107
+#define ONENAND_START_BUFFER		0xF200
+#define ONENAND_COMMAND			0xF220
+#define ONENAND_SYSTEM_CONFIG_1		0xF221
+#define ONENAND_SYSTEM_CONFIG_2		0xF222
+#define ONENAND_CONTROLLER_STATUS	0xF240
+#define ONENAND_INTERRUPT_STATUS	0xF241
+#define ONENAND_START_BLOCK_ADDRESS	0xF24C
+#define ONENAND_WRITE_PROT_STATUS	0xF24E
+#define ONENAND_ECC_STATUS		0xFF00
+#define ONENAND_ECC_ERRPOS_MAIN0	0xFF01
+#define ONENAND_ECC_ERRPOS_SPARE0	0xFF02
+#define ONENAND_ECC_ERRPOS_MAIN1	0xFF03
+#define ONENAND_ECC_ERRPOS_SPARE1	0xFF04
+#define ONENAND_ECC_ERRPOS_MAIN2	0xFF05
+#define ONENAND_ECC_ERRPOS_SPARE2	0xFF06
+#define ONENAND_ECC_ERRPOS_MAIN3	0xFF07
+#define ONENAND_ECC_ERRPOS_SPARE3	0xFF08
+
+/* Onenand commands */
+#define ONENAND_WP_US                   (1 << 2)
+#define ONENAND_WP_LS                   (1 << 1)
+
+#define ONENAND_CMDLOAD			0x0000
+#define ONENAND_CMDLOADSPARE		0x0013
+#define ONENAND_CMDPROG			0x0080
+#define ONENAND_CMDPROGSPARE		0x001A
+#define ONENAND_CMDERAS			0x0094
+#define ONENAND_CMD_UNLOCK              0x0023
+#define ONENAND_CMD_LOCK                0x002A
+
+#define ONENAND_SYSCFG1_ECCENA(mode)	(0x40E0 | (mode ? 0 : 0x8002))
+#define ONENAND_SYSCFG1_ECCDIS(mode)	(0x41E0 | (mode ? 0 : 0x8002))
+
+#define ONENAND_CLRINTR			0x0000
+#define ONENAND_STARTADDR1_RES		0x07FF
+#define ONENAND_STARTADDR3_RES		0x07FF
+
+#define DATARAM0_0			0x8
+#define DEVICE_FLASHCORE_0              (0 << 15)
+#define DEVICE_FLASHCORE_1              (1 << 15)
+#define DEVICE_BUFFERRAM_0              (0 << 15)
+#define DEVICE_BUFFERRAM_1              (1 << 15)
+#define ONENAND_DEVICE_IS_DDP		(1 << 3)
+
+#define CLEAN_DATA_16			0xFFFF
+#define CLEAN_DATA_32			0xFFFFFFFF
+
+#define EBI2_REG(off)   		(ebi2_register_base + (off))
+#define EBI2_CHIP_SELECT_CFG0           EBI2_REG(0x0000)
+#define EBI2_CFG_REG		       	EBI2_REG(0x0004)
+#define EBI2_NAND_ADM_MUX       	EBI2_REG(0x005C)
+
+#define MSM_NAND_BUF_STAT_UNCRCTBL_ERR	(1 << 8)
+#define MSM_NAND_BUF_STAT_NUM_ERR_MASK	(enable_bch_ecc ? 0x1F : 0x0F)
+
+extern struct flash_platform_data msm_nand_data;
+
+#endif
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index ca38569..25f66e4 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -424,6 +424,13 @@
 		goto error3;
 
 	new->rq->queuedata = new;
+
+	/*
+	 * Empirical measurements revealed that read ahead values larger than
+	 * 4 slowed down boot time, so start out with this small value.
+	 */
+	new->rq->backing_dev_info.ra_pages = (4 * 1024) / PAGE_CACHE_SIZE;
+
 	blk_queue_logical_block_size(new->rq, tr->blksize);
 
 	if (tr->discard) {
diff --git a/drivers/mtd/tests/Makefile b/drivers/mtd/tests/Makefile
index b44dcab..8089d9d 100644
--- a/drivers/mtd/tests/Makefile
+++ b/drivers/mtd/tests/Makefile
@@ -6,3 +6,4 @@
 obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o
 obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o
 obj-$(CONFIG_MTD_TESTS) += mtd_nandecctest.o
+obj-$(CONFIG_MTD_TESTS) += mtd_erasepart.o
diff --git a/drivers/mtd/tests/mtd_erasepart.c b/drivers/mtd/tests/mtd_erasepart.c
new file mode 100644
index 0000000..b679a7c
--- /dev/null
+++ b/drivers/mtd/tests/mtd_erasepart.c
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ * Copyright (C) 2006-2008 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; see the file COPYING. If not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Erase the given MTD partition.
+ *
+ */
+
+#include <asm/div64.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/err.h>
+#include <linux/mtd/mtd.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#define PRINT_PREF KERN_INFO "mtd_erasepart: "
+
+static int dev;
+module_param(dev, int, S_IRUGO);
+MODULE_PARM_DESC(dev, "MTD device number to use");
+
+static struct mtd_info *mtd;
+static unsigned char *bbt;
+static int ebcnt;
+
+static int erase_eraseblock(int ebnum)
+{
+	int err;
+	struct erase_info ei;
+	loff_t addr = ebnum * mtd->erasesize;
+
+	memset(&ei, 0, sizeof(struct erase_info));
+	ei.mtd  = mtd;
+	ei.addr = addr;
+	ei.len  = mtd->erasesize;
+
+	err = mtd->erase(mtd, &ei);
+	if (err) {
+		printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
+		return err;
+	}
+
+	if (ei.state == MTD_ERASE_FAILED) {
+		printk(PRINT_PREF "some erase error occurred at EB %d\n",
+		       ebnum);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int erase_whole_device(void)
+{
+	int err;
+	unsigned int i;
+
+	printk(PRINT_PREF "erasing whole device\n");
+	for (i = 0; i < ebcnt; ++i) {
+		if (bbt[i])
+			continue;
+		err = erase_eraseblock(i);
+		if (err)
+			return err;
+		cond_resched();
+	}
+	printk(PRINT_PREF "erased %u eraseblocks\n", i);
+	return 0;
+}
+
+static int is_block_bad(int ebnum)
+{
+	int ret;
+	loff_t addr = ebnum * mtd->erasesize;
+
+	ret = mtd->block_isbad(mtd, addr);
+	if (ret)
+		printk(PRINT_PREF "block %d is bad\n", ebnum);
+	return ret;
+}
+
+static int scan_for_bad_eraseblocks(void)
+{
+	int i, bad = 0;
+
+	bbt = kmalloc(ebcnt, GFP_KERNEL);
+	if (!bbt) {
+		printk(PRINT_PREF "error: cannot allocate memory\n");
+		return -ENOMEM;
+	}
+	memset(bbt, 0, ebcnt);
+
+	printk(PRINT_PREF "scanning for bad eraseblocks\n");
+	for (i = 0; i < ebcnt; ++i) {
+		bbt[i] = is_block_bad(i) ? 1 : 0;
+		if (bbt[i])
+			bad += 1;
+		cond_resched();
+	}
+	printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
+	return 0;
+}
+
+static int __init mtd_erasepart_init(void)
+{
+	int err = 0;
+	uint64_t tmp;
+
+	printk(KERN_INFO "\n");
+	printk(KERN_INFO "=================================================\n");
+	printk(PRINT_PREF "MTD device: %d\n", dev);
+
+	mtd = get_mtd_device(NULL, dev);
+	if (IS_ERR(mtd)) {
+		err = PTR_ERR(mtd);
+		printk(PRINT_PREF "error: cannot get MTD device\n");
+		return err;
+	}
+
+	if (mtd->type != MTD_NANDFLASH) {
+		printk(PRINT_PREF "this test requires NAND flash\n");
+		err = -ENODEV;
+		goto out2;
+	}
+
+	tmp = mtd->size;
+	do_div(tmp, mtd->erasesize);
+	ebcnt = tmp;
+
+	printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
+	       "page size %u, count of eraseblocks %u\n",
+	       (unsigned long long)mtd->size, mtd->erasesize,
+	       mtd->writesize, ebcnt);
+
+	err = scan_for_bad_eraseblocks();
+	if (err)
+		goto out1;
+
+	printk(PRINT_PREF "Erasing the whole mtd partition\n");
+
+	err = erase_whole_device();
+out1:
+	kfree(bbt);
+out2:
+	put_mtd_device(mtd);
+	if (err)
+		printk(PRINT_PREF "error %d occurred\n", err);
+	printk(KERN_INFO "=================================================\n");
+	return err;
+}
+module_init(mtd_erasepart_init);
+
+static void __exit mtd_erasepart_exit(void)
+{
+	return;
+}
+module_exit(mtd_erasepart_exit);
+
+MODULE_DESCRIPTION("Erase a given MTD partition");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 906ef8f..f6c44c6 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2561,6 +2561,15 @@
 	  ML7223 is companion chip for Intel Atom E6xx series.
 	  ML7223 is completely compatible for Intel EG20T PCH.
 
+config QFEC
+	tristate "QFEC ethernet driver"
+	select MII
+	depends on ARM
+	help
+	  This driver supports Ethernet in the FSM9xxx.
+	  To compile this driver as a module, choose M here: the
+	  module will be called qfec.
+
 endif # NETDEV_1000
 
 #
@@ -3431,6 +3440,38 @@
 	If you want to log kernel messages over the network, enable this.
 	See <file:Documentation/networking/netconsole.txt> for details.
 
+config MSM_RMNET
+	tristate "MSM RMNET Virtual Network Device"
+	depends on ARCH_MSM
+	default y
+	help
+	  Virtual ethernet interface for MSM RMNET transport.
+
+config MSM_RMNET_SDIO
+	bool "RMNET SDIO Driver"
+	depends on MSM_SDIO_DMUX
+	default n
+	help
+	  Implements RMNET over the SDIO interface.
+	  RMNET provides a virtual ethernet interface
+	  for routing IP packets within the MSM using
+	  SDIO as a physical transport.
+
+config MSM_RMNET_BAM
+	bool "RMNET BAM Driver"
+	depends on MSM_BAM_DMUX
+	default n
+	help
+	  Implements RMNET over BAM interface.
+	  RMNET provides a virtual ethernet interface
+	  for routing IP packets within the MSM using
+	  BAM as a physical transport.
+
+config MSM_RMNET_DEBUG
+	bool "MSM RMNET debug interface"
+	depends on MSM_RMNET
+	default n
+	help
+	  Exposes debug statistics on packet-triggered wakeup
+	  counts through sysfs attributes on the rmnet devices.
+
+
 config NETCONSOLE_DYNAMIC
 	bool "Dynamic reconfiguration of logging targets"
 	depends on NETCONSOLE && SYSFS && CONFIGFS_FS && \
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 13ef4df..43079b3 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -269,6 +269,7 @@
 obj-$(CONFIG_DNET) += dnet.o
 obj-$(CONFIG_MACB) += macb.o
 obj-$(CONFIG_S6GMAC) += s6gmac.o
+obj-$(CONFIG_QFEC) += qfec.o
 
 obj-$(CONFIG_ARM) += arm/
 obj-$(CONFIG_DEV_APPLETALK) += appletalk/
@@ -298,6 +299,11 @@
 obj-$(CONFIG_FS_ENET) += fs_enet/
 
 obj-$(CONFIG_NETXEN_NIC) += netxen/
+
+obj-$(CONFIG_MSM_RMNET) += msm_rmnet.o
+obj-$(CONFIG_MSM_RMNET_SDIO) += msm_rmnet_sdio.o
+obj-$(CONFIG_MSM_RMNET_BAM) += msm_rmnet_bam.o
+
 obj-$(CONFIG_NIU) += niu.o
 obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
 obj-$(CONFIG_SFC) += sfc/
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index bcd9ba6..f79dce2 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -21,7 +21,7 @@
 #include <linux/cache.h>
 #include <linux/crc32.h>
 #include <linux/mii.h>
-
+#include <linux/regulator/consumer.h>
 #include <linux/spi/spi.h>
 
 #include "ks8851.h"
@@ -127,6 +127,8 @@
 	struct spi_message	spi_msg2;
 	struct spi_transfer	spi_xfer1;
 	struct spi_transfer	spi_xfer2[2];
+	struct regulator	*vdd_io;
+	struct regulator	*vdd_phy;
 };
 
 static int msg_enable;
@@ -1592,6 +1594,15 @@
 
 	ks = netdev_priv(ndev);
 
+	ks->vdd_io = regulator_get(&spi->dev, "vdd_io");
+	ks->vdd_phy = regulator_get(&spi->dev, "vdd_phy");
+
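+	/* The external supplies are optional; enable them only when provided. */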
+	if (!IS_ERR(ks->vdd_io))
+		regulator_enable(ks->vdd_io);
+
+	if (!IS_ERR(ks->vdd_phy))
+		regulator_enable(ks->vdd_phy);
+
 	ks->netdev = ndev;
 	ks->spidev = spi;
 	ks->tx_space = 6144;
@@ -1686,6 +1697,16 @@
 err_id:
 err_irq:
 	free_netdev(ndev);
+
+	if (!IS_ERR(ks->vdd_io)) {
+		regulator_disable(ks->vdd_io);
+		regulator_put(ks->vdd_io);
+	}
+
+	if (!IS_ERR(ks->vdd_phy)) {
+		regulator_disable(ks->vdd_phy);
+		regulator_put(ks->vdd_phy);
+	}
 	return ret;
 }
 
@@ -1696,6 +1717,16 @@
 	if (netif_msg_drv(priv))
 		dev_info(&spi->dev, "remove\n");
 
+	if (!IS_ERR(priv->vdd_io)) {
+		regulator_disable(priv->vdd_io);
+		regulator_put(priv->vdd_io);
+	}
+
+	if (!IS_ERR(priv->vdd_phy)) {
+		regulator_disable(priv->vdd_phy);
+		regulator_put(priv->vdd_phy);
+	}
+
 	unregister_netdev(priv->netdev);
 	free_irq(spi->irq, priv);
 	free_netdev(priv->netdev);
diff --git a/drivers/net/msm_rmnet.c b/drivers/net/msm_rmnet.c
new file mode 100644
index 0000000..6889425
--- /dev/null
+++ b/drivers/net/msm_rmnet.c
@@ -0,0 +1,849 @@
+/* linux/drivers/net/msm_rmnet.c
+ *
+ * Virtual Ethernet Interface for MSM7K Networking
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/wakelock.h>
+#include <linux/platform_device.h>
+#include <linux/if_arp.h>
+#include <linux/msm_rmnet.h>
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+#include <mach/msm_smd.h>
+#include <mach/peripheral-loader.h>
+
+/* Debug message support */
+static int msm_rmnet_debug_mask;
+module_param_named(debug_enable, msm_rmnet_debug_mask,
+			int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define DEBUG_MASK_LVL0 (1U << 0)
+#define DEBUG_MASK_LVL1 (1U << 1)
+#define DEBUG_MASK_LVL2 (1U << 2)
+
+#define DBG(m, x...) do {			\
+		if (msm_rmnet_debug_mask & m)   \
+			pr_info(x);		\
+} while (0)
+#define DBG0(x...) DBG(DEBUG_MASK_LVL0, x)
+#define DBG1(x...) DBG(DEBUG_MASK_LVL1, x)
+#define DBG2(x...) DBG(DEBUG_MASK_LVL2, x)
+
+/* Configure device instances */
+#define RMNET_DEVICE_COUNT (8)
+static const char *ch_name[RMNET_DEVICE_COUNT] = {
+	"DATA5",
+	"DATA6",
+	"DATA7",
+	"DATA8",
+	"DATA9",
+	"DATA12",
+	"DATA13",
+	"DATA14",
+};
+
+/* XXX should come from smd headers */
+#define SMD_PORT_ETHER0 11
+
+/* allow larger frames */
+#define RMNET_DATA_LEN 2000
+
+#define HEADROOM_FOR_QOS    8
+
+static struct completion *port_complete[RMNET_DEVICE_COUNT];
+
+struct rmnet_private
+{
+	smd_channel_t *ch;
+	struct net_device_stats stats;
+	const char *chname;
+	struct wake_lock wake_lock;
+#ifdef CONFIG_MSM_RMNET_DEBUG
+	ktime_t last_packet;
+	unsigned long wakeups_xmit;
+	unsigned long wakeups_rcv;
+	unsigned long timeout_us;
+#endif
+	struct sk_buff *skb;
+	spinlock_t lock;
+	struct tasklet_struct tsklt;
+	u32 operation_mode;    /* IOCTL specified mode (protocol, QoS header) */
+	struct platform_driver pdrv;
+	struct completion complete;
+	void *pil;
+	struct mutex pil_lock;
+};
+
+static uint msm_rmnet_modem_wait;
+module_param_named(modem_wait, msm_rmnet_modem_wait,
+		   uint, S_IRUGO | S_IWUSR | S_IWGRP);
+
+/* Forward declaration */
+static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+static int count_this_packet(void *_hdr, int len)
+{
+	struct ethhdr *hdr = _hdr;
+
+	if (len >= ETH_HLEN && hdr->h_proto == htons(ETH_P_ARP))
+		return 0;
+
+	return 1;
+}
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+static unsigned long timeout_us;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+/*
+ * If early suspend is enabled then we specify two timeout values,
+ * screen on (default), and screen is off.
+ */
+static unsigned long timeout_suspend_us;
+static struct device *rmnet0;
+
+/* Set timeout in us when the screen is off. */
+static ssize_t timeout_suspend_store(struct device *d, struct device_attribute *attr, const char *buf, size_t n)
+{
+	timeout_suspend_us = simple_strtoul(buf, NULL, 10);
+	return n;
+}
+
+static ssize_t timeout_suspend_show(struct device *d,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	return sprintf(buf, "%lu\n", (unsigned long) timeout_suspend_us);
+}
+
+static DEVICE_ATTR(timeout_suspend, 0664, timeout_suspend_show, timeout_suspend_store);
+
+static void rmnet_early_suspend(struct early_suspend *handler) {
+	if (rmnet0) {
+		struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
+		p->timeout_us = timeout_suspend_us;
+	}
+}
+
+static void rmnet_late_resume(struct early_suspend *handler) {
+	if (rmnet0) {
+		struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
+		p->timeout_us = timeout_us;
+	}
+}
+
+static struct early_suspend rmnet_power_suspend = {
+	.suspend = rmnet_early_suspend,
+	.resume = rmnet_late_resume,
+};
+
+static int __init rmnet_late_init(void)
+{
+	register_early_suspend(&rmnet_power_suspend);
+	return 0;
+}
+
+late_initcall(rmnet_late_init);
+#endif
+
+/* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */
+static int rmnet_cause_wakeup(struct rmnet_private *p) {
+	int ret = 0;
+	ktime_t now;
+	if (p->timeout_us == 0) /* Check if disabled */
+		return 0;
+
+	/* Use real (wall) time. */
+	now = ktime_get_real();
+
+	if (ktime_us_delta(now, p->last_packet) > p->timeout_us) {
+		ret = 1;
+	}
+	p->last_packet = now;
+	return ret;
+}
+
+static ssize_t wakeups_xmit_show(struct device *d,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct rmnet_private *p = netdev_priv(to_net_dev(d));
+	return sprintf(buf, "%lu\n", p->wakeups_xmit);
+}
+
+DEVICE_ATTR(wakeups_xmit, 0444, wakeups_xmit_show, NULL);
+
+static ssize_t wakeups_rcv_show(struct device *d, struct device_attribute *attr,
+		char *buf)
+{
+	struct rmnet_private *p = netdev_priv(to_net_dev(d));
+	return sprintf(buf, "%lu\n", p->wakeups_rcv);
+}
+
+DEVICE_ATTR(wakeups_rcv, 0444, wakeups_rcv_show, NULL);
+
+/* Set timeout in us. */
+static ssize_t timeout_store(struct device *d, struct device_attribute *attr,
+		const char *buf, size_t n)
+{
+#ifndef CONFIG_HAS_EARLYSUSPEND
+	struct rmnet_private *p = netdev_priv(to_net_dev(d));
+	p->timeout_us = timeout_us = simple_strtoul(buf, NULL, 10);
+#else
+/* If using early suspend/resume hooks do not write the value on store. */
+	timeout_us = simple_strtoul(buf, NULL, 10);
+#endif
+	return n;
+}
+
+static ssize_t timeout_show(struct device *d, struct device_attribute *attr,
+			    char *buf)
+{
+	return sprintf(buf, "%lu\n", timeout_us);
+}
+
+DEVICE_ATTR(timeout, 0664, timeout_show, timeout_store);
+#endif
+
+static __be16 rmnet_ip_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+	__be16 protocol = 0;
+
+	skb->dev = dev;
+
+	/* Determine L3 protocol */
+	switch (skb->data[0] & 0xf0) {
+	case 0x40:
+		protocol = htons(ETH_P_IP);
+		break;
+	case 0x60:
+		protocol = htons(ETH_P_IPV6);
+		break;
+	default:
+		pr_err("[%s] rmnet_recv() L3 protocol decode error: 0x%02x",
+		       dev->name, skb->data[0] & 0xf0);
+		/* skb will be dropped in upper layer for unknown protocol */
+	}
+	return protocol;
+}
+
+/* Called in soft-irq context */
+static void smd_net_data_handler(unsigned long arg)
+{
+	struct net_device *dev = (struct net_device *) arg;
+	struct rmnet_private *p = netdev_priv(dev);
+	struct sk_buff *skb;
+	void *ptr = 0;
+	int sz;
+	u32 opmode = p->operation_mode;
+	unsigned long flags;
+
+	for (;;) {
+		sz = smd_cur_packet_size(p->ch);
+		if (sz == 0) break;
+		if (smd_read_avail(p->ch) < sz) break;
+
+		if (RMNET_IS_MODE_IP(opmode) ? (sz > dev->mtu) :
+						(sz > (dev->mtu + ETH_HLEN))) {
+			pr_err("[%s] rmnet_recv() discarding packet len %d (%d mtu)\n",
+				dev->name, sz, RMNET_IS_MODE_IP(opmode) ?
+					dev->mtu : (dev->mtu + ETH_HLEN));
+			ptr = 0;
+		} else {
+			skb = dev_alloc_skb(sz + NET_IP_ALIGN);
+			if (skb == NULL) {
+				pr_err("[%s] rmnet_recv() cannot allocate skb\n",
+				       dev->name);
+			} else {
+				skb->dev = dev;
+				skb_reserve(skb, NET_IP_ALIGN);
+				ptr = skb_put(skb, sz);
+				wake_lock_timeout(&p->wake_lock, HZ / 2);
+				if (smd_read(p->ch, ptr, sz) != sz) {
+					pr_err("[%s] rmnet_recv() smd lied about avail?!\n",
+						dev->name);
+					ptr = 0;
+					dev_kfree_skb_irq(skb);
+				} else {
+					/* Handle Rx frame format */
+					spin_lock_irqsave(&p->lock, flags);
+					opmode = p->operation_mode;
+					spin_unlock_irqrestore(&p->lock, flags);
+
+					if (RMNET_IS_MODE_IP(opmode)) {
+						/* Driver in IP mode */
+						skb->protocol =
+						  rmnet_ip_type_trans(skb, dev);
+					} else {
+						/* Driver in Ethernet mode */
+						skb->protocol =
+						  eth_type_trans(skb, dev);
+					}
+					if (RMNET_IS_MODE_IP(opmode) ||
+					    count_this_packet(ptr, skb->len)) {
+#ifdef CONFIG_MSM_RMNET_DEBUG
+						p->wakeups_rcv +=
+							rmnet_cause_wakeup(p);
+#endif
+						p->stats.rx_packets++;
+						p->stats.rx_bytes += skb->len;
+					}
+					DBG1("[%s] Rx packet #%lu len=%d\n",
+						dev->name, p->stats.rx_packets,
+						skb->len);
+
+					/* Deliver to network stack */
+					netif_rx(skb);
+				}
+				continue;
+			}
+		}
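+		/*
+		 * The frame could not be delivered (oversized or skb
+		 * allocation failure); read it anyway so the packet is
+		 * drained from the SMD channel.
+		 */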
+		if (smd_read(p->ch, ptr, sz) != sz)
+			pr_err("[%s] rmnet_recv() smd lied about avail?!\n",
+				dev->name);
+	}
+}
+
+static DECLARE_TASKLET(smd_net_data_tasklet, smd_net_data_handler, 0);
+
+static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	smd_channel_t *ch = p->ch;
+	int smd_ret;
+	struct QMI_QOS_HDR_S *qmih;
+	u32 opmode;
+	unsigned long flags;
+
+	/* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
+	spin_lock_irqsave(&p->lock, flags);
+	opmode = p->operation_mode;
+	spin_unlock_irqrestore(&p->lock, flags);
+
+	if (RMNET_IS_MODE_QOS(opmode)) {
+		qmih = (struct QMI_QOS_HDR_S *)
+			skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
+		qmih->version = 1;
+		qmih->flags = 0;
+		qmih->flow_id = skb->mark;
+	}
+
+	dev->trans_start = jiffies;
+	smd_ret = smd_write(ch, skb->data, skb->len);
+	if (smd_ret != skb->len) {
+		pr_err("[%s] %s: smd_write returned error %d",
+			dev->name, __func__, smd_ret);
+		p->stats.tx_errors++;
+		goto xmit_out;
+	}
+
+	if (RMNET_IS_MODE_IP(opmode) ||
+	    count_this_packet(skb->data, skb->len)) {
+		p->stats.tx_packets++;
+		p->stats.tx_bytes += skb->len;
+#ifdef CONFIG_MSM_RMNET_DEBUG
+		p->wakeups_xmit += rmnet_cause_wakeup(p);
+#endif
+	}
+	DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
+	    dev->name, p->stats.tx_packets, skb->len, skb->mark);
+
+xmit_out:
+	/* data xmited, safe to release skb */
+	dev_kfree_skb_irq(skb);
+	return 0;
+}
+
+static void _rmnet_resume_flow(unsigned long param)
+{
+	struct net_device *dev = (struct net_device *)param;
+	struct rmnet_private *p = netdev_priv(dev);
+	struct sk_buff *skb = NULL;
+	unsigned long flags;
+
+	/* xmit and enable the flow only once even if
+	   multiple tasklets were scheduled by smd_net_notify */
+	spin_lock_irqsave(&p->lock, flags);
+	if (p->skb && (smd_write_avail(p->ch) >= p->skb->len)) {
+		skb = p->skb;
+		p->skb = NULL;
+		spin_unlock_irqrestore(&p->lock, flags);
+		_rmnet_xmit(skb, dev);
+		netif_wake_queue(dev);
+	} else
+		spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static void msm_rmnet_unload_modem(void *pil)
+{
+	if (pil)
+		pil_put(pil);
+}
+
+static void *msm_rmnet_load_modem(struct net_device *dev)
+{
+	void *pil;
+	int rc;
+	struct rmnet_private *p = netdev_priv(dev);
+
+	pil = pil_get("modem");
+	if (IS_ERR(pil))
+		pr_err("[%s] %s: modem load failed\n",
+			dev->name, __func__);
+	else if (msm_rmnet_modem_wait) {
+		rc = wait_for_completion_interruptible_timeout(
+			&p->complete,
+			msecs_to_jiffies(msm_rmnet_modem_wait * 1000));
+		if (!rc)
+			rc = -ETIMEDOUT;
+		if (rc < 0) {
+			pr_err("[%s] %s: wait for rmnet port failed %d\n",
+			       dev->name, __func__, rc);
+			msm_rmnet_unload_modem(pil);
+			pil = ERR_PTR(rc);
+		}
+	}
+
+	return pil;
+}
+
+static void smd_net_notify(void *_dev, unsigned event)
+{
+	struct rmnet_private *p = netdev_priv((struct net_device *)_dev);
+
+	switch (event) {
+	case SMD_EVENT_DATA:
+		spin_lock(&p->lock);
+		if (p->skb && (smd_write_avail(p->ch) >= p->skb->len)) {
+			smd_disable_read_intr(p->ch);
+			tasklet_hi_schedule(&p->tsklt);
+		}
+
+		spin_unlock(&p->lock);
+
+		if (smd_read_avail(p->ch) &&
+			(smd_read_avail(p->ch) >= smd_cur_packet_size(p->ch))) {
+			smd_net_data_tasklet.data = (unsigned long) _dev;
+			tasklet_schedule(&smd_net_data_tasklet);
+		}
+		break;
+
+	case SMD_EVENT_OPEN:
+		DBG0("%s: opening SMD port\n", __func__);
+		netif_carrier_on(_dev);
+		if (netif_queue_stopped(_dev)) {
+			DBG0("%s: re-starting if queue\n", __func__);
+			netif_wake_queue(_dev);
+		}
+		break;
+
+	case SMD_EVENT_CLOSE:
+		DBG0("%s: closing SMD port\n", __func__);
+		netif_carrier_off(_dev);
+		break;
+	}
+}
+
+static int __rmnet_open(struct net_device *dev)
+{
+	int r;
+	void *pil;
+	struct rmnet_private *p = netdev_priv(dev);
+
+	mutex_lock(&p->pil_lock);
+	if (!p->pil) {
+		pil = msm_rmnet_load_modem(dev);
+		if (IS_ERR(pil)) {
+			mutex_unlock(&p->pil_lock);
+			return PTR_ERR(pil);
+		}
+		p->pil = pil;
+	}
+	mutex_unlock(&p->pil_lock);
+
+	if (!p->ch) {
+		r = smd_open(p->chname, &p->ch, dev, smd_net_notify);
+
+		if (r < 0)
+			return -ENODEV;
+	}
+
+	smd_disable_read_intr(p->ch);
+	return 0;
+}
+
+static int __rmnet_close(struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	int rc;
+	unsigned long flags;
+
+	if (p->ch) {
+		rc = smd_close(p->ch);
+		spin_lock_irqsave(&p->lock, flags);
+		p->ch = 0;
+		spin_unlock_irqrestore(&p->lock, flags);
+		return rc;
+	} else
+		return -EBADF;
+}
+
+static int rmnet_open(struct net_device *dev)
+{
+	int rc = 0;
+
+	DBG0("[%s] rmnet_open()\n", dev->name);
+
+	rc = __rmnet_open(dev);
+	if (rc == 0)
+		netif_start_queue(dev);
+
+	return rc;
+}
+
+static int rmnet_stop(struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+
+	DBG0("[%s] rmnet_stop()\n", dev->name);
+
+	netif_stop_queue(dev);
+	tasklet_kill(&p->tsklt);
+
+	/* TODO: unload the modem safely;
+	   currently this causes unnecessary unloads */
+	/*
+	mutex_lock(&p->pil_lock);
+	msm_rmnet_unload_modem(p->pil);
+	p->pil = NULL;
+	mutex_unlock(&p->pil_lock);
+	*/
+
+	return 0;
+}
+
+static int rmnet_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if (new_mtu < 0 || new_mtu > RMNET_DATA_LEN)
+		return -EINVAL;
+
+	DBG0("[%s] MTU change: old=%d new=%d\n",
+		dev->name, dev->mtu, new_mtu);
+	dev->mtu = new_mtu;
+
+	return 0;
+}
+
+static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	smd_channel_t *ch = p->ch;
+	unsigned long flags;
+
+	if (netif_queue_stopped(dev)) {
+		pr_err("[%s] fatal: rmnet_xmit called when netif_queue is stopped\n",
+			dev->name);
+		return 0;
+	}
+
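+	/*
+	 * If the channel cannot take the whole frame, park the skb, stop
+	 * the queue and leave the read interrupt enabled so smd_net_notify
+	 * can schedule _rmnet_resume_flow once space frees up.
+	 */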
+	spin_lock_irqsave(&p->lock, flags);
+	smd_enable_read_intr(ch);
+	if (smd_write_avail(ch) < skb->len) {
+		netif_stop_queue(dev);
+		p->skb = skb;
+		spin_unlock_irqrestore(&p->lock, flags);
+		return 0;
+	}
+	smd_disable_read_intr(ch);
+	spin_unlock_irqrestore(&p->lock, flags);
+
+	_rmnet_xmit(skb, dev);
+
+	return 0;
+}
+
+static struct net_device_stats *rmnet_get_stats(struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	return &p->stats;
+}
+
+static void rmnet_set_multicast_list(struct net_device *dev)
+{
+}
+
+static void rmnet_tx_timeout(struct net_device *dev)
+{
+	pr_warning("[%s] rmnet_tx_timeout()\n", dev->name);
+}
+
+
+static const struct net_device_ops rmnet_ops_ether = {
+	.ndo_open		= rmnet_open,
+	.ndo_stop		= rmnet_stop,
+	.ndo_start_xmit		= rmnet_xmit,
+	.ndo_get_stats		= rmnet_get_stats,
+	.ndo_set_multicast_list = rmnet_set_multicast_list,
+	.ndo_tx_timeout		= rmnet_tx_timeout,
+	.ndo_do_ioctl		= rmnet_ioctl,
+	.ndo_change_mtu		= rmnet_change_mtu,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
+static const struct net_device_ops rmnet_ops_ip = {
+	.ndo_open		= rmnet_open,
+	.ndo_stop		= rmnet_stop,
+	.ndo_start_xmit		= rmnet_xmit,
+	.ndo_get_stats		= rmnet_get_stats,
+	.ndo_set_multicast_list = rmnet_set_multicast_list,
+	.ndo_tx_timeout		= rmnet_tx_timeout,
+	.ndo_do_ioctl		= rmnet_ioctl,
+	.ndo_change_mtu		= rmnet_change_mtu,
+	.ndo_set_mac_address	= 0,
+	.ndo_validate_addr	= 0,
+};
+
+static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	u32 old_opmode = p->operation_mode;
+	unsigned long flags;
+	int prev_mtu = dev->mtu;
+	int rc = 0;
+
+	/* Process IOCTL command */
+	switch (cmd) {
+	case RMNET_IOCTL_SET_LLP_ETHERNET:  /* Set Ethernet protocol   */
+		/* Perform Ethernet config only if in IP mode currently*/
+		if (p->operation_mode & RMNET_MODE_LLP_IP) {
+			ether_setup(dev);
+			random_ether_addr(dev->dev_addr);
+			dev->mtu = prev_mtu;
+
+			dev->netdev_ops = &rmnet_ops_ether;
+			spin_lock_irqsave(&p->lock, flags);
+			p->operation_mode &= ~RMNET_MODE_LLP_IP;
+			p->operation_mode |= RMNET_MODE_LLP_ETH;
+			spin_unlock_irqrestore(&p->lock, flags);
+			DBG0("[%s] rmnet_ioctl(): "
+				"set Ethernet protocol mode\n",
+				dev->name);
+		}
+		break;
+
+	case RMNET_IOCTL_SET_LLP_IP:        /* Set RAWIP protocol      */
+		/* Perform IP config only if in Ethernet mode currently*/
+		if (p->operation_mode & RMNET_MODE_LLP_ETH) {
+
+			/* Undo config done in ether_setup() */
+			dev->header_ops         = 0;  /* No header */
+			dev->type               = ARPHRD_RAWIP;
+			dev->hard_header_len    = 0;
+			dev->mtu                = prev_mtu;
+			dev->addr_len           = 0;
+			dev->flags              &= ~(IFF_BROADCAST|
+						     IFF_MULTICAST);
+
+			dev->netdev_ops = &rmnet_ops_ip;
+			spin_lock_irqsave(&p->lock, flags);
+			p->operation_mode &= ~RMNET_MODE_LLP_ETH;
+			p->operation_mode |= RMNET_MODE_LLP_IP;
+			spin_unlock_irqrestore(&p->lock, flags);
+			DBG0("[%s] rmnet_ioctl(): set IP protocol mode\n",
+				dev->name);
+		}
+		break;
+
+	case RMNET_IOCTL_GET_LLP:           /* Get link protocol state */
+		ifr->ifr_ifru.ifru_data =
+			(void *)(p->operation_mode &
+				(RMNET_MODE_LLP_ETH|RMNET_MODE_LLP_IP));
+		break;
+
+	case RMNET_IOCTL_SET_QOS_ENABLE:    /* Set QoS header enabled  */
+		spin_lock_irqsave(&p->lock, flags);
+		p->operation_mode |= RMNET_MODE_QOS;
+		spin_unlock_irqrestore(&p->lock, flags);
+		DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n",
+			dev->name);
+		break;
+
+	case RMNET_IOCTL_SET_QOS_DISABLE:   /* Set QoS header disabled */
+		spin_lock_irqsave(&p->lock, flags);
+		p->operation_mode &= ~RMNET_MODE_QOS;
+		spin_unlock_irqrestore(&p->lock, flags);
+		DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n",
+			dev->name);
+		break;
+
+	case RMNET_IOCTL_GET_QOS:           /* Get QoS header state    */
+		ifr->ifr_ifru.ifru_data =
+			(void *)(p->operation_mode & RMNET_MODE_QOS);
+		break;
+
+	case RMNET_IOCTL_GET_OPMODE:        /* Get operation mode      */
+		ifr->ifr_ifru.ifru_data = (void *)p->operation_mode;
+		break;
+
+	case RMNET_IOCTL_OPEN:              /* Open transport port     */
+		rc = __rmnet_open(dev);
+		DBG0("[%s] rmnet_ioctl(): open transport port\n",
+			dev->name);
+		break;
+
+	case RMNET_IOCTL_CLOSE:             /* Close transport port    */
+		rc = __rmnet_close(dev);
+		DBG0("[%s] rmnet_ioctl(): close transport port\n",
+			dev->name);
+		break;
+
+	default:
+		pr_err("[%s] error: rmnet_ioctl called for unsupported cmd[%d]\n",
+			dev->name, cmd);
+		return -EINVAL;
+	}
+
+	DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08x\n",
+		dev->name, __func__, cmd, old_opmode, p->operation_mode);
+	return rc;
+}
+
+
+static void __init rmnet_setup(struct net_device *dev)
+{
+	/* Using Ethernet mode by default */
+	dev->netdev_ops = &rmnet_ops_ether;
+	ether_setup(dev);
+
+	/* set this after calling ether_setup */
+	dev->mtu = RMNET_DATA_LEN;
+	dev->needed_headroom = HEADROOM_FOR_QOS;
+
+	random_ether_addr(dev->dev_addr);
+
+	dev->watchdog_timeo = 1000; /* 1000 jiffies: 10 seconds at HZ=100 */
+}
+
+static int msm_rmnet_smd_probe(struct platform_device *pdev)
+{
+	int i;
+
+	for (i = 0; i < RMNET_DEVICE_COUNT; i++)
+		if (!strcmp(pdev->name, ch_name[i])) {
+			complete_all(port_complete[i]);
+			break;
+		}
+
+	return 0;
+}
+
+static int __init rmnet_init(void)
+{
+	int ret;
+	struct device *d;
+	struct net_device *dev;
+	struct rmnet_private *p;
+	unsigned n;
+
+	pr_info("%s: SMD devices[%d]\n", __func__, RMNET_DEVICE_COUNT);
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+	timeout_us = 0;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	timeout_suspend_us = 0;
+#endif
+#endif
+
+	for (n = 0; n < RMNET_DEVICE_COUNT; n++) {
+		dev = alloc_netdev(sizeof(struct rmnet_private),
+				   "rmnet%d", rmnet_setup);
+
+		if (!dev)
+			return -ENOMEM;
+
+		d = &(dev->dev);
+		p = netdev_priv(dev);
+		p->chname = ch_name[n];
+		/* Initial config uses Ethernet */
+		p->operation_mode = RMNET_MODE_LLP_ETH;
+		p->skb = NULL;
+		spin_lock_init(&p->lock);
+		tasklet_init(&p->tsklt, _rmnet_resume_flow,
+				(unsigned long)dev);
+		wake_lock_init(&p->wake_lock, WAKE_LOCK_SUSPEND, ch_name[n]);
+#ifdef CONFIG_MSM_RMNET_DEBUG
+		p->timeout_us = timeout_us;
+		p->wakeups_xmit = p->wakeups_rcv = 0;
+#endif
+
+		init_completion(&p->complete);
+		port_complete[n] = &p->complete;
+		mutex_init(&p->pil_lock);
+		p->pdrv.probe = msm_rmnet_smd_probe;
+		p->pdrv.driver.name = ch_name[n];
+		p->pdrv.driver.owner = THIS_MODULE;
+		ret = platform_driver_register(&p->pdrv);
+		if (ret) {
+			free_netdev(dev);
+			return ret;
+		}
+
+		ret = register_netdev(dev);
+		if (ret) {
+			platform_driver_unregister(&p->pdrv);
+			free_netdev(dev);
+			return ret;
+		}
+
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+		if (device_create_file(d, &dev_attr_timeout))
+			continue;
+		if (device_create_file(d, &dev_attr_wakeups_xmit))
+			continue;
+		if (device_create_file(d, &dev_attr_wakeups_rcv))
+			continue;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+		if (device_create_file(d, &dev_attr_timeout_suspend))
+			continue;
+
+		/* Only care about rmnet0 for suspend/resume timeout hooks. */
+		if (n == 0)
+			rmnet0 = d;
+#endif
+#endif
+	}
+	return 0;
+}
+
+module_init(rmnet_init);
diff --git a/drivers/net/msm_rmnet_bam.c b/drivers/net/msm_rmnet_bam.c
new file mode 100644
index 0000000..a8bdeb3
--- /dev/null
+++ b/drivers/net/msm_rmnet_bam.c
@@ -0,0 +1,653 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * RMNET BAM Module.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/wakelock.h>
+#include <linux/if_arp.h>
+#include <linux/msm_rmnet.h>
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+#include <mach/bam_dmux.h>
+
+/* Debug message support */
+static int msm_rmnet_bam_debug_mask;
+module_param_named(debug_enable, msm_rmnet_bam_debug_mask,
+			int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define DEBUG_MASK_LVL0 (1U << 0)
+#define DEBUG_MASK_LVL1 (1U << 1)
+#define DEBUG_MASK_LVL2 (1U << 2)
+
+#define DBG(m, x...) do {			   \
+		if (msm_rmnet_bam_debug_mask & m) \
+			pr_info(x);		   \
+} while (0)
+#define DBG0(x...) DBG(DEBUG_MASK_LVL0, x)
+#define DBG1(x...) DBG(DEBUG_MASK_LVL1, x)
+#define DBG2(x...) DBG(DEBUG_MASK_LVL2, x)
+
+/* Configure device instances */
+#define RMNET_DEVICE_COUNT (8)
+
+/* allow larger frames */
+#define RMNET_DATA_LEN 2000
+
+#define DEVICE_ID_INVALID   -1
+
+#define DEVICE_INACTIVE      0
+#define DEVICE_ACTIVE        1
+
+#define HEADROOM_FOR_BAM   8 /* for mux header */
+#define HEADROOM_FOR_QOS    8
+#define TAILROOM            8 /* for padding by mux layer */
+
+struct rmnet_private {
+	struct net_device_stats stats;
+	uint32_t ch_id;
+#ifdef CONFIG_MSM_RMNET_DEBUG
+	ktime_t last_packet;
+	unsigned long wakeups_xmit;
+	unsigned long wakeups_rcv;
+	unsigned long timeout_us;
+#endif
+	struct sk_buff *skb;
+	spinlock_t lock;
+	struct tasklet_struct tsklt;
+	u32 operation_mode; /* IOCTL specified mode (protocol, QoS header) */
+	uint8_t device_up;
+};
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+static unsigned long timeout_us;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+/*
+ * If early suspend is enabled, two timeout values are used: one while
+ * the screen is on (the default) and one while the screen is off.
+ */
+static unsigned long timeout_suspend_us;
+static struct device *rmnet0;
+
+/* Set timeout in us when the screen is off. */
+static ssize_t timeout_suspend_store(struct device *d,
+				     struct device_attribute *attr,
+				     const char *buf, size_t n)
+{
+	timeout_suspend_us = strict_strtoul(buf, NULL, 10);
+	return n;
+}
+
+static ssize_t timeout_suspend_show(struct device *d,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	return sprintf(buf, "%lu\n", (unsigned long) timeout_suspend_us);
+}
+
+static DEVICE_ATTR(timeout_suspend, 0664, timeout_suspend_show,
+		   timeout_suspend_store);
+
+static void rmnet_early_suspend(struct early_suspend *handler)
+{
+	if (rmnet0) {
+		struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
+		p->timeout_us = timeout_suspend_us;
+	}
+}
+
+static void rmnet_late_resume(struct early_suspend *handler)
+{
+	if (rmnet0) {
+		struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
+		p->timeout_us = timeout_us;
+	}
+}
+
+static struct early_suspend rmnet_power_suspend = {
+	.suspend = rmnet_early_suspend,
+	.resume = rmnet_late_resume,
+};
+
+static int __init rmnet_late_init(void)
+{
+	register_early_suspend(&rmnet_power_suspend);
+	return 0;
+}
+
+late_initcall(rmnet_late_init);
+#endif
+
+/* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */
+static int rmnet_cause_wakeup(struct rmnet_private *p)
+{
+	int ret = 0;
+	ktime_t now;
+	if (p->timeout_us == 0) /* Check if disabled */
+		return 0;
+
+	/* Use real (wall) time. */
+	now = ktime_get_real();
+
+	if (ktime_us_delta(now, p->last_packet) > p->timeout_us)
+		ret = 1;
+
+	p->last_packet = now;
+	return ret;
+}
+
+static ssize_t wakeups_xmit_show(struct device *d,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct rmnet_private *p = netdev_priv(to_net_dev(d));
+	return sprintf(buf, "%lu\n", p->wakeups_xmit);
+}
+
+DEVICE_ATTR(wakeups_xmit, 0444, wakeups_xmit_show, NULL);
+
+static ssize_t wakeups_rcv_show(struct device *d, struct device_attribute *attr,
+				char *buf)
+{
+	struct rmnet_private *p = netdev_priv(to_net_dev(d));
+	return sprintf(buf, "%lu\n", p->wakeups_rcv);
+}
+
+DEVICE_ATTR(wakeups_rcv, 0444, wakeups_rcv_show, NULL);
+
+/* Set timeout in us. */
+static ssize_t timeout_store(struct device *d, struct device_attribute *attr,
+			     const char *buf, size_t n)
+{
+#ifndef CONFIG_HAS_EARLYSUSPEND
+	struct rmnet_private *p = netdev_priv(to_net_dev(d));
+	p->timeout_us = timeout_us = strict_strtoul(buf, NULL, 10);
+#else
+/* If using early suspend/resume hooks do not write the value on store. */
+	timeout_us = strict_strtoul(buf, NULL, 10);
+#endif
+	return n;
+}
+
+static ssize_t timeout_show(struct device *d, struct device_attribute *attr,
+			    char *buf)
+{
+	return sprintf(buf, "%lu\n", timeout_us);
+}
+
+DEVICE_ATTR(timeout, 0664, timeout_show, timeout_store);
+#endif
+
+
+/* Forward declaration */
+static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+static __be16 rmnet_ip_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+	__be16 protocol = 0;
+
+	skb->dev = dev;
+
+	/* Determine L3 protocol */
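+	/* The high nibble of the first payload byte is the IP version
+	 * field, e.g. 0x4x for an IPv4 header and 0x6x for IPv6. */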
+	switch (skb->data[0] & 0xf0) {
+	case 0x40:
+		protocol = htons(ETH_P_IP);
+		break;
+	case 0x60:
+		protocol = htons(ETH_P_IPV6);
+		break;
+	default:
+		pr_err("[%s] rmnet_recv() L3 protocol decode error: 0x%02x",
+		       dev->name, skb->data[0] & 0xf0);
+		/* skb will be dropped in upper layer for unknown protocol */
+	}
+	return protocol;
+}
+
+static int count_this_packet(void *_hdr, int len)
+{
+	struct ethhdr *hdr = _hdr;
+
+	if (len >= ETH_HLEN && hdr->h_proto == htons(ETH_P_ARP))
+		return 0;
+
+	return 1;
+}
+
+/* Rx Callback, Called in Work Queue context */
+static void bam_recv_notify(void *dev, struct sk_buff *skb)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	unsigned long flags;
+	u32 opmode;
+
+	if (skb) {
+		skb->dev = dev;
+		/* Handle Rx frame format */
+		spin_lock_irqsave(&p->lock, flags);
+		opmode = p->operation_mode;
+		spin_unlock_irqrestore(&p->lock, flags);
+
+		if (RMNET_IS_MODE_IP(opmode)) {
+			/* Driver in IP mode */
+			skb->protocol = rmnet_ip_type_trans(skb, dev);
+		} else {
+			/* Driver in Ethernet mode */
+			skb->protocol = eth_type_trans(skb, dev);
+		}
+		if (RMNET_IS_MODE_IP(opmode) ||
+		    count_this_packet(skb->data, skb->len)) {
+#ifdef CONFIG_MSM_RMNET_DEBUG
+			p->wakeups_rcv += rmnet_cause_wakeup(p);
+#endif
+			p->stats.rx_packets++;
+			p->stats.rx_bytes += skb->len;
+		}
+		DBG1("[%s] Rx packet #%lu len=%d\n",
+			((struct net_device *)dev)->name,
+			p->stats.rx_packets, skb->len);
+
+		/* Deliver to network stack */
+		netif_rx(skb);
+	} else
+		pr_err("[%s] %s: No skb received",
+			((struct net_device *)dev)->name, __func__);
+}
+
+static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	int bam_ret;
+	struct QMI_QOS_HDR_S *qmih;
+	u32 opmode;
+	unsigned long flags;
+
+	/* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
+	spin_lock_irqsave(&p->lock, flags);
+	opmode = p->operation_mode;
+	spin_unlock_irqrestore(&p->lock, flags);
+
+	if (RMNET_IS_MODE_QOS(opmode)) {
+		qmih = (struct QMI_QOS_HDR_S *)
+			skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
+		qmih->version = 1;
+		qmih->flags = 0;
+		qmih->flow_id = skb->mark;
+	}
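+	/* The QoS header pushed above is why rmnet_setup() reserves
+	 * HEADROOM_FOR_QOS bytes of needed_headroom. */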
+
+	dev->trans_start = jiffies;
+	bam_ret = msm_bam_dmux_write(p->ch_id, skb);
+
+	if (bam_ret != 0) {
+		pr_err("[%s] %s: write returned error %d",
+			dev->name, __func__, bam_ret);
+		goto xmit_out;
+	}
+
+	if (count_this_packet(skb->data, skb->len)) {
+		p->stats.tx_packets++;
+		p->stats.tx_bytes += skb->len;
+#ifdef CONFIG_MSM_RMNET_DEBUG
+		p->wakeups_xmit += rmnet_cause_wakeup(p);
+#endif
+	}
+	DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
+	    dev->name, p->stats.tx_packets, skb->len, skb->mark);
+
+	return 0;
+xmit_out:
+	/* write failed, free the skb */
+	dev_kfree_skb_any(skb);
+	return 0;
+}
+
+static void bam_write_done(void *dev, struct sk_buff *skb)
+{
+	DBG1("%s: write complete\n", __func__);
+	dev_kfree_skb_any(skb);
+	netif_wake_queue(dev);
+}
+
+static int __rmnet_open(struct net_device *dev)
+{
+	int r;
+	struct rmnet_private *p = netdev_priv(dev);
+
+	DBG0("[%s] __rmnet_open()\n", dev->name);
+
+	if (!p->device_up) {
+		r = msm_bam_dmux_open(p->ch_id, dev,
+				       bam_recv_notify, bam_write_done);
+
+		if (r < 0)
+			return -ENODEV;
+	}
+
+	p->device_up = DEVICE_ACTIVE;
+	return 0;
+}
+
+static int rmnet_open(struct net_device *dev)
+{
+	int rc = 0;
+
+	DBG0("[%s] rmnet_open()\n", dev->name);
+
+	rc = __rmnet_open(dev);
+
+	if (rc == 0)
+		netif_start_queue(dev);
+
+	return rc;
+}
+
+
+static int __rmnet_close(struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	int rc = 0;
+
+	if (p->device_up) {
+		/* Do not close the rmnet port once it is up; doing so
+		   causes the remote side to hang if it is reopened. */
+		p->device_up = DEVICE_INACTIVE;
+		return rc;
+	} else
+		return -EBADF;
+}
+
+
+static int rmnet_stop(struct net_device *dev)
+{
+	DBG0("[%s] rmnet_stop()\n", dev->name);
+
+	__rmnet_close(dev);
+	netif_stop_queue(dev);
+
+	return 0;
+}
+
+static int rmnet_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if (0 > new_mtu || RMNET_DATA_LEN < new_mtu)
+		return -EINVAL;
+
+	DBG0("[%s] MTU change: old=%d new=%d\n",
+		dev->name, dev->mtu, new_mtu);
+	dev->mtu = new_mtu;
+
+	return 0;
+}
+
+static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	if (netif_queue_stopped(dev)) {
+		pr_err("[%s] fatal: rmnet_xmit called when "
+			"netif_queue is stopped", dev->name);
+		return 0;
+	}
+
+	netif_stop_queue(dev);
+	_rmnet_xmit(skb, dev);
+
+	return 0;
+}
+
+static struct net_device_stats *rmnet_get_stats(struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	return &p->stats;
+}
+
+static void rmnet_set_multicast_list(struct net_device *dev)
+{
+}
+
+static void rmnet_tx_timeout(struct net_device *dev)
+{
+	pr_warning("[%s] rmnet_tx_timeout()\n", dev->name);
+}
+
+static const struct net_device_ops rmnet_ops_ether = {
+	.ndo_open = rmnet_open,
+	.ndo_stop = rmnet_stop,
+	.ndo_start_xmit = rmnet_xmit,
+	.ndo_get_stats = rmnet_get_stats,
+	.ndo_set_multicast_list = rmnet_set_multicast_list,
+	.ndo_tx_timeout = rmnet_tx_timeout,
+	.ndo_do_ioctl = rmnet_ioctl,
+	.ndo_change_mtu = rmnet_change_mtu,
+	.ndo_set_mac_address = eth_mac_addr,
+	.ndo_validate_addr = eth_validate_addr,
+};
+
+static const struct net_device_ops rmnet_ops_ip = {
+	.ndo_open = rmnet_open,
+	.ndo_stop = rmnet_stop,
+	.ndo_start_xmit = rmnet_xmit,
+	.ndo_get_stats = rmnet_get_stats,
+	.ndo_set_multicast_list = rmnet_set_multicast_list,
+	.ndo_tx_timeout = rmnet_tx_timeout,
+	.ndo_do_ioctl = rmnet_ioctl,
+	.ndo_change_mtu = rmnet_change_mtu,
+	.ndo_set_mac_address = 0,
+	.ndo_validate_addr = 0,
+};
+
+static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	u32 old_opmode = p->operation_mode;
+	unsigned long flags;
+	int prev_mtu = dev->mtu;
+	int rc = 0;
+
+	/* Process IOCTL command */
+	switch (cmd) {
+	case RMNET_IOCTL_SET_LLP_ETHERNET:  /* Set Ethernet protocol   */
+		/* Perform Ethernet config only if in IP mode currently */
+		if (p->operation_mode & RMNET_MODE_LLP_IP) {
+			ether_setup(dev);
+			random_ether_addr(dev->dev_addr);
+			dev->mtu = prev_mtu;
+
+			dev->netdev_ops = &rmnet_ops_ether;
+			spin_lock_irqsave(&p->lock, flags);
+			p->operation_mode &= ~RMNET_MODE_LLP_IP;
+			p->operation_mode |= RMNET_MODE_LLP_ETH;
+			spin_unlock_irqrestore(&p->lock, flags);
+			DBG0("[%s] rmnet_ioctl(): "
+				"set Ethernet protocol mode\n",
+				dev->name);
+		}
+		break;
+
+	case RMNET_IOCTL_SET_LLP_IP:        /* Set RAWIP protocol      */
+		/* Perform IP config only if in Ethernet mode currently */
+		if (p->operation_mode & RMNET_MODE_LLP_ETH) {
+
+			/* Undo config done in ether_setup() */
+			dev->header_ops         = 0;  /* No header */
+			dev->type               = ARPHRD_RAWIP;
+			dev->hard_header_len    = 0;
+			dev->mtu                = prev_mtu;
+			dev->addr_len           = 0;
+			dev->flags              &= ~(IFF_BROADCAST|
+						     IFF_MULTICAST);
+
+			dev->needed_headroom = HEADROOM_FOR_BAM +
+			  HEADROOM_FOR_QOS;
+			dev->needed_tailroom = TAILROOM;
+			dev->netdev_ops = &rmnet_ops_ip;
+			spin_lock_irqsave(&p->lock, flags);
+			p->operation_mode &= ~RMNET_MODE_LLP_ETH;
+			p->operation_mode |= RMNET_MODE_LLP_IP;
+			spin_unlock_irqrestore(&p->lock, flags);
+			DBG0("[%s] rmnet_ioctl(): "
+				"set IP protocol mode\n",
+				dev->name);
+		}
+		break;
+
+	case RMNET_IOCTL_GET_LLP:           /* Get link protocol state */
+		ifr->ifr_ifru.ifru_data =
+			(void *)(p->operation_mode &
+				 (RMNET_MODE_LLP_ETH|RMNET_MODE_LLP_IP));
+		break;
+
+	case RMNET_IOCTL_SET_QOS_ENABLE:    /* Set QoS header enabled  */
+		spin_lock_irqsave(&p->lock, flags);
+		p->operation_mode |= RMNET_MODE_QOS;
+		spin_unlock_irqrestore(&p->lock, flags);
+		DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n",
+			dev->name);
+		break;
+
+	case RMNET_IOCTL_SET_QOS_DISABLE:   /* Set QoS header disabled */
+		spin_lock_irqsave(&p->lock, flags);
+		p->operation_mode &= ~RMNET_MODE_QOS;
+		spin_unlock_irqrestore(&p->lock, flags);
+		DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n",
+			dev->name);
+		break;
+
+	case RMNET_IOCTL_GET_QOS:           /* Get QoS header state    */
+		ifr->ifr_ifru.ifru_data =
+			(void *)(p->operation_mode & RMNET_MODE_QOS);
+		break;
+
+	case RMNET_IOCTL_GET_OPMODE:        /* Get operation mode      */
+		ifr->ifr_ifru.ifru_data = (void *)p->operation_mode;
+		break;
+
+	case RMNET_IOCTL_OPEN:              /* Open transport port     */
+		rc = __rmnet_open(dev);
+		DBG0("[%s] rmnet_ioctl(): open transport port\n",
+			dev->name);
+		break;
+
+	case RMNET_IOCTL_CLOSE:             /* Close transport port    */
+		rc = __rmnet_close(dev);
+		DBG0("[%s] rmnet_ioctl(): close transport port\n",
+			dev->name);
+		break;
+
+	default:
+		pr_err("[%s] error: rmnet_ioctl called for unsupported cmd[%d]",
+			dev->name, cmd);
+		return -EINVAL;
+	}
+
+	DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08x\n",
+		dev->name, __func__, cmd, old_opmode, p->operation_mode);
+	return rc;
+}
+
+static void __init rmnet_setup(struct net_device *dev)
+{
+	/* Using Ethernet mode by default */
+	dev->netdev_ops = &rmnet_ops_ether;
+	ether_setup(dev);
+
+	/* set this after calling ether_setup */
+	dev->mtu = RMNET_DATA_LEN;
+	dev->needed_headroom = HEADROOM_FOR_BAM + HEADROOM_FOR_QOS;
+	dev->needed_tailroom = TAILROOM;
+	random_ether_addr(dev->dev_addr);
+
+	dev->watchdog_timeo = 1000; /* 10 seconds at HZ=100 */
+}
+
+
+static int __init rmnet_init(void)
+{
+	int ret;
+	struct device *d;
+	struct net_device *dev;
+	struct rmnet_private *p;
+	unsigned n;
+
+	pr_info("%s: BAM devices[%d]\n", __func__, RMNET_DEVICE_COUNT);
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+	timeout_us = 0;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	timeout_suspend_us = 0;
+#endif
+#endif
+
+	for (n = 0; n < RMNET_DEVICE_COUNT; n++) {
+		dev = alloc_netdev(sizeof(struct rmnet_private),
+				   "rmnet%d", rmnet_setup);
+
+		if (!dev)
+			return -ENOMEM;
+
+		d = &(dev->dev);
+		p = netdev_priv(dev);
+		/* Initial config uses Ethernet */
+		p->operation_mode = RMNET_MODE_LLP_ETH;
+		p->ch_id = n;
+		spin_lock_init(&p->lock);
+#ifdef CONFIG_MSM_RMNET_DEBUG
+		p->timeout_us = timeout_us;
+		p->wakeups_xmit = p->wakeups_rcv = 0;
+#endif
+
+		ret = register_netdev(dev);
+		if (ret) {
+			free_netdev(dev);
+			return ret;
+		}
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+		if (device_create_file(d, &dev_attr_timeout))
+			continue;
+		if (device_create_file(d, &dev_attr_wakeups_xmit))
+			continue;
+		if (device_create_file(d, &dev_attr_wakeups_rcv))
+			continue;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+		if (device_create_file(d, &dev_attr_timeout_suspend))
+			continue;
+
+		/* Only care about rmnet0 for suspend/resume timeout hooks. */
+		if (n == 0)
+			rmnet0 = d;
+#endif
+#endif
+	}
+	return 0;
+}
+
+module_init(rmnet_init);
+MODULE_DESCRIPTION("MSM RMNET BAM TRANSPORT");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/net/msm_rmnet_sdio.c b/drivers/net/msm_rmnet_sdio.c
new file mode 100644
index 0000000..883c649
--- /dev/null
+++ b/drivers/net/msm_rmnet_sdio.c
@@ -0,0 +1,704 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * RMNET SDIO Module.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/wakelock.h>
+#include <linux/if_arp.h>
+#include <linux/msm_rmnet.h>
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+#include <mach/sdio_dmux.h>
+
+/* Debug message support */
+static int msm_rmnet_sdio_debug_mask;
+module_param_named(debug_enable, msm_rmnet_sdio_debug_mask,
+			int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define DEBUG_MASK_LVL0 (1U << 0)
+#define DEBUG_MASK_LVL1 (1U << 1)
+#define DEBUG_MASK_LVL2 (1U << 2)
+
+#define DBG(m, x...) do {			   \
+		if (msm_rmnet_sdio_debug_mask & m) \
+			pr_info(x);		   \
+} while (0)
+#define DBG0(x...) DBG(DEBUG_MASK_LVL0, x)
+#define DBG1(x...) DBG(DEBUG_MASK_LVL1, x)
+#define DBG2(x...) DBG(DEBUG_MASK_LVL2, x)
+
+/* Configure device instances */
+#define RMNET_DEVICE_COUNT (8)
+
+/* allow larger frames */
+#define RMNET_DATA_LEN 2000
+
+#define DEVICE_ID_INVALID   -1
+
+#define DEVICE_INACTIVE      0
+#define DEVICE_ACTIVE        1
+
+#define HEADROOM_FOR_SDIO   8 /* for mux header */
+#define HEADROOM_FOR_QOS    8
+#define TAILROOM            8 /* for padding by mux layer */
+
+struct rmnet_private {
+	struct net_device_stats stats;
+	uint32_t ch_id;
+#ifdef CONFIG_MSM_RMNET_DEBUG
+	ktime_t last_packet;
+	unsigned long wakeups_xmit;
+	unsigned long wakeups_rcv;
+	unsigned long timeout_us;
+#endif
+	struct sk_buff *skb;
+	spinlock_t lock;
+	struct tasklet_struct tsklt;
+	u32 operation_mode; /* IOCTL specified mode (protocol, QoS header) */
+	uint8_t device_up;
+	uint8_t in_reset;
+};
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+static unsigned long timeout_us;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+/*
+ * If early suspend is enabled, two timeout values are used: one while
+ * the screen is on (the default) and one while the screen is off.
+ */
+static unsigned long timeout_suspend_us;
+static struct device *rmnet0;
+
+/* Set timeout in us when the screen is off. */
+static ssize_t timeout_suspend_store(struct device *d,
+				     struct device_attribute *attr,
+				     const char *buf, size_t n)
+{
+	timeout_suspend_us = strict_strtoul(buf, NULL, 10);
+	return n;
+}
+
+static ssize_t timeout_suspend_show(struct device *d,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	return sprintf(buf, "%lu\n", (unsigned long) timeout_suspend_us);
+}
+
+static DEVICE_ATTR(timeout_suspend, 0664, timeout_suspend_show,
+		   timeout_suspend_store);
+
+static void rmnet_early_suspend(struct early_suspend *handler)
+{
+	if (rmnet0) {
+		struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
+		p->timeout_us = timeout_suspend_us;
+	}
+}
+
+static void rmnet_late_resume(struct early_suspend *handler)
+{
+	if (rmnet0) {
+		struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
+		p->timeout_us = timeout_us;
+	}
+}
+
+static struct early_suspend rmnet_power_suspend = {
+	.suspend = rmnet_early_suspend,
+	.resume = rmnet_late_resume,
+};
+
+static int __init rmnet_late_init(void)
+{
+	register_early_suspend(&rmnet_power_suspend);
+	return 0;
+}
+
+late_initcall(rmnet_late_init);
+#endif
+
+/* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */
+static int rmnet_cause_wakeup(struct rmnet_private *p)
+{
+	int ret = 0;
+	ktime_t now;
+	if (p->timeout_us == 0) /* Check if disabled */
+		return 0;
+
+	/* Use real (wall) time. */
+	now = ktime_get_real();
+
+	if (ktime_us_delta(now, p->last_packet) > p->timeout_us)
+		ret = 1;
+
+	p->last_packet = now;
+	return ret;
+}
+
+static ssize_t wakeups_xmit_show(struct device *d,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct rmnet_private *p = netdev_priv(to_net_dev(d));
+	return sprintf(buf, "%lu\n", p->wakeups_xmit);
+}
+
+DEVICE_ATTR(wakeups_xmit, 0444, wakeups_xmit_show, NULL);
+
+static ssize_t wakeups_rcv_show(struct device *d, struct device_attribute *attr,
+				char *buf)
+{
+	struct rmnet_private *p = netdev_priv(to_net_dev(d));
+	return sprintf(buf, "%lu\n", p->wakeups_rcv);
+}
+
+DEVICE_ATTR(wakeups_rcv, 0444, wakeups_rcv_show, NULL);
+
+/* Set timeout in us. */
+static ssize_t timeout_store(struct device *d, struct device_attribute *attr,
+			     const char *buf, size_t n)
+{
+#ifndef CONFIG_HAS_EARLYSUSPEND
+	struct rmnet_private *p = netdev_priv(to_net_dev(d));
+	p->timeout_us = timeout_us = strict_strtoul(buf, NULL, 10);
+#else
+/* If using early suspend/resume hooks do not write the value on store. */
+	timeout_us = strict_strtoul(buf, NULL, 10);
+#endif
+	return n;
+}
+
+static ssize_t timeout_show(struct device *d, struct device_attribute *attr,
+			    char *buf)
+{
+	return sprintf(buf, "%lu\n", timeout_us);
+}
+
+DEVICE_ATTR(timeout, 0664, timeout_show, timeout_store);
+#endif
+
+
+/* Forward declaration */
+static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+static __be16 rmnet_ip_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+	__be16 protocol = 0;
+
+	skb->dev = dev;
+
+	/* Determine L3 protocol */
+	switch (skb->data[0] & 0xf0) {
+	case 0x40:
+		protocol = htons(ETH_P_IP);
+		break;
+	case 0x60:
+		protocol = htons(ETH_P_IPV6);
+		break;
+	default:
+		pr_err("[%s] rmnet_recv() L3 protocol decode error: 0x%02x",
+		       dev->name, skb->data[0] & 0xf0);
+		/* skb will be dropped in upper layer for unknown protocol */
+	}
+	return protocol;
+}
+
+static int count_this_packet(void *_hdr, int len)
+{
+	struct ethhdr *hdr = _hdr;
+
+	if (len >= ETH_HLEN && hdr->h_proto == htons(ETH_P_ARP))
+		return 0;
+
+	return 1;
+}
+
+static int sdio_update_reset_state(struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	int	new_state;
+
+	new_state = msm_sdio_is_channel_in_reset(p->ch_id);
+
+	if (p->in_reset != new_state) {
+		p->in_reset = (uint8_t)new_state;
+
+		if (p->in_reset)
+			netif_carrier_off(dev);
+		else
+			netif_carrier_on(dev);
+		return 1;
+	}
+	return 0;
+}
+
+/* Rx Callback, Called in Work Queue context */
+static void sdio_recv_notify(void *dev, struct sk_buff *skb)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	unsigned long flags;
+	u32 opmode;
+
+	if (skb) {
+		skb->dev = dev;
+		/* Handle Rx frame format */
+		spin_lock_irqsave(&p->lock, flags);
+		opmode = p->operation_mode;
+		spin_unlock_irqrestore(&p->lock, flags);
+
+		if (RMNET_IS_MODE_IP(opmode)) {
+			/* Driver in IP mode */
+			skb->protocol = rmnet_ip_type_trans(skb, dev);
+		} else {
+			/* Driver in Ethernet mode */
+			skb->protocol = eth_type_trans(skb, dev);
+		}
+		if (RMNET_IS_MODE_IP(opmode) ||
+		    count_this_packet(skb->data, skb->len)) {
+#ifdef CONFIG_MSM_RMNET_DEBUG
+			p->wakeups_rcv += rmnet_cause_wakeup(p);
+#endif
+			p->stats.rx_packets++;
+			p->stats.rx_bytes += skb->len;
+		}
+		DBG1("[%s] Rx packet #%lu len=%d\n",
+			((struct net_device *)dev)->name,
+			p->stats.rx_packets, skb->len);
+
+		/* Deliver to network stack */
+		netif_rx(skb);
+	} else {
+		spin_lock_irqsave(&p->lock, flags);
+		if (!sdio_update_reset_state((struct net_device *)dev))
+			pr_err("[%s] %s: No skb received",
+				((struct net_device *)dev)->name, __func__);
+		spin_unlock_irqrestore(&p->lock, flags);
+	}
+}
+
+static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	int sdio_ret;
+	struct QMI_QOS_HDR_S *qmih;
+	u32 opmode;
+	unsigned long flags;
+
+	if (!netif_carrier_ok(dev)) {
+		pr_err("[%s] %s: channel in reset",
+			dev->name, __func__);
+		goto xmit_out;
+	}
+
+	/* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
+	spin_lock_irqsave(&p->lock, flags);
+	opmode = p->operation_mode;
+	spin_unlock_irqrestore(&p->lock, flags);
+
+	if (RMNET_IS_MODE_QOS(opmode)) {
+		qmih = (struct QMI_QOS_HDR_S *)
+			skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
+		qmih->version = 1;
+		qmih->flags = 0;
+		qmih->flow_id = skb->mark;
+	}
+
+	dev->trans_start = jiffies;
+	sdio_ret = msm_sdio_dmux_write(p->ch_id, skb);
+
+	if (sdio_ret != 0) {
+		pr_err("[%s] %s: write returned error %d",
+			dev->name, __func__, sdio_ret);
+		goto xmit_out;
+	}
+
+	if (count_this_packet(skb->data, skb->len)) {
+		p->stats.tx_packets++;
+		p->stats.tx_bytes += skb->len;
+#ifdef CONFIG_MSM_RMNET_DEBUG
+		p->wakeups_xmit += rmnet_cause_wakeup(p);
+#endif
+	}
+	DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
+	    dev->name, p->stats.tx_packets, skb->len, skb->mark);
+
+	return 0;
+xmit_out:
+	dev_kfree_skb_any(skb);
+	p->stats.tx_errors++;
+	return 0;
+}
+
+static void sdio_write_done(void *dev, struct sk_buff *skb)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+
+	if (skb)
+		dev_kfree_skb_any(skb);
+
+	if (!p->in_reset) {
+		DBG1("%s: write complete skb=%p\n", __func__, skb);
+
+		if (netif_queue_stopped(dev) &&
+				msm_sdio_dmux_is_ch_low(p->ch_id)) {
+			DBG0("%s: Low WM hit, waking queue=%p\n",
+					__func__, skb);
+			netif_wake_queue(dev);
+		}
+	} else {
+		DBG1("%s: write in reset skb=%p\n", __func__, skb);
+	}
+}
+
+static int __rmnet_open(struct net_device *dev)
+{
+	int r;
+	struct rmnet_private *p = netdev_priv(dev);
+
+	DBG0("[%s] __rmnet_open()\n", dev->name);
+
+	if (!p->device_up) {
+		r = msm_sdio_dmux_open(p->ch_id, dev,
+				       sdio_recv_notify, sdio_write_done);
+
+		if (r < 0)
+			return -ENODEV;
+	}
+
+	p->device_up = DEVICE_ACTIVE;
+	return 0;
+}
+
+static int rmnet_open(struct net_device *dev)
+{
+	int rc = 0;
+
+	DBG0("[%s] rmnet_open()\n", dev->name);
+
+	rc = __rmnet_open(dev);
+
+	if (rc == 0)
+		netif_start_queue(dev);
+
+	return rc;
+}
+
+
+static int __rmnet_close(struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	int rc = 0;
+
+	if (p->device_up) {
+		/* Do not close the rmnet port once it is up; doing so
+		   causes the remote side to hang if it is reopened. */
+		/* rc = msm_sdio_dmux_close(p->ch_id); */
+		p->device_up = DEVICE_INACTIVE;
+		return rc;
+	} else
+		return -EBADF;
+}
+
+
+static int rmnet_stop(struct net_device *dev)
+{
+	DBG0("[%s] rmnet_stop()\n", dev->name);
+
+	__rmnet_close(dev);
+	netif_stop_queue(dev);
+
+	return 0;
+}
+
+static int rmnet_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if (0 > new_mtu || RMNET_DATA_LEN < new_mtu)
+		return -EINVAL;
+
+	DBG0("[%s] MTU change: old=%d new=%d\n",
+		dev->name, dev->mtu, new_mtu);
+	dev->mtu = new_mtu;
+
+	return 0;
+}
+
+static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+
+	if (netif_queue_stopped(dev)) {
+		pr_err("[%s] fatal: rmnet_xmit called when "
+			"netif_queue is stopped", dev->name);
+		return 0;
+	}
+
+	_rmnet_xmit(skb, dev);
+
+	if (msm_sdio_dmux_is_ch_full(p->ch_id)) {
+		netif_stop_queue(dev);
+		DBG0("%s: High WM hit, stopping queue=%p\n", __func__, skb);
+	}
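+	/* The queue is restarted from sdio_write_done() once the DMUX
+	 * channel drains below its low watermark. */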
+
+	return 0;
+}
+
+static struct net_device_stats *rmnet_get_stats(struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	return &p->stats;
+}
+
+static void rmnet_set_multicast_list(struct net_device *dev)
+{
+}
+
+static void rmnet_tx_timeout(struct net_device *dev)
+{
+	pr_warning("[%s] rmnet_tx_timeout()\n", dev->name);
+}
+
+static const struct net_device_ops rmnet_ops_ether = {
+	.ndo_open = rmnet_open,
+	.ndo_stop = rmnet_stop,
+	.ndo_start_xmit = rmnet_xmit,
+	.ndo_get_stats = rmnet_get_stats,
+	.ndo_set_multicast_list = rmnet_set_multicast_list,
+	.ndo_tx_timeout = rmnet_tx_timeout,
+	.ndo_do_ioctl = rmnet_ioctl,
+	.ndo_change_mtu = rmnet_change_mtu,
+	.ndo_set_mac_address = eth_mac_addr,
+	.ndo_validate_addr = eth_validate_addr,
+};
+
+static const struct net_device_ops rmnet_ops_ip = {
+	.ndo_open = rmnet_open,
+	.ndo_stop = rmnet_stop,
+	.ndo_start_xmit = rmnet_xmit,
+	.ndo_get_stats = rmnet_get_stats,
+	.ndo_set_multicast_list = rmnet_set_multicast_list,
+	.ndo_tx_timeout = rmnet_tx_timeout,
+	.ndo_do_ioctl = rmnet_ioctl,
+	.ndo_change_mtu = rmnet_change_mtu,
+	.ndo_set_mac_address = 0,
+	.ndo_validate_addr = 0,
+};
+
+static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	u32 old_opmode = p->operation_mode;
+	unsigned long flags;
+	int prev_mtu = dev->mtu;
+	int rc = 0;
+
+	/* Process IOCTL command */
+	switch (cmd) {
+	case RMNET_IOCTL_SET_LLP_ETHERNET:  /* Set Ethernet protocol   */
+		/* Perform Ethernet config only if in IP mode currently */
+		if (p->operation_mode & RMNET_MODE_LLP_IP) {
+			ether_setup(dev);
+			random_ether_addr(dev->dev_addr);
+			dev->mtu = prev_mtu;
+
+			dev->netdev_ops = &rmnet_ops_ether;
+			spin_lock_irqsave(&p->lock, flags);
+			p->operation_mode &= ~RMNET_MODE_LLP_IP;
+			p->operation_mode |= RMNET_MODE_LLP_ETH;
+			spin_unlock_irqrestore(&p->lock, flags);
+			DBG0("[%s] rmnet_ioctl(): "
+				"set Ethernet protocol mode\n",
+				dev->name);
+		}
+		break;
+
+	case RMNET_IOCTL_SET_LLP_IP:        /* Set RAWIP protocol      */
+		/* Perform IP config only if in Ethernet mode currently */
+		if (p->operation_mode & RMNET_MODE_LLP_ETH) {
+
+			/* Undo config done in ether_setup() */
+			dev->header_ops         = 0;  /* No header */
+			dev->type               = ARPHRD_RAWIP;
+			dev->hard_header_len    = 0;
+			dev->mtu                = prev_mtu;
+			dev->addr_len           = 0;
+			dev->flags              &= ~(IFF_BROADCAST|
+						     IFF_MULTICAST);
+
+			dev->needed_headroom = HEADROOM_FOR_SDIO +
+			  HEADROOM_FOR_QOS;
+			dev->needed_tailroom = TAILROOM;
+			dev->netdev_ops = &rmnet_ops_ip;
+			spin_lock_irqsave(&p->lock, flags);
+			p->operation_mode &= ~RMNET_MODE_LLP_ETH;
+			p->operation_mode |= RMNET_MODE_LLP_IP;
+			spin_unlock_irqrestore(&p->lock, flags);
+			DBG0("[%s] rmnet_ioctl(): "
+				"set IP protocol mode\n",
+				dev->name);
+		}
+		break;
+
+	case RMNET_IOCTL_GET_LLP:           /* Get link protocol state */
+		ifr->ifr_ifru.ifru_data =
+			(void *)(p->operation_mode &
+				 (RMNET_MODE_LLP_ETH|RMNET_MODE_LLP_IP));
+		break;
+
+	case RMNET_IOCTL_SET_QOS_ENABLE:    /* Set QoS header enabled  */
+		spin_lock_irqsave(&p->lock, flags);
+		p->operation_mode |= RMNET_MODE_QOS;
+		spin_unlock_irqrestore(&p->lock, flags);
+		DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n",
+			dev->name);
+		break;
+
+	case RMNET_IOCTL_SET_QOS_DISABLE:   /* Set QoS header disabled */
+		spin_lock_irqsave(&p->lock, flags);
+		p->operation_mode &= ~RMNET_MODE_QOS;
+		spin_unlock_irqrestore(&p->lock, flags);
+		DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n",
+			dev->name);
+		break;
+
+	case RMNET_IOCTL_GET_QOS:           /* Get QoS header state    */
+		ifr->ifr_ifru.ifru_data =
+			(void *)(p->operation_mode & RMNET_MODE_QOS);
+		break;
+
+	case RMNET_IOCTL_GET_OPMODE:        /* Get operation mode      */
+		ifr->ifr_ifru.ifru_data = (void *)p->operation_mode;
+		break;
+
+	case RMNET_IOCTL_OPEN:              /* Open transport port     */
+		rc = __rmnet_open(dev);
+		DBG0("[%s] rmnet_ioctl(): open transport port\n",
+			dev->name);
+		break;
+
+	case RMNET_IOCTL_CLOSE:             /* Close transport port    */
+		rc = __rmnet_close(dev);
+		DBG0("[%s] rmnet_ioctl(): close transport port\n",
+			dev->name);
+		break;
+
+	default:
+		pr_err("[%s] error: rmnet_ioctl called for unsupported cmd[%d]",
+			dev->name, cmd);
+		return -EINVAL;
+	}
+
+	DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08x\n",
+		dev->name, __func__, cmd, old_opmode, p->operation_mode);
+	return rc;
+}
+
+static void __init rmnet_setup(struct net_device *dev)
+{
+	/* Using Ethernet mode by default */
+	dev->netdev_ops = &rmnet_ops_ether;
+	ether_setup(dev);
+
+	/* set this after calling ether_setup */
+	dev->mtu = RMNET_DATA_LEN;
+	dev->needed_headroom = HEADROOM_FOR_SDIO + HEADROOM_FOR_QOS;
+	dev->needed_tailroom = TAILROOM;
+	random_ether_addr(dev->dev_addr);
+
+	dev->watchdog_timeo = 1000; /* 10 seconds at HZ=100 */
+}
+
+
+static int __init rmnet_init(void)
+{
+	int ret;
+	struct device *d;
+	struct net_device *dev;
+	struct rmnet_private *p;
+	unsigned n;
+
+	pr_info("%s: SDIO devices[%d]\n", __func__, RMNET_DEVICE_COUNT);
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+	timeout_us = 0;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	timeout_suspend_us = 0;
+#endif
+#endif
+
+	for (n = 0; n < RMNET_DEVICE_COUNT; n++) {
+		dev = alloc_netdev(sizeof(struct rmnet_private),
+				   "rmnet_sdio%d", rmnet_setup);
+
+		if (!dev)
+			return -ENOMEM;
+
+		d = &(dev->dev);
+		p = netdev_priv(dev);
+		/* Initial config uses Ethernet */
+		p->operation_mode = RMNET_MODE_LLP_ETH;
+		p->ch_id = n;
+		spin_lock_init(&p->lock);
+#ifdef CONFIG_MSM_RMNET_DEBUG
+		p->timeout_us = timeout_us;
+		p->wakeups_xmit = p->wakeups_rcv = 0;
+#endif
+
+		ret = register_netdev(dev);
+		if (ret) {
+			free_netdev(dev);
+			return ret;
+		}
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+		if (device_create_file(d, &dev_attr_timeout))
+			continue;
+		if (device_create_file(d, &dev_attr_wakeups_xmit))
+			continue;
+		if (device_create_file(d, &dev_attr_wakeups_rcv))
+			continue;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+		if (device_create_file(d, &dev_attr_timeout_suspend))
+			continue;
+
+		/* Only care about rmnet0 for suspend/resume timeout hooks. */
+		if (n == 0)
+			rmnet0 = d;
+#endif
+#endif
+	}
+	return 0;
+}
+
+module_init(rmnet_init);
+MODULE_DESCRIPTION("MSM RMNET SDIO TRANSPORT");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/net/qfec.c b/drivers/net/qfec.c
new file mode 100644
index 0000000..90e8eff
--- /dev/null
+++ b/drivers/net/qfec.c
@@ -0,0 +1,2521 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+
+#include <linux/platform_device.h>
+
+#include <linux/types.h>        /* size_t */
+#include <linux/interrupt.h>    /* mark_bh */
+
+#include <linux/netdevice.h>   /* struct device, and other headers */
+#include <linux/etherdevice.h> /* eth_type_trans */
+#include <linux/skbuff.h>
+
+#include <linux/proc_fs.h>
+#include <linux/timer.h>
+#include <linux/mii.h>
+
+#include <linux/ethtool.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
+#include <linux/inet.h>
+
+#include "qfec.h"
+
+#define QFEC_NAME       "qfec"
+#define QFEC_DRV_VER    "June 18a 2011"
+
+#define ETH_BUF_SIZE    0x600
+#define MAX_N_BD        50
+#define MAC_ADDR_SIZE	6
+
+#define RX_TX_BD_RATIO  8
+#define RX_BD_NUM       32
+#define TX_BD_NUM       (RX_BD_NUM * RX_TX_BD_RATIO)
+#define TX_BD_TI_RATIO  4
+
+/*
+ * logging macros
+ */
+#define QFEC_LOG_PR     1
+#define QFEC_LOG_DBG    2
+#define QFEC_LOG_DBG2   4
+#define QFEC_LOG_MDIO_W 8
+#define QFEC_LOG_MDIO_R 16
+
+static int qfec_debug = QFEC_LOG_PR;
+
+#ifdef QFEC_DEBUG
+# define QFEC_LOG(flag, ...)                    \
+	do {                                    \
+		if (flag & qfec_debug)          \
+			pr_info(__VA_ARGS__);  \
+	} while (0)
+#else
+# define QFEC_LOG(flag, ...)
+#endif
+
+#define QFEC_LOG_ERR(...) pr_err(__VA_ARGS__)
+
+/*
+ * driver buffer-descriptor
+ *   contains the 4 word HW descriptor plus an additional 4-words.
+ *   (See the DSL bits in the BUS-Mode register).
+ */
+#define BD_FLAG_LAST_BD     1
+
+struct buf_desc {
+	struct qfec_buf_desc   *p_desc;
+	struct sk_buff         *skb;
+	void                   *buf_virt_addr;
+	void                   *buf_phys_addr;
+	uint32_t                last_bd_flag;
+};
+
+/*
+ * inline functions accessing non-struct qfec_buf_desc elements
+ */
+
+/* skb */
+static inline struct sk_buff *qfec_bd_skbuf_get(struct buf_desc *p_bd)
+{
+	return p_bd->skb;
+};
+
+static inline void qfec_bd_skbuf_set(struct buf_desc *p_bd, struct sk_buff *p)
+{
+	p_bd->skb   = p;
+};
+
+/* virtual addr  */
+static inline void qfec_bd_virt_set(struct buf_desc *p_bd, void *addr)
+{
+	p_bd->buf_virt_addr = addr;
+};
+
+static inline void *qfec_bd_virt_get(struct buf_desc *p_bd)
+{
+	return p_bd->buf_virt_addr;
+};
+
+/* physical addr  */
+static inline void qfec_bd_phys_set(struct buf_desc *p_bd, void *addr)
+{
+	p_bd->buf_phys_addr = addr;
+};
+
+static inline void *qfec_bd_phys_get(struct buf_desc *p_bd)
+{
+	return p_bd->buf_phys_addr;
+};
+
+/* last_bd_flag */
+static inline uint32_t qfec_bd_last_bd(struct buf_desc *p_bd)
+{
+	return (p_bd->last_bd_flag != 0);
+};
+
+static inline void qfec_bd_last_bd_set(struct buf_desc *p_bd)
+{
+	p_bd->last_bd_flag = BD_FLAG_LAST_BD;
+};
+
+/*
+ * inline functions accessing struct qfec_buf_desc elements
+ */
+
+/* ownership bit */
+static inline uint32_t qfec_bd_own(struct buf_desc *p_bd)
+{
+	return p_bd->p_desc->status & BUF_OWN;
+};
+
+static inline void qfec_bd_own_set(struct buf_desc *p_bd)
+{
+	p_bd->p_desc->status |= BUF_OWN;
+};
+
+static inline void qfec_bd_own_clr(struct buf_desc *p_bd)
+{
+	p_bd->p_desc->status &= ~(BUF_OWN);
+};
+
+static inline uint32_t qfec_bd_status_get(struct buf_desc *p_bd)
+{
+	return p_bd->p_desc->status;
+};
+
+static inline void qfec_bd_status_set(struct buf_desc *p_bd, uint32_t status)
+{
+	p_bd->p_desc->status = status;
+};
+
+static inline uint32_t qfec_bd_status_len(struct buf_desc *p_bd)
+{
+	return BUF_RX_FL_GET((*p_bd->p_desc));
+};
+
+/* control register */
+static inline void qfec_bd_ctl_reset(struct buf_desc *p_bd)
+{
+	p_bd->p_desc->ctl  = 0;
+};
+
+static inline uint32_t qfec_bd_ctl_get(struct buf_desc *p_bd)
+{
+	return p_bd->p_desc->ctl;
+};
+
+static inline void qfec_bd_ctl_set(struct buf_desc *p_bd, uint32_t val)
+{
+	p_bd->p_desc->ctl |= val;
+};
+
+static inline void qfec_bd_ctl_wr(struct buf_desc *p_bd, uint32_t val)
+{
+	p_bd->p_desc->ctl = val;
+};
+
+/* pbuf register  */
+static inline void *qfec_bd_pbuf_get(struct buf_desc *p_bd)
+{
+	return p_bd->p_desc->p_buf;
+}
+
+static inline void qfec_bd_pbuf_set(struct buf_desc *p_bd, void *p)
+{
+	p_bd->p_desc->p_buf = p;
+}
+
+/* next register */
+static inline void *qfec_bd_next_get(struct buf_desc *p_bd)
+{
+	return p_bd->p_desc->next;
+};
+
+/*
+ * initialize an RX BD w/ a new buf
+ */
+static int qfec_rbd_init(struct net_device *dev, struct buf_desc *p_bd)
+{
+	struct sk_buff     *skb;
+	void               *p;
+	void               *v;
+
+	/* allocate and record ptrs for sk buff */
+	skb   = dev_alloc_skb(ETH_BUF_SIZE);
+	if (!skb)
+		goto err;
+
+	qfec_bd_skbuf_set(p_bd, skb);
+
+	v = skb_put(skb, ETH_BUF_SIZE);
+	qfec_bd_virt_set(p_bd, v);
+
+	p = (void *) dma_map_single(&dev->dev,
+		(void *)skb->data, ETH_BUF_SIZE, DMA_FROM_DEVICE);
+	qfec_bd_pbuf_set(p_bd, p);
+	qfec_bd_phys_set(p_bd, p);
+
+	/* populate the control word; set the end-of-ring bit on the last BD */
+	qfec_bd_ctl_wr(p_bd, ETH_BUF_SIZE |
+		(qfec_bd_last_bd(p_bd) ? BUF_RX_RER : 0));
+
+	qfec_bd_status_set(p_bd, BUF_OWN);
+
+	if (!(qfec_debug & QFEC_LOG_DBG2))
+		return 0;
+
+	/* debug messages */
+	QFEC_LOG(QFEC_LOG_DBG2, "%s: %p bd\n", __func__, p_bd);
+
+	QFEC_LOG(QFEC_LOG_DBG2, "%s: %p skb\n", __func__, skb);
+
+	QFEC_LOG(QFEC_LOG_DBG2,
+		"%s: %p p_bd, %p data, %p skb_put, %p virt, %p p_buf, %p p\n",
+		__func__, (void *)p_bd,
+		(void *)skb->data, v, /*(void *)skb_put(skb, ETH_BUF_SIZE), */
+		(void *)qfec_bd_virt_get(p_bd), (void *)qfec_bd_pbuf_get(p_bd),
+		(void *)p);
+
+	return 0;
+
+err:
+	return -ENOMEM;
+};
+
+/*
+ * ring structure used to maintain indices of buffer-descriptor (BD) usage
+ *
+ *   The RX BDs are normally all pre-allocated, with buffers available to
+ *   be DMA'd into with received frames.  The head indicates the first
+ *   BD/buffer containing a received frame, and the tail indicates the
+ *   oldest BD/buffer that needs to be restored for use.  Head and tail
+ *   are both initialized to zero, and n_free is initialized to zero,
+ *   since all BDs are initialized.
+ *
+ *   The TX BDs are normally available for use, only being initialized as
+ *   TX frames are requested for transmission.  The head indicates the
+ *   first available BD, and the tail indicates the oldest BD that has not
+ *   been acknowledged as transmitted.  Head and tail are both initialized
+ *   to zero, and n_free is initialized to len, since all are available
+ *   for use.
+ */
+struct ring {
+	int     head;
+	int     tail;
+	int     n_free;
+	int     len;
+};
+
+/* accessor inline functions for struct ring */
+static inline void qfec_ring_init(struct ring *p_ring, int size, int free)
+{
+	p_ring->head  = p_ring->tail = 0;
+	p_ring->len   = size;
+	p_ring->n_free = free;
+}
+
+static inline int qfec_ring_full(struct ring *p_ring)
+{
+	return (p_ring->n_free == 0);
+};
+
+static inline int qfec_ring_empty(struct ring *p_ring)
+{
+	return (p_ring->n_free == p_ring->len);
+}
+
+static inline void qfec_ring_head_adv(struct ring *p_ring)
+{
+	p_ring->head = (p_ring->head + 1) % p_ring->len;
+	p_ring->n_free--;
+};
+
+static inline void qfec_ring_tail_adv(struct ring *p_ring)
+{
+	p_ring->tail = (p_ring->tail + 1) % p_ring->len;
+	p_ring->n_free++;
+};
+
+static inline int qfec_ring_head(struct ring *p_ring)
+{
+	return p_ring->head;
+};
+
+static inline int qfec_ring_tail(struct ring *p_ring)
+{
+	return p_ring->tail;
+};
+
+static inline int qfec_ring_room(struct ring *p_ring)
+{
+	return p_ring->n_free;
+};
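+
+/*
+ * Example (hypothetical values): a 4-entry TX ring starts at head=0,
+ * tail=0, n_free=4.  Queueing two frames advances head twice (head=2,
+ * n_free=2); one transmit completion then advances tail (tail=1,
+ * n_free=3).  An RX ring of the same size starts with n_free=0 since
+ * every BD already owns a buffer.
+ */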
+
+/*
+ * counters track normal and abnormal driver events and activity
+ */
+enum cntr {
+	isr                  =  0,
+	fatal_bus,
+
+	early_tx,
+	tx_no_resource,
+	tx_proc_stopped,
+	tx_jabber_tmout,
+
+	xmit,
+	tx_int,
+	tx_isr,
+	tx_owned,
+	tx_underflow,
+
+	tx_replenish,
+	tx_skb_null,
+	tx_timeout,
+	tx_too_large,
+
+	gmac_isr,
+
+	/* half */
+	norm_int,
+	abnorm_int,
+
+	early_rx,
+	rx_buf_unavail,
+	rx_proc_stopped,
+	rx_watchdog,
+
+	netif_rx_cntr,
+	rx_int,
+	rx_isr,
+	rx_owned,
+	rx_overflow,
+
+	rx_dropped,
+	rx_skb_null,
+	queue_start,
+	queue_stop,
+
+	rx_paddr_nok,
+	ts_ioctl,
+	ts_tx_en,
+	ts_tx_rtn,
+
+	ts_rec,
+	cntr_last,
+};
+
+static char *cntr_name[]  = {
+	"isr",
+	"fatal_bus",
+
+	"early_tx",
+	"tx_no_resource",
+	"tx_proc_stopped",
+	"tx_jabber_tmout",
+
+	"xmit",
+	"tx_int",
+	"tx_isr",
+	"tx_owned",
+	"tx_underflow",
+
+	"tx_replenish",
+	"tx_skb_null",
+	"tx_timeout",
+	"tx_too_large",
+
+	"gmac_isr",
+
+	/* half */
+	"norm_int",
+	"abnorm_int",
+
+	"early_rx",
+	"rx_buf_unavail",
+	"rx_proc_stopped",
+	"rx_watchdog",
+
+	"netif_rx",
+	"rx_int",
+	"rx_isr",
+	"rx_owned",
+	"rx_overflow",
+
+	"rx_dropped",
+	"rx_skb_null",
+	"queue_start",
+	"queue_stop",
+
+	"rx_paddr_nok",
+	"ts_ioctl",
+	"ts_tx_en",
+	"ts_tx_rtn",
+
+	"ts_rec",
+	""
+};
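+
+/* cntr_name[] must stay index-aligned with enum cntr above; the "half"
+ * markers split the lists into the two columns printed by
+ * qfec_cntrs_show().
+ */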
+
+/*
+ * private data
+ */
+
+static struct net_device  *qfec_dev;
+
+enum qfec_state {
+	timestamping  = 0x04,
+};
+
+struct qfec_priv {
+	struct net_device      *net_dev;
+	struct net_device_stats stats;            /* req statistics */
+
+	struct device           dev;
+
+	spinlock_t              xmit_lock;
+	spinlock_t              mdio_lock;
+
+	unsigned int            state;            /* driver state */
+
+	unsigned int            bd_size;          /* buf-desc alloc size */
+	struct qfec_buf_desc   *bd_base;          /* ptr to qfec buf-desc array */
+	dma_addr_t              tbd_dma;          /* dma/phy-addr buf-desc */
+	dma_addr_t              rbd_dma;          /* dma/phy-addr buf-desc */
+
+	struct resource        *mac_res;
+	void                   *mac_base;         /* mac (virt) base address */
+
+	struct resource        *clk_res;
+	void                   *clk_base;         /* clk (virt) base address */
+
+	struct resource        *fuse_res;
+	void                   *fuse_base;        /* mac addr fuses */
+
+	unsigned int            n_tbd;            /* # of TX buf-desc */
+	struct ring             ring_tbd;         /* TX ring */
+	struct buf_desc        *p_tbd;
+	unsigned int            tx_ic_mod;        /* (%) val for setting IC */
+
+	unsigned int            n_rbd;            /* # of RX buf-desc */
+	struct ring             ring_rbd;         /* RX ring */
+	struct buf_desc        *p_rbd;
+
+	struct buf_desc        *p_latest_rbd;
+	struct buf_desc        *p_ending_rbd;
+
+	unsigned long           cntr[cntr_last];  /* activity counters */
+
+	struct mii_if_info      mii;              /* used by mii lib */
+
+	int                     mdio_clk;         /* phy mdio clock rate */
+	int                     phy_id;           /* default PHY addr (0) */
+	struct timer_list       phy_tmr;          /* monitor PHY state */
+};
+
+/*
+ * cntrs display
+ */
+
+static int qfec_cntrs_show(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	struct qfec_priv        *priv = netdev_priv(to_net_dev(dev));
+	int                      h    = (cntr_last + 1) / 2;
+	int                      l;
+	int                      n;
+	int                      count = PAGE_SIZE;
+
+	QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
+
+	l = snprintf(&buf[0], count, "%s:\n", __func__);
+	for (n = 0; n < h; n++)  {
+		l += snprintf(&buf[l], count - l,
+			"      %12lu  %-16s %12lu  %s\n",
+			priv->cntr[n],   cntr_name[n],
+			priv->cntr[n+h], cntr_name[n+h]);
+	}
+
+	return l;
+}
+
+# define CNTR_INC(priv, name)  (priv->cntr[name]++)
+
+/*
+ * functions that manage state
+ */
+static inline void qfec_queue_start(struct net_device *dev)
+{
+	struct qfec_priv  *priv = netdev_priv(dev);
+
+	if (netif_queue_stopped(dev)) {
+		netif_wake_queue(dev);
+		CNTR_INC(priv, queue_start);
+	}
+};
+
+static inline void qfec_queue_stop(struct net_device *dev)
+{
+	struct qfec_priv  *priv = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+	CNTR_INC(priv, queue_stop);
+};
+
+/*
+ * functions to access and initialize the MAC registers
+ */
+static inline uint32_t qfec_reg_read(struct qfec_priv *priv, uint32_t reg)
+{
+	return ioread32((void *) (priv->mac_base + reg));
+}
+
+static void qfec_reg_write(struct qfec_priv *priv, uint32_t reg, uint32_t val)
+{
+	uint32_t    addr = (uint32_t)priv->mac_base + reg;
+
+	QFEC_LOG(QFEC_LOG_DBG2, "%s: %08x <- %08x\n", __func__, addr, val);
+	iowrite32(val, (void *)addr);
+}
+
+/*
+ * speed/duplex/pause  settings
+ */
+static int qfec_config_show(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	struct qfec_priv        *priv = netdev_priv(to_net_dev(dev));
+	int                      cfg  = qfec_reg_read(priv, MAC_CONFIG_REG);
+	int                      flow = qfec_reg_read(priv, FLOW_CONTROL_REG);
+	int                      l    = 0;
+	int                      count = PAGE_SIZE;
+
+	QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
+
+	l += snprintf(&buf[l], count, "%s:", __func__);
+
+	l += snprintf(&buf[l], count - l, "  [0x%08x] %4dM %s %s", cfg,
+		(cfg & MAC_CONFIG_REG_PS)
+			? ((cfg & MAC_CONFIG_REG_FES) ? 100 : 10) : 1000,
+		cfg & MAC_CONFIG_REG_DM ? "FD" : "HD",
+		cfg & MAC_CONFIG_REG_IPC ? "IPC" : "NoIPC");
+
+	flow &= FLOW_CONTROL_RFE | FLOW_CONTROL_TFE;
+	l += snprintf(&buf[l], count - l, "  [0x%08x] %s", flow,
+		(flow == (FLOW_CONTROL_RFE | FLOW_CONTROL_TFE)) ? "PAUSE"
+			: ((flow == FLOW_CONTROL_RFE) ? "RX-PAUSE"
+			: ((flow == FLOW_CONTROL_TFE) ? "TX-PAUSE" : "")));
+
+	l += snprintf(&buf[l], count - l, " %s", QFEC_DRV_VER);
+	l += snprintf(&buf[l], count - l, "\n");
+	return l;
+}
+
+
+/*
+ * table and functions to initialize controller registers
+ */
+
+struct reg_entry {
+	unsigned int  rdonly;
+	unsigned int  addr;
+	char         *label;
+	unsigned int  val;
+};
+
+static struct reg_entry  qfec_reg_tbl[] = {
+	{ 0, BUS_MODE_REG,           "BUS_MODE_REG",     BUS_MODE_REG_DEFAULT },
+	{ 0, AXI_BUS_MODE_REG,       "AXI_BUS_MODE_REG", AXI_BUS_MODE_DEFAULT },
+	{ 0, AXI_STATUS_REG,         "AXI_STATUS_REG",     0 },
+
+	{ 0, MAC_ADR_0_HIGH_REG,     "MAC_ADR_0_HIGH_REG", 0x00000302 },
+	{ 0, MAC_ADR_0_LOW_REG,      "MAC_ADR_0_LOW_REG",  0x01350702 },
+
+	{ 1, RX_DES_LST_ADR_REG,     "RX_DES_LST_ADR_REG", 0 },
+	{ 1, TX_DES_LST_ADR_REG,     "TX_DES_LST_ADR_REG", 0 },
+	{ 1, STATUS_REG,             "STATUS_REG",         0 },
+	{ 1, DEBUG_REG,              "DEBUG_REG",          0 },
+
+	{ 0, INTRP_EN_REG,           "INTRP_EN_REG",       QFEC_INTRP_SETUP},
+
+	{ 1, CUR_HOST_TX_DES_REG,    "CUR_HOST_TX_DES_REG",    0 },
+	{ 1, CUR_HOST_RX_DES_REG,    "CUR_HOST_RX_DES_REG",    0 },
+	{ 1, CUR_HOST_TX_BU_ADR_REG, "CUR_HOST_TX_BU_ADR_REG", 0 },
+	{ 1, CUR_HOST_RX_BU_ADR_REG, "CUR_HOST_RX_BU_ADR_REG", 0 },
+
+	{ 1, MAC_FR_FILTER_REG,      "MAC_FR_FILTER_REG",      0 },
+
+	{ 0, MAC_CONFIG_REG,         "MAC_CONFIG_REG",    MAC_CONFIG_REG_SPD_1G
+							| MAC_CONFIG_REG_DM
+							| MAC_CONFIG_REG_TE
+							| MAC_CONFIG_REG_RE
+							| MAC_CONFIG_REG_IPC },
+
+	{ 1, INTRP_STATUS_REG,       "INTRP_STATUS_REG",   0 },
+	{ 1, INTRP_MASK_REG,         "INTRP_MASK_REG",     0 },
+
+	{ 0, OPER_MODE_REG,          "OPER_MODE_REG",  OPER_MODE_REG_DEFAULT },
+
+	{ 1, GMII_ADR_REG,           "GMII_ADR_REG",           0 },
+	{ 1, GMII_DATA_REG,          "GMII_DATA_REG",          0 },
+
+	{ 0, MMC_INTR_MASK_RX_REG,   "MMC_INTR_MASK_RX_REG",   0xFFFFFFFF },
+	{ 0, MMC_INTR_MASK_TX_REG,   "MMC_INTR_MASK_TX_REG",   0xFFFFFFFF },
+
+	{ 1, TS_HIGH_REG,            "TS_HIGH_REG",            0 },
+	{ 1, TS_LOW_REG,             "TS_LOW_REG",             0 },
+
+	{ 1, TS_HI_UPDT_REG,         "TS_HI_UPDATE_REG",       0 },
+	{ 1, TS_LO_UPDT_REG,         "TS_LO_UPDATE_REG",       0 },
+	{ 0, TS_SUB_SEC_INCR_REG,    "TS_SUB_SEC_INCR_REG",    86 },
+
+	{ 0, TS_CTL_REG,             "TS_CTL_REG",        TS_CTL_TSENALL
+							| TS_CTL_TSCTRLSSR
+							| TS_CTL_TSINIT
+							| TS_CTL_TSENA },
+};
+
+static void qfec_reg_init(struct qfec_priv *priv)
+{
+	struct reg_entry *p = qfec_reg_tbl;
+	int         n = ARRAY_SIZE(qfec_reg_tbl);
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+	for  (; n--; p++) {
+		if (!p->rdonly)
+			qfec_reg_write(priv, p->addr, p->val);
+	}
+}
+
+/*
+ * display registers thru sysfs
+ */
+static int qfec_reg_show(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	struct qfec_priv   *priv = netdev_priv(to_net_dev(dev));
+	struct reg_entry   *p = qfec_reg_tbl;
+	int                 n = ARRAY_SIZE(qfec_reg_tbl);
+	int                 l = 0;
+	int                 count = PAGE_SIZE;
+
+	QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
+
+	for (; n--; p++) {
+		l += snprintf(&buf[l], count - l, "    %8p   %04x %08x  %s\n",
+			(void *)priv->mac_base + p->addr, p->addr,
+			qfec_reg_read(priv, p->addr), p->label);
+	}
+
+	return  l;
+}
+
+/*
+ * set the MAC-0 address
+ */
+static void qfec_set_adr_regs(struct qfec_priv *priv, uint8_t *addr)
+{
+	uint32_t        h = 0;
+	uint32_t        l = 0;
+
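+	/* Bytes 5:4 of the address go to the high register and bytes 3:0
+	 * to the low register; e.g. a hypothetical MAC 00:01:02:03:05:07
+	 * yields h=0x00000705 and l=0x03020100. */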
+	h = h << 8 | addr[5];
+	h = h << 8 | addr[4];
+
+	l = l << 8 | addr[3];
+	l = l << 8 | addr[2];
+	l = l << 8 | addr[1];
+	l = l << 8 | addr[0];
+
+	qfec_reg_write(priv, MAC_ADR_0_HIGH_REG, h);
+	qfec_reg_write(priv, MAC_ADR_0_LOW_REG,  l);
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s: %08x %08x\n", __func__, h, l);
+}
+
+/*
+ * reset the controller
+ */
+
+#define QFEC_RESET_TIMEOUT   10000
+	/* The reset bit should always clear, but did not without a
+	 * test/delay in RGMII mode; no maximum timeout is specified.
+	 */
+
+static int qfec_hw_reset(struct qfec_priv *priv)
+{
+	int             timeout = QFEC_RESET_TIMEOUT;
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+	qfec_reg_write(priv, BUS_MODE_REG, BUS_MODE_SWR);
+
+	while (qfec_reg_read(priv, BUS_MODE_REG) & BUS_MODE_SWR) {
+		if (timeout-- == 0) {
+			QFEC_LOG_ERR("%s: timeout\n", __func__);
+			return -ETIME;
+		}
+
+		/* there were problems resetting the controller
+		 * in RGMII mode when there wasn't sufficient
+		 * delay between register reads
+		 */
+		usleep_range(100, 200);
+	}
+
+	return 0;
+}
+
+/*
+ * initialize controller
+ */
+static int qfec_hw_init(struct qfec_priv *priv)
+{
+	int  res = 0;
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+	res = qfec_hw_reset(priv);
+	if (res)
+		return res;
+
+	qfec_reg_init(priv);
+
+	/* config buf-desc locations */
+	qfec_reg_write(priv, TX_DES_LST_ADR_REG, priv->tbd_dma);
+	qfec_reg_write(priv, RX_DES_LST_ADR_REG, priv->rbd_dma);
+
+	/* clear interrupts */
+	qfec_reg_write(priv, STATUS_REG, INTRP_EN_REG_NIE | INTRP_EN_REG_RIE
+		| INTRP_EN_REG_TIE | INTRP_EN_REG_TUE | INTRP_EN_REG_ETE);
+
+	return res;
+}
+
+/*
+ * en/disable controller
+ */
+static void qfec_hw_enable(struct qfec_priv *priv)
+{
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+	qfec_reg_write(priv, OPER_MODE_REG,
+	qfec_reg_read(priv, OPER_MODE_REG)
+		| OPER_MODE_REG_ST | OPER_MODE_REG_SR);
+}
+
+static void qfec_hw_disable(struct qfec_priv *priv)
+{
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+	qfec_reg_write(priv, OPER_MODE_REG,
+	qfec_reg_read(priv, OPER_MODE_REG)
+		& ~(OPER_MODE_REG_ST | OPER_MODE_REG_SR));
+}
+
+/*
+ * interface selection
+ */
+struct intf_config  {
+	uint32_t     intf_sel;
+	uint32_t     emac_ns;
+	uint32_t     eth_x_en_ns;
+	uint32_t     clkmux_sel;
+};
+
+#define ETH_X_EN_NS_REVMII      (ETH_X_EN_NS_DEFAULT | ETH_TX_CLK_INV)
+#define CLKMUX_REVMII           (EMAC_CLKMUX_SEL_0 | EMAC_CLKMUX_SEL_1)
+
+static struct intf_config intf_config_tbl[] = {
+	{ EMAC_PHY_INTF_SEL_MII,    EMAC_NS_DEFAULT, ETH_X_EN_NS_DEFAULT, 0 },
+	{ EMAC_PHY_INTF_SEL_RGMII,  EMAC_NS_DEFAULT, ETH_X_EN_NS_DEFAULT, 0 },
+	{ EMAC_PHY_INTF_SEL_REVMII, EMAC_NS_DEFAULT, ETH_X_EN_NS_REVMII,
+								CLKMUX_REVMII }
+};
+
+/*
+ * emac clk register read and write functions
+ */
+static inline uint32_t qfec_clkreg_read(struct qfec_priv *priv, uint32_t reg)
+{
+	return ioread32((void *) (priv->clk_base + reg));
+}
+
+static inline void qfec_clkreg_write(struct qfec_priv *priv,
+	uint32_t reg, uint32_t val)
+{
+	uint32_t   addr = (uint32_t)priv->clk_base + reg;
+
+	QFEC_LOG(QFEC_LOG_DBG2, "%s: %08x <- %08x\n", __func__, addr, val);
+	iowrite32(val, (void *)addr);
+}
+
+/*
+ * configure the PHY interface and clock routing and signal bits
+ */
+enum phy_intfc  {
+	intfc_mii     = 0,
+	intfc_rgmii   = 1,
+	intfc_revmii  = 2,
+};
+
+static int qfec_intf_sel(struct qfec_priv *priv, unsigned int intfc)
+{
+	struct intf_config   *p;
+
+	QFEC_LOG(QFEC_LOG_DBG2, "%s: %d\n", __func__, intfc);
+
+	if (intfc > intfc_revmii)  {
+		QFEC_LOG_ERR("%s: range\n", __func__);
+		return -ENXIO;
+	}
+
+	p = &intf_config_tbl[intfc];
+
+	qfec_clkreg_write(priv, EMAC_PHY_INTF_SEL_REG, p->intf_sel);
+	qfec_clkreg_write(priv, EMAC_NS_REG,           p->emac_ns);
+	qfec_clkreg_write(priv, ETH_X_EN_NS_REG,       p->eth_x_en_ns);
+	qfec_clkreg_write(priv, EMAC_CLKMUX_SEL_REG,   p->clkmux_sel);
+
+	return 0;
+}
+
+/*
+ * display the clock registers via sysfs
+ */
+static struct qfec_clk_reg {
+	uint32_t        offset;
+	char           *label;
+} qfec_clk_regs[] = {
+	{ ETH_MD_REG,                  "ETH_MD_REG"  },
+	{ ETH_NS_REG,                  "ETH_NS_REG"  },
+	{ ETH_X_EN_NS_REG,             "ETH_X_EN_NS_REG"  },
+	{ EMAC_PTP_MD_REG,             "EMAC_PTP_MD_REG"  },
+	{ EMAC_PTP_NS_REG,             "EMAC_PTP_NS_REG"  },
+	{ EMAC_NS_REG,                 "EMAC_NS_REG"  },
+	{ EMAC_TX_FS_REG,              "EMAC_TX_FS_REG"  },
+	{ EMAC_RX_FS_REG,              "EMAC_RX_FS_REG"  },
+	{ EMAC_PHY_INTF_SEL_REG,       "EMAC_PHY_INTF_SEL_REG"  },
+	{ EMAC_PHY_ADDR_REG,           "EMAC_PHY_ADDR_REG"  },
+	{ EMAC_REVMII_PHY_ADDR_REG,    "EMAC_REVMII_PHY_ADDR_REG"  },
+	{ EMAC_CLKMUX_SEL_REG,         "EMAC_CLKMUX_SEL_REG"  },
+};
+
+static int qfec_clk_reg_show(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	struct qfec_priv        *priv = netdev_priv(to_net_dev(dev));
+	struct qfec_clk_reg     *p = qfec_clk_regs;
+	int                      n = ARRAY_SIZE(qfec_clk_regs);
+	int                      l = 0;
+	int                      count = PAGE_SIZE;
+
+	QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
+
+	for (; n--; p++) {
+		l += snprintf(&buf[l], count - l, "    %8p  %8x  %08x  %s\n",
+			(void *)priv->clk_base + p->offset, p->offset,
+			qfec_clkreg_read(priv, p->offset), p->label);
+	}
+
+	return  l;
+}
+
+/*
+ * speed selection
+ */
+
+struct qfec_pll_cfg {
+	uint32_t    spd;
+	uint32_t    eth_md;     /* M [31:16], NOT 2*D [15:0] */
+	uint32_t    eth_ns;     /* NOT(M-N) [31:16], ctl bits [11:0]  */
+};
+
+static struct qfec_pll_cfg qfec_pll_cfg_tbl[] = {
+	/* 2.5 MHz */
+	{ MAC_CONFIG_REG_SPD_10,   ETH_MD_M(1)  | ETH_MD_2D_N(100),
+						  ETH_NS_NM(100-1)
+						| ETH_NS_MCNTR_EN
+						| ETH_NS_MCNTR_MODE_DUAL
+						| ETH_NS_PRE_DIV(0)
+						| CLK_SRC_PLL_EMAC },
+	/* 25 MHz */
+	{ MAC_CONFIG_REG_SPD_100,  ETH_MD_M(1)  | ETH_MD_2D_N(10),
+						  ETH_NS_NM(10-1)
+						| ETH_NS_MCNTR_EN
+						| ETH_NS_MCNTR_MODE_DUAL
+						| ETH_NS_PRE_DIV(0)
+						| CLK_SRC_PLL_EMAC },
+	/* 125 MHz */
+	{MAC_CONFIG_REG_SPD_1G,    0,             ETH_NS_PRE_DIV(1)
+						| CLK_SRC_PLL_EMAC },
+};
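+
+/* Divider sanity check (illustrative only): the table comments above give
+ * target clocks of 2.5 MHz and 25 MHz for 10/100 Mbps.  Assuming the
+ * 250 MHz EMAC PLL noted for the PTP configuration below, those correspond
+ * to divide ratios of 100 and 10, matching the N values programmed here
+ * (ETH_MD_2D_N(100)/ETH_NS_NM(100-1) and ETH_MD_2D_N(10)/ETH_NS_NM(10-1)).
+ */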
+
+enum speed  {
+	spd_10   = 0,
+	spd_100  = 1,
+	spd_1000 = 2,
+};
+
+/*
+ * configure the MAC speed/duplex bits and the ethernet clock dividers
+ */
+static int qfec_speed_cfg(struct net_device *dev, unsigned int spd,
+	unsigned int dplx)
+{
+	struct qfec_priv       *priv = netdev_priv(dev);
+	struct qfec_pll_cfg    *p;
+
+	QFEC_LOG(QFEC_LOG_DBG2, "%s: %d spd, %d dplx\n", __func__, spd, dplx);
+
+	if (spd > spd_1000)  {
+		QFEC_LOG_ERR("%s: range\n", __func__);
+		return -ENODEV;
+	}
+
+	p = &qfec_pll_cfg_tbl[spd];
+
+	/* set the MAC speed bits */
+	qfec_reg_write(priv, MAC_CONFIG_REG,
+	(qfec_reg_read(priv, MAC_CONFIG_REG)
+		& ~(MAC_CONFIG_REG_SPD | MAC_CONFIG_REG_DM))
+			| p->spd | (dplx ? MAC_CONFIG_REG_DM : 0));
+
+	qfec_clkreg_write(priv, ETH_MD_REG, p->eth_md);
+	qfec_clkreg_write(priv, ETH_NS_REG, p->eth_ns);
+
+	return 0;
+}
+
+/*
+ * configure PTP divider for 25 MHz assuming EMAC PLL 250 MHz
+ */
+
+static struct qfec_pll_cfg qfec_pll_ptp = {
+	/* 25 MHz */
+	0,      ETH_MD_M(1) | ETH_MD_2D_N(10),    ETH_NS_NM(10-1)
+						| EMAC_PTP_NS_ROOT_EN
+						| EMAC_PTP_NS_CLK_EN
+						| ETH_NS_MCNTR_EN
+						| ETH_NS_MCNTR_MODE_DUAL
+						| ETH_NS_PRE_DIV(0)
+						| CLK_SRC_PLL_EMAC
+};
+
+#define PLLTEST_PAD_CFG     0x01E0
+#define PLLTEST_PLL_7       0x3700
+
+#define CLKTEST_REG         0x01EC
+#define CLKTEST_EMAC_RX     0x3fc07f7a
+
+static int qfec_ptp_cfg(struct qfec_priv *priv)
+{
+	struct qfec_pll_cfg    *p    = &qfec_pll_ptp;
+
+	QFEC_LOG(QFEC_LOG_DBG2, "%s: %08x md, %08x ns\n",
+		__func__, p->eth_md, p->eth_ns);
+
+	qfec_clkreg_write(priv, EMAC_PTP_MD_REG, p->eth_md);
+	qfec_clkreg_write(priv, EMAC_PTP_NS_REG, p->eth_ns);
+
+	/* configure HS/LS clk test ports to verify clks */
+	qfec_clkreg_write(priv, CLKTEST_REG,     CLKTEST_EMAC_RX);
+	qfec_clkreg_write(priv, PLLTEST_PAD_CFG, PLLTEST_PLL_7);
+
+	return 0;
+}
+
+/*
+ * MDIO operations
+ */
+
+/*
+ * wait a reasonable amount of time for the MDIO operation to complete (not busy)
+ */
+static int qfec_mdio_busy(struct net_device *dev)
+{
+	int     i;
+
+	for (i = 100; i > 0; i--)  {
+		if (!(qfec_reg_read(
+			netdev_priv(dev), GMII_ADR_REG) & GMII_ADR_REG_GB))  {
+			return 0;
+		}
+		udelay(1);
+	}
+
+	return -ETIME;
+}
+
+/*
+ * initiate either a read or write MDIO operation
+ */
+
+static int qfec_mdio_oper(struct net_device *dev, int phy_id, int reg, int wr)
+{
+	struct qfec_priv   *priv = netdev_priv(dev);
+	int                 res = 0;
+
+	/* ensure the PHY is not busy */
+	res = qfec_mdio_busy(dev);
+	if (res)  {
+		QFEC_LOG_ERR("%s: busy\n", __func__);
+		goto done;
+	}
+
+	/* initiate operation */
+	qfec_reg_write(priv, GMII_ADR_REG,
+		GMII_ADR_REG_ADR_SET(phy_id)
+		| GMII_ADR_REG_REG_SET(reg)
+		| GMII_ADR_REG_CSR_SET(priv->mdio_clk)
+		| (wr ? GMII_ADR_REG_GW : 0)
+		| GMII_ADR_REG_GB);
+
+	/* wait for operation to complete */
+	res = qfec_mdio_busy(dev);
+	if (res)
+		QFEC_LOG_ERR("%s: timeout\n", __func__);
+
+done:
+	return res;
+}
+
+/*
+ * read MDIO register
+ */
+static int qfec_mdio_read(struct net_device *dev, int phy_id, int reg)
+{
+	struct qfec_priv   *priv = netdev_priv(dev);
+	int                 res = 0;
+	unsigned long       flags;
+
+	spin_lock_irqsave(&priv->mdio_lock, flags);
+
+	res = qfec_mdio_oper(dev, phy_id, reg, 0);
+	if (res)  {
+		QFEC_LOG_ERR("%s: oper\n", __func__);
+		goto done;
+	}
+
+	res = qfec_reg_read(priv, GMII_DATA_REG);
+	QFEC_LOG(QFEC_LOG_MDIO_R, "%s: %2d reg, 0x%04x val\n",
+		__func__, reg, res);
+
+done:
+	spin_unlock_irqrestore(&priv->mdio_lock, flags);
+	return res;
+}
+
+/*
+ * write MDIO register
+ */
+static void qfec_mdio_write(struct net_device *dev, int phy_id, int reg,
+	int val)
+{
+	struct qfec_priv   *priv = netdev_priv(dev);
+	unsigned long       flags;
+
+	spin_lock_irqsave(&priv->mdio_lock, flags);
+
+	QFEC_LOG(QFEC_LOG_MDIO_W, "%s: %2d reg, %04x\n",
+		__func__, reg, val);
+
+	qfec_reg_write(priv, GMII_DATA_REG, val);
+
+	if (qfec_mdio_oper(dev, phy_id, reg, 1))
+		QFEC_LOG_ERR("%s: oper\n", __func__);
+
+	spin_unlock_irqrestore(&priv->mdio_lock, flags);
+}
+
+/*
+ * get auto-negotiation results
+ */
+
+#define QFEC_100        (LPA_100HALF | LPA_100FULL | LPA_100BASE4)
+#define QFEC_100_FD     (LPA_100FULL | LPA_100BASE4)
+#define QFEC_10         (LPA_10HALF  | LPA_10FULL)
+#define QFEC_10_FD       LPA_10FULL
+
+static void qfec_get_an(struct net_device *dev, uint32_t *spd, uint32_t *dplx)
+{
+	struct qfec_priv   *priv = netdev_priv(dev);
+	uint32_t            status;
+	uint32_t            advert;
+	uint32_t            lpa;
+	uint32_t            flow;
+
+	advert = qfec_mdio_read(dev, priv->phy_id, MII_ADVERTISE);
+	lpa    = qfec_mdio_read(dev, priv->phy_id, MII_LPA);
+	status = advert & lpa;
+
+	/* todo: check extended status register for 1G abilities */
+
+	if (status & QFEC_100)  {
+		*spd  = spd_100;
+		*dplx = status & QFEC_100_FD ? 1 : 0;
+	}
+
+	else if (status & QFEC_10)  {
+		*spd  = spd_10;
+		*dplx = status & QFEC_10_FD ? 1 : 0;
+	}
+
+	/* check pause */
+	flow  = qfec_reg_read(priv, FLOW_CONTROL_REG);
+	flow &= ~(FLOW_CONTROL_TFE | FLOW_CONTROL_RFE);
+
+	if (status & ADVERTISE_PAUSE_CAP)  {
+		flow |= FLOW_CONTROL_RFE | FLOW_CONTROL_TFE;
+	} else if (status & ADVERTISE_PAUSE_ASYM)  {
+		if (lpa & ADVERTISE_PAUSE_CAP)
+			flow |= FLOW_CONTROL_TFE;
+		else if (advert & ADVERTISE_PAUSE_CAP)
+			flow |= FLOW_CONTROL_RFE;
+	}
+
+	qfec_reg_write(priv, FLOW_CONTROL_REG, flow);
+}
+
+/*
+ * monitor phy status, and process auto-neg results when changed
+ */
+
+static void qfec_phy_monitor(unsigned long data)
+{
+	struct net_device  *dev  = (struct net_device *) data;
+	struct qfec_priv   *priv = netdev_priv(dev);
+	unsigned int        spd  = 0;
+	unsigned int        dplx = 1;
+
+	mod_timer(&priv->phy_tmr, jiffies + HZ);
+
+	if (mii_link_ok(&priv->mii) && !netif_carrier_ok(priv->net_dev))  {
+		qfec_get_an(dev, &spd, &dplx);
+		qfec_speed_cfg(dev, spd, dplx);
+		QFEC_LOG(QFEC_LOG_DBG, "%s: link up, %d spd, %d dplx\n",
+			__func__, spd, dplx);
+
+		netif_carrier_on(dev);
+	}
+
+	else if (!mii_link_ok(&priv->mii) && netif_carrier_ok(priv->net_dev))  {
+		QFEC_LOG(QFEC_LOG_DBG, "%s: link down\n", __func__);
+		netif_carrier_off(dev);
+	}
+}
+
+/*
+ * dealloc buffer descriptor memory
+ */
+
+static void qfec_mem_dealloc(struct net_device *dev)
+{
+	struct qfec_priv   *priv = netdev_priv(dev);
+
+	dma_free_coherent(&dev->dev,
+		priv->bd_size, priv->bd_base, priv->tbd_dma);
+	priv->bd_base = 0;
+}
+
+/*
+ * allocate shared device memory for TX/RX buf-desc (and buffers)
+ */
+
+static int qfec_mem_alloc(struct net_device *dev)
+{
+	struct qfec_priv   *priv = netdev_priv(dev);
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s: %p dev\n", __func__, dev);
+
+	priv->bd_size =
+		(priv->n_tbd + priv->n_rbd) * sizeof(struct qfec_buf_desc);
+
+	priv->p_tbd = kcalloc(priv->n_tbd, sizeof(struct buf_desc), GFP_KERNEL);
+	if (!priv->p_tbd)  {
+		QFEC_LOG_ERR("%s: kcalloc failed p_tbd\n", __func__);
+		return -ENOMEM;
+	}
+
+	priv->p_rbd = kcalloc(priv->n_rbd, sizeof(struct buf_desc), GFP_KERNEL);
+	if (!priv->p_rbd)  {
+		QFEC_LOG_ERR("%s: kcalloc failed p_rbd\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* alloc mem for buf-desc, if not already alloc'd */
+	if (!priv->bd_base)  {
+		priv->bd_base = dma_alloc_coherent(&dev->dev,
+			priv->bd_size, &priv->tbd_dma,
+			GFP_KERNEL | __GFP_DMA);
+	}
+
+	if (!priv->bd_base)  {
+		QFEC_LOG_ERR("%s: dma_alloc_coherent failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	priv->rbd_dma   = priv->tbd_dma
+			+ (priv->n_tbd * sizeof(struct qfec_buf_desc));
+
+	QFEC_LOG(QFEC_LOG_DBG,
+		" %s: 0x%08x size, %d n_tbd, %d n_rbd\n",
+		__func__, priv->bd_size, priv->n_tbd, priv->n_rbd);
+
+	return 0;
+}
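+
+/* Size example (hypothetical descriptor counts, not the TX_BD_NUM/RX_BD_NUM
+ * defaults): each struct qfec_buf_desc is 16 bytes on a 32-bit build, so
+ * with, say, 64 TX and 256 RX descriptors bd_size would be
+ * (64 + 256) * 16 = 5120 bytes, allocated as one coherent region with the
+ * RX list starting 64 * 16 bytes in.
+ */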
+
+/*
+ * display buffer descriptors
+ */
+
+static int qfec_bd_fmt(char *buf, int size, struct buf_desc *p_bd)
+{
+	return snprintf(buf, size,
+		"%8p: %08x %08x %8p %8p  %8p %8p %8p %x",
+		p_bd,                     qfec_bd_status_get(p_bd),
+		qfec_bd_ctl_get(p_bd),    qfec_bd_pbuf_get(p_bd),
+		qfec_bd_next_get(p_bd),   qfec_bd_skbuf_get(p_bd),
+		qfec_bd_virt_get(p_bd),   qfec_bd_phys_get(p_bd),
+		qfec_bd_last_bd(p_bd));
+}
+
+static int qfec_bd_show(char *buf, int count, struct buf_desc *p_bd, int n_bd,
+	struct ring *p_ring, char *label)
+{
+	int     l = 0;
+	int     n;
+
+	QFEC_LOG(QFEC_LOG_DBG2, "%s: %s\n", __func__, label);
+
+	l += snprintf(&buf[l], count, "%s: %s\n", __func__, label);
+	if (!p_bd)
+		return l;
+
+	n_bd = n_bd > MAX_N_BD ? MAX_N_BD : n_bd;
+
+	for (n = 0; n < n_bd; n++, p_bd++) {
+		l += qfec_bd_fmt(&buf[l], count - l, p_bd);
+		l += snprintf(&buf[l], count - l, "%s%s\n",
+			(qfec_ring_head(p_ring) == n ? " < h" : ""),
+			(qfec_ring_tail(p_ring) == n ? " < t" : ""));
+	}
+
+	return l;
+}
+
+/*
+ * display TX BDs
+ */
+static int qfec_bd_tx_show(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	struct qfec_priv   *priv = netdev_priv(to_net_dev(dev));
+	int                 count = PAGE_SIZE;
+
+	return qfec_bd_show(buf, count, priv->p_tbd, priv->n_tbd,
+				&priv->ring_tbd, "TX");
+}
+
+/*
+ * display RX BDs
+ */
+static int qfec_bd_rx_show(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	struct qfec_priv   *priv = netdev_priv(to_net_dev(dev));
+	int                 count = PAGE_SIZE;
+
+	return  qfec_bd_show(buf, count, priv->p_rbd, priv->n_rbd,
+				&priv->ring_rbd, "RX");
+}
+
+/*
+ * read timestamp from buffer descriptor
+ *    the pbuf and next fields of the buffer descriptors are overwritten
+ *    with the timestamp high and low register values.   The high register
+ *    counts seconds, but the sub-second increment register is programmed
+ *    with the appropriate value to increment the timestamp low register
+ *    such that it overflows at 0x8000 0000.  The low register value
+ *    (next) must be converted to nanoseconds: ns = low * 10^9 / 2^31.
+ */
+static void qfec_read_timestamp(struct buf_desc *p_bd,
+	struct skb_shared_hwtstamps *ts)
+{
+	unsigned long  sec = (unsigned long)qfec_bd_next_get(p_bd);
+	long long      ns  = (unsigned long)qfec_bd_pbuf_get(p_bd);
+
+#define BILLION		1000000000
+#define LOW_REG_BITS    31
+	ns  *= BILLION;
+	ns >>= LOW_REG_BITS;
+
+	ts->hwtstamp  = ktime_set(sec, ns);
+	ts->syststamp = ktime_set(sec, ns);
+}
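+
+/* Worked conversion example (illustrative): if the low register value
+ * (next) reads 0x40000000, then ns = 0x40000000 * 10^9 >> 31 =
+ * 500,000,000 ns, i.e. half a second, consistent with the low register
+ * overflowing at 0x80000000.
+ */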
+
+/*
+ * free transmitted sk_buffs from buffer-descriptors not owned by HW
+ */
+static int qfec_tx_replenish(struct net_device *dev)
+{
+	struct qfec_priv   *priv   = netdev_priv(dev);
+	struct ring        *p_ring = &priv->ring_tbd;
+	struct buf_desc    *p_bd   = &priv->p_tbd[qfec_ring_tail(p_ring)];
+	struct sk_buff     *skb;
+	unsigned long      flags;
+
+	CNTR_INC(priv, tx_replenish);
+
+	spin_lock_irqsave(&priv->xmit_lock, flags);
+
+	while (!qfec_ring_empty(p_ring))  {
+		if (qfec_bd_own(p_bd))
+			break;          /* done for now */
+
+		skb = qfec_bd_skbuf_get(p_bd);
+		if (unlikely(skb == NULL))  {
+			QFEC_LOG_ERR("%s: null sk_buff\n", __func__);
+			CNTR_INC(priv, tx_skb_null);
+			break;
+		}
+
+		qfec_reg_write(priv, STATUS_REG,
+			STATUS_REG_TU | STATUS_REG_TI);
+
+		/* retrieve timestamp if requested */
+		if (qfec_bd_status_get(p_bd) & BUF_TX_TTSS)  {
+			CNTR_INC(priv, ts_tx_rtn);
+			qfec_read_timestamp(p_bd, skb_hwtstamps(skb));
+			skb_tstamp_tx(skb, skb_hwtstamps(skb));
+		}
+
+		/* update statistics before freeing skb */
+		priv->stats.tx_packets++;
+		priv->stats.tx_bytes  += skb->len;
+
+		dma_unmap_single(&dev->dev, (dma_addr_t) qfec_bd_pbuf_get(p_bd),
+				skb->len, DMA_TO_DEVICE);
+
+		dev_kfree_skb_any(skb);
+		qfec_bd_skbuf_set(p_bd, NULL);
+
+		qfec_ring_tail_adv(p_ring);
+		p_bd   = &priv->p_tbd[qfec_ring_tail(p_ring)];
+	}
+
+	spin_unlock_irqrestore(&priv->xmit_lock, flags);
+
+	qfec_queue_start(dev);
+
+	return 0;
+}
+
+/*
+ * clear ownership bits of all TX buf-desc and release the sk-bufs
+ */
+static void qfec_tx_timeout(struct net_device *dev)
+{
+	struct qfec_priv   *priv   = netdev_priv(dev);
+	struct buf_desc    *bd     = priv->p_tbd;
+	int                 n;
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+	CNTR_INC(priv, tx_timeout);
+
+	for (n = 0; n < priv->n_tbd; n++, bd++)
+		qfec_bd_own_clr(bd);
+
+	qfec_tx_replenish(dev);
+}
+
+/*
+ * qfec_rx_int() - process received frames
+ */
+static void qfec_rx_int(struct net_device *dev)
+{
+	struct qfec_priv   *priv   = netdev_priv(dev);
+	struct ring        *p_ring = &priv->ring_rbd;
+	struct buf_desc    *p_bd   = priv->p_latest_rbd;
+	uint32_t desc_status;
+	uint32_t mis_fr_reg;
+
+	desc_status = qfec_bd_status_get(p_bd);
+	mis_fr_reg = qfec_reg_read(priv, MIS_FR_REG);
+
+	CNTR_INC(priv, rx_int);
+
+	/* check that valid interrupt occurred */
+	if (unlikely(desc_status & BUF_OWN)) {
+		char  s[100];
+
+		qfec_bd_fmt(s, sizeof(s), p_bd);
+		QFEC_LOG_ERR("%s: owned by DMA, %08x, %s\n", __func__,
+			qfec_reg_read(priv, CUR_HOST_RX_DES_REG), s);
+		CNTR_INC(priv, rx_owned);
+		return;
+	}
+
+	/* accumulate missed-frame count (reg reset when read) */
+	priv->stats.rx_missed_errors += mis_fr_reg
+					& MIS_FR_REG_MISS_CNT;
+
+	/* process all unowned frames */
+	while (!(desc_status & BUF_OWN) && (!qfec_ring_full(p_ring)))  {
+		struct sk_buff     *skb;
+		struct buf_desc    *p_bd_next;
+
+		skb = qfec_bd_skbuf_get(p_bd);
+
+		if (unlikely(skb == NULL))  {
+			QFEC_LOG_ERR("%s: null sk_buff\n", __func__);
+			CNTR_INC(priv, rx_skb_null);
+			break;
+		}
+
+		/* cache coherency before skb->data is accessed */
+		dma_unmap_single(&dev->dev,
+			(dma_addr_t) qfec_bd_phys_get(p_bd),
+			ETH_BUF_SIZE, DMA_FROM_DEVICE);
+		prefetch(skb->data);
+
+		if (unlikely(desc_status & BUF_RX_ES)) {
+			priv->stats.rx_dropped++;
+			CNTR_INC(priv, rx_dropped);
+			dev_kfree_skb(skb);
+		} else  {
+			qfec_reg_write(priv, STATUS_REG, STATUS_REG_RI);
+
+			skb->len = BUF_RX_FL_GET_FROM_STATUS(desc_status);
+
+			if (priv->state & timestamping)  {
+				CNTR_INC(priv, ts_rec);
+				qfec_read_timestamp(p_bd, skb_hwtstamps(skb));
+			}
+
+			/* update statistics before freeing skb */
+			priv->stats.rx_packets++;
+			priv->stats.rx_bytes  += skb->len;
+
+			skb->dev        = dev;
+			skb->protocol   = eth_type_trans(skb, dev);
+			skb->ip_summed  = CHECKSUM_UNNECESSARY;
+
+			if (NET_RX_DROP == netif_rx(skb))  {
+				priv->stats.rx_dropped++;
+				CNTR_INC(priv, rx_dropped);
+			}
+			CNTR_INC(priv, netif_rx_cntr);
+		}
+
+		if (p_bd != priv->p_ending_rbd)
+			p_bd_next = p_bd + 1;
+		else
+			p_bd_next = priv->p_rbd;
+		desc_status = qfec_bd_status_get(p_bd_next);
+
+		qfec_bd_skbuf_set(p_bd, NULL);
+
+		qfec_ring_head_adv(p_ring);
+		p_bd = p_bd_next;
+	}
+
+	priv->p_latest_rbd = p_bd;
+
+	/* replenish bufs */
+	while (!qfec_ring_empty(p_ring))  {
+		if (qfec_rbd_init(dev, &priv->p_rbd[qfec_ring_tail(p_ring)]))
+			break;
+		qfec_ring_tail_adv(p_ring);
+	}
+}
+
+/*
+ * isr() - interrupt service routine
+ *          determine cause of interrupt and invoke/schedule appropriate
+ *          processing or error handling
+ */
+#define ISR_ERR_CHK(priv, status, interrupt, cntr) \
+	if (status & interrupt) \
+		CNTR_INC(priv, cntr)
+
+static irqreturn_t qfec_int(int irq, void *dev_id)
+{
+	struct net_device  *dev      = dev_id;
+	struct qfec_priv   *priv     = netdev_priv(dev);
+	uint32_t            status   = qfec_reg_read(priv, STATUS_REG);
+	uint32_t            int_bits = STATUS_REG_NIS | STATUS_REG_AIS;
+
+	QFEC_LOG(QFEC_LOG_DBG2, "%s: %s\n", __func__, dev->name);
+
+	/* abnormal interrupt */
+	if (status & STATUS_REG_AIS)  {
+		QFEC_LOG(QFEC_LOG_DBG, "%s: abnormal status 0x%08x\n",
+			__func__, status);
+
+		ISR_ERR_CHK(priv, status, STATUS_REG_RU,  rx_buf_unavail);
+		ISR_ERR_CHK(priv, status, STATUS_REG_FBI, fatal_bus);
+
+		ISR_ERR_CHK(priv, status, STATUS_REG_RWT, rx_watchdog);
+		ISR_ERR_CHK(priv, status, STATUS_REG_RPS, rx_proc_stopped);
+		ISR_ERR_CHK(priv, status, STATUS_REG_UNF, tx_underflow);
+
+		ISR_ERR_CHK(priv, status, STATUS_REG_OVF, rx_overflow);
+		ISR_ERR_CHK(priv, status, STATUS_REG_TJT, tx_jabber_tmout);
+		ISR_ERR_CHK(priv, status, STATUS_REG_TPS, tx_proc_stopped);
+
+		int_bits |= STATUS_REG_AIS_BITS;
+		CNTR_INC(priv, abnorm_int);
+	}
+
+	if (status & STATUS_REG_NIS)
+		CNTR_INC(priv, norm_int);
+
+	/* receive interrupt */
+	if (status & STATUS_REG_RI)  {
+		CNTR_INC(priv, rx_isr);
+		qfec_rx_int(dev);
+	}
+
+	/* transmit interrupt */
+	if (status & STATUS_REG_TI)  {
+		CNTR_INC(priv, tx_isr);
+		qfec_tx_replenish(dev);
+	}
+
+	/* gmac interrupt */
+	if (status & (STATUS_REG_GPI | STATUS_REG_GMI | STATUS_REG_GLI))  {
+		CNTR_INC(priv, gmac_isr);
+		int_bits |= STATUS_REG_GMI;
+	}
+
+	/* clear interrupts */
+	qfec_reg_write(priv, STATUS_REG, int_bits);
+	CNTR_INC(priv, isr);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * open() - register system resources (IRQ, DMA, ...),
+ *   turn on the HW, and perform device setup.
+ */
+static int qfec_open(struct net_device *dev)
+{
+	struct qfec_priv   *priv = netdev_priv(dev);
+	struct buf_desc    *p_bd;
+	struct ring        *p_ring;
+	struct qfec_buf_desc *p_desc;
+	int                 n;
+	int                 res = 0;
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s: %p dev\n", __func__, dev);
+
+	if (!dev)  {
+		res = -EINVAL;
+		goto err;
+	}
+
+	/* allocate TX/RX buffer-descriptors and buffers */
+
+	res = qfec_mem_alloc(dev);
+	if (res)
+		goto err;
+
+	/* initialize TX */
+	p_desc = priv->bd_base;
+
+	for (n = 0, p_bd = priv->p_tbd; n < priv->n_tbd; n++, p_bd++) {
+		p_bd->p_desc = p_desc++;
+
+		if (n == (priv->n_tbd - 1))
+			qfec_bd_last_bd_set(p_bd);
+
+		qfec_bd_own_clr(p_bd);      /* clear ownership */
+	}
+
+	qfec_ring_init(&priv->ring_tbd, priv->n_tbd, priv->n_tbd);
+
+	priv->tx_ic_mod = priv->n_tbd / TX_BD_TI_RATIO;
+	if (priv->tx_ic_mod == 0)
+		priv->tx_ic_mod = 1;
+
+	/* initialize RX buffer descriptors and allocate sk_bufs */
+	p_ring = &priv->ring_rbd;
+	qfec_ring_init(p_ring, priv->n_rbd, 0);
+	qfec_bd_last_bd_set(&priv->p_rbd[priv->n_rbd - 1]);
+
+	for (n = 0, p_bd = priv->p_rbd; n < priv->n_rbd; n++, p_bd++) {
+		p_bd->p_desc = p_desc++;
+
+		if (qfec_rbd_init(dev, p_bd))
+			break;
+		qfec_ring_tail_adv(p_ring);
+	}
+
+	priv->p_latest_rbd = priv->p_rbd;
+	priv->p_ending_rbd = priv->p_rbd + priv->n_rbd - 1;
+
+	/* config ptp clock */
+	qfec_ptp_cfg(priv);
+
+	/* configure PHY - must be set before reset/hw_init */
+	qfec_intf_sel(priv, intfc_mii);
+
+	/* initialize controller after BDs allocated */
+	res = qfec_hw_init(priv);
+	if (res)
+		goto err1;
+
+	/* get/set (primary) MAC address */
+	qfec_set_adr_regs(priv, dev->dev_addr);
+
+	/* start phy monitor */
+	QFEC_LOG(QFEC_LOG_DBG, " %s: start timer\n", __func__);
+	netif_carrier_off(priv->net_dev);
+	setup_timer(&priv->phy_tmr, qfec_phy_monitor, (unsigned long)dev);
+	mod_timer(&priv->phy_tmr, jiffies + HZ);
+
+	/* initialize interrupts */
+	QFEC_LOG(QFEC_LOG_DBG, " %s: request irq %d\n", __func__, dev->irq);
+	res = request_irq(dev->irq, qfec_int, 0, dev->name, dev);
+	if (res)
+		goto err1;
+
+	/* enable controller */
+	qfec_hw_enable(priv);
+	netif_start_queue(dev);
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s: %08x link, %08x carrier\n", __func__,
+		mii_link_ok(&priv->mii), netif_carrier_ok(priv->net_dev));
+
+	QFEC_LOG(QFEC_LOG_DBG, " %s: done\n", __func__);
+	return 0;
+
+err1:
+	qfec_mem_dealloc(dev);
+err:
+	QFEC_LOG_ERR("%s: error - %d\n", __func__, res);
+	return res;
+}
+
+/*
+ * stop() - "reverse operations performed at open time"
+ */
+static int qfec_stop(struct net_device *dev)
+{
+	struct qfec_priv   *priv = netdev_priv(dev);
+	struct buf_desc    *p_bd;
+	struct sk_buff     *skb;
+	int                 n;
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+	del_timer_sync(&priv->phy_tmr);
+
+	qfec_hw_disable(priv);
+	qfec_queue_stop(dev);
+	free_irq(dev->irq, dev);
+
+	/* free all pending sk_bufs */
+	for (n = priv->n_rbd, p_bd = priv->p_rbd; n > 0; n--, p_bd++) {
+		skb = qfec_bd_skbuf_get(p_bd);
+		if (skb)
+			dev_kfree_skb(skb);
+	}
+
+	for (n = priv->n_tbd, p_bd = priv->p_tbd; n > 0; n--, p_bd++) {
+		skb = qfec_bd_skbuf_get(p_bd);
+		if (skb)
+			dev_kfree_skb(skb);
+	}
+
+	qfec_mem_dealloc(dev);
+
+	QFEC_LOG(QFEC_LOG_DBG, " %s: done\n", __func__);
+
+	return 0;
+}
+
+static int qfec_set_config(struct net_device *dev, struct ifmap *map)
+{
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+	return 0;
+}
+
+/*
+ * pass data from the sk_buff to a buf-desc
+ */
+static int qfec_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct qfec_priv   *priv   = netdev_priv(dev);
+	struct ring        *p_ring = &priv->ring_tbd;
+	struct buf_desc    *p_bd;
+	uint32_t            ctrl   = 0;
+	int                 ret    = NETDEV_TX_OK;
+	unsigned long       flags;
+
+	CNTR_INC(priv, xmit);
+
+	spin_lock_irqsave(&priv->xmit_lock, flags);
+
+	/* stop queuing if no resources available */
+	if (qfec_ring_room(p_ring) == 0)  {
+		qfec_queue_stop(dev);
+		CNTR_INC(priv, tx_no_resource);
+
+		ret = NETDEV_TX_BUSY;
+		goto done;
+	}
+
+	/* locate and save *sk_buff */
+	p_bd = &priv->p_tbd[qfec_ring_head(p_ring)];
+	qfec_bd_skbuf_set(p_bd, skb);
+
+	/* set DMA ptr to sk_buff data and write cache to memory */
+	qfec_bd_pbuf_set(p_bd, (void *)
+	dma_map_single(&dev->dev,
+		(void *)skb->data, skb->len, DMA_TO_DEVICE));
+
+	ctrl  = skb->len;
+	if (!(qfec_ring_head(p_ring) % priv->tx_ic_mod))
+		ctrl |= BUF_TX_IC; /* interrupt on complete */
+
+	/* check if timestamping enabled and requested */
+	if (priv->state & timestamping)  {
+		if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+			CNTR_INC(priv, ts_tx_en);
+			ctrl |= BUF_TX_IC;	/* interrupt on complete */
+			ctrl |= BUF_TX_TTSE;	/* enable timestamp */
+			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+		}
+	}
+
+	if (qfec_bd_last_bd(p_bd))
+		ctrl |= BUF_TX_TER;	/* end of TX ring */
+
+	/* no gather, no multi buf frames */
+	ctrl |= BUF_TX_FS | BUF_TX_LS;  /* 1st and last segment */
+
+	qfec_bd_ctl_wr(p_bd, ctrl);
+	qfec_bd_status_set(p_bd, BUF_OWN);
+
+	qfec_ring_head_adv(p_ring);
+	qfec_reg_write(priv, TX_POLL_DEM_REG, 1);      /* poll */
+
+done:
+	spin_unlock_irqrestore(&priv->xmit_lock, flags);
+
+	return ret;
+}
+
+static int qfec_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct qfec_priv        *priv = netdev_priv(dev);
+	struct hwtstamp_config  *cfg  = (struct hwtstamp_config *) ifr;
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+	if (cmd == SIOCSHWTSTAMP) {
+		CNTR_INC(priv, ts_ioctl);
+		QFEC_LOG(QFEC_LOG_DBG,
+			"%s: SIOCSHWTSTAMP - %x flags  %x tx  %x rx\n",
+			__func__, cfg->flags, cfg->tx_type, cfg->rx_filter);
+
+		cfg->flags      = 0;
+		cfg->tx_type    = HWTSTAMP_TX_ON;
+		cfg->rx_filter  = HWTSTAMP_FILTER_ALL;
+
+		priv->state |= timestamping;
+		qfec_reg_write(priv, TS_CTL_REG,
+			qfec_reg_read(priv, TS_CTL_REG) | TS_CTL_TSENALL);
+
+		return 0;
+	}
+
+	return generic_mii_ioctl(&priv->mii, if_mii(ifr), cmd, NULL);
+}
+
+static struct net_device_stats *qfec_get_stats(struct net_device *dev)
+{
+	struct qfec_priv   *priv = netdev_priv(dev);
+
+	QFEC_LOG(QFEC_LOG_DBG2, "qfec_stats:\n");
+
+	return &priv->stats;
+}
+
+/*
+ * accept new mac address
+ */
+static int qfec_set_mac_address(struct net_device *dev, void *p)
+{
+	struct qfec_priv   *priv = netdev_priv(dev);
+	struct sockaddr    *addr = p;
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+	qfec_set_adr_regs(priv, dev->dev_addr);
+
+	return 0;
+}
+
+/*
+ *  read discontinuous MAC address from corrected fuse memory region
+ */
+
+static int qfec_get_mac_address(char *buf, char *mac_base, int nBytes)
+{
+	static int  offset[] = { 0, 1, 2, 3, 4, 8 };
+	int         n;
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+	for (n = 0; n < nBytes; n++)
+		buf[n] = ioread8(mac_base + offset[n]);
+
+	/* check that a MAC address was programmed */
+	if ((buf[0] + buf[1] + buf[2] + buf[3] + buf[4] + buf[5]) == 0)  {
+		QFEC_LOG_ERR("%s: null MAC address\n", __func__);
+		return -ENODATA;
+	}
+
+	return 0;
+}
+
+/*
+ * static definition of driver functions
+ */
+static const struct net_device_ops qfec_netdev_ops = {
+	.ndo_open               = qfec_open,
+	.ndo_stop               = qfec_stop,
+	.ndo_start_xmit         = qfec_xmit,
+
+	.ndo_do_ioctl           = qfec_do_ioctl,
+	.ndo_tx_timeout         = qfec_tx_timeout,
+	.ndo_set_mac_address    = qfec_set_mac_address,
+
+	.ndo_change_mtu         = eth_change_mtu,
+	.ndo_validate_addr      = eth_validate_addr,
+
+	.ndo_get_stats          = qfec_get_stats,
+	.ndo_set_config         = qfec_set_config,
+};
+
+/*
+ * ethtool functions
+ */
+
+static int qfec_nway_reset(struct net_device *dev)
+{
+	struct qfec_priv  *priv = netdev_priv(dev);
+	return mii_nway_restart(&priv->mii);
+}
+
+/*
+ * speed, duplex, auto-neg settings
+ */
+static void qfec_ethtool_getpauseparam(struct net_device *dev,
+			struct ethtool_pauseparam *pp)
+{
+	struct qfec_priv  *priv = netdev_priv(dev);
+	u32                flow = qfec_reg_read(priv, FLOW_CONTROL_REG);
+	u32                advert;
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+	/* report current settings */
+	pp->tx_pause = (flow & FLOW_CONTROL_TFE) != 0;
+	pp->rx_pause = (flow & FLOW_CONTROL_RFE) != 0;
+
+	/* report if pause is being advertised */
+	advert = qfec_mdio_read(dev, priv->phy_id, MII_ADVERTISE);
+	pp->autoneg =
+		(advert & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
+}
+
+static int qfec_ethtool_setpauseparam(struct net_device *dev,
+			struct ethtool_pauseparam *pp)
+{
+	struct qfec_priv  *priv = netdev_priv(dev);
+	u32                advert;
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s: %d aneg, %d rx, %d tx\n", __func__,
+		pp->autoneg, pp->rx_pause, pp->tx_pause);
+
+	advert  =  qfec_mdio_read(dev, priv->phy_id, MII_ADVERTISE);
+	advert &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+
+	/* If pause autonegotiation is enabled, but both rx and tx are not
+	 * because neither was specified in the ethtool cmd,
+	 * enable both symmetrical and asymmetrical pause.
+	 * Otherwise, only enable the pause mode indicated by rx/tx.
+	 */
+	if (pp->autoneg)  {
+		if (pp->rx_pause)
+			advert |= ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP;
+		else if (pp->tx_pause)
+			advert |= ADVERTISE_PAUSE_ASYM;
+		else
+			advert |= ADVERTISE_PAUSE_CAP;
+	}
+
+	qfec_mdio_write(dev, priv->phy_id, MII_ADVERTISE, advert);
+
+	return 0;
+}
+
+/*
+ * ethtool ring parameter (-g/G) support
+ */
+
+/*
+ * setringparam - change the tx/rx ring lengths
+ */
+#define MIN_RING_SIZE	3
+#define MAX_RING_SIZE	1000
+static int qfec_ethtool_setringparam(struct net_device *dev,
+	struct ethtool_ringparam *ring)
+{
+	struct qfec_priv  *priv    = netdev_priv(dev);
+	u32                timeout = 20;
+
+	/* notify stack the link is down */
+	netif_carrier_off(dev);
+
+	/* allow tx to complete & free skbufs on the tx ring */
+	do {
+		usleep_range(10000, 100000);
+		qfec_tx_replenish(dev);
+
+		if (timeout-- == 0)  {
+			QFEC_LOG_ERR("%s: timeout\n", __func__);
+			return -ETIME;
+		}
+	} while (!qfec_ring_empty(&priv->ring_tbd));
+
+
+	qfec_stop(dev);
+
+	/* set tx ring size */
+	if (ring->tx_pending < MIN_RING_SIZE)
+		ring->tx_pending = MIN_RING_SIZE;
+	else if (ring->tx_pending > MAX_RING_SIZE)
+		ring->tx_pending = MAX_RING_SIZE;
+	priv->n_tbd = ring->tx_pending;
+
+	/* set rx ring size */
+	if (ring->rx_pending < MIN_RING_SIZE)
+		ring->rx_pending = MIN_RING_SIZE;
+	else if (ring->rx_pending > MAX_RING_SIZE)
+		ring->rx_pending = MAX_RING_SIZE;
+	priv->n_rbd = ring->rx_pending;
+
+
+	qfec_open(dev);
+
+	return 0;
+}
+
+/*
+ * getringparam - return the local ring size values
+ */
+static void qfec_ethtool_getringparam(struct net_device *dev,
+	struct ethtool_ringparam *ring)
+{
+	struct qfec_priv  *priv = netdev_priv(dev);
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+	ring->rx_max_pending       = MAX_RING_SIZE;
+	ring->rx_mini_max_pending  = 0;
+	ring->rx_jumbo_max_pending = 0;
+	ring->tx_max_pending       = MAX_RING_SIZE;
+
+	ring->rx_pending           = priv->n_rbd;
+	ring->rx_mini_pending      = 0;
+	ring->rx_jumbo_pending     = 0;
+	ring->tx_pending           = priv->n_tbd;
+}
+
+/*
+ * speed, duplex, auto-neg settings
+ */
+static int
+qfec_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct qfec_priv  *priv = netdev_priv(dev);
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+	cmd->maxrxpkt = priv->n_rbd;
+	cmd->maxtxpkt = priv->n_tbd;
+
+	return mii_ethtool_gset(&priv->mii, cmd);
+}
+
+static int
+qfec_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct qfec_priv  *priv = netdev_priv(dev);
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+	return mii_ethtool_sset(&priv->mii, cmd);
+}
+
+/*
+ * msg/debug level
+ */
+static u32 qfec_ethtool_getmsglevel(struct net_device *dev)
+{
+	return qfec_debug;
+}
+
+static void qfec_ethtool_setmsglevel(struct net_device *dev, u32 level)
+{
+	qfec_debug ^= level;	/* toggle on/off */
+}
+
+/*
+ * register dump
+ */
+#define DMA_DMP_OFFSET  0x0000
+#define DMA_REG_OFFSET  0x1000
+#define DMA_REG_LEN     23
+
+#define MAC_DMP_OFFSET  0x0080
+#define MAC_REG_OFFSET  0x0000
+#define MAC_REG_LEN     55
+
+#define TS_DMP_OFFSET   0x0180
+#define TS_REG_OFFSET   0x0700
+#define TS_REG_LEN      15
+
+#define MDIO_DMP_OFFSET 0x0200
+#define MDIO_REG_LEN    16
+
+#define REG_SIZE    (MDIO_DMP_OFFSET + (MDIO_REG_LEN * sizeof(short)))
+
+static int qfec_ethtool_getregs_len(struct net_device *dev)
+{
+	return REG_SIZE;
+}
+
+static void
+qfec_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs,
+			 void *buf)
+{
+	struct qfec_priv  *priv   = netdev_priv(dev);
+	u32               *data   = buf;
+	u16               *data16;
+	unsigned int       i;
+	unsigned int       j;
+	unsigned int       n;
+
+	memset(buf, 0, REG_SIZE);
+
+	j = DMA_DMP_OFFSET / sizeof(u32);
+	for (i = DMA_REG_OFFSET, n = DMA_REG_LEN; n--; i += sizeof(u32))
+		data[j++] = htonl(qfec_reg_read(priv, i));
+
+	j = MAC_DMP_OFFSET / sizeof(u32);
+	for (i = MAC_REG_OFFSET, n = MAC_REG_LEN; n--; i += sizeof(u32))
+		data[j++] = htonl(qfec_reg_read(priv, i));
+
+	j = TS_DMP_OFFSET / sizeof(u32);
+	for (i = TS_REG_OFFSET, n = TS_REG_LEN; n--; i += sizeof(u32))
+		data[j++] = htonl(qfec_reg_read(priv, i));
+
+	data16 = (u16 *)&data[MDIO_DMP_OFFSET / sizeof(u32)];
+	for (i = 0, n = 0; i < MDIO_REG_LEN; i++)
+		data16[n++] = htons(qfec_mdio_read(dev, 0, i));
+
+	regs->len     = REG_SIZE;
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s: %d bytes\n", __func__, regs->len);
+}
+
+/*
+ * statistics
+ *   return counts of various ethernet activity.
+ *   many of these are the same as in struct net_device_stats
+ *
+ *   missed-frames indicates the number of attempts made by the ethernet
+ *      controller to write to a buffer-descriptor when the BD ownership
+ *      bit was not set.   The rxfifooverflow counter (0x1D4) is not
+ *      available.  The Missed Frame and Buffer Overflow Counter register
+ *      (0x1020) is used, but has only 16 bits and is reset when read.
+ *      It is read and updates the value in priv->stats.rx_missed_errors
+ *      in qfec_rx_int().
+ */
+static char qfec_stats_strings[][ETH_GSTRING_LEN] = {
+	"TX good/bad Bytes         ",
+	"TX Bytes                  ",
+	"TX good/bad Frames        ",
+	"TX Bcast Frames           ",
+	"TX Mcast Frames           ",
+	"TX Unicast Frames         ",
+	"TX Pause Frames           ",
+	"TX Vlan Frames            ",
+	"TX Frames 64              ",
+	"TX Frames 65-127          ",
+	"TX Frames 128-255         ",
+	"TX Frames 256-511         ",
+	"TX Frames 512-1023        ",
+	"TX Frames 1024+           ",
+	"TX Pause Frames           ",
+	"TX Collisions             ",
+	"TX Late Collisions        ",
+	"TX Excessive Collisions   ",
+
+	"RX good/bad Bytes         ",
+	"RX Bytes                  ",
+	"RX good/bad Frames        ",
+	"RX Bcast Frames           ",
+	"RX Mcast Frames           ",
+	"RX Unicast Frames         ",
+	"RX Pause Frames           ",
+	"RX Vlan Frames            ",
+	"RX Frames 64              ",
+	"RX Frames 65-127          ",
+	"RX Frames 128-255         ",
+	"RX Frames 256-511         ",
+	"RX Frames 512-1023        ",
+	"RX Frames 1024+           ",
+	"RX Pause Frames           ",
+	"RX Crc error Frames       ",
+	"RX Length error Frames    ",
+	"RX Alignment error Frames ",
+	"RX Runt Frames            ",
+	"RX Oversize Frames        ",
+	"RX Missed Frames          ",
+
+};
+
+static u32 qfec_stats_regs[] =  {
+
+	     69,     89,     70,     71,     72,     90,     92,     93,
+	     73,     74,     75,     76,     77,     78,     92,     84,
+	     86,     87,
+
+	     97,     98,     96,     99,    100,    113,    116,    118,
+	    107,    108,    109,    110,    111,    112,    116,    101,
+	    114,    102,    103,    106
+};
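+
+/* The values above are 32-bit word indices; qfec_stats_show() and
+ * qfec_ethtool_getstats() multiply by sizeof(uint32_t) to form the
+ * register offset, e.g. index 69 reads the counter at offset
+ * 69 * 4 = 0x114.
+ */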
+
+static int qfec_stats_show(struct device *dev, struct device_attribute *attr,
+				char *buf)
+{
+	struct qfec_priv  *priv = netdev_priv(to_net_dev(dev));
+	int                count = PAGE_SIZE;
+	int                l     = 0;
+	int                n;
+
+	QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
+
+	for (n = 0; n < ARRAY_SIZE(qfec_stats_regs); n++)  {
+		l += snprintf(&buf[l], count - l, "      %12u  %s\n",
+			qfec_reg_read(priv,
+				qfec_stats_regs[n] * sizeof(uint32_t)),
+			qfec_stats_strings[n]);
+	}
+
+	return l;
+}
+
+static int qfec_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(qfec_stats_regs) + 1;	/* missed frames */
+
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void qfec_ethtool_getstrings(struct net_device *dev, u32 stringset,
+		u8 *buf)
+{
+	QFEC_LOG(QFEC_LOG_DBG, "%s: %d bytes\n", __func__,
+		sizeof(qfec_stats_strings));
+
+	memcpy(buf, qfec_stats_strings, sizeof(qfec_stats_strings));
+}
+
+static void qfec_ethtool_getstats(struct net_device *dev,
+		struct ethtool_stats *stats, uint64_t *data)
+{
+	struct qfec_priv        *priv = netdev_priv(dev);
+	int                      j = 0;
+	int                      n;
+
+	for (n = 0; n < ARRAY_SIZE(qfec_stats_regs); n++)
+		data[j++] = qfec_reg_read(priv,
+				qfec_stats_regs[n] * sizeof(uint32_t));
+
+	data[j++] = priv->stats.rx_missed_errors;
+
+	stats->n_stats = j;
+}
+
+static void qfec_ethtool_getdrvinfo(struct net_device *dev,
+					struct ethtool_drvinfo *info)
+{
+	strlcpy(info->driver,  QFEC_NAME,    sizeof(info->driver));
+	strlcpy(info->version, QFEC_DRV_VER, sizeof(info->version));
+	strlcpy(info->bus_info, dev_name(dev->dev.parent),
+		sizeof(info->bus_info));
+
+	info->eedump_len  = 0;
+	info->regdump_len = qfec_ethtool_getregs_len(dev);
+}
+
+/*
+ * ethtool ops table
+ */
+static const struct ethtool_ops qfec_ethtool_ops = {
+	.nway_reset         = qfec_nway_reset,
+
+	.get_settings       = qfec_ethtool_getsettings,
+	.set_settings       = qfec_ethtool_setsettings,
+	.get_link           = ethtool_op_get_link,
+	.get_drvinfo        = qfec_ethtool_getdrvinfo,
+	.get_msglevel       = qfec_ethtool_getmsglevel,
+	.set_msglevel       = qfec_ethtool_setmsglevel,
+	.get_regs_len       = qfec_ethtool_getregs_len,
+	.get_regs           = qfec_ethtool_getregs,
+
+	.get_ringparam      = qfec_ethtool_getringparam,
+	.set_ringparam      = qfec_ethtool_setringparam,
+
+	.get_pauseparam     = qfec_ethtool_getpauseparam,
+	.set_pauseparam     = qfec_ethtool_setpauseparam,
+
+	.get_sset_count     = qfec_get_sset_count,
+	.get_strings        = qfec_ethtool_getstrings,
+	.get_ethtool_stats  = qfec_ethtool_getstats,
+};
+
+/*
+ *  create sysfs entries
+ */
+static DEVICE_ATTR(bd_tx,   0444, qfec_bd_tx_show,   NULL);
+static DEVICE_ATTR(bd_rx,   0444, qfec_bd_rx_show,   NULL);
+static DEVICE_ATTR(cfg,     0444, qfec_config_show,  NULL);
+static DEVICE_ATTR(clk_reg, 0444, qfec_clk_reg_show, NULL);
+static DEVICE_ATTR(cntrs,   0444, qfec_cntrs_show,   NULL);
+static DEVICE_ATTR(stats,   0444, qfec_stats_show,   NULL);
+static DEVICE_ATTR(reg,     0444, qfec_reg_show,     NULL);
+
+static void qfec_sysfs_create(struct net_device *dev)
+{
+	if (device_create_file(&(dev->dev), &dev_attr_bd_tx) ||
+		device_create_file(&(dev->dev), &dev_attr_bd_rx) ||
+		device_create_file(&(dev->dev), &dev_attr_cfg) ||
+		device_create_file(&(dev->dev), &dev_attr_clk_reg) ||
+		device_create_file(&(dev->dev), &dev_attr_cntrs) ||
+		device_create_file(&(dev->dev), &dev_attr_reg) ||
+		device_create_file(&(dev->dev), &dev_attr_stats))
+		pr_err("qfec_sysfs_create failed to create sysfs files\n");
+}
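+
+/* The attributes appear under the net device's sysfs directory; for
+ * example, assuming the interface comes up as eth0 (hypothetical name):
+ *
+ *   cat /sys/class/net/eth0/bd_tx
+ *   cat /sys/class/net/eth0/stats
+ */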
+
+/*
+ * map a specified resource
+ */
+static int qfec_map_resource(struct platform_device *plat, int resource,
+	struct resource **priv_res,
+	void                   **addr)
+{
+	struct resource         *res;
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s: 0x%x resource\n", __func__, resource);
+
+	/* allocate region to access controller registers */
+	*priv_res = res = platform_get_resource(plat, resource, 0);
+	if (!res) {
+		QFEC_LOG_ERR("%s: platform_get_resource failed\n", __func__);
+		return -ENODEV;
+	}
+
+	res = request_mem_region(res->start, res->end - res->start, QFEC_NAME);
+	if (!res) {
+		QFEC_LOG_ERR("%s: request_mem_region failed, %08x %08x\n",
+			__func__, (*priv_res)->start,
+			(*priv_res)->end - (*priv_res)->start);
+		return -EBUSY;
+	}
+
+	*addr = ioremap(res->start, res->end - res->start);
+	if (!*addr)
+		return -ENOMEM;
+
+	QFEC_LOG(QFEC_LOG_DBG, " %s: io mapped from %p to %p\n",
+		__func__, (void *)res->start, *addr);
+
+	return 0;
+}
+
+/*
+ * free allocated io regions
+ */
+static void qfec_free_res(struct resource *res, void *base)
+{
+
+	if (res)  {
+		if (base)
+			iounmap((void __iomem *)base);
+
+		release_mem_region(res->start, res->end - res->start);
+	}
+}
+
+/*
+ * probe function that obtains configuration info and allocates the net_device
+ */
+static int __devinit qfec_probe(struct platform_device *plat)
+{
+	struct net_device  *dev;
+	struct qfec_priv   *priv;
+	int                 ret = 0;
+
+	/* allocate device */
+	dev = alloc_etherdev(sizeof(struct qfec_priv));
+	if (!dev) {
+		QFEC_LOG_ERR("%s: alloc_etherdev failed\n", __func__);
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s: %08x dev\n",      __func__, (int)dev);
+
+	qfec_dev = dev;
+	SET_NETDEV_DEV(dev, &plat->dev);
+
+	dev->netdev_ops      = &qfec_netdev_ops;
+	dev->ethtool_ops     = &qfec_ethtool_ops;
+	dev->watchdog_timeo  = 2 * HZ;
+	dev->irq             = platform_get_irq(plat, 0);
+
+	dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+
+	/* initialize private data */
+	priv = (struct qfec_priv *)netdev_priv(dev);
+	memset(priv, 0, sizeof(*priv));
+
+	priv->net_dev   = dev;
+	platform_set_drvdata(plat, dev);
+
+	priv->n_tbd     = TX_BD_NUM;
+	priv->n_rbd     = RX_BD_NUM;
+
+	/* initialize phy structure */
+	priv->mii.phy_id_mask   = 0x1F;
+	priv->mii.reg_num_mask  = 0x1F;
+	priv->mii.dev           = dev;
+	priv->mii.mdio_read     = qfec_mdio_read;
+	priv->mii.mdio_write    = qfec_mdio_write;
+
+	/* map register regions */
+	ret = qfec_map_resource(
+		plat, IORESOURCE_MEM, &priv->mac_res, &priv->mac_base);
+	if (ret)  {
+		QFEC_LOG_ERR("%s: IORESOURCE_MEM mac failed\n", __func__);
+		goto err1;
+	}
+
+	ret = qfec_map_resource(
+		plat, IORESOURCE_IO, &priv->clk_res, &priv->clk_base);
+	if (ret)  {
+		QFEC_LOG_ERR("%s: IORESOURCE_IO clk failed\n", __func__);
+		goto err2;
+	}
+
+	ret = qfec_map_resource(
+		plat, IORESOURCE_DMA, &priv->fuse_res, &priv->fuse_base);
+	if (ret)  {
+		QFEC_LOG_ERR("%s: IORESOURCE_DMA fuse failed\n", __func__);
+		goto err3;
+	}
+
+	/* initialize MAC addr */
+	ret = qfec_get_mac_address(dev->dev_addr, priv->fuse_base,
+		MAC_ADDR_SIZE);
+	if (ret)
+		goto err4;
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s: mac  %02x:%02x:%02x:%02x:%02x:%02x\n",
+		__func__,
+		dev->dev_addr[0], dev->dev_addr[1],
+		dev->dev_addr[2], dev->dev_addr[3],
+		dev->dev_addr[4], dev->dev_addr[5]);
+
+	ret = register_netdev(dev);
+	if (ret)  {
+		QFEC_LOG_ERR("%s: register_netdev failed\n", __func__);
+		goto err4;
+	}
+
+	spin_lock_init(&priv->mdio_lock);
+	spin_lock_init(&priv->xmit_lock);
+	qfec_sysfs_create(dev);
+
+	return 0;
+
+	/* error handling */
+err4:
+	qfec_free_res(priv->fuse_res, priv->fuse_base);
+err3:
+	qfec_free_res(priv->clk_res, priv->clk_base);
+err2:
+	qfec_free_res(priv->mac_res, priv->mac_base);
+err1:
+	free_netdev(dev);
+err:
+	QFEC_LOG_ERR("%s: err\n", __func__);
+	return ret;
+}
+
+/*
+ * module remove
+ */
+static int __devexit qfec_remove(struct platform_device *plat)
+{
+	struct net_device  *dev  = platform_get_drvdata(plat);
+	struct qfec_priv   *priv = netdev_priv(dev);
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+	platform_set_drvdata(plat, NULL);
+
+	qfec_free_res(priv->fuse_res, priv->fuse_base);
+	qfec_free_res(priv->clk_res, priv->clk_base);
+	qfec_free_res(priv->mac_res, priv->mac_base);
+
+	unregister_netdev(dev);
+	free_netdev(dev);
+
+	return 0;
+}
+
+/*
+ * module support
+ *     the FSM9xxx is not a mobile device and does not support power management
+ */
+
+static struct platform_driver qfec_driver = {
+	.probe  = qfec_probe,
+	.remove = __devexit_p(qfec_remove),
+	.driver = {
+		.name   = QFEC_NAME,
+		.owner  = THIS_MODULE,
+	},
+};
+
+/*
+ * module init
+ */
+static int __init qfec_init_module(void)
+{
+	int  res;
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s: %s\n", __func__, qfec_driver.driver.name);
+
+	res = platform_driver_register(&qfec_driver);
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s: %d - platform_driver_register\n",
+		__func__, res);
+
+	return  res;
+}
+
+/*
+ * module exit
+ */
+static void __exit qfec_exit_module(void)
+{
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+	platform_driver_unregister(&qfec_driver);
+}
+
+MODULE_DESCRIPTION("FSM Network Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Rohit Vaswani <rvaswani@codeaurora.org>");
+MODULE_VERSION("1.0");
+
+module_init(qfec_init_module);
+module_exit(qfec_exit_module);
diff --git a/drivers/net/qfec.h b/drivers/net/qfec.h
new file mode 100644
index 0000000..6328804
--- /dev/null
+++ b/drivers/net/qfec.h
@@ -0,0 +1,793 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Qualcomm Fast Ethernet Controller (QFEC) hardware description */
+
+#ifndef _QFEC_EMAC_H_
+# define _QFEC_EMAC_H_
+
+# ifndef __KERNEL__
+#   include "stdint.h"
+# endif
+
+# define MskBits(nBits, pos)     (((1 << nBits)-1)<<pos)
+
+/* Rx/Tx Ethernet Buffer Descriptors
+ *     status contains the ownership, status and receive length bits
+ *     ctl    contains control and size bits for two buffers
+ *     p_buf  contains a ptr to the data buffer
+ *            MAC writes timestamp low into p_buf
+ *     next   contains either ptr to 2nd buffer or next buffer-desc
+ *            MAC writes timestamp high into next
+ *
+ *     status/ctl bit definition depend on RX or TX usage
+ */
+
+
+struct qfec_buf_desc {
+	uint32_t            status;
+	uint32_t            ctl;
+	void               *p_buf;
+	void               *next;
+};
+
+/* ownership bit operations */
+# define BUF_OWN                     0x80000000 /* DMA owns buffer */
+# define BUF_OWN_DMA                 BUF_OWN
+
+/* RX buffer status bits */
+# define BUF_RX_AFM               0x40000000 /* dest addr filt fail */
+
+# define BUF_RX_FL                0x3fff0000 /* frame length */
+# define BUF_RX_FL_GET(p)         ((p.status & BUF_RX_FL) >> 16)
+# define BUF_RX_FL_SET(p, x) \
+	(p.status = (p.status & ~BUF_RX_FL) | ((x << 16) & BUF_RX_FL))
+# define BUF_RX_FL_GET_FROM_STATUS(status) \
+				  (((status) & BUF_RX_FL) >> 16)
+
+# define BUF_RX_ES                0x00008000 /* error summary */
+# define BUF_RX_DE                0x00004000 /* error descriptor (es) */
+# define BUF_RX_SAF               0x00002000 /* source addr filt fail */
+# define BUF_RX_LE                0x00001000 /* length error */
+
+# define BUF_RX_OE                0x00000800 /* overflow error (es) */
+# define BUF_RX_VLAN              0x00000400 /* vlan tag */
+# define BUF_RX_FS                0x00000200 /* first descriptor */
+# define BUF_RX_LS                0x00000100 /* last  descriptor */
+
+# define BUF_RX_IPC               0x00000080 /* cksum-err/giant-frame (es) */
+# define BUF_RX_LC                0x00000040 /* late collision (es) */
+# define BUF_RX_FT                0x00000020 /* frame type */
+# define BUF_RX_RWT               0x00000010 /* rec watchdog timeout (es) */
+
+# define BUF_RX_RE                0x00000008 /* rec error (es) */
+# define BUF_RX_DBE               0x00000004 /* dribble bit err */
+# define BUF_RX_CE                0x00000002 /* crc err (es) */
+# define BUF_RX_CSE               0x00000001 /* checksum err */
+
+# define BUF_RX_ERRORS  \
+	(BUF_RX_DE  | BUF_RX_SAF | BUF_RX_LE  | BUF_RX_OE \
+	| BUF_RX_IPC | BUF_RX_LC  | BUF_RX_RWT | BUF_RX_RE \
+	| BUF_RX_DBE | BUF_RX_CE  | BUF_RX_CSE)
+
+/* RX buffer control bits */
+# define BUF_RX_DI                0x80000000 /* disable intrp on compl */
+# define BUF_RX_RER               0x02000000 /* rec end of ring */
+# define BUF_RX_RCH               0x01000000 /* 2nd addr chained */
+
+# define BUF_RX_SIZ2              0x003ff800 /* buffer 2 size */
+# define BUF_RX_SIZ2_GET(p)       ((p.control&BUF_RX_SIZ2) >> 11)
+
+# define BUF_RX_SIZ               0x000007ff /* rx buf 1 size */
+# define BUF_RX_SIZ_GET(p)        (p.ctl&BUF_RX_SIZ)
+
+/* TX buffer status bits */
+# define BUF_TX_TTSS              0x00020000 /* time stamp status */
+# define BUF_TX_IHE               0x00010000 /* IP hdr err */
+
+# define BUF_TX_ES                0x00008000 /* error summary */
+# define BUF_TX_JT                0x00004000 /* jabber timeout (es) */
+# define BUF_TX_FF                0x00002000 /* frame flushed (es) */
+# define BUF_TX_PCE               0x00001000 /* payld cksum err */
+
+# define BUF_TX_LOC               0x00000800 /* loss carrier (es) */
+# define BUF_TX_NC                0x00000400 /* no carrier (es) */
+# define BUF_TX_LC                0x00000200 /* late collision (es) */
+# define BUF_TX_EC                0x00000100 /* excessive collision (es) */
+
+# define BUF_TX_VLAN              0x00000080 /* VLAN frame */
+# define BUF_TX_CC                MskBits(4, 3) /* collision count */
+# define BUF_TX_CC_GET(p)         ((p.status&BUF_TX_CC)>>3)
+
+# define BUF_TX_ED                0x00000004 /* excessive deferral (es) */
+# define BUF_TX_UF                0x00000002 /* underflow err (es) */
+# define BUF_TX_DB                0x00000001 /* deferred bit */
+
+/* TX buffer control bits */
+# define BUF_TX_IC                0x80000000 /* intrpt on compl */
+# define BUF_TX_LS                0x40000000 /* last segment */
+# define BUF_TX_FS                0x20000000 /* first segment */
+# define BUF_TX_CIC               0x18000000 /* cksum insert control */
+# define BUF_TX_CIC_SET(n)        (BUF_TX_CIC&(n<<27))
+
+# define BUF_TX_DC                0x04000000 /* disable CRC */
+# define BUF_TX_TER               0x02000000 /* end of ring */
+# define BUF_TX_TCH               0x01000000 /* 2nd addr chained */
+
+# define BUF_TX_DP                0x00800000 /* disable padding */
+# define BUF_TX_TTSE              0x00400000 /* timestamp enable */
+
+# define BUF_TX_SIZ2              0x003ff800 /* buffer 2 size */
+# define BUF_TX_SIZ2_SET(n)       (BUF_TX_SIZ2 & (n<<11))
+
+# define BUF_TX_SIZ               0x000007ff /* buffer 1 size */
+# define BUF_TX_SIZ_SET(n)        (BUF_TX_SIZ & n)
+
+
+/* Ethernet Controller Registers */
+# define BUS_MODE_REG             0x1000
+
+# define BUS_MODE_MB              0x04000000  /* mixed burst */
+# define BUS_MODE_AAL             0x02000000  /* address alignment beats */
+# define BUS_MODE_8XPBL           0x01000000  /*  */
+
+# define BUS_MODE_USP             0x00800000  /* use separate PBL */
+# define BUS_MODE_RPBL            0x007e0000  /* rxDMA PBL */
+# define BUS_MODE_FB              0x00010000  /* fixed burst */
+
+# define BUS_MODE_PR              0x0000c000  /* tx/rx priority */
+# define BUS_MODE_PR4             0x0000c000  /* tx/rx priority 4:1 */
+# define BUS_MODE_PR3             0x00008000  /* tx/rx priority 3:1 */
+# define BUS_MODE_PR2             0x00004000  /* tx/rx priority 2:1 */
+# define BUS_MODE_PR1             0x00000000  /* tx/rx priority 1:1 */
+
+# define BUS_MODE_PBL             0x00003f00  /* programmable burst length */
+# define BUS_MODE_PBLSET(n)       (BUS_MODE_PBL&(n<<8))
+
+# define BUS_MODE_DSL             0x0000007c  /* descriptor skip length */
+# define BUS_MODE_DSL_SET(n)      (BUS_MODE_DSL & (n << 2))
+
+# define BUS_MODE_DA              0x00000002  /* DMA arbitration scheme  */
+# define BUS_MODE_SWR             0x00000001  /* software reset */
+
+#define BUS_MODE_REG_DEFAULT     (BUS_MODE_FB \
+				| BUS_MODE_AAL \
+				| BUS_MODE_PBLSET(16) \
+				| BUS_MODE_DA \
+				| BUS_MODE_DSL_SET(0))
+
+# define TX_POLL_DEM_REG          0x1004      /* transmit poll demand */
+# define RX_POLL_DEM_REG          0x1008      /* receive poll demand */
+
+# define RX_DES_LST_ADR_REG       0x100c      /* receive buffer descriptor */
+# define TX_DES_LST_ADR_REG       0x1010      /* transmit buffer descriptor */
+
+# define STATUS_REG               0x1014
+
+# define STATUS_REG_RSVRD_1       0xc0000000  /* reserved */
+# define STATUS_REG_TTI           0x20000000  /* time-stamp trigger intrpt */
+# define STATUS_REG_GPI           0x10000000  /* gmac PMT interrupt */
+
+# define STATUS_REG_GMI           0x08000000  /* gmac MMC interrupt */
+# define STATUS_REG_GLI           0x04000000  /* gmac line interface intrpt */
+
+# define STATUS_REG_EB            0x03800000  /* error bits */
+# define STATUS_REG_EB_DATA       0x00800000  /* error during data transfer */
+# define STATUS_REG_EB_RDWR       0x01000000  /* error during rd/wr transfer */
+# define STATUS_REG_EB_DESC       0x02000000  /* error during desc access */
+
+# define STATUS_REG_TS            0x00700000  /* transmit process state */
+
+# define STATUS_REG_TS_STOP       0x00000000  /*   stopped */
+# define STATUS_REG_TS_FETCH_DESC 0x00100000  /*   fetching descriptor */
+# define STATUS_REG_TS_WAIT       0x00200000  /*   waiting for status */
+# define STATUS_REG_TS_READ       0x00300000  /*   reading host memory */
+# define STATUS_REG_TS_TIMESTAMP  0x00400000  /*   timestamp write status */
+# define STATUS_REG_TS_RSVRD      0x00500000  /*   reserved */
+# define STATUS_REG_TS_SUSPEND    0x00600000  /*   desc-unavail/buffer-unflw */
+# define STATUS_REG_TS_CLOSE      0x00700000  /*   closing desc */
+
+# define STATUS_REG_RS            0x000e0000  /* receive process state */
+
+# define STATUS_REG_RS_STOP       0x00000000  /*   stopped */
+# define STATUS_REG_RS_FETCH_DESC 0x00020000  /*   fetching descriptor */
+# define STATUS_REG_RS_RSVRD_1    0x00040000  /*   reserved */
+# define STATUS_REG_RS_WAIT       0x00060000  /*   waiting for packet */
+# define STATUS_REG_RS_SUSPEND    0x00080000  /*   desc unavail */
+# define STATUS_REG_RS_CLOSE      0x000a0000  /*   closing desc */
+# define STATUS_REG_RS_TIMESTAMP  0x000c0000  /*   timestamp write status */
+# define STATUS_REG_RS_RSVRD_2    0x000e0000  /*   writing host memory */
+
+# define STATUS_REG_NIS           0x00010000  /* normal intrpt   14|6|2|0 */
+# define STATUS_REG_AIS           0x00008000  /* intrpts 13|10|9|8|7|5|4|3|1 */
+
+# define STATUS_REG_ERI           0x00004000  /* early receive interrupt */
+# define STATUS_REG_FBI           0x00002000  /* fatal bus error interrupt */
+# define STATUS_REG_RSVRD_2       0x00001800  /* reserved */
+
+# define STATUS_REG_ETI           0x00000400  /* early transmit interrupt */
+# define STATUS_REG_RWT           0x00000200  /* receive watchdog timeout */
+# define STATUS_REG_RPS           0x00000100  /* receive process stopped */
+
+# define STATUS_REG_RU            0x00000080  /* receive buffer unavailable */
+# define STATUS_REG_RI            0x00000040  /* receive interrupt */
+# define STATUS_REG_UNF           0x00000020  /* transmit underflow */
+# define STATUS_REG_OVF           0x00000010  /* receive overflow */
+
+# define STATUS_REG_TJT           0x00000008  /* transmit jabber timeout */
+# define STATUS_REG_TU            0x00000004  /* transmit buffer unavailable */
+# define STATUS_REG_TPS           0x00000002  /* transmit process stopped */
+# define STATUS_REG_TI            0x00000001  /* transmit interrupt */
+
+# define STATUS_REG_AIS_BITS    (STATUS_REG_FBI | STATUS_REG_ETI \
+				| STATUS_REG_RWT | STATUS_REG_RPS \
+				| STATUS_REG_RU | STATUS_REG_UNF \
+				| STATUS_REG_OVF | STATUS_REG_TJT \
+				| STATUS_REG_TPS | STATUS_REG_AIS)
+
+# define OPER_MODE_REG             0x1018
+
+# define OPER_MODE_REG_DT          0x04000000 /* disab drop ip cksum err fr */
+# define OPER_MODE_REG_RSF         0x02000000 /* rec store and forward */
+# define OPER_MODE_REG_DFF         0x01000000 /* disable flush of rec frames */
+
+# define OPER_MODE_REG_RFA2        0x00800000 /* thresh MSB for act flow-ctl */
+# define OPER_MODE_REG_RFD2        0x00400000 /* thresh MSB deAct flow-ctl */
+# define OPER_MODE_REG_TSF         0x00200000 /* tx store and forward */
+# define OPER_MODE_REG_FTF         0x00100000 /* flush tx FIFO */
+
+# define OPER_MODE_REG_RSVD1       0x000e0000 /* reserved */
+# define OPER_MODE_REG_TTC         0x0001c000 /* transmit threshold control */
+# define OPER_MODE_REG_TTC_SET(x)  (OPER_MODE_REG_TTC & (x << 14))
+# define OPER_MODE_REG_ST          0x00002000 /* start/stop transmission cmd */
+
+# define OPER_MODE_REG_RFD         0x00001800 /* thresh for deAct flow-ctl */
+# define OPER_MODE_REG_RFA         0x00000600 /* threshold for act flow-ctl */
+# define OPER_MODE_REG_EFC         0x00000100 /* enable HW flow-ctl */
+
+# define OPER_MODE_REG_FEF         0x00000080 /* forward error frames */
+# define OPER_MODE_REG_FUF         0x00000040 /* forward undersize good fr */
+# define OPER_MODE_REG_RSVD2       0x00000020 /* reserved */
+# define OPER_MODE_REG_RTC         0x00000018 /* receive threshold control */
+# define OPER_MODE_REG_RTC_SET(x)  (OPER_MODE_REG_RTC & (x << 3))
+
+# define OPER_MODE_REG_OSF         0x00000004 /* operate on second frame */
+# define OPER_MODE_REG_SR          0x00000002 /* start/stop receive */
+# define OPER_MODE_REG_RSVD3       0x00000001 /* reserved */
+
+
+#define OPER_MODE_REG_DEFAULT    (OPER_MODE_REG_RSF \
+				| OPER_MODE_REG_TSF \
+				| OPER_MODE_REG_TTC_SET(5) \
+				| OPER_MODE_REG_RTC_SET(1) \
+				| OPER_MODE_REG_OSF)
+
+# define INTRP_EN_REG              0x101c
+
+# define INTRP_EN_REG_RSVD1        0xfffc0000 /* */
+# define INTRP_EN_REG_NIE          0x00010000 /* normal intrpt summ enable */
+
+# define INTRP_EN_REG_AIE          0x00008000 /* abnormal intrpt summary en */
+# define INTRP_EN_REG_ERE          0x00004000 /* early receive intrpt enable */
+# define INTRP_EN_REG_FBE          0x00002000 /* fatal bus error enable */
+
+# define INTRP_EN_REG_RSVD2        0x00001800 /* */
+
+# define INTRP_EN_REG_ETE          0x00000400 /* early tx intrpt enable */
+# define INTRP_EN_REG_RWE          0x00000200 /* rx watchdog timeout enable */
+# define INTRP_EN_REG_RSE          0x00000100 /* rx stopped enable */
+
+# define INTRP_EN_REG_RUE          0x00000080 /* rx buf unavailable enable */
+# define INTRP_EN_REG_RIE          0x00000040 /* rx interrupt enable */
+# define INTRP_EN_REG_UNE          0x00000020 /* underflow interrupt enable */
+# define INTRP_EN_REG_OVE          0x00000010 /* overflow interrupt enable */
+
+# define INTRP_EN_REG_TJE          0x00000008 /* tx jabber timeout enable */
+# define INTRP_EN_REG_TUE          0x00000004 /* tx buf unavailable enable */
+# define INTRP_EN_REG_TSE          0x00000002 /* tx stopped enable */
+# define INTRP_EN_REG_TIE          0x00000001 /* tx interrupt enable */
+
+# define INTRP_EN_REG_All          (~(INTRP_EN_REG_RSVD1))
+
+# define MIS_FR_REG                0x1020
+
+# define MIS_FR_REG_FIFO_OVFL      0x10000000  /* fifo overflow */
+# define MIS_FR_REG_FIFO_CNT       0x0FFE0000  /* fifo cnt */
+
+# define MIS_FR_REG_MISS_OVFL      0x00010000  /* missed-frame overflow */
+# define MIS_FR_REG_MISS_CNT       0x0000FFFF  /* missed-frame cnt */
+
+# define RX_INTRP_WTCHDOG_REG      0x1024
+# define AXI_BUS_MODE_REG          0x1028
+
+# define AXI_BUS_MODE_EN_LPI       0x80000000  /* enable low power interface */
+# define AXI_BUS_MODE_UNLK_MGC_PKT 0x40000000  /* unlock-magic-pkt/rem-wk-up */
+# define AXI_BUS_MODE_WR_OSR_LMT   0x00F00000  /* max wr out stndg req limit */
+# define AXI_BUS_MODE_RD_OSR_LMT   0x000F0000  /* max rd out stndg req limit */
+# define AXI_BUS_MODE_AXI_AAL      0x00001000  /* address aligned beats */
+# define AXI_BUS_MODE_BLEN256      0x00000080  /* axi burst length 256 */
+# define AXI_BUS_MODE_BLEN128      0x00000040  /* axi burst length 128 */
+# define AXI_BUS_MODE_BLEN64       0x00000020  /* axi burst length 64  */
+# define AXI_BUS_MODE_BLEN32       0x00000010  /* axi burst length 32  */
+# define AXI_BUS_MODE_BLEN16       0x00000008  /* axi burst length 16  */
+# define AXI_BUS_MODE_BLEN8        0x00000004  /* axi burst length 8   */
+# define AXI_BUS_MODE_BLEN4        0x00000002  /* axi burst length 4   */
+# define AXI_BUS_MODE_UNDEF        0x00000001  /* axi undef burst length */
+
+#define AXI_BUS_MODE_DEFAULT     (AXI_BUS_MODE_WR_OSR_LMT \
+				| AXI_BUS_MODE_RD_OSR_LMT \
+				| AXI_BUS_MODE_BLEN16 \
+				| AXI_BUS_MODE_BLEN8 \
+				| AXI_BUS_MODE_BLEN4)
+
+# define AXI_STATUS_REG            0x102c
+
+/*     0x1030-0x1044 reserved */
+# define CUR_HOST_TX_DES_REG       0x1048
+# define CUR_HOST_RX_DES_REG       0x104c
+# define CUR_HOST_TX_BU_ADR_REG    0x1050
+# define CUR_HOST_RX_BU_ADR_REG    0x1054
+
+# define HW_FEATURE_REG            0x1058
+
+# define MAC_CONFIG_REG            0x0000
+
+# define MAC_CONFIG_REG_RSVD1      0xf8000000 /* */
+
+# define MAC_CONFIG_REG_SFTERR     0x04000000 /* smii force tx error */
+# define MAC_CONFIG_REG_CST        0x02000000 /* crc strip for type frame */
+# define MAC_CONFIG_REG_TC         0x01000000 /* tx cfg in rgmii/sgmii/smii */
+
+# define MAC_CONFIG_REG_WD         0x00800000 /* watchdog disable */
+# define MAC_CONFIG_REG_JD         0x00400000 /* jabber disable */
+# define MAC_CONFIG_REG_BE         0x00200000 /* frame burst enable */
+# define MAC_CONFIG_REG_JE         0x00100000 /* jumbo frame enable */
+
+# define MAC_CONFIG_REG_IFG        0x000e0000 /* inter frame gap, 96-(8*n) */
+# define MAC_CONFIG_REG_DCRS       0x00010000 /* dis carrier sense during tx */
+
+# define MAC_CONFIG_REG_PS         0x00008000 /* port select: 0/1 g/(10/100) */
+# define MAC_CONFIG_REG_FES        0x00004000 /* speed 100 mbps */
+# define MAC_CONFIG_REG_SPD        (MAC_CONFIG_REG_PS | MAC_CONFIG_REG_FES)
+# define MAC_CONFIG_REG_SPD_1G     (0)
+# define MAC_CONFIG_REG_SPD_100    (MAC_CONFIG_REG_PS | MAC_CONFIG_REG_FES)
+# define MAC_CONFIG_REG_SPD_10     (MAC_CONFIG_REG_PS)
+# define MAC_CONFIG_REG_SPD_SET(x) (MAC_CONFIG_REG_SPD & (x << 14))
+
+# define MAC_CONFIG_REG_DO         0x00002000 /* disable receive own */
+# define MAC_CONFIG_REG_LM         0x00001000 /* loopback mode */
+
+# define MAC_CONFIG_REG_DM         0x00000800 /* (full) duplex mode */
+# define MAC_CONFIG_REG_IPC        0x00000400 /* checksum offload */
+# define MAC_CONFIG_REG_DR         0x00000200 /* disable retry */
+# define MAC_CONFIG_REG_LUD        0x00000100 /* link up/down */
+
+# define MAC_CONFIG_REG_ACS        0x00000080 /* auto pad/crc stripping */
+# define MAC_CONFIG_REG_BL         0x00000060 /* back-off limit */
+# define MAC_CONFIG_REG_BL_10      0x00000000 /*          10 */
+# define MAC_CONFIG_REG_BL_8       0x00000020 /*          8  */
+# define MAC_CONFIG_REG_BL_4       0x00000040 /*          4  */
+# define MAC_CONFIG_REG_BL_1       0x00000060 /*          1  */
+# define MAC_CONFIG_REG_DC         0x00000010 /* deferral check */
+
+# define MAC_CONFIG_REG_TE         0x00000008 /* transmitter enable */
+# define MAC_CONFIG_REG_RE         0x00000004 /* receiver enable */
+# define MAC_CONFIG_REG_RSVD2      0x00000003 /* */
+
+# define MAC_FR_FILTER_REG         0x0004
+
+# define MAC_FR_FILTER_RA          0x80000000 /* receive all */
+
+# define MAC_FR_FILTER_HPF         0x00000400 /* hash or perfect filter */
+# define MAC_FR_FILTER_SAF         0x00000200 /* source addr filt en */
+# define MAC_FR_FILTER_SAIF        0x00000100 /* SA inverse filter */
+# define MAC_FR_FILTER_PCF_MASK    0x000000c0 /* pass control frames */
+# define MAC_FR_FILTER_PCF_0       0x00000000 /*    */
+# define MAC_FR_FILTER_PCF_1       0x00000040 /*    */
+# define MAC_FR_FILTER_PCF_2       0x00000080 /*    */
+# define MAC_FR_FILTER_PCF_3       0x000000c0 /*    */
+# define MAC_FR_FILTER_DBF         0x00000020 /* disable broadcast frames */
+# define MAC_FR_FILTER_PM          0x00000010 /* pass all multicast */
+# define MAC_FR_FILTER_DAIF        0x00000008 /* DA inverse filtering */
+# define MAC_FR_FILTER_HMC         0x00000004 /* hash multicast */
+# define MAC_FR_FILTER_HUC         0x00000002 /* hash unicast */
+# define MAC_FR_FILTER_PR          0x00000001 /* promiscuous mode */
+
+# define HASH_TABLE_HIGH_REG       0x0008
+# define HASH_TABLE_LOW_REG        0x000c
+
+# define GMII_ADR_REG              0x0010
+
+# define GMII_ADR_REG_PA           0x0000f800 /* addr bits */
+# define GMII_ADR_REG_GR           0x000007c0 /* addr bits */
+# define GMII_ADR_REG_RSVRD1       0x00000020 /* */
+# define GMII_ADR_REG_CR           0x0000001c /* csr clock range */
+# define GMII_ADR_REG_GW           0x00000002 /* gmii write */
+# define GMII_ADR_REG_GB           0x00000001 /* gmii busy */
+
+# define GMII_ADR_REG_ADR_SET(x)    (GMII_ADR_REG_PA & (x << 11))
+# define GMII_ADR_REG_ADR_GET(x)    ((x & GMII_ADR_REG_PA) >> 11)
+
+# define GMII_ADR_REG_REG_SET(x)    (GMII_ADR_REG_GR & (x << 6))
+# define GMII_ADR_REG_REG_GET(x)    ((x & GMII_ADR_REG_GR) >> 6)
+
+# define GMII_ADR_REG_CSR_SET(x)    (GMII_ADR_REG_CR & (x << 2))
+# define GMII_ADR_REG_CSR_GET(x)    ((x & GMII_ADR_REG_CR) >> 2)
+
+# define GMII_DATA_REG             0x0014
+
+# define GMII_DATA_REG_DATA        0x0000ffff /* gmii data */
+
+# define FLOW_CONTROL_REG          0x0018
+
+# define FLOW_CONTROL_PT           0xFFFF0000 /* pause time */
+# define FLOW_CONTROL_DZPQ         0x00000080 /* disable zero-quanta pause */
+# define FLOW_CONTROL_PLT          0x00000030 /* pause level threshold */
+
+# define FLOW_CONTROL_UP           0x00000008 /* unicast pause frame detect */
+# define FLOW_CONTROL_RFE          0x00000004 /* receive flow control enable */
+# define FLOW_CONTROL_TFE          0x00000002 /* transmit flow control enable */
+# define FLOW_CONTROL_FCB          0x00000001 /* flow control busy (BPA) */
+
+# define VLAN_TAG_REG              0x001c
+
+# define VERSION_REG               0x0020
+
+/* don't define these until HW is finished */
+/* # define VERSION_USER              0x10 */
+/* # define VERSION_QFEC              0x36 */
+
+# define VERSION_REG_USER(x)       (0xFF & (x >> 8))
+# define VERSION_REG_QFEC(x)       (0xFF & x)
+
+# define DEBUG_REG                 0x0024
+
+# define DEBUG_REG_RSVD1           0xfc000000 /* */
+# define DEBUG_REG_TX_FIFO_FULL    0x02000000 /* Tx fifo full */
+# define DEBUG_REG_TX_FIFO_NEMP    0x01000000 /* Tx fifo not empty */
+
+# define DEBUG_REG_RSVD2           0x00800000 /* */
+# define DEBUG_REG_TX_WR_ACTIVE    0x00400000 /* Tx fifo write ctrl active */
+
+# define DEBUG_REG_TX_RD_STATE     0x00300000 /* Tx fifo rd ctrl state */
+# define DEBUG_REG_TX_RD_IDLE      0x00000000 /*         idle */
+# define DEBUG_REG_TX_RD_WAIT      0x00100000 /*         waiting for status */
+# define DEBUG_REG_TX_RD_PASUE     0x00200000 /*         generating pause */
+# define DEBUG_REG_TX_RD_WRTG      0x00300000 /*         wr stat flush fifo */
+
+# define DEBUG_REG_TX_PAUSE        0x00080000 /* Tx in pause condition */
+
+# define DEBUG_REG_TX_CTRL_STATE   0x00060000 /* Tx frame controller state */
+# define DEBUG_REG_TX_CTRL_IDLE    0x00000000 /*         idle */
+# define DEBUG_REG_TX_CTRL_WAIT    0x00020000 /*         waiting for status */
+# define DEBUG_REG_TX_CTRL_PAUSE   0x00040000 /*         generating pause */
+# define DEBUG_REG_TX_CTRL_XFER    0x00060000 /*         transferring input */
+
+# define DEBUG_REG_TX_ACTIVE       0x00010000 /* Tx actively transmitting */
+# define DEBUG_REG_RSVD3           0x0000fc00 /* */
+
+# define DEBUG_REG_RX_STATE        0x00000300 /* Rx fifo state */
+# define DEBUG_REG_RX_EMPTY        0x00000000 /*         empty */
+# define DEBUG_REG_RX_LOW          0x00000100 /*         below threshold */
+# define DEBUG_REG_RX_HIGH         0x00000200 /*         above threshold */
+# define DEBUG_REG_RX_FULL         0x00000300 /*         full */
+
+# define DEBUG_REG_RSVD4           0x00000080 /* */
+
+# define DEBUG_REG_RX_RD_STATE     0x00000060 /* Rx rd ctrl state */
+# define DEBUG_REG_RX_RD_IDLE      0x00000000 /*         idle */
+# define DEBUG_REG_RX_RD_RDG_FR    0x00000020 /*         reading frame data */
+# define DEBUG_REG_RX_RD_RDG_STA   0x00000040 /*         reading status */
+# define DEBUG_REG_RX_RD_FLUSH     0x00000060 /*         flush fr data/stat */
+
+# define DEBUG_REG_RX_ACTIVE       0x00000010 /* Rx wr ctlr active */
+
+# define DEBUG_REG_RSVD5           0x00000008 /* */
+# define DEBUG_REG_SM_FIFO_RW_STA  0x00000006 /* small fifo rd/wr state */
+# define DEBUG_REG_RX_RECVG        0x00000001 /* Rx actively receiving data */
+
+# define REM_WAKEUP_FR_REG         0x0028
+# define PMT_CTRL_STAT_REG         0x002c
+/*   0x0030-0x0034 reserved */
+
+# define INTRP_STATUS_REG          0x0038
+
+# define INTRP_STATUS_REG_RSVD1    0x0000fc00 /* */
+# define INTRP_STATUS_REG_TSI      0x00000200 /* time stamp int stat */
+# define INTRP_STATUS_REG_RSVD2    0x00000100 /* */
+
+# define INTRP_STATUS_REG_RCOI     0x00000080 /* rec checksum offload int */
+# define INTRP_STATUS_REG_TI       0x00000040 /* tx int stat */
+# define INTRP_STATUS_REG_RI       0x00000020 /* rx int stat */
+# define INTRP_STATUS_REG_NI       0x00000010 /* normal int summary */
+
+# define INTRP_STATUS_REG_PMTI     0x00000008 /* PMT int */
+# define INTRP_STATUS_REG_ANC      0x00000004 /* auto negotiation complete */
+# define INTRP_STATUS_REG_LSC      0x00000002 /* link status change */
+# define INTRP_STATUS_REG_MII      0x00000001 /* rgMii/sgMii int */
+
+# define INTRP_MASK_REG            0x003c
+
+# define INTRP_MASK_REG_RSVD1      0xfc00     /* */
+# define INTRP_MASK_REG_TSIM       0x0200     /* time stamp int mask */
+# define INTRP_MASK_REG_RSVD2      0x01f0     /* */
+
+# define INTRP_MASK_REG_PMTIM      0x0000     /* PMT int mask */
+# define INTRP_MASK_REG_ANCM       0x0000     /* auto negotiation compl mask */
+# define INTRP_MASK_REG_LSCM       0x0000     /* link status change mask */
+# define INTRP_MASK_REG_MIIM       0x0000     /* rgMii/sgMii int mask */
+
+# define MAC_ADR_0_HIGH_REG        0x0040
+# define MAC_ADR_0_LOW_REG         0x0044
+/* additional pairs of registers for MAC addresses 1-15 */
+
+# define AN_CONTROL_REG            0x00c0
+
+# define AN_CONTROL_REG_RSVRD1     0xfff80000 /* */
+# define AN_CONTROL_REG_SGM_RAL    0x00040000 /* sgmii ral control */
+# define AN_CONTROL_REG_LR         0x00020000 /* lock to reference */
+# define AN_CONTROL_REG_ECD        0x00010000 /* enable comma detect */
+
+# define AN_CONTROL_REG_RSVRD2     0x00008000 /* */
+# define AN_CONTROL_REG_ELE        0x00004000 /* external loopback enable */
+# define AN_CONTROL_REG_RSVRD3     0x00002000 /* */
+# define AN_CONTROL_REG_ANE        0x00001000 /* auto negotiation enable */
+
+# define AN_CONTROL_REG_RSRVD4     0x00000c00 /* */
+# define AN_CONTROL_REG_RAN        0x00000200 /* restart auto negotiation */
+# define AN_CONTROL_REG_RSVRD5     0x000001ff /* */
+
+# define AN_STATUS_REG             0x00c4
+
+# define AN_STATUS_REG_RSVRD1      0xfffffe00 /* */
+# define AN_STATUS_REG_ES          0x00000100 /* extended status */
+# define AN_STATUS_REG_RSVRD2      0x000000c0 /* */
+# define AN_STATUS_REG_ANC         0x00000020 /* auto-negotiation complete */
+# define AN_STATUS_REG_RSVRD3      0x00000010 /* */
+# define AN_STATUS_REG_ANA         0x00000008 /* auto-negotiation ability */
+# define AN_STATUS_REG_LS          0x00000004 /* link status */
+# define AN_STATUS_REG_RSVRD4      0x00000003 /* */
+
+# define AN_ADVERTISE_REG          0x00c8
+# define AN_LNK_PRTNR_ABIL_REG     0x00cc
+# define AN_EXPANDSION_REG         0x00d0
+# define TBI_EXT_STATUS_REG        0x00d4
+
+# define SG_RG_SMII_STATUS_REG     0x00d8
+
+# define LINK_STATUS_REG           0x00d8
+
+# define LINK_STATUS_REG_RSVRD1    0xffffffc0 /* */
+# define LINK_STATUS_REG_FCD       0x00000020 /* false carrier detect */
+# define LINK_STATUS_REG_JT        0x00000010 /* jabber timeout */
+# define LINK_STATUS_REG_UP        0x00000008 /* link status */
+
+# define LINK_STATUS_REG_SPD       0x00000006 /* link speed */
+# define LINK_STATUS_REG_SPD_2_5   0x00000000 /* 10M   2.5M * 4 */
+# define LINK_STATUS_REG_SPD_25    0x00000002 /* 100M   25M * 4 */
+# define LINK_STATUS_REG_SPD_125   0x00000004 /* 1G    125M * 8 */
+
+# define LINK_STATUS_REG_F_DUPLEX  0x00000001 /* full duplex */
+
+/*     0x00dc-0x00fc reserved */
+
+/* MMC Register Map is from     0x0100-0x02fc */
+# define MMC_CNTRL_REG             0x0100
+# define MMC_INTR_RX_REG           0x0104
+# define MMC_INTR_TX_REG           0x0108
+# define MMC_INTR_MASK_RX_REG      0x010C
+# define MMC_INTR_MASK_TX_REG      0x0110
+
+/*     0x0300-0x06fc reserved */
+
+/* precision time protocol   time stamp registers */
+
+# define TS_CTL_REG                 0x0700
+
+# define TS_CTL_ATSFC               0x00080000
+# define TS_CTL_TSENMAC             0x00040000
+
+# define TS_CTL_TSCLKTYPE           0x00030000
+# define TS_CTL_TSCLK_ORD           0x00000000
+# define TS_CTL_TSCLK_BND           0x00010000
+# define TS_CTL_TSCLK_ETE           0x00020000
+# define TS_CTL_TSCLK_PTP           0x00030000
+
+# define TS_CTL_TSMSTRENA           0x00008000
+# define TS_CTL_TSEVNTENA           0x00004000
+# define TS_CTL_TSIPV4ENA           0x00002000
+# define TS_CTL_TSIPV6ENA           0x00001000
+
+# define TS_CTL_TSIPENA             0x00000800
+# define TS_CTL_TSVER2ENA           0x00000400
+# define TS_CTL_TSCTRLSSR           0x00000200
+# define TS_CTL_TSENALL             0x00000100
+
+# define TS_CTL_TSADDREG            0x00000020
+# define TS_CTL_TSTRIG              0x00000010
+
+# define TS_CTL_TSUPDT              0x00000008
+# define TS_CTL_TSINIT              0x00000004
+# define TS_CTL_TSCFUPDT            0x00000002
+# define TS_CTL_TSENA               0x00000001
+
+
+# define TS_SUB_SEC_INCR_REG        0x0704
+# define TS_HIGH_REG                0x0708
+# define TS_LOW_REG                 0x070c
+# define TS_HI_UPDT_REG             0x0710
+# define TS_LO_UPDT_REG             0x0714
+# define TS_APPEND_REG              0x0718
+# define TS_TARG_TIME_HIGH_REG      0x071c
+# define TS_TARG_TIME_LOW_REG       0x0720
+# define TS_HIGHER_WD_REG           0x0724
+# define TS_STATUS_REG              0x072c
+
+/*     0x0730-0x07fc reserved */
+
+# define MAC_ADR16_HIGH_REG        0x0800
+# define MAC_ADR16_LOW_REG         0x0804
+/* additional pairs of registers for MAC addresses 17-31 */
+
+# define MAC_ADR_MAX             32
+
+
+# define  QFEC_INTRP_SETUP               (INTRP_EN_REG_AIE    \
+					| INTRP_EN_REG_FBE \
+					| INTRP_EN_REG_RWE \
+					| INTRP_EN_REG_RSE \
+					| INTRP_EN_REG_RUE \
+					| INTRP_EN_REG_UNE \
+					| INTRP_EN_REG_OVE \
+					| INTRP_EN_REG_TJE \
+					| INTRP_EN_REG_TSE \
+					| INTRP_EN_REG_NIE \
+					| INTRP_EN_REG_RIE \
+					| INTRP_EN_REG_TIE)
+
+/*
+ * ASIC Ethernet clock register definitions:
+ *     address offsets and some register definitions
+ */
+
+# define EMAC_CLK_REG_BASE           0x94020000
+
+/*
+ * PHY clock PLL register locations
+ */
+# define ETH_MD_REG                  0x02A4
+# define ETH_NS_REG                  0x02A8
+
+/* definitions of NS_REG control bits
+ */
+# define ETH_NS_SRC_SEL              0x0007
+
+# define ETH_NS_PRE_DIV_MSK          0x0018
+# define ETH_NS_PRE_DIV(x)           (ETH_NS_PRE_DIV_MSK & (x << 3))
+
+# define ETH_NS_MCNTR_MODE_MSK       0x0060
+# define ETH_NS_MCNTR_MODE_BYPASS    0x0000
+# define ETH_NS_MCNTR_MODE_SWALLOW   0x0020
+# define ETH_NS_MCNTR_MODE_DUAL      0x0040
+# define ETH_NS_MCNTR_MODE_SINGLE    0x0060
+
+# define ETH_NS_MCNTR_RST            0x0080
+# define ETH_NS_MCNTR_EN             0x0100
+
+# define EMAC_PTP_NS_CLK_EN          0x0200
+# define EMAC_PTP_NS_CLK_INV         0x0400
+# define EMAC_PTP_NS_ROOT_EN         0x0800
+
+/* clock sources
+ */
+# define CLK_SRC_TCXO                0x0
+# define CLK_SRC_PLL_GLOBAL          0x1
+# define CLK_SRC_PLL_ARM             0x2
+# define CLK_SRC_PLL_QDSP6           0x3
+# define CLK_SRC_PLL_EMAC            0x4
+# define CLK_SRC_EXT_CLK2            0x5
+# define CLK_SRC_EXT_CLK1            0x6
+# define CLK_SRC_CORE_TEST           0x7
+
+# define ETH_MD_M(x)                 (x << 16)
+# define ETH_MD_2D_N(x)              ((~(x) & 0xffff))
+# define ETH_NS_NM(x)                ((~(x) << 16) & 0xffff0000)
+
+/*
+ * PHY interface clock divider
+ */
+# define ETH_X_EN_NS_REG             0x02AC
+
+# define ETH_RX_CLK_FB_INV           0x80
+# define ETH_RX_CLK_FB_EN            0x40
+# define ETH_TX_CLK_FB_INV           0x20
+# define ETH_TX_CLK_FB_EN            0x10
+# define ETH_RX_CLK_INV              0x08
+# define ETH_RX_CLK_EN               0x04
+# define ETH_TX_CLK_INV              0x02
+# define ETH_TX_CLK_EN               0x01
+
+# define ETH_X_EN_NS_DEFAULT \
+	(ETH_RX_CLK_FB_EN | ETH_TX_CLK_FB_EN | ETH_RX_CLK_EN | ETH_TX_CLK_EN)
+
+# define EMAC_PTP_MD_REG             0x02B0
+
+/* PTP clock divider
+ */
+# define EMAC_PTP_NS_REG             0x02B4
+
+/*
+ * clock interface pin controls
+ */
+# define EMAC_NS_REG                 0x02B8
+
+# define EMAC_RX_180_CLK_INV         0x2000
+# define EMAC_RX_180_CLK_EN          0x1000
+# define EMAC_RX_180_CLK_EN_INV      (EMAC_RX_180_CLK_INV | EMAC_RX_180_CLK_EN)
+
+# define EMAC_TX_180_CLK_INV         0x0800
+# define EMAC_TX_180_CLK_EN          0x0400
+# define EMAC_TX_180_CLK_EN_INV      (EMAC_TX_180_CLK_INV | EMAC_TX_180_CLK_EN)
+
+# define EMAC_REVMII_RX_CLK_INV      0x0200
+# define EMAC_REVMII_RX_CLK_EN       0x0100
+
+# define EMAC_RX_CLK_INV             0x0080
+# define EMAC_RX_CLK_EN              0x0040
+
+# define EMAC_REVMII_TX_CLK_INV      0x0020
+# define EMAC_REVMII_TX_CLK_EN       0x0010
+
+# define EMAC_TX_CLK_INV             0x0008
+# define EMAC_TX_CLK_EN              0x0004
+
+# define EMAC_RX_R_CLK_EN            0x0002
+# define EMAC_TX_R_CLK_EN            0x0001
+
+# define EMAC_NS_DEFAULT \
+	(EMAC_RX_180_CLK_EN_INV | EMAC_TX_180_CLK_EN_INV \
+	| EMAC_REVMII_RX_CLK_EN | EMAC_REVMII_TX_CLK_EN \
+	| EMAC_RX_CLK_EN | EMAC_TX_CLK_EN \
+	| EMAC_RX_R_CLK_EN | EMAC_TX_R_CLK_EN)
+
+/*
+ *
+ */
+# define EMAC_TX_FS_REG              0x02BC
+# define EMAC_RX_FS_REG              0x02C0
+
+/*
+ * Ethernet controller PHY interface select
+ */
+# define EMAC_PHY_INTF_SEL_REG       0x18030
+
+# define EMAC_PHY_INTF_SEL_MII       0x0
+# define EMAC_PHY_INTF_SEL_RGMII     0x1
+# define EMAC_PHY_INTF_SEL_REVMII    0x7
+# define EMAC_PHY_INTF_SEL_MASK      0x7
+
+/*
+ * MDIO addresses
+ */
+# define EMAC_PHY_ADDR_REG           0x18034
+# define EMAC_REVMII_PHY_ADDR_REG    0x18038
+
+/*
+ * clock routing
+ */
+# define EMAC_CLKMUX_SEL_REG         0x1803c
+
+# define EMAC_CLKMUX_SEL_0           0x1
+# define EMAC_CLKMUX_SEL_1           0x2
+
+
+#endif
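
For clarity, here is a minimal illustrative sketch (not part of the patch) of how the GMII_ADR_REG field macros defined above are meant to be combined for an MDIO read: pack the PHY and register numbers with the *_SET() helpers, start the transaction by setting the busy bit, poll until the controller clears GMII_ADR_REG_GB, then take the result from GMII_DATA_REG. The qfec_rd()/qfec_wr() accessors, the iomapped base pointer, the CSR clock-range value and the busy-bit polling behaviour are all assumptions made for the example, not symbols or guarantees taken from this patch.

/* hypothetical MMIO accessors over the iomapped MAC register block */
static inline u32 qfec_rd(void __iomem *base, u32 off)
{
	return readl(base + off);
}

static inline void qfec_wr(void __iomem *base, u32 off, u32 val)
{
	writel(val, base + off);
}

/* read one PHY register through the GMII address/data register pair */
static int qfec_mdio_read(void __iomem *base, int phy, int reg, u16 *val)
{
	int timeout = 1000;
	u32 adr;

	adr = GMII_ADR_REG_ADR_SET(phy)		/* PHY address field */
	    | GMII_ADR_REG_REG_SET(reg)		/* PHY register field */
	    | GMII_ADR_REG_CSR_SET(1)		/* assumed CSR clock range */
	    | GMII_ADR_REG_GB;			/* mark the transaction busy */

	qfec_wr(base, GMII_ADR_REG, adr);

	/* assumed: the controller clears GB when the MDIO cycle completes */
	while (qfec_rd(base, GMII_ADR_REG) & GMII_ADR_REG_GB) {
		if (--timeout == 0)
			return -1;
	}

	*val = qfec_rd(base, GMII_DATA_REG) & GMII_DATA_REG_DATA;
	return 0;
}
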
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 5f53fbb..1e1617e 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -223,6 +223,20 @@
 #define SMC_outsl(a, r, p, l)	writesl((a) + (r), p, l)
 #define SMC_IRQ_FLAGS		(-1)	/* from resource */
 
+#elif defined(CONFIG_ARCH_MSM)
+
+#define SMC_CAN_USE_8BIT	0
+#define SMC_CAN_USE_16BIT	1
+#define SMC_CAN_USE_32BIT	0
+#define SMC_NOWAIT		1
+
+#define SMC_inw(a, r)		readw((a) + (r))
+#define SMC_outw(v, a, r)	writew(v, (a) + (r))
+#define SMC_insw(a, r, p, l)	readsw((a) + (r), p, l)
+#define SMC_outsw(a, r, p, l)	writesw((a) + (r), p, l)
+
+#define SMC_IRQ_FLAGS		IRQF_TRIGGER_HIGH
+
 #elif defined(CONFIG_MN10300)
 
 /*
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index c6d47d1..1064aa0 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -42,6 +42,7 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/platform_device.h>
+#include <linux/gpio.h>
 #include <linux/sched.h>
 #include <linux/timer.h>
 #include <linux/bug.h>
@@ -872,7 +873,7 @@
 			    (!pdata->using_extphy)) {
 				/* Restore original GPIO configuration */
 				pdata->gpio_setting = pdata->gpio_orig_setting;
-				smsc911x_reg_write(pdata, GPIO_CFG,
+				smsc911x_reg_write(pdata, SMSC_GPIO_CFG,
 					pdata->gpio_setting);
 			}
 		} else {
@@ -880,7 +881,7 @@
 			/* Check global setting that LED1
 			 * usage is 10/100 indicator */
 			pdata->gpio_setting = smsc911x_reg_read(pdata,
-				GPIO_CFG);
+				SMSC_GPIO_CFG);
 			if ((pdata->gpio_setting & GPIO_CFG_LED1_EN_) &&
 			    (!pdata->using_extphy)) {
 				/* Force 10/100 LED off, after saving
@@ -891,7 +892,7 @@
 				pdata->gpio_setting |= (GPIO_CFG_GPIOBUF0_
 							| GPIO_CFG_GPIODIR0_
 							| GPIO_CFG_GPIOD0_);
-				smsc911x_reg_write(pdata, GPIO_CFG,
+				smsc911x_reg_write(pdata, SMSC_GPIO_CFG,
 					pdata->gpio_setting);
 			}
 		}
@@ -1314,7 +1315,7 @@
 		SMSC_WARN(pdata, ifup,
 			  "Timed out waiting for EEPROM busy bit to clear");
 
-	smsc911x_reg_write(pdata, GPIO_CFG, 0x70070000);
+	smsc911x_reg_write(pdata, SMSC_GPIO_CFG, 0x70070000);
 
 	/* The soft reset above cleared the device's MAC address,
 	 * restore it from local copy (set in probe) */
@@ -1758,9 +1759,9 @@
 
 static void smsc911x_eeprom_enable_access(struct smsc911x_data *pdata)
 {
-	unsigned int temp = smsc911x_reg_read(pdata, GPIO_CFG);
+	unsigned int temp = smsc911x_reg_read(pdata, SMSC_GPIO_CFG);
 	temp &= ~GPIO_CFG_EEPR_EN_;
-	smsc911x_reg_write(pdata, GPIO_CFG, temp);
+	smsc911x_reg_write(pdata, SMSC_GPIO_CFG, temp);
 	msleep(1);
 }
 
@@ -2055,6 +2056,10 @@
 
 	SMSC_TRACE(pdata, ifdown, "Stopping driver");
 
+	if (pdata->config.has_reset_gpio) {
+		gpio_set_value_cansleep(pdata->config.reset_gpio, 0);
+		gpio_free(pdata->config.reset_gpio);
+	}
 	phy_disconnect(pdata->phy_dev);
 	pdata->phy_dev = NULL;
 	mdiobus_unregister(pdata->mii_bus);
@@ -2185,7 +2190,7 @@
 	smsc911x_reg_write(pdata, INT_EN, 0);
 	smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
 
-	retval = request_irq(dev->irq, smsc911x_irqhandler,
+	retval = request_any_context_irq(dev->irq, smsc911x_irqhandler,
 			     irq_flags | IRQF_SHARED, dev->name, dev);
 	if (retval) {
 		SMSC_WARN(pdata, probe,
@@ -2277,6 +2282,10 @@
 		PMT_CTRL_PM_MODE_D1_ | PMT_CTRL_WOL_EN_ |
 		PMT_CTRL_ED_EN_ | PMT_CTRL_PME_EN_);
 
+	/* Drive the GPIO Ethernet_Reset Line low to Suspend */
+	if (pdata->config.has_reset_gpio)
+		gpio_set_value_cansleep(pdata->config.reset_gpio, 0);
+
 	return 0;
 }
 
@@ -2286,6 +2295,9 @@
 	struct smsc911x_data *pdata = netdev_priv(ndev);
 	unsigned int to = 100;
 
+	if (pdata->config.has_reset_gpio)
+		gpio_set_value_cansleep(pdata->config.reset_gpio, 1);
+
 	/* Note 3.11 from the datasheet:
 	 * 	"When the LAN9220 is in a power saving state, a write of any
 	 * 	 data to the BYTE_TEST register will wake-up the device."
diff --git a/drivers/net/smsc911x.h b/drivers/net/smsc911x.h
index 8d67aac..19711d2 100644
--- a/drivers/net/smsc911x.h
+++ b/drivers/net/smsc911x.h
@@ -236,7 +236,7 @@
 #define PMT_CTRL_PME_EN_		0x00000002
 #define PMT_CTRL_READY_			0x00000001
 
-#define GPIO_CFG			0x88
+#define SMSC_GPIO_CFG			0x88
 #define GPIO_CFG_LED3_EN_		0x40000000
 #define GPIO_CFG_LED2_EN_		0x20000000
 #define GPIO_CFG_LED1_EN_		0x10000000
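
As a usage note for the suspend/resume GPIO handling added to smsc911x.c above, the sketch below shows how a board file might hand the Ethernet_Reset line to the driver through platform data. The has_reset_gpio and reset_gpio members are the ones referenced in those hunks (their declaration is assumed to be added to struct smsc911x_platform_config elsewhere in this series); the GPIO number and the remaining settings are purely illustrative.

static struct smsc911x_platform_config board_smsc911x_config = {
	.irq_polarity	= SMSC911X_IRQ_POLARITY_ACTIVE_HIGH,
	.irq_type	= SMSC911X_IRQ_TYPE_PUSH_PULL,
	.flags		= SMSC911X_USE_16BIT,
	.has_reset_gpio	= 1,	/* let the driver toggle the reset line on suspend/resume */
	.reset_gpio	= 89,	/* hypothetical Ethernet_Reset GPIO number */
};
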
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index f1d88c5..50dacf9 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -273,6 +273,26 @@
 	help
 	  Enables Power/Reset/Carddetect function abstraction
 
+config LIBRA_SDIOIF
+	tristate "Qualcomm libra wlan SDIO driver"
+	select WIRELESS_EXT
+	select WEXT_PRIV
+	select WEXT_CORE
+	select WEXT_SPY
+	depends on MMC_MSM
+	---help---
+	  A driver for the Qualcomm Libra WLAN SDIO chipset.
+
+config WCNSS_WLAN
+	tristate "Qualcomm WCNSS WLAN driver"
+	depends on ARCH_MSM8960
+	select WIRELESS_EXT
+	select WEXT_PRIV
+	select WEXT_CORE
+	select WEXT_SPY
+	---help---
+	  A driver for the Qualcomm WCNSS WLAN feature.
+
 source "drivers/net/wireless/ath/Kconfig"
 source "drivers/net/wireless/b43/Kconfig"
 source "drivers/net/wireless/b43legacy/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 8ceae0a..f725adf 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -61,3 +61,6 @@
 
 obj-$(CONFIG_BCM4329)	+= bcm4329/
 obj-$(CONFIG_BCMDHD)	+= bcmdhd/
+
+obj-$(CONFIG_LIBRA_SDIOIF)	+= libra/
+obj-$(CONFIG_WCNSS_WLAN)	+= wcnss/
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 1be7c8b..82fb6ce 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -506,6 +506,17 @@
 	ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
 			 AR_WA_ASPM_TIMER_BASED_DISABLE);
 
+	/*
+	 * Read back AR_WA into a permanent copy and set bits 14 and 17.
+	 * We need to do this to avoid RMW of this register. We cannot
+	 * read the reg when chip is asleep.
+	 */
+	ah->WARegVal = REG_READ(ah, AR_WA);
+	ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
+			 AR_WA_ASPM_TIMER_BASED_DISABLE);
+
+	ath9k_hw_read_revisions(ah);
+
 	if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
 		ath_err(common, "Couldn't reset chip\n");
 		return -EIO;
diff --git a/drivers/net/wireless/bcm4329/bcmspibrcm.c b/drivers/net/wireless/bcm4329/bcmspibrcm.c
new file mode 100644
index 0000000..0f131a4
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/bcmspibrcm.c
@@ -0,0 +1,1726 @@
+/*
+ * Broadcom BCMSDH to gSPI Protocol Conversion Layer
+ *
+ * Copyright (C) 2010, Broadcom Corporation
+ * All Rights Reserved.
+ * 
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
+ * the contents of this file may not be disclosed to third parties, copied
+ * or duplicated in any form, in whole or in part, without the prior
+ * written permission of Broadcom Corporation.
+ *
+ * $Id: bcmspibrcm.c,v 1.11.2.10.2.9.6.11 2009/05/21 13:21:57 Exp $
+ */
+
+#define HSMODE
+
+#include <typedefs.h>
+
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <osl.h>
+#include <hndsoc.h>
+#include <siutils.h>
+#include <sbchipc.h>
+#include <sbsdio.h>
+#include <spid.h>
+
+#include <bcmsdbus.h>	/* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h>	/* ioctl/iovars */
+#include <sdio.h>
+
+#include <pcicfg.h>
+
+
+#include <bcmspibrcm.h>
+#include <bcmspi.h>
+
+#define F0_RESPONSE_DELAY	16
+#define F1_RESPONSE_DELAY	16
+#define F2_RESPONSE_DELAY	F0_RESPONSE_DELAY
+
+#define CMDLEN		4
+
+#define DWORDMODE_ON ((sd->chip == BCM4329_CHIP_ID) && (sd->chiprev == 2) && (sd->dwordmode == TRUE))
+
+/* Globals */
+uint sd_msglevel = 0;
+
+uint sd_hiok = FALSE;		/* Use hi-speed mode if available? */
+uint sd_sdmode = SDIOH_MODE_SPI;		/* Use SPI mode by default */
+uint sd_f2_blocksize = 64;		/* Default blocksize */
+
+
+uint sd_divisor = 2;
+uint sd_power = 1;		/* Default to SD Slot powered ON */
+uint sd_clock = 1;		/* Default to SD Clock turned ON */
+uint sd_crc = 0;		/* Default to SPI CRC Check turned OFF */
+
+uint8	spi_outbuf[SPI_MAX_PKT_LEN];
+uint8	spi_inbuf[SPI_MAX_PKT_LEN];
+
+/* A 128-byte buffer is enough to clear the data-not-available and program the F0 response-delay
+ * bits, assuming the F0 response delay will not exceed 100 bytes at 48MHz.
+ */
+#define BUF2_PKT_LEN	128
+uint8	spi_outbuf2[BUF2_PKT_LEN];
+uint8	spi_inbuf2[BUF2_PKT_LEN];
+
+/* Prototypes */
+static bool bcmspi_test_card(sdioh_info_t *sd);
+static bool bcmspi_host_device_init_adapt(sdioh_info_t *sd);
+static int bcmspi_set_highspeed_mode(sdioh_info_t *sd, bool hsmode);
+static int bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg,
+                           uint32 *data, uint32 datalen);
+static int bcmspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr,
+                              int regsize, uint32 *data);
+static int bcmspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr,
+                               int regsize, uint32 data);
+static int bcmspi_card_bytewrite(sdioh_info_t *sd, int func, uint32 regaddr,
+                               uint8 *data);
+static int bcmspi_driver_init(sdioh_info_t *sd);
+static int bcmspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo,
+                          uint32 addr, int nbytes, uint32 *data);
+static int bcmspi_card_regread_fixedaddr(sdioh_info_t *sd, int func, uint32 regaddr, int regsize,
+                                 uint32 *data);
+static void bcmspi_cmd_getdstatus(sdioh_info_t *sd, uint32 *dstatus_buffer);
+static int bcmspi_update_stats(sdioh_info_t *sd, uint32 cmd_arg);
+
+/*
+ *  Public entry points & extern's
+ */
+extern sdioh_info_t *
+sdioh_attach(osl_t *osh, void *bar0, uint irq)
+{
+	sdioh_info_t *sd;
+
+	sd_trace(("%s\n", __FUNCTION__));
+	if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
+		sd_err(("%s: out of memory, malloced %d bytes\n", __FUNCTION__, MALLOCED(osh)));
+		return NULL;
+	}
+	bzero((char *)sd, sizeof(sdioh_info_t));
+	sd->osh = osh;
+	if (spi_osinit(sd) != 0) {
+		sd_err(("%s: spi_osinit() failed\n", __FUNCTION__));
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return NULL;
+	}
+
+	sd->bar0 = bar0;
+	sd->irq = irq;
+	sd->intr_handler = NULL;
+	sd->intr_handler_arg = NULL;
+	sd->intr_handler_valid = FALSE;
+
+	/* Set defaults */
+	sd->use_client_ints = TRUE;
+	sd->sd_use_dma = FALSE;	/* DMA Not supported */
+
+	/* The SPI device defaults to 16-bit mode; change wordlen to 4 once the device is
+	 * switched to 32-bit mode.
+	 */
+	sd->wordlen = 2;
+
+	if (!spi_hw_attach(sd)) {
+		sd_err(("%s: spi_hw_attach() failed\n", __FUNCTION__));
+		spi_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return (NULL);
+	}
+
+	if (bcmspi_driver_init(sd) != SUCCESS) {
+		sd_err(("%s: bcmspi_driver_init() failed()\n", __FUNCTION__));
+		spi_hw_detach(sd);
+		spi_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return (NULL);
+	}
+
+	if (spi_register_irq(sd, irq) != SUCCESS) {
+		sd_err(("%s: spi_register_irq() failed for irq = %d\n", __FUNCTION__, irq));
+		spi_hw_detach(sd);
+		spi_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return (NULL);
+	}
+
+	sd_trace(("%s: Done\n", __FUNCTION__));
+
+	return sd;
+}
+
+extern SDIOH_API_RC
+sdioh_detach(osl_t *osh, sdioh_info_t *sd)
+{
+	sd_trace(("%s\n", __FUNCTION__));
+	if (sd) {
+		sd_err(("%s: detaching from hardware\n", __FUNCTION__));
+		spi_free_irq(sd->irq, sd);
+		spi_hw_detach(sd);
+		spi_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+	}
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Configure callback to client when we receive a client interrupt */
+extern SDIOH_API_RC
+sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	sd->intr_handler = fn;
+	sd->intr_handler_arg = argh;
+	sd->intr_handler_valid = TRUE;
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_deregister(sdioh_info_t *sd)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	sd->intr_handler_valid = FALSE;
+	sd->intr_handler = NULL;
+	sd->intr_handler_arg = NULL;
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	*onoff = sd->client_intr_enabled;
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(DHD_DEBUG)
+extern bool
+sdioh_interrupt_pending(sdioh_info_t *sd)
+{
+	return 0;
+}
+#endif
+
+extern SDIOH_API_RC
+sdioh_query_device(sdioh_info_t *sd)
+{
+	/* Return a BRCM ID appropriate to the dongle class */
+	return (sd->num_funcs > 1) ? BCM4329_D11NDUAL_ID : BCM4318_D11G_ID;
+}
+
+/* Provide dstatus bits of spi-transaction for dhd layers. */
+extern uint32
+sdioh_get_dstatus(sdioh_info_t *sd)
+{
+	return sd->card_dstatus;
+}
+
+extern void
+sdioh_chipinfo(sdioh_info_t *sd, uint32 chip, uint32 chiprev)
+{
+	sd->chip = chip;
+	sd->chiprev = chiprev;
+}
+
+extern void
+sdioh_dwordmode(sdioh_info_t *sd, bool set)
+{
+	uint8 reg = 0;
+	int status;
+
+	if ((status = sdioh_request_byte(sd, SDIOH_READ, SPI_FUNC_0, SPID_STATUS_ENABLE, &reg)) !=
+	     SUCCESS) {
+		sd_err(("%s: Failed to set dwordmode in gSPI\n", __FUNCTION__));
+		return;
+	}
+
+	if (set) {
+		reg |= DWORD_PKT_LEN_EN;
+		sd->dwordmode = TRUE;
+		sd->client_block_size[SPI_FUNC_2] = 4096; /* h2spi's limit is 4KB, we support 8KB */
+	} else {
+		reg &= ~DWORD_PKT_LEN_EN;
+		sd->dwordmode = FALSE;
+		sd->client_block_size[SPI_FUNC_2] = 2048;
+	}
+
+	if ((status = sdioh_request_byte(sd, SDIOH_WRITE, SPI_FUNC_0, SPID_STATUS_ENABLE, &reg)) !=
+	     SUCCESS) {
+		sd_err(("%s: Failed to set dwordmode in gSPI\n", __FUNCTION__));
+		return;
+	}
+}
+
+
+uint
+sdioh_query_iofnum(sdioh_info_t *sd)
+{
+	return sd->num_funcs;
+}
+
+/* IOVar table */
+enum {
+	IOV_MSGLEVEL = 1,
+	IOV_BLOCKMODE,
+	IOV_BLOCKSIZE,
+	IOV_DMA,
+	IOV_USEINTS,
+	IOV_NUMINTS,
+	IOV_NUMLOCALINTS,
+	IOV_HOSTREG,
+	IOV_DEVREG,
+	IOV_DIVISOR,
+	IOV_SDMODE,
+	IOV_HISPEED,
+	IOV_HCIREGS,
+	IOV_POWER,
+	IOV_CLOCK,
+	IOV_SPIERRSTATS,
+	IOV_RESP_DELAY_ALL
+};
+
+const bcm_iovar_t sdioh_iovars[] = {
+	{"sd_msglevel",	IOV_MSGLEVEL, 	0,	IOVT_UINT32,	0 },
+	{"sd_blocksize", IOV_BLOCKSIZE, 0,	IOVT_UINT32,	0 }, /* ((fn << 16) | size) */
+	{"sd_dma",	IOV_DMA,	0,	IOVT_BOOL,	0 },
+	{"sd_ints",	IOV_USEINTS,	0,	IOVT_BOOL,	0 },
+	{"sd_numints",	IOV_NUMINTS,	0,	IOVT_UINT32,	0 },
+	{"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32,	0 },
+	{"sd_hostreg",	IOV_HOSTREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sd_devreg",	IOV_DEVREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t)	},
+	{"sd_divisor",	IOV_DIVISOR,	0,	IOVT_UINT32,	0 },
+	{"sd_power",	IOV_POWER,	0,	IOVT_UINT32,	0 },
+	{"sd_clock",	IOV_CLOCK,	0,	IOVT_UINT32,	0 },
+	{"sd_mode",	IOV_SDMODE,	0,	IOVT_UINT32,	100},
+	{"sd_highspeed",	IOV_HISPEED,	0,	IOVT_UINT32,	0},
+	{"spi_errstats", IOV_SPIERRSTATS, 0, IOVT_BUFFER, sizeof(struct spierrstats_t) },
+	{"spi_respdelay",	IOV_RESP_DELAY_ALL,	0,	IOVT_BOOL,	0 },
+	{NULL, 0, 0, 0, 0 }
+};
+
+int
+sdioh_iovar_op(sdioh_info_t *si, const char *name,
+               void *params, int plen, void *arg, int len, bool set)
+{
+	const bcm_iovar_t *vi = NULL;
+	int bcmerror = 0;
+	int val_size;
+	int32 int_val = 0;
+	bool bool_val;
+	uint32 actionid;
+/*
+	sdioh_regs_t *regs;
+*/
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get must have return space; Set does not take qualifiers */
+	ASSERT(set || (arg && len));
+	ASSERT(!set || (!params && !plen));
+
+	sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
+
+	if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
+		bcmerror = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
+		goto exit;
+
+	/* Set up params so get and set can share the convenience variables */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		val_size = sizeof(int);
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	bool_val = (int_val != 0) ? TRUE : FALSE;
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+	switch (actionid) {
+	case IOV_GVAL(IOV_MSGLEVEL):
+		int_val = (int32)sd_msglevel;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_MSGLEVEL):
+		sd_msglevel = int_val;
+		break;
+
+	case IOV_GVAL(IOV_BLOCKSIZE):
+		if ((uint32)int_val > si->num_funcs) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		int_val = (int32)si->client_block_size[int_val];
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_DMA):
+		int_val = (int32)si->sd_use_dma;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DMA):
+		si->sd_use_dma = (bool)int_val;
+		break;
+
+	case IOV_GVAL(IOV_USEINTS):
+		int_val = (int32)si->use_client_ints;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_USEINTS):
+		break;
+
+	case IOV_GVAL(IOV_DIVISOR):
+		int_val = (uint32)sd_divisor;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DIVISOR):
+		sd_divisor = int_val;
+		if (!spi_start_clock(si, (uint16)sd_divisor)) {
+			sd_err(("%s: set clock failed\n", __FUNCTION__));
+			bcmerror = BCME_ERROR;
+		}
+		break;
+
+	case IOV_GVAL(IOV_POWER):
+		int_val = (uint32)sd_power;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_POWER):
+		sd_power = int_val;
+		break;
+
+	case IOV_GVAL(IOV_CLOCK):
+		int_val = (uint32)sd_clock;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_CLOCK):
+		sd_clock = int_val;
+		break;
+
+	case IOV_GVAL(IOV_SDMODE):
+		int_val = (uint32)sd_sdmode;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SDMODE):
+		sd_sdmode = int_val;
+		break;
+
+	case IOV_GVAL(IOV_HISPEED):
+		int_val = (uint32)sd_hiok;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_HISPEED):
+		sd_hiok = int_val;
+
+		if (!bcmspi_set_highspeed_mode(si, (bool)sd_hiok)) {
+			sd_err(("%s: Failed changing highspeed mode to %d.\n",
+			        __FUNCTION__, sd_hiok));
+			bcmerror = BCME_ERROR;
+			return ERROR;
+		}
+		break;
+
+	case IOV_GVAL(IOV_NUMINTS):
+		int_val = (int32)si->intrcount;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_NUMLOCALINTS):
+		int_val = (int32)si->local_intrcount;
+		bcopy(&int_val, arg, val_size);
+		break;
+	case IOV_GVAL(IOV_DEVREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+		uint8 data;
+
+		if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+
+		int_val = (int)data;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+
+	case IOV_SVAL(IOV_DEVREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+		uint8 data = (uint8)sd_ptr->value;
+
+		if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+		break;
+	}
+
+
+	case IOV_GVAL(IOV_SPIERRSTATS):
+	{
+		bcopy(&si->spierrstats, arg, sizeof(struct spierrstats_t));
+		break;
+	}
+
+	case IOV_SVAL(IOV_SPIERRSTATS):
+	{
+		bzero(&si->spierrstats, sizeof(struct spierrstats_t));
+		break;
+	}
+
+	case IOV_GVAL(IOV_RESP_DELAY_ALL):
+		int_val = (int32)si->resp_delay_all;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_RESP_DELAY_ALL):
+		si->resp_delay_all = (bool)int_val;
+		int_val = STATUS_ENABLE|INTR_WITH_STATUS;
+		if (si->resp_delay_all)
+			int_val |= RESP_DELAY_ALL;
+		else {
+			if (bcmspi_card_regwrite(si, SPI_FUNC_0, SPID_RESPONSE_DELAY, 1,
+			     F1_RESPONSE_DELAY) != SUCCESS) {
+				sd_err(("%s: Unable to set response delay.\n", __FUNCTION__));
+				bcmerror = BCME_SDIO_ERROR;
+				break;
+			}
+		}
+
+		if (bcmspi_card_regwrite(si, SPI_FUNC_0, SPID_STATUS_ENABLE, 1, int_val)
+		     != SUCCESS) {
+			sd_err(("%s: Unable to set response delay.\n", __FUNCTION__));
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+		break;
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+exit:
+
+	return bcmerror;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	SDIOH_API_RC status;
+	/* No lock needed since sdioh_request_byte does locking */
+	status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
+	return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	/* No lock needed since sdioh_request_byte does locking */
+	SDIOH_API_RC status;
+
+	if ((fnc_num == SPI_FUNC_1) && (addr == SBSDIO_FUNC1_FRAMECTRL)) {
+		uint8 dummy_data;
+		status = sdioh_cfg_read(sd, fnc_num, addr, &dummy_data);
+		if (status) {
+			sd_err(("sdioh_cfg_read() failed.\n"));
+			return status;
+		}
+	}
+
+	status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
+	return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
+{
+	uint32 count;
+	int offset;
+	uint32 cis_byte;
+	uint16 *cis = (uint16 *)cisd;
+	uint bar0 = SI_ENUM_BASE;
+	int status;
+	uint8 data;
+
+	sd_trace(("%s: Func %d\n", __FUNCTION__, func));
+
+	spi_lock(sd);
+
+	/* Set sb window address to 0x18000000 */
+	data = (bar0 >> 8) & SBSDIO_SBADDRLOW_MASK;
+	status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW, &data);
+	if (status == SUCCESS) {
+		data = (bar0 >> 16) & SBSDIO_SBADDRMID_MASK;
+		status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID, &data);
+	} else {
+		sd_err(("%s: Unable to set sb-addr-windows\n", __FUNCTION__));
+		spi_unlock(sd);
+		return (BCME_ERROR);
+	}
+	if (status == SUCCESS) {
+		data = (bar0 >> 24) & SBSDIO_SBADDRHIGH_MASK;
+		status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH, &data);
+	} else {
+		sd_err(("%s: Unable to set sb-addr-windows\n", __FUNCTION__));
+		spi_unlock(sd);
+		return (BCME_ERROR);
+	}
+
+	offset =  CC_OTP; /* OTP offset in chipcommon. */
+	for (count = 0; count < length/2; count++) {
+		if (bcmspi_card_regread (sd, SDIO_FUNC_1, offset, 2, &cis_byte) < 0) {
+			sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+			spi_unlock(sd);
+			return (BCME_ERROR);
+		}
+
+		*cis = (uint16)cis_byte;
+		cis++;
+		offset += 2;
+	}
+
+	spi_unlock(sd);
+
+	return (BCME_OK);
+}
+
+extern SDIOH_API_RC
+sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
+{
+	int status;
+	uint32 cmd_arg;
+	uint32 dstatus;
+	uint32 data = (uint32)(*byte);
+
+	spi_lock(sd);
+
+	cmd_arg = 0;
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);	/* Incremental access */
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, rw == SDIOH_READ ? 0 : 1);
+	cmd_arg = SFIELD(cmd_arg, SPI_LEN, 1);
+
+	sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));
+	sd_trace(("%s: rw=%d, func=%d, regaddr=0x%08x, data=0x%x\n", __FUNCTION__, rw, func,
+	         regaddr, data));
+
+	if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma,
+	                              cmd_arg, &data, 1)) != SUCCESS) {
+		spi_unlock(sd);
+		return status;
+	}
+
+	if (rw == SDIOH_READ)
+		*byte = (uint8)data;
+
+	bcmspi_cmd_getdstatus(sd, &dstatus);
+	if (dstatus)
+		sd_trace(("dstatus =0x%x\n", dstatus));
+
+	spi_unlock(sd);
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
+                   uint32 *word, uint nbytes)
+{
+	int status;
+
+	spi_lock(sd);
+
+	if (rw == SDIOH_READ)
+		status = bcmspi_card_regread(sd, func, addr, nbytes, word);
+	else
+		status = bcmspi_card_regwrite(sd, func, addr, nbytes, *word);
+
+	spi_unlock(sd);
+	return (status == SUCCESS ?  SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+extern SDIOH_API_RC
+sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint rw, uint func,
+                     uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt)
+{
+	int len;
+	int buflen = (int)buflen_u;
+	bool fifo = (fix_inc == SDIOH_DATA_FIX);
+
+	spi_lock(sd);
+
+	ASSERT(reg_width == 4);
+	ASSERT(buflen_u < (1 << 30));
+	ASSERT(sd->client_block_size[func]);
+
+	sd_data(("%s: %c len %d r_cnt %d t_cnt %d, pkt @0x%p\n",
+	         __FUNCTION__, rw == SDIOH_READ ? 'R' : 'W',
+	         buflen_u, sd->r_cnt, sd->t_cnt, pkt));
+
+	/* Break buffer down into blocksize chunks. */
+	while (buflen > 0) {
+		len = MIN(sd->client_block_size[func], buflen);
+		if (bcmspi_card_buf(sd, rw, func, fifo, addr, len, (uint32 *)buffer) != SUCCESS) {
+			sd_err(("%s: bcmspi_card_buf %s failed\n",
+				__FUNCTION__, rw == SDIOH_READ ? "Read" : "Write"));
+			spi_unlock(sd);
+			return SDIOH_API_RC_FAIL;
+		}
+		buffer += len;
+		buflen -= len;
+		if (!fifo)
+			addr += len;
+	}
+	spi_unlock(sd);
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* This function allows a write to the gSPI bus while another rd/wr function is deep down the
+ * call stack.  Its main aim is to keep SPI writes simple rather than recursive.
+ * E.g. when there is a need to program the response delay on the fly after detecting the
+ * SPI function, this call allows the response delay to be programmed.
+ */
+static int
+bcmspi_card_byterewrite(sdioh_info_t *sd, int func, uint32 regaddr, uint8 byte)
+{
+	uint32 cmd_arg;
+	uint32 datalen = 1;
+	uint32 hostlen;
+
+	cmd_arg = 0;
+
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1);
+	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);	/* Incremental access */
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+	cmd_arg = SFIELD(cmd_arg, SPI_LEN, datalen);
+
+	sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));
+
+
+	/* Set up and issue the SPI command.  MSByte goes out on bus first.  Increase datalen
+	 * according to the wordlen mode(16/32bit) the device is in.
+	 */
+	ASSERT(sd->wordlen == 4 || sd->wordlen == 2);
+	datalen = ROUNDUP(datalen, sd->wordlen);
+
+	/* Start by copying command in the spi-outbuffer */
+	if (sd->wordlen == 4) { /* 32bit spid */
+		*(uint32 *)spi_outbuf2 = bcmswap32(cmd_arg);
+		if (datalen & 0x3)
+			datalen += (4 - (datalen & 0x3));
+	} else if (sd->wordlen == 2) { /* 16bit spid */
+		*(uint16 *)spi_outbuf2 = bcmswap16(cmd_arg & 0xffff);
+		*(uint16 *)&spi_outbuf2[2] = bcmswap16((cmd_arg & 0xffff0000) >> 16);
+		if (datalen & 0x1)
+			datalen++;
+	} else {
+		sd_err(("%s: Host is %d bit spid, could not create SPI command.\n",
+		        __FUNCTION__, 8 * sd->wordlen));
+		return ERROR;
+	}
+
+	/* for Write, put the data into the output buffer  */
+	if (datalen != 0) {
+			if (sd->wordlen == 4) { /* 32bit spid */
+				*(uint32 *)&spi_outbuf2[CMDLEN] = bcmswap32(byte);
+			} else if (sd->wordlen == 2) { /* 16bit spid */
+				*(uint16 *)&spi_outbuf2[CMDLEN] = bcmswap16(byte & 0xffff);
+				*(uint16 *)&spi_outbuf2[CMDLEN + 2] =
+					bcmswap16((byte & 0xffff0000) >> 16);
+			}
+	}
+
+	/* +4 for cmd, +4 for dstatus */
+	hostlen = datalen + 8;
+	hostlen += (4 - (hostlen & 0x3));
+	spi_sendrecv(sd, spi_outbuf2, spi_inbuf2, hostlen);
+
+	/* Last 4bytes are dstatus.  Device is configured to return status bits. */
+	if (sd->wordlen == 4) { /* 32bit spid */
+		sd->card_dstatus = bcmswap32(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]);
+	} else if (sd->wordlen == 2) { /* 16bit spid */
+		sd->card_dstatus = (bcmswap16(*(uint16 *)&spi_inbuf2[datalen + CMDLEN ]) |
+		                   (bcmswap16(*(uint16 *)&spi_inbuf2[datalen + CMDLEN + 2]) << 16));
+	} else {
+		sd_err(("%s: Host is %d bit machine, could not read SPI dstatus.\n",
+		        __FUNCTION__, 8 * sd->wordlen));
+		return ERROR;
+	}
+
+	if (sd->card_dstatus)
+		sd_trace(("dstatus after byte rewrite = 0x%x\n", sd->card_dstatus));
+
+	return (BCME_OK);
+}
+
+/* Program the response delay corresponding to the spi function */
+static int
+bcmspi_prog_resp_delay(sdioh_info_t *sd, int func, uint8 resp_delay)
+{
+	if (sd->resp_delay_all == FALSE)
+		return (BCME_OK);
+
+	if (sd->prev_fun == func)
+		return (BCME_OK);
+
+	if (F0_RESPONSE_DELAY == F1_RESPONSE_DELAY)
+		return (BCME_OK);
+
+	bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_RESPONSE_DELAY, resp_delay);
+
+	/* Remember function for which to avoid reprogramming resp-delay in next iteration */
+	sd->prev_fun = func;
+
+	return (BCME_OK);
+
+}
+
+#define GSPI_RESYNC_PATTERN	0x0
+
+/* A resync pattern is a 32bit MOSI line with all zeros.  It is a special command in gSPI.
+ * It resets the spi-bkplane logic so that all F1 related ping-pong buffer logic is
+ * synchronised and all queued requests are cancelled.
+ */
+static int
+bcmspi_resync_f1(sdioh_info_t *sd)
+{
+	uint32 cmd_arg = GSPI_RESYNC_PATTERN, data = 0, datalen = 0;
+
+
+	/* Set up and issue the SPI command.  MSByte goes out on bus first.  Increase datalen
+	 * according to the wordlen mode(16/32bit) the device is in.
+	 */
+	ASSERT(sd->wordlen == 4 || sd->wordlen == 2);
+	datalen = ROUNDUP(datalen, sd->wordlen);
+
+	/* Start by copying command in the spi-outbuffer */
+	*(uint32 *)spi_outbuf2 = cmd_arg;
+
+	/* for Write, put the data into the output buffer  */
+	*(uint32 *)&spi_outbuf2[CMDLEN] = data;
+
+	/* +4 for cmd, +4 for dstatus */
+	spi_sendrecv(sd, spi_outbuf2, spi_inbuf2, datalen + 8);
+
+	/* Last 4bytes are dstatus.  Device is configured to return status bits. */
+	if (sd->wordlen == 4) { /* 32bit spid */
+		sd->card_dstatus = bcmswap32(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]);
+	} else if (sd->wordlen == 2) { /* 16bit spid */
+		sd->card_dstatus = (bcmswap16(*(uint16 *)&spi_inbuf2[datalen + CMDLEN ]) |
+		                   (bcmswap16(*(uint16 *)&spi_inbuf2[datalen + CMDLEN + 2]) << 16));
+	} else {
+		sd_err(("%s: Host is %d bit machine, could not read SPI dstatus.\n",
+		        __FUNCTION__, 8 * sd->wordlen));
+		return ERROR;
+	}
+
+	if (sd->card_dstatus)
+		sd_trace(("dstatus after resync pattern write = 0x%x\n", sd->card_dstatus));
+
+	return (BCME_OK);
+}
+
+uint32 dstatus_count = 0;
+
+static int
+bcmspi_update_stats(sdioh_info_t *sd, uint32 cmd_arg)
+{
+	uint32 dstatus = sd->card_dstatus;
+	struct spierrstats_t *spierrstats = &sd->spierrstats;
+	int err = SUCCESS;
+
+	sd_trace(("cmd = 0x%x, dstatus = 0x%x\n", cmd_arg, dstatus));
+
+	/* Store dstatus of last few gSPI transactions */
+	spierrstats->dstatus[dstatus_count % NUM_PREV_TRANSACTIONS] = dstatus;
+	spierrstats->spicmd[dstatus_count % NUM_PREV_TRANSACTIONS] = cmd_arg;
+	dstatus_count++;
+
+	if (sd->card_init_done == FALSE)
+		return err;
+
+	if (dstatus & STATUS_DATA_NOT_AVAILABLE) {
+		spierrstats->dna++;
+		sd_trace(("Read data not available on F1 addr = 0x%x\n",
+		        GFIELD(cmd_arg, SPI_REG_ADDR)));
+		/* Clear dna bit */
+		bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_INTR_REG, DATA_UNAVAILABLE);
+	}
+
+	if (dstatus & STATUS_UNDERFLOW) {
+		spierrstats->rdunderflow++;
+		sd_err(("FIFO underflow happened due to current F2 read command.\n"));
+	}
+
+	if (dstatus & STATUS_OVERFLOW) {
+		spierrstats->wroverflow++;
+		sd_err(("FIFO overflow happened due to current (F1/F2) write command.\n"));
+		if ((sd->chip == BCM4329_CHIP_ID) && (sd->chiprev == 0)) {
+			bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_INTR_REG, F1_OVERFLOW);
+			bcmspi_resync_f1(sd);
+			sd_err(("Recovering from F1 FIFO overflow.\n"));
+		} else {
+			err = ERROR_OF;
+		}
+	}
+
+	if (dstatus & STATUS_F2_INTR) {
+		spierrstats->f2interrupt++;
+		sd_trace(("Interrupt from F2.  SW should clear corresponding IntStatus bits\n"));
+	}
+
+	if (dstatus & STATUS_F3_INTR) {
+		spierrstats->f3interrupt++;
+		sd_err(("Interrupt from F3.  SW should clear corresponding IntStatus bits\n"));
+	}
+
+	if (dstatus & STATUS_HOST_CMD_DATA_ERR) {
+		spierrstats->hostcmddataerr++;
+		sd_err(("Error in CMD or Host data, detected by CRC/Checksum (optional)\n"));
+	}
+
+	if (dstatus & STATUS_F2_PKT_AVAILABLE) {
+		spierrstats->f2pktavailable++;
+		sd_trace(("Packet is available/ready in F2 TX FIFO\n"));
+		sd_trace(("Packet length = %d\n", sd->dwordmode ?
+		         ((dstatus & STATUS_F2_PKT_LEN_MASK) >> (STATUS_F2_PKT_LEN_SHIFT - 2)) :
+		         ((dstatus & STATUS_F2_PKT_LEN_MASK) >> STATUS_F2_PKT_LEN_SHIFT)));
+	}
+
+	if (dstatus & STATUS_F3_PKT_AVAILABLE) {
+		spierrstats->f3pktavailable++;
+		sd_err(("Packet is available/ready in F3 TX FIFO\n"));
+		sd_err(("Packet length = %d\n",
+		        (dstatus & STATUS_F3_PKT_LEN_MASK) >> STATUS_F3_PKT_LEN_SHIFT));
+	}
+
+	return err;
+}
+
+extern int
+sdioh_abort(sdioh_info_t *sd, uint func)
+{
+	return 0;
+}
+
+int
+sdioh_start(sdioh_info_t *sd, int stage)
+{
+	return SUCCESS;
+}
+
+int
+sdioh_stop(sdioh_info_t *sd)
+{
+	return SUCCESS;
+}
+
+
+
+/*
+ * Private/Static work routines
+ */
+static int
+bcmspi_host_init(sdioh_info_t *sd)
+{
+
+	/* Default power on mode */
+	sd->sd_mode = SDIOH_MODE_SPI;
+	sd->polled_mode = TRUE;
+	sd->host_init_done = TRUE;
+	sd->card_init_done = FALSE;
+	sd->adapter_slot = 1;
+
+	return (SUCCESS);
+}
+
+static int
+get_client_blocksize(sdioh_info_t *sd)
+{
+	uint32 regdata[2];
+	int status;
+
+	/* Find F1/F2/F3 max packet size */
+	if ((status = bcmspi_card_regread(sd, 0, SPID_F1_INFO_REG,
+	                                 8, regdata)) != SUCCESS) {
+		return status;
+	}
+
+	sd_trace(("pkt_size regdata[0] = 0x%x, regdata[1] = 0x%x\n",
+	        regdata[0], regdata[1]));
+
+	sd->client_block_size[1] = (regdata[0] & F1_MAX_PKT_SIZE) >> 2;
+	sd_trace(("Func1 blocksize = %d\n", sd->client_block_size[1]));
+	ASSERT(sd->client_block_size[1] == BLOCK_SIZE_F1);
+
+	sd->client_block_size[2] = ((regdata[0] >> 16) & F2_MAX_PKT_SIZE) >> 2;
+	sd_trace(("Func2 blocksize = %d\n", sd->client_block_size[2]));
+	ASSERT(sd->client_block_size[2] == BLOCK_SIZE_F2);
+
+	sd->client_block_size[3] = (regdata[1] & F3_MAX_PKT_SIZE) >> 2;
+	sd_trace(("Func3 blocksize = %d\n", sd->client_block_size[3]));
+	ASSERT(sd->client_block_size[3] == BLOCK_SIZE_F3);
+
+	return 0;
+}
+
+static int
+bcmspi_client_init(sdioh_info_t *sd)
+{
+	uint32	status_en_reg = 0;
+	sd_trace(("%s: Powering up slot %d\n", __FUNCTION__, sd->adapter_slot));
+
+#ifdef HSMODE
+	if (!spi_start_clock(sd, (uint16)sd_divisor)) {
+		sd_err(("spi_start_clock failed\n"));
+		return ERROR;
+	}
+#else
+	/* Start at ~400KHz clock rate for initialization */
+	if (!spi_start_clock(sd, 128)) {
+		sd_err(("spi_start_clock failed\n"));
+		return ERROR;
+	}
+#endif /* HSMODE */
+
+	if (!bcmspi_host_device_init_adapt(sd)) {
+		sd_err(("bcmspi_host_device_init_adapt failed\n"));
+		return ERROR;
+	}
+
+	if (!bcmspi_test_card(sd)) {
+		sd_err(("bcmspi_test_card failed\n"));
+		return ERROR;
+	}
+
+	sd->num_funcs = SPI_MAX_IOFUNCS;
+
+	get_client_blocksize(sd);
+
+	/* Apply resync pattern cmd with all zeros to reset spi-bkplane F1 logic */
+	bcmspi_resync_f1(sd);
+
+	sd->dwordmode = FALSE;
+
+	bcmspi_card_regread(sd, 0, SPID_STATUS_ENABLE, 1, &status_en_reg);
+
+	sd_trace(("%s: Enabling interrupt with dstatus \n", __FUNCTION__));
+	status_en_reg |= INTR_WITH_STATUS;
+
+
+	if (bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_STATUS_ENABLE, 1,
+	    status_en_reg & 0xff) != SUCCESS) {
+		sd_err(("%s: Unable to update the status-enable register.\n", __FUNCTION__));
+		return ERROR;
+	}
+
+#ifndef HSMODE
+	/* Initialization is done; switch from the slow init clock to the operating divisor. */
+	if (!spi_start_clock(sd, 4)) {
+		sd_err(("spi_start_clock failed\n"));
+		return ERROR;
+	}
+#endif /* HSMODE */
+
+	sd->card_init_done = TRUE;
+
+
+	return SUCCESS;
+}
+
+static int
+bcmspi_set_highspeed_mode(sdioh_info_t *sd, bool hsmode)
+{
+	uint32 regdata;
+	int status;
+
+	if ((status = bcmspi_card_regread(sd, 0, SPID_CONFIG,
+	                                 4, &regdata)) != SUCCESS)
+		return status;
+
+	sd_trace(("In %s spih-ctrl = 0x%x\n", __FUNCTION__, regdata));
+
+	if (hsmode == TRUE) {
+		sd_trace(("Attempting to enable High-Speed mode.\n"));
+
+		if (regdata & HIGH_SPEED_MODE) {
+			sd_trace(("Device is already in High-Speed mode.\n"));
+			return status;
+		} else {
+			regdata |= HIGH_SPEED_MODE;
+			sd_trace(("Writing %08x to device at %08x\n", regdata, SPID_CONFIG));
+			if ((status = bcmspi_card_regwrite(sd, 0, SPID_CONFIG,
+			                                  4, regdata)) != SUCCESS) {
+				return status;
+			}
+		}
+	} else {
+		sd_trace(("Attempting to disable High-Speed mode.\n"));
+
+		if (regdata & HIGH_SPEED_MODE) {
+			regdata &= ~HIGH_SPEED_MODE;
+			sd_trace(("Writing %08x to device at %08x\n", regdata, SPID_CONFIG));
+			if ((status = bcmspi_card_regwrite(sd, 0, SPID_CONFIG,
+			                                  4, regdata)) != SUCCESS)
+				return status;
+		} else {
+			sd_trace(("Device is already in Low-Speed mode.\n"));
+			return status;
+		}
+	}
+
+	spi_controller_highspeed_mode(sd, hsmode);
+
+	return TRUE;
+}
+
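+/* Probe which word-length the device is currently using: do a fixed-address
+ * read of the gSPI test register first in 16bit mode, then in 32bit mode, and
+ * break out as soon as a recognisable signature byte comes back.  Note: this
+ * expands inline inside the caller's for-loop, so the break statements exit
+ * that loop.
+ */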
+#define bcmspi_find_curr_mode(sd) { \
+	sd->wordlen = 2; \
+	status = bcmspi_card_regread_fixedaddr(sd, 0, SPID_TEST_READ, 4, &regdata); \
+	regdata &= 0xff; \
+	if ((regdata == 0xad) || (regdata == 0x5b) || \
+	    (regdata == 0x5d) || (regdata == 0x5a)) \
+		break; \
+	sd->wordlen = 4; \
+	status = bcmspi_card_regread_fixedaddr(sd, 0, SPID_TEST_READ, 4, &regdata); \
+	regdata &= 0xff; \
+	if ((regdata == 0xad) || (regdata == 0x5b) || \
+	    (regdata == 0x5d) || (regdata == 0x5a)) \
+		break; \
+	sd_trace(("Silicon testability issue: regdata = 0x%x." \
+	          " Expected 0xad, 0x5a, 0x5b or 0x5d.\n", regdata));	\
+	OSL_DELAY(100000); \
+}
+
+#define INIT_ADAPT_LOOP		100
+
+/* Adapt clock-phase-speed-bitwidth between host and device */
+static bool
+bcmspi_host_device_init_adapt(sdioh_info_t *sd)
+{
+	uint32 wrregdata, regdata = 0;
+	int status;
+	int i;
+
+	/* Due to a silicon testability issue, the first command from the host
+	 * to the device gets corrupted (the first bit is lost), so the host
+	 * should poll the device with a safe read request, i.e. read F0
+	 * address 0x14 using fixed-address mode.  This prevents an unintended
+	 * write command from being detected by the device.
+	 */
+	for (i = 0; i < INIT_ADAPT_LOOP; i++) {
+		/* If the device was not power-cycled it stays in 32bit mode
+		 * with the response-delay-all bit set.  Alternate the settings
+		 * across iterations so that the F0 read succeeds either with
+		 * or without response delay.
+		 */
+		bcmspi_find_curr_mode(sd);
+		sd->resp_delay_all = (i & 0x1) ? TRUE : FALSE;
+
+		bcmspi_find_curr_mode(sd);
+		sd->dwordmode = TRUE;
+
+		bcmspi_find_curr_mode(sd);
+		sd->dwordmode = FALSE;
+	}
+
+	/* Bail out, device not detected */
+	if (i == INIT_ADAPT_LOOP)
+		return FALSE;
+
+	/* Softreset the spid logic */
+	if ((sd->dwordmode) || (sd->wordlen == 4)) {
+		bcmspi_card_regwrite(sd, 0, SPID_RESET_BP, 1, RESET_ON_WLAN_BP_RESET|RESET_SPI);
+		bcmspi_card_regread(sd, 0, SPID_RESET_BP, 1, &regdata);
+		sd_trace(("reset reg read = 0x%x\n", regdata));
+		sd_trace(("dwordmode = %d, wordlen = %d, resp_delay_all = %d\n", sd->dwordmode,
+		       sd->wordlen, sd->resp_delay_all));
+		/* Restore default state after softreset */
+		sd->wordlen = 2;
+		sd->dwordmode = FALSE;
+	}
+
+	if (sd->wordlen == 4) {
+		if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) !=
+		    SUCCESS)
+			return FALSE;
+		if (regdata == TEST_RO_DATA_32BIT_LE) {
+			sd_trace(("Spid is already in 32bit LE mode. Value read = 0x%x\n",
+			          regdata));
+			sd_trace(("Spid power was left on.\n"));
+		} else {
+			sd_err(("Spid power was left on but signature read failed."
+			        " Value read = 0x%x\n", regdata));
+			return FALSE;
+		}
+	} else {
+		sd->wordlen = 2;
+
+#define CTRL_REG_DEFAULT	0x00010430 /* according to the host machine */
+
+		wrregdata = (CTRL_REG_DEFAULT);
+		sd->resp_delay_all = TRUE;
+		if (sd->resp_delay_all == TRUE) {
+			/* Enable response delay for all */
+			wrregdata |= (RESP_DELAY_ALL << 16);
+			/* Program response delay value */
+			wrregdata &= 0xffff00ff;
+			wrregdata |= (F1_RESPONSE_DELAY << 8);
+			sd->prev_fun = SPI_FUNC_1;
+			bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata);
+		}
+
+		if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) != SUCCESS)
+			return FALSE;
+		sd_trace(("(we are still in 16bit mode) 32bit READ LE regdata = 0x%x\n", regdata));
+
+#ifndef HSMODE
+		wrregdata |= (CLOCK_PHASE | CLOCK_POLARITY);
+		wrregdata &= ~HIGH_SPEED_MODE;
+		bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata);
+#endif /* HSMODE */
+
+		for (i = 0; i < INIT_ADAPT_LOOP; i++) {
+			if ((regdata == 0xfdda7d5b) || (regdata == 0xfdda7d5a)) {
+				sd_trace(("0xfeedbead was leftshifted by 1-bit.\n"));
+				if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4,
+				     &regdata)) != SUCCESS)
+					return FALSE;
+			}
+			OSL_DELAY(1000);
+		}
+
+
+		/* Change to host controller intr-polarity of active-low */
+		wrregdata &= ~INTR_POLARITY;
+		sd_trace(("(we are still in 16bit mode) 32bit Write LE reg-ctrl-data = 0x%x\n",
+		        wrregdata));
+		/* Change to 32bit mode */
+		wrregdata |= WORD_LENGTH_32;
+		bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata);
+
+		/* Change command/data packaging in 32bit LE mode */
+		sd->wordlen = 4;
+
+		if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) != SUCCESS)
+			return FALSE;
+
+		if (regdata == TEST_RO_DATA_32BIT_LE) {
+			sd_trace(("Read spid passed. Value read = 0x%x\n", regdata));
+			sd_trace(("Spid had a power-on cycle OR spi was soft-reset\n"));
+		} else {
+			sd_err(("Stale spid reg values read as it was kept powered. Value read = "
+			  "0x%x\n", regdata));
+			return FALSE;
+		}
+	}
+
+
+	return TRUE;
+}
+
+static bool
+bcmspi_test_card(sdioh_info_t *sd)
+{
+	uint32 regdata;
+	int status;
+
+	if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) != SUCCESS)
+		return FALSE;
+
+	if (regdata == (TEST_RO_DATA_32BIT_LE))
+		sd_trace(("32bit LE regdata = 0x%x\n", regdata));
+	else {
+		sd_trace(("Incorrect 32bit LE regdata = 0x%x\n", regdata));
+		return FALSE;
+	}
+
+
+#define RW_PATTERN1	0xA0A1A2A3
+#define RW_PATTERN2	0x4B5B6B7B
+
+	regdata = RW_PATTERN1;
+	if ((status = bcmspi_card_regwrite(sd, 0, SPID_TEST_RW, 4, regdata)) != SUCCESS)
+		return FALSE;
+	regdata = 0;
+	if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_RW, 4, &regdata)) != SUCCESS)
+		return FALSE;
+	if (regdata != RW_PATTERN1) {
+		sd_err(("Write-Read spid failed. Value written = 0x%x, Value read = 0x%x\n",
+			RW_PATTERN1, regdata));
+		return FALSE;
+	} else
+		sd_trace(("R/W spid passed. Value read = 0x%x\n", regdata));
+
+	regdata = RW_PATTERN2;
+	if ((status = bcmspi_card_regwrite(sd, 0, SPID_TEST_RW, 4, regdata)) != SUCCESS)
+		return FALSE;
+	regdata = 0;
+	if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_RW, 4, &regdata)) != SUCCESS)
+		return FALSE;
+	if (regdata != RW_PATTERN2) {
+		sd_err(("Write-Read spid failed. Value written = 0x%x, Value read = 0x%x\n",
+			RW_PATTERN2, regdata));
+		return FALSE;
+	} else
+		sd_trace(("R/W spid passed. Value read = 0x%x\n", regdata));
+
+	return TRUE;
+}
+
+static int
+bcmspi_driver_init(sdioh_info_t *sd)
+{
+	sd_trace(("%s\n", __FUNCTION__));
+	if ((bcmspi_host_init(sd)) != SUCCESS) {
+		return ERROR;
+	}
+
+	if (bcmspi_client_init(sd) != SUCCESS) {
+		return ERROR;
+	}
+
+	return SUCCESS;
+}
+
+/* Read device reg */
+static int
+bcmspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+	int status;
+	uint32 cmd_arg, dstatus;
+
+	ASSERT(regsize);
+
+	if (func == 2)
+		sd_trace(("Reg access on F2 will generate error indication in dstatus bits.\n"));
+
+	cmd_arg = 0;
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 0);
+	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);	/* Incremental access */
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+	cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize == BLOCK_SIZE_F2 ? 0 : regsize);
+
+	sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));
+	sd_trace(("%s: rw=%d, func=%d, regaddr=0x%08x, data=0x%x\n", __FUNCTION__, 0, func,
+	         regaddr, *data));
+
+	if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, regsize))
+	    != SUCCESS)
+		return status;
+
+	bcmspi_cmd_getdstatus(sd, &dstatus);
+	if (dstatus)
+		sd_trace(("dstatus =0x%x\n", dstatus));
+
+	return SUCCESS;
+}
+
+static int
+bcmspi_card_regread_fixedaddr(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+
+	int status;
+	uint32 cmd_arg;
+	uint32 dstatus;
+
+	ASSERT(regsize);
+
+	if (func == 2)
+		sd_trace(("Reg access on F2 will generate error indication in dstatus bits.\n"));
+
+	cmd_arg = 0;
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 0);
+	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 0);	/* Fixed access */
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+	cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize);
+
+	sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));
+
+	if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, regsize))
+	    != SUCCESS)
+		return status;
+
+	sd_trace(("%s: rw=%d, func=%d, regaddr=0x%08x, data=0x%x\n", __FUNCTION__, 0, func,
+	         regaddr, *data));
+
+	bcmspi_cmd_getdstatus(sd, &dstatus);
+	sd_trace(("dstatus =0x%x\n", dstatus));
+	return SUCCESS;
+}
+
+/* write a device register */
+static int
+bcmspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
+{
+	int status;
+	uint32 cmd_arg, dstatus;
+
+	ASSERT(regsize);
+
+	cmd_arg = 0;
+
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1);
+	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);	/* Incremental access */
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+	cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize == BLOCK_SIZE_F2 ? 0 : regsize);
+
+	sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));
+	sd_trace(("%s: rw=%d, func=%d, regaddr=0x%08x, data=0x%x\n", __FUNCTION__, 1, func,
+	         regaddr, data));
+
+
+	if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, regsize))
+	    != SUCCESS)
+		return status;
+
+	bcmspi_cmd_getdstatus(sd, &dstatus);
+	if (dstatus)
+		sd_trace(("dstatus =0x%x\n", dstatus));
+
+	return SUCCESS;
+}
+
+/* write a device register - 1 byte */
+static int
+bcmspi_card_bytewrite(sdioh_info_t *sd, int func, uint32 regaddr, uint8 *byte)
+{
+	int status;
+	uint32 cmd_arg;
+	uint32 dstatus;
+	uint32 data = (uint32)(*byte);
+
+	cmd_arg = 0;
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);	/* Incremental access */
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1);
+	cmd_arg = SFIELD(cmd_arg, SPI_LEN, 1);
+
+	sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));
+	sd_trace(("%s: func=%d, regaddr=0x%08x, data=0x%x\n", __FUNCTION__, func,
+	         regaddr, data));
+
+	if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma,
+	                              cmd_arg, &data, 1)) != SUCCESS) {
+		return status;
+	}
+
+	bcmspi_cmd_getdstatus(sd, &dstatus);
+	if (dstatus)
+		sd_trace(("dstatus =0x%x\n", dstatus));
+
+	return SUCCESS;
+}
+
+void
+bcmspi_cmd_getdstatus(sdioh_info_t *sd, uint32 *dstatus_buffer)
+{
+	*dstatus_buffer = sd->card_dstatus;
+}
+
+/* 'data' is of type uint32 whereas other buffers are of type uint8 */
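+/* On the wire each transfer is: a 4-byte gSPI command word, optional
+ * response-delay padding bytes (read commands only), the data payload, and
+ * finally 4 bytes of device status (dstatus) clocked in at the end.
+ */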
+static int
+bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg,
+                uint32 *data, uint32 datalen)
+{
+	uint32	i, j;
+	uint8	resp_delay = 0;
+	int	err = SUCCESS;
+	uint32	hostlen;
+	uint32 spilen = 0;
+	uint32 dstatus_idx = 0;
+	uint16 templen, buslen, len, *ptr = NULL;
+
+	sd_trace(("spi cmd = 0x%x\n", cmd_arg));
+
+	if (DWORDMODE_ON) {
+		spilen = GFIELD(cmd_arg, SPI_LEN);
+		if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_0) ||
+		    (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_1))
+			dstatus_idx = spilen * 3;
+
+		if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) &&
+		    (GFIELD(cmd_arg, SPI_RW_FLAG) == 1)) {
+			spilen = spilen << 2;
+			dstatus_idx = (spilen % 16) ? (16 - (spilen % 16)) : 0;
+			/* convert len to mod16 size */
+			spilen = ROUNDUP(spilen, 16);
+			cmd_arg = SFIELD(cmd_arg, SPI_LEN, (spilen >> 2));
+		}
+	}
+
+	/* Set up and issue the SPI command.  MSByte goes out on bus first.  Increase datalen
+	 * according to the wordlen mode(16/32bit) the device is in.
+	 */
+	if (sd->wordlen == 4) { /* 32bit spid */
+		*(uint32 *)spi_outbuf = bcmswap32(cmd_arg);
+		if (datalen & 0x3)
+			datalen += (4 - (datalen & 0x3));
+	} else if (sd->wordlen == 2) { /* 16bit spid */
+		*(uint16 *)spi_outbuf = bcmswap16(cmd_arg & 0xffff);
+		*(uint16 *)&spi_outbuf[2] = bcmswap16((cmd_arg & 0xffff0000) >> 16);
+		if (datalen & 0x1)
+			datalen++;
+		if (datalen < 4)
+			datalen = ROUNDUP(datalen, 4);
+	} else {
+		sd_err(("Host is %d bit spid, could not create SPI command.\n",
+			8 * sd->wordlen));
+		return ERROR;
+	}
+
+	/* for Write, put the data into the output buffer */
+	if (GFIELD(cmd_arg, SPI_RW_FLAG) == 1) {
+		/* We send len field of hw-header always a mod16 size, both from host and dongle */
+		if (DWORDMODE_ON) {
+			if (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) {
+				ptr = (uint16 *)&data[0];
+				templen = *ptr;
+				/* ASSERT(*ptr == ~*(ptr + 1)); */
+				templen = ROUNDUP(templen, 16);
+				*ptr = templen;
+				sd_trace(("actual tx len = %d\n", (uint16)(~*(ptr+1))));
+			}
+		}
+
+		if (datalen != 0) {
+			for (i = 0; i < datalen/4; i++) {
+				if (sd->wordlen == 4) { /* 32bit spid */
+					*(uint32 *)&spi_outbuf[i * 4 + CMDLEN] =
+						bcmswap32(data[i]);
+				} else if (sd->wordlen == 2) { /* 16bit spid */
+					*(uint16 *)&spi_outbuf[i * 4 + CMDLEN] =
+						bcmswap16(data[i] & 0xffff);
+					*(uint16 *)&spi_outbuf[i * 4 + CMDLEN + 2] =
+						bcmswap16((data[i] & 0xffff0000) >> 16);
+				}
+			}
+		}
+	}
+
+	/* Append resp-delay number of bytes and clock them out for F0/1/2 reads. */
+	if (GFIELD(cmd_arg, SPI_RW_FLAG) == 0) {
+		int func = GFIELD(cmd_arg, SPI_FUNCTION);
+		switch (func) {
+			case 0:
+				resp_delay = sd->resp_delay_all ? F0_RESPONSE_DELAY : 0;
+				break;
+			case 1:
+				resp_delay = F1_RESPONSE_DELAY;
+				break;
+			case 2:
+				resp_delay = sd->resp_delay_all ? F2_RESPONSE_DELAY : 0;
+				break;
+			default:
+				ASSERT(0);
+				break;
+		}
+		/* Program response delay */
+		bcmspi_prog_resp_delay(sd, func, resp_delay);
+	}
+
+	/* +4 for cmd and +4 for dstatus */
+	hostlen = datalen + 8 + resp_delay;
+	hostlen += dstatus_idx;
+	hostlen += (4 - (hostlen & 0x3));
+	spi_sendrecv(sd, spi_outbuf, spi_inbuf, hostlen);
+
+	/* for Read, get the data into the input buffer */
+	if (datalen != 0) {
+		if (GFIELD(cmd_arg, SPI_RW_FLAG) == 0) { /* if read cmd */
+			for (j = 0; j < datalen/4; j++) {
+				if (sd->wordlen == 4) { /* 32bit spid */
+					data[j] = bcmswap32(*(uint32 *)&spi_inbuf[j * 4 +
+					            CMDLEN + resp_delay]);
+				} else if (sd->wordlen == 2) { /* 16bit spid */
+					data[j] = (bcmswap16(*(uint16 *)&spi_inbuf[j * 4 +
+					            CMDLEN + resp_delay])) |
+					         ((bcmswap16(*(uint16 *)&spi_inbuf[j * 4 +
+					            CMDLEN + resp_delay + 2])) << 16);
+				}
+			}
+
+			if ((DWORDMODE_ON) && (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2)) {
+				ptr = (uint16 *)&data[0];
+				templen = *ptr;
+				buslen = len = ~(*(ptr + 1));
+				buslen = ROUNDUP(buslen, 16);
+				/* populate actual len in hw-header */
+				if (templen == buslen)
+					*ptr = len;
+			}
+		}
+	}
+
+	/* Restore back the len field of the hw header */
+	if (DWORDMODE_ON) {
+		if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) &&
+		    (GFIELD(cmd_arg, SPI_RW_FLAG) == 1)) {
+			ptr = (uint16 *)&data[0];
+			*ptr = (uint16)(~*(ptr+1));
+		}
+	}
+
+	dstatus_idx += (datalen + CMDLEN + resp_delay);
+	/* Last 4bytes are dstatus.  Device is configured to return status bits. */
+	if (sd->wordlen == 4) { /* 32bit spid */
+		sd->card_dstatus = bcmswap32(*(uint32 *)&spi_inbuf[dstatus_idx]);
+	} else if (sd->wordlen == 2) { /* 16bit spid */
+		sd->card_dstatus = (bcmswap16(*(uint16 *)&spi_inbuf[dstatus_idx]) |
+		                   (bcmswap16(*(uint16 *)&spi_inbuf[dstatus_idx + 2]) << 16));
+	} else {
+		sd_err(("Host is %d bit machine, could not read SPI dstatus.\n",
+			8 * sd->wordlen));
+		return ERROR;
+	}
+	if (sd->card_dstatus == 0xffffffff) {
+		sd_err(("Device does not look like a gSPI device, or it is not powered.\n"));
+	}
+
+	err = bcmspi_update_stats(sd, cmd_arg);
+
+	return err;
+
+}
+
+static int
+bcmspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo,
+                uint32 addr, int nbytes, uint32 *data)
+{
+	int status;
+	uint32 cmd_arg;
+	bool write = rw == SDIOH_READ ? 0 : 1;
+	uint retries = 0;
+
+	bool enable;
+	uint32	spilen;
+
+	cmd_arg = 0;
+
+	ASSERT(nbytes);
+	ASSERT(nbytes <= sd->client_block_size[func]);
+
+	if (write)
+		sd->t_cnt++;
+	else
+		sd->r_cnt++;
+
+	if (func == 2) {
+		/* Frame len check limited by gSPI. */
+		if ((nbytes > 2000) && write) {
+			sd_trace((">2KB write: F2 wr of %d bytes\n", nbytes));
+		}
+		/* ASSERT(nbytes <= 2048); Fix bigger len gspi issue and uncomment. */
+		/* If F2 fifo on device is not ready to receive data, don't do F2 transfer */
+		if (write) {
+			uint32 dstatus;
+			/* check F2 ready with cached one */
+			bcmspi_cmd_getdstatus(sd, &dstatus);
+			if ((dstatus & STATUS_F2_RX_READY) == 0) {
+				retries = WAIT_F2RXFIFORDY;
+				enable = 0;
+				while (retries-- && !enable) {
+					OSL_DELAY(WAIT_F2RXFIFORDY_DELAY * 1000);
+					bcmspi_card_regread(sd, SPI_FUNC_0, SPID_STATUS_REG, 4,
+					                   &dstatus);
+					if (dstatus & STATUS_F2_RX_READY)
+						enable = TRUE;
+				}
+				if (!enable) {
+					struct spierrstats_t *spierrstats = &sd->spierrstats;
+					spierrstats->f2rxnotready++;
+					sd_err(("F2 FIFO is not ready to receive data.\n"));
+					return ERROR;
+				}
+				sd_trace(("No of retries on F2 ready %d\n",
+					(WAIT_F2RXFIFORDY - retries)));
+			}
+		}
+	}
+
+	/* F2 transfers happen on 0 addr */
+	addr = (func == 2) ? 0 : addr;
+
+	/* In pio mode buffer is read using fixed address fifo in func 1 */
+	if ((func == 1) && (fifo))
+		cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 0);
+	else
+		cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);
+
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, addr);
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, write);
+	spilen = sd->data_xfer_count = MIN(sd->client_block_size[func], nbytes);
+	if ((sd->dwordmode == TRUE) && (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2)) {
+		/* convert len to mod4 size */
+		spilen = spilen + ((spilen & 0x3) ? (4 - (spilen & 0x3)): 0);
+		cmd_arg = SFIELD(cmd_arg, SPI_LEN, (spilen >> 2));
+	} else
+		cmd_arg = SFIELD(cmd_arg, SPI_LEN, spilen);
+
+	if ((func == 2) && (fifo == 1)) {
+		sd_data(("%s: %s func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n",
+		          __FUNCTION__, write ? "Wr" : "Rd", func, "INCR",
+		          addr, nbytes, sd->r_cnt, sd->t_cnt));
+	}
+
+	sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));
+	sd_data(("%s: %s func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n",
+	         __FUNCTION__, write ? "Wr" : "Rd", func, "INCR",
+	         addr, nbytes, sd->r_cnt, sd->t_cnt));
+
+
+	if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg,
+	     data, nbytes)) != SUCCESS) {
+		sd_err(("%s: cmd_issue failed for %s\n", __FUNCTION__,
+			(write ? "write" : "read")));
+		return status;
+	}
+
+	/* gSPI expects that hw-header-len is equal to spi-command-len */
+	if ((func == 2) && (rw == SDIOH_WRITE) && (sd->dwordmode == FALSE)) {
+		ASSERT((uint16)sd->data_xfer_count == (uint16)(*data & 0xffff));
+		ASSERT((uint16)sd->data_xfer_count == (uint16)(~((*data & 0xffff0000) >> 16)));
+	}
+
+	if ((nbytes > 2000) && !write) {
+		sd_trace((">2KB read: F2 rd of %d bytes\n", nbytes));
+	}
+
+	return SUCCESS;
+}
+
+/* Reset and re-initialize the device */
+int
+sdioh_sdio_reset(sdioh_info_t *si)
+{
+	si->card_init_done = FALSE;
+	return bcmspi_client_init(si);
+}
diff --git a/drivers/net/wireless/libra/Makefile b/drivers/net/wireless/libra/Makefile
new file mode 100644
index 0000000..3c606ba
--- /dev/null
+++ b/drivers/net/wireless/libra/Makefile
@@ -0,0 +1,14 @@
+
+# Makefile for wlan sdio if driver
+
+librasdioif-objs += libra_sdioif.o
+
+ifdef CONFIG_ARCH_MSM8X60
+	librasdioif-objs += qcomwlan_pwrif.o
+endif
+
+ifdef CONFIG_ARCH_MSM7X27A
+	librasdioif-objs += qcomwlan7x27a_pwrif.o
+endif
+
+obj-$(CONFIG_LIBRA_SDIOIF) += librasdioif.o
diff --git a/drivers/net/wireless/libra/libra_sdioif.c b/drivers/net/wireless/libra/libra_sdioif.c
new file mode 100644
index 0000000..3955642
--- /dev/null
+++ b/drivers/net/wireless/libra/libra_sdioif.c
@@ -0,0 +1,481 @@
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/libra_sdioif.h>
+#include <linux/delay.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+
+/* Libra SDIO function device */
+static struct sdio_func *libra_sdio_func;
+static struct mmc_host *libra_mmc_host;
+static int libra_mmc_host_index;
+
+/* SDIO Card ID / Device ID */
+static unsigned short  libra_sdio_card_id;
+
+static suspend_handler_t *libra_suspend_hldr;
+static resume_handler_t *libra_resume_hldr;
+
+/**
+ * libra_sdio_configure() - Configure the SDIO device parameters
+ * @libra_sdio_rxhandler:   Rx handler
+ * @func_drv_fn:            Function driver callback for device-specific setup
+ * @funcdrv_timeout:        Function enable timeout
+ * @blksize:                Block size
+ *
+ * Configure the SDIO device, enable the function and set the block size.
+ */
+int libra_sdio_configure(sdio_irq_handler_t libra_sdio_rxhandler,
+	void  (*func_drv_fn)(int *status),
+	unsigned int funcdrv_timeout, unsigned int blksize)
+{
+	int err_ret = 0;
+	struct sdio_func *func = libra_sdio_func;
+
+	if (libra_sdio_func == NULL) {
+		printk(KERN_ERR "%s: Error: SDIO card not detected\n", __func__);
+		goto cfg_error;
+	}
+
+	sdio_claim_host(func);
+
+	/* Currently block sizes are set here. */
+	func->max_blksize = blksize;
+	if (sdio_set_block_size(func, blksize)) {
+		printk(KERN_ERR "%s: Unable to set the block size.\n",
+				__func__);
+		sdio_release_host(func);
+		goto cfg_error;
+	}
+
+	/* Function driver specific configuration. */
+	if (func_drv_fn) {
+		(*func_drv_fn)(&err_ret);
+		if (err_ret) {
+			printk(KERN_ERR "%s: configure callback provided by the function driver failed, error=%d\n",
+				__func__, err_ret);
+			sdio_release_host(func);
+			goto cfg_error;
+		}
+	}
+
+	/* We set this based on the function card. */
+	func->enable_timeout = funcdrv_timeout;
+	err_ret = sdio_enable_func(func);
+	if (err_ret != 0) {
+		printk(KERN_ERR "%s: Unable to enable function %d\n",
+				__func__, err_ret);
+		sdio_release_host(func);
+		goto cfg_error;
+	}
+
+	if (sdio_claim_irq(func, libra_sdio_rxhandler)) {
+		sdio_disable_func(func);
+		printk(KERN_ERR "%s: Unable to claim irq.\n", __func__);
+		sdio_release_host(func);
+		goto cfg_error;
+	}
+
+	sdio_release_host(func);
+
+	return 0;
+
+cfg_error:
+	return -1;
+
+}
+EXPORT_SYMBOL(libra_sdio_configure);
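+/*
+ * Example caller (a hypothetical WLAN function driver; the handler name,
+ * timeout and block size below are illustrative only):
+ *
+ *	static void wlan_rx_handler(struct sdio_func *func);
+ *
+ *	if (libra_sdio_configure(wlan_rx_handler, NULL, 2000, 512))
+ *		return -ENODEV;
+ */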
+
+int libra_sdio_configure_suspend_resume(
+		suspend_handler_t *libra_sdio_suspend_hdlr,
+		resume_handler_t *libra_sdio_resume_hdlr)
+{
+	libra_suspend_hldr = libra_sdio_suspend_hdlr;
+	libra_resume_hldr = libra_sdio_resume_hdlr;
+	return 0;
+}
+EXPORT_SYMBOL(libra_sdio_configure_suspend_resume);
+
+/*
+ * libra_sdio_deconfigure() - Function to reset the SDIO device param
+ */
+void libra_sdio_deconfigure(struct sdio_func *func)
+{
+	if (NULL == libra_sdio_func)
+		return;
+
+	sdio_claim_host(func);
+	sdio_release_irq(func);
+	sdio_disable_func(func);
+	sdio_release_host(func);
+}
+EXPORT_SYMBOL(libra_sdio_deconfigure);
+
+int libra_enable_sdio_irq(struct sdio_func *func, u8 enable)
+{
+	if (libra_mmc_host && libra_mmc_host->ops &&
+			libra_mmc_host->ops->enable_sdio_irq) {
+		libra_mmc_host->ops->enable_sdio_irq(libra_mmc_host, enable);
+		return 0;
+	}
+
+	printk(KERN_ERR "%s: Could not enable/disable SDIO IRQ\n", __func__);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(libra_enable_sdio_irq);
+
+int libra_disable_sdio_irq_capability(struct sdio_func *func, u8 disable)
+{
+	if (libra_mmc_host) {
+		if (disable)
+			libra_mmc_host->caps &= ~MMC_CAP_SDIO_IRQ;
+		else
+			libra_mmc_host->caps |= MMC_CAP_SDIO_IRQ;
+		return 0;
+	}
+	printk(KERN_ERR "%s: Could not change the SDIO IRQ capability\n",
+			__func__);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(libra_disable_sdio_irq_capability);
+
+/*
+ * libra_sdio_release_irq() - Function to release IRQ
+ */
+void libra_sdio_release_irq(struct sdio_func *func)
+{
+	if (NULL == libra_sdio_func)
+		return;
+
+	sdio_release_irq(func);
+}
+EXPORT_SYMBOL(libra_sdio_release_irq);
+
+/*
+ * libra_sdio_disable_func() - Function to disable sdio func
+ */
+void libra_sdio_disable_func(struct sdio_func *func)
+{
+	if (NULL == libra_sdio_func)
+		return;
+
+	sdio_disable_func(func);
+}
+EXPORT_SYMBOL(libra_sdio_disable_func);
+
+/*
+ * Return the SDIO Function device
+ */
+struct sdio_func *libra_getsdio_funcdev(void)
+{
+	return libra_sdio_func;
+}
+EXPORT_SYMBOL(libra_getsdio_funcdev);
+
+/*
+ * Set function driver as the private data for the function device
+ */
+void libra_sdio_setprivdata(struct sdio_func *sdio_func_dev,
+		void *padapter)
+{
+	if (NULL == libra_sdio_func)
+		return;
+
+	sdio_set_drvdata(sdio_func_dev, padapter);
+}
+EXPORT_SYMBOL(libra_sdio_setprivdata);
+
+/*
+ * Return private data of the function device.
+ */
+void *libra_sdio_getprivdata(struct sdio_func *sdio_func_dev)
+{
+	return sdio_get_drvdata(sdio_func_dev);
+}
+EXPORT_SYMBOL(libra_sdio_getprivdata);
+
+/*
+ * Function driver claims the SDIO device
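+ * (a repeat claim from the same PID only bumps the claim count; the
+ * underlying sdio_claim_host() is taken once per claiming context)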
+ */
+void libra_claim_host(struct sdio_func *sdio_func_dev,
+		pid_t *curr_claimed, pid_t current_pid, atomic_t *claim_count)
+{
+	if (NULL == libra_sdio_func)
+		return;
+
+	if (*curr_claimed == current_pid) {
+		atomic_inc(claim_count);
+		return;
+	}
+
+	/* Go ahead and claim the host if not locked by anybody. */
+	sdio_claim_host(sdio_func_dev);
+
+	*curr_claimed = current_pid;
+	atomic_inc(claim_count);
+
+}
+EXPORT_SYMBOL(libra_claim_host);
+
+/*
+ * Function driver releases the SDIO device
+ */
+void libra_release_host(struct sdio_func *sdio_func_dev,
+		pid_t *curr_claimed, pid_t current_pid, atomic_t *claim_count)
+{
+
+	if (NULL == libra_sdio_func)
+		return;
+
+	if (*curr_claimed != current_pid) {
+		/* Don't release; the host is claimed by another context */
+		return;
+	}
+
+	atomic_dec(claim_count);
+	if (atomic_read(claim_count) == 0) {
+		*curr_claimed = 0;
+		sdio_release_host(sdio_func_dev);
+	}
+}
+EXPORT_SYMBOL(libra_release_host);
+
+void libra_sdiocmd52(struct sdio_func *sdio_func_dev, unsigned int addr,
+	u8 *byte_var, int write, int *err_ret)
+{
+	if (write)
+		sdio_writeb(sdio_func_dev, byte_var[0], addr, err_ret);
+	else
+		byte_var[0] = sdio_readb(sdio_func_dev, addr, err_ret);
+}
+EXPORT_SYMBOL(libra_sdiocmd52);
+
+u8 libra_sdio_readsb(struct sdio_func *func, void *dst,
+	unsigned int addr, int count)
+{
+	return sdio_readsb(func, dst, addr, count);
+}
+EXPORT_SYMBOL(libra_sdio_readsb);
+
+int libra_sdio_memcpy_fromio(struct sdio_func *func,
+		void *dst, unsigned int addr, int count)
+{
+	return sdio_memcpy_fromio(func, dst, addr, count);
+}
+EXPORT_SYMBOL(libra_sdio_memcpy_fromio);
+
+int libra_sdio_writesb(struct sdio_func *func,
+		unsigned int addr, void *src, int count)
+{
+	return sdio_writesb(func, addr, src, count);
+}
+EXPORT_SYMBOL(libra_sdio_writesb);
+
+int libra_sdio_memcpy_toio(struct sdio_func *func,
+	unsigned int addr, void *src, int count)
+{
+	return sdio_memcpy_toio(func, addr, src, count);
+}
+EXPORT_SYMBOL(libra_sdio_memcpy_toio);
+
+int libra_detect_card_change(void)
+{
+	if (libra_mmc_host) {
+		if (!strcmp(libra_mmc_host->class_dev.class->name, "mmc_host")
+			&& (libra_mmc_host_index == libra_mmc_host->index)) {
+			mmc_detect_change(libra_mmc_host, 0);
+			return 0;
+		}
+	}
+
+	printk(KERN_ERR "%s: Could not trigger card change\n", __func__);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(libra_detect_card_change);
+
+int libra_sdio_enable_polling(void)
+{
+	if (libra_mmc_host) {
+		if (!strcmp(libra_mmc_host->class_dev.class->name, "mmc_host")
+			&& (libra_mmc_host_index == libra_mmc_host->index)) {
+			libra_mmc_host->caps |= MMC_CAP_NEEDS_POLL;
+			mmc_detect_change(libra_mmc_host, 0);
+			return 0;
+		}
+	}
+
+	printk(KERN_ERR "%s: Could not trigger SDIO scan\n", __func__);
+	return -1;
+}
+EXPORT_SYMBOL(libra_sdio_enable_polling);
+
+void libra_sdio_set_clock(struct sdio_func *func, unsigned int clk_freq)
+{
+	struct mmc_host *host = func->card->host;
+
+	host->ios.clock = clk_freq;
+	host->ops->set_ios(host, &host->ios);
+}
+EXPORT_SYMBOL(libra_sdio_set_clock);
+
+/*
+ * API to get SDIO Device Card ID
+ */
+void libra_sdio_get_card_id(struct sdio_func *func, unsigned short *card_id)
+{
+	if (card_id)
+		*card_id = libra_sdio_card_id;
+}
+EXPORT_SYMBOL(libra_sdio_get_card_id);
+
+/*
+ * SDIO Probe
+ */
+static int libra_sdio_probe(struct sdio_func *func,
+		const struct sdio_device_id *sdio_dev_id)
+{
+	libra_mmc_host = func->card->host;
+	libra_mmc_host_index = libra_mmc_host->index;
+	libra_sdio_func = func;
+	libra_sdio_card_id = sdio_dev_id->device;
+
+	printk(KERN_INFO "%s: success with block size of %d device_id=0x%x\n",
+		__func__,
+		func->cur_blksize,
+		sdio_dev_id->device);
+
+	/* Turn off SDIO polling from now on */
+	libra_mmc_host->caps &= ~MMC_CAP_NEEDS_POLL;
+	return 0;
+}
+
+static void libra_sdio_remove(struct sdio_func *func)
+{
+	libra_sdio_func = NULL;
+
+	printk(KERN_INFO "%s : Module removed.\n", __func__);
+}
+
+#ifdef CONFIG_PM
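+/* Keep the card powered across suspend and arm the SDIO IRQ as a wakeup
+ * source.  If the Libra driver registered a suspend handler it is called
+ * with the SDIO IRQ disabled; on failure the IRQ is restored and the
+ * suspend is aborted.
+ */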
+static int libra_sdio_suspend(struct device *dev)
+{
+	struct sdio_func *func = dev_to_sdio_func(dev);
+	int ret = 0;
+
+	ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+
+	if (ret) {
+		printk(KERN_ERR "%s: Error: host does not support the keep-power capability\n",
+			__func__);
+		return ret;
+	}
+	if (libra_suspend_hldr) {
+		/* Disable SDIO IRQ when driver is being suspended */
+		libra_enable_sdio_irq(func, 0);
+		ret = libra_suspend_hldr(func);
+		if (ret) {
+			printk(KERN_ERR
+			"%s: Libra driver is not able to suspend\n", __func__);
+			/* Error - Restore SDIO IRQ */
+			libra_enable_sdio_irq(func, 1);
+			return ret;
+		}
+	}
+
+
+	return sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
+}
+
+static int libra_sdio_resume(struct device *dev)
+{
+	struct sdio_func *func = dev_to_sdio_func(dev);
+
+	if (libra_resume_hldr) {
+		libra_resume_hldr(func);
+		/* Restore SDIO IRQ */
+		libra_enable_sdio_irq(func, 1);
+	}
+
+	return 0;
+}
+#else
+#define libra_sdio_suspend NULL
+#define libra_sdio_resume NULL
+#endif
+
+static struct sdio_device_id libra_sdioid[] = {
+	{.class = 0, .vendor = LIBRA_MAN_ID,  .device = LIBRA_REV_1_0_CARD_ID},
+	{.class = 0, .vendor = VOLANS_MAN_ID, .device = VOLANS_REV_2_0_CARD_ID},
+	{}
+};
+
+static const struct dev_pm_ops libra_sdio_pm_ops = {
+	.suspend = libra_sdio_suspend,
+	.resume = libra_sdio_resume,
+};
+
+static struct sdio_driver libra_sdiofn_driver = {
+	.name      = "libra_sdiofn",
+	.id_table  = libra_sdioid,
+	.probe     = libra_sdio_probe,
+	.remove    = libra_sdio_remove,
+	.drv.pm    = &libra_sdio_pm_ops,
+};
+
+static int __init libra_sdioif_init(void)
+{
+	libra_sdio_func = NULL;
+	libra_mmc_host = NULL;
+	libra_mmc_host_index = -1;
+	libra_suspend_hldr = NULL;
+	libra_resume_hldr = NULL;
+
+	sdio_register_driver(&libra_sdiofn_driver);
+
+	printk(KERN_INFO "%s: Loaded Successfully\n", __func__);
+
+	return 0;
+}
+
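+/* Trigger a card-change detection so the MMC core tears down the function
+ * device, wait briefly for the remove callback to clear libra_sdio_func,
+ * then unregister the driver.
+ */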
+static void __exit libra_sdioif_exit(void)
+{
+	unsigned int attempts = 0;
+
+	if (!libra_detect_card_change()) {
+		do {
+			++attempts;
+			msleep(500);
+		} while (libra_sdio_func != NULL && attempts < 3);
+	}
+
+	if (libra_sdio_func != NULL)
+		printk(KERN_ERR "%s: Card removal not detected\n", __func__);
+
+	sdio_unregister_driver(&libra_sdiofn_driver);
+
+	libra_sdio_func = NULL;
+	libra_mmc_host = NULL;
+	libra_mmc_host_index = -1;
+
+	printk(KERN_INFO "%s: Unloaded Successfully\n", __func__);
+}
+
+module_init(libra_sdioif_init);
+module_exit(libra_sdioif_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("1.0");
+MODULE_DESCRIPTION("WLAN SDIO Driver");
diff --git a/drivers/net/wireless/libra/qcomwlan7x27a_pwrif.c b/drivers/net/wireless/libra/qcomwlan7x27a_pwrif.c
new file mode 100644
index 0000000..ca2680f
--- /dev/null
+++ b/drivers/net/wireless/libra/qcomwlan7x27a_pwrif.c
@@ -0,0 +1,172 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <mach/vreg.h>
+#include <linux/gpio.h>
+#include <mach/rpc_pmapp.h>
+#include <linux/err.h>
+#include <linux/qcomwlan7x27a_pwrif.h>
+
+#define WLAN_GPIO_EXT_POR_N     134
+
+static const char *id = "WLAN";
+
+enum {
+	WLAN_VREG_L17 = 0,
+	WLAN_VREG_S3,
+	WLAN_VREG_TCXO_L11,
+	WLAN_VREG_L19,
+	WLAN_VREG_L5,
+	WLAN_VREG_L6
+};
+
+struct wlan_vreg_info {
+	const char *vreg_id;
+	unsigned int vreg_level;
+	unsigned int pmapp_id;
+	unsigned int is_vreg_pin_controlled;
+	struct vreg *vreg;
+};
+
+
+static struct wlan_vreg_info vreg_info[] = {
+	{"bt", 3050, 56, 0, NULL},
+	{"msme1", 1800, 2, 0, NULL},
+	{"wlan_tcx0", 1800, 53, 0, NULL},
+	{"wlan4", 1200, 57, 0, NULL},
+	{"wlan2", 1350, 45, 0, NULL},
+	{"wlan3", 1200, 51, 0, NULL} };
+
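+/*
+ * chip_power_qrf6285() - power the QRF6285 WLAN chip on or off.
+ *
+ * Drives the external POR GPIO and walks the vreg_info[] table above,
+ * enabling (or disabling) each regulator and casting the pin-control and
+ * TCXO votes the chip needs.
+ */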
+int chip_power_qrf6285(bool on)
+{
+	int rc = 0, index = 0;
+
+	if (on) {
+		rc = gpio_request(WLAN_GPIO_EXT_POR_N, "WLAN_DEEP_SLEEP_N");
+
+		if (rc) {
+			pr_err("WLAN reset GPIO %d request failed %d\n",
+			WLAN_GPIO_EXT_POR_N, rc);
+			goto fail;
+		}
+		rc = gpio_direction_output(WLAN_GPIO_EXT_POR_N, 1);
+		if (rc < 0) {
+			pr_err("WLAN reset GPIO %d set direction failed %d\n",
+			WLAN_GPIO_EXT_POR_N, rc);
+			goto fail_gpio_dir_out;
+		}
+	} else {
+		gpio_set_value_cansleep(WLAN_GPIO_EXT_POR_N, 0);
+		gpio_free(WLAN_GPIO_EXT_POR_N);
+	}
+
+
+	for (index = 0; index < ARRAY_SIZE(vreg_info); index++) {
+		vreg_info[index].vreg = vreg_get(NULL,
+						vreg_info[index].vreg_id);
+		if (IS_ERR(vreg_info[index].vreg)) {
+			pr_err("%s:%s vreg get failed %ld\n",
+				__func__, vreg_info[index].vreg_id,
+				PTR_ERR(vreg_info[index].vreg));
+			rc = PTR_ERR(vreg_info[index].vreg);
+			if (on)
+				goto vreg_fail;
+			else
+				continue;
+		}
+		if (on) {
+			rc = vreg_set_level(vreg_info[index].vreg,
+					 vreg_info[index].vreg_level);
+			if (rc) {
+				pr_err("%s:%s vreg set level failed %d\n",
+					__func__, vreg_info[index].vreg_id, rc);
+				goto vreg_fail;
+			}
+			if (vreg_info[index].is_vreg_pin_controlled) {
+				rc = pmapp_vreg_pincntrl_vote(id,
+					 vreg_info[index].pmapp_id,
+					 PMAPP_CLOCK_ID_A0, 1);
+				if (rc) {
+					pr_err("%s:%s pmapp_vreg_pincntrl_vote"
+						" for enable failed %d\n",
+						__func__,
+						vreg_info[index].vreg_id, rc);
+					goto vreg_fail;
+				}
+			} else {
+				rc = vreg_enable(vreg_info[index].vreg);
+				if (rc) {
+					pr_err("%s:%s vreg enable failed %d\n",
+						__func__,
+						vreg_info[index].vreg_id, rc);
+					goto vreg_fail;
+				}
+			}
+
+			if (WLAN_VREG_TCXO_L11 == index) {
+				/*
+				 * Configure TCXO to be slave to
+				 * WLAN_CLK_PWR_REQ
+				 */
+				rc = pmapp_clock_vote(id, PMAPP_CLOCK_ID_A0,
+						PMAPP_CLOCK_VOTE_PIN_CTRL);
+				if (rc) {
+					pr_err("%s: Configuring TCXO to Pin"
+					" controllable failed %d\n",
+							 __func__, rc);
+					goto vreg_clock_vote_fail;
+				}
+			}
+
+		} else {
+
+			if (vreg_info[index].is_vreg_pin_controlled) {
+				rc = pmapp_vreg_pincntrl_vote(id,
+						 vreg_info[index].pmapp_id,
+						 PMAPP_CLOCK_ID_A0, 0);
+				if (rc) {
+					pr_err("%s:%s pmapp_vreg_pincntrl_vote"
+						" for disable failed %d\n",
+						__func__,
+						vreg_info[index].vreg_id, rc);
+				}
+			} else {
+				rc = vreg_disable(vreg_info[index].vreg);
+				if (rc) {
+					pr_err("%s:%s vreg disable failed %d\n",
+						__func__,
+						vreg_info[index].vreg_id, rc);
+				}
+			}
+		}
+	}
+	return 0;
+vreg_fail:
+	index--;
+vreg_clock_vote_fail:
+	while (index > 0) {
+		rc = vreg_disable(vreg_info[index].vreg);
+		if (rc) {
+			pr_err("%s:%s vreg disable failed %d\n",
+				__func__, vreg_info[index].vreg_id, rc);
+		}
+		index--;
+	}
+	if (!on)
+		goto fail;
+fail_gpio_dir_out:
+	gpio_free(WLAN_GPIO_EXT_POR_N);
+fail:
+	return rc;
+}
+EXPORT_SYMBOL(chip_power_qrf6285);
diff --git a/drivers/net/wireless/libra/qcomwlan_pwrif.c b/drivers/net/wireless/libra/qcomwlan_pwrif.c
new file mode 100644
index 0000000..bb5e135
--- /dev/null
+++ b/drivers/net/wireless/libra/qcomwlan_pwrif.c
@@ -0,0 +1,256 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/qcomwlan_pwrif.h>
+
+#define GPIO_WLAN_DEEP_SLEEP_N  230
+#define WLAN_RESET_OUT          1
+#define WLAN_RESET              0
+
+static const char *id = "WLAN";
+
+/**
+ * vos_chip_power_qrf8615() - WLAN Power Up Seq for WCN1314 rev 2.0 on QRF 8615
+ * @on - Turn WLAN ON/OFF (1 or 0)
+ *
+ * Power up/down WLAN by turning on/off various regs and asserting/deasserting
+ * Power-on-reset pin. Also, put XO A0 buffer as slave to wlan_clk_pwr_req while
+ * turning ON WLAN and vice-versa.
+ *
+ * This function returns 0 on success or a non-zero value on failure.
+ */
+int vos_chip_power_qrf8615(int on)
+{
+	static char wlan_on;
+	static const char *vregs_qwlan_name[] = {
+		"8058_l20",
+		"8058_l8",
+		"8901_s4",
+		"8901_lvs1",
+		"8901_l0",
+		"8058_s2",
+		"8058_s1",
+	};
+	static const int vregs_qwlan_val_min[] = {
+		1800000,
+		3050000,
+		1225000,
+		0,
+		1200000,
+		1300000,
+		500000,
+	};
+	static const int vregs_qwlan_val_max[] = {
+		1800000,
+		3050000,
+		1225000,
+		0,
+		1200000,
+		1300000,
+		1250000,
+	};
+	static const bool vregs_is_pin_controlled[] = {
+		1,
+		1,
+		0,
+		0,
+		1,
+		1,
+		0,
+	};
+	static struct regulator *vregs_qwlan[ARRAY_SIZE(vregs_qwlan_name)];
+	static struct msm_xo_voter *wlan_clock;
+	int ret, i, rc = 0;
+
+	/* WLAN RESET and CLK settings */
+	if (on && !wlan_on) {
+		/*
+		 * Program U12 GPIO expander pin IO1 to de-assert (drive 0)
+		 * WLAN_EXT_POR_N to put WLAN in reset
+		 */
+		rc = gpio_request(GPIO_WLAN_DEEP_SLEEP_N, "WLAN_DEEP_SLEEP_N");
+		if (rc) {
+			pr_err("WLAN reset GPIO %d request failed\n",
+					GPIO_WLAN_DEEP_SLEEP_N);
+			goto fail;
+		}
+		rc = gpio_direction_output(GPIO_WLAN_DEEP_SLEEP_N,
+				WLAN_RESET_OUT);
+		if (rc < 0) {
+			pr_err("WLAN reset GPIO %d set output direction failed\n",
+					GPIO_WLAN_DEEP_SLEEP_N);
+			goto fail_gpio_dir_out;
+		}
+
+		/* Configure TCXO to be slave to WLAN_CLK_PWR_REQ */
+		if (wlan_clock == NULL) {
+			wlan_clock = msm_xo_get(MSM_XO_TCXO_A0, id);
+			if (IS_ERR(wlan_clock)) {
+				pr_err("Failed to get TCXO_A0 voter (%ld)\n",
+						PTR_ERR(wlan_clock));
+				goto fail_gpio_dir_out;
+			}
+		}
+
+		rc = msm_xo_mode_vote(wlan_clock, MSM_XO_MODE_PIN_CTRL);
+		if (rc < 0) {
+			pr_err("Configuring TCXO to Pin controllable failed"
+					" (%d)\n", rc);
+			goto fail_xo_mode_vote;
+		}
+	} else if (!on && wlan_on) {
+		if (wlan_clock != NULL)
+			msm_xo_mode_vote(wlan_clock, MSM_XO_MODE_OFF);
+		gpio_set_value_cansleep(GPIO_WLAN_DEEP_SLEEP_N, WLAN_RESET);
+		gpio_free(GPIO_WLAN_DEEP_SLEEP_N);
+	}
+
+	/* WLAN VREG settings */
+	for (i = 0; i < ARRAY_SIZE(vregs_qwlan_name); i++) {
+		if (vregs_qwlan[i] == NULL) {
+			vregs_qwlan[i] = regulator_get(NULL,
+					vregs_qwlan_name[i]);
+			if (IS_ERR(vregs_qwlan[i])) {
+				pr_err("regulator get of %s failed (%ld)\n",
+						vregs_qwlan_name[i],
+						PTR_ERR(vregs_qwlan[i]));
+				rc = PTR_ERR(vregs_qwlan[i]);
+				goto vreg_get_fail;
+			}
+			if (vregs_qwlan_val_min[i] || vregs_qwlan_val_max[i]) {
+				rc = regulator_set_voltage(vregs_qwlan[i],
+						vregs_qwlan_val_min[i],
+						vregs_qwlan_val_max[i]);
+				if (rc) {
+					pr_err("regulator_set_voltage(%s) failed\n",
+							vregs_qwlan_name[i]);
+					goto vreg_fail;
+				}
+			}
+			/* vote for pin control (if needed) */
+			if (vregs_is_pin_controlled[i]) {
+				rc = regulator_set_mode(vregs_qwlan[i],
+						REGULATOR_MODE_IDLE);
+				if (rc) {
+					pr_err("regulator_set_mode(%s) failed\n",
+							vregs_qwlan_name[i]);
+					goto vreg_fail;
+				}
+			}
+		}
+		if (on && !wlan_on) {
+			rc = regulator_enable(vregs_qwlan[i]);
+			if (rc < 0) {
+				pr_err("vreg %s enable failed (%d)\n",
+						vregs_qwlan_name[i], rc);
+				goto vreg_fail;
+			}
+		} else if (!on && wlan_on) {
+			rc = regulator_disable(vregs_qwlan[i]);
+			if (rc < 0) {
+				pr_err("vreg %s disable failed (%d)\n",
+						vregs_qwlan_name[i], rc);
+				goto vreg_fail;
+			}
+		}
+	}
+	if (on)
+		wlan_on = true;
+	else
+		wlan_on = false;
+	return 0;
+
+vreg_fail:
+	regulator_put(vregs_qwlan[i]);
+vreg_get_fail:
+	i--;
+	while (i) {
+		ret = !on ? regulator_enable(vregs_qwlan[i]) :
+			regulator_disable(vregs_qwlan[i]);
+		if (ret < 0) {
+			pr_err("vreg %s %s failed (%d) in err path\n",
+					vregs_qwlan_name[i],
+					!on ? "enable" : "disable", ret);
+		}
+		regulator_put(vregs_qwlan[i]);
+		i--;
+	}
+	if (!on)
+		goto fail;
+fail_xo_mode_vote:
+	msm_xo_put(wlan_clock);
+fail_gpio_dir_out:
+	gpio_free(GPIO_WLAN_DEEP_SLEEP_N);
+fail:
+	return rc;
+}
+EXPORT_SYMBOL(vos_chip_power_qrf8615);
+
+/**
+ * qcomwlan_pmic_xo_core_force_enable() - Force XO Core of PMIC to be ALWAYS ON
+ * @on - Force XO Core ON/OFF (1 or 0)
+ *
+ * The XO_CORE controls the XO feeding the TCXO buffers (A0, A1, etc.). WLAN
+ * wants to keep the XO core on even though our buffer A0 is in pin control
+ * because it can take a long time to turn the XO back on and warm up the buffers.
+ * This helps in optimizing power in BMPS (power save) mode of WLAN.
+ * The WLAN driver wrapper function takes care that this API is not called
+ * consecutively.
+ *
+ * This function returns 0 on success or a non-zero value on failure.
+ */
+int qcomwlan_pmic_xo_core_force_enable(int on)
+{
+	static struct msm_xo_voter *wlan_ps;
+	int rc = 0;
+
+	if (wlan_ps == NULL) {
+		wlan_ps = msm_xo_get(MSM_XO_CORE, id);
+		if (IS_ERR(wlan_ps)) {
+			pr_err("Failed to get XO CORE voter (%ld)\n",
+					PTR_ERR(wlan_ps));
+			goto fail;
+		}
+	}
+
+	if (on)
+		rc = msm_xo_mode_vote(wlan_ps, MSM_XO_MODE_ON);
+	else
+		rc = msm_xo_mode_vote(wlan_ps, MSM_XO_MODE_OFF);
+
+	if (rc < 0) {
+		pr_err("XO Core %s failed (%d)\n",
+			on ? "enable" : "disable", rc);
+		goto fail_xo_mode_vote;
+	}
+	return 0;
+fail_xo_mode_vote:
+	msm_xo_put(wlan_ps);
+fail:
+	return rc;
+}
+EXPORT_SYMBOL(qcomwlan_pmic_xo_core_force_enable);
+
+
+/**
+ * qcomwlan_freq_change_1p3v_supply() - function to change the freq for 1.3V RF supply.
+ * @freq - freq of the 1.3V Supply
+ *
+ * This function returns 0 on success or a non-zero value on failure.
+ */
+
+int qcomwlan_freq_change_1p3v_supply(enum rpm_vreg_freq freq)
+{
+	return rpm_vreg_set_frequency(RPM_VREG_ID_PM8058_S2, freq);
+}
+EXPORT_SYMBOL(qcomwlan_freq_change_1p3v_supply);
diff --git a/drivers/net/wireless/wcnss/Makefile b/drivers/net/wireless/wcnss/Makefile
new file mode 100644
index 0000000..d182b6e
--- /dev/null
+++ b/drivers/net/wireless/wcnss/Makefile
@@ -0,0 +1,6 @@
+
+# Makefile for WCNSS WLAN driver
+
+wcnsswlan-objs += wcnss_wlan.o wcnss_riva.o qcomwlan_secif.o
+
+obj-$(CONFIG_WCNSS_WLAN) += wcnsswlan.o
diff --git a/drivers/net/wireless/wcnss/qcomwlan_secif.c b/drivers/net/wireless/wcnss/qcomwlan_secif.c
new file mode 100644
index 0000000..124f387
--- /dev/null
+++ b/drivers/net/wireless/wcnss/qcomwlan_secif.c
@@ -0,0 +1,62 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/qcomwlan_secif.h>
+
+/*
+ * APIs for calling crypto routines from kernel
+ */
+struct crypto_ahash *wcnss_wlan_crypto_alloc_ahash(const char *alg_name,
+							 u32 type, u32 mask)
+{
+	return crypto_alloc_ahash(alg_name, type, mask);
+}
+EXPORT_SYMBOL(wcnss_wlan_crypto_alloc_ahash);
+
+int wcnss_wlan_crypto_ahash_digest(struct ahash_request *req)
+{
+	return crypto_ahash_digest(req);
+}
+EXPORT_SYMBOL(wcnss_wlan_crypto_ahash_digest);
+
+void wcnss_wlan_crypto_free_ahash(struct crypto_ahash *tfm)
+{
+	crypto_free_ahash(tfm);
+}
+EXPORT_SYMBOL(wcnss_wlan_crypto_free_ahash);
+
+int wcnss_wlan_crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
+			unsigned int keylen)
+{
+	return crypto_ahash_setkey(tfm, key, keylen);
+}
+EXPORT_SYMBOL(wcnss_wlan_crypto_ahash_setkey);
+
+struct crypto_ablkcipher *
+wcnss_wlan_crypto_alloc_ablkcipher(const char *alg_name, u32 type, u32 mask)
+{
+	return crypto_alloc_ablkcipher(alg_name, type, mask);
+}
+EXPORT_SYMBOL(wcnss_wlan_crypto_alloc_ablkcipher);
+
+void wcnss_wlan_ablkcipher_request_free(struct ablkcipher_request *req)
+{
+	ablkcipher_request_free(req);
+}
+EXPORT_SYMBOL(wcnss_wlan_ablkcipher_request_free);
+
+void wcnss_wlan_crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
+{
+	crypto_free_ablkcipher(tfm);
+}
+EXPORT_SYMBOL(wcnss_wlan_crypto_free_ablkcipher);
+
diff --git a/drivers/net/wireless/wcnss/wcnss_riva.c b/drivers/net/wireless/wcnss/wcnss_riva.c
new file mode 100644
index 0000000..3617ba8
--- /dev/null
+++ b/drivers/net/wireless/wcnss/wcnss_riva.c
@@ -0,0 +1,314 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mfd/pm8xxx/pm8921.h>
+#include <linux/mfd/pm8xxx/gpio.h>
+#include <mach/msm_xo.h>
+#include <mach/msm_iomap.h>
+
+#include "wcnss_riva.h"
+
+static void __iomem *msm_riva_base;
+static struct msm_xo_voter *wlan_clock;
+static const char *id = "WLAN";
+
+#define MSM_RIVA_PHYS                     0x03204000
+#define RIVA_PMU_CFG                      (msm_riva_base + 0x28)
+#define RIVA_PMU_CFG_IRIS_XO_CFG          BIT(3)
+#define RIVA_PMU_CFG_IRIS_XO_EN           BIT(4)
+#define RIVA_PMU_CFG_GC_BUS_MUX_SEL_TOP   BIT(5)
+#define RIVA_PMU_CFG_IRIS_XO_CFG_STS      BIT(6) /* 1: in progress, 0: done */
+
+#define RIVA_PMU_CFG_IRIS_XO_MODE         0x6
+#define RIVA_PMU_CFG_IRIS_XO_MODE_48      (3 << 1)
+
+#define VREG_NULL_CONFIG            0x0000
+#define VREG_GET_REGULATOR_MASK     0x0001
+#define VREG_SET_VOLTAGE_MASK       0x0002
+#define VREG_OPTIMUM_MODE_MASK      0x0004
+#define VREG_ENABLE_MASK            0x0008
+
+struct vregs_info {
+	const char * const name;
+	int state;
+	const int nominal_min;
+	const int low_power_min;
+	const int max_voltage;
+	const int uA_load;
+	struct regulator *regulator;
+};
+
+static struct vregs_info iris_vregs[] = {
+	{"iris_vddio",  VREG_NULL_CONFIG, 0000000, 0, 0000000, 0,      NULL},
+	{"iris_vddxo",  VREG_NULL_CONFIG, 1800000, 0, 1800000, 10000,  NULL},
+	{"iris_vddrfa", VREG_NULL_CONFIG, 1300000, 0, 1300000, 100000, NULL},
+	{"iris_vddpa",  VREG_NULL_CONFIG, 2900000, 0, 2900000, 515000, NULL},
+	{"iris_vdddig", VREG_NULL_CONFIG, 0000000, 0, 0000000, 0,      NULL},
+};
+
+static struct vregs_info riva_vregs[] = {
+	{"riva_vddmx",  VREG_NULL_CONFIG, 1050000, 0, 1150000, 0,      NULL},
+	{"riva_vddcx",  VREG_NULL_CONFIG, 1050000, 0, 1150000, 0,      NULL},
+	{"riva_vddpx",  VREG_NULL_CONFIG, 1800000, 0, 1800000, 0,      NULL},
+};
+
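+/* Program the Riva PMU to enable the IRIS XO and select its mode (48MHz
+ * crystal vs. 19.2MHz TCXO); when the 19.2MHz TCXO is used, also vote the
+ * MSM XO A0 buffer on for WLAN.
+ */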
+static int configure_iris_xo(bool use_48mhz_xo, int on)
+{
+	u32 reg = 0;
+	int rc = 0;
+
+	if (on) {
+		msm_riva_base = ioremap(MSM_RIVA_PHYS, SZ_256);
+		if (!msm_riva_base) {
+			pr_err("ioremap MSM_RIVA_PHYS failed\n");
+			goto fail;
+		}
+
+		/* Enable IRIS XO */
+		writel_relaxed(0, RIVA_PMU_CFG);
+		reg = readl_relaxed(RIVA_PMU_CFG);
+		reg |= RIVA_PMU_CFG_GC_BUS_MUX_SEL_TOP |
+				RIVA_PMU_CFG_IRIS_XO_EN;
+		writel_relaxed(reg, RIVA_PMU_CFG);
+
+		/* Clear XO_MODE[b2:b1] bits. Clear implies 19.2 MHz TCXO */
+		reg &= ~(RIVA_PMU_CFG_IRIS_XO_MODE);
+
+		if (use_48mhz_xo)
+			reg |= RIVA_PMU_CFG_IRIS_XO_MODE_48;
+
+		writel_relaxed(reg, RIVA_PMU_CFG);
+
+		/* Start IRIS XO configuration */
+		reg |= RIVA_PMU_CFG_IRIS_XO_CFG;
+		writel_relaxed(reg, RIVA_PMU_CFG);
+
+		/* Wait for XO configuration to finish */
+		while (readl_relaxed(RIVA_PMU_CFG) &
+						RIVA_PMU_CFG_IRIS_XO_CFG_STS)
+			cpu_relax();
+
+		/* Stop IRIS XO configuration */
+		reg &= ~(RIVA_PMU_CFG_GC_BUS_MUX_SEL_TOP |
+				RIVA_PMU_CFG_IRIS_XO_CFG);
+		writel_relaxed(reg, RIVA_PMU_CFG);
+
+		if (!use_48mhz_xo) {
+			wlan_clock = msm_xo_get(MSM_XO_TCXO_A0, id);
+			if (IS_ERR(wlan_clock)) {
+				rc = PTR_ERR(wlan_clock);
+				pr_err("Failed to get MSM_XO_TCXO_A0 voter"
+							" (%d)\n", rc);
+				goto fail;
+			}
+
+			rc = msm_xo_mode_vote(wlan_clock, MSM_XO_MODE_ON);
+			if (rc < 0) {
+				pr_err("Configuring MSM_XO_MODE_ON failed"
+							" (%d)\n", rc);
+				goto msm_xo_vote_fail;
+			}
+		}
+	}  else {
+		if (wlan_clock != NULL && !use_48mhz_xo) {
+			rc = msm_xo_mode_vote(wlan_clock, MSM_XO_MODE_OFF);
+			if (rc < 0)
+				pr_err("Configuring MSM_XO_MODE_OFF failed"
+							" (%d)\n", rc);
+		}
+	}
+
+	/* Add some delay for XO to settle */
+	msleep(20);
+
+	return rc;
+
+msm_xo_vote_fail:
+	msm_xo_put(wlan_clock);
+
+fail:
+	return rc;
+}
+
+/* Helper routine to turn off all WCNSS vregs e.g. IRIS, Riva */
+static void wcnss_vregs_off(struct vregs_info regulators[], uint size)
+{
+	int i, rc = 0;
+
+	/* Regulators need to be turned off in the reverse order */
+	for (i = (size-1); i >= 0; i--) {
+		if (regulators[i].state == VREG_NULL_CONFIG)
+			continue;
+
+		/* Remove PWM mode */
+		if (regulators[i].state & VREG_OPTIMUM_MODE_MASK) {
+			rc = regulator_set_optimum_mode(
+					regulators[i].regulator, 0);
+			if (rc < 0)
+				pr_err("regulator_set_optimum_mode(%s) failed (%d)\n",
+						regulators[i].name, rc);
+		}
+
+		/* Set voltage to lowest level */
+		if (regulators[i].state & VREG_SET_VOLTAGE_MASK) {
+			rc = regulator_set_voltage(regulators[i].regulator,
+					regulators[i].low_power_min,
+					regulators[i].max_voltage);
+			if (rc)
+				pr_err("regulator_set_voltage(%s) failed (%d)\n",
+						regulators[i].name, rc);
+		}
+
+		/* Disable regulator */
+		if (regulators[i].state & VREG_ENABLE_MASK) {
+			rc = regulator_disable(regulators[i].regulator);
+			if (rc < 0)
+				pr_err("vreg %s disable failed (%d)\n",
+						regulators[i].name, rc);
+		}
+
+		/* Free the regulator source */
+		if (regulators[i].state & VREG_GET_REGULATOR_MASK)
+			regulator_put(regulators[i].regulator);
+
+		regulators[i].state = VREG_NULL_CONFIG;
+	}
+}
+
+/* Common helper routine to turn on all WCNSS vregs e.g. IRIS, Riva */
+static int wcnss_vregs_on(struct device *dev,
+		struct vregs_info regulators[], uint size)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < size; i++) {
+		/* Get regulator source */
+		regulators[i].regulator =
+			regulator_get(dev, regulators[i].name);
+		if (IS_ERR(regulators[i].regulator)) {
+			rc = PTR_ERR(regulators[i].regulator);
+			pr_err("regulator get of %s failed (%d)\n",
+				regulators[i].name, rc);
+			goto fail;
+		}
+		regulators[i].state |= VREG_GET_REGULATOR_MASK;
+
+		/* Set voltage to nominal.  Exclude switches, e.g. LVS */
+		if (regulators[i].nominal_min || regulators[i].max_voltage) {
+			rc = regulator_set_voltage(regulators[i].regulator,
+					regulators[i].nominal_min,
+					regulators[i].max_voltage);
+			if (rc) {
+				pr_err("regulator_set_voltage(%s) failed (%d)\n",
+						regulators[i].name, rc);
+				goto fail;
+			}
+			regulators[i].state |= VREG_SET_VOLTAGE_MASK;
+		}
+
+		/* Vote for PWM/PFM mode if needed */
+		if (regulators[i].uA_load) {
+			rc = regulator_set_optimum_mode(regulators[i].regulator,
+					regulators[i].uA_load);
+			if (rc < 0) {
+				pr_err("regulator_set_optimum_mode(%s) failed (%d)\n",
+						regulators[i].name, rc);
+				goto fail;
+			}
+			regulators[i].state |= VREG_OPTIMUM_MODE_MASK;
+		}
+
+		/* Enable the regulator */
+		rc = regulator_enable(regulators[i].regulator);
+		if (rc) {
+			pr_err("vreg %s enable failed (%d)\n",
+				regulators[i].name, rc);
+			goto fail;
+		}
+		regulators[i].state |= VREG_ENABLE_MASK;
+	}
+
+	return rc;
+
+fail:
+	wcnss_vregs_off(regulators, size);
+	return rc;
+
+}
+
+static void wcnss_iris_vregs_off(void)
+{
+	wcnss_vregs_off(iris_vregs, ARRAY_SIZE(iris_vregs));
+}
+
+static int wcnss_iris_vregs_on(struct device *dev)
+{
+	return wcnss_vregs_on(dev, iris_vregs, ARRAY_SIZE(iris_vregs));
+}
+
+static void wcnss_riva_vregs_off(void)
+{
+	wcnss_vregs_off(riva_vregs, ARRAY_SIZE(riva_vregs));
+}
+
+static int wcnss_riva_vregs_on(struct device *dev)
+{
+	return wcnss_vregs_on(dev, riva_vregs, ARRAY_SIZE(riva_vregs));
+}
+
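+/* Power-up order is Riva vregs, then IRIS vregs, then the IRIS XO; power-down
+ * runs the same steps in reverse.
+ */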
+int wcnss_wlan_power(struct device *dev,
+		struct wcnss_wlan_config *cfg,
+		enum wcnss_opcode on)
+{
+	int rc = 0;
+
+	if (on) {
+		/* RIVA regulator settings */
+		rc = wcnss_riva_vregs_on(dev);
+		if (rc)
+			goto fail_riva_on;
+
+		/* IRIS regulator settings */
+		rc = wcnss_iris_vregs_on(dev);
+		if (rc)
+			goto fail_iris_on;
+
+		/* Configure IRIS XO */
+		rc = configure_iris_xo(cfg->use_48mhz_xo, WCNSS_WLAN_SWITCH_ON);
+		if (rc)
+			goto fail_iris_xo;
+
+	} else {
+		configure_iris_xo(cfg->use_48mhz_xo, WCNSS_WLAN_SWITCH_OFF);
+		wcnss_iris_vregs_off();
+		wcnss_riva_vregs_off();
+	}
+
+	return rc;
+
+fail_iris_xo:
+	wcnss_iris_vregs_off();
+
+fail_iris_on:
+	wcnss_riva_vregs_off();
+
+fail_riva_on:
+	return rc;
+}
+EXPORT_SYMBOL(wcnss_wlan_power);
+
diff --git a/drivers/net/wireless/wcnss/wcnss_riva.h b/drivers/net/wireless/wcnss/wcnss_riva.h
new file mode 100644
index 0000000..e037f58
--- /dev/null
+++ b/drivers/net/wireless/wcnss/wcnss_riva.h
@@ -0,0 +1,31 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _WCNSS_RIVA_H_
+#define _WCNSS_RIVA_H_
+
+#include <linux/device.h>
+
+enum wcnss_opcode {
+	WCNSS_WLAN_SWITCH_OFF = 0,
+	WCNSS_WLAN_SWITCH_ON,
+};
+
+struct wcnss_wlan_config {
+	int		use_48mhz_xo;
+};
+
+int wcnss_wlan_power(struct device *dev,
+		struct wcnss_wlan_config *cfg,
+		enum wcnss_opcode opcode);
+
+#endif /* _WCNSS_RIVA_H_ */
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
new file mode 100644
index 0000000..371e58e
--- /dev/null
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -0,0 +1,339 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/parser.h>
+#include <linux/wcnss_wlan.h>
+#include <mach/peripheral-loader.h>
+#include "wcnss_riva.h"
+
+#define DEVICE "wcnss_wlan"
+#define VERSION "1.01"
+#define WCNSS_PIL_DEVICE "wcnss"
+#define WCNSS_NV_NAME "wlan/prima/WCNSS_qcom_cfg.ini"
+
+/* By default assume 48MHz XO is populated */
+#define CONFIG_USE_48MHZ_XO_DEFAULT 1
+
+static struct {
+	struct platform_device *pdev;
+	void		*pil;
+	struct resource	*mmio_res;
+	struct resource	*tx_irq_res;
+	struct resource	*rx_irq_res;
+	const struct dev_pm_ops *pm_ops;
+	int             smd_channel_ready;
+	struct wcnss_wlan_config wlan_config;
+} *penv = NULL;
+
+enum {
+	nv_none = -1,
+	nv_use_48mhz_xo,
+	nv_end,
+};
+
+static const match_table_t nv_tokens = {
+	{nv_use_48mhz_xo, "gUse48MHzXO=%d"},
+	{nv_end, "END"},
+	{nv_none, NULL}
+};
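+
+/*
+ * Illustrative NV file contents accepted by wcnss_parse_nv() below
+ * (a sketch; the value shown is hypothetical):
+ *
+ *	# WCNSS configuration
+ *	gUse48MHzXO=1
+ *	END
+ */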
+
+static void wcnss_init_config(void)
+{
+	penv->wlan_config.use_48mhz_xo = CONFIG_USE_48MHZ_XO_DEFAULT;
+}
+
+static void wcnss_parse_nv(char *nvp)
+{
+	substring_t args[MAX_OPT_ARGS];
+	char *cur;
+	char *tok;
+	int token;
+	int intval;
+
+	cur = nvp;
+	while (cur != NULL) {
+		if ('#' == *cur) {
+			/* comment, consume remainder of line */
+			tok = strsep(&cur, "\r\n");
+			continue;
+		}
+
+		tok = strsep(&cur, " \t\r\n,");
+		if (!*tok)
+			continue;
+
+		token = match_token(tok, nv_tokens, args);
+		switch (token) {
+		case nv_use_48mhz_xo:
+			if (match_int(&args[0], &intval)) {
+				dev_err(&penv->pdev->dev,
+					"Invalid value for gUse48MHzXO: %s\n",
+					args[0].from);
+				continue;
+			}
+			if ((0 > intval) || (1 < intval)) {
+				dev_err(&penv->pdev->dev,
+					"Invalid value for gUse48MHzXO: %d\n",
+					intval);
+				continue;
+			}
+			penv->wlan_config.use_48mhz_xo = intval;
+			dev_info(&penv->pdev->dev,
+					"gUse48MHzXO set to %d\n", intval);
+			break;
+		case nv_end:
+			/* end of options so we are done */
+			return;
+		default:
+			/* silently ignore unknown settings */
+			break;
+		}
+	}
+}
+
+static int __devinit
+wcnss_wlan_ctrl_probe(struct platform_device *pdev)
+{
+	if (penv)
+		penv->smd_channel_ready = 1;
+
+	pr_info("%s: SMD ctrl channel up\n", __func__);
+
+	return 0;
+}
+
+static int __devexit
+wcnss_wlan_ctrl_remove(struct platform_device *pdev)
+{
+	if (penv)
+		penv->smd_channel_ready = 0;
+
+	pr_info("%s: SMD ctrl channel down\n", __func__);
+
+	return 0;
+}
+
+
+static struct platform_driver wcnss_wlan_ctrl_driver = {
+	.driver = {
+		.name	= "WLAN_CTRL",
+		.owner	= THIS_MODULE,
+	},
+	.probe	= wcnss_wlan_ctrl_probe,
+	.remove	= __devexit_p(wcnss_wlan_ctrl_remove),
+};
+
+struct device *wcnss_wlan_get_device(void)
+{
+	if (penv && penv->pdev && penv->smd_channel_ready)
+		return &penv->pdev->dev;
+	return NULL;
+}
+EXPORT_SYMBOL(wcnss_wlan_get_device);
+
+struct resource *wcnss_wlan_get_memory_map(struct device *dev)
+{
+	if (penv && dev && (dev == &penv->pdev->dev) && penv->smd_channel_ready)
+		return penv->mmio_res;
+	return NULL;
+}
+EXPORT_SYMBOL(wcnss_wlan_get_memory_map);
+
+int wcnss_wlan_get_dxe_tx_irq(struct device *dev)
+{
+	if (penv && dev && (dev == &penv->pdev->dev) &&
+				penv->tx_irq_res && penv->smd_channel_ready)
+		return penv->tx_irq_res->start;
+	return WCNSS_WLAN_IRQ_INVALID;
+}
+EXPORT_SYMBOL(wcnss_wlan_get_dxe_tx_irq);
+
+int wcnss_wlan_get_dxe_rx_irq(struct device *dev)
+{
+	if (penv && dev && (dev == &penv->pdev->dev) &&
+				penv->rx_irq_res && penv->smd_channel_ready)
+		return penv->rx_irq_res->start;
+	return WCNSS_WLAN_IRQ_INVALID;
+}
+EXPORT_SYMBOL(wcnss_wlan_get_dxe_rx_irq);
+
+void wcnss_wlan_register_pm_ops(struct device *dev,
+				const struct dev_pm_ops *pm_ops)
+{
+	if (penv && dev && (dev == &penv->pdev->dev) && pm_ops)
+		penv->pm_ops = pm_ops;
+}
+EXPORT_SYMBOL(wcnss_wlan_register_pm_ops);
+
+static int wcnss_wlan_suspend(struct device *dev)
+{
+	if (penv && dev && (dev == &penv->pdev->dev) &&
+	    penv->smd_channel_ready &&
+	    penv->pm_ops && penv->pm_ops->suspend)
+		return penv->pm_ops->suspend(dev);
+	return 0;
+}
+
+static int wcnss_wlan_resume(struct device *dev)
+{
+	if (penv && dev && (dev == &penv->pdev->dev) &&
+	    penv->smd_channel_ready &&
+	    penv->pm_ops && penv->pm_ops->resume)
+		return penv->pm_ops->resume(dev);
+	return 0;
+}
+
+static int __devinit
+wcnss_wlan_probe(struct platform_device *pdev)
+{
+	const struct firmware *nv;
+	char *nvp;
+	int ret;
+
+	/* verify we haven't been called more than once */
+	if (penv) {
+		dev_err(&pdev->dev, "cannot handle multiple devices.\n");
+		return -ENODEV;
+	}
+
+	/* create an environment to track the device */
+	penv = kzalloc(sizeof(*penv), GFP_KERNEL);
+	if (!penv) {
+		dev_err(&pdev->dev, "cannot allocate device memory.\n");
+		return -ENOMEM;
+	}
+	penv->pdev = pdev;
+
+	/* initialize the WCNSS default configuration */
+	wcnss_init_config();
+
+	/* update the WCNSS configuration from NV if present */
+	ret = request_firmware(&nv, WCNSS_NV_NAME, &pdev->dev);
+	if (!ret) {
+		/* firmware is read-only so make a NUL-terminated copy */
+		nvp = kmalloc(nv->size+1, GFP_KERNEL);
+		if (nvp) {
+			memcpy(nvp, nv->data, nv->size);
+			nvp[nv->size] = '\0';
+			wcnss_parse_nv(nvp);
+			kfree(nvp);
+		} else {
+			dev_err(&pdev->dev, "cannot parse NV.\n");
+		}
+		release_firmware(nv);
+	} else {
+		dev_err(&pdev->dev, "cannot read NV.\n");
+	}
+
+	/* power up the WCNSS */
+	ret = wcnss_wlan_power(&pdev->dev, &penv->wlan_config,
+					WCNSS_WLAN_SWITCH_ON);
+	if (ret) {
+		dev_err(&pdev->dev, "WCNSS Power-up failed.\n");
+		goto fail_power;
+	}
+
+	/* trigger initialization of the WCNSS */
+	penv->pil = pil_get(WCNSS_PIL_DEVICE);
+	if (IS_ERR(penv->pil)) {
+		dev_err(&pdev->dev, "Peripheral Loader failed on WCNSS.\n");
+		ret = PTR_ERR(penv->pil);
+		penv->pil = NULL;
+		goto fail_pil;
+	}
+
+	/* allocate resources */
+	penv->mmio_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"wcnss_mmio");
+	penv->tx_irq_res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+							"wcnss_wlantx_irq");
+	penv->rx_irq_res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+							"wcnss_wlanrx_irq");
+
+	if (!(penv->mmio_res && penv->tx_irq_res && penv->rx_irq_res)) {
+		dev_err(&pdev->dev, "insufficient resources\n");
+		ret = -ENOENT;
+		goto fail_res;
+	}
+
+	return 0;
+
+fail_res:
+	if (penv->pil)
+		pil_put(penv->pil);
+fail_pil:
+	wcnss_wlan_power(&pdev->dev, &penv->wlan_config,
+				WCNSS_WLAN_SWITCH_OFF);
+fail_power:
+	kfree(penv);
+	penv = NULL;
+	return ret;
+}
+
+static int __devexit
+wcnss_wlan_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+
+static const struct dev_pm_ops wcnss_wlan_pm_ops = {
+	.suspend	= wcnss_wlan_suspend,
+	.resume		= wcnss_wlan_resume,
+};
+
+static struct platform_driver wcnss_wlan_driver = {
+	.driver = {
+		.name	= DEVICE,
+		.owner	= THIS_MODULE,
+		.pm	= &wcnss_wlan_pm_ops,
+	},
+	.probe	= wcnss_wlan_probe,
+	.remove	= __devexit_p(wcnss_wlan_remove),
+};
+
+static int __init wcnss_wlan_init(void)
+{
+	platform_driver_register(&wcnss_wlan_driver);
+	platform_driver_register(&wcnss_wlan_ctrl_driver);
+
+	return 0;
+}
+
+static void __exit wcnss_wlan_exit(void)
+{
+	if (penv) {
+		if (penv->pil)
+			pil_put(penv->pil);
+
+		wcnss_wlan_power(&penv->pdev->dev, &penv->wlan_config,
+					WCNSS_WLAN_SWITCH_OFF);
+
+		kfree(penv);
+		penv = NULL;
+	}
+
+	platform_driver_unregister(&wcnss_wlan_ctrl_driver);
+	platform_driver_unregister(&wcnss_wlan_driver);
+}
+
+module_init(wcnss_wlan_init);
+module_exit(wcnss_wlan_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(VERSION);
+MODULE_DESCRIPTION(DEVICE " Driver");
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
index 8390dca..17b5df5 100644
--- a/drivers/platform/Kconfig
+++ b/drivers/platform/Kconfig
@@ -1,3 +1,6 @@
 if X86
 source "drivers/platform/x86/Kconfig"
 endif
+if ARCH_MSM
+source "drivers/platform/msm/Kconfig"
+endif
diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile
index 782953a..58c62bd 100644
--- a/drivers/platform/Makefile
+++ b/drivers/platform/Makefile
@@ -3,3 +3,4 @@
 #
 
 obj-$(CONFIG_X86)		+= x86/
+obj-$(CONFIG_ARCH_MSM)		+= msm/
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
new file mode 100644
index 0000000..af10b1b
--- /dev/null
+++ b/drivers/platform/msm/Kconfig
@@ -0,0 +1,36 @@
+menu "Qualcomm MSM specific device drivers"
+	depends on ARCH_MSM
+
+config MSM_SSBI
+	bool "Qualcomm Single-wire Serial Bus Interface (SSBI)"
+	help
+	  If you say yes to this option, support will be included for the
+	  built-in SSBI interface on Qualcomm MSM family processors.
+
+	  This is required for communicating with Qualcomm PMICs and
+	  other devices that have the SSBI interface.
+
+config SPS
+	bool "SPS support"
+	depends on (HAS_IOMEM && (ARCH_MSM8960 || ARCH_MSM8X60))
+	select GENERIC_ALLOCATOR
+	default n
+	help
+	  The SPS (Smart Peripheral Switch) is a DMA engine.
+	  It can move data in the following modes:
+		1. Peripheral-to-Peripheral.
+		2. Peripheral-to-Memory.
+		3. Memory-to-Memory.
+
+config SPS_SUPPORT_BAMDMA
+	bool "SPS support BAM DMA"
+	depends on SPS
+	default n
+	help
+	  The BAM-DMA is used for Memory-to-Memory transfers.
+	  The main use case is RPC between processors.
+	  The BAM-DMA hardware has two register sets:
+	  1. A BAM HW like all the peripherals.
+	  2. A DMA channel configuration (i.e. channel priority).
+
+endmenu
diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile
new file mode 100644
index 0000000..f6f212e
--- /dev/null
+++ b/drivers/platform/msm/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the MSM specific device drivers.
+#
+obj-$(CONFIG_MSM_SSBI) += ssbi.o
+obj-$(CONFIG_SPS) += sps/
diff --git a/drivers/platform/msm/sps/Makefile b/drivers/platform/msm/sps/Makefile
new file mode 100755
index 0000000..f19e162
--- /dev/null
+++ b/drivers/platform/msm/sps/Makefile
@@ -0,0 +1,2 @@
+obj-y += bam.o sps_bam.o sps.o sps_dma.o sps_map.o sps_mem.o sps_rm.o
+
diff --git a/drivers/platform/msm/sps/bam.c b/drivers/platform/msm/sps/bam.c
new file mode 100644
index 0000000..f816236
--- /dev/null
+++ b/drivers/platform/msm/sps/bam.c
@@ -0,0 +1,755 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Bus-Access-Manager (BAM) Hardware manager. */
+
+#include <linux/types.h>	/* u32 */
+#include <linux/kernel.h>	/* pr_info() */
+#include <linux/io.h>		/* ioread32() */
+#include <linux/bitops.h>	/* find_first_bit() */
+#include <linux/errno.h>	/* ENODEV */
+
+#include "bam.h"
+#include "sps_bam.h"
+
+/**
+ *  Valid BAM Hardware version.
+ *
+ */
+#define BAM_MIN_VERSION 2
+#define BAM_MAX_VERSION 0x1f
+
+/* Maximum number of execution environments */
+#define BAM_MAX_EES 4
+
+/**
+ *  BAM Hardware registers.
+ *
+ */
+#define CTRL                        (0xf80)
+#define REVISION                    (0xf84)
+#define NUM_PIPES                   (0xfbc)
+#define DESC_CNT_TRSHLD             (0xf88)
+#define IRQ_SRCS                    (0xf8c)
+#define IRQ_SRCS_MSK                (0xf90)
+#define IRQ_SRCS_UNMASKED           (0xfb0)
+#define IRQ_STTS                    (0xf94)
+#define IRQ_CLR                     (0xf98)
+#define IRQ_EN                      (0xf9c)
+#define IRQ_SIC_SEL                 (0xfa0)
+#define AHB_MASTER_ERR_CTRLS        (0xfa4)
+#define AHB_MASTER_ERR_ADDR         (0xfa8)
+#define AHB_MASTER_ERR_DATA         (0xfac)
+/* The addresses for IRQ_DEST and PERIPH_IRQ_DEST become reserved */
+#define IRQ_DEST                    (0xfb4)
+#define PERIPH_IRQ_DEST             (0xfb8)
+#define TEST_BUS_REG                (0xff8)
+#define CNFG_BITS                   (0xffc)
+#define TEST_BUS_SEL                (0xff4)
+#define TRUST_REG                   (0xff0)
+#define IRQ_SRCS_EE(n)             (0x1800 + 128 * (n))
+#define IRQ_SRCS_MSK_EE(n)         (0x1804 + 128 * (n))
+#define IRQ_SRCS_UNMASKED_EE(n)    (0x1808 + 128 * (n))
+
+#define P_CTRL(n)                  (0x0000 + 128 * (n))
+#define P_RST(n)                   (0x0004 + 128 * (n))
+#define P_HALT(n)                  (0x0008 + 128 * (n))
+#define P_IRQ_STTS(n)              (0x0010 + 128 * (n))
+#define P_IRQ_CLR(n)               (0x0014 + 128 * (n))
+#define P_IRQ_EN(n)                (0x0018 + 128 * (n))
+#define P_TIMER(n)                 (0x001c + 128 * (n))
+#define P_TIMER_CTRL(n)            (0x0020 + 128 * (n))
+#define P_PRDCR_SDBND(n)            (0x0024 + 128 * (n))
+#define P_CNSMR_SDBND(n)            (0x0028 + 128 * (n))
+#define P_TRUST_REG(n)             (0x0030 + 128 * (n))
+#define P_EVNT_DEST_ADDR(n)        (0x102c + 64 * (n))
+#define P_EVNT_REG(n)              (0x1018 + 64 * (n))
+#define P_SW_OFSTS(n)              (0x1000 + 64 * (n))
+#define P_DATA_FIFO_ADDR(n)        (0x1024 + 64 * (n))
+#define P_DESC_FIFO_ADDR(n)        (0x101c + 64 * (n))
+#define P_EVNT_GEN_TRSHLD(n)       (0x1028 + 64 * (n))
+#define P_FIFO_SIZES(n)            (0x1020 + 64 * (n))
+#define P_IRQ_DEST_ADDR(n)         (0x103c + 64 * (n))
+#define P_RETR_CNTXT(n)           (0x1034 + 64 * (n))
+#define P_SI_CNTXT(n)             (0x1038 + 64 * (n))
+#define P_AU_PSM_CNTXT_1(n)       (0x1004 + 64 * (n))
+#define P_PSM_CNTXT_2(n)          (0x1008 + 64 * (n))
+#define P_PSM_CNTXT_3(n)          (0x100c + 64 * (n))
+#define P_PSM_CNTXT_4(n)          (0x1010 + 64 * (n))
+#define P_PSM_CNTXT_5(n)          (0x1014 + 64 * (n))
+
+/**
+ *  BAM Hardware registers bitmask.
+ *  format: <register>_<field>
+ *
+ */
+/* CTRL */
+#define IBC_DISABLE                            0x10000
+#define BAM_CACHED_DESC_STORE                   0x8000
+#define BAM_DESC_CACHE_SEL                      0x6000
+/* BAM_PERIPH_IRQ_SIC_SEL is an obsolete field; this bit is now reserved */
+#define BAM_PERIPH_IRQ_SIC_SEL                  0x1000
+#define BAM_EN_ACCUM                              0x10
+#define BAM_EN                                     0x2
+#define BAM_SW_RST                                 0x1
+
+/* REVISION */
+#define BAM_INACTIV_TMR_BASE                0xff000000
+#define BAM_INACTIV_TMRS_EXST                  0x80000
+#define BAM_HIGH_FREQUENCY_BAM                 0x40000
+#define BAM_HAS_NO_BYPASS                      0x20000
+#define BAM_SECURED                            0x10000
+#define BAM_NUM_EES                              0xf00
+#define BAM_REVISION                              0xff
+
+/* NUM_PIPES */
+#define BAM_NON_PIPE_GRP                    0xff000000
+#define BAM_PERIPH_NON_PIPE_GRP               0xff0000
+#define BAM_NUM_PIPES                             0xff
+
+/* DESC_CNT_TRSHLD */
+#define BAM_DESC_CNT_TRSHLD                     0xffff
+
+/* IRQ_SRCS */
+#define BAM_IRQ                         0x80000000
+#define P_IRQ                           0x7fffffff
+
+#define IRQ_STTS_BAM_EMPTY_IRQ                          0x8
+#define IRQ_STTS_BAM_ERROR_IRQ                          0x4
+#define IRQ_STTS_BAM_HRESP_ERR_IRQ                      0x2
+#define IRQ_CLR_BAM_EMPTY_CLR                           0x8
+#define IRQ_CLR_BAM_ERROR_CLR                           0x4
+#define IRQ_CLR_BAM_HRESP_ERR_CLR                       0x2
+#define IRQ_EN_BAM_EMPTY_EN                             0x8
+#define IRQ_EN_BAM_ERROR_EN                             0x4
+#define IRQ_EN_BAM_HRESP_ERR_EN                         0x2
+#define IRQ_SIC_SEL_BAM_IRQ_SIC_SEL              0x80000000
+#define IRQ_SIC_SEL_P_IRQ_SIC_SEL                0x7fffffff
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HVMID         0x7c0000
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_DIRECT_MODE    0x20000
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HCID           0x1f000
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HPROT            0xf00
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HBURST            0xe0
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HSIZE             0x18
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HWRITE             0x4
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HTRANS             0x3
+#define CNFG_BITS_BAM_AU_ACCUMED                  0x4000000
+#define CNFG_BITS_BAM_PSM_P_HD_DATA               0x2000000
+#define CNFG_BITS_BAM_REG_P_EN                    0x1000000
+#define CNFG_BITS_BAM_WB_DSC_AVL_P_RST             0x800000
+#define CNFG_BITS_BAM_WB_RETR_SVPNT                0x400000
+#define CNFG_BITS_BAM_WB_CSW_ACK_IDL               0x200000
+#define CNFG_BITS_BAM_WB_BLK_CSW                   0x100000
+#define CNFG_BITS_BAM_WB_P_RES                      0x80000
+#define CNFG_BITS_BAM_SI_P_RES                      0x40000
+#define CNFG_BITS_BAM_AU_P_RES                      0x20000
+#define CNFG_BITS_BAM_PSM_P_RES                     0x10000
+#define CNFG_BITS_BAM_PSM_CSW_REQ                    0x8000
+#define CNFG_BITS_BAM_SB_CLK_REQ                     0x4000
+#define CNFG_BITS_BAM_IBC_DISABLE                    0x2000
+#define CNFG_BITS_BAM_NO_EXT_P_RST                   0x1000
+#define CNFG_BITS_BAM_FULL_PIPE                       0x800
+#define CNFG_BITS_BAM_PIPE_CNFG                         0x4
+
+/* TEST_BUS_SEL */
+#define BAM_DATA_ERASE                         0x40000
+#define BAM_DATA_FLUSH                         0x20000
+#define BAM_CLK_ALWAYS_ON                      0x10000
+#define BAM_TESTBUS_SEL                           0x7f
+
+/* TRUST_REG  */
+#define BAM_VMID                                0x1f00
+#define BAM_RST_BLOCK                             0x80
+#define BAM_EE                                     0x3
+
+/* P_TRUST_REGn */
+#define BAM_P_VMID                              0x1f00
+#define BAM_P_EE                                   0x3
+
+/* P_PRDCR_SDBNDn */
+#define P_PRDCR_SDBNDn_BAM_P_SB_UPDATED      0x1000000
+#define P_PRDCR_SDBNDn_BAM_P_TOGGLE           0x100000
+#define P_PRDCR_SDBNDn_BAM_P_CTRL              0xf0000
+#define P_PRDCR_SDBNDn_BAM_P_BYTES_FREE         0xffff
+/* P_CNSMR_SDBNDn */
+#define P_CNSMR_SDBNDn_BAM_P_SB_UPDATED      0x1000000
+#define P_CNSMR_SDBNDn_BAM_P_WAIT_4_ACK       0x800000
+#define P_CNSMR_SDBNDn_BAM_P_ACK_TOGGLE       0x400000
+#define P_CNSMR_SDBNDn_BAM_P_ACK_TOGGLE_R     0x200000
+#define P_CNSMR_SDBNDn_BAM_P_TOGGLE           0x100000
+#define P_CNSMR_SDBNDn_BAM_P_CTRL              0xf0000
+#define P_CNSMR_SDBNDn_BAM_P_BYTES_AVAIL        0xffff
+
+/* P_ctrln */
+#define P_PREFETCH_LIMIT                         0x600
+#define P_AUTO_EOB_SEL                           0x180
+#define P_AUTO_EOB                                0x40
+#define P_SYS_MODE                             0x20
+#define P_SYS_STRM                             0x10
+#define P_DIRECTION                             0x8
+#define P_EN                                    0x2
+
+#define P_RST_P_SW_RST                                 0x1
+
+#define P_HALT_P_PROD_HALTED                           0x2
+#define P_HALT_P_HALT                                  0x1
+
+#define P_IRQ_STTS_P_TRNSFR_END_IRQ                   0x20
+#define P_IRQ_STTS_P_ERR_IRQ                          0x10
+#define P_IRQ_STTS_P_OUT_OF_DESC_IRQ                   0x8
+#define P_IRQ_STTS_P_WAKE_IRQ                          0x4
+#define P_IRQ_STTS_P_TIMER_IRQ                         0x2
+#define P_IRQ_STTS_P_PRCSD_DESC_IRQ                    0x1
+
+#define P_IRQ_CLR_P_TRNSFR_END_CLR                    0x20
+#define P_IRQ_CLR_P_ERR_CLR                           0x10
+#define P_IRQ_CLR_P_OUT_OF_DESC_CLR                    0x8
+#define P_IRQ_CLR_P_WAKE_CLR                           0x4
+#define P_IRQ_CLR_P_TIMER_CLR                          0x2
+#define P_IRQ_CLR_P_PRCSD_DESC_CLR                     0x1
+
+#define P_IRQ_EN_P_TRNSFR_END_EN                      0x20
+#define P_IRQ_EN_P_ERR_EN                             0x10
+#define P_IRQ_EN_P_OUT_OF_DESC_EN                      0x8
+#define P_IRQ_EN_P_WAKE_EN                             0x4
+#define P_IRQ_EN_P_TIMER_EN                            0x2
+#define P_IRQ_EN_P_PRCSD_DESC_EN                       0x1
+
+#define P_TIMER_P_TIMER                             0xffff
+
+/* P_TIMER_ctrln */
+#define P_TIMER_RST                0x80000000
+#define P_TIMER_RUN                0x40000000
+#define P_TIMER_MODE               0x20000000
+#define P_TIMER_TRSHLD                 0xffff
+
+/* P_EVNT_regn */
+#define P_BYTES_CONSUMED             0xffff0000
+#define P_DESC_FIFO_PEER_OFST            0xffff
+
+/* P_SW_ofstsn */
+#define SW_OFST_IN_DESC              0xffff0000
+#define SW_DESC_OFST                     0xffff
+
+#define P_EVNT_GEN_TRSHLD_P_TRSHLD                  0xffff
+
+/* P_FIFO_sizesn */
+#define P_DATA_FIFO_SIZE           0xffff0000
+#define P_DESC_FIFO_SIZE               0xffff
+
+#define P_RETR_CNTXT_RETR_DESC_OFST            0xffff0000
+#define P_RETR_CNTXT_RETR_OFST_IN_DESC             0xffff
+#define P_SI_CNTXT_SI_DESC_OFST                    0xffff
+#define P_AU_PSM_CNTXT_1_AU_PSM_ACCUMED        0xffff0000
+#define P_AU_PSM_CNTXT_1_AU_ACKED                  0xffff
+#define P_PSM_CNTXT_2_PSM_DESC_VALID           0x80000000
+#define P_PSM_CNTXT_2_PSM_DESC_IRQ             0x40000000
+#define P_PSM_CNTXT_2_PSM_DESC_IRQ_DONE        0x20000000
+#define P_PSM_CNTXT_2_PSM_GENERAL_BITS         0x1e000000
+#define P_PSM_CNTXT_2_PSM_CONS_STATE            0x1c00000
+#define P_PSM_CNTXT_2_PSM_PROD_SYS_STATE         0x380000
+#define P_PSM_CNTXT_2_PSM_PROD_B2B_STATE          0x70000
+#define P_PSM_CNTXT_2_PSM_DESC_SIZE                0xffff
+#define P_PSM_CNTXT_4_PSM_DESC_OFST            0xffff0000
+#define P_PSM_CNTXT_4_PSM_SAVED_ACCUMED_SIZE       0xffff
+#define P_PSM_CNTXT_5_PSM_BLOCK_BYTE_CNT       0xffff0000
+#define P_PSM_CNTXT_5_PSM_OFST_IN_DESC             0xffff
+
+#define BAM_ERROR   (-1)
+
+/* AHB buffer error control */
+enum bam_nonsecure_reset {
+	BAM_NONSECURE_RESET_ENABLE  = 0,
+	BAM_NONSECURE_RESET_DISABLE = 1,
+};
+
+/**
+ *
+ * Read register with debug info.
+ *
+ * @base - bam base virtual address.
+ * @offset - register offset.
+ *
+ * @return u32
+ */
+static inline u32 bam_read_reg(void *base, u32 offset)
+{
+	u32 val = ioread32(base + offset);
+	SPS_DBG("sps:bam 0x%x(va) read reg 0x%x r_val 0x%x.\n",
+			(u32) base, offset, val);
+	return val;
+}
+
+/**
+ * Read register masked field with debug info.
+ *
+ * @base - bam base virtual address.
+ * @offset - register offset.
+ * @mask - register bitmask.
+ *
+ * @return u32
+ */
+static inline u32 bam_read_reg_field(void *base, u32 offset, const u32 mask)
+{
+	u32 shift = find_first_bit((void *)&mask, 32);
+	u32 val = ioread32(base + offset);
+	val &= mask;		/* clear other bits */
+	val >>= shift;
+	SPS_DBG("sps:bam 0x%x(va) read reg 0x%x mask 0x%x r_val 0x%x.\n",
+			(u32) base, offset, mask, val);
+	return val;
+}
+
+/**
+ *
+ * Write register with debug info.
+ *
+ * @base - bam base virtual address.
+ * @offset - register offset.
+ * @val - value to write.
+ *
+ */
+static inline void bam_write_reg(void *base, u32 offset, u32 val)
+{
+	iowrite32(val, base + offset);
+	SPS_DBG("sps:bam 0x%x(va) write reg 0x%x w_val 0x%x.\n",
+			(u32) base, offset, val);
+}
+
+/**
+ * Write register masked field with debug info.
+ *
+ * @base - bam base virtual address.
+ * @offset - register offset.
+ * @mask - register bitmask.
+ * @val - value to write.
+ *
+ */
+static inline void bam_write_reg_field(void *base, u32 offset,
+				       const u32 mask, u32 val)
+{
+	u32 shift = find_first_bit((void *)&mask, 32);
+	u32 tmp = ioread32(base + offset);
+
+	tmp &= ~mask;		/* clear written bits */
+	val = tmp | (val << shift);
+	iowrite32(val, base + offset);
+	SPS_DBG("sps:bam 0x%x(va) write reg 0x%x w_val 0x%x.\n",
+			(u32) base, offset, val);
+}
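+
+/*
+ * Worked example of the mask/shift handling above (illustrative only):
+ * for a field mask of 0xff00, find_first_bit() yields a shift of 8, so
+ * bam_read_reg_field() returns (reg & 0xff00) >> 8, and
+ * bam_write_reg_field() places the new value at bits 15..8 while
+ * preserving the remaining bits of the register.
+ */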
+
+/**
+ * Initialize a BAM device
+ *
+ */
+int bam_init(void *base, u32 ee,
+		u16 summing_threshold,
+		u32 irq_mask, u32 *version, u32 *num_pipes)
+{
+	/* disable bit#11 because of HW bug */
+	u32 cfg_bits = 0xffffffff & ~(1 << 11);
+	u32 ver = 0;
+
+	ver = bam_read_reg_field(base, REVISION, BAM_REVISION);
+
+	if ((ver < BAM_MIN_VERSION) || (ver > BAM_MAX_VERSION)) {
+		SPS_ERR("sps:bam 0x%x(va) Invalid BAM REVISION 0x%x.\n",
+				(u32) base, ver);
+		return -ENODEV;
+	} else
+		SPS_INFO("sps:REVISION of BAM 0x%x is 0x%x.\n",
+				(u32) base, ver);
+
+	if (summing_threshold == 0) {
+		summing_threshold = 4;
+		SPS_ERR("sps:bam 0x%x(va) summing_threshold is zero, "
+				"using default 4.\n", (u32) base);
+	}
+
+	bam_write_reg_field(base, CTRL, BAM_SW_RST, 1);
+	/* No delay needed */
+	bam_write_reg_field(base, CTRL, BAM_SW_RST, 0);
+
+	bam_write_reg_field(base, CTRL, BAM_EN, 1);
+
+	bam_write_reg(base, DESC_CNT_TRSHLD, summing_threshold);
+
+	bam_write_reg(base, CNFG_BITS, cfg_bits);
+
+	/*
+	 *  Enable Global BAM Interrupt - for error reasons,
+	 *  filter with mask.
+	 *  Note: Pipe interrupts are disabled until BAM_P_IRQ_enn is set
+	 */
+	bam_write_reg_field(base, IRQ_SRCS_MSK_EE(ee), BAM_IRQ, 1);
+
+	bam_write_reg(base, IRQ_EN, irq_mask);
+
+	*num_pipes = bam_read_reg_field(base, NUM_PIPES, BAM_NUM_PIPES);
+
+	*version = ver;
+
+	return 0;
+}
+
+/**
+ * Set BAM global execution environment
+ *
+ * @base - BAM virtual base address
+ *
+ * @ee - BAM execution environment index
+ *
+ * @vmid - virtual master identifier
+ *
+ * @reset - enable/disable BAM global software reset
+ */
+static void bam_set_ee(void *base, u32 ee, u32 vmid,
+			enum bam_nonsecure_reset reset)
+{
+	bam_write_reg_field(base, TRUST_REG, BAM_EE, ee);
+	bam_write_reg_field(base, TRUST_REG, BAM_VMID, vmid);
+	bam_write_reg_field(base, TRUST_REG, BAM_RST_BLOCK, reset);
+}
+
+/**
+ * Set the pipe execution environment
+ *
+ * @base - BAM virtual base address
+ *
+ * @pipe - pipe index
+ *
+ * @ee - BAM execution environment index
+ *
+ * @vmid - virtual master identifier
+ */
+static void bam_pipe_set_ee(void *base, u32 pipe, u32 ee, u32 vmid)
+{
+	bam_write_reg_field(base, P_TRUST_REG(pipe), BAM_P_EE, ee);
+	bam_write_reg_field(base, P_TRUST_REG(pipe), BAM_P_VMID, vmid);
+}
+
+/**
+ * Initialize BAM device security execution environment
+ */
+int bam_security_init(void *base, u32 ee, u32 vmid, u32 pipe_mask)
+{
+	u32 version;
+	u32 num_pipes;
+	u32 mask;
+	u32 pipe;
+
+	/*
+	 * Discover the hardware version number and the number of pipes
+	 * supported by this BAM
+	 */
+	version = bam_read_reg_field(base, REVISION, BAM_REVISION);
+	num_pipes = bam_read_reg_field(base, NUM_PIPES, BAM_NUM_PIPES);
+	if (version < 3 || version > 0x1F) {
+		SPS_ERR("sps:bam 0x%x(va) security is not supported for this "
+			"BAM version 0x%x.\n", (u32) base, version);
+		return -ENODEV;
+	}
+
+	if (num_pipes > BAM_MAX_PIPES)
+		return -ENODEV;
+
+	for (pipe = 0, mask = 1; pipe < num_pipes; pipe++, mask <<= 1)
+		if ((mask & pipe_mask) != 0)
+			bam_pipe_set_ee(base, pipe, ee, vmid);
+
+	/* If MSbit is set, assign top-level interrupt to this EE */
+	mask = 1UL << 31;
+	if ((mask & pipe_mask) != 0)
+		bam_set_ee(base, ee, vmid, BAM_NONSECURE_RESET_ENABLE);
+
+	return 0;
+}
+
+/**
+ * Verify that a BAM device is enabled and gathers the hardware
+ * configuration.
+ *
+ */
+int bam_check(void *base, u32 *version, u32 *num_pipes)
+{
+	u32 ver = 0;
+
+	if (!bam_read_reg_field(base, CTRL, BAM_EN))
+		return -ENODEV;
+
+	ver = bam_read_reg(base, REVISION) & BAM_REVISION;
+
+	/*
+	 *  Discover the hardware version number and the number of pipes
+	 *  supported by this BAM
+	 */
+	*num_pipes = bam_read_reg(base, NUM_PIPES);
+	*version = ver;
+
+	/* Check BAM version */
+	if ((ver < BAM_MIN_VERSION) || (ver > BAM_MAX_VERSION)) {
+		SPS_ERR("sps:bam 0x%x(va) Invalid BAM version 0x%x.\n",
+				(u32) base, ver);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/**
+ * Disable a BAM device
+ *
+ */
+void bam_exit(void *base, u32 ee)
+{
+	bam_write_reg_field(base, IRQ_SRCS_MSK_EE(ee), BAM_IRQ, 0);
+
+	bam_write_reg(base, IRQ_EN, 0);
+
+	/* Disable the BAM */
+	bam_write_reg_field(base, CTRL, BAM_EN, 0);
+}
+
+/**
+ * Get BAM global IRQ status
+ */
+u32 bam_get_irq_status(void *base, u32 ee, u32 mask)
+{
+	u32 status = bam_read_reg(base, IRQ_SRCS_EE(ee));
+	status &= mask;
+
+	return status;
+}
+
+/**
+ * Initialize a BAM pipe
+ */
+int bam_pipe_init(void *base, u32 pipe,	struct bam_pipe_parameters *param,
+					u32 ee)
+{
+	/* Reset the BAM pipe */
+	bam_write_reg(base, P_RST(pipe), 1);
+	/* No delay needed */
+	bam_write_reg(base, P_RST(pipe), 0);
+
+	/* Enable the Pipe Interrupt at the BAM level */
+	bam_write_reg_field(base, IRQ_SRCS_MSK_EE(ee), (1 << pipe), 1);
+
+	bam_write_reg(base, P_IRQ_EN(pipe), param->pipe_irq_mask);
+
+	bam_write_reg_field(base, P_CTRL(pipe), P_DIRECTION, param->dir);
+	bam_write_reg_field(base, P_CTRL(pipe), P_SYS_MODE, param->mode);
+
+	bam_write_reg(base, P_EVNT_GEN_TRSHLD(pipe), param->event_threshold);
+
+	bam_write_reg(base, P_DESC_FIFO_ADDR(pipe), param->desc_base);
+	bam_write_reg_field(base, P_FIFO_SIZES(pipe), P_DESC_FIFO_SIZE,
+			    param->desc_size);
+
+	bam_write_reg_field(base, P_CTRL(pipe), P_SYS_STRM,
+			    param->stream_mode);
+
+	if (param->mode == BAM_PIPE_MODE_BAM2BAM) {
+		u32 peer_dest_addr = param->peer_phys_addr +
+				      P_EVNT_REG(param->peer_pipe);
+
+		bam_write_reg(base, P_DATA_FIFO_ADDR(pipe),
+			      param->data_base);
+		bam_write_reg_field(base, P_FIFO_SIZES(pipe),
+				    P_DATA_FIFO_SIZE, param->data_size);
+
+		bam_write_reg(base, P_EVNT_DEST_ADDR(pipe), peer_dest_addr);
+
+		SPS_DBG("sps:bam=0x%x(va).pipe=%d.peer_bam=0x%x."
+			"peer_pipe=%d.\n",
+			(u32) base, pipe,
+			(u32) param->peer_phys_addr,
+			param->peer_pipe);
+	}
+
+	/* Pipe Enable - at last */
+	bam_write_reg_field(base, P_CTRL(pipe), P_EN, 1);
+
+	return 0;
+}
+
+/**
+ * Reset the BAM pipe
+ *
+ */
+void bam_pipe_exit(void *base, u32 pipe, u32 ee)
+{
+	bam_write_reg(base, P_IRQ_EN(pipe), 0);
+
+	/* Disable the Pipe Interrupt at the BAM level */
+	bam_write_reg_field(base, IRQ_SRCS_MSK_EE(ee), (1 << pipe), 0);
+
+	/* Pipe Disable */
+	bam_write_reg_field(base, P_CTRL(pipe), P_EN, 0);
+}
+
+/**
+ * Enable a BAM pipe
+ *
+ */
+void bam_pipe_enable(void *base, u32 pipe)
+{
+	bam_write_reg_field(base, P_CTRL(pipe), P_EN, 1);
+}
+
+/**
+ * Disable a BAM pipe
+ *
+ */
+void bam_pipe_disable(void *base, u32 pipe)
+{
+	bam_write_reg_field(base, P_CTRL(pipe), P_EN, 0);
+}
+
+/**
+ * Check if a BAM pipe is enabled.
+ *
+ */
+int bam_pipe_is_enabled(void *base, u32 pipe)
+{
+	return bam_read_reg_field(base, P_CTRL(pipe), P_EN);
+}
+
+/**
+ * Configure interrupt for a BAM pipe
+ *
+ */
+void bam_pipe_set_irq(void *base, u32 pipe, enum bam_enable irq_en,
+		      u32 src_mask, u32 ee)
+{
+	bam_write_reg(base, P_IRQ_EN(pipe), src_mask);
+	bam_write_reg_field(base, IRQ_SRCS_MSK_EE(ee), (1 << pipe), irq_en);
+}
+
+/**
+ * Configure a BAM pipe for satellite MTI use
+ *
+ */
+void bam_pipe_satellite_mti(void *base, u32 pipe, u32 irq_gen_addr, u32 ee)
+{
+	bam_write_reg(base, P_IRQ_EN(pipe), 0);
+	bam_write_reg(base, P_IRQ_DEST_ADDR(pipe), irq_gen_addr);
+
+	bam_write_reg_field(base, IRQ_SIC_SEL, (1 << pipe), 1);
+	bam_write_reg_field(base, IRQ_SRCS_MSK, (1 << pipe), 1);
+}
+
+/**
+ * Configure MTI for a BAM pipe
+ *
+ */
+void bam_pipe_set_mti(void *base, u32 pipe, enum bam_enable irq_en,
+		      u32 src_mask, u32 irq_gen_addr)
+{
+	/*
+	 * MTI use is only supported on BAMs when global config is controlled
+	 * by a remote processor.
+	 * Consequently, the global configuration register to enable SIC (MTI)
+	 * support cannot be accessed.
+	 * The remote processor must be relied upon to enable the SIC and the
+	 * interrupt. Since the remote processor enables both SIC and interrupt,
+	 * the interrupt enable mask must be set to zero for polling mode.
+	 */
+
+	bam_write_reg(base, P_IRQ_DEST_ADDR(pipe), irq_gen_addr);
+
+	if (!irq_en)
+		src_mask = 0;
+
+	bam_write_reg(base, P_IRQ_EN(pipe), src_mask);
+}
+
+/**
+ * Get and Clear BAM pipe IRQ status
+ *
+ */
+u32 bam_pipe_get_and_clear_irq_status(void *base, u32 pipe)
+{
+	u32 status = 0;
+
+	status = bam_read_reg(base, P_IRQ_STTS(pipe));
+	bam_write_reg(base, P_IRQ_CLR(pipe), status);
+
+	return status;
+}
+
+/**
+ * Set write offset for a BAM pipe
+ *
+ */
+void bam_pipe_set_desc_write_offset(void *base, u32 pipe, u32 next_write)
+{
+	/*
+	 * It is not necessary to perform a read-modify-write masking to write
+	 * the P_DESC_FIFO_PEER_OFST value, since the other field in the
+	 * register (P_BYTES_CONSUMED) is read-only.
+	 */
+	bam_write_reg_field(base, P_EVNT_REG(pipe), P_DESC_FIFO_PEER_OFST,
+			    next_write);
+}
+
+/**
+ * Get write offset for a BAM pipe
+ *
+ */
+u32 bam_pipe_get_desc_write_offset(void *base, u32 pipe)
+{
+	return bam_read_reg_field(base, P_EVNT_REG(pipe),
+				  P_DESC_FIFO_PEER_OFST);
+}
+
+/**
+ * Get read offset for a BAM pipe
+ *
+ */
+u32 bam_pipe_get_desc_read_offset(void *base, u32 pipe)
+{
+	return bam_read_reg_field(base, P_SW_OFSTS(pipe), SW_DESC_OFST);
+}
+
+/**
+ * Configure inactivity timer count for a BAM pipe
+ *
+ */
+void bam_pipe_timer_config(void *base, u32 pipe, enum bam_pipe_timer_mode mode,
+			 u32 timeout_count)
+{
+	bam_write_reg_field(base, P_TIMER_CTRL(pipe), P_TIMER_MODE, mode);
+	bam_write_reg_field(base, P_TIMER_CTRL(pipe), P_TIMER_TRSHLD,
+			    timeout_count);
+}
+
+/**
+ * Reset inactivity timer for a BAM pipe
+ *
+ */
+void bam_pipe_timer_reset(void *base, u32 pipe)
+{
+	/* reset */
+	bam_write_reg_field(base, P_TIMER_CTRL(pipe), P_TIMER_RST, 0);
+	/* active */
+	bam_write_reg_field(base, P_TIMER_CTRL(pipe), P_TIMER_RST, 1);
+}
+
+/**
+ * Get inactivity timer count for a BAM pipe
+ *
+ */
+u32 bam_pipe_timer_get_count(void *base, u32 pipe)
+{
+	return bam_read_reg(base, P_TIMER(pipe));
+}
diff --git a/drivers/platform/msm/sps/bam.h b/drivers/platform/msm/sps/bam.h
new file mode 100644
index 0000000..522073c
--- /dev/null
+++ b/drivers/platform/msm/sps/bam.h
@@ -0,0 +1,394 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Bus-Access-Manager (BAM) Hardware manager functions API. */
+
+#ifndef _BAM_H_
+#define _BAM_H_
+
+#include <linux/types.h>	/* u32 */
+#include <linux/io.h>		/* ioread32() */
+#include <linux/bitops.h>	/* find_first_bit() */
+
+/* Pipe mode */
+enum bam_pipe_mode {
+	BAM_PIPE_MODE_BAM2BAM = 0,	/* BAM to BAM */
+	BAM_PIPE_MODE_SYSTEM = 1,	/* BAM to/from System Memory */
+};
+
+/* Pipe direction */
+enum bam_pipe_dir {
+	/* The Pipe Reads data from data-fifo or system-memory */
+	BAM_PIPE_CONSUMER = 0,
+	/* The Pipe Writes data to data-fifo or system-memory */
+	BAM_PIPE_PRODUCER = 1,
+};
+
+/* Stream mode Type */
+enum bam_stream_mode {
+	BAM_STREAM_MODE_DISABLE = 0,
+	BAM_STREAM_MODE_ENABLE = 1,
+};
+
+/* Enable Type */
+enum bam_enable {
+	BAM_DISABLE = 0,
+	BAM_ENABLE = 1,
+};
+
+/* Pipe timer mode */
+enum bam_pipe_timer_mode {
+	BAM_PIPE_TIMER_ONESHOT = 0,
+	BAM_PIPE_TIMER_PERIODIC = 1,
+};
+
+struct transfer_descriptor {
+	u32 addr;	/* Buffer physical address */
+	u32 size:16;	/* Buffer size in bytes */
+	u32 flags:16;	/* Flag bitmask (see SPS_IOVEC_FLAG_ #defines) */
+}  __packed;
+
+/* BAM pipe initialization parameters */
+struct bam_pipe_parameters {
+	u16 event_threshold;
+	u32 pipe_irq_mask;
+	enum bam_pipe_dir dir;
+	enum bam_pipe_mode mode;
+	u32 desc_base;	/* Physical address of descriptor FIFO */
+	u32 desc_size;	/* Size (bytes) of descriptor FIFO */
+	enum bam_stream_mode stream_mode;
+	u32 ee;		/* BAM execution environment index */
+
+	/* The following are only valid if mode is BAM2BAM */
+	u32 peer_phys_addr;
+	u32 peer_pipe;
+	u32 data_base;	/* Physical address of data FIFO */
+	u32 data_size;	/* Size (bytes) of data FIFO */
+};
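+
+/*
+ * Illustrative initialization of the parameters for a system-mode consumer
+ * pipe (a sketch; "desc_fifo_phys", "pipe_irq_mask" and the sizes shown are
+ * hypothetical values a real client would supply):
+ *
+ *	struct bam_pipe_parameters params = {
+ *		.dir = BAM_PIPE_CONSUMER,
+ *		.mode = BAM_PIPE_MODE_SYSTEM,
+ *		.desc_base = desc_fifo_phys,
+ *		.desc_size = 0x800,
+ *		.event_threshold = 0x10,
+ *		.pipe_irq_mask = pipe_irq_mask,
+ *		.ee = 0,
+ *	};
+ *
+ *	bam_pipe_init(base, pipe_index, &params, params.ee);
+ */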
+
+/**
+ * Initialize a BAM device
+ *
+ * This function initializes a BAM device.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @ee - BAM execution environment index
+ *
+ * @summing_threshold - summing threshold (global for all pipes)
+ *
+ * @irq_mask - error interrupts mask
+ *
+ * @version - return BAM hardware version
+ *
+ * @num_pipes - return number of pipes
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int bam_init(void *base,
+		u32 ee,
+		u16 summing_threshold,
+		u32 irq_mask, u32 *version, u32 *num_pipes);
+
+/**
+ * Initialize BAM device security execution environment
+ *
+ * @base - BAM virtual base address.
+ *
+ * @ee - BAM execution environment index
+ *
+ * @vmid - virtual master identifier
+ *
+ * @pipe_mask - bit mask of pipes to assign to EE
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int bam_security_init(void *base, u32 ee, u32 vmid, u32 pipe_mask);
+
+/**
+ * Check a BAM device
+ *
+ * This function verifies that a BAM device is enabled and gathers
+ *    the hardware configuration.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @version - return BAM hardware version
+ *
+ * @num_pipes - return number of pipes
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int bam_check(void *base, u32 *version, u32 *num_pipes);
+
+/**
+ * Disable a BAM device
+ *
+ * This function disables a BAM device.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @ee - BAM execution environment index
+ *
+ */
+void bam_exit(void *base, u32 ee);
+
+/**
+ * Get BAM global IRQ status
+ *
+ * This function gets BAM global IRQ status.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @ee - BAM execution environment index
+ *
+ * @mask - active pipes mask.
+ *
+ * @return IRQ status
+ *
+ */
+u32 bam_get_irq_status(void *base, u32 ee, u32 mask);
+
+/**
+ * Initialize a BAM pipe
+ *
+ * This function initializes a BAM pipe.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @param - bam pipe parameters.
+ *
+ * @ee - BAM execution environment index
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int bam_pipe_init(void *base, u32 pipe, struct bam_pipe_parameters *param,
+					u32 ee);
+
+/**
+ * Reset the BAM pipe
+ *
+ * This function resets the BAM pipe.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @ee - BAM execution environment index
+ *
+ */
+void bam_pipe_exit(void *base, u32 pipe, u32 ee);
+
+/**
+ * Enable a BAM pipe
+ *
+ * This function enables a BAM pipe.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ */
+void bam_pipe_enable(void *base, u32 pipe);
+
+/**
+ * Disable a BAM pipe
+ *
+ * This function disables a BAM pipe.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ */
+void bam_pipe_disable(void *base, u32 pipe);
+
+/**
+ * Get a BAM pipe enable state
+ *
+ * This function determines if a BAM pipe is enabled.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @return true if enabled, false if disabled
+ *
+ */
+int bam_pipe_is_enabled(void *base, u32 pipe);
+
+/**
+ * Configure interrupt for a BAM pipe
+ *
+ * This function configures the interrupt for a BAM pipe.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @irq_en - enable or disable interrupt
+ *
+ * @src_mask - interrupt source mask, set regardless of whether
+ *    interrupt is disabled
+ *
+ * @ee - BAM execution environment index
+ *
+ */
+void bam_pipe_set_irq(void *base, u32 pipe, enum bam_enable irq_en,
+		      u32 src_mask, u32 ee);
+
+/**
+ * Configure a BAM pipe for satellite MTI use
+ *
+ * This function configures a BAM pipe for satellite MTI use.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @irq_gen_addr - physical address written to generate MTI
+ *
+ * @ee - BAM execution environment index
+ *
+ */
+void bam_pipe_satellite_mti(void *base, u32 pipe, u32 irq_gen_addr, u32 ee);
+
+/**
+ * Configure MTI for a BAM pipe
+ *
+ * This function configures the MTI interrupt for a BAM pipe.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @irq_en - enable or disable interrupt
+ *
+ * @src_mask - interrupt source mask, set regardless of whether
+ *    interrupt is disabled
+ *
+ * @irq_gen_addr - physical address written to generate MTI
+ *
+ */
+void bam_pipe_set_mti(void *base, u32 pipe, enum bam_enable irq_en,
+		      u32 src_mask, u32 irq_gen_addr);
+
+/**
+ * Get and Clear BAM pipe IRQ status
+ *
+ * This function gets and clears BAM pipe IRQ status.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @return IRQ status
+ *
+ */
+u32 bam_pipe_get_and_clear_irq_status(void *base, u32 pipe);
+
+/**
+ * Set write offset for a BAM pipe
+ *
+ * This function sets the write offset for a BAM pipe.  This is
+ *    the offset that is maintained by software in system mode.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @next_write - descriptor FIFO write offset
+ *
+ */
+void bam_pipe_set_desc_write_offset(void *base, u32 pipe, u32 next_write);
+
+/**
+ * Get write offset for a BAM pipe
+ *
+ * This function gets the write offset for a BAM pipe.  This is
+ *    the offset that is maintained by the pipe's peer pipe or by software.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @return descriptor FIFO write offset
+ *
+ */
+u32 bam_pipe_get_desc_write_offset(void *base, u32 pipe);
+
+/**
+ * Get read offset for a BAM pipe
+ *
+ * This function gets the read offset for a BAM pipe.  This is
+ *    the offset that is maintained by the pipe in system mode.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @return descriptor FIFO read offset
+ *
+ */
+u32 bam_pipe_get_desc_read_offset(void *base, u32 pipe);
+
+/**
+ * Configure inactivity timer count for a BAM pipe
+ *
+ * This function configures the inactivity timer count for a BAM pipe.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @mode - timer operating mode
+ *
+ * @timeout_count - timeout count
+ *
+ */
+void bam_pipe_timer_config(void *base, u32 pipe,
+			   enum bam_pipe_timer_mode mode,
+			   u32 timeout_count);
+
+/**
+ * Reset inactivity timer for a BAM pipe
+ *
+ * This function resets the inactivity timer count for a BAM pipe.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ */
+void bam_pipe_timer_reset(void *base, u32 pipe);
+
+/**
+ * Get inactivity timer count for a BAM pipe
+ *
+ * This function gets the inactivity timer count for a BAM pipe.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @return inactivity timer count
+ *
+ */
+u32 bam_pipe_timer_get_count(void *base, u32 pipe);
+
+#endif				/* _BAM_H_ */
diff --git a/drivers/platform/msm/sps/sps.c b/drivers/platform/msm/sps/sps.c
new file mode 100644
index 0000000..9b5d87c
--- /dev/null
+++ b/drivers/platform/msm/sps/sps.c
@@ -0,0 +1,1502 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Smart-Peripheral-Switch (SPS) Module. */
+
+#include <linux/types.h>	/* u32 */
+#include <linux/kernel.h>	/* pr_info() */
+#include <linux/module.h>	/* module_init() */
+#include <linux/slab.h>		/* kzalloc() */
+#include <linux/mutex.h>	/* mutex */
+#include <linux/device.h>	/* device */
+#include <linux/fs.h>		/* alloc_chrdev_region() */
+#include <linux/list.h>		/* list_head */
+#include <linux/memory.h>	/* memset */
+#include <linux/io.h>		/* ioremap() */
+#include <linux/clk.h>		/* clk_enable() */
+#include <linux/platform_device.h>	/* platform_get_resource_byname() */
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <mach/msm_sps.h>	/* msm_sps_platform_data */
+
+#include "sps_bam.h"
+#include "spsi.h"
+#include "sps_core.h"
+
+#define SPS_DRV_NAME "msm_sps"	/* must match the platform_device name */
+
+/**
+ *  SPS Driver state struct
+ */
+struct sps_drv {
+	struct class *dev_class;
+	dev_t dev_num;
+	struct device *dev;
+	struct clk *pmem_clk;
+	struct clk *bamdma_clk;
+	struct clk *dfab_clk;
+
+	int is_ready;
+
+	/* Platform data */
+	u32 pipemem_phys_base;
+	u32 pipemem_size;
+	u32 bamdma_bam_phys_base;
+	u32 bamdma_bam_size;
+	u32 bamdma_dma_phys_base;
+	u32 bamdma_dma_size;
+	u32 bamdma_irq;
+	u32 bamdma_restricted_pipes;
+
+	/* Driver options bitflags (see SPS_OPT_*) */
+	u32 options;
+
+	/* Mutex to protect BAM and connection queues */
+	struct mutex lock;
+
+	/* BAM devices */
+	struct list_head bams_q;
+
+	char *hal_bam_version;
+
+	/* Connection control state */
+	struct sps_rm connection_ctrl;
+};
+
+
+/**
+ *  SPS driver state
+ */
+static struct sps_drv *sps;
+
+static void sps_device_de_init(void);
+
+#ifdef CONFIG_DEBUG_FS
+static int sps_debugfs_enabled;
+static char *debugfs_buf;
+static int debugfs_buf_size;
+static int debugfs_buf_used;
+static int wraparound;
+
+/* record debug info for debugfs */
+void sps_debugfs_record(const char *msg)
+{
+	if (sps_debugfs_enabled) {
+		if (debugfs_buf_used + MAX_MSG_LEN >= debugfs_buf_size) {
+			debugfs_buf_used = 0;
+			wraparound = true;
+		}
+		debugfs_buf_used += scnprintf(debugfs_buf + debugfs_buf_used,
+				debugfs_buf_size - debugfs_buf_used, "%s", msg);
+
+		if (wraparound)
+			scnprintf(debugfs_buf + debugfs_buf_used,
+					debugfs_buf_size - debugfs_buf_used,
+					"\n**** end line of sps log ****\n\n");
+	}
+}
+
+/* read the recorded debug info to userspace */
+static ssize_t sps_read_info(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int ret;
+	int size;
+
+	if (wraparound)
+		size = debugfs_buf_size - MAX_MSG_LEN;
+	else
+		size = debugfs_buf_used;
+
+	ret = simple_read_from_buffer(ubuf, count, ppos,
+			debugfs_buf, size);
+
+	return ret;
+}
+
+/*
+ * set the buffer size (in KB) for debug info
+ * if input is 0, then stop recording debug info into buffer
+ */
+static ssize_t sps_set_info(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	static char str[5];
+	int i, buf_size_kb = 0;
+
+	memset(str, 0, sizeof(str));
+	missing = copy_from_user(str, buf, sizeof(str));
+	if (missing)
+		return -EFAULT;
+
+	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+		buf_size_kb = (buf_size_kb * 10) + (str[i] - '0');
+
+	pr_info("sps:debugfs buffer size is %dKB\n", buf_size_kb);
+
+	if (sps_debugfs_enabled && (buf_size_kb == 0)) {
+		sps_debugfs_enabled = false;
+		kfree(debugfs_buf);
+		debugfs_buf = NULL;
+		debugfs_buf_used = 0;
+		debugfs_buf_size = 0;
+		wraparound = false;
+	} else if (!sps_debugfs_enabled && (buf_size_kb > 0)) {
+		debugfs_buf_size = buf_size_kb * SZ_1K;
+
+		debugfs_buf = kzalloc(sizeof(char) * debugfs_buf_size,
+				GFP_KERNEL);
+		if (!debugfs_buf) {
+			debugfs_buf_size = 0;
+			pr_err("sps:fail to allocate memory for debug_fs.\n");
+			return -ENOMEM;
+		}
+
+		sps_debugfs_enabled = true;
+		debugfs_buf_used = 0;
+		wraparound = false;
+	} else if (sps_debugfs_enabled && (buf_size_kb > 0))
+		pr_info("sps:should disable debugfs before changing "
+				"buffer size.\n");
+
+	return sps_debugfs_enabled;
+}
+
+const struct file_operations sps_info_ops = {
+	.read = sps_read_info,
+	.write = sps_set_info,
+};
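+
+/*
+ * Illustrative use from a shell (assuming debugfs is mounted at
+ * /sys/kernel/debug):
+ *
+ *	echo 8 > /sys/kernel/debug/sps/info	# record into an 8KB buffer
+ *	cat /sys/kernel/debug/sps/info		# dump the recorded log
+ *	echo 0 > /sys/kernel/debug/sps/info	# stop recording and free it
+ */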
+
+struct dentry *dent;
+struct dentry *dfile;
+static void sps_debugfs_init(void)
+{
+	sps_debugfs_enabled = false;
+	debugfs_buf_size = 0;
+	debugfs_buf_used = 0;
+	wraparound = false;
+
+	dent = debugfs_create_dir("sps", 0);
+	if (IS_ERR(dent)) {
+		pr_err("sps:fail to create the folder for debug_fs.\n");
+		return;
+	}
+
+	dfile = debugfs_create_file("info", 0444, dent, 0,
+			&sps_info_ops);
+	if (!dfile || IS_ERR(dfile)) {
+		pr_err("sps:fail to create the file for debug_fs.\n");
+		debugfs_remove(dent);
+		return;
+	}
+}
+
+static void sps_debugfs_exit(void)
+{
+	if (dfile)
+		debugfs_remove(dfile);
+	if (dent)
+		debugfs_remove(dent);
+	kfree(debugfs_buf);
+	debugfs_buf = NULL;
+}
+#endif
+
+/**
+ * Initialize SPS device
+ *
+ * This function initializes the SPS device.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_device_init(void)
+{
+	int result;
+	int success;
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+	struct sps_bam_props bamdma_props = {0};
+#endif
+
+	SPS_DBG("sps_device_init");
+
+	success = false;
+
+	result = sps_mem_init(sps->pipemem_phys_base, sps->pipemem_size);
+	if (result) {
+		SPS_ERR("SPS memory init failed");
+		goto exit_err;
+	}
+
+	INIT_LIST_HEAD(&sps->bams_q);
+	mutex_init(&sps->lock);
+
+	if (sps_rm_init(&sps->connection_ctrl, sps->options)) {
+		SPS_ERR("Failed to init SPS resource manager");
+		goto exit_err;
+	}
+
+	result = sps_bam_driver_init(sps->options);
+	if (result) {
+		SPS_ERR("SPS BAM driver init failed");
+		goto exit_err;
+	}
+
+	/* Initialize the BAM DMA device */
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+	bamdma_props.phys_addr = sps->bamdma_bam_phys_base;
+	bamdma_props.virt_addr = ioremap(sps->bamdma_bam_phys_base,
+					 sps->bamdma_bam_size);
+
+	if (!bamdma_props.virt_addr) {
+		SPS_ERR("sps:Failed to IO map BAM-DMA BAM registers.\n");
+		goto exit_err;
+	}
+
+	SPS_DBG("sps:bamdma_bam.phys=0x%x.virt=0x%x.",
+		bamdma_props.phys_addr,
+		(u32) bamdma_props.virt_addr);
+
+	bamdma_props.periph_phys_addr =	sps->bamdma_dma_phys_base;
+	bamdma_props.periph_virt_size = sps->bamdma_dma_size;
+	bamdma_props.periph_virt_addr = ioremap(sps->bamdma_dma_phys_base,
+						sps->bamdma_dma_size);
+
+	if (!bamdma_props.periph_virt_addr) {
+		SPS_ERR("sps:Failed to IO map BAM-DMA peripheral reg.\n");
+		goto exit_err;
+	}
+
+	SPS_DBG("sps:bamdma_dma.phys=0x%x.virt=0x%x.",
+		bamdma_props.periph_phys_addr,
+		(u32) bamdma_props.periph_virt_addr);
+
+	bamdma_props.irq = sps->bamdma_irq;
+
+	bamdma_props.event_threshold = 0x10;	/* Pipe event threshold */
+	bamdma_props.summing_threshold = 0x10;	/* BAM event threshold */
+
+	bamdma_props.options = SPS_BAM_OPT_BAMDMA;
+	bamdma_props.restricted_pipes =	sps->bamdma_restricted_pipes;
+
+	result = sps_dma_init(&bamdma_props);
+	if (result) {
+		SPS_ERR("SPS BAM DMA driver init failed");
+		goto exit_err;
+	}
+#endif /* CONFIG_SPS_SUPPORT_BAMDMA */
+
+	result = sps_map_init(NULL, sps->options);
+	if (result) {
+		SPS_ERR("SPS connection mapping init failed");
+		goto exit_err;
+	}
+
+	success = true;
+exit_err:
+	if (!success) {
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+		sps_device_de_init();
+#endif
+		return SPS_ERROR;
+	}
+
+	return 0;
+}
+
+/**
+ * De-initialize SPS device
+ *
+ * This function de-initializes the SPS device.
+ *
+ */
+static void sps_device_de_init(void)
+{
+	SPS_DBG("%s.", __func__);
+
+	if (sps != NULL) {
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+		sps_dma_de_init();
+#endif
+		/* Are there any remaining BAM registrations? */
+		if (!list_empty(&sps->bams_q))
+			SPS_ERR("SPS de-init: BAMs are still registered");
+
+		sps_map_de_init();
+
+		kfree(sps);
+	}
+
+	sps_mem_de_init();
+}
+
+/**
+ * Initialize client state context
+ *
+ * This function initializes a client state context struct.
+ *
+ * @client - Pointer to client state context
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_client_init(struct sps_pipe *client)
+{
+	if (client == NULL)
+		return -EINVAL;
+
+	/*
+	 * NOTE: Cannot store any state within the SPS driver because
+	 * the driver init function may not have been called yet.
+	 */
+	memset(client, 0, sizeof(*client));
+	sps_rm_config_init(&client->connect);
+
+	client->client_state = SPS_STATE_DISCONNECT;
+	client->bam = NULL;
+
+	return 0;
+}
+
+/**
+ * De-initialize client state context
+ *
+ * This function de-initializes a client state context struct.
+ *
+ * @client - Pointer to client state context
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_client_de_init(struct sps_pipe *client)
+{
+	if (client->client_state != SPS_STATE_DISCONNECT) {
+		SPS_ERR("De-init client in connected state: 0x%x",
+				   client->client_state);
+		return SPS_ERROR;
+	}
+
+	client->bam = NULL;
+	client->map = NULL;
+	memset(&client->connect, 0, sizeof(client->connect));
+
+	return 0;
+}
+
+/**
+ * Find the BAM device from the physical address
+ *
+ * This function finds a BAM device in the BAM registration list that
+ * matches the specified physical address.
+ *
+ * @phys_addr - physical address of the BAM
+ *
+ * @return - pointer to the BAM device struct, or NULL on error
+ *
+ */
+static struct sps_bam *phy2bam(u32 phys_addr)
+{
+	struct sps_bam *bam;
+
+	list_for_each_entry(bam, &sps->bams_q, list) {
+		if (bam->props.phys_addr == phys_addr)
+			return bam;
+	}
+
+	return NULL;
+}
+
+/**
+ * Find the BAM device from the handle
+ *
+ * This function finds a BAM device in the BAM registration list that
+ * matches the specified device handle.
+ *
+ * @h - device handle of the BAM
+ *
+ * @return - pointer to the BAM device struct, or NULL on error
+ *
+ */
+struct sps_bam *sps_h2bam(u32 h)
+{
+	struct sps_bam *bam;
+
+	if (h == SPS_DEV_HANDLE_MEM || h == SPS_DEV_HANDLE_INVALID)
+		return NULL;
+
+	list_for_each_entry(bam, &sps->bams_q, list) {
+		if ((u32) bam == (u32) h)
+			return bam;
+	}
+
+	SPS_ERR("Can't find BAM device for handle 0x%x.", h);
+
+	return NULL;
+}
+
+/**
+ * Lock BAM device
+ *
+ * This function obtains the BAM mutex on the client's connection.
+ *
+ * @pipe - pointer to client pipe state
+ *
+ * @return pointer to BAM device struct, or NULL on error
+ *
+ */
+static struct sps_bam *sps_bam_lock(struct sps_pipe *pipe)
+{
+	struct sps_bam *bam;
+	u32 pipe_index;
+
+	bam = pipe->bam;
+	if (bam == NULL) {
+		SPS_ERR("Connection not in connected state");
+		return NULL;
+	}
+
+	mutex_lock(&bam->lock);
+
+	/* Verify client owns this pipe */
+	pipe_index = pipe->pipe_index;
+	if (pipe_index >= bam->props.num_pipes ||
+	    pipe != bam->pipes[pipe_index]) {
+		SPS_ERR("Client not owner of BAM 0x%x pipe: %d (max %d)",
+			bam->props.phys_addr, pipe_index,
+			bam->props.num_pipes);
+		mutex_unlock(&bam->lock);
+		return NULL;
+	}
+
+	return bam;
+}
+
+/**
+ * Unlock BAM device
+ *
+ * This function releases the BAM mutex on the client's connection.
+ *
+ * @bam - pointer to BAM device struct
+ *
+ */
+static inline void sps_bam_unlock(struct sps_bam *bam)
+{
+	mutex_unlock(&bam->lock);
+}
+
+/**
+ * Connect an SPS connection end point
+ *
+ */
+int sps_connect(struct sps_pipe *h, struct sps_connect *connect)
+{
+	struct sps_pipe *pipe = h;
+	u32 dev;
+	struct sps_bam *bam;
+	int result;
+
+	if (h == NULL || connect == NULL)
+		return SPS_ERROR;
+
+	if (sps == NULL)
+		return -ENODEV;
+
+	if (!sps->is_ready) {
+		SPS_ERR("sps_connect: sps driver not ready.\n");
+		return -EAGAIN;
+	}
+
+	mutex_lock(&sps->lock);
+	/*
+	 * Must lock the BAM device at the top level function, so must
+	 * determine which BAM is the target for the connection
+	 */
+	if (connect->mode == SPS_MODE_SRC)
+		dev = connect->source;
+	else
+		dev = connect->destination;
+
+	bam = sps_h2bam(dev);
+	if (bam == NULL) {
+		SPS_ERR("Invalid BAM device handle: 0x%x", dev);
+		result = SPS_ERROR;
+		goto exit_err;
+	}
+
+	SPS_DBG("sps_connect: bam 0x%x src 0x%x dest 0x%x mode %s",
+			BAM_ID(bam),
+			connect->source,
+			connect->destination,
+			connect->mode == SPS_MODE_SRC ? "SRC" : "DEST");
+
+	/* Allocate resources for the specified connection */
+	pipe->connect = *connect;
+	mutex_lock(&bam->lock);
+	result = sps_rm_state_change(pipe, SPS_STATE_ALLOCATE);
+	mutex_unlock(&bam->lock);
+	if (result)
+		goto exit_err;
+
+	/* Configure the connection */
+	mutex_lock(&bam->lock);
+	result = sps_rm_state_change(pipe, SPS_STATE_CONNECT);
+	mutex_unlock(&bam->lock);
+	if (result) {
+		sps_disconnect(h);
+		goto exit_err;
+	}
+
+exit_err:
+	mutex_unlock(&sps->lock);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_connect);
+
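+/*
+ * Illustrative client sketch (not part of this driver): a minimal
+ * peripheral-to-memory producer setup using the calls defined in this file.
+ * The peripheral_bam_handle and my_eot_callback names are placeholders, and
+ * the descriptor FIFO buffer for the connection (typically carved out with
+ * sps_alloc_mem()) is omitted for brevity.
+ *
+ *	struct sps_pipe *pipe = sps_alloc_endpoint();
+ *	struct sps_connect conn;
+ *	struct sps_register_event ev;
+ *
+ *	sps_get_config(pipe, &conn);
+ *	conn.source      = peripheral_bam_handle;
+ *	conn.destination = SPS_DEV_HANDLE_MEM;
+ *	conn.mode        = SPS_MODE_SRC;
+ *	conn.options     = SPS_O_EOT;
+ *	sps_connect(pipe, &conn);
+ *
+ *	memset(&ev, 0, sizeof(ev));
+ *	ev.options  = SPS_O_EOT;
+ *	ev.mode     = SPS_TRIGGER_CALLBACK;
+ *	ev.callback = my_eot_callback;
+ *	sps_register_event(pipe, &ev);
+ */
+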
+/**
+ * Disconnect an SPS connection end point
+ *
+ * This function disconnects an SPS connection end point.
+ * The SPS hardware associated with that end point will be disabled.
+ * For a connection involving system memory (SPS_DEV_HANDLE_MEM), all
+ * connection resources are deallocated.  For a peripheral-to-peripheral
+ * connection, the resources associated with the connection will not be
+ * deallocated until both end points are closed.
+ *
+ * The client must call sps_connect() for the handle before calling
+ * this function.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_disconnect(struct sps_pipe *h)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_pipe *check;
+	struct sps_bam *bam;
+	int result;
+
+	if (pipe == NULL)
+		return SPS_ERROR;
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	SPS_DBG("sps_disconnect: bam 0x%x src 0x%x dest 0x%x mode %s",
+			BAM_ID(bam),
+			pipe->connect.source,
+			pipe->connect.destination,
+			pipe->connect.mode == SPS_MODE_SRC ? "SRC" : "DEST");
+
+	result = SPS_ERROR;
+	/* Cross-check client with map table */
+	if (pipe->connect.mode == SPS_MODE_SRC)
+		check = pipe->map->client_src;
+	else
+		check = pipe->map->client_dest;
+
+	if (check != pipe) {
+		SPS_ERR("Client context is corrupt");
+		goto exit_err;
+	}
+
+	/* Disconnect the BAM pipe */
+	result = sps_rm_state_change(pipe, SPS_STATE_DISCONNECT);
+	if (result)
+		goto exit_err;
+
+	sps_rm_config_init(&pipe->connect);
+	result = 0;
+
+exit_err:
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_disconnect);
+
+/**
+ * Register an event object for an SPS connection end point
+ *
+ */
+int sps_register_event(struct sps_pipe *h, struct sps_register_event *reg)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	SPS_DBG("%s.", __func__);
+
+	if (sps == NULL)
+		return -ENODEV;
+
+	if (!sps->is_ready) {
+		SPS_ERR("sps_connect.sps driver not ready.\n");
+		return -EAGAIN;
+	}
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	result = sps_bam_pipe_reg_event(bam, pipe->pipe_index, reg);
+	sps_bam_unlock(bam);
+	if (result)
+		SPS_ERR("Failed to register event for BAM 0x%x pipe %d",
+			pipe->bam->props.phys_addr, pipe->pipe_index);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_register_event);
+
+/**
+ * Enable an SPS connection end point
+ *
+ */
+int sps_flow_on(struct sps_pipe *h)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	SPS_DBG("%s.", __func__);
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	/* Enable the pipe data flow */
+	result = sps_rm_state_change(pipe, SPS_STATE_ENABLE);
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_flow_on);
+
+/**
+ * Disable an SPS connection end point
+ *
+ */
+int sps_flow_off(struct sps_pipe *h, enum sps_flow_off mode)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	SPS_DBG("%s.", __func__);
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	/* Disable the pipe data flow */
+	result = sps_rm_state_change(pipe, SPS_STATE_DISABLE);
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_flow_off);
+
+/**
+ * Perform a DMA transfer on an SPS connection end point
+ *
+ */
+int sps_transfer(struct sps_pipe *h, struct sps_transfer *transfer)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	SPS_DBG("%s.", __func__);
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	result = sps_bam_pipe_transfer(bam, pipe->pipe_index, transfer);
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_transfer);
+
+/**
+ * Perform a single DMA transfer on an SPS connection end point
+ *
+ */
+int sps_transfer_one(struct sps_pipe *h, u32 addr, u32 size,
+		     void *user, u32 flags)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	SPS_DBG("%s.", __func__);
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	result = sps_bam_pipe_transfer_one(bam, pipe->pipe_index,
+					   addr, size, user, flags);
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_transfer_one);
+
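+/*
+ * Illustrative sketch (not part of this driver): submitting one buffer and
+ * harvesting its completion.  dma_addr, len and my_cookie are placeholders;
+ * the address must be a bus address the BAM can reach (for example one
+ * returned by dma_map_single()).
+ *
+ *	sps_transfer_one(pipe, dma_addr, len, my_cookie, SPS_IOVEC_FLAG_EOT);
+ *
+ *	Interrupt-driven pipes deliver the registered callback or completion,
+ *	after which the queued notification is read back with:
+ *
+ *	struct sps_event_notify note;
+ *	sps_get_event(pipe, &note);
+ *
+ *	Polled pipes (SPS_O_POLL) instead pull completed descriptors with
+ *	sps_get_iovec().
+ */
+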
+/**
+ * Read event queue for an SPS connection end point
+ *
+ */
+int sps_get_event(struct sps_pipe *h, struct sps_event_notify *notify)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	SPS_DBG("%s.", __func__);
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	result = sps_bam_pipe_get_event(bam, pipe->pipe_index, notify);
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_get_event);
+
+/**
+ * Determine whether an SPS connection end point FIFO is empty
+ *
+ */
+int sps_is_pipe_empty(struct sps_pipe *h, u32 *empty)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	SPS_DBG("%s.", __func__);
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	result = sps_bam_pipe_is_empty(bam, pipe->pipe_index, empty);
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_is_pipe_empty);
+
+/**
+ * Get number of free transfer entries for an SPS connection end point
+ *
+ */
+int sps_get_free_count(struct sps_pipe *h, u32 *count)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	SPS_DBG("%s.", __func__);
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	result = sps_bam_get_free_count(bam, pipe->pipe_index, count);
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_get_free_count);
+
+/**
+ * Reset an SPS BAM device
+ *
+ */
+int sps_device_reset(u32 dev)
+{
+	struct sps_bam *bam;
+	int result;
+
+	SPS_DBG("%s: dev = 0x%x", __func__, dev);
+
+	mutex_lock(&sps->lock);
+	/* Search for the target BAM device */
+	bam = sps_h2bam(dev);
+	if (bam == NULL) {
+		SPS_ERR("Invalid BAM device handle: 0x%x", dev);
+		result = SPS_ERROR;
+		goto exit_err;
+	}
+
+	mutex_lock(&bam->lock);
+	result = sps_bam_reset(bam);
+	mutex_unlock(&bam->lock);
+	if (result) {
+		SPS_ERR("Failed to reset BAM device: 0x%x", dev);
+		goto exit_err;
+	}
+
+exit_err:
+	mutex_unlock(&sps->lock);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_device_reset);
+
+/**
+ * Get the configuration parameters for an SPS connection end point
+ *
+ */
+int sps_get_config(struct sps_pipe *h, struct sps_connect *config)
+{
+	struct sps_pipe *pipe = h;
+
+	if (config == NULL) {
+		SPS_ERR("Config pointer is NULL");
+		return SPS_ERROR;
+	}
+
+	/* Copy current client connection state */
+	*config = pipe->connect;
+
+	return 0;
+}
+EXPORT_SYMBOL(sps_get_config);
+
+/**
+ * Set the configuration parameters for an SPS connection end point
+ *
+ */
+int sps_set_config(struct sps_pipe *h, struct sps_connect *config)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	SPS_DBG("%s.", __func__);
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	result = sps_bam_pipe_set_params(bam, pipe->pipe_index,
+					 config->options);
+	if (result == 0)
+		pipe->connect.options = config->options;
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_set_config);
+
+/**
+ * Set ownership of an SPS connection end point
+ *
+ */
+int sps_set_owner(struct sps_pipe *h, enum sps_owner owner,
+		  struct sps_satellite *connect)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	if (owner != SPS_OWNER_REMOTE) {
+		SPS_ERR("Unsupported ownership state: %d", owner);
+		return SPS_ERROR;
+	}
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	result = sps_bam_set_satellite(bam, pipe->pipe_index);
+	if (result)
+		goto exit_err;
+
+	/* Return satellite connect info */
+	if (connect == NULL)
+		goto exit_err;
+
+	if (pipe->connect.mode == SPS_MODE_SRC) {
+		connect->dev = pipe->map->src.bam_phys;
+		connect->pipe_index = pipe->map->src.pipe_index;
+	} else {
+		connect->dev = pipe->map->dest.bam_phys;
+		connect->pipe_index = pipe->map->dest.pipe_index;
+	}
+	connect->config = SPS_CONFIG_SATELLITE;
+	connect->options = (enum sps_option) 0;
+
+exit_err:
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_set_owner);
+
+/**
+ * Allocate memory from the SPS Pipe-Memory.
+ *
+ */
+int sps_alloc_mem(struct sps_pipe *h, enum sps_mem mem,
+		  struct sps_mem_buffer *mem_buffer)
+{
+	if (sps == NULL)
+		return -ENODEV;
+
+	if (!sps->is_ready) {
+		SPS_ERR("sps_alloc_mem.sps driver not ready.\n");
+		return -EAGAIN;
+	}
+
+	if (mem_buffer == NULL || mem_buffer->size == 0)
+		return SPS_ERROR;
+
+	mem_buffer->phys_base = sps_mem_alloc_io(mem_buffer->size);
+	if (mem_buffer->phys_base == SPS_ADDR_INVALID)
+		return SPS_ERROR;
+
+	mem_buffer->base = spsi_get_mem_ptr(mem_buffer->phys_base);
+
+	return 0;
+}
+EXPORT_SYMBOL(sps_alloc_mem);
+
+/**
+ * Free memory from the SPS Pipe-Memory.
+ *
+ */
+int sps_free_mem(struct sps_pipe *h, struct sps_mem_buffer *mem_buffer)
+{
+	if (mem_buffer == NULL || mem_buffer->phys_base == SPS_ADDR_INVALID)
+		return SPS_ERROR;
+
+	sps_mem_free_io(mem_buffer->phys_base, mem_buffer->size);
+
+	return 0;
+}
+EXPORT_SYMBOL(sps_free_mem);
+
+/**
+ * Register a BAM device
+ *
+ */
+int sps_register_bam_device(const struct sps_bam_props *bam_props,
+				u32 *dev_handle)
+{
+	struct sps_bam *bam = NULL;
+	void *virt_addr = NULL;
+	u32 manage;
+	int ok;
+	int result;
+
+	if (sps == NULL)
+		return SPS_ERROR;
+
+	if (bam_props == NULL || dev_handle == NULL)
+		return SPS_ERROR;
+
+	/* BAM-DMA is registered internally during power-up */
+	if ((!sps->is_ready) && !(bam_props->options & SPS_BAM_OPT_BAMDMA)) {
+		SPS_ERR("sps_register_bam_device: sps driver not ready.\n");
+		return -EAGAIN;
+	}
+
+	/* Check BAM parameters */
+	manage = bam_props->manage & SPS_BAM_MGR_ACCESS_MASK;
+	if (manage != SPS_BAM_MGR_NONE) {
+		if (bam_props->virt_addr == NULL && bam_props->virt_size == 0) {
+			SPS_ERR("Invalid properties for BAM: %x",
+					   bam_props->phys_addr);
+			return SPS_ERROR;
+		}
+	}
+	if ((bam_props->manage & SPS_BAM_MGR_DEVICE_REMOTE) == 0) {
+		/* BAM global is configured by local processor */
+		if (bam_props->summing_threshold == 0) {
+			SPS_ERR("Invalid device ctrl properties for BAM: %x",
+			 bam_props->phys_addr);
+			return SPS_ERROR;
+		}
+	}
+	manage = bam_props->manage &
+		  (SPS_BAM_MGR_PIPE_NO_CONFIG | SPS_BAM_MGR_PIPE_NO_CTRL);
+
+	/* In case of error */
+	*dev_handle = SPS_DEV_HANDLE_INVALID;
+	result = SPS_ERROR;
+
+	mutex_lock(&sps->lock);
+	/* Is this BAM already registered? */
+	bam = phy2bam(bam_props->phys_addr);
+	if (bam != NULL) {
+		mutex_unlock(&sps->lock);
+		SPS_ERR("BAM already registered: %x", bam->props.phys_addr);
+		result = -EEXIST;
+		bam = NULL;   /* Avoid error clean-up kfree(bam) */
+		goto exit_err;
+	}
+
+	/* Perform virtual mapping if required */
+	if ((bam_props->manage & SPS_BAM_MGR_ACCESS_MASK) !=
+	    SPS_BAM_MGR_NONE && bam_props->virt_addr == NULL) {
+		/* Map the memory region */
+		virt_addr = ioremap(bam_props->phys_addr, bam_props->virt_size);
+		if (virt_addr == NULL) {
+			SPS_ERR("Unable to map BAM IO memory: %x %x",
+				bam_props->phys_addr, bam_props->virt_size);
+			goto exit_err;
+		}
+	}
+
+	bam = kzalloc(sizeof(*bam), GFP_KERNEL);
+	if (bam == NULL) {
+		SPS_ERR("Unable to allocate BAM device state: size 0x%x",
+			sizeof(*bam));
+		goto exit_err;
+	}
+
+	mutex_init(&bam->lock);
+	mutex_lock(&bam->lock);
+
+	/* Copy configuration to BAM device descriptor */
+	bam->props = *bam_props;
+	if (virt_addr != NULL)
+		bam->props.virt_addr = virt_addr;
+
+	if ((bam_props->manage & SPS_BAM_MGR_DEVICE_REMOTE) != 0 &&
+	    (bam_props->manage & SPS_BAM_MGR_MULTI_EE) != 0 &&
+	    bam_props->ee == 0) {
+		/*
+		 * BAM global is owned by a remote processor, so force EE index
+		 * to a non-zero value to ensure EE zero globals are not
+		 * modified.
+		 */
+		SPS_INFO("Setting EE for BAM %x to non-zero",
+				  bam_props->phys_addr);
+		bam->props.ee = 1;
+	}
+
+	ok = sps_bam_device_init(bam);
+	mutex_unlock(&bam->lock);
+	if (ok) {
+		SPS_ERR("Failed to init BAM device: phys 0x%0x",
+			bam->props.phys_addr);
+		goto exit_err;
+	}
+
+	/* Add BAM to the list */
+	list_add_tail(&bam->list, &sps->bams_q);
+	*dev_handle = (u32) bam;
+
+	result = 0;
+exit_err:
+	mutex_unlock(&sps->lock);
+
+	if (result) {
+		if (virt_addr != NULL)
+			iounmap(virt_addr);
+
+		if (bam != NULL)
+			kfree(bam);
+
+		return result;
+	}
+
+	/* If this BAM is attached to a BAM-DMA, init the BAM-DMA device */
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+	if ((bam->props.options & SPS_BAM_OPT_BAMDMA)) {
+		if (sps_dma_device_init((u32) bam)) {
+			bam->props.options &= ~SPS_BAM_OPT_BAMDMA;
+			sps_deregister_bam_device((u32) bam);
+			SPS_ERR("Failed to init BAM-DMA device: BAM phys 0x%0x",
+				bam->props.phys_addr);
+			return SPS_ERROR;
+		}
+	}
+#endif /* CONFIG_SPS_SUPPORT_BAMDMA */
+
+	SPS_DBG("SPS registered BAM: phys 0x%x.", bam->props.phys_addr);
+
+	return 0;
+}
+EXPORT_SYMBOL(sps_register_bam_device);
+
+/**
+ * Deregister a BAM device
+ *
+ */
+int sps_deregister_bam_device(u32 dev_handle)
+{
+	struct sps_bam *bam;
+
+	bam = sps_h2bam(dev_handle);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	SPS_DBG("SPS deregister BAM: phys 0x%x.", bam->props.phys_addr);
+
+	/* If this BAM is attached to a BAM-DMA, de-init the BAM-DMA device */
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+	if ((bam->props.options & SPS_BAM_OPT_BAMDMA)) {
+		mutex_lock(&bam->lock);
+		(void)sps_dma_device_de_init((u32) bam);
+		bam->props.options &= ~SPS_BAM_OPT_BAMDMA;
+		mutex_unlock(&bam->lock);
+	}
+#endif
+
+	/* Remove the BAM from the registration list */
+	mutex_lock(&sps->lock);
+	list_del(&bam->list);
+	mutex_unlock(&sps->lock);
+
+	/* De-init the BAM and free resources */
+	mutex_lock(&bam->lock);
+	sps_bam_device_de_init(bam);
+	mutex_unlock(&bam->lock);
+	if (bam->props.virt_size)
+		(void)iounmap(bam->props.virt_addr);
+
+	kfree(bam);
+
+	return 0;
+}
+EXPORT_SYMBOL(sps_deregister_bam_device);
+
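+/*
+ * Illustrative sketch (not part of this driver): how a peripheral driver
+ * might register its BAM.  The register base, mapping size, IRQ number and
+ * summing threshold are placeholders that must come from the real platform
+ * data; the manage and option flags are omitted here and must be chosen for
+ * the target configuration.
+ *
+ *	struct sps_bam_props props;
+ *	u32 bam_handle;
+ *
+ *	memset(&props, 0, sizeof(props));
+ *	props.phys_addr         = peripheral_bam_phys;
+ *	props.virt_size         = 0x4000;
+ *	props.irq               = peripheral_bam_irq;
+ *	props.summing_threshold = 0x10;
+ *	sps_register_bam_device(&props, &bam_handle);
+ *	...
+ *	sps_deregister_bam_device(bam_handle);
+ */
+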
+/**
+ * Get processed I/O vector (completed transfers)
+ *
+ */
+int sps_get_iovec(struct sps_pipe *h, struct sps_iovec *iovec)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	if (h == NULL || iovec == NULL)
+		return SPS_ERROR;
+
+	SPS_DBG("%s.", __func__);
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	/* Get an iovec from the BAM pipe descriptor FIFO */
+	result = sps_bam_pipe_get_iovec(bam, pipe->pipe_index, iovec);
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_get_iovec);
+
+/**
+ * Perform timer control
+ *
+ */
+int sps_timer_ctrl(struct sps_pipe *h,
+			struct sps_timer_ctrl *timer_ctrl,
+			struct sps_timer_result *timer_result)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	SPS_DBG("%s.", __func__);
+
+	if (h == NULL || timer_ctrl == NULL)
+		return SPS_ERROR;
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	/* Perform the BAM pipe timer control operation */
+	result = sps_bam_pipe_timer_ctrl(bam, pipe->pipe_index, timer_ctrl,
+					 timer_result);
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_timer_ctrl);
+
+/**
+ * Allocate client state context
+ *
+ */
+struct sps_pipe *sps_alloc_endpoint(void)
+{
+	struct sps_pipe *ctx = NULL;
+
+	ctx = kzalloc(sizeof(struct sps_pipe), GFP_KERNEL);
+	if (ctx == NULL) {
+		SPS_ERR("Allocate pipe context fail.");
+		return NULL;
+	}
+
+	sps_client_init(ctx);
+
+	return ctx;
+}
+EXPORT_SYMBOL(sps_alloc_endpoint);
+
+/**
+ * Free client state context
+ *
+ */
+int sps_free_endpoint(struct sps_pipe *ctx)
+{
+	int res;
+
+	res = sps_client_de_init(ctx);
+
+	if (res == 0)
+		kfree(ctx);
+
+	return res;
+}
+EXPORT_SYMBOL(sps_free_endpoint);
+
+/**
+ * Platform Driver.
+ */
+static int get_platform_data(struct platform_device *pdev)
+{
+	struct resource *resource;
+	struct msm_sps_platform_data *pdata;
+
+	pdata = pdev->dev.platform_data;
+
+	if (pdata == NULL) {
+		SPS_ERR("sps:inavlid platform data.\n");
+		sps->bamdma_restricted_pipes = 0;
+		return -EINVAL;
+	} else {
+		sps->bamdma_restricted_pipes = pdata->bamdma_restricted_pipes;
+		SPS_DBG("sps:bamdma_restricted_pipes=0x%x.",
+			sps->bamdma_restricted_pipes);
+	}
+
+	resource  = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						 "pipe_mem");
+	if (resource) {
+		sps->pipemem_phys_base = resource->start;
+		sps->pipemem_size = resource_size(resource);
+		SPS_DBG("sps:pipemem.base=0x%x,size=0x%x.",
+			sps->pipemem_phys_base,
+			sps->pipemem_size);
+	}
+
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+	resource  = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						 "bamdma_bam");
+	if (resource) {
+		sps->bamdma_bam_phys_base = resource->start;
+		sps->bamdma_bam_size = resource_size(resource);
+		SPS_DBG("sps:bamdma_bam.base=0x%x,size=0x%x.",
+			sps->bamdma_bam_phys_base,
+			sps->bamdma_bam_size);
+	}
+
+	resource  = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						 "bamdma_dma");
+	if (resource) {
+		sps->bamdma_dma_phys_base = resource->start;
+		sps->bamdma_dma_size = resource_size(resource);
+		SPS_DBG("sps:bamdma_dma.base=0x%x,size=0x%x.",
+			sps->bamdma_dma_phys_base,
+			sps->bamdma_dma_size);
+	}
+
+	resource  = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+						 "bamdma_irq");
+	if (resource) {
+		sps->bamdma_irq = resource->start;
+		SPS_DBG("sps:bamdma_irq=%d.", sps->bamdma_irq);
+	}
+#endif
+
+	return 0;
+}
+
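+/*
+ * Illustrative sketch (not part of this driver): the named platform
+ * resources that get_platform_data() above looks up, as a board file might
+ * declare them.  Addresses and sizes are placeholders; the "bamdma_dma" and
+ * "bamdma_irq" entries follow the same pattern and are only needed when
+ * CONFIG_SPS_SUPPORT_BAMDMA is enabled.
+ *
+ *	static struct resource msm_sps_resources[] = {
+ *		{
+ *			.name	= "pipe_mem",
+ *			.start	= 0x12800000,
+ *			.end	= 0x12800000 + SZ_32K - 1,
+ *			.flags	= IORESOURCE_MEM,
+ *		},
+ *		{
+ *			.name	= "bamdma_bam",
+ *			.start	= 0x12240000,
+ *			.end	= 0x12240000 + SZ_16K - 1,
+ *			.flags	= IORESOURCE_MEM,
+ *		},
+ *	};
+ */
+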
+static int __devinit msm_sps_probe(struct platform_device *pdev)
+{
+	int ret;
+
+	SPS_DBG("sps:msm_sps_probe.");
+
+	ret = get_platform_data(pdev);
+	if (ret)
+		return -ENODEV;
+
+	/* Create Device */
+	sps->dev_class = class_create(THIS_MODULE, SPS_DRV_NAME);
+	if (IS_ERR(sps->dev_class)) {
+		SPS_ERR("sps:class_create err.");
+		return -ENODEV;
+	}
+
+	ret = alloc_chrdev_region(&sps->dev_num, 0, 1, SPS_DRV_NAME);
+	if (ret) {
+		SPS_ERR("sps:alloc_chrdev_region err.");
+		goto alloc_chrdev_region_err;
+	}
+
+	sps->dev = device_create(sps->dev_class, NULL, sps->dev_num, sps,
+				SPS_DRV_NAME);
+	if (IS_ERR(sps->dev)) {
+		SPS_ERR("sps:device_create err.");
+		goto device_create_err;
+	}
+
+	sps->dfab_clk = clk_get(sps->dev, "dfab_clk");
+	if (IS_ERR(sps->dfab_clk)) {
+		SPS_ERR("sps:fail to get dfab_clk.");
+		goto clk_err;
+	} else {
+		ret = clk_enable(sps->dfab_clk);
+		if (ret) {
+			SPS_ERR("sps:failed to enable dfab_clk. ret=%d", ret);
+			goto clk_err;
+		}
+	}
+
+	sps->pmem_clk = clk_get(sps->dev, "pmem_clk");
+	if (IS_ERR(sps->pmem_clk)) {
+		SPS_ERR("sps:fail to get pmem_clk.");
+		goto clk_err;
+	} else {
+		ret = clk_enable(sps->pmem_clk);
+		if (ret) {
+			SPS_ERR("sps:failed to enable pmem_clk. ret=%d", ret);
+			goto clk_err;
+		}
+	}
+
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+	sps->bamdma_clk = clk_get(sps->dev, "dma_bam_pclk");
+	if (IS_ERR(sps->bamdma_clk)) {
+		SPS_ERR("sps:fail to get bamdma_clk.");
+		goto clk_err;
+	} else {
+		ret = clk_enable(sps->bamdma_clk);
+		if (ret) {
+			SPS_ERR("sps:failed to enable bamdma_clk. ret=%d", ret);
+			goto clk_err;
+		}
+	}
+#endif
+
+	ret = sps_device_init();
+	if (ret) {
+		SPS_ERR("sps:sps_device_init err.");
+		goto sps_device_init_err;
+	}
+
+	sps->is_ready = true;
+
+	SPS_INFO("sps is ready.");
+
+	return 0;
+clk_err:
+sps_device_init_err:
+	device_destroy(sps->dev_class, sps->dev_num);
+device_create_err:
+	unregister_chrdev_region(sps->dev_num, 1);
+alloc_chrdev_region_err:
+	class_destroy(sps->dev_class);
+
+	return -ENODEV;
+}
+
+static int __devexit msm_sps_remove(struct platform_device *pdev)
+{
+	SPS_DBG("%s.", __func__);
+
+	device_destroy(sps->dev_class, sps->dev_num);
+	unregister_chrdev_region(sps->dev_num, 1);
+	class_destroy(sps->dev_class);
+	sps_device_de_init();
+
+	clk_put(sps->dfab_clk);
+	clk_put(sps->pmem_clk);
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+	clk_put(sps->bamdma_clk);
+#endif
+
+	return 0;
+}
+
+static struct platform_driver msm_sps_driver = {
+	.probe          = msm_sps_probe,
+	.driver		= {
+		.name	= SPS_DRV_NAME,
+		.owner	= THIS_MODULE,
+	},
+	.remove		= __devexit_p(msm_sps_remove),
+};
+
+/**
+ * Module Init.
+ */
+static int __init sps_init(void)
+{
+	int ret;
+
+#ifdef CONFIG_DEBUG_FS
+	sps_debugfs_init();
+#endif
+
+	SPS_DBG("%s.", __func__);
+
+	/* Allocate the SPS driver state struct */
+	sps = kzalloc(sizeof(*sps), GFP_KERNEL);
+	if (sps == NULL) {
+		SPS_ERR("sps:Unable to allocate driver state context.");
+		return -ENOMEM;
+	}
+
+	ret = platform_driver_register(&msm_sps_driver);
+
+	return ret;
+}
+
+/**
+ * Module Exit.
+ */
+static void __exit sps_exit(void)
+{
+	SPS_DBG("%s.", __func__);
+
+	platform_driver_unregister(&msm_sps_driver);
+
+	if (sps != NULL) {
+		kfree(sps);
+		sps = NULL;
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	sps_debugfs_exit();
+#endif
+}
+
+arch_initcall(sps_init);
+module_exit(sps_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Smart Peripheral Switch (SPS)");
+
diff --git a/drivers/platform/msm/sps/sps_bam.c b/drivers/platform/msm/sps/sps_bam.c
new file mode 100644
index 0000000..8fbf4f9
--- /dev/null
+++ b/drivers/platform/msm/sps/sps_bam.c
@@ -0,0 +1,1910 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>	/* u32 */
+#include <linux/kernel.h>	/* pr_info() */
+#include <linux/mutex.h>	/* mutex */
+#include <linux/list.h>		/* list_head */
+#include <linux/slab.h>		/* kzalloc() */
+#include <linux/interrupt.h>	/* request_irq() */
+#include <linux/memory.h>	/* memset */
+
+#include "sps_bam.h"
+#include "bam.h"
+#include "spsi.h"
+
+/* All BAM global IRQ sources */
+#define BAM_IRQ_ALL (BAM_DEV_IRQ_HRESP_ERROR | BAM_DEV_IRQ_ERROR)
+
+/* BAM device state flags */
+#define BAM_STATE_INIT     (1UL << 1)
+#define BAM_STATE_IRQ      (1UL << 2)
+#define BAM_STATE_ENABLED  (1UL << 3)
+#define BAM_STATE_BAM2BAM  (1UL << 4)
+#define BAM_STATE_MTI      (1UL << 5)
+#define BAM_STATE_REMOTE   (1UL << 6)
+
+/* Mask for valid hardware descriptor flags */
+#define BAM_IOVEC_FLAG_MASK   \
+	(SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_EOB)
+
+/* Mask for invalid BAM-to-BAM pipe options */
+#define BAM2BAM_O_INVALID   \
+	(SPS_O_DESC_DONE | \
+	 SPS_O_EOT | \
+	 SPS_O_POLL | \
+	 SPS_O_NO_Q | \
+	 SPS_O_ACK_TRANSFERS)
+
+/**
+ * Pipe/client pointer value indicating pipe is allocated, but no client has
+ * been assigned
+ */
+#define BAM_PIPE_UNASSIGNED   ((struct sps_pipe *)0x77777777)
+
+/* Check whether pipe has been assigned */
+#define BAM_PIPE_IS_ASSIGNED(p)  \
+	(((p) != NULL) && ((p) != BAM_PIPE_UNASSIGNED))
+
+/* Is MTI use supported for a specific BAM version? */
+#define BAM_VERSION_MTI_SUPPORT(ver)   ((ver) <= 2)
+
+/* Event option<->event translation table entry */
+struct sps_bam_opt_event_table {
+	enum sps_event event_id;
+	enum sps_option option;
+	enum bam_pipe_irq pipe_irq;
+};
+
+static const struct sps_bam_opt_event_table opt_event_table[] = {
+	{SPS_EVENT_EOT, SPS_O_EOT, BAM_PIPE_IRQ_EOT},
+	{SPS_EVENT_DESC_DONE, SPS_O_DESC_DONE, BAM_PIPE_IRQ_DESC_INT},
+	{SPS_EVENT_WAKEUP, SPS_O_WAKEUP, BAM_PIPE_IRQ_WAKE},
+	{SPS_EVENT_INACTIVE, SPS_O_INACTIVE, BAM_PIPE_IRQ_TIMER},
+	{SPS_EVENT_OUT_OF_DESC, SPS_O_OUT_OF_DESC,
+		BAM_PIPE_IRQ_OUT_OF_DESC},
+	{SPS_EVENT_ERROR, SPS_O_ERROR, BAM_PIPE_IRQ_ERROR}
+};
+
+/* Pipe event source handler */
+static void pipe_handler(struct sps_bam *dev,
+			struct sps_pipe *pipe);
+
+/**
+ * Pipe transfer event (EOT, DESC_DONE) source handler.
+ * This function is called by pipe_handler() and other functions to process the
+ * descriptor FIFO.
+ */
+static void pipe_handler_eot(struct sps_bam *dev,
+			   struct sps_pipe *pipe);
+
+/**
+ * BAM driver initialization
+ */
+int sps_bam_driver_init(u32 options)
+{
+	int n;
+
+	/*
+	 * Check that SPS_O_ and BAM_PIPE_IRQ_ values are identical.
+	 * This is required so that the raw pipe IRQ status can be passed
+	 * to the client in the SPS_EVENT_IRQ.
+	 */
+	for (n = 0; n < ARRAY_SIZE(opt_event_table); n++) {
+		if ((u32)opt_event_table[n].option !=
+			(u32)opt_event_table[n].pipe_irq) {
+			SPS_ERR("SPS_O 0x%x != HAL IRQ 0x%x",
+				opt_event_table[n].option,
+				opt_event_table[n].pipe_irq);
+			return SPS_ERROR;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * BAM interrupt service routine
+ *
+ * This function is the BAM interrupt service routine.
+ *
+ * @ctxt - pointer to ISR's registered argument
+ *
+ * @return IRQ_HANDLED
+ */
+static irqreturn_t bam_isr(int irq, void *ctxt)
+{
+	struct sps_bam *dev = ctxt;
+	struct sps_pipe *pipe;
+	u32 source;
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&dev->isr_lock, flags);
+
+	/* Get BAM interrupt source(s) */
+	if ((dev->state & BAM_STATE_MTI) == 0) {
+		u32 mask = dev->pipe_active_mask;
+		source = bam_get_irq_status(dev->base,
+							  dev->props.ee,
+							  mask);
+
+		SPS_DBG("sps:bam_isr:bam=0x%x;source=0x%x;mask=0x%x.",
+				BAM_ID(dev), source, mask);
+
+		/* Mask any non-local source */
+		source &= dev->pipe_active_mask;
+	} else {
+		/* If MTIs are used, must poll each active pipe */
+		source = dev->pipe_active_mask;
+	}
+
+	/* Process active pipe sources */
+	list_for_each_entry(pipe, &dev->pipes_q, list) {
+		/* Check this pipe's bit in the source mask */
+		if ((source & pipe->pipe_index_mask)) {
+			/* This pipe has an interrupt pending */
+			pipe_handler(dev, pipe);
+			source &= ~pipe->pipe_index_mask;
+		}
+		if (source == 0)
+			break;
+	}
+
+	/* Process any inactive pipe sources */
+	if (source) {
+		SPS_ERR("IRQ from BAM 0x%x inactive pipe(s) 0x%x",
+			BAM_ID(dev), source);
+		dev->irq_from_disabled_pipe++;
+	}
+
+	spin_unlock_irqrestore(&dev->isr_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * BAM device enable
+ */
+int sps_bam_enable(struct sps_bam *dev)
+{
+	u32 num_pipes;
+	u32 irq_mask;
+	int result;
+	int rc;
+	int MTIenabled;
+
+	/* Is this BAM enabled? */
+	if ((dev->state & BAM_STATE_ENABLED))
+		return 0;	/* Yes, so no work to do */
+
+	/* Is there any access to this BAM? */
+	if ((dev->props.manage & SPS_BAM_MGR_ACCESS_MASK) == SPS_BAM_MGR_NONE) {
+		SPS_ERR("No local access to BAM 0x%x", BAM_ID(dev));
+		return SPS_ERROR;
+	}
+
+	/* Set interrupt handling */
+	if ((dev->props.options & SPS_BAM_OPT_IRQ_DISABLED) != 0 ||
+	    dev->props.irq == SPS_IRQ_INVALID) {
+		/* Disable the BAM interrupt */
+		irq_mask = 0;
+		dev->state &= ~BAM_STATE_IRQ;
+	} else {
+		/* Register BAM ISR */
+		result = 0;
+		if (dev->props.irq > 0)
+			result = request_irq(dev->props.irq,
+				    (irq_handler_t) bam_isr,
+				    IRQF_TRIGGER_HIGH, "sps", dev);
+
+		if (result) {
+			SPS_ERR("Failed to register BAM 0x%x IRQ %d",
+				BAM_ID(dev), dev->props.irq);
+			return SPS_ERROR;
+		}
+
+		/* Enable the BAM interrupt */
+		irq_mask = BAM_IRQ_ALL;
+		dev->state |= BAM_STATE_IRQ;
+	}
+
+	/* Is global BAM control managed by the local processor? */
+	num_pipes = 0;
+	if ((dev->props.manage & SPS_BAM_MGR_DEVICE_REMOTE) == 0)
+		/* Yes, so initialize the BAM device */
+		rc = bam_init(dev->base,
+				  dev->props.ee,
+				  (u16) dev->props.summing_threshold,
+				  irq_mask,
+				  &dev->version, &num_pipes);
+	else
+		/* No, so just verify that it is enabled */
+		rc = bam_check(dev->base, &dev->version, &num_pipes);
+
+	if (rc) {
+		SPS_ERR("Failed to init BAM 0x%x IRQ %d",
+			BAM_ID(dev), dev->props.irq);
+		return SPS_ERROR;
+	}
+
+	/* Check if this BAM supports MTIs (Message Triggered Interrupts) or
+	 * multiple EEs (Execution Environments).
+	 * MTI and EE support are mutually exclusive.
+	 */
+	MTIenabled = BAM_VERSION_MTI_SUPPORT(dev->version);
+
+	if ((dev->props.manage & SPS_BAM_MGR_DEVICE_REMOTE) != 0 &&
+			(dev->props.manage & SPS_BAM_MGR_MULTI_EE) != 0 &&
+			dev->props.ee == 0 && MTIenabled) {
+		/*
+		 * BAM global is owned by remote processor and local processor
+		 * must use MTI. Thus, force EE index to a non-zero value to
+		 * ensure that EE zero globals can't be modified.
+		 */
+		SPS_ERR("sps: EE for satellite BAM must be set to non-zero");
+		return SPS_ERROR;
+	}
+
+	/*
+	 * Enable MTI use (message triggered interrupt)
+	 * if local processor does not control the global BAM config
+	 * and this BAM supports MTIs.
+	 */
+	if ((dev->state & BAM_STATE_IRQ) != 0 &&
+		(dev->props.manage & SPS_BAM_MGR_DEVICE_REMOTE) != 0 &&
+		MTIenabled) {
+		if (dev->props.irq_gen_addr == 0 ||
+		    dev->props.irq_gen_addr == SPS_ADDR_INVALID) {
+			SPS_ERR("MTI destination address not specified "
+				"for BAM 0x%x",	BAM_ID(dev));
+			return SPS_ERROR;
+		}
+		dev->state |= BAM_STATE_MTI;
+	}
+
+	if (num_pipes) {
+		dev->props.num_pipes = num_pipes;
+		SPS_DBG("BAM 0x%x number of pipes reported by hw: %d",
+				 BAM_ID(dev), dev->props.num_pipes);
+	}
+
+	/* Check EE index */
+	if (!MTIenabled && dev->props.ee >= SPS_BAM_NUM_EES) {
+		SPS_ERR("Invalid EE BAM 0x%x: %d", BAM_ID(dev), dev->props.ee);
+		return SPS_ERROR;
+	}
+
+	/*
+	 * Process EE configuration parameters,
+	 * if specified in the properties
+	 */
+	if (!MTIenabled && dev->props.sec_config == SPS_BAM_SEC_DO_CONFIG) {
+		struct sps_bam_sec_config_props *p_sec =
+						dev->props.p_sec_config_props;
+		if (p_sec == NULL) {
+			SPS_ERR("EE config table is not specified for "
+				"BAM 0x%x", BAM_ID(dev));
+			return SPS_ERROR;
+		}
+
+		/*
+		 * Set restricted pipes based on the pipes assigned to local EE
+		 */
+		dev->props.restricted_pipes =
+					~p_sec->ees[dev->props.ee].pipe_mask;
+
+		/*
+		 * If local processor manages the BAM, perform the EE
+		 * configuration
+		 */
+		if ((dev->props.manage & SPS_BAM_MGR_DEVICE_REMOTE) == 0) {
+			u32 ee;
+			u32 pipe_mask;
+			int n, i;
+
+			/*
+			 * Verify that there are no overlapping pipe
+			 * assignments
+			 */
+			for (n = 0; n < SPS_BAM_NUM_EES - 1; n++) {
+				for (i = n + 1; i < SPS_BAM_NUM_EES; i++) {
+					if ((p_sec->ees[n].pipe_mask &
+						p_sec->ees[i].pipe_mask) != 0) {
+						SPS_ERR("Overlapping pipe "
+							"assignments for BAM "
+							"0x%x: EEs %d and %d",
+							BAM_ID(dev), n, i);
+						return SPS_ERROR;
+					}
+				}
+			}
+
+			for (ee = 0; ee < SPS_BAM_NUM_EES; ee++) {
+				/*
+				 * MSbit specifies EE for the global (top-level)
+				 * BAM interrupt
+				 */
+				pipe_mask = p_sec->ees[ee].pipe_mask;
+				if (ee == dev->props.ee)
+					pipe_mask |= (1UL << 31);
+				else
+					pipe_mask &= ~(1UL << 31);
+
+				bam_security_init(dev->base, ee,
+						p_sec->ees[ee].vmid, pipe_mask);
+			}
+		}
+	}
+
+	/*
+	 * If local processor manages the BAM and the BAM supports MTIs
+	 * but does not support multiple EEs, set all restricted pipes
+	 * to MTI mode.
+	 */
+	if ((dev->props.manage & SPS_BAM_MGR_DEVICE_REMOTE) == 0
+			&& MTIenabled) {
+		u32 pipe_index;
+		u32 pipe_mask;
+		for (pipe_index = 0, pipe_mask = 1;
+		    pipe_index < dev->props.num_pipes;
+		    pipe_index++, pipe_mask <<= 1) {
+			if ((pipe_mask & dev->props.restricted_pipes) == 0)
+				continue;	/* This is a local pipe */
+
+			/*
+			 * Enable MTI with destination address of zero
+			 * (and source mask zero). Pipe is in reset,
+			 * so no interrupt will be generated.
+			 */
+			bam_pipe_satellite_mti(dev->base, pipe_index, 0,
+						       dev->props.ee);
+		}
+	}
+
+	dev->state |= BAM_STATE_ENABLED;
+	SPS_DBG("BAM 0x%x enabled: ver: %d, number of pipes: %d",
+		BAM_ID(dev), dev->version, dev->props.num_pipes);
+	return 0;
+}
+
+/**
+ * BAM device disable
+ *
+ */
+int sps_bam_disable(struct sps_bam *dev)
+{
+	if ((dev->state & BAM_STATE_ENABLED) == 0)
+		return 0;
+
+	/* Is there any access to this BAM? */
+	if ((dev->props.manage & SPS_BAM_MGR_ACCESS_MASK) == SPS_BAM_MGR_NONE) {
+		SPS_ERR("No local access to BAM 0x%x", BAM_ID(dev));
+		return SPS_ERROR;
+	}
+
+	/* Is this BAM controlled by the local processor? */
+	if ((dev->props.manage & SPS_BAM_MGR_DEVICE_REMOTE)) {
+		/* No, so just mark it disabled */
+		dev->state &= ~BAM_STATE_ENABLED;
+		return 0;
+	}
+
+	/* Disable BAM (interrupts) */
+	if ((dev->state & BAM_STATE_IRQ)) {
+		bam_exit(dev->base, dev->props.ee);
+
+		/* Deregister BAM ISR */
+		if (dev->props.irq > 0)
+			free_irq(dev->props.irq, dev);
+		dev->state &= ~BAM_STATE_IRQ;
+	}
+
+	dev->state &= ~BAM_STATE_ENABLED;
+
+	SPS_DBG("BAM 0x%x disabled", BAM_ID(dev));
+
+	return 0;
+}
+
+/**
+ * BAM device initialization
+ */
+int sps_bam_device_init(struct sps_bam *dev)
+{
+	if (dev->props.virt_addr == NULL) {
+		SPS_ERR("NULL BAM virtual address");
+		return SPS_ERROR;
+	}
+	dev->base = (void *) dev->props.virt_addr;
+
+	if (dev->props.num_pipes == 0) {
+		/* Assume max number of pipes until BAM registers can be read */
+		dev->props.num_pipes = BAM_MAX_PIPES;
+		SPS_DBG("BAM 0x%x: assuming max number of pipes: %d",
+			BAM_ID(dev), dev->props.num_pipes);
+	}
+
+	/* Init BAM state data */
+	dev->state = 0;
+	dev->pipe_active_mask = 0;
+	dev->pipe_remote_mask = 0;
+	INIT_LIST_HEAD(&dev->pipes_q);
+
+	spin_lock_init(&dev->isr_lock);
+
+	if ((dev->props.options & SPS_BAM_OPT_ENABLE_AT_BOOT))
+		if (sps_bam_enable(dev))
+			return SPS_ERROR;
+
+	SPS_DBG("BAM device: phys 0x%x IRQ %d", BAM_ID(dev), dev->props.irq);
+
+	return 0;
+}
+
+/**
+ * BAM device de-initialization
+ *
+ */
+int sps_bam_device_de_init(struct sps_bam *dev)
+{
+	int result;
+
+	SPS_DBG("BAM device DEINIT: phys 0x%x IRQ %d",
+		BAM_ID(dev), dev->props.irq);
+
+	result = sps_bam_disable(dev);
+
+	return result;
+}
+
+/**
+ * BAM device reset
+ *
+ */
+int sps_bam_reset(struct sps_bam *dev)
+{
+	struct sps_pipe *pipe;
+	u32 pipe_index;
+	int result;
+
+	SPS_DBG("BAM device RESET: phys 0x%x IRQ %d",
+		BAM_ID(dev), dev->props.irq);
+
+	/* If BAM is enabled, then disable */
+	result = 0;
+	if ((dev->state & BAM_STATE_ENABLED)) {
+		/* Verify that no pipes are currently allocated */
+		for (pipe_index = 0; pipe_index < dev->props.num_pipes;
+		      pipe_index++) {
+			pipe = dev->pipes[pipe_index];
+			if (BAM_PIPE_IS_ASSIGNED(pipe)) {
+				SPS_ERR("BAM device 0x%x RESET failed: "
+					"pipe %d in use",
+					BAM_ID(dev), pipe_index);
+				result = SPS_ERROR;
+				break;
+			}
+		}
+
+		if (result == 0)
+			result = sps_bam_disable(dev);
+	}
+
+	/* BAM will be reset as part of the enable process */
+	if (result == 0)
+		result = sps_bam_enable(dev);
+
+	return result;
+}
+
+/**
+ * Clear the BAM pipe state struct
+ *
+ * This function clears the BAM pipe state struct.
+ *
+ * @pipe - pointer to client pipe struct
+ *
+ */
+static void pipe_clear(struct sps_pipe *pipe)
+{
+	INIT_LIST_HEAD(&pipe->list);
+
+	pipe->state = 0;
+	pipe->pipe_index = SPS_BAM_PIPE_INVALID;
+	pipe->pipe_index_mask = 0;
+	pipe->irq_mask = 0;
+	pipe->mode = -1;
+	pipe->num_descs = 0;
+	pipe->desc_size = 0;
+	memset(&pipe->sys, 0, sizeof(pipe->sys));
+	INIT_LIST_HEAD(&pipe->sys.events_q);
+}
+
+/**
+ * Allocate a BAM pipe
+ *
+ */
+u32 sps_bam_pipe_alloc(struct sps_bam *dev, u32 pipe_index)
+{
+	u32 pipe_mask;
+
+	if (pipe_index == SPS_BAM_PIPE_INVALID) {
+		/* Allocate a pipe from the BAM */
+		if ((dev->props.manage & SPS_BAM_MGR_PIPE_NO_ALLOC)) {
+			SPS_ERR("Restricted from allocating pipes on BAM 0x%x",
+				BAM_ID(dev));
+			return SPS_BAM_PIPE_INVALID;
+		}
+		for (pipe_index = 0, pipe_mask = 1;
+		    pipe_index < dev->props.num_pipes;
+		    pipe_index++, pipe_mask <<= 1) {
+			if ((pipe_mask & dev->props.restricted_pipes))
+				continue;	/* This is a restricted pipe */
+
+			if (dev->pipes[pipe_index] == NULL)
+				break;	/* Found an available pipe */
+		}
+		if (pipe_index >= dev->props.num_pipes) {
+			SPS_ERR("Failed to allocate pipe on BAM 0x%x",
+				BAM_ID(dev));
+			return SPS_BAM_PIPE_INVALID;
+		}
+	} else {
+		/* Check that client-specified pipe is available */
+		if (pipe_index >= dev->props.num_pipes) {
+			SPS_ERR("Invalid pipe %d for allocate on BAM 0x%x",
+				pipe_index, BAM_ID(dev));
+			return SPS_BAM_PIPE_INVALID;
+		}
+		if ((dev->props.restricted_pipes & (1UL << pipe_index))) {
+			SPS_ERR("BAM 0x%x pipe %d is not local",
+				BAM_ID(dev), pipe_index);
+			return SPS_BAM_PIPE_INVALID;
+		}
+		if (dev->pipes[pipe_index] != NULL) {
+			SPS_ERR("Pipe %d already allocated on BAM 0x%x",
+				pipe_index, BAM_ID(dev));
+			return SPS_BAM_PIPE_INVALID;
+		}
+	}
+
+	/* Mark pipe as allocated */
+	dev->pipes[pipe_index] = BAM_PIPE_UNASSIGNED;
+
+	return pipe_index;
+}
+
+/**
+ * Free a BAM pipe
+ *
+ */
+void sps_bam_pipe_free(struct sps_bam *dev, u32 pipe_index)
+{
+	struct sps_pipe *pipe;
+
+	if (pipe_index >= dev->props.num_pipes) {
+		SPS_ERR("Invalid BAM 0x%x pipe: %d", BAM_ID(dev), pipe_index);
+		return;
+	}
+
+	/* Get the client pipe struct and mark the pipe free */
+	pipe = dev->pipes[pipe_index];
+	dev->pipes[pipe_index] = NULL;
+
+	/* Is the pipe currently allocated? */
+	if (pipe == NULL) {
+		SPS_ERR("Attempt to free unallocated pipe %d on BAM 0x%x",
+			pipe_index, BAM_ID(dev));
+		return;
+	}
+
+	if (pipe == BAM_PIPE_UNASSIGNED)
+		return;		/* Never assigned, so no work to do */
+
+	/* Return pending items to appropriate pools */
+	if (!list_empty(&pipe->sys.events_q)) {
+		struct sps_q_event *sps_event;
+
+		SPS_ERR("Disconnect BAM 0x%x pipe %d with events pending",
+			BAM_ID(dev), pipe_index);
+
+		list_for_each_entry(sps_event, &pipe->sys.events_q, list) {
+			list_del(&sps_event->list);
+			kfree(sps_event);
+		}
+	}
+
+	/* Clear the BAM pipe state struct */
+	pipe_clear(pipe);
+}
+
+/**
+ * Establish BAM pipe connection
+ *
+ */
+int sps_bam_pipe_connect(struct sps_pipe *bam_pipe,
+			 const struct sps_bam_connect_param *params)
+{
+	struct bam_pipe_parameters hw_params;
+	struct sps_bam *dev;
+	const struct sps_connection *map = bam_pipe->map;
+	const struct sps_conn_end_pt *map_pipe;
+	const struct sps_conn_end_pt *other_pipe;
+	void *desc_buf = NULL;
+	u32 pipe_index;
+	int result;
+
+	/* Clear the client pipe state and hw init struct */
+	pipe_clear(bam_pipe);
+	memset(&hw_params, 0, sizeof(hw_params));
+
+	/* Initialize the BAM state struct */
+	bam_pipe->mode = params->mode;
+
+	/* Set pipe streaming mode */
+	if ((params->options & SPS_O_STREAMING) == 0)
+		hw_params.stream_mode = BAM_STREAM_MODE_DISABLE;
+	else
+		hw_params.stream_mode = BAM_STREAM_MODE_ENABLE;
+
+	/* Determine which end point to connect */
+	if (bam_pipe->mode == SPS_MODE_SRC) {
+		map_pipe = &map->src;
+		other_pipe = &map->dest;
+		hw_params.dir = BAM_PIPE_PRODUCER;
+	} else {
+		map_pipe = &map->dest;
+		other_pipe = &map->src;
+		hw_params.dir = BAM_PIPE_CONSUMER;
+	}
+
+	/* Process map parameters */
+	dev = map_pipe->bam;
+	pipe_index = map_pipe->pipe_index;
+	if (pipe_index >= dev->props.num_pipes) {
+		SPS_ERR("Invalid BAM 0x%x pipe: %d", BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+	hw_params.event_threshold = (u16) map_pipe->event_threshold;
+	hw_params.ee = dev->props.ee;
+
+	/* Verify that control of this pipe is allowed */
+	if ((dev->props.manage & SPS_BAM_MGR_PIPE_NO_CTRL) ||
+	    (dev->props.restricted_pipes & (1UL << pipe_index))) {
+		SPS_ERR("BAM 0x%x pipe %d is not local",
+			BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+
+	/* Control without configuration permission is not supported yet */
+	if ((dev->props.manage & SPS_BAM_MGR_PIPE_NO_CONFIG)) {
+		SPS_ERR("BAM 0x%x pipe %d remote config is not supported",
+			BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+
+	/* Determine operational mode */
+	if (other_pipe->bam != NULL) {
+		/* BAM-to-BAM mode */
+		bam_pipe->state |= BAM_STATE_BAM2BAM;
+		hw_params.mode = BAM_PIPE_MODE_BAM2BAM;
+		hw_params.peer_phys_addr =
+			((struct sps_bam *) (other_pipe->bam))->props.phys_addr;
+		hw_params.peer_pipe = other_pipe->pipe_index;
+
+		/* Verify FIFO buffers are allocated for BAM-to-BAM pipes */
+		if (map->desc.phys_base == SPS_ADDR_INVALID ||
+		    map->data.phys_base == SPS_ADDR_INVALID ||
+		    map->desc.size == 0 || map->data.size == 0) {
+			SPS_ERR("FIFO buffers are not allocated for BAM 0x%x "
+				"pipe %d", BAM_ID(dev), pipe_index);
+			return SPS_ERROR;
+		}
+		hw_params.data_base = map->data.phys_base;
+		hw_params.data_size = map->data.size;
+
+		/* Clear the data FIFO for debug */
+		if (map->data.base != NULL && bam_pipe->mode == SPS_MODE_SRC)
+			memset(map->data.base, 0, hw_params.data_size);
+	} else {
+		/* System mode */
+		hw_params.mode = BAM_PIPE_MODE_SYSTEM;
+		bam_pipe->sys.desc_buf = map->desc.base;
+		bam_pipe->sys.desc_offset = 0;
+		bam_pipe->sys.acked_offset = 0;
+	}
+
+	/* Initialize the client pipe state */
+	bam_pipe->pipe_index = pipe_index;
+	bam_pipe->pipe_index_mask = 1UL << pipe_index;
+
+	/* Get virtual address for descriptor FIFO */
+	if (map->desc.phys_base != SPS_ADDR_INVALID) {
+		if (map->desc.size < (2 * sizeof(struct sps_iovec))) {
+			SPS_ERR("Invalid descriptor FIFO size "
+				"for BAM 0x%x pipe %d: %d",
+				BAM_ID(dev), pipe_index, map->desc.size);
+			return SPS_ERROR;
+		}
+		desc_buf = map->desc.base;
+
+		/*
+		 * Note that descriptor base and size will be left zero from
+		 * the memset() above if the physical address was invalid.
+		 * This allows a satellite driver to set the FIFO as
+		 * local memory	for system mode.
+		 */
+		hw_params.desc_base = map->desc.phys_base;
+		hw_params.desc_size = map->desc.size;
+	}
+
+	/* Configure the descriptor FIFO for both operational modes */
+	if (desc_buf != NULL)
+		if (bam_pipe->mode == SPS_MODE_SRC ||
+		    hw_params.mode == BAM_PIPE_MODE_SYSTEM)
+			memset(desc_buf, 0, hw_params.desc_size);
+
+	bam_pipe->desc_size = hw_params.desc_size;
+	bam_pipe->num_descs = bam_pipe->desc_size / sizeof(struct sps_iovec);
+
+	result = SPS_ERROR;
+	/* Ensure that the BAM is enabled */
+	if ((dev->state & BAM_STATE_ENABLED) == 0)
+		if (sps_bam_enable(dev))
+			goto exit_init_err;
+
+	/* Check pipe allocation */
+	if (dev->pipes[pipe_index] != BAM_PIPE_UNASSIGNED) {
+		SPS_ERR("Invalid pipe %d on BAM 0x%x for connect",
+			pipe_index, BAM_ID(dev));
+		goto exit_err;
+	}
+
+	if (bam_pipe_is_enabled(dev->base, pipe_index)) {
+		SPS_ERR("BAM 0x%x pipe %d sharing violation",
+			BAM_ID(dev), pipe_index);
+		goto exit_err;
+	}
+
+	if (bam_pipe_init(dev->base, pipe_index, &hw_params, dev->props.ee)) {
+		SPS_ERR("BAM 0x%x pipe %d init error",
+			BAM_ID(dev), pipe_index);
+		goto exit_err;
+	}
+
+	/* Assign pipe to client */
+	dev->pipes[pipe_index] = bam_pipe;
+
+	/* Process configuration parameters */
+	if (params->options != 0 ||
+	    (bam_pipe->state & BAM_STATE_BAM2BAM) == 0) {
+		/* Process init-time only parameters */
+		u32 irq_gen_addr;
+
+		/* Set interrupt mode */
+		irq_gen_addr = SPS_ADDR_INVALID;
+		if ((params->options & SPS_O_IRQ_MTI))
+			/* Client has directly specified the MTI address */
+			irq_gen_addr = params->irq_gen_addr;
+		else if ((dev->state & BAM_STATE_MTI))
+			/* This BAM has MTI use enabled */
+			irq_gen_addr = dev->props.irq_gen_addr;
+
+		if (irq_gen_addr != SPS_ADDR_INVALID) {
+			/*
+			 * No checks - assume BAM is already setup for
+			 * MTI generation,
+			 * or the pipe will be set to satellite control.
+			 */
+			bam_pipe->state |= BAM_STATE_MTI;
+			bam_pipe->irq_gen_addr = irq_gen_addr;
+		}
+
+		/* Process runtime parameters */
+		if (sps_bam_pipe_set_params(dev, pipe_index,
+					  params->options)) {
+			dev->pipes[pipe_index] = BAM_PIPE_UNASSIGNED;
+			goto exit_err;
+		}
+	}
+
+	/* Indicate initialization is complete */
+	dev->pipes[pipe_index] = bam_pipe;
+	dev->pipe_active_mask |= 1UL << pipe_index;
+	list_add_tail(&bam_pipe->list, &dev->pipes_q);
+
+	bam_pipe->state |= BAM_STATE_INIT;
+	result = 0;
+exit_err:
+	if (result)
+		bam_pipe_exit(dev->base, pipe_index, dev->props.ee);
+exit_init_err:
+	if (result) {
+		/* Clear the client pipe state */
+		pipe_clear(bam_pipe);
+	}
+
+	return result;
+}
+
+/**
+ * Disconnect a BAM pipe connection
+ *
+ */
+int sps_bam_pipe_disconnect(struct sps_bam *dev, u32 pipe_index)
+{
+	struct sps_pipe *pipe;
+	int result;
+
+	if (pipe_index >= dev->props.num_pipes) {
+		SPS_ERR("Invalid BAM 0x%x pipe: %d", BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+
+	/* Deallocate and reset the BAM pipe */
+	pipe = dev->pipes[pipe_index];
+	if (BAM_PIPE_IS_ASSIGNED(pipe)) {
+		if ((dev->pipe_active_mask & (1UL << pipe_index))) {
+			list_del(&pipe->list);
+			dev->pipe_active_mask &= ~(1UL << pipe_index);
+		}
+		dev->pipe_remote_mask &= ~(1UL << pipe_index);
+		bam_pipe_exit(dev->base, pipe_index, dev->props.ee);
+		if (pipe->sys.desc_cache != NULL) {
+			kfree(pipe->sys.desc_cache);
+			pipe->sys.desc_cache = NULL;
+		}
+		dev->pipes[pipe_index] = BAM_PIPE_UNASSIGNED;
+		pipe_clear(pipe);
+		result = 0;
+	} else {
+		result = SPS_ERROR;
+	}
+
+	if (result)
+		SPS_ERR("BAM 0x%x pipe %d already disconnected",
+			BAM_ID(dev), pipe_index);
+
+	return result;
+}
+
+/**
+ * Set BAM pipe interrupt enable state
+ *
+ * This function sets the interrupt enable state for a BAM pipe.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @poll - true if SPS_O_POLL is set, false otherwise
+ *
+ */
+static void pipe_set_irq(struct sps_bam *dev, u32 pipe_index,
+				 u32 poll)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+	enum bam_enable irq_enable;
+
+	if (poll == 0 && pipe->irq_mask != 0 &&
+	    (dev->state & BAM_STATE_IRQ)) {
+		if ((pipe->state & BAM_STATE_BAM2BAM) != 0 &&
+		    (pipe->state & BAM_STATE_IRQ) == 0) {
+			/*
+			 * If enabling the interrupt for a BAM-to-BAM pipe,
+			 * clear the existing interrupt status
+			 */
+			(void)bam_pipe_get_and_clear_irq_status(dev->base,
+							   pipe_index);
+		}
+		pipe->state |= BAM_STATE_IRQ;
+		irq_enable = BAM_ENABLE;
+		pipe->polled = false;
+	} else {
+		pipe->state &= ~BAM_STATE_IRQ;
+		irq_enable = BAM_DISABLE;
+		pipe->polled = true;
+		if (poll == 0 && pipe->irq_mask)
+			SPS_INFO("BAM 0x%x pipe %d forced to use polling",
+				 BAM_ID(dev), pipe_index);
+	}
+	if ((pipe->state & BAM_STATE_MTI) == 0)
+		bam_pipe_set_irq(dev->base, pipe_index, irq_enable,
+					 pipe->irq_mask, dev->props.ee);
+	else
+		bam_pipe_set_mti(dev->base, pipe_index, irq_enable,
+					 pipe->irq_mask, pipe->irq_gen_addr);
+
+}
+
+/**
+ * Set BAM pipe parameters
+ *
+ */
+int sps_bam_pipe_set_params(struct sps_bam *dev, u32 pipe_index, u32 options)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+	u32 mask;
+	int wake_up_is_one_shot;
+	int no_queue;
+	int ack_xfers;
+	u32 size;
+	int n;
+
+	/* Capture some options */
+	wake_up_is_one_shot = ((options & SPS_O_WAKEUP_IS_ONESHOT));
+	no_queue = ((options & SPS_O_NO_Q));
+	ack_xfers = ((options & SPS_O_ACK_TRANSFERS));
+
+	/* Create interrupt source mask */
+	mask = 0;
+	for (n = 0; n < ARRAY_SIZE(opt_event_table); n++) {
+		/* Is client registering for this event? */
+		if ((options & opt_event_table[n].option) == 0)
+			continue;	/* No */
+
+		mask |= opt_event_table[n].pipe_irq;
+	}
+
+#ifdef SPS_BAM_STATISTICS
+	/* Is an illegal mode change specified? */
+	if (pipe->sys.desc_wr_count > 0 &&
+	    (no_queue != pipe->sys.no_queue
+	     || ack_xfers != pipe->sys.ack_xfers)) {
+		SPS_ERR("Queue/ack mode change after transfer: "
+			"BAM 0x%x pipe %d opt 0x%x",
+			BAM_ID(dev), pipe_index, options);
+		return SPS_ERROR;
+	}
+#endif /* SPS_BAM_STATISTICS */
+
+	/* Is client setting invalid options for a BAM-to-BAM connection? */
+	if ((pipe->state & BAM_STATE_BAM2BAM) &&
+	    (options & BAM2BAM_O_INVALID)) {
+		SPS_ERR("Invalid option for BAM-to-BAM: BAM 0x%x pipe %d "
+			"opt 0x%x", BAM_ID(dev), pipe_index, options);
+		return SPS_ERROR;
+	}
+
+	/* Allocate descriptor FIFO cache if NO_Q option is disabled */
+	if (!no_queue && pipe->sys.desc_cache == NULL && pipe->num_descs > 0
+	    && (pipe->state & BAM_STATE_BAM2BAM) == 0) {
+		/* Allocate both descriptor cache and user pointer array */
+		size = pipe->num_descs * sizeof(void *);
+		pipe->sys.desc_cache =
+		kzalloc(pipe->desc_size + size, GFP_KERNEL);
+		if (pipe->sys.desc_cache == NULL) {
+			/*** MUST BE LAST POINT OF FAILURE (see below) *****/
+			SPS_ERR("Desc cache error: BAM 0x%x pipe %d: %d",
+				BAM_ID(dev), pipe_index,
+				pipe->desc_size + size);
+			return SPS_ERROR;
+		}
+		pipe->sys.user_ptrs = (void **)(pipe->sys.desc_cache +
+						 pipe->desc_size);
+		pipe->sys.cache_offset = pipe->sys.acked_offset;
+	}
+
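+	/*
+	 * Layout of the cache allocated above: the first desc_size bytes
+	 * shadow the hardware descriptor FIFO, and the num_descs pointers
+	 * that follow (sys.user_ptrs) hold the per-descriptor client cookies
+	 * passed to sps_bam_pipe_transfer_one().
+	 */
+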
+	/*
+	 * No failures beyond this point. Note that malloc() is last point of
+	 * failure, so no free() handling is needed.
+	 */
+
+	/* Enable/disable the pipe's interrupt sources */
+	pipe->irq_mask = mask;
+	pipe_set_irq(dev, pipe_index, (options & SPS_O_POLL));
+
+	/* Store software feature enables */
+	pipe->wake_up_is_one_shot = wake_up_is_one_shot;
+	pipe->sys.no_queue = no_queue;
+	pipe->sys.ack_xfers = ack_xfers;
+
+	return 0;
+}
+
+/**
+ * Enable a BAM pipe
+ *
+ */
+int sps_bam_pipe_enable(struct sps_bam *dev, u32 pipe_index)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+
+	/* Enable the BAM pipe */
+	bam_pipe_enable(dev->base, pipe_index);
+	pipe->state |= BAM_STATE_ENABLED;
+
+	return 0;
+}
+
+/**
+ * Disable a BAM pipe
+ *
+ */
+int sps_bam_pipe_disable(struct sps_bam *dev, u32 pipe_index)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+
+	/* Disable the BAM pipe */
+	bam_pipe_disable(dev->base, pipe_index);
+	pipe->state &= ~BAM_STATE_ENABLED;
+
+	return 0;
+}
+
+/**
+ * Register an event for a BAM pipe
+ *
+ */
+int sps_bam_pipe_reg_event(struct sps_bam *dev,
+			   u32 pipe_index,
+			   struct sps_register_event *reg)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+	struct sps_bam_event_reg *event_reg;
+	int n;
+
+	if (pipe->sys.no_queue && reg->xfer_done != NULL &&
+	    reg->mode != SPS_TRIGGER_CALLBACK) {
+		SPS_ERR("Only callback events support for NO_Q: "
+			"BAM 0x%x pipe %d mode %d",
+			BAM_ID(dev), pipe_index, reg->mode);
+		return SPS_ERROR;
+	}
+
+	for (n = 0; n < ARRAY_SIZE(opt_event_table); n++) {
+		int index;
+
+		/* Is client registering for this event? */
+		if ((reg->options & opt_event_table[n].option) == 0)
+			continue;	/* No */
+
+		index = SPS_EVENT_INDEX(opt_event_table[n].event_id);
+		event_reg = &pipe->sys.event_regs[index];
+		event_reg->xfer_done = reg->xfer_done;
+		event_reg->callback = reg->callback;
+		event_reg->mode = reg->mode;
+		event_reg->user = reg->user;
+	}
+
+	return 0;
+}
+
+/**
+ * Submit a transfer of a single buffer to a BAM pipe
+ *
+ */
+int sps_bam_pipe_transfer_one(struct sps_bam *dev,
+				    u32 pipe_index, u32 addr, u32 size,
+				    void *user, u32 flags)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+	struct sps_iovec *desc;
+	struct sps_iovec iovec;
+	u32 next_write;
+
+	/* Is this a BAM-to-BAM or satellite connection? */
+	if ((pipe->state & (BAM_STATE_BAM2BAM | BAM_STATE_REMOTE))) {
+		SPS_ERR("Transfer on BAM-to-BAM: BAM 0x%x pipe %d",
+			BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+
+	/*
+	 * Client identifier (user pointer) is not supported for
+	 * SPS_O_NO_Q option.
+	 */
+	if (pipe->sys.no_queue && user != NULL) {
+		SPS_ERR("User pointer arg non-NULL: BAM 0x%x pipe %d",
+			BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+
+	/* Determine if descriptor can be queued */
+	next_write = pipe->sys.desc_offset + sizeof(struct sps_iovec);
+	if (next_write >= pipe->desc_size)
+		next_write = 0;
+
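+	/*
+	 * The FIFO behaves as a ring of desc_size / sizeof(struct sps_iovec)
+	 * slots.  One slot is always left unused so that desc_offset ==
+	 * acked_offset unambiguously means "empty"; the pipe is full when
+	 * advancing the write offset would land on acked_offset, so at most
+	 * (slots - 1) descriptors can be outstanding at a time.
+	 */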
+	if (next_write == pipe->sys.acked_offset) {
+		/*
+		 * If pipe is polled and client is not ACK'ing descriptors,
+		 * perform polling operation so that any outstanding ACKs
+		 * can occur.
+		 */
+		if (!pipe->sys.ack_xfers && pipe->polled) {
+			pipe_handler_eot(dev, pipe);
+			if (next_write == pipe->sys.acked_offset) {
+				SPS_DBG("Descriptor FIFO is full for "
+					"BAM 0x%x pipe %d",
+					BAM_ID(dev), pipe_index);
+				return SPS_ERROR;
+			}
+		} else {
+			SPS_DBG("Descriptor FIFO is full for "
+				"BAM 0x%x pipe %d", BAM_ID(dev), pipe_index);
+			return SPS_ERROR;
+		}
+	}
+
+	/* Create descriptor */
+	if (!pipe->sys.no_queue)
+		desc = (struct sps_iovec *) (pipe->sys.desc_cache +
+					      pipe->sys.desc_offset);
+	else
+		desc = &iovec;
+
+	desc->addr = addr;
+	desc->size = size;
+	if ((flags & SPS_IOVEC_FLAG_DEFAULT) == 0) {
+		desc->flags = flags & BAM_IOVEC_FLAG_MASK;
+	} else {
+		if (pipe->mode == SPS_MODE_SRC)
+			desc->flags = SPS_IOVEC_FLAG_INT;
+		else
+			desc->flags = SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT;
+	}
+#ifdef SPS_BAM_STATISTICS
+	if ((flags & SPS_IOVEC_FLAG_INT))
+		pipe->sys.int_flags++;
+	if ((flags & SPS_IOVEC_FLAG_EOT))
+		pipe->sys.eot_flags++;
+#endif /* SPS_BAM_STATISTICS */
+
+	/* Update hardware descriptor FIFO - should result in burst */
+	*((struct sps_iovec *) (pipe->sys.desc_buf + pipe->sys.desc_offset))
+	= *desc;
+
+	/* Record user pointer value */
+	if (!pipe->sys.no_queue) {
+		u32 index = pipe->sys.desc_offset / sizeof(struct sps_iovec);
+		pipe->sys.user_ptrs[index] = user;
+#ifdef SPS_BAM_STATISTICS
+		if (user != NULL)
+			pipe->sys.user_ptrs_count++;
+#endif /* SPS_BAM_STATISTICS */
+	}
+
+	/* Update descriptor ACK offset */
+	pipe->sys.desc_offset = next_write;
+
+#ifdef SPS_BAM_STATISTICS
+	/* Update statistics */
+	pipe->sys.desc_wr_count++;
+#endif /* SPS_BAM_STATISTICS */
+
+	/* Notify pipe */
+	if ((flags & SPS_IOVEC_FLAG_NO_SUBMIT) == 0) {
+		wmb(); /* Memory Barrier */
+		bam_pipe_set_desc_write_offset(dev->base, pipe_index,
+					       next_write);
+	}
+
+	return 0;
+}
+
+/**
+ * Submit a transfer to a BAM pipe
+ *
+ */
+int sps_bam_pipe_transfer(struct sps_bam *dev,
+			 u32 pipe_index, struct sps_transfer *transfer)
+{
+	struct sps_iovec *iovec;
+	u32 count;
+	u32 flags;
+	void *user;
+	int n;
+	int result;
+
+	if (transfer->iovec_count == 0) {
+		SPS_ERR("iovec count zero: BAM 0x%x pipe %d",
+			BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+
+	sps_bam_get_free_count(dev, pipe_index, &count);
+	if (count < transfer->iovec_count) {
+		SPS_ERR("Insufficient free desc: BAM 0x%x pipe %d: %d",
+			BAM_ID(dev), pipe_index, count);
+		return SPS_ERROR;
+	}
+
+	user = NULL;		/* NULL for all except last descriptor */
+	for (n = (int)transfer->iovec_count - 1, iovec = transfer->iovec;
+	    n >= 0; n--, iovec++) {
+		if (n > 0) {
+			/* This is *not* the last descriptor */
+			flags = iovec->flags | SPS_IOVEC_FLAG_NO_SUBMIT;
+		} else {
+			/* This *is* the last descriptor */
+			flags = iovec->flags;
+			user = transfer->user;
+		}
+		result = sps_bam_pipe_transfer_one(dev, pipe_index,
+						 iovec->addr,
+						 iovec->size, user,
+						 flags);
+		if (result)
+			return SPS_ERROR;
+	}
+
+	return 0;
+}
+
+/**
+ * Obtain an event tracking struct
+ *
+ * This function returns the pipe's embedded event struct for the caller to
+ * fill in; no dynamic allocation is performed.
+ *
+ * @pipe - pointer to pipe state
+ *
+ * @event_reg - pointer to event registration
+ *
+ * @return - pointer to event notification struct, or NULL
+ *
+ */
+static struct sps_q_event *alloc_event(struct sps_pipe *pipe,
+					struct sps_bam_event_reg *event_reg)
+{
+	struct sps_q_event *event;
+
+	/* A callback event object is registered, so trigger with payload */
+	event = &pipe->sys.event;
+	memset(event, 0, sizeof(*event));
+
+	return event;
+}
+
+/**
+ * Trigger an event notification
+ *
+ * This function triggers an event notification.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe - pointer to pipe state
+ *
+ * @event_reg - pointer to event registration
+ *
+ * @sps_event - pointer to event struct
+ *
+ */
+static void trigger_event(struct sps_bam *dev,
+			  struct sps_pipe *pipe,
+			  struct sps_bam_event_reg *event_reg,
+			  struct sps_q_event *sps_event)
+{
+	if (sps_event == NULL) {
+		SPS_DBG("sps:trigger_event.sps_event is NULL.");
+		return;
+	}
+
+	if (event_reg->xfer_done) {
+		complete(event_reg->xfer_done);
+		SPS_DBG("sps:trigger_event.done=%d.",
+			event_reg->xfer_done->done);
+	}
+
+	if (event_reg->callback) {
+		event_reg->callback(&sps_event->notify);
+		SPS_DBG("sps:trigger_event.using callback.");
+	}
+
+}
+
+/**
+ * Handle a BAM pipe's generic interrupt sources
+ *
+ * This function creates the event notification for a BAM pipe's
+ *    generic interrupt sources.  The caller of this function must lock the BAM
+ *    device's mutex.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe - pointer to pipe state
+ *
+ * @event_id - event identifier enum
+ *
+ */
+static void pipe_handler_generic(struct sps_bam *dev,
+			       struct sps_pipe *pipe,
+			       enum sps_event event_id)
+{
+	struct sps_bam_event_reg *event_reg;
+	struct sps_q_event *sps_event;
+	int index;
+
+	index = SPS_EVENT_INDEX(event_id);
+	if (index < 0 || index >= SPS_EVENT_INDEX(SPS_EVENT_MAX))
+		return;
+
+	event_reg = &pipe->sys.event_regs[index];
+	sps_event = alloc_event(pipe, event_reg);
+	if (sps_event != NULL) {
+		sps_event->notify.event_id = event_id;
+		sps_event->notify.user = event_reg->user;
+		trigger_event(dev, pipe, event_reg, sps_event);
+	}
+}
+
+/**
+ * Handle a BAM pipe's WAKEUP interrupt sources
+ *
+ * This function creates the event notification for a BAM pipe's
+ *    WAKEUP interrupt source.  The caller of this function must lock the BAM
+ *    device's mutex.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe - pointer to pipe state
+ *
+ */
+static void pipe_handler_wakeup(struct sps_bam *dev, struct sps_pipe *pipe)
+{
+	struct sps_bam_event_reg *event_reg;
+	struct sps_q_event *event;
+	u32 pipe_index = pipe->pipe_index;
+
+	if (pipe->wake_up_is_one_shot) {
+		/* Disable the pipe WAKEUP interrupt source */
+		pipe->irq_mask &= ~BAM_PIPE_IRQ_WAKE;
+		pipe_set_irq(dev, pipe_index, pipe->polled);
+	}
+
+	event_reg = &pipe->sys.event_regs[SPS_EVENT_INDEX(SPS_EVENT_WAKEUP)];
+	event = alloc_event(pipe, event_reg);
+	if (event != NULL) {
+		event->notify.event_id = SPS_EVENT_WAKEUP;
+		event->notify.user = event_reg->user;
+		trigger_event(dev, pipe, event_reg, event);
+	}
+}
+
+/**
+ * Handle a BAM pipe's EOT/INT interrupt sources
+ *
+ * This function creates the event notification for a BAM pipe's EOT interrupt
+ *    source.  The caller of this function must lock the BAM device's mutex.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe - pointer to pipe state
+ *
+ */
+static void pipe_handler_eot(struct sps_bam *dev, struct sps_pipe *pipe)
+{
+	struct sps_bam_event_reg *event_reg;
+	struct sps_q_event *event;
+	struct sps_iovec *desc;
+	struct sps_iovec *cache;
+	void **user;
+	u32 *update_offset;
+	u32 pipe_index = pipe->pipe_index;
+	u32 offset;
+	u32 end_offset;
+	enum sps_event event_id;
+	u32 flags;
+	u32 enabled;
+	int producer = (pipe->mode == SPS_MODE_SRC);
+
+	if (pipe->sys.handler_eot)
+		/*
+		 * This can happen if the pipe is configured for polling
+		 * (IRQ disabled) and callback event generation.
+		 * The client may perform a get_iovec() inside the callback.
+		 */
+		return;
+
+	pipe->sys.handler_eot = true;
+
+	/* Get offset of last descriptor completed by the pipe */
+	end_offset = bam_pipe_get_desc_read_offset(dev->base, pipe_index);
+
+	/* If no queue, then do not generate any events */
+	if (pipe->sys.no_queue) {
+		if (!pipe->sys.ack_xfers) {
+			/* Client is not ACK'ing transfers, so do it now */
+			pipe->sys.acked_offset = end_offset;
+		}
+		pipe->sys.handler_eot = false;
+		return;
+	}
+
+	/*
+	 * Get offset of last descriptor processed by software,
+	 * and update to the last descriptor completed by the pipe
+	 */
+	if (!pipe->sys.ack_xfers) {
+		update_offset = &pipe->sys.acked_offset;
+		offset = *update_offset;
+	} else {
+		update_offset = &pipe->sys.cache_offset;
+		offset = *update_offset;
+	}
+
+	/* Are there any completed descriptors to process? */
+	if (offset == end_offset) {
+		pipe->sys.handler_eot = false;
+		return;
+	}
+
+	/* Determine enabled events */
+	enabled = 0;
+	if ((pipe->irq_mask & SPS_O_EOT))
+		enabled |= SPS_IOVEC_FLAG_EOT;
+
+	if ((pipe->irq_mask & SPS_O_DESC_DONE))
+		enabled |= SPS_IOVEC_FLAG_INT;
+
+	/*
+	 * For producer pipe, update the cached descriptor byte count and flags.
+	 * For consumer pipe, the BAM does not update the descriptors, so just
+	 * use the cached copies.
+	 */
+	if (producer) {
+		/*
+		 * Do copies in a tight loop to increase chance of
+		 * multi-descriptor burst accesses on the bus
+		 */
+		struct sps_iovec *desc_end;
+
+		/* Set starting point for copy */
+		desc = (struct sps_iovec *) (pipe->sys.desc_buf + offset);
+		cache =	(struct sps_iovec *) (pipe->sys.desc_cache + offset);
+
+		/* Fetch all completed descriptors to end of FIFO (wrap) */
+		if (end_offset < offset) {
+			desc_end = (struct sps_iovec *)
+				   (pipe->sys.desc_buf + pipe->desc_size);
+			while (desc < desc_end)
+				*cache++ = *desc++;
+
+			desc = (void *)pipe->sys.desc_buf;
+			cache = (void *)pipe->sys.desc_cache;
+		}
+
+		/* Fetch all remaining completed descriptors (no wrap) */
+		desc_end = (struct sps_iovec *)	(pipe->sys.desc_buf +
+						 end_offset);
+		while (desc < desc_end)
+			*cache++ = *desc++;
+	}
+
+	/* Process all completed descriptors */
+	cache = (struct sps_iovec *) (pipe->sys.desc_cache + offset);
+	user = &pipe->sys.user_ptrs[offset / sizeof(struct sps_iovec)];
+	for (;;) {
+		/*
+		 * Increment offset to next descriptor and update pipe offset
+		 * so a client callback can fetch the I/O vector.
+		 */
+		offset += sizeof(struct sps_iovec);
+		if (offset >= pipe->desc_size)
+			/* Roll to start of descriptor FIFO */
+			offset = 0;
+
+		*update_offset = offset;
+#ifdef SPS_BAM_STATISTICS
+		pipe->sys.desc_rd_count++;
+#endif /* SPS_BAM_STATISTICS */
+
+		/* Did client request notification for this descriptor? */
+		flags = cache->flags & enabled;
+		if (*user != NULL || flags) {
+			int index;
+
+			if ((flags & SPS_IOVEC_FLAG_EOT))
+				event_id = SPS_EVENT_EOT;
+			else
+				event_id = SPS_EVENT_DESC_DONE;
+
+			index = SPS_EVENT_INDEX(event_id);
+			event_reg = &pipe->sys.event_regs[index];
+			event = alloc_event(pipe, event_reg);
+			if (event != NULL) {
+				/*
+				 * Store the descriptor and user pointer
+				 * in the notification
+				 */
+				event->notify.data.transfer.iovec = *cache;
+				event->notify.data.transfer.user = *user;
+
+				event->notify.event_id = event_id;
+				event->notify.user = event_reg->user;
+				trigger_event(dev, pipe, event_reg, event);
+			}
+#ifdef SPS_BAM_STATISTICS
+			if (*user != NULL)
+				pipe->sys.user_found++;
+#endif /* SPS_BAM_STATISTICS */
+		}
+
+		/* Increment to next descriptor */
+		if (offset == end_offset)
+			break;	/* No more descriptors */
+
+		if (offset) {
+			cache++;
+			user++;
+		} else {
+			cache = (void *)pipe->sys.desc_cache;
+			user = pipe->sys.user_ptrs;
+		}
+	}
+
+	pipe->sys.handler_eot = false;
+}
+
+/**
+ * Handle a BAM pipe's interrupt sources
+ *
+ * This function handles a BAM pipe's interrupt sources.
+ *    The caller of this function must lock the BAM device's mutex.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe - pointer to pipe state
+ *
+ * @return void
+ *
+ */
+static void pipe_handler(struct sps_bam *dev, struct sps_pipe *pipe)
+{
+	u32 pipe_index;
+	u32 status;
+	enum sps_event event_id;
+
+	/* Get interrupt sources and ack all */
+	pipe_index = pipe->pipe_index;
+	status = bam_pipe_get_and_clear_irq_status(dev->base, pipe_index);
+
+	SPS_DBG("sps:pipe_handler.bam 0x%x.pipe %d.status=0x%x.",
+			BAM_ID(dev), pipe_index, status);
+
+	/* Check for enabled interrupt sources */
+	status &= pipe->irq_mask;
+	if (status == 0)
+		/* No enabled interrupt sources are active */
+		return;
+
+	/*
+	 * Process the interrupt sources in order of frequency of occurrence.
+	 * Check for early exit opportunities.
+	 */
+
+	if ((status & (SPS_O_EOT | SPS_O_DESC_DONE)) &&
+	    (pipe->state & BAM_STATE_BAM2BAM) == 0) {
+		pipe_handler_eot(dev, pipe);
+		if (pipe->sys.no_queue) {
+			/*
+			 * EOT handler will not generate any event if there
+			 * is no queue,
+			 * so generate "empty" (no descriptor) event
+			 */
+			if ((status & SPS_O_EOT))
+				event_id = SPS_EVENT_EOT;
+			else
+				event_id = SPS_EVENT_DESC_DONE;
+
+			pipe_handler_generic(dev, pipe, event_id);
+		}
+		status &= ~(SPS_O_EOT | SPS_O_DESC_DONE);
+		if (status == 0)
+			return;
+	}
+
+	if ((status & SPS_O_WAKEUP)) {
+		pipe_handler_wakeup(dev, pipe);
+		status &= ~SPS_O_WAKEUP;
+		if (status == 0)
+			return;
+	}
+
+	if ((status & SPS_O_INACTIVE)) {
+		pipe_handler_generic(dev, pipe, SPS_EVENT_INACTIVE);
+		status &= ~SPS_O_INACTIVE;
+		if (status == 0)
+			return;
+	}
+
+	if ((status & SPS_O_OUT_OF_DESC)) {
+		pipe_handler_generic(dev, pipe,
+					     SPS_EVENT_OUT_OF_DESC);
+		status &= ~SPS_O_OUT_OF_DESC;
+		if (status == 0)
+			return;
+	}
+
+	if ((status & SPS_EVENT_ERROR))
+		pipe_handler_generic(dev, pipe, SPS_EVENT_ERROR);
+}
+
+/**
+ * Get a BAM pipe event
+ *
+ */
+int sps_bam_pipe_get_event(struct sps_bam *dev,
+			   u32 pipe_index, struct sps_event_notify *notify)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+	struct sps_q_event *event_queue;
+
+	if (pipe->sys.no_queue) {
+		SPS_ERR("Invalid connection for event: "
+			"BAM 0x%x pipe %d context 0x%x",
+			BAM_ID(dev), pipe_index, (u32) pipe);
+		notify->event_id = SPS_EVENT_INVALID;
+		return SPS_ERROR;
+	}
+
+	/* If pipe is polled, perform polling operation */
+	if (pipe->polled && (pipe->state & BAM_STATE_BAM2BAM) == 0)
+		pipe_handler_eot(dev, pipe);
+
+	/* Pull an event off the synchronous event queue */
+	if (list_empty(&pipe->sys.events_q)) {
+		event_queue = NULL;
+		SPS_DBG("sps:events_q of bam 0x%x is empty.", BAM_ID(dev));
+	} else {
+		SPS_DBG("sps:events_q of bam 0x%x is not empty.", BAM_ID(dev));
+		event_queue =
+		list_first_entry(&pipe->sys.events_q, struct sps_q_event,
+				 list);
+		list_del(&event_queue->list);
+	}
+
+	/* Update client's event buffer */
+	if (event_queue == NULL) {
+		/* No event queued, so set client's event to "invalid" */
+		notify->event_id = SPS_EVENT_INVALID;
+	} else {
+		/*
+		 * Copy event into client's buffer and return the event
+		 * to the pool
+		 */
+		*notify = event_queue->notify;
+		kfree(event_queue);
+#ifdef SPS_BAM_STATISTICS
+		pipe->sys.get_events++;
+#endif /* SPS_BAM_STATISTICS */
+	}
+
+	return 0;
+}
+
+/**
+ * Get processed I/O vector
+ */
+int sps_bam_pipe_get_iovec(struct sps_bam *dev, u32 pipe_index,
+			   struct sps_iovec *iovec)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+	struct sps_iovec *desc;
+	u32 read_offset;
+
+	/* Is this a valid pipe configured for get_iovec use? */
+	if (!pipe->sys.ack_xfers ||
+	    (pipe->state & BAM_STATE_BAM2BAM) != 0 ||
+	    (pipe->state & BAM_STATE_REMOTE)) {
+		return SPS_ERROR;
+	}
+
+	/* If pipe is polled and queue is enabled, perform polling operation */
+	if (pipe->polled && !pipe->sys.no_queue)
+		pipe_handler_eot(dev, pipe);
+
+	/* Is there a completed descriptor? */
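+	/*
+	 * With no queue, the hardware read pointer is consulted directly;
+	 * otherwise the cache offset maintained by the EOT handler is used.
+	 */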
+	if (pipe->sys.no_queue)
+		read_offset =
+		bam_pipe_get_desc_read_offset(dev->base, pipe_index);
+	else
+		read_offset = pipe->sys.cache_offset;
+
+	if (read_offset == pipe->sys.acked_offset) {
+		/* No, so clear the iovec to indicate FIFO is empty */
+		memset(iovec, 0, sizeof(*iovec));
+		return 0;
+	}
+
+	/* Fetch next descriptor */
+	desc = (struct sps_iovec *) (pipe->sys.desc_buf +
+				     pipe->sys.acked_offset);
+	*iovec = *desc;
+#ifdef SPS_BAM_STATISTICS
+	pipe->sys.get_iovecs++;
+#endif /* SPS_BAM_STATISTICS */
+
+	/* Update read/ACK offset */
+	pipe->sys.acked_offset += sizeof(struct sps_iovec);
+	if (pipe->sys.acked_offset >= pipe->desc_size)
+		pipe->sys.acked_offset = 0;
+
+	return 0;
+}
+
+/**
+ * Determine whether a BAM pipe descriptor FIFO is empty
+ *
+ */
+int sps_bam_pipe_is_empty(struct sps_bam *dev, u32 pipe_index,
+				u32 *empty)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+	u32 end_offset;
+	u32 acked_offset;
+
+	/* Is this a satellite connection? */
+	if ((pipe->state & BAM_STATE_REMOTE)) {
+		SPS_ERR("Is empty on remote: BAM 0x%x pipe %d",
+			BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+
+	/* Get offset of last descriptor completed by the pipe */
+	end_offset = bam_pipe_get_desc_read_offset(dev->base, pipe_index);
+
+	if ((pipe->state & BAM_STATE_BAM2BAM) == 0)
+		/* System mode */
+		acked_offset = pipe->sys.acked_offset;
+	else
+		/* BAM-to-BAM */
+		acked_offset = bam_pipe_get_desc_write_offset(dev->base,
+							  pipe_index);
+
+	/* Determine descriptor FIFO state */
+	if (end_offset == acked_offset)
+		*empty = true;
+	else
+		*empty = false;
+
+	return 0;
+}
+
+/**
+ * Get number of free slots in a BAM pipe descriptor FIFO
+ *
+ */
+int sps_bam_get_free_count(struct sps_bam *dev, u32 pipe_index,
+				 u32 *count)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+	u32 next_write;
+	u32 free;
+
+	/* Is this a BAM-to-BAM or satellite connection? */
+	if ((pipe->state & (BAM_STATE_BAM2BAM | BAM_STATE_REMOTE))) {
+		SPS_ERR("Free count on BAM-to-BAM or remote: BAM 0x%x pipe %d",
+			BAM_ID(dev), pipe_index);
+		*count = 0;
+		return SPS_ERROR;
+	}
+
+	/* Determine descriptor FIFO state */
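+	/*
+	 * One descriptor slot is always left unused so that a full FIFO can
+	 * be distinguished from an empty one.
+	 */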
+	next_write = pipe->sys.desc_offset + sizeof(struct sps_iovec);
+	if (next_write >= pipe->desc_size)
+		next_write = 0;
+
+	if (pipe->sys.acked_offset >= next_write)
+		free = pipe->sys.acked_offset - next_write;
+	else
+		free = pipe->desc_size - next_write + pipe->sys.acked_offset;
+
+	free /= sizeof(struct sps_iovec);
+	*count = free;
+
+	return 0;
+}
+
+/**
+ * Set BAM pipe to satellite ownership
+ *
+ */
+int sps_bam_set_satellite(struct sps_bam *dev, u32 pipe_index)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+
+	/*
+	 * Switching to satellite control is only supported on the processor
+	 * that controls the BAM global config on multi-EE BAMs.
+	 */
+	if ((dev->props.manage & SPS_BAM_MGR_MULTI_EE) == 0 ||
+	    (dev->props.manage & SPS_BAM_MGR_DEVICE_REMOTE)) {
+		SPS_ERR("Cannot grant satellite control to BAM 0x%x pipe %d",
+			BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+
+	/* Is this pipe locally controlled? */
+	if ((dev->pipe_active_mask & (1UL << pipe_index)) == 0) {
+		SPS_ERR("BAM 0x%x pipe %d not local and active",
+			BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+
+	/* Disable local interrupts for this pipe */
+	if (!pipe->polled)
+		bam_pipe_set_irq(dev->base, pipe_index, BAM_DISABLE,
+					 pipe->irq_mask, dev->props.ee);
+
+	if (BAM_VERSION_MTI_SUPPORT(dev->version)) {
+		/*
+		 * Set pipe to MTI interrupt mode.
+		 * Must be performed after IRQ disable,
+		 * because it is necessary to re-enable the IRQ to enable
+		 * MTI generation.
+		 * Set both pipe IRQ mask and MTI dest address to zero.
+		 */
+		if ((pipe->state & BAM_STATE_MTI) == 0 || pipe->polled) {
+			bam_pipe_satellite_mti(dev->base, pipe_index, 0,
+						       dev->props.ee);
+			pipe->state |= BAM_STATE_MTI;
+		}
+	}
+
+	/* Indicate satellite control */
+	list_del(&pipe->list);
+	dev->pipe_active_mask &= ~(1UL << pipe_index);
+	dev->pipe_remote_mask |= pipe->pipe_index_mask;
+	pipe->state |= BAM_STATE_REMOTE;
+
+	return 0;
+}
+
+/**
+ * Perform BAM pipe timer control
+ *
+ */
+int sps_bam_pipe_timer_ctrl(struct sps_bam *dev,
+			    u32 pipe_index,
+			    struct sps_timer_ctrl *timer_ctrl,
+			    struct sps_timer_result *timer_result)
+{
+	enum bam_pipe_timer_mode mode;
+	int result = 0;
+
+	/* Is this pipe locally controlled? */
+	if ((dev->pipe_active_mask & (1UL << pipe_index)) == 0) {
+		SPS_ERR("BAM 0x%x pipe %d not local and active",
+			BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+
+	/* Perform the timer operation */
+	switch (timer_ctrl->op) {
+	case SPS_TIMER_OP_CONFIG:
+		mode = (timer_ctrl->mode == SPS_TIMER_MODE_ONESHOT) ?
+			BAM_PIPE_TIMER_ONESHOT :
+			BAM_PIPE_TIMER_PERIODIC;
+		bam_pipe_timer_config(dev->base, pipe_index, mode,
+				    timer_ctrl->timeout_msec * 10);
+		break;
+	case SPS_TIMER_OP_RESET:
+		bam_pipe_timer_reset(dev->base, pipe_index);
+		break;
+	case SPS_TIMER_OP_READ:
+		break;
+	default:
+		result = SPS_ERROR;
+		break;
+	}
+
+	/* Provide the current timer value */
+	if (timer_result != NULL)
+		timer_result->current_timer =
+			bam_pipe_timer_get_count(dev->base, pipe_index);
+
+	return result;
+}
+
diff --git a/drivers/platform/msm/sps/sps_bam.h b/drivers/platform/msm/sps/sps_bam.h
new file mode 100644
index 0000000..f09948e
--- /dev/null
+++ b/drivers/platform/msm/sps/sps_bam.h
@@ -0,0 +1,547 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Function and data structure declarations for SPS BAM handling.
+ */
+
+
+#ifndef _SPSBAM_H_
+#define _SPSBAM_H_
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+
+#include "spsi.h"
+
+#define BAM_MAX_PIPES              31
+#define BAM_HANDLE_INVALID         0
+
+enum bam_irq {
+	BAM_DEV_IRQ_RDY_TO_SLEEP = 0x00000001,
+	BAM_DEV_IRQ_HRESP_ERROR = 0x00000002,
+	BAM_DEV_IRQ_ERROR = 0x00000004,
+};
+
+/* Pipe interrupt mask */
+enum bam_pipe_irq {
+	/* BAM finishes descriptor which has INT bit selected */
+	BAM_PIPE_IRQ_DESC_INT = 0x00000001,
+	/* Inactivity timer expires */
+	BAM_PIPE_IRQ_TIMER = 0x00000002,
+	/* Wakeup peripheral (e.g. USB) */
+	BAM_PIPE_IRQ_WAKE = 0x00000004,
+	/* Producer - no free space for adding a descriptor */
+	/* Consumer - no descriptors for processing */
+	BAM_PIPE_IRQ_OUT_OF_DESC = 0x00000008,
+	/* Pipe Error interrupt */
+	BAM_PIPE_IRQ_ERROR = 0x00000010,
+	/* End-Of-Transfer */
+	BAM_PIPE_IRQ_EOT = 0x00000020,
+};
+
+/* Halt Type */
+enum bam_halt {
+	BAM_HALT_OFF = 0,
+	BAM_HALT_ON = 1,
+};
+
+/* Threshold values of the DMA channels */
+enum bam_dma_thresh_dma {
+	BAM_DMA_THRESH_512 = 0x3,
+	BAM_DMA_THRESH_256 = 0x2,
+	BAM_DMA_THRESH_128 = 0x1,
+	BAM_DMA_THRESH_64 = 0x0,
+};
+
+/* Weight values of the DMA channels */
+enum bam_dma_weight_dma {
+	BAM_DMA_WEIGHT_HIGH = 7,
+	BAM_DMA_WEIGHT_MED = 3,
+	BAM_DMA_WEIGHT_LOW = 1,
+	BAM_DMA_WEIGHT_DEFAULT = BAM_DMA_WEIGHT_LOW,
+	BAM_DMA_WEIGHT_DISABLE = 0,
+};
+
+
+/* Invalid pipe index value */
+#define SPS_BAM_PIPE_INVALID  ((u32)(-1))
+
+/* Parameters for sps_bam_pipe_connect() */
+struct sps_bam_connect_param {
+	/* which end point must be initialized */
+	enum sps_mode mode;
+
+	/* OR'd connection end point options (see SPS_O defines) */
+	u32 options;
+
+	/* SETPEND/MTI interrupt generation parameters */
+	u32 irq_gen_addr;
+	u32 irq_gen_data;
+
+};
+
+/* Event registration struct */
+struct sps_bam_event_reg {
+	/* Client's event object handle */
+	struct completion *xfer_done;
+	void (*callback)(struct sps_event_notify *notify);
+
+	/* Event trigger mode */
+	enum sps_trigger mode;
+
+	/* User pointer that will be provided in event payload data */
+	void *user;
+
+};
+
+/* Descriptor FIFO cache entry */
+struct sps_bam_desc_cache {
+	struct sps_iovec iovec;
+	void *user; /* User pointer registered with this transfer */
+};
+
+/* Forward declaration */
+struct sps_bam;
+
+/* System mode control */
+struct sps_bam_sys_mode {
+	/* Descriptor FIFO control */
+	u8 *desc_buf; /* Descriptor FIFO for BAM pipe */
+	u32 desc_offset; /* Next new descriptor to be written to hardware */
+	u32 acked_offset; /* Next descriptor to be retired by software */
+
+	/* Descriptor cache control (!no_queue only) */
+	u8 *desc_cache; /* Software cache of descriptor FIFO contents */
+	u32 cache_offset; /* Next descriptor to be cached (ack_xfers only) */
+
+	/* User pointers associated with cached descriptors */
+	void **user_ptrs;
+
+	/* Event handling */
+	struct sps_bam_event_reg event_regs[SPS_EVENT_INDEX(SPS_EVENT_MAX)];
+	struct list_head events_q;
+
+	struct sps_q_event event;	/* Temp storage for event creation */
+	int no_queue;	/* Set if no descriptor/event queue is maintained */
+	int ack_xfers;	/* Whether client must ACK all descriptors */
+	int handler_eot; /* Whether EOT handling is in progress (debug) */
+
+	/* Statistics */
+#ifdef SPS_BAM_STATISTICS
+	u32 desc_wr_count;
+	u32 desc_rd_count;
+	u32 user_ptrs_count;
+	u32 user_found;
+	u32 int_flags;
+	u32 eot_flags;
+	u32 callback_events;
+	u32 wait_events;
+	u32 queued_events;
+	u32 get_events;
+	u32 get_iovecs;
+#endif /* SPS_BAM_STATISTICS */
+};
+
+/* BAM pipe descriptor */
+struct sps_pipe {
+	struct list_head list;
+
+	/* Client state */
+	u32 client_state;
+	struct sps_bam *bam;
+	struct sps_connect connect;
+	const struct sps_connection *map;
+
+	/* Pipe parameters */
+	u32 state;
+	u32 pipe_index;
+	u32 pipe_index_mask;
+	u32 irq_mask;
+	int polled;
+	u32 irq_gen_addr;
+	enum sps_mode mode;
+	u32 num_descs; /* Size (number of elements) of descriptor FIFO */
+	u32 desc_size; /* Size (bytes) of descriptor FIFO */
+	int wake_up_is_one_shot; /* Whether WAKEUP event is a one-shot or not */
+
+	/* System mode control */
+	struct sps_bam_sys_mode sys;
+
+};
+
+/* BAM device descriptor */
+struct sps_bam {
+	struct list_head list;
+
+	/* BAM device properties, including connection defaults */
+	struct sps_bam_props props;
+
+	/* BAM device state */
+	u32 state;
+	struct mutex lock;
+	void *base; /* BAM virtual base address */
+	u32 version;
+	spinlock_t isr_lock;
+
+	/* Pipe state */
+	u32 pipe_active_mask;
+	u32 pipe_remote_mask;
+	struct sps_pipe *pipes[BAM_MAX_PIPES];
+	struct list_head pipes_q;
+
+	/* Statistics */
+	u32 irq_from_disabled_pipe;
+	u32 event_trigger_failures;
+
+};
+
+/**
+ * BAM driver initialization
+ *
+ * This function initializes the BAM driver.
+ *
+ * @options - driver options bitflags (see SPS_OPT_*)
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_driver_init(u32 options);
+
+/**
+ * BAM device initialization
+ *
+ * This function initializes a BAM device.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_device_init(struct sps_bam *dev);
+
+/**
+ * BAM device de-initialization
+ *
+ * This function de-initializes a BAM device.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_device_de_init(struct sps_bam *dev);
+
+/**
+ * BAM device reset
+ *
+ * This function resets a BAM device.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_reset(struct sps_bam *dev);
+
+/**
+ * BAM device enable
+ *
+ * This function enables a BAM device.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_enable(struct sps_bam *dev);
+
+/**
+ * BAM device disable
+ *
+ * This function disables a BAM device.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_disable(struct sps_bam *dev);
+
+/**
+ * Allocate a BAM pipe
+ *
+ * This function allocates a BAM pipe.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - client-specified pipe index, or SPS_BAM_PIPE_INVALID if
+ *    any available pipe is acceptable
+ *
+ * @return - allocated pipe index, or SPS_BAM_PIPE_INVALID on error
+ *
+ */
+u32 sps_bam_pipe_alloc(struct sps_bam *dev, u32 pipe_index);
+
+/**
+ * Free a BAM pipe
+ *
+ * This function frees a BAM pipe.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ */
+void sps_bam_pipe_free(struct sps_bam *dev, u32 pipe_index);
+
+/**
+ * Establish BAM pipe connection
+ *
+ * This function establishes a connection for a BAM pipe (end point).
+ *
+ * @client - pointer to client pipe state struct
+ *
+ * @params - connection parameters
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_connect(struct sps_pipe *client,
+			const struct sps_bam_connect_param *params);
+
+/**
+ * Disconnect a BAM pipe connection
+ *
+ * This function disconnects a connection for a BAM pipe (end point).
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_disconnect(struct sps_bam *dev, u32 pipe_index);
+
+/**
+ * Set BAM pipe parameters
+ *
+ * This function sets parameters for a BAM pipe.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @options - bitflag options (see SPS_O_*)
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_set_params(struct sps_bam *dev, u32 pipe_index, u32 options);
+
+/**
+ * Enable a BAM pipe
+ *
+ * This function enables a BAM pipe.  Note that this function
+ *    is separate from the pipe connect function to allow proper
+ *    sequencing of consumer enable followed by producer enable.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_enable(struct sps_bam *dev, u32 pipe_index);
+
+/**
+ * Disable a BAM pipe
+ *
+ * This function disables a BAM pipe.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_disable(struct sps_bam *dev, u32 pipe_index);
+
+/**
+ * Register an event for a BAM pipe
+ *
+ * This function registers an event for a BAM pipe.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @reg - pointer to event registration struct
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_reg_event(struct sps_bam *dev, u32 pipe_index,
+			   struct sps_register_event *reg);
+
+/**
+ * Submit a transfer of a single buffer to a BAM pipe
+ *
+ * This function submits a transfer of a single buffer to a BAM pipe.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @addr - physical address of buffer to transfer
+ *
+ * @size - number of bytes to transfer
+ *
+ * @user - user pointer to register for event
+ *
+ * @flags - descriptor flags (see SPS_IOVEC_FLAG defines)
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_transfer_one(struct sps_bam *dev, u32 pipe_index, u32 addr,
+			      u32 size, void *user, u32 flags);
+
+/**
+ * Submit a transfer to a BAM pipe
+ *
+ * This function submits a transfer to a BAM pipe.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @transfer - pointer to transfer struct
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_transfer(struct sps_bam *dev, u32 pipe_index,
+			 struct sps_transfer *transfer);
+
+/**
+ * Get a BAM pipe event
+ *
+ * This function polls for a BAM pipe event.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @notify - pointer to event notification struct
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_get_event(struct sps_bam *dev, u32 pipe_index,
+			   struct sps_event_notify *notify);
+
+/**
+ * Get processed I/O vector
+ *
+ * This function fetches the next processed I/O vector.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @iovec - Pointer to I/O vector struct (output).
+ *   This struct will be zeroed if there are no more processed I/O vectors.
+ *
+ * @return 0 on success, negative value on error
+ */
+int sps_bam_pipe_get_iovec(struct sps_bam *dev, u32 pipe_index,
+			   struct sps_iovec *iovec);
+
+/**
+ * Determine whether a BAM pipe descriptor FIFO is empty
+ *
+ * This function returns the empty state of a BAM pipe descriptor FIFO.
+ *
+ * The pipe mutex must be locked before calling this function.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @empty - pointer to client's empty status word (boolean)
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_is_empty(struct sps_bam *dev, u32 pipe_index, u32 *empty);
+
+/**
+ * Get number of free slots in a BAM pipe descriptor FIFO
+ *
+ * This function returns the number of free slots in a BAM pipe descriptor FIFO.
+ *
+ * The pipe mutex must be locked before calling this function.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @count - pointer to count status
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_get_free_count(struct sps_bam *dev, u32 pipe_index, u32 *count);
+
+/**
+ * Set BAM pipe to satellite ownership
+ *
+ * This function sets the BAM pipe to satellite ownership.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_set_satellite(struct sps_bam *dev, u32 pipe_index);
+
+/**
+ * Perform BAM pipe timer control
+ *
+ * This function performs BAM pipe timer control operations.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @timer_ctrl - Pointer to timer control specification
+ *
+ * @timer_result - Pointer to buffer for timer operation result.
+ *    This argument can be NULL if no result is expected for the operation.
+ *    If non-NULL, the current timer value will always be provided.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_timer_ctrl(struct sps_bam *dev, u32 pipe_index,
+			    struct sps_timer_ctrl *timer_ctrl,
+			    struct sps_timer_result *timer_result);
+#endif	/* _SPSBAM_H_ */
diff --git a/drivers/platform/msm/sps/sps_core.h b/drivers/platform/msm/sps/sps_core.h
new file mode 100644
index 0000000..5bd7c65
--- /dev/null
+++ b/drivers/platform/msm/sps/sps_core.h
@@ -0,0 +1,107 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Function and data structure declarations.
+ */
+
+#ifndef _SPS_CORE_H_
+#define _SPS_CORE_H_
+
+#include <linux/types.h>	/* u32 */
+#include <linux/mutex.h>	/* mutex */
+#include <linux/list.h>		/* list_head */
+
+#include "spsi.h"
+#include "sps_bam.h"
+
+/* Connection state definitions */
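+/*
+ * The low 24 bits of a state value hold the ASCII signature "SPS";
+ * the state number itself is kept in the high byte.
+ */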
+#define SPS_STATE_DEF(x)   ('S' | ('P' << 8) | ('S' << 16) | ((x) << 24))
+#define IS_SPS_STATE_OK(x) \
+	(((x)->client_state & 0x00ffffff) == SPS_STATE_DEF(0))
+
+/* Configuration indicating satellite connection */
+#define SPS_CONFIG_SATELLITE  0x11111111
+
+/* Client connection state */
+#define SPS_STATE_DISCONNECT  0
+#define SPS_STATE_ALLOCATE    SPS_STATE_DEF(1)
+#define SPS_STATE_CONNECT     SPS_STATE_DEF(2)
+#define SPS_STATE_ENABLE      SPS_STATE_DEF(3)
+#define SPS_STATE_DISABLE     SPS_STATE_DEF(4)
+
+/* Connection mapping control struct */
+struct sps_rm {
+	struct list_head connections_q;
+	struct mutex lock;
+};
+
+/**
+ * Find the BAM device from the handle
+ *
+ * This function finds a BAM device in the BAM registration list that
+ * matches the specified device handle.
+ *
+ * @h - device handle of the BAM
+ *
+ * @return - pointer to the BAM device struct, or NULL on error
+ *
+ */
+struct sps_bam *sps_h2bam(u32 h);
+
+/**
+ * Initialize resource manager module
+ *
+ * This function initializes the resource manager module.
+ *
+ * @rm - pointer to resource manager struct
+ *
+ * @options - driver options bitflags (see SPS_OPT_*)
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_rm_init(struct sps_rm *rm, u32 options);
+
+/**
+ * De-initialize resource manager module
+ *
+ * This function de-initializes the resource manager module.
+ *
+ */
+void sps_rm_de_init(void);
+
+/**
+ * Initialize client state context
+ *
+ * This function initializes a client state context struct.
+ *
+ * @connect - pointer to client connection state struct
+ *
+ */
+void sps_rm_config_init(struct sps_connect *connect);
+
+/**
+ * Process connection state change
+ *
+ * This function processes a connection state change.
+ *
+ * @pipe - pointer to pipe context
+ *
+ * @state - new state for connection
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_rm_state_change(struct sps_pipe *pipe, u32 state);
+
+#endif				/* _SPS_CORE_H_ */
diff --git a/drivers/platform/msm/sps/sps_dma.c b/drivers/platform/msm/sps/sps_dma.c
new file mode 100644
index 0000000..9f42403
--- /dev/null
+++ b/drivers/platform/msm/sps/sps_dma.c
@@ -0,0 +1,896 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* BAM-DMA Manager. */
+
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+
+#include <linux/memory.h>	/* memset */
+
+#include "spsi.h"
+#include "bam.h"
+#include "sps_bam.h"		/* bam_dma_thresh_dma */
+#include "sps_core.h"		/* sps_h2bam() */
+
+/**
+ * registers
+ */
+
+#define DMA_ENBL			(0x00000000)
+#define DMA_CHNL_CONFIG(n)		(0x00000004 + 4 * (n))
+#define DMA_CONFIG			(0x00000040)
+
+/**
+ * masks
+ */
+
+/* DMA_CHNL_CONFIG(n) */
+#define DMA_CHNL_HALT_DONE		0x10000
+#define DMA_CHNL_HALT			0x1000
+#define DMA_CHNL_ENABLE                 0x100
+#define DMA_CHNL_ACT_THRESH             0x30
+#define DMA_CHNL_WEIGHT                 0x7
+
+/* DMA_CONFIG */
+#define TESTBUS_SELECT                  0x3
+
+/**
+ *
+ * Write register with debug info.
+ *
+ * @base - bam base virtual address.
+ * @offset - register offset.
+ * @val - value to write.
+ *
+ */
+static inline void dma_write_reg(void *base, u32 offset, u32 val)
+{
+	iowrite32(val, base + offset);
+	SPS_DBG("bamdma: write reg 0x%x w_val 0x%x.", offset, val);
+}
+
+/**
+ * Write register masked field with debug info.
+ *
+ * @base - bam base virtual address.
+ * @offset - register offset.
+ * @mask - register bitmask.
+ * @val - value to write.
+ *
+ */
+static inline void dma_write_reg_field(void *base, u32 offset,
+				       const u32 mask, u32 val)
+{
+	u32 shift = find_first_bit((void *)&mask, 32);
+	u32 tmp = ioread32(base + offset);
+
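+	/* Position the value at the mask's lowest set bit */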
+	tmp &= ~mask;		/* clear written bits */
+	val = tmp | (val << shift);
+	iowrite32(val, base + offset);
+	SPS_DBG("bamdma: write reg 0x%x w_val 0x%x.", offset, val);
+}
+
+/* Round max number of pipes to nearest multiple of 2 */
+#define DMA_MAX_PIPES         ((BAM_MAX_PIPES / 2) * 2)
+
+/* Maximum number of BAM-DMAs supported */
+#define MAX_BAM_DMA_DEVICES   1
+
+/* Maximum number of BAMs that will be registered */
+#define MAX_BAM_DMA_BAMS      1
+
+/* Pipe enable check values */
+#define DMA_PIPES_STATE_DIFF     0
+#define DMA_PIPES_BOTH_DISABLED  1
+#define DMA_PIPES_BOTH_ENABLED   2
+
+/* Even pipe is tx/dest/input/write, odd pipe is rx/src/output/read */
+#define DMA_PIPE_IS_DEST(p)   (((p) & 1) == 0)
+#define DMA_PIPE_IS_SRC(p)    (((p) & 1) != 0)
+
+/* BAM DMA pipe state */
+enum bamdma_pipe_state {
+	PIPE_INACTIVE = 0,
+	PIPE_ACTIVE
+};
+
+/* BAM DMA channel state */
+enum bamdma_chan_state {
+	DMA_CHAN_STATE_FREE = 0,
+	DMA_CHAN_STATE_ALLOC_EXT,	/* Client allocation */
+	DMA_CHAN_STATE_ALLOC_INT	/* Internal (resource mgr) allocation */
+};
+
+struct bamdma_chan {
+	/* Allocation state */
+	enum bamdma_chan_state state;
+
+	/* BAM DMA channel configuration parameters */
+	u32 threshold;
+	enum sps_dma_priority priority;
+
+	/* HWIO channel configuration parameters */
+	enum bam_dma_thresh_dma thresh;
+	enum bam_dma_weight_dma weight;
+
+};
+
+/* BAM DMA device state */
+struct bamdma_device {
+	/* BAM-DMA device state */
+	int enabled;
+	int local;
+
+	/* BAM device state */
+	struct sps_bam *bam;
+
+	/* BAM handle, for deregistration */
+	u32 h;
+
+	/* BAM DMA device virtual mapping */
+	void *virt_addr;
+	int virtual_mapped;
+	u32 phys_addr;
+	void *hwio;
+
+	/* BAM DMA pipe/channel state */
+	u32 num_pipes;
+	enum bamdma_pipe_state pipes[DMA_MAX_PIPES];
+	struct bamdma_chan chans[DMA_MAX_PIPES / 2];
+
+};
+
+/* BAM-DMA devices */
+static struct bamdma_device bam_dma_dev[MAX_BAM_DMA_DEVICES];
+static struct mutex bam_dma_lock;
+
+/*
+ * The BAM DMA module registers all BAMs in the BSP properties, but only
+ * uses the first BAM-DMA device for allocations.  References to the others
+ * are stored in the following data array.
+ */
+static int num_bams;
+static u32 bam_handles[MAX_BAM_DMA_BAMS];
+
+/**
+ * Find BAM-DMA device
+ *
+ * This function finds the BAM-DMA device associated with the BAM handle.
+ *
+ * @h - BAM handle
+ *
+ * @return - pointer to BAM-DMA device, or NULL on error
+ *
+ */
+static struct bamdma_device *sps_dma_find_device(u32 h)
+{
+	return &bam_dma_dev[0];
+}
+
+/**
+ * BAM DMA device enable
+ *
+ * This function enables a BAM DMA device and the associated BAM.
+ *
+ * @dev - pointer to BAM DMA device context
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_dma_device_enable(struct bamdma_device *dev)
+{
+	if (dev->enabled)
+		return 0;
+
+	/*
+	 *  If the BAM-DMA device is locally controlled then enable BAM-DMA
+	 *  device
+	 */
+	if (dev->local)
+		dma_write_reg(dev->virt_addr, DMA_ENBL, 1);
+
+	/* Enable BAM device */
+	if (sps_bam_enable(dev->bam)) {
+		SPS_ERR("Failed to enable BAM DMA's BAM: %x", dev->phys_addr);
+		return SPS_ERROR;
+	}
+
+	dev->enabled = true;
+
+	return 0;
+}
+
+/**
+ * BAM DMA device disable
+ *
+ * This function disables a BAM DMA device.
+ *
+ * @dev - pointer to BAM DMA device context
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_dma_device_disable(struct bamdma_device *dev)
+{
+	u32 pipe_index;
+
+	if (!dev->enabled)
+		return 0;
+
+	/* Do not disable if channels active */
+	for (pipe_index = 0; pipe_index < dev->num_pipes; pipe_index++) {
+		if (dev->pipes[pipe_index] != PIPE_INACTIVE)
+			break;
+	}
+
+	if (pipe_index < dev->num_pipes) {
+		SPS_ERR("Failed to disable BAM-DMA %x: channels are active",
+			dev->phys_addr);
+		return SPS_ERROR;
+	}
+
+	dev->enabled = false;
+
+	/* Disable BAM device */
+	if (sps_bam_disable(dev->bam)) {
+		SPS_ERR("Failed to disable BAM-DMA %x BAM", dev->phys_addr);
+		return SPS_ERROR;
+	}
+
+	/* Is the BAM-DMA device locally controlled? */
+	if (dev->local)
+		/* Disable BAM-DMA device */
+		dma_write_reg(dev->virt_addr, DMA_ENBL, 0);
+
+	return 0;
+}
+
+/**
+ * Initialize BAM DMA device
+ *
+ */
+int sps_dma_device_init(u32 h)
+{
+	struct bamdma_device *dev;
+	struct sps_bam_props *props;
+	u32 chan;
+	int result = SPS_ERROR;
+
+	mutex_lock(&bam_dma_lock);
+
+	/* Find a free BAM-DMA device slot */
+	dev = NULL;
+	if (bam_dma_dev[0].bam != NULL) {
+		SPS_ERR("BAM-DMA BAM device already initialized.");
+		goto exit_err;
+	} else {
+		dev = &bam_dma_dev[0];
+	}
+
+	/* Record BAM */
+	memset(dev, 0, sizeof(*dev));
+	dev->h = h;
+	dev->bam = sps_h2bam(h);
+
+	/* Map the BAM DMA device into virtual space, if necessary */
+	props = &dev->bam->props;
+	dev->phys_addr = props->periph_phys_addr;
+	if (props->periph_virt_addr != NULL) {
+		dev->virt_addr = props->periph_virt_addr;
+		dev->virtual_mapped = false;
+	} else {
+		if (props->periph_virt_size == 0) {
+			SPS_ERR("Unable to map BAM DMA IO memory: %x %x",
+			 dev->phys_addr, props->periph_virt_size);
+			goto exit_err;
+		}
+
+		dev->virt_addr = ioremap(dev->phys_addr,
+					  props->periph_virt_size);
+		if (dev->virt_addr == NULL) {
+			SPS_ERR("Unable to map BAM DMA IO memory: %x %x",
+				dev->phys_addr, props->periph_virt_size);
+			goto exit_err;
+		}
+		dev->virtual_mapped = true;
+	}
+	dev->hwio = (void *) dev->virt_addr;
+
+	/* Is the BAM-DMA device locally controlled? */
+	if ((props->manage & SPS_BAM_MGR_DEVICE_REMOTE) == 0) {
+		SPS_DBG("BAM-DMA is controlled locally: %x",
+			dev->phys_addr);
+		dev->local = true;
+	} else {
+		SPS_DBG("BAM-DMA is controlled remotely: %x",
+			dev->phys_addr);
+		dev->local = false;
+	}
+
+	/*
+	 * Enable the BAM DMA and determine the number of pipes/channels.
+	 * Leave the BAM-DMA enabled, since it is always a shared device.
+	 */
+	if (sps_dma_device_enable(dev))
+		goto exit_err;
+
+	dev->num_pipes = dev->bam->props.num_pipes;
+
+	/* Disable all channels */
+	if (dev->local)
+		for (chan = 0; chan < (dev->num_pipes / 2); chan++) {
+			dma_write_reg_field(dev->virt_addr,
+					    DMA_CHNL_CONFIG(chan),
+					    DMA_CHNL_ENABLE, 0);
+		}
+
+	result = 0;
+exit_err:
+	if (result) {
+		if (dev != NULL) {
+			if (dev->virtual_mapped)
+				iounmap(dev->virt_addr);
+
+			dev->bam = NULL;
+		}
+	}
+
+	mutex_unlock(&bam_dma_lock);
+
+	return result;
+}
+
+/**
+ * De-initialize BAM DMA device
+ *
+ */
+int sps_dma_device_de_init(u32 h)
+{
+	struct bamdma_device *dev;
+	u32 pipe_index;
+	u32 chan;
+	int result = 0;
+
+	mutex_lock(&bam_dma_lock);
+
+	dev = sps_dma_find_device(h);
+	if (dev == NULL) {
+		SPS_ERR("BAM-DMA: not registered: %x", h);
+		result = SPS_ERROR;
+		goto exit_err;
+	}
+
+	/* Check for channel leaks */
+	for (chan = 0; chan < dev->num_pipes / 2; chan++) {
+		if (dev->chans[chan].state != DMA_CHAN_STATE_FREE) {
+			SPS_ERR("BAM-DMA: channel not free: %d", chan);
+			result = SPS_ERROR;
+			dev->chans[chan].state = DMA_CHAN_STATE_FREE;
+		}
+	}
+	for (pipe_index = 0; pipe_index < dev->num_pipes; pipe_index++) {
+		if (dev->pipes[pipe_index] != PIPE_INACTIVE) {
+			SPS_ERR("BAM-DMA: pipe not inactive: %d", pipe_index);
+			result = SPS_ERROR;
+			dev->pipes[pipe_index] = PIPE_INACTIVE;
+		}
+	}
+
+	/* Disable BAM and BAM-DMA */
+	if (sps_dma_device_disable(dev))
+		result = SPS_ERROR;
+
+	dev->h = BAM_HANDLE_INVALID;
+	dev->bam = NULL;
+	if (dev->virtual_mapped)
+		iounmap(dev->virt_addr);
+
+exit_err:
+	mutex_unlock(&bam_dma_lock);
+
+	return result;
+}
+
+/**
+ * Initialize BAM DMA module
+ *
+ */
+int sps_dma_init(const struct sps_bam_props *bam_props)
+{
+	struct sps_bam_props props;
+	const struct sps_bam_props *bam_reg;
+	u32 h;
+
+	/* Init local data */
+	memset(&bam_dma_dev, 0, sizeof(bam_dma_dev));
+	num_bams = 0;
+	memset(bam_handles, 0, sizeof(bam_handles));
+
+	/* Create a mutex to control access to the BAM-DMA devices */
+	mutex_init(&bam_dma_lock);
+
+	/* Are there any BAM DMA devices? */
+	if (bam_props == NULL)
+		return 0;
+
+	/*
+	 * Registers all BAMs in the BSP properties, but only uses the first
+	 * BAM-DMA device for allocations.
+	 */
+	if (bam_props->phys_addr) {
+		/* Force multi-EE option for all BAM-DMAs */
+		bam_reg = bam_props;
+		if ((bam_props->options & SPS_BAM_OPT_BAMDMA) &&
+		    (bam_props->manage & SPS_BAM_MGR_MULTI_EE) == 0) {
+			SPS_DBG("Setting multi-EE options for BAM-DMA: %x",
+				bam_props->phys_addr);
+			props = *bam_props;
+			props.manage |= SPS_BAM_MGR_MULTI_EE;
+			bam_reg = &props;
+		}
+
+		/* Register the BAM */
+		if (sps_register_bam_device(bam_reg, &h)) {
+			SPS_ERR("Failed to register BAM-DMA BAM device: "
+				"phys 0x%0x", bam_props->phys_addr);
+			return SPS_ERROR;
+		}
+
+		/* Record the BAM so that it may be deregistered later */
+		if (num_bams < MAX_BAM_DMA_BAMS) {
+			bam_handles[num_bams] = h;
+			num_bams++;
+		} else {
+			SPS_ERR("BAM-DMA: BAM limit exceeded: %d", num_bams);
+			return SPS_ERROR;
+		}
+	} else {
+		SPS_ERR("BAM-DMA phys_addr is zero.");
+		return SPS_ERROR;
+	}
+
+
+	return 0;
+}
+
+/**
+ * De-initialize BAM DMA module
+ *
+ */
+void sps_dma_de_init(void)
+{
+	int n;
+
+	/* De-initialize the BAM devices */
+	for (n = 0; n < num_bams; n++)
+		sps_deregister_bam_device(bam_handles[n]);
+
+	/* Clear local data */
+	memset(&bam_dma_dev, 0, sizeof(bam_dma_dev));
+	num_bams = 0;
+	memset(bam_handles, 0, sizeof(bam_handles));
+}
+
+/**
+ * Allocate a BAM DMA channel
+ *
+ */
+int sps_alloc_dma_chan(const struct sps_alloc_dma_chan *alloc,
+		       struct sps_dma_chan *chan_info)
+{
+	struct bamdma_device *dev;
+	struct bamdma_chan *chan;
+	u32 pipe_index;
+	enum bam_dma_thresh_dma thresh = (enum bam_dma_thresh_dma) 0;
+	enum bam_dma_weight_dma weight = (enum bam_dma_weight_dma) 0;
+	int result = SPS_ERROR;
+
+	if (alloc == NULL || chan_info == NULL) {
+		SPS_ERR("sps_alloc_dma_chan. invalid parameters");
+		return SPS_ERROR;
+	}
+
+	/* Translate threshold and priority to hwio values */
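+	/*
+	 * The requested threshold is mapped onto one of the supported
+	 * values: 512, 256, 128 or 64 bytes.
+	 */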
+	if (alloc->threshold != SPS_DMA_THRESHOLD_DEFAULT) {
+		if (alloc->threshold >= 512)
+			thresh = BAM_DMA_THRESH_512;
+		else if (alloc->threshold >= 256)
+			thresh = BAM_DMA_THRESH_256;
+		else if (alloc->threshold >= 128)
+			thresh = BAM_DMA_THRESH_128;
+		else
+			thresh = BAM_DMA_THRESH_64;
+	}
+
+	weight = alloc->priority;
+
+	if ((u32)alloc->priority > (u32)BAM_DMA_WEIGHT_HIGH) {
+		SPS_ERR("BAM-DMA: invalid priority: %x", alloc->priority);
+		return SPS_ERROR;
+	}
+
+	mutex_lock(&bam_dma_lock);
+
+	dev = sps_dma_find_device(alloc->dev);
+	if (dev == NULL) {
+		SPS_ERR("BAM-DMA: invalid BAM handle: %x", alloc->dev);
+		goto exit_err;
+	}
+
+	/* Search for a free set of pipes */
+	for (pipe_index = 0, chan = dev->chans;
+	      pipe_index < dev->num_pipes; pipe_index += 2, chan++) {
+		if (chan->state == DMA_CHAN_STATE_FREE) {
+			/* Just check pipes for safety */
+			if (dev->pipes[pipe_index] != PIPE_INACTIVE ||
+			    dev->pipes[pipe_index + 1] != PIPE_INACTIVE) {
+				SPS_ERR("BAM-DMA: channel %d state error:%d %d",
+					pipe_index / 2, dev->pipes[pipe_index],
+				 dev->pipes[pipe_index + 1]);
+				goto exit_err;
+			}
+			break; /* Found free pipe */
+		}
+	}
+
+	if (pipe_index >= dev->num_pipes) {
+		SPS_ERR("BAM-DMA: no free channel. num_pipes = %d",
+			dev->num_pipes);
+		goto exit_err;
+	}
+
+	chan->state = DMA_CHAN_STATE_ALLOC_EXT;
+
+	/* Store config values for use when pipes are activated */
+	chan = &dev->chans[pipe_index / 2];
+	chan->threshold = alloc->threshold;
+	chan->thresh = thresh;
+	chan->priority = alloc->priority;
+	chan->weight = weight;
+
+	SPS_DBG("sps_alloc_dma_chan. pipe %d.\n", pipe_index);
+
+	/* Report allocated pipes to client */
+	chan_info->dev = dev->h;
+	/* Dest/input/write pipe */
+	chan_info->dest_pipe_index = pipe_index;
+	/* Source/output/read pipe */
+	chan_info->src_pipe_index = pipe_index + 1;
+
+	result = 0;
+exit_err:
+	mutex_unlock(&bam_dma_lock);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_alloc_dma_chan);
+
+/**
+ * Free a BAM DMA channel
+ *
+ */
+int sps_free_dma_chan(struct sps_dma_chan *chan)
+{
+	struct bamdma_device *dev;
+	u32 pipe_index;
+	int result = 0;
+
+	if (chan == NULL) {
+		SPS_ERR("sps_free_dma_chan. chan is NULL");
+		return SPS_ERROR;
+	}
+
+	mutex_lock(&bam_dma_lock);
+
+	dev = sps_dma_find_device(chan->dev);
+	if (dev == NULL) {
+		SPS_ERR("BAM-DMA: invalid BAM handle: %x", chan->dev);
+		result = SPS_ERROR;
+		goto exit_err;
+	}
+
+	/* Verify the pipe indices */
+	pipe_index = chan->dest_pipe_index;
+	if (pipe_index >= dev->num_pipes || ((pipe_index & 1)) ||
+	    (pipe_index + 1) != chan->src_pipe_index) {
+		SPS_ERR("sps_free_dma_chan. Invalid pipe indices");
+		SPS_DBG("num_pipes=%d.dest=%d.src=%d.",
+			dev->num_pipes,
+			chan->dest_pipe_index,
+			chan->src_pipe_index);
+		result = SPS_ERROR;
+		goto exit_err;
+	}
+
+	/* Are both pipes inactive? */
+	if (dev->chans[pipe_index / 2].state != DMA_CHAN_STATE_ALLOC_EXT ||
+	    dev->pipes[pipe_index] != PIPE_INACTIVE ||
+	    dev->pipes[pipe_index + 1] != PIPE_INACTIVE) {
+		SPS_ERR("BAM-DMA: attempt to free active chan %d: %d %d",
+			pipe_index / 2, dev->pipes[pipe_index],
+			dev->pipes[pipe_index + 1]);
+		result = SPS_ERROR;
+		goto exit_err;
+	}
+
+	/* Free the channel */
+	dev->chans[pipe_index / 2].state = DMA_CHAN_STATE_FREE;
+
+exit_err:
+	mutex_unlock(&bam_dma_lock);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_free_dma_chan);
+
+/**
+ * Check the enable state of a BAM DMA channel's pipe pair
+ *
+ * This function checks whether both pipes of a BAM DMA channel are enabled,
+ *    both disabled, or in differing states.
+ *
+ * @dev - pointer to BAM-DMA device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @return - one of the DMA_PIPES_* check values
+ *
+ */
+static u32 sps_dma_check_pipes(struct bamdma_device *dev, u32 pipe_index)
+{
+	u32 pipe_in;
+	u32 pipe_out;
+	int enabled_in;
+	int enabled_out;
+	u32 check;
+
+	pipe_in = pipe_index & ~1;
+	pipe_out = pipe_in + 1;
+	enabled_in = bam_pipe_is_enabled(dev->bam->base, pipe_in);
+	enabled_out = bam_pipe_is_enabled(dev->bam->base, pipe_out);
+
+	if (!enabled_in && !enabled_out)
+		check = DMA_PIPES_BOTH_DISABLED;
+	else if (enabled_in && enabled_out)
+		check = DMA_PIPES_BOTH_ENABLED;
+	else
+		check = DMA_PIPES_STATE_DIFF;
+
+	return check;
+}
+
+/**
+ * Allocate a BAM DMA pipe
+ *
+ */
+int sps_dma_pipe_alloc(void *bam_arg, u32 pipe_index, enum sps_mode dir)
+{
+	struct sps_bam *bam = bam_arg;
+	struct bamdma_device *dev;
+	struct bamdma_chan *chan;
+	u32 channel;
+	int result = SPS_ERROR;
+
+	if (bam == NULL) {
+		SPS_ERR("BAM context is NULL");
+		return SPS_ERROR;
+	}
+
+	/* Check pipe direction */
+	if ((DMA_PIPE_IS_DEST(pipe_index) && dir != SPS_MODE_DEST) ||
+	    (DMA_PIPE_IS_SRC(pipe_index) && dir != SPS_MODE_SRC)) {
+		SPS_ERR("BAM-DMA: wrong direction for BAM %x pipe %d",
+			bam->props.phys_addr, pipe_index);
+		return SPS_ERROR;
+	}
+
+	mutex_lock(&bam_dma_lock);
+
+	dev = sps_dma_find_device((u32) bam);
+	if (dev == NULL) {
+		SPS_ERR("BAM-DMA: invalid BAM: %x",
+			bam->props.phys_addr);
+		goto exit_err;
+	}
+	if (pipe_index >= dev->num_pipes) {
+		SPS_ERR("BAM-DMA: BAM %x invalid pipe: %d",
+			bam->props.phys_addr, pipe_index);
+		goto exit_err;
+	}
+	if (dev->pipes[pipe_index] != PIPE_INACTIVE) {
+		SPS_ERR("BAM-DMA: BAM %x pipe %d already active",
+			bam->props.phys_addr, pipe_index);
+		goto exit_err;
+	}
+
+	/* Mark pipe active */
+	dev->pipes[pipe_index] = PIPE_ACTIVE;
+
+	/* If channel is not allocated, make an internal allocation */
+	channel = pipe_index / 2;
+	chan = &dev->chans[channel];
+	if (chan->state != DMA_CHAN_STATE_ALLOC_EXT &&
+	    chan->state != DMA_CHAN_STATE_ALLOC_INT) {
+		chan->state = DMA_CHAN_STATE_ALLOC_INT;
+	}
+
+	result = 0;
+exit_err:
+	mutex_unlock(&bam_dma_lock);
+
+	return result;
+}
+
+/**
+ * Enable a BAM DMA pipe
+ *
+ */
+int sps_dma_pipe_enable(void *bam_arg, u32 pipe_index)
+{
+	struct sps_bam *bam = bam_arg;
+	struct bamdma_device *dev;
+	struct bamdma_chan *chan;
+	u32 channel;
+	int result = SPS_ERROR;
+
+	SPS_DBG("sps_dma_pipe_enable.pipe %d", pipe_index);
+
+	mutex_lock(&bam_dma_lock);
+
+	dev = sps_dma_find_device((u32) bam);
+	if (dev == NULL) {
+		SPS_ERR("BAM-DMA: invalid BAM");
+		goto exit_err;
+	}
+	if (pipe_index >= dev->num_pipes) {
+		SPS_ERR("BAM-DMA: BAM %x invalid pipe: %d",
+			bam->props.phys_addr, pipe_index);
+		goto exit_err;
+	}
+	if (dev->pipes[pipe_index] != PIPE_ACTIVE) {
+		SPS_ERR("BAM-DMA: BAM %x pipe %d not active",
+			bam->props.phys_addr, pipe_index);
+		goto exit_err;
+	}
+
+	/*
+	 * The channel must be enabled when the dest/input/write pipe
+	 * is enabled
+	 */
+	if (DMA_PIPE_IS_DEST(pipe_index)) {
+		/* Configure and enable the channel */
+		channel = pipe_index / 2;
+		chan = &dev->chans[channel];
+
+		if (chan->threshold != SPS_DMA_THRESHOLD_DEFAULT)
+			dma_write_reg_field(dev->virt_addr,
+					    DMA_CHNL_CONFIG(channel),
+					    DMA_CHNL_ACT_THRESH,
+					    chan->thresh);
+
+		if (chan->priority != SPS_DMA_PRI_DEFAULT)
+			dma_write_reg_field(dev->virt_addr,
+					    DMA_CHNL_CONFIG(channel),
+					    DMA_CHNL_WEIGHT,
+					    chan->weight);
+
+		dma_write_reg_field(dev->virt_addr,
+				    DMA_CHNL_CONFIG(channel),
+				    DMA_CHNL_ENABLE, 1);
+	}
+
+	result = 0;
+exit_err:
+	mutex_unlock(&bam_dma_lock);
+
+	return result;
+}
+
+/**
+ * Deactivate a BAM DMA pipe
+ *
+ * This function deactivates a BAM DMA pipe.
+ *
+ * @dev - pointer to BAM-DMA device descriptor
+ *
+ * @bam - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_dma_deactivate_pipe_atomic(struct bamdma_device *dev,
+					  struct sps_bam *bam,
+					  u32 pipe_index)
+{
+	u32 channel;
+
+	if (dev->bam != bam)
+		return SPS_ERROR;
+	if (pipe_index >= dev->num_pipes)
+		return SPS_ERROR;
+	if (dev->pipes[pipe_index] != PIPE_ACTIVE)
+		return SPS_ERROR;	/* Pipe is not active */
+
+	SPS_DBG("BAM-DMA: deactivate pipe %d", pipe_index);
+
+	/* Mark pipe inactive */
+	dev->pipes[pipe_index] = PIPE_INACTIVE;
+
+	/*
+	 * Channel must be reset when either pipe is disabled, so just always
+	 * reset regardless of other pipe's state
+	 */
+	channel = pipe_index / 2;
+	dma_write_reg_field(dev->virt_addr, DMA_CHNL_CONFIG(channel),
+			    DMA_CHNL_ENABLE, 0);
+
+	/* If the peer pipe is also inactive, release the channel */
+	if (sps_dma_check_pipes(dev, pipe_index) == DMA_PIPES_BOTH_DISABLED) {
+		/* Free channel if allocated internally */
+		if (dev->chans[channel].state == DMA_CHAN_STATE_ALLOC_INT)
+			dev->chans[channel].state = DMA_CHAN_STATE_FREE;
+	}
+
+	return 0;
+}
+
+/**
+ * Free a BAM DMA pipe
+ *
+ */
+int sps_dma_pipe_free(void *bam_arg, u32 pipe_index)
+{
+	struct bamdma_device *dev;
+	struct sps_bam *bam = bam_arg;
+	int result;
+
+	mutex_lock(&bam_dma_lock);
+
+	dev = sps_dma_find_device((u32) bam);
+	if (dev == NULL) {
+		SPS_ERR("BAM-DMA: invalid BAM");
+		result = SPS_ERROR;
+		goto exit_err;
+	}
+
+	result = sps_dma_deactivate_pipe_atomic(dev, bam, pipe_index);
+
+exit_err:
+	mutex_unlock(&bam_dma_lock);
+
+	return result;
+}
+
+/**
+ * Get the BAM handle for BAM-DMA.
+ *
+ * The BAM handle should be used as the source/destination in sps_connect().
+ *
+ * @return bam handle on success, zero on error
+ */
+u32 sps_dma_get_bam_handle(void)
+{
+	return (u32) bam_dma_dev[0].bam;
+}
+EXPORT_SYMBOL(sps_dma_get_bam_handle);
+
+/**
+ * Free the BAM handle for BAM-DMA.
+ *
+ */
+void sps_dma_free_bam_handle(u32 h)
+{
+}
+EXPORT_SYMBOL(sps_dma_free_bam_handle);
+
+#endif /* CONFIG_SPS_SUPPORT_BAMDMA */
diff --git a/drivers/platform/msm/sps/sps_map.c b/drivers/platform/msm/sps/sps_map.c
new file mode 100644
index 0000000..16d5065
--- /dev/null
+++ b/drivers/platform/msm/sps/sps_map.c
@@ -0,0 +1,137 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/**
+ * Connection mapping table management for the SPS device driver.
+ */
+
+#include <linux/types.h>	/* u32 */
+#include <linux/kernel.h>	/* pr_info() */
+#include <linux/memory.h>	/* memset */
+
+#include "spsi.h"
+
+/* Module state */
+struct sps_map_state {
+	const struct sps_map *maps;
+	u32 num_maps;
+	u32 options;
+};
+
+static struct sps_map_state sps_maps;
+
+/**
+ * Initialize connection mapping module
+ *
+ */
+int sps_map_init(const struct sps_map *map_props, u32 options)
+{
+	const struct sps_map *maps;
+
+	/* Are there any connection mappings? */
+	memset(&sps_maps, 0, sizeof(sps_maps));
+	if (map_props == NULL)
+		return 0;
+
+	/* Init the module state */
+	sps_maps.maps = map_props;
+	sps_maps.options = options;
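+	/*
+	 * The map table is terminated by an entry whose source has both an
+	 * invalid peripheral class and an invalid physical address.
+	 */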
+	for (maps = sps_maps.maps;; maps++, sps_maps.num_maps++)
+		if (maps->src.periph_class == SPS_CLASS_INVALID &&
+		    maps->src.periph_phy_addr == SPS_ADDR_INVALID)
+			break;
+
+	SPS_DBG("SPS driver: %d mappings", sps_maps.num_maps);
+
+	return 0;
+}
+
+/**
+ * De-initialize connection mapping module
+ *
+ */
+void sps_map_de_init(void)
+{
+	memset(&sps_maps, 0, sizeof(sps_maps));
+}
+
+/**
+ * Find matching connection mapping
+ *
+ */
+int sps_map_find(struct sps_connect *connect)
+{
+	const struct sps_map *map;
+	u32 i;
+	void *desc;
+	void *data;
+
+	/* Are there any connection mappings? */
+	if (sps_maps.num_maps == 0)
+		return SPS_ERROR;
+
+	/* Search the mapping table for a match to the specified connection */
+	for (i = sps_maps.num_maps, map = sps_maps.maps;
+	    i > 0; i--, map++)
+		if (map->src.periph_class == (u32) connect->source &&
+		    map->dest.periph_class == (u32) connect->destination
+		    && map->config == (u32) connect->config)
+			break;
+
+	if (i == 0)
+		return SPS_ERROR;
+
+	/*
+	 * Before modifying client parameter struct, perform all
+	 * operations that might fail
+	 */
+	desc = spsi_get_mem_ptr(map->desc_base);
+	if (desc == NULL) {
+		SPS_ERR("Cannot get virt addr for I/O buffer: 0x%x",
+			map->desc_base);
+		return SPS_ERROR;
+	}
+
+	if (map->data_size > 0 && map->data_base != SPS_ADDR_INVALID) {
+		data = spsi_get_mem_ptr(map->data_base);
+		if (data == NULL) {
+			SPS_ERR("Cannot get virt addr for I/O buffer: 0x%x",
+				map->data_base);
+			return SPS_ERROR;
+		}
+	} else {
+		data = NULL;
+	}
+
+	/* Copy mapping values to client parameter struct */
+	if (connect->source != SPS_DEV_HANDLE_MEM)
+		connect->src_pipe_index = map->src.pipe_index;
+
+	if (connect->destination != SPS_DEV_HANDLE_MEM)
+		connect->dest_pipe_index = map->dest.pipe_index;
+
+	if (connect->mode == SPS_MODE_SRC)
+		connect->event_thresh = map->src.event_thresh;
+	else
+		connect->event_thresh = map->dest.event_thresh;
+
+	connect->desc.size = map->desc_size;
+	connect->desc.phys_base = map->desc_base;
+	connect->desc.base = desc;
+	if (map->data_size > 0 && map->data_base != SPS_ADDR_INVALID) {
+		connect->data.size = map->data_size;
+		connect->data.phys_base = map->data_base;
+		connect->data.base = data;
+	}
+
+	return 0;
+}
diff --git a/drivers/platform/msm/sps/sps_map.h b/drivers/platform/msm/sps/sps_map.h
new file mode 100644
index 0000000..692e47c
--- /dev/null
+++ b/drivers/platform/msm/sps/sps_map.h
@@ -0,0 +1,46 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* SPS driver mapping table data declarations. */
+
+
+#ifndef _SPS_MAP_H_
+#define _SPS_MAP_H_
+
+#include <linux/types.h>	/* u32 */
+
+/* End point parameters */
+struct sps_map_end_point {
+	u32 periph_class;	/* Peripheral device enumeration class */
+	u32 periph_phy_addr;	/* Peripheral base address */
+	u32 pipe_index;		/* Pipe index */
+	u32 event_thresh;	/* Pipe event threshold */
+};
+
+/* Mapping connection descriptor */
+struct sps_map {
+	/* Source end point parameters */
+	struct sps_map_end_point src;
+
+	/* Destination end point parameters */
+	struct sps_map_end_point dest;
+
+	/* Resource parameters */
+	u32 config;	 /* Configuration (stream) identifier */
+	u32 desc_base;	 /* Physical address of descriptor FIFO */
+	u32 desc_size;	 /* Size (bytes) of descriptor FIFO */
+	u32 data_base;	 /* Physical address of data FIFO */
+	u32 data_size;	 /* Size (bytes) of data FIFO */
+
+};
+
+#endif /* _SPS_MAP_H_ */
diff --git a/drivers/platform/msm/sps/sps_mem.c b/drivers/platform/msm/sps/sps_mem.c
new file mode 100644
index 0000000..3aee4ba
--- /dev/null
+++ b/drivers/platform/msm/sps/sps_mem.c
@@ -0,0 +1,156 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/**
+ * Pipe-Memory allocation/free management.
+ */
+
+#include <linux/types.h>	/* u32 */
+#include <linux/kernel.h>	/* pr_info() */
+#include <linux/io.h>		/* ioremap() */
+#include <linux/mutex.h>	/* mutex */
+#include <linux/list.h>		/* list_head */
+#include <linux/genalloc.h>	/* gen_pool_alloc() */
+#include <linux/errno.h>	/* ENOMEM */
+
+#include "sps_bam.h"
+#include "spsi.h"
+
+static u32 iomem_phys;
+static void *iomem_virt;
+static u32 iomem_size;
+static u32 iomem_offset;
+static struct gen_pool *pool;
+static u32 nid = 0xaa;
+
+/* Debug */
+static u32 total_alloc;
+static u32 total_free;
+
+/**
+ * Translate physical to virtual address
+ *
+ */
+void *spsi_get_mem_ptr(u32 phys_addr)
+{
+	void *virt = NULL;
+
+	if ((phys_addr >= iomem_phys) &&
+	    (phys_addr < (iomem_phys + iomem_size))) {
+		virt = (u8 *) iomem_virt + (phys_addr - iomem_phys);
+	} else {
+		virt = phys_to_virt(phys_addr);
+		SPS_ERR("sps:spsi_get_mem_ptr.invalid phys addr=0x%x.",
+			phys_addr);
+	}
+	return virt;
+}
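+
+/*
+ * Example (hypothetical values): with iomem_phys = 0x58000000 and
+ * iomem_size = 0x4000, a request for 0x58000100 returns iomem_virt + 0x100;
+ * any address outside that window falls back to phys_to_virt() and logs an
+ * error.
+ */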
+
+/**
+ * Allocate I/O (pipe) memory
+ *
+ */
+u32 sps_mem_alloc_io(u32 bytes)
+{
+	u32 phys_addr = SPS_ADDR_INVALID;
+	u32 virt_addr = 0;
+
+	virt_addr = gen_pool_alloc(pool, bytes);
+	if (virt_addr) {
+		iomem_offset = virt_addr - (u32) iomem_virt;
+		phys_addr = iomem_phys + iomem_offset;
+		total_alloc += bytes;
+	} else {
+		SPS_ERR("sps:gen_pool_alloc %d bytes fail.", bytes);
+		return SPS_ADDR_INVALID;
+	}
+
+	SPS_DBG("sps:sps_mem_alloc_io.phys=0x%x.virt=0x%x.size=0x%x.",
+		phys_addr, virt_addr, bytes);
+
+	return phys_addr;
+}
+
+/**
+ * Free I/O memory
+ *
+ */
+void sps_mem_free_io(u32 phys_addr, u32 bytes)
+{
+	u32 virt_addr = 0;
+
+	iomem_offset = phys_addr - iomem_phys;
+	virt_addr = (u32) iomem_virt + iomem_offset;
+
+	SPS_DBG("sps:sps_mem_free_io.phys=0x%x.virt=0x%x.size=0x%x.",
+		phys_addr, virt_addr, bytes);
+
+	gen_pool_free(pool, virt_addr, bytes);
+	total_free += bytes;
+}
+
+/**
+ * Initialize driver memory module
+ *
+ */
+int sps_mem_init(u32 pipemem_phys_base, u32 pipemem_size)
+{
+	int res;
+	/* 2^8 = 256 bytes is the minimum desc-FIFO and data-FIFO allocation. */
+	int min_alloc_order = 8;
+
+	iomem_phys = pipemem_phys_base;
+	iomem_size = pipemem_size;
+
+	if (iomem_phys == 0) {
+		SPS_ERR("sps:Invalid Pipe-Mem address");
+		return SPS_ERROR;
+	} else {
+		iomem_virt = ioremap(iomem_phys, iomem_size);
+		if (!iomem_virt) {
+			SPS_ERR("sps:Failed to IO map pipe memory.\n");
+			return -ENOMEM;
+		}
+	}
+
+	iomem_offset = 0;
+	SPS_DBG("sps:sps_mem_init.iomem_phys=0x%x,iomem_virt=0x%x.",
+		iomem_phys, (u32) iomem_virt);
+
+	pool = gen_pool_create(min_alloc_order, nid);
+	res = gen_pool_add(pool, (u32) iomem_virt, iomem_size, nid);
+	if (res)
+		return res;
+
+	return 0;
+}
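+
+/*
+ * Note: with min_alloc_order = 8 the pool hands out pipe memory in 256-byte
+ * multiples, so a (hypothetical) 0x180-byte FIFO request consumes 0x200
+ * bytes of pipe memory.
+ */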
+
+/**
+ * De-initialize driver memory module
+ *
+ */
+int sps_mem_de_init(void)
+{
+	if (iomem_virt != NULL) {
+		gen_pool_destroy(pool);
+		pool = NULL;
+		iounmap(iomem_virt);
+		iomem_virt = NULL;
+	}
+
+	if (total_alloc != total_free) {
+		SPS_ERR("sps:sps_mem_de_init:some memory not free");
+		return SPS_ERROR;
+	}
+
+	return 0;
+}
diff --git a/drivers/platform/msm/sps/sps_rm.c b/drivers/platform/msm/sps/sps_rm.c
new file mode 100644
index 0000000..ac1f4d1
--- /dev/null
+++ b/drivers/platform/msm/sps/sps_rm.c
@@ -0,0 +1,807 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Resource management for the SPS device driver. */
+
+#include <linux/types.h>	/* u32 */
+#include <linux/kernel.h>	/* pr_info() */
+#include <linux/mutex.h>	/* mutex */
+#include <linux/list.h>		/* list_head */
+#include <linux/slab.h>		/* kzalloc() */
+#include <linux/memory.h>	/* memset */
+
+#include "spsi.h"
+#include "sps_core.h"
+
+/* "Clear" value for the connection parameter struct */
+#define SPSRM_CLEAR     0xcccccccc
+
+/* Max BAM FIFO sizes */
+#define SPSRM_MAX_DESC_FIFO_SIZE    0xffff
+#define SPSRM_MAX_DATA_FIFO_SIZE    0xffff
+
+/* Connection control struct pointer */
+static struct sps_rm *sps_rm;
+
+/**
+ * Initialize resource manager module
+ */
+int sps_rm_init(struct sps_rm *rm, u32 options)
+{
+	/* Set the resource manager state struct pointer */
+	sps_rm = rm;
+
+	/* Initialize the state struct */
+	INIT_LIST_HEAD(&sps_rm->connections_q);
+	mutex_init(&sps_rm->lock);
+
+	return 0;
+}
+
+/**
+ * Initialize client state context
+ *
+ */
+void sps_rm_config_init(struct sps_connect *connect)
+{
+	memset(connect, SPSRM_CLEAR, sizeof(*connect));
+}
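+
+/*
+ * Note: memset() fills every byte with the low byte of SPSRM_CLEAR (0xcc),
+ * so an untouched u32 member later reads back as 0xcccccccc == SPSRM_CLEAR.
+ * This is how the resource manager detects fields the client never set,
+ * e.g. in sps_rm_init_map():
+ *
+ *	if (cfg->src_pipe_index != SPSRM_CLEAR)
+ *		map->src.pipe_index = cfg->src_pipe_index;
+ */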
+
+/**
+ * Remove reference to connection mapping
+ *
+ * This function removes a reference from a connection mapping struct.
+ *
+ * @map - pointer to connection mapping struct
+ *
+ */
+static void sps_rm_remove_ref(struct sps_connection *map)
+{
+	/* Free this connection */
+	map->refs--;
+	if (map->refs <= 0) {
+		if (map->client_src != NULL || map->client_dest != NULL)
+			SPS_ERR("Failed to allocate connection struct");
+
+		list_del(&map->list);
+		kfree(map);
+	}
+}
+
+/**
+ * Compare map to connect parameters
+ *
+ * This function compares client connect parameters to an allocated
+ * connection mapping.
+ *
+ * @cfg - client connect parameters
+ *
+ * @map - pointer to connection mapping struct
+ *
+ * @return - true if match, false otherwise
+ *
+ */
+static int sps_rm_map_match(const struct sps_connect *cfg,
+			    const struct sps_connection *map)
+{
+	if (cfg->source != map->src.dev ||
+	    cfg->destination != map->dest.dev)
+		return false;
+
+	if (cfg->src_pipe_index != SPSRM_CLEAR &&
+	    cfg->src_pipe_index != map->src.pipe_index)
+		return false;
+
+	if (cfg->dest_pipe_index != SPSRM_CLEAR &&
+	    cfg->dest_pipe_index != map->dest.pipe_index)
+		return false;
+
+	if (cfg->config != map->config)
+		return false;
+
+	if (cfg->desc.size != SPSRM_CLEAR) {
+		if (cfg->desc.size != map->desc.size)
+			return false;
+
+		if (cfg->desc.phys_base != SPSRM_CLEAR &&
+		    cfg->desc.base != (void *)SPSRM_CLEAR &&
+		    (cfg->desc.phys_base != map->desc.phys_base ||
+		     cfg->desc.base != map->desc.base)) {
+			return false;
+		}
+	}
+
+	if (cfg->data.size != SPSRM_CLEAR) {
+		if (cfg->data.size != map->data.size)
+			return false;
+
+		if (cfg->data.phys_base != SPSRM_CLEAR &&
+		    cfg->data.base != (void *)SPSRM_CLEAR &&
+		    (cfg->data.phys_base != map->data.phys_base ||
+		     cfg->data.base != map->data.base))
+			return false;
+	}
+
+	return true;
+}
+
+/**
+ * Find unconnected mapping
+ *
+ * This function finds an allocated connection mapping that is not yet
+ * connected on the requested end point.
+ *
+ * @pipe - client context for SPS connection end point
+ *
+ * @return - pointer to allocated connection mapping, or NULL if not found
+ *
+ */
+static struct sps_connection *find_unconnected(struct sps_pipe *pipe)
+{
+	struct sps_connect *cfg = &pipe->connect;
+	struct sps_connection *map;
+
+	/* Has this connection already been allocated? */
+	list_for_each_entry(map, &sps_rm->connections_q, list) {
+		if (sps_rm_map_match(cfg, map))
+			if ((cfg->mode == SPS_MODE_SRC
+			     && map->client_src == NULL)
+			    || (cfg->mode != SPS_MODE_SRC
+				&& map->client_dest == NULL))
+				return map;	/* Found */
+	}
+
+	return NULL;		/* Not Found */
+}
+
+/**
+ * Assign connection to client
+ *
+ * This function assigns a connection to a client.
+ *
+ * @pipe - client context for SPS connection end point
+ *
+ * @map - connection mapping
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_rm_assign(struct sps_pipe *pipe,
+			 struct sps_connection *map)
+{
+	struct sps_connect *cfg = &pipe->connect;
+
+	/* Check ownership and BAM */
+	if ((cfg->mode == SPS_MODE_SRC && map->client_src != NULL) ||
+	    (cfg->mode != SPS_MODE_SRC && map->client_dest != NULL))
+		/* The end point is already connected */
+		return SPS_ERROR;
+
+	/* Check whether this end point is a BAM (not memory) */
+	if ((cfg->mode == SPS_MODE_SRC && map->src.bam == NULL) ||
+	    (cfg->mode != SPS_MODE_SRC && map->dest.bam == NULL))
+		return SPS_ERROR;
+
+	/* Record the connection assignment */
+	if (cfg->mode == SPS_MODE_SRC) {
+		map->client_src = pipe;
+		pipe->bam = map->src.bam;
+		pipe->pipe_index = map->src.pipe_index;
+		if (pipe->connect.event_thresh != SPSRM_CLEAR)
+			map->src.event_threshold = pipe->connect.event_thresh;
+	} else {
+		map->client_dest = pipe;
+		pipe->bam = map->dest.bam;
+		pipe->pipe_index = map->dest.pipe_index;
+		if (pipe->connect.event_thresh != SPSRM_CLEAR)
+			map->dest.event_threshold =
+			pipe->connect.event_thresh;
+	}
+	pipe->map = map;
+
+	SPS_DBG("sps:sps_rm_assign.bam 0x%x.pipe_index=%d\n",
+			BAM_ID(pipe->bam), pipe->pipe_index);
+
+	/* Copy parameters to client connect state */
+	pipe->connect.src_pipe_index = map->src.pipe_index;
+	pipe->connect.dest_pipe_index = map->dest.pipe_index;
+	pipe->connect.desc = map->desc;
+	pipe->connect.data = map->data;
+
+	pipe->client_state = SPS_STATE_ALLOCATE;
+
+	return 0;
+}
+
+/**
+ * Free connection mapping resources
+ *
+ * This function frees a connection mapping's resources.
+ *
+ * @map - pointer to connection mapping struct
+ *
+ */
+static void sps_rm_free_map_rsrc(struct sps_connection *map)
+{
+	struct sps_bam *bam;
+
+	if (map->client_src != NULL || map->client_dest != NULL)
+		return;
+
+	if (map->alloc_src_pipe != SPS_BAM_PIPE_INVALID) {
+		bam = map->src.bam;
+		sps_bam_pipe_free(bam, map->src.pipe_index);
+
+		/* Is this a BAM-DMA pipe? */
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+		if ((bam->props.options & SPS_BAM_OPT_BAMDMA))
+			/* Deallocate and free the BAM-DMA channel */
+			sps_dma_pipe_free(bam, map->src.pipe_index);
+#endif
+		map->alloc_src_pipe = SPS_BAM_PIPE_INVALID;
+		map->src.pipe_index = SPS_BAM_PIPE_INVALID;
+	}
+	if (map->alloc_dest_pipe != SPS_BAM_PIPE_INVALID) {
+		bam = map->dest.bam;
+		sps_bam_pipe_free(bam, map->dest.pipe_index);
+
+		/* Is this a BAM-DMA pipe? */
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+		if ((bam->props.options & SPS_BAM_OPT_BAMDMA)) {
+			/* Deallocate the BAM-DMA channel */
+			sps_dma_pipe_free(bam, map->dest.pipe_index);
+		}
+#endif
+		map->alloc_dest_pipe = SPS_BAM_PIPE_INVALID;
+		map->dest.pipe_index = SPS_BAM_PIPE_INVALID;
+	}
+	if (map->alloc_desc_base != SPS_ADDR_INVALID) {
+		sps_mem_free_io(map->alloc_desc_base, map->desc.size);
+
+		map->alloc_desc_base = SPS_ADDR_INVALID;
+		map->desc.phys_base = SPS_ADDR_INVALID;
+	}
+	if (map->alloc_data_base != SPS_ADDR_INVALID) {
+		sps_mem_free_io(map->alloc_data_base, map->data.size);
+
+		map->alloc_data_base = SPS_ADDR_INVALID;
+		map->data.phys_base = SPS_ADDR_INVALID;
+	}
+}
+
+/**
+ * Init connection mapping from client connect
+ *
+ * This function initializes a connection mapping from the client's
+ * connect parameters.
+ *
+ * @map - connection mapping struct
+ *
+ * @cfg - client connect parameters
+ *
+ */
+static void sps_rm_init_map(struct sps_connection *map,
+			    const struct sps_connect *cfg)
+{
+	/* Clear the connection mapping struct */
+	memset(map, 0, sizeof(*map));
+	map->desc.phys_base = SPS_ADDR_INVALID;
+	map->data.phys_base = SPS_ADDR_INVALID;
+	map->alloc_desc_base = SPS_ADDR_INVALID;
+	map->alloc_data_base = SPS_ADDR_INVALID;
+	map->alloc_src_pipe = SPS_BAM_PIPE_INVALID;
+	map->alloc_dest_pipe = SPS_BAM_PIPE_INVALID;
+
+	/* Copy client required parameters */
+	map->src.dev = cfg->source;
+	map->dest.dev = cfg->destination;
+	map->desc.size = cfg->desc.size;
+	map->data.size = cfg->data.size;
+	map->config = cfg->config;
+
+	/* Did client specify descriptor FIFO? */
+	if (map->desc.size != SPSRM_CLEAR &&
+	    cfg->desc.phys_base != SPSRM_CLEAR &&
+	    cfg->desc.base != (void *)SPSRM_CLEAR)
+		map->desc = cfg->desc;
+
+	/* Did client specify data FIFO? */
+	if (map->data.size != SPSRM_CLEAR &&
+	    cfg->data.phys_base != SPSRM_CLEAR &&
+	    cfg->data.base != (void *)SPSRM_CLEAR)
+		map->data = cfg->data;
+
+	/* Did client specify source pipe? */
+	if (cfg->src_pipe_index != SPSRM_CLEAR)
+		map->src.pipe_index = cfg->src_pipe_index;
+	else
+		map->src.pipe_index = SPS_BAM_PIPE_INVALID;
+
+
+	/* Did client specify destination pipe? */
+	if (cfg->dest_pipe_index != SPSRM_CLEAR)
+		map->dest.pipe_index = cfg->dest_pipe_index;
+	else
+		map->dest.pipe_index = SPS_BAM_PIPE_INVALID;
+}
+
+/**
+ * Create a new connection mapping
+ *
+ * This function creates a new connection mapping.
+ *
+ * @pipe - client context for SPS connection end point
+ *
+ * @return - pointer to allocated connection mapping, or NULL on error
+ *
+ */
+static struct sps_connection *sps_rm_create(struct sps_pipe *pipe)
+{
+	struct sps_connection *map;
+	struct sps_bam *bam;
+	u32 desc_size;
+	u32 data_size;
+	enum sps_mode dir;
+	int success = false;
+
+	/* Allocate new connection */
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	if (map == NULL) {
+		SPS_ERR("Failed to allocate connection struct");
+		return NULL;
+	}
+
+	/* Initialize connection struct */
+	sps_rm_init_map(map, &pipe->connect);
+	dir = pipe->connect.mode;
+
+	/* On any failure below, jump to exit_err to free partial resources */
+	success = false;
+	/* Get BAMs */
+	map->src.bam = sps_h2bam(map->src.dev);
+	if (map->src.bam == NULL) {
+		if (map->src.dev != SPS_DEV_HANDLE_MEM) {
+			SPS_ERR("Invalid BAM handle: 0x%x", map->src.dev);
+			goto exit_err;
+		}
+		map->src.pipe_index = SPS_BAM_PIPE_INVALID;
+	}
+	map->dest.bam = sps_h2bam(map->dest.dev);
+	if (map->dest.bam == NULL) {
+		if (map->dest.dev != SPS_DEV_HANDLE_MEM) {
+			SPS_ERR("Invalid BAM handle: 0x%x", map->dest.dev);
+			goto exit_err;
+		}
+		map->dest.pipe_index = SPS_BAM_PIPE_INVALID;
+	}
+
+	/* Check the BAM device for the pipe */
+	if ((dir == SPS_MODE_SRC && map->src.bam == NULL) ||
+	    (dir != SPS_MODE_SRC && map->dest.bam == NULL)) {
+		SPS_ERR("Invalid BAM endpt: dir %d src 0x%x dest 0x%x",
+			dir, map->src.dev, map->dest.dev);
+		goto exit_err;
+	}
+
+	/* Allocate pipes and copy BAM parameters */
+	if (map->src.bam != NULL) {
+		/* Allocate the pipe */
+		bam = map->src.bam;
+		map->alloc_src_pipe = sps_bam_pipe_alloc(bam,
+							map->src.pipe_index);
+		if (map->alloc_src_pipe == SPS_BAM_PIPE_INVALID)
+			goto exit_err;
+		map->src.pipe_index = map->alloc_src_pipe;
+
+		/* Is this a BAM-DMA pipe? */
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+		if ((bam->props.options & SPS_BAM_OPT_BAMDMA)) {
+			int rc;
+			/* Allocate the BAM-DMA channel */
+			rc = sps_dma_pipe_alloc(bam, map->src.pipe_index,
+						 SPS_MODE_SRC);
+			if (rc) {
+				SPS_ERR("Failed to alloc BAM-DMA pipe: %d",
+					map->src.pipe_index);
+				goto exit_err;
+			}
+		}
+#endif
+		map->src.bam_phys = bam->props.phys_addr;
+		map->src.event_threshold = bam->props.event_threshold;
+	}
+	if (map->dest.bam != NULL) {
+		/* Allocate the pipe */
+		bam = map->dest.bam;
+		map->alloc_dest_pipe = sps_bam_pipe_alloc(bam,
+							 map->dest.pipe_index);
+		if (map->alloc_dest_pipe == SPS_BAM_PIPE_INVALID)
+			goto exit_err;
+
+		map->dest.pipe_index = map->alloc_dest_pipe;
+
+		/* Is this a BAM-DMA pipe? */
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+		if ((bam->props.options & SPS_BAM_OPT_BAMDMA)) {
+			int rc;
+			/* Allocate the BAM-DMA channel */
+			rc = sps_dma_pipe_alloc(bam, map->dest.pipe_index,
+					       SPS_MODE_DEST);
+			if (rc) {
+				SPS_ERR("Failed to alloc BAM-DMA pipe: %d",
+					map->dest.pipe_index);
+				goto exit_err;
+			}
+		}
+#endif
+		map->dest.bam_phys = bam->props.phys_addr;
+		map->dest.event_threshold =
+		bam->props.event_threshold;
+	}
+
+	/* Get default FIFO sizes */
+	desc_size = 0;
+	data_size = 0;
+	if (map->src.bam != NULL) {
+		bam = map->src.bam;
+		desc_size = bam->props.desc_size;
+		data_size = bam->props.data_size;
+	}
+	if (map->dest.bam != NULL) {
+		bam = map->dest.bam;
+		if (bam->props.desc_size > desc_size)
+			desc_size = bam->props.desc_size;
+		if (bam->props.data_size > data_size)
+			data_size = bam->props.data_size;
+	}
+
+	/* Set FIFO sizes */
+	if (map->desc.size == SPSRM_CLEAR)
+		map->desc.size = desc_size;
+	if (map->src.bam != NULL && map->dest.bam != NULL) {
+		/* BAM-to-BAM requires data FIFO */
+		if (map->data.size == SPSRM_CLEAR)
+			map->data.size = data_size;
+	} else {
+		map->data.size = 0;
+	}
+	if (map->desc.size > SPSRM_MAX_DESC_FIFO_SIZE) {
+		SPS_ERR("Invalid desc FIFO size: 0x%x",	map->desc.size);
+		goto exit_err;
+	}
+	if (map->src.bam != NULL && map->dest.bam != NULL &&
+	    map->data.size > SPSRM_MAX_DATA_FIFO_SIZE) {
+		SPS_ERR("Invalid data FIFO size: 0x%x",	map->data.size);
+		goto exit_err;
+	}
+
+	/* Allocate descriptor FIFO if necessary */
+	if (map->desc.size && map->desc.phys_base == SPS_ADDR_INVALID) {
+		map->alloc_desc_base = sps_mem_alloc_io(map->desc.size);
+		if (map->alloc_desc_base == SPS_ADDR_INVALID) {
+			SPS_ERR("I/O memory allocation failure: 0x%x",
+				map->desc.size);
+			goto exit_err;
+		}
+		map->desc.phys_base = map->alloc_desc_base;
+		map->desc.base = spsi_get_mem_ptr(map->desc.phys_base);
+		if (map->desc.base == NULL) {
+			SPS_ERR("Cannot get virt addr for I/O buffer: 0x%x",
+				map->desc.phys_base);
+			goto exit_err;
+		}
+	}
+
+	/* Allocate data FIFO if necessary */
+	if (map->data.size && map->data.phys_base == SPS_ADDR_INVALID) {
+		map->alloc_data_base = sps_mem_alloc_io(map->data.size);
+		if (map->alloc_data_base == SPS_ADDR_INVALID) {
+			SPS_ERR("I/O memory allocation failure: 0x%x",
+				map->data.size);
+			goto exit_err;
+		}
+		map->data.phys_base = map->alloc_data_base;
+		map->data.base = spsi_get_mem_ptr(map->data.phys_base);
+		if (map->data.base == NULL) {
+			SPS_ERR("Cannot get virt addr for I/O buffer: 0x%x",
+				map->data.phys_base);
+			goto exit_err;
+		}
+	}
+
+	/* Attempt to assign this connection to the client */
+	if (sps_rm_assign(pipe, map))
+		goto exit_err;
+
+	/* Initialization was successful */
+	success = true;
+exit_err:
+
+	/* If initialization failed, free resources */
+	if (!success) {
+		sps_rm_free_map_rsrc(map);
+		kfree(map);
+		return NULL;
+	}
+
+	return map;
+}
+
+/**
+ * Free connection mapping
+ *
+ * This function frees a connection mapping.
+ *
+ * @pipe - client context for SPS connection end point
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_rm_free(struct sps_pipe *pipe)
+{
+	struct sps_connection *map = (void *)pipe->map;
+	struct sps_connect *cfg = &pipe->connect;
+
+	mutex_lock(&sps_rm->lock);
+
+	/* Free this connection */
+	if (cfg->mode == SPS_MODE_SRC)
+		map->client_src = NULL;
+	else
+		map->client_dest = NULL;
+
+	pipe->map = NULL;
+	pipe->client_state = SPS_STATE_DISCONNECT;
+	sps_rm_free_map_rsrc(map);
+
+	sps_rm_remove_ref(map);
+
+	mutex_unlock(&sps_rm->lock);
+
+	return 0;
+}
+
+/**
+ * Allocate an SPS connection end point
+ *
+ * This function allocates resources and initializes a BAM connection.
+ *
+ * @pipe - client context for SPS connection end point
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_rm_alloc(struct sps_pipe *pipe)
+{
+	struct sps_connection *map;
+	int result = SPS_ERROR;
+
+	if (pipe->connect.sps_reserved != SPSRM_CLEAR) {
+		/*
+		 * Client did not call sps_get_config() to init
+		 * struct sps_connect, so only use legacy members.
+		 */
+		u32 source = pipe->connect.source;
+		u32 destination = pipe->connect.destination;
+		enum sps_mode mode = pipe->connect.mode;
+		u32 config = pipe->connect.config;
+		memset(&pipe->connect, SPSRM_CLEAR,
+			      sizeof(pipe->connect));
+		pipe->connect.source = source;
+		pipe->connect.destination = destination;
+		pipe->connect.mode = mode;
+		pipe->connect.config = config;
+	}
+	if (pipe->connect.config == SPSRM_CLEAR)
+		pipe->connect.config = SPS_CONFIG_DEFAULT;
+
+	/*
+	 *  If configuration is not default, then client is specifying a
+	 * connection mapping.  Find a matching mapping, or fail.
+	 * If a match is found, the client's Connect struct will be updated
+	 * with all the mapping's values.
+	 */
+	if (pipe->connect.config != SPS_CONFIG_DEFAULT) {
+		if (sps_map_find(&pipe->connect)) {
+			SPS_ERR("Failed to find connection mapping");
+			return SPS_ERROR;
+		}
+	}
+
+	mutex_lock(&sps_rm->lock);
+	/* Check client state */
+	if (IS_SPS_STATE_OK(pipe)) {
+		SPS_ERR("Client connection already allocated");
+		goto exit_err;
+	}
+
+	/* Are the connection resources already allocated? */
+	map = find_unconnected(pipe);
+	if (map != NULL) {
+		/* Attempt to assign this connection to the client */
+		if (sps_rm_assign(pipe, map))
+			/* Assignment failed, so must allocate new */
+			map = NULL;
+	}
+
+	/* Allocate a new connection if necessary */
+	if (map == NULL) {
+		map = sps_rm_create(pipe);
+		if (map == NULL) {
+			SPS_ERR("Failed to allocate connection");
+			goto exit_err;
+		}
+		list_add_tail(&map->list, &sps_rm->connections_q);
+	}
+
+	/* Add the connection to the allocated queue */
+	map->refs++;
+
+	/* Initialization was successful */
+	result = 0;
+exit_err:
+	mutex_unlock(&sps_rm->lock);
+
+	if (result)
+		return SPS_ERROR;
+
+	return 0;
+}
+
+/**
+ * Disconnect an SPS connection end point
+ *
+ * This function frees resources and de-initializes a BAM connection.
+ *
+ * @pipe - client context for SPS connection end point
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_rm_disconnect(struct sps_pipe *pipe)
+{
+	sps_rm_free(pipe);
+	return 0;
+}
+
+/**
+ * Process connection state change
+ *
+ * This function processes a connection state change.
+ *
+ * @pipe - pointer to client context
+ *
+ * @state - new state for connection
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_rm_state_change(struct sps_pipe *pipe, u32 state)
+{
+	int auto_enable = false;
+	int result;
+
+	/* Allocate the pipe */
+	if (pipe->client_state == SPS_STATE_DISCONNECT &&
+	    state == SPS_STATE_ALLOCATE) {
+		if (sps_rm_alloc(pipe))
+			return SPS_ERROR;
+	}
+
+	/* Configure the pipe */
+	if (pipe->client_state == SPS_STATE_ALLOCATE &&
+	    state == SPS_STATE_CONNECT) {
+		/* Connect the BAM pipe */
+		struct sps_bam_connect_param params;
+		memset(&params, 0, sizeof(params));
+		params.mode = pipe->connect.mode;
+		if (pipe->connect.options != SPSRM_CLEAR) {
+			params.options = pipe->connect.options;
+			params.irq_gen_addr = pipe->connect.irq_gen_addr;
+			params.irq_gen_data = pipe->connect.irq_gen_data;
+		}
+		result = sps_bam_pipe_connect(pipe, &params);
+		if (result) {
+			SPS_ERR("Failed to connect BAM 0x%x pipe %d",
+				(u32) pipe->bam, pipe->pipe_index);
+			return SPS_ERROR;
+		}
+		pipe->client_state = SPS_STATE_CONNECT;
+
+		/* Set auto-enable for system-mode connections */
+		if (pipe->connect.source == SPS_DEV_HANDLE_MEM ||
+		    pipe->connect.destination == SPS_DEV_HANDLE_MEM) {
+			if (pipe->map->desc.size != 0 &&
+			    pipe->map->desc.phys_base != SPS_ADDR_INVALID)
+				auto_enable = true;
+		}
+	}
+
+	/* Enable the pipe data flow */
+	if (pipe->client_state == SPS_STATE_CONNECT &&
+	    !(state == SPS_STATE_DISABLE
+	      || state == SPS_STATE_DISCONNECT)
+	    && (state == SPS_STATE_ENABLE || auto_enable
+		|| (pipe->connect.options & SPS_O_AUTO_ENABLE))) {
+		result = sps_bam_pipe_enable(pipe->bam, pipe->pipe_index);
+		if (result) {
+			SPS_ERR("Failed to set BAM 0x%x pipe %d flow on",
+				pipe->bam->props.phys_addr,
+				pipe->pipe_index);
+			return SPS_ERROR;
+		}
+
+		/* Is this a BAM-DMA pipe? */
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+		if ((pipe->bam->props.options & SPS_BAM_OPT_BAMDMA)) {
+			/* Activate the BAM-DMA channel */
+			result = sps_dma_pipe_enable(pipe->bam,
+						     pipe->pipe_index);
+			if (result) {
+				SPS_ERR("Failed to activate BAM-DMA pipe: %d",
+					pipe->pipe_index);
+				return SPS_ERROR;
+			}
+		}
+#endif
+		pipe->client_state = SPS_STATE_ENABLE;
+	}
+
+	/* Disable the pipe data flow */
+	if (pipe->client_state == SPS_STATE_ENABLE &&
+	    (state == SPS_STATE_DISABLE	|| state == SPS_STATE_DISCONNECT)) {
+		result = sps_bam_pipe_disable(pipe->bam, pipe->pipe_index);
+		if (result) {
+			SPS_ERR("Failed to set BAM 0x%x pipe %d flow off",
+				pipe->bam->props.phys_addr,
+				pipe->pipe_index);
+			return SPS_ERROR;
+		}
+		pipe->client_state = SPS_STATE_CONNECT;
+	}
+
+	/* Disconnect the BAM pipe */
+	if (pipe->client_state == SPS_STATE_CONNECT &&
+	    state == SPS_STATE_DISCONNECT) {
+		struct sps_connection *map;
+		u32 pipe_index;
+
+		if (pipe->connect.mode == SPS_MODE_SRC)
+			pipe_index = pipe->map->src.pipe_index;
+		else
+			pipe_index = pipe->map->dest.pipe_index;
+
+
+		result = sps_bam_pipe_disconnect(pipe->bam, pipe_index);
+		if (result) {
+			SPS_ERR("Failed to disconnect BAM 0x%x pipe %d",
+				pipe->bam->props.phys_addr,
+				pipe->pipe_index);
+			return SPS_ERROR;
+		}
+
+		/* Clear map state */
+		map = (void *)pipe->map;
+		if (pipe->connect.mode == SPS_MODE_SRC)
+			map->client_src = NULL;
+		else if (pipe->connect.mode == SPS_MODE_DEST)
+			map->client_dest = NULL;
+
+		sps_rm_disconnect(pipe);
+
+		/* Clear the client state */
+		pipe->map = NULL;
+		pipe->bam = NULL;
+		pipe->client_state = SPS_STATE_DISCONNECT;
+	}
+
+	return 0;
+}
diff --git a/drivers/platform/msm/sps/spsi.h b/drivers/platform/msm/sps/spsi.h
new file mode 100644
index 0000000..847ac16
--- /dev/null
+++ b/drivers/platform/msm/sps/spsi.h
@@ -0,0 +1,312 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/**
+ * Smart-Peripheral-Switch (SPS) internal API.
+ */
+
+#ifndef _SPSI_H_
+#define _SPSI_H_
+
+#include <linux/types.h>	/* u32 */
+#include <linux/list.h>		/* list_head */
+#include <linux/kernel.h>	/* pr_info() */
+
+#include <mach/sps.h>
+
+#include "sps_map.h"
+
+/* Adjust for offset of struct sps_q_event */
+#define SPS_EVENT_INDEX(e)    ((e) - 1)
+#define SPS_ERROR -1
+
+/* BAM identifier used in log messages */
+#define BAM_ID(dev)       ((dev)->props.phys_addr)
+
+#ifdef CONFIG_DEBUG_FS
+#define MAX_MSG_LEN 80
+#define SPS_DEBUGFS(msg, args...) do {					\
+			char buf[MAX_MSG_LEN];		\
+			snprintf(buf, MAX_MSG_LEN, msg"\n", ##args);	\
+			sps_debugfs_record(buf);	\
+		} while (0)
+#define SPS_ERR(msg, args...) do {					\
+			pr_err(msg, ##args);	\
+			SPS_DEBUGFS(msg, ##args);	\
+		} while (0)
+#define SPS_INFO(msg, args...) do {					\
+			pr_info(msg, ##args);	\
+			SPS_DEBUGFS(msg, ##args);	\
+		} while (0)
+#define SPS_DBG(msg, args...) do {					\
+			pr_debug(msg, ##args);	\
+			SPS_DEBUGFS(msg, ##args);	\
+		} while (0)
+#else
+#define	SPS_DBG(x...)		pr_debug(x)
+#define	SPS_INFO(x...)		pr_info(x)
+#define	SPS_ERR(x...)		pr_err(x)
+#endif
+
+/* End point parameters */
+struct sps_conn_end_pt {
+	u32 dev;		/* Device handle of BAM */
+	u32 bam_phys;		/* Physical address of BAM. */
+	u32 pipe_index;		/* Pipe index */
+	u32 event_threshold;	/* Pipe event threshold */
+	void *bam;
+};
+
+/* Connection bookkeeping descriptor struct */
+struct sps_connection {
+	struct list_head list;
+
+	/* Source end point parameters */
+	struct sps_conn_end_pt src;
+
+	/* Destination end point parameters */
+	struct sps_conn_end_pt dest;
+
+	/* Resource parameters */
+	struct sps_mem_buffer desc;	/* Descriptor FIFO */
+	struct sps_mem_buffer data;	/* Data FIFO (BAM-to-BAM mode only) */
+	u32 config;		/* Client specified connection configuration */
+
+	/* Connection state */
+	void *client_src;
+	void *client_dest;
+	int refs;		/* Reference counter */
+
+	/* Dynamically allocated resources, if required */
+	u32 alloc_src_pipe;	/* Source pipe index */
+	u32 alloc_dest_pipe;	/* Destination pipe index */
+	u32 alloc_desc_base;	/* Physical address of descriptor FIFO */
+	u32 alloc_data_base;	/* Physical address of data FIFO */
+};
+
+/* Event bookkeeping descriptor struct */
+struct sps_q_event {
+	struct list_head list;
+	/* Event payload data */
+	struct sps_event_notify notify;
+};
+
+/* Memory heap statistics */
+struct sps_mem_stats {
+	u32 base_addr;
+	u32 size;
+	u32 blocks_used;
+	u32 bytes_used;
+	u32 max_bytes_used;
+};
+
+#ifdef CONFIG_DEBUG_FS
+/* record debug info for debugfs */
+void sps_debugfs_record(const char *);
+#endif
+
+/**
+ * Translate physical to virtual address
+ *
+ * This function translates a physical address to a virtual address.
+ *
+ * @phys_addr - physical address to translate
+ *
+ * @return virtual memory pointer
+ *
+ */
+void *spsi_get_mem_ptr(u32 phys_addr);
+
+/**
+ * Allocate I/O (pipe) memory
+ *
+ * This function allocates target I/O (pipe) memory.
+ *
+ * @bytes - number of bytes to allocate
+ *
+ * @return physical address of allocated memory, or SPS_ADDR_INVALID on error
+ */
+u32 sps_mem_alloc_io(u32 bytes);
+
+/**
+ * Free I/O (pipe) memory
+ *
+ * This function frees target I/O (pipe) memory.
+ *
+ * @phys_addr - physical address of memory to free
+ *
+ * @bytes - number of bytes to free.
+ */
+void sps_mem_free_io(u32 phys_addr, u32 bytes);
+
+/**
+ * Find matching connection mapping
+ *
+ * This function searches for a connection mapping that matches the
+ * parameters supplied by the client.  If a match is found, the client's
+ * parameter struct is updated with the values specified in the mapping.
+ *
+ * @connect - pointer to client connection parameters
+ *
+ * @return 0 if match is found, negative value otherwise
+ *
+ */
+int sps_map_find(struct sps_connect *connect);
+
+/**
+ * Allocate a BAM DMA pipe
+ *
+ * This function allocates a BAM DMA pipe, and is intended to be called
+ * internally from the BAM resource manager.  Allocation implies that
+ * the pipe has been referenced by a client Connect() and is in use.
+ *
+ * BAM DMA is permissive with activations, and allows a pipe to be allocated
+ * with or without a client-initiated allocation.  This allows the client to
+ * specify exactly which pipe should be used directly through the Connect() API.
+ * sps_dma_alloc_chan() does not allow the client to specify the pipes/channel.
+ *
+ * @bam - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @dir - pipe direction
+ *
+ * @return 0 on success, negative value on error
+ */
+int sps_dma_pipe_alloc(void *bam, u32 pipe_index, enum sps_mode dir);
+
+/**
+ * Enable a BAM DMA pipe
+ *
+ * This function enables the channel associated with a BAM DMA pipe, and
+ * is intended to be called internally from the BAM resource manager.
+ * The channel enable must occur *after* the BAM pipe has been enabled so
+ * that proper sequencing between pipe and DMA channel enables can be
+ * enforced.
+ *
+ * @bam - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_dma_pipe_enable(void *bam, u32 pipe_index);
+
+/**
+ * Free a BAM DMA pipe
+ *
+ * This function disables and frees a BAM DMA pipe, and is intended to be
+ * called internally from the BAM resource manager.  This must occur *after*
+ * the pipe has been disabled/reset so that proper sequencing between pipe and
+ * DMA channel resets can be enforced.
+ *
+ * @bam - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_dma_pipe_free(void *bam, u32 pipe_index);
+
+/**
+ * Initialize driver memory module
+ *
+ * This function initializes the driver memory module.
+ *
+ * @pipemem_phys_base - Pipe-Memory physical base.
+ *
+ * @pipemem_size - Pipe-Memory size.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_mem_init(u32 pipemem_phys_base, u32 pipemem_size);
+
+/**
+ * De-initialize driver memory module
+ *
+ * This function de-initializes the driver memory module.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_mem_de_init(void);
+
+/**
+ * Initialize BAM DMA module
+ *
+ * This function initializes the BAM DMA module.
+ *
+ * @bam_props - pointer to the BAM DMA device's BSP configuration properties
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_dma_init(const struct sps_bam_props *bam_props);
+
+/**
+ * De-initialize BAM DMA module
+ *
+ * This function de-initializes the SPS BAM DMA module.
+ *
+ */
+void sps_dma_de_init(void);
+
+/**
+ * Initialize BAM DMA device
+ *
+ * This function initializes a BAM DMA device.
+ *
+ * @h - BAM handle
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_dma_device_init(u32 h);
+
+/**
+ * De-initialize BAM DMA device
+ *
+ * This function de-initializes a BAM DMA device.
+ *
+ * @h - BAM handle
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_dma_device_de_init(u32 h);
+
+/**
+ * Initialize connection mapping module
+ *
+ * This function initializes the SPS connection mapping module.
+ *
+ * @map_props - pointer to connection mapping BSP configuration properties
+ *
+ * @options - driver options bitflags (see SPS_OPT_*)
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_map_init(const struct sps_map *map_props, u32 options);
+
+/**
+ * De-initialize connection mapping module
+ *
+ * This function de-initializes the SPS connection mapping module.
+ *
+ */
+void sps_map_de_init(void);
+
+#endif	/* _SPSI_H_ */
diff --git a/drivers/platform/msm/ssbi.c b/drivers/platform/msm/ssbi.c
new file mode 100644
index 0000000..b4fd02e
--- /dev/null
+++ b/drivers/platform/msm/ssbi.c
@@ -0,0 +1,396 @@
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2010, Google Inc.
+ *
+ * Original authors: Code Aurora Forum
+ *
+ * Author: Dima Zavin <dima@android.com>
+ *  - Largely rewritten from original to not be an i2c driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/msm_ssbi.h>
+
+/* SSBI 2.0 controller registers */
+#define SSBI2_CMD			0x0008
+#define SSBI2_RD			0x0010
+#define SSBI2_STATUS			0x0014
+#define SSBI2_MODE2			0x001C
+
+/* SSBI_CMD fields */
+#define SSBI_CMD_RDWRN			(1 << 24)
+
+/* SSBI_STATUS fields */
+#define SSBI_STATUS_RD_READY		(1 << 2)
+#define SSBI_STATUS_READY		(1 << 1)
+#define SSBI_STATUS_MCHN_BUSY		(1 << 0)
+
+/* SSBI_MODE2 fields */
+#define SSBI_MODE2_REG_ADDR_15_8_SHFT	0x04
+#define SSBI_MODE2_REG_ADDR_15_8_MASK	(0x7f << SSBI_MODE2_REG_ADDR_15_8_SHFT)
+
+#define SET_SSBI_MODE2_REG_ADDR_15_8(MD, AD) \
+	(((MD) & 0x0F) | ((((AD) >> 8) << SSBI_MODE2_REG_ADDR_15_8_SHFT) & \
+	SSBI_MODE2_REG_ADDR_15_8_MASK))
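+
+/*
+ * Worked example (hypothetical values): for AD = 0x1a5, (AD >> 8) = 0x01 is
+ * shifted into MODE2 bits [10:4], so the macro yields (MD & 0x0f) | 0x010;
+ * only the low nibble of the current MODE2 value is preserved.
+ */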
+
+/* SSBI PMIC Arbiter command registers */
+#define SSBI_PA_CMD			0x0000
+#define SSBI_PA_RD_STATUS		0x0004
+
+/* SSBI_PA_CMD fields */
+#define SSBI_PA_CMD_RDWRN		(1 << 24)
+#define SSBI_PA_CMD_ADDR_MASK		0x7fff /* REG_ADDR_7_0, REG_ADDR_8_14*/
+
+/* SSBI_PA_RD_STATUS fields */
+#define SSBI_PA_RD_STATUS_TRANS_DONE	(1 << 27)
+#define SSBI_PA_RD_STATUS_TRANS_DENIED	(1 << 26)
+
+#define SSBI_TIMEOUT_US			100
+
+struct msm_ssbi {
+	struct device		*dev;
+	struct device		*slave;
+	void __iomem		*base;
+	spinlock_t		lock;
+	enum msm_ssbi_controller_type controller_type;
+	int (*read)(struct msm_ssbi *, u16 addr, u8 *buf, int len);
+	int (*write)(struct msm_ssbi *, u16 addr, u8 *buf, int len);
+};
+
+#define to_msm_ssbi(dev)	platform_get_drvdata(to_platform_device(dev))
+
+static inline u32 ssbi_readl(struct msm_ssbi *ssbi, u32 reg)
+{
+	return readl(ssbi->base + reg);
+}
+
+static inline void ssbi_writel(struct msm_ssbi *ssbi, u32 val, u32 reg)
+{
+	writel(val, ssbi->base + reg);
+}
+
+static int ssbi_wait_mask(struct msm_ssbi *ssbi, u32 set_mask, u32 clr_mask)
+{
+	u32 timeout = SSBI_TIMEOUT_US;
+	u32 val;
+
+	while (timeout--) {
+		val = ssbi_readl(ssbi, SSBI2_STATUS);
+		if (((val & set_mask) == set_mask) && ((val & clr_mask) == 0))
+			return 0;
+		udelay(1);
+	}
+
+	dev_err(ssbi->dev, "%s: timeout (status %x set_mask %x clr_mask %x)\n",
+		__func__, ssbi_readl(ssbi, SSBI2_STATUS), set_mask, clr_mask);
+	return -ETIMEDOUT;
+}
+
+static int
+msm_ssbi_read_bytes(struct msm_ssbi *ssbi, u16 addr, u8 *buf, int len)
+{
+	u32 cmd = SSBI_CMD_RDWRN | ((addr & 0xff) << 16);
+	int ret = 0;
+
+	if (ssbi->controller_type == MSM_SBI_CTRL_SSBI2) {
+		u32 mode2 = ssbi_readl(ssbi, SSBI2_MODE2);
+		mode2 = SET_SSBI_MODE2_REG_ADDR_15_8(mode2, addr);
+		ssbi_writel(ssbi, mode2, SSBI2_MODE2);
+	}
+
+	while (len) {
+		ret = ssbi_wait_mask(ssbi, SSBI_STATUS_READY, 0);
+		if (ret)
+			goto err;
+
+		ssbi_writel(ssbi, cmd, SSBI2_CMD);
+		ret = ssbi_wait_mask(ssbi, SSBI_STATUS_RD_READY, 0);
+		if (ret)
+			goto err;
+		*buf++ = ssbi_readl(ssbi, SSBI2_RD) & 0xff;
+		len--;
+	}
+
+err:
+	return ret;
+}
+
+static int
+msm_ssbi_write_bytes(struct msm_ssbi *ssbi, u16 addr, u8 *buf, int len)
+{
+	int ret = 0;
+
+	if (ssbi->controller_type == MSM_SBI_CTRL_SSBI2) {
+		u32 mode2 = ssbi_readl(ssbi, SSBI2_MODE2);
+		mode2 = SET_SSBI_MODE2_REG_ADDR_15_8(mode2, addr);
+		ssbi_writel(ssbi, mode2, SSBI2_MODE2);
+	}
+
+	while (len) {
+		ret = ssbi_wait_mask(ssbi, SSBI_STATUS_READY, 0);
+		if (ret)
+			goto err;
+
+		ssbi_writel(ssbi, ((addr & 0xff) << 16) | *buf, SSBI2_CMD);
+		ret = ssbi_wait_mask(ssbi, 0, SSBI_STATUS_MCHN_BUSY);
+		if (ret)
+			goto err;
+		buf++;
+		len--;
+	}
+
+err:
+	return ret;
+}
+
+static inline int
+msm_ssbi_pa_transfer(struct msm_ssbi *ssbi, u32 cmd, u8 *data)
+{
+	u32 timeout = SSBI_TIMEOUT_US;
+	u32 rd_status = 0;
+
+	ssbi_writel(ssbi, cmd, SSBI_PA_CMD);
+
+	while (timeout--) {
+		rd_status = ssbi_readl(ssbi, SSBI_PA_RD_STATUS);
+
+		if (rd_status & SSBI_PA_RD_STATUS_TRANS_DENIED) {
+			dev_err(ssbi->dev, "%s: transaction denied (0x%x)\n",
+					__func__, rd_status);
+			return -EPERM;
+		}
+
+		if (rd_status & SSBI_PA_RD_STATUS_TRANS_DONE) {
+			if (data)
+				*data = rd_status & 0xff;
+			return 0;
+		}
+		udelay(1);
+	}
+
+	dev_err(ssbi->dev, "%s: timeout, status 0x%x\n", __func__, rd_status);
+	return -ETIMEDOUT;
+}
+
+static int
+msm_ssbi_pa_read_bytes(struct msm_ssbi *ssbi, u16 addr, u8 *buf, int len)
+{
+	u32 cmd;
+	int ret = 0;
+
+	cmd = SSBI_PA_CMD_RDWRN | (addr & SSBI_PA_CMD_ADDR_MASK) << 8;
+
+	while (len) {
+		ret = msm_ssbi_pa_transfer(ssbi, cmd, buf);
+		if (ret)
+			goto err;
+		buf++;
+		len--;
+	}
+
+err:
+	return ret;
+}
+
+static int
+msm_ssbi_pa_write_bytes(struct msm_ssbi *ssbi, u16 addr, u8 *buf, int len)
+{
+	u32 cmd;
+	int ret = 0;
+
+	while (len) {
+		cmd = (addr & SSBI_PA_CMD_ADDR_MASK) << 8 | *buf;
+		ret = msm_ssbi_pa_transfer(ssbi, cmd, NULL);
+		if (ret)
+			goto err;
+		buf++;
+		len--;
+	}
+
+err:
+	return ret;
+}
+
+int msm_ssbi_read(struct device *dev, u16 addr, u8 *buf, int len)
+{
+	struct msm_ssbi *ssbi = to_msm_ssbi(dev);
+	unsigned long flags;
+	int ret;
+
+	if (ssbi->dev != dev)
+		return -ENXIO;
+
+	spin_lock_irqsave(&ssbi->lock, flags);
+	ret = ssbi->read(ssbi, addr, buf, len);
+	spin_unlock_irqrestore(&ssbi->lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_ssbi_read);
+
+int msm_ssbi_write(struct device *dev, u16 addr, u8 *buf, int len)
+{
+	struct msm_ssbi *ssbi = to_msm_ssbi(dev);
+	unsigned long flags;
+	int ret;
+
+	if (ssbi->dev != dev)
+		return -ENXIO;
+
+	spin_lock_irqsave(&ssbi->lock, flags);
+	ret = ssbi->write(ssbi, addr, buf, len);
+	spin_unlock_irqrestore(&ssbi->lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_ssbi_write);
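+
+/*
+ * Illustrative sketch only (not part of this file): a slave driver added via
+ * msm_ssbi_add_slave() passes its parent device, since the accessors verify
+ * that the device really is this SSBI controller.  The register address
+ * 0x004 is hypothetical.
+ *
+ *	u8 val;
+ *	int rc = msm_ssbi_read(pdev->dev.parent, 0x004, &val, 1);
+ *
+ *	if (!rc)
+ *		rc = msm_ssbi_write(pdev->dev.parent, 0x004, &val, 1);
+ */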
+
+static int __devinit msm_ssbi_add_slave(struct msm_ssbi *ssbi,
+				const struct msm_ssbi_slave_info *slave)
+{
+	struct platform_device *slave_pdev;
+	int ret;
+
+	if (ssbi->slave) {
+		pr_err("slave already attached??\n");
+		return -EBUSY;
+	}
+
+	slave_pdev = platform_device_alloc(slave->name, -1);
+	if (!slave_pdev) {
+		pr_err("cannot allocate pdev for slave '%s'", slave->name);
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	slave_pdev->dev.parent = ssbi->dev;
+	slave_pdev->dev.platform_data = slave->platform_data;
+
+	ret = platform_device_add(slave_pdev);
+	if (ret) {
+		pr_err("cannot add slave platform device for '%s'\n",
+				slave->name);
+		goto err;
+	}
+
+	ssbi->slave = &slave_pdev->dev;
+	return 0;
+
+err:
+	if (slave_pdev)
+		platform_device_put(slave_pdev);
+	return ret;
+}
+
+static int __devinit msm_ssbi_probe(struct platform_device *pdev)
+{
+	const struct msm_ssbi_platform_data *pdata = pdev->dev.platform_data;
+	struct resource *mem_res;
+	struct msm_ssbi *ssbi;
+	int ret = 0;
+
+	if (!pdata) {
+		pr_err("missing platform data\n");
+		return -EINVAL;
+	}
+
+	pr_debug("%s\n", pdata->slave.name);
+
+	ssbi = kzalloc(sizeof(struct msm_ssbi), GFP_KERNEL);
+	if (!ssbi) {
+		pr_err("can not allocate ssbi_data\n");
+		return -ENOMEM;
+	}
+
+	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem_res) {
+		pr_err("missing mem resource\n");
+		ret = -EINVAL;
+		goto err_get_mem_res;
+	}
+
+	ssbi->base = ioremap(mem_res->start, resource_size(mem_res));
+	if (!ssbi->base) {
+		pr_err("ioremap of 0x%p failed\n", (void *)mem_res->start);
+		ret = -EINVAL;
+		goto err_ioremap;
+	}
+	ssbi->dev = &pdev->dev;
+	platform_set_drvdata(pdev, ssbi);
+
+	ssbi->controller_type = pdata->controller_type;
+	if (ssbi->controller_type == MSM_SBI_CTRL_PMIC_ARBITER) {
+		ssbi->read = msm_ssbi_pa_read_bytes;
+		ssbi->write = msm_ssbi_pa_write_bytes;
+	} else {
+		ssbi->read = msm_ssbi_read_bytes;
+		ssbi->write = msm_ssbi_write_bytes;
+	}
+
+	spin_lock_init(&ssbi->lock);
+
+	ret = msm_ssbi_add_slave(ssbi, &pdata->slave);
+	if (ret)
+		goto err_ssbi_add_slave;
+
+	return 0;
+
+err_ssbi_add_slave:
+	platform_set_drvdata(pdev, NULL);
+	iounmap(ssbi->base);
+err_ioremap:
+err_get_mem_res:
+	kfree(ssbi);
+	return ret;
+}
+
+static int __devexit msm_ssbi_remove(struct platform_device *pdev)
+{
+	struct msm_ssbi *ssbi = platform_get_drvdata(pdev);
+
+	platform_set_drvdata(pdev, NULL);
+	iounmap(ssbi->base);
+	kfree(ssbi);
+	return 0;
+}
+
+static struct platform_driver msm_ssbi_driver = {
+	.probe		= msm_ssbi_probe,
+	.remove		= __devexit_p(msm_ssbi_remove),
+	.driver		= {
+		.name	= "msm_ssbi",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init msm_ssbi_init(void)
+{
+	return platform_driver_register(&msm_ssbi_driver);
+}
+postcore_initcall(msm_ssbi_init);
+
+static void __exit msm_ssbi_exit(void)
+{
+	platform_driver_unregister(&msm_ssbi_driver);
+}
+module_exit(msm_ssbi_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:msm_ssbi");
+MODULE_AUTHOR("Dima Zavin <dima@android.com>");
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index e57b50b..179a4ac 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -235,4 +235,106 @@
 	  This driver can be build as a module. If so, the module will be
 	  called gpio-charger.
 
+config BATTERY_MSM
+	tristate "MSM battery"
+	depends on ARCH_MSM
+	default m
+	help
+	  Say Y to enable support for the battery in Qualcomm MSM.
+
+config BATTERY_MSM8X60
+	tristate "MSM8X60 battery"
+	select PMIC8058_BATTALARM
+	help
+	  Some MSM boards have dual charging paths to charge the battery.
+	  Say Y to enable support for the battery charging in
+	  such devices.
+
+config PM8058_CHARGER
+	tristate "pmic8058 charger"
+	depends on BATTERY_MSM8X60
+	depends on PMIC8058
+	help
+	  Say Y to enable support for battery charging from the pmic8058.
+	  pmic8058 provides a linear charging circuit connected to the usb
+	  cable on Qualcomm's msm8x60 surf board.
+
+config ISL9519_CHARGER
+	tristate "isl9519 charger"
+	depends on BATTERY_MSM8X60
+	default n
+	help
+	  The isl9519q charger chip from Intersil is connected to an external
+	  charger cable and is the preferred way of charging the battery
+	  because of its high current rating.
+	  Choose Y if you are compiling for Qualcomm's msm8x60 surf/ffa board.
+
+config SMB137B_CHARGER
+	tristate "smb137b charger"
+	default n
+	depends on I2C
+	help
+	  The smb137b charger chip from Summit is a switching-mode
+	  charging solution.
+	  Choose Y if you are compiling for Qualcomm's msm8x60 fluid board.
+	  To compile this driver as a module, choose M here: the module will
+	  be called smb137b.
+
+config BATTERY_MSM_FAKE
+	tristate "Fake MSM battery"
+	depends on ARCH_MSM && BATTERY_MSM
+	default n
+	help
+	  Say Y to bypass actual battery queries.
+
+config PM8058_FIX_USB
+	tristate "pmic8058 software workaround for usb removal"
+	depends on PMIC8058
+	depends on !PM8058_CHARGER
+	help
+	  Say Y to enable the software workaround to USB Vbus line
+	  staying high even when USB cable is removed. This option
+	  is in lieu of a complete pm8058 charging driver.
+
+config BATTERY_QCIBAT
+	tristate "Quanta Computer Inc. Battery"
+	depends on SENSORS_WPCE775X
+	default n
+	help
+	  Say Y here if you want to use the Quanta battery driver for ST15
+	  platform.
+
+config BATTERY_BQ27520
+	tristate "BQ27520 battery driver"
+	depends on I2C
+	default n
+	help
+	  Say Y here to enable support for batteries with BQ27520 (I2C) chips.
+
+config BATTERY_BQ27541
+	tristate "BQ27541 battery driver"
+	depends on I2C
+	default n
+	help
+	  Say Y here to enable support for batteries with BQ27541 (I2C) chips.
+
+config BQ27520_TEST_ENABLE
+	bool "Enable BQ27520 Fuel Gauge Chip Test"
+	depends on BATTERY_BQ27520
+	default n
+	help
+	  Say Y here to enable Test sysfs Interface for BQ27520 Drivers.
+
+config PM8921_CHARGER
+	tristate "PM8921 Charger driver"
+	depends on MFD_PM8921_CORE
+	help
+	  Say Y here to enable support for the pm8921 chip charger subdevice.
+
+config PM8921_BMS
+	tristate "PM8921 Battery Monitoring System driver"
+	depends on MFD_PM8921_CORE
+	help
+	  Say Y here to enable support for the pm8921 chip BMS subdevice.
+
 endif # POWER_SUPPLY
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 009a90f..f61c88a 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -36,3 +36,14 @@
 obj-$(CONFIG_CHARGER_MAX8903)	+= max8903_charger.o
 obj-$(CONFIG_CHARGER_TWL4030)	+= twl4030_charger.o
 obj-$(CONFIG_CHARGER_GPIO)	+= gpio-charger.o
+obj-$(CONFIG_BATTERY_MSM)       += msm_battery.o
+obj-$(CONFIG_BATTERY_MSM8X60)   += msm_charger.o
+obj-$(CONFIG_PM8058_CHARGER)    += pmic8058-charger.o
+obj-$(CONFIG_ISL9519_CHARGER)   += isl9519q.o
+obj-$(CONFIG_PM8058_FIX_USB)    += pm8058_usb_fix.o
+obj-$(CONFIG_BATTERY_QCIBAT)    += qci_battery.o
+obj-$(CONFIG_BATTERY_BQ27520)	+= bq27520_fuelgauger.o
+obj-$(CONFIG_BATTERY_BQ27541)	+= bq27541_fuelgauger.o
+obj-$(CONFIG_SMB137B_CHARGER)   += smb137b.o
+obj-$(CONFIG_PM8921_BMS)	+= pm8921-bms.o
+obj-$(CONFIG_PM8921_CHARGER)	+= pm8921-charger.o
diff --git a/drivers/power/bq27520_fuelgauger.c b/drivers/power/bq27520_fuelgauger.c
new file mode 100644
index 0000000..284b134
--- /dev/null
+++ b/drivers/power/bq27520_fuelgauger.c
@@ -0,0 +1,960 @@
+/* Copyright (C) 2008 Rodolfo Giometti <giometti@linux.it>
+ * Copyright (C) 2008 Eurotech S.p.A. <info@eurotech.it>
+ * Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc.
+ *
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/param.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/idr.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+#include <linux/time.h>
+#include <linux/i2c/bq27520.h>
+#include <linux/mfd/pmic8058.h>
+#include <linux/regulator/pmic8058-regulator.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/machine.h>
+#include <linux/err.h>
+#include <linux/msm-charger.h>
+
+#define DRIVER_VERSION			"1.1.0"
+/* Bq27520 standard data commands */
+#define BQ27520_REG_CNTL		0x00
+#define BQ27520_REG_AR			0x02
+#define BQ27520_REG_ARTTE		0x04
+#define BQ27520_REG_TEMP		0x06
+#define BQ27520_REG_VOLT		0x08
+#define BQ27520_REG_FLAGS		0x0A
+#define BQ27520_REG_NAC			0x0C
+#define BQ27520_REG_FAC			0x0e
+#define BQ27520_REG_RM			0x10
+#define BQ27520_REG_FCC			0x12
+#define BQ27520_REG_AI			0x14
+#define BQ27520_REG_TTE			0x16
+#define BQ27520_REG_TTF			0x18
+#define BQ27520_REG_SI			0x1a
+#define BQ27520_REG_STTE		0x1c
+#define BQ27520_REG_MLI			0x1e
+#define BQ27520_REG_MLTTE		0x20
+#define BQ27520_REG_AE			0x22
+#define BQ27520_REG_AP			0x24
+#define BQ27520_REG_TTECP		0x26
+#define BQ27520_REG_SOH			0x28
+#define BQ27520_REG_SOC			0x2c
+#define BQ27520_REG_NIC			0x2e
+#define BQ27520_REG_ICR			0x30
+#define BQ27520_REG_LOGIDX		0x32
+#define BQ27520_REG_LOGBUF		0x34
+#define BQ27520_FLAG_DSC		BIT(0)
+#define BQ27520_FLAG_FC			BIT(9)
+#define BQ27520_FLAG_BAT_DET		BIT(3)
+#define BQ27520_CS_DLOGEN		BIT(15)
+#define BQ27520_CS_SS		    BIT(13)
+/* Control subcommands */
+#define BQ27520_SUBCMD_CTNL_STATUS  0x0000
+#define BQ27520_SUBCMD_DEVCIE_TYPE  0x0001
+#define BQ27520_SUBCMD_FW_VER  0x0002
+#define BQ27520_SUBCMD_HW_VER  0x0003
+#define BQ27520_SUBCMD_DF_CSUM  0x0004
+#define BQ27520_SUBCMD_PREV_MACW   0x0007
+#define BQ27520_SUBCMD_CHEM_ID   0x0008
+#define BQ27520_SUBCMD_BD_OFFSET   0x0009
+#define BQ27520_SUBCMD_INT_OFFSET  0x000a
+#define BQ27520_SUBCMD_CC_VER   0x000b
+#define BQ27520_SUBCMD_OCV  0x000c
+#define BQ27520_SUBCMD_BAT_INS   0x000d
+#define BQ27520_SUBCMD_BAT_REM   0x000e
+#define BQ27520_SUBCMD_SET_HIB   0x0011
+#define BQ27520_SUBCMD_CLR_HIB   0x0012
+#define BQ27520_SUBCMD_SET_SLP   0x0013
+#define BQ27520_SUBCMD_CLR_SLP   0x0014
+#define BQ27520_SUBCMD_FCT_RES   0x0015
+#define BQ27520_SUBCMD_ENABLE_DLOG  0x0018
+#define BQ27520_SUBCMD_DISABLE_DLOG 0x0019
+#define BQ27520_SUBCMD_SEALED   0x0020
+#define BQ27520_SUBCMD_ENABLE_IT    0x0021
+#define BQ27520_SUBCMD_DISABLE_IT   0x0023
+#define BQ27520_SUBCMD_CAL_MODE  0x0040
+#define BQ27520_SUBCMD_RESET   0x0041
+
+#define ZERO_DEGREE_CELSIUS_IN_TENTH_KELVIN   (-2731)
+#define BQ27520_INIT_DELAY ((HZ)*1)
+#define BQ27520_POLLING_STATUS ((HZ)*3)
+#define BQ27520_COULOMB_POLL ((HZ)*30)
+
+/* If the system has several batteries we need a different name for each
+ * of them...
+ */
+static DEFINE_IDR(battery_id);
+static DEFINE_MUTEX(battery_mutex);
+
+struct bq27520_device_info;
+struct bq27520_access_methods {
+	int (*read)(u8 reg, int *rt_value, int b_single,
+		struct bq27520_device_info *di);
+};
+
+struct bq27520_device_info {
+	struct device				*dev;
+	int					id;
+	struct bq27520_access_methods		*bus;
+	struct i2c_client			*client;
+	const struct bq27520_platform_data	*pdata;
+	struct work_struct			counter;
+	/* 300ms delay is needed after bq27520 is powered up
+	 * and before any successful I2C transaction
+	 */
+	struct  delayed_work			hw_config;
+	uint32_t				irq;
+};
+
+enum {
+	GET_BATTERY_STATUS,
+	GET_BATTERY_TEMPERATURE,
+	GET_BATTERY_VOLTAGE,
+	GET_BATTERY_CAPACITY,
+	NUM_OF_STATUS,
+};
+
+struct bq27520_status {
+	/* Information owned and maintained by the bq27520 driver, updated
+	 * by the poller or the SOC_INT interrupt, so readers are decoupled
+	 * from direct hardware I/O
+	 */
+	int			status[NUM_OF_STATUS];
+	spinlock_t		lock;
+	struct delayed_work	poller;
+};
+
+static struct bq27520_status current_battery_status;
+static struct bq27520_device_info *bq27520_di;
+static int coulomb_counter;
+static spinlock_t lock; /* protect access to coulomb_counter */
+static struct timer_list timer; /* charge counter timer every 30 secs */
+
+static int bq27520_i2c_txsubcmd(u8 reg, unsigned short subcmd,
+		struct bq27520_device_info *di);
+
+static int bq27520_read(u8 reg, int *rt_value, int b_single,
+			struct bq27520_device_info *di)
+{
+	return di->bus->read(reg, rt_value, b_single, di);
+}
+
+/*
+ * Return the battery temperature in tenths of degree Celsius
+ * Or < 0 if something fails.
+ */
+static int bq27520_battery_temperature(struct bq27520_device_info *di)
+{
+	int ret, temp = 0;
+
+	ret = bq27520_read(BQ27520_REG_TEMP, &temp, 0, di);
+	if (ret) {
+		dev_err(di->dev, "error %d reading temperature\n", ret);
+		return ret;
+	}
+
+	return temp + ZERO_DEGREE_CELSIUS_IN_TENTH_KELVIN;
+}
+
+/*
+ * Return the battery voltage in millivolts
+ * Or < 0 if something fails.
+ */
+static int bq27520_battery_voltage(struct bq27520_device_info *di)
+{
+	int ret, volt = 0;
+
+	ret = bq27520_read(BQ27520_REG_VOLT, &volt, 0, di);
+	if (ret) {
+		dev_err(di->dev, "error %d reading voltage\n", ret);
+		return ret;
+	}
+
+	return volt;
+}
+
+/*
+ * Return the battery Relative State-of-Charge
+ * Or < 0 if something fails.
+ */
+static int bq27520_battery_rsoc(struct bq27520_device_info *di)
+{
+	int ret, rsoc = 0;
+
+	ret = bq27520_read(BQ27520_REG_SOC, &rsoc, 0, di);
+
+	if (ret) {
+		dev_err(di->dev,
+			"error %d reading relative State-of-Charge\n", ret);
+		return ret;
+	}
+
+	return rsoc;
+}
+
+static void bq27520_cntl_cmd(struct bq27520_device_info *di,
+				int subcmd)
+{
+	bq27520_i2c_txsubcmd(BQ27520_REG_CNTL, subcmd, di);
+}
+
+/*
+ * i2c specific code
+ */
+static int bq27520_i2c_txsubcmd(u8 reg, unsigned short subcmd,
+		struct bq27520_device_info *di)
+{
+	struct i2c_msg msg;
+	unsigned char data[3];
+
+	if (!di->client)
+		return -ENODEV;
+
+	memset(data, 0, sizeof(data));
+	data[0] = reg;
+	data[1] = subcmd & 0x00FF;
+	data[2] = (subcmd & 0xFF00) >> 8;
+
+	msg.addr = di->client->addr;
+	msg.flags = 0;
+	msg.len = 3;
+	msg.buf = data;
+
+	if (i2c_transfer(di->client->adapter, &msg, 1) < 0)
+		return -EIO;
+
+	return 0;
+}
+
+static int bq27520_chip_config(struct bq27520_device_info *di)
+{
+	int flags = 0, ret = 0;
+
+	bq27520_cntl_cmd(di, BQ27520_SUBCMD_CTNL_STATUS);
+	udelay(66);
+	ret = bq27520_read(BQ27520_REG_CNTL, &flags, 0, di);
+	if (ret < 0) {
+		dev_err(di->dev, "error %d reading register %02x\n",
+			 ret, BQ27520_REG_CNTL);
+		return ret;
+	}
+	udelay(66);
+
+	bq27520_cntl_cmd(di, BQ27520_SUBCMD_ENABLE_IT);
+	udelay(66);
+
+	if (di->pdata->enable_dlog && !(flags & BQ27520_CS_DLOGEN)) {
+		bq27520_cntl_cmd(di, BQ27520_SUBCMD_ENABLE_DLOG);
+		udelay(66);
+	}
+
+	return 0;
+}
+
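+/* Timer callbacks run in atomic context where the I2C transfers below are
+ * not allowed, so the 30 second coulomb-counter timer only schedules the
+ * workqueue item and re-arms itself.
+ */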
+static void bq27520_every_30secs(unsigned long data)
+{
+	struct bq27520_device_info *di = (struct bq27520_device_info *)data;
+
+	schedule_work(&di->counter);
+	mod_timer(&timer, jiffies + BQ27520_COULOMB_POLL);
+}
+
+static void bq27520_coulomb_counter_work(struct work_struct *work)
+{
+	int value = 0, temp = 0, index = 0, ret = 0, count = 0;
+	struct bq27520_device_info *di;
+	unsigned long flags;
+
+	di = container_of(work, struct bq27520_device_info, counter);
+
+	/* retrieve 30 values from FIFO of coulomb data logging buffer
+	 * and average over time
+	 */
+	do {
+		ret = bq27520_read(BQ27520_REG_LOGBUF, &temp, 0, di);
+		if (ret < 0)
+			break;
+		if (temp != 0x7FFF) {
+			++count;
+			value += temp;
+		}
+		udelay(66);
+		ret = bq27520_read(BQ27520_REG_LOGIDX, &index, 0, di);
+		if (ret < 0)
+			break;
+		udelay(66);
+	} while (index != 0 || temp != 0x7FFF);
+
+	if (ret < 0) {
+		dev_err(di->dev, "Error %d reading datalog register\n", ret);
+		return;
+	}
+
+	if (count) {
+		spin_lock_irqsave(&lock, flags);
+		coulomb_counter = value/count;
+		spin_unlock_irqrestore(&lock, flags);
+	}
+}
+
+static int bq27520_is_battery_present(void)
+{
+	return 1;
+}
+
+static int bq27520_is_battery_temp_within_range(void)
+{
+	return 1;
+}
+
+static int bq27520_is_battery_id_valid(void)
+{
+	return 1;
+}
+
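+/* The msm_charger callbacks below return values cached in
+ * current_battery_status under its spinlock; only the poller, the SOC_INT
+ * handler and hw_config actually talk to the gauge over I2C.
+ */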
+static int bq27520_status_getter(int function)
+{
+	int status = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&current_battery_status.lock, flags);
+	status = current_battery_status.status[function];
+	spin_unlock_irqrestore(&current_battery_status.lock, flags);
+
+	return status;
+}
+
+static int bq27520_get_battery_mvolts(void)
+{
+	return bq27520_status_getter(GET_BATTERY_VOLTAGE);
+}
+
+static int bq27520_get_battery_temperature(void)
+{
+	return bq27520_status_getter(GET_BATTERY_TEMPERATURE);
+}
+
+static int bq27520_get_battery_status(void)
+{
+	return bq27520_status_getter(GET_BATTERY_STATUS);
+}
+
+static int bq27520_get_remaining_capacity(void)
+{
+	return bq27520_status_getter(GET_BATTERY_CAPACITY);
+}
+
+static struct msm_battery_gauge bq27520_batt_gauge = {
+	.get_battery_mvolts		= bq27520_get_battery_mvolts,
+	.get_battery_temperature	= bq27520_get_battery_temperature,
+	.is_battery_present		= bq27520_is_battery_present,
+	.is_battery_temp_within_range	= bq27520_is_battery_temp_within_range,
+	.is_battery_id_valid		= bq27520_is_battery_id_valid,
+	.get_battery_status		= bq27520_get_battery_status,
+	.get_batt_remaining_capacity	= bq27520_get_remaining_capacity,
+};
+
+static void update_current_battery_status(int data)
+{
+	int status[NUM_OF_STATUS], ret = 0;
+	unsigned long flag;
+
+	memset(status, 0, sizeof status);
+	ret = bq27520_battery_rsoc(bq27520_di);
+	status[GET_BATTERY_CAPACITY] = (ret < 0) ? 0 : ret;
+
+	status[GET_BATTERY_VOLTAGE] = bq27520_battery_voltage(bq27520_di);
+	status[GET_BATTERY_TEMPERATURE] =
+				bq27520_battery_temperature(bq27520_di);
+
+	spin_lock_irqsave(&current_battery_status.lock, flag);
+	current_battery_status.status[GET_BATTERY_STATUS] = data;
+	current_battery_status.status[GET_BATTERY_VOLTAGE] =
+						status[GET_BATTERY_VOLTAGE];
+	current_battery_status.status[GET_BATTERY_TEMPERATURE] =
+						status[GET_BATTERY_TEMPERATURE];
+	current_battery_status.status[GET_BATTERY_CAPACITY] =
+						status[GET_BATTERY_CAPACITY];
+	spin_unlock_irqrestore(&current_battery_status.lock, flag);
+}
+
+/* Notify msm_charger only when the battery charging status changes;
+ * otherwise just refresh current_battery_status.
+ */
+static int if_notify_msm_charger(int *data)
+{
+	int ret = 0, flags = 0, status = 0;
+	unsigned long flag;
+
+	ret = bq27520_read(BQ27520_REG_FLAGS, &flags, 0, bq27520_di);
+	if (ret < 0) {
+		dev_err(bq27520_di->dev, "error %d reading register %02x\n",
+			ret, BQ27520_REG_FLAGS);
+		status = POWER_SUPPLY_STATUS_UNKNOWN;
+	} else {
+		if (flags & BQ27520_FLAG_FC)
+			status = POWER_SUPPLY_STATUS_FULL;
+		else if (flags & BQ27520_FLAG_DSC)
+			status = POWER_SUPPLY_STATUS_DISCHARGING;
+		else
+			status = POWER_SUPPLY_STATUS_CHARGING;
+	}
+
+	*data = status;
+	spin_lock_irqsave(&current_battery_status.lock, flag);
+	ret = (status != current_battery_status.status[GET_BATTERY_STATUS]);
+	spin_unlock_irqrestore(&current_battery_status.lock, flag);
+	return ret;
+}
+
+static void battery_status_poller(struct work_struct *work)
+{
+	int status = 0, temp = 0;
+
+	temp = if_notify_msm_charger(&status);
+	update_current_battery_status(status);
+	if (temp)
+		msm_charger_notify_event(NULL, CHG_BATT_STATUS_CHANGE);
+
+	schedule_delayed_work(&current_battery_status.poller,
+				BQ27520_POLLING_STATUS);
+}
+
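+/* Deferred chip setup, scheduled BQ27520_INIT_DELAY after probe so the gauge
+ * has had its post-power-up settling time.  It configures the chip, seeds
+ * current_battery_status from hardware, enables the SOC_INT interrupt and
+ * starts the 3 second status poller (plus the 30 second coulomb-counter
+ * timer when data logging is enabled) before logging the chip identity.
+ */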
+static void bq27520_hw_config(struct work_struct *work)
+{
+	int ret = 0, flags = 0, type = 0, fw_ver = 0, status = 0;
+	struct bq27520_device_info *di;
+
+	di  = container_of(work, struct bq27520_device_info, hw_config.work);
+
+	pr_debug("Enter bq27520_hw_config\n");
+	ret = bq27520_chip_config(di);
+	if (ret) {
+		dev_err(di->dev, "Failed to config Bq27520 ret = %d\n", ret);
+		return;
+	}
+	/* bq27520 is ready for access, update current_battery_status by reading
+	 * from hardware
+	 */
+	if_notify_msm_charger(&status);
+	update_current_battery_status(status);
+	msm_charger_notify_event(NULL, CHG_BATT_STATUS_CHANGE);
+
+	enable_irq(di->irq);
+
+	/* poll battery status every 3 seconds, if charging status changes,
+	 * notify msm_charger
+	 */
+	schedule_delayed_work(&current_battery_status.poller,
+				BQ27520_POLLING_STATUS);
+
+	if (di->pdata->enable_dlog) {
+		schedule_work(&di->counter);
+		init_timer(&timer);
+		timer.function = &bq27520_every_30secs;
+		timer.data = (unsigned long)di;
+		timer.expires = jiffies + BQ27520_COULOMB_POLL;
+		add_timer(&timer);
+	}
+
+	bq27520_cntl_cmd(di, BQ27520_SUBCMD_CTNL_STATUS);
+	udelay(66);
+	bq27520_read(BQ27520_REG_CNTL, &flags, 0, di);
+	bq27520_cntl_cmd(di, BQ27520_SUBCMD_DEVCIE_TYPE);
+	udelay(66);
+	bq27520_read(BQ27520_REG_CNTL, &type, 0, di);
+	bq27520_cntl_cmd(di, BQ27520_SUBCMD_FW_VER);
+	udelay(66);
+	bq27520_read(BQ27520_REG_CNTL, &fw_ver, 0, di);
+
+	dev_info(di->dev, "DEVICE_TYPE is 0x%02X, FIRMWARE_VERSION\
+		is 0x%02X\n", type, fw_ver);
+	dev_info(di->dev, "Complete bq27520 configuration 0x%02X\n", flags);
+}
+
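+/* Register read: one I2C write message selects the register, then a read
+ * message fetches either a single byte (b_single) or a little-endian
+ * 16-bit word.
+ */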
+static int bq27520_read_i2c(u8 reg, int *rt_value, int b_single,
+			struct bq27520_device_info *di)
+{
+	struct i2c_client *client = di->client;
+	struct i2c_msg msg[1];
+	unsigned char data[2];
+	int err;
+
+	if (!client->adapter)
+		return -ENODEV;
+
+	msg->addr = client->addr;
+	msg->flags = 0;
+	msg->len = 1;
+	msg->buf = data;
+
+	data[0] = reg;
+	err = i2c_transfer(client->adapter, msg, 1);
+
+	if (err >= 0) {
+		if (!b_single)
+			msg->len = 2;
+		else
+			msg->len = 1;
+
+		msg->flags = I2C_M_RD;
+		err = i2c_transfer(client->adapter, msg, 1);
+		if (err >= 0) {
+			if (!b_single)
+				*rt_value = get_unaligned_le16(data);
+			else
+				*rt_value = data[0];
+
+			return 0;
+		}
+	}
+	return err;
+}
+
+#ifdef CONFIG_BQ27520_TEST_ENABLE
+static int reg;
+static int subcmd;
+static ssize_t bq27520_read_stdcmd(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	int ret;
+	int temp = 0;
+	struct platform_device *client;
+	struct bq27520_device_info *di;
+
+	client = to_platform_device(dev);
+	di = platform_get_drvdata(client);
+
+	if (reg <= BQ27520_REG_ICR && reg > 0x00) {
+		ret = bq27520_read(reg, &temp, 0, di);
+		if (ret)
+			ret = snprintf(buf, PAGE_SIZE, "Read Error!\n");
+		else
+			ret = snprintf(buf, PAGE_SIZE, "0x%02x\n", temp);
+	} else
+		ret = snprintf(buf, PAGE_SIZE, "Register Error!\n");
+
+	return ret;
+}
+
+static ssize_t bq27520_write_stdcmd(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	ssize_t ret = strnlen(buf, PAGE_SIZE);
+	int cmd;
+
+	sscanf(buf, "%x", &cmd);
+	reg = cmd;
+	dev_info(dev, "recv'd cmd is 0x%02X\n", reg);
+	return ret;
+}
+
+static ssize_t bq27520_read_subcmd(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	int ret, temp = 0;
+	struct platform_device *client;
+	struct bq27520_device_info *di;
+
+	client = to_platform_device(dev);
+	di = platform_get_drvdata(client);
+
+	if (subcmd == BQ27520_SUBCMD_DEVCIE_TYPE ||
+		 subcmd == BQ27520_SUBCMD_FW_VER ||
+		 subcmd == BQ27520_SUBCMD_HW_VER ||
+		 subcmd == BQ27520_SUBCMD_CHEM_ID) {
+
+		bq27520_cntl_cmd(di, subcmd);/* Retrieve Chip status */
+		udelay(66);
+		ret = bq27520_read(BQ27520_REG_CNTL, &temp, 0, di);
+
+		if (ret)
+			ret = snprintf(buf, PAGE_SIZE, "Read Error!\n");
+		else
+			ret = snprintf(buf, PAGE_SIZE, "0x%02x\n", temp);
+	} else
+		ret = snprintf(buf, PAGE_SIZE, "Register Error!\n");
+
+	return ret;
+}
+
+static ssize_t bq27520_write_subcmd(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	ssize_t ret = strnlen(buf, PAGE_SIZE);
+	int cmd;
+
+	sscanf(buf, "%x", &cmd);
+	subcmd = cmd;
+	return ret;
+}
+
+static DEVICE_ATTR(std_cmd, S_IRUGO|S_IWUGO, bq27520_read_stdcmd,
+	bq27520_write_stdcmd);
+static DEVICE_ATTR(sub_cmd, S_IRUGO|S_IWUGO, bq27520_read_subcmd,
+	bq27520_write_subcmd);
+static struct attribute *fs_attrs[] = {
+	&dev_attr_std_cmd.attr,
+	&dev_attr_sub_cmd.attr,
+	NULL,
+};
+static struct attribute_group fs_attr_group = {
+	.attrs = fs_attrs,
+};
+
+static struct platform_device this_device = {
+	.name = "bq27520-test",
+	.id = -1,
+	.dev.platform_data = NULL,
+};
+#endif
+
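+/* Threaded handler for the gauge's SOC_INT line: refresh the cached battery
+ * status and notify msm_charger only when the charging state has actually
+ * changed.
+ */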
+static irqreturn_t soc_irqhandler(int irq, void *dev_id)
+{
+	int status = 0, temp = 0;
+
+	temp = if_notify_msm_charger(&status);
+	update_current_battery_status(status);
+	if (temp)
+		msm_charger_notify_event(NULL, CHG_BATT_STATUS_CHANGE);
+	return IRQ_HANDLED;
+}
+
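+/* Power sequencing: enable the external supply, drive chip_en high to turn
+ * on the gauge's on-chip 2.5V LDO, then claim the SOC_INT GPIO and request
+ * its IRQ (left disabled until hw_config has run).  Power-down reverses the
+ * same steps.
+ */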
+static struct regulator *vreg_bq27520;
+static int bq27520_power(bool enable, struct bq27520_device_info *di)
+{
+	int rc = 0, ret;
+	const struct bq27520_platform_data *platdata;
+
+	platdata = di->pdata;
+	if (enable) {
+		/* switch on Vreg_S3 */
+		rc = regulator_enable(vreg_bq27520);
+		if (rc < 0) {
+			dev_err(di->dev, "%s: vreg %s %s failed (%d)\n",
+				__func__, platdata->vreg_name, "enable", rc);
+			goto vreg_fail;
+		}
+
+		/* Battery gauge enable and switch on onchip 2.5V LDO */
+		rc = gpio_request(platdata->chip_en, "GAUGE_EN");
+		if (rc) {
+			dev_err(di->dev, "%s: fail to request gpio %d (%d)\n",
+				__func__, platdata->chip_en, rc);
+			goto vreg_fail;
+		}
+
+		gpio_direction_output(platdata->chip_en, 0);
+		gpio_set_value(platdata->chip_en, 1);
+		rc = gpio_request(platdata->soc_int, "GAUGE_SOC_INT");
+		if (rc) {
+			dev_err(di->dev, "%s: fail to request gpio %d (%d)\n",
+				__func__, platdata->soc_int, rc);
+			goto gpio_fail;
+		}
+		gpio_direction_input(platdata->soc_int);
+		di->irq = gpio_to_irq(platdata->soc_int);
+		rc = request_threaded_irq(di->irq, NULL, soc_irqhandler,
+				IRQF_TRIGGER_FALLING|IRQF_TRIGGER_RISING,
+				"BQ27520_IRQ", di);
+		if (rc) {
+			dev_err(di->dev, "%s: fail to request irq %d (%d)\n",
+				__func__, platdata->soc_int, rc);
+			goto irqreq_fail;
+		} else {
+			disable_irq_nosync(di->irq);
+		}
+	} else {
+		free_irq(di->irq, di);
+		gpio_free(platdata->soc_int);
+		/* switch off on-chip 2.5V LDO and disable Battery gauge */
+		gpio_set_value(platdata->chip_en, 0);
+		gpio_free(platdata->chip_en);
+		/* switch off Vreg_S3 */
+		rc = regulator_disable(vreg_bq27520);
+		if (rc < 0) {
+			dev_err(di->dev, "%s: vreg %s %s failed (%d)\n",
+				__func__, platdata->vreg_name, "disable", rc);
+			goto vreg_fail;
+		}
+	}
+	return rc;
+
+irqreq_fail:
+	gpio_free(platdata->soc_int);
+gpio_fail:
+	gpio_set_value(platdata->chip_en, 0);
+	gpio_free(platdata->chip_en);
+vreg_fail:
+	ret = !enable ? regulator_enable(vreg_bq27520) :
+		regulator_disable(vreg_bq27520);
+	if (ret < 0) {
+		dev_err(di->dev, "%s: vreg %s %s failed (%d) in err path\n",
+			__func__, platdata->vreg_name,
+			!enable ? "enable" : "disable", ret);
+	}
+	return rc;
+}
+
+static int bq27520_dev_setup(bool enable, struct bq27520_device_info *di)
+{
+	int rc;
+	const struct bq27520_platform_data *platdata;
+
+	platdata = di->pdata;
+	if (enable) {
+		/* enable and set voltage Vreg_S3 */
+		vreg_bq27520 = regulator_get(NULL,
+				platdata->vreg_name);
+		if (IS_ERR(vreg_bq27520)) {
+			dev_err(di->dev, "%s: regulator get of %s\
+				failed (%ld)\n", __func__, platdata->vreg_name,
+				PTR_ERR(vreg_bq27520));
+			rc = PTR_ERR(vreg_bq27520);
+			goto vreg_get_fail;
+		}
+		rc = regulator_set_voltage(vreg_bq27520,
+			platdata->vreg_value, platdata->vreg_value);
+		if (rc) {
+			dev_err(di->dev, "%s: regulator_set_voltage(%s) failed\
+				 (%d)\n", __func__, platdata->vreg_name, rc);
+			goto vreg_get_fail;
+		}
+	} else {
+		regulator_put(vreg_bq27520);
+	}
+	return 0;
+
+vreg_get_fail:
+	regulator_put(vreg_bq27520);
+	return rc;
+}
+
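+/* Probe expects board code to supply bq27520_platform_data with at least
+ * vreg_name/vreg_value for the external supply, the chip_en and soc_int
+ * GPIOs, and the enable_dlog flag that controls the coulomb counter.
+ */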
+static int bq27520_battery_probe(struct i2c_client *client,
+				 const struct i2c_device_id *id)
+{
+	struct bq27520_device_info *di;
+	struct bq27520_access_methods *bus;
+	const struct bq27520_platform_data  *pdata;
+	int num, retval = 0;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+		return -ENODEV;
+
+	pdata = client->dev.platform_data;
+
+	/* Get new ID for the new battery device */
+	retval = idr_pre_get(&battery_id, GFP_KERNEL);
+	if (retval == 0)
+		return -ENOMEM;
+	mutex_lock(&battery_mutex);
+	retval = idr_get_new(&battery_id, client, &num);
+	mutex_unlock(&battery_mutex);
+	if (retval < 0)
+		return retval;
+
+	di = kzalloc(sizeof(*di), GFP_KERNEL);
+	if (!di) {
+		dev_err(&client->dev, "failed to allocate device info data\n");
+		retval = -ENOMEM;
+		goto batt_failed_1;
+	}
+	di->id = num;
+	di->pdata = pdata;
+
+	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
+	if (!bus) {
+		dev_err(&client->dev, "failed to allocate data\n");
+		retval = -ENOMEM;
+		goto batt_failed_2;
+	}
+
+	i2c_set_clientdata(client, di);
+	di->dev = &client->dev;
+	bus->read = &bq27520_read_i2c;
+	di->bus = bus;
+	di->client = client;
+
+#ifdef CONFIG_BQ27520_TEST_ENABLE
+	platform_set_drvdata(&this_device, di);
+	retval = platform_device_register(&this_device);
+	if (!retval) {
+		retval = sysfs_create_group(&this_device.dev.kobj,
+			 &fs_attr_group);
+		if (retval)
+			goto batt_failed_3;
+	} else
+		goto batt_failed_3;
+#endif
+
+	retval = bq27520_dev_setup(true, di);
+	if (retval) {
+		dev_err(&client->dev, "failed to setup ret = %d\n", retval);
+		goto batt_failed_3;
+	}
+
+	retval = bq27520_power(true, di);
+	if (retval) {
+		dev_err(&client->dev, "failed to powerup ret = %d\n", retval);
+		goto batt_failed_3;
+	}
+
+	spin_lock_init(&lock);
+
+	bq27520_di = di;
+	if (pdata->enable_dlog)
+		INIT_WORK(&di->counter, bq27520_coulomb_counter_work);
+
+	INIT_DELAYED_WORK(&current_battery_status.poller,
+			battery_status_poller);
+	INIT_DELAYED_WORK(&di->hw_config, bq27520_hw_config);
+	schedule_delayed_work(&di->hw_config, BQ27520_INIT_DELAY);
+
+	return 0;
+
+batt_failed_3:
+	kfree(bus);
+batt_failed_2:
+	kfree(di);
+batt_failed_1:
+	mutex_lock(&battery_mutex);
+	idr_remove(&battery_id, num);
+	mutex_unlock(&battery_mutex);
+
+	return retval;
+}
+
+static int bq27520_battery_remove(struct i2c_client *client)
+{
+	struct bq27520_device_info *di = i2c_get_clientdata(client);
+
+	if (di->pdata->enable_dlog) {
+		del_timer_sync(&timer);
+		cancel_work_sync(&di->counter);
+		bq27520_cntl_cmd(di, BQ27520_SUBCMD_DISABLE_DLOG);
+		udelay(66);
+	}
+
+	bq27520_cntl_cmd(di, BQ27520_SUBCMD_DISABLE_IT);
+	cancel_delayed_work_sync(&di->hw_config);
+	cancel_delayed_work_sync(&current_battery_status.poller);
+
+	bq27520_dev_setup(false, di);
+	bq27520_power(false, di);
+
+	kfree(di->bus);
+
+	mutex_lock(&battery_mutex);
+	idr_remove(&battery_id, di->id);
+	mutex_unlock(&battery_mutex);
+
+	kfree(di);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int bq27520_suspend(struct device *dev)
+{
+	struct bq27520_device_info *di = dev_get_drvdata(dev);
+
+	disable_irq_nosync(di->irq);
+	if (di->pdata->enable_dlog) {
+		del_timer_sync(&timer);
+		cancel_work_sync(&di->counter);
+	}
+
+	cancel_delayed_work_sync(&current_battery_status.poller);
+	return 0;
+}
+
+static int bq27520_resume(struct device *dev)
+{
+	struct bq27520_device_info *di = dev_get_drvdata(dev);
+
+	enable_irq(di->irq);
+	if (di->pdata->enable_dlog)
+		add_timer(&timer);
+
+	schedule_delayed_work(&current_battery_status.poller,
+				BQ27520_POLLING_STATUS);
+	return 0;
+}
+
+static const struct dev_pm_ops bq27520_pm_ops = {
+	.suspend = bq27520_suspend,
+	.resume = bq27520_resume,
+};
+#endif
+
+static const struct i2c_device_id bq27520_id[] = {
+	{ "bq27520", 1 },
+	{},
+};
+MODULE_DEVICE_TABLE(i2c, bq27520_id);
+
+static struct i2c_driver bq27520_battery_driver = {
+	.driver = {
+		.name = "bq27520-battery",
+		.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm = &bq27520_pm_ops,
+#endif
+	},
+	.probe = bq27520_battery_probe,
+	.remove = bq27520_battery_remove,
+	.id_table = bq27520_id,
+};
+
+static void init_battery_status(void)
+{
+	spin_lock_init(&current_battery_status.lock);
+	current_battery_status.status[GET_BATTERY_STATUS] =
+			POWER_SUPPLY_STATUS_UNKNOWN;
+}
+
+static int __init bq27520_battery_init(void)
+{
+	int ret;
+
+	/* initialize current_battery_status, and register with msm-charger */
+	init_battery_status();
+	msm_battery_gauge_register(&bq27520_batt_gauge);
+
+	ret = i2c_add_driver(&bq27520_battery_driver);
+	if (ret)
+		printk(KERN_ERR "Unable to register driver ret = %d\n", ret);
+
+	return ret;
+}
+module_init(bq27520_battery_init);
+
+static void __exit bq27520_battery_exit(void)
+{
+	i2c_del_driver(&bq27520_battery_driver);
+	msm_battery_gauge_unregister(&bq27520_batt_gauge);
+}
+module_exit(bq27520_battery_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Qualcomm Innovation Center, Inc.");
+MODULE_DESCRIPTION("BQ27520 battery monitor driver");
diff --git a/drivers/power/bq27541_fuelgauger.c b/drivers/power/bq27541_fuelgauger.c
new file mode 100644
index 0000000..516a861
--- /dev/null
+++ b/drivers/power/bq27541_fuelgauger.c
@@ -0,0 +1,623 @@
+/* Copyright (C) 2008 Rodolfo Giometti <giometti@linux.it>
+ * Copyright (C) 2008 Eurotech S.p.A. <info@eurotech.it>
+ * Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc.
+ *
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/param.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/idr.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+#include <linux/time.h>
+#include <linux/mfd/pmic8058.h>
+#include <linux/regulator/pmic8058-regulator.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/machine.h>
+#include <linux/err.h>
+#include <linux/msm-charger.h>
+#include <linux/i2c/bq27520.h> /* use the same platform data as bq27520 */
+
+#define DRIVER_VERSION			"1.1.0"
+/* Bq27541 standard data commands */
+#define BQ27541_REG_CNTL		0x00
+#define BQ27541_REG_AR			0x02
+#define BQ27541_REG_ARTTE		0x04
+#define BQ27541_REG_TEMP		0x06
+#define BQ27541_REG_VOLT		0x08
+#define BQ27541_REG_FLAGS		0x0A
+#define BQ27541_REG_NAC			0x0C
+#define BQ27541_REG_FAC			0x0e
+#define BQ27541_REG_RM			0x10
+#define BQ27541_REG_FCC			0x12
+#define BQ27541_REG_AI			0x14
+#define BQ27541_REG_TTE			0x16
+#define BQ27541_REG_TTF			0x18
+#define BQ27541_REG_SI			0x1a
+#define BQ27541_REG_STTE		0x1c
+#define BQ27541_REG_MLI			0x1e
+#define BQ27541_REG_MLTTE		0x20
+#define BQ27541_REG_AE			0x22
+#define BQ27541_REG_AP			0x24
+#define BQ27541_REG_TTECP		0x26
+#define BQ27541_REG_SOH			0x28
+#define BQ27541_REG_SOC			0x2c
+#define BQ27541_REG_NIC			0x2e
+#define BQ27541_REG_ICR			0x30
+#define BQ27541_REG_LOGIDX		0x32
+#define BQ27541_REG_LOGBUF		0x34
+
+#define BQ27541_FLAG_DSC		BIT(0)
+#define BQ27541_FLAG_FC			BIT(9)
+
+#define BQ27541_CS_DLOGEN		BIT(15)
+#define BQ27541_CS_SS		    BIT(13)
+
+/* Control subcommands */
+#define BQ27541_SUBCMD_CTNL_STATUS  0x0000
+#define BQ27541_SUBCMD_DEVCIE_TYPE  0x0001
+#define BQ27541_SUBCMD_FW_VER  0x0002
+#define BQ27541_SUBCMD_HW_VER  0x0003
+#define BQ27541_SUBCMD_DF_CSUM  0x0004
+#define BQ27541_SUBCMD_PREV_MACW   0x0007
+#define BQ27541_SUBCMD_CHEM_ID   0x0008
+#define BQ27541_SUBCMD_BD_OFFSET   0x0009
+#define BQ27541_SUBCMD_INT_OFFSET  0x000a
+#define BQ27541_SUBCMD_CC_VER   0x000b
+#define BQ27541_SUBCMD_OCV  0x000c
+#define BQ27541_SUBCMD_BAT_INS   0x000d
+#define BQ27541_SUBCMD_BAT_REM   0x000e
+#define BQ27541_SUBCMD_SET_HIB   0x0011
+#define BQ27541_SUBCMD_CLR_HIB   0x0012
+#define BQ27541_SUBCMD_SET_SLP   0x0013
+#define BQ27541_SUBCMD_CLR_SLP   0x0014
+#define BQ27541_SUBCMD_FCT_RES   0x0015
+#define BQ27541_SUBCMD_ENABLE_DLOG  0x0018
+#define BQ27541_SUBCMD_DISABLE_DLOG 0x0019
+#define BQ27541_SUBCMD_SEALED   0x0020
+#define BQ27541_SUBCMD_ENABLE_IT    0x0021
+#define BQ27541_SUBCMD_DISABLE_IT   0x0023
+#define BQ27541_SUBCMD_CAL_MODE  0x0040
+#define BQ27541_SUBCMD_RESET   0x0041
+#define ZERO_DEGREE_CELSIUS_IN_TENTH_KELVIN   (-2731)
+#define BQ27541_INIT_DELAY   ((HZ)*1)
+
+/* If the system has several batteries we need a different name for each
+ * of them...
+ */
+static DEFINE_IDR(battery_id);
+static DEFINE_MUTEX(battery_mutex);
+
+struct bq27541_device_info;
+struct bq27541_access_methods {
+	int (*read)(u8 reg, int *rt_value, int b_single,
+		struct bq27541_device_info *di);
+};
+
+struct bq27541_device_info {
+	struct device			*dev;
+	int				id;
+	struct bq27541_access_methods	*bus;
+	struct i2c_client		*client;
+	struct work_struct		counter;
+	/* 300ms delay is needed after bq27541 is powered up
+	 * and before any successful I2C transaction
+	 */
+	struct  delayed_work		hw_config;
+};
+
+static int coulomb_counter;
+static spinlock_t lock; /* protect access to coulomb_counter */
+
+static int bq27541_i2c_txsubcmd(u8 reg, unsigned short subcmd,
+		struct bq27541_device_info *di);
+
+static int bq27541_read(u8 reg, int *rt_value, int b_single,
+			struct bq27541_device_info *di)
+{
+	return di->bus->read(reg, rt_value, b_single, di);
+}
+
+/*
+ * Return the battery temperature in tenths of degree Celsius
+ * Or < 0 if something fails.
+ */
+static int bq27541_battery_temperature(struct bq27541_device_info *di)
+{
+	int ret;
+	int temp = 0;
+
+	ret = bq27541_read(BQ27541_REG_TEMP, &temp, 0, di);
+	if (ret) {
+		dev_err(di->dev, "error reading temperature\n");
+		return ret;
+	}
+
+	return temp + ZERO_DEGREE_CELSIUS_IN_TENTH_KELVIN;
+}
+
+/*
+ * Return the battery voltage in millivolts
+ * Or < 0 if something fails.
+ */
+static int bq27541_battery_voltage(struct bq27541_device_info *di)
+{
+	int ret;
+	int volt = 0;
+
+	ret = bq27541_read(BQ27541_REG_VOLT, &volt, 0, di);
+	if (ret) {
+		dev_err(di->dev, "error reading voltage\n");
+		return ret;
+	}
+
+	return volt * 1000;
+}
+
+static void bq27541_cntl_cmd(struct bq27541_device_info *di,
+				int subcmd)
+{
+	bq27541_i2c_txsubcmd(BQ27541_REG_CNTL, subcmd, di);
+}
+
+/*
+ * i2c specific code
+ */
+static int bq27541_i2c_txsubcmd(u8 reg, unsigned short subcmd,
+		struct bq27541_device_info *di)
+{
+	struct i2c_msg msg;
+	unsigned char data[3];
+	int ret;
+
+	if (!di->client)
+		return -ENODEV;
+
+	memset(data, 0, sizeof(data));
+	data[0] = reg;
+	data[1] = subcmd & 0x00FF;
+	data[2] = (subcmd & 0xFF00) >> 8;
+
+	msg.addr = di->client->addr;
+	msg.flags = 0;
+	msg.len = 3;
+	msg.buf = data;
+
+	ret = i2c_transfer(di->client->adapter, &msg, 1);
+	if (ret < 0)
+		return -EIO;
+
+	return 0;
+}
+
+static int bq27541_chip_config(struct bq27541_device_info *di)
+{
+	int flags = 0, ret = 0;
+
+	bq27541_cntl_cmd(di, BQ27541_SUBCMD_CTNL_STATUS);
+	udelay(66);
+	ret = bq27541_read(BQ27541_REG_CNTL, &flags, 0, di);
+	if (ret < 0) {
+		dev_err(di->dev, "error reading register %02x ret = %d\n",
+			 BQ27541_REG_CNTL, ret);
+		return ret;
+	}
+	udelay(66);
+
+	bq27541_cntl_cmd(di, BQ27541_SUBCMD_ENABLE_IT);
+	udelay(66);
+
+	if (!(flags & BQ27541_CS_DLOGEN)) {
+		bq27541_cntl_cmd(di, BQ27541_SUBCMD_ENABLE_DLOG);
+		udelay(66);
+	}
+
+	return 0;
+}
+
+static void bq27541_coulomb_counter_work(struct work_struct *work)
+{
+	int value = 0, temp = 0, index = 0, ret = 0;
+	struct bq27541_device_info *di;
+	unsigned long flags;
+	int count = 0;
+
+	di = container_of(work, struct bq27541_device_info, counter);
+
+	/* retrieve 30 values from FIFO of coulomb data logging buffer
+	 * and average over time
+	 */
+	do {
+		ret = bq27541_read(BQ27541_REG_LOGBUF, &temp, 0, di);
+		if (ret < 0)
+			break;
+		if (temp != 0x7FFF) {
+			++count;
+			value += temp;
+		}
+		/* delay 66uS, waiting time between continuous reading
+		 * results
+		 */
+		udelay(66);
+		ret = bq27541_read(BQ27541_REG_LOGIDX, &index, 0, di);
+		if (ret < 0)
+			break;
+		udelay(66);
+	} while (index != 0 || temp != 0x7FFF);
+
+	if (ret < 0) {
+		dev_err(di->dev, "Error reading datalog register\n");
+		return;
+	}
+
+	if (count) {
+		spin_lock_irqsave(&lock, flags);
+		coulomb_counter = value/count;
+		spin_unlock_irqrestore(&lock, flags);
+	}
+}
+
+static struct bq27541_device_info *bq27541_di;
+
+static int bq27541_get_battery_mvolts(void)
+{
+	return bq27541_battery_voltage(bq27541_di);
+}
+
+static int bq27541_get_battery_temperature(void)
+{
+	return bq27541_battery_temperature(bq27541_di);
+}
+static int bq27541_is_battery_present(void)
+{
+	return 1;
+}
+static int bq27541_is_battery_temp_within_range(void)
+{
+	return 1;
+}
+static int bq27541_is_battery_id_valid(void)
+{
+	return 1;
+}
+
+static struct msm_battery_gauge bq27541_batt_gauge = {
+	.get_battery_mvolts		= bq27541_get_battery_mvolts,
+	.get_battery_temperature	= bq27541_get_battery_temperature,
+	.is_battery_present		= bq27541_is_battery_present,
+	.is_battery_temp_within_range	= bq27541_is_battery_temp_within_range,
+	.is_battery_id_valid		= bq27541_is_battery_id_valid,
+};
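+
+/* One-shot setup run BQ27541_INIT_DELAY after probe: configure the chip,
+ * register the gauge callbacks with msm_charger, then log the device type
+ * and firmware version.
+ */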
+static void bq27541_hw_config(struct work_struct *work)
+{
+	int ret = 0, flags = 0, type = 0, fw_ver = 0;
+	struct bq27541_device_info *di;
+
+	di  = container_of(work, struct bq27541_device_info, hw_config.work);
+	ret = bq27541_chip_config(di);
+	if (ret) {
+		dev_err(di->dev, "Failed to config Bq27541\n");
+		return;
+	}
+	msm_battery_gauge_register(&bq27541_batt_gauge);
+
+	bq27541_cntl_cmd(di, BQ27541_SUBCMD_CTNL_STATUS);
+	udelay(66);
+	bq27541_read(BQ27541_REG_CNTL, &flags, 0, di);
+	bq27541_cntl_cmd(di, BQ27541_SUBCMD_DEVCIE_TYPE);
+	udelay(66);
+	bq27541_read(BQ27541_REG_CNTL, &type, 0, di);
+	bq27541_cntl_cmd(di, BQ27541_SUBCMD_FW_VER);
+	udelay(66);
+	bq27541_read(BQ27541_REG_CNTL, &fw_ver, 0, di);
+
+	dev_info(di->dev, "DEVICE_TYPE is 0x%02X, FIRMWARE_VERSION is 0x%02X\n",
+			type, fw_ver);
+	dev_info(di->dev, "Complete bq27541 configuration 0x%02X\n", flags);
+}
+
+static int bq27541_read_i2c(u8 reg, int *rt_value, int b_single,
+			struct bq27541_device_info *di)
+{
+	struct i2c_client *client = di->client;
+	struct i2c_msg msg[1];
+	unsigned char data[2];
+	int err;
+
+	if (!client->adapter)
+		return -ENODEV;
+
+	msg->addr = client->addr;
+	msg->flags = 0;
+	msg->len = 1;
+	msg->buf = data;
+
+	data[0] = reg;
+	err = i2c_transfer(client->adapter, msg, 1);
+
+	if (err >= 0) {
+		if (!b_single)
+			msg->len = 2;
+		else
+			msg->len = 1;
+
+		msg->flags = I2C_M_RD;
+		err = i2c_transfer(client->adapter, msg, 1);
+		if (err >= 0) {
+			if (!b_single)
+				*rt_value = get_unaligned_le16(data);
+			else
+				*rt_value = data[0];
+
+			return 0;
+		}
+	}
+	return err;
+}
+
+#ifdef CONFIG_BQ27541_TEST_ENABLE
+static int reg;
+static int subcmd;
+static ssize_t bq27541_read_stdcmd(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	int ret;
+	int temp = 0;
+	struct platform_device *client;
+	struct bq27541_device_info *di;
+
+	client = to_platform_device(dev);
+	di = platform_get_drvdata(client);
+
+	if (reg <= BQ27541_REG_ICR && reg > 0x00) {
+		ret = bq27541_read(reg, &temp, 0, di);
+		if (ret)
+			ret = snprintf(buf, PAGE_SIZE, "Read Error!\n");
+		else
+			ret = snprintf(buf, PAGE_SIZE, "0x%02x\n", temp);
+	} else
+		ret = snprintf(buf, PAGE_SIZE, "Register Error!\n");
+
+	return ret;
+}
+
+static ssize_t bq27541_write_stdcmd(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	ssize_t ret = strnlen(buf, PAGE_SIZE);
+	int cmd;
+
+	sscanf(buf, "%x", &cmd);
+	reg = cmd;
+	return ret;
+}
+
+static ssize_t bq27541_read_subcmd(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	int ret;
+	int temp = 0;
+	struct platform_device *client;
+	struct bq27541_device_info *di;
+
+	client = to_platform_device(dev);
+	di = platform_get_drvdata(client);
+
+	if (subcmd == BQ27541_SUBCMD_DEVCIE_TYPE ||
+		 subcmd == BQ27541_SUBCMD_FW_VER ||
+		 subcmd == BQ27541_SUBCMD_HW_VER ||
+		 subcmd == BQ27541_SUBCMD_CHEM_ID) {
+
+		bq27541_cntl_cmd(di, subcmd); /* Retrieve Chip status */
+		udelay(66);
+		ret = bq27541_read(BQ27541_REG_CNTL, &temp, 0, di);
+
+		if (ret)
+			ret = snprintf(buf, PAGE_SIZE, "Read Error!\n");
+		else
+			ret = snprintf(buf, PAGE_SIZE, "0x%02x\n", temp);
+	} else
+		ret = snprintf(buf, PAGE_SIZE, "Register Error!\n");
+
+	return ret;
+}
+
+static ssize_t bq27541_write_subcmd(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	ssize_t ret = strnlen(buf, PAGE_SIZE);
+	int cmd;
+
+	sscanf(buf, "%x", &cmd);
+	subcmd = cmd;
+	return ret;
+}
+
+static DEVICE_ATTR(std_cmd, S_IRUGO|S_IWUGO, bq27541_read_stdcmd,
+	bq27541_write_stdcmd);
+static DEVICE_ATTR(sub_cmd, S_IRUGO|S_IWUGO, bq27541_read_subcmd,
+	bq27541_write_subcmd);
+static struct attribute *fs_attrs[] = {
+	&dev_attr_std_cmd.attr,
+	&dev_attr_sub_cmd.attr,
+	NULL,
+};
+static struct attribute_group fs_attr_group = {
+	.attrs = fs_attrs,
+};
+
+static struct platform_device this_device = {
+	.name			= "bq27541-test",
+	.id			= -1,
+	.dev.platform_data	= NULL,
+};
+#endif
+
+static int bq27541_battery_probe(struct i2c_client *client,
+				 const struct i2c_device_id *id)
+{
+	char *name;
+	struct bq27541_device_info *di;
+	struct bq27541_access_methods *bus;
+	int num;
+	int retval = 0;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+		return -ENODEV;
+
+	/* Get new ID for the new battery device */
+	retval = idr_pre_get(&battery_id, GFP_KERNEL);
+	if (retval == 0)
+		return -ENOMEM;
+	mutex_lock(&battery_mutex);
+	retval = idr_get_new(&battery_id, client, &num);
+	mutex_unlock(&battery_mutex);
+	if (retval < 0)
+		return retval;
+
+	name = kasprintf(GFP_KERNEL, "%s-%d", id->name, num);
+	if (!name) {
+		dev_err(&client->dev, "failed to allocate device name\n");
+		retval = -ENOMEM;
+		goto batt_failed_1;
+	}
+
+	di = kzalloc(sizeof(*di), GFP_KERNEL);
+	if (!di) {
+		dev_err(&client->dev, "failed to allocate device info data\n");
+		retval = -ENOMEM;
+		goto batt_failed_2;
+	}
+	di->id = num;
+
+	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
+	if (!bus) {
+		dev_err(&client->dev, "failed to allocate access method "
+					"data\n");
+		retval = -ENOMEM;
+		goto batt_failed_3;
+	}
+
+	i2c_set_clientdata(client, di);
+	di->dev = &client->dev;
+	bus->read = &bq27541_read_i2c;
+	di->bus = bus;
+	di->client = client;
+
+#ifdef CONFIG_BQ27541_TEST_ENABLE
+	platform_set_drvdata(&this_device, di);
+	retval = platform_device_register(&this_device);
+	if (!retval) {
+		retval = sysfs_create_group(&this_device.dev.kobj,
+			 &fs_attr_group);
+		if (retval)
+			goto batt_failed_4;
+	} else
+		goto batt_failed_4;
+#endif
+
+	if (retval) {
+		dev_err(&client->dev, "failed to setup bq27541\n");
+		goto batt_failed_4;
+	}
+
+	if (retval) {
+		dev_err(&client->dev, "failed to powerup bq27541\n");
+		goto batt_failed_4;
+	}
+
+	spin_lock_init(&lock);
+
+	bq27541_di = di;
+	INIT_WORK(&di->counter, bq27541_coulomb_counter_work);
+	INIT_DELAYED_WORK(&di->hw_config, bq27541_hw_config);
+	schedule_delayed_work(&di->hw_config, BQ27541_INIT_DELAY);
+	return 0;
+
+batt_failed_4:
+	kfree(bus);
+batt_failed_3:
+	kfree(di);
+batt_failed_2:
+	kfree(name);
+batt_failed_1:
+	mutex_lock(&battery_mutex);
+	idr_remove(&battery_id, num);
+	mutex_unlock(&battery_mutex);
+
+	return retval;
+}
+
+static int bq27541_battery_remove(struct i2c_client *client)
+{
+	struct bq27541_device_info *di = i2c_get_clientdata(client);
+
+	msm_battery_gauge_unregister(&bq27541_batt_gauge);
+	bq27541_cntl_cmd(di, BQ27541_SUBCMD_DISABLE_DLOG);
+	udelay(66);
+	bq27541_cntl_cmd(di, BQ27541_SUBCMD_DISABLE_IT);
+	cancel_delayed_work_sync(&di->hw_config);
+
+	kfree(di->bus);
+
+	mutex_lock(&battery_mutex);
+	idr_remove(&battery_id, di->id);
+	mutex_unlock(&battery_mutex);
+
+	kfree(di);
+	return 0;
+}
+
+static const struct i2c_device_id bq27541_id[] = {
+	{ "bq27541", 1 },
+	{},
+};
+MODULE_DEVICE_TABLE(i2c, bq27541_id);
+
+static struct i2c_driver bq27541_battery_driver = {
+	.driver		= {
+			.name = "bq27541-battery",
+	},
+	.probe		= bq27541_battery_probe,
+	.remove		= bq27541_battery_remove,
+	.id_table	= bq27541_id,
+};
+
+static int __init bq27541_battery_init(void)
+{
+	int ret;
+
+	ret = i2c_add_driver(&bq27541_battery_driver);
+	if (ret)
+		printk(KERN_ERR "Unable to register BQ27541 driver\n");
+
+	return ret;
+}
+module_init(bq27541_battery_init);
+
+static void __exit bq27541_battery_exit(void)
+{
+	i2c_del_driver(&bq27541_battery_driver);
+}
+module_exit(bq27541_battery_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Qualcomm Innovation Center, Inc.");
+MODULE_DESCRIPTION("BQ27541 battery monitor driver");
diff --git a/drivers/power/isl9519q.c b/drivers/power/isl9519q.c
new file mode 100644
index 0000000..4954a45
--- /dev/null
+++ b/drivers/power/isl9519q.c
@@ -0,0 +1,517 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/msm-charger.h>
+#include <linux/slab.h>
+#include <linux/i2c/isl9519.h>
+#include <linux/msm_adc.h>
+
+#define CHG_CURRENT_REG		0x14
+#define MAX_SYS_VOLTAGE_REG	0x15
+#define CONTROL_REG		0x3D
+#define MIN_SYS_VOLTAGE_REG	0x3E
+#define INPUT_CURRENT_REG	0x3F
+#define MANUFACTURER_ID_REG	0xFE
+#define DEVICE_ID_REG		0xFF
+
+#define TRCKL_CHG_STATUS_BIT	0x80
+
+#define ISL9519_CHG_PERIOD	((HZ) * 150)
+
+struct isl9519q_struct {
+	struct i2c_client		*client;
+	struct delayed_work		charge_work;
+	int				present;
+	int				batt_present;
+	bool				charging;
+	int				chgcurrent;
+	int				term_current;
+	int				input_current;
+	int				max_system_voltage;
+	int				min_system_voltage;
+	int				valid_n_gpio;
+	struct dentry			*dent;
+	struct msm_hardware_charger	adapter_hw_chg;
+};
+
+static int isl9519q_read_reg(struct i2c_client *client, int reg,
+	u16 *val)
+{
+	int ret;
+	struct isl9519q_struct *isl_chg;
+
+	isl_chg = i2c_get_clientdata(client);
+	ret = i2c_smbus_read_word_data(isl_chg->client, reg);
+
+	if (ret < 0) {
+		dev_err(&isl_chg->client->dev,
+			"i2c read fail: can't read from %02x: %d\n", reg, ret);
+		return -EAGAIN;
+	} else
+		*val = ret;
+
+	return 0;
+}
+
+static int isl9519q_write_reg(struct i2c_client *client, int reg,
+	u16 val)
+{
+	int ret;
+	struct isl9519q_struct *isl_chg;
+
+	isl_chg = i2c_get_clientdata(client);
+	ret = i2c_smbus_write_word_data(isl_chg->client, reg, val);
+
+	if (ret < 0) {
+		dev_err(&isl_chg->client->dev,
+			"i2c write fail: can't write %02x to %02x: %d\n",
+			val, reg, ret);
+		return -EAGAIN;
+	}
+	return 0;
+}
+
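+/* Synchronous wrapper around the msm_adc one-shot interface: open the
+ * channel, request a conversion, wait for it to complete, read the result
+ * and close the channel.  Returns the physical reading and, optionally,
+ * the raw millivolt value.
+ */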
+static int isl_read_adc(int channel, int *mv_reading)
+{
+	int ret;
+	void *h;
+	struct adc_chan_result adc_chan_result;
+	struct completion  conv_complete_evt;
+
+	pr_debug("%s: called for %d\n", __func__, channel);
+	ret = adc_channel_open(channel, &h);
+	if (ret) {
+		pr_err("%s: couldnt open channel %d ret=%d\n",
+					__func__, channel, ret);
+		goto out;
+	}
+	init_completion(&conv_complete_evt);
+	ret = adc_channel_request_conv(h, &conv_complete_evt);
+	if (ret) {
+		pr_err("%s: couldnt request conv channel %d ret=%d\n",
+						__func__, channel, ret);
+		goto out;
+	}
+	ret = wait_for_completion_interruptible(&conv_complete_evt);
+	if (ret) {
+		pr_err("%s: wait interrupted channel %d ret=%d\n",
+						__func__, channel, ret);
+		goto out;
+	}
+	ret = adc_channel_read_result(h, &adc_chan_result);
+	if (ret) {
+		pr_err("%s: couldnt read result channel %d ret=%d\n",
+						__func__, channel, ret);
+		goto out;
+	}
+	ret = adc_channel_close(h);
+	if (ret)
+		pr_err("%s: couldnt close channel %d ret=%d\n",
+					__func__, channel, ret);
+	if (mv_reading)
+		*mv_reading = (int)adc_chan_result.measurement;
+
+	pr_debug("%s: done for %d\n", __func__, channel);
+	return adc_chan_result.physical;
+out:
+	if (mv_reading)
+		*mv_reading = 0;
+	pr_debug("%s: done with error for %d\n", __func__, channel);
+	return -EINVAL;
+
+}
+
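+/* Periodic charging work.  While charging it samples the battery current on
+ * CHANNEL_ADC_BATT_AMON, reports CHG_DONE_EVENT once that current falls to
+ * the termination level, and rewrites CHG_CURRENT_REG on every pass (the
+ * register needs periodic refreshing for charging to proceed, which is also
+ * why suspend is refused while charging).
+ */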
+static void isl9519q_charge(struct work_struct *isl9519_work)
+{
+	u16 temp;
+	int ret;
+	struct isl9519q_struct *isl_chg;
+	int isl_charger_current;
+	int mv_reading;
+
+	isl_chg = container_of(isl9519_work, struct isl9519q_struct,
+			charge_work.work);
+
+	dev_dbg(&isl_chg->client->dev, "%s\n", __func__);
+
+	if (isl_chg->charging) {
+		isl_charger_current = isl_read_adc(CHANNEL_ADC_BATT_AMON,
+								&mv_reading);
+		dev_dbg(&isl_chg->client->dev, "%s mv_reading=%d\n",
+				__func__, mv_reading);
+		dev_dbg(&isl_chg->client->dev, "%s isl_charger_current=%d\n",
+				__func__, isl_charger_current);
+		if (isl_charger_current >= 0
+			&& isl_charger_current <= isl_chg->term_current) {
+			msm_charger_notify_event(
+					&isl_chg->adapter_hw_chg,
+					CHG_DONE_EVENT);
+		}
+		isl9519q_write_reg(isl_chg->client, CHG_CURRENT_REG,
+				isl_chg->chgcurrent);
+		ret = isl9519q_read_reg(isl_chg->client, CONTROL_REG, &temp);
+		if (!ret) {
+			if (!(temp & TRCKL_CHG_STATUS_BIT))
+				msm_charger_notify_event(
+						&isl_chg->adapter_hw_chg,
+						CHG_BATT_BEGIN_FAST_CHARGING);
+		} else {
+			dev_err(&isl_chg->client->dev,
+				"%s couldnt read cntrl reg\n", __func__);
+		}
+		schedule_delayed_work(&isl_chg->charge_work,
+						ISL9519_CHG_PERIOD);
+	}
+}
+
+static int isl9519q_start_charging(struct msm_hardware_charger *hw_chg,
+		int chg_voltage, int chg_current)
+{
+	struct isl9519q_struct *isl_chg;
+	int ret = 0;
+
+	isl_chg = container_of(hw_chg, struct isl9519q_struct, adapter_hw_chg);
+	if (isl_chg->charging)
+		/* we are already charging */
+		return 0;
+
+	dev_dbg(&isl_chg->client->dev, "%s\n", __func__);
+
+	ret = isl9519q_write_reg(isl_chg->client, CHG_CURRENT_REG,
+						isl_chg->chgcurrent);
+	if (ret) {
+		dev_err(&isl_chg->client->dev,
+			"%s coulnt write to current_reg\n", __func__);
+		goto out;
+	}
+
+	dev_dbg(&isl_chg->client->dev, "%s starting timed work\n",
+							__func__);
+	schedule_delayed_work(&isl_chg->charge_work,
+						ISL9519_CHG_PERIOD);
+	isl_chg->charging = true;
+
+out:
+	return ret;
+}
+
+static int isl9519q_stop_charging(struct msm_hardware_charger *hw_chg)
+{
+	struct isl9519q_struct *isl_chg;
+	int ret = 0;
+
+	isl_chg = container_of(hw_chg, struct isl9519q_struct, adapter_hw_chg);
+	if (!(isl_chg->charging))
+		/* we aren't charging */
+		return 0;
+
+	dev_dbg(&isl_chg->client->dev, "%s\n", __func__);
+
+	ret = isl9519q_write_reg(isl_chg->client, CHG_CURRENT_REG, 0);
+	if (ret) {
+		dev_err(&isl_chg->client->dev,
+			"%s coulnt write to current_reg\n", __func__);
+		goto out;
+	}
+
+	isl_chg->charging = false;
+	cancel_delayed_work(&isl_chg->charge_work);
+out:
+	return ret;
+}
+
+static int isl9519q_charging_switched(struct msm_hardware_charger *hw_chg)
+{
+	struct isl9519q_struct *isl_chg;
+
+	isl_chg = container_of(hw_chg, struct isl9519q_struct, adapter_hw_chg);
+	dev_dbg(&isl_chg->client->dev, "%s\n", __func__);
+	return 0;
+}
+
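+/* Threaded handler for the active-low "charger valid" GPIO: report insertion
+ * and removal to msm_charger, using 'present' so each transition is only
+ * reported once.
+ */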
+static irqreturn_t isl_valid_handler(int irq, void *dev_id)
+{
+	int val;
+	struct isl9519q_struct *isl_chg;
+	struct i2c_client *client = dev_id;
+
+	isl_chg = i2c_get_clientdata(client);
+	val = gpio_get_value_cansleep(isl_chg->valid_n_gpio);
+	if (val < 0) {
+		dev_err(&isl_chg->client->dev,
+			"%s gpio_get_value failed for %d ret=%d\n", __func__,
+			isl_chg->valid_n_gpio, val);
+		goto err;
+	}
+	dev_dbg(&isl_chg->client->dev, "%s val=%d\n", __func__, val);
+
+	if (val) {
+		if (isl_chg->present == 1) {
+			msm_charger_notify_event(&isl_chg->adapter_hw_chg,
+						 CHG_REMOVED_EVENT);
+			isl_chg->present = 0;
+		}
+	} else {
+		if (isl_chg->present == 0) {
+			msm_charger_notify_event(&isl_chg->adapter_hw_chg,
+						 CHG_INSERTED_EVENT);
+			isl_chg->present = 1;
+		}
+	}
+err:
+	return IRQ_HANDLED;
+}
+
+#define MAX_VOLTAGE_REG_MASK  0x3FF0
+#define MIN_VOLTAGE_REG_MASK  0x3F00
+#define DEFAULT_MAX_VOLTAGE_REG_VALUE	0x1070
+#define DEFAULT_MIN_VOLTAGE_REG_VALUE	0x0D00
+
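+/* Probe relies on isl_platform_data from the board file: charge, termination
+ * and input current limits, max/min system voltage (the hardware ignores the
+ * low 7 bits of the current values), the valid_n_gpio used for charger
+ * detection, and an optional chg_detection_config() hook.
+ */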
+static int __devinit isl9519q_probe(struct i2c_client *client,
+				    const struct i2c_device_id *id)
+{
+	struct isl_platform_data *pdata;
+	struct isl9519q_struct *isl_chg;
+	int ret;
+
+	ret = 0;
+	pdata = client->dev.platform_data;
+
+	if (pdata == NULL) {
+		dev_err(&client->dev, "%s no platform data\n", __func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (!i2c_check_functionality(client->adapter,
+				I2C_FUNC_SMBUS_WORD_DATA)) {
+		ret = -EIO;
+		goto out;
+	}
+
+	isl_chg = kzalloc(sizeof(*isl_chg), GFP_KERNEL);
+	if (!isl_chg) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	INIT_DELAYED_WORK(&isl_chg->charge_work, isl9519q_charge);
+	isl_chg->client = client;
+	isl_chg->chgcurrent = pdata->chgcurrent;
+	isl_chg->term_current = pdata->term_current;
+	isl_chg->input_current = pdata->input_current;
+	isl_chg->max_system_voltage = pdata->max_system_voltage;
+	isl_chg->min_system_voltage = pdata->min_system_voltage;
+	isl_chg->valid_n_gpio = pdata->valid_n_gpio;
+
+	/* h/w ignores lower 7 bits of charging current and input current */
+	isl_chg->chgcurrent &= ~0x7F;
+	isl_chg->input_current &= ~0x7F;
+
+	isl_chg->adapter_hw_chg.type = CHG_TYPE_AC;
+	isl_chg->adapter_hw_chg.rating = 2;
+	isl_chg->adapter_hw_chg.name = "isl-adapter";
+	isl_chg->adapter_hw_chg.start_charging = isl9519q_start_charging;
+	isl_chg->adapter_hw_chg.stop_charging = isl9519q_stop_charging;
+	isl_chg->adapter_hw_chg.charging_switched = isl9519q_charging_switched;
+
+	if (pdata->chg_detection_config) {
+		ret = pdata->chg_detection_config();
+		if (ret) {
+			dev_err(&client->dev, "%s valid config failed ret=%d\n",
+				__func__, ret);
+			goto free_isl_chg;
+		}
+	}
+
+	ret = gpio_request(pdata->valid_n_gpio, "isl_charger_valid");
+	if (ret) {
+		dev_err(&client->dev, "%s gpio_request failed for %d ret=%d\n",
+			__func__, pdata->valid_n_gpio, ret);
+		goto free_isl_chg;
+	}
+
+	i2c_set_clientdata(client, isl_chg);
+
+	ret = msm_charger_register(&isl_chg->adapter_hw_chg);
+	if (ret) {
+		dev_err(&client->dev,
+			"%s msm_charger_register failed for ret =%d\n",
+			__func__, ret);
+		goto free_gpio;
+	}
+
+	ret = request_threaded_irq(client->irq, NULL,
+				   isl_valid_handler,
+				   IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+				   "isl_charger_valid", client);
+	if (ret) {
+		dev_err(&client->dev,
+			"%s request_threaded_irq failed for %d ret =%d\n",
+			__func__, client->irq, ret);
+		goto unregister;
+	}
+	irq_set_irq_wake(client->irq, 1);
+
+	isl_chg->max_system_voltage &= MAX_VOLTAGE_REG_MASK;
+	isl_chg->min_system_voltage &= MIN_VOLTAGE_REG_MASK;
+	if (isl_chg->max_system_voltage == 0)
+		isl_chg->max_system_voltage = DEFAULT_MAX_VOLTAGE_REG_VALUE;
+	if (isl_chg->min_system_voltage == 0)
+		isl_chg->min_system_voltage = DEFAULT_MIN_VOLTAGE_REG_VALUE;
+
+	ret = isl9519q_write_reg(isl_chg->client, MAX_SYS_VOLTAGE_REG,
+			isl_chg->max_system_voltage);
+	if (ret) {
+		dev_err(&client->dev,
+			"%s couldnt write to MAX_SYS_VOLTAGE_REG ret=%d\n",
+			__func__, ret);
+		goto free_irq;
+	}
+
+	ret = isl9519q_write_reg(isl_chg->client, MIN_SYS_VOLTAGE_REG,
+			isl_chg->min_system_voltage);
+	if (ret) {
+		dev_err(&client->dev,
+			"%s couldnt write to MIN_SYS_VOLTAGE_REG ret=%d\n",
+			__func__, ret);
+		goto free_irq;
+	}
+
+	if (isl_chg->input_current) {
+		ret = isl9519q_write_reg(isl_chg->client,
+				INPUT_CURRENT_REG,
+				isl_chg->input_current);
+		if (ret) {
+			dev_err(&client->dev,
+				"%s couldnt write INPUT_CURRENT_REG ret=%d\n",
+				__func__, ret);
+			goto free_irq;
+		}
+	}
+
+	ret = gpio_get_value_cansleep(isl_chg->valid_n_gpio);
+	if (ret < 0) {
+		dev_err(&client->dev,
+			"%s gpio_get_value failed for %d ret=%d\n", __func__,
+			pdata->valid_n_gpio, ret);
+		/* assume absent */
+		ret = 1;
+	}
+	if (!ret) {
+		msm_charger_notify_event(&isl_chg->adapter_hw_chg,
+				CHG_INSERTED_EVENT);
+		isl_chg->present = 1;
+	}
+
+	pr_debug("%s OK chg_present=%d\n", __func__, isl_chg->present);
+	return 0;
+
+free_irq:
+	free_irq(client->irq, client);
+unregister:
+	msm_charger_register(&isl_chg->adapter_hw_chg);
+free_gpio:
+	gpio_free(pdata->valid_n_gpio);
+free_isl_chg:
+	kfree(isl_chg);
+out:
+	return ret;
+}
+
+static int __devexit isl9519q_remove(struct i2c_client *client)
+{
+	struct isl_platform_data *pdata;
+	struct isl9519q_struct *isl_chg = i2c_get_clientdata(client);
+
+	pdata = client->dev.platform_data;
+	gpio_free(pdata->valid_n_gpio);
+	free_irq(client->irq, client);
+	cancel_delayed_work_sync(&isl_chg->charge_work);
+	msm_charger_notify_event(&isl_chg->adapter_hw_chg, CHG_REMOVED_EVENT);
+	msm_charger_unregister(&isl_chg->adapter_hw_chg);
+	return 0;
+}
+
+static const struct i2c_device_id isl9519q_id[] = {
+	{"isl9519q", 0},
+	{},
+};
+
+#ifdef CONFIG_PM
+static int isl9519q_suspend(struct device *dev)
+{
+	struct isl9519q_struct *isl_chg = dev_get_drvdata(dev);
+
+	dev_dbg(&isl_chg->client->dev, "%s\n", __func__);
+	/*
+	 * do not suspend while we are charging
+	 * because we need to periodically update the register
+	 * for charging to proceed
+	 */
+	if (isl_chg->charging)
+		return -EBUSY;
+	return 0;
+}
+
+static int isl9519q_resume(struct device *dev)
+{
+	struct isl9519q_struct *isl_chg = dev_get_drvdata(dev);
+
+	dev_dbg(&isl_chg->client->dev, "%s\n", __func__);
+	return 0;
+}
+
+static const struct dev_pm_ops isl9519q_pm_ops = {
+	.suspend = isl9519q_suspend,
+	.resume = isl9519q_resume,
+};
+#endif
+
+static struct i2c_driver isl9519q_driver = {
+	.driver = {
+		   .name = "isl9519q",
+		   .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+		   .pm = &isl9519q_pm_ops,
+#endif
+	},
+	.probe = isl9519q_probe,
+	.remove = __devexit_p(isl9519q_remove),
+	.id_table = isl9519q_id,
+};
+
+static int __init isl9519q_init(void)
+{
+	return i2c_add_driver(&isl9519q_driver);
+}
+
+module_init(isl9519q_init);
+
+static void __exit isl9519q_exit(void)
+{
+	return i2c_del_driver(&isl9519q_driver);
+}
+
+module_exit(isl9519q_exit);
+
+MODULE_AUTHOR("Abhijeet Dharmapurikar <adharmap@codeaurora.org>");
+MODULE_DESCRIPTION("Driver for ISL9519Q Charger chip");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/msm_battery.c b/drivers/power/msm_battery.c
new file mode 100644
index 0000000..464a1b5
--- /dev/null
+++ b/drivers/power/msm_battery.c
@@ -0,0 +1,1592 @@
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * this needs to be before <linux/kernel.h> is loaded,
+ * and <linux/sched.h> loads <linux/kernel.h>
+ */
+#define DEBUG  1
+
+#include <linux/slab.h>
+#include <linux/earlysuspend.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include <asm/atomic.h>
+
+#include <mach/msm_rpcrouter.h>
+#include <mach/msm_battery.h>
+
+#define BATTERY_RPC_PROG	0x30000089
+#define BATTERY_RPC_VER_1_1	0x00010001
+#define BATTERY_RPC_VER_2_1	0x00020001
+#define BATTERY_RPC_VER_4_1     0x00040001
+#define BATTERY_RPC_VER_5_1     0x00050001
+
+#define BATTERY_RPC_CB_PROG	(BATTERY_RPC_PROG | 0x01000000)
+
+#define CHG_RPC_PROG		0x3000001a
+#define CHG_RPC_VER_1_1		0x00010001
+#define CHG_RPC_VER_1_3		0x00010003
+#define CHG_RPC_VER_2_2		0x00020002
+#define CHG_RPC_VER_3_1         0x00030001
+#define CHG_RPC_VER_4_1         0x00040001
+
+#define BATTERY_REGISTER_PROC				2
+#define BATTERY_MODIFY_CLIENT_PROC			4
+#define BATTERY_DEREGISTER_CLIENT_PROC			5
+#define BATTERY_READ_MV_PROC				12
+#define BATTERY_ENABLE_DISABLE_FILTER_PROC		14
+
+#define VBATT_FILTER			2
+
+#define BATTERY_CB_TYPE_PROC		1
+#define BATTERY_CB_ID_ALL_ACTIV		1
+#define BATTERY_CB_ID_LOW_VOL		2
+
+#define BATTERY_LOW		3200
+#define BATTERY_HIGH		4300
+
+#define ONCRPC_CHG_GET_GENERAL_STATUS_PROC	12
+#define ONCRPC_CHARGER_API_VERSIONS_PROC	0xffffffff
+
+#define BATT_RPC_TIMEOUT    5000	/* 5 sec */
+
+#define INVALID_BATT_HANDLE    -1
+
+#define RPC_TYPE_REQ     0
+#define RPC_TYPE_REPLY   1
+#define RPC_REQ_REPLY_COMMON_HEADER_SIZE   (3 * sizeof(uint32_t))
+
+
+#if DEBUG
+#define DBG_LIMIT(x...) do {if (printk_ratelimit()) pr_debug(x); } while (0)
+#else
+#define DBG_LIMIT(x...) do {} while (0)
+#endif
+
+enum {
+	BATTERY_REGISTRATION_SUCCESSFUL = 0,
+	BATTERY_DEREGISTRATION_SUCCESSFUL = BATTERY_REGISTRATION_SUCCESSFUL,
+	BATTERY_MODIFICATION_SUCCESSFUL = BATTERY_REGISTRATION_SUCCESSFUL,
+	BATTERY_INTERROGATION_SUCCESSFUL = BATTERY_REGISTRATION_SUCCESSFUL,
+	BATTERY_CLIENT_TABLE_FULL = 1,
+	BATTERY_REG_PARAMS_WRONG = 2,
+	BATTERY_DEREGISTRATION_FAILED = 4,
+	BATTERY_MODIFICATION_FAILED = 8,
+	BATTERY_INTERROGATION_FAILED = 16,
+	/* Client's filter could not be set because perhaps it does not exist */
+	BATTERY_SET_FILTER_FAILED         = 32,
+	/* Client could not be found for enabling or disabling the individual
+	 * client */
+	BATTERY_ENABLE_DISABLE_INDIVIDUAL_CLIENT_FAILED  = 64,
+	BATTERY_LAST_ERROR = 128,
+};
+
+enum {
+	BATTERY_VOLTAGE_UP = 0,
+	BATTERY_VOLTAGE_DOWN,
+	BATTERY_VOLTAGE_ABOVE_THIS_LEVEL,
+	BATTERY_VOLTAGE_BELOW_THIS_LEVEL,
+	BATTERY_VOLTAGE_LEVEL,
+	BATTERY_ALL_ACTIVITY,
+	VBATT_CHG_EVENTS,
+	BATTERY_VOLTAGE_UNKNOWN,
+};
+
+/*
+ * This enum contains definitions of the charger hardware status
+ */
+enum chg_charger_status_type {
+	/* The charger is good      */
+	CHARGER_STATUS_GOOD,
+	/* The charger is bad       */
+	CHARGER_STATUS_BAD,
+	/* The charger is weak      */
+	CHARGER_STATUS_WEAK,
+	/* Invalid charger status.  */
+	CHARGER_STATUS_INVALID
+};
+
+/*
+ * This enum contains definitions of the charger hardware type
+ */
+enum chg_charger_hardware_type {
+	/* The charger is removed                 */
+	CHARGER_TYPE_NONE,
+	/* The charger is a regular wall charger   */
+	CHARGER_TYPE_WALL,
+	/* The charger is a PC USB                 */
+	CHARGER_TYPE_USB_PC,
+	/* The charger is a wall USB charger       */
+	CHARGER_TYPE_USB_WALL,
+	/* The charger is a USB carkit             */
+	CHARGER_TYPE_USB_CARKIT,
+	/* Invalid charger hardware status.        */
+	CHARGER_TYPE_INVALID
+};
+
+/*
+ * This enum contains definitions of the battery status
+ */
+enum chg_battery_status_type {
+	/* The battery is good        */
+	BATTERY_STATUS_GOOD,
+	/* The battery is cold/hot    */
+	BATTERY_STATUS_BAD_TEMP,
+	/* The battery is bad         */
+	BATTERY_STATUS_BAD,
+	/* The battery is removed     */
+	BATTERY_STATUS_REMOVED,		/* on v2.2 only */
+	BATTERY_STATUS_INVALID_v1 = BATTERY_STATUS_REMOVED,
+	/* Invalid battery status.    */
+	BATTERY_STATUS_INVALID
+};
+
+/*
+ * This enum contains definitions of the battery voltage level
+ */
+enum chg_battery_level_type {
+	/* The battery voltage is dead/very low (less than 3.2V) */
+	BATTERY_LEVEL_DEAD,
+	/* The battery voltage is weak/low (between 3.2V and 3.4V) */
+	BATTERY_LEVEL_WEAK,
+	/* The battery voltage is good/normal(between 3.4V and 4.2V) */
+	BATTERY_LEVEL_GOOD,
+	/* The battery voltage is up to full (close to 4.2V) */
+	BATTERY_LEVEL_FULL,
+	/* Invalid battery voltage level. */
+	BATTERY_LEVEL_INVALID
+};
+
+#ifndef CONFIG_BATTERY_MSM_FAKE
+struct rpc_reply_batt_chg_v1 {
+	struct rpc_reply_hdr hdr;
+	u32 	more_data;
+
+	u32	charger_status;
+	u32	charger_type;
+	u32	battery_status;
+	u32	battery_level;
+	u32     battery_voltage;
+	u32	battery_temp;
+};
+
+struct rpc_reply_batt_chg_v2 {
+	struct rpc_reply_batt_chg_v1	v1;
+
+	u32	is_charger_valid;
+	u32	is_charging;
+	u32	is_battery_valid;
+	u32	ui_event;
+};
+
+union rpc_reply_batt_chg {
+	struct rpc_reply_batt_chg_v1	v1;
+	struct rpc_reply_batt_chg_v2	v2;
+};
+
+static union rpc_reply_batt_chg rep_batt_chg;
+#endif
+
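+/* On these targets the battery and charger state is maintained by the modem;
+ * this driver fetches it over ONCRPC (BATTERY_RPC_PROG/CHG_RPC_PROG) and
+ * caches the most recent values here for the power_supply interface.
+ */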
+struct msm_battery_info {
+	u32 voltage_max_design;
+	u32 voltage_min_design;
+	u32 chg_api_version;
+	u32 batt_technology;
+	u32 batt_api_version;
+
+	u32 avail_chg_sources;
+	u32 current_chg_source;
+
+	u32 batt_status;
+	u32 batt_health;
+	u32 charger_valid;
+	u32 batt_valid;
+	u32 batt_capacity; /* in percentage */
+
+	u32 charger_status;
+	u32 charger_type;
+	u32 battery_status;
+	u32 battery_level;
+	u32 battery_voltage; /* in millivolts */
+	u32 battery_temp;  /* in degrees Celsius */
+
+	u32(*calculate_capacity) (u32 voltage);
+
+	s32 batt_handle;
+
+	struct power_supply *msm_psy_ac;
+	struct power_supply *msm_psy_usb;
+	struct power_supply *msm_psy_batt;
+	struct power_supply *current_ps;
+
+	struct msm_rpc_client *batt_client;
+	struct msm_rpc_endpoint *chg_ep;
+
+	wait_queue_head_t wait_q;
+
+	u32 vbatt_modify_reply_avail;
+
+	struct early_suspend early_suspend;
+};
+
+static struct msm_battery_info msm_batt_info = {
+	.batt_handle = INVALID_BATT_HANDLE,
+	.charger_status = CHARGER_STATUS_BAD,
+	.charger_type = CHARGER_TYPE_INVALID,
+	.battery_status = BATTERY_STATUS_GOOD,
+	.battery_level = BATTERY_LEVEL_FULL,
+	.battery_voltage = BATTERY_HIGH,
+	.batt_capacity = 100,
+	.batt_status = POWER_SUPPLY_STATUS_DISCHARGING,
+	.batt_health = POWER_SUPPLY_HEALTH_GOOD,
+	.batt_valid  = 1,
+	.battery_temp = 23,
+	.vbatt_modify_reply_avail = 0,
+};
+
+static enum power_supply_property msm_power_props[] = {
+	POWER_SUPPLY_PROP_ONLINE,
+};
+
+static char *msm_power_supplied_to[] = {
+	"battery",
+};
+
+static int msm_power_get_property(struct power_supply *psy,
+				  enum power_supply_property psp,
+				  union power_supply_propval *val)
+{
+	switch (psp) {
+	case POWER_SUPPLY_PROP_ONLINE:
+		if (psy->type == POWER_SUPPLY_TYPE_MAINS) {
+			val->intval = msm_batt_info.current_chg_source & AC_CHG
+			    ? 1 : 0;
+		}
+		if (psy->type == POWER_SUPPLY_TYPE_USB) {
+			val->intval = msm_batt_info.current_chg_source & USB_CHG
+			    ? 1 : 0;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static struct power_supply msm_psy_ac = {
+	.name = "ac",
+	.type = POWER_SUPPLY_TYPE_MAINS,
+	.supplied_to = msm_power_supplied_to,
+	.num_supplicants = ARRAY_SIZE(msm_power_supplied_to),
+	.properties = msm_power_props,
+	.num_properties = ARRAY_SIZE(msm_power_props),
+	.get_property = msm_power_get_property,
+};
+
+static struct power_supply msm_psy_usb = {
+	.name = "usb",
+	.type = POWER_SUPPLY_TYPE_USB,
+	.supplied_to = msm_power_supplied_to,
+	.num_supplicants = ARRAY_SIZE(msm_power_supplied_to),
+	.properties = msm_power_props,
+	.num_properties = ARRAY_SIZE(msm_power_props),
+	.get_property = msm_power_get_property,
+};
+
+static enum power_supply_property msm_batt_power_props[] = {
+	POWER_SUPPLY_PROP_STATUS,
+	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_TECHNOLOGY,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+	POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_CAPACITY,
+};
+
+static int msm_batt_power_get_property(struct power_supply *psy,
+				       enum power_supply_property psp,
+				       union power_supply_propval *val)
+{
+	switch (psp) {
+	case POWER_SUPPLY_PROP_STATUS:
+		val->intval = msm_batt_info.batt_status;
+		break;
+	case POWER_SUPPLY_PROP_HEALTH:
+		val->intval = msm_batt_info.batt_health;
+		break;
+	case POWER_SUPPLY_PROP_PRESENT:
+		val->intval = msm_batt_info.batt_valid;
+		break;
+	case POWER_SUPPLY_PROP_TECHNOLOGY:
+		val->intval = msm_batt_info.batt_technology;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+		val->intval = msm_batt_info.voltage_max_design;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+		val->intval = msm_batt_info.voltage_min_design;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+		val->intval = msm_batt_info.battery_voltage;
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		val->intval = msm_batt_info.batt_capacity;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static struct power_supply msm_psy_batt = {
+	.name = "battery",
+	.type = POWER_SUPPLY_TYPE_BATTERY,
+	.properties = msm_batt_power_props,
+	.num_properties = ARRAY_SIZE(msm_batt_power_props),
+	.get_property = msm_batt_power_get_property,
+};
+
+#ifndef CONFIG_BATTERY_MSM_FAKE
+struct msm_batt_get_volt_ret_data {
+	u32 battery_voltage;
+};
+
+static int msm_batt_get_volt_ret_func(struct msm_rpc_client *batt_client,
+				       void *buf, void *data)
+{
+	struct msm_batt_get_volt_ret_data *data_ptr, *buf_ptr;
+
+	data_ptr = (struct msm_batt_get_volt_ret_data *)data;
+	buf_ptr = (struct msm_batt_get_volt_ret_data *)buf;
+
+	data_ptr->battery_voltage = be32_to_cpu(buf_ptr->battery_voltage);
+
+	return 0;
+}
+
+static u32 msm_batt_get_vbatt_voltage(void)
+{
+	int rc;
+
+	struct msm_batt_get_volt_ret_data rep;
+
+	rc = msm_rpc_client_req(msm_batt_info.batt_client,
+			BATTERY_READ_MV_PROC,
+			NULL, NULL,
+			msm_batt_get_volt_ret_func, &rep,
+			msecs_to_jiffies(BATT_RPC_TIMEOUT));
+
+	if (rc < 0) {
+		pr_err("%s: FAIL: vbatt get volt. rc=%d\n", __func__, rc);
+		return 0;
+	}
+
+	return rep.battery_voltage;
+}
+
+#define	be32_to_cpu_self(v)	(v = be32_to_cpu(v))
+
+static int msm_batt_get_batt_chg_status(void)
+{
+	int rc;
+
+	struct rpc_req_batt_chg {
+		struct rpc_request_hdr hdr;
+		u32 more_data;
+	} req_batt_chg;
+	struct rpc_reply_batt_chg_v1 *v1p;
+
+	req_batt_chg.more_data = cpu_to_be32(1);
+
+	memset(&rep_batt_chg, 0, sizeof(rep_batt_chg));
+
+	v1p = &rep_batt_chg.v1;
+	rc = msm_rpc_call_reply(msm_batt_info.chg_ep,
+				ONCRPC_CHG_GET_GENERAL_STATUS_PROC,
+				&req_batt_chg, sizeof(req_batt_chg),
+				&rep_batt_chg, sizeof(rep_batt_chg),
+				msecs_to_jiffies(BATT_RPC_TIMEOUT));
+	if (rc < 0) {
+		pr_err("%s: ERROR. msm_rpc_call_reply failed! proc=%d rc=%d\n",
+		       __func__, ONCRPC_CHG_GET_GENERAL_STATUS_PROC, rc);
+		return rc;
+	} else if (be32_to_cpu(v1p->more_data)) {
+		be32_to_cpu_self(v1p->charger_status);
+		be32_to_cpu_self(v1p->charger_type);
+		be32_to_cpu_self(v1p->battery_status);
+		be32_to_cpu_self(v1p->battery_level);
+		be32_to_cpu_self(v1p->battery_voltage);
+		be32_to_cpu_self(v1p->battery_temp);
+	} else {
+		pr_err("%s: No battery/charger data in RPC reply\n", __func__);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static void msm_batt_update_psy_status(void)
+{
+	static u32 unnecessary_event_count;
+	u32	charger_status;
+	u32	charger_type;
+	u32	battery_status;
+	u32	battery_level;
+	u32     battery_voltage;
+	u32	battery_temp;
+	struct	power_supply	*supp;
+
+	if (msm_batt_get_batt_chg_status())
+		return;
+
+	charger_status = rep_batt_chg.v1.charger_status;
+	charger_type = rep_batt_chg.v1.charger_type;
+	battery_status = rep_batt_chg.v1.battery_status;
+	battery_level = rep_batt_chg.v1.battery_level;
+	battery_voltage = rep_batt_chg.v1.battery_voltage;
+	battery_temp = rep_batt_chg.v1.battery_temp;
+
+	/* Make correction for battery status */
+	if (battery_status == BATTERY_STATUS_INVALID_v1) {
+		if (msm_batt_info.chg_api_version < CHG_RPC_VER_3_1)
+			battery_status = BATTERY_STATUS_INVALID;
+	}
+
+	if (charger_status == msm_batt_info.charger_status &&
+	    charger_type == msm_batt_info.charger_type &&
+	    battery_status == msm_batt_info.battery_status &&
+	    battery_level == msm_batt_info.battery_level &&
+	    battery_voltage == msm_batt_info.battery_voltage &&
+	    battery_temp == msm_batt_info.battery_temp) {
+		/* Got unnecessary event from Modem PMIC VBATT driver.
+		 * Nothing changed in Battery or charger status.
+		 */
+		unnecessary_event_count++;
+		if ((unnecessary_event_count % 20) == 1)
+			DBG_LIMIT("BATT: same event count = %u\n",
+				 unnecessary_event_count);
+		return;
+	}
+
+	unnecessary_event_count = 0;
+
+	DBG_LIMIT("BATT: rcvd: %d, %d, %d, %d; %d, %d\n",
+		 charger_status, charger_type, battery_status,
+		 battery_level, battery_voltage, battery_temp);
+
+	if (battery_status == BATTERY_STATUS_INVALID &&
+	    battery_level != BATTERY_LEVEL_INVALID) {
+		DBG_LIMIT("BATT: change status(%d) to (%d) for level=%d\n",
+			 battery_status, BATTERY_STATUS_GOOD, battery_level);
+		battery_status = BATTERY_STATUS_GOOD;
+	}
+
+	if (msm_batt_info.charger_type != charger_type) {
+		if (charger_type == CHARGER_TYPE_USB_WALL ||
+		    charger_type == CHARGER_TYPE_USB_PC ||
+		    charger_type == CHARGER_TYPE_USB_CARKIT) {
+			DBG_LIMIT("BATT: USB charger plugged in\n");
+			msm_batt_info.current_chg_source = USB_CHG;
+			supp = &msm_psy_usb;
+		} else if (charger_type == CHARGER_TYPE_WALL) {
+			DBG_LIMIT("BATT: AC wall charger plugged in\n");
+			msm_batt_info.current_chg_source = AC_CHG;
+			supp = &msm_psy_ac;
+		} else {
+			if (msm_batt_info.current_chg_source & AC_CHG)
+				DBG_LIMIT("BATT: AC Wall charger removed\n");
+			else if (msm_batt_info.current_chg_source & USB_CHG)
+				DBG_LIMIT("BATT: USB charger removed\n");
+			else
+				DBG_LIMIT("BATT: No charger present\n");
+			msm_batt_info.current_chg_source = 0;
+			supp = &msm_psy_batt;
+
+			/* Correct charger status */
+			if (charger_status != CHARGER_STATUS_INVALID) {
+				DBG_LIMIT("BATT: No charging!\n");
+				charger_status = CHARGER_STATUS_INVALID;
+				msm_batt_info.batt_status =
+					POWER_SUPPLY_STATUS_NOT_CHARGING;
+			}
+		}
+	} else
+		supp = NULL;
+
+	if (msm_batt_info.charger_status != charger_status) {
+		if (charger_status == CHARGER_STATUS_GOOD ||
+		    charger_status == CHARGER_STATUS_WEAK) {
+			if (msm_batt_info.current_chg_source) {
+				DBG_LIMIT("BATT: Charging.\n");
+				msm_batt_info.batt_status =
+					POWER_SUPPLY_STATUS_CHARGING;
+
+				/* Correct when supp==NULL */
+				if (msm_batt_info.current_chg_source & AC_CHG)
+					supp = &msm_psy_ac;
+				else
+					supp = &msm_psy_usb;
+			}
+		} else {
+			DBG_LIMIT("BATT: No charging.\n");
+			msm_batt_info.batt_status =
+				POWER_SUPPLY_STATUS_NOT_CHARGING;
+			supp = &msm_psy_batt;
+		}
+	} else {
+		/* Correct charger status */
+		if (charger_type != CHARGER_TYPE_INVALID &&
+		    charger_status == CHARGER_STATUS_GOOD) {
+			DBG_LIMIT("BATT: In charging\n");
+			msm_batt_info.batt_status =
+				POWER_SUPPLY_STATUS_CHARGING;
+		}
+	}
+
+	/* Correct battery voltage and status */
+	if (!battery_voltage) {
+		if (charger_status == CHARGER_STATUS_INVALID) {
+			DBG_LIMIT("BATT: Read VBATT\n");
+			battery_voltage = msm_batt_get_vbatt_voltage();
+		} else
+			/* Use previous */
+			battery_voltage = msm_batt_info.battery_voltage;
+	}
+	if (battery_status == BATTERY_STATUS_INVALID) {
+		if (battery_voltage >= msm_batt_info.voltage_min_design &&
+		    battery_voltage <= msm_batt_info.voltage_max_design) {
+			DBG_LIMIT("BATT: Battery valid\n");
+			msm_batt_info.batt_valid = 1;
+			battery_status = BATTERY_STATUS_GOOD;
+		}
+	}
+
+	if (msm_batt_info.battery_status != battery_status) {
+		if (battery_status != BATTERY_STATUS_INVALID) {
+			msm_batt_info.batt_valid = 1;
+
+			if (battery_status == BATTERY_STATUS_BAD) {
+				DBG_LIMIT("BATT: Battery bad.\n");
+				msm_batt_info.batt_health =
+					POWER_SUPPLY_HEALTH_DEAD;
+			} else if (battery_status == BATTERY_STATUS_BAD_TEMP) {
+				DBG_LIMIT("BATT: Battery overheat.\n");
+				msm_batt_info.batt_health =
+					POWER_SUPPLY_HEALTH_OVERHEAT;
+			} else {
+				DBG_LIMIT("BATT: Battery good.\n");
+				msm_batt_info.batt_health =
+					POWER_SUPPLY_HEALTH_GOOD;
+			}
+		} else {
+			msm_batt_info.batt_valid = 0;
+			DBG_LIMIT("BATT: Battery invalid.\n");
+			msm_batt_info.batt_health = POWER_SUPPLY_HEALTH_UNKNOWN;
+		}
+
+		if (msm_batt_info.batt_status != POWER_SUPPLY_STATUS_CHARGING) {
+			if (battery_status == BATTERY_STATUS_INVALID) {
+				DBG_LIMIT("BATT: Battery -> unknown\n");
+				msm_batt_info.batt_status =
+					POWER_SUPPLY_STATUS_UNKNOWN;
+			} else {
+				DBG_LIMIT("BATT: Battery -> discharging\n");
+				msm_batt_info.batt_status =
+					POWER_SUPPLY_STATUS_DISCHARGING;
+			}
+		}
+
+		if (!supp) {
+			if (msm_batt_info.current_chg_source) {
+				if (msm_batt_info.current_chg_source & AC_CHG)
+					supp = &msm_psy_ac;
+				else
+					supp = &msm_psy_usb;
+			} else
+				supp = &msm_psy_batt;
+		}
+	}
+
+	msm_batt_info.charger_status 	= charger_status;
+	msm_batt_info.charger_type 	= charger_type;
+	msm_batt_info.battery_status 	= battery_status;
+	msm_batt_info.battery_level 	= battery_level;
+	msm_batt_info.battery_temp 	= battery_temp;
+
+	if (msm_batt_info.battery_voltage != battery_voltage) {
+		msm_batt_info.battery_voltage  	= battery_voltage;
+		msm_batt_info.batt_capacity =
+			msm_batt_info.calculate_capacity(battery_voltage);
+		DBG_LIMIT("BATT: voltage = %u mV [capacity = %d%%]\n",
+			 battery_voltage, msm_batt_info.batt_capacity);
+
+		if (!supp)
+			supp = msm_batt_info.current_ps;
+	}
+
+	if (supp) {
+		msm_batt_info.current_ps = supp;
+		DBG_LIMIT("BATT: Supply = %s\n", supp->name);
+		power_supply_changed(supp);
+	}
+}
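+
+/*
+ * msm_batt_update_psy_status() is the heart of this driver: it pulls the
+ * latest charger/battery snapshot over RPC, drops duplicate events from the
+ * modem, patches up inconsistent data (a zero voltage is re-read, an
+ * INVALID battery status with a sane voltage is promoted to GOOD), caches
+ * the result in msm_batt_info and finally calls power_supply_changed() on
+ * whichever supply (ac, usb or battery) is affected.
+ */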
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+struct batt_modify_client_req {
+	u32 client_handle;
+
+	/* The voltage at which callback (CB) should be called. */
+	u32 desired_batt_voltage;
+
+	/* The direction when the CB should be called. */
+	u32 voltage_direction;
+
+	/* The registered callback to be called when voltage and
+	 * direction specs are met. */
+	u32 batt_cb_id;
+
+	/* The callback data */
+	u32 cb_data;
+};
+
+struct batt_modify_client_rep {
+	u32 result;
+};
+
+static int msm_batt_modify_client_arg_func(struct msm_rpc_client *batt_client,
+				       void *buf, void *data)
+{
+	struct batt_modify_client_req *batt_modify_client_req =
+		(struct batt_modify_client_req *)data;
+	u32 *req = (u32 *)buf;
+	int size = 0;
+
+	*req = cpu_to_be32(batt_modify_client_req->client_handle);
+	size += sizeof(u32);
+	req++;
+
+	*req = cpu_to_be32(batt_modify_client_req->desired_batt_voltage);
+	size += sizeof(u32);
+	req++;
+
+	*req = cpu_to_be32(batt_modify_client_req->voltage_direction);
+	size += sizeof(u32);
+	req++;
+
+	*req = cpu_to_be32(batt_modify_client_req->batt_cb_id);
+	size += sizeof(u32);
+	req++;
+
+	*req = cpu_to_be32(batt_modify_client_req->cb_data);
+	size += sizeof(u32);
+
+	return size;
+}
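+
+/*
+ * The arg callback above hand-packs the request for the RPC layer: each
+ * field is stored as a big-endian 32-bit word and the returned size
+ * (5 * sizeof(u32) = 20 bytes here) is handed back to msm_rpc_client_req()
+ * as the request length.  The ret callback below does the inverse for the
+ * single-word reply.
+ */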
+
+static int msm_batt_modify_client_ret_func(struct msm_rpc_client *batt_client,
+				       void *buf, void *data)
+{
+	struct  batt_modify_client_rep *data_ptr, *buf_ptr;
+
+	data_ptr = (struct batt_modify_client_rep *)data;
+	buf_ptr = (struct batt_modify_client_rep *)buf;
+
+	data_ptr->result = be32_to_cpu(buf_ptr->result);
+
+	return 0;
+}
+
+static int msm_batt_modify_client(u32 client_handle, u32 desired_batt_voltage,
+	     u32 voltage_direction, u32 batt_cb_id, u32 cb_data)
+{
+	int rc;
+
+	struct batt_modify_client_req  req;
+	struct batt_modify_client_rep rep;
+
+	req.client_handle = client_handle;
+	req.desired_batt_voltage = desired_batt_voltage;
+	req.voltage_direction = voltage_direction;
+	req.batt_cb_id = batt_cb_id;
+	req.cb_data = cb_data;
+
+	rc = msm_rpc_client_req(msm_batt_info.batt_client,
+			BATTERY_MODIFY_CLIENT_PROC,
+			msm_batt_modify_client_arg_func, &req,
+			msm_batt_modify_client_ret_func, &rep,
+			msecs_to_jiffies(BATT_RPC_TIMEOUT));
+
+	if (rc < 0) {
+		pr_err("%s: ERROR. failed to modify Vbatt client\n",
+		       __func__);
+		return rc;
+	}
+
+	if (rep.result != BATTERY_MODIFICATION_SUCCESSFUL) {
+		pr_err("%s: ERROR. modify client failed. result = %u\n",
+		       __func__, rep.result);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+void msm_batt_early_suspend(struct early_suspend *h)
+{
+	int rc;
+
+	pr_debug("%s: enter\n", __func__);
+
+	if (msm_batt_info.batt_handle != INVALID_BATT_HANDLE) {
+		rc = msm_batt_modify_client(msm_batt_info.batt_handle,
+				BATTERY_LOW, BATTERY_VOLTAGE_BELOW_THIS_LEVEL,
+				BATTERY_CB_ID_LOW_VOL, BATTERY_LOW);
+
+		if (rc < 0) {
+			pr_err("%s: msm_batt_modify_client. rc=%d\n",
+			       __func__, rc);
+			return;
+		}
+	} else {
+		pr_err("%s: ERROR. invalid batt_handle\n", __func__);
+		return;
+	}
+
+	pr_debug("%s: exit\n", __func__);
+}
+
+void msm_batt_late_resume(struct early_suspend *h)
+{
+	int rc;
+
+	pr_debug("%s: enter\n", __func__);
+
+	if (msm_batt_info.batt_handle != INVALID_BATT_HANDLE) {
+		rc = msm_batt_modify_client(msm_batt_info.batt_handle,
+				BATTERY_LOW, BATTERY_ALL_ACTIVITY,
+			       BATTERY_CB_ID_ALL_ACTIV, BATTERY_ALL_ACTIVITY);
+		if (rc < 0) {
+			pr_err("%s: msm_batt_modify_client FAIL rc=%d\n",
+			       __func__, rc);
+			return;
+		}
+	} else {
+		pr_err("%s: ERROR. invalid batt_handle\n", __func__);
+		return;
+	}
+
+	msm_batt_update_psy_status();
+	pr_debug("%s: exit\n", __func__);
+}
+#endif
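+
+/*
+ * Early-suspend policy used above: while the display is off the vbatt
+ * client is reprogrammed so the modem only calls back when the voltage
+ * drops below BATTERY_LOW; on late resume the client is switched back to
+ * BATTERY_ALL_ACTIVITY and the power_supply state is refreshed right away.
+ */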
+
+struct msm_batt_vbatt_filter_req {
+	u32 batt_handle;
+	u32 enable_filter;
+	u32 vbatt_filter;
+};
+
+struct msm_batt_vbatt_filter_rep {
+	u32 result;
+};
+
+static int msm_batt_filter_arg_func(struct msm_rpc_client *batt_client,
+		void *buf, void *data)
+{
+	struct msm_batt_vbatt_filter_req *vbatt_filter_req =
+		(struct msm_batt_vbatt_filter_req *)data;
+	u32 *req = (u32 *)buf;
+	int size = 0;
+
+	*req = cpu_to_be32(vbatt_filter_req->batt_handle);
+	size += sizeof(u32);
+	req++;
+
+	*req = cpu_to_be32(vbatt_filter_req->enable_filter);
+	size += sizeof(u32);
+	req++;
+
+	*req = cpu_to_be32(vbatt_filter_req->vbatt_filter);
+	size += sizeof(u32);
+	return size;
+}
+
+static int msm_batt_filter_ret_func(struct msm_rpc_client *batt_client,
+				       void *buf, void *data)
+{
+	struct msm_batt_vbatt_filter_rep *data_ptr, *buf_ptr;
+
+	data_ptr = (struct msm_batt_vbatt_filter_rep *)data;
+	buf_ptr = (struct msm_batt_vbatt_filter_rep *)buf;
+
+	data_ptr->result = be32_to_cpu(buf_ptr->result);
+	return 0;
+}
+
+static int msm_batt_enable_filter(u32 vbatt_filter)
+{
+	int rc;
+	struct  msm_batt_vbatt_filter_req  vbatt_filter_req;
+	struct  msm_batt_vbatt_filter_rep  vbatt_filter_rep;
+
+	vbatt_filter_req.batt_handle = msm_batt_info.batt_handle;
+	vbatt_filter_req.enable_filter = 1;
+	vbatt_filter_req.vbatt_filter = vbatt_filter;
+
+	rc = msm_rpc_client_req(msm_batt_info.batt_client,
+			BATTERY_ENABLE_DISABLE_FILTER_PROC,
+			msm_batt_filter_arg_func, &vbatt_filter_req,
+			msm_batt_filter_ret_func, &vbatt_filter_rep,
+			msecs_to_jiffies(BATT_RPC_TIMEOUT));
+
+	if (rc < 0) {
+		pr_err("%s: FAIL: enable vbatt filter. rc=%d\n",
+		       __func__, rc);
+		return rc;
+	}
+
+	if (vbatt_filter_rep.result != BATTERY_DEREGISTRATION_SUCCESSFUL) {
+		pr_err("%s: FAIL: enable vbatt filter: result=%d\n",
+		       __func__, vbatt_filter_rep.result);
+		return -EIO;
+	}
+
+	pr_debug("%s: enable vbatt filter: OK\n", __func__);
+	return rc;
+}
+
+struct batt_client_registration_req {
+	/* The voltage at which callback (CB) should be called. */
+	u32 desired_batt_voltage;
+
+	/* The direction when the CB should be called. */
+	u32 voltage_direction;
+
+	/* The registered callback to be called when voltage and
+	 * direction specs are met. */
+	u32 batt_cb_id;
+
+	/* The callback data */
+	u32 cb_data;
+	u32 more_data;
+	u32 batt_error;
+};
+
+struct batt_client_registration_req_4_1 {
+	/* The voltage at which callback (CB) should be called. */
+	u32 desired_batt_voltage;
+
+	/* The direction when the CB should be called. */
+	u32 voltage_direction;
+
+	/* The registered callback to be called when voltage and
+	 * direction specs are met. */
+	u32 batt_cb_id;
+
+	/* The callback data */
+	u32 cb_data;
+	u32 batt_error;
+};
+
+struct batt_client_registration_rep {
+	u32 batt_handle;
+};
+
+struct batt_client_registration_rep_4_1 {
+	u32 batt_handle;
+	u32 more_data;
+	u32 err;
+};
+
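+/*
+ * Wire-format note: with the 4.1 battery API the registration request is
+ * five words (voltage, direction, cb_id, cb_data, batt_error); every other
+ * supported version sends six, with a more_data discriminator ahead of
+ * batt_error.  The replies differ as well -- other versions return just
+ * batt_handle, while 4.1 also returns more_data and an error code that
+ * must be checked.
+ */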
+static int msm_batt_register_arg_func(struct msm_rpc_client *batt_client,
+				       void *buf, void *data)
+{
+	struct batt_client_registration_req *batt_reg_req =
+		(struct batt_client_registration_req *)data;
+
+	u32 *req = (u32 *)buf;
+	int size = 0;
+
+	if (msm_batt_info.batt_api_version == BATTERY_RPC_VER_4_1) {
+		*req = cpu_to_be32(batt_reg_req->desired_batt_voltage);
+		size += sizeof(u32);
+		req++;
+
+		*req = cpu_to_be32(batt_reg_req->voltage_direction);
+		size += sizeof(u32);
+		req++;
+
+		*req = cpu_to_be32(batt_reg_req->batt_cb_id);
+		size += sizeof(u32);
+		req++;
+
+		*req = cpu_to_be32(batt_reg_req->cb_data);
+		size += sizeof(u32);
+		req++;
+
+		*req = cpu_to_be32(batt_reg_req->batt_error);
+		size += sizeof(u32);
+
+		return size;
+	} else {
+		*req = cpu_to_be32(batt_reg_req->desired_batt_voltage);
+		size += sizeof(u32);
+		req++;
+
+		*req = cpu_to_be32(batt_reg_req->voltage_direction);
+		size += sizeof(u32);
+		req++;
+
+		*req = cpu_to_be32(batt_reg_req->batt_cb_id);
+		size += sizeof(u32);
+		req++;
+
+		*req = cpu_to_be32(batt_reg_req->cb_data);
+		size += sizeof(u32);
+		req++;
+
+		*req = cpu_to_be32(batt_reg_req->more_data);
+		size += sizeof(u32);
+		req++;
+
+		*req = cpu_to_be32(batt_reg_req->batt_error);
+		size += sizeof(u32);
+
+		return size;
+	}
+}
+
+static int msm_batt_register_ret_func(struct msm_rpc_client *batt_client,
+				       void *buf, void *data)
+{
+	struct batt_client_registration_rep *data_ptr, *buf_ptr;
+	struct batt_client_registration_rep_4_1 *data_ptr_4_1, *buf_ptr_4_1;
+
+	if (msm_batt_info.batt_api_version == BATTERY_RPC_VER_4_1) {
+		data_ptr_4_1 = (struct batt_client_registration_rep_4_1 *)data;
+		buf_ptr_4_1 = (struct batt_client_registration_rep_4_1 *)buf;
+
+		data_ptr_4_1->batt_handle
+			= be32_to_cpu(buf_ptr_4_1->batt_handle);
+		data_ptr_4_1->more_data
+			= be32_to_cpu(buf_ptr_4_1->more_data);
+		data_ptr_4_1->err = be32_to_cpu(buf_ptr_4_1->err);
+		return 0;
+	} else {
+		data_ptr = (struct batt_client_registration_rep *)data;
+		buf_ptr = (struct batt_client_registration_rep *)buf;
+
+		data_ptr->batt_handle = be32_to_cpu(buf_ptr->batt_handle);
+		return 0;
+	}
+}
+
+static int msm_batt_register(u32 desired_batt_voltage,
+			     u32 voltage_direction, u32 batt_cb_id, u32 cb_data)
+{
+	struct batt_client_registration_req batt_reg_req;
+	struct batt_client_registration_req_4_1 batt_reg_req_4_1;
+	struct batt_client_registration_rep batt_reg_rep;
+	struct batt_client_registration_rep_4_1 batt_reg_rep_4_1;
+	void *request;
+	void *reply;
+	int rc;
+
+	if (msm_batt_info.batt_api_version == BATTERY_RPC_VER_4_1) {
+		batt_reg_req_4_1.desired_batt_voltage = desired_batt_voltage;
+		batt_reg_req_4_1.voltage_direction = voltage_direction;
+		batt_reg_req_4_1.batt_cb_id = batt_cb_id;
+		batt_reg_req_4_1.cb_data = cb_data;
+		batt_reg_req_4_1.batt_error = 1;
+		request = &batt_reg_req_4_1;
+	} else {
+		batt_reg_req.desired_batt_voltage = desired_batt_voltage;
+		batt_reg_req.voltage_direction = voltage_direction;
+		batt_reg_req.batt_cb_id = batt_cb_id;
+		batt_reg_req.cb_data = cb_data;
+		batt_reg_req.more_data = 1;
+		batt_reg_req.batt_error = 0;
+		request = &batt_reg_req;
+	}
+
+	if (msm_batt_info.batt_api_version == BATTERY_RPC_VER_4_1)
+		reply = &batt_reg_rep_4_1;
+	else
+		reply = &batt_reg_rep;
+
+	rc = msm_rpc_client_req(msm_batt_info.batt_client,
+			BATTERY_REGISTER_PROC,
+			msm_batt_register_arg_func, request,
+			msm_batt_register_ret_func, reply,
+			msecs_to_jiffies(BATT_RPC_TIMEOUT));
+
+	if (rc < 0) {
+		pr_err("%s: FAIL: vbatt register. rc=%d\n", __func__, rc);
+		return rc;
+	}
+
+	if (msm_batt_info.batt_api_version == BATTERY_RPC_VER_4_1) {
+		if (batt_reg_rep_4_1.more_data != 0
+			&& batt_reg_rep_4_1.err
+				!= BATTERY_REGISTRATION_SUCCESSFUL) {
+			pr_err("%s: vBatt Registration Failed proc_num=%d\n"
+					, __func__, BATTERY_REGISTER_PROC);
+			return -EIO;
+		}
+		msm_batt_info.batt_handle = batt_reg_rep_4_1.batt_handle;
+	} else
+		msm_batt_info.batt_handle = batt_reg_rep.batt_handle;
+
+	return 0;
+}
+
+struct batt_client_deregister_req {
+	u32 batt_handle;
+};
+
+struct batt_client_deregister_rep {
+	u32 batt_error;
+};
+
+static int msm_batt_deregister_arg_func(struct msm_rpc_client *batt_client,
+				       void *buf, void *data)
+{
+	struct batt_client_deregister_req *deregister_req =
+		(struct  batt_client_deregister_req *)data;
+	u32 *req = (u32 *)buf;
+	int size = 0;
+
+	*req = cpu_to_be32(deregister_req->batt_handle);
+	size += sizeof(u32);
+
+	return size;
+}
+
+static int msm_batt_deregister_ret_func(struct msm_rpc_client *batt_client,
+				       void *buf, void *data)
+{
+	struct batt_client_deregister_rep *data_ptr, *buf_ptr;
+
+	data_ptr = (struct batt_client_deregister_rep *)data;
+	buf_ptr = (struct batt_client_deregister_rep *)buf;
+
+	data_ptr->batt_error = be32_to_cpu(buf_ptr->batt_error);
+
+	return 0;
+}
+
+static int msm_batt_deregister(u32 batt_handle)
+{
+	int rc;
+	struct batt_client_deregister_req req;
+	struct batt_client_deregister_rep rep;
+
+	req.batt_handle = batt_handle;
+
+	rc = msm_rpc_client_req(msm_batt_info.batt_client,
+			BATTERY_DEREGISTER_CLIENT_PROC,
+			msm_batt_deregister_arg_func, &req,
+			msm_batt_deregister_ret_func, &rep,
+			msecs_to_jiffies(BATT_RPC_TIMEOUT));
+
+	if (rc < 0) {
+		pr_err("%s: FAIL: vbatt deregister. rc=%d\n", __func__, rc);
+		return rc;
+	}
+
+	if (rep.batt_error != BATTERY_DEREGISTRATION_SUCCESSFUL) {
+		pr_err("%s: vbatt deregistration FAIL. error=%d, handle=%d\n",
+		       __func__, rep.batt_error, batt_handle);
+		return -EIO;
+	}
+
+	return 0;
+}
+#endif  /* CONFIG_BATTERY_MSM_FAKE */
+
+static int msm_batt_cleanup(void)
+{
+	int rc = 0;
+
+#ifndef CONFIG_BATTERY_MSM_FAKE
+	if (msm_batt_info.batt_handle != INVALID_BATT_HANDLE) {
+		rc = msm_batt_deregister(msm_batt_info.batt_handle);
+		if (rc < 0)
+			pr_err("%s: FAIL: msm_batt_deregister. rc=%d\n",
+			       __func__, rc);
+	}
+
+	msm_batt_info.batt_handle = INVALID_BATT_HANDLE;
+
+	if (msm_batt_info.batt_client)
+		msm_rpc_unregister_client(msm_batt_info.batt_client);
+#endif  /* CONFIG_BATTERY_MSM_FAKE */
+
+	if (msm_batt_info.msm_psy_ac)
+		power_supply_unregister(msm_batt_info.msm_psy_ac);
+
+	if (msm_batt_info.msm_psy_usb)
+		power_supply_unregister(msm_batt_info.msm_psy_usb);
+	if (msm_batt_info.msm_psy_batt)
+		power_supply_unregister(msm_batt_info.msm_psy_batt);
+
+#ifndef CONFIG_BATTERY_MSM_FAKE
+	if (msm_batt_info.chg_ep) {
+		rc = msm_rpc_close(msm_batt_info.chg_ep);
+		if (rc < 0) {
+			pr_err("%s: FAIL. msm_rpc_close(chg_ep). rc=%d\n",
+			       __func__, rc);
+		}
+	}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	if (msm_batt_info.early_suspend.suspend == msm_batt_early_suspend)
+		unregister_early_suspend(&msm_batt_info.early_suspend);
+#endif
+#endif
+	return rc;
+}
+
+static u32 msm_batt_capacity(u32 current_voltage)
+{
+	u32 low_voltage = msm_batt_info.voltage_min_design;
+	u32 high_voltage = msm_batt_info.voltage_max_design;
+
+	if (current_voltage <= low_voltage)
+		return 0;
+	else if (current_voltage >= high_voltage)
+		return 100;
+	else
+		return (current_voltage - low_voltage) * 100
+			/ (high_voltage - low_voltage);
+}
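+
+/*
+ * Default capacity model: a straight linear map of voltage onto the design
+ * window.  As an illustration only (the real bounds come from platform data
+ * or fall back to BATTERY_LOW/BATTERY_HIGH in probe), a 3200..4200 mV
+ * window puts a 3700 mV reading at (3700 - 3200) * 100 / 1000 = 50%.
+ */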
+
+#ifndef CONFIG_BATTERY_MSM_FAKE
+int msm_batt_get_charger_api_version(void)
+{
+	int rc;
+	struct rpc_reply_hdr *reply;
+
+	struct rpc_req_chg_api_ver {
+		struct rpc_request_hdr hdr;
+		u32 more_data;
+	} req_chg_api_ver;
+
+	struct rpc_rep_chg_api_ver {
+		struct rpc_reply_hdr hdr;
+		u32 num_of_chg_api_versions;
+		u32 *chg_api_versions;
+	};
+
+	u32 num_of_versions;
+
+	struct rpc_rep_chg_api_ver *rep_chg_api_ver;
+
+	req_chg_api_ver.more_data = cpu_to_be32(1);
+
+	msm_rpc_setup_req(&req_chg_api_ver.hdr, CHG_RPC_PROG, CHG_RPC_VER_1_1,
+			  ONCRPC_CHARGER_API_VERSIONS_PROC);
+
+	rc = msm_rpc_write(msm_batt_info.chg_ep, &req_chg_api_ver,
+			sizeof(req_chg_api_ver));
+	if (rc < 0) {
+		pr_err("%s: FAIL: msm_rpc_write. proc=0x%08x, rc=%d\n",
+		       __func__, ONCRPC_CHARGER_API_VERSIONS_PROC, rc);
+		return rc;
+	}
+
+	for (;;) {
+		rc = msm_rpc_read(msm_batt_info.chg_ep, (void *) &reply, -1,
+				BATT_RPC_TIMEOUT);
+		if (rc < 0)
+			return rc;
+		if (rc < RPC_REQ_REPLY_COMMON_HEADER_SIZE) {
+			pr_err("%s: LENGTH ERR: msm_rpc_read. rc=%d (<%d)\n",
+			       __func__, rc, RPC_REQ_REPLY_COMMON_HEADER_SIZE);
+
+			rc = -EIO;
+			break;
+		}
+		/* we should not get RPC REQ or call packets -- ignore them */
+		if (reply->type == RPC_TYPE_REQ) {
+			pr_err("%s: TYPE ERR: type=%d (!=%d)\n",
+			       __func__, reply->type, RPC_TYPE_REQ);
+			kfree(reply);
+			continue;
+		}
+
+		/* If an earlier call timed out, we could get the (no
+		 * longer wanted) reply for it. Ignore replies that
+		 * we don't expect.
+		 */
+		if (reply->xid != req_chg_api_ver.hdr.xid) {
+			pr_err("%s: XID ERR: xid=%d (!=%d)\n", __func__,
+			       reply->xid, req_chg_api_ver.hdr.xid);
+			kfree(reply);
+			continue;
+		}
+		if (reply->reply_stat != RPCMSG_REPLYSTAT_ACCEPTED) {
+			rc = -EPERM;
+			break;
+		}
+		if (reply->data.acc_hdr.accept_stat !=
+				RPC_ACCEPTSTAT_SUCCESS) {
+			rc = -EINVAL;
+			break;
+		}
+
+		rep_chg_api_ver = (struct rpc_rep_chg_api_ver *)reply;
+
+		num_of_versions =
+			be32_to_cpu(rep_chg_api_ver->num_of_chg_api_versions);
+
+		rep_chg_api_ver->chg_api_versions =  (u32 *)
+			((u8 *) reply + sizeof(struct rpc_reply_hdr) +
+			sizeof(rep_chg_api_ver->num_of_chg_api_versions));
+
+		rc = be32_to_cpu(
+			rep_chg_api_ver->chg_api_versions[num_of_versions - 1]);
+
+		pr_debug("%s: num_of_chg_api_versions = %u. "
+			"The chg api version = 0x%08x\n", __func__,
+			num_of_versions, rc);
+		break;
+	}
+	kfree(reply);
+	return rc;
+}
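+
+/*
+ * The read loop above is deliberately defensive: stray RPC request packets
+ * and replies whose xid does not match this call (for example a late reply
+ * to an earlier, timed-out request) are freed and skipped, so only the
+ * reply to our own version query is parsed.  The value returned is the last
+ * entry of the modem's version array (index num_of_versions - 1).
+ */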
+
+static int msm_batt_cb_func(struct msm_rpc_client *client,
+			    void *buffer, int in_size)
+{
+	int rc = 0;
+	struct rpc_request_hdr *req;
+	u32 procedure;
+	u32 accept_status;
+
+	req = (struct rpc_request_hdr *)buffer;
+	procedure = be32_to_cpu(req->procedure);
+
+	switch (procedure) {
+	case BATTERY_CB_TYPE_PROC:
+		accept_status = RPC_ACCEPTSTAT_SUCCESS;
+		break;
+
+	default:
+		accept_status = RPC_ACCEPTSTAT_PROC_UNAVAIL;
+		pr_err("%s: ERROR. procedure (%d) not supported\n",
+		       __func__, procedure);
+		break;
+	}
+
+	msm_rpc_start_accepted_reply(msm_batt_info.batt_client,
+			be32_to_cpu(req->xid), accept_status);
+
+	rc = msm_rpc_send_accepted_reply(msm_batt_info.batt_client, 0);
+	if (rc)
+		pr_err("%s: FAIL: sending reply. rc=%d\n", __func__, rc);
+
+	if (accept_status == RPC_ACCEPTSTAT_SUCCESS)
+		msm_batt_update_psy_status();
+
+	return rc;
+}
+#endif  /* CONFIG_BATTERY_MSM_FAKE */
+
+static int __devinit msm_batt_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct msm_psy_batt_pdata *pdata = pdev->dev.platform_data;
+
+	if (pdev->id != -1) {
+		dev_err(&pdev->dev,
+			"%s: MSM chipsets can only support one"
+			" battery\n", __func__);
+		return -EINVAL;
+	}
+
+#ifndef CONFIG_BATTERY_MSM_FAKE
+	if (pdata->avail_chg_sources & AC_CHG) {
+#else
+	{
+#endif
+		rc = power_supply_register(&pdev->dev, &msm_psy_ac);
+		if (rc < 0) {
+			dev_err(&pdev->dev,
+				"%s: power_supply_register failed"
+				" rc = %d\n", __func__, rc);
+			msm_batt_cleanup();
+			return rc;
+		}
+		msm_batt_info.msm_psy_ac = &msm_psy_ac;
+		msm_batt_info.avail_chg_sources |= AC_CHG;
+	}
+
+	if (pdata->avail_chg_sources & USB_CHG) {
+		rc = power_supply_register(&pdev->dev, &msm_psy_usb);
+		if (rc < 0) {
+			dev_err(&pdev->dev,
+				"%s: power_supply_register failed"
+				" rc = %d\n", __func__, rc);
+			msm_batt_cleanup();
+			return rc;
+		}
+		msm_batt_info.msm_psy_usb = &msm_psy_usb;
+		msm_batt_info.avail_chg_sources |= USB_CHG;
+	}
+
+	if (!msm_batt_info.msm_psy_ac && !msm_batt_info.msm_psy_usb) {
+		dev_err(&pdev->dev,
+			"%s: No external power supply (AC or USB)"
+			" is available\n", __func__);
+		msm_batt_cleanup();
+		return -ENODEV;
+	}
+
+	msm_batt_info.voltage_max_design = pdata->voltage_max_design;
+	msm_batt_info.voltage_min_design = pdata->voltage_min_design;
+	msm_batt_info.batt_technology = pdata->batt_technology;
+	msm_batt_info.calculate_capacity = pdata->calculate_capacity;
+
+	if (!msm_batt_info.voltage_min_design)
+		msm_batt_info.voltage_min_design = BATTERY_LOW;
+	if (!msm_batt_info.voltage_max_design)
+		msm_batt_info.voltage_max_design = BATTERY_HIGH;
+
+	if (msm_batt_info.batt_technology == POWER_SUPPLY_TECHNOLOGY_UNKNOWN)
+		msm_batt_info.batt_technology = POWER_SUPPLY_TECHNOLOGY_LION;
+
+	if (!msm_batt_info.calculate_capacity)
+		msm_batt_info.calculate_capacity = msm_batt_capacity;
+
+	rc = power_supply_register(&pdev->dev, &msm_psy_batt);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "%s: power_supply_register failed"
+			" rc=%d\n", __func__, rc);
+		msm_batt_cleanup();
+		return rc;
+	}
+	msm_batt_info.msm_psy_batt = &msm_psy_batt;
+
+#ifndef CONFIG_BATTERY_MSM_FAKE
+	rc = msm_batt_register(BATTERY_LOW, BATTERY_ALL_ACTIVITY,
+			       BATTERY_CB_ID_ALL_ACTIV, BATTERY_ALL_ACTIVITY);
+	if (rc < 0) {
+		dev_err(&pdev->dev,
+			"%s: msm_batt_register failed rc = %d\n", __func__, rc);
+		msm_batt_cleanup();
+		return rc;
+	}
+
+	rc = msm_batt_enable_filter(VBATT_FILTER);
+
+	if (rc < 0) {
+		dev_err(&pdev->dev,
+			"%s: msm_batt_enable_filter failed rc = %d\n",
+			__func__, rc);
+		msm_batt_cleanup();
+		return rc;
+	}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	msm_batt_info.early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN;
+	msm_batt_info.early_suspend.suspend = msm_batt_early_suspend;
+	msm_batt_info.early_suspend.resume = msm_batt_late_resume;
+	register_early_suspend(&msm_batt_info.early_suspend);
+#endif
+	msm_batt_update_psy_status();
+
+#else
+	power_supply_changed(&msm_psy_ac);
+#endif  /* CONFIG_BATTERY_MSM_FAKE */
+
+	return 0;
+}
+
+static int __devexit msm_batt_remove(struct platform_device *pdev)
+{
+	int rc;
+	rc = msm_batt_cleanup();
+
+	if (rc < 0) {
+		dev_err(&pdev->dev,
+			"%s: msm_batt_cleanup  failed rc=%d\n", __func__, rc);
+		return rc;
+	}
+	return 0;
+}
+
+static struct platform_driver msm_batt_driver = {
+	.probe = msm_batt_probe,
+	.remove = __devexit_p(msm_batt_remove),
+	.driver = {
+		   .name = "msm-battery",
+		   .owner = THIS_MODULE,
+		   },
+};
+
+static int __devinit msm_batt_init_rpc(void)
+{
+	int rc;
+
+#ifdef CONFIG_BATTERY_MSM_FAKE
+	pr_info("Faking MSM battery\n");
+#else
+
+	msm_batt_info.chg_ep =
+		msm_rpc_connect_compatible(CHG_RPC_PROG, CHG_RPC_VER_4_1, 0);
+	msm_batt_info.chg_api_version =  CHG_RPC_VER_4_1;
+	if (msm_batt_info.chg_ep == NULL) {
+		pr_err("%s: rpc connect CHG_RPC_PROG = NULL\n", __func__);
+		return -ENODEV;
+	}
+
+	if (IS_ERR(msm_batt_info.chg_ep)) {
+		msm_batt_info.chg_ep = msm_rpc_connect_compatible(
+				CHG_RPC_PROG, CHG_RPC_VER_3_1, 0);
+		msm_batt_info.chg_api_version =  CHG_RPC_VER_3_1;
+	}
+	if (IS_ERR(msm_batt_info.chg_ep)) {
+		msm_batt_info.chg_ep = msm_rpc_connect_compatible(
+				CHG_RPC_PROG, CHG_RPC_VER_1_1, 0);
+		msm_batt_info.chg_api_version =  CHG_RPC_VER_1_1;
+	}
+	if (IS_ERR(msm_batt_info.chg_ep)) {
+		msm_batt_info.chg_ep = msm_rpc_connect_compatible(
+				CHG_RPC_PROG, CHG_RPC_VER_1_3, 0);
+		msm_batt_info.chg_api_version =  CHG_RPC_VER_1_3;
+	}
+	if (IS_ERR(msm_batt_info.chg_ep)) {
+		msm_batt_info.chg_ep = msm_rpc_connect_compatible(
+				CHG_RPC_PROG, CHG_RPC_VER_2_2, 0);
+		msm_batt_info.chg_api_version =  CHG_RPC_VER_2_2;
+	}
+	if (IS_ERR(msm_batt_info.chg_ep)) {
+		rc = PTR_ERR(msm_batt_info.chg_ep);
+		pr_err("%s: FAIL: rpc connect for CHG_RPC_PROG. rc=%d\n",
+		       __func__, rc);
+		msm_batt_info.chg_ep = NULL;
+		return rc;
+	}
+
+	/* Get the real 1.x version */
+	if (msm_batt_info.chg_api_version == CHG_RPC_VER_1_1)
+		msm_batt_info.chg_api_version =
+			msm_batt_get_charger_api_version();
+
+	/* Fall back to 1.1 for default */
+	if (msm_batt_info.chg_api_version < 0)
+		msm_batt_info.chg_api_version = CHG_RPC_VER_1_1;
+	msm_batt_info.batt_api_version =  BATTERY_RPC_VER_4_1;
+
+	msm_batt_info.batt_client =
+		msm_rpc_register_client("battery", BATTERY_RPC_PROG,
+					BATTERY_RPC_VER_4_1,
+					1, msm_batt_cb_func);
+
+	if (msm_batt_info.batt_client == NULL) {
+		pr_err("%s: FAIL: rpc_register_client. batt_client=NULL\n",
+		       __func__);
+		return -ENODEV;
+	}
+	if (IS_ERR(msm_batt_info.batt_client)) {
+		msm_batt_info.batt_client =
+			msm_rpc_register_client("battery", BATTERY_RPC_PROG,
+						BATTERY_RPC_VER_1_1,
+						1, msm_batt_cb_func);
+		msm_batt_info.batt_api_version =  BATTERY_RPC_VER_1_1;
+	}
+	if (IS_ERR(msm_batt_info.batt_client)) {
+		msm_batt_info.batt_client =
+			msm_rpc_register_client("battery", BATTERY_RPC_PROG,
+						BATTERY_RPC_VER_2_1,
+						1, msm_batt_cb_func);
+		msm_batt_info.batt_api_version =  BATTERY_RPC_VER_2_1;
+	}
+	if (IS_ERR(msm_batt_info.batt_client)) {
+		msm_batt_info.batt_client =
+			msm_rpc_register_client("battery", BATTERY_RPC_PROG,
+						BATTERY_RPC_VER_5_1,
+						1, msm_batt_cb_func);
+		msm_batt_info.batt_api_version =  BATTERY_RPC_VER_5_1;
+	}
+	if (IS_ERR(msm_batt_info.batt_client)) {
+		rc = PTR_ERR(msm_batt_info.batt_client);
+		pr_err("%s: ERROR: rpc_register_client: rc = %d\n ",
+		       __func__, rc);
+		msm_batt_info.batt_client = NULL;
+		return rc;
+	}
+#endif  /* CONFIG_BATTERY_MSM_FAKE */
+
+	rc = platform_driver_register(&msm_batt_driver);
+
+	if (rc < 0)
+		pr_err("%s: FAIL: platform_driver_register. rc = %d\n",
+		       __func__, rc);
+
+	return rc;
+}
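+
+/*
+ * msm_batt_init_rpc() walks a list of known charger and battery RPC
+ * versions, keeps the first endpoint/client that connects, and records the
+ * chosen versions in chg_api_version / batt_api_version; the marshalling
+ * helpers earlier in this file pick their wire formats off those two
+ * fields.
+ */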
+
+static int __init msm_batt_init(void)
+{
+	int rc;
+
+	pr_debug("%s: enter\n", __func__);
+
+	rc = msm_batt_init_rpc();
+
+	if (rc < 0) {
+		pr_err("%s: FAIL: msm_batt_init_rpc.  rc=%d\n", __func__, rc);
+		msm_batt_cleanup();
+		return rc;
+	}
+
+	pr_info("%s: Charger/Battery = 0x%08x/0x%08x (RPC version)\n",
+		__func__, msm_batt_info.chg_api_version,
+		msm_batt_info.batt_api_version);
+
+	return 0;
+}
+
+static void __exit msm_batt_exit(void)
+{
+	platform_driver_unregister(&msm_batt_driver);
+}
+
+module_init(msm_batt_init);
+module_exit(msm_batt_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Kiran Kandi, Qualcomm Innovation Center, Inc.");
+MODULE_DESCRIPTION("Battery driver for Qualcomm MSM chipsets.");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:msm_battery");
diff --git a/drivers/power/msm_charger.c b/drivers/power/msm_charger.c
new file mode 100644
index 0000000..f40477a
--- /dev/null
+++ b/drivers/power/msm_charger.c
@@ -0,0 +1,1250 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/mfd/pmic8058.h>
+#include <linux/interrupt.h>
+#include <linux/power_supply.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/msm-charger.h>
+#include <linux/time.h>
+#include <linux/slab.h>
+#include <linux/wakelock.h>
+
+#include <asm/atomic.h>
+
+#include <mach/msm_hsusb.h>
+
+#define MSM_CHG_MAX_EVENTS		16
+#define CHARGING_TEOC_MS		9000000
+#define UPDATE_TIME_MS			60000
+#define RESUME_CHECK_PERIOD_MS		60000
+
+#define DEFAULT_BATT_MAX_V		4200
+#define DEFAULT_BATT_MIN_V		3200
+
+#define MSM_CHARGER_GAUGE_MISSING_VOLTS 3500
+#define MSM_CHARGER_GAUGE_MISSING_TEMP  35
+/**
+ * enum msm_battery_status
+ * @BATT_STATUS_ABSENT: battery not present
+ * @BATT_STATUS_ID_INVALID: battery present but the id is invalid
+ * @BATT_STATUS_DISCHARGING: battery is present and is discharging
+ * @BATT_STATUS_TRKL_CHARGING: battery is being trickle charged
+ * @BATT_STATUS_FAST_CHARGING: battery is being fast charged
+ * @BATT_STATUS_JUST_FINISHED_CHARGING: just finished charging,
+ *		battery is fully charged. Do not begin charging until the
+ *		voltage falls below a threshold to avoid overcharging
+ * @BATT_STATUS_TEMPERATURE_OUT_OF_RANGE: battery present,
+ *		no charging, temp is hot/cold
+ */
+enum msm_battery_status {
+	BATT_STATUS_ABSENT,
+	BATT_STATUS_ID_INVALID,
+	BATT_STATUS_DISCHARGING,
+	BATT_STATUS_TRKL_CHARGING,
+	BATT_STATUS_FAST_CHARGING,
+	BATT_STATUS_JUST_FINISHED_CHARGING,
+	BATT_STATUS_TEMPERATURE_OUT_OF_RANGE,
+};
+
+struct msm_hardware_charger_priv {
+	struct list_head list;
+	struct msm_hardware_charger *hw_chg;
+	enum msm_hardware_charger_state hw_chg_state;
+	unsigned int max_source_current;
+	struct power_supply psy;
+};
+
+struct msm_charger_event {
+	enum msm_hardware_charger_event event;
+	struct msm_hardware_charger *hw_chg;
+};
+
+struct msm_charger_mux {
+	int inited;
+	struct list_head msm_hardware_chargers;
+	int count_chargers;
+	struct mutex msm_hardware_chargers_lock;
+
+	struct device *dev;
+
+	unsigned int max_voltage;
+	unsigned int min_voltage;
+
+	unsigned int safety_time;
+	struct delayed_work teoc_work;
+
+	unsigned int update_time;
+	int stop_update;
+	struct delayed_work update_heartbeat_work;
+
+	struct mutex status_lock;
+	enum msm_battery_status batt_status;
+	struct msm_hardware_charger_priv *current_chg_priv;
+	struct msm_hardware_charger_priv *current_mon_priv;
+
+	unsigned int (*get_batt_capacity_percent) (void);
+
+	struct msm_charger_event *queue;
+	int tail;
+	int head;
+	spinlock_t queue_lock;
+	int queue_count;
+	struct work_struct queue_work;
+	struct workqueue_struct *event_wq_thread;
+	struct wake_lock wl;
+};
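+
+/*
+ * msm_chg is a single, global charger multiplexer: hardware chargers
+ * register with it, charger and gauge events are buffered in the queue
+ * (sized by MSM_CHG_MAX_EVENTS) under queue_lock, and the state machine in
+ * handle_event() and friends always runs with status_lock held.
+ */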
+
+static struct msm_charger_mux msm_chg;
+
+static struct msm_battery_gauge *msm_batt_gauge;
+
+static int is_chg_capable_of_charging(struct msm_hardware_charger_priv *priv)
+{
+	if (priv->hw_chg_state == CHG_READY_STATE
+	    || priv->hw_chg_state == CHG_CHARGING_STATE)
+		return 1;
+
+	return 0;
+}
+
+static int is_batt_status_capable_of_charging(void)
+{
+	if (msm_chg.batt_status == BATT_STATUS_ABSENT
+	    || msm_chg.batt_status == BATT_STATUS_TEMPERATURE_OUT_OF_RANGE
+	    || msm_chg.batt_status == BATT_STATUS_ID_INVALID
+	    || msm_chg.batt_status == BATT_STATUS_JUST_FINISHED_CHARGING)
+		return 0;
+	return 1;
+}
+
+static int is_batt_status_charging(void)
+{
+	if (msm_chg.batt_status == BATT_STATUS_TRKL_CHARGING
+	    || msm_chg.batt_status == BATT_STATUS_FAST_CHARGING)
+		return 1;
+	return 0;
+}
+
+static int is_battery_present(void)
+{
+	if (msm_batt_gauge && msm_batt_gauge->is_battery_present)
+		return msm_batt_gauge->is_battery_present();
+	else {
+		pr_err("msm-charger: no batt gauge batt=absent\n");
+		return 0;
+	}
+}
+
+static int is_battery_temp_within_range(void)
+{
+	if (msm_batt_gauge && msm_batt_gauge->is_battery_temp_within_range)
+		return msm_batt_gauge->is_battery_temp_within_range();
+	else {
+		pr_err("msm-charger no batt gauge batt=out_of_temperature\n");
+		return 0;
+	}
+}
+
+static int is_battery_id_valid(void)
+{
+	if (msm_batt_gauge && msm_batt_gauge->is_battery_id_valid)
+		return msm_batt_gauge->is_battery_id_valid();
+	else {
+		pr_err("msm-charger no batt gauge batt=id_invalid\n");
+		return 0;
+	}
+}
+
+static int get_prop_battery_mvolts(void)
+{
+	if (msm_batt_gauge && msm_batt_gauge->get_battery_mvolts)
+		return msm_batt_gauge->get_battery_mvolts();
+	else {
+		pr_err("msm-charger no batt gauge assuming 3.5V\n");
+		return MSM_CHARGER_GAUGE_MISSING_VOLTS;
+	}
+}
+
+static int get_battery_temperature(void)
+{
+	if (msm_batt_gauge && msm_batt_gauge->get_battery_temperature)
+		return msm_batt_gauge->get_battery_temperature();
+	else {
+		pr_err("msm-charger no batt gauge assuming 35 deg C\n");
+		return MSM_CHARGER_GAUGE_MISSING_TEMP;
+	}
+}
+
+static int get_prop_batt_capacity(void)
+{
+	if (msm_batt_gauge && msm_batt_gauge->get_batt_remaining_capacity)
+		return msm_batt_gauge->get_batt_remaining_capacity();
+
+	return msm_chg.get_batt_capacity_percent();
+}
+
+static int get_prop_batt_health(void)
+{
+	int status = 0;
+
+	if (msm_chg.batt_status == BATT_STATUS_TEMPERATURE_OUT_OF_RANGE)
+		status = POWER_SUPPLY_HEALTH_OVERHEAT;
+	else
+		status = POWER_SUPPLY_HEALTH_GOOD;
+
+	return status;
+}
+
+static int get_prop_charge_type(void)
+{
+	int status = 0;
+
+	if (msm_chg.batt_status == BATT_STATUS_TRKL_CHARGING)
+		status = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+	else if (msm_chg.batt_status == BATT_STATUS_FAST_CHARGING)
+		status = POWER_SUPPLY_CHARGE_TYPE_FAST;
+	else
+		status = POWER_SUPPLY_CHARGE_TYPE_NONE;
+
+	return status;
+}
+
+static int get_prop_batt_status(void)
+{
+	int status = 0;
+
+	if (msm_batt_gauge && msm_batt_gauge->get_battery_status) {
+		status = msm_batt_gauge->get_battery_status();
+		if (status == POWER_SUPPLY_STATUS_CHARGING ||
+			status == POWER_SUPPLY_STATUS_FULL ||
+			status == POWER_SUPPLY_STATUS_DISCHARGING)
+			return status;
+	}
+
+	if (is_batt_status_charging())
+		status = POWER_SUPPLY_STATUS_CHARGING;
+	else if (msm_chg.batt_status ==
+		 BATT_STATUS_JUST_FINISHED_CHARGING
+			 && msm_chg.current_chg_priv != NULL)
+		status = POWER_SUPPLY_STATUS_FULL;
+	else
+		status = POWER_SUPPLY_STATUS_DISCHARGING;
+
+	return status;
+}
+
+/* This function should only be called within handle_event or resume */
+static void update_batt_status(void)
+{
+	if (is_battery_present()) {
+		if (is_battery_id_valid()) {
+			if (msm_chg.batt_status == BATT_STATUS_ABSENT
+				|| msm_chg.batt_status
+					== BATT_STATUS_ID_INVALID) {
+				msm_chg.batt_status = BATT_STATUS_DISCHARGING;
+			}
+		} else
+			msm_chg.batt_status = BATT_STATUS_ID_INVALID;
+	 } else
+		msm_chg.batt_status = BATT_STATUS_ABSENT;
+}
+
+static enum power_supply_property msm_power_props[] = {
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_ONLINE,
+};
+
+static char *msm_power_supplied_to[] = {
+	"battery",
+};
+
+static int msm_power_get_property(struct power_supply *psy,
+				  enum power_supply_property psp,
+				  union power_supply_propval *val)
+{
+	struct msm_hardware_charger_priv *priv;
+
+	priv = container_of(psy, struct msm_hardware_charger_priv, psy);
+	switch (psp) {
+	case POWER_SUPPLY_PROP_PRESENT:
+		val->intval = !(priv->hw_chg_state == CHG_ABSENT_STATE);
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		val->intval = (priv->hw_chg_state == CHG_READY_STATE)
+			|| (priv->hw_chg_state == CHG_CHARGING_STATE);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static enum power_supply_property msm_batt_power_props[] = {
+	POWER_SUPPLY_PROP_STATUS,
+	POWER_SUPPLY_PROP_CHARGE_TYPE,
+	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_TECHNOLOGY,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+	POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_CAPACITY,
+};
+
+static int msm_batt_power_get_property(struct power_supply *psy,
+				       enum power_supply_property psp,
+				       union power_supply_propval *val)
+{
+	switch (psp) {
+	case POWER_SUPPLY_PROP_STATUS:
+		val->intval = get_prop_batt_status();
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_TYPE:
+		val->intval = get_prop_charge_type();
+		break;
+	case POWER_SUPPLY_PROP_HEALTH:
+		val->intval = get_prop_batt_health();
+		break;
+	case POWER_SUPPLY_PROP_PRESENT:
+		val->intval = !(msm_chg.batt_status == BATT_STATUS_ABSENT);
+		break;
+	case POWER_SUPPLY_PROP_TECHNOLOGY:
+		val->intval = POWER_SUPPLY_TECHNOLOGY_NiMH;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+		val->intval = msm_chg.max_voltage * 1000;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+		val->intval = msm_chg.min_voltage * 1000;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+		val->intval = get_prop_battery_mvolts();
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		val->intval = get_prop_batt_capacity();
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static struct power_supply msm_psy_batt = {
+	.name = "battery",
+	.type = POWER_SUPPLY_TYPE_BATTERY,
+	.properties = msm_batt_power_props,
+	.num_properties = ARRAY_SIZE(msm_batt_power_props),
+	.get_property = msm_batt_power_get_property,
+};
+
+static int usb_chg_current;
+static struct msm_hardware_charger_priv *usb_hw_chg_priv;
+static void (*notify_vbus_state_func_ptr)(int);
+static int usb_notified_of_insertion;
+
+/* this is passed to the hsusb via platform_data msm_otg_pdata */
+int msm_charger_register_vbus_sn(void (*callback)(int))
+{
+	pr_debug("%s\n", __func__);
+	notify_vbus_state_func_ptr = callback;
+	return 0;
+}
+
+/* this is passed to the hsusb via platform_data msm_otg_pdata */
+void msm_charger_unregister_vbus_sn(void (*callback)(int))
+{
+	pr_debug("%s\n", __func__);
+	notify_vbus_state_func_ptr = NULL;
+}
+
+static void notify_usb_of_the_plugin_event(struct msm_hardware_charger_priv
+					   *hw_chg, int plugin)
+{
+	plugin = !!plugin;
+	if (plugin == 1 && usb_notified_of_insertion == 0) {
+		usb_notified_of_insertion = 1;
+		if (notify_vbus_state_func_ptr) {
+			dev_dbg(msm_chg.dev, "%s notifying plugin\n", __func__);
+			(*notify_vbus_state_func_ptr) (plugin);
+		} else
+			dev_dbg(msm_chg.dev, "%s unable to notify plugin\n",
+				__func__);
+		usb_hw_chg_priv = hw_chg;
+	}
+	if (plugin == 0 && usb_notified_of_insertion == 1) {
+		if (notify_vbus_state_func_ptr) {
+			dev_dbg(msm_chg.dev, "%s notifying unplugin\n",
+				__func__);
+			(*notify_vbus_state_func_ptr) (plugin);
+		} else
+			dev_dbg(msm_chg.dev, "%s unable to notify unplugin\n",
+				__func__);
+		usb_notified_of_insertion = 0;
+		usb_hw_chg_priv = NULL;
+	}
+}
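+
+/*
+ * The notifier above is edge-triggered: the hsusb driver hands us a VBUS
+ * callback through msm_charger_register_vbus_sn(), and we invoke it only on
+ * a 0->1 or 1->0 transition (tracked in usb_notified_of_insertion), so
+ * repeated insertion or removal events collapse into one notification each
+ * way.
+ */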
+
+static unsigned int msm_chg_get_batt_capacity_percent(void)
+{
+	unsigned int current_voltage = get_prop_battery_mvolts();
+	unsigned int low_voltage = msm_chg.min_voltage;
+	unsigned int high_voltage = msm_chg.max_voltage;
+
+	if (current_voltage <= low_voltage)
+		return 0;
+	else if (current_voltage >= high_voltage)
+		return 100;
+	else
+		return (current_voltage - low_voltage) * 100
+		    / (high_voltage - low_voltage);
+}
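+
+/*
+ * Linear voltage-to-percent estimate (presumably wired up as
+ * msm_chg.get_batt_capacity_percent for gauges that cannot report remaining
+ * capacity).  For example, with the DEFAULT_BATT_MIN_V / DEFAULT_BATT_MAX_V
+ * window of 3200..4200 mV, a 3700 mV reading maps to
+ * (3700 - 3200) * 100 / 1000 = 50%.
+ */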
+
+#ifdef DEBUG
+static inline void debug_print(const char *func,
+			       struct msm_hardware_charger_priv *hw_chg_priv)
+{
+	dev_dbg(msm_chg.dev,
+		"%s current=(%s)(s=%d)(r=%d) new=(%s)(s=%d)(r=%d) batt=%d En\n",
+		func,
+		msm_chg.current_chg_priv ? msm_chg.current_chg_priv->
+		hw_chg->name : "none",
+		msm_chg.current_chg_priv ? msm_chg.
+		current_chg_priv->hw_chg_state : -1,
+		msm_chg.current_chg_priv ? msm_chg.current_chg_priv->
+		hw_chg->rating : -1,
+		hw_chg_priv ? hw_chg_priv->hw_chg->name : "none",
+		hw_chg_priv ? hw_chg_priv->hw_chg_state : -1,
+		hw_chg_priv ? hw_chg_priv->hw_chg->rating : -1,
+		msm_chg.batt_status);
+}
+#else
+static inline void debug_print(const char *func,
+			       struct msm_hardware_charger_priv *hw_chg_priv)
+{
+}
+#endif
+
+static struct msm_hardware_charger_priv *find_best_charger(void)
+{
+	struct msm_hardware_charger_priv *hw_chg_priv;
+	struct msm_hardware_charger_priv *better;
+	int rating;
+
+	better = NULL;
+	rating = 0;
+
+	list_for_each_entry(hw_chg_priv, &msm_chg.msm_hardware_chargers, list) {
+		if (is_chg_capable_of_charging(hw_chg_priv)) {
+			if (hw_chg_priv->hw_chg->rating > rating) {
+				rating = hw_chg_priv->hw_chg->rating;
+				better = hw_chg_priv;
+			}
+		}
+	}
+
+	return better;
+}
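+
+/*
+ * Charger arbitration: among the registered chargers that are READY or
+ * already CHARGING, pick the one with the highest hw_chg->rating, or NULL
+ * if none qualifies.  handle_charger_removed() uses this to fail over to
+ * the next-best supply when the active charger goes away.
+ */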
+
+static int msm_charging_switched(struct msm_hardware_charger_priv *priv)
+{
+	int ret = 0;
+
+	if (priv->hw_chg->charging_switched)
+		ret = priv->hw_chg->charging_switched(priv->hw_chg);
+	return ret;
+}
+
+static int msm_stop_charging(struct msm_hardware_charger_priv *priv)
+{
+	int ret;
+
+	ret = priv->hw_chg->stop_charging(priv->hw_chg);
+	if (!ret)
+		wake_unlock(&msm_chg.wl);
+	return ret;
+}
+
+/* the best charger has been selected -start charging from current_chg_priv */
+static int msm_start_charging(void)
+{
+	int ret;
+	struct msm_hardware_charger_priv *priv;
+
+	priv = msm_chg.current_chg_priv;
+	wake_lock(&msm_chg.wl);
+	ret = priv->hw_chg->start_charging(priv->hw_chg, msm_chg.max_voltage,
+					 priv->max_source_current);
+	if (ret) {
+		wake_unlock(&msm_chg.wl);
+		dev_err(msm_chg.dev, "%s couldn't start chg, error = %d\n",
+			priv->hw_chg->name, ret);
+	} else
+		priv->hw_chg_state = CHG_CHARGING_STATE;
+
+	return ret;
+}
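+
+/*
+ * A wake lock brackets every charge cycle: msm_start_charging() takes
+ * msm_chg.wl before asking the hardware to charge (and drops it again if
+ * that call fails), while msm_stop_charging() releases it once the hardware
+ * has stopped, keeping the system awake for the duration of charging.
+ */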
+
+static void handle_charging_done(struct msm_hardware_charger_priv *priv)
+{
+	if (msm_chg.current_chg_priv == priv) {
+		if (msm_chg.current_chg_priv->hw_chg_state ==
+		    CHG_CHARGING_STATE)
+			if (msm_stop_charging(msm_chg.current_chg_priv)) {
+				dev_err(msm_chg.dev, "%s couldn't stop chg\n",
+					msm_chg.current_chg_priv->hw_chg->name);
+			}
+		msm_chg.current_chg_priv->hw_chg_state = CHG_READY_STATE;
+
+		msm_chg.batt_status = BATT_STATUS_JUST_FINISHED_CHARGING;
+		dev_info(msm_chg.dev, "%s: stopping safety timer work\n",
+				__func__);
+		cancel_delayed_work(&msm_chg.teoc_work);
+
+		if (msm_batt_gauge && msm_batt_gauge->monitor_for_recharging)
+			msm_batt_gauge->monitor_for_recharging();
+		else
+			dev_err(msm_chg.dev,
+			      "%s: no batt gauge recharge monitor\n", __func__);
+	}
+}
+
+static void teoc(struct work_struct *work)
+{
+	/* we have been charging too long - stop charging */
+	dev_info(msm_chg.dev, "%s: safety timer work expired\n", __func__);
+
+	mutex_lock(&msm_chg.status_lock);
+	if (msm_chg.current_chg_priv != NULL
+	    && msm_chg.current_chg_priv->hw_chg_state == CHG_CHARGING_STATE) {
+		handle_charging_done(msm_chg.current_chg_priv);
+	}
+	mutex_unlock(&msm_chg.status_lock);
+}
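+
+/*
+ * teoc is the end-of-charge safety timer: it is armed for
+ * msm_chg.safety_time ms whenever charging starts and, if it ever fires,
+ * forces a charging-done transition so the battery is never charged
+ * continuously past that limit.  The CHARGING_TEOC_MS constant above
+ * (150 minutes) looks like the intended default for safety_time.
+ */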
+
+static void handle_battery_inserted(void)
+{
+	/* if a charger is already present start charging */
+	if (msm_chg.current_chg_priv != NULL &&
+	    is_batt_status_capable_of_charging() &&
+	    !is_batt_status_charging()) {
+		if (msm_start_charging()) {
+			dev_err(msm_chg.dev, "%s couldn't start chg\n",
+				msm_chg.current_chg_priv->hw_chg->name);
+			return;
+		}
+		msm_chg.batt_status = BATT_STATUS_TRKL_CHARGING;
+
+		dev_info(msm_chg.dev, "%s: starting safety timer work\n",
+				__func__);
+		queue_delayed_work(msm_chg.event_wq_thread,
+					&msm_chg.teoc_work,
+				      round_jiffies_relative(msecs_to_jiffies
+							     (msm_chg.
+							      safety_time)));
+	}
+}
+
+static void handle_battery_removed(void)
+{
+	/* if a charger is charging the battery stop it */
+	if (msm_chg.current_chg_priv != NULL
+	    && msm_chg.current_chg_priv->hw_chg_state == CHG_CHARGING_STATE) {
+		if (msm_stop_charging(msm_chg.current_chg_priv)) {
+			dev_err(msm_chg.dev, "%s couldn't stop chg\n",
+				msm_chg.current_chg_priv->hw_chg->name);
+		}
+		msm_chg.current_chg_priv->hw_chg_state = CHG_READY_STATE;
+
+		dev_info(msm_chg.dev, "%s: stopping safety timer work\n",
+				__func__);
+		cancel_delayed_work(&msm_chg.teoc_work);
+	}
+}
+
+static void update_heartbeat(struct work_struct *work)
+{
+	int temperature;
+
+	if (msm_chg.batt_status == BATT_STATUS_ABSENT
+		|| msm_chg.batt_status == BATT_STATUS_ID_INVALID) {
+		if (is_battery_present())
+			if (is_battery_id_valid()) {
+				msm_chg.batt_status = BATT_STATUS_DISCHARGING;
+				handle_battery_inserted();
+			}
+	} else {
+		if (!is_battery_present()) {
+			msm_chg.batt_status = BATT_STATUS_ABSENT;
+			handle_battery_removed();
+		}
+		/*
+		 * check battery id because a good battery could be removed
+		 * and replaced with an invalid battery.
+		 */
+		if (!is_battery_id_valid()) {
+			msm_chg.batt_status = BATT_STATUS_ID_INVALID;
+			handle_battery_removed();
+		}
+	}
+	pr_debug("msm-charger %s batt_status= %d\n",
+				__func__, msm_chg.batt_status);
+
+	if (msm_chg.current_chg_priv
+		&& msm_chg.current_chg_priv->hw_chg_state
+			== CHG_CHARGING_STATE) {
+		temperature = get_battery_temperature();
+		/* TODO: implement the JEITA spec */
+	}
+
+	/* notify that the voltage has changed;
+	 * the read of the capacity will trigger a
+	 * voltage read */
+	power_supply_changed(&msm_psy_batt);
+
+	if (msm_chg.stop_update) {
+		msm_chg.stop_update = 0;
+		return;
+	}
+	queue_delayed_work(msm_chg.event_wq_thread,
+				&msm_chg.update_heartbeat_work,
+			      round_jiffies_relative(msecs_to_jiffies
+						     (msm_chg.update_time)));
+}
+
+/* set the charger state to READY before calling this */
+static void handle_charger_ready(struct msm_hardware_charger_priv *hw_chg_priv)
+{
+	debug_print(__func__, hw_chg_priv);
+
+	if (msm_chg.current_chg_priv != NULL
+	    && hw_chg_priv->hw_chg->rating >
+	    msm_chg.current_chg_priv->hw_chg->rating) {
+		if (msm_chg.current_chg_priv->hw_chg_state ==
+		    CHG_CHARGING_STATE) {
+			if (msm_stop_charging(msm_chg.current_chg_priv)) {
+				dev_err(msm_chg.dev, "%s couldn't stop chg\n",
+					msm_chg.current_chg_priv->hw_chg->name);
+				return;
+			}
+			if (msm_charging_switched(msm_chg.current_chg_priv)) {
+				dev_err(msm_chg.dev, "%s couldn't switch chg\n",
+					msm_chg.current_chg_priv->hw_chg->name);
+				return;
+			}
+		}
+		msm_chg.current_chg_priv->hw_chg_state = CHG_READY_STATE;
+		msm_chg.current_chg_priv = NULL;
+	}
+
+	if (msm_chg.current_chg_priv == NULL) {
+		msm_chg.current_chg_priv = hw_chg_priv;
+		dev_info(msm_chg.dev,
+			 "%s: best charger = %s\n", __func__,
+			 msm_chg.current_chg_priv->hw_chg->name);
+
+		if (!is_batt_status_capable_of_charging())
+			return;
+
+		/* start charging from the new charger */
+		if (!msm_start_charging()) {
+			/* if we simply switched chargers, keep the running
+			 * teoc timer; otherwise update the batt state and
+			 * arm the teoc timer */
+			if (!is_batt_status_charging()) {
+				dev_info(msm_chg.dev,
+				       "%s: starting safety timer\n", __func__);
+				queue_delayed_work(msm_chg.event_wq_thread,
+							&msm_chg.teoc_work,
+						      round_jiffies_relative
+						      (msecs_to_jiffies
+						       (msm_chg.safety_time)));
+				msm_chg.batt_status = BATT_STATUS_TRKL_CHARGING;
+			}
+		} else {
+			/* we couldn't start charging from the newly readied
+			 * charger */
+			if (is_batt_status_charging())
+				msm_chg.batt_status = BATT_STATUS_DISCHARGING;
+		}
+	}
+}
+
+static void handle_charger_removed(struct msm_hardware_charger_priv
+				   *hw_chg_removed, int new_state)
+{
+	struct msm_hardware_charger_priv *hw_chg_priv;
+
+	debug_print(__func__, hw_chg_removed);
+
+	if (msm_chg.current_chg_priv == hw_chg_removed) {
+		if (msm_chg.current_chg_priv->hw_chg_state
+						== CHG_CHARGING_STATE) {
+			if (msm_stop_charging(hw_chg_removed)) {
+				dev_err(msm_chg.dev, "%s couldn't stop chg\n",
+					msm_chg.current_chg_priv->hw_chg->name);
+			}
+		}
+		msm_chg.current_chg_priv = NULL;
+	}
+
+	hw_chg_removed->hw_chg_state = new_state;
+
+	if (msm_chg.current_chg_priv == NULL) {
+		hw_chg_priv = find_best_charger();
+		if (hw_chg_priv == NULL) {
+			dev_info(msm_chg.dev, "%s: no chargers\n", __func__);
+			/*
+			 * If the battery has just finished charging, keep
+			 * that state as is so that we don't rush into
+			 * charging the battery when a charger is plugged in
+			 * again shortly afterwards.
+			 */
+			if (is_batt_status_charging())
+				msm_chg.batt_status = BATT_STATUS_DISCHARGING;
+		} else {
+			msm_chg.current_chg_priv = hw_chg_priv;
+			dev_info(msm_chg.dev,
+				 "%s: best charger = %s\n", __func__,
+				 msm_chg.current_chg_priv->hw_chg->name);
+
+			if (!is_batt_status_capable_of_charging())
+				return;
+
+			if (msm_start_charging()) {
+				/* we couldn't start charging for some reason */
+				msm_chg.batt_status = BATT_STATUS_DISCHARGING;
+			}
+		}
+	}
+
+	/* if we aren't charging, stop the safety timer */
+	if (!is_batt_status_charging()) {
+		dev_info(msm_chg.dev, "%s: stopping safety timer work\n",
+				__func__);
+		cancel_delayed_work(&msm_chg.teoc_work);
+	}
+}
+
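+/*
+ * Central charger/battery event handler, run from the event workqueue with
+ * status_lock held for the duration of the event.
+ */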
+static void handle_event(struct msm_hardware_charger *hw_chg, int event)
+{
+	struct msm_hardware_charger_priv *priv = NULL;
+
+	/*
+	 * if hw_chg is NULL then this event comes from non-charger
+	 * parties like battery gauge
+	 */
+	if (hw_chg)
+		priv = hw_chg->charger_private;
+
+	mutex_lock(&msm_chg.status_lock);
+
+	switch (event) {
+	case CHG_INSERTED_EVENT:
+		if (priv->hw_chg_state != CHG_ABSENT_STATE) {
+			dev_info(msm_chg.dev,
+				 "%s insertion detected when cable present\n",
+				 hw_chg->name);
+			break;
+		}
+		update_batt_status();
+		if (hw_chg->type == CHG_TYPE_USB) {
+			priv->hw_chg_state = CHG_PRESENT_STATE;
+			notify_usb_of_the_plugin_event(priv, 1);
+			if (usb_chg_current) {
+				priv->max_source_current = usb_chg_current;
+				usb_chg_current = 0;
+				/* USB has already told us to start charging */
+				priv->hw_chg_state = CHG_READY_STATE;
+				handle_charger_ready(priv);
+			}
+		} else {
+			priv->hw_chg_state = CHG_READY_STATE;
+			handle_charger_ready(priv);
+		}
+		break;
+	case CHG_ENUMERATED_EVENT:	/* only in USB types */
+		if (priv->hw_chg_state == CHG_ABSENT_STATE) {
+			dev_info(msm_chg.dev, "%s enum without presence\n",
+				 hw_chg->name);
+			break;
+		}
+		update_batt_status();
+		dev_dbg(msm_chg.dev, "%s enum with %dmA to draw\n",
+			 hw_chg->name, priv->max_source_current);
+		if (priv->max_source_current == 0) {
+			/*
+			 * The USB subsystem doesn't want us to draw charging
+			 * current; act as if the charger was removed.
+			 */
+			if (priv->hw_chg_state != CHG_PRESENT_STATE)
+				handle_charger_removed(priv, CHG_PRESENT_STATE);
+		} else {
+			if (priv->hw_chg_state != CHG_READY_STATE) {
+				priv->hw_chg_state = CHG_READY_STATE;
+				handle_charger_ready(priv);
+			}
+		}
+		break;
+	case CHG_REMOVED_EVENT:
+		if (priv->hw_chg_state == CHG_ABSENT_STATE) {
+			dev_info(msm_chg.dev, "%s cable already removed\n",
+				 hw_chg->name);
+			break;
+		}
+		update_batt_status();
+		if (hw_chg->type == CHG_TYPE_USB) {
+			usb_chg_current = 0;
+			notify_usb_of_the_plugin_event(priv, 0);
+		}
+		handle_charger_removed(priv, CHG_ABSENT_STATE);
+		break;
+	case CHG_DONE_EVENT:
+		if (priv->hw_chg_state == CHG_CHARGING_STATE)
+			handle_charging_done(priv);
+		break;
+	case CHG_BATT_BEGIN_FAST_CHARGING:
+		/* only update if we are TRKL charging */
+		if (msm_chg.batt_status == BATT_STATUS_TRKL_CHARGING)
+			msm_chg.batt_status = BATT_STATUS_FAST_CHARGING;
+		break;
+	case CHG_BATT_NEEDS_RECHARGING:
+		msm_chg.batt_status = BATT_STATUS_DISCHARGING;
+		handle_battery_inserted();
+		priv = msm_chg.current_chg_priv;
+		break;
+	case CHG_BATT_TEMP_OUTOFRANGE:
+		/*
+		 * The batt_temp out-of-range event can also trigger
+		 * when the battery is absent.
+		 */
+		if (!is_battery_present()
+		    && msm_chg.batt_status != BATT_STATUS_ABSENT) {
+			msm_chg.batt_status = BATT_STATUS_ABSENT;
+			handle_battery_removed();
+			break;
+		}
+		if (msm_chg.batt_status == BATT_STATUS_TEMPERATURE_OUT_OF_RANGE)
+			break;
+		msm_chg.batt_status = BATT_STATUS_TEMPERATURE_OUT_OF_RANGE;
+		handle_battery_removed();
+		break;
+	case CHG_BATT_TEMP_INRANGE:
+		if (msm_chg.batt_status != BATT_STATUS_TEMPERATURE_OUT_OF_RANGE)
+			break;
+		msm_chg.batt_status = BATT_STATUS_ID_INVALID;
+		/* check id */
+		if (!is_battery_id_valid())
+			break;
+		/*
+		 * Assume that we are discharging from the battery and act
+		 * as if the battery was inserted; if a charger is present,
+		 * charging will be resumed.
+		 */
+		msm_chg.batt_status = BATT_STATUS_DISCHARGING;
+		handle_battery_inserted();
+		break;
+	case CHG_BATT_INSERTED:
+		if (msm_chg.batt_status != BATT_STATUS_ABSENT)
+			break;
+		/* debounce */
+		if (!is_battery_present())
+			break;
+		msm_chg.batt_status = BATT_STATUS_ID_INVALID;
+		if (!is_battery_id_valid())
+			break;
+		/* assume that we are discharging from the battery */
+		msm_chg.batt_status = BATT_STATUS_DISCHARGING;
+		/* check if a charger is present */
+		handle_battery_inserted();
+		break;
+	case CHG_BATT_REMOVED:
+		if (msm_chg.batt_status == BATT_STATUS_ABSENT)
+			break;
+		/* debounce */
+		if (is_battery_present())
+			break;
+		msm_chg.batt_status = BATT_STATUS_ABSENT;
+		handle_battery_removed();
+		break;
+	case CHG_BATT_STATUS_CHANGE:
+		/*
+		 * TODO: battery SOC features such as battery-alarm and
+		 * charging-full handling can be added here in the future.
+		 */
+		break;
+	}
+	dev_dbg(msm_chg.dev, "%s %d done batt_status=%d\n", __func__,
+		event, msm_chg.batt_status);
+
+	/* update userspace */
+	if (msm_batt_gauge)
+		power_supply_changed(&msm_psy_batt);
+	if (priv)
+		power_supply_changed(&priv->psy);
+
+	mutex_unlock(&msm_chg.status_lock);
+}
+
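+/*
+ * The event queue is a fixed-size ring buffer of MSM_CHG_MAX_EVENTS entries
+ * protected by queue_lock so events can be enqueued from atomic context;
+ * process_events() drains it from the event workqueue.
+ */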
+static int msm_chg_dequeue_event(struct msm_charger_event **event)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&msm_chg.queue_lock, flags);
+	if (msm_chg.queue_count == 0) {
+		spin_unlock_irqrestore(&msm_chg.queue_lock, flags);
+		return -EINVAL;
+	}
+	*event = &msm_chg.queue[msm_chg.head];
+	msm_chg.head = (msm_chg.head + 1) % MSM_CHG_MAX_EVENTS;
+	pr_debug("%s dequeueing %d\n", __func__, (*event)->event);
+	msm_chg.queue_count--;
+	spin_unlock_irqrestore(&msm_chg.queue_lock, flags);
+	return 0;
+}
+
+static int msm_chg_enqueue_event(struct msm_hardware_charger *hw_chg,
+			enum msm_hardware_charger_event event)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&msm_chg.queue_lock, flags);
+	if (msm_chg.queue_count == MSM_CHG_MAX_EVENTS) {
+		spin_unlock_irqrestore(&msm_chg.queue_lock, flags);
+		pr_err("%s: queue full, cannot enqueue %d\n",
+				__func__, event);
+		return -EAGAIN;
+	}
+	pr_debug("%s queueing %d\n", __func__, event);
+	msm_chg.queue[msm_chg.tail].event = event;
+	msm_chg.queue[msm_chg.tail].hw_chg = hw_chg;
+	msm_chg.tail = (msm_chg.tail + 1)%MSM_CHG_MAX_EVENTS;
+	msm_chg.queue_count++;
+	spin_unlock_irqrestore(&msm_chg.queue_lock, flags);
+	return 0;
+}
+
+static void process_events(struct work_struct *work)
+{
+	struct msm_charger_event *event;
+	int rc;
+
+	do {
+		rc = msm_chg_dequeue_event(&event);
+		if (!rc)
+			handle_event(event->hw_chg, event->event);
+	} while (!rc);
+}
+
+/* USB calls this to tell us how much charging current we should draw */
+void msm_charger_vbus_draw(unsigned int mA)
+{
+	if (usb_hw_chg_priv) {
+		usb_hw_chg_priv->max_source_current = mA;
+		msm_charger_notify_event(usb_hw_chg_priv->hw_chg,
+						CHG_ENUMERATED_EVENT);
+	} else
+		/* remember the current, to be used when charger is ready */
+		usb_chg_current = mA;
+}
+
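+/*
+ * Determine the initial battery state from presence, ID and temperature,
+ * register the battery power supply and kick off the periodic heartbeat.
+ */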
+static int __init determine_initial_batt_status(void)
+{
+	int rc;
+
+	if (is_battery_present())
+		if (is_battery_id_valid())
+			if (is_battery_temp_within_range())
+				msm_chg.batt_status = BATT_STATUS_DISCHARGING;
+			else
+				msm_chg.batt_status
+				    = BATT_STATUS_TEMPERATURE_OUT_OF_RANGE;
+		else
+			msm_chg.batt_status = BATT_STATUS_ID_INVALID;
+	else
+		msm_chg.batt_status = BATT_STATUS_ABSENT;
+
+	if (is_batt_status_capable_of_charging())
+		handle_battery_inserted();
+
+	rc = power_supply_register(msm_chg.dev, &msm_psy_batt);
+	if (rc < 0) {
+		dev_err(msm_chg.dev, "%s: power_supply_register failed"
+			" rc=%d\n", __func__, rc);
+		return rc;
+	}
+
+	/* start updating the battery power supply every msm_chg.update_time
+	 * milliseconds */
+	queue_delayed_work(msm_chg.event_wq_thread,
+				&msm_chg.update_heartbeat_work,
+			      round_jiffies_relative(msecs_to_jiffies
+						     (msm_chg.update_time)));
+
+	pr_debug("%s:OK batt_status=%d\n", __func__, msm_chg.batt_status);
+	return 0;
+}
+
+static int __devinit msm_charger_probe(struct platform_device *pdev)
+{
+	msm_chg.dev = &pdev->dev;
+	if (pdev->dev.platform_data) {
+		unsigned int milli_secs;
+
+		struct msm_charger_platform_data *pdata
+		    =
+		    (struct msm_charger_platform_data *)pdev->dev.platform_data;
+
+		milli_secs = pdata->safety_time * 60 * MSEC_PER_SEC;
+		if (milli_secs > jiffies_to_msecs(MAX_JIFFY_OFFSET)) {
+			dev_warn(&pdev->dev, "%s: safety time too large"
+				 " %dms\n", __func__, milli_secs);
+			milli_secs = jiffies_to_msecs(MAX_JIFFY_OFFSET);
+		}
+		msm_chg.safety_time = milli_secs;
+
+		milli_secs = pdata->update_time * 60 * MSEC_PER_SEC;
+		if (milli_secs > jiffies_to_msecs(MAX_JIFFY_OFFSET)) {
+			dev_warn(&pdev->dev, "%s: update time too large"
+				 " %dms\n", __func__, milli_secs);
+			milli_secs = jiffies_to_msecs(MAX_JIFFY_OFFSET);
+		}
+		msm_chg.update_time = milli_secs;
+
+		msm_chg.max_voltage = pdata->max_voltage;
+		msm_chg.min_voltage = pdata->min_voltage;
+		msm_chg.get_batt_capacity_percent =
+		    pdata->get_batt_capacity_percent;
+	}
+	if (msm_chg.safety_time == 0)
+		msm_chg.safety_time = CHARGING_TEOC_MS;
+	if (msm_chg.update_time == 0)
+		msm_chg.update_time = UPDATE_TIME_MS;
+	if (msm_chg.max_voltage == 0)
+		msm_chg.max_voltage = DEFAULT_BATT_MAX_V;
+	if (msm_chg.min_voltage == 0)
+		msm_chg.min_voltage = DEFAULT_BATT_MIN_V;
+	if (msm_chg.get_batt_capacity_percent == NULL)
+		msm_chg.get_batt_capacity_percent =
+		    msm_chg_get_batt_capacity_percent;
+
+	mutex_init(&msm_chg.status_lock);
+	INIT_DELAYED_WORK(&msm_chg.teoc_work, teoc);
+	INIT_DELAYED_WORK(&msm_chg.update_heartbeat_work, update_heartbeat);
+
+	wake_lock_init(&msm_chg.wl, WAKE_LOCK_SUSPEND, "msm_charger");
+	return 0;
+}
+
+static int __devexit msm_charger_remove(struct platform_device *pdev)
+{
+	mutex_destroy(&msm_chg.status_lock);
+	power_supply_unregister(&msm_psy_batt);
+	return 0;
+}
+
+int msm_charger_notify_event(struct msm_hardware_charger *hw_chg,
+			     enum msm_hardware_charger_event event)
+{
+	msm_chg_enqueue_event(hw_chg, event);
+	queue_work(msm_chg.event_wq_thread, &msm_chg.queue_work);
+	return 0;
+}
+EXPORT_SYMBOL(msm_charger_notify_event);
+
+int msm_charger_register(struct msm_hardware_charger *hw_chg)
+{
+	struct msm_hardware_charger_priv *priv;
+	int rc = 0;
+
+	if (!msm_chg.inited) {
+		pr_err("%s: msm_chg is NULL, too early to register\n", __func__);
+		return -EAGAIN;
+	}
+
+	if (hw_chg->start_charging == NULL
+		|| hw_chg->stop_charging == NULL
+		|| hw_chg->name == NULL
+		|| hw_chg->rating == 0) {
+		pr_err("%s: invalid hw_chg\n", __func__);
+		return -EINVAL;
+	}
+
+	priv = kzalloc(sizeof *priv, GFP_KERNEL);
+	if (priv == NULL) {
+		dev_err(msm_chg.dev, "%s kzalloc failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	priv->psy.name = hw_chg->name;
+	if (hw_chg->type == CHG_TYPE_USB)
+		priv->psy.type = POWER_SUPPLY_TYPE_USB;
+	else
+		priv->psy.type = POWER_SUPPLY_TYPE_MAINS;
+
+	priv->psy.supplied_to = msm_power_supplied_to;
+	priv->psy.num_supplicants = ARRAY_SIZE(msm_power_supplied_to);
+	priv->psy.properties = msm_power_props;
+	priv->psy.num_properties = ARRAY_SIZE(msm_power_props);
+	priv->psy.get_property = msm_power_get_property;
+
+	rc = power_supply_register(NULL, &priv->psy);
+	if (rc) {
+		dev_err(msm_chg.dev, "%s power_supply_register failed\n",
+			__func__);
+		goto out;
+	}
+
+	priv->hw_chg = hw_chg;
+	priv->hw_chg_state = CHG_ABSENT_STATE;
+	INIT_LIST_HEAD(&priv->list);
+	mutex_lock(&msm_chg.msm_hardware_chargers_lock);
+	list_add_tail(&priv->list, &msm_chg.msm_hardware_chargers);
+	mutex_unlock(&msm_chg.msm_hardware_chargers_lock);
+	hw_chg->charger_private = (void *)priv;
+	return 0;
+
+out:
+	wake_lock_destroy(&msm_chg.wl);
+	kfree(priv);
+	return rc;
+}
+EXPORT_SYMBOL(msm_charger_register);
+
+void msm_battery_gauge_register(struct msm_battery_gauge *batt_gauge)
+{
+	if (msm_batt_gauge) {
+		msm_batt_gauge = batt_gauge;
+		pr_err("msm-charger %s: battery gauge already registered\n",
+								__func__);
+	} else {
+		msm_batt_gauge = batt_gauge;
+		determine_initial_batt_status();
+	}
+}
+EXPORT_SYMBOL(msm_battery_gauge_register);
+
+void msm_battery_gauge_unregister(struct msm_battery_gauge *batt_gauge)
+{
+	msm_batt_gauge = NULL;
+}
+EXPORT_SYMBOL(msm_battery_gauge_unregister);
+
+int msm_charger_unregister(struct msm_hardware_charger *hw_chg)
+{
+	struct msm_hardware_charger_priv *priv;
+
+	priv = (struct msm_hardware_charger_priv *)(hw_chg->charger_private);
+	mutex_lock(&msm_chg.msm_hardware_chargers_lock);
+	list_del(&priv->list);
+	mutex_unlock(&msm_chg.msm_hardware_chargers_lock);
+	wake_lock_destroy(&msm_chg.wl);
+	power_supply_unregister(&priv->psy);
+	kfree(priv);
+	return 0;
+}
+EXPORT_SYMBOL(msm_charger_unregister);
+
+static int msm_charger_suspend(struct device *dev)
+{
+	dev_dbg(msm_chg.dev, "%s suspended\n", __func__);
+	msm_chg.stop_update = 1;
+	cancel_delayed_work(&msm_chg.update_heartbeat_work);
+	mutex_lock(&msm_chg.status_lock);
+	handle_battery_removed();
+	mutex_unlock(&msm_chg.status_lock);
+	return 0;
+}
+
+static int msm_charger_resume(struct device *dev)
+{
+	dev_dbg(msm_chg.dev, "%s resumed\n", __func__);
+	msm_chg.stop_update = 0;
+	/* start updating the battery power supply every msm_chg.update_time
+	 * milliseconds */
+	queue_delayed_work(msm_chg.event_wq_thread,
+				&msm_chg.update_heartbeat_work,
+			      round_jiffies_relative(msecs_to_jiffies
+						     (msm_chg.update_time)));
+	mutex_lock(&msm_chg.status_lock);
+	handle_battery_inserted();
+	mutex_unlock(&msm_chg.status_lock);
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(msm_charger_pm_ops,
+		msm_charger_suspend, msm_charger_resume);
+
+static struct platform_driver msm_charger_driver = {
+	.probe = msm_charger_probe,
+	.remove = __devexit_p(msm_charger_remove),
+	.driver = {
+		   .name = "msm-charger",
+		   .owner = THIS_MODULE,
+		   .pm = &msm_charger_pm_ops,
+	},
+};
+
+static int __init msm_charger_init(void)
+{
+	int rc;
+
+	INIT_LIST_HEAD(&msm_chg.msm_hardware_chargers);
+	msm_chg.count_chargers = 0;
+	mutex_init(&msm_chg.msm_hardware_chargers_lock);
+
+	msm_chg.queue = kzalloc(sizeof(struct msm_charger_event)
+				* MSM_CHG_MAX_EVENTS,
+				GFP_KERNEL);
+	if (!msm_chg.queue) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	msm_chg.tail = 0;
+	msm_chg.head = 0;
+	spin_lock_init(&msm_chg.queue_lock);
+	msm_chg.queue_count = 0;
+	INIT_WORK(&msm_chg.queue_work, process_events);
+	msm_chg.event_wq_thread = create_workqueue("msm_charger_eventd");
+	if (!msm_chg.event_wq_thread) {
+		rc = -ENOMEM;
+		goto free_queue;
+	}
+	rc = platform_driver_register(&msm_charger_driver);
+	if (rc < 0) {
+		pr_err("%s: FAIL: platform_driver_register. rc = %d\n",
+		       __func__, rc);
+		goto destroy_wq_thread;
+	}
+	msm_chg.inited = 1;
+	return 0;
+
+destroy_wq_thread:
+	destroy_workqueue(msm_chg.event_wq_thread);
+free_queue:
+	kfree(msm_chg.queue);
+out:
+	return rc;
+}
+
+static void __exit msm_charger_exit(void)
+{
+	flush_workqueue(msm_chg.event_wq_thread);
+	destroy_workqueue(msm_chg.event_wq_thread);
+	kfree(msm_chg.queue);
+	platform_driver_unregister(&msm_charger_driver);
+}
+
+module_init(msm_charger_init);
+module_exit(msm_charger_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Abhijeet Dharmapurikar <adharmap@codeaurora.org>");
+MODULE_DESCRIPTION("Battery driver for Qualcomm MSM chipsets.");
+MODULE_VERSION("1.0");
diff --git a/drivers/power/pm8058_usb_fix.c b/drivers/power/pm8058_usb_fix.c
new file mode 100644
index 0000000..80b1f87
--- /dev/null
+++ b/drivers/power/pm8058_usb_fix.c
@@ -0,0 +1,357 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/errno.h>
+#include <linux/mfd/pmic8058.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+
+#include <mach/msm_xo.h>
+#include <mach/msm_hsusb.h>
+
+/* Config regs and their bits */
+#define PM8058_CHG_TEST			0x75
+#define IGNORE_LL                       2
+
+#define PM8058_CHG_TEST_2		0xEA
+#define PM8058_CHG_TEST_3		0xEB
+#define PM8058_OVP_TEST_REG		0xF6
+#define FORCE_OVP_OFF			3
+
+#define PM8058_CHG_CNTRL		0x1E
+#define CHG_TRICKLE_EN			7
+#define CHG_USB_SUSPEND			6
+#define CHG_IMON_CAL			5
+#define CHG_IMON_GAIN			4
+#define CHG_VBUS_FROM_BOOST_OVRD	2
+#define CHG_CHARGE_DIS			1
+#define CHG_VCP_EN			0
+
+#define PM8058_CHG_CNTRL_2		0xD8
+#define ATC_DIS				7	/* coincell backed */
+#define CHARGE_AUTO_DIS			6
+#define DUMB_CHG_OVRD			5	/* coincell backed */
+#define ENUM_DONE			4
+#define CHG_TEMP_MODE			3
+#define CHG_BATT_TEMP_DIS		1	/* coincell backed */
+#define CHG_FAILED_CLEAR		0
+
+#define PM8058_CHG_VMAX_SEL		0x21
+#define PM8058_CHG_VBAT_DET		0xD9
+#define PM8058_CHG_IMAX			0x1F
+#define PM8058_CHG_TRICKLE		0xDB
+#define PM8058_CHG_ITERM		0xDC
+#define PM8058_CHG_TTRKL_MAX		0xE1
+#define PM8058_CHG_TCHG_MAX		0xE4
+#define PM8058_CHG_TEMP_THRESH		0xE2
+#define PM8058_CHG_TEMP_REG		0xE3
+#define PM8058_CHG_PULSE		0x22
+
+/* IRQ STATUS and CLEAR */
+#define PM8058_CHG_STATUS_CLEAR_IRQ_1	0x31
+#define PM8058_CHG_STATUS_CLEAR_IRQ_3	0x33
+#define PM8058_CHG_STATUS_CLEAR_IRQ_10	0xB3
+#define PM8058_CHG_STATUS_CLEAR_IRQ_11	0xB4
+
+/* IRQ MASKS */
+#define PM8058_CHG_MASK_IRQ_1		0x38
+
+#define PM8058_CHG_MASK_IRQ_3		0x3A
+#define PM8058_CHG_MASK_IRQ_10		0xBA
+#define PM8058_CHG_MASK_IRQ_11		0xBB
+
+/* IRQ Real time status regs */
+#define PM8058_CHG_STATUS_RT_1		0x3F
+#define STATUS_RTCHGVAL			7
+#define STATUS_RTCHGINVAL		6
+#define STATUS_RTBATT_REPLACE		5
+#define STATUS_RTVBATDET_LOW		4
+#define STATUS_RTCHGILIM		3
+#define STATUS_RTPCTDONE		1
+#define STATUS_RTVCP			0
+#define PM8058_CHG_STATUS_RT_3		0x41
+#define PM8058_CHG_STATUS_RT_10		0xC1
+#define PM8058_CHG_STATUS_RT_11		0xC2
+
+/* VTRIM */
+#define PM8058_CHG_VTRIM		0x1D
+#define PM8058_CHG_VBATDET_TRIM		0x1E
+#define PM8058_CHG_ITRIM		0x1F
+#define PM8058_CHG_TTRIM		0x20
+
+#define AUTO_CHARGING_VMAXSEL				4200
+#define AUTO_CHARGING_FAST_TIME_MAX_MINUTES		512
+#define AUTO_CHARGING_TRICKLE_TIME_MINUTES		30
+#define AUTO_CHARGING_VEOC_ITERM			100
+#define AUTO_CHARGING_IEOC_ITERM			160
+
+#define AUTO_CHARGING_VBATDET				4150
+#define AUTO_CHARGING_VEOC_VBATDET			4100
+#define AUTO_CHARGING_VEOC_TCHG				16
+#define AUTO_CHARGING_VEOC_TCHG_FINAL_CYCLE		32
+#define AUTO_CHARGING_VEOC_BEGIN_TIME_MS		5400000
+
+#define AUTO_CHARGING_VEOC_VBAT_LOW_CHECK_TIME_MS	60000
+#define AUTO_CHARGING_RESUME_CHARGE_DETECTION_COUNTER	5
+
+#define PM8058_CHG_I_STEP_MA 50
+#define PM8058_CHG_I_MIN_MA 50
+#define PM8058_CHG_T_TCHG_SHIFT 2
+#define PM8058_CHG_I_TERM_STEP_MA 10
+#define PM8058_CHG_V_STEP_MV 25
+#define PM8058_CHG_V_MIN_MV  2400
+/*
+ * enum pmic_chg_interrupts: pmic interrupts
+ * @CHGVAL_IRQ: charger V between 3.3 and 7.9
+ * @CHGINVAL_IRQ: charger V outside 3.3 and 7.9
+ * @VBATDET_LOW_IRQ: VBAT < VBATDET
+ * @VCP_IRQ: VDD went below VBAT: BAT_FET is turned on
+ * @CHGILIM_IRQ: mA consumed>IMAXSEL: chgloop draws less mA
+ * @ATC_DONE_IRQ: Auto Trickle done
+ * @ATCFAIL_IRQ: Auto Trickle fail
+ * @AUTO_CHGDONE_IRQ: Auto chg done
+ * @AUTO_CHGFAIL_IRQ: time exceeded w/o reaching term current
+ * @CHGSTATE_IRQ: something happened causing a state change
+ * @FASTCHG_IRQ: trkl charging completed: moving to fastchg
+ * @CHG_END_IRQ: mA has dropped to termination current
+ * @BATTTEMP_IRQ: batt temp is out of range
+ * @CHGHOT_IRQ: the pass device is too hot
+ * @CHGTLIMIT_IRQ: unused
+ * @CHG_GONE_IRQ: charger was removed
+ * @VCPMAJOR_IRQ: vcp major
+ * @VBATDET_IRQ: VBAT >= VBATDET
+ * @BATFET_IRQ: BATFET closed
+ * @BATT_REPLACE_IRQ:
+ * @BATTCONNECT_IRQ:
+ */
+enum pmic_chg_interrupts {
+	CHGVAL_IRQ,
+	CHGINVAL_IRQ,
+	VBATDET_LOW_IRQ,
+	VCP_IRQ,
+	CHGILIM_IRQ,
+	ATC_DONE_IRQ,
+	ATCFAIL_IRQ,
+	AUTO_CHGDONE_IRQ,
+	AUTO_CHGFAIL_IRQ,
+	CHGSTATE_IRQ,
+	FASTCHG_IRQ,
+	CHG_END_IRQ,
+	BATTTEMP_IRQ,
+	CHGHOT_IRQ,
+	CHGTLIMIT_IRQ,
+	CHG_GONE_IRQ,
+	VCPMAJOR_IRQ,
+	VBATDET_IRQ,
+	BATFET_IRQ,
+	BATT_REPLACE_IRQ,
+	BATTCONNECT_IRQ,
+	PMIC_CHG_MAX_INTS
+};
+
+struct pm8058_charger {
+	struct pmic_charger_pdata *pdata;
+	struct pm8058_chip *pm_chip;
+	struct device *dev;
+
+	int pmic_chg_irq[PMIC_CHG_MAX_INTS];
+	DECLARE_BITMAP(enabled_irqs, PMIC_CHG_MAX_INTS);
+
+	struct delayed_work check_vbat_low_work;
+	struct delayed_work veoc_begin_work;
+	int waiting_for_topoff;
+	int waiting_for_veoc;
+	int current_charger_current;
+
+	struct msm_xo_voter *voter;
+	struct dentry *dent;
+};
+
+static struct pm8058_charger pm8058_chg;
+
+static int pm_chg_get_rt_status(int irq)
+{
+	int count = 3;
+	int ret;
+
+	while ((ret =
+		pm8058_irq_get_rt_status(pm8058_chg.pm_chip, irq)) == -EAGAIN
+	       && count--) {
+		dev_info(pm8058_chg.dev, "%s trycount=%d\n", __func__, count);
+		cpu_relax();
+	}
+	if (ret == -EAGAIN)
+		return 0;
+	else
+		return ret;
+}
+
+static int is_chg_plugged_in(void)
+{
+	return pm_chg_get_rt_status(pm8058_chg.pmic_chg_irq[CHGVAL_IRQ]);
+}
+
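+/*
+ * CHGVAL interrupt: when the charger is removed, momentarily force OVP off
+ * and write the CHG_TEST sequence so that VCHG can discharge, then restore
+ * the original OVP_TEST register value.
+ */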
+static irqreturn_t pm8058_chg_chgval_handler(int irq, void *dev_id)
+{
+	u8 old, temp;
+	int ret;
+
+	if (!is_chg_plugged_in()) {	/* this debounces it */
+		ret = pm8058_read(pm8058_chg.pm_chip, PM8058_OVP_TEST_REG,
+					&old, 1);
+		temp = old | BIT(FORCE_OVP_OFF);
+		ret = pm8058_write(pm8058_chg.pm_chip, PM8058_OVP_TEST_REG,
+					&temp, 1);
+		temp = 0xFC;
+		ret = pm8058_write(pm8058_chg.pm_chip, PM8058_CHG_TEST,
+					&temp, 1);
+		pr_debug("%s forced wrote 0xFC to test ret=%d\n",
+							__func__, ret);
+		/* 20 ms sleep is for the VCHG to discharge */
+		msleep(20);
+		temp = 0xF0;
+		ret = pm8058_write(pm8058_chg.pm_chip, PM8058_CHG_TEST,
+					&temp, 1);
+		ret = pm8058_write(pm8058_chg.pm_chip, PM8058_OVP_TEST_REG,
+					&old, 1);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void free_irqs(void)
+{
+	int i;
+
+	for (i = 0; i < PMIC_CHG_MAX_INTS; i++)
+		if (pm8058_chg.pmic_chg_irq[i]) {
+			free_irq(pm8058_chg.pmic_chg_irq[i], NULL);
+			pm8058_chg.pmic_chg_irq[i] = 0;
+		}
+}
+
+static int __devinit request_irqs(struct platform_device *pdev)
+{
+	struct resource *res;
+	int ret;
+
+	ret = 0;
+	bitmap_fill(pm8058_chg.enabled_irqs, PMIC_CHG_MAX_INTS);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "CHGVAL");
+	if (res == NULL) {
+		dev_err(pm8058_chg.dev,
+			"%s: couldn't find resource CHGVAL\n", __func__);
+		goto err_out;
+	} else {
+		ret = request_any_context_irq(res->start,
+				  pm8058_chg_chgval_handler,
+				  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+				  res->name, NULL);
+		if (ret < 0) {
+			dev_err(pm8058_chg.dev, "%s: couldn't request %d %d\n",
+				__func__, res->start, ret);
+			goto err_out;
+		} else {
+			pm8058_chg.pmic_chg_irq[CHGVAL_IRQ] = res->start;
+		}
+	}
+
+	return 0;
+
+err_out:
+	free_irqs();
+	return -EINVAL;
+}
+
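+/* Clear the IGNORE_LL bit in the PM8058_CHG_TEST bank. */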
+static int pm8058_usb_voltage_lower_limit(void)
+{
+	u8 temp, old;
+	int ret = 0;
+
+	temp = 0x10;
+	ret |= pm8058_write(pm8058_chg.pm_chip, PM8058_CHG_TEST, &temp, 1);
+	ret |= pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_TEST, &old, 1);
+	old = old & ~BIT(IGNORE_LL);
+	temp = 0x90  | (0xF & old);
+	pr_debug("%s writing 0x%x to test\n", __func__, temp);
+	ret |= pm8058_write(pm8058_chg.pm_chip, PM8058_CHG_TEST, &temp, 1);
+
+	return ret;
+}
+
+static int __devinit pm8058_charger_probe(struct platform_device *pdev)
+{
+	struct pm8058_chip *pm_chip;
+
+	pm_chip = dev_get_drvdata(pdev->dev.parent);
+	if (pm_chip == NULL) {
+		pr_err("%s: no parent data passed in\n", __func__);
+		return -EFAULT;
+	}
+
+	pm8058_chg.pm_chip = pm_chip;
+	pm8058_chg.pdata = pdev->dev.platform_data;
+	pm8058_chg.dev = &pdev->dev;
+
+	if (request_irqs(pdev)) {
+		pr_err("%s: couldn't register interrupts\n", __func__);
+		return -EINVAL;
+	}
+
+	if (pm8058_usb_voltage_lower_limit()) {
+		pr_err("%s: couldn't write to IGNORE_LL\n", __func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int __devexit pm8058_charger_remove(struct platform_device *pdev)
+{
+	free_irqs();
+	return 0;
+}
+
+static struct platform_driver pm8058_charger_driver = {
+	.probe = pm8058_charger_probe,
+	.remove = __devexit_p(pm8058_charger_remove),
+	.driver = {
+		   .name = "pm-usb-fix",
+		   .owner = THIS_MODULE,
+	},
+};
+
+static int __init pm8058_charger_init(void)
+{
+	return platform_driver_register(&pm8058_charger_driver);
+}
+
+static void __exit pm8058_charger_exit(void)
+{
+	platform_driver_unregister(&pm8058_charger_driver);
+}
+
+late_initcall(pm8058_charger_init);
+module_exit(pm8058_charger_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC8058 BATTERY driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:pm8058_charger");
diff --git a/drivers/power/pm8921-bms.c b/drivers/power/pm8921-bms.c
new file mode 100644
index 0000000..6ad6a18
--- /dev/null
+++ b/drivers/power/pm8921-bms.c
@@ -0,0 +1,1178 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/errno.h>
+#include <linux/mfd/pm8xxx/pm8921-bms.h>
+#include <linux/mfd/pm8xxx/core.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+
+#define BMS_CONTROL		0x224
+#define BMS_OUTPUT0		0x230
+#define BMS_OUTPUT1		0x231
+#define BMS_TEST1		0x237
+#define CCADC_ANA_PARAM		0x240
+#define CCADC_DIG_PARAM		0x241
+#define CCADC_RSV		0x242
+#define CCADC_DATA0		0x244
+#define CCADC_DATA1		0x245
+#define CCADC_OFFSET_TRIM1	0x34A
+#define CCADC_OFFSET_TRIM0	0x34B
+
+#define ADC_ARB_SECP_CNTRL 0x190
+#define ADC_ARB_SECP_AMUX_CNTRL 0x191
+#define ADC_ARB_SECP_ANA_PARAM 0x192
+#define ADC_ARB_SECP_RSV 0x194
+#define ADC_ARB_SECP_DATA1 0x195
+#define ADC_ARB_SECP_DATA0 0x196
+
+enum pmic_bms_interrupts {
+	PM8921_BMS_SBI_WRITE_OK,
+	PM8921_BMS_CC_THR,
+	PM8921_BMS_VSENSE_THR,
+	PM8921_BMS_VSENSE_FOR_R,
+	PM8921_BMS_OCV_FOR_R,
+	PM8921_BMS_GOOD_OCV,
+	PM8921_BMS_VSENSE_AVG,
+	PM_BMS_MAX_INTS,
+};
+
+/**
+ * struct pm8921_bms_chip - device information
+ * @dev:	device pointer to access the parent
+ * @dent:	debugfs directory
+ * @r_sense:	batt sense resistance value
+ * @i_test:	peak current
+ * @v_failure:	battery dead voltage
+ * @fcc:	battery capacity
+ *
+ */
+struct pm8921_bms_chip {
+	struct device		*dev;
+	struct dentry		*dent;
+	unsigned int		r_sense;
+	unsigned int		i_test;
+	unsigned int		v_failure;
+	unsigned int		fcc;
+	struct single_row_lut	*fcc_temp_lut;
+	struct single_row_lut	*fcc_sf_lut;
+	struct pc_temp_ocv_lut	*pc_temp_ocv_lut;
+	struct pc_sf_lut	*pc_sf_lut;
+	struct delayed_work	calib_work;
+	unsigned int		calib_delay_ms;
+	unsigned int		pmic_bms_irq[PM_BMS_MAX_INTS];
+	DECLARE_BITMAP(enabled_irqs, PM_BMS_MAX_INTS);
+};
+
+static struct pm8921_bms_chip *the_chip;
+
+#define DEFAULT_RBATT_MOHMS		128
+#define DEFAULT_UNUSABLE_CHARGE_MAH	10
+#define DEFAULT_OCV_MICROVOLTS		3900000
+#define DEFAULT_REMAINING_CHARGE_MAH	990
+#define DEFAULT_COULUMB_COUNTER		0
+#define DEFAULT_CHARGE_CYCLES		0
+
+static int last_rbatt = -EINVAL;
+static int last_fcc = -EINVAL;
+static int last_unusable_charge = -EINVAL;
+static int last_ocv_uv = -EINVAL;
+static int last_remaining_charge = -EINVAL;
+static int last_coulumb_counter = -EINVAL;
+static int last_soc = -EINVAL;
+
+static int last_chargecycles = DEFAULT_CHARGE_CYCLES;
+static int last_charge_increase;
+
+module_param(last_rbatt, int, 0644);
+module_param(last_fcc, int, 0644);
+module_param(last_unusable_charge, int, 0644);
+module_param(last_ocv_uv, int, 0644);
+module_param(last_remaining_charge, int, 0644);
+module_param(last_coulumb_counter, int, 0644);
+module_param(last_chargecycles, int, 0644);
+module_param(last_charge_increase, int, 0644);
+
+static int pm_bms_get_rt_status(struct pm8921_bms_chip *chip, int irq_id)
+{
+	return pm8xxx_read_irq_stat(chip->dev->parent,
+					chip->pmic_bms_irq[irq_id]);
+}
+
+static void pm8921_bms_disable_irq(struct pm8921_bms_chip *chip, int interrupt)
+{
+	if (__test_and_clear_bit(interrupt, chip->enabled_irqs)) {
+		dev_dbg(chip->dev, "%d\n", chip->pmic_bms_irq[interrupt]);
+		disable_irq_nosync(chip->pmic_bms_irq[interrupt]);
+	}
+}
+
+static int pm_bms_masked_write(struct pm8921_bms_chip *chip, u16 addr,
+							u8 mask, u8 val)
+{
+	int rc;
+	u8 reg;
+
+	rc = pm8xxx_readb(chip->dev->parent, addr, &reg);
+	if (rc) {
+		pr_err("read failed addr = %03X, rc = %d\n", addr, rc);
+		return rc;
+	}
+	reg &= ~mask;
+	reg |= val & mask;
+	rc = pm8xxx_writeb(chip->dev->parent, addr, reg);
+	if (rc) {
+		pr_err("write failed addr = %03X, rc = %d\n", addr, rc);
+		return rc;
+	}
+	return 0;
+}
+
+#define SELECT_OUTPUT_DATA	0x1C
+#define SELECT_OUTPUT_TYPE_SHIFT	2
+#define OCV_FOR_RBATT		0x0
+#define VSENSE_FOR_RBATT	0x1
+#define VBATT_FOR_RBATT		0x2
+#define CC_MSB			0x3
+#define CC_LSB			0x4
+#define LAST_GOOD_OCV_VALUE	0x5
+#define VSENSE_AVG		0x6
+#define VBATT_AVG		0x7
+
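+/*
+ * Select one of the BMS output types above via BMS_CONTROL and read the
+ * 16-bit result from the BMS_OUTPUT0/1 registers.
+ */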
+static int pm_bms_read_output_data(struct pm8921_bms_chip *chip, int type,
+						int16_t *result)
+{
+	int rc;
+	u8 reg;
+
+	if (!result) {
+		pr_err("result pointer null\n");
+		return -EINVAL;
+	}
+	*result = 0;
+	if (type < OCV_FOR_RBATT || type > VBATT_AVG) {
+		pr_err("invalid type %d asked to read\n", type);
+		return -EINVAL;
+	}
+	rc = pm_bms_masked_write(chip, BMS_CONTROL, SELECT_OUTPUT_DATA,
+					type << SELECT_OUTPUT_TYPE_SHIFT);
+	if (rc) {
+		pr_err("fail to select %d type in BMS_CONTROL rc = %d\n",
+						type, rc);
+		return rc;
+	}
+
+	rc = pm8xxx_readb(chip->dev->parent, BMS_OUTPUT0, &reg);
+	if (rc) {
+		pr_err("fail to read BMS_OUTPUT0 for type %d rc = %d\n",
+			type, rc);
+		return rc;
+	}
+	*result = reg;
+	rc = pm8xxx_readb(chip->dev->parent, BMS_OUTPUT1, &reg);
+	if (rc) {
+		pr_err("fail to read BMS_OUTPUT1 for type %d rc = %d\n",
+			type, rc);
+		return rc;
+	}
+	*result |= reg << 8;
+	pr_debug("type %d result %x", type, *result);
+	return 0;
+}
+
+#define V_PER_BIT_MUL_FACTOR	977
+#define V_PER_BIT_DIV_FACTOR	10
+#define CONV_READING(a)		(((a) * (int)V_PER_BIT_MUL_FACTOR)\
+				/V_PER_BIT_DIV_FACTOR)
+
+/* returns the signed value read from the hardware */
+static int read_cc(struct pm8921_bms_chip *chip, int *result)
+{
+	int rc;
+	uint16_t msw, lsw;
+
+	rc = pm_bms_read_output_data(chip, CC_LSB, &lsw);
+	if (rc) {
+		pr_err("fail to read CC_LSB rc = %d\n", rc);
+		return rc;
+	}
+	rc = pm_bms_read_output_data(chip, CC_MSB, &msw);
+	if (rc) {
+		pr_err("fail to read CC_MSB rc = %d\n", rc);
+		return rc;
+	}
+	*result = msw << 16 | lsw;
+	pr_debug("msw = %04x lsw = %04x cc = %d\n", msw, lsw, *result);
+	return 0;
+}
+
+static int read_last_good_ocv(struct pm8921_bms_chip *chip, uint *result)
+{
+	int rc;
+	uint16_t reading;
+
+	rc = pm_bms_read_output_data(chip, LAST_GOOD_OCV_VALUE, &reading);
+	if (rc) {
+		pr_err("fail to read LAST_GOOD_OCV_VALUE rc = %d\n", rc);
+		return rc;
+	}
+	*result = CONV_READING(reading);
+	pr_debug("raw = %04x ocv_microV = %u\n", reading, *result);
+	return 0;
+}
+
+static int read_vbatt_for_rbatt(struct pm8921_bms_chip *chip, uint *result)
+{
+	int rc;
+	uint16_t reading;
+
+	rc = pm_bms_read_output_data(chip, VBATT_FOR_RBATT, &reading);
+	if (rc) {
+		pr_err("fail to read VBATT_FOR_RBATT rc = %d\n", rc);
+		return rc;
+	}
+	*result = CONV_READING(reading);
+	pr_debug("raw = %04x vbatt_for_r_microV = %u\n", reading, *result);
+	return 0;
+}
+
+static int read_vsense_for_rbatt(struct pm8921_bms_chip *chip, uint *result)
+{
+	int rc;
+	uint16_t reading;
+
+	rc = pm_bms_read_output_data(chip, VSENSE_FOR_RBATT, &reading);
+	if (rc) {
+		pr_err("fail to read VSENSE_FOR_RBATT rc = %d\n", rc);
+		return rc;
+	}
+	*result = CONV_READING(reading);
+	pr_debug("raw = %04x vsense_for_r_microV = %u\n", reading, *result);
+	return 0;
+}
+
+static int read_ocv_for_rbatt(struct pm8921_bms_chip *chip, uint *result)
+{
+	int rc;
+	uint16_t reading;
+
+	rc = pm_bms_read_output_data(chip, OCV_FOR_RBATT, &reading);
+	if (rc) {
+		pr_err("fail to read OCV_FOR_RBATT rc = %d\n", rc);
+		return rc;
+	}
+	*result = CONV_READING(reading);
+	pr_debug("read = %04x ocv_for_r_microV = %u\n", reading, *result);
+	return 0;
+}
+
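+/* Linearly interpolate y at x between the points (x0, y0) and (x1, y1). */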
+static int linear_interpolate(int y0, int x0, int y1, int x1, int x)
+{
+	if (y0 == y1 || x == x0)
+		return y0;
+	if (x1 == x0 || x == x1)
+		return y1;
+
+	return y0 + ((y1 - y0) * (x - x0) / (x1 - x0));
+}
+
+static int interpolate_single_lut(struct single_row_lut *lut, int x)
+{
+	int i, result;
+
+	if (x < lut->x[0]) {
+		pr_err("x %d less than known range returning y = %d\n",
+							x, lut->y[0]);
+		return lut->y[0];
+	}
+	if (x > lut->x[lut->cols - 1]) {
+		pr_err("x %d more than known range returning y = %d\n",
+						x, lut->y[lut->cols - 1]);
+		return lut->y[lut->cols - 1];
+	}
+
+	for (i = 0; i < lut->cols; i++)
+		if (x <= lut->x[i])
+			break;
+	if (x == lut->x[i]) {
+		result = lut->y[i];
+	} else {
+		result = linear_interpolate(
+			lut->y[i - 1],
+			lut->x[i - 1],
+			lut->y[i],
+			lut->x[i],
+			x);
+	}
+	return result;
+}
+
+static int interpolate_fcc(struct pm8921_bms_chip *chip, int batt_temp)
+{
+	return interpolate_single_lut(chip->fcc_temp_lut, batt_temp);
+}
+
+static int interpolate_scalingfactor_fcc(struct pm8921_bms_chip *chip,
+								int cycles)
+{
+	return interpolate_single_lut(chip->fcc_sf_lut, cycles);
+}
+
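+/*
+ * Interpolate the percent-charge scaling factor in two dimensions: first
+ * along the charge-cycle axis for the two bracketing percent rows, then
+ * between those rows for the requested percent charge.
+ */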
+static int interpolate_scalingfactor_pc(struct pm8921_bms_chip *chip,
+				int cycles, int pc)
+{
+	int i, scalefactorrow1, scalefactorrow2, scalefactor, row1, row2;
+	int rows = chip->pc_sf_lut->rows;
+	int cols = chip->pc_sf_lut->cols;
+
+	if (pc > chip->pc_sf_lut->percent[0]) {
+		pr_err("pc %d greater than known pc ranges for sf\n", pc);
+		row1 = 0;
+		row2 = 0;
+	}
+	if (pc < chip->pc_sf_lut->percent[rows - 1]) {
+		pr_err("pc %d less than known pc ranges for sf", pc);
+		row1 = rows - 1;
+		row2 = rows - 1;
+	}
+	for (i = 0; i < rows; i++) {
+		if (pc == chip->pc_sf_lut->percent[i]) {
+			row1 = i;
+			row2 = i;
+			break;
+		}
+		if (pc > chip->pc_sf_lut->percent[i]) {
+			row1 = i - 1;
+			row2 = i;
+			break;
+		}
+	}
+
+	if (cycles < chip->pc_sf_lut->cycles[0])
+		cycles = chip->pc_sf_lut->cycles[0];
+	if (cycles > chip->pc_sf_lut->cycles[cols - 1])
+		cycles = chip->pc_sf_lut->cycles[cols - 1];
+
+	for (i = 0; i < cols; i++)
+		if (cycles <= chip->pc_sf_lut->cycles[i])
+			break;
+	if (cycles == chip->pc_sf_lut->cycles[i]) {
+		scalefactor = linear_interpolate(
+				chip->pc_sf_lut->sf[row1][i],
+				chip->pc_sf_lut->percent[row1],
+				chip->pc_sf_lut->sf[row2][i],
+				chip->pc_sf_lut->percent[row2],
+				pc);
+		return scalefactor;
+	}
+
+	scalefactorrow1 = linear_interpolate(
+				chip->pc_sf_lut->sf[row1][i - 1],
+				chip->pc_sf_lut->cycles[i - 1],
+				chip->pc_sf_lut->sf[row1][i],
+				chip->pc_sf_lut->cycles[i],
+				cycles);
+
+	scalefactorrow2 = linear_interpolate(
+				chip->pc_sf_lut->sf[row2][i - 1],
+				chip->pc_sf_lut->cycles[i - 1],
+				chip->pc_sf_lut->sf[row2][i],
+				chip->pc_sf_lut->cycles[i],
+				cycles);
+
+	scalefactor = linear_interpolate(
+				scalefactorrow1,
+				chip->pc_sf_lut->percent[row1],
+				scalefactorrow2,
+				chip->pc_sf_lut->percent[row2],
+				pc);
+
+	return scalefactor;
+}
+
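+/*
+ * Look up percent charge from the OCV/temperature table: clamp batt_temp to
+ * the table range, then interpolate over OCV (and over temperature when
+ * batt_temp falls between two table columns).
+ */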
+static int interpolate_pc(struct pm8921_bms_chip *chip,
+				int batt_temp, int ocv)
+{
+	int i, j, ocvi, ocviplusone, pc = 0;
+	int rows = chip->pc_temp_ocv_lut->rows;
+	int cols = chip->pc_temp_ocv_lut->cols;
+
+	if (batt_temp < chip->pc_temp_ocv_lut->temp[0]) {
+		pr_err("batt_temp %d < known temp range for pc\n", batt_temp);
+		batt_temp = chip->pc_temp_ocv_lut->temp[0];
+	}
+	if (batt_temp > chip->pc_temp_ocv_lut->temp[cols - 1]) {
+		pr_err("batt_temp %d > known temp range for pc\n", batt_temp);
+		batt_temp = chip->pc_temp_ocv_lut->temp[cols - 1];
+	}
+
+	for (j = 0; j < cols; j++)
+		if (batt_temp <= chip->pc_temp_ocv_lut->temp[j])
+			break;
+	if (batt_temp == chip->pc_temp_ocv_lut->temp[j]) {
+		/* found an exact match for temp in the table */
+		if (ocv >= chip->pc_temp_ocv_lut->ocv[0][j])
+			return chip->pc_temp_ocv_lut->percent[0];
+		if (ocv <= chip->pc_temp_ocv_lut->ocv[rows - 1][j])
+			return chip->pc_temp_ocv_lut->percent[rows - 1];
+		for (i = 0; i < rows; i++) {
+			if (ocv >= chip->pc_temp_ocv_lut->ocv[i][j]) {
+				if (ocv == chip->pc_temp_ocv_lut->ocv[i][j])
+					return
+					chip->pc_temp_ocv_lut->percent[i];
+				pc = linear_interpolate(
+					chip->pc_temp_ocv_lut->percent[i],
+					chip->pc_temp_ocv_lut->ocv[i][j],
+					chip->pc_temp_ocv_lut->percent[i - 1],
+					chip->pc_temp_ocv_lut->ocv[i - 1][j],
+					ocv);
+				return pc;
+			}
+		}
+	}
+
+	if (ocv >= chip->pc_temp_ocv_lut->ocv[0][j])
+		return chip->pc_temp_ocv_lut->percent[0];
+	if (ocv <= chip->pc_temp_ocv_lut->ocv[rows - 1][j - 1])
+		return chip->pc_temp_ocv_lut->percent[rows - 1];
+	for (i = 0; i < rows; i++) {
+		if (ocv >= chip->pc_temp_ocv_lut->ocv[i][j - 1]
+			&& ocv <= chip->pc_temp_ocv_lut->ocv[i][j]) {
+			pc = chip->pc_temp_ocv_lut->percent[i];
+
+			if (i < rows - 1
+				&& ocv >=
+					chip->pc_temp_ocv_lut->ocv[i + 1][j - 1]
+				&& ocv <=
+					chip->pc_temp_ocv_lut->ocv[i + 1][j]) {
+				ocvi = linear_interpolate(
+					chip->pc_temp_ocv_lut->ocv[i][j - 1],
+					chip->pc_temp_ocv_lut->temp[j - 1],
+					chip->pc_temp_ocv_lut->ocv[i][j],
+					chip->pc_temp_ocv_lut->temp[j],
+					batt_temp);
+
+				ocviplusone = linear_interpolate(
+					chip->pc_temp_ocv_lut
+							->ocv[i + 1][j - 1],
+					chip->pc_temp_ocv_lut->temp[j - 1],
+					chip->pc_temp_ocv_lut->ocv[i + 1][j],
+					chip->pc_temp_ocv_lut->temp[j],
+					batt_temp);
+
+				pc = linear_interpolate(
+					chip->pc_temp_ocv_lut->percent[i],
+					ocvi,
+					chip->pc_temp_ocv_lut->percent[i + 1],
+					ocviplusone,
+					ocv);
+			}
+			return pc;
+		}
+	}
+
+	pr_err("%d ocv wasn't found for temp %d in the LUT returning pc = %d\n",
+							ocv, batt_temp, pc);
+	return pc;
+}
+
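+/*
+ * Battery resistance in milliohms, derived from the BMS samples:
+ * rbatt = (ocv - vbatt) * r_sense / vsense.
+ */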
+static int calculate_rbatt(struct pm8921_bms_chip *chip)
+{
+	int rc;
+	unsigned int ocv, vsense, vbatt, r_batt;
+
+	rc = read_ocv_for_rbatt(chip, &ocv);
+	if (rc) {
+		pr_err("fail to read ocv_for_rbatt rc = %d\n", rc);
+		ocv = 0;
+	}
+	rc = read_vbatt_for_rbatt(chip, &vbatt);
+	if (rc) {
+		pr_err("fail to read vbatt_for_rbatt rc = %d\n", rc);
+		ocv = 0;
+	}
+	rc = read_vsense_for_rbatt(chip, &vsense);
+	if (rc) {
+		pr_err("fail to read vsense_for_rbatt rc = %d\n", rc);
+		ocv = 0;
+	}
+	if (ocv == 0
+		|| ocv == vbatt
+		|| vsense == 0) {
+		pr_warning("incorrect reading ocv = %d, vbatt = %d, vsen = %d\n",
+					ocv, vbatt, vsense);
+		return -EINVAL;
+	}
+	r_batt = ((ocv - vbatt) * chip->r_sense) / vsense;
+	pr_debug("r_batt = %umilliOhms", r_batt);
+	return r_batt;
+}
+
+static int calculate_fcc(struct pm8921_bms_chip *chip, int batt_temp,
+							int chargecycles)
+{
+	int initfcc, result, scalefactor = 0;
+
+	initfcc = interpolate_fcc(chip, batt_temp);
+	pr_debug("initfcc = %umAh batt_temp = %d\n", initfcc, batt_temp);
+
+	scalefactor = interpolate_scalingfactor_fcc(chip, chargecycles);
+	pr_debug("scalefactor = %d batt_temp = %d\n", scalefactor, batt_temp);
+
+	/* Multiply the initial FCC value by the scale factor. */
+	result = (initfcc * scalefactor) / 100;
+	pr_debug("fcc mAh = %d\n", result);
+	return result;
+}
+
+static int calculate_pc(struct pm8921_bms_chip *chip, int ocv_uv, int batt_temp,
+							int chargecycles)
+{
+	int pc, scalefactor;
+
+	pc = interpolate_pc(chip, batt_temp, ocv_uv / 1000);
+	pr_debug("pc = %u for ocv = %dmicroVolts batt_temp = %d\n",
+					pc, ocv_uv, batt_temp);
+
+	scalefactor = interpolate_scalingfactor_pc(chip, chargecycles, pc);
+	pr_debug("scalefactor = %u batt_temp = %d\n", scalefactor, batt_temp);
+
+	/* Multiply the interpolated percent charge by the scale factor. */
+	pc = (pc * scalefactor) / 100;
+	return pc;
+}
+
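+/*
+ * Convert a raw coulomb counter reading to microvolts across the sense
+ * resistor, and from there to microvolt-hours; dividing by r_sense then
+ * yields milliamp-hours (see calculate_cc_mvh()).
+ */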
+#define CC_TO_MICROVOLT(cc)		div_s64((cc) * 1085069, 100000)
+#define CCMICROVOLT_TO_UVH(cc_uv)	div_s64((cc_uv) * 55, 32768 * 3600)
+
+static void calculate_cc_mvh(struct pm8921_bms_chip *chip, int64_t *val,
+			int *coulumb_counter, int *update_userspace)
+{
+	int rc;
+	int64_t cc_voltage_uv, cc_uvh, cc_mah;
+
+	rc = read_cc(the_chip, coulumb_counter);
+	if (rc) {
+		*coulumb_counter = (last_coulumb_counter < 0) ?
+			DEFAULT_COULUMB_COUNTER : last_coulumb_counter;
+		pr_err("couldn't read coulomb counter err = %d assuming %d\n",
+							rc, *coulumb_counter);
+		*update_userspace = 0;
+	}
+	cc_voltage_uv = (int64_t)*coulumb_counter;
+	cc_voltage_uv = CC_TO_MICROVOLT(cc_voltage_uv);
+	pr_debug("cc_voltage_uv = %lld microvolts\n", cc_voltage_uv);
+	cc_uvh = CCMICROVOLT_TO_UVH(cc_voltage_uv);
+	pr_debug("cc_uvh = %lld micro_volt_hour\n", cc_uvh);
+	cc_mah = div_s64(cc_uvh, chip->r_sense);
+	*val = cc_mah;
+}
+
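+/*
+ * State of charge in percent:
+ *   soc = (remaining_charge - cc_mah - unusable_charge) * 100
+ *         / (fcc - unusable_charge)
+ * where remaining charge comes from the last good OCV, cc_mah from the
+ * coulomb counter and unusable charge from the battery resistance model.
+ */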
+static int calculate_state_of_charge(struct pm8921_bms_chip *chip,
+						int batt_temp, int chargecycles)
+{
+	int remaining_usable_charge, fcc, unusable_charge;
+	int remaining_charge, soc, rc, ocv, pc, coulumb_counter;
+	int rbatt, voltage_unusable_uv, pc_unusable;
+	int update_userspace = 1;
+	int64_t cc_mah;
+
+	rbatt = calculate_rbatt(chip);
+	if (rbatt < 0) {
+		rbatt = (last_rbatt < 0) ? DEFAULT_RBATT_MOHMS : last_rbatt;
+		pr_err("failed to read rbatt assuming %d\n",
+						rbatt);
+		update_userspace = 0;
+	}
+	pr_debug("rbatt = %umilliOhms", rbatt);
+
+	fcc = calculate_fcc(chip, batt_temp, chargecycles);
+	if (fcc < -EINVAL) {
+		fcc = (last_fcc < 0) ? chip->fcc : last_fcc;
+		pr_err("failed to read fcc assuming %d\n", fcc);
+		update_userspace = 0;
+	}
+	pr_debug("fcc = %umAh", fcc);
+
+	/* calculate unusable charge */
+	voltage_unusable_uv = (rbatt * chip->i_test)
+						+ (chip->v_failure * 1000);
+	pc_unusable = calculate_pc(chip, voltage_unusable_uv,
+						batt_temp, chargecycles);
+	if (pc_unusable < 0) {
+		unusable_charge = (last_unusable_charge < 0) ?
+			DEFAULT_UNUSABLE_CHARGE_MAH : last_unusable_charge;
+		pr_err("unusable_charge failed assuming %d\n", unusable_charge);
+	} else {
+		unusable_charge = (fcc * pc_unusable) / 100;
+	}
+	pr_debug("unusable_charge = %umAh at temp = %d, fcc = %umAh "
+			"unusable_voltage = %umicroVolts pc_unusable = %d\n",
+			unusable_charge, batt_temp, fcc,
+			voltage_unusable_uv, pc_unusable);
+
+	/* calculate remaining charge */
+	rc = read_last_good_ocv(chip, &ocv);
+	if (rc || ocv == 0) {
+		ocv = (last_ocv_uv < 0) ? DEFAULT_OCV_MICROVOLTS : last_ocv_uv;
+		pr_err("ocv failed assuming %d rc = %d\n", ocv, rc);
+		update_userspace = 0;
+	}
+	pc = calculate_pc(chip, ocv, batt_temp, chargecycles);
+	if (pc < 0) {
+		remaining_charge = (last_remaining_charge < 0) ?
+			DEFAULT_REMAINING_CHARGE_MAH : last_remaining_charge;
+		pr_err("calculate remaining charge failed assuming %d\n",
+				remaining_charge);
+		update_userspace = 0;
+	} else {
+		remaining_charge = (fcc * pc) / 100;
+	}
+	pr_debug("remaining_charge = %umAh ocv = %d pc = %d\n",
+			remaining_charge, ocv, pc);
+
+	/* calculate cc milli_volt_hour */
+	calculate_cc_mvh(chip, &cc_mah, &coulumb_counter, &update_userspace);
+	pr_debug("cc_mah = %lldmAh cc = %d\n", cc_mah, coulumb_counter);
+
+	/* calculate remaining usable charge */
+	remaining_usable_charge = remaining_charge - cc_mah - unusable_charge;
+	pr_debug("remaining_usable_charge = %dmAh\n", remaining_usable_charge);
+	if (remaining_usable_charge < 0) {
+		pr_err("bad rem_usb_chg cc_mah %lld, rem_chg %d unusb_chg %d\n",
+				cc_mah, remaining_charge, unusable_charge);
+		update_userspace = 0;
+	}
+
+	soc = (remaining_usable_charge * 100) / (fcc - unusable_charge);
+	if (soc > 100 || soc < 0) {
+		pr_err("bad soc rem_usb_chg %d fcc %d unusb_chg %d\n",
+				remaining_usable_charge, fcc, unusable_charge);
+		update_userspace = 0;
+	}
+	pr_debug("soc = %u%%\n", soc);
+
+	if (update_userspace) {
+		last_rbatt = rbatt;
+		last_fcc = fcc;
+		last_unusable_charge = unusable_charge;
+		last_ocv_uv = ocv;
+		last_remaining_charge = remaining_charge;
+		last_coulumb_counter = coulumb_counter;
+		last_soc = soc;
+	}
+	return soc;
+}
+
+int pm8921_bms_get_percent_charge(void)
+{
+	/* TODO get batt_temp from ADC */
+	int batt_temp = 73;
+
+	return calculate_state_of_charge(the_chip,
+					batt_temp, last_chargecycles);
+}
+EXPORT_SYMBOL_GPL(pm8921_bms_get_percent_charge);
+
+static int start_percent;
+static int end_percent;
+void pm8921_bms_charging_began(void)
+{
+	/* TODO get batt_temp from ADC */
+	int batt_temp = 73;
+
+	start_percent = calculate_state_of_charge(the_chip,
+					batt_temp, last_chargecycles);
+	pr_debug("start_percent = %u%%\n", start_percent);
+}
+EXPORT_SYMBOL_GPL(pm8921_bms_charging_began);
+
+void pm8921_bms_charging_end(void)
+{
+	/* TODO get batt_temp from ADC */
+	int batt_temp = 73;
+
+	end_percent = calculate_state_of_charge(the_chip,
+					batt_temp, last_chargecycles);
+	if (end_percent > start_percent) {
+		last_charge_increase = end_percent - start_percent;
+		if (last_charge_increase > 100) {
+			last_chargecycles++;
+			last_charge_increase = last_charge_increase % 100;
+		}
+	}
+	pr_debug("end_percent = %u%% last_charge_increase = %d "
+			"last_chargecycles = %d\n",
+			end_percent,
+			last_charge_increase,
+			last_chargecycles);
+}
+EXPORT_SYMBOL_GPL(pm8921_bms_charging_end);
+
+static irqreturn_t pm8921_bms_sbi_write_ok_handler(int irq, void *data)
+{
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t pm8921_bms_cc_thr_handler(int irq, void *data)
+{
+	return IRQ_HANDLED;
+}
+static irqreturn_t pm8921_bms_vsense_thr_handler(int irq, void *data)
+{
+	return IRQ_HANDLED;
+}
+static irqreturn_t pm8921_bms_vsense_for_r_handler(int irq, void *data)
+{
+	return IRQ_HANDLED;
+}
+static irqreturn_t pm8921_bms_ocv_for_r_handler(int irq, void *data)
+{
+	return IRQ_HANDLED;
+}
+static irqreturn_t pm8921_bms_good_ocv_handler(int irq, void *data)
+{
+	return IRQ_HANDLED;
+}
+static irqreturn_t pm8921_bms_vsense_avg_handler(int irq, void *data)
+{
+	return IRQ_HANDLED;
+}
+
+struct pm_bms_irq_init_data {
+	unsigned int	irq_id;
+	char		*name;
+	unsigned long	flags;
+	irqreturn_t	(*handler)(int, void *);
+};
+
+#define BMS_IRQ(_id, _flags, _handler) \
+{ \
+	.irq_id		= _id, \
+	.name		= #_id, \
+	.flags		= _flags, \
+	.handler	= _handler, \
+}
+
+struct pm_bms_irq_init_data bms_irq_data[] = {
+	BMS_IRQ(PM8921_BMS_SBI_WRITE_OK, IRQF_TRIGGER_RISING,
+				pm8921_bms_sbi_write_ok_handler),
+	BMS_IRQ(PM8921_BMS_CC_THR, IRQF_TRIGGER_RISING,
+				pm8921_bms_cc_thr_handler),
+	BMS_IRQ(PM8921_BMS_VSENSE_THR, IRQF_TRIGGER_RISING,
+				pm8921_bms_vsense_thr_handler),
+	BMS_IRQ(PM8921_BMS_VSENSE_FOR_R, IRQF_TRIGGER_RISING,
+				pm8921_bms_vsense_for_r_handler),
+	BMS_IRQ(PM8921_BMS_OCV_FOR_R, IRQF_TRIGGER_RISING,
+				pm8921_bms_ocv_for_r_handler),
+	BMS_IRQ(PM8921_BMS_GOOD_OCV, IRQF_TRIGGER_RISING,
+				pm8921_bms_good_ocv_handler),
+	BMS_IRQ(PM8921_BMS_VSENSE_AVG, IRQF_TRIGGER_RISING,
+				pm8921_bms_vsense_avg_handler),
+};
+
+static void free_irqs(struct pm8921_bms_chip *chip)
+{
+	int i;
+
+	for (i = 0; i < PM_BMS_MAX_INTS; i++)
+		if (chip->pmic_bms_irq[i]) {
+			free_irq(chip->pmic_bms_irq[i], NULL);
+			chip->pmic_bms_irq[i] = 0;
+		}
+}
+
+static int __devinit request_irqs(struct pm8921_bms_chip *chip,
+					struct platform_device *pdev)
+{
+	struct resource *res;
+	int ret, i;
+
+	ret = 0;
+	bitmap_fill(chip->enabled_irqs, PM_BMS_MAX_INTS);
+
+	for (i = 0; i < ARRAY_SIZE(bms_irq_data); i++) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+				bms_irq_data[i].name);
+		if (res == NULL) {
+			pr_err("couldn't find %s\n", bms_irq_data[i].name);
+			goto err_out;
+		}
+		ret = request_irq(res->start, bms_irq_data[i].handler,
+			bms_irq_data[i].flags,
+			bms_irq_data[i].name, chip);
+		if (ret < 0) {
+			pr_err("couldn't request %d (%s) %d\n", res->start,
+					bms_irq_data[i].name, ret);
+			goto err_out;
+		}
+		chip->pmic_bms_irq[bms_irq_data[i].irq_id] = res->start;
+		pm8921_bms_disable_irq(chip, bms_irq_data[i].irq_id);
+	}
+	return 0;
+
+err_out:
+	free_irqs(chip);
+	return -EINVAL;
+}
+
+#define EN_BMS_BIT	BIT(7)
+#define EN_PON_HS_BIT	BIT(0)
+static int __devinit pm8921_bms_hw_init(struct pm8921_bms_chip *chip)
+{
+	int rc;
+
+	rc = pm_bms_masked_write(chip, BMS_CONTROL,
+			EN_BMS_BIT | EN_PON_HS_BIT, EN_BMS_BIT | EN_PON_HS_BIT);
+	if (rc) {
+		pr_err("failed to enable pon and bms addr = %d %d",
+				BMS_CONTROL, rc);
+	}
+
+	return 0;
+}
+
+enum {
+	CALC_RBATT,
+	CALC_FCC,
+	CALC_PC,
+	CALC_SOC,
+};
+
+static int test_batt_temp = 5;
+static int test_chargecycle = 150;
+static int test_ocv = 3900000;
+enum {
+	TEST_BATT_TEMP,
+	TEST_CHARGE_CYCLE,
+	TEST_OCV,
+};
+static int get_test_param(void *data, u64 * val)
+{
+	switch ((int)data) {
+	case TEST_BATT_TEMP:
+		*val = test_batt_temp;
+		break;
+	case TEST_CHARGE_CYCLE:
+		*val = test_chargecycle;
+		break;
+	case TEST_OCV:
+		*val = test_ocv;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+static int set_test_param(void *data, u64  val)
+{
+	switch ((int)data) {
+	case TEST_BATT_TEMP:
+		test_batt_temp = (int)val;
+		break;
+	case TEST_CHARGE_CYCLE:
+		test_chargecycle = (int)val;
+		break;
+	case TEST_OCV:
+		test_ocv = (int)val;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(temp_fops, get_test_param, set_test_param, "%llu\n");
+
+static int get_calc(void *data, u64 * val)
+{
+	int param = (int)data;
+	int ret = 0;
+
+	*val = 0;
+
+	/* calculation type passed in via data */
+	switch (param) {
+	case CALC_RBATT:
+		*val = calculate_rbatt(the_chip);
+		break;
+	case CALC_FCC:
+		*val = calculate_fcc(the_chip, test_batt_temp,
+							test_chargecycle);
+		break;
+	case CALC_PC:
+		*val = calculate_pc(the_chip, test_ocv, test_batt_temp,
+							test_chargecycle);
+		break;
+	case CALC_SOC:
+		*val = calculate_state_of_charge(the_chip,
+					test_batt_temp, test_chargecycle);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(calc_fops, get_calc, NULL, "%llu\n");
+
+static int get_reading(void *data, u64 * val)
+{
+	int param = (int)data;
+	int ret = 0;
+
+	*val = 0;
+
+	/* output register type passed in via data */
+	switch (param) {
+	case CC_MSB:
+	case CC_LSB:
+		read_cc(the_chip, (int *)val);
+		break;
+	case LAST_GOOD_OCV_VALUE:
+		read_last_good_ocv(the_chip, (uint *)val);
+		break;
+	case VBATT_FOR_RBATT:
+		read_vbatt_for_rbatt(the_chip, (uint *)val);
+		break;
+	case VSENSE_FOR_RBATT:
+		read_vsense_for_rbatt(the_chip, (uint *)val);
+		break;
+	case OCV_FOR_RBATT:
+		read_ocv_for_rbatt(the_chip, (uint *)val);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(reading_fops, get_reading, NULL, "%llu\n");
+
+static int get_rt_status(void *data, u64 * val)
+{
+	int i = (int)data;
+	int ret;
+
+	/* global irq number passed in via data */
+	ret = pm_bms_get_rt_status(the_chip, i);
+	*val = ret;
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(rt_fops, get_rt_status, NULL, "%llu\n");
+
+static int get_reg(void *data, u64 * val)
+{
+	int addr = (int)data;
+	int ret;
+	u8 temp;
+
+	ret = pm8xxx_readb(the_chip->dev->parent, addr, &temp);
+	if (ret) {
+		pr_err("pm8xxx_readb to %x value = %d errored = %d\n",
+			addr, temp, ret);
+		return -EAGAIN;
+	}
+	*val = temp;
+	return 0;
+}
+
+static int set_reg(void *data, u64 val)
+{
+	int addr = (int)data;
+	int ret;
+	u8 temp;
+
+	temp = (u8) val;
+	ret = pm8xxx_writeb(the_chip->dev->parent, addr, temp);
+	if (ret) {
+		pr_err("pm8xxx_writeb to %x value = %d errored = %d\n",
+			addr, temp, ret);
+		return -EAGAIN;
+	}
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(reg_fops, get_reg, set_reg, "0x%02llx\n");
+
+static void create_debugfs_entries(struct pm8921_bms_chip *chip)
+{
+	int i;
+
+	chip->dent = debugfs_create_dir("pm8921_bms", NULL);
+
+	if (IS_ERR(chip->dent)) {
+		pr_err("pmic bms couldn't create debugfs dir\n");
+		return;
+	}
+
+	debugfs_create_file("BMS_CONTROL", 0644, chip->dent,
+			(void *)BMS_CONTROL, &reg_fops);
+	debugfs_create_file("BMS_OUTPUT0", 0644, chip->dent,
+			(void *)BMS_OUTPUT0, &reg_fops);
+	debugfs_create_file("BMS_OUTPUT1", 0644, chip->dent,
+			(void *)BMS_OUTPUT1, &reg_fops);
+	debugfs_create_file("BMS_TEST1", 0644, chip->dent,
+			(void *)BMS_TEST1, &reg_fops);
+	debugfs_create_file("CCADC_ANA_PARAM", 0644, chip->dent,
+			(void *)CCADC_ANA_PARAM, &reg_fops);
+	debugfs_create_file("CCADC_DIG_PARAM", 0644, chip->dent,
+			(void *)CCADC_DIG_PARAM, &reg_fops);
+	debugfs_create_file("CCADC_RSV", 0644, chip->dent,
+			(void *)CCADC_RSV, &reg_fops);
+	debugfs_create_file("CCADC_DATA0", 0644, chip->dent,
+			(void *)CCADC_DATA0, &reg_fops);
+	debugfs_create_file("CCADC_DATA1", 0644, chip->dent,
+			(void *)CCADC_DATA1, &reg_fops);
+	debugfs_create_file("CCADC_OFFSET_TRIM1", 0644, chip->dent,
+			(void *)CCADC_OFFSET_TRIM1, &reg_fops);
+	debugfs_create_file("CCADC_OFFSET_TRIM0", 0644, chip->dent,
+			(void *)CCADC_OFFSET_TRIM0, &reg_fops);
+
+	debugfs_create_file("test_batt_temp", 0644, chip->dent,
+				(void *)TEST_BATT_TEMP, &temp_fops);
+	debugfs_create_file("test_chargecycle", 0644, chip->dent,
+				(void *)TEST_CHARGE_CYCLE, &temp_fops);
+	debugfs_create_file("test_ocv", 0644, chip->dent,
+				(void *)TEST_OCV, &temp_fops);
+
+	debugfs_create_file("read_cc", 0644, chip->dent,
+				(void *)CC_MSB, &reading_fops);
+	debugfs_create_file("read_last_good_ocv", 0644, chip->dent,
+				(void *)LAST_GOOD_OCV_VALUE, &reading_fops);
+	debugfs_create_file("read_vbatt_for_rbatt", 0644, chip->dent,
+				(void *)VBATT_FOR_RBATT, &reading_fops);
+	debugfs_create_file("read_vsense_for_rbatt", 0644, chip->dent,
+				(void *)VSENSE_FOR_RBATT, &reading_fops);
+	debugfs_create_file("read_ocv_for_rbatt", 0644, chip->dent,
+				(void *)OCV_FOR_RBATT, &reading_fops);
+
+	debugfs_create_file("show_rbatt", 0644, chip->dent,
+				(void *)CALC_RBATT, &calc_fops);
+	debugfs_create_file("show_fcc", 0644, chip->dent,
+				(void *)CALC_FCC, &calc_fops);
+	debugfs_create_file("show_pc", 0644, chip->dent,
+				(void *)CALC_PC, &calc_fops);
+	debugfs_create_file("show_soc", 0644, chip->dent,
+				(void *)CALC_SOC, &calc_fops);
+
+	for (i = 0; i < ARRAY_SIZE(bms_irq_data); i++) {
+		if (chip->pmic_bms_irq[bms_irq_data[i].irq_id])
+			debugfs_create_file(bms_irq_data[i].name, 0444,
+				chip->dent,
+				(void *)bms_irq_data[i].irq_id,
+				&rt_fops);
+	}
+}
+
+static void calibrate_work(struct work_struct *work)
+{
+	/* TODO */
+}
+
+static int __devinit pm8921_bms_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct pm8921_bms_chip *chip;
+	const struct pm8921_bms_platform_data *pdata
+				= pdev->dev.platform_data;
+	if (!pdata) {
+		pr_err("missing platform data\n");
+		return -EINVAL;
+	}
+
+	chip = kzalloc(sizeof(struct pm8921_bms_chip),
+					GFP_KERNEL);
+	if (!chip) {
+		pr_err("Cannot allocate pm_bms_chip\n");
+		return -ENOMEM;
+	}
+
+	chip->dev = &pdev->dev;
+	chip->r_sense = pdata->r_sense;
+	chip->i_test = pdata->i_test;
+	chip->v_failure = pdata->v_failure;
+	chip->calib_delay_ms = pdata->calib_delay_ms;
+	chip->fcc = pdata->batt_data->fcc;
+
+	chip->fcc_temp_lut = pdata->batt_data->fcc_temp_lut;
+	chip->fcc_sf_lut = pdata->batt_data->fcc_sf_lut;
+	chip->pc_temp_ocv_lut = pdata->batt_data->pc_temp_ocv_lut;
+	chip->pc_sf_lut = pdata->batt_data->pc_sf_lut;
+
+	rc = pm8921_bms_hw_init(chip);
+	if (rc) {
+		pr_err("couldn't init hardware rc = %d\n", rc);
+		goto free_chip;
+	}
+
+	rc = request_irqs(chip, pdev);
+	if (rc) {
+		pr_err("couldn't register interrupts rc = %d\n", rc);
+		goto free_chip;
+	}
+
+	platform_set_drvdata(pdev, chip);
+	the_chip = chip;
+	create_debugfs_entries(chip);
+
+	INIT_DELAYED_WORK(&chip->calib_work, calibrate_work);
+	schedule_delayed_work(&chip->calib_work,
+			round_jiffies_relative(msecs_to_jiffies
+			(chip->calib_delay_ms)));
+	return 0;
+
+free_chip:
+	kfree(chip);
+	return rc;
+}
+
+static int __devexit pm8921_bms_remove(struct platform_device *pdev)
+{
+	struct pm8921_bms_chip *chip = platform_get_drvdata(pdev);
+
+	free_irqs(chip);
+	platform_set_drvdata(pdev, NULL);
+	the_chip = NULL;
+	kfree(chip);
+	return 0;
+}
+
+static struct platform_driver pm8921_bms_driver = {
+	.probe	= pm8921_bms_probe,
+	.remove	= __devexit_p(pm8921_bms_remove),
+	.driver	= {
+		.name	= PM8921_BMS_DEV_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init pm8921_bms_init(void)
+{
+	return platform_driver_register(&pm8921_bms_driver);
+}
+
+static void __exit pm8921_bms_exit(void)
+{
+	platform_driver_unregister(&pm8921_bms_driver);
+}
+
+late_initcall(pm8921_bms_init);
+module_exit(pm8921_bms_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC8921 bms driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:" PM8921_BMS_DEV_NAME);
diff --git a/drivers/power/pm8921-charger.c b/drivers/power/pm8921-charger.c
new file mode 100644
index 0000000..6775fba
--- /dev/null
+++ b/drivers/power/pm8921-charger.c
@@ -0,0 +1,1560 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/errno.h>
+#include <linux/mfd/pm8xxx/pm8921-charger.h>
+#include <linux/mfd/pm8xxx/pm8921-bms.h>
+#include <linux/mfd/pm8921-adc.h>
+#include <linux/mfd/pm8xxx/core.h>
+#include <linux/interrupt.h>
+#include <linux/power_supply.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+
+#include <mach/msm_xo.h>
+#include <mach/msm_hsusb.h>
+
+#define CHG_BUCK_CLOCK_CTRL	0x14
+
+#define PBL_ACCESS1		0x04
+#define PBL_ACCESS2		0x05
+#define SYS_CONFIG_1		0x06
+#define SYS_CONFIG_2		0x07
+#define CHG_CNTRL		0x204
+#define CHG_IBAT_MAX		0x205
+#define CHG_TEST		0x206
+#define CHG_BUCK_CTRL_TEST1	0x207
+#define CHG_BUCK_CTRL_TEST2	0x208
+#define CHG_BUCK_CTRL_TEST3	0x209
+#define COMPARATOR_OVERRIDE	0x20A
+#define PSI_TXRX_SAMPLE_DATA_0	0x20B
+#define PSI_TXRX_SAMPLE_DATA_1	0x20C
+#define PSI_TXRX_SAMPLE_DATA_2	0x20D
+#define PSI_TXRX_SAMPLE_DATA_3	0x20E
+#define PSI_CONFIG_STATUS	0x20F
+#define CHG_IBAT_SAFE		0x210
+#define CHG_ITRICKLE		0x211
+#define CHG_CNTRL_2		0x212
+#define CHG_VBAT_DET		0x213
+#define CHG_VTRICKLE		0x214
+#define CHG_ITERM		0x215
+#define CHG_CNTRL_3		0x216
+#define CHG_VIN_MIN		0x217
+#define CHG_TWDOG		0x218
+#define CHG_TTRKL_MAX		0x219
+#define CHG_TEMP_THRESH		0x21A
+#define CHG_TCHG_MAX		0x21B
+#define USB_OVP_CONTROL		0x21C
+#define DC_OVP_CONTROL		0x21D
+#define USB_OVP_TEST		0x21E
+#define DC_OVP_TEST		0x21F
+#define CHG_VDD_MAX		0x220
+#define CHG_VDD_SAFE		0x221
+#define CHG_VBAT_BOOT_THRESH	0x222
+#define USB_OVP_TRIM		0x355
+#define BUCK_CONTROL_TRIM1	0x356
+#define BUCK_CONTROL_TRIM2	0x357
+#define BUCK_CONTROL_TRIM3	0x358
+#define BUCK_CONTROL_TRIM4	0x359
+#define CHG_DEFAULTS_TRIM	0x35A
+#define CHG_ITRIM		0x35B
+#define CHG_TTRIM		0x35C
+#define CHG_COMP_OVR		0x20A
+
+enum chg_fsm_state {
+	FSM_STATE_OFF_0 = 0,
+	FSM_STATE_BATFETDET_START_12 = 12,
+	FSM_STATE_BATFETDET_END_16 = 16,
+	FSM_STATE_ON_CHG_HIGHI_1 = 1,
+	FSM_STATE_ATC_2A = 2,
+	FSM_STATE_ATC_2B = 18,
+	FSM_STATE_ON_BAT_3 = 3,
+	FSM_STATE_ATC_FAIL_4 = 4,
+	FSM_STATE_DELAY_5 = 5,
+	FSM_STATE_ON_CHG_AND_BAT_6 = 6,
+	FSM_STATE_FAST_CHG_7 = 7,
+	FSM_STATE_TRKL_CHG_8 = 8,
+	FSM_STATE_CHG_FAIL_9 = 9,
+	FSM_STATE_EOC_10 = 10,
+	FSM_STATE_ON_CHG_VREGOK_11 = 11,
+	FSM_STATE_ATC_PAUSE_13 = 13,
+	FSM_STATE_FAST_CHG_PAUSE_14 = 14,
+	FSM_STATE_TRKL_CHG_PAUSE_15 = 15,
+	FSM_STATE_START_BOOT = 20,
+	FSM_STATE_FLCB_VREGOK = 21,
+	FSM_STATE_FLCB = 22,
+};
+
+enum pmic_chg_interrupts {
+	USBIN_VALID_IRQ = 0,
+	USBIN_OV_IRQ,
+	BATT_INSERTED_IRQ,
+	VBATDET_LOW_IRQ,
+	USBIN_UV_IRQ,
+	VBAT_OV_IRQ,
+	CHGWDOG_IRQ,
+	VCP_IRQ,
+	ATCDONE_IRQ,
+	ATCFAIL_IRQ,
+	CHGDONE_IRQ,
+	CHGFAIL_IRQ,
+	CHGSTATE_IRQ,
+	LOOP_CHANGE_IRQ,
+	FASTCHG_IRQ,
+	TRKLCHG_IRQ,
+	BATT_REMOVED_IRQ,
+	BATTTEMP_HOT_IRQ,
+	CHGHOT_IRQ,
+	BATTTEMP_COLD_IRQ,
+	CHG_GONE_IRQ,
+	BAT_TEMP_OK_IRQ,
+	COARSE_DET_LOW_IRQ,
+	VDD_LOOP_IRQ,
+	VREG_OV_IRQ,
+	VBATDET_IRQ,
+	BATFET_IRQ,
+	PSI_IRQ,
+	DCIN_VALID_IRQ,
+	DCIN_OV_IRQ,
+	DCIN_UV_IRQ,
+	PM_CHG_MAX_INTS,
+};
+
+struct bms_notify {
+	int			is_charging;
+	struct	work_struct	work;
+};
+
+/**
+ * struct pm8921_chg_chip - device information
+ * @dev:			device pointer to access the parent
+ * @is_usb_path_used:		indicates whether USB charging is used at all
+ * @is_dc_path_used:		indicates whether DC charging is used at all
+ * @usb_present:		present status of usb
+ * @dc_present:			present status of dc
+ * @usb_charger_current:	usb current to charge the battery with; used when
+ *				the usb path is enabled or charging is resumed
+ * @safety_time:		max time for which charging will happen
+ * @update_time:		how frequently userspace needs to be updated
+ * @max_voltage:		the max volts the batt should be charged up to
+ * @min_voltage:		the min battery voltage before turning the FET on
+ * @resume_voltage:		the voltage at which the battery should resume
+ *				charging
+ * @term_current:		the charging termination current
+ *
+ */
+struct pm8921_chg_chip {
+	struct device		*dev;
+	unsigned int		usb_present;
+	unsigned int		dc_present;
+	unsigned int		usb_charger_current;
+	unsigned int		pmic_chg_irq[PM_CHG_MAX_INTS];
+	unsigned int		safety_time;
+	unsigned int		update_time;
+	unsigned int		max_voltage;
+	unsigned int		min_voltage;
+	unsigned int		resume_voltage;
+	unsigned int		term_current;
+	unsigned int		vbat_channel;
+	struct power_supply	usb_psy;
+	struct power_supply	dc_psy;
+	struct power_supply	batt_psy;
+	struct dentry		*dent;
+	struct bms_notify	bms_notify;
+	DECLARE_BITMAP(enabled_irqs, PM_CHG_MAX_INTS);
+};
+
+static int charging_disabled;
+
+static struct pm8921_chg_chip *the_chip;
+
+static int pm_chg_masked_write(struct pm8921_chg_chip *chip, u16 addr,
+							u8 mask, u8 val)
+{
+	int rc;
+	u8 reg;
+
+	rc = pm8xxx_readb(chip->dev->parent, addr, &reg);
+	if (rc) {
+		pr_err("pm8xxx_readb failed: addr=%03X, rc=%d\n", addr, rc);
+		return rc;
+	}
+	reg &= ~mask;
+	reg |= val & mask;
+	rc = pm8xxx_writeb(chip->dev->parent, addr, reg);
+	if (rc) {
+		pr_err("pm8xxx_writeb failed: addr=%03X, rc=%d\n", addr, rc);
+		return rc;
+	}
+	return 0;
+}
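+
+/*
+ * Worked example of the read-modify-write above (illustrative values):
+ * if the register reads 0xB5 and we call
+ * pm_chg_masked_write(chip, addr, BIT(7), 0), then reg &= ~0x80 gives 0x35
+ * and reg |= (0 & 0x80) leaves 0x35, so only bit 7 is cleared and the other
+ * bits are preserved.
+ */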
+
+#define CAPTURE_FSM_STATE_CMD	0xC2
+#define READ_BANK_7		0x70
+#define READ_BANK_4		0x40
+static int pm_chg_get_fsm_state(struct pm8921_chg_chip *chip)
+{
+	u8 temp;
+	int err, ret = 0;
+
+	temp = CAPTURE_FSM_STATE_CMD;
+	err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
+	if (err) {
+		pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+		return err;
+	}
+
+	temp = READ_BANK_7;
+	err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
+	if (err) {
+		pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+		return err;
+	}
+
+	err = pm8xxx_readb(chip->dev->parent, CHG_TEST, &temp);
+	if (err) {
+		pr_err("pm8xxx_readb fail: addr=%03X, rc=%d\n", CHG_TEST, err);
+		return err;
+	}
+	/* get the lower 4 bits */
+	ret = temp & 0xF;
+
+	temp = READ_BANK_4;
+	err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
+	if (err) {
+		pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+		return err;
+	}
+
+	err = pm8xxx_readb(chip->dev->parent, CHG_TEST, &temp);
+	if (err) {
+		pr_err("pm8xxx_readb fail: addr=%03X, rc=%d\n", CHG_TEST, err);
+		return err;
+	}
+	/* get the upper 1 bit */
+	ret |= (temp & 0x1) << 4;
+	return  ret;
+}
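+
+/*
+ * Example of how the 5-bit FSM state is assembled above (illustrative
+ * values): if bank 7 of CHG_TEST reads 0x27 the low nibble gives ret = 0x7,
+ * and if the bank 4 read has its LSB clear then bit 4 stays 0, so the state
+ * is 7 (FSM_STATE_FAST_CHG_7). A bank-4 LSB of 1 would instead yield
+ * 0x17 = 23.
+ */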
+
+#define CHG_USB_SUSPEND_BIT  BIT(2)
+static int pm_chg_usb_suspend_enable(struct pm8921_chg_chip *chip, int enable)
+{
+	return pm_chg_masked_write(chip, CHG_CNTRL_3, CHG_USB_SUSPEND_BIT,
+			enable ? CHG_USB_SUSPEND_BIT : 0);
+}
+
+#define CHG_EN_BIT	BIT(7)
+static int pm_chg_auto_enable(struct pm8921_chg_chip *chip, int enable)
+{
+	return pm_chg_masked_write(chip, CHG_CNTRL_3, CHG_EN_BIT,
+				enable ? CHG_EN_BIT : 0);
+}
+
+#define CHG_CHARGE_DIS_BIT	BIT(1)
+static int pm_chg_charge_dis(struct pm8921_chg_chip *chip, int disable)
+{
+	return pm_chg_masked_write(chip, CHG_CNTRL, CHG_CHARGE_DIS_BIT,
+				disable ? CHG_CHARGE_DIS_BIT : 0);
+}
+
+#define PM8921_CHG_V_MIN_MV	3240
+#define PM8921_CHG_V_STEP_MV	20
+#define PM8921_CHG_VDDMAX_MAX	4500
+#define PM8921_CHG_VDDMAX_MIN	3400
+#define PM8921_CHG_V_MASK	0x7F
+static int pm_chg_vddmax_set(struct pm8921_chg_chip *chip, int voltage)
+{
+	u8 temp;
+
+	if (voltage < PM8921_CHG_VDDMAX_MIN
+			|| voltage > PM8921_CHG_VDDMAX_MAX) {
+		pr_err("bad mV=%d asked to set\n", voltage);
+		return -EINVAL;
+	}
+	temp = (voltage - PM8921_CHG_V_MIN_MV) / PM8921_CHG_V_STEP_MV;
+	pr_debug("voltage=%d setting %02x\n", voltage, temp);
+	return pm_chg_masked_write(chip, CHG_VDD_MAX, PM8921_CHG_V_MASK, temp);
+}
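+
+/*
+ * Worked example of the register encoding above: for voltage = 4200 mV,
+ * temp = (4200 - 3240) / 20 = 48 = 0x30, which is written into the low
+ * 7 bits (PM8921_CHG_V_MASK) of CHG_VDD_MAX.
+ */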
+
+#define PM8921_CHG_VDDSAFE_MIN	3400
+#define PM8921_CHG_VDDSAFE_MAX	4500
+static int pm_chg_vddsafe_set(struct pm8921_chg_chip *chip, int voltage)
+{
+	u8 temp;
+
+	if (voltage < PM8921_CHG_VDDSAFE_MIN
+			|| voltage > PM8921_CHG_VDDSAFE_MAX) {
+		pr_err("bad mV=%d asked to set\n", voltage);
+		return -EINVAL;
+	}
+	temp = (voltage - PM8921_CHG_V_MIN_MV) / PM8921_CHG_V_STEP_MV;
+	pr_debug("voltage=%d setting %02x\n", voltage, temp);
+	return pm_chg_masked_write(chip, CHG_VDD_SAFE, PM8921_CHG_V_MASK, temp);
+}
+
+#define PM8921_CHG_VBATDET_MIN	3240
+#define PM8921_CHG_VBATDET_MAX	5780
+static int pm_chg_vbatdet_set(struct pm8921_chg_chip *chip, int voltage)
+{
+	u8 temp;
+
+	if (voltage < PM8921_CHG_VBATDET_MIN
+			|| voltage > PM8921_CHG_VBATDET_MAX) {
+		pr_err("bad mV=%d asked to set\n", voltage);
+		return -EINVAL;
+	}
+	temp = (voltage - PM8921_CHG_V_MIN_MV) / PM8921_CHG_V_STEP_MV;
+	pr_debug("voltage=%d setting %02x\n", voltage, temp);
+	return pm_chg_masked_write(chip, CHG_VBAT_DET, PM8921_CHG_V_MASK, temp);
+}
+
+#define PM8921_CHG_IBATMAX_MIN	325
+#define PM8921_CHG_IBATMAX_MAX	2000
+#define PM8921_CHG_I_MIN_MA	225
+#define PM8921_CHG_I_STEP_MA	50
+#define PM8921_CHG_I_MASK	0x3F
+static int pm_chg_ibatmax_set(struct pm8921_chg_chip *chip, int chg_current)
+{
+	u8 temp;
+
+	if (chg_current < PM8921_CHG_IBATMAX_MIN
+			|| chg_current > PM8921_CHG_IBATMAX_MAX) {
+		pr_err("bad mA=%d asked to set\n", chg_current);
+		return -EINVAL;
+	}
+	temp = (chg_current - PM8921_CHG_I_MIN_MA) / PM8921_CHG_I_STEP_MA;
+	return pm_chg_masked_write(chip, CHG_IBAT_MAX, PM8921_CHG_I_MASK, temp);
+}
+
+#define PM8921_CHG_IBATSAFE_MIN	225
+#define PM8921_CHG_IBATSAFE_MAX	3375
+static int pm_chg_ibatsafe_set(struct pm8921_chg_chip *chip, int chg_current)
+{
+	u8 temp;
+
+	if (chg_current < PM8921_CHG_IBATSAFE_MIN
+			|| chg_current > PM8921_CHG_IBATSAFE_MAX) {
+		pr_err("bad mA=%d asked to set\n", chg_current);
+		return -EINVAL;
+	}
+	temp = (chg_current - PM8921_CHG_I_MIN_MA) / PM8921_CHG_I_STEP_MA;
+	return pm_chg_masked_write(chip, CHG_IBAT_SAFE,
+						PM8921_CHG_I_MASK, temp);
+}
+
+#define PM8921_CHG_ITERM_MIN		50
+#define PM8921_CHG_ITERM_MAX		200
+#define PM8921_CHG_ITERM_MIN_MA		50
+#define PM8921_CHG_ITERM_STEP_MA	10
+#define PM8921_CHG_ITERM_MASK		0xF
+static int pm_chg_iterm_set(struct pm8921_chg_chip *chip, int chg_current)
+{
+	u8 temp;
+
+	if (chg_current < PM8921_CHG_ITERM_MIN
+			|| chg_current > PM8921_CHG_ITERM_MAX) {
+		pr_err("bad mA=%d asked to set\n", chg_current);
+		return -EINVAL;
+	}
+
+	temp = (chg_current - PM8921_CHG_ITERM_MIN_MA)
+				/ PM8921_CHG_ITERM_STEP_MA;
+	return pm_chg_masked_write(chip, CHG_ITERM, PM8921_CHG_ITERM_MASK,
+					 temp);
+}
+
+#define PM8921_CHG_IUSB_MASK 0x1C
+#define PM8921_CHG_IUSB_MAX  7
+#define PM8921_CHG_IUSB_MIN  0
+static int pm_chg_iusbmax_set(struct pm8921_chg_chip *chip, int chg_current)
+{
+	u8 temp;
+
+	if (chg_current < PM8921_CHG_IUSB_MIN
+			|| chg_current > PM8921_CHG_IUSB_MAX) {
+		pr_err("bad mA=%d asked to set\n", chg_current);
+		return -EINVAL;
+	}
+	temp = chg_current << 2;
+	return pm_chg_masked_write(chip, PBL_ACCESS2, PM8921_CHG_IUSB_MASK,
+					 temp);
+}
+
+#define PM8921_CHG_WD_MASK 0x1F
+static int pm_chg_disable_wd(struct pm8921_chg_chip *chip)
+{
+	/* writing 0 to the wd timer disables it */
+	return pm_chg_masked_write(chip, CHG_TWDOG, PM8921_CHG_WD_MASK,
+					 0);
+}
+
+static void pm8921_chg_enable_irq(struct pm8921_chg_chip *chip, int interrupt)
+{
+	if (!__test_and_set_bit(interrupt, chip->enabled_irqs)) {
+		dev_dbg(chip->dev, "%d\n", chip->pmic_chg_irq[interrupt]);
+		enable_irq(chip->pmic_chg_irq[interrupt]);
+	}
+}
+
+static void pm8921_chg_disable_irq(struct pm8921_chg_chip *chip, int interrupt)
+{
+	if (__test_and_clear_bit(interrupt, chip->enabled_irqs)) {
+		dev_dbg(chip->dev, "%d\n", chip->pmic_chg_irq[interrupt]);
+		disable_irq_nosync(chip->pmic_chg_irq[interrupt]);
+	}
+}
+
+static int pm_chg_get_rt_status(struct pm8921_chg_chip *chip, int irq_id)
+{
+	return pm8xxx_read_irq_stat(chip->dev->parent,
+					chip->pmic_chg_irq[irq_id]);
+}
+
+/* Treat OverVoltage/UnderVoltage as source missing */
+static int is_usb_chg_plugged_in(struct pm8921_chg_chip *chip)
+{
+	int pres, ov, uv;
+
+	pres = pm_chg_get_rt_status(chip, USBIN_VALID_IRQ);
+	ov = pm_chg_get_rt_status(chip, USBIN_OV_IRQ);
+	uv = pm_chg_get_rt_status(chip, USBIN_UV_IRQ);
+
+	return pres && !ov && !uv;
+}
+
+/* Treat OverVoltage/UnderVoltage as source missing */
+static int is_dc_chg_plugged_in(struct pm8921_chg_chip *chip)
+{
+	int pres, ov, uv;
+
+	pres = pm_chg_get_rt_status(chip, DCIN_VALID_IRQ);
+	ov = pm_chg_get_rt_status(chip, DCIN_OV_IRQ);
+	uv = pm_chg_get_rt_status(chip, DCIN_UV_IRQ);
+
+	return pres && !ov && !uv;
+}
+
+static int is_battery_charging(int fsm_state)
+{
+	switch (fsm_state) {
+	case FSM_STATE_ATC_2A:
+	case FSM_STATE_ATC_2B:
+	case FSM_STATE_ON_CHG_AND_BAT_6:
+	case FSM_STATE_FAST_CHG_7:
+	case FSM_STATE_TRKL_CHG_8:
+		return 1;
+	}
+	return 0;
+}
+
+static void bms_notify(struct work_struct *work)
+{
+	struct bms_notify *n = container_of(work, struct bms_notify, work);
+
+	if (n->is_charging)
+		pm8921_bms_charging_began();
+	else
+		pm8921_bms_charging_end();
+}
+
+static enum power_supply_property pm_power_props[] = {
+	POWER_SUPPLY_PROP_PRESENT,
+};
+
+static char *pm_power_supplied_to[] = {
+	"battery",
+};
+
+static int pm_power_get_property(struct power_supply *psy,
+				  enum power_supply_property psp,
+				  union power_supply_propval *val)
+{
+	struct pm8921_chg_chip *chip;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_PRESENT:
+		if (psy->type == POWER_SUPPLY_TYPE_MAINS) {
+			chip = container_of(psy, struct pm8921_chg_chip,
+							dc_psy);
+			val->intval = is_dc_chg_plugged_in(chip);
+		}
+		if (psy->type == POWER_SUPPLY_TYPE_USB) {
+			chip = container_of(psy, struct pm8921_chg_chip,
+							usb_psy);
+			val->intval = is_usb_chg_plugged_in(chip);
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static enum power_supply_property msm_batt_power_props[] = {
+	POWER_SUPPLY_PROP_STATUS,
+	POWER_SUPPLY_PROP_CHARGE_TYPE,
+	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_TECHNOLOGY,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+	POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_CAPACITY,
+};
+
+static int get_prop_battery_mvolts(struct pm8921_chg_chip *chip)
+{
+	int rc;
+	struct pm8921_adc_chan_result result;
+
+	rc = pm8921_adc_read(chip->vbat_channel, &result);
+	if (rc) {
+		pr_err("error reading adc channel = %d, rc = %d\n",
+					chip->vbat_channel, rc);
+		return rc;
+	}
+	pr_debug("mvolts phy = %lld meas = 0x%llx\n", result.physical,
+						result.measurement);
+	return (int)result.physical;
+}
+
+static int get_prop_batt_capacity(struct pm8921_chg_chip *chip)
+{
+	return pm8921_bms_get_percent_charge();
+}
+
+static int get_prop_batt_health(struct pm8921_chg_chip *chip)
+{
+	int temp;
+
+	temp = pm_chg_get_rt_status(chip, BATTTEMP_HOT_IRQ);
+	if (temp)
+		return POWER_SUPPLY_HEALTH_OVERHEAT;
+
+	temp = pm_chg_get_rt_status(chip, BATTTEMP_COLD_IRQ);
+	if (temp)
+		return POWER_SUPPLY_HEALTH_COLD;
+
+	return POWER_SUPPLY_HEALTH_GOOD;
+}
+
+static int get_prop_batt_present(struct pm8921_chg_chip *chip)
+{
+	return pm_chg_get_rt_status(chip, BATT_INSERTED_IRQ);
+}
+
+static int get_prop_charge_type(struct pm8921_chg_chip *chip)
+{
+	int temp;
+
+	temp = pm_chg_get_rt_status(chip, TRKLCHG_IRQ);
+	if (temp)
+		return POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+
+	temp = pm_chg_get_rt_status(chip, FASTCHG_IRQ);
+	if (temp)
+		return POWER_SUPPLY_CHARGE_TYPE_FAST;
+
+	return POWER_SUPPLY_CHARGE_TYPE_NONE;
+}
+
+static int get_prop_batt_status(struct pm8921_chg_chip *chip)
+{
+	int temp = 0;
+
+	/* TODO reading the FSM state is more reliable */
+	temp = pm_chg_get_rt_status(chip, TRKLCHG_IRQ);
+
+	temp |= pm_chg_get_rt_status(chip, FASTCHG_IRQ);
+	if (temp)
+		return POWER_SUPPLY_STATUS_CHARGING;
+	/*
+	 * The battery is not charging
+	 * check the FET - if on battery is discharging
+	 *		 - if off battery is isolated(full) and the system
+	 *		   is being driven from a charger
+	 */
+	temp = pm_chg_get_rt_status(chip, BATFET_IRQ);
+	if (temp)
+		return POWER_SUPPLY_STATUS_DISCHARGING;
+
+	return POWER_SUPPLY_STATUS_FULL;
+}
+
+static int pm_batt_power_get_property(struct power_supply *psy,
+				       enum power_supply_property psp,
+				       union power_supply_propval *val)
+{
+	struct pm8921_chg_chip *chip = container_of(psy, struct pm8921_chg_chip,
+								batt_psy);
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_STATUS:
+		val->intval = get_prop_batt_status(chip);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_TYPE:
+		val->intval = get_prop_charge_type(chip);
+		break;
+	case POWER_SUPPLY_PROP_HEALTH:
+		val->intval = get_prop_batt_health(chip);
+		break;
+	case POWER_SUPPLY_PROP_PRESENT:
+		val->intval = get_prop_batt_present(chip);
+		break;
+	case POWER_SUPPLY_PROP_TECHNOLOGY:
+		val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+		val->intval = chip->max_voltage;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+		val->intval = chip->min_voltage;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+		val->intval = get_prop_battery_mvolts(chip);
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		val->intval = get_prop_batt_capacity(chip);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void (*notify_vbus_state_func_ptr)(int);
+static int usb_chg_current;
+static DEFINE_SPINLOCK(vbus_lock);
+
+int pm8921_charger_register_vbus_sn(void (*callback)(int))
+{
+	pr_debug("%p\n", callback);
+	notify_vbus_state_func_ptr = callback;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pm8921_charger_register_vbus_sn);
+
+/* this is passed to the hsusb driver via the msm_otg_pdata platform data */
+void pm8921_charger_unregister_vbus_sn(void (*callback)(int))
+{
+	pr_debug("%p\n", callback);
+	notify_vbus_state_func_ptr = NULL;
+}
+EXPORT_SYMBOL_GPL(pm8921_charger_unregister_vbus_sn);
+
+static void notify_usb_of_the_plugin_event(int plugin)
+{
+	plugin = !!plugin;
+	if (notify_vbus_state_func_ptr) {
+		pr_debug("notifying plugin\n");
+		(*notify_vbus_state_func_ptr) (plugin);
+	} else {
+		pr_debug("unable to notify plugin\n");
+	}
+}
+
+struct usb_ma_limit_entry {
+	int usb_ma;
+	u8  chg_iusb_value;
+};
+
+static struct usb_ma_limit_entry usb_ma_table[] = {
+	{100, 0},
+	{500, 1},
+	{700, 2},
+	{850, 3},
+	{900, 4},
+	{1100, 5},
+	{1300, 6},
+	{1500, 7},
+};
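+
+/*
+ * Example of the table lookup done in __pm8921_charger_vbus_draw() below:
+ * a request for 700 mA matches the {700, 2} entry, so chg_iusb_value 2 is
+ * written via pm_chg_iusbmax_set() into bits 2..4 of PBL_ACCESS2
+ * (2 << 2 = 0x08 under PM8921_CHG_IUSB_MASK). A request for 800 mA also
+ * picks the 700 mA entry, since the search selects the largest entry that
+ * does not exceed the request.
+ */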
+
+/* assumes vbus_lock is held */
+static void __pm8921_charger_vbus_draw(unsigned int mA)
+{
+	int i, rc;
+
+	if (mA > 0 && mA <= 2) {
+		usb_chg_current = 0;
+		rc = pm_chg_iusbmax_set(the_chip,
+				usb_ma_table[0].chg_iusb_value);
+		if (rc) {
+			pr_err("unable to set iusb to %d rc = %d\n",
+				usb_ma_table[0].chg_iusb_value, rc);
+		}
+		rc = pm_chg_usb_suspend_enable(the_chip, 1);
+		if (rc)
+			pr_err("fail to set suspend bit rc=%d\n", rc);
+	} else {
+		rc = pm_chg_usb_suspend_enable(the_chip, 0);
+		if (rc)
+			pr_err("fail to reset suspend bit rc=%d\n", rc);
+		for (i = ARRAY_SIZE(usb_ma_table) - 1; i >= 0; i--) {
+			if (usb_ma_table[i].usb_ma <= mA)
+				break;
+		}
+		if (i < 0)
+			i = 0;
+		rc = pm_chg_iusbmax_set(the_chip,
+				usb_ma_table[i].chg_iusb_value);
+		if (rc) {
+			pr_err("unable to set iusb to %d rc = %d\n",
+				usb_ma_table[i].chg_iusb_value, rc);
+		}
+	}
+}
+
+/* USB calls these to tell us how much max usb current the system can draw */
+void pm8921_charger_vbus_draw(unsigned int mA)
+{
+	unsigned long flags;
+
+	pr_debug("Enter charge=%d\n", mA);
+	spin_lock_irqsave(&vbus_lock, flags);
+	if (the_chip) {
+		__pm8921_charger_vbus_draw(mA);
+	} else {
+		/*
+		 * called before pmic initialized,
+		 * save this value and use it at probe
+		 */
+		usb_chg_current = mA;
+	}
+	spin_unlock_irqrestore(&vbus_lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm8921_charger_vbus_draw);
+
+static void handle_usb_insertion_removal(struct pm8921_chg_chip *chip)
+{
+	int usb_present;
+
+	usb_present = is_usb_chg_plugged_in(chip);
+	if (chip->usb_present ^ usb_present) {
+		notify_usb_of_the_plugin_event(usb_present);
+		chip->usb_present = usb_present;
+		power_supply_changed(&chip->usb_psy);
+	}
+}
+
+static void handle_dc_removal_insertion(struct pm8921_chg_chip *chip)
+{
+	int dc_present;
+
+	dc_present = is_dc_chg_plugged_in(chip);
+	if (chip->dc_present ^ dc_present) {
+		chip->dc_present = dc_present;
+		power_supply_changed(&chip->dc_psy);
+	}
+}
+
+static irqreturn_t usbin_valid_irq_handler(int irq, void *data)
+{
+	handle_usb_insertion_removal(data);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t usbin_ov_irq_handler(int irq, void *data)
+{
+	handle_usb_insertion_removal(data);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t batt_inserted_irq_handler(int irq, void *data)
+{
+	struct pm8921_chg_chip *chip = data;
+	int status;
+
+	status = pm_chg_get_rt_status(chip,
+				BATT_INSERTED_IRQ);
+	pr_debug("battery present=%d\n", status);
+	power_supply_changed(&chip->batt_psy);
+	return IRQ_HANDLED;
+}
+/* this interrupt is used to restart charging the battery */
+static irqreturn_t vbatdet_low_irq_handler(int irq, void *data)
+{
+	struct pm8921_chg_chip *chip = data;
+
+	pr_debug("fsm_state=%d\n", pm_chg_get_fsm_state(data));
+	power_supply_changed(&chip->batt_psy);
+	power_supply_changed(&chip->usb_psy);
+	power_supply_changed(&chip->dc_psy);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t usbin_uv_irq_handler(int irq, void *data)
+{
+	handle_usb_insertion_removal(data);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t vbat_ov_irq_handler(int irq, void *data)
+{
+	pr_debug("fsm_state=%d\n", pm_chg_get_fsm_state(data));
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t chgwdog_irq_handler(int irq, void *data)
+{
+	pr_debug("fsm_state=%d\n", pm_chg_get_fsm_state(data));
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t vcp_irq_handler(int irq, void *data)
+{
+	pr_warning("VCP triggered, BATDET forced on\n");
+	pr_debug("state_changed_to=%d\n", pm_chg_get_fsm_state(data));
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t atcdone_irq_handler(int irq, void *data)
+{
+	pr_debug("fsm_state=%d\n", pm_chg_get_fsm_state(data));
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t atcfail_irq_handler(int irq, void *data)
+{
+	pr_debug("fsm_state=%d\n", pm_chg_get_fsm_state(data));
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t chgdone_irq_handler(int irq, void *data)
+{
+	struct pm8921_chg_chip *chip = data;
+
+	pr_debug("state_changed_to=%d\n", pm_chg_get_fsm_state(data));
+	power_supply_changed(&chip->batt_psy);
+	power_supply_changed(&chip->usb_psy);
+	power_supply_changed(&chip->dc_psy);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t chgfail_irq_handler(int irq, void *data)
+{
+	struct pm8921_chg_chip *chip = data;
+
+	pr_debug("state_changed_to=%d\n", pm_chg_get_fsm_state(data));
+	power_supply_changed(&chip->batt_psy);
+	power_supply_changed(&chip->usb_psy);
+	power_supply_changed(&chip->dc_psy);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t chgstate_irq_handler(int irq, void *data)
+{
+	struct pm8921_chg_chip *chip = data;
+	int new_is_charging = 0, fsm_state;
+
+	pr_debug("state_changed_to=%d\n", pm_chg_get_fsm_state(data));
+	power_supply_changed(&chip->batt_psy);
+	power_supply_changed(&chip->usb_psy);
+	power_supply_changed(&chip->dc_psy);
+
+	fsm_state = pm_chg_get_fsm_state(chip);
+	new_is_charging = is_battery_charging(fsm_state);
+
+	if (chip->bms_notify.is_charging ^ new_is_charging) {
+		chip->bms_notify.is_charging = new_is_charging;
+		schedule_work(&(chip->bms_notify.work));
+	}
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t loop_change_irq_handler(int irq, void *data)
+{
+	pr_debug("fsm_state=%d\n", pm_chg_get_fsm_state(data));
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fastchg_irq_handler(int irq, void *data)
+{
+	struct pm8921_chg_chip *chip = data;
+
+	power_supply_changed(&chip->batt_psy);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t trklchg_irq_handler(int irq, void *data)
+{
+	struct pm8921_chg_chip *chip = data;
+
+	power_supply_changed(&chip->batt_psy);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t batt_removed_irq_handler(int irq, void *data)
+{
+	struct pm8921_chg_chip *chip = data;
+	int status;
+
+	status = pm_chg_get_rt_status(chip, BATT_REMOVED_IRQ);
+	pr_debug("battery present=%d state=%d\n", !status,
+					 pm_chg_get_fsm_state(data));
+	power_supply_changed(&chip->batt_psy);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t batttemp_hot_irq_handler(int irq, void *data)
+{
+	struct pm8921_chg_chip *chip = data;
+
+	power_supply_changed(&chip->batt_psy);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t chghot_irq_handler(int irq, void *data)
+{
+	struct pm8921_chg_chip *chip = data;
+
+	pr_debug("Chg hot fsm_state=%d\n", pm_chg_get_fsm_state(data));
+	power_supply_changed(&chip->batt_psy);
+	power_supply_changed(&chip->usb_psy);
+	power_supply_changed(&chip->dc_psy);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t batttemp_cold_irq_handler(int irq, void *data)
+{
+	struct pm8921_chg_chip *chip = data;
+
+	pr_debug("Batt cold fsm_state=%d\n", pm_chg_get_fsm_state(data));
+	power_supply_changed(&chip->batt_psy);
+	power_supply_changed(&chip->usb_psy);
+	power_supply_changed(&chip->dc_psy);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t chg_gone_irq_handler(int irq, void *data)
+{
+	struct pm8921_chg_chip *chip = data;
+
+	pr_debug("Chg gone fsm_state=%d\n", pm_chg_get_fsm_state(data));
+	power_supply_changed(&chip->batt_psy);
+	power_supply_changed(&chip->usb_psy);
+	power_supply_changed(&chip->dc_psy);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t bat_temp_ok_irq_handler(int irq, void *data)
+{
+	struct pm8921_chg_chip *chip = data;
+
+	pr_debug("batt temp ok fsm_state=%d\n", pm_chg_get_fsm_state(data));
+	power_supply_changed(&chip->batt_psy);
+	power_supply_changed(&chip->usb_psy);
+	power_supply_changed(&chip->dc_psy);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t coarse_det_low_irq_handler(int irq, void *data)
+{
+	pr_debug("fsm_state=%d\n", pm_chg_get_fsm_state(data));
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t vdd_loop_irq_handler(int irq, void *data)
+{
+	pr_debug("fsm_state=%d\n", pm_chg_get_fsm_state(data));
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t vreg_ov_irq_handler(int irq, void *data)
+{
+	pr_debug("fsm_state=%d\n", pm_chg_get_fsm_state(data));
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t vbatdet_irq_handler(int irq, void *data)
+{
+	pr_debug("fsm_state=%d\n", pm_chg_get_fsm_state(data));
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t batfet_irq_handler(int irq, void *data)
+{
+	struct pm8921_chg_chip *chip = data;
+
+	pr_debug("batfet changed\n");
+	power_supply_changed(&chip->batt_psy);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t dcin_valid_irq_handler(int irq, void *data)
+{
+	handle_dc_removal_insertion(data);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t dcin_ov_irq_handler(int irq, void *data)
+{
+	handle_dc_removal_insertion(data);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t dcin_uv_irq_handler(int irq, void *data)
+{
+	handle_dc_removal_insertion(data);
+	return IRQ_HANDLED;
+}
+
+static int set_disable_status_param(const char *val, struct kernel_param *kp)
+{
+	int ret;
+	struct pm8921_chg_chip *chip = the_chip;
+
+	ret = param_set_int(val, kp);
+	if (ret) {
+		pr_err("error setting value %d\n", ret);
+		return ret;
+	}
+	pr_info("factory set disable param to %d\n", charging_disabled);
+	if (chip) {
+		pm_chg_auto_enable(chip, !charging_disabled);
+		pm_chg_charge_dis(chip, charging_disabled);
+	}
+	return 0;
+}
+module_param_call(disabled, set_disable_status_param, param_get_uint,
+					&charging_disabled, 0644);
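+
+/*
+ * Usage sketch (path depends on the module name, assumed here to be
+ * pm8921_charger): charging can be toggled at runtime from userspace with
+ *
+ *   echo 1 > /sys/module/pm8921_charger/parameters/disabled   # stop charging
+ *   echo 0 > /sys/module/pm8921_charger/parameters/disabled   # resume
+ */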
+
+static void free_irqs(struct pm8921_chg_chip *chip)
+{
+	int i;
+
+	for (i = 0; i < PM_CHG_MAX_INTS; i++)
+		if (chip->pmic_chg_irq[i]) {
+			free_irq(chip->pmic_chg_irq[i], chip);
+			chip->pmic_chg_irq[i] = 0;
+		}
+}
+
+/* determines the initial present states and notifies msm_charger */
+static void __devinit determine_initial_state(struct pm8921_chg_chip *chip)
+{
+	unsigned long flags;
+	int fsm_state;
+
+	chip->dc_present = !!is_dc_chg_plugged_in(chip);
+	chip->usb_present = !!is_usb_chg_plugged_in(chip);
+
+	notify_usb_of_the_plugin_event(chip->usb_present);
+
+	pm8921_chg_enable_irq(chip, DCIN_VALID_IRQ);
+	pm8921_chg_enable_irq(chip, USBIN_VALID_IRQ);
+	pm8921_chg_enable_irq(chip, BATT_REMOVED_IRQ);
+	pm8921_chg_enable_irq(chip, CHGSTATE_IRQ);
+
+	spin_lock_irqsave(&vbus_lock, flags);
+	if (usb_chg_current) {
+		/* reissue a vbus draw call */
+		__pm8921_charger_vbus_draw(usb_chg_current);
+	}
+	spin_unlock_irqrestore(&vbus_lock, flags);
+
+	fsm_state = pm_chg_get_fsm_state(chip);
+	if (is_battery_charging(fsm_state)) {
+		chip->bms_notify.is_charging = 1;
+		pm8921_bms_charging_began();
+	}
+
+	pr_debug("usb = %d, dc = %d  batt = %d state=%d\n",
+			chip->usb_present,
+			chip->dc_present,
+			get_prop_batt_present(chip),
+			fsm_state);
+}
+
+struct pm_chg_irq_init_data {
+	unsigned int	irq_id;
+	char		*name;
+	unsigned long	flags;
+	irqreturn_t	(*handler)(int, void *);
+};
+
+#define CHG_IRQ(_id, _flags, _handler) \
+{ \
+	.irq_id		= _id, \
+	.name		= #_id, \
+	.flags		= _flags, \
+	.handler	= _handler, \
+}
+struct pm_chg_irq_init_data chg_irq_data[] = {
+	CHG_IRQ(USBIN_VALID_IRQ, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+						usbin_valid_irq_handler),
+	CHG_IRQ(USBIN_OV_IRQ, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+						usbin_ov_irq_handler),
+	CHG_IRQ(BATT_INSERTED_IRQ, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+						batt_inserted_irq_handler),
+	CHG_IRQ(VBATDET_LOW_IRQ, IRQF_TRIGGER_RISING, vbatdet_low_irq_handler),
+	CHG_IRQ(USBIN_UV_IRQ, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+							usbin_uv_irq_handler),
+	CHG_IRQ(VBAT_OV_IRQ, IRQF_TRIGGER_RISING, vbat_ov_irq_handler),
+	CHG_IRQ(CHGWDOG_IRQ, IRQF_TRIGGER_RISING, chgwdog_irq_handler),
+	CHG_IRQ(VCP_IRQ, IRQF_TRIGGER_RISING, vcp_irq_handler),
+	CHG_IRQ(ATCDONE_IRQ, IRQF_TRIGGER_RISING, atcdone_irq_handler),
+	CHG_IRQ(ATCFAIL_IRQ, IRQF_TRIGGER_RISING, atcfail_irq_handler),
+	CHG_IRQ(CHGDONE_IRQ, IRQF_TRIGGER_RISING, chgdone_irq_handler),
+	CHG_IRQ(CHGFAIL_IRQ, IRQF_TRIGGER_RISING, chgfail_irq_handler),
+	CHG_IRQ(CHGSTATE_IRQ, IRQF_TRIGGER_RISING, chgstate_irq_handler),
+	CHG_IRQ(LOOP_CHANGE_IRQ, IRQF_TRIGGER_RISING, loop_change_irq_handler),
+	CHG_IRQ(FASTCHG_IRQ, IRQF_TRIGGER_RISING, fastchg_irq_handler),
+	CHG_IRQ(TRKLCHG_IRQ, IRQF_TRIGGER_RISING, trklchg_irq_handler),
+	CHG_IRQ(BATT_REMOVED_IRQ, IRQF_TRIGGER_RISING,
+						batt_removed_irq_handler),
+	CHG_IRQ(BATTTEMP_HOT_IRQ, IRQF_TRIGGER_RISING,
+						batttemp_hot_irq_handler),
+	CHG_IRQ(CHGHOT_IRQ, IRQF_TRIGGER_RISING, chghot_irq_handler),
+	CHG_IRQ(BATTTEMP_COLD_IRQ, IRQF_TRIGGER_RISING,
+						batttemp_cold_irq_handler),
+	CHG_IRQ(CHG_GONE_IRQ, IRQF_TRIGGER_RISING, chg_gone_irq_handler),
+	CHG_IRQ(BAT_TEMP_OK_IRQ, IRQF_TRIGGER_RISING, bat_temp_ok_irq_handler),
+	CHG_IRQ(COARSE_DET_LOW_IRQ, IRQF_TRIGGER_RISING,
+						coarse_det_low_irq_handler),
+	CHG_IRQ(VDD_LOOP_IRQ, IRQF_TRIGGER_RISING, vdd_loop_irq_handler),
+	CHG_IRQ(VREG_OV_IRQ, IRQF_TRIGGER_RISING, vreg_ov_irq_handler),
+	CHG_IRQ(VBATDET_IRQ, IRQF_TRIGGER_RISING, vbatdet_irq_handler),
+	CHG_IRQ(BATFET_IRQ, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+						batfet_irq_handler),
+	CHG_IRQ(DCIN_VALID_IRQ, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+						dcin_valid_irq_handler),
+	CHG_IRQ(DCIN_OV_IRQ, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+						dcin_ov_irq_handler),
+	CHG_IRQ(DCIN_UV_IRQ, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+						dcin_uv_irq_handler),
+};
+
+static int __devinit request_irqs(struct pm8921_chg_chip *chip,
+					struct platform_device *pdev)
+{
+	struct resource *res;
+	int ret, i;
+
+	ret = 0;
+	bitmap_fill(chip->enabled_irqs, PM_CHG_MAX_INTS);
+
+	for (i = 0; i < ARRAY_SIZE(chg_irq_data); i++) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+				chg_irq_data[i].name);
+		if (res == NULL) {
+			pr_err("couldn't find %s\n", chg_irq_data[i].name);
+			goto err_out;
+		}
+		ret = request_irq(res->start, chg_irq_data[i].handler,
+			chg_irq_data[i].flags,
+			chg_irq_data[i].name, chip);
+		if (ret < 0) {
+			pr_err("couldn't request %d (%s) %d\n", res->start,
+					chg_irq_data[i].name, ret);
+			goto err_out;
+		}
+		chip->pmic_chg_irq[chg_irq_data[i].irq_id] = res->start;
+		pm8921_chg_disable_irq(chip, chg_irq_data[i].irq_id);
+	}
+	return 0;
+
+err_out:
+	free_irqs(chip);
+	return -EINVAL;
+}
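+
+/*
+ * The loop above looks the interrupts up by name, so the board/MFD code is
+ * expected to publish one IORESOURCE_IRQ resource per entry in chg_irq_data,
+ * named after the enum identifier. A minimal sketch (hypothetical numbers):
+ *
+ *   static struct resource chg_resources[] = {
+ *       { .name = "USBIN_VALID_IRQ", .start = 123, .end = 123,
+ *         .flags = IORESOURCE_IRQ },
+ *       ...
+ *   };
+ */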
+
+#define ENUM_TIMER_STOP_BIT	BIT(1)
+#define BOOT_DONE_BIT		BIT(6)
+#define CHG_BATFET_ON_BIT	BIT(3)
+#define CHG_VCP_EN		BIT(0)
+#define CHG_BAT_TEMP_DIS_BIT	BIT(2)
+#define SAFE_CURRENT_MA		1500
+static int __devinit pm8921_chg_hw_init(struct pm8921_chg_chip *chip)
+{
+	int rc;
+
+	rc = pm_chg_masked_write(chip, SYS_CONFIG_2,
+					BOOT_DONE_BIT, BOOT_DONE_BIT);
+	if (rc) {
+		pr_err("Failed to set BOOT_DONE_BIT rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = pm_chg_vddsafe_set(chip, chip->max_voltage);
+	if (rc) {
+		pr_err("Failed to set safe voltage to %d rc=%d\n",
+						chip->max_voltage, rc);
+		return rc;
+	}
+	rc = pm_chg_vbatdet_set(chip, chip->resume_voltage);
+	if (rc) {
+		pr_err("Failed to set vbatdet comparator voltage to %d rc=%d\n",
+						chip->resume_voltage, rc);
+		return rc;
+	}
+
+	rc = pm_chg_vddmax_set(chip, chip->max_voltage);
+	if (rc) {
+		pr_err("Failed to set max voltage to %d rc=%d\n",
+						chip->max_voltage, rc);
+		return rc;
+	}
+	rc = pm_chg_ibatsafe_set(chip, SAFE_CURRENT_MA);
+	if (rc) {
+		pr_err("Failed to set safe current to %d rc=%d\n",
+						SAFE_CURRENT_MA, rc);
+		return rc;
+	}
+
+	/* TODO: needs to be changed based on the battery temperature */
+	rc = pm_chg_ibatmax_set(chip, 400);
+	if (rc) {
+		pr_err("Failed to set max current to 400 rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = pm_chg_iterm_set(chip, chip->term_current);
+	if (rc) {
+		pr_err("Failed to set term current to %d rc=%d\n",
+						chip->term_current, rc);
+		return rc;
+	}
+
+	/* Disable the ENUM TIMER */
+	rc = pm_chg_masked_write(chip, PBL_ACCESS2, ENUM_TIMER_STOP_BIT,
+			ENUM_TIMER_STOP_BIT);
+	if (rc) {
+		pr_err("Failed to set enum timer stop rc=%d\n", rc);
+		return rc;
+	}
+
+	/* init with the lowest USB current */
+	rc = pm_chg_iusbmax_set(chip, usb_ma_table[0].chg_iusb_value);
+	if (rc) {
+		pr_err("Failed to set usb max to %d rc=%d\n",
+					usb_ma_table[0].chg_iusb_value, rc);
+		return rc;
+	}
+	rc = pm_chg_disable_wd(chip);
+	if (rc) {
+		pr_err("Failed to disable wd rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = pm_chg_masked_write(chip, CHG_CNTRL_2,
+				CHG_BAT_TEMP_DIS_BIT, 0);
+	if (rc) {
+		pr_err("Failed to enable temp control chg rc=%d\n", rc);
+		return rc;
+	}
+	/* switch the buck converter clock to 3.2 MHz */
+	rc = pm8xxx_writeb(chip->dev->parent, CHG_BUCK_CLOCK_CTRL, 0x15);
+	if (rc) {
+		pr_err("Failed to switch buck clk rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Workarounds for die 1.1 and 1.0 */
+	if (pm8xxx_get_revision(chip->dev->parent) < PM8XXX_REVISION_8921_2p0) {
+		pm8xxx_writeb(chip->dev->parent, CHG_BUCK_CTRL_TEST2, 0xF1);
+		pm8xxx_writeb(chip->dev->parent, CHG_BUCK_CTRL_TEST3, 0x8C);
+		pm8xxx_writeb(chip->dev->parent, CHG_BUCK_CTRL_TEST3, 0xCE);
+		pm8xxx_writeb(chip->dev->parent, CHG_BUCK_CTRL_TEST3, 0xD8);
+	}
+
+	rc = pm_chg_charge_dis(chip, charging_disabled);
+	if (rc) {
+		pr_err("Failed to disable CHG_CHARGE_DIS bit rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = pm_chg_auto_enable(chip, !charging_disabled);
+	if (rc) {
+		pr_err("Failed to enable charging rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int get_rt_status(void *data, u64 *val)
+{
+	int i = (int)data;
+	int ret;
+
+	/* the chg irq id (index into pmic_chg_irq) is passed in via data */
+	ret = pm_chg_get_rt_status(the_chip, i);
+	*val = ret;
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(rt_fops, get_rt_status, NULL, "%llu\n");
+
+static int get_fsm_status(void *data, u64 *val)
+{
+	u8 temp;
+
+	temp = pm_chg_get_fsm_state(the_chip);
+	*val = temp;
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fsm_fops, get_fsm_status, NULL, "%llu\n");
+
+static int get_reg(void *data, u64 *val)
+{
+	int addr = (int)data;
+	int ret;
+	u8 temp;
+
+	ret = pm8xxx_readb(the_chip->dev->parent, addr, &temp);
+	if (ret) {
+		pr_err("pm8xxx_readb failed: addr=0x%x, val=%d, rc=%d\n",
+			addr, temp, ret);
+		return -EAGAIN;
+	}
+	*val = temp;
+	return 0;
+}
+
+static int set_reg(void *data, u64 val)
+{
+	int addr = (int)data;
+	int ret;
+	u8 temp;
+
+	temp = (u8) val;
+	ret = pm8xxx_writeb(the_chip->dev->parent, addr, temp);
+	if (ret) {
+		pr_err("pm8xxx_writeb failed: addr=0x%x, val=%d, rc=%d\n",
+			addr, temp, ret);
+		return -EAGAIN;
+	}
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(reg_fops, get_reg, set_reg, "0x%02llx\n");
+
+static void create_debugfs_entries(struct pm8921_chg_chip *chip)
+{
+	int i;
+
+	chip->dent = debugfs_create_dir("pm8921_chg", NULL);
+
+	if (IS_ERR(chip->dent)) {
+		pr_err("pmic charger couldn't create debugfs dir\n");
+		return;
+	}
+
+	debugfs_create_file("CHG_CNTRL", 0644, chip->dent,
+			    (void *)CHG_CNTRL, &reg_fops);
+	debugfs_create_file("CHG_CNTRL_2", 0644, chip->dent,
+			    (void *)CHG_CNTRL_2, &reg_fops);
+	debugfs_create_file("CHG_CNTRL_3", 0644, chip->dent,
+			    (void *)CHG_CNTRL_3, &reg_fops);
+	debugfs_create_file("PBL_ACCESS1", 0644, chip->dent,
+			    (void *)PBL_ACCESS1, &reg_fops);
+	debugfs_create_file("PBL_ACCESS2", 0644, chip->dent,
+			    (void *)PBL_ACCESS2, &reg_fops);
+	debugfs_create_file("SYS_CONFIG_1", 0644, chip->dent,
+			    (void *)SYS_CONFIG_1, &reg_fops);
+	debugfs_create_file("SYS_CONFIG_2", 0644, chip->dent,
+			    (void *)SYS_CONFIG_2, &reg_fops);
+	debugfs_create_file("CHG_VDD_MAX", 0644, chip->dent,
+			    (void *)CHG_VDD_MAX, &reg_fops);
+	debugfs_create_file("CHG_VDD_SAFE", 0644, chip->dent,
+			    (void *)CHG_VDD_SAFE, &reg_fops);
+	debugfs_create_file("CHG_VBAT_DET", 0644, chip->dent,
+			    (void *)CHG_VBAT_DET, &reg_fops);
+	debugfs_create_file("CHG_IBAT_MAX", 0644, chip->dent,
+			    (void *)CHG_IBAT_MAX, &reg_fops);
+	debugfs_create_file("CHG_IBAT_SAFE", 0644, chip->dent,
+			    (void *)CHG_IBAT_SAFE, &reg_fops);
+	debugfs_create_file("CHG_VIN_MIN", 0644, chip->dent,
+			    (void *)CHG_VIN_MIN, &reg_fops);
+	debugfs_create_file("CHG_VTRICKLE", 0644, chip->dent,
+			    (void *)CHG_VTRICKLE, &reg_fops);
+	debugfs_create_file("CHG_ITRICKLE", 0644, chip->dent,
+			    (void *)CHG_ITRICKLE, &reg_fops);
+	debugfs_create_file("CHG_ITERM", 0644, chip->dent,
+			    (void *)CHG_ITERM, &reg_fops);
+	debugfs_create_file("CHG_TCHG_MAX", 0644, chip->dent,
+			    (void *)CHG_TCHG_MAX, &reg_fops);
+	debugfs_create_file("CHG_TWDOG", 0644, chip->dent,
+			    (void *)CHG_TWDOG, &reg_fops);
+	debugfs_create_file("CHG_TEMP_THRESH", 0644, chip->dent,
+			    (void *)CHG_TEMP_THRESH, &reg_fops);
+	debugfs_create_file("CHG_COMP_OVR", 0644, chip->dent,
+			    (void *)CHG_COMP_OVR, &reg_fops);
+	debugfs_create_file("CHG_BUCK_CTRL_TEST1", 0644, chip->dent,
+			    (void *)CHG_BUCK_CTRL_TEST1, &reg_fops);
+	debugfs_create_file("CHG_BUCK_CTRL_TEST2", 0644, chip->dent,
+			    (void *)CHG_BUCK_CTRL_TEST2, &reg_fops);
+	debugfs_create_file("CHG_BUCK_CTRL_TEST3", 0644, chip->dent,
+			    (void *)CHG_BUCK_CTRL_TEST3, &reg_fops);
+	debugfs_create_file("CHG_TEST", 0644, chip->dent,
+			    (void *)CHG_TEST, &reg_fops);
+
+	debugfs_create_file("FSM_STATE", 0644, chip->dent, NULL,
+			    &fsm_fops);
+
+	for (i = 0; i < ARRAY_SIZE(chg_irq_data); i++) {
+		if (chip->pmic_chg_irq[chg_irq_data[i].irq_id])
+			debugfs_create_file(chg_irq_data[i].name, 0444,
+				chip->dent,
+				(void *)chg_irq_data[i].irq_id,
+				&rt_fops);
+	}
+}
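+
+/*
+ * Example (illustrative, assuming debugfs is mounted at /sys/kernel/debug):
+ * the charger FSM state file created above can be inspected with
+ *
+ *   cat /sys/kernel/debug/pm8921_chg/FSM_STATE
+ *
+ * The decimal value maps to enum chg_fsm_state, e.g. 7 = FSM_STATE_FAST_CHG_7.
+ */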
+
+static int __devinit pm8921_charger_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct pm8921_chg_chip *chip;
+	const struct pm8921_charger_platform_data *pdata
+				= pdev->dev.platform_data;
+
+	if (!pdata) {
+		pr_err("missing platform data\n");
+		return -EINVAL;
+	}
+
+	chip = kzalloc(sizeof(struct pm8921_chg_chip),
+					GFP_KERNEL);
+	if (!chip) {
+		pr_err("Cannot allocate pm_chg_chip\n");
+		return -ENOMEM;
+	}
+
+	chip->dev = &pdev->dev;
+	chip->safety_time = pdata->safety_time;
+	chip->update_time = pdata->update_time;
+	chip->max_voltage = pdata->max_voltage;
+	chip->min_voltage = pdata->min_voltage;
+	chip->resume_voltage = pdata->resume_voltage;
+	chip->term_current = pdata->term_current;
+	chip->vbat_channel = pdata->charger_cdata.vbat_channel;
+
+	rc = pm8921_chg_hw_init(chip);
+	if (rc) {
+		pr_err("couldn't init hardware rc=%d\n", rc);
+		goto free_chip;
+	}
+
+	chip->usb_psy.name = "usb";
+	chip->usb_psy.type = POWER_SUPPLY_TYPE_USB;
+	chip->usb_psy.supplied_to = pm_power_supplied_to;
+	chip->usb_psy.num_supplicants = ARRAY_SIZE(pm_power_supplied_to);
+	chip->usb_psy.properties = pm_power_props;
+	chip->usb_psy.num_properties = ARRAY_SIZE(pm_power_props);
+	chip->usb_psy.get_property = pm_power_get_property;
+
+	chip->dc_psy.name = "ac";
+	chip->dc_psy.type = POWER_SUPPLY_TYPE_MAINS;
+	chip->dc_psy.supplied_to = pm_power_supplied_to;
+	chip->dc_psy.num_supplicants = ARRAY_SIZE(pm_power_supplied_to);
+	chip->dc_psy.properties = pm_power_props;
+	chip->dc_psy.num_properties = ARRAY_SIZE(pm_power_props);
+	chip->dc_psy.get_property = pm_power_get_property;
+
+	chip->batt_psy.name = "battery";
+	chip->batt_psy.type = POWER_SUPPLY_TYPE_BATTERY;
+	chip->batt_psy.properties = msm_batt_power_props;
+	chip->batt_psy.num_properties = ARRAY_SIZE(msm_batt_power_props);
+	chip->batt_psy.get_property = pm_batt_power_get_property;
+
+	rc = power_supply_register(chip->dev, &chip->usb_psy);
+	if (rc < 0) {
+		pr_err("power_supply_register usb failed rc = %d\n", rc);
+		goto free_chip;
+	}
+
+	rc = power_supply_register(chip->dev, &chip->dc_psy);
+	if (rc < 0) {
+		pr_err("power_supply_register dc failed rc = %d\n", rc);
+		goto unregister_usb;
+	}
+
+	rc = power_supply_register(chip->dev, &chip->batt_psy);
+	if (rc < 0) {
+		pr_err("power_supply_register batt failed rc = %d\n", rc);
+		goto unregister_dc;
+	}
+
+	rc = request_irqs(chip, pdev);
+	if (rc) {
+		pr_err("couldn't register interrupts rc=%d\n", rc);
+		goto unregister_batt;
+	}
+
+	platform_set_drvdata(pdev, chip);
+	the_chip = chip;
+	create_debugfs_entries(chip);
+
+	INIT_WORK(&chip->bms_notify.work, bms_notify);
+	/* determine what state the charger is in */
+	determine_initial_state(chip);
+
+	return 0;
+
+unregister_batt:
+	power_supply_unregister(&chip->batt_psy);
+unregister_dc:
+	power_supply_unregister(&chip->dc_psy);
+unregister_usb:
+	power_supply_unregister(&chip->usb_psy);
+free_chip:
+	kfree(chip);
+	return rc;
+}
+
+static int __devexit pm8921_charger_remove(struct platform_device *pdev)
+{
+	struct pm8921_chg_chip *chip = platform_get_drvdata(pdev);
+
+	free_irqs(chip);
+	platform_set_drvdata(pdev, NULL);
+	the_chip = NULL;
+	kfree(chip);
+	return 0;
+}
+
+static struct platform_driver pm8921_charger_driver = {
+	.probe	= pm8921_charger_probe,
+	.remove	= __devexit_p(pm8921_charger_remove),
+	.driver	= {
+		.name	= PM8921_CHARGER_DEV_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init pm8921_charger_init(void)
+{
+	return platform_driver_register(&pm8921_charger_driver);
+}
+
+static void __exit pm8921_charger_exit(void)
+{
+	platform_driver_unregister(&pm8921_charger_driver);
+}
+
+late_initcall(pm8921_charger_init);
+module_exit(pm8921_charger_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC8921 charger/battery driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:" PM8921_CHARGER_DEV_NAME);
diff --git a/drivers/power/pmic8058-charger.c b/drivers/power/pmic8058-charger.c
new file mode 100644
index 0000000..8ea7949
--- /dev/null
+++ b/drivers/power/pmic8058-charger.c
@@ -0,0 +1,1954 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/errno.h>
+#include <linux/mfd/pmic8058.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/workqueue.h>
+#include <linux/msm-charger.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/msm_adc.h>
+#include <linux/notifier.h>
+#include <linux/pmic8058-batt-alarm.h>
+
+#include <mach/msm_xo.h>
+#include <mach/msm_hsusb.h>
+
+/* Config Regs and their bits */
+#define PM8058_CHG_TEST			0x75
+#define IGNORE_LL			2
+#define PM8058_CHG_TEST_2		0xEA
+#define PM8058_CHG_TEST_3		0xEB
+#define PM8058_OVP_TEST_REG		0xF6
+#define FORCE_OVP_OFF			3
+
+#define PM8058_CHG_CNTRL		0x1E
+#define CHG_TRICKLE_EN			7
+#define CHG_USB_SUSPEND			6
+#define CHG_IMON_CAL			5
+#define CHG_IMON_GAIN			4
+#define CHG_CHARGE_BAT			3
+#define CHG_VBUS_FROM_BOOST_OVRD	2
+#define CHG_CHARGE_DIS			1
+#define CHG_VCP_EN			0
+
+#define PM8058_CHG_CNTRL_2		0xD8
+#define ATC_DIS				7	/* coincell backed */
+#define CHARGE_AUTO_DIS			6
+#define DUMB_CHG_OVRD			5	/* coincell backed */
+#define ENUM_DONE			4
+#define CHG_TEMP_MODE			3
+#define CHG_BATT_TEMP_DIS		1	/* coincell backed */
+#define CHG_FAILED_CLEAR		0
+
+#define PM8058_CHG_VMAX_SEL		0x21
+#define PM8058_CHG_VBAT_DET		0xD9
+#define PM8058_CHG_IMAX			0x1F
+#define PM8058_CHG_TRICKLE		0xDB
+#define PM8058_CHG_ITERM		0xDC
+#define PM8058_CHG_TTRKL_MAX		0xE1
+#define PM8058_CHG_TCHG_MAX		0xE4
+#define PM8058_CHG_TEMP_THRESH		0xE2
+#define PM8058_CHG_TEMP_REG		0xE3
+#define PM8058_CHG_PULSE		0x22
+
+/* IRQ STATUS and CLEAR */
+#define PM8058_CHG_STATUS_CLEAR_IRQ_1	0x31
+#define PM8058_CHG_STATUS_CLEAR_IRQ_3	0x33
+#define PM8058_CHG_STATUS_CLEAR_IRQ_10	0xB3
+#define PM8058_CHG_STATUS_CLEAR_IRQ_11	0xB4
+
+/* IRQ MASKS */
+#define PM8058_CHG_MASK_IRQ_1		0x38
+
+#define PM8058_CHG_MASK_IRQ_3		0x3A
+#define PM8058_CHG_MASK_IRQ_10		0xBA
+#define PM8058_CHG_MASK_IRQ_11		0xBB
+
+/* IRQ Real time status regs */
+#define PM8058_CHG_STATUS_RT_1		0x3F
+#define STATUS_RTCHGVAL			7
+#define STATUS_RTCHGINVAL		6
+#define STATUS_RTBATT_REPLACE		5
+#define STATUS_RTVBATDET_LOW		4
+#define STATUS_RTCHGILIM		3
+#define STATUS_RTPCTDONE		1
+#define STATUS_RTVCP			0
+#define PM8058_CHG_STATUS_RT_3		0x41
+#define PM8058_CHG_STATUS_RT_10		0xC1
+#define PM8058_CHG_STATUS_RT_11		0xC2
+
+/* VTRIM */
+#define PM8058_CHG_VTRIM		0x1D
+#define PM8058_CHG_VBATDET_TRIM		0x1E
+#define PM8058_CHG_ITRIM		0x1F
+#define PM8058_CHG_TTRIM		0x20
+
+#define AUTO_CHARGING_VMAXSEL				4200
+#define AUTO_CHARGING_FAST_TIME_MAX_MINUTES		512
+#define AUTO_CHARGING_TRICKLE_TIME_MINUTES		30
+#define AUTO_CHARGING_VEOC_ITERM			100
+#define AUTO_CHARGING_IEOC_ITERM			160
+#define AUTO_CHARGING_RESUME_MV				4100
+
+#define AUTO_CHARGING_VBATDET				4150
+#define AUTO_CHARGING_VBATDET_DEBOUNCE_TIME_MS		3000
+#define AUTO_CHARGING_VEOC_VBATDET			4100
+#define AUTO_CHARGING_VEOC_TCHG				16
+#define AUTO_CHARGING_VEOC_TCHG_FINAL_CYCLE		32
+#define AUTO_CHARGING_VEOC_BEGIN_TIME_MS		5400000
+
+#define AUTO_CHARGING_VEOC_VBAT_LOW_CHECK_TIME_MS	60000
+#define AUTO_CHARGING_RESUME_CHARGE_DETECTION_COUNTER	5
+
+#define AUTO_CHARGING_DONE_CHECK_TIME_MS		1000
+
+#define PM8058_CHG_I_STEP_MA 50
+#define PM8058_CHG_I_MIN_MA 50
+#define PM8058_CHG_T_TCHG_SHIFT 2
+#define PM8058_CHG_I_TERM_STEP_MA 10
+#define PM8058_CHG_V_STEP_MV 25
+#define PM8058_CHG_V_MIN_MV  2400
+/*
+ * enum pmic_chg_interrupts: pmic interrupts
+ * @CHGVAL_IRQ: charger voltage between 3.3 V and 7.9 V
+ * @CHGINVAL_IRQ: charger voltage outside 3.3 V to 7.9 V
+ * @VBATDET_LOW_IRQ: VBAT < VBATDET
+ * @VCP_IRQ: VDD went below VBAT: BAT_FET is turned on
+ * @CHGILIM_IRQ: current drawn > IMAXSEL: charge loop draws less current
+ * @ATC_DONE_IRQ: Auto Trickle done
+ * @ATCFAIL_IRQ: Auto Trickle fail
+ * @AUTO_CHGDONE_IRQ: Auto chg done
+ * @AUTO_CHGFAIL_IRQ: time exceeded w/o reaching term current
+ * @CHGSTATE_IRQ: something happened causing a state change
+ * @FASTCHG_IRQ: trkl charging completed: moving to fastchg
+ * @CHG_END_IRQ: mA has dropped to termination current
+ * @BATTTEMP_IRQ: batt temp is out of range
+ * @CHGHOT_IRQ: the pass device is too hot
+ * @CHGTLIMIT_IRQ: unused
+ * @CHG_GONE_IRQ: charger was removed
+ * @VCPMAJOR_IRQ: vcp major
+ * @VBATDET_IRQ: VBAT >= VBATDET
+ * @BATFET_IRQ: BATFET closed
+ * @BATT_REPLACE_IRQ:
+ * @BATTCONNECT_IRQ:
+ */
+enum pmic_chg_interrupts {
+	CHGVAL_IRQ,
+	CHGINVAL_IRQ,
+	VBATDET_LOW_IRQ,
+	VCP_IRQ,
+	CHGILIM_IRQ,
+	ATC_DONE_IRQ,
+	ATCFAIL_IRQ,
+	AUTO_CHGDONE_IRQ,
+	AUTO_CHGFAIL_IRQ,
+	CHGSTATE_IRQ,
+	FASTCHG_IRQ,
+	CHG_END_IRQ,
+	BATTTEMP_IRQ,
+	CHGHOT_IRQ,
+	CHGTLIMIT_IRQ,
+	CHG_GONE_IRQ,
+	VCPMAJOR_IRQ,
+	VBATDET_IRQ,
+	BATFET_IRQ,
+	BATT_REPLACE_IRQ,
+	BATTCONNECT_IRQ,
+	PMIC_CHG_MAX_INTS
+};
+
+struct pm8058_charger {
+	struct pmic_charger_pdata *pdata;
+	struct pm8058_chip *pm_chip;
+	struct device *dev;
+
+	int pmic_chg_irq[PMIC_CHG_MAX_INTS];
+	DECLARE_BITMAP(enabled_irqs, PMIC_CHG_MAX_INTS);
+
+	struct delayed_work chg_done_check_work;
+	struct delayed_work check_vbat_low_work;
+	struct delayed_work veoc_begin_work;
+	struct delayed_work charging_check_work;
+	int waiting_for_topoff;
+	int waiting_for_veoc;
+	int vbatdet;
+	struct msm_hardware_charger hw_chg;
+	int current_charger_current;
+	int disabled;
+
+	struct msm_xo_voter *voter;
+	struct dentry *dent;
+
+	int inited;
+	int present;
+};
+
+static struct pm8058_charger pm8058_chg;
+static struct msm_hardware_charger usb_hw_chg;
+
+static int msm_battery_gauge_alarm_notify(struct notifier_block *nb,
+					  unsigned long status, void *unused);
+
+static struct notifier_block alarm_notifier = {
+	.notifier_call = msm_battery_gauge_alarm_notify,
+};
+
+static int resume_mv = AUTO_CHARGING_RESUME_MV;
+static DEFINE_MUTEX(batt_alarm_lock);
+static int resume_mv_set(const char *val, struct kernel_param *kp);
+module_param_call(resume_mv, resume_mv_set, param_get_int,
+				&resume_mv, S_IRUGO | S_IWUSR);
+
+static int resume_mv_set(const char *val, struct kernel_param *kp)
+{
+	int rc;
+
+	mutex_lock(&batt_alarm_lock);
+
+	rc = param_set_int(val, kp);
+	if (rc)
+		goto out;
+
+	rc = pm8058_batt_alarm_threshold_set(resume_mv, 4300);
+
+out:
+	mutex_unlock(&batt_alarm_lock);
+	return rc;
+}
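+
+/*
+ * Usage sketch (module name assumed to be pmic8058_charger): writing the
+ * parameter re-arms the lower battery-alarm threshold, e.g.
+ *
+ *   echo 3600 > /sys/module/pmic8058_charger/parameters/resume_mv
+ *
+ * sets the resume threshold to 3600 mV; the upper threshold stays at the
+ * 4300 mV passed to pm8058_batt_alarm_threshold_set() above.
+ */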
+
+static void pm8058_chg_enable_irq(int interrupt)
+{
+	if (!__test_and_set_bit(interrupt, pm8058_chg.enabled_irqs)) {
+		dev_dbg(pm8058_chg.dev, "%s %d\n", __func__,
+			pm8058_chg.pmic_chg_irq[interrupt]);
+		enable_irq(pm8058_chg.pmic_chg_irq[interrupt]);
+	}
+}
+
+static void pm8058_chg_disable_irq(int interrupt)
+{
+	if (__test_and_clear_bit(interrupt, pm8058_chg.enabled_irqs)) {
+		dev_dbg(pm8058_chg.dev, "%s %d\n", __func__,
+			pm8058_chg.pmic_chg_irq[interrupt]);
+		disable_irq_nosync(pm8058_chg.pmic_chg_irq[interrupt]);
+	}
+}
+
+static int pm_chg_get_rt_status(int irq)
+{
+	int count = 3;
+	int ret;
+
+	while ((ret =
+		pm8058_irq_get_rt_status(pm8058_chg.pm_chip, irq)) == -EAGAIN
+	       && count--) {
+		dev_info(pm8058_chg.dev, "%s trycount=%d\n", __func__, count);
+		cpu_relax();
+	}
+	if (ret == -EAGAIN)
+		return 0;
+	else
+		return ret;
+}
+
+static int is_chg_plugged_in(void)
+{
+	return pm_chg_get_rt_status(pm8058_chg.pmic_chg_irq[CHGVAL_IRQ]);
+}
+
+#ifdef DEBUG
+static void __dump_chg_regs(void)
+{
+	u8 temp;
+	int temp2;
+
+	pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_CNTRL, &temp, 1);
+	dev_dbg(pm8058_chg.dev, "PM8058_CHG_CNTRL = 0x%x\n", temp);
+	pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_CNTRL_2, &temp, 1);
+	dev_dbg(pm8058_chg.dev, "PM8058_CHG_CNTRL_2 = 0x%x\n", temp);
+	pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_VMAX_SEL, &temp, 1);
+	dev_dbg(pm8058_chg.dev, "PM8058_CHG_VMAX_SEL = 0x%x\n", temp);
+	pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_VBAT_DET, &temp, 1);
+	dev_dbg(pm8058_chg.dev, "PM8058_CHG_VBAT_DET = 0x%x\n", temp);
+	pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_IMAX, &temp, 1);
+	dev_dbg(pm8058_chg.dev, "PM8058_CHG_IMAX = 0x%x\n", temp);
+	pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_TRICKLE, &temp, 1);
+	dev_dbg(pm8058_chg.dev, "PM8058_CHG_TRICKLE = 0x%x\n", temp);
+	pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_ITERM, &temp, 1);
+	dev_dbg(pm8058_chg.dev, "PM8058_CHG_ITERM = 0x%x\n", temp);
+	pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_TTRKL_MAX, &temp, 1);
+	dev_dbg(pm8058_chg.dev, "PM8058_CHG_TTRKL_MAX = 0x%x\n", temp);
+	pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_TCHG_MAX, &temp, 1);
+	dev_dbg(pm8058_chg.dev, "PM8058_CHG_TCHG_MAX = 0x%x\n", temp);
+	pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_TEMP_THRESH, &temp, 1);
+	dev_dbg(pm8058_chg.dev, "PM8058_CHG_TEMP_THRESH = 0x%x\n", temp);
+	pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_TEMP_REG, &temp, 1);
+	dev_dbg(pm8058_chg.dev, "PM8058_CHG_TEMP_REG = 0x%x\n", temp);
+	pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_PULSE, &temp, 1);
+	dev_dbg(pm8058_chg.dev, "PM8058_CHG_PULSE = 0x%x\n", temp);
+
+	pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_STATUS_CLEAR_IRQ_1,
+		    &temp, 1);
+	dev_dbg(pm8058_chg.dev, "PM8058_CHG_STATUS_CLEAR_IRQ_1 = 0x%x\n", temp);
+	pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_STATUS_CLEAR_IRQ_3,
+		    &temp, 1);
+	dev_dbg(pm8058_chg.dev, "PM8058_CHG_STATUS_CLEAR_IRQ_3 = 0x%x\n", temp);
+	pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_STATUS_CLEAR_IRQ_10,
+		    &temp, 1);
+	dev_dbg(pm8058_chg.dev, "PM8058_CHG_STATUS_CLEAR_IRQ_10 = 0x%x\n",
+		temp);
+	pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_STATUS_CLEAR_IRQ_11,
+		    &temp, 1);
+	dev_dbg(pm8058_chg.dev, "PM8058_CHG_STATUS_CLEAR_IRQ_11 = 0x%x\n",
+		temp);
+
+	pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_MASK_IRQ_1, &temp, 1);
+	dev_dbg(pm8058_chg.dev, "PM8058_CHG_MASK_IRQ_1 = 0x%x\n", temp);
+	pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_MASK_IRQ_3, &temp, 1);
+	dev_dbg(pm8058_chg.dev, "PM8058_CHG_MASK_IRQ_3 = 0x%x\n", temp);
+	pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_MASK_IRQ_10, &temp, 1);
+	dev_dbg(pm8058_chg.dev, "PM8058_CHG_MASK_IRQ_10 = 0x%x\n", temp);
+	pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_MASK_IRQ_11, &temp, 1);
+	dev_dbg(pm8058_chg.dev, "PM8058_CHG_MASK_IRQ_11 = 0x%x\n", temp);
+
+	temp2 = pm_chg_get_rt_status(pm8058_chg.pmic_chg_irq[CHGVAL_IRQ]);
+	dev_dbg(pm8058_chg.dev, "CHGVAL_IRQ = %d\n", temp2);
+
+	temp2 = pm_chg_get_rt_status(pm8058_chg.pmic_chg_irq[CHGINVAL_IRQ]);
+	dev_dbg(pm8058_chg.dev, "CHGINVAL_IRQ = %d\n", temp2);
+
+	temp2 = pm_chg_get_rt_status(pm8058_chg.pmic_chg_irq[VBATDET_LOW_IRQ]);
+	dev_dbg(pm8058_chg.dev, "VBATDET_LOW_IRQ= %d\n", temp2);
+
+	temp2 = pm_chg_get_rt_status(pm8058_chg.pmic_chg_irq[VCP_IRQ]);
+	dev_dbg(pm8058_chg.dev, "VCP_IRQ= %d\n", temp2);
+
+	temp2 = pm_chg_get_rt_status(pm8058_chg.pmic_chg_irq[CHGILIM_IRQ]);
+	dev_dbg(pm8058_chg.dev, "CHGILIM_IRQ= %d\n", temp2);
+
+	temp2 = pm_chg_get_rt_status(pm8058_chg.pmic_chg_irq[ATC_DONE_IRQ]);
+	dev_dbg(pm8058_chg.dev, "ATC_DONE_IRQ= %d\n", temp2);
+
+	temp2 = pm_chg_get_rt_status(pm8058_chg.pmic_chg_irq[ATCFAIL_IRQ]);
+	dev_dbg(pm8058_chg.dev, "ATCFAIL_IRQ= %d\n", temp2);
+
+	temp2 = pm_chg_get_rt_status(pm8058_chg.pmic_chg_irq[AUTO_CHGDONE_IRQ]);
+	dev_dbg(pm8058_chg.dev, "AUTO_CHGDONE_IRQ= %d\n", temp2);
+
+	temp2 = pm_chg_get_rt_status(pm8058_chg.pmic_chg_irq[AUTO_CHGFAIL_IRQ]);
+	dev_dbg(pm8058_chg.dev, "AUTO_CHGFAIL_IRQ= %d\n", temp2);
+
+	temp2 = pm_chg_get_rt_status(pm8058_chg.pmic_chg_irq[CHGSTATE_IRQ]);
+	dev_dbg(pm8058_chg.dev, "CHGSTATE_IRQ= %d\n", temp2);
+
+	temp2 = pm_chg_get_rt_status(pm8058_chg.pmic_chg_irq[FASTCHG_IRQ]);
+	dev_dbg(pm8058_chg.dev, "FASTCHG_IRQ= %d\n", temp2);
+
+	temp2 = pm_chg_get_rt_status(pm8058_chg.pmic_chg_irq[CHG_END_IRQ]);
+	dev_dbg(pm8058_chg.dev, "CHG_END_IRQ= %d\n", temp2);
+
+	temp2 = pm_chg_get_rt_status(pm8058_chg.pmic_chg_irq[BATTTEMP_IRQ]);
+	dev_dbg(pm8058_chg.dev, "BATTTEMP_IRQ= %d\n", temp2);
+
+	temp2 = pm_chg_get_rt_status(pm8058_chg.pmic_chg_irq[CHGHOT_IRQ]);
+	dev_dbg(pm8058_chg.dev, "CHGHOT_IRQ= %d\n", temp2);
+
+	temp2 = pm_chg_get_rt_status(pm8058_chg.pmic_chg_irq[CHGTLIMIT_IRQ]);
+	dev_dbg(pm8058_chg.dev, "CHGTLIMIT_IRQ= %d\n", temp2);
+
+	temp2 = pm_chg_get_rt_status(pm8058_chg.pmic_chg_irq[CHG_GONE_IRQ]);
+	dev_dbg(pm8058_chg.dev, "CHG_GONE_IRQ= %d\n", temp2);
+
+	temp2 = pm_chg_get_rt_status(pm8058_chg.pmic_chg_irq[VCPMAJOR_IRQ]);
+	dev_dbg(pm8058_chg.dev, "VCPMAJOR_IRQ= %d\n", temp2);
+
+	temp2 = pm_chg_get_rt_status(pm8058_chg.pmic_chg_irq[VBATDET_IRQ]);
+	dev_dbg(pm8058_chg.dev, "VBATDET_IRQ= %d\n", temp2);
+
+	temp2 = pm_chg_get_rt_status(pm8058_chg.pmic_chg_irq[BATFET_IRQ]);
+	dev_dbg(pm8058_chg.dev, "BATFET_IRQ= %d\n", temp2);
+
+	temp2 = pm_chg_get_rt_status(pm8058_chg.pmic_chg_irq[BATT_REPLACE_IRQ]);
+	dev_dbg(pm8058_chg.dev, "BATT_REPLACE_IRQ= %d\n", temp2);
+
+	temp2 = pm_chg_get_rt_status(pm8058_chg.pmic_chg_irq[BATTCONNECT_IRQ]);
+	dev_dbg(pm8058_chg.dev, "BATTCONNECT_IRQ= %d\n", temp2);
+}
+#else
+static inline void __dump_chg_regs(void)
+{
+}
+#endif
+
+/* SSBI register access helper functions */
+static int pm_chg_suspend(int value)
+{
+	u8 temp;
+	int ret;
+
+	ret = pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_CNTRL, &temp, 1);
+	if (ret)
+		return ret;
+	if (value)
+		temp |= BIT(CHG_USB_SUSPEND);
+	else
+		temp &= ~BIT(CHG_USB_SUSPEND);
+
+	return pm8058_write(pm8058_chg.pm_chip, PM8058_CHG_CNTRL, &temp, 1);
+}
+
+static int pm_chg_auto_disable(int value)
+{
+	u8 temp;
+	int ret;
+
+	ret = pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_CNTRL_2, &temp, 1);
+	if (ret)
+		return ret;
+	if (value)
+		temp |= BIT(CHARGE_AUTO_DIS);
+	else
+		temp &= ~BIT(CHARGE_AUTO_DIS);
+
+	return pm8058_write(pm8058_chg.pm_chip, PM8058_CHG_CNTRL_2, &temp, 1);
+}
+
+static int pm_chg_batt_temp_disable(int value)
+{
+	u8 temp;
+	int ret;
+
+	ret = pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_CNTRL_2, &temp, 1);
+	if (ret)
+		return ret;
+	if (value)
+		temp |= BIT(CHG_BATT_TEMP_DIS);
+	else
+		temp &= ~BIT(CHG_BATT_TEMP_DIS);
+
+	return pm8058_write(pm8058_chg.pm_chip, PM8058_CHG_CNTRL_2, &temp, 1);
+}
+
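+/*
+ * The VBAT_DET register takes the threshold as the number of
+ * PM8058_CHG_V_STEP_MV steps above PM8058_CHG_V_MIN_MV.
+ */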
+static int pm_chg_vbatdet_set(int voltage)
+{
+	u8 temp;
+	int diff;
+
+	diff = (voltage - PM8058_CHG_V_MIN_MV);
+	if (diff < 0) {
+		dev_warn(pm8058_chg.dev, "%s bad mV=%d asked to set\n",
+			 __func__, voltage);
+		return -EINVAL;
+	}
+
+	temp = diff / PM8058_CHG_V_STEP_MV;
+	dev_dbg(pm8058_chg.dev, "%s voltage=%d setting %02x\n", __func__,
+		voltage, temp);
+	return pm8058_write(pm8058_chg.pm_chip, PM8058_CHG_VBAT_DET, &temp, 1);
+}
+
+static int pm_chg_imaxsel_set(int chg_current)
+{
+	u8 temp;
+	int diff;
+
+	diff = chg_current - PM8058_CHG_I_MIN_MA;
+	if (diff < 0) {
+		dev_warn(pm8058_chg.dev, "%s bad mA=%d asked to set\n",
+			 __func__, chg_current);
+		return -EINVAL;
+	}
+	temp = diff / PM8058_CHG_I_STEP_MA;
+	/* make sure we aren't writing more than 5 bits of data */
+	if (temp > 31) {
+		dev_warn(pm8058_chg.dev, "%s max mA=1500 requested mA=%d\n",
+			__func__, chg_current);
+		temp = 31;
+	}
+	return pm8058_write(pm8058_chg.pm_chip, PM8058_CHG_IMAX, &temp, 1);
+}
+
+#define PM8058_CHG_VMAX_MIN  3300
+#define PM8058_CHG_VMAX_MAX  5500
+static int pm_chg_vmaxsel_set(int voltage)
+{
+	u8 temp;
+
+	if (voltage < PM8058_CHG_VMAX_MIN || voltage > PM8058_CHG_VMAX_MAX) {
+		dev_warn(pm8058_chg.dev, "%s bad mV=%d asked to set\n",
+			 __func__, voltage);
+		return -EINVAL;
+	}
+	temp = (voltage - PM8058_CHG_V_MIN_MV) / PM8058_CHG_V_STEP_MV;
+	dev_dbg(pm8058_chg.dev, "%s mV=%d setting %02x\n", __func__, voltage,
+		temp);
+	return pm8058_write(pm8058_chg.pm_chip, PM8058_CHG_VMAX_SEL, &temp, 1);
+}
+
+static int pm_chg_failed_clear(int value)
+{
+	u8 temp;
+	int ret;
+
+	ret = pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_CNTRL_2, &temp, 1);
+	if (ret)
+		return ret;
+	if (value)
+		temp |= BIT(CHG_FAILED_CLEAR);
+	else
+		temp &= ~BIT(CHG_FAILED_CLEAR);
+	return pm8058_write(pm8058_chg.pm_chip, PM8058_CHG_CNTRL_2, &temp, 1);
+}
+
+static int pm_chg_iterm_set(int chg_current)
+{
+	u8 temp;
+
+	temp = (chg_current / PM8058_CHG_I_TERM_STEP_MA) - 1;
+	return pm8058_write(pm8058_chg.pm_chip, PM8058_CHG_ITERM, &temp, 1);
+}
+
+static int pm_chg_tchg_set(int minutes)
+{
+	u8 temp;
+
+	temp = (minutes >> PM8058_CHG_T_TCHG_SHIFT) - 1;
+	return pm8058_write(pm8058_chg.pm_chip, PM8058_CHG_TCHG_MAX, &temp, 1);
+}
+
+static int pm_chg_ttrkl_set(int minutes)
+{
+	u8 temp;
+
+	temp = minutes - 1;
+	return pm8058_write(pm8058_chg.pm_chip, PM8058_CHG_TTRKL_MAX, &temp, 1);
+}
+
+static int pm_chg_enum_done_enable(int value)
+{
+	u8 temp;
+	int ret;
+
+	ret = pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_CNTRL_2, &temp, 1);
+	if (ret)
+		return ret;
+	if (value)
+		temp |= BIT(ENUM_DONE);
+	else
+		temp &= ~BIT(ENUM_DONE);
+
+	return pm8058_write(pm8058_chg.pm_chip, PM8058_CHG_CNTRL_2, &temp, 1);
+}
+
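+/*
+ * Write 0 to CHG_TEST_3 (which appears to select the FSM state for
+ * readback) and then read the register back to obtain the charger
+ * state machine state.
+ */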
+static uint32_t get_fsm_state(void)
+{
+	u8 temp;
+
+	temp = 0x00;
+	pm8058_write(pm8058_chg.pm_chip, PM8058_CHG_TEST_3, &temp, 1);
+	pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_TEST_3, &temp, 1);
+	return (uint32_t)temp;
+}
+
+static int get_fsm_status(void *data, u64 * val)
+{
+	*val = get_fsm_state();
+	return 0;
+}
+
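+/*
+ * Program a charge cycle: temporarily disable automatic charging, clear
+ * USB suspend, set the charge current, failure-clear bit, termination
+ * current and safety timers, re-enable battery temperature monitoring,
+ * vote the TCXO on, signal enumeration done, then re-enable automatic
+ * charging before unmasking the charge-progress interrupts.
+ */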
+static int __pm8058_start_charging(int chg_current, int termination_current,
+				   int time)
+{
+	int ret = 0;
+
+	if (pm8058_chg.disabled)
+		goto out;
+
+	dev_info(pm8058_chg.dev, "%s %dmA %dmin\n",
+			__func__, chg_current, time);
+
+	ret = pm_chg_auto_disable(1);
+	if (ret)
+		goto out;
+
+	ret = pm_chg_suspend(0);
+	if (ret)
+		goto out;
+
+	ret = pm_chg_imaxsel_set(chg_current);
+	if (ret)
+		goto out;
+
+	ret = pm_chg_failed_clear(1);
+	if (ret)
+		goto out;
+
+	ret = pm_chg_iterm_set(termination_current);
+	if (ret)
+		goto out;
+
+	ret = pm_chg_tchg_set(time);
+	if (ret)
+		goto out;
+
+	ret = pm_chg_ttrkl_set(AUTO_CHARGING_TRICKLE_TIME_MINUTES);
+	if (ret)
+		goto out;
+
+	ret = pm_chg_batt_temp_disable(0);
+	if (ret)
+		goto out;
+
+	if (pm8058_chg.voter == NULL)
+		pm8058_chg.voter = msm_xo_get(MSM_XO_TCXO_D1, "pm8058_charger");
+	msm_xo_mode_vote(pm8058_chg.voter, MSM_XO_MODE_ON);
+
+	ret = pm_chg_enum_done_enable(1);
+	if (ret)
+		goto out;
+
+	wmb();
+
+	ret = pm_chg_auto_disable(0);
+	if (ret)
+		goto out;
+
+	/* wait for the enable to update interrupt status */
+	msleep(20);
+
+	pm8058_chg_enable_irq(AUTO_CHGFAIL_IRQ);
+	pm8058_chg_enable_irq(CHGHOT_IRQ);
+	pm8058_chg_enable_irq(AUTO_CHGDONE_IRQ);
+	pm8058_chg_enable_irq(CHG_END_IRQ);
+	pm8058_chg_enable_irq(CHGSTATE_IRQ);
+
+out:
+	return ret;
+}
+
+static void chg_done_cleanup(void)
+{
+	dev_info(pm8058_chg.dev, "%s notify pm8058 charging completion\n",
+		__func__);
+
+	pm8058_chg_disable_irq(AUTO_CHGDONE_IRQ);
+	cancel_delayed_work_sync(&pm8058_chg.veoc_begin_work);
+	cancel_delayed_work_sync(&pm8058_chg.check_vbat_low_work);
+
+	pm8058_chg_disable_irq(CHG_END_IRQ);
+
+	pm8058_chg_disable_irq(VBATDET_LOW_IRQ);
+	pm8058_chg_disable_irq(VBATDET_IRQ);
+	pm8058_chg.waiting_for_veoc = 0;
+	pm8058_chg.waiting_for_topoff = 0;
+
+	pm_chg_auto_disable(1);
+
+	msm_charger_notify_event(&usb_hw_chg, CHG_DONE_EVENT);
+}
+
+static void chg_done_check_work(struct work_struct *work)
+{
+	chg_done_cleanup();
+}
+
+static void charging_check_work(struct work_struct *work)
+{
+	uint32_t fsm_state = get_fsm_state();
+	int rc;
+
+	switch (fsm_state) {
+	/* We're charging, so disarm alarm */
+	case 2:
+	case 7:
+	case 8:
+		rc = pm8058_batt_alarm_state_set(0, 0);
+		if (rc)
+			dev_err(pm8058_chg.dev,
+				"%s: unable to set alarm state\n", __func__);
+		break;
+	default:
+		/* Still not charging, so update driver state */
+		chg_done_cleanup();
+		break;
+	}
+}
+
+static int pm8058_start_charging(struct msm_hardware_charger *hw_chg,
+				 int chg_voltage, int chg_current)
+{
+	int vbat_higher_than_vbatdet;
+	int ret = 0;
+
+	cancel_delayed_work_sync(&pm8058_chg.charging_check_work);
+
+	/*
+	 * adjust the max current for PC USB connection - set the higher limit
+	 * to 450 and make sure we never cross it
+	 */
+	if (chg_current == 500)
+		chg_current = 450;
+	pm8058_chg.current_charger_current = chg_current;
+	pm8058_chg_enable_irq(FASTCHG_IRQ);
+
+	ret = pm_chg_vmaxsel_set(chg_voltage);
+	if (ret)
+		goto out;
+
+	/* set the vbat detect threshold to the CC-to-CV point */
+	ret = pm_chg_vbatdet_set(AUTO_CHARGING_VBATDET);
+	if (ret)
+		goto out;
+
+	pm8058_chg.vbatdet = AUTO_CHARGING_VBATDET;
+	/*
+	 * Get the state of vbat: if it is higher than AUTO_CHARGING_VBATDET
+	 * we start the veoc timer now, else we wait for the voltage to reach
+	 * AUTO_CHARGING_VBATDET and then start the 90 min timer.
+	 */
+	vbat_higher_than_vbatdet =
+	    pm_chg_get_rt_status(pm8058_chg.pmic_chg_irq[VBATDET_IRQ]);
+	if (vbat_higher_than_vbatdet) {
+		/*
+		 * we are in the constant voltage phase of charging;
+		 * IEOC should happen within 90 mins of this instant,
+		 * else we enable VEOC
+		 */
+		dev_info(pm8058_chg.dev, "%s begin veoc timer\n", __func__);
+		schedule_delayed_work(&pm8058_chg.veoc_begin_work,
+				      round_jiffies_relative(msecs_to_jiffies
+				     (AUTO_CHARGING_VEOC_BEGIN_TIME_MS)));
+	} else
+		pm8058_chg_enable_irq(VBATDET_IRQ);
+
+	ret = __pm8058_start_charging(chg_current, AUTO_CHARGING_IEOC_ITERM,
+				AUTO_CHARGING_FAST_TIME_MAX_MINUTES);
+	pm8058_chg.current_charger_current = chg_current;
+
+	/*
+	 * We want to check the FSM state to verify we're charging. We must
+	 * wait before doing this to allow the VBATDET to settle. The worst
+	 * case for this is two seconds. The batt alarm does not have this
+	 * delay.
+	 */
+	schedule_delayed_work(&pm8058_chg.charging_check_work,
+				      round_jiffies_relative(msecs_to_jiffies
+			     (AUTO_CHARGING_VBATDET_DEBOUNCE_TIME_MS)));
+
+out:
+	return ret;
+}
+
+static void veoc_begin_work(struct work_struct *work)
+{
+	/*
+	 * We have been doing CV for 90 mins with no signs of IEOC;
+	 * start checking for VEOC in addition, with 16 min charge cycles.
+	 */
+	dev_info(pm8058_chg.dev, "%s begin veoc detection\n", __func__);
+	pm8058_chg.waiting_for_veoc = 1;
+	/*
+	 * disable the VBATDET irq; we don't need it unless we are at the
+	 * end of the charge cycle
+	 */
+	pm8058_chg_disable_irq(VBATDET_IRQ);
+	__pm8058_start_charging(pm8058_chg.current_charger_current,
+				AUTO_CHARGING_VEOC_ITERM,
+				AUTO_CHARGING_VEOC_TCHG);
+}
+
+static void vbat_low_work(struct work_struct *work)
+{
+	/*
+	 * It has been one minute and the battery still holds its voltage;
+	 * start the final topoff - charging is almost done
+	 */
+	dev_info(pm8058_chg.dev, "%s vbatt maintained for a minute, "
+		"starting topoff\n", __func__);
+	pm8058_chg.waiting_for_veoc = 0;
+	pm8058_chg.waiting_for_topoff = 1;
+	pm8058_chg_disable_irq(VBATDET_LOW_IRQ);
+	pm8058_chg_disable_irq(VBATDET_IRQ);
+	__pm8058_start_charging(pm8058_chg.current_charger_current,
+				AUTO_CHARGING_VEOC_ITERM,
+				AUTO_CHARGING_VEOC_TCHG_FINAL_CYCLE);
+}
+
+
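+/*
+ * Charger valid (CHGVAL) interrupt: debounce by re-reading the RT status.
+ * On removal, briefly force the OVP FET off through the OVP/CHG test
+ * registers so VCHG can discharge, restore the registers, and notify the
+ * charging core.
+ */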
+static irqreturn_t pm8058_chg_chgval_handler(int irq, void *dev_id)
+{
+	u8 old, temp;
+	int ret;
+
+	if (is_chg_plugged_in()) {	/* this debounces it */
+		if (!pm8058_chg.present) {
+			msm_charger_notify_event(&usb_hw_chg,
+						CHG_INSERTED_EVENT);
+			pm8058_chg.present = 1;
+		}
+	} else {
+		if (pm8058_chg.present) {
+			ret = pm8058_read(pm8058_chg.pm_chip,
+						PM8058_OVP_TEST_REG,
+						&old, 1);
+			temp = old | BIT(FORCE_OVP_OFF);
+			ret = pm8058_write(pm8058_chg.pm_chip,
+						PM8058_OVP_TEST_REG,
+						&temp, 1);
+			temp = 0xFC;
+			ret = pm8058_write(pm8058_chg.pm_chip, PM8058_CHG_TEST,
+						&temp, 1);
+			/* 10 ms sleep is for the VCHG to discharge */
+			msleep(10);
+			temp = 0xF0;
+			ret = pm8058_write(pm8058_chg.pm_chip,
+						PM8058_CHG_TEST,
+						&temp, 1);
+			ret = pm8058_write(pm8058_chg.pm_chip,
+						PM8058_OVP_TEST_REG,
+						&old, 1);
+
+			pm_chg_enum_done_enable(0);
+			pm_chg_auto_disable(1);
+			msm_charger_notify_event(&usb_hw_chg,
+						CHG_REMOVED_EVENT);
+			pm8058_chg.present = 0;
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t pm8058_chg_chginval_handler(int irq, void *dev_id)
+{
+	u8 old, temp;
+	int ret;
+
+	if (pm8058_chg.present) {
+		pm8058_chg_disable_irq(CHGINVAL_IRQ);
+
+		pm_chg_enum_done_enable(0);
+		pm_chg_auto_disable(1);
+		ret = pm8058_read(pm8058_chg.pm_chip,
+				PM8058_OVP_TEST_REG, &old, 1);
+		temp = old | BIT(FORCE_OVP_OFF);
+		ret = pm8058_write(pm8058_chg.pm_chip,
+				PM8058_OVP_TEST_REG, &temp, 1);
+		temp = 0xFC;
+		ret = pm8058_write(pm8058_chg.pm_chip,
+				PM8058_CHG_TEST, &temp, 1);
+		/* 10 ms sleep is for the VCHG to discharge */
+		msleep(10);
+		temp = 0xF0;
+		ret = pm8058_write(pm8058_chg.pm_chip,
+				PM8058_CHG_TEST, &temp, 1);
+		ret = pm8058_write(pm8058_chg.pm_chip,
+				PM8058_OVP_TEST_REG, &old, 1);
+
+		if (!is_chg_plugged_in()) {
+			msm_charger_notify_event(&usb_hw_chg,
+					CHG_REMOVED_EVENT);
+			pm8058_chg.present = 0;
+		} else {
+			/* was a fake */
+			pm8058_chg_enable_irq(CHGINVAL_IRQ);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t pm8058_chg_auto_chgdone_handler(int irq, void *dev_id)
+{
+	dev_info(pm8058_chg.dev, "%s waiting a sec to confirm\n",
+		__func__);
+	pm8058_chg_disable_irq(VBATDET_IRQ);
+	if (!delayed_work_pending(&pm8058_chg.chg_done_check_work)) {
+		schedule_delayed_work(&pm8058_chg.chg_done_check_work,
+				      round_jiffies_relative(msecs_to_jiffies
+			     (AUTO_CHARGING_DONE_CHECK_TIME_MS)));
+	}
+	return IRQ_HANDLED;
+}
+
+/* can only happen with the pmic charger when it has been charging
+ * for either 16 mins waiting for VEOC or 32 mins for topoff
+ * without an IEOC indication */
+static irqreturn_t pm8058_chg_auto_chgfail_handler(int irq, void *dev_id)
+{
+	pm8058_chg_disable_irq(AUTO_CHGFAIL_IRQ);
+
+	if (pm8058_chg.waiting_for_topoff == 1) {
+		dev_info(pm8058_chg.dev, "%s topoff done, charging done\n",
+			__func__);
+		pm8058_chg.waiting_for_topoff = 0;
+		/* notify we are done charging */
+		msm_charger_notify_event(&usb_hw_chg, CHG_DONE_EVENT);
+	} else {
+		/* start one minute timer and monitor VBATDET_LOW */
+		dev_info(pm8058_chg.dev, "%s monitoring vbat_low for a "
+			"minute\n", __func__);
+		schedule_delayed_work(&pm8058_chg.check_vbat_low_work,
+				      round_jiffies_relative(msecs_to_jiffies
+			     (AUTO_CHARGING_VEOC_VBAT_LOW_CHECK_TIME_MS)));
+
+		/* note we are waiting on veoc */
+		pm8058_chg.waiting_for_veoc = 1;
+
+		pm_chg_vbatdet_set(AUTO_CHARGING_VEOC_VBATDET);
+		pm8058_chg.vbatdet = AUTO_CHARGING_VEOC_VBATDET;
+		pm8058_chg_enable_irq(VBATDET_LOW_IRQ);
+	}
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t pm8058_chg_chgstate_handler(int irq, void *dev_id)
+{
+	u8 temp;
+
+	temp = 0x00;
+	if (!pm8058_write(pm8058_chg.pm_chip, PM8058_CHG_TEST_3, &temp, 1)) {
+		pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_TEST_3, &temp, 1);
+		dev_dbg(pm8058_chg.dev, "%s state=%d\n", __func__, temp);
+	}
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t pm8058_chg_fastchg_handler(int irq, void *dev_id)
+{
+	pm8058_chg_disable_irq(FASTCHG_IRQ);
+
+	/* we have begun the fast charging state */
+	dev_info(pm8058_chg.dev, "%s begin fast charging\n", __func__);
+	msm_charger_notify_event(&usb_hw_chg, CHG_BATT_BEGIN_FAST_CHARGING);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t pm8058_chg_batttemp_handler(int irq, void *dev_id)
+{
+	int ret;
+
+	/*
+	 * we could get a temperature interrupt when the battery
+	 * is unplugged
+	 */
+	ret = pm_chg_get_rt_status(pm8058_chg.pmic_chg_irq[BATTCONNECT_IRQ]);
+	if (ret) {
+		msm_charger_notify_event(&usb_hw_chg, CHG_BATT_REMOVED);
+	} else {
+		/* read status to determine whether we are in range or out of range */
+		ret =
+		    pm_chg_get_rt_status(pm8058_chg.pmic_chg_irq[BATTTEMP_IRQ]);
+		if (ret)
+			msm_charger_notify_event(&usb_hw_chg,
+						 CHG_BATT_TEMP_OUTOFRANGE);
+		else
+			msm_charger_notify_event(&usb_hw_chg,
+						 CHG_BATT_TEMP_INRANGE);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t pm8058_chg_vbatdet_handler(int irq, void *dev_id)
+{
+	int ret;
+
+	/* settling time */
+	msleep(20);
+	ret = pm_chg_get_rt_status(pm8058_chg.pmic_chg_irq[VBATDET_IRQ]);
+
+	if (ret) {
+		if (pm8058_chg.vbatdet == AUTO_CHARGING_VBATDET
+			&& !delayed_work_pending(&pm8058_chg.veoc_begin_work)) {
+			/*
+			 * we are in the constant voltage phase of charging;
+			 * IEOC should happen within 90 mins of this instant,
+			 * else we enable VEOC
+			 */
+			dev_info(pm8058_chg.dev, "%s entered constant voltage, "
+				"begin veoc timer\n", __func__);
+			schedule_delayed_work(&pm8058_chg.veoc_begin_work,
+				      round_jiffies_relative
+				      (msecs_to_jiffies
+				      (AUTO_CHARGING_VEOC_BEGIN_TIME_MS)));
+		}
+	} else {
+		if (pm8058_chg.vbatdet == AUTO_CHARGING_VEOC_VBATDET) {
+			cancel_delayed_work_sync(
+				&pm8058_chg.check_vbat_low_work);
+
+			if (pm8058_chg.waiting_for_topoff
+			    || pm8058_chg.waiting_for_veoc) {
+				/*
+				 * The battery dropped its voltage below 4100
+				 * within around a minute; charge it for 16
+				 * more mins and then check vbat again for
+				 * a minute.
+				 */
+				dev_info(pm8058_chg.dev, "%s batt dropped vlt "
+					"within a minute\n", __func__);
+				pm8058_chg.waiting_for_topoff = 0;
+				pm8058_chg.waiting_for_veoc = 1;
+				pm8058_chg_disable_irq(VBATDET_IRQ);
+				__pm8058_start_charging(pm8058_chg.
+						current_charger_current,
+						AUTO_CHARGING_VEOC_ITERM,
+						AUTO_CHARGING_VEOC_TCHG);
+			}
+		}
+	}
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t pm8058_chg_batt_replace_handler(int irq, void *dev_id)
+{
+	int ret;
+
+	pm8058_chg_disable_irq(BATT_REPLACE_IRQ);
+	ret = pm_chg_get_rt_status(pm8058_chg.pmic_chg_irq[BATT_REPLACE_IRQ]);
+	if (ret) {
+		msm_charger_notify_event(&usb_hw_chg, CHG_BATT_INSERTED);
+		/*
+		 * battery is present; enable the batt removal
+		 * and batt temperature interrupts
+		 */
+		pm8058_chg_enable_irq(BATTCONNECT_IRQ);
+	}
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t pm8058_chg_battconnect_handler(int irq, void *dev_id)
+{
+	int ret;
+
+	ret = pm_chg_get_rt_status(pm8058_chg.pmic_chg_irq[BATTCONNECT_IRQ]);
+	if (ret) {
+		msm_charger_notify_event(&usb_hw_chg, CHG_BATT_REMOVED);
+	} else {
+		msm_charger_notify_event(&usb_hw_chg, CHG_BATT_INSERTED);
+		pm8058_chg_enable_irq(BATTTEMP_IRQ);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int get_rt_status(void *data, u64 * val)
+{
+	int i = (int)data;
+	int ret;
+
+	ret = pm_chg_get_rt_status(i);
+	*val = ret;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(rt_fops, get_rt_status, NULL, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(fsm_fops, get_fsm_status, NULL, "%llu\n");
+
+static void free_irqs(void)
+{
+	int i;
+
+	for (i = 0; i < PMIC_CHG_MAX_INTS; i++)
+		if (pm8058_chg.pmic_chg_irq[i]) {
+			free_irq(pm8058_chg.pmic_chg_irq[i], NULL);
+			pm8058_chg.pmic_chg_irq[i] = 0;
+		}
+}
+
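+/*
+ * Look up each named IRQ resource, install a threaded handler for it,
+ * record the irq number and leave it disabled; only CHGVAL is also made
+ * a wakeup source.  Any failure tears down the irqs requested so far.
+ */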
+static int __devinit request_irqs(struct platform_device *pdev)
+{
+	struct resource *res;
+	int ret;
+
+	ret = 0;
+	bitmap_fill(pm8058_chg.enabled_irqs, PMIC_CHG_MAX_INTS);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "CHGVAL");
+	if (res == NULL) {
+		dev_err(pm8058_chg.dev,
+			"%s:couldnt find resource CHGVAL\n", __func__);
+		goto err_out;
+	} else {
+		ret = request_threaded_irq(res->start, NULL,
+				  pm8058_chg_chgval_handler,
+				  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+				  res->name, NULL);
+		if (ret < 0) {
+			dev_err(pm8058_chg.dev, "%s:couldnt request %d %d\n",
+				__func__, res->start, ret);
+			goto err_out;
+		} else {
+			pm8058_chg.pmic_chg_irq[CHGVAL_IRQ] = res->start;
+			pm8058_chg_disable_irq(CHGVAL_IRQ);
+			enable_irq_wake(pm8058_chg.pmic_chg_irq[CHGVAL_IRQ]);
+		}
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "CHGINVAL");
+	if (res == NULL) {
+		dev_err(pm8058_chg.dev,
+			"%s:couldnt find resource CHGINVAL\n", __func__);
+		goto err_out;
+	} else {
+		ret = request_threaded_irq(res->start, NULL,
+				  pm8058_chg_chginval_handler,
+				  IRQF_TRIGGER_RISING, res->name, NULL);
+		if (ret < 0) {
+			dev_err(pm8058_chg.dev, "%s:couldnt request %d %d\n",
+				__func__, res->start, ret);
+			goto err_out;
+		} else {
+			pm8058_chg.pmic_chg_irq[CHGINVAL_IRQ] = res->start;
+			pm8058_chg_disable_irq(CHGINVAL_IRQ);
+		}
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+					   "AUTO_CHGDONE");
+	if (res == NULL) {
+		dev_err(pm8058_chg.dev,
+			"%s:couldnt find resource AUTO_CHGDONE\n", __func__);
+		goto err_out;
+	} else {
+		ret = request_threaded_irq(res->start, NULL,
+				  pm8058_chg_auto_chgdone_handler,
+				  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+				  res->name, NULL);
+		if (ret < 0) {
+			dev_err(pm8058_chg.dev, "%s:couldnt request %d %d\n",
+				__func__, res->start, ret);
+			goto err_out;
+		} else {
+			pm8058_chg.pmic_chg_irq[AUTO_CHGDONE_IRQ] = res->start;
+			pm8058_chg_disable_irq(AUTO_CHGDONE_IRQ);
+		}
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+					   "AUTO_CHGFAIL");
+	if (res == NULL) {
+		dev_err(pm8058_chg.dev,
+			"%s:couldnt find resource AUTO_CHGFAIL\n", __func__);
+		goto err_out;
+	} else {
+		ret = request_threaded_irq(res->start, NULL,
+				  pm8058_chg_auto_chgfail_handler,
+				  IRQF_TRIGGER_RISING, res->name, NULL);
+		if (ret < 0) {
+			dev_err(pm8058_chg.dev, "%s:couldnt request %d %d\n",
+				__func__, res->start, ret);
+			goto err_out;
+		} else {
+			pm8058_chg.pmic_chg_irq[AUTO_CHGFAIL_IRQ] = res->start;
+			pm8058_chg_disable_irq(AUTO_CHGFAIL_IRQ);
+		}
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "CHGSTATE");
+	if (res == NULL) {
+		dev_err(pm8058_chg.dev,
+			"%s:couldnt find resource CHGSTATE\n", __func__);
+		goto err_out;
+	} else {
+		ret = request_threaded_irq(res->start, NULL,
+				  pm8058_chg_chgstate_handler,
+				  IRQF_TRIGGER_RISING, res->name, NULL);
+		if (ret < 0) {
+			dev_err(pm8058_chg.dev, "%s:couldnt request %d %d\n",
+				__func__, res->start, ret);
+			goto err_out;
+		} else {
+			pm8058_chg.pmic_chg_irq[CHGSTATE_IRQ] = res->start;
+			pm8058_chg_disable_irq(CHGSTATE_IRQ);
+		}
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "FASTCHG");
+	if (res == NULL) {
+		dev_err(pm8058_chg.dev,
+			"%s:couldnt find resource FASTCHG\n", __func__);
+		goto err_out;
+	} else {
+		ret = request_threaded_irq(res->start, NULL,
+				  pm8058_chg_fastchg_handler,
+				  IRQF_TRIGGER_RISING, res->name, NULL);
+		if (ret < 0) {
+			dev_err(pm8058_chg.dev, "%s:couldnt request %d %d\n",
+				__func__, res->start, ret);
+			goto err_out;
+		} else {
+			pm8058_chg.pmic_chg_irq[FASTCHG_IRQ] = res->start;
+			pm8058_chg_disable_irq(FASTCHG_IRQ);
+		}
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "BATTTEMP");
+	if (res == NULL) {
+		dev_err(pm8058_chg.dev,
+			"%s:couldnt find resource BATTTEMP\n", __func__);
+		goto err_out;
+	} else {
+		ret = request_threaded_irq(res->start, NULL,
+				  pm8058_chg_batttemp_handler,
+				  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+				  res->name, NULL);
+		if (ret < 0) {
+			dev_err(pm8058_chg.dev, "%s:couldnt request %d %d\n",
+				__func__, res->start, ret);
+			goto err_out;
+		} else {
+			pm8058_chg.pmic_chg_irq[BATTTEMP_IRQ] = res->start;
+			pm8058_chg_disable_irq(BATTTEMP_IRQ);
+		}
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+					   "BATT_REPLACE");
+	if (res == NULL) {
+		dev_err(pm8058_chg.dev,
+			"%s:couldnt find resource BATT_REPLACE\n", __func__);
+		goto err_out;
+	} else {
+		ret = request_threaded_irq(res->start, NULL,
+				  pm8058_chg_batt_replace_handler,
+				  IRQF_TRIGGER_RISING, res->name, NULL);
+		if (ret < 0) {
+			dev_err(pm8058_chg.dev, "%s:couldnt request %d %d\n",
+				__func__, res->start, ret);
+			goto err_out;
+		} else {
+			pm8058_chg.pmic_chg_irq[BATT_REPLACE_IRQ] = res->start;
+			pm8058_chg_disable_irq(BATT_REPLACE_IRQ);
+		}
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "BATTCONNECT");
+	if (res == NULL) {
+		dev_err(pm8058_chg.dev,
+			"%s:couldnt find resource BATTCONNECT\n", __func__);
+		goto err_out;
+	} else {
+		ret = request_threaded_irq(res->start, NULL,
+				  pm8058_chg_battconnect_handler,
+				  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+				  res->name, NULL);
+		if (ret < 0) {
+			dev_err(pm8058_chg.dev, "%s:couldnt request %d %d\n",
+				__func__, res->start, ret);
+			goto err_out;
+		} else {
+			pm8058_chg.pmic_chg_irq[BATTCONNECT_IRQ] = res->start;
+			pm8058_chg_disable_irq(BATTCONNECT_IRQ);
+		}
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "VBATDET");
+	if (res == NULL) {
+		dev_err(pm8058_chg.dev,
+			"%s:couldnt find resource VBATDET\n", __func__);
+		goto err_out;
+	} else {
+		ret = request_threaded_irq(res->start, NULL,
+				  pm8058_chg_vbatdet_handler,
+				  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+				  res->name, NULL);
+		if (ret < 0) {
+			dev_err(pm8058_chg.dev, "%s:couldnt request %d %d\n",
+				__func__, res->start, ret);
+			goto err_out;
+		} else {
+			pm8058_chg.pmic_chg_irq[VBATDET_IRQ] = res->start;
+			pm8058_chg_disable_irq(VBATDET_IRQ);
+		}
+	}
+
+	return 0;
+
+err_out:
+	free_irqs();
+	return -EINVAL;
+}
+
+static int pm8058_get_charge_batt(void)
+{
+	u8 temp;
+	int rc;
+
+	rc = pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_CNTRL, &temp, 1);
+	if (rc)
+		return rc;
+
+	temp &= BIT(CHG_CHARGE_BAT);
+	if (temp)
+		temp = 1;
+	return temp;
+}
+EXPORT_SYMBOL(pm8058_get_charge_batt);
+
+static int pm8058_set_charge_batt(int on)
+{
+	u8 temp;
+	int rc;
+
+	rc = pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_CNTRL, &temp, 1);
+	if (rc)
+		return rc;
+	if (on)
+		temp |= BIT(CHG_CHARGE_BAT);
+	else
+		temp &= ~BIT(CHG_CHARGE_BAT);
+
+	return pm8058_write(pm8058_chg.pm_chip, PM8058_CHG_CNTRL, &temp, 1);
+
+}
+EXPORT_SYMBOL(pm8058_set_charge_batt);
+
+static int get_charge_batt(void *data, u64 * val)
+{
+	int ret;
+
+	ret = pm8058_get_charge_batt();
+	if (ret < 0)
+		return ret;
+
+	*val = ret;
+	return 0;
+}
+
+static int set_charge_batt(void *data, u64 val)
+{
+	return pm8058_set_charge_batt(val);
+}
+DEFINE_SIMPLE_ATTRIBUTE(fet_fops, get_charge_batt, set_charge_batt, "%llu\n");
+
+static void pm8058_chg_determine_initial_state(void)
+{
+	if (is_chg_plugged_in()) {
+		pm8058_chg.present = 1;
+		msm_charger_notify_event(&usb_hw_chg, CHG_INSERTED_EVENT);
+		dev_info(pm8058_chg.dev, "%s charger present\n", __func__);
+	} else {
+		pm8058_chg.present = 0;
+		dev_info(pm8058_chg.dev, "%s charger absent\n", __func__);
+	}
+	pm8058_chg_enable_irq(CHGVAL_IRQ);
+}
+
+static int pm8058_stop_charging(struct msm_hardware_charger *hw_chg)
+{
+	int ret;
+
+	dev_info(pm8058_chg.dev, "%s stopping charging\n", __func__);
+	cancel_delayed_work_sync(&pm8058_chg.veoc_begin_work);
+	cancel_delayed_work_sync(&pm8058_chg.check_vbat_low_work);
+	cancel_delayed_work_sync(&pm8058_chg.chg_done_check_work);
+	cancel_delayed_work_sync(&pm8058_chg.charging_check_work);
+
+	ret = pm_chg_get_rt_status(pm8058_chg.pmic_chg_irq[FASTCHG_IRQ]);
+	if (ret == 1)
+		pm_chg_suspend(1);
+	else
+		dev_err(pm8058_chg.dev,
+			"%s called when not fast-charging\n", __func__);
+
+	pm_chg_failed_clear(1);
+
+	pm8058_chg.waiting_for_veoc = 0;
+	pm8058_chg.waiting_for_topoff = 0;
+
+	/* disable the irqs enabled while charging */
+	pm8058_chg_disable_irq(AUTO_CHGFAIL_IRQ);
+	pm8058_chg_disable_irq(CHGHOT_IRQ);
+	pm8058_chg_disable_irq(AUTO_CHGDONE_IRQ);
+	pm8058_chg_disable_irq(FASTCHG_IRQ);
+	pm8058_chg_disable_irq(CHG_END_IRQ);
+	pm8058_chg_disable_irq(VBATDET_IRQ);
+	pm8058_chg_disable_irq(VBATDET_LOW_IRQ);
+	if (pm8058_chg.voter)
+		msm_xo_mode_vote(pm8058_chg.voter, MSM_XO_MODE_OFF);
+
+	return 0;
+}
+
+static int get_status(void *data, u64 * val)
+{
+	*val = pm8058_chg.current_charger_current;
+	return 0;
+}
+
+static int set_status(void *data, u64 val)
+{
+
+	pm8058_chg.current_charger_current = val;
+	if (pm8058_chg.current_charger_current)
+		pm8058_start_charging(NULL,
+			AUTO_CHARGING_VMAXSEL,
+			pm8058_chg.current_charger_current);
+	else
+		pm8058_stop_charging(NULL);
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(chg_fops, get_status, set_status, "%llu\n");
+
+static int set_disable_status_param(const char *val, struct kernel_param *kp)
+{
+	int ret;
+
+	ret = param_set_int(val, kp);
+	if (ret)
+		return ret;
+
+	if (pm8058_chg.inited && pm8058_chg.disabled) {
+		/*
+		 * stop_charging is called during usb suspend;
+		 * act as if the usb were removed by disabling auto and enum
+		 */
+		pm_chg_enum_done_enable(0);
+		pm_chg_auto_disable(1);
+		pm8058_stop_charging(NULL);
+	}
+	return 0;
+}
+module_param_call(disabled, set_disable_status_param, param_get_uint,
+					&(pm8058_chg.disabled), 0644);
+
+static int pm8058_charging_switched(struct msm_hardware_charger *hw_chg)
+{
+	u8 temp;
+
+	temp = 0xA3;
+	pm8058_write(pm8058_chg.pm_chip, PM8058_CHG_TEST_2, &temp, 1);
+	temp = 0x84;
+	pm8058_write(pm8058_chg.pm_chip, PM8058_CHG_TEST_2, &temp, 1);
+	msleep(2);
+	temp = 0x80;
+	pm8058_write(pm8058_chg.pm_chip, PM8058_CHG_TEST_2, &temp, 1);
+	return 0;
+}
+
+static int get_reg(void *data, u64 * val)
+{
+	int i = (int)data;
+	int ret;
+	u8 temp;
+
+	ret = pm8058_read(pm8058_chg.pm_chip, i, &temp, 1);
+	if (ret)
+		return -EAGAIN;
+	*val = temp;
+	return 0;
+}
+
+static int set_reg(void *data, u64 val)
+{
+	int i = (int)data;
+	int ret;
+	u8 temp;
+
+	temp = (u8) val;
+	ret = pm8058_write(pm8058_chg.pm_chip, i, &temp, 1);
+	mb();
+	if (ret)
+		return -EAGAIN;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(reg_fops, get_reg, set_reg, "%llu\n");
+
+#ifdef CONFIG_DEBUG_FS
+static void create_debugfs_entries(void)
+{
+	pm8058_chg.dent = debugfs_create_dir("pm8058_usb_chg", NULL);
+
+	if (IS_ERR(pm8058_chg.dent)) {
+		pr_err("pmic charger couldnt create debugfs dir\n");
+		return;
+	}
+
+	debugfs_create_file("CHG_CNTRL", 0644, pm8058_chg.dent,
+			    (void *)PM8058_CHG_CNTRL, &reg_fops);
+	debugfs_create_file("CHG_CNTRL_2", 0644, pm8058_chg.dent,
+			    (void *)PM8058_CHG_CNTRL_2, &reg_fops);
+	debugfs_create_file("CHG_VMAX_SEL", 0644, pm8058_chg.dent,
+			    (void *)PM8058_CHG_VMAX_SEL, &reg_fops);
+	debugfs_create_file("CHG_VBAT_DET", 0644, pm8058_chg.dent,
+			    (void *)PM8058_CHG_VBAT_DET, &reg_fops);
+	debugfs_create_file("CHG_IMAX", 0644, pm8058_chg.dent,
+			    (void *)PM8058_CHG_IMAX, &reg_fops);
+	debugfs_create_file("CHG_TRICKLE", 0644, pm8058_chg.dent,
+			    (void *)PM8058_CHG_TRICKLE, &reg_fops);
+	debugfs_create_file("CHG_ITERM", 0644, pm8058_chg.dent,
+			    (void *)PM8058_CHG_ITERM, &reg_fops);
+	debugfs_create_file("CHG_TTRKL_MAX", 0644, pm8058_chg.dent,
+			    (void *)PM8058_CHG_TTRKL_MAX, &reg_fops);
+	debugfs_create_file("CHG_TCHG_MAX", 0644, pm8058_chg.dent,
+			    (void *)PM8058_CHG_TCHG_MAX, &reg_fops);
+	debugfs_create_file("CHG_TEMP_THRESH", 0644, pm8058_chg.dent,
+			    (void *)PM8058_CHG_TEMP_THRESH, &reg_fops);
+	debugfs_create_file("CHG_TEMP_REG", 0644, pm8058_chg.dent,
+			    (void *)PM8058_CHG_TEMP_REG, &reg_fops);
+
+	debugfs_create_file("FSM_STATE", 0644, pm8058_chg.dent, NULL,
+			    &fsm_fops);
+
+	debugfs_create_file("stop", 0644, pm8058_chg.dent, NULL,
+			    &chg_fops);
+
+	if (pm8058_chg.pmic_chg_irq[CHGVAL_IRQ])
+		debugfs_create_file("CHGVAL", 0444, pm8058_chg.dent,
+				    (void *)pm8058_chg.pmic_chg_irq[CHGVAL_IRQ],
+				    &rt_fops);
+
+	if (pm8058_chg.pmic_chg_irq[CHGINVAL_IRQ])
+		debugfs_create_file("CHGINVAL", 0444, pm8058_chg.dent, (void *)
+				    pm8058_chg.pmic_chg_irq[CHGINVAL_IRQ],
+				    &rt_fops);
+	if (pm8058_chg.pmic_chg_irq[CHGILIM_IRQ])
+		debugfs_create_file("CHGILIM", 0444, pm8058_chg.dent, (void *)
+				    pm8058_chg.pmic_chg_irq[CHGILIM_IRQ],
+				    &rt_fops);
+	if (pm8058_chg.pmic_chg_irq[VCP_IRQ])
+		debugfs_create_file("VCP", 0444, pm8058_chg.dent,
+				    (void *)pm8058_chg.pmic_chg_irq[VCP_IRQ],
+				    &rt_fops);
+	if (pm8058_chg.pmic_chg_irq[ATC_DONE_IRQ])
+		debugfs_create_file("ATC_DONE", 0444, pm8058_chg.dent, (void *)
+				    pm8058_chg.pmic_chg_irq[ATC_DONE_IRQ],
+				    &rt_fops);
+	if (pm8058_chg.pmic_chg_irq[ATCFAIL_IRQ])
+		debugfs_create_file("ATCFAIL", 0444, pm8058_chg.dent, (void *)
+				    pm8058_chg.pmic_chg_irq[ATCFAIL_IRQ],
+				    &rt_fops);
+	if (pm8058_chg.pmic_chg_irq[AUTO_CHGDONE_IRQ])
+		debugfs_create_file("AUTO_CHGDONE", 0444, pm8058_chg.dent,
+				    (void *)
+				    pm8058_chg.pmic_chg_irq[AUTO_CHGDONE_IRQ],
+				    &rt_fops);
+	if (pm8058_chg.pmic_chg_irq[AUTO_CHGFAIL_IRQ])
+		debugfs_create_file("AUTO_CHGFAIL", 0444, pm8058_chg.dent,
+				    (void *)
+				    pm8058_chg.pmic_chg_irq[AUTO_CHGFAIL_IRQ],
+				    &rt_fops);
+	if (pm8058_chg.pmic_chg_irq[CHGSTATE_IRQ])
+		debugfs_create_file("CHGSTATE", 0444, pm8058_chg.dent, (void *)
+				    pm8058_chg.pmic_chg_irq[CHGSTATE_IRQ],
+				    &rt_fops);
+	if (pm8058_chg.pmic_chg_irq[FASTCHG_IRQ])
+		debugfs_create_file("FASTCHG", 0444, pm8058_chg.dent, (void *)
+				    pm8058_chg.pmic_chg_irq[FASTCHG_IRQ],
+				    &rt_fops);
+	if (pm8058_chg.pmic_chg_irq[CHG_END_IRQ])
+		debugfs_create_file("CHG_END", 0444, pm8058_chg.dent, (void *)
+				    pm8058_chg.pmic_chg_irq[CHG_END_IRQ],
+				    &rt_fops);
+	if (pm8058_chg.pmic_chg_irq[BATTTEMP_IRQ])
+		debugfs_create_file("BATTTEMP", 0444, pm8058_chg.dent, (void *)
+				    pm8058_chg.pmic_chg_irq[BATTTEMP_IRQ],
+				    &rt_fops);
+	if (pm8058_chg.pmic_chg_irq[CHGHOT_IRQ])
+		debugfs_create_file("CHGHOT", 0444, pm8058_chg.dent,
+				    (void *)pm8058_chg.pmic_chg_irq[CHGHOT_IRQ],
+				    &rt_fops);
+	if (pm8058_chg.pmic_chg_irq[CHGTLIMIT_IRQ])
+		debugfs_create_file("CHGTLIMIT", 0444, pm8058_chg.dent, (void *)
+				    pm8058_chg.pmic_chg_irq[CHGTLIMIT_IRQ],
+				    &rt_fops);
+	if (pm8058_chg.pmic_chg_irq[CHG_GONE_IRQ])
+		debugfs_create_file("CHG_GONE", 0444, pm8058_chg.dent, (void *)
+				    pm8058_chg.pmic_chg_irq[CHG_GONE_IRQ],
+				    &rt_fops);
+	if (pm8058_chg.pmic_chg_irq[VCPMAJOR_IRQ])
+		debugfs_create_file("VCPMAJOR", 0444, pm8058_chg.dent, (void *)
+				    pm8058_chg.pmic_chg_irq[VCPMAJOR_IRQ],
+				    &rt_fops);
+	if (pm8058_chg.pmic_chg_irq[BATFET_IRQ])
+		debugfs_create_file("BATFET", 0444, pm8058_chg.dent,
+				    (void *)pm8058_chg.pmic_chg_irq[BATFET_IRQ],
+				    &rt_fops);
+	if (pm8058_chg.pmic_chg_irq[BATT_REPLACE_IRQ])
+		debugfs_create_file("BATT_REPLACE", 0444, pm8058_chg.dent,
+				    (void *)
+				    pm8058_chg.pmic_chg_irq[BATT_REPLACE_IRQ],
+				    &rt_fops);
+	if (pm8058_chg.pmic_chg_irq[BATTCONNECT_IRQ])
+		debugfs_create_file("BATTCONNECT", 0444, pm8058_chg.dent,
+				    (void *)
+				    pm8058_chg.pmic_chg_irq[BATTCONNECT_IRQ],
+				    &rt_fops);
+	if (pm8058_chg.pmic_chg_irq[VBATDET_IRQ])
+		debugfs_create_file("VBATDET", 0444, pm8058_chg.dent, (void *)
+				    pm8058_chg.pmic_chg_irq[VBATDET_IRQ],
+				    &rt_fops);
+	if (pm8058_chg.pmic_chg_irq[VBATDET_LOW_IRQ])
+		debugfs_create_file("VBATDET_LOW", 0444, pm8058_chg.dent,
+				    (void *)
+				    pm8058_chg.pmic_chg_irq[VBATDET_LOW_IRQ],
+				    &rt_fops);
+	debugfs_create_file("CHARGE_BATT", 0444, pm8058_chg.dent,
+				    NULL,
+				    &fet_fops);
+}
+#else
+static inline void create_debugfs_entries(void)
+{
+}
+#endif
+
+static void remove_debugfs_entries(void)
+{
+	debugfs_remove_recursive(pm8058_chg.dent);
+}
+
+static struct msm_hardware_charger usb_hw_chg = {
+	.type = CHG_TYPE_USB,
+	.rating = 1,
+	.name = "pm8058-usb",
+	.start_charging = pm8058_start_charging,
+	.stop_charging = pm8058_stop_charging,
+	.charging_switched = pm8058_charging_switched,
+};
+
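+/*
+ * Blocking ADC read: open the channel, request a conversion, wait for the
+ * completion, read the result and close the channel.  Returns the physical
+ * value and, if requested, the raw measurement in *mv_reading.
+ */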
+static int batt_read_adc(int channel, int *mv_reading)
+{
+	int ret;
+	void *h;
+	struct adc_chan_result adc_chan_result;
+	struct completion  conv_complete_evt;
+
+	pr_debug("%s: called for %d\n", __func__, channel);
+	ret = adc_channel_open(channel, &h);
+	if (ret) {
+		pr_err("%s: couldnt open channel %d ret=%d\n",
+					__func__, channel, ret);
+		goto out;
+	}
+	init_completion(&conv_complete_evt);
+	ret = adc_channel_request_conv(h, &conv_complete_evt);
+	if (ret) {
+		pr_err("%s: couldnt request conv channel %d ret=%d\n",
+						__func__, channel, ret);
+		goto out;
+	}
+	wait_for_completion(&conv_complete_evt);
+	ret = adc_channel_read_result(h, &adc_chan_result);
+	if (ret) {
+		pr_err("%s: couldnt read result channel %d ret=%d\n",
+						__func__, channel, ret);
+		goto out;
+	}
+	ret = adc_channel_close(h);
+	if (ret) {
+		pr_err("%s: couldnt close channel %d ret=%d\n",
+						__func__, channel, ret);
+	}
+	if (mv_reading)
+		*mv_reading = adc_chan_result.measurement;
+
+	pr_debug("%s: done for %d\n", __func__, channel);
+	return adc_chan_result.physical;
+out:
+	pr_debug("%s: done for %d\n", __func__, channel);
+	return -EINVAL;
+
+}
+
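+/*
+ * The therm line presumably reads near the rail when no pack is connected,
+ * so a reading above 0 but below BATT_THERM_OPEN_MV means a battery is
+ * present.
+ */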
+#define BATT_THERM_OPEN_MV  2000
+static int pm8058_is_battery_present(void)
+{
+	int mv_reading;
+
+	mv_reading = 0;
+	batt_read_adc(CHANNEL_ADC_BATT_THERM, &mv_reading);
+	pr_debug("%s: therm_raw is %d\n", __func__, mv_reading);
+	if (mv_reading > 0 && mv_reading < BATT_THERM_OPEN_MV)
+		return 1;
+
+	return 0;
+}
+
+static int pm8058_get_battery_temperature(void)
+{
+	return batt_read_adc(CHANNEL_ADC_BATT_THERM, NULL);
+}
+
+#define BATT_THERM_OPERATIONAL_MAX_CELCIUS 40
+#define BATT_THERM_OPERATIONAL_MIN_CELCIUS 0
+static int pm8058_is_battery_temp_within_range(void)
+{
+	int therm_celcius;
+
+	therm_celcius = pm8058_get_battery_temperature();
+	pr_debug("%s: therm_celcius is %d\n", __func__, therm_celcius);
+	if (therm_celcius > 0
+		&& therm_celcius > BATT_THERM_OPERATIONAL_MIN_CELCIUS
+		&& therm_celcius < BATT_THERM_OPERATIONAL_MAX_CELCIUS)
+		return 1;
+
+	return 0;
+}
+
+#define BATT_ID_MAX_MV  800
+#define BATT_ID_MIN_MV  600
+static int pm8058_is_battery_id_valid(void)
+{
+	int batt_id_mv;
+
+	batt_id_mv = batt_read_adc(CHANNEL_ADC_BATT_ID, NULL);
+	pr_debug("%s: batt_id_mv is %d\n", __func__, batt_id_mv);
+
+	/*
+	 * The readings are not in range;
+	 * assume the battery is present for now and skip the check below
+	 */
+	return 1;
+
+	if (batt_id_mv > 0
+		&& batt_id_mv > BATT_ID_MIN_MV
+		&& batt_id_mv < BATT_ID_MAX_MV)
+		return 1;
+
+	return 0;
+}
+
+/* returns voltage in mV */
+static int pm8058_get_battery_mvolts(void)
+{
+	int vbatt_mv;
+
+	vbatt_mv = batt_read_adc(CHANNEL_ADC_VBATT, NULL);
+	pr_debug("%s: vbatt_mv is %d\n", __func__, vbatt_mv);
+	if (vbatt_mv > 0)
+		return vbatt_mv;
+	/*
+	 * return 0 to tell the upper layers
+	 * we couldn't read the battery voltage
+	 */
+	return 0;
+}
+
+static int msm_battery_gauge_alarm_notify(struct notifier_block *nb,
+		unsigned long status, void *unused)
+{
+	int rc;
+
+	pr_info("%s: status: %lu\n", __func__, status);
+
+	switch (status) {
+	case 0:
+		dev_err(pm8058_chg.dev,
+			"%s: spurious interrupt\n", __func__);
+		break;
+	/* expected case - trip of low threshold */
+	case 1:
+		rc = pm8058_batt_alarm_state_set(0, 0);
+		if (rc)
+			dev_err(pm8058_chg.dev,
+				"%s: unable to set alarm state\n", __func__);
+		msm_charger_notify_event(NULL, CHG_BATT_NEEDS_RECHARGING);
+		break;
+	case 2:
+		dev_err(pm8058_chg.dev,
+			"%s: trip of high threshold\n", __func__);
+		break;
+	default:
+		dev_err(pm8058_chg.dev,
+			"%s: error received\n", __func__);
+	}
+
+	return 0;
+}
+
+static int pm8058_monitor_for_recharging(void)
+{
+	/* enable low comparator */
+	return pm8058_batt_alarm_state_set(1, 0);
+}
+
+static struct msm_battery_gauge pm8058_batt_gauge = {
+	.get_battery_mvolts = pm8058_get_battery_mvolts,
+	.get_battery_temperature = pm8058_get_battery_temperature,
+	.is_battery_present = pm8058_is_battery_present,
+	.is_battery_temp_within_range = pm8058_is_battery_temp_within_range,
+	.is_battery_id_valid = pm8058_is_battery_id_valid,
+	.monitor_for_recharging = pm8058_monitor_for_recharging,
+};
+
+static int pm8058_usb_voltage_lower_limit(void)
+{
+	u8 temp, old;
+	int ret = 0;
+
+	temp = 0x10;
+	ret |= pm8058_write(pm8058_chg.pm_chip, PM8058_CHG_TEST, &temp, 1);
+	ret |= pm8058_read(pm8058_chg.pm_chip, PM8058_CHG_TEST, &old, 1);
+	old = old & ~BIT(IGNORE_LL);
+	temp = 0x90  | (0xF & old);
+	ret |= pm8058_write(pm8058_chg.pm_chip, PM8058_CHG_TEST, &temp, 1);
+
+	return ret;
+}
+
+static int __devinit pm8058_charger_probe(struct platform_device *pdev)
+{
+	struct pm8058_chip *pm_chip;
+	int rc = 0;
+
+	pm_chip = dev_get_drvdata(pdev->dev.parent);
+	if (pm_chip == NULL) {
+		pr_err("%s:no parent data passed in.\n", __func__);
+		return -EFAULT;
+	}
+
+	pm8058_chg.pm_chip = pm_chip;
+	pm8058_chg.pdata = pdev->dev.platform_data;
+	pm8058_chg.dev = &pdev->dev;
+
+	rc = request_irqs(pdev);
+	if (rc) {
+		pr_err("%s: couldnt register interrupts\n", __func__);
+		goto out;
+	}
+
+	rc = pm8058_usb_voltage_lower_limit();
+	if (rc) {
+		pr_err("%s: couldnt set ignore lower limit bit to 0\n",
+								__func__);
+		goto free_irq;
+	}
+
+	rc = msm_charger_register(&usb_hw_chg);
+	if (rc) {
+		pr_err("%s: msm_charger_register failed ret=%d\n",
+							__func__, rc);
+		goto free_irq;
+	}
+
+	pm_chg_batt_temp_disable(0);
+	msm_battery_gauge_register(&pm8058_batt_gauge);
+	__dump_chg_regs();
+
+	create_debugfs_entries();
+	INIT_DELAYED_WORK(&pm8058_chg.veoc_begin_work, veoc_begin_work);
+	INIT_DELAYED_WORK(&pm8058_chg.check_vbat_low_work, vbat_low_work);
+	INIT_DELAYED_WORK(&pm8058_chg.chg_done_check_work, chg_done_check_work);
+	INIT_DELAYED_WORK(&pm8058_chg.charging_check_work, charging_check_work);
+
+	/* determine what state the charger is in */
+	pm8058_chg_determine_initial_state();
+
+	pm8058_chg_enable_irq(BATTTEMP_IRQ);
+	pm8058_chg_enable_irq(BATTCONNECT_IRQ);
+
+	rc = pm8058_batt_alarm_state_set(0, 0);
+	if (rc) {
+		pr_err("%s: unable to set batt alarm state\n", __func__);
+		goto free_irq;
+	}
+
+	/*
+	 * The batt-alarm driver requires sane values for both min / max,
+	 * regardless of whether they're both activated.
+	 */
+	rc = pm8058_batt_alarm_threshold_set(resume_mv, 4300);
+	if (rc) {
+		pr_err("%s: unable to set batt alarm threshold\n", __func__);
+		goto free_irq;
+	}
+
+	rc = pm8058_batt_alarm_hold_time_set(PM8058_BATT_ALARM_HOLD_TIME_16_MS);
+	if (rc) {
+		pr_err("%s: unable to set batt alarm hold time\n", __func__);
+		goto free_irq;
+	}
+
+	/* PWM enabled at 2Hz */
+	rc = pm8058_batt_alarm_pwm_rate_set(1, 7, 4);
+	if (rc) {
+		pr_err("%s: unable to set batt alarm pwm rate\n", __func__);
+		goto free_irq;
+	}
+
+	rc = pm8058_batt_alarm_register_notifier(&alarm_notifier);
+	if (rc) {
+		pr_err("%s: unable to register alarm notifier\n", __func__);
+		goto free_irq;
+	}
+
+	pm8058_chg.inited = 1;
+
+	return 0;
+
+free_irq:
+	free_irqs();
+out:
+	return rc;
+}
+
+static int __devexit pm8058_charger_remove(struct platform_device *pdev)
+{
+	struct pm8058_charger_chip *chip = platform_get_drvdata(pdev);
+	int rc;
+
+	msm_charger_notify_event(&usb_hw_chg, CHG_REMOVED_EVENT);
+	msm_charger_unregister(&usb_hw_chg);
+	cancel_delayed_work_sync(&pm8058_chg.veoc_begin_work);
+	cancel_delayed_work_sync(&pm8058_chg.check_vbat_low_work);
+	cancel_delayed_work_sync(&pm8058_chg.charging_check_work);
+	free_irqs();
+	remove_debugfs_entries();
+	kfree(chip);
+
+	rc = pm8058_batt_alarm_state_set(0, 0);
+	if (rc)
+		pr_err("%s: unable to set batt alarm state\n", __func__);
+
+	rc |= pm8058_batt_alarm_unregister_notifier(&alarm_notifier);
+	if (rc)
+		pr_err("%s: unable to unregister alarm notifier\n", __func__);
+	return rc;
+}
+
+static struct platform_driver pm8058_charger_driver = {
+	.probe = pm8058_charger_probe,
+	.remove = __devexit_p(pm8058_charger_remove),
+	.driver = {
+		   .name = "pm8058-charger",
+		   .owner = THIS_MODULE,
+	},
+};
+
+static int __init pm8058_charger_init(void)
+{
+	return platform_driver_register(&pm8058_charger_driver);
+}
+
+static void __exit pm8058_charger_exit(void)
+{
+	platform_driver_unregister(&pm8058_charger_driver);
+}
+
+late_initcall(pm8058_charger_init);
+module_exit(pm8058_charger_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC8058 BATTERY driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:pm8058_charger");
diff --git a/drivers/power/qci_battery.c b/drivers/power/qci_battery.c
new file mode 100644
index 0000000..724bcba
--- /dev/null
+++ b/drivers/power/qci_battery.c
@@ -0,0 +1,662 @@
+/* Quanta I2C Battery Driver
+ *
+ * Copyright (C) 2009 Quanta Computer Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * This driver performs its I/O over the I2C interface on the ST15 platform.
+ * It works only with the Nuvoton WPCE775x embedded controller.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/sched.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/wpce775x.h>
+#include <linux/delay.h>
+
+#include "qci_battery.h"
+
+#define QCIBAT_DEFAULT_CHARGE_FULL_CAPACITY 2200 /* 2200 mAh */
+#define QCIBAT_DEFAULT_CHARGE_FULL_DESIGN   2200
+#define QCIBAT_DEFAULT_VOLTAGE_DESIGN      10800 /* 10.8 V */
+#define QCIBAT_STRING_SIZE 16
+
+/* General structure to hold the driver data */
+struct i2cbat_drv_data {
+	struct i2c_client *bi2c_client;
+	struct work_struct work;
+	unsigned int qcibat_irq;
+	unsigned int qcibat_gpio;
+	u8 battery_state;
+	u8 battery_dev_name[QCIBAT_STRING_SIZE];
+	u8 serial_number[QCIBAT_STRING_SIZE];
+	u8 manufacturer_name[QCIBAT_STRING_SIZE];
+	unsigned int charge_full;
+	unsigned int charge_full_design;
+	unsigned int voltage_full_design;
+	unsigned int energy_full;
+};
+
+static struct i2cbat_drv_data context;
+static struct mutex qci_i2c_lock;
+static struct mutex qci_transaction_lock;
+/*********************************************************************
+ *		Power
+ *********************************************************************/
+
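+/*
+ * Read one byte out of the EC RAM: write the RAM offset, then read the
+ * value back, all under the i2c lock.
+ */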
+static int get_bat_info(u8 ec_data)
+{
+	u8 byte_read;
+
+	mutex_lock(&qci_i2c_lock);
+	i2c_smbus_write_byte(context.bi2c_client, ec_data);
+	byte_read = i2c_smbus_read_byte(context.bi2c_client);
+	mutex_unlock(&qci_i2c_lock);
+	return byte_read;
+}
+
+static int qci_ac_get_prop(struct power_supply *psy,
+			    enum power_supply_property psp,
+			    union power_supply_propval *val)
+{
+	int ret = 0;
+	switch (psp) {
+	case POWER_SUPPLY_PROP_ONLINE:
+		if (get_bat_info(ECRAM_POWER_SOURCE) & EC_FLAG_ADAPTER_IN)
+			val->intval =  EC_ADAPTER_PRESENT;
+		else
+			val->intval =  EC_ADAPTER_NOT_PRESENT;
+	break;
+	default:
+		ret = -EINVAL;
+	break;
+	}
+	return ret;
+}
+
+static enum power_supply_property qci_ac_props[] = {
+	POWER_SUPPLY_PROP_ONLINE,
+};
+
+static enum power_supply_property qci_bat_props[] = {
+	POWER_SUPPLY_PROP_STATUS,
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_TECHNOLOGY,
+	POWER_SUPPLY_PROP_VOLTAGE_AVG,
+	POWER_SUPPLY_PROP_CURRENT_AVG,
+	POWER_SUPPLY_PROP_CAPACITY,
+	POWER_SUPPLY_PROP_TEMP,
+	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+	POWER_SUPPLY_PROP_CHARGE_FULL,
+	POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
+	POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
+	POWER_SUPPLY_PROP_MODEL_NAME,
+	POWER_SUPPLY_PROP_MANUFACTURER,
+	POWER_SUPPLY_PROP_SERIAL_NUMBER,
+	POWER_SUPPLY_PROP_CHARGE_COUNTER,
+	POWER_SUPPLY_PROP_ENERGY_NOW,
+	POWER_SUPPLY_PROP_ENERGY_FULL,
+	POWER_SUPPLY_PROP_ENERGY_EMPTY,
+};
+
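+/*
+ * Kick off an SMBus transaction to the battery through the EC mailbox
+ * registers (status, slave address, command, protocol), give the EC
+ * 100 ms to complete it, then return the transaction status byte.
+ */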
+static int read_data_from_battery(u8 smb_cmd, u8 smb_prtcl)
+{
+	if (context.battery_state & MAIN_BATTERY_STATUS_BAT_IN)	{
+		mutex_lock(&qci_i2c_lock);
+		i2c_smbus_write_byte_data(context.bi2c_client,
+					  ECRAM_SMB_STS, 0);
+		i2c_smbus_write_byte_data(context.bi2c_client, ECRAM_SMB_ADDR,
+					  BATTERY_SLAVE_ADDRESS);
+		i2c_smbus_write_byte_data(context.bi2c_client,
+					  ECRAM_SMB_CMD, smb_cmd);
+		i2c_smbus_write_byte_data(context.bi2c_client,
+					  ECRAM_SMB_PRTCL, smb_prtcl);
+		mutex_unlock(&qci_i2c_lock);
+		msleep(100);
+		return get_bat_info(ECRAM_SMB_STS);
+	} else
+		return SMBUS_DEVICE_NOACK;
+}
+
+static int qbat_get_status(union power_supply_propval *val)
+{
+	int status;
+
+	status = get_bat_info(ECRAM_BATTERY_STATUS);
+
+	if ((status & MAIN_BATTERY_STATUS_BAT_IN) == 0x0)
+		val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+	else if (status & MAIN_BATTERY_STATUS_BAT_CHARGING)
+		val->intval = POWER_SUPPLY_STATUS_CHARGING;
+	else if (status & MAIN_BATTERY_STATUS_BAT_FULL)
+		val->intval = POWER_SUPPLY_STATUS_FULL;
+	else if (status & MAIN_BATTERY_STATUS_BAT_DISCHRG)
+		val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+	else
+		val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+
+	return 0;
+}
+
+static int qbat_get_present(union power_supply_propval *val)
+{
+	if (context.battery_state & MAIN_BATTERY_STATUS_BAT_IN)
+		val->intval = EC_BAT_PRESENT;
+	else
+		val->intval = EC_BAT_NOT_PRESENT;
+	return 0;
+}
+
+static int qbat_get_health(union power_supply_propval *val)
+{
+	u8 health;
+
+	health = get_bat_info(ECRAM_CHARGER_ALARM);
+	if (!(context.battery_state & MAIN_BATTERY_STATUS_BAT_IN))
+		val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
+	else if (health & ALARM_OVER_TEMP)
+		val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+	else if (health & ALARM_REMAIN_CAPACITY)
+		val->intval = POWER_SUPPLY_HEALTH_DEAD;
+	else if (health & ALARM_OVER_CHARGE)
+		val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+	else
+		val->intval = POWER_SUPPLY_HEALTH_GOOD;
+	return 0;
+}
+
+static int qbat_get_voltage_avg(union power_supply_propval *val)
+{
+	val->intval = (get_bat_info(ECRAM_BATTERY_VOLTAGE_MSB) << 8 |
+		       get_bat_info(ECRAM_BATTERY_VOLTAGE_LSB)) * 1000;
+	return 0;
+}
+
+static int qbat_get_current_avg(union power_supply_propval *val)
+{
+	val->intval = (get_bat_info(ECRAM_BATTERY_CURRENT_MSB) << 8 |
+		       get_bat_info(ECRAM_BATTERY_CURRENT_LSB));
+	return 0;
+}
+
+static int qbat_get_capacity(union power_supply_propval *val)
+{
+	if (!(context.battery_state & MAIN_BATTERY_STATUS_BAT_IN))
+		val->intval = 0xFF;
+	else
+		val->intval = get_bat_info(ECRAM_BATTERY_CAPACITY);
+	return 0;
+}
+
+static int qbat_get_temp_avg(union power_supply_propval *val)
+{
+	int temp;
+	int rc = 0;
+
+	if (!(context.battery_state & MAIN_BATTERY_STATUS_BAT_IN)) {
+		val->intval = 0xFFFF;
+		rc = -ENODATA;
+	} else {
+		temp = (get_bat_info(ECRAM_BATTERY_TEMP_MSB) << 8) |
+			get_bat_info(ECRAM_BATTERY_TEMP_LSB);
+		val->intval = (temp - 2730) / 10;
+	}
+	return rc;
+}
+
+static int qbat_get_charge_full_design(union power_supply_propval *val)
+{
+	val->intval = context.charge_full_design;
+	return 0;
+}
+
+static int qbat_get_charge_full(union power_supply_propval *val)
+{
+	val->intval = context.charge_full;
+	return 0;
+}
+
+static int qbat_get_charge_counter(union power_supply_propval *val)
+{
+	u16 charge = 0;
+	int rc = 0;
+
+	mutex_lock(&qci_transaction_lock);
+	if (read_data_from_battery(BATTERY_CYCLE_COUNT,
+				   SMBUS_READ_WORD_PRTCL) == SMBUS_DONE) {
+		charge = get_bat_info(ECRAM_SMB_DATA1);
+		charge = charge << 8;
+		charge |= get_bat_info(ECRAM_SMB_DATA0);
+	} else
+		rc = -ENODATA;
+	mutex_unlock(&qci_transaction_lock);
+	val->intval = charge;
+	return rc;
+}
+
+static int qbat_get_time_empty_avg(union power_supply_propval *val)
+{
+	u16 avg = 0;
+	int rc = 0;
+
+	mutex_lock(&qci_transaction_lock);
+	if (read_data_from_battery(BATTERY_AVERAGE_TIME_TO_EMPTY,
+				   SMBUS_READ_WORD_PRTCL) == SMBUS_DONE) {
+		avg = get_bat_info(ECRAM_SMB_DATA1);
+		avg = avg << 8;
+		avg |= get_bat_info(ECRAM_SMB_DATA0);
+	} else
+		rc = -ENODATA;
+	mutex_unlock(&qci_transaction_lock);
+	val->intval = avg;
+	return rc;
+}
+
+static int qbat_get_time_full_avg(union power_supply_propval *val)
+{
+	u16 avg = 0;
+	int rc = 0;
+
+	mutex_lock(&qci_transaction_lock);
+	if (read_data_from_battery(BATTERY_AVERAGE_TIME_TO_FULL,
+				   SMBUS_READ_WORD_PRTCL) == SMBUS_DONE) {
+		avg = get_bat_info(ECRAM_SMB_DATA1);
+		avg = avg << 8;
+		avg |= get_bat_info(ECRAM_SMB_DATA0);
+	} else
+		rc = -ENODATA;
+	mutex_unlock(&qci_transaction_lock);
+	val->intval = avg;
+	return rc;
+}
+
+static int qbat_get_model_name(union power_supply_propval *val)
+{
+	unsigned char i, size;
+
+	mutex_lock(&qci_transaction_lock);
+	if (read_data_from_battery(BATTERY_DEVICE_NAME,
+				   SMBUS_READ_BLOCK_PRTCL) == SMBUS_DONE) {
+		size = min(get_bat_info(ECRAM_SMB_BCNT), QCIBAT_STRING_SIZE);
+		for (i = 0; i < size; i++) {
+			context.battery_dev_name[i] =
+				get_bat_info(ECRAM_SMB_DATA_START + i);
+		}
+		val->strval = context.battery_dev_name;
+	} else
+		val->strval = "Unknown";
+	mutex_unlock(&qci_transaction_lock);
+	return 0;
+}
+
+static int qbat_get_manufacturer_name(union power_supply_propval *val)
+{
+	val->strval = context.manufacturer_name;
+	return 0;
+}
+
+static int qbat_get_serial_number(union power_supply_propval *val)
+{
+	val->strval = context.serial_number;
+	return 0;
+}
+
+static int qbat_get_technology(union power_supply_propval *val)
+{
+	val->intval = POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
+	return 0;
+}
+
+static int qbat_get_energy_now(union power_supply_propval *val)
+{
+	if (!(get_bat_info(ECRAM_BATTERY_STATUS) & MAIN_BATTERY_STATUS_BAT_IN))
+		val->intval = 0;
+	else
+		val->intval = (get_bat_info(ECRAM_BATTERY_CAPACITY) *
+			       context.energy_full) / 100;
+	return 0;
+}
+
+static int qbat_get_energy_full(union power_supply_propval *val)
+{
+	val->intval = context.energy_full;
+	return 0;
+}
+
+static int qbat_get_energy_empty(union power_supply_propval *val)
+{
+	val->intval = 0;
+	return 0;
+}
+
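+/*
+ * Cache the battery's reported full-charge capacity, falling back to the
+ * compile-time default when the SMBus word read fails.
+ */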
+static void qbat_init_get_charge_full(void)
+{
+	u16 charge = QCIBAT_DEFAULT_CHARGE_FULL_CAPACITY;
+
+	mutex_lock(&qci_transaction_lock);
+	if (read_data_from_battery(BATTERY_FULL_CAPACITY,
+				   SMBUS_READ_WORD_PRTCL) == SMBUS_DONE) {
+		charge = get_bat_info(ECRAM_SMB_DATA1);
+		charge = charge << 8;
+		charge |= get_bat_info(ECRAM_SMB_DATA0);
+	}
+	mutex_unlock(&qci_transaction_lock);
+	context.charge_full = charge;
+}
+
+static void qbat_init_get_charge_full_design(void)
+{
+	u16 charge = QCIBAT_DEFAULT_CHARGE_FULL_DESIGN;
+
+	mutex_lock(&qci_transaction_lock);
+	if (read_data_from_battery(BATTERY_DESIGN_CAPACITY,
+				   SMBUS_READ_WORD_PRTCL) == SMBUS_DONE) {
+		charge = get_bat_info(ECRAM_SMB_DATA1);
+		charge = charge << 8;
+		charge |= get_bat_info(ECRAM_SMB_DATA0);
+	}
+	mutex_unlock(&qci_transaction_lock);
+	context.charge_full_design = charge;
+}
+
+static void qbat_init_get_voltage_full_design(void)
+{
+	u16 voltage = QCIBAT_DEFAULT_VOLTAGE_DESIGN;
+
+	mutex_lock(&qci_transaction_lock);
+	if (read_data_from_battery(BATTERY_DESIGN_VOLTAGE,
+				   SMBUS_READ_WORD_PRTCL) == SMBUS_DONE) {
+		voltage = get_bat_info(ECRAM_SMB_DATA1);
+		voltage = voltage << 8;
+		voltage |= get_bat_info(ECRAM_SMB_DATA0);
+	}
+	mutex_unlock(&qci_transaction_lock);
+	context.voltage_full_design = voltage;
+}
+
+static void qbat_init_get_manufacturer_name(void)
+{
+	u8 size;
+	u8 i;
+	int rc;
+
+	mutex_lock(&qci_transaction_lock);
+	rc = read_data_from_battery(BATTERY_MANUFACTURE_NAME,
+				    SMBUS_READ_BLOCK_PRTCL);
+	if (rc == SMBUS_DONE) {
+		size = min(get_bat_info(ECRAM_SMB_BCNT), QCIBAT_STRING_SIZE);
+		for (i = 0; i < size; i++) {
+			context.manufacturer_name[i] =
+				get_bat_info(ECRAM_SMB_DATA_START + i);
+		}
+	} else
+		strcpy(context.manufacturer_name, "Unknown");
+	mutex_unlock(&qci_transaction_lock);
+}
+
+static void qbat_init_get_serial_number(void)
+{
+	u8 size;
+	u8 i;
+	int rc;
+
+	mutex_lock(&qci_transaction_lock);
+	rc = read_data_from_battery(BATTERY_SERIAL_NUMBER,
+				    SMBUS_READ_BLOCK_PRTCL);
+	if (rc == SMBUS_DONE) {
+		size = min(get_bat_info(ECRAM_SMB_BCNT), QCIBAT_STRING_SIZE);
+		for (i = 0; i < size; i++) {
+			context.serial_number[i] =
+				get_bat_info(ECRAM_SMB_DATA_START + i);
+		}
+	} else
+		strcpy(context.serial_number, "Unknown");
+	mutex_unlock(&qci_transaction_lock);
+}
+
+static void init_battery_stats(void)
+{
+	int i;
+
+	context.battery_state = get_bat_info(ECRAM_BATTERY_STATUS);
+	if (!(context.battery_state & MAIN_BATTERY_STATUS_BAT_IN))
+		return;
+	/* EC bug? needs some initial priming */
+	for (i = 0; i < 5; i++) {
+		read_data_from_battery(BATTERY_DESIGN_CAPACITY,
+				       SMBUS_READ_WORD_PRTCL);
+	}
+
+	qbat_init_get_charge_full_design();
+	qbat_init_get_charge_full();
+	qbat_init_get_voltage_full_design();
+
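+	/* Presumably design voltage (mV) times full charge (mAh), i.e. uWh,
+	 * which is the unit the ENERGY_* properties are expected to report.
+	 */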
+	context.energy_full = context.voltage_full_design *
+		context.charge_full;
+
+	qbat_init_get_serial_number();
+	qbat_init_get_manufacturer_name();
+}
+
+/*********************************************************************
+ *		Battery properties
+ *********************************************************************/
+static int qbat_get_property(struct power_supply *psy,
+			     enum power_supply_property psp,
+			     union power_supply_propval *val)
+{
+	int ret = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_STATUS:
+		ret = qbat_get_status(val);
+		break;
+	case POWER_SUPPLY_PROP_PRESENT:
+		ret = qbat_get_present(val);
+		break;
+	case POWER_SUPPLY_PROP_HEALTH:
+		ret = qbat_get_health(val);
+		break;
+	case POWER_SUPPLY_PROP_MANUFACTURER:
+		ret = qbat_get_manufacturer_name(val);
+		break;
+	case POWER_SUPPLY_PROP_TECHNOLOGY:
+		ret = qbat_get_technology(val);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+		ret = qbat_get_voltage_avg(val);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_AVG:
+		ret = qbat_get_current_avg(val);
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		ret = qbat_get_capacity(val);
+		break;
+	case POWER_SUPPLY_PROP_TEMP:
+		ret = qbat_get_temp_avg(val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+		ret = qbat_get_charge_full_design(val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_FULL:
+		ret = qbat_get_charge_full(val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+		ret = qbat_get_charge_counter(val);
+		break;
+	case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
+		ret = qbat_get_time_empty_avg(val);
+		break;
+	case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
+		ret = qbat_get_time_full_avg(val);
+		break;
+	case POWER_SUPPLY_PROP_MODEL_NAME:
+		ret = qbat_get_model_name(val);
+		break;
+	case POWER_SUPPLY_PROP_SERIAL_NUMBER:
+		ret = qbat_get_serial_number(val);
+		break;
+	case POWER_SUPPLY_PROP_ENERGY_NOW:
+		ret = qbat_get_energy_now(val);
+		break;
+	case POWER_SUPPLY_PROP_ENERGY_FULL:
+		ret = qbat_get_energy_full(val);
+		break;
+	case POWER_SUPPLY_PROP_ENERGY_EMPTY:
+		ret = qbat_get_energy_empty(val);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+/*********************************************************************
+ *		Initialisation
+ *********************************************************************/
+
+static struct power_supply qci_ac = {
+	.name = "ac",
+	.type = POWER_SUPPLY_TYPE_MAINS,
+	.properties = qci_ac_props,
+	.num_properties = ARRAY_SIZE(qci_ac_props),
+	.get_property = qci_ac_get_prop,
+};
+
+static struct power_supply qci_bat = {
+	.name = "battery",
+	.type = POWER_SUPPLY_TYPE_BATTERY,
+	.properties = qci_bat_props,
+	.num_properties = ARRAY_SIZE(qci_bat_props),
+	.get_property = qbat_get_property,
+	.use_for_apm = 1,
+};
+
+static irqreturn_t qbat_interrupt(int irq, void *dev_id)
+{
+	struct i2cbat_drv_data *ibat_drv_data = dev_id;
+	schedule_work(&ibat_drv_data->work);
+	return IRQ_HANDLED;
+}
+
+static void qbat_work(struct work_struct *_work)
+{
+	u8 status;
+
+	status = get_bat_info(ECRAM_BATTERY_EVENTS);
+	if (status & EC_EVENT_AC) {
+		context.battery_state = get_bat_info(ECRAM_BATTERY_STATUS);
+		power_supply_changed(&qci_ac);
+	}
+
+	if (status & (EC_EVENT_BATTERY | EC_EVENT_CHARGER | EC_EVENT_TIMER)) {
+		context.battery_state = get_bat_info(ECRAM_BATTERY_STATUS);
+		power_supply_changed(&qci_bat);
+		if (status & EC_EVENT_BATTERY)
+			init_battery_stats();
+	}
+}
+
+static struct platform_device *bat_pdev;
+
+static int __init qbat_init(void)
+{
+	int err = 0;
+
+	mutex_init(&qci_i2c_lock);
+	mutex_init(&qci_transaction_lock);
+
+	context.bi2c_client = wpce_get_i2c_client();
+	if (context.bi2c_client == NULL)
+		return -ENODEV;
+
+	i2c_set_clientdata(context.bi2c_client, &context);
+	context.qcibat_gpio = context.bi2c_client->irq;
+
+	/* Register the battery platform device. */
+	bat_pdev = platform_device_register_simple("battery", 0, NULL, 0);
+	if (IS_ERR(bat_pdev))
+		return PTR_ERR(bat_pdev);
+
+	err = power_supply_register(&bat_pdev->dev, &qci_ac);
+	if (err)
+		goto ac_failed;
+
+	qci_bat.name = bat_pdev->name;
+	err = power_supply_register(&bat_pdev->dev, &qci_bat);
+	if (err)
+		goto battery_failed;
+
+	/* Configure the battery IRQ. */
+	INIT_WORK(&context.work, qbat_work);
+	err = gpio_request(context.qcibat_gpio, "qci-bat");
+	if (err) {
+		dev_err(&context.bi2c_client->dev,
+			"[BAT] err gpio request\n");
+		goto gpio_request_fail;
+	}
+	context.qcibat_irq = gpio_to_irq(context.qcibat_gpio);
+	err = request_irq(context.qcibat_irq, qbat_interrupt,
+		IRQF_TRIGGER_FALLING, BATTERY_ID_NAME, &context);
+	if (err) {
+		dev_err(&context.bi2c_client->dev,
+			"[BAT] unable to get IRQ\n");
+		goto request_irq_fail;
+	}
+
+	init_battery_stats();
+	goto success;
+
+request_irq_fail:
+	gpio_free(context.qcibat_gpio);
+
+gpio_request_fail:
+	power_supply_unregister(&qci_bat);
+
+battery_failed:
+	power_supply_unregister(&qci_ac);
+
+ac_failed:
+	platform_device_unregister(bat_pdev);
+
+	i2c_set_clientdata(context.bi2c_client, NULL);
+success:
+	return err;
+}
+
+static void __exit qbat_exit(void)
+{
+	free_irq(context.qcibat_irq, &context);
+	gpio_free(context.qcibat_gpio);
+	power_supply_unregister(&qci_bat);
+	power_supply_unregister(&qci_ac);
+	platform_device_unregister(bat_pdev);
+	i2c_set_clientdata(context.bi2c_client, NULL);
+}
+
+late_initcall(qbat_init);
+module_exit(qbat_exit);
+
+MODULE_AUTHOR("Quanta Computer Inc.");
+MODULE_DESCRIPTION("Quanta Embedded Controller I2C Battery Driver");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/power/qci_battery.h b/drivers/power/qci_battery.h
new file mode 100644
index 0000000..dcbb62b
--- /dev/null
+++ b/drivers/power/qci_battery.h
@@ -0,0 +1,121 @@
+/* Header file for Quanta I2C Battery Driver
+ *
+ * Copyright (C) 2009 Quanta Computer Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+ /*
+ *
+ *  This driver performs I/O over the I2C interface for ON2 of AP BU.
+ *  It only works with the Nuvoton WPCE775x Embedded Controller.
+ *
+ */
+
+#ifndef __QCI_BATTERY_H__
+#define __QCI_BATTERY_H__
+
+#define BAT_I2C_ADDRESS 0x1A
+#define BATTERY_ID_NAME          "qci-i2cbat"
+#define EC_FLAG_ADAPTER_IN		0x01
+#define EC_FLAG_POWER_ON		0x02
+#define EC_FLAG_ENTER_S3		0x04
+#define EC_FLAG_ENTER_S4		0x08
+#define EC_FLAG_IN_STANDBY		0x10
+#define EC_FLAG_SYSTEM_ON		0x20
+#define EC_FLAG_WAIT_HWPG		0x40
+#define EC_FLAG_S5_POWER_ON	0x80
+
+#define MAIN_BATTERY_STATUS_BAT_DISCHRG		0x01
+#define MAIN_BATTERY_STATUS_BAT_CHARGING	0x02
+#define MAIN_BATTERY_STATUS_BAT_ABNORMAL	0x04
+#define MAIN_BATTERY_STATUS_BAT_IN		0x08
+#define MAIN_BATTERY_STATUS_BAT_FULL		0x10
+#define MAIN_BATTERY_STATUS_BAT_LOW		0x20
+#define MAIN_BATTERY_STATUS_BAT_SMB_VALID	0x80
+
+#define CHG_STATUS_BAT_CHARGE			0x01
+#define CHG_STATUS_BAT_PRECHG			0x02
+#define CHG_STATUS_BAT_OVERTEMP			0x04
+#define CHG_STATUS_BAT_TYPE			0x08
+#define CHG_STATUS_BAT_GWROK			0x10
+#define CHG_STATUS_BAT_INCHARGE			0x20
+#define CHG_STATUS_BAT_WAKECHRG			0x40
+#define CHG_STATUS_BAT_CHGTIMEOUT		0x80
+
+#define EC_ADAPTER_PRESENT		0x1
+#define EC_BAT_PRESENT		        0x1
+#define EC_ADAPTER_NOT_PRESENT		0x0
+#define EC_BAT_NOT_PRESENT		0x0
+
+#define ECRAM_POWER_SOURCE              0x40
+#define ECRAM_CHARGER_ALARM		0x42
+#define ECRAM_BATTERY_STATUS            0x82
+#define ECRAM_BATTERY_CURRENT_LSB       0x83
+#define ECRAM_BATTERY_CURRENT_MSB       0x84
+#define ECRAM_BATTERY_VOLTAGE_LSB       0x87
+#define ECRAM_BATTERY_VOLTAGE_MSB       0x88
+#define ECRAM_BATTERY_CAPACITY          0x89
+#define ECRAM_BATTERY_TEMP_LSB          0x8C
+#define ECRAM_BATTERY_TEMP_MSB          0x8D
+#define ECRAM_BATTERY_EVENTS            0x99
+
+#define EC_EVENT_BATTERY                0x01
+#define EC_EVENT_CHARGER                0x02
+#define EC_EVENT_AC                     0x10
+#define EC_EVENT_TIMER                  0x40
+
+/* smbus access */
+#define SMBUS_READ_BYTE_PRTCL		0x07
+#define SMBUS_READ_WORD_PRTCL		0x09
+#define SMBUS_READ_BLOCK_PRTCL		0x0B
+
+/* smbus status code */
+#define SMBUS_OK			0x00
+#define SMBUS_DONE			0x80
+#define SMBUS_ALARM			0x40
+#define SMBUS_UNKNOW_FAILURE		0x07
+#define SMBUS_DEVICE_NOACK		0x10
+#define SMBUS_DEVICE_ERROR		0x11
+#define SMBUS_UNKNOW_ERROR		0x13
+#define SMBUS_TIME_OUT			0x18
+#define SMBUS_BUSY			0x1A
+
+/* ec ram mapping */
+#define ECRAM_SMB_PRTCL			0
+#define ECRAM_SMB_STS			1
+#define ECRAM_SMB_ADDR			2
+#define ECRAM_SMB_CMD			3
+#define ECRAM_SMB_DATA_START		4
+#define ECRAM_SMB_DATA0			4
+#define ECRAM_SMB_DATA1			5
+#define ECRAM_SMB_BCNT			36
+#define ECRAM_SMB_ALARM_ADDR		37
+#define ECRAM_SMB_ALARM_DATA0		38
+#define ECRAM_SMB_ALARM_DATA1		39
+
+/* smart battery commands */
+#define BATTERY_SLAVE_ADDRESS		0x16
+#define BATTERY_FULL_CAPACITY		0x10
+#define BATTERY_AVERAGE_TIME_TO_EMPTY	0x12
+#define BATTERY_AVERAGE_TIME_TO_FULL	0x13
+#define BATTERY_CYCLE_COUNT		0x17
+#define BATTERY_DESIGN_CAPACITY		0x18
+#define BATTERY_DESIGN_VOLTAGE		0x19
+#define BATTERY_SERIAL_NUMBER		0x1C
+#define BATTERY_MANUFACTURE_NAME        0x20
+#define BATTERY_DEVICE_NAME		0x21
+
+/* alarm bit */
+#define ALARM_REMAIN_CAPACITY           0x02
+#define ALARM_OVER_TEMP                 0x10
+#define ALARM_OVER_CHARGE               0x80
+#endif
diff --git a/drivers/power/smb137b.c b/drivers/power/smb137b.c
new file mode 100644
index 0000000..7ff8e28
--- /dev/null
+++ b/drivers/power/smb137b.c
@@ -0,0 +1,857 @@
+/* Copyright (c) 2010-2011 Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/i2c/smb137b.h>
+#include <linux/power_supply.h>
+#include <linux/msm-charger.h>
+
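+/* Build a BITS-bit wide mask starting at bit POS, e.g. SMB137B_MASK(3, 5) == 0xE0. */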
+#define SMB137B_MASK(BITS, POS)  ((unsigned char)(((1 << BITS) - 1) << POS))
+
+#define CHG_CURRENT_REG		0x00
+#define FAST_CHG_CURRENT_MASK		SMB137B_MASK(3, 5)
+#define PRE_CHG_CURRENT_MASK		SMB137B_MASK(2, 3)
+#define TERM_CHG_CURRENT_MASK		SMB137B_MASK(2, 1)
+
+#define INPUT_CURRENT_LIMIT_REG	0x01
+#define IN_CURRENT_MASK			SMB137B_MASK(3, 5)
+#define IN_CURRENT_LIMIT_EN_BIT		BIT(2)
+#define IN_CURRENT_DET_THRESH_MASK	SMB137B_MASK(2, 0)
+
+#define FLOAT_VOLTAGE_REG	0x02
+#define STAT_OUT_POLARITY_BIT		BIT(7)
+#define FLOAT_VOLTAGE_MASK		SMB137B_MASK(7, 0)
+
+#define CONTROL_A_REG		0x03
+#define AUTO_RECHARGE_DIS_BIT		BIT(7)
+#define CURR_CYCLE_TERM_BIT		BIT(6)
+#define PRE_TO_FAST_V_MASK		SMB137B_MASK(3, 3)
+#define TEMP_BEHAV_BIT			BIT(2)
+#define THERM_NTC_CURR_MODE_BIT		BIT(1)
+#define THERM_NTC_47KOHM_BIT		BIT(0)
+
+#define CONTROL_B_REG		0x04
+#define STAT_OUTPUT_MODE_MASK		SMB137B_MASK(2, 6)
+#define BATT_OV_ENDS_CYCLE_BIT		BIT(5)
+#define AUTO_PRE_TO_FAST_DIS_BIT	BIT(4)
+#define SAFETY_TIMER_EN_BIT		BIT(3)
+#define OTG_LBR_WD_EN_BIT		BIT(2)
+#define CHG_WD_TIMER_EN_BIT		BIT(1)
+#define IRQ_OP_MASK			BIT(0)
+
+#define PIN_CTRL_REG		0x05
+#define AUTO_CHG_EN_BIT			BIT(7)
+#define AUTO_LBR_EN_BIT			BIT(6)
+#define OTG_LBR_BIT			BIT(5)
+#define I2C_PIN_BIT			BIT(4)
+#define PIN_EN_CTRL_MASK		SMB137B_MASK(2, 2)
+#define OTG_LBR_PIN_CTRL_MASK		SMB137B_MASK(2, 0)
+
+#define OTG_LBR_CTRL_REG	0x06
+#define BATT_MISSING_DET_EN_BIT		BIT(7)
+#define AUTO_RECHARGE_THRESH_MASK	BIT(6)
+#define USB_DP_DN_DET_EN_MASK		BIT(5)
+#define OTG_LBR_BATT_CURRENT_LIMIT_MASK	SMB137B_MASK(2, 3)
+#define OTG_LBR_UVLO_THRESH_MASK	SMB137B_MASK(3, 0)
+
+#define FAULT_INTR_REG		0x07
+#define SAFETY_TIMER_EXP_MASK		SMB137B_MASK(1, 7)
+#define BATT_TEMP_UNSAFE_MASK		SMB137B_MASK(1, 6)
+#define INPUT_OVLO_IVLO_MASK		SMB137B_MASK(1, 5)
+#define BATT_OVLO_MASK			SMB137B_MASK(1, 4)
+#define INTERNAL_OVER_TEMP_MASK		SMB137B_MASK(1, 2)
+#define ENTER_TAPER_CHG_MASK		SMB137B_MASK(1, 1)
+#define CHG_MASK			SMB137B_MASK(1, 0)
+
+#define CELL_TEMP_MON_REG	0x08
+#define THERMISTOR_CURR_MASK		SMB137B_MASK(2, 6)
+#define LOW_TEMP_CHG_INHIBIT_MASK	SMB137B_MASK(3, 3)
+#define HIGH_TEMP_CHG_INHIBIT_MASK	SMB137B_MASK(3, 0)
+
+#define	SAFETY_TIMER_THERMAL_SHUTDOWN_REG	0x09
+#define DCIN_OVLO_SEL_MASK		SMB137B_MASK(2, 7)
+#define RELOAD_EN_INPUT_VOLTAGE_MASK	SMB137B_MASK(1, 6)
+#define THERM_SHUTDN_EN_MASK		SMB137B_MASK(1, 5)
+#define STANDBY_WD_TIMER_EN_MASK		SMB137B_MASK(1, 4)
+#define COMPLETE_CHG_TMOUT_MASK		SMB137B_MASK(2, 2)
+#define PRE_CHG_TMOUT_MASK		SMB137B_MASK(2, 0)
+
+#define	VSYS_REG	0x0A
+#define	VSYS_MASK			SMB137B_MASK(3, 4)
+
+#define IRQ_RESET_REG	0x30
+
+#define COMMAND_A_REG	0x31
+#define	VOLATILE_REGS_WRITE_PERM_BIT	BIT(7)
+#define	POR_BIT				BIT(6)
+#define	FAST_CHG_SETTINGS_BIT		BIT(5)
+#define	BATT_CHG_EN_BIT			BIT(4)
+#define	USBIN_MODE_500_BIT		BIT(3)
+#define	USBIN_MODE_HCMODE_BIT		BIT(2)
+#define	OTG_LBR_EN_BIT			BIT(1)
+#define	STAT_OE_BIT			BIT(0)
+
+#define STATUS_A_REG	0x32
+#define INTERNAL_TEMP_IRQ_STAT		BIT(4)
+#define DCIN_OV_IRQ_STAT		BIT(3)
+#define DCIN_UV_IRQ_STAT		BIT(2)
+#define USBIN_OV_IRQ_STAT		BIT(1)
+#define USBIN_UV_IRQ_STAT		BIT(0)
+
+#define STATUS_B_REG	0x33
+#define USB_PIN_STAT			BIT(7)
+#define USB51_MODE_STAT			BIT(6)
+#define USB51_HC_MODE_STAT		BIT(5)
+#define INTERNAL_TEMP_LIMIT_B_STAT	BIT(4)
+#define DC_IN_OV_STAT			BIT(3)
+#define DC_IN_UV_STAT			BIT(2)
+#define USB_IN_OV_STAT			BIT(1)
+#define USB_IN_UV_STAT			BIT(0)
+
+#define	STATUS_C_REG	0x34
+#define AUTO_IN_CURR_LIMIT_MASK		SMB137B_MASK(4, 4)
+#define AUTO_IN_CURR_LIMIT_STAT		BIT(3)
+#define AUTO_SOURCE_DET_COMP_STAT_MASK	SMB137B_MASK(2, 1)
+#define AUTO_SOURCE_DET_RESULT_STAT	BIT(0)
+
+#define	STATUS_D_REG	0x35
+#define	VBATT_LESS_THAN_VSYS_STAT	BIT(7)
+#define	USB_FAIL_STAT			BIT(6)
+#define	BATT_TEMP_STAT_MASK		SMB137B_MASK(2, 4)
+#define	INTERNAL_TEMP_LIMIT_STAT	BIT(2)
+#define	OTG_LBR_MODE_EN_STAT		BIT(1)
+#define	OTG_LBR_VBATT_UVLO_STAT		BIT(0)
+
+#define	STATUS_E_REG	0x36
+#define	CHARGE_CYCLE_COUNT_STAT		BIT(7)
+#define	CHARGER_TERM_STAT		BIT(6)
+#define	SAFETY_TIMER_STAT_MASK		SMB137B_MASK(2, 4)
+#define	CHARGER_ERROR_STAT		BIT(3)
+#define	CHARGING_STAT_E			SMB137B_MASK(2, 1)
+#define	CHARGING_EN			BIT(0)
+
+#define	STATUS_F_REG	0x37
+#define	WD_IRQ_ACTIVE_STAT		BIT(7)
+#define	OTG_OVERCURRENT_STAT		BIT(6)
+#define	BATT_PRESENT_STAT		BIT(4)
+#define	BATT_OV_LATCHED_STAT		BIT(3)
+#define	CHARGER_OVLO_STAT		BIT(2)
+#define	CHARGER_UVLO_STAT		BIT(1)
+#define	BATT_LOW_STAT			BIT(0)
+
+#define	STATUS_G_REG	0x38
+#define	CHARGE_TIMEOUT_IRQ_STAT		BIT(7)
+#define	PRECHARGE_TIMEOUT_IRQ_STAT	BIT(6)
+#define	BATT_HOT_IRQ_STAT		BIT(5)
+#define	BATT_COLD_IRQ_STAT		BIT(4)
+#define	BATT_OV_IRQ_STAT		BIT(3)
+#define	TAPER_CHG_IRQ_STAT		BIT(2)
+#define	FAST_CHG_IRQ_STAT		BIT(1)
+#define	CHARGING_IRQ_STAT		BIT(0)
+
+#define	STATUS_H_REG	0x39
+#define	CHARGE_TIMEOUT_STAT		BIT(7)
+#define	PRECHARGE_TIMEOUT_STAT		BIT(6)
+#define	BATT_HOT_STAT			BIT(5)
+#define	BATT_COLD_STAT			BIT(4)
+#define	BATT_OV_STAT			BIT(3)
+#define	TAPER_CHG_STAT			BIT(2)
+#define	FAST_CHG_STAT			BIT(1)
+#define	CHARGING_STAT_H			BIT(0)
+
+#define DEV_ID_REG	0x3B
+
+#define COMMAND_B_REG	0x3C
+#define	THERM_NTC_CURR_VERRIDE		BIT(7)
+
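+/* Poll the charge state machine every 150 seconds (value is in jiffies). */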
+#define SMB137B_CHG_PERIOD	((HZ) * 150)
+
+#define INPUT_CURRENT_REG_DEFAULT	0xE1
+#define INPUT_CURRENT_REG_MIN		0x01
+#define	COMMAND_A_REG_DEFAULT		0xA0
+#define	COMMAND_A_REG_OTG_MODE		0xA2
+
+#define	PIN_CTRL_REG_DEFAULT		0x00
+#define	PIN_CTRL_REG_CHG_OFF		0x04
+
+#define	FAST_CHG_E_STATUS 0x2
+
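+/* Fallback battery rating in mAh, used when platform data supplies none. */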
+#define SMB137B_DEFAULT_BATT_RATING   950
+struct smb137b_data {
+	struct i2c_client *client;
+	struct delayed_work charge_work;
+
+	bool charging;
+	int chgcurrent;
+	int cur_charging_mode;
+	int max_system_voltage;
+	int min_system_voltage;
+
+	int valid_n_gpio;
+
+	int batt_status;
+	int batt_chg_type;
+	int batt_present;
+	int min_design;
+	int max_design;
+	int batt_mah_rating;
+
+	int usb_status;
+
+	u8 dev_id_reg;
+	struct msm_hardware_charger adapter_hw_chg;
+};
+
+static unsigned int disabled;
+static DEFINE_MUTEX(init_lock);
+static unsigned int init_otg_power;
+
+enum charger_stat {
+	SMB137B_ABSENT,
+	SMB137B_PRESENT,
+	SMB137B_ENUMERATED,
+};
+
+static struct smb137b_data *usb_smb137b_chg;
+
+static int smb137b_read_reg(struct i2c_client *client, int reg,
+	u8 *val)
+{
+	s32 ret;
+	struct smb137b_data *smb137b_chg;
+
+	smb137b_chg = i2c_get_clientdata(client);
+	ret = i2c_smbus_read_byte_data(smb137b_chg->client, reg);
+	if (ret < 0) {
+		dev_err(&smb137b_chg->client->dev,
+			"i2c read fail: can't read from %02x: %d\n", reg, ret);
+		return ret;
+	} else
+		*val = ret;
+
+	return 0;
+}
+
+static int smb137b_write_reg(struct i2c_client *client, int reg,
+	u8 val)
+{
+	s32 ret;
+	struct smb137b_data *smb137b_chg;
+
+	smb137b_chg = i2c_get_clientdata(client);
+	ret = i2c_smbus_write_byte_data(smb137b_chg->client, reg, val);
+	if (ret < 0) {
+		dev_err(&smb137b_chg->client->dev,
+			"i2c write fail: can't write %02x to %02x: %d\n",
+			val, reg, ret);
+		return ret;
+	}
+	return 0;
+}
+
+static ssize_t id_reg_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct smb137b_data *smb137b_chg;
+
+	smb137b_chg = i2c_get_clientdata(to_i2c_client(dev));
+
+	return sprintf(buf, "%02x\n", smb137b_chg->dev_id_reg);
+}
+static DEVICE_ATTR(id_reg, S_IRUGO | S_IWUSR, id_reg_show, NULL);
+
+#ifdef DEBUG
+static void smb137b_dbg_print_status_regs(struct smb137b_data *smb137b_chg)
+{
+	int ret;
+	u8 temp;
+
+	ret = smb137b_read_reg(smb137b_chg->client, STATUS_A_REG, &temp);
+	dev_dbg(&smb137b_chg->client->dev, "%s A=0x%x\n", __func__, temp);
+	ret = smb137b_read_reg(smb137b_chg->client, STATUS_B_REG, &temp);
+	dev_dbg(&smb137b_chg->client->dev, "%s B=0x%x\n", __func__, temp);
+	ret = smb137b_read_reg(smb137b_chg->client, STATUS_C_REG, &temp);
+	dev_dbg(&smb137b_chg->client->dev, "%s C=0x%x\n", __func__, temp);
+	ret = smb137b_read_reg(smb137b_chg->client, STATUS_D_REG, &temp);
+	dev_dbg(&smb137b_chg->client->dev, "%s D=0x%x\n", __func__, temp);
+	ret = smb137b_read_reg(smb137b_chg->client, STATUS_E_REG, &temp);
+	dev_dbg(&smb137b_chg->client->dev, "%s E=0x%x\n", __func__, temp);
+	ret = smb137b_read_reg(smb137b_chg->client, STATUS_F_REG, &temp);
+	dev_dbg(&smb137b_chg->client->dev, "%s F=0x%x\n", __func__, temp);
+	ret = smb137b_read_reg(smb137b_chg->client, STATUS_G_REG, &temp);
+	dev_dbg(&smb137b_chg->client->dev, "%s G=0x%x\n", __func__, temp);
+	ret = smb137b_read_reg(smb137b_chg->client, STATUS_H_REG, &temp);
+	dev_dbg(&smb137b_chg->client->dev, "%s H=0x%x\n", __func__, temp);
+}
+#else
+static void smb137b_dbg_print_status_regs(struct smb137b_data *smb137b_chg)
+{
+}
+#endif
+
+static int smb137b_start_charging(struct msm_hardware_charger *hw_chg,
+		int chg_voltage, int chg_current)
+{
+	int cmd_val = COMMAND_A_REG_DEFAULT;
+	u8 temp = 0;
+	int ret = 0;
+	struct smb137b_data *smb137b_chg;
+
+	smb137b_chg = container_of(hw_chg, struct smb137b_data, adapter_hw_chg);
+	if (disabled) {
+		dev_err(&smb137b_chg->client->dev,
+			"%s called when disabled\n", __func__);
+		goto out;
+	}
+
+	/* If we are already charging with the same current, just note it. */
+	if (smb137b_chg->charging && smb137b_chg->chgcurrent == chg_current)
+		dev_err(&smb137b_chg->client->dev,
+			"%s charge with same current %d called again\n",
+			__func__, chg_current);
+
+	dev_dbg(&smb137b_chg->client->dev, "%s\n", __func__);
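+	/* Input current mode: below 500 mA keeps the 100 mA default, exactly
+	 * 500 mA selects USB500, anything larger selects high-current mode.
+	 */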
+	if (chg_current < 500)
+		cmd_val &= ~USBIN_MODE_500_BIT;
+	else if (chg_current == 500)
+		cmd_val |= USBIN_MODE_500_BIT;
+	else
+		cmd_val |= USBIN_MODE_HCMODE_BIT;
+
+	smb137b_chg->chgcurrent = chg_current;
+	smb137b_chg->cur_charging_mode = cmd_val;
+
+	/* Due to the non-volatile reload feature, always enable volatile
+	 * mirror writes before modifying any 00h~09h control register.
+	 * The current mode must be programmed according to what was detected;
+	 * otherwise the default 100mA mode might cause VOUTL to drop and fail
+	 * the system in case of a dead battery.
+	 */
+	ret = smb137b_write_reg(smb137b_chg->client,
+					COMMAND_A_REG, cmd_val);
+	if (ret) {
+		dev_err(&smb137b_chg->client->dev,
+			"%s couldn't write to command_reg\n", __func__);
+		goto out;
+	}
+	ret = smb137b_write_reg(smb137b_chg->client,
+					PIN_CTRL_REG, PIN_CTRL_REG_DEFAULT);
+	if (ret) {
+		dev_err(&smb137b_chg->client->dev,
+			"%s couldn't write to pin ctrl reg\n", __func__);
+		goto out;
+	}
+	smb137b_chg->charging = true;
+	smb137b_chg->batt_status = POWER_SUPPLY_STATUS_CHARGING;
+	smb137b_chg->batt_chg_type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+
+	ret = smb137b_read_reg(smb137b_chg->client, STATUS_E_REG, &temp);
+	if (ret) {
+		dev_err(&smb137b_chg->client->dev,
+			"%s couldn't read status e reg %d\n", __func__, ret);
+	} else {
+		if (temp & CHARGER_ERROR_STAT) {
+			dev_err(&smb137b_chg->client->dev,
+				"%s chg error E=0x%x\n", __func__, temp);
+			smb137b_dbg_print_status_regs(smb137b_chg);
+		}
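+		/* STATUS_E bits 2:1 report the charge phase; a value of 2 or
+		 * more means the charger has entered fast charge.
+		 */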
+		if (((temp & CHARGING_STAT_E) >> 1) >= FAST_CHG_E_STATUS)
+			smb137b_chg->batt_chg_type
+						= POWER_SUPPLY_CHARGE_TYPE_FAST;
+	}
+	/* Schedule charge_work to keep track of the battery charging state. */
+	schedule_delayed_work(&smb137b_chg->charge_work, SMB137B_CHG_PERIOD);
+
+out:
+	return ret;
+}
+
+static int smb137b_stop_charging(struct msm_hardware_charger *hw_chg)
+{
+	int ret = 0;
+	struct smb137b_data *smb137b_chg;
+
+	smb137b_chg = container_of(hw_chg, struct smb137b_data, adapter_hw_chg);
+
+	dev_dbg(&smb137b_chg->client->dev, "%s\n", __func__);
+	if (smb137b_chg->charging == false)
+		return 0;
+
+	smb137b_chg->charging = false;
+	smb137b_chg->batt_status = POWER_SUPPLY_STATUS_DISCHARGING;
+	smb137b_chg->batt_chg_type = POWER_SUPPLY_CHARGE_TYPE_NONE;
+
+	ret = smb137b_write_reg(smb137b_chg->client, COMMAND_A_REG,
+				smb137b_chg->cur_charging_mode);
+	if (ret) {
+		dev_err(&smb137b_chg->client->dev,
+			"%s couldn't write to command_reg\n", __func__);
+		goto out;
+	}
+
+	ret = smb137b_write_reg(smb137b_chg->client,
+					PIN_CTRL_REG, PIN_CTRL_REG_CHG_OFF);
+	if (ret)
+		dev_err(&smb137b_chg->client->dev,
+			"%s couldn't write to pin ctrl reg\n", __func__);
+
+out:
+	return ret;
+}
+
+static int smb137b_charger_switch(struct msm_hardware_charger *hw_chg)
+{
+	struct smb137b_data *smb137b_chg;
+
+	smb137b_chg = container_of(hw_chg, struct smb137b_data, adapter_hw_chg);
+	dev_dbg(&smb137b_chg->client->dev, "%s\n", __func__);
+	return 0;
+}
+
+static irqreturn_t smb137b_valid_handler(int irq, void *dev_id)
+{
+	int val;
+	struct smb137b_data *smb137b_chg;
+	struct i2c_client *client = dev_id;
+
+	smb137b_chg = i2c_get_clientdata(client);
+
+	pr_debug("%s Cable Detected USB inserted\n", __func__);
+	/* Extra delay to let CABLE_DET_N settle and debounce before
+	 * sampling its value. */
+	usleep_range(1000, 1200);
+	val = gpio_get_value_cansleep(smb137b_chg->valid_n_gpio);
+	if (val < 0) {
+		dev_err(&smb137b_chg->client->dev,
+			"%s gpio_get_value failed for %d ret=%d\n", __func__,
+			smb137b_chg->valid_n_gpio, val);
+		goto err;
+	}
+	dev_dbg(&smb137b_chg->client->dev, "%s val=%d\n", __func__, val);
+
+	if (val) {
+		if (smb137b_chg->usb_status != SMB137B_ABSENT) {
+			smb137b_chg->usb_status = SMB137B_ABSENT;
+			msm_charger_notify_event(&smb137b_chg->adapter_hw_chg,
+					CHG_REMOVED_EVENT);
+		}
+	} else {
+		if (smb137b_chg->usb_status == SMB137B_ABSENT) {
+			smb137b_chg->usb_status = SMB137B_PRESENT;
+			msm_charger_notify_event(&smb137b_chg->adapter_hw_chg,
+					CHG_INSERTED_EVENT);
+		}
+	}
+err:
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *dent;
+static int debug_fs_otg;
+static int otg_get(void *data, u64 *value)
+{
+	*value = debug_fs_otg;
+	return 0;
+}
+static int otg_set(void *data, u64 value)
+{
+	/* Remember the requested state so reads reflect it, then apply it. */
+	debug_fs_otg = !!value;
+	smb137b_otg_power(debug_fs_otg);
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(smb137b_otg_fops, otg_get, otg_set, "%llu\n");
+
+static void smb137b_create_debugfs_entries(struct smb137b_data *smb137b_chg)
+{
+	dent = debugfs_create_dir("smb137b", NULL);
+	if (dent) {
+		debugfs_create_file("otg", 0644, dent, NULL, &smb137b_otg_fops);
+	}
+}
+static void smb137b_destroy_debugfs_entries(void)
+{
+	if (dent)
+		debugfs_remove_recursive(dent);
+}
+#else
+static void smb137b_create_debugfs_entries(struct smb137b_data *smb137b_chg)
+{
+}
+static void smb137b_destroy_debugfs_entries(void)
+{
+}
+#endif
+
+static int set_disable_status_param(const char *val, struct kernel_param *kp)
+{
+	int ret;
+
+	ret = param_set_int(val, kp);
+	if (ret)
+		return ret;
+
+	if (usb_smb137b_chg && disabled)
+		msm_charger_notify_event(&usb_smb137b_chg->adapter_hw_chg,
+				CHG_DONE_EVENT);
+
+	pr_debug("%s disabled =%d\n", __func__, disabled);
+	return 0;
+}
+module_param_call(disabled, set_disable_status_param, param_get_uint,
+					&disabled, 0644);
+static void smb137b_charge_sm(struct work_struct *smb137b_work)
+{
+	int ret;
+	struct smb137b_data *smb137b_chg;
+	u8 temp = 0;
+	int notify_batt_changed = 0;
+
+	smb137b_chg = container_of(smb137b_work, struct smb137b_data,
+			charge_work.work);
+
+	/* If not charging, exit the smb137b charging state transition. */
+	if (!smb137b_chg->charging)
+		return;
+
+	dev_dbg(&smb137b_chg->client->dev, "%s\n", __func__);
+
+	ret = smb137b_read_reg(smb137b_chg->client, STATUS_F_REG, &temp);
+	if (ret) {
+		dev_err(&smb137b_chg->client->dev,
+			"%s couldn't read status f reg %d\n", __func__, ret);
+		goto out;
+	}
+	if (smb137b_chg->batt_present != !(temp & BATT_PRESENT_STAT)) {
+		smb137b_chg->batt_present = !(temp & BATT_PRESENT_STAT);
+		notify_batt_changed = 1;
+	}
+
+	if (!smb137b_chg->batt_present)
+		smb137b_chg->batt_chg_type = POWER_SUPPLY_CHARGE_TYPE_NONE;
+
+	if (!smb137b_chg->batt_present && smb137b_chg->charging)
+		msm_charger_notify_event(&smb137b_chg->adapter_hw_chg,
+				CHG_DONE_EVENT);
+
+	if (smb137b_chg->batt_present
+		&& smb137b_chg->charging
+		&& smb137b_chg->batt_chg_type
+			!= POWER_SUPPLY_CHARGE_TYPE_FAST) {
+		ret = smb137b_read_reg(smb137b_chg->client,
+						STATUS_E_REG, &temp);
+		if (ret) {
+			dev_err(&smb137b_chg->client->dev,
+				"%s couldn't read cntrl reg\n", __func__);
+			goto out;
+
+		} else {
+			if (temp & CHARGER_ERROR_STAT) {
+				dev_err(&smb137b_chg->client->dev,
+					"%s error E=0x%x\n", __func__, temp);
+				smb137b_dbg_print_status_regs(smb137b_chg);
+			}
+			if (((temp & CHARGING_STAT_E) >> 1)
+					>= FAST_CHG_E_STATUS) {
+				smb137b_chg->batt_chg_type
+					= POWER_SUPPLY_CHARGE_TYPE_FAST;
+				notify_batt_changed = 1;
+				msm_charger_notify_event(
+					&smb137b_chg->adapter_hw_chg,
+					CHG_BATT_BEGIN_FAST_CHARGING);
+			} else {
+				smb137b_chg->batt_chg_type
+					= POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+			}
+		}
+	}
+
+out:
+	schedule_delayed_work(&smb137b_chg->charge_work,
+					SMB137B_CHG_PERIOD);
+}
+
+static void __smb137b_otg_power(int on)
+{
+	int ret;
+
+	if (on) {
+		ret = smb137b_write_reg(usb_smb137b_chg->client,
+					PIN_CTRL_REG, PIN_CTRL_REG_CHG_OFF);
+		if (ret) {
+			pr_err("%s turning off charging in pin_ctrl err=%d\n",
+								__func__, ret);
+			/*
+			 * don't change the command register if charging in
+			 * pin control cannot be turned off
+			 */
+			return;
+		}
+
+		ret = smb137b_write_reg(usb_smb137b_chg->client,
+			COMMAND_A_REG, COMMAND_A_REG_OTG_MODE);
+		if (ret)
+			pr_err("%s failed turning on OTG mode ret=%d\n",
+								__func__, ret);
+	} else {
+		ret = smb137b_write_reg(usb_smb137b_chg->client,
+			COMMAND_A_REG, COMMAND_A_REG_DEFAULT);
+		if (ret)
+			pr_err("%s failed turning off OTG mode ret=%d\n",
+								__func__, ret);
+		ret = smb137b_write_reg(usb_smb137b_chg->client,
+				PIN_CTRL_REG, PIN_CTRL_REG_DEFAULT);
+		if (ret)
+			pr_err("%s failed writing to pn_ctrl ret=%d\n",
+								__func__, ret);
+	}
+}
+static int __devinit smb137b_probe(struct i2c_client *client,
+				    const struct i2c_device_id *id)
+{
+	const struct smb137b_platform_data *pdata;
+	struct smb137b_data *smb137b_chg;
+	int ret = 0;
+
+	pdata = client->dev.platform_data;
+
+	if (pdata == NULL) {
+		dev_err(&client->dev, "%s no platform data\n", __func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (!i2c_check_functionality(client->adapter,
+				I2C_FUNC_SMBUS_BYTE_DATA)) {
+		ret = -EIO;
+		goto out;
+	}
+
+	smb137b_chg = kzalloc(sizeof(*smb137b_chg), GFP_KERNEL);
+	if (!smb137b_chg) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	INIT_DELAYED_WORK(&smb137b_chg->charge_work, smb137b_charge_sm);
+	smb137b_chg->client = client;
+	smb137b_chg->valid_n_gpio = pdata->valid_n_gpio;
+	smb137b_chg->batt_mah_rating = pdata->batt_mah_rating;
+	if (smb137b_chg->batt_mah_rating == 0)
+		smb137b_chg->batt_mah_rating = SMB137B_DEFAULT_BATT_RATING;
+
+	/* It supports both USB wall chargers and PC USB chargers. */
+	smb137b_chg->adapter_hw_chg.type = CHG_TYPE_USB;
+	smb137b_chg->adapter_hw_chg.rating = pdata->batt_mah_rating;
+	smb137b_chg->adapter_hw_chg.name = "smb137b-charger";
+	smb137b_chg->adapter_hw_chg.start_charging = smb137b_start_charging;
+	smb137b_chg->adapter_hw_chg.stop_charging = smb137b_stop_charging;
+	smb137b_chg->adapter_hw_chg.charging_switched = smb137b_charger_switch;
+
+	if (pdata->chg_detection_config)
+		ret = pdata->chg_detection_config();
+	if (ret) {
+		dev_err(&client->dev, "%s valid config failed ret=%d\n",
+			__func__, ret);
+		goto free_smb137b_chg;
+	}
+
+	ret = gpio_request(pdata->valid_n_gpio, "smb137b_charger_valid");
+	if (ret) {
+		dev_err(&client->dev, "%s gpio_request failed for %d ret=%d\n",
+			__func__, pdata->valid_n_gpio, ret);
+		goto free_smb137b_chg;
+	}
+
+	i2c_set_clientdata(client, smb137b_chg);
+
+	ret = msm_charger_register(&smb137b_chg->adapter_hw_chg);
+	if (ret) {
+		dev_err(&client->dev, "%s msm_charger_register failed ret=%d\n",
+			__func__, ret);
+		goto free_valid_gpio;
+	}
+
+	ret = irq_set_irq_wake(client->irq, 1);
+	if (ret) {
+		dev_err(&client->dev, "%s failed for irq_set_irq_wake %d ret =%d\n",
+			 __func__, client->irq, ret);
+		goto unregister_charger;
+	}
+
+	ret = request_threaded_irq(client->irq, NULL,
+				   smb137b_valid_handler,
+				   IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+				   "smb137b_charger_valid", client);
+	if (ret) {
+		dev_err(&client->dev,
+			"%s request_threaded_irq failed for %d ret =%d\n",
+			__func__, client->irq, ret);
+		goto disable_irq_wake;
+	}
+
+	ret = gpio_get_value_cansleep(smb137b_chg->valid_n_gpio);
+	if (ret < 0) {
+		dev_err(&client->dev,
+			"%s gpio_get_value failed for %d ret=%d\n", __func__,
+			pdata->valid_n_gpio, ret);
+		/* assume absent */
+		ret = 1;
+	}
+	if (!ret) {
+		msm_charger_notify_event(&smb137b_chg->adapter_hw_chg,
+			CHG_INSERTED_EVENT);
+		smb137b_chg->usb_status = SMB137B_PRESENT;
+	}
+
+	ret = smb137b_read_reg(smb137b_chg->client, DEV_ID_REG,
+			&smb137b_chg->dev_id_reg);
+
+	ret = device_create_file(&smb137b_chg->client->dev, &dev_attr_id_reg);
+
+	/* TODO read min_design and max_design from chip registers */
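+	/* Hard-coded limits below are assumed to be in mV until the TODO above
+	 * is addressed.
+	 */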
+	smb137b_chg->min_design = 3200;
+	smb137b_chg->max_design = 4200;
+
+	smb137b_chg->batt_status = POWER_SUPPLY_STATUS_DISCHARGING;
+	smb137b_chg->batt_chg_type = POWER_SUPPLY_CHARGE_TYPE_NONE;
+
+	device_init_wakeup(&client->dev, 1);
+
+	mutex_lock(&init_lock);
+	usb_smb137b_chg = smb137b_chg;
+	if (init_otg_power)
+		__smb137b_otg_power(init_otg_power);
+	mutex_unlock(&init_lock);
+
+	smb137b_create_debugfs_entries(smb137b_chg);
+	dev_dbg(&client->dev,
+		"%s OK device_id = %x chg_state=%d\n", __func__,
+		smb137b_chg->dev_id_reg, smb137b_chg->usb_status);
+	return 0;
+
+disable_irq_wake:
+	irq_set_irq_wake(client->irq, 0);
+unregister_charger:
+	msm_charger_unregister(&smb137b_chg->adapter_hw_chg);
+free_valid_gpio:
+	gpio_free(pdata->valid_n_gpio);
+free_smb137b_chg:
+	kfree(smb137b_chg);
+out:
+	return ret;
+}
+
+void smb137b_otg_power(int on)
+{
+	pr_debug("%s Enter on=%d\n", __func__, on);
+
+	mutex_lock(&init_lock);
+	if (!usb_smb137b_chg) {
+		init_otg_power = !!on;
+		pr_warning("%s called when not initialized\n", __func__);
+		mutex_unlock(&init_lock);
+		return;
+	}
+	__smb137b_otg_power(on);
+	mutex_unlock(&init_lock);
+}
+
+static int __devexit smb137b_remove(struct i2c_client *client)
+{
+	const struct smb137b_platform_data *pdata;
+	struct smb137b_data *smb137b_chg = i2c_get_clientdata(client);
+
+	pdata = client->dev.platform_data;
+	device_init_wakeup(&client->dev, 0);
+	irq_set_irq_wake(client->irq, 0);
+	free_irq(client->irq, client);
+	gpio_free(pdata->valid_n_gpio);
+	cancel_delayed_work_sync(&smb137b_chg->charge_work);
+
+	msm_charger_notify_event(&smb137b_chg->adapter_hw_chg,
+			 CHG_REMOVED_EVENT);
+	msm_charger_unregister(&smb137b_chg->adapter_hw_chg);
+	smb137b_destroy_debugfs_entries();
+	kfree(smb137b_chg);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int smb137b_suspend(struct device *dev)
+{
+	struct smb137b_data *smb137b_chg = dev_get_drvdata(dev);
+
+	dev_dbg(&smb137b_chg->client->dev, "%s\n", __func__);
+	if (smb137b_chg->charging)
+		return -EBUSY;
+	return 0;
+}
+
+static int smb137b_resume(struct device *dev)
+{
+	struct smb137b_data *smb137b_chg = dev_get_drvdata(dev);
+
+	dev_dbg(&smb137b_chg->client->dev, "%s\n", __func__);
+	return 0;
+}
+
+static const struct dev_pm_ops smb137b_pm_ops = {
+	.suspend = smb137b_suspend,
+	.resume = smb137b_resume,
+};
+#endif
+
+static const struct i2c_device_id smb137b_id[] = {
+	{"smb137b", 0},
+	{},
+};
+MODULE_DEVICE_TABLE(i2c, smb137b_id);
+
+static struct i2c_driver smb137b_driver = {
+	.driver = {
+		   .name = "smb137b",
+		   .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+		   .pm = &smb137b_pm_ops,
+#endif
+	},
+	.probe = smb137b_probe,
+	.remove = __devexit_p(smb137b_remove),
+	.id_table = smb137b_id,
+};
+
+static int __init smb137b_init(void)
+{
+	return i2c_add_driver(&smb137b_driver);
+}
+module_init(smb137b_init);
+
+static void __exit smb137b_exit(void)
+{
+	return i2c_del_driver(&smb137b_driver);
+}
+module_exit(smb137b_exit);
+
+MODULE_AUTHOR("Abhijeet Dharmapurikar <adharmap@codeaurora.org>");
+MODULE_DESCRIPTION("Driver for SMB137B Charger chip");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("i2c:smb137b");
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index d7ed20f..3ebf2aa 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -303,5 +303,47 @@
 	help
 	  This driver supports TPS65910 voltage regulator chips.
 
+config REGULATOR_PMIC8058
+	tristate "PMIC8058 regulator driver"
+	depends on PMIC8058 && (ARCH_MSM8X60 || ARCH_FSM9XXX)
+	default y if PMIC8058 && (ARCH_MSM8X60 || ARCH_FSM9XXX)
+	help
+	  Say Y here to support the voltage regulators on the PMIC8058.
+
+config REGULATOR_PMIC8901
+	tristate "PMIC8901 regulator driver"
+	depends on PMIC8901 && ARCH_MSM8X60
+	default y if PMIC8901 && ARCH_MSM8X60
+	help
+	  Say Y here to support the voltage regulators on the PMIC8901.
+
+config REGULATOR_PM8921
+	tristate "Qualcomm PM8921 PMIC Power regulators"
+	depends on MFD_PM8921_CORE
+	default y if MFD_PM8921_CORE
+	help
+	  This driver supports voltage regulators in the Qualcomm PM8921 PMIC
+	  chip.  The PM8921 provides several different varieties of LDO and
+	  switching regulators.  It also provides a negative charge pump and
+	  voltage switches.
+
+config REGULATOR_GPIO
+	tristate "GPIO regulator"
+	depends on GPIOLIB
+	help
+	  This driver provides a regulator wrapper around a GPIO pin that is set
+	  to output.  It is intended to be used for GPIO pins that provide the
+	  enable signal to a physical regulator.  The GPIO enable signal can
+	  be configured to be active high (default) or active low.
+
+config REGULATOR_PM8058_XO
+	tristate "PM8058 XO Buffer driver"
+	depends on PMIC8058
+	default n
+	help
+	  This driver supports XO buffer control in the Qualcomm PM8058 PMIC
+	  chip.  It should only be used when Linux on the application
+	  processor is the master in control of the XO buffers.
+
 endif
 
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 3932d2e..1dfaa3c 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -43,5 +43,10 @@
 obj-$(CONFIG_REGULATOR_AB8500)	+= ab8500.o
 obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o
 obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o
+obj-$(CONFIG_REGULATOR_PMIC8058) += pmic8058-regulator.o
+obj-$(CONFIG_REGULATOR_PMIC8901) += pmic8901-regulator.o
+obj-$(CONFIG_REGULATOR_PM8921) += pm8921-regulator.o
+obj-$(CONFIG_REGULATOR_GPIO) += gpio-regulator.o
+obj-$(CONFIG_REGULATOR_PM8058_XO) += pm8058-xo.o
 
 ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index d3e3879..d4d055d 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -24,6 +24,9 @@
 #include <linux/mutex.h>
 #include <linux/suspend.h>
 #include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
 #include <linux/regulator/consumer.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
@@ -47,6 +50,7 @@
 static LIST_HEAD(regulator_map_list);
 static bool has_full_constraints;
 static bool board_wants_dummy_regulator;
+static int suppress_info_printing;
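+/* Set via regulator_suppress_info_printing() to silence info-level messages. */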
 
 #ifdef CONFIG_DEBUG_FS
 static struct dentry *debugfs_root;
@@ -75,6 +79,7 @@
 	int uA_load;
 	int min_uV;
 	int max_uV;
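+	/* Number of successful regulator_enable() calls by this consumer. */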
+	int enabled;
 	char *supply_name;
 	struct device_attribute dev_attr;
 	struct regulator_dev *rdev;
@@ -138,6 +143,15 @@
 		return -EPERM;
 	}
 
+	/* check if requested voltage range actually overlaps the constraints */
+	if (*max_uV < rdev->constraints->min_uV ||
+	    *min_uV > rdev->constraints->max_uV) {
+		rdev_err(rdev, "requested voltage range [%d, %d] does not fit "
+			"within constraints: [%d, %d]\n", *min_uV, *max_uV,
+			rdev->constraints->min_uV, rdev->constraints->max_uV);
+		return -EINVAL;
+	}
+
 	if (*max_uV > rdev->constraints->max_uV)
 		*max_uV = rdev->constraints->max_uV;
 	if (*min_uV < rdev->constraints->min_uV)
@@ -587,13 +601,87 @@
 	.dev_attrs = regulator_dev_attrs,
 };
 
+static int regulator_check_voltage_update(struct regulator_dev *rdev)
+{
+	if (!rdev->constraints)
+		return -ENODEV;
+	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE))
+		return -EPERM;
+	if (!rdev->desc->ops->set_voltage)
+		return -EINVAL;
+
+	return 0;
+}
+
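+/*
+ * Aggregate the voltage votes of all other enabled consumers on this
+ * regulator with the new request and apply the tightest satisfying range.
+ * Caller must hold rdev->mutex.
+ */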
+static int update_voltage(struct regulator *regulator, int min_uV, int max_uV)
+{
+	struct regulator_dev *rdev = regulator->rdev;
+	struct regulator *sibling;
+	int ret = 0;
+
+	list_for_each_entry(sibling, &rdev->consumer_list, list) {
+		if (regulator == sibling || !sibling->enabled)
+			continue;
+		if (max_uV < sibling->min_uV || min_uV > sibling->max_uV) {
+			printk(KERN_ERR "%s: requested voltage range [%d, %d] "
+				"for %s does not fit within previously voted "
+				"range: [%d, %d]\n",
+				__func__, min_uV, max_uV,
+				rdev_get_name(regulator->rdev),
+				sibling->min_uV,
+				sibling->max_uV);
+			ret = -EINVAL;
+			goto out;
+		}
+		if (sibling->max_uV < max_uV)
+			max_uV = sibling->max_uV;
+		if (sibling->min_uV > min_uV)
+			min_uV = sibling->min_uV;
+	}
+
+	ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
+	if (ret)
+		goto out;
+
+	_notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE, NULL);
+
+out:
+	return ret;
+}
+
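+/*
+ * Recompute the aggregate voltage from the remaining enabled consumers and
+ * reapply it, e.g. after a consumer has been disabled.
+ * Caller must hold rdev->mutex.
+ */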
+static int update_voltage_prev(struct regulator_dev *rdev)
+{
+	int ret, min_uV = INT_MIN, max_uV = INT_MAX;
+	struct regulator *consumer;
+
+	list_for_each_entry(consumer, &rdev->consumer_list, list) {
+		if (!consumer->enabled)
+			continue;
+		if (consumer->max_uV < max_uV)
+			max_uV = consumer->max_uV;
+		if (consumer->min_uV > min_uV)
+			min_uV = consumer->min_uV;
+	}
+
+	if (min_uV == INT_MIN)
+		return 0;
+
+	ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
+	if (ret)
+		return ret;
+
+	_notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE, NULL);
+
+	return ret;
+}
+
 /* Calculate the new optimum regulator operating mode based on the new total
  * consumer load. All locks held by caller */
 static void drms_uA_update(struct regulator_dev *rdev)
 {
 	struct regulator *sibling;
 	int current_uA = 0, output_uV, input_uV, err;
-	unsigned int mode;
+	unsigned int regulator_curr_mode, mode;
 
 	err = regulator_check_drms(rdev);
 	if (err < 0 || !rdev->desc->ops->get_optimum_mode ||
@@ -626,6 +714,14 @@
 
 	/* check the new mode is allowed */
 	err = regulator_mode_constrain(rdev, &mode);
+	/* return if the same mode is requested */
+	if (rdev->desc->ops->get_mode) {
+		regulator_curr_mode = rdev->desc->ops->get_mode(rdev);
+		if (regulator_curr_mode == mode)
+			return;
+	} else
+		return;
+
 	if (err == 0)
 		rdev->desc->ops->set_mode(rdev, mode);
 }
@@ -914,7 +1010,8 @@
 		}
 	}
 
-	print_constraints(rdev);
+	if (!suppress_info_printing)
+		print_constraints(rdev);
 out:
 	return ret;
 }
@@ -1389,7 +1486,33 @@
 	int ret = 0;
 
 	mutex_lock(&rdev->mutex);
+
+	if (!regulator_check_voltage_update(rdev)) {
+		if (regulator->min_uV < rdev->constraints->min_uV ||
+		    regulator->max_uV > rdev->constraints->max_uV) {
+			rdev_err(rdev, "invalid input - constraint: [%d, %d], "
+			       "set point: [%d, %d]\n",
+			       rdev->constraints->min_uV,
+			       rdev->constraints->max_uV,
+			       regulator->min_uV,
+			       regulator->max_uV);
+			ret = -EINVAL;
+			goto out;
+		}
+
+		ret = update_voltage(regulator, regulator->min_uV,
+				regulator->max_uV);
+		if (ret)
+			goto out;
+	}
+
 	ret = _regulator_enable(rdev);
+	if (ret)
+		goto out;
+
+	regulator->enabled++;
+
+out:
 	mutex_unlock(&rdev->mutex);
 	return ret;
 }
@@ -1463,6 +1586,15 @@
 
 	mutex_lock(&rdev->mutex);
 	ret = _regulator_disable(rdev, &supply_rdev);
+	if (ret)
+		goto out;
+
+	regulator->enabled--;
+
+	if (!regulator_check_voltage_update(rdev))
+		update_voltage_prev(rdev);
+
+out:
 	mutex_unlock(&rdev->mutex);
 
 	/* decrease our supplies ref count and disable if required */
@@ -1770,15 +1902,16 @@
 	ret = regulator_check_voltage(rdev, &min_uV, &max_uV);
 	if (ret < 0)
 		goto out;
+
+	if (regulator->enabled) {
+		ret = update_voltage(regulator, min_uV, max_uV);
+		if (ret)
+			goto out;
+	}
+
 	regulator->min_uV = min_uV;
 	regulator->max_uV = max_uV;
 
-	ret = regulator_check_consumers(rdev, &min_uV, &max_uV);
-	if (ret < 0)
-		goto out;
-
-	ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
-
 out:
 	mutex_unlock(&rdev->mutex);
 	return ret;
@@ -2514,22 +2647,356 @@
 	return status;
 }
 
+#ifdef CONFIG_DEBUG_FS
+
+#define MAX_DEBUG_BUF_LEN 50
+
+static DEFINE_MUTEX(debug_buf_mutex);
+static char debug_buf[MAX_DEBUG_BUF_LEN];
+
+static int reg_debug_enable_set(void *data, u64 val)
+{
+	int err_info;
+	if (IS_ERR(data) || data == NULL) {
+		pr_err("Function Input Error %ld\n", PTR_ERR(data));
+		return -ENOMEM;
+	}
+
+	if (val)
+		err_info = regulator_enable(data);
+	else
+		err_info = regulator_disable(data);
+
+	return err_info;
+}
+
+static int reg_debug_enable_get(void *data, u64 *val)
+{
+	if (IS_ERR(data) || data == NULL) {
+		pr_err("Function Input Error %ld\n", PTR_ERR(data));
+		return -ENOMEM;
+	}
+
+	*val = regulator_is_enabled(data);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(reg_enable_fops, reg_debug_enable_get,
+			reg_debug_enable_set, "%llu\n");
+
+static int reg_debug_fdisable_set(void *data, u64 val)
+{
+	int err_info;
+	if (IS_ERR(data) || data == NULL) {
+		pr_err("Function Input Error %ld\n", PTR_ERR(data));
+		return -ENOMEM;
+	}
+
+	if (val > 0)
+		err_info = regulator_force_disable(data);
+	else
+		err_info = 0;
+
+	return err_info;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(reg_fdisable_fops, reg_debug_enable_get,
+			reg_debug_fdisable_set, "%llu\n");
+
+static ssize_t reg_debug_volt_set(struct file *file, const char __user *buf,
+					size_t count, loff_t *ppos)
+{
+	int err_info, filled;
+	int min, max = -1;
+	if (IS_ERR(file) || file == NULL) {
+		pr_err("Function Input Error %ld\n", PTR_ERR(file));
+		return -ENOMEM;
+	}
+
+	if (count < MAX_DEBUG_BUF_LEN) {
+		mutex_lock(&debug_buf_mutex);
+
+		if (copy_from_user(debug_buf, (void __user *) buf, count)) {
+			mutex_unlock(&debug_buf_mutex);
+			return -EFAULT;
+		}
+
+		debug_buf[count] = '\0';
+		filled = sscanf(debug_buf, "%d %d", &min, &max);
+
+		mutex_unlock(&debug_buf_mutex);
+		/* check that user entered two numbers */
+		if (filled < 2 || min < 0 || max < min) {
+			pr_info("Error, correct format: "
+				"'echo \"min max\" > voltage'\n");
+			return -ENOMEM;
+		} else {
+			err_info = regulator_set_voltage(file->private_data,
+							min, max);
+		}
+	} else {
+		pr_err("Error: input voltage pair string exceeds maximum "
+			"buffer length\n");
+
+		return -ENOMEM;
+	}
+
+	return count;
+}
+
+static ssize_t reg_debug_volt_get(struct file *file, char __user *buf,
+					size_t count, loff_t *ppos)
+{
+	int voltage, output, rc;
+	if (IS_ERR(file) || file == NULL) {
+		pr_err("Function Input Error %ld\n", PTR_ERR(file));
+		return -ENOMEM;
+	}
+
+	voltage = regulator_get_voltage(file->private_data);
+	mutex_lock(&debug_buf_mutex);
+
+	output = snprintf(debug_buf, MAX_DEBUG_BUF_LEN-1, "%d\n", voltage);
+	rc = simple_read_from_buffer((void __user *) buf, count, ppos,
+					(void *) debug_buf, output);
+
+	mutex_unlock(&debug_buf_mutex);
+
+	return rc;
+}
+
+static int reg_debug_volt_open(struct inode *inode, struct file *file)
+{
+	if (IS_ERR(file) || file == NULL) {
+		pr_err("Function Input Error %ld\n", PTR_ERR(file));
+		return -ENOMEM;
+	}
+
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static const struct file_operations reg_volt_fops = {
+	.write	= reg_debug_volt_set,
+	.open   = reg_debug_volt_open,
+	.read	= reg_debug_volt_get,
+};
+
+static int reg_debug_mode_set(void *data, u64 val)
+{
+	int err_info;
+	if (IS_ERR(data) || data == NULL) {
+		pr_err("Function Input Error %ld\n", PTR_ERR(data));
+		return -ENOMEM;
+	}
+
+	err_info = regulator_set_mode(data, (unsigned int)val);
+
+	return err_info;
+}
+
+static int reg_debug_mode_get(void *data, u64 *val)
+{
+	int err_info;
+	if (IS_ERR(data) || data == NULL) {
+		pr_err("Function Input Error %ld\n", PTR_ERR(data));
+		return -ENOMEM;
+	}
+
+	err_info = regulator_get_mode(data);
+
+	if (err_info < 0) {
+		pr_err("Regulator_get_mode returned an error!\n");
+		return -ENOMEM;
+	} else {
+		*val = err_info;
+		return 0;
+	}
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(reg_mode_fops, reg_debug_mode_get,
+			reg_debug_mode_set, "%llu\n");
+
+static int reg_debug_optimum_mode_set(void *data, u64 val)
+{
+	int err_info;
+	if (IS_ERR(data) || data == NULL) {
+		pr_err("Function Input Error %ld\n", PTR_ERR(data));
+		return -ENOMEM;
+	}
+
+	err_info = regulator_set_optimum_mode(data, (unsigned int)val);
+
+	if (err_info < 0) {
+		pr_err("Regulator_set_optimum_mode returned an error!\n");
+		return err_info;
+	}
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(reg_optimum_mode_fops, reg_debug_mode_get,
+			reg_debug_optimum_mode_set, "%llu\n");
+
+static int reg_debug_consumers_show(struct seq_file *m, void *v)
+{
+	struct regulator_dev *rdev = m->private;
+	struct regulator *reg;
+	char *supply_name;
+
+	if (!rdev) {
+		pr_err("regulator device missing");
+		return -EINVAL;
+	}
+
+	mutex_lock(&rdev->mutex);
+
+	/* Print a header if there are consumers. */
+	if (rdev->open_count)
+		seq_printf(m, "Device-Supply                    "
+			"EN    Min_uV   Max_uV  load_uA\n");
+
+	list_for_each_entry(reg, &rdev->consumer_list, list) {
+		if (reg->supply_name)
+			supply_name = reg->supply_name;
+		else
+			supply_name = "(null)-(null)";
+
+		seq_printf(m, "%-32s %c   %8d %8d %8d\n", supply_name,
+			(reg->enabled ? 'Y' : 'N'), reg->min_uV, reg->max_uV,
+			reg->uA_load);
+	}
+
+	mutex_unlock(&rdev->mutex);
+
+	return 0;
+}
+
+static int reg_debug_consumers_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, reg_debug_consumers_show, inode->i_private);
+}
+
+static const struct file_operations reg_consumers_fops = {
+	.owner		= THIS_MODULE,
+	.open		= reg_debug_consumers_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
 static void rdev_init_debugfs(struct regulator_dev *rdev)
 {
-#ifdef CONFIG_DEBUG_FS
+	struct dentry *err_ptr = NULL;
+	struct regulator *reg;
+	struct regulator_ops *reg_ops;
+	mode_t mode;
+
+	if (IS_ERR(rdev) || rdev == NULL ||
+		IS_ERR(debugfs_root) || debugfs_root == NULL) {
+		pr_err("Error-Bad Function Input\n");
+		goto error;
+	}
+
 	rdev->debugfs = debugfs_create_dir(rdev_get_name(rdev), debugfs_root);
 	if (IS_ERR(rdev->debugfs) || !rdev->debugfs) {
 		rdev_warn(rdev, "Failed to create debugfs directory\n");
 		rdev->debugfs = NULL;
-		return;
+		goto error;
 	}
 
 	debugfs_create_u32("use_count", 0444, rdev->debugfs,
 			   &rdev->use_count);
 	debugfs_create_u32("open_count", 0444, rdev->debugfs,
 			   &rdev->open_count);
-#endif
+	debugfs_create_file("consumers", 0444, rdev->debugfs, rdev,
+			    &reg_consumers_fops);
+
+	reg = regulator_get(NULL, rdev->desc->name);
+	if (IS_ERR(reg) || reg == NULL) {
+		pr_err("Error-Bad Function Input\n");
+		goto error;
+	}
+
+	reg_ops = rdev->desc->ops;
+	mode = S_IRUGO | S_IWUSR;
+	/* Enabled File */
+	if (mode)
+		err_ptr = debugfs_create_file("enable", mode, rdev->debugfs,
+						reg, &reg_enable_fops);
+	if (IS_ERR(err_ptr)) {
+		pr_err("Error-Could not create enable file\n");
+		debugfs_remove_recursive(rdev->debugfs);
+		goto error;
+	}
+
+	mode = 0;
+	/* Force-Disable File */
+	if (reg_ops->is_enabled)
+		mode |= S_IRUGO;
+	if (reg_ops->enable || reg_ops->disable)
+		mode |= S_IWUSR;
+	if (mode)
+		err_ptr = debugfs_create_file("force_disable", mode,
+					rdev->debugfs, reg, &reg_fdisable_fops);
+	if (IS_ERR(err_ptr)) {
+		pr_err("Error-Could not create force_disable file\n");
+		debugfs_remove_recursive(rdev->debugfs);
+		goto error;
+	}
+
+	mode = 0;
+	/* Voltage File */
+	if (reg_ops->get_voltage)
+		mode |= S_IRUGO;
+	if (reg_ops->set_voltage)
+		mode |= S_IWUSR;
+	if (mode)
+		err_ptr = debugfs_create_file("voltage", mode, rdev->debugfs,
+						reg, &reg_volt_fops);
+	if (IS_ERR(err_ptr)) {
+		pr_err("Error-Could not create voltage file\n");
+		debugfs_remove_recursive(rdev->debugfs);
+		goto error;
+	}
+
+	mode = 0;
+	/* Mode File */
+	if (reg_ops->get_mode)
+		mode |= S_IRUGO;
+	if (reg_ops->set_mode)
+		mode |= S_IWUSR;
+	if (mode)
+		err_ptr = debugfs_create_file("mode", mode, rdev->debugfs,
+						reg, &reg_mode_fops);
+	if (IS_ERR(err_ptr)) {
+		pr_err("Error-Could not create mode file\n");
+		debugfs_remove_recursive(rdev->debugfs);
+		goto error;
+	}
+
+	mode = 0;
+	/* Optimum Mode File */
+	if (reg_ops->get_mode)
+		mode |= S_IRUGO;
+	if (reg_ops->set_mode)
+		mode |= S_IWUSR;
+	if (mode)
+		err_ptr = debugfs_create_file("optimum_mode", mode,
+				rdev->debugfs, reg, &reg_optimum_mode_fops);
+	if (IS_ERR(err_ptr)) {
+		pr_err("Error-Could not create optimum_mode file\n");
+		debugfs_remove_recursive(rdev->debugfs);
+		goto error;
+	}
+
+error:
+	return;
 }
+#else
+static inline void rdev_init_debugfs(struct regulator_dev *rdev)
+{
+	return;
+}
+#endif
 
 /**
  * regulator_register - register regulator
@@ -2663,9 +3130,9 @@
 
 	list_add(&rdev->list, &regulator_list);
 
-	rdev_init_debugfs(rdev);
 out:
 	mutex_unlock(&regulator_list_mutex);
+	rdev_init_debugfs(rdev);
 	return rdev;
 
 unset_supplies:
@@ -2819,6 +3286,22 @@
 EXPORT_SYMBOL_GPL(regulator_use_dummy_regulator);
 
 /**
+ * regulator_suppress_info_printing - disable printing of info messages
+ *
+ * The regulator framework calls print_constraints() when a regulator is
+ * registered.  It also prints a disable message for each unused regulator in
+ * regulator_init_complete().
+ *
+ * Calling this function ensures that such messages do not end up in the
+ * log.
+ */
+void regulator_suppress_info_printing(void)
+{
+	suppress_info_printing = 1;
+}
+EXPORT_SYMBOL_GPL(regulator_suppress_info_printing);
+
+/**
  * rdev_get_drvdata - get rdev regulator driver data
  * @rdev: regulator
  *
@@ -2936,7 +3419,8 @@
 		if (has_full_constraints) {
 			/* We log since this may kill the system if it
 			 * goes wrong. */
-			rdev_info(rdev, "disabling\n");
+			if (!suppress_info_printing)
+				rdev_info(rdev, "disabling\n");
 			ret = ops->disable(rdev);
 			if (ret != 0) {
 				rdev_err(rdev, "couldn't disable: %d\n", ret);
@@ -2947,7 +3431,9 @@
 			 * so warn even if we aren't going to do
 			 * anything here.
 			 */
-			rdev_warn(rdev, "incomplete constraints, leaving on\n");
+			if (!suppress_info_printing)
+				rdev_warn(rdev, "incomplete constraints, "
+						"leaving on\n");
 		}
 
 unlock:
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
new file mode 100644
index 0000000..b91e32f
--- /dev/null
+++ b/drivers/regulator/gpio-regulator.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
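+/* Prefix every pr_*() message from this file with the function name. */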
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/gpio-regulator.h>
+
+struct gpio_vreg {
+	struct regulator_desc	desc;
+	struct regulator_dev	*rdev;
+	char			*gpio_label;
+	char			*name;
+	unsigned		gpio;
+	int			active_low;
+	bool			gpio_requested;
+};
+
+static int gpio_vreg_request_gpio(struct gpio_vreg *vreg)
+{
+	int rc = 0;
+
+	/* Request GPIO now if it hasn't been requested before. */
+	if (!vreg->gpio_requested) {
+		rc = gpio_request(vreg->gpio, vreg->gpio_label);
+		if (rc < 0) {
+			pr_err("failed to request gpio %u (%s), rc=%d\n",
+				vreg->gpio, vreg->gpio_label, rc);
+			return rc;
+		}
+		vreg->gpio_requested = true;
+
+		rc = gpio_sysfs_set_active_low(vreg->gpio, vreg->active_low);
+		if (rc < 0)
+			pr_err("active_low=%d failed for gpio %u, rc=%d\n",
+				vreg->active_low, vreg->gpio, rc);
+	}
+
+	return rc;
+}
+
+static int gpio_vreg_is_enabled(struct regulator_dev *rdev)
+{
+	struct gpio_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	rc = gpio_vreg_request_gpio(vreg);
+	if (rc < 0)
+		return rc;
+
+	return (gpio_get_value_cansleep(vreg->gpio) ? 1 : 0) ^ vreg->active_low;
+}
+
+static int gpio_vreg_enable(struct regulator_dev *rdev)
+{
+	struct gpio_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	rc = gpio_vreg_request_gpio(vreg);
+	if (rc < 0)
+		return rc;
+
+	return gpio_direction_output(vreg->gpio, !vreg->active_low);
+}
+
+static int gpio_vreg_disable(struct regulator_dev *rdev)
+{
+	struct gpio_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	rc = gpio_vreg_request_gpio(vreg);
+	if (rc < 0)
+		return rc;
+
+	return gpio_direction_output(vreg->gpio, vreg->active_low);
+}
+
+static struct regulator_ops gpio_vreg_ops = {
+	.enable		= gpio_vreg_enable,
+	.disable	= gpio_vreg_disable,
+	.is_enabled	= gpio_vreg_is_enabled,
+};
+
+static int __devinit gpio_vreg_probe(struct platform_device *pdev)
+{
+	const struct gpio_regulator_platform_data *pdata;
+	struct gpio_vreg *vreg;
+	int rc = 0;
+
+	pdata = pdev->dev.platform_data;
+
+	if (!pdata) {
+		pr_err("platform data required.\n");
+		return -EINVAL;
+	}
+
+	if (!pdata->gpio_label) {
+		pr_err("gpio_label required.\n");
+		return -EINVAL;
+	}
+
+	if (!pdata->regulator_name) {
+		pr_err("regulator_name required.\n");
+		return -EINVAL;
+	}
+
+	vreg = kzalloc(sizeof(struct gpio_vreg), GFP_KERNEL);
+	if (!vreg) {
+		pr_err("kzalloc failed.\n");
+		return -ENOMEM;
+	}
+
+	vreg->name = kstrdup(pdata->regulator_name, GFP_KERNEL);
+	if (!vreg->name) {
+		pr_err("kstrdup failed.\n");
+		rc = -ENOMEM;
+		goto free_vreg;
+	}
+
+	vreg->gpio_label = kstrdup(pdata->gpio_label, GFP_KERNEL);
+	if (!vreg->gpio_label) {
+		pr_err("kstrdup failed.\n");
+		rc = -ENOMEM;
+		goto free_name;
+	}
+
+	vreg->gpio		= pdata->gpio;
+	vreg->active_low	= (pdata->active_low ? 1 : 0);
+	vreg->gpio_requested	= false;
+
+	vreg->desc.name		= vreg->name;
+	vreg->desc.id		= pdev->id;
+	vreg->desc.ops		= &gpio_vreg_ops;
+	vreg->desc.type		= REGULATOR_VOLTAGE;
+	vreg->desc.owner	= THIS_MODULE;
+
+	vreg->rdev = regulator_register(&vreg->desc, &pdev->dev,
+					&pdata->init_data, vreg);
+	if (IS_ERR(vreg->rdev)) {
+		rc = PTR_ERR(vreg->rdev);
+		pr_err("%s: regulator_register failed, rc=%d.\n", vreg->name,
+			rc);
+		goto free_gpio_label;
+	}
+
+	platform_set_drvdata(pdev, vreg);
+
+	pr_info("id=%d, name=%s, gpio=%u, gpio_label=%s\n", pdev->id,
+		vreg->name, vreg->gpio, vreg->gpio_label);
+
+	return rc;
+
+free_gpio_label:
+	kfree(vreg->gpio_label);
+free_name:
+	kfree(vreg->name);
+free_vreg:
+	kfree(vreg);
+
+	return rc;
+}
+
+static int __devexit gpio_vreg_remove(struct platform_device *pdev)
+{
+	struct gpio_vreg *vreg = platform_get_drvdata(pdev);
+
+	if (vreg->gpio_requested)
+		gpio_free(vreg->gpio);
+
+	regulator_unregister(vreg->rdev);
+	kfree(vreg->name);
+	kfree(vreg->gpio_label);
+	kfree(vreg);
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static struct platform_driver gpio_vreg_driver = {
+	.probe = gpio_vreg_probe,
+	.remove = __devexit_p(gpio_vreg_remove),
+	.driver = {
+		.name = GPIO_REGULATOR_DEV_NAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init gpio_vreg_init(void)
+{
+	return platform_driver_register(&gpio_vreg_driver);
+}
+
+static void __exit gpio_vreg_exit(void)
+{
+	platform_driver_unregister(&gpio_vreg_driver);
+}
+
+postcore_initcall(gpio_vreg_init);
+module_exit(gpio_vreg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("GPIO regulator driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:" GPIO_REGULATOR_DEV_NAME);
diff --git a/drivers/regulator/pm8058-xo.c b/drivers/regulator/pm8058-xo.c
new file mode 100644
index 0000000..ac65395
--- /dev/null
+++ b/drivers/regulator/pm8058-xo.c
@@ -0,0 +1,215 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/mfd/pmic8058.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/pm8058-xo.h>
+
+/* XO buffer masks and values */
+
+#define XO_PULLDOWN_MASK	0x08
+#define XO_PULLDOWN_ENABLE	0x08
+#define XO_PULLDOWN_DISABLE	0x00
+
+#define XO_BUFFER_MASK		0x04
+#define XO_BUFFER_ENABLE	0x04
+#define XO_BUFFER_DISABLE	0x00
+
+#define XO_MODE_MASK		0x01
+#define XO_MODE_MANUAL		0x00
+
+#define XO_ENABLE_MASK		(XO_MODE_MASK | XO_BUFFER_MASK)
+#define XO_ENABLE		(XO_MODE_MANUAL | XO_BUFFER_ENABLE)
+#define XO_DISABLE		(XO_MODE_MANUAL | XO_BUFFER_DISABLE)
+
+struct pm8058_xo_buffer {
+	struct pm8058_xo_pdata		*pdata;
+	struct regulator_dev		*rdev;
+	u16				ctrl_addr;
+	u8				ctrl_reg;
+};
+
+#define XO_BUFFER(_id, _ctrl_addr) \
+	[PM8058_XO_ID_##_id] = { \
+		.ctrl_addr = _ctrl_addr, \
+	}
+
+static struct pm8058_xo_buffer pm8058_xo_buffer[] = {
+	XO_BUFFER(A0, 0x185),
+	XO_BUFFER(A1, 0x186),
+};
+
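+/*
+ * Perform a masked write to an XO buffer control register, skipping the SSBI
+ * write when the masked value matches the cached value in *reg_save.
+ */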
+static int pm8058_xo_buffer_write(struct pm8058_chip *chip,
+		u16 addr, u8 val, u8 mask, u8 *reg_save)
+{
+	u8	reg;
+	int	rc = 0;
+
+	reg = (*reg_save & ~mask) | (val & mask);
+	if (reg != *reg_save)
+		rc = pm8058_write(chip, addr, &reg, 1);
+
+	if (rc)
+		pr_err("FAIL: pm8058_write: rc=%d\n", rc);
+	else
+		*reg_save = reg;
+	return rc;
+}
+
+static int pm8058_xo_buffer_enable(struct regulator_dev *dev)
+{
+	struct pm8058_xo_buffer *xo = rdev_get_drvdata(dev);
+	struct pm8058_chip *chip = dev_get_drvdata(dev->dev.parent);
+	int rc;
+
+	rc = pm8058_xo_buffer_write(chip, xo->ctrl_addr, XO_ENABLE,
+				    XO_ENABLE_MASK, &xo->ctrl_reg);
+	if (rc)
+		pr_err("FAIL: pm8058_xo_buffer_write: rc=%d\n", rc);
+
+	return rc;
+}
+
+static int pm8058_xo_buffer_is_enabled(struct regulator_dev *dev)
+{
+	struct pm8058_xo_buffer *xo = rdev_get_drvdata(dev);
+
+	if (xo->ctrl_reg & XO_BUFFER_ENABLE)
+		return 1;
+	else
+		return 0;
+}
+
+static int pm8058_xo_buffer_disable(struct regulator_dev *dev)
+{
+	struct pm8058_xo_buffer *xo = rdev_get_drvdata(dev);
+	struct pm8058_chip *chip = dev_get_drvdata(dev->dev.parent);
+	int rc;
+
+	rc = pm8058_xo_buffer_write(chip, xo->ctrl_addr, XO_DISABLE,
+				    XO_ENABLE_MASK, &xo->ctrl_reg);
+	if (rc)
+		pr_err("FAIL: pm8058_xo_buffer_write: rc=%d\n", rc);
+
+	return rc;
+}
+
+static struct regulator_ops pm8058_xo_ops = {
+	.enable = pm8058_xo_buffer_enable,
+	.disable = pm8058_xo_buffer_disable,
+	.is_enabled = pm8058_xo_buffer_is_enabled,
+};
+
+#define VREG_DESCRIP(_id, _name, _ops) \
+	[_id] = { \
+		.id = _id, \
+		.name = _name, \
+		.ops = _ops, \
+		.type = REGULATOR_VOLTAGE, \
+		.owner = THIS_MODULE, \
+	}
+
+static struct regulator_desc pm8058_xo_buffer_desc[] = {
+	VREG_DESCRIP(PM8058_XO_ID_A0, "8058_xo_a0", &pm8058_xo_ops),
+	VREG_DESCRIP(PM8058_XO_ID_A1, "8058_xo_a1", &pm8058_xo_ops),
+};
+
+static int pm8058_init_xo_buffer(struct pm8058_chip *chip,
+				 struct pm8058_xo_buffer *xo)
+{
+	int	rc;
+
+	/* Save the current control register state */
+	rc = pm8058_read(chip, xo->ctrl_addr, &xo->ctrl_reg, 1);
+
+	if (rc)
+		pr_err("FAIL: pm8058_read: rc=%d\n", rc);
+	return rc;
+}
+
+static int __devinit pm8058_xo_buffer_probe(struct platform_device *pdev)
+{
+	struct regulator_desc *rdesc;
+	struct pm8058_chip *chip;
+	struct pm8058_xo_buffer *xo;
+	int rc = 0;
+
+	if (pdev == NULL)
+		return -EINVAL;
+
+	if (pdev->id >= 0 && pdev->id < PM8058_XO_ID_MAX) {
+		chip = platform_get_drvdata(pdev);
+		rdesc = &pm8058_xo_buffer_desc[pdev->id];
+		xo = &pm8058_xo_buffer[pdev->id];
+		xo->pdata = pdev->dev.platform_data;
+
+		rc = pm8058_init_xo_buffer(chip, xo);
+		if (rc)
+			goto bail;
+
+		xo->rdev = regulator_register(rdesc, &pdev->dev,
+					&xo->pdata->init_data, xo);
+		if (IS_ERR(xo->rdev)) {
+			rc = PTR_ERR(xo->rdev);
+			pr_err("FAIL: regulator_register(%s): rc=%d\n",
+				pm8058_xo_buffer_desc[pdev->id].name, rc);
+		}
+	} else {
+		rc = -ENODEV;
+	}
+
+bail:
+	if (rc)
+		pr_err("Error: xo-id=%d, rc=%d\n", pdev->id, rc);
+
+	return rc;
+}
+
+static int __devexit pm8058_xo_buffer_remove(struct platform_device *pdev)
+{
+	regulator_unregister(pm8058_xo_buffer[pdev->id].rdev);
+	return 0;
+}
+
+static struct platform_driver pm8058_xo_buffer_driver = {
+	.probe = pm8058_xo_buffer_probe,
+	.remove = __devexit_p(pm8058_xo_buffer_remove),
+	.driver = {
+		.name = PM8058_XO_BUFFER_DEV_NAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init pm8058_xo_buffer_init(void)
+{
+	return platform_driver_register(&pm8058_xo_buffer_driver);
+}
+
+static void __exit pm8058_xo_buffer_exit(void)
+{
+	platform_driver_unregister(&pm8058_xo_buffer_driver);
+}
+
+subsys_initcall(pm8058_xo_buffer_init);
+module_exit(pm8058_xo_buffer_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC8058 XO buffer driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:" PM8058_XO_BUFFER_DEV_NAME);
diff --git a/drivers/regulator/pm8921-regulator.c b/drivers/regulator/pm8921-regulator.c
new file mode 100644
index 0000000..a2246eb
--- /dev/null
+++ b/drivers/regulator/pm8921-regulator.c
@@ -0,0 +1,3274 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/pm8921-regulator.h>
+#include <linux/mfd/pm8xxx/core.h>
+
+/* Debug Flag Definitions */
+enum {
+	PM8921_VREG_DEBUG_REQUEST	= BIT(0),
+	PM8921_VREG_DEBUG_DUPLICATE	= BIT(1),
+	PM8921_VREG_DEBUG_INIT		= BIT(2),
+	PM8921_VREG_DEBUG_WRITES	= BIT(3), /* SSBI writes */
+};
+
+static int pm8921_vreg_debug_mask;
+module_param_named(
+	debug_mask, pm8921_vreg_debug_mask, int, S_IRUSR | S_IWUSR
+);
+
+#define REGULATOR_TYPE_PLDO		0
+#define REGULATOR_TYPE_NLDO		1
+#define REGULATOR_TYPE_NLDO1200		2
+#define REGULATOR_TYPE_SMPS		3
+#define REGULATOR_TYPE_FTSMPS		4
+#define REGULATOR_TYPE_VS		5
+#define REGULATOR_TYPE_VS300		6
+#define REGULATOR_TYPE_NCP		7
+
+/* Common Masks */
+#define REGULATOR_ENABLE_MASK		0x80
+#define REGULATOR_ENABLE		0x80
+#define REGULATOR_DISABLE		0x00
+
+#define REGULATOR_BANK_MASK		0xF0
+#define REGULATOR_BANK_SEL(n)		((n) << 4)
+#define REGULATOR_BANK_WRITE		0x80
+
+#define LDO_TEST_BANKS			7
+#define NLDO1200_TEST_BANKS		5
+#define SMPS_TEST_BANKS			8
+#define REGULATOR_TEST_BANKS_MAX	SMPS_TEST_BANKS
+
+/*
+ * This voltage in uV is returned by get_voltage functions when there is no way
+ * to determine the current voltage level.  It is needed because the regulator
+ * framework treats a 0 uV voltage as an error.
+ */
+#define VOLTAGE_UNKNOWN			1
+
+/* LDO masks and values */
+
+/* CTRL register */
+#define LDO_ENABLE_MASK			0x80
+#define LDO_DISABLE			0x00
+#define LDO_ENABLE			0x80
+#define LDO_PULL_DOWN_ENABLE_MASK	0x40
+#define LDO_PULL_DOWN_ENABLE		0x40
+
+#define LDO_CTRL_PM_MASK		0x20
+#define LDO_CTRL_PM_HPM			0x00
+#define LDO_CTRL_PM_LPM			0x20
+
+#define LDO_CTRL_VPROG_MASK		0x1F
+
+/* TEST register bank 0 */
+#define LDO_TEST_LPM_MASK		0x40
+#define LDO_TEST_LPM_SEL_CTRL		0x00
+#define LDO_TEST_LPM_SEL_TCXO		0x40
+
+/* TEST register bank 2 */
+#define LDO_TEST_VPROG_UPDATE_MASK	0x08
+#define LDO_TEST_RANGE_SEL_MASK		0x04
+#define LDO_TEST_FINE_STEP_MASK		0x02
+#define LDO_TEST_FINE_STEP_SHIFT	1
+
+/* TEST register bank 4 */
+#define LDO_TEST_RANGE_EXT_MASK		0x01
+
+/* TEST register bank 5 */
+#define LDO_TEST_PIN_CTRL_MASK		0x0F
+#define LDO_TEST_PIN_CTRL_EN3		0x08
+#define LDO_TEST_PIN_CTRL_EN2		0x04
+#define LDO_TEST_PIN_CTRL_EN1		0x02
+#define LDO_TEST_PIN_CTRL_EN0		0x01
+
+/* TEST register bank 6 */
+#define LDO_TEST_PIN_CTRL_LPM_MASK	0x0F
+
+
+/*
+ * If a given voltage could be output by two ranges, the preferred one is
+ * determined by the range limits.  Specified voltage ranges must not
+ * overlap.
+ *
+ * Allowable voltage ranges:
+ */
+#define PLDO_LOW_UV_MIN			750000
+#define PLDO_LOW_UV_MAX			1487500
+#define PLDO_LOW_UV_FINE_STEP		12500
+
+#define PLDO_NORM_UV_MIN		1500000
+#define PLDO_NORM_UV_MAX		3075000
+#define PLDO_NORM_UV_FINE_STEP		25000
+
+#define PLDO_HIGH_UV_MIN		1750000
+#define PLDO_HIGH_UV_SET_POINT_MIN	3100000
+#define PLDO_HIGH_UV_MAX		4900000
+#define PLDO_HIGH_UV_FINE_STEP		50000
+
+#define PLDO_LOW_SET_POINTS		((PLDO_LOW_UV_MAX - PLDO_LOW_UV_MIN) \
+						/ PLDO_LOW_UV_FINE_STEP + 1)
+#define PLDO_NORM_SET_POINTS		((PLDO_NORM_UV_MAX - PLDO_NORM_UV_MIN) \
+						/ PLDO_NORM_UV_FINE_STEP + 1)
+#define PLDO_HIGH_SET_POINTS		((PLDO_HIGH_UV_MAX \
+						- PLDO_HIGH_UV_SET_POINT_MIN) \
+					/ PLDO_HIGH_UV_FINE_STEP + 1)
+#define PLDO_SET_POINTS			(PLDO_LOW_SET_POINTS \
+						+ PLDO_NORM_SET_POINTS \
+						+ PLDO_HIGH_SET_POINTS)
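+/*
+ * Worked totals for the above: low range (1487500 - 750000) / 12500 + 1 = 60,
+ * normal range (3075000 - 1500000) / 25000 + 1 = 64, and high range
+ * (4900000 - 3100000) / 50000 + 1 = 37 set points, giving 161 PLDO set
+ * points in total.
+ */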
+
+#define NLDO_UV_MIN			750000
+#define NLDO_UV_MAX			1537500
+#define NLDO_UV_FINE_STEP		12500
+
+#define NLDO_SET_POINTS			((NLDO_UV_MAX - NLDO_UV_MIN) \
+						/ NLDO_UV_FINE_STEP + 1)
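+/* i.e. (1537500 - 750000) / 12500 + 1 = 64 NLDO set points. */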
+
+/* NLDO1200 masks and values */
+
+/* CTRL register */
+#define NLDO1200_ENABLE_MASK		0x80
+#define NLDO1200_DISABLE		0x00
+#define NLDO1200_ENABLE			0x80
+
+/* Legacy mode */
+#define NLDO1200_LEGACY_PM_MASK		0x20
+#define NLDO1200_LEGACY_PM_HPM		0x00
+#define NLDO1200_LEGACY_PM_LPM		0x20
+
+/* Advanced mode */
+#define NLDO1200_CTRL_RANGE_MASK	0x40
+#define NLDO1200_CTRL_RANGE_HIGH	0x00
+#define NLDO1200_CTRL_RANGE_LOW		0x40
+#define NLDO1200_CTRL_VPROG_MASK	0x3F
+
+#define NLDO1200_LOW_UV_MIN		375000
+#define NLDO1200_LOW_UV_MAX		743750
+#define NLDO1200_LOW_UV_STEP		6250
+
+#define NLDO1200_HIGH_UV_MIN		750000
+#define NLDO1200_HIGH_UV_MAX		1537500
+#define NLDO1200_HIGH_UV_STEP		12500
+
+#define NLDO1200_LOW_SET_POINTS		((NLDO1200_LOW_UV_MAX \
+						- NLDO1200_LOW_UV_MIN) \
+					/ NLDO1200_LOW_UV_STEP + 1)
+#define NLDO1200_HIGH_SET_POINTS	((NLDO1200_HIGH_UV_MAX \
+						- NLDO1200_HIGH_UV_MIN) \
+					/ NLDO1200_HIGH_UV_STEP + 1)
+#define NLDO1200_SET_POINTS		(NLDO1200_LOW_SET_POINTS \
+						+ NLDO1200_HIGH_SET_POINTS)
+
+/* TEST register bank 0 */
+#define NLDO1200_TEST_LPM_MASK		0x04
+#define NLDO1200_TEST_LPM_SEL_CTRL	0x00
+#define NLDO1200_TEST_LPM_SEL_TCXO	0x04
+
+/* TEST register bank 1 */
+#define NLDO1200_PULL_DOWN_ENABLE_MASK	0x02
+#define NLDO1200_PULL_DOWN_ENABLE	0x02
+
+/* TEST register bank 2 */
+#define NLDO1200_ADVANCED_MODE_MASK	0x08
+#define NLDO1200_ADVANCED_MODE		0x00
+#define NLDO1200_LEGACY_MODE		0x08
+
+/* Advanced mode power mode control */
+#define NLDO1200_ADVANCED_PM_MASK	0x02
+#define NLDO1200_ADVANCED_PM_HPM	0x00
+#define NLDO1200_ADVANCED_PM_LPM	0x02
+
+#define NLDO1200_IN_ADVANCED_MODE(vreg) \
+	((vreg->test_reg[2] & NLDO1200_ADVANCED_MODE_MASK) \
+	 == NLDO1200_ADVANCED_MODE)
+
+/* SMPS masks and values */
+
+/* CTRL register */
+
+/* Legacy mode */
+#define SMPS_LEGACY_ENABLE_MASK		0x80
+#define SMPS_LEGACY_DISABLE		0x00
+#define SMPS_LEGACY_ENABLE		0x80
+#define SMPS_LEGACY_PULL_DOWN_ENABLE	0x40
+#define SMPS_LEGACY_VREF_SEL_MASK	0x20
+#define SMPS_LEGACY_VPROG_MASK		0x1F
+
+/* Advanced mode */
+#define SMPS_ADVANCED_BAND_MASK		0xC0
+#define SMPS_ADVANCED_BAND_OFF		0x00
+#define SMPS_ADVANCED_BAND_1		0x40
+#define SMPS_ADVANCED_BAND_2		0x80
+#define SMPS_ADVANCED_BAND_3		0xC0
+#define SMPS_ADVANCED_VPROG_MASK	0x3F
+
+/* Legacy mode voltage ranges */
+#define SMPS_MODE3_UV_MIN		375000
+#define SMPS_MODE3_UV_MAX		725000
+#define SMPS_MODE3_UV_STEP		25000
+
+#define SMPS_MODE2_UV_MIN		750000
+#define SMPS_MODE2_UV_MAX		1475000
+#define SMPS_MODE2_UV_STEP		25000
+
+#define SMPS_MODE1_UV_MIN		1500000
+#define SMPS_MODE1_UV_MAX		3050000
+#define SMPS_MODE1_UV_STEP		50000
+
+#define SMPS_MODE3_SET_POINTS		((SMPS_MODE3_UV_MAX \
+						- SMPS_MODE3_UV_MIN) \
+					/ SMPS_MODE3_UV_STEP + 1)
+#define SMPS_MODE2_SET_POINTS		((SMPS_MODE2_UV_MAX \
+						- SMPS_MODE2_UV_MIN) \
+					/ SMPS_MODE2_UV_STEP + 1)
+#define SMPS_MODE1_SET_POINTS		((SMPS_MODE1_UV_MAX \
+						- SMPS_MODE1_UV_MIN) \
+					/ SMPS_MODE1_UV_STEP + 1)
+#define SMPS_LEGACY_SET_POINTS		(SMPS_MODE3_SET_POINTS \
+						+ SMPS_MODE2_SET_POINTS \
+						+ SMPS_MODE1_SET_POINTS)
+
+/* Advanced mode voltage ranges */
+#define SMPS_BAND1_UV_MIN		375000
+#define SMPS_BAND1_UV_MAX		737500
+#define SMPS_BAND1_UV_STEP		12500
+
+#define SMPS_BAND2_UV_MIN		750000
+#define SMPS_BAND2_UV_MAX		1487500
+#define SMPS_BAND2_UV_STEP		12500
+
+#define SMPS_BAND3_UV_MIN		1500000
+#define SMPS_BAND3_UV_MAX		3075000
+#define SMPS_BAND3_UV_STEP		25000
+
+#define SMPS_BAND1_SET_POINTS		((SMPS_BAND1_UV_MAX \
+						- SMPS_BAND1_UV_MIN) \
+					/ SMPS_BAND1_UV_STEP + 1)
+#define SMPS_BAND2_SET_POINTS		((SMPS_BAND2_UV_MAX \
+						- SMPS_BAND2_UV_MIN) \
+					/ SMPS_BAND2_UV_STEP + 1)
+#define SMPS_BAND3_SET_POINTS		((SMPS_BAND3_UV_MAX \
+						- SMPS_BAND3_UV_MIN) \
+					/ SMPS_BAND3_UV_STEP + 1)
+#define SMPS_ADVANCED_SET_POINTS	(SMPS_BAND1_SET_POINTS \
+						+ SMPS_BAND2_SET_POINTS \
+						+ SMPS_BAND3_SET_POINTS)
+
+/* Test2 register bank 1 */
+#define SMPS_LEGACY_VLOW_SEL_MASK	0x01
+
+/* Test2 register bank 6 */
+#define SMPS_ADVANCED_PULL_DOWN_ENABLE	0x08
+
+/* Test2 register bank 7 */
+#define SMPS_ADVANCED_MODE_MASK		0x02
+#define SMPS_ADVANCED_MODE		0x02
+#define SMPS_LEGACY_MODE		0x00
+
+#define SMPS_IN_ADVANCED_MODE(vreg) \
+	((vreg->test_reg[7] & SMPS_ADVANCED_MODE_MASK) == SMPS_ADVANCED_MODE)
+
+/* BUCK_SLEEP_CNTRL register */
+#define SMPS_PIN_CTRL_MASK		0xF0
+#define SMPS_PIN_CTRL_EN3		0x80
+#define SMPS_PIN_CTRL_EN2		0x40
+#define SMPS_PIN_CTRL_EN1		0x20
+#define SMPS_PIN_CTRL_EN0		0x10
+
+#define SMPS_PIN_CTRL_LPM_MASK		0x0F
+#define SMPS_PIN_CTRL_LPM_EN3		0x08
+#define SMPS_PIN_CTRL_LPM_EN2		0x04
+#define SMPS_PIN_CTRL_LPM_EN1		0x02
+#define SMPS_PIN_CTRL_LPM_EN0		0x01
+
+/* BUCK_CLOCK_CNTRL register */
+#define SMPS_CLK_DIVIDE2		0x40
+
+#define SMPS_CLK_CTRL_MASK		0x30
+#define SMPS_CLK_CTRL_FOLLOW_TCXO	0x00
+#define SMPS_CLK_CTRL_PWM		0x10
+#define SMPS_CLK_CTRL_PFM		0x20
+
+/* FTSMPS masks and values */
+
+/* CTRL register */
+#define FTSMPS_VCTRL_BAND_MASK		0xC0
+#define FTSMPS_VCTRL_BAND_OFF		0x00
+#define FTSMPS_VCTRL_BAND_1		0x40
+#define FTSMPS_VCTRL_BAND_2		0x80
+#define FTSMPS_VCTRL_BAND_3		0xC0
+#define FTSMPS_VCTRL_VPROG_MASK		0x3F
+
+#define FTSMPS_BAND1_UV_MIN		350000
+#define FTSMPS_BAND1_UV_MAX		650000
+/* 3 LSB's of program voltage must be 0 in band 1. */
+/* Logical step size */
+#define FTSMPS_BAND1_UV_LOG_STEP	50000
+/* Physical step size */
+#define FTSMPS_BAND1_UV_PHYS_STEP	6250
+
+#define FTSMPS_BAND2_UV_MIN		700000
+#define FTSMPS_BAND2_UV_MAX		1400000
+#define FTSMPS_BAND2_UV_STEP		12500
+
+#define FTSMPS_BAND3_UV_MIN		1400000
+#define FTSMPS_BAND3_UV_SET_POINT_MIN	1500000
+#define FTSMPS_BAND3_UV_MAX		3300000
+#define FTSMPS_BAND3_UV_STEP		50000
+
+#define FTSMPS_BAND1_SET_POINTS		((FTSMPS_BAND1_UV_MAX \
+						- FTSMPS_BAND1_UV_MIN) \
+					/ FTSMPS_BAND1_UV_LOG_STEP + 1)
+#define FTSMPS_BAND2_SET_POINTS		((FTSMPS_BAND2_UV_MAX \
+						- FTSMPS_BAND2_UV_MIN) \
+					/ FTSMPS_BAND2_UV_STEP + 1)
+#define FTSMPS_BAND3_SET_POINTS		((FTSMPS_BAND3_UV_MAX \
+					  - FTSMPS_BAND3_UV_SET_POINT_MIN) \
+					/ FTSMPS_BAND3_UV_STEP + 1)
+#define FTSMPS_SET_POINTS		(FTSMPS_BAND1_SET_POINTS \
+						+ FTSMPS_BAND2_SET_POINTS \
+						+ FTSMPS_BAND3_SET_POINTS)
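+/*
+ * Band 1 counts logical 50000 uV steps: (650000 - 350000) / 50000 + 1 = 7.
+ * Band 2: (1400000 - 700000) / 12500 + 1 = 57.  Band 3 counts from its set
+ * point minimum: (3300000 - 1500000) / 50000 + 1 = 37.  Total: 101.
+ */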
+
+/* FTS_CNFG1 register bank 0 */
+#define FTSMPS_CNFG1_PM_MASK		0x0C
+#define FTSMPS_CNFG1_PM_PWM		0x00
+#define FTSMPS_CNFG1_PM_PFM		0x08
+
+/* PWR_CNFG register */
+#define FTSMPS_PULL_DOWN_ENABLE_MASK	0x40
+#define FTSMPS_PULL_DOWN_ENABLE		0x40
+
+/* VS masks and values */
+
+/* CTRL register */
+#define VS_ENABLE_MASK			0x80
+#define VS_DISABLE			0x00
+#define VS_ENABLE			0x80
+#define VS_PULL_DOWN_ENABLE_MASK	0x40
+#define VS_PULL_DOWN_ENABLE		0x40
+
+#define VS_PIN_CTRL_MASK		0x0F
+#define VS_PIN_CTRL_EN0			0x08
+#define VS_PIN_CTRL_EN1			0x04
+#define VS_PIN_CTRL_EN2			0x02
+#define VS_PIN_CTRL_EN3			0x01
+
+/* VS300 masks and values */
+
+/* CTRL register */
+#define VS300_CTRL_ENABLE_MASK		0xC0
+#define VS300_CTRL_DISABLE		0x00
+#define VS300_CTRL_ENABLE		0x40
+
+#define VS300_PULL_DOWN_ENABLE_MASK	0x20
+#define VS300_PULL_DOWN_ENABLE		0x20
+
+/* NCP masks and values */
+
+/* CTRL register */
+#define NCP_ENABLE_MASK			0x80
+#define NCP_DISABLE			0x00
+#define NCP_ENABLE			0x80
+#define NCP_VPROG_MASK			0x1F
+
+#define NCP_UV_MIN			1500000
+#define NCP_UV_MAX			3050000
+#define NCP_UV_STEP			50000
+
+#define NCP_SET_POINTS			((NCP_UV_MAX - NCP_UV_MIN) \
+						/ NCP_UV_STEP + 1)
+
+#define IS_REAL_REGULATOR(id)		((id) >= 0 && \
+					 (id) < PM8921_VREG_ID_L1_PC)
+
+struct pm8921_vreg {
+	/* Configuration data */
+	struct regulator_dev			*rdev;
+	struct regulator_dev			*rdev_pc;
+	struct device				*dev;
+	struct device				*dev_pc;
+	const char				*name;
+	struct pm8921_regulator_platform_data	pdata;
+	const int				hpm_min_load;
+	const u16				ctrl_addr;
+	const u16				test_addr;
+	const u16				clk_ctrl_addr;
+	const u16				sleep_ctrl_addr;
+	const u16				pfm_ctrl_addr;
+	const u16				pwr_cnfg_addr;
+	const u8				type;
+	/* State data */
+	struct mutex				pc_lock;
+	int					save_uV;
+	int					mode;
+	u32					write_count;
+	u32					prev_write_count;
+	bool					is_enabled;
+	bool					is_enabled_pc;
+	u8				test_reg[REGULATOR_TEST_BANKS_MAX];
+	u8					ctrl_reg;
+	u8					clk_ctrl_reg;
+	u8					sleep_ctrl_reg;
+	u8					pfm_ctrl_reg;
+	u8					pwr_cnfg_reg;
+};
+
+#define vreg_err(vreg, fmt, ...) \
+	pr_err("%s: " fmt, vreg->name, ##__VA_ARGS__)
+
+#define PLDO(_id, _ctrl_addr, _test_addr, _hpm_min_load) \
+	[PM8921_VREG_ID_##_id] = { \
+		.type		= REGULATOR_TYPE_PLDO, \
+		.ctrl_addr	= _ctrl_addr, \
+		.test_addr	= _test_addr, \
+		.hpm_min_load	= PM8921_VREG_##_hpm_min_load##_HPM_MIN_LOAD, \
+	}
+
+#define NLDO(_id, _ctrl_addr, _test_addr, _hpm_min_load) \
+	[PM8921_VREG_ID_##_id] = { \
+		.type		= REGULATOR_TYPE_NLDO, \
+		.ctrl_addr	= _ctrl_addr, \
+		.test_addr	= _test_addr, \
+		.hpm_min_load	= PM8921_VREG_##_hpm_min_load##_HPM_MIN_LOAD, \
+	}
+
+#define NLDO1200(_id, _ctrl_addr, _test_addr, _hpm_min_load) \
+	[PM8921_VREG_ID_##_id] = { \
+		.type		= REGULATOR_TYPE_NLDO1200, \
+		.ctrl_addr	= _ctrl_addr, \
+		.test_addr	= _test_addr, \
+		.hpm_min_load	= PM8921_VREG_##_hpm_min_load##_HPM_MIN_LOAD, \
+	}
+
+#define SMPS(_id, _ctrl_addr, _test_addr, _clk_ctrl_addr, _sleep_ctrl_addr, \
+	     _hpm_min_load) \
+	[PM8921_VREG_ID_##_id] = { \
+		.type		= REGULATOR_TYPE_SMPS, \
+		.ctrl_addr	= _ctrl_addr, \
+		.test_addr	= _test_addr, \
+		.clk_ctrl_addr	= _clk_ctrl_addr, \
+		.sleep_ctrl_addr = _sleep_ctrl_addr, \
+		.hpm_min_load	= PM8921_VREG_##_hpm_min_load##_HPM_MIN_LOAD, \
+	}
+
+#define FTSMPS(_id, _pwm_ctrl_addr, _fts_cnfg1_addr, _pfm_ctrl_addr, \
+	       _pwr_cnfg_addr, _hpm_min_load) \
+	[PM8921_VREG_ID_##_id] = { \
+		.type		= REGULATOR_TYPE_FTSMPS, \
+		.ctrl_addr	= _pwm_ctrl_addr, \
+		.test_addr	= _fts_cnfg1_addr, \
+		.pfm_ctrl_addr = _pfm_ctrl_addr, \
+		.pwr_cnfg_addr = _pwr_cnfg_addr, \
+		.hpm_min_load	= PM8921_VREG_##_hpm_min_load##_HPM_MIN_LOAD, \
+	}
+
+#define VS(_id, _ctrl_addr) \
+	[PM8921_VREG_ID_##_id] = { \
+		.type		= REGULATOR_TYPE_VS, \
+		.ctrl_addr	= _ctrl_addr, \
+	}
+
+#define VS300(_id, _ctrl_addr) \
+	[PM8921_VREG_ID_##_id] = { \
+		.type		= REGULATOR_TYPE_VS300, \
+		.ctrl_addr	= _ctrl_addr, \
+	}
+
+#define NCP(_id, _ctrl_addr) \
+	[PM8921_VREG_ID_##_id] = { \
+		.type		= REGULATOR_TYPE_NCP, \
+		.ctrl_addr	= _ctrl_addr, \
+	}
+
+static struct pm8921_vreg pm8921_vreg[] = {
+	/*  id   ctrl   test   hpm_min */
+	NLDO(L1,  0x0AE, 0x0AF, LDO_150),
+	NLDO(L2,  0x0B0, 0x0B1, LDO_150),
+	PLDO(L3,  0x0B2, 0x0B3, LDO_150),
+	PLDO(L4,  0x0B4, 0x0B5, LDO_50),
+	PLDO(L5,  0x0B6, 0x0B7, LDO_300),
+	PLDO(L6,  0x0B8, 0x0B9, LDO_600),
+	PLDO(L7,  0x0BA, 0x0BB, LDO_150),
+	PLDO(L8,  0x0BC, 0x0BD, LDO_300),
+	PLDO(L9,  0x0BE, 0x0BF, LDO_300),
+	PLDO(L10, 0x0C0, 0x0C1, LDO_600),
+	PLDO(L11, 0x0C2, 0x0C3, LDO_150),
+	NLDO(L12, 0x0C4, 0x0C5, LDO_150),
+	PLDO(L14, 0x0C8, 0x0C9, LDO_50),
+	PLDO(L15, 0x0CA, 0x0CB, LDO_150),
+	PLDO(L16, 0x0CC, 0x0CD, LDO_300),
+	PLDO(L17, 0x0CE, 0x0CF, LDO_150),
+	NLDO(L18, 0x0D0, 0x0D1, LDO_150),
+	PLDO(L21, 0x0D6, 0x0D7, LDO_150),
+	PLDO(L22, 0x0D8, 0x0D9, LDO_150),
+	PLDO(L23, 0x0DA, 0x0DB, LDO_150),
+
+	/*       id   ctrl   test   hpm_min */
+	NLDO1200(L24, 0x0DC, 0x0DD, LDO_1200),
+	NLDO1200(L25, 0x0DE, 0x0DF, LDO_1200),
+	NLDO1200(L26, 0x0E0, 0x0E1, LDO_1200),
+	NLDO1200(L27, 0x0E2, 0x0E3, LDO_1200),
+	NLDO1200(L28, 0x0E4, 0x0E5, LDO_1200),
+
+	/*  id   ctrl   test   hpm_min */
+	PLDO(L29, 0x0E6, 0x0E7, LDO_150),
+
+	/*   id  ctrl   test2  clk    sleep  hpm_min */
+	SMPS(S1, 0x1D0, 0x1D5, 0x009, 0x1D2, SMPS_1500),
+	SMPS(S2, 0x1D8, 0x1DD, 0x00A, 0x1DA, SMPS_1500),
+	SMPS(S3, 0x1E0, 0x1E5, 0x00B, 0x1E2, SMPS_1500),
+	SMPS(S4, 0x1E8, 0x1ED, 0x011, 0x1EA, SMPS_1500),
+
+	/*     id  ctrl fts_cnfg1 pfm  pwr_cnfg  hpm_min */
+	FTSMPS(S5, 0x025, 0x02E, 0x026, 0x032, SMPS_2000),
+	FTSMPS(S6, 0x036, 0x03F, 0x037, 0x043, SMPS_2000),
+
+	/*   id  ctrl   test2  clk    sleep  hpm_min */
+	SMPS(S7, 0x1F0, 0x1F5, 0x012, 0x1F2, SMPS_1500),
+	SMPS(S8, 0x1F8, 0x1FD, 0x013, 0x1FA, SMPS_1500),
+
+	/* id		ctrl */
+	VS(LVS1,	0x060),
+	VS300(LVS2,     0x062),
+	VS(LVS3,	0x064),
+	VS(LVS4,	0x066),
+	VS(LVS5,	0x068),
+	VS(LVS6,	0x06A),
+	VS(LVS7,	0x06C),
+	VS300(USB_OTG,  0x06E),
+	VS300(HDMI_MVS, 0x070),
+
+	/*  id   ctrl */
+	NCP(NCP, 0x090),
+};
+
+/* Determines which label to add to the print. */
+enum pm8921_regulator_action {
+	PM8921_REGULATOR_ACTION_INIT,
+	PM8921_REGULATOR_ACTION_ENABLE,
+	PM8921_REGULATOR_ACTION_DISABLE,
+	PM8921_REGULATOR_ACTION_VOLTAGE,
+	PM8921_REGULATOR_ACTION_MODE,
+	PM8921_REGULATOR_ACTION_PIN_CTRL,
+};
+
+/* Debug state printing */
+static void pm8921_vreg_show_state(struct regulator_dev *rdev,
+				   enum pm8921_regulator_action action);
+
+/*
+ * Perform a masked write to a PMIC register only if the new value differs
+ * from the last value written to the register.  This removes redundant
+ * register writing.
+ *
+ * No locking is required because registers are not shared between regulators.
+ */
+static int pm8921_vreg_masked_write(struct pm8921_vreg *vreg, u16 addr, u8 val,
+		u8 mask, u8 *reg_save)
+{
+	int rc = 0;
+	u8 reg;
+
+	reg = (*reg_save & ~mask) | (val & mask);
+	if (reg != *reg_save) {
+		rc = pm8xxx_writeb(vreg->dev->parent, addr, reg);
+
+		if (rc) {
+			pr_err("%s: pm8xxx_writeb failed; addr=0x%03X, rc=%d\n",
+				vreg->name, addr, rc);
+		} else {
+			*reg_save = reg;
+			vreg->write_count++;
+			if (pm8921_vreg_debug_mask & PM8921_VREG_DEBUG_WRITES)
+				pr_info("%s: write(0x%03X)=0x%02X\n", vreg->name,
+					addr, reg);
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * Perform a masked write to a PMIC register without checking the previously
+ * written value.  This is needed for registers that must be rewritten even if
+ * the value hasn't changed in order for changes in other registers to take
+ * effect.
+ */
+static int pm8921_vreg_masked_write_forced(struct pm8921_vreg *vreg, u16 addr,
+		u8 val, u8 mask, u8 *reg_save)
+{
+	int rc = 0;
+	u8 reg;
+
+	reg = (*reg_save & ~mask) | (val & mask);
+	rc = pm8xxx_writeb(vreg->dev->parent, addr, reg);
+
+	if (rc) {
+		pr_err("%s: pm8xxx_writeb failed; addr=0x%03X, rc=%d\n",
+			vreg->name, addr, rc);
+	} else {
+		*reg_save = reg;
+		vreg->write_count++;
+		if (pm8921_vreg_debug_mask & PM8921_VREG_DEBUG_WRITES)
+			pr_info("%s: write(0x%03X)=0x%02X\n", vreg->name,
+				addr, reg);
+	}
+
+	return rc;
+}
+
+static int pm8921_vreg_is_pin_controlled(struct pm8921_vreg *vreg)
+{
+	int ret = 0;
+
+	switch (vreg->type) {
+	case REGULATOR_TYPE_PLDO:
+	case REGULATOR_TYPE_NLDO:
+		ret = ((vreg->test_reg[5] & LDO_TEST_PIN_CTRL_MASK) << 4)
+			| (vreg->test_reg[6] & LDO_TEST_PIN_CTRL_LPM_MASK);
+		break;
+	case REGULATOR_TYPE_SMPS:
+		ret = vreg->sleep_ctrl_reg
+			& (SMPS_PIN_CTRL_MASK | SMPS_PIN_CTRL_LPM_MASK);
+		break;
+	case REGULATOR_TYPE_VS:
+		ret = vreg->ctrl_reg & VS_PIN_CTRL_MASK;
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * Returns the logical pin control enable state because the pin control options
+ * present in the hardware after restart could differ from those desired by
+ * the consumer.
+ */
+static int pm8921_vreg_pin_control_is_enabled(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int enabled;
+
+	mutex_lock(&vreg->pc_lock);
+	enabled = vreg->is_enabled_pc;
+	mutex_unlock(&vreg->pc_lock);
+
+	return enabled;
+}
+
+/* Returns the physical enable state of the regulator. */
+static int _pm8921_vreg_is_enabled(struct pm8921_vreg *vreg)
+{
+	int rc = 0;
+
+	/*
+	 * All regulator types except advanced mode SMPS, FTSMPS, and VS300 have
+	 * their enable bit in bit 7 of the control register.
+	 */
+	switch (vreg->type) {
+	case REGULATOR_TYPE_FTSMPS:
+		if ((vreg->ctrl_reg & FTSMPS_VCTRL_BAND_MASK)
+		    != FTSMPS_VCTRL_BAND_OFF)
+			rc = 1;
+		break;
+	case REGULATOR_TYPE_VS300:
+		if ((vreg->ctrl_reg & VS300_CTRL_ENABLE_MASK)
+		    != VS300_CTRL_DISABLE)
+			rc = 1;
+		break;
+	case REGULATOR_TYPE_SMPS:
+		if (SMPS_IN_ADVANCED_MODE(vreg)) {
+			if ((vreg->ctrl_reg & SMPS_ADVANCED_BAND_MASK)
+			    != SMPS_ADVANCED_BAND_OFF)
+				rc = 1;
+			break;
+		}
+		/* Fall through for legacy mode SMPS. */
+	default:
+		if ((vreg->ctrl_reg & REGULATOR_ENABLE_MASK)
+		    == REGULATOR_ENABLE)
+			rc = 1;
+	}
+
+	return rc;
+}
+
+/*
+ * Returns the logical enable state of the regulator, which may differ from
+ * the physical enable state due to HPM/LPM pin control.
+ */
+static int pm8921_vreg_is_enabled(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int enabled;
+
+	if (vreg->type == REGULATOR_TYPE_PLDO
+	    || vreg->type == REGULATOR_TYPE_NLDO
+	    || vreg->type == REGULATOR_TYPE_SMPS
+	    || vreg->type == REGULATOR_TYPE_VS) {
+		/* Pin controllable */
+		mutex_lock(&vreg->pc_lock);
+		enabled = vreg->is_enabled;
+		mutex_unlock(&vreg->pc_lock);
+	} else {
+		/* Not pin controllable */
+		enabled = _pm8921_vreg_is_enabled(vreg);
+	}
+
+	return enabled;
+}
+
+static int pm8921_pldo_get_voltage(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int vmin, fine_step;
+	u8 range_ext, range_sel, vprog, fine_step_reg;
+
+	mutex_lock(&vreg->pc_lock);
+
+	fine_step_reg = vreg->test_reg[2] & LDO_TEST_FINE_STEP_MASK;
+	range_sel = vreg->test_reg[2] & LDO_TEST_RANGE_SEL_MASK;
+	range_ext = vreg->test_reg[4] & LDO_TEST_RANGE_EXT_MASK;
+	vprog = vreg->ctrl_reg & LDO_CTRL_VPROG_MASK;
+
+	mutex_unlock(&vreg->pc_lock);
+
+	vprog = (vprog << 1) | (fine_step_reg >> LDO_TEST_FINE_STEP_SHIFT);
+
+	if (range_sel) {
+		/* low range mode */
+		fine_step = PLDO_LOW_UV_FINE_STEP;
+		vmin = PLDO_LOW_UV_MIN;
+	} else if (!range_ext) {
+		/* normal mode */
+		fine_step = PLDO_NORM_UV_FINE_STEP;
+		vmin = PLDO_NORM_UV_MIN;
+	} else {
+		/* high range mode */
+		fine_step = PLDO_HIGH_UV_FINE_STEP;
+		vmin = PLDO_HIGH_UV_MIN;
+	}
+
+	return fine_step * vprog + vmin;
+}
+
+static int pm8921_pldo_list_voltage(struct regulator_dev *rdev,
+				    unsigned selector)
+{
+	int uV;
+
+	if (selector >= PLDO_SET_POINTS)
+		return 0;
+
+	if (selector < PLDO_LOW_SET_POINTS)
+		uV = selector * PLDO_LOW_UV_FINE_STEP + PLDO_LOW_UV_MIN;
+	else if (selector < (PLDO_LOW_SET_POINTS + PLDO_NORM_SET_POINTS))
+		uV = (selector - PLDO_LOW_SET_POINTS) * PLDO_NORM_UV_FINE_STEP
+			+ PLDO_NORM_UV_MIN;
+	else
+		uV = (selector - PLDO_LOW_SET_POINTS - PLDO_NORM_SET_POINTS)
+				* PLDO_HIGH_UV_FINE_STEP
+			+ PLDO_HIGH_UV_SET_POINT_MIN;
+
+	return uV;
+}
+
+static int pm8921_pldo_set_voltage(struct regulator_dev *rdev, int min_uV,
+				   int max_uV, unsigned *selector)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc = 0, uV = min_uV;
+	int vmin;
+	unsigned vprog, fine_step;
+	u8 range_ext, range_sel, fine_step_reg, prev_reg;
+	bool reg_changed = false;
+
+	if (uV < PLDO_LOW_UV_MIN && max_uV >= PLDO_LOW_UV_MIN)
+		uV = PLDO_LOW_UV_MIN;
+
+	if (uV < PLDO_LOW_UV_MIN || uV > PLDO_HIGH_UV_MAX) {
+		vreg_err(vreg,
+			"request v=[%d, %d] is outside possible v=[%d, %d]\n",
+			 min_uV, max_uV, PLDO_LOW_UV_MIN, PLDO_HIGH_UV_MAX);
+		return -EINVAL;
+	}
+
+	if (uV > PLDO_NORM_UV_MAX) {
+		vmin = PLDO_HIGH_UV_MIN;
+		fine_step = PLDO_HIGH_UV_FINE_STEP;
+		range_ext = LDO_TEST_RANGE_EXT_MASK;
+		range_sel = 0;
+	} else if (uV > PLDO_LOW_UV_MAX) {
+		vmin = PLDO_NORM_UV_MIN;
+		fine_step = PLDO_NORM_UV_FINE_STEP;
+		range_ext = 0;
+		range_sel = 0;
+	} else {
+		vmin = PLDO_LOW_UV_MIN;
+		fine_step = PLDO_LOW_UV_FINE_STEP;
+		range_ext = 0;
+		range_sel = LDO_TEST_RANGE_SEL_MASK;
+	}
+
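+	/*
+	 * Round the requested voltage up to the nearest set point, then split
+	 * the set point index into the 5-bit VPROG field (CTRL register) and
+	 * the single fine step bit (TEST register bank 2).
+	 */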
+	vprog = (uV - vmin + fine_step - 1) / fine_step;
+	uV = vprog * fine_step + vmin;
+	fine_step_reg = (vprog & 1) << LDO_TEST_FINE_STEP_SHIFT;
+	vprog >>= 1;
+
+	if (uV > max_uV) {
+		vreg_err(vreg,
+			"request v=[%d, %d] cannot be met by any set point\n",
+			min_uV, max_uV);
+		return -EINVAL;
+	}
+
+	mutex_lock(&vreg->pc_lock);
+
+	/* Write fine step, range select and program voltage update. */
+	prev_reg = vreg->test_reg[2];
+	rc = pm8921_vreg_masked_write(vreg, vreg->test_addr,
+			fine_step_reg | range_sel | REGULATOR_BANK_SEL(2)
+			 | REGULATOR_BANK_WRITE | LDO_TEST_VPROG_UPDATE_MASK,
+			LDO_TEST_FINE_STEP_MASK | LDO_TEST_RANGE_SEL_MASK
+			 | REGULATOR_BANK_MASK | LDO_TEST_VPROG_UPDATE_MASK,
+			&vreg->test_reg[2]);
+	if (rc)
+		goto bail;
+	if (prev_reg != vreg->test_reg[2])
+		reg_changed = true;
+
+	/* Write range extension. */
+	prev_reg = vreg->test_reg[4];
+	rc = pm8921_vreg_masked_write(vreg, vreg->test_addr,
+			range_ext | REGULATOR_BANK_SEL(4)
+			 | REGULATOR_BANK_WRITE,
+			LDO_TEST_RANGE_EXT_MASK | REGULATOR_BANK_MASK,
+			&vreg->test_reg[4]);
+	if (rc)
+		goto bail;
+	if (prev_reg != vreg->test_reg[4])
+		reg_changed = true;
+
+	/* Write new voltage. */
+	if (reg_changed) {
+		/*
+		 * Force a CTRL register write even if the value hasn't changed.
+		 * This is necessary because range select, range extension, and
+		 * fine step will not update until a value is written into the
+		 * control register.
+		 */
+		rc = pm8921_vreg_masked_write_forced(vreg, vreg->ctrl_addr,
+			vprog, LDO_CTRL_VPROG_MASK, &vreg->ctrl_reg);
+	} else {
+		/* Only write to control register if new value is different. */
+		rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr, vprog,
+			LDO_CTRL_VPROG_MASK, &vreg->ctrl_reg);
+	}
+bail:
+	mutex_unlock(&vreg->pc_lock);
+
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+	else
+		pm8921_vreg_show_state(rdev, PM8921_REGULATOR_ACTION_VOLTAGE);
+
+	return rc;
+}
+
+static int pm8921_nldo_get_voltage(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	u8 vprog, fine_step_reg;
+
+	mutex_lock(&vreg->pc_lock);
+
+	fine_step_reg = vreg->test_reg[2] & LDO_TEST_FINE_STEP_MASK;
+	vprog = vreg->ctrl_reg & LDO_CTRL_VPROG_MASK;
+
+	mutex_unlock(&vreg->pc_lock);
+
+	vprog = (vprog << 1) | (fine_step_reg >> LDO_TEST_FINE_STEP_SHIFT);
+
+	return NLDO_UV_FINE_STEP * vprog + NLDO_UV_MIN;
+}
+
+static int pm8921_nldo_list_voltage(struct regulator_dev *rdev,
+				    unsigned selector)
+{
+	if (selector >= NLDO_SET_POINTS)
+		return 0;
+
+	return selector * NLDO_UV_FINE_STEP + NLDO_UV_MIN;
+}
+
+static int pm8921_nldo_set_voltage(struct regulator_dev *rdev, int min_uV,
+				   int max_uV, unsigned *selector)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	unsigned vprog, fine_step_reg, prev_reg;
+	int rc;
+	int uV = min_uV;
+
+	if (uV < NLDO_UV_MIN && max_uV >= NLDO_UV_MIN)
+		uV = NLDO_UV_MIN;
+
+	if (uV < NLDO_UV_MIN || uV > NLDO_UV_MAX) {
+		vreg_err(vreg,
+			"request v=[%d, %d] is outside possible v=[%d, %d]\n",
+			 min_uV, max_uV, NLDO_UV_MIN, NLDO_UV_MAX);
+		return -EINVAL;
+	}
+
+	vprog = (uV - NLDO_UV_MIN + NLDO_UV_FINE_STEP - 1) / NLDO_UV_FINE_STEP;
+	uV = vprog * NLDO_UV_FINE_STEP + NLDO_UV_MIN;
+	fine_step_reg = (vprog & 1) << LDO_TEST_FINE_STEP_SHIFT;
+	vprog >>= 1;
+
+	if (uV > max_uV) {
+		vreg_err(vreg,
+			"request v=[%d, %d] cannot be met by any set point\n",
+			min_uV, max_uV);
+		return -EINVAL;
+	}
+
+	mutex_lock(&vreg->pc_lock);
+
+	/* Write fine step. */
+	prev_reg = vreg->test_reg[2];
+	rc = pm8921_vreg_masked_write(vreg, vreg->test_addr,
+			fine_step_reg | REGULATOR_BANK_SEL(2)
+			 | REGULATOR_BANK_WRITE | LDO_TEST_VPROG_UPDATE_MASK,
+			LDO_TEST_FINE_STEP_MASK | REGULATOR_BANK_MASK
+			 | LDO_TEST_VPROG_UPDATE_MASK,
+		       &vreg->test_reg[2]);
+	if (rc)
+		goto bail;
+
+	/* Write new voltage. */
+	if (prev_reg != vreg->test_reg[2]) {
+		/*
+		 * Force a CTRL register write even if the value hasn't changed.
+		 * This is necessary because fine step will not update until a
+		 * value is written into the control register.
+		 */
+		rc = pm8921_vreg_masked_write_forced(vreg, vreg->ctrl_addr,
+			vprog, LDO_CTRL_VPROG_MASK, &vreg->ctrl_reg);
+	} else {
+		/* Only write to control register if new value is different. */
+		rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr, vprog,
+			LDO_CTRL_VPROG_MASK, &vreg->ctrl_reg);
+	}
+bail:
+	mutex_unlock(&vreg->pc_lock);
+
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+	else
+		pm8921_vreg_show_state(rdev, PM8921_REGULATOR_ACTION_VOLTAGE);
+
+	return rc;
+}
+
+static int _pm8921_nldo1200_get_voltage(struct pm8921_vreg *vreg)
+{
+	int uV = 0;
+	int vprog;
+
+	if (!NLDO1200_IN_ADVANCED_MODE(vreg)) {
+		pr_warn("%s: currently in legacy mode; voltage unknown.\n",
+			vreg->name);
+		return vreg->save_uV;
+	}
+
+	vprog = vreg->ctrl_reg & NLDO1200_CTRL_VPROG_MASK;
+
+	if ((vreg->ctrl_reg & NLDO1200_CTRL_RANGE_MASK)
+	    == NLDO1200_CTRL_RANGE_LOW)
+		uV = vprog * NLDO1200_LOW_UV_STEP + NLDO1200_LOW_UV_MIN;
+	else
+		uV = vprog * NLDO1200_HIGH_UV_STEP + NLDO1200_HIGH_UV_MIN;
+
+	return uV;
+}
+
+static int pm8921_nldo1200_get_voltage(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+
+	return _pm8921_nldo1200_get_voltage(vreg);
+}
+
+static int pm8921_nldo1200_list_voltage(struct regulator_dev *rdev,
+					unsigned selector)
+{
+	int uV;
+
+	if (selector >= NLDO1200_SET_POINTS)
+		return 0;
+
+	if (selector < NLDO1200_LOW_SET_POINTS)
+		uV = selector * NLDO1200_LOW_UV_STEP + NLDO1200_LOW_UV_MIN;
+	else
+		uV = (selector - NLDO1200_LOW_SET_POINTS)
+				* NLDO1200_HIGH_UV_STEP
+			+ NLDO1200_HIGH_UV_MIN;
+
+	return uV;
+}
+
+static int _pm8921_nldo1200_set_voltage(struct pm8921_vreg *vreg, int min_uV,
+		int max_uV)
+{
+	u8 vprog, range;
+	int rc;
+	int uV = min_uV;
+
+	if (uV < NLDO1200_LOW_UV_MIN && max_uV >= NLDO1200_LOW_UV_MIN)
+		uV = NLDO1200_LOW_UV_MIN;
+
+	if (uV < NLDO1200_LOW_UV_MIN || uV > NLDO1200_HIGH_UV_MAX) {
+		vreg_err(vreg,
+			"request v=[%d, %d] is outside possible v=[%d, %d]\n",
+			 min_uV, max_uV, NLDO1200_LOW_UV_MIN,
+			 NLDO1200_HIGH_UV_MAX);
+		return -EINVAL;
+	}
+
+	if (uV > NLDO1200_LOW_UV_MAX) {
+		vprog = (uV - NLDO1200_HIGH_UV_MIN + NLDO1200_HIGH_UV_STEP - 1)
+			/ NLDO1200_HIGH_UV_STEP;
+		uV = vprog * NLDO1200_HIGH_UV_STEP + NLDO1200_HIGH_UV_MIN;
+		vprog &= NLDO1200_CTRL_VPROG_MASK;
+		range = NLDO1200_CTRL_RANGE_HIGH;
+	} else {
+		vprog = (uV - NLDO1200_LOW_UV_MIN + NLDO1200_LOW_UV_STEP - 1)
+			/ NLDO1200_LOW_UV_STEP;
+		uV = vprog * NLDO1200_LOW_UV_STEP + NLDO1200_LOW_UV_MIN;
+		vprog &= NLDO1200_CTRL_VPROG_MASK;
+		range = NLDO1200_CTRL_RANGE_LOW;
+	}
+
+	if (uV > max_uV) {
+		vreg_err(vreg,
+			"request v=[%d, %d] cannot be met by any set point\n",
+			min_uV, max_uV);
+		return -EINVAL;
+	}
+
+	/* Set to advanced mode */
+	rc = pm8921_vreg_masked_write(vreg, vreg->test_addr,
+		NLDO1200_ADVANCED_MODE | REGULATOR_BANK_SEL(2)
+		| REGULATOR_BANK_WRITE, NLDO1200_ADVANCED_MODE_MASK
+		| REGULATOR_BANK_MASK, &vreg->test_reg[2]);
+	if (rc)
+		goto bail;
+
+	/* Set voltage and range selection. */
+	rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr, vprog | range,
+			NLDO1200_CTRL_VPROG_MASK | NLDO1200_CTRL_RANGE_MASK,
+			&vreg->ctrl_reg);
+	if (rc)
+		goto bail;
+
+	vreg->save_uV = uV;
+
+bail:
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+
+	return rc;
+}
+
+static int pm8921_nldo1200_set_voltage(struct regulator_dev *rdev, int min_uV,
+				   int max_uV, unsigned *selector)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	rc = _pm8921_nldo1200_set_voltage(vreg, min_uV, max_uV);
+
+	if (!rc)
+		pm8921_vreg_show_state(rdev, PM8921_REGULATOR_ACTION_VOLTAGE);
+
+	return rc;
+}
+
+static int pm8921_smps_get_voltage_advanced(struct pm8921_vreg *vreg)
+{
+	u8 vprog, band;
+	int uV = 0;
+
+	vprog = vreg->ctrl_reg & SMPS_ADVANCED_VPROG_MASK;
+	band = vreg->ctrl_reg & SMPS_ADVANCED_BAND_MASK;
+
+	if (band == SMPS_ADVANCED_BAND_1)
+		uV = vprog * SMPS_BAND1_UV_STEP + SMPS_BAND1_UV_MIN;
+	else if (band == SMPS_ADVANCED_BAND_2)
+		uV = vprog * SMPS_BAND2_UV_STEP + SMPS_BAND2_UV_MIN;
+	else if (band == SMPS_ADVANCED_BAND_3)
+		uV = vprog * SMPS_BAND3_UV_STEP + SMPS_BAND3_UV_MIN;
+	else if (vreg->save_uV > 0)
+		uV = vreg->save_uV;
+	else
+		uV = VOLTAGE_UNKNOWN;
+
+	return uV;
+}
+
+static int pm8921_smps_get_voltage_legacy(struct pm8921_vreg *vreg)
+{
+	u8 vlow, vref, vprog;
+	int uV;
+
+	vlow = vreg->test_reg[1] & SMPS_LEGACY_VLOW_SEL_MASK;
+	vref = vreg->ctrl_reg & SMPS_LEGACY_VREF_SEL_MASK;
+	vprog = vreg->ctrl_reg & SMPS_LEGACY_VPROG_MASK;
+
+	if (vlow && vref) {
+		/* mode 3 */
+		uV = vprog * SMPS_MODE3_UV_STEP + SMPS_MODE3_UV_MIN;
+	} else if (vref) {
+		/* mode 2 */
+		uV = vprog * SMPS_MODE2_UV_STEP + SMPS_MODE2_UV_MIN;
+	} else {
+		/* mode 1 */
+		uV = vprog * SMPS_MODE1_UV_STEP + SMPS_MODE1_UV_MIN;
+	}
+
+	return uV;
+}
+
+static int _pm8921_smps_get_voltage(struct pm8921_vreg *vreg)
+{
+	if (SMPS_IN_ADVANCED_MODE(vreg))
+		return pm8921_smps_get_voltage_advanced(vreg);
+
+	return pm8921_smps_get_voltage_legacy(vreg);
+}
+
+static int pm8921_smps_list_voltage(struct regulator_dev *rdev,
+				    unsigned selector)
+{
+	int uV;
+
+	if (selector >= SMPS_ADVANCED_SET_POINTS)
+		return 0;
+
+	if (selector < SMPS_BAND1_SET_POINTS)
+		uV = selector * SMPS_BAND1_UV_STEP + SMPS_BAND1_UV_MIN;
+	else if (selector < (SMPS_BAND1_SET_POINTS + SMPS_BAND2_SET_POINTS))
+		uV = (selector - SMPS_BAND1_SET_POINTS) * SMPS_BAND2_UV_STEP
+			+ SMPS_BAND2_UV_MIN;
+	else
+		uV = (selector - SMPS_BAND1_SET_POINTS - SMPS_BAND2_SET_POINTS)
+				* SMPS_BAND3_UV_STEP
+			+ SMPS_BAND3_UV_MIN;
+
+	return uV;
+}
+
+static int pm8921_smps_get_voltage(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int uV;
+
+	mutex_lock(&vreg->pc_lock);
+	uV = _pm8921_smps_get_voltage(vreg);
+	mutex_unlock(&vreg->pc_lock);
+
+	return uV;
+}
+
+static int pm8921_smps_set_voltage_advanced(struct pm8921_vreg *vreg,
+					   int min_uV, int max_uV, int force_on)
+{
+	u8 vprog, band;
+	int rc;
+	int uV = min_uV;
+
+	if (uV < SMPS_BAND1_UV_MIN && max_uV >= SMPS_BAND1_UV_MIN)
+		uV = SMPS_BAND1_UV_MIN;
+
+	if (uV < SMPS_BAND1_UV_MIN || uV > SMPS_BAND3_UV_MAX) {
+		vreg_err(vreg,
+			"request v=[%d, %d] is outside possible v=[%d, %d]\n",
+			 min_uV, max_uV, SMPS_BAND1_UV_MIN, SMPS_BAND3_UV_MAX);
+		return -EINVAL;
+	}
+
+	if (uV > SMPS_BAND2_UV_MAX) {
+		vprog = (uV - SMPS_BAND3_UV_MIN + SMPS_BAND3_UV_STEP - 1)
+			/ SMPS_BAND3_UV_STEP;
+		band = SMPS_ADVANCED_BAND_3;
+		uV = SMPS_BAND3_UV_MIN + vprog * SMPS_BAND3_UV_STEP;
+	} else if (uV > SMPS_BAND1_UV_MAX) {
+		vprog = (uV - SMPS_BAND2_UV_MIN + SMPS_BAND2_UV_STEP - 1)
+			/ SMPS_BAND2_UV_STEP;
+		band = SMPS_ADVANCED_BAND_2;
+		uV = SMPS_BAND2_UV_MIN + vprog * SMPS_BAND2_UV_STEP;
+	} else {
+		vprog = (uV - SMPS_BAND1_UV_MIN + SMPS_BAND1_UV_STEP - 1)
+			/ SMPS_BAND1_UV_STEP;
+		band = SMPS_ADVANCED_BAND_1;
+		uV = SMPS_BAND1_UV_MIN + vprog * SMPS_BAND1_UV_STEP;
+	}
+
+	if (uV > max_uV) {
+		vreg_err(vreg,
+			"request v=[%d, %d] cannot be met by any set point\n",
+			min_uV, max_uV);
+		return -EINVAL;
+	}
+
+	/* Do not set band if regulator currently disabled. */
+	if (!_pm8921_vreg_is_enabled(vreg) && !force_on)
+		band = SMPS_ADVANCED_BAND_OFF;
+
+	/* Set advanced mode bit to 1. */
+	rc = pm8921_vreg_masked_write(vreg, vreg->test_addr, SMPS_ADVANCED_MODE
+		| REGULATOR_BANK_WRITE | REGULATOR_BANK_SEL(7),
+		SMPS_ADVANCED_MODE_MASK | REGULATOR_BANK_MASK,
+		&vreg->test_reg[7]);
+	if (rc)
+		goto bail;
+
+	/* Set voltage and voltage band. */
+	rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr, band | vprog,
+			SMPS_ADVANCED_BAND_MASK | SMPS_ADVANCED_VPROG_MASK,
+			&vreg->ctrl_reg);
+	if (rc)
+		goto bail;
+
+	vreg->save_uV = uV;
+
+bail:
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+
+	return rc;
+}
+
+static int pm8921_smps_set_voltage_legacy(struct pm8921_vreg *vreg, int min_uV,
+					  int max_uV)
+{
+	u8 vlow, vref, vprog, pd, en;
+	int rc;
+	int uV = min_uV;
+
+	if (uV < SMPS_MODE3_UV_MIN && max_uV >= SMPS_MODE3_UV_MIN)
+		uV = SMPS_MODE3_UV_MIN;
+
+	if (uV < SMPS_MODE3_UV_MIN || uV > SMPS_MODE1_UV_MAX) {
+		vreg_err(vreg,
+			"request v=[%d, %d] is outside possible v=[%d, %d]\n",
+			 min_uV, max_uV, SMPS_MODE3_UV_MIN, SMPS_MODE1_UV_MAX);
+		return -EINVAL;
+	}
+
+	if (uV > SMPS_MODE2_UV_MAX) {
+		vprog = (uV - SMPS_MODE1_UV_MIN + SMPS_MODE1_UV_STEP - 1)
+			/ SMPS_MODE1_UV_STEP;
+		vref = 0;
+		vlow = 0;
+		uV = SMPS_MODE1_UV_MIN + vprog * SMPS_MODE1_UV_STEP;
+	} else if (uV > SMPS_MODE3_UV_MAX) {
+		vprog = (uV - SMPS_MODE2_UV_MIN + SMPS_MODE2_UV_STEP - 1)
+			/ SMPS_MODE2_UV_STEP;
+		vref = SMPS_LEGACY_VREF_SEL_MASK;
+		vlow = 0;
+		uV = SMPS_MODE2_UV_MIN + vprog * SMPS_MODE2_UV_STEP;
+	} else {
+		vprog = (uV - SMPS_MODE3_UV_MIN + SMPS_MODE3_UV_STEP - 1)
+			/ SMPS_MODE3_UV_STEP;
+		vref = SMPS_LEGACY_VREF_SEL_MASK;
+		vlow = SMPS_LEGACY_VLOW_SEL_MASK;
+		uV = SMPS_MODE3_UV_MIN + vprog * SMPS_MODE3_UV_STEP;
+	}
+
+	if (uV > max_uV) {
+		vreg_err(vreg,
+			"request v=[%d, %d] cannot be met by any set point\n",
+			min_uV, max_uV);
+		return -EINVAL;
+	}
+
+	/* set vlow bit for ultra low voltage mode */
+	rc = pm8921_vreg_masked_write(vreg, vreg->test_addr,
+		vlow | REGULATOR_BANK_WRITE | REGULATOR_BANK_SEL(1),
+		REGULATOR_BANK_MASK | SMPS_LEGACY_VLOW_SEL_MASK,
+		&vreg->test_reg[1]);
+	if (rc)
+		goto bail;
+
+	/* Set advanced mode bit to 0. */
+	rc = pm8921_vreg_masked_write(vreg, vreg->test_addr, SMPS_LEGACY_MODE
+		| REGULATOR_BANK_WRITE | REGULATOR_BANK_SEL(7),
+		SMPS_ADVANCED_MODE_MASK | REGULATOR_BANK_MASK,
+		&vreg->test_reg[7]);
+	if (rc)
+		goto bail;
+
+	en = (_pm8921_vreg_is_enabled(vreg) ? SMPS_LEGACY_ENABLE : 0);
+	pd = (vreg->pdata.pull_down_enable ? SMPS_LEGACY_PULL_DOWN_ENABLE : 0);
+
+	/* Set voltage (and the rest of the control register). */
+	rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr,
+		en | pd | vref | vprog,
+		SMPS_LEGACY_ENABLE_MASK | SMPS_LEGACY_PULL_DOWN_ENABLE
+		  | SMPS_LEGACY_VREF_SEL_MASK | SMPS_LEGACY_VPROG_MASK,
+		&vreg->ctrl_reg);
+
+	vreg->save_uV = uV;
+
+bail:
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+
+	return rc;
+}
+
+static int pm8921_smps_set_voltage(struct regulator_dev *rdev, int min_uV,
+				   int max_uV, unsigned *selector)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc = 0;
+
+	mutex_lock(&vreg->pc_lock);
+
+	if (SMPS_IN_ADVANCED_MODE(vreg) || !pm8921_vreg_is_pin_controlled(vreg))
+		rc = pm8921_smps_set_voltage_advanced(vreg, min_uV, max_uV, 0);
+	else
+		rc = pm8921_smps_set_voltage_legacy(vreg, min_uV, max_uV);
+
+	mutex_unlock(&vreg->pc_lock);
+
+	if (!rc)
+		pm8921_vreg_show_state(rdev, PM8921_REGULATOR_ACTION_VOLTAGE);
+
+	return rc;
+}
+
+static int _pm8921_ftsmps_get_voltage(struct pm8921_vreg *vreg)
+{
+	u8 vprog, band;
+	int uV = 0;
+
+	if ((vreg->test_reg[0] & FTSMPS_CNFG1_PM_MASK) == FTSMPS_CNFG1_PM_PFM) {
+		vprog = vreg->pfm_ctrl_reg & FTSMPS_VCTRL_VPROG_MASK;
+		band = vreg->pfm_ctrl_reg & FTSMPS_VCTRL_BAND_MASK;
+		if (band == FTSMPS_VCTRL_BAND_OFF && vprog == 0) {
+			/* PWM_VCTRL overrides PFM_VCTRL */
+			vprog = vreg->ctrl_reg & FTSMPS_VCTRL_VPROG_MASK;
+			band = vreg->ctrl_reg & FTSMPS_VCTRL_BAND_MASK;
+		}
+	} else {
+		vprog = vreg->ctrl_reg & FTSMPS_VCTRL_VPROG_MASK;
+		band = vreg->ctrl_reg & FTSMPS_VCTRL_BAND_MASK;
+	}
+
+	if (band == FTSMPS_VCTRL_BAND_1)
+		uV = vprog * FTSMPS_BAND1_UV_PHYS_STEP + FTSMPS_BAND1_UV_MIN;
+	else if (band == FTSMPS_VCTRL_BAND_2)
+		uV = vprog * FTSMPS_BAND2_UV_STEP + FTSMPS_BAND2_UV_MIN;
+	else if (band == FTSMPS_VCTRL_BAND_3)
+		uV = vprog * FTSMPS_BAND3_UV_STEP + FTSMPS_BAND3_UV_MIN;
+	else if (vreg->save_uV > 0)
+		uV = vreg->save_uV;
+	else
+		uV = VOLTAGE_UNKNOWN;
+
+	return uV;
+}
+
+static int pm8921_ftsmps_get_voltage(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+
+	return _pm8921_ftsmps_get_voltage(vreg);
+}
+
+static int pm8921_ftsmps_list_voltage(struct regulator_dev *rdev,
+				      unsigned selector)
+{
+	int uV;
+
+	if (selector >= FTSMPS_SET_POINTS)
+		return 0;
+
+	if (selector < FTSMPS_BAND1_SET_POINTS)
+		uV = selector * FTSMPS_BAND1_UV_LOG_STEP + FTSMPS_BAND1_UV_MIN;
+	else if (selector < (FTSMPS_BAND1_SET_POINTS + FTSMPS_BAND2_SET_POINTS))
+		uV = (selector - FTSMPS_BAND1_SET_POINTS) * FTSMPS_BAND2_UV_STEP
+			+ FTSMPS_BAND2_UV_MIN;
+	else
+		uV = (selector - FTSMPS_BAND1_SET_POINTS
+			- FTSMPS_BAND2_SET_POINTS)
+				* FTSMPS_BAND3_UV_STEP
+			+ FTSMPS_BAND3_UV_SET_POINT_MIN;
+
+	return uV;
+}
+
+static int _pm8921_ftsmps_set_voltage(struct pm8921_vreg *vreg, int min_uV,
+				      int max_uV, int force_on)
+{
+	int rc = 0;
+	u8 vprog, band;
+	int uV = min_uV;
+
+	if (uV < FTSMPS_BAND1_UV_MIN && max_uV >= FTSMPS_BAND1_UV_MIN)
+		uV = FTSMPS_BAND1_UV_MIN;
+
+	if (uV < FTSMPS_BAND1_UV_MIN || uV > FTSMPS_BAND3_UV_MAX) {
+		vreg_err(vreg,
+			"request v=[%d, %d] is outside possible v=[%d, %d]\n",
+			 min_uV, max_uV, FTSMPS_BAND1_UV_MIN,
+			 FTSMPS_BAND3_UV_MAX);
+		return -EINVAL;
+	}
+
+	/* Round up for set points in the gaps between bands. */
+	if (uV > FTSMPS_BAND1_UV_MAX && uV < FTSMPS_BAND2_UV_MIN)
+		uV = FTSMPS_BAND2_UV_MIN;
+	else if (uV > FTSMPS_BAND2_UV_MAX
+			&& uV < FTSMPS_BAND3_UV_SET_POINT_MIN)
+		uV = FTSMPS_BAND3_UV_SET_POINT_MIN;
+
+	if (uV > FTSMPS_BAND2_UV_MAX) {
+		vprog = (uV - FTSMPS_BAND3_UV_MIN + FTSMPS_BAND3_UV_STEP - 1)
+			/ FTSMPS_BAND3_UV_STEP;
+		band = FTSMPS_VCTRL_BAND_3;
+		uV = FTSMPS_BAND3_UV_MIN + vprog * FTSMPS_BAND3_UV_STEP;
+	} else if (uV > FTSMPS_BAND1_UV_MAX) {
+		vprog = (uV - FTSMPS_BAND2_UV_MIN + FTSMPS_BAND2_UV_STEP - 1)
+			/ FTSMPS_BAND2_UV_STEP;
+		band = FTSMPS_VCTRL_BAND_2;
+		uV = FTSMPS_BAND2_UV_MIN + vprog * FTSMPS_BAND2_UV_STEP;
+	} else {
+		vprog = (uV - FTSMPS_BAND1_UV_MIN
+				+ FTSMPS_BAND1_UV_LOG_STEP - 1)
+			/ FTSMPS_BAND1_UV_LOG_STEP;
+		uV = FTSMPS_BAND1_UV_MIN + vprog * FTSMPS_BAND1_UV_LOG_STEP;
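+		/*
+		 * Convert from 50000 uV logical steps to the 6250 uV physical
+		 * steps programmed in VCTRL (a factor of 8), which keeps the
+		 * 3 LSBs of the programmed voltage at 0 as required in band 1.
+		 */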
+		vprog *= FTSMPS_BAND1_UV_LOG_STEP / FTSMPS_BAND1_UV_PHYS_STEP;
+		band = FTSMPS_VCTRL_BAND_1;
+	}
+
+	if (uV > max_uV) {
+		vreg_err(vreg,
+			"request v=[%d, %d] cannot be met by any set point\n",
+			min_uV, max_uV);
+		return -EINVAL;
+	}
+
+	/*
+	 * Do not set voltage if regulator is currently disabled because doing
+	 * so will enable it.
+	 */
+	if (_pm8921_vreg_is_enabled(vreg) || force_on) {
+		rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr,
+			band | vprog,
+			FTSMPS_VCTRL_BAND_MASK | FTSMPS_VCTRL_VPROG_MASK,
+			&vreg->ctrl_reg);
+		if (rc)
+			goto bail;
+
+		/* Program PFM_VCTRL as 0x00 so that PWM_VCTRL overrides it. */
+		rc = pm8921_vreg_masked_write(vreg, vreg->pfm_ctrl_addr, 0x00,
+			FTSMPS_VCTRL_BAND_MASK | FTSMPS_VCTRL_VPROG_MASK,
+			&vreg->pfm_ctrl_reg);
+		if (rc)
+			goto bail;
+	}
+
+	vreg->save_uV = uV;
+
+bail:
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+
+	return rc;
+}
+
+static int pm8921_ftsmps_set_voltage(struct regulator_dev *rdev, int min_uV,
+				     int max_uV, unsigned *selector)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	rc = _pm8921_ftsmps_set_voltage(vreg, min_uV, max_uV, 0);
+
+	if (!rc)
+		pm8921_vreg_show_state(rdev, PM8921_REGULATOR_ACTION_VOLTAGE);
+
+	return rc;
+}
+
+static int pm8921_ncp_get_voltage(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	u8 vprog;
+
+	vprog = vreg->ctrl_reg & NCP_VPROG_MASK;
+
+	return NCP_UV_MIN + vprog * NCP_UV_STEP;
+}
+
+static int pm8921_ncp_list_voltage(struct regulator_dev *rdev,
+				   unsigned selector)
+{
+	if (selector >= NCP_SET_POINTS)
+		return 0;
+
+	return selector * NCP_UV_STEP + NCP_UV_MIN;
+}
+
+static int pm8921_ncp_set_voltage(struct regulator_dev *rdev, int min_uV,
+				  int max_uV, unsigned *selector)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+	int uV = min_uV;
+	u8 val;
+
+	if (uV < NCP_UV_MIN && max_uV >= NCP_UV_MIN)
+		uV = NCP_UV_MIN;
+
+	if (uV < NCP_UV_MIN || uV > NCP_UV_MAX) {
+		vreg_err(vreg,
+			"request v=[%d, %d] is outside possible v=[%d, %d]\n",
+			 min_uV, max_uV, NCP_UV_MIN, NCP_UV_MAX);
+		return -EINVAL;
+	}
+
+	val = (uV - NCP_UV_MIN + NCP_UV_STEP - 1) / NCP_UV_STEP;
+	uV = val * NCP_UV_STEP + NCP_UV_MIN;
+
+	if (uV > max_uV) {
+		vreg_err(vreg,
+			"request v=[%d, %d] cannot be met by any set point\n",
+			min_uV, max_uV);
+		return -EINVAL;
+	}
+
+	/* voltage setting */
+	rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr, val,
+			NCP_VPROG_MASK, &vreg->ctrl_reg);
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+	else
+		pm8921_vreg_show_state(rdev, PM8921_REGULATOR_ACTION_VOLTAGE);
+
+	return rc;
+}
+
+static unsigned int pm8921_ldo_get_mode(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	unsigned int mode = 0;
+
+	mutex_lock(&vreg->pc_lock);
+	mode = vreg->mode;
+	mutex_unlock(&vreg->pc_lock);
+
+	return mode;
+}
+
+static int pm8921_ldo_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc = 0;
+
+	if (mode != REGULATOR_MODE_NORMAL && mode != REGULATOR_MODE_IDLE) {
+		vreg_err(vreg, "invalid mode: %u\n", mode);
+		return -EINVAL;
+	}
+
+	mutex_lock(&vreg->pc_lock);
+
+	if (mode == REGULATOR_MODE_NORMAL
+	    || (vreg->is_enabled_pc
+		&& vreg->pdata.pin_fn == PM8921_VREG_PIN_FN_ENABLE)) {
+		/* HPM */
+		rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr,
+			LDO_CTRL_PM_HPM, LDO_CTRL_PM_MASK, &vreg->ctrl_reg);
+	} else {
+		/* LPM */
+		rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr,
+			LDO_CTRL_PM_LPM, LDO_CTRL_PM_MASK, &vreg->ctrl_reg);
+		if (rc)
+			goto bail;
+
+		rc = pm8921_vreg_masked_write(vreg, vreg->test_addr,
+			LDO_TEST_LPM_SEL_CTRL | REGULATOR_BANK_WRITE
+			  | REGULATOR_BANK_SEL(0),
+			LDO_TEST_LPM_MASK | REGULATOR_BANK_MASK,
+			&vreg->test_reg[0]);
+	}
+
+bail:
+	if (!rc)
+		vreg->mode = mode;
+
+	mutex_unlock(&vreg->pc_lock);
+
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+	else
+		pm8921_vreg_show_state(rdev, PM8921_REGULATOR_ACTION_MODE);
+
+	return rc;
+}
+
+static unsigned int pm8921_nldo1200_get_mode(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	unsigned int mode = 0;
+
+	if (NLDO1200_IN_ADVANCED_MODE(vreg)) {
+		/* Advanced mode */
+		if ((vreg->test_reg[2] & NLDO1200_ADVANCED_PM_MASK)
+		    == NLDO1200_ADVANCED_PM_LPM)
+			mode = REGULATOR_MODE_IDLE;
+		else
+			mode = REGULATOR_MODE_NORMAL;
+	} else {
+		/* Legacy mode */
+		if ((vreg->ctrl_reg & NLDO1200_LEGACY_PM_MASK)
+		    == NLDO1200_LEGACY_PM_LPM)
+			mode = REGULATOR_MODE_IDLE;
+		else
+			mode = REGULATOR_MODE_NORMAL;
+	}
+
+	return mode;
+}
+
+static int pm8921_nldo1200_set_mode(struct regulator_dev *rdev,
+				    unsigned int mode)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc = 0;
+
+	if (mode != REGULATOR_MODE_NORMAL && mode != REGULATOR_MODE_IDLE) {
+		vreg_err(vreg, "invalid mode: %u\n", mode);
+		return -EINVAL;
+	}
+
+	/*
+	 * Make sure that advanced mode is in use.  If it isn't, then set it
+	 * and update the voltage accordingly.
+	 */
+	if (!NLDO1200_IN_ADVANCED_MODE(vreg)) {
+		rc = _pm8921_nldo1200_set_voltage(vreg, vreg->save_uV,
+			vreg->save_uV);
+		if (rc)
+			goto bail;
+	}
+
+	if (mode == REGULATOR_MODE_NORMAL) {
+		/* HPM */
+		rc = pm8921_vreg_masked_write(vreg, vreg->test_addr,
+			NLDO1200_ADVANCED_PM_HPM | REGULATOR_BANK_WRITE
+			| REGULATOR_BANK_SEL(2), NLDO1200_ADVANCED_PM_MASK
+			| REGULATOR_BANK_MASK, &vreg->test_reg[2]);
+	} else {
+		/* LPM */
+		rc = pm8921_vreg_masked_write(vreg, vreg->test_addr,
+			NLDO1200_ADVANCED_PM_LPM | REGULATOR_BANK_WRITE
+			| REGULATOR_BANK_SEL(2), NLDO1200_ADVANCED_PM_MASK
+			| REGULATOR_BANK_MASK, &vreg->test_reg[2]);
+	}
+
+bail:
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+	else
+		pm8921_vreg_show_state(rdev, PM8921_REGULATOR_ACTION_MODE);
+
+	return rc;
+}
+
+static unsigned int pm8921_smps_get_mode(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	unsigned int mode = 0;
+
+	mutex_lock(&vreg->pc_lock);
+	mode = vreg->mode;
+	mutex_unlock(&vreg->pc_lock);
+
+	return mode;
+}
+
+static int pm8921_smps_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc = 0;
+
+	if (mode != REGULATOR_MODE_NORMAL && mode != REGULATOR_MODE_IDLE) {
+		vreg_err(vreg, "invalid mode: %u\n", mode);
+		return -EINVAL;
+	}
+
+	mutex_lock(&vreg->pc_lock);
+
+	if (mode == REGULATOR_MODE_NORMAL
+	    || (vreg->is_enabled_pc
+		&& vreg->pdata.pin_fn == PM8921_VREG_PIN_FN_ENABLE)) {
+		/* HPM */
+		rc = pm8921_vreg_masked_write(vreg, vreg->clk_ctrl_addr,
+				       SMPS_CLK_CTRL_PWM, SMPS_CLK_CTRL_MASK,
+				       &vreg->clk_ctrl_reg);
+	} else {
+		/* LPM */
+		rc = pm8921_vreg_masked_write(vreg, vreg->clk_ctrl_addr,
+				       SMPS_CLK_CTRL_PFM, SMPS_CLK_CTRL_MASK,
+				       &vreg->clk_ctrl_reg);
+	}
+
+	if (!rc)
+		vreg->mode = mode;
+
+	mutex_unlock(&vreg->pc_lock);
+
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+	else
+		pm8921_vreg_show_state(rdev, PM8921_REGULATOR_ACTION_MODE);
+
+	return rc;
+}
+
+static unsigned int pm8921_ftsmps_get_mode(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	unsigned int mode = 0;
+
+	if ((vreg->test_reg[0] & FTSMPS_CNFG1_PM_MASK) == FTSMPS_CNFG1_PM_PFM)
+		mode = REGULATOR_MODE_IDLE;
+	else
+		mode = REGULATOR_MODE_NORMAL;
+
+	return mode;
+}
+
+static int pm8921_ftsmps_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc = 0;
+
+	if (mode == REGULATOR_MODE_NORMAL) {
+		/* HPM */
+		rc = pm8921_vreg_masked_write(vreg, vreg->test_addr,
+				FTSMPS_CNFG1_PM_PWM | REGULATOR_BANK_WRITE
+				| REGULATOR_BANK_SEL(0), FTSMPS_CNFG1_PM_MASK
+				| REGULATOR_BANK_MASK, &vreg->test_reg[0]);
+	} else if (mode == REGULATOR_MODE_IDLE) {
+		/* LPM */
+		rc = pm8921_vreg_masked_write(vreg, vreg->test_addr,
+				FTSMPS_CNFG1_PM_PFM | REGULATOR_BANK_WRITE
+				| REGULATOR_BANK_SEL(0), FTSMPS_CNFG1_PM_MASK
+				| REGULATOR_BANK_MASK, &vreg->test_reg[0]);
+	} else {
+		vreg_err(vreg, "invalid mode: %u\n", mode);
+		return -EINVAL;
+	}
+
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+	else
+		pm8921_vreg_show_state(rdev, PM8921_REGULATOR_ACTION_MODE);
+
+	return rc;
+}
+
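+/*
+ * Report HPM as the optimum mode when the requested load plus the constant
+ * system load reaches the HPM minimum load; otherwise report LPM.
+ */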
+static unsigned int pm8921_vreg_get_optimum_mode(struct regulator_dev *rdev,
+		int input_uV, int output_uV, int load_uA)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	unsigned int mode;
+
+	if (load_uA + vreg->pdata.system_uA >= vreg->hpm_min_load)
+		mode = REGULATOR_MODE_NORMAL;
+	else
+		mode = REGULATOR_MODE_IDLE;
+
+	return mode;
+}
+
+static int pm8921_ldo_enable(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc, val;
+
+	mutex_lock(&vreg->pc_lock);
+
+	/*
+	 * Choose HPM if previously set to HPM or if pin control is enabled in
+	 * on/off mode.
+	 */
+	val = LDO_CTRL_PM_LPM;
+	if (vreg->mode == REGULATOR_MODE_NORMAL
+		|| (vreg->is_enabled_pc
+			&& vreg->pdata.pin_fn == PM8921_VREG_PIN_FN_ENABLE))
+		val = LDO_CTRL_PM_HPM;
+
+	rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr, val | LDO_ENABLE,
+		LDO_ENABLE_MASK | LDO_CTRL_PM_MASK, &vreg->ctrl_reg);
+
+	if (!rc)
+		vreg->is_enabled = true;
+
+	mutex_unlock(&vreg->pc_lock);
+
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+	else
+		pm8921_vreg_show_state(rdev, PM8921_REGULATOR_ACTION_ENABLE);
+
+	return rc;
+}
+
+static int pm8921_ldo_disable(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	mutex_lock(&vreg->pc_lock);
+
+	/*
+	 * Only disable the regulator if it isn't still required for HPM/LPM
+	 * pin control.
+	 */
+	if (!vreg->is_enabled_pc
+	    || vreg->pdata.pin_fn != PM8921_VREG_PIN_FN_MODE) {
+		rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr,
+			LDO_DISABLE, LDO_ENABLE_MASK, &vreg->ctrl_reg);
+		if (rc)
+			goto bail;
+	}
+
+	/* Change to LPM if HPM/LPM pin control is enabled. */
+	if (vreg->is_enabled_pc
+	    && vreg->pdata.pin_fn == PM8921_VREG_PIN_FN_MODE) {
+		rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr,
+			LDO_CTRL_PM_LPM, LDO_CTRL_PM_MASK, &vreg->ctrl_reg);
+		if (rc)
+			goto bail;
+
+		rc = pm8921_vreg_masked_write(vreg, vreg->test_addr,
+			LDO_TEST_LPM_SEL_CTRL | REGULATOR_BANK_WRITE
+			  | REGULATOR_BANK_SEL(0),
+			LDO_TEST_LPM_MASK | REGULATOR_BANK_MASK,
+			&vreg->test_reg[0]);
+	}
+
+	if (!rc)
+		vreg->is_enabled = false;
+bail:
+	mutex_unlock(&vreg->pc_lock);
+
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+	else
+		pm8921_vreg_show_state(rdev, PM8921_REGULATOR_ACTION_DISABLE);
+
+	return rc;
+}
+
+static int pm8921_nldo1200_enable(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr, NLDO1200_ENABLE,
+		NLDO1200_ENABLE_MASK, &vreg->ctrl_reg);
+
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+	else
+		pm8921_vreg_show_state(rdev, PM8921_REGULATOR_ACTION_ENABLE);
+
+	return rc;
+}
+
+static int pm8921_nldo1200_disable(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr, NLDO1200_DISABLE,
+		NLDO1200_ENABLE_MASK, &vreg->ctrl_reg);
+
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+	else
+		pm8921_vreg_show_state(rdev, PM8921_REGULATOR_ACTION_DISABLE);
+
+	return rc;
+}
+
+static int pm8921_smps_enable(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc = 0;
+	int val;
+
+	mutex_lock(&vreg->pc_lock);
+
+	if (SMPS_IN_ADVANCED_MODE(vreg)
+	     || !pm8921_vreg_is_pin_controlled(vreg)) {
+		/*
+		 * Enable via the advanced mode registers when already in
+		 * advanced mode or when pin control is not in use.
+		 */
+		rc = pm8921_smps_set_voltage_advanced(vreg, vreg->save_uV,
+			vreg->save_uV, 1);
+	} else {
+		rc = pm8921_smps_set_voltage_legacy(vreg, vreg->save_uV,
+			vreg->save_uV);
+		if (rc)
+			goto bail;
+
+		rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr,
+			SMPS_LEGACY_ENABLE, SMPS_LEGACY_ENABLE_MASK,
+			&vreg->ctrl_reg);
+	}
+
+	/*
+	 * Choose HPM if previously set to HPM or if pin control is enabled in
+	 * on/off mode.
+	 */
+	val = SMPS_CLK_CTRL_PFM;
+	if (vreg->mode == REGULATOR_MODE_NORMAL
+		|| (vreg->is_enabled_pc
+			&& vreg->pdata.pin_fn == PM8921_VREG_PIN_FN_ENABLE))
+		val = SMPS_CLK_CTRL_PWM;
+
+	rc = pm8921_vreg_masked_write(vreg, vreg->clk_ctrl_addr, val,
+			SMPS_CLK_CTRL_MASK, &vreg->clk_ctrl_reg);
+
+	if (!rc)
+		vreg->is_enabled = true;
+bail:
+	mutex_unlock(&vreg->pc_lock);
+
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+	else
+		pm8921_vreg_show_state(rdev, PM8921_REGULATOR_ACTION_ENABLE);
+
+	return rc;
+}
+
+static int pm8921_smps_disable(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	mutex_lock(&vreg->pc_lock);
+
+	if (SMPS_IN_ADVANCED_MODE(vreg)) {
+		/* Change SMPS to legacy mode before disabling. */
+		rc = pm8921_smps_set_voltage_legacy(vreg, vreg->save_uV,
+				vreg->save_uV);
+		if (rc)
+			goto bail;
+	}
+
+	/*
+	 * Only disable the regulator if it isn't still required for HPM/LPM
+	 * pin control.
+	 */
+	if (!vreg->is_enabled_pc
+	    || vreg->pdata.pin_fn != PM8921_VREG_PIN_FN_MODE) {
+		rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr,
+			SMPS_LEGACY_DISABLE, SMPS_LEGACY_ENABLE_MASK,
+			&vreg->ctrl_reg);
+		if (rc)
+			goto bail;
+	}
+
+	/* Change to LPM if HPM/LPM pin control is enabled. */
+	if (vreg->is_enabled_pc
+	    && vreg->pdata.pin_fn == PM8921_VREG_PIN_FN_MODE)
+		rc = pm8921_vreg_masked_write(vreg, vreg->clk_ctrl_addr,
+		       SMPS_CLK_CTRL_PFM, SMPS_CLK_CTRL_MASK,
+		       &vreg->clk_ctrl_reg);
+
+	if (!rc)
+		vreg->is_enabled = false;
+
+bail:
+	mutex_unlock(&vreg->pc_lock);
+
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+	else
+		pm8921_vreg_show_state(rdev, PM8921_REGULATOR_ACTION_DISABLE);
+
+	return rc;
+}
+
+static int pm8921_ftsmps_enable(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	rc = _pm8921_ftsmps_set_voltage(vreg, vreg->save_uV, vreg->save_uV, 1);
+
+	if (rc)
+		vreg_err(vreg, "set voltage failed, rc=%d\n", rc);
+	else
+		pm8921_vreg_show_state(rdev, PM8921_REGULATOR_ACTION_ENABLE);
+
+	return rc;
+}
+
+static int pm8921_ftsmps_disable(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr,
+		FTSMPS_VCTRL_BAND_OFF, FTSMPS_VCTRL_BAND_MASK, &vreg->ctrl_reg);
+	if (rc)
+		goto bail;
+
+	rc = pm8921_vreg_masked_write(vreg, vreg->pfm_ctrl_addr,
+		FTSMPS_VCTRL_BAND_OFF, FTSMPS_VCTRL_BAND_MASK,
+		&vreg->pfm_ctrl_reg);
+bail:
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+	else
+		pm8921_vreg_show_state(rdev, PM8921_REGULATOR_ACTION_DISABLE);
+
+	return rc;
+}
+
+static int pm8921_vs_enable(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	mutex_lock(&vreg->pc_lock);
+
+	rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr, VS_ENABLE,
+		VS_ENABLE_MASK, &vreg->ctrl_reg);
+
+	if (!rc)
+		vreg->is_enabled = true;
+
+	mutex_unlock(&vreg->pc_lock);
+
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+	else
+		pm8921_vreg_show_state(rdev, PM8921_REGULATOR_ACTION_ENABLE);
+
+	return rc;
+}
+
+static int pm8921_vs_disable(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	mutex_lock(&vreg->pc_lock);
+
+	rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr, VS_DISABLE,
+		VS_ENABLE_MASK, &vreg->ctrl_reg);
+
+	if (!rc)
+		vreg->is_enabled = false;
+
+	mutex_unlock(&vreg->pc_lock);
+
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+	else
+		pm8921_vreg_show_state(rdev, PM8921_REGULATOR_ACTION_DISABLE);
+
+	return rc;
+}
+
+static int pm8921_vs300_enable(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr, VS300_CTRL_ENABLE,
+		VS300_CTRL_ENABLE_MASK, &vreg->ctrl_reg);
+
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+	else
+		pm8921_vreg_show_state(rdev, PM8921_REGULATOR_ACTION_ENABLE);
+
+	return rc;
+}
+
+static int pm8921_vs300_disable(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr, VS300_CTRL_DISABLE,
+		VS300_CTRL_ENABLE_MASK, &vreg->ctrl_reg);
+
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+	else
+		pm8921_vreg_show_state(rdev, PM8921_REGULATOR_ACTION_DISABLE);
+
+	return rc;
+}
+
+static int pm8921_ncp_enable(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr, NCP_ENABLE,
+		NCP_ENABLE_MASK, &vreg->ctrl_reg);
+
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+	else
+		pm8921_vreg_show_state(rdev, PM8921_REGULATOR_ACTION_ENABLE);
+
+	return rc;
+}
+
+static int pm8921_ncp_disable(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr, NCP_DISABLE,
+		NCP_ENABLE_MASK, &vreg->ctrl_reg);
+
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+	else
+		pm8921_vreg_show_state(rdev, PM8921_REGULATOR_ACTION_DISABLE);
+
+	return rc;
+}
+
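+/*
+ * Route the selected pin control inputs to the LDO.  Depending on pin_fn,
+ * the pins either switch the regulator on and off (test bank 5) or toggle
+ * it between HPM and LPM (test bank 6).
+ */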
+static int pm8921_ldo_pin_control_enable(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc = 0;
+	int bank;
+	u8 val = 0;
+	u8 mask;
+
+	mutex_lock(&vreg->pc_lock);
+
+	if (vreg->pdata.pin_ctrl & PM8921_VREG_PIN_CTRL_D1)
+		val |= LDO_TEST_PIN_CTRL_EN0;
+	if (vreg->pdata.pin_ctrl & PM8921_VREG_PIN_CTRL_A0)
+		val |= LDO_TEST_PIN_CTRL_EN1;
+	if (vreg->pdata.pin_ctrl & PM8921_VREG_PIN_CTRL_A1)
+		val |= LDO_TEST_PIN_CTRL_EN2;
+	if (vreg->pdata.pin_ctrl & PM8921_VREG_PIN_CTRL_A2)
+		val |= LDO_TEST_PIN_CTRL_EN3;
+
+	bank = (vreg->pdata.pin_fn == PM8921_VREG_PIN_FN_ENABLE ? 5 : 6);
+	rc = pm8921_vreg_masked_write(vreg, vreg->test_addr,
+		val | REGULATOR_BANK_SEL(bank) | REGULATOR_BANK_WRITE,
+		LDO_TEST_PIN_CTRL_MASK | REGULATOR_BANK_MASK,
+		&vreg->test_reg[bank]);
+	if (rc)
+		goto bail;
+
+	/* Unset pin control bits in unused bank. */
+	bank = (bank == 5 ? 6 : 5);
+	rc = pm8921_vreg_masked_write(vreg, vreg->test_addr,
+		REGULATOR_BANK_SEL(bank) | REGULATOR_BANK_WRITE,
+		LDO_TEST_PIN_CTRL_MASK | REGULATOR_BANK_MASK,
+		&vreg->test_reg[bank]);
+	if (rc)
+		goto bail;
+
+	val = LDO_TEST_LPM_SEL_CTRL | REGULATOR_BANK_WRITE
+		| REGULATOR_BANK_SEL(0);
+	mask = LDO_TEST_LPM_MASK | REGULATOR_BANK_MASK;
+	rc = pm8921_vreg_masked_write(vreg, vreg->test_addr, val, mask,
+		&vreg->test_reg[0]);
+	if (rc)
+		goto bail;
+
+	if (vreg->pdata.pin_fn == PM8921_VREG_PIN_FN_ENABLE) {
+		/* Pin control ON/OFF */
+		val = LDO_CTRL_PM_HPM;
+		/* Leave physically enabled if already enabled. */
+		val |= (vreg->is_enabled ? LDO_ENABLE : LDO_DISABLE);
+		rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr, val,
+			LDO_ENABLE_MASK | LDO_CTRL_PM_MASK, &vreg->ctrl_reg);
+		if (rc)
+			goto bail;
+	} else {
+		/* Pin control LPM/HPM */
+		val = LDO_ENABLE;
+		/* Leave in HPM if already enabled in HPM. */
+		val |= (vreg->is_enabled && vreg->mode == REGULATOR_MODE_NORMAL
+			?  LDO_CTRL_PM_HPM : LDO_CTRL_PM_LPM);
+		rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr, val,
+			LDO_ENABLE_MASK | LDO_CTRL_PM_MASK, &vreg->ctrl_reg);
+		if (rc)
+			goto bail;
+	}
+
+bail:
+	if (!rc)
+		vreg->is_enabled_pc = true;
+
+	mutex_unlock(&vreg->pc_lock);
+
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+	else
+		pm8921_vreg_show_state(rdev, PM8921_REGULATOR_ACTION_PIN_CTRL);
+
+	return rc;
+}
+
+static int pm8921_ldo_pin_control_disable(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	mutex_lock(&vreg->pc_lock);
+
+	rc = pm8921_vreg_masked_write(vreg, vreg->test_addr,
+			REGULATOR_BANK_SEL(5) | REGULATOR_BANK_WRITE,
+			LDO_TEST_PIN_CTRL_MASK | REGULATOR_BANK_MASK,
+			&vreg->test_reg[5]);
+	if (rc)
+		goto bail;
+
+	rc = pm8921_vreg_masked_write(vreg, vreg->test_addr,
+			REGULATOR_BANK_SEL(6) | REGULATOR_BANK_WRITE,
+			LDO_TEST_PIN_CTRL_MASK | REGULATOR_BANK_MASK,
+			&vreg->test_reg[6]);
+
+	/*
+	 * Physically disable the regulator if it was enabled in HPM/LPM pin
+	 * control mode previously and it logically should not be enabled.
+	 */
+	if ((vreg->ctrl_reg & LDO_ENABLE_MASK) == LDO_ENABLE
+	    && !vreg->is_enabled) {
+		rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr,
+			LDO_DISABLE, LDO_ENABLE_MASK, &vreg->ctrl_reg);
+		if (rc)
+			goto bail;
+	}
+
+	/* Change to LPM if LPM was enabled. */
+	if (vreg->is_enabled && vreg->mode == REGULATOR_MODE_IDLE) {
+		rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr,
+			LDO_CTRL_PM_LPM, LDO_CTRL_PM_MASK, &vreg->ctrl_reg);
+		if (rc)
+			goto bail;
+
+		rc = pm8921_vreg_masked_write(vreg, vreg->test_addr,
+			LDO_TEST_LPM_SEL_CTRL | REGULATOR_BANK_WRITE
+			  | REGULATOR_BANK_SEL(0),
+			LDO_TEST_LPM_MASK | REGULATOR_BANK_MASK,
+			&vreg->test_reg[0]);
+		if (rc)
+			goto bail;
+	}
+
+bail:
+	if (!rc)
+		vreg->is_enabled_pc = false;
+
+	mutex_unlock(&vreg->pc_lock);
+
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+	else
+		pm8921_vreg_show_state(rdev, PM8921_REGULATOR_ACTION_PIN_CTRL);
+
+	return rc;
+}
+
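+/*
+ * Route the selected pin control inputs to the SMPS sleep control register.
+ * Depending on pin_fn, the pins either switch the regulator on and off or
+ * toggle it between PWM (HPM) and PFM (LPM) operation.
+ */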
+static int pm8921_smps_pin_control_enable(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc = 0;
+	u8 val = 0;
+
+	mutex_lock(&vreg->pc_lock);
+
+	if (vreg->pdata.pin_fn == PM8921_VREG_PIN_FN_ENABLE) {
+		/* Pin control ON/OFF */
+		if (vreg->pdata.pin_ctrl & PM8921_VREG_PIN_CTRL_D1)
+			val |= SMPS_PIN_CTRL_EN0;
+		if (vreg->pdata.pin_ctrl & PM8921_VREG_PIN_CTRL_A0)
+			val |= SMPS_PIN_CTRL_EN1;
+		if (vreg->pdata.pin_ctrl & PM8921_VREG_PIN_CTRL_A1)
+			val |= SMPS_PIN_CTRL_EN2;
+		if (vreg->pdata.pin_ctrl & PM8921_VREG_PIN_CTRL_A2)
+			val |= SMPS_PIN_CTRL_EN3;
+	} else {
+		/* Pin control LPM/HPM */
+		if (vreg->pdata.pin_ctrl & PM8921_VREG_PIN_CTRL_D1)
+			val |= SMPS_PIN_CTRL_LPM_EN0;
+		if (vreg->pdata.pin_ctrl & PM8921_VREG_PIN_CTRL_A0)
+			val |= SMPS_PIN_CTRL_LPM_EN1;
+		if (vreg->pdata.pin_ctrl & PM8921_VREG_PIN_CTRL_A1)
+			val |= SMPS_PIN_CTRL_LPM_EN2;
+		if (vreg->pdata.pin_ctrl & PM8921_VREG_PIN_CTRL_A2)
+			val |= SMPS_PIN_CTRL_LPM_EN3;
+	}
+
+	rc = pm8921_smps_set_voltage_legacy(vreg, vreg->save_uV, vreg->save_uV);
+	if (rc)
+		goto bail;
+
+	rc = pm8921_vreg_masked_write(vreg, vreg->sleep_ctrl_addr, val,
+			SMPS_PIN_CTRL_MASK | SMPS_PIN_CTRL_LPM_MASK,
+			&vreg->sleep_ctrl_reg);
+	if (rc)
+		goto bail;
+
+	/*
+	 * Physically enable the regulator if using HPM/LPM pin control mode or
+	 * if the regulator should be logically left on.
+	 */
+	rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr,
+		((vreg->pdata.pin_fn == PM8921_VREG_PIN_FN_MODE
+		  || vreg->is_enabled) ?
+			SMPS_LEGACY_ENABLE : SMPS_LEGACY_DISABLE),
+		SMPS_LEGACY_ENABLE_MASK, &vreg->ctrl_reg);
+	if (rc)
+		goto bail;
+
+	/*
+	 * Set regulator to HPM if using on/off pin control or if the regulator
+	 * is already enabled in HPM.  Otherwise, set it to LPM.
+	 */
+	rc = pm8921_vreg_masked_write(vreg, vreg->clk_ctrl_addr,
+			(vreg->pdata.pin_fn == PM8921_VREG_PIN_FN_ENABLE
+			 || (vreg->is_enabled
+			     && vreg->mode == REGULATOR_MODE_NORMAL)
+				? SMPS_CLK_CTRL_PWM : SMPS_CLK_CTRL_PFM),
+			SMPS_CLK_CTRL_MASK, &vreg->clk_ctrl_reg);
+
+bail:
+	if (!rc)
+		vreg->is_enabled_pc = true;
+
+	mutex_unlock(&vreg->pc_lock);
+
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+	else
+		pm8921_vreg_show_state(rdev, PM8921_REGULATOR_ACTION_PIN_CTRL);
+
+	return rc;
+}
+
+static int pm8921_smps_pin_control_disable(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	mutex_lock(&vreg->pc_lock);
+
+	rc = pm8921_vreg_masked_write(vreg, vreg->sleep_ctrl_addr, 0,
+			SMPS_PIN_CTRL_MASK | SMPS_PIN_CTRL_LPM_MASK,
+			&vreg->sleep_ctrl_reg);
+	if (rc)
+		goto bail;
+
+	/*
+	 * Physically disable the regulator if it was enabled in HPM/LPM pin
+	 * control mode previously and it logically should not be enabled.
+	 */
+	if ((vreg->ctrl_reg & SMPS_LEGACY_ENABLE_MASK) == SMPS_LEGACY_ENABLE
+	    && !vreg->is_enabled) {
+		rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr,
+			SMPS_LEGACY_DISABLE, SMPS_LEGACY_ENABLE_MASK,
+			&vreg->ctrl_reg);
+		if (rc)
+			goto bail;
+	}
+
+	/* Change to LPM if LPM was enabled. */
+	if (vreg->is_enabled && vreg->mode == REGULATOR_MODE_IDLE) {
+		rc = pm8921_vreg_masked_write(vreg, vreg->clk_ctrl_addr,
+		       SMPS_CLK_CTRL_PFM, SMPS_CLK_CTRL_MASK,
+		       &vreg->clk_ctrl_reg);
+		if (rc)
+			goto bail;
+	}
+
+	rc = pm8921_smps_set_voltage_advanced(vreg, vreg->save_uV,
+			vreg->save_uV, 0);
+
+bail:
+	if (!rc)
+		vreg->is_enabled_pc = false;
+
+	mutex_unlock(&vreg->pc_lock);
+
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+	else
+		pm8921_vreg_show_state(rdev, PM8921_REGULATOR_ACTION_PIN_CTRL);
+
+	return rc;
+}
+
+static int pm8921_vs_pin_control_enable(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+	u8 val = 0;
+
+	mutex_lock(&vreg->pc_lock);
+
+	if (vreg->pdata.pin_ctrl & PM8921_VREG_PIN_CTRL_D1)
+		val |= VS_PIN_CTRL_EN0;
+	if (vreg->pdata.pin_ctrl & PM8921_VREG_PIN_CTRL_A0)
+		val |= VS_PIN_CTRL_EN1;
+	if (vreg->pdata.pin_ctrl & PM8921_VREG_PIN_CTRL_A1)
+		val |= VS_PIN_CTRL_EN2;
+	if (vreg->pdata.pin_ctrl & PM8921_VREG_PIN_CTRL_A2)
+		val |= VS_PIN_CTRL_EN3;
+
+	rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr, val,
+			VS_PIN_CTRL_MASK | VS_ENABLE_MASK, &vreg->ctrl_reg);
+
+	if (!rc)
+		vreg->is_enabled_pc = true;
+
+	mutex_unlock(&vreg->pc_lock);
+
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+	else
+		pm8921_vreg_show_state(rdev, PM8921_REGULATOR_ACTION_PIN_CTRL);
+
+	return rc;
+}
+
+static int pm8921_vs_pin_control_disable(struct regulator_dev *rdev)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	mutex_lock(&vreg->pc_lock);
+
+	rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr, 0,
+				      VS_PIN_CTRL_MASK, &vreg->ctrl_reg);
+
+	if (!rc)
+		vreg->is_enabled_pc = false;
+
+	mutex_unlock(&vreg->pc_lock);
+
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+	else
+		pm8921_vreg_show_state(rdev, PM8921_REGULATOR_ACTION_PIN_CTRL);
+
+	return rc;
+}
+
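+/* Action labels padded to a common width for the debug output below. */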
+static const char * const pm8921_print_actions[] = {
+	[PM8921_REGULATOR_ACTION_INIT]		= "initial    ",
+	[PM8921_REGULATOR_ACTION_ENABLE]	= "enable     ",
+	[PM8921_REGULATOR_ACTION_DISABLE]	= "disable    ",
+	[PM8921_REGULATOR_ACTION_VOLTAGE]	= "set voltage",
+	[PM8921_REGULATOR_ACTION_MODE]		= "set mode   ",
+	[PM8921_REGULATOR_ACTION_PIN_CTRL]	= "pin control",
+};
+
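+/*
+ * Log the regulator state after the given action.  Output is gated by
+ * pm8921_vreg_debug_mask: REQUEST prints only when an SSBI write actually
+ * took place, while DUPLICATE prints unconditionally.
+ */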
+static void pm8921_vreg_show_state(struct regulator_dev *rdev,
+				   enum pm8921_regulator_action action)
+{
+	struct pm8921_vreg *vreg = rdev_get_drvdata(rdev);
+	int uV, pc;
+	unsigned int mode;
+	const char *pc_en0 = "", *pc_en1 = "", *pc_en2 = "", *pc_en3 = "";
+	const char *pc_total = "";
+	const char *action_label = pm8921_print_actions[action];
+	const char *enable_label;
+
+	mutex_lock(&vreg->pc_lock);
+
+	/*
+	 * Do not print unless REQUEST is specified and SSBI writes have taken
+	 * place, or DUPLICATE is specified.
+	 */
+	if (!((pm8921_vreg_debug_mask & PM8921_VREG_DEBUG_DUPLICATE)
+	      || ((pm8921_vreg_debug_mask & PM8921_VREG_DEBUG_REQUEST)
+		  && (vreg->write_count != vreg->prev_write_count)))) {
+		mutex_unlock(&vreg->pc_lock);
+		return;
+	}
+
+	vreg->prev_write_count = vreg->write_count;
+
+	pc = vreg->pdata.pin_ctrl;
+	if (vreg->is_enabled_pc) {
+		if (pc & PM8921_VREG_PIN_CTRL_D1)
+			pc_en0 = " D1";
+		if (pc & PM8921_VREG_PIN_CTRL_A0)
+			pc_en1 = " A0";
+		if (pc & PM8921_VREG_PIN_CTRL_A1)
+			pc_en2 = " A1";
+		if (pc & PM8921_VREG_PIN_CTRL_A2)
+			pc_en3 = " A2";
+		if (pc == PM8921_VREG_PIN_CTRL_NONE)
+			pc_total = " none";
+	} else {
+		pc_total = " none";
+	}
+
+	mutex_unlock(&vreg->pc_lock);
+
+	enable_label = pm8921_vreg_is_enabled(rdev) ? "on " : "off";
+
+	switch (vreg->type) {
+	case REGULATOR_TYPE_PLDO:
+		uV = pm8921_pldo_get_voltage(rdev);
+		mode = pm8921_ldo_get_mode(rdev);
+		pr_info("%s %-9s: %s, v=%7d uV, mode=%s, pc=%s%s%s%s%s\n",
+			action_label, vreg->name, enable_label, uV,
+			(mode == REGULATOR_MODE_NORMAL ? "HPM" : "LPM"),
+			pc_en0, pc_en1, pc_en2, pc_en3, pc_total);
+		break;
+	case REGULATOR_TYPE_NLDO:
+		uV = pm8921_nldo_get_voltage(rdev);
+		mode = pm8921_ldo_get_mode(rdev);
+		pr_info("%s %-9s: %s, v=%7d uV, mode=%s, pc=%s%s%s%s%s\n",
+			action_label, vreg->name, enable_label, uV,
+			(mode == REGULATOR_MODE_NORMAL ? "HPM" : "LPM"),
+			pc_en0, pc_en1, pc_en2, pc_en3, pc_total);
+		break;
+	case REGULATOR_TYPE_NLDO1200:
+		uV = pm8921_nldo1200_get_voltage(rdev);
+		mode = pm8921_nldo1200_get_mode(rdev);
+		pr_info("%s %-9s: %s, v=%7d uV, mode=%s\n",
+			action_label, vreg->name, enable_label, uV,
+			(mode == REGULATOR_MODE_NORMAL ? "HPM" : "LPM"));
+		break;
+	case REGULATOR_TYPE_SMPS:
+		uV = pm8921_smps_get_voltage(rdev);
+		mode = pm8921_smps_get_mode(rdev);
+		pr_info("%s %-9s: %s, v=%7d uV, mode=%s, pc=%s%s%s%s%s\n",
+			action_label, vreg->name, enable_label, uV,
+			(mode == REGULATOR_MODE_NORMAL ? "HPM" : "LPM"),
+			pc_en0, pc_en1, pc_en2, pc_en3, pc_total);
+		break;
+	case REGULATOR_TYPE_FTSMPS:
+		uV = pm8921_ftsmps_get_voltage(rdev);
+		mode = pm8921_ftsmps_get_mode(rdev);
+		pr_info("%s %-9s: %s, v=%7d uV, mode=%s\n",
+			action_label, vreg->name, enable_label, uV,
+			(mode == REGULATOR_MODE_NORMAL ? "HPM" : "LPM"));
+		break;
+	case REGULATOR_TYPE_VS:
+		pr_info("%s %-9s: %s, pc=%s%s%s%s%s\n",
+			action_label, vreg->name, enable_label,
+			pc_en0, pc_en1, pc_en2, pc_en3, pc_total);
+		break;
+	case REGULATOR_TYPE_VS300:
+		pr_info("%s %-9s: %s\n",
+			action_label, vreg->name, enable_label);
+		break;
+	case REGULATOR_TYPE_NCP:
+		uV = pm8921_ncp_get_voltage(rdev);
+		pr_info("%s %-9s: %s, v=%7d uV\n",
+			action_label, vreg->name, enable_label, uV);
+		break;
+	default:
+		break;
+	}
+}
+
+/* Real regulator operations. */
+static struct regulator_ops pm8921_pldo_ops = {
+	.enable			= pm8921_ldo_enable,
+	.disable		= pm8921_ldo_disable,
+	.is_enabled		= pm8921_vreg_is_enabled,
+	.set_voltage		= pm8921_pldo_set_voltage,
+	.get_voltage		= pm8921_pldo_get_voltage,
+	.list_voltage		= pm8921_pldo_list_voltage,
+	.set_mode		= pm8921_ldo_set_mode,
+	.get_mode		= pm8921_ldo_get_mode,
+	.get_optimum_mode	= pm8921_vreg_get_optimum_mode,
+};
+
+static struct regulator_ops pm8921_nldo_ops = {
+	.enable			= pm8921_ldo_enable,
+	.disable		= pm8921_ldo_disable,
+	.is_enabled		= pm8921_vreg_is_enabled,
+	.set_voltage		= pm8921_nldo_set_voltage,
+	.get_voltage		= pm8921_nldo_get_voltage,
+	.list_voltage		= pm8921_nldo_list_voltage,
+	.set_mode		= pm8921_ldo_set_mode,
+	.get_mode		= pm8921_ldo_get_mode,
+	.get_optimum_mode	= pm8921_vreg_get_optimum_mode,
+};
+
+static struct regulator_ops pm8921_nldo1200_ops = {
+	.enable			= pm8921_nldo1200_enable,
+	.disable		= pm8921_nldo1200_disable,
+	.is_enabled		= pm8921_vreg_is_enabled,
+	.set_voltage		= pm8921_nldo1200_set_voltage,
+	.get_voltage		= pm8921_nldo1200_get_voltage,
+	.list_voltage		= pm8921_nldo1200_list_voltage,
+	.set_mode		= pm8921_nldo1200_set_mode,
+	.get_mode		= pm8921_nldo1200_get_mode,
+	.get_optimum_mode	= pm8921_vreg_get_optimum_mode,
+};
+
+static struct regulator_ops pm8921_smps_ops = {
+	.enable			= pm8921_smps_enable,
+	.disable		= pm8921_smps_disable,
+	.is_enabled		= pm8921_vreg_is_enabled,
+	.set_voltage		= pm8921_smps_set_voltage,
+	.get_voltage		= pm8921_smps_get_voltage,
+	.list_voltage		= pm8921_smps_list_voltage,
+	.set_mode		= pm8921_smps_set_mode,
+	.get_mode		= pm8921_smps_get_mode,
+	.get_optimum_mode	= pm8921_vreg_get_optimum_mode,
+};
+
+static struct regulator_ops pm8921_ftsmps_ops = {
+	.enable			= pm8921_ftsmps_enable,
+	.disable		= pm8921_ftsmps_disable,
+	.is_enabled		= pm8921_vreg_is_enabled,
+	.set_voltage		= pm8921_ftsmps_set_voltage,
+	.get_voltage		= pm8921_ftsmps_get_voltage,
+	.list_voltage		= pm8921_ftsmps_list_voltage,
+	.set_mode		= pm8921_ftsmps_set_mode,
+	.get_mode		= pm8921_ftsmps_get_mode,
+	.get_optimum_mode	= pm8921_vreg_get_optimum_mode,
+};
+
+static struct regulator_ops pm8921_vs_ops = {
+	.enable			= pm8921_vs_enable,
+	.disable		= pm8921_vs_disable,
+	.is_enabled		= pm8921_vreg_is_enabled,
+};
+
+static struct regulator_ops pm8921_vs300_ops = {
+	.enable			= pm8921_vs300_enable,
+	.disable		= pm8921_vs300_disable,
+	.is_enabled		= pm8921_vreg_is_enabled,
+};
+
+static struct regulator_ops pm8921_ncp_ops = {
+	.enable			= pm8921_ncp_enable,
+	.disable		= pm8921_ncp_disable,
+	.is_enabled		= pm8921_vreg_is_enabled,
+	.set_voltage		= pm8921_ncp_set_voltage,
+	.get_voltage		= pm8921_ncp_get_voltage,
+	.list_voltage		= pm8921_ncp_list_voltage,
+};
+
+/* Pin control regulator operations. */
+static struct regulator_ops pm8921_ldo_pc_ops = {
+	.enable			= pm8921_ldo_pin_control_enable,
+	.disable		= pm8921_ldo_pin_control_disable,
+	.is_enabled		= pm8921_vreg_pin_control_is_enabled,
+};
+
+static struct regulator_ops pm8921_smps_pc_ops = {
+	.enable			= pm8921_smps_pin_control_enable,
+	.disable		= pm8921_smps_pin_control_disable,
+	.is_enabled		= pm8921_vreg_pin_control_is_enabled,
+};
+
+static struct regulator_ops pm8921_vs_pc_ops = {
+	.enable			= pm8921_vs_pin_control_enable,
+	.disable		= pm8921_vs_pin_control_disable,
+	.is_enabled		= pm8921_vreg_pin_control_is_enabled,
+};
+
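+/* Build a regulator_desc table entry indexed by its PM8921_VREG_ID value. */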
+#define VREG_DESC(_id, _name, _ops, _n_voltages) \
+	[PM8921_VREG_ID_##_id] = { \
+		.id	= PM8921_VREG_ID_##_id, \
+		.name	= _name, \
+		.n_voltages = _n_voltages, \
+		.ops	= _ops, \
+		.type	= REGULATOR_VOLTAGE, \
+		.owner	= THIS_MODULE, \
+	}
+
+static struct regulator_desc pm8921_vreg_description[] = {
+	VREG_DESC(L1,  "8921_l1",  &pm8921_nldo_ops, NLDO_SET_POINTS),
+	VREG_DESC(L2,  "8921_l2",  &pm8921_nldo_ops, NLDO_SET_POINTS),
+	VREG_DESC(L3,  "8921_l3",  &pm8921_pldo_ops, PLDO_SET_POINTS),
+	VREG_DESC(L4,  "8921_l4",  &pm8921_pldo_ops, PLDO_SET_POINTS),
+	VREG_DESC(L5,  "8921_l5",  &pm8921_pldo_ops, PLDO_SET_POINTS),
+	VREG_DESC(L6,  "8921_l6",  &pm8921_pldo_ops, PLDO_SET_POINTS),
+	VREG_DESC(L7,  "8921_l7",  &pm8921_pldo_ops, PLDO_SET_POINTS),
+	VREG_DESC(L8,  "8921_l8",  &pm8921_pldo_ops, PLDO_SET_POINTS),
+	VREG_DESC(L9,  "8921_l9",  &pm8921_pldo_ops, PLDO_SET_POINTS),
+	VREG_DESC(L10, "8921_l10", &pm8921_pldo_ops, PLDO_SET_POINTS),
+	VREG_DESC(L11, "8921_l11", &pm8921_pldo_ops, PLDO_SET_POINTS),
+	VREG_DESC(L12, "8921_l12", &pm8921_nldo_ops, NLDO_SET_POINTS),
+	VREG_DESC(L14, "8921_l14", &pm8921_pldo_ops, PLDO_SET_POINTS),
+	VREG_DESC(L15, "8921_l15", &pm8921_pldo_ops, PLDO_SET_POINTS),
+	VREG_DESC(L16, "8921_l16", &pm8921_pldo_ops, PLDO_SET_POINTS),
+	VREG_DESC(L17, "8921_l17", &pm8921_pldo_ops, PLDO_SET_POINTS),
+	VREG_DESC(L18, "8921_l18", &pm8921_nldo_ops, NLDO_SET_POINTS),
+	VREG_DESC(L21, "8921_l21", &pm8921_pldo_ops, PLDO_SET_POINTS),
+	VREG_DESC(L22, "8921_l22", &pm8921_pldo_ops, PLDO_SET_POINTS),
+	VREG_DESC(L23, "8921_l23", &pm8921_pldo_ops, PLDO_SET_POINTS),
+	VREG_DESC(L24, "8921_l24", &pm8921_nldo1200_ops, NLDO1200_SET_POINTS),
+	VREG_DESC(L25, "8921_l25", &pm8921_nldo1200_ops, NLDO1200_SET_POINTS),
+	VREG_DESC(L26, "8921_l26", &pm8921_nldo1200_ops, NLDO1200_SET_POINTS),
+	VREG_DESC(L27, "8921_l27", &pm8921_nldo1200_ops, NLDO1200_SET_POINTS),
+	VREG_DESC(L28, "8921_l28", &pm8921_nldo1200_ops, NLDO1200_SET_POINTS),
+	VREG_DESC(L29, "8921_l29", &pm8921_pldo_ops, PLDO_SET_POINTS),
+
+	VREG_DESC(S1, "8921_s1", &pm8921_smps_ops, SMPS_ADVANCED_SET_POINTS),
+	VREG_DESC(S2, "8921_s2", &pm8921_smps_ops, SMPS_ADVANCED_SET_POINTS),
+	VREG_DESC(S3, "8921_s3", &pm8921_smps_ops, SMPS_ADVANCED_SET_POINTS),
+	VREG_DESC(S4, "8921_s4", &pm8921_smps_ops, SMPS_ADVANCED_SET_POINTS),
+	VREG_DESC(S5, "8921_s5", &pm8921_ftsmps_ops, FTSMPS_SET_POINTS),
+	VREG_DESC(S6, "8921_s6", &pm8921_ftsmps_ops, FTSMPS_SET_POINTS),
+	VREG_DESC(S7, "8921_s7", &pm8921_smps_ops, SMPS_ADVANCED_SET_POINTS),
+	VREG_DESC(S8, "8921_s8", &pm8921_smps_ops, SMPS_ADVANCED_SET_POINTS),
+
+	VREG_DESC(LVS1, "8921_lvs1", &pm8921_vs_ops, 0),
+	VREG_DESC(LVS2, "8921_lvs2", &pm8921_vs300_ops, 0),
+	VREG_DESC(LVS3, "8921_lvs3", &pm8921_vs_ops, 0),
+	VREG_DESC(LVS4, "8921_lvs4", &pm8921_vs_ops, 0),
+	VREG_DESC(LVS5, "8921_lvs5", &pm8921_vs_ops, 0),
+	VREG_DESC(LVS6, "8921_lvs6", &pm8921_vs_ops, 0),
+	VREG_DESC(LVS7, "8921_lvs7", &pm8921_vs_ops, 0),
+
+	VREG_DESC(USB_OTG, "8921_usb_otg", &pm8921_vs300_ops, 0),
+	VREG_DESC(HDMI_MVS, "8921_hdmi_mvs", &pm8921_vs300_ops, 0),
+	VREG_DESC(NCP, "8921_ncp", &pm8921_ncp_ops, NCP_SET_POINTS),
+
+	VREG_DESC(L1_PC,  "8921_l1_pc",  &pm8921_ldo_pc_ops, 0),
+	VREG_DESC(L2_PC,  "8921_l2_pc",  &pm8921_ldo_pc_ops, 0),
+	VREG_DESC(L3_PC,  "8921_l3_pc",  &pm8921_ldo_pc_ops, 0),
+	VREG_DESC(L4_PC,  "8921_l4_pc",  &pm8921_ldo_pc_ops, 0),
+	VREG_DESC(L5_PC,  "8921_l5_pc",  &pm8921_ldo_pc_ops, 0),
+	VREG_DESC(L6_PC,  "8921_l6_pc",  &pm8921_ldo_pc_ops, 0),
+	VREG_DESC(L7_PC,  "8921_l7_pc",  &pm8921_ldo_pc_ops, 0),
+	VREG_DESC(L8_PC,  "8921_l8_pc",  &pm8921_ldo_pc_ops, 0),
+	VREG_DESC(L9_PC,  "8921_l9_pc",  &pm8921_ldo_pc_ops, 0),
+	VREG_DESC(L10_PC, "8921_l10_pc", &pm8921_ldo_pc_ops, 0),
+	VREG_DESC(L11_PC, "8921_l11_pc", &pm8921_ldo_pc_ops, 0),
+	VREG_DESC(L12_PC, "8921_l12_pc", &pm8921_ldo_pc_ops, 0),
+	VREG_DESC(L14_PC, "8921_l14_pc", &pm8921_ldo_pc_ops, 0),
+	VREG_DESC(L15_PC, "8921_l15_pc", &pm8921_ldo_pc_ops, 0),
+	VREG_DESC(L16_PC, "8921_l16_pc", &pm8921_ldo_pc_ops, 0),
+	VREG_DESC(L17_PC, "8921_l17_pc", &pm8921_ldo_pc_ops, 0),
+	VREG_DESC(L18_PC, "8921_l18_pc", &pm8921_ldo_pc_ops, 0),
+	VREG_DESC(L21_PC, "8921_l21_pc", &pm8921_ldo_pc_ops, 0),
+	VREG_DESC(L22_PC, "8921_l22_pc", &pm8921_ldo_pc_ops, 0),
+	VREG_DESC(L23_PC, "8921_l23_pc", &pm8921_ldo_pc_ops, 0),
+	VREG_DESC(L29_PC, "8921_l29_pc", &pm8921_ldo_pc_ops, 0),
+
+	VREG_DESC(S1_PC, "8921_s1_pc", &pm8921_smps_pc_ops, 0),
+	VREG_DESC(S2_PC, "8921_s2_pc", &pm8921_smps_pc_ops, 0),
+	VREG_DESC(S3_PC, "8921_s3_pc", &pm8921_smps_pc_ops, 0),
+	VREG_DESC(S4_PC, "8921_s4_pc", &pm8921_smps_pc_ops, 0),
+	VREG_DESC(S7_PC, "8921_s7_pc", &pm8921_smps_pc_ops, 0),
+	VREG_DESC(S8_PC, "8921_s8_pc", &pm8921_smps_pc_ops, 0),
+
+	VREG_DESC(LVS1_PC, "8921_lvs1_pc", &pm8921_vs_pc_ops, 0),
+	VREG_DESC(LVS3_PC, "8921_lvs3_pc", &pm8921_vs_pc_ops, 0),
+	VREG_DESC(LVS4_PC, "8921_lvs4_pc", &pm8921_vs_pc_ops, 0),
+	VREG_DESC(LVS5_PC, "8921_lvs5_pc", &pm8921_vs_pc_ops, 0),
+	VREG_DESC(LVS6_PC, "8921_lvs6_pc", &pm8921_vs_pc_ops, 0),
+	VREG_DESC(LVS7_PC, "8921_lvs7_pc", &pm8921_vs_pc_ops, 0),
+};
+
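+/*
+ * Cache the LDO control and test bank registers and, for the real regulator,
+ * apply the platform pull down setting and record the initial enable state
+ * and mode.
+ */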
+static int pm8921_init_ldo(struct pm8921_vreg *vreg, bool is_real)
+{
+	int rc = 0;
+	int i;
+	u8 bank;
+
+	/* Save the current control register state. */
+	rc = pm8xxx_readb(vreg->dev->parent, vreg->ctrl_addr, &vreg->ctrl_reg);
+	if (rc)
+		goto bail;
+
+	/* Save the current test register state. */
+	for (i = 0; i < LDO_TEST_BANKS; i++) {
+		bank = REGULATOR_BANK_SEL(i);
+		rc = pm8xxx_writeb(vreg->dev->parent, vreg->test_addr, bank);
+		if (rc)
+			goto bail;
+
+		rc = pm8xxx_readb(vreg->dev->parent, vreg->test_addr,
+				  &vreg->test_reg[i]);
+		if (rc)
+			goto bail;
+		vreg->test_reg[i] |= REGULATOR_BANK_WRITE;
+	}
+
+	if (is_real) {
+		/* Set pull down enable based on platform data. */
+		rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr,
+		      (vreg->pdata.pull_down_enable ? LDO_PULL_DOWN_ENABLE : 0),
+		      LDO_PULL_DOWN_ENABLE_MASK, &vreg->ctrl_reg);
+
+		vreg->is_enabled = !!_pm8921_vreg_is_enabled(vreg);
+
+		vreg->mode = ((vreg->ctrl_reg & LDO_CTRL_PM_MASK)
+					== LDO_CTRL_PM_LPM ?
+				REGULATOR_MODE_IDLE : REGULATOR_MODE_NORMAL);
+	}
+bail:
+	if (rc)
+		vreg_err(vreg, "pm8xxx_readb/writeb failed, rc=%d\n", rc);
+
+	return rc;
+}
+
+static int pm8921_init_nldo1200(struct pm8921_vreg *vreg)
+{
+	int rc = 0;
+	int i;
+	u8 bank;
+
+	/* Save the current control register state. */
+	rc = pm8xxx_readb(vreg->dev->parent, vreg->ctrl_addr, &vreg->ctrl_reg);
+	if (rc)
+		goto bail;
+
+	/* Save the current test register state. */
+	for (i = 0; i < LDO_TEST_BANKS; i++) {
+		bank = REGULATOR_BANK_SEL(i);
+		rc = pm8xxx_writeb(vreg->dev->parent, vreg->test_addr, bank);
+		if (rc)
+			goto bail;
+
+		rc = pm8xxx_readb(vreg->dev->parent, vreg->test_addr,
+				  &vreg->test_reg[i]);
+		if (rc)
+			goto bail;
+		vreg->test_reg[i] |= REGULATOR_BANK_WRITE;
+	}
+
+	vreg->save_uV = _pm8921_nldo1200_get_voltage(vreg);
+
+	/* Set pull down enable based on platform data. */
+	rc = pm8921_vreg_masked_write(vreg, vreg->test_addr,
+		 (vreg->pdata.pull_down_enable ? NLDO1200_PULL_DOWN_ENABLE : 0)
+		 | REGULATOR_BANK_SEL(1) | REGULATOR_BANK_WRITE,
+		 NLDO1200_PULL_DOWN_ENABLE_MASK | REGULATOR_BANK_MASK,
+		 &vreg->test_reg[1]);
+
+bail:
+	if (rc)
+		vreg_err(vreg, "pm8xxx_readb/writeb failed, rc=%d\n", rc);
+
+	return rc;
+}
+
+static int pm8921_init_smps(struct pm8921_vreg *vreg, bool is_real)
+{
+	int rc = 0;
+	int i;
+	u8 bank;
+
+	/* Save the current control register state. */
+	rc = pm8xxx_readb(vreg->dev->parent, vreg->ctrl_addr, &vreg->ctrl_reg);
+	if (rc)
+		goto bail;
+
+	/* Save the current test2 register state. */
+	for (i = 0; i < SMPS_TEST_BANKS; i++) {
+		bank = REGULATOR_BANK_SEL(i);
+		rc = pm8xxx_writeb(vreg->dev->parent, vreg->test_addr, bank);
+		if (rc)
+			goto bail;
+
+		rc = pm8xxx_readb(vreg->dev->parent, vreg->test_addr,
+				  &vreg->test_reg[i]);
+		if (rc)
+			goto bail;
+		vreg->test_reg[i] |= REGULATOR_BANK_WRITE;
+	}
+
+	/* Save the current clock control register state. */
+	rc = pm8xxx_readb(vreg->dev->parent, vreg->clk_ctrl_addr,
+			  &vreg->clk_ctrl_reg);
+	if (rc)
+		goto bail;
+
+	/* Save the current sleep control register state. */
+	rc = pm8xxx_readb(vreg->dev->parent, vreg->sleep_ctrl_addr,
+			  &vreg->sleep_ctrl_reg);
+	if (rc)
+		goto bail;
+
+	vreg->save_uV = _pm8921_smps_get_voltage(vreg);
+
+	if (is_real) {
+		/* Set advanced mode pull down enable based on platform data. */
+		rc = pm8921_vreg_masked_write(vreg, vreg->test_addr,
+			(vreg->pdata.pull_down_enable
+				? SMPS_ADVANCED_PULL_DOWN_ENABLE : 0)
+			| REGULATOR_BANK_SEL(6) | REGULATOR_BANK_WRITE,
+			REGULATOR_BANK_MASK | SMPS_ADVANCED_PULL_DOWN_ENABLE,
+			&vreg->test_reg[6]);
+		if (rc)
+			goto bail;
+
+		vreg->is_enabled = !!_pm8921_vreg_is_enabled(vreg);
+
+		vreg->mode = ((vreg->clk_ctrl_reg & SMPS_CLK_CTRL_MASK)
+					== SMPS_CLK_CTRL_PFM ?
+				REGULATOR_MODE_IDLE : REGULATOR_MODE_NORMAL);
+	}
+
+	if (!SMPS_IN_ADVANCED_MODE(vreg) && is_real) {
+		/* Set legacy mode pull down enable based on platform data. */
+		rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr,
+			(vreg->pdata.pull_down_enable
+				? SMPS_LEGACY_PULL_DOWN_ENABLE : 0),
+			SMPS_LEGACY_PULL_DOWN_ENABLE, &vreg->ctrl_reg);
+		if (rc)
+			goto bail;
+	}
+
+bail:
+	if (rc)
+		vreg_err(vreg, "pm8xxx_readb/writeb failed, rc=%d\n", rc);
+
+	return rc;
+}
+
+static int pm8921_init_ftsmps(struct pm8921_vreg *vreg)
+{
+	int rc, i;
+	u8 bank;
+
+	/* Save the current control register state. */
+	rc = pm8xxx_readb(vreg->dev->parent, vreg->ctrl_addr, &vreg->ctrl_reg);
+	if (rc)
+		goto bail;
+
+	/* Store current regulator register values. */
+	rc = pm8xxx_readb(vreg->dev->parent, vreg->pfm_ctrl_addr,
+			  &vreg->pfm_ctrl_reg);
+	if (rc)
+		goto bail;
+
+	rc = pm8xxx_readb(vreg->dev->parent, vreg->pwr_cnfg_addr,
+			  &vreg->pwr_cnfg_reg);
+	if (rc)
+		goto bail;
+
+	/* Save the current fts_cnfg1 register state (uses 'test' member). */
+	for (i = 0; i < SMPS_TEST_BANKS; i++) {
+		bank = REGULATOR_BANK_SEL(i);
+		rc = pm8xxx_writeb(vreg->dev->parent, vreg->test_addr, bank);
+		if (rc)
+			goto bail;
+
+		rc = pm8xxx_readb(vreg->dev->parent, vreg->test_addr,
+				  &vreg->test_reg[i]);
+		if (rc)
+			goto bail;
+		vreg->test_reg[i] |= REGULATOR_BANK_WRITE;
+	}
+
+	vreg->save_uV = _pm8921_ftsmps_get_voltage(vreg);
+
+	/* Set pull down enable based on platform data. */
+	rc = pm8921_vreg_masked_write(vreg, vreg->pwr_cnfg_addr,
+		(vreg->pdata.pull_down_enable ? FTSMPS_PULL_DOWN_ENABLE : 0),
+		FTSMPS_PULL_DOWN_ENABLE_MASK, &vreg->pwr_cnfg_reg);
+
+bail:
+	if (rc)
+		vreg_err(vreg, "pm8xxx_readb/writeb failed, rc=%d\n", rc);
+
+	return rc;
+}
+
+static int pm8921_init_vs(struct pm8921_vreg *vreg, bool is_real)
+{
+	int rc = 0;
+
+	/* Save the current control register state. */
+	rc = pm8xxx_readb(vreg->dev->parent, vreg->ctrl_addr, &vreg->ctrl_reg);
+	if (rc) {
+		vreg_err(vreg, "pm8xxx_readb failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (is_real) {
+		/* Set pull down enable based on platform data. */
+		rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr,
+		       (vreg->pdata.pull_down_enable ? VS_PULL_DOWN_ENABLE : 0),
+		       VS_PULL_DOWN_ENABLE_MASK, &vreg->ctrl_reg);
+
+		if (rc)
+			vreg_err(vreg,
+				"pm8921_vreg_masked_write failed, rc=%d\n", rc);
+
+		vreg->is_enabled = !!_pm8921_vreg_is_enabled(vreg);
+	}
+
+	return rc;
+}
+
+static int pm8921_init_vs300(struct pm8921_vreg *vreg)
+{
+	int rc;
+
+	/* Save the current control register state. */
+	rc = pm8xxx_readb(vreg->dev->parent, vreg->ctrl_addr, &vreg->ctrl_reg);
+	if (rc) {
+		vreg_err(vreg, "pm8xxx_readb failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Set pull down enable based on platform data. */
+	rc = pm8921_vreg_masked_write(vreg, vreg->ctrl_addr,
+		    (vreg->pdata.pull_down_enable ? VS300_PULL_DOWN_ENABLE : 0),
+		    VS300_PULL_DOWN_ENABLE_MASK, &vreg->ctrl_reg);
+
+	if (rc)
+		vreg_err(vreg, "pm8921_vreg_masked_write failed, rc=%d\n", rc);
+
+	return rc;
+}
+
+static int pm8921_init_ncp(struct pm8921_vreg *vreg)
+{
+	int rc;
+
+	/* Save the current control register state. */
+	rc = pm8xxx_readb(vreg->dev->parent, vreg->ctrl_addr, &vreg->ctrl_reg);
+	if (rc) {
+		vreg_err(vreg, "pm8xxx_readb failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
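+/*
+ * Translate a pin control regulator device ID into the ID of the real
+ * regulator that shares its hardware, so that both reference the same
+ * pm8921_vreg entry.
+ */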
+int pc_id_to_real_id(int id)
+{
+	int real_id;
+
+	if (id >= PM8921_VREG_ID_L1_PC && id <= PM8921_VREG_ID_L23_PC)
+		real_id = id - PM8921_VREG_ID_L1_PC;
+	else if (id >= PM8921_VREG_ID_L29_PC && id <= PM8921_VREG_ID_S4_PC)
+		real_id = id - PM8921_VREG_ID_L29_PC + PM8921_VREG_ID_L29;
+	else if (id >= PM8921_VREG_ID_S7_PC && id <= PM8921_VREG_ID_LVS1_PC)
+		real_id = id - PM8921_VREG_ID_S7_PC + PM8921_VREG_ID_S7;
+	else
+		real_id = id - PM8921_VREG_ID_LVS3_PC + PM8921_VREG_ID_LVS3;
+
+	return real_id;
+}
+
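+/*
+ * Probe either a real regulator or its pin control counterpart.  Both are
+ * registered as separate regulator devices but share one pm8921_vreg entry.
+ */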
+static int __devinit pm8921_vreg_probe(struct platform_device *pdev)
+{
+	const struct pm8921_regulator_platform_data *pdata;
+	enum pm8921_vreg_pin_function pin_fn;
+	struct regulator_desc *rdesc;
+	struct pm8921_vreg *vreg;
+	const char *reg_name = "";
+	unsigned pin_ctrl;
+	int rc = 0, id = pdev->id;
+
+	if (pdev == NULL)
+		return -EINVAL;
+
+	if (pdev->id >= 0 && pdev->id < PM8921_VREG_ID_MAX) {
+		pdata = pdev->dev.platform_data;
+		rdesc = &pm8921_vreg_description[pdev->id];
+		if (!IS_REAL_REGULATOR(pdev->id))
+			id = pc_id_to_real_id(pdev->id);
+		vreg = &pm8921_vreg[id];
+		reg_name = pm8921_vreg_description[pdev->id].name;
+		if (!pdata) {
+			pr_err("%s requires platform data\n", reg_name);
+			return -EINVAL;
+		}
+
+		mutex_lock(&vreg->pc_lock);
+
+		if (IS_REAL_REGULATOR(pdev->id)) {
+			/* Do not modify pin control and pin function values. */
+			pin_ctrl = vreg->pdata.pin_ctrl;
+			pin_fn = vreg->pdata.pin_fn;
+			memcpy(&(vreg->pdata), pdata,
+				sizeof(struct pm8921_regulator_platform_data));
+			vreg->pdata.pin_ctrl = pin_ctrl;
+			vreg->pdata.pin_fn = pin_fn;
+			vreg->dev = &pdev->dev;
+			vreg->name = reg_name;
+		} else {
+			/* Pin control regulator */
+			if ((pdata->pin_ctrl &
+			   (PM8921_VREG_PIN_CTRL_D1 | PM8921_VREG_PIN_CTRL_A0
+			   | PM8921_VREG_PIN_CTRL_A1 | PM8921_VREG_PIN_CTRL_A2))
+			      == PM8921_VREG_PIN_CTRL_NONE) {
+				pr_err("%s: no pin control input specified\n",
+					reg_name);
+				mutex_unlock(&vreg->pc_lock);
+				return -EINVAL;
+			}
+			vreg->pdata.pin_ctrl = pdata->pin_ctrl;
+			vreg->pdata.pin_fn = pdata->pin_fn;
+			vreg->dev_pc = &pdev->dev;
+			if (!vreg->dev)
+				vreg->dev = &pdev->dev;
+			if (!vreg->name)
+				vreg->name = reg_name;
+		}
+
+		/* Initialize register values. */
+		switch (vreg->type) {
+		case REGULATOR_TYPE_PLDO:
+		case REGULATOR_TYPE_NLDO:
+			rc = pm8921_init_ldo(vreg, IS_REAL_REGULATOR(pdev->id));
+			break;
+		case REGULATOR_TYPE_NLDO1200:
+			rc = pm8921_init_nldo1200(vreg);
+			break;
+		case REGULATOR_TYPE_SMPS:
+			rc = pm8921_init_smps(vreg,
+						IS_REAL_REGULATOR(pdev->id));
+			break;
+		case REGULATOR_TYPE_FTSMPS:
+			rc = pm8921_init_ftsmps(vreg);
+			break;
+		case REGULATOR_TYPE_VS:
+			rc = pm8921_init_vs(vreg, IS_REAL_REGULATOR(pdev->id));
+			break;
+		case REGULATOR_TYPE_VS300:
+			rc = pm8921_init_vs300(vreg);
+			break;
+		case REGULATOR_TYPE_NCP:
+			rc = pm8921_init_ncp(vreg);
+			break;
+		}
+
+		mutex_unlock(&vreg->pc_lock);
+
+		if (rc)
+			goto bail;
+
+		if (IS_REAL_REGULATOR(pdev->id)) {
+			vreg->rdev = regulator_register(rdesc, &pdev->dev,
+					&(pdata->init_data), vreg);
+			if (IS_ERR(vreg->rdev)) {
+				rc = PTR_ERR(vreg->rdev);
+				vreg->rdev = NULL;
+				pr_err("regulator_register failed: %s, rc=%d\n",
+					reg_name, rc);
+			}
+		} else {
+			vreg->rdev_pc = regulator_register(rdesc, &pdev->dev,
+					&(pdata->init_data), vreg);
+			if (IS_ERR(vreg->rdev_pc)) {
+				rc = PTR_ERR(vreg->rdev_pc);
+				vreg->rdev_pc = NULL;
+				pr_err("regulator_register failed: %s, rc=%d\n",
+					reg_name, rc);
+			}
+		}
+		if ((pm8921_vreg_debug_mask & PM8921_VREG_DEBUG_INIT) && !rc
+		    && vreg->rdev)
+			pm8921_vreg_show_state(vreg->rdev,
+						PM8921_REGULATOR_ACTION_INIT);
+	} else {
+		rc = -ENODEV;
+	}
+
+bail:
+	if (rc)
+		pr_err("error for %s, rc=%d\n", reg_name, rc);
+
+	return rc;
+}
+
+static int __devexit pm8921_vreg_remove(struct platform_device *pdev)
+{
+	if (IS_REAL_REGULATOR(pdev->id))
+		regulator_unregister(pm8921_vreg[pdev->id].rdev);
+	else
+		regulator_unregister(
+			pm8921_vreg[pc_id_to_real_id(pdev->id)].rdev_pc);
+
+	return 0;
+}
+
+static struct platform_driver pm8921_vreg_driver = {
+	.probe	= pm8921_vreg_probe,
+	.remove	= __devexit_p(pm8921_vreg_remove),
+	.driver	= {
+		.name	= PM8921_REGULATOR_DEV_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init pm8921_vreg_init(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pm8921_vreg); i++) {
+		mutex_init(&pm8921_vreg[i].pc_lock);
+		pm8921_vreg[i].write_count = 0;
+		pm8921_vreg[i].prev_write_count = -1;
+	}
+
+	return platform_driver_register(&pm8921_vreg_driver);
+}
+postcore_initcall(pm8921_vreg_init);
+
+static void __exit pm8921_vreg_exit(void)
+{
+	int i;
+
+	platform_driver_unregister(&pm8921_vreg_driver);
+
+	for (i = 0; i < ARRAY_SIZE(pm8921_vreg); i++)
+		mutex_destroy(&pm8921_vreg[i].pc_lock);
+}
+module_exit(pm8921_vreg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC8921 regulator driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:" PM8921_REGULATOR_DEV_NAME);
diff --git a/drivers/regulator/pmic8058-regulator.c b/drivers/regulator/pmic8058-regulator.c
new file mode 100644
index 0000000..98ba163
--- /dev/null
+++ b/drivers/regulator/pmic8058-regulator.c
@@ -0,0 +1,1769 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/mfd/pmic8058.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/pmic8058-regulator.h>
+
+/* Regulator types */
+#define REGULATOR_TYPE_LDO		0
+#define REGULATOR_TYPE_SMPS		1
+#define REGULATOR_TYPE_LVS		2
+#define REGULATOR_TYPE_NCP		3
+
+/* Common masks */
+#define REGULATOR_EN_MASK		0x80
+
+#define REGULATOR_BANK_MASK		0xF0
+#define REGULATOR_BANK_SEL(n)		((n) << 4)
+#define REGULATOR_BANK_WRITE		0x80
+
+#define LDO_TEST_BANKS			7
+#define SMPS_TEST_BANKS			8
+#define REGULATOR_TEST_BANKS_MAX	SMPS_TEST_BANKS
+
+/* LDO programming */
+
+/* CTRL register */
+#define LDO_ENABLE_MASK			0x80
+#define LDO_ENABLE			0x80
+#define LDO_PULL_DOWN_ENABLE_MASK	0x40
+#define LDO_PULL_DOWN_ENABLE		0x40
+
+#define LDO_CTRL_PM_MASK		0x20
+#define LDO_CTRL_PM_HPM			0x00
+#define LDO_CTRL_PM_LPM			0x20
+
+#define LDO_CTRL_VPROG_MASK		0x1F
+
+/* TEST register bank 0 */
+#define LDO_TEST_LPM_MASK		0x40
+#define LDO_TEST_LPM_SEL_CTRL		0x00
+#define LDO_TEST_LPM_SEL_TCXO		0x40
+
+/* TEST register bank 2 */
+#define LDO_TEST_VPROG_UPDATE_MASK	0x08
+#define LDO_TEST_RANGE_SEL_MASK		0x04
+#define LDO_TEST_FINE_STEP_MASK		0x02
+#define LDO_TEST_FINE_STEP_SHIFT	1
+
+/* TEST register bank 4 */
+#define LDO_TEST_RANGE_EXT_MASK		0x01
+
+/* TEST register bank 5 */
+#define LDO_TEST_PIN_CTRL_MASK		0x0F
+#define LDO_TEST_PIN_CTRL_EN3		0x08
+#define LDO_TEST_PIN_CTRL_EN2		0x04
+#define LDO_TEST_PIN_CTRL_EN1		0x02
+#define LDO_TEST_PIN_CTRL_EN0		0x01
+
+/* TEST register bank 6 */
+#define LDO_TEST_PIN_CTRL_LPM_MASK	0x0F
+
+/* Allowable voltage ranges */
+#define PLDO_LOW_UV_MIN			750000
+#define PLDO_LOW_UV_MAX			1537500
+#define PLDO_LOW_FINE_STEP_UV		12500
+
+#define PLDO_NORM_UV_MIN		1500000
+#define PLDO_NORM_UV_MAX		3075000
+#define PLDO_NORM_FINE_STEP_UV		25000
+
+#define PLDO_HIGH_UV_MIN		1750000
+#define PLDO_HIGH_UV_MAX		4900000
+#define PLDO_HIGH_FINE_STEP_UV		50000
+
+#define NLDO_UV_MIN			750000
+#define NLDO_UV_MAX			1537500
+#define NLDO_FINE_STEP_UV		12500
+
+/* SMPS masks and values */
+
+/* CTRL register */
+
+/* Legacy mode */
+#define SMPS_LEGACY_ENABLE		0x80
+#define SMPS_LEGACY_PULL_DOWN_ENABLE	0x40
+#define SMPS_LEGACY_VREF_SEL_MASK	0x20
+#define SMPS_LEGACY_VPROG_MASK		0x1F
+
+/* Advanced mode */
+#define SMPS_ADVANCED_BAND_MASK		0xC0
+#define SMPS_ADVANCED_BAND_OFF		0x00
+#define SMPS_ADVANCED_BAND_1		0x40
+#define SMPS_ADVANCED_BAND_2		0x80
+#define SMPS_ADVANCED_BAND_3		0xC0
+#define SMPS_ADVANCED_VPROG_MASK	0x3F
+
+/* Legacy mode voltage ranges */
+#define SMPS_MODE1_UV_MIN		1500000
+#define SMPS_MODE1_UV_MAX		3050000
+#define SMPS_MODE1_UV_STEP		50000
+
+#define SMPS_MODE2_UV_MIN		750000
+#define SMPS_MODE2_UV_MAX		1525000
+#define SMPS_MODE2_UV_STEP		25000
+
+#define SMPS_MODE3_UV_MIN		375000
+#define SMPS_MODE3_UV_MAX		1150000
+#define SMPS_MODE3_UV_STEP		25000
+
+/* Advanced mode voltage ranges */
+#define SMPS_BAND3_UV_MIN		1500000
+#define SMPS_BAND3_UV_MAX		3075000
+#define SMPS_BAND3_UV_STEP		25000
+
+#define SMPS_BAND2_UV_MIN		750000
+#define SMPS_BAND2_UV_MAX		1537500
+#define SMPS_BAND2_UV_STEP		12500
+
+#define SMPS_BAND1_UV_MIN		375000
+#define SMPS_BAND1_UV_MAX		1162500
+#define SMPS_BAND1_UV_STEP		12500
+
+#define SMPS_UV_MIN			SMPS_MODE3_UV_MIN
+#define SMPS_UV_MAX			SMPS_MODE1_UV_MAX
+
+/* Test2 register bank 1 */
+#define SMPS_LEGACY_VLOW_SEL_MASK	0x01
+
+/* Test2 register bank 6 */
+#define SMPS_ADVANCED_PULL_DOWN_ENABLE	0x08
+
+/* Test2 register bank 7 */
+#define SMPS_ADVANCED_MODE_MASK		0x02
+#define SMPS_ADVANCED_MODE		0x02
+#define SMPS_LEGACY_MODE		0x00
+
+#define SMPS_IN_ADVANCED_MODE(vreg) \
+	((vreg->test_reg[7] & SMPS_ADVANCED_MODE_MASK) == SMPS_ADVANCED_MODE)
+
+/* BUCK_SLEEP_CNTRL register */
+#define SMPS_PIN_CTRL_MASK		0xF0
+#define SMPS_PIN_CTRL_A1		0x80
+#define SMPS_PIN_CTRL_A0		0x40
+#define SMPS_PIN_CTRL_D1		0x20
+#define SMPS_PIN_CTRL_D0		0x10
+
+#define SMPS_PIN_CTRL_LPM_MASK		0x0F
+#define SMPS_PIN_CTRL_LPM_A1		0x08
+#define SMPS_PIN_CTRL_LPM_A0		0x04
+#define SMPS_PIN_CTRL_LPM_D1		0x02
+#define SMPS_PIN_CTRL_LPM_D0		0x01
+
+/* BUCK_CLOCK_CNTRL register */
+#define SMPS_CLK_DIVIDE2		0x40
+
+#define SMPS_CLK_CTRL_MASK		0x30
+#define SMPS_CLK_CTRL_FOLLOW_TCXO	0x00
+#define SMPS_CLK_CTRL_PWM		0x10
+#define SMPS_CLK_CTRL_PFM		0x20
+
+/* LVS masks and values */
+
+/* CTRL register */
+#define LVS_ENABLE_MASK			0x80
+#define LVS_ENABLE			0x80
+#define LVS_PULL_DOWN_ENABLE_MASK	0x40
+#define LVS_PULL_DOWN_ENABLE		0x00
+#define LVS_PULL_DOWN_DISABLE		0x40
+
+#define LVS_PIN_CTRL_MASK		0x0F
+#define LVS_PIN_CTRL_EN0		0x08
+#define LVS_PIN_CTRL_EN1		0x04
+#define LVS_PIN_CTRL_EN2		0x02
+#define LVS_PIN_CTRL_EN3		0x01
+
+/* NCP masks and values */
+
+/* CTRL register */
+#define NCP_VPROG_MASK			0x1F
+
+#define NCP_UV_MIN			1500000
+#define NCP_UV_MAX			3050000
+#define NCP_UV_STEP			50000
+
+#define GLOBAL_ENABLE_MAX		(2)
+struct pm8058_enable {
+	u16				addr;
+	u8				reg;
+};
+
+struct pm8058_vreg {
+	struct pm8058_vreg_pdata	*pdata;
+	struct regulator_dev		*rdev;
+	struct pm8058_enable		*global_enable[GLOBAL_ENABLE_MAX];
+	int				hpm_min_load;
+	int				save_uV;
+	unsigned			pc_vote;
+	unsigned			optimum;
+	unsigned			mode_initialized;
+	u16				ctrl_addr;
+	u16				test_addr;
+	u16				clk_ctrl_addr;
+	u16				sleep_ctrl_addr;
+	u8				type;
+	u8				ctrl_reg;
+	u8				test_reg[REGULATOR_TEST_BANKS_MAX];
+	u8				clk_ctrl_reg;
+	u8				sleep_ctrl_reg;
+	u8				is_nmos;
+	u8				global_enable_mask[GLOBAL_ENABLE_MAX];
+};
+
+#define LDO_M2(_id, _ctrl_addr, _test_addr, _is_nmos, _hpm_min_load, \
+	      _en0, _en0_mask, _en1, _en1_mask) \
+	[PM8058_VREG_ID_##_id] = { \
+		.ctrl_addr = _ctrl_addr, \
+		.test_addr = _test_addr, \
+		.type = REGULATOR_TYPE_LDO, \
+		.hpm_min_load = PM8058_VREG_##_hpm_min_load##_HPM_MIN_LOAD, \
+		.is_nmos = _is_nmos, \
+		.global_enable = { \
+			[0] = _en0, \
+			[1] = _en1, \
+		}, \
+		.global_enable_mask = { \
+			[0] = _en0_mask, \
+			[1] = _en1_mask, \
+		}, \
+	}
+
+#define LDO(_id, _ctrl_addr, _test_addr, _is_nmos, _hpm_min_load, \
+	    _en0, _en0_mask) \
+		LDO_M2(_id, _ctrl_addr, _test_addr, _is_nmos, _hpm_min_load, \
+		      _en0, _en0_mask, NULL, 0)
+
+#define SMPS(_id, _ctrl_addr, _test_addr, _clk_ctrl_addr, _sleep_ctrl_addr, \
+	     _hpm_min_load, _en0, _en0_mask) \
+	[PM8058_VREG_ID_##_id] = { \
+		.ctrl_addr = _ctrl_addr, \
+		.test_addr = _test_addr, \
+		.clk_ctrl_addr = _clk_ctrl_addr, \
+		.sleep_ctrl_addr = _sleep_ctrl_addr, \
+		.type = REGULATOR_TYPE_SMPS, \
+		.hpm_min_load = PM8058_VREG_##_hpm_min_load##_HPM_MIN_LOAD, \
+		.global_enable = { \
+			[0] = _en0, \
+			[1] = NULL, \
+		}, \
+		.global_enable_mask = { \
+			[0] = _en0_mask, \
+			[1] = 0, \
+		}, \
+	}
+
+#define LVS(_id, _ctrl_addr, _en0, _en0_mask) \
+	[PM8058_VREG_ID_##_id] = { \
+		.ctrl_addr = _ctrl_addr, \
+		.type = REGULATOR_TYPE_LVS, \
+		.global_enable = { \
+			[0] = _en0, \
+			[1] = NULL, \
+		}, \
+		.global_enable_mask = { \
+			[0] = _en0_mask, \
+			[1] = 0, \
+		}, \
+	}
+
+#define NCP(_id, _ctrl_addr, _test1) \
+	[PM8058_VREG_ID_##_id] = { \
+		.ctrl_addr = _ctrl_addr, \
+		.type = REGULATOR_TYPE_NCP, \
+		.test_addr = _test1, \
+		.global_enable = { \
+			[0] = NULL, \
+			[1] = NULL, \
+		}, \
+		.global_enable_mask = { \
+			[0] = 0, \
+			[1] = 0, \
+		}, \
+	}
+
+#define MASTER_ENABLE_COUNT	6
+
+#define EN_MSM			0
+#define EN_PH			1
+#define EN_RF			2
+#define EN_GRP_5_4		3
+#define EN_GRP_3_2		4
+#define EN_GRP_1_0		5
+
+/* Master regulator control registers */
+static struct pm8058_enable m_en[MASTER_ENABLE_COUNT] = {
+	[EN_MSM] = {
+		.addr = 0x018, /* VREG_EN_MSM */
+	},
+	[EN_PH] = {
+		.addr = 0x019, /* VREG_EN_PH */
+	},
+	[EN_RF] = {
+		.addr = 0x01A, /* VREG_EN_RF */
+	},
+	[EN_GRP_5_4] = {
+		.addr = 0x1C8, /* VREG_EN_MSM_GRP_5-4 */
+	},
+	[EN_GRP_3_2] = {
+		.addr = 0x1C9, /* VREG_EN_MSM_GRP_3-2 */
+	},
+	[EN_GRP_1_0] = {
+		.addr = 0x1CA, /* VREG_EN_MSM_GRP_1-0 */
+	},
+};
+
+static struct pm8058_vreg pm8058_vreg[] = {
+	/*  id   ctrl   test  n/p hpm_min  m_en		      m_en_mask */
+	LDO(L0,  0x009, 0x065, 1, LDO_150, &m_en[EN_GRP_5_4], BIT(3)),
+	LDO(L1,  0x00A, 0x066, 1, LDO_300, &m_en[EN_GRP_5_4], BIT(6) | BIT(2)),
+	LDO(L2,  0x00B, 0x067, 0, LDO_300, &m_en[EN_GRP_3_2], BIT(2)),
+	LDO(L3,  0x00C, 0x068, 0, LDO_150, &m_en[EN_GRP_1_0], BIT(1)),
+	LDO(L4,  0x00D, 0x069, 0, LDO_50,  &m_en[EN_MSM],     0),
+	LDO(L5,  0x00E, 0x06A, 0, LDO_300, &m_en[EN_GRP_1_0], BIT(7)),
+	LDO(L6,  0x00F, 0x06B, 0, LDO_50,  &m_en[EN_GRP_1_0], BIT(2)),
+	LDO(L7,  0x010, 0x06C, 0, LDO_50,  &m_en[EN_GRP_3_2], BIT(3)),
+	LDO(L8,  0x011, 0x06D, 0, LDO_300, &m_en[EN_PH],      BIT(7)),
+	LDO(L9,  0x012, 0x06E, 0, LDO_300, &m_en[EN_GRP_1_0], BIT(3)),
+	LDO(L10, 0x013, 0x06F, 0, LDO_300, &m_en[EN_GRP_3_2], BIT(4)),
+	LDO(L11, 0x014, 0x070, 0, LDO_150, &m_en[EN_PH],      BIT(4)),
+	LDO(L12, 0x015, 0x071, 0, LDO_150, &m_en[EN_PH],      BIT(3)),
+	LDO(L13, 0x016, 0x072, 0, LDO_300, &m_en[EN_GRP_3_2], BIT(1)),
+	LDO(L14, 0x017, 0x073, 0, LDO_300, &m_en[EN_GRP_1_0], BIT(5)),
+	LDO(L15, 0x089, 0x0E5, 0, LDO_300, &m_en[EN_GRP_1_0], BIT(4)),
+	LDO(L16, 0x08A, 0x0E6, 0, LDO_300, &m_en[EN_GRP_3_2], BIT(0)),
+	LDO(L17, 0x08B, 0x0E7, 0, LDO_150, &m_en[EN_RF],      BIT(7)),
+	LDO(L18, 0x11D, 0x125, 0, LDO_150, &m_en[EN_RF],      BIT(6)),
+	LDO(L19, 0x11E, 0x126, 0, LDO_150, &m_en[EN_RF],      BIT(5)),
+	LDO(L20, 0x11F, 0x127, 0, LDO_150, &m_en[EN_RF],      BIT(4)),
+	LDO_M2(L21, 0x120, 0x128, 1, LDO_150, &m_en[EN_GRP_5_4], BIT(1),
+		&m_en[EN_GRP_1_0], BIT(6)),
+	LDO(L22, 0x121, 0x129, 1, LDO_300, &m_en[EN_GRP_3_2], BIT(7)),
+	LDO(L23, 0x122, 0x12A, 1, LDO_300, &m_en[EN_GRP_5_4], BIT(0)),
+	LDO(L24, 0x123, 0x12B, 1, LDO_150, &m_en[EN_RF],      BIT(3)),
+	LDO(L25, 0x124, 0x12C, 1, LDO_150, &m_en[EN_RF],      BIT(2)),
+
+	/*   id  ctrl   test2  clk    sleep hpm_min  m_en	    m_en_mask */
+	SMPS(S0, 0x004, 0x084, 0x1D1, 0x1D8, SMPS, &m_en[EN_MSM],    BIT(7)),
+	SMPS(S1, 0x005, 0x085, 0x1D2, 0x1DB, SMPS, &m_en[EN_MSM],    BIT(6)),
+	SMPS(S2, 0x110, 0x119, 0x1D3, 0x1DE, SMPS, &m_en[EN_GRP_5_4], BIT(5)),
+	SMPS(S3, 0x111, 0x11A, 0x1D4, 0x1E1, SMPS, &m_en[EN_GRP_5_4],
+		BIT(7) | BIT(4)),
+	SMPS(S4, 0x112, 0x11B, 0x1D5, 0x1E4, SMPS, &m_en[EN_GRP_3_2], BIT(5)),
+
+	/*  id	  ctrl   m_en		    m_en_mask */
+	LVS(LVS0, 0x12D, &m_en[EN_RF],      BIT(1)),
+	LVS(LVS1, 0x12F, &m_en[EN_GRP_1_0], BIT(0)),
+
+	/*  id   ctrl   test1 */
+	NCP(NCP, 0x090, 0x0EC),
+};
+
+static int pm8058_smps_set_voltage_advanced(struct pm8058_vreg *vreg,
+					struct pm8058_chip *chip, int uV,
+					int force_on);
+static int pm8058_smps_set_voltage_legacy(struct pm8058_vreg *vreg,
+					struct pm8058_chip *chip, int uV);
+static int _pm8058_vreg_is_enabled(struct pm8058_vreg *vreg);
+
+static unsigned int pm8058_vreg_get_mode(struct regulator_dev *dev);
+
+static void print_write_error(struct pm8058_vreg *vreg, int rc,
+				const char *func);
+
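+/*
+ * Read-modify-write helper: merge val under mask into the cached register
+ * image *reg_save, write the result to the PMIC only when it actually
+ * changes, and update the cache on success.
+ */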
+static int pm8058_vreg_write(struct pm8058_chip *chip,
+		u16 addr, u8 val, u8 mask, u8 *reg_save)
+{
+	int rc = 0;
+	u8 reg;
+
+	reg = (*reg_save & ~mask) | (val & mask);
+	if (reg != *reg_save)
+		rc = pm8058_write(chip, addr, &reg, 1);
+	if (rc)
+		pr_err("%s: pm8058_write failed, rc=%d\n", __func__, rc);
+	else
+		*reg_save = reg;
+	return rc;
+}
+
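+/*
+ * Returns nonzero if any master enable register associated with this
+ * regulator currently has its enable bit set.
+ */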
+static int pm8058_vreg_is_global_enabled(struct pm8058_vreg *vreg)
+{
+	int ret = 0, i;
+
+	for (i = 0;
+	     (i < GLOBAL_ENABLE_MAX) && !ret && vreg->global_enable[i]; i++)
+		ret = vreg->global_enable[i]->reg &
+			vreg->global_enable_mask[i];
+
+	return ret;
+}
+
+static int pm8058_vreg_set_global_enable(struct pm8058_vreg *vreg,
+					 struct pm8058_chip *chip, int on)
+{
+	int rc = 0, i;
+
+	for (i = 0;
+	     (i < GLOBAL_ENABLE_MAX) && !rc && vreg->global_enable[i]; i++)
+		rc = pm8058_vreg_write(chip, vreg->global_enable[i]->addr,
+					(on ? vreg->global_enable_mask[i] : 0),
+					vreg->global_enable_mask[i],
+					&vreg->global_enable[i]->reg);
+
+	return rc;
+}
+
+static int pm8058_vreg_using_pin_ctrl(struct pm8058_vreg *vreg)
+{
+	int ret = 0;
+
+	switch (vreg->type) {
+	case REGULATOR_TYPE_LDO:
+		ret = ((vreg->test_reg[5] & LDO_TEST_PIN_CTRL_MASK) << 4)
+			| (vreg->test_reg[6] & LDO_TEST_PIN_CTRL_LPM_MASK);
+		break;
+	case REGULATOR_TYPE_SMPS:
+		ret = vreg->sleep_ctrl_reg
+			& (SMPS_PIN_CTRL_MASK | SMPS_PIN_CTRL_LPM_MASK);
+		break;
+	case REGULATOR_TYPE_LVS:
+		ret = vreg->ctrl_reg & LVS_PIN_CTRL_MASK;
+		break;
+	}
+
+	return ret;
+}
+
+static int pm8058_vreg_set_pin_ctrl(struct pm8058_vreg *vreg,
+		struct pm8058_chip *chip, int on)
+{
+	int rc = 0, bank;
+	u8 val = 0, mask;
+	unsigned pc = vreg->pdata->pin_ctrl;
+	unsigned pf = vreg->pdata->pin_fn;
+
+	switch (vreg->type) {
+	case REGULATOR_TYPE_LDO:
+		if (on) {
+			if (pc & PM8058_VREG_PIN_CTRL_D0)
+				val |= LDO_TEST_PIN_CTRL_EN0;
+			if (pc & PM8058_VREG_PIN_CTRL_D1)
+				val |= LDO_TEST_PIN_CTRL_EN1;
+			if (pc & PM8058_VREG_PIN_CTRL_A0)
+				val |= LDO_TEST_PIN_CTRL_EN2;
+			if (pc & PM8058_VREG_PIN_CTRL_A1)
+				val |= LDO_TEST_PIN_CTRL_EN3;
+
+			bank = (pf == PM8058_VREG_PIN_FN_ENABLE ? 5 : 6);
+			rc = pm8058_vreg_write(chip, vreg->test_addr,
+				val | REGULATOR_BANK_SEL(bank)
+				  | REGULATOR_BANK_WRITE,
+				LDO_TEST_PIN_CTRL_MASK | REGULATOR_BANK_MASK,
+				&vreg->test_reg[bank]);
+			if (rc)
+				goto bail;
+
+			val = LDO_TEST_LPM_SEL_CTRL | REGULATOR_BANK_WRITE
+				| REGULATOR_BANK_SEL(0);
+			mask = LDO_TEST_LPM_MASK | REGULATOR_BANK_MASK;
+			rc = pm8058_vreg_write(chip, vreg->test_addr, val, mask,
+						&vreg->test_reg[0]);
+			if (rc)
+				goto bail;
+
+			if (pf == PM8058_VREG_PIN_FN_ENABLE) {
+				/* Pin control ON/OFF */
+				rc = pm8058_vreg_write(chip, vreg->ctrl_addr,
+					LDO_CTRL_PM_HPM,
+					LDO_ENABLE_MASK | LDO_CTRL_PM_MASK,
+					&vreg->ctrl_reg);
+				if (rc)
+					goto bail;
+				rc = pm8058_vreg_set_global_enable(vreg, chip,
+								   0);
+				if (rc)
+					goto bail;
+			} else {
+				/* Pin control LPM/HPM */
+				rc = pm8058_vreg_write(chip, vreg->ctrl_addr,
+					LDO_ENABLE | LDO_CTRL_PM_LPM,
+					LDO_ENABLE_MASK | LDO_CTRL_PM_MASK,
+					&vreg->ctrl_reg);
+				if (rc)
+					goto bail;
+			}
+		} else {
+			/* Pin control off */
+			rc = pm8058_vreg_write(chip, vreg->test_addr,
+				REGULATOR_BANK_SEL(5) | REGULATOR_BANK_WRITE,
+				LDO_TEST_PIN_CTRL_MASK | REGULATOR_BANK_MASK,
+				&vreg->test_reg[5]);
+			if (rc)
+				goto bail;
+
+			rc = pm8058_vreg_write(chip, vreg->test_addr,
+				REGULATOR_BANK_SEL(6) | REGULATOR_BANK_WRITE,
+				LDO_TEST_PIN_CTRL_MASK | REGULATOR_BANK_MASK,
+				&vreg->test_reg[6]);
+			if (rc)
+				goto bail;
+		}
+		break;
+
+	case REGULATOR_TYPE_SMPS:
+		if (on) {
+			if (pf == PM8058_VREG_PIN_FN_ENABLE) {
+				/* Pin control ON/OFF */
+				if (pc & PM8058_VREG_PIN_CTRL_D0)
+					val |= SMPS_PIN_CTRL_D0;
+				if (pc & PM8058_VREG_PIN_CTRL_D1)
+					val |= SMPS_PIN_CTRL_D1;
+				if (pc & PM8058_VREG_PIN_CTRL_A0)
+					val |= SMPS_PIN_CTRL_A0;
+				if (pc & PM8058_VREG_PIN_CTRL_A1)
+					val |= SMPS_PIN_CTRL_A1;
+			} else {
+				/* Pin control LPM/HPM */
+				if (pc & PM8058_VREG_PIN_CTRL_D0)
+					val |= SMPS_PIN_CTRL_LPM_D0;
+				if (pc & PM8058_VREG_PIN_CTRL_D1)
+					val |= SMPS_PIN_CTRL_LPM_D1;
+				if (pc & PM8058_VREG_PIN_CTRL_A0)
+					val |= SMPS_PIN_CTRL_LPM_A0;
+				if (pc & PM8058_VREG_PIN_CTRL_A1)
+					val |= SMPS_PIN_CTRL_LPM_A1;
+			}
+			rc = pm8058_vreg_set_global_enable(vreg, chip, 0);
+			if (rc)
+				goto bail;
+
+			rc = pm8058_smps_set_voltage_legacy(vreg, chip,
+							vreg->save_uV);
+			if (rc)
+				goto bail;
+
+			rc = pm8058_vreg_write(chip, vreg->sleep_ctrl_addr, val,
+				SMPS_PIN_CTRL_MASK | SMPS_PIN_CTRL_LPM_MASK,
+				&vreg->sleep_ctrl_reg);
+			if (rc)
+				goto bail;
+
+			rc = pm8058_vreg_write(chip, vreg->ctrl_addr,
+				(pf == PM8058_VREG_PIN_FN_ENABLE
+				       ? 0 : SMPS_LEGACY_ENABLE),
+				SMPS_LEGACY_ENABLE, &vreg->ctrl_reg);
+			if (rc)
+				goto bail;
+
+			rc = pm8058_vreg_write(chip, vreg->clk_ctrl_addr,
+				(pf == PM8058_VREG_PIN_FN_ENABLE
+				       ? SMPS_CLK_CTRL_PWM : SMPS_CLK_CTRL_PFM),
+				SMPS_CLK_CTRL_MASK, &vreg->clk_ctrl_reg);
+			if (rc)
+				goto bail;
+		} else {
+			/* Pin control off */
+			if (!SMPS_IN_ADVANCED_MODE(vreg)) {
+				if (_pm8058_vreg_is_enabled(vreg))
+					val = SMPS_LEGACY_ENABLE;
+				rc = pm8058_vreg_write(chip, vreg->ctrl_addr,
+					val, SMPS_LEGACY_ENABLE,
+					&vreg->ctrl_reg);
+				if (rc)
+					goto bail;
+			}
+
+			rc = pm8058_vreg_write(chip, vreg->sleep_ctrl_addr, 0,
+				SMPS_PIN_CTRL_MASK | SMPS_PIN_CTRL_LPM_MASK,
+				&vreg->sleep_ctrl_reg);
+			if (rc)
+				goto bail;
+
+			rc = pm8058_smps_set_voltage_advanced(vreg, chip,
+							 vreg->save_uV, 0);
+			if (rc)
+				goto bail;
+		}
+		break;
+
+	case REGULATOR_TYPE_LVS:
+		if (on) {
+			if (pc & PM8058_VREG_PIN_CTRL_D0)
+				val |= LVS_PIN_CTRL_EN0;
+			if (pc & PM8058_VREG_PIN_CTRL_D1)
+				val |= LVS_PIN_CTRL_EN1;
+			if (pc & PM8058_VREG_PIN_CTRL_A0)
+				val |= LVS_PIN_CTRL_EN2;
+			if (pc & PM8058_VREG_PIN_CTRL_A1)
+				val |= LVS_PIN_CTRL_EN3;
+
+			rc = pm8058_vreg_write(chip, vreg->ctrl_addr, val,
+					LVS_PIN_CTRL_MASK | LVS_ENABLE_MASK,
+					&vreg->ctrl_reg);
+			if (rc)
+				goto bail;
+
+			rc = pm8058_vreg_set_global_enable(vreg, chip, 0);
+			if (rc)
+				goto bail;
+		} else {
+			/* Pin control off */
+			if (_pm8058_vreg_is_enabled(vreg))
+				val = LVS_ENABLE;
+
+			rc = pm8058_vreg_write(chip, vreg->ctrl_addr, val,
+					LVS_ENABLE_MASK | LVS_PIN_CTRL_MASK,
+					&vreg->ctrl_reg);
+			if (rc)
+				goto bail;
+
+		}
+		break;
+	}
+
+bail:
+	if (rc)
+		print_write_error(vreg, rc, __func__);
+
+	return rc;
+}
+
+static int pm8058_vreg_enable(struct regulator_dev *dev)
+{
+	struct pm8058_vreg *vreg = rdev_get_drvdata(dev);
+	struct pm8058_chip *chip = dev_get_drvdata(dev->dev.parent);
+	int mode;
+	int rc = 0;
+
+	mode = pm8058_vreg_get_mode(dev);
+
+	if (mode == REGULATOR_MODE_IDLE) {
+		/* Turn on pin control. */
+		rc = pm8058_vreg_set_pin_ctrl(vreg, chip, 1);
+		if (rc)
+			goto bail;
+		return rc;
+	}
+	if (vreg->type == REGULATOR_TYPE_SMPS && SMPS_IN_ADVANCED_MODE(vreg))
+		rc = pm8058_smps_set_voltage_advanced(vreg, chip,
+							vreg->save_uV, 1);
+	else
+		rc = pm8058_vreg_write(chip, vreg->ctrl_addr, REGULATOR_EN_MASK,
+			REGULATOR_EN_MASK, &vreg->ctrl_reg);
+bail:
+	if (rc)
+		print_write_error(vreg, rc, __func__);
+
+	return rc;
+}
+
+static int _pm8058_vreg_is_enabled(struct pm8058_vreg *vreg)
+{
+	/*
+	 * All regulator types except advanced mode SMPS have their enable bit
+	 * in bit 7 of the control register.  Global enable and pin control
+	 * likewise do not apply to advanced mode SMPS.
+	 */
+	if (!(vreg->type == REGULATOR_TYPE_SMPS && SMPS_IN_ADVANCED_MODE(vreg))
+		&& ((vreg->ctrl_reg & REGULATOR_EN_MASK)
+			|| pm8058_vreg_is_global_enabled(vreg)
+			|| pm8058_vreg_using_pin_ctrl(vreg)))
+		return 1;
+	else if (vreg->type == REGULATOR_TYPE_SMPS
+		&& SMPS_IN_ADVANCED_MODE(vreg)
+		&& ((vreg->ctrl_reg & SMPS_ADVANCED_BAND_MASK)
+			!= SMPS_ADVANCED_BAND_OFF))
+		return 1;
+
+	return 0;
+}
+
+static int pm8058_vreg_is_enabled(struct regulator_dev *dev)
+{
+	struct pm8058_vreg *vreg = rdev_get_drvdata(dev);
+
+	return _pm8058_vreg_is_enabled(vreg);
+}
+
+static int pm8058_vreg_disable(struct regulator_dev *dev)
+{
+	struct pm8058_vreg *vreg = rdev_get_drvdata(dev);
+	struct pm8058_chip *chip = dev_get_drvdata(dev->dev.parent);
+	int rc = 0;
+
+	/* Disable in global control register. */
+	rc = pm8058_vreg_set_global_enable(vreg, chip, 0);
+	if (rc)
+		goto bail;
+
+	/* Turn off pin control. */
+	rc = pm8058_vreg_set_pin_ctrl(vreg, chip, 0);
+	if (rc)
+		goto bail;
+
+	/* Disable in local control register. */
+	if (vreg->type == REGULATOR_TYPE_SMPS && SMPS_IN_ADVANCED_MODE(vreg))
+		rc = pm8058_vreg_write(chip, vreg->ctrl_addr,
+			SMPS_ADVANCED_BAND_OFF, SMPS_ADVANCED_BAND_MASK,
+			&vreg->ctrl_reg);
+	else
+		rc = pm8058_vreg_write(chip, vreg->ctrl_addr, 0,
+			REGULATOR_EN_MASK, &vreg->ctrl_reg);
+
+bail:
+	if (rc)
+		print_write_error(vreg, rc, __func__);
+
+	return rc;
+}
+
+static int pm8058_pldo_set_voltage(struct pm8058_chip *chip,
+		struct pm8058_vreg *vreg, int uV)
+{
+	int vmin, rc = 0;
+	unsigned vprog, fine_step;
+	u8 range_ext, range_sel, fine_step_reg;
+
+	if (uV < PLDO_LOW_UV_MIN || uV > PLDO_HIGH_UV_MAX)
+		return -EINVAL;
+
+	if (uV < PLDO_LOW_UV_MAX + PLDO_LOW_FINE_STEP_UV) {
+		vmin = PLDO_LOW_UV_MIN;
+		fine_step = PLDO_LOW_FINE_STEP_UV;
+		range_ext = 0;
+		range_sel = LDO_TEST_RANGE_SEL_MASK;
+	} else if (uV < PLDO_NORM_UV_MAX + PLDO_NORM_FINE_STEP_UV) {
+		vmin = PLDO_NORM_UV_MIN;
+		fine_step = PLDO_NORM_FINE_STEP_UV;
+		range_ext = 0;
+		range_sel = 0;
+	} else {
+		vmin = PLDO_HIGH_UV_MIN;
+		fine_step = PLDO_HIGH_FINE_STEP_UV;
+		range_ext = LDO_TEST_RANGE_EXT_MASK;
+		range_sel = 0;
+	}
+
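+	/*
+	 * The LSB of the computed step count is programmed via the fine step
+	 * bit in test register bank 2; the remaining bits become VPROG in the
+	 * control register.
+	 */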
+	vprog = (uV - vmin) / fine_step;
+	fine_step_reg = (vprog & 1) << LDO_TEST_FINE_STEP_SHIFT;
+	vprog >>= 1;
+
+	/*
+	 * Disable program voltage update if range extension, range select,
+	 * or fine step have changed and the regulator is enabled.
+	 */
+	if (_pm8058_vreg_is_enabled(vreg) &&
+		(((range_ext ^ vreg->test_reg[4]) & LDO_TEST_RANGE_EXT_MASK)
+		|| ((range_sel ^ vreg->test_reg[2]) & LDO_TEST_RANGE_SEL_MASK)
+		|| ((fine_step_reg ^ vreg->test_reg[2])
+			& LDO_TEST_FINE_STEP_MASK))) {
+		rc = pm8058_vreg_write(chip, vreg->test_addr,
+			REGULATOR_BANK_SEL(2) | REGULATOR_BANK_WRITE,
+			REGULATOR_BANK_MASK | LDO_TEST_VPROG_UPDATE_MASK,
+			&vreg->test_reg[2]);
+		if (rc)
+			goto bail;
+	}
+
+	/* Write new voltage. */
+	rc = pm8058_vreg_write(chip, vreg->ctrl_addr, vprog,
+				LDO_CTRL_VPROG_MASK, &vreg->ctrl_reg);
+	if (rc)
+		goto bail;
+
+	/* Write range extension. */
+	rc = pm8058_vreg_write(chip, vreg->test_addr,
+			range_ext | REGULATOR_BANK_SEL(4)
+			 | REGULATOR_BANK_WRITE,
+			LDO_TEST_RANGE_EXT_MASK | REGULATOR_BANK_MASK,
+			&vreg->test_reg[4]);
+	if (rc)
+		goto bail;
+
+	/* Write fine step, range select and program voltage update. */
+	rc = pm8058_vreg_write(chip, vreg->test_addr,
+			fine_step_reg | range_sel | REGULATOR_BANK_SEL(2)
+			 | REGULATOR_BANK_WRITE | LDO_TEST_VPROG_UPDATE_MASK,
+			LDO_TEST_FINE_STEP_MASK | LDO_TEST_RANGE_SEL_MASK
+			 | REGULATOR_BANK_MASK | LDO_TEST_VPROG_UPDATE_MASK,
+			&vreg->test_reg[2]);
+bail:
+	if (rc)
+		print_write_error(vreg, rc, __func__);
+
+	return rc;
+}
+
+static int pm8058_nldo_set_voltage(struct pm8058_chip *chip,
+		struct pm8058_vreg *vreg, int uV)
+{
+	unsigned vprog, fine_step_reg;
+	int rc;
+
+	if (uV < NLDO_UV_MIN || uV > NLDO_UV_MAX)
+		return -EINVAL;
+
+	vprog = (uV - NLDO_UV_MIN) / NLDO_FINE_STEP_UV;
+	fine_step_reg = (vprog & 1) << LDO_TEST_FINE_STEP_SHIFT;
+	vprog >>= 1;
+
+	/* Write new voltage. */
+	rc = pm8058_vreg_write(chip, vreg->ctrl_addr, vprog,
+				LDO_CTRL_VPROG_MASK, &vreg->ctrl_reg);
+	if (rc)
+		goto bail;
+
+	/* Write fine step. */
+	rc = pm8058_vreg_write(chip, vreg->test_addr,
+			fine_step_reg | REGULATOR_BANK_SEL(2)
+			 | REGULATOR_BANK_WRITE | LDO_TEST_VPROG_UPDATE_MASK,
+			LDO_TEST_FINE_STEP_MASK | REGULATOR_BANK_MASK
+			 | LDO_TEST_VPROG_UPDATE_MASK,
+		       &vreg->test_reg[2]);
+bail:
+	if (rc)
+		print_write_error(vreg, rc, __func__);
+
+	return rc;
+}
+
+static int pm8058_ldo_set_voltage(struct regulator_dev *dev,
+		int min_uV, int max_uV, unsigned *selector)
+{
+	struct pm8058_vreg *vreg = rdev_get_drvdata(dev);
+	struct pm8058_chip *chip = dev_get_drvdata(dev->dev.parent);
+
+	if (vreg->is_nmos)
+		return pm8058_nldo_set_voltage(chip, vreg, min_uV);
+	else
+		return pm8058_pldo_set_voltage(chip, vreg, min_uV);
+}
+
+static int pm8058_pldo_get_voltage(struct pm8058_vreg *vreg)
+{
+	int vmin, fine_step;
+	u8 range_ext, range_sel, vprog, fine_step_reg;
+
+	fine_step_reg = vreg->test_reg[2] & LDO_TEST_FINE_STEP_MASK;
+	range_sel = vreg->test_reg[2] & LDO_TEST_RANGE_SEL_MASK;
+	range_ext = vreg->test_reg[4] & LDO_TEST_RANGE_EXT_MASK;
+	vprog = vreg->ctrl_reg & LDO_CTRL_VPROG_MASK;
+
+	vprog = (vprog << 1) | (fine_step_reg >> LDO_TEST_FINE_STEP_SHIFT);
+
+	if (range_sel) {
+		/* low range mode */
+		fine_step = PLDO_LOW_FINE_STEP_UV;
+		vmin = PLDO_LOW_UV_MIN;
+	} else if (!range_ext) {
+		/* normal mode */
+		fine_step = PLDO_NORM_FINE_STEP_UV;
+		vmin = PLDO_NORM_UV_MIN;
+	} else {
+		/* high range mode */
+		fine_step = PLDO_HIGH_FINE_STEP_UV;
+		vmin = PLDO_HIGH_UV_MIN;
+	}
+
+	return fine_step * vprog + vmin;
+}
+
+static int pm8058_nldo_get_voltage(struct pm8058_vreg *vreg)
+{
+	u8 vprog, fine_step_reg;
+
+	fine_step_reg = vreg->test_reg[2] & LDO_TEST_FINE_STEP_MASK;
+	vprog = vreg->ctrl_reg & LDO_CTRL_VPROG_MASK;
+
+	vprog = (vprog << 1) | (fine_step_reg >> LDO_TEST_FINE_STEP_SHIFT);
+
+	return NLDO_FINE_STEP_UV * vprog + NLDO_UV_MIN;
+}
+
+static int pm8058_ldo_get_voltage(struct regulator_dev *dev)
+{
+	struct pm8058_vreg *vreg = rdev_get_drvdata(dev);
+
+	if (vreg->is_nmos)
+		return pm8058_nldo_get_voltage(vreg);
+	else
+		return pm8058_pldo_get_voltage(vreg);
+}
+
+static int pm8058_smps_get_voltage_advanced(struct pm8058_vreg *vreg)
+{
+	u8 vprog, band;
+	int uV = 0;
+
+	vprog = vreg->ctrl_reg & SMPS_ADVANCED_VPROG_MASK;
+	band = vreg->ctrl_reg & SMPS_ADVANCED_BAND_MASK;
+
+	if (band == SMPS_ADVANCED_BAND_1)
+		uV = vprog * SMPS_BAND1_UV_STEP + SMPS_BAND1_UV_MIN;
+	else if (band == SMPS_ADVANCED_BAND_2)
+		uV = vprog * SMPS_BAND2_UV_STEP + SMPS_BAND2_UV_MIN;
+	else if (band == SMPS_ADVANCED_BAND_3)
+		uV = vprog * SMPS_BAND3_UV_STEP + SMPS_BAND3_UV_MIN;
+	else
+		uV = vreg->save_uV;
+
+	return uV;
+}
+
+static int pm8058_smps_get_voltage_legacy(struct pm8058_vreg *vreg)
+{
+	u8 vlow, vref, vprog;
+	int uV;
+
+	vlow = vreg->test_reg[1] & SMPS_LEGACY_VLOW_SEL_MASK;
+	vref = vreg->ctrl_reg & SMPS_LEGACY_VREF_SEL_MASK;
+	vprog = vreg->ctrl_reg & SMPS_LEGACY_VPROG_MASK;
+
+	if (vlow && vref) {
+		/* mode 3 */
+		uV = vprog * SMPS_MODE3_UV_STEP + SMPS_MODE3_UV_MIN;
+	} else if (vref) {
+		/* mode 2 */
+		uV = vprog * SMPS_MODE2_UV_STEP + SMPS_MODE2_UV_MIN;
+	} else {
+		/* mode 1 */
+		uV = vprog * SMPS_MODE1_UV_STEP + SMPS_MODE1_UV_MIN;
+	}
+
+	return uV;
+}
+
+static int _pm8058_smps_get_voltage(struct pm8058_vreg *vreg)
+{
+	if (SMPS_IN_ADVANCED_MODE(vreg))
+		return pm8058_smps_get_voltage_advanced(vreg);
+
+	return pm8058_smps_get_voltage_legacy(vreg);
+}
+
+static int pm8058_smps_get_voltage(struct regulator_dev *dev)
+{
+	struct pm8058_vreg *vreg = rdev_get_drvdata(dev);
+
+	return _pm8058_smps_get_voltage(vreg);
+}
+
+static int pm8058_smps_set_voltage_advanced(struct pm8058_vreg *vreg,
+					struct pm8058_chip *chip, int uV,
+					int force_on)
+{
+	u8 vprog, band;
+	int rc, new_uV;
+
+	if (uV < SMPS_BAND1_UV_MAX + SMPS_BAND1_UV_STEP) {
+		vprog = ((uV - SMPS_BAND1_UV_MIN) / SMPS_BAND1_UV_STEP);
+		band = SMPS_ADVANCED_BAND_1;
+		new_uV = SMPS_BAND1_UV_MIN + vprog * SMPS_BAND1_UV_STEP;
+	} else if (uV < SMPS_BAND2_UV_MAX + SMPS_BAND2_UV_STEP) {
+		vprog = ((uV - SMPS_BAND2_UV_MIN) / SMPS_BAND2_UV_STEP);
+		band = SMPS_ADVANCED_BAND_2;
+		new_uV = SMPS_BAND2_UV_MIN + vprog * SMPS_BAND2_UV_STEP;
+	} else {
+		vprog = ((uV - SMPS_BAND3_UV_MIN) / SMPS_BAND3_UV_STEP);
+		band = SMPS_ADVANCED_BAND_3;
+		new_uV = SMPS_BAND3_UV_MIN + vprog * SMPS_BAND3_UV_STEP;
+	}
+
+	/* Do not set band if regulator currently disabled. */
+	if (!_pm8058_vreg_is_enabled(vreg) && !force_on)
+		band = SMPS_ADVANCED_BAND_OFF;
+
+	/* Set advanced mode bit to 1. */
+	rc = pm8058_vreg_write(chip, vreg->test_addr, SMPS_ADVANCED_MODE
+		| REGULATOR_BANK_WRITE | REGULATOR_BANK_SEL(7),
+		SMPS_ADVANCED_MODE_MASK | REGULATOR_BANK_MASK,
+		&vreg->test_reg[7]);
+	if (rc)
+		goto bail;
+
+	/* Set voltage and voltage band. */
+	rc = pm8058_vreg_write(chip, vreg->ctrl_addr, band | vprog,
+			SMPS_ADVANCED_BAND_MASK | SMPS_ADVANCED_VPROG_MASK,
+			&vreg->ctrl_reg);
+	if (rc)
+		goto bail;
+
+	vreg->save_uV = new_uV;
+
+bail:
+	return rc;
+}
+
+static int pm8058_smps_set_voltage_legacy(struct pm8058_vreg *vreg,
+					struct pm8058_chip *chip, int uV)
+{
+	u8 vlow, vref, vprog, pd, en;
+	int rc;
+
+	if (uV < SMPS_MODE3_UV_MAX + SMPS_MODE3_UV_STEP) {
+		vprog = ((uV - SMPS_MODE3_UV_MIN) / SMPS_MODE3_UV_STEP);
+		vref = SMPS_LEGACY_VREF_SEL_MASK;
+		vlow = SMPS_LEGACY_VLOW_SEL_MASK;
+	} else if (uV < SMPS_MODE2_UV_MAX + SMPS_MODE2_UV_STEP) {
+		vprog = ((uV - SMPS_MODE2_UV_MIN) / SMPS_MODE2_UV_STEP);
+		vref = SMPS_LEGACY_VREF_SEL_MASK;
+		vlow = 0;
+	} else {
+		vprog = ((uV - SMPS_MODE1_UV_MIN) / SMPS_MODE1_UV_STEP);
+		vref = 0;
+		vlow = 0;
+	}
+
+	/* set vlow bit for ultra low voltage mode */
+	rc = pm8058_vreg_write(chip, vreg->test_addr,
+		vlow | REGULATOR_BANK_WRITE | REGULATOR_BANK_SEL(1),
+		REGULATOR_BANK_MASK | SMPS_LEGACY_VLOW_SEL_MASK,
+		&vreg->test_reg[1]);
+	if (rc)
+		goto bail;
+
+	/* Set advanced mode bit to 0. */
+	rc = pm8058_vreg_write(chip, vreg->test_addr, SMPS_LEGACY_MODE
+		| REGULATOR_BANK_WRITE | REGULATOR_BANK_SEL(7),
+		SMPS_ADVANCED_MODE_MASK | REGULATOR_BANK_MASK,
+		&vreg->test_reg[7]);
+	if (rc)
+		goto bail;
+
+	en = (_pm8058_vreg_is_enabled(vreg) ? SMPS_LEGACY_ENABLE : 0);
+	pd = (vreg->pdata->pull_down_enable ? SMPS_LEGACY_PULL_DOWN_ENABLE : 0);
+
+	/* Set voltage (and the rest of the control register). */
+	rc = pm8058_vreg_write(chip, vreg->ctrl_addr, en | pd | vref | vprog,
+		SMPS_LEGACY_ENABLE | SMPS_LEGACY_PULL_DOWN_ENABLE
+		| SMPS_LEGACY_VREF_SEL_MASK | SMPS_LEGACY_VPROG_MASK,
+		&vreg->ctrl_reg);
+
+	vreg->save_uV = pm8058_smps_get_voltage_legacy(vreg);
+
+bail:
+	return rc;
+}
+
+static int pm8058_smps_set_voltage(struct regulator_dev *dev,
+		int min_uV, int max_uV, unsigned *selector)
+{
+	struct pm8058_vreg *vreg = rdev_get_drvdata(dev);
+	struct pm8058_chip *chip = dev_get_drvdata(dev->dev.parent);
+	int rc = 0;
+
+	if (min_uV < SMPS_UV_MIN || min_uV > SMPS_UV_MAX)
+		return -EINVAL;
+
+	if (SMPS_IN_ADVANCED_MODE(vreg))
+		rc = pm8058_smps_set_voltage_advanced(vreg, chip, min_uV, 0);
+	else
+		rc = pm8058_smps_set_voltage_legacy(vreg, chip, min_uV);
+
+	if (rc)
+		print_write_error(vreg, rc, __func__);
+
+	return rc;
+}
+
+static int pm8058_ncp_set_voltage(struct regulator_dev *dev,
+		int min_uV, int max_uV, unsigned *selector)
+{
+	struct pm8058_vreg *vreg = rdev_get_drvdata(dev);
+	struct pm8058_chip *chip = dev_get_drvdata(dev->dev.parent);
+	int rc;
+	u8 val;
+
+	if (min_uV < NCP_UV_MIN || min_uV > NCP_UV_MAX)
+		return -EINVAL;
+
+	val = (min_uV - NCP_UV_MIN) / NCP_UV_STEP;
+
+	/* voltage setting */
+	rc = pm8058_vreg_write(chip, vreg->ctrl_addr, val, NCP_VPROG_MASK,
+			&vreg->ctrl_reg);
+	if (rc)
+		print_write_error(vreg, rc, __func__);
+
+	return rc;
+}
+
+static int pm8058_ncp_get_voltage(struct regulator_dev *dev)
+{
+	struct pm8058_vreg *vreg = rdev_get_drvdata(dev);
+	u8 vprog = vreg->ctrl_reg & NCP_VPROG_MASK;
+	return NCP_UV_MIN + vprog * NCP_UV_STEP;
+}
+
+static int pm8058_ldo_set_mode(struct pm8058_vreg *vreg,
+		struct pm8058_chip *chip, unsigned int mode)
+{
+	int rc = 0;
+	u8 mask, val;
+
+	switch (mode) {
+	case REGULATOR_MODE_FAST:
+		/* HPM */
+		val = (_pm8058_vreg_is_enabled(vreg) ? LDO_ENABLE : 0)
+			| LDO_CTRL_PM_HPM;
+		mask = LDO_ENABLE_MASK | LDO_CTRL_PM_MASK;
+		rc = pm8058_vreg_write(chip, vreg->ctrl_addr, val, mask,
+					&vreg->ctrl_reg);
+		if (rc)
+			goto bail;
+
+		if (pm8058_vreg_using_pin_ctrl(vreg))
+			rc = pm8058_vreg_set_pin_ctrl(vreg, chip, 0);
+		if (rc)
+			goto bail;
+		break;
+
+	case REGULATOR_MODE_STANDBY:
+		/* LPM */
+		val = (_pm8058_vreg_is_enabled(vreg) ? LDO_ENABLE : 0)
+			| LDO_CTRL_PM_LPM;
+		mask = LDO_ENABLE_MASK | LDO_CTRL_PM_MASK;
+		rc = pm8058_vreg_write(chip, vreg->ctrl_addr, val, mask,
+					&vreg->ctrl_reg);
+		if (rc)
+			goto bail;
+
+		val = LDO_TEST_LPM_SEL_CTRL | REGULATOR_BANK_WRITE
+			| REGULATOR_BANK_SEL(0);
+		mask = LDO_TEST_LPM_MASK | REGULATOR_BANK_MASK;
+		rc = pm8058_vreg_write(chip, vreg->test_addr, val, mask,
+					&vreg->test_reg[0]);
+		if (rc)
+			goto bail;
+
+		if (pm8058_vreg_using_pin_ctrl(vreg))
+			rc = pm8058_vreg_set_pin_ctrl(vreg, chip, 0);
+		if (rc)
+			goto bail;
+		break;
+
+	case REGULATOR_MODE_IDLE:
+		/* Pin Control */
+		if (_pm8058_vreg_is_enabled(vreg))
+			rc = pm8058_vreg_set_pin_ctrl(vreg, chip, 1);
+		if (rc)
+			goto bail;
+		break;
+
+	default:
+		pr_err("%s: invalid mode: %u\n", __func__, mode);
+		return -EINVAL;
+	}
+
+bail:
+	if (rc)
+		print_write_error(vreg, rc, __func__);
+
+	return rc;
+}
+
+static int pm8058_smps_set_mode(struct pm8058_vreg *vreg,
+		struct pm8058_chip *chip, unsigned int mode)
+{
+	int rc = 0;
+	u8 mask, val;
+
+	switch (mode) {
+	case REGULATOR_MODE_FAST:
+		/* HPM */
+		val = SMPS_CLK_CTRL_PWM;
+		mask = SMPS_CLK_CTRL_MASK;
+		rc = pm8058_vreg_write(chip, vreg->clk_ctrl_addr, val, mask,
+					&vreg->clk_ctrl_reg);
+		if (rc)
+			goto bail;
+
+		if (pm8058_vreg_using_pin_ctrl(vreg))
+			rc = pm8058_vreg_set_pin_ctrl(vreg, chip, 0);
+		if (rc)
+			goto bail;
+		break;
+
+	case REGULATOR_MODE_STANDBY:
+		/* LPM */
+		val = SMPS_CLK_CTRL_PFM;
+		mask = SMPS_CLK_CTRL_MASK;
+		rc = pm8058_vreg_write(chip, vreg->clk_ctrl_addr, val, mask,
+					&vreg->clk_ctrl_reg);
+		if (rc)
+			goto bail;
+
+		if (pm8058_vreg_using_pin_ctrl(vreg))
+			rc = pm8058_vreg_set_pin_ctrl(vreg, chip, 0);
+		if (rc)
+			goto bail;
+		break;
+
+	case REGULATOR_MODE_IDLE:
+		/* Pin Control */
+		if (_pm8058_vreg_is_enabled(vreg))
+			rc = pm8058_vreg_set_pin_ctrl(vreg, chip, 1);
+		if (rc)
+			goto bail;
+		break;
+
+	default:
+		pr_err("%s: invalid mode: %u\n", __func__, mode);
+		return -EINVAL;
+	}
+
+bail:
+	if (rc)
+		print_write_error(vreg, rc, __func__);
+
+	return rc;
+}
+
+static int pm8058_lvs_set_mode(struct pm8058_vreg *vreg,
+		struct pm8058_chip *chip, unsigned int mode)
+{
+	int rc = 0;
+
+	if (mode == REGULATOR_MODE_IDLE) {
+		/* Use pin control. */
+		if (_pm8058_vreg_is_enabled(vreg))
+			rc = pm8058_vreg_set_pin_ctrl(vreg, chip, 1);
+	} else {
+		/* Turn off pin control. */
+		rc = pm8058_vreg_set_pin_ctrl(vreg, chip, 0);
+	}
+
+	return rc;
+}
+
+/*
+ * Optimum mode programming:
+ * REGULATOR_MODE_FAST: Go to HPM (highest priority)
+ * REGULATOR_MODE_STANDBY: Go to pin ctrl mode if there are any pin ctrl
+ * votes, else go to LPM
+ *
+ * Pin ctrl mode voting via regulator set_mode:
+ * REGULATOR_MODE_IDLE: Go to pin ctrl mode if the optimum mode is LPM, else
+ * go to HPM
+ * REGULATOR_MODE_NORMAL: Go to LPM if it is the optimum mode, else go to HPM
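+ *
+ * For example, with optimum mode == REGULATOR_MODE_STANDBY, the first
+ * REGULATOR_MODE_IDLE vote puts the regulator under pin control; a later
+ * REGULATOR_MODE_FAST request overrides the pin control votes and forces HPM.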
+ */
+static int pm8058_vreg_set_mode(struct regulator_dev *dev, unsigned int mode)
+{
+	struct pm8058_vreg *vreg = rdev_get_drvdata(dev);
+	struct pm8058_chip *chip = dev_get_drvdata(dev->dev.parent);
+	unsigned prev_optimum = vreg->optimum;
+	unsigned prev_pc_vote = vreg->pc_vote;
+	unsigned prev_mode_initialized = vreg->mode_initialized;
+	int new_mode = REGULATOR_MODE_FAST;
+	int rc = 0;
+
+	/* Determine new mode to go into. */
+	switch (mode) {
+	case REGULATOR_MODE_FAST:
+		new_mode = REGULATOR_MODE_FAST;
+		vreg->optimum = mode;
+		vreg->mode_initialized = 1;
+		break;
+
+	case REGULATOR_MODE_STANDBY:
+		if (vreg->pc_vote)
+			new_mode = REGULATOR_MODE_IDLE;
+		else
+			new_mode = REGULATOR_MODE_STANDBY;
+		vreg->optimum = mode;
+		vreg->mode_initialized = 1;
+		break;
+
+	case REGULATOR_MODE_IDLE:
+		if (vreg->pc_vote++)
+			goto done; /* already taken care of */
+
+		if (vreg->mode_initialized
+		    && vreg->optimum == REGULATOR_MODE_FAST)
+			new_mode = REGULATOR_MODE_FAST;
+		else
+			new_mode = REGULATOR_MODE_IDLE;
+		break;
+
+	case REGULATOR_MODE_NORMAL:
+		if (vreg->pc_vote && --(vreg->pc_vote))
+			goto done; /* already taken care of */
+
+		if (vreg->optimum == REGULATOR_MODE_STANDBY)
+			new_mode = REGULATOR_MODE_STANDBY;
+		else
+			new_mode = REGULATOR_MODE_FAST;
+		break;
+
+	default:
+		pr_err("%s: unknown mode, mode=%u\n", __func__, mode);
+		return -EINVAL;
+	}
+
+	switch (vreg->type) {
+	case REGULATOR_TYPE_LDO:
+		rc = pm8058_ldo_set_mode(vreg, chip, new_mode);
+		break;
+	case REGULATOR_TYPE_SMPS:
+		rc = pm8058_smps_set_mode(vreg, chip, new_mode);
+		break;
+	case REGULATOR_TYPE_LVS:
+		rc = pm8058_lvs_set_mode(vreg, chip, new_mode);
+		break;
+	}
+
+	if (rc) {
+		print_write_error(vreg, rc, __func__);
+		vreg->mode_initialized = prev_mode_initialized;
+		vreg->optimum = prev_optimum;
+		vreg->pc_vote = prev_pc_vote;
+		return rc;
+	}
+
+done:
+	return 0;
+}
+
+static unsigned int pm8058_vreg_get_mode(struct regulator_dev *dev)
+{
+	struct pm8058_vreg *vreg = rdev_get_drvdata(dev);
+
+	if (!vreg->mode_initialized && vreg->pc_vote)
+		return REGULATOR_MODE_IDLE;
+
+	/* Check physical pin control state. */
+	switch (vreg->type) {
+	case REGULATOR_TYPE_LDO:
+		if (!(vreg->ctrl_reg & LDO_ENABLE_MASK)
+		    && !pm8058_vreg_is_global_enabled(vreg)
+		    && (vreg->test_reg[5] & LDO_TEST_PIN_CTRL_MASK))
+			return REGULATOR_MODE_IDLE;
+		else if (((vreg->ctrl_reg & LDO_ENABLE_MASK)
+				|| pm8058_vreg_is_global_enabled(vreg))
+		    && (vreg->ctrl_reg & LDO_CTRL_PM_MASK)
+		    && (vreg->test_reg[6] & LDO_TEST_PIN_CTRL_LPM_MASK))
+			return REGULATOR_MODE_IDLE;
+		break;
+	case REGULATOR_TYPE_SMPS:
+		if (!SMPS_IN_ADVANCED_MODE(vreg)
+		    && !(vreg->ctrl_reg & REGULATOR_EN_MASK)
+		    && !pm8058_vreg_is_global_enabled(vreg)
+		    && (vreg->sleep_ctrl_reg & SMPS_PIN_CTRL_MASK))
+			return REGULATOR_MODE_IDLE;
+		else if (!SMPS_IN_ADVANCED_MODE(vreg)
+		    && ((vreg->ctrl_reg & REGULATOR_EN_MASK)
+			|| pm8058_vreg_is_global_enabled(vreg))
+		    && ((vreg->clk_ctrl_reg & SMPS_CLK_CTRL_MASK)
+			== SMPS_CLK_CTRL_PFM)
+		    && (vreg->sleep_ctrl_reg & SMPS_PIN_CTRL_LPM_MASK))
+			return REGULATOR_MODE_IDLE;
+		break;
+	case REGULATOR_TYPE_LVS:
+		if (!(vreg->ctrl_reg & LVS_ENABLE_MASK)
+		    && !pm8058_vreg_is_global_enabled(vreg)
+		    && (vreg->ctrl_reg & LVS_PIN_CTRL_MASK))
+			return REGULATOR_MODE_IDLE;
+	}
+
+	if (vreg->optimum == REGULATOR_MODE_FAST)
+		return REGULATOR_MODE_FAST;
+	else if (vreg->pc_vote)
+		return REGULATOR_MODE_IDLE;
+	else if (vreg->optimum == REGULATOR_MODE_STANDBY)
+		return REGULATOR_MODE_STANDBY;
+	return REGULATOR_MODE_FAST;
+}
+
+unsigned int pm8058_vreg_get_optimum_mode(struct regulator_dev *dev,
+		int input_uV, int output_uV, int load_uA)
+{
+	struct pm8058_vreg *vreg = rdev_get_drvdata(dev);
+
+	if (load_uA <= 0) {
+		/*
+		 * pm8058_vreg_get_optimum_mode is being called before consumers
+		 * have specified their load currents via
+		 * regulator_set_optimum_mode. Return whatever the existing mode
+		 * is.
+		 */
+		return pm8058_vreg_get_mode(dev);
+	}
+
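+	/* Loads at or above the HPM minimum load require high power mode. */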
+	if (load_uA >= vreg->hpm_min_load)
+		return REGULATOR_MODE_FAST;
+	return REGULATOR_MODE_STANDBY;
+}
+
+static struct regulator_ops pm8058_ldo_ops = {
+	.enable = pm8058_vreg_enable,
+	.disable = pm8058_vreg_disable,
+	.is_enabled = pm8058_vreg_is_enabled,
+	.set_voltage = pm8058_ldo_set_voltage,
+	.get_voltage = pm8058_ldo_get_voltage,
+	.set_mode = pm8058_vreg_set_mode,
+	.get_mode = pm8058_vreg_get_mode,
+	.get_optimum_mode = pm8058_vreg_get_optimum_mode,
+};
+
+static struct regulator_ops pm8058_smps_ops = {
+	.enable = pm8058_vreg_enable,
+	.disable = pm8058_vreg_disable,
+	.is_enabled = pm8058_vreg_is_enabled,
+	.set_voltage = pm8058_smps_set_voltage,
+	.get_voltage = pm8058_smps_get_voltage,
+	.set_mode = pm8058_vreg_set_mode,
+	.get_mode = pm8058_vreg_get_mode,
+	.get_optimum_mode = pm8058_vreg_get_optimum_mode,
+};
+
+static struct regulator_ops pm8058_lvs_ops = {
+	.enable = pm8058_vreg_enable,
+	.disable = pm8058_vreg_disable,
+	.is_enabled = pm8058_vreg_is_enabled,
+	.set_mode = pm8058_vreg_set_mode,
+	.get_mode = pm8058_vreg_get_mode,
+};
+
+static struct regulator_ops pm8058_ncp_ops = {
+	.enable = pm8058_vreg_enable,
+	.disable = pm8058_vreg_disable,
+	.is_enabled = pm8058_vreg_is_enabled,
+	.set_voltage = pm8058_ncp_set_voltage,
+	.get_voltage = pm8058_ncp_get_voltage,
+};
+
+#define VREG_DESCRIP(_id, _name, _ops) \
+	[_id] = { \
+		.id = _id, \
+		.name = _name, \
+		.ops = _ops, \
+		.type = REGULATOR_VOLTAGE, \
+		.owner = THIS_MODULE, \
+	}
+
+static struct regulator_desc pm8058_vreg_descrip[] = {
+	VREG_DESCRIP(PM8058_VREG_ID_L0,  "8058_l0",  &pm8058_ldo_ops),
+	VREG_DESCRIP(PM8058_VREG_ID_L1,  "8058_l1",  &pm8058_ldo_ops),
+	VREG_DESCRIP(PM8058_VREG_ID_L2,  "8058_l2",  &pm8058_ldo_ops),
+	VREG_DESCRIP(PM8058_VREG_ID_L3,  "8058_l3",  &pm8058_ldo_ops),
+	VREG_DESCRIP(PM8058_VREG_ID_L4,  "8058_l4",  &pm8058_ldo_ops),
+	VREG_DESCRIP(PM8058_VREG_ID_L5,  "8058_l5",  &pm8058_ldo_ops),
+	VREG_DESCRIP(PM8058_VREG_ID_L6,  "8058_l6",  &pm8058_ldo_ops),
+	VREG_DESCRIP(PM8058_VREG_ID_L7,  "8058_l7",  &pm8058_ldo_ops),
+	VREG_DESCRIP(PM8058_VREG_ID_L8,  "8058_l8",  &pm8058_ldo_ops),
+	VREG_DESCRIP(PM8058_VREG_ID_L9,  "8058_l9",  &pm8058_ldo_ops),
+	VREG_DESCRIP(PM8058_VREG_ID_L10, "8058_l10", &pm8058_ldo_ops),
+	VREG_DESCRIP(PM8058_VREG_ID_L11, "8058_l11", &pm8058_ldo_ops),
+	VREG_DESCRIP(PM8058_VREG_ID_L12, "8058_l12", &pm8058_ldo_ops),
+	VREG_DESCRIP(PM8058_VREG_ID_L13, "8058_l13", &pm8058_ldo_ops),
+	VREG_DESCRIP(PM8058_VREG_ID_L14, "8058_l14", &pm8058_ldo_ops),
+	VREG_DESCRIP(PM8058_VREG_ID_L15, "8058_l15", &pm8058_ldo_ops),
+	VREG_DESCRIP(PM8058_VREG_ID_L16, "8058_l16", &pm8058_ldo_ops),
+	VREG_DESCRIP(PM8058_VREG_ID_L17, "8058_l17", &pm8058_ldo_ops),
+	VREG_DESCRIP(PM8058_VREG_ID_L18, "8058_l18", &pm8058_ldo_ops),
+	VREG_DESCRIP(PM8058_VREG_ID_L19, "8058_l19", &pm8058_ldo_ops),
+	VREG_DESCRIP(PM8058_VREG_ID_L20, "8058_l20", &pm8058_ldo_ops),
+	VREG_DESCRIP(PM8058_VREG_ID_L21, "8058_l21", &pm8058_ldo_ops),
+	VREG_DESCRIP(PM8058_VREG_ID_L22, "8058_l22", &pm8058_ldo_ops),
+	VREG_DESCRIP(PM8058_VREG_ID_L23, "8058_l23", &pm8058_ldo_ops),
+	VREG_DESCRIP(PM8058_VREG_ID_L24, "8058_l24", &pm8058_ldo_ops),
+	VREG_DESCRIP(PM8058_VREG_ID_L25, "8058_l25", &pm8058_ldo_ops),
+
+	VREG_DESCRIP(PM8058_VREG_ID_S0, "8058_s0", &pm8058_smps_ops),
+	VREG_DESCRIP(PM8058_VREG_ID_S1, "8058_s1", &pm8058_smps_ops),
+	VREG_DESCRIP(PM8058_VREG_ID_S2, "8058_s2", &pm8058_smps_ops),
+	VREG_DESCRIP(PM8058_VREG_ID_S3, "8058_s3", &pm8058_smps_ops),
+	VREG_DESCRIP(PM8058_VREG_ID_S4, "8058_s4", &pm8058_smps_ops),
+
+	VREG_DESCRIP(PM8058_VREG_ID_LVS0, "8058_lvs0", &pm8058_lvs_ops),
+	VREG_DESCRIP(PM8058_VREG_ID_LVS1, "8058_lvs1", &pm8058_lvs_ops),
+
+	VREG_DESCRIP(PM8058_VREG_ID_NCP, "8058_ncp", &pm8058_ncp_ops),
+};
+
+static int pm8058_master_enable_init(struct pm8058_chip *chip)
+{
+	int rc = 0, i;
+
+	for (i = 0; i < MASTER_ENABLE_COUNT; i++) {
+		rc = pm8058_read(chip, m_en[i].addr, &(m_en[i].reg), 1);
+		if (rc)
+			goto bail;
+	}
+
+bail:
+	if (rc)
+		pr_err("%s: pm8058_read failed, rc=%d\n", __func__, rc);
+
+	return rc;
+}
+
+static int pm8058_init_ldo(struct pm8058_chip *chip, struct pm8058_vreg *vreg)
+{
+	int rc = 0, i;
+	u8 bank;
+
+	/* Save the current test register state. */
+	for (i = 0; i < LDO_TEST_BANKS; i++) {
+		bank = REGULATOR_BANK_SEL(i);
+		rc = pm8058_write(chip, vreg->test_addr, &bank, 1);
+		if (rc)
+			goto bail;
+
+		rc = pm8058_read(chip, vreg->test_addr, &vreg->test_reg[i], 1);
+		if (rc)
+			goto bail;
+		vreg->test_reg[i] |= REGULATOR_BANK_WRITE;
+	}
+
+	if ((vreg->ctrl_reg & LDO_CTRL_PM_MASK) == LDO_CTRL_PM_LPM)
+		vreg->optimum = REGULATOR_MODE_STANDBY;
+	else
+		vreg->optimum = REGULATOR_MODE_FAST;
+
+	/* Set pull down enable based on platform data. */
+	rc = pm8058_vreg_write(chip, vreg->ctrl_addr,
+		     (vreg->pdata->pull_down_enable ? LDO_PULL_DOWN_ENABLE : 0),
+		     LDO_PULL_DOWN_ENABLE_MASK, &vreg->ctrl_reg);
+bail:
+	return rc;
+}
+
+static int pm8058_init_smps(struct pm8058_chip *chip, struct pm8058_vreg *vreg)
+{
+	int rc = 0, i;
+	u8 bank;
+
+	/* Save the current test2 register state. */
+	for (i = 0; i < SMPS_TEST_BANKS; i++) {
+		bank = REGULATOR_BANK_SEL(i);
+		rc = pm8058_write(chip, vreg->test_addr, &bank, 1);
+		if (rc)
+			goto bail;
+
+		rc = pm8058_read(chip, vreg->test_addr, &vreg->test_reg[i],
+				1);
+		if (rc)
+			goto bail;
+		vreg->test_reg[i] |= REGULATOR_BANK_WRITE;
+	}
+
+	/* Save the current clock control register state. */
+	rc = pm8058_read(chip, vreg->clk_ctrl_addr, &vreg->clk_ctrl_reg, 1);
+	if (rc)
+		goto bail;
+
+	/* Save the current sleep control register state. */
+	rc = pm8058_read(chip, vreg->sleep_ctrl_addr, &vreg->sleep_ctrl_reg, 1);
+	if (rc)
+		goto bail;
+
+	/*
+	 * Seed save_uV before it is read back: _pm8058_smps_get_voltage()
+	 * falls back to save_uV when the advanced mode band is off, so it
+	 * must not be used uninitialized.
+	 */
+	vreg->save_uV = 1;
+	vreg->save_uV = _pm8058_smps_get_voltage(vreg);
+
+	if ((vreg->clk_ctrl_reg & SMPS_CLK_CTRL_MASK) == SMPS_CLK_CTRL_PFM)
+		vreg->optimum = REGULATOR_MODE_STANDBY;
+	else
+		vreg->optimum = REGULATOR_MODE_FAST;
+
+	/* Set advanced mode pull down enable based on platform data. */
+	rc = pm8058_vreg_write(chip, vreg->test_addr,
+		(vreg->pdata->pull_down_enable
+			? SMPS_ADVANCED_PULL_DOWN_ENABLE : 0)
+		| REGULATOR_BANK_SEL(6) | REGULATOR_BANK_WRITE,
+		REGULATOR_BANK_MASK | SMPS_ADVANCED_PULL_DOWN_ENABLE,
+		&vreg->test_reg[6]);
+	if (rc)
+		goto bail;
+
+	if (!SMPS_IN_ADVANCED_MODE(vreg)) {
+		/* Set legacy mode pull down enable based on platform data. */
+		rc = pm8058_vreg_write(chip, vreg->ctrl_addr,
+			(vreg->pdata->pull_down_enable
+				? SMPS_LEGACY_PULL_DOWN_ENABLE : 0),
+			SMPS_LEGACY_PULL_DOWN_ENABLE, &vreg->ctrl_reg);
+		if (rc)
+			goto bail;
+	}
+
+bail:
+	return rc;
+}
+
+static int pm8058_init_lvs(struct pm8058_chip *chip, struct pm8058_vreg *vreg)
+{
+	int rc = 0;
+
+	vreg->optimum = REGULATOR_MODE_FAST;
+
+	/* Set pull down enable based on platform data. */
+	rc = pm8058_vreg_write(chip, vreg->ctrl_addr,
+		(vreg->pdata->pull_down_enable
+			? LVS_PULL_DOWN_ENABLE : LVS_PULL_DOWN_DISABLE),
+		LVS_PULL_DOWN_ENABLE_MASK, &vreg->ctrl_reg);
+	return rc;
+}
+
+static int pm8058_init_ncp(struct pm8058_chip *chip, struct pm8058_vreg *vreg)
+{
+	int rc = 0;
+
+	/* Save the current test1 register state. */
+	rc = pm8058_read(chip, vreg->test_addr, &vreg->test_reg[0], 1);
+	if (rc)
+		goto bail;
+
+	vreg->optimum = REGULATOR_MODE_FAST;
+
+bail:
+	return rc;
+}
+
+static int pm8058_init_regulator(struct pm8058_chip *chip,
+		struct pm8058_vreg *vreg)
+{
+	static int master_enable_inited;
+	int rc = 0;
+
+	vreg->mode_initialized = 0;
+
+	if (!master_enable_inited) {
+		rc = pm8058_master_enable_init(chip);
+		if (!rc)
+			master_enable_inited = 1;
+	}
+
+	/* save the current control register state */
+	rc = pm8058_read(chip, vreg->ctrl_addr, &vreg->ctrl_reg, 1);
+	if (rc)
+		goto bail;
+
+	switch (vreg->type) {
+	case REGULATOR_TYPE_LDO:
+		rc = pm8058_init_ldo(chip, vreg);
+		break;
+	case REGULATOR_TYPE_SMPS:
+		rc = pm8058_init_smps(chip, vreg);
+		break;
+	case REGULATOR_TYPE_LVS:
+		rc = pm8058_init_lvs(chip, vreg);
+		break;
+	case REGULATOR_TYPE_NCP:
+		rc = pm8058_init_ncp(chip, vreg);
+		break;
+	}
+
+bail:
+	if (rc)
+		pr_err("%s: pm8058_read/write failed; initial register states "
+			"unknown, rc=%d\n", __func__, rc);
+	return rc;
+}
+
+static int __devinit pm8058_vreg_probe(struct platform_device *pdev)
+{
+	struct regulator_desc *rdesc;
+	struct pm8058_chip *chip;
+	struct pm8058_vreg *vreg;
+	const char *reg_name = NULL;
+	int rc = 0;
+
+	if (pdev == NULL)
+		return -EINVAL;
+
+	if (pdev->id >= 0 && pdev->id < PM8058_VREG_MAX) {
+		chip = platform_get_drvdata(pdev);
+		rdesc = &pm8058_vreg_descrip[pdev->id];
+		vreg = &pm8058_vreg[pdev->id];
+		vreg->pdata = pdev->dev.platform_data;
+		reg_name = pm8058_vreg_descrip[pdev->id].name;
+
+		rc = pm8058_init_regulator(chip, vreg);
+		if (rc)
+			goto bail;
+
+		/* Disallow idle and normal modes if pin control isn't set. */
+		if (vreg->pdata->pin_ctrl == 0)
+			vreg->pdata->init_data.constraints.valid_modes_mask
+			      &= ~(REGULATOR_MODE_NORMAL | REGULATOR_MODE_IDLE);
+
+		vreg->rdev = regulator_register(rdesc, &pdev->dev,
+				&vreg->pdata->init_data, vreg);
+		if (IS_ERR(vreg->rdev)) {
+			rc = PTR_ERR(vreg->rdev);
+			pr_err("%s: regulator_register failed for %s, rc=%d\n",
+				__func__, reg_name, rc);
+		}
+	} else {
+		rc = -ENODEV;
+	}
+
+bail:
+	if (rc)
+		pr_err("%s: error for %s, rc=%d\n", __func__, reg_name, rc);
+
+	return rc;
+}
+
+static int __devexit pm8058_vreg_remove(struct platform_device *pdev)
+{
+	regulator_unregister(pm8058_vreg[pdev->id].rdev);
+	return 0;
+}
+
+static struct platform_driver pm8058_vreg_driver = {
+	.probe = pm8058_vreg_probe,
+	.remove = __devexit_p(pm8058_vreg_remove),
+	.driver = {
+		.name = "pm8058-regulator",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init pm8058_vreg_init(void)
+{
+	return platform_driver_register(&pm8058_vreg_driver);
+}
+
+static void __exit pm8058_vreg_exit(void)
+{
+	platform_driver_unregister(&pm8058_vreg_driver);
+}
+
+static void print_write_error(struct pm8058_vreg *vreg, int rc,
+				const char *func)
+{
+	const char *reg_name = NULL;
+	ptrdiff_t id = vreg - pm8058_vreg;
+
+	if (id >= 0 && id < PM8058_VREG_MAX)
+		reg_name = pm8058_vreg_descrip[id].name;
+	pr_err("%s: pm8058_vreg_write failed for %s, rc=%d\n",
+		func, reg_name, rc);
+}
+
+subsys_initcall(pm8058_vreg_init);
+module_exit(pm8058_vreg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC8058 regulator driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:pm8058-regulator");
diff --git a/drivers/regulator/pmic8901-regulator.c b/drivers/regulator/pmic8901-regulator.c
new file mode 100644
index 0000000..5b4b907
--- /dev/null
+++ b/drivers/regulator/pmic8901-regulator.c
@@ -0,0 +1,1097 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mfd/pmic8901.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/pmic8901-regulator.h>
+#include <mach/mpp.h>
+
+/* Regulator types */
+#define REGULATOR_TYPE_LDO		0
+#define REGULATOR_TYPE_SMPS		1
+#define REGULATOR_TYPE_VS		2
+#define REGULATOR_TYPE_MPP		3
+
+/* Bank select/write macros */
+#define REGULATOR_BANK_SEL(n)           ((n) << 4)
+#define REGULATOR_BANK_WRITE            0x80
+#define LDO_TEST_BANKS			7
+#define REGULATOR_BANK_MASK		0xF0
+
+/* Pin mask resource register programming */
+#define VREG_PMR_STATE_MASK		0x60
+#define VREG_PMR_STATE_HPM		0x60
+#define VREG_PMR_STATE_LPM		0x40
+#define VREG_PMR_STATE_OFF		0x20
+#define VREG_PMR_STATE_PIN_CTRL		0x20
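+/*
+ * Note: the OFF and PIN_CTRL states share the same register encoding; a
+ * regulator is treated as pin controlled when one or more of the pin control
+ * mask bits below are cleared (unmasked).
+ */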
+
+#define VREG_PMR_MODE_ACTION_MASK	0x10
+#define VREG_PMR_MODE_ACTION_SLEEP	0x10
+#define VREG_PMR_MODE_ACTION_OFF	0x00
+
+#define VREG_PMR_MODE_PIN_MASK		0x08
+#define VREG_PMR_MODE_PIN_MASKED	0x08
+
+#define VREG_PMR_CTRL_PIN2_MASK		0x04
+#define VREG_PMR_CTRL_PIN2_MASKED	0x04
+
+#define VREG_PMR_CTRL_PIN1_MASK		0x02
+#define VREG_PMR_CTRL_PIN1_MASKED	0x02
+
+#define VREG_PMR_CTRL_PIN0_MASK		0x01
+#define VREG_PMR_CTRL_PIN0_MASKED	0x01
+
+#define VREG_PMR_PIN_CTRL_ALL_MASK	0x1F
+#define VREG_PMR_PIN_CTRL_ALL_MASKED	0x1F
+
+#define REGULATOR_IS_EN(pmr_reg) \
+	(((pmr_reg) & VREG_PMR_STATE_MASK) == VREG_PMR_STATE_HPM || \
+	 ((pmr_reg) & VREG_PMR_STATE_MASK) == VREG_PMR_STATE_LPM)
+
+/* FTSMPS programming */
+
+/* CTRL register */
+#define SMPS_VCTRL_BAND_MASK		0xC0
+#define SMPS_VCTRL_BAND_OFF		0x00
+#define SMPS_VCTRL_BAND_1		0x40
+#define SMPS_VCTRL_BAND_2		0x80
+#define SMPS_VCTRL_BAND_3		0xC0
+#define SMPS_VCTRL_VPROG_MASK		0x3F
+
+#define SMPS_BAND_1_UV_MIN		350000
+#define SMPS_BAND_1_UV_MAX		650000
+#define SMPS_BAND_1_UV_STEP		6250
+
+#define SMPS_BAND_2_UV_MIN		700000
+#define SMPS_BAND_2_UV_MAX		1400000
+#define SMPS_BAND_2_UV_STEP		12500
+
+#define SMPS_BAND_3_UV_SETPOINT_MIN	1500000
+#define SMPS_BAND_3_UV_MIN		1400000
+#define SMPS_BAND_3_UV_MAX		3300000
+#define SMPS_BAND_3_UV_STEP		50000
+
+#define SMPS_UV_MIN			SMPS_BAND_1_UV_MIN
+#define SMPS_UV_MAX			SMPS_BAND_3_UV_MAX
+
+/* PWR_CNFG register */
+#define SMPS_PULL_DOWN_ENABLE_MASK	0x40
+#define SMPS_PULL_DOWN_ENABLE		0x40
+
+/* LDO programming */
+
+/* CTRL register */
+#define LDO_LOCAL_ENABLE_MASK		0x80
+#define LDO_LOCAL_ENABLE		0x80
+
+#define LDO_PULL_DOWN_ENABLE_MASK	0x40
+#define LDO_PULL_DOWN_ENABLE		0x40
+
+#define LDO_CTRL_VPROG_MASK		0x1F
+
+/* TEST register bank 2 */
+#define LDO_TEST_VPROG_UPDATE_MASK	0x08
+#define LDO_TEST_RANGE_SEL_MASK		0x04
+#define LDO_TEST_FINE_STEP_MASK		0x02
+#define LDO_TEST_FINE_STEP_SHIFT	1
+
+/* TEST register bank 4 */
+#define LDO_TEST_RANGE_EXT_MASK	0x01
+
+/* Allowable voltage ranges */
+#define PLDO_LOW_UV_MIN			750000
+#define PLDO_LOW_UV_MAX			1537500
+#define PLDO_LOW_FINE_STEP_UV		12500
+
+#define PLDO_NORM_UV_MIN		1500000
+#define PLDO_NORM_UV_MAX		3075000
+#define PLDO_NORM_FINE_STEP_UV		25000
+
+#define PLDO_HIGH_UV_MIN		1750000
+#define PLDO_HIGH_UV_MAX		4900000
+#define PLDO_HIGH_FINE_STEP_UV		50000
+
+#define NLDO_UV_MIN			750000
+#define NLDO_UV_MAX			1537500
+#define NLDO_FINE_STEP_UV		12500
+
+/* VS programming */
+
+/* CTRL register */
+#define VS_CTRL_ENABLE_MASK		0xC0
+#define VS_CTRL_DISABLE			0x00
+#define VS_CTRL_ENABLE			0x40
+#define VS_CTRL_USE_PMR			0xC0
+
+#define VS_PULL_DOWN_ENABLE_MASK	0x20
+#define VS_PULL_DOWN_ENABLE		0x20
+
+struct pm8901_vreg {
+	struct pm8901_vreg_pdata	*pdata;
+	struct regulator_dev		*rdev;
+	struct pm8901_chip		*chip;
+	int				hpm_min_load;
+	unsigned			pc_vote;
+	unsigned			optimum;
+	unsigned			mode_initialized;
+	u16				ctrl_addr;
+	u16				pmr_addr;
+	u16				test_addr;
+	u16				pfm_ctrl_addr;
+	u16				pwr_cnfg_addr;
+	u8				type;
+	u8				ctrl_reg;
+	u8				pmr_reg;
+	u8				test_reg[LDO_TEST_BANKS];
+	u8				pfm_ctrl_reg;
+	u8				pwr_cnfg_reg;
+	u8				is_nmos;
+	u8				mpp_id;
+	u8				state;
+};
+
+/*
+ * These are used to compensate for the PMIC 8901 v1 FTS regulators which
+ * output ~10% higher than the programmed set point.
+ */
+#define IS_PMIC_8901_V1(rev)		((rev) == PM_8901_REV_1p0 || \
+					 (rev) == PM_8901_REV_1p1)
+
+#define PMIC_8901_V1_SCALE(uV)		((((uV) - 62100) * 23) / 25)
+
+#define PMIC_8901_V1_SCALE_INV(uV)	(((uV) * 25) / 23 + 62100)
+
+/*
+ * Band 1 of the PMIC 8901 SMPS regulators only supports set points with the
+ * 3 LSBs equal to 0.  The macro enforces this by masking off those bits.
+ */
+#define PM8901_SMPS_BAND_1_COMPENSATE(vprog)	((vprog) & 0xF8)
+
+#define LDO(_id, _ctrl_addr, _pmr_addr, _test_addr, _is_nmos) \
+	[_id] = { \
+		.ctrl_addr = _ctrl_addr, \
+		.pmr_addr = _pmr_addr, \
+		.test_addr = _test_addr, \
+		.type = REGULATOR_TYPE_LDO, \
+		.is_nmos = _is_nmos, \
+		.hpm_min_load = PM8901_VREG_LDO_300_HPM_MIN_LOAD, \
+	}
+
+#define SMPS(_id, _ctrl_addr, _pmr_addr, _pfm_ctrl_addr, _pwr_cnfg_addr) \
+	[_id] = { \
+		.ctrl_addr = _ctrl_addr, \
+		.pmr_addr = _pmr_addr, \
+		.pfm_ctrl_addr = _pfm_ctrl_addr, \
+		.pwr_cnfg_addr = _pwr_cnfg_addr, \
+		.type = REGULATOR_TYPE_SMPS, \
+		.hpm_min_load = PM8901_VREG_FTSMPS_HPM_MIN_LOAD, \
+	}
+
+#define VS(_id, _ctrl_addr, _pmr_addr) \
+	[_id] = { \
+		.ctrl_addr = _ctrl_addr, \
+		.pmr_addr = _pmr_addr, \
+		.type = REGULATOR_TYPE_VS, \
+	}
+
+#define MPP(_id, _mpp_id) \
+	[_id] = { \
+		.mpp_id = _mpp_id, \
+		.type = REGULATOR_TYPE_MPP, \
+	}
+
+static struct pm8901_vreg pm8901_vreg[] = {
+	/*  id                 ctrl   pmr    tst    n/p */
+	LDO(PM8901_VREG_ID_L0, 0x02F, 0x0AB, 0x030, 1),
+	LDO(PM8901_VREG_ID_L1, 0x031, 0x0AC, 0x032, 0),
+	LDO(PM8901_VREG_ID_L2, 0x033, 0x0AD, 0x034, 0),
+	LDO(PM8901_VREG_ID_L3, 0x035, 0x0AE, 0x036, 0),
+	LDO(PM8901_VREG_ID_L4, 0x037, 0x0AF, 0x038, 0),
+	LDO(PM8901_VREG_ID_L5, 0x039, 0x0B0, 0x03A, 0),
+	LDO(PM8901_VREG_ID_L6, 0x03B, 0x0B1, 0x03C, 0),
+
+	/*   id                 ctrl   pmr    pfm    pwr */
+	SMPS(PM8901_VREG_ID_S0, 0x05B, 0x0A6, 0x05C, 0x0E3),
+	SMPS(PM8901_VREG_ID_S1, 0x06A, 0x0A7, 0x06B, 0x0EC),
+	SMPS(PM8901_VREG_ID_S2, 0x079, 0x0A8, 0x07A, 0x0F1),
+	SMPS(PM8901_VREG_ID_S3, 0x088, 0x0A9, 0x089, 0x0F6),
+	SMPS(PM8901_VREG_ID_S4, 0x097, 0x0AA, 0x098, 0x0FB),
+
+	/* id			  MPP ID */
+	MPP(PM8901_VREG_ID_MPP0,    0),
+
+	/* id                       ctrl   pmr */
+	VS(PM8901_VREG_ID_LVS0,     0x046, 0x0B2),
+	VS(PM8901_VREG_ID_LVS1,     0x048, 0x0B3),
+	VS(PM8901_VREG_ID_LVS2,     0x04A, 0x0B4),
+	VS(PM8901_VREG_ID_LVS3,     0x04C, 0x0B5),
+	VS(PM8901_VREG_ID_MVS0,     0x052, 0x0B6),
+	VS(PM8901_VREG_ID_USB_OTG,  0x055, 0x0B7),
+	VS(PM8901_VREG_ID_HDMI_MVS, 0x058, 0x0B8),
+};
+
+static void print_write_error(struct pm8901_vreg *vreg, int rc,
+				const char *func);
+
+static int pm8901_vreg_write(struct pm8901_chip *chip,
+		u16 addr, u8 val, u8 mask, u8 *reg_save)
+{
+	int rc = 0;
+	u8 reg;
+
+	reg = (*reg_save & ~mask) | (val & mask);
+	if (reg != *reg_save)
+		rc = pm8901_write(chip, addr, &reg, 1);
+	if (!rc)
+		*reg_save = reg;
+	return rc;
+}
+
+/* Set pin control bits based on new mode. */
+static int pm8901_vreg_select_pin_ctrl(struct pm8901_vreg *vreg, u8 *pmr_reg)
+{
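+	/*
+	 * Mask (disable) all pin control inputs first, then unmask only the
+	 * pins requested in platform data when the regulator is entering the
+	 * pin control state.
+	 */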
+	*pmr_reg |= VREG_PMR_PIN_CTRL_ALL_MASKED;
+
+	if ((*pmr_reg & VREG_PMR_STATE_MASK) == VREG_PMR_STATE_PIN_CTRL) {
+		if (vreg->pdata->pin_fn == PM8901_VREG_PIN_FN_MODE)
+			*pmr_reg = (*pmr_reg & ~VREG_PMR_STATE_MASK)
+				   | VREG_PMR_STATE_LPM;
+		if (vreg->pdata->pin_ctrl & PM8901_VREG_PIN_CTRL_A0)
+			*pmr_reg &= ~VREG_PMR_CTRL_PIN0_MASKED;
+		if (vreg->pdata->pin_ctrl & PM8901_VREG_PIN_CTRL_A1)
+			*pmr_reg &= ~VREG_PMR_CTRL_PIN1_MASKED;
+		if (vreg->pdata->pin_ctrl & PM8901_VREG_PIN_CTRL_D0)
+			*pmr_reg &= ~VREG_PMR_CTRL_PIN2_MASKED;
+	}
+
+	return 0;
+}
+
+static int pm8901_vreg_enable(struct regulator_dev *dev)
+{
+	struct pm8901_vreg *vreg = rdev_get_drvdata(dev);
+	struct pm8901_chip *chip = vreg->chip;
+	u8 val = VREG_PMR_STATE_HPM;
+	int rc;
+
+	if (!vreg->mode_initialized && vreg->pc_vote)
+		val = VREG_PMR_STATE_PIN_CTRL;
+	else if (vreg->optimum == REGULATOR_MODE_FAST)
+		val = VREG_PMR_STATE_HPM;
+	else if (vreg->pc_vote)
+		val = VREG_PMR_STATE_PIN_CTRL;
+	else if (vreg->optimum == REGULATOR_MODE_STANDBY)
+		val = VREG_PMR_STATE_LPM;
+
+	pm8901_vreg_select_pin_ctrl(vreg, &val);
+
+	rc = pm8901_vreg_write(chip, vreg->pmr_addr,
+			val,
+			VREG_PMR_STATE_MASK | VREG_PMR_PIN_CTRL_ALL_MASK,
+			&vreg->pmr_reg);
+	if (rc)
+		print_write_error(vreg, rc, __func__);
+
+	return rc;
+}
+
+static int pm8901_vreg_disable(struct regulator_dev *dev)
+{
+	struct pm8901_vreg *vreg = rdev_get_drvdata(dev);
+	struct pm8901_chip *chip = vreg->chip;
+	int rc;
+
+	rc = pm8901_vreg_write(chip, vreg->pmr_addr,
+			VREG_PMR_STATE_OFF | VREG_PMR_PIN_CTRL_ALL_MASKED,
+			VREG_PMR_STATE_MASK | VREG_PMR_PIN_CTRL_ALL_MASK,
+			&vreg->pmr_reg);
+	if (rc)
+		print_write_error(vreg, rc, __func__);
+
+	return rc;
+}
+
+/*
+ * Cases that count as enabled:
+ *
+ * 1. PMR register has mode == HPM or LPM.
+ * 2. Any pin control bits are unmasked.
+ * 3. The regulator is an LDO and its local enable bit is set.
+ */
+static int _pm8901_vreg_is_enabled(struct pm8901_vreg *vreg)
+{
+	if ((vreg->type == REGULATOR_TYPE_LDO)
+	    && (vreg->ctrl_reg & LDO_LOCAL_ENABLE_MASK))
+		return 1;
+	else if (vreg->type == REGULATOR_TYPE_VS) {
+		if ((vreg->ctrl_reg & VS_CTRL_ENABLE_MASK) == VS_CTRL_ENABLE)
+			return 1;
+		else if ((vreg->ctrl_reg & VS_CTRL_ENABLE_MASK)
+			 == VS_CTRL_DISABLE)
+			return 0;
+	}
+
+	return REGULATOR_IS_EN(vreg->pmr_reg)
+		|| ((vreg->pmr_reg & VREG_PMR_PIN_CTRL_ALL_MASK)
+		   != VREG_PMR_PIN_CTRL_ALL_MASKED);
+}
+
+static int pm8901_vreg_is_enabled(struct regulator_dev *dev)
+{
+	struct pm8901_vreg *vreg = rdev_get_drvdata(dev);
+
+	return _pm8901_vreg_is_enabled(vreg);
+}
+
+static int pm8901_ldo_disable(struct regulator_dev *dev)
+{
+	struct pm8901_vreg *vreg = rdev_get_drvdata(dev);
+	struct pm8901_chip *chip = vreg->chip;
+	int rc;
+
+	/* Deassert the local enable bit in the CTRL register. */
+	rc = pm8901_vreg_write(chip, vreg->ctrl_addr, 0, LDO_LOCAL_ENABLE_MASK,
+			&vreg->ctrl_reg);
+	if (rc)
+		print_write_error(vreg, rc, __func__);
+
+	/* Deassert the enable bit in the PMR register. */
+	rc = pm8901_vreg_disable(dev);
+
+	return rc;
+}
+
+static int pm8901_pldo_set_voltage(struct pm8901_chip *chip,
+		struct pm8901_vreg *vreg, int uV)
+{
+	int vmin, rc = 0;
+	unsigned vprog, fine_step;
+	u8 range_ext, range_sel, fine_step_reg;
+
+	if (uV < PLDO_LOW_UV_MIN || uV > PLDO_HIGH_UV_MAX)
+		return -EINVAL;
+
+	if (uV < PLDO_LOW_UV_MAX + PLDO_LOW_FINE_STEP_UV) {
+		vmin = PLDO_LOW_UV_MIN;
+		fine_step = PLDO_LOW_FINE_STEP_UV;
+		range_ext = 0;
+		range_sel = LDO_TEST_RANGE_SEL_MASK;
+	} else if (uV < PLDO_NORM_UV_MAX + PLDO_NORM_FINE_STEP_UV) {
+		vmin = PLDO_NORM_UV_MIN;
+		fine_step = PLDO_NORM_FINE_STEP_UV;
+		range_ext = 0;
+		range_sel = 0;
+	} else {
+		vmin = PLDO_HIGH_UV_MIN;
+		fine_step = PLDO_HIGH_FINE_STEP_UV;
+		range_ext = LDO_TEST_RANGE_EXT_MASK;
+		range_sel = 0;
+	}
+
+	vprog = (uV - vmin) / fine_step;
+	fine_step_reg = (vprog & 1) << LDO_TEST_FINE_STEP_SHIFT;
+	vprog >>= 1;
+
+	/*
+	 * Disable program voltage update if range extension, range select,
+	 * or fine step have changed and the regulator is enabled.
+	 */
+	if (_pm8901_vreg_is_enabled(vreg) &&
+		(((range_ext ^ vreg->test_reg[4]) & LDO_TEST_RANGE_EXT_MASK)
+		|| ((range_sel ^ vreg->test_reg[2]) & LDO_TEST_RANGE_SEL_MASK)
+		|| ((fine_step_reg ^ vreg->test_reg[2])
+			& LDO_TEST_FINE_STEP_MASK))) {
+		rc = pm8901_vreg_write(chip, vreg->test_addr,
+			REGULATOR_BANK_SEL(2) | REGULATOR_BANK_WRITE,
+			REGULATOR_BANK_MASK | LDO_TEST_VPROG_UPDATE_MASK,
+			&vreg->test_reg[2]);
+		if (rc)
+			goto bail;
+	}
+
+	/* Write new voltage. */
+	rc = pm8901_vreg_write(chip, vreg->ctrl_addr, vprog,
+				LDO_CTRL_VPROG_MASK, &vreg->ctrl_reg);
+	if (rc)
+		goto bail;
+
+	/* Write range extension. */
+	rc = pm8901_vreg_write(chip, vreg->test_addr,
+			range_ext | REGULATOR_BANK_SEL(4)
+			 | REGULATOR_BANK_WRITE,
+			LDO_TEST_RANGE_EXT_MASK | REGULATOR_BANK_MASK,
+			&vreg->test_reg[4]);
+	if (rc)
+		goto bail;
+
+	/* Write fine step, range select and program voltage update. */
+	rc = pm8901_vreg_write(chip, vreg->test_addr,
+			fine_step_reg | range_sel | REGULATOR_BANK_SEL(2)
+			 | REGULATOR_BANK_WRITE | LDO_TEST_VPROG_UPDATE_MASK,
+			LDO_TEST_FINE_STEP_MASK | LDO_TEST_RANGE_SEL_MASK
+			 | REGULATOR_BANK_MASK | LDO_TEST_VPROG_UPDATE_MASK,
+			&vreg->test_reg[2]);
+bail:
+	if (rc)
+		print_write_error(vreg, rc, __func__);
+
+	return rc;
+}
+
+static int pm8901_nldo_set_voltage(struct pm8901_chip *chip,
+		struct pm8901_vreg *vreg, int uV)
+{
+	unsigned vprog, fine_step_reg;
+	int rc;
+
+	if (uV < NLDO_UV_MIN || uV > NLDO_UV_MAX)
+		return -EINVAL;
+
+	vprog = (uV - NLDO_UV_MIN) / NLDO_FINE_STEP_UV;
+	fine_step_reg = (vprog & 1) << LDO_TEST_FINE_STEP_SHIFT;
+	vprog >>= 1;
+
+	/* Write new voltage. */
+	rc = pm8901_vreg_write(chip, vreg->ctrl_addr, vprog,
+				LDO_CTRL_VPROG_MASK, &vreg->ctrl_reg);
+	if (rc) {
+		print_write_error(vreg, rc, __func__);
+		return rc;
+	}
+
+	/* Write fine step. */
+	rc = pm8901_vreg_write(chip, vreg->test_addr,
+			fine_step_reg | REGULATOR_BANK_SEL(2)
+			 | REGULATOR_BANK_WRITE | LDO_TEST_VPROG_UPDATE_MASK,
+			LDO_TEST_FINE_STEP_MASK | REGULATOR_BANK_MASK
+			 | LDO_TEST_VPROG_UPDATE_MASK,
+		       &vreg->test_reg[2]);
+	if (rc)
+		print_write_error(vreg, rc, __func__);
+
+	return rc;
+}
+
+static int pm8901_ldo_set_voltage(struct regulator_dev *dev,
+		int min_uV, int max_uV, unsigned *selector)
+{
+	struct pm8901_vreg *vreg = rdev_get_drvdata(dev);
+	struct pm8901_chip *chip = vreg->chip;
+
+	if (vreg->is_nmos)
+		return pm8901_nldo_set_voltage(chip, vreg, min_uV);
+	else
+		return pm8901_pldo_set_voltage(chip, vreg, min_uV);
+}
+
+static int pm8901_pldo_get_voltage(struct pm8901_vreg *vreg)
+{
+	int vmin, fine_step;
+	u8 range_ext, range_sel, vprog, fine_step_reg;
+
+	fine_step_reg = vreg->test_reg[2] & LDO_TEST_FINE_STEP_MASK;
+	range_sel = vreg->test_reg[2] & LDO_TEST_RANGE_SEL_MASK;
+	range_ext = vreg->test_reg[4] & LDO_TEST_RANGE_EXT_MASK;
+	vprog = vreg->ctrl_reg & LDO_CTRL_VPROG_MASK;
+
+	vprog = (vprog << 1) | (fine_step_reg >> LDO_TEST_FINE_STEP_SHIFT);
+
+	if (range_sel) {
+		/* low range mode */
+		fine_step = PLDO_LOW_FINE_STEP_UV;
+		vmin = PLDO_LOW_UV_MIN;
+	} else if (!range_ext) {
+		/* normal mode */
+		fine_step = PLDO_NORM_FINE_STEP_UV;
+		vmin = PLDO_NORM_UV_MIN;
+	} else {
+		/* high range mode */
+		fine_step = PLDO_HIGH_FINE_STEP_UV;
+		vmin = PLDO_HIGH_UV_MIN;
+	}
+
+	return fine_step * vprog + vmin;
+}
+
+static int pm8901_nldo_get_voltage(struct pm8901_vreg *vreg)
+{
+	u8 vprog, fine_step_reg;
+
+	fine_step_reg = vreg->test_reg[2] & LDO_TEST_FINE_STEP_MASK;
+	vprog = vreg->ctrl_reg & LDO_CTRL_VPROG_MASK;
+
+	vprog = (vprog << 1) | (fine_step_reg >> LDO_TEST_FINE_STEP_SHIFT);
+
+	return NLDO_FINE_STEP_UV * vprog + NLDO_UV_MIN;
+}
+
+static int pm8901_ldo_get_voltage(struct regulator_dev *dev)
+{
+	struct pm8901_vreg *vreg = rdev_get_drvdata(dev);
+
+	if (vreg->is_nmos)
+		return pm8901_nldo_get_voltage(vreg);
+	else
+		return pm8901_pldo_get_voltage(vreg);
+}
+
+/*
+ * Optimum mode programming:
+ * REGULATOR_MODE_FAST: Go to HPM (highest priority)
+ * REGULATOR_MODE_STANDBY: Go to pin ctrl mode if there are any pin ctrl
+ * votes, else go to LPM
+ *
+ * Pin ctrl mode voting via regulator set_mode:
+ * REGULATOR_MODE_IDLE: Go to pin ctrl mode if the optimum mode is LPM, else
+ * go to HPM
+ * REGULATOR_MODE_NORMAL: Go to LPM if it is the optimum mode, else go to HPM
+ */
+static int pm8901_vreg_set_mode(struct regulator_dev *dev, unsigned int mode)
+{
+	struct pm8901_vreg *vreg = rdev_get_drvdata(dev);
+	struct pm8901_chip *chip = vreg->chip;
+	unsigned optimum = vreg->optimum;
+	unsigned pc_vote = vreg->pc_vote;
+	unsigned mode_initialized = vreg->mode_initialized;
+	u8 val = 0;
+	int rc = 0;
+
+	/* Determine new mode to go into. */
+	switch (mode) {
+	case REGULATOR_MODE_FAST:
+		val = VREG_PMR_STATE_HPM;
+		optimum = mode;
+		mode_initialized = 1;
+		break;
+
+	case REGULATOR_MODE_STANDBY:
+		if (pc_vote)
+			val = VREG_PMR_STATE_PIN_CTRL;
+		else
+			val = VREG_PMR_STATE_LPM;
+		optimum = mode;
+		mode_initialized = 1;
+		break;
+
+	case REGULATOR_MODE_IDLE:
+		if (pc_vote++)
+			goto done; /* already taken care of */
+
+		if (mode_initialized && optimum == REGULATOR_MODE_FAST)
+			val = VREG_PMR_STATE_HPM;
+		else
+			val = VREG_PMR_STATE_PIN_CTRL;
+		break;
+
+	case REGULATOR_MODE_NORMAL:
+		if (pc_vote && --pc_vote)
+			goto done; /* already taken care of */
+
+		if (optimum == REGULATOR_MODE_STANDBY)
+			val = VREG_PMR_STATE_LPM;
+		else
+			val = VREG_PMR_STATE_HPM;
+		break;
+
+	default:
+		pr_err("%s: unknown mode, mode=%u\n", __func__, mode);
+		return -EINVAL;
+	}
+
+	/* Set pin control bits based on new mode. */
+	pm8901_vreg_select_pin_ctrl(vreg, &val);
+
+	/* Only apply mode setting to hardware if currently enabled. */
+	if (pm8901_vreg_is_enabled(dev))
+		rc = pm8901_vreg_write(chip, vreg->pmr_addr, val,
+			       VREG_PMR_STATE_MASK | VREG_PMR_PIN_CTRL_ALL_MASK,
+			       &vreg->pmr_reg);
+
+	if (rc) {
+		print_write_error(vreg, rc, __func__);
+		return rc;
+	}
+
+done:
+	vreg->mode_initialized = mode_initialized;
+	vreg->optimum = optimum;
+	vreg->pc_vote = pc_vote;
+
+	return 0;
+}
+
+static unsigned int pm8901_vreg_get_mode(struct regulator_dev *dev)
+{
+	struct pm8901_vreg *vreg = rdev_get_drvdata(dev);
+	int pin_mask = VREG_PMR_CTRL_PIN0_MASK | VREG_PMR_CTRL_PIN1_MASK
+			| VREG_PMR_CTRL_PIN2_MASK;
+
+	if (!vreg->mode_initialized && vreg->pc_vote)
+		return REGULATOR_MODE_IDLE;
+	else if (((vreg->pmr_reg & VREG_PMR_STATE_MASK) == VREG_PMR_STATE_OFF)
+		 && ((vreg->pmr_reg & pin_mask) != pin_mask))
+		return REGULATOR_MODE_IDLE;
+	else if (((vreg->pmr_reg & VREG_PMR_STATE_MASK) == VREG_PMR_STATE_LPM)
+		 && ((vreg->pmr_reg & pin_mask) != pin_mask))
+		return REGULATOR_MODE_IDLE;
+	else if (vreg->optimum == REGULATOR_MODE_FAST)
+		return REGULATOR_MODE_FAST;
+	else if (vreg->pc_vote)
+		return REGULATOR_MODE_IDLE;
+	else if (vreg->optimum == REGULATOR_MODE_STANDBY)
+		return REGULATOR_MODE_STANDBY;
+	return REGULATOR_MODE_FAST;
+}
+
+unsigned int pm8901_vreg_get_optimum_mode(struct regulator_dev *dev,
+		int input_uV, int output_uV, int load_uA)
+{
+	struct pm8901_vreg *vreg = rdev_get_drvdata(dev);
+
+	if (load_uA <= 0) {
+		/*
+		 * pm8901_vreg_get_optimum_mode is being called before consumers
+		 * have specified their load currents via
+		 * regulator_set_optimum_mode. Return whatever the existing mode
+		 * is.
+		 */
+		return pm8901_vreg_get_mode(dev);
+	}
+
+	if (load_uA >= vreg->hpm_min_load)
+		return REGULATOR_MODE_FAST;
+	return REGULATOR_MODE_STANDBY;
+}
+
+static int pm8901_smps_set_voltage(struct regulator_dev *dev,
+		int min_uV, int max_uV, unsigned *selector)
+{
+	struct pm8901_vreg *vreg = rdev_get_drvdata(dev);
+	struct pm8901_chip *chip = vreg->chip;
+	int rc;
+	u8 val, band;
+
+	if (IS_PMIC_8901_V1(pm8901_rev(chip)))
+		min_uV = PMIC_8901_V1_SCALE(min_uV);
+
+	if (min_uV < SMPS_BAND_1_UV_MIN || min_uV > SMPS_BAND_3_UV_MAX)
+		return -EINVAL;
+
+	/* Round down for set points in the gaps between bands. */
+	if (min_uV > SMPS_BAND_1_UV_MAX && min_uV < SMPS_BAND_2_UV_MIN)
+		min_uV = SMPS_BAND_1_UV_MAX;
+	else if (min_uV > SMPS_BAND_2_UV_MAX
+			&& min_uV < SMPS_BAND_3_UV_SETPOINT_MIN)
+		min_uV = SMPS_BAND_2_UV_MAX;
+
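+	/* Pick the SMPS band and the VPROG step count within that band. */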
+	if (min_uV < SMPS_BAND_2_UV_MIN) {
+		val = ((min_uV - SMPS_BAND_1_UV_MIN) / SMPS_BAND_1_UV_STEP);
+		val = PM8901_SMPS_BAND_1_COMPENSATE(val);
+		band = SMPS_VCTRL_BAND_1;
+	} else if (min_uV < SMPS_BAND_3_UV_SETPOINT_MIN) {
+		val = ((min_uV - SMPS_BAND_2_UV_MIN) / SMPS_BAND_2_UV_STEP);
+		band = SMPS_VCTRL_BAND_2;
+	} else {
+		val = ((min_uV - SMPS_BAND_3_UV_MIN) / SMPS_BAND_3_UV_STEP);
+		band = SMPS_VCTRL_BAND_3;
+	}
+
+	rc = pm8901_vreg_write(chip, vreg->ctrl_addr, band | val,
+			SMPS_VCTRL_BAND_MASK | SMPS_VCTRL_VPROG_MASK,
+			&vreg->ctrl_reg);
+	if (rc)
+		goto bail;
+
+	rc = pm8901_vreg_write(chip, vreg->pfm_ctrl_addr, band | val,
+			SMPS_VCTRL_BAND_MASK | SMPS_VCTRL_VPROG_MASK,
+			&vreg->pfm_ctrl_reg);
+bail:
+	if (rc)
+		print_write_error(vreg, rc, __func__);
+
+	return rc;
+}
+
+static int pm8901_smps_get_voltage(struct regulator_dev *dev)
+{
+	struct pm8901_vreg *vreg = rdev_get_drvdata(dev);
+	struct pm8901_chip *chip = vreg->chip;
+	u8 vprog, band;
+	int ret = 0;
+
+	if ((vreg->pmr_reg & VREG_PMR_STATE_MASK) == VREG_PMR_STATE_LPM) {
+		vprog = vreg->pfm_ctrl_reg & SMPS_VCTRL_VPROG_MASK;
+		band = vreg->pfm_ctrl_reg & SMPS_VCTRL_BAND_MASK;
+	} else {
+		vprog = vreg->ctrl_reg & SMPS_VCTRL_VPROG_MASK;
+		band = vreg->ctrl_reg & SMPS_VCTRL_BAND_MASK;
+	}
+
+	if (band == SMPS_VCTRL_BAND_1)
+		ret = vprog * SMPS_BAND_1_UV_STEP + SMPS_BAND_1_UV_MIN;
+	else if (band == SMPS_VCTRL_BAND_2)
+		ret = vprog * SMPS_BAND_2_UV_STEP + SMPS_BAND_2_UV_MIN;
+	else
+		ret = vprog * SMPS_BAND_3_UV_STEP + SMPS_BAND_3_UV_MIN;
+
+	if (IS_PMIC_8901_V1(pm8901_rev(chip)))
+		ret = PMIC_8901_V1_SCALE_INV(ret);
+
+	return ret;
+}
+
+static int pm8901_vs_enable(struct regulator_dev *dev)
+{
+	struct pm8901_vreg *vreg = rdev_get_drvdata(dev);
+	struct pm8901_chip *chip = vreg->chip;
+	int rc;
+
+	/* Assert enable bit in PMR register. */
+	rc = pm8901_vreg_enable(dev);
+	if (rc)
+		return rc;
+
+	/* Make sure that switch is controlled via PMR register */
+	rc = pm8901_vreg_write(chip, vreg->ctrl_addr, VS_CTRL_USE_PMR,
+			VS_CTRL_ENABLE_MASK, &vreg->ctrl_reg);
+	if (rc)
+		print_write_error(vreg, rc, __func__);
+
+	return rc;
+}
+
+static int pm8901_vs_disable(struct regulator_dev *dev)
+{
+	struct pm8901_vreg *vreg = rdev_get_drvdata(dev);
+	struct pm8901_chip *chip = vreg->chip;
+	int rc;
+
+	/* Deassert enable bit in PMR register. */
+	rc = pm8901_vreg_disable(dev);
+	if (rc)
+		return rc;
+
+	/* Make sure that switch is controlled via PMR register */
+	rc = pm8901_vreg_write(chip, vreg->ctrl_addr, VS_CTRL_USE_PMR,
+			VS_CTRL_ENABLE_MASK, &vreg->ctrl_reg);
+	if (rc)
+		print_write_error(vreg, rc, __func__);
+
+	return rc;
+}
+
+static int pm8901_mpp_enable(struct regulator_dev *dev)
+{
+	struct pm8901_vreg *vreg = rdev_get_drvdata(dev);
+	int out_val;
+	int rc;
+
+	out_val = (vreg->pdata->active_high
+		   ? PM_MPP_DOUT_CTL_HIGH : PM_MPP_DOUT_CTL_LOW);
+
+	rc = pm8901_mpp_config(vreg->mpp_id, PM_MPP_TYPE_D_OUTPUT,
+			PM8901_MPP_DIG_LEVEL_VPH, out_val);
+
+	if (rc)
+		pr_err("%s: pm8901_mpp_config failed, rc=%d\n", __func__, rc);
+	else
+		vreg->state = 1;
+
+	return rc;
+}
+
+static int pm8901_mpp_disable(struct regulator_dev *dev)
+{
+	struct pm8901_vreg *vreg = rdev_get_drvdata(dev);
+	int out_val;
+	int rc;
+
+	out_val = (vreg->pdata->active_high
+		   ? PM_MPP_DOUT_CTL_LOW : PM_MPP_DOUT_CTL_HIGH);
+
+	rc = pm8901_mpp_config(vreg->mpp_id, PM_MPP_TYPE_D_OUTPUT,
+			PM8901_MPP_DIG_LEVEL_VPH, out_val);
+
+	if (rc)
+		pr_err("%s: pm8901_mpp_config failed, rc=%d\n", __func__, rc);
+	else
+		vreg->state = 0;
+
+	return rc;
+}
+
+static int pm8901_mpp_is_enabled(struct regulator_dev *dev)
+{
+	struct pm8901_vreg *vreg = rdev_get_drvdata(dev);
+	return vreg->state;
+}
+
+static struct regulator_ops pm8901_ldo_ops = {
+	.enable = pm8901_vreg_enable,
+	.disable = pm8901_ldo_disable,
+	.is_enabled = pm8901_vreg_is_enabled,
+	.set_voltage = pm8901_ldo_set_voltage,
+	.get_voltage = pm8901_ldo_get_voltage,
+	.set_mode = pm8901_vreg_set_mode,
+	.get_mode = pm8901_vreg_get_mode,
+	.get_optimum_mode = pm8901_vreg_get_optimum_mode,
+};
+
+static struct regulator_ops pm8901_smps_ops = {
+	.enable = pm8901_vreg_enable,
+	.disable = pm8901_vreg_disable,
+	.is_enabled = pm8901_vreg_is_enabled,
+	.set_voltage = pm8901_smps_set_voltage,
+	.get_voltage = pm8901_smps_get_voltage,
+	.set_mode = pm8901_vreg_set_mode,
+	.get_mode = pm8901_vreg_get_mode,
+	.get_optimum_mode = pm8901_vreg_get_optimum_mode,
+};
+
+static struct regulator_ops pm8901_vs_ops = {
+	.enable = pm8901_vs_enable,
+	.disable = pm8901_vs_disable,
+	.is_enabled = pm8901_vreg_is_enabled,
+	.set_mode = pm8901_vreg_set_mode,
+	.get_mode = pm8901_vreg_get_mode,
+};
+
+static struct regulator_ops pm8901_mpp_ops = {
+	.enable = pm8901_mpp_enable,
+	.disable = pm8901_mpp_disable,
+	.is_enabled = pm8901_mpp_is_enabled,
+};
+
+#define VREG_DESCRIP(_id, _name, _ops) \
+	[_id] = { \
+		.name = _name, \
+		.id = _id, \
+		.ops = _ops, \
+		.type = REGULATOR_VOLTAGE, \
+		.owner = THIS_MODULE, \
+	}
+
+static struct regulator_desc pm8901_vreg_descrip[] = {
+	VREG_DESCRIP(PM8901_VREG_ID_L0, "8901_l0", &pm8901_ldo_ops),
+	VREG_DESCRIP(PM8901_VREG_ID_L1, "8901_l1", &pm8901_ldo_ops),
+	VREG_DESCRIP(PM8901_VREG_ID_L2, "8901_l2", &pm8901_ldo_ops),
+	VREG_DESCRIP(PM8901_VREG_ID_L3, "8901_l3", &pm8901_ldo_ops),
+	VREG_DESCRIP(PM8901_VREG_ID_L4, "8901_l4", &pm8901_ldo_ops),
+	VREG_DESCRIP(PM8901_VREG_ID_L5, "8901_l5", &pm8901_ldo_ops),
+	VREG_DESCRIP(PM8901_VREG_ID_L6, "8901_l6", &pm8901_ldo_ops),
+
+	VREG_DESCRIP(PM8901_VREG_ID_S0, "8901_s0", &pm8901_smps_ops),
+	VREG_DESCRIP(PM8901_VREG_ID_S1, "8901_s1", &pm8901_smps_ops),
+	VREG_DESCRIP(PM8901_VREG_ID_S2, "8901_s2", &pm8901_smps_ops),
+	VREG_DESCRIP(PM8901_VREG_ID_S3, "8901_s3", &pm8901_smps_ops),
+	VREG_DESCRIP(PM8901_VREG_ID_S4, "8901_s4", &pm8901_smps_ops),
+
+	VREG_DESCRIP(PM8901_VREG_ID_MPP0,     "8901_mpp0",     &pm8901_mpp_ops),
+
+	VREG_DESCRIP(PM8901_VREG_ID_LVS0,     "8901_lvs0",     &pm8901_vs_ops),
+	VREG_DESCRIP(PM8901_VREG_ID_LVS1,     "8901_lvs1",     &pm8901_vs_ops),
+	VREG_DESCRIP(PM8901_VREG_ID_LVS2,     "8901_lvs2",     &pm8901_vs_ops),
+	VREG_DESCRIP(PM8901_VREG_ID_LVS3,     "8901_lvs3",     &pm8901_vs_ops),
+	VREG_DESCRIP(PM8901_VREG_ID_MVS0,     "8901_mvs0",     &pm8901_vs_ops),
+	VREG_DESCRIP(PM8901_VREG_ID_USB_OTG,  "8901_usb_otg",  &pm8901_vs_ops),
+	VREG_DESCRIP(PM8901_VREG_ID_HDMI_MVS, "8901_hdmi_mvs", &pm8901_vs_ops),
+};
+
+static int pm8901_init_ldo(struct pm8901_chip *chip, struct pm8901_vreg *vreg)
+{
+	int rc = 0, i;
+	u8 bank;
+
+	/* Store current regulator register values. */
+	for (i = 0; i < LDO_TEST_BANKS; i++) {
+		bank = REGULATOR_BANK_SEL(i);
+		rc = pm8901_write(chip, vreg->test_addr, &bank, 1);
+		if (rc)
+			goto bail;
+
+		rc = pm8901_read(chip, vreg->test_addr, &vreg->test_reg[i], 1);
+		if (rc)
+			goto bail;
+
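+		/* Keep the bank write bit set in the cached value for later writes. */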
+		vreg->test_reg[i] |= REGULATOR_BANK_WRITE;
+	}
+
+	/* Set pull down enable based on platform data. */
+	rc = pm8901_vreg_write(chip, vreg->ctrl_addr,
+		     (vreg->pdata->pull_down_enable ? LDO_PULL_DOWN_ENABLE : 0),
+		     LDO_PULL_DOWN_ENABLE_MASK, &vreg->ctrl_reg);
+bail:
+	return rc;
+}
+
+static int pm8901_init_smps(struct pm8901_chip *chip, struct pm8901_vreg *vreg)
+{
+	int rc;
+
+	/* Store current regulator register values. */
+	rc = pm8901_read(chip, vreg->pfm_ctrl_addr,
+			 &vreg->pfm_ctrl_reg, 1);
+	if (rc)
+		goto bail;
+
+	rc = pm8901_read(chip, vreg->pwr_cnfg_addr,
+			 &vreg->pwr_cnfg_reg, 1);
+	if (rc)
+		goto bail;
+
+	/* Set pull down enable based on platform data. */
+	rc = pm8901_vreg_write(chip, vreg->pwr_cnfg_addr,
+		    (vreg->pdata->pull_down_enable ? SMPS_PULL_DOWN_ENABLE : 0),
+		    SMPS_PULL_DOWN_ENABLE_MASK, &vreg->pwr_cnfg_reg);
+
+bail:
+	return rc;
+}
+
+static int pm8901_init_vs(struct pm8901_chip *chip, struct pm8901_vreg *vreg)
+{
+	int rc = 0;
+
+	/* Set pull down enable based on platform data. */
+	rc = pm8901_vreg_write(chip, vreg->ctrl_addr,
+		      (vreg->pdata->pull_down_enable ? VS_PULL_DOWN_ENABLE : 0),
+		      VS_PULL_DOWN_ENABLE_MASK, &vreg->ctrl_reg);
+
+	return rc;
+}
+
+static int pm8901_init_regulator(struct pm8901_chip *chip,
+		struct pm8901_vreg *vreg)
+{
+	int rc = 0;
+
+	/* Store current regulator register values. */
+	if (vreg->type != REGULATOR_TYPE_MPP) {
+		rc = pm8901_read(chip, vreg->ctrl_addr, &vreg->ctrl_reg, 1);
+		if (rc)
+			goto bail;
+
+		rc = pm8901_read(chip, vreg->pmr_addr, &vreg->pmr_reg, 1);
+		if (rc)
+			goto bail;
+	}
+
+	/* Set initial mode based on hardware state. */
+	if ((vreg->pmr_reg & VREG_PMR_STATE_MASK) == VREG_PMR_STATE_LPM)
+		vreg->optimum = REGULATOR_MODE_STANDBY;
+	else
+		vreg->optimum = REGULATOR_MODE_FAST;
+
+	vreg->mode_initialized = 0;
+
+	if (vreg->type == REGULATOR_TYPE_LDO)
+		rc = pm8901_init_ldo(chip, vreg);
+	else if (vreg->type == REGULATOR_TYPE_SMPS)
+		rc = pm8901_init_smps(chip, vreg);
+	else if (vreg->type == REGULATOR_TYPE_VS)
+		rc = pm8901_init_vs(chip, vreg);
+bail:
+	if (rc)
+		pr_err("%s: pm8901_read/write failed; initial register states "
+			"unknown, rc=%d\n", __func__, rc);
+
+	return rc;
+}
+
+static int __devinit pm8901_vreg_probe(struct platform_device *pdev)
+{
+	struct regulator_desc *rdesc;
+	struct pm8901_chip *chip;
+	struct pm8901_vreg *vreg;
+	const char *reg_name = NULL;
+	int rc = 0;
+
+	if (pdev == NULL)
+		return -EINVAL;
+
+	if (pdev->id >= 0 && pdev->id < PM8901_VREG_MAX) {
+		chip = dev_get_drvdata(pdev->dev.parent);
+		rdesc = &pm8901_vreg_descrip[pdev->id];
+		vreg = &pm8901_vreg[pdev->id];
+		vreg->pdata = pdev->dev.platform_data;
+		vreg->chip = chip;
+		reg_name = pm8901_vreg_descrip[pdev->id].name;
+
+		rc = pm8901_init_regulator(chip, vreg);
+		if (rc)
+			goto bail;
+
+		/* Disallow idle and normal modes if pin control isn't set. */
+		if (vreg->pdata->pin_ctrl == 0)
+			vreg->pdata->init_data.constraints.valid_modes_mask
+			      &= ~(REGULATOR_MODE_NORMAL | REGULATOR_MODE_IDLE);
+
+		vreg->rdev = regulator_register(rdesc, &pdev->dev,
+				&vreg->pdata->init_data, vreg);
+		if (IS_ERR(vreg->rdev)) {
+			rc = PTR_ERR(vreg->rdev);
+			pr_err("%s: regulator_register failed for %s, rc=%d\n",
+				__func__, reg_name, rc);
+		}
+	} else {
+		rc = -ENODEV;
+	}
+
+bail:
+	if (rc)
+		pr_err("%s: error for %s, rc=%d\n", __func__, reg_name, rc);
+
+	return rc;
+}
+
+static int __devexit pm8901_vreg_remove(struct platform_device *pdev)
+{
+	regulator_unregister(pm8901_vreg[pdev->id].rdev);
+	return 0;
+}
+
+static struct platform_driver pm8901_vreg_driver = {
+	.probe = pm8901_vreg_probe,
+	.remove = __devexit_p(pm8901_vreg_remove),
+	.driver = {
+		.name = "pm8901-regulator",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init pm8901_vreg_init(void)
+{
+	return platform_driver_register(&pm8901_vreg_driver);
+}
+
+static void __exit pm8901_vreg_exit(void)
+{
+	platform_driver_unregister(&pm8901_vreg_driver);
+}
+
+static void print_write_error(struct pm8901_vreg *vreg, int rc,
+				const char *func)
+{
+	const char *reg_name = NULL;
+	ptrdiff_t id = vreg - pm8901_vreg;
+
+	if (id >= 0 && id < PM8901_VREG_MAX)
+		reg_name = pm8901_vreg_descrip[id].name;
+	pr_err("%s: pm8901_vreg_write failed for %s, rc=%d\n",
+		func, reg_name, rc);
+}
+
+subsys_initcall(pm8901_vreg_init);
+module_exit(pm8901_vreg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC8901 regulator driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:pm8901-regulator");
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 27c3774..93feb81 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -94,6 +94,23 @@
 
 	  If unsure, say Y.
 
+config RTC_INTF_ALARM
+	bool "Android alarm driver"
+	depends on RTC_CLASS
+	default y
+	help
+	  Provides non-wakeup and rtc backed wakeup alarms based on rtc or
+	  elapsed realtime, and a non-wakeup alarm on the monotonic clock.
+	  Also provides an interface to set the wall time which must be used
+	  for elapsed realtime to work.
+
+config RTC_INTF_ALARM_DEV
+	bool "Android alarm device"
+	depends on RTC_INTF_ALARM
+	default y
+	help
+	  Exports the alarm interface to user-space.
+
 config RTC_INTF_DEV_UIE_EMUL
 	bool "RTC UIE emulation on dev interface"
 	depends on RTC_INTF_DEV
@@ -739,6 +756,37 @@
 
 comment "on-CPU RTC drivers"
 
+config RTC_DRV_MSM
+	tristate "RTC on Qualcomm Chipsets"
+	depends on ARCH_MSM
+	default y
+	help
+	  RTC driver for Qualcomm chipsets
+
+config RTC_SECURE_TIME_SUPPORT
+	bool "Support for secure time on Qualcomm Chipsets"
+	depends on RTC_DRV_MSM = y
+	default y
+	help
+	  Say yes here to have an additional handle for reading the secure
+	  time maintained by the ARM9.
+
+config RTC_ASYNC_MODEM_SUPPORT
+	bool "Support for time update on async modem boot"
+	depends on RTC_DRV_MSM && (ARCH_MSM8X60 || ARCH_QSD8X50)
+	default n
+	help
+	  Say yes here to have the system time updated if there is
+	  an asynchronous MODEM boot.
+
+config RTC_DRV_MSM7X00A
+	tristate "MSM7X00A"
+	depends on ARCH_MSM
+	default n
+	help
+	  RTC driver for Qualcomm MSM7K chipsets
+
 config RTC_DRV_DAVINCI
 	tristate "TI DaVinci RTC"
 	depends on ARCH_DAVINCI_DM365
@@ -749,6 +797,13 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-davinci.
 
 config RTC_DRV_OMAP
 	tristate "TI OMAP1"
 	depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_DAVINCI_DA8XX
@@ -1078,4 +1133,33 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-puv3.
 
+config RTC_PM8058
+	tristate "PMIC8058 RTC support"
+	default n
+	depends on PMIC8058
+	help
+	  Say Y here if you want support for the PMIC8058 RTC.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called pmic8058-rtc.
+
+config RTC_PM8058_WRITE_ENABLE
+	bool "PM8058 RTC write enable"
+	default n
+	depends on RTC_PM8058
+	help
+	  Say Y here if you want to support the write operation for
+	  PMIC8058 RTC.
+
+	  By default the write operation is not supported.
+
+config RTC_DRV_PM8XXX
+	tristate "Qualcomm PMIC8XXX RTC"
+	depends on MFD_PM8XXX
+	help
+	  Say Y here if you want to support the Qualcomm PMIC8XXX RTC.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called rtc-pm8xxx.
+
 endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 7d27958..9e41297 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -67,6 +67,8 @@
 obj-$(CONFIG_RTC_DRV_MAX8998)	+= rtc-max8998.o
 obj-$(CONFIG_RTC_DRV_MAX6902)	+= rtc-max6902.o
 obj-$(CONFIG_RTC_DRV_MC13XXX)	+= rtc-mc13xxx.o
+obj-$(CONFIG_RTC_DRV_MSM)	+= rtc-msm.o
+obj-$(CONFIG_RTC_DRV_MSM7X00A)	+= rtc-msm7x00a.o
 obj-$(CONFIG_RTC_DRV_MSM6242)	+= rtc-msm6242.o
 obj-$(CONFIG_RTC_DRV_MPC5121)	+= rtc-mpc5121.o
 obj-$(CONFIG_RTC_DRV_MV)	+= rtc-mv.o
@@ -77,6 +79,7 @@
 obj-$(CONFIG_RTC_DRV_PCF8583)	+= rtc-pcf8583.o
 obj-$(CONFIG_RTC_DRV_PCF2123)	+= rtc-pcf2123.o
 obj-$(CONFIG_RTC_DRV_PCF50633)	+= rtc-pcf50633.o
+obj-$(CONFIG_RTC_DRV_PM8XXX)    += rtc-pm8xxx.o
 obj-$(CONFIG_RTC_DRV_PL030)	+= rtc-pl030.o
 obj-$(CONFIG_RTC_DRV_PL031)	+= rtc-pl031.o
 obj-$(CONFIG_RTC_DRV_PS3)	+= rtc-ps3.o
@@ -110,3 +113,4 @@
 obj-$(CONFIG_RTC_DRV_WM831X)	+= rtc-wm831x.o
 obj-$(CONFIG_RTC_DRV_WM8350)	+= rtc-wm8350.o
 obj-$(CONFIG_RTC_DRV_X1205)	+= rtc-x1205.o
+obj-$(CONFIG_RTC_PM8058)	+= rtc-pm8058.o
diff --git a/drivers/rtc/alarm.c b/drivers/rtc/alarm.c
index e0e98dd..3e7f698 100644
--- a/drivers/rtc/alarm.c
+++ b/drivers/rtc/alarm.c
@@ -299,6 +299,30 @@
 	return ret;
 }
 
+
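+/*
+ * Stop the RTC-based alarms, shift the elapsed-realtime delta by the
+ * difference between the old and new wall clock times, then restart the
+ * alarm timers so queued alarms stay consistent with the updated time.
+ */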
+void
+alarm_update_timedelta(struct timespec tmp_time, struct timespec new_time)
+{
+	int i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&alarm_slock, flags);
+	for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) {
+		hrtimer_try_to_cancel(&alarms[i].timer);
+		alarms[i].stopped = true;
+		alarms[i].stopped_time = timespec_to_ktime(tmp_time);
+	}
+	alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].delta =
+		alarms[ANDROID_ALARM_ELAPSED_REALTIME].delta =
+		ktime_sub(alarms[ANDROID_ALARM_ELAPSED_REALTIME].delta,
+			timespec_to_ktime(timespec_sub(tmp_time, new_time)));
+	for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) {
+		alarms[i].stopped = false;
+		update_timer_locked(&alarms[i], false);
+	}
+	spin_unlock_irqrestore(&alarm_slock, flags);
+}
+
 /**
  * alarm_get_elapsed_realtime - get the elapsed real time in ktime_t format
  *
diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c
index bc90b09..29735c2 100644
--- a/drivers/rtc/hctosys.c
+++ b/drivers/rtc/hctosys.c
@@ -24,7 +24,7 @@
 
 int rtc_hctosys_ret = -ENODEV;
 
-static int __init rtc_hctosys(void)
+int rtc_hctosys(void)
 {
 	int err = -ENODEV;
 	struct rtc_time tm;
diff --git a/drivers/rtc/rtc-msm.c b/drivers/rtc/rtc-msm.c
new file mode 100644
index 0000000..c17e461
--- /dev/null
+++ b/drivers/rtc/rtc-msm.c
@@ -0,0 +1,819 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (c) 2009-2011 Code Aurora Forum. All rights reserved.
+ * Author: San Mehat <san@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/android_alarm.h>
+
+#include <linux/rtc.h>
+#include <linux/rtc-msm.h>
+#include <linux/msm_rpcrouter.h>
+#include <mach/msm_rpcrouter.h>
+
+#define APP_TIMEREMOTE_PDEV_NAME "rs00000000"
+
+#define TIMEREMOTE_PROCEEDURE_SET_JULIAN	6
+#define TIMEREMOTE_PROCEEDURE_GET_JULIAN	7
+#ifdef CONFIG_RTC_SECURE_TIME_SUPPORT
+#define TIMEREMOTE_PROCEEDURE_GET_SECURE_JULIAN	11
+#define TIMEREMOTE_PROCEEDURE_SET_SECURE_JULIAN	16
+#endif
+#define TIMEREMOTE_PROG_NUMBER 0x30000048
+#define TIMEREMOTE_PROG_VER_1 0x00010001
+#define TIMEREMOTE_PROG_VER_2 0x00040001
+
+#define RTC_REQUEST_CB_PROC		0x17
+#define RTC_CLIENT_INIT_PROC		0x12
+#define RTC_EVENT_CB_PROC		0x1
+#define RTC_CB_ID			0x1
+
+/* Client request errors */
+enum rtc_rpc_err {
+	ERR_NONE,
+	ERR_CLIENT_ID_PTR,		/* Invalid client ID pointer */
+	ERR_CLIENT_TYPE,		/* Invalid client type */
+	ERR_CLIENT_ID,			/* Invalid client ID */
+	ERR_TASK_NOT_READY,		/* task is not ready for clients */
+	ERR_INVALID_PROCESSOR,		/* Invalid processor id */
+	ERR_UNSUPPORTED,		/* Unsupported request */
+	ERR_GENERAL,			/* Any General Error */
+	ERR_RPC,			/* Any ONCRPC Error */
+	ERR_ALREADY_REG,		/* Client already registered */
+	ERR_MAX
+};
+
+enum processor_type {
+	CLIENT_PROCESSOR_NONE   = 0,
+	CLIENT_PROCESSOR_MODEM,
+	CLIENT_PROCESSOR_APP1,
+	CLIENT_PROCESSOR_APP2,
+	CLIENT_PROCESSOR_MAX
+};
+
+/* Client types */
+enum client_type {
+	CLIENT_TYPE_GEN1 = 0,
+	CLIENT_FLOATING1,
+	CLIENT_FLOATING2,
+	CLIENT_TYPE_INTERNAL,
+	CLIENT_TYPE_GENOFF_UPDATE,
+	CLIENT_TYPE_MAX
+};
+
+/* Event types */
+enum event_type {
+	EVENT_TOD_CHANGE = 0,
+	EVENT_GENOFF_CHANGE,
+	EVENT_MAX
+};
+
+struct tod_update_info {
+	uint32_t	tick;
+	uint64_t	stamp;
+	uint32_t	freq;
+};
+
+enum time_bases_info {
+	TIME_RTC = 0,
+	TIME_TOD,
+	TIME_USER,
+	TIME_SECURE,
+	TIME_INVALID
+};
+
+struct genoff_update_info {
+	enum time_bases_info time_base;
+	uint64_t	offset;
+};
+
+union cb_info {
+	struct tod_update_info tod_update;
+	struct genoff_update_info genoff_update;
+};
+
+struct rtc_cb_recv {
+	uint32_t client_cb_id;
+	enum event_type event;
+	uint32_t cb_info_ptr;
+	union cb_info cb_info_data;
+};
+
+struct msm_rtc {
+	int proc;
+	struct msm_rpc_client *rpc_client;
+	u8 client_id;
+	struct rtc_device *rtc;
+#ifdef CONFIG_RTC_SECURE_TIME_SUPPORT
+	struct rtc_device *rtcsecure;
+#endif
+	unsigned long rtcalarm_time;
+};
+
+struct rpc_time_julian {
+	uint32_t year;
+	uint32_t month;
+	uint32_t day;
+	uint32_t hour;
+	uint32_t minute;
+	uint32_t second;
+	uint32_t day_of_week;
+};
+
+struct rtc_tod_args {
+	int proc;
+	struct rtc_time *tm;
+};
+
+#ifdef CONFIG_PM
+struct suspend_state_info {
+	atomic_t state;
+	int64_t tick_at_suspend;
+};
+
+static struct suspend_state_info suspend_state = {ATOMIC_INIT(0), 0};
+
+void msmrtc_updateatsuspend(struct timespec *ts)
+{
+	int64_t now, sleep, sclk_max;
+
+	if (atomic_read(&suspend_state.state)) {
+		now = msm_timer_get_sclk_time(&sclk_max);
+
+		if (now && suspend_state.tick_at_suspend) {
+			if (now < suspend_state.tick_at_suspend) {
+				sleep = sclk_max -
+					suspend_state.tick_at_suspend + now;
+			} else
+				sleep = now - suspend_state.tick_at_suspend;
+
+			timespec_add_ns(ts, sleep);
+			suspend_state.tick_at_suspend = now;
+		} else
+			pr_err("%s: Invalid ticks from SCLK now=%lld, "
+				"tick_at_suspend=%lld\n", __func__, now,
+				suspend_state.tick_at_suspend);
+	}
+
+}
+#else
+void msmrtc_updateatsuspend(struct timespec *ts) { }
+#endif
+EXPORT_SYMBOL(msmrtc_updateatsuspend);
+
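+/* Marshal the big-endian RPC arguments for the timeremote get/set calls. */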
+static int msmrtc_tod_proc_args(struct msm_rpc_client *client, void *buff,
+							void *data)
+{
+	struct rtc_tod_args *rtc_args = data;
+
+	if ((rtc_args->proc == TIMEREMOTE_PROCEEDURE_SET_JULIAN)
+#ifdef CONFIG_RTC_SECURE_TIME_SUPPORT
+	|| (rtc_args->proc == TIMEREMOTE_PROCEEDURE_SET_SECURE_JULIAN)
+#endif
+	) {
+		struct timeremote_set_julian_req {
+			uint32_t opt_arg;
+			struct rpc_time_julian time;
+		};
+		struct timeremote_set_julian_req *set_req = buff;
+
+		set_req->opt_arg = cpu_to_be32(0x1);
+		set_req->time.year = cpu_to_be32(rtc_args->tm->tm_year);
+		set_req->time.month = cpu_to_be32(rtc_args->tm->tm_mon + 1);
+		set_req->time.day = cpu_to_be32(rtc_args->tm->tm_mday);
+		set_req->time.hour = cpu_to_be32(rtc_args->tm->tm_hour);
+		set_req->time.minute = cpu_to_be32(rtc_args->tm->tm_min);
+		set_req->time.second = cpu_to_be32(rtc_args->tm->tm_sec);
+		set_req->time.day_of_week = cpu_to_be32(rtc_args->tm->tm_wday);
+
+		return sizeof(*set_req);
+
+	} else if ((rtc_args->proc == TIMEREMOTE_PROCEEDURE_GET_JULIAN)
+#ifdef CONFIG_RTC_SECURE_TIME_SUPPORT
+	|| (rtc_args->proc == TIMEREMOTE_PROCEEDURE_GET_SECURE_JULIAN)
+#endif
+	) {
+		*(uint32_t *)buff = (uint32_t) cpu_to_be32(0x1);
+
+		return sizeof(uint32_t);
+	} else
+		return 0;
+}
+
+static bool rtc_check_overflow(struct rtc_time *tm)
+{
+	if (tm->tm_year < 138)
+		return false;
+
+	if (tm->tm_year > 138)
+		return true;
+
+	if ((tm->tm_year == 138) && (tm->tm_mon == 0) && (tm->tm_mday < 19))
+		return false;
+
+	return true;
+}
+
+static int msmrtc_tod_proc_result(struct msm_rpc_client *client, void *buff,
+							void *data)
+{
+	struct rtc_tod_args *rtc_args = data;
+
+	if ((rtc_args->proc == TIMEREMOTE_PROCEEDURE_GET_JULIAN)
+#ifdef CONFIG_RTC_SECURE_TIME_SUPPORT
+	|| (rtc_args->proc == TIMEREMOTE_PROCEEDURE_GET_SECURE_JULIAN)
+#endif
+	)  {
+		struct timeremote_get_julian_rep {
+			uint32_t opt_arg;
+			struct rpc_time_julian time;
+		};
+		struct timeremote_get_julian_rep *result = buff;
+
+		if (be32_to_cpu(result->opt_arg) != 0x1)
+			return -ENODATA;
+
+		rtc_args->tm->tm_year = be32_to_cpu(result->time.year);
+		rtc_args->tm->tm_mon = be32_to_cpu(result->time.month);
+		rtc_args->tm->tm_mday = be32_to_cpu(result->time.day);
+		rtc_args->tm->tm_hour = be32_to_cpu(result->time.hour);
+		rtc_args->tm->tm_min = be32_to_cpu(result->time.minute);
+		rtc_args->tm->tm_sec = be32_to_cpu(result->time.second);
+		rtc_args->tm->tm_wday = be32_to_cpu(result->time.day_of_week);
+
+		pr_debug("%s: %.2u/%.2u/%.4u %.2u:%.2u:%.2u (%.2u)\n",
+			__func__, rtc_args->tm->tm_mon, rtc_args->tm->tm_mday,
+			rtc_args->tm->tm_year, rtc_args->tm->tm_hour,
+			rtc_args->tm->tm_min, rtc_args->tm->tm_sec,
+			rtc_args->tm->tm_wday);
+
+		/* RTC layer expects years to start at 1900 */
+		rtc_args->tm->tm_year -= 1900;
+		/* RTC layer expects mons to be 0 based */
+		rtc_args->tm->tm_mon--;
+
+		if (rtc_valid_tm(rtc_args->tm) < 0) {
+			pr_err("%s: Retrieved date/time not valid\n", __func__);
+			rtc_time_to_tm(0, rtc_args->tm);
+		}
+
+		/*
+		 * Check if the time received is > 01-19-2038, to prevent
+		 * overflow. In such a case, return the EPOCH time.
+		 */
+		if (rtc_check_overflow(rtc_args->tm)) {
+			pr_err("Invalid time (year > 2038)\n");
+			rtc_time_to_tm(0, rtc_args->tm);
+		}
+
+		return 0;
+	} else
+		return 0;
+}
+
+static int
+msmrtc_timeremote_set_time(struct device *dev, struct rtc_time *tm)
+{
+	int rc;
+	struct rtc_tod_args rtc_args;
+	struct msm_rtc *rtc_pdata = dev_get_drvdata(dev);
+
+	if (tm->tm_year < 1900)
+		tm->tm_year += 1900;
+
+	if (tm->tm_year < 1970)
+		return -EINVAL;
+
+	dev_dbg(dev, "%s: %.2u/%.2u/%.4u %.2u:%.2u:%.2u (%.2u)\n",
+	       __func__, tm->tm_mon, tm->tm_mday, tm->tm_year,
+	       tm->tm_hour, tm->tm_min, tm->tm_sec, tm->tm_wday);
+
+	rtc_args.proc = TIMEREMOTE_PROCEEDURE_SET_JULIAN;
+	rtc_args.tm = tm;
+	rc = msm_rpc_client_req(rtc_pdata->rpc_client,
+				TIMEREMOTE_PROCEEDURE_SET_JULIAN,
+				msmrtc_tod_proc_args, &rtc_args,
+				NULL, NULL, -1);
+	if (rc) {
+		dev_err(dev, "%s: rtc time (TOD) could not be set\n", __func__);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int
+msmrtc_timeremote_read_time(struct device *dev, struct rtc_time *tm)
+{
+	int rc;
+	struct rtc_tod_args rtc_args;
+	struct msm_rtc *rtc_pdata = dev_get_drvdata(dev);
+
+	rtc_args.proc = TIMEREMOTE_PROCEEDURE_GET_JULIAN;
+	rtc_args.tm = tm;
+
+	rc = msm_rpc_client_req(rtc_pdata->rpc_client,
+				TIMEREMOTE_PROCEEDURE_GET_JULIAN,
+				msmrtc_tod_proc_args, &rtc_args,
+				msmrtc_tod_proc_result, &rtc_args, -1);
+
+	if (rc) {
+		dev_err(dev, "%s: Error retrieving rtc (TOD) time\n", __func__);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int
+msmrtc_virtual_alarm_set(struct device *dev, struct rtc_wkalrm *a)
+{
+	struct msm_rtc *rtc_pdata = dev_get_drvdata(dev);
+	unsigned long now = get_seconds();
+
+	if (!a->enabled) {
+		rtc_pdata->rtcalarm_time = 0;
+		return 0;
+	} else
+		rtc_tm_to_time(&a->time, &(rtc_pdata->rtcalarm_time));
+
+	if (now > rtc_pdata->rtcalarm_time) {
+		dev_err(dev, "%s: Attempt to set alarm in the past\n",
+		       __func__);
+		rtc_pdata->rtcalarm_time = 0;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct rtc_class_ops msm_rtc_ops = {
+	.read_time	= msmrtc_timeremote_read_time,
+	.set_time	= msmrtc_timeremote_set_time,
+	.set_alarm	= msmrtc_virtual_alarm_set,
+};
+
+#ifdef CONFIG_RTC_SECURE_TIME_SUPPORT
+static int
+msmrtc_timeremote_set_time_secure(struct device *dev, struct rtc_time *tm)
+{
+	int rc;
+	struct rtc_tod_args rtc_args;
+	struct msm_rtc *rtc_pdata = dev_get_drvdata(dev);
+
+	if (tm->tm_year < 1900)
+		tm->tm_year += 1900;
+
+	if (tm->tm_year < 1970)
+		return -EINVAL;
+
+	dev_dbg(dev, "%s: %.2u/%.2u/%.4u %.2u:%.2u:%.2u (%.2u)\n",
+	       __func__, tm->tm_mon, tm->tm_mday, tm->tm_year,
+	       tm->tm_hour, tm->tm_min, tm->tm_sec, tm->tm_wday);
+
+	rtc_args.proc = TIMEREMOTE_PROCEEDURE_SET_SECURE_JULIAN;
+	rtc_args.tm = tm;
+
+	rc = msm_rpc_client_req(rtc_pdata->rpc_client,
+			TIMEREMOTE_PROCEEDURE_SET_SECURE_JULIAN,
+				msmrtc_tod_proc_args, &rtc_args,
+				NULL, NULL, -1);
+	if (rc) {
+		dev_err(dev,
+			"%s: rtc secure time could not be set\n", __func__);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int
+msmrtc_timeremote_read_time_secure(struct device *dev, struct rtc_time *tm)
+{
+	int rc;
+	struct rtc_tod_args rtc_args;
+	struct msm_rtc *rtc_pdata = dev_get_drvdata(dev);
+	rtc_args.proc = TIMEREMOTE_PROCEEDURE_GET_SECURE_JULIAN;
+	rtc_args.tm = tm;
+
+	rc = msm_rpc_client_req(rtc_pdata->rpc_client,
+		TIMEREMOTE_PROCEEDURE_GET_SECURE_JULIAN, msmrtc_tod_proc_args,
+		&rtc_args, msmrtc_tod_proc_result, &rtc_args, -1);
+
+	if (rc) {
+		dev_err(dev,
+			"%s: Error retrieving secure rtc time\n", __func__);
+		return rc;
+	}
+
+	return 0;
+}
+
+static struct rtc_class_ops msm_rtc_ops_secure = {
+	.read_time	= msmrtc_timeremote_read_time_secure,
+	.set_time	= msmrtc_timeremote_set_time_secure,
+};
+#endif
+
+static void process_cb_request(void *buffer)
+{
+	struct rtc_cb_recv *rtc_cb = buffer;
+	struct timespec ts, tv;
+
+	rtc_cb->client_cb_id = be32_to_cpu(rtc_cb->client_cb_id);
+	rtc_cb->event = be32_to_cpu(rtc_cb->event);
+	rtc_cb->cb_info_ptr = be32_to_cpu(rtc_cb->cb_info_ptr);
+
+	if (rtc_cb->event == EVENT_TOD_CHANGE) {
+		/* A TOD update has been received from the Modem */
+		rtc_cb->cb_info_data.tod_update.tick =
+			be32_to_cpu(rtc_cb->cb_info_data.tod_update.tick);
+		rtc_cb->cb_info_data.tod_update.stamp =
+			be64_to_cpu(rtc_cb->cb_info_data.tod_update.stamp);
+		rtc_cb->cb_info_data.tod_update.freq =
+			be32_to_cpu(rtc_cb->cb_info_data.tod_update.freq);
+		pr_info("RPC CALL -- TOD TIME UPDATE: tick = %d\n"
+			"stamp=%lld, freq = %d\n",
+			rtc_cb->cb_info_data.tod_update.tick,
+			rtc_cb->cb_info_data.tod_update.stamp,
+			rtc_cb->cb_info_data.tod_update.freq);
+
+		getnstimeofday(&ts);
+		msmrtc_updateatsuspend(&ts);
+		rtc_hctosys();
+		getnstimeofday(&tv);
+		/* Update the alarm information with the new time info. */
+		alarm_update_timedelta(ts, tv);
+
+	} else
+		pr_err("%s: Unknown event EVENT=%x\n",
+					__func__, rtc_cb->event);
+}
+
+static int msmrtc_cb_func(struct msm_rpc_client *client, void *buffer, int size)
+{
+	int rc = -1;
+	struct rpc_request_hdr *recv = buffer;
+
+	recv->xid = be32_to_cpu(recv->xid);
+	recv->type = be32_to_cpu(recv->type);
+	recv->rpc_vers = be32_to_cpu(recv->rpc_vers);
+	recv->prog = be32_to_cpu(recv->prog);
+	recv->vers = be32_to_cpu(recv->vers);
+	recv->procedure = be32_to_cpu(recv->procedure);
+
+	if (recv->procedure == RTC_EVENT_CB_PROC)
+		process_cb_request((void *) (recv + 1));
+
+	msm_rpc_start_accepted_reply(client, recv->xid,
+				RPC_ACCEPTSTAT_SUCCESS);
+
+	rc = msm_rpc_send_accepted_reply(client, 0);
+	if (rc) {
+		pr_debug("%s: sending reply failed: %d\n", __func__, rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int msmrtc_rpc_proc_args(struct msm_rpc_client *client, void *buff,
+							void *data)
+{
+	struct msm_rtc *rtc_pdata = data;
+
+	if (rtc_pdata->proc == RTC_CLIENT_INIT_PROC) {
+		/* arguments passed to the client_init function */
+		struct rtc_client_init_req {
+			enum client_type client;
+			uint32_t client_id_ptr;
+			u8 client_id;
+			enum processor_type processor;
+		};
+		struct rtc_client_init_req *req_1 = buff;
+
+		req_1->client = cpu_to_be32(CLIENT_TYPE_INTERNAL);
+		req_1->client_id_ptr = cpu_to_be32(0x1);
+		req_1->client_id = (u8) cpu_to_be32(0x1);
+		req_1->processor = cpu_to_be32(CLIENT_PROCESSOR_APP1);
+
+		return sizeof(*req_1);
+
+	} else if (rtc_pdata->proc == RTC_REQUEST_CB_PROC) {
+		/* arguments passed to the request_cb function */
+		struct rtc_event_req {
+			u8 client_id;
+			uint32_t rtc_cb_id;
+		};
+		struct rtc_event_req *req_2 = buff;
+
+		req_2->client_id =  (u8) cpu_to_be32(rtc_pdata->client_id);
+		req_2->rtc_cb_id = cpu_to_be32(RTC_CB_ID);
+
+		return sizeof(*req_2);
+	} else
+		return 0;
+}
+
+static int msmrtc_rpc_proc_result(struct msm_rpc_client *client, void *buff,
+							void *data)
+{
+	uint32_t result = -EINVAL;
+	struct msm_rtc *rtc_pdata = data;
+
+	if (rtc_pdata->proc == RTC_CLIENT_INIT_PROC) {
+		/* process reply received from client_init function */
+		uint32_t client_id_ptr;
+		result = be32_to_cpu(*(uint32_t *)buff);
+		buff += sizeof(uint32_t);
+		client_id_ptr = be32_to_cpu(*(uint32_t *)(buff));
+		buff += sizeof(uint32_t);
+		if (client_id_ptr == 1)
+			rtc_pdata->client_id = (u8)
+					be32_to_cpu(*(uint32_t *)(buff));
+		else {
+			pr_debug("%s: Client-id not received from Modem\n",
+								__func__);
+			return -EINVAL;
+		}
+	} else if (rtc_pdata->proc == RTC_REQUEST_CB_PROC) {
+		/* process reply received from request_cb function */
+		result = be32_to_cpu(*(uint32_t *)buff);
+	}
+
+	if (result == ERR_NONE) {
+		pr_debug("%s: RPC client reply for PROC=%x success\n",
+					 __func__, rtc_pdata->proc);
+		return 0;
+	}
+
+	pr_debug("%s: RPC client registration failed ERROR=%x\n",
+						__func__, result);
+	return -EINVAL;
+}
+
+static int msmrtc_setup_cb(struct msm_rtc *rtc_pdata)
+{
+	int rc;
+
+	/* Register with the server with client specific info */
+	rtc_pdata->proc = RTC_CLIENT_INIT_PROC;
+	rc = msm_rpc_client_req(rtc_pdata->rpc_client, RTC_CLIENT_INIT_PROC,
+				msmrtc_rpc_proc_args, rtc_pdata,
+				msmrtc_rpc_proc_result, rtc_pdata, -1);
+	if (rc) {
+		pr_debug("%s: RPC client registration for PROC:%x failed\n",
+					__func__, RTC_CLIENT_INIT_PROC);
+		return rc;
+	}
+
+	/* Register with server for the callback event */
+	rtc_pdata->proc = RTC_REQUEST_CB_PROC;
+	rc = msm_rpc_client_req(rtc_pdata->rpc_client, RTC_REQUEST_CB_PROC,
+				msmrtc_rpc_proc_args, rtc_pdata,
+				msmrtc_rpc_proc_result, rtc_pdata, -1);
+	if (rc) {
+		pr_debug("%s: RPC client registration for PROC:%x failed\n",
+					__func__, RTC_REQUEST_CB_PROC);
+	}
+
+	return rc;
+}
+
+static int __devinit
+msmrtc_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct msm_rtc *rtc_pdata = NULL;
+	struct rpcsvr_platform_device *rdev =
+		container_of(pdev, struct rpcsvr_platform_device, base);
+	uint32_t prog_version;
+
+
+	if (pdev->id == (TIMEREMOTE_PROG_VER_1 & RPC_VERSION_MAJOR_MASK))
+		prog_version = TIMEREMOTE_PROG_VER_1;
+	else if (pdev->id == (TIMEREMOTE_PROG_VER_2 &
+			      RPC_VERSION_MAJOR_MASK))
+		prog_version = TIMEREMOTE_PROG_VER_2;
+	else
+		return -EINVAL;
+
+	rtc_pdata = kzalloc(sizeof(*rtc_pdata), GFP_KERNEL);
+	if (rtc_pdata == NULL) {
+		dev_err(&pdev->dev,
+			"%s: Unable to allocate memory\n", __func__);
+		return -ENOMEM;
+	}
+	rtc_pdata->rpc_client = msm_rpc_register_client("rtc", rdev->prog,
+				prog_version, 1, msmrtc_cb_func);
+	if (IS_ERR(rtc_pdata->rpc_client)) {
+		dev_err(&pdev->dev,
+			 "%s: init RPC failed! VERS = %x\n", __func__,
+					prog_version);
+		rc = PTR_ERR(rtc_pdata->rpc_client);
+		kfree(rtc_pdata);
+		return rc;
+	}
+
+	/*
+	 * Set up the callback client.
+	 * For older targets this initialization will fail
+	 */
+	rc = msmrtc_setup_cb(rtc_pdata);
+	if (rc)
+		dev_dbg(&pdev->dev, "%s: Could not initialize RPC callback\n",
+								__func__);
+
+	rtc_pdata->rtcalarm_time = 0;
+	platform_set_drvdata(pdev, rtc_pdata);
+
+	rtc_pdata->rtc = rtc_device_register("msm_rtc",
+				  &pdev->dev,
+				  &msm_rtc_ops,
+				  THIS_MODULE);
+	if (IS_ERR(rtc_pdata->rtc)) {
+		dev_err(&pdev->dev, "%s: Can't register RTC device (%ld)\n",
+		       pdev->name, PTR_ERR(rtc_pdata->rtc));
+		rc = PTR_ERR(rtc_pdata->rtc);
+		goto fail_cb_setup;
+	}
+
+#ifdef CONFIG_RTC_SECURE_TIME_SUPPORT
+	rtc_pdata->rtcsecure = rtc_device_register("msm_rtc_secure",
+				  &pdev->dev,
+				  &msm_rtc_ops_secure,
+				  THIS_MODULE);
+
+	if (IS_ERR(rtc_pdata->rtcsecure)) {
+		dev_err(&pdev->dev,
+			"%s: Can't register RTC Secure device (%ld)\n",
+		       pdev->name, PTR_ERR(rtc_pdata->rtcsecure));
+		rtc_device_unregister(rtc_pdata->rtc);
+		rc = PTR_ERR(rtc_pdata->rtcsecure);
+		goto fail_cb_setup;
+	}
+#endif
+
+#ifdef CONFIG_RTC_ASYNC_MODEM_SUPPORT
+	rtc_hctosys();
+#endif
+
+	return 0;
+
+fail_cb_setup:
+	msm_rpc_unregister_client(rtc_pdata->rpc_client);
+	kfree(rtc_pdata);
+	return rc;
+}
+
+
+#ifdef CONFIG_PM
+
+static void
+msmrtc_alarmtimer_expired(unsigned long _data,
+				struct msm_rtc *rtc_pdata)
+{
+	pr_debug("%s: Generating alarm event (src %lu)\n",
+	       rtc_pdata->rtc->name, _data);
+
+	rtc_update_irq(rtc_pdata->rtc, 1, RTC_IRQF | RTC_AF);
+	rtc_pdata->rtcalarm_time = 0;
+}
+
+static int
+msmrtc_suspend(struct platform_device *dev, pm_message_t state)
+{
+	int rc, diff;
+	struct rtc_time tm;
+	unsigned long now;
+	struct msm_rtc *rtc_pdata = platform_get_drvdata(dev);
+
+	suspend_state.tick_at_suspend = msm_timer_get_sclk_time(NULL);
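+	/* Cap the max sleep time so the system wakes before the next RTC alarm. */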
+	if (rtc_pdata->rtcalarm_time) {
+		rc = msmrtc_timeremote_read_time(&dev->dev, &tm);
+		if (rc) {
+			dev_err(&dev->dev,
+				"%s: Unable to read from RTC\n", __func__);
+			return rc;
+		}
+		rtc_tm_to_time(&tm, &now);
+		diff = rtc_pdata->rtcalarm_time - now;
+		if (diff <= 0) {
+			msmrtc_alarmtimer_expired(1 , rtc_pdata);
+			msm_pm_set_max_sleep_time(0);
+			atomic_inc(&suspend_state.state);
+			return 0;
+		}
+		msm_pm_set_max_sleep_time((int64_t)
+			((int64_t) diff * NSEC_PER_SEC));
+	} else
+		msm_pm_set_max_sleep_time(0);
+	atomic_inc(&suspend_state.state);
+	return 0;
+}
+
+static int
+msmrtc_resume(struct platform_device *dev)
+{
+	int rc, diff;
+	struct rtc_time tm;
+	unsigned long now;
+	struct msm_rtc *rtc_pdata = platform_get_drvdata(dev);
+
+	if (rtc_pdata->rtcalarm_time) {
+		rc = msmrtc_timeremote_read_time(&dev->dev, &tm);
+		if (rc) {
+			dev_err(&dev->dev,
+				"%s: Unable to read from RTC\n", __func__);
+			return rc;
+		}
+		rtc_tm_to_time(&tm, &now);
+		diff = rtc_pdata->rtcalarm_time - now;
+		if (diff <= 0)
+			msmrtc_alarmtimer_expired(2 , rtc_pdata);
+	}
+	suspend_state.tick_at_suspend = 0;
+	atomic_dec(&suspend_state.state);
+	return 0;
+}
+#else
+#define msmrtc_suspend NULL
+#define msmrtc_resume  NULL
+#endif
+
+static int __devexit msmrtc_remove(struct platform_device *pdev)
+{
+	struct msm_rtc *rtc_pdata = platform_get_drvdata(pdev);
+
+	rtc_device_unregister(rtc_pdata->rtc);
+#ifdef CONFIG_RTC_SECURE_TIME_SUPPORT
+	rtc_device_unregister(rtc_pdata->rtcsecure);
+#endif
+	msm_rpc_unregister_client(rtc_pdata->rpc_client);
+	kfree(rtc_pdata);
+
+	return 0;
+}
+
+static struct platform_driver msmrtc_driver = {
+	.probe		= msmrtc_probe,
+	.suspend	= msmrtc_suspend,
+	.resume		= msmrtc_resume,
+	.remove		= __devexit_p(msmrtc_remove),
+	.driver	= {
+		.name	= APP_TIMEREMOTE_PDEV_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init msmrtc_init(void)
+{
+	int rc;
+
+	/*
+	 * For backward compatibility, register multiple platform
+	 * drivers with the RPC PROG_VERS to be supported.
+	 *
+	 * Explicit cast away of 'constness' for driver.name in order to
+	 * initialize it here.
+	 */
+	snprintf((char *)msmrtc_driver.driver.name,
+		 strlen(msmrtc_driver.driver.name)+1,
+		 "rs%08x", TIMEREMOTE_PROG_NUMBER);
+	pr_debug("RTC Registering with %s\n", msmrtc_driver.driver.name);
+
+	rc = platform_driver_register(&msmrtc_driver);
+	if (rc)
+		pr_err("%s: platform_driver_register failed\n", __func__);
+
+	return rc;
+}
+
+static void __exit msmrtc_exit(void)
+{
+	platform_driver_unregister(&msmrtc_driver);
+}
+
+module_init(msmrtc_init);
+module_exit(msmrtc_exit);
+
+MODULE_DESCRIPTION("RTC driver for Qualcomm MSM7x00a chipsets");
+MODULE_AUTHOR("San Mehat <san@android.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-msm7x00a.c b/drivers/rtc/rtc-msm7x00a.c
new file mode 100644
index 0000000..690bc39
--- /dev/null
+++ b/drivers/rtc/rtc-msm7x00a.c
@@ -0,0 +1,280 @@
+/* drivers/rtc/rtc-msm7x00a.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: San Mehat <san@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/version.h>
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/rtc.h>
+#include <linux/msm_rpcrouter.h>
+
+#include <mach/msm_rpcrouter.h>
+
+#define RTC_DEBUG 0
+
+extern void msm_pm_set_max_sleep_time(int64_t sleep_time_ns);
+
+#if CONFIG_MSM_AMSS_VERSION >= 6350 || defined(CONFIG_ARCH_QSD8X50)
+#define APP_TIMEREMOTE_PDEV_NAME "rs30000048:00010000"
+#else
+#define APP_TIMEREMOTE_PDEV_NAME "rs30000048:0da5b528"
+#endif
+
+#define TIMEREMOTE_PROCEEDURE_SET_JULIAN	6
+#define TIMEREMOTE_PROCEEDURE_GET_JULIAN	7
+
+struct rpc_time_julian {
+	uint32_t year;
+	uint32_t month;
+	uint32_t day;
+	uint32_t hour;
+	uint32_t minute;
+	uint32_t second;
+	uint32_t day_of_week;
+};
+
+static struct msm_rpc_endpoint *ep;
+static struct rtc_device *rtc;
+static unsigned long rtcalarm_time;
+
+static int
+msmrtc_timeremote_set_time(struct device *dev, struct rtc_time *tm)
+{
+	int rc;
+
+	struct timeremote_set_julian_req {
+		struct rpc_request_hdr hdr;
+		uint32_t opt_arg;
+
+		struct rpc_time_julian time;
+	} req;
+
+	struct timeremote_set_julian_rep {
+		struct rpc_reply_hdr hdr;
+	} rep;
+
+	if (tm->tm_year < 1900)
+		tm->tm_year += 1900;
+
+	if (tm->tm_year < 1970)
+		return -EINVAL;
+
+#if RTC_DEBUG
+	printk(KERN_DEBUG "%s: %.2u/%.2u/%.4u %.2u:%.2u:%.2u (%.2u)\n",
+	       __func__, tm->tm_mon, tm->tm_mday, tm->tm_year,
+	       tm->tm_hour, tm->tm_min, tm->tm_sec, tm->tm_wday);
+#endif
+
+	req.opt_arg = cpu_to_be32(1);
+	req.time.year = cpu_to_be32(tm->tm_year);
+	req.time.month = cpu_to_be32(tm->tm_mon + 1);
+	req.time.day = cpu_to_be32(tm->tm_mday);
+	req.time.hour = cpu_to_be32(tm->tm_hour);
+	req.time.minute = cpu_to_be32(tm->tm_min);
+	req.time.second = cpu_to_be32(tm->tm_sec);
+	req.time.day_of_week = cpu_to_be32(tm->tm_wday);
+
+
+	rc = msm_rpc_call_reply(ep, TIMEREMOTE_PROCEEDURE_SET_JULIAN,
+				&req, sizeof(req),
+				&rep, sizeof(rep),
+				5 * HZ);
+	return rc;
+}
+
+static int
+msmrtc_timeremote_read_time(struct device *dev, struct rtc_time *tm)
+{
+	int rc;
+
+	struct timeremote_get_julian_req {
+		struct rpc_request_hdr hdr;
+		uint32_t julian_time_not_null;
+	} req;
+
+	struct timeremote_get_julian_rep {
+		struct rpc_reply_hdr hdr;
+		uint32_t opt_arg;
+		struct rpc_time_julian time;
+	} rep;
+
+	req.julian_time_not_null = cpu_to_be32(1);
+
+	rc = msm_rpc_call_reply(ep, TIMEREMOTE_PROCEEDURE_GET_JULIAN,
+				&req, sizeof(req),
+				&rep, sizeof(rep),
+				5 * HZ);
+	if (rc < 0)
+		return rc;
+
+	if (!be32_to_cpu(rep.opt_arg)) {
+		printk(KERN_ERR "%s: No data from RTC\n", __func__);
+		return -ENODATA;
+	}
+
+	tm->tm_year = be32_to_cpu(rep.time.year);
+	tm->tm_mon = be32_to_cpu(rep.time.month);
+	tm->tm_mday = be32_to_cpu(rep.time.day);
+	tm->tm_hour = be32_to_cpu(rep.time.hour);
+	tm->tm_min = be32_to_cpu(rep.time.minute);
+	tm->tm_sec = be32_to_cpu(rep.time.second);
+	tm->tm_wday = be32_to_cpu(rep.time.day_of_week);
+
+#if RTC_DEBUG
+	printk(KERN_DEBUG "%s: %.2u/%.2u/%.4u %.2u:%.2u:%.2u (%.2u)\n",
+	       __func__, tm->tm_mon, tm->tm_mday, tm->tm_year,
+	       tm->tm_hour, tm->tm_min, tm->tm_sec, tm->tm_wday);
+#endif
+
+	tm->tm_year -= 1900;	/* RTC layer expects years to start at 1900 */
+	tm->tm_mon--;		/* RTC layer expects mons to be 0 based */
+
+	if (rtc_valid_tm(tm) < 0) {
+		dev_err(dev, "retrieved date/time is not valid.\n");
+		rtc_time_to_tm(0, tm);
+	}
+
+	return 0;
+}
+
+
+static int
+msmrtc_virtual_alarm_set(struct device *dev, struct rtc_wkalrm *a)
+{
+	unsigned long now = get_seconds();
+
+	if (!a->enabled) {
+		rtcalarm_time = 0;
+		return 0;
+	} else
+		rtc_tm_to_time(&a->time, &rtcalarm_time);
+
+	if (now > rtcalarm_time) {
+		printk(KERN_ERR "%s: Attempt to set alarm in the past\n",
+		       __func__);
+		rtcalarm_time = 0;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct rtc_class_ops msm_rtc_ops = {
+	.read_time	= msmrtc_timeremote_read_time,
+	.set_time	= msmrtc_timeremote_set_time,
+	.set_alarm	= msmrtc_virtual_alarm_set,
+};
+
+static void
+msmrtc_alarmtimer_expired(unsigned long _data)
+{
+#if RTC_DEBUG
+	printk(KERN_DEBUG "%s: Generating alarm event (src %lu)\n",
+	       rtc->name, _data);
+#endif
+	rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);
+	rtcalarm_time = 0;
+}
+
+static int
+msmrtc_probe(struct platform_device *pdev)
+{
+	struct rpcsvr_platform_device *rdev =
+		container_of(pdev, struct rpcsvr_platform_device, base);
+
+	ep = msm_rpc_connect(rdev->prog, rdev->vers, 0);
+	if (IS_ERR(ep)) {
+		printk(KERN_ERR "%s: init rpc failed! rc = %ld\n",
+		       __func__, PTR_ERR(ep));
+		return PTR_ERR(ep);
+	}
+
+	rtc = rtc_device_register("msm_rtc",
+				  &pdev->dev,
+				  &msm_rtc_ops,
+				  THIS_MODULE);
+	if (IS_ERR(rtc)) {
+		printk(KERN_ERR "%s: Can't register RTC device (%ld)\n",
+		       pdev->name, PTR_ERR(rtc));
+		return PTR_ERR(rtc);
+	}
+	return 0;
+}
+
+
+static unsigned long msmrtc_get_seconds(void)
+{
+	struct rtc_time tm;
+	unsigned long now;
+
+	msmrtc_timeremote_read_time(NULL, &tm);
+	rtc_tm_to_time(&tm, &now);
+	return now;
+}
+
+static int
+msmrtc_suspend(struct platform_device *dev, pm_message_t state)
+{
+	if (rtcalarm_time) {
+		unsigned long now = msmrtc_get_seconds();
+		int diff = rtcalarm_time - now;
+		if (diff <= 0) {
+			msmrtc_alarmtimer_expired(1);
+			msm_pm_set_max_sleep_time(0);
+			return 0;
+		}
+		msm_pm_set_max_sleep_time((int64_t) ((int64_t) diff * NSEC_PER_SEC));
+	} else
+		msm_pm_set_max_sleep_time(0);
+	return 0;
+}
+
+static int
+msmrtc_resume(struct platform_device *dev)
+{
+	if (rtcalarm_time) {
+		unsigned long now = msmrtc_get_seconds();
+		int diff = rtcalarm_time - now;
+		if (diff <= 0)
+			msmrtc_alarmtimer_expired(2);
+	}
+	return 0;
+}
+
+static struct platform_driver msmrtc_driver = {
+	.probe		= msmrtc_probe,
+	.suspend	= msmrtc_suspend,
+	.resume		= msmrtc_resume,
+	.driver	= {
+		.name	= APP_TIMEREMOTE_PDEV_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init msmrtc_init(void)
+{
+	rtcalarm_time = 0;
+	return platform_driver_register(&msmrtc_driver);
+}
+
+module_init(msmrtc_init);
+
+MODULE_DESCRIPTION("RTC driver for Qualcomm MSM7x00a chipsets");
+MODULE_AUTHOR("San Mehat <san@android.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-pm8058.c b/drivers/rtc/rtc-pm8058.c
new file mode 100644
index 0000000..5d9111a
--- /dev/null
+++ b/drivers/rtc/rtc-pm8058.c
@@ -0,0 +1,563 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/rtc.h>
+#include <linux/mfd/pmic8058.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/rtc/rtc-pm8058.h>
+#include <linux/pm_runtime.h>
+
+#define PM8058_RTC_CTRL		0x1E8
+	#define PM8058_RTC_ENABLE	BIT(7)
+	#define PM8058_RTC_ALARM_ENABLE	BIT(1)
+#define PM8058_RTC_ALARM_CTRL	0x1E9
+	#define PM8058_RTC_ALARM_CLEAR	BIT(0)
+#define PM8058_RTC_TEST		0x1F6
+#define PM8058_RTC_READ_BASE	0x1EE
+#define PM8058_RTC_WRITE_BASE	0x1EA
+#define PM8058_RTC_ALARM_BASE	0x1F2
+
+struct pm8058_rtc {
+	struct rtc_device *rtc0;
+	u8 rtc_ctrl_reg;
+	int rtc_irq;
+	int rtc_alarm_irq;
+	struct pm8058_chip *pm_chip;
+};
+
+static int
+pm8058_rtc_read_bytes(struct pm8058_rtc *rtc_dd, u8 *rtc_val, int base)
+{
+	int i, rc;
+
+	/*
+	 * Read the 32-bit RTC/Alarm Value.
+	 * These values have to be read 8-bit at a time.
+	 */
+	for (i = 0; i < 4; i++) {
+		rc = pm8058_read(rtc_dd->pm_chip, base + i, &rtc_val[i], 1);
+		if (rc < 0) {
+			pr_err("%s: PM8058 read failed\n", __func__);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int
+pm8058_rtc_write_bytes(struct pm8058_rtc *rtc_dd, u8 *rtc_val, int base)
+{
+	int i, rc;
+
+	/*
+	 * Write the 32-bit Value.
+	 * These values have to be written 8-bit at a time.
+	 */
+	for (i = 0; i < 4; i++) {
+		rc = pm8058_write(rtc_dd->pm_chip, base + i, &rtc_val[i], 1);
+		if (rc < 0) {
+			pr_err("%s: PM8058 write failed\n", __func__);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Steps to write the RTC registers.
+ * 1. Disable alarm if enabled.
+ * 2. Write 0x00 to LSB.
+ * 3. Write Byte[1], Byte[2], Byte[3] then Byte[0].
+ * 4. Enable alarm if disabled earlier.
+ */
+#ifdef CONFIG_RTC_PM8058_WRITE_ENABLE
+static int
+pm8058_rtc0_set_time(struct device *dev, struct rtc_time *tm)
+{
+	int rc;
+	unsigned long secs = 0;
+	u8 value[4], reg = 0, alarm_enabled = 0, ctrl_reg = 0, i;
+	struct pm8058_rtc *rtc_dd = dev_get_drvdata(dev);
+
+	ctrl_reg = rtc_dd->rtc_ctrl_reg;
+
+	rtc_tm_to_time(tm, &secs);
+
+	value[0] = secs & 0xFF;
+	value[1] = (secs >> 8) & 0xFF;
+	value[2] = (secs >> 16) & 0xFF;
+	value[3] = (secs >> 24) & 0xFF;
+
+	pr_debug("%s: Seconds value to be written to RTC = %lu\n", __func__,
+								secs);
+	 /* Disable alarm before updating RTC */
+	if (ctrl_reg & PM8058_RTC_ALARM_ENABLE) {
+		alarm_enabled = 1;
+		ctrl_reg &= ~PM8058_RTC_ALARM_ENABLE;
+		rc = pm8058_write(rtc_dd->pm_chip, PM8058_RTC_CTRL,
+							&ctrl_reg, 1);
+		if (rc < 0) {
+			pr_err("%s: PM8058 write failed\n", __func__);
+			return rc;
+		}
+	}
+
+	/* Write Byte[1], Byte[2], Byte[3], Byte[0] */
+	reg = 0;
+	rc = pm8058_write(rtc_dd->pm_chip, PM8058_RTC_WRITE_BASE, &reg, 1);
+	if (rc < 0) {
+		pr_err("%s: PM8058 write failed\n", __func__);
+		return rc;
+	}
+
+	for (i = 1; i < 4; i++) {
+		rc = pm8058_write(rtc_dd->pm_chip, PM8058_RTC_WRITE_BASE + i,
+								&value[i], 1);
+		if (rc < 0) {
+			pr_err("%s:Write to RTC registers failed\n", __func__);
+			return rc;
+		}
+	}
+
+	rc = pm8058_write(rtc_dd->pm_chip, PM8058_RTC_WRITE_BASE,
+							&value[0], 1);
+	if (rc < 0) {
+		pr_err("%s: PM8058 write failed\n", __func__);
+		return rc;
+	}
+
+	if (alarm_enabled) {
+		ctrl_reg |= PM8058_RTC_ALARM_ENABLE;
+		rc = pm8058_write(rtc_dd->pm_chip, PM8058_RTC_CTRL,
+							&ctrl_reg, 1);
+		if (rc < 0) {
+			pr_err("%s: PM8058 write failed\n", __func__);
+			return rc;
+		}
+	}
+
+	rtc_dd->rtc_ctrl_reg = ctrl_reg;
+
+	return 0;
+}
+#endif
+
+static int
+pm8058_rtc0_read_time(struct device *dev, struct rtc_time *tm)
+{
+	int rc;
+	u8 value[4], reg;
+	unsigned long secs = 0;
+	struct pm8058_rtc *rtc_dd = dev_get_drvdata(dev);
+
+	rc = pm8058_rtc_read_bytes(rtc_dd, value, PM8058_RTC_READ_BASE);
+	if (rc < 0) {
+		pr_err("%s: RTC time read failed\n", __func__);
+		return rc;
+	}
+
+	/*
+	 * Read the LSB again and check if there has been a carry over.
+	 * If there is, redo the read operation.
+	 */
+	rc = pm8058_read(rtc_dd->pm_chip, PM8058_RTC_READ_BASE, &reg, 1);
+	if (rc < 0) {
+		pr_err("%s: PM8058 read failed\n", __func__);
+		return rc;
+	}
+
+	if (unlikely(reg < value[0])) {
+		rc = pm8058_rtc_read_bytes(rtc_dd, value,
+						PM8058_RTC_READ_BASE);
+		if (rc < 0) {
+			pr_err("%s: RTC time read failed\n", __func__);
+			return rc;
+		}
+	}
+
+	secs = value[0] | (value[1] << 8) | (value[2] << 16) | (value[3] << 24);
+
+	rtc_time_to_tm(secs, tm);
+
+	rc = rtc_valid_tm(tm);
+	if (rc < 0) {
+		pr_err("%s: Invalid time read from PMIC8058\n", __func__);
+		return rc;
+	}
+
+	pr_debug("%s: secs = %lu, h:m:s = %d:%d:%d, d/m/y = %d/%d/%d\n",
+		 __func__, secs, tm->tm_hour, tm->tm_min, tm->tm_sec,
+		tm->tm_mday, tm->tm_mon, tm->tm_year);
+
+	return 0;
+}
+
+static int
+pm8058_rtc0_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+	int rc;
+	u8 value[4], reg;
+	struct rtc_time rtc_tm;
+	unsigned long secs_alarm, secs_rtc;
+	struct pm8058_rtc *rtc_dd = dev_get_drvdata(dev);
+
+	reg = rtc_dd->rtc_ctrl_reg;
+
+	/* Check if the alarm is valid */
+	rc = rtc_valid_tm(&alarm->time);
+	if (rc < 0) {
+		pr_err("%s: Alarm time invalid\n", __func__);
+		return -EINVAL;
+	}
+
+	rtc_tm_to_time(&alarm->time, &secs_alarm);
+
+	/*
+	 * Read the current RTC time and verify if the alarm time is in the
+	 * past. If yes, return invalid.
+	 */
+	rc = pm8058_rtc0_read_time(dev, &rtc_tm);
+	if (rc) {
+		pr_err("%s: Unable to read RTC time\n", __func__);
+		return -EINVAL;
+	}
+	rtc_tm_to_time(&rtc_tm, &secs_rtc);
+
+	if (secs_alarm < secs_rtc) {
+		pr_err("%s: Trying to set alarm in the past\n", __func__);
+		return -EINVAL;
+	}
+
+	value[0] = secs_alarm & 0xFF;
+	value[1] = (secs_alarm >> 8) & 0xFF;
+	value[2] = (secs_alarm >> 16) & 0xFF;
+	value[3] = (secs_alarm >> 24) & 0xFF;
+
+	rc = pm8058_rtc_write_bytes(rtc_dd, value, PM8058_RTC_ALARM_BASE);
+	if (rc < 0) {
+		pr_err("%s: Alarm could not be set\n", __func__);
+		return rc;
+	}
+
+	reg = (alarm->enabled) ? (reg | PM8058_RTC_ALARM_ENABLE) :
+					(reg & ~PM8058_RTC_ALARM_ENABLE);
+
+	rc = pm8058_write(rtc_dd->pm_chip, PM8058_RTC_CTRL, &reg, 1);
+	if (rc < 0) {
+		pr_err("%s: PM8058 write failed\n", __func__);
+		return rc;
+	}
+
+	rtc_dd->rtc_ctrl_reg = reg;
+
+	pr_debug("%s: Alarm set for h:m:s=%d:%d:%d, d/m/y=%d/%d/%d\n",
+			__func__, alarm->time.tm_hour, alarm->time.tm_min,
+				alarm->time.tm_sec, alarm->time.tm_mday,
+				alarm->time.tm_mon, alarm->time.tm_year);
+
+	return 0;
+}
+
+static int
+pm8058_rtc0_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+	int rc;
+	u8 value[4], reg;
+	unsigned long secs = 0;
+	struct pm8058_rtc *rtc_dd = dev_get_drvdata(dev);
+
+	reg = rtc_dd->rtc_ctrl_reg;
+
+	alarm->enabled = !!(reg & PM8058_RTC_ALARM_ENABLE);
+
+	rc = pm8058_rtc_read_bytes(rtc_dd, value,
+					PM8058_RTC_ALARM_BASE);
+	if (rc < 0) {
+		pr_err("%s: RTC alarm time read failed\n", __func__);
+		return rc;
+	}
+
+	secs = value[0] | (value[1] << 8) | (value[2] << 16) | (value[3] << 24);
+
+	rtc_time_to_tm(secs, &alarm->time);
+
+	rc = rtc_valid_tm(&alarm->time);
+	if (rc < 0) {
+		pr_err("%s: Invalid time read from PMIC8058\n", __func__);
+		return rc;
+	}
+
+	pr_debug("%s: Alarm set for h:m:s=%d:%d:%d, d/m/y=%d/%d/%d\n",
+			__func__, alarm->time.tm_hour, alarm->time.tm_min,
+				alarm->time.tm_sec, alarm->time.tm_mday,
+				alarm->time.tm_mon, alarm->time.tm_year);
+
+	return 0;
+}
+
+
+static int
+pm8058_rtc0_alarm_irq_enable(struct device *dev, unsigned int enable)
+{
+	int rc;
+	struct pm8058_rtc *rtc_dd = dev_get_drvdata(dev);
+	u8 reg;
+
+	reg = rtc_dd->rtc_ctrl_reg;
+	reg = (enable) ? (reg | PM8058_RTC_ALARM_ENABLE) :
+				(reg & ~PM8058_RTC_ALARM_ENABLE);
+
+	rc = pm8058_write(rtc_dd->pm_chip, PM8058_RTC_CTRL, &reg, 1);
+	if (rc < 0) {
+		pr_err("%s: PM8058 write failed\n", __func__);
+		return rc;
+	}
+
+	rtc_dd->rtc_ctrl_reg = reg;
+
+	return rc;
+}
+
+static struct rtc_class_ops pm8058_rtc0_ops = {
+	.read_time	= pm8058_rtc0_read_time,
+	.set_alarm	= pm8058_rtc0_set_alarm,
+	.read_alarm	= pm8058_rtc0_read_alarm,
+	.alarm_irq_enable = pm8058_rtc0_alarm_irq_enable,
+};
+
+static irqreturn_t pm8058_alarm_trigger(int irq, void *dev_id)
+{
+	u8 reg;
+	int rc;
+	unsigned long events = 0;
+	struct pm8058_rtc *rtc_dd = dev_id;
+
+	events = RTC_IRQF | RTC_AF;
+	rtc_update_irq(rtc_dd->rtc0, 1, events);
+
+	pr_debug("%s: Alarm Triggered !!\n", __func__);
+
+	/* Clear the alarm enable bit */
+	reg = rtc_dd->rtc_ctrl_reg;
+
+	reg &= ~PM8058_RTC_ALARM_ENABLE;
+	rc = pm8058_write(rtc_dd->pm_chip, PM8058_RTC_CTRL,
+						&reg, 1);
+	if (rc < 0) {
+		pr_err("%s: PM8058 write failed\n", __func__);
+		goto rtc_alarm_handled;
+	}
+
+	rtc_dd->rtc_ctrl_reg = reg;
+
+	/* Clear RTC alarm register */
+	rc = pm8058_read(rtc_dd->pm_chip, PM8058_RTC_ALARM_CTRL, &reg, 1);
+	if (rc < 0) {
+		pr_err("%s: PM8058 read failed\n", __func__);
+		goto rtc_alarm_handled;
+	}
+
+	reg &= ~PM8058_RTC_ALARM_CLEAR;
+	rc = pm8058_write(rtc_dd->pm_chip, PM8058_RTC_ALARM_CTRL, &reg, 1);
+	if (rc < 0) {
+		pr_err("%s: PM8058 write failed\n", __func__);
+		goto rtc_alarm_handled;
+	}
+
+rtc_alarm_handled:
+	return IRQ_HANDLED;
+}
+
+static int __devinit pm8058_rtc_probe(struct platform_device *pdev)
+{
+	int rc;
+	u8 reg, reg_alarm;
+	struct pm8058_rtc *rtc_dd;
+	struct pm8058_chip *pm_chip;
+
+	pm_chip = platform_get_drvdata(pdev);
+	if (pm_chip == NULL) {
+		pr_err("%s: Invalid driver information\n", __func__);
+		return -ENXIO;
+	}
+
+	rtc_dd = kzalloc(sizeof(*rtc_dd), GFP_KERNEL);
+	if (rtc_dd == NULL) {
+		pr_err("%s: Unable to allocate memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* Enable runtime PM ops, start in ACTIVE mode */
+	rc = pm_runtime_set_active(&pdev->dev);
+	if (rc < 0)
+		dev_dbg(&pdev->dev, "unable to set runtime pm state\n");
+	pm_runtime_enable(&pdev->dev);
+
+	rtc_dd->rtc_irq = platform_get_irq(pdev, 0);
+	rtc_dd->rtc_alarm_irq = platform_get_irq(pdev, 1);
+	if (!rtc_dd->rtc_alarm_irq || !rtc_dd->rtc_irq) {
+		pr_err("%s: RTC or RTC alarm IRQ absent\n", __func__);
+		rc = -ENXIO;
+		goto fail_rtc_enable;
+	}
+
+	rtc_dd->pm_chip = pm_chip;
+
+	rc = pm8058_read(pm_chip, PM8058_RTC_CTRL, &reg, 1);
+	if (rc < 0) {
+		pr_err("%s: PM8058 read failed\n", __func__);
+		goto fail_rtc_enable;
+	}
+
+	if (!(reg & PM8058_RTC_ENABLE)) {
+		/* Enable RTC, clear alarm register */
+		reg |= PM8058_RTC_ENABLE;
+		reg &= ~PM8058_RTC_ALARM_ENABLE;
+		rc = pm8058_write(pm_chip, PM8058_RTC_CTRL, &reg, 1);
+		if (rc < 0) {
+			pr_err("%s: PM8058 write failed\n", __func__);
+			goto fail_rtc_enable;
+		}
+
+		/* Clear RTC alarm register */
+		rc = pm8058_read(rtc_dd->pm_chip, PM8058_RTC_ALARM_CTRL,
+								&reg_alarm, 1);
+		if (rc < 0) {
+			pr_err("%s: PM8058 read failed\n", __func__);
+			goto fail_rtc_enable;
+		}
+
+		reg_alarm &= ~PM8058_RTC_ALARM_CLEAR;
+		rc = pm8058_write(rtc_dd->pm_chip, PM8058_RTC_ALARM_CTRL,
+								&reg_alarm, 1);
+		if (rc < 0) {
+			pr_err("%s: PM8058 write failed\n", __func__);
+			goto fail_rtc_enable;
+		}
+	}
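+	/* Cache the control register; alarm code updates this copy on every write */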
+	rtc_dd->rtc_ctrl_reg = reg;
+
+#ifdef CONFIG_RTC_PM8058_WRITE_ENABLE
+	pm8058_rtc0_ops.set_time	= pm8058_rtc0_set_time;
+#endif
+
+	/* Register the RTC device */
+	rtc_dd->rtc0 = rtc_device_register("pm8058_rtc0", &pdev->dev,
+				&pm8058_rtc0_ops, THIS_MODULE);
+	if (IS_ERR(rtc_dd->rtc0)) {
+		pr_err("%s: RTC device registration failed (%ld)\n",
+					__func__, PTR_ERR(rtc_dd->rtc0));
+		rc = PTR_ERR(rtc_dd->rtc0);
+		goto fail_rtc_enable;
+	}
+
+	platform_set_drvdata(pdev, rtc_dd);
+
+	/* Request the alarm IRQ */
+	rc = request_threaded_irq(rtc_dd->rtc_alarm_irq, NULL,
+				 pm8058_alarm_trigger, IRQF_TRIGGER_RISING,
+				 "pm8058_rtc_alarm", rtc_dd);
+	if (rc < 0) {
+		pr_err("%s: Request IRQ failed (%d)\n", __func__, rc);
+		goto fail_req_irq;
+	}
+
+	device_init_wakeup(&pdev->dev, 1);
+
+	pr_debug("%s: Probe success !!\n", __func__);
+
+	return 0;
+
+fail_req_irq:
+	rtc_device_unregister(rtc_dd->rtc0);
+fail_rtc_enable:
+	pm_runtime_set_suspended(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	kfree(rtc_dd);
+	return rc;
+}
+
+#ifdef CONFIG_PM
+static int pm8058_rtc_resume(struct device *dev)
+{
+	struct pm8058_rtc *rtc_dd = dev_get_drvdata(dev);
+
+	if (device_may_wakeup(dev))
+		disable_irq_wake(rtc_dd->rtc_alarm_irq);
+
+	return 0;
+}
+
+static int pm8058_rtc_suspend(struct device *dev)
+{
+	struct pm8058_rtc *rtc_dd = dev_get_drvdata(dev);
+
+	if (device_may_wakeup(dev))
+		enable_irq_wake(rtc_dd->rtc_alarm_irq);
+
+	return 0;
+}
+
+static struct dev_pm_ops pm8058_rtc_pm_ops = {
+	.suspend = pm8058_rtc_suspend,
+	.resume = pm8058_rtc_resume,
+};
+#endif
+
+static int __devexit pm8058_rtc_remove(struct platform_device *pdev)
+{
+	struct pm8058_rtc *rtc_dd = platform_get_drvdata(pdev);
+
+	pm_runtime_set_suspended(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
+	device_init_wakeup(&pdev->dev, 0);
+	free_irq(rtc_dd->rtc_alarm_irq, rtc_dd);
+	rtc_device_unregister(rtc_dd->rtc0);
+	kfree(rtc_dd);
+
+	return 0;
+}
+
+static struct platform_driver pm8058_rtc_driver = {
+	.probe		= pm8058_rtc_probe,
+	.remove		= __devexit_p(pm8058_rtc_remove),
+	.driver	= {
+		.name	= "pm8058-rtc",
+		.owner	= THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm	= &pm8058_rtc_pm_ops,
+#endif
+	},
+};
+
+static int __init pm8058_rtc_init(void)
+{
+	return platform_driver_register(&pm8058_rtc_driver);
+}
+
+static void __exit pm8058_rtc_exit(void)
+{
+	platform_driver_unregister(&pm8058_rtc_driver);
+}
+
+module_init(pm8058_rtc_init);
+module_exit(pm8058_rtc_exit);
+
+MODULE_ALIAS("platform:pm8058-rtc");
+MODULE_DESCRIPTION("PMIC8058 RTC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
new file mode 100644
index 0000000..0bdb89e
--- /dev/null
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -0,0 +1,569 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/rtc.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <linux/mfd/pm8xxx/core.h>
+#include <linux/mfd/pm8xxx/rtc.h>
+
+
+/* RTC Register offsets from RTC CTRL REG */
+#define PM8XXX_ALARM_CTRL_OFFSET 0x01
+#define PM8XXX_RTC_WRITE_OFFSET 0x02
+#define PM8XXX_RTC_READ_OFFSET 0x06
+#define PM8XXX_ALARM_RW_OFFSET 0x0A
+
+/* RTC_CTRL register bit fields */
+#define PM8xxx_RTC_ENABLE	BIT(7)
+#define PM8xxx_RTC_ALARM_ENABLE	BIT(1)
+#define PM8xxx_RTC_ALARM_CLEAR  BIT(0)
+
+#define NUM_8_BIT_RTC_REGS	0x4
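+/* The RTC time and alarm values are 32-bit second counters split across four 8-bit registers */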
+
+/**
+ * struct pm8xxx_rtc - rtc driver internal structure
+ * @rtc: rtc device for this driver
+ * @rtc_alarm_irq: rtc alarm irq number
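+ * @rtc_base: address of the RTC control register
+ * @rtc_read_base: base address of the RTC time read registers
+ * @rtc_write_base: base address of the RTC time write registers
+ * @alarm_rw_base: base address of the alarm read/write registers
+ * @ctrl_reg: cached copy of the RTC control register
+ * @rtc_dev: device structure for this driver
+ * @ctrl_reg_lock: spinlock protecting ctrl_reg and control register access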
+ */
+struct pm8xxx_rtc {
+	struct rtc_device *rtc;
+	int rtc_alarm_irq;
+	int rtc_base;
+	int rtc_read_base;
+	int rtc_write_base;
+	int alarm_rw_base;
+	u8  ctrl_reg;
+	struct device *rtc_dev;
+	spinlock_t ctrl_reg_lock;
+};
+
+/*
+ * The RTC registers need to be read/written one byte at a time. This is a
+ * hardware limitation.
+ */
+
+static int pm8xxx_read_wrapper(struct pm8xxx_rtc *rtc_dd, u8 *rtc_val,
+			int base, int count)
+{
+	int i, rc;
+	struct device *parent = rtc_dd->rtc_dev->parent;
+
+	for (i = 0; i < count; i++) {
+		rc = pm8xxx_readb(parent, base + i, &rtc_val[i]);
+		if (rc < 0) {
+			dev_err(rtc_dd->rtc_dev, "PM8xxx read failed\n");
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int pm8xxx_write_wrapper(struct pm8xxx_rtc *rtc_dd, u8 *rtc_val,
+			int base, int count)
+{
+	int i, rc;
+	struct device *parent = rtc_dd->rtc_dev->parent;
+
+	for (i = 0; i < count; i++) {
+		rc = pm8xxx_writeb(parent, base + i, rtc_val[i]);
+		if (rc < 0) {
+			dev_err(rtc_dd->rtc_dev, "PM8xxx write failed\n");
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+
+/*
+ * Steps to write the RTC registers.
+ * 1. Disable alarm if enabled.
+ * 2. Write 0x00 to LSB.
+ * 3. Write Byte[1], Byte[2], Byte[3] then Byte[0].
+ * 4. Enable alarm if disabled in step 1.
+ */
+static int
+pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+	int rc;
+	unsigned long secs, irq_flags;
+	u8 value[4], reg = 0, alarm_enabled = 0, ctrl_reg;
+	struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+
+	rtc_tm_to_time(tm, &secs);
+
+	value[0] = secs & 0xFF;
+	value[1] = (secs >> 8) & 0xFF;
+	value[2] = (secs >> 16) & 0xFF;
+	value[3] = (secs >> 24) & 0xFF;
+
+	dev_dbg(dev, "Seconds value to be written to RTC = %lu\n", secs);
+
+	spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
+	ctrl_reg = rtc_dd->ctrl_reg;
+
+	if (ctrl_reg & PM8xxx_RTC_ALARM_ENABLE) {
+		alarm_enabled = 1;
+		ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE;
+		rc = pm8xxx_write_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base,
+									1);
+		if (rc < 0) {
+			dev_err(dev, "PM8xxx write failed\n");
+			goto rtc_rw_fail;
+		}
+	} else
+		spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
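+	/* If the alarm was enabled, ctrl_reg_lock stays held across the whole write sequence and is released at rtc_rw_fail */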
+
+	/* Write Byte[1], Byte[2], Byte[3], Byte[0] */
+	/* Write 0 to Byte[0] */
+	reg = 0;
+	rc = pm8xxx_write_wrapper(rtc_dd, &reg, rtc_dd->rtc_write_base, 1);
+	if (rc < 0) {
+		dev_err(dev, "PM8xxx write failed\n");
+		goto rtc_rw_fail;
+	}
+
+	/* Write Byte[1], Byte[2], Byte[3] */
+	rc = pm8xxx_write_wrapper(rtc_dd, value + 1,
+					rtc_dd->rtc_write_base + 1, 3);
+	if (rc < 0) {
+		dev_err(dev, "Write to RTC registers failed\n");
+		goto rtc_rw_fail;
+	}
+
+	/* Write Byte[0] */
+	rc = pm8xxx_write_wrapper(rtc_dd, value, rtc_dd->rtc_write_base, 1);
+	if (rc < 0) {
+		dev_err(dev, "Write to RTC register failed\n");
+		goto rtc_rw_fail;
+	}
+
+	if (alarm_enabled) {
+		ctrl_reg |= PM8xxx_RTC_ALARM_ENABLE;
+		rc = pm8xxx_write_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base,
+									1);
+		if (rc < 0) {
+			dev_err(dev, "PM8xxx write failed\n");
+			goto rtc_rw_fail;
+		}
+	}
+
+	rtc_dd->ctrl_reg = ctrl_reg;
+
+rtc_rw_fail:
+	if (alarm_enabled)
+		spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
+
+	return rc;
+}
+
+static int
+pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+	int rc;
+	u8 value[4], reg;
+	unsigned long secs;
+	struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+
+	rc = pm8xxx_read_wrapper(rtc_dd, value, rtc_dd->rtc_read_base,
+							NUM_8_BIT_RTC_REGS);
+	if (rc < 0) {
+		dev_err(dev, "RTC time read failed\n");
+		return rc;
+	}
+
+	/*
+	 * Read the LSB again and check if there has been a carry over.
+	 * If there is, redo the read operation.
+	 */
+	rc = pm8xxx_read_wrapper(rtc_dd, &reg, rtc_dd->rtc_read_base, 1);
+	if (rc < 0) {
+		dev_err(dev, "PM8xxx read failed\n");
+		return rc;
+	}
+
+	if (unlikely(reg < value[0])) {
+		rc = pm8xxx_read_wrapper(rtc_dd, value,
+				rtc_dd->rtc_read_base, NUM_8_BIT_RTC_REGS);
+		if (rc < 0) {
+			dev_err(dev, "RTC time read failed\n");
+			return rc;
+		}
+	}
+
+	secs = value[0] | (value[1] << 8) | (value[2] << 16) |
+						(value[3] << 24);
+
+	rtc_time_to_tm(secs, tm);
+
+	rc = rtc_valid_tm(tm);
+	if (rc < 0) {
+		dev_err(dev, "Invalid time read from PM8xxx\n");
+		return rc;
+	}
+
+	dev_dbg(dev, "secs = %lu, h:m:s == %d:%d:%d, d/m/y = %d/%d/%d\n",
+			secs, tm->tm_hour, tm->tm_min, tm->tm_sec,
+			tm->tm_mday, tm->tm_mon, tm->tm_year);
+
+	return 0;
+}
+
+static int
+pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+	int rc;
+	u8 value[4], ctrl_reg;
+	unsigned long secs, secs_rtc, irq_flags;
+	struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+	struct rtc_time rtc_tm;
+
+	rtc_tm_to_time(&alarm->time, &secs);
+
+	/*
+	 * Read the current RTC time and verify if the alarm time is in the
+	 * past. If yes, return invalid.
+	 */
+	rc = pm8xxx_rtc_read_time(dev, &rtc_tm);
+	if (rc < 0) {
+		dev_err(dev, "Unable to read RTC time\n");
+		return -EINVAL;
+	}
+
+	rtc_tm_to_time(&rtc_tm, &secs_rtc);
+	if (secs < secs_rtc) {
+		dev_err(dev, "Trying to set alarm in the past\n");
+		return -EINVAL;
+	}
+
+	value[0] = secs & 0xFF;
+	value[1] = (secs >> 8) & 0xFF;
+	value[2] = (secs >> 16) & 0xFF;
+	value[3] = (secs >> 24) & 0xFF;
+
+	spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
+
+	rc = pm8xxx_write_wrapper(rtc_dd, value, rtc_dd->alarm_rw_base,
+							NUM_8_BIT_RTC_REGS);
+	if (rc < 0) {
+		dev_err(dev, "Write to RTC ALARM registers failed\n");
+		goto rtc_rw_fail;
+	}
+
+	ctrl_reg = rtc_dd->ctrl_reg;
+	ctrl_reg = (alarm->enabled) ? (ctrl_reg | PM8xxx_RTC_ALARM_ENABLE) :
+					(ctrl_reg & ~PM8xxx_RTC_ALARM_ENABLE);
+
+	rc = pm8xxx_write_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base, 1);
+	if (rc < 0) {
+		dev_err(dev, "PM8xxx write failed\n");
+		goto rtc_rw_fail;
+	}
+
+	rtc_dd->ctrl_reg = ctrl_reg;
+
+	dev_dbg(dev, "Alarm Set for h:m:s=%d:%d:%d, d/m/y=%d/%d/%d\n",
+			alarm->time.tm_hour, alarm->time.tm_min,
+			alarm->time.tm_sec, alarm->time.tm_mday,
+			alarm->time.tm_mon, alarm->time.tm_year);
+rtc_rw_fail:
+	spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
+	return rc;
+}
+
+static int
+pm8xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+	int rc;
+	u8 value[4];
+	unsigned long secs;
+	struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+
+	rc = pm8xxx_read_wrapper(rtc_dd, value, rtc_dd->alarm_rw_base,
+			NUM_8_BIT_RTC_REGS);
+	if (rc < 0) {
+		dev_err(dev, "RTC alarm time read failed\n");
+		return rc;
+	}
+
+	secs = value[0] | (value[1] << 8) | (value[2] << 16) |
+						 (value[3] << 24);
+
+	rtc_time_to_tm(secs, &alarm->time);
+
+	rc = rtc_valid_tm(&alarm->time);
+	if (rc < 0) {
+		dev_err(dev, "Invalid time read from PM8xxx\n");
+		return rc;
+	}
+
+	dev_dbg(dev, "Alarm set for - h:m:s=%d:%d:%d, d/m/y=%d/%d/%d\n",
+		alarm->time.tm_hour, alarm->time.tm_min,
+				alarm->time.tm_sec, alarm->time.tm_mday,
+				alarm->time.tm_mon, alarm->time.tm_year);
+
+	return 0;
+}
+
+
+static int
+pm8xxx_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	int rc;
+	unsigned long irq_flags;
+	struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+	u8 ctrl_reg;
+
+	spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
+	ctrl_reg = rtc_dd->ctrl_reg;
+	ctrl_reg = (enabled) ? (ctrl_reg | PM8xxx_RTC_ALARM_ENABLE) :
+				(ctrl_reg & ~PM8xxx_RTC_ALARM_ENABLE);
+
+	rc = pm8xxx_write_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base, 1);
+	if (rc < 0) {
+		dev_err(dev, "PM8xxx write failed\n");
+		goto rtc_rw_fail;
+	}
+
+	rtc_dd->ctrl_reg = ctrl_reg;
+
+rtc_rw_fail:
+	spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
+	return rc;
+}
+
+static struct rtc_class_ops pm8xxx_rtc_ops = {
+	.read_time	= pm8xxx_rtc_read_time,
+	.set_alarm	= pm8xxx_rtc_set_alarm,
+	.read_alarm	= pm8xxx_rtc_read_alarm,
+	.alarm_irq_enable = pm8xxx_rtc_alarm_irq_enable,
+};
+
+static irqreturn_t pm8xxx_alarm_trigger(int irq, void *dev_id)
+{
+	struct pm8xxx_rtc *rtc_dd = dev_id;
+	u8 ctrl_reg;
+	int rc;
+	unsigned long irq_flags;
+
+	rtc_update_irq(rtc_dd->rtc, 1, RTC_IRQF | RTC_AF);
+
+	spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
+
+	/* Clear the alarm enable bit */
+	ctrl_reg = rtc_dd->ctrl_reg;
+	ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE;
+
+	rc = pm8xxx_write_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base, 1);
+	if (rc < 0) {
+		spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
+		dev_err(rtc_dd->rtc_dev, "PM8xxx write failed!\n");
+		goto rtc_alarm_handled;
+	}
+
+	rtc_dd->ctrl_reg = ctrl_reg;
+	spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
+
+	/* Clear RTC alarm register */
+	rc = pm8xxx_read_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base +
+						PM8XXX_ALARM_CTRL_OFFSET, 1);
+	if (rc < 0) {
+		dev_err(rtc_dd->rtc_dev, "PM8xxx read failed!\n");
+		goto rtc_alarm_handled;
+	}
+
+	ctrl_reg &= ~PM8xxx_RTC_ALARM_CLEAR;
+	rc = pm8xxx_write_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base +
+						PM8XXX_ALARM_CTRL_OFFSET, 1);
+	if (rc < 0)
+		dev_err(rtc_dd->rtc_dev, "PM8xxx write failed!\n");
+
+rtc_alarm_handled:
+	return IRQ_HANDLED;
+}
+
+static int __devinit pm8xxx_rtc_probe(struct platform_device *pdev)
+{
+	int rc;
+	u8 ctrl_reg;
+	bool rtc_write_enable = false;
+	struct pm8xxx_rtc *rtc_dd;
+	struct resource *rtc_resource;
+	const struct pm8xxx_rtc_platform_data *pdata =
+		pdev->dev.platform_data;
+
+	if (pdata != NULL)
+		rtc_write_enable = pdata->rtc_write_enable;
+
+	rtc_dd = kzalloc(sizeof(*rtc_dd), GFP_KERNEL);
+	if (rtc_dd == NULL) {
+		dev_err(&pdev->dev, "Unable to allocate memory!\n");
+		return -ENOMEM;
+	}
+
+	/* Initialise spinlock to protect the RTC control register */
+	spin_lock_init(&rtc_dd->ctrl_reg_lock);
+
+	rtc_dd->rtc_alarm_irq = platform_get_irq(pdev, 0);
+	if (rtc_dd->rtc_alarm_irq < 0) {
+		dev_err(&pdev->dev, "Alarm IRQ resource absent!\n");
+		rc = -ENXIO;
+		goto fail_rtc_enable;
+	}
+
+	rtc_resource = platform_get_resource_byname(pdev, IORESOURCE_IO,
+							"pmic_rtc_base");
+	if (!(rtc_resource && rtc_resource->start)) {
+		dev_err(&pdev->dev, "RTC IO resource absent!\n");
+		rc = -ENXIO;
+		goto fail_rtc_enable;
+	}
+
+	rtc_dd->rtc_base = rtc_resource->start;
+
+	/* Setup RTC register addresses */
+	rtc_dd->rtc_write_base = rtc_dd->rtc_base + PM8XXX_RTC_WRITE_OFFSET;
+	rtc_dd->rtc_read_base = rtc_dd->rtc_base + PM8XXX_RTC_READ_OFFSET;
+	rtc_dd->alarm_rw_base = rtc_dd->rtc_base + PM8XXX_ALARM_RW_OFFSET;
+
+	rtc_dd->rtc_dev = &(pdev->dev);
+
+	/* Check if the RTC is on, else turn it on */
+	rc = pm8xxx_read_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base, 1);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "PM8xxx read failed!\n");
+		goto fail_rtc_enable;
+	}
+
+	if (!(ctrl_reg & PM8xxx_RTC_ENABLE)) {
+		ctrl_reg |= PM8xxx_RTC_ENABLE;
+		rc = pm8xxx_write_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base,
+									1);
+		if (rc < 0) {
+			dev_err(&pdev->dev, "PM8xxx write failed!\n");
+			goto fail_rtc_enable;
+		}
+	}
+
+	rtc_dd->ctrl_reg = ctrl_reg;
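+	/* Expose set_time only when the platform allows writing the RTC */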
+	if (rtc_write_enable)
+		pm8xxx_rtc_ops.set_time = pm8xxx_rtc_set_time;
+
+	platform_set_drvdata(pdev, rtc_dd);
+
+	/* Register the RTC device */
+	rtc_dd->rtc = rtc_device_register("pm8xxx_rtc", &pdev->dev,
+				&pm8xxx_rtc_ops, THIS_MODULE);
+	if (IS_ERR(rtc_dd->rtc)) {
+		dev_err(&pdev->dev, "%s: RTC registration failed (%ld)\n",
+					__func__, PTR_ERR(rtc_dd->rtc));
+		rc = PTR_ERR(rtc_dd->rtc);
+		goto fail_rtc_enable;
+	}
+
+	/* Request the alarm IRQ */
+	rc = request_any_context_irq(rtc_dd->rtc_alarm_irq,
+				 pm8xxx_alarm_trigger, IRQF_TRIGGER_RISING,
+				 "pm8xxx_rtc_alarm", rtc_dd);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "Request IRQ failed (%d)\n", rc);
+		goto fail_req_irq;
+	}
+
+	device_init_wakeup(&pdev->dev, 1);
+
+	dev_dbg(&pdev->dev, "Probe success !!\n");
+
+	return 0;
+
+fail_req_irq:
+	rtc_device_unregister(rtc_dd->rtc);
+fail_rtc_enable:
+	platform_set_drvdata(pdev, NULL);
+	kfree(rtc_dd);
+	return rc;
+}
+
+#ifdef CONFIG_PM
+static int pm8xxx_rtc_resume(struct device *dev)
+{
+	struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+
+	if (device_may_wakeup(dev))
+		disable_irq_wake(rtc_dd->rtc_alarm_irq);
+
+	return 0;
+}
+
+static int pm8xxx_rtc_suspend(struct device *dev)
+{
+	struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+
+	if (device_may_wakeup(dev))
+		enable_irq_wake(rtc_dd->rtc_alarm_irq);
+
+	return 0;
+}
+
+static const struct dev_pm_ops pm8xxx_rtc_pm_ops = {
+	.suspend = pm8xxx_rtc_suspend,
+	.resume = pm8xxx_rtc_resume,
+};
+#endif
+
+static int __devexit pm8xxx_rtc_remove(struct platform_device *pdev)
+{
+	struct pm8xxx_rtc *rtc_dd = platform_get_drvdata(pdev);
+
+	device_init_wakeup(&pdev->dev, 0);
+	free_irq(rtc_dd->rtc_alarm_irq, rtc_dd);
+	rtc_device_unregister(rtc_dd->rtc);
+	platform_set_drvdata(pdev, NULL);
+	kfree(rtc_dd);
+
+	return 0;
+}
+
+static struct platform_driver pm8xxx_rtc_driver = {
+	.probe		= pm8xxx_rtc_probe,
+	.remove		= __devexit_p(pm8xxx_rtc_remove),
+	.driver	= {
+		.name	= PM8XXX_RTC_DEV_NAME,
+		.owner	= THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm	= &pm8xxx_rtc_pm_ops,
+#endif
+	},
+};
+
+static int __init pm8xxx_rtc_init(void)
+{
+	return platform_driver_register(&pm8xxx_rtc_driver);
+}
+module_init(pm8xxx_rtc_init);
+
+static void __exit pm8xxx_rtc_exit(void)
+{
+	platform_driver_unregister(&pm8xxx_rtc_driver);
+}
+module_exit(pm8xxx_rtc_exit);
+
+MODULE_ALIAS("platform:rtc-pm8xxx");
+MODULE_DESCRIPTION("PMIC8xxx RTC driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Anirudh Ghayal <aghayal@codeaurora.org>");
diff --git a/drivers/slimbus/Kconfig b/drivers/slimbus/Kconfig
new file mode 100644
index 0000000..a6a068d
--- /dev/null
+++ b/drivers/slimbus/Kconfig
@@ -0,0 +1,19 @@
+#
+# SLIMBUS driver configuration
+#
+menuconfig SLIMBUS
+	bool "Slimbus support"
+	depends on HAS_IOMEM
+	help
+	  Slimbus is a standard interface between baseband and
+	  application processors and peripheral components in mobile
+	  terminals.
+
+if SLIMBUS
+config SLIMBUS_MSM_CTRL
+	tristate "Qualcomm Slimbus Master Component"
+	default n
+	help
+	  Select driver for Qualcomm's Slimbus Master Component.
+
+endif
diff --git a/drivers/slimbus/Makefile b/drivers/slimbus/Makefile
new file mode 100644
index 0000000..436822d
--- /dev/null
+++ b/drivers/slimbus/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for kernel slimbus framework.
+#
+obj-$(CONFIG_SLIMBUS)			+= slimbus.o
+obj-$(CONFIG_SLIMBUS_MSM_CTRL)		+= slim-msm-ctrl.o
diff --git a/drivers/slimbus/slim-msm-ctrl.c b/drivers/slimbus/slim-msm-ctrl.c
new file mode 100644
index 0000000..decc77f
--- /dev/null
+++ b/drivers/slimbus/slim-msm-ctrl.c
@@ -0,0 +1,1769 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slimbus/slimbus.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/clk.h>
+#include <mach/sps.h>
+
+/* Per spec, max 40 bytes per received message */
+#define SLIM_RX_MSGQ_BUF_LEN	40
+
+#define SLIM_USR_MC_GENERIC_ACK		0x25
+#define SLIM_USR_MC_MASTER_CAPABILITY	0x0
+#define SLIM_USR_MC_REPORT_SATELLITE	0x1
+#define SLIM_USR_MC_ADDR_QUERY		0xD
+#define SLIM_USR_MC_ADDR_REPLY		0xE
+#define SLIM_USR_MC_DEFINE_CHAN		0x20
+#define SLIM_USR_MC_DEF_ACT_CHAN	0x21
+#define SLIM_USR_MC_CHAN_CTRL		0x23
+#define SLIM_USR_MC_RECONFIG_NOW	0x24
+#define SLIM_USR_MC_REQ_BW		0x28
+#define SLIM_USR_MC_CONNECT_SRC		0x2C
+#define SLIM_USR_MC_CONNECT_SINK	0x2D
+#define SLIM_USR_MC_DISCONNECT_PORT	0x2E
+
+/* MSM Slimbus peripheral settings */
+#define MSM_SLIM_PERF_SUMM_THRESHOLD	0x8000
+#define MSM_SLIM_NCHANS			32
+#define MSM_SLIM_NPORTS			24
+
+/*
+ * Enough descriptors are needed to receive present messages from slaves
+ * even if they arrive simultaneously. A present message needs 3 descriptors,
+ * so this size allows around 10 simultaneous reports.
+ */
+#define MSM_SLIM_DESC_NUM		32
+
+#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
+		((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))
+
+#define MSM_SLIM_NAME	"msm_slim_ctrl"
+#define SLIM_ROOT_FREQ 24576000
+
+#define MSM_CONCUR_MSG	8
+#define SAT_CONCUR_MSG	8
+#define DEF_WATERMARK	(8 << 1)
+#define DEF_ALIGN	0
+#define DEF_PACK	(1 << 6)
+#define ENABLE_PORT	1
+
+#define DEF_BLKSZ	0
+#define DEF_TRANSZ	0
+
+#define SAT_MAGIC_LSB	0xD9
+#define SAT_MAGIC_MSB	0xC5
+#define SAT_MSG_VER	0x1
+#define SAT_MSG_PROT	0x1
+#define MSM_SAT_SUCCSS	0x20
+
+#define QC_MFGID_LSB	0x2
+#define QC_MFGID_MSB	0x17
+#define QC_CHIPID_SL	0x10
+#define QC_DEVID_SAT1	0x3
+#define QC_DEVID_SAT2	0x4
+#define QC_DEVID_PGD	0x5
+
+/* Component registers */
+enum comp_reg {
+	COMP_CFG	= 0,
+	COMP_TRUST_CFG	= 0x14,
+};
+
+/* Manager registers */
+enum mgr_reg {
+	MGR_CFG		= 0x200,
+	MGR_STATUS	= 0x204,
+	MGR_RX_MSGQ_CFG	= 0x208,
+	MGR_INT_EN	= 0x210,
+	MGR_INT_STAT	= 0x214,
+	MGR_INT_CLR	= 0x218,
+	MGR_TX_MSG	= 0x230,
+	MGR_RX_MSG	= 0x270,
+	MGR_VE_STAT	= 0x300,
+};
+
+enum msg_cfg {
+	MGR_CFG_ENABLE		= 1,
+	MGR_CFG_RX_MSGQ_EN	= 1 << 1,
+	MGR_CFG_TX_MSGQ_EN_HIGH	= 1 << 2,
+	MGR_CFG_TX_MSGQ_EN_LOW	= 1 << 3,
+};
+/* Message queue types */
+enum msm_slim_msgq_type {
+	MSGQ_RX		= 0,
+	MSGQ_TX_LOW	= 1,
+	MSGQ_TX_HIGH	= 2,
+};
+/* Framer registers */
+enum frm_reg {
+	FRM_CFG		= 0x400,
+	FRM_STAT	= 0x404,
+	FRM_INT_EN	= 0x410,
+	FRM_INT_STAT	= 0x414,
+	FRM_INT_CLR	= 0x418,
+	FRM_WAKEUP	= 0x41C,
+	FRM_CLKCTL_DONE	= 0x420,
+	FRM_IE_STAT	= 0x430,
+	FRM_VE_STAT	= 0x440,
+};
+
+/* Interface registers */
+enum intf_reg {
+	INTF_CFG	= 0x600,
+	INTF_STAT	= 0x604,
+	INTF_INT_EN	= 0x610,
+	INTF_INT_STAT	= 0x614,
+	INTF_INT_CLR	= 0x618,
+	INTF_IE_STAT	= 0x630,
+	INTF_VE_STAT	= 0x640,
+};
+
+/* Manager PGD registers */
+enum pgd_reg {
+	PGD_CFG			= 0x1000,
+	PGD_STAT		= 0x1004,
+	PGD_INT_EN		= 0x1010,
+	PGD_INT_STAT		= 0x1014,
+	PGD_INT_CLR		= 0x1018,
+	PGD_OWN_EEn		= 0x1020,
+	PGD_PORT_INT_EN_EEn	= 0x1030,
+	PGD_PORT_INT_ST_EEn	= 0x1034,
+	PGD_PORT_INT_CL_EEn	= 0x1038,
+	PGD_PORT_CFGn		= 0x1080,
+	PGD_PORT_STATn		= 0x1084,
+	PGD_PORT_PARAMn		= 0x1088,
+	PGD_PORT_BLKn		= 0x108C,
+	PGD_PORT_TRANn		= 0x1090,
+	PGD_PORT_MCHANn		= 0x1094,
+	PGD_PORT_PSHPLLn	= 0x1098,
+	PGD_PORT_PC_CFGn	= 0x1600,
+	PGD_PORT_PC_VALn	= 0x1604,
+	PGD_PORT_PC_VFR_TSn	= 0x1608,
+	PGD_PORT_PC_VFR_STn	= 0x160C,
+	PGD_PORT_PC_VFR_CLn	= 0x1610,
+	PGD_IE_STAT		= 0x1700,
+	PGD_VE_STAT		= 0x1710,
+};
+
+enum rsc_grp {
+	EE_MGR_RSC_GRP	= 1 << 10,
+	EE_NGD_2	= 2 << 6,
+	EE_NGD_1	= 0,
+};
+
+enum mgr_intr {
+	MGR_INT_RECFG_DONE	= 1 << 24,
+	MGR_INT_TX_NACKED_2	= 1 << 25,
+	MGR_INT_MSG_BUF_CONTE	= 1 << 26,
+	MGR_INT_RX_MSG_RCVD	= 1 << 30,
+	MGR_INT_TX_MSG_SENT	= 1 << 31,
+};
+
+enum frm_cfg {
+	FRM_ACTIVE	= 1,
+	CLK_GEAR	= 7,
+	ROOT_FREQ	= 11,
+	REF_CLK_GEAR	= 15,
+};
+
+struct msm_slim_sps_bam {
+	u32			hdl;
+	void __iomem		*base;
+	int			irq;
+};
+
+struct msm_slim_endp {
+	struct sps_pipe			*sps;
+	struct sps_connect		config;
+	struct sps_register_event	event;
+	struct sps_mem_buffer		buf;
+	struct completion		*xcomp;
+	bool				connected;
+};
+
+struct msm_slim_ctrl {
+	struct slim_controller  ctrl;
+	struct slim_framer	framer;
+	struct device		*dev;
+	void __iomem		*base;
+	u32			curr_bw;
+	u8			msg_cnt;
+	u32			tx_buf[10];
+	u8			rx_msgs[MSM_CONCUR_MSG][SLIM_RX_MSGQ_BUF_LEN];
+	spinlock_t		rx_lock;
+	int			head;
+	int			tail;
+	int			irq;
+	int			err;
+	int			ee;
+	struct completion	*wr_comp;
+	struct msm_slim_sat	*satd;
+	struct msm_slim_endp	pipes[7];
+	struct msm_slim_sps_bam	bam;
+	struct msm_slim_endp	rx_msgq;
+	struct completion	rx_msgq_notify;
+	struct task_struct	*rx_msgq_thread;
+	struct clk		*rclk;
+	struct mutex		tx_lock;
+	u8			pgdla;
+	bool			use_rx_msgqs;
+	int			suspended;
+	int			pipe_b;
+	struct completion	reconf;
+	bool			reconf_busy;
+};
+
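+/*
+ * Per-satellite state: incoming satellite messages are queued in a ring
+ * buffer (shead/stail) and processed from a workqueue; satch holds the
+ * channel handles allocated on the satellite's behalf.
+ */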
+struct msm_slim_sat {
+	struct slim_device	satcl;
+	struct msm_slim_ctrl	*dev;
+	struct workqueue_struct *wq;
+	struct work_struct	wd;
+	u8			sat_msgs[SAT_CONCUR_MSG][40];
+	u16			*satch;
+	u8			nsatch;
+	bool			sent_capability;
+	int			shead;
+	int			stail;
+	spinlock_t lock;
+};
+
+static int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len)
+{
+	spin_lock(&dev->rx_lock);
+	if ((dev->tail + 1) % MSM_CONCUR_MSG == dev->head) {
+		spin_unlock(&dev->rx_lock);
+		dev_err(dev->dev, "RX QUEUE full!");
+		return -EXFULL;
+	}
+	memcpy((u8 *)dev->rx_msgs[dev->tail], (u8 *)buf, len);
+	dev->tail = (dev->tail + 1) % MSM_CONCUR_MSG;
+	spin_unlock(&dev->rx_lock);
+	return 0;
+}
+
+static int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&dev->rx_lock, flags);
+	if (dev->tail == dev->head) {
+		spin_unlock_irqrestore(&dev->rx_lock, flags);
+		return -ENODATA;
+	}
+	memcpy(buf, (u8 *)dev->rx_msgs[dev->head], 40);
+	dev->head = (dev->head + 1) % MSM_CONCUR_MSG;
+	spin_unlock_irqrestore(&dev->rx_lock, flags);
+	return 0;
+}
+
+static int msm_sat_enqueue(struct msm_slim_sat *sat, u32 *buf, u8 len)
+{
+	struct msm_slim_ctrl *dev = sat->dev;
+	spin_lock(&sat->lock);
+	if ((sat->stail + 1) % SAT_CONCUR_MSG == sat->shead) {
+		spin_unlock(&sat->lock);
+		dev_err(dev->dev, "SAT QUEUE full!");
+		return -EXFULL;
+	}
+	memcpy(sat->sat_msgs[sat->stail], (u8 *)buf, len);
+	sat->stail = (sat->stail + 1) % SAT_CONCUR_MSG;
+	spin_unlock(&sat->lock);
+	return 0;
+}
+
+static int msm_sat_dequeue(struct msm_slim_sat *sat, u8 *buf)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&sat->lock, flags);
+	if (sat->stail == sat->shead) {
+		spin_unlock_irqrestore(&sat->lock, flags);
+		return -ENODATA;
+	}
+	memcpy(buf, sat->sat_msgs[sat->shead], 40);
+	sat->shead = (sat->shead + 1) % SAT_CONCUR_MSG;
+	spin_unlock_irqrestore(&sat->lock, flags);
+	return 0;
+}
+
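+/* Reassemble the 6-byte enumeration address from the two received 32-bit words */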
+static void msm_get_eaddr(u8 *e_addr, u32 *buffer)
+{
+	e_addr[0] = (buffer[1] >> 24) & 0xff;
+	e_addr[1] = (buffer[1] >> 16) & 0xff;
+	e_addr[2] = (buffer[1] >> 8) & 0xff;
+	e_addr[3] = buffer[1] & 0xff;
+	e_addr[4] = (buffer[0] >> 24) & 0xff;
+	e_addr[5] = (buffer[0] >> 16) & 0xff;
+}
+
+static bool msm_is_sat_dev(u8 *e_addr)
+{
+	if (e_addr[5] == QC_MFGID_LSB && e_addr[4] == QC_MFGID_MSB &&
+		e_addr[2] != QC_CHIPID_SL &&
+		(e_addr[1] == QC_DEVID_SAT1 || e_addr[1] == QC_DEVID_SAT2))
+		return true;
+	return false;
+}
+
+static irqreturn_t msm_slim_interrupt(int irq, void *d)
+{
+	struct msm_slim_ctrl *dev = d;
+	u32 pstat;
+	u32 stat = readl_relaxed(dev->base + MGR_INT_STAT);
+
+	if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2) {
+		if (stat & MGR_INT_TX_MSG_SENT)
+			writel_relaxed(MGR_INT_TX_MSG_SENT,
+					dev->base + MGR_INT_CLR);
+		else {
+			writel_relaxed(MGR_INT_TX_NACKED_2,
+					dev->base + MGR_INT_CLR);
+			dev->err = -EIO;
+		}
+		/*
+		 * Guarantee that interrupt clear bit write goes through before
+		 * signalling completion/exiting ISR
+		 */
+		mb();
+		if (dev->wr_comp)
+			complete(dev->wr_comp);
+	}
+	if (stat & MGR_INT_RX_MSG_RCVD) {
+		u32 rx_buf[10];
+		u32 mc, mt;
+		u8 len, i;
+		rx_buf[0] = readl_relaxed(dev->base + MGR_RX_MSG);
+		len = rx_buf[0] & 0x1F;
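+		/* len is the message length in bytes; the rest of the message is read one 32-bit word at a time */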
+		for (i = 1; i < ((len + 3) >> 2); i++) {
+			rx_buf[i] = readl_relaxed(dev->base + MGR_RX_MSG +
+						(4 * i));
+			dev_dbg(dev->dev, "reading data: %x\n", rx_buf[i]);
+		}
+		mt = (rx_buf[0] >> 5) & 0x7;
+		mc = (rx_buf[0] >> 8) & 0xff;
+		dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
+		if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
+				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
+			struct msm_slim_sat *sat = dev->satd;
+			msm_sat_enqueue(sat, rx_buf, len);
+			writel_relaxed(MGR_INT_RX_MSG_RCVD,
+					dev->base + MGR_INT_CLR);
+			/*
+			 * Guarantee that CLR bit write goes through before
+			 * queuing work
+			 */
+			mb();
+			queue_work(sat->wq, &sat->wd);
+		} else if (mt == SLIM_MSG_MT_CORE &&
+			mc == SLIM_MSG_MC_REPORT_PRESENT) {
+			u8 e_addr[6];
+			msm_get_eaddr(e_addr, rx_buf);
+			if (msm_is_sat_dev(e_addr)) {
+				/*
+				 * Consider possibility that this device may
+				 * be reporting more than once?
+				 */
+				struct msm_slim_sat *sat = dev->satd;
+				msm_sat_enqueue(sat, rx_buf, len);
+				writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
+							MGR_INT_CLR);
+				/*
+				 * Guarantee that CLR bit write goes through
+				 * before queuing work
+				 */
+				mb();
+				queue_work(sat->wq, &sat->wd);
+			} else {
+				msm_slim_rx_enqueue(dev, rx_buf, len);
+				writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
+							MGR_INT_CLR);
+				/*
+				 * Guarantee that CLR bit write goes through
+				 * before signalling completion
+				 */
+				mb();
+				complete(&dev->rx_msgq_notify);
+			}
+		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
+				mc == SLIM_MSG_MC_REPLY_VALUE) {
+			msm_slim_rx_enqueue(dev, rx_buf, len);
+			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
+						MGR_INT_CLR);
+			/*
+			 * Guarantee that CLR bit write goes through
+			 * before signalling completion
+			 */
+			mb();
+			complete(&dev->rx_msgq_notify);
+		} else {
+			dev_err(dev->dev, "Unexpected MC:%x, MT:%x, len:%d",
+						mc, mt, len);
+			for (i = 0; i < ((len + 3) >> 2); i++)
+				dev_err(dev->dev, "error msg: %x", rx_buf[i]);
+			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
+						MGR_INT_CLR);
+			/*
+			 * Guarantee that CLR bit write goes through
+			 * before exiting
+			 */
+			mb();
+		}
+	}
+	if (stat & MGR_INT_RECFG_DONE) {
+		writel_relaxed(MGR_INT_RECFG_DONE, dev->base + MGR_INT_CLR);
+		/*
+		 * Guarantee that CLR bit write goes through
+		 * before exiting ISR
+		 */
+		mb();
+		complete(&dev->reconf);
+	}
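+	/* Check per-port interrupt status for this execution environment (EE) */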
+	pstat = readl_relaxed(dev->base + PGD_PORT_INT_ST_EEn + (16 * dev->ee));
+	if (pstat != 0) {
+		int i = 0;
+		for (i = dev->pipe_b; i < MSM_SLIM_NPORTS; i++) {
+			if (pstat & 1 << i) {
+				u32 val = readl_relaxed(dev->base +
+						PGD_PORT_STATn + (i * 32));
+				if (val & (1 << 19)) {
+					dev->ctrl.ports[i].err =
+						SLIM_P_DISCONNECT;
+					dev->pipes[i-dev->pipe_b].connected =
+							false;
+					/*
+					 * SPS will call completion since
+					 * ERROR flags are registered
+					 */
+				} else if (val & (1 << 2))
+					dev->ctrl.ports[i].err =
+							SLIM_P_OVERFLOW;
+				else if (val & (1 << 3))
+					dev->ctrl.ports[i].err =
+						SLIM_P_UNDERFLOW;
+			}
+			writel_relaxed(1, dev->base + PGD_PORT_INT_CL_EEn +
+				(dev->ee * 16));
+		}
+		/*
+		 * Guarantee that port interrupt bit(s) clearing writes go
+		 * through before exiting ISR
+		 */
+		mb();
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int
+msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep)
+{
+	int ret;
+	struct sps_pipe *endpoint;
+	struct sps_connect *config = &ep->config;
+
+	/* Allocate the endpoint */
+	endpoint = sps_alloc_endpoint();
+	if (!endpoint) {
+		dev_err(dev->dev, "sps_alloc_endpoint failed\n");
+		return -ENOMEM;
+	}
+
+	/* Get default connection configuration for an endpoint */
+	ret = sps_get_config(endpoint, config);
+	if (ret) {
+		dev_err(dev->dev, "sps_get_config failed 0x%x\n", ret);
+		goto sps_config_failed;
+	}
+
+	ep->sps = endpoint;
+	return 0;
+
+sps_config_failed:
+	sps_free_endpoint(endpoint);
+	return ret;
+}
+
+static void
+msm_slim_free_endpoint(struct msm_slim_endp *ep)
+{
+	sps_free_endpoint(ep->sps);
+	ep->sps = NULL;
+}
+
+static int msm_slim_sps_mem_alloc(
+		struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len)
+{
+	dma_addr_t phys;
+
+	mem->size = len;
+	mem->min_size = 0;
+	mem->base = dma_alloc_coherent(dev->dev, mem->size, &phys, GFP_KERNEL);
+
+	if (!mem->base) {
+		dev_err(dev->dev, "dma_alloc_coherent(%d) failed\n", len);
+		return -ENOMEM;
+	}
+
+	mem->phys_base = phys;
+	memset(mem->base, 0x00, mem->size);
+	return 0;
+}
+
+static void
+msm_slim_sps_mem_free(struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem)
+{
+	dma_free_coherent(dev->dev, mem->size, mem->base, mem->phys_base);
+	mem->size = 0;
+	mem->base = NULL;
+	mem->phys_base = 0;
+}
+
+static void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pn)
+{
+	u32 set_cfg = DEF_WATERMARK | DEF_ALIGN | DEF_PACK | ENABLE_PORT;
+	u32 int_port = readl_relaxed(dev->base + PGD_PORT_INT_EN_EEn +
+					(dev->ee * 16));
+	writel_relaxed(set_cfg, dev->base + PGD_PORT_CFGn + (pn * 32));
+	writel_relaxed(DEF_BLKSZ, dev->base + PGD_PORT_BLKn + (pn * 32));
+	writel_relaxed(DEF_TRANSZ, dev->base + PGD_PORT_TRANn + (pn * 32));
+	writel_relaxed((int_port | 1 << pn) , dev->base + PGD_PORT_INT_EN_EEn +
+			(dev->ee * 16));
+	/* Make sure that port registers are updated before returning */
+	mb();
+}
+
+static int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
+{
+	struct msm_slim_endp *endpoint = &dev->pipes[pn];
+	struct sps_connect *cfg = &endpoint->config;
+	u32 stat;
+	int ret = sps_get_config(dev->pipes[pn].sps, cfg);
+	if (ret) {
+		dev_err(dev->dev, "sps pipe-port get config error%x\n", ret);
+		return ret;
+	}
+	cfg->options = SPS_O_DESC_DONE | SPS_O_ERROR |
+				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
+
+	if (dev->pipes[pn].connected) {
+		ret = sps_set_config(dev->pipes[pn].sps, cfg);
+		if (ret) {
+			dev_err(dev->dev, "sps pipe-port set config error:%x\n",
+						ret);
+			return ret;
+		}
+	}
+
+	stat = readl_relaxed(dev->base + PGD_PORT_STATn +
+				(32 * (pn + dev->pipe_b)));
+	if (dev->ctrl.ports[pn].flow == SLIM_SRC) {
+		cfg->destination = dev->bam.hdl;
+		cfg->source = SPS_DEV_HANDLE_MEM;
+		cfg->dest_pipe_index = ((stat & (0xFF << 4)) >> 4);
+		cfg->src_pipe_index = 0;
+		dev_dbg(dev->dev, "flow src:pipe num:%d",
+					cfg->dest_pipe_index);
+		cfg->mode = SPS_MODE_DEST;
+	} else {
+		cfg->source = dev->bam.hdl;
+		cfg->destination = SPS_DEV_HANDLE_MEM;
+		cfg->src_pipe_index = ((stat & (0xFF << 4)) >> 4);
+		cfg->dest_pipe_index = 0;
+		dev_dbg(dev->dev, "flow dest:pipe num:%d",
+					cfg->src_pipe_index);
+		cfg->mode = SPS_MODE_SRC;
+	}
+	/* Space for descriptor FIFOs */
+	cfg->desc.size = MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec);
+	cfg->config = SPS_CONFIG_DEFAULT;
+	ret = sps_connect(dev->pipes[pn].sps, cfg);
+	if (!ret) {
+		dev->pipes[pn].connected = true;
+		msm_hw_set_port(dev, pn + dev->pipe_b);
+	}
+	return ret;
+}
+
+static u32 *msm_get_msg_buf(struct slim_controller *ctrl, int len)
+{
+	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+	/*
+	 * Currently we block a transaction until the current one completes.
+	 * In case we need multiple transactions, use message Q
+	 */
+	return dev->tx_buf;
+}
+
+static int msm_send_msg_buf(struct slim_controller *ctrl, u32 *buf, u8 len)
+{
+	int i;
+	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+	for (i = 0; i < (len + 3) >> 2; i++) {
+		dev_dbg(dev->dev, "TX data:0x%x\n", buf[i]);
+		writel_relaxed(buf[i], dev->base + MGR_TX_MSG + (i * 4));
+	}
+	/* Guarantee that message is sent before returning */
+	mb();
+	return 0;
+}
+
+static int msm_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
+{
+	DECLARE_COMPLETION_ONSTACK(done);
+	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+	u32 *pbuf;
+	u8 *puc;
+	int timeout;
+	u8 la = txn->la;
+	mutex_lock(&dev->tx_lock);
+	if (txn->mt == SLIM_MSG_MT_CORE &&
+		txn->mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION &&
+		dev->reconf_busy) {
+			wait_for_completion(&dev->reconf);
+			dev->reconf_busy = false;
+	}
+	if (dev->suspended) {
+		dev_err(dev->dev, "No transaction in suspended state");
+		mutex_unlock(&dev->tx_lock);
+		return -EBUSY;
+	}
+	txn->rl--;
+	pbuf = msm_get_msg_buf(ctrl, txn->rl);
+	dev->wr_comp = NULL;
+	dev->err = 0;
+
+	if (txn->dt == SLIM_MSG_DEST_ENUMADDR) {
+		mutex_unlock(&dev->tx_lock);
+		return -EPROTONOSUPPORT;
+	}
+	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
+		(txn->mc == SLIM_MSG_MC_CONNECT_SOURCE ||
+		 txn->mc == SLIM_MSG_MC_CONNECT_SINK ||
+		 txn->mc == SLIM_MSG_MC_DISCONNECT_PORT))
+		la = dev->pgdla;
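+	/* First-word destination-type field: 0 when addressing by logical address, 1 otherwise */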
+	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
+		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, txn->mc,
+					0, la);
+	else
+		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, txn->mc,
+					1, la);
+	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
+		puc = ((u8 *)pbuf) + 3;
+	else
+		puc = ((u8 *)pbuf) + 2;
+	if (txn->rbuf)
+		*(puc++) = txn->tid;
+	if ((txn->mt == SLIM_MSG_MT_CORE) &&
+		((txn->mc >= SLIM_MSG_MC_REQUEST_INFORMATION &&
+		txn->mc <= SLIM_MSG_MC_REPORT_INFORMATION) ||
+		(txn->mc >= SLIM_MSG_MC_REQUEST_VALUE &&
+		 txn->mc <= SLIM_MSG_MC_CHANGE_VALUE))) {
+		*(puc++) = (txn->ec & 0xFF);
+		*(puc++) = (txn->ec >> 8)&0xFF;
+	}
+	if (txn->wbuf)
+		memcpy(puc, txn->wbuf, txn->len);
+	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
+		(txn->mc == SLIM_MSG_MC_CONNECT_SOURCE ||
+		 txn->mc == SLIM_MSG_MC_CONNECT_SINK ||
+		 txn->mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
+		if (txn->mc != SLIM_MSG_MC_DISCONNECT_PORT)
+			dev->err = msm_slim_connect_pipe_port(dev, *puc);
+		else {
+			struct msm_slim_endp *endpoint = &dev->pipes[*puc];
+			struct sps_register_event sps_event;
+			memset(&sps_event, 0, sizeof(sps_event));
+			sps_register_event(endpoint->sps, &sps_event);
+			sps_disconnect(endpoint->sps);
+			/*
+			 * Remove channel disconnects master-side ports from
+			 * channel. No need to send that again on the bus
+			 */
+			dev->pipes[*puc].connected = false;
+			mutex_unlock(&dev->tx_lock);
+			return 0;
+		}
+		if (dev->err) {
+			dev_err(dev->dev, "pipe-port connect err:%d", dev->err);
+			mutex_unlock(&dev->tx_lock);
+			return dev->err;
+		}
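+		/* Translate the controller port number in the message to the hardware port by adding pipe_b */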
+		*(puc) = *(puc) + dev->pipe_b;
+	}
+	if (txn->mt == SLIM_MSG_MT_CORE &&
+		txn->mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION)
+		dev->reconf_busy = true;
+	dev->wr_comp = &done;
+	msm_send_msg_buf(ctrl, pbuf, txn->rl);
+	timeout = wait_for_completion_timeout(&done, HZ);
+	if (!timeout)
+		dev_err(dev->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
+					txn->mt);
+	mutex_unlock(&dev->tx_lock);
+	return timeout ? dev->err : -ETIMEDOUT;
+}
+
+static int msm_set_laddr(struct slim_controller *ctrl, const u8 *ea,
+				u8 elen, u8 laddr)
+{
+	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+	DECLARE_COMPLETION_ONSTACK(done);
+	int timeout;
+	u32 *buf;
+	mutex_lock(&dev->tx_lock);
+	buf = msm_get_msg_buf(ctrl, 9);
+	buf[0] = SLIM_MSG_ASM_FIRST_WORD(9, SLIM_MSG_MT_CORE,
+					SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
+					SLIM_MSG_DEST_LOGICALADDR,
+					ea[5] | ea[4] << 8);
+	buf[1] = ea[3] | (ea[2] << 8) | (ea[1] << 16) | (ea[0] << 24);
+	buf[2] = laddr;
+
+	dev->wr_comp = &done;
+	msm_send_msg_buf(ctrl, buf, 9);
+	timeout = wait_for_completion_timeout(&done, HZ);
+	mutex_unlock(&dev->tx_lock);
+	return timeout ? dev->err : -ETIMEDOUT;
+}
+
+static int msm_config_port(struct slim_controller *ctrl, u8 pn)
+{
+	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+	struct msm_slim_endp *endpoint;
+	int ret = 0;
+	if (ctrl->ports[pn].req == SLIM_REQ_HALF_DUP ||
+		ctrl->ports[pn].req == SLIM_REQ_MULTI_CH)
+		return -EPROTONOSUPPORT;
+	if (pn >= (MSM_SLIM_NPORTS - dev->pipe_b))
+		return -ENODEV;
+
+	endpoint = &dev->pipes[pn];
+	ret = msm_slim_init_endpoint(dev, endpoint);
+	dev_dbg(dev->dev, "msm_slim_init_endpoint returned:%x\n", ret);
+	return ret;
+}
+
+static enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
+				u8 pn, u8 **done_buf, u32 *done_len)
+{
+	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctr);
+	struct sps_iovec sio;
+	int ret;
+	if (done_len)
+		*done_len = 0;
+	if (done_buf)
+		*done_buf = NULL;
+	if (!dev->pipes[pn].connected)
+		return SLIM_P_DISCONNECT;
+	ret = sps_get_iovec(dev->pipes[pn].sps, &sio);
+	if (!ret) {
+		if (done_len)
+			*done_len = sio.size;
+		if (done_buf)
+			*done_buf = (u8 *)sio.addr;
+	}
+	dev_dbg(dev->dev, "get iovec returned %d\n", ret);
+	return SLIM_P_INPROGRESS;
+}
+
+static int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, u8 *iobuf,
+			u32 len, struct completion *comp)
+{
+	struct sps_register_event sreg;
+	int ret;
+	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+	if (pn > 7)
+		return -ENODEV;
+
+
+	ctrl->ports[pn].xcomp = comp;
+	sreg.options = (SPS_EVENT_DESC_DONE|SPS_EVENT_ERROR);
+	sreg.mode = SPS_TRIGGER_WAIT;
+	sreg.xfer_done = comp;
+	sreg.callback = NULL;
+	sreg.user = &ctrl->ports[pn];
+	ret = sps_register_event(dev->pipes[pn].sps, &sreg);
+	if (ret) {
+		dev_dbg(dev->dev, "sps register event error:%x\n", ret);
+		return ret;
+	}
+	ret = sps_transfer_one(dev->pipes[pn].sps, (u32)iobuf, len, NULL,
+				SPS_IOVEC_FLAG_INT);
+	dev_dbg(dev->dev, "sps submit xfer error code:%x\n", ret);
+
+	return ret;
+}
+
+static int msm_sat_define_ch(struct msm_slim_sat *sat, u8 *buf, u8 len, u8 mc)
+{
+	struct msm_slim_ctrl *dev = sat->dev;
+	enum slim_ch_control oper;
+	int i;
+	int ret = 0;
+	if (mc == SLIM_USR_MC_CHAN_CTRL) {
+		u16 chanh = sat->satch[buf[5]];
+		oper = ((buf[3] & 0xC0) >> 6);
+		/* Part of a group: activating/removing one channel takes care of the rest */
+		ret = slim_control_ch(&sat->satcl, chanh, oper, false);
+	} else {
+		u16 chh[40];
+		struct slim_ch prop;
+		u32 exp;
+		u8 coeff, cc;
+		u8 prrate = buf[6];
+		for (i = 8; i < len; i++)
+			chh[i-8] = sat->satch[buf[i]];
+		prop.dataf = (enum slim_ch_dataf)((buf[3] & 0xE0) >> 5);
+		prop.auxf = (enum slim_ch_auxf)((buf[4] & 0xC0) >> 5);
+		prop.baser = SLIM_RATE_4000HZ;
+		if (prrate & 0x8)
+			prop.baser = SLIM_RATE_11025HZ;
+		else
+			prop.baser = SLIM_RATE_4000HZ;
+		prop.prot = (enum slim_ch_proto)(buf[5] & 0x0F);
+		prop.sampleszbits = (buf[4] & 0x1F)*SLIM_CL_PER_SL;
+		exp = (u32)((buf[5] & 0xF0) >> 4);
+		coeff = (buf[4] & 0x20) >> 5;
+		cc = (coeff ? 3 : 1);
+		prop.ratem = cc * (1 << exp);
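+		/* More than one channel was listed, so define them as a group */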
+		if (i > 9)
+			ret = slim_define_ch(&sat->satcl, &prop, chh, len - 8,
+						true, &sat->satch[buf[8]]);
+		else
+			ret = slim_define_ch(&sat->satcl, &prop,
+						&sat->satch[buf[8]], 1, false,
+						NULL);
+		dev_dbg(dev->dev, "define sat grp returned:%d", ret);
+
+		/* part of group so activating 1 will take care of rest */
+		if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
+			ret = slim_control_ch(&sat->satcl,
+					sat->satch[buf[8]],
+					SLIM_CH_ACTIVATE, false);
+	}
+	return ret;
+}
+
+static void msm_slim_rxwq(struct msm_slim_ctrl *dev)
+{
+	u8 buf[40];
+	u8 mc, mt, len;
+	int i, ret;
+	if ((msm_slim_rx_dequeue(dev, (u8 *)buf)) != -ENODATA) {
+		len = buf[0] & 0x1F;
+		mt = (buf[0] >> 5) & 0x7;
+		mc = buf[1];
+		if (mt == SLIM_MSG_MT_CORE &&
+			mc == SLIM_MSG_MC_REPORT_PRESENT) {
+			u8 laddr;
+			u8 e_addr[6];
+			for (i = 0; i < 6; i++)
+				e_addr[i] = buf[7-i];
+
+			ret = slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr);
+			/* Is this a Qualcomm ported generic device (PGD)? */
+			if (!ret && e_addr[5] == QC_MFGID_LSB &&
+				e_addr[4] == QC_MFGID_MSB &&
+				e_addr[1] == QC_DEVID_PGD &&
+				e_addr[2] != QC_CHIPID_SL)
+				dev->pgdla = laddr;
+
+		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
+				mc == SLIM_MSG_MC_REPLY_VALUE) {
+			u8 tid = buf[3];
+			dev_dbg(dev->dev, "tid:%d, len:%d\n", tid, len - 4);
+			slim_msg_response(&dev->ctrl, &buf[4], tid,
+						len - 4);
+		} else {
+			dev_err(dev->dev, "unexpected message:mc:%x, mt:%x",
+					mc, mt);
+			for (i = 0; i < len; i++)
+				dev_err(dev->dev, "error msg: %x", buf[i]);
+
+		}
+	} else
+		dev_err(dev->dev, "rxwq called and no dequeue");
+}
+
+static void slim_sat_rxprocess(struct work_struct *work)
+{
+	struct msm_slim_sat *sat = container_of(work, struct msm_slim_sat, wd);
+	struct msm_slim_ctrl *dev = sat->dev;
+	u8 buf[40];
+
+	while ((msm_sat_dequeue(sat, buf)) != -ENODATA) {
+		struct slim_msg_txn txn;
+		int i;
+		u8 len, mc, mt;
+		u32 bw_sl;
+		int ret = 0;
+		bool gen_ack = false;
+		u8 tid;
+		u8 wbuf[8];
+		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
+		txn.dt = SLIM_MSG_DEST_LOGICALADDR;
+		txn.ec = 0;
+		txn.rbuf = NULL;
+		txn.la = sat->satcl.laddr;
+		/* satellite handling */
+		len = buf[0] & 0x1F;
+		mc = buf[1];
+		mt = (buf[0] >> 5) & 0x7;
+
+		if (mt == SLIM_MSG_MT_CORE &&
+			mc == SLIM_MSG_MC_REPORT_PRESENT) {
+			u8 laddr;
+			u8 e_addr[6];
+			for (i = 0; i < 6; i++)
+				e_addr[i] = buf[7-i];
+
+			slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr);
+			sat->satcl.laddr = laddr;
+		}
+		switch (mc) {
+		case SLIM_MSG_MC_REPORT_PRESENT:
+			/* send a Manager capability msg */
+			if (sat->sent_capability)
+				continue;
+			ret = slim_add_device(&dev->ctrl, &sat->satcl);
+			if (ret) {
+				dev_err(dev->dev,
+					"Satellite-init failed");
+				continue;
+			}
+			/* Satellite owns first 21 channels */
+			sat->satch = kzalloc(21 * sizeof(u16), GFP_KERNEL);
+			sat->nsatch = 20;
+			/* alloc all sat chans */
+			for (i = 0; i < 21; i++)
+				slim_alloc_ch(&sat->satcl, &sat->satch[i]);
+			txn.mc = SLIM_USR_MC_MASTER_CAPABILITY;
+			txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
+			txn.la = sat->satcl.laddr;
+			txn.rl = 8;
+			wbuf[0] = SAT_MAGIC_LSB;
+			wbuf[1] = SAT_MAGIC_MSB;
+			wbuf[2] = SAT_MSG_VER;
+			wbuf[3] = SAT_MSG_PROT;
+			txn.wbuf = wbuf;
+			txn.len = 4;
+			sat->sent_capability = true;
+			msm_xfer_msg(&dev->ctrl, &txn);
+			break;
+		case SLIM_USR_MC_ADDR_QUERY:
+			memcpy(&wbuf[1], &buf[4], 6);
+			ret = slim_get_logical_addr(&sat->satcl,
+					&wbuf[1], 6, &wbuf[7]);
+			if (ret)
+				memset(&wbuf[1], 0, 6);
+			wbuf[0] = buf[3];
+			txn.mc = SLIM_USR_MC_ADDR_REPLY;
+			txn.rl = 12;
+			txn.len = 8;
+			txn.wbuf = wbuf;
+			msm_xfer_msg(&dev->ctrl, &txn);
+			break;
+		case SLIM_USR_MC_DEFINE_CHAN:
+		case SLIM_USR_MC_DEF_ACT_CHAN:
+		case SLIM_USR_MC_CHAN_CTRL:
+			if (mc != SLIM_USR_MC_CHAN_CTRL)
+				tid = buf[7];
+			else
+				tid = buf[4];
+			gen_ack = true;
+			ret = msm_sat_define_ch(sat, buf, len, mc);
+			if (ret) {
+				dev_err(dev->dev,
+					"SAT define_ch returned:%d",
+					ret);
+			}
+			break;
+		case SLIM_USR_MC_RECONFIG_NOW:
+			tid = buf[3];
+			gen_ack = true;
+			ret = slim_reconfigure_now(&sat->satcl);
+			break;
+		case SLIM_USR_MC_REQ_BW:
+			/* what we get is in SLOTS */
+			bw_sl = (u32)buf[4] << 3 |
+						((buf[3] & 0xE0) >> 5);
+			sat->satcl.pending_msgsl = bw_sl;
+			tid = buf[5];
+			gen_ack = true;
+			break;
+		case SLIM_USR_MC_CONNECT_SRC:
+		case SLIM_USR_MC_CONNECT_SINK:
+			if (mc == SLIM_USR_MC_CONNECT_SRC)
+				txn.mc = SLIM_MSG_MC_CONNECT_SOURCE;
+			else
+				txn.mc = SLIM_MSG_MC_CONNECT_SINK;
+			wbuf[0] = buf[4] & 0x1F;
+			wbuf[1] = buf[5];
+			tid = buf[6];
+			txn.la = buf[3];
+			txn.mt = SLIM_MSG_MT_CORE;
+			txn.rl = 6;
+			txn.len = 2;
+			txn.wbuf = wbuf;
+			gen_ack = true;
+			ret = msm_xfer_msg(&dev->ctrl, &txn);
+			break;
+		case SLIM_USR_MC_DISCONNECT_PORT:
+			txn.mc = SLIM_MSG_MC_DISCONNECT_PORT;
+			wbuf[0] = buf[4] & 0x1F;
+			tid = buf[5];
+			txn.la = buf[3];
+			txn.rl = 5;
+			txn.len = 1;
+			txn.mt = SLIM_MSG_MT_CORE;
+			txn.wbuf = wbuf;
+			gen_ack = true;
+			ret = msm_xfer_msg(&dev->ctrl, &txn);
+		default:
+			break;
+		}
+		if (!gen_ack)
+			continue;
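+		/* Send a generic ACK back to the satellite: tid followed by status */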
+		wbuf[0] = tid;
+		if (!ret)
+			wbuf[1] = MSM_SAT_SUCCSS;
+		else
+			wbuf[1] = 0;
+		txn.mc = SLIM_USR_MC_GENERIC_ACK;
+		txn.la = sat->satcl.laddr;
+		txn.rl = 6;
+		txn.len = 2;
+		txn.wbuf = wbuf;
+		txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
+		msm_xfer_msg(&dev->ctrl, &txn);
+	}
+}
+
+static void
+msm_slim_rx_msgq_event(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
+{
+	u32 *buf = ev->data.transfer.user;
+	struct sps_iovec *iovec = &ev->data.transfer.iovec;
+
+	/*
+	 * The virtual address must be offset by the same index as the
+	 * physical address, or the actual virtual address can be passed in
+	 * if the sps_mem_buffer is not needed. Note that if a completion is
+	 * used, the virtual address is not available and must be calculated
+	 * from the offset of the physical address.
+	 */
+	if (ev->event_id == SPS_EVENT_DESC_DONE) {
+
+		pr_debug("buf = 0x%p, data = 0x%x\n", buf, *buf);
+
+		pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
+			iovec->addr, iovec->size, iovec->flags);
+
+	} else {
+		dev_err(dev->dev, "%s: unknown event %d\n",
+					__func__, ev->event_id);
+	}
+}
+
+static void msm_slim_rx_msgq_cb(struct sps_event_notify *notify)
+{
+	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)notify->user;
+	msm_slim_rx_msgq_event(dev, notify);
+}
+
+/* Queue up Rx message buffer */
+static inline int
+msm_slim_post_rx_msgq(struct msm_slim_ctrl *dev, int ix)
+{
+	int ret;
+	u32 flags = SPS_IOVEC_FLAG_INT;
+	struct msm_slim_endp *endpoint = &dev->rx_msgq;
+	struct sps_mem_buffer *mem = &endpoint->buf;
+	struct sps_pipe *pipe = endpoint->sps;
+
+	/* Rx message queue buffers are 4 bytes in length */
+	u8 *virt_addr = mem->base + (4 * ix);
+	u32 phys_addr = mem->phys_base + (4 * ix);
+
+	pr_debug("index:%d, phys:0x%x, virt:0x%p\n", ix, phys_addr, virt_addr);
+
+	ret = sps_transfer_one(pipe, phys_addr, 4, virt_addr, flags);
+	if (ret)
+		dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);
+
+	return ret;
+}
+
+static inline int
+msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset)
+{
+	struct msm_slim_endp *endpoint = &dev->rx_msgq;
+	struct sps_mem_buffer *mem = &endpoint->buf;
+	struct sps_pipe *pipe = endpoint->sps;
+	struct sps_iovec iovec;
+	int index;
+	int ret;
+
+	ret = sps_get_iovec(pipe, &iovec);
+	if (ret) {
+		dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
+		goto err_exit;
+	}
+
+	pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
+		iovec.addr, iovec.size, iovec.flags);
+	BUG_ON(iovec.addr < mem->phys_base);
+	BUG_ON(iovec.addr >= mem->phys_base + mem->size);
+
+	/* Calculate buffer index */
+	index = (iovec.addr - mem->phys_base) / 4;
+	*(data + offset) = *((u32 *)mem->base + index);
+
+	pr_debug("buf = 0x%p, data = 0x%x\n", (u32 *)mem->base + index, *data);
+
+	/* Add buffer back to the queue */
+	(void)msm_slim_post_rx_msgq(dev, index);
+
+err_exit:
+	return ret;
+}
+
+static int msm_slim_rx_msgq_thread(void *data)
+{
+	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
+	struct completion *notify = &dev->rx_msgq_notify;
+	struct msm_slim_sat *sat = NULL;
+	u32 mc = 0;
+	u32 mt = 0;
+	u32 buffer[10];
+	int index = 0;
+	u8 msg_len = 0;
+	int ret;
+
+	dev_dbg(dev->dev, "rx thread started");
+
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		ret = wait_for_completion_interruptible(notify);
+
+		if (ret)
+			dev_err(dev->dev, "rx thread wait error:%d", ret);
+
+		/* 1 irq notification per message */
+		if (!dev->use_rx_msgqs) {
+			msm_slim_rxwq(dev);
+			continue;
+		}
+
+		ret = msm_slim_rx_msgq_get(dev, buffer, index);
+		if (ret) {
+			dev_err(dev->dev, "rx_msgq_get() failed 0x%x\n", ret);
+			continue;
+		}
+
+		pr_debug("message[%d] = 0x%x\n", index, *buffer);
+
+		/* Decide if we use generic RX or satellite RX */
+		if (index++ == 0) {
+			msg_len = *buffer & 0x1F;
+			pr_debug("Start of new message, len = %d\n", msg_len);
+			mt = (buffer[0] >> 5) & 0x7;
+			mc = (buffer[0] >> 8) & 0xff;
+			dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
+			if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
+				mt == SLIM_MSG_MT_SRC_REFERRED_USER)
+				sat = dev->satd;
+
+		} else if ((index * 4) >= msg_len) {
+			index = 0;
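+			/* A complete message has been assembled from 4-byte reads; route it */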
+			if (mt == SLIM_MSG_MT_CORE &&
+				mc == SLIM_MSG_MC_REPORT_PRESENT) {
+				u8 e_addr[6];
+				msm_get_eaddr(e_addr, buffer);
+				if (msm_is_sat_dev(e_addr))
+					sat = dev->satd;
+			}
+			if (sat) {
+				msm_sat_enqueue(sat, buffer, msg_len);
+				queue_work(sat->wq, &sat->wd);
+				sat = NULL;
+			} else {
+				msm_slim_rx_enqueue(dev, buffer, msg_len);
+				msm_slim_rxwq(dev);
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int __devinit msm_slim_init_rx_msgq(struct msm_slim_ctrl *dev)
+{
+	int i, ret;
+	u32 pipe_offset;
+	struct msm_slim_endp *endpoint = &dev->rx_msgq;
+	struct sps_connect *config = &endpoint->config;
+	struct sps_mem_buffer *descr = &config->desc;
+	struct sps_mem_buffer *mem = &endpoint->buf;
+	struct completion *notify = &dev->rx_msgq_notify;
+
+	struct sps_register_event sps_error_event; /* SPS_ERROR */
+	struct sps_register_event sps_descr_event; /* DESCR_DONE */
+
+	/* Allocate the endpoint */
+	ret = msm_slim_init_endpoint(dev, endpoint);
+	if (ret) {
+		dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
+		goto sps_init_endpoint_failed;
+	}
+
+	/* Get the pipe indices for the message queues */
+	pipe_offset = (readl_relaxed(dev->base + MGR_STATUS) & 0xfc) >> 2;
+	dev_dbg(dev->dev, "Message queue pipe offset %d\n", pipe_offset);
+
+	config->mode = SPS_MODE_SRC;
+	config->source = dev->bam.hdl;
+	config->destination = SPS_DEV_HANDLE_MEM;
+	config->src_pipe_index = pipe_offset;
+	config->options = SPS_O_DESC_DONE | SPS_O_ERROR |
+				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
+
+	/* Allocate memory for the FIFO descriptors */
+	ret = msm_slim_sps_mem_alloc(dev, descr,
+				MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
+	if (ret) {
+		dev_err(dev->dev, "unable to allocate SPS descriptors\n");
+		goto alloc_descr_failed;
+	}
+
+	ret = sps_connect(endpoint->sps, config);
+	if (ret) {
+		dev_err(dev->dev, "sps_connect failed 0x%x\n", ret);
+		goto sps_connect_failed;
+	}
+
+	/* Register completion for DESC_DONE */
+	init_completion(notify);
+	memset(&sps_descr_event, 0x00, sizeof(sps_descr_event));
+
+	sps_descr_event.mode = SPS_TRIGGER_CALLBACK;
+	sps_descr_event.options = SPS_O_DESC_DONE;
+	sps_descr_event.user = (void *)dev;
+	sps_descr_event.xfer_done = notify;
+
+	ret = sps_register_event(endpoint->sps, &sps_descr_event);
+	if (ret) {
+		dev_err(dev->dev, "sps_register_event() failed 0x%x\n", ret);
+		goto sps_reg_event_failed;
+	}
+
+	/* Register callback for errors */
+	memset(&sps_error_event, 0x00, sizeof(sps_error_event));
+	sps_error_event.mode = SPS_TRIGGER_CALLBACK;
+	sps_error_event.options = SPS_O_ERROR;
+	sps_error_event.user = (void *)dev;
+	sps_error_event.callback = msm_slim_rx_msgq_cb;
+
+	ret = sps_register_event(endpoint->sps, &sps_error_event);
+	if (ret) {
+		dev_err(dev->dev, "sps_register_event() failed 0x%x\n", ret);
+		goto sps_reg_event_failed;
+	}
+
+	/* Allocate memory for the message buffers: N descriptors, 4 bytes per message */
+	ret = msm_slim_sps_mem_alloc(dev, mem, MSM_SLIM_DESC_NUM * 4);
+	if (ret) {
+		dev_err(dev->dev, "dma_alloc_coherent failed\n");
+		goto alloc_buffer_failed;
+	}
+
+	/*
+	 * Call transfer_one for each 4-byte buffer.
+	 * Use (buf->size / 4) - 1 as the number of buffers to post.
+	 */
+
+	/* Setup the transfer */
+	for (i = 0; i < (MSM_SLIM_DESC_NUM - 1); i++) {
+		ret = msm_slim_post_rx_msgq(dev, i);
+		if (ret) {
+			dev_err(dev->dev, "post_rx_msgq() failed 0x%x\n", ret);
+			goto sps_transfer_failed;
+		}
+	}
+
+	/* Fire up the Rx message queue thread */
+	dev->rx_msgq_thread = kthread_run(msm_slim_rx_msgq_thread, dev,
+					MSM_SLIM_NAME "_rx_msgq_thread");
+	if (IS_ERR(dev->rx_msgq_thread)) {
+		dev_err(dev->dev, "Failed to start Rx message queue thread\n");
+		ret = PTR_ERR(dev->rx_msgq_thread);
+	} else
+		return 0;
+
+sps_transfer_failed:
+	msm_slim_sps_mem_free(dev, mem);
+alloc_buffer_failed:
+	memset(&sps_error_event, 0x00, sizeof(sps_error_event));
+	sps_register_event(endpoint->sps, &sps_error_event);
+sps_reg_event_failed:
+	sps_disconnect(endpoint->sps);
+sps_connect_failed:
+	msm_slim_sps_mem_free(dev, descr);
+alloc_descr_failed:
+	msm_slim_free_endpoint(endpoint);
+sps_init_endpoint_failed:
+	return ret;
+}
+
+/* Registers BAM h/w resource with SPS driver and initializes msgq endpoints */
+static int __devinit
+msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem)
+{
+	int i, ret;
+	u32 bam_handle;
+	struct sps_bam_props bam_props = {0};
+
+	static struct sps_bam_sec_config_props sec_props = {
+		.ees = {
+			[0] = {		/* LPASS */
+				.vmid = 0,
+				.pipe_mask = 0xFFFF98,
+			},
+			[1] = {		/* Krait Apps */
+				.vmid = 1,
+				.pipe_mask = 0x3F000007,
+			},
+			[2] = {		/* Modem */
+				.vmid = 2,
+				.pipe_mask = 0x00000060,
+			},
+		},
+	};
+
+	bam_props.ee = dev->ee;
+	bam_props.virt_addr = dev->bam.base;
+	bam_props.phys_addr = bam_mem->start;
+	bam_props.irq = dev->bam.irq;
+	bam_props.manage = SPS_BAM_MGR_LOCAL;
+	bam_props.summing_threshold = MSM_SLIM_PERF_SUMM_THRESHOLD;
+
+	bam_props.sec_config = SPS_BAM_SEC_DO_CONFIG;
+	bam_props.p_sec_config_props = &sec_props;
+
+	bam_props.options = SPS_O_DESC_DONE | SPS_O_ERROR |
+				SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
+
+	/* First 7 bits are for message Qs */
+	for (i = 7; i < 32; i++) {
+		/* Check what pipes are owned by Apps. */
+		if ((sec_props.ees[dev->ee].pipe_mask >> i) & 0x1)
+			break;
+	}
+	dev->pipe_b = i - 7;
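+	/*
+	 * Worked example for the table above (dev->ee == 1, Krait Apps):
+	 * pipe_mask 0x3F000007 has its lowest set bit at or above bit 7 at
+	 * bit 24, so pipe_b = 24 - 7 = 17.
+	 */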
+
+	/* Register the BAM device with the SPS driver */
+	ret = sps_register_bam_device(&bam_props, &bam_handle);
+	if (ret) {
+		dev_err(dev->dev, "sps_register_bam_device failed 0x%x\n", ret);
+		return ret;
+	}
+	dev->bam.hdl = bam_handle;
+	dev_dbg(dev->dev, "SLIM BAM registered, handle = 0x%x\n", bam_handle);
+
+	ret = msm_slim_init_rx_msgq(dev);
+	if (ret) {
+		dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
+		goto rx_msgq_init_failed;
+	}
+
+	return 0;
+rx_msgq_init_failed:
+	sps_deregister_bam_device(bam_handle);
+	dev->bam.hdl = 0L;
+	return ret;
+}
+
+static void msm_slim_sps_exit(struct msm_slim_ctrl *dev)
+{
+	if (dev->use_rx_msgqs) {
+		struct msm_slim_endp *endpoint = &dev->rx_msgq;
+		struct sps_connect *config = &endpoint->config;
+		struct sps_mem_buffer *descr = &config->desc;
+		struct sps_mem_buffer *mem = &endpoint->buf;
+		struct sps_register_event sps_event;
+		memset(&sps_event, 0x00, sizeof(sps_event));
+		msm_slim_sps_mem_free(dev, mem);
+		sps_register_event(endpoint->sps, &sps_event);
+		sps_disconnect(endpoint->sps);
+		msm_slim_sps_mem_free(dev, descr);
+		msm_slim_free_endpoint(endpoint);
+	}
+	sps_deregister_bam_device(dev->bam.hdl);
+}
+
+static int __devinit msm_slim_probe(struct platform_device *pdev)
+{
+	struct msm_slim_ctrl *dev;
+	int ret;
+	struct resource		*bam_mem, *bam_io;
+	struct resource		*slim_mem, *slim_io;
+	struct resource		*irq, *bam_irq;
+	slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"slimbus_physical");
+	if (!slim_mem) {
+		dev_err(&pdev->dev, "no slimbus physical memory resource\n");
+		return -ENODEV;
+	}
+	slim_io = request_mem_region(slim_mem->start, resource_size(slim_mem),
+					pdev->name);
+	if (!slim_io) {
+		dev_err(&pdev->dev, "slimbus memory already claimed\n");
+		return -EBUSY;
+	}
+
+	bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"slimbus_bam_physical");
+	if (!bam_mem) {
+		dev_err(&pdev->dev, "no slimbus BAM memory resource\n");
+		ret = -ENODEV;
+		goto err_get_res_bam_failed;
+	}
+	bam_io = request_mem_region(bam_mem->start, resource_size(bam_mem),
+					pdev->name);
+	if (!bam_io) {
+		dev_err(&pdev->dev, "slimbus BAM memory already claimed\n");
+		ret = -EBUSY;
+		goto err_get_res_bam_failed;
+	}
+	irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+						"slimbus_irq");
+	if (!irq) {
+		dev_err(&pdev->dev, "no slimbus IRQ resource\n");
+		ret = -ENODEV;
+		goto err_get_res_failed;
+	}
+	bam_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+						"slimbus_bam_irq");
+	if (!bam_irq) {
+		dev_err(&pdev->dev, "no slimbus BAM IRQ resource\n");
+		ret = -ENODEV;
+		goto err_get_res_failed;
+	}
+
+	dev = kzalloc(sizeof(struct msm_slim_ctrl), GFP_KERNEL);
+	if (!dev) {
+		dev_err(&pdev->dev, "no memory for MSM slimbus controller\n");
+		ret = -ENOMEM;
+		goto err_get_res_failed;
+	}
+	dev->dev = &pdev->dev;
+	platform_set_drvdata(pdev, dev);
+	slim_set_ctrldata(&dev->ctrl, dev);
+	dev->base = ioremap(slim_mem->start, resource_size(slim_mem));
+	if (!dev->base) {
+		dev_err(&pdev->dev, "IOremap failed\n");
+		ret = -ENOMEM;
+		goto err_ioremap_failed;
+	}
+	dev->bam.base = ioremap(bam_mem->start, resource_size(bam_mem));
+	if (!dev->bam.base) {
+		dev_err(&pdev->dev, "BAM IOremap failed\n");
+		ret = -ENOMEM;
+		goto err_ioremap_bam_failed;
+	}
+	dev->ctrl.nr = pdev->id;
+	dev->ctrl.nchans = MSM_SLIM_NCHANS;
+	dev->ctrl.nports = MSM_SLIM_NPORTS;
+	dev->ctrl.set_laddr = msm_set_laddr;
+	dev->ctrl.xfer_msg = msm_xfer_msg;
+	dev->ctrl.config_port = msm_config_port;
+	dev->ctrl.port_xfer = msm_slim_port_xfer;
+	dev->ctrl.port_xfer_status = msm_slim_port_xfer_status;
+	/* Reserve some messaging BW for satellite-apps driver communication */
+	dev->ctrl.sched.pending_msgsl = 30;
+
+	init_completion(&dev->reconf);
+	mutex_init(&dev->tx_lock);
+	spin_lock_init(&dev->rx_lock);
+	dev->ee = 1;
+	dev->use_rx_msgqs = 1;
+	dev->irq = irq->start;
+	dev->bam.irq = bam_irq->start;
+
+	ret = msm_slim_sps_init(dev, bam_mem);
+	if (ret != 0) {
+		dev_err(dev->dev, "error SPS init\n");
+		goto err_sps_init_failed;
+	}
+
+
+	dev->rclk = clk_get(dev->dev, "audio_slimbus_clk");
+	if (IS_ERR(dev->rclk)) {
+		dev_err(dev->dev, "slimbus clock not found\n");
+		ret = PTR_ERR(dev->rclk);
+		goto err_clk_get_failed;
+	}
+	clk_set_rate(dev->rclk, SLIM_ROOT_FREQ);
+	clk_enable(dev->rclk);
+	dev->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
+	dev->framer.superfreq =
+		dev->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
+	dev->ctrl.a_framer = &dev->framer;
+	dev->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
+	ret = slim_add_numbered_controller(&dev->ctrl);
+	if (ret) {
+		dev_err(dev->dev, "error adding controller\n");
+		goto err_ctrl_failed;
+	}
+
+	ret = request_irq(dev->irq, msm_slim_interrupt, IRQF_TRIGGER_HIGH,
+				"msm_slim_irq", dev);
+	if (ret) {
+		dev_err(&pdev->dev, "request IRQ failed\n");
+		goto err_request_irq_failed;
+	}
+
+	dev->satd = kzalloc(sizeof(struct msm_slim_sat), GFP_KERNEL);
+	if (!dev->satd) {
+		ret = -ENOMEM;
+		goto err_sat_failed;
+	}
+	dev->satd->dev = dev;
+	dev->satd->satcl.name  = "msm_sat_dev";
+	spin_lock_init(&dev->satd->lock);
+	INIT_WORK(&dev->satd->wd, slim_sat_rxprocess);
+	dev->satd->wq = create_singlethread_workqueue("msm_slim_sat");
+	/* Component register initialization */
+	writel_relaxed(1, dev->base + COMP_CFG);
+	writel_relaxed((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
+				dev->base + COMP_TRUST_CFG);
+
+	/*
+	 * Manager register initialization
+	 * If RX msg Q is used, disable RX_MSG_RCVD interrupt
+	 */
+	if (dev->use_rx_msgqs)
+		writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
+			MGR_INT_MSG_BUF_CONTE | /* MGR_INT_RX_MSG_RCVD | */
+			MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
+	else
+		writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
+			MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
+			MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
+	writel_relaxed(1, dev->base + MGR_CFG);
+	/*
+	 * Framer registers are beyond 1K memory region after Manager and/or
+	 * component registers. Make sure those writes are ordered
+	 * before framer register writes
+	 */
+	wmb();
+
+	/* Framer register initialization */
+	writel_relaxed(1, dev->base + FRM_WAKEUP);
+	writel_relaxed((0xA << REF_CLK_GEAR) | (0xA << CLK_GEAR) |
+		(1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
+		dev->base + FRM_CFG);
+	/*
+	 * Make sure that framer wake-up and enabling writes go through
+	 * before any other component is enabled. Framer is responsible for
+	 * clocking the bus and enabling framer first will ensure that other
+	 * devices can report presence when they are enabled
+	 */
+	mb();
+
+	/* Enable RX msg Q */
+	if (dev->use_rx_msgqs)
+		writel_relaxed(MGR_CFG_ENABLE | MGR_CFG_RX_MSGQ_EN,
+					dev->base + MGR_CFG);
+	else
+		writel_relaxed(MGR_CFG_ENABLE, dev->base + MGR_CFG);
+	/*
+	 * Make sure that manager-enable is written through before interface
+	 * device is enabled
+	 */
+	mb();
+	writel_relaxed(1, dev->base + INTF_CFG);
+	/*
+	 * Make sure that interface-enable is written through before enabling
+	 * ported generic device inside MSM manager
+	 */
+	mb();
+	writel_relaxed(1, dev->base + PGD_CFG);
+	writel_relaxed(0x3F<<17, dev->base + (PGD_OWN_EEn + (4 * dev->ee)));
+	/*
+	 * Make sure that ported generic device is enabled and port-EE settings
+	 * are written through before finally enabling the component
+	 */
+	mb();
+
+	writel_relaxed(1, dev->base + COMP_CFG);
+	/*
+	 * Make sure that all writes have gone through before exiting this
+	 * function
+	 */
+	mb();
+	dev_dbg(dev->dev, "MSM SB controller is up!\n");
+	return 0;
+
+err_sat_failed:
+	free_irq(dev->irq, dev);
+err_request_irq_failed:
+	slim_del_controller(&dev->ctrl);
+err_ctrl_failed:
+	clk_disable(dev->rclk);
+	clk_put(dev->rclk);
+err_clk_get_failed:
+	msm_slim_sps_exit(dev);
+err_sps_init_failed:
+	iounmap(dev->bam.base);
+err_ioremap_bam_failed:
+	iounmap(dev->base);
+err_ioremap_failed:
+	kfree(dev);
+err_get_res_failed:
+	release_mem_region(bam_mem->start, resource_size(bam_mem));
+err_get_res_bam_failed:
+	release_mem_region(slim_mem->start, resource_size(slim_mem));
+	return ret;
+}
+
+static int __devexit msm_slim_remove(struct platform_device *pdev)
+{
+	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+	struct resource *bam_mem;
+	struct resource *slim_mem;
+	struct msm_slim_sat *sat = dev->satd;
+	slim_remove_device(&sat->satcl);
+	kfree(sat->satch);
+	destroy_workqueue(sat->wq);
+	kfree(sat);
+	free_irq(dev->irq, dev);
+	slim_del_controller(&dev->ctrl);
+	clk_disable(dev->rclk);
+	clk_put(dev->rclk);
+	msm_slim_sps_exit(dev);
+	kthread_stop(dev->rx_msgq_thread);
+	iounmap(dev->bam.base);
+	iounmap(dev->base);
+	kfree(dev);
+	bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"slimbus_bam_physical");
+	release_mem_region(bam_mem->start, resource_size(bam_mem));
+	slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"slimbus_physical");
+	release_mem_region(slim_mem->start, resource_size(slim_mem));
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int msm_slim_suspend(struct device *device)
+{
+	struct platform_device *pdev = to_platform_device(device);
+	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+
+	/* Make sure we are not transmitting anything */
+	mutex_lock(&dev->tx_lock);
+	if (dev->reconf_busy) {
+		wait_for_completion(&dev->reconf);
+		dev->reconf_busy = false;
+	}
+	dev->suspended = 1;
+	mutex_unlock(&dev->tx_lock);
+	clk_disable(dev->rclk);
+	disable_irq(dev->irq);
+	return 0;
+}
+
+static int msm_slim_resume(struct device *device)
+{
+	struct platform_device *pdev = to_platform_device(device);
+	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
+	enable_irq(dev->irq);
+	clk_enable(dev->rclk);
+	dev->suspended = 0;
+	return 0;
+}
+#else
+#define msm_slim_suspend NULL
+#define msm_slim_resume NULL
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_RUNTIME
+static int msm_slim_runtime_idle(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: idle...\n");
+	return 0;
+}
+
+static int msm_slim_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: suspending...\n");
+	return 0;
+}
+
+static int msm_slim_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: resuming...\n");
+	return 0;
+}
+#else
+#define msm_slim_runtime_idle NULL
+#define msm_slim_runtime_suspend NULL
+#define msm_slim_runtime_resume NULL
+#endif
+
+static const struct dev_pm_ops msm_slim_dev_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(
+		msm_slim_suspend,
+		msm_slim_resume
+	)
+	SET_RUNTIME_PM_OPS(
+		msm_slim_runtime_suspend,
+		msm_slim_runtime_resume,
+		msm_slim_runtime_idle
+	)
+};
+
+static struct platform_driver msm_slim_driver = {
+	.probe = msm_slim_probe,
+	.remove = msm_slim_remove,
+	.driver	= {
+		.name = MSM_SLIM_NAME,
+		.owner = THIS_MODULE,
+		.pm = &msm_slim_dev_pm_ops,
+	},
+};
+
+static int msm_slim_init(void)
+{
+	return platform_driver_register(&msm_slim_driver);
+}
+subsys_initcall(msm_slim_init);
+
+static void msm_slim_exit(void)
+{
+	platform_driver_unregister(&msm_slim_driver);
+}
+module_exit(msm_slim_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.1");
+MODULE_DESCRIPTION("MSM Slimbus controller");
+MODULE_ALIAS("platform:msm-slim");
diff --git a/drivers/slimbus/slimbus.c b/drivers/slimbus/slimbus.c
new file mode 100644
index 0000000..59bb008
--- /dev/null
+++ b/drivers/slimbus/slimbus.c
@@ -0,0 +1,2625 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <linux/idr.h>
+#include <linux/pm_runtime.h>
+#include <linux/slimbus/slimbus.h>
+
+#define SLIM_PORT_HDL(la, f, p) ((la)<<24 | (f) << 16 | (p))
+
+#define SLIM_HDL_TO_LA(hdl)	((u32)((hdl) & 0xFF000000) >> 24)
+#define SLIM_HDL_TO_FLOW(hdl)	(((u32)(hdl) & 0xFF0000) >> 16)
+#define SLIM_HDL_TO_PORT(hdl)	((u32)(hdl) & 0xFF)
+
+#define SLIM_SLAVE_PORT(p, la)	(((la)<<16) | (p))
+#define SLIM_MGR_PORT(p)	((0xFF << 16) | (p))
+#define SLIM_LA_MANAGER		0xFF
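+
+/*
+ * For illustration: SLIM_PORT_HDL(0xFF, 0, 3) packs the manager's logical
+ * address, a flow code of 0 and port number 3 into handle 0xFF000003;
+ * SLIM_HDL_TO_LA, SLIM_HDL_TO_FLOW and SLIM_HDL_TO_PORT recover 0xFF, 0
+ * and 3 from it.
+ */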
+
+#define SLIM_START_GRP		(1 << 8)
+#define SLIM_END_GRP		(1 << 9)
+
+#define SLIM_MAX_INTR_COEFF_3	(SLIM_SL_PER_SUPERFRAME/3)
+#define SLIM_MAX_INTR_COEFF_1	SLIM_SL_PER_SUPERFRAME
+
+static DEFINE_MUTEX(slim_lock);
+static DEFINE_IDR(ctrl_idr);
+static struct device_type slim_dev_type;
+static struct device_type slim_ctrl_type;
+
+static const struct slim_device_id *slim_match(const struct slim_device_id *id,
+					const struct slim_device *slim_dev)
+{
+	while (id->name[0]) {
+		if (strncmp(slim_dev->name, id->name, SLIMBUS_NAME_SIZE) == 0)
+			return id;
+		id++;
+	}
+	return NULL;
+}
+
+static int slim_device_match(struct device *dev, struct device_driver *driver)
+{
+	struct slim_device *slim_dev;
+	struct slim_driver *drv = to_slim_driver(driver);
+
+	if (dev->type == &slim_dev_type)
+		slim_dev = to_slim_device(dev);
+	else
+		return 0;
+	if (drv->id_table)
+		return slim_match(drv->id_table, slim_dev) != NULL;
+
+	if (driver->name)
+		return strncmp(slim_dev->name, driver->name, SLIMBUS_NAME_SIZE)
+			== 0;
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int slim_legacy_suspend(struct device *dev, pm_message_t mesg)
+{
+	struct slim_device *slim_dev = NULL;
+	struct slim_driver *driver;
+	if (dev->type == &slim_dev_type)
+		slim_dev = to_slim_device(dev);
+
+	if (!slim_dev || !dev->driver)
+		return 0;
+
+	driver = to_slim_driver(dev->driver);
+	if (!driver->suspend)
+		return 0;
+
+	return driver->suspend(slim_dev, mesg);
+}
+
+static int slim_legacy_resume(struct device *dev)
+{
+	struct slim_device *slim_dev = NULL;
+	struct slim_driver *driver;
+	if (dev->type == &slim_dev_type)
+		slim_dev = to_slim_device(dev);
+
+	if (!slim_dev || !dev->driver)
+		return 0;
+
+	driver = to_slim_driver(dev->driver);
+	if (!driver->resume)
+		return 0;
+
+	return driver->resume(slim_dev);
+}
+
+static int slim_pm_suspend(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	if (pm)
+		return pm_generic_suspend(dev);
+	else
+		return slim_legacy_suspend(dev, PMSG_SUSPEND);
+}
+
+static int slim_pm_resume(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	if (pm)
+		return pm_generic_resume(dev);
+	else
+		return slim_legacy_resume(dev);
+}
+
+#else
+#define slim_pm_suspend		NULL
+#define slim_pm_resume		NULL
+#endif
+
+static const struct dev_pm_ops slimbus_pm = {
+	.suspend = slim_pm_suspend,
+	.resume = slim_pm_resume,
+	SET_RUNTIME_PM_OPS(
+		pm_generic_suspend,
+		pm_generic_resume,
+		pm_generic_runtime_idle
+		)
+};
+struct bus_type slimbus_type = {
+	.name		= "slimbus",
+	.match		= slim_device_match,
+	.pm		= &slimbus_pm,
+};
+EXPORT_SYMBOL_GPL(slimbus_type);
+
+struct device slimbus_dev = {
+	.init_name = "slimbus",
+};
+
+static void __exit slimbus_exit(void)
+{
+	device_unregister(&slimbus_dev);
+	bus_unregister(&slimbus_type);
+}
+
+static int __init slimbus_init(void)
+{
+	int retval;
+
+	retval = bus_register(&slimbus_type);
+	if (!retval)
+		retval = device_register(&slimbus_dev);
+
+	if (retval)
+		bus_unregister(&slimbus_type);
+
+	return retval;
+}
+postcore_initcall(slimbus_init);
+module_exit(slimbus_exit);
+
+static int slim_drv_probe(struct device *dev)
+{
+	const struct slim_driver *sdrv = to_slim_driver(dev->driver);
+
+	if (sdrv->probe)
+		return sdrv->probe(to_slim_device(dev));
+	return -ENODEV;
+}
+
+static int slim_drv_remove(struct device *dev)
+{
+	const struct slim_driver *sdrv = to_slim_driver(dev->driver);
+
+	if (sdrv->remove)
+		return sdrv->remove(to_slim_device(dev));
+	return -ENODEV;
+}
+
+static void slim_drv_shutdown(struct device *dev)
+{
+	const struct slim_driver *sdrv = to_slim_driver(dev->driver);
+
+	if (sdrv->shutdown)
+		sdrv->shutdown(to_slim_device(dev));
+}
+
+/*
+ * slim_driver_register: Client driver registration with slimbus
+ * @drv:Client driver to be associated with client-device.
+ * This API will register the client driver with the slimbus
+ * It is called from the driver's module-init function.
+ */
+int slim_driver_register(struct slim_driver *drv)
+{
+	drv->driver.bus = &slimbus_type;
+	if (drv->probe)
+		drv->driver.probe = slim_drv_probe;
+
+	if (drv->remove)
+		drv->driver.remove = slim_drv_remove;
+
+	if (drv->shutdown)
+		drv->driver.shutdown = slim_drv_shutdown;
+
+	return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(slim_driver_register);
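+
+/*
+ * Example (illustrative client-driver sketch; the driver name and probe
+ * routine are hypothetical):
+ *
+ *	static int my_codec_probe(struct slim_device *sb)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	static struct slim_driver my_codec_driver = {
+ *		.probe = my_codec_probe,
+ *		.driver = {
+ *			.name = "my_codec",
+ *			.owner = THIS_MODULE,
+ *		},
+ *	};
+ *
+ *	static int __init my_codec_init(void)
+ *	{
+ *		return slim_driver_register(&my_codec_driver);
+ *	}
+ *	module_init(my_codec_init);
+ */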
+
+#define slim_ctrl_attr_gr NULL
+
+static void slim_ctrl_release(struct device *dev)
+{
+	struct slim_controller *ctrl = to_slim_controller(dev);
+
+	complete(&ctrl->dev_released);
+}
+
+static struct device_type slim_ctrl_type = {
+	.groups		= slim_ctrl_attr_gr,
+	.release	= slim_ctrl_release,
+};
+
+static struct slim_controller *slim_ctrl_get(struct slim_controller *ctrl)
+{
+	if (!ctrl || !get_device(&ctrl->dev))
+		return NULL;
+
+	return ctrl;
+}
+
+static void slim_ctrl_put(struct slim_controller *ctrl)
+{
+	if (ctrl)
+		put_device(&ctrl->dev);
+}
+
+#define slim_device_attr_gr NULL
+#define slim_device_uevent NULL
+static void slim_dev_release(struct device *dev)
+{
+	struct slim_device *sbdev = to_slim_device(dev);
+	slim_ctrl_put(sbdev->ctrl);
+}
+
+static struct device_type slim_dev_type = {
+	.groups		= slim_device_attr_gr,
+	.uevent		= slim_device_uevent,
+	.release	= slim_dev_release,
+};
+
+/*
+ * slim_add_device: Add a new device without registering board info.
+ * @ctrl: Controller to which this device is to be added to.
+ * Called when device doesn't have an explicit client-driver to be probed, or
+ * the client-driver is a module installed dynamically.
+ */
+int slim_add_device(struct slim_controller *ctrl, struct slim_device *sbdev)
+{
+	int ret = 0;
+
+	sbdev->dev.bus = &slimbus_type;
+	sbdev->dev.parent = ctrl->dev.parent;
+	sbdev->dev.type = &slim_dev_type;
+	sbdev->ctrl = ctrl;
+	slim_ctrl_get(ctrl);
+	dev_set_name(&sbdev->dev, "%s", sbdev->name);
+	/* probe slave on this controller */
+	ret = device_register(&sbdev->dev);
+
+	if (ret)
+		return ret;
+
+	mutex_init(&sbdev->sldev_reconf);
+	INIT_LIST_HEAD(&sbdev->mark_define);
+	INIT_LIST_HEAD(&sbdev->mark_suspend);
+	INIT_LIST_HEAD(&sbdev->mark_removal);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(slim_add_device);
+
+struct sbi_boardinfo {
+	struct list_head	list;
+	struct slim_boardinfo	board_info;
+};
+
+static LIST_HEAD(board_list);
+static LIST_HEAD(slim_ctrl_list);
+static DEFINE_MUTEX(board_lock);
+
+/* If controller is not present, only add to boards list */
+static void slim_match_ctrl_to_boardinfo(struct slim_controller *ctrl,
+				struct slim_boardinfo *bi)
+{
+	int ret;
+	if (ctrl->nr != bi->bus_num)
+		return;
+
+	ret = slim_add_device(ctrl, bi->slim_slave);
+	if (ret != 0)
+		dev_err(ctrl->dev.parent, "can't create new device for %s\n",
+			bi->slim_slave->name);
+}
+
+/*
+ * slim_register_board_info: Board-initialization routine.
+ * @info: List of all devices on all controllers present on the board.
+ * @n: number of entries.
+ * API enumerates respective devices on corresponding controller.
+ * Called from board-init function.
+ */
+int slim_register_board_info(struct slim_boardinfo const *info, unsigned n)
+{
+	struct sbi_boardinfo *bi;
+	int i;
+
+	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
+	if (!bi)
+		return -ENOMEM;
+
+	for (i = 0; i < n; i++, bi++, info++) {
+		struct slim_controller *ctrl;
+
+		memcpy(&bi->board_info, info, sizeof(*info));
+		mutex_lock(&board_lock);
+		list_add_tail(&bi->list, &board_list);
+		list_for_each_entry(ctrl, &slim_ctrl_list, list)
+			slim_match_ctrl_to_boardinfo(ctrl, &bi->board_info);
+		mutex_unlock(&board_lock);
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(slim_register_board_info);
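+
+/*
+ * Example (illustrative board-file sketch; the device name and bus number
+ * are hypothetical):
+ *
+ *	static struct slim_device my_codec_slave = {
+ *		.name = "my_codec",
+ *	};
+ *
+ *	static struct slim_boardinfo my_slim_devs[] = {
+ *		{
+ *			.bus_num = 0,
+ *			.slim_slave = &my_codec_slave,
+ *		},
+ *	};
+ *
+ *	slim_register_board_info(my_slim_devs, ARRAY_SIZE(my_slim_devs));
+ */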
+
+/*
+ * slim_busnum_to_ctrl: Map bus number to controller
+ * @busnum: Bus number
+ * Returns controller representing this bus number
+ */
+struct slim_controller *slim_busnum_to_ctrl(u32 bus_num)
+{
+	struct slim_controller *ctrl;
+	mutex_lock(&board_lock);
+	list_for_each_entry(ctrl, &slim_ctrl_list, list)
+		if (bus_num == ctrl->nr) {
+			mutex_unlock(&board_lock);
+			return ctrl;
+		}
+	mutex_unlock(&board_lock);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(slim_busnum_to_ctrl);
+
+static int slim_register_controller(struct slim_controller *ctrl)
+{
+	int ret = 0;
+	struct sbi_boardinfo *bi;
+
+	/* Can't register until after driver model init */
+	if (WARN_ON(!slimbus_type.p)) {
+		ret = -EAGAIN;
+		goto out_list;
+	}
+
+	dev_set_name(&ctrl->dev, "sb-%d", ctrl->nr);
+	ctrl->dev.bus = &slimbus_type;
+	ctrl->dev.type = &slim_ctrl_type;
+	ctrl->dev.parent = &slimbus_dev;
+	ctrl->num_dev = 0;
+	mutex_init(&ctrl->m_ctrl);
+	mutex_init(&ctrl->sched.m_reconf);
+	ret = device_register(&ctrl->dev);
+	if (ret)
+		goto out_list;
+
+	dev_dbg(&ctrl->dev, "Bus [%s] registered, dev:%p\n", ctrl->name,
+							&ctrl->dev);
+
+	if (ctrl->nports) {
+		ctrl->ports = kzalloc(ctrl->nports * sizeof(struct slim_port),
+					GFP_KERNEL);
+		if (!ctrl->ports) {
+			ret = -ENOMEM;
+			goto err_port_failed;
+		}
+	}
+	if (ctrl->nchans) {
+		ctrl->chans = kzalloc(ctrl->nchans * sizeof(struct slim_ich),
+					GFP_KERNEL);
+		if (!ctrl->chans) {
+			ret = -ENOMEM;
+			goto err_chan_failed;
+		}
+
+		ctrl->sched.chc1 =
+			kzalloc(ctrl->nchans * sizeof(struct slim_ich *),
+			GFP_KERNEL);
+		if (!ctrl->sched.chc1) {
+			kfree(ctrl->chans);
+			ret = -ENOMEM;
+			goto err_chan_failed;
+		}
+		ctrl->sched.chc3 =
+			kzalloc(ctrl->nchans * sizeof(struct slim_ich *),
+			GFP_KERNEL);
+		if (!ctrl->sched.chc3) {
+			kfree(ctrl->sched.chc1);
+			kfree(ctrl->chans);
+			ret = -ENOMEM;
+			goto err_chan_failed;
+		}
+	}
+#ifdef DEBUG
+	ctrl->sched.slots = kzalloc(SLIM_SL_PER_SUPERFRAME, GFP_KERNEL);
+#endif
+	/*
+	 * If devices on this controller were registered before the controller,
+	 * this ensures they get probed now that the controller is up
+	 */
+	mutex_lock(&board_lock);
+	list_add_tail(&ctrl->list, &slim_ctrl_list);
+	list_for_each_entry(bi, &board_list, list)
+		slim_match_ctrl_to_boardinfo(ctrl, &bi->board_info);
+	mutex_unlock(&board_lock);
+
+	return 0;
+
+err_chan_failed:
+	kfree(ctrl->ports);
+err_port_failed:
+	device_unregister(&ctrl->dev);
+out_list:
+	mutex_lock(&slim_lock);
+	idr_remove(&ctrl_idr, ctrl->nr);
+	mutex_unlock(&slim_lock);
+	return ret;
+}
+
+/* slim_remove_device: Remove the effect of slim_add_device() */
+void slim_remove_device(struct slim_device *sbdev)
+{
+	device_unregister(&sbdev->dev);
+}
+EXPORT_SYMBOL_GPL(slim_remove_device);
+
+static void slim_ctrl_remove_device(struct slim_controller *ctrl,
+				struct slim_boardinfo *bi)
+{
+	if (ctrl->nr == bi->bus_num)
+		slim_remove_device(bi->slim_slave);
+}
+
+/*
+ * slim_del_controller: Controller tear-down.
+ * Controller added with the above API is torn down using this API.
+ */
+int slim_del_controller(struct slim_controller *ctrl)
+{
+	struct slim_controller *found;
+	struct sbi_boardinfo *bi;
+
+	/* First make sure that this bus was added */
+	mutex_lock(&slim_lock);
+	found = idr_find(&ctrl_idr, ctrl->nr);
+	mutex_unlock(&slim_lock);
+	if (found != ctrl)
+		return -EINVAL;
+
+	/* Remove all clients */
+	mutex_lock(&board_lock);
+	list_for_each_entry(bi, &board_list, list)
+		slim_ctrl_remove_device(ctrl, &bi->board_info);
+	mutex_unlock(&board_lock);
+
+	init_completion(&ctrl->dev_released);
+	device_unregister(&ctrl->dev);
+
+	wait_for_completion(&ctrl->dev_released);
+	list_del(&ctrl->list);
+	/* free bus id */
+	mutex_lock(&slim_lock);
+	idr_remove(&ctrl_idr, ctrl->nr);
+	mutex_unlock(&slim_lock);
+
+	kfree(ctrl->sched.chc1);
+	kfree(ctrl->sched.chc3);
+#ifdef DEBUG
+	kfree(ctrl->sched.slots);
+#endif
+	kfree(ctrl->chans);
+	kfree(ctrl->ports);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(slim_del_controller);
+
+/*
+ * slim_add_numbered_controller: Controller bring-up.
+ * @ctrl: Controller to be registered.
+ * A controller is registered with the framework using this API. ctrl->nr is the
+ * desired number with which slimbus framework registers the controller.
+ * Function will return -EBUSY if the number is in use.
+ */
+int slim_add_numbered_controller(struct slim_controller *ctrl)
+{
+	int	id;
+	int	status;
+
+	if (ctrl->nr & ~MAX_ID_MASK)
+		return -EINVAL;
+
+retry:
+	if (idr_pre_get(&ctrl_idr, GFP_KERNEL) == 0)
+		return -ENOMEM;
+
+	mutex_lock(&slim_lock);
+	status = idr_get_new_above(&ctrl_idr, ctrl, ctrl->nr, &id);
+	if (status == 0 && id != ctrl->nr) {
+		status = -EAGAIN;
+		idr_remove(&ctrl_idr, id);
+	}
+	mutex_unlock(&slim_lock);
+	if (status == -EAGAIN)
+		goto retry;
+
+	if (status == 0)
+		status = slim_register_controller(ctrl);
+	return status;
+}
+EXPORT_SYMBOL_GPL(slim_add_numbered_controller);
+
+/*
+ * slim_msg_response: Deliver Message response received from a device to the
+ *	framework.
+ * @ctrl: Controller handle
+ * @reply: Reply received from the device
+ * @len: Length of the reply
+ * @tid: Transaction ID received with which framework can associate reply.
+ * Called by the controller to inform the framework about a received response.
+ * This keeps the API asynchronous: the framework maintains the only table
+ * mapping TIDs to reply buffers, so the controller driver does not need to
+ * keep a second one.
+ */
+void slim_msg_response(struct slim_controller *ctrl, u8 *reply, u8 tid, u8 len)
+{
+	int i;
+	struct slim_msg_txn *txn;
+
+	mutex_lock(&ctrl->m_ctrl);
+	txn = ctrl->txnt[tid];
+	if (txn == NULL) {
+		dev_err(&ctrl->dev, "Got response to invalid TID:%d, len:%d",
+				tid, len);
+		mutex_unlock(&ctrl->m_ctrl);
+		return;
+	}
+	for (i = 0; i < len; i++)
+		txn->rbuf[i] = reply[i];
+	if (txn->comp)
+		complete(txn->comp);
+	ctrl->txnt[tid] = NULL;
+	mutex_unlock(&ctrl->m_ctrl);
+	kfree(txn);
+}
+EXPORT_SYMBOL_GPL(slim_msg_response);
+
+static int slim_processtxn(struct slim_controller *ctrl, u8 dt, u8 mc, u16 ec,
+			u8 mt, u8 *rbuf, const u8 *wbuf, u8 len, u8 mlen,
+			struct completion *comp, u8 la, u8 *tid)
+{
+	u8 i = 0;
+	int ret = 0;
+	struct slim_msg_txn *txn = kmalloc(sizeof(struct slim_msg_txn),
+					GFP_KERNEL);
+	if (!txn)
+		return -ENOMEM;
+	if (tid) {
+		mutex_lock(&ctrl->m_ctrl);
+		for (i = 0; i < ctrl->last_tid; i++) {
+			if (ctrl->txnt[i] == NULL)
+				break;
+		}
+		if (i >= ctrl->last_tid) {
+			if (ctrl->last_tid == 255) {
+				mutex_unlock(&ctrl->m_ctrl);
+				kfree(txn);
+				return -ENOMEM;
+			}
+			ctrl->txnt = krealloc(ctrl->txnt,
+					(i + 1) * sizeof(struct slim_msg_txn *),
+					GFP_KERNEL);
+			if (!ctrl->txnt) {
+				mutex_unlock(&ctrl->m_ctrl);
+				kfree(txn);
+				return -ENOMEM;
+			}
+			ctrl->last_tid++;
+		}
+		ctrl->txnt[i] = txn;
+		mutex_unlock(&ctrl->m_ctrl);
+		txn->tid = i;
+		*tid = i;
+	}
+	txn->mc = mc;
+	txn->mt = mt;
+	txn->dt = dt;
+	txn->ec = ec;
+	txn->la = la;
+	txn->rbuf = rbuf;
+	txn->wbuf = wbuf;
+	txn->rl = mlen;
+	txn->len = len;
+	txn->comp = comp;
+
+	ret = ctrl->xfer_msg(ctrl, txn);
+	if (!tid)
+		kfree(txn);
+	return ret;
+}
+
+static int ctrl_getlogical_addr(struct slim_controller *ctrl, const u8 *eaddr,
+				u8 e_len, u8 *laddr)
+{
+	u8 i;
+	for (i = 0; i < ctrl->num_dev; i++) {
+		if (ctrl->addrt[i].valid &&
+			memcmp(ctrl->addrt[i].eaddr, eaddr, e_len) == 0) {
+			*laddr = i;
+			return 0;
+		}
+	}
+	return -ENXIO;
+}
+
+/*
+ * slim_assign_laddr: Assign logical address to a device enumerated.
+ * @ctrl: Controller with which device is enumerated.
+ * @e_addr: 6-byte elemental address of the device.
+ * @e_len: buffer length for e_addr
+ * @laddr: Return logical address.
+ * Called by controller in response to REPORT_PRESENT. Framework will assign
+ * a logical address to this enumeration address.
+ * Function returns -EXFULL to indicate that all logical addresses are already
+ * taken.
+ */
+int slim_assign_laddr(struct slim_controller *ctrl, const u8 *e_addr,
+				u8 e_len, u8 *laddr)
+{
+	int ret;
+	u8 i;
+	mutex_lock(&ctrl->m_ctrl);
+	/* already assigned */
+	if (ctrl_getlogical_addr(ctrl, e_addr, e_len, laddr) == 0)
+		i = *laddr;
+	else {
+		if (ctrl->num_dev >= 254) {
+			ret = -EXFULL;
+			goto ret_assigned_laddr;
+		}
+		for (i = 0; i < ctrl->num_dev; i++) {
+			if (ctrl->addrt[i].valid == false)
+				break;
+		}
+		if (i == ctrl->num_dev) {
+			ctrl->addrt = krealloc(ctrl->addrt,
+					(ctrl->num_dev + 1) *
+					sizeof(struct slim_addrt),
+					GFP_KERNEL);
+			if (!ctrl->addrt) {
+				ret = -ENOMEM;
+				goto ret_assigned_laddr;
+			}
+			ctrl->num_dev++;
+		}
+		memcpy(ctrl->addrt[i].eaddr, e_addr, e_len);
+		ctrl->addrt[i].valid = true;
+	}
+
+	ret = ctrl->set_laddr(ctrl, ctrl->addrt[i].eaddr, 6, i);
+	if (ret) {
+		ctrl->addrt[i].valid = false;
+		goto ret_assigned_laddr;
+	}
+	*laddr = i;
+
+	dev_dbg(&ctrl->dev, "setting slimbus l-addr:%x\n", i);
+ret_assigned_laddr:
+	mutex_unlock(&ctrl->m_ctrl);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(slim_assign_laddr);
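+
+/*
+ * Example (illustrative controller-driver sketch; e_addr is the 6-byte
+ * enumeration address taken from the REPORT_PRESENT payload):
+ *
+ *	u8 laddr;
+ *	ret = slim_assign_laddr(ctrl, e_addr, 6, &laddr);
+ *	if (!ret)
+ *		dev_dbg(&ctrl->dev, "assigned logical address %d\n", laddr);
+ */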
+
+/*
+ * slim_get_logical_addr: Return the logical address of a slimbus device.
+ * @sb: client handle requesting the address.
+ * @e_addr: Elemental address of the device.
+ * @e_len: Length of e_addr
+ * @laddr: output buffer to store the address
+ * context: can sleep
+ * -EINVAL is returned in case of invalid parameters, and -ENXIO is returned if
+ *  the device with this elemental address is not found.
+ */
+int slim_get_logical_addr(struct slim_device *sb, const u8 *e_addr,
+				u8 e_len, u8 *laddr)
+{
+	int ret = 0;
+	struct slim_controller *ctrl = sb->ctrl;
+	if (!ctrl || !laddr || !e_addr || e_len != 6)
+		return -EINVAL;
+	mutex_lock(&ctrl->m_ctrl);
+	ret = ctrl_getlogical_addr(ctrl, e_addr, e_len, laddr);
+	mutex_unlock(&ctrl->m_ctrl);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(slim_get_logical_addr);
+
+static int slim_ele_access_sanity(struct slim_ele_access *msg, int oper,
+				u8 *rbuf, const u8 *wbuf, u8 len)
+{
+	if (!msg || msg->num_bytes > 16 || msg->start_offset + len > 0xC00)
+		return -EINVAL;
+	switch (oper) {
+	case SLIM_MSG_MC_REQUEST_VALUE:
+	case SLIM_MSG_MC_REQUEST_INFORMATION:
+		if (rbuf == NULL)
+			return -EINVAL;
+		return 0;
+	case SLIM_MSG_MC_CHANGE_VALUE:
+	case SLIM_MSG_MC_CLEAR_INFORMATION:
+		if (wbuf == NULL)
+			return -EINVAL;
+		return 0;
+	case SLIM_MSG_MC_REQUEST_CHANGE_VALUE:
+	case SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION:
+		if (rbuf == NULL || wbuf == NULL)
+			return -EINVAL;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static u16 slim_slicecodefromsize(u32 req)
+{
+	u8 codetosize[8] = {1, 2, 3, 4, 6, 8, 12, 16};
+	if (req >= 8)
+		return 0;
+	else
+		return codetosize[req];
+}
+
+static u16 slim_slicesize(u32 code)
+{
+	u8 sizetocode[16] = {0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7};
+	if (code == 0)
+		code = 1;
+	if (code > 16)
+		code = 16;
+	return sizetocode[code - 1];
+}
+
+
+/* Message APIs Unicast message APIs used by slimbus slave drivers */
+
+/*
+ * Message API access routines.
+ * @sb: client handle requesting elemental message reads, writes.
+ * @msg: Input structure for start-offset, number of bytes to read.
+ * @rbuf: data buffer to be filled with values read.
+ * @len: data buffer size
+ * @wbuf: data buffer containing value/information to be written
+ * context: can sleep
+ * Returns:
+ * -EINVAL: Invalid parameters
+ * -ETIMEDOUT: If controller could not complete the request. This may happen if
+ *  the bus lines are not clocked, controller is not powered-on, slave with
+ *  given address is not enumerated/responding.
+ */
+int slim_request_val_element(struct slim_device *sb,
+				struct slim_ele_access *msg, u8 *buf, u8 len)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	if (!ctrl)
+		return -EINVAL;
+	return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_REQUEST_VALUE, buf,
+			NULL, len);
+}
+EXPORT_SYMBOL_GPL(slim_request_val_element);
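+
+/*
+ * Example (illustrative sketch; the element offset and length are
+ * hypothetical): synchronous two-byte value-element read. Leaving msg.comp
+ * NULL makes the call block until the reply arrives or times out.
+ *
+ *	struct slim_ele_access msg = {
+ *		.start_offset = 0x400,
+ *		.num_bytes = 2,
+ *		.comp = NULL,
+ *	};
+ *	u8 val[2];
+ *	ret = slim_request_val_element(sb, &msg, val, 2);
+ */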
+
+int slim_request_inf_element(struct slim_device *sb,
+				struct slim_ele_access *msg, u8 *buf, u8 len)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	if (!ctrl)
+		return -EINVAL;
+	return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_REQUEST_INFORMATION,
+			buf, NULL, len);
+}
+EXPORT_SYMBOL_GPL(slim_request_inf_element);
+
+int slim_change_val_element(struct slim_device *sb, struct slim_ele_access *msg,
+				const u8 *buf, u8 len)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	if (!ctrl)
+		return -EINVAL;
+	return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_CHANGE_VALUE, NULL, buf,
+					len);
+}
+EXPORT_SYMBOL_GPL(slim_change_val_element);
+
+int slim_clear_inf_element(struct slim_device *sb, struct slim_ele_access *msg,
+				u8 *buf, u8 len)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	if (!ctrl)
+		return -EINVAL;
+	return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_CLEAR_INFORMATION, NULL,
+					buf, len);
+}
+EXPORT_SYMBOL_GPL(slim_clear_inf_element);
+
+int slim_request_change_val_element(struct slim_device *sb,
+					struct slim_ele_access *msg, u8 *rbuf,
+					const u8 *wbuf, u8 len)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	if (!ctrl)
+		return -EINVAL;
+	return slim_xfer_msg(ctrl, sb, msg, SLIM_MSG_MC_REQUEST_CHANGE_VALUE,
+					rbuf, wbuf, len);
+}
+EXPORT_SYMBOL_GPL(slim_request_change_val_element);
+
+int slim_request_clear_inf_element(struct slim_device *sb,
+					struct slim_ele_access *msg, u8 *rbuf,
+					const u8 *wbuf, u8 len)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	if (!ctrl)
+		return -EINVAL;
+	return slim_xfer_msg(ctrl, sb, msg,
+					SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION,
+					rbuf, wbuf, len);
+}
+EXPORT_SYMBOL_GPL(slim_request_clear_inf_element);
+
+/*
+ * Broadcast message API:
+ * call this API directly with sbdev = NULL.
+ * For broadcast reads, make sure that buffers are big enough to hold
+ * replies from all logical addresses.
+ * Not all controllers support broadcast.
+ */
+int slim_xfer_msg(struct slim_controller *ctrl, struct slim_device *sbdev,
+			struct slim_ele_access *msg, u8 mc, u8 *rbuf,
+			const u8 *wbuf, u8 len)
+{
+	DECLARE_COMPLETION_ONSTACK(complete);
+	int ret;
+	u16 sl, cur;
+	u16 ec;
+	u8 tid, mlen = 6;
+
+	if (sbdev->laddr != SLIM_LA_MANAGER && sbdev->laddr >= ctrl->num_dev)
+		return -ENXIO;
+	ret = slim_ele_access_sanity(msg, mc, rbuf, wbuf, len);
+	if (ret)
+		goto xfer_err;
+
+	sl = slim_slicesize(len);
+	dev_dbg(&ctrl->dev, "SB xfer msg:os:%x, len:%d, MC:%x, sl:%x\n",
+				msg->start_offset, len, mc, sl);
+
+	cur = slim_slicecodefromsize(sl);
+	ec = ((sl | (1 << 3)) | ((msg->start_offset & 0xFFF) << 4));
+
+	if (wbuf)
+		mlen += len;
+	if (rbuf) {
+		mlen++;
+		if (!msg->comp)
+			ret = slim_processtxn(ctrl, SLIM_MSG_DEST_LOGICALADDR,
+				mc, ec, SLIM_MSG_MT_CORE, rbuf, wbuf, len, mlen,
+				&complete, sbdev->laddr, &tid);
+		else
+			ret = slim_processtxn(ctrl, SLIM_MSG_DEST_LOGICALADDR,
+				mc, ec, SLIM_MSG_MT_CORE, rbuf, wbuf, len, mlen,
+				msg->comp, sbdev->laddr, &tid);
+		/* sync read */
+		if (!ret && !msg->comp)
+			wait_for_completion_timeout(&complete, HZ);
+	} else
+		ret = slim_processtxn(ctrl, SLIM_MSG_DEST_LOGICALADDR, mc, ec,
+				SLIM_MSG_MT_CORE, rbuf, wbuf, len, mlen,
+				NULL, sbdev->laddr, NULL);
+xfer_err:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(slim_xfer_msg);
+
+/*
+ * slim_alloc_mgrports: Allocate port on manager side.
+ * @sb: device/client handle.
+ * @req: Port request type.
+ * @nports: Number of ports requested
+ * @rh: output buffer to store the port handles
+ * @hsz: size of buffer storing handles
+ * context: can sleep
+ * Such a port is typically used by software, e.g. a client driver that wants
+ * to receive data from audio-codec hardware over a data channel; the port
+ * allocated with this API is the receiving end of that channel.
+ * If half-duplex ports are requested, two adjacent ports are allocated for
+ * 1 half-duplex port. So the handle-buffer size should be twice the number
+ * of half-duplex ports to be allocated.
+ * -EDQUOT is returned if all ports are in use.
+ */
+int slim_alloc_mgrports(struct slim_device *sb, enum slim_port_req req,
+				int nports, u32 *rh, int hsz)
+{
+	int i, j, ret;
+	int nphysp = nports;
+	struct slim_controller *ctrl = sb->ctrl;
+
+	if (!rh || !ctrl)
+		return -EINVAL;
+	if (req == SLIM_REQ_HALF_DUP)
+		nphysp *= 2;
+	if (hsz/sizeof(u32) < nphysp)
+		return -EINVAL;
+	mutex_lock(&ctrl->m_ctrl);
+
+	for (i = 0; i < ctrl->nports; i++) {
+		bool multiok = true;
+		if (ctrl->ports[i].state != SLIM_P_FREE)
+			continue;
+		/* Start half duplex channel at even port */
+		if (req == SLIM_REQ_HALF_DUP && (i % 2))
+			continue;
+		/* Allocate ports contiguously for multi-ch */
+		if (ctrl->nports < (i + nphysp)) {
+			i = ctrl->nports;
+			break;
+		}
+		if (req == SLIM_REQ_MULTI_CH) {
+			multiok = true;
+			for (j = i; j < i + nphysp; j++) {
+				if (ctrl->ports[j].state != SLIM_P_FREE) {
+					multiok = false;
+					break;
+				}
+			}
+			if (!multiok)
+				continue;
+		}
+		break;
+	}
+	if (i >= ctrl->nports) {
+		ret = -EDQUOT;
+		goto alloc_err;
+	}
+	for (j = i; j < i + nphysp; j++) {
+		ctrl->ports[j].state = SLIM_P_UNCFG;
+		ctrl->ports[j].req = req;
+		if (req == SLIM_REQ_HALF_DUP && (j % 2))
+			ctrl->ports[j].flow = SLIM_SINK;
+		else
+			ctrl->ports[j].flow = SLIM_SRC;
+		ret = ctrl->config_port(ctrl, j);
+		if (ret) {
+			for (; j >= i; j--)
+				ctrl->ports[j].state = SLIM_P_FREE;
+			goto alloc_err;
+		}
+		*rh++ = SLIM_PORT_HDL(SLIM_LA_MANAGER, 0, j);
+	}
+alloc_err:
+	mutex_unlock(&ctrl->m_ctrl);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(slim_alloc_mgrports);
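+
+/*
+ * Example (illustrative sketch): request one half-duplex manager port pair;
+ * two adjacent ports are allocated, so the handle buffer holds two handles.
+ *
+ *	u32 ph[2];
+ *	ret = slim_alloc_mgrports(sb, SLIM_REQ_HALF_DUP, 1, ph, sizeof(ph));
+ */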
+
+/* Deallocate the port(s) allocated using the API above */
+int slim_dealloc_mgrports(struct slim_device *sb, u32 *hdl, int nports)
+{
+	int i;
+	struct slim_controller *ctrl = sb->ctrl;
+
+	if (!ctrl || !hdl)
+		return -EINVAL;
+
+	mutex_lock(&ctrl->m_ctrl);
+
+	for (i = 0; i < nports; i++) {
+		u8 pn;
+		pn = SLIM_HDL_TO_PORT(hdl[i]);
+		if (ctrl->ports[pn].state == SLIM_P_CFG) {
+			int j;
+			dev_err(&ctrl->dev, "Can't dealloc connected port:%d",
+					i);
+			for (j = i - 1; j >= 0; j--) {
+				pn = SLIM_HDL_TO_PORT(hdl[j]);
+				ctrl->ports[pn].state = SLIM_P_UNCFG;
+			}
+			mutex_unlock(&ctrl->m_ctrl);
+			return -EISCONN;
+		}
+		ctrl->ports[pn].state = SLIM_P_FREE;
+	}
+	mutex_unlock(&ctrl->m_ctrl);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(slim_dealloc_mgrports);
+
+/*
+ * slim_get_slaveport: Get slave port handle
+ * @la: slave device logical address.
+ * @idx: port index at slave
+ * @rh: return handle
+ * @flw: Flow type (source or destination)
+ * This API only returns a slave port's representation as expected by slimbus
+ * driver. This port is not managed by the slimbus driver. Caller is expected
+ * to have visibility of this port since it's a device-port.
+ */
+int slim_get_slaveport(u8 la, int idx, u32 *rh, enum slim_port_flow flw)
+{
+	if (rh == NULL)
+		return -EINVAL;
+	*rh = SLIM_PORT_HDL(la, flw, idx);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(slim_get_slaveport);
+
+static int connect_port_ch(struct slim_controller *ctrl, u8 ch, u32 ph,
+				enum slim_port_flow flow)
+{
+	int ret;
+	u8 mc;
+	u8 buf[2];
+	u32 la = SLIM_HDL_TO_LA(ph);
+	u8 pn = (u8)SLIM_HDL_TO_PORT(ph);
+
+	if (flow == SLIM_SRC)
+		mc = SLIM_MSG_MC_CONNECT_SOURCE;
+	else
+		mc = SLIM_MSG_MC_CONNECT_SINK;
+	buf[0] = pn;
+	buf[1] = ch;
+	if (la == SLIM_LA_MANAGER)
+		ctrl->ports[pn].flow = flow;
+	ret = slim_processtxn(ctrl, SLIM_MSG_DEST_LOGICALADDR, mc, 0,
+				SLIM_MSG_MT_CORE, NULL, buf, 2, 6, NULL, la,
+				NULL);
+	if (!ret && la == SLIM_LA_MANAGER)
+		ctrl->ports[pn].state = SLIM_P_CFG;
+	return ret;
+}
+
+static int disconnect_port_ch(struct slim_controller *ctrl, u32 ph)
+{
+	int ret;
+	u8 mc;
+	u32 la = SLIM_HDL_TO_LA(ph);
+	u8 pn = (u8)SLIM_HDL_TO_PORT(ph);
+
+	mc = SLIM_MSG_MC_DISCONNECT_PORT;
+	ret = slim_processtxn(ctrl, SLIM_MSG_DEST_LOGICALADDR, mc, 0,
+				SLIM_MSG_MT_CORE, NULL, &pn, 1, 5,
+				NULL, la, NULL);
+	if (ret)
+		return ret;
+	if (la == SLIM_LA_MANAGER)
+		ctrl->ports[pn].state = SLIM_P_UNCFG;
+	return 0;
+}
+
+/*
+ * slim_connect_ports: Connect port(s) to channel.
+ * @sb: client handle
+ * @srch: source handles to be connected to this channel
+ * @nrsc: number of source ports
+ * @sinkh: sink handle to be connected to this channel
+ * @chanh: Channel with which the ports need to be associated with.
+ * Per slimbus specification, a channel may have multiple source-ports and 1
+ * sink port. The channel specified in chanh needs to be allocated first.
+ */
+int slim_connect_ports(struct slim_device *sb, u32 *srch, int nsrc, u32 sinkh,
+			u16 chanh)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	int j;
+	int ret = 0;
+	u8 chan = (u8)(chanh & 0xFF);
+	struct slim_ich *slc = &ctrl->chans[chan];
+
+	mutex_lock(&ctrl->m_ctrl);
+	/* Make sure the channel is not already pending reconf. or active */
+	if (slc->state >= SLIM_CH_PENDING_ACTIVE) {
+		dev_err(&ctrl->dev, "Channel %d  already active", chan);
+		ret = -EISCONN;
+		goto connect_port_err;
+	}
+
+	/*
+	 * Once channel is removed, its ports can be considered disconnected
+	 * So its ports can be reassigned. Source port array is freed
+	 * when channel is deallocated.
+	 */
+	slc->srch = krealloc(slc->srch, (sizeof(u32) * nsrc), GFP_KERNEL);
+	if (!slc->srch) {
+		ret = -ENOMEM;
+		goto connect_port_err;
+	}
+	/* connect source */
+	for (j = 0; j < nsrc; j++) {
+		ret = connect_port_ch(ctrl, chan, srch[j], SLIM_SRC);
+		if (ret) {
+			for ( ; j >= 0 ; j--)
+				disconnect_port_ch(ctrl,
+						srch[j]);
+			kfree(slc->srch);
+			slc->srch = NULL;
+			goto connect_port_err;
+		}
+	}
+	/* connect sink */
+	ret = connect_port_ch(ctrl, chan, sinkh, SLIM_SINK);
+	if (ret) {
+		for (j = 0; j < nsrc; j++)
+			disconnect_port_ch(ctrl, srch[j]);
+		kfree(slc->srch);
+		slc->srch = NULL;
+		goto connect_port_err;
+	}
+
+	memcpy(slc->srch, srch, (sizeof(u32) * nsrc));
+	slc->nsrc = nsrc;
+	if (sinkh)
+		slc->sinkh = sinkh;
+
+connect_port_err:
+	mutex_unlock(&ctrl->m_ctrl);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(slim_connect_ports);
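+
+/*
+ * Example (illustrative sketch; logical addresses and port indices are
+ * hypothetical): route an allocated channel from port 0 of the device at
+ * logical address 0x3 to port 1 of the device at logical address 0x4.
+ *
+ *	u16 chan;
+ *	u32 srch, sinkh;
+ *	ret = slim_alloc_ch(sb, &chan);
+ *	slim_get_slaveport(0x3, 0, &srch, SLIM_SRC);
+ *	slim_get_slaveport(0x4, 1, &sinkh, SLIM_SINK);
+ *	ret = slim_connect_ports(sb, &srch, 1, sinkh, chan);
+ */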
+
+/*
+ * slim_disconnect_ports: Disconnect port(s) from channel
+ * @sb: client handle
+ * @ph: ports to be disconnected
+ * @nph: number of ports.
+ * Disconnects ports from a channel.
+ */
+int slim_disconnect_ports(struct slim_device *sb, u32 *ph, int nph)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	int i;
+	mutex_lock(&ctrl->m_ctrl);
+	for (i = 0; i < nph; i++)
+		disconnect_port_ch(ctrl, ph[i]);
+	mutex_unlock(&ctrl->m_ctrl);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(slim_disconnect_ports);
+
+/*
+ * slim_port_xfer: Schedule buffer to be transferred/received using port-handle.
+ * @sb: client handle
+ * @ph: port-handle
+ * @iobuf: buffer to be transferred or populated
+ * @len: buffer size.
+ * @comp: completion signal to indicate transfer done or error.
+ * context: can sleep
+ * Returns number of bytes transferred/received if used synchronously.
+ * Will return 0 if used asynchronously.
+ * Client will call slim_port_get_xfer_status to get error and/or number of
+ * bytes transferred if used asynchronously.
+ */
+int slim_port_xfer(struct slim_device *sb, u32 ph, u8 *iobuf, u32 len,
+				struct completion *comp)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	u8 pn = SLIM_HDL_TO_PORT(ph);
+	dev_dbg(&ctrl->dev, "port xfer: num:%d", pn);
+	return ctrl->port_xfer(ctrl, pn, iobuf, len, comp);
+}
+EXPORT_SYMBOL_GPL(slim_port_xfer);
+
+/*
+ * slim_port_get_xfer_status: Poll for port transfers, or get transfer status
+ *	after completion is done.
+ * @sb: client handle
+ * @ph: port-handle
+ * @done_buf: return pointer (iobuf from slim_port_xfer) which is processed.
+ * @done_len: Number of bytes transferred.
+ * This can be called when port_xfer completion is signalled.
+ * The API will return port transfer error (underflow/overflow/disconnect)
+ * and/or done_len will reflect number of bytes transferred. Note that
+ * done_len may be valid even if port error (overflow/underflow) has happened.
+ * e.g. If the transfer was scheduled with a few bytes to be transferred and
+ * client has not supplied more data to be transferred, done_len will indicate
+ * number of bytes transferred with underflow error. To avoid frequent underflow
+ * errors, multiple transfers can be queued (e.g. ping-pong buffers) so that
+ * channel has data to be transferred even if client is not ready to transfer
+ * data all the time. done_buf will indicate address of the last buffer
+ * processed from the multiple transfers.
+ */
+enum slim_port_err slim_port_get_xfer_status(struct slim_device *sb, u32 ph,
+			u8 **done_buf, u32 *done_len)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	u8 pn = SLIM_HDL_TO_PORT(ph);
+	u32 la = SLIM_HDL_TO_LA(ph);
+	enum slim_port_err err;
+	dev_dbg(&ctrl->dev, "get status port num:%d", pn);
+	/*
+	 * The framework only has insight into ports of the manager's ported
+	 * device; ports owned by slave devices are not tracked here.
+	 */
+	if (la != SLIM_LA_MANAGER) {
+		if (done_buf)
+			*done_buf = NULL;
+		if (done_len)
+			*done_len = 0;
+		return SLIM_P_NOT_OWNED;
+	}
+	err = ctrl->port_xfer_status(ctrl, pn, done_buf, done_len);
+	if (err == SLIM_P_INPROGRESS)
+		err = ctrl->ports[pn].err;
+	return err;
+}
+EXPORT_SYMBOL_GPL(slim_port_get_xfer_status);
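+
+/*
+ * Example (illustrative sketch; ph and iobuf are a port handle and buffer
+ * obtained elsewhere, and the length is hypothetical): queue a buffer on a
+ * manager port and collect the result once the completion fires; done_len
+ * then reflects the number of bytes actually moved.
+ *
+ *	DECLARE_COMPLETION_ONSTACK(done);
+ *	u8 *done_buf;
+ *	u32 done_len;
+ *	enum slim_port_err err;
+ *
+ *	ret = slim_port_xfer(sb, ph, iobuf, 256, &done);
+ *	wait_for_completion(&done);
+ *	err = slim_port_get_xfer_status(sb, ph, &done_buf, &done_len);
+ */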
+
+static void slim_add_ch(struct slim_controller *ctrl, struct slim_ich *slc)
+{
+	struct slim_ich **arr;
+	int i, j;
+	int *len;
+	int sl = slc->seglen << slc->rootexp;
+	if (slc->coeff == SLIM_COEFF_1) {
+		arr = ctrl->sched.chc1;
+		len = &ctrl->sched.num_cc1;
+	} else {
+		arr = ctrl->sched.chc3;
+		len = &ctrl->sched.num_cc3;
+		sl *= 3;
+	}
+
+	*len += 1;
+
+	/* Insert the channel based on rootexp and seglen */
+	for (i = 0; i < *len - 1; i++) {
+		/*
+		 * Primary key: exp low to high.
+		 * Secondary key: seglen: high to low
+		 */
+		if ((slc->rootexp > arr[i]->rootexp) ||
+			((slc->rootexp == arr[i]->rootexp) &&
+			(slc->seglen < arr[i]->seglen)))
+			continue;
+		else
+			break;
+	}
+	for (j = *len - 1; j > i; j--)
+		arr[j] = arr[j - 1];
+	arr[i] = slc;
+	ctrl->sched.usedslots += sl;
+
+	return;
+}
+
+static int slim_remove_ch(struct slim_controller *ctrl, struct slim_ich *slc)
+{
+	struct slim_ich **arr;
+	int i;
+	u32 la, ph;
+	int *len;
+	if (slc->coeff == SLIM_COEFF_1) {
+		arr = ctrl->sched.chc1;
+		len = &ctrl->sched.num_cc1;
+	} else {
+		arr = ctrl->sched.chc3;
+		len = &ctrl->sched.num_cc3;
+	}
+
+	for (i = 0; i < *len; i++) {
+		if (arr[i] == slc)
+			break;
+	}
+	if (i >= *len)
+		return -EXFULL;
+	for (; i < *len - 1; i++)
+		arr[i] = arr[i + 1];
+	*len -= 1;
+	arr[*len] = NULL;
+
+	slc->state = SLIM_CH_ALLOCATED;
+	slc->newintr = 0;
+	slc->newoff = 0;
+	for (i = 0; i < slc->nsrc; i++) {
+		ph = slc->srch[i];
+		la = SLIM_HDL_TO_LA(ph);
+		/*
+		 * For ports managed by manager's ported device, no need to send
+		 * disconnect. It is client's responsibility to call disconnect
+		 * on ports owned by the slave device
+		 */
+		if (la == SLIM_LA_MANAGER)
+			ctrl->ports[SLIM_HDL_TO_PORT(ph)].state = SLIM_P_UNCFG;
+	}
+
+	ph = slc->sinkh;
+	la = SLIM_HDL_TO_LA(ph);
+	if (la == SLIM_LA_MANAGER)
+		ctrl->ports[SLIM_HDL_TO_PORT(ph)].state = SLIM_P_UNCFG;
+
+	return 0;
+}
+
+static u32 slim_calc_prrate(struct slim_controller *ctrl, struct slim_ch *prop)
+{
+	u32 rate = 0, rate4k = 0, rate11k = 0;
+	u32 exp = 0;
+	u32 pr = 0;
+	bool exact = true;
+	bool done = false;
+	enum slim_ch_rate ratefam;
+
+	if (prop->prot >= SLIM_PUSH)
+		return 0;
+	if (prop->baser == SLIM_RATE_1HZ) {
+		rate = prop->ratem / 4000;
+		rate4k = rate;
+		if (rate * 4000 == prop->ratem)
+			ratefam = SLIM_RATE_4000HZ;
+		else {
+			rate = prop->ratem / 11025;
+			rate11k = rate;
+			if (rate * 11025 == prop->ratem)
+				ratefam = SLIM_RATE_11025HZ;
+			else
+				ratefam = SLIM_RATE_1HZ;
+		}
+	} else {
+		ratefam = prop->baser;
+		rate = prop->ratem;
+	}
+	if (ratefam == SLIM_RATE_1HZ) {
+		exact = false;
+		if ((rate4k + 1) * 4000 < (rate11k + 1) * 11025) {
+			rate = rate4k + 1;
+			ratefam = SLIM_RATE_4000HZ;
+		} else {
+			rate = rate11k + 1;
+			ratefam = SLIM_RATE_11025HZ;
+		}
+	}
+	/* convert rate to coeff-exp */
+	while (!done) {
+		while ((rate & 0x1) != 0x1) {
+			rate >>= 1;
+			exp++;
+		}
+		if (rate > 3) {
+			/* roundup if not exact */
+			rate++;
+			exact = false;
+		} else
+			done = true;
+	}
+	if (ratefam == SLIM_RATE_4000HZ) {
+		if (rate == 1)
+			pr = 0x10;
+		else {
+			pr = 0;
+			exp++;
+		}
+	} else {
+		pr = 8;
+		exp++;
+	}
+	if (exp <= 7) {
+		pr |= exp;
+		if (exact)
+			pr |= 0x80;
+	} else
+		pr = 0;
+	return pr;
+}
+
+static int slim_nextdefine_ch(struct slim_device *sb, u8 chan)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	u32 chrate = 0;
+	u32 exp = 0;
+	u32 coeff = 0;
+	bool exact = true;
+	bool done = false;
+	int ret = 0;
+	struct slim_ich *slc = &ctrl->chans[chan];
+	struct slim_ch *prop = &slc->prop;
+
+	slc->prrate = slim_calc_prrate(ctrl, prop);
+	dev_dbg(&ctrl->dev, "ch:%d, chan PR rate:%x\n", chan, slc->prrate);
+	if (prop->baser == SLIM_RATE_4000HZ)
+		chrate = 4000 * prop->ratem;
+	else if (prop->baser == SLIM_RATE_11025HZ)
+		chrate = 11025 * prop->ratem;
+	else
+		chrate = prop->ratem;
+	/* max allowed sample freq = 768 seg/frame */
+	if (chrate > 3600000)
+		return -EDQUOT;
+	if (prop->baser == SLIM_RATE_4000HZ &&
+			ctrl->a_framer->superfreq == 4000)
+		coeff = prop->ratem;
+	else if (prop->baser == SLIM_RATE_11025HZ &&
+			ctrl->a_framer->superfreq == 3675)
+		coeff = 3 * prop->ratem;
+	else {
+		u32 tempr = 0;
+		tempr = chrate * SLIM_CL_PER_SUPERFRAME_DIV8;
+		coeff = tempr / ctrl->a_framer->rootfreq;
+		if (coeff * ctrl->a_framer->rootfreq != tempr) {
+			coeff++;
+			exact = false;
+		}
+	}
+
+	/* convert coeff to coeff-exponent */
+	exp = 0;
+	while (!done) {
+		while ((coeff & 0x1) != 0x1) {
+			coeff >>= 1;
+			exp++;
+		}
+		if (coeff > 3) {
+			coeff++;
+			exact = false;
+		} else
+			done = true;
+	}
+	if (prop->prot == SLIM_HARD_ISO && !exact)
+		return -EPROTONOSUPPORT;
+	else if (prop->prot == SLIM_AUTO_ISO) {
+		if (exact)
+			prop->prot = SLIM_HARD_ISO;
+		else {
+			/* Push-Pull not supported for now */
+			return -EPROTONOSUPPORT;
+		}
+	}
+	slc->rootexp = exp;
+	slc->seglen = prop->sampleszbits/SLIM_CL_PER_SL;
+	if (prop->prot != SLIM_HARD_ISO)
+		slc->seglen++;
+	if (prop->prot >= SLIM_EXT_SMPLX)
+		slc->seglen++;
+	/* convert coeff to enum */
+	if (coeff == 1) {
+		if (exp > 9)
+			ret = -EIO;
+		coeff = SLIM_COEFF_1;
+	} else {
+		if (exp > 8)
+			ret = -EIO;
+		coeff = SLIM_COEFF_3;
+	}
+	slc->coeff = coeff;
+
+	return ret;
+}
+
+/*
+ * slim_alloc_ch: Allocate a slimbus channel and return its handle.
+ * @sb: client handle.
+ * @chanh: return channel handle
+ * Slimbus channels are limited to 256 per specification. LSB of the handle
+ * indicates channel number and MSB of the handle is used by the slimbus
+ * framework. -EXFULL is returned if all channels are in use.
+ * Although slimbus specification supports 256 channels, a controller may not
+ * support that many channels.
+ */
+int slim_alloc_ch(struct slim_device *sb, u16 *chanh)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	u16 i;
+
+	if (!ctrl)
+		return -EINVAL;
+	mutex_lock(&ctrl->m_ctrl);
+	for (i = 0; i < ctrl->nchans; i++) {
+		if (ctrl->chans[i].state == SLIM_CH_FREE)
+			break;
+	}
+	if (i >= ctrl->nchans) {
+		mutex_unlock(&ctrl->m_ctrl);
+		return -EXFULL;
+	}
+	*chanh = i;
+	ctrl->chans[i].nextgrp = 0;
+	ctrl->chans[i].state = SLIM_CH_ALLOCATED;
+
+	mutex_unlock(&ctrl->m_ctrl);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(slim_alloc_ch);
+
+/*
+ * slim_dealloc_ch: Deallocate channel allocated using the API above
+ * -EISCONN is returned if the channel is tried to be deallocated without
+ *  being removed first.
+ */
+int slim_dealloc_ch(struct slim_device *sb, u16 chanh)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	u8 chan = (u8)(chanh & 0xFF);
+	struct slim_ich *slc = &ctrl->chans[chan];
+	if (!ctrl)
+		return -EINVAL;
+
+	mutex_lock(&ctrl->m_ctrl);
+	if (slc->state >= SLIM_CH_PENDING_ACTIVE) {
+		dev_err(&ctrl->dev, "Channel:%d should be removed first", chan);
+		mutex_unlock(&ctrl->m_ctrl);
+		return -EISCONN;
+	}
+	kfree(slc->srch);
+	slc->srch = NULL;
+	slc->state = SLIM_CH_FREE;
+	mutex_unlock(&ctrl->m_ctrl);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(slim_dealloc_ch);
+
+/*
+ * slim_get_ch_state: Channel state.
+ * This API returns the channel's state (active, suspended, inactive etc)
+ */
+enum slim_ch_state slim_get_ch_state(struct slim_device *sb, u16 chanh)
+{
+	u8 chan = (u8)(chanh & 0xFF);
+	struct slim_ich *slc = &sb->ctrl->chans[chan];
+	return slc->state;
+}
+EXPORT_SYMBOL_GPL(slim_get_ch_state);
+
+/*
+ * slim_define_ch: Define a channel. This API defines channel parameters for a
+ *	given channel.
+ * @sb: client handle.
+ * @prop: slim_ch structure with channel parameters desired to be used.
+ * @chanh: list of channels to be defined.
+ * @nchan: number of channels in a group (1 if grp is false)
+ * @grp: Are the channels grouped
+ * @grph: return group handle if grouping of channels is desired.
+ * Channels can be grouped if multiple channels use the same parameters
+ * (e.g. 5.1 audio has 6 channels with identical parameters; they are grouped
+ * and given one handle for simplicity, avoiding repeated calls to this API).
+ * -EISCONN is returned if the channel is already connected. -EBUSY is
+ * returned if the channel is already allocated to some other client.
+ */
+int slim_define_ch(struct slim_device *sb, struct slim_ch *prop, u16 *chanh,
+			u8 nchan, bool grp, u16 *grph)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	int i, ret = 0;
+
+	if (!ctrl || !chanh || !prop || !nchan)
+		return -EINVAL;
+	mutex_lock(&ctrl->m_ctrl);
+	for (i = 0; i < nchan; i++) {
+		u8 chan = (u8)(chanh[i] & 0xFF);
+		dev_dbg(&ctrl->dev, "define_ch: port:%d, state:%d", chanh[i],
+				(int)ctrl->chans[chan].state);
+		if (ctrl->chans[chan].state < SLIM_CH_ALLOCATED ||
+			ctrl->chans[chan].state > SLIM_CH_DEFINED) {
+			int j;
+			for (j = i - 1; j >= 0; j--)
+				ctrl->chans[chan].state = SLIM_CH_ALLOCATED;
+			ret = -EBUSY;
+			goto err_define_ch;
+		}
+		ctrl->chans[chan].prop = *prop;
+		ret = slim_nextdefine_ch(sb, chan);
+		if (ret) {
+			int j;
+			for (j = i - 1; j >= 0; j--) {
+				chan = chanh[j] & 0xFF;
+				ctrl->chans[chan].nextgrp = 0;
+				ctrl->chans[chan].state = SLIM_CH_ALLOCATED;
+			}
+			goto err_define_ch;
+		}
+		if (i < (nchan - 1))
+			ctrl->chans[chan].nextgrp = chanh[i + 1];
+		if (i == 0)
+			ctrl->chans[chan].nextgrp |= SLIM_START_GRP;
+		if (i == (nchan - 1))
+			ctrl->chans[chan].nextgrp |= SLIM_END_GRP;
+
+		ctrl->chans[chan].state = SLIM_CH_DEFINED;
+	}
+
+	if (grp)
+		*grph = chanh[0];
+err_define_ch:
+	mutex_unlock(&ctrl->m_ctrl);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(slim_define_ch);
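+
+/*
+ * Example (illustrative sketch, not part of any particular client driver):
+ * a codec driver streaming stereo audio could allocate two channels, define
+ * them as a group sharing one set of parameters, and keep the group handle
+ * for later activation:
+ *
+ *	struct slim_ch prop = { ... rate and protocol fields ... };
+ *	u16 ch[2], grph;
+ *
+ *	slim_alloc_ch(sb, &ch[0]);
+ *	slim_alloc_ch(sb, &ch[1]);
+ *	slim_define_ch(sb, &prop, ch, 2, true, &grph);
+ *
+ * grph can then be passed to slim_control_ch() to activate the whole group.
+ */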
+
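+/*
+ * getsubfrmcoding: Convert the chosen control width and subframe length into
+ * the subframe mode coding that is broadcast on the bus, clamping the control
+ * width to the nearest representable value and returning the number of
+ * message slots per superframe through msgsl. For example, *subfrml == 24
+ * with *ctrlw == 8 yields coding value 0x18.
+ */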
+static u32 getsubfrmcoding(u32 *ctrlw, u32 *subfrml, u32 *msgsl)
+{
+	u32 code = 0;
+	if (*ctrlw == *subfrml) {
+		*ctrlw = 8;
+		*subfrml = 8;
+		*msgsl = SLIM_SL_PER_SUPERFRAME - SLIM_FRM_SLOTS_PER_SUPERFRAME
+				- SLIM_GDE_SLOTS_PER_SUPERFRAME;
+		return 0;
+	}
+	if (*subfrml == 6) {
+		code = 0;
+		*msgsl = 256;
+	} else if (*subfrml == 8) {
+		code = 1;
+		*msgsl = 192;
+	} else if (*subfrml == 24) {
+		code = 2;
+		*msgsl = 64;
+	} else { /* 32 */
+		code = 3;
+		*msgsl = 48;
+	}
+
+	if (*ctrlw < 8) {
+		if (*ctrlw >= 6) {
+			*ctrlw = 6;
+			code |= 0x14;
+		} else {
+			if (*ctrlw == 5)
+				*ctrlw = 4;
+			code |= (*ctrlw << 2);
+		}
+	} else {
+		code -= 2;
+		if (*ctrlw >= 24) {
+			*ctrlw = 24;
+			code |= 0x1e;
+		} else if (*ctrlw >= 16) {
+			*ctrlw = 16;
+			code |= 0x1c;
+		} else if (*ctrlw >= 12) {
+			*ctrlw = 12;
+			code |= 0x1a;
+		} else {
+			*ctrlw = 8;
+			code |= 0x18;
+		}
+	}
+
+	*msgsl = (*msgsl * *ctrlw) - SLIM_FRM_SLOTS_PER_SUPERFRAME -
+				SLIM_GDE_SLOTS_PER_SUPERFRAME;
+	return code;
+}
+
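+/*
+ * shiftsegoffsets: Shift the newly computed segment offset of every scheduled
+ * channel in the array by shft slots, wrapping the offset back into the
+ * channel's new interval when it overflows.
+ */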
+static void shiftsegoffsets(struct slim_controller *ctrl, struct slim_ich **ach,
+				int sz, u32 shft)
+{
+	int i;
+
+	for (i = 0; i < sz; i++) {
+		struct slim_ich *slc = ach[i];
+
+		if (slc == NULL)
+			continue;
+		if (slc->state == SLIM_CH_PENDING_REMOVAL)
+			continue;
+		slc->newoff += shft;
+		/* seg. offset must be <= interval */
+		if (slc->newoff >= slc->newintr)
+			slc->newoff -= slc->newintr;
+	}
+}
+
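+/*
+ * slim_sched_chans: Try to schedule all coefficient-1 ("4k family") and
+ * coefficient-3 ("12k family") channels into the superframe at the given
+ * clock gear, returning the usable control width and subframe length through
+ * ctrlw/subfrml. -EXFULL means the channels do not fit at this gear.
+ */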
+static int slim_sched_chans(struct slim_device *sb, u32 clkgear,
+			u32 *ctrlw, u32 *subfrml)
+{
+	int coeff1, coeff3;
+	enum slim_ch_coeff bias;
+	struct slim_controller *ctrl = sb->ctrl;
+	int last1 = ctrl->sched.num_cc1 - 1;
+	int last3 = ctrl->sched.num_cc3 - 1;
+
+	/*
+	 * Find first channels with coeff 1 & 3 as starting points for
+	 * scheduling
+	 */
+	for (coeff3 = 0; coeff3 < ctrl->sched.num_cc3; coeff3++) {
+		struct slim_ich *slc = ctrl->sched.chc3[coeff3];
+		if (slc->state == SLIM_CH_PENDING_REMOVAL)
+			continue;
+		else
+			break;
+	}
+	for (coeff1 = 0; coeff1 < ctrl->sched.num_cc1; coeff1++) {
+		struct slim_ich *slc = ctrl->sched.chc1[coeff1];
+		if (slc->state == SLIM_CH_PENDING_REMOVAL)
+			continue;
+		else
+			break;
+	}
+	if (coeff3 == ctrl->sched.num_cc3 && coeff1 == ctrl->sched.num_cc1) {
+		*ctrlw = 8;
+		*subfrml = 8;
+		return 0;
+	} else if (coeff3 == ctrl->sched.num_cc3)
+		bias = SLIM_COEFF_1;
+	else
+		bias = SLIM_COEFF_3;
+
+	/*
+	 * Find the last channel in the coeff1 and coeff3 lists; it is used
+	 * to know when all channels of that coefficient have been scheduled.
+	 */
+	while (last1 >= 0) {
+		if (ctrl->sched.chc1[last1] != NULL &&
+			(ctrl->sched.chc1[last1])->state !=
+			SLIM_CH_PENDING_REMOVAL)
+			break;
+		last1--;
+	}
+	while (last3 >= 0) {
+		if (ctrl->sched.chc3[last3] != NULL &&
+			(ctrl->sched.chc3[last3])->state !=
+			SLIM_CH_PENDING_REMOVAL)
+			break;
+		last3--;
+	}
+
+	if (bias == SLIM_COEFF_1) {
+		struct slim_ich *slc1 = ctrl->sched.chc1[coeff1];
+		u32 expshft = SLIM_MAX_CLK_GEAR - clkgear;
+		int curexp, finalexp;
+		u32 curintr, curmaxsl;
+		int opensl1[2];
+		int maxctrlw1;
+
+		finalexp = (ctrl->sched.chc1[last1])->rootexp;
+		curexp = (int)expshft - 1;
+
+		curintr = (SLIM_MAX_INTR_COEFF_1 * 2) >> (curexp + 1);
+		curmaxsl = curintr >> 1;
+		opensl1[0] = opensl1[1] = curmaxsl;
+
+		while ((coeff1 < ctrl->sched.num_cc1) || (curintr > 24)) {
+			curintr >>= 1;
+			curmaxsl >>= 1;
+
+			/* update 4K family open slot records */
+			if (opensl1[1] < opensl1[0])
+				opensl1[1] -= curmaxsl;
+			else
+				opensl1[1] = opensl1[0] - curmaxsl;
+			opensl1[0] = curmaxsl;
+			if (opensl1[1] < 0) {
+				opensl1[0] += opensl1[1];
+				opensl1[1] = 0;
+			}
+			if (opensl1[0] <= 0) {
+				dev_dbg(&ctrl->dev, "reconfig failed:%d\n",
+						__LINE__);
+				return -EXFULL;
+			}
+			curexp++;
+			/* schedule 4k family channels */
+
+			while ((coeff1 < ctrl->sched.num_cc1) && (curexp ==
+					(int)(slc1->rootexp + expshft))) {
+				if (slc1->state == SLIM_CH_PENDING_REMOVAL) {
+					coeff1++;
+					slc1 = ctrl->sched.chc1[coeff1];
+					continue;
+				}
+				if (opensl1[1] >= opensl1[0] ||
+					(finalexp == (int)slc1->rootexp &&
+					 curintr <= 24 &&
+					 opensl1[0] == curmaxsl)) {
+					opensl1[1] -= slc1->seglen;
+					slc1->newoff = curmaxsl + opensl1[1];
+					if (opensl1[1] < 0 &&
+						opensl1[0] == curmaxsl) {
+						opensl1[0] += opensl1[1];
+						opensl1[1] = 0;
+						if (opensl1[0] < 0) {
+							dev_dbg(&ctrl->dev,
+							"reconfig failed:%d\n",
+							__LINE__);
+							return -EXFULL;
+						}
+					}
+				} else {
+					if (slc1->seglen > opensl1[0]) {
+						dev_dbg(&ctrl->dev,
+						"reconfig failed:%d\n",
+						__LINE__);
+						return -EXFULL;
+					}
+					slc1->newoff = opensl1[0] -
+							slc1->seglen;
+					opensl1[0] = slc1->newoff;
+				}
+				slc1->newintr = curintr;
+				coeff1++;
+				slc1 = ctrl->sched.chc1[coeff1];
+			}
+		}
+		if (opensl1[1] > opensl1[0]) {
+			int temp = opensl1[0];
+			opensl1[0] = opensl1[1];
+			opensl1[1] = temp;
+			shiftsegoffsets(ctrl, ctrl->sched.chc1,
+					ctrl->sched.num_cc1, curmaxsl);
+		}
+		/* choose subframe mode to maximize bw */
+		maxctrlw1 = opensl1[0];
+		if (opensl1[0] == curmaxsl)
+			maxctrlw1 += opensl1[1];
+		if (curintr >= 24) {
+			*subfrml = 24;
+			*ctrlw = maxctrlw1;
+		} else if (curintr == 12) {
+			if (maxctrlw1 > opensl1[1] * 4) {
+				*subfrml = 24;
+				*ctrlw = maxctrlw1;
+			} else {
+				*subfrml = 6;
+				*ctrlw = opensl1[1];
+			}
+		} else {
+			*subfrml = 6;
+			*ctrlw = maxctrlw1;
+		}
+	} else {
+		struct slim_ich *slc1;
+		struct slim_ich *slc3 = ctrl->sched.chc3[coeff3];
+		u32 expshft = SLIM_MAX_CLK_GEAR - clkgear;
+		int curexp, finalexp, exp1;
+		u32 curintr, curmaxsl;
+		int opensl3[2];
+		int opensl1[6];
+		bool opensl1valid = false;
+		int maxctrlw1, maxctrlw3, i;
+		finalexp = (ctrl->sched.chc3[last3])->rootexp;
+		if (last1 >= 0) {
+			slc1 = ctrl->sched.chc1[coeff1];
+			exp1 = (ctrl->sched.chc1[last1])->rootexp;
+			if (exp1 > finalexp)
+				finalexp = exp1;
+		}
+		curexp = (int)expshft - 1;
+
+		curintr = (SLIM_MAX_INTR_COEFF_3 * 2) >> (curexp + 1);
+		curmaxsl = curintr >> 1;
+		opensl3[0] = opensl3[1] = curmaxsl;
+
+		while (coeff1 < ctrl->sched.num_cc1 ||
+			coeff3 < ctrl->sched.num_cc3 ||
+			curintr > 32) {
+			curintr >>= 1;
+			curmaxsl >>= 1;
+
+			/* update 12k family open slot records */
+			if (opensl3[1] < opensl3[0])
+				opensl3[1] -= curmaxsl;
+			else
+				opensl3[1] = opensl3[0] - curmaxsl;
+			opensl3[0] = curmaxsl;
+			if (opensl3[1] < 0) {
+				opensl3[0] += opensl3[1];
+				opensl3[1] = 0;
+			}
+			if (opensl3[0] <= 0) {
+				dev_dbg(&ctrl->dev, "reconfig failed:%d\n",
+						__LINE__);
+				return -EXFULL;
+			}
+			curexp++;
+
+			/* schedule 12k family channels */
+			while (coeff3 < ctrl->sched.num_cc3 &&
+				curexp == (int)slc3->rootexp + expshft) {
+				if (slc3->state == SLIM_CH_PENDING_REMOVAL) {
+					coeff3++;
+					slc3 = ctrl->sched.chc3[coeff3];
+					continue;
+				}
+				opensl1valid = false;
+				if (opensl3[1] >= opensl3[0] ||
+					(finalexp == (int)slc3->rootexp &&
+					 curintr <= 32 &&
+					 opensl3[0] == curmaxsl &&
+					 last1 < 0)) {
+					opensl3[1] -= slc3->seglen;
+					slc3->newoff = curmaxsl + opensl3[1];
+					if (opensl3[1] < 0 &&
+						opensl3[0] == curmaxsl) {
+						opensl3[0] += opensl3[1];
+						opensl3[1] = 0;
+					}
+					if (opensl3[0] < 0) {
+						dev_dbg(&ctrl->dev,
+						"reconfig failed:%d\n",
+						__LINE__);
+						return -EXFULL;
+					}
+				} else {
+					if (slc3->seglen > opensl3[0]) {
+						dev_dbg(&ctrl->dev,
+						"reconfig failed:%d\n",
+						__LINE__);
+						return -EXFULL;
+					}
+					slc3->newoff = opensl3[0] -
+							slc3->seglen;
+					opensl3[0] = slc3->newoff;
+				}
+				slc3->newintr = curintr;
+				coeff3++;
+				slc3 = ctrl->sched.chc3[coeff3];
+			}
+			/* update 4k openslot records */
+			if (!opensl1valid) {
+				for (i = 0; i < 3; i++) {
+					opensl1[i * 2] = opensl3[0];
+					opensl1[(i * 2) + 1] = opensl3[1];
+				}
+			} else {
+				int opensl1p[6];
+				memcpy(opensl1p, opensl1, sizeof(opensl1));
+				for (i = 0; i < 3; i++) {
+					if (opensl1p[i] < opensl1p[i + 3])
+						opensl1[(i * 2) + 1] =
+							opensl1p[i];
+					else
+						opensl1[(i * 2) + 1] =
+							opensl1p[i + 3];
+				}
+				for (i = 0; i < 3; i++) {
+					opensl1[(i * 2) + 1] -= curmaxsl;
+					opensl1[i * 2] = curmaxsl;
+					if (opensl1[(i * 2) + 1] < 0) {
+						opensl1[i * 2] +=
+							opensl1[(i * 2) + 1];
+						opensl1[(i * 2) + 1] = 0;
+					}
+					if (opensl1[i * 2] < 0) {
+						dev_dbg(&ctrl->dev,
+						"reconfig failed:%d\n",
+						__LINE__);
+						return -EXFULL;
+					}
+				}
+			}
+			/* schedule 4k family channels */
+			while (coeff1 < ctrl->sched.num_cc1 &&
+				curexp == (int)slc1->rootexp + expshft) {
+				/* searchorder effective when opensl valid */
+				static const int srcho[] = { 5, 2, 4, 1, 3, 0 };
+				int maxopensl = 0;
+				int maxi = 0;
+				if (slc1->state == SLIM_CH_PENDING_REMOVAL) {
+					coeff1++;
+					slc1 = ctrl->sched.chc1[coeff1];
+					continue;
+				}
+				opensl1valid = true;
+				for (i = 0; i < 6; i++) {
+					if (opensl1[srcho[i]] > maxopensl) {
+						maxopensl = opensl1[srcho[i]];
+						maxi = srcho[i];
+					}
+				}
+				opensl1[maxi] -= slc1->seglen;
+				slc1->newoff = (curmaxsl * maxi) +
+						opensl1[maxi];
+				if (opensl1[maxi] < 0) {
+					if (((maxi & 1) == 1) &&
+					(opensl1[maxi - 1] == curmaxsl)) {
+						opensl1[maxi - 1] +=
+							opensl1[maxi];
+						if (opensl3[0] >
+							opensl1[maxi - 1])
+							opensl3[0] =
+							opensl1[maxi - 1];
+						opensl3[1] = 0;
+						opensl1[maxi] = 0;
+						if (opensl1[maxi - 1] < 0) {
+							dev_dbg(&ctrl->dev,
+							"reconfig failed:%d\n",
+							__LINE__);
+							return -EXFULL;
+						}
+					} else {
+						dev_dbg(&ctrl->dev,
+						"reconfig failed:%d\n",
+						__LINE__);
+						return -EXFULL;
+					}
+				} else {
+					if (opensl3[maxi & 1] > opensl1[maxi])
+						opensl3[maxi & 1] =
+							opensl1[maxi];
+				}
+				slc1->newintr = curintr * 3;
+				coeff1++;
+				slc1 = ctrl->sched.chc1[coeff1];
+			}
+		}
+		/* swap 1st and 2nd bucket if 2nd bucket has more open slots */
+		if (opensl3[1] > opensl3[0]) {
+			int temp = opensl3[0];
+			opensl3[0] = opensl3[1];
+			opensl3[1] = temp;
+			temp = opensl1[5];
+			opensl1[5] = opensl1[4];
+			opensl1[4] = opensl1[3];
+			opensl1[3] = opensl1[2];
+			opensl1[2] = opensl1[1];
+			opensl1[1] = opensl1[0];
+			opensl1[0] = temp;
+			shiftsegoffsets(ctrl, ctrl->sched.chc1,
+					ctrl->sched.num_cc1, curmaxsl);
+			shiftsegoffsets(ctrl, ctrl->sched.chc3,
+					ctrl->sched.num_cc3, curmaxsl);
+		}
+		/* subframe mode to maximize BW */
+		maxctrlw3 = opensl3[0];
+		maxctrlw1 = opensl1[0];
+		if (opensl3[0] == curmaxsl)
+			maxctrlw3 += opensl3[1];
+		for (i = 0; i < 5 && opensl1[i] == curmaxsl; i++)
+			maxctrlw1 += opensl1[i + 1];
+		if (curintr >= 32) {
+			*subfrml = 32;
+			*ctrlw = maxctrlw3;
+		} else if (curintr == 16) {
+			if (maxctrlw3 > (opensl3[1] * 4)) {
+				*subfrml = 32;
+				*ctrlw = maxctrlw3;
+			} else {
+				*subfrml = 8;
+				*ctrlw = opensl3[1];
+			}
+		} else {
+			if ((maxctrlw1 * 8) >= (maxctrlw3 * 24)) {
+				*subfrml = 24;
+				*ctrlw = maxctrlw1;
+			} else {
+				*subfrml = 8;
+				*ctrlw = maxctrlw3;
+			}
+		}
+	}
+	return 0;
+}
+
+#ifdef DEBUG
+static int slim_verifychansched(struct slim_controller *ctrl, u32 ctrlw,
+				u32 subfrml, u32 clkgear)
+{
+	int sl, i;
+	int cc1 = 0;
+	int cc3 = 0;
+	struct slim_ich *slc = NULL;
+	if (!ctrl->sched.slots)
+		return 0;
+	memset(ctrl->sched.slots, 0, SLIM_SL_PER_SUPERFRAME);
+	dev_dbg(&ctrl->dev, "Clock gear is:%d\n", clkgear);
+	for (sl = 0; sl < SLIM_SL_PER_SUPERFRAME; sl += subfrml) {
+		for (i = 0; i < ctrlw; i++)
+			ctrl->sched.slots[sl + i] = 33;
+	}
+	while (cc1 < ctrl->sched.num_cc1) {
+		slc = ctrl->sched.chc1[cc1];
+		if (slc == NULL) {
+			dev_err(&ctrl->dev, "SLC1 null in verify: chan%d\n",
+				cc1);
+			return -EIO;
+		}
+		dev_dbg(&ctrl->dev, "chan:%d, offset:%d, intr:%d, seglen:%d\n",
+				(slc - ctrl->chans), slc->newoff,
+				slc->newintr, slc->seglen);
+
+		if (slc->state != SLIM_CH_PENDING_REMOVAL) {
+			for (sl = slc->newoff;
+				sl < SLIM_SL_PER_SUPERFRAME;
+				sl += slc->newintr) {
+				for (i = 0; i < slc->seglen; i++) {
+					if (ctrl->sched.slots[sl + i])
+						return -EXFULL;
+					ctrl->sched.slots[sl + i] = cc1 + 1;
+				}
+			}
+		}
+		cc1++;
+	}
+	while (cc3 < ctrl->sched.num_cc3) {
+		slc = ctrl->sched.chc3[cc3];
+		if (slc == NULL) {
+			dev_err(&ctrl->dev, "SLC3 null in verify: chan%d\n",
+				cc3);
+			return -EIO;
+		}
+		dev_dbg(&ctrl->dev, "chan:%d, offset:%d, intr:%d, seglen:%d\n",
+				(slc - ctrl->chans), slc->newoff,
+				slc->newintr, slc->seglen);
+		if (slc->state != SLIM_CH_PENDING_REMOVAL) {
+			for (sl = slc->newoff;
+				sl < SLIM_SL_PER_SUPERFRAME;
+				sl += slc->newintr) {
+				for (i = 0; i < slc->seglen; i++) {
+					if (ctrl->sched.slots[sl + i])
+						return -EXFULL;
+					ctrl->sched.slots[sl + i] = cc3 + 1;
+				}
+			}
+		}
+		cc3++;
+	}
+
+	return 0;
+}
+#else
+static int slim_verifychansched(struct slim_controller *ctrl, u32 ctrlw,
+				u32 subfrml, u32 clkgear)
+{
+	return 0;
+}
+#endif
+
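+/*
+ * slim_sort_chan_grp: Sort the newly computed segment offsets within a
+ * channel group (linked through nextgrp) so that they end up in increasing
+ * order across the group.
+ */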
+static void slim_sort_chan_grp(struct slim_controller *ctrl,
+				struct slim_ich *slc)
+{
+	u8  last = (u8)-1;
+	u8 second = 0;
+
+	for (; last > 0; last--) {
+		struct slim_ich *slc1 = slc;
+		struct slim_ich *slc2;
+		u8 next = (u8)(slc1->nextgrp & 0xFF);
+		slc2 = &ctrl->chans[next];
+		for (second = 1; second <= last && slc2 &&
+			(slc2->state == SLIM_CH_ACTIVE ||
+			 slc2->state == SLIM_CH_PENDING_ACTIVE); second++) {
+			if (slc1->newoff > slc2->newoff) {
+				u32 temp = slc2->newoff;
+				slc2->newoff = slc1->newoff;
+				slc1->newoff = temp;
+			}
+			if (slc2->nextgrp & SLIM_END_GRP) {
+				last = second;
+				break;
+			}
+			slc1 = slc2;
+			next = (u8)(slc1->nextgrp & 0xFF);
+			slc2 = &ctrl->chans[next];
+		}
+		if (slc2 == NULL)
+			last = second - 1;
+	}
+}
+
+
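+/*
+ * slim_allocbw: Find the lowest clock gear at which all data channels and the
+ * reserved message bandwidth can be scheduled. The search starts from an
+ * optimistic gear derived from the current slot usage and walks up towards
+ * SLIM_MAX_CLK_GEAR until slim_sched_chans() succeeds and enough message
+ * slots remain, then sorts channel groups and verifies the schedule.
+ */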
+static int slim_allocbw(struct slim_device *sb, int *subfrmc, int *clkgear)
+{
+	u32 msgsl = 0;
+	u32 ctrlw = 0;
+	u32 subfrml = 0;
+	int ret = -EIO;
+	struct slim_controller *ctrl = sb->ctrl;
+	u32 usedsl = ctrl->sched.usedslots + ctrl->sched.pending_msgsl;
+	u32 availsl = SLIM_SL_PER_SUPERFRAME - SLIM_FRM_SLOTS_PER_SUPERFRAME -
+			SLIM_GDE_SLOTS_PER_SUPERFRAME;
+	*clkgear = SLIM_MAX_CLK_GEAR;
+
+	dev_dbg(&ctrl->dev, "used sl:%u, availlable sl:%u\n", usedsl, availsl);
+	dev_dbg(&ctrl->dev, "pending:chan sl:%u, :msg sl:%u, clkgear:%u\n",
+				ctrl->sched.usedslots,
+				ctrl->sched.pending_msgsl, *clkgear);
+	while ((usedsl * 2 <= availsl) && (*clkgear > 1)) {
+		*clkgear -= 1;
+		usedsl *= 2;
+	}
+
+	/*
+	 * Try scheduling data channels at current clock gear, if all channels
+	 * can be scheduled, or reserved BW can't be satisfied, increase clock
+	 * gear and try again
+	 */
+	for (; *clkgear <= SLIM_MAX_CLK_GEAR; (*clkgear)++) {
+		ret = slim_sched_chans(sb, *clkgear, &ctrlw, &subfrml);
+
+		if (ret == 0) {
+			*subfrmc = getsubfrmcoding(&ctrlw, &subfrml, &msgsl);
+			if ((msgsl >> (SLIM_MAX_CLK_GEAR - *clkgear) <
+				ctrl->sched.pending_msgsl) &&
+				(*clkgear < SLIM_MAX_CLK_GEAR))
+				continue;
+			else
+				break;
+		}
+	}
+	if (ret == 0) {
+		int i;
+		/* Sort channel-groups */
+		for (i = 0; i < ctrl->sched.num_cc1; i++) {
+			struct slim_ich *slc = ctrl->sched.chc1[i];
+			if (slc->state == SLIM_CH_PENDING_REMOVAL)
+				continue;
+			if ((slc->nextgrp & SLIM_START_GRP) &&
+				!(slc->nextgrp & SLIM_END_GRP)) {
+				slim_sort_chan_grp(ctrl, slc);
+			}
+		}
+		for (i = 0; i < ctrl->sched.num_cc3; i++) {
+			struct slim_ich *slc = ctrl->sched.chc3[i];
+			if (slc->state == SLIM_CH_PENDING_REMOVAL)
+				continue;
+			if ((slc->nextgrp & SLIM_START_GRP) &&
+				!(slc->nextgrp & SLIM_END_GRP)) {
+				slim_sort_chan_grp(ctrl, slc);
+			}
+		}
+
+		ret = slim_verifychansched(ctrl, ctrlw, subfrml, *clkgear);
+	}
+
+	return ret;
+}
+
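+/*
+ * slim_chan_changes: Commit (revert == false) or roll back (revert == true)
+ * the client's pending define/removal/suspend lists after a reconfiguration
+ * attempt, freeing the pending entries in either case.
+ */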
+static void slim_chan_changes(struct slim_device *sb, bool revert)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	while (!list_empty(&sb->mark_define)) {
+		struct slim_ich *slc;
+		struct slim_pending_ch *pch =
+				list_entry(sb->mark_define.next,
+					struct slim_pending_ch, pending);
+		slc = &ctrl->chans[pch->chan];
+		if (revert) {
+			u32 sl = slc->seglen << slc->rootexp;
+			if (slc->coeff == SLIM_COEFF_3)
+				sl *= 3;
+			ctrl->sched.usedslots -= sl;
+			slim_remove_ch(ctrl, slc);
+			slc->state = SLIM_CH_DEFINED;
+		} else {
+			slc->offset = slc->newoff;
+			slc->interval = slc->newintr;
+			slc->state = SLIM_CH_ACTIVE;
+		}
+		list_del_init(&pch->pending);
+		kfree(pch);
+	}
+
+	while (!list_empty(&sb->mark_removal)) {
+		struct slim_pending_ch *pch =
+				list_entry(sb->mark_removal.next,
+					struct slim_pending_ch, pending);
+		struct slim_ich *slc = &ctrl->chans[pch->chan];
+		u32 sl = slc->seglen << slc->rootexp;
+		if (revert) {
+			if (slc->coeff == SLIM_COEFF_3)
+				sl *= 3;
+			ctrl->sched.usedslots += sl;
+			slc->state = SLIM_CH_ACTIVE;
+		} else
+			slim_remove_ch(ctrl, slc);
+		list_del_init(&pch->pending);
+		kfree(pch);
+	}
+
+	while (!list_empty(&sb->mark_suspend)) {
+		struct slim_pending_ch *pch =
+				list_entry(sb->mark_suspend.next,
+					struct slim_pending_ch, pending);
+		struct slim_ich *slc = &ctrl->chans[pch->chan];
+		if (revert)
+			slc->state = SLIM_CH_ACTIVE;
+		list_del_init(&pch->pending);
+		kfree(pch);
+	}
+}
+
+/*
+ * slim_reconfigure_now: Request reconfiguration now.
+ * @sb: client handle
+ * This API does what the commit flag in the other scheduling APIs does.
+ * -EXFULL is returned if there is no space in the TDM frame to reserve the
+ * bandwidth. -EBUSY is returned if a reconfiguration request is already in
+ * progress.
+ */
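+/*
+ * The reconfiguration is carried out as a broadcast message sequence:
+ * BEGIN_RECONFIGURATION, optionally NEXT_SUBFRAME_MODE and NEXT_CLOCK_GEAR,
+ * NEXT_DEFINE_CONTENT/NEXT_ACTIVATE_CHANNEL for newly defined channels,
+ * NEXT_REMOVE_CHANNEL and NEXT_DEACTIVATE_CHANNEL for removed and suspended
+ * ones, NEXT_DEFINE_CHANNEL for rescheduled channels, and finally
+ * RECONFIGURE_NOW. Any failure reverts the bookkeeping via
+ * slim_chan_changes(sb, true).
+ */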
+int slim_reconfigure_now(struct slim_device *sb)
+{
+	u8 i;
+	u8 wbuf[4];
+	u32 clkgear, subframe;
+	u32 curexp;
+	int ret;
+	struct slim_controller *ctrl = sb->ctrl;
+	u32 expshft;
+	u32 segdist;
+	struct slim_pending_ch *pch;
+
+	mutex_lock(&ctrl->sched.m_reconf);
+	mutex_lock(&ctrl->m_ctrl);
+	ctrl->sched.pending_msgsl += sb->pending_msgsl - sb->cur_msgsl;
+	list_for_each_entry(pch, &sb->mark_define, pending) {
+		struct slim_ich *slc = &ctrl->chans[pch->chan];
+		slim_add_ch(ctrl, slc);
+		slc->state = SLIM_CH_PENDING_ACTIVE;
+	}
+
+	list_for_each_entry(pch, &sb->mark_removal, pending) {
+		struct slim_ich *slc = &ctrl->chans[pch->chan];
+		u32 sl = slc->seglen << slc->rootexp;
+		if (slc->coeff == SLIM_COEFF_3)
+			sl *= 3;
+		ctrl->sched.usedslots -= sl;
+		slc->state = SLIM_CH_PENDING_REMOVAL;
+	}
+	list_for_each_entry(pch, &sb->mark_suspend, pending) {
+		struct slim_ich *slc = &ctrl->chans[pch->chan];
+		slc->state = SLIM_CH_SUSPENDED;
+	}
+	mutex_unlock(&ctrl->m_ctrl);
+
+	ret = slim_allocbw(sb, &subframe, &clkgear);
+
+	if (!ret) {
+		ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
+			SLIM_MSG_MC_BEGIN_RECONFIGURATION, 0, SLIM_MSG_MT_CORE,
+			NULL, NULL, 0, 3, NULL, 0, NULL);
+		dev_dbg(&ctrl->dev, "sending begin_reconfig:ret:%d\n", ret);
+	}
+
+	if (!ret && subframe != ctrl->sched.subfrmcode) {
+		wbuf[0] = (u8)(subframe & 0xFF);
+		ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
+			SLIM_MSG_MC_NEXT_SUBFRAME_MODE, 0, SLIM_MSG_MT_CORE,
+			NULL, wbuf, 1, 4, NULL, 0, NULL);
+		dev_dbg(&ctrl->dev, "sending subframe:%d,ret:%d\n",
+				(int)wbuf[0], ret);
+	}
+	if (!ret && clkgear != ctrl->clkgear) {
+		wbuf[0] = (u8)(clkgear & 0xFF);
+		ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
+			SLIM_MSG_MC_NEXT_CLOCK_GEAR, 0, SLIM_MSG_MT_CORE,
+			NULL, wbuf, 1, 4, NULL, 0, NULL);
+		dev_dbg(&ctrl->dev, "sending clkgear:%d,ret:%d\n",
+				(int)wbuf[0], ret);
+	}
+	if (ret)
+		goto revert_reconfig;
+
+	expshft = SLIM_MAX_CLK_GEAR - clkgear;
+	/* activate/remove channel */
+	list_for_each_entry(pch, &sb->mark_define, pending) {
+		struct slim_ich *slc = &ctrl->chans[pch->chan];
+		/* Define content */
+		wbuf[0] = pch->chan;
+		wbuf[1] = slc->prrate;
+		wbuf[2] = slc->prop.dataf | (slc->prop.auxf << 4);
+		wbuf[3] = slc->prop.sampleszbits / SLIM_CL_PER_SL;
+		dev_dbg(&ctrl->dev, "define content, activate:%x, %x, %x, %x\n",
+				wbuf[0], wbuf[1], wbuf[2], wbuf[3]);
+		/* Right now, channel link bit is not supported */
+		ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
+				SLIM_MSG_MC_NEXT_DEFINE_CONTENT, 0,
+				SLIM_MSG_MT_CORE, NULL, (u8 *)&wbuf, 4, 7,
+				NULL, 0, NULL);
+		if (ret)
+			goto revert_reconfig;
+
+		ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
+				SLIM_MSG_MC_NEXT_ACTIVATE_CHANNEL, 0,
+				SLIM_MSG_MT_CORE, NULL, (u8 *)&wbuf, 1, 4,
+				NULL, 0, NULL);
+		if (ret)
+			goto revert_reconfig;
+	}
+
+	list_for_each_entry(pch, &sb->mark_removal, pending) {
+		dev_dbg(&ctrl->dev, "remove chan:%x\n", pch->chan);
+		wbuf[0] = pch->chan;
+		ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
+				SLIM_MSG_MC_NEXT_REMOVE_CHANNEL, 0,
+				SLIM_MSG_MT_CORE, NULL, wbuf, 1, 4,
+				NULL, 0, NULL);
+		if (ret)
+			goto revert_reconfig;
+	}
+	list_for_each_entry(pch, &sb->mark_suspend, pending) {
+		dev_dbg(&ctrl->dev, "suspend chan:%x\n", pch->chan);
+		wbuf[0] = pch->chan;
+		ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
+				SLIM_MSG_MC_NEXT_DEACTIVATE_CHANNEL, 0,
+				SLIM_MSG_MT_CORE, NULL, wbuf, 1, 4,
+				NULL, 0, NULL);
+		if (ret)
+			goto revert_reconfig;
+	}
+
+	/* Define CC1 channels */
+	for (i = 0; i < ctrl->sched.num_cc1; i++) {
+		struct slim_ich *slc = ctrl->sched.chc1[i];
+		if (slc->state == SLIM_CH_PENDING_REMOVAL)
+			continue;
+		curexp = slc->rootexp + expshft;
+		segdist = (slc->newoff << curexp) & 0x1FF;
+		expshft = SLIM_MAX_CLK_GEAR - clkgear;
+
+		if (slc->state < SLIM_CH_ACTIVE ||
+			slc->newintr != slc->interval ||
+			slc->newoff != slc->offset) {
+			segdist |= 0x200;
+			segdist >>= curexp;
+			segdist |= (slc->newoff << (curexp + 1)) & 0xC00;
+			wbuf[0] = (u8)(slc - ctrl->chans);
+			wbuf[1] = (u8)(segdist & 0xFF);
+			wbuf[2] = (u8)((segdist & 0xF00) >> 8) |
+					(slc->prop.prot << 4);
+			wbuf[3] = slc->seglen;
+			ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
+					SLIM_MSG_MC_NEXT_DEFINE_CHANNEL, 0,
+					SLIM_MSG_MT_CORE, NULL, (u8 *)wbuf, 4,
+					7, NULL, 0, NULL);
+			if (ret)
+				goto revert_reconfig;
+		}
+	}
+
+	/* Define CC3 channels */
+	for (i = 0; i < ctrl->sched.num_cc3; i++) {
+		struct slim_ich *slc = ctrl->sched.chc3[i];
+		if (slc->state == SLIM_CH_PENDING_REMOVAL)
+			continue;
+		curexp = slc->rootexp + expshft;
+		segdist = (slc->newoff << curexp) & 0x1FF;
+		expshft = SLIM_MAX_CLK_GEAR - clkgear;
+
+		if (slc->state < SLIM_CH_ACTIVE ||
+			slc->newintr != slc->interval ||
+			slc->newoff != slc->offset) {
+			segdist |= 0x200;
+			segdist >>= curexp;
+			segdist |= 0xC00;
+			wbuf[0] = (u8)(slc - ctrl->chans);
+			wbuf[1] = (u8)(segdist & 0xFF);
+			wbuf[2] = (u8)((segdist & 0xF00) >> 8) |
+					(slc->prop.prot << 4);
+			wbuf[3] = (u8)(slc->seglen);
+			ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
+					SLIM_MSG_MC_NEXT_DEFINE_CHANNEL, 0,
+					SLIM_MSG_MT_CORE, NULL, (u8 *)wbuf, 4,
+					7, NULL, 0, NULL);
+			if (ret)
+				goto revert_reconfig;
+		}
+	}
+	ret = slim_processtxn(ctrl, SLIM_MSG_DEST_BROADCAST,
+			SLIM_MSG_MC_RECONFIGURE_NOW, 0, SLIM_MSG_MT_CORE, NULL,
+			NULL, 0, 3, NULL, 0, NULL);
+	dev_dbg(&ctrl->dev, "reconfig now:ret:%d\n", ret);
+	if (!ret) {
+		mutex_lock(&ctrl->m_ctrl);
+		ctrl->sched.subfrmcode = subframe;
+		ctrl->clkgear = clkgear;
+		ctrl->sched.msgsl = ctrl->sched.pending_msgsl;
+		sb->cur_msgsl = sb->pending_msgsl;
+		slim_chan_changes(sb, false);
+		mutex_unlock(&ctrl->m_ctrl);
+		mutex_unlock(&ctrl->sched.m_reconf);
+		return 0;
+	}
+
+revert_reconfig:
+	mutex_lock(&ctrl->m_ctrl);
+	/* Revert channel changes */
+	slim_chan_changes(sb, true);
+	mutex_unlock(&ctrl->m_ctrl);
+	mutex_unlock(&ctrl->sched.m_reconf);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(slim_reconfigure_now);
+
+static int add_pending_ch(struct list_head *listh, u8 chan)
+{
+	struct slim_pending_ch *pch;
+	pch = kmalloc(sizeof(struct slim_pending_ch), GFP_KERNEL);
+	if (!pch)
+		return -ENOMEM;
+	pch->chan = chan;
+	list_add_tail(&pch->pending, listh);
+	return 0;
+}
+
+/*
+ * slim_control_ch: Channel control API.
+ * @sb: client handle
+ * @chanh: group or channel handle to be controlled
+ * @chctrl: Control command (activate/suspend/remove)
+ * @commit: flag to indicate whether the control should take effect right away.
+ * This API activates, removes or suspends a channel (or a group of channels).
+ * chanh indicates the channel or group handle (returned by the define_ch API).
+ * Reconfiguration may be time-consuming since it can change all other active
+ * channel allocations on the bus, the clock gear used by the slimbus, and the
+ * control space width used for messaging.
+ * commit makes it possible to activate/deactivate multiple channels before a
+ * single reconfiguration is started.
+ * -EXFULL is returned if there is no space in TDM to reserve the bandwidth.
+ * -EISCONN/-ENOTCONN is returned if the channel is already connected or not
+ * yet defined.
+ */
+int slim_control_ch(struct slim_device *sb, u16 chanh,
+			enum slim_ch_control chctrl, bool commit)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	struct slim_ich *slc;
+	int ret = 0;
+	/* Get rid of the group flag in MSB if any */
+	u8 chan = (u8)(chanh & 0xFF);
+	mutex_lock(&sb->sldev_reconf);
+	mutex_lock(&ctrl->m_ctrl);
+	do {
+		slc = &ctrl->chans[chan];
+		if (slc->state < SLIM_CH_DEFINED) {
+			ret = -ENOTCONN;
+			break;
+		}
+		if (chctrl == SLIM_CH_SUSPEND) {
+			ret = add_pending_ch(&sb->mark_suspend, chan);
+			if (ret)
+				break;
+		} else if (chctrl == SLIM_CH_ACTIVATE) {
+			if (slc->state >= SLIM_CH_ACTIVE) {
+				ret = -EISCONN;
+				break;
+			}
+			ret = add_pending_ch(&sb->mark_define, chan);
+			if (ret)
+				break;
+		} else {
+			if (slc->state < SLIM_CH_ACTIVE) {
+				ret = -ENOTCONN;
+				break;
+			}
+			ret = add_pending_ch(&sb->mark_removal, chan);
+			if (ret)
+				break;
+		}
+
+		if (!(slc->nextgrp & SLIM_END_GRP))
+			chan = (u8)(slc->nextgrp & 0xFF);
+	} while (!(slc->nextgrp & SLIM_END_GRP));
+	mutex_unlock(&ctrl->m_ctrl);
+	if (!ret && commit)
+		ret = slim_reconfigure_now(sb);
+	mutex_unlock(&sb->sldev_reconf);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(slim_control_ch);
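+
+/*
+ * Example (illustrative sketch): a client can queue the activation of a
+ * previously defined group without committing it right away:
+ *
+ *	slim_control_ch(sb, grph, SLIM_CH_ACTIVATE, false);
+ *
+ * and let a later call, such as slim_reservemsg_bw() below or an explicit
+ * slim_reconfigure_now(), trigger the single reconfiguration.
+ */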
+
+/*
+ * slim_reservemsg_bw: Request to reserve bandwidth for messages.
+ * @sb: client handle
+ * @bw_bps: message bandwidth in bits per second to be requested
+ * @commit: indicates whether the reconfiguration needs to be acted upon.
+ * This call can be grouped with a slim_control_ch call, with only one of the
+ * two specifying the commit flag, so that reconfiguration is not triggered
+ * more often than necessary. -EXFULL is returned if there is no space in the
+ * TDM frame to reserve the
+ * bandwidth. -EBUSY is returned if reconfiguration is requested, but a request
+ * is already in progress.
+ */
+int slim_reservemsg_bw(struct slim_device *sb, u32 bw_bps, bool commit)
+{
+	struct slim_controller *ctrl = sb->ctrl;
+	int ret = 0;
+	int sl;
+	mutex_lock(&sb->sldev_reconf);
+	if ((bw_bps >> 3) >= ctrl->a_framer->rootfreq)
+		sl = SLIM_SL_PER_SUPERFRAME;
+	else {
+		sl = (bw_bps * (SLIM_CL_PER_SUPERFRAME_DIV8/SLIM_CL_PER_SL/2) +
+			(ctrl->a_framer->rootfreq/2 - 1)) /
+			(ctrl->a_framer->rootfreq/2);
+	}
+	dev_dbg(&ctrl->dev, "request:bw:%d, slots:%d, current:%d\n", bw_bps, sl,
+						sb->cur_msgsl);
+	sb->pending_msgsl = sl;
+	if (commit)
+		ret = slim_reconfigure_now(sb);
+	mutex_unlock(&sb->sldev_reconf);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(slim_reservemsg_bw);
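+
+/*
+ * Example (illustrative sketch), continuing the one above: reserving message
+ * bandwidth with commit == true folds the pending channel activation and the
+ * bandwidth reservation into one reconfiguration sequence:
+ *
+ *	slim_reservemsg_bw(sb, 100000, true);
+ */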
+
+
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.1");
+MODULE_DESCRIPTION("Slimbus module");
+MODULE_ALIAS("platform:slimbus");
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index de35c3a..7ad57d6 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -310,6 +310,11 @@
 config SPI_PXA2XX_PCI
 	def_bool SPI_PXA2XX && X86_32 && PCI
 
+config SPI_QSD
+	tristate "Qualcomm MSM SPI support"
+	default n
+	depends on ARCH_MSM_SCORPION && !MSM_SMP
+
 config SPI_S3C24XX
 	tristate "Samsung S3C24XX series SPI"
 	depends on ARCH_S3C2410 && EXPERIMENTAL
@@ -420,6 +425,26 @@
 	help
 	  SPI driver for Nuvoton NUC900 series ARM SoCs
 
+config SPI_QSD
+	tristate "Qualcomm MSM SPI support"
+	default n
+	depends on ARCH_MSM_SCORPION && !MSM_SMP
+	help
+	  Support for Serial Peripheral Interface for Qualcomm MSM
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called spi_qsd.
+
+config SPI_QUP
+	tristate "Qualcomm MSM SPI QUPe Support"
+	depends on ARCH_MSM && !SPI_QSD
+	default n
+	help
+	  Support for Serial Peripheral Interface for Qualcomm Universal
+	  Peripheral.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called spi_qsd.
 #
 # Add new SPI master controllers in alphabetical order above this line
 #
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 0f8c69b..560ef93 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -55,6 +55,8 @@
 obj-$(CONFIG_SPI_SH_MSIOF)		+= spi_sh_msiof.o
 obj-$(CONFIG_SPI_STMP3XXX)		+= spi_stmp.o
 obj-$(CONFIG_SPI_NUC900)		+= spi_nuc900.o
+obj-$(CONFIG_SPI_QSD)			+= spi_qsd.o
+obj-$(CONFIG_SPI_QUP)			+= spi_qsd.o
 
 # special build for s3c24xx spi driver with fiq support
 spi_s3c24xx_hw-y			:= spi_s3c24xx.o
diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c
new file mode 100644
index 0000000..28d3955
--- /dev/null
+++ b/drivers/spi/spi_qsd.c
@@ -0,0 +1,2539 @@
+/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * SPI driver for Qualcomm MSM platforms
+ */
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <mach/msm_spi.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <mach/dma.h>
+#include <asm/atomic.h>
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+#include <linux/remote_spinlock.h>
+#include <linux/pm_qos_params.h>
+
+#define SPI_DRV_NAME                  "spi_qsd"
+#if defined(CONFIG_SPI_QSD) || defined(CONFIG_SPI_QSD_MODULE)
+
+#define QSD_REG(x) (x)
+#define QUP_REG(x)
+
+#define SPI_FIFO_WORD_CNT             0x0048
+
+#elif defined(CONFIG_SPI_QUP) || defined(CONFIG_SPI_QUP_MODULE)
+
+#define QSD_REG(x)
+#define QUP_REG(x) (x)
+
+#define QUP_CONFIG                    0x0000 /* N & NO_INPUT/NO_OUTPUT bits */
+#define QUP_ERROR_FLAGS               0x0308
+#define QUP_ERROR_FLAGS_EN            0x030C
+#define QUP_ERR_MASK                  0x3
+#define SPI_OUTPUT_FIFO_WORD_CNT      0x010C
+#define SPI_INPUT_FIFO_WORD_CNT       0x0214
+#define QUP_MX_WRITE_COUNT            0x0150
+#define QUP_MX_WRITE_CNT_CURRENT      0x0154
+
+#define QUP_CONFIG_SPI_MODE           0x0100
+
+#define GSBI_CTRL_REG                 0x0
+#define GSBI_SPI_CONFIG               0x30
+#endif
+
+#define SPI_CONFIG                    QSD_REG(0x0000) QUP_REG(0x0300)
+#define SPI_IO_CONTROL                QSD_REG(0x0004) QUP_REG(0x0304)
+#define SPI_IO_MODES                  QSD_REG(0x0008) QUP_REG(0x0008)
+#define SPI_SW_RESET                  QSD_REG(0x000C) QUP_REG(0x000C)
+#define SPI_TIME_OUT                  QSD_REG(0x0010) QUP_REG(0x0010)
+#define SPI_TIME_OUT_CURRENT          QSD_REG(0x0014) QUP_REG(0x0014)
+#define SPI_MX_OUTPUT_COUNT           QSD_REG(0x0018) QUP_REG(0x0100)
+#define SPI_MX_OUTPUT_CNT_CURRENT     QSD_REG(0x001C) QUP_REG(0x0104)
+#define SPI_MX_INPUT_COUNT            QSD_REG(0x0020) QUP_REG(0x0200)
+#define SPI_MX_INPUT_CNT_CURRENT      QSD_REG(0x0024) QUP_REG(0x0204)
+#define SPI_MX_READ_COUNT             QSD_REG(0x0028) QUP_REG(0x0208)
+#define SPI_MX_READ_CNT_CURRENT       QSD_REG(0x002C) QUP_REG(0x020C)
+#define SPI_OPERATIONAL               QSD_REG(0x0030) QUP_REG(0x0018)
+#define SPI_ERROR_FLAGS               QSD_REG(0x0034) QUP_REG(0x001C)
+#define SPI_ERROR_FLAGS_EN            QSD_REG(0x0038) QUP_REG(0x0020)
+#define SPI_DEASSERT_WAIT             QSD_REG(0x003C) QUP_REG(0x0310)
+#define SPI_OUTPUT_DEBUG              QSD_REG(0x0040) QUP_REG(0x0108)
+#define SPI_INPUT_DEBUG               QSD_REG(0x0044) QUP_REG(0x0210)
+#define SPI_TEST_CTRL                 QSD_REG(0x004C) QUP_REG(0x0024)
+#define SPI_OUTPUT_FIFO               QSD_REG(0x0100) QUP_REG(0x0110)
+#define SPI_INPUT_FIFO                QSD_REG(0x0200) QUP_REG(0x0218)
+#define SPI_STATE                     QSD_REG(SPI_OPERATIONAL) QUP_REG(0x0004)
+
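+/*
+ * The register offsets above carry both variants: QSD_REG()/QUP_REG() expand
+ * to their argument for the SPI core the driver is built for and to nothing
+ * for the other one. For example, SPI_CONFIG evaluates to 0x0000 on a QSD
+ * core and to 0x0300 on a QUP core.
+ */
+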
+/* SPI_CONFIG fields */
+#define SPI_CFG_INPUT_FIRST           0x00000200
+#define SPI_NO_INPUT                  0x00000080
+#define SPI_NO_OUTPUT                 0x00000040
+#define SPI_CFG_LOOPBACK              0x00000100
+#define SPI_CFG_N                     0x0000001F
+
+/* SPI_IO_CONTROL fields */
+#define SPI_IO_C_CLK_IDLE_HIGH        0x00000400
+#define SPI_IO_C_MX_CS_MODE           0x00000100
+#define SPI_IO_C_CS_N_POLARITY        0x000000F0
+#define SPI_IO_C_CS_N_POLARITY_0      0x00000010
+#define SPI_IO_C_CS_SELECT            0x0000000C
+#define SPI_IO_C_TRISTATE_CS          0x00000002
+#define SPI_IO_C_NO_TRI_STATE         0x00000001
+
+/* SPI_IO_MODES fields */
+#define SPI_IO_M_OUTPUT_BIT_SHIFT_EN  QSD_REG(0x00004000) QUP_REG(0x00010000)
+#define SPI_IO_M_PACK_EN              QSD_REG(0x00002000) QUP_REG(0x00008000)
+#define SPI_IO_M_UNPACK_EN            QSD_REG(0x00001000) QUP_REG(0x00004000)
+#define SPI_IO_M_INPUT_MODE           QSD_REG(0x00000C00) QUP_REG(0x00003000)
+#define SPI_IO_M_OUTPUT_MODE          QSD_REG(0x00000300) QUP_REG(0x00000C00)
+#define SPI_IO_M_INPUT_FIFO_SIZE      QSD_REG(0x000000C0) QUP_REG(0x00000380)
+#define SPI_IO_M_INPUT_BLOCK_SIZE     QSD_REG(0x00000030) QUP_REG(0x00000060)
+#define SPI_IO_M_OUTPUT_FIFO_SIZE     QSD_REG(0x0000000C) QUP_REG(0x0000001C)
+#define SPI_IO_M_OUTPUT_BLOCK_SIZE    QSD_REG(0x00000003) QUP_REG(0x00000003)
+
+#define INPUT_BLOCK_SZ_SHIFT          QSD_REG(4)          QUP_REG(5)
+#define INPUT_FIFO_SZ_SHIFT           QSD_REG(6)          QUP_REG(7)
+#define OUTPUT_BLOCK_SZ_SHIFT         QSD_REG(0)          QUP_REG(0)
+#define OUTPUT_FIFO_SZ_SHIFT          QSD_REG(2)          QUP_REG(2)
+#define OUTPUT_MODE_SHIFT             QSD_REG(8)          QUP_REG(10)
+#define INPUT_MODE_SHIFT              QSD_REG(10)         QUP_REG(12)
+
+/* SPI_OPERATIONAL fields */
+#define SPI_OP_MAX_INPUT_DONE_FLAG    0x00000800
+#define SPI_OP_MAX_OUTPUT_DONE_FLAG   0x00000400
+#define SPI_OP_INPUT_SERVICE_FLAG     0x00000200
+#define SPI_OP_OUTPUT_SERVICE_FLAG    0x00000100
+#define SPI_OP_INPUT_FIFO_FULL        0x00000080
+#define SPI_OP_OUTPUT_FIFO_FULL       0x00000040
+#define SPI_OP_IP_FIFO_NOT_EMPTY      0x00000020
+#define SPI_OP_OP_FIFO_NOT_EMPTY      0x00000010
+#define SPI_OP_STATE_VALID            0x00000004
+#define SPI_OP_STATE                  0x00000003
+
+#define SPI_OP_STATE_CLEAR_BITS       0x2
+enum msm_spi_state {
+	SPI_OP_STATE_RESET = 0x00000000,
+	SPI_OP_STATE_RUN   = 0x00000001,
+	SPI_OP_STATE_PAUSE = 0x00000003,
+};
+
+/* SPI_ERROR_FLAGS fields */
+#define SPI_ERR_OUTPUT_OVER_RUN_ERR   0x00000020
+#define SPI_ERR_INPUT_UNDER_RUN_ERR   0x00000010
+#define SPI_ERR_OUTPUT_UNDER_RUN_ERR  0x00000008
+#define SPI_ERR_INPUT_OVER_RUN_ERR    0x00000004
+#define SPI_ERR_CLK_OVER_RUN_ERR      0x00000002
+#define SPI_ERR_CLK_UNDER_RUN_ERR     0x00000001
+
+/* We don't allow transactions larger than 4K-64 or 64K-64 due to
+   mx_input/output_cnt register size */
+#define SPI_MAX_TRANSFERS             QSD_REG(0xFC0) QUP_REG(0xFC0)
+#define SPI_MAX_LEN                   (SPI_MAX_TRANSFERS * dd->bytes_per_word)
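+/* e.g. 0xFC0 = 4032 words, so an 8-bit-per-word transfer is capped at 4032
+   bytes per chunk and a 32-bit-per-word transfer at 16128 bytes */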
+
+#define SPI_NUM_CHIPSELECTS           4
+#define SPI_SUPPORTED_MODES  (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP)
+
+#define SPI_DELAY_THRESHOLD           1
+/* Default timeout is 10 milliseconds */
+#define SPI_DEFAULT_TIMEOUT           10
+/* 250 microseconds */
+#define SPI_TRYLOCK_DELAY             250
+
+/* Data Mover burst size */
+#define DM_BURST_SIZE                 16
+/* Data Mover commands should be aligned to 64 bit(8 bytes) */
+#define DM_BYTE_ALIGN                 8
+
+static char const * const spi_rsrcs[] = {
+	"spi_clk",
+	"spi_cs",
+	"spi_miso",
+	"spi_mosi"
+};
+
+enum msm_spi_mode {
+	SPI_FIFO_MODE  = 0x0,  /* 00 */
+	SPI_BLOCK_MODE = 0x1,  /* 01 */
+	SPI_DMOV_MODE  = 0x2,  /* 10 */
+	SPI_MODE_NONE  = 0xFF, /* invalid value */
+};
+
+/* Structures for Data Mover */
+struct spi_dmov_cmd {
+	dmov_box box;      /* data aligned to max(dm_burst_size, block_size)
+							   (<= fifo_size) */
+	dmov_s single_pad; /* data unaligned to max(dm_burst_size, block_size)
+			      padded to fit */
+	dma_addr_t cmd_ptr;
+};
+
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.3");
+MODULE_ALIAS("platform:"SPI_DRV_NAME);
+
+static struct pm_qos_request_list qos_req_list;
+
+#ifdef CONFIG_DEBUG_FS
+/* Used to create debugfs entries */
+static const struct {
+	const char *name;
+	mode_t mode;
+	int offset;
+} debugfs_spi_regs[] = {
+	{"config",                S_IRUGO | S_IWUSR, SPI_CONFIG},
+	{"io_control",            S_IRUGO | S_IWUSR, SPI_IO_CONTROL},
+	{"io_modes",              S_IRUGO | S_IWUSR, SPI_IO_MODES},
+	{"sw_reset",                        S_IWUSR, SPI_SW_RESET},
+	{"time_out",              S_IRUGO | S_IWUSR, SPI_TIME_OUT},
+	{"time_out_current",      S_IRUGO,           SPI_TIME_OUT_CURRENT},
+	{"mx_output_count",       S_IRUGO | S_IWUSR, SPI_MX_OUTPUT_COUNT},
+	{"mx_output_cnt_current", S_IRUGO,           SPI_MX_OUTPUT_CNT_CURRENT},
+	{"mx_input_count",        S_IRUGO | S_IWUSR, SPI_MX_INPUT_COUNT},
+	{"mx_input_cnt_current",  S_IRUGO,           SPI_MX_INPUT_CNT_CURRENT},
+	{"mx_read_count",         S_IRUGO | S_IWUSR, SPI_MX_READ_COUNT},
+	{"mx_read_cnt_current",   S_IRUGO,           SPI_MX_READ_CNT_CURRENT},
+	{"operational",           S_IRUGO | S_IWUSR, SPI_OPERATIONAL},
+	{"error_flags",           S_IRUGO | S_IWUSR, SPI_ERROR_FLAGS},
+	{"error_flags_en",        S_IRUGO | S_IWUSR, SPI_ERROR_FLAGS_EN},
+	{"deassert_wait",         S_IRUGO | S_IWUSR, SPI_DEASSERT_WAIT},
+	{"output_debug",          S_IRUGO,           SPI_OUTPUT_DEBUG},
+	{"input_debug",           S_IRUGO,           SPI_INPUT_DEBUG},
+	{"test_ctrl",             S_IRUGO | S_IWUSR, SPI_TEST_CTRL},
+	{"output_fifo",                     S_IWUSR, SPI_OUTPUT_FIFO},
+	{"input_fifo" ,           S_IRUSR,           SPI_INPUT_FIFO},
+	{"spi_state",             S_IRUGO | S_IWUSR, SPI_STATE},
+#if defined(CONFIG_SPI_QSD) || defined(CONFIG_SPI_QSD_MODULE)
+	{"fifo_word_cnt",         S_IRUGO,           SPI_FIFO_WORD_CNT},
+#elif defined(CONFIG_SPI_QUP) || defined(CONFIG_SPI_QUP_MODULE)
+	{"qup_config",            S_IRUGO | S_IWUSR, QUP_CONFIG},
+	{"qup_error_flags",       S_IRUGO | S_IWUSR, QUP_ERROR_FLAGS},
+	{"qup_error_flags_en",    S_IRUGO | S_IWUSR, QUP_ERROR_FLAGS_EN},
+	{"mx_write_cnt",          S_IRUGO | S_IWUSR, QUP_MX_WRITE_COUNT},
+	{"mx_write_cnt_current",  S_IRUGO,           QUP_MX_WRITE_CNT_CURRENT},
+	{"output_fifo_word_cnt",  S_IRUGO,           SPI_OUTPUT_FIFO_WORD_CNT},
+	{"input_fifo_word_cnt",   S_IRUGO,           SPI_INPUT_FIFO_WORD_CNT},
+#endif
+};
+#endif
+
+struct msm_spi {
+	u8                      *read_buf;
+	const u8                *write_buf;
+	void __iomem            *base;
+	void __iomem            *gsbi_base;
+	struct device           *dev;
+	spinlock_t               queue_lock;
+	struct mutex             core_lock;
+	struct list_head         queue;
+	struct workqueue_struct *workqueue;
+	struct work_struct       work_data;
+	struct spi_message      *cur_msg;
+	struct spi_transfer     *cur_transfer;
+	struct completion        transfer_complete;
+	struct clk              *clk;
+	struct clk              *pclk;
+	unsigned long            mem_phys_addr;
+	size_t                   mem_size;
+	unsigned long            gsbi_mem_phys_addr;
+	size_t                   gsbi_mem_size;
+	int                      input_fifo_size;
+	int                      output_fifo_size;
+	u32                      rx_bytes_remaining;
+	u32                      tx_bytes_remaining;
+	u32                      clock_speed;
+	u32                      irq_in;
+	int                      read_xfr_cnt;
+	int                      write_xfr_cnt;
+	int                      write_len;
+	int                      read_len;
+#if defined(CONFIG_SPI_QSD) || defined(CONFIG_SPI_QSD_MODULE)
+	u32                      irq_out;
+	u32                      irq_err;
+#endif
+	int                      bytes_per_word;
+	bool                     suspended;
+	bool                     transfer_pending;
+	wait_queue_head_t        continue_suspend;
+	/* DMA data */
+	enum msm_spi_mode        mode;
+	bool                     use_dma;
+	int                      tx_dma_chan;
+	int                      tx_dma_crci;
+	int                      rx_dma_chan;
+	int                      rx_dma_crci;
+	/* Data Mover Commands */
+	struct spi_dmov_cmd      *tx_dmov_cmd;
+	struct spi_dmov_cmd      *rx_dmov_cmd;
+	/* Physical address of the tx dmov box command */
+	dma_addr_t               tx_dmov_cmd_dma;
+	dma_addr_t               rx_dmov_cmd_dma;
+	struct msm_dmov_cmd      tx_hdr;
+	struct msm_dmov_cmd      rx_hdr;
+	int                      input_block_size;
+	int                      output_block_size;
+	int                      burst_size;
+	atomic_t                 rx_irq_called;
+	/* Used to pad messages unaligned to block size */
+	u8                       *tx_padding;
+	dma_addr_t               tx_padding_dma;
+	u8                       *rx_padding;
+	dma_addr_t               rx_padding_dma;
+	u32                      unaligned_len;
+	/* DMA statistics */
+	int                      stat_dmov_tx_err;
+	int                      stat_dmov_rx_err;
+	int                      stat_rx;
+	int                      stat_dmov_rx;
+	int                      stat_tx;
+	int                      stat_dmov_tx;
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *dent_spi;
+	struct dentry *debugfs_spi_regs[ARRAY_SIZE(debugfs_spi_regs)];
+#endif
+	struct msm_spi_platform_data *pdata; /* Platform data */
+	/* Remote Spinlock Data */
+	bool                     use_rlock;
+	remote_mutex_t           r_lock;
+	uint32_t                 pm_lat;
+	/* When set indicates multiple transfers in a single message */
+	bool                     multi_xfr;
+	bool                     done;
+	u32                      cur_msg_len;
+	/* Used in FIFO mode to keep track of the transfer being processed */
+	struct spi_transfer     *cur_tx_transfer;
+	struct spi_transfer     *cur_rx_transfer;
+	/* Temporary buffer used for WR-WR or WR-RD transfers */
+	u8                      *temp_buf;
+	/* GPIO pin numbers for SPI clk, cs, miso and mosi */
+	int                      spi_gpios[ARRAY_SIZE(spi_rsrcs)];
+};
+
+/* Forward declaration */
+static irqreturn_t msm_spi_input_irq(int irq, void *dev_id);
+static irqreturn_t msm_spi_output_irq(int irq, void *dev_id);
+static irqreturn_t msm_spi_error_irq(int irq, void *dev_id);
+static inline int msm_spi_set_state(struct msm_spi *dd,
+				    enum msm_spi_state state);
+static void msm_spi_write_word_to_fifo(struct msm_spi *dd);
+static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd);
+
+#if defined(CONFIG_SPI_QSD) || defined(CONFIG_SPI_QSD_MODULE)
+/* Interrupt Handling */
+static inline int msm_spi_get_irq_data(struct msm_spi *dd,
+				       struct platform_device *pdev)
+{
+	dd->irq_in  = platform_get_irq_byname(pdev, "spi_irq_in");
+	dd->irq_out = platform_get_irq_byname(pdev, "spi_irq_out");
+	dd->irq_err = platform_get_irq_byname(pdev, "spi_irq_err");
+	if ((dd->irq_in < 0) || (dd->irq_out < 0) || (dd->irq_err < 0))
+		return -1;
+	return 0;
+}
+
+static inline int msm_spi_get_gsbi_resource(struct msm_spi *dd,
+					    struct platform_device *pdev)
+{
+	return 0;
+}
+
+static inline int msm_spi_request_gsbi(struct msm_spi *dd) { return 0; }
+static inline void msm_spi_release_gsbi(struct msm_spi *dd) {}
+static inline void msm_spi_init_gsbi(struct msm_spi *dd) {}
+
+static inline void msm_spi_disable_irqs(struct msm_spi *dd)
+{
+	disable_irq(dd->irq_in);
+	disable_irq(dd->irq_out);
+	disable_irq(dd->irq_err);
+}
+
+static inline void msm_spi_enable_irqs(struct msm_spi *dd)
+{
+	enable_irq(dd->irq_in);
+	enable_irq(dd->irq_out);
+	enable_irq(dd->irq_err);
+}
+
+static inline int msm_spi_request_irq(struct msm_spi *dd,
+				      const char *name,
+				      struct spi_master *master)
+{
+	int rc;
+	rc = request_irq(dd->irq_in, msm_spi_input_irq, IRQF_TRIGGER_RISING,
+			   name, dd);
+	if (rc)
+		goto error_irq1;
+	rc = request_irq(dd->irq_out, msm_spi_output_irq, IRQF_TRIGGER_RISING,
+			   name, dd);
+	if (rc)
+		goto error_irq2;
+	rc = request_irq(dd->irq_err, msm_spi_error_irq, IRQF_TRIGGER_RISING,
+			   name, master);
+	if (rc)
+		goto error_irq3;
+	return 0;
+
+error_irq3:
+	free_irq(dd->irq_out, dd);
+error_irq2:
+	free_irq(dd->irq_in, dd);
+error_irq1:
+	return rc;
+}
+
+static inline void msm_spi_free_irq(struct msm_spi *dd,
+				    struct spi_master *master)
+{
+	free_irq(dd->irq_err, master);
+	free_irq(dd->irq_out, dd);
+	free_irq(dd->irq_in, dd);
+}
+
+static inline void msm_spi_get_clk_err(struct msm_spi *dd, u32 *spi_err) {}
+static inline void msm_spi_ack_clk_err(struct msm_spi *dd) {}
+static inline void msm_spi_set_qup_config(struct msm_spi *dd, int bpw) {}
+
+static inline int msm_spi_prepare_for_write(struct msm_spi *dd) { return 0; }
+static inline void msm_spi_start_write(struct msm_spi *dd, u32 read_count)
+{
+	msm_spi_write_word_to_fifo(dd);
+}
+static inline void msm_spi_set_write_count(struct msm_spi *dd, int val) {}
+
+static inline void msm_spi_complete(struct msm_spi *dd)
+{
+	complete(&dd->transfer_complete);
+}
+
+static inline void msm_spi_enable_error_flags(struct msm_spi *dd)
+{
+	writel_relaxed(0x0000007B, dd->base + SPI_ERROR_FLAGS_EN);
+}
+
+static inline void msm_spi_clear_error_flags(struct msm_spi *dd)
+{
+	writel_relaxed(0x0000007F, dd->base + SPI_ERROR_FLAGS);
+}
+
+#elif defined(CONFIG_SPI_QUP) || defined(CONFIG_SPI_QUP_MODULE)
+
+/* Interrupt Handling */
+/* In QUP the same interrupt line is used for input, output and error */
+static inline int msm_spi_get_irq_data(struct msm_spi *dd,
+				       struct platform_device *pdev)
+{
+	dd->irq_in  = platform_get_irq_byname(pdev, "spi_irq_in");
+	if (dd->irq_in < 0)
+		return -1;
+	return 0;
+}
+
+static inline int msm_spi_get_gsbi_resource(struct msm_spi *dd,
+				       struct platform_device *pdev)
+{
+	struct resource *resource;
+
+	resource  = platform_get_resource_byname(pdev,
+						 IORESOURCE_MEM, "gsbi_base");
+	if (!resource)
+		return -ENXIO;
+	dd->gsbi_mem_phys_addr = resource->start;
+	dd->gsbi_mem_size = resource_size(resource);
+
+	return 0;
+}
+
+static inline void msm_spi_release_gsbi(struct msm_spi *dd)
+{
+	iounmap(dd->gsbi_base);
+	release_mem_region(dd->gsbi_mem_phys_addr, dd->gsbi_mem_size);
+}
+
+static inline int msm_spi_request_gsbi(struct msm_spi *dd)
+{
+	if (!request_mem_region(dd->gsbi_mem_phys_addr, dd->gsbi_mem_size,
+				SPI_DRV_NAME)) {
+		return -ENXIO;
+	}
+	dd->gsbi_base = ioremap(dd->gsbi_mem_phys_addr, dd->gsbi_mem_size);
+	if (!dd->gsbi_base) {
+		release_mem_region(dd->gsbi_mem_phys_addr, dd->gsbi_mem_size);
+		return -ENXIO;
+	}
+	return 0;
+}
+
+static inline void msm_spi_init_gsbi(struct msm_spi *dd)
+{
+	/* Set GSBI to SPI mode, and CRCI_MUX_CTRL to SPI CRCI ports */
+	writel_relaxed(GSBI_SPI_CONFIG, dd->gsbi_base + GSBI_CTRL_REG);
+}
+
+/* Figure out which irq occurred and call the relevant functions */
+static irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
+{
+	struct msm_spi *dd = dev_id;
+	irqreturn_t ret = IRQ_NONE;
+	u32 op;
+
+	if (readl_relaxed(dd->base + SPI_ERROR_FLAGS) ||
+	    readl_relaxed(dd->base + QUP_ERROR_FLAGS)) {
+		struct spi_master *master = dev_get_drvdata(dd->dev);
+		ret |= msm_spi_error_irq(irq, master);
+	}
+
+	op = readl_relaxed(dd->base + SPI_OPERATIONAL);
+	if (op & SPI_OP_INPUT_SERVICE_FLAG) {
+		writel_relaxed(SPI_OP_INPUT_SERVICE_FLAG,
+			       dd->base + SPI_OPERATIONAL);
+		/*
+		 * Ensure service flag was cleared before further
+		 * processing of interrupt.
+		 */
+		mb();
+		ret |= msm_spi_input_irq(irq, dev_id);
+	}
+
+	if (op & SPI_OP_OUTPUT_SERVICE_FLAG) {
+		writel_relaxed(SPI_OP_OUTPUT_SERVICE_FLAG,
+			       dd->base + SPI_OPERATIONAL);
+		/*
+		 * Ensure service flag was cleared before further
+		 * processing of interrupt.
+		 */
+		mb();
+		ret |= msm_spi_output_irq(irq, dev_id);
+	}
+
+	if (dd->done) {
+		complete(&dd->transfer_complete);
+		dd->done = 0;
+	}
+	return ret;
+}
+
+static inline int msm_spi_request_irq(struct msm_spi *dd,
+				      const char *name,
+				      struct spi_master *master)
+{
+	return request_irq(dd->irq_in, msm_spi_qup_irq, IRQF_TRIGGER_HIGH,
+			   name, dd);
+}
+
+static inline void msm_spi_free_irq(struct msm_spi *dd,
+				    struct spi_master *master)
+{
+	free_irq(dd->irq_in, dd);
+}
+
+static inline void msm_spi_free_output_irq(struct msm_spi *dd) { }
+static inline void msm_spi_free_error_irq(struct msm_spi *dd,
+					  struct spi_master *master) { }
+
+static inline void msm_spi_disable_irqs(struct msm_spi *dd)
+{
+	disable_irq(dd->irq_in);
+}
+
+static inline void msm_spi_enable_irqs(struct msm_spi *dd)
+{
+	enable_irq(dd->irq_in);
+}
+
+static inline void msm_spi_get_clk_err(struct msm_spi *dd, u32 *spi_err)
+{
+	*spi_err = readl_relaxed(dd->base + QUP_ERROR_FLAGS);
+}
+
+static inline void msm_spi_ack_clk_err(struct msm_spi *dd)
+{
+	writel_relaxed(QUP_ERR_MASK, dd->base + QUP_ERROR_FLAGS);
+}
+
+static inline void msm_spi_add_configs(struct msm_spi *dd, u32 *config, int n);
+
+/* QUP has no_input, no_output, and N bits at QUP_CONFIG */
+static inline void msm_spi_set_qup_config(struct msm_spi *dd, int bpw)
+{
+	u32 qup_config = readl_relaxed(dd->base + QUP_CONFIG);
+
+	msm_spi_add_configs(dd, &qup_config, bpw-1);
+	writel_relaxed(qup_config | QUP_CONFIG_SPI_MODE,
+		       dd->base + QUP_CONFIG);
+}
+
+static inline int msm_spi_prepare_for_write(struct msm_spi *dd)
+{
+	if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
+		return -1;
+	if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
+		return -1;
+	return 0;
+}
+
+static inline void msm_spi_start_write(struct msm_spi *dd, u32 read_count)
+{
+	if (read_count <= dd->input_fifo_size)
+		msm_spi_write_rmn_to_fifo(dd);
+	else
+		msm_spi_write_word_to_fifo(dd);
+}
+
+static inline void msm_spi_set_write_count(struct msm_spi *dd, int val)
+{
+	writel_relaxed(val, dd->base + QUP_MX_WRITE_COUNT);
+}
+
+static inline void msm_spi_complete(struct msm_spi *dd)
+{
+	dd->done = 1;
+}
+
+static inline void msm_spi_enable_error_flags(struct msm_spi *dd)
+{
+	writel_relaxed(0x00000078, dd->base + SPI_ERROR_FLAGS_EN);
+}
+
+static inline void msm_spi_clear_error_flags(struct msm_spi *dd)
+{
+	writel_relaxed(0x0000007C, dd->base + SPI_ERROR_FLAGS);
+}
+
+#endif
+
+static inline int msm_spi_request_gpios(struct msm_spi *dd)
+{
+	int i;
+	int result = 0;
+
+	for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
+		if (dd->spi_gpios[i] >= 0) {
+			result = gpio_request(dd->spi_gpios[i], spi_rsrcs[i]);
+			if (result) {
+				pr_err("%s: gpio_request for pin %d failed\
+					with error%d\n", __func__,
+					dd->spi_gpios[i], result);
+				goto error;
+			}
+		}
+	}
+	return 0;
+
+error:
+	for (; --i >= 0;) {
+		if (dd->spi_gpios[i] >= 0)
+			gpio_free(dd->spi_gpios[i]);
+	}
+	return result;
+}
+
+static inline void msm_spi_free_gpios(struct msm_spi *dd)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
+		if (dd->spi_gpios[i] >= 0)
+			gpio_free(dd->spi_gpios[i]);
+	}
+}
+
+static void msm_spi_clock_set(struct msm_spi *dd, int speed)
+{
+	int rc;
+
+	rc = clk_set_rate(dd->clk, speed);
+	if (!rc)
+		dd->clock_speed = speed;
+}
+
+static int msm_spi_calculate_size(int *fifo_size,
+				  int *block_size,
+				  int block,
+				  int mult)
+{
+	int words;
+
+	switch (block) {
+	case 0:
+		words = 1; /* 4 bytes */
+		break;
+	case 1:
+		words = 4; /* 16 bytes */
+		break;
+	case 2:
+		words = 8; /* 32 bytes */
+		break;
+	default:
+		return -1;
+	}
+	switch (mult) {
+	case 0:
+		*fifo_size = words * 2;
+		break;
+	case 1:
+		*fifo_size = words * 4;
+		break;
+	case 2:
+		*fifo_size = words * 8;
+		break;
+	case 3:
+		*fifo_size = words * 16;
+		break;
+	default:
+		return -1;
+	}
+	*block_size = words * sizeof(u32); /* in bytes */
+	return 0;
+}
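+
+/*
+ * For example, block = 1 and mult = 2 in the SPI_IO_MODES encoding describe
+ * 4-word (16-byte) blocks and a 32-word FIFO.
+ */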
+
+static void get_next_transfer(struct msm_spi *dd)
+{
+	struct spi_transfer *t = dd->cur_transfer;
+
+	if (t->transfer_list.next != &dd->cur_msg->transfers) {
+		dd->cur_transfer = list_entry(t->transfer_list.next,
+					      struct spi_transfer,
+					      transfer_list);
+		dd->write_buf          = dd->cur_transfer->tx_buf;
+		dd->read_buf           = dd->cur_transfer->rx_buf;
+	}
+}
+
+static void __init msm_spi_calculate_fifo_size(struct msm_spi *dd)
+{
+	u32 spi_iom;
+	int block;
+	int mult;
+
+	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
+
+	block = (spi_iom & SPI_IO_M_INPUT_BLOCK_SIZE) >> INPUT_BLOCK_SZ_SHIFT;
+	mult = (spi_iom & SPI_IO_M_INPUT_FIFO_SIZE) >> INPUT_FIFO_SZ_SHIFT;
+	if (msm_spi_calculate_size(&dd->input_fifo_size, &dd->input_block_size,
+				   block, mult)) {
+		goto fifo_size_err;
+	}
+
+	block = (spi_iom & SPI_IO_M_OUTPUT_BLOCK_SIZE) >> OUTPUT_BLOCK_SZ_SHIFT;
+	mult = (spi_iom & SPI_IO_M_OUTPUT_FIFO_SIZE) >> OUTPUT_FIFO_SZ_SHIFT;
+	if (msm_spi_calculate_size(&dd->output_fifo_size,
+				   &dd->output_block_size, block, mult)) {
+		goto fifo_size_err;
+	}
+	/* DM mode is not available for this block size */
+	if (dd->input_block_size == 4 || dd->output_block_size == 4)
+		dd->use_dma = 0;
+
+	/* DM mode is currently unsupported for different block sizes */
+	if (dd->input_block_size != dd->output_block_size)
+		dd->use_dma = 0;
+
+	if (dd->use_dma)
+		dd->burst_size = max(dd->input_block_size, DM_BURST_SIZE);
+
+	return;
+
+fifo_size_err:
+	dd->use_dma = 0;
+	printk(KERN_WARNING "%s: invalid FIFO size, SPI_IO_MODES=0x%x\n",
+	       __func__, spi_iom);
+	return;
+}
+
+static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
+{
+	u32   data_in;
+	int   i;
+	int   shift;
+
+	data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
+	if (dd->read_buf) {
+		for (i = 0; (i < dd->bytes_per_word) &&
+			     dd->rx_bytes_remaining; i++) {
+			/* The data format depends on bytes_per_word:
+			   4 bytes: 0x12345678
+			   3 bytes: 0x00123456
+			   2 bytes: 0x00001234
+			   1 byte : 0x00000012
+			*/
+			shift = 8 * (dd->bytes_per_word - i - 1);
+			*dd->read_buf++ = (data_in & (0xFF << shift)) >> shift;
+			dd->rx_bytes_remaining--;
+		}
+	} else {
+		if (dd->rx_bytes_remaining >= dd->bytes_per_word)
+			dd->rx_bytes_remaining -= dd->bytes_per_word;
+		else
+			dd->rx_bytes_remaining = 0;
+	}
+	dd->read_xfr_cnt++;
+	if (dd->multi_xfr) {
+		if (!dd->rx_bytes_remaining)
+			dd->read_xfr_cnt = 0;
+		else if ((dd->read_xfr_cnt * dd->bytes_per_word) ==
+						dd->read_len) {
+			struct spi_transfer *t = dd->cur_rx_transfer;
+			if (t->transfer_list.next != &dd->cur_msg->transfers) {
+				t = list_entry(t->transfer_list.next,
+						struct spi_transfer,
+						transfer_list);
+				dd->read_buf = t->rx_buf;
+				dd->read_len = t->len;
+				dd->read_xfr_cnt = 0;
+				dd->cur_rx_transfer = t;
+			}
+		}
+	}
+}
+
+static inline bool msm_spi_is_valid_state(struct msm_spi *dd)
+{
+	u32 spi_op = readl_relaxed(dd->base + SPI_STATE);
+
+	return spi_op & SPI_OP_STATE_VALID;
+}
+
+static inline int msm_spi_wait_valid(struct msm_spi *dd)
+{
+	unsigned long delay = 0;
+	unsigned long timeout = 0;
+
+	if (dd->clock_speed == 0)
+		return -EINVAL;
+	/*
+	 * Based on the SPI clock speed, sufficient time
+	 * should be given for the SPI state transition
+	 * to occur
+	 */
+	delay = (10 * USEC_PER_SEC) / dd->clock_speed;
+	/*
+	 * For small delay values, the default timeout would
+	 * be one jiffy
+	 */
+	if (delay < SPI_DELAY_THRESHOLD)
+		delay = SPI_DELAY_THRESHOLD;
+	timeout = jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT);
+	while (!msm_spi_is_valid_state(dd)) {
+		if (time_after(jiffies, timeout)) {
+			if (dd->cur_msg)
+				dd->cur_msg->status = -EIO;
+			dev_err(dd->dev, "%s: SPI operational state not valid"
+				"\n", __func__);
+			return -1;
+		}
+		/*
+		 * For smaller values of delay, context switch time
+		 * would negate the usage of usleep
+		 */
+		if (delay > 20)
+			usleep(delay);
+		else if (delay)
+			udelay(delay);
+	}
+	return 0;
+}
+
+static inline int msm_spi_set_state(struct msm_spi *dd,
+				    enum msm_spi_state state)
+{
+	enum msm_spi_state cur_state;
+	if (msm_spi_wait_valid(dd))
+		return -1;
+	cur_state = readl_relaxed(dd->base + SPI_STATE);
+	/* Per spec:
+	   For PAUSE_STATE to RESET_STATE, two writes of (10) are required */
+	if (((cur_state & SPI_OP_STATE) == SPI_OP_STATE_PAUSE) &&
+			(state == SPI_OP_STATE_RESET)) {
+		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
+		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
+	} else {
+		writel_relaxed((cur_state & ~SPI_OP_STATE) | state,
+		       dd->base + SPI_STATE);
+	}
+	if (msm_spi_wait_valid(dd))
+		return -1;
+
+	return 0;
+}
+
+static inline void msm_spi_add_configs(struct msm_spi *dd, u32 *config, int n)
+{
+	*config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);
+
+	if (n != (*config & SPI_CFG_N))
+		*config = (*config & ~SPI_CFG_N) | n;
+
+	if ((dd->mode == SPI_DMOV_MODE) && (!dd->read_len)) {
+		if (dd->read_buf == NULL)
+			*config |= SPI_NO_INPUT;
+		if (dd->write_buf == NULL)
+			*config |= SPI_NO_OUTPUT;
+	}
+}
+
+static void msm_spi_set_config(struct msm_spi *dd, int bpw)
+{
+	u32 spi_config;
+
+	spi_config = readl_relaxed(dd->base + SPI_CONFIG);
+
+	if (dd->cur_msg->spi->mode & SPI_CPHA)
+		spi_config &= ~SPI_CFG_INPUT_FIRST;
+	else
+		spi_config |= SPI_CFG_INPUT_FIRST;
+	if (dd->cur_msg->spi->mode & SPI_LOOP)
+		spi_config |= SPI_CFG_LOOPBACK;
+	else
+		spi_config &= ~SPI_CFG_LOOPBACK;
+	msm_spi_add_configs(dd, &spi_config, bpw-1);
+	writel_relaxed(spi_config, dd->base + SPI_CONFIG);
+	msm_spi_set_qup_config(dd, bpw);
+}
+
+static void msm_spi_setup_dm_transfer(struct msm_spi *dd)
+{
+	dmov_box *box;
+	int bytes_to_send, num_rows, bytes_sent;
+	u32 num_transfers;
+
+	atomic_set(&dd->rx_irq_called, 0);
+	if (dd->write_len && !dd->read_len) {
+		/* WR-WR transfer */
+		bytes_sent = dd->cur_msg_len - dd->tx_bytes_remaining;
+		dd->write_buf = dd->temp_buf;
+	} else {
+		bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
+		/* For WR-RD transfer, bytes_sent can be negative */
+		if (bytes_sent < 0)
+			bytes_sent = 0;
+	}
+
+	/* We'll send in chunks of SPI_MAX_LEN if larger */
+	bytes_to_send = dd->tx_bytes_remaining / SPI_MAX_LEN ?
+			  SPI_MAX_LEN : dd->tx_bytes_remaining;
+	num_transfers = DIV_ROUND_UP(bytes_to_send, dd->bytes_per_word);
+	dd->unaligned_len = bytes_to_send % dd->burst_size;
+	num_rows = bytes_to_send / dd->burst_size;
+
+	dd->mode = SPI_DMOV_MODE;
+
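+	/* Program the box-mode DM commands: each row moves one burst_size
+	   chunk; any unaligned tail is handled by the single_pad command
+	   set up below. */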
+	if (num_rows) {
+		/* src in 16 MSB, dst in 16 LSB */
+		box = &dd->tx_dmov_cmd->box;
+		box->src_row_addr = dd->cur_transfer->tx_dma + bytes_sent;
+		box->src_dst_len = (dd->burst_size << 16) | dd->burst_size;
+		box->num_rows = (num_rows << 16) | num_rows;
+		box->row_offset = (dd->burst_size << 16) | 0;
+
+		box = &dd->rx_dmov_cmd->box;
+		box->dst_row_addr = dd->cur_transfer->rx_dma + bytes_sent;
+		box->src_dst_len = (dd->burst_size << 16) | dd->burst_size;
+		box->num_rows = (num_rows << 16) | num_rows;
+		box->row_offset = (0 << 16) | dd->burst_size;
+
+		dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
+				   DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
+				   offsetof(struct spi_dmov_cmd, box));
+		dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
+				   DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
+				   offsetof(struct spi_dmov_cmd, box));
+	} else {
+		dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
+				   DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
+				   offsetof(struct spi_dmov_cmd, single_pad));
+		dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
+				   DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
+				   offsetof(struct spi_dmov_cmd, single_pad));
+	}
+
+	if (!dd->unaligned_len) {
+		dd->tx_dmov_cmd->box.cmd |= CMD_LC;
+		dd->rx_dmov_cmd->box.cmd |= CMD_LC;
+	} else {
+		dmov_s *tx_cmd = &(dd->tx_dmov_cmd->single_pad);
+		dmov_s *rx_cmd = &(dd->rx_dmov_cmd->single_pad);
+		u32 offset = dd->cur_transfer->len - dd->unaligned_len;
+
+		if ((dd->multi_xfr) && (dd->read_len <= 0))
+			offset = dd->cur_msg_len - dd->unaligned_len;
+
+		dd->tx_dmov_cmd->box.cmd &= ~CMD_LC;
+		dd->rx_dmov_cmd->box.cmd &= ~CMD_LC;
+
+		memset(dd->tx_padding, 0, dd->burst_size);
+		memset(dd->rx_padding, 0, dd->burst_size);
+		if (dd->write_buf)
+			memcpy(dd->tx_padding, dd->write_buf + offset,
+			       dd->unaligned_len);
+
+		tx_cmd->src = dd->tx_padding_dma;
+		rx_cmd->dst = dd->rx_padding_dma;
+		tx_cmd->len = rx_cmd->len = dd->burst_size;
+	}
+	/* This also takes care of the padding dummy buffer:
+	   since the count is programmed to the correct length, the
+	   dummy bytes won't actually be sent */
+	if (dd->multi_xfr) {
+		u32 write_transfers = 0;
+		u32 read_transfers = 0;
+
+		if (dd->write_len > 0) {
+			write_transfers = DIV_ROUND_UP(dd->write_len,
+						       dd->bytes_per_word);
+			writel_relaxed(write_transfers,
+				       dd->base + SPI_MX_OUTPUT_COUNT);
+		}
+		if (dd->read_len > 0) {
+			/*
+			 * The read following a write transfer must take
+			 * into account that the bytes pertaining to the
+			 * write transfer need to be discarded before the
+			 * actual read begins.
+			 */
+			read_transfers = DIV_ROUND_UP(dd->read_len +
+						      dd->write_len,
+						      dd->bytes_per_word);
+			writel_relaxed(read_transfers,
+				       dd->base + SPI_MX_INPUT_COUNT);
+		}
+	} else {
+		if (dd->write_buf)
+			writel_relaxed(num_transfers,
+				       dd->base + SPI_MX_OUTPUT_COUNT);
+		if (dd->read_buf)
+			writel_relaxed(num_transfers,
+				       dd->base + SPI_MX_INPUT_COUNT);
+	}
+}
+
+static void msm_spi_enqueue_dm_commands(struct msm_spi *dd)
+{
+	dma_coherent_pre_ops();
+	if (dd->write_buf)
+		msm_dmov_enqueue_cmd(dd->tx_dma_chan, &dd->tx_hdr);
+	if (dd->read_buf)
+		msm_dmov_enqueue_cmd(dd->rx_dma_chan, &dd->rx_hdr);
+}
+
+/* The SPI core can send a maximum of 4K per transfer because of a HW problem
+   with infinite mode.
+   Therefore, we send several chunks of 3K or less (depending on how
+   much is left).
+   Upon completion we send the next chunk, or complete the transfer if
+   everything is finished.
+*/
+static int msm_spi_dm_send_next(struct msm_spi *dd)
+{
+	/* By now we should have sent all the bytes in FIFO mode;
+	 * however, to be safe, we check anyway.
+	 */
+	if (dd->mode != SPI_DMOV_MODE)
+		return 0;
+
+	/* We need to send more chunks, if we sent max last time */
+	if (dd->tx_bytes_remaining > SPI_MAX_LEN) {
+		dd->tx_bytes_remaining -= SPI_MAX_LEN;
+		if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
+			return 0;
+		dd->read_len = dd->write_len = 0;
+		msm_spi_setup_dm_transfer(dd);
+		msm_spi_enqueue_dm_commands(dd);
+		if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
+			return 0;
+		return 1;
+	} else if (dd->read_len && dd->write_len) {
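+		/*
+		 * WR-RD case: if more transfers remain in the message,
+		 * advance to the next transfer and start another DM
+		 * transaction through the temporary buffer.
+		 */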
+		dd->tx_bytes_remaining -= dd->cur_transfer->len;
+		if (list_is_last(&dd->cur_transfer->transfer_list,
+					    &dd->cur_msg->transfers))
+			return 0;
+		get_next_transfer(dd);
+		if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
+			return 0;
+		dd->tx_bytes_remaining = dd->read_len + dd->write_len;
+		dd->read_buf = dd->temp_buf;
+		dd->read_len = dd->write_len = -1;
+		msm_spi_setup_dm_transfer(dd);
+		msm_spi_enqueue_dm_commands(dd);
+		if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
+			return 0;
+		return 1;
+	}
+	return 0;
+}
+
+static inline void msm_spi_ack_transfer(struct msm_spi *dd)
+{
+	writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
+		       SPI_OP_MAX_OUTPUT_DONE_FLAG,
+		       dd->base + SPI_OPERATIONAL);
+	/* Ensure done flag was cleared before proceeding further */
+	mb();
+}
+
+static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
+{
+	struct msm_spi	       *dd = dev_id;
+
+	dd->stat_rx++;
+
+	if (dd->mode == SPI_MODE_NONE)
+		return IRQ_HANDLED;
+
+	if (dd->mode == SPI_DMOV_MODE) {
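+		/*
+		 * In DM mode the transfer is complete only when both the
+		 * input and output MAX done flags are set (for the
+		 * directions actually in use).
+		 */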
+		u32 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
+		if ((!dd->read_buf || op & SPI_OP_MAX_INPUT_DONE_FLAG) &&
+		    (!dd->write_buf || op & SPI_OP_MAX_OUTPUT_DONE_FLAG)) {
+			msm_spi_ack_transfer(dd);
+			if (dd->unaligned_len == 0) {
+				if (atomic_inc_return(&dd->rx_irq_called) == 1)
+					return IRQ_HANDLED;
+			}
+			msm_spi_complete(dd);
+			return IRQ_HANDLED;
+		}
+		return IRQ_NONE;
+	}
+
+	if (dd->mode == SPI_FIFO_MODE) {
+		while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
+			SPI_OP_IP_FIFO_NOT_EMPTY) &&
+			(dd->rx_bytes_remaining > 0)) {
+			msm_spi_read_word_from_fifo(dd);
+		}
+		if (dd->rx_bytes_remaining == 0)
+			msm_spi_complete(dd);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
+{
+	u32    word;
+	u8     byte;
+	int    i;
+
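+	/* Pack up to bytes_per_word bytes MSB-first into a single word,
+	   mirroring the layout described in msm_spi_read_word_from_fifo(). */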
+	word = 0;
+	if (dd->write_buf) {
+		for (i = 0; (i < dd->bytes_per_word) &&
+			     dd->tx_bytes_remaining; i++) {
+			dd->tx_bytes_remaining--;
+			byte = *dd->write_buf++;
+			word |= (byte << (BITS_PER_BYTE * (3 - i)));
+		}
+	} else
+		if (dd->tx_bytes_remaining > dd->bytes_per_word)
+			dd->tx_bytes_remaining -= dd->bytes_per_word;
+		else
+			dd->tx_bytes_remaining = 0;
+	dd->write_xfr_cnt++;
+	if (dd->multi_xfr) {
+		if (!dd->tx_bytes_remaining)
+			dd->write_xfr_cnt = 0;
+		else if ((dd->write_xfr_cnt * dd->bytes_per_word) ==
+						dd->write_len) {
+			struct spi_transfer *t = dd->cur_tx_transfer;
+			if (t->transfer_list.next != &dd->cur_msg->transfers) {
+				t = list_entry(t->transfer_list.next,
+						struct spi_transfer,
+						transfer_list);
+				dd->write_buf = t->tx_buf;
+				dd->write_len = t->len;
+				dd->write_xfr_cnt = 0;
+				dd->cur_tx_transfer = t;
+			}
+		}
+	}
+	writel_relaxed(word, dd->base + SPI_OUTPUT_FIFO);
+}
+
+static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
+{
+	int count = 0;
+
+	while ((dd->tx_bytes_remaining > 0) && (count < dd->input_fifo_size) &&
+	       !(readl_relaxed(dd->base + SPI_OPERATIONAL) &
+		SPI_OP_OUTPUT_FIFO_FULL)) {
+		msm_spi_write_word_to_fifo(dd);
+		count++;
+	}
+}
+
+static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
+{
+	struct msm_spi	       *dd = dev_id;
+
+	dd->stat_tx++;
+
+	if (dd->mode == SPI_MODE_NONE)
+		return IRQ_HANDLED;
+
+	if (dd->mode == SPI_DMOV_MODE) {
+		/* TX_ONLY transactions are handled here.
+		   This is the only place we complete on tx rather than rx. */
+		if (dd->read_buf == NULL &&
+		    readl_relaxed(dd->base + SPI_OPERATIONAL) &
+		    SPI_OP_MAX_OUTPUT_DONE_FLAG) {
+			msm_spi_ack_transfer(dd);
+			msm_spi_complete(dd);
+			return IRQ_HANDLED;
+		}
+		return IRQ_NONE;
+	}
+
+	/* Output FIFO is empty. Transmit any outstanding write data. */
+	if (dd->mode == SPI_FIFO_MODE)
+		msm_spi_write_rmn_to_fifo(dd);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t msm_spi_error_irq(int irq, void *dev_id)
+{
+	struct spi_master	*master = dev_id;
+	struct msm_spi          *dd = spi_master_get_devdata(master);
+	u32                      spi_err;
+
+	spi_err = readl_relaxed(dd->base + SPI_ERROR_FLAGS);
+	if (spi_err & SPI_ERR_OUTPUT_OVER_RUN_ERR)
+		dev_warn(master->dev.parent, "SPI output overrun error\n");
+	if (spi_err & SPI_ERR_INPUT_UNDER_RUN_ERR)
+		dev_warn(master->dev.parent, "SPI input underrun error\n");
+	if (spi_err & SPI_ERR_OUTPUT_UNDER_RUN_ERR)
+		dev_warn(master->dev.parent, "SPI output underrun error\n");
+	msm_spi_get_clk_err(dd, &spi_err);
+	if (spi_err & SPI_ERR_CLK_OVER_RUN_ERR)
+		dev_warn(master->dev.parent, "SPI clock overrun error\n");
+	if (spi_err & SPI_ERR_CLK_UNDER_RUN_ERR)
+		dev_warn(master->dev.parent, "SPI clock underrun error\n");
+	msm_spi_clear_error_flags(dd);
+	msm_spi_ack_clk_err(dd);
+	/* Ensure clearing of QUP_ERROR_FLAGS was completed */
+	mb();
+	return IRQ_HANDLED;
+}
+
+static int msm_spi_map_dma_buffers(struct msm_spi *dd)
+{
+	struct device *dev;
+	struct spi_transfer *first_xfr;
+	struct spi_transfer *nxt_xfr;
+	void *tx_buf, *rx_buf;
+	unsigned tx_len, rx_len;
+	int ret = -EINVAL;
+
+	dev = &dd->cur_msg->spi->dev;
+	first_xfr = dd->cur_transfer;
+	tx_buf = (void *)first_xfr->tx_buf;
+	rx_buf = first_xfr->rx_buf;
+	tx_len = rx_len = first_xfr->len;
+
+	/*
+	 * For WR-WR and WR-RD transfers, we allocate our own temporary
+	 * buffer and copy the data to/from the client buffers.
+	 */
+	if (dd->multi_xfr) {
+		dd->temp_buf = kzalloc(dd->cur_msg_len,
+				       GFP_KERNEL | __GFP_DMA);
+		if (!dd->temp_buf)
+			return -ENOMEM;
+		nxt_xfr = list_entry(first_xfr->transfer_list.next,
+				     struct spi_transfer, transfer_list);
+
+		if (dd->write_len && !dd->read_len) {
+			if (!first_xfr->tx_buf || !nxt_xfr->tx_buf)
+				goto error;
+
+			memcpy(dd->temp_buf, first_xfr->tx_buf, first_xfr->len);
+			memcpy(dd->temp_buf + first_xfr->len, nxt_xfr->tx_buf,
+			       nxt_xfr->len);
+			tx_buf = dd->temp_buf;
+			tx_len = dd->cur_msg_len;
+		} else {
+			if (!first_xfr->tx_buf || !nxt_xfr->rx_buf)
+				goto error;
+
+			rx_buf = dd->temp_buf;
+			rx_len = dd->cur_msg_len;
+		}
+	}
+	if (tx_buf != NULL) {
+		first_xfr->tx_dma = dma_map_single(dev, tx_buf,
+						   tx_len, DMA_TO_DEVICE);
+		if (dma_mapping_error(NULL, first_xfr->tx_dma)) {
+			dev_err(dev, "dma %cX %d bytes error\n",
+				'T', tx_len);
+			ret = -ENOMEM;
+			goto error;
+		}
+	}
+	if (rx_buf != NULL) {
+		dma_addr_t dma_handle;
+		dma_handle = dma_map_single(dev, rx_buf,
+					    rx_len, DMA_FROM_DEVICE);
+		if (dma_mapping_error(NULL, dma_handle)) {
+			dev_err(dev, "dma %cX %d bytes error\n",
+				'R', rx_len);
+			if (tx_buf != NULL)
+				dma_unmap_single(NULL, first_xfr->tx_dma,
+						 tx_len, DMA_TO_DEVICE);
+			ret = -ENOMEM;
+			goto error;
+		}
+		if (dd->multi_xfr)
+			nxt_xfr->rx_dma = dma_handle;
+		else
+			first_xfr->rx_dma = dma_handle;
+	}
+	return 0;
+
+error:
+	kfree(dd->temp_buf);
+	dd->temp_buf = NULL;
+	return ret;
+}
+
+static void msm_spi_unmap_dma_buffers(struct msm_spi *dd)
+{
+	struct device *dev;
+	u32 offset;
+
+	dev = &dd->cur_msg->spi->dev;
+	if (dd->cur_msg->is_dma_mapped)
+		goto unmap_end;
+
+	if (dd->multi_xfr) {
+		if (dd->write_len && !dd->read_len) {
+			dma_unmap_single(dev,
+					 dd->cur_transfer->tx_dma,
+					 dd->cur_msg_len,
+					 DMA_TO_DEVICE);
+		} else {
+			struct spi_transfer *prev_xfr;
+			prev_xfr = list_entry(
+				   dd->cur_transfer->transfer_list.prev,
+				   struct spi_transfer,
+				   transfer_list);
+			if (dd->cur_transfer->rx_buf) {
+				dma_unmap_single(dev,
+						 dd->cur_transfer->rx_dma,
+						 dd->cur_msg_len,
+						 DMA_FROM_DEVICE);
+			}
+			if (prev_xfr->tx_buf) {
+				dma_unmap_single(dev,
+						 prev_xfr->tx_dma,
+						 prev_xfr->len,
+						 DMA_TO_DEVICE);
+			}
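+			/* Copy the received data from the rx padding and
+			   temporary buffers back into the client's rx
+			   buffer. */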
+			if (dd->unaligned_len && dd->read_buf) {
+				offset = dd->cur_msg_len - dd->unaligned_len;
+				dma_coherent_post_ops();
+				memcpy(dd->read_buf + offset, dd->rx_padding,
+				       dd->unaligned_len);
+				memcpy(dd->cur_transfer->rx_buf,
+				       dd->read_buf + prev_xfr->len,
+				       dd->cur_transfer->len);
+			}
+		}
+		kfree(dd->temp_buf);
+		dd->temp_buf = NULL;
+		return;
+	} else {
+		if (dd->cur_transfer->rx_buf)
+			dma_unmap_single(dev, dd->cur_transfer->rx_dma,
+					 dd->cur_transfer->len,
+					 DMA_FROM_DEVICE);
+		if (dd->cur_transfer->tx_buf)
+			dma_unmap_single(dev, dd->cur_transfer->tx_dma,
+					 dd->cur_transfer->len,
+					 DMA_TO_DEVICE);
+	}
+
+unmap_end:
+	/* If we padded the transfer, we copy it from the padding buf */
+	if (dd->unaligned_len && dd->read_buf) {
+		offset = dd->cur_transfer->len - dd->unaligned_len;
+		dma_coherent_post_ops();
+		memcpy(dd->read_buf + offset, dd->rx_padding,
+		       dd->unaligned_len);
+	}
+}
+
+/**
+ * msm_use_dm - decides whether to use the data mover for this transfer
+ * @dd:       device
+ * @tr:       transfer
+ *
+ * Use DM only if:
+ * 1. The transfer is longer than 3 * block size.
+ * 2. The buffers are aligned to the cache line size.
+ * 3. For WR-RD or WR-WR transfers, conditions (1) and (2) above are met.
+ */
+static inline int msm_use_dm(struct msm_spi *dd, struct spi_transfer *tr,
+			     u8 bpw)
+{
+	u32 cache_line = dma_get_cache_alignment();
+
+	if (!dd->use_dma)
+		return 0;
+
+	if (dd->cur_msg_len < 3*dd->input_block_size)
+		return 0;
+
+	if (dd->multi_xfr && !dd->read_len && !dd->write_len)
+		return 0;
+
+	if (tr->tx_buf) {
+		if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
+			return 0;
+	}
+	if (tr->rx_buf) {
+		if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
+			return 0;
+	}
+
+	if (tr->cs_change &&
+	   ((bpw != 8) && (bpw != 16) && (bpw != 32)))
+		return 0;
+	return 1;
+}
+
+static void msm_spi_process_transfer(struct msm_spi *dd)
+{
+	u8  bpw;
+	u32 spi_ioc;
+	u32 spi_iom;
+	u32 spi_ioc_orig;
+	u32 max_speed;
+	u32 chip_select;
+	u32 read_count;
+	u32 timeout;
+	u32 int_loopback = 0;
+
+	dd->tx_bytes_remaining = dd->cur_msg_len;
+	dd->rx_bytes_remaining = dd->cur_msg_len;
+	dd->read_buf           = dd->cur_transfer->rx_buf;
+	dd->write_buf          = dd->cur_transfer->tx_buf;
+	init_completion(&dd->transfer_complete);
+	if (dd->cur_transfer->bits_per_word)
+		bpw = dd->cur_transfer->bits_per_word;
+	else
+		if (dd->cur_msg->spi->bits_per_word)
+			bpw = dd->cur_msg->spi->bits_per_word;
+		else
+			bpw = 8;
+	dd->bytes_per_word = (bpw + 7) / 8;
+
+	if (dd->cur_transfer->speed_hz)
+		max_speed = dd->cur_transfer->speed_hz;
+	else
+		max_speed = dd->cur_msg->spi->max_speed_hz;
+	if (!dd->clock_speed || max_speed != dd->clock_speed)
+		msm_spi_clock_set(dd, max_speed);
+
+	read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
+	if (dd->cur_msg->spi->mode & SPI_LOOP)
+		int_loopback = 1;
+	if (int_loopback && dd->multi_xfr &&
+			(read_count > dd->input_fifo_size)) {
+		if (dd->read_len && dd->write_len)
+			printk(KERN_WARNING
+			       "%s: Internal loopback does not support > FIFO "
+			       "size for write-then-read transactions\n",
+			       __func__);
+		else if (dd->write_len && !dd->read_len)
+			printk(KERN_WARNING
+			       "%s: Internal loopback does not support > FIFO "
+			       "size for write-then-write transactions\n",
+			       __func__);
+		return;
+	}
+	if (!msm_use_dm(dd, dd->cur_transfer, bpw)) {
+		dd->mode = SPI_FIFO_MODE;
+		if (dd->multi_xfr) {
+			dd->read_len = dd->cur_transfer->len;
+			dd->write_len = dd->cur_transfer->len;
+		}
+		/* read_count cannot exceed fifo_size, and only one READ COUNT
+		   interrupt is generated per transaction, so for transactions
+		   larger than fifo size READ COUNT must be disabled.
+		   For those transactions we usually move to Data Mover mode.
+		*/
+		if (read_count <= dd->input_fifo_size) {
+			writel_relaxed(read_count,
+				       dd->base + SPI_MX_READ_COUNT);
+			msm_spi_set_write_count(dd, read_count);
+		} else {
+			writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
+			msm_spi_set_write_count(dd, 0);
+		}
+	} else {
+		dd->mode = SPI_DMOV_MODE;
+		if (dd->write_len && dd->read_len) {
+			dd->tx_bytes_remaining = dd->write_len;
+			dd->rx_bytes_remaining = dd->read_len;
+		}
+	}
+
+	/* Write mode - fifo or data mover*/
+	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
+	spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
+	spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
+	spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
+	/* Turn on packing for data mover */
+	if (dd->mode == SPI_DMOV_MODE)
+		spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
+	else
+		spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
+	writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);
+
+	msm_spi_set_config(dd, bpw);
+
+	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
+	spi_ioc_orig = spi_ioc;
+	if (dd->cur_msg->spi->mode & SPI_CPOL)
+		spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
+	else
+		spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
+	chip_select = dd->cur_msg->spi->chip_select << 2;
+	if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
+		spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
+	if (!dd->cur_transfer->cs_change)
+		spi_ioc |= SPI_IO_C_MX_CS_MODE;
+	if (spi_ioc != spi_ioc_orig)
+		writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
+
+	if (dd->mode == SPI_DMOV_MODE) {
+		msm_spi_setup_dm_transfer(dd);
+		msm_spi_enqueue_dm_commands(dd);
+	}
+	/* The output FIFO interrupt handler will handle all writes after
+	   the first. Restricting this to one write avoids contention
+	   issues and race conditions between this thread and the interrupt
+	   handler. */
+	else if (dd->mode == SPI_FIFO_MODE) {
+		if (msm_spi_prepare_for_write(dd))
+			goto transfer_end;
+		msm_spi_start_write(dd, read_count);
+	}
+
+	/* Only enter the RUN state after the first word is written into
+	   the output FIFO. Otherwise, the output FIFO EMPTY interrupt
+	   might fire before the first word is written resulting in a
+	   possible race condition.
+	 */
+	if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
+		goto transfer_end;
+
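+	/* Allow roughly 100x the nominal transfer time (cur_msg_len bytes
+	   at max_speed bits per second) before declaring a timeout. */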
+	timeout = 100 * msecs_to_jiffies(
+	      DIV_ROUND_UP(dd->cur_msg_len * 8,
+		 DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));
+
+	/* Assume success, this might change later upon transaction result */
+	dd->cur_msg->status = 0;
+	do {
+		if (!wait_for_completion_timeout(&dd->transfer_complete,
+						 timeout)) {
+				dev_err(dd->dev, "%s: SPI transaction "
+						 "timeout\n", __func__);
+				dd->cur_msg->status = -EIO;
+				if (dd->mode == SPI_DMOV_MODE) {
+					msm_dmov_flush(dd->tx_dma_chan);
+					msm_dmov_flush(dd->rx_dma_chan);
+				}
+				break;
+		}
+	} while (msm_spi_dm_send_next(dd));
+
+transfer_end:
+	if (dd->mode == SPI_DMOV_MODE)
+		msm_spi_unmap_dma_buffers(dd);
+	dd->mode = SPI_MODE_NONE;
+
+	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
+	writel_relaxed(spi_ioc & ~SPI_IO_C_MX_CS_MODE,
+		       dd->base + SPI_IO_CONTROL);
+}
+
+static void get_transfer_length(struct msm_spi *dd)
+{
+	struct spi_transfer *tr;
+	int num_xfrs = 0;
+	int readlen = 0;
+	int writelen = 0;
+
+	dd->cur_msg_len = 0;
+	dd->multi_xfr = 0;
+	dd->read_len = dd->write_len = 0;
+
+	list_for_each_entry(tr, &dd->cur_msg->transfers, transfer_list) {
+		if (tr->tx_buf)
+			writelen += tr->len;
+		if (tr->rx_buf)
+			readlen += tr->len;
+		dd->cur_msg_len += tr->len;
+		num_xfrs++;
+	}
+
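+	/* Only two-transfer messages can qualify as WR-WR or WR-RD; for
+	   those, record the total read and write lengths. */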
+	if (num_xfrs == 2) {
+		struct spi_transfer *first_xfr = dd->cur_transfer;
+
+		dd->multi_xfr = 1;
+		tr = list_entry(first_xfr->transfer_list.next,
+				struct spi_transfer,
+				transfer_list);
+		/*
+		 * We update dd->read_len and dd->write_len only
+		 * for WR-WR and WR-RD transfers.
+		 */
+		if ((first_xfr->tx_buf) && (!first_xfr->rx_buf)) {
+			if (((tr->tx_buf) && (!tr->rx_buf)) ||
+			    ((!tr->tx_buf) && (tr->rx_buf))) {
+				dd->read_len = readlen;
+				dd->write_len = writelen;
+			}
+		}
+	} else if (num_xfrs > 1)
+		dd->multi_xfr = 1;
+}
+
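+/*
+ * Group consecutive transfers that share the same cs_change setting so
+ * that they can be processed together as one FIFO transaction.
+ */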
+static inline int combine_transfers(struct msm_spi *dd)
+{
+	struct spi_transfer *t = dd->cur_transfer;
+	struct spi_transfer *nxt;
+	int xfrs_grped = 1;
+
+	dd->cur_msg_len = dd->cur_transfer->len;
+	while (t->transfer_list.next != &dd->cur_msg->transfers) {
+		nxt = list_entry(t->transfer_list.next,
+				 struct spi_transfer,
+				 transfer_list);
+		if (t->cs_change != nxt->cs_change)
+			return xfrs_grped;
+		dd->cur_msg_len += nxt->len;
+		xfrs_grped++;
+		t = nxt;
+	}
+	return xfrs_grped;
+}
+
+static void msm_spi_process_message(struct msm_spi *dd)
+{
+	int xfrs_grped = 0;
+	dd->write_xfr_cnt = dd->read_xfr_cnt = 0;
+
+	dd->cur_transfer = list_first_entry(&dd->cur_msg->transfers,
+					    struct spi_transfer,
+					    transfer_list);
+	get_transfer_length(dd);
+	if (dd->multi_xfr && !dd->read_len && !dd->write_len) {
+		/* Handling of multi-transfers. FIFO mode is used by default */
+		list_for_each_entry(dd->cur_transfer,
+				    &dd->cur_msg->transfers,
+				    transfer_list) {
+			if (!dd->cur_transfer->len)
+				return;
+			if (xfrs_grped) {
+				xfrs_grped--;
+				continue;
+			} else {
+				dd->read_len = dd->write_len = 0;
+				xfrs_grped = combine_transfers(dd);
+			}
+			dd->cur_tx_transfer = dd->cur_transfer;
+			dd->cur_rx_transfer = dd->cur_transfer;
+			msm_spi_process_transfer(dd);
+			xfrs_grped--;
+		}
+	} else {
+		/* Handling of a single transfer or WR-WR or WR-RD transfers */
+		if ((!dd->cur_msg->is_dma_mapped) &&
+		    (msm_use_dm(dd, dd->cur_transfer,
+				dd->cur_transfer->bits_per_word))) {
+			/* Mapping of DMA buffers */
+			int ret = msm_spi_map_dma_buffers(dd);
+			if (ret < 0) {
+				dd->cur_msg->status = ret;
+				return;
+			}
+		}
+		dd->cur_tx_transfer = dd->cur_rx_transfer = dd->cur_transfer;
+		msm_spi_process_transfer(dd);
+	}
+}
+
+/* workqueue - pull messages from queue & process */
+static void msm_spi_workq(struct work_struct *work)
+{
+	struct msm_spi      *dd =
+		container_of(work, struct msm_spi, work_data);
+	unsigned long        flags;
+	u32                  status_error = 0;
+
+	mutex_lock(&dd->core_lock);
+
+	/* Don't allow power collapse until we release mutex */
+	if (pm_qos_request_active(&qos_req_list))
+		pm_qos_update_request(&qos_req_list,
+				  dd->pm_lat);
+	if (dd->use_rlock)
+		remote_mutex_lock(&dd->r_lock);
+
+	clk_enable(dd->clk);
+	clk_enable(dd->pclk);
+	msm_spi_enable_irqs(dd);
+
+	if (!msm_spi_is_valid_state(dd)) {
+		dev_err(dd->dev, "%s: SPI operational state not valid\n",
+			__func__);
+		status_error = 1;
+	}
+
+	spin_lock_irqsave(&dd->queue_lock, flags);
+	while (!list_empty(&dd->queue)) {
+		dd->cur_msg = list_entry(dd->queue.next,
+					 struct spi_message, queue);
+		list_del_init(&dd->cur_msg->queue);
+		spin_unlock_irqrestore(&dd->queue_lock, flags);
+		if (status_error)
+			dd->cur_msg->status = -EIO;
+		else
+			msm_spi_process_message(dd);
+		if (dd->cur_msg->complete)
+			dd->cur_msg->complete(dd->cur_msg->context);
+		spin_lock_irqsave(&dd->queue_lock, flags);
+	}
+	dd->transfer_pending = 0;
+	spin_unlock_irqrestore(&dd->queue_lock, flags);
+
+	msm_spi_disable_irqs(dd);
+	clk_disable(dd->clk);
+	clk_disable(dd->pclk);
+
+	if (dd->use_rlock)
+		remote_mutex_unlock(&dd->r_lock);
+
+	if (pm_qos_request_active(&qos_req_list))
+		pm_qos_update_request(&qos_req_list,
+				  PM_QOS_DEFAULT_VALUE);
+
+	mutex_unlock(&dd->core_lock);
+	/* If needed, this can be done after the current message is complete,
+	   and work can be continued upon resume. No motivation for now. */
+	if (dd->suspended)
+		wake_up_interruptible(&dd->continue_suspend);
+}
+
+static int msm_spi_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+	struct msm_spi	*dd;
+	unsigned long    flags;
+	struct spi_transfer *tr;
+
+	dd = spi_master_get_devdata(spi->master);
+	if (dd->suspended)
+		return -EBUSY;
+
+	if (list_empty(&msg->transfers) || !msg->complete)
+		return -EINVAL;
+
+	list_for_each_entry(tr, &msg->transfers, transfer_list) {
+		/* Check message parameters */
+		if (tr->speed_hz > dd->pdata->max_clock_speed ||
+		    (tr->bits_per_word &&
+		     (tr->bits_per_word < 4 || tr->bits_per_word > 32)) ||
+		    (tr->tx_buf == NULL && tr->rx_buf == NULL)) {
+			dev_err(&spi->dev, "Invalid transfer: %d Hz, %d bpw"
+					   "tx=%p, rx=%p\n",
+					    tr->speed_hz, tr->bits_per_word,
+					    tr->tx_buf, tr->rx_buf);
+			return -EINVAL;
+		}
+	}
+
+	spin_lock_irqsave(&dd->queue_lock, flags);
+	if (dd->suspended) {
+		spin_unlock_irqrestore(&dd->queue_lock, flags);
+		return -EBUSY;
+	}
+	dd->transfer_pending = 1;
+	list_add_tail(&msg->queue, &dd->queue);
+	spin_unlock_irqrestore(&dd->queue_lock, flags);
+	queue_work(dd->workqueue, &dd->work_data);
+	return 0;
+}
+
+static int msm_spi_setup(struct spi_device *spi)
+{
+	struct msm_spi	*dd;
+	int              rc = 0;
+	u32              spi_ioc;
+	u32              spi_config;
+	u32              mask;
+
+	if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
+		dev_err(&spi->dev, "%s: invalid bits_per_word %d\n",
+			__func__, spi->bits_per_word);
+		rc = -EINVAL;
+	}
+	if (spi->chip_select > SPI_NUM_CHIPSELECTS-1) {
+		dev_err(&spi->dev, "%s, chip select %d exceeds max value %d\n",
+			__func__, spi->chip_select, SPI_NUM_CHIPSELECTS - 1);
+		rc = -EINVAL;
+	}
+
+	if (rc)
+		goto err_setup_exit;
+
+	dd = spi_master_get_devdata(spi->master);
+
+	mutex_lock(&dd->core_lock);
+	if (dd->suspended) {
+		mutex_unlock(&dd->core_lock);
+		return -EBUSY;
+	}
+
+	if (dd->use_rlock)
+		remote_mutex_lock(&dd->r_lock);
+
+	clk_enable(dd->clk);
+	clk_enable(dd->pclk);
+
+	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
+	mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
+	if (spi->mode & SPI_CS_HIGH)
+		spi_ioc |= mask;
+	else
+		spi_ioc &= ~mask;
+	if (spi->mode & SPI_CPOL)
+		spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
+	else
+		spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
+
+	writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
+
+	spi_config = readl_relaxed(dd->base + SPI_CONFIG);
+	if (spi->mode & SPI_LOOP)
+		spi_config |= SPI_CFG_LOOPBACK;
+	else
+		spi_config &= ~SPI_CFG_LOOPBACK;
+	if (spi->mode & SPI_CPHA)
+		spi_config &= ~SPI_CFG_INPUT_FIRST;
+	else
+		spi_config |= SPI_CFG_INPUT_FIRST;
+	writel_relaxed(spi_config, dd->base + SPI_CONFIG);
+
+	/* Ensure previous write completed before disabling the clocks */
+	mb();
+	clk_disable(dd->clk);
+	clk_disable(dd->pclk);
+
+	if (dd->use_rlock)
+		remote_mutex_unlock(&dd->r_lock);
+	mutex_unlock(&dd->core_lock);
+
+err_setup_exit:
+	return rc;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int debugfs_iomem_x32_set(void *data, u64 val)
+{
+	writel_relaxed(val, data);
+	/* Ensure the previous write completed. */
+	mb();
+	return 0;
+}
+
+static int debugfs_iomem_x32_get(void *data, u64 *val)
+{
+	*val = readl_relaxed(data);
+	/* Ensure the previous read completed. */
+	mb();
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
+			debugfs_iomem_x32_set, "0x%08llx\n");
+
+static void spi_debugfs_init(struct msm_spi *dd)
+{
+	dd->dent_spi = debugfs_create_dir(dev_name(dd->dev), NULL);
+	if (dd->dent_spi) {
+		int i;
+		for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) {
+			dd->debugfs_spi_regs[i] =
+			   debugfs_create_file(
+			       debugfs_spi_regs[i].name,
+			       debugfs_spi_regs[i].mode,
+			       dd->dent_spi,
+			       dd->base + debugfs_spi_regs[i].offset,
+			       &fops_iomem_x32);
+		}
+	}
+}
+
+static void spi_debugfs_exit(struct msm_spi *dd)
+{
+	if (dd->dent_spi) {
+		int i;
+		debugfs_remove_recursive(dd->dent_spi);
+		dd->dent_spi = NULL;
+		for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++)
+			dd->debugfs_spi_regs[i] = NULL;
+	}
+}
+#else
+static void spi_debugfs_init(struct msm_spi *dd) {}
+static void spi_debugfs_exit(struct msm_spi *dd) {}
+#endif
+
+/* ===Device attributes begin=== */
+static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct spi_master *master = dev_get_drvdata(dev);
+	struct msm_spi *dd =  spi_master_get_devdata(master);
+
+	return snprintf(buf, PAGE_SIZE,
+			"Device       %s\n"
+			"rx fifo_size = %d spi words\n"
+			"tx fifo_size = %d spi words\n"
+			"use_dma ?    %s\n"
+			"rx block size = %d bytes\n"
+			"tx block size = %d bytes\n"
+			"burst size = %d bytes\n"
+			"DMA configuration:\n"
+			"tx_ch=%d, rx_ch=%d, tx_crci= %d, rx_crci=%d\n"
+			"--statistics--\n"
+			"Rx isrs  = %d\n"
+			"Tx isrs  = %d\n"
+			"DMA error  = %d\n"
+			"--debug--\n"
+			"NA yet\n",
+			dev_name(dev),
+			dd->input_fifo_size,
+			dd->output_fifo_size,
+			dd->use_dma ? "yes" : "no",
+			dd->input_block_size,
+			dd->output_block_size,
+			dd->burst_size,
+			dd->tx_dma_chan,
+			dd->rx_dma_chan,
+			dd->tx_dma_crci,
+			dd->rx_dma_crci,
+			dd->stat_rx + dd->stat_dmov_rx,
+			dd->stat_tx + dd->stat_dmov_tx,
+			dd->stat_dmov_tx_err + dd->stat_dmov_rx_err
+			);
+}
+
+/* Reset statistics on write */
+static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct msm_spi *dd = dev_get_drvdata(dev);
+	dd->stat_rx = 0;
+	dd->stat_tx = 0;
+	dd->stat_dmov_rx = 0;
+	dd->stat_dmov_tx = 0;
+	dd->stat_dmov_rx_err = 0;
+	dd->stat_dmov_tx_err = 0;
+	return count;
+}
+
+static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);
+
+static struct attribute *dev_attrs[] = {
+	&dev_attr_stats.attr,
+	NULL,
+};
+
+static struct attribute_group dev_attr_grp = {
+	.attrs = dev_attrs,
+};
+/* ===Device attributes end=== */
+
+/**
+ * spi_dmov_tx_complete_func - DataMover tx completion callback
+ *
+ * Executed in IRQ context (Data Mover's IRQ) with the DataMover's
+ * spinlock @msm_dmov_lock held.
+ */
+static void spi_dmov_tx_complete_func(struct msm_dmov_cmd *cmd,
+				      unsigned int result,
+				      struct msm_dmov_errdata *err)
+{
+	struct msm_spi *dd;
+
+	if (!(result & DMOV_RSLT_VALID)) {
+		pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p", result, cmd);
+		return;
+	}
+	/* restore original context */
+	dd = container_of(cmd, struct msm_spi, tx_hdr);
+	if (result & DMOV_RSLT_DONE)
+		dd->stat_dmov_tx++;
+	else {
+		/* Error or flush */
+		if (result & DMOV_RSLT_ERROR) {
+			dev_err(dd->dev, "DMA error (0x%08x)\n", result);
+			dd->stat_dmov_tx_err++;
+		}
+		if (result & DMOV_RSLT_FLUSH) {
+			/*
+			 * Flushing normally happens during driver removal,
+			 * while we are waiting for outstanding DMA commands
+			 * to be flushed.
+			 */
+			dev_info(dd->dev,
+				 "DMA channel flushed (0x%08x)\n", result);
+		}
+		if (err)
+			dev_err(dd->dev,
+				"Flush data(%08x %08x %08x %08x %08x %08x)\n",
+				err->flush[0], err->flush[1], err->flush[2],
+				err->flush[3], err->flush[4], err->flush[5]);
+		dd->cur_msg->status = -EIO;
+		complete(&dd->transfer_complete);
+	}
+}
+
+/**
+ * spi_dmov_rx_complete_func - DataMover rx completion callback
+ *
+ * Executed in IRQ context (Data Mover's IRQ) with the
+ * DataMover's spinlock @msm_dmov_lock held.
+ */
+static void spi_dmov_rx_complete_func(struct msm_dmov_cmd *cmd,
+				      unsigned int result,
+				      struct msm_dmov_errdata *err)
+{
+	struct msm_spi *dd;
+
+	if (!(result & DMOV_RSLT_VALID)) {
+		pr_err("Invalid DMOV result(rc = 0x%08x, cmd = %p)",
+		       result, cmd);
+		return;
+	}
+	/* restore original context */
+	dd = container_of(cmd, struct msm_spi, rx_hdr);
+	if (result & DMOV_RSLT_DONE) {
+		dd->stat_dmov_rx++;
+		if (atomic_inc_return(&dd->rx_irq_called) == 1)
+			return;
+		complete(&dd->transfer_complete);
+	} else {
+		/* Error or flush */
+		if (result & DMOV_RSLT_ERROR) {
+			dev_err(dd->dev, "DMA error(0x%08x)\n", result);
+			dd->stat_dmov_rx_err++;
+		}
+		if (result & DMOV_RSLT_FLUSH) {
+			dev_info(dd->dev,
+				"DMA channel flushed(0x%08x)\n", result);
+		}
+		if (err)
+			dev_err(dd->dev,
+				"Flush data(%08x %08x %08x %08x %08x %08x)\n",
+				err->flush[0], err->flush[1], err->flush[2],
+				err->flush[3], err->flush[4], err->flush[5]);
+		dd->cur_msg->status = -EIO;
+		complete(&dd->transfer_complete);
+	}
+}
+
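+/*
+ * Size of the single coherent allocation used for DM: two command
+ * structures plus two padding buffers, each rounded up to the required
+ * alignment.
+ */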
+static inline u32 get_chunk_size(struct msm_spi *dd)
+{
+	u32 cache_line = dma_get_cache_alignment();
+
+	return (roundup(sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN) +
+			  roundup(dd->burst_size, cache_line))*2;
+}
+
+static void msm_spi_teardown_dma(struct msm_spi *dd)
+{
+	int limit = 0;
+
+	if (!dd->use_dma)
+		return;
+
+	while (dd->mode == SPI_DMOV_MODE && limit++ < 50) {
+		msm_dmov_flush(dd->tx_dma_chan);
+		msm_dmov_flush(dd->rx_dma_chan);
+		msleep(10);
+	}
+
+	dma_free_coherent(NULL, get_chunk_size(dd), dd->tx_dmov_cmd,
+			  dd->tx_dmov_cmd_dma);
+	dd->tx_dmov_cmd = dd->rx_dmov_cmd = NULL;
+	dd->tx_padding = dd->rx_padding = NULL;
+}
+
+static __init int msm_spi_init_dma(struct msm_spi *dd)
+{
+	dmov_box *box;
+	u32 cache_line = dma_get_cache_alignment();
+
+	/* Allocate everything as one chunk; the total is smaller than a page */
+
+	/* We pass a NULL device, since a real device would require
+	   coherent_dma_mask in its definition; we're okay with using the
+	   system pool */
+	dd->tx_dmov_cmd = dma_alloc_coherent(NULL, get_chunk_size(dd),
+					     &dd->tx_dmov_cmd_dma, GFP_KERNEL);
+	if (dd->tx_dmov_cmd == NULL)
+		return -ENOMEM;
+
+	/* DMA addresses should be 64-bit aligned */
+	dd->rx_dmov_cmd = (struct spi_dmov_cmd *)
+			  ALIGN((size_t)&dd->tx_dmov_cmd[1], DM_BYTE_ALIGN);
+	dd->rx_dmov_cmd_dma = ALIGN(dd->tx_dmov_cmd_dma +
+			      sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN);
+
+	/* Buffers should be aligned to cache line */
+	dd->tx_padding = (u8 *)ALIGN((size_t)&dd->rx_dmov_cmd[1], cache_line);
+	dd->tx_padding_dma = ALIGN(dd->rx_dmov_cmd_dma +
+			      sizeof(struct spi_dmov_cmd), cache_line);
+	dd->rx_padding = (u8 *)ALIGN((size_t)(dd->tx_padding + dd->burst_size),
+				     cache_line);
+	dd->rx_padding_dma = ALIGN(dd->tx_padding_dma + dd->burst_size,
+				      cache_line);
+
+	/* Setup DM commands */
+	box = &(dd->rx_dmov_cmd->box);
+	box->cmd = CMD_MODE_BOX | CMD_SRC_CRCI(dd->rx_dma_crci);
+	box->src_row_addr = (uint32_t)dd->mem_phys_addr + SPI_INPUT_FIFO;
+	dd->rx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
+				   DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
+				   offsetof(struct spi_dmov_cmd, cmd_ptr));
+	dd->rx_hdr.complete_func = spi_dmov_rx_complete_func;
+	dd->rx_hdr.crci_mask = msm_dmov_build_crci_mask(1, dd->rx_dma_crci);
+
+	box = &(dd->tx_dmov_cmd->box);
+	box->cmd = CMD_MODE_BOX | CMD_DST_CRCI(dd->tx_dma_crci);
+	box->dst_row_addr = (uint32_t)dd->mem_phys_addr + SPI_OUTPUT_FIFO;
+	dd->tx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
+			    DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
+			    offsetof(struct spi_dmov_cmd, cmd_ptr));
+	dd->tx_hdr.complete_func = spi_dmov_tx_complete_func;
+	dd->tx_hdr.crci_mask = msm_dmov_build_crci_mask(1, dd->tx_dma_crci);
+
+	dd->tx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
+					  CMD_DST_CRCI(dd->tx_dma_crci);
+	dd->tx_dmov_cmd->single_pad.dst = (uint32_t)dd->mem_phys_addr +
+					   SPI_OUTPUT_FIFO;
+	dd->rx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
+					  CMD_SRC_CRCI(dd->rx_dma_crci);
+	dd->rx_dmov_cmd->single_pad.src = (uint32_t)dd->mem_phys_addr +
+					  SPI_INPUT_FIFO;
+
+	/* Clear remaining activities on channel */
+	msm_dmov_flush(dd->tx_dma_chan);
+	msm_dmov_flush(dd->rx_dma_chan);
+
+	return 0;
+}
+
+static int __init msm_spi_probe(struct platform_device *pdev)
+{
+	struct spi_master      *master;
+	struct msm_spi	       *dd;
+	struct resource	       *resource;
+	int                     rc = -ENXIO;
+	int                     locked = 0;
+	int                     i = 0;
+	int                     clk_enabled = 0;
+	int                     pclk_enabled = 0;
+	struct msm_spi_platform_data *pdata = pdev->dev.platform_data;
+
+	master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi));
+	if (!master) {
+		rc = -ENOMEM;
+		dev_err(&pdev->dev, "master allocation failed\n");
+		goto err_probe_exit;
+	}
+
+	master->bus_num        = pdev->id;
+	master->mode_bits      = SPI_SUPPORTED_MODES;
+	master->num_chipselect = SPI_NUM_CHIPSELECTS;
+	master->setup          = msm_spi_setup;
+	master->transfer       = msm_spi_transfer;
+	platform_set_drvdata(pdev, master);
+	dd = spi_master_get_devdata(master);
+
+	dd->pdata = pdata;
+	rc = msm_spi_get_irq_data(dd, pdev);
+	if (rc)
+		goto err_probe_res;
+	resource = platform_get_resource_byname(pdev,
+						 IORESOURCE_MEM, "spi_base");
+	if (!resource) {
+		rc = -ENXIO;
+		goto err_probe_res;
+	}
+	dd->mem_phys_addr = resource->start;
+	dd->mem_size = resource_size(resource);
+
+	rc = msm_spi_get_gsbi_resource(dd, pdev);
+	if (rc)
+		goto err_probe_res2;
+
+	if (pdata) {
+		if (pdata->dma_config) {
+			rc = pdata->dma_config();
+			if (rc) {
+				dev_warn(&pdev->dev,
+					"%s: DM mode not supported\n",
+					__func__);
+				dd->use_dma = 0;
+				goto skip_dma_resources;
+			}
+		}
+		resource = platform_get_resource_byname(pdev,
+							IORESOURCE_DMA,
+							"spidm_channels");
+		if (resource) {
+			dd->rx_dma_chan = resource->start;
+			dd->tx_dma_chan = resource->end;
+
+			resource = platform_get_resource_byname(pdev,
+							IORESOURCE_DMA,
+							"spidm_crci");
+			if (!resource) {
+				rc = -ENXIO;
+				goto err_probe_res;
+			}
+			dd->rx_dma_crci = resource->start;
+			dd->tx_dma_crci = resource->end;
+			dd->use_dma = 1;
+			master->dma_alignment =	dma_get_cache_alignment();
+		}
+
+skip_dma_resources:
+		if (pdata->gpio_config) {
+			rc = pdata->gpio_config();
+			if (rc) {
+				dev_err(&pdev->dev,
+					"%s: error configuring GPIOs\n",
+					__func__);
+				goto err_probe_gpio;
+			}
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
+		resource = platform_get_resource_byname(pdev, IORESOURCE_IO,
+							spi_rsrcs[i]);
+		dd->spi_gpios[i] = resource ? resource->start : -1;
+	}
+
+	rc = msm_spi_request_gpios(dd);
+	if (rc)
+		goto err_probe_gpio;
+	spin_lock_init(&dd->queue_lock);
+	mutex_init(&dd->core_lock);
+	INIT_LIST_HEAD(&dd->queue);
+	INIT_WORK(&dd->work_data, msm_spi_workq);
+	init_waitqueue_head(&dd->continue_suspend);
+	dd->workqueue = create_singlethread_workqueue(
+		dev_name(master->dev.parent));
+	if (!dd->workqueue)
+		goto err_probe_workq;
+
+	if (!request_mem_region(dd->mem_phys_addr, dd->mem_size,
+				SPI_DRV_NAME)) {
+		rc = -ENXIO;
+		goto err_probe_reqmem;
+	}
+
+	dd->base = ioremap(dd->mem_phys_addr, dd->mem_size);
+	if (!dd->base)
+		goto err_probe_ioremap;
+	rc = msm_spi_request_gsbi(dd);
+	if (rc)
+		goto err_probe_ioremap2;
+	if (pdata && pdata->rsl_id) {
+		struct remote_mutex_id rmid;
+		rmid.r_spinlock_id = pdata->rsl_id;
+		rmid.delay_us = SPI_TRYLOCK_DELAY;
+
+		rc = remote_mutex_init(&dd->r_lock, &rmid);
+		if (rc) {
+			dev_err(&pdev->dev, "%s: unable to init remote_mutex "
+				"(%s), (rc=%d)\n", rmid.r_spinlock_id,
+				__func__, rc);
+			goto err_probe_rlock_init;
+		}
+		dd->use_rlock = 1;
+		dd->pm_lat = pdata->pm_lat;
+		pm_qos_add_request(&qos_req_list, PM_QOS_CPU_DMA_LATENCY,
+				   PM_QOS_DEFAULT_VALUE);
+	}
+	mutex_lock(&dd->core_lock);
+	if (dd->use_rlock)
+		remote_mutex_lock(&dd->r_lock);
+	locked = 1;
+
+	dd->dev = &pdev->dev;
+	dd->clk = clk_get(&pdev->dev, "spi_clk");
+	if (IS_ERR(dd->clk)) {
+		dev_err(&pdev->dev, "%s: unable to get spi_clk\n", __func__);
+		rc = PTR_ERR(dd->clk);
+		goto err_probe_clk_get;
+	}
+
+	dd->pclk = clk_get(&pdev->dev, "spi_pclk");
+	if (IS_ERR(dd->pclk)) {
+		dev_err(&pdev->dev, "%s: unable to get spi_pclk\n", __func__);
+		rc = PTR_ERR(dd->pclk);
+		goto err_probe_pclk_get;
+	}
+
+	if (pdata && pdata->max_clock_speed)
+		msm_spi_clock_set(dd, dd->pdata->max_clock_speed);
+
+	rc = clk_enable(dd->clk);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: unable to enable spi_clk\n",
+			__func__);
+		goto err_probe_clk_enable;
+	}
+	clk_enabled = 1;
+
+	rc = clk_enable(dd->pclk);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: unable to enable spi_pclk\n",
+		__func__);
+		goto err_probe_pclk_enable;
+	}
+	pclk_enabled = 1;
+	msm_spi_init_gsbi(dd);
+	msm_spi_calculate_fifo_size(dd);
+	if (dd->use_dma) {
+		rc = msm_spi_init_dma(dd);
+		if (rc)
+			goto err_probe_dma;
+	}
+
+	/* Initialize registers */
+	writel_relaxed(0x00000001, dd->base + SPI_SW_RESET);
+	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
+
+	writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL);
+	writel_relaxed(0x00000000, dd->base + SPI_CONFIG);
+	writel_relaxed(0x00000000, dd->base + SPI_IO_MODES);
+	/*
+	 * The SPI core generates a bogus input overrun error on some targets,
+	 * when a transition from run to reset state occurs and if the FIFO has
+	 * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
+	 * bit.
+	 */
+	msm_spi_enable_error_flags(dd);
+
+	writel_relaxed(SPI_IO_C_NO_TRI_STATE, dd->base + SPI_IO_CONTROL);
+	rc = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
+	if (rc)
+		goto err_probe_state;
+
+	clk_disable(dd->clk);
+	clk_disable(dd->pclk);
+	clk_enabled = 0;
+	pclk_enabled = 0;
+
+	dd->suspended = 0;
+	dd->transfer_pending = 0;
+	dd->multi_xfr = 0;
+	dd->mode = SPI_MODE_NONE;
+
+	rc = msm_spi_request_irq(dd, pdev->name, master);
+	if (rc)
+		goto err_probe_irq;
+
+	msm_spi_disable_irqs(dd);
+	if (dd->use_rlock)
+		remote_mutex_unlock(&dd->r_lock);
+
+	mutex_unlock(&dd->core_lock);
+	locked = 0;
+
+	rc = spi_register_master(master);
+	if (rc)
+		goto err_probe_reg_master;
+
+	rc = sysfs_create_group(&(dd->dev->kobj), &dev_attr_grp);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
+		goto err_attrs;
+	}
+
+	spi_debugfs_init(dd);
+
+	return 0;
+
+err_attrs:
+err_probe_reg_master:
+	msm_spi_free_irq(dd, master);
+err_probe_irq:
+err_probe_state:
+	msm_spi_teardown_dma(dd);
+err_probe_dma:
+	if (pclk_enabled)
+		clk_disable(dd->pclk);
+err_probe_pclk_enable:
+	if (clk_enabled)
+		clk_disable(dd->clk);
+err_probe_clk_enable:
+	clk_put(dd->pclk);
+err_probe_pclk_get:
+	clk_put(dd->clk);
+err_probe_clk_get:
+	if (locked) {
+		if (dd->use_rlock)
+			remote_mutex_unlock(&dd->r_lock);
+		mutex_unlock(&dd->core_lock);
+	}
+err_probe_rlock_init:
+	msm_spi_release_gsbi(dd);
+err_probe_ioremap2:
+	iounmap(dd->base);
+err_probe_ioremap:
+	release_mem_region(dd->mem_phys_addr, dd->mem_size);
+err_probe_reqmem:
+	destroy_workqueue(dd->workqueue);
+err_probe_workq:
+	msm_spi_free_gpios(dd);
+err_probe_gpio:
+	if (pdata && pdata->gpio_release)
+		pdata->gpio_release();
+err_probe_res2:
+err_probe_res:
+	spi_master_put(master);
+err_probe_exit:
+	return rc;
+}
+
+#ifdef CONFIG_PM
+static int msm_spi_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct msm_spi    *dd;
+	unsigned long      flags;
+
+	if (!master)
+		goto suspend_exit;
+	dd = spi_master_get_devdata(master);
+	if (!dd)
+		goto suspend_exit;
+
+	/* Make sure nothing is added to the queue while we're suspending */
+	spin_lock_irqsave(&dd->queue_lock, flags);
+	dd->suspended = 1;
+	spin_unlock_irqrestore(&dd->queue_lock, flags);
+
+	/* Wait for transactions to end, or time out */
+	wait_event_interruptible(dd->continue_suspend, !dd->transfer_pending);
+	msm_spi_free_gpios(dd);
+
+suspend_exit:
+	return 0;
+}
+
+static int msm_spi_resume(struct platform_device *pdev)
+{
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct msm_spi    *dd;
+
+	if (!master)
+		goto resume_exit;
+	dd = spi_master_get_devdata(master);
+	if (!dd)
+		goto resume_exit;
+
+	BUG_ON(msm_spi_request_gpios(dd) != 0);
+	dd->suspended = 0;
+resume_exit:
+	return 0;
+}
+#else
+#define msm_spi_suspend NULL
+#define msm_spi_resume NULL
+#endif /* CONFIG_PM */
+
+static int __devexit msm_spi_remove(struct platform_device *pdev)
+{
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct msm_spi    *dd = spi_master_get_devdata(master);
+	struct msm_spi_platform_data *pdata = pdev->dev.platform_data;
+
+	pm_qos_remove_request(&qos_req_list);
+	spi_debugfs_exit(dd);
+	sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
+
+	msm_spi_free_irq(dd, master);
+	msm_spi_teardown_dma(dd);
+
+	if (pdata && pdata->gpio_release)
+		pdata->gpio_release();
+
+	msm_spi_free_gpios(dd);
+	iounmap(dd->base);
+	release_mem_region(dd->mem_phys_addr, dd->mem_size);
+	msm_spi_release_gsbi(dd);
+	clk_put(dd->clk);
+	clk_put(dd->pclk);
+	destroy_workqueue(dd->workqueue);
+	platform_set_drvdata(pdev, 0);
+	spi_unregister_master(master);
+	spi_master_put(master);
+
+	return 0;
+}
+
+static struct platform_driver msm_spi_driver = {
+	.driver		= {
+		.name	= SPI_DRV_NAME,
+		.owner	= THIS_MODULE,
+	},
+	.suspend        = msm_spi_suspend,
+	.resume         = msm_spi_resume,
+	.remove		= __exit_p(msm_spi_remove),
+};
+
+static int __init msm_spi_init(void)
+{
+	return platform_driver_probe(&msm_spi_driver, msm_spi_probe);
+}
+module_init(msm_spi_init);
+
+static void __exit msm_spi_exit(void)
+{
+	platform_driver_unregister(&msm_spi_driver);
+}
+module_exit(msm_spi_exit);
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index d9fd862..c174306 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -83,6 +83,7 @@
 	struct mutex		buf_lock;
 	unsigned		users;
 	u8			*buffer;
+	u8			*bufferrx;
 };
 
 static LIST_HEAD(device_list);
@@ -92,6 +93,30 @@
 module_param(bufsiz, uint, S_IRUGO);
 MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");
 
+/*
+ * This can be used for testing the controller, given the busnum and the
+ * cs required to use. If those parameters are used, spidev is
+ * dynamically added as device on the busnum, and messages can be sent
+ * via this interface.
+ */
+static int busnum = -1;
+module_param(busnum, int, S_IRUGO);
+MODULE_PARM_DESC(busnum, "bus num of the controller");
+
+static int chipselect = -1;
+module_param(chipselect, int, S_IRUGO);
+MODULE_PARM_DESC(chipselect, "chip select of the desired device");
+
+static int maxspeed = 10000000;
+module_param(maxspeed, int, S_IRUGO);
+MODULE_PARM_DESC(maxspeed, "max_speed of the desired device");
+
+static int spimode = SPI_MODE_3;
+module_param(spimode, int, S_IRUGO);
+MODULE_PARM_DESC(spimode, "mode of the desired device");
+
+static struct spi_device *spi;
+
 /*-------------------------------------------------------------------------*/
 
 /*
@@ -221,7 +246,7 @@
 	struct spi_transfer	*k_tmp;
 	struct spi_ioc_transfer *u_tmp;
 	unsigned		n, total;
-	u8			*buf;
+	u8			*buf, *bufrx;
 	int			status = -EFAULT;
 
 	spi_message_init(&msg);
@@ -234,6 +259,7 @@
 	 * to initialize a kernel version of the same transfer.
 	 */
 	buf = spidev->buffer;
+	bufrx = spidev->bufferrx;
 	total = 0;
 	for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
 			n;
@@ -247,7 +273,7 @@
 		}
 
 		if (u_tmp->rx_buf) {
-			k_tmp->rx_buf = buf;
+			k_tmp->rx_buf = bufrx;
 			if (!access_ok(VERIFY_WRITE, (u8 __user *)
 						(uintptr_t) u_tmp->rx_buf,
 						u_tmp->len))
@@ -261,6 +287,7 @@
 				goto done;
 		}
 		buf += k_tmp->len;
+		bufrx += k_tmp->len;
 
 		k_tmp->cs_change = !!u_tmp->cs_change;
 		k_tmp->bits_per_word = u_tmp->bits_per_word;
@@ -285,7 +312,7 @@
 		goto done;
 
 	/* copy any rx data out of bounce buffer */
-	buf = spidev->buffer;
+	buf = spidev->bufferrx;
 	for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) {
 		if (u_tmp->rx_buf) {
 			if (__copy_to_user((u8 __user *)
@@ -503,6 +530,15 @@
 				status = -ENOMEM;
 			}
 		}
+		if (!spidev->bufferrx) {
+			spidev->bufferrx = kmalloc(bufsiz, GFP_KERNEL);
+			if (!spidev->bufferrx) {
+				dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+				kfree(spidev->buffer);
+				spidev->buffer = NULL;
+				status = -ENOMEM;
+			}
+		}
 		if (status == 0) {
 			spidev->users++;
 			filp->private_data = spidev;
@@ -531,6 +567,8 @@
 
 		kfree(spidev->buffer);
 		spidev->buffer = NULL;
+		kfree(spidev->bufferrx);
+		spidev->bufferrx = NULL;
 
 		/* ... after we unbound from the underlying device? */
 		spin_lock_irq(&spidev->spi_lock);
@@ -674,21 +712,58 @@
 
 	spidev_class = class_create(THIS_MODULE, "spidev");
 	if (IS_ERR(spidev_class)) {
-		unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
-		return PTR_ERR(spidev_class);
+		status = PTR_ERR(spidev_class);
+		goto error_class;
 	}
 
 	status = spi_register_driver(&spidev_spi_driver);
-	if (status < 0) {
-		class_destroy(spidev_class);
-		unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
+	if (status < 0)
+		goto error_register;
+
+	if (busnum != -1 && chipselect != -1) {
+		struct spi_board_info chip = {
+					.modalias	= "spidev",
+					.mode		= spimode,
+					.bus_num	= busnum,
+					.chip_select	= chipselect,
+					.max_speed_hz	= maxspeed,
+		};
+
+		struct spi_master *master;
+
+		master = spi_busnum_to_master(busnum);
+		if (!master) {
+			status = -ENODEV;
+			goto error_busnum;
+		}
+
+		/* We create a virtual device that will sit on the bus */
+		spi = spi_new_device(master, &chip);
+		if (!spi) {
+			status = -EBUSY;
+			goto error_mem;
+		}
+		dev_dbg(&spi->dev, "busnum=%d cs=%d bufsiz=%d maxspeed=%d",
+			busnum, chipselect, bufsiz, maxspeed);
 	}
+	return 0;
+error_mem:
+error_busnum:
+	spi_unregister_driver(&spidev_spi_driver);
+error_register:
+	class_destroy(spidev_class);
+error_class:
+	unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
 	return status;
 }
 module_init(spidev_init);
 
 static void __exit spidev_exit(void)
 {
+	if (spi) {
+		spi_unregister_device(spi);
+		spi = NULL;
+	}
 	spi_unregister_driver(&spidev_spi_driver);
 	class_destroy(spidev_class);
 	unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 11a4b5b..d055412 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -122,8 +122,6 @@
 
 source "drivers/staging/xgifb/Kconfig"
 
-source "drivers/staging/msm/Kconfig"
-
 source "drivers/staging/lirc/Kconfig"
 
 source "drivers/staging/easycap/Kconfig"
diff --git a/drivers/staging/ath6kl/os/linux/include/athendpack_linux.h b/drivers/staging/ath6kl/os/linux/include/athendpack_linux.h
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/drivers/staging/ath6kl/os/linux/include/athendpack_linux.h
diff --git a/drivers/staging/ath6kl/os/linux/include/athstartpack_linux.h b/drivers/staging/ath6kl/os/linux/include/athstartpack_linux.h
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/drivers/staging/ath6kl/os/linux/include/athstartpack_linux.h
diff --git a/drivers/staging/dream/Kconfig b/drivers/staging/dream/Kconfig
new file mode 100644
index 0000000..0c30b19
--- /dev/null
+++ b/drivers/staging/dream/Kconfig
@@ -0,0 +1,13 @@
+config DREAM
+	tristate "HTC Dream support"
+	depends on MACH_TROUT
+
+if DREAM
+
+source "drivers/staging/dream/camera/Kconfig"
+
+config INPUT_GPIO
+	tristate "GPIO driver support"
+	help
+	  Say Y here if you want to support GPIO-based keys, wheels, etc.
+endif
diff --git a/drivers/staging/dream/Makefile b/drivers/staging/dream/Makefile
new file mode 100644
index 0000000..fbea0ab
--- /dev/null
+++ b/drivers/staging/dream/Makefile
@@ -0,0 +1,5 @@
+EXTRA_CFLAGS=-Idrivers/staging/dream/include
+obj-$(CONFIG_MSM_ADSP)		+= qdsp5/
+obj-$(CONFIG_MSM_CAMERA)	+= camera/
+obj-$(CONFIG_INPUT_GPIO)	+= gpio_axis.o gpio_event.o gpio_input.o gpio_matrix.o gpio_output.o
+
diff --git a/drivers/staging/gobi/Kconfig b/drivers/staging/gobi/Kconfig
new file mode 100644
index 0000000..99d3b8d
--- /dev/null
+++ b/drivers/staging/gobi/Kconfig
@@ -0,0 +1,5 @@
+config GOBI_USBNET
+        tristate "Qualcomm GOBI2k and QCQMI support"
+        help
+          This module adds network device support for GOBI2k 3G radios.
diff --git a/drivers/staging/gobi/QCUSBNet2k/Makefile b/drivers/staging/gobi/QCUSBNet2k/Makefile
new file mode 100755
index 0000000..66c1590
--- /dev/null
+++ b/drivers/staging/gobi/QCUSBNet2k/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_GOBI_USBNET) += QCUSBNet2k.o
+QCUSBNet2k-objs += QCUSBNet.o QMIDevice.o QMI.o
diff --git a/drivers/staging/gobi/QCUSBNet2k/QCUSBNet.c b/drivers/staging/gobi/QCUSBNet2k/QCUSBNet.c
new file mode 100644
index 0000000..e7f72e7
--- /dev/null
+++ b/drivers/staging/gobi/QCUSBNet2k/QCUSBNet.c
@@ -0,0 +1,1227 @@
+/*===========================================================================
+FILE:
+   QCUSBNet.c
+
+DESCRIPTION:
+   Qualcomm USB Network device for Gobi 2000 
+   
+FUNCTIONS:
+   QCSuspend
+   QCResume
+   QCNetDriverBind
+   QCNetDriverUnbind
+   QCUSBNetURBCallback
+   QCUSBNetTXTimeout
+   QCUSBNetAutoPMThread
+   QCUSBNetStartXmit
+   QCUSBNetOpen
+   QCUSBNetStop
+   QCUSBNetProbe
+   QCUSBNetModInit
+   QCUSBNetModExit
+
+Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License version 2 and
+only version 2 as published by the Free Software Foundation.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA.
+
+===========================================================================*/
+
+//---------------------------------------------------------------------------
+// Include Files
+//---------------------------------------------------------------------------
+
+#include "Structs.h"
+#include "QMIDevice.h"
+#include "QMI.h"
+
+//-----------------------------------------------------------------------------
+// Definitions
+//-----------------------------------------------------------------------------
+
+// Version Information
+#define DRIVER_VERSION "1.0.110"
+#define DRIVER_DESC "QCUSBNet2k"
+
+// Debug flag
+int debug;
+
+// Class should be created during module init, so needs to be global
+static struct class * gpClass;
+
+/*===========================================================================
+METHOD:
+   QCSuspend (Public Method)
+
+DESCRIPTION:
+   Stops QMI traffic while device is suspended
+
+PARAMETERS
+   pIntf          [ I ] - Pointer to interface
+   powerEvent     [ I ] - Power management event
+
+RETURN VALUE:
+   int - 0 for success
+         negative errno for failure
+===========================================================================*/
+int QCSuspend( 
+   struct usb_interface *     pIntf,
+   pm_message_t               powerEvent )
+{
+   struct usbnet * pDev;
+   sQCUSBNet * pQCDev;
+   
+   if (pIntf == 0)
+   {
+      return -ENOMEM;
+   }
+   
+#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,23 ))
+   pDev = usb_get_intfdata( pIntf );
+#else
+   pDev = (struct usbnet *)pIntf->dev.platform_data;
+#endif
+
+   if (pDev == NULL || pDev->net == NULL)
+   {
+      DBG( "failed to get netdevice\n" );
+      return -ENXIO;
+   }
+   
+   pQCDev = (sQCUSBNet *)pDev->data[0];
+   if (pQCDev == NULL)
+   {
+      DBG( "failed to get QMIDevice\n" );
+      return -ENXIO;
+   }
+
+   // Is this autosuspend or system suspend?
+   //    do we allow remote wakeup?
+#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,33 ))
+   if (pDev->udev->auto_pm == 0)
+#else
+   if ((powerEvent.event & PM_EVENT_AUTO) == 0)
+#endif
+   {
+      DBG( "device suspended to power level %d\n", 
+           powerEvent.event );
+      QSetDownReason( pQCDev, DRIVER_SUSPENDED );
+   }
+   else
+   {
+      DBG( "device autosuspend\n" );
+   }    
+
+   if (powerEvent.event & PM_EVENT_SUSPEND)
+   {
+      // Stop QMI read callbacks
+      KillRead( pQCDev );
+      pDev->udev->reset_resume = 0;
+      
+      // Store power state to avoid duplicate resumes
+      pIntf->dev.power.power_state.event = powerEvent.event;
+   }
+   else
+   {
+      // Other power modes cause QMI connection to be lost
+      pDev->udev->reset_resume = 1;
+   }
+   
+   // Run usbnet's suspend function
+   return usbnet_suspend( pIntf, powerEvent );
+}
+   
+/*===========================================================================
+METHOD:
+   QCResume (Public Method)
+
+DESCRIPTION:
+   Resume QMI traffic or recreate QMI device
+
+PARAMETERS
+   pIntf          [ I ] - Pointer to interface
+
+RETURN VALUE:
+   int - 0 for success
+         negative errno for failure
+===========================================================================*/
+int QCResume( struct usb_interface * pIntf )
+{
+   struct usbnet * pDev;
+   sQCUSBNet * pQCDev;
+   int nRet;
+   int oldPowerState;
+   
+   if (pIntf == 0)
+   {
+      return -ENOMEM;
+   }
+   
+#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,23 ))
+   pDev = usb_get_intfdata( pIntf );
+#else
+   pDev = (struct usbnet *)pIntf->dev.platform_data;
+#endif
+
+   if (pDev == NULL || pDev->net == NULL)
+   {
+      DBG( "failed to get netdevice\n" );
+      return -ENXIO;
+   }
+   
+   pQCDev = (sQCUSBNet *)pDev->data[0];
+   if (pQCDev == NULL)
+   {
+      DBG( "failed to get QMIDevice\n" );
+      return -ENXIO;
+   }
+
+   oldPowerState = pIntf->dev.power.power_state.event;
+   pIntf->dev.power.power_state.event = PM_EVENT_ON;
+   DBG( "resuming from power mode %d\n", oldPowerState );
+
+   if (oldPowerState & PM_EVENT_SUSPEND)
+   {
+      // It doesn't matter if this is autoresume or system resume
+      QClearDownReason( pQCDev, DRIVER_SUSPENDED );
+   
+      nRet = usbnet_resume( pIntf );
+      if (nRet != 0)
+      {
+         DBG( "usbnet_resume error %d\n", nRet );
+         return nRet;
+      }
+
+      // Restart QMI read callbacks
+      nRet = StartRead( pQCDev );
+      if (nRet != 0)
+      {
+         DBG( "StartRead error %d\n", nRet );
+         return nRet;
+      }
+
+      // Kick Auto PM thread to process any queued URBs
+      up( &pQCDev->mAutoPM.mThreadDoWork );
+   }
+   else
+   {
+      DBG( "nothing to resume\n" );
+      return 0;
+   }
+   
+   return nRet;
+}
+
+/*===========================================================================
+METHOD:
+   QCNetDriverBind (Public Method)
+
+DESCRIPTION:
+   Setup in and out pipes
+
+PARAMETERS
+   pDev           [ I ] - Pointer to usbnet device
+   pIntf          [ I ] - Pointer to interface
+
+RETURN VALUE:
+   int - 0 for success
+         Negative errno for error
+===========================================================================*/
+static int QCNetDriverBind( 
+   struct usbnet *         pDev, 
+   struct usb_interface *  pIntf )
+{
+   int numEndpoints;
+   int endpointIndex;
+   struct usb_host_endpoint * pEndpoint = NULL;
+   struct usb_host_endpoint * pIn = NULL;
+   struct usb_host_endpoint * pOut = NULL;
+   
+   // Verify one altsetting
+   if (pIntf->num_altsetting != 1)
+   {
+      DBG( "invalid num_altsetting %u\n", pIntf->num_altsetting );
+      return -EINVAL;
+   }
+
+   // Verify correct interface (0)
+   if (pIntf->cur_altsetting->desc.bInterfaceNumber != 0)
+   {
+      DBG( "invalid interface %d\n", 
+           pIntf->cur_altsetting->desc.bInterfaceNumber );
+      return -EINVAL;
+   }
+   
+   // Collect In and Out endpoints
+   numEndpoints = pIntf->cur_altsetting->desc.bNumEndpoints;
+   for (endpointIndex = 0; endpointIndex < numEndpoints; endpointIndex++)
+   {
+      pEndpoint = pIntf->cur_altsetting->endpoint + endpointIndex;
+      if (pEndpoint == NULL)
+      {
+         DBG( "invalid endpoint %u\n", endpointIndex );
+         return -EINVAL;
+      }
+      
+      if (usb_endpoint_dir_in( &pEndpoint->desc ) == true
+      &&  usb_endpoint_xfer_int( &pEndpoint->desc ) == false)
+      {
+         pIn = pEndpoint;
+      }
+      else if (usb_endpoint_dir_out( &pEndpoint->desc ) == true)
+      {
+         pOut = pEndpoint;
+      }
+   }
+   
+   if (pIn == NULL || pOut == NULL)
+   {
+      DBG( "invalid endpoints\n" );
+      return -EINVAL;
+   }
+
+   if (usb_set_interface( pDev->udev, 
+                          pIntf->cur_altsetting->desc.bInterfaceNumber,
+                          0 ) != 0)
+   {
+      DBG( "unable to set interface\n" );
+      return -EINVAL;
+   }
+
+   pDev->in = usb_rcvbulkpipe( pDev->udev,
+                   pIn->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK );
+   pDev->out = usb_sndbulkpipe( pDev->udev,
+                   pOut->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK );
+                   
+   DBG( "in %x, out %x\n", 
+        pIn->desc.bEndpointAddress, 
+        pOut->desc.bEndpointAddress );
+
+   // In later versions of the kernel, usbnet helps with this
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,23 ))
+   pIntf->dev.platform_data = (void *)pDev;
+#endif
+
+   return 0;
+}
+
+/*===========================================================================
+METHOD:
+   QCNetDriverUnbind (Public Method)
+
+DESCRIPTION:
+   Deregisters QMI device (Registration happened in the probe function)
+
+PARAMETERS
+   pDev           [ I ] - Pointer to usbnet device
+   pIntf          [ I ] - Pointer to interface
+
+RETURN VALUE:
+   None
+===========================================================================*/
+static void QCNetDriverUnbind( 
+   struct usbnet *         pDev, 
+   struct usb_interface *  pIntf)
+{
+   sQCUSBNet * pQCDev = (sQCUSBNet *)pDev->data[0];
+
+   // Should already be down, but just in case...
+   netif_carrier_off( pDev->net );
+
+   DeregisterQMIDevice( pQCDev );
+   
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION( 2,6,29 ))
+   kfree( pDev->net->netdev_ops );
+   pDev->net->netdev_ops = NULL;
+#endif
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,23 ))
+   pIntf->dev.platform_data = NULL;
+#endif
+
+   kfree( pQCDev );
+   pQCDev = NULL;
+}
+
+/*===========================================================================
+METHOD:
+   QCUSBNetURBCallback (Public Method)
+
+DESCRIPTION:
+   Write is complete, cleanup and signal that we're ready for next packet
+
+PARAMETERS
+   pURB     [ I ] - Pointer to sAutoPM struct
+
+RETURN VALUE:
+   None
+===========================================================================*/
+void QCUSBNetURBCallback( struct urb * pURB )
+{
+   unsigned long activeURBflags;
+   sAutoPM * pAutoPM = (sAutoPM *)pURB->context;
+   if (pAutoPM == NULL)
+   {
+      // Should never happen
+      DBG( "bad context\n" );
+      return;
+   }
+
+   if (pURB->status != 0)
+   {
+      // Note that in case of an error, the behaviour is no different
+      DBG( "urb finished with error %d\n", pURB->status );
+   }
+
+   // Remove activeURB (memory to be freed later)
+   spin_lock_irqsave( &pAutoPM->mActiveURBLock, activeURBflags );
+
+   // EAGAIN used to signify callback is done
+   pAutoPM->mpActiveURB = ERR_PTR( -EAGAIN );
+
+   spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
+
+   up( &pAutoPM->mThreadDoWork );
+   
+   usb_free_urb( pURB );
+}
+
+/*===========================================================================
+METHOD:
+   QCUSBNetTXTimeout (Public Method)
+
+DESCRIPTION:
+   Timeout declared by the net driver.  Stop all transfers
+
+PARAMETERS
+   pNet     [ I ] - Pointer to net device
+
+RETURN VALUE:
+   None
+===========================================================================*/
+void QCUSBNetTXTimeout( struct net_device * pNet )
+{
+   struct sQCUSBNet * pQCDev;
+   sAutoPM * pAutoPM;
+   sURBList * pURBListEntry;
+   unsigned long activeURBflags, URBListFlags;
+   struct usbnet * pDev = netdev_priv( pNet );
+
+   if (pDev == NULL || pDev->net == NULL)
+   {
+      DBG( "failed to get usbnet device\n" );
+      return;
+   }
+   
+   pQCDev = (sQCUSBNet *)pDev->data[0];
+   if (pQCDev == NULL)
+   {
+      DBG( "failed to get QMIDevice\n" );
+      return;
+   }
+   pAutoPM = &pQCDev->mAutoPM;
+
+   DBG( "\n" );
+
+   // Stop activeURB
+   spin_lock_irqsave( &pAutoPM->mActiveURBLock, activeURBflags );
+
+   if (pAutoPM->mpActiveURB != NULL)
+   {
+      usb_kill_urb( pAutoPM->mpActiveURB );
+   }
+
+   spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
+
+   // Cleanup URB List
+   spin_lock_irqsave( &pAutoPM->mURBListLock, URBListFlags );
+
+   pURBListEntry = pAutoPM->mpURBList;
+   while (pURBListEntry != NULL)
+   {
+      pAutoPM->mpURBList = pAutoPM->mpURBList->mpNext;
+      usb_free_urb( pURBListEntry->mpURB );
+      kfree( pURBListEntry );
+      pURBListEntry = pAutoPM->mpURBList;
+   }
+
+   spin_unlock_irqrestore( &pAutoPM->mURBListLock, URBListFlags );
+
+   up( &pAutoPM->mThreadDoWork );
+
+   return;
+}
+
+/*===========================================================================
+METHOD:
+   QCUSBNetAutoPMThread (Public Method)
+
+DESCRIPTION:
+   Handle device Auto PM state asynchronously
+   Handle network packet transmission asynchronously
+
+PARAMETERS
+   pData     [ I ] - Pointer to sAutoPM struct
+
+RETURN VALUE:
+   int - 0 for success
+         Negative errno for error
+===========================================================================*/
+static int QCUSBNetAutoPMThread( void * pData )
+{
+   unsigned long activeURBflags, URBListFlags;
+   sURBList * pURBListEntry;
+   int status;
+   struct usb_device * pUdev;
+   sAutoPM * pAutoPM = (sAutoPM *)pData;
+   if (pAutoPM == NULL)
+   {
+      DBG( "passed null pointer\n" );
+      return -EINVAL;
+   }
+   
+   pUdev = interface_to_usbdev( pAutoPM->mpIntf );
+
+   DBG( "traffic thread started\n" );
+
+   while (pAutoPM->mbExit == false)
+   {
+      // Wait for someone to poke us
+      down( &pAutoPM->mThreadDoWork );
+
+      // Time to exit?
+      if (pAutoPM->mbExit == true)
+      {
+         // Stop activeURB
+         spin_lock_irqsave( &pAutoPM->mActiveURBLock, activeURBflags );
+
+         if (pAutoPM->mpActiveURB != NULL)
+         {
+            usb_kill_urb( pAutoPM->mpActiveURB );
+         }
+         // Will be freed in callback function
+
+         spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
+
+         // Cleanup URB List
+         spin_lock_irqsave( &pAutoPM->mURBListLock, URBListFlags );
+
+         pURBListEntry = pAutoPM->mpURBList;
+         while (pURBListEntry != NULL)
+         {
+            pAutoPM->mpURBList = pAutoPM->mpURBList->mpNext;
+            usb_free_urb( pURBListEntry->mpURB );
+            kfree( pURBListEntry );
+            pURBListEntry = pAutoPM->mpURBList;
+         }
+
+         spin_unlock_irqrestore( &pAutoPM->mURBListLock, URBListFlags );
+
+         break;
+      }
+      
+      // Is our URB active?
+      spin_lock_irqsave( &pAutoPM->mActiveURBLock, activeURBflags );
+
+      // EAGAIN used to signify callback is done
+      if (IS_ERR( pAutoPM->mpActiveURB ) 
+      &&  PTR_ERR( pAutoPM->mpActiveURB ) == -EAGAIN )
+      {
+         pAutoPM->mpActiveURB = NULL;
+
+         // Restore IRQs so task can sleep
+         spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
+         
+         // URB is done, decrement the Auto PM usage count
+         usb_autopm_put_interface( pAutoPM->mpIntf );
+
+         // Lock ActiveURB again
+         spin_lock_irqsave( &pAutoPM->mActiveURBLock, activeURBflags );
+      }
+
+      if (pAutoPM->mpActiveURB != NULL)
+      {
+         // There is already a URB active, go back to sleep
+         spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
+         continue;
+      }
+      
+      // Is there a URB waiting to be submitted?
+      spin_lock_irqsave( &pAutoPM->mURBListLock, URBListFlags );
+      if (pAutoPM->mpURBList == NULL)
+      {
+         // No more URBs to submit, go back to sleep
+         spin_unlock_irqrestore( &pAutoPM->mURBListLock, URBListFlags );
+         spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
+         continue;
+      }
+
+      // Pop an element
+      pURBListEntry = pAutoPM->mpURBList;
+      pAutoPM->mpURBList = pAutoPM->mpURBList->mpNext;
+      spin_unlock_irqrestore( &pAutoPM->mURBListLock, URBListFlags );
+
+      // Set ActiveURB
+      pAutoPM->mpActiveURB = pURBListEntry->mpURB;
+      spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
+
+      // Tell autopm core we need device woken up
+      status = usb_autopm_get_interface( pAutoPM->mpIntf );
+      if (status < 0)
+      {
+         DBG( "unable to autoresume interface: %d\n", status );
+
+         // likely caused by device going from autosuspend -> full suspend
+         if (status == -EPERM)
+         {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,33 ))
+            pUdev->auto_pm = 0;
+#endif
+            QCSuspend( pAutoPM->mpIntf, PMSG_SUSPEND );
+         }
+
+         // Add pURBListEntry back onto pAutoPM->mpURBList
+         spin_lock_irqsave( &pAutoPM->mURBListLock, URBListFlags );
+         pURBListEntry->mpNext = pAutoPM->mpURBList;
+         pAutoPM->mpURBList = pURBListEntry;
+         spin_unlock_irqrestore( &pAutoPM->mURBListLock, URBListFlags );
+         
+         spin_lock_irqsave( &pAutoPM->mActiveURBLock, activeURBflags );
+         pAutoPM->mpActiveURB = NULL;
+         spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
+         
+         // Go back to sleep
+         continue;
+      }
+
+      // Submit URB
+      status = usb_submit_urb( pAutoPM->mpActiveURB, GFP_KERNEL );
+      if (status < 0)
+      {
+         // Could happen for a number of reasons
+         DBG( "Failed to submit URB: %d.  Packet dropped\n", status );
+         spin_lock_irqsave( &pAutoPM->mActiveURBLock, activeURBflags );
+         usb_free_urb( pAutoPM->mpActiveURB );
+         pAutoPM->mpActiveURB = NULL;
+         spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
+         usb_autopm_put_interface( pAutoPM->mpIntf );
+
+         // Loop again
+         up( &pAutoPM->mThreadDoWork );
+      }
+      
+      kfree( pURBListEntry );
+   }   
+   
+   DBG( "traffic thread exiting\n" );
+   pAutoPM->mpThread = NULL;
+   return 0;
+}      
+
+/*===========================================================================
+METHOD:
+   QCUSBNetStartXmit (Public Method)
+
+DESCRIPTION:
+   Convert sk_buff to usb URB and queue for transmit
+
+PARAMETERS
+   pNet     [ I ] - Pointer to net device
+
+RETURN VALUE:
+   NETDEV_TX_OK on success
+   NETDEV_TX_BUSY on error
+===========================================================================*/
+int QCUSBNetStartXmit( 
+   struct sk_buff *     pSKB,
+   struct net_device *  pNet )
+{
+   unsigned long URBListFlags;
+   struct sQCUSBNet * pQCDev;
+   sAutoPM * pAutoPM;
+   sURBList * pURBListEntry, ** ppURBListEnd;
+   void * pURBData;
+   struct usbnet * pDev = netdev_priv( pNet );
+   
+   DBG( "\n" );
+   
+   if (pDev == NULL || pDev->net == NULL)
+   {
+      DBG( "failed to get usbnet device\n" );
+      return NETDEV_TX_BUSY;
+   }
+   
+   pQCDev = (sQCUSBNet *)pDev->data[0];
+   if (pQCDev == NULL)
+   {
+      DBG( "failed to get QMIDevice\n" );
+      return NETDEV_TX_BUSY;
+   }
+   pAutoPM = &pQCDev->mAutoPM;
+   
+   if (QTestDownReason( pQCDev, DRIVER_SUSPENDED ) == true)
+   {
+      // Should not happen
+      DBG( "device is suspended\n" );
+      dump_stack();
+      return NETDEV_TX_BUSY;
+   }
+   
+   // Convert the sk_buff into a URB
+
+   // Allocate URBListEntry
+   pURBListEntry = kmalloc( sizeof( sURBList ), GFP_ATOMIC );
+   if (pURBListEntry == NULL)
+   {
+      DBG( "unable to allocate URBList memory\n" );
+      return NETDEV_TX_BUSY;
+   }
+   pURBListEntry->mpNext = NULL;
+
+   // Allocate URB
+   pURBListEntry->mpURB = usb_alloc_urb( 0, GFP_ATOMIC );
+   if (pURBListEntry->mpURB == NULL)
+   {
+      DBG( "unable to allocate URB\n" );
+      kfree( pURBListEntry );
+      return NETDEV_TX_BUSY;
+   }
+
+   // Allocate URB transfer_buffer
+   pURBData = kmalloc( pSKB->len, GFP_ATOMIC );
+   if (pURBData == NULL)
+   {
+      DBG( "unable to allocate URB data\n" );
+      usb_free_urb( pURBListEntry->mpURB );
+      kfree( pURBListEntry );
+      return NETDEV_TX_BUSY;
+   }
+   // Fill with the SKB's data
+   memcpy( pURBData, pSKB->data, pSKB->len );
+
+   usb_fill_bulk_urb( pURBListEntry->mpURB,
+                      pQCDev->mpNetDev->udev,
+                      pQCDev->mpNetDev->out,
+                      pURBData,
+                      pSKB->len,
+                      QCUSBNetURBCallback,
+                      pAutoPM );
+   
+   // Acquire lock on URBList
+   spin_lock_irqsave( &pAutoPM->mURBListLock, URBListFlags );
+   
+   // Add URB to end of list
+   ppURBListEnd = &pAutoPM->mpURBList;
+   while ((*ppURBListEnd) != NULL)
+   {
+      ppURBListEnd = &(*ppURBListEnd)->mpNext;
+   }
+   *ppURBListEnd = pURBListEntry;
+
+   spin_unlock_irqrestore( &pAutoPM->mURBListLock, URBListFlags );
+
+   up( &pAutoPM->mThreadDoWork );
+
+   // Start transfer timer
+   pNet->trans_start = jiffies;
+   // Free SKB
+   dev_kfree_skb_any( pSKB );
+
+   return NETDEV_TX_OK;
+}
+
+/*===========================================================================
+METHOD:
+   QCUSBNetOpen (Public Method)
+
+DESCRIPTION:
+   Wrapper to usbnet_open, correctly handling autosuspend
+   Start AutoPM thread
+
+PARAMETERS
+   pNet     [ I ] - Pointer to net device
+
+RETURN VALUE:
+   int - 0 for success
+         Negative errno for error
+===========================================================================*/
+int QCUSBNetOpen( struct net_device * pNet )
+{
+   int status = 0;
+   struct sQCUSBNet * pQCDev;
+   struct usbnet * pDev = netdev_priv( pNet );
+   
+   if (pDev == NULL)
+   {
+      DBG( "failed to get usbnet device\n" );
+      return -ENXIO;
+   }
+   
+   pQCDev = (sQCUSBNet *)pDev->data[0];
+   if (pQCDev == NULL)
+   {
+      DBG( "failed to get QMIDevice\n" );
+      return -ENXIO;
+   }
+
+   DBG( "\n" );
+
+   // Start the AutoPM thread
+   pQCDev->mAutoPM.mpIntf = pQCDev->mpIntf;
+   pQCDev->mAutoPM.mbExit = false;
+   pQCDev->mAutoPM.mpURBList = NULL;
+   pQCDev->mAutoPM.mpActiveURB = NULL;
+   spin_lock_init( &pQCDev->mAutoPM.mURBListLock );
+   spin_lock_init( &pQCDev->mAutoPM.mActiveURBLock );
+   sema_init( &pQCDev->mAutoPM.mThreadDoWork, 0 );
+   
+   pQCDev->mAutoPM.mpThread = kthread_run( QCUSBNetAutoPMThread, 
+                              &pQCDev->mAutoPM, 
+                              "QCUSBNetAutoPMThread" );
+   if (IS_ERR( pQCDev->mAutoPM.mpThread ))
+   {
+      DBG( "AutoPM thread creation error\n" );
+      return PTR_ERR( pQCDev->mAutoPM.mpThread );
+   }
+
+   // Allow traffic
+   QClearDownReason( pQCDev, NET_IFACE_STOPPED );
+
+   // Pass to usbnet_open if defined
+   if (pQCDev->mpUSBNetOpen != NULL)
+   {
+      status = pQCDev->mpUSBNetOpen( pNet );
+   
+      // If usbnet_open was successful enable Auto PM
+      if (status == 0)
+      {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,33 ))
+         usb_autopm_enable( pQCDev->mpIntf );
+#else
+         usb_autopm_put_interface( pQCDev->mpIntf );
+#endif
+      }
+   }
+   else
+   {
+      DBG( "no USBNetOpen defined\n" );
+   }
+   
+   return status;
+}
+
+/*===========================================================================
+METHOD:
+   QCUSBNetStop (Public Method)
+
+DESCRIPTION:
+   Wrapper to usbnet_stop, correctly handling autosuspend
+   Stop AutoPM thread
+
+PARAMETERS
+   pNet     [ I ] - Pointer to net device
+
+RETURN VALUE:
+   int - 0 for success
+         Negative errno for error
+===========================================================================*/
+int QCUSBNetStop( struct net_device * pNet )
+{
+   struct sQCUSBNet * pQCDev;
+   struct usbnet * pDev = netdev_priv( pNet );
+
+   if (pDev == NULL || pDev->net == NULL)
+   {
+      DBG( "failed to get netdevice\n" );
+      return -ENXIO;
+   }
+   
+   pQCDev = (sQCUSBNet *)pDev->data[0];
+   if (pQCDev == NULL)
+   {
+      DBG( "failed to get QMIDevice\n" );
+      return -ENXIO;
+   }
+
+   // Stop traffic
+   QSetDownReason( pQCDev, NET_IFACE_STOPPED );
+
+   // Tell traffic thread to exit
+   pQCDev->mAutoPM.mbExit = true;
+   up( &pQCDev->mAutoPM.mThreadDoWork );
+   
+   // Wait for it to exit
+   while( pQCDev->mAutoPM.mpThread != NULL )
+   {
+      msleep( 100 );
+   }
+   DBG( "thread stopped\n" );
+
+   // Pass to usbnet_stop, if defined
+   if (pQCDev->mpUSBNetStop != NULL)
+   {
+      return pQCDev->mpUSBNetStop( pNet );
+   }
+   else
+   {
+      return 0;
+   }
+}
+
+/*=========================================================================*/
+// Struct driver_info
+/*=========================================================================*/
+static const struct driver_info QCNetInfo = 
+{
+   .description   = "QCUSBNet Ethernet Device",
+   .flags         = FLAG_ETHER,
+   .bind          = QCNetDriverBind,
+   .unbind        = QCNetDriverUnbind,
+   .data          = 0,
+};
+
+/*=========================================================================*/
+// Qualcomm Gobi 2000 VID/PIDs
+/*=========================================================================*/
+static const struct usb_device_id QCVIDPIDTable [] =
+{
+   // Acer Gobi 2000
+   {
+      USB_DEVICE( 0x05c6, 0x9215 ),
+      .driver_info = (unsigned long)&QCNetInfo
+   },
+   // Asus Gobi 2000
+   {
+      USB_DEVICE( 0x05c6, 0x9265 ),
+      .driver_info = (unsigned long)&QCNetInfo
+   },
+   // CMOTech Gobi 2000
+   {
+      USB_DEVICE( 0x16d8, 0x8002 ),
+      .driver_info = (unsigned long)&QCNetInfo
+   },
+   // Dell Gobi 2000
+   {
+      USB_DEVICE( 0x413c, 0x8186 ),
+      .driver_info = (unsigned long)&QCNetInfo
+   },
+   // Entourage Gobi 2000
+   {
+      USB_DEVICE( 0x1410, 0xa010 ),
+      .driver_info = (unsigned long)&QCNetInfo
+   },
+   // Entourage Gobi 2000
+   {
+      USB_DEVICE( 0x1410, 0xa011 ),
+      .driver_info = (unsigned long)&QCNetInfo
+   },
+   // Entourage Gobi 2000
+   {
+      USB_DEVICE( 0x1410, 0xa012 ),
+      .driver_info = (unsigned long)&QCNetInfo
+   },
+   // Entourage Gobi 2000
+   {
+      USB_DEVICE( 0x1410, 0xa013 ),
+      .driver_info = (unsigned long)&QCNetInfo
+   },
+   // HP Gobi 2000
+   { 
+      USB_DEVICE( 0x03f0, 0x251d ),
+      .driver_info = (unsigned long)&QCNetInfo 
+   },
+   // Lenovo Gobi 2000
+   {
+      USB_DEVICE( 0x05c6, 0x9205 ),
+      .driver_info = (unsigned long)&QCNetInfo
+   },
+   // Panasonic Gobi 2000
+   {
+      USB_DEVICE( 0x04da, 0x250f ),
+      .driver_info = (unsigned long)&QCNetInfo
+   },
+   // Samsung Gobi 2000
+   {
+      USB_DEVICE( 0x05c6, 0x9245 ),
+      .driver_info = (unsigned long)&QCNetInfo
+   },
+   // Sierra Wireless Gobi 2000
+   {
+      USB_DEVICE( 0x1199, 0x9001 ),
+      .driver_info = (unsigned long)&QCNetInfo
+   },
+   // Sierra Wireless Gobi 2000
+   {
+      USB_DEVICE( 0x1199, 0x9002 ),
+      .driver_info = (unsigned long)&QCNetInfo
+   },
+   // Sierra Wireless Gobi 2000
+   {
+      USB_DEVICE( 0x1199, 0x9003 ),
+      .driver_info = (unsigned long)&QCNetInfo
+   },
+   // Sierra Wireless Gobi 2000
+   {
+      USB_DEVICE( 0x1199, 0x9004 ),
+      .driver_info = (unsigned long)&QCNetInfo
+   },
+   // Sierra Wireless Gobi 2000
+   {
+      USB_DEVICE( 0x1199, 0x9005 ),
+      .driver_info = (unsigned long)&QCNetInfo
+   },
+   // Sierra Wireless Gobi 2000
+   {
+      USB_DEVICE( 0x1199, 0x9006 ),
+      .driver_info = (unsigned long)&QCNetInfo
+   },
+   // Sierra Wireless Gobi 2000
+   {
+      USB_DEVICE( 0x1199, 0x9007 ),
+      .driver_info = (unsigned long)&QCNetInfo
+   },
+   // Sierra Wireless Gobi 2000
+   {
+      USB_DEVICE( 0x1199, 0x9008 ),
+      .driver_info = (unsigned long)&QCNetInfo
+   },
+   // Sierra Wireless Gobi 2000
+   {
+      USB_DEVICE( 0x1199, 0x9009 ),
+      .driver_info = (unsigned long)&QCNetInfo
+   },
+   // Sierra Wireless Gobi 2000
+   {
+      USB_DEVICE( 0x1199, 0x900a ),
+      .driver_info = (unsigned long)&QCNetInfo
+   },
+   // Sony Gobi 2000
+   {
+      USB_DEVICE( 0x05c6, 0x9225 ),
+      .driver_info = (unsigned long)&QCNetInfo
+   },
+   // Top Global Gobi 2000
+   {
+      USB_DEVICE( 0x05c6, 0x9235 ),
+      .driver_info = (unsigned long)&QCNetInfo
+   },
+   // iRex Technologies Gobi 2000
+   {
+      USB_DEVICE( 0x05c6, 0x9275 ),
+      .driver_info = (unsigned long)&QCNetInfo
+   },
+   // Generic Gobi 2000
+   {
+      USB_DEVICE( 0x05c6, 0x920b ),
+      .driver_info = (unsigned long)&QCNetInfo
+   },
+
+   //Terminating entry
+   { }
+};
+
+MODULE_DEVICE_TABLE( usb, QCVIDPIDTable );
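+
+// Supporting another Gobi 2000 rebrand is a one-entry addition to the table
+// above, placed before the terminating entry.  A hedged sketch follows; the
+// VID/PID shown is a placeholder, not a real device ID.
+#if 0
+   // Example vendor Gobi 2000
+   {
+      USB_DEVICE( 0x1234, 0x5678 ),
+      .driver_info = (unsigned long)&QCNetInfo
+   },
+#endif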
+
+/*===========================================================================
+METHOD:
+   QCUSBNetProbe (Public Method)
+
+DESCRIPTION:
+   Run usbnet_probe
+   Setup QMI device
+
+PARAMETERS
+   pIntf        [ I ] - Pointer to interface
+   pVIDPIDs     [ I ] - Pointer to VID/PID table
+
+RETURN VALUE:
+   int - 0 for success
+         Negative errno for error
+===========================================================================*/
+int QCUSBNetProbe( 
+   struct usb_interface *        pIntf, 
+   const struct usb_device_id *  pVIDPIDs )
+{
+   int status;
+   struct usbnet * pDev;
+   sQCUSBNet * pQCDev;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION( 2,6,29 ))
+   struct net_device_ops * pNetDevOps;
+#endif
+
+   status = usbnet_probe( pIntf, pVIDPIDs );
+   if(status < 0 )
+   {
+      DBG( "usbnet_probe failed %d\n", status );
+      return status;
+   }
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,23 ))
+   pDev = usb_get_intfdata( pIntf );
+#else
+   pDev = (struct usbnet *)pIntf->dev.platform_data;
+#endif
+
+   if (pDev == NULL || pDev->net == NULL)
+   {
+      DBG( "failed to get netdevice\n" );
+      return -ENXIO;
+   }
+
+   pQCDev = kmalloc( sizeof( sQCUSBNet ), GFP_KERNEL );
+   if (pQCDev == NULL)
+   {
+      DBG( "falied to allocate device buffers" );
+      return -ENOMEM;
+   }
+   
+   pDev->data[0] = (unsigned long)pQCDev;
+   
+   pQCDev->mpNetDev = pDev;
+
+   // Overload PM related network functions
+#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
+   pQCDev->mpUSBNetOpen = pDev->net->open;
+   pDev->net->open = QCUSBNetOpen;
+   pQCDev->mpUSBNetStop = pDev->net->stop;
+   pDev->net->stop = QCUSBNetStop;
+   pDev->net->hard_start_xmit = QCUSBNetStartXmit;
+   pDev->net->tx_timeout = QCUSBNetTXTimeout;
+#else
+   pNetDevOps = kmalloc( sizeof( struct net_device_ops ), GFP_KERNEL );
+   if (pNetDevOps == NULL)
+   {
+      DBG( "falied to allocate net device ops" );
+      return -ENOMEM;
+   }
+   memcpy( pNetDevOps, pDev->net->netdev_ops, sizeof( struct net_device_ops ) );
+   
+   pQCDev->mpUSBNetOpen = pNetDevOps->ndo_open;
+   pNetDevOps->ndo_open = QCUSBNetOpen;
+   pQCDev->mpUSBNetStop = pNetDevOps->ndo_stop;
+   pNetDevOps->ndo_stop = QCUSBNetStop;
+   pNetDevOps->ndo_start_xmit = QCUSBNetStartXmit;
+   pNetDevOps->ndo_tx_timeout = QCUSBNetTXTimeout;
+
+   pDev->net->netdev_ops = pNetDevOps;
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,31 ))
+   memset( &(pQCDev->mpNetDev->stats), 0, sizeof( struct net_device_stats ) );
+#else
+   memset( &(pQCDev->mpNetDev->net->stats), 0, sizeof( struct net_device_stats ) );
+#endif
+
+   pQCDev->mpIntf = pIntf;
+   memset( &(pQCDev->mMEID), '0', 14 );
+   
+   DBG( "Mac Address:\n" );
+   PrintHex( &pQCDev->mpNetDev->net->dev_addr[0], 6 );
+
+   pQCDev->mbQMIValid = false;
+   memset( &pQCDev->mQMIDev, 0, sizeof( sQMIDev ) );
+
+   pQCDev->mQMIDev.mpDevClass = gpClass;
+   
+   sema_init( &pQCDev->mAutoPM.mThreadDoWork, 0 );
+   spin_lock_init( &pQCDev->mQMIDev.mClientMemLock );
+
+   // Default to device down
+   pQCDev->mDownReason = 0;
+   QSetDownReason( pQCDev, NO_NDIS_CONNECTION );
+   QSetDownReason( pQCDev, NET_IFACE_STOPPED );
+
+   // Register QMI
+   status = RegisterQMIDevice( pQCDev );
+   if (status != 0)
+   {
+      // Clean up
+      DeregisterQMIDevice( pQCDev );
+      return status;
+   }
+   
+   // Success
+   return status;
+}
+
+EXPORT_SYMBOL_GPL( QCUSBNetProbe );
+
+static struct usb_driver QCUSBNet =
+{
+   .name       = "QCUSBNet2k",
+   .id_table   = QCVIDPIDTable,
+   .probe      = QCUSBNetProbe,
+   .disconnect = usbnet_disconnect,
+   .suspend    = QCSuspend,
+   .resume     = QCResume,
+   .supports_autosuspend = true,
+};
+
+/*===========================================================================
+METHOD:
+   QCUSBNetModInit (Public Method)
+
+DESCRIPTION:
+   Initialize module
+   Create device class
+   Register our usb_driver struct
+
+RETURN VALUE:
+   int - 0 for success
+         Negative errno for error
+===========================================================================*/
+static int __init QCUSBNetModInit( void )
+{
+   gpClass = class_create( THIS_MODULE, "QCQMI" );
+   if (IS_ERR( gpClass ) == true)
+   {
+      DBG( "error at class_create %ld\n",
+           PTR_ERR( gpClass ) );
+      return -ENOMEM;
+   }
+
+   // This will be shown whenever driver is loaded
+   printk( KERN_INFO "%s: %s\n", DRIVER_DESC, DRIVER_VERSION );
+
+   return usb_register( &QCUSBNet );
+}
+module_init( QCUSBNetModInit );
+
+/*===========================================================================
+METHOD:
+   QCUSBNetModExit (Public Method)
+
+DESCRIPTION:
+   Deregister module
+   Destroy device class
+
+RETURN VALUE:
+   void
+===========================================================================*/
+static void __exit QCUSBNetModExit( void )
+{
+   usb_deregister( &QCUSBNet );
+
+   class_destroy( gpClass );
+}
+module_exit( QCUSBNetModExit );
+
+#ifdef bool
+#undef bool
+#endif
+
+MODULE_VERSION( DRIVER_VERSION );
+MODULE_DESCRIPTION( DRIVER_DESC );
+MODULE_LICENSE( "GPL v2" );
+
+module_param( debug, bool, S_IRUGO | S_IWUSR );
+MODULE_PARM_DESC( debug, "Debugging enabled or not" );
+
diff --git a/drivers/staging/gobi/QCUSBNet2k/QMI.c b/drivers/staging/gobi/QCUSBNet2k/QMI.c
new file mode 100644
index 0000000..fe7eebe
--- /dev/null
+++ b/drivers/staging/gobi/QCUSBNet2k/QMI.c
@@ -0,0 +1,954 @@
+/*===========================================================================
+FILE:
+   QMI.c
+
+DESCRIPTION:
+   Qualcomm QMI driver code
+   
+FUNCTIONS:
+   Generic QMUX functions
+      ParseQMUX
+      FillQMUX
+   
+   Generic QMI functions
+      GetTLV
+      ValidQMIMessage
+      GetQMIMessageID
+
+   Fill Buffers with QMI requests
+      QMICTLGetClientIDReq
+      QMICTLReleaseClientIDReq
+      QMICTLReadyReq
+      QMIWDSSetEventReportReq
+      QMIWDSGetPKGSRVCStatusReq
+      QMIDMSGetMEIDReq
+      
+   Parse data from QMI responses
+      QMICTLGetClientIDResp
+      QMICTLReleaseClientIDResp
+      QMIWDSEventResp
+      QMIDMSGetMEIDResp
+
+Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License version 2 and
+only version 2 as published by the Free Software Foundation.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA.
+
+===========================================================================*/
+
+//---------------------------------------------------------------------------
+// Include Files
+//---------------------------------------------------------------------------
+#include "QMI.h"
+
+
+/*=========================================================================*/
+// Get sizes of buffers needed by QMI requests
+/*=========================================================================*/
+
+/*===========================================================================
+METHOD:
+   QMUXHeaderSize (Public Method)
+
+DESCRIPTION:
+   Get size of buffer needed for QMUX
+ 
+RETURN VALUE:
+   u16 - size of buffer
+===========================================================================*/
+u16 QMUXHeaderSize( void )
+{
+   return sizeof( sQMUX );
+}
+
+/*===========================================================================
+METHOD:
+   QMICTLGetClientIDReqSize (Public Method)
+
+DESCRIPTION:
+   Get size of buffer needed for QMUX + QMICTLGetClientIDReq
+ 
+RETURN VALUE:
+   u16 - size of buffer
+===========================================================================*/
+u16 QMICTLGetClientIDReqSize( void )
+{
+   return sizeof( sQMUX ) + 10;
+}
+
+/*===========================================================================
+METHOD:
+   QMICTLReleaseClientIDReqSize (Public Method)
+
+DESCRIPTION:
+   Get size of buffer needed for QMUX + QMICTLReleaseClientIDReq
+ 
+RETURN VALUE:
+   u16 - size of header
+===========================================================================*/
+u16 QMICTLReleaseClientIDReqSize( void )
+{
+   return sizeof( sQMUX ) + 11;
+}
+
+/*===========================================================================
+METHOD:
+   QMICTLReadyReqSize (Public Method)
+
+DESCRIPTION:
+   Get size of buffer needed for QMUX + QMICTLReadyReq
+ 
+RETURN VALUE:
+   u16 - size of buffer
+===========================================================================*/
+u16 QMICTLReadyReqSize( void )
+{
+   return sizeof( sQMUX ) + 6;
+}
+
+/*===========================================================================
+METHOD:
+   QMIWDSSetEventReportReqSize (Public Method)
+
+DESCRIPTION:
+   Get size of buffer needed for QMUX + QMIWDSSetEventReportReq
+ 
+RETURN VALUE:
+   u16 - size of buffer
+===========================================================================*/
+u16 QMIWDSSetEventReportReqSize( void )
+{
+   return sizeof( sQMUX ) + 15;
+}
+
+/*===========================================================================
+METHOD:
+   QMIWDSGetPKGSRVCStatusReqSize (Public Method)
+
+DESCRIPTION:
+   Get size of buffer needed for QMUX + QMIWDSGetPKGSRVCStatusReq
+ 
+RETURN VALUE:
+   u16 - size of buffer
+===========================================================================*/
+u16 QMIWDSGetPKGSRVCStatusReqSize( void )
+{
+   return sizeof( sQMUX ) + 7;
+}
+
+/*===========================================================================
+METHOD:
+   QMIDMSGetMEIDReqSize (Public Method)
+
+DESCRIPTION:
+   Get size of buffer needed for QMUX + QMIDMSGetMEIDReq
+ 
+RETURN VALUE:
+   u16 - size of buffer
+===========================================================================*/
+u16 QMIDMSGetMEIDReqSize( void )
+{
+   return sizeof( sQMUX ) + 7;
+}
+
+/*=========================================================================*/
+// Generic QMUX functions
+/*=========================================================================*/
+
+/*===========================================================================
+METHOD:
+   ParseQMUX (Public Method)
+
+DESCRIPTION:
+   Remove QMUX headers from a buffer
+
+PARAMETERS
+   pClientID       [ O ] - On success, will point to Client ID
+   pBuffer         [ I ] - Full Message passed in
+   buffSize        [ I ] - Size of pBuffer
+
+RETURN VALUE:
+   int - Positive for size of QMUX header
+         Negative errno for error
+===========================================================================*/
+int ParseQMUX(
+   u16 *    pClientID,
+   void *   pBuffer,
+   u16      buffSize )
+{
+   sQMUX * pQMUXHeader;
+   
+   if (pBuffer == 0 || buffSize < 12)
+   {
+      return -ENOMEM;
+   }
+
+   // QMUX Header
+   pQMUXHeader = (sQMUX *)pBuffer;
+
+   if (pQMUXHeader->mTF != 1
+   ||  pQMUXHeader->mLength != buffSize - 1
+   ||  pQMUXHeader->mCtrlFlag != 0x80 )
+   {
+      return -EINVAL;
+   }
+
+   // Client ID   
+   *pClientID = (pQMUXHeader->mQMIClientID << 8) 
+              + pQMUXHeader->mQMIService;
+   
+   return sizeof( sQMUX );
+}
+
+/*===========================================================================
+METHOD:
+   FillQMUX (Public Method)
+
+DESCRIPTION:
+   Fill buffer with QMUX headers
+
+PARAMETERS
+   clientID        [ I ] - Client ID
+   pBuffer         [ O ] - Buffer to be filled
+   buffSize        [ I ] - Size of pBuffer (must be at least 6)
+
+RETURN VALUE:
+   int - 0 for success
+         Negative errno for error
+===========================================================================*/
+int FillQMUX(
+   u16      clientID,
+   void *   pBuffer,
+   u16      buffSize )
+{
+   sQMUX * pQMUXHeader;
+
+   if (pBuffer == 0 ||  buffSize < sizeof( sQMUX ))
+   {
+      return -ENOMEM;
+   }
+
+   // QMUX Header
+   pQMUXHeader = (sQMUX *)pBuffer;
+
+   pQMUXHeader->mTF = 1;
+   pQMUXHeader->mLength = buffSize - 1;
+   pQMUXHeader->mCtrlFlag = 0;
+
+   // Service and Client ID   
+   pQMUXHeader->mQMIService = clientID & 0xff;
+   pQMUXHeader->mQMIClientID = clientID >> 8;
+
+   return 0;
+}
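+
+/* A hedged read-side sketch (compiled out): stripping the QMUX header from a
+ * buffer received from the device to recover the addressed client.  The
+ * helper name and buffer parameters are hypothetical, for illustration only. */
+#if 0
+static int ExampleStripQMUX( void * pReadBuffer, u16 readBufferSize )
+{
+   u16 clientID;
+   int headerSize = ParseQMUX( &clientID, pReadBuffer, readBufferSize );
+   if (headerSize < 0)
+   {
+      return headerSize;
+   }
+
+   // The QMI SDU starts at pReadBuffer + headerSize, addressed to clientID
+   return clientID;
+}
+#endif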
+
+/*=========================================================================*/
+// Generic QMI functions
+/*=========================================================================*/
+
+/*===========================================================================
+METHOD:
+   GetTLV (Public Method)
+
+DESCRIPTION:
+   Get data buffer of a specified TLV from a QMI message
+
+   QMI Message shall NOT include SDU
+   
+PARAMETERS
+   pQMIMessage    [ I ] - QMI Message buffer
+   messageLen     [ I ] - Size of QMI Message buffer
+   type           [ I ] - Desired Type
+   pOutDataBuf    [ O ] - Buffer to be filled with TLV
+   bufferLen      [ I ] - Size of pOutDataBuf
+
+RETURN VALUE:
+   u16 - Size of TLV for success
+         Negative errno for error
+===========================================================================*/
+u16 GetTLV(
+   void *   pQMIMessage,
+   u16      messageLen,
+   u8       type,
+   void *   pOutDataBuf,
+   u16      bufferLen )
+{
+   u16 pos;
+   u16 tlvSize = 0;
+   u16 cpyCount;
+   
+   if (pQMIMessage == 0 || pOutDataBuf == 0)
+   {
+      return -ENOMEM;
+   }   
+   
+   for (pos = 4; 
+        pos + 3 < messageLen; 
+        pos += tlvSize + 3)
+   {
+      tlvSize = *(u16 *)(pQMIMessage + pos + 1);
+      if (*(u8 *)(pQMIMessage + pos) == type)
+      {
+         if (bufferLen < tlvSize)
+         {
+            return -ENOMEM;
+         }
+        
+         /* replacement memcpy
+            memcpy( pOutDataBuf,
+                    pQMIMessage + pos + 3,
+                    tlvSize ); */
+         
+         for (cpyCount = 0; cpyCount < tlvSize; cpyCount++)
+         {
+            *((char*)(pOutDataBuf + cpyCount)) = *((char*)(pQMIMessage + pos + 3 + cpyCount));
+         }
+         
+         return tlvSize;
+      }
+   }
+   
+   return -ENOMSG;
+}
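+
+/* A hedged sketch (compiled out) of pulling a single TLV out of a QMI message
+ * whose QMUX and SDU headers have already been skipped; TLV type 0x01 is the
+ * client ID TLV that QMICTLGetClientIDResp() below looks for.  The helper
+ * name is hypothetical. */
+#if 0
+static int ExampleReadClientIDTLV( void * pQMIMessage, u16 messageLen, u16 * pID )
+{
+   u16 tlvLen = GetTLV( pQMIMessage, messageLen, 0x01, pID, 2 );
+   return (tlvLen == 2) ? 0 : -EFAULT;
+}
+#endif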
+
+/*===========================================================================
+METHOD:
+   ValidQMIMessage (Public Method)
+
+DESCRIPTION:
+   Check mandatory TLV in a QMI message
+
+   QMI Message shall NOT include SDU
+
+PARAMETERS
+   pQMIMessage    [ I ] - QMI Message buffer
+   messageLen     [ I ] - Size of QMI Message buffer
+
+RETURN VALUE:
+   int - 0 for success (no error)
+         Negative errno for error
+         Positive for QMI error code
+===========================================================================*/
+int ValidQMIMessage(
+   void *   pQMIMessage,
+   u16      messageLen )
+{
+   char mandTLV[4];
+
+   if (GetTLV( pQMIMessage, messageLen, 2, &mandTLV[0], 4 ) == 4)
+   {
+      // Found TLV
+      if (*(u16 *)&mandTLV[0] != 0)
+      {
+         return *(u16 *)&mandTLV[2];
+      }
+      else
+      {
+         return 0;
+      }
+   }
+   else
+   {
+      return -ENOMSG;
+   }
+}      
+
+/*===========================================================================
+METHOD:
+   GetQMIMessageID (Public Method)
+
+DESCRIPTION:
+   Get the message ID of a QMI message
+   
+   QMI Message shall NOT include SDU
+
+PARAMETERS
+   pQMIMessage    [ I ] - QMI Message buffer
+   messageLen     [ I ] - Size of QMI Message buffer
+
+RETURN VALUE:
+   int - Positive for message ID
+         Negative errno for error
+===========================================================================*/
+int GetQMIMessageID(
+   void *   pQMIMessage,
+   u16      messageLen )
+{
+   if (messageLen < 2)
+   {
+      return -ENODATA;
+   }
+   else
+   {
+      return *(u16 *)pQMIMessage;
+   }
+}
+
+/*=========================================================================*/
+// Fill Buffers with QMI requests
+/*=========================================================================*/
+
+/*===========================================================================
+METHOD:
+   QMICTLGetClientIDReq (Public Method)
+
+DESCRIPTION:
+   Fill buffer with QMI CTL Get Client ID Request
+
+PARAMETERS
+   pBuffer         [ O ] - Buffer to be filled
+   buffSize        [ I ] - Size of pBuffer
+   transactionID   [ I ] - Transaction ID
+   serviceType     [ I ] - Service type requested
+
+RETURN VALUE:
+   int - Positive for resulting size of pBuffer
+         Negative errno for error
+===========================================================================*/
+int QMICTLGetClientIDReq(
+   void *   pBuffer,
+   u16      buffSize,
+   u8       transactionID,
+   u8       serviceType )
+{
+   if (pBuffer == 0 || buffSize < QMICTLGetClientIDReqSize() )
+   {
+      return -ENOMEM;
+   }
+
+   // QMI CTL GET CLIENT ID
+   // Request
+   *(u8 *)(pBuffer + sizeof( sQMUX ))= 0x00;
+   // Transaction ID
+   *(u8 *)(pBuffer + sizeof( sQMUX ) + 1) = transactionID;
+   // Message ID
+   *(u16 *)(pBuffer + sizeof( sQMUX ) + 2) = 0x0022;
+   // Size of TLV's
+   *(u16 *)(pBuffer + sizeof( sQMUX ) + 4) = 0x0004;
+      // QMI Service Type
+      *(u8 *)(pBuffer + sizeof( sQMUX ) + 6)  = 0x01;
+      // Size
+      *(u16 *)(pBuffer + sizeof( sQMUX ) + 7) = 0x0001;
+      // QMI svc type
+      *(u8 *)(pBuffer + sizeof( sQMUX ) + 9)  = serviceType;
+
+  // success
+  return sizeof( sQMUX ) + 10;
+}
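+
+/* A hedged write-side sketch (compiled out): size the buffer, build the CTL
+ * request payload, then stamp the QMUX header for the CTL client (ID 0).
+ * The helper name, kmalloc usage, and transactionID are illustrative only. */
+#if 0
+static void * ExampleBuildGetClientIDReq( u8 transactionID, u16 * pSize )
+{
+   u16 writeBufferSize = QMICTLGetClientIDReqSize();
+   void * pWriteBuffer = kmalloc( writeBufferSize, GFP_KERNEL );
+   if (pWriteBuffer == NULL)
+   {
+      return NULL;
+   }
+
+   QMICTLGetClientIDReq( pWriteBuffer, writeBufferSize, transactionID, QMIWDS );
+   FillQMUX( QMICTL, pWriteBuffer, writeBufferSize );
+
+   // pWriteBuffer now holds a complete request, ready to send to the device
+   *pSize = writeBufferSize;
+   return pWriteBuffer;
+}
+#endif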
+
+/*===========================================================================
+METHOD:
+   QMICTLReleaseClientIDReq (Public Method)
+
+DESCRIPTION:
+   Fill buffer with QMI CTL Release Client ID Request
+
+PARAMETERS
+   pBuffer         [ O ] - Buffer to be filled
+   buffSize        [ I ] - Size of pBuffer
+   transactionID   [ I ] - Transaction ID
+   clientID        [ I ] - Client ID to be released
+
+RETURN VALUE:
+   int - Positive for resulting size of pBuffer
+         Negative errno for error
+===========================================================================*/
+int QMICTLReleaseClientIDReq(
+   void *   pBuffer,
+   u16      buffSize,
+   u8       transactionID,
+   u16      clientID )
+{
+   if (pBuffer == 0 || buffSize < QMICTLReleaseClientIDReqSize() )
+   {
+      return -ENOMEM;
+   }
+
+   // QMI CTL RELEASE CLIENT ID REQ
+   // Request
+   *(u8 *)(pBuffer + sizeof( sQMUX ))  = 0x00;
+   // Transaction ID
+   *(u8 *)(pBuffer + sizeof( sQMUX ) + 1 ) = transactionID;
+   // Message ID
+   *(u16 *)(pBuffer + sizeof( sQMUX ) + 2) = 0x0023;
+   // Size of TLV's
+   *(u16 *)(pBuffer + sizeof( sQMUX ) + 4) = 0x0005;
+      // Release client ID
+      *(u8 *)(pBuffer + sizeof( sQMUX ) + 6)  = 0x01;
+      // Size
+      *(u16 *)(pBuffer + sizeof( sQMUX ) + 7) = 0x0002;
+      // QMI svc type / Client ID
+      *(u16 *)(pBuffer + sizeof( sQMUX ) + 9)  = clientID;
+      
+  // success
+  return sizeof( sQMUX ) + 11;
+}
+
+/*===========================================================================
+METHOD:
+   QMICTLReadyReq (Public Method)
+
+DESCRIPTION:
+   Fill buffer with QMI CTL Get Version Info Request
+
+PARAMETERS
+   pBuffer         [ O ] - Buffer to be filled
+   buffSize        [ I ] - Size of pBuffer
+   transactionID   [ I ] - Transaction ID
+
+RETURN VALUE:
+   int - Positive for resulting size of pBuffer
+         Negative errno for error
+===========================================================================*/
+int QMICTLReadyReq(
+   void *   pBuffer,
+   u16      buffSize,
+   u8       transactionID )
+{
+   if (pBuffer == 0 || buffSize < QMICTLReadyReqSize() )
+   {
+      return -ENOMEM;
+   }
+
+   // QMI CTL GET VERSION INFO REQ
+   // Request
+   *(u8 *)(pBuffer + sizeof( sQMUX ))  = 0x00;
+   // Transaction ID
+   *(u8 *)(pBuffer + sizeof( sQMUX ) + 1) = transactionID;
+   // Message ID
+   *(u16 *)(pBuffer + sizeof( sQMUX ) + 2) = 0x0021;
+   // Size of TLV's
+   *(u16 *)(pBuffer + sizeof( sQMUX ) + 4) = 0x0000;
+
+  // success
+  return sizeof( sQMUX ) + 6;
+}
+
+/*===========================================================================
+METHOD:
+   QMIWDSSetEventReportReq (Public Method)
+
+DESCRIPTION:
+   Fill buffer with QMI WDS Set Event Report Request
+
+PARAMETERS
+   pBuffer         [ O ] - Buffer to be filled
+   buffSize        [ I ] - Size of pBuffer
+   transactionID   [ I ] - Transaction ID
+
+RETURN VALUE:
+   int - Positive for resulting size of pBuffer
+         Negative errno for error
+===========================================================================*/
+int QMIWDSSetEventReportReq(
+   void *   pBuffer,
+   u16      buffSize,
+   u16      transactionID )
+{
+   if (pBuffer == 0 || buffSize < QMIWDSSetEventReportReqSize() )
+   {
+      return -ENOMEM;
+   }
+
+   // QMI WDS SET EVENT REPORT REQ
+   // Request
+   *(u8 *)(pBuffer + sizeof( sQMUX ))  = 0x00;
+   // Transaction ID
+   *(u16 *)(pBuffer + sizeof( sQMUX ) + 1) = transactionID;
+   // Message ID
+   *(u16 *)(pBuffer + sizeof( sQMUX ) + 3) = 0x0001;
+   // Size of TLV's
+   *(u16 *)(pBuffer + sizeof( sQMUX ) + 5) = 0x0008;
+      // Report channel rate TLV
+      *(u8 *)(pBuffer + sizeof( sQMUX ) + 7)  = 0x11;
+      // Size
+      *(u16 *)(pBuffer + sizeof( sQMUX ) + 8) = 0x0005;
+      // Stats period
+      *(u8 *)(pBuffer + sizeof( sQMUX ) + 10)  = 0x01;
+      // Stats mask
+      *(u32 *)(pBuffer + sizeof( sQMUX ) + 11)  = 0x000000ff;
+
+  // success
+  return sizeof( sQMUX ) + 15;
+}
+
+/*===========================================================================
+METHOD:
+   QMIWDSGetPKGSRVCStatusReq (Public Method)
+
+DESCRIPTION:
+   Fill buffer with QMI WDS Get PKG SRVC Status Request
+
+PARAMETERS
+   pBuffer         [ O ] - Buffer to be filled
+   buffSize        [ I ] - Size of pBuffer
+   transactionID   [ I ] - Transaction ID
+
+RETURN VALUE:
+   int - Positive for resulting size of pBuffer
+         Negative errno for error
+===========================================================================*/
+int QMIWDSGetPKGSRVCStatusReq(
+   void *   pBuffer,
+   u16      buffSize,
+   u16      transactionID )
+{
+   if (pBuffer == 0 || buffSize < QMIWDSGetPKGSRVCStatusReqSize() )
+   {
+      return -ENOMEM;
+   }
+
+   // QMI WDS Get PKG SRVC Status REQ
+   // Request
+   *(u8 *)(pBuffer + sizeof( sQMUX ))  = 0x00;
+   // Transaction ID
+   *(u16 *)(pBuffer + sizeof( sQMUX ) + 1) = transactionID;
+   // Message ID
+   *(u16 *)(pBuffer + sizeof( sQMUX ) + 3) = 0x0022;
+   // Size of TLV's
+   *(u16 *)(pBuffer + sizeof( sQMUX ) + 5) = 0x0000;
+
+  // success
+  return sizeof( sQMUX ) + 7;
+}
+
+/*===========================================================================
+METHOD:
+   QMIDMSGetMEIDReq (Public Method)
+
+DESCRIPTION:
+   Fill buffer with QMI DMS Get Serial Numbers Request
+
+PARAMETERS
+   pBuffer         [ O ] - Buffer to be filled
+   buffSize        [ I ] - Size of pBuffer
+   transactionID   [ I ] - Transaction ID
+
+RETURN VALUE:
+   int - Positive for resulting size of pBuffer
+         Negative errno for error
+===========================================================================*/
+int QMIDMSGetMEIDReq(
+   void *   pBuffer,
+   u16      buffSize,
+   u16      transactionID )
+{
+   if (pBuffer == 0 || buffSize < QMIDMSGetMEIDReqSize() )
+   {
+      return -ENOMEM;
+   }
+
+   // QMI DMS GET SERIAL NUMBERS REQ
+   // Request
+   *(u8 *)(pBuffer + sizeof( sQMUX ))  = 0x00;
+   // Transaction ID
+   *(u16 *)(pBuffer + sizeof( sQMUX ) + 1) = transactionID;
+   // Message ID
+   *(u16 *)(pBuffer + sizeof( sQMUX ) + 3) = 0x0025;
+   // Size of TLV's
+   *(u16 *)(pBuffer + sizeof( sQMUX ) + 5) = 0x0000;
+
+  // success
+  return sizeof( sQMUX ) + 7;
+}
+
+/*=========================================================================*/
+// Parse data from QMI responses
+/*=========================================================================*/
+
+/*===========================================================================
+METHOD:
+   QMICTLGetClientIDResp (Public Method)
+
+DESCRIPTION:
+   Parse the QMI CTL Get Client ID Resp
+
+PARAMETERS
+   pBuffer         [ I ] - Buffer to be parsed
+   buffSize        [ I ] - Size of pBuffer
+   pClientID       [ O ] - Received client ID
+
+RETURN VALUE:
+   int - 0 for success
+         Negative errno for error
+===========================================================================*/
+int QMICTLGetClientIDResp(
+   void * pBuffer,
+   u16    buffSize,
+   u16 *  pClientID )
+{
+   int result;
+   
+   // Ignore QMUX and SDU
+   //    QMI CTL SDU is 2 bytes, not 3
+   u8 offset = sizeof( sQMUX ) + 2;
+
+   if (pBuffer == 0 || buffSize < offset )
+   {
+      return -ENOMEM;
+   }
+
+   pBuffer = pBuffer + offset;
+   buffSize -= offset;
+
+   result = GetQMIMessageID( pBuffer, buffSize );
+   if (result != 0x22)
+   {
+      return -EFAULT;
+   }
+
+   result = ValidQMIMessage( pBuffer, buffSize );
+   if (result != 0)
+   {
+      return -EFAULT;
+   }
+
+   result = GetTLV( pBuffer, buffSize, 0x01, pClientID, 2 );
+   if (result != 2)
+   {
+      return -EFAULT;
+   }
+
+   return 0;
+}
+
+/*===========================================================================
+METHOD:
+   QMICTLReleaseClientIDResp (Public Method)
+
+DESCRIPTION:
+   Verify the QMI CTL Release Client ID Resp is valid
+
+PARAMETERS
+   pBuffer         [ I ] - Buffer to be parsed
+   buffSize        [ I ] - Size of pBuffer
+
+RETURN VALUE:
+   int - 0 for success
+         Negative errno for error
+===========================================================================*/
+int QMICTLReleaseClientIDResp(
+   void *   pBuffer,
+   u16      buffSize )
+{
+   int result;
+   
+   // Ignore QMUX and SDU
+   //    QMI CTL SDU is 2 bytes, not 3
+   u8 offset = sizeof( sQMUX ) + 2;
+
+   if (pBuffer == 0 || buffSize < offset )
+   {
+      return -ENOMEM;
+   }
+
+   pBuffer = pBuffer + offset;
+   buffSize -= offset;
+
+   result = GetQMIMessageID( pBuffer, buffSize );
+   if (result != 0x23)
+   {
+      return -EFAULT;
+   }
+
+   result = ValidQMIMessage( pBuffer, buffSize );
+   if (result != 0)
+   {
+      return -EFAULT;
+   }
+
+   return 0;
+}
+
+/*===========================================================================
+METHOD:
+   QMIWDSEventResp (Public Method)
+
+DESCRIPTION:
+   Parse the QMI WDS Set Event Report Resp/Indication or
+      QMI WDS Get PKG SRVC Status Resp/Indication
+
+   Return parameters will only be updated if value was received
+
+PARAMETERS
+   pBuffer         [ I ] - Buffer to be parsed
+   buffSize        [ I ] - Size of pBuffer
+   pTXOk           [ O ] - Number of transmitted packets without errors
+   pRXOk           [ O ] - Number of received packets without errors
+   pTXErr          [ O ] - Number of transmitted packets with framing errors
+   pRXErr          [ O ] - Number of received packets with framing errors
+   pTXOfl          [ O ] - Number of transmitted packets dropped due to overflow
+   pRXOfl          [ O ] - Number of received packets dropped due to overflow
+   pTXBytesOk      [ O ] - Number of transmitted bytes without errors
+   pRXBytesOk      [ O ] - Number of received bytes without errors
+   pbLinkState     [ O ] - Is the link active?
+   pbReconfigure   [ O ] - Must interface be reconfigured? (reset IP address)
+
+RETURN VALUE:
+   int - 0 for success
+         Negative errno for error
+===========================================================================*/
+int QMIWDSEventResp(
+   void *   pBuffer,
+   u16      buffSize,
+   u32 *    pTXOk,
+   u32 *    pRXOk,
+   u32 *    pTXErr,
+   u32 *    pRXErr,
+   u32 *    pTXOfl,
+   u32 *    pRXOfl,
+   u64 *    pTXBytesOk,
+   u64 *    pRXBytesOk,
+   bool *   pbLinkState,
+   bool *   pbReconfigure )
+{
+   int result;
+   u8 pktStatusRead[2];
+
+   // Ignore QMUX and SDU
+   u8 offset = sizeof( sQMUX ) + 3;
+
+   if (pBuffer == 0 
+   || buffSize < offset
+   || pTXOk == 0
+   || pRXOk == 0
+   || pTXErr == 0
+   || pRXErr == 0
+   || pTXOfl == 0
+   || pRXOfl == 0
+   || pTXBytesOk == 0
+   || pRXBytesOk == 0
+   || pbLinkState == 0
+   || pbReconfigure == 0 )
+   {
+      return -ENOMEM;
+   }
+
+   pBuffer = pBuffer + offset;
+   buffSize -= offset;
+
+   // Note: Indications.  No Mandatory TLV required
+
+   result = GetQMIMessageID( pBuffer, buffSize );
+   // QMI WDS Set Event Report Resp
+   if (result == 0x01)
+   {
+      // TLV's are not mandatory
+      GetTLV( pBuffer, buffSize, 0x10, (void*)pTXOk, 4 );
+      GetTLV( pBuffer, buffSize, 0x11, (void*)pRXOk, 4 );
+      GetTLV( pBuffer, buffSize, 0x12, (void*)pTXErr, 4 );
+      GetTLV( pBuffer, buffSize, 0x13, (void*)pRXErr, 4 );
+      GetTLV( pBuffer, buffSize, 0x14, (void*)pTXOfl, 4 );
+      GetTLV( pBuffer, buffSize, 0x15, (void*)pRXOfl, 4 );
+      GetTLV( pBuffer, buffSize, 0x19, (void*)pTXBytesOk, 8 );
+      GetTLV( pBuffer, buffSize, 0x1A, (void*)pRXBytesOk, 8 );
+   }
+   // QMI WDS Get PKG SRVC Status Resp
+   else if (result == 0x22)
+   {
+      result = GetTLV( pBuffer, buffSize, 0x01, &pktStatusRead[0], 2 );
+      // 1 or 2 bytes may be received
+      if (result >= 1)
+      {
+         if (pktStatusRead[0] == 0x02)
+         {
+            *pbLinkState = true;
+         }
+         else
+         {
+            *pbLinkState = false;
+         }
+      }
+      if (result == 2)
+      {
+         if (pktStatusRead[1] == 0x01)
+         {
+            *pbReconfigure = true;
+         }
+         else
+         {
+            *pbReconfigure = false;
+         }
+      }
+      
+      if (result < 0)
+      {
+         return result;
+      }
+   }
+   else
+   {
+      return -EFAULT;
+   }
+
+   return 0;
+}
+
+/*===========================================================================
+METHOD:
+   QMIDMSGetMEIDResp (Public Method)
+
+DESCRIPTION:
+   Parse the QMI DMS Get Serial Numbers Resp
+
+PARAMETERS
+   pBuffer         [ I ] - Buffer to be parsed
+   buffSize        [ I ] - Size of pBuffer
+   pMEID           [ O ] - Device MEID
+   meidSize        [ I ] - Size of MEID buffer (at least 14)
+
+RETURN VALUE:
+   int - 0 for success
+         Negative errno for error
+===========================================================================*/
+int QMIDMSGetMEIDResp(
+   void *   pBuffer,
+   u16      buffSize,
+   char *   pMEID,
+   int      meidSize )
+{
+   int result;
+
+   // Ignore QMUX and SDU
+   u8 offset = sizeof( sQMUX ) + 3;
+
+   if (pBuffer == 0 || buffSize < offset || meidSize < 14 )
+   {
+      return -ENOMEM;
+   }
+
+   pBuffer = pBuffer + offset;
+   buffSize -= offset;
+
+   result = GetQMIMessageID( pBuffer, buffSize );
+   if (result != 0x25)
+   {
+      return -EFAULT;
+   }
+
+   result = ValidQMIMessage( pBuffer, buffSize );
+   if (result != 0)
+   {
+      return -EFAULT;
+   }
+
+   result = GetTLV( pBuffer, buffSize, 0x12, (void*)pMEID, 14 );
+   if (result != 14)
+   {
+      return -EFAULT;
+   }
+
+   return 0;
+}
+
diff --git a/drivers/staging/gobi/QCUSBNet2k/QMI.h b/drivers/staging/gobi/QCUSBNet2k/QMI.h
new file mode 100644
index 0000000..4da1285
--- /dev/null
+++ b/drivers/staging/gobi/QCUSBNet2k/QMI.h
@@ -0,0 +1,251 @@
+/*===========================================================================
+FILE:
+   QMI.h
+
+DESCRIPTION:
+   Qualcomm QMI driver header
+   
+FUNCTIONS:
+   Generic QMUX functions
+      ParseQMUX
+      FillQMUX
+   
+   Generic QMI functions
+      GetTLV
+      ValidQMIMessage
+      GetQMIMessageID
+
+   Get sizes of buffers needed by QMI requests
+      QMUXHeaderSize
+      QMICTLGetClientIDReqSize
+      QMICTLReleaseClientIDReqSize
+      QMICTLReadyReqSize
+      QMIWDSSetEventReportReqSize
+      QMIWDSGetPKGSRVCStatusReqSize
+      QMIDMSGetMEIDReqSize
+
+   Fill Buffers with QMI requests
+      QMICTLGetClientIDReq
+      QMICTLReleaseClientIDReq
+      QMICTLReadyReq
+      QMIWDSSetEventReportReq
+      QMIWDSGetPKGSRVCStatusReq
+      QMIDMSGetMEIDReq
+      
+   Parse data from QMI responses
+      QMICTLGetClientIDResp
+      QMICTLReleaseClientIDResp
+      QMIWDSEventResp
+      QMIDMSGetMEIDResp
+
+Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License version 2 and
+only version 2 as published by the Free Software Foundation.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA.
+
+===========================================================================*/
+
+#pragma once
+
+/*=========================================================================*/
+// Definitions
+/*=========================================================================*/
+
+// QMI Service Types
+#define QMICTL 0
+#define QMIWDS 1
+#define QMIDMS 2
+
+#define u8        unsigned char
+#define u16       unsigned short
+#define u32       unsigned int
+#define u64       unsigned long long
+
+#define bool      u8
+#define true      1
+#define false     0
+
+#define ENOMEM    12
+#define EFAULT    14
+#define EINVAL    22
+#define ENOMSG    42
+#define ENODATA   61
+
+/*=========================================================================*/
+// Struct sQMUX
+//
+//    Structure that defines a QMUX header
+/*=========================================================================*/
+typedef struct sQMUX
+{
+   /* T\F, always 1 */
+   u8         mTF;
+
+   /* Size of message */
+   u16        mLength;
+
+   /* Control flag */
+   u8         mCtrlFlag;
+   
+   /* Service Type */
+   u8         mQMIService;
+   
+   /* Client ID */
+   u8         mQMIClientID;
+
+}__attribute__((__packed__)) sQMUX;
+
+/*=========================================================================*/
+// Generic QMUX functions
+/*=========================================================================*/
+
+// Remove QMUX headers from a buffer
+int ParseQMUX(
+   u16 *    pClientID,
+   void *   pBuffer,
+   u16      buffSize );
+
+// Fill buffer with QMUX headers
+int FillQMUX(
+   u16      clientID,
+   void *   pBuffer,
+   u16      buffSize );
+
+/*=========================================================================*/
+// Generic QMI functions
+/*=========================================================================*/
+
+// Get data buffer of a specified TLV from a QMI message
+u16 GetTLV(
+   void *   pQMIMessage,
+   u16      messageLen,
+   u8       type,
+   void *   pOutDataBuf,
+   u16      bufferLen );
+
+// Check mandatory TLV in a QMI message
+int ValidQMIMessage(
+   void *   pQMIMessage,
+   u16      messageLen );
+
+// Get the message ID of a QMI message
+int GetQMIMessageID(
+   void *   pQMIMessage,
+   u16      messageLen );
+
+/*=========================================================================*/
+// Get sizes of buffers needed by QMI requests
+/*=========================================================================*/
+
+// Get size of buffer needed for QMUX
+u16 QMUXHeaderSize( void );
+
+// Get size of buffer needed for QMUX + QMICTLGetClientIDReq
+u16 QMICTLGetClientIDReqSize( void );
+
+// Get size of buffer needed for QMUX + QMICTLReleaseClientIDReq
+u16 QMICTLReleaseClientIDReqSize( void );
+
+// Get size of buffer needed for QMUX + QMICTLReadyReq
+u16 QMICTLReadyReqSize( void );
+
+// Get size of buffer needed for QMUX + QMIWDSSetEventReportReq
+u16 QMIWDSSetEventReportReqSize( void );
+
+// Get size of buffer needed for QMUX + QMIWDSGetPKGSRVCStatusReq
+u16 QMIWDSGetPKGSRVCStatusReqSize( void );
+
+// Get size of buffer needed for QMUX + QMIDMSGetMEIDReq
+u16 QMIDMSGetMEIDReqSize( void );
+
+/*=========================================================================*/
+// Fill Buffers with QMI requests
+/*=========================================================================*/
+
+// Fill buffer with QMI CTL Get Client ID Request
+int QMICTLGetClientIDReq(
+   void *   pBuffer,
+   u16      buffSize,
+   u8       transactionID,
+   u8       serviceType );
+
+// Fill buffer with QMI CTL Release Client ID Request
+int QMICTLReleaseClientIDReq(
+   void *   pBuffer,
+   u16      buffSize,
+   u8       transactionID,
+   u16      clientID );
+
+// Fill buffer with QMI CTL Get Version Info Request
+int QMICTLReadyReq(
+   void *   pBuffer,
+   u16      buffSize,
+   u8       transactionID );
+
+// Fill buffer with QMI WDS Set Event Report Request
+int QMIWDSSetEventReportReq(
+   void *   pBuffer,
+   u16      buffSize,
+   u16      transactionID );
+
+// Fill buffer with QMI WDS Get PKG SRVC Status Request
+int QMIWDSGetPKGSRVCStatusReq(
+   void *   pBuffer,
+   u16      buffSize,
+   u16      transactionID );
+
+// Fill buffer with QMI DMS Get Serial Numbers Request
+int QMIDMSGetMEIDReq(
+   void *   pBuffer,
+   u16      buffSize,
+   u16      transactionID );
+
+/*=========================================================================*/
+// Parse data from QMI responses
+/*=========================================================================*/
+
+// Parse the QMI CTL Get Client ID Resp
+int QMICTLGetClientIDResp(
+   void * pBuffer,
+   u16    buffSize,
+   u16 *  pClientID );
+
+// Verify the QMI CTL Release Client ID Resp is valid
+int QMICTLReleaseClientIDResp(
+   void *   pBuffer,
+   u16      buffSize );
+
+// Parse the QMI WDS Set Event Report Resp/Indication or
+//    QMI WDS Get PKG SRVC Status Resp/Indication
+int QMIWDSEventResp(
+   void *   pBuffer,
+   u16      buffSize,
+   u32 *    pTXOk,
+   u32 *    pRXOk,
+   u32 *    pTXErr,
+   u32 *    pRXErr,
+   u32 *    pTXOfl,
+   u32 *    pRXOfl,
+   u64 *    pTXBytesOk,
+   u64 *    pRXBytesOk,
+   bool *   pbLinkState,
+   bool *   pbReconfigure );
+
+// Parse the QMI DMS Get Serial Numbers Resp
+int QMIDMSGetMEIDResp(
+   void *   pBuffer,
+   u16      buffSize,
+   char *   pMEID,
+   int      meidSize );
+
diff --git a/drivers/staging/gobi/QCUSBNet2k/QMIDevice.c b/drivers/staging/gobi/QCUSBNet2k/QMIDevice.c
new file mode 100644
index 0000000..668328c
--- /dev/null
+++ b/drivers/staging/gobi/QCUSBNet2k/QMIDevice.c
@@ -0,0 +1,3129 @@
+/*===========================================================================
+FILE:
+   QMIDevice.c
+
+DESCRIPTION:
+   Functions related to the QMI interface device
+   
+FUNCTIONS:
+   Generic functions
+      IsDeviceValid
+      PrintHex
+      QSetDownReason
+      QClearDownReason
+      QTestDownReason
+
+   Driver level asynchronous read functions
+      ReadCallback
+      IntCallback
+      StartRead
+      KillRead
+
+   Internal read/write functions
+      ReadAsync
+      UpSem
+      ReadSync
+      WriteSyncCallback
+      WriteSync
+
+   Internal memory management functions
+      GetClientID
+      ReleaseClientID
+      FindClientMem
+      AddToReadMemList
+      PopFromReadMemList
+      AddToNotifyList
+      NotifyAndPopNotifyList
+      AddToURBList
+      PopFromURBList
+
+   Userspace wrappers
+      UserspaceOpen
+      UserspaceIOCTL
+      UserspaceClose
+      UserspaceRead
+      UserspaceWrite
+
+   Initializer and destructor
+      RegisterQMIDevice
+      DeregisterQMIDevice
+
+   Driver level client management
+      QMIReady
+      QMIWDSCallback
+      SetupQMIWDSCallback
+      QMIDMSGetMEID
+
+Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License version 2 and
+only version 2 as published by the Free Software Foundation.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA.
+
+===========================================================================*/
+
+//---------------------------------------------------------------------------
+// Include Files
+//---------------------------------------------------------------------------
+#include "QMIDevice.h"
+
+//-----------------------------------------------------------------------------
+// Definitions
+//-----------------------------------------------------------------------------
+
+extern int debug;
+
+// Prototype to QCSuspend function
+int QCSuspend( 
+   struct usb_interface *     pIntf,
+   pm_message_t               powerEvent );
+
+// IOCTL to generate a client ID for this service type
+#define IOCTL_QMI_GET_SERVICE_FILE 0x8BE0 + 1
+
+// IOCTL to get the VIDPID of the device
+#define IOCTL_QMI_GET_DEVICE_VIDPID 0x8BE0 + 2
+
+// IOCTL to get the MEID of the device
+#define IOCTL_QMI_GET_DEVICE_MEID 0x8BE0 + 3
+
+// CDC GET_ENCAPSULATED_RESPONSE packet
+#define CDC_GET_ENCAPSULATED_RESPONSE 0x01A1ll
+
+// CDC CONNECTION_SPEED_CHANGE indication packet
+#define CDC_CONNECTION_SPEED_CHANGE 0x08000000002AA1ll
+
+/*=========================================================================*/
+// UserspaceQMIFops
+//    QMI device's userspace file operations
+/*=========================================================================*/
+struct file_operations UserspaceQMIFops = 
+{
+   .owner     = THIS_MODULE,
+   .read      = UserspaceRead,
+   .write     = UserspaceWrite,
+   .ioctl     = UserspaceIOCTL,
+   .open      = UserspaceOpen,
+   .flush     = UserspaceClose,
+};
+
+/*=========================================================================*/
+// Generic functions
+/*=========================================================================*/
+
+/*===========================================================================
+METHOD:
+   IsDeviceValid (Public Method)
+
+DESCRIPTION:
+   Basic test to see if device memory is valid
+
+PARAMETERS:
+   pDev     [ I ] - Device specific memory
+
+RETURN VALUE:
+   bool
+===========================================================================*/
+bool IsDeviceValid( sQCUSBNet * pDev )
+{
+   if (pDev == NULL)
+   {
+      return false;
+   }
+
+   if (pDev->mbQMIValid == false)
+   {
+      return false;
+   }
+   
+   return true;
+} 
+
+/*===========================================================================
+METHOD:
+   PrintHex (Public Method)
+
+DESCRIPTION:
+   Print Hex data, for debug purposes
+
+PARAMETERS:
+   pBuffer       [ I ] - Data buffer
+   bufSize       [ I ] - Size of data buffer
+
+RETURN VALUE:
+   None
+===========================================================================*/
+void PrintHex(
+   void *      pBuffer,
+   u16         bufSize )
+{
+   char * pPrintBuf;
+   u16 pos;
+   int status;
+   
+   pPrintBuf = kmalloc( bufSize * 3 + 1, GFP_ATOMIC );
+   if (pPrintBuf == NULL)
+   {
+      DBG( "Unable to allocate buffer\n" );
+      return;
+   }
+   memset( pPrintBuf, 0 , bufSize * 3 + 1 );
+   
+   for (pos = 0; pos < bufSize; pos++)
+   {
+      status = snprintf( (pPrintBuf + (pos * 3)), 
+                         4, 
+                         "%02X ", 
+                         *(u8 *)(pBuffer + pos) );
+      if (status != 3)
+      {
+         DBG( "snprintf error %d\n", status );
+         kfree( pPrintBuf );
+         return;
+      }
+   }
+   
+   DBG( "   : %s\n", pPrintBuf );
+
+   kfree( pPrintBuf );
+   pPrintBuf = NULL;
+   return;   
+}
+
+/*===========================================================================
+METHOD:
+   QSetDownReason (Public Method)
+
+DESCRIPTION:
+   Sets mDownReason and turns carrier off
+
+PARAMETERS
+   pDev     [ I ] - Device specific memory
+   reason   [ I ] - Reason device is down
+
+RETURN VALUE:
+   None
+===========================================================================*/
+void QSetDownReason(
+   sQCUSBNet *    pDev,
+   u8             reason )
+{
+   set_bit( reason, &pDev->mDownReason );
+   
+   netif_carrier_off( pDev->mpNetDev->net );
+}
+
+/*===========================================================================
+METHOD:
+   QClearDownReason (Public Method)
+
+DESCRIPTION:
+   Clear mDownReason and may turn carrier on
+
+PARAMETERS
+   pDev     [ I ] - Device specific memory
+   reason   [ I ] - Reason device is no longer down
+
+RETURN VALUE:
+   None
+===========================================================================*/
+void QClearDownReason(
+   sQCUSBNet *    pDev,
+   u8             reason )
+{
+   clear_bit( reason, &pDev->mDownReason );
+   
+   if (pDev->mDownReason == 0)
+   {
+      netif_carrier_on( pDev->mpNetDev->net );
+   }
+}
+
+/*===========================================================================
+METHOD:
+   QTestDownReason (Public Method)
+
+DESCRIPTION:
+   Test mDownReason and returns whether reason is set
+
+PARAMETERS
+   pDev     [ I ] - Device specific memory
+   reason   [ I ] - Reason device is down
+
+RETURN VALUE:
+   bool
+===========================================================================*/
+bool QTestDownReason(
+   sQCUSBNet *    pDev,
+   u8             reason )
+{
+   return test_bit( reason, &pDev->mDownReason );
+}
+
+/*=========================================================================*/
+// Driver level asynchronous read functions
+/*=========================================================================*/
+
+/*===========================================================================
+METHOD:
+   ReadCallback (Public Method)
+
+DESCRIPTION:
+   Put the data in storage and notify anyone waiting for data
+
+PARAMETERS
+   pReadURB       [ I ] - URB this callback is run for
+
+RETURN VALUE:
+   None
+===========================================================================*/
+void ReadCallback( struct urb * pReadURB )
+{
+   int result;
+   u16 clientID;
+   sClientMemList * pClientMem;
+   void * pData;
+   void * pDataCopy;
+   u16 dataSize;
+   sQCUSBNet * pDev;
+   unsigned long flags;
+   u16 transactionID;
+
+   if (pReadURB == NULL)
+   {
+      DBG( "bad read URB\n" );
+      return;
+   }
+   
+   pDev = pReadURB->context;
+   if (IsDeviceValid( pDev ) == false)
+   {
+      DBG( "Invalid device!\n" );
+      return;
+   }   
+
+   if (pReadURB->status != 0)
+   {
+      DBG( "Read status = %d\n", pReadURB->status );
+      return;
+   }
+   DBG( "Read %d bytes\n", pReadURB->actual_length );
+   
+   pData = pReadURB->transfer_buffer;
+   dataSize = pReadURB->actual_length;
+
+   PrintHex( pData, dataSize );
+
+   result = ParseQMUX( &clientID,
+                       pData,
+                       dataSize );
+   if (result < 0)
+   {
+      DBG( "Read error parsing QMUX %d\n", result );
+      return;
+   }
+   
+   // Grab transaction ID
+
+   // Data large enough?
+   if (dataSize < result + 3)
+   {
+      DBG( "Data buffer too small to parse\n" );
+      return;
+   }
+   
+   // Transaction ID size is 1 for QMICTL, 2 for others
+   if (clientID == QMICTL)
+   {
+      transactionID = *(u8*)(pData + result + 1);
+   }
+   else
+   {
+      transactionID = *(u16*)(pData + result + 1);
+   }
+   
+   // Critical section
+   spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );
+
+   // Find memory storage for this service and Client ID
+   // Not using FindClientMem because it can't handle broadcasts
+   pClientMem = pDev->mQMIDev.mpClientMemList;
+   while (pClientMem != NULL)
+   {
+      if (pClientMem->mClientID == clientID 
+      ||  (pClientMem->mClientID | 0xff00) == clientID)
+      {
+         // Make copy of pData
+         pDataCopy = kmalloc( dataSize, GFP_ATOMIC );
+         memcpy( pDataCopy, pData, dataSize );
+
+         if (AddToReadMemList( pDev,
+                               pClientMem->mClientID,
+                               transactionID,
+                               pDataCopy,
+                               dataSize ) == false)
+         {
+            DBG( "Error allocating pReadMemListEntry "
+                 "read will be discarded\n" );
+            kfree( pDataCopy );
+            
+            // End critical section
+            spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+            return;
+         }
+
+         // Success
+         DBG( "Creating new readListEntry for client 0x%04X, TID %x\n",
+              clientID,
+              transactionID );
+
+         // Notify this client data exists
+         NotifyAndPopNotifyList( pDev,
+                                 pClientMem->mClientID,
+                                 transactionID );
+
+         // Not a broadcast
+         if (clientID >> 8 != 0xff)
+         {
+            break;
+         }
+      }
+      
+      // Next element
+      pClientMem = pClientMem->mpNext;
+   }
+   
+   // End critical section
+   spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+}
+
+/*===========================================================================
+METHOD:
+   IntCallback (Public Method)
+
+DESCRIPTION:
+   Data is available, fire off a read URB
+
+PARAMETERS
+   pIntURB       [ I ] - URB this callback is run for
+
+RETURN VALUE:
+   None
+===========================================================================*/
+void IntCallback( struct urb * pIntURB )
+{
+   int status;
+   int interval;
+   
+   sQCUSBNet * pDev = (sQCUSBNet *)pIntURB->context;
+   if (IsDeviceValid( pDev ) == false)
+   {
+      DBG( "Invalid device!\n" );
+      return;
+   }
+
+   // Verify this was a normal interrupt
+   if (pIntURB->status != 0)
+   {
+      DBG( "Int status = %d\n", pIntURB->status );
+      
+      // Ignore EOVERFLOW errors
+      if (pIntURB->status != -EOVERFLOW)
+      {
+         // Read 'thread' dies here
+         return;
+      }
+   }
+   else
+   {
+      // CDC GET_ENCAPSULATED_RESPONSE
+      if ((pIntURB->actual_length == 8)
+      &&  (*(u64*)pIntURB->transfer_buffer == CDC_GET_ENCAPSULATED_RESPONSE))
+      {
+         // Time to read
+         usb_fill_control_urb( pDev->mQMIDev.mpReadURB,
+                               pDev->mpNetDev->udev,
+                               usb_rcvctrlpipe( pDev->mpNetDev->udev, 0 ),
+                               (unsigned char *)pDev->mQMIDev.mpReadSetupPacket,
+                               pDev->mQMIDev.mpReadBuffer,
+                               DEFAULT_READ_URB_LENGTH,
+                               ReadCallback,
+                               pDev );
+         status = usb_submit_urb( pDev->mQMIDev.mpReadURB, GFP_ATOMIC );
+         if (status != 0)
+         {
+            DBG( "Error submitting Read URB %d\n", status );
+            return;
+         }
+      }
+      // CDC CONNECTION_SPEED_CHANGE
+      else if ((pIntURB->actual_length == 16)
+      &&       (*(u64*)pIntURB->transfer_buffer == CDC_CONNECTION_SPEED_CHANGE))
+      {
+         // if upstream or downstream is 0, stop traffic.  Otherwise resume it
+         if ((*(u32*)(pIntURB->transfer_buffer + 8) == 0)
+         ||  (*(u32*)(pIntURB->transfer_buffer + 12) == 0))
+         {
+            QSetDownReason( pDev, CDC_CONNECTION_SPEED );
+            DBG( "traffic stopping due to CONNECTION_SPEED_CHANGE\n" );
+         }
+         else
+         {
+            QClearDownReason( pDev, CDC_CONNECTION_SPEED );
+            DBG( "resuming traffic due to CONNECTION_SPEED_CHANGE\n" );
+         }
+      }
+      else
+      {
+         DBG( "ignoring invalid interrupt in packet\n" );
+         PrintHex( pIntURB->transfer_buffer, pIntURB->actual_length );
+      }
+   }
+
+   interval = (pDev->mpNetDev->udev->speed == USB_SPEED_HIGH) ? 7 : 3;
+
+   // Reschedule interrupt URB
+   usb_fill_int_urb( pIntURB,
+                     pIntURB->dev,
+                     pIntURB->pipe,
+                     pIntURB->transfer_buffer,
+                     pIntURB->transfer_buffer_length,
+                     pIntURB->complete,
+                     pIntURB->context,
+                     interval );
+   status = usb_submit_urb( pIntURB, GFP_ATOMIC );
+   if (status != 0)
+   {
+      DBG( "Error re-submitting Int URB %d\n", status );
+   }   
+   return;
+}
+
+/*===========================================================================
+METHOD:
+   StartRead (Public Method)
+
+DESCRIPTION:
+   Start continuous read "thread" (callback driven)
+   
+PARAMETERS:
+   pDev     [ I ] - Device specific memory
+
+RETURN VALUE:
+   int - 0 for success
+         negative errno for failure
+===========================================================================*/
+int StartRead( sQCUSBNet * pDev )
+{
+   int interval;
+
+   if (IsDeviceValid( pDev ) == false)
+   {
+      DBG( "Invalid device!\n" );
+      return -ENXIO;
+   }
+   
+   // Allocate URB buffers
+   pDev->mQMIDev.mpReadURB = usb_alloc_urb( 0, GFP_KERNEL );
+   if (pDev->mQMIDev.mpReadURB == NULL)
+   {
+      DBG( "Error allocating read urb\n" );
+      return -ENOMEM;
+   }
+   
+   pDev->mQMIDev.mpIntURB = usb_alloc_urb( 0, GFP_KERNEL );
+   if (pDev->mQMIDev.mpIntURB == NULL)
+   {
+      DBG( "Error allocating int urb\n" );
+      return -ENOMEM;
+   }
+
+   // Create data buffers
+   pDev->mQMIDev.mpReadBuffer = kmalloc( DEFAULT_READ_URB_LENGTH, GFP_KERNEL );
+   if (pDev->mQMIDev.mpReadBuffer == NULL)
+   {
+      DBG( "Error allocating read buffer\n" );
+      return -ENOMEM;
+   }
+   
+   pDev->mQMIDev.mpIntBuffer = kmalloc( DEFAULT_READ_URB_LENGTH, GFP_KERNEL );
+   if (pDev->mQMIDev.mpIntBuffer == NULL)
+   {
+      DBG( "Error allocating int buffer\n" );
+      return -ENOMEM;
+   }      
+   
+   pDev->mQMIDev.mpReadSetupPacket = kmalloc( sizeof( sURBSetupPacket ), 
+                                              GFP_KERNEL );
+   if (pDev->mQMIDev.mpReadSetupPacket == NULL)
+   {
+      DBG( "Error allocating setup packet buffer\n" );
+      return -ENOMEM;
+   }
+
+   // CDC Get Encapsulated Response packet
+   pDev->mQMIDev.mpReadSetupPacket->mRequestType = 0xA1;
+   pDev->mQMIDev.mpReadSetupPacket->mRequestCode = 1;
+   pDev->mQMIDev.mpReadSetupPacket->mValue = 0;
+   pDev->mQMIDev.mpReadSetupPacket->mIndex = 0;
+   pDev->mQMIDev.mpReadSetupPacket->mLength = DEFAULT_READ_URB_LENGTH;
+
+   interval = (pDev->mpNetDev->udev->speed == USB_SPEED_HIGH) ? 7 : 3;
+   
+   // Schedule interrupt URB
+   usb_fill_int_urb( pDev->mQMIDev.mpIntURB,
+                     pDev->mpNetDev->udev,
+                     usb_rcvintpipe( pDev->mpNetDev->udev, 0x81 ),
+                     pDev->mQMIDev.mpIntBuffer,
+                     DEFAULT_READ_URB_LENGTH,
+                     IntCallback,
+                     pDev,
+                     interval );
+   return usb_submit_urb( pDev->mQMIDev.mpIntURB, GFP_KERNEL );
+}
+
+/*===========================================================================
+METHOD:
+   KillRead (Public Method)
+
+DESCRIPTION:
+   Kill continuous read "thread"
+   
+PARAMETERS:
+   pDev     [ I ] - Device specific memory
+
+RETURN VALUE:
+   None
+===========================================================================*/
+void KillRead( sQCUSBNet * pDev )
+{
+   // Stop reading
+   if (pDev->mQMIDev.mpReadURB != NULL)
+   {
+      DBG( "Killing read URB\n" );
+      usb_kill_urb( pDev->mQMIDev.mpReadURB );
+   }
+
+   if (pDev->mQMIDev.mpIntURB != NULL)
+   {
+      DBG( "Killing int URB\n" );
+      usb_kill_urb( pDev->mQMIDev.mpIntURB );
+   }
+
+   // Release buffers
+   kfree( pDev->mQMIDev.mpReadSetupPacket );
+   pDev->mQMIDev.mpReadSetupPacket = NULL;
+   kfree( pDev->mQMIDev.mpReadBuffer );
+   pDev->mQMIDev.mpReadBuffer = NULL;
+   kfree( pDev->mQMIDev.mpIntBuffer );
+   pDev->mQMIDev.mpIntBuffer = NULL;
+   
+   // Release URB's
+   usb_free_urb( pDev->mQMIDev.mpReadURB );
+   pDev->mQMIDev.mpReadURB = NULL;
+   usb_free_urb( pDev->mQMIDev.mpIntURB );
+   pDev->mQMIDev.mpIntURB = NULL;
+}
+
+/*=========================================================================*/
+// Internal read/write functions
+/*=========================================================================*/
+
+/*===========================================================================
+METHOD:
+   ReadAsync (Public Method)
+
+DESCRIPTION:
+   Start asynchronous read
+   NOTE: Reading client's data store, not device
+
+PARAMETERS:
+   pDev              [ I ] - Device specific memory
+   clientID          [ I ] - Requester's client ID
+   transactionID     [ I ] - Transaction ID or 0 for any
+   pCallback         [ I ] - Callback to be executed when data is available
+   pData             [ I ] - Data buffer that will be passed (unmodified) 
+                             to callback
+
+RETURN VALUE:
+   int - 0 for success
+         negative errno for failure
+===========================================================================*/
+int ReadAsync(
+   sQCUSBNet *    pDev,
+   u16            clientID,
+   u16            transactionID,
+   void           (*pCallback)(sQCUSBNet*, u16, void *),
+   void *         pData )
+{
+   sClientMemList * pClientMem;
+   sReadMemList ** ppReadMemList;
+   
+   unsigned long flags;
+
+   if (IsDeviceValid( pDev ) == false)
+   {
+      DBG( "Invalid device!\n" );
+      return -ENXIO;
+   }
+
+   // Critical section
+   spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );
+
+   // Find memory storage for this client ID
+   pClientMem = FindClientMem( pDev, clientID );
+   if (pClientMem == NULL)
+   {
+      DBG( "Could not find matching client ID 0x%04X\n",
+           clientID );
+           
+      // End critical section
+      spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+      return -ENXIO;
+   }
+   
+   ppReadMemList = &(pClientMem->mpList);
+   
+   // Does data already exist?
+   while (*ppReadMemList != NULL)
+   {
+      // Is this element our data?
+      if (transactionID == 0 
+      ||  transactionID == (*ppReadMemList)->mTransactionID)
+      {
+         // End critical section
+         spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+
+         // Run our own callback
+         pCallback( pDev, clientID, pData );
+         
+         return 0;
+      }
+      
+      // Next
+      ppReadMemList = &(*ppReadMemList)->mpNext;
+   }
+
+   // Data not found, add ourself to list of waiters
+   if (AddToNotifyList( pDev,
+                        clientID,
+                        transactionID, 
+                        pCallback, 
+                        pData ) == false)
+   {
+      DBG( "Unable to register for notification\n" );
+   }
+
+   // End critical section
+   spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+
+   // Success
+   return 0;
+}
+
+/*===========================================================================
+METHOD:
+   UpSem (Public Method)
+
+DESCRIPTION:
+   Notification function for synchronous read
+
+PARAMETERS:
+   pDev              [ I ] - Device specific memory
+   clientID          [ I ] - Requester's client ID
+   pData             [ I ] - Buffer that holds semaphore to be up()-ed
+
+RETURN VALUE:
+   None
+===========================================================================*/
+void UpSem( 
+   sQCUSBNet * pDev,
+   u16         clientID,
+   void *      pData )
+{
+   DBG( "0x%04X\n", clientID );
+        
+   up( (struct semaphore *)pData );
+   return;
+}
+
+/*===========================================================================
+METHOD:
+   ReadSync (Public Method)
+
+DESCRIPTION:
+   Start synchronous read
+   NOTE: Reading client's data store, not device
+
+PARAMETERS:
+   pDev              [ I ] - Device specific memory
+   ppOutBuffer       [I/O] - On success, will be filled with a 
+                             pointer to read buffer
+   clientID          [ I ] - Requester's client ID
+   transactionID     [ I ] - Transaction ID or 0 for any
+
+RETURN VALUE:
+   int - size of data read for success
+         negative errno for failure
+===========================================================================*/
+int ReadSync(
+   sQCUSBNet *    pDev,
+   void **        ppOutBuffer,
+   u16            clientID,
+   u16            transactionID )
+{
+   int result;
+   sClientMemList * pClientMem;
+   sNotifyList ** ppNotifyList, * pDelNotifyListEntry;
+   struct semaphore readSem;
+   void * pData;
+   unsigned long flags;
+   u16 dataSize;
+
+   if (IsDeviceValid( pDev ) == false)
+   {
+      DBG( "Invalid device!\n" );
+      return -ENXIO;
+   }
+   
+   // Critical section
+   spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );
+
+   // Find memory storage for this Client ID
+   pClientMem = FindClientMem( pDev, clientID );
+   if (pClientMem == NULL)
+   {
+      DBG( "Could not find matching client ID 0x%04X\n",
+           clientID );
+      
+      // End critical section
+      spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+      return -ENXIO;
+   }
+   
+   // Note: in cases where read is interrupted, 
+   //    this will verify client is still valid
+   while (PopFromReadMemList( pDev,
+                              clientID,
+                              transactionID,
+                              &pData,
+                              &dataSize ) == false)
+   {
+      // Data does not yet exist, wait
+      sema_init( &readSem, 0 );
+
+      // Add ourself to list of waiters
+      if (AddToNotifyList( pDev, 
+                           clientID, 
+                           transactionID, 
+                           UpSem, 
+                           &readSem ) == false)
+      {
+         DBG( "unable to register for notification\n" );
+         spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+         return -EFAULT;
+      }
+
+      // End critical section while we block
+      spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+
+      // Wait for notification
+      result = down_interruptible( &readSem );
+      if (result != 0)
+      {
+         DBG( "Interrupted %d\n", result );
+
+         // readSem will fall out of scope, 
+         // remove from notify list so it's not referenced
+         spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );
+         ppNotifyList = &(pClientMem->mpReadNotifyList);
+         pDelNotifyListEntry = NULL;
+
+         // Find and delete matching entry
+         while (*ppNotifyList != NULL)
+         {
+            if ((*ppNotifyList)->mpData == &readSem)
+            {
+               pDelNotifyListEntry = *ppNotifyList;
+               *ppNotifyList = (*ppNotifyList)->mpNext;
+               kfree( pDelNotifyListEntry );
+               break;
+            }
+
+            // Next
+            ppNotifyList = &(*ppNotifyList)->mpNext;
+         }
+
+         spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+         return -EINTR;
+      }
+      
+      // Verify device is still valid
+      if (IsDeviceValid( pDev ) == false)
+      {
+         DBG( "Invalid device!\n" );
+         return -ENXIO;
+      }
+      
+      // Restart critical section and continue loop
+      spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );
+   }
+   
+   // End Critical section
+   spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+
+   // Success
+   *ppOutBuffer = pData;
+
+   return dataSize;
+}
+
+/*===========================================================================
+METHOD:
+   WriteSyncCallback (Public Method)
+
+DESCRIPTION:
+   Write callback
+
+PARAMETERS
+   pWriteURB       [ I ] - URB this callback is run for
+
+RETURN VALUE:
+   None
+===========================================================================*/
+void WriteSyncCallback( struct urb * pWriteURB )
+{
+   if (pWriteURB == NULL)
+   {
+      DBG( "null urb\n" );
+      return;
+   }
+
+   DBG( "Write status/size %d/%d\n", 
+        pWriteURB->status, 
+        pWriteURB->actual_length );
+
+   // Notify that write has completed by up()-ing semaphore
+   up( (struct semaphore * )pWriteURB->context );
+   
+   return;
+}
+
+/*===========================================================================
+METHOD:
+   WriteSync (Public Method)
+
+DESCRIPTION:
+   Start synchronous write
+
+PARAMETERS:
+   pDev                 [ I ] - Device specific memory
+   pWriteBuffer         [ I ] - Data to be written
+   writeBufferSize      [ I ] - Size of data to be written
+   clientID             [ I ] - Client ID of requester
+
+RETURN VALUE:
+   int - write size (includes QMUX)
+         negative errno for failure
+===========================================================================*/
+int WriteSync(
+   sQCUSBNet *        pDev,
+   char *             pWriteBuffer,
+   int                writeBufferSize,
+   u16                clientID )
+{
+   int result;
+   struct semaphore writeSem;
+   struct urb * pWriteURB;
+   sURBSetupPacket writeSetup;
+   unsigned long flags;
+
+   if (IsDeviceValid( pDev ) == false)
+   {
+      DBG( "Invalid device!\n" );
+      return -ENXIO;
+   }
+
+   pWriteURB = usb_alloc_urb( 0, GFP_KERNEL );
+   if (pWriteURB == NULL)
+   {
+      DBG( "URB mem error\n" );
+      return -ENOMEM;
+   }
+
+   // Fill writeBuffer with QMUX
+   result = FillQMUX( clientID, pWriteBuffer, writeBufferSize );
+   if (result < 0)
+   {
+      usb_free_urb( pWriteURB );
+      return result;
+   }
+
+   // CDC Send Encapsulated Request packet
+   writeSetup.mRequestType = 0x21;
+   writeSetup.mRequestCode = 0;
+   writeSetup.mValue = 0;
+   writeSetup.mIndex = 0;
+   writeSetup.mLength = writeBufferSize;
+
+   // Create URB   
+   usb_fill_control_urb( pWriteURB,
+                         pDev->mpNetDev->udev,
+                         usb_sndctrlpipe( pDev->mpNetDev->udev, 0 ),
+                         (unsigned char *)&writeSetup,
+                         (void*)pWriteBuffer,
+                         writeBufferSize,
+                         NULL,
+                         pDev );
+
+   DBG( "Actual Write:\n" );
+   PrintHex( pWriteBuffer, writeBufferSize );
+
+   sema_init( &writeSem, 0 );
+   
+   pWriteURB->complete = WriteSyncCallback;
+   pWriteURB->context = &writeSem;
+   
+   // Wake device
+   result = usb_autopm_get_interface( pDev->mpIntf );
+   if (result < 0)
+   {
+      DBG( "unable to resume interface: %d\n", result );
+      
+      // Likely caused by device going from autosuspend -> full suspend
+      if (result == -EPERM)
+      {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,33 ))
+         pDev->mpNetDev->udev->auto_pm = 0;
+#endif
+         QCSuspend( pDev->mpIntf, PMSG_SUSPEND );
+      }
+
+      return result;
+   }
+
+   // Critical section
+   spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );
+
+   if (AddToURBList( pDev, clientID, pWriteURB ) == false)
+   {
+      usb_free_urb( pWriteURB );
+
+      // End critical section
+      spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );   
+      usb_autopm_put_interface( pDev->mpIntf );
+      return -EINVAL;
+   }
+
+   result = usb_submit_urb( pWriteURB, GFP_KERNEL );
+   if (result < 0)
+   {
+      DBG( "submit URB error %d\n", result );
+      
+      // Get URB back so we can destroy it
+      if (PopFromURBList( pDev, clientID ) != pWriteURB)
+      {
+         // This shouldn't happen
+         DBG( "Didn't get write URB back\n" );
+      }
+
+      usb_free_urb( pWriteURB );
+
+      // End critical section
+      spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+      usb_autopm_put_interface( pDev->mpIntf );
+      return result;
+   }
+   
+   // End critical section while we block
+   spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );   
+
+   // Wait for write to finish
+   result = down_interruptible( &writeSem );
+
+   // Verify device is still valid
+   if (IsDeviceValid( pDev ) == false)
+   {
+      DBG( "Invalid device!\n" );
+      return -ENXIO;
+   }
+
+   // Write is done, release device
+   usb_autopm_put_interface( pDev->mpIntf );
+
+   // Restart critical section
+   spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );
+
+   // Get URB back so we can destroy it
+   if (PopFromURBList( pDev, clientID ) != pWriteURB)
+   {
+      // This shouldn't happen
+      DBG( "Didn't get write URB back\n" );
+   
+      // End critical section
+      spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );   
+      return -EINVAL;
+   }
+
+   // End critical section
+   spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );   
+
+   if (result == 0)
+   {
+      // Write is finished
+      if (pWriteURB->status == 0)
+      {
+         // Return number of bytes that were supposed to have been written,
+         //   not size of QMI request
+         result = writeBufferSize;
+      }
+      else
+      {
+         DBG( "bad status = %d\n", pWriteURB->status );
+         
+         // Return error value
+         result = pWriteURB->status;
+      }
+   }
+   else
+   {
+      // We have been forcibly interrupted
+      DBG( "Interrupted %d !!!\n", result );
+      DBG( "Device may be in bad state and need reset !!!\n" );
+
+      // URB has not finished
+      usb_kill_urb( pWriteURB );
+   }
+
+   usb_free_urb( pWriteURB );
+
+   return result;
+}
+
+/*=========================================================================*/
+// Internal memory management functions
+/*=========================================================================*/
+
+/*===========================================================================
+METHOD:
+   GetClientID (Public Method)
+
+DESCRIPTION:
+   Construct object/load file into memory
+
+PARAMETERS:
+   pDev           [ I ] - Device specific memory
+   serviceType    [ I ] - Desired QMI service type
+
+RETURN VALUE:
+   int - Client ID for success (positive)
+         Negative errno for error
+===========================================================================*/
+int GetClientID( 
+   sQCUSBNet *    pDev,
+   u8             serviceType )
+{
+   u16 clientID;
+   sClientMemList ** ppClientMem;
+   int result;
+   void * pWriteBuffer;
+   u16 writeBufferSize;
+   void * pReadBuffer;
+   u16 readBufferSize;
+   unsigned long flags;
+   u8 transactionID;
+   
+   if (IsDeviceValid( pDev ) == false)
+   {
+      DBG( "Invalid device!\n" );
+      return -ENXIO;
+   }
+
+   // Run QMI request to be assigned a Client ID
+   if (serviceType != 0)
+   {
+      writeBufferSize = QMICTLGetClientIDReqSize();
+      pWriteBuffer = kmalloc( writeBufferSize, GFP_KERNEL );
+      if (pWriteBuffer == NULL)
+      {
+         return -ENOMEM;
+      }
+
+      transactionID = atomic_add_return( 1, &pDev->mQMIDev.mQMICTLTransactionID );
+      if (transactionID == 0)
+      {
+         transactionID = atomic_add_return( 1, &pDev->mQMIDev.mQMICTLTransactionID );
+      }
+      result = QMICTLGetClientIDReq( pWriteBuffer, 
+                                     writeBufferSize,
+                                     transactionID,
+                                     serviceType );
+      if (result < 0)
+      {
+         kfree( pWriteBuffer );
+         return result;
+      }
+      
+      result = WriteSync( pDev,
+                          pWriteBuffer,
+                          writeBufferSize,
+                          QMICTL );
+      kfree( pWriteBuffer );
+
+      if (result < 0)
+      {
+         return result;
+      }
+
+      result = ReadSync( pDev,
+                         &pReadBuffer,
+                         QMICTL,
+                         transactionID );
+      if (result < 0)
+      {
+         DBG( "bad read data %d\n", result );
+         return result;
+      }
+      readBufferSize = result;
+
+      result = QMICTLGetClientIDResp( pReadBuffer,
+                                      readBufferSize,
+                                      &clientID );
+      kfree( pReadBuffer );
+
+      if (result < 0)
+      {
+         return result;
+      }
+   }
+   else
+   {
+      // QMI CTL will always have client ID 0
+      clientID = 0;
+   }
+
+   // Critical section
+   spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );
+
+   // Verify client is not already allocated
+   if (FindClientMem( pDev, clientID ) != NULL)
+   {
+      DBG( "Client memory already exists\n" );
+
+      // End Critical section
+      spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+      return -ETOOMANYREFS;
+   }
+
+   // Go to last entry in client mem list
+   ppClientMem = &pDev->mQMIDev.mpClientMemList;
+   while (*ppClientMem != NULL)
+   {
+      ppClientMem = &(*ppClientMem)->mpNext;
+   }
+   
+   // Create locations for read to place data into
+   *ppClientMem = kmalloc( sizeof( sClientMemList ), GFP_ATOMIC );
+   if (*ppClientMem == NULL)
+   {
+      DBG( "Error allocating read list\n" );
+
+      // End critical section
+      spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+      return -ENOMEM;
+   }
+      
+   (*ppClientMem)->mClientID = clientID;
+   (*ppClientMem)->mpList = NULL;
+   (*ppClientMem)->mpReadNotifyList = NULL;
+   (*ppClientMem)->mpURBList = NULL;
+   (*ppClientMem)->mpNext = NULL;
+
+
+   // End Critical section
+   spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+   
+   return clientID;
+}
+
+/*===========================================================================
+METHOD:
+   ReleaseClientID (Public Method)
+
+DESCRIPTION:
+   Release QMI client and free memory
+
+PARAMETERS:
+   pDev           [ I ] - Device specific memory
+   clientID       [ I ] - Requester's client ID
+
+RETURN VALUE:
+   None
+===========================================================================*/
+void ReleaseClientID(
+   sQCUSBNet *    pDev,
+   u16            clientID )
+{
+   int result;
+   sClientMemList ** ppDelClientMem;
+   sClientMemList * pNextClientMem;
+   struct urb * pDelURB;
+   void * pDelData;
+   u16 dataSize;
+   void * pWriteBuffer;
+   u16 writeBufferSize;
+   void * pReadBuffer;
+   u16 readBufferSize;
+   unsigned long flags;
+   u8 transactionID;
+
+   // Is the device still valid?
+   if (IsDeviceValid( pDev ) == false)
+   {
+      DBG( "invalid device\n" );
+      return;
+   }
+   
+   DBG( "releasing 0x%04X\n", clientID );
+
+   // Run QMI ReleaseClientID if this isn't QMICTL   
+   if (clientID != QMICTL)
+   {
+      // Note: all errors are non fatal, as we always want to delete 
+      //    client memory in latter part of function
+      
+      writeBufferSize = QMICTLReleaseClientIDReqSize();
+      pWriteBuffer = kmalloc( writeBufferSize, GFP_KERNEL );
+      if (pWriteBuffer == NULL)
+      {
+         DBG( "memory error\n" );
+      }
+      else
+      {
+         transactionID = atomic_add_return( 1, &pDev->mQMIDev.mQMICTLTransactionID );
+         if (transactionID == 0)
+         {
+            transactionID = atomic_add_return( 1, &pDev->mQMIDev.mQMICTLTransactionID );
+         }
+         result = QMICTLReleaseClientIDReq( pWriteBuffer, 
+                                            writeBufferSize,
+                                            transactionID,
+                                            clientID );
+         if (result < 0)
+         {
+            kfree( pWriteBuffer );
+            DBG( "error %d filling req buffer\n", result );
+         }
+         else
+         {
+            result = WriteSync( pDev,
+                                pWriteBuffer,
+                                writeBufferSize,
+                                QMICTL );
+            kfree( pWriteBuffer );
+
+            if (result < 0)
+            {
+               DBG( "bad write status %d\n", result );
+            }
+            else
+            {
+               result = ReadSync( pDev,
+                                  &pReadBuffer,
+                                  QMICTL,
+                                  transactionID );
+               if (result < 0)
+               {
+                  DBG( "bad read status %d\n", result );
+               }
+               else
+               {
+                  readBufferSize = result;
+
+                  result = QMICTLReleaseClientIDResp( pReadBuffer,
+                                                      readBufferSize );
+                  kfree( pReadBuffer );
+
+                  if (result < 0)
+                  {
+                     DBG( "error %d parsing response\n", result );
+                  }
+               }
+            }
+         }
+      }
+   }
+
+   // Cleaning up client memory
+   
+   // Critical section
+   spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );
+
+   // Can't use FindClientMem, I need to keep pointer of previous
+   ppDelClientMem = &pDev->mQMIDev.mpClientMemList;
+   while (*ppDelClientMem != NULL)
+   {
+      if ((*ppDelClientMem)->mClientID == clientID)
+      {
+         pNextClientMem = (*ppDelClientMem)->mpNext;
+
+         // Notify all clients
+         while (NotifyAndPopNotifyList( pDev,
+                                        clientID,
+                                        0 ) == true );         
+
+         // Kill and free all URB's
+         pDelURB = PopFromURBList( pDev, clientID );
+         while (pDelURB != NULL)
+         {
+            usb_kill_urb( pDelURB );
+            usb_free_urb( pDelURB );
+            pDelURB = PopFromURBList( pDev, clientID );
+         }
+
+         // Free any unread data
+         while (PopFromReadMemList( pDev, 
+                                    clientID,
+                                    0,
+                                    &pDelData,
+                                    &dataSize ) == true )
+         {
+            kfree( pDelData );
+         }
+
+         // Delete client Mem
+         kfree( *ppDelClientMem );
+
+         // Overwrite the pointer that was to this client mem
+         *ppDelClientMem = pNextClientMem;
+      }
+      else
+      {
+         // Advance to the next entry's link pointer
+         ppDelClientMem = &(*ppDelClientMem)->mpNext;
+      }
+   }
+   
+   // End Critical section
+   spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+
+   return;
+}
+
+/*===========================================================================
+METHOD:
+   FindClientMem (Public Method)
+
+DESCRIPTION:
+   Find this client's memory
+
+   Caller MUST have lock on mClientMemLock
+
+PARAMETERS:
+   pDev           [ I ] - Device specific memory
+   clientID       [ I ] - Requester's client ID
+
+RETURN VALUE:
+   sClientMemList - Pointer to requested sClientMemList for success
+                    NULL for error
+===========================================================================*/
+sClientMemList * FindClientMem( 
+   sQCUSBNet *      pDev,
+   u16              clientID )
+{
+   sClientMemList * pClientMem;
+   
+   if (IsDeviceValid( pDev ) == false)
+   {
+      DBG( "Invalid device\n" );
+      return NULL;
+   }
+   
+#ifdef CONFIG_SMP
+   // Verify Lock
+   if (spin_is_locked( &pDev->mQMIDev.mClientMemLock ) == 0)
+   {
+      DBG( "unlocked\n" );
+      BUG();
+   }
+#endif
+   
+   pClientMem = pDev->mQMIDev.mpClientMemList;
+   while (pClientMem != NULL)
+   {
+      if (pClientMem->mClientID == clientID)
+      {
+         // Success
+         //DBG( "Found client mem %p\n", pClientMem );
+         return pClientMem;
+      }
+      
+      pClientMem = pClientMem->mpNext;
+   }
+
+   DBG( "Could not find client mem 0x%04X\n", clientID );
+   return NULL;
+}
+
+/*===========================================================================
+METHOD:
+   AddToReadMemList (Public Method)
+
+DESCRIPTION:
+   Add Data to this client's ReadMem list
+   
+   Caller MUST have lock on mClientMemLock
+
+PARAMETERS:
+   pDev           [ I ] - Device specific memory
+   clientID       [ I ] - Requester's client ID
+   transactionID  [ I ] - Transaction ID or 0 for any
+   pData          [ I ] - Data to add
+   dataSize       [ I ] - Size of data to add
+
+RETURN VALUE:
+   bool
+===========================================================================*/
+bool AddToReadMemList( 
+   sQCUSBNet *      pDev,
+   u16              clientID,
+   u16              transactionID,
+   void *           pData,
+   u16              dataSize )
+{
+   sClientMemList * pClientMem;
+   sReadMemList ** ppThisReadMemList;
+
+#ifdef CONFIG_SMP
+   // Verify Lock
+   if (spin_is_locked( &pDev->mQMIDev.mClientMemLock ) == 0)
+   {
+      DBG( "unlocked\n" );
+      BUG();
+   }
+#endif
+
+   // Get this client's memory location
+   pClientMem = FindClientMem( pDev, clientID );
+   if (pClientMem == NULL)
+   {
+      DBG( "Could not find this client's memory 0x%04X\n",
+           clientID );
+
+      return false;
+   }
+
+   // Go to last ReadMemList entry
+   ppThisReadMemList = &pClientMem->mpList;
+   while (*ppThisReadMemList != NULL)
+   {
+      ppThisReadMemList = &(*ppThisReadMemList)->mpNext;
+   }
+   
+   *ppThisReadMemList = kmalloc( sizeof( sReadMemList ), GFP_ATOMIC );
+   if (*ppThisReadMemList == NULL)
+   {
+      DBG( "Mem error\n" );
+
+      return false;
+   }   
+   
+   (*ppThisReadMemList)->mpNext = NULL;
+   (*ppThisReadMemList)->mpData = pData;
+   (*ppThisReadMemList)->mDataSize = dataSize;
+   (*ppThisReadMemList)->mTransactionID = transactionID;
+   
+   return true;
+}
+
+/*===========================================================================
+METHOD:
+   PopFromReadMemList (Public Method)
+
+DESCRIPTION:
+   Remove data from this client's ReadMem list if it matches 
+   the specified transaction ID.
+   
+   Caller MUST have lock on mClientMemLock
+
+PARAMETERS:
+   pDev              [ I ] - Device specific memory
+   clientID          [ I ] - Requester's client ID
+   transactionID     [ I ] - Transaction ID or 0 for any
+   ppData            [I/O] - On success, will be filled with a 
+                             pointer to read buffer
+   pDataSize         [I/O] - On success, will be filled with the 
+                             read buffer's size
+
+RETURN VALUE:
+   bool
+===========================================================================*/
+bool PopFromReadMemList( 
+   sQCUSBNet *      pDev,
+   u16              clientID,
+   u16              transactionID,
+   void **          ppData,
+   u16 *            pDataSize )
+{
+   sClientMemList * pClientMem;
+   sReadMemList * pDelReadMemList, ** ppReadMemList;
+
+#ifdef CONFIG_SMP
+   // Verify Lock
+   if (spin_is_locked( &pDev->mQMIDev.mClientMemLock ) == 0)
+   {
+      DBG( "unlocked\n" );
+      BUG();
+   }
+#endif
+
+   // Get this client's memory location
+   pClientMem = FindClientMem( pDev, clientID );
+   if (pClientMem == NULL)
+   {
+      DBG( "Could not find this client's memory 0x%04X\n",
+           clientID );
+
+      return false;
+   }
+   
+   ppReadMemList = &(pClientMem->mpList);
+   pDelReadMemList = NULL;
+   
+   // Find first message that matches this transaction ID
+   while (*ppReadMemList != NULL)
+   {
+      // Do we care about transaction ID?
+      if (transactionID == 0
+      ||  transactionID == (*ppReadMemList)->mTransactionID )
+      {
+         pDelReadMemList = *ppReadMemList;
+         break;
+      }
+      
+      DBG( "skipping 0x%04X data TID = %x\n", clientID, (*ppReadMemList)->mTransactionID );
+      
+      // Next
+      ppReadMemList = &(*ppReadMemList)->mpNext;
+   }
+   
+   if (pDelReadMemList != NULL)
+   {
+      *ppReadMemList = (*ppReadMemList)->mpNext;
+      
+      // Copy to output
+      *ppData = pDelReadMemList->mpData;
+      *pDataSize = pDelReadMemList->mDataSize;
+      
+      // Free memory
+      kfree( pDelReadMemList );
+      
+      return true;
+   }
+   else
+   {
+      DBG( "No read memory to pop, Client 0x%04X, TID = %x\n", 
+           clientID, 
+           transactionID );
+      return false;
+   }
+}
+
+/*===========================================================================
+METHOD:
+   AddToNotifyList (Public Method)
+
+DESCRIPTION:
+   Add Notify entry to this client's notify List
+   
+   Caller MUST have lock on mClientMemLock
+
+PARAMETERS:
+   pDev              [ I ] - Device specific memory
+   clientID          [ I ] - Requester's client ID
+   transactionID     [ I ] - Transaction ID or 0 for any
+   pNotifyFunct      [ I ] - Callback function to be run when data is available
+   pData             [ I ] - Data buffer that will be passed (unmodified) 
+                             to callback
+
+RETURN VALUE:
+   bool
+===========================================================================*/
+bool AddToNotifyList( 
+   sQCUSBNet *      pDev,
+   u16              clientID,
+   u16              transactionID,
+   void             (* pNotifyFunct)(sQCUSBNet *, u16, void *),
+   void *           pData )
+{
+   sClientMemList * pClientMem;
+   sNotifyList ** ppThisNotifyList;
+
+#ifdef CONFIG_SMP
+   // Verify Lock
+   if (spin_is_locked( &pDev->mQMIDev.mClientMemLock ) == 0)
+   {
+      DBG( "unlocked\n" );
+      BUG();
+   }
+#endif
+
+   // Get this client's memory location
+   pClientMem = FindClientMem( pDev, clientID );
+   if (pClientMem == NULL)
+   {
+      DBG( "Could not find this client's memory 0x%04X\n", clientID );
+      return false;
+   }
+
+   // Go to last NotifyList entry
+   ppThisNotifyList = &pClientMem->mpReadNotifyList;
+   while (*ppThisNotifyList != NULL)
+   {
+      ppThisNotifyList = &(*ppThisNotifyList)->mpNext;
+   }
+   
+   *ppThisNotifyList = kmalloc( sizeof( sNotifyList ), GFP_ATOMIC );
+   if (*ppThisNotifyList == NULL)
+   {
+      DBG( "Mem error\n" );
+      return false;
+   }   
+   
+   (*ppThisNotifyList)->mpNext = NULL;
+   (*ppThisNotifyList)->mpNotifyFunct = pNotifyFunct;
+   (*ppThisNotifyList)->mpData = pData;
+   (*ppThisNotifyList)->mTransactionID = transactionID;
+   
+   return true;
+}
+
+/*===========================================================================
+METHOD:
+   NotifyAndPopNotifyList (Public Method)
+
+DESCRIPTION:
+   Remove first Notify entry from this client's notify list 
+   and Run function
+   
+   Caller MUST have lock on mClientMemLock
+
+PARAMETERS:
+   pDev              [ I ] - Device specific memory
+   clientID          [ I ] - Requester's client ID
+   transactionID     [ I ] - Transaction ID or 0 for any
+
+RETURN VALUE:
+   bool
+===========================================================================*/
+bool NotifyAndPopNotifyList( 
+   sQCUSBNet *      pDev,
+   u16              clientID,
+   u16              transactionID )
+{
+   sClientMemList * pClientMem;
+   sNotifyList * pDelNotifyList, ** ppNotifyList;
+
+#ifdef CONFIG_SMP
+   // Verify Lock
+   if (spin_is_locked( &pDev->mQMIDev.mClientMemLock ) == 0)
+   {
+      DBG( "unlocked\n" );
+      BUG();
+   }
+#endif
+
+   // Get this client's memory location
+   pClientMem = FindClientMem( pDev, clientID );
+   if (pClientMem == NULL)
+   {
+      DBG( "Could not find this client's memory 0x%04X\n", clientID );
+      return false;
+   }
+
+   ppNotifyList = &(pClientMem->mpReadNotifyList);
+   pDelNotifyList = NULL;
+
+   // Remove from list
+   while (*ppNotifyList != NULL)
+   {
+      // Do we care about transaction ID?
+      if (transactionID == 0
+      ||  (*ppNotifyList)->mTransactionID == 0
+      ||  transactionID == (*ppNotifyList)->mTransactionID)
+      {
+         pDelNotifyList = *ppNotifyList;
+         break;
+      }
+      
+      DBG( "skipping data TID = %x\n", (*ppNotifyList)->mTransactionID );
+      
+      // next
+      ppNotifyList = &(*ppNotifyList)->mpNext;
+   }
+   
+   if (pDelNotifyList != NULL)
+   {
+      // Remove element
+      *ppNotifyList = (*ppNotifyList)->mpNext;
+      
+      // Run notification function
+      if (pDelNotifyList->mpNotifyFunct != NULL)
+      {
+         // Unlock for callback
+         spin_unlock( &pDev->mQMIDev.mClientMemLock );
+      
+         pDelNotifyList->mpNotifyFunct( pDev,
+                                        clientID,
+                                        pDelNotifyList->mpData );
+                                        
+         // Restore lock
+         spin_lock( &pDev->mQMIDev.mClientMemLock );
+      }
+      
+      // Delete memory
+      kfree( pDelNotifyList );
+
+      return true;
+   }
+   else
+   {
+      DBG( "no one to notify for TID %x\n", transactionID );
+      
+      return false;
+   }
+}
+
+/*===========================================================================
+METHOD:
+   AddToURBList (Public Method)
+
+DESCRIPTION:
+   Add URB to this client's URB list
+   
+   Caller MUST have lock on mClientMemLock
+
+PARAMETERS:
+   pDev              [ I ] - Device specific memory
+   clientID          [ I ] - Requester's client ID
+   pURB              [ I ] - URB to be added
+
+RETURN VALUE:
+   bool - true for success, false for failure
+===========================================================================*/
+bool AddToURBList( 
+   sQCUSBNet *      pDev,
+   u16              clientID,
+   struct urb *     pURB )
+{
+   sClientMemList * pClientMem;
+   sURBList ** ppThisURBList;
+
+#ifdef CONFIG_SMP
+   // Verify Lock
+   if (spin_is_locked( &pDev->mQMIDev.mClientMemLock ) == 0)
+   {
+      DBG( "unlocked\n" );
+      BUG();
+   }
+#endif
+
+   // Get this client's memory location
+   pClientMem = FindClientMem( pDev, clientID );
+   if (pClientMem == NULL)
+   {
+      DBG( "Could not find this client's memory 0x%04X\n", clientID );
+      return false;
+   }
+
+   // Go to last URBList entry
+   ppThisURBList = &pClientMem->mpURBList;
+   while (*ppThisURBList != NULL)
+   {
+      ppThisURBList = &(*ppThisURBList)->mpNext;
+   }
+   
+   *ppThisURBList = kmalloc( sizeof( sURBList ), GFP_ATOMIC );
+   if (*ppThisURBList == NULL)
+   {
+      DBG( "Mem error\n" );
+      return false;
+   }   
+   
+   (*ppThisURBList)->mpNext = NULL;
+   (*ppThisURBList)->mpURB = pURB;
+   
+   return true;
+}
+
+/*===========================================================================
+METHOD:
+   PopFromURBList (Public Method)
+
+DESCRIPTION:
+   Remove URB from this client's URB list
+   
+   Caller MUST have lock on mClientMemLock
+
+PARAMETERS:
+   pDev           [ I ] - Device specific memory
+   clientID       [ I ] - Requester's client ID
+
+RETURN VALUE:
+   struct urb - Pointer to requested client's URB
+                NULL for error
+===========================================================================*/
+struct urb * PopFromURBList( 
+   sQCUSBNet *      pDev,
+   u16              clientID )
+{
+   sClientMemList * pClientMem;
+   sURBList * pDelURBList;
+   struct urb * pURB;
+
+#ifdef CONFIG_SMP
+   // Verify Lock
+   if (spin_is_locked( &pDev->mQMIDev.mClientMemLock ) == 0)
+   {
+      DBG( "unlocked\n" );
+      BUG();
+   }
+#endif
+
+   // Get this client's memory location
+   pClientMem = FindClientMem( pDev, clientID );
+   if (pClientMem == NULL)
+   {
+      DBG( "Could not find this client's memory 0x%04X\n", clientID );
+      return NULL;
+   }
+
+   // Remove from list
+   if (pClientMem->mpURBList != NULL)
+   {
+      pDelURBList = pClientMem->mpURBList;
+      pClientMem->mpURBList = pClientMem->mpURBList->mpNext;
+      
+      // Copy to output
+      pURB = pDelURBList->mpURB;
+      
+      // Delete memory
+      kfree( pDelURBList );
+
+      return pURB;
+   }
+   else
+   {
+      DBG( "No URB's to pop\n" );
+      
+      return NULL;
+   }
+}
+
+/*=========================================================================*/
+// Userspace wrappers
+/*=========================================================================*/
+
+/*===========================================================================
+METHOD:
+   UserspaceOpen (Public Method)
+
+DESCRIPTION:
+   Userspace open
+      IOCTL must be called before reads or writes
+
+PARAMETERS:
+   pInode       [ I ] - kernel file descriptor
+   pFilp        [ I ] - userspace file descriptor
+
+RETURN VALUE:
+   int - 0 for success
+         Negative errno for failure
+===========================================================================*/
+int UserspaceOpen( 
+   struct inode *         pInode, 
+   struct file *          pFilp )
+{
+   sQMIFilpStorage * pFilpData;
+   
+   // Obtain device pointer from pInode
+   sQMIDev * pQMIDev = container_of( pInode->i_cdev,
+                                     sQMIDev,
+                                     mCdev );
+   sQCUSBNet * pDev = container_of( pQMIDev,
+                                    sQCUSBNet,
+                                    mQMIDev );                                    
+
+   if (IsDeviceValid( pDev ) == false)
+   {
+      DBG( "Invalid device\n" );
+      return -ENXIO;
+   }
+
+   // Setup data in pFilp->private_data
+   pFilp->private_data = kmalloc( sizeof( sQMIFilpStorage ), GFP_KERNEL );
+   if (pFilp->private_data == NULL)
+   {
+      DBG( "Mem error\n" );
+      return -ENOMEM;
+   }
+   
+   pFilpData = (sQMIFilpStorage *)pFilp->private_data;
+   pFilpData->mClientID = (u16)-1;
+   pFilpData->mpDev = pDev;
+   
+   return 0;
+}
+
+/*===========================================================================
+METHOD:
+   UserspaceIOCTL (Public Method)
+
+DESCRIPTION:
+   Userspace IOCTL functions
+
+PARAMETERS:
+   pUnusedInode [ I ] - (unused) kernel file descriptor
+   pFilp        [ I ] - userspace file descriptor
+   cmd          [ I ] - IOCTL command
+   arg          [ I ] - IOCTL argument
+
+RETURN VALUE:
+   int - 0 for success
+         Negative errno for failure
+===========================================================================*/
+int UserspaceIOCTL( 
+   struct inode *    pUnusedInode, 
+   struct file *     pFilp,
+   unsigned int      cmd, 
+   unsigned long     arg )
+{
+   int result;
+   u32 devVIDPID;
+   
+   sQMIFilpStorage * pFilpData = (sQMIFilpStorage *)pFilp->private_data;
+
+   if (pFilpData == NULL)
+   {
+      DBG( "Bad file data\n" );
+      return -EBADF;
+   }
+   
+   if (IsDeviceValid( pFilpData->mpDev ) == false)
+   {
+      DBG( "Invalid device! Updating f_ops\n" );
+      pFilp->f_op = pFilp->f_dentry->d_inode->i_fop;
+      return -ENXIO;
+   }
+
+   switch (cmd)
+   {
+      case IOCTL_QMI_GET_SERVICE_FILE:
+      
+         DBG( "Setting up QMI for service %lu\n", arg );
+         if ((u8)arg == 0)
+         {
+            DBG( "Cannot use QMICTL from userspace\n" );
+            return -EINVAL;
+         }
+
+         // Connection is already setup
+         if (pFilpData->mClientID != (u16)-1)
+         {
+            DBG( "Close the current connection before opening a new one\n" );
+            return -EBADR;
+         }
+         
+         result = GetClientID( pFilpData->mpDev, (u8)arg );
+         if (result < 0)
+         {
+            return result;
+         }
+         pFilpData->mClientID = result;
+
+         return 0;
+         break;
+
+
+      case IOCTL_QMI_GET_DEVICE_VIDPID:
+         if (arg == 0)
+         {
+            DBG( "Bad VIDPID buffer\n" );
+            return -EINVAL;
+         }
+         
+         // Extra verification
+         if (pFilpData->mpDev->mpNetDev == 0)
+         {
+            DBG( "Bad mpNetDev\n" );
+            return -ENOMEM;
+         }
+         if (pFilpData->mpDev->mpNetDev->udev == 0)
+         {
+            DBG( "Bad udev\n" );
+            return -ENOMEM;
+         }
+
+         devVIDPID = ((le16_to_cpu( pFilpData->mpDev->mpNetDev->udev->descriptor.idVendor ) << 16)
+                     + le16_to_cpu( pFilpData->mpDev->mpNetDev->udev->descriptor.idProduct ) );
+
+         result = copy_to_user( (unsigned int *)arg, &devVIDPID, 4 );
+         if (result != 0)
+         {
+            DBG( "Copy to userspace failure\n" );
+            return -EFAULT;
+         }
+
+         return 0;
+                 
+         break;
+
+      case IOCTL_QMI_GET_DEVICE_MEID:
+         if (arg == 0)
+         {
+            DBG( "Bad MEID buffer\n" );
+            return -EINVAL;
+         }
+         
+         result = copy_to_user( (unsigned int *)arg, &pFilpData->mpDev->mMEID[0], 14 );
+         if (result != 0)
+         {
+            DBG( "Copy to userspace failure\n" );
+            return -EFAULT;
+         }
+
+         return 0;
+                 
+         break;
+         
+      default:
+         return -EBADRQC;       
+   }
+}
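+
+/*===========================================================================
+   Editor's note (illustrative sketch, not part of the original driver):
+   a typical userspace sequence against the qcqmi character node created
+   by this driver.  The device path and the service number (1 == QMI WDS)
+   are assumptions for this example.
+
+      int fd = open( "/dev/qcqmi0", O_RDWR );
+
+      // Bind this file handle to a QMI service before any read or write
+      ioctl( fd, IOCTL_QMI_GET_SERVICE_FILE, 1 );
+
+      // write() sends a raw QMI request (the driver prepends the QMUX
+      // header); read() returns the matching response without that header
+      write( fd, requestBuffer, requestSize );
+      read( fd, responseBuffer, sizeof( responseBuffer ) );
+
+      close( fd );
+===========================================================================*/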
+
+/*===========================================================================
+METHOD:
+   UserspaceClose (Public Method)
+
+DESCRIPTION:
+   Userspace close
+      Release client ID and free memory
+
+PARAMETERS:
+   pFilp           [ I ] - userspace file descriptor
+   unusedFileTable [ I ] - (unused) file table
+
+RETURN VALUE:
+   int - 0 for success
+         Negative errno for failure
+===========================================================================*/
+int UserspaceClose(
+   struct file *       pFilp,
+   fl_owner_t          unusedFileTable )
+{
+   sQMIFilpStorage * pFilpData = (sQMIFilpStorage *)pFilp->private_data;
+   struct list_head * pTasks;
+   struct task_struct * pEachTask;
+   struct fdtable * pFDT;
+   int count = 0;
+   int used = 0;
+   unsigned long flags;
+
+   if (pFilpData == NULL)
+   {
+      DBG( "bad file data\n" );
+      return -EBADF;
+   }
+
+   // Fall through: if f_count == 1 there is no need for further checks
+   if (atomic_read( &pFilp->f_count ) != 1)
+   {
+      // "group_leader" points to the main process' task, which resides in
+      // the global "tasks" list.
+      list_for_each( pTasks, &current->group_leader->tasks )
+      {
+         pEachTask = container_of( pTasks, struct task_struct, tasks );
+         if (pEachTask == NULL || pEachTask->files == NULL)
+         {
+            // Some tasks may not have files (e.g. Xsession)
+            continue;
+         }
+         spin_lock_irqsave( &pEachTask->files->file_lock, flags );
+         pFDT = files_fdtable( pEachTask->files );
+         for (count = 0; count < pFDT->max_fds; count++)
+         {
+            // Before this function was called, this file was removed
+            // from our task's file table so if we find it in a file
+            // table then it is being used by another task
+            if (pFDT->fd[count] == pFilp)
+            {
+               used++;
+               break;
+            }
+         }
+         spin_unlock_irqrestore( &pEachTask->files->file_lock, flags );
+      }
+      
+      if (used > 0)
+      {
+         DBG( "not closing, as this FD is open by %d other process\n", used );
+         return 0;
+      }
+   }
+
+   if (IsDeviceValid( pFilpData->mpDev ) == false)
+   {
+      DBG( "Invalid device! Updating f_ops\n" );
+      pFilp->f_op = pFilp->f_dentry->d_inode->i_fop;
+      return -ENXIO;
+   }
+   
+   DBG( "0x%04X\n", pFilpData->mClientID );
+   
+   // Disable pFilpData so the caller cannot keep issuing reads or writes
+   //    should this function hang
+   // Note: memory pointer is still saved in pFilpData to be deleted later
+   pFilp->private_data = NULL;
+
+   if (pFilpData->mClientID != (u16)-1)
+   {
+      ReleaseClientID( pFilpData->mpDev,
+                       pFilpData->mClientID );
+   }
+      
+   kfree( pFilpData );
+   return 0;
+}
+
+/*===========================================================================
+METHOD:
+   UserspaceRead (Public Method)
+
+DESCRIPTION:
+   Userspace read (synchronous)
+
+PARAMETERS:
+   pFilp           [ I ] - userspace file descriptor
+   pBuf            [ I ] - read buffer
+   size            [ I ] - size of read buffer
+   pUnusedFpos     [ I ] - (unused) file position
+
+RETURN VALUE:
+   ssize_t - Number of bytes read for success
+             Negative errno for failure
+===========================================================================*/
+ssize_t UserspaceRead( 
+   struct file *          pFilp,
+   char __user *          pBuf, 
+   size_t                 size,
+   loff_t *               pUnusedFpos )
+{
+   int result;
+   void * pReadData = NULL;
+   void * pSmallReadData;
+   sQMIFilpStorage * pFilpData = (sQMIFilpStorage *)pFilp->private_data;
+
+   if (pFilpData == NULL)
+   {
+      DBG( "Bad file data\n" );
+      return -EBADF;
+   }
+
+   if (IsDeviceValid( pFilpData->mpDev ) == false)
+   {
+      DBG( "Invalid device! Updating f_ops\n" );
+      pFilp->f_op = pFilp->f_dentry->d_inode->i_fop;
+      return -ENXIO;
+   }
+   
+   if (pFilpData->mClientID == (u16)-1)
+   {
+      DBG( "Client ID must be set before reading 0x%04X\n",
+           pFilpData->mClientID );
+      return -EBADR;
+   }
+   
+   // Perform synchronous read
+   result = ReadSync( pFilpData->mpDev,
+                      &pReadData,
+                      pFilpData->mClientID,
+                      0 );
+   if (result <= 0)
+   {
+      return result;
+   }
+   
+   // Discard QMUX header
+   result -= QMUXHeaderSize();
+   pSmallReadData = pReadData + QMUXHeaderSize();
+
+   if (result > size)
+   {
+      DBG( "Read data is too large for amount user has requested\n" );
+      kfree( pReadData );
+      return -EOVERFLOW;
+   }
+
+   if (copy_to_user( pBuf, pSmallReadData, result ) != 0)
+   {
+      DBG( "Error copying read data to user\n" );
+      result = -EFAULT;
+   }
+   
+   // Reader is responsible for freeing read buffer
+   kfree( pReadData );
+   
+   return result;
+}
+
+/*===========================================================================
+METHOD:
+   UserspaceWrite (Public Method)
+
+DESCRIPTION:
+   Userspace write (synchronous)
+
+PARAMETERS:
+   pFilp           [ I ] - userspace file descriptor
+   pBuf            [ I ] - write buffer
+   size            [ I ] - size of write buffer
+   pUnusedFpos     [ I ] - (unused) file position
+
+RETURN VALUE:
+   ssize_t - Number of bytes written for success
+             Negative errno for failure
+===========================================================================*/
+ssize_t UserspaceWrite (
+   struct file *        pFilp, 
+   const char __user *  pBuf, 
+   size_t               size,
+   loff_t *             pUnusedFpos )
+{
+   int status;
+   void * pWriteBuffer;
+   sQMIFilpStorage * pFilpData = (sQMIFilpStorage *)pFilp->private_data;
+
+   if (pFilpData == NULL)
+   {
+      DBG( "Bad file data\n" );
+      return -EBADF;
+   }
+
+   if (IsDeviceValid( pFilpData->mpDev ) == false)
+   {
+      DBG( "Invalid device! Updating f_ops\n" );
+      pFilp->f_op = pFilp->f_dentry->d_inode->i_fop;
+      return -ENXIO;
+   }
+
+   if (pFilpData->mClientID == (u16)-1)
+   {
+      DBG( "Client ID must be set before writing 0x%04X\n",
+           pFilpData->mClientID );
+      return -EBADR;
+   }
+   
+   // Copy data from user to kernel space
+   pWriteBuffer = kmalloc( size + QMUXHeaderSize(), GFP_KERNEL );
+   if (pWriteBuffer == NULL)
+   {
+      return -ENOMEM;
+   }
+   status = copy_from_user( pWriteBuffer + QMUXHeaderSize(), pBuf, size );
+   if (status != 0)
+   {
+      DBG( "Unable to copy data from userspace %d\n", status );
+      kfree( pWriteBuffer );
+      return -EFAULT;
+   }
+
+   status = WriteSync( pFilpData->mpDev,
+                       pWriteBuffer, 
+                       size + QMUXHeaderSize(),
+                       pFilpData->mClientID );
+
+   kfree( pWriteBuffer );
+   
+   // On success, return requested size, not full QMI request size
+   if (status == size + QMUXHeaderSize())
+   {
+      return size;
+   }
+   else
+   {
+      return status;
+   }
+}
+
+/*=========================================================================*/
+// Initializer and destructor
+/*=========================================================================*/
+
+/*===========================================================================
+METHOD:
+   RegisterQMIDevice (Public Method)
+
+DESCRIPTION:
+   QMI Device initialization function
+
+PARAMETERS:
+   pDev     [ I ] - Device specific memory
+   
+RETURN VALUE:
+   int - 0 for success
+         Negative errno for failure
+===========================================================================*/
+int RegisterQMIDevice( sQCUSBNet * pDev )
+{
+   int result;
+   int QCQMIIndex = 0;
+   dev_t devno; 
+   char * pDevName;
+   
+   pDev->mbQMIValid = true;
+
+   // Set up for QMICTL
+   //    (does not send QMI message, just sets up memory)
+   result = GetClientID( pDev, QMICTL );
+   if (result != 0)
+   {
+      pDev->mbQMIValid = false;
+      return result;
+   }
+   atomic_set( &pDev->mQMIDev.mQMICTLTransactionID, 1 );
+
+   // Start Async reading
+   result = StartRead( pDev );
+   if (result != 0)
+   {
+      pDev->mbQMIValid = false;
+      return result;
+   }
+   
+   // Device is not ready for QMI connections right away
+   //   Wait up to 30 seconds before failing
+   if (QMIReady( pDev, 30000 ) == false)
+   {
+      DBG( "Device unresponsive to QMI\n" );
+      pDev->mbQMIValid = false;
+      return -ETIMEDOUT;
+   }
+
+   // Setup WDS callback
+   result = SetupQMIWDSCallback( pDev );
+   if (result != 0)
+   {
+      pDev->mbQMIValid = false;
+      return result;
+   }
+
+   // Fill MEID for device
+   result = QMIDMSGetMEID( pDev );
+   if (result != 0)
+   {
+      pDev->mbQMIValid = false;
+      return result;
+   }
+
+   // Allocate a major/minor number region for the qcqmi character device
+   result = alloc_chrdev_region( &devno, 0, 1, "qcqmi" );
+   if (result < 0)
+   {
+      return result;
+   }
+
+   // Create cdev
+   cdev_init( &pDev->mQMIDev.mCdev, &UserspaceQMIFops );
+   pDev->mQMIDev.mCdev.owner = THIS_MODULE;
+   pDev->mQMIDev.mCdev.ops = &UserspaceQMIFops;
+
+   result = cdev_add( &pDev->mQMIDev.mCdev, devno, 1 );
+   if (result != 0)
+   {
+      DBG( "error adding cdev\n" );
+      return result;
+   }
+
+   // Match interface number (usb#)
+   pDevName = strstr( pDev->mpNetDev->net->name, "usb" );
+   if (pDevName == NULL)
+   {
+      DBG( "Bad net name: %s\n", pDev->mpNetDev->net->name );
+      return -ENXIO;
+   }
+   pDevName += strlen("usb");
+   QCQMIIndex = simple_strtoul( pDevName, NULL, 10 );
+   if (QCQMIIndex < 0)
+   {
+      DBG( "Bad minor number\n" );
+      return -ENXIO;
+   }
+
+   // Always print this output
+   printk( KERN_INFO "creating qcqmi%d\n",
+           QCQMIIndex );
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION( 2,6,27 ))
+   // kernel 2.6.27 added a new fourth parameter to device_create
+   //    void * drvdata : the data to be added to the device for callbacks
+   device_create( pDev->mQMIDev.mpDevClass,
+                  NULL, 
+                  devno,
+                  NULL,
+                  "qcqmi%d", 
+                  QCQMIIndex );
+#else
+   device_create( pDev->mQMIDev.mpDevClass,
+                  NULL, 
+                  devno,
+                  "qcqmi%d", 
+                  QCQMIIndex );
+#endif
+   
+   pDev->mQMIDev.mDevNum = devno;
+   
+   // Success
+   return 0;
+}
+
+/*===========================================================================
+METHOD:
+   DeregisterQMIDevice (Public Method)
+
+DESCRIPTION:
+   QMI Device cleanup function
+   
+   NOTE: When this function is run the device is no longer valid
+
+PARAMETERS:
+   pDev     [ I ] - Device specific memory
+
+RETURN VALUE:
+   None
+===========================================================================*/
+void DeregisterQMIDevice( sQCUSBNet * pDev )
+{
+   struct inode * pOpenInode;
+   struct list_head * pInodeList;
+   struct list_head * pTasks;
+   struct task_struct * pEachTask;
+   struct fdtable * pFDT;
+   struct file * pFilp;
+   unsigned long flags;
+   int count = 0;
+
+   // Should never happen, but check anyway
+   if (IsDeviceValid( pDev ) == false)
+   {
+      DBG( "wrong device\n" );
+      return;
+   }
+
+   // Release all clients
+   while (pDev->mQMIDev.mpClientMemList != NULL)
+   {
+      DBG( "release 0x%04X\n", pDev->mQMIDev.mpClientMemList->mClientID );
+   
+      ReleaseClientID( pDev,
+                       pDev->mQMIDev.mpClientMemList->mClientID );
+      // NOTE: pDev->mQMIDev.mpClientMemList will 
+      //       be updated in ReleaseClientID()
+   }
+
+   // Stop all reads
+   KillRead( pDev );
+
+   pDev->mbQMIValid = false;
+
+   // Find each open file handle, and manually close it
+   
+   // Generally there will be only one inode, but more are possible
+   list_for_each( pInodeList, &pDev->mQMIDev.mCdev.list )
+   {
+      // Get the inode
+      pOpenInode = container_of( pInodeList, struct inode, i_devices );
+      if (pOpenInode != NULL && (IS_ERR( pOpenInode ) == false))
+      {
+         // Look for this inode in each task
+
+         // "group_leader" points to the main process' task, which resides in
+         // the global "tasks" list.
+         list_for_each( pTasks, &current->group_leader->tasks )
+         {
+            pEachTask = container_of( pTasks, struct task_struct, tasks );
+            if (pEachTask == NULL || pEachTask->files == NULL)
+            {
+               // Some tasks may not have files (e.g. Xsession)
+               continue;
+            }
+            // For each file this task has open, check if it's referencing
+            // our inode.
+            spin_lock_irqsave( &pEachTask->files->file_lock, flags );
+            pFDT = files_fdtable( pEachTask->files );
+            for (count = 0; count < pFDT->max_fds; count++)
+            {
+               pFilp = pFDT->fd[count];
+               if (pFilp != NULL &&  pFilp->f_dentry != NULL )
+               {
+                  if (pFilp->f_dentry->d_inode == pOpenInode)
+                  {
+                     // Close this file handle
+                     rcu_assign_pointer( pFDT->fd[count], NULL );                     
+                     spin_unlock_irqrestore( &pEachTask->files->file_lock, flags );
+                     
+                     DBG( "forcing close of open file handle\n" );
+                     filp_close( pFilp, pEachTask->files );
+
+                     spin_lock_irqsave( &pEachTask->files->file_lock, flags );
+                  }
+               }
+            }
+            spin_unlock_irqrestore( &pEachTask->files->file_lock, flags );
+         }
+      }
+   }
+
+   // Remove device (so no more calls can be made by users)
+   if (IS_ERR(pDev->mQMIDev.mpDevClass) == false)
+   {
+      device_destroy( pDev->mQMIDev.mpDevClass, 
+                      pDev->mQMIDev.mDevNum );   
+   }
+   cdev_del( &pDev->mQMIDev.mCdev );
+   
+   unregister_chrdev_region( pDev->mQMIDev.mDevNum, 1 );
+
+   return;
+}
+
+/*=========================================================================*/
+// Driver level client management
+/*=========================================================================*/
+
+/*===========================================================================
+METHOD:
+   QMIReady (Public Method)
+
+DESCRIPTION:
+   Send QMI CTL GET VERSION INFO REQ
+   Wait for response or timeout
+
+PARAMETERS:
+   pDev     [ I ] - Device specific memory
+   timeout  [ I ] - Milliseconds to wait for response
+
+RETURN VALUE:
+   bool - true if the device responds to QMI, false on timeout or error
+===========================================================================*/
+bool QMIReady(
+   sQCUSBNet *    pDev,
+   u16            timeout )
+{
+   int result;
+   void * pWriteBuffer;
+   u16 writeBufferSize;
+   void * pReadBuffer;
+   u16 readBufferSize;
+   struct semaphore readSem;
+   u16 curTime;
+   unsigned long flags;
+   u8 transactionID;
+   
+   if (IsDeviceValid( pDev ) == false)
+   {
+      DBG( "Invalid device\n" );
+      return false;
+   }
+
+   writeBufferSize = QMICTLReadyReqSize();
+   pWriteBuffer = kmalloc( writeBufferSize, GFP_KERNEL );
+   if (pWriteBuffer == NULL)
+   {
+      return false;
+   }
+
+   // An implementation of down_timeout has not been agreed on,
+   //    so it's been added and removed from the kernel several times.
+   //    We're just going to ignore it and poll the semaphore.
+
+   // Send a write every 100 ms and see if we get a response
+   for (curTime = 0; curTime < timeout; curTime += 100)
+   {
+      // Start read
+      sema_init( &readSem, 0 );
+   
+      transactionID = atomic_add_return( 1, &pDev->mQMIDev.mQMICTLTransactionID );
+      if (transactionID == 0)
+      {
+         transactionID = atomic_add_return( 1, &pDev->mQMIDev.mQMICTLTransactionID );
+      }
+      result = ReadAsync( pDev, QMICTL, transactionID, UpSem, &readSem );
+      if (result != 0)
+      {
+         kfree( pWriteBuffer );
+         return false;
+      }
+
+      // Fill buffer
+      result = QMICTLReadyReq( pWriteBuffer, 
+                               writeBufferSize,
+                               transactionID );
+      if (result < 0)
+      {
+         kfree( pWriteBuffer );
+         return false;
+      }
+
+      // Disregard status.  On errors, just try again
+      WriteSync( pDev,
+                 pWriteBuffer,
+                 writeBufferSize,
+                 QMICTL );
+
+      msleep( 100 );
+      if (down_trylock( &readSem ) == 0)
+      {
+         // Enter critical section
+         spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );
+
+         // Pop the read data
+         if (PopFromReadMemList( pDev,
+                                 QMICTL,
+                                 transactionID,
+                                 &pReadBuffer,
+                                 &readBufferSize ) == true)
+         {
+            // Success
+
+            // End critical section
+            spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+         
+            // We don't care about the result
+            kfree( pReadBuffer );
+
+            break;
+         }
+      }
+      else
+      {
+         // Enter critical section
+         spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );
+         
+         // Timeout, remove the async read
+         NotifyAndPopNotifyList( pDev, QMICTL, transactionID );
+         
+         // End critical section
+         spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+      }
+   }
+
+   kfree( pWriteBuffer );
+
+   // Did we time out?   
+   if (curTime >= timeout)
+   {
+      return false;
+   }
+   
+   DBG( "QMI Ready after %u milliseconds\n", curTime );
+   
+   // TODO: 3580 and newer firmware does not require this delay
+   msleep( 5000 );
+
+   // Success
+   return true;
+}
+
+/*===========================================================================
+METHOD:
+   QMIWDSCallback (Public Method)
+
+DESCRIPTION:
+   QMI WDS callback function
+   Update net stats or link state
+
+PARAMETERS:
+   pDev     [ I ] - Device specific memory
+   clientID [ I ] - Client ID
+   pData    [ I ] - Callback data (unused)
+
+RETURN VALUE:
+   None
+===========================================================================*/
+void QMIWDSCallback(
+   sQCUSBNet *    pDev,
+   u16            clientID,
+   void *         pData )
+{
+   bool bRet;
+   int result;
+   void * pReadBuffer;
+   u16 readBufferSize;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,31 ))
+   struct net_device_stats * pStats = &(pDev->mpNetDev->stats);
+#else
+   struct net_device_stats * pStats = &(pDev->mpNetDev->net->stats);
+#endif
+
+   u32 TXOk = (u32)-1;
+   u32 RXOk = (u32)-1;
+   u32 TXErr = (u32)-1;
+   u32 RXErr = (u32)-1;
+   u32 TXOfl = (u32)-1;
+   u32 RXOfl = (u32)-1;
+   u64 TXBytesOk = (u64)-1;
+   u64 RXBytesOk = (u64)-1;
+   bool bLinkState;
+   bool bReconfigure;
+   unsigned long flags;
+   
+   if (IsDeviceValid( pDev ) == false)
+   {
+      DBG( "Invalid device\n" );
+      return;
+   }
+
+   // Critical section
+   spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );
+   
+   bRet = PopFromReadMemList( pDev,
+                              clientID,
+                              0,
+                              &pReadBuffer,
+                              &readBufferSize );
+   
+   // End critical section
+   spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags ); 
+   
+   if (bRet == false)
+   {
+      DBG( "WDS callback failed to get data\n" );
+      return;
+   }
+   
+   // Default values
+   bLinkState = ! QTestDownReason( pDev, NO_NDIS_CONNECTION );
+   bReconfigure = false;
+
+   result = QMIWDSEventResp( pReadBuffer,
+                             readBufferSize,
+                             &TXOk,
+                             &RXOk,
+                             &TXErr,
+                             &RXErr,
+                             &TXOfl,
+                             &RXOfl,
+                             &TXBytesOk,
+                             &RXBytesOk,
+                             &bLinkState,
+                             &bReconfigure );
+   if (result < 0)
+   {
+      DBG( "bad WDS packet\n" );
+   }
+   else
+   {
+
+      // Fill in new values, ignore max values
+      if (TXOfl != (u32)-1)
+      {
+         pStats->tx_fifo_errors = TXOfl;
+      }
+      
+      if (RXOfl != (u32)-1)
+      {
+         pStats->rx_fifo_errors = RXOfl;
+      }
+
+      if (TXErr != (u32)-1)
+      {
+         pStats->tx_errors = TXErr;
+      }
+      
+      if (RXErr != (u32)-1)
+      {
+         pStats->rx_errors = RXErr;
+      }
+
+      if (TXOk != (u32)-1)
+      {
+         pStats->tx_packets = TXOk + pStats->tx_errors;
+      }
+      
+      if (RXOk != (u32)-1)
+      {
+         pStats->rx_packets = RXOk + pStats->rx_errors;
+      }
+
+      if (TXBytesOk != (u64)-1)
+      {
+         pStats->tx_bytes = TXBytesOk;
+      }
+      
+      if (RXBytesOk != (u64)-1)
+      {
+         pStats->rx_bytes = RXBytesOk;
+      }
+
+      if (bReconfigure == true)
+      {
+         DBG( "Net device link reset\n" );
+         QSetDownReason( pDev, NO_NDIS_CONNECTION );
+         QClearDownReason( pDev, NO_NDIS_CONNECTION );
+      }
+      else 
+      {
+         if (bLinkState == true)
+         {
+            DBG( "Net device link is connected\n" );
+            QClearDownReason( pDev, NO_NDIS_CONNECTION );
+         }
+         else
+         {
+            DBG( "Net device link is disconnected\n" );
+            QSetDownReason( pDev, NO_NDIS_CONNECTION );
+         }
+      }
+   }
+
+   kfree( pReadBuffer );
+
+   // Setup next read
+   result = ReadAsync( pDev,
+                       clientID,
+                       0,
+                       QMIWDSCallback,
+                       pData );
+   if (result != 0)
+   {
+      DBG( "unable to setup next async read\n" );
+   }
+
+   return;
+}
+
+/*===========================================================================
+METHOD:
+   SetupQMIWDSCallback (Public Method)
+
+DESCRIPTION:
+   Request client, fire off requests, and start async read for the
+   QMI WDS callback
+
+PARAMETERS:
+   pDev     [ I ] - Device specific memory
+
+RETURN VALUE:
+   int - 0 for success
+         Negative errno for failure
+===========================================================================*/
+int SetupQMIWDSCallback( sQCUSBNet * pDev )
+{
+   int result;
+   void * pWriteBuffer;
+   u16 writeBufferSize;
+   u16 WDSClientID;
+
+   if (IsDeviceValid( pDev ) == false)
+   {
+      DBG( "Invalid device\n" );
+      return -EFAULT;
+   }
+   
+   result = GetClientID( pDev, QMIWDS );
+   if (result < 0)
+   {
+      return result;
+   }
+   WDSClientID = result;
+
+   // QMI WDS Set Event Report
+   writeBufferSize = QMIWDSSetEventReportReqSize();
+   pWriteBuffer = kmalloc( writeBufferSize, GFP_KERNEL );
+   if (pWriteBuffer == NULL)
+   {
+      return -ENOMEM;
+   }
+   
+   result = QMIWDSSetEventReportReq( pWriteBuffer, 
+                                     writeBufferSize,
+                                     1 );
+   if (result < 0)
+   {
+      kfree( pWriteBuffer );
+      return result;
+   }
+
+   result = WriteSync( pDev,
+                       pWriteBuffer,
+                       writeBufferSize,
+                       WDSClientID );
+   kfree( pWriteBuffer );
+
+   if (result < 0)
+   {
+      return result;
+   }
+
+   // QMI WDS Get PKG SRVC Status
+   writeBufferSize = QMIWDSGetPKGSRVCStatusReqSize();
+   pWriteBuffer = kmalloc( writeBufferSize, GFP_KERNEL );
+   if (pWriteBuffer == NULL)
+   {
+      return -ENOMEM;
+   }
+
+   result = QMIWDSGetPKGSRVCStatusReq( pWriteBuffer, 
+                                       writeBufferSize,
+                                       2 );
+   if (result < 0)
+   {
+      kfree( pWriteBuffer );
+      return result;
+   }
+   
+   result = WriteSync( pDev,
+                       pWriteBuffer,
+                       writeBufferSize,
+                       WDSClientID );
+   kfree( pWriteBuffer );
+
+   if (result < 0)
+   {
+      return result;
+   }
+
+   // Set up async read callback
+   result = ReadAsync( pDev,
+                       WDSClientID,
+                       0,
+                       QMIWDSCallback,
+                       NULL );
+   if (result != 0)
+   {
+      DBG( "unable to setup async read\n" );
+      return result;
+   }
+
+   // Send SetControlLineState request (USB_CDC)
+   //   Required for Autoconnect
+   result = usb_control_msg( pDev->mpNetDev->udev,
+                             usb_sndctrlpipe( pDev->mpNetDev->udev, 0 ),
+                             0x22, // SET_CONTROL_LINE_STATE
+                             0x21, // bmRequestType: host-to-device, class, interface
+                             1, // DTR present
+                             0,
+                             NULL,
+                             0,
+                             100 );
+   if (result < 0)
+   {
+      DBG( "Bad SetControlLineState status %d\n", result );
+      return result;
+   }
+
+   return 0;
+}
+
+/*===========================================================================
+METHOD:
+   QMIDMSGetMEID (Public Method)
+
+DESCRIPTION:
+   Register DMS client
+   send MEID req and parse response
+   Release DMS client
+
+PARAMETERS:
+   pDev     [ I ] - Device specific memory
+
+RETURN VALUE:
+   int - 0 for success
+         Negative errno for failure
+===========================================================================*/
+int QMIDMSGetMEID( sQCUSBNet * pDev )
+{
+   int result;
+   void * pWriteBuffer;
+   u16 writeBufferSize;
+   void * pReadBuffer;
+   u16 readBufferSize;
+   u16 DMSClientID;
+
+   if (IsDeviceValid( pDev ) == false)
+   {
+      DBG( "Invalid device\n" );
+      return -EFAULT;
+   }
+
+   result = GetClientID( pDev, QMIDMS );
+   if (result < 0)
+   {
+      return result;
+   }
+   DMSClientID = result;
+
+   // QMI DMS Get Serial numbers Req
+   writeBufferSize = QMIDMSGetMEIDReqSize();
+   pWriteBuffer = kmalloc( writeBufferSize, GFP_KERNEL );
+   if (pWriteBuffer == NULL)
+   {
+      return -ENOMEM;
+   }
+
+   result = QMIDMSGetMEIDReq( pWriteBuffer, 
+                              writeBufferSize,
+                              1 );
+   if (result < 0)
+   {
+      kfree( pWriteBuffer );
+      return result;
+   }
+
+   result = WriteSync( pDev,
+                       pWriteBuffer,
+                       writeBufferSize,
+                       DMSClientID );
+   kfree( pWriteBuffer );
+
+   if (result < 0)
+   {
+      return result;
+   }
+
+   // QMI DMS Get Serial numbers Resp
+   result = ReadSync( pDev,
+                      &pReadBuffer,
+                      DMSClientID,
+                      1 );
+   if (result < 0)
+   {
+      return result;
+   }
+   readBufferSize = result;
+
+   result = QMIDMSGetMEIDResp( pReadBuffer,
+                               readBufferSize,
+                               &pDev->mMEID[0],
+                               14 );
+   kfree( pReadBuffer );
+
+   if (result < 0)
+   {
+      DBG( "bad get MEID resp\n" );
+      
+      // Non fatal error, device did not return any MEID
+      //    Fill with 0's
+      memset( &pDev->mMEID[0], '0', 14 );
+   }
+
+   ReleaseClientID( pDev, DMSClientID );
+
+   // Success
+   return 0;
+}
diff --git a/drivers/staging/gobi/QCUSBNet2k/QMIDevice.h b/drivers/staging/gobi/QCUSBNet2k/QMIDevice.h
new file mode 100644
index 0000000..6fb9c47
--- /dev/null
+++ b/drivers/staging/gobi/QCUSBNet2k/QMIDevice.h
@@ -0,0 +1,297 @@
+/*===========================================================================
+FILE:
+   QMIDevice.h
+
+DESCRIPTION:
+   Functions related to the QMI interface device
+   
+FUNCTIONS:
+   Generic functions
+      IsDeviceValid
+      PrintHex
+      QSetDownReason
+      QClearDownReason
+      QTestDownReason
+
+   Driver level asynchronous read functions
+      ReadCallback
+      IntCallback
+      StartRead
+      KillRead
+
+   Internal read/write functions
+      ReadAsync
+      UpSem
+      ReadSync
+      WriteSyncCallback
+      WriteSync
+
+   Internal memory management functions
+      GetClientID
+      ReleaseClientID
+      FindClientMem
+      AddToReadMemList
+      PopFromReadMemList
+      AddToNotifyList
+      NotifyAndPopNotifyList
+      AddToURBList
+      PopFromURBList
+
+   Userspace wrappers
+      UserspaceOpen
+      UserspaceIOCTL
+      UserspaceClose
+      UserspaceRead
+      UserspaceWrite
+
+   Initializer and destructor
+      RegisterQMIDevice
+      DeregisterQMIDevice
+
+   Driver level client management
+      QMIReady
+      QMIWDSCallback
+      SetupQMIWDSCallback
+      QMIDMSGetMEID
+
+Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License version 2 and
+only version 2 as published by the Free Software Foundation.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA.
+
+===========================================================================*/
+
+//---------------------------------------------------------------------------
+// Pragmas
+//---------------------------------------------------------------------------
+#pragma once
+
+//---------------------------------------------------------------------------
+// Include Files
+//---------------------------------------------------------------------------
+#include "Structs.h"
+#include "QMI.h"
+
+/*=========================================================================*/
+// Generic functions
+/*=========================================================================*/
+
+// Basic test to see if device memory is valid
+bool IsDeviceValid( sQCUSBNet * pDev );
+
+// Print Hex data, for debug purposes
+void PrintHex(
+   void *         pBuffer,
+   u16            bufSize );
+
+// Sets mDownReason and turns carrier off
+void QSetDownReason(
+   sQCUSBNet *    pDev,
+   u8             reason );
+
+// Clears mDownReason and may turn carrier on
+void QClearDownReason(
+   sQCUSBNet *    pDev,
+   u8             reason );
+
+// Tests mDownReason and returns whether reason is set
+bool QTestDownReason(
+   sQCUSBNet *    pDev,
+   u8             reason );
+
+/*=========================================================================*/
+// Driver level asynchronous read functions
+/*=========================================================================*/
+
+// Read callback
+//    Put the data in storage and notify anyone waiting for data
+void ReadCallback( struct urb * pReadURB );
+
+// Interrupt callback
+//    Data is available, start a read URB
+void IntCallback( struct urb * pIntURB );
+
+// Start continuous read "thread"
+int StartRead( sQCUSBNet * pDev );
+
+// Kill continuous read "thread"
+void KillRead( sQCUSBNet * pDev );
+
+/*=========================================================================*/
+// Internal read/write functions
+/*=========================================================================*/
+
+// Start asynchronous read
+//     Reading client's data store, not device
+int ReadAsync(
+   sQCUSBNet *    pDev,
+   u16            clientID,
+   u16            transactionID,
+   void           (*pCallback)(sQCUSBNet *, u16, void *),
+   void *         pData );
+
+// Notification function for synchronous read
+void UpSem( 
+   sQCUSBNet *    pDev,
+   u16            clientID,
+   void *         pData );
+
+// Start synchronous read
+//     Reading client's data store, not device
+int ReadSync(
+   sQCUSBNet *    pDev,
+   void **        ppOutBuffer,
+   u16            clientID,
+   u16            transactionID );
+
+// Write callback
+void WriteSyncCallback( struct urb * pWriteURB );
+
+// Start synchronous write
+int WriteSync(
+   sQCUSBNet *    pDev,
+   char *         pInWriteBuffer,
+   int            size,
+   u16            clientID );
+
+/*=========================================================================*/
+// Internal memory management functions
+/*=========================================================================*/
+
+// Create client and allocate memory
+int GetClientID( 
+   sQCUSBNet *      pDev,
+   u8               serviceType );
+
+// Release client and free memory
+void ReleaseClientID(
+   sQCUSBNet *      pDev,
+   u16              clientID );
+
+// Find this client's memory
+sClientMemList * FindClientMem(
+   sQCUSBNet *      pDev,
+   u16              clientID );
+
+// Add Data to this client's ReadMem list
+bool AddToReadMemList( 
+   sQCUSBNet *      pDev,
+   u16              clientID,
+   u16              transactionID,
+   void *           pData,
+   u16              dataSize );
+
+// Remove data from this client's ReadMem list if it matches 
+// the specified transaction ID.
+bool PopFromReadMemList( 
+   sQCUSBNet *      pDev,
+   u16              clientID,
+   u16              transactionID,
+   void **          ppData,
+   u16 *            pDataSize );
+
+// Add Notify entry to this client's notify List
+bool AddToNotifyList( 
+   sQCUSBNet *      pDev,
+   u16              clientID,
+   u16              transactionID,
+   void             (* pNotifyFunct)(sQCUSBNet *, u16, void *),
+   void *           pData );
+
+// Remove first Notify entry from this client's notify list 
+//    and Run function
+bool NotifyAndPopNotifyList( 
+   sQCUSBNet *      pDev,
+   u16              clientID,
+   u16              transactionID );
+
+// Add URB to this client's URB list
+bool AddToURBList( 
+   sQCUSBNet *      pDev,
+   u16              clientID,
+   struct urb *     pURB );
+
+// Remove URB from this client's URB list
+struct urb * PopFromURBList( 
+   sQCUSBNet *      pDev,
+   u16              clientID );
+
+/*=========================================================================*/
+// Userspace wrappers
+/*=========================================================================*/
+
+// Userspace open
+int UserspaceOpen( 
+   struct inode *   pInode, 
+   struct file *    pFilp );
+
+// Userspace ioctl
+int UserspaceIOCTL( 
+   struct inode *    pUnusedInode, 
+   struct file *     pFilp,
+   unsigned int      cmd, 
+   unsigned long     arg );
+
+// Userspace close
+int UserspaceClose( 
+   struct file *       pFilp,
+   fl_owner_t          unusedFileTable );
+
+// Userspace read (synchronous)
+ssize_t UserspaceRead( 
+   struct file *        pFilp,
+   char __user *        pBuf, 
+   size_t               size,
+   loff_t *             pUnusedFpos );
+
+// Userspace write (synchronous)
+ssize_t UserspaceWrite(
+   struct file *        pFilp, 
+   const char __user *  pBuf, 
+   size_t               size,
+   loff_t *             pUnusedFpos );
+
+/*=========================================================================*/
+// Initializer and destructor
+/*=========================================================================*/
+
+// QMI Device initialization function
+int RegisterQMIDevice( sQCUSBNet * pDev );
+
+// QMI Device cleanup function
+void DeregisterQMIDevice( sQCUSBNet * pDev );
+
+/*=========================================================================*/
+// Driver level client management
+/*=========================================================================*/
+
+// Check if QMI is ready for use
+bool QMIReady(
+   sQCUSBNet *    pDev,
+   u16            timeout );
+
+// QMI WDS callback function
+void QMIWDSCallback(
+   sQCUSBNet *    pDev,
+   u16            clientID,
+   void *         pData );
+
+// Fire off requests and start async read for QMI WDS callback
+int SetupQMIWDSCallback( sQCUSBNet * pDev );
+
+// Register client, send req and parse MEID response, release client
+int QMIDMSGetMEID( sQCUSBNet * pDev );
+
+
+
diff --git a/drivers/staging/gobi/QCUSBNet2k/Structs.h b/drivers/staging/gobi/QCUSBNet2k/Structs.h
new file mode 100644
index 0000000..07e3193
--- /dev/null
+++ b/drivers/staging/gobi/QCUSBNet2k/Structs.h
@@ -0,0 +1,318 @@
+/*===========================================================================
+FILE:
+   Structs.h
+
+DESCRIPTION:
+   Declaration of structures used by the Qualcomm Linux USB Network driver
+   
+FUNCTIONS:
+   none
+
+Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License version 2 and
+only version 2 as published by the Free Software Foundation.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA.
+
+===========================================================================*/
+
+//---------------------------------------------------------------------------
+// Pragmas
+//---------------------------------------------------------------------------
+#pragma once
+
+//---------------------------------------------------------------------------
+// Include Files
+//---------------------------------------------------------------------------
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/version.h>
+#include <linux/cdev.h>
+#include <linux/kthread.h>
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,24 ))
+   #include "usbnet.h"
+#else
+   #include <linux/usb/usbnet.h>
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,25 ))
+   #include <linux/fdtable.h>
+#else
+   #include <linux/file.h>
+#endif
+
+// DBG macro
+#define DBG( format, arg... ) \
+   do \
+   { \
+      if (debug == 1) \
+      { \
+         printk( KERN_INFO "QCUSBNet2k::%s " format, __FUNCTION__, ## arg ); \
+      } \
+   } while (0)
+
+
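+/*===========================================================================
+   Editor's note (illustrative, not part of the original driver): DBG only
+   prints when the module-level "debug" flag is set; that flag is assumed to
+   be defined elsewhere in the driver, typically as a module parameter.
+
+      DBG( "read %u bytes for client 0x%04X\n", dataSize, clientID );
+
+   logs "QCUSBNet2k::<function> read ..." at KERN_INFO when debug == 1 and
+   does nothing otherwise.
+===========================================================================*/
+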
+// Forward declaration; the full structure is defined later in this file
+struct sQCUSBNet;
+
+/*=========================================================================*/
+// Struct sReadMemList
+//
+//    Structure that defines an entry in a Read Memory linked list
+/*=========================================================================*/
+typedef struct sReadMemList
+{
+   /* Data buffer */
+   void *                     mpData;
+   
+   /* Transaction ID */
+   u16                        mTransactionID;
+
+   /* Size of data buffer */
+   u16                        mDataSize;
+
+   /* Next entry in linked list */
+   struct sReadMemList *      mpNext;
+
+} sReadMemList;
+
+/*=========================================================================*/
+// Struct sNotifyList
+//
+//    Structure that defines an entry in a Notification linked list
+/*=========================================================================*/
+typedef struct sNotifyList
+{
+   /* Function to be run when data becomes available */
+   void                  (* mpNotifyFunct)(struct sQCUSBNet *, u16, void *);
+   
+   /* Transaction ID */
+   u16                   mTransactionID;
+
+   /* Data to provide as parameter to mpNotifyFunct */
+   void *                mpData;
+   
+   /* Next entry in linked list */
+   struct sNotifyList *  mpNext;
+
+} sNotifyList;
+
+/*=========================================================================*/
+// Struct sURBList
+//
+//    Structure that defines an entry in a URB linked list
+/*=========================================================================*/
+typedef struct sURBList
+{
+   /* The current URB */
+   struct urb *       mpURB;
+
+   /* Next entry in linked list */
+   struct sURBList *  mpNext;
+
+} sURBList;
+
+/*=========================================================================*/
+// Struct sClientMemList
+//
+//    Structure that defines an entry in a Client Memory linked list
+//      Stores data specific to a Service Type and Client ID
+/*=========================================================================*/
+typedef struct sClientMemList
+{
+   /* Client ID for this Client */
+   u16                          mClientID;
+
+   /* Linked list of Read entries */
+   /*    Stores data read from device before sending to client */
+   sReadMemList *               mpList;
+   
+   /* Linked list of Notification entries */
+   /*    Stores notification functions to be run as data becomes 
+         available or the device is removed */
+   sNotifyList *                mpReadNotifyList;
+
+   /* Linked list of URB entries */
+   /*    Stores pointers to outstanding URBs which need to be canceled 
+         when the client is deregistered or the device is removed */
+   sURBList *                   mpURBList;
+   
+   /* Next entry in linked list */
+   struct sClientMemList *      mpNext;
+
+} sClientMemList;
+
+/*=========================================================================*/
+// Struct sURBSetupPacket
+//
+//    Structure that defines a USB Setup packet for Control URBs
+//    Taken from USB CDC specifications
+/*=========================================================================*/
+typedef struct sURBSetupPacket
+{
+   /* Request type */
+   u8    mRequestType;
+
+   /* Request code */
+   u8    mRequestCode;
+
+   /* Value */
+   u16   mValue;
+
+   /* Index */
+   u16   mIndex;
+
+   /* Length of Control URB */
+   u16   mLength;
+
+} sURBSetupPacket;
+
+// Common value for sURBSetupPacket.mLength
+#define DEFAULT_READ_URB_LENGTH 0x1000
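+
+/*===========================================================================
+   Editor's note (illustrative, based on the USB CDC specification rather
+   than code in this file): the read path typically fills this packet as a
+   class-specific GET_ENCAPSULATED_RESPONSE control transfer, roughly:
+
+      setup.mRequestType = 0xA1;   // device-to-host, class, interface
+      setup.mRequestCode = 0x01;   // GET_ENCAPSULATED_RESPONSE
+      setup.mValue       = 0;
+      setup.mIndex       = interfaceNumber;
+      setup.mLength      = DEFAULT_READ_URB_LENGTH;
+
+   The interfaceNumber and the byte ordering of the 16-bit fields are
+   assumptions for this sketch.
+===========================================================================*/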
+
+
+/*=========================================================================*/
+// Struct sAutoPM
+//
+//    Structure used to manage AutoPM thread which determines whether the
+//    device is in use or may enter autosuspend.  Also submits net 
+//    transmissions asynchronously.
+/*=========================================================================*/
+typedef struct sAutoPM
+{
+   /* Thread for atomic autopm function */
+   struct task_struct *       mpThread;
+
+   /* Up this semaphore when it's time for the thread to work */
+   struct semaphore           mThreadDoWork;
+
+   /* Time to exit? */
+   bool                       mbExit;
+
+   /* List of URB's queued to be sent to the device */
+   sURBList *                 mpURBList;
+
+   /* URB list lock (for adding and removing elements) */
+   spinlock_t                 mURBListLock;
+   
+   /* Active URB */
+   struct urb *               mpActiveURB;
+
+   /* Active URB lock (for adding and removing elements) */
+   spinlock_t                 mActiveURBLock;
+   
+   /* Duplicate pointer to USB device interface */
+   struct usb_interface *     mpIntf;
+
+} sAutoPM;
+
+
+/*=========================================================================*/
+// Struct sQMIDev
+//
+//    Structure that defines the data for the QMI device
+/*=========================================================================*/
+typedef struct sQMIDev
+{
+   /* Device number */
+   dev_t                      mDevNum;
+
+   /* Device class */
+   struct class *             mpDevClass;
+
+   /* cdev struct */
+   struct cdev                mCdev;
+
+   /* Pointer to read URB */
+   struct urb *               mpReadURB;
+
+   /* Read setup packet */
+   sURBSetupPacket *          mpReadSetupPacket;
+
+   /* Read buffer attached to current read URB */
+   void *                     mpReadBuffer;
+   
+   /* Interrupt URB */
+   /*    Used to asynchronously notify when read data is available */
+   struct urb *               mpIntURB;
+
+   /* Buffer used by Interrupt URB */
+   void *                     mpIntBuffer;
+   
+   /* Pointer to memory linked list for all clients */
+   sClientMemList *           mpClientMemList;
+   
+   /* Spinlock for client Memory entries */
+   spinlock_t                 mClientMemLock;
+
+   /* Transaction ID associated with QMICTL "client" */
+   atomic_t                   mQMICTLTransactionID;
+
+} sQMIDev;
+
+/*=========================================================================*/
+// Struct sQCUSBNet
+//
+//    Structure that defines the data associated with the Qualcomm USB device
+/*=========================================================================*/
+typedef struct sQCUSBNet
+{
+   /* Net device structure */
+   struct usbnet *        mpNetDev;
+
+   /* Usb device interface */
+   struct usb_interface * mpIntf;
+   
+   /* Pointers to usbnet_open and usbnet_stop functions */
+   int                  (* mpUSBNetOpen)(struct net_device *);
+   int                  (* mpUSBNetStop)(struct net_device *);
+   
+   /* Reason(s) why interface is down */
+   /* Used by Q*DownReason */
+   unsigned long          mDownReason;
+#define NO_NDIS_CONNECTION    0
+#define CDC_CONNECTION_SPEED  1
+#define DRIVER_SUSPENDED      2
+#define NET_IFACE_STOPPED     3
+
+   /* QMI "device" status */
+   bool                   mbQMIValid;
+
+   /* QMI "device" memory */
+   sQMIDev                mQMIDev;
+
+   /* Device MEID */
+   char                   mMEID[14];
+   
+   /* AutoPM thread */
+   sAutoPM                mAutoPM;
+
+} sQCUSBNet;
+
+/*=========================================================================*/
+// Struct sQMIFilpStorage
+//
+//    Structure that defines the storage each file handle contains
+//       Relates the file handle to a client
+/*=========================================================================*/
+typedef struct sQMIFilpStorage
+{
+   /* Client ID */
+   u16                  mClientID;
+   
+   /* Device pointer */
+   sQCUSBNet *          mpDev;
+
+} sQMIFilpStorage;
+
+
diff --git a/drivers/staging/msm/ebi2_l2f.c b/drivers/staging/msm/ebi2_l2f.c
index eea891d..0798019 100644
--- a/drivers/staging/msm/ebi2_l2f.c
+++ b/drivers/staging/msm/ebi2_l2f.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include "msm_fb.h"
diff --git a/drivers/staging/msm/ebi2_lcd.c b/drivers/staging/msm/ebi2_lcd.c
index b41e123..5f8dd0c 100644
--- a/drivers/staging/msm/ebi2_lcd.c
+++ b/drivers/staging/msm/ebi2_lcd.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include <linux/module.h>
diff --git a/drivers/staging/msm/ebi2_tmd20.c b/drivers/staging/msm/ebi2_tmd20.c
index d66d039..0b1dda2 100644
--- a/drivers/staging/msm/ebi2_tmd20.c
+++ b/drivers/staging/msm/ebi2_tmd20.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include "msm_fb.h"
diff --git a/drivers/staging/msm/hdmi_sii9022.c b/drivers/staging/msm/hdmi_sii9022.c
index 6b82b56..156434a 100644
--- a/drivers/staging/msm/hdmi_sii9022.c
+++ b/drivers/staging/msm/hdmi_sii9022.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include <linux/i2c.h>
diff --git a/drivers/staging/msm/lcdc.c b/drivers/staging/msm/lcdc.c
index 8183394..6c0f6bb 100644
--- a/drivers/staging/msm/lcdc.c
+++ b/drivers/staging/msm/lcdc.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include <linux/module.h>
diff --git a/drivers/staging/msm/lcdc_external.c b/drivers/staging/msm/lcdc_external.c
index 45ff785..6d40f1b 100644
--- a/drivers/staging/msm/lcdc_external.c
+++ b/drivers/staging/msm/lcdc_external.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include "msm_fb.h"
diff --git a/drivers/staging/msm/lcdc_gordon.c b/drivers/staging/msm/lcdc_gordon.c
index 399ec8c..1a7fe77 100644
--- a/drivers/staging/msm/lcdc_gordon.c
+++ b/drivers/staging/msm/lcdc_gordon.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include <linux/delay.h>
diff --git a/drivers/staging/msm/lcdc_panel.c b/drivers/staging/msm/lcdc_panel.c
index b40974e..e494251 100644
--- a/drivers/staging/msm/lcdc_panel.c
+++ b/drivers/staging/msm/lcdc_panel.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include "msm_fb.h"
diff --git a/drivers/staging/msm/lcdc_prism.c b/drivers/staging/msm/lcdc_prism.c
index d102c98..e04f937 100644
--- a/drivers/staging/msm/lcdc_prism.c
+++ b/drivers/staging/msm/lcdc_prism.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include "msm_fb.h"
diff --git a/drivers/staging/msm/lcdc_sharp_wvga_pt.c b/drivers/staging/msm/lcdc_sharp_wvga_pt.c
index 1f08cf9..b39b6e6 100644
--- a/drivers/staging/msm/lcdc_sharp_wvga_pt.c
+++ b/drivers/staging/msm/lcdc_sharp_wvga_pt.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include <linux/delay.h>
diff --git a/drivers/staging/msm/lcdc_st15.c b/drivers/staging/msm/lcdc_st15.c
index fed8278..ccfadfc 100644
--- a/drivers/staging/msm/lcdc_st15.c
+++ b/drivers/staging/msm/lcdc_st15.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include <linux/i2c.h>
diff --git a/drivers/staging/msm/lcdc_st1_wxga.c b/drivers/staging/msm/lcdc_st1_wxga.c
new file mode 100644
index 0000000..4b853af
--- /dev/null
+++ b/drivers/staging/msm/lcdc_st1_wxga.c
@@ -0,0 +1,49 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm_fb.h"
+
+static int __init lcdc_st1_wxga_init(void)
+{
+	int ret;
+	struct msm_panel_info pinfo;
+
+	if (msm_fb_detect_client("lcdc_st1_wxga"))
+		return 0;
+
+	pinfo.xres = 1280;
+	pinfo.yres = 720;
+	pinfo.type = LCDC_PANEL;
+	pinfo.pdest = DISPLAY_1;
+	pinfo.wait_cycle = 0;
+	pinfo.bpp = 18;
+	pinfo.fb_num = 2;
+	pinfo.clk_rate = 74250000;
+
+	pinfo.lcdc.h_back_porch = 124;
+	pinfo.lcdc.h_front_porch = 110;
+	pinfo.lcdc.h_pulse_width = 136;
+	pinfo.lcdc.v_back_porch = 19;
+	pinfo.lcdc.v_front_porch = 5;
+	pinfo.lcdc.v_pulse_width = 6;
+	pinfo.lcdc.border_clr = 0;	/* blk */
+	pinfo.lcdc.underflow_clr = 0xff;	/* blue */
+	pinfo.lcdc.hsync_skew = 0;
+
+	ret = lcdc_device_register(&pinfo);
+	if (ret)
+		printk(KERN_ERR "%s: failed to register device!\n", __func__);
+
+	return ret;
+}
+
+module_init(lcdc_st1_wxga_init);
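A quick consistency check on the timing block above: at a nominal 60 Hz refresh (an assumption; the driver only states the clock rate) the pixel clock follows directly from the resolution plus the listed porches and pulse widths:

    h_total = 1280 + 124 + 110 + 136 = 1650 pixel clocks per line
    v_total =  720 +  19 +   5 +   6 =  750 lines per frame
    1650 * 750 * 60 Hz = 74,250,000 Hz

which matches pinfo.clk_rate = 74250000.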
diff --git a/drivers/staging/msm/lcdc_toshiba_wvga_pt.c b/drivers/staging/msm/lcdc_toshiba_wvga_pt.c
index edba78a..64896c7 100644
--- a/drivers/staging/msm/lcdc_toshiba_wvga_pt.c
+++ b/drivers/staging/msm/lcdc_toshiba_wvga_pt.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include <linux/delay.h>
diff --git a/drivers/staging/msm/lcdc_wxga.c b/drivers/staging/msm/lcdc_wxga.c
new file mode 100644
index 0000000..a2dcbc6a
--- /dev/null
+++ b/drivers/staging/msm/lcdc_wxga.c
@@ -0,0 +1,51 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm_fb.h"
+
+static int __init lcdc_wxga_init(void)
+{
+	int ret;
+	struct msm_panel_info pinfo;
+
+#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
+	if (msm_fb_detect_client("lcdc_wxga"))
+		return 0;
+#endif
+
+	pinfo.xres = 1280;
+	pinfo.yres = 720;
+	pinfo.type = LCDC_PANEL;
+	pinfo.pdest = DISPLAY_1;
+	pinfo.wait_cycle = 0;
+	pinfo.bpp = 24;
+	pinfo.fb_num = 2;
+	pinfo.clk_rate = 74250000;
+
+	pinfo.lcdc.h_back_porch = 124;
+	pinfo.lcdc.h_front_porch = 110;
+	pinfo.lcdc.h_pulse_width = 136;
+	pinfo.lcdc.v_back_porch = 19;
+	pinfo.lcdc.v_front_porch = 5;
+	pinfo.lcdc.v_pulse_width = 6;
+	pinfo.lcdc.border_clr = 0;	/* blk */
+	pinfo.lcdc.underflow_clr = 0xff;	/* blue */
+	pinfo.lcdc.hsync_skew = 0;
+
+	ret = lcdc_device_register(&pinfo);
+	if (ret)
+		printk(KERN_ERR "%s: failed to register device!\n", __func__);
+
+	return ret;
+}
+
+module_init(lcdc_wxga_init);
diff --git a/drivers/staging/msm/mddi.c b/drivers/staging/msm/mddi.c
index 132eb1a..a1f2fd1 100644
--- a/drivers/staging/msm/mddi.c
+++ b/drivers/staging/msm/mddi.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include <linux/module.h>
diff --git a/drivers/staging/msm/mddi_ext.c b/drivers/staging/msm/mddi_ext.c
index c0c168c7..6a024de 100644
--- a/drivers/staging/msm/mddi_ext.c
+++ b/drivers/staging/msm/mddi_ext.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include <linux/module.h>
diff --git a/drivers/staging/msm/mddi_ext_lcd.c b/drivers/staging/msm/mddi_ext_lcd.c
index 502e80d..0c72263 100644
--- a/drivers/staging/msm/mddi_ext_lcd.c
+++ b/drivers/staging/msm/mddi_ext_lcd.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include "msm_fb.h"
diff --git a/drivers/staging/msm/mddi_prism.c b/drivers/staging/msm/mddi_prism.c
index 489d404..dd3f972 100644
--- a/drivers/staging/msm/mddi_prism.c
+++ b/drivers/staging/msm/mddi_prism.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include "msm_fb.h"
diff --git a/drivers/staging/msm/mddi_sharp.c b/drivers/staging/msm/mddi_sharp.c
index 1da1be4..ba9999b 100644
--- a/drivers/staging/msm/mddi_sharp.c
+++ b/drivers/staging/msm/mddi_sharp.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include "msm_fb.h"
diff --git a/drivers/staging/msm/mddi_toshiba.c b/drivers/staging/msm/mddi_toshiba.c
index e96342d..70b3dac 100644
--- a/drivers/staging/msm/mddi_toshiba.c
+++ b/drivers/staging/msm/mddi_toshiba.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include "msm_fb.h"
diff --git a/drivers/staging/msm/mddi_toshiba_vga.c b/drivers/staging/msm/mddi_toshiba_vga.c
index 7e61d3a..b58e483 100644
--- a/drivers/staging/msm/mddi_toshiba_vga.c
+++ b/drivers/staging/msm/mddi_toshiba_vga.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include "msm_fb.h"
diff --git a/drivers/staging/msm/mddi_toshiba_wvga.c b/drivers/staging/msm/mddi_toshiba_wvga.c
new file mode 100644
index 0000000..ea9ff7c
--- /dev/null
+++ b/drivers/staging/msm/mddi_toshiba_wvga.c
@@ -0,0 +1,58 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm_fb.h"
+#include "mddihost.h"
+#include "mddi_toshiba.h"
+
+static int __init mddi_toshiba_wvga_init(void)
+{
+	int ret;
+	struct msm_panel_info pinfo;
+
+#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
+	if (msm_fb_detect_client("mddi_toshiba_wvga"))
+		return 0;
+#endif
+
+	pinfo.xres = 800;
+	pinfo.yres = 480;
+	pinfo.pdest = DISPLAY_2;
+	pinfo.type = MDDI_PANEL;
+	pinfo.mddi.vdopkt = MDDI_DEFAULT_PRIM_PIX_ATTR;
+	pinfo.wait_cycle = 0;
+	pinfo.bpp = 18;
+	pinfo.lcd.vsync_enable = TRUE;
+	pinfo.lcd.refx100 = 6118;
+	pinfo.lcd.v_back_porch = 6;
+	pinfo.lcd.v_front_porch = 0;
+	pinfo.lcd.v_pulse_width = 0;
+	pinfo.lcd.hw_vsync_mode = FALSE;
+	pinfo.lcd.vsync_notifier_period = (1 * HZ);
+	pinfo.bl_max = 4;
+	pinfo.bl_min = 1;
+	pinfo.clk_rate = 192000000;
+	pinfo.clk_min =  190000000;
+	pinfo.clk_max =  200000000;
+	pinfo.fb_num = 2;
+
+	ret = mddi_toshiba_device_register(&pinfo, TOSHIBA_VGA_PRIM,
+					   LCD_TOSHIBA_2P4_WVGA);
+	if (ret) {
+		printk(KERN_ERR "%s: failed to register device!\n", __func__);
+		return ret;
+	}
+
+	return ret;
+}
+
+module_init(mddi_toshiba_wvga_init);
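For reference, refx100 in msm_panel_info encodes the panel refresh rate multiplied by 100, so the value 6118 above corresponds to roughly 61.2 Hz, and vsync_notifier_period = (1 * HZ) requests a vsync notification about once per second. (This reading of the fields is inferred from their names and typical msm_fb usage rather than stated anywhere in the patch.)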
diff --git a/drivers/staging/msm/mddi_toshiba_wvga_pt.c b/drivers/staging/msm/mddi_toshiba_wvga_pt.c
index fc7d4e0..6789f39 100644
--- a/drivers/staging/msm/mddi_toshiba_wvga_pt.c
+++ b/drivers/staging/msm/mddi_toshiba_wvga_pt.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include "msm_fb.h"
diff --git a/drivers/staging/msm/mddihost.c b/drivers/staging/msm/mddihost.c
index 58a86d5..78ec8e2 100644
--- a/drivers/staging/msm/mddihost.c
+++ b/drivers/staging/msm/mddihost.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include <linux/module.h>
diff --git a/drivers/staging/msm/mddihost_e.c b/drivers/staging/msm/mddihost_e.c
index 7de5eda..64579eb 100644
--- a/drivers/staging/msm/mddihost_e.c
+++ b/drivers/staging/msm/mddihost_e.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include <linux/module.h>
diff --git a/drivers/staging/msm/mddihosti.c b/drivers/staging/msm/mddihosti.c
index f9d6e91..2e89e4b 100644
--- a/drivers/staging/msm/mddihosti.c
+++ b/drivers/staging/msm/mddihosti.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include <linux/module.h>
diff --git a/drivers/staging/msm/mdp.c b/drivers/staging/msm/mdp.c
index 58cb404..c5c4c41 100644
--- a/drivers/staging/msm/mdp.c
+++ b/drivers/staging/msm/mdp.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include <linux/module.h>
diff --git a/drivers/staging/msm/mdp4_debugfs.c b/drivers/staging/msm/mdp4_debugfs.c
index 36954e8..b124d9c 100644
--- a/drivers/staging/msm/mdp4_debugfs.c
+++ b/drivers/staging/msm/mdp4_debugfs.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 #include <linux/module.h>
 #include <linux/kernel.h>
diff --git a/drivers/staging/msm/mdp4_overlay.c b/drivers/staging/msm/mdp4_overlay.c
index b9acf52..78b469f 100644
--- a/drivers/staging/msm/mdp4_overlay.c
+++ b/drivers/staging/msm/mdp4_overlay.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include <linux/module.h>
diff --git a/drivers/staging/msm/mdp4_overlay_lcdc.c b/drivers/staging/msm/mdp4_overlay_lcdc.c
index a6ab8ec..8f49cae 100644
--- a/drivers/staging/msm/mdp4_overlay_lcdc.c
+++ b/drivers/staging/msm/mdp4_overlay_lcdc.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include <linux/module.h>
diff --git a/drivers/staging/msm/mdp4_overlay_mddi.c b/drivers/staging/msm/mdp4_overlay_mddi.c
index be1b287..9ee8608 100644
--- a/drivers/staging/msm/mdp4_overlay_mddi.c
+++ b/drivers/staging/msm/mdp4_overlay_mddi.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include <linux/module.h>
diff --git a/drivers/staging/msm/mdp4_util.c b/drivers/staging/msm/mdp4_util.c
index fd97f52..893b883 100644
--- a/drivers/staging/msm/mdp4_util.c
+++ b/drivers/staging/msm/mdp4_util.c
@@ -9,11 +9,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 #include <linux/module.h>
 #include <linux/kernel.h>
diff --git a/drivers/staging/msm/mdp_cursor.c b/drivers/staging/msm/mdp_cursor.c
index 7d28f30..5ff395a 100644
--- a/drivers/staging/msm/mdp_cursor.c
+++ b/drivers/staging/msm/mdp_cursor.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include <linux/module.h>
diff --git a/drivers/staging/msm/mdp_dma.c b/drivers/staging/msm/mdp_dma.c
index 639918b..7c55161 100644
--- a/drivers/staging/msm/mdp_dma.c
+++ b/drivers/staging/msm/mdp_dma.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include <linux/module.h>
diff --git a/drivers/staging/msm/mdp_dma_lcdc.c b/drivers/staging/msm/mdp_dma_lcdc.c
index b57fa1a..11fe40a 100644
--- a/drivers/staging/msm/mdp_dma_lcdc.c
+++ b/drivers/staging/msm/mdp_dma_lcdc.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include <linux/module.h>
diff --git a/drivers/staging/msm/mdp_dma_s.c b/drivers/staging/msm/mdp_dma_s.c
index 0c34a10..13516f9 100644
--- a/drivers/staging/msm/mdp_dma_s.c
+++ b/drivers/staging/msm/mdp_dma_s.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include <linux/module.h>
diff --git a/drivers/staging/msm/mdp_dma_tv.c b/drivers/staging/msm/mdp_dma_tv.c
index 70989fb..16f55d4 100644
--- a/drivers/staging/msm/mdp_dma_tv.c
+++ b/drivers/staging/msm/mdp_dma_tv.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include <linux/module.h>
diff --git a/drivers/staging/msm/mdp_hw_init.c b/drivers/staging/msm/mdp_hw_init.c
index 807362a..d184afb 100644
--- a/drivers/staging/msm/mdp_hw_init.c
+++ b/drivers/staging/msm/mdp_hw_init.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include "mdp.h"
diff --git a/drivers/staging/msm/mdp_ppp_dq.c b/drivers/staging/msm/mdp_ppp_dq.c
index 3a687c7..f52445f 100644
--- a/drivers/staging/msm/mdp_ppp_dq.c
+++ b/drivers/staging/msm/mdp_ppp_dq.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include "mdp.h"
diff --git a/drivers/staging/msm/mdp_ppp_v20.c b/drivers/staging/msm/mdp_ppp_v20.c
index b5b7271..ceff8ed 100644
--- a/drivers/staging/msm/mdp_ppp_v20.c
+++ b/drivers/staging/msm/mdp_ppp_v20.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include <linux/module.h>
diff --git a/drivers/staging/msm/mdp_ppp_v31.c b/drivers/staging/msm/mdp_ppp_v31.c
index 76495db..2442717 100644
--- a/drivers/staging/msm/mdp_ppp_v31.c
+++ b/drivers/staging/msm/mdp_ppp_v31.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include <linux/module.h>
diff --git a/drivers/staging/msm/mdp_vsync.c b/drivers/staging/msm/mdp_vsync.c
index bbd4560..1f1489e 100644
--- a/drivers/staging/msm/mdp_vsync.c
+++ b/drivers/staging/msm/mdp_vsync.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include <linux/module.h>
diff --git a/drivers/staging/msm/memory_ll.h b/drivers/staging/msm/memory_ll.h
index 18a239a..6e6d7d9 100644
--- a/drivers/staging/msm/memory_ll.h
+++ b/drivers/staging/msm/memory_ll.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -47,7 +47,7 @@
 
 #endif
 
-#ifdef CONFIG_ARCH_MSM_SCORPION
+#if defined(CONFIG_ARCH_MSM_SCORPION) || defined(CONFIG_ARCH_MSM_KRAIT)
 #define arch_has_speculative_dfetch()	1
 #endif
 
diff --git a/drivers/staging/msm/msm_fb_bl.c b/drivers/staging/msm/msm_fb_bl.c
index 2a80775..46523c07 100644
--- a/drivers/staging/msm/msm_fb_bl.c
+++ b/drivers/staging/msm/msm_fb_bl.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include <linux/module.h>
diff --git a/drivers/staging/msm/msm_fb_panel.c b/drivers/staging/msm/msm_fb_panel.c
index b17a239..098435e 100644
--- a/drivers/staging/msm/msm_fb_panel.c
+++ b/drivers/staging/msm/msm_fb_panel.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include <linux/module.h>
diff --git a/drivers/staging/msm/staging-devices.c b/drivers/staging/msm/staging-devices.c
index d6cd919..9f55158 100644
--- a/drivers/staging/msm/staging-devices.c
+++ b/drivers/staging/msm/staging-devices.c
@@ -40,7 +40,6 @@
 #define MSM_PMEM_ADSP_SIZE	0x1C00000
 
 #define MSM_FB_SIZE             0x500000
-#define MSM_FB_SIZE_ST15	0x800000
 #define MSM_AUDIO_SIZE		0x80000
 #define MSM_GPU_PHYS_SIZE 	SZ_2M
 
@@ -106,20 +105,14 @@
 {
 	int ret = -EPERM;
 
-	if (machine_is_qsd8x50_ffa() || machine_is_qsd8x50a_ffa()) {
+	if (machine_is_qsd8x50_ffa()) {
 		if (!strncmp(name, "mddi_toshiba_wvga_pt", 20))
 			ret = 0;
 		else
 			ret = -ENODEV;
-	} else if ((machine_is_qsd8x50_surf() || machine_is_qsd8x50a_surf())
+	} else if ((machine_is_qsd8x50_surf())
 			&& !strcmp(name, "lcdc_external"))
 		ret = 0;
-	else if (machine_is_qsd8x50a_st1_5()) {
-		if (!strcmp(name, "lcdc_st15") ||
-		    !strcmp(name, "hdmi_sii9022"))
-			ret = 0;
-		else
-			ret = -ENODEV;
-	}
 
 	return ret;
@@ -130,8 +123,7 @@
 
 static int msm_fb_allow_set_offset(void)
 {
-	return (machine_is_qsd8x50_st1() ||
-		machine_is_qsd8x50a_st1_5()) ? 1 : 0;
+	return 0;
 }
 
 
@@ -154,10 +146,7 @@
 {
 	void *addr;
 	unsigned long size;
-	if (machine_is_qsd8x50a_st1_5())
-		size = MSM_FB_SIZE_ST15;
-	else
-		size = MSM_FB_SIZE;
+	size = MSM_FB_SIZE;
 
 	addr = alloc_bootmem(size); // (void *)MSM_FB_BASE;
 	if (!addr)
@@ -187,25 +176,6 @@
 			mdelay(100);
 			gpio_set_value(32, 0);
 		}
-	} else if (machine_is_qsd8x50a_st1_5()) {
-		if (on) {
-			gpio_set_value(17, 1);
-			gpio_set_value(19, 1);
-			gpio_set_value(20, 1);
-			gpio_set_value(22, 0);
-			gpio_set_value(32, 1);
-			gpio_set_value(155, 1);
-			//st15_hdmi_power(1);
-			gpio_set_value(22, 1);
-
-		} else {
-			gpio_set_value(17, 0);
-			gpio_set_value(19, 0);
-			gpio_set_value(22, 0);
-			gpio_set_value(32, 0);
-			gpio_set_value(155, 0);
-		//	st15_hdmi_power(0);
-		}
 	}
 	return 0;
 }
@@ -276,23 +246,7 @@
 //	msm_fb_register_device("pmdh", &mddi_pdata);
 //	msm_fb_register_device("emdh", &mddi_pdata);
 //	msm_fb_register_device("tvenc", 0);
-
-	if (machine_is_qsd8x50a_st1_5()) {
-/*		rc = st15_hdmi_vreg_init();
-		if (rc)
-			return;
-*/
-		rc = msm_gpios_request_enable(
-			msm_fb_st15_gpio_config_data,
-			ARRAY_SIZE(msm_fb_st15_gpio_config_data));
-		if (rc) {
-			printk(KERN_ERR "%s: unable to init lcdc gpios\n",
-			       __func__);
-			return;
-		}
-		msm_fb_register_device("lcdc", &lcdc_pdata);
-	} else
-		msm_fb_register_device("lcdc", 0);
+	msm_fb_register_device("lcdc", 0);
 }
 
 int __init staging_init_pmem(void)
diff --git a/drivers/staging/msm/tv_ntsc.c b/drivers/staging/msm/tv_ntsc.c
index 5eb6761..63e5cd3 100644
--- a/drivers/staging/msm/tv_ntsc.c
+++ b/drivers/staging/msm/tv_ntsc.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include <linux/module.h>
diff --git a/drivers/staging/msm/tv_pal.c b/drivers/staging/msm/tv_pal.c
index 204da51..ff5b51d 100644
--- a/drivers/staging/msm/tv_pal.c
+++ b/drivers/staging/msm/tv_pal.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 #include <linux/module.h>
 #include <linux/kernel.h>
diff --git a/drivers/staging/msm/tvenc.c b/drivers/staging/msm/tvenc.c
index 4fbb77b..c7480f7 100644
--- a/drivers/staging/msm/tvenc.c
+++ b/drivers/staging/msm/tvenc.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include <linux/module.h>
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index bf7c687..de5ded3 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -22,3 +22,62 @@
 	  requires a 2.10.7/3.0.2 or later lm-sensors userspace.
 
 	  Say Y if your user-space is new enough.
+
+config THERMAL_PM8901
+	tristate "Qualcomm PM8901 Temperature Alarm"
+	depends on PMIC8901
+	depends on THERMAL
+	default n
+	help
+	  This enables a thermal Sysfs driver for the PMIC 8901 device. It
+	  shows up in Sysfs as a thermal zone with multiple trip points.
+	  Enabling the thermal zone device via the mode file results in
+	  shifting over-temperature shutdown control of the PMIC from hardware
+	  to software.
+
+config THERMAL_PM8058
+	tristate "Qualcomm PM8058 Temperature Alarm"
+	depends on PMIC8058
+	depends on THERMAL
+	depends on SENSORS_MSM_ADC
+	default n
+	help
+	  This enables a thermal Sysfs driver for the PMIC 8058 device. It
+	  shows up in Sysfs as a thermal zone with multiple trip points.
+	  Enabling the thermal zone device via the mode file results in
+	  shifting over-temperature shutdown control of the PMIC from hardware
+	  to software.
+
+config THERMAL_MSM_POPMEM
+	tristate "Qualcomm MSM POP memory temperature sensor"
+	depends on THERMAL
+	default n
+	help
+	  This enables a thermal sysfs driver for MSM POP memory. It shows up in
+	  sysfs as a thermal zone with one trip point. Due to hardware
+	  limitations, the temperature is reported as "Low Temperature" (20 C),
+	  "Normal Temperature" (50 C), or "Out of Spec High Temperature" (85 C).
+	  This driver is designed to be used in conjunction with a user space
+	  application to make all policy decisions.
+
+config THERMAL_TSENS
+	tristate "Qualcomm Tsens Temperature Alarm"
+	depends on THERMAL
+	default n
+	help
+	  This enables the thermal sysfs driver for the Tsens device. It shows
+	  up in Sysfs as a thermal zone with multiple trip points. Disabling the
+	  thermal zone device via the mode file results in disabling the sensor.
+	  Threshold temperatures can also be set for both the hot and cold trip
+	  points, and an update is generated when a threshold is crossed.
+
+config THERMAL_PM8XXX
+	tristate "Qualcomm PMIC PM8xxx Temperature Alarm"
+	depends on THERMAL
+	depends on MFD_PM8XXX
+	help
+	  This enables a thermal Sysfs driver for the PMIC PM8xxx devices. It
+	  shows up in Sysfs as a thermal zone with multiple trip points.
+	  Enabling the thermal zone device via the mode file results in
+	  shifting over-temperature shutdown control of the PMIC from hardware
+	  to software.
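Several of the help texts above (most explicitly THERMAL_MSM_POPMEM) assume a userspace agent that polls the thermal zones and makes the actual policy decisions. Note that the zones added in this patch report different units: the TSENS zone returns plain degrees C from its get_temp callback, while the pop-memory zone returns millidegree-style values (25000/50000/85000). A minimal sketch of such a poller, assuming the standard sysfs thermal layout and an illustrative zone index:

    #include <stdio.h>

    /* Read the current temperature of a thermal zone in whatever units its
     * driver exports. The zone index and sysfs path layout are assumptions
     * about the target system, not guaranteed by the Kconfig text above. */
    static int read_zone_temp(int zone, long *temp)
    {
            char path[64];
            FILE *f;
            int ok;

            snprintf(path, sizeof(path),
                     "/sys/class/thermal/thermal_zone%d/temp", zone);
            f = fopen(path, "r");
            if (!f)
                    return -1;
            ok = (fscanf(f, "%ld", temp) == 1);
            fclose(f);
            return ok ? 0 : -1;
    }

    int main(void)
    {
            long temp;

            if (read_zone_temp(0, &temp) == 0)
                    printf("zone0: %ld\n", temp);
            return 0;
    }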
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 31108a0..d1bb466 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -3,3 +3,8 @@
 #
 
 obj-$(CONFIG_THERMAL)		+= thermal_sys.o
+obj-$(CONFIG_THERMAL_PM8901)	+= pmic8901-tm.o
+obj-$(CONFIG_THERMAL_PM8058)	+= pmic8058-tm.o
+obj-$(CONFIG_THERMAL_MSM_POPMEM)	+= msm_popmem-tm.o
+obj-$(CONFIG_THERMAL_TSENS)	+= msm_tsens.o
+obj-$(CONFIG_THERMAL_PM8XXX)	+= pm8xxx-tm.o
diff --git a/drivers/thermal/msm_popmem-tm.c b/drivers/thermal/msm_popmem-tm.c
new file mode 100644
index 0000000..583b2db
--- /dev/null
+++ b/drivers/thermal/msm_popmem-tm.c
@@ -0,0 +1,284 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/thermal.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <mach/msm_memtypes.h>
+
+#define POP_MEM_LPDDR1_REFRESH_MASK	0x00000700
+#define POP_MEM_LPDDR1_REFRESH_SHIFT	0x8
+
+#define POP_MEM_LPDDR2_REFRESH_MASK	0x00000007
+#define POP_MEM_LPDDR2_REFRESH_SHIFT	0x0
+
+#define POP_MEM_REFRESH_REG		0x3C
+
+#define POP_MEM_LOW_TEMPERATURE		25000
+#define POP_MEM_NORMAL_TEMPERATURE	50000
+#define POP_MEM_HIGH_TEMPERATURE	85000
+
+#define POP_MEM_TRIP_OUT_OF_SPEC	0
+#define POP_MEM_TRIP_NUM		1
+
+struct pop_mem_tm_device {
+	unsigned long			baseaddr;
+	struct thermal_zone_device	*tz_dev;
+	unsigned long			refresh_mask;
+	unsigned int			refresh_shift;
+};
+
+
+static int pop_mem_tm_read_refresh(struct pop_mem_tm_device *tm,
+				   unsigned int *ref_rate){
+	unsigned int ref;
+
+	ref = __raw_readl(tm->baseaddr + POP_MEM_REFRESH_REG);
+	*ref_rate = (ref & tm->refresh_mask) >> tm->refresh_shift;
+
+	return 0;
+}
+
+
+static int pop_mem_tm_get_temperature(struct thermal_zone_device *thermal,
+			       unsigned long *temperature)
+{
+	struct pop_mem_tm_device *tm = thermal->devdata;
+	unsigned int ref_rate;
+	int rc;
+
+	if (!tm || !temperature)
+		return -EINVAL;
+
+	rc = pop_mem_tm_read_refresh(tm, &ref_rate);
+	if (rc < 0)
+		return rc;
+
+	switch (ref_rate) {
+	case 0:
+	case 1:
+	case 2:
+		*temperature = POP_MEM_LOW_TEMPERATURE;
+		break;
+	case 3:
+	case 4:
+		*temperature = POP_MEM_NORMAL_TEMPERATURE;
+		break;
+	case 5:
+	case 6:
+	case 7:
+		*temperature = POP_MEM_HIGH_TEMPERATURE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int pop_mem_tm_get_trip_type(struct thermal_zone_device *thermal,
+				    int trip, enum thermal_trip_type *type)
+{
+	struct pop_mem_tm_device *tm = thermal->devdata;
+
+	if (!tm || trip < 0 || !type)
+		return -EINVAL;
+
+	if (trip == POP_MEM_TRIP_OUT_OF_SPEC)
+		*type = THERMAL_TRIP_CRITICAL;
+	else
+		return -EINVAL;
+
+	return 0;
+}
+
+static int pop_mem_tm_get_trip_temperature(struct thermal_zone_device *thermal,
+				    int trip, unsigned long *temperature)
+{
+	struct pop_mem_tm_device *tm = thermal->devdata;
+
+	if (!tm || trip < 0 || !temperature)
+		return -EINVAL;
+
+	if (trip == POP_MEM_TRIP_OUT_OF_SPEC)
+		*temperature = POP_MEM_HIGH_TEMPERATURE;
+	else
+		return -EINVAL;
+
+	return 0;
+}
+
+
+static int pop_mem_tm_get_crit_temperature(struct thermal_zone_device *thermal,
+				    unsigned long *temperature)
+{
+	struct pop_mem_tm_device *tm = thermal->devdata;
+
+	if (!tm || !temperature)
+		return -EINVAL;
+
+	*temperature = POP_MEM_HIGH_TEMPERATURE;
+
+	return 0;
+}
+
+
+static struct thermal_zone_device_ops pop_mem_thermal_zone_ops = {
+	.get_temp = pop_mem_tm_get_temperature,
+	.get_trip_type = pop_mem_tm_get_trip_type,
+	.get_trip_temp = pop_mem_tm_get_trip_temperature,
+	.get_crit_temp = pop_mem_tm_get_crit_temperature,
+};
+
+
+static int __devinit pop_mem_tm_probe(struct platform_device *pdev)
+{
+	int rc, len, numcontrollers;
+	struct resource *controller_mem = NULL;
+	struct resource *res_mem = NULL;
+	struct pop_mem_tm_device *tmdev = NULL;
+	void __iomem *base = NULL;
+
+	rc = len = 0;
+	numcontrollers = get_num_populated_chipselects();
+
+	if (pdev->id >= numcontrollers) {
+		pr_err("%s: memory controller %d does not exist", __func__,
+			pdev->id);
+		rc = -ENODEV;
+		goto fail;
+	}
+
+	controller_mem = platform_get_resource_byname(pdev,
+						  IORESOURCE_MEM, "physbase");
+	if (!controller_mem) {
+		pr_err("%s: could not get resources for controller %d",
+			__func__, pdev->id);
+		rc = -EFAULT;
+		goto fail;
+	}
+
+	len = controller_mem->end - controller_mem->start + 1;
+
+	res_mem = request_mem_region(controller_mem->start, len,
+				     controller_mem->name);
+	if (!res_mem) {
+		pr_err("%s: Could not request memory region: "
+			"start=%p, len=%d\n", __func__,
+			(void *) controller_mem->start, len);
+		rc = -EBUSY;
+		goto fail;
+
+	}
+
+	base = ioremap(res_mem->start, len);
+	if (!base) {
+		pr_err("%s: Could not ioremap: start=%p, len=%d\n",
+			 __func__, (void *) controller_mem->start, len);
+		rc = -EBUSY;
+		goto fail;
+
+	}
+
+	tmdev = kzalloc(sizeof(*tmdev), GFP_KERNEL);
+	if (tmdev == NULL) {
+		pr_err("%s: kzalloc() failed.\n", __func__);
+		rc = -ENOMEM;
+		goto fail;
+	}
+
+	if (numcontrollers == 1) {
+		tmdev->refresh_mask = POP_MEM_LPDDR1_REFRESH_MASK;
+		tmdev->refresh_shift = POP_MEM_LPDDR1_REFRESH_SHIFT;
+	} else {
+		tmdev->refresh_mask = POP_MEM_LPDDR2_REFRESH_MASK;
+		tmdev->refresh_shift = POP_MEM_LPDDR2_REFRESH_SHIFT;
+	}
+	tmdev->baseaddr = (unsigned long) base;
+	tmdev->tz_dev = thermal_zone_device_register("msm_popmem_tz",
+						     POP_MEM_TRIP_NUM, tmdev,
+						     &pop_mem_thermal_zone_ops,
+						     0, 0, 0, 0);
+
+	if (tmdev->tz_dev == NULL) {
+		pr_err("%s: thermal_zone_device_register() failed.\n",
+			__func__);
+		rc = -ENODEV;
+		goto fail;
+	}
+
+	platform_set_drvdata(pdev, tmdev);
+
+	pr_notice("%s: device %d probed successfully\n", __func__, pdev->id);
+
+	return rc;
+
+fail:
+	if (base)
+		iounmap(base);
+	if (res_mem)
+		release_mem_region(controller_mem->start, len);
+	kfree(tmdev);
+
+	return rc;
+}
+
+static int __devexit pop_mem_tm_remove(struct platform_device *pdev)
+{
+
+	int len;
+	struct pop_mem_tm_device *tmdev = platform_get_drvdata(pdev);
+	struct resource *controller_mem;
+
+	iounmap((void __iomem *)tmdev->baseaddr);
+
+	controller_mem = platform_get_resource_byname(pdev,
+						  IORESOURCE_MEM, "physbase");
+	len = controller_mem->end - controller_mem->start + 1;
+	release_mem_region(controller_mem->start, len);
+
+	thermal_zone_device_unregister(tmdev->tz_dev);
+	platform_set_drvdata(pdev, NULL);
+	kfree(tmdev);
+
+	return 0;
+}
+
+static struct platform_driver pop_mem_tm_driver = {
+	.probe          = pop_mem_tm_probe,
+	.remove         = pop_mem_tm_remove,
+	.driver         = {
+		.name = "msm_popmem-tm",
+		.owner = THIS_MODULE
+	},
+};
+
+static int __init pop_mem_tm_init(void)
+{
+	return platform_driver_register(&pop_mem_tm_driver);
+}
+
+static void __exit pop_mem_tm_exit(void)
+{
+	platform_driver_unregister(&pop_mem_tm_driver);
+}
+
+module_init(pop_mem_tm_init);
+module_exit(pop_mem_tm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Pop memory thermal manager driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:popmem-tm");
diff --git a/drivers/thermal/msm_tsens.c b/drivers/thermal/msm_tsens.c
new file mode 100644
index 0000000..f4e094e
--- /dev/null
+++ b/drivers/thermal/msm_tsens.c
@@ -0,0 +1,618 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * Qualcomm TSENS Thermal Manager driver
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include <linux/io.h>
+#include <mach/msm_iomap.h>
+
+/* Trips: from very hot to very cold */
+enum tsens_trip_type {
+	TSENS_TRIP_STAGE3 = 0,
+	TSENS_TRIP_STAGE2,
+	TSENS_TRIP_STAGE1,
+	TSENS_TRIP_STAGE0,
+	TSENS_TRIP_NUM,
+};
+
+#define TSENS_NUM_SENSORS	1 /* There are 5 but only 1 is useful now */
+#define TSENS_CAL_DEGC		30 /* degree C used for calibration */
+#define TSENS_QFPROM_ADDR (MSM_QFPROM_BASE + 0x000000bc)
+#define TSENS_QFPROM_RED_TEMP_SENSOR0_SHIFT 24
+#define TSENS_QFPROM_TEMP_SENSOR0_SHIFT 16
+#define TSENS_QFPROM_TEMP_SENSOR0_MASK (255 << TSENS_QFPROM_TEMP_SENSOR0_SHIFT)
+#define TSENS_SLOPE (0.702)  /* slope in (degrees_C / ADC_code) */
+#define TSENS_FACTOR (1000)  /* convert floating-point into integer */
+#define TSENS_CONFIG 01      /* this setting found to be optimal */
+#define TSENS_CONFIG_SHIFT 28
+#define TSENS_CONFIG_MASK (3 << TSENS_CONFIG_SHIFT)
+#define TSENS_CNTL_ADDR (MSM_CLK_CTL_BASE + 0x00003620)
+#define TSENS_EN (1 << 0)
+#define TSENS_SW_RST (1 << 1)
+#define SENSOR0_EN (1 << 3)
+#define SENSOR1_EN (1 << 4)
+#define SENSOR2_EN (1 << 5)
+#define SENSOR3_EN (1 << 6)
+#define SENSOR4_EN (1 << 7)
+#define TSENS_MIN_STATUS_MASK (1 << 8)
+#define TSENS_LOWER_STATUS_CLR (1 << 9)
+#define TSENS_UPPER_STATUS_CLR (1 << 10)
+#define TSENS_MAX_STATUS_MASK (1 << 11)
+#define TSENS_MEASURE_PERIOD 4 /* 1 sec. default as required by Willie */
+#define TSENS_SLP_CLK_ENA (1 << 24)
+#define TSENS_THRESHOLD_ADDR (MSM_CLK_CTL_BASE + 0x00003624)
+#define TSENS_THRESHOLD_MAX_CODE (0xff)
+#define TSENS_THRESHOLD_MAX_LIMIT_MASK (TSENS_THRESHOLD_MAX_CODE << 24)
+#define TSENS_THRESHOLD_MIN_LIMIT_MASK (TSENS_THRESHOLD_MAX_CODE << 16)
+#define TSENS_THRESHOLD_UPPER_LIMIT_MASK (TSENS_THRESHOLD_MAX_CODE << 8)
+#define TSENS_THRESHOLD_LOWER_LIMIT_MASK (TSENS_THRESHOLD_MAX_CODE << 0)
+/* Initial temperature threshold values */
+#define TSENS_LOWER_LIMIT_TH   0x50
+#define TSENS_UPPER_LIMIT_TH   0xdf
+#define TSENS_MIN_LIMIT_TH     0x38
+#define TSENS_MAX_LIMIT_TH     0xff
+
+#define TSENS_S0_STATUS_ADDR (MSM_CLK_CTL_BASE + 0x00003628)
+#define TSENS_INT_STATUS_ADDR (MSM_CLK_CTL_BASE + 0x0000363c)
+#define TSENS_LOWER_INT_MASK (1 << 1)
+#define TSENS_UPPER_INT_MASK (1 << 2)
+#define TSENS_TRDY_MASK (1 << 7)
+
+struct tsens_tm_device_sensor {
+	struct thermal_zone_device	*tz_dev;
+	enum thermal_device_mode	mode;
+	unsigned int			sensor_num;
+};
+
+struct tsens_tm_device {
+	struct tsens_tm_device_sensor sensor[TSENS_NUM_SENSORS];
+	bool prev_reading_avail;
+	int offset;
+	struct work_struct work;
+};
+
+struct tsens_tm_device *tmdev;
+
+/* Temperature on y axis and ADC-code on x-axis */
+static int tsens_tz_code_to_degC(int adc_code)
+{
+	int degC, degcbeforefactor;
+	degcbeforefactor = adc_code * (int)(TSENS_SLOPE * TSENS_FACTOR)
+				+ tmdev->offset;
+	if (degcbeforefactor == 0)
+		degC = degcbeforefactor;
+	else if (degcbeforefactor > 0)
+		degC = (degcbeforefactor + TSENS_FACTOR/2) / TSENS_FACTOR;
+	else  /* rounding for negative degrees */
+		degC = (degcbeforefactor - TSENS_FACTOR/2) / TSENS_FACTOR;
+	return degC;
+}
+
+static int tsens_tz_degC_to_code(int degC)
+{
+	int code = (degC * TSENS_FACTOR - tmdev->offset
+			+ (int)(TSENS_FACTOR * TSENS_SLOPE)/2)
+			/ (int)(TSENS_FACTOR * TSENS_SLOPE);
+	if (code > 255) /* upper bound */
+		code = 255;
+	else if (code < 0) /* lower bound */
+		code = 0;
+	return code;
+}
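+
+/*
+ * Scale note (illustrative): with TSENS_SLOPE = 0.702 and TSENS_FACTOR =
+ * 1000, one ADC code corresponds to roughly 0.7 degrees C in the two
+ * conversions above; tmdev->offset, set up at probe time from the QFPROM
+ * calibration read-out, supplies the absolute anchor so that the
+ * calibration point reads back as TSENS_CAL_DEGC.
+ */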
+
+static int tsens_tz_get_temp(struct thermal_zone_device *thermal,
+			     unsigned long *temp)
+{
+	struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
+	unsigned int code;
+
+	if (!tm_sensor || tm_sensor->mode != THERMAL_DEVICE_ENABLED || !temp)
+		return -EINVAL;
+
+	if (!tmdev->prev_reading_avail) {
+		while (!(readl(TSENS_INT_STATUS_ADDR) & TSENS_TRDY_MASK))
+			msleep(1);
+		tmdev->prev_reading_avail = 1;
+	}
+
+	code = readl(TSENS_S0_STATUS_ADDR + (tm_sensor->sensor_num << 2));
+	*temp = tsens_tz_code_to_degC(code);
+
+	return 0;
+}
+
+static int tsens_tz_get_mode(struct thermal_zone_device *thermal,
+			      enum thermal_device_mode *mode)
+{
+	struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
+
+	if (!tm_sensor || !mode)
+		return -EINVAL;
+
+	*mode = tm_sensor->mode;
+
+	return 0;
+}
+
+static int tsens_tz_set_mode(struct thermal_zone_device *thermal,
+			      enum thermal_device_mode mode)
+{
+	struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
+	unsigned int reg, mask;
+
+	if (!tm_sensor)
+		return -EINVAL;
+
+	if (mode != tm_sensor->mode) {
+		pr_info("%s: mode: %d --> %d\n", __func__, tm_sensor->mode,
+									 mode);
+
+		reg = readl(TSENS_CNTL_ADDR);
+		mask = 1 << (tm_sensor->sensor_num + 3);
+		if (mode == THERMAL_DEVICE_ENABLED) {
+			writel(reg | TSENS_SW_RST, TSENS_CNTL_ADDR);
+			reg |= mask | TSENS_SLP_CLK_ENA | TSENS_EN;
+			tmdev->prev_reading_avail = 0;
+		} else {
+			reg &= ~mask;
+			if (!(reg & (((1 << TSENS_NUM_SENSORS) - 1) << 3)))
+				reg &= ~(TSENS_SLP_CLK_ENA | TSENS_EN);
+		}
+
+		writel(reg, TSENS_CNTL_ADDR);
+	}
+	tm_sensor->mode = mode;
+
+	return 0;
+}
+
+static int tsens_tz_get_trip_type(struct thermal_zone_device *thermal,
+				   int trip, enum thermal_trip_type *type)
+{
+	struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
+
+	if (!tm_sensor || trip < 0 || !type)
+		return -EINVAL;
+
+	switch (trip) {
+	case TSENS_TRIP_STAGE3:
+		*type = THERMAL_TRIP_CRITICAL;
+		break;
+	case TSENS_TRIP_STAGE2:
+		*type = THERMAL_TRIP_CONFIGURABLE_HI;
+		break;
+	case TSENS_TRIP_STAGE1:
+		*type = THERMAL_TRIP_CONFIGURABLE_LOW;
+		break;
+	case TSENS_TRIP_STAGE0:
+		*type = THERMAL_TRIP_CRITICAL_LOW;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
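+/*
+ * The four trip stages share the single THRESHOLD register, so the helpers
+ * below first work out the codes of the neighbouring stages that are still
+ * enabled (hi_code/lo_code) and reject an enable or a new threshold whose
+ * code would fall outside that window, keeping the thresholds ordered.
+ */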
+static int tsens_tz_activate_trip_type(struct thermal_zone_device *thermal,
+			int trip, enum thermal_trip_activation_mode mode)
+{
+	struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
+	unsigned int reg_cntl, reg_th, code, hi_code, lo_code, mask;
+
+	if (!tm_sensor || trip < 0)
+		return -EINVAL;
+
+	lo_code = 0;
+	hi_code = TSENS_THRESHOLD_MAX_CODE;
+
+	reg_cntl = readl(TSENS_CNTL_ADDR);
+	reg_th = readl(TSENS_THRESHOLD_ADDR);
+	switch (trip) {
+	case TSENS_TRIP_STAGE3:
+		code = (reg_th & TSENS_THRESHOLD_MAX_LIMIT_MASK) >> 24;
+		mask = TSENS_MAX_STATUS_MASK;
+
+		if (!(reg_cntl & TSENS_UPPER_STATUS_CLR))
+			lo_code = (reg_th & TSENS_THRESHOLD_UPPER_LIMIT_MASK)
+									>> 8;
+		else if (!(reg_cntl & TSENS_LOWER_STATUS_CLR))
+			lo_code = (reg_th & TSENS_THRESHOLD_LOWER_LIMIT_MASK);
+		else if (!(reg_cntl & TSENS_MIN_STATUS_MASK))
+			lo_code = (reg_th & TSENS_THRESHOLD_MIN_LIMIT_MASK)
+									>> 16;
+		break;
+	case TSENS_TRIP_STAGE2:
+		code = (reg_th & TSENS_THRESHOLD_UPPER_LIMIT_MASK) >> 8;
+		mask = TSENS_UPPER_STATUS_CLR;
+
+		if (!(reg_cntl & TSENS_MAX_STATUS_MASK))
+			hi_code = (reg_th & TSENS_THRESHOLD_MAX_LIMIT_MASK)
+									>> 24;
+		if (!(reg_cntl & TSENS_LOWER_STATUS_CLR))
+			lo_code = (reg_th & TSENS_THRESHOLD_LOWER_LIMIT_MASK);
+		else if (!(reg_cntl & TSENS_MIN_STATUS_MASK))
+			lo_code = (reg_th & TSENS_THRESHOLD_MIN_LIMIT_MASK)
+									>> 16;
+		break;
+	case TSENS_TRIP_STAGE1:
+		code = (reg_th & TSENS_THRESHOLD_LOWER_LIMIT_MASK) >> 0;
+		mask = TSENS_LOWER_STATUS_CLR;
+
+		if (!(reg_cntl & TSENS_MIN_STATUS_MASK))
+			lo_code = (reg_th & TSENS_THRESHOLD_MIN_LIMIT_MASK)
+									>> 16;
+		if (!(reg_cntl & TSENS_UPPER_STATUS_CLR))
+			hi_code = (reg_th & TSENS_THRESHOLD_UPPER_LIMIT_MASK)
+									>> 8;
+		else if (!(reg_cntl & TSENS_MAX_STATUS_MASK))
+			hi_code = (reg_th & TSENS_THRESHOLD_MAX_LIMIT_MASK)
+									>> 24;
+		break;
+	case TSENS_TRIP_STAGE0:
+		code = (reg_th & TSENS_THRESHOLD_MIN_LIMIT_MASK) >> 16;
+		mask = TSENS_MIN_STATUS_MASK;
+
+		if (!(reg_cntl & TSENS_LOWER_STATUS_CLR))
+			hi_code = (reg_th & TSENS_THRESHOLD_LOWER_LIMIT_MASK);
+		else if (!(reg_cntl & TSENS_UPPER_STATUS_CLR))
+			hi_code = (reg_th & TSENS_THRESHOLD_UPPER_LIMIT_MASK)
+									>> 8;
+		else if (!(reg_cntl & TSENS_MAX_STATUS_MASK))
+			hi_code = (reg_th & TSENS_THRESHOLD_MAX_LIMIT_MASK)
+									>> 24;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (mode == THERMAL_TRIP_ACTIVATION_DISABLED)
+		writel(reg_cntl | mask, TSENS_CNTL_ADDR);
+	else {
+		if (code < lo_code || code > hi_code)
+			return -EINVAL;
+		writel(reg_cntl & ~mask, TSENS_CNTL_ADDR);
+	}
+
+	return 0;
+}
+
+static int tsens_tz_get_trip_temp(struct thermal_zone_device *thermal,
+				   int trip, unsigned long *temp)
+{
+	struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
+	unsigned int reg;
+
+	if (!tm_sensor || trip < 0 || !temp)
+		return -EINVAL;
+
+	reg = readl(TSENS_THRESHOLD_ADDR);
+	switch (trip) {
+	case TSENS_TRIP_STAGE3:
+		reg = (reg & TSENS_THRESHOLD_MAX_LIMIT_MASK) >> 24;
+		break;
+	case TSENS_TRIP_STAGE2:
+		reg = (reg & TSENS_THRESHOLD_UPPER_LIMIT_MASK) >> 8;
+		break;
+	case TSENS_TRIP_STAGE1:
+		reg = (reg & TSENS_THRESHOLD_LOWER_LIMIT_MASK) >> 0;
+		break;
+	case TSENS_TRIP_STAGE0:
+		reg = (reg & TSENS_THRESHOLD_MIN_LIMIT_MASK) >> 16;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	*temp = tsens_tz_code_to_degC(reg);
+
+	return 0;
+}
+
+static int tsens_tz_get_crit_temp(struct thermal_zone_device *thermal,
+				  unsigned long *temp)
+{
+	return tsens_tz_get_trip_temp(thermal, TSENS_TRIP_STAGE3, temp);
+}
+
+static int tsens_tz_set_trip_temp(struct thermal_zone_device *thermal,
+				   int trip, long temp)
+{
+	struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
+	unsigned int reg_th, reg_cntl;
+	int code, hi_code, lo_code, code_err_chk;
+
+	code_err_chk = code = tsens_tz_degC_to_code(temp);
+	if (!tm_sensor || trip < 0)
+		return -EINVAL;
+
+	lo_code = 0;
+	hi_code = TSENS_THRESHOLD_MAX_CODE;
+
+	reg_cntl = readl(TSENS_CNTL_ADDR);
+	reg_th = readl(TSENS_THRESHOLD_ADDR);
+	switch (trip) {
+	case TSENS_TRIP_STAGE3:
+		code <<= 24;
+		reg_th &= ~TSENS_THRESHOLD_MAX_LIMIT_MASK;
+
+		if (!(reg_cntl & TSENS_UPPER_STATUS_CLR))
+			lo_code = (reg_th & TSENS_THRESHOLD_UPPER_LIMIT_MASK)
+									>> 8;
+		else if (!(reg_cntl & TSENS_LOWER_STATUS_CLR))
+			lo_code = (reg_th & TSENS_THRESHOLD_LOWER_LIMIT_MASK);
+		else if (!(reg_cntl & TSENS_MIN_STATUS_MASK))
+			lo_code = (reg_th & TSENS_THRESHOLD_MIN_LIMIT_MASK)
+									>> 16;
+		break;
+	case TSENS_TRIP_STAGE2:
+		code <<= 8;
+		reg_th &= ~TSENS_THRESHOLD_UPPER_LIMIT_MASK;
+
+		if (!(reg_cntl & TSENS_MAX_STATUS_MASK))
+			hi_code = (reg_th & TSENS_THRESHOLD_MAX_LIMIT_MASK)
+									>> 24;
+		if (!(reg_cntl & TSENS_LOWER_STATUS_CLR))
+			lo_code = (reg_th & TSENS_THRESHOLD_LOWER_LIMIT_MASK);
+		else if (!(reg_cntl & TSENS_MIN_STATUS_MASK))
+			lo_code = (reg_th & TSENS_THRESHOLD_MIN_LIMIT_MASK)
+									>> 16;
+		break;
+	case TSENS_TRIP_STAGE1:
+		reg_th &= ~TSENS_THRESHOLD_LOWER_LIMIT_MASK;
+
+		if (!(reg_cntl & TSENS_MIN_STATUS_MASK))
+			lo_code = (reg_th & TSENS_THRESHOLD_MIN_LIMIT_MASK)
+									>> 16;
+		if (!(reg_cntl & TSENS_UPPER_STATUS_CLR))
+			hi_code = (reg_th & TSENS_THRESHOLD_UPPER_LIMIT_MASK)
+									>> 8;
+		else if (!(reg_cntl & TSENS_MAX_STATUS_MASK))
+			hi_code = (reg_th & TSENS_THRESHOLD_MAX_LIMIT_MASK)
+									>> 24;
+		break;
+	case TSENS_TRIP_STAGE0:
+		code <<= 16;
+		reg_th &= ~TSENS_THRESHOLD_MIN_LIMIT_MASK;
+
+		if (!(reg_cntl & TSENS_LOWER_STATUS_CLR))
+			hi_code = (reg_th & TSENS_THRESHOLD_LOWER_LIMIT_MASK);
+		else if (!(reg_cntl & TSENS_UPPER_STATUS_CLR))
+			hi_code = (reg_th & TSENS_THRESHOLD_UPPER_LIMIT_MASK)
+									>> 8;
+		else if (!(reg_cntl & TSENS_MAX_STATUS_MASK))
+			hi_code = (reg_th & TSENS_THRESHOLD_MAX_LIMIT_MASK)
+									>> 24;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (code_err_chk < lo_code || code_err_chk > hi_code)
+		return -EINVAL;
+
+	writel(reg_th | code, TSENS_THRESHOLD_ADDR);
+	return 0;
+}
+
+static struct thermal_zone_device_ops tsens_thermal_zone_ops = {
+	.get_temp = tsens_tz_get_temp,
+	.get_mode = tsens_tz_get_mode,
+	.set_mode = tsens_tz_set_mode,
+	.get_trip_type = tsens_tz_get_trip_type,
+	.activate_trip_type = tsens_tz_activate_trip_type,
+	.get_trip_temp = tsens_tz_get_trip_temp,
+	.set_trip_temp = tsens_tz_set_trip_temp,
+	.get_crit_temp = tsens_tz_get_crit_temp,
+};
+
+static void notify_uspace_tsens_fn(struct work_struct *work)
+{
+	struct tsens_tm_device *tm = container_of(work, struct tsens_tm_device,
+					work);
+	/*
+	 * Only sensor 0 is currently supported, so only its thermal zone is
+	 * notified here; revisit this once the remaining sensors are
+	 * supported.
+	 */
+	sysfs_notify(&tm->sensor[0].tz_dev->device.kobj,
+					NULL, "type");
+}
+
+static irqreturn_t tsens_isr(int irq, void *data)
+{
+	unsigned int reg = readl(TSENS_CNTL_ADDR);
+
+	writel(reg | TSENS_LOWER_STATUS_CLR | TSENS_UPPER_STATUS_CLR,
+			TSENS_CNTL_ADDR);
+
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t tsens_isr_thread(int irq, void *data)
+{
+	struct tsens_tm_device *tm = data;
+	unsigned int threshold, threshold_low, i, code, reg, sensor, mask;
+	bool upper_th_x, lower_th_x;
+	int adc_code;
+
+	mask = ~(TSENS_LOWER_STATUS_CLR | TSENS_UPPER_STATUS_CLR);
+	threshold = readl(TSENS_THRESHOLD_ADDR);
+	threshold_low = threshold & TSENS_THRESHOLD_LOWER_LIMIT_MASK;
+	threshold = (threshold & TSENS_THRESHOLD_UPPER_LIMIT_MASK) >> 8;
+	reg = sensor = readl(TSENS_CNTL_ADDR);
+	sensor &= (SENSOR0_EN | SENSOR1_EN | SENSOR2_EN |
+						SENSOR3_EN | SENSOR4_EN);
+	sensor >>= 3;
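+	/*
+	 * The per-sensor enable bits sit at bits 7:3 of TSENS_CNTL, so after
+	 * the shift above bit i of 'sensor' corresponds to sensor i.
+	 */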
+	for (i = 0; i < TSENS_NUM_SENSORS; i++) {
+		if (sensor & 1) {
+			code = readl(TSENS_S0_STATUS_ADDR + (i << 2));
+			upper_th_x = code >= threshold;
+			lower_th_x = code <= threshold_low;
+			if (upper_th_x)
+				mask |= TSENS_UPPER_STATUS_CLR;
+			if (lower_th_x)
+				mask |= TSENS_LOWER_STATUS_CLR;
+			if (upper_th_x || lower_th_x) {
+				thermal_zone_device_update(
+							tm->sensor[i].tz_dev);
+
+				/* Notify user space */
+				schedule_work(&tm->work);
+				adc_code = readl(TSENS_S0_STATUS_ADDR
+							+ (i << 2));
+				printk(KERN_INFO "\nTrip point triggered by "
+					"current temperature (%d degrees) "
+					"measured by Temperature-Sensor %d\n",
+					tsens_tz_code_to_degC(adc_code), i);
+			}
+		}
+		sensor >>= 1;
+	}
+	writel(reg & mask, TSENS_CNTL_ADDR);
+	return IRQ_HANDLED;
+}
+
+static int __devinit tsens_tm_probe(struct platform_device *pdev)
+{
+	unsigned int reg, i, calib_data, calib_data_backup;
+	int rc;
+
+	calib_data = (readl(TSENS_QFPROM_ADDR) & TSENS_QFPROM_TEMP_SENSOR0_MASK)
+					>> TSENS_QFPROM_TEMP_SENSOR0_SHIFT;
+	calib_data_backup = readl(TSENS_QFPROM_ADDR)
+					>> TSENS_QFPROM_RED_TEMP_SENSOR0_SHIFT;
+
+	if (calib_data_backup)
+		calib_data = calib_data_backup;
+
+	if (!calib_data) {
+		pr_err("%s: No temperature sensor data for calibration"
+						" in QFPROM!\n", __func__);
+		return -ENODEV;
+	}
+
+	tmdev = kzalloc(sizeof(struct tsens_tm_device), GFP_KERNEL);
+	if (tmdev == NULL) {
+		pr_err("%s: kzalloc() failed.\n", __func__);
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(pdev, tmdev);
+
+	tmdev->offset = TSENS_FACTOR * TSENS_CAL_DEGC
+			- (int)(TSENS_FACTOR * TSENS_SLOPE) * calib_data;
+	tmdev->prev_reading_avail = 0;
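+	/*
+	 * Assuming a linear conversion of the form
+	 *   degC = (TSENS_FACTOR * TSENS_SLOPE * code + offset) / TSENS_FACTOR
+	 * in tsens_tz_code_to_degC(), the offset computed above makes the
+	 * factory calibration code read from QFPROM map back to
+	 * TSENS_CAL_DEGC exactly.
+	 */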
+
+	INIT_WORK(&tmdev->work, notify_uspace_tsens_fn);
+
+	reg = readl(TSENS_CNTL_ADDR);
+	writel(reg | TSENS_SW_RST, TSENS_CNTL_ADDR);
+	reg |= TSENS_SLP_CLK_ENA | TSENS_EN | (TSENS_MEASURE_PERIOD << 16) |
+		TSENS_LOWER_STATUS_CLR | TSENS_UPPER_STATUS_CLR |
+		TSENS_MIN_STATUS_MASK | TSENS_MAX_STATUS_MASK |
+		(((1 << TSENS_NUM_SENSORS) - 1) << 3);
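+	/*
+	 * As implied by the shifts above, bits 7:3 of TSENS_CNTL enable the
+	 * individual sensors and the measurement period field starts at
+	 * bit 16.
+	 */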
+
+	/*
+	 * Set the TSENS_CONFIG bits (bits 29:28 of TSENS_CNTL) to '01';
+	 * this setting was found to be optimal.
+	 */
+	reg = (reg & ~TSENS_CONFIG_MASK) | (TSENS_CONFIG << TSENS_CONFIG_SHIFT);
+
+	writel(reg, TSENS_CNTL_ADDR);
+
+	writel((TSENS_LOWER_LIMIT_TH << 0) | (TSENS_UPPER_LIMIT_TH << 8) |
+		(TSENS_MIN_LIMIT_TH << 16) | (TSENS_MAX_LIMIT_TH << 24),
+			TSENS_THRESHOLD_ADDR);
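+	/*
+	 * Threshold register layout, matching the masks used by the trip
+	 * handlers above: lower limit in bits 7:0, upper in 15:8, min in
+	 * 23:16 and max in 31:24.
+	 */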
+
+	for (i = 0; i < TSENS_NUM_SENSORS; i++) {
+		char name[17];
+		sprintf(name, "tsens_tz_sensor%d", i);
+
+		tmdev->sensor[i].mode = THERMAL_DEVICE_ENABLED;
+		tmdev->sensor[i].tz_dev = thermal_zone_device_register(name,
+				TSENS_TRIP_NUM, &tmdev->sensor[i],
+				&tsens_thermal_zone_ops, 0, 0, 0, 0);
+		if (tmdev->sensor[i].tz_dev == NULL) {
+			pr_err("%s: thermal_zone_device_register() failed.\n",
+			__func__);
+			kfree(tmdev);
+			return -ENODEV;
+		}
+		tmdev->sensor[i].sensor_num = i;
+		thermal_zone_device_update(tmdev->sensor[i].tz_dev);
+		tmdev->sensor[i].mode = THERMAL_DEVICE_DISABLED;
+	}
+
+	rc = request_threaded_irq(TSENS_UPPER_LOWER_INT, tsens_isr,
+		tsens_isr_thread, 0, "tsens", tmdev);
+	if (rc < 0) {
+		pr_err("%s: request_irq FAIL: %d\n", __func__, rc);
+		kfree(tmdev);
+		return rc;
+	}
+
+	writel(reg & ~((((1 << TSENS_NUM_SENSORS) - 1) << 3)
+			| TSENS_SLP_CLK_ENA | TSENS_EN), TSENS_CNTL_ADDR);
+	pr_notice("%s: OK\n", __func__);
+	return 0;
+}
+
+static int __devexit tsens_tm_remove(struct platform_device *pdev)
+{
+	struct tsens_tm_device *tmdev = platform_get_drvdata(pdev);
+	unsigned int reg, i;
+
+	reg = readl(TSENS_CNTL_ADDR);
+	writel(reg & ~(TSENS_SLP_CLK_ENA | TSENS_EN), TSENS_CNTL_ADDR);
+
+	for (i = 0; i < TSENS_NUM_SENSORS; i++)
+		thermal_zone_device_unregister(tmdev->sensor[i].tz_dev);
+	platform_set_drvdata(pdev, NULL);
+	free_irq(TSENS_UPPER_LOWER_INT, tmdev);
+	kfree(tmdev);
+
+	return 0;
+}
+
+static struct platform_driver tsens_tm_driver = {
+	.probe	= tsens_tm_probe,
+	.remove	= __devexit_p(tsens_tm_remove),
+	.driver	= {
+		.name = "tsens-tm",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init tsens_init(void)
+{
+	return platform_driver_register(&tsens_tm_driver);
+}
+
+static void __exit tsens_exit(void)
+{
+	platform_driver_unregister(&tsens_tm_driver);
+}
+
+module_init(tsens_init);
+module_exit(tsens_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM Temperature Sensor driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:tsens-tm");
diff --git a/drivers/thermal/pm8xxx-tm.c b/drivers/thermal/pm8xxx-tm.c
new file mode 100644
index 0000000..a094aed
--- /dev/null
+++ b/drivers/thermal/pm8xxx-tm.c
@@ -0,0 +1,648 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Qualcomm PMIC PM8xxx Thermal Manager driver
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/thermal.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/pm8xxx/core.h>
+#include <linux/mfd/pm8xxx/tm.h>
+#include <linux/completion.h>
+#include <linux/mfd/pm8921-adc.h>
+
+/* Register TEMP_ALARM_CTRL bits */
+#define	TEMP_ALARM_CTRL_ST3_SD		0x80
+#define	TEMP_ALARM_CTRL_ST2_SD		0x40
+#define	TEMP_ALARM_CTRL_STATUS_MASK	0x30
+#define	TEMP_ALARM_CTRL_STATUS_SHIFT	4
+#define	TEMP_ALARM_CTRL_THRESH_MASK	0x0C
+#define	TEMP_ALARM_CTRL_THRESH_SHIFT	2
+#define	TEMP_ALARM_CTRL_OVRD_ST3	0x02
+#define	TEMP_ALARM_CTRL_OVRD_ST2	0x01
+#define	TEMP_ALARM_CTRL_OVRD_MASK	0x03
+
+#define	TEMP_STAGE_STEP			20000	/* Stage step: 20.000 C */
+#define	TEMP_STAGE_HYSTERESIS		2000
+
+#define	TEMP_THRESH_MIN			105000	/* Threshold Min: 105 C */
+#define	TEMP_THRESH_STEP		5000	/* Threshold step: 5 C */
+
+/* Register TEMP_ALARM_PWM bits */
+#define	TEMP_ALARM_PWM_EN_MASK		0xC0
+#define	TEMP_ALARM_PWM_EN_SHIFT		6
+#define	TEMP_ALARM_PWM_PER_PRE_MASK	0x38
+#define	TEMP_ALARM_PWM_PER_PRE_SHIFT	3
+#define	TEMP_ALARM_PWM_PER_DIV_MASK	0x07
+#define	TEMP_ALARM_PWM_PER_DIV_SHIFT	0
+
+/* Trips: from critical to less critical */
+#define TRIP_STAGE3			0
+#define TRIP_STAGE2			1
+#define TRIP_STAGE1			2
+#define TRIP_NUM			3
+
+struct pm8xxx_tm_chip {
+	struct pm8xxx_tm_core_data	cdata;
+	struct work_struct		irq_work;
+	struct device			*dev;
+	struct thermal_zone_device	*tz_dev;
+	unsigned long			temp;
+	enum thermal_device_mode	mode;
+	unsigned int			thresh;
+	unsigned int			stage;
+	unsigned int			tempstat_irq;
+	unsigned int			overtemp_irq;
+	void				*adc_handle;
+};
+
+enum pmic_thermal_override_mode {
+	SOFTWARE_OVERRIDE_DISABLED = 0,
+	SOFTWARE_OVERRIDE_ENABLED,
+};
+
+static inline int pm8xxx_tm_read_ctrl(struct pm8xxx_tm_chip *chip, u8 *reg)
+{
+	int rc;
+
+	rc = pm8xxx_readb(chip->dev->parent,
+			  chip->cdata.reg_addr_temp_alarm_ctrl, reg);
+	if (rc)
+		pr_err("%s: pm8xxx_readb(0x%03X) failed, rc=%d\n",
+			chip->cdata.tm_name,
+			chip->cdata.reg_addr_temp_alarm_ctrl, rc);
+
+	return rc;
+}
+
+static inline int pm8xxx_tm_write_ctrl(struct pm8xxx_tm_chip *chip, u8 reg)
+{
+	int rc;
+
+	rc = pm8xxx_writeb(chip->dev->parent,
+			   chip->cdata.reg_addr_temp_alarm_ctrl, reg);
+	if (rc)
+		pr_err("%s: pm8xxx_writeb(0x%03X)=0x%02X failed, rc=%d\n",
+		       chip->cdata.tm_name,
+		       chip->cdata.reg_addr_temp_alarm_ctrl, reg, rc);
+
+	return rc;
+}
+
+static inline int pm8xxx_tm_write_pwm(struct pm8xxx_tm_chip *chip, u8 reg)
+{
+	int rc;
+
+	rc = pm8xxx_writeb(chip->dev->parent,
+			   chip->cdata.reg_addr_temp_alarm_pwm, reg);
+	if (rc)
+		pr_err("%s: pm8xxx_writeb(0x%03X)=0x%02X failed, rc=%d\n",
+			chip->cdata.tm_name,
+			chip->cdata.reg_addr_temp_alarm_pwm, reg, rc);
+
+	return rc;
+}
+
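+/*
+ * With the override bits set, software takes over the stage 2/3 response
+ * instead of the PMIC's automatic hardware shutdown; clearing them hands
+ * control back to hardware (see the set_mode and suspend/resume paths
+ * below).
+ */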
+static inline int
+pm8xxx_tm_shutdown_override(struct pm8xxx_tm_chip *chip,
+			    enum pmic_thermal_override_mode mode)
+{
+	int rc;
+	u8 reg;
+
+	rc = pm8xxx_tm_read_ctrl(chip, &reg);
+	if (rc < 0)
+		return rc;
+
+	reg &= ~(TEMP_ALARM_CTRL_OVRD_MASK | TEMP_ALARM_CTRL_STATUS_MASK);
+	if (mode == SOFTWARE_OVERRIDE_ENABLED)
+		reg |= (TEMP_ALARM_CTRL_OVRD_ST3 | TEMP_ALARM_CTRL_OVRD_ST2) &
+			TEMP_ALARM_CTRL_OVRD_MASK;
+
+	rc = pm8xxx_tm_write_ctrl(chip, reg);
+
+	return rc;
+}
+
+/*
+ * This function initializes the internal temperature value based on only the
+ * current thermal stage and threshold.
+ */
+static int pm8xxx_tm_init_temp_no_adc(struct pm8xxx_tm_chip *chip)
+{
+	int rc;
+	u8 reg;
+
+	rc = pm8xxx_tm_read_ctrl(chip, &reg);
+	if (rc < 0)
+		return rc;
+
+	chip->stage = (reg & TEMP_ALARM_CTRL_STATUS_MASK)
+			>> TEMP_ALARM_CTRL_STATUS_SHIFT;
+	chip->thresh = (reg & TEMP_ALARM_CTRL_THRESH_MASK)
+			>> TEMP_ALARM_CTRL_THRESH_SHIFT;
+
+	if (chip->stage)
+		chip->temp = chip->thresh * TEMP_THRESH_STEP +
+			   (chip->stage - 1) * TEMP_STAGE_STEP +
+			   TEMP_THRESH_MIN;
+	else
+		chip->temp = chip->cdata.default_no_adc_temp;
+
+	return 0;
+}
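+
+/*
+ * Worked example (illustrative): with threshold set 0 (thresh == 0) and a
+ * status reading of stage 2, the estimate above is
+ * 0 * 5000 + (2 - 1) * 20000 + 105000 = 125000 mC, i.e. the 125 C stage-2
+ * boundary.
+ */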
+
+/*
+ * This function updates the internal temperature value based on the
+ * current thermal stage and threshold as well as the previous stage
+ */
+static int pm8xxx_tm_update_temp_no_adc(struct pm8xxx_tm_chip *chip)
+{
+	unsigned int stage;
+	int rc;
+	u8 reg;
+
+	rc = pm8xxx_tm_read_ctrl(chip, &reg);
+	if (rc < 0)
+		return rc;
+
+	stage = (reg & TEMP_ALARM_CTRL_STATUS_MASK)
+		>> TEMP_ALARM_CTRL_STATUS_SHIFT;
+	chip->thresh = (reg & TEMP_ALARM_CTRL_THRESH_MASK)
+			>> TEMP_ALARM_CTRL_THRESH_SHIFT;
+
+	if (stage > chip->stage) {
+		/* increasing stage, use lower bound */
+		chip->temp = (stage - 1) * TEMP_STAGE_STEP
+				+ chip->thresh * TEMP_THRESH_STEP
+				+ TEMP_STAGE_HYSTERESIS + TEMP_THRESH_MIN;
+	} else if (stage < chip->stage) {
+		/* decreasing stage, use upper bound */
+		chip->temp = stage * TEMP_STAGE_STEP
+				+ chip->thresh * TEMP_THRESH_STEP
+				- TEMP_STAGE_HYSTERESIS + TEMP_THRESH_MIN;
+	}
+
+	chip->stage = stage;
+
+	return 0;
+}
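+
+/*
+ * Worked example (illustrative): with thresh == 0, moving up from stage 1
+ * to stage 2 reports (2 - 1) * 20000 + 0 + 2000 + 105000 = 127000 mC,
+ * while dropping back from stage 2 to stage 1 reports
+ * 1 * 20000 + 0 - 2000 + 105000 = 123000 mC, so the estimate never sits
+ * exactly on a stage boundary.
+ */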
+
+static int pm8xxx_tz_get_temp_no_adc(struct thermal_zone_device *thermal,
+				     unsigned long *temp)
+{
+	struct pm8xxx_tm_chip *chip = thermal->devdata;
+	int rc;
+
+	if (!chip || !temp)
+		return -EINVAL;
+
+	rc = pm8xxx_tm_update_temp_no_adc(chip);
+	if (rc < 0)
+		return rc;
+
+	*temp = chip->temp;
+
+	return 0;
+}
+
+static int pm8xxx_tz_get_temp_pm8921_adc(struct thermal_zone_device *thermal,
+				      unsigned long *temp)
+{
+	struct pm8xxx_tm_chip *chip = thermal->devdata;
+	struct pm8921_adc_chan_result result = {
+		.physical = 0lu,
+	};
+	int rc;
+
+	if (!chip || !temp)
+		return -EINVAL;
+
+	*temp = chip->temp;
+
+	rc = pm8921_adc_read(chip->cdata.adc_channel, &result);
+	if (rc < 0) {
+		pr_err("%s: adc_channel_read_result() failed, rc = %d\n",
+			chip->cdata.tm_name, rc);
+		return rc;
+	}
+
+	*temp = result.physical;
+	chip->temp = result.physical;
+
+	return 0;
+}
+
+static int pm8xxx_tz_get_mode(struct thermal_zone_device *thermal,
+			      enum thermal_device_mode *mode)
+{
+	struct pm8xxx_tm_chip *chip = thermal->devdata;
+
+	if (!chip || !mode)
+		return -EINVAL;
+
+	*mode = chip->mode;
+
+	return 0;
+}
+
+static int pm8xxx_tz_set_mode(struct thermal_zone_device *thermal,
+			      enum thermal_device_mode mode)
+{
+	struct pm8xxx_tm_chip *chip = thermal->devdata;
+
+	if (!chip)
+		return -EINVAL;
+
+	if (mode != chip->mode) {
+		if (mode == THERMAL_DEVICE_ENABLED)
+			pm8xxx_tm_shutdown_override(chip,
+						    SOFTWARE_OVERRIDE_ENABLED);
+		else
+			pm8xxx_tm_shutdown_override(chip,
+						    SOFTWARE_OVERRIDE_DISABLED);
+	}
+	chip->mode = mode;
+
+	return 0;
+}
+
+static int pm8xxx_tz_get_trip_type(struct thermal_zone_device *thermal,
+				   int trip, enum thermal_trip_type *type)
+{
+	if (trip < 0 || !type)
+		return -EINVAL;
+
+	switch (trip) {
+	case TRIP_STAGE3:
+		*type = THERMAL_TRIP_CRITICAL;
+		break;
+	case TRIP_STAGE2:
+		*type = THERMAL_TRIP_HOT;
+		break;
+	case TRIP_STAGE1:
+		*type = THERMAL_TRIP_HOT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int pm8xxx_tz_get_trip_temp(struct thermal_zone_device *thermal,
+				   int trip, unsigned long *temp)
+{
+	struct pm8xxx_tm_chip *chip = thermal->devdata;
+	int thresh_temp;
+
+	if (!chip || trip < 0 || !temp)
+		return -EINVAL;
+
+	thresh_temp = chip->thresh * TEMP_THRESH_STEP +
+			TEMP_THRESH_MIN;
+
+	switch (trip) {
+	case TRIP_STAGE3:
+		thresh_temp += 2 * TEMP_STAGE_STEP;
+		break;
+	case TRIP_STAGE2:
+		thresh_temp += TEMP_STAGE_STEP;
+		break;
+	case TRIP_STAGE1:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	*temp = thresh_temp;
+
+	return 0;
+}
+
+static int pm8xxx_tz_get_crit_temp(struct thermal_zone_device *thermal,
+				   unsigned long *temp)
+{
+	struct pm8xxx_tm_chip *chip = thermal->devdata;
+
+	if (!chip || !temp)
+		return -EINVAL;
+
+	*temp = chip->thresh * TEMP_THRESH_STEP + TEMP_THRESH_MIN +
+		2 * TEMP_STAGE_STEP;
+
+	return 0;
+}
+
+static struct thermal_zone_device_ops pm8xxx_thermal_zone_ops_no_adc = {
+	.get_temp = pm8xxx_tz_get_temp_no_adc,
+	.get_mode = pm8xxx_tz_get_mode,
+	.set_mode = pm8xxx_tz_set_mode,
+	.get_trip_type = pm8xxx_tz_get_trip_type,
+	.get_trip_temp = pm8xxx_tz_get_trip_temp,
+	.get_crit_temp = pm8xxx_tz_get_crit_temp,
+};
+
+static struct thermal_zone_device_ops pm8xxx_thermal_zone_ops_pm8921_adc = {
+	.get_temp = pm8xxx_tz_get_temp_pm8921_adc,
+	.get_mode = pm8xxx_tz_get_mode,
+	.set_mode = pm8xxx_tz_set_mode,
+	.get_trip_type = pm8xxx_tz_get_trip_type,
+	.get_trip_temp = pm8xxx_tz_get_trip_temp,
+	.get_crit_temp = pm8xxx_tz_get_crit_temp,
+};
+
+static void pm8xxx_tm_work(struct work_struct *work)
+{
+	struct pm8xxx_tm_chip *chip
+		= container_of(work, struct pm8xxx_tm_chip, irq_work);
+	int rc;
+	u8 reg;
+
+	rc = pm8xxx_tm_read_ctrl(chip, &reg);
+	if (rc < 0)
+		goto bail;
+
+	if (chip->cdata.adc_type == PM8XXX_TM_ADC_NONE) {
+		rc = pm8xxx_tm_update_temp_no_adc(chip);
+		if (rc < 0)
+			goto bail;
+		pr_info("%s: Temp Alarm - stage=%u, threshold=%u, "
+			"temp=%lu mC\n", chip->cdata.tm_name, chip->stage,
+			chip->thresh, chip->temp);
+	} else {
+		chip->stage = (reg & TEMP_ALARM_CTRL_STATUS_MASK)
+				>> TEMP_ALARM_CTRL_STATUS_SHIFT;
+		chip->thresh = (reg & TEMP_ALARM_CTRL_THRESH_MASK)
+				>> TEMP_ALARM_CTRL_THRESH_SHIFT;
+		pr_info("%s: Temp Alarm - stage=%u, threshold=%u\n",
+			chip->cdata.tm_name, chip->stage, chip->thresh);
+	}
+
+	/* Clear status bits. */
+	if (reg & (TEMP_ALARM_CTRL_ST2_SD | TEMP_ALARM_CTRL_ST3_SD)) {
+		reg &= ~(TEMP_ALARM_CTRL_ST2_SD | TEMP_ALARM_CTRL_ST3_SD
+			 | TEMP_ALARM_CTRL_STATUS_MASK);
+
+		pm8xxx_tm_write_ctrl(chip, reg);
+	}
+
+	thermal_zone_device_update(chip->tz_dev);
+
+	/* Notify user space */
+	if (chip->mode == THERMAL_DEVICE_ENABLED)
+		kobject_uevent(&chip->tz_dev->device.kobj, KOBJ_CHANGE);
+
+bail:
+	enable_irq(chip->tempstat_irq);
+	enable_irq(chip->overtemp_irq);
+}
+
+static irqreturn_t pm8xxx_tm_isr(int irq, void *data)
+{
+	struct pm8xxx_tm_chip *chip = data;
+
+	disable_irq_nosync(chip->tempstat_irq);
+	disable_irq_nosync(chip->overtemp_irq);
+	schedule_work(&chip->irq_work);
+
+	return IRQ_HANDLED;
+}
+
+static int pm8xxx_tm_init_reg(struct pm8xxx_tm_chip *chip)
+{
+	int rc;
+	u8 reg;
+
+	rc = pm8xxx_tm_read_ctrl(chip, &reg);
+	if (rc < 0)
+		return rc;
+
+	chip->stage = (reg & TEMP_ALARM_CTRL_STATUS_MASK)
+			>> TEMP_ALARM_CTRL_STATUS_SHIFT;
+	chip->temp = 0;
+
+	/* Use temperature threshold set 0: (105, 125, 145) */
+	chip->thresh = 0;
+	reg = (chip->thresh << TEMP_ALARM_CTRL_THRESH_SHIFT)
+		& TEMP_ALARM_CTRL_THRESH_MASK;
+	rc = pm8xxx_tm_write_ctrl(chip, reg);
+	if (rc < 0)
+		return rc;
+
+	/*
+	 * Set the PMIC alarm module PWM to have a frequency of 8 Hz. This
+	 * helps cut down on the number of unnecessary interrupts fired when
+	 * changing between thermal stages.  Also, enable the over temperature
+	 * PWM whenever the PMIC is enabled.
+	 */
+	reg =  (1 << TEMP_ALARM_PWM_EN_SHIFT)
+		| (3 << TEMP_ALARM_PWM_PER_PRE_SHIFT)
+		| (3 << TEMP_ALARM_PWM_PER_DIV_SHIFT);
+
+	rc = pm8xxx_tm_write_pwm(chip, reg);
+
+	return rc;
+}
+
+static int __devinit pm8xxx_tm_probe(struct platform_device *pdev)
+{
+	const struct pm8xxx_tm_core_data *cdata = pdev->dev.platform_data;
+	struct thermal_zone_device_ops *tz_ops;
+	struct pm8xxx_tm_chip *chip;
+	struct resource *res;
+	int rc = 0;
+
+	if (!cdata) {
+		pr_err("missing core data\n");
+		return -EINVAL;
+	}
+
+	chip = kzalloc(sizeof(struct pm8xxx_tm_chip), GFP_KERNEL);
+	if (chip == NULL) {
+		pr_err("kzalloc() failed.\n");
+		return -ENOMEM;
+	}
+
+	chip->dev = &pdev->dev;
+	memcpy(&(chip->cdata), cdata, sizeof(struct pm8xxx_tm_core_data));
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+		chip->cdata.irq_name_temp_stat);
+	if (res) {
+		chip->tempstat_irq = res->start;
+	} else {
+		pr_err("temp stat IRQ not specified\n");
+		rc = -ENXIO;
+		goto err_free_chip;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+		chip->cdata.irq_name_over_temp);
+	if (res) {
+		chip->overtemp_irq = res->start;
+	} else {
+		pr_err("over temp IRQ not specified\n");
+		rc = -ENXIO;
+		goto err_free_chip;
+	}
+
+	/* Select proper thermal zone ops functions based on ADC type. */
+	if (chip->cdata.adc_type == PM8XXX_TM_ADC_PM8921_ADC)
+		tz_ops = &pm8xxx_thermal_zone_ops_pm8921_adc;
+	else
+		tz_ops = &pm8xxx_thermal_zone_ops_no_adc;
+
+	chip->tz_dev = thermal_zone_device_register(chip->cdata.tm_name,
+			TRIP_NUM, chip, tz_ops, 0, 0, 0, 0);
+	if (chip->tz_dev == NULL) {
+		pr_err("thermal_zone_device_register() failed.\n");
+		rc = -ENODEV;
+		goto err_free_chip;
+	}
+
+	rc = pm8xxx_tm_init_reg(chip);
+	if (rc < 0)
+		goto err_free_tz;
+	rc = pm8xxx_tm_shutdown_override(chip, SOFTWARE_OVERRIDE_DISABLED);
+	if (rc < 0)
+		goto err_free_tz;
+
+	if (chip->cdata.adc_type == PM8XXX_TM_ADC_NONE) {
+		rc = pm8xxx_tm_init_temp_no_adc(chip);
+		if (rc < 0)
+			goto err_free_tz;
+	}
+
+	/* Start in HW control; switch to SW control when user changes mode. */
+	chip->mode = THERMAL_DEVICE_DISABLED;
+	thermal_zone_device_update(chip->tz_dev);
+
+	INIT_WORK(&chip->irq_work, pm8xxx_tm_work);
+
+	rc = request_irq(chip->tempstat_irq, pm8xxx_tm_isr, IRQF_TRIGGER_RISING,
+		chip->cdata.irq_name_temp_stat, chip);
+	if (rc < 0) {
+		pr_err("request_irq(%d) failed: %d\n", chip->tempstat_irq, rc);
+		goto err_cancel_work;
+	}
+
+	rc = request_irq(chip->overtemp_irq, pm8xxx_tm_isr, IRQF_TRIGGER_RISING,
+		chip->cdata.irq_name_over_temp, chip);
+	if (rc < 0) {
+		pr_err("request_irq(%d) failed: %d\n", chip->overtemp_irq, rc);
+		goto err_free_irq_tempstat;
+	}
+
+	platform_set_drvdata(pdev, chip);
+
+	pr_info("OK\n");
+
+	return 0;
+
+err_free_irq_tempstat:
+	free_irq(chip->tempstat_irq, chip);
+err_cancel_work:
+	cancel_work_sync(&chip->irq_work);
+err_free_tz:
+	thermal_zone_device_unregister(chip->tz_dev);
+err_free_chip:
+	kfree(chip);
+	return rc;
+}
+
+static int __devexit pm8xxx_tm_remove(struct platform_device *pdev)
+{
+	struct pm8xxx_tm_chip *chip = platform_get_drvdata(pdev);
+
+	if (chip) {
+		platform_set_drvdata(pdev, NULL);
+		cancel_work_sync(&chip->irq_work);
+		free_irq(chip->overtemp_irq, chip);
+		free_irq(chip->tempstat_irq, chip);
+		pm8xxx_tm_shutdown_override(chip, SOFTWARE_OVERRIDE_DISABLED);
+		thermal_zone_device_unregister(chip->tz_dev);
+		kfree(chip);
+	}
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int pm8xxx_tm_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct pm8xxx_tm_chip *chip = platform_get_drvdata(pdev);
+
+	/* Clear override bits in suspend to allow hardware control */
+	pm8xxx_tm_shutdown_override(chip, SOFTWARE_OVERRIDE_DISABLED);
+
+	return 0;
+}
+
+static int pm8xxx_tm_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct pm8xxx_tm_chip *chip = platform_get_drvdata(pdev);
+
+	/* Override hardware actions so software can control */
+	if (chip->mode == THERMAL_DEVICE_ENABLED)
+		pm8xxx_tm_shutdown_override(chip, SOFTWARE_OVERRIDE_ENABLED);
+
+	return 0;
+}
+
+static const struct dev_pm_ops pm8xxx_tm_pm_ops = {
+	.suspend = pm8xxx_tm_suspend,
+	.resume = pm8xxx_tm_resume,
+};
+
+#define PM8XXX_TM_PM_OPS	(&pm8xxx_tm_pm_ops)
+#else
+#define PM8XXX_TM_PM_OPS	NULL
+#endif
+
+static struct platform_driver pm8xxx_tm_driver = {
+	.probe	= pm8xxx_tm_probe,
+	.remove	= __devexit_p(pm8xxx_tm_remove),
+	.driver	= {
+		.name = PM8XXX_TM_DEV_NAME,
+		.owner = THIS_MODULE,
+		.pm = PM8XXX_TM_PM_OPS,
+	},
+};
+
+static int __init pm8xxx_tm_init(void)
+{
+	return platform_driver_register(&pm8xxx_tm_driver);
+}
+
+static void __exit pm8xxx_tm_exit(void)
+{
+	platform_driver_unregister(&pm8xxx_tm_driver);
+}
+
+module_init(pm8xxx_tm_init);
+module_exit(pm8xxx_tm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PM8xxx Thermal Manager driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:" PM8XXX_TM_DEV_NAME);
diff --git a/drivers/thermal/pmic8058-tm.c b/drivers/thermal/pmic8058-tm.c
new file mode 100644
index 0000000..cc98f37
--- /dev/null
+++ b/drivers/thermal/pmic8058-tm.c
@@ -0,0 +1,509 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * Qualcomm PMIC8058 Thermal Manager driver
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/thermal.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/pmic8058.h>
+#include <linux/completion.h>
+
+#include <linux/msm_adc.h>
+
+/* PMIC8058 TEMP_ALRM registers */
+#define	SSBI_REG_TEMP_ALRM_CTRL		0x1B
+#define	SSBI_REG_TEMP_ALRM_PWM		0x9B
+#define	SSBI_REG_TEMP_ALRM_TEST1	0x7A
+#define	SSBI_REG_TEMP_ALRM_TEST2	0xAB
+
+/* TEMP_ALRM_CTRL */
+#define	PM8058_TEMP_ST3_SD		0x80
+#define	PM8058_TEMP_ST2_SD		0x40
+#define	PM8058_TEMP_STATUS_MASK		0x30
+#define	PM8058_TEMP_STATUS_SHIFT	4
+#define	PM8058_TEMP_THRESH_MASK		0x0C
+#define	PM8058_TEMP_THRESH_SHIFT	2
+#define	PM8058_TEMP_OVRD_ST3		0x02
+#define	PM8058_TEMP_OVRD_ST2		0x01
+#define	PM8058_TEMP_OVRD_MASK		0x03
+
+#define	PM8058_TEMP_STAGE_STEP		20000	/* Stage step: 20 C */
+#define	PM8058_TEMP_STAGE_HYSTERESIS	2000
+
+#define	PM8058_TEMP_THRESH_MIN		105000	/* Threshold Min: 105 C */
+#define	PM8058_TEMP_THRESH_STEP		5000	/* Threshold step: 5 C */
+
+/* TEMP_ALRM_PWM */
+#define	PM8058_TEMP_PWM_EN_MASK		0xC0
+#define	PM8058_TEMP_PWM_EN_SHIFT	6
+#define	PM8058_TEMP_PWM_PER_PRE_MASK	0x38
+#define	PM8058_TEMP_PWM_PER_PRE_SHIFT	3
+#define	PM8058_TEMP_PWM_PER_DIV_MASK	0x07
+#define	PM8058_TEMP_PWM_PER_DIV_SHIFT	0
+
+/* Trips: from critical to less critical */
+#define PM8058_TRIP_STAGE3	0
+#define PM8058_TRIP_STAGE2	1
+#define PM8058_TRIP_STAGE1	2
+#define PM8058_TRIP_NUM		3
+
+#define PM8058_TEMP_ADC_CH	CHANNEL_ADC_DIE_TEMP
+
+struct pm8058_tm_device {
+	struct pm8058_chip		*pm_chip;
+	struct thermal_zone_device	*tz_dev;
+	unsigned long			temp;
+	enum thermal_device_mode	mode;
+	unsigned int			thresh;
+	unsigned int			stage;
+	unsigned int			irq;
+	void				*adc_handle;
+};
+
+enum pmic_thermal_override_mode {
+	SOFTWARE_OVERRIDE_DISABLED = 0,
+	SOFTWARE_OVERRIDE_ENABLED,
+};
+
+static inline int pm8058_tm_read_ctrl(struct pm8058_chip *chip, u8 *reg)
+{
+	int rc;
+
+	rc = pm8058_read(chip, SSBI_REG_TEMP_ALRM_CTRL, reg, 1);
+	if (rc)
+		pr_err("%s: pm8058_read FAIL: rc=%d\n", __func__, rc);
+
+	return rc;
+}
+
+static inline int pm8058_tm_write_ctrl(struct pm8058_chip *chip, u8 reg)
+{
+	int rc;
+
+	rc = pm8058_write(chip, SSBI_REG_TEMP_ALRM_CTRL, &reg, 1);
+	if (rc)
+		pr_err("%s: pm8058_write FAIL: rc=%d\n", __func__, rc);
+
+	return rc;
+}
+
+static inline int pm8058_tm_write_pwm(struct pm8058_chip *chip, u8 reg)
+{
+	int rc;
+
+	rc = pm8058_write(chip, SSBI_REG_TEMP_ALRM_PWM, &reg, 1);
+	if (rc)
+		pr_err("%s: pm8058_write FAIL: rc=%d\n", __func__, rc);
+
+	return rc;
+}
+
+static inline int
+pm8058_tm_shutdown_override(struct pm8058_chip *chip,
+			    enum pmic_thermal_override_mode mode)
+{
+	int rc;
+	u8 reg;
+
+	rc = pm8058_tm_read_ctrl(chip, &reg);
+	if (rc < 0)
+		return rc;
+
+	reg &= ~(PM8058_TEMP_OVRD_MASK | PM8058_TEMP_STATUS_MASK);
+	if (mode == SOFTWARE_OVERRIDE_ENABLED)
+		reg |= (PM8058_TEMP_OVRD_ST3 | PM8058_TEMP_OVRD_ST2) &
+			PM8058_TEMP_OVRD_MASK;
+
+	rc = pm8058_tm_write_ctrl(chip, reg);
+
+	return rc;
+}
+
+static int pm8058_tz_get_temp(struct thermal_zone_device *thermal,
+			      unsigned long *temp)
+{
+	struct pm8058_tm_device *tm = thermal->devdata;
+	DECLARE_COMPLETION_ONSTACK(wait);
+	struct adc_chan_result adc_result = {
+		.physical = 0lu,
+	};
+	int rc;
+
+	if (!tm || !temp)
+		return -EINVAL;
+
+	*temp = tm->temp;
+
+	rc = adc_channel_request_conv(tm->adc_handle, &wait);
+	if (rc < 0) {
+		pr_err("%s: adc_channel_request_conv() failed, rc = %d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	wait_for_completion(&wait);
+
+	rc = adc_channel_read_result(tm->adc_handle, &adc_result);
+	if (rc < 0) {
+		pr_err("%s: adc_channel_read_result() failed, rc = %d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	*temp = adc_result.physical;
+	tm->temp = adc_result.physical;
+
+	return 0;
+}
+
+static int pm8058_tz_get_mode(struct thermal_zone_device *thermal,
+			      enum thermal_device_mode *mode)
+{
+	struct pm8058_tm_device *tm = thermal->devdata;
+
+	if (!tm || !mode)
+		return -EINVAL;
+
+	*mode = tm->mode;
+
+	return 0;
+}
+
+static int pm8058_tz_set_mode(struct thermal_zone_device *thermal,
+			      enum thermal_device_mode mode)
+{
+	struct pm8058_tm_device *tm = thermal->devdata;
+
+	if (!tm)
+		return -EINVAL;
+
+	if (mode != tm->mode) {
+		if (mode == THERMAL_DEVICE_ENABLED)
+			pm8058_tm_shutdown_override(tm->pm_chip,
+						    SOFTWARE_OVERRIDE_ENABLED);
+		else
+			pm8058_tm_shutdown_override(tm->pm_chip,
+						    SOFTWARE_OVERRIDE_DISABLED);
+	}
+	tm->mode = mode;
+
+	return 0;
+}
+
+static int pm8058_tz_get_trip_type(struct thermal_zone_device *thermal,
+				   int trip, enum thermal_trip_type *type)
+{
+	struct pm8058_tm_device *tm = thermal->devdata;
+
+	if (!tm || trip < 0 || !type)
+		return -EINVAL;
+
+	switch (trip) {
+	case PM8058_TRIP_STAGE3:
+		*type = THERMAL_TRIP_CRITICAL;
+		break;
+	case PM8058_TRIP_STAGE2:
+		*type = THERMAL_TRIP_HOT;
+		break;
+	case PM8058_TRIP_STAGE1:
+		*type = THERMAL_TRIP_HOT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int pm8058_tz_get_trip_temp(struct thermal_zone_device *thermal,
+				   int trip, unsigned long *temp)
+{
+	struct pm8058_tm_device *tm = thermal->devdata;
+	int thresh_temp;
+
+	if (!tm || trip < 0 || !temp)
+		return -EINVAL;
+
+	thresh_temp = tm->thresh * PM8058_TEMP_THRESH_STEP +
+		      PM8058_TEMP_THRESH_MIN;
+
+	switch (trip) {
+	case PM8058_TRIP_STAGE3:
+		thresh_temp += 2 * PM8058_TEMP_STAGE_STEP;
+		break;
+	case PM8058_TRIP_STAGE2:
+		thresh_temp += PM8058_TEMP_STAGE_STEP;
+		break;
+	case PM8058_TRIP_STAGE1:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	*temp = thresh_temp;
+
+	return 0;
+}
+
+static int pm8058_tz_get_crit_temp(struct thermal_zone_device *thermal,
+				   unsigned long *temp)
+{
+	struct pm8058_tm_device *tm = thermal->devdata;
+
+	if (!tm || !temp)
+		return -EINVAL;
+
+	*temp = tm->thresh * PM8058_TEMP_THRESH_STEP + PM8058_TEMP_THRESH_MIN +
+		2 * PM8058_TEMP_STAGE_STEP;
+
+	return 0;
+}
+
+static struct thermal_zone_device_ops pm8058_thermal_zone_ops = {
+	.get_temp = pm8058_tz_get_temp,
+	.get_mode = pm8058_tz_get_mode,
+	.set_mode = pm8058_tz_set_mode,
+	.get_trip_type = pm8058_tz_get_trip_type,
+	.get_trip_temp = pm8058_tz_get_trip_temp,
+	.get_crit_temp = pm8058_tz_get_crit_temp,
+};
+
+static irqreturn_t pm8058_tm_isr(int irq, void *data)
+{
+	struct pm8058_tm_device *tm = data;
+	int rc;
+	u8 reg;
+
+	rc = pm8058_tm_read_ctrl(tm->pm_chip, &reg);
+	if (rc < 0)
+		goto isr_handled;
+
+	tm->stage = (reg & PM8058_TEMP_STATUS_MASK) >> PM8058_TEMP_STATUS_SHIFT;
+	tm->thresh = (reg & PM8058_TEMP_THRESH_MASK) >>
+			PM8058_TEMP_THRESH_SHIFT;
+
+	if (reg & (PM8058_TEMP_ST2_SD | PM8058_TEMP_ST3_SD)) {
+		reg &= ~(PM8058_TEMP_ST2_SD | PM8058_TEMP_ST3_SD |
+			 PM8058_TEMP_STATUS_MASK);
+		pm8058_tm_write_ctrl(tm->pm_chip, reg);
+	}
+
+	thermal_zone_device_update(tm->tz_dev);
+
+	/* Notify user space */
+	if (tm->mode == THERMAL_DEVICE_ENABLED)
+		kobject_uevent(&tm->tz_dev->device.kobj, KOBJ_CHANGE);
+
+isr_handled:
+	return IRQ_HANDLED;
+}
+
+static int pm8058_tm_init_reg(struct pm8058_tm_device *tm)
+{
+	int rc;
+	u8 reg;
+
+	rc = pm8058_tm_read_ctrl(tm->pm_chip, &reg);
+	if (rc < 0)
+		return rc;
+
+	tm->stage = (reg & PM8058_TEMP_STATUS_MASK) >> PM8058_TEMP_STATUS_SHIFT;
+	tm->temp = 0;
+
+	/* Use temperature threshold set 0: (105, 125, 145) */
+	tm->thresh = 0;
+	reg = (tm->thresh << PM8058_TEMP_THRESH_SHIFT) &
+	      PM8058_TEMP_THRESH_MASK;
+	rc = pm8058_tm_write_ctrl(tm->pm_chip, reg);
+	if (rc < 0)
+		return rc;
+
+	/*
+	 * Set the PMIC alarm module PWM to have a frequency of 8 Hz. This
+	 * helps cut down on the number of unnecessary interrupts fired when
+	 * changing between thermal stages.  Also, enable the over temperature
+	 * PWM whenever the PMIC is enabled.
+	 */
+	reg =  1 << PM8058_TEMP_PWM_EN_SHIFT |
+	       3 << PM8058_TEMP_PWM_PER_PRE_SHIFT |
+	       3 << PM8058_TEMP_PWM_PER_DIV_SHIFT;
+
+	rc = pm8058_tm_write_pwm(tm->pm_chip, reg);
+
+	return rc;
+}
+
+static int __devinit pmic8058_tm_probe(struct platform_device *pdev)
+{
+	DECLARE_COMPLETION_ONSTACK(wait);
+	struct pm8058_tm_device *tmdev;
+	struct pm8058_chip *pm_chip;
+	unsigned int irq;
+	int rc;
+
+	pm_chip = dev_get_drvdata(pdev->dev.parent);
+	if (pm_chip == NULL) {
+		pr_err("%s: no driver data passed in.\n", __func__);
+		return -EFAULT;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (!irq) {
+		pr_err("%s: no IRQ passed in.\n", __func__);
+		return -EFAULT;
+	}
+
+	tmdev = kzalloc(sizeof *tmdev, GFP_KERNEL);
+	if (tmdev == NULL) {
+		pr_err("%s: kzalloc() failed.\n", __func__);
+		return -ENOMEM;
+	}
+
+	rc = adc_channel_open(PM8058_TEMP_ADC_CH, &(tmdev->adc_handle));
+	if (rc < 0) {
+		pr_err("%s: adc_channel_open() failed.\n", __func__);
+		kfree(tmdev);
+		return rc;
+	}
+
+	/* calibrate the die temperature sensor */
+	if (adc_calib_request(tmdev->adc_handle, &wait) == CALIB_STARTED)
+		wait_for_completion(&wait);
+
+	tmdev->pm_chip = pm_chip;
+	tmdev->tz_dev = thermal_zone_device_register("pm8058_tz",
+						     PM8058_TRIP_NUM, tmdev,
+						     &pm8058_thermal_zone_ops,
+						     0, 0, 0, 0);
+	if (tmdev->tz_dev == NULL) {
+		pr_err("%s: thermal_zone_device_register() failed.\n",
+		       __func__);
+		adc_channel_close(tmdev->adc_handle);
+		kfree(tmdev);
+		return -ENODEV;
+	}
+
+	rc = pm8058_tm_init_reg(tmdev);
+	pm8058_tm_shutdown_override(tmdev->pm_chip, SOFTWARE_OVERRIDE_DISABLED);
+	if (rc < 0) {
+		thermal_zone_device_unregister(tmdev->tz_dev);
+		adc_channel_close(tmdev->adc_handle);
+		kfree(tmdev);
+		return rc;
+	}
+
+	/* start in HW control, switch to SW control when user changes mode */
+	tmdev->mode = THERMAL_DEVICE_DISABLED;
+	thermal_zone_device_update(tmdev->tz_dev);
+
+	platform_set_drvdata(pdev, tmdev);
+
+	rc = request_threaded_irq(irq, NULL, pm8058_tm_isr,
+			 IRQF_TRIGGER_RISING | IRQF_DISABLED,
+			 "pm8058-tm-irq", tmdev);
+	if (rc < 0) {
+		pr_err("%s: request_irq(%d) FAIL: %d\n", __func__, irq, rc);
+		thermal_zone_device_unregister(tmdev->tz_dev);
+		platform_set_drvdata(pdev, tmdev->pm_chip);
+		adc_channel_close(tmdev->adc_handle);
+		kfree(tmdev);
+		return rc;
+	}
+	tmdev->irq = irq;
+
+	pr_notice("%s: OK\n", __func__);
+	return 0;
+}
+
+static int __devexit pmic8058_tm_remove(struct platform_device *pdev)
+{
+	struct pm8058_tm_device *tmdev = platform_get_drvdata(pdev);
+
+	thermal_zone_device_unregister(tmdev->tz_dev);
+	platform_set_drvdata(pdev, tmdev->pm_chip);
+	pm8058_tm_shutdown_override(tmdev->pm_chip, SOFTWARE_OVERRIDE_DISABLED);
+	adc_channel_close(tmdev->adc_handle);
+	free_irq(tmdev->irq, tmdev);
+	kfree(tmdev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int pmic8058_tm_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct pm8058_tm_device *tm = platform_get_drvdata(pdev);
+
+	/* Clear override bits in suspend to allow hardware control */
+	pm8058_tm_shutdown_override(tm->pm_chip, SOFTWARE_OVERRIDE_DISABLED);
+
+	return 0;
+}
+
+static int pmic8058_tm_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct pm8058_tm_device *tm = platform_get_drvdata(pdev);
+
+	/* Override hardware actions so software can control */
+	if (tm->mode == THERMAL_DEVICE_ENABLED)
+		pm8058_tm_shutdown_override(tm->pm_chip,
+					    SOFTWARE_OVERRIDE_ENABLED);
+
+	return 0;
+}
+
+static const struct dev_pm_ops pmic8058_tm_pm_ops = {
+	.suspend = pmic8058_tm_suspend,
+	.resume = pmic8058_tm_resume,
+};
+
+#define PM8058_TM_PM_OPS	(&pmic8058_tm_pm_ops)
+#else
+#define PM8058_TM_PM_OPS	NULL
+#endif
+
+static struct platform_driver pmic8058_tm_driver = {
+	.probe	= pmic8058_tm_probe,
+	.remove	= __devexit_p(pmic8058_tm_remove),
+	.driver	= {
+		.name = "pm8058-tm",
+		.owner = THIS_MODULE,
+		.pm = PM8058_TM_PM_OPS,
+	},
+};
+
+static int __init pm8058_tm_init(void)
+{
+	return platform_driver_register(&pmic8058_tm_driver);
+}
+
+static void __exit pm8058_tm_exit(void)
+{
+	platform_driver_unregister(&pmic8058_tm_driver);
+}
+
+module_init(pm8058_tm_init);
+module_exit(pm8058_tm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC8058 Thermal Manager driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:pmic8058-tm");
diff --git a/drivers/thermal/pmic8901-tm.c b/drivers/thermal/pmic8901-tm.c
new file mode 100644
index 0000000..0ff5788
--- /dev/null
+++ b/drivers/thermal/pmic8901-tm.c
@@ -0,0 +1,594 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * Qualcomm PMIC8901 Thermal Manager driver
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/thermal.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/mfd/pmic8901.h>
+
+/* PMIC8901 TEMP_ALRM registers */
+#define	SSBI_REG_TEMP_ALRM_CTRL		0x23
+#define	SSBI_REG_TEMP_ALRM_PWM		0x24
+
+/* TEMP_ALRM_CTRL */
+#define	PM8901_TEMP_ST3_SD		0x80
+#define	PM8901_TEMP_ST2_SD		0x40
+#define	PM8901_TEMP_STATUS_MASK		0x30
+#define	PM8901_TEMP_STATUS_SHIFT	4
+#define	PM8901_TEMP_THRESH_MASK		0x0C
+#define	PM8901_TEMP_THRESH_SHIFT	2
+#define	PM8901_TEMP_OVRD_ST3		0x02
+#define	PM8901_TEMP_OVRD_ST2		0x01
+#define	PM8901_TEMP_OVRD_MASK		0x03
+
+#define	PM8901_TEMP_STAGE_STEP		20000	/* Stage step: 20 C */
+#define	PM8901_TEMP_STAGE_HYSTERESIS	2000
+
+#define	PM8901_TEMP_THRESH_MIN		105000	/* Threshold Min: 105 C */
+#define	PM8901_TEMP_THRESH_STEP		5000	/* Threshold step: 5 C */
+
+/* TEMP_ALRM_PWM */
+#define	PM8901_TEMP_PWM_EN_MASK		0xC0
+#define	PM8901_TEMP_PWM_EN_SHIFT	6
+#define	PM8901_TEMP_PWM_PER_PRE_MASK	0x38
+#define	PM8901_TEMP_PWM_PER_PRE_SHIFT	3
+#define	PM8901_TEMP_PWM_PER_DIV_MASK	0x07
+#define	PM8901_TEMP_PWM_PER_DIV_SHIFT	0
+
+/* Trips: from critical to less critical */
+#define PM8901_TRIP_STAGE3	0
+#define PM8901_TRIP_STAGE2	1
+#define PM8901_TRIP_STAGE1	2
+#define PM8901_TRIP_NUM		3
+
+/* Used because there is no means to read the die temperature */
+#define DEFAULT_NO_ADC_TEMP	37000
+
+struct pm8901_tm_device {
+	struct pm8901_chip		*pm_chip;
+	struct thermal_zone_device	*tz_dev;
+	unsigned long			temp;
+	enum thermal_device_mode	mode;
+	unsigned int			thresh;
+	unsigned int			stage;
+	unsigned int			irq;
+	unsigned int			hi_irq;
+};
+
+enum pmic_thermal_override_mode {
+	SOFTWARE_OVERRIDE_DISABLED = 0,
+	SOFTWARE_OVERRIDE_ENABLED,
+};
+
+static inline int pm8901_tm_read_ctrl(struct pm8901_chip *chip, u8 *reg)
+{
+	int rc;
+
+	rc = pm8901_read(chip, SSBI_REG_TEMP_ALRM_CTRL, reg, 1);
+	if (rc)
+		pr_err("%s: pm8901_read FAIL: rc=%d\n", __func__, rc);
+
+	return rc;
+}
+
+static inline int pm8901_tm_write_ctrl(struct pm8901_chip *chip, u8 reg)
+{
+	int rc;
+
+	rc = pm8901_write(chip, SSBI_REG_TEMP_ALRM_CTRL, &reg, 1);
+	if (rc)
+		pr_err("%s: pm8901_write FAIL: rc=%d\n", __func__, rc);
+
+	return rc;
+}
+
+static inline int pm8901_tm_read_pwm(struct pm8901_chip *chip, u8 *reg)
+{
+	int rc;
+
+	rc = pm8901_read(chip, SSBI_REG_TEMP_ALRM_PWM, reg, 1);
+	if (rc)
+		pr_err("%s: pm8901_read FAIL: rc=%d\n", __func__, rc);
+
+	return rc;
+}
+
+static inline int pm8901_tm_write_pwm(struct pm8901_chip *chip, u8 reg)
+{
+	int rc;
+
+	rc = pm8901_write(chip, SSBI_REG_TEMP_ALRM_PWM, &reg, 1);
+	if (rc)
+		pr_err("%s: pm8901_write FAIL: rc=%d\n", __func__, rc);
+
+	return rc;
+}
+
+static inline int
+pm8901_tm_shutdown_override(struct pm8901_chip *chip,
+			    enum pmic_thermal_override_mode mode)
+{
+	int rc;
+	u8 reg;
+
+	rc = pm8901_tm_read_ctrl(chip, &reg);
+	if (rc < 0)
+		return rc;
+
+	reg &= ~(PM8901_TEMP_OVRD_MASK | PM8901_TEMP_STATUS_MASK);
+	if (mode == SOFTWARE_OVERRIDE_ENABLED)
+		reg |= (PM8901_TEMP_OVRD_ST3 | PM8901_TEMP_OVRD_ST2) &
+			PM8901_TEMP_OVRD_MASK;
+
+	rc = pm8901_tm_write_ctrl(chip, reg);
+
+	return rc;
+}
+
+/*
+ * This function initializes the internal temperature value based on only the
+ * current thermal stage and threshold.
+ */
+static int pm8901_tm_init_temp(struct pm8901_tm_device *tm)
+{
+	int rc;
+	u8 reg;
+
+	rc = pm8901_tm_read_ctrl(tm->pm_chip, &reg);
+	if (rc < 0)
+		return rc;
+
+	tm->stage = (reg & PM8901_TEMP_STATUS_MASK) >> PM8901_TEMP_STATUS_SHIFT;
+	tm->thresh = (reg & PM8901_TEMP_THRESH_MASK) >>
+			PM8901_TEMP_THRESH_SHIFT;
+
+	if (tm->stage)
+		tm->temp = tm->thresh * PM8901_TEMP_THRESH_STEP +
+			   (tm->stage - 1) * PM8901_TEMP_STAGE_STEP +
+			   PM8901_TEMP_THRESH_MIN;
+	else
+		tm->temp = DEFAULT_NO_ADC_TEMP;
+
+	return 0;
+}
+
+/*
+ * This function updates the internal temperature value based on the
+ * current thermal stage and threshold as well as the previous stage
+ */
+static int pm8901_tm_update_temp(struct pm8901_tm_device *tm)
+{
+	unsigned int stage;
+	int rc;
+	u8 reg;
+
+	rc = pm8901_tm_read_ctrl(tm->pm_chip, &reg);
+	if (rc < 0)
+		return rc;
+
+	stage = (reg & PM8901_TEMP_STATUS_MASK) >> PM8901_TEMP_STATUS_SHIFT;
+	tm->thresh = (reg & PM8901_TEMP_THRESH_MASK) >>
+			PM8901_TEMP_THRESH_SHIFT;
+
+	if (stage > tm->stage) {
+		/* increasing stage, use lower bound */
+		tm->temp = (stage-1) * PM8901_TEMP_STAGE_STEP +
+			   tm->thresh * PM8901_TEMP_THRESH_STEP +
+			   PM8901_TEMP_STAGE_HYSTERESIS +
+			   PM8901_TEMP_THRESH_MIN;
+	} else if (stage < tm->stage) {
+		/* decreasing stage, use upper bound */
+		tm->temp = stage * PM8901_TEMP_STAGE_STEP +
+			   tm->thresh * PM8901_TEMP_THRESH_STEP -
+			   PM8901_TEMP_STAGE_HYSTERESIS +
+			   PM8901_TEMP_THRESH_MIN;
+	}
+
+	tm->stage = stage;
+
+	return 0;
+}
+
+static int pm8901_tz_get_temp(struct thermal_zone_device *thermal,
+			      unsigned long *temp)
+{
+	struct pm8901_tm_device *tm = thermal->devdata;
+	int rc;
+
+	if (!tm || !temp)
+		return -EINVAL;
+
+	rc = pm8901_tm_update_temp(tm);
+	if (rc < 0)
+		return rc;
+
+	*temp = tm->temp;
+
+	return 0;
+}
+
+static int pm8901_tz_get_mode(struct thermal_zone_device *thermal,
+			      enum thermal_device_mode *mode)
+{
+	struct pm8901_tm_device *tm = thermal->devdata;
+
+	if (!tm || !mode)
+		return -EINVAL;
+
+	*mode = tm->mode;
+
+	return 0;
+}
+
+static int pm8901_tz_set_mode(struct thermal_zone_device *thermal,
+			      enum thermal_device_mode mode)
+{
+	struct pm8901_tm_device *tm = thermal->devdata;
+
+	if (!tm)
+		return -EINVAL;
+
+	if (mode != tm->mode) {
+		pr_info("%s: mode: %d --> %d\n", __func__, tm->mode, mode);
+
+		if (mode == THERMAL_DEVICE_ENABLED)
+			pm8901_tm_shutdown_override(tm->pm_chip,
+						    SOFTWARE_OVERRIDE_ENABLED);
+		else
+			pm8901_tm_shutdown_override(tm->pm_chip,
+						    SOFTWARE_OVERRIDE_DISABLED);
+	}
+	tm->mode = mode;
+
+	return 0;
+}
+
+static int pm8901_tz_get_trip_type(struct thermal_zone_device *thermal,
+				   int trip, enum thermal_trip_type *type)
+{
+	struct pm8901_tm_device *tm = thermal->devdata;
+
+	if (!tm || trip < 0 || !type)
+		return -EINVAL;
+
+	switch (trip) {
+	case PM8901_TRIP_STAGE3:
+		*type = THERMAL_TRIP_CRITICAL;
+		break;
+	case PM8901_TRIP_STAGE2:
+		*type = THERMAL_TRIP_HOT;
+		break;
+	case PM8901_TRIP_STAGE1:
+		*type = THERMAL_TRIP_HOT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int pm8901_tz_get_trip_temp(struct thermal_zone_device *thermal,
+				   int trip, unsigned long *temp)
+{
+	struct pm8901_tm_device *tm = thermal->devdata;
+	int thresh_temp;
+
+	if (!tm || trip < 0 || !temp)
+		return -EINVAL;
+
+	thresh_temp = tm->thresh * PM8901_TEMP_THRESH_STEP +
+		      PM8901_TEMP_THRESH_MIN;
+
+	switch (trip) {
+	case PM8901_TRIP_STAGE3:
+		thresh_temp += 2 * PM8901_TEMP_STAGE_STEP;
+		break;
+	case PM8901_TRIP_STAGE2:
+		thresh_temp += PM8901_TEMP_STAGE_STEP;
+		break;
+	case PM8901_TRIP_STAGE1:
+		break;
+	default:
+		return -EINVAL;
+	}
+	*temp = thresh_temp;
+
+	return 0;
+}
+
+static int pm8901_tz_get_crit_temp(struct thermal_zone_device *thermal,
+				   unsigned long *temp)
+{
+	struct pm8901_tm_device *tm = thermal->devdata;
+
+	if (!tm || !temp)
+		return -EINVAL;
+
+	*temp = tm->thresh * PM8901_TEMP_THRESH_STEP +
+			PM8901_TEMP_THRESH_MIN + 2 * PM8901_TEMP_STAGE_STEP;
+
+	return 0;
+}
+
+static struct thermal_zone_device_ops pm8901_thermal_zone_ops = {
+	.get_temp = pm8901_tz_get_temp,
+	.get_mode = pm8901_tz_get_mode,
+	.set_mode = pm8901_tz_set_mode,
+	.get_trip_type = pm8901_tz_get_trip_type,
+	.get_trip_temp = pm8901_tz_get_trip_temp,
+	.get_crit_temp = pm8901_tz_get_crit_temp,
+};
+
+static irqreturn_t pm8901_tm_isr(int irq, void *data)
+{
+	struct pm8901_tm_device *tm = data;
+	int rc;
+	u8 reg;
+
+	rc = pm8901_tm_update_temp(tm);
+	if (rc < 0)
+		goto isr_handled;
+
+	rc = pm8901_tm_read_ctrl(tm->pm_chip, &reg);
+	if (rc < 0)
+		goto isr_handled;
+
+	pr_info("%s: Temp Alarm - stage=%u, threshold=%u, temp=%lu\n",
+		__func__, tm->stage, tm->thresh, tm->temp);
+
+	if (reg & (PM8901_TEMP_ST2_SD | PM8901_TEMP_ST3_SD)) {
+		reg &= ~(PM8901_TEMP_ST2_SD | PM8901_TEMP_ST3_SD |
+			 PM8901_TEMP_STATUS_MASK);
+
+		pm8901_tm_write_ctrl(tm->pm_chip, reg);
+	}
+
+	thermal_zone_device_update(tm->tz_dev);
+
+	/* Notify user space */
+	if (tm->mode == THERMAL_DEVICE_ENABLED)
+		kobject_uevent(&tm->tz_dev->device.kobj, KOBJ_CHANGE);
+
+isr_handled:
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t pm8901_tm_isr1(int irq, void *data)
+{
+	struct pm8901_tm_device *tm = data;
+	irqreturn_t rc;
+
+	disable_irq(tm->hi_irq);
+	rc = pm8901_tm_isr(irq, data);
+	enable_irq(tm->hi_irq);
+
+	return rc;
+}
+
+static irqreturn_t pm8901_tm_isr2(int irq, void *data)
+{
+	struct pm8901_tm_device *tm = data;
+	irqreturn_t rc;
+
+	disable_irq(tm->irq);
+	rc = pm8901_tm_isr(irq, data);
+	enable_irq(tm->irq);
+
+	return rc;
+}
+
+static int pm8901_tm_init_reg(struct pm8901_tm_device *tm)
+{
+	int rc;
+	u8 reg;
+
+	rc = pm8901_tm_init_temp(tm);
+	if (rc < 0)
+		return rc;
+
+	/* Use temperature threshold set 0: (105, 125, 145) */
+	tm->thresh = 0;
+	reg = (tm->thresh << PM8901_TEMP_THRESH_SHIFT) &
+	      PM8901_TEMP_THRESH_MASK;
+	rc = pm8901_tm_write_ctrl(tm->pm_chip, reg);
+	if (rc < 0)
+		return rc;
+
+	/*
+	 * Set the PMIC alarm module PWM to have a frequency of 8 Hz. This
+	 * helps cut down on the number of unnecessary interrupts fired when
+	 * changing between thermal stages.  Also, enable the over temperature
+	 * PWM whenever the PMIC is enabled.
+	 */
+	reg =  1 << PM8901_TEMP_PWM_EN_SHIFT |
+	       3 << PM8901_TEMP_PWM_PER_PRE_SHIFT |
+	       3 << PM8901_TEMP_PWM_PER_DIV_SHIFT;
+
+	rc = pm8901_tm_write_pwm(tm->pm_chip, reg);
+
+	return rc;
+}
+
+static int __devinit pmic8901_tm_probe(struct platform_device *pdev)
+{
+	struct pm8901_tm_device	*tmdev;
+	struct pm8901_chip *pm_chip;
+	unsigned int irq, hi_irq;
+	int rc;
+
+	pm_chip = dev_get_drvdata(pdev->dev.parent);
+	if (pm_chip == NULL) {
+		pr_err("%s: no driver data passed in.\n", __func__);
+		return -EFAULT;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (!irq) {
+		pr_err("%s: no IRQ passed in.\n", __func__);
+		return -EFAULT;
+	}
+	hi_irq = platform_get_irq(pdev, 1);
+	if (!hi_irq) {
+		pr_err("%s: no HI IRQ passed in.\n", __func__);
+		return -EFAULT;
+	}
+
+	tmdev = kzalloc(sizeof *tmdev, GFP_KERNEL);
+	if (tmdev == NULL) {
+		pr_err("%s: kzalloc() failed.\n", __func__);
+		return -ENOMEM;
+	}
+
+	tmdev->pm_chip = pm_chip;
+	tmdev->tz_dev = thermal_zone_device_register("pm8901_tz",
+						     PM8901_TRIP_NUM, tmdev,
+						     &pm8901_thermal_zone_ops,
+						     0, 0, 0, 0);
+	if (tmdev->tz_dev == NULL) {
+		pr_err("%s: thermal_zone_device_register() failed.\n",
+		       __func__);
+		kfree(tmdev);
+		return -ENODEV;
+	}
+
+	rc = pm8901_tm_init_reg(tmdev);
+	pm8901_tm_shutdown_override(tmdev->pm_chip, SOFTWARE_OVERRIDE_DISABLED);
+	if (rc < 0) {
+		thermal_zone_device_unregister(tmdev->tz_dev);
+		kfree(tmdev);
+		return rc;
+	}
+
+	/* start in HW control, switch to SW control when user changes mode */
+	tmdev->mode = THERMAL_DEVICE_DISABLED;
+	thermal_zone_device_update(tmdev->tz_dev);
+
+	platform_set_drvdata(pdev, tmdev);
+
+	rc = request_threaded_irq(irq, pm8901_tm_isr1, NULL,
+			 IRQF_TRIGGER_RISING | IRQF_DISABLED,
+			 "pm8901-tm-irq", tmdev);
+	if (rc < 0) {
+		pr_err("%s: request_threaded_irq(%d) FAIL: %d\n",
+		       __func__, irq, rc);
+
+		thermal_zone_device_unregister(tmdev->tz_dev);
+		platform_set_drvdata(pdev, tmdev->pm_chip);
+		kfree(tmdev);
+		return -ENODEV;
+	}
+	tmdev->irq = irq;
+
+	rc = request_threaded_irq(hi_irq, pm8901_tm_isr2, NULL,
+			 IRQF_TRIGGER_RISING | IRQF_DISABLED,
+			 "pm8901-tm-irq2", tmdev);
+	if (rc < 0) {
+		pr_err("%s: request_threaded_irq(%d) FAIL: %d\n",
+		       __func__, hi_irq, rc);
+
+		free_irq(irq, tmdev);
+		thermal_zone_device_unregister(tmdev->tz_dev);
+		platform_set_drvdata(pdev, tmdev->pm_chip);
+		kfree(tmdev);
+		return -ENODEV;
+	}
+	tmdev->hi_irq = hi_irq;
+
+	pr_notice("%s: OK\n", __func__);
+	return 0;
+}
+
+static int __devexit pmic8901_tm_remove(struct platform_device *pdev)
+{
+	struct pm8901_tm_device *tmdev = platform_get_drvdata(pdev);
+
+	free_irq(tmdev->hi_irq, tmdev);
+	free_irq(tmdev->irq, tmdev);
+	thermal_zone_device_unregister(tmdev->tz_dev);
+	platform_set_drvdata(pdev, tmdev->pm_chip);
+	pm8901_tm_shutdown_override(tmdev->pm_chip, SOFTWARE_OVERRIDE_DISABLED);
+	kfree(tmdev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int pmic8901_tm_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct pm8901_tm_device *tm = platform_get_drvdata(pdev);
+
+	pm8901_tm_shutdown_override(tm->pm_chip, SOFTWARE_OVERRIDE_DISABLED);
+
+	return 0;
+}
+
+static int pmic8901_tm_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct pm8901_tm_device *tm = platform_get_drvdata(pdev);
+
+	pm8901_tm_init_temp(tm);
+
+	if (tm->mode == THERMAL_DEVICE_ENABLED)
+		pm8901_tm_shutdown_override(tm->pm_chip,
+					    SOFTWARE_OVERRIDE_ENABLED);
+
+	return 0;
+}
+
+static const struct dev_pm_ops pmic8901_tm_pm_ops = {
+	.suspend = pmic8901_tm_suspend,
+	.resume = pmic8901_tm_resume,
+};
+
+#define PM8901_TM_PM_OPS	(&pmic8901_tm_pm_ops)
+#else
+#define PM8901_TM_PM_OPS	NULL
+#endif
+
+static struct platform_driver pmic8901_tm_driver = {
+	.probe		= pmic8901_tm_probe,
+	.remove		= __devexit_p(pmic8901_tm_remove),
+	.driver		= {
+		.name = "pm8901-tm",
+		.owner = THIS_MODULE,
+		.pm = PM8901_TM_PM_OPS,
+	},
+};
+
+static int __init pm8901_tm_init(void)
+{
+	return platform_driver_register(&pmic8901_tm_driver);
+}
+
+static void __exit pm8901_tm_exit(void)
+{
+	platform_driver_unregister(&pmic8901_tm_driver);
+}
+
+module_init(pm8901_tm_init);
+module_exit(pm8901_tm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC8901 Thermal Manager driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:pmic8901-tm");
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 0b1c82a..e0d8ef7 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -188,6 +188,12 @@
 		return sprintf(buf, "critical\n");
 	case THERMAL_TRIP_HOT:
 		return sprintf(buf, "hot\n");
+	case THERMAL_TRIP_CONFIGURABLE_HI:
+		return sprintf(buf, "configurable_hi\n");
+	case THERMAL_TRIP_CONFIGURABLE_LOW:
+		return sprintf(buf, "configurable_low\n");
+	case THERMAL_TRIP_CRITICAL_LOW:
+		return sprintf(buf, "critical_low\n");
 	case THERMAL_TRIP_PASSIVE:
 		return sprintf(buf, "passive\n");
 	case THERMAL_TRIP_ACTIVE:
@@ -198,6 +204,34 @@
 }
 
 static ssize_t
+trip_point_type_activate(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct thermal_zone_device *tz = to_thermal_zone(dev);
+	int trip, result;
+
+	if (!tz->ops->activate_trip_type)
+		return -EPERM;
+
+	if (!sscanf(attr->attr.name, "trip_point_%d_type", &trip))
+		return -EINVAL;
+
+	if (!strncmp(buf, "enabled", sizeof("enabled")))
+		result = tz->ops->activate_trip_type(tz, trip,
+					THERMAL_TRIP_ACTIVATION_ENABLED);
+	else if (!strncmp(buf, "disabled", sizeof("disabled")))
+		result = tz->ops->activate_trip_type(tz, trip,
+					THERMAL_TRIP_ACTIVATION_DISABLED);
+	else
+		result = -EINVAL;
+
+	if (result)
+		return result;
+
+	return count;
+}
+
+static ssize_t
 trip_point_temp_show(struct device *dev, struct device_attribute *attr,
 		     char *buf)
 {
@@ -220,6 +254,30 @@
 }
 
 static ssize_t
+trip_point_temp_set(struct device *dev, struct device_attribute *attr,
+		     const char *buf, size_t count)
+{
+	struct thermal_zone_device *tz = to_thermal_zone(dev);
+	int trip, ret;
+	long temperature;
+
+	if (!tz->ops->set_trip_temp)
+		return -EPERM;
+
+	if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip))
+		return -EINVAL;
+
+	if (!sscanf(buf, "%ld", &temperature))
+		return -EINVAL;
+
+	ret = tz->ops->set_trip_temp(tz, trip, temperature);
+	if (ret)
+		return ret;
+
+	return count;
+}
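+
+/*
+ * Illustrative usage (zone path and trip index are examples, not taken
+ * from this change): a configurable trip can now be managed from
+ * userspace, e.g.
+ *   echo -n enabled > /sys/class/thermal/thermal_zone0/trip_point_1_type
+ *   echo 60000 > /sys/class/thermal/thermal_zone0/trip_point_1_temp
+ * provided the zone's ops implement activate_trip_type and set_trip_temp.
+ */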
+
+static ssize_t
 passive_store(struct device *dev, struct device_attribute *attr,
 		    const char *buf, size_t count)
 {
@@ -287,30 +345,54 @@
 		   passive_store);
 
 static struct device_attribute trip_point_attrs[] = {
-	__ATTR(trip_point_0_type, 0444, trip_point_type_show, NULL),
-	__ATTR(trip_point_0_temp, 0444, trip_point_temp_show, NULL),
-	__ATTR(trip_point_1_type, 0444, trip_point_type_show, NULL),
-	__ATTR(trip_point_1_temp, 0444, trip_point_temp_show, NULL),
-	__ATTR(trip_point_2_type, 0444, trip_point_type_show, NULL),
-	__ATTR(trip_point_2_temp, 0444, trip_point_temp_show, NULL),
-	__ATTR(trip_point_3_type, 0444, trip_point_type_show, NULL),
-	__ATTR(trip_point_3_temp, 0444, trip_point_temp_show, NULL),
-	__ATTR(trip_point_4_type, 0444, trip_point_type_show, NULL),
-	__ATTR(trip_point_4_temp, 0444, trip_point_temp_show, NULL),
-	__ATTR(trip_point_5_type, 0444, trip_point_type_show, NULL),
-	__ATTR(trip_point_5_temp, 0444, trip_point_temp_show, NULL),
-	__ATTR(trip_point_6_type, 0444, trip_point_type_show, NULL),
-	__ATTR(trip_point_6_temp, 0444, trip_point_temp_show, NULL),
-	__ATTR(trip_point_7_type, 0444, trip_point_type_show, NULL),
-	__ATTR(trip_point_7_temp, 0444, trip_point_temp_show, NULL),
-	__ATTR(trip_point_8_type, 0444, trip_point_type_show, NULL),
-	__ATTR(trip_point_8_temp, 0444, trip_point_temp_show, NULL),
-	__ATTR(trip_point_9_type, 0444, trip_point_type_show, NULL),
-	__ATTR(trip_point_9_temp, 0444, trip_point_temp_show, NULL),
-	__ATTR(trip_point_10_type, 0444, trip_point_type_show, NULL),
-	__ATTR(trip_point_10_temp, 0444, trip_point_temp_show, NULL),
-	__ATTR(trip_point_11_type, 0444, trip_point_type_show, NULL),
-	__ATTR(trip_point_11_temp, 0444, trip_point_temp_show, NULL),
+	__ATTR(trip_point_0_type, 0644, trip_point_type_show,
+					trip_point_type_activate),
+	__ATTR(trip_point_0_temp, 0644, trip_point_temp_show,
+					trip_point_temp_set),
+	__ATTR(trip_point_1_type, 0644, trip_point_type_show,
+					trip_point_type_activate),
+	__ATTR(trip_point_1_temp, 0644, trip_point_temp_show,
+					trip_point_temp_set),
+	__ATTR(trip_point_2_type, 0644, trip_point_type_show,
+					trip_point_type_activate),
+	__ATTR(trip_point_2_temp, 0644, trip_point_temp_show,
+					trip_point_temp_set),
+	__ATTR(trip_point_3_type, 0644, trip_point_type_show,
+					trip_point_type_activate),
+	__ATTR(trip_point_3_temp, 0644, trip_point_temp_show,
+					trip_point_temp_set),
+	__ATTR(trip_point_4_type, 0644, trip_point_type_show,
+					trip_point_type_activate),
+	__ATTR(trip_point_4_temp, 0644, trip_point_temp_show,
+					trip_point_temp_set),
+	__ATTR(trip_point_5_type, 0644, trip_point_type_show,
+					trip_point_type_activate),
+	__ATTR(trip_point_5_temp, 0644, trip_point_temp_show,
+					trip_point_temp_set),
+	__ATTR(trip_point_6_type, 0644, trip_point_type_show,
+					trip_point_type_activate),
+	__ATTR(trip_point_6_temp, 0644, trip_point_temp_show,
+					trip_point_temp_set),
+	__ATTR(trip_point_7_type, 0644, trip_point_type_show,
+					trip_point_type_activate),
+	__ATTR(trip_point_7_temp, 0644, trip_point_temp_show,
+					trip_point_temp_set),
+	__ATTR(trip_point_8_type, 0644, trip_point_type_show,
+					trip_point_type_activate),
+	__ATTR(trip_point_8_temp, 0644, trip_point_temp_show,
+					trip_point_temp_set),
+	__ATTR(trip_point_9_type, 0644, trip_point_type_show,
+					trip_point_type_activate),
+	__ATTR(trip_point_9_temp, 0644, trip_point_temp_show,
+					trip_point_temp_set),
+	__ATTR(trip_point_10_type, 0644, trip_point_type_show,
+					trip_point_type_activate),
+	__ATTR(trip_point_10_temp, 0644, trip_point_temp_show,
+					trip_point_temp_set),
+	__ATTR(trip_point_11_type, 0644, trip_point_type_show,
+					trip_point_type_activate),
+	__ATTR(trip_point_11_temp, 0644, trip_point_temp_show,
+					trip_point_temp_set),
 };
 
 #define TRIP_POINT_ATTR_ADD(_dev, _index, result)     \
@@ -992,6 +1074,29 @@
 				if (tz->ops->notify)
 					tz->ops->notify(tz, count, trip_type);
 			break;
+		case THERMAL_TRIP_CONFIGURABLE_HI:
+			if (temp >= trip_temp)
+				if (tz->ops->notify)
+					tz->ops->notify(tz, count, trip_type);
+			break;
+		case THERMAL_TRIP_CONFIGURABLE_LOW:
+			if (temp <= trip_temp)
+				if (tz->ops->notify)
+					tz->ops->notify(tz, count, trip_type);
+			break;
+		case THERMAL_TRIP_CRITICAL_LOW:
+			if (temp <= trip_temp) {
+				if (tz->ops->notify)
+					ret = tz->ops->notify(tz, count,
+								trip_type);
+				if (!ret) {
+					printk(KERN_EMERG
+					       "Critical temperature reached (%ld C), shutting down.\n",
+					       temp/1000);
+					orderly_poweroff(true);
+				}
+			}
+			break;
 		case THERMAL_TRIP_ACTIVE:
 			list_for_each_entry(instance, &tz->cooling_devices,
 					    node) {
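
The three trip types added above call the zone's notify() callback only when the
measured temperature crosses the programmed threshold in the matching direction,
and for THERMAL_TRIP_CRITICAL_LOW a non-zero return from notify() suppresses the
orderly_poweroff(). A minimal sketch of sensor-driver callbacks wired to these
hooks follows; the tsens_* names are illustrative assumptions, only the callback
signatures follow the sysfs and polling code in this hunk.

/* Illustrative only: sensor-driver ops matching the new trip handling.
 * The tsens_* names are placeholders, not part of this patch.
 */
static int tsens_tz_notify(struct thermal_zone_device *tz, int trip,
			   enum thermal_trip_type type)
{
	switch (type) {
	case THERMAL_TRIP_CONFIGURABLE_HI:
	case THERMAL_TRIP_CONFIGURABLE_LOW:
		/* re-arm the hardware threshold interrupt here */
		return 0;
	case THERMAL_TRIP_CRITICAL_LOW:
		/* a non-zero return vetoes the orderly_poweroff() above */
		return 1;
	default:
		return 0;
	}
}

static int tsens_tz_set_trip_temp(struct thermal_zone_device *tz, int trip,
				  long temp)
{
	/* reached from a write to trip_point_N_temp; 'temp' carries the raw
	 * value parsed with %ld above (millidegrees C by convention).
	 * Reprogram the sensor's threshold register here. */
	return 0;
}
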
diff --git a/drivers/tty/hvc/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c
index 435f6fa..7ff1312 100644
--- a/drivers/tty/hvc/hvc_dcc.c
+++ b/drivers/tty/hvc/hvc_dcc.c
@@ -8,11 +8,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
 
 #include <linux/console.h>
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index b3692e6..852cff5 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1308,6 +1308,44 @@
 	  Choose M here to compile it as a module. The module will be
 	  called msm_serial_hs.
 
+config SERIAL_MSM_CLOCK_CONTROL
+	bool "Allow tty clients to make clock requests to msm uarts"
+	depends on SERIAL_MSM=y
+	default y
+	help
+	 Provides an interface for tty clients to request the msm uart clock
+	 to be turned on or off for power savings.
+
+config SERIAL_MSM_RX_WAKEUP
+	bool "Wake up the msm uart clock on GPIO activity"
+	depends on SERIAL_MSM_CLOCK_CONTROL
+	default n
+	help
+	 Requires SERIAL_MSM_CLOCK_CONTROL. Wake up the uart while the uart
+	 clock is off, using a wakeup GPIO.
+
+config SERIAL_MSM_HSL
+	tristate "MSM UART High Speed : Legacy mode Serial Driver"
+	depends on ARM && ARCH_MSM
+	select SERIAL_CORE
+	default n
+	help
+	  Select this module to enable the MSM high speed UART legacy mode
+	  driver.
+
+config SERIAL_MSM_HSL_CONSOLE
+	bool "MSM High speed serial legacy mode console support"
+	depends on SERIAL_MSM_HSL=y
+	select SERIAL_CORE_CONSOLE
+	default n
+
+config SERIAL_BCM_BT_LPM
+	tristate "Broadcom Bluetooth Low Power Mode"
+	depends on ARM && ARCH_MSM
+	select SERIAL_CORE
+	default n
+	help
+	  Select this module for Broadcom Bluetooth low power management.
+
 config SERIAL_VT8500
 	bool "VIA VT8500 on-chip serial port support"
 	depends on ARM && ARCH_VT8500
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index cb2628f..122c992 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -76,6 +76,8 @@
 obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
 obj-$(CONFIG_SERIAL_MSM) += msm_serial.o
 obj-$(CONFIG_SERIAL_MSM_HS) += msm_serial_hs.o
+obj-$(CONFIG_SERIAL_MSM_HSL) += msm_serial_hs_lite.o
+obj-$(CONFIG_MSM_SERIAL_DEBUGGER) += msm_serial_debugger.o
 obj-$(CONFIG_SERIAL_NETX) += netx-serial.o
 obj-$(CONFIG_SERIAL_OF_PLATFORM) += of_serial.o
 obj-$(CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL) += nwpserial.o
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index e6ba838..e6646ab 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -1,9 +1,9 @@
 /*
- * Driver for msm7k serial device and console
+ * drivers/tty/serial/msm_serial.c - driver for msm7k serial device and console
  *
  * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
  * Author: Robert Love <rlove@google.com>
- * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -25,122 +25,243 @@
 #include <linux/ioport.h>
 #include <linux/irq.h>
 #include <linux/init.h>
+#include <linux/delay.h>
 #include <linux/console.h>
 #include <linux/tty.h>
 #include <linux/tty_flip.h>
 #include <linux/serial_core.h>
 #include <linux/serial.h>
+#include <linux/nmi.h>
 #include <linux/clk.h>
 #include <linux/platform_device.h>
-#include <linux/delay.h>
-
+#include <linux/pm_runtime.h>
+#include <mach/msm_serial_pdata.h>
 #include "msm_serial.h"
 
+
+#ifdef CONFIG_SERIAL_MSM_CLOCK_CONTROL
+enum msm_clk_states_e {
+	MSM_CLK_PORT_OFF,     /* uart port not in use */
+	MSM_CLK_OFF,          /* clock disabled */
+	MSM_CLK_REQUEST_OFF,  /* disable after TX flushed */
+	MSM_CLK_ON,           /* clock enabled */
+};
+#endif
+
+#ifdef CONFIG_SERIAL_MSM_RX_WAKEUP
+/* optional low power wakeup, typically on a GPIO RX irq */
+struct msm_wakeup {
+	int irq;  /* < 0 indicates low power wakeup disabled */
+	unsigned char ignore;  /* bool */
+
+	/* bool: inject char into rx tty on wakeup */
+	unsigned char inject_rx;
+	char rx_to_inject;
+};
+#endif
+
 struct msm_port {
 	struct uart_port	uart;
 	char			name[16];
 	struct clk		*clk;
-	struct clk		*pclk;
 	unsigned int		imr;
-	unsigned int            *gsbi_base;
-	int			is_uartdm;
-	unsigned int		old_snap_state;
+#ifdef CONFIG_SERIAL_MSM_CLOCK_CONTROL
+	enum msm_clk_states_e	clk_state;
+	struct hrtimer		clk_off_timer;
+	ktime_t			clk_off_delay;
+#endif
+#ifdef CONFIG_SERIAL_MSM_RX_WAKEUP
+	struct msm_wakeup wakeup;
+#endif
 };
 
-static inline void wait_for_xmitr(struct uart_port *port, int bits)
+#define UART_TO_MSM(uart_port)	((struct msm_port *) uart_port)
+#define is_console(port)	((port)->cons && \
+				(port)->cons->index == (port)->line)
+
+
+static inline void msm_write(struct uart_port *port, unsigned int val,
+			     unsigned int off)
 {
-	if (!(msm_read(port, UART_SR) & UART_SR_TX_EMPTY))
-		while ((msm_read(port, UART_ISR) & bits) != bits)
-			cpu_relax();
+	__raw_writel(val, port->membase + off);
 }
 
+static inline unsigned int msm_read(struct uart_port *port, unsigned int off)
+{
+	return __raw_readl(port->membase + off);
+}
+
+#ifdef CONFIG_SERIAL_MSM_RX_WAKEUP
+static inline unsigned int use_low_power_wakeup(struct msm_port *msm_port)
+{
+	return (msm_port->wakeup.irq >= 0);
+}
+#endif
+
 static void msm_stop_tx(struct uart_port *port)
 {
 	struct msm_port *msm_port = UART_TO_MSM(port);
 
+	clk_enable(msm_port->clk);
+
 	msm_port->imr &= ~UART_IMR_TXLEV;
 	msm_write(port, msm_port->imr, UART_IMR);
+
+	clk_disable(msm_port->clk);
 }
 
 static void msm_start_tx(struct uart_port *port)
 {
 	struct msm_port *msm_port = UART_TO_MSM(port);
 
+	clk_enable(msm_port->clk);
+
 	msm_port->imr |= UART_IMR_TXLEV;
 	msm_write(port, msm_port->imr, UART_IMR);
+
+	clk_disable(msm_port->clk);
 }
 
 static void msm_stop_rx(struct uart_port *port)
 {
 	struct msm_port *msm_port = UART_TO_MSM(port);
 
+	clk_enable(msm_port->clk);
+
 	msm_port->imr &= ~(UART_IMR_RXLEV | UART_IMR_RXSTALE);
 	msm_write(port, msm_port->imr, UART_IMR);
+
+	clk_disable(msm_port->clk);
 }
 
 static void msm_enable_ms(struct uart_port *port)
 {
 	struct msm_port *msm_port = UART_TO_MSM(port);
 
+	clk_enable(msm_port->clk);
+
 	msm_port->imr |= UART_IMR_DELTA_CTS;
 	msm_write(port, msm_port->imr, UART_IMR);
+
+	clk_disable(msm_port->clk);
 }
 
-static void handle_rx_dm(struct uart_port *port, unsigned int misr)
-{
-	struct tty_struct *tty = port->state->port.tty;
-	unsigned int sr;
-	int count = 0;
+#ifdef CONFIG_SERIAL_MSM_CLOCK_CONTROL
+/* turn clock off if TX buffer is empty, otherwise reschedule */
+static enum hrtimer_restart msm_serial_clock_off(struct hrtimer *timer) {
+	struct msm_port *msm_port = container_of(timer, struct msm_port,
+						 clk_off_timer);
+	struct uart_port *port = &msm_port->uart;
+	struct circ_buf *xmit = &port->state->xmit;
+	unsigned long flags;
+	int ret = HRTIMER_NORESTART;
+
+	spin_lock_irqsave(&port->lock, flags);
+
+	if (msm_port->clk_state == MSM_CLK_REQUEST_OFF) {
+		if (uart_circ_empty(xmit)) {
+			clk_disable(msm_port->clk);
+			msm_port->clk_state = MSM_CLK_OFF;
+#ifdef CONFIG_SERIAL_MSM_RX_WAKEUP
+			if (use_low_power_wakeup(msm_port)) {
+				msm_port->wakeup.ignore = 1;
+				enable_irq(msm_port->wakeup.irq);
+			}
+#endif
+		} else {
+			hrtimer_forward_now(timer, msm_port->clk_off_delay);
+			ret = HRTIMER_RESTART;
+		}
+	}
+
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	return ret;
+}
+
+/* request to turn off uart clock once pending TX is flushed */
+void msm_serial_clock_request_off(struct uart_port *port) {
+	unsigned long flags;
 	struct msm_port *msm_port = UART_TO_MSM(port);
 
-	if ((msm_read(port, UART_SR) & UART_SR_OVERRUN)) {
-		port->icount.overrun++;
-		tty_insert_flip_char(tty, 0, TTY_OVERRUN);
-		msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+	spin_lock_irqsave(&port->lock, flags);
+	if (msm_port->clk_state == MSM_CLK_ON) {
+		msm_port->clk_state = MSM_CLK_REQUEST_OFF;
+		/* turn the clock off later: unfortunately not all msm uarts
+		 * have a TXDONE available, and TXLEV does not wait until
+		 * completely flushed, so a timer is our only option
+		 */
+		hrtimer_start(&msm_port->clk_off_timer,
+			      msm_port->clk_off_delay, HRTIMER_MODE_REL);
 	}
-
-	if (misr & UART_IMR_RXSTALE) {
-		count = msm_read(port, UARTDM_RX_TOTAL_SNAP) -
-			msm_port->old_snap_state;
-		msm_port->old_snap_state = 0;
-	} else {
-		count = 4 * (msm_read(port, UART_RFWR));
-		msm_port->old_snap_state += count;
-	}
-
-	/* TODO: Precise error reporting */
-
-	port->icount.rx += count;
-
-	while (count > 0) {
-		unsigned int c;
-
-		sr = msm_read(port, UART_SR);
-		if ((sr & UART_SR_RX_READY) == 0) {
-			msm_port->old_snap_state -= count;
-			break;
-		}
-		c = msm_read(port, UARTDM_RF);
-		if (sr & UART_SR_RX_BREAK) {
-			port->icount.brk++;
-			if (uart_handle_break(port))
-				continue;
-		} else if (sr & UART_SR_PAR_FRAME_ERR)
-			port->icount.frame++;
-
-		/* TODO: handle sysrq */
-		tty_insert_flip_string(tty, (char *) &c,
-				       (count > 4) ? 4 : count);
-		count -= 4;
-	}
-
-	tty_flip_buffer_push(tty);
-	if (misr & (UART_IMR_RXSTALE))
-		msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
-	msm_write(port, 0xFFFFFF, UARTDM_DMRX);
-	msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+	spin_unlock_irqrestore(&port->lock, flags);
 }
 
+/* request to immediately turn on uart clock.
+ * ignored if there is a pending off request, unless force = 1.
+ */
+void msm_serial_clock_on(struct uart_port *port, int force) {
+	unsigned long flags;
+	struct msm_port *msm_port = UART_TO_MSM(port);
+
+	spin_lock_irqsave(&port->lock, flags);
+
+	switch (msm_port->clk_state) {
+	case MSM_CLK_OFF:
+		clk_enable(msm_port->clk);
+#ifdef CONFIG_SERIAL_MSM_RX_WAKEUP
+		if (use_low_power_wakeup(msm_port))
+			disable_irq(msm_port->wakeup.irq);
+#endif
+		force = 1;
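+		/* fall through and cancel any pending clock-off request */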
+	case MSM_CLK_REQUEST_OFF:
+		if (force) {
+			hrtimer_try_to_cancel(&msm_port->clk_off_timer);
+			msm_port->clk_state = MSM_CLK_ON;
+		}
+		break;
+	case MSM_CLK_ON: break;
+	case MSM_CLK_PORT_OFF: break;
+	}
+
+	spin_unlock_irqrestore(&port->lock, flags);
+}
+#endif
+
+#ifdef CONFIG_SERIAL_MSM_RX_WAKEUP
+static irqreturn_t msm_rx_irq(int irq, void *dev_id)
+{
+	struct uart_port *port = dev_id;
+	struct msm_port *msm_port = UART_TO_MSM(port);
+	int inject_wakeup = 0;
+
+	spin_lock(&port->lock);
+
+	if (msm_port->clk_state == MSM_CLK_OFF) {
+		/* ignore the first irq - it is a pending irq that occurred
+		 * before enable_irq() */
+		if (msm_port->wakeup.ignore)
+			msm_port->wakeup.ignore = 0;
+		else
+			inject_wakeup = 1;
+	}
+
+	msm_serial_clock_on(port, 0);
+
+	/* we missed an rx while asleep - if configured, inject the
+	 * wakeup indicator character into the tty */
+	if (inject_wakeup && msm_port->wakeup.inject_rx) {
+		struct tty_struct *tty = port->state->port.tty;
+		tty_insert_flip_char(tty, msm_port->wakeup.rx_to_inject,
+				     TTY_NORMAL);
+		tty_flip_buffer_push(tty);
+	}
+
+	spin_unlock(&port->lock);
+	return IRQ_HANDLED;
+}
+#endif
+
 static void handle_rx(struct uart_port *port)
 {
 	struct tty_struct *tty = port->state->port.tty;
@@ -189,12 +310,6 @@
 	tty_flip_buffer_push(tty);
 }
 
-static void reset_dm_count(struct uart_port *port)
-{
-	wait_for_xmitr(port, UART_ISR_TX_READY);
-	msm_write(port, 1, UARTDM_NCF_TX);
-}
-
 static void handle_tx(struct uart_port *port)
 {
 	struct circ_buf *xmit = &port->state->xmit;
@@ -202,18 +317,11 @@
 	int sent_tx;
 
 	if (port->x_char) {
-		if (msm_port->is_uartdm)
-			reset_dm_count(port);
-
-		msm_write(port, port->x_char,
-			  msm_port->is_uartdm ? UARTDM_TF : UART_TF);
+		msm_write(port, port->x_char, UART_TF);
 		port->icount.tx++;
 		port->x_char = 0;
 	}
 
-	if (msm_port->is_uartdm)
-		reset_dm_count(port);
-
 	while (msm_read(port, UART_SR) & UART_SR_TX_READY) {
 		if (uart_circ_empty(xmit)) {
 			/* disable tx interrupts */
@@ -221,17 +329,22 @@
 			msm_write(port, msm_port->imr, UART_IMR);
 			break;
 		}
-		msm_write(port, xmit->buf[xmit->tail],
-			  msm_port->is_uartdm ? UARTDM_TF : UART_TF);
 
-		if (msm_port->is_uartdm)
-			reset_dm_count(port);
+		msm_write(port, xmit->buf[xmit->tail], UART_TF);
 
 		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
 		port->icount.tx++;
 		sent_tx = 1;
 	}
 
+#ifdef CONFIG_SERIAL_MSM_CLOCK_CONTROL
+	if (sent_tx && msm_port->clk_state == MSM_CLK_REQUEST_OFF)
+		/* new TX - restart the timer */
+		if (hrtimer_try_to_cancel(&msm_port->clk_off_timer) == 1)
+			hrtimer_start(&msm_port->clk_off_timer,
+				msm_port->clk_off_delay, HRTIMER_MODE_REL);
+#endif
+
 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
 		uart_write_wakeup(port);
 }
@@ -250,21 +363,19 @@
 	unsigned int misr;
 
 	spin_lock(&port->lock);
+	clk_enable(msm_port->clk);
 	misr = msm_read(port, UART_MISR);
 	msm_write(port, 0, UART_IMR); /* disable interrupt */
 
-	if (misr & (UART_IMR_RXLEV | UART_IMR_RXSTALE)) {
-		if (msm_port->is_uartdm)
-			handle_rx_dm(port, misr);
-		else
-			handle_rx(port);
-	}
+	if (misr & (UART_IMR_RXLEV | UART_IMR_RXSTALE))
+		handle_rx(port);
 	if (misr & UART_IMR_TXLEV)
 		handle_tx(port);
 	if (misr & UART_IMR_DELTA_CTS)
 		handle_delta_cts(port);
 
 	msm_write(port, msm_port->imr, UART_IMR); /* restore interrupt */
+	clk_disable(msm_port->clk);
 	spin_unlock(&port->lock);
 
 	return IRQ_HANDLED;
@@ -272,7 +383,14 @@
 
 static unsigned int msm_tx_empty(struct uart_port *port)
 {
-	return (msm_read(port, UART_SR) & UART_SR_TX_EMPTY) ? TIOCSER_TEMT : 0;
+	unsigned int ret;
+	struct msm_port *msm_port = UART_TO_MSM(port);
+
+	clk_enable(msm_port->clk);
+	ret = (msm_read(port, UART_SR) & UART_SR_TX_EMPTY) ? TIOCSER_TEMT : 0;
+	clk_disable(msm_port->clk);
+
+	return ret;
 }
 
 static unsigned int msm_get_mctrl(struct uart_port *port)
@@ -280,21 +398,13 @@
 	return TIOCM_CAR | TIOCM_CTS | TIOCM_DSR | TIOCM_RTS;
 }
 
-
-static void msm_reset(struct uart_port *port)
-{
-	/* reset everything */
-	msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
-	msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
-	msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
-	msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR);
-	msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
-	msm_write(port, UART_CR_CMD_SET_RFR, UART_CR);
-}
-
-void msm_set_mctrl(struct uart_port *port, unsigned int mctrl)
+static void msm_set_mctrl(struct uart_port *port, unsigned int mctrl)
 {
 	unsigned int mr;
+	struct msm_port *msm_port = UART_TO_MSM(port);
+
+	clk_enable(msm_port->clk);
+
 	mr = msm_read(port, UART_MR1);
 
 	if (!(mctrl & TIOCM_RTS)) {
@@ -305,20 +415,27 @@
 		mr |= UART_MR1_RX_RDY_CTL;
 		msm_write(port, mr, UART_MR1);
 	}
+
+	clk_disable(msm_port->clk);
 }
 
 static void msm_break_ctl(struct uart_port *port, int break_ctl)
 {
+	struct msm_port *msm_port = UART_TO_MSM(port);
+
+	clk_enable(msm_port->clk);
+
 	if (break_ctl)
 		msm_write(port, UART_CR_CMD_START_BREAK, UART_CR);
 	else
 		msm_write(port, UART_CR_CMD_STOP_BREAK, UART_CR);
+
+	clk_disable(msm_port->clk);
 }
 
-static int msm_set_baud_rate(struct uart_port *port, unsigned int baud)
+static void msm_set_baud_rate(struct uart_port *port, unsigned int baud)
 {
 	unsigned int baud_code, rxstale, watermark;
-	struct msm_port *msm_port = UART_TO_MSM(port);
 
 	switch (baud) {
 	case 300:
@@ -368,14 +485,10 @@
 	case 115200:
 	default:
 		baud_code = UART_CSR_115200;
-		baud = 115200;
 		rxstale = 31;
 		break;
 	}
 
-	if (msm_port->is_uartdm)
-		msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
-
 	msm_write(port, baud_code, UART_CSR);
 
 	/* RX stale watermark */
@@ -390,27 +503,57 @@
 
 	/* set TX watermark */
 	msm_write(port, 10, UART_TFWR);
-
-	if (msm_port->is_uartdm) {
-		msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
-		msm_write(port, 0xFFFFFF, UARTDM_DMRX);
-		msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
-	}
-
-	return baud;
 }
 
+static void msm_reset(struct uart_port *port)
+{
+	/* reset everything */
+	msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
+	msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
+	msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+	msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR);
+	msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
+	msm_write(port, UART_CR_CMD_SET_RFR, UART_CR);
+}
 
 static void msm_init_clock(struct uart_port *port)
 {
 	struct msm_port *msm_port = UART_TO_MSM(port);
 
 	clk_enable(msm_port->clk);
-	if (!IS_ERR(msm_port->pclk))
-		clk_enable(msm_port->pclk);
-	msm_serial_set_mnd_regs(port);
+
+#ifdef CONFIG_SERIAL_MSM_CLOCK_CONTROL
+	msm_port->clk_state = MSM_CLK_ON;
+#endif
+
+	if (port->uartclk == 19200000) {
+		/* clock is TCXO (19.2MHz) */
+		msm_write(port, 0x06, UART_MREG);
+		msm_write(port, 0xF1, UART_NREG);
+		msm_write(port, 0x0F, UART_DREG);
+		msm_write(port, 0x1A, UART_MNDREG);
+	} else {
+		/* clock must be TCXO/4 */
+		msm_write(port, 0x18, UART_MREG);
+		msm_write(port, 0xF6, UART_NREG);
+		msm_write(port, 0x0F, UART_DREG);
+		msm_write(port, 0x0A, UART_MNDREG);
+	}
 }
 
+static void msm_deinit_clock(struct uart_port *port)
+{
+	struct msm_port *msm_port = UART_TO_MSM(port);
+
+#ifdef CONFIG_SERIAL_MSM_CLOCK_CONTROL
+	if (msm_port->clk_state != MSM_CLK_OFF)
+		clk_disable(msm_port->clk);
+	msm_port->clk_state = MSM_CLK_PORT_OFF;
+#else
+	clk_disable(msm_port->clk);
+#endif
+}
+
 static int msm_startup(struct uart_port *port)
 {
 	struct msm_port *msm_port = UART_TO_MSM(port);
@@ -425,7 +568,15 @@
 	if (unlikely(ret))
 		return ret;
 
+	if (unlikely(irq_set_irq_wake(port->irq, 1))) {
+		free_irq(port->irq, port);
+		return -ENXIO;
+	}
+
+#ifndef CONFIG_PM_RUNTIME
 	msm_init_clock(port);
+#endif
+	pm_runtime_get_sync(port->dev);
 
 	if (likely(port->fifosize > 12))
 		rfr_level = port->fifosize - 12;
@@ -448,31 +599,29 @@
 		msm_write(port, data, UART_IPR);
 	}
 
-	data = 0;
-	if (!port->cons || (port->cons && !(port->cons->flags & CON_ENABLED))) {
-		msm_write(port, UART_CR_CMD_PROTECTION_EN, UART_CR);
-		msm_reset(port);
-		data = UART_CR_TX_ENABLE;
-	}
+	msm_reset(port);
 
-	data |= UART_CR_RX_ENABLE;
-	msm_write(port, data, UART_CR);	/* enable TX & RX */
-
-	/* Make sure IPR is not 0 to start with*/
-	if (msm_port->is_uartdm)
-		msm_write(port, UART_IPR_STALE_LSB, UART_IPR);
+	msm_write(port, 0x05, UART_CR);	/* enable TX & RX */
 
 	/* turn on RX and CTS interrupts */
 	msm_port->imr = UART_IMR_RXLEV | UART_IMR_RXSTALE |
 			UART_IMR_CURRENT_CTS;
-
-	if (msm_port->is_uartdm) {
-		msm_write(port, 0xFFFFFF, UARTDM_DMRX);
-		msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
-		msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
-	}
-
 	msm_write(port, msm_port->imr, UART_IMR);
+
+#ifdef CONFIG_SERIAL_MSM_RX_WAKEUP
+	if (use_low_power_wakeup(msm_port)) {
+		ret = irq_set_irq_wake(msm_port->wakeup.irq, 1);
+		if (unlikely(ret))
+			return ret;
+		ret = request_irq(msm_port->wakeup.irq, msm_rx_irq,
+				  IRQF_TRIGGER_FALLING,
+				  "msm_serial_wakeup", msm_port);
+		if (unlikely(ret))
+			return ret;
+		disable_irq(msm_port->wakeup.irq);
+	}
+#endif
+
 	return 0;
 }
 
@@ -480,12 +629,25 @@
 {
 	struct msm_port *msm_port = UART_TO_MSM(port);
 
+	clk_enable(msm_port->clk);
+
 	msm_port->imr = 0;
 	msm_write(port, 0, UART_IMR); /* disable interrupts */
 
 	clk_disable(msm_port->clk);
 
 	free_irq(port->irq, port);
+
+#ifdef CONFIG_SERIAL_MSM_RX_WAKEUP
+	if (use_low_power_wakeup(msm_port)) {
+		irq_set_irq_wake(msm_port->wakeup.irq, 0);
+		free_irq(msm_port->wakeup.irq, msm_port);
+	}
+#endif
+#ifndef CONFIG_PM_RUNTIME
+	msm_deinit_clock(port);
+#endif
+	pm_runtime_put_sync(port->dev);
 }
 
 static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
@@ -493,14 +655,14 @@
 {
 	unsigned long flags;
 	unsigned int baud, mr;
+	struct msm_port *msm_port = UART_TO_MSM(port);
 
 	spin_lock_irqsave(&port->lock, flags);
+	clk_enable(msm_port->clk);
 
 	/* calculate and set baud rate */
 	baud = uart_get_baud_rate(port, termios, old, 300, 115200);
-	baud = msm_set_baud_rate(port, baud);
-	if (tty_termios_baud_rate(termios))
-		tty_termios_encode_baud_rate(termios, baud, baud);
+	msm_set_baud_rate(port, baud);
 
 	/* calculate parity */
 	mr = msm_read(port, UART_MR2);
@@ -560,6 +722,7 @@
 
 	uart_update_timeout(port, termios->c_cflag, baud);
 
+	clk_disable(msm_port->clk);
 	spin_unlock_irqrestore(&port->lock, flags);
 }
 
@@ -571,105 +734,48 @@
 static void msm_release_port(struct uart_port *port)
 {
 	struct platform_device *pdev = to_platform_device(port->dev);
-	struct msm_port *msm_port = UART_TO_MSM(port);
-	struct resource *uart_resource;
-	struct resource *gsbi_resource;
+	struct resource *resource;
 	resource_size_t size;
 
-	uart_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (unlikely(!uart_resource))
+	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (unlikely(!resource))
 		return;
-	size = resource_size(uart_resource);
+	size = resource->end - resource->start + 1;
 
 	release_mem_region(port->mapbase, size);
 	iounmap(port->membase);
 	port->membase = NULL;
-
-	if (msm_port->gsbi_base) {
-		iowrite32(GSBI_PROTOCOL_IDLE, msm_port->gsbi_base +
-			  GSBI_CONTROL);
-
-		gsbi_resource = platform_get_resource_byname(pdev,
-							     IORESOURCE_MEM,
-							     "gsbi_resource");
-
-		if (unlikely(!gsbi_resource))
-			return;
-
-		size = resource_size(gsbi_resource);
-		release_mem_region(gsbi_resource->start, size);
-		iounmap(msm_port->gsbi_base);
-		msm_port->gsbi_base = NULL;
-	}
 }
 
 static int msm_request_port(struct uart_port *port)
 {
-	struct msm_port *msm_port = UART_TO_MSM(port);
 	struct platform_device *pdev = to_platform_device(port->dev);
-	struct resource *uart_resource;
-	struct resource *gsbi_resource;
+	struct resource *resource;
 	resource_size_t size;
-	int ret;
 
-	uart_resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-						     "uart_resource");
-	if (unlikely(!uart_resource))
+	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (unlikely(!resource))
 		return -ENXIO;
+	size = resource->end - resource->start + 1;
 
-	size = resource_size(uart_resource);
-
-	if (!request_mem_region(port->mapbase, size, "msm_serial"))
+	if (unlikely(!request_mem_region(port->mapbase, size, "msm_serial")))
 		return -EBUSY;
 
 	port->membase = ioremap(port->mapbase, size);
 	if (!port->membase) {
-		ret = -EBUSY;
-		goto fail_release_port;
-	}
-
-	gsbi_resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-						     "gsbi_resource");
-	/* Is this a GSBI-based port? */
-	if (gsbi_resource) {
-		size = resource_size(gsbi_resource);
-
-		if (!request_mem_region(gsbi_resource->start, size,
-						 "msm_serial")) {
-			ret = -EBUSY;
-			goto fail_release_port;
-		}
-
-		msm_port->gsbi_base = ioremap(gsbi_resource->start, size);
-		if (!msm_port->gsbi_base) {
-			ret = -EBUSY;
-			goto fail_release_gsbi;
-		}
+		release_mem_region(port->mapbase, size);
+		return -EBUSY;
 	}
 
 	return 0;
-
-fail_release_gsbi:
-	release_mem_region(gsbi_resource->start, size);
-fail_release_port:
-	release_mem_region(port->mapbase, size);
-	return ret;
 }
 
 static void msm_config_port(struct uart_port *port, int flags)
 {
-	struct msm_port *msm_port = UART_TO_MSM(port);
-	int ret;
 	if (flags & UART_CONFIG_TYPE) {
 		port->type = PORT_MSM;
-		ret = msm_request_port(port);
-		if (ret)
-			return;
+		msm_request_port(port);
 	}
-
-	if (msm_port->is_uartdm)
-		iowrite32(GSBI_PROTOCOL_UART, msm_port->gsbi_base +
-			  GSBI_CONTROL);
 }
 
 static int msm_verify_port(struct uart_port *port, struct serial_struct *ser)
@@ -684,22 +790,20 @@
 static void msm_power(struct uart_port *port, unsigned int state,
 		      unsigned int oldstate)
 {
+#ifndef CONFIG_SERIAL_MSM_CLOCK_CONTROL
 	struct msm_port *msm_port = UART_TO_MSM(port);
 
 	switch (state) {
 	case 0:
 		clk_enable(msm_port->clk);
-		if (!IS_ERR(msm_port->pclk))
-			clk_enable(msm_port->pclk);
 		break;
 	case 3:
 		clk_disable(msm_port->clk);
-		if (!IS_ERR(msm_port->pclk))
-			clk_disable(msm_port->pclk);
 		break;
 	default:
 		printk(KERN_ERR "msm_serial: Unknown PM state %d\n", state);
 	}
+#endif
 }
 
 static struct uart_ops msm_uart_pops = {
@@ -728,7 +832,7 @@
 			.iotype = UPIO_MEM,
 			.ops = &msm_uart_pops,
 			.flags = UPF_BOOT_AUTOCONF,
-			.fifosize = 64,
+			.fifosize = 512,
 			.line = 0,
 		},
 	},
@@ -737,7 +841,7 @@
 			.iotype = UPIO_MEM,
 			.ops = &msm_uart_pops,
 			.flags = UPF_BOOT_AUTOCONF,
-			.fifosize = 64,
+			.fifosize = 512,
 			.line = 1,
 		},
 	},
@@ -754,23 +858,56 @@
 
 #define UART_NR	ARRAY_SIZE(msm_uart_ports)
 
-static inline struct uart_port *get_port_from_line(unsigned int line)
+static inline struct uart_port * get_port_from_line(unsigned int line)
 {
 	return &msm_uart_ports[line].uart;
 }
 
 #ifdef CONFIG_SERIAL_MSM_CONSOLE
 
+/*
+ *  Wait for transmitter & holding register to empty
+ *  Derived from wait_for_xmitr in 8250 serial driver by Russell King
+ */
+static inline void wait_for_xmitr(struct uart_port *port, int bits)
+{
+	unsigned int status, mr, tmout = 10000;
+
+	/* Wait up to 10ms for the character(s) to be sent. */
+	do {
+		status = msm_read(port, UART_SR);
+
+		if (--tmout == 0)
+			break;
+		udelay(1);
+	} while ((status & bits) != bits);
+
+	mr = msm_read(port, UART_MR1);
+
+	/* Wait up to 1s for flow control if necessary */
+	if (mr & UART_MR1_CTS_CTL) {
+		unsigned int tmout;
+		for (tmout = 1000000; tmout; tmout--) {
+			unsigned int isr = msm_read(port, UART_ISR);
+
+			/* CTS input is active lo */
+			if (!(isr & UART_IMR_CURRENT_CTS))
+				break;
+			udelay(1);
+			touch_nmi_watchdog();
+		}
+	}
+}
+
+
 static void msm_console_putchar(struct uart_port *port, int c)
 {
-	struct msm_port *msm_port = UART_TO_MSM(port);
+	/* This call can incur significant delay if CTS flow control is
+	 * enabled on the port and no serial cable is attached.
+	 */
+	wait_for_xmitr(port, UART_SR_TX_READY);
 
-	if (msm_port->is_uartdm)
-		reset_dm_count(port);
-
-	while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
-		;
-	msm_write(port, c, msm_port->is_uartdm ? UARTDM_TF : UART_TF);
+	msm_write(port, c, UART_TF);
 }
 
 static void msm_console_write(struct console *co, const char *s,
@@ -778,35 +915,48 @@
 {
 	struct uart_port *port;
 	struct msm_port *msm_port;
+	int locked;
 
 	BUG_ON(co->index < 0 || co->index >= UART_NR);
 
 	port = get_port_from_line(co->index);
 	msm_port = UART_TO_MSM(port);
 
-	spin_lock(&port->lock);
+	/* not pretty, but we can end up here via various convoluted paths */
+	if (port->sysrq || oops_in_progress)
+		locked = spin_trylock(&port->lock);
+	else {
+		locked = 1;
+		spin_lock(&port->lock);
+	}
+
 	uart_console_write(port, s, count, msm_console_putchar);
-	spin_unlock(&port->lock);
+
+	if (locked)
+		spin_unlock(&port->lock);
 }
 
 static int __init msm_console_setup(struct console *co, char *options)
 {
 	struct uart_port *port;
-	struct msm_port *msm_port;
 	int baud, flow, bits, parity;
 
 	if (unlikely(co->index >= UART_NR || co->index < 0))
 		return -ENXIO;
 
 	port = get_port_from_line(co->index);
-	msm_port = UART_TO_MSM(port);
 
 	if (unlikely(!port->membase))
 		return -ENXIO;
 
 	port->cons = co;
 
+	pm_runtime_get_noresume(port->dev);
+
+#ifndef CONFIG_PM_RUNTIME
 	msm_init_clock(port);
+#endif
+	pm_runtime_resume(port->dev);
 
 	if (options)
 		uart_parse_options(options, &baud, &parity, &bits, &flow);
@@ -823,11 +973,6 @@
 
 	msm_reset(port);
 
-	if (msm_port->is_uartdm) {
-		msm_write(port, UART_CR_CMD_PROTECTION_EN, UART_CR);
-		msm_write(port, UART_CR_TX_ENABLE, UART_CR);
-	}
-
 	printk(KERN_INFO "msm_serial: console setup on port #%d\n", port->line);
 
 	return uart_set_options(port, co, baud, parity, bits, flow);
@@ -845,7 +990,7 @@
 	.data = &msm_uart_driver,
 };
 
-#define MSM_CONSOLE	(&msm_console)
+#define MSM_CONSOLE	&msm_console
 
 #else
 #define MSM_CONSOLE	NULL
@@ -865,6 +1010,9 @@
 	struct resource *resource;
 	struct uart_port *port;
 	int irq;
+#ifdef CONFIG_SERIAL_MSM_RX_WAKEUP
+	struct msm_serial_platform_data *pdata = pdev->dev.platform_data;
+#endif
 
 	if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
 		return -ENXIO;
@@ -875,32 +1023,14 @@
 	port->dev = &pdev->dev;
 	msm_port = UART_TO_MSM(port);
 
-	if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsbi_resource"))
-		msm_port->is_uartdm = 1;
-	else
-		msm_port->is_uartdm = 0;
-
-	if (msm_port->is_uartdm) {
-		msm_port->clk = clk_get(&pdev->dev, "gsbi_uart_clk");
-		msm_port->pclk = clk_get(&pdev->dev, "gsbi_pclk");
-	} else {
-		msm_port->clk = clk_get(&pdev->dev, "uart_clk");
-		msm_port->pclk = ERR_PTR(-ENOENT);
-	}
-
-	if (unlikely(IS_ERR(msm_port->clk) || (IS_ERR(msm_port->pclk) &&
-					       msm_port->is_uartdm)))
-			return PTR_ERR(msm_port->clk);
-
-	if (msm_port->is_uartdm)
-		clk_set_rate(msm_port->clk, 7372800);
-
+	msm_port->clk = clk_get(&pdev->dev, "uart_clk");
+	if (unlikely(IS_ERR(msm_port->clk)))
+		return PTR_ERR(msm_port->clk);
 	port->uartclk = clk_get_rate(msm_port->clk);
-	printk(KERN_INFO "uartclk = %d\n", port->uartclk);
+	if (!port->uartclk)
+		port->uartclk = 19200000;
 
-
-	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-						     "uart_resource");
+	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (unlikely(!resource))
 		return -ENXIO;
 	port->mapbase = resource->start;
@@ -912,6 +1042,29 @@
 
 	platform_set_drvdata(pdev, port);
 
+
+#ifdef CONFIG_SERIAL_MSM_RX_WAKEUP
+	if (pdata == NULL)
+		msm_port->wakeup.irq = -1;
+	else {
+		msm_port->wakeup.irq = pdata->wakeup_irq;
+		msm_port->wakeup.ignore = 1;
+		msm_port->wakeup.inject_rx = pdata->inject_rx_on_wakeup;
+		msm_port->wakeup.rx_to_inject = pdata->rx_to_inject;
+
+		if (unlikely(msm_port->wakeup.irq <= 0))
+			return -EINVAL;
+	}
+#endif
+
+#ifdef CONFIG_SERIAL_MSM_CLOCK_CONTROL
+	msm_port->clk_state = MSM_CLK_PORT_OFF;
+	hrtimer_init(&msm_port->clk_off_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	msm_port->clk_off_timer.function = msm_serial_clock_off;
+	msm_port->clk_off_delay = ktime_set(0, 1000000);  /* 1 ms */
+#endif
+
+	pm_runtime_enable(port->dev);
 	return uart_add_one_port(&msm_uart_driver, port);
 }
 
@@ -919,16 +1072,84 @@
 {
 	struct msm_port *msm_port = platform_get_drvdata(pdev);
 
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
 	clk_put(msm_port->clk);
 
 	return 0;
 }
 
+#ifdef CONFIG_PM
+static int msm_serial_suspend(struct device *dev)
+{
+	struct uart_port *port;
+	struct platform_device *pdev = to_platform_device(dev);
+	port = get_port_from_line(pdev->id);
+
+	if (port) {
+		uart_suspend_port(&msm_uart_driver, port);
+		if (is_console(port))
+			msm_deinit_clock(port);
+	}
+
+	return 0;
+}
+
+static int msm_serial_resume(struct device *dev)
+{
+	struct uart_port *port;
+	struct platform_device *pdev = to_platform_device(dev);
+	port = get_port_from_line(pdev->id);
+
+	if (port) {
+		if (is_console(port))
+			msm_init_clock(port);
+		uart_resume_port(&msm_uart_driver, port);
+	}
+
+	return 0;
+}
+#else
+#define msm_serial_suspend NULL
+#define msm_serial_resume NULL
+#endif
+
+static int msm_serial_runtime_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct uart_port *port;
+	port = get_port_from_line(pdev->id);
+
+	dev_dbg(dev, "pm_runtime: suspending\n");
+	msm_deinit_clock(port);
+	return 0;
+}
+
+static int msm_serial_runtime_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct uart_port *port;
+	port = get_port_from_line(pdev->id);
+
+	dev_dbg(dev, "pm_runtime: resuming\n");
+	msm_init_clock(port);
+	return 0;
+}
+
+static struct dev_pm_ops msm_serial_dev_pm_ops = {
+	.suspend = msm_serial_suspend,
+	.resume = msm_serial_resume,
+	.runtime_suspend = msm_serial_runtime_suspend,
+	.runtime_resume = msm_serial_runtime_resume,
+};
+
 static struct platform_driver msm_platform_driver = {
 	.remove = msm_serial_remove,
 	.driver = {
 		.name = "msm_serial",
 		.owner = THIS_MODULE,
+		.pm = &msm_serial_dev_pm_ops,
 	},
 };
 
@@ -963,4 +1184,4 @@
 
 MODULE_AUTHOR("Robert Love <rlove@google.com>");
 MODULE_DESCRIPTION("Driver for msm7x serial device");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
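
With SERIAL_MSM_CLOCK_CONTROL and SERIAL_MSM_RX_WAKEUP enabled, the driver gates
the uart clock around register accesses and lets a board supply a wakeup GPIO IRQ
plus an optional character to inject on wakeup through msm_serial_platform_data.
The sketch below shows how a board file and a tty client might use these hooks;
the GPIO number, the 0x32 wake-up byte and the example_* function are
assumptions, and the client is assumed to have the msm_serial_clock_* prototypes
in scope.

/* Board side: illustrative platform data for a uart woken by RX activity
 * on a GPIO. Field names match the pdata use in msm_serial_probe() above;
 * the GPIO number is a placeholder.
 */
static struct msm_serial_platform_data uart1_wakeup_pdata = {
	.wakeup_irq		= MSM_GPIO_TO_INT(45),	/* hypothetical GPIO */
	.inject_rx_on_wakeup	= 1,
	.rx_to_inject		= 0x32,	/* e.g. TI HCILL wake-up indication */
};

/* Client side: keep the uart clocked only while data is actually moving.
 * 'port' is assumed to be the uart_port the client drives.
 */
static void example_uart_tx_burst(struct uart_port *port)
{
	msm_serial_clock_on(port, 0);	/* make sure the uart is clocked */
	/* ... queue data through the tty/serial core here ... */
	msm_serial_clock_request_off(port);	/* clock off once TX drains */
}
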
diff --git a/drivers/tty/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h
index e4acef5..65d0e30 100644
--- a/drivers/tty/serial/msm_serial.h
+++ b/drivers/tty/serial/msm_serial.h
@@ -46,11 +46,14 @@
 #define UART_CSR_19200	0xBB
 #define UART_CSR_14400	0xAA
 #define UART_CSR_9600	0x99
+#define UART_CSR_7200	0x88
 #define UART_CSR_4800	0x77
 #define UART_CSR_2400	0x55
 #define UART_CSR_1200	0x44
 #define UART_CSR_600	0x33
 #define UART_CSR_300	0x22
+#define UART_CSR_150	0x11
+#define UART_CSR_75	0x00
 
 #define UART_TF		0x000C
 #define UARTDM_TF	0x0070
@@ -128,60 +131,4 @@
 #define UARTDM_NCF_TX		0x40
 #define UARTDM_RX_TOTAL_SNAP	0x38
 
-#define UART_TO_MSM(uart_port)	((struct msm_port *) uart_port)
-
-static inline
-void msm_write(struct uart_port *port, unsigned int val, unsigned int off)
-{
-	__raw_writel(val, port->membase + off);
-}
-
-static inline
-unsigned int msm_read(struct uart_port *port, unsigned int off)
-{
-	return __raw_readl(port->membase + off);
-}
-
-/*
- * Setup the MND registers to use the TCXO clock.
- */
-static inline void msm_serial_set_mnd_regs_tcxo(struct uart_port *port)
-{
-	msm_write(port, 0x06, UART_MREG);
-	msm_write(port, 0xF1, UART_NREG);
-	msm_write(port, 0x0F, UART_DREG);
-	msm_write(port, 0x1A, UART_MNDREG);
-}
-
-/*
- * Setup the MND registers to use the TCXO clock divided by 4.
- */
-static inline void msm_serial_set_mnd_regs_tcxoby4(struct uart_port *port)
-{
-	msm_write(port, 0x18, UART_MREG);
-	msm_write(port, 0xF6, UART_NREG);
-	msm_write(port, 0x0F, UART_DREG);
-	msm_write(port, 0x0A, UART_MNDREG);
-}
-
-static inline
-void msm_serial_set_mnd_regs_from_uartclk(struct uart_port *port)
-{
-	if (port->uartclk == 19200000)
-		msm_serial_set_mnd_regs_tcxo(port);
-	else
-		msm_serial_set_mnd_regs_tcxoby4(port);
-}
-
-/*
- * TROUT has a specific defect that makes it report it's uartclk
- * as 19.2Mhz (TCXO) when it's actually 4.8Mhz (TCXO/4). This special
- * cases TROUT to use the right clock.
- */
-#ifdef CONFIG_MACH_TROUT
-#define msm_serial_set_mnd_regs msm_serial_set_mnd_regs_tcxoby4
-#else
-#define msm_serial_set_mnd_regs msm_serial_set_mnd_regs_from_uartclk
-#endif
-
 #endif	/* __DRIVERS_SERIAL_MSM_SERIAL_H */
diff --git a/drivers/tty/serial/msm_serial_debugger.c b/drivers/tty/serial/msm_serial_debugger.c
new file mode 100644
index 0000000..88b6784
--- /dev/null
+++ b/drivers/tty/serial/msm_serial_debugger.c
@@ -0,0 +1,421 @@
+/*
+ * drivers/tty/serial/msm_serial_debugger.c
+ *
+ * Serial Debugger Interface for MSM7K
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <stdarg.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/console.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/kernel_debugger.h>
+#include <linux/kernel_stat.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+
+#include <mach/system.h>
+#include <mach/fiq.h>
+
+#include "msm_serial.h"
+
+static unsigned int debug_port_base;
+static int debug_signal_irq;
+static struct clk *debug_clk;
+static int debug_enable;
+static int debugger_enable;
+static struct {
+	unsigned int	base;
+	int		irq;
+	struct device	*clk_device;
+	int		signal_irq;
+} init_data;
+
+static inline void msm_write(unsigned int val, unsigned int off)
+{
+	__raw_writel(val, debug_port_base + off);
+}
+
+static inline unsigned int msm_read(unsigned int off)
+{
+	return __raw_readl(debug_port_base + off);
+}
+
+static void debug_port_init(void)
+{
+	/* reset everything */
+	msm_write(UART_CR_CMD_RESET_RX, UART_CR);
+	msm_write(UART_CR_CMD_RESET_TX, UART_CR);
+	msm_write(UART_CR_CMD_RESET_ERR, UART_CR);
+	msm_write(UART_CR_CMD_RESET_BREAK_INT, UART_CR);
+	msm_write(UART_CR_CMD_RESET_CTS, UART_CR);
+	msm_write(UART_CR_CMD_SET_RFR, UART_CR);
+
+	/* setup clock dividers */
+	if (clk_get_rate(debug_clk) == 19200000) {
+		/* clock is TCXO (19.2MHz) */
+		msm_write(0x06, UART_MREG);
+		msm_write(0xF1, UART_NREG);
+		msm_write(0x0F, UART_DREG);
+		msm_write(0x1A, UART_MNDREG);
+	} else {
+		/* clock must be TCXO/4 */
+		msm_write(0x18, UART_MREG);
+		msm_write(0xF6, UART_NREG);
+		msm_write(0x0F, UART_DREG);
+		msm_write(0x0A, UART_MNDREG);
+	}
+
+	msm_write(UART_CSR_115200, UART_CSR);
+
+	/* rx interrupt on every character -- keep it simple */
+	msm_write(0, UART_RFWR);
+
+	/* enable TX and RX */
+	msm_write(0x05, UART_CR);
+
+	/* enable RX interrupt */
+	msm_write(UART_IMR_RXLEV, UART_IMR);
+}
+
+static inline int debug_getc(void)
+{
+	if (msm_read(UART_SR) & UART_SR_RX_READY) {
+		return msm_read(UART_RF);
+	} else {
+		return -1;
+	}
+}
+
+static inline void debug_putc(unsigned int c)
+{
+	while (!(msm_read(UART_SR) & UART_SR_TX_READY)) ;
+	msm_write(c, UART_TF);
+}
+
+static inline void debug_flush(void)
+{
+	while (!(msm_read(UART_SR) & UART_SR_TX_EMPTY)) ;
+}
+
+static void debug_puts(char *s)
+{
+	unsigned c;
+	while ((c = *s++)) {
+		if (c == '\n')
+			debug_putc('\r');
+		debug_putc(c);
+	}
+}
+
+static void debug_prompt(void)
+{
+	debug_puts("debug> ");
+}
+
+int log_buf_copy(char *dest, int idx, int len);
+static void dump_kernel_log(void)
+{
+	char buf[1024];
+	int idx = 0;
+	int ret;
+	int saved_oip;
+
+	/* setting oops_in_progress prevents log_buf_copy()
+	 * from trying to take a spinlock which will make it
+	 * very unhappy in some cases...
+	 */
+	saved_oip = oops_in_progress;
+	oops_in_progress = 1;
+	for (;;) {
+		ret = log_buf_copy(buf, idx, 1023);
+		if (ret <= 0)
+			break;
+		buf[ret] = 0;
+		debug_puts(buf);
+		idx += ret;
+	}
+	oops_in_progress = saved_oip;
+}
+
+static char *mode_name(unsigned cpsr)
+{
+	switch (cpsr & MODE_MASK) {
+	case USR_MODE: return "USR";
+	case FIQ_MODE: return "FIQ";
+	case IRQ_MODE: return "IRQ";
+	case SVC_MODE: return "SVC";
+	case ABT_MODE: return "ABT";
+	case UND_MODE: return "UND";
+	case SYSTEM_MODE: return "SYS";
+	default: return "???";
+	}
+}
+
+#define DEBUG_MAX 64
+static char debug_cmd[DEBUG_MAX];
+static int debug_busy;
+static int debug_abort;
+
+static int debug_printf(void *cookie, const char *fmt, ...)
+{
+	char buf[256];
+	va_list ap;
+
+	va_start(ap, fmt);
+	vsnprintf(buf, sizeof(buf), fmt, ap);
+	va_end(ap);
+
+	debug_puts(buf);
+	return debug_abort;
+}
+
+/* Safe outside fiq context */
+static int debug_printf_nfiq(void *cookie, const char *fmt, ...)
+{
+	char buf[256];
+	va_list ap;
+	unsigned long irq_flags;
+
+	va_start(ap, fmt);
+	vsnprintf(buf, sizeof(buf), fmt, ap);
+	va_end(ap);
+
+	local_irq_save(irq_flags);
+	debug_puts(buf);
+	debug_flush();
+	local_irq_restore(irq_flags);
+	return debug_abort;
+}
+
+#define dprintf(fmt...) debug_printf(0, fmt)
+
+unsigned int last_irqs[NR_IRQS];
+
+static void dump_irqs(void)
+{
+	int n;
+	dprintf("irqnr       total  since-last   status  name\n");
+	for (n = 1; n < NR_IRQS; n++) {
+		struct irqaction *act = irq_desc[n].action;
+		if (!act && !kstat_cpu(0).irqs[n])
+			continue;
+		dprintf("%5d: %10u %11u %8x  %s\n", n,
+			kstat_cpu(0).irqs[n],
+			kstat_cpu(0).irqs[n] - last_irqs[n],
+			irq_desc[n].status,
+			(act && act->name) ? act->name : "???");
+		last_irqs[n] = kstat_cpu(0).irqs[n];
+	}
+}
+
+static void debug_exec(const char *cmd, unsigned *regs)
+{
+	if (!strcmp(cmd, "pc")) {
+		dprintf(" pc %08x cpsr %08x mode %s\n",
+			regs[15], regs[16], mode_name(regs[16]));
+	} else if (!strcmp(cmd, "regs")) {
+		dprintf(" r0 %08x  r1 %08x  r2 %08x  r3 %08x\n",
+			regs[0], regs[1], regs[2], regs[3]);
+		dprintf(" r4 %08x  r5 %08x  r6 %08x  r7 %08x\n",
+			regs[4], regs[5], regs[6], regs[7]);
+		dprintf(" r8 %08x  r9 %08x r10 %08x r11 %08x  mode %s\n",
+			regs[8], regs[9], regs[10], regs[11],
+			mode_name(regs[16]));
+		dprintf(" ip %08x  sp %08x  lr %08x  pc %08x  cpsr %08x\n",
+			regs[12], regs[13], regs[14], regs[15], regs[16]);
+	} else if (!strcmp(cmd, "reboot")) {
+		if (msm_hw_reset_hook)
+			msm_hw_reset_hook();
+	} else if (!strcmp(cmd, "irqs")) {
+		dump_irqs();
+	} else if (!strcmp(cmd, "kmsg")) {
+		dump_kernel_log();
+	} else if (!strcmp(cmd, "version")) {
+		dprintf("%s\n", linux_banner);
+	} else {
+		if (debug_busy) {
+			dprintf("command processor busy. trying to abort.\n");
+			debug_abort = -1;
+		} else {
+			strcpy(debug_cmd, cmd);
+			debug_busy = 1;
+		}
+		msm_trigger_irq(debug_signal_irq);
+		return;
+	}
+	debug_prompt();
+}
+
+static irqreturn_t debug_irq(int irq, void *dev)
+{
+	if (debug_busy) {
+		struct kdbg_ctxt ctxt;
+
+		ctxt.printf = debug_printf_nfiq;
+		kernel_debugger(&ctxt, debug_cmd);
+		debug_prompt();
+
+		debug_busy = 0;
+	}
+	return IRQ_HANDLED;
+}
+
+static char debug_buf[DEBUG_MAX];
+static int debug_count;
+
+static void debug_fiq(void *data, void *regs)
+{
+	int c;
+	static int last_c;
+
+	while ((c = debug_getc()) != -1) {
+		if (!debug_enable) {
+			if ((c == 13) || (c == 10)) {
+				debug_enable = true;
+				debug_count = 0;
+				debug_prompt();
+			}
+		} else if ((c >= ' ') && (c < 127)) {
+			if (debug_count < (DEBUG_MAX - 1)) {
+				debug_buf[debug_count++] = c;
+				debug_putc(c);
+			}
+		} else if ((c == 8) || (c == 127)) {
+			if (debug_count > 0) {
+				debug_count--;
+				debug_putc(8);
+				debug_putc(' ');
+				debug_putc(8);
+			}
+		} else if ((c == 13) || (c == 10)) {
+			if (c == '\r' || (c == '\n' && last_c != '\r')) {
+				debug_putc('\r');
+				debug_putc('\n');
+			}
+			if (debug_count) {
+				debug_buf[debug_count] = 0;
+				debug_count = 0;
+				debug_exec(debug_buf, regs);
+			} else {
+				debug_prompt();
+			}
+		}
+		last_c = c;
+	}
+	debug_flush();
+}
+
+#if defined(CONFIG_MSM_SERIAL_DEBUGGER_CONSOLE)
+static void debug_console_write(struct console *co,
+				const char *s, unsigned int count)
+{
+	unsigned long irq_flags;
+
+	/* disable irq's while TXing outside of FIQ context */
+	local_irq_save(irq_flags);
+	while (count--) {
+		if (*s == '\n')
+			debug_putc('\r');
+		debug_putc(*s++);
+	}
+	debug_flush();
+	local_irq_restore(irq_flags);
+}
+
+static struct console msm_serial_debug_console = {
+	.name = "debug_console",
+	.write = debug_console_write,
+	.flags = CON_PRINTBUFFER | CON_ANYTIME | CON_ENABLED,
+};
+#endif
+
+void msm_serial_debug_enable(int enable) {
+	debug_enable = enable;
+}
+
+void msm_serial_debug_init(unsigned int base, int irq,
+			   struct device *clk_device, int signal_irq)
+{
+	int ret;
+	void *port;
+
+	debug_clk = clk_get(clk_device, "uart_clk");
+	if (!IS_ERR(debug_clk))
+		clk_enable(debug_clk);
+
+	port = ioremap(base, 4096);
+	if (!port)
+		return;
+
+	init_data.base = base;
+	init_data.irq = irq;
+	init_data.clk_device = clk_device;
+	init_data.signal_irq = signal_irq;
+	debug_port_base = (unsigned int) port;
+	debug_signal_irq = signal_irq;
+	debug_port_init();
+
+	debug_prompt();
+
+	msm_fiq_select(irq);
+	msm_fiq_set_handler(debug_fiq, 0);
+	msm_fiq_enable(irq);
+
+	ret = request_irq(signal_irq, debug_irq,
+			  IRQF_TRIGGER_RISING, "debug", 0);
+	if (ret)
+		printk(KERN_ERR
+		       "serial_debugger: could not install signal_irq\n");
+
+#if defined(CONFIG_MSM_SERIAL_DEBUGGER_CONSOLE)
+	register_console(&msm_serial_debug_console);
+#endif
+	debugger_enable = 1;
+}
+static int msm_serial_debug_remove(const char *val, struct kernel_param *kp)
+{
+	int ret;
+	static int pre_stat = 1;
+	ret = param_set_bool(val, kp);
+	if (ret)
+		return ret;
+
+	if (pre_stat == *(int *)kp->arg)
+		return 0;
+
+	pre_stat = *(int *)kp->arg;
+
+	if (*(int *)kp->arg) {
+		msm_serial_debug_init(init_data.base, init_data.irq,
+				init_data.clk_device, init_data.signal_irq);
+		printk(KERN_INFO "enable FIQ serial debugger\n");
+		return 0;
+	}
+
+#if defined(CONFIG_MSM_SERIAL_DEBUGGER_CONSOLE)
+	unregister_console(&msm_serial_debug_console);
+#endif
+	free_irq(init_data.signal_irq, 0);
+	msm_fiq_set_handler(NULL, 0);
+	msm_fiq_disable(init_data.irq);
+	msm_fiq_unselect(init_data.irq);
+	clk_disable(debug_clk);
+	printk(KERN_INFO "disable FIQ serial debugger\n");
+	return 0;
+}
+module_param_call(enable, msm_serial_debug_remove, param_get_bool,
+		&debugger_enable, S_IWUSR | S_IRUGO);
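
The FIQ debugger above is not probed through the platform bus; board code calls
msm_serial_debug_init() with the physical UART base, the UART IRQ to be taken
over as a FIQ, the device whose "uart_clk" it should borrow, and a second,
software-triggerable interrupt used to run blocking commands outside FIQ context
(raised via msm_trigger_irq()). A hedged sketch of that hookup follows; the
EXAMPLE_* constants and the msm_device_uart1 reference are placeholders for the
target board's real memory map and IRQ numbers.

/* Illustrative board-file fragment; the constants are placeholders. */
#ifdef CONFIG_MSM_SERIAL_DEBUGGER
static void __init example_init_fiq_debugger(void)
{
	msm_serial_debug_init(EXAMPLE_UART1_PHYS,	/* UART register base */
			      EXAMPLE_INT_UART1,	/* uart irq, routed to FIQ */
			      &msm_device_uart1.dev,	/* supplies "uart_clk" */
			      EXAMPLE_INT_DEBUG_TRIGGER); /* irq for debug_irq() */
}
#endif

Once built in, the debugger can also be toggled at runtime through the module
parameter registered above, typically via
/sys/module/msm_serial_debugger/parameters/enable.
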
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 624701f..7c1a9e8 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -1,10 +1,14 @@
-/*
- * MSM 7k/8k High speed uart driver
+/* drivers/tty/serial/msm_serial_hs.c
  *
- * Copyright (c) 2007-2011, Code Aurora Forum. All rights reserved.
+ * MSM 7k High speed uart driver
+ *
  * Copyright (c) 2008 Google Inc.
+ * Copyright (c) 2007-2011, Code Aurora Forum. All rights reserved.
  * Modified: Nick Pelly <npelly@google.com>
  *
+ * All source code in this file is licensed under the following license
+ * except where indicated.
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * version 2 as published by the Free Software Foundation.
@@ -44,163 +48,25 @@
 #include <linux/dma-mapping.h>
 #include <linux/dmapool.h>
 #include <linux/wait.h>
-#include <linux/workqueue.h>
-
-#include <linux/atomic.h>
+#include <linux/sysfs.h>
+#include <linux/stat.h>
+#include <linux/device.h>
+#include <linux/wakelock.h>
+#include <linux/debugfs.h>
+#include <asm/atomic.h>
 #include <asm/irq.h>
 #include <asm/system.h>
 
 #include <mach/hardware.h>
 #include <mach/dma.h>
-#include <linux/platform_data/msm_serial_hs.h>
+#include <mach/msm_serial_hs.h>
 
-/* HSUART Registers */
-#define UARTDM_MR1_ADDR 0x0
-#define UARTDM_MR2_ADDR 0x4
+#include "msm_serial_hs_hwreg.h"
 
-/* Data Mover result codes */
-#define RSLT_FIFO_CNTR_BMSK (0xE << 28)
-#define RSLT_VLD            BIT(1)
+static int hs_serial_debug_mask = 1;
+module_param_named(debug_mask, hs_serial_debug_mask,
+		   int, S_IRUGO | S_IWUSR | S_IWGRP);
 
-/* write only register */
-#define UARTDM_CSR_ADDR 0x8
-#define UARTDM_CSR_115200 0xFF
-#define UARTDM_CSR_57600  0xEE
-#define UARTDM_CSR_38400  0xDD
-#define UARTDM_CSR_28800  0xCC
-#define UARTDM_CSR_19200  0xBB
-#define UARTDM_CSR_14400  0xAA
-#define UARTDM_CSR_9600   0x99
-#define UARTDM_CSR_7200   0x88
-#define UARTDM_CSR_4800   0x77
-#define UARTDM_CSR_3600   0x66
-#define UARTDM_CSR_2400   0x55
-#define UARTDM_CSR_1200   0x44
-#define UARTDM_CSR_600    0x33
-#define UARTDM_CSR_300    0x22
-#define UARTDM_CSR_150    0x11
-#define UARTDM_CSR_75     0x00
-
-/* write only register */
-#define UARTDM_TF_ADDR 0x70
-#define UARTDM_TF2_ADDR 0x74
-#define UARTDM_TF3_ADDR 0x78
-#define UARTDM_TF4_ADDR 0x7C
-
-/* write only register */
-#define UARTDM_CR_ADDR 0x10
-#define UARTDM_IMR_ADDR 0x14
-
-#define UARTDM_IPR_ADDR 0x18
-#define UARTDM_TFWR_ADDR 0x1c
-#define UARTDM_RFWR_ADDR 0x20
-#define UARTDM_HCR_ADDR 0x24
-#define UARTDM_DMRX_ADDR 0x34
-#define UARTDM_IRDA_ADDR 0x38
-#define UARTDM_DMEN_ADDR 0x3c
-
-/* UART_DM_NO_CHARS_FOR_TX */
-#define UARTDM_NCF_TX_ADDR 0x40
-
-#define UARTDM_BADR_ADDR 0x44
-
-#define UARTDM_SIM_CFG_ADDR 0x80
-/* Read Only register */
-#define UARTDM_SR_ADDR 0x8
-
-/* Read Only register */
-#define UARTDM_RF_ADDR  0x70
-#define UARTDM_RF2_ADDR 0x74
-#define UARTDM_RF3_ADDR 0x78
-#define UARTDM_RF4_ADDR 0x7C
-
-/* Read Only register */
-#define UARTDM_MISR_ADDR 0x10
-
-/* Read Only register */
-#define UARTDM_ISR_ADDR 0x14
-#define UARTDM_RX_TOTAL_SNAP_ADDR 0x38
-
-#define UARTDM_RXFS_ADDR 0x50
-
-/* Register field Mask Mapping */
-#define UARTDM_SR_PAR_FRAME_BMSK        BIT(5)
-#define UARTDM_SR_OVERRUN_BMSK          BIT(4)
-#define UARTDM_SR_TXEMT_BMSK            BIT(3)
-#define UARTDM_SR_TXRDY_BMSK            BIT(2)
-#define UARTDM_SR_RXRDY_BMSK            BIT(0)
-
-#define UARTDM_CR_TX_DISABLE_BMSK       BIT(3)
-#define UARTDM_CR_RX_DISABLE_BMSK       BIT(1)
-#define UARTDM_CR_TX_EN_BMSK            BIT(2)
-#define UARTDM_CR_RX_EN_BMSK            BIT(0)
-
-/* UARTDM_CR channel_comman bit value (register field is bits 8:4) */
-#define RESET_RX                0x10
-#define RESET_TX                0x20
-#define RESET_ERROR_STATUS      0x30
-#define RESET_BREAK_INT         0x40
-#define START_BREAK             0x50
-#define STOP_BREAK              0x60
-#define RESET_CTS               0x70
-#define RESET_STALE_INT         0x80
-#define RFR_LOW                 0xD0
-#define RFR_HIGH                0xE0
-#define CR_PROTECTION_EN        0x100
-#define STALE_EVENT_ENABLE      0x500
-#define STALE_EVENT_DISABLE     0x600
-#define FORCE_STALE_EVENT       0x400
-#define CLEAR_TX_READY          0x300
-#define RESET_TX_ERROR          0x800
-#define RESET_TX_DONE           0x810
-
-#define UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK 0xffffff00
-#define UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK 0x3f
-#define UARTDM_MR1_CTS_CTL_BMSK 0x40
-#define UARTDM_MR1_RX_RDY_CTL_BMSK 0x80
-
-#define UARTDM_MR2_ERROR_MODE_BMSK 0x40
-#define UARTDM_MR2_BITS_PER_CHAR_BMSK 0x30
-
-/* bits per character configuration */
-#define FIVE_BPC  (0 << 4)
-#define SIX_BPC   (1 << 4)
-#define SEVEN_BPC (2 << 4)
-#define EIGHT_BPC (3 << 4)
-
-#define UARTDM_MR2_STOP_BIT_LEN_BMSK 0xc
-#define STOP_BIT_ONE (1 << 2)
-#define STOP_BIT_TWO (3 << 2)
-
-#define UARTDM_MR2_PARITY_MODE_BMSK 0x3
-
-/* Parity configuration */
-#define NO_PARITY 0x0
-#define EVEN_PARITY 0x1
-#define ODD_PARITY 0x2
-#define SPACE_PARITY 0x3
-
-#define UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK 0xffffff80
-#define UARTDM_IPR_STALE_LSB_BMSK 0x1f
-
-/* These can be used for both ISR and IMR register */
-#define UARTDM_ISR_TX_READY_BMSK        BIT(7)
-#define UARTDM_ISR_CURRENT_CTS_BMSK     BIT(6)
-#define UARTDM_ISR_DELTA_CTS_BMSK       BIT(5)
-#define UARTDM_ISR_RXLEV_BMSK           BIT(4)
-#define UARTDM_ISR_RXSTALE_BMSK         BIT(3)
-#define UARTDM_ISR_RXBREAK_BMSK         BIT(2)
-#define UARTDM_ISR_RXHUNT_BMSK          BIT(1)
-#define UARTDM_ISR_TXLEV_BMSK           BIT(0)
-
-/* Field definitions for UART_DM_DMEN*/
-#define UARTDM_TX_DM_EN_BMSK 0x1
-#define UARTDM_RX_DM_EN_BMSK 0x2
-
-#define UART_FIFOSIZE 64
-#define UARTCLK 7372800
-
-/* Rx DMA request states */
 enum flush_reason {
 	FLUSH_NONE,
 	FLUSH_DATA_READY,
@@ -210,7 +76,6 @@
 	FLUSH_SHUTDOWN,
 };
 
-/* UART clock states */
 enum msm_hs_clk_states_e {
 	MSM_HS_CLK_PORT_OFF,     /* port not in use */
 	MSM_HS_CLK_OFF,          /* clock disabled */
@@ -227,27 +92,9 @@
 	CLK_REQ_OFF_RXSTALE_FLUSHED,
 };
 
-/**
- * struct msm_hs_tx
- * @tx_ready_int_en: ok to dma more tx?
- * @dma_in_flight: tx dma in progress
- * @xfer: top level DMA command pointer structure
- * @command_ptr: third level command struct pointer
- * @command_ptr_ptr: second level command list struct pointer
- * @mapped_cmd_ptr: DMA view of third level command struct
- * @mapped_cmd_ptr_ptr: DMA view of second level command list struct
- * @tx_count: number of bytes to transfer in DMA transfer
- * @dma_base: DMA view of UART xmit buffer
- *
- * This structure describes a single Tx DMA transaction. MSM DMA
- * commands have two levels of indirection. The top level command
- * ptr points to a list of command ptr which in turn points to a
- * single DMA 'command'. In our case each Tx transaction consists
- * of a single second level pointer pointing to a 'box type' command.
- */
 struct msm_hs_tx {
-	unsigned int tx_ready_int_en;
-	unsigned int dma_in_flight;
+	unsigned int tx_ready_int_en;  /* ok to dma more tx */
+	unsigned int dma_in_flight;    /* tx dma in progress */
 	struct msm_dmov_cmd xfer;
 	dmov_box *command_ptr;
 	u32 *command_ptr_ptr;
@@ -255,25 +102,9 @@
 	dma_addr_t mapped_cmd_ptr_ptr;
 	int tx_count;
 	dma_addr_t dma_base;
+	struct tasklet_struct tlet;
 };
 
-/**
- * struct msm_hs_rx
- * @flush: Rx DMA request state
- * @xfer: top level DMA command pointer structure
- * @cmdptr_dmaaddr: DMA view of second level command structure
- * @command_ptr: third level DMA command pointer structure
- * @command_ptr_ptr: second level DMA command list pointer
- * @mapped_cmd_ptr: DMA view of the third level command structure
- * @wait: wait for DMA completion before shutdown
- * @buffer: destination buffer for RX DMA
- * @rbuffer: DMA view of buffer
- * @pool: dma pool out of which coherent rx buffer is allocated
- * @tty_work: private work-queue for tty flip buffer push task
- *
- * This structure describes a single Rx DMA transaction. Rx DMA
- * transactions use box mode DMA commands.
- */
 struct msm_hs_rx {
 	enum flush_reason flush;
 	struct msm_dmov_cmd xfer;
@@ -284,127 +115,271 @@
 	wait_queue_head_t wait;
 	dma_addr_t rbuffer;
 	unsigned char *buffer;
+	unsigned int buffer_pending;
 	struct dma_pool *pool;
-	struct work_struct tty_work;
+	struct wake_lock wake_lock;
+	struct delayed_work flip_insert_work;
+	struct tasklet_struct tlet;
 };
 
-/**
- * struct msm_hs_rx_wakeup
- * @irq: IRQ line to be configured as interrupt source on Rx activity
- * @ignore: boolean value. 1 = ignore the wakeup interrupt
- * @rx_to_inject: extra character to be inserted to Rx tty on wakeup
- * @inject_rx: 1 = insert rx_to_inject. 0 = do not insert extra character
- *
- * This is an optional structure required for UART Rx GPIO IRQ based
- * wakeup from low power state. UART wakeup can be triggered by RX activity
- * (using a wakeup GPIO on the UART RX pin). This should only be used if
- * there is not a wakeup GPIO on the UART CTS, and the first RX byte is
- * known (eg., with the Bluetooth Texas Instruments HCILL protocol),
- * since the first RX byte will always be lost. RTS will be asserted even
- * while the UART is clocked off in this mode of operation.
- */
-struct msm_hs_rx_wakeup {
+enum buffer_states {
+	NONE_PENDING = 0x0,
+	FIFO_OVERRUN = 0x1,
+	PARITY_ERROR = 0x2,
+	CHARS_NORMAL = 0x4,
+};
+
+/* optional low power wakeup, typically on a GPIO RX irq */
+struct msm_hs_wakeup {
 	int irq;  /* < 0 indicates low power wakeup disabled */
-	unsigned char ignore;
+	unsigned char ignore;  /* bool */
+
+	/* bool: inject char into rx tty on wakeup */
 	unsigned char inject_rx;
 	char rx_to_inject;
 };
 
-/**
- * struct msm_hs_port
- * @uport: embedded uart port structure
- * @imr_reg: shadow value of UARTDM_IMR
- * @clk: uart input clock handle
- * @tx: Tx transaction related data structure
- * @rx: Rx transaction related data structure
- * @dma_tx_channel: Tx DMA command channel
- * @dma_rx_channel Rx DMA command channel
- * @dma_tx_crci: Tx channel rate control interface number
- * @dma_rx_crci: Rx channel rate control interface number
- * @clk_off_timer: Timer to poll DMA event completion before clock off
- * @clk_off_delay: clk_off_timer poll interval
- * @clk_state: overall clock state
- * @clk_req_off_state: post flush clock states
- * @rx_wakeup: optional rx_wakeup feature related data
- * @exit_lpm_cb: optional callback to exit low power mode
- *
- * Low level serial port structure.
- */
 struct msm_hs_port {
 	struct uart_port uport;
-	unsigned long imr_reg;
+	unsigned long imr_reg;  /* shadow value of UARTDM_IMR */
 	struct clk *clk;
+	struct clk *pclk;
 	struct msm_hs_tx tx;
 	struct msm_hs_rx rx;
-
+	/*
+	 * GSBI uarts have to do additional writes to the gsbi memory block
+	 * and the top control status block; the following pointer keeps a
+	 * handle to those blocks.
+	 */
+	unsigned char __iomem	*mapped_gsbi;
 	int dma_tx_channel;
 	int dma_rx_channel;
 	int dma_tx_crci;
 	int dma_rx_crci;
-
-	struct hrtimer clk_off_timer;
+	struct hrtimer clk_off_timer;  /* to poll TXEMT before clock off */
 	ktime_t clk_off_delay;
 	enum msm_hs_clk_states_e clk_state;
 	enum msm_hs_clk_req_off_state_e clk_req_off_state;
 
-	struct msm_hs_rx_wakeup rx_wakeup;
-	void (*exit_lpm_cb)(struct uart_port *);
+	struct msm_hs_wakeup wakeup;
+	struct wake_lock dma_wake_lock;  /* held while any DMA active */
 };
 
 #define MSM_UARTDM_BURST_SIZE 16   /* DM burst size (in bytes) */
 #define UARTDM_TX_BUF_SIZE UART_XMIT_SIZE
 #define UARTDM_RX_BUF_SIZE 512
-
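+/* delay (ms) before retrying a stalled tty flip-buffer insert */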
+#define RETRY_TIMEOUT 5
 #define UARTDM_NR 2
 
+static struct dentry *debug_base;
 static struct msm_hs_port q_uart_port[UARTDM_NR];
 static struct platform_driver msm_serial_hs_platform_driver;
 static struct uart_driver msm_hs_driver;
 static struct uart_ops msm_hs_ops;
-static struct workqueue_struct *msm_hs_workqueue;
 
 #define UARTDM_TO_MSM(uart_port) \
 	container_of((uart_port), struct msm_hs_port, uport)
 
-static unsigned int use_low_power_rx_wakeup(struct msm_hs_port
-						   *msm_uport)
+static ssize_t show_clock(struct device *dev, struct device_attribute *attr,
+			  char *buf)
 {
-	return (msm_uport->rx_wakeup.irq >= 0);
+	int state = 1;
+	enum msm_hs_clk_states_e clk_state;
+	unsigned long flags;
+
+	struct platform_device *pdev = container_of(dev, struct
+						    platform_device, dev);
+	struct msm_hs_port *msm_uport = &q_uart_port[pdev->id];
+
+	spin_lock_irqsave(&msm_uport->uport.lock, flags);
+	clk_state = msm_uport->clk_state;
+	spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
+
+	if (clk_state <= MSM_HS_CLK_OFF)
+		state = 0;
+
+	return sprintf(buf, "%d\n", state);
 }
 
-static unsigned int msm_hs_read(struct uart_port *uport,
+static ssize_t set_clock(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	int state;
+	struct platform_device *pdev = container_of(dev, struct
+						    platform_device, dev);
+	struct msm_hs_port *msm_uport = &q_uart_port[pdev->id];
+
+	state = buf[0] - '0';
+	switch (state) {
+	case 0: {
+		msm_hs_request_clock_off(&msm_uport->uport);
+		break;
+	}
+	case 1: {
+		msm_hs_request_clock_on(&msm_uport->uport);
+		break;
+	}
+	default: {
+		return -EINVAL;
+	}
+	}
+	return count;
+}
+
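+/*
+ * sysfs "clock" attribute: writing '1' or '0' requests the UART clock on or
+ * off; a read returns 1 while the clock is on, 0 otherwise.
+ */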
+static DEVICE_ATTR(clock, S_IWUSR | S_IRUGO, show_clock, set_clock);
+
+static inline unsigned int use_low_power_wakeup(struct msm_hs_port *msm_uport)
+{
+	return (msm_uport->wakeup.irq > 0);
+}
+
+static inline int is_gsbi_uart(struct msm_hs_port *msm_uport)
+{
+	/* assume gsbi uart if gsbi resource found in pdata */
+	return ((msm_uport->mapped_gsbi != NULL));
+}
+
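+/*
+ * Register accessors use relaxed I/O; callers add explicit mb() barriers
+ * where ordering against DMOV or clock operations matters.
+ */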
+static inline unsigned int msm_hs_read(struct uart_port *uport,
 				       unsigned int offset)
 {
-	return ioread32(uport->membase + offset);
+	return readl_relaxed(uport->membase + offset);
 }
 
-static void msm_hs_write(struct uart_port *uport, unsigned int offset,
+static inline void msm_hs_write(struct uart_port *uport, unsigned int offset,
 				 unsigned int value)
 {
-	iowrite32(value, uport->membase + offset);
+	writel_relaxed(value, uport->membase + offset);
 }
 
 static void msm_hs_release_port(struct uart_port *port)
 {
-	iounmap(port->membase);
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(port);
+	struct platform_device *pdev = to_platform_device(port->dev);
+	struct resource *gsbi_resource;
+	resource_size_t size;
+
+	if (is_gsbi_uart(msm_uport)) {
+		iowrite32(GSBI_PROTOCOL_IDLE, msm_uport->mapped_gsbi +
+			  GSBI_CONTROL_ADDR);
+		gsbi_resource = platform_get_resource_byname(pdev,
+							     IORESOURCE_MEM,
+							     "gsbi_resource");
+		size = gsbi_resource->end - gsbi_resource->start + 1;
+		release_mem_region(gsbi_resource->start, size);
+		iounmap(msm_uport->mapped_gsbi);
+		msm_uport->mapped_gsbi = NULL;
+	}
 }
 
 static int msm_hs_request_port(struct uart_port *port)
 {
-	port->membase = ioremap(port->mapbase, PAGE_SIZE);
-	if (unlikely(!port->membase))
-		return -ENOMEM;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(port);
+	struct platform_device *pdev = to_platform_device(port->dev);
+	struct resource *gsbi_resource;
+	resource_size_t size;
 
-	/* configure the CR Protection to Enable */
-	msm_hs_write(port, UARTDM_CR_ADDR, CR_PROTECTION_EN);
+	gsbi_resource = platform_get_resource_byname(pdev,
+						     IORESOURCE_MEM,
+						     "gsbi_resource");
+	if (gsbi_resource) {
+		size = gsbi_resource->end - gsbi_resource->start + 1;
+		if (unlikely(!request_mem_region(gsbi_resource->start, size,
+						 "msm_serial_hs")))
+			return -EBUSY;
+		msm_uport->mapped_gsbi = ioremap(gsbi_resource->start,
+						 size);
+		if (!msm_uport->mapped_gsbi) {
+			release_mem_region(gsbi_resource->start, size);
+			return -EBUSY;
+		}
+	}
+	/* no gsbi uart */
 	return 0;
 }
 
+static int msm_serial_loopback_enable_set(void *data, u64 val)
+{
+	struct msm_hs_port *msm_uport = data;
+	struct uart_port *uport = &(msm_uport->uport);
+	unsigned long flags;
+	int ret = 0;
+
+	clk_enable(msm_uport->clk);
+	if (msm_uport->pclk)
+		clk_enable(msm_uport->pclk);
+
+	if (val) {
+		spin_lock_irqsave(&uport->lock, flags);
+		ret = msm_hs_read(uport, UARTDM_MR2_ADDR);
+		ret |= UARTDM_MR2_LOOP_MODE_BMSK;
+		msm_hs_write(uport, UARTDM_MR2_ADDR, ret);
+		spin_unlock_irqrestore(&uport->lock, flags);
+	} else {
+		spin_lock_irqsave(&uport->lock, flags);
+		ret = msm_hs_read(uport, UARTDM_MR2_ADDR);
+		ret &= ~UARTDM_MR2_LOOP_MODE_BMSK;
+		msm_hs_write(uport, UARTDM_MR2_ADDR, ret);
+		spin_unlock_irqrestore(&uport->lock, flags);
+	}
+	/* The clock API is called next. Hence mb() is required here. */
+	mb();
+	clk_disable(msm_uport->clk);
+	if (msm_uport->pclk)
+		clk_disable(msm_uport->pclk);
+
+	return 0;
+}
+
+static int msm_serial_loopback_enable_get(void *data, u64 *val)
+{
+	struct msm_hs_port *msm_uport = data;
+	struct uart_port *uport = &(msm_uport->uport);
+	unsigned long flags;
+	int ret = 0;
+
+	clk_enable(msm_uport->clk);
+	if (msm_uport->pclk)
+		clk_enable(msm_uport->pclk);
+
+	spin_lock_irqsave(&uport->lock, flags);
+	ret = msm_hs_read(&msm_uport->uport, UARTDM_MR2_ADDR);
+	spin_unlock_irqrestore(&uport->lock, flags);
+
+	clk_disable(msm_uport->clk);
+	if (msm_uport->pclk)
+		clk_disable(msm_uport->pclk);
+
+	*val = (ret & UARTDM_MR2_LOOP_MODE_BMSK) ? 1 : 0;
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(loopback_enable_fops, msm_serial_loopback_enable_get,
+			msm_serial_loopback_enable_set, "%llu\n");
+
+/*
+ * msm_serial_hs debugfs node: <debugfs_root>/msm_serial_hs/loopback.<id>
+ * writing 1 turns on internal loopback mode in HW. Useful for automation
+ * test scripts.
+ * writing 0 disables the internal loopback mode. Default is disabled.
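+ * example: echo 1 > <debugfs_root>/msm_serial_hs/loopback.0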
+ */
+static void __init msm_serial_debugfs_init(struct msm_hs_port *msm_uport,
+					   int id)
+{
+	char node_name[15];
+	snprintf(node_name, sizeof(node_name), "loopback.%d", id);
+	if (IS_ERR_OR_NULL(debugfs_create_file(node_name,
+					       S_IRUGO | S_IWUSR,
+					       debug_base,
+					       msm_uport,
+					       &loopback_enable_fops))) {
+		debugfs_remove_recursive(debug_base);
+	}
+}
+
 static int __devexit msm_hs_remove(struct platform_device *pdev)
 {
 
 	struct msm_hs_port *msm_uport;
 	struct device *dev;
+	struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
+
 
 	if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
 		printk(KERN_ERR "Invalid plaform device ID = %d\n", pdev->id);
@@ -414,6 +389,13 @@
 	msm_uport = &q_uart_port[pdev->id];
 	dev = msm_uport->uport.dev;
 
+	if (pdata && pdata->gpio_config)
+		if (pdata->gpio_config(0))
+			dev_err(dev, "GPIO config error\n");
+
+	sysfs_remove_file(&pdev->dev.kobj, &dev_attr_clock.attr);
+	debugfs_remove_recursive(debug_base);
+
 	dma_unmap_single(dev, msm_uport->rx.mapped_cmd_ptr, sizeof(dmov_box),
 			 DMA_TO_DEVICE);
 	dma_pool_free(msm_uport->rx.pool, msm_uport->rx.buffer,
@@ -427,6 +409,9 @@
 	dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr, sizeof(dmov_box),
 			 DMA_TO_DEVICE);
 
+	wake_lock_destroy(&msm_uport->rx.wake_lock);
+	wake_lock_destroy(&msm_uport->dma_wake_lock);
+
 	uart_remove_one_port(&msm_hs_driver, &msm_uport->uport);
 	clk_put(msm_uport->clk);
 
@@ -443,64 +428,48 @@
 	return 0;
 }
 
-static int msm_hs_init_clk_locked(struct uart_port *uport)
+static int msm_hs_init_clk(struct uart_port *uport)
 {
 	int ret;
 	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
 
+	wake_lock(&msm_uport->dma_wake_lock);
+	/* Set up the MREG/NREG/DREG/MNDREG */
+	ret = clk_set_rate(msm_uport->clk, uport->uartclk);
+	if (ret) {
+		printk(KERN_WARNING "Error setting clock rate on UART\n");
+		wake_unlock(&msm_uport->dma_wake_lock);
+		return ret;
+	}
+
 	ret = clk_enable(msm_uport->clk);
 	if (ret) {
 		printk(KERN_ERR "Error could not turn on UART clk\n");
 		return ret;
 	}
-
-	/* Set up the MREG/NREG/DREG/MNDREG */
-	ret = clk_set_rate(msm_uport->clk, uport->uartclk);
-	if (ret) {
-		printk(KERN_WARNING "Error setting clock rate on UART\n");
-		clk_disable(msm_uport->clk);
-		return ret;
+	if (msm_uport->pclk) {
+		ret = clk_enable(msm_uport->pclk);
+		if (ret) {
+			dev_err(uport->dev,
+				"Error could not turn on UART pclk\n");
+			clk_disable(msm_uport->clk);
+			wake_unlock(&msm_uport->dma_wake_lock);
+			return ret;
+		}
 	}
 
 	msm_uport->clk_state = MSM_HS_CLK_ON;
 	return 0;
 }
 
-/* Enable and Disable clocks  (Used for power management) */
-static void msm_hs_pm(struct uart_port *uport, unsigned int state,
-		      unsigned int oldstate)
-{
-	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
-
-	if (use_low_power_rx_wakeup(msm_uport) ||
-	    msm_uport->exit_lpm_cb)
-		return;  /* ignore linux PM states,
-			    use msm_hs_request_clock API */
-
-	switch (state) {
-	case 0:
-		clk_enable(msm_uport->clk);
-		break;
-	case 3:
-		clk_disable(msm_uport->clk);
-		break;
-	default:
-		dev_err(uport->dev, "msm_serial: Unknown PM state %d\n",
-			state);
-	}
-}
-
 /*
  * programs the UARTDM_CSR register with correct bit rates
  *
  * Interrupts should be disabled before we are called, as
  * we modify Set Baud rate
- * Set receive stale interrupt level, dependent on Bit Rate
+ * Set receive stale interrupt level, dependent on Bit Rate
  * Goal is to have around 8 ms before indicating stale.
  * rxstale = roundup(((Bit Rate * .008) / 10) + 1)
  */
 static void msm_hs_set_bps_locked(struct uart_port *uport,
-				  unsigned int bps)
+			       unsigned int bps)
 {
 	unsigned long rxstale;
 	unsigned long data;
@@ -508,63 +477,63 @@
 
 	switch (bps) {
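+	/*
+	 * The hard-coded CSR values below replace the former UARTDM_CSR_*
+	 * constants (0x00 == UARTDM_CSR_75 ... 0xff == UARTDM_CSR_115200);
+	 * each selects the RX/TX clock divider for the requested bit rate.
+	 */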
 	case 300:
-		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_75);
+		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x00);
 		rxstale = 1;
 		break;
 	case 600:
-		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_150);
+		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x11);
 		rxstale = 1;
 		break;
 	case 1200:
-		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_300);
+		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x22);
 		rxstale = 1;
 		break;
 	case 2400:
-		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_600);
+		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x33);
 		rxstale = 1;
 		break;
 	case 4800:
-		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_1200);
+		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x44);
 		rxstale = 1;
 		break;
 	case 9600:
-		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_2400);
+		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x55);
 		rxstale = 2;
 		break;
 	case 14400:
-		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_3600);
+		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x66);
 		rxstale = 3;
 		break;
 	case 19200:
-		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_4800);
+		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x77);
 		rxstale = 4;
 		break;
 	case 28800:
-		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_7200);
+		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x88);
 		rxstale = 6;
 		break;
 	case 38400:
-		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_9600);
+		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x99);
 		rxstale = 8;
 		break;
 	case 57600:
-		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_14400);
+		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xaa);
 		rxstale = 16;
 		break;
 	case 76800:
-		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_19200);
+		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xbb);
 		rxstale = 16;
 		break;
 	case 115200:
-		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_28800);
+		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xcc);
 		rxstale = 31;
 		break;
 	case 230400:
-		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_57600);
+		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xee);
 		rxstale = 31;
 		break;
 	case 460800:
-		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_115200);
+		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xff);
 		rxstale = 31;
 		break;
 	case 4000000:
@@ -577,21 +546,28 @@
 	case 1152000:
 	case 1000000:
 	case 921600:
-		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_115200);
+		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xff);
 		rxstale = 31;
 		break;
 	default:
-		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_2400);
+		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xff);
 		/* default to 9600 */
 		bps = 9600;
 		rxstale = 2;
 		break;
 	}
-	if (bps > 460800)
+	/*
+	 * The uart baud rate depends on both the CSR and the MND values.
+	 * The CSR is updated above and clk_set_rate() below updates the
+	 * MND values, so a barrier is required between the two.
+	 */
+	mb();
+	if (bps > 460800) {
 		uport->uartclk = bps * 16;
-	else
-		uport->uartclk = UARTCLK;
-
+	} else {
+		uport->uartclk = 7372800;
+	}
 	if (clk_set_rate(msm_uport->clk, uport->uartclk)) {
 		printk(KERN_WARNING "Error setting clock rate on UART\n");
 		return;
@@ -603,6 +579,56 @@
 	msm_hs_write(uport, UARTDM_IPR_ADDR, data);
 }
 
+
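+/*
+ * Baud table used when clk_get_rate() reports no programmable rate: only
+ * the CSR is written, so just the standard rates up to 115200 are handled.
+ */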
+static void msm_hs_set_std_bps_locked(struct uart_port *uport,
+			       unsigned int bps)
+{
+	unsigned long rxstale;
+	unsigned long data;
+
+	switch (bps) {
+	case 9600:
+		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x99);
+		rxstale = 2;
+		break;
+	case 14400:
+		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xaa);
+		rxstale = 3;
+		break;
+	case 19200:
+		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xbb);
+		rxstale = 4;
+		break;
+	case 28800:
+		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xcc);
+		rxstale = 6;
+		break;
+	case 38400:
+		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xdd);
+		rxstale = 8;
+		break;
+	case 57600:
+		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xee);
+		rxstale = 16;
+		break;
+	case 115200:
+		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xff);
+		rxstale = 31;
+		break;
+	default:
+		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x99);
+		/* default to 9600 */
+		bps = 9600;
+		rxstale = 2;
+		break;
+	}
+
+	data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
+	data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);
+
+	msm_hs_write(uport, UARTDM_IPR_ADDR, data);
+}
+
 /*
  * termios :  new ktermios
  * oldtermios:  old ktermios previous setting
@@ -610,8 +636,8 @@
  * Configure the serial port
  */
 static void msm_hs_set_termios(struct uart_port *uport,
-			       struct ktermios *termios,
-			       struct ktermios *oldtermios)
+				   struct ktermios *termios,
+				   struct ktermios *oldtermios)
 {
 	unsigned int bps;
 	unsigned long data;
@@ -629,18 +655,23 @@
 	if (bps == 200)
 		bps = 3200000;
 
-	msm_hs_set_bps_locked(uport, bps);
+	uport->uartclk = clk_get_rate(msm_uport->clk);
+	if (!uport->uartclk)
+		msm_hs_set_std_bps_locked(uport, bps);
+	else
+		msm_hs_set_bps_locked(uport, bps);
 
 	data = msm_hs_read(uport, UARTDM_MR2_ADDR);
 	data &= ~UARTDM_MR2_PARITY_MODE_BMSK;
 	/* set parity */
 	if (PARENB == (c_cflag & PARENB)) {
-		if (PARODD == (c_cflag & PARODD))
+		if (PARODD == (c_cflag & PARODD)) {
 			data |= ODD_PARITY;
-		else if (CMSPAR == (c_cflag & CMSPAR))
+		} else if (CMSPAR == (c_cflag & CMSPAR)) {
 			data |= SPACE_PARITY;
-		else
+		} else {
 			data |= EVEN_PARITY;
+		}
 	}
 
 	/* Set bits per char */
@@ -696,12 +727,22 @@
 	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX);
 
 	if (msm_uport->rx.flush == FLUSH_NONE) {
+		wake_lock(&msm_uport->rx.wake_lock);
 		msm_uport->rx.flush = FLUSH_IGNORE;
-		msm_dmov_stop_cmd(msm_uport->dma_rx_channel, NULL, 1);
+		/*
+		 * Make sure the previous register writes have completed
+		 * before using the dmov APIs. Hence the barrier here.
+		 */
+		mb();
+		/* do discard flush */
+		msm_dmov_stop_cmd(msm_uport->dma_rx_channel,
+				  &msm_uport->rx.xfer, 0);
 	}
 
 	msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
-
+	/* complete the IMR write before calling the clk_disable() API */
+	mb();
 	clk_disable(msm_uport->clk);
 	spin_unlock_irqrestore(&uport->lock, flags);
 }
@@ -710,7 +751,7 @@
  *  Standard API, Transmitter
  *  Any character in the transmit shift register is sent
  */
-static unsigned int msm_hs_tx_empty(struct uart_port *uport)
+unsigned int msm_hs_tx_empty(struct uart_port *uport)
 {
 	unsigned int data;
 	unsigned int ret = 0;
@@ -726,6 +767,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL(msm_hs_tx_empty);
 
 /*
  *  Standard API, Stop transmitter.
@@ -759,10 +801,15 @@
 	data &= ~UARTDM_RX_DM_EN_BMSK;
 	msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
 
+	/* the DMOV and clock APIs are called next. Hence mb() */
+	mb();
 	/* Disable the receiver */
-	if (msm_uport->rx.flush == FLUSH_NONE)
-		msm_dmov_stop_cmd(msm_uport->dma_rx_channel, NULL, 1);
-
+	if (msm_uport->rx.flush == FLUSH_NONE) {
+		wake_lock(&msm_uport->rx.wake_lock);
+		/* do discard flush */
+		msm_dmov_stop_cmd(msm_uport->dma_rx_channel,
+				  &msm_uport->rx.xfer, 0);
+	}
 	if (msm_uport->rx.flush != FLUSH_SHUTDOWN)
 		msm_uport->rx.flush = FLUSH_STOP;
 
@@ -774,7 +821,9 @@
 {
 	int left;
 	int tx_count;
+	int aligned_tx_count;
 	dma_addr_t src_addr;
+	dma_addr_t aligned_src_addr;
 	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
 	struct msm_hs_tx *tx = &msm_uport->tx;
 	struct circ_buf *tx_buf = &msm_uport->uport.state->xmit;
@@ -797,8 +846,13 @@
 		tx_count = left;
 
 	src_addr = tx->dma_base + tx_buf->tail;
-	dma_sync_single_for_device(uport->dev, src_addr, tx_count,
-				   DMA_TO_DEVICE);
+	/* Mask src_addr down to a cache-line boundary and add the
+	 * skipped bytes to the length being synced */
+	aligned_src_addr = src_addr & ~(dma_get_cache_alignment() - 1);
+	aligned_tx_count = tx_count + src_addr - aligned_src_addr;
+
+	dma_sync_single_for_device(uport->dev, aligned_src_addr,
+			aligned_tx_count, DMA_TO_DEVICE);
 
 	tx->command_ptr->num_rows = (((tx_count + 15) >> 4) << 16) |
 				     ((tx_count + 15) >> 4);
@@ -809,9 +863,6 @@
 
 	*tx->command_ptr_ptr = CMD_PTR_LP | DMOV_CMD_ADDR(tx->mapped_cmd_ptr);
 
-	dma_sync_single_for_device(uport->dev, tx->mapped_cmd_ptr_ptr,
-				   sizeof(u32 *), DMA_TO_DEVICE);
-
 	/* Save tx_count to use in Callback */
 	tx->tx_count = tx_count;
 	msm_hs_write(uport, UARTDM_NCF_TX_ADDR, tx_count);
@@ -819,6 +870,12 @@
 	/* Disable the tx_ready interrupt */
 	msm_uport->imr_reg &= ~UARTDM_ISR_TX_READY_BMSK;
 	msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
+	/* Calling next DMOV API. Hence mb() here. */
+	mb();
+
+	dma_sync_single_for_device(uport->dev, tx->mapped_cmd_ptr_ptr,
+				   sizeof(u32 *), DMA_TO_DEVICE);
+
 	msm_dmov_enqueue_cmd(msm_uport->dma_tx_channel, &tx->xfer);
 }
 
@@ -826,34 +883,197 @@
 static void msm_hs_start_rx_locked(struct uart_port *uport)
 {
 	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	unsigned int buffer_pending = msm_uport->rx.buffer_pending;
+
+	msm_uport->rx.buffer_pending = 0;
+	if (buffer_pending && hs_serial_debug_mask)
+		printk(KERN_ERR "Error: rx started in buffer state = %x\n",
+		       buffer_pending);
 
 	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);
 	msm_hs_write(uport, UARTDM_DMRX_ADDR, UARTDM_RX_BUF_SIZE);
 	msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_ENABLE);
 	msm_uport->imr_reg |= UARTDM_ISR_RXLEV_BMSK;
 	msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
+	/* Calling next DMOV API. Hence mb() here. */
+	mb();
 
 	msm_uport->rx.flush = FLUSH_NONE;
 	msm_dmov_enqueue_cmd(msm_uport->dma_rx_channel, &msm_uport->rx.xfer);
 
-	/* might have finished RX and be ready to clock off */
-	hrtimer_start(&msm_uport->clk_off_timer, msm_uport->clk_off_delay,
-			HRTIMER_MODE_REL);
+}
+
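+/*
+ * Delayed work: retry pushing error flags and leftover rx characters that
+ * did not fit in the tty flip buffer, then restart RX once it all drains.
+ */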
+static void flip_insert_work(struct work_struct *work)
+{
+	unsigned long flags;
+	int retval;
+	struct msm_hs_port *msm_uport =
+		container_of(work, struct msm_hs_port,
+			     rx.flip_insert_work.work);
+	struct tty_struct *tty = msm_uport->uport.state->port.tty;
+
+	spin_lock_irqsave(&msm_uport->uport.lock, flags);
+	if (msm_uport->rx.buffer_pending == NONE_PENDING) {
+		if (hs_serial_debug_mask)
+			printk(KERN_ERR "Error: No buffer pending in %s\n",
+			       __func__);
+		spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
+		return;
+	}
+	if (msm_uport->rx.buffer_pending & FIFO_OVERRUN) {
+		retval = tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+		if (retval)
+			msm_uport->rx.buffer_pending &= ~FIFO_OVERRUN;
+	}
+	if (msm_uport->rx.buffer_pending & PARITY_ERROR) {
+		retval = tty_insert_flip_char(tty, 0, TTY_PARITY);
+		if (retval)
+			msm_uport->rx.buffer_pending &= ~PARITY_ERROR;
+	}
+	if (msm_uport->rx.buffer_pending & CHARS_NORMAL) {
+		int rx_count, rx_offset;
+		rx_count = (msm_uport->rx.buffer_pending & 0xFFFF0000) >> 16;
+		rx_offset = (msm_uport->rx.buffer_pending & 0xFFD0) >> 5;
+		retval = tty_insert_flip_string(tty, msm_uport->rx.buffer +
+						rx_offset, rx_count);
+		msm_uport->rx.buffer_pending &= (FIFO_OVERRUN |
+						 PARITY_ERROR);
+		if (retval != rx_count)
+			msm_uport->rx.buffer_pending |= CHARS_NORMAL |
+				retval << 8 | (rx_count - retval) << 16;
+	}
+	if (msm_uport->rx.buffer_pending)
+		schedule_delayed_work(&msm_uport->rx.flip_insert_work,
+				      msecs_to_jiffies(RETRY_TIMEOUT));
+	else
+		if ((msm_uport->clk_state == MSM_HS_CLK_ON) &&
+		    (msm_uport->rx.flush <= FLUSH_IGNORE)) {
+			if (hs_serial_debug_mask)
+				printk(KERN_WARNING
+				       "msm_serial_hs: "
+				       "Pending buffers cleared. "
+				       "Restarting\n");
+			msm_hs_start_rx_locked(&msm_uport->uport);
+		}
+	spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
+	tty_flip_buffer_push(tty);
+}
+
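+/*
+ * RX DMA completion bottom half: push data and error flags to the tty and
+ * queue the next RX transfer (or defer to flip_insert_work() when the tty
+ * flip buffer is full).
+ */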
+static void msm_serial_hs_rx_tlet(unsigned long tlet_ptr)
+{
+	int retval;
+	int rx_count;
+	unsigned long status;
+	unsigned long flags;
+	unsigned int error_f = 0;
+	struct uart_port *uport;
+	struct msm_hs_port *msm_uport;
+	unsigned int flush;
+	struct tty_struct *tty;
+
+	msm_uport = container_of((struct tasklet_struct *)tlet_ptr,
+				 struct msm_hs_port, rx.tlet);
+	uport = &msm_uport->uport;
+	tty = uport->state->port.tty;
+
+	status = msm_hs_read(uport, UARTDM_SR_ADDR);
+
+	spin_lock_irqsave(&uport->lock, flags);
+
+	clk_enable(msm_uport->clk);
+	msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE);
+
+	/* overflow is not connected to data in a FIFO */
+	if (unlikely((status & UARTDM_SR_OVERRUN_BMSK) &&
+		     (uport->read_status_mask & CREAD))) {
+		retval = tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+		if (!retval)
+			msm_uport->rx.buffer_pending |= FIFO_OVERRUN;
+		uport->icount.buf_overrun++;
+		error_f = 1;
+	}
+
+	if (!(uport->ignore_status_mask & INPCK))
+		status = status & ~(UARTDM_SR_PAR_FRAME_BMSK);
+
+	if (unlikely(status & UARTDM_SR_PAR_FRAME_BMSK)) {
+		/* Can not tell difference between parity & frame error */
+		uport->icount.parity++;
+		error_f = 1;
+		if (uport->ignore_status_mask & IGNPAR) {
+			retval = tty_insert_flip_char(tty, 0, TTY_PARITY);
+			if (!retval)
+				msm_uport->rx.buffer_pending |= PARITY_ERROR;
+		}
+	}
+
+	if (error_f)
+		msm_hs_write(uport, UARTDM_CR_ADDR, RESET_ERROR_STATUS);
+
+	if (msm_uport->clk_req_off_state == CLK_REQ_OFF_FLUSH_ISSUED)
+		msm_uport->clk_req_off_state = CLK_REQ_OFF_RXSTALE_FLUSHED;
+	flush = msm_uport->rx.flush;
+	if (flush == FLUSH_IGNORE)
+		if (!msm_uport->rx.buffer_pending)
+			msm_hs_start_rx_locked(uport);
+
+	if (flush == FLUSH_STOP) {
+		msm_uport->rx.flush = FLUSH_SHUTDOWN;
+		wake_up(&msm_uport->rx.wait);
+	}
+	if (flush >= FLUSH_DATA_INVALID)
+		goto out;
+
+	rx_count = msm_hs_read(uport, UARTDM_RX_TOTAL_SNAP_ADDR);
+
+	/* order the read of rx.buffer */
+	rmb();
+
+	if (0 != (uport->read_status_mask & CREAD)) {
+		retval = tty_insert_flip_string(tty, msm_uport->rx.buffer,
+						rx_count);
+		if (retval != rx_count) {
+			msm_uport->rx.buffer_pending |= CHARS_NORMAL |
+				retval << 5 | (rx_count - retval) << 16;
+		}
+	}
+
+	/* order the read of rx.buffer and the start of next rx xfer */
+	wmb();
+
+	if (!msm_uport->rx.buffer_pending)
+		msm_hs_start_rx_locked(uport);
+
+out:
+	if (msm_uport->rx.buffer_pending) {
+		if (hs_serial_debug_mask)
+			printk(KERN_WARNING
+			       "msm_serial_hs: "
+			       "tty buffer exhausted. "
+			       "Stalling\n");
+		schedule_delayed_work(&msm_uport->rx.flip_insert_work
+				      , msecs_to_jiffies(RETRY_TIMEOUT));
+	}
+	clk_disable(msm_uport->clk);
+	/* release wakelock in 500ms, not immediately, because higher layers
+	 * don't always take wakelocks when they should */
+	wake_lock_timeout(&msm_uport->rx.wake_lock, HZ / 2);
+	/* tty_flip_buffer_push() might call msm_hs_start(), so unlock */
+	spin_unlock_irqrestore(&uport->lock, flags);
+	if (flush < FLUSH_DATA_INVALID)
+		tty_flip_buffer_push(tty);
 }
 
 /* Enable the transmitter Interrupt */
-static void msm_hs_start_tx_locked(struct uart_port *uport)
+static void msm_hs_start_tx_locked(struct uart_port *uport)
 {
 	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
 
 	clk_enable(msm_uport->clk);
 
-	if (msm_uport->exit_lpm_cb)
-		msm_uport->exit_lpm_cb(uport);
-
 	if (msm_uport->tx.tx_ready_int_en == 0) {
 		msm_uport->tx.tx_ready_int_en = 1;
-		msm_hs_submit_tx_locked(uport);
+		if (msm_uport->tx.dma_in_flight == 0)
+			msm_hs_submit_tx_locked(uport);
 	}
 
 	clk_disable(msm_uport->clk);
@@ -870,23 +1090,31 @@
 					unsigned int result,
 					struct msm_dmov_errdata *err)
 {
-	unsigned long flags;
 	struct msm_hs_port *msm_uport;
 
-	/* DMA did not finish properly */
-	WARN_ON((((result & RSLT_FIFO_CNTR_BMSK) >> 28) == 1) &&
-		!(result & RSLT_VLD));
+	WARN_ON(result != 0x80000002);  /* DMA did not finish properly */
 
 	msm_uport = container_of(cmd_ptr, struct msm_hs_port, tx.xfer);
 
-	spin_lock_irqsave(&msm_uport->uport.lock, flags);
+	tasklet_schedule(&msm_uport->tx.tlet);
+}
+
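+/*
+ * TX DMA completion bottom half: re-enable the TX_READY interrupt so the
+ * ISR can complete this transfer and submit the next one.
+ */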
+static void msm_serial_hs_tx_tlet(unsigned long tlet_ptr)
+{
+	unsigned long flags;
+	struct msm_hs_port *msm_uport = container_of((struct tasklet_struct *)
+				tlet_ptr, struct msm_hs_port, tx.tlet);
+
+	spin_lock_irqsave(&(msm_uport->uport.lock), flags);
 	clk_enable(msm_uport->clk);
 
 	msm_uport->imr_reg |= UARTDM_ISR_TX_READY_BMSK;
-	msm_hs_write(&msm_uport->uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
+	msm_hs_write(&(msm_uport->uport), UARTDM_IMR_ADDR, msm_uport->imr_reg);
+	/* The clock API is called next. Hence mb() is required. */
+	mb();
 
 	clk_disable(msm_uport->clk);
-	spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
+	spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
 }
 
 /*
@@ -900,87 +1128,11 @@
 					unsigned int result,
 					struct msm_dmov_errdata *err)
 {
-	int retval;
-	int rx_count;
-	unsigned long status;
-	unsigned int error_f = 0;
-	unsigned long flags;
-	unsigned int flush;
-	struct tty_struct *tty;
-	struct uart_port *uport;
 	struct msm_hs_port *msm_uport;
 
 	msm_uport = container_of(cmd_ptr, struct msm_hs_port, rx.xfer);
-	uport = &msm_uport->uport;
 
-	spin_lock_irqsave(&uport->lock, flags);
-	clk_enable(msm_uport->clk);
-
-	tty = uport->state->port.tty;
-
-	msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE);
-
-	status = msm_hs_read(uport, UARTDM_SR_ADDR);
-
-	/* overflow is not connect to data in a FIFO */
-	if (unlikely((status & UARTDM_SR_OVERRUN_BMSK) &&
-		     (uport->read_status_mask & CREAD))) {
-		tty_insert_flip_char(tty, 0, TTY_OVERRUN);
-		uport->icount.buf_overrun++;
-		error_f = 1;
-	}
-
-	if (!(uport->ignore_status_mask & INPCK))
-		status = status & ~(UARTDM_SR_PAR_FRAME_BMSK);
-
-	if (unlikely(status & UARTDM_SR_PAR_FRAME_BMSK)) {
-		/* Can not tell difference between parity & frame error */
-		uport->icount.parity++;
-		error_f = 1;
-		if (uport->ignore_status_mask & IGNPAR)
-			tty_insert_flip_char(tty, 0, TTY_PARITY);
-	}
-
-	if (error_f)
-		msm_hs_write(uport, UARTDM_CR_ADDR, RESET_ERROR_STATUS);
-
-	if (msm_uport->clk_req_off_state == CLK_REQ_OFF_FLUSH_ISSUED)
-		msm_uport->clk_req_off_state = CLK_REQ_OFF_RXSTALE_FLUSHED;
-
-	flush = msm_uport->rx.flush;
-	if (flush == FLUSH_IGNORE)
-		msm_hs_start_rx_locked(uport);
-	if (flush == FLUSH_STOP)
-		msm_uport->rx.flush = FLUSH_SHUTDOWN;
-	if (flush >= FLUSH_DATA_INVALID)
-		goto out;
-
-	rx_count = msm_hs_read(uport, UARTDM_RX_TOTAL_SNAP_ADDR);
-
-	if (0 != (uport->read_status_mask & CREAD)) {
-		retval = tty_insert_flip_string(tty, msm_uport->rx.buffer,
-						rx_count);
-		BUG_ON(retval != rx_count);
-	}
-
-	msm_hs_start_rx_locked(uport);
-
-out:
-	clk_disable(msm_uport->clk);
-
-	spin_unlock_irqrestore(&uport->lock, flags);
-
-	if (flush < FLUSH_DATA_INVALID)
-		queue_work(msm_hs_workqueue, &msm_uport->rx.tty_work);
-}
-
-static void msm_hs_tty_flip_buffer_work(struct work_struct *work)
-{
-	struct msm_hs_port *msm_uport =
-			container_of(work, struct msm_hs_port, rx.tty_work);
-	struct tty_struct *tty = msm_uport->uport.state->port.tty;
-
-	tty_flip_buffer_push(tty);
+	tasklet_schedule(&msm_uport->rx.tlet);
 }
 
 /*
@@ -1002,46 +1154,53 @@
 }
 
 /*
- * True enables UART auto RFR, which indicates we are ready for data if the RX
- * buffer is not full. False disables auto RFR, and deasserts RFR to indicate
- * we are not ready for data. Must be called with UART clock on.
+ *  Standard API, set or clear the RFR signal
+ *
+ * To set RFR high (indicating we are not ready for data) we disable auto
+ * ready-for-receiving and then drive RFR_N high. To set RFR low we simply
+ * re-enable auto ready-for-receiving and the hardware lowers the RFR
+ * signal when it is ready.
  */
-static void set_rfr_locked(struct uart_port *uport, int auto_rfr)
-{
-	unsigned int data;
-
-	data = msm_hs_read(uport, UARTDM_MR1_ADDR);
-
-	if (auto_rfr) {
-		/* enable auto ready-for-receiving */
-		data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
-		msm_hs_write(uport, UARTDM_MR1_ADDR, data);
-	} else {
-		/* disable auto ready-for-receiving */
-		data &= ~UARTDM_MR1_RX_RDY_CTL_BMSK;
-		msm_hs_write(uport, UARTDM_MR1_ADDR, data);
-		/* RFR is active low, set high */
-		msm_hs_write(uport, UARTDM_CR_ADDR, RFR_HIGH);
-	}
-}
-
-/*
- *  Standard API, used to set or clear RFR
- */
-static void msm_hs_set_mctrl_locked(struct uart_port *uport,
+void msm_hs_set_mctrl_locked(struct uart_port *uport,
 				    unsigned int mctrl)
 {
-	unsigned int auto_rfr;
+	unsigned int set_rts;
+	unsigned int data;
 	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
 
 	clk_enable(msm_uport->clk);
 
-	auto_rfr = TIOCM_RTS & mctrl ? 1 : 0;
-	set_rfr_locked(uport, auto_rfr);
+	/* RTS is active low */
+	set_rts = TIOCM_RTS & mctrl ? 0 : 1;
 
+	data = msm_hs_read(uport, UARTDM_MR1_ADDR);
+	if (set_rts) {
+		/*disable auto ready-for-receiving */
+		data &= ~UARTDM_MR1_RX_RDY_CTL_BMSK;
+		msm_hs_write(uport, UARTDM_MR1_ADDR, data);
+		/* set RFR_N to high */
+		msm_hs_write(uport, UARTDM_CR_ADDR, RFR_HIGH);
+	} else {
+		/* Enable auto ready-for-receiving */
+		data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
+		msm_hs_write(uport, UARTDM_MR1_ADDR, data);
+	}
+	/* The clock API is called next. Hence mb() is required. */
+	mb();
 	clk_disable(msm_uport->clk);
 }
 
+void msm_hs_set_mctrl(struct uart_port *uport,
+				    unsigned int mctrl)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&uport->lock, flags);
+	msm_hs_set_mctrl_locked(uport, mctrl);
+	spin_unlock_irqrestore(&uport->lock, flags);
+}
+EXPORT_SYMBOL(msm_hs_set_mctrl);
+
 /* Standard API, Enable modem status (CTS) interrupt  */
 static void msm_hs_enable_ms_locked(struct uart_port *uport)
 {
@@ -1052,6 +1211,8 @@
 	/* Enable DELTA_CTS Interrupt */
 	msm_uport->imr_reg |= UARTDM_ISR_DELTA_CTS_BMSK;
 	msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
+	/* The clock API is called next. Hence mb() is required here. */
+	mb();
 
 	clk_disable(msm_uport->clk);
 
@@ -1065,40 +1226,49 @@
  */
 static void msm_hs_break_ctl(struct uart_port *uport, int ctl)
 {
+	unsigned long flags;
 	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
 
+	spin_lock_irqsave(&uport->lock, flags);
 	clk_enable(msm_uport->clk);
 	msm_hs_write(uport, UARTDM_CR_ADDR, ctl ? START_BREAK : STOP_BREAK);
+	/* The clock API is called next. Hence mb() is required here. */
+	mb();
 	clk_disable(msm_uport->clk);
+	spin_unlock_irqrestore(&uport->lock, flags);
 }
 
 static void msm_hs_config_port(struct uart_port *uport, int cfg_flags)
 {
 	unsigned long flags;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
 
 	spin_lock_irqsave(&uport->lock, flags);
 	if (cfg_flags & UART_CONFIG_TYPE) {
 		uport->type = PORT_MSM;
 		msm_hs_request_port(uport);
 	}
+	if (is_gsbi_uart(msm_uport)) {
+		iowrite32(GSBI_PROTOCOL_UART, msm_uport->mapped_gsbi +
+			  GSBI_CONTROL_ADDR);
+	}
 	spin_unlock_irqrestore(&uport->lock, flags);
 }
 
 /*  Handle CTS changes (Called from interrupt handler) */
-static void msm_hs_handle_delta_cts(struct uart_port *uport)
+static void msm_hs_handle_delta_cts_locked(struct uart_port *uport)
 {
-	unsigned long flags;
 	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
 
-	spin_lock_irqsave(&uport->lock, flags);
 	clk_enable(msm_uport->clk);
 
 	/* clear interrupt */
 	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_CTS);
+	/* The clock API is called next. Hence mb() is required here. */
+	mb();
 	uport->icount.cts++;
 
 	clk_disable(msm_uport->clk);
-	spin_unlock_irqrestore(&uport->lock, flags);
 
 	/* clear the IOCTL TIOCMIWAIT if called */
 	wake_up_interruptible(&uport->state->port.delta_msr_wait);
@@ -1116,11 +1286,10 @@
 	struct circ_buf *tx_buf = &uport->state->xmit;
 
 	/* Cancel if tx tty buffer is not empty, dma is in flight,
-	 * or tx fifo is not empty, or rx fifo is not empty */
+	 * or tx fifo is not empty */
 	if (msm_uport->clk_state != MSM_HS_CLK_REQUEST_OFF ||
 	    !uart_circ_empty(tx_buf) || msm_uport->tx.dma_in_flight ||
-	    (msm_uport->imr_reg & UARTDM_ISR_TXLEV_BMSK) ||
-	    !(msm_uport->imr_reg & UARTDM_ISR_RXLEV_BMSK))  {
+	    msm_uport->imr_reg & UARTDM_ISR_TXLEV_BMSK) {
 		return -1;
 	}
 
@@ -1134,6 +1303,11 @@
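+	/*
+	 * Force an RX stale event; the clock is only switched off once the
+	 * resulting flush has completed (CLK_REQ_OFF_RXSTALE_FLUSHED).
+	 */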
 	case CLK_REQ_OFF_START:
 		msm_uport->clk_req_off_state = CLK_REQ_OFF_RXSTALE_ISSUED;
 		msm_hs_write(uport, UARTDM_CR_ADDR, FORCE_STALE_EVENT);
+		/*
+		 * Make sure the device write has completed before returning.
+		 * Hence mb() is required here.
+		 */
+		mb();
 		return 0;  /* RXSTALE flush not complete - retry */
 	case CLK_REQ_OFF_RXSTALE_ISSUED:
 	case CLK_REQ_OFF_FLUSH_ISSUED:
@@ -1150,17 +1324,18 @@
 
 	/* we really want to clock off */
 	clk_disable(msm_uport->clk);
+	if (msm_uport->pclk)
+		clk_disable(msm_uport->pclk);
 	msm_uport->clk_state = MSM_HS_CLK_OFF;
-
-	if (use_low_power_rx_wakeup(msm_uport)) {
-		msm_uport->rx_wakeup.ignore = 1;
-		enable_irq(msm_uport->rx_wakeup.irq);
+	if (use_low_power_wakeup(msm_uport)) {
+		msm_uport->wakeup.ignore = 1;
+		enable_irq(msm_uport->wakeup.irq);
 	}
+	wake_unlock(&msm_uport->dma_wake_lock);
 	return 1;
 }
 
-static enum hrtimer_restart msm_hs_clk_off_retry(struct hrtimer *timer)
-{
+static enum hrtimer_restart msm_hs_clk_off_retry(struct hrtimer *timer) {
 	unsigned long flags;
 	int ret = HRTIMER_NORESTART;
 	struct msm_hs_port *msm_uport = container_of(timer, struct msm_hs_port,
@@ -1183,7 +1358,7 @@
 {
 	unsigned long flags;
 	unsigned long isr_status;
-	struct msm_hs_port *msm_uport = dev;
+	struct msm_hs_port *msm_uport = (struct msm_hs_port *)dev;
 	struct uart_port *uport = &msm_uport->uport;
 	struct circ_buf *tx_buf = &uport->state->xmit;
 	struct msm_hs_tx *tx = &msm_uport->tx;
@@ -1195,20 +1370,29 @@
 
 	/* Uart RX starting */
 	if (isr_status & UARTDM_ISR_RXLEV_BMSK) {
+		wake_lock(&rx->wake_lock);  /* hold wakelock while rx dma */
 		msm_uport->imr_reg &= ~UARTDM_ISR_RXLEV_BMSK;
 		msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
+		/* Complete the IMR device write. Hence mb() is required. */
+		mb();
 	}
 	/* Stale rx interrupt */
 	if (isr_status & UARTDM_ISR_RXSTALE_BMSK) {
 		msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE);
 		msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);
+		/*
+		 * Complete the device write before calling the DMOV API.
+		 * Hence mb() is required here.
+		 */
+		mb();
 
 		if (msm_uport->clk_req_off_state == CLK_REQ_OFF_RXSTALE_ISSUED)
 			msm_uport->clk_req_off_state =
-					CLK_REQ_OFF_FLUSH_ISSUED;
+				CLK_REQ_OFF_FLUSH_ISSUED;
+
 		if (rx->flush == FLUSH_NONE) {
 			rx->flush = FLUSH_DATA_READY;
-			msm_dmov_stop_cmd(msm_uport->dma_rx_channel, NULL, 1);
+			msm_dmov_flush(msm_uport->dma_rx_channel);
 		}
 	}
 	/* tx ready interrupt */
@@ -1221,7 +1405,11 @@
 			msm_hs_write(uport, UARTDM_IMR_ADDR,
 				     msm_uport->imr_reg);
 		}
-
+		/*
+		 * Complete both writes before starting new TX.
+		 * Hence mb() is required here.
+		 */
+		mb();
 		/* Complete DMA TX transactions and submit new transactions */
 		tx_buf->tail = (tx_buf->tail + tx->tx_count) & ~UART_XMIT_SIZE;
 
@@ -1238,6 +1426,11 @@
 		/* TX FIFO is empty */
 		msm_uport->imr_reg &= ~UARTDM_ISR_TXLEV_BMSK;
 		msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
+		/*
+		 * Complete the device write before starting the clock_off
+		 * request. Hence mb() is required here.
+		 */
+		mb();
 		if (!msm_hs_check_clock_off_locked(uport))
 			hrtimer_start(&msm_uport->clk_off_timer,
 				      msm_uport->clk_off_delay,
@@ -1246,58 +1439,52 @@
 
 	/* Change in CTS interrupt */
 	if (isr_status & UARTDM_ISR_DELTA_CTS_BMSK)
-		msm_hs_handle_delta_cts(uport);
+		msm_hs_handle_delta_cts_locked(uport);
 
 	spin_unlock_irqrestore(&uport->lock, flags);
 
 	return IRQ_HANDLED;
 }
 
-void msm_hs_request_clock_off_locked(struct uart_port *uport)
-{
+/* request to turn off uart clock once pending TX is flushed */
+void msm_hs_request_clock_off(struct uart_port *uport) {
+	unsigned long flags;
 	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
 
+	spin_lock_irqsave(&uport->lock, flags);
 	if (msm_uport->clk_state == MSM_HS_CLK_ON) {
 		msm_uport->clk_state = MSM_HS_CLK_REQUEST_OFF;
 		msm_uport->clk_req_off_state = CLK_REQ_OFF_START;
-		if (!use_low_power_rx_wakeup(msm_uport))
-			set_rfr_locked(uport, 0);
 		msm_uport->imr_reg |= UARTDM_ISR_TXLEV_BMSK;
 		msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
+		/*
+		 * Complete the device write before returning.
+		 * Hence mb() is required here.
+		 */
+		mb();
 	}
-}
-
-/**
- * msm_hs_request_clock_off - request to (i.e. asynchronously) turn off uart
- * clock once pending TX is flushed and Rx DMA command is terminated.
- * @uport: uart_port structure for the device instance.
- *
- * This functions puts the device into a partially active low power mode. It
- * waits to complete all pending tx transactions, flushes ongoing Rx DMA
- * command and terminates UART side Rx transaction, puts UART HW in non DMA
- * mode and then clocks off the device. A client calls this when no UART
- * data is expected. msm_request_clock_on() must be called before any further
- * UART can be sent or received.
- */
-void msm_hs_request_clock_off(struct uart_port *uport)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&uport->lock, flags);
-	msm_hs_request_clock_off_locked(uport);
 	spin_unlock_irqrestore(&uport->lock, flags);
 }
+EXPORT_SYMBOL(msm_hs_request_clock_off);
 
-void msm_hs_request_clock_on_locked(struct uart_port *uport)
-{
+static void msm_hs_request_clock_on_locked(struct uart_port *uport) {
 	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
 	unsigned int data;
+	int ret = 0;
 
 	switch (msm_uport->clk_state) {
 	case MSM_HS_CLK_OFF:
+		wake_lock(&msm_uport->dma_wake_lock);
 		clk_enable(msm_uport->clk);
-		disable_irq_nosync(msm_uport->rx_wakeup.irq);
-		/* fall-through */
+		if (msm_uport->pclk)
+			ret = clk_enable(msm_uport->pclk);
+		disable_irq_nosync(msm_uport->wakeup.irq);
+		if (unlikely(ret)) {
+			dev_err(uport->dev, "Clock ON failure. "
+				"Stalling HSUART\n");
+			break;
+		}
+		/* else fall-through */
 	case MSM_HS_CLK_REQUEST_OFF:
 		if (msm_uport->rx.flush == FLUSH_STOP ||
 		    msm_uport->rx.flush == FLUSH_SHUTDOWN) {
@@ -1305,12 +1492,12 @@
 			data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
 			data |= UARTDM_RX_DM_EN_BMSK;
 			msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
+			/* Complete above device write. Hence mb() here. */
+			mb();
 		}
 		hrtimer_try_to_cancel(&msm_uport->clk_off_timer);
 		if (msm_uport->rx.flush == FLUSH_SHUTDOWN)
 			msm_hs_start_rx_locked(uport);
-		if (!use_low_power_rx_wakeup(msm_uport))
-			set_rfr_locked(uport, 1);
 		if (msm_uport->rx.flush == FLUSH_STOP)
 			msm_uport->rx.flush = FLUSH_IGNORE;
 		msm_uport->clk_state = MSM_HS_CLK_ON;
@@ -1322,38 +1509,28 @@
 	}
 }
 
-/**
- * msm_hs_request_clock_on - Switch the device from partially active low
- * power mode to fully active (i.e. clock on) mode.
- * @uport: uart_port structure for the device.
- *
- * This function switches on the input clock, puts UART HW into DMA mode
- * and enqueues an Rx DMA command if the device was in partially active
- * mode. It has no effect if called with the device in inactive state.
- */
-void msm_hs_request_clock_on(struct uart_port *uport)
-{
+void msm_hs_request_clock_on(struct uart_port *uport) {
 	unsigned long flags;
-
 	spin_lock_irqsave(&uport->lock, flags);
 	msm_hs_request_clock_on_locked(uport);
 	spin_unlock_irqrestore(&uport->lock, flags);
 }
+EXPORT_SYMBOL(msm_hs_request_clock_on);
 
-static irqreturn_t msm_hs_rx_wakeup_isr(int irq, void *dev)
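+/*
+ * GPIO wakeup ISR: the interrupt already pending when the clock went off is
+ * ignored; otherwise the clocks are turned back on and the configured
+ * character is optionally injected into the tty.
+ */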
+static irqreturn_t msm_hs_wakeup_isr(int irq, void *dev)
 {
 	unsigned int wakeup = 0;
 	unsigned long flags;
-	struct msm_hs_port *msm_uport = dev;
+	struct msm_hs_port *msm_uport = (struct msm_hs_port *)dev;
 	struct uart_port *uport = &msm_uport->uport;
 	struct tty_struct *tty = NULL;
 
 	spin_lock_irqsave(&uport->lock, flags);
-	if (msm_uport->clk_state == MSM_HS_CLK_OFF) {
-		/* ignore the first irq - it is a pending irq that occurred
+	if (msm_uport->clk_state == MSM_HS_CLK_OFF)  {
+		/* ignore the first irq - it is a pending irq that occurred
 		 * before enable_irq() */
-		if (msm_uport->rx_wakeup.ignore)
-			msm_uport->rx_wakeup.ignore = 0;
+		if (msm_uport->wakeup.ignore)
+			msm_uport->wakeup.ignore = 0;
 		else
 			wakeup = 1;
 	}
@@ -1362,23 +1539,24 @@
 		/* the uart was clocked off during an rx, wake up and
 		 * optionally inject char into tty rx */
 		msm_hs_request_clock_on_locked(uport);
-		if (msm_uport->rx_wakeup.inject_rx) {
+		if (msm_uport->wakeup.inject_rx) {
 			tty = uport->state->port.tty;
 			tty_insert_flip_char(tty,
-					     msm_uport->rx_wakeup.rx_to_inject,
+					     msm_uport->wakeup.rx_to_inject,
 					     TTY_NORMAL);
-			queue_work(msm_hs_workqueue, &msm_uport->rx.tty_work);
 		}
 	}
 
 	spin_unlock_irqrestore(&uport->lock, flags);
 
+	if (wakeup && msm_uport->wakeup.inject_rx)
+		tty_flip_buffer_push(tty);
 	return IRQ_HANDLED;
 }
 
 static const char *msm_hs_type(struct uart_port *port)
 {
-	return (port->type == PORT_MSM) ? "MSM_HS_UART" : NULL;
+	return ("MSM HS UART");
 }
 
 /* Called when port is opened */
@@ -1391,7 +1569,6 @@
 	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
 	struct circ_buf *tx_buf = &uport->state->xmit;
 	struct msm_hs_tx *tx = &msm_uport->tx;
-	struct msm_hs_rx *rx = &msm_uport->rx;
 
 	rfr_level = uport->fifosize;
 	if (rfr_level > 16)
@@ -1400,16 +1577,10 @@
 	tx->dma_base = dma_map_single(uport->dev, tx_buf->buf, UART_XMIT_SIZE,
 				      DMA_TO_DEVICE);
 
-	/* do not let tty layer execute RX in global workqueue, use a
-	 * dedicated workqueue managed by this driver */
-	uport->state->port.tty->low_latency = 1;
-
 	/* turn on uart clk */
-	ret = msm_hs_init_clk_locked(uport);
-	if (unlikely(ret)) {
-		printk(KERN_ERR "Turning uartclk failed!\n");
-		goto err_msm_hs_init_clk;
-	}
+	ret = msm_hs_init_clk(uport);
+	if (unlikely(ret))
+		return ret;
 
 	/* Set auto RFR Level */
 	data = msm_hs_read(uport, UARTDM_MR1_ADDR);
@@ -1449,7 +1620,9 @@
 	tx->dma_in_flight = 0;
 
 	tx->xfer.complete_func = msm_hs_dmov_tx_callback;
-	tx->xfer.execute_func = NULL;
+
+	tx->xfer.crci_mask = msm_dmov_build_crci_mask(1,
+						      msm_uport->dma_tx_crci);
 
 	tx->command_ptr->cmd = CMD_LC |
 	    CMD_DST_CRCI(msm_uport->dma_tx_crci) | CMD_MODE_BOX;
@@ -1462,49 +1635,38 @@
 	tx->command_ptr->dst_row_addr =
 	    msm_uport->uport.mapbase + UARTDM_TF_ADDR;
 
-
-	/* Turn on Uart Receive */
-	rx->xfer.complete_func = msm_hs_dmov_rx_callback;
-	rx->xfer.execute_func = NULL;
-
-	rx->command_ptr->cmd = CMD_LC |
-	    CMD_SRC_CRCI(msm_uport->dma_rx_crci) | CMD_MODE_BOX;
-
-	rx->command_ptr->src_dst_len = (MSM_UARTDM_BURST_SIZE << 16)
-					   | (MSM_UARTDM_BURST_SIZE);
-	rx->command_ptr->row_offset =  MSM_UARTDM_BURST_SIZE;
-	rx->command_ptr->src_row_addr = uport->mapbase + UARTDM_RF_ADDR;
-
-
 	msm_uport->imr_reg |= UARTDM_ISR_RXSTALE_BMSK;
 	/* Enable reading the current CTS, no harm even if CTS is ignored */
 	msm_uport->imr_reg |= UARTDM_ISR_CURRENT_CTS_BMSK;
 
 	msm_hs_write(uport, UARTDM_TFWR_ADDR, 0);  /* TXLEV on empty TX fifo */
+	/*
+	 * Complete all device-write related configuration before
+	 * queuing the RX request. Hence mb() is required here.
+	 */
+	mb();
 
+	if (use_low_power_wakeup(msm_uport)) {
+		ret = irq_set_irq_wake(msm_uport->wakeup.irq, 1);
+		if (unlikely(ret))
+			return ret;
+	}
 
 	ret = request_irq(uport->irq, msm_hs_isr, IRQF_TRIGGER_HIGH,
 			  "msm_hs_uart", msm_uport);
-	if (unlikely(ret)) {
-		printk(KERN_ERR "Request msm_hs_uart IRQ failed!\n");
-		goto err_request_irq;
-	}
-	if (use_low_power_rx_wakeup(msm_uport)) {
-		ret = request_irq(msm_uport->rx_wakeup.irq,
-				  msm_hs_rx_wakeup_isr,
+	if (unlikely(ret))
+		return ret;
+	if (use_low_power_wakeup(msm_uport)) {
+		ret = request_irq(msm_uport->wakeup.irq, msm_hs_wakeup_isr,
 				  IRQF_TRIGGER_FALLING,
-				  "msm_hs_rx_wakeup", msm_uport);
-		if (unlikely(ret)) {
-			printk(KERN_ERR "Request msm_hs_rx_wakeup IRQ failed!\n");
-			free_irq(uport->irq, msm_uport);
-			goto err_request_irq;
-		}
-		disable_irq(msm_uport->rx_wakeup.irq);
+				  "msm_hs_wakeup", msm_uport);
+		if (unlikely(ret))
+			return ret;
+		disable_irq(msm_uport->wakeup.irq);
 	}
 
 	spin_lock_irqsave(&uport->lock, flags);
 
-	msm_hs_write(uport, UARTDM_RFWR_ADDR, 0);
 	msm_hs_start_rx_locked(uport);
 
 	spin_unlock_irqrestore(&uport->lock, flags);
@@ -1513,17 +1675,12 @@
 		dev_err(uport->dev, "set active error:%d\n", ret);
 	pm_runtime_enable(uport->dev);
 
-	return 0;
 
-err_request_irq:
-err_msm_hs_init_clk:
-	dma_unmap_single(uport->dev, tx->dma_base,
-				UART_XMIT_SIZE, DMA_TO_DEVICE);
-	return ret;
+	return 0;
 }
 
 /* Initialize tx and rx data structures */
-static int __devinit uartdm_init_port(struct uart_port *uport)
+static int uartdm_init_port(struct uart_port *uport)
 {
 	int ret = 0;
 	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
@@ -1538,7 +1695,7 @@
 	tx->command_ptr_ptr = kmalloc(sizeof(u32 *), GFP_KERNEL | __GFP_DMA);
 	if (!tx->command_ptr_ptr) {
 		ret = -ENOMEM;
-		goto err_tx_command_ptr_ptr;
+		goto free_tx_command_ptr;
 	}
 
 	tx->mapped_cmd_ptr = dma_map_single(uport->dev, tx->command_ptr,
@@ -1549,20 +1706,28 @@
 	tx->xfer.cmdptr = DMOV_CMD_ADDR(tx->mapped_cmd_ptr_ptr);
 
 	init_waitqueue_head(&rx->wait);
+	wake_lock_init(&rx->wake_lock, WAKE_LOCK_SUSPEND, "msm_serial_hs_rx");
+	wake_lock_init(&msm_uport->dma_wake_lock, WAKE_LOCK_SUSPEND,
+		       "msm_serial_hs_dma");
+
+	tasklet_init(&rx->tlet, msm_serial_hs_rx_tlet,
+			(unsigned long) &rx->tlet);
+	tasklet_init(&tx->tlet, msm_serial_hs_tx_tlet,
+			(unsigned long) &tx->tlet);
 
 	rx->pool = dma_pool_create("rx_buffer_pool", uport->dev,
 				   UARTDM_RX_BUF_SIZE, 16, 0);
 	if (!rx->pool) {
 		pr_err("%s(): cannot allocate rx_buffer_pool", __func__);
 		ret = -ENOMEM;
-		goto err_dma_pool_create;
+		goto exit_tasklet_init;
 	}
 
 	rx->buffer = dma_pool_alloc(rx->pool, GFP_KERNEL, &rx->rbuffer);
 	if (!rx->buffer) {
 		pr_err("%s(): cannot allocate rx->buffer", __func__);
 		ret = -ENOMEM;
-		goto err_dma_pool_alloc;
+		goto free_pool;
 	}
 
 	/* Allocate the command pointer. Needs to be 64 bit aligned */
@@ -1570,14 +1735,14 @@
 	if (!rx->command_ptr) {
 		pr_err("%s(): cannot allocate rx->command_ptr", __func__);
 		ret = -ENOMEM;
-		goto err_rx_command_ptr;
+		goto free_rx_buffer;
 	}
 
 	rx->command_ptr_ptr = kmalloc(sizeof(u32 *), GFP_KERNEL | __GFP_DMA);
 	if (!rx->command_ptr_ptr) {
 		pr_err("%s(): cannot allocate rx->command_ptr_ptr", __func__);
 		ret = -ENOMEM;
-		goto err_rx_command_ptr_ptr;
+		goto free_rx_command_ptr;
 	}
 
 	rx->command_ptr->num_rows = ((UARTDM_RX_BUF_SIZE >> 4) << 16) |
@@ -1585,6 +1750,22 @@
 
 	rx->command_ptr->dst_row_addr = rx->rbuffer;
 
+	/* Set up Uart Receive */
+	msm_hs_write(uport, UARTDM_RFWR_ADDR, 0);
+
+	rx->xfer.complete_func = msm_hs_dmov_rx_callback;
+
+	rx->xfer.crci_mask = msm_dmov_build_crci_mask(1,
+						      msm_uport->dma_rx_crci);
+
+	rx->command_ptr->cmd = CMD_LC |
+	    CMD_SRC_CRCI(msm_uport->dma_rx_crci) | CMD_MODE_BOX;
+
+	rx->command_ptr->src_dst_len = (MSM_UARTDM_BURST_SIZE << 16)
+					   | (MSM_UARTDM_BURST_SIZE);
+	rx->command_ptr->row_offset =  MSM_UARTDM_BURST_SIZE;
+	rx->command_ptr->src_row_addr = uport->mapbase + UARTDM_RF_ADDR;
+
 	rx->mapped_cmd_ptr = dma_map_single(uport->dev, rx->command_ptr,
 					    sizeof(dmov_box), DMA_TO_DEVICE);
 
@@ -1594,36 +1775,43 @@
 					    sizeof(u32 *), DMA_TO_DEVICE);
 	rx->xfer.cmdptr = DMOV_CMD_ADDR(rx->cmdptr_dmaaddr);
 
-	INIT_WORK(&rx->tty_work, msm_hs_tty_flip_buffer_work);
+	INIT_DELAYED_WORK(&rx->flip_insert_work, flip_insert_work);
 
 	return ret;
 
-err_rx_command_ptr_ptr:
+free_rx_command_ptr:
 	kfree(rx->command_ptr);
-err_rx_command_ptr:
+
+free_rx_buffer:
 	dma_pool_free(msm_uport->rx.pool, msm_uport->rx.buffer,
-						msm_uport->rx.rbuffer);
-err_dma_pool_alloc:
+			msm_uport->rx.rbuffer);
+
+free_pool:
 	dma_pool_destroy(msm_uport->rx.pool);
-err_dma_pool_create:
+
+exit_tasklet_init:
+	wake_lock_destroy(&msm_uport->rx.wake_lock);
+	wake_lock_destroy(&msm_uport->dma_wake_lock);
+	tasklet_kill(&msm_uport->tx.tlet);
+	tasklet_kill(&msm_uport->rx.tlet);
 	dma_unmap_single(uport->dev, msm_uport->tx.mapped_cmd_ptr_ptr,
-				sizeof(u32 *), DMA_TO_DEVICE);
+			sizeof(u32 *), DMA_TO_DEVICE);
 	dma_unmap_single(uport->dev, msm_uport->tx.mapped_cmd_ptr,
-				sizeof(dmov_box), DMA_TO_DEVICE);
+			sizeof(dmov_box), DMA_TO_DEVICE);
 	kfree(msm_uport->tx.command_ptr_ptr);
-err_tx_command_ptr_ptr:
+
+free_tx_command_ptr:
 	kfree(msm_uport->tx.command_ptr);
 	return ret;
 }
 
-static int __devinit msm_hs_probe(struct platform_device *pdev)
+static int __init msm_hs_probe(struct platform_device *pdev)
 {
 	int ret;
 	struct uart_port *uport;
 	struct msm_hs_port *msm_uport;
 	struct resource *resource;
-	const struct msm_serial_hs_platform_data *pdata =
-						pdev->dev.platform_data;
+	struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
 
 	if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
 		printk(KERN_ERR "Invalid plaform device ID = %d\n", pdev->id);
@@ -1638,40 +1826,37 @@
 	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (unlikely(!resource))
 		return -ENXIO;
+	uport->mapbase = resource->start;  /* physical base of the UARTDM block */
 
-	uport->mapbase = resource->start;
+	uport->membase = ioremap(uport->mapbase, PAGE_SIZE);
+	if (unlikely(!uport->membase))
+		return -ENOMEM;
+
 	uport->irq = platform_get_irq(pdev, 0);
 	if (unlikely(uport->irq < 0))
 		return -ENXIO;
 
-	if (unlikely(irq_set_irq_wake(uport->irq, 1)))
-		return -ENXIO;
-
-	if (pdata == NULL || pdata->rx_wakeup_irq < 0)
-		msm_uport->rx_wakeup.irq = -1;
-	else {
-		msm_uport->rx_wakeup.irq = pdata->rx_wakeup_irq;
-		msm_uport->rx_wakeup.ignore = 1;
-		msm_uport->rx_wakeup.inject_rx = pdata->inject_rx_on_wakeup;
-		msm_uport->rx_wakeup.rx_to_inject = pdata->rx_to_inject;
-
-		if (unlikely(msm_uport->rx_wakeup.irq < 0))
-			return -ENXIO;
-
-		if (unlikely(irq_set_irq_wake(msm_uport->rx_wakeup.irq, 1)))
-			return -ENXIO;
-	}
-
 	if (pdata == NULL)
-		msm_uport->exit_lpm_cb = NULL;
-	else
-		msm_uport->exit_lpm_cb = pdata->exit_lpm_cb;
+		msm_uport->wakeup.irq = -1;
+	else {
+		msm_uport->wakeup.irq = pdata->wakeup_irq;
+		msm_uport->wakeup.ignore = 1;
+		msm_uport->wakeup.inject_rx = pdata->inject_rx_on_wakeup;
+		msm_uport->wakeup.rx_to_inject = pdata->rx_to_inject;
+
+		if (unlikely(msm_uport->wakeup.irq < 0))
+			return -ENXIO;
+
+		if (pdata->gpio_config)
+			if (unlikely(pdata->gpio_config(1)))
+				dev_err(uport->dev, "Cannot configure "
+					"gpios\n");
+	}
 
 	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
 						"uartdm_channels");
 	if (unlikely(!resource))
 		return -ENXIO;
-
 	msm_uport->dma_tx_channel = resource->start;
 	msm_uport->dma_rx_channel = resource->end;
 
@@ -1679,67 +1864,93 @@
 						"uartdm_crci");
 	if (unlikely(!resource))
 		return -ENXIO;
-
 	msm_uport->dma_tx_crci = resource->start;
 	msm_uport->dma_rx_crci = resource->end;
 
 	uport->iotype = UPIO_MEM;
-	uport->fifosize = UART_FIFOSIZE;
+	uport->fifosize = 64;
 	uport->ops = &msm_hs_ops;
 	uport->flags = UPF_BOOT_AUTOCONF;
-	uport->uartclk = UARTCLK;
+	uport->uartclk = 7372800;
 	msm_uport->imr_reg = 0x0;
+
 	msm_uport->clk = clk_get(&pdev->dev, "uartdm_clk");
 	if (IS_ERR(msm_uport->clk))
 		return PTR_ERR(msm_uport->clk);
 
+	msm_uport->pclk = clk_get(&pdev->dev, "uartdm_pclk");
+	/*
+	 * Some configurations do not require explicit pclk control so
+	 * do not flag error on pclk get failure.
+	 */
+	if (IS_ERR(msm_uport->pclk))
+		msm_uport->pclk = NULL;
+
+	ret = clk_set_rate(msm_uport->clk, uport->uartclk);
+	if (ret) {
+		printk(KERN_WARNING "Error setting clock rate on UART\n");
+		return ret;
+	}
+
 	ret = uartdm_init_port(uport);
 	if (unlikely(ret))
 		return ret;
 
+	/* configure the CR Protection to Enable */
+	msm_hs_write(uport, UARTDM_CR_ADDR, CR_PROTECTION_EN);
+	/*
+	 * Enable command register protection before going ahead, as this hw
+	 * configuration ensures that a command issued to the CR register
+	 * completes before the next one starts. Hence mb() is required here.
+	 */
+	mb();
+
 	msm_uport->clk_state = MSM_HS_CLK_PORT_OFF;
 	hrtimer_init(&msm_uport->clk_off_timer, CLOCK_MONOTONIC,
 		     HRTIMER_MODE_REL);
 	msm_uport->clk_off_timer.function = msm_hs_clk_off_retry;
 	msm_uport->clk_off_delay = ktime_set(0, 1000000);  /* 1ms */
 
+	ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_clock.attr);
+	if (unlikely(ret))
+		return ret;
+
+	msm_serial_debugfs_init(msm_uport, pdev->id);
+
 	uport->line = pdev->id;
 	return uart_add_one_port(&msm_hs_driver, uport);
 }
 
 static int __init msm_serial_hs_init(void)
 {
-	int ret, i;
+	int ret;
+	int i;
 
 	/* Init all UARTS as non-configured */
 	for (i = 0; i < UARTDM_NR; i++)
 		q_uart_port[i].uport.type = PORT_UNKNOWN;
 
-	msm_hs_workqueue = create_singlethread_workqueue("msm_serial_hs");
-	if (unlikely(!msm_hs_workqueue))
-		return -ENOMEM;
-
 	ret = uart_register_driver(&msm_hs_driver);
 	if (unlikely(ret)) {
-		printk(KERN_ERR "%s failed to load\n", __func__);
-		goto err_uart_register_driver;
+		printk(KERN_ERR "%s failed to load\n", __FUNCTION__);
+		return ret;
 	}
+	debug_base = debugfs_create_dir("msm_serial_hs", NULL);
+	if (IS_ERR_OR_NULL(debug_base))
+		pr_info("msm_serial_hs: Cannot create debugfs dir\n");
 
-	ret = platform_driver_register(&msm_serial_hs_platform_driver);
+	ret = platform_driver_probe(&msm_serial_hs_platform_driver,
+					msm_hs_probe);
 	if (ret) {
-		printk(KERN_ERR "%s failed to load\n", __func__);
-		goto err_platform_driver_register;
+		printk(KERN_ERR "%s failed to load\n", __FUNCTION__);
+		debugfs_remove_recursive(debug_base);
+		uart_unregister_driver(&msm_hs_driver);
+		return ret;
 	}
 
-	return ret;
-
-err_platform_driver_register:
-	uart_unregister_driver(&msm_hs_driver);
-err_uart_register_driver:
-	destroy_workqueue(msm_hs_workqueue);
+	printk(KERN_INFO "msm_serial_hs module loaded\n");
 	return ret;
 }
-module_init(msm_serial_hs_init);
 
 /*
  *  Called by the upper layer when port is closed.
@@ -1752,31 +1963,37 @@
 	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
 
 	BUG_ON(msm_uport->rx.flush < FLUSH_STOP);
+	tasklet_kill(&msm_uport->tx.tlet);
+	wait_event(msm_uport->rx.wait, msm_uport->rx.flush == FLUSH_SHUTDOWN);
+	tasklet_kill(&msm_uport->rx.tlet);
+	cancel_delayed_work_sync(&msm_uport->rx.flip_insert_work);
 
 	spin_lock_irqsave(&uport->lock, flags);
 	clk_enable(msm_uport->clk);
 
+	pm_runtime_disable(uport->dev);
+	pm_runtime_set_suspended(uport->dev);
+
 	/* Disable the transmitter */
 	msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_TX_DISABLE_BMSK);
 	/* Disable the receiver */
 	msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_RX_DISABLE_BMSK);
 
-	pm_runtime_disable(uport->dev);
-	pm_runtime_set_suspended(uport->dev);
-
-	/* Free the interrupt */
-	free_irq(uport->irq, msm_uport);
-	if (use_low_power_rx_wakeup(msm_uport))
-		free_irq(msm_uport->rx_wakeup.irq, msm_uport);
-
 	msm_uport->imr_reg = 0;
 	msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
-
-	wait_event(msm_uport->rx.wait, msm_uport->rx.flush == FLUSH_SHUTDOWN);
+	/*
+	 * Complete all device writes before actually disabling uartclk.
+	 * Hence an mb() is required here.
+	 */
+	mb();
 
 	clk_disable(msm_uport->clk);  /* to balance local clk_enable() */
-	if (msm_uport->clk_state != MSM_HS_CLK_OFF)
+	if (msm_uport->clk_state != MSM_HS_CLK_OFF) {
 		clk_disable(msm_uport->clk);  /* to balance clk_state */
+		if (msm_uport->pclk)
+			clk_disable(msm_uport->pclk);
+		wake_unlock(&msm_uport->dma_wake_lock);
+	}
 	msm_uport->clk_state = MSM_HS_CLK_PORT_OFF;
 
 	dma_unmap_single(uport->dev, msm_uport->tx.dma_base,
@@ -1784,20 +2001,22 @@
 
 	spin_unlock_irqrestore(&uport->lock, flags);
 
-	if (cancel_work_sync(&msm_uport->rx.tty_work))
-		msm_hs_tty_flip_buffer_work(&msm_uport->rx.tty_work);
+	if (use_low_power_wakeup(msm_uport))
+		irq_set_irq_wake(msm_uport->wakeup.irq, 0);
+
+	/* Free the interrupt */
+	free_irq(uport->irq, msm_uport);
+	if (use_low_power_wakeup(msm_uport))
+		free_irq(msm_uport->wakeup.irq, msm_uport);
 }
 
 static void __exit msm_serial_hs_exit(void)
 {
-	flush_workqueue(msm_hs_workqueue);
-	destroy_workqueue(msm_hs_workqueue);
+	printk(KERN_INFO "msm_serial_hs module removed\n");
 	platform_driver_unregister(&msm_serial_hs_platform_driver);
 	uart_unregister_driver(&msm_hs_driver);
 }
-module_exit(msm_serial_hs_exit);
 
-#ifdef CONFIG_PM_RUNTIME
 static int msm_hs_runtime_idle(struct device *dev)
 {
 	/*
@@ -1812,7 +2031,6 @@
 	struct platform_device *pdev = container_of(dev, struct
 						    platform_device, dev);
 	struct msm_hs_port *msm_uport = &q_uart_port[pdev->id];
-
 	msm_hs_request_clock_on(&msm_uport->uport);
 	return 0;
 }
@@ -1822,15 +2040,9 @@
 	struct platform_device *pdev = container_of(dev, struct
 						    platform_device, dev);
 	struct msm_hs_port *msm_uport = &q_uart_port[pdev->id];
-
 	msm_hs_request_clock_off(&msm_uport->uport);
 	return 0;
 }
-#else
-#define msm_hs_runtime_idle NULL
-#define msm_hs_runtime_resume NULL
-#define msm_hs_runtime_suspend NULL
-#endif
 
 static const struct dev_pm_ops msm_hs_dev_pm_ops = {
 	.runtime_suspend = msm_hs_runtime_suspend,
@@ -1839,11 +2051,9 @@
 };
 
 static struct platform_driver msm_serial_hs_platform_driver = {
-	.probe = msm_hs_probe,
-	.remove = __devexit_p(msm_hs_remove),
+	.remove = msm_hs_remove,
 	.driver = {
 		.name = "msm_serial_hs",
-		.owner = THIS_MODULE,
 		.pm   = &msm_hs_dev_pm_ops,
 	},
 };
@@ -1868,13 +2078,14 @@
 	.startup = msm_hs_startup,
 	.shutdown = msm_hs_shutdown,
 	.set_termios = msm_hs_set_termios,
-	.pm = msm_hs_pm,
 	.type = msm_hs_type,
 	.config_port = msm_hs_config_port,
 	.release_port = msm_hs_release_port,
 	.request_port = msm_hs_request_port,
 };
 
+module_init(msm_serial_hs_init);
+module_exit(msm_serial_hs_exit);
 MODULE_DESCRIPTION("High Speed UART Driver for the MSM chipset");
 MODULE_VERSION("1.2");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/msm_serial_hs_hwreg.h b/drivers/tty/serial/msm_serial_hs_hwreg.h
new file mode 100644
index 0000000..001d555
--- /dev/null
+++ b/drivers/tty/serial/msm_serial_hs_hwreg.h
@@ -0,0 +1,179 @@
+/* drivers/tty/serial/msm_serial_hs_hwreg.h
+ *
+ * Copyright (c) 2007-2009, Code Aurora Forum. All rights reserved.
+ * 
+ * All source code in this file is licensed under the following license
+ * except where indicated.
+ * 
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ * 
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ * 
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can find it at http://www.fsf.org
+ */
+
+#ifndef MSM_SERIAL_HS_HWREG_H
+#define MSM_SERIAL_HS_HWREG_H
+
+#define GSBI_CONTROL_ADDR              0x0
+#define GSBI_PROTOCOL_CODE_MASK        0x30
+#define GSBI_PROTOCOL_I2C_UART         0x60
+#define GSBI_PROTOCOL_UART             0x40
+#define GSBI_PROTOCOL_IDLE             0x0
+
+#define TCSR_ADM_1_A_CRCI_MUX_SEL      0x78
+#define TCSR_ADM_1_B_CRCI_MUX_SEL      0x7C
+#define ADM1_CRCI_GSBI6_RX_SEL         0x800
+#define ADM1_CRCI_GSBI6_TX_SEL         0x400
+
+#define UARTDM_MR1_ADDR 0x0
+#define UARTDM_MR2_ADDR 0x4
+
+/* write only register */
+#define UARTDM_CSR_ADDR    0x8
+#define UARTDM_CSR_115200 0xFF
+#define UARTDM_CSR_57600  0xEE
+#define UARTDM_CSR_38400  0xDD
+#define UARTDM_CSR_28800  0xCC
+#define UARTDM_CSR_19200  0xBB
+#define UARTDM_CSR_14400  0xAA
+#define UARTDM_CSR_9600   0x99
+#define UARTDM_CSR_7200   0x88
+#define UARTDM_CSR_4800   0x77
+#define UARTDM_CSR_3600   0x66
+#define UARTDM_CSR_2400   0x55
+#define UARTDM_CSR_1200   0x44
+#define UARTDM_CSR_600    0x33
+#define UARTDM_CSR_300    0x22
+#define UARTDM_CSR_150    0x11
+#define UARTDM_CSR_75     0x00
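+
+/*
+ * Note: each CSR baud code above carries the same value in both nibbles;
+ * on UARTDM hardware these appear to be the RX and TX clock selectors, so
+ * RX and TX always run at the same rate here (an assumption drawn from the
+ * symmetric encoding, not from a datasheet).
+ */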
+
+/* write only register */
+#define UARTDM_TF_ADDR 0x70
+#define UARTDM_TF2_ADDR 0x74
+#define UARTDM_TF3_ADDR 0x78
+#define UARTDM_TF4_ADDR 0x7C
+
+/* write only register */
+#define UARTDM_CR_ADDR 0x10
+/* write only register */
+#define UARTDM_IMR_ADDR 0x14
+
+#define UARTDM_IPR_ADDR 0x18
+#define UARTDM_TFWR_ADDR 0x1c
+#define UARTDM_RFWR_ADDR 0x20
+#define UARTDM_HCR_ADDR 0x24
+#define UARTDM_DMRX_ADDR 0x34
+#define UARTDM_IRDA_ADDR 0x38
+#define UARTDM_DMEN_ADDR 0x3c
+
+/* UART_DM_NO_CHARS_FOR_TX */
+#define UARTDM_NCF_TX_ADDR 0x40
+
+#define UARTDM_BADR_ADDR 0x44
+
+#define UARTDM_SIM_CFG_ADDR 0x80
+
+/* Read Only register */
+#define UARTDM_SR_ADDR 0x8
+
+/* Read Only register */
+#define UARTDM_RF_ADDR  0x70
+#define UARTDM_RF2_ADDR 0x74
+#define UARTDM_RF3_ADDR 0x78
+#define UARTDM_RF4_ADDR 0x7C
+
+/* Read Only register */
+#define UARTDM_MISR_ADDR 0x10
+
+/* Read Only register */
+#define UARTDM_ISR_ADDR 0x14
+#define UARTDM_RX_TOTAL_SNAP_ADDR 0x38
+
+#define UARTDM_RXFS_ADDR 0x50
+
+/* Register field Mask Mapping */
+#define UARTDM_SR_RX_BREAK_BMSK	        BIT(6)
+#define UARTDM_SR_PAR_FRAME_BMSK	BIT(5)
+#define UARTDM_SR_OVERRUN_BMSK		BIT(4)
+#define UARTDM_SR_TXEMT_BMSK		BIT(3)
+#define UARTDM_SR_TXRDY_BMSK		BIT(2)
+#define UARTDM_SR_RXRDY_BMSK		BIT(0)
+
+#define UARTDM_CR_TX_DISABLE_BMSK	BIT(3)
+#define UARTDM_CR_RX_DISABLE_BMSK	BIT(1)
+#define UARTDM_CR_TX_EN_BMSK		BIT(2)
+#define UARTDM_CR_RX_EN_BMSK		BIT(0)
+
+/* UARTDM_CR channel_command bit value (register field is bits 8:4) */
+#define RESET_RX		0x10
+#define RESET_TX		0x20
+#define RESET_ERROR_STATUS	0x30
+#define RESET_BREAK_INT		0x40
+#define START_BREAK		0x50
+#define STOP_BREAK		0x60
+#define RESET_CTS		0x70
+#define RESET_STALE_INT		0x80
+#define RFR_LOW			0xD0
+#define RFR_HIGH		0xE0
+#define CR_PROTECTION_EN	0x100
+#define STALE_EVENT_ENABLE	0x500
+#define STALE_EVENT_DISABLE	0x600
+#define FORCE_STALE_EVENT	0x400
+#define CLEAR_TX_READY		0x300
+#define RESET_TX_ERROR		0x800
+#define RESET_TX_DONE		0x810
+
+#define UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK 0xffffff00
+#define UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK 0x3f
+#define UARTDM_MR1_CTS_CTL_BMSK 0x40
+#define UARTDM_MR1_RX_RDY_CTL_BMSK 0x80
+
+#define UARTDM_MR2_LOOP_MODE_BMSK        0x80
+#define UARTDM_MR2_ERROR_MODE_BMSK       0x40
+#define UARTDM_MR2_BITS_PER_CHAR_BMSK    0x30
+
+#define UARTDM_MR2_BITS_PER_CHAR_8	(0x3 << 4)
+
+/* bits per character configuration */
+#define FIVE_BPC  (0 << 4)
+#define SIX_BPC   (1 << 4)
+#define SEVEN_BPC (2 << 4)
+#define EIGHT_BPC (3 << 4)
+
+#define UARTDM_MR2_STOP_BIT_LEN_BMSK 0xc
+#define STOP_BIT_ONE (1 << 2)
+#define STOP_BIT_TWO (3 << 2)
+
+#define UARTDM_MR2_PARITY_MODE_BMSK 0x3
+
+/* Parity configuration */
+#define NO_PARITY 0x0
+#define EVEN_PARITY 0x1
+#define ODD_PARITY 0x2
+#define SPACE_PARITY 0x3
+
+#define UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK 0xffffff80
+#define UARTDM_IPR_STALE_LSB_BMSK 0x1f
+
+/* These can be used for both ISR and IMR register */
+#define UARTDM_ISR_TX_READY_BMSK	BIT(7)
+#define UARTDM_ISR_CURRENT_CTS_BMSK	BIT(6)
+#define UARTDM_ISR_DELTA_CTS_BMSK	BIT(5)
+#define UARTDM_ISR_RXLEV_BMSK		BIT(4)
+#define UARTDM_ISR_RXSTALE_BMSK		BIT(3)
+#define UARTDM_ISR_RXBREAK_BMSK		BIT(2)
+#define UARTDM_ISR_RXHUNT_BMSK		BIT(1)
+#define UARTDM_ISR_TXLEV_BMSK		BIT(0)
+
+/* Field definitions for UART_DM_DMEN*/
+#define UARTDM_TX_DM_EN_BMSK 0x1
+#define UARTDM_RX_DM_EN_BMSK 0x2
+
+#endif /* MSM_SERIAL_HS_HWREG_H */
diff --git a/drivers/tty/serial/msm_serial_hs_lite.c b/drivers/tty/serial/msm_serial_hs_lite.c
new file mode 100644
index 0000000..83b734e
--- /dev/null
+++ b/drivers/tty/serial/msm_serial_hs_lite.c
@@ -0,0 +1,1216 @@
+/*
+ * drivers/tty/serial/msm_serial_hs_lite.c - driver for msm7k serial device and console
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Acknowledgements:
+ * This file is based on msm_serial.c, originally
+ * written by Robert Love <rlove@google.com>  */
+
+#if defined(CONFIG_SERIAL_MSM_HSL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/hrtimer.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/nmi.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/gpio.h>
+#include <mach/board.h>
+#include <mach/msm_serial_hs_lite.h>
+#include <asm/mach-types.h>
+#include "msm_serial_hs_hwreg.h"
+
+struct msm_hsl_port {
+	struct uart_port	uart;
+	char			name[16];
+	struct clk		*clk;
+	struct clk		*pclk;
+	unsigned int		imr;
+	unsigned int		*uart_csr_code;
+	unsigned int            *gsbi_mapbase;
+	unsigned int            *mapped_gsbi;
+	int			is_uartdm;
+	unsigned int            old_snap_state;
+};
+
+#define UART_TO_MSM(uart_port)	((struct msm_hsl_port *) uart_port)
+#define is_console(port)	((port)->cons && \
+				(port)->cons->index == (port)->line)
+static inline void wait_for_xmitr(struct uart_port *port, int bits);
+static inline void msm_hsl_write(struct uart_port *port,
+				 unsigned int val, unsigned int off)
+{
+	iowrite32(val, port->membase + off);
+}
+static inline unsigned int msm_hsl_read(struct uart_port *port,
+		     unsigned int off)
+{
+	return ioread32(port->membase + off);
+}
+
+static unsigned int msm_serial_hsl_has_gsbi(struct uart_port *port)
+{
+	return UART_TO_MSM(port)->is_uartdm;
+}
+
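+/*
+ * Gate the UART core clock, and the optional bus clock (pclk) when one was
+ * obtained at probe time, around register accesses: enable=1 turns the
+ * clocks on, enable=0 turns them off again.
+ */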
+static int clk_en(struct uart_port *port, int enable)
+{
+	struct msm_hsl_port *msm_hsl_port = UART_TO_MSM(port);
+	int ret = 0;
+
+	if (enable) {
+
+		ret = clk_enable(msm_hsl_port->clk);
+		if (ret)
+			goto err;
+		if (msm_hsl_port->pclk) {
+			ret = clk_enable(msm_hsl_port->pclk);
+			if (ret) {
+				clk_disable(msm_hsl_port->clk);
+				goto err;
+			}
+		}
+	} else {
+		clk_disable(msm_hsl_port->clk);
+		if (msm_hsl_port->pclk)
+			clk_disable(msm_hsl_port->pclk);
+	}
+err:
+	return ret;
+}
+
+static void msm_hsl_stop_tx(struct uart_port *port)
+{
+	struct msm_hsl_port *msm_hsl_port = UART_TO_MSM(port);
+
+	clk_en(port, 1);
+
+	msm_hsl_port->imr &= ~UARTDM_ISR_TXLEV_BMSK;
+	msm_hsl_write(port, msm_hsl_port->imr, UARTDM_IMR_ADDR);
+
+	clk_en(port, 0);
+}
+
+static void msm_hsl_start_tx(struct uart_port *port)
+{
+	struct msm_hsl_port *msm_hsl_port = UART_TO_MSM(port);
+
+	clk_en(port, 1);
+
+	msm_hsl_port->imr |= UARTDM_ISR_TXLEV_BMSK;
+	msm_hsl_write(port, msm_hsl_port->imr, UARTDM_IMR_ADDR);
+
+	clk_en(port, 0);
+}
+
+static void msm_hsl_stop_rx(struct uart_port *port)
+{
+	struct msm_hsl_port *msm_hsl_port = UART_TO_MSM(port);
+
+	clk_en(port, 1);
+
+	msm_hsl_port->imr &= ~(UARTDM_ISR_RXLEV_BMSK |
+			       UARTDM_ISR_RXSTALE_BMSK);
+	msm_hsl_write(port, msm_hsl_port->imr, UARTDM_IMR_ADDR);
+
+	clk_en(port, 0);
+}
+
+static void msm_hsl_enable_ms(struct uart_port *port)
+{
+	struct msm_hsl_port *msm_hsl_port = UART_TO_MSM(port);
+
+	clk_en(port, 1);
+
+	msm_hsl_port->imr |= UARTDM_ISR_DELTA_CTS_BMSK;
+	msm_hsl_write(port, msm_hsl_port->imr, UARTDM_IMR_ADDR);
+
+	clk_en(port, 0);
+}
+
+static void handle_rx(struct uart_port *port, unsigned int misr)
+{
+	struct tty_struct *tty = port->state->port.tty;
+	unsigned int sr;
+	int count = 0;
+	struct msm_hsl_port *msm_hsl_port = UART_TO_MSM(port);
+
+	/*
+	 * Handle overrun. My understanding of the hardware is that overrun
+	 * is not tied to the RX buffer, so we handle the case out of band.
+	 */
+	if ((msm_hsl_read(port, UARTDM_SR_ADDR) & UARTDM_SR_OVERRUN_BMSK)) {
+		port->icount.overrun++;
+		tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+		msm_hsl_write(port, RESET_ERROR_STATUS, UARTDM_CR_ADDR);
+	}
+
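+	/*
+	 * On a stale event the TOTAL_SNAP register holds the total byte
+	 * count of the current RX transfer, so the remainder left to read
+	 * is SNAP - old_snap_state.  On an RXLEV (watermark) interrupt only
+	 * the watermark is guaranteed, i.e. 4 bytes per RFWR FIFO word, and
+	 * old_snap_state tracks what has already been consumed.
+	 */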
+	if (misr & UARTDM_ISR_RXSTALE_BMSK) {
+		count = msm_hsl_read(port, UARTDM_RX_TOTAL_SNAP_ADDR) -
+			msm_hsl_port->old_snap_state;
+		msm_hsl_port->old_snap_state = 0;
+	} else {
+		count = 4 * (msm_hsl_read(port, UARTDM_RFWR_ADDR));
+		msm_hsl_port->old_snap_state += count;
+	}
+
+	/* and now the main RX loop */
+	while (count > 0) {
+		unsigned int c;
+		char flag = TTY_NORMAL;
+
+		sr = msm_hsl_read(port, UARTDM_SR_ADDR);
+		if ((sr &
+		     UARTDM_SR_RXRDY_BMSK) == 0) {
+			msm_hsl_port->old_snap_state -= count;
+			break;
+		}
+		c = msm_hsl_read(port, UARTDM_RF_ADDR);
+		if (sr & UARTDM_SR_RX_BREAK_BMSK) {
+			port->icount.brk++;
+			if (uart_handle_break(port))
+				continue;
+		} else if (sr & UARTDM_SR_PAR_FRAME_BMSK) {
+			port->icount.frame++;
+		} else {
+			port->icount.rx++;
+		}
+
+		/* Mask conditions we're ignoring. */
+		sr &= port->read_status_mask;
+		if (sr & UARTDM_SR_RX_BREAK_BMSK)
+			flag = TTY_BREAK;
+		else if (sr & UARTDM_SR_PAR_FRAME_BMSK)
+			flag = TTY_FRAME;
+
+		/* TODO: handle sysrq */
+		/* if (!uart_handle_sysrq_char(port, c)) */
+		tty_insert_flip_string(tty, (char *) &c,
+				       (count > 4) ? 4 : count);
+		count -= 4;
+	}
+
+	tty_flip_buffer_push(tty);
+}
+
+static void handle_tx(struct uart_port *port)
+{
+	struct circ_buf *xmit = &port->state->xmit;
+	int sent_tx;
+	int tx_count;
+	int x;
+	unsigned int tf_pointer = 0;
+
+	tx_count = uart_circ_chars_pending(xmit);
+
+	if (tx_count > (UART_XMIT_SIZE - xmit->tail))
+		tx_count = UART_XMIT_SIZE - xmit->tail;
+	if (tx_count >= port->fifosize)
+		tx_count = port->fifosize;
+
+	/* Handle x_char */
+	if (port->x_char) {
+		wait_for_xmitr(port, UARTDM_ISR_TX_READY_BMSK);
+		msm_hsl_write(port, tx_count + 1, UARTDM_NCF_TX_ADDR);
+		msm_hsl_write(port, port->x_char, UARTDM_TF_ADDR);
+		port->icount.tx++;
+		port->x_char = 0;
+	} else if (tx_count) {
+		wait_for_xmitr(port, UARTDM_ISR_TX_READY_BMSK);
+		msm_hsl_write(port, tx_count, UARTDM_NCF_TX_ADDR);
+	}
+	if (!tx_count) {
+		msm_hsl_stop_tx(port);
+		return;
+	}
+
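+	/*
+	 * The TX FIFO is fed one 32-bit word at a time: up to four
+	 * characters are packed little-endian into each write, after the
+	 * total character count has been programmed into NCF_TX above.
+	 */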
+	while (tf_pointer < tx_count)  {
+		if (unlikely(!(msm_hsl_read(port, UARTDM_SR_ADDR) &
+			       UARTDM_SR_TXRDY_BMSK)))
+			continue;
+		switch (tx_count - tf_pointer) {
+		case 1: {
+			x = xmit->buf[xmit->tail];
+			port->icount.tx++;
+			break;
+		}
+		case 2: {
+			x = xmit->buf[xmit->tail]
+				| xmit->buf[xmit->tail+1] << 8;
+			port->icount.tx += 2;
+			break;
+		}
+		case 3: {
+			x = xmit->buf[xmit->tail]
+				| xmit->buf[xmit->tail+1] << 8
+				| xmit->buf[xmit->tail + 2] << 16;
+			port->icount.tx += 3;
+			break;
+		}
+		default: {
+			x = *((int *)&(xmit->buf[xmit->tail]));
+			port->icount.tx += 4;
+			break;
+		}
+		}
+		msm_hsl_write(port, x, UARTDM_TF_ADDR);
+		xmit->tail = ((tx_count - tf_pointer < 4) ?
+			      (tx_count - tf_pointer + xmit->tail) :
+			      (xmit->tail + 4)) & (UART_XMIT_SIZE - 1);
+		tf_pointer += 4;
+		sent_tx = 1;
+	}
+
+	if (uart_circ_empty(xmit))
+		msm_hsl_stop_tx(port);
+
+	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+		uart_write_wakeup(port);
+
+}
+
+static void handle_delta_cts(struct uart_port *port)
+{
+	msm_hsl_write(port, RESET_CTS, UARTDM_CR_ADDR);
+	port->icount.cts++;
+	wake_up_interruptible(&port->state->port.delta_msr_wait);
+}
+
+static irqreturn_t msm_hsl_irq(int irq, void *dev_id)
+{
+	struct uart_port *port = dev_id;
+	struct msm_hsl_port *msm_hsl_port = UART_TO_MSM(port);
+	unsigned int misr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->lock, flags);
+	clk_en(port, 1);
+	misr = msm_hsl_read(port, UARTDM_MISR_ADDR);
+	msm_hsl_write(port, 0, UARTDM_IMR_ADDR); /* disable interrupt */
+
+	if (misr & (UARTDM_ISR_RXSTALE_BMSK | UARTDM_ISR_RXLEV_BMSK)) {
+		handle_rx(port, misr);
+		if (misr & (UARTDM_ISR_RXSTALE_BMSK))
+			msm_hsl_write(port, RESET_STALE_INT, UARTDM_CR_ADDR);
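+		/*
+		 * Re-arm RX: reload the DMRX transfer-size register and
+		 * re-enable stale events so the next burst of incoming
+		 * data raises a fresh interrupt.
+		 */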
+		msm_hsl_write(port, 6500, UARTDM_DMRX_ADDR);
+		msm_hsl_write(port, STALE_EVENT_ENABLE, UARTDM_CR_ADDR);
+	}
+	if (misr & UARTDM_ISR_TXLEV_BMSK)
+		handle_tx(port);
+
+	if (misr & UARTDM_ISR_DELTA_CTS_BMSK)
+		handle_delta_cts(port);
+
+	/* restore interrupt */
+	msm_hsl_write(port, msm_hsl_port->imr, UARTDM_IMR_ADDR);
+	clk_en(port, 0);
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+static unsigned int msm_hsl_tx_empty(struct uart_port *port)
+{
+	unsigned int ret;
+
+	clk_en(port, 1);
+	ret = (msm_hsl_read(port, UARTDM_SR_ADDR) &
+	       UARTDM_SR_TXEMT_BMSK) ? TIOCSER_TEMT : 0;
+	clk_en(port, 0);
+
+	return ret;
+}
+
+static void msm_hsl_reset(struct uart_port *port)
+{
+	/* reset everything */
+	msm_hsl_write(port, RESET_RX, UARTDM_CR_ADDR);
+	msm_hsl_write(port, RESET_TX, UARTDM_CR_ADDR);
+	msm_hsl_write(port, RESET_ERROR_STATUS, UARTDM_CR_ADDR);
+	msm_hsl_write(port, RESET_BREAK_INT, UARTDM_CR_ADDR);
+	msm_hsl_write(port, RESET_CTS, UARTDM_CR_ADDR);
+	msm_hsl_write(port, RFR_LOW, UARTDM_CR_ADDR);
+}
+
+static unsigned int msm_hsl_get_mctrl(struct uart_port *port)
+{
+	return TIOCM_CAR | TIOCM_CTS | TIOCM_DSR | TIOCM_RTS;
+}
+
+static void msm_hsl_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+	unsigned int mr;
+	unsigned int loop_mode;
+
+	clk_en(port, 1);
+
+	mr = msm_hsl_read(port, UARTDM_MR1_ADDR);
+
+	if (!(mctrl & TIOCM_RTS)) {
+		mr &= ~UARTDM_MR1_RX_RDY_CTL_BMSK;
+		msm_hsl_write(port, mr, UARTDM_MR1_ADDR);
+		msm_hsl_write(port, RFR_HIGH, UARTDM_CR_ADDR);
+	} else {
+		mr |= UARTDM_MR1_RX_RDY_CTL_BMSK;
+		msm_hsl_write(port, mr, UARTDM_MR1_ADDR);
+	}
+
+	loop_mode = TIOCM_LOOP & mctrl;
+	if (loop_mode) {
+		mr = msm_hsl_read(port, UARTDM_MR2_ADDR);
+		mr |= UARTDM_MR2_LOOP_MODE_BMSK;
+		msm_hsl_write(port, mr, UARTDM_MR2_ADDR);
+
+		/* Reset TX */
+		msm_hsl_reset(port);
+
+		/* Turn on Uart Receiver & Transmitter*/
+		msm_hsl_write(port, UARTDM_CR_RX_EN_BMSK
+			      | UARTDM_CR_TX_EN_BMSK, UARTDM_CR_ADDR);
+	}
+
+	clk_en(port, 0);
+}
+
+static void msm_hsl_break_ctl(struct uart_port *port, int break_ctl)
+{
+	clk_en(port, 1);
+
+	if (break_ctl)
+		msm_hsl_write(port, START_BREAK, UARTDM_CR_ADDR);
+	else
+		msm_hsl_write(port, STOP_BREAK, UARTDM_CR_ADDR);
+
+	clk_en(port, 0);
+}
+
+static void msm_hsl_set_baud_rate(struct uart_port *port, unsigned int baud)
+{
+	unsigned int baud_code, rxstale, watermark;
+
+	switch (baud) {
+	case 300:
+		baud_code = UARTDM_CSR_300;
+		rxstale = 1;
+		break;
+	case 600:
+		baud_code = UARTDM_CSR_600;
+		rxstale = 1;
+		break;
+	case 1200:
+		baud_code = UARTDM_CSR_1200;
+		rxstale = 1;
+		break;
+	case 2400:
+		baud_code = UARTDM_CSR_2400;
+		rxstale = 1;
+		break;
+	case 4800:
+		baud_code = UARTDM_CSR_4800;
+		rxstale = 1;
+		break;
+	case 9600:
+		baud_code = UARTDM_CSR_9600;
+		rxstale = 2;
+		break;
+	case 14400:
+		baud_code = UARTDM_CSR_14400;
+		rxstale = 3;
+		break;
+	case 19200:
+		baud_code = UARTDM_CSR_19200;
+		rxstale = 4;
+		break;
+	case 28800:
+		baud_code = UARTDM_CSR_28800;
+		rxstale = 6;
+		break;
+	case 38400:
+		baud_code = UARTDM_CSR_38400;
+		rxstale = 8;
+		break;
+	case 57600:
+		baud_code = UARTDM_CSR_57600;
+		rxstale = 16;
+		break;
+	case 115200:
+	default:
+		baud_code = UARTDM_CSR_115200;
+		rxstale = 31;
+		break;
+	}
+
+	msm_hsl_write(port, RESET_RX, UARTDM_CR_ADDR);
+	msm_hsl_write(port, baud_code, UARTDM_CSR_ADDR);
+
+	/* RX stale watermark */
+	watermark = UARTDM_IPR_STALE_LSB_BMSK & rxstale;
+	watermark |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);
+	msm_hsl_write(port, watermark, UARTDM_IPR_ADDR);
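+	/*
+	 * The stale timeout is split across IPR: its low 5 bits go in the
+	 * STALE_LSB field, while any upper bits are meant for the MSB
+	 * field, which is what the (rxstale << 2) term above encodes.
+	 */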
+
+	/* set RX watermark */
+	watermark = (port->fifosize * 3) / 4;
+	msm_hsl_write(port, watermark, UARTDM_RFWR_ADDR);
+
+	/* set TX watermark */
+	msm_hsl_write(port, 0, UARTDM_TFWR_ADDR);
+
+	msm_hsl_write(port, RESET_STALE_INT, UARTDM_CR_ADDR);
+	msm_hsl_write(port, 6500, UARTDM_DMRX_ADDR);
+	msm_hsl_write(port, STALE_EVENT_ENABLE, UARTDM_CR_ADDR);
+}
+
+static void msm_hsl_init_clock(struct uart_port *port)
+{
+	clk_en(port, 1);
+}
+
+static void msm_hsl_deinit_clock(struct uart_port *port)
+{
+	clk_en(port, 0);
+}
+
+static int msm_hsl_startup(struct uart_port *port)
+{
+	struct msm_hsl_port *msm_hsl_port = UART_TO_MSM(port);
+	struct platform_device *pdev = to_platform_device(port->dev);
+	const struct msm_serial_hslite_platform_data *pdata =
+					pdev->dev.platform_data;
+	unsigned int data, rfr_level;
+	int ret;
+	unsigned long flags;
+
+	snprintf(msm_hsl_port->name, sizeof(msm_hsl_port->name),
+		 "msm_serial_hsl%d", port->line);
+
+	if (!(is_console(port)) || (!port->cons) ||
+		(port->cons && (!(port->cons->flags & CON_ENABLED)))) {
+
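+		/*
+		 * Ports routed through a GSBI block need the GSBI protocol
+		 * mux switched to (I2C-)UART mode before the UART core
+		 * behind it is usable.
+		 */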
+		if (msm_serial_hsl_has_gsbi(port))
+			if ((ioread32(msm_hsl_port->mapped_gsbi +
+				GSBI_CONTROL_ADDR) & GSBI_PROTOCOL_I2C_UART)
+					!= GSBI_PROTOCOL_I2C_UART)
+				iowrite32(GSBI_PROTOCOL_I2C_UART,
+					msm_hsl_port->mapped_gsbi +
+						GSBI_CONTROL_ADDR);
+
+		if (pdata && pdata->config_gpio) {
+			ret = gpio_request(pdata->uart_tx_gpio,
+						"UART_TX_GPIO");
+			if (unlikely(ret)) {
+				pr_err("%s: gpio request failed for:%d\n",
+						__func__, pdata->uart_tx_gpio);
+				return ret;
+			}
+
+			ret = gpio_request(pdata->uart_rx_gpio, "UART_RX_GPIO");
+			if (unlikely(ret)) {
+				pr_err("%s: gpio request failed for:%d\n",
+						__func__, pdata->uart_rx_gpio);
+				gpio_free(pdata->uart_tx_gpio);
+				return ret;
+			}
+		}
+	}
+#ifndef CONFIG_PM_RUNTIME
+	msm_hsl_init_clock(port);
+#endif
+	pm_runtime_get_sync(port->dev);
+
+	if (likely(port->fifosize > 12))
+		rfr_level = port->fifosize - 12;
+	else
+		rfr_level = port->fifosize;
+
+	spin_lock_irqsave(&port->lock, flags);
+
+	/* set automatic RFR level */
+	data = msm_hsl_read(port, UARTDM_MR1_ADDR);
+	data &= ~UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK;
+	data &= ~UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK;
+	data |= UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK & (rfr_level << 2);
+	data |= UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK & rfr_level;
+	msm_hsl_write(port, data, UARTDM_MR1_ADDR);
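+	/*
+	 * MR1 splits the automatic RFR (flow-control) level across two
+	 * fields: the low bits sit in AUTO_RFR_LEVEL0 and the upper bits,
+	 * shifted left by 2, are placed in AUTO_RFR_LEVEL1.
+	 */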
+
+
+	/* Make sure IPR is not 0 to start with*/
+	msm_hsl_write(port, UARTDM_IPR_STALE_LSB_BMSK, UARTDM_IPR_ADDR);
+	data = 0;
+
+	if (!(is_console(port)) || (!port->cons) ||
+		(port->cons && (!(port->cons->flags & CON_ENABLED)))) {
+		msm_hsl_write(port, CR_PROTECTION_EN, UARTDM_CR_ADDR);
+		msm_hsl_write(port, UARTDM_MR2_BITS_PER_CHAR_8 | STOP_BIT_ONE,
+			      UARTDM_MR2_ADDR);	/* 8N1 */
+		msm_hsl_reset(port);
+		data = UARTDM_CR_TX_EN_BMSK;
+	}
+
+	data |= UARTDM_CR_RX_EN_BMSK;
+
+	msm_hsl_write(port, data, UARTDM_CR_ADDR);	/* enable TX & RX */
+
+	/* turn on RX and CTS interrupts */
+	msm_hsl_port->imr = UARTDM_ISR_RXSTALE_BMSK
+		| UARTDM_ISR_DELTA_CTS_BMSK | UARTDM_ISR_RXLEV_BMSK;
+
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	ret = request_irq(port->irq, msm_hsl_irq, IRQF_TRIGGER_HIGH,
+			  msm_hsl_port->name, port);
+	if (unlikely(ret)) {
+		printk(KERN_ERR "%s: failed to request_irq\n", __func__);
+		return ret;
+	}
+
+	spin_lock_irqsave(&port->lock, flags);
+	msm_hsl_write(port, RESET_STALE_INT, UARTDM_CR_ADDR);
+	msm_hsl_write(port, 6500, UARTDM_DMRX_ADDR);
+	msm_hsl_write(port, STALE_EVENT_ENABLE, UARTDM_CR_ADDR);
+	msm_hsl_write(port, msm_hsl_port->imr, UARTDM_IMR_ADDR);
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	return 0;
+}
+
+static void msm_hsl_shutdown(struct uart_port *port)
+{
+	struct msm_hsl_port *msm_hsl_port = UART_TO_MSM(port);
+	struct platform_device *pdev = to_platform_device(port->dev);
+	const struct msm_serial_hslite_platform_data *pdata =
+					pdev->dev.platform_data;
+
+	clk_en(port, 1);
+
+	msm_hsl_port->imr = 0;
+	msm_hsl_write(port, 0, UARTDM_IMR_ADDR); /* disable interrupts */
+
+	clk_en(port, 0);
+
+	free_irq(port->irq, port);
+
+#ifndef CONFIG_PM_RUNTIME
+	msm_hsl_deinit_clock(port);
+#endif
+	pm_runtime_put_sync(port->dev);
+	if (!(is_console(port)) || (!port->cons) ||
+		(port->cons && (!(port->cons->flags & CON_ENABLED)))) {
+		if (pdata && pdata->config_gpio) {
+			gpio_free(pdata->uart_tx_gpio);
+			gpio_free(pdata->uart_rx_gpio);
+		}
+	}
+}
+
+static void msm_hsl_set_termios(struct uart_port *port,
+				struct ktermios *termios,
+				struct ktermios *old)
+{
+	unsigned long flags;
+	unsigned int baud, mr;
+
+	spin_lock_irqsave(&port->lock, flags);
+	clk_en(port, 1);
+
+	/* calculate and set baud rate */
+	baud = uart_get_baud_rate(port, termios, old, 300, 115200);
+
+	msm_hsl_set_baud_rate(port, baud);
+
+	/* calculate parity */
+	mr = msm_hsl_read(port, UARTDM_MR2_ADDR);
+	mr &= ~UARTDM_MR2_PARITY_MODE_BMSK;
+	if (termios->c_cflag & PARENB) {
+		if (termios->c_cflag & PARODD)
+			mr |= ODD_PARITY;
+		else if (termios->c_cflag & CMSPAR)
+			mr |= SPACE_PARITY;
+		else
+			mr |= EVEN_PARITY;
+	}
+
+	/* calculate bits per char */
+	mr &= ~UARTDM_MR2_BITS_PER_CHAR_BMSK;
+	switch (termios->c_cflag & CSIZE) {
+	case CS5:
+		mr |= FIVE_BPC;
+		break;
+	case CS6:
+		mr |= SIX_BPC;
+		break;
+	case CS7:
+		mr |= SEVEN_BPC;
+		break;
+	case CS8:
+	default:
+		mr |= EIGHT_BPC;
+		break;
+	}
+
+	/* calculate stop bits */
+	mr &= ~(STOP_BIT_ONE | STOP_BIT_TWO);
+	if (termios->c_cflag & CSTOPB)
+		mr |= STOP_BIT_TWO;
+	else
+		mr |= STOP_BIT_ONE;
+
+	/* set parity, bits per char, and stop bit */
+	msm_hsl_write(port, mr, UARTDM_MR2_ADDR);
+
+	/* calculate and set hardware flow control */
+	mr = msm_hsl_read(port, UARTDM_MR1_ADDR);
+	mr &= ~(UARTDM_MR1_CTS_CTL_BMSK | UARTDM_MR1_RX_RDY_CTL_BMSK);
+	if (termios->c_cflag & CRTSCTS) {
+		mr |= UARTDM_MR1_CTS_CTL_BMSK;
+		mr |= UARTDM_MR1_RX_RDY_CTL_BMSK;
+	}
+	msm_hsl_write(port, mr, UARTDM_MR1_ADDR);
+
+	/* Configure status bits to ignore based on termio flags. */
+	port->read_status_mask = 0;
+	if (termios->c_iflag & INPCK)
+		port->read_status_mask |= UARTDM_SR_PAR_FRAME_BMSK;
+	if (termios->c_iflag & (BRKINT | PARMRK))
+		port->read_status_mask |= UARTDM_SR_RX_BREAK_BMSK;
+
+	uart_update_timeout(port, termios->c_cflag, baud);
+
+	clk_en(port, 0);
+	spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *msm_hsl_type(struct uart_port *port)
+{
+	return "MSM";
+}
+
+static void msm_hsl_release_port(struct uart_port *port)
+{
+	struct msm_hsl_port *msm_hsl_port = UART_TO_MSM(port);
+	struct platform_device *pdev = to_platform_device(port->dev);
+	struct resource *uart_resource;
+	struct resource *gsbi_resource;
+	resource_size_t size;
+
+	uart_resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						     "uartdm_resource");
+	if (unlikely(!uart_resource))
+		return;
+	size = uart_resource->end - uart_resource->start + 1;
+
+	release_mem_region(port->mapbase, size);
+	iounmap(port->membase);
+	port->membase = NULL;
+
+	if (msm_serial_hsl_has_gsbi(port)) {
+		iowrite32(GSBI_PROTOCOL_IDLE, msm_hsl_port->mapped_gsbi +
+			  GSBI_CONTROL_ADDR);
+		gsbi_resource = platform_get_resource_byname(pdev,
+							     IORESOURCE_MEM,
+							     "gsbi_resource");
+
+		size = gsbi_resource->end - gsbi_resource->start + 1;
+		iounmap(msm_hsl_port->mapped_gsbi);
+		msm_hsl_port->mapped_gsbi = NULL;
+	}
+}
+
+static int msm_hsl_request_port(struct uart_port *port)
+{
+	struct msm_hsl_port *msm_hsl_port = UART_TO_MSM(port);
+	struct platform_device *pdev = to_platform_device(port->dev);
+	struct resource *uart_resource;
+	struct resource *gsbi_resource;
+	resource_size_t size;
+
+	uart_resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						     "uartdm_resource");
+	if (unlikely(!uart_resource)) {
+		pr_err("%s: can't get uartdm resource\n", __func__);
+		return -ENXIO;
+	}
+	size = uart_resource->end - uart_resource->start + 1;
+
+	if (unlikely(!request_mem_region(port->mapbase, size,
+					 "msm_serial_hsl"))) {
+		pr_err("%s: can't get mem region for uartdm\n", __func__);
+		return -EBUSY;
+	}
+
+	port->membase = ioremap(port->mapbase, size);
+	if (!port->membase) {
+		release_mem_region(port->mapbase, size);
+		return -EBUSY;
+	}
+
+	if (msm_serial_hsl_has_gsbi(port)) {
+		gsbi_resource = platform_get_resource_byname(pdev,
+							     IORESOURCE_MEM,
+							     "gsbi_resource");
+		if (unlikely(!gsbi_resource)) {
+			pr_err("%s: can't get gsbi resource\n", __func__);
+			return -ENXIO;
+		}
+
+		size = gsbi_resource->end - gsbi_resource->start + 1;
+		msm_hsl_port->mapped_gsbi = ioremap(gsbi_resource->start,
+						    size);
+		if (!msm_hsl_port->mapped_gsbi) {
+			return -EBUSY;
+		}
+	}
+
+	return 0;
+}
+
+static void msm_hsl_config_port(struct uart_port *port, int flags)
+{
+	struct msm_hsl_port *msm_hsl_port = UART_TO_MSM(port);
+	if (flags & UART_CONFIG_TYPE) {
+		port->type = PORT_MSM;
+		if (msm_hsl_request_port(port))
+			return;
+	}
+	if (msm_serial_hsl_has_gsbi(port))
+		if ((ioread32(msm_hsl_port->mapped_gsbi + GSBI_CONTROL_ADDR) &
+			GSBI_PROTOCOL_I2C_UART) != GSBI_PROTOCOL_I2C_UART)
+			iowrite32(GSBI_PROTOCOL_I2C_UART,
+				msm_hsl_port->mapped_gsbi + GSBI_CONTROL_ADDR);
+}
+
+static int msm_hsl_verify_port(struct uart_port *port,
+			       struct serial_struct *ser)
+{
+	if (unlikely(ser->type != PORT_UNKNOWN && ser->type != PORT_MSM))
+		return -EINVAL;
+	if (unlikely(port->irq != ser->irq))
+		return -EINVAL;
+	return 0;
+}
+
+static void msm_hsl_power(struct uart_port *port, unsigned int state,
+			  unsigned int oldstate)
+{
+	int ret;
+	struct msm_hsl_port *msm_hsl_port = UART_TO_MSM(port);
+
+	switch (state) {
+	case 0:
+		ret = clk_set_rate(msm_hsl_port->clk, 1843200);
+		if (ret)
+			pr_err("%s(): Error setting UART clock rate\n",
+								__func__);
+		clk_en(port, 1);
+		break;
+	case 3:
+		clk_en(port, 0);
+		break;
+	default:
+		pr_err("%s(): msm_serial_hsl: Unknown PM state %d\n",
+							__func__, state);
+	}
+}
+
+static struct uart_ops msm_hsl_uart_pops = {
+	.tx_empty = msm_hsl_tx_empty,
+	.set_mctrl = msm_hsl_set_mctrl,
+	.get_mctrl = msm_hsl_get_mctrl,
+	.stop_tx = msm_hsl_stop_tx,
+	.start_tx = msm_hsl_start_tx,
+	.stop_rx = msm_hsl_stop_rx,
+	.enable_ms = msm_hsl_enable_ms,
+	.break_ctl = msm_hsl_break_ctl,
+	.startup = msm_hsl_startup,
+	.shutdown = msm_hsl_shutdown,
+	.set_termios = msm_hsl_set_termios,
+	.type = msm_hsl_type,
+	.release_port = msm_hsl_release_port,
+	.request_port = msm_hsl_request_port,
+	.config_port = msm_hsl_config_port,
+	.verify_port = msm_hsl_verify_port,
+	.pm = msm_hsl_power,
+};
+
+static struct msm_hsl_port msm_hsl_uart_ports[] = {
+	{
+		.uart = {
+			.iotype = UPIO_MEM,
+			.ops = &msm_hsl_uart_pops,
+			.flags = UPF_BOOT_AUTOCONF,
+			.fifosize = 64,
+			.line = 0,
+		},
+	},
+	{
+		.uart = {
+			.iotype = UPIO_MEM,
+			.ops = &msm_hsl_uart_pops,
+			.flags = UPF_BOOT_AUTOCONF,
+			.fifosize = 64,
+			.line = 1,
+		},
+	},
+	{
+		.uart = {
+			.iotype = UPIO_MEM,
+			.ops = &msm_hsl_uart_pops,
+			.flags = UPF_BOOT_AUTOCONF,
+			.fifosize = 64,
+			.line = 2,
+		},
+	},
+};
+
+#define UART_NR	ARRAY_SIZE(msm_hsl_uart_ports)
+
+static inline struct uart_port *get_port_from_line(unsigned int line)
+{
+	return &msm_hsl_uart_ports[line].uart;
+}
+
+/*
+ *  Wait for transmitter & holding register to empty
+ *  Derived from wait_for_xmitr in 8250 serial driver by Russell King  */
+void wait_for_xmitr(struct uart_port *port, int bits)
+{
+	if (!(msm_hsl_read(port, UARTDM_SR_ADDR) & UARTDM_SR_TXEMT_BMSK)) {
+		while ((msm_hsl_read(port, UARTDM_ISR_ADDR) & bits) != bits) {
+			udelay(1);
+			touch_nmi_watchdog();
+			cpu_relax();
+		}
+		msm_hsl_write(port, CLEAR_TX_READY, UARTDM_CR_ADDR);
+	}
+}
+
+#ifdef CONFIG_SERIAL_MSM_HSL_CONSOLE
+static void msm_hsl_console_putchar(struct uart_port *port, int ch)
+{
+	wait_for_xmitr(port, UARTDM_ISR_TX_READY_BMSK);
+	msm_hsl_write(port, 1, UARTDM_NCF_TX_ADDR);
+
+	while (!(msm_hsl_read(port, UARTDM_SR_ADDR) & UARTDM_SR_TXRDY_BMSK)) {
+		udelay(1);
+		touch_nmi_watchdog();
+	}
+
+	msm_hsl_write(port, ch, UARTDM_TF_ADDR);
+}
+
+static void msm_hsl_console_write(struct console *co, const char *s,
+				  unsigned int count)
+{
+	struct uart_port *port;
+	struct msm_hsl_port *msm_hsl_port;
+	int locked;
+
+	BUG_ON(co->index < 0 || co->index >= UART_NR);
+
+	port = get_port_from_line(co->index);
+	msm_hsl_port = UART_TO_MSM(port);
+
+	/* not pretty, but we can end up here via various convoluted paths */
+	if (port->sysrq || oops_in_progress)
+		locked = spin_trylock(&port->lock);
+	else {
+		locked = 1;
+		spin_lock(&port->lock);
+	}
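+	/*
+	 * Mask all UART interrupts while the characters are pushed out by
+	 * polling, then restore the saved imr so the interrupt-driven path
+	 * carries on untouched.
+	 */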
+	msm_hsl_write(port, 0, UARTDM_IMR_ADDR);
+	uart_console_write(port, s, count, msm_hsl_console_putchar);
+	msm_hsl_write(port, msm_hsl_port->imr, UARTDM_IMR_ADDR);
+	if (locked == 1)
+		spin_unlock(&port->lock);
+}
+
+static int __init msm_hsl_console_setup(struct console *co, char *options)
+{
+	struct uart_port *port;
+	int baud = 0, flow, bits, parity;
+	int ret;
+
+	if (unlikely(co->index >= UART_NR || co->index < 0))
+		return -ENXIO;
+
+	port = get_port_from_line(co->index);
+
+	if (unlikely(!port->membase))
+		return -ENXIO;
+
+	port->cons = co;
+
+	pm_runtime_get_noresume(port->dev);
+
+#ifndef CONFIG_PM_RUNTIME
+	msm_hsl_init_clock(port);
+#endif
+	pm_runtime_resume(port->dev);
+
+	if (options)
+		uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+	bits = 8;
+	parity = 'n';
+	flow = 'n';
+	msm_hsl_write(port, UARTDM_MR2_BITS_PER_CHAR_8 | STOP_BIT_ONE,
+		      UARTDM_MR2_ADDR);	/* 8N1 */
+
+	if (baud < 300 || baud > 115200)
+		baud = 115200;
+	msm_hsl_set_baud_rate(port, baud);
+
+	ret = uart_set_options(port, co, baud, parity, bits, flow);
+	msm_hsl_reset(port);
+	/* Enable transmitter */
+	msm_hsl_write(port, CR_PROTECTION_EN, UARTDM_CR_ADDR);
+	msm_hsl_write(port, UARTDM_CR_TX_EN_BMSK, UARTDM_CR_ADDR);
+
+	printk(KERN_INFO "msm_serial_hsl: console setup on port #%d\n",
+	       port->line);
+
+	return ret;
+}
+
+static struct uart_driver msm_hsl_uart_driver;
+
+static struct console msm_hsl_console = {
+	.name = "ttyHSL",
+	.write = msm_hsl_console_write,
+	.device = uart_console_device,
+	.setup = msm_hsl_console_setup,
+	.flags = CON_PRINTBUFFER,
+	.index = -1,
+	.data = &msm_hsl_uart_driver,
+};
+
+#define MSM_HSL_CONSOLE	(&msm_hsl_console)
+
+#else
+#define MSM_HSL_CONSOLE	NULL
+#endif
+
+static struct uart_driver msm_hsl_uart_driver = {
+	.owner = THIS_MODULE,
+	.driver_name = "msm_serial_hsl",
+	.dev_name = "ttyHSL",
+	.nr = UART_NR,
+	.cons = MSM_HSL_CONSOLE,
+};
+
+static int __devinit msm_serial_hsl_probe(struct platform_device *pdev)
+{
+	struct msm_hsl_port *msm_hsl_port;
+	struct resource *uart_resource;
+	struct resource *gsbi_resource;
+	struct uart_port *port;
+	int ret;
+
+	if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
+		return -ENXIO;
+
+	printk(KERN_INFO "msm_serial_hsl: detected port #%d\n", pdev->id);
+
+	port = get_port_from_line(pdev->id);
+	port->dev = &pdev->dev;
+	msm_hsl_port = UART_TO_MSM(port);
+
+	gsbi_resource =	platform_get_resource_byname(pdev,
+						     IORESOURCE_MEM,
+						     "gsbi_resource");
+	if (gsbi_resource) {
+		msm_hsl_port->is_uartdm = 1;
+		msm_hsl_port->clk = clk_get(&pdev->dev, "gsbi_uart_clk");
+		msm_hsl_port->pclk = clk_get(&pdev->dev, "gsbi_pclk");
+	} else {
+		msm_hsl_port->is_uartdm = 0;
+		msm_hsl_port->clk = clk_get(&pdev->dev, "uartdm_clk");
+		msm_hsl_port->pclk = NULL;
+	}
+
+	if (unlikely(IS_ERR(msm_hsl_port->clk))) {
+		printk(KERN_ERR "%s: Error getting clk\n", __func__);
+		return PTR_ERR(msm_hsl_port->clk);
+	}
+	if (unlikely(IS_ERR(msm_hsl_port->pclk))) {
+		printk(KERN_ERR "%s: Error getting pclk\n", __func__);
+		return PTR_ERR(msm_hsl_port->pclk);
+	}
+
+
+	uart_resource = platform_get_resource_byname(pdev,
+						     IORESOURCE_MEM,
+						     "uartdm_resource");
+	if (unlikely(!uart_resource)) {
+		printk(KERN_ERR "getting uartdm_resource failed\n");
+		return -ENXIO;
+	}
+	port->mapbase = uart_resource->start;
+
+	port->irq = platform_get_irq(pdev, 0);
+	if (unlikely(port->irq < 0)) {
+		printk(KERN_ERR "%s: getting irq failed\n", __func__);
+		return -ENXIO;
+	}
+
+	device_set_wakeup_capable(&pdev->dev, 1);
+	platform_set_drvdata(pdev, port);
+	pm_runtime_enable(port->dev);
+	ret = uart_add_one_port(&msm_hsl_uart_driver, port);
+
+	return ret;
+}
+
+static int __devexit msm_serial_hsl_remove(struct platform_device *pdev)
+{
+	struct msm_hsl_port *msm_hsl_port = platform_get_drvdata(pdev);
+	struct uart_port *port;
+
+	port = get_port_from_line(pdev->id);
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
+	device_set_wakeup_capable(&pdev->dev, 0);
+	platform_set_drvdata(pdev, NULL);
+	uart_remove_one_port(&msm_hsl_uart_driver, port);
+
+	clk_put(msm_hsl_port->pclk);
+	clk_put(msm_hsl_port->clk);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int msm_serial_hsl_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct uart_port *port;
+	port = get_port_from_line(pdev->id);
+
+	if (port) {
+		uart_suspend_port(&msm_hsl_uart_driver, port);
+		if (device_may_wakeup(dev))
+			enable_irq_wake(port->irq);
+
+		if (is_console(port))
+			msm_hsl_deinit_clock(port);
+	}
+
+	return 0;
+}
+
+static int msm_serial_hsl_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct uart_port *port;
+	port = get_port_from_line(pdev->id);
+
+	if (port) {
+		if (is_console(port))
+			msm_hsl_init_clock(port);
+		uart_resume_port(&msm_hsl_uart_driver, port);
+
+		if (device_may_wakeup(dev))
+			disable_irq_wake(port->irq);
+	}
+
+	return 0;
+}
+#else
+#define msm_serial_hsl_suspend NULL
+#define msm_serial_hsl_resume NULL
+#endif
+
+static int msm_hsl_runtime_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct uart_port *port;
+	port = get_port_from_line(pdev->id);
+
+	dev_dbg(dev, "pm_runtime: suspending\n");
+	msm_hsl_deinit_clock(port);
+	return 0;
+}
+
+static int msm_hsl_runtime_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct uart_port *port;
+	port = get_port_from_line(pdev->id);
+
+	dev_dbg(dev, "pm_runtime: resuming\n");
+	msm_hsl_init_clock(port);
+	return 0;
+}
+
+static struct dev_pm_ops msm_hsl_dev_pm_ops = {
+	.suspend = msm_serial_hsl_suspend,
+	.resume = msm_serial_hsl_resume,
+	.runtime_suspend = msm_hsl_runtime_suspend,
+	.runtime_resume = msm_hsl_runtime_resume,
+};
+
+static struct platform_driver msm_hsl_platform_driver = {
+	.probe = msm_serial_hsl_probe,
+	.remove = __devexit_p(msm_serial_hsl_remove),
+	.driver = {
+		.name = "msm_serial_hsl",
+		.owner = THIS_MODULE,
+		.pm = &msm_hsl_dev_pm_ops,
+	},
+};
+
+static int __init msm_serial_hsl_init(void)
+{
+	int ret;
+
+	ret = uart_register_driver(&msm_hsl_uart_driver);
+	if (unlikely(ret))
+		return ret;
+
+	ret = platform_driver_register(&msm_hsl_platform_driver);
+	if (unlikely(ret))
+		uart_unregister_driver(&msm_hsl_uart_driver);
+
+	printk(KERN_INFO "msm_serial_hsl: driver initialized\n");
+
+	return ret;
+}
+
+static void __exit msm_serial_hsl_exit(void)
+{
+#ifdef CONFIG_SERIAL_MSM_HSL_CONSOLE
+	unregister_console(&msm_hsl_console);
+#endif
+	platform_driver_unregister(&msm_hsl_platform_driver);
+	uart_unregister_driver(&msm_hsl_uart_driver);
+}
+
+module_init(msm_serial_hsl_init);
+module_exit(msm_serial_hsl_exit);
+
+MODULE_DESCRIPTION("Driver for msm HSUART serial device");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 48f1781..afe0033 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -168,4 +168,6 @@
 
 source "drivers/usb/otg/Kconfig"
 
+source "drivers/usb/function/Kconfig"
+
 endif # USB_SUPPORT
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 30ddf8d..ef69048 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -8,6 +8,7 @@
 
 obj-$(CONFIG_USB_MON)		+= mon/
 
+obj-$(CONFIG_USB_OTG_UTILS)	+= otg/
 obj-$(CONFIG_PCI)		+= host/
 obj-$(CONFIG_USB_EHCI_HCD)	+= host/
 obj-$(CONFIG_USB_ISP116X_HCD)	+= host/
@@ -23,6 +24,7 @@
 obj-$(CONFIG_USB_ISP1760_HCD)	+= host/
 obj-$(CONFIG_USB_IMX21_HCD)	+= host/
 obj-$(CONFIG_USB_FSL_MPH_DR_OF)	+= host/
+obj-$(CONFIG_USB_PEHCI_HCD)	+= host/
 
 obj-$(CONFIG_USB_C67X00_HCD)	+= c67x00/
 
@@ -49,5 +51,4 @@
 
 obj-$(CONFIG_USB_MUSB_HDRC)	+= musb/
 obj-$(CONFIG_USB_RENESAS_USBHS)	+= renesas_usbhs/
-obj-$(CONFIG_USB_OTG_UTILS)	+= otg/
 obj-$(CONFIG_USB_GADGET)	+= gadget/
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 34e3da5..97c5690 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1278,6 +1278,44 @@
 	return status;
 }
 
+#ifdef CONFIG_USB_OTG
+void usb_hnp_polling_work(struct work_struct *work)
+{
+	int ret;
+	struct usb_bus *bus =
+		container_of(work, struct usb_bus, hnp_polling.work);
+	struct usb_device *udev = bus->root_hub->children[bus->otg_port - 1];
+	u8 *status = kmalloc(sizeof(*status), GFP_KERNEL);
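+	/*
+	 * The status buffer must come from the heap: usb_control_msg()
+	 * transfer buffers may be DMA-mapped, so an on-stack buffer cannot
+	 * be used here.
+	 */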
+
+	if (!status)
+		return;
+
+	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+		USB_REQ_GET_STATUS, USB_DIR_IN | USB_RECIP_DEVICE,
+		0, OTG_STATUS_SELECTOR, status, sizeof(*status),
+		USB_CTRL_GET_TIMEOUT);
+	if (ret < 0) {
+		/* Peripheral may not support HNP polling */
+		dev_info(&udev->dev, "HNP polling failed. status %d\n", ret);
+		goto out;
+	}
+
+	/* Spec says the host must suspend the bus within 2 seconds. */
+	if (*status & (1 << HOST_REQUEST_FLAG)) {
+		do_unbind_rebind(udev, DO_UNBIND);
+		udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
+		ret = usb_suspend_both(udev, PMSG_USER_SUSPEND);
+		if (ret)
+			dev_info(&udev->dev, "suspend failed\n");
+	} else {
+		schedule_delayed_work(&bus->hnp_polling,
+			msecs_to_jiffies(THOST_REQ_POLL));
+	}
+out:
+	kfree(status);
+}
+#endif
+
 static void choose_wakeup(struct usb_device *udev, pm_message_t msg)
 {
 	int	w;
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index ace9f84..54338fc 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -872,6 +872,9 @@
 	bus->bandwidth_isoc_reqs = 0;
 
 	INIT_LIST_HEAD (&bus->bus_list);
+#ifdef CONFIG_USB_OTG
+	INIT_DELAYED_WORK(&bus->hnp_polling, usb_hnp_polling_work);
+#endif
 }
 
 /*-------------------------------------------------------------------------*/
@@ -901,6 +904,11 @@
 	/* Add it to the local list of buses */
 	list_add (&bus->bus_list, &usb_bus_list);
 	mutex_unlock(&usb_bus_list_lock);
+#ifdef CONFIG_USB_OTG
+	/* Obviously HNP is supported on B-host */
+	if (bus->is_b_host)
+		bus->hnp_support = 1;
+#endif
 
 	usb_notify_add_bus(bus);
 
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index a428aa0..5442297 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -24,12 +24,38 @@
 #include <linux/kthread.h>
 #include <linux/mutex.h>
 #include <linux/freezer.h>
+#include <linux/usb/otg.h>
 
 #include <asm/uaccess.h>
 #include <asm/byteorder.h>
 
 #include "usb.h"
 
+#if defined(CONFIG_USB_PEHCI_HCD) || defined(CONFIG_USB_PEHCI_HCD_MODULE)
+#include <linux/usb/hcd.h>
+#include <linux/usb/ch11.h>
+
+int portno;
+int No_Data_Phase;
+EXPORT_SYMBOL(No_Data_Phase);
+int No_Status_Phase;
+EXPORT_SYMBOL(No_Status_Phase);
+unsigned char hub_tier;
+
+#define PDC_HOST_NOTIFY		0x8001	/*completion from core */
+#define UNSUPPORTED_DEVICE	0x8099
+#define UNWANTED_SUSPEND	0x8098
+#define PDC_POWERMANAGEMENT	0x8097
+
+int Unwanted_SecondReset;
+EXPORT_SYMBOL(Unwanted_SecondReset);
+int HostComplianceTest;
+EXPORT_SYMBOL(HostComplianceTest);
+int HostTest;
+EXPORT_SYMBOL(HostTest);
+#endif
+
+
 /* if we are in debug mode, always announce new devices */
 #ifdef DEBUG
 #ifndef CONFIG_USB_ANNOUNCE_NEW_DEVICES
@@ -356,8 +382,11 @@
 {
 	int i, status = -ETIMEDOUT;
 
+	/* ISP1763A HUB sometimes returns 2 bytes instead of 4 bytes, retry
+	 * if this happens
+	 */
 	for (i = 0; i < USB_STS_RETRIES &&
-			(status == -ETIMEDOUT || status == -EPIPE); i++) {
+			(status == -ETIMEDOUT || status == -EPIPE || status == 2); i++) {
 		status = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
 			USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_PORT, 0, port1,
 			data, sizeof(*data), USB_STS_TIMEOUT);
@@ -724,6 +753,10 @@
 		 */
 		if (type == HUB_INIT) {
 			delay = hub_power_on(hub, false);
+#ifdef CONFIG_USB_OTG
+			if (hdev->bus->is_b_host)
+				goto init2;
+#endif
 			PREPARE_DELAYED_WORK(&hub->init_work, hub_init_func2);
 			schedule_delayed_work(&hub->init_work,
 					msecs_to_jiffies(delay));
@@ -858,6 +891,11 @@
 	 * will see them later and handle them normally.
 	 */
 	if (need_debounce_delay) {
+#ifdef CONFIG_USB_OTG
+		if (hdev->bus->is_b_host && type == HUB_INIT)
+			goto init3;
+#endif
+
 		delay = HUB_DEBOUNCE_STABLE;
 
 		/* Don't do a long sleep inside a workqueue routine */
@@ -1301,6 +1339,7 @@
 #ifdef	CONFIG_USB_OTG_BLACKLIST_HUB
 	if (hdev->parent) {
 		dev_warn(&intf->dev, "ignoring external hub\n");
+		otg_send_event(OTG_EVENT_HUB_NOT_SUPPORTED);
 		return -ENODEV;
 	}
 #endif
@@ -1649,6 +1688,13 @@
 	dev_info(&udev->dev, "USB disconnect, device number %d\n",
 			udev->devnum);
 
+#ifdef CONFIG_USB_OTG
+	if (udev->bus->hnp_support && udev->portnum == udev->bus->otg_port) {
+		cancel_delayed_work(&udev->bus->hnp_polling);
+		udev->bus->hnp_support = 0;
+	}
+#endif
+
 	usb_lock_device(udev);
 
 	/* Free up all the children before we remove this device */
@@ -1755,15 +1801,30 @@
 					(port1 == bus->otg_port)
 						? "" : "non-");
 
+				/* a_alt_hnp_support is obsolete */
+				if (port1 != bus->otg_port)
+					goto out;
+
+				bus->hnp_support = 1;
+
+				/* a_hnp_support is not required for devices
+				 * compliant with revision 2.0 or later.
+				 */
+				if (le16_to_cpu(desc->bcdOTG) >= 0x0200)
+					goto out;
+
+				/* Legacy B-devices, i.e. those compliant
+				 * with spec revision 1.3, expect the
+				 * A-device to set a_hnp_support or
+				 * b_hnp_enable before selecting a
+				 * configuration.
+				 */
+
 				/* enable HNP before suspend, it's simpler */
-				if (port1 == bus->otg_port)
-					bus->b_hnp_enable = 1;
 				err = usb_control_msg(udev,
 					usb_sndctrlpipe(udev, 0),
 					USB_REQ_SET_FEATURE, 0,
-					bus->b_hnp_enable
-						? USB_DEVICE_B_HNP_ENABLE
-						: USB_DEVICE_A_ALT_HNP_SUPPORT,
+					USB_DEVICE_A_HNP_SUPPORT,
 					0, NULL, 0, USB_CTRL_SET_TIMEOUT);
 				if (err < 0) {
 					/* OTG MESSAGE: report errors here,
@@ -1772,26 +1833,35 @@
 					dev_info(&udev->dev,
 						"can't set HNP mode: %d\n",
 						err);
-					bus->b_hnp_enable = 0;
+					bus->hnp_support = 0;
 				}
 			}
 		}
 	}
-
+out:
 	if (!is_targeted(udev)) {
 
+		otg_send_event(OTG_EVENT_DEV_NOT_SUPPORTED);
+
 		/* Maybe it can talk to us, though we can't talk to it.
 		 * (Includes HNP test device.)
 		 */
-		if (udev->bus->b_hnp_enable || udev->bus->is_b_host) {
+		if (udev->bus->hnp_support) {
 			err = usb_port_suspend(udev, PMSG_SUSPEND);
 			if (err < 0)
 				dev_dbg(&udev->dev, "HNP fail, %d\n", err);
 		}
 		err = -ENOTSUPP;
-		goto fail;
+	} else if (udev->bus->hnp_support &&
+		udev->portnum == udev->bus->otg_port) {
+		/* HNP polling was introduced in the OTG supplement Rev 2.0,
+		 * so older devices may not support it. The work is not
+		 * re-armed if the device returns STALL. The B-Host also
+		 * performs HNP polling.
+		 */
+		schedule_delayed_work(&udev->bus->hnp_polling,
+			msecs_to_jiffies(THOST_REQ_POLL));
 	}
-fail:
 #endif
 	return err;
 }
@@ -2346,6 +2416,22 @@
 				return status;
 		}
 	}
+#ifdef CONFIG_USB_OTG
+	if (!udev->bus->is_b_host && udev->bus->hnp_support &&
+		udev->portnum == udev->bus->otg_port) {
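+		/*
+		 * Acting as the A-Host: enable HNP on the peripheral
+		 * (b_hnp_enable) before the OTG port is suspended, so the
+		 * B-device may take over the bus if it wishes.
+		 */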
+		status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+				USB_REQ_SET_FEATURE, 0,
+				USB_DEVICE_B_HNP_ENABLE,
+				0, NULL, 0, USB_CTRL_SET_TIMEOUT);
+		if (status < 0) {
+			otg_send_event(OTG_EVENT_NO_RESP_FOR_HNP_ENABLE);
+			dev_dbg(&udev->dev, "can't enable HNP on port %d, "
+					"status %d\n", port1, status);
+		} else {
+			udev->bus->b_hnp_enable = 1;
+		}
+	}
+#endif
 
 	/* see 7.1.7.6 */
 	if (hub_is_superspeed(hub->hdev))
@@ -2949,14 +3035,22 @@
 					buf->bMaxPacketSize0;
 			kfree(buf);
 
-			retval = hub_port_reset(hub, port1, udev, delay);
-			if (retval < 0)		/* error or disconnect */
-				goto fail;
-			if (oldspeed != udev->speed) {
-				dev_dbg(&udev->dev,
-					"device reset changed speed!\n");
-				retval = -ENODEV;
-				goto fail;
+			/*
+			 * If it is an HSET test device, we don't issue a
+			 * second reset, which would fail due to the
+			 * speed change.
+			 */
+			if (le16_to_cpu(buf->idVendor) != 0x1a0a) {
+				retval = hub_port_reset(hub, port1, udev,
+							 delay);
+				if (retval < 0)	/* error or disconnect */
+					goto fail;
+				if (oldspeed != udev->speed) {
+					dev_dbg(&udev->dev,
+					       "device reset changed speed!\n");
+					retval = -ENODEV;
+					goto fail;
+				}
 			}
 			if (r) {
 				dev_err(&udev->dev,
@@ -3199,6 +3293,9 @@
 			(portchange & USB_PORT_STAT_C_CONNECTION))
 		clear_bit(port1, hub->removed_bits);
 
+#if defined(CONFIG_USB_PEHCI_HCD) || defined(CONFIG_USB_PEHCI_HCD_MODULE)
+	if (Unwanted_SecondReset == 0)   /*stericsson*/
+#endif
 	if (portchange & (USB_PORT_STAT_C_CONNECTION |
 				USB_PORT_STAT_C_ENABLE)) {
 		status = hub_port_debounce(hub, port1);
@@ -3337,7 +3434,32 @@
 		status = hub_power_remaining(hub);
 		if (status)
 			dev_dbg(hub_dev, "%dmA power budget left\n", status);
+#if defined(CONFIG_USB_PEHCI_HCD) || defined(CONFIG_USB_PEHCI_HCD_MODULE)
+		if (HostComplianceTest == 1 && udev->devnum > 1) {
+			if (HostTest == 7) {	/*SINGLE_STEP_GET_DEV_DESC */
+				dev_info(hub_dev, "Testing "
+						"SINGLE_STEP_GET_DEV_DESC\n");
+				/* Test the Single Step Get Device Descriptor;
+				 * take care that it does not run a status
+				 * phase.
+				 */
+				No_Data_Phase = 1;
+				No_Status_Phase = 1;
 
+				usb_get_device_descriptor(udev, 8);
+				No_Data_Phase = 0;
+				No_Status_Phase = 0;
+			}
+
+			if (HostTest == 8) {
+				dev_info(hub_dev, "Testing "
+						"SINGLE_STEP_SET_FEATURE\n");
+				/* Test Single Step Set Feature */
+				No_Status_Phase = 1;
+				usb_get_device_descriptor(udev, 8);
+				No_Status_Phase = 0;
+			}
+		}
+#endif
 		return;
 
 loop_disable:
@@ -3375,7 +3497,11 @@
 	u16 portchange;
 	int i, ret;
 	int connect_change;
-
+#if defined(CONFIG_USB_PEHCI_HCD) || defined(CONFIG_USB_PEHCI_HCD_MODULE)
+	int j;
+	int otgport = 0;
+	struct usb_port_status port_status;
+#endif
 	/*
 	 *  We restart the list every time to avoid a deadlock with
 	 * deleting hubs downstream from this one. This should be
@@ -3450,6 +3576,171 @@
 
 		/* deal with port status changes */
 		for (i = 1; i <= hub->descriptor->bNbrPorts; i++) {
+#if defined(CONFIG_USB_PEHCI_HCD) || defined(CONFIG_USB_PEHCI_HCD_MODULE)
+			struct usb_port_status portsts;
+
+			/*
+			 * If we have something to do on the OTG port.
+			 */
+			if ((hdev->otgstate & USB_OTG_SUSPEND) ||
+			    (hdev->otgstate & USB_OTG_ENUMERATE) ||
+			    (hdev->otgstate & USB_OTG_DISCONNECT) ||
+			    (hdev->otgstate & USB_OTG_RESUME)) {
+				otgport = 1;
+			}
+
+
+			if (hdev->otgstate & USB_OTG_RESUME) {
+				ret = clear_port_feature(hdev, i,
+							 USB_PORT_FEAT_SUSPEND);
+				if (ret < 0) {
+					dev_err(hub_dev, "usb otg port Resume"
+						" fails, %d\n", ret);
+				}
+				hdev->otgstate &= ~USB_OTG_RESUME;
+			}
+			if ((hdev->otgstate & USB_OTG_SUSPEND)
+			    && (hdev->children[0])) {
+				hdev->otgstate &= ~USB_OTG_SUSPEND;
+
+				ret = set_port_feature(hdev, 1,
+						       USB_PORT_FEAT_SUSPEND);
+				if (ret < 0) {
+					dev_err(hub_dev, "usb otg port suspend"
+						" fails, %d\n", ret);
+					break;
+				}
+				msleep(1);
+				ret = get_port_status(hdev, i, &portsts);
+				if (ret < 0) {
+					dev_err(hub_dev, "usb otg get port"
+						" status fails, %d\n", ret);
+					break;
+				}
+				portchange = le16_to_cpu(portsts.wPortChange);
+				if (portchange & USB_PORT_STAT_C_SUSPEND) {
+					clear_port_feature(hdev, i,
+						USB_PORT_FEAT_C_SUSPEND);
+				}
+				break;
+			}
+
+			if (hdev->otgstate & USB_OTG_REMOTEWAKEUP) {
+
+				for (j = 1; j <= hub->descriptor->bNbrPorts;
+				     j++) {
+					if (hdev->children[j - 1]) {
+						dev_dbg(hub_dev, "child"
+						     " found at port %d\n", j);
+						ret = usb_control_msg(hdev->
+						      children[j - 1],
+						      usb_sndctrlpipe(hdev->
+								children[j - 1],
+								0),
+						      USB_REQ_SET_FEATURE,
+						      USB_RECIP_DEVICE,
+						      USB_DEVICE_REMOTE_WAKEUP,
+						      0, NULL,
+						      0,
+						      USB_CTRL_SET_TIMEOUT);
+						if (ret < 0) {
+							dev_err(hub_dev, "Port"
+							  " %d doesn't support"
+							  " remote wakeup\n", j);
+						} else {
+							dev_dbg(hub_dev, "Port"
+							  " %d supports"
+							  " remote wakeup\n", j);
+						}
+						ret = set_port_feature(hdev, j,
+							USB_PORT_FEAT_SUSPEND);
+						if (ret < 0) {
+							dev_err(hub_dev, "Port"
+							  " %d NOT ABLE TO"
+							  " SUSPEND\n", j);
+						} else {
+							dev_dbg(hub_dev, "Port"
+							  " %d is ABLE TO"
+							  " SUSPEND\n", j);
+						}
+					}
+				}
+				ret = usb_control_msg(hdev,
+						      usb_sndctrlpipe(hdev, 0),
+						      USB_REQ_SET_FEATURE,
+						      USB_RECIP_DEVICE,
+						      USB_DEVICE_REMOTE_WAKEUP,
+						      0, NULL, 0,
+						      USB_CTRL_SET_TIMEOUT);
+				if (ret < 0) {
+					dev_err(hub_dev, "HUB doesn't support"
+							" REMOTE WAKEUP\n");
+				} else {
+					dev_dbg(hub_dev, "HUB supports"
+							" REMOTE WAKEUP\n");
+				}
+				ret = 0;
+				msleep(10);
+				if (hdev->parent == hdev->bus->root_hub) {
+					if (hdev->hcd_suspend &&
+					    hdev->hcd_priv) {
+						dev_dbg(hub_dev, "calling"
+						  " suspend after remote wakeup"
+						  " command is issued\n");
+						hdev->hcd_suspend(hdev->
+								   hcd_priv);
+					}
+					if (hdev->otg_notif)
+						hdev->otg_notif(hdev->otgpriv,
+						       PDC_POWERMANAGEMENT, 10);
+				}
+			}
+
+			if (hdev->otgstate & USB_OTG_WAKEUP_ALL) {
+				(void) usb_control_msg(hdev,
+						       usb_sndctrlpipe(hdev, 0),
+						       USB_REQ_CLEAR_FEATURE,
+						       USB_RECIP_DEVICE,
+						       USB_DEVICE_REMOTE_WAKEUP,
+						       0, NULL, 0,
+						       USB_CTRL_SET_TIMEOUT);
+				dev_dbg(hub_dev, "Hub CLEARED REMOTE WAKEUP\n");
+				for (j = 1; j <= hub->descriptor->bNbrPorts;
+				     j++) {
+					if (hdev->children[j - 1]) {
+						dev_dbg(hub_dev, "PORT %d"
+						   " SUSPEND IS CLEARED\n", j);
+						clear_port_feature(hdev, j,
+						   USB_PORT_FEAT_C_SUSPEND);
+						msleep(50);
+						(void) usb_control_msg(hdev->
+						       children[j - 1],
+						       usb_sndctrlpipe(
+							  hdev->children[j - 1],
+							  0),
+						       USB_REQ_CLEAR_FEATURE,
+						       USB_RECIP_DEVICE,
+						       USB_DEVICE_REMOTE_WAKEUP,
+						       0, NULL,
+						       0,
+						       USB_CTRL_SET_TIMEOUT);
+						dev_dbg(hub_dev, "PORT %d "
+							"REMOTE WAKEUP IS "
+							"CLEARED\n", j);
+						msleep(10);
+					}
+				}
+
+
+			}
+
+
+			/*
+			 * Reset the OTG state regardless of which OTG
+			 * device is attached.
+			 */
+			hdev->otgstate = 0;
+#endif
 			if (test_bit(i, hub->busy_bits))
 				continue;
 			connect_change = test_bit(i, hub->change_bits);
@@ -3573,9 +3864,19 @@
 				hub_port_warm_reset(hub, i);
 			}
 
-			if (connect_change)
+			if (connect_change) {
+#if defined(CONFIG_USB_PEHCI_HCD) || defined(CONFIG_USB_PEHCI_HCD_MODULE)
+				if (hdev->parent == hdev->bus->root_hub)
+					if (hdev->otg_notif
+					    && (HostComplianceTest == 0))
+						hdev->otg_notif(hdev->otgpriv,
+								PDC_HOST_NOTIFY,
+								5);
+				portno = i;
+#endif
 				hub_port_connect_change(hub, i,
 						portstatus, portchange);
+			}
 		} /* end for i */
 
 		/* deal with hub status changes */
@@ -3607,7 +3908,105 @@
 						"condition\n");
 			}
 		}
+#if defined(CONFIG_USB_PEHCI_HCD) || defined(CONFIG_USB_PEHCI_HCD_MODULE)
+		/* if we have something on otg */
+		if (otgport) {
+			otgport = 0;
+			/* notify otg controller about it */
+			if (hdev->parent == hdev->bus->root_hub)
+				if (hdev->otg_notif)
+					hdev->otg_notif(hdev->otgpriv,
+							PDC_HOST_NOTIFY, 0);
+		}
 
+		if (HostComplianceTest && hdev->devnum > 1) {
+			/* TEST_SE0_NAK */
+			if (HostTest == 1) {
+				dev_info(hub_dev, "Testing for TEST_SE0_NAK\n");
+				ret = clear_port_feature(hdev, portno,
+						 USB_PORT_FEAT_C_CONNECTION);
+				ret = set_port_feature(hdev, portno,
+						       USB_PORT_FEAT_SUSPEND);
+				ret = set_port_feature(hdev, portno | 0x300,
+						       USB_PORT_FEAT_TEST);
+				ret = get_port_status(hdev, portno,
+						      &port_status);
+			}
+			/*TEST_J*/
+			if (HostTest == 2) {
+				dev_info(hub_dev, "Testing TEST_J\n");
+				ret = clear_port_feature(hdev, portno,
+						USB_PORT_FEAT_C_CONNECTION);
+				ret = set_port_feature(hdev, portno,
+						USB_PORT_FEAT_SUSPEND);
+				ret = set_port_feature(hdev, portno | 0x100,
+						       USB_PORT_FEAT_TEST);
+				ret = get_port_status(hdev, portno,
+						      &port_status);
+			}
+			if (HostTest == 3) {
+				dev_info(hub_dev, "Testing TEST_K\n");
+				ret = clear_port_feature(hdev, portno,
+						USB_PORT_FEAT_C_CONNECTION);
+				ret = set_port_feature(hdev, portno,
+						       USB_PORT_FEAT_SUSPEND);
+				ret = set_port_feature(hdev, portno | 0x200,
+						       USB_PORT_FEAT_TEST);
+				ret = get_port_status(hdev, portno,
+						      &port_status);
+			}
+			if (HostTest == 4) {
+				dev_info(hub_dev, "Testing TEST_PACKET at Port"
+						  " %d\n", portno);
+				ret = clear_port_feature(hdev, portno,
+						USB_PORT_FEAT_C_CONNECTION);
+				if (ret < 0)
+					dev_err(hub_dev, "Clear port feature"
+						" C_CONNECTION failed\n");
+
+				ret = set_port_feature(hdev, portno,
+						       USB_PORT_FEAT_SUSPEND);
+				if (ret < 0)
+					dev_err(hub_dev, "Set port feature"
+						" SUSPEND failed\n");
+
+				ret = set_port_feature(hdev, portno | 0x400,
+						       USB_PORT_FEAT_TEST);
+				if (ret < 0)
+					dev_err(hub_dev, "Set port feature"
+						" TEST failed\n");
+
+				ret = get_port_status(hdev, portno,
+						      &port_status);
+				if (ret < 0)
+					dev_err(hub_dev, "Get port status"
+						" failed\n");
+			}
+			if (HostTest == 5) {
+				dev_info(hub_dev, "Testing TEST_FORCE_ENABLE\n");
+				ret = clear_port_feature(hdev, portno,
+						 USB_PORT_FEAT_C_CONNECTION);
+				ret = set_port_feature(hdev, portno,
+						 USB_PORT_FEAT_SUSPEND);
+				ret = set_port_feature(hdev, portno | 0x500,
+						       USB_PORT_FEAT_TEST);
+				ret = get_port_status(hdev, portno,
+						      &port_status);
+			}
+			if (HostTest == 6) {
+				dev_info(hub_dev, "Testing "
+					 "HS_HOST_PORT_SUSPEND_RESUME\n");
+				ret = clear_port_feature(hdev, portno,
+						 USB_PORT_FEAT_C_CONNECTION);
+				ret = set_port_feature(hdev, portno,
+						     USB_PORT_FEAT_SUSPEND);
+				msleep(3000);
+				ret = clear_port_feature(hdev, portno,
+						 USB_PORT_FEAT_SUSPEND);
+				HostTest = 0;
+			}
+		}
+#endif
  loop_autopm:
 		/* Balance the usb_autopm_get_interface() above */
 		usb_autopm_put_interface_no_suspend(intf);
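
The compliance-test hunk above encodes the USB 2.0 test selector in the upper byte of the wIndex value passed to SetPortFeature(PORT_TEST), which is why the port number is OR'd with 0x100 through 0x500. A small helper makes that encoding explicit; this is only an illustrative sketch and is not part of the patch (the enum and function names below are invented for illustration):

/*
 * Illustrative sketch, not part of this patch: build the wIndex value for
 * SetPortFeature(PORT_TEST).  Low byte = port number, high byte = USB 2.0
 * test selector, matching the portno | 0x100..0x500 values used above.
 */
enum usb_test_selector {
	USB_TEST_J		= 0x01,
	USB_TEST_K		= 0x02,
	USB_TEST_SE0_NAK	= 0x03,
	USB_TEST_PACKET		= 0x04,
	USB_TEST_FORCE_ENABLE	= 0x05,
};

static inline u16 usb_test_windex(int portno, enum usb_test_selector sel)
{
	return (u16)(portno | (sel << 8));
}
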
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 0b5ec23..415593c 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1769,6 +1769,9 @@
 		goto free_interfaces;
 	}
 
+	dev->actconfig = cp;
+	if (cp)
+		usb_notify_config_device(dev);
 	ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
 			      USB_REQ_SET_CONFIGURATION, 0, configuration, 0,
 			      NULL, 0, USB_CTRL_SET_TIMEOUT);
@@ -1776,11 +1779,11 @@
 		/* All the old state is gone, so what else can we do?
 		 * The device is probably useless now anyway.
 		 */
-		cp = NULL;
+		dev->actconfig = cp = NULL;
 	}
 
-	dev->actconfig = cp;
 	if (!cp) {
+		usb_notify_config_device(dev);
 		usb_set_device_state(dev, USB_STATE_ADDRESS);
 		usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
 		mutex_unlock(hcd->bandwidth_mutex);
diff --git a/drivers/usb/core/notify.c b/drivers/usb/core/notify.c
index 7542dce..15311d8 100644
--- a/drivers/usb/core/notify.c
+++ b/drivers/usb/core/notify.c
@@ -57,6 +57,12 @@
 	mutex_unlock(&usbfs_mutex);
 }
 
+void usb_notify_config_device(struct usb_device *udev)
+{
+	blocking_notifier_call_chain(&usb_notifier_list,
+			USB_DEVICE_CONFIG, udev);
+}
+
 void usb_notify_add_bus(struct usb_bus *ubus)
 {
 	blocking_notifier_call_chain(&usb_notifier_list, USB_BUS_ADD, ubus);
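
usb_notify_config_device() fans the new event out through the same usb_notifier_list used for device add/remove, so any driver already using usb_register_notify() can also observe SET_CONFIGURATION. A minimal consumer might look like the sketch below; it assumes USB_DEVICE_CONFIG is defined next to USB_DEVICE_ADD/USB_DEVICE_REMOVE elsewhere in this patch, and the handler name is illustrative only.

/* Sketch only: a consumer of the new USB_DEVICE_CONFIG notification. */
static int my_usb_notify(struct notifier_block *nb, unsigned long action,
			 void *data)
{
	struct usb_device *udev = data;

	if (action == USB_DEVICE_CONFIG)
		dev_info(&udev->dev, "configuration selected\n");
	return NOTIFY_OK;
}

static struct notifier_block my_usb_nb = {
	.notifier_call = my_usb_notify,
};

/* registered once at init time with: usb_register_notify(&my_usb_nb); */
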
diff --git a/drivers/usb/core/otg_whitelist.h b/drivers/usb/core/otg_whitelist.h
index e8cdce5..cec4167 100644
--- a/drivers/usb/core/otg_whitelist.h
+++ b/drivers/usb/core/otg_whitelist.h
@@ -92,7 +92,30 @@
 		if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_PROTOCOL) &&
 		    (id->bDeviceProtocol != dev->descriptor.bDeviceProtocol))
 			continue;
+#if defined(CONFIG_USB_PEHCI_HCD) || defined(CONFIG_USB_PEHCI_HCD_MODULE)
+		/* Hub is a targeted device, so code execution should reach here */
+		if (USB_CLASS_HUB == dev->descriptor.bDeviceClass) {
+			/* count the tiers and if it is more than 6, return 0 */
+			unsigned char tier = 0;
+			struct usb_device *root_hub;
 
+			root_hub = dev->bus->root_hub;
+			while ((dev->parent != NULL) && /* root hub doesn't count */
+				(dev->parent != root_hub) &&
+				(tier != 6)) { /* internal hub doesn't count */
+				tier++;
+				dev = dev->parent;
+			}
+
+			if (tier == 6) {
+				dev_err(&dev->dev, "5 tiers of hubs reached,"
+					" newly added hub will not be"
+					" supported!\n");
+				hub_tier = 1;
+				return 0;
+			}
+		}
+#endif
 		return 1;
 	}
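
The whitelist addition walks the parent chain to reject hubs nested more than five tiers below the root hub (neither the root hub nor the device itself is counted). Pulled out as a standalone helper purely for illustration, the walk looks like this (the function name is not part of the patch):

/* Illustrative helper, not part of the patch: count the external hubs
 * between @dev and the root hub, capped at 6 so callers can reject
 * anything deeper than 5 tiers of hubs.
 */
static unsigned char usb_hub_tier_depth(struct usb_device *dev)
{
	struct usb_device *root_hub = dev->bus->root_hub;
	unsigned char tier = 0;

	while (dev->parent && dev->parent != root_hub && tier < 6) {
		tier++;
		dev = dev->parent;
	}

	return tier;
}
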
 
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index d44d4b7..c36c72a 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -96,6 +96,10 @@
 
 #endif
 
+#ifdef CONFIG_USB_OTG
+extern void usb_hnp_polling_work(struct work_struct *work);
+#endif
+
 extern struct bus_type usb_bus_type;
 extern struct device_type usb_device_type;
 extern struct device_type usb_if_device_type;
@@ -159,6 +163,7 @@
 /* internal notify stuff */
 extern void usb_notify_add_device(struct usb_device *udev);
 extern void usb_notify_remove_device(struct usb_device *udev);
+extern void usb_notify_config_device(struct usb_device *udev);
 extern void usb_notify_add_bus(struct usb_bus *ubus);
 extern void usb_notify_remove_bus(struct usb_bus *ubus);
 
diff --git a/drivers/usb/function/Kconfig b/drivers/usb/function/Kconfig
new file mode 100644
index 0000000..90d776c
--- /dev/null
+++ b/drivers/usb/function/Kconfig
@@ -0,0 +1,163 @@
+menu "USB Function Support"
+	depends on !USB_GADGET
+
+config USB_MSM_OTG
+	bool "OTG support for Qualcomm on-chip USB controller"
+	depends on USB && USB_FUNCTION && USB_EHCI_MSM
+	help
+	  USB OTG driver.
+	  This driver is required if you want to use USB in
+	  Host mode and Device mode.
+
+config USB_FUNCTION
+	boolean "Support for USB Function Drivers"
+	help
+	   The USB Function framework is similar to the Gadget framework
+	   but a little simpler and a little more pluggable.  It trades
+	   some flexibility in the framework for smaller and simpler
+	   function drivers that can be combined into a composite driver;
+	   a minimal function driver skeleton is sketched after this menu.
+
+choice
+	prompt "USB Peripheral Controller"
+	depends on USB_FUNCTION
+	help
+	  A USB device interfaces with the host using a controller.
+	  Many controller drivers are platform-specific; these
+	  often need board-specific hooks.
+
+config USB_FUNCTION_MSM_HSUSB
+	boolean "MSM Highspeed USB Peripheral Controller"
+	depends on ARCH_MSM
+	help
+	  High speed USB device controller for Qualcomm chipsets using
+	  the USB Function framework. The controller supports IAD and
+	  32 endpoints (16 IN and 16 OUT).
+
+endchoice
+
+config USB_FUNCTION_NULL
+	boolean "Null Function -- eats packets"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MSM_HSUSB
+	default n
+
+config USB_FUNCTION_ZERO
+	boolean "Zero Function -- generates packets"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MSM_HSUSB
+	default n
+
+config USB_FUNCTION_LOOPBACK
+	boolean "Loopback Function -- returns packets"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MSM_HSUSB
+	default n
+
+config USB_FUNCTION_ADB
+	tristate "ADB Transport Function"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MSM_HSUSB
+	help
+	  Function Driver for the Android ADB Protocol
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "adb"
+
+	default USB_FUNCTION_MSM_HSUSB
+
+config USB_FUNCTION_UMS
+	boolean "USB Mass Storage Function (userspace)"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MSM_HSUSB
+	default n
+
+config USB_FUNCTION_MASS_STORAGE
+	tristate "USB Mass Storage Function (kernel based)"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MSM_HSUSB && SWITCH
+	help
+	  The File-backed Storage function driver acts as a USB Mass Storage
+	  disk drive.  As its storage repository it can use a regular
+	  file or a block device specified as a module parameter. Initial
+	  driver version is derived from Gadget framework and ported to
+	  Function driver framework.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "file_storage".
+
+	default USB_FUNCTION_MSM_HSUSB
+
+config USB_CSW_HACK
+	boolean "USB Mass storage csw hack Feature"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MASS_STORAGE
+	help
+	 This CSW hack feature increases the performance of the mass
+	 storage function.
+
+	default n
+
+config USB_FUNCTION_DIAG
+	tristate "USB MSM Diag Function"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MSM_HSUSB
+	help
+	  Simple bridge driver between SMD and the debug client (host side)
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "diag".
+
+	default USB_FUNCTION_MSM_HSUSB
+
+config USB_FUNCTION_ETHER
+	tristate "USB Ethernet Function"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MSM_HSUSB
+	help
+	  Implements Ethernet-style communication using CDC ECM.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "ether".
+
+	default USB_FUNCTION_MSM_HSUSB
+
+config USB_FUNCTION_SERIAL
+	tristate "USB Serial Function"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MSM_HSUSB
+	help
+	  Implements serial communication using a single interface; uses
+	  two endpoints (bulk-in and bulk-out) for data transfer and an
+	  interrupt endpoint for control data transfer.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "serial".
+
+	default USB_FUNCTION_MSM_HSUSB
+
+config USB_FUNCTION_RMNET
+	bool "RmNet function driver"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MSM_HSUSB
+	default n
+	help
+	  Implements the RmNet function.
+	  RmNet is an alternative to CDC-ECM and Windows RNDIS. It uses
+	  the Qualcomm MSM Interface (QMI) for control transfers and acts
+	  as a bridge between the host and the modem found in MSM chipsets.
+
+config RMNET_SMD_CTL_CHANNEL
+	string "control SMD channel name"
+	depends on USB_FUNCTION_RMNET
+	default ""
+	help
+	  Control SMD channel for transferring QMI messages
+
+config RMNET_SMD_DATA_CHANNEL
+	string "Data SMD channel name"
+	depends on USB_FUNCTION_RMNET
+	default ""
+	help
+	  Data SMD channel for transferring network data
+
+endmenu
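
As noted in the USB_FUNCTION help text above, the framework is built around small, pluggable function drivers. The skeleton below sketches the minimum such a driver supplies: a struct usb_function with bind/unbind/configure callbacks registered through usb_function_register(). It mirrors the adb and diag drivers added later in this patch; all names here are illustrative only, not part of the patch.

/* Minimal USB Function driver skeleton (illustrative only). */
static void foo_bind(void *ctxt)
{
	/* claim an interface number, allocate endpoints and requests */
}

static void foo_unbind(void *ctxt)
{
	/* flush, disable and free whatever foo_bind() allocated */
}

static void foo_configure(int configured, void *ctxt)
{
	/* enable endpoints when configured, mark the function offline otherwise */
}

static struct usb_function usb_func_foo = {
	.bind      = foo_bind,
	.unbind    = foo_unbind,
	.configure = foo_configure,
	.name      = "foo",
};

static int __init foo_init(void)
{
	return usb_function_register(&usb_func_foo);
}
module_init(foo_init);
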
diff --git a/drivers/usb/function/Makefile b/drivers/usb/function/Makefile
new file mode 100644
index 0000000..7614d3b
--- /dev/null
+++ b/drivers/usb/function/Makefile
@@ -0,0 +1,13 @@
+
+obj-$(CONFIG_USB_MSM_OTG)		+= msm_otg.o
+obj-$(CONFIG_USB_FUNCTION_MSM_HSUSB)	+= msm_hsusb.o
+obj-$(CONFIG_USB_FUNCTION_NULL)		+= null.o
+obj-$(CONFIG_USB_FUNCTION_ZERO)		+= zero.o
+obj-$(CONFIG_USB_FUNCTION_LOOPBACK)	+= loopback.o
+obj-$(CONFIG_USB_FUNCTION_ADB)		+= adb.o
+obj-$(CONFIG_USB_FUNCTION_UMS)		+= ums.o
+obj-$(CONFIG_USB_FUNCTION_MASS_STORAGE)	+= mass_storage.o
+obj-$(CONFIG_USB_FUNCTION_DIAG)		+= diag.o
+obj-$(CONFIG_USB_FUNCTION_SERIAL)       += serial.o
+obj-$(CONFIG_USB_FUNCTION_ETHER)	+= ether_cdc_ecm.o
+obj-$(CONFIG_USB_FUNCTION_RMNET)	+= rmnet.o
diff --git a/drivers/usb/function/adb.c b/drivers/usb/function/adb.c
new file mode 100644
index 0000000..dd91be3
--- /dev/null
+++ b/drivers/usb/function/adb.c
@@ -0,0 +1,624 @@
+/* drivers/usb/function/adb.c
+ *
+ * Function Device for the Android ADB Protocol
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+
+#include <asm/atomic.h>
+#include <asm/uaccess.h>
+
+#include "usb_function.h"
+
+#if 1
+#define DBG(x...) do {} while (0)
+#else
+#define DBG(x...) printk(x)
+#endif
+
+#define TXN_MAX 4096
+
+/* number of rx and tx requests to allocate */
+#define RX_REQ_MAX 4
+#define TX_REQ_MAX 4
+
+#define ADB_FUNCTION_NAME "adb"
+
+struct adb_context
+{
+	int online;
+	int error;
+
+	atomic_t read_excl;
+	atomic_t write_excl;
+	atomic_t open_excl;
+	atomic_t enable_excl;
+	spinlock_t lock;
+
+	struct usb_endpoint *out;
+	struct usb_endpoint *in;
+
+	struct list_head tx_idle;
+	struct list_head rx_idle;
+	struct list_head rx_done;
+
+	wait_queue_head_t read_wq;
+	wait_queue_head_t write_wq;
+
+	/* the request we're currently reading from */
+	struct usb_request *read_req;
+	unsigned char *read_buf;
+	unsigned read_count;
+	unsigned bound;
+};
+
+static struct adb_context _context;
+
+static struct usb_interface_descriptor intf_desc = {
+	.bLength =		sizeof intf_desc,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	0xff,
+	.bInterfaceSubClass =	0x42,
+	.bInterfaceProtocol =	0x01,
+};
+
+static struct usb_endpoint_descriptor hs_bulk_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+	.bInterval =		0,
+};
+static struct usb_endpoint_descriptor fs_bulk_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(64),
+	.bInterval =		0,
+};
+
+static struct usb_endpoint_descriptor hs_bulk_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+	.bInterval =		0,
+};
+
+static struct usb_endpoint_descriptor fs_bulk_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(64),
+	.bInterval =		0,
+};
+
+static struct usb_function usb_func_adb;
+
+static inline int _lock(atomic_t *excl)
+{
+	if (atomic_inc_return(excl) == 1) {
+		return 0;
+	} else {
+		atomic_dec(excl);
+		return -1;
+	}
+}
+
+static inline void _unlock(atomic_t *excl)
+{
+	atomic_dec(excl);
+}
+
+/* add a request to the tail of a list */
+void req_put(struct adb_context *ctxt, struct list_head *head, struct usb_request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+	list_add_tail(&req->list, head);
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+}
+
+/* remove a request from the head of a list */
+struct usb_request *req_get(struct adb_context *ctxt, struct list_head *head)
+{
+	unsigned long flags;
+	struct usb_request *req;
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+	if (list_empty(head)) {
+		req = 0;
+	} else {
+		req = list_first_entry(head, struct usb_request, list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+	return req;
+}
+
+static void adb_complete_in(struct usb_endpoint *ept, struct usb_request *req)
+{
+	struct adb_context *ctxt = req->context;
+
+	if (req->status != 0)
+		ctxt->error = 1;
+
+	req_put(ctxt, &ctxt->tx_idle, req);
+
+	wake_up(&ctxt->write_wq);
+}
+
+static void adb_complete_out(struct usb_endpoint *ept, struct usb_request *req)
+{
+	struct adb_context *ctxt = req->context;
+
+	if (req->status != 0) {
+		ctxt->error = 1;
+		req_put(ctxt, &ctxt->rx_idle, req);
+	} else {
+		req_put(ctxt, &ctxt->rx_done, req);
+	}
+
+	wake_up(&ctxt->read_wq);
+}
+
+static ssize_t adb_read(struct file *fp, char __user *buf,
+			size_t count, loff_t *pos)
+{
+	struct adb_context *ctxt = &_context;
+	struct usb_request *req;
+	int r = count, xfer;
+	int ret;
+
+	DBG("adb_read(%d)\n", count);
+
+	if (_lock(&ctxt->read_excl))
+		return -EBUSY;
+
+	/* we will block until we're online */
+	while (!(ctxt->online || ctxt->error)) {
+		DBG("adb_read: waiting for online state\n");
+		ret = wait_event_interruptible(ctxt->read_wq, (ctxt->online || ctxt->error));
+		if (ret < 0) {
+			_unlock(&ctxt->read_excl);
+			return ret;
+		}
+	}
+
+	while (count > 0) {
+		if (ctxt->error) {
+			r = -EIO;
+			break;
+		}
+
+		/* if we have idle read requests, get them queued */
+		while ((req = req_get(ctxt, &ctxt->rx_idle))) {
+requeue_req:
+			req->length = TXN_MAX;
+			ret = usb_ept_queue_xfer(ctxt->out, req);
+			if (ret < 0) {
+				DBG("adb_read: failed to queue req %p (%d)\n", req, ret);
+				r = -EIO;
+				ctxt->error = 1;
+				req_put(ctxt, &ctxt->rx_idle, req);
+				goto fail;
+			} else {
+				DBG("rx %p queue\n", req);
+			}
+		}
+
+		/* if we have data pending, give it to userspace */
+		if (ctxt->read_count > 0) {
+			xfer = (ctxt->read_count < count) ? ctxt->read_count : count;
+
+			if (copy_to_user(buf, ctxt->read_buf, xfer)) {
+				r = -EFAULT;
+				break;
+			}
+			ctxt->read_buf += xfer;
+			ctxt->read_count -= xfer;
+			buf += xfer;
+			count -= xfer;
+
+			/* if we've emptied the buffer, release the request */
+			if (ctxt->read_count == 0) {
+				req_put(ctxt, &ctxt->rx_idle, ctxt->read_req);
+				ctxt->read_req = 0;
+			}
+			continue;
+		}
+
+		/* wait for a request to complete */
+		req = 0;
+		ret = wait_event_interruptible(ctxt->read_wq,
+					       ((req = req_get(ctxt, &ctxt->rx_done)) || ctxt->error));
+
+		if (req != 0) {
+			/* if we got a 0-len one we need to put it back into
+			** service.  if we made it the current read req we'd
+			** be stuck forever
+			*/
+			if (req->actual == 0)
+				goto requeue_req;
+
+			ctxt->read_req = req;
+			ctxt->read_count = req->actual;
+			ctxt->read_buf = req->buf;
+			DBG("rx %p %d\n", req, req->actual);
+		}
+
+		if (ret < 0) {
+			r = ret;
+			break;
+		}
+	}
+
+fail:
+	_unlock(&ctxt->read_excl);
+	return r;
+}
+
+static ssize_t adb_write(struct file *fp, const char __user *buf,
+			 size_t count, loff_t *pos)
+{
+	struct adb_context *ctxt = &_context;
+	struct usb_request *req = 0;
+	int r = count, xfer;
+	int ret;
+
+	DBG("adb_write(%d)\n", count);
+
+	if (_lock(&ctxt->write_excl))
+		return -EBUSY;
+
+	while (count > 0) {
+		if (ctxt->error) {
+			r = -EIO;
+			break;
+		}
+
+		/* get an idle tx request to use */
+		req = 0;
+		ret = wait_event_interruptible(ctxt->write_wq,
+					       ((req = req_get(ctxt, &ctxt->tx_idle)) || ctxt->error));
+
+		if (ret < 0) {
+			r = ret;
+			break;
+		}
+
+		if (req != 0) {
+			xfer = count > TXN_MAX ? TXN_MAX : count;
+			if (copy_from_user(req->buf, buf, xfer)) {
+				r = -EFAULT;
+				break;
+			}
+
+			req->length = xfer;
+			ret = usb_ept_queue_xfer(ctxt->in, req);
+			if (ret < 0) {
+				DBG("adb_write: xfer error %d\n", ret);
+				ctxt->error = 1;
+				r = -EIO;
+				break;
+			}
+
+			buf += xfer;
+			count -= xfer;
+
+			/* zero this so we don't try to free it on error exit */
+			req = 0;
+		}
+	}
+
+
+	if (req)
+		req_put(ctxt, &ctxt->tx_idle, req);
+
+	_unlock(&ctxt->write_excl);
+	return r;
+}
+
+static int adb_open(struct inode *ip, struct file *fp)
+{
+	struct adb_context *ctxt = &_context;
+
+	if (_lock(&ctxt->open_excl))
+		return -EBUSY;
+
+	/* clear the error latch */
+	ctxt->error = 0;
+
+	return 0;
+}
+
+static int adb_release(struct inode *ip, struct file *fp)
+{
+	struct adb_context *ctxt = &_context;
+
+	_unlock(&ctxt->open_excl);
+	return 0;
+}
+
+static struct file_operations adb_fops = {
+	.owner =   THIS_MODULE,
+	.read =    adb_read,
+	.write =   adb_write,
+	.open =    adb_open,
+	.release = adb_release,
+};
+
+static struct miscdevice adb_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "android_adb",
+	.fops = &adb_fops,
+};
+
+static int adb_enable_open(struct inode *ip, struct file *fp)
+{
+	struct adb_context *ctxt = &_context;
+
+	if (_lock(&ctxt->enable_excl))
+		return -EBUSY;
+
+	printk(KERN_INFO "enabling adb function\n");
+	usb_function_enable(ADB_FUNCTION_NAME, 1);
+	/* clear the error latch */
+	ctxt->error = 0;
+
+	return 0;
+}
+
+static int adb_enable_release(struct inode *ip, struct file *fp)
+{
+	struct adb_context *ctxt = &_context;
+
+	printk(KERN_INFO "disabling adb function\n");
+	usb_function_enable(ADB_FUNCTION_NAME, 0);
+	_unlock(&ctxt->enable_excl);
+	return 0;
+}
+
+static struct file_operations adb_enable_fops = {
+	.owner =   THIS_MODULE,
+	.open =    adb_enable_open,
+	.release = adb_enable_release,
+};
+
+static struct miscdevice adb_enable_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "android_adb_enable",
+	.fops = &adb_enable_fops,
+};
+
+static void adb_unbind(void *_ctxt)
+{
+	struct adb_context *ctxt = _ctxt;
+	struct usb_request *req;
+
+	if (!ctxt->bound)
+		return;
+
+	while ((req = req_get(ctxt, &ctxt->rx_idle))) {
+		usb_ept_free_req(ctxt->out, req);
+	}
+	while ((req = req_get(ctxt, &ctxt->tx_idle))) {
+		usb_ept_free_req(ctxt->in, req);
+	}
+	if (ctxt->in) {
+		usb_ept_fifo_flush(ctxt->in);
+		usb_ept_enable(ctxt->in,  0);
+		usb_free_endpoint(ctxt->in);
+	}
+	if (ctxt->out) {
+		usb_ept_fifo_flush(ctxt->out);
+		usb_ept_enable(ctxt->out,  0);
+		usb_free_endpoint(ctxt->out);
+	}
+
+	ctxt->online = 0;
+	ctxt->error = 1;
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&ctxt->read_wq);
+	ctxt->bound = 0;
+}
+
+static void adb_bind(void *_ctxt)
+{
+	struct adb_context *ctxt = _ctxt;
+	struct usb_request *req;
+	int n;
+
+	intf_desc.bInterfaceNumber =
+		usb_msm_get_next_ifc_number(&usb_func_adb);
+
+	ctxt->in = usb_alloc_endpoint(USB_DIR_IN);
+	if (ctxt->in) {
+		hs_bulk_in_desc.bEndpointAddress = USB_DIR_IN | ctxt->in->num;
+		fs_bulk_in_desc.bEndpointAddress = USB_DIR_IN | ctxt->in->num;
+	}
+
+	ctxt->out = usb_alloc_endpoint(USB_DIR_OUT);
+	if (ctxt->out) {
+		hs_bulk_out_desc.bEndpointAddress = USB_DIR_OUT|ctxt->out->num;
+		fs_bulk_out_desc.bEndpointAddress = USB_DIR_OUT|ctxt->out->num;
+	}
+
+	for (n = 0; n < RX_REQ_MAX; n++) {
+		req = usb_ept_alloc_req(ctxt->out, 4096);
+		if (req == 0) {
+			ctxt->bound = 1;
+			goto fail;
+		}
+		req->context = ctxt;
+		req->complete = adb_complete_out;
+		req_put(ctxt, &ctxt->rx_idle, req);
+	}
+
+	for (n = 0; n < TX_REQ_MAX; n++) {
+		req = usb_ept_alloc_req(ctxt->in, 4096);
+		if (req == 0) {
+			ctxt->bound = 1;
+			goto fail;
+		}
+		req->context = ctxt;
+		req->complete = adb_complete_in;
+		req_put(ctxt, &ctxt->tx_idle, req);
+	}
+	ctxt->bound = 1;
+	return;
+
+fail:
+	printk(KERN_ERR "adb_bind() could not allocate requests\n");
+	adb_unbind(ctxt);
+}
+
+static void adb_configure(int configured, void *_ctxt)
+{
+	struct adb_context *ctxt = _ctxt;
+	struct usb_request *req;
+
+	if (configured) {
+		ctxt->online = 1;
+
+		if (usb_msm_get_speed() == USB_SPEED_HIGH) {
+			usb_configure_endpoint(ctxt->in, &hs_bulk_in_desc);
+			usb_configure_endpoint(ctxt->out, &hs_bulk_out_desc);
+		} else {
+			usb_configure_endpoint(ctxt->in, &fs_bulk_in_desc);
+			usb_configure_endpoint(ctxt->out, &fs_bulk_out_desc);
+		}
+		usb_ept_enable(ctxt->in,  1);
+		usb_ept_enable(ctxt->out, 1);
+
+		/* if we have a stale request being read, recycle it */
+		ctxt->read_buf = 0;
+		ctxt->read_count = 0;
+		if (ctxt->read_req) {
+			req_put(ctxt, &ctxt->rx_idle, ctxt->read_req);
+			ctxt->read_req = 0;
+		}
+
+		/* retire any completed rx requests from previous session */
+		while ((req = req_get(ctxt, &ctxt->rx_done)))
+			req_put(ctxt, &ctxt->rx_idle, req);
+
+	} else {
+		ctxt->online = 0;
+		ctxt->error = 1;
+	}
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&ctxt->read_wq);
+}
+
+static struct usb_function usb_func_adb = {
+	.bind = adb_bind,
+	.unbind = adb_unbind,
+	.configure = adb_configure,
+
+	.name = ADB_FUNCTION_NAME,
+	.context = &_context,
+
+};
+
+struct usb_descriptor_header *adb_hs_descriptors[4];
+struct usb_descriptor_header *adb_fs_descriptors[4];
+static int __init adb_init(void)
+{
+	int ret = 0;
+	struct adb_context *ctxt = &_context;
+	DBG("adb_init()\n");
+
+	init_waitqueue_head(&ctxt->read_wq);
+	init_waitqueue_head(&ctxt->write_wq);
+
+	atomic_set(&ctxt->open_excl, 0);
+	atomic_set(&ctxt->read_excl, 0);
+	atomic_set(&ctxt->write_excl, 0);
+	atomic_set(&ctxt->enable_excl, 0);
+
+	spin_lock_init(&ctxt->lock);
+
+	INIT_LIST_HEAD(&ctxt->rx_idle);
+	INIT_LIST_HEAD(&ctxt->rx_done);
+	INIT_LIST_HEAD(&ctxt->tx_idle);
+
+	adb_hs_descriptors[0] = (struct usb_descriptor_header *)&intf_desc;
+	adb_hs_descriptors[1] =
+		(struct usb_descriptor_header *)&hs_bulk_in_desc;
+	adb_hs_descriptors[2] =
+		(struct usb_descriptor_header *)&hs_bulk_out_desc;
+	adb_hs_descriptors[3] = NULL;
+
+	adb_fs_descriptors[0] = (struct usb_descriptor_header *)&intf_desc;
+	adb_fs_descriptors[1] =
+		(struct usb_descriptor_header *)&fs_bulk_in_desc;
+	adb_fs_descriptors[2] =
+		(struct usb_descriptor_header *)&fs_bulk_out_desc;
+	adb_fs_descriptors[3] = NULL;
+
+	usb_func_adb.hs_descriptors = adb_hs_descriptors;
+	usb_func_adb.fs_descriptors = adb_fs_descriptors;
+
+	ret = misc_register(&adb_device);
+	if (ret) {
+		printk(KERN_ERR "adb: can't register misc device %d\n",
+						MISC_DYNAMIC_MINOR);
+		return ret;
+	}
+	ret = misc_register(&adb_enable_device);
+	if (ret) {
+		printk(KERN_ERR "adb: can't register misc enable device %d\n",
+						MISC_DYNAMIC_MINOR);
+		misc_deregister(&adb_device);
+		return ret;
+	}
+
+	ret = usb_function_register(&usb_func_adb);
+	if (ret) {
+		misc_deregister(&adb_device);
+		misc_deregister(&adb_enable_device);
+	}
+	return ret;
+}
+
+module_init(adb_init);
+
+static void __exit adb_exit(void)
+{
+	misc_deregister(&adb_device);
+	misc_deregister(&adb_enable_device);
+
+	usb_function_unregister(&usb_func_adb);
+}
+module_exit(adb_exit);
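
From userspace the adb function is driven through the two misc devices registered above: /dev/android_adb_enable, whose open/close toggles the function via usb_function_enable(), and /dev/android_adb, which carries the bulk data. A rough sketch of the usage pattern (userspace code, illustrative only, not part of the patch):

/* Illustrative userspace usage of the adb function devices. */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int enable_fd, adb_fd;

	/* Keeping this fd open keeps the adb function enabled. */
	enable_fd = open("/dev/android_adb_enable", O_RDWR);
	adb_fd = open("/dev/android_adb", O_RDWR);
	if (enable_fd < 0 || adb_fd < 0)
		return 1;

	/* read() blocks until the function is online and data arrives;
	 * transfers are chunked into TXN_MAX (4096 byte) USB requests. */
	n = read(adb_fd, buf, sizeof(buf));
	if (n > 0)
		write(adb_fd, buf, n);

	close(adb_fd);
	close(enable_fd);	/* releasing it disables the function again */
	return 0;
}
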
diff --git a/drivers/usb/function/diag.c b/drivers/usb/function/diag.c
new file mode 100644
index 0000000..94c32e7
--- /dev/null
+++ b/drivers/usb/function/diag.c
@@ -0,0 +1,567 @@
+/* drivers/usb/function/diag.c
+ *
+ * Diag Function Device - Route DIAG frames between SMD and USB
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/err.h>
+
+#include <mach/msm_smd.h>
+#include <mach/usbdiag.h>
+
+#include "usb_function.h"
+
+#define WRITE_COMPLETE 0
+#define READ_COMPLETE  0
+static struct usb_interface_descriptor intf_desc = {
+	.bLength            =	sizeof intf_desc,
+	.bDescriptorType    =	USB_DT_INTERFACE,
+	.bNumEndpoints      =	2,
+	.bInterfaceClass    =	0xFF,
+	.bInterfaceSubClass =	0xFF,
+	.bInterfaceProtocol =	0xFF,
+};
+
+static struct usb_endpoint_descriptor hs_bulk_in_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   =	__constant_cpu_to_le16(512),
+	.bInterval        =	0,
+};
+static struct usb_endpoint_descriptor fs_bulk_in_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(64),
+	.bInterval        =	0,
+};
+
+static struct usb_endpoint_descriptor hs_bulk_out_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(512),
+	.bInterval        =	0,
+};
+
+static struct usb_endpoint_descriptor fs_bulk_out_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(64),
+	.bInterval        =	0,
+};
+
+/* list of requests */
+struct diag_req_entry {
+	struct list_head re_entry;
+	struct usb_request *usb_req;
+	void *diag_request;
+};
+struct diag_context {
+	struct usb_endpoint *epout;
+	struct usb_endpoint *epin;
+	spinlock_t dev_lock;
+	/* linked list of read requests */
+	struct list_head dev_read_req_list;
+	/* linked list of write requests */
+	struct list_head dev_write_req_list;
+	struct diag_operations *operations;
+	struct workqueue_struct *diag_wq;
+	struct work_struct usb_config_work;
+	unsigned configured;
+	unsigned bound;
+	int diag_opened;
+};
+
+static struct usb_function usb_func_diag;
+static struct diag_context _context;
+static void diag_write_complete(struct usb_endpoint *,
+		struct usb_request *);
+static struct diag_req_entry *diag_alloc_req_entry(struct usb_endpoint *,
+		unsigned len, gfp_t);
+static void diag_free_req_entry(struct usb_endpoint *, struct diag_req_entry *);
+static void diag_read_complete(struct usb_endpoint *, struct usb_request *);
+
+
+static void diag_unbind(void *context)
+{
+
+	struct diag_context *ctxt = context;
+
+	if (!ctxt)
+		return;
+	if (!ctxt->bound)
+		return;
+	if (ctxt->epin) {
+		usb_ept_fifo_flush(ctxt->epin);
+		usb_ept_enable(ctxt->epin, 0);
+		usb_free_endpoint(ctxt->epin);
+	}
+	if (ctxt->epout) {
+		usb_ept_fifo_flush(ctxt->epout);
+		usb_ept_enable(ctxt->epout, 0);
+		usb_free_endpoint(ctxt->epout);
+	}
+	ctxt->bound = 0;
+}
+static void diag_bind(void *context)
+{
+	struct diag_context *ctxt = context;
+
+	if (!ctxt)
+		return;
+
+	intf_desc.bInterfaceNumber =
+		usb_msm_get_next_ifc_number(&usb_func_diag);
+
+	ctxt->epin = usb_alloc_endpoint(USB_DIR_IN);
+	if (ctxt->epin) {
+		hs_bulk_in_desc.bEndpointAddress =
+			USB_DIR_IN | ctxt->epin->num;
+		fs_bulk_in_desc.bEndpointAddress =
+			USB_DIR_IN | ctxt->epin->num;
+	}
+
+	ctxt->epout = usb_alloc_endpoint(USB_DIR_OUT);
+	if (ctxt->epout) {
+		hs_bulk_out_desc.bEndpointAddress =
+			USB_DIR_OUT | ctxt->epout->num;
+		fs_bulk_out_desc.bEndpointAddress =
+			USB_DIR_OUT | ctxt->epout->num;
+	}
+
+	ctxt->bound = 1;
+}
+static void diag_configure(int configured, void *_ctxt)
+
+{
+	struct diag_context *ctxt = _ctxt;
+
+	if (!ctxt)
+		return;
+	if (configured) {
+		if (usb_msm_get_speed() == USB_SPEED_HIGH) {
+			usb_configure_endpoint(ctxt->epin, &hs_bulk_in_desc);
+			usb_configure_endpoint(ctxt->epout, &hs_bulk_out_desc);
+		} else {
+			usb_configure_endpoint(ctxt->epin, &fs_bulk_in_desc);
+			usb_configure_endpoint(ctxt->epout, &fs_bulk_out_desc);
+		}
+		usb_ept_enable(ctxt->epin,  1);
+		usb_ept_enable(ctxt->epout, 1);
+		ctxt->configured = 1;
+		queue_work(_context.diag_wq, &(_context.usb_config_work));
+	} else {
+		/* all pending requests will be canceled */
+		ctxt->configured = 0;
+		if (ctxt->epin) {
+			usb_ept_fifo_flush(ctxt->epin);
+			usb_ept_enable(ctxt->epin, 0);
+		}
+		if (ctxt->epout) {
+			usb_ept_fifo_flush(ctxt->epout);
+			usb_ept_enable(ctxt->epout, 0);
+		}
+		if ((ctxt->operations) &&
+			(ctxt->operations->diag_disconnect))
+				ctxt->operations->diag_disconnect();
+	}
+
+}
+static struct usb_function usb_func_diag = {
+	.bind = diag_bind,
+	.configure = diag_configure,
+	.unbind = diag_unbind,
+
+
+	.name = "diag",
+	.context = &_context,
+};
+int diag_usb_register(struct diag_operations *func)
+{
+	struct diag_context *ctxt = &_context;
+
+	if (func == NULL) {
+		printk(KERN_ERR "diag_usb_register: registering"
+				" diag char operations NULL\n");
+		return -1;
+	}
+	ctxt->operations = func;
+	if (ctxt->configured == 1)
+		if ((ctxt->operations) &&
+			(ctxt->operations->diag_connect))
+				ctxt->operations->diag_connect();
+	return 0;
+}
+EXPORT_SYMBOL(diag_usb_register);
+
+int diag_usb_unregister(void)
+{
+	struct diag_context *ctxt = &_context;
+
+	ctxt->operations = NULL;
+	return 0;
+}
+EXPORT_SYMBOL(diag_usb_unregister);
+
+int diag_open(int num_req)
+{
+	struct diag_context *ctxt = &_context;
+	struct diag_req_entry *write_entry;
+	struct diag_req_entry *read_entry;
+	int i = 0;
+
+	for (i = 0; i < num_req; i++) {
+		write_entry = diag_alloc_req_entry(ctxt->epin, 0, GFP_KERNEL);
+		if (write_entry) {
+			write_entry->usb_req->complete = diag_write_complete;
+			write_entry->usb_req->device = (void *)ctxt;
+			list_add(&write_entry->re_entry,
+					&ctxt->dev_write_req_list);
+		} else
+			goto write_error;
+	}
+
+	for (i = 0; i < num_req; i++) {
+		read_entry = diag_alloc_req_entry(ctxt->epout, 0, GFP_KERNEL);
+		if (read_entry) {
+			read_entry->usb_req->complete = diag_read_complete;
+			read_entry->usb_req->device = (void *)ctxt;
+			list_add(&read_entry->re_entry ,
+					&ctxt->dev_read_req_list);
+		} else
+			goto read_error;
+	}
+	ctxt->diag_opened = 1;
+	return 0;
+read_error:
+	printk(KERN_ERR "%s: read requests allocation failure\n", __func__);
+	while (!list_empty(&ctxt->dev_read_req_list)) {
+		read_entry = list_entry(ctxt->dev_read_req_list.next,
+				struct diag_req_entry, re_entry);
+		list_del(&read_entry->re_entry);
+		diag_free_req_entry(ctxt->epout, read_entry);
+	}
+write_error:
+	printk(KERN_ERR "%s: write requests allocation failure\n", __func__);
+	while (!list_empty(&ctxt->dev_write_req_list)) {
+		write_entry = list_entry(ctxt->dev_write_req_list.next,
+				struct diag_req_entry, re_entry);
+		list_del(&write_entry->re_entry);
+		diag_free_req_entry(ctxt->epin, write_entry);
+	}
+	ctxt->diag_opened = 0;
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(diag_open);
+
+void diag_close(void)
+{
+	struct diag_context *ctxt = &_context;
+	struct diag_req_entry *req_entry;
+	/* free write requests */
+
+	while (!list_empty(&ctxt->dev_write_req_list)) {
+		req_entry = list_entry(ctxt->dev_write_req_list.next,
+				struct diag_req_entry, re_entry);
+		list_del(&req_entry->re_entry);
+		diag_free_req_entry(ctxt->epin, req_entry);
+	}
+
+	/* free read requests */
+	while (!list_empty(&ctxt->dev_read_req_list)) {
+		req_entry = list_entry(ctxt->dev_read_req_list.next,
+				struct diag_req_entry, re_entry);
+		list_del(&req_entry->re_entry);
+		diag_free_req_entry(ctxt->epout, req_entry);
+	}
+	return;
+}
+EXPORT_SYMBOL(diag_close);
+
+static void diag_free_req_entry(struct usb_endpoint *ep,
+		struct diag_req_entry *req)
+{
+	if (ep != NULL && req != NULL) {
+		if (req->usb_req != NULL)
+			usb_ept_free_req(ep, req->usb_req);
+		kfree(req);
+	}
+}
+
+
+static struct diag_req_entry *diag_alloc_req_entry(struct usb_endpoint *ep,
+		unsigned len, gfp_t kmalloc_flags)
+{
+	struct diag_req_entry *req;
+
+	req = kmalloc(sizeof(struct diag_req_entry), kmalloc_flags);
+	if (req == NULL)
+		return ERR_PTR(-ENOMEM);
+
+
+	req->usb_req = usb_ept_alloc_req(ep , 0);
+	if (req->usb_req == NULL) {
+		kfree(req);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	req->usb_req->context = req;
+	return req;
+}
+
+int diag_read(struct diag_request *d_req)
+{
+	unsigned long flags;
+	struct usb_request *req = NULL;
+	struct diag_req_entry *req_entry = NULL;
+	struct diag_context *ctxt = &_context;
+
+
+	if (ctxt->diag_opened != 1)
+		return -EIO;
+	spin_lock_irqsave(&ctxt->dev_lock , flags);
+	if (!list_empty(&ctxt->dev_read_req_list)) {
+		req_entry = list_entry(ctxt->dev_read_req_list.next ,
+				struct diag_req_entry , re_entry);
+		req_entry->diag_request = d_req;
+		req = req_entry->usb_req;
+		list_del(&req_entry->re_entry);
+	}
+	spin_unlock_irqrestore(&ctxt->dev_lock , flags);
+	if (req) {
+		req->buf = d_req->buf;
+		req->length = d_req->length;
+		req->device = ctxt;
+		if (usb_ept_queue_xfer(ctxt->epout, req)) {
+			/* If error add the link to the linked list again. */
+			spin_lock_irqsave(&ctxt->dev_lock , flags);
+			list_add_tail(&req_entry->re_entry ,
+					&ctxt->dev_read_req_list);
+			spin_unlock_irqrestore(&ctxt->dev_lock , flags);
+			printk(KERN_ERR "diag_read: can't queue the request\n");
+			return -EIO;
+		}
+	} else {
+		printk(KERN_ERR
+				"diag_read: no requests available\n");
+		return -EIO;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(diag_read);
+
+int diag_write(struct diag_request *d_req)
+{
+	unsigned long flags;
+	struct usb_request *req = NULL;
+	struct diag_req_entry *req_entry = NULL;
+	struct diag_context *ctxt = &_context;
+
+	if (ctxt->diag_opened != 1)
+		return -EIO;
+	spin_lock_irqsave(&ctxt->dev_lock , flags);
+	if (!list_empty(&ctxt->dev_write_req_list)) {
+		req_entry = list_entry(ctxt->dev_write_req_list.next ,
+				struct diag_req_entry , re_entry);
+		req_entry->diag_request = d_req;
+		req = req_entry->usb_req;
+		list_del(&req_entry->re_entry);
+	}
+	spin_unlock_irqrestore(&ctxt->dev_lock, flags);
+	if (req) {
+		req->buf = d_req->buf;
+		req->length = d_req->length;
+		req->device = ctxt;
+		if (usb_ept_queue_xfer(ctxt->epin, req)) {
+			/* If error add the link to linked list again*/
+			spin_lock_irqsave(&ctxt->dev_lock, flags);
+			list_add_tail(&req_entry->re_entry ,
+					&ctxt->dev_write_req_list);
+			spin_unlock_irqrestore(&ctxt->dev_lock, flags);
+			printk(KERN_ERR "diag_write: cannot queue"
+					" write request\n");
+			return -EIO;
+		}
+	} else {
+		printk(KERN_ERR	"diag_write: no requests available\n");
+		return -EIO;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(diag_write);
+
+static void diag_write_complete(struct usb_endpoint *ep ,
+		struct usb_request *req)
+{
+	struct diag_context *ctxt = (struct diag_context *)req->device;
+	struct diag_req_entry *diag_req = req->context;
+	struct diag_request *d_req = (struct diag_request *)
+						diag_req->diag_request;
+	unsigned long flags;
+
+	if (ctxt == NULL) {
+		printk(KERN_ERR "diag_write_complete: request has"
+				" NULL device pointer\n");
+		return;
+	}
+	if (req->status == WRITE_COMPLETE) {
+		if ((req->length >= ep->max_pkt) &&
+				((req->length % ep->max_pkt) == 0)) {
+			req->length = 0;
+			req->device = ctxt;
+			d_req->actual = req->actual;
+			d_req->status = req->status;
+			/* Queue zero length packet */
+			usb_ept_queue_xfer(ctxt->epin, req);
+			return;
+		}
+			/* normal completion*/
+		spin_lock_irqsave(&ctxt->dev_lock, flags);
+		list_add_tail(&diag_req->re_entry ,
+				&ctxt->dev_write_req_list);
+		if (req->length != 0) {
+			d_req->actual = req->actual;
+			d_req->status = req->status;
+		}
+		spin_unlock_irqrestore(&ctxt->dev_lock , flags);
+		if ((ctxt->operations) &&
+			(ctxt->operations->diag_char_write_complete))
+				ctxt->operations->diag_char_write_complete(
+					d_req);
+	} else {
+		spin_lock_irqsave(&ctxt->dev_lock, flags);
+		list_add_tail(&diag_req->re_entry ,
+			&ctxt->dev_write_req_list);
+		d_req->actual = req->actual;
+		d_req->status = req->status;
+		spin_unlock_irqrestore(&ctxt->dev_lock , flags);
+		if ((ctxt->operations) &&
+			(ctxt->operations->diag_char_write_complete))
+				ctxt->operations->diag_char_write_complete(
+					d_req);
+	}
+}
+static void diag_read_complete(struct usb_endpoint *ep ,
+		struct usb_request *req)
+{
+	struct diag_context *ctxt = (struct diag_context *)req->device;
+	struct diag_req_entry *diag_req = req->context;
+	struct diag_request *d_req = (struct diag_request *)
+						diag_req->diag_request;
+	unsigned long flags;
+
+	if (ctxt == NULL) {
+		printk(KERN_ERR "diag_read_complete: request has"
+				" NULL device pointer\n");
+		return;
+	}
+	if (req->status == READ_COMPLETE) {
+			/* normal completion*/
+		spin_lock_irqsave(&ctxt->dev_lock, flags);
+		list_add_tail(&diag_req->re_entry ,
+				&ctxt->dev_read_req_list);
+		d_req->actual = req->actual;
+		d_req->status = req->status;
+		spin_unlock_irqrestore(&ctxt->dev_lock, flags);
+		if ((ctxt->operations) &&
+			(ctxt->operations->diag_char_read_complete))
+				ctxt->operations->diag_char_read_complete(
+					d_req);
+	} else {
+		spin_lock_irqsave(&ctxt->dev_lock, flags);
+		list_add_tail(&diag_req->re_entry ,
+				&ctxt->dev_read_req_list);
+		d_req->actual = req->actual;
+		d_req->status = req->status;
+		spin_unlock_irqrestore(&ctxt->dev_lock, flags);
+		if ((ctxt->operations) &&
+			(ctxt->operations->diag_char_read_complete))
+				ctxt->operations->diag_char_read_complete(
+					d_req);
+	}
+}
+void usb_config_work_func(struct work_struct *work)
+{
+	struct diag_context *ctxt = &_context;
+	if ((ctxt->operations) &&
+		(ctxt->operations->diag_connect))
+			ctxt->operations->diag_connect();
+}
+
+struct usb_descriptor_header *diag_hs_descriptors[4];
+struct usb_descriptor_header *diag_fs_descriptors[4];
+
+static int __init diag_init(void)
+{
+	int r;
+	struct diag_context *ctxt = &_context;
+
+	diag_hs_descriptors[0] = (struct usb_descriptor_header *)&intf_desc;
+	diag_hs_descriptors[1] =
+		(struct usb_descriptor_header *)&hs_bulk_in_desc;
+	diag_hs_descriptors[2] =
+		(struct usb_descriptor_header *)&hs_bulk_out_desc;
+	diag_hs_descriptors[3] = NULL;
+
+	diag_fs_descriptors[0] = (struct usb_descriptor_header *)&intf_desc;
+	diag_fs_descriptors[1] =
+		(struct usb_descriptor_header *)&fs_bulk_in_desc;
+	diag_fs_descriptors[2] =
+		(struct usb_descriptor_header *)&fs_bulk_out_desc;
+	diag_fs_descriptors[3] = NULL;
+	INIT_LIST_HEAD(&ctxt->dev_read_req_list);
+	INIT_LIST_HEAD(&ctxt->dev_write_req_list);
+	ctxt->diag_wq  = create_singlethread_workqueue("diag");
+	if (ctxt->diag_wq == NULL)
+		return -ENOMEM;
+	INIT_WORK(&_context.usb_config_work , usb_config_work_func);
+
+	usb_func_diag.hs_descriptors = diag_hs_descriptors;
+	usb_func_diag.fs_descriptors = diag_fs_descriptors;
+	spin_lock_init(&_context.dev_lock);
+	r = usb_function_register(&usb_func_diag);
+	if (r < 0)
+		destroy_workqueue(ctxt->diag_wq);
+	return r;
+}
+
+module_init(diag_init);
+static void __exit diag_exit(void)
+{
+	struct diag_context *ctxt = &_context;
+
+	usb_function_unregister(&usb_func_diag);
+	destroy_workqueue(ctxt->diag_wq);
+}
+module_exit(diag_exit);
+
+MODULE_LICENSE("GPL v2");
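
The diag function exports a small in-kernel API (diag_usb_register(), diag_open(), diag_read(), diag_write()) meant for the diag char driver built under drivers/char/diag/. The sketch below shows a client using only what is visible in this file; struct diag_request and struct diag_operations come from mach/usbdiag.h, which is not part of this hunk, so the field set and callback signatures here are assumptions based on how they are used above, and all "my_" names are illustrative.

/* Illustrative diag client sketch, not part of the patch. */
static void my_diag_read_done(struct diag_request *d_req)
{
	pr_info("diag: read %d bytes, status %d\n",
		d_req->actual, d_req->status);
}

static int my_diag_connect(void)
{
	/* called from usb_config_work_func() once the USB config is set */
	return 0;
}

static struct diag_operations my_diag_ops = {
	.diag_connect			= my_diag_connect,
	.diag_char_read_complete	= my_diag_read_done,
};

static char my_rx_buf[512];
static struct diag_request my_req;

static int __init my_diag_client_init(void)
{
	diag_usb_register(&my_diag_ops);
	diag_open(4);		/* pre-allocate 4 read and 4 write requests */

	my_req.buf = my_rx_buf;
	my_req.length = sizeof(my_rx_buf);
	return diag_read(&my_req);  /* completes via diag_char_read_complete */
}
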
diff --git a/drivers/usb/function/ether.c b/drivers/usb/function/ether.c
new file mode 100644
index 0000000..f31032e9
--- /dev/null
+++ b/drivers/usb/function/ether.c
@@ -0,0 +1,327 @@
+/* drivers/usb/function/ether.c
+ *
+ * Simple Ethernet Function Device
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Implements the "cdc_subset" bulk-only protocol supported by Linux.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include "usb_function.h"
+
+/* Ethernet frame is 1514 + FCS, but round up to 512 * 3 so we
+ * always queue a multiple of the USB max packet size (64 or 512)
+ */
+#define USB_MTU 1536
+
+#define MAX_TX 8
+#define MAX_RX 8
+
+struct ether_context {
+	spinlock_t lock;
+	struct net_device *dev;
+	struct usb_endpoint *out;
+	struct usb_endpoint *in;
+
+	struct list_head rx_reqs;
+	struct list_head tx_reqs;
+
+	struct net_device_stats stats;
+};
+
+static int ether_queue_out(struct ether_context *ctxt,
+			   struct usb_request *req);
+static void ether_in_complete(struct usb_endpoint *ept,
+			      struct usb_request *req);
+static void ether_out_complete(struct usb_endpoint *ept,
+			       struct usb_request *req);
+
+static void ether_bind(struct usb_endpoint **ept, void *_ctxt)
+{
+	struct ether_context *ctxt = _ctxt;
+	struct usb_request *req;
+	unsigned long flags;
+	int n;
+
+	ctxt->out = ept[0];
+	ctxt->in = ept[1];
+
+	for (n = 0; n < MAX_RX; n++) {
+		req = usb_ept_alloc_req(ctxt->out, 0);
+		if (!req)
+			break;
+		req->complete = ether_out_complete;
+		spin_lock_irqsave(&ctxt->lock, flags);
+		list_add_tail(&req->list, &ctxt->rx_reqs);
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+	}
+	for (n = 0; n < MAX_TX; n++) {
+		req = usb_ept_alloc_req(ctxt->in, 0);
+		if (!req)
+			break;
+		req->complete = ether_in_complete;
+		spin_lock_irqsave(&ctxt->lock, flags);
+		list_add_tail(&req->list, &ctxt->tx_reqs);
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+	}
+}
+
+static void ether_in_complete(struct usb_endpoint *ept,
+			      struct usb_request *req)
+{
+	unsigned long flags;
+	struct sk_buff *skb = req->context;
+	struct ether_context *ctxt = *((void **) skb->cb);
+
+	if (req->status == 0) {
+		ctxt->stats.tx_packets++;
+		ctxt->stats.tx_bytes += req->actual;
+	} else {
+		ctxt->stats.tx_errors++;
+	}
+
+	dev_kfree_skb_any(skb);
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+	if (list_empty(&ctxt->tx_reqs))
+		netif_start_queue(ctxt->dev);
+	list_add_tail(&req->list, &ctxt->tx_reqs);
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+}
+
+static void ether_out_complete(struct usb_endpoint *ept,
+			       struct usb_request *req)
+{
+	struct sk_buff *skb = req->context;
+	struct ether_context *ctxt = *((void **) skb->cb);
+
+	if (req->status == 0) {
+		skb_put(skb, req->actual);
+		skb->protocol = eth_type_trans(skb, ctxt->dev);
+		ctxt->stats.rx_packets++;
+		ctxt->stats.rx_bytes += req->actual;
+		netif_rx(skb);
+	} else {
+		dev_kfree_skb_any(skb);
+		ctxt->stats.rx_errors++;
+	}
+
+	/* don't bother requeuing if we just went offline */
+	if (req->status == -ENODEV) {
+		unsigned long flags;
+		spin_lock_irqsave(&ctxt->lock, flags);
+		list_add_tail(&req->list, &ctxt->rx_reqs);
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+	} else {
+		if (ether_queue_out(ctxt, req))
+			pr_err("ether_out: cannot requeue\n");
+	}
+}
+
+static int ether_queue_out(struct ether_context *ctxt,
+			   struct usb_request *req)
+{
+	unsigned long flags;
+	struct sk_buff *skb;
+	int ret;
+
+	skb = alloc_skb(USB_MTU + NET_IP_ALIGN, GFP_ATOMIC);
+	if (!skb) {
+		pr_err("ether_queue_out: failed to alloc skb\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	skb_reserve(skb, NET_IP_ALIGN);
+
+	*((void **) skb->cb) = ctxt;
+	req->buf = skb->data;
+	req->length = USB_MTU;
+	req->context = skb;
+
+	ret = usb_ept_queue_xfer(ctxt->out, req);
+	if (ret) {
+fail:
+		spin_lock_irqsave(&ctxt->lock, flags);
+		list_add_tail(&req->list, &ctxt->rx_reqs);
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+	}
+
+	return ret;
+}
+
+static void ether_configure(int configured, void *_ctxt)
+{
+	unsigned long flags;
+	struct ether_context *ctxt = _ctxt;
+	struct usb_request *req;
+
+	pr_info("ether_configure() %d\n", configured);
+
+	if (configured) {
+		/* we're online -- get all rx requests queued */
+		for (;;) {
+			spin_lock_irqsave(&ctxt->lock, flags);
+			if (list_empty(&ctxt->rx_reqs)) {
+				req = 0;
+			} else {
+				req = list_first_entry(&ctxt->rx_reqs,
+						       struct usb_request,
+						       list);
+				list_del(&req->list);
+			}
+			spin_unlock_irqrestore(&ctxt->lock, flags);
+			if (!req)
+				break;
+			if (ether_queue_out(ctxt, req))
+				break;
+		}
+	} else {
+		/* all pending requests will be canceled */
+	}
+}
+
+static struct usb_function usb_func_ether = {
+	.bind = ether_bind,
+	.configure = ether_configure,
+
+	.name = "ether",
+
+	.ifc_class = 0x02,
+	.ifc_subclass = 0x0a,
+	.ifc_protocol = 0x00,
+
+	.ifc_name = "ether",
+
+	.ifc_ept_count = 2,
+	.ifc_ept_type = { EPT_BULK_OUT, EPT_BULK_IN },
+};
+
+static int usb_ether_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ether_context *ctxt = netdev_priv(dev);
+	struct usb_request *req;
+	unsigned long flags;
+	unsigned len;
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+	if (list_empty(&ctxt->tx_reqs)) {
+		req = 0;
+	} else {
+		req = list_first_entry(&ctxt->tx_reqs,
+				       struct usb_request, list);
+		list_del(&req->list);
+		if (list_empty(&ctxt->tx_reqs))
+			netif_stop_queue(dev);
+	}
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+
+	if (!req) {
+		pr_err("usb_ether_xmit: could not obtain tx request\n");
+		return 1;
+	}
+
+	/* ensure that we end with a short packet */
+	len = skb->len;
+	if (!(len & 63) || !(len & 511))
+		len++;
+
+	*((void **) skb->cb) = ctxt;
+	req->context = skb;
+	req->buf = skb->data;
+	req->length = len;
+
+	if (usb_ept_queue_xfer(ctxt->in, req)) {
+		spin_lock_irqsave(&ctxt->lock, flags);
+		list_add_tail(&req->list, &ctxt->tx_reqs);
+		netif_start_queue(dev);
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+
+		dev_kfree_skb_any(skb);
+		ctxt->stats.tx_dropped++;
+
+		pr_err("usb_ether_xmit: could not queue tx request\n");
+	}
+
+	return 0;
+}
+
+static int usb_ether_open(struct net_device *dev)
+{
+	return 0;
+}
+
+static int usb_ether_stop(struct net_device *dev)
+{
+	return 0;
+}
+
+static struct net_device_stats *usb_ether_get_stats(struct net_device *dev)
+{
+	struct ether_context *ctxt = netdev_priv(dev);
+	return &ctxt->stats;
+}
+
+static void __init usb_ether_setup(struct net_device *dev)
+{
+	struct ether_context *ctxt = netdev_priv(dev);
+
+	pr_info("usb_ether_setup()\n");
+
+	INIT_LIST_HEAD(&ctxt->rx_reqs);
+	INIT_LIST_HEAD(&ctxt->tx_reqs);
+	spin_lock_init(&ctxt->lock);
+	ctxt->dev = dev;
+
+	dev->open = usb_ether_open;
+	dev->stop = usb_ether_stop;
+	dev->hard_start_xmit = usb_ether_xmit;
+	dev->get_stats = usb_ether_get_stats;
+	dev->watchdog_timeo = 20;
+
+	ether_setup(dev);
+
+	random_ether_addr(dev->dev_addr);
+}
+
+static int __init ether_init(void)
+{
+	struct net_device *dev;
+	int ret;
+
+	dev = alloc_netdev(sizeof(struct ether_context),
+			   "usb%d", usb_ether_setup);
+	if (!dev)
+		return -ENOMEM;
+
+	ret = register_netdev(dev);
+	if (ret) {
+		free_netdev(dev);
+	} else {
+		struct ether_context *ctxt = netdev_priv(dev);
+		usb_func_ether.context = ctxt;
+		usb_function_register(&usb_func_ether);
+	}
+	return ret;
+}
+
+module_init(ether_init);
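
usb_ether_xmit() above bumps the transfer length by one byte whenever it would be an exact multiple of the bulk max packet size (64 at full speed, 512 at high speed), so the transfer always ends in a short packet; the cdc_subset framing carries no length header, and the terminating short packet is what tells the host the Ethernet frame is complete. The rule in isolation, as an illustrative helper only (the name is not part of the patch):

/* Illustrative only: never let a transfer end exactly on a max-packet
 * boundary, so the host always sees a terminating short packet.
 */
static unsigned int eth_pad_len(unsigned int len)
{
	if ((len % 64) == 0 || (len % 512) == 0)
		len++;	/* one padding byte forces a short final packet */

	return len;
}
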
diff --git a/drivers/usb/function/ether_cdc_ecm.c b/drivers/usb/function/ether_cdc_ecm.c
new file mode 100644
index 0000000..8fa5af1
--- /dev/null
+++ b/drivers/usb/function/ether_cdc_ecm.c
@@ -0,0 +1,1337 @@
+/*
+ * ether_cdc_ecm.c -- Ethernet Function driver, with CDC
+ *
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This file has been derived from gadget/ether.c
+ *
+ * Copyright (C) 2003-2005 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ *
+ * All source code in this file is licensed under the following license except
+ * where indicated.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can find it at http://www.fsf.org
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/device.h>
+#include <linux/ctype.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/cdc.h>
+
+#include "usb_function.h"
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Ethernet function driver -- with CDC options
+ * Builds on hardware support for a full duplex link.
+ *
+ * CDC Ethernet is the standard USB solution for sending Ethernet frames
+ * using USB.  Real hardware tends to use the same framing protocol but look
+ * different for control features.  This driver strongly prefers to use
+ * this USB-IF standard as its open-systems interoperability solution;
+ * most host side USB stacks (except from Microsoft) support it.
+ */
+
+#define DRIVER_DESC		"Ethernet Function CDC ECM"
+#define DRIVER_VERSION		"1.0"
+
+static const char shortname[] = "ether";
+static const char driver_desc[] = DRIVER_DESC;
+
+static unsigned int string_data;
+static unsigned int string_control;
+static unsigned int string_ethaddr;
+#define RX_EXTRA	20		/* guard against rx overflows */
+
+
+
+/* outgoing packet filters. */
+#define	DEFAULT_FILTER	(USB_CDC_PACKET_TYPE_BROADCAST \
+			| USB_CDC_PACKET_TYPE_ALL_MULTICAST \
+			| USB_CDC_PACKET_TYPE_PROMISCUOUS \
+			| USB_CDC_PACKET_TYPE_DIRECTED)
+
+/*-------------------------------------------------------------------------*/
+
+struct eth_dev {
+	spinlock_t		lock;
+	struct usb_request	*req;		/* for control responses */
+	struct usb_request	*stat_req;	/* for cdc status */
+
+	unsigned		configured:1;
+	struct usb_endpoint	*in_ep, *out_ep, *status_ep;
+
+	spinlock_t		req_lock;
+	struct list_head	tx_reqs, rx_reqs;
+
+	struct net_device	*net;
+	struct net_device_stats	stats;
+	atomic_t		tx_qlen;
+
+	struct work_struct	work;
+	unsigned		zlp:1;
+	unsigned		suspended:1;
+	u16			cdc_filter;
+	unsigned long		todo;
+#define	WORK_RX_MEMORY		0
+	u8			host_mac[ETH_ALEN];
+
+	int alt_set;
+};
+
+static struct usb_function usb_func_ether;
+
+/* Ethernet function descriptors */
+#define USB_DT_IAD_SIZE		8
+struct usb_interface_assoc_descriptor	eth_IAD = {
+	.bLength           = USB_DT_IAD_SIZE,
+	.bDescriptorType   = USB_DT_INTERFACE_ASSOCIATION,
+	.bInterfaceCount   = 2,
+	.bFunctionClass    = USB_CLASS_COMM,
+	.bFunctionSubClass = USB_CDC_SUBCLASS_ETHERNET,
+	.bFunctionProtocol = USB_CDC_PROTO_NONE,
+	.iFunction         = 0,
+};
+
+struct usb_interface_descriptor		eth_control_intf = {
+	.bLength =  USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_COMM,
+	.bInterfaceSubClass =	USB_CDC_SUBCLASS_ETHERNET,
+	.bInterfaceProtocol =	USB_CDC_PROTO_NONE,
+};
+
+struct usb_cdc_header_desc		eth_header_desc = {
+	.bLength =		sizeof(struct usb_cdc_header_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,
+	.bcdCDC =		__constant_cpu_to_le16(0x0110),
+};
+
+struct usb_cdc_union_desc		eth_union_desc = {
+	.bLength =		sizeof(struct usb_cdc_union_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
+};
+
+struct usb_cdc_ether_desc 		eth_ether_desc = {
+	.bLength =		sizeof(struct usb_cdc_ether_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_ETHERNET_TYPE,
+	/* this descriptor actually adds value, surprise! */
+	.bmEthernetStatistics =	__constant_cpu_to_le32(0), /* no statistics */
+	.wMaxSegmentSize =	__constant_cpu_to_le16(ETH_FRAME_LEN),
+	.wNumberMCFilters =	__constant_cpu_to_le16(0),
+	.bNumberPowerFilters =	0,
+};
+
+struct usb_endpoint_descriptor 		eth_control_intf_hs_int_in_ep_desc = {
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bEndpointAddress =     USB_DIR_IN,
+	.bmAttributes =         USB_ENDPOINT_XFER_INT,
+	.bInterval =           4,
+	.wMaxPacketSize =       64,
+};
+
+struct usb_endpoint_descriptor 		eth_control_intf_fs_int_in_ep_desc = {
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bEndpointAddress =     USB_DIR_IN,
+	.bmAttributes =         USB_ENDPOINT_XFER_INT,
+	.bInterval =           4,
+	.wMaxPacketSize =       64,
+};
+
+struct usb_interface_descriptor 	eth_data_alt_zero_intf = {
+	.bLength =  USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =      USB_DT_INTERFACE,
+	.bAlternateSetting =    0,
+	.bNumEndpoints =        0,
+	.bInterfaceClass =      USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =   0,
+	.bInterfaceProtocol =   0,
+};
+
+struct usb_interface_descriptor 	eth_data_alt_one_intf = {
+	.bLength =              USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =      USB_DT_INTERFACE,
+	.bAlternateSetting =    1,
+	.bNumEndpoints =        2,
+	.bInterfaceClass =      USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =   0,
+	.bInterfaceProtocol =   USB_CDC_PROTO_NONE,
+};
+
+struct usb_endpoint_descriptor 		eth_data_intf_hs_bulk_out_ep_desc = {
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bEndpointAddress =     USB_DIR_OUT,
+	.bmAttributes =         USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =       __constant_cpu_to_le16(512),
+};
+
+struct usb_endpoint_descriptor 		eth_data_intf_fs_bulk_out_ep_desc = {
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bEndpointAddress =     USB_DIR_OUT,
+	.bmAttributes =         USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =       __constant_cpu_to_le16(64),
+};
+
+struct usb_endpoint_descriptor 		eth_data_intf_hs_bulk_in_ep_desc = {
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bEndpointAddress =     USB_DIR_IN,
+	.bmAttributes =         USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =       __constant_cpu_to_le16(512),
+};
+
+struct usb_endpoint_descriptor 		eth_data_intf_fs_bulk_in_ep_desc = {
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bEndpointAddress =     USB_DIR_IN,
+	.bmAttributes =         USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =       __constant_cpu_to_le16(64),
+};
+
+struct eth_dev *eth_device;
+
+/* Some systems will want different product identifiers published in the
+ * device descriptor, either numbers or strings or both.  These string
+ * parameters are in UTF-8 (superset of ASCII's 7 bit characters).
+ */
+
+
+/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
+static char *dev_addr;
+module_param(dev_addr, charp, S_IRUGO);
+MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");
+
+/* this address is invisible to ifconfig */
+static char *host_addr;
+module_param(host_addr, charp, S_IRUGO);
+MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
+
+static char ethaddr[2 * ETH_ALEN + 1];
+static int eth_bound;
+
+#define DEFAULT_QLEN	2	/* double buffering by default */
+
+/* peak bulk transfer bits-per-second: 13 packets of 512 bytes per
+ * microframe, 8 microframes per millisecond, 8 bits per byte
+ */
+#define	HS_BPS		(13 * 512 * 8 * 1000 * 8)
+
+/* for dual-speed hardware, use deeper queues at highspeed */
+#define qlen (DEFAULT_QLEN * 5) /* High Speed */
+
+/*-------------------------------------------------------------------------*/
+
+#define xprintk(d, level, fmt, args...) \
+	printk(level "%s: " fmt, (d)->net->name, ## args)
+
+#ifdef DEBUG
+#undef DEBUG
+#define DEBUG(dev, fmt, args...) \
+	xprintk(dev, KERN_DEBUG, fmt, ## args)
+#else
+#define DEBUG(dev, fmt, args...) \
+	do { } while (0)
+#endif /* DEBUG */
+
+#ifdef VERBOSE_DEBUG
+#define VDEBUG	DEBUG
+#else
+#define VDEBUG(dev, fmt, args...) \
+	do { } while (0)
+#endif /* VERBOSE_DEBUG */
+
+#define ERROR(dev, fmt, args...) \
+	xprintk(dev, KERN_ERR, fmt, ## args)
+#ifdef WARN
+#undef WARN
+#endif
+#define WARN(dev, fmt, args...) \
+	xprintk(dev, KERN_WARNING, fmt, ## args)
+#define INFO(dev, fmt, args...) \
+	xprintk(dev, KERN_INFO, fmt, ## args)
+
+/*-------------------------------------------------------------------------*/
+
+/* include the status endpoint if we can, even where it's optional.
+ * use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one
+ * packet, to simplify cancellation; and a big transfer interval, to
+ * waste less bandwidth.
+ *
+ * some drivers (like Linux 2.4 cdc-ether!) "need" it to exist even
+ * if they ignore the connect/disconnect notifications that real ether
+ * can provide.  more advanced cdc configurations might want to support
+ * encapsulated commands (vendor-specific, using control-OUT).
+ */
+#define STATUS_BYTECOUNT		16	/* 8 byte header + data */
+
+
+static void eth_start(struct eth_dev *dev, gfp_t gfp_flags);
+static int alloc_requests(struct eth_dev *dev, unsigned n, gfp_t gfp_flags);
+
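+/* Bring the function into its operational state: enable the optional
+ * notification (status) endpoint and preallocate the tx/rx request pools.
+ */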
+static int set_ether_config(struct eth_dev *dev, gfp_t gfp_flags)
+{
+	int result = 0;
+
+	if (dev->status_ep)
+		usb_ept_enable(dev->status_ep, 1);
+
+	result = alloc_requests(dev, qlen, gfp_flags);
+	if (result == 0)
+		DEBUG(dev, "qlen %d\n", qlen);
+
+	/* caller is responsible for cleanup on error */
+	return result;
+}
+
+static void eth_reset_config(struct eth_dev *dev)
+{
+	struct usb_request	*req;
+	unsigned long  flags;
+
+	DEBUG(dev, "%s\n", __func__);
+
+	if (!dev)
+		return;
+	if (!dev->net)
+		return;
+
+	if (dev->configured == 0)
+		return;
+	netif_stop_queue(dev->net);
+	netif_carrier_off(dev->net);
+
+	/* disable endpoints, forcing (synchronous) completion of
+	 * pending i/o.  then free the requests.
+	 */
+	if (dev->in_ep) {
+		usb_ept_enable(dev->in_ep, 0);
+		spin_lock_irqsave(&dev->req_lock, flags);
+		while (likely(!list_empty(&dev->tx_reqs))) {
+			req = container_of(dev->tx_reqs.next,
+						struct usb_request, list);
+			list_del(&req->list);
+			spin_unlock_irqrestore(&dev->req_lock, flags);
+			usb_ept_free_req(dev->in_ep, req);
+			spin_lock_irqsave(&dev->req_lock, flags);
+		}
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+	}
+	if (dev->out_ep) {
+		usb_ept_enable(dev->out_ep, 0);
+		spin_lock_irqsave(&dev->req_lock, flags);
+		while (likely(!list_empty(&dev->rx_reqs))) {
+			req = container_of(dev->rx_reqs.next,
+						struct usb_request, list);
+			list_del(&req->list);
+			spin_unlock_irqrestore(&dev->req_lock, flags);
+			usb_ept_free_req(dev->out_ep, req);
+			spin_lock_irqsave(&dev->req_lock, flags);
+		}
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+	}
+
+	if (dev->status_ep)
+		usb_ept_free_req(dev->status_ep, 0);
+	dev->cdc_filter = 0;
+	dev->configured = 0;
+}
+
+/* change our operational config.  must agree with the code
+ * that returns config descriptors, and altsetting code.
+ */
+static int eth_set_config(struct eth_dev *dev,  gfp_t gfp_flags)
+{
+	int result = 0;
+
+	eth_reset_config(dev);
+	result = set_ether_config(dev, gfp_flags);
+	if (result)
+		eth_reset_config(dev);
+	else
+		dev->configured = 1;
+	return result;
+}
+
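+/* Configuration callback.  When configured, program the endpoints with the
+ * high- or full-speed descriptors according to the enumerated speed, then
+ * (re)build the request pools; otherwise just tear the current config down.
+ */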
+static void eth_configure(int configured, void *_ctxt)
+{
+	int			result = 0;
+	struct eth_dev		*dev = (struct eth_dev *) _ctxt;
+
+	if (!dev)
+		return;
+	if (!eth_bound)
+		return;
+
+	if (!configured) {
+		eth_reset_config(dev);
+		return;
+	}
+	if (dev->configured == 1)
+		return;
+	if (usb_msm_get_speed() == USB_SPEED_HIGH) {
+		usb_configure_endpoint(dev->status_ep,
+					&eth_control_intf_hs_int_in_ep_desc);
+		usb_configure_endpoint(dev->in_ep,
+					&eth_data_intf_hs_bulk_in_ep_desc);
+		usb_configure_endpoint(dev->out_ep,
+					&eth_data_intf_hs_bulk_out_ep_desc);
+	} else {
+		usb_configure_endpoint(dev->status_ep,
+					&eth_control_intf_fs_int_in_ep_desc);
+		usb_configure_endpoint(dev->in_ep,
+					&eth_data_intf_fs_bulk_in_ep_desc);
+		usb_configure_endpoint(dev->out_ep,
+					&eth_data_intf_fs_bulk_out_ep_desc);
+	}
+	result = eth_set_config(dev, GFP_ATOMIC);
+}
+
+/* The interrupt endpoint is used in CDC networking models (Ethernet, ATM)
+ * only to notify the host about link status changes (which we support).
+ * Since we want this CDC Ethernet code to be vendor-neutral, only one
+ * status request is ever queued.
+ */
+
+static void
+eth_status_complete(struct usb_endpoint *ep, struct usb_request *req)
+{
+	struct eth_dev			*dev = eth_device;
+	struct usb_cdc_notification	*event = req->buf;
+	int				value = req->status;
+
+	/* issue the second notification if host reads the first */
+	if (event->bNotificationType == USB_CDC_NOTIFY_NETWORK_CONNECTION
+			&& value == 0) {
+		__le32	*data = req->buf + sizeof *event;
+
+		event->bmRequestType = 0xA1;
+		event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE;
+		event->wValue = __constant_cpu_to_le16(0);
+		event->wIndex =	__constant_cpu_to_le16(
+				eth_data_alt_one_intf.bInterfaceNumber);
+		event->wLength = __constant_cpu_to_le16(8);
+
+		/* SPEED_CHANGE data is up/down speeds in bits/sec */
+		data[0] = data[1] = cpu_to_le32(HS_BPS);
+
+		req->length = STATUS_BYTECOUNT;
+		value = usb_ept_queue_xfer(ep, req);
+		DEBUG(dev, "send SPEED_CHANGE --> %d\n", value);
+		if (value == 0)
+			return;
+	} else if (value != -ECONNRESET)
+		DEBUG(dev, "event %02x --> %d\n",
+			event->bNotificationType, value);
+	req->context = NULL;
+}
+
+static void issue_start_status(struct eth_dev *dev)
+{
+	struct usb_request		*req = dev->stat_req;
+	struct usb_cdc_notification	*event;
+	int				value;
+
+	DEBUG(dev, "%s, flush old status first\n", __func__);
+
+	/* flush old status
+	 *
+	 * FIXME ugly idiom, maybe we'd be better with just
+	 * a "cancel the whole queue" primitive since any
+	 * unlink-one primitive has way too many error modes.
+	 * here, we "know" toggle is already clear...
+	 *
+	 * FIXME iff req->context != null just dequeue it
+	 */
+	usb_ept_enable(dev->status_ep,  0);
+	usb_ept_enable(dev->status_ep, 1);
+
+	/* 3.8.1 says to issue first NETWORK_CONNECTION, then
+	 * a SPEED_CHANGE.  could be useful in some configs.
+	 */
+	event = req->buf;
+	event->bmRequestType = 0xA1;
+	event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
+	event->wValue = __constant_cpu_to_le16(1);	/* connected */
+	event->wIndex = __constant_cpu_to_le16(
+				eth_data_alt_one_intf.bInterfaceNumber);
+	event->wLength = 0;
+
+	req->length = sizeof *event;
+	req->complete = eth_status_complete;
+	req->context = dev;
+
+	value = usb_ept_queue_xfer(dev->status_ep, req);
+	if (value < 0)
+		DEBUG(dev, "status buf queue --> %d\n", value);
+}
+
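+/* SET_INTERFACE handler for the data interface.  Altsetting 1 brings the
+ * link up: enable the bulk endpoints, apply the default packet filter,
+ * report carrier and queue the CDC connection notification.  Anything else
+ * takes the link back down.
+ */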
+static int eth_set_interface(int wIndex, int wValue, void *_ctxt)
+{
+	struct eth_dev *dev = eth_device;
+	unsigned long		flags;
+
+	if (dev == NULL)
+		return 1;
+
+	if ((wIndex == eth_data_alt_one_intf.bInterfaceNumber)
+			&& (wValue == 1)) {
+		dev->alt_set = 1;
+		usb_ept_enable(dev->in_ep, 1);
+		usb_ept_enable(dev->out_ep, 1);
+		dev->cdc_filter = DEFAULT_FILTER;
+		netif_carrier_on(dev->net);
+		issue_start_status(dev);
+		if (netif_running(dev->net)) {
+			spin_lock_irqsave(&dev->lock, flags);
+			eth_start(dev, GFP_ATOMIC);
+			spin_unlock_irqrestore(&dev->lock, flags);
+		}
+	} else {
+		dev->alt_set = 0;
+		netif_stop_queue(dev->net);
+		netif_carrier_off(dev->net);
+	}
+	return 0;
+}
+
+static int eth_get_interface(int wIndex, void *_ctxt)
+{
+	struct eth_dev *dev = eth_device;
+
+	return dev->alt_set;
+}
+
+/*
+ * The setup() callback implements all the ep0 functionality that's not
+ * handled lower down.  CDC has a number of less-common features:
+ *
+ *  - class-specific descriptors for the control interface
+ *  - class-specific control requests
+ */
+static int
+eth_setup(struct usb_ctrlrequest *ctrl, void *buf, int len, void *_ctxt)
+{
+	struct eth_dev	*dev = (struct eth_dev *) _ctxt;
+	int		value = -EOPNOTSUPP;
+	u16		wIndex = le16_to_cpu(ctrl->wIndex);
+	u16		wValue = le16_to_cpu(ctrl->wValue);
+	u16		wLength = le16_to_cpu(ctrl->wLength);
+	u16		data_int = eth_data_alt_one_intf.bInterfaceNumber;
+	u16		ctrl_int = eth_control_intf.bInterfaceNumber;
+	switch (ctrl->bRequest) {
+	case USB_CDC_SET_ETHERNET_PACKET_FILTER:
+		/* see 6.2.30: no data, wIndex = interface,
+		 * wValue = packet filter bitmap
+		 */
+		if (ctrl->bRequestType != (USB_TYPE_CLASS|USB_RECIP_INTERFACE)
+			|| wLength != 0
+			|| ((wIndex != data_int) && (wIndex != ctrl_int)))
+			break;
+		DEBUG(dev, "packet filter %02x\n", wValue);
+		dev->cdc_filter = wValue;
+		value = 0;
+		break;
+
+	/* and potentially:
+	 * case USB_CDC_SET_ETHERNET_MULTICAST_FILTERS:
+	 * case USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER:
+	 * case USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER:
+	 * case USB_CDC_GET_ETHERNET_STATISTIC:
+	 */
+
+	default:
+		VDEBUG(dev,
+			"unknown control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			wValue, wIndex, wLength);
+	}
+	return value;
+}
+
+
+static void eth_disconnect(void *_ctxt)
+{
+	struct eth_dev		*dev = (struct eth_dev *) _ctxt;
+	unsigned long		flags;
+
+	printk(KERN_INFO "eth_disconnect()\n");
+	spin_lock_irqsave(&dev->lock, flags);
+	netif_stop_queue(dev->net);
+	netif_carrier_off(dev->net);
+	eth_reset_config(dev);
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* NETWORK DRIVER HOOKUP (to the layer above this driver) */
+
+static int usb_eth_change_mtu(struct net_device *net, int new_mtu)
+{
+	struct eth_dev	*dev = netdev_priv(net);
+
+	if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
+		return -ERANGE;
+	/* no zero-length packet read wanted after mtu-sized packets */
+	if (((new_mtu + sizeof(struct ethhdr)) %
+			(usb_ept_get_max_packet(dev->in_ep))) == 0)
+		return -EDOM;
+	net->mtu = new_mtu;
+	return 0;
+}
+
+static struct net_device_stats *eth_get_stats(struct net_device *net)
+{
+	return &((struct eth_dev *)netdev_priv(net))->stats;
+}
+
+static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
+{
+	strlcpy(p->driver, shortname, sizeof p->driver);
+	strlcpy(p->version, DRIVER_VERSION, sizeof p->version);
+	strlcpy(p->fw_version, "ethernet", sizeof p->fw_version);
+}
+
+static u32 eth_get_link(struct net_device *net)
+{
+	return 1;
+}
+
+static struct ethtool_ops ops = {
+	.get_drvinfo = eth_get_drvinfo,
+	.get_link = eth_get_link
+};
+
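+/* Schedule the deferred work item at most once per flag bit; used here to
+ * retry rx buffer allocation from process context (eth_work/rx_fill).
+ */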
+static void defer_kevent(struct eth_dev *dev, int flag)
+{
+	if (test_and_set_bit(flag, &dev->todo))
+		return;
+	if (!schedule_work(&dev->work))
+		ERROR(dev, "kevent %d may have been dropped\n", flag);
+	else
+		DEBUG(dev, "kevent %d scheduled\n", flag);
+}
+
+static void rx_complete(struct usb_endpoint *ep, struct usb_request *req);
+
+static int
+rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
+{
+	struct sk_buff		*skb;
+	int			retval = -ENOMEM;
+	size_t			size;
+	unsigned long		flags;
+	/* Padding up to RX_EXTRA handles minor disagreements with host.
+	 * Normally we use the USB "terminate on short read" convention;
+	 * so allow up to (N*max_pkt), since that memory is normally
+	 * already allocated.  Some hardware doesn't deal well with short
+	 * reads (e.g. DMA must be N*max_pkt), so for now don't trim a
+	 * byte off the end (to force hardware errors on overflow).
+	 */
+	size = (sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA);
+	size += usb_ept_get_max_packet(dev->out_ep) - 1;
+	size -= size % usb_ept_get_max_packet(dev->out_ep);
+	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
+	if (skb == NULL) {
+		DEBUG(dev, "no rx skb\n");
+		goto enomem;
+	}
+
+	/* Some platforms perform better when IP packets are aligned,
+	 * but on at least one, checksumming fails otherwise.
+	 */
+	skb_reserve(skb, NET_IP_ALIGN);
+
+	req->buf = skb->data;
+	req->length = size;
+	req->complete = rx_complete;
+	req->context = skb;
+
+	retval = usb_ept_queue_xfer(dev->out_ep, req);
+	if (retval == -ENOMEM)
+enomem:
+		defer_kevent(dev, WORK_RX_MEMORY);
+	if (retval) {
+		DEBUG(dev, "rx submit --> %d\n", retval);
+		if (skb)
+			dev_kfree_skb_any(skb);
+		spin_lock_irqsave(&dev->req_lock, flags);
+		list_add(&req->list, &dev->rx_reqs);
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+	}
+	return retval;
+}
+
+static void rx_complete(struct usb_endpoint *ep, struct usb_request *req)
+{
+	struct sk_buff	*skb = req->context;
+	struct eth_dev	*dev = eth_device;
+	int		status = req->status;
+	switch (status) {
+
+	/* normal completion */
+	case 0:
+		skb_put(skb, req->actual);
+		/* we know MaxPacketsPerTransfer == 1 here */
+		if (status < 0
+				|| ETH_HLEN > skb->len
+				|| skb->len > ETH_FRAME_LEN) {
+			dev->stats.rx_errors++;
+			dev->stats.rx_length_errors++;
+			DEBUG(dev, "rx length %d\n", skb->len);
+			break;
+		}
+
+		skb->protocol = eth_type_trans(skb, dev->net);
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += skb->len;
+
+		/* no buffer copies needed, unless hardware can't
+		 * use skb buffers.
+		 */
+		status = netif_rx(skb);
+		skb = NULL;
+		break;
+
+	/* software-driven interface shutdown */
+	case -ECONNRESET:		/* unlink */
+	case -ESHUTDOWN:		/* disconnect etc */
+		VDEBUG(dev, "rx shutdown, code %d\n", status);
+		goto quiesce;
+
+	/* for hardware automagic (such as pxa) */
+	case -ECONNABORTED:		/* endpoint reset */
+		DEBUG(dev, "rx %s reset\n", ep->name);
+		defer_kevent(dev, WORK_RX_MEMORY);
+quiesce:
+		dev_kfree_skb_any(skb);
+		goto clean;
+
+	/* data overrun */
+	case -EOVERFLOW:
+		dev->stats.rx_over_errors++;
+		/* FALLTHROUGH */
+
+	default:
+		dev->stats.rx_errors++;
+		DEBUG(dev, "rx status %d\n", status);
+		break;
+	}
+
+	if (skb)
+		dev_kfree_skb_any(skb);
+	if (!netif_running(dev->net)) {
+clean:
+		spin_lock(&dev->req_lock);
+		list_add(&req->list, &dev->rx_reqs);
+		spin_unlock(&dev->req_lock);
+		req = NULL;
+	}
+	if (req)
+		rx_submit(dev, req, GFP_ATOMIC);
+}
+
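+/* Top up (or trim) a request freelist toward n entries; surplus requests
+ * are freed back to the endpoint.
+ */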
+static int prealloc(struct list_head *list, struct usb_endpoint *ep,
+			unsigned n, gfp_t gfp_flags)
+{
+	unsigned		i;
+	struct usb_request	*req;
+
+	if (!n)
+		return -ENOMEM;
+
+	/* queue/recycle up to N requests */
+	i = n;
+	list_for_each_entry(req, list, list) {
+		if (i-- == 0)
+			goto extra;
+	}
+	while (i--) {
+		/* CDC ECM uses skb buffer pointer for requests */
+		req = usb_ept_alloc_req(ep, 0);
+		if (!req)
+			return list_empty(list) ? -ENOMEM : 0;
+		list_add(&req->list, list);
+	}
+	return 0;
+
+extra:
+	/* free extras */
+	for (;;) {
+		struct list_head	*next;
+
+		next = req->list.next;
+		list_del(&req->list);
+		usb_ept_free_req(ep, req);
+
+		if (next == list)
+			break;
+
+		req = container_of(next, struct usb_request, list);
+	}
+	return 0;
+}
+
+static int alloc_requests(struct eth_dev *dev, unsigned n, gfp_t gfp_flags)
+{
+	int status;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->req_lock, flags);
+	status = prealloc(&dev->tx_reqs, dev->in_ep, n, gfp_flags);
+	if (status < 0)
+		goto fail;
+	status = prealloc(&dev->rx_reqs, dev->out_ep, n, gfp_flags);
+	if (status < 0)
+		goto fail;
+	goto done;
+fail:
+	DEBUG(dev, "can't alloc requests\n");
+done:
+	spin_unlock_irqrestore(&dev->req_lock, flags);
+	return status;
+}
+
+static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
+{
+	struct usb_request	*req;
+	unsigned long		flags;
+	/* fill unused rxq slots with some skb */
+	spin_lock_irqsave(&dev->req_lock, flags);
+	while (!list_empty(&dev->rx_reqs)) {
+		req = container_of(dev->rx_reqs.next,
+				struct usb_request, list);
+		list_del_init(&req->list);
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+
+		if (rx_submit(dev, req, gfp_flags) < 0) {
+			defer_kevent(dev, WORK_RX_MEMORY);
+			return;
+		}
+
+		spin_lock_irqsave(&dev->req_lock, flags);
+	}
+	spin_unlock_irqrestore(&dev->req_lock, flags);
+}
+
+static void eth_work(struct work_struct *work)
+{
+	struct eth_dev	*dev = container_of(work, struct eth_dev, work);
+
+	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
+		if (netif_running(dev->net))
+			rx_fill(dev, GFP_KERNEL);
+	}
+
+	if (dev->todo)
+		DEBUG(dev, "work done, flags = 0x%lx\n", dev->todo);
+}
+
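+/* Bulk IN completion: account the packet, recycle the request onto the
+ * tx freelist, free the skb, and wake the queue if carrier is still on.
+ */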
+static void tx_complete(struct usb_endpoint *ep, struct usb_request *req)
+{
+	struct sk_buff	*skb = req->context;
+	struct eth_dev	*dev = eth_device;
+
+	switch (req->status) {
+	default:
+		dev->stats.tx_errors++;
+		VDEBUG(dev, "tx err %d\n", req->status);
+		/* FALLTHROUGH */
+	case -ECONNRESET:		/* unlink */
+	case -ESHUTDOWN:		/* disconnect etc */
+		break;
+	case 0:
+		dev->stats.tx_bytes += skb->len;
+	}
+	dev->stats.tx_packets++;
+
+	spin_lock(&dev->req_lock);
+	list_add(&req->list, &dev->tx_reqs);
+	spin_unlock(&dev->req_lock);
+	dev_kfree_skb_any(skb);
+
+	atomic_dec(&dev->tx_qlen);
+	if (netif_carrier_ok(dev->net))
+		netif_wake_queue(dev->net);
+}
+
+static inline int eth_is_promisc(struct eth_dev *dev)
+{
+	return dev->cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
+}
+
+static int eth_start_xmit(struct sk_buff *skb, struct net_device *net)
+{
+	struct eth_dev		*dev = netdev_priv(net);
+	int			length = skb->len;
+	int			retval;
+	struct usb_request	*req = NULL;
+	unsigned long		flags;
+
+	/* apply outgoing CDC filters */
+	if (!eth_is_promisc(dev)) {
+		u8		*dest = skb->data;
+
+		if (is_multicast_ether_addr(dest)) {
+			u16	type;
+
+			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
+			 * SET_ETHERNET_MULTICAST_FILTERS requests
+			 */
+			if (is_broadcast_ether_addr(dest))
+				type = USB_CDC_PACKET_TYPE_BROADCAST;
+			else
+				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
+			if (!(dev->cdc_filter & type)) {
+				dev_kfree_skb_any(skb);
+				return 0;
+			}
+		}
+		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
+	}
+
+	spin_lock_irqsave(&dev->req_lock, flags);
+	/*
+	 * this freelist can be empty if an interrupt triggered disconnect()
+	 * and reconfigured the function (shutting down this queue) after the
+	 * network stack decided to xmit but before we got the spinlock.
+	 */
+	if (list_empty(&dev->tx_reqs)) {
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+		return 1;
+	}
+
+	req = container_of(dev->tx_reqs.next, struct usb_request, list);
+	list_del(&req->list);
+
+	/* temporarily stop TX queue when the freelist empties */
+	if (list_empty(&dev->tx_reqs))
+		netif_stop_queue(net);
+	spin_unlock_irqrestore(&dev->req_lock, flags);
+
+	/* no buffer copies needed, unless the network stack did it
+	 * or the hardware can't use skb buffers.
+	 */
+	req->buf = skb->data;
+	req->context = skb;
+	req->complete = tx_complete;
+
+	/* use zlp framing on tx for strict CDC-Ether conformance,
+	 * though any robust network rx path ignores extra padding.
+	 * and some hardware doesn't like to write zlps.
+	 */
+	if (!dev->zlp && (length % usb_ept_get_max_packet(dev->in_ep)) == 0)
+		length++;
+
+	req->length = length;
+
+	retval = usb_ept_queue_xfer(dev->in_ep, req);
+	switch (retval) {
+	default:
+		DEBUG(dev, "tx queue err %d\n", retval);
+		break;
+	case 0:
+		net->trans_start = jiffies;
+		atomic_inc(&dev->tx_qlen);
+	}
+	if (retval) {
+		dev->stats.tx_dropped++;
+		dev_kfree_skb_any(skb);
+		spin_lock_irqsave(&dev->req_lock, flags);
+		if (list_empty(&dev->tx_reqs))
+			netif_start_queue(net);
+		list_add(&req->list, &dev->tx_reqs);
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+	}
+	return 0;
+}
+
+
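+/* (Re)start traffic: prime the rx queue and let the network stack transmit. */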
+static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
+{
+	DEBUG(dev, "%s\n", __func__);
+
+	/* fill the rx queue */
+	rx_fill(dev, gfp_flags);
+
+	/* and open the tx floodgates */
+	atomic_set(&dev->tx_qlen, 0);
+	netif_wake_queue(dev->net);
+}
+
+static int eth_open(struct net_device *net)
+{
+	struct eth_dev		*dev = netdev_priv(net);
+
+	DEBUG(dev, "%s\n", __func__);
+	if (netif_carrier_ok(dev->net))
+		eth_start(dev, GFP_KERNEL);
+	return 0;
+}
+
+static int eth_stop(struct net_device *net)
+{
+	struct eth_dev		*dev = netdev_priv(net);
+
+	VDEBUG(dev, "%s\n", __func__);
+	netif_stop_queue(net);
+
+	DEBUG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
+		dev->stats.rx_packets, dev->stats.tx_packets,
+		dev->stats.rx_errors, dev->stats.tx_errors
+		);
+
+	/* ensure there are no more active requests */
+	if (dev->configured) {
+		usb_ept_enable(dev->in_ep, 0);
+		usb_ept_enable(dev->out_ep, 0);
+		if (netif_carrier_ok(dev->net)) {
+			DEBUG(dev, "host still using in/out endpoints\n");
+			/* FIXME idiom may leave toggle wrong here */
+			usb_ept_enable(dev->in_ep, 1);
+			usb_ept_enable(dev->out_ep, 1);
+		}
+		if (dev->status_ep) {
+			usb_ept_enable(dev->status_ep, 0);
+			usb_ept_enable(dev->status_ep,  1);
+		}
+	}
+
+	return 0;
+}
+
+
+static u8 __devinit nibble(unsigned char c)
+{
+	if (likely(isdigit(c)))
+		return c - '0';
+	c = toupper(c);
+	if (likely(isxdigit(c)))
+		return 10 + c - 'A';
+	return 0;
+}
+
+static int __devinit get_ether_addr(const char *str, u8 *dev_addr)
+{
+	if (str) {
+		unsigned	i;
+
+		for (i = 0; i < 6; i++) {
+			unsigned char num;
+
+			if ((*str == '.') || (*str == ':'))
+				str++;
+			num = nibble(*str++) << 4;
+			num |= (nibble(*str++));
+			dev_addr[i] = num;
+		}
+		if (is_valid_ether_addr(dev_addr))
+			return 0;
+	}
+	random_ether_addr(dev_addr);
+	return 1;
+}
+
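+/* Undo eth_bind(): flush and release the endpoints, then unregister and
+ * free the network device.
+ */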
+static void  eth_unbind(void *_ctxt)
+{
+	struct eth_dev	*dev = (struct eth_dev *)_ctxt;
+
+	pr_debug("%s()\n", __func__);
+	if (!dev)
+		return;
+	if (!eth_bound)
+		return;
+
+	if (dev->in_ep) {
+		usb_ept_fifo_flush(dev->in_ep);
+		usb_ept_enable(dev->in_ep, 0);
+		usb_free_endpoint(dev->in_ep);
+	}
+	if (dev->out_ep) {
+		usb_ept_fifo_flush(dev->out_ep);
+		usb_ept_enable(dev->out_ep, 0);
+		usb_free_endpoint(dev->out_ep);
+	}
+	if (dev->status_ep) {
+		usb_ept_fifo_flush(dev->status_ep);
+		usb_ept_enable(dev->status_ep, 0);
+		usb_free_endpoint(dev->status_ep);
+	}
+
+	if (dev->net) {
+		unregister_netdev(dev->net);
+		free_netdev(dev->net);
+	}
+	eth_bound = 0;
+	return;
+}
+
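+/* Bind-time setup: claim two interface numbers (control + data), enable the
+ * IAD, allocate the notification and bulk endpoints, patch the descriptors
+ * with the assigned numbers and addresses, then register the usb%d network
+ * device and the CDC string descriptors.
+ */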
+static void  eth_bind(void *_ctxt)
+{
+	struct eth_dev		*dev;
+	struct net_device	*net;
+	u8			zlp = 1;
+	struct usb_endpoint     *in_ep, *out_ep, *status_ep = NULL;
+	int			status = -ENOMEM;
+	int			ret;
+	struct device		*get_dev;
+
+	get_dev = usb_get_device();
+
+	ret = usb_msm_get_next_ifc_number(&usb_func_ether);
+	eth_control_intf.bInterfaceNumber = ret;
+	eth_control_intf.iInterface = string_control;
+	eth_IAD.bFirstInterface = ret;
+	eth_union_desc.bMasterInterface0 = ret;
+
+	ret = usb_msm_get_next_ifc_number(&usb_func_ether);
+	eth_data_alt_zero_intf.bInterfaceNumber = ret;
+	eth_data_alt_zero_intf.iInterface = 0;
+	eth_data_alt_one_intf.bInterfaceNumber = ret;
+	eth_data_alt_one_intf.iInterface = string_data;
+	eth_union_desc.bSlaveInterface0 = ret;
+
+	/* Enable IAD */
+	usb_msm_enable_iad();
+
+	/* Configuring STATUS endpoint */
+	status_ep = usb_alloc_endpoint(USB_DIR_IN);
+	status_ep->max_pkt = 64;
+
+	eth_control_intf_hs_int_in_ep_desc.bEndpointAddress =
+						USB_DIR_IN | status_ep->num;
+	eth_control_intf_hs_int_in_ep_desc.wMaxPacketSize =
+						status_ep->max_pkt;
+	eth_control_intf_fs_int_in_ep_desc.bEndpointAddress =
+						USB_DIR_IN | status_ep->num;
+	eth_control_intf_hs_int_in_ep_desc.bInterval = 4;
+
+	/* Configuring OUT endpoint */
+	out_ep = usb_alloc_endpoint(USB_DIR_OUT);
+	out_ep->max_pkt = 512;
+	eth_data_intf_hs_bulk_out_ep_desc.bEndpointAddress =
+						USB_DIR_OUT | out_ep->num;
+	eth_data_intf_hs_bulk_out_ep_desc.wMaxPacketSize = out_ep->max_pkt;
+	eth_data_intf_fs_bulk_out_ep_desc.bEndpointAddress =
+						USB_DIR_OUT | out_ep->num;
+
+	/*Configuring IN Endpoint*/
+	in_ep = usb_alloc_endpoint(USB_DIR_IN);
+	in_ep->max_pkt = 512;
+	eth_data_intf_hs_bulk_in_ep_desc.bEndpointAddress =
+						USB_DIR_IN | in_ep->num;
+	eth_data_intf_hs_bulk_in_ep_desc.wMaxPacketSize = in_ep->max_pkt;
+	eth_data_intf_fs_bulk_in_ep_desc.bEndpointAddress =
+						USB_DIR_IN | in_ep->num;
+
+	net = alloc_etherdev(sizeof *dev);
+	if (!net) {
+		printk(KERN_DEBUG "eth_bind: alloc_etherdev failed\n");
+		return;
+	}
+	dev = netdev_priv(net);
+	spin_lock_init(&dev->lock);
+	spin_lock_init(&dev->req_lock);
+	INIT_WORK(&dev->work, eth_work);
+	INIT_LIST_HEAD(&dev->tx_reqs);
+	INIT_LIST_HEAD(&dev->rx_reqs);
+
+	/* network device setup */
+	dev->net = net;
+	strcpy(net->name, "usb%d");
+	dev->zlp = zlp;
+	dev->in_ep = in_ep;
+	dev->out_ep = out_ep;
+	dev->status_ep = status_ep;
+
+	eth_device = dev;
+	usb_func_ether.context = eth_device;
+
+	/* Module params for these addresses should come from ID proms.
+	 * The host side address is used with CDC, and commonly
+	 * ends up in a persistent config database.  It's not clear if
+	 * host side code for the SAFE thing cares -- its original BLAN
+	 * thing didn't, Sharp never assigned those addresses on Zaurii.
+	 */
+	if (get_ether_addr(dev_addr, net->dev_addr))
+		dev_warn(get_dev,
+			"using random %s ethernet address\n", "self");
+	if (get_ether_addr(host_addr, dev->host_mac))
+		dev_warn(get_dev,
+			"using random %s ethernet address\n", "host");
+	snprintf(ethaddr, sizeof ethaddr, "%02X%02X%02X%02X%02X%02X",
+		dev->host_mac[0], dev->host_mac[1],
+		dev->host_mac[2], dev->host_mac[3],
+		dev->host_mac[4], dev->host_mac[5]);
+
+	net->change_mtu = usb_eth_change_mtu;
+	net->get_stats = eth_get_stats;
+	net->hard_start_xmit = eth_start_xmit;
+	net->open = eth_open;
+	net->stop = eth_stop;
+	/* watchdog_timeo, tx_timeout ...
+	 * set_multicast_list */
+	SET_ETHTOOL_OPS(net, &ops);
+	/* ... and maybe likewise for status transfer */
+	if (dev->status_ep) {
+		dev->stat_req = usb_ept_alloc_req(dev->status_ep,
+					STATUS_BYTECOUNT);
+		if (!dev->stat_req) {
+			usb_ept_free_req(dev->status_ep, dev->req);
+			goto fail;
+		}
+		dev->stat_req->context = NULL;
+	}
+	/* finish hookup to lower layer ... */
+	/* two kinds of host-initiated state changes:
+	 *  - iff DATA transfer is active, carrier is "on"
+	 *  - tx queueing enabled if open *and* carrier is "on"
+	 */
+	netif_stop_queue(dev->net);
+	netif_carrier_off(dev->net);
+
+	SET_NETDEV_DEV(dev->net, get_dev);
+	status = register_netdev(dev->net);
+	if (status < 0)
+		goto fail1;
+
+	INFO(dev, "%s, version: " DRIVER_VERSION "\n", driver_desc);
+	INFO(dev, "MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+		net->dev_addr[0], net->dev_addr[1],
+		net->dev_addr[2], net->dev_addr[3],
+		net->dev_addr[4], net->dev_addr[5]);
+
+	INFO(dev, "HOST MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+		dev->host_mac[0], dev->host_mac[1],
+		dev->host_mac[2], dev->host_mac[3],
+		dev->host_mac[4], dev->host_mac[5]);
+
+	string_data = usb_msm_get_next_strdesc_id("Ethernet Data");
+	if (string_data != 0) {
+		string_control = usb_msm_get_next_strdesc_id
+				 ("CDC Communications Control");
+		if (string_control != 0) {
+			string_ethaddr = usb_msm_get_next_strdesc_id(ethaddr);
+			if (string_ethaddr != 0) {
+				eth_ether_desc.iMACAddress = string_ethaddr;
+				eth_bound = 1;
+				return ;
+			}
+		}
+	}
+fail1:
+	dev_dbg(get_dev, "register_netdev failed, %d\n", status);
+fail:
+	eth_bound = 1;
+	printk(KERN_INFO "eth_bind: returning from eth_bind\n");
+	return;
+}
+
+
+static struct usb_function usb_func_ether = {
+	.name		= "ethernet",
+	.bind		= eth_bind,
+	.unbind		= eth_unbind,
+	.configure	= eth_configure,
+	.disconnect	= eth_disconnect,
+	.setup		= eth_setup,
+	.set_interface	= eth_set_interface,
+	.get_interface	= eth_get_interface,
+};
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL v2");
+
+#define TOTAL_ETH_DESCRIPTORS 11
+struct usb_descriptor_header *eth_hs_descriptors[TOTAL_ETH_DESCRIPTORS];
+struct usb_descriptor_header *eth_fs_descriptors[TOTAL_ETH_DESCRIPTORS];
+
+static int __init init(void)
+{
+	int rc;
+
+	eth_hs_descriptors[0] = (struct usb_descriptor_header *)
+				&eth_IAD;
+	eth_hs_descriptors[1] = (struct usb_descriptor_header *)
+				&eth_control_intf;
+	eth_hs_descriptors[2] = (struct usb_descriptor_header *)
+				&eth_header_desc;
+	eth_hs_descriptors[3] = (struct usb_descriptor_header *)
+				&eth_union_desc;
+	eth_hs_descriptors[4] = (struct usb_descriptor_header *)
+				&eth_ether_desc;
+	eth_hs_descriptors[5] = (struct usb_descriptor_header *)
+				&eth_control_intf_hs_int_in_ep_desc;
+	eth_hs_descriptors[6] = (struct usb_descriptor_header *)
+				&eth_data_alt_zero_intf;
+	eth_hs_descriptors[7] = (struct usb_descriptor_header *)
+				&eth_data_alt_one_intf;
+	eth_hs_descriptors[8] = (struct usb_descriptor_header *)
+				&eth_data_intf_hs_bulk_out_ep_desc;
+	eth_hs_descriptors[9] = (struct usb_descriptor_header *)
+				&eth_data_intf_hs_bulk_in_ep_desc;
+	eth_hs_descriptors[10] = NULL;
+
+	eth_fs_descriptors[0] = (struct usb_descriptor_header *)&eth_IAD;
+	eth_fs_descriptors[1] = (struct usb_descriptor_header *)
+				&eth_control_intf;
+	eth_fs_descriptors[2] = (struct usb_descriptor_header *)
+				&eth_header_desc;
+	eth_fs_descriptors[3] = (struct usb_descriptor_header *)&eth_union_desc;
+	eth_fs_descriptors[4] = (struct usb_descriptor_header *)&eth_ether_desc;
+	eth_fs_descriptors[5] = (struct usb_descriptor_header *)
+				&eth_control_intf_fs_int_in_ep_desc;
+	eth_fs_descriptors[6] = (struct usb_descriptor_header *)
+				&eth_data_alt_zero_intf;
+	eth_fs_descriptors[7] = (struct usb_descriptor_header *)
+				&eth_data_alt_one_intf;
+	eth_fs_descriptors[8] = (struct usb_descriptor_header *)
+				&eth_data_intf_fs_bulk_out_ep_desc;
+	eth_fs_descriptors[9] = (struct usb_descriptor_header *)
+				&eth_data_intf_fs_bulk_in_ep_desc;
+	eth_fs_descriptors[10] = NULL;
+
+	usb_func_ether.hs_descriptors = eth_hs_descriptors;
+	usb_func_ether.fs_descriptors = eth_fs_descriptors;
+	rc = usb_function_register(&usb_func_ether);
+
+	if (rc < 0)
+		printk(KERN_INFO "cdcecm init: usb function register failed\n");
+	return rc;
+}
+module_init(init);
+
+static void __exit eth_cleanup(void)
+{
+	struct eth_dev          *dev = eth_device;
+
+	usb_function_unregister(&usb_func_ether);
+	if (dev) {
+		dev->net = NULL;
+		dev = NULL;
+	}
+}
+module_exit(eth_cleanup);
diff --git a/drivers/usb/function/loopback.c b/drivers/usb/function/loopback.c
new file mode 100644
index 0000000..d7c93a3
--- /dev/null
+++ b/drivers/usb/function/loopback.c
@@ -0,0 +1,128 @@
+/* drivers/usb/function/loopback.c
+ *
+ * Simple Loopback Function Device
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "usb_function.h"
+
+struct loopback_context
+{
+	struct usb_endpoint *out;
+	struct usb_endpoint *in;
+	struct usb_request *req_out;
+	struct usb_request *req_in;
+};
+
+static struct loopback_context _context;
+
+static void loopback_bind(struct usb_endpoint **ept, void *_ctxt)
+{
+	struct loopback_context *ctxt = _ctxt;
+
+	ctxt->out = ept[0];
+	ctxt->in = ept[1];
+
+	printk(KERN_INFO "loopback_bind() %p, %p\n", ctxt->out, ctxt->in);
+
+	ctxt->req_out = usb_ept_alloc_req(ctxt->out, 4096);
+	ctxt->req_in = usb_ept_alloc_req(ctxt->in, 4096);
+}
+
+static void loopback_queue_in(struct loopback_context *ctxt, void *data, unsigned len);
+static void loopback_queue_out(struct loopback_context *ctxt);
+
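+/* Simple echo loop: every completed OUT transfer is copied back out on the
+ * IN endpoint, and every completed IN transfer re-queues the OUT request.
+ */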
+static void loopback_in_complete(struct usb_endpoint *ept, struct usb_request *req)
+{
+	struct loopback_context *ctxt = req->context;
+	printk(KERN_INFO "loopback_in_complete (%d)\n", req->actual);
+	loopback_queue_out(ctxt);
+}
+
+static void loopback_out_complete(struct usb_endpoint *ept, struct usb_request *req)
+{
+	struct loopback_context *ctxt = req->context;
+	printk(KERN_INFO "loopback_out_complete (%d)\n", req->actual);
+
+	if (req->status == 0) {
+		loopback_queue_in(ctxt, req->buf, req->actual);
+	} else {
+		loopback_queue_out(ctxt);
+	}
+}
+
+static void loopback_queue_out(struct loopback_context *ctxt)
+{
+	struct usb_request *req = ctxt->req_out;
+
+	req->complete = loopback_out_complete;
+	req->context = ctxt;
+	req->length = 4096;
+
+	usb_ept_queue_xfer(ctxt->out, req);
+}
+
+static void loopback_queue_in(struct loopback_context *ctxt, void *data, unsigned len)
+{
+	struct usb_request *req = ctxt->req_in;
+
+	memcpy(req->buf, data, len);
+	req->complete = loopback_in_complete;
+	req->context = ctxt;
+	req->length = len;
+
+	usb_ept_queue_xfer(ctxt->in, req);
+}
+
+static void loopback_configure(int configured, void *_ctxt)
+{
+	struct loopback_context *ctxt = _ctxt;
+	printk(KERN_INFO "loopback_configure() %d\n", configured);
+
+	if (configured) {
+		loopback_queue_out(ctxt);
+	} else {
+		/* all pending requests will be canceled */
+	}
+}
+
+static struct usb_function usb_func_loopback = {
+	.bind = loopback_bind,
+	.configure = loopback_configure,
+
+	.name = "loopback",
+	.context = &_context,
+
+	.ifc_class = 0xff,
+	.ifc_subclass = 0xff,
+	.ifc_protocol = 0xff,
+
+	.ifc_name = "loopback",
+
+	.ifc_ept_count = 2,
+	.ifc_ept_type = { EPT_BULK_OUT, EPT_BULK_IN },
+};
+
+static int __init loopback_init(void)
+{
+	printk(KERN_INFO "loopback_init()\n");
+	return usb_function_register(&usb_func_loopback);
+}
+
+module_init(loopback_init);
diff --git a/drivers/usb/function/mass_storage.c b/drivers/usb/function/mass_storage.c
new file mode 100644
index 0000000..f679cd0
--- /dev/null
+++ b/drivers/usb/function/mass_storage.c
@@ -0,0 +1,3009 @@
+/* drivers/usb/function/mass_storage.c
+ *
+ * Function Driver for USB Mass Storage
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * Based heavily on the file_storage gadget driver in
+ * drivers/usb/gadget/file_storage.c and licensed under the same terms:
+ *
+ * Copyright (C) 2003-2007 Alan Stern
+ * All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ *    to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+//#define DEBUG
+//#define VERBOSE_DEBUG
+//#define DUMP_MSGS
+
+
+#include <linux/blkdev.h>
+#include <linux/completion.h>
+#include <linux/dcache.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fcntl.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/kref.h>
+#include <linux/kthread.h>
+#include <linux/limits.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/switch.h>
+#include <linux/freezer.h>
+#include <linux/utsname.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/mass_storage_function.h>
+#include <linux/usb_usual.h>
+#include <linux/platform_device.h>
+#include <linux/wakelock.h>
+
+#include "usb_function.h"
+
+/*-------------------------------------------------------------------------*/
+
+#define DRIVER_NAME		"usb_mass_storage"
+#define MAX_LUNS		8
+
+#ifdef DEBUG
+#define LDBG(lun, fmt, args...) \
+	dev_dbg(&(lun)->dev , fmt , ## args)
+#define MDBG(fmt,args...) \
+	printk(KERN_DEBUG DRIVER_NAME ": " fmt , ## args)
+#else
+#define LDBG(lun, fmt, args...) \
+	do { } while (0)
+#define MDBG(fmt,args...) \
+	do { } while (0)
+#undef VERBOSE_DEBUG
+#undef DUMP_MSGS
+#endif /* DEBUG */
+
+#ifdef VERBOSE_DEBUG
+#define VLDBG	LDBG
+#else
+#define VLDBG(lun, fmt, args...) \
+	do { } while (0)
+#endif /* VERBOSE_DEBUG */
+
+#define LERROR(lun, fmt, args...) \
+	dev_err(&(lun)->dev , fmt , ## args)
+#define LWARN(lun, fmt, args...) \
+	dev_warn(&(lun)->dev , fmt , ## args)
+#define LINFO(lun, fmt, args...) \
+	dev_info(&(lun)->dev , fmt , ## args)
+
+#define MINFO(fmt,args...) \
+	printk(KERN_INFO DRIVER_NAME ": " fmt , ## args)
+
+#define DBG(d, fmt, args...) \
+	dev_dbg(&(d)->pdev->dev , fmt , ## args)
+#define VDBG(d, fmt, args...) \
+	dev_vdbg(&(d)->pdev->dev , fmt , ## args)
+#define ERROR(d, fmt, args...) \
+	dev_err(&(d)->pdev->dev , fmt , ## args)
+#define MS_WARN(d, fmt, args...) \
+	dev_warn(&(d)->pdev->dev , fmt , ## args)
+#define INFO(d, fmt, args...) \
+	dev_info(&(d)->pdev->dev , fmt , ## args)
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Bulk-only data structures */
+
+/* Command Block Wrapper */
+struct bulk_cb_wrap {
+	__le32	Signature;		/* Contains 'USBC' */
+	u32	Tag;			/* Unique per command id */
+	__le32	DataTransferLength;	/* Size of the data */
+	u8	Flags;			/* Direction in bit 7 */
+	u8	Lun;			/* LUN (normally 0) */
+	u8	Length;			/* Of the CDB, <= MAX_COMMAND_SIZE */
+	u8	CDB[16];		/* Command Data Block */
+};
+
+#define USB_BULK_CB_WRAP_LEN	31
+#define USB_BULK_CB_SIG		0x43425355	/* Spells out USBC */
+#define USB_BULK_IN_FLAG	0x80
+
+/* Command Status Wrapper */
+struct bulk_cs_wrap {
+	__le32	Signature;		/* Should = 'USBS' */
+	u32	Tag;			/* Same as original command */
+	__le32	Residue;		/* Amount not transferred */
+	u8	Status;			/* See below */
+};
+
+#define USB_BULK_CS_WRAP_LEN	13
+#define USB_BULK_CS_SIG		0x53425355	/* Spells out 'USBS' */
+#define USB_STATUS_PASS		0
+#define USB_STATUS_FAIL		1
+#define USB_STATUS_PHASE_ERROR	2
+
+/* Bulk-only class specific requests */
+#define USB_BULK_RESET_REQUEST		0xff
+#define USB_BULK_GET_MAX_LUN_REQUEST	0xfe
+
+/* Length of a SCSI Command Data Block */
+#define MAX_COMMAND_SIZE	16
+
+/* SCSI commands that we recognize */
+#define SC_FORMAT_UNIT			0x04
+#define SC_INQUIRY			0x12
+#define SC_MODE_SELECT_6		0x15
+#define SC_MODE_SELECT_10		0x55
+#define SC_MODE_SENSE_6			0x1a
+#define SC_MODE_SENSE_10		0x5a
+#define SC_PREVENT_ALLOW_MEDIUM_REMOVAL	0x1e
+#define SC_READ_6			0x08
+#define SC_READ_10			0x28
+#define SC_READ_12			0xa8
+#define SC_READ_CAPACITY		0x25
+#define SC_READ_FORMAT_CAPACITIES	0x23
+#define SC_RELEASE			0x17
+#define SC_REQUEST_SENSE		0x03
+#define SC_RESERVE			0x16
+#define SC_SEND_DIAGNOSTIC		0x1d
+#define SC_START_STOP_UNIT		0x1b
+#define SC_SYNCHRONIZE_CACHE		0x35
+#define SC_TEST_UNIT_READY		0x00
+#define SC_VERIFY			0x2f
+#define SC_WRITE_6			0x0a
+#define SC_WRITE_10			0x2a
+#define SC_WRITE_12			0xaa
+
+/* SCSI Sense Key/Additional Sense Code/ASC Qualifier values */
+#define SS_NO_SENSE				0
+#define SS_COMMUNICATION_FAILURE		0x040800
+#define SS_INVALID_COMMAND			0x052000
+#define SS_INVALID_FIELD_IN_CDB			0x052400
+#define SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE	0x052100
+#define SS_LOGICAL_UNIT_NOT_SUPPORTED		0x052500
+#define SS_MEDIUM_NOT_PRESENT			0x023a00
+#define SS_MEDIUM_REMOVAL_PREVENTED		0x055302
+#define SS_NOT_READY_TO_READY_TRANSITION	0x062800
+#define SS_RESET_OCCURRED			0x062900
+#define SS_SAVING_PARAMETERS_NOT_SUPPORTED	0x053900
+#define SS_UNRECOVERED_READ_ERROR		0x031100
+#define SS_WRITE_ERROR				0x030c02
+#define SS_WRITE_PROTECTED			0x072700
+
+#define SK(x)		((u8) ((x) >> 16))	/* Sense Key byte, etc. */
+#define ASC(x)		((u8) ((x) >> 8))
+#define ASCQ(x)		((u8) (x))
+
+
+/*-------------------------------------------------------------------------*/
+
+struct lun {
+	struct file	*filp;
+	loff_t		file_length;
+	loff_t		num_sectors;
+
+	unsigned int	ro : 1;
+	unsigned int	prevent_medium_removal : 1;
+	unsigned int	registered : 1;
+	unsigned int	info_valid : 1;
+
+	u32		sense_data;
+	u32		sense_data_info;
+	u32		unit_attention_data;
+
+	struct device	dev;
+};
+
+#define backing_file_is_open(curlun)	((curlun)->filp != NULL)
+
+
+static struct lun *dev_to_lun(struct device *dev)
+{
+	return container_of(dev, struct lun, dev);
+}
+
+/* Big enough to hold our biggest descriptor */
+#define EP0_BUFSIZE	256
+#define DELAYED_STATUS	(EP0_BUFSIZE + 999)	/* An impossibly large value */
+
+/* Number of buffers for CBW, DATA and CSW */
+#ifdef CONFIG_USB_CSW_HACK
+#define NUM_BUFFERS	4
+#else
+#define NUM_BUFFERS	2
+#endif
+
+enum fsg_buffer_state {
+	BUF_STATE_EMPTY = 0,
+	BUF_STATE_FULL,
+	BUF_STATE_BUSY
+};
+
+struct fsg_buffhd {
+	void				*buf;
+	enum fsg_buffer_state		state;
+	struct fsg_buffhd		*next;
+
+	/* The NetChip 2280 is faster, and handles some protocol faults
+	 * better, if we don't submit any short bulk-out read requests.
+	 * So we will record the intended request length here. */
+	unsigned int			bulk_out_intended_length;
+
+	struct usb_request		*inreq;
+	int				inreq_busy;
+	struct usb_request		*outreq;
+	int				outreq_busy;
+};
+
+enum fsg_state {
+	/* This one isn't used anywhere */
+	FSG_STATE_COMMAND_PHASE = -10,
+
+	FSG_STATE_DATA_PHASE,
+	FSG_STATE_STATUS_PHASE,
+
+	FSG_STATE_IDLE = 0,
+	FSG_STATE_ABORT_BULK_OUT,
+	FSG_STATE_RESET,
+	FSG_STATE_CONFIG_CHANGE,
+	FSG_STATE_EXIT,
+	FSG_STATE_TERMINATED
+};
+
+enum data_direction {
+	DATA_DIR_UNKNOWN = 0,
+	DATA_DIR_FROM_HOST,
+	DATA_DIR_TO_HOST,
+	DATA_DIR_NONE
+};
+int can_stall = 1;
+
+struct fsg_dev {
+	/* lock protects: state and all the req_busy's */
+	spinlock_t		lock;
+
+	/* filesem protects: backing files in use */
+	struct rw_semaphore	filesem;
+
+	/* reference counting: wait until all LUNs are released */
+	struct kref		ref;
+
+	unsigned int		bulk_out_maxpacket;
+	enum fsg_state		state;		/* For exception handling */
+
+	u8			config, new_config;
+
+	unsigned int		running : 1;
+	unsigned int		phase_error : 1;
+	unsigned int		short_packet_received : 1;
+	unsigned int		bad_lun_okay : 1;
+
+	unsigned long		atomic_bitflags;
+#define REGISTERED		0
+#define CLEAR_BULK_HALTS	1
+#define SUSPENDED		2
+
+	struct usb_endpoint		*bulk_in;
+	struct usb_endpoint		*bulk_out;
+
+	struct fsg_buffhd	*next_buffhd_to_fill;
+	struct fsg_buffhd	*next_buffhd_to_drain;
+	struct fsg_buffhd	buffhds[NUM_BUFFERS];
+
+	int			thread_wakeup_needed;
+	struct completion	thread_notifier;
+	struct task_struct	*thread_task;
+
+	int			cmnd_size;
+	u8			cmnd[MAX_COMMAND_SIZE];
+	enum data_direction	data_dir;
+	u32			data_size;
+	u32			data_size_from_cmnd;
+	u32			tag;
+	unsigned int		lun;
+	u32			residue;
+	u32			usb_amount_left;
+
+	unsigned int		nluns;
+	struct lun		*luns;
+	struct lun		*curlun;
+
+	u32				buf_size;
+	const char		*vendor;
+	const char		*product;
+	int				release;
+
+	struct platform_device *pdev;
+	struct switch_dev sdev;
+	int	bound;
+	struct wake_lock wake_lock, wake_lock_idle;
+};
+static int send_status(struct fsg_dev *fsg);
+
+static int exception_in_progress(struct fsg_dev *fsg)
+{
+	return (fsg->state > FSG_STATE_IDLE);
+}
+
+/* Make bulk-out requests be divisible by the maxpacket size */
+static void set_bulk_out_req_length(struct fsg_dev *fsg,
+		struct fsg_buffhd *bh, unsigned int length)
+{
+	unsigned int	rem;
+
+	bh->bulk_out_intended_length = length;
+	rem = length % fsg->bulk_out_maxpacket;
+	if (rem > 0)
+		length += fsg->bulk_out_maxpacket - rem;
+	bh->outreq->length = length;
+}
+
+static struct fsg_dev			*the_fsg;
+
+static void	close_backing_file(struct fsg_dev *fsg, struct lun *curlun);
+static void	close_all_backing_files(struct fsg_dev *fsg);
+
+
+static struct usb_function		fsg_function;
+/*-------------------------------------------------------------------------*/
+
+#ifdef DUMP_MSGS
+
+static void dump_msg(struct fsg_dev *fsg, const char *label,
+		const u8 *buf, unsigned int length)
+{
+	if (length < 512) {
+		DBG(fsg, "%s, length %u:\n", label, length);
+		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
+				16, 1, buf, length, 0);
+	}
+}
+
+static void dump_cdb(struct fsg_dev *fsg)
+{}
+
+#else
+
+static void dump_msg(struct fsg_dev *fsg, const char *label,
+		const u8 *buf, unsigned int length)
+{}
+
+#ifdef VERBOSE_DEBUG
+
+static void dump_cdb(struct fsg_dev *fsg)
+{
+	print_hex_dump(KERN_DEBUG, "SCSI CDB: ", DUMP_PREFIX_NONE,
+			16, 1, fsg->cmnd, fsg->cmnd_size, 0);
+}
+
+#else
+
+static void dump_cdb(struct fsg_dev *fsg)
+{}
+
+#endif /* VERBOSE_DEBUG */
+#endif /* DUMP_MSGS */
+
+static int fsg_set_halt(struct fsg_dev *fsg, struct usb_endpoint *ep)
+{
+	const char  *name;
+
+	if (ep == fsg->bulk_in)
+		name = "bulk-in";
+	else if (ep == fsg->bulk_out)
+		name = "bulk-out";
+	else
+		return -1;
+
+	DBG(fsg, "%s set halt\n", name);
+	return usb_ept_set_halt(ep);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* Routines for unaligned data access */
+
+static u16 get_be16(u8 *buf)
+{
+	return ((u16) buf[0] << 8) | ((u16) buf[1]);
+}
+
+static u32 get_be32(u8 *buf)
+{
+	return ((u32) buf[0] << 24) | ((u32) buf[1] << 16) |
+			((u32) buf[2] << 8) | ((u32) buf[3]);
+}
+
+static void put_be16(u8 *buf, u16 val)
+{
+	buf[0] = val >> 8;
+	buf[1] = val;
+}
+
+static void put_be32(u8 *buf, u32 val)
+{
+	buf[0] = val >> 24;
+	buf[1] = val >> 16;
+	buf[2] = val >> 8;
+	buf[3] = val & 0xff;
+}
+
+/*-------------------------------------------------------------------------*/
+
+
+/* There is only one interface. */
+#define USB_SC_SCSI     0x06            /* Transparent SCSI */
+#define USB_PR_BULK     0x50            /* Bulk-only */
+static struct usb_interface_descriptor
+intf_desc = {
+	.bLength 		= sizeof intf_desc,
+	.bDescriptorType 	= USB_DT_INTERFACE,
+	.bNumEndpoints 		= 2,
+	.bInterfaceClass 	= USB_CLASS_MASS_STORAGE,
+	.bInterfaceSubClass 	= USB_SC_SCSI,
+	.bInterfaceProtocol 	= USB_PR_BULK,
+};
+
+
+static struct usb_endpoint_descriptor
+hs_bulk_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor
+hs_bulk_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+	.bInterval =		0,
+};
+
+static struct usb_endpoint_descriptor
+fs_bulk_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(64),
+};
+
+static struct usb_endpoint_descriptor
+fs_bulk_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(64),
+	.bInterval =		0,
+};
+
+
+static struct usb_descriptor_header *hs_function[] = {
+	(struct usb_descriptor_header *) &intf_desc,
+	(struct usb_descriptor_header *) &hs_bulk_in_desc,
+	(struct usb_descriptor_header *) &hs_bulk_out_desc,
+	NULL,
+};
+static struct usb_descriptor_header *fs_function[] = {
+	(struct usb_descriptor_header *) &intf_desc,
+	(struct usb_descriptor_header *) &fs_bulk_in_desc,
+	(struct usb_descriptor_header *) &fs_bulk_out_desc,
+	NULL,
+};
+/*-------------------------------------------------------------------------*/
+
+/* These routines may be called in process context or in_irq */
+
+/* Caller must hold fsg->lock */
+static void wakeup_thread(struct fsg_dev *fsg)
+{
+	/* Tell the main thread that something has happened */
+	fsg->thread_wakeup_needed = 1;
+	if (fsg->thread_task)
+		wake_up_process(fsg->thread_task);
+}
+
+
+static void raise_exception(struct fsg_dev *fsg, enum fsg_state new_state)
+{
+	unsigned long		flags;
+
+	DBG(fsg, "raise_exception %d\n", (int)new_state);
+	/* Do nothing if a higher-priority exception is already in progress.
+	 * If a lower-or-equal priority exception is in progress, preempt it
+	 * and notify the main thread by sending it a signal. */
+	spin_lock_irqsave(&fsg->lock, flags);
+	if (fsg->state <= new_state) {
+		fsg->state = new_state;
+		if (fsg->thread_task)
+			send_sig_info(SIGUSR1, SEND_SIG_FORCED,
+					fsg->thread_task);
+	}
+	spin_unlock_irqrestore(&fsg->lock, flags);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Bulk and interrupt endpoint completion handlers.
+ * These always run in_irq. */
+
+static void bulk_in_complete(struct usb_endpoint *ep, struct usb_request *req)
+{
+	struct fsg_dev		*fsg = the_fsg;
+	struct fsg_buffhd	*bh = req->context;
+	unsigned long		flags;
+
+	if (req->status || req->actual != req->length)
+		DBG(fsg, "%s --> %d, %u/%u\n", __func__,
+				req->status, req->actual, req->length);
+
+	/* Hold the lock while we update the request and buffer states */
+	if (req->status == 0) {
+		smp_wmb();
+		spin_lock_irqsave(&fsg->lock, flags);
+		bh->inreq_busy = 0;
+		bh->state = BUF_STATE_EMPTY;
+		wakeup_thread(fsg);
+		spin_unlock_irqrestore(&fsg->lock, flags);
+	} else
+		bh->inreq_busy = 0;
+}
+
+static void bulk_out_complete(struct usb_endpoint *ep, struct usb_request *req)
+{
+	struct fsg_dev		*fsg = the_fsg;
+	struct fsg_buffhd	*bh = req->context;
+	unsigned long		flags;
+
+	dump_msg(fsg, "bulk-out", req->buf, req->actual);
+	if (req->status || req->actual != bh->bulk_out_intended_length)
+		DBG(fsg, "%s --> %d, %u/%u\n", __func__,
+				req->status, req->actual,
+				bh->bulk_out_intended_length);
+
+	/* Hold the lock while we update the request and buffer states */
+	if (req->status == 0) {
+		smp_wmb();
+		spin_lock_irqsave(&fsg->lock, flags);
+		bh->outreq_busy = 0;
+		bh->state = BUF_STATE_FULL;
+		wakeup_thread(fsg);
+		spin_unlock_irqrestore(&fsg->lock, flags);
+	} else
+		bh->outreq_busy = 0;
+}
+
+static int fsg_setup(struct usb_ctrlrequest *ctrl, void *buf,
+			int len, void *context)
+{
+	struct fsg_dev		*fsg = context;
+	int			value = -EOPNOTSUPP;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+
+	if (!fsg->config)
+		return value;
+
+	if (w_index != intf_desc.bInterfaceNumber)
+		return value;
+
+	/* Handle Bulk-only class-specific requests */
+	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
+		switch (ctrl->bRequest) {
+		case USB_BULK_RESET_REQUEST:
+			if (ctrl->bRequestType != (USB_DIR_OUT |
+					USB_TYPE_CLASS | USB_RECIP_INTERFACE))
+				break;
+			if (w_value != 0) {
+				value = -EDOM;
+				break;
+			}
+
+			/* Raise an exception to stop the current operation
+			 * and reinitialize our state. */
+			DBG(fsg, "bulk reset request\n");
+			value = 0;
+			break;
+
+		case USB_BULK_GET_MAX_LUN_REQUEST:
+			if (ctrl->bRequestType != (USB_DIR_IN |
+					USB_TYPE_CLASS | USB_RECIP_INTERFACE))
+				break;
+			if (w_value != 0) {
+				value = -EDOM;
+				break;
+			}
+			VDBG(fsg, "get max LUN\n");
+			*(u8 *) buf = fsg->nluns - 1;
+			value = 1;
+			break;
+		}
+	}
+
+	if (value == -EOPNOTSUPP)
+		VDBG(fsg,
+			"unknown class-specific control req "
+			"%02x.%02x v%04x i%04x l%u\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			le16_to_cpu(ctrl->wValue), w_index, w_length);
+	return value;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* All the following routines run in process context */
+
+
+/* Use this for bulk or interrupt transfers, not ep0 */
+static void start_transfer(struct fsg_dev *fsg, struct usb_endpoint *ep,
+		struct usb_request *req, int *pbusy,
+		enum fsg_buffer_state *state)
+{
+	int	rc;
+	unsigned long		flags;
+
+	if (ep == fsg->bulk_in)
+		dump_msg(fsg, "bulk-in", req->buf, req->length);
+
+	spin_lock_irqsave(&fsg->lock, flags);
+	*pbusy = 1;
+	*state = BUF_STATE_BUSY;
+	spin_unlock_irqrestore(&fsg->lock, flags);
+	rc = usb_ept_queue_xfer(ep, req);
+	if (rc != 0) {
+		*pbusy = 0;
+		*state = BUF_STATE_EMPTY;
+
+		/* We can't do much more than wait for a reset */
+
+		/* Note: currently the net2280 driver fails zero-length
+		 * submissions if DMA is enabled. */
+		if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
+						req->length == 0))
+			MS_WARN(fsg, "error in submission: %s --> %d\n",
+				(ep == fsg->bulk_in ? "bulk-in" : "bulk-out"),
+				rc);
+	}
+}
+
+
+static int sleep_thread(struct fsg_dev *fsg)
+{
+	int	rc = 0;
+
+	/* Wait until a signal arrives or we are woken up */
+	for (;;) {
+		try_to_freeze();
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (signal_pending(current)) {
+			rc = -EINTR;
+			break;
+		}
+		if (fsg->thread_wakeup_needed)
+			break;
+		schedule();
+	}
+	__set_current_state(TASK_RUNNING);
+	fsg->thread_wakeup_needed = 0;
+	return rc;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int do_read(struct fsg_dev *fsg)
+{
+	struct lun		*curlun = fsg->curlun;
+	u32			lba;
+	struct fsg_buffhd	*bh;
+	int			rc;
+	u32			amount_left;
+	loff_t			file_offset, file_offset_tmp;
+	unsigned int		amount;
+	unsigned int		partial_page;
+	ssize_t			nread;
+
+	/* Get the starting Logical Block Address and check that it's
+	 * not too big */
+	if (fsg->cmnd[0] == SC_READ_6)
+		lba = (fsg->cmnd[1] << 16) | get_be16(&fsg->cmnd[2]);
+	else {
+		lba = get_be32(&fsg->cmnd[2]);
+
+		/* We allow DPO (Disable Page Out = don't save data in the
+		 * cache) and FUA (Force Unit Access = don't read from the
+		 * cache), but we don't implement them. */
+		if ((fsg->cmnd[1] & ~0x18) != 0) {
+			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+			return -EINVAL;
+		}
+	}
+	if (lba >= curlun->num_sectors) {
+		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+		return -EINVAL;
+	}
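+	/* The driver uses a fixed 512-byte logical block size, so the
+	 * byte offset into the backing file is simply the LBA shifted
+	 * left by 9. */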
+	file_offset = ((loff_t) lba) << 9;
+
+	/* Carry out the file reads */
+	amount_left = fsg->data_size_from_cmnd;
+	if (unlikely(amount_left == 0))
+		return -EIO;		/* No default reply */
+
+	for (;;) {
+
+		/* Figure out how much we need to read:
+		 * Try to read the remaining amount.
+		 * But don't read more than the buffer size.
+		 * And don't try to read past the end of the file.
+		 * Finally, if we're not at a page boundary, don't read past
+		 *	the next page.
+		 * If this means reading 0 then we were asked to read past
+		 *	the end of file. */
+		amount = min((unsigned int) amount_left,
+				(unsigned int)fsg->buf_size);
+		amount = min((loff_t) amount,
+				curlun->file_length - file_offset);
+		partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
+		if (partial_page > 0)
+			amount = min(amount, (unsigned int) PAGE_CACHE_SIZE -
+					partial_page);
+
+		/* Wait for the next buffer to become available */
+		bh = fsg->next_buffhd_to_fill;
+		while (bh->state != BUF_STATE_EMPTY) {
+			rc = sleep_thread(fsg);
+			if (rc)
+				return rc;
+		}
+
+		/* If we were asked to read past the end of file,
+		 * end with an empty buffer. */
+		if (amount == 0) {
+			curlun->sense_data =
+					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+			curlun->sense_data_info = file_offset >> 9;
+			curlun->info_valid = 1;
+			bh->inreq->length = 0;
+			bh->state = BUF_STATE_FULL;
+			break;
+		}
+
+		/* Perform the read */
+		file_offset_tmp = file_offset;
+		nread = vfs_read(curlun->filp,
+				(char __user *) bh->buf,
+				amount, &file_offset_tmp);
+		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
+				(unsigned long long) file_offset,
+				(int) nread);
+		if (signal_pending(current))
+			return -EINTR;
+
+		if (nread < 0) {
+			LDBG(curlun, "error in file read: %d\n",
+					(int) nread);
+			nread = 0;
+		} else if (nread < amount) {
+			LDBG(curlun, "partial file read: %d/%u\n",
+					(int) nread, amount);
+			nread -= (nread & 511);	/* Round down to a block */
+		}
+		file_offset  += nread;
+		amount_left  -= nread;
+		fsg->residue -= nread;
+		bh->inreq->length = nread;
+		bh->state = BUF_STATE_FULL;
+
+		/* If an error occurred, report it and its position */
+		if (nread < amount) {
+			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
+			curlun->sense_data_info = file_offset >> 9;
+			curlun->info_valid = 1;
+			break;
+		}
+
+		if (amount_left == 0)
+			break;		/* No more left to read */
+
+		/* Send this buffer and go read some more */
+		start_transfer(fsg, fsg->bulk_in, bh->inreq,
+				&bh->inreq_busy, &bh->state);
+		fsg->next_buffhd_to_fill = bh->next;
+	}
+
+	return -EIO;		/* No default reply */
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int do_write(struct fsg_dev *fsg)
+{
+	struct lun		*curlun = fsg->curlun;
+	u32			lba;
+	struct fsg_buffhd	*bh;
+	int			get_some_more;
+	u32			amount_left_to_req, amount_left_to_write;
+	loff_t			usb_offset, file_offset, file_offset_tmp;
+	unsigned int		amount;
+	unsigned int		partial_page;
+	ssize_t			nwritten;
+	int			rc;
+
+#ifdef CONFIG_USB_CSW_HACK
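+	/* csw_hack_sent records whether the CSW has already been sent
+	 * (before the data reached the backing store), so that status
+	 * is not reported twice and write failures get special handling. */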
+	int			csw_hack_sent = 0;
+	int			i;
+#endif
+	if (curlun->ro) {
+		curlun->sense_data = SS_WRITE_PROTECTED;
+		return -EINVAL;
+	}
+	curlun->filp->f_flags &= ~O_SYNC;	/* Default is not to wait */
+
+	/* Get the starting Logical Block Address and check that it's
+	 * not too big */
+	if (fsg->cmnd[0] == SC_WRITE_6)
+		lba = (fsg->cmnd[1] << 16) | get_be16(&fsg->cmnd[2]);
+	else {
+		lba = get_be32(&fsg->cmnd[2]);
+
+		/* We allow DPO (Disable Page Out = don't save data in the
+		 * cache) and FUA (Force Unit Access = write directly to the
+		 * medium).  We don't implement DPO; we implement FUA by
+		 * performing synchronous output. */
+		if ((fsg->cmnd[1] & ~0x18) != 0) {
+			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+			return -EINVAL;
+		}
+		if (fsg->cmnd[1] & 0x08)	/* FUA */
+			curlun->filp->f_flags |= O_SYNC;
+	}
+	if (lba >= curlun->num_sectors) {
+		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+		return -EINVAL;
+	}
+
+	/* Carry out the file writes */
+	get_some_more = 1;
+	file_offset = usb_offset = ((loff_t) lba) << 9;
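+	/* usb_offset tracks how far we have queued bulk-out requests from
+	 * the host; file_offset tracks how far the received data has been
+	 * written to the backing file.  The two advance independently. */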
+	amount_left_to_req = amount_left_to_write = fsg->data_size_from_cmnd;
+
+	while (amount_left_to_write > 0) {
+
+		/* Queue a request for more data from the host */
+		bh = fsg->next_buffhd_to_fill;
+		if (bh->state == BUF_STATE_EMPTY && get_some_more) {
+
+			/* Figure out how much we want to get:
+			 * Try to get the remaining amount.
+			 * But don't get more than the buffer size.
+			 * And don't try to go past the end of the file.
+			 * If we're not at a page boundary,
+			 *	don't go past the next page.
+			 * If this means getting 0, then we were asked
+			 *	to write past the end of file.
+			 * Finally, round down to a block boundary. */
+			amount = min(amount_left_to_req, (u32)fsg->buf_size);
+			amount = min((loff_t) amount, curlun->file_length -
+					usb_offset);
+			partial_page = usb_offset & (PAGE_CACHE_SIZE - 1);
+			if (partial_page > 0)
+				amount = min(amount, (unsigned int)
+						PAGE_CACHE_SIZE - partial_page);
+
+			if (amount == 0) {
+				get_some_more = 0;
+				curlun->sense_data =
+					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+				curlun->sense_data_info = usb_offset >> 9;
+				curlun->info_valid = 1;
+				continue;
+			}
+			amount -= (amount & 511);
+			if (amount == 0) {
+
+				/* Why were we asked to transfer a
+				 * partial block? */
+				get_some_more = 0;
+				continue;
+			}
+
+			/* Get the next buffer */
+			usb_offset += amount;
+			fsg->usb_amount_left -= amount;
+			amount_left_to_req -= amount;
+			if (amount_left_to_req == 0)
+				get_some_more = 0;
+
+			/* amount is always divisible by 512, hence by
+			 * the bulk-out maxpacket size */
+			bh->outreq->length = bh->bulk_out_intended_length =
+					amount;
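+			/* Remembered separately so bulk_out_complete()
+			 * can compare req->actual against the length
+			 * we actually asked for. */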
+			start_transfer(fsg, fsg->bulk_out, bh->outreq,
+					&bh->outreq_busy, &bh->state);
+			fsg->next_buffhd_to_fill = bh->next;
+			continue;
+		}
+
+		/* Write the received data to the backing file */
+		bh = fsg->next_buffhd_to_drain;
+		if (bh->state == BUF_STATE_EMPTY && !get_some_more)
+			break;			/* We stopped early */
+#ifdef CONFIG_USB_CSW_HACK
+		/*
+		 * If the CSW packet has already been submitted to the DCD,
+		 * the buffer is marked full; by also checking the residue
+		 * we make sure the CSW packet itself is not written to the
+		 * storage media.
+		 */
+		if (bh->state == BUF_STATE_FULL && fsg->residue) {
+#else
+		if (bh->state == BUF_STATE_FULL) {
+#endif
+			smp_rmb();
+			fsg->next_buffhd_to_drain = bh->next;
+			bh->state = BUF_STATE_EMPTY;
+
+			/* Did something go wrong with the transfer? */
+			if (bh->outreq->status != 0) {
+				curlun->sense_data = SS_COMMUNICATION_FAILURE;
+				curlun->sense_data_info = file_offset >> 9;
+				curlun->info_valid = 1;
+				break;
+			}
+
+			amount = bh->outreq->actual;
+			if (curlun->file_length - file_offset < amount) {
+				LERROR(curlun,
+	"write %u @ %llu beyond end %llu\n",
+	amount, (unsigned long long) file_offset,
+	(unsigned long long) curlun->file_length);
+				amount = curlun->file_length - file_offset;
+			}
+
+			/* Perform the write */
+			file_offset_tmp = file_offset;
+			nwritten = vfs_write(curlun->filp,
+					(char __user *) bh->buf,
+					amount, &file_offset_tmp);
+			VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
+					(unsigned long long) file_offset,
+					(int) nwritten);
+			if (signal_pending(current))
+				return -EINTR;		/* Interrupted! */
+
+			if (nwritten < 0) {
+				LDBG(curlun, "error in file write: %d\n",
+						(int) nwritten);
+				nwritten = 0;
+			} else if (nwritten < amount) {
+				LDBG(curlun, "partial file write: %d/%u\n",
+						(int) nwritten, amount);
+				nwritten -= (nwritten & 511);
+						/* Round down to a block */
+			}
+			file_offset += nwritten;
+			amount_left_to_write -= nwritten;
+			fsg->residue -= nwritten;
+
+			/* If an error occurred, report it and its position */
+			if (nwritten < amount) {
+#ifdef CONFIG_USB_CSW_HACK
+				/*
+				 * If the CSW has already been sent and a
+				 * write failure occurs, detach the storage
+				 * medium from the corresponding LUN; the
+				 * cable must be disconnected to recover
+				 * from this error.
+				 */
+				if (csw_hack_sent) {
+					if (backing_file_is_open(curlun)) {
+						close_backing_file(fsg, curlun);
+						curlun->unit_attention_data =
+							SS_MEDIUM_NOT_PRESENT;
+					}
+					break;
+				}
+#endif
+				curlun->sense_data = SS_WRITE_ERROR;
+				curlun->sense_data_info = file_offset >> 9;
+				curlun->info_valid = 1;
+				break;
+			}
+
+#ifdef CONFIG_USB_CSW_HACK
+			if ((nwritten == amount) && !csw_hack_sent) {
+				/*
+				 * Check whether any buffer is still in the
+				 * busy state; if so, the complete data has
+				 * not yet been received from the host, and
+				 * there is no point in sending the CSW
+				 * before it all arrives.
+				 */
+				for (i = 0; i < NUM_BUFFERS; i++) {
+					if (fsg->buffhds[i].state ==
+							BUF_STATE_BUSY)
+						break;
+				}
+				/* Check whether we received the complete
+				 * data from the host, before sending csw */
+				if (!amount_left_to_req && i == NUM_BUFFERS) {
+					csw_hack_sent = 1;
+					send_status(fsg);
+				}
+			}
+#endif
+			/* Did the host decide to stop early? */
+			if (bh->outreq->actual != bh->outreq->length) {
+				fsg->short_packet_received = 1;
+				break;
+			}
+			continue;
+		}
+
+		/* Wait for something to happen */
+		rc = sleep_thread(fsg);
+		if (rc)
+			return rc;
+	}
+
+	return -EIO;		/* No default reply */
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Sync the file data, don't bother with the metadata.
+ * The caller must own fsg->filesem.
+ * This code was copied from fs/buffer.c:sys_fdatasync(). */
+static int fsync_sub(struct lun *curlun)
+{
+	struct file	*filp = curlun->filp;
+	struct inode	*inode;
+	int		rc, err;
+
+	if (curlun->ro || !filp)
+		return 0;
+	if (!filp->f_op->fsync)
+		return -EINVAL;
+
+	inode = filp->f_path.dentry->d_inode;
+	mutex_lock(&inode->i_mutex);
+	rc = filemap_fdatawrite(inode->i_mapping);
+	err = filp->f_op->fsync(filp, filp->f_path.dentry, 1);
+	if (!rc)
+		rc = err;
+	err = filemap_fdatawait(inode->i_mapping);
+	if (!rc)
+		rc = err;
+	mutex_unlock(&inode->i_mutex);
+	VLDBG(curlun, "fdatasync -> %d\n", rc);
+	return rc;
+}
+
+static void fsync_all(struct fsg_dev *fsg)
+{
+	int	i;
+
+	for (i = 0; i < fsg->nluns; ++i)
+		fsync_sub(&fsg->luns[i]);
+}
+
+static int do_synchronize_cache(struct fsg_dev *fsg)
+{
+	struct lun	*curlun = fsg->curlun;
+	int		rc;
+
+	/* We ignore the requested LBA and write out all file's
+	 * dirty data buffers. */
+	rc = fsync_sub(curlun);
+	if (rc)
+		curlun->sense_data = SS_WRITE_ERROR;
+	return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static void invalidate_sub(struct lun *curlun)
+{
+	struct file	*filp = curlun->filp;
+	struct inode	*inode = filp->f_path.dentry->d_inode;
+	unsigned long	rc;
+
+	rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
+	VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc);
+}
+
+static int do_verify(struct fsg_dev *fsg)
+{
+	struct lun		*curlun = fsg->curlun;
+	u32			lba;
+	u32			verification_length;
+	struct fsg_buffhd	*bh = fsg->next_buffhd_to_fill;
+	loff_t			file_offset, file_offset_tmp;
+	u32			amount_left;
+	unsigned int		amount;
+	ssize_t			nread;
+
+	/* Get the starting Logical Block Address and check that it's
+	 * not too big */
+	lba = get_be32(&fsg->cmnd[2]);
+	if (lba >= curlun->num_sectors) {
+		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+		return -EINVAL;
+	}
+
+	/* We allow DPO (Disable Page Out = don't save data in the
+	 * cache) but we don't implement it. */
+	if ((fsg->cmnd[1] & ~0x10) != 0) {
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	verification_length = get_be16(&fsg->cmnd[7]);
+	if (unlikely(verification_length == 0))
+		return -EIO;		/* No default reply */
+
+	/* Prepare to carry out the file verify */
+	amount_left = verification_length << 9;
+	file_offset = ((loff_t) lba) << 9;
+
+	/* Write out all the dirty buffers before invalidating them */
+	fsync_sub(curlun);
+	if (signal_pending(current))
+		return -EINTR;
+
+	invalidate_sub(curlun);
+	if (signal_pending(current))
+		return -EINTR;
+
+	/* Just try to read the requested blocks */
+	while (amount_left > 0) {
+
+		/* Figure out how much we need to read:
+		 * Try to read the remaining amount, but not more than
+		 * the buffer size.
+		 * And don't try to read past the end of the file.
+		 * If this means reading 0 then we were asked to read
+		 * past the end of file. */
+		amount = min((unsigned int) amount_left,
+				(unsigned int)fsg->buf_size);
+		amount = min((loff_t) amount,
+				curlun->file_length - file_offset);
+		if (amount == 0) {
+			curlun->sense_data =
+					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+			curlun->sense_data_info = file_offset >> 9;
+			curlun->info_valid = 1;
+			break;
+		}
+
+		/* Perform the read */
+		file_offset_tmp = file_offset;
+		nread = vfs_read(curlun->filp,
+				(char __user *) bh->buf,
+				amount, &file_offset_tmp);
+		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
+				(unsigned long long) file_offset,
+				(int) nread);
+		if (signal_pending(current))
+			return -EINTR;
+
+		if (nread < 0) {
+			LDBG(curlun, "error in file verify: %d\n",
+					(int) nread);
+			nread = 0;
+		} else if (nread < amount) {
+			LDBG(curlun, "partial file verify: %d/%u\n",
+					(int) nread, amount);
+			nread -= (nread & 511);	/* Round down to a sector */
+		}
+		if (nread == 0) {
+			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
+			curlun->sense_data_info = file_offset >> 9;
+			curlun->info_valid = 1;
+			break;
+		}
+		file_offset += nread;
+		amount_left -= nread;
+	}
+	return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int do_inquiry(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	u8	*buf = (u8 *) bh->buf;
+
+	if (!fsg->curlun) {		/* Unsupported LUNs are okay */
+		fsg->bad_lun_okay = 1;
+		memset(buf, 0, 36);
+		buf[0] = 0x7f;		/* Unsupported, no device-type */
+		return 36;
+	}
+
+	memset(buf, 0, 8);	/* Direct-access device */
+
+	buf[1] = 0x80;	/* set removable bit */
+	buf[2] = 2;		/* ANSI SCSI level 2 */
+	buf[3] = 2;		/* SCSI-2 INQUIRY data format */
+	buf[4] = 31;		/* Additional length */
+				/* No special options */
+	sprintf(buf + 8, "%-8s%-16s%04x", fsg->vendor,
+			fsg->product, fsg->release);
+	return 36;
+}
+
+
+static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	struct lun	*curlun = fsg->curlun;
+	u8		*buf = (u8 *) bh->buf;
+	u32		sd, sdinfo;
+	int		valid;
+
+	/*
+	 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
+	 *
+	 * If a REQUEST SENSE command is received from an initiator
+	 * with a pending unit attention condition (before the target
+	 * generates the contingent allegiance condition), then the
+	 * target shall either:
+	 *   a) report any pending sense data and preserve the unit
+	 *	attention condition on the logical unit, or,
+	 *   b) report the unit attention condition, may discard any
+	 *	pending sense data, and clear the unit attention
+	 *	condition on the logical unit for that initiator.
+	 *
+	 * FSG normally uses option a); enable this code to use option b).
+	 */
+#if 0
+	if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
+		curlun->sense_data = curlun->unit_attention_data;
+		curlun->unit_attention_data = SS_NO_SENSE;
+	}
+#endif
+
+	if (!curlun) {		/* Unsupported LUNs are okay */
+		fsg->bad_lun_okay = 1;
+		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
+		sdinfo = 0;
+		valid = 0;
+	} else {
+		sd = curlun->sense_data;
+		sdinfo = curlun->sense_data_info;
+		valid = curlun->info_valid << 7;
+		curlun->sense_data = SS_NO_SENSE;
+		curlun->sense_data_info = 0;
+		curlun->info_valid = 0;
+	}
+
+	memset(buf, 0, 18);
+	buf[0] = valid | 0x70;			/* Valid, current error */
+	buf[2] = SK(sd);
+	put_be32(&buf[3], sdinfo);		/* Sense information */
+	buf[7] = 18 - 8;			/* Additional sense length */
+	buf[12] = ASC(sd);
+	buf[13] = ASCQ(sd);
+	return 18;
+}
+
+
+static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	struct lun	*curlun = fsg->curlun;
+	u32		lba = get_be32(&fsg->cmnd[2]);
+	int		pmi = fsg->cmnd[8];
+	u8		*buf = (u8 *) bh->buf;
+
+	/* Check the PMI and LBA fields */
+	if (pmi > 1 || (pmi == 0 && lba != 0)) {
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	put_be32(&buf[0], curlun->num_sectors - 1);	/* Max logical block */
+	put_be32(&buf[4], 512);				/* Block length */
+	return 8;
+}
+
+
+static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	struct lun	*curlun = fsg->curlun;
+	int		mscmnd = fsg->cmnd[0];
+	u8		*buf = (u8 *) bh->buf;
+	u8		*buf0 = buf;
+	int		pc, page_code;
+	int		changeable_values, all_pages;
+	int		valid_page = 0;
+	int		len, limit;
+
+	if ((fsg->cmnd[1] & ~0x08) != 0) {		/* Mask away DBD */
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+	pc = fsg->cmnd[2] >> 6;
+	page_code = fsg->cmnd[2] & 0x3f;
+	if (pc == 3) {
+		curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
+		return -EINVAL;
+	}
+	changeable_values = (pc == 1);
+	all_pages = (page_code == 0x3f);
+
+	/* Write the mode parameter header.  Fixed values are: default
+	 * medium type, no cache control (DPOFUA), and no block descriptors.
+	 * The only variable value is the WriteProtect bit.  We will fill in
+	 * the mode data length later. */
+	memset(buf, 0, 8);
+	if (mscmnd == SC_MODE_SENSE_6) {
+		buf[2] = (curlun->ro ? 0x80 : 0x00);		/* WP, DPOFUA */
+		buf += 4;
+		limit = 255;
+	} else {			/* SC_MODE_SENSE_10 */
+		buf[3] = (curlun->ro ? 0x80 : 0x00);		/* WP, DPOFUA */
+		buf += 8;
+		limit = 65535;
+	}
+
+	/* No block descriptors */
+
+	/* Disabled to work around USB reset problems with a Vista host. */
+#if 0
+	/* The mode pages, in numerical order.  The only page we support
+	 * is the Caching page. */
+	if (page_code == 0x08 || all_pages) {
+		valid_page = 1;
+		buf[0] = 0x08;		/* Page code */
+		buf[1] = 10;		/* Page length */
+		memset(buf+2, 0, 10);	/* None of the fields are changeable */
+
+		if (!changeable_values) {
+			buf[2] = 0x04;	/* Write cache enable, */
+					/* Read cache not disabled */
+					/* No cache retention priorities */
+			put_be16(&buf[4], 0xffff);  /* Don't disable prefetch */
+					/* Minimum prefetch = 0 */
+			put_be16(&buf[8], 0xffff);  /* Maximum prefetch */
+			/* Maximum prefetch ceiling */
+			put_be16(&buf[10], 0xffff);
+		}
+		buf += 12;
+	}
+#else
+	valid_page = 1;
+#endif
+
+	/* Check that a valid page was requested and the mode data length
+	 * isn't too long. */
+	len = buf - buf0;
+	if (!valid_page || len > limit) {
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	/*  Store the mode data length */
+	if (mscmnd == SC_MODE_SENSE_6)
+		buf0[0] = len - 1;
+	else
+		put_be16(buf0, len - 2);
+	return len;
+}
+
+static int do_start_stop(struct fsg_dev *fsg)
+{
+	struct lun	*curlun = fsg->curlun;
+	int		loej, start;
+
+	/* int immed = fsg->cmnd[1] & 0x01; */
+	loej = fsg->cmnd[4] & 0x02;
+	start = fsg->cmnd[4] & 0x01;
+
+	if (loej) {
+		/* eject request from the host */
+		if (backing_file_is_open(curlun)) {
+			close_backing_file(fsg, curlun);
+			curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
+		}
+	}
+
+	return 0;
+}
+
+static int do_prevent_allow(struct fsg_dev *fsg)
+{
+	struct lun	*curlun = fsg->curlun;
+	int		prevent;
+
+	prevent = fsg->cmnd[4] & 0x01;
+	if ((fsg->cmnd[4] & ~0x01) != 0) {		/* Mask away Prevent */
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	if (curlun->prevent_medium_removal && !prevent)
+		fsync_sub(curlun);
+	curlun->prevent_medium_removal = prevent;
+	return 0;
+}
+
+
+static int do_read_format_capacities(struct fsg_dev *fsg,
+			struct fsg_buffhd *bh)
+{
+	struct lun	*curlun = fsg->curlun;
+	u8		*buf = (u8 *) bh->buf;
+
+	buf[0] = buf[1] = buf[2] = 0;
+	buf[3] = 8;	/* Only the Current/Maximum Capacity Descriptor */
+	buf += 4;
+
+	put_be32(&buf[0], curlun->num_sectors);	/* Number of blocks */
+	put_be32(&buf[4], 512);				/* Block length */
+	buf[4] = 0x02;					/* Current capacity */
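+	/* put_be32() above put the 512-byte block length in bytes 4-7;
+	 * byte 4 is then overwritten with the Descriptor Code (0x02 =
+	 * formatted media), leaving bytes 5-7 as the block length. */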
+	return 12;
+}
+
+
+static int do_mode_select(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	struct lun	*curlun = fsg->curlun;
+
+	/* We don't support MODE SELECT */
+	curlun->sense_data = SS_INVALID_COMMAND;
+	return -EINVAL;
+}
+
+
+static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
+{
+	int     rc;
+
+	rc = fsg_set_halt(fsg, fsg->bulk_in);
+	if (rc == -EAGAIN)
+		DBG(fsg, "delayed bulk-in endpoint halt\n");
+	while (rc != 0) {
+		if (rc != -EAGAIN) {
+			DBG(fsg, "usb_ep_set_halt -> %d\n", rc);
+			rc = 0;
+			break;
+		}
+		/* Wait for a short time and then try again */
+		if (msleep_interruptible(100) != 0)
+			return -EINTR;
+		rc = usb_ept_set_halt(fsg->bulk_in);
+	}
+	return rc;
+}
+/*-------------------------------------------------------------------------*/
+#if 0
+static int write_zero(struct fsg_dev *fsg)
+{
+	struct fsg_buffhd	*bh;
+	int			rc;
+
+	DBG(fsg, "write_zero\n");
+	/* Wait for the next buffer to become available */
+	bh = fsg->next_buffhd_to_fill;
+	while (bh->state != BUF_STATE_EMPTY) {
+		rc = sleep_thread(fsg);
+		if (rc)
+			return rc;
+	}
+
+	bh->inreq->length = 0;
+	start_transfer(fsg, fsg->bulk_in, bh->inreq,
+			&bh->inreq_busy, &bh->state);
+
+	fsg->next_buffhd_to_fill = bh->next;
+	return 0;
+}
+#endif
+
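+/* When we cannot stall, read and discard any excess bulk-out data so the
+ * host and the device stay in sync for the next command. */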
+static int throw_away_data(struct fsg_dev *fsg)
+{
+	struct fsg_buffhd	*bh;
+	u32			amount;
+	int			rc;
+
+	DBG(fsg, "throw_away_data\n");
+	while ((bh = fsg->next_buffhd_to_drain)->state != BUF_STATE_EMPTY ||
+			fsg->usb_amount_left > 0) {
+
+		/* Throw away the data in a filled buffer */
+		if (bh->state == BUF_STATE_FULL) {
+			smp_rmb();
+			bh->state = BUF_STATE_EMPTY;
+			fsg->next_buffhd_to_drain = bh->next;
+
+			/* A short packet or an error ends everything */
+			if (bh->outreq->actual != bh->outreq->length ||
+					bh->outreq->status != 0) {
+				raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
+				return -EINTR;
+			}
+			continue;
+		}
+
+		/* Try to submit another request if we need one */
+		bh = fsg->next_buffhd_to_fill;
+		if (bh->state == BUF_STATE_EMPTY && fsg->usb_amount_left > 0) {
+			amount = min(fsg->usb_amount_left, (u32) fsg->buf_size);
+
+			/* amount is always divisible by 512, hence by
+			 * the bulk-out maxpacket size */
+			bh->outreq->length = bh->bulk_out_intended_length =
+					amount;
+			start_transfer(fsg, fsg->bulk_out, bh->outreq,
+					&bh->outreq_busy, &bh->state);
+			fsg->next_buffhd_to_fill = bh->next;
+			fsg->usb_amount_left -= amount;
+			continue;
+		}
+
+		/* Otherwise wait for something to happen */
+		rc = sleep_thread(fsg);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+
+static int finish_reply(struct fsg_dev *fsg)
+{
+	struct fsg_buffhd	*bh = fsg->next_buffhd_to_fill;
+	int			rc = 0;
+	int			i;
+
+	switch (fsg->data_dir) {
+	case DATA_DIR_NONE:
+		break;			/* Nothing to send */
+
+	case DATA_DIR_UNKNOWN:
+		rc = -EINVAL;
+		break;
+
+	/* All but the last buffer of data must have already been sent */
+	case DATA_DIR_TO_HOST:
+		if (fsg->data_size == 0)
+			;		/* Nothing to send */
+
+		/* If there's no residue, simply send the last buffer */
+		else if (fsg->residue == 0) {
+			start_transfer(fsg, fsg->bulk_in, bh->inreq,
+					&bh->inreq_busy, &bh->state);
+			fsg->next_buffhd_to_fill = bh->next;
+		} else {
+			if (can_stall) {
+				bh->state = BUF_STATE_EMPTY;
+				for (i = 0; i < NUM_BUFFERS; ++i) {
+					struct fsg_buffhd
+							*bh = &fsg->buffhds[i];
+					while (bh->state != BUF_STATE_EMPTY) {
+						rc = sleep_thread(fsg);
+						if (rc)
+							return rc;
+					}
+				}
+				rc = halt_bulk_in_endpoint(fsg);
+			} else {
+				start_transfer(fsg, fsg->bulk_in, bh->inreq,
+						&bh->inreq_busy, &bh->state);
+				fsg->next_buffhd_to_fill = bh->next;
+			}
+#if 0
+	/* this is unnecessary, and was causing problems with MacOS */
+			if (length > 0)
+				write_zero(fsg);
+#endif
+		}
+		break;
+
+	/* We have processed all we want from the data the host has sent.
+	 * There may still be outstanding bulk-out requests. */
+	case DATA_DIR_FROM_HOST:
+		if (fsg->residue == 0)
+			;		/* Nothing to receive */
+
+		/* Did the host stop sending unexpectedly early? */
+		else if (fsg->short_packet_received) {
+			raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
+			rc = -EINTR;
+		}
+
+		/* We haven't processed all the incoming data.  Even though
+		 * we may be allowed to stall, doing so would cause a race.
+		 * The controller may already have ACK'ed all the remaining
+		 * bulk-out packets, in which case the host wouldn't see a
+		 * STALL.  Not realizing the endpoint was halted, it wouldn't
+		 * clear the halt -- leading to problems later on. */
+#if 0
+		fsg_set_halt(fsg, fsg->bulk_out);
+		raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
+		rc = -EINTR;
+#endif
+
+		/* We can't stall.  Read in the excess data and throw it
+		 * all away. */
+		else
+			rc = throw_away_data(fsg);
+		break;
+	}
+	return rc;
+}
+
+
+static int send_status(struct fsg_dev *fsg)
+{
+	struct lun		*curlun = fsg->curlun;
+	struct fsg_buffhd	*bh;
+	int			rc;
+	u8			status = USB_STATUS_PASS;
+	u32			sd, sdinfo = 0;
+	struct bulk_cs_wrap	*csw;
+
+	DBG(fsg, "send_status\n");
+	/* Wait for the next buffer to become available */
+	bh = fsg->next_buffhd_to_fill;
+	while (bh->state != BUF_STATE_EMPTY) {
+		rc = sleep_thread(fsg);
+		if (rc)
+			return rc;
+	}
+
+	if (curlun) {
+		sd = curlun->sense_data;
+		sdinfo = curlun->sense_data_info;
+	} else if (fsg->bad_lun_okay)
+		sd = SS_NO_SENSE;
+	else
+		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
+
+	if (fsg->phase_error) {
+		DBG(fsg, "sending phase-error status\n");
+		status = USB_STATUS_PHASE_ERROR;
+		sd = SS_INVALID_COMMAND;
+	} else if (sd != SS_NO_SENSE) {
+		DBG(fsg, "sending command-failure status\n");
+		status = USB_STATUS_FAIL;
+		VDBG(fsg, "  sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
+				"  info x%x\n",
+				SK(sd), ASC(sd), ASCQ(sd), sdinfo);
+	}
+
+	csw = bh->buf;
+
+	/* Store and send the Bulk-only CSW */
+	csw->Signature = __constant_cpu_to_le32(USB_BULK_CS_SIG);
+	csw->Tag = fsg->tag;
+#ifdef CONFIG_USB_CSW_HACK
+	/* Since the CSW is sent early, before the data is written to the
+	 * storage media, set the residue to zero on the assumption that
+	 * the write will succeed.
+	 */
+	csw->Residue = 0;
+#else
+	csw->Residue = cpu_to_le32(fsg->residue);
+#endif
+	csw->Status = status;
+
+	bh->inreq->length = USB_BULK_CS_WRAP_LEN;
+	start_transfer(fsg, fsg->bulk_in, bh->inreq,
+			&bh->inreq_busy, &bh->state);
+
+	fsg->next_buffhd_to_fill = bh->next;
+	return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Check whether the command is properly formed and whether its data size
+ * and direction agree with the values we already have. */
+static int check_command(struct fsg_dev *fsg, int cmnd_size,
+		enum data_direction data_dir, unsigned int mask,
+		int needs_medium, const char *name)
+{
+	int			i;
+	int			lun = fsg->cmnd[1] >> 5;
+	static const char	dirletter[4] = {'u', 'o', 'i', 'n'};
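+	/* One letter per data direction, used only for the debug output:
+	 * 'u'nknown, 'o'ut (from the host), 'i'n (to the host), 'n'one. */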
+	char			hdlen[20];
+	struct lun		*curlun;
+
+	hdlen[0] = 0;
+	if (fsg->data_dir != DATA_DIR_UNKNOWN)
+		sprintf(hdlen, ", H%c=%u", dirletter[(int) fsg->data_dir],
+				fsg->data_size);
+	VDBG(fsg, "SCSI command: %s;  Dc=%d, D%c=%u;  Hc=%d%s\n",
+			name, cmnd_size, dirletter[(int) data_dir],
+			fsg->data_size_from_cmnd, fsg->cmnd_size, hdlen);
+
+	/* We can't reply at all until we know the correct data direction
+	 * and size. */
+	if (fsg->data_size_from_cmnd == 0)
+		data_dir = DATA_DIR_NONE;
+	if (fsg->data_dir == DATA_DIR_UNKNOWN) {	/* CB or CBI */
+		fsg->data_dir = data_dir;
+		fsg->data_size = fsg->data_size_from_cmnd;
+
+	} else {					/* Bulk-only */
+		if (fsg->data_size < fsg->data_size_from_cmnd) {
+
+			/* Host data size < Device data size is a phase error.
+			 * Carry out the command, but only transfer as much
+			 * as we are allowed. */
+			DBG(fsg, "phase error 1\n");
+			fsg->data_size_from_cmnd = fsg->data_size;
+			fsg->phase_error = 1;
+		}
+	}
+	fsg->residue = fsg->usb_amount_left = fsg->data_size;
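+	/* residue is what the CSW will later report as untransferred;
+	 * usb_amount_left is how much still has to move over the bulk
+	 * pipes. */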
+
+	/* Conflicting data directions is a phase error */
+	if (fsg->data_dir != data_dir && fsg->data_size_from_cmnd > 0) {
+		fsg->phase_error = 1;
+		DBG(fsg, "phase error 2\n");
+		return -EINVAL;
+	}
+
+	/* Verify the length of the command itself */
+	if (cmnd_size != fsg->cmnd_size) {
+
+		/* Special case workaround: MS-Windows issues REQUEST SENSE/
+		 * INQUIRY with cbw->Length == 12 (it should be 6). */
+		if ((fsg->cmnd[0] == SC_REQUEST_SENSE && fsg->cmnd_size == 12)
+		 || (fsg->cmnd[0] == SC_INQUIRY && fsg->cmnd_size == 12))
+			cmnd_size = fsg->cmnd_size;
+		else {
+			fsg->phase_error = 1;
+			return -EINVAL;
+		}
+	}
+
+	/* Check that the LUN values are consistent */
+	if (fsg->lun != lun)
+		DBG(fsg, "using LUN %d from CBW, "
+				"not LUN %d from CDB\n",
+				fsg->lun, lun);
+
+	/* Check the LUN */
+	if (fsg->lun >= 0 && fsg->lun < fsg->nluns) {
+		fsg->curlun = curlun = &fsg->luns[fsg->lun];
+		if (fsg->cmnd[0] != SC_REQUEST_SENSE) {
+			curlun->sense_data = SS_NO_SENSE;
+			curlun->sense_data_info = 0;
+			curlun->info_valid = 0;
+		}
+	} else {
+		fsg->curlun = curlun = NULL;
+		fsg->bad_lun_okay = 0;
+
+		/* INQUIRY and REQUEST SENSE commands are explicitly allowed
+		 * to use unsupported LUNs; all others may not. */
+		if (fsg->cmnd[0] != SC_INQUIRY &&
+				fsg->cmnd[0] != SC_REQUEST_SENSE) {
+			DBG(fsg, "unsupported LUN %d\n", fsg->lun);
+			return -EINVAL;
+		}
+	}
+
+	/* If a unit attention condition exists, only INQUIRY and
+	 * REQUEST SENSE commands are allowed; anything else must fail. */
+	if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
+			fsg->cmnd[0] != SC_INQUIRY &&
+			fsg->cmnd[0] != SC_REQUEST_SENSE) {
+		curlun->sense_data = curlun->unit_attention_data;
+		curlun->unit_attention_data = SS_NO_SENSE;
+		return -EINVAL;
+	}
+
+	/* Check that only command bytes listed in the mask are non-zero */
+	fsg->cmnd[1] &= 0x1f;			/* Mask away the LUN */
+	for (i = 1; i < cmnd_size; ++i) {
+		if (fsg->cmnd[i] && !(mask & (1 << i))) {
+			if (curlun)
+				curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+			DBG(fsg, "SS_INVALID_FIELD_IN_CDB\n");
+			return -EINVAL;
+		}
+	}
+
+	/* If the medium isn't mounted and the command needs to access
+	 * it, return an error. */
+	if (curlun && !backing_file_is_open(curlun) && needs_medium) {
+		curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
+		DBG(fsg, "SS_MEDIUM_NOT_PRESENT\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+
+static int do_scsi_command(struct fsg_dev *fsg)
+{
+	struct fsg_buffhd	*bh;
+	int			rc;
+	int			reply = -EINVAL;
+	int			i;
+	static char		unknown[16];
+
+	dump_cdb(fsg);
+
+	/* Wait for the next buffer to become available for data or status */
+	bh = fsg->next_buffhd_to_drain = fsg->next_buffhd_to_fill;
+	while (bh->state != BUF_STATE_EMPTY) {
+		rc = sleep_thread(fsg);
+		if (rc)
+			return rc;
+	}
+	fsg->phase_error = 0;
+	fsg->short_packet_received = 0;
+
+	down_read(&fsg->filesem);	/* We're using the backing file */
+	switch (fsg->cmnd[0]) {
+
+	case SC_INQUIRY:
+		fsg->data_size_from_cmnd = fsg->cmnd[4];
+		if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
+				(1<<4), 0,
+				"INQUIRY")) == 0)
+			reply = do_inquiry(fsg, bh);
+		break;
+
+	case SC_MODE_SELECT_6:
+		fsg->data_size_from_cmnd = fsg->cmnd[4];
+		if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
+				(1<<1) | (1<<4), 0,
+				"MODE SELECT(6)")) == 0)
+			reply = do_mode_select(fsg, bh);
+		break;
+
+	case SC_MODE_SELECT_10:
+		fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+		if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
+				(1<<1) | (3<<7), 0,
+				"MODE SELECT(10)")) == 0)
+			reply = do_mode_select(fsg, bh);
+		break;
+
+	case SC_MODE_SENSE_6:
+		fsg->data_size_from_cmnd = fsg->cmnd[4];
+		if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
+				(1<<1) | (1<<2) | (1<<4), 0,
+				"MODE SENSE(6)")) == 0)
+			reply = do_mode_sense(fsg, bh);
+		break;
+
+	case SC_MODE_SENSE_10:
+		fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
+				(1<<1) | (1<<2) | (3<<7), 0,
+				"MODE SENSE(10)")) == 0)
+			reply = do_mode_sense(fsg, bh);
+		break;
+
+	case SC_PREVENT_ALLOW_MEDIUM_REMOVAL:
+		fsg->data_size_from_cmnd = 0;
+		if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
+				(1<<4), 0,
+				"PREVENT-ALLOW MEDIUM REMOVAL")) == 0)
+			reply = do_prevent_allow(fsg);
+		break;
+
+	case SC_READ_6:
+		i = fsg->cmnd[4];
+		fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
+		if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
+				(7<<1) | (1<<4), 1,
+				"READ(6)")) == 0)
+			reply = do_read(fsg);
+		break;
+
+	case SC_READ_10:
+		fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]) << 9;
+		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
+				(1<<1) | (0xf<<2) | (3<<7), 1,
+				"READ(10)")) == 0)
+			reply = do_read(fsg);
+		break;
+
+	case SC_READ_12:
+		fsg->data_size_from_cmnd = get_be32(&fsg->cmnd[6]) << 9;
+		if ((reply = check_command(fsg, 12, DATA_DIR_TO_HOST,
+				(1<<1) | (0xf<<2) | (0xf<<6), 1,
+				"READ(12)")) == 0)
+			reply = do_read(fsg);
+		break;
+
+	case SC_READ_CAPACITY:
+		fsg->data_size_from_cmnd = 8;
+		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
+				(0xf<<2) | (1<<8), 1,
+				"READ CAPACITY")) == 0)
+			reply = do_read_capacity(fsg, bh);
+		break;
+
+	case SC_READ_FORMAT_CAPACITIES:
+		fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
+				(3<<7), 1,
+				"READ FORMAT CAPACITIES")) == 0)
+			reply = do_read_format_capacities(fsg, bh);
+		break;
+
+	case SC_REQUEST_SENSE:
+		fsg->data_size_from_cmnd = fsg->cmnd[4];
+		if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
+				(1<<4), 0,
+				"REQUEST SENSE")) == 0)
+			reply = do_request_sense(fsg, bh);
+		break;
+
+	case SC_START_STOP_UNIT:
+		fsg->data_size_from_cmnd = 0;
+		if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
+				(1<<1) | (1<<4), 0,
+				"START-STOP UNIT")) == 0)
+			reply = do_start_stop(fsg);
+		break;
+
+	case SC_SYNCHRONIZE_CACHE:
+		fsg->data_size_from_cmnd = 0;
+		if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
+				(0xf<<2) | (3<<7), 1,
+				"SYNCHRONIZE CACHE")) == 0)
+			reply = do_synchronize_cache(fsg);
+		break;
+
+	case SC_TEST_UNIT_READY:
+		fsg->data_size_from_cmnd = 0;
+		reply = check_command(fsg, 6, DATA_DIR_NONE,
+				0, 1,
+				"TEST UNIT READY");
+		break;
+
+	/* Although optional, this command is used by MS-Windows.  We
+	 * support a minimal version: BytChk must be 0. */
+	case SC_VERIFY:
+		fsg->data_size_from_cmnd = 0;
+		if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
+				(1<<1) | (0xf<<2) | (3<<7), 1,
+				"VERIFY")) == 0)
+			reply = do_verify(fsg);
+		break;
+
+	case SC_WRITE_6:
+		i = fsg->cmnd[4];
+		fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
+		if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
+				(7<<1) | (1<<4), 1,
+				"WRITE(6)")) == 0)
+			reply = do_write(fsg);
+		break;
+
+	case SC_WRITE_10:
+		fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]) << 9;
+		if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
+				(1<<1) | (0xf<<2) | (3<<7), 1,
+				"WRITE(10)")) == 0)
+			reply = do_write(fsg);
+		break;
+
+	case SC_WRITE_12:
+		fsg->data_size_from_cmnd = get_be32(&fsg->cmnd[6]) << 9;
+		if ((reply = check_command(fsg, 12, DATA_DIR_FROM_HOST,
+				(1<<1) | (0xf<<2) | (0xf<<6), 1,
+				"WRITE(12)")) == 0)
+			reply = do_write(fsg);
+		break;
+
+	/* Some mandatory commands that we recognize but don't implement.
+	 * They don't mean much in this setting.  It's left as an exercise
+	 * for anyone interested to implement RESERVE and RELEASE in terms
+	 * of Posix locks. */
+	case SC_FORMAT_UNIT:
+	case SC_RELEASE:
+	case SC_RESERVE:
+	case SC_SEND_DIAGNOSTIC:
+		/* Fall through */
+
+	default:
+		fsg->data_size_from_cmnd = 0;
+		sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]);
+		if ((reply = check_command(fsg, fsg->cmnd_size,
+				DATA_DIR_UNKNOWN, 0xff, 0, unknown)) == 0) {
+			fsg->curlun->sense_data = SS_INVALID_COMMAND;
+			reply = -EINVAL;
+		}
+		break;
+	}
+	up_read(&fsg->filesem);
+
+	VDBG(fsg, "reply: %d, fsg->data_size_from_cmnd: %d\n",
+			reply, fsg->data_size_from_cmnd);
+	if (reply == -EINTR || signal_pending(current))
+		return -EINTR;
+
+	/* Set up the single reply buffer for finish_reply() */
+	if (reply == -EINVAL)
+		reply = 0;		/* Error reply length */
+	if (reply >= 0 && fsg->data_dir == DATA_DIR_TO_HOST) {
+		reply = min((u32) reply, fsg->data_size_from_cmnd);
+		bh->inreq->length = reply;
+		bh->state = BUF_STATE_FULL;
+		fsg->residue -= reply;
+	}				/* Otherwise it's already set */
+
+	return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	struct usb_request	*req = bh->outreq;
+	struct bulk_cb_wrap	*cbw = req->buf;
+
+	/* Was this a real packet? */
+	if (req->status)
+		return -EINVAL;
+
+	/* Is the CBW valid? */
+	if (req->actual != USB_BULK_CB_WRAP_LEN ||
+			cbw->Signature != __constant_cpu_to_le32(
+				USB_BULK_CB_SIG)) {
+		DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
+				req->actual,
+				le32_to_cpu(cbw->Signature));
+		return -EINVAL;
+	}
+
+	/* Is the CBW meaningful? */
+	if (cbw->Lun >= MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
+			cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
+		DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
+				"cmdlen %u\n",
+				cbw->Lun, cbw->Flags, cbw->Length);
+		return -EINVAL;
+	}
+
+	/* Save the command for later */
+	fsg->cmnd_size = cbw->Length;
+	memcpy(fsg->cmnd, cbw->CDB, fsg->cmnd_size);
+	if (cbw->Flags & USB_BULK_IN_FLAG)
+		fsg->data_dir = DATA_DIR_TO_HOST;
+	else
+		fsg->data_dir = DATA_DIR_FROM_HOST;
+	fsg->data_size = le32_to_cpu(cbw->DataTransferLength);
+	if (fsg->data_size == 0)
+		fsg->data_dir = DATA_DIR_NONE;
+	fsg->lun = cbw->Lun;
+	fsg->tag = cbw->Tag;
+	return 0;
+}
+
+
+static int get_next_command(struct fsg_dev *fsg)
+{
+	struct fsg_buffhd	*bh;
+	int			rc = 0;
+
+	/* Wait for the next buffer to become available */
+	bh = fsg->next_buffhd_to_fill;
+	while (bh->state != BUF_STATE_EMPTY) {
+		rc = sleep_thread(fsg);
+		if (rc)
+			return rc;
+	}
+
+	/* Queue a request to read a Bulk-only CBW */
+	set_bulk_out_req_length(fsg, bh, USB_BULK_CB_WRAP_LEN);
+	start_transfer(fsg, fsg->bulk_out, bh->outreq,
+			&bh->outreq_busy, &bh->state);
+
+	/* We will drain the buffer in software, which means we
+	 * can reuse it for the next filling.  No need to advance
+	 * next_buffhd_to_fill. */
+
+	/* Wait for the CBW to arrive */
+	while (bh->state != BUF_STATE_FULL) {
+		rc = sleep_thread(fsg);
+		if (rc)
+			return rc;
+	}
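+	/* Pairs with the smp_wmb() in bulk_out_complete(): don't read
+	 * the CBW contents until the buffer has been seen as FULL. */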
+	smp_rmb();
+	rc = received_cbw(fsg, bh);
+	bh->state = BUF_STATE_EMPTY;
+
+	return rc;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int alloc_request(struct fsg_dev *fsg, struct usb_endpoint *ep,
+		struct usb_request **preq)
+{
+	*preq = usb_ept_alloc_req(ep, 0);
+	if (*preq)
+		return 0;
+	ERROR(fsg, "can't allocate request for bulk %s\n",
+			(ep == fsg->bulk_in ? "IN" : "OUT"));
+	return -ENOMEM;
+}
+
+/*
+ * Reset interface setting and re-init endpoint state (toggle etc).
+ * Call with altsetting < 0 to disable the interface.  The only other
+ * available altsetting is 0, which enables the interface.
+ */
+static int do_set_interface(struct fsg_dev *fsg, int altsetting)
+{
+	int	rc = 0;
+	int	i;
+
+	if (fsg->running)
+		DBG(fsg, "reset interface\n");
+
+reset:
+	/* Deallocate the requests */
+	for (i = 0; i < NUM_BUFFERS; ++i) {
+		struct fsg_buffhd *bh = &fsg->buffhds[i];
+
+		if (bh->inreq) {
+			usb_ept_cancel_xfer(fsg->bulk_in, bh->inreq);
+			usb_ept_free_req(fsg->bulk_in, bh->inreq);
+			bh->inreq = NULL;
+		}
+		if (bh->outreq) {
+			usb_ept_cancel_xfer(fsg->bulk_out, bh->outreq);
+			usb_ept_free_req(fsg->bulk_out, bh->outreq);
+			bh->outreq = NULL;
+		}
+	}
+
+	fsg->running = 0;
+	if (altsetting < 0 || rc != 0)
+		return rc;
+
+	DBG(fsg, "set interface %d\n", altsetting);
+
+	fsg->bulk_out_maxpacket = usb_ept_get_max_packet(fsg->bulk_out);
+
+	/* Allocate the requests */
+	for (i = 0; i < NUM_BUFFERS; ++i) {
+		struct fsg_buffhd	*bh = &fsg->buffhds[i];
+
+		rc = alloc_request(fsg, fsg->bulk_in, &bh->inreq);
+		if (rc != 0)
+			goto reset;
+		rc = alloc_request(fsg, fsg->bulk_out, &bh->outreq);
+		if (rc != 0)
+			goto reset;
+		bh->inreq->buf = bh->outreq->buf = bh->buf;
+		bh->inreq->context = bh->outreq->context = bh;
+		bh->inreq->complete = bulk_in_complete;
+		bh->outreq->complete = bulk_out_complete;
+	}
+
+	fsg->running = 1;
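+	/* Flag a UNIT ATTENTION (reset occurred) on every LUN so the
+	 * host knows its earlier state may no longer be valid. */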
+	for (i = 0; i < fsg->nluns; ++i)
+		fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
+
+	return rc;
+}
+
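+/* A wake lock is held whenever the gadget is configured and at least one
+ * LUN has an open backing file, so the device cannot suspend mid-session. */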
+static void adjust_wake_lock(struct fsg_dev *fsg)
+{
+	int ums_active = 0;
+	int i;
+	unsigned long		flags;
+
+	spin_lock_irqsave(&fsg->lock, flags);
+
+	if (fsg->config) {
+		for (i = 0; i < fsg->nluns; ++i) {
+			if (backing_file_is_open(&fsg->luns[i]))
+				ums_active = 1;
+		}
+	}
+
+	if (ums_active)
+		wake_lock(&fsg->wake_lock);
+	else
+		wake_unlock(&fsg->wake_lock);
+
+	spin_unlock_irqrestore(&fsg->lock, flags);
+}
+
+/*
+ * Change our operational configuration.  This code must agree with the code
+ * that returns config descriptors, and with interface altsetting code.
+ *
+ * It's also responsible for power management interactions.  Some
+ * configurations might not work with our current power sources.
+ * For now we just assume the gadget is always self-powered.
+ */
+static int do_set_config(struct fsg_dev *fsg, u8 new_config)
+{
+	int	rc = 0;
+
+	if (new_config == fsg->config)
+		return rc;
+
+	/* Disable the single interface */
+	if (fsg->config != 0) {
+		DBG(fsg, "reset config\n");
+		fsg->config = 0;
+		rc = do_set_interface(fsg, -1);
+	}
+
+	/* Enable the interface */
+	if (new_config != 0) {
+		fsg->config = new_config;
+		rc = do_set_interface(fsg, 0);
+		if (rc != 0)
+			fsg->config = 0;	/* Reset on errors */
+		else
+			INFO(fsg, "config #%d\n", fsg->config);
+	}
+
+	switch_set_state(&fsg->sdev, new_config);
+	adjust_wake_lock(fsg);
+	return rc;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static void handle_exception(struct fsg_dev *fsg)
+{
+	siginfo_t		info;
+	int			sig;
+	int			i;
+	struct fsg_buffhd	*bh;
+	enum fsg_state		old_state;
+	u8			new_config;
+	struct lun		*curlun;
+	int			rc;
+	unsigned long		flags;
+
+	DBG(fsg, "handle_exception state: %d\n", (int)fsg->state);
+	/* Clear the existing signals.  Anything but SIGUSR1 is converted
+	 * into a high-priority EXIT exception. */
+	for (;;) {
+		sig = dequeue_signal_lock(current, &current->blocked, &info);
+		if (!sig)
+			break;
+		if (sig != SIGUSR1) {
+			if (fsg->state < FSG_STATE_EXIT)
+				DBG(fsg, "Main thread exiting on signal\n");
+			raise_exception(fsg, FSG_STATE_EXIT);
+		}
+	}
+
+	/* Reset the I/O buffer states and pointers, the SCSI
+	 * state, and the exception.  Then invoke the handler. */
+	spin_lock_irqsave(&fsg->lock, flags);
+
+	for (i = 0; i < NUM_BUFFERS; ++i) {
+		bh = &fsg->buffhds[i];
+		bh->state = BUF_STATE_EMPTY;
+	}
+	fsg->next_buffhd_to_fill = fsg->next_buffhd_to_drain =
+			&fsg->buffhds[0];
+
+	new_config = fsg->new_config;
+	old_state = fsg->state;
+
+	if (old_state == FSG_STATE_ABORT_BULK_OUT)
+		fsg->state = FSG_STATE_STATUS_PHASE;
+	else {
+		for (i = 0; i < fsg->nluns; ++i) {
+			curlun = &fsg->luns[i];
+			curlun->prevent_medium_removal = 0;
+			curlun->sense_data = curlun->unit_attention_data =
+					SS_NO_SENSE;
+			curlun->sense_data_info = 0;
+			curlun->info_valid = 0;
+		}
+		fsg->state = FSG_STATE_IDLE;
+	}
+	spin_unlock_irqrestore(&fsg->lock, flags);
+
+	/* Carry out any extra actions required for the exception */
+	switch (old_state) {
+	default:
+		break;
+
+	case FSG_STATE_ABORT_BULK_OUT:
+		DBG(fsg, "FSG_STATE_ABORT_BULK_OUT\n");
+		spin_lock_irqsave(&fsg->lock, flags);
+		if (fsg->state == FSG_STATE_STATUS_PHASE)
+			fsg->state = FSG_STATE_IDLE;
+		spin_unlock_irqrestore(&fsg->lock, flags);
+		break;
+
+	case FSG_STATE_RESET:
+		/* really not much to do here */
+		break;
+
+	case FSG_STATE_CONFIG_CHANGE:
+		rc = do_set_config(fsg, new_config);
+		if (new_config == 0) {
+			/* We're using the backing file */
+			down_read(&fsg->filesem);
+			fsync_all(fsg);
+			up_read(&fsg->filesem);
+		}
+		break;
+
+	case FSG_STATE_EXIT:
+	case FSG_STATE_TERMINATED:
+		do_set_config(fsg, 0);			/* Free resources */
+		spin_lock_irqsave(&fsg->lock, flags);
+		fsg->state = FSG_STATE_TERMINATED;	/* Stop the thread */
+		spin_unlock_irqrestore(&fsg->lock, flags);
+		break;
+	}
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int fsg_main_thread(void *fsg_)
+{
+	struct fsg_dev		*fsg = fsg_;
+	unsigned long		flags;
+
+	/* Allow the thread to be killed by a signal, but set the signal mask
+	 * to block everything but INT, TERM, KILL, and USR1. */
+	allow_signal(SIGINT);
+	allow_signal(SIGTERM);
+	allow_signal(SIGKILL);
+	allow_signal(SIGUSR1);
+
+	/* Allow the thread to be frozen */
+	set_freezable();
+
+	/* Arrange for userspace references to be interpreted as kernel
+	 * pointers.  That way we can pass a kernel pointer to a routine
+	 * that expects a __user pointer and it will work okay. */
+	set_fs(get_ds());
+
+	/* The main loop */
+	while (fsg->state != FSG_STATE_TERMINATED) {
+		if (exception_in_progress(fsg) || signal_pending(current)) {
+			handle_exception(fsg);
+			continue;
+		}
+
+		if (!fsg->running) {
+			sleep_thread(fsg);
+			continue;
+		}
+
+		if (get_next_command(fsg))
+			continue;
+
+		spin_lock_irqsave(&fsg->lock, flags);
+		if (!exception_in_progress(fsg))
+			fsg->state = FSG_STATE_DATA_PHASE;
+		spin_unlock_irqrestore(&fsg->lock, flags);
+
+		if (do_scsi_command(fsg) || finish_reply(fsg))
+			continue;
+
+		spin_lock_irqsave(&fsg->lock, flags);
+		if (!exception_in_progress(fsg))
+			fsg->state = FSG_STATE_STATUS_PHASE;
+		spin_unlock_irqrestore(&fsg->lock, flags);
+
+#ifdef CONFIG_USB_CSW_HACK
+		/* Status has already been sent for write SCSI commands,
+		 * so skip sending it again here.
+		 */
+		if (fsg->cmnd[0] == SC_WRITE_6  || fsg->cmnd[0] == SC_WRITE_10
+					|| fsg->cmnd[0] == SC_WRITE_12)
+			continue;
+#endif
+		if (send_status(fsg))
+			continue;
+
+		spin_lock_irqsave(&fsg->lock, flags);
+		if (!exception_in_progress(fsg))
+			fsg->state = FSG_STATE_IDLE;
+		spin_unlock_irqrestore(&fsg->lock, flags);
+	}
+
+	spin_lock_irqsave(&fsg->lock, flags);
+	fsg->thread_task = NULL;
+	spin_unlock_irqrestore(&fsg->lock, flags);
+
+	/* If we are exiting because of a signal, close the backing files;
+	 * the REGISTERED bit ensures this is done only once. */
+	if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags))
+		close_all_backing_files(fsg);
+
+	/* Let the unbind and cleanup routines know the thread has exited */
+	complete_and_exit(&fsg->thread_notifier, 0);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* If the next two routines are called while the gadget is registered,
+ * the caller must own fsg->filesem for writing. */
+
+static int open_backing_file(struct fsg_dev *fsg, struct lun *curlun,
+		const char *filename)
+{
+	int				ro;
+	struct file			*filp = NULL;
+	int				rc = -EINVAL;
+	struct inode			*inode = NULL;
+	loff_t				size;
+	loff_t				num_sectors;
+
+	/* R/W if we can, R/O if we must */
+	ro = curlun->ro;
+	if (!ro) {
+		filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0);
+		if (-EROFS == PTR_ERR(filp))
+			ro = 1;
+	}
+	if (ro)
+		filp = filp_open(filename, O_RDONLY | O_LARGEFILE, 0);
+	if (IS_ERR(filp)) {
+		LINFO(curlun, "unable to open backing file: %s\n", filename);
+		return PTR_ERR(filp);
+	}
+
+	if (!(filp->f_mode & FMODE_WRITE))
+		ro = 1;
+
+	if (filp->f_path.dentry)
+		inode = filp->f_path.dentry->d_inode;
+	if (inode && S_ISBLK(inode->i_mode)) {
+		if (bdev_read_only(inode->i_bdev))
+			ro = 1;
+	} else if (!inode || !S_ISREG(inode->i_mode)) {
+		LINFO(curlun, "invalid file type: %s\n", filename);
+		goto out;
+	}
+
+	/* If we can't read the file, it's no good.
+	 * If we can't write the file, use it read-only. */
+	if (!filp->f_op || !(filp->f_op->read || filp->f_op->aio_read)) {
+		LINFO(curlun, "file not readable: %s\n", filename);
+		goto out;
+	}
+	if (!(filp->f_op->write || filp->f_op->aio_write))
+		ro = 1;
+
+	size = i_size_read(inode->i_mapping->host);
+	if (size < 0) {
+		LINFO(curlun, "unable to find file size: %s\n", filename);
+		rc = (int) size;
+		goto out;
+	}
+	num_sectors = size >> 9;	/* File size in 512-byte sectors */
+	if (num_sectors == 0) {
+		LINFO(curlun, "file too small: %s\n", filename);
+		rc = -ETOOSMALL;
+		goto out;
+	}
+
+	get_file(filp);
+	curlun->ro = ro;
+	curlun->filp = filp;
+	curlun->file_length = size;
+	curlun->num_sectors = num_sectors;
+	LDBG(curlun, "open backing file: %s size: %lld num_sectors: %lld\n",
+			filename, size, num_sectors);
+	rc = 0;
+	adjust_wake_lock(fsg);
+
+out:
+	filp_close(filp, current->files);
+	return rc;
+}
+
+
+static void close_backing_file(struct fsg_dev *fsg, struct lun *curlun)
+{
+	if (curlun->filp) {
+		int rc;
+
+		/*
+		 * XXX: San: Ugly hack here added to ensure that
+		 * our pages get synced to disk.
+		 * Also drop caches here just to be extra-safe
+		 */
+		rc = vfs_fsync(curlun->filp, curlun->filp->f_path.dentry, 1);
+		if (rc < 0)
+			printk(KERN_ERR "ums: Error syncing data (%d)\n", rc);
+		/* drop_pagecache and drop_slab are no longer available */
+		/* drop_pagecache(); */
+		/* drop_slab(); */
+
+		LDBG(curlun, "close backing file\n");
+		fput(curlun->filp);
+		curlun->filp = NULL;
+		adjust_wake_lock(fsg);
+	}
+}
+
+static void close_all_backing_files(struct fsg_dev *fsg)
+{
+	int	i;
+
+	for (i = 0; i < fsg->nluns; ++i)
+		close_backing_file(fsg, &fsg->luns[i]);
+}
+
+static ssize_t show_file(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct lun	*curlun = dev_to_lun(dev);
+	struct fsg_dev	*fsg = dev_get_drvdata(dev);
+	char		*p;
+	ssize_t		rc;
+
+	down_read(&fsg->filesem);
+	if (backing_file_is_open(curlun)) {	/* Get the complete pathname */
+		p = d_path(&curlun->filp->f_path, buf, PAGE_SIZE - 1);
+		if (IS_ERR(p))
+			rc = PTR_ERR(p);
+		else {
+			rc = strlen(p);
+			memmove(buf, p, rc);
+			buf[rc] = '\n';		/* Add a newline */
+			buf[++rc] = 0;
+		}
+	} else {				/* No file, return 0 bytes */
+		*buf = 0;
+		rc = 0;
+	}
+	up_read(&fsg->filesem);
+	return rc;
+}
+
+static ssize_t store_file(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct lun	*curlun = dev_to_lun(dev);
+	struct fsg_dev	*fsg = dev_get_drvdata(dev);
+	int		rc = 0;
+
+	DBG(fsg, "store_file: \"%s\"\n", buf);
+#if 0
+	/* disabled because we need to allow closing the backing file
+	 * if the medium was removed */
+	if (curlun->prevent_medium_removal && backing_file_is_open(curlun)) {
+		LDBG(curlun, "eject attempt prevented\n");
+		return -EBUSY;				/* "Door is locked" */
+	}
+#endif
+
+	/* Remove a trailing newline */
+	if (count > 0 && buf[count-1] == '\n')
+		((char *) buf)[count-1] = 0;
+
+	/* Eject current medium */
+	down_write(&fsg->filesem);
+	if (backing_file_is_open(curlun)) {
+		close_backing_file(fsg, curlun);
+		curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
+	}
+
+	/* Load new medium */
+	if (count > 0 && buf[0]) {
+		rc = open_backing_file(fsg, curlun, buf);
+		if (rc == 0)
+			curlun->unit_attention_data =
+					SS_NOT_READY_TO_READY_TRANSITION;
+	}
+	up_write(&fsg->filesem);
+	return (rc < 0 ? rc : count);
+}
+
+
+static DEVICE_ATTR(file, 0444, show_file, store_file);
+
+/*-------------------------------------------------------------------------*/
+
+static void fsg_release(struct kref *ref)
+{
+	struct fsg_dev	*fsg = container_of(ref, struct fsg_dev, ref);
+
+	kfree(fsg->luns);
+	kfree(fsg);
+}
+
+static void lun_release(struct device *dev)
+{
+	struct fsg_dev	*fsg = dev_get_drvdata(dev);
+
+	kref_put(&fsg->ref, fsg_release);
+}
+
+static void /* __init_or_exit */ fsg_unbind(void *_ctxt)
+{
+	struct fsg_dev		*fsg = _ctxt;
+	int			i;
+	struct lun		*curlun;
+
+	pr_debug("%s ()\n", __func__);
+	if (!fsg)
+		return;
+	if (!fsg->bound)
+		return;
+
+	fsg->running = 0;
+	clear_bit(REGISTERED, &fsg->atomic_bitflags);
+
+	/* Unregister the sysfs attribute files and the LUNs */
+	for (i = 0; i < fsg->nluns; ++i) {
+		curlun = &fsg->luns[i];
+		if (curlun->registered) {
+			device_remove_file(&curlun->dev, &dev_attr_file);
+			device_unregister(&curlun->dev);
+			curlun->registered = 0;
+		}
+	}
+
+	/* If the thread isn't already dead, tell it to exit now */
+	if (fsg->state != FSG_STATE_TERMINATED) {
+		raise_exception(fsg, FSG_STATE_EXIT);
+		wait_for_completion(&fsg->thread_notifier);
+
+	}
+
+	/* Free the data buffers */
+	for (i = 0; i < NUM_BUFFERS; ++i) {
+		kfree(fsg->buffhds[i].buf);
+		fsg->buffhds[i].buf = NULL;
+	}
+
+	if (fsg->bulk_in) {
+		usb_ept_fifo_flush(fsg->bulk_in);
+		usb_ept_enable(fsg->bulk_in,  0);
+		usb_free_endpoint(fsg->bulk_in);
+	}
+	if (fsg->bulk_out) {
+		usb_ept_fifo_flush(fsg->bulk_out);
+		usb_ept_enable(fsg->bulk_out,  0);
+		usb_free_endpoint(fsg->bulk_out);
+	}
+	fsg->bound = 0;
+}
+
+static void fsg_bind(void *_ctxt)
+{
+	struct fsg_dev		*fsg = the_fsg;
+	int			rc;
+	int			i;
+	unsigned int 		ret;
+	struct lun		*curlun;
+	char			*pathbuf, *p;
+	struct usb_function	*usb_func = &fsg_function;
+	struct usb_endpoint *ep;
+
+
+	dev_attr_file.attr.mode = 0644;
+	fsg->running = 0;
+
+	/* Find out how many LUNs there should be */
+	i = fsg->nluns;
+	if (i == 0)
+		i = 1;
+	if (i > MAX_LUNS) {
+		ERROR(fsg, "invalid number of LUNs: %d\n", i);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* Create the LUNs, open their backing files, and register the
+	 * LUN devices in sysfs. */
+	fsg->luns = kzalloc(i * sizeof(struct lun), GFP_KERNEL);
+	if (!fsg->luns) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	fsg->nluns = i;
+
+	for (i = 0; i < fsg->nluns; ++i) {
+		curlun = &fsg->luns[i];
+		curlun->ro = 0;
+		curlun->dev.release = lun_release;
+		curlun->dev.parent = &fsg->pdev->dev;
+		dev_set_drvdata(&curlun->dev, fsg);
+		snprintf(curlun->dev.bus_id, BUS_ID_SIZE,
+				"lun%d", i);
+
+		rc = device_register(&curlun->dev);
+		if (rc != 0) {
+			INFO(fsg, "failed to register LUN%d: %d\n", i, rc);
+			goto out;
+		}
+		rc = device_create_file(&curlun->dev, &dev_attr_file);
+		if (rc != 0) {
+			ERROR(fsg, "device_create_file failed: %d\n", rc);
+			device_unregister(&curlun->dev);
+			goto out;
+		}
+		curlun->registered = 1;
+		kref_get(&fsg->ref);
+	}
+	ret = usb_msm_get_next_ifc_number(usb_func);
+	intf_desc.bInterfaceNumber = ret;
+	pr_debug("%s: interface number = %d\n", __func__, ret);
+
+	ep = fsg->bulk_in = usb_alloc_endpoint(USB_DIR_IN);
+	hs_bulk_in_desc.bEndpointAddress = USB_DIR_IN | ep->num;
+	fs_bulk_in_desc.bEndpointAddress = USB_DIR_IN | ep->num;
+	pr_debug("%s: bulk in endpoint number = %d\n",
+						__func__, ep->num);
+
+	ep = fsg->bulk_out = usb_alloc_endpoint(USB_DIR_OUT);
+	hs_bulk_out_desc.bEndpointAddress = USB_DIR_OUT | ep->num;
+	fs_bulk_out_desc.bEndpointAddress = USB_DIR_OUT | ep->num;
+	pr_debug("%s: bulk out endpoint number = %d\n",
+						__func__, ep->num);
+
+	/* Allocate the data buffers */
+	for (i = 0; i < NUM_BUFFERS; ++i) {
+		struct fsg_buffhd	*bh = &fsg->buffhds[i];
+
+		/* Allocate for the bulk-in endpoint.  We assume that
+		 * the buffer will also work with the bulk-out (and
+		 * interrupt-in) endpoint. */
+		bh->buf = kmalloc(fsg->buf_size, GFP_KERNEL);
+		if (!bh->buf)
+			goto out;
+		bh->next = bh + 1;
+	}
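+	/* close the chain of buffer heads into a circular list */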
+	fsg->buffhds[NUM_BUFFERS - 1].next = &fsg->buffhds[0];
+
+	fsg->state = FSG_STATE_IDLE;
+	fsg->thread_task = kthread_create(fsg_main_thread, fsg,
+			"USB mass_storage");
+	if (IS_ERR(fsg->thread_task)) {
+		rc = PTR_ERR(fsg->thread_task);
+		ERROR(fsg, "kthread_create failed: %d\n", rc);
+		goto out;
+	}
+
+	DBG(fsg, "Number of LUNs=%d\n", fsg->nluns);
+
+	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
+	for (i = 0; i < fsg->nluns; ++i) {
+		curlun = &fsg->luns[i];
+		if (backing_file_is_open(curlun)) {
+			p = NULL;
+			if (pathbuf) {
+				p = d_path(&curlun->filp->f_path,
+					   pathbuf, PATH_MAX);
+				if (IS_ERR(p))
+					p = NULL;
+			}
+			LINFO(curlun, "ro=%d, file: %s\n",
+					curlun->ro, (p ? p : "(error)"));
+		}
+	}
+	kfree(pathbuf);
+
+	set_bit(REGISTERED, &fsg->atomic_bitflags);
+
+	/* Tell the thread to start working */
+	wake_up_process(fsg->thread_task);
+	fsg->bound = 1;
+	return;
+
+out:
+	fsg->state = FSG_STATE_TERMINATED;	/* The thread is dead */
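+	/* mark as bound so that fsg_unbind() below performs the cleanup */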
+	fsg->bound = 1;
+	fsg_unbind(fsg);
+	close_all_backing_files(fsg);
+}
+
+static void fsg_configure(int configured, void *_ctxt)
+{
+	struct fsg_dev *fsg = _ctxt;
+
+	if (!fsg)
+		return;
+	if (!fsg->bound)
+		return;
+
+	/* Clear out the controller's fifos */
+	if ((fsg->new_config) && (fsg->bulk_in))
+		usb_ept_fifo_flush(fsg->bulk_in);
+	if ((fsg->new_config) && (fsg->bulk_out))
+		usb_ept_fifo_flush(fsg->bulk_out);
+
+	if (configured) {
+		if (usb_msm_get_speed() == USB_SPEED_HIGH) {
+			usb_configure_endpoint(fsg->bulk_in, &hs_bulk_in_desc);
+			usb_configure_endpoint(fsg->bulk_out,
+						&hs_bulk_out_desc);
+		} else {
+			usb_configure_endpoint(fsg->bulk_in, &fs_bulk_in_desc);
+			usb_configure_endpoint(fsg->bulk_out,
+						&fs_bulk_out_desc);
+		}
+
+		usb_ept_enable(fsg->bulk_in, 1);
+		usb_ept_enable(fsg->bulk_out, 1);
+		wake_lock(&fsg->wake_lock_idle);
+	} else
+		wake_unlock(&fsg->wake_lock_idle);
+
+	fsg->new_config = configured;
+	raise_exception(fsg, FSG_STATE_CONFIG_CHANGE);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_function		fsg_function = {
+	.bind		= fsg_bind,
+	.unbind		= fsg_unbind,
+	.configure  = fsg_configure,
+	.setup		= fsg_setup,
+
+	.name = "mass_storage",
+
+};
+
+
+static int __init fsg_alloc(void)
+{
+	struct fsg_dev		*fsg;
+
+	fsg = kzalloc(sizeof *fsg, GFP_KERNEL);
+	if (!fsg)
+		return -ENOMEM;
+	spin_lock_init(&fsg->lock);
+	init_rwsem(&fsg->filesem);
+	kref_init(&fsg->ref);
+	init_completion(&fsg->thread_notifier);
+
+	the_fsg = fsg;
+	return 0;
+}
+
+static ssize_t print_switch_name(struct switch_dev *sdev, char *buf)
+{
+	return sprintf(buf, "%s\n", DRIVER_NAME);
+}
+
+static ssize_t print_switch_state(struct switch_dev *sdev, char *buf)
+{
+	struct fsg_dev	*fsg = container_of(sdev, struct fsg_dev, sdev);
+	return sprintf(buf, "%s\n", (fsg->config ? "online" : "offline"));
+}
+static int __exit fsg_remove(struct platform_device *pdev)
+{
+	struct fsg_dev  *fsg = the_fsg;
+
+	usb_function_unregister(&fsg_function);
+	wake_lock_destroy(&fsg->wake_lock_idle);
+	switch_dev_unregister(&fsg->sdev);
+	test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags);
+	close_all_backing_files(fsg);
+	kref_put(&fsg->ref, fsg_release);
+
+	return 0;
+}
+
+static int __init fsg_probe(struct platform_device *pdev)
+{
+	struct usb_mass_storage_platform_data *pdata = pdev->dev.platform_data;
+	int		rc;
+
+	rc = fsg_alloc();
+	if (rc != 0)
+		return rc;
+
+	the_fsg->pdev = pdev;
+	the_fsg->sdev.name = DRIVER_NAME;
+	the_fsg->nluns = pdata->nluns;
+	the_fsg->buf_size = pdata->buf_size;
+	the_fsg->vendor = pdata->vendor;
+	the_fsg->product = pdata->product;
+	the_fsg->release = pdata->release;
+	the_fsg->sdev.print_name = print_switch_name;
+	the_fsg->sdev.print_state = print_switch_state;
+	rc = switch_dev_register(&the_fsg->sdev);
+	if (rc < 0)
+		goto err_switch_dev_register;
+
+	wake_lock_init(&the_fsg->wake_lock, WAKE_LOCK_SUSPEND,
+		       "usb_mass_storage");
+	wake_lock_init(&the_fsg->wake_lock_idle, WAKE_LOCK_IDLE,
+		       "mass_storage_hold_idle");
+
+	fsg_function.hs_descriptors = hs_function;
+	fsg_function.fs_descriptors = fs_function;
+	fsg_function.context = the_fsg;
+	rc = usb_function_register(&fsg_function);
+	if (rc != 0)
+		goto err_usb_function_register;
+
+	return 0;
+
+err_usb_function_register:
+	switch_dev_unregister(&the_fsg->sdev);
+err_switch_dev_register:
+	kref_put(&the_fsg->ref, fsg_release);
+
+	return rc;
+}
+
+static struct platform_driver fsg_driver = {
+	.probe = fsg_probe,
+	.remove = __exit_p(fsg_remove),
+	.driver = { .name = DRIVER_NAME, },
+};
+
+static int __init fsg_init(void)
+{
+	return platform_driver_register(&fsg_driver);
+}
+module_init(fsg_init);
+
+static void __exit fsg_cleanup(void)
+{
+	platform_driver_unregister(&fsg_driver);
+
+}
+module_exit(fsg_cleanup);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/function/msm_hsusb.c b/drivers/usb/function/msm_hsusb.c
new file mode 100644
index 0000000..eebd9d4
--- /dev/null
+++ b/drivers/usb/function/msm_hsusb.c
@@ -0,0 +1,3948 @@
+/* drivers/usb/function/msm_hsusb.c
+ *
+ * Driver for HighSpeed USB Client Controller in MSM7K
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/workqueue.h>
+#include <linux/clk.h>
+#include <linux/spinlock.h>
+#include <linux/switch.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/io.h>
+
+#include <asm/mach-types.h>
+#include <mach/vreg.h>
+#include <mach/board.h>
+#include <mach/msm_hsusb.h>
+#include <mach/rpc_hsusb.h>
+#include <mach/rpc_pmapp.h>
+#include <mach/gpio.h>
+#include <mach/msm_hsusb_hw.h>
+#include <mach/msm_otg.h>
+#include <linux/wakelock.h>
+#include <linux/pm_qos_params.h>
+#include <mach/clk.h>
+
+#define MSM_USB_BASE ((unsigned) ui->addr)
+
+#include "usb_function.h"
+
+#define EPT_FLAG_IN	0x0001
+#define USB_DIR_MASK	USB_DIR_IN
+#define SETUP_BUF_SIZE	4096
+
+/* IDs for string descriptors */
+#define STRING_LANGUAGE_ID      0
+#define STRING_SERIAL           1
+#define STRING_PRODUCT          2
+#define STRING_MANUFACTURER     3
+
+#define LANGUAGE_ID             0x0409 /* en-US */
+#define SOC_ROC_2_0		0x10002 /* ROC 2.0 */
+
+#define TRUE			1
+#define FALSE			0
+#define USB_LINK_RESET_TIMEOUT	(msecs_to_jiffies(10))
+#define USB_CHG_DET_DELAY	msecs_to_jiffies(1000)
+
+#define is_phy_45nm()     (PHY_MODEL(ui->phy_info) == USB_PHY_MODEL_45NM)
+#define is_phy_external() (PHY_TYPE(ui->phy_info) == USB_PHY_EXTERNAL)
+
+static int pid = 0x9018;
+
+struct usb_fi_ept {
+	struct usb_endpoint *ept;
+	struct usb_endpoint_descriptor desc;
+};
+
+struct usb_function_info {
+	struct list_head list;
+	unsigned enabled;
+	struct usb_function *func;
+};
+
+struct msm_request {
+	struct usb_request req;
+
+	struct usb_info *ui;
+	struct msm_request *next;
+
+	unsigned busy:1;
+	unsigned live:1;
+	unsigned alloced:1;
+	unsigned dead:1;
+
+	dma_addr_t dma;
+
+	struct ept_queue_item *item;
+	dma_addr_t item_dma;
+};
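+
+/* string descriptor 0: list of supported language IDs (en-US only) */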
+static unsigned char str_lang_desc[] = {4,
+				USB_DT_STRING,
+				(unsigned char)LANGUAGE_ID,
+				(unsigned char)(LANGUAGE_ID >> 8)};
+
+#define to_msm_request(r) container_of(r, struct msm_request, req)
+static int usb_hw_reset(struct usb_info *ui);
+static void usb_vbus_online(struct usb_info *);
+static void usb_vbus_offline(struct usb_info *ui);
+static void usb_lpm_exit(struct usb_info *ui);
+static void usb_lpm_wakeup_phy(struct work_struct *);
+static void usb_exit(void);
+static int usb_is_online(struct usb_info *ui);
+static void usb_do_work(struct work_struct *w);
+static int usb_lpm_enter(struct usb_info *ui);
+int (*usb_lpm_config_gpio)(int);
+static void usb_enable_pullup(struct usb_info *ui);
+static void usb_disable_pullup(struct usb_info *ui);
+
+static struct workqueue_struct *usb_work;
+static void usb_chg_stop(struct work_struct *w);
+
+#define USB_STATE_IDLE    0
+#define USB_STATE_ONLINE  1
+#define USB_STATE_OFFLINE 2
+
+#define USB_FLAG_START          0x0001
+#define USB_FLAG_VBUS_ONLINE    0x0002
+#define USB_FLAG_VBUS_OFFLINE   0x0004
+#define USB_FLAG_RESET          0x0008
+#define USB_FLAG_SUSPEND	0x0010
+#define USB_FLAG_CONFIGURE	0x0020
+#define USB_FLAG_RESUME	0x0040
+#define USB_FLAG_REG_OTG 0x0080
+
+#define USB_MSC_ONLY_FUNC_MAP	0x10
+#define DRIVER_NAME		"msm_hsusb_peripheral"
+
+struct lpm_info {
+	struct work_struct wakeup_phy;
+};
+
+enum charger_type {
+	USB_CHG_TYPE__SDP,
+	USB_CHG_TYPE__CARKIT,
+	USB_CHG_TYPE__WALLCHARGER,
+	USB_CHG_TYPE__INVALID
+};
+
+struct usb_info {
+	/* lock for register/queue/device state changes */
+	spinlock_t lock;
+
+	/* single request used for handling setup transactions */
+	struct usb_request *setup_req;
+	struct usb_request *ep0out_req;
+
+	struct platform_device *pdev;
+	struct msm_hsusb_platform_data *pdata;
+	int irq;
+	int gpio_irq[2];
+	void *addr;
+
+	unsigned state;
+	unsigned flags;
+
+	unsigned online;
+	unsigned running;
+	unsigned bound;
+
+	struct dma_pool *pool;
+
+	/* dma page to back the queue heads and items */
+	unsigned char *buf;
+	dma_addr_t dma;
+
+	struct ept_queue_head *head;
+
+	/* used for allocation */
+	unsigned next_item;
+	unsigned next_ifc_num;
+	unsigned stopped:1;
+	unsigned remote_wakeup:1;
+	unsigned configured:1;
+	unsigned selfpowered:1;
+	unsigned iad:1;
+	unsigned char maxpower;
+	enum usb_device_speed speed;
+	unsigned phy_info;
+
+	/* endpoints are ordered based on their status bits,
+	** so they are OUT0, OUT1, ... OUT15, IN0, IN1, ... IN15
+	*/
+	struct usb_endpoint ept[32];
+
+	struct delayed_work work;
+	struct delayed_work chg_legacy_det;
+	unsigned phy_status;
+	unsigned phy_fail_count;
+	struct usb_composition *composition;
+
+	struct usb_function_info **func;
+	unsigned num_funcs;
+	struct usb_function_map *functions_map;
+
+#define MAX_INTERFACE_NUM	15
+	struct usb_function *func2ifc_map[MAX_INTERFACE_NUM];
+
+#define ep0out ept[0]
+#define ep0in  ept[16]
+
+	struct clk *clk;
+	struct clk *pclk;
+	struct clk *cclk;
+	unsigned int clk_enabled;
+
+	struct vreg *vreg;
+	unsigned int vreg_enabled;
+
+	unsigned in_lpm;
+	struct lpm_info li;
+
+	enum charger_type chg_type;
+	struct work_struct chg_stop;
+#define MAX_STRDESC_NUM		100
+	char **strdesc;
+	int strdesc_index;
+
+	u16 test_mode;
+	struct wake_lock wlock;
+	struct msm_otg_transceiver *xceiv;
+	int active;
+	enum usb_device_state usb_state;
+	int vbus_sn_notif;
+	struct switch_dev sdev;
+};
+static struct usb_info *the_usb_info;
+
+static unsigned short usb_validate_product_id(unsigned short pid);
+static unsigned short usb_get_product_id(unsigned long enabled_functions);
+static void usb_switch_composition(unsigned short pid);
+static unsigned short usb_set_composition(unsigned short pid);
+static void usb_configure_device_descriptor(struct usb_info *ui);
+static void usb_uninit(struct usb_info *ui);
+
+static unsigned ulpi_read(struct usb_info *ui, unsigned reg);
+static int ulpi_write(struct usb_info *ui, unsigned val, unsigned reg);
+
+
+
+struct usb_device_descriptor desc_device = {
+	.bLength = USB_DT_DEVICE_SIZE,
+	.bDescriptorType = USB_DT_DEVICE,
+	.bcdUSB = 0x0200,
+	.bDeviceClass = 0,
+	.bDeviceSubClass = 0,
+	.bDeviceProtocol = 0,
+	.bMaxPacketSize0 = 64,
+	/* the following fields are filled in by usb_probe */
+	.idVendor = 0,
+	.idProduct = 0,
+	.bcdDevice = 0,
+	.iManufacturer = 0,
+	.iProduct = 0,
+	.iSerialNumber = 0,
+	.bNumConfigurations = 1,
+};
+
+static void flush_endpoint(struct usb_endpoint *ept);
+static void msm_hsusb_suspend_locks_acquire(struct usb_info *, int);
+
+static ssize_t print_switch_name(struct switch_dev *sdev, char *buf)
+{
+	return sprintf(buf, "%s\n", DRIVER_NAME);
+}
+
+static ssize_t print_switch_state(struct switch_dev *sdev, char *buf)
+{
+	struct usb_info *ui = the_usb_info;
+
+	return sprintf(buf, "%s\n", (ui->online ? "online" : "offline"));
+}
+
+#define USB_WALLCHARGER_CHG_CURRENT 1800
+static int usb_get_max_power(struct usb_info *ui)
+{
+	unsigned long flags;
+	enum charger_type temp;
+	int suspended;
+	int configured;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	temp = ui->chg_type;
+	suspended = ui->usb_state == USB_STATE_SUSPENDED ? 1 : 0;
+	configured = ui->configured;
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	if (temp == USB_CHG_TYPE__INVALID)
+		return -ENODEV;
+
+	if (temp == USB_CHG_TYPE__WALLCHARGER)
+		return USB_WALLCHARGER_CHG_CURRENT;
+
+	if (suspended || !configured)
+		return 0;
+
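+	/* maxpower holds the bMaxPower value, which is in units of 2 mA */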
+	return ui->maxpower * 2;
+}
+
+static void usb_chg_legacy_detect(struct work_struct *w)
+{
+	struct usb_info *ui = the_usb_info;
+	unsigned long flags;
+	enum charger_type temp = USB_CHG_TYPE__INVALID;
+	int maxpower;
+	int ret = 0;
+
+	spin_lock_irqsave(&ui->lock, flags);
+
+	if (ui->usb_state == USB_STATE_NOTATTACHED) {
+		ret = -ENODEV;
+		goto chg_legacy_det_out;
+	}
+
+	if ((readl(USB_PORTSC) & PORTSC_LS) == PORTSC_LS) {
+		ui->chg_type = temp = USB_CHG_TYPE__WALLCHARGER;
+		goto chg_legacy_det_out;
+	}
+
+	ui->chg_type = temp = USB_CHG_TYPE__SDP;
+chg_legacy_det_out:
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	if (ret)
+		return;
+
+	msm_chg_usb_charger_connected(temp);
+	maxpower = usb_get_max_power(ui);
+	if (maxpower > 0)
+		msm_chg_usb_i_is_available(maxpower);
+
+	/* The USB driver prevents idle and suspend power collapse (pc)
+	 * while a usb cable is connected. When a dedicated charger is
+	 * connected, the driver can vote for idle and suspend pc, but it
+	 * cannot enter low power mode itself because the phy cannot be
+	 * accessed (phy lockup issues) while a dedicated charger is
+	 * attached. To still allow idle and suspend pc in that case,
+	 * release the wakelock, restore the default latency vote and act
+	 * as if we are in low power mode, so that the driver re-acquires
+	 * the wakelocks on any subsequent usb interrupt.
+	 */
+	if (temp == USB_CHG_TYPE__WALLCHARGER) {
+		pr_info("\n%s: WALL-CHARGER\n", __func__);
+		spin_lock_irqsave(&ui->lock, flags);
+		if (ui->usb_state == USB_STATE_NOTATTACHED) {
+			spin_unlock_irqrestore(&ui->lock, flags);
+			return;
+		}
+		ui->in_lpm = 1;
+		spin_unlock_irqrestore(&ui->lock, flags);
+
+		msm_hsusb_suspend_locks_acquire(ui, 0);
+	} else
+		pr_info("\n%s: Standard Downstream Port\n", __func__);
+}
+
+int usb_msm_get_next_strdesc_id(char *str)
+{
+	struct usb_info *ui = the_usb_info;
+	unsigned id;
+	unsigned long flags;
+	int len;
+
+	len = strlen(str);
+	if (!len) {
+		printk(KERN_ERR "usb next_strdesc_id(); null string\n");
+		return -EPERM;
+	}
+	/* for null character */
+	len = len + 1;
+
+	spin_lock_irqsave(&ui->lock, flags);
+
+	id = ui->strdesc_index;
+	if (id >= MAX_STRDESC_NUM) {
+		id = -EPERM;
+		printk(KERN_ERR "reached max strdesc number\n");
+		goto get_strd_id_exit;
+	}
+
+	ui->strdesc[id] = kmalloc(len, GFP_ATOMIC);
+	if (ui->strdesc[id]) {
+		memcpy(ui->strdesc[id], str, len);
+		ui->strdesc_index++;
+	} else {
+		id = -EPERM;
+		printk(KERN_ERR "usb next_strdesc_id(); Out of memory:(%s)\n",
+			str);
+	}
+
+get_strd_id_exit:
+	spin_unlock_irqrestore(&ui->lock, flags);
+	return id;
+}
+EXPORT_SYMBOL(usb_msm_get_next_strdesc_id);
+
+
+inline int usb_msm_is_iad(void)
+{
+	return the_usb_info->iad;
+}
+EXPORT_SYMBOL(usb_msm_is_iad);
+
+inline void usb_msm_enable_iad(void)
+{
+	the_usb_info->iad = 1;
+}
+EXPORT_SYMBOL(usb_msm_enable_iad);
+
+int usb_msm_get_speed(void)
+{
+	return the_usb_info->speed;
+}
+EXPORT_SYMBOL(usb_msm_get_speed);
+
+int usb_msm_get_next_ifc_number(struct usb_function *driver)
+{
+	struct usb_info *ui = the_usb_info;
+	int ifc_num = -1;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	for (i = 0; i < ui->pdata->num_functions; i++) {
+		if (strcmp(ui->functions_map[i].name, driver->name))
+			continue;
+		if (!(ui->composition->functions & (1 << i)))
+			continue;
+		ifc_num = ui->next_ifc_num++;
+		ui->func2ifc_map[ifc_num] = driver;
+		break;
+	}
+	spin_unlock_irqrestore(&ui->lock, flags);
+	return ifc_num;
+}
+EXPORT_SYMBOL(usb_msm_get_next_ifc_number);
+
+static inline int usb_msm_get_selfpowered(void)
+{
+	struct usb_info *ui = the_usb_info;
+
+	return ui->selfpowered;
+}
+static inline int usb_msm_get_remotewakeup(void)
+{
+	struct usb_info *ui = the_usb_info;
+
+	return ui->remote_wakeup;
+}
+
+static void usb_clk_enable(struct usb_info *ui)
+{
+	if (!ui->clk_enabled) {
+		clk_enable(ui->pclk);
+		if (ui->cclk)
+			clk_enable(ui->cclk);
+		ui->clk_enabled = 1;
+	}
+}
+
+static void usb_clk_disable(struct usb_info *ui)
+{
+	if (ui->clk_enabled) {
+		clk_disable(ui->pclk);
+		if (ui->cclk)
+			clk_disable(ui->cclk);
+		ui->clk_enabled = 0;
+	}
+}
+
+static void usb_vreg_enable(struct usb_info *ui)
+{
+	if (ui->vreg && !IS_ERR(ui->vreg) && !ui->vreg_enabled) {
+		vreg_enable(ui->vreg);
+		ui->vreg_enabled = 1;
+	}
+}
+
+static void usb_vreg_disable(struct usb_info *ui)
+{
+	if (ui->vreg && !IS_ERR(ui->vreg) && ui->vreg_enabled) {
+		vreg_disable(ui->vreg);
+		ui->vreg_enabled = 0;
+	}
+}
+
+static unsigned ulpi_read(struct usb_info *ui, unsigned reg)
+{
+	unsigned timeout = 100000;
+
+	/* initiate read operation */
+	writel(ULPI_RUN | ULPI_READ | ULPI_ADDR(reg),
+	       USB_ULPI_VIEWPORT);
+
+	/* wait for completion */
+	while ((readl(USB_ULPI_VIEWPORT) & ULPI_RUN) && (--timeout))
+		;
+
+	if (timeout == 0) {
+		printk(KERN_ERR "ulpi_read: timeout %08x\n",
+			readl(USB_ULPI_VIEWPORT));
+		return 0xffffffff;
+	}
+	return ULPI_DATA_READ(readl(USB_ULPI_VIEWPORT));
+}
+
+static int ulpi_write(struct usb_info *ui, unsigned val, unsigned reg)
+{
+	unsigned timeout = 10000;
+
+	/* initiate write operation */
+	writel(ULPI_RUN | ULPI_WRITE |
+	       ULPI_ADDR(reg) | ULPI_DATA(val),
+	       USB_ULPI_VIEWPORT);
+
+	/* wait for completion */
+	while ((readl(USB_ULPI_VIEWPORT) & ULPI_RUN) && (--timeout))
+		;
+
+	if (timeout == 0) {
+		printk(KERN_ERR "ulpi_write: timeout\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static void msm_hsusb_suspend_locks_acquire(struct usb_info *ui, int acquire)
+{
+	if (acquire) {
+		wake_lock(&ui->wlock);
+		pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
+				DRIVER_NAME, ui->pdata->swfi_latency);
+		/* targets like 7x30 have introduced core clock
+		 * to remove the dependency on max axi frequency
+		 */
+		if (!ui->cclk)
+			pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ,
+					DRIVER_NAME, MSM_AXI_MAX_FREQ);
+	} else {
+		wake_lock_timeout(&ui->wlock, HZ / 2);
+		pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
+					DRIVER_NAME,
+					PM_QOS_DEFAULT_VALUE);
+		if (!ui->cclk)
+			pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ,
+					DRIVER_NAME, PM_QOS_DEFAULT_VALUE);
+	}
+}
+
+static void msm_hsusb_suspend_locks_init(struct usb_info *ui, int init)
+{
+	if (init) {
+		wake_lock_init(&ui->wlock, WAKE_LOCK_SUSPEND,
+				"usb_bus_active");
+		pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
+				DRIVER_NAME,
+				PM_QOS_DEFAULT_VALUE);
+		pm_qos_add_requirement(PM_QOS_SYSTEM_BUS_FREQ,
+				DRIVER_NAME, PM_QOS_DEFAULT_VALUE);
+	} else {
+		wake_lock_destroy(&ui->wlock);
+		pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, DRIVER_NAME);
+		pm_qos_remove_requirement(PM_QOS_SYSTEM_BUS_FREQ, DRIVER_NAME);
+	}
+}
+
+static void init_endpoints(struct usb_info *ui)
+{
+	unsigned n;
+
+	for (n = 0; n < 32; n++) {
+		struct usb_endpoint *ept = ui->ept + n;
+
+		ept->ui = ui;
+		ept->bit = n;
+		ept->num = n & 15;
+		ept->alloced = 0;
+
+		if (ept->bit > 15) {
+			/* IN endpoint */
+			ept->head = ui->head + (ept->num << 1) + 1;
+			ept->flags = EPT_FLAG_IN;
+		} else {
+			/* OUT endpoint */
+			ept->head = ui->head + (ept->num << 1);
+			ept->flags = 0;
+		}
+	}
+}
+
+void usb_configure_endpoint(struct usb_endpoint *ep,
+			struct usb_endpoint_descriptor *ep_desc)
+{
+	unsigned cfg = 0;
+	unsigned long flags;
+	struct usb_info *ui = ep->ui;
+
+	if (!ui)
+		return;
+	spin_lock_irqsave(&ui->lock, flags);
+
+	if (ep_desc) {
+		ep->max_pkt = ep_desc->wMaxPacketSize;
+		ep->ep_descriptor = ep_desc;
+	}
+
+	if (!ep->max_pkt) {
+		printk(KERN_ERR "cannot configure zero length max pkt\n");
+		goto cfg_ept_end;
+	}
+
+	cfg = CONFIG_MAX_PKT(ep->max_pkt) | CONFIG_ZLT;
+	/* ep0 out needs interrupt-on-setup */
+	if (ep->bit == 0)
+		cfg |= CONFIG_IOS;
+	ep->head->config = cfg;
+	ep->head->next = TERMINATE;
+
+	pr_debug("ept #%d %s max:%d head:%p bit:%d\n",
+		       ep->num,
+		       (ep->flags & EPT_FLAG_IN) ? "in" : "out",
+		       ep->max_pkt, ep->head, ep->bit);
+
+cfg_ept_end:
+	spin_unlock_irqrestore(&ui->lock, flags);
+}
+EXPORT_SYMBOL(usb_configure_endpoint);
+
+#define NUM_EPTS 15	/* number of in or out non-ctrl endpoints */
+struct usb_endpoint *usb_alloc_endpoint(unsigned direction)
+{
+	struct usb_info *ui = the_usb_info;
+	struct usb_endpoint *ept = NULL;
+	int i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	if (direction & USB_DIR_IN)
+		ept = (&ui->ep0in);
+	else
+		ept = (&ui->ep0out);
+
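+	/* scan the 15 non-control endpoints following ep0 in this direction */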
+	for (i = 0; i < NUM_EPTS; i++) {
+		ept++;
+		if (!ept->alloced) {
+			ept->alloced = 1;
+			ept->ui = ui;
+			spin_unlock_irqrestore(&ui->lock, flags);
+			return ept;
+		}
+	}
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	return NULL;
+}
+EXPORT_SYMBOL(usb_alloc_endpoint);
+
+int usb_free_endpoint(struct usb_endpoint *ept)
+{
+	struct usb_info *ui = the_usb_info;
+	unsigned long flags;
+
+	if (!ept)
+		return -EINVAL;
+	spin_lock_irqsave(&ui->lock, flags);
+	ept->alloced = 0;
+	ept->ui = 0;
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(usb_free_endpoint);
+
+struct usb_request *usb_ept_alloc_req(struct usb_endpoint *ept,
+			unsigned bufsize)
+{
+	struct usb_info *ui = ept->ui;
+	struct msm_request *req;
+
+	if (!ui)
+		return NULL;
+
+	req = kzalloc(sizeof(*req), GFP_ATOMIC);
+	if (!req)
+		goto fail1;
+
+	req->item = dma_pool_alloc(ui->pool, GFP_ATOMIC, &req->item_dma);
+	if (!req->item)
+		goto fail2;
+
+	if (bufsize) {
+		req->req.buf = kmalloc(bufsize, GFP_ATOMIC);
+		if (!req->req.buf)
+			goto fail3;
+		req->alloced = 1;
+	}
+
+	return &req->req;
+
+fail3:
+	dma_pool_free(ui->pool, req->item, req->item_dma);
+fail2:
+	kfree(req);
+fail1:
+	return NULL;
+}
+EXPORT_SYMBOL(usb_ept_alloc_req);
+
+static void do_free_req(struct usb_info *ui, struct msm_request *req)
+{
+	if (req->alloced)
+		kfree(req->req.buf);
+
+	dma_pool_free(ui->pool, req->item, req->item_dma);
+	kfree(req);
+}
+
+void usb_ept_free_req(struct usb_endpoint *ept, struct usb_request *_req)
+{
+	struct msm_request *req, *temp_req, *prev_req;
+	struct usb_info *ui;
+	unsigned long flags;
+	int dead = 0;
+	if (!ept || !_req)
+		return;
+
+	ui = ept->ui;
+	if (!ui)
+		return;
+
+	req = to_msm_request(_req);
+	spin_lock_irqsave(&ui->lock, flags);
+	/* defer freeing resources if request is still busy */
+	if (req->busy)
+		dead = req->dead = 1;
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	/* if req->dead, then we will clean up when the request finishes */
+	if (!dead) {
+		temp_req = ept->req;
+		prev_req = temp_req;
+		while (temp_req != NULL) {
+			if (req == temp_req && ept->req != temp_req)
+				prev_req->next = temp_req->next;
+
+			prev_req = temp_req;
+			temp_req = temp_req->next;
+		}
+		if (ept->req == req)
+			ept->req = req->next;
+		req->req.complete = NULL;
+		do_free_req(ui, req);
+	} else
+		pr_err("%s: req is busy, can't free req\n", __func__);
+}
+EXPORT_SYMBOL(usb_ept_free_req);
+
+void usb_ept_enable(struct usb_endpoint *ept, int yes)
+{
+	struct usb_info *ui;
+	int in;
+	unsigned n;
+	unsigned char xfer;
+
+	if (!ept || !ept->ui)
+		return;
+	ui = ept->ui;
+	in = ept->flags & EPT_FLAG_IN;
+	if (!ept->ep_descriptor)
+		return;
+
+	if (ui->in_lpm) {
+		pr_err("%s: controller is in lpm, cannot proceed\n", __func__);
+		return;
+	}
+
+	xfer = ept->ep_descriptor->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
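+	/* program the transfer type and enable/reset bits in ENDPTCTRL */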
+	n = readl(USB_ENDPTCTRL(ept->num));
+
+	if (in) {
+		if (xfer == USB_ENDPOINT_XFER_BULK)
+			n = (n & (~CTRL_TXT_MASK)) | CTRL_TXT_BULK;
+		else if (xfer == USB_ENDPOINT_XFER_INT)
+			n = (n & (~CTRL_TXT_MASK)) | CTRL_TXT_INT;
+		if (yes)
+			n |= CTRL_TXE | CTRL_TXR;
+		else
+			n &= (~CTRL_TXE);
+	} else {
+		if (xfer == USB_ENDPOINT_XFER_BULK)
+			n = (n & (~CTRL_RXT_MASK)) | CTRL_RXT_BULK;
+		else if (xfer == USB_ENDPOINT_XFER_INT)
+			n = (n & (~CTRL_RXT_MASK)) | CTRL_RXT_INT;
+		if (yes)
+			n |= CTRL_RXE | CTRL_RXR;
+		else
+			n &= ~(CTRL_RXE);
+	}
+	/* complete all the updates to ept->head before enabling the endpoint */
+	dma_coherent_pre_ops();
+	writel(n, USB_ENDPTCTRL(ept->num));
+}
+EXPORT_SYMBOL(usb_ept_enable);
+
+static void usb_ept_start(struct usb_endpoint *ept)
+{
+	struct usb_info *ui = ept->ui;
+	struct msm_request *req = ept->req;
+
+	BUG_ON(req->live);
+
+	/* link the hw queue head to the request's transaction item */
+	ept->head->next = req->item_dma;
+	ept->head->info = 0;
+
+	/* memory barrier to flush the data before priming the endpoint */
+	dma_coherent_pre_ops();
+	/* start the endpoint */
+	writel(1 << ept->bit, USB_ENDPTPRIME);
+
+	/* mark this chain of requests as live */
+	while (req) {
+		req->live = 1;
+		if (req->item->next == TERMINATE)
+			break;
+		req = req->next;
+	}
+}
+
+int usb_ept_queue_xfer(struct usb_endpoint *ept, struct usb_request *_req)
+{
+	unsigned long flags;
+	struct msm_request *req = to_msm_request(_req);
+	struct msm_request *last;
+	struct usb_info *ui = ept->ui;
+	struct ept_queue_item *item = req->item;
+	unsigned length = req->req.length;
+
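+	/* a single transfer item here covers at most four 4K pages (16KB) */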
+	if (length > 0x4000)
+		return -EMSGSIZE;
+
+	if (ui->in_lpm) {
+		req->req.status = usb_remote_wakeup();
+		if (req->req.status) {
+			pr_debug("%s:RWakeup generation failed, EP = %x\n",
+							__func__, ept->bit);
+			return req->req.status;
+		}
+	}
+
+	spin_lock_irqsave(&ui->lock, flags);
+
+	if (req->busy) {
+		req->req.status = -EBUSY;
+		spin_unlock_irqrestore(&ui->lock, flags);
+		printk(KERN_INFO
+		       "usb_ept_queue_xfer() tried to queue busy request\n");
+		return -EBUSY;
+	}
+
+	if (!ui->online && (ept->num != 0)) {
+		req->req.status = -ENODEV;
+		spin_unlock_irqrestore(&ui->lock, flags);
+		printk(KERN_INFO "usb_ept_queue_xfer() tried to queue request"
+				"while offline; ept->bit: %x\n", ept->bit);
+		return -ENODEV;
+	}
+
+	req->busy = 1;
+	req->live = 0;
+	req->next = 0;
+	req->req.status = -EBUSY;
+
+	req->dma = dma_map_single(NULL, req->req.buf, length,
+				  (ept->flags & EPT_FLAG_IN) ?
+				  DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+	/* prepare the transaction descriptor item for the hardware */
+	item->next = TERMINATE;
+	item->info = INFO_BYTES(length) | INFO_IOC | INFO_ACTIVE;
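+	/* fill in the buffer page pointers, one per 4K page of the transfer */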
+	item->page0 = req->dma;
+	item->page1 = (req->dma + 0x1000) & 0xfffff000;
+	item->page2 = (req->dma + 0x2000) & 0xfffff000;
+	item->page3 = (req->dma + 0x3000) & 0xfffff000;
+
+	/* Add the new request to the end of the queue */
+	last = ept->last;
+	if (last) {
+		/* There are already requests in the queue; add ourselves
+		 * to the end, but let the completion interrupt actually
+		 * start things going, to avoid hw issues
+		 */
+		last->next = req;
+
+		/* only modify the hw transaction next pointer if
+		 * that request is not live
+		 */
+		if (!last->live)
+			last->item->next = req->item_dma;
+	} else {
+		/* queue was empty -- kick the hardware */
+		ept->req = req;
+		usb_ept_start(ept);
+	}
+	ept->last = req;
+
+	spin_unlock_irqrestore(&ui->lock, flags);
+	return 0;
+}
+EXPORT_SYMBOL(usb_ept_queue_xfer);
+
+int usb_ept_flush(struct usb_endpoint *ept)
+{
+	printk("usb_ept_flush \n");
+	flush_endpoint(ept);
+	return 0;
+}
+
+int usb_ept_get_max_packet(struct usb_endpoint *ept)
+{
+	return ept->max_pkt;
+}
+EXPORT_SYMBOL(usb_ept_get_max_packet);
+
+int usb_remote_wakeup(void)
+{
+	struct usb_info *ui = the_usb_info;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	if (!ui->remote_wakeup) {
+		spin_unlock_irqrestore(&ui->lock, flags);
+		pr_err("%s: remote wakeup not supported\n", __func__);
+		return -ENOTSUPP;
+	}
+
+	if (!ui->online) {
+		spin_unlock_irqrestore(&ui->lock, flags);
+		pr_err("%s: device is not configured\n", __func__);
+		return -ENODEV;
+	}
+
+	if (ui->in_lpm)
+		usb_lpm_exit(ui);
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	/* if usb_lpm_exit was unable to wake up the phy itself,
+	 * it schedules the wakeup_phy work item; if that work is
+	 * still pending, cancel it and run it synchronously here
+	 */
+	if (cancel_work_sync(&ui->li.wakeup_phy))
+		usb_lpm_wakeup_phy(NULL);
+
+	spin_lock_irqsave(&ui->lock, flags);
+	if (ui->in_lpm) {
+		spin_unlock_irqrestore(&ui->lock, flags);
+		pr_err("%s: cannot bring controller out of lpm\n", __func__);
+		return -ENODEV;
+	}
+
+	if (!usb_is_online(ui)) {
+		pr_debug("%s: enabling force resume\n", __func__);
+		writel(readl(USB_PORTSC) | PORTSC_FPR, USB_PORTSC);
+	} else
+		pr_debug("%s: controller seems to be out of suspend already\n",
+				__func__);
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(usb_remote_wakeup);
+
+/* --- endpoint 0 handling --- */
+
+static void set_configuration(struct usb_info *ui, int yes)
+{
+	unsigned i;
+
+	ui->online = !!yes;
+
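+	/* notify every function in the active composition of the change */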
+	for (i = 0; i < ui->num_funcs; i++) {
+		struct usb_function_info *fi = ui->func[i];
+		if (!fi || !(ui->composition->functions & (1 << i)))
+			continue;
+		if (fi->func->configure)
+			fi->func->configure(yes, fi->func->context);
+	}
+}
+
+static void ep0out_complete(struct usb_endpoint *ept, struct usb_request *req)
+{
+	req->complete = 0;
+}
+
+static void ep0in_complete(struct usb_endpoint *ept, struct usb_request *req)
+{
+	/* queue up the receive of the ACK response from the host */
+	if (req->status == 0) {
+		struct usb_info *ui = ept->ui;
+		req->length = 0;
+		req->complete = ep0out_complete;
+		usb_ept_queue_xfer(&ui->ep0out, req);
+	}
+}
+
+static void ep0in_complete_sendzero(
+		struct usb_endpoint *ept, struct usb_request *req)
+{
+	if (req->status == 0) {
+		struct usb_info *ui = ept->ui;
+		req->length = 0;
+		req->complete = ep0in_complete;
+		usb_ept_queue_xfer(&ui->ep0in, req);
+	}
+}
+
+static void ep0_status_complete(
+		struct usb_endpoint *ept, struct usb_request *req)
+{
+	struct usb_info *ui = ept->ui;
+	unsigned int i;
+
+	if (!ui->test_mode)
+		return;
+
+	switch (ui->test_mode) {
+	case J_TEST:
+		pr_info("usb electrical test mode: (J)\n");
+		i = readl(USB_PORTSC) & (~PORTSC_PTC);
+		writel(i | PORTSC_PTC_J_STATE, USB_PORTSC);
+		break;
+
+	case K_TEST:
+		pr_info("usb electrical test mode: (K)\n");
+		i = readl(USB_PORTSC) & (~PORTSC_PTC);
+		writel(i | PORTSC_PTC_K_STATE, USB_PORTSC);
+		break;
+
+	case SE0_NAK_TEST:
+		pr_info("usb electrical test mode: (SE0-NAK)\n");
+		i = readl(USB_PORTSC) & (~PORTSC_PTC);
+		writel(i | PORTSC_PTC_SE0_NAK, USB_PORTSC);
+		break;
+
+	case TST_PKT_TEST:
+		pr_info("usb electrical test mode: (TEST_PKT)\n");
+		i = readl(USB_PORTSC) & (~PORTSC_PTC);
+		writel(i | PORTSC_PTC_TST_PKT, USB_PORTSC);
+		break;
+	default:
+		pr_err("usb:%s: undefined test mode: (%x)\n",
+				__func__, ui->test_mode);
+	}
+
+}
+
+static void ep0_setup_ack(struct usb_info *ui)
+{
+	struct usb_request *req = ui->setup_req;
+	req->length = 0;
+	req->complete = ep0_status_complete;
+	usb_ept_queue_xfer(&ui->ep0in, req);
+}
+
+static void ep0_setup_stall(struct usb_info *ui)
+{
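+	/* stall ep0 in both directions: bit 0 = RX stall, bit 16 = TX stall */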
+	writel((1<<16) | (1<<0), USB_ENDPTCTRL(0));
+}
+
+static void ep0_setup_receive(struct usb_info *ui, unsigned len)
+{
+	ui->ep0out_req->length = len;
+	usb_ept_queue_xfer(&ui->ep0out, ui->ep0out_req);
+}
+
+static void ep0_setup_send(struct usb_info *ui, unsigned wlen)
+{
+	struct usb_request *req = ui->setup_req;
+	struct usb_endpoint *ept = &ui->ep0in;
+
+	/* never send more data than the host requested */
+	if (req->length > wlen)
+		req->length = wlen;
+
+	/* if we are sending a short response that ends on
+	 * a packet boundary, we'll need to send a zero length
+	 * packet as well.
+	 */
+	if ((req->length != wlen) && ((req->length & 63) == 0)) {
+		req->complete = ep0in_complete_sendzero;
+	} else {
+		req->complete = ep0in_complete;
+	}
+
+	usb_ept_queue_xfer(ept, req);
+}
+
+
+static int usb_find_descriptor(struct usb_info *ui, struct usb_ctrlrequest *ctl,
+				struct usb_request *req);
+
+static void handle_setup(struct usb_info *ui)
+{
+	struct usb_ctrlrequest ctl;
+
+	memcpy(&ctl, ui->ep0out.head->setup_data, sizeof(ctl));
+	writel(EPT_RX(0), USB_ENDPTSETUPSTAT);
+
+	/* any pending ep0 transactions must be canceled */
+	flush_endpoint(&ui->ep0out);
+	flush_endpoint(&ui->ep0in);
+
+	/* let functions handle vendor and class requests */
+	if ((ctl.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) {
+		struct usb_function *func;
+
+		/* Send stall if received interface number is invalid */
+		if (ctl.wIndex >= ui->next_ifc_num)
+			goto stall;
+
+		func = ui->func2ifc_map[ctl.wIndex];
+		if (func && func->setup) {
+			if (ctl.bRequestType & USB_DIR_IN) {
+				struct usb_request *req = ui->setup_req;
+				int ret = func->setup(&ctl,
+						req->buf, SETUP_BUF_SIZE,
+						func->context);
+				if (ret >= 0) {
+					req->length = ret;
+					ep0_setup_send(ui, ctl.wLength);
+					return;
+				}
+			} else {
+				int ret = func->setup(&ctl, NULL, 0,
+							func->context);
+				if (ret == 0) {
+					ep0_setup_ack(ui);
+					return;
+				} else if (ret > 0) {
+					ep0_setup_receive(ui, ret);
+					return;
+				}
+			}
+		}
+		goto stall;
+	}
+
+	switch (ctl.bRequest) {
+	case USB_REQ_GET_STATUS:
+	{
+		struct usb_request *req = ui->setup_req;
+		if ((ctl.bRequestType & (USB_DIR_MASK)) != (USB_DIR_IN))
+			break;
+		if (ctl.wLength != 2)
+			break;
+		req->length = 2;
+		switch (ctl.bRequestType & USB_RECIP_MASK) {
+		case USB_RECIP_ENDPOINT:
+		{
+			unsigned num = ctl.wIndex & USB_ENDPOINT_NUMBER_MASK;
+			struct usb_endpoint *ept;
+
+			if (num == 0)
+				break;
+			if (ctl.wIndex & USB_ENDPOINT_DIR_MASK)
+				num += 16;
+			ept = ui->ept + num;
+			memcpy(req->buf, &ept->ept_halted, 2);
+			break;
+		}
+
+		case USB_RECIP_DEVICE:
+		{
+			unsigned short temp = 0;
+			if (usb_msm_get_selfpowered())
+				temp = 1 << USB_DEVICE_SELF_POWERED;
+			if (usb_msm_get_remotewakeup())
+				temp |= 1 << USB_DEVICE_REMOTE_WAKEUP;
+			memcpy(req->buf, &temp, 2);
+			break;
+		}
+
+		case USB_RECIP_INTERFACE:
+			memset(req->buf, 0, 2);
+			break;
+		default:
+			printk(KERN_ERR "Unreconginized recipient\n");
+			break;
+		}
+
+		ep0_setup_send(ui, 2);
+		return;
+	}
+
+	case USB_REQ_GET_DESCRIPTOR:
+	{
+		struct usb_request *req;
+
+		if ((ctl.bRequestType & (USB_DIR_MASK)) != (USB_DIR_IN))
+			break;
+
+		req = ui->setup_req;
+		if (!usb_find_descriptor(ui, &ctl, req)) {
+			if (req->length > ctl.wLength)
+				req->length = ctl.wLength;
+			ep0_setup_send(ui, ctl.wLength);
+			return;
+		}
+		break;
+	}
+
+	case USB_REQ_SET_FEATURE:
+		if ((ctl.bRequestType & (USB_DIR_MASK)) != (USB_DIR_OUT))
+			break;
+		if (ctl.wLength != 0)
+			break;
+		switch (ctl.bRequestType & USB_RECIP_MASK) {
+		case USB_RECIP_DEVICE:
+			if (ctl.wValue == USB_DEVICE_REMOTE_WAKEUP) {
+				ui->remote_wakeup = 1;
+				ep0_setup_ack(ui);
+				return;
+			} else if (ctl.wValue == USB_DEVICE_TEST_MODE) {
+				if (ctl.wIndex & 0x0f)
+					break;
+				ui->test_mode = ctl.wIndex;
+				ep0_setup_ack(ui);
+				return;
+			}
+			break;
+
+		case USB_RECIP_ENDPOINT:
+		{
+			unsigned num = ctl.wIndex & USB_ENDPOINT_NUMBER_MASK;
+			if ((num == 0) || (ctl.wValue != 0))
+				break;
+			if (ctl.wIndex & USB_ENDPOINT_DIR_MASK)
+				num += 16;
+			usb_ept_set_halt(ui->ept + num);
+			ep0_setup_ack(ui);
+			return;
+		}
+
+		default:
+			pr_err("usb: %s: set_feature: unrecognized recipient\n",
+					__func__);
+			break;
+		}
+		break;
+
+	case USB_REQ_CLEAR_FEATURE:
+	{
+		if ((ctl.bRequestType & (USB_DIR_MASK)) != (USB_DIR_OUT))
+			break;
+		if (ctl.wLength != 0)
+			break;
+
+		switch (ctl.bRequestType & USB_RECIP_MASK) {
+		case USB_RECIP_DEVICE:
+			if (ctl.wValue != USB_DEVICE_REMOTE_WAKEUP)
+				break;
+			ui->remote_wakeup = 0;
+			ep0_setup_ack(ui);
+			return;
+		case USB_RECIP_ENDPOINT:
+		{
+			unsigned num;
+			if (ctl.wValue != USB_ENDPOINT_HALT)
+				break;
+			num = ctl.wIndex & USB_ENDPOINT_NUMBER_MASK;
+			if (num != 0) {
+				if (ctl.wIndex & USB_ENDPOINT_DIR_MASK)
+					num += 16;
+				usb_ept_clear_halt(ui->ept + num);
+			}
+			ep0_setup_ack(ui);
+			return;
+		}
+		default:
+			pr_info("unsupported clear feature command\n");
+			pr_info("Request-type:(%08x) wValue:(%08x) "
+					"wIndex:(%08x) wLength:(%08x)\n",
+						ctl.bRequestType, ctl.wValue,
+						ctl.wIndex, ctl.wLength);
+			break;
+		}
+		break;
+	}
+
+	case USB_REQ_SET_INTERFACE:
+		if ((ctl.bRequestType & (USB_DIR_MASK | USB_RECIP_MASK))
+			!= (USB_DIR_OUT | USB_RECIP_INTERFACE))
+			break;
+		if (ui->func2ifc_map[ctl.wIndex]->set_interface) {
+			ui->func2ifc_map[ctl.wIndex]->set_interface(ctl.wIndex,
+					ctl.wValue,
+					ui->func2ifc_map[ctl.wIndex]->context);
+			ep0_setup_ack(ui);
+			return;
+		}
+		break;
+	case USB_REQ_GET_INTERFACE:
+		{
+		struct usb_function *f;
+		struct usb_request *req = ui->setup_req;
+		int ifc_num = ctl.wIndex;
+		int ret = 0;
+
+		if ((ctl.bRequestType & (USB_DIR_MASK | USB_RECIP_MASK))
+					!= (USB_DIR_IN | USB_RECIP_INTERFACE))
+			break;
+
+		f = ui->func2ifc_map[ifc_num];
+		if (!f || !f->get_interface)
+			break;
+		ret = f->get_interface(ifc_num,
+				ui->func2ifc_map[ifc_num]->context);
+		if (ret < 0)
+			break;
+		req->length = ctl.wLength;
+		memcpy(req->buf, &ret, req->length);
+		ep0_setup_send(ui, ctl.wLength);
+		return;
+		}
+	case USB_REQ_SET_CONFIGURATION:
+		if ((ctl.bRequestType & USB_DIR_MASK) != USB_DIR_OUT)
+			break;
+		ui->configured = ctl.wValue;
+		pr_info("hsusb set_configuration wValue = %d usbcmd = %x\n",
+						ctl.wValue, readl(USB_USBCMD));
+		set_configuration(ui, ctl.wValue);
+		ep0_setup_ack(ui);
+		ui->flags = USB_FLAG_CONFIGURE;
+		if (ui->configured)
+			ui->usb_state = USB_STATE_CONFIGURED;
+		queue_delayed_work(usb_work, &ui->work, 0);
+		return;
+
+	case USB_REQ_GET_CONFIGURATION:
+	{
+		unsigned conf;
+		struct usb_request *req = ui->setup_req;
+		req->length = 1;
+		conf = ui->configured;
+		memcpy(req->buf, &conf, req->length);
+		ep0_setup_send(ui, ctl.wLength);
+		return;
+	}
+
+	case USB_REQ_SET_ADDRESS:
+		if ((ctl.bRequestType & (USB_DIR_MASK | USB_RECIP_MASK))
+			!= (USB_DIR_OUT | USB_RECIP_DEVICE))
+			break;
+		ui->usb_state = USB_STATE_ADDRESS;
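+		/* bit 24 (USBADRA) defers the address until the status stage */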
+		writel((ctl.wValue << 25) | (1 << 24), USB_DEVICEADDR);
+		ep0_setup_ack(ui);
+		return;
+	}
+
+stall:
+	ep0_setup_stall(ui);
+	return;
+
+}
+
+static void handle_endpoint(struct usb_info *ui, unsigned bit)
+{
+	struct usb_endpoint *ept = ui->ept + bit;
+	struct msm_request *req;
+	unsigned long flags;
+	unsigned info;
+
+#if 0
+	printk(KERN_INFO "handle_endpoint() %d %s req=%p(%08x)\n",
+	       ept->num, (ept->flags & EPT_FLAG_IN) ? "in" : "out",
+	       ept->req, ept->req ? ept->req->item_dma : 0);
+#endif
+	if (!ept) {
+		pr_err("%s: ept is null: ep bit = %d\n", __func__, bit);
+		return;
+	}
+
+	/* expire all requests that are no longer active */
+	spin_lock_irqsave(&ui->lock, flags);
+	while ((req = ept->req)) {
+		/* clean speculative fetches on req->item->info */
+		dma_coherent_post_ops();
+		info = req->item->info;
+
+		/* if we've processed all live requests, time to
+		 * restart the hardware on the next non-live request
+		 */
+		if (!req->live) {
+			usb_ept_start(ept);
+			break;
+		}
+
+		/* if the transaction is still in-flight, stop here */
+		if (info & INFO_ACTIVE)
+			break;
+
+		/* advance ept queue to the next request */
+		ept->req = req->next;
+		if (ept->req == 0)
+			ept->last = 0;
+
+		dma_unmap_single(NULL, req->dma, req->req.length,
+				(ept->flags & EPT_FLAG_IN) ?
+				DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+		if (info & (INFO_HALTED | INFO_BUFFER_ERROR | INFO_TXN_ERROR)) {
+			/* XXX pass on more specific error code */
+			req->req.status = -EIO;
+			req->req.actual = 0;
+			printk(KERN_INFO "hsusb: ept %d %s error. info=%08x\n",
+				ept->num,
+				(ept->flags & EPT_FLAG_IN) ? "in" : "out",
+			       info);
+		} else {
+			req->req.status = 0;
+			req->req.actual = req->req.length - ((info >> 16) & 0x7FFF);
+		}
+		req->busy = 0;
+		req->live = 0;
+		if (req->dead)
+			do_free_req(ui, req);
+
+		if (req->req.complete) {
+			spin_unlock_irqrestore(&ui->lock, flags);
+			req->req.complete(ept, &req->req);
+			spin_lock_irqsave(&ui->lock, flags);
+		}
+	}
+	spin_unlock_irqrestore(&ui->lock, flags);
+}
+
+static void flush_endpoint_hw(struct usb_info *ui, unsigned bits)
+{
+	/* flush endpoint, canceling transactions
+	** - this can take a "large amount of time" (per databook)
+	** - the flush can fail in some cases, thus we check STAT
+	**   and repeat if we're still operating
+	**   (does the fact that this doesn't use the tripwire matter?!)
+	*/
+
+	if (ui->in_lpm) {
+		pr_err("%s: controller is in lpm, cannot proceed\n", __func__);
+		return;
+	}
+
+	do {
+		writel(bits, USB_ENDPTFLUSH);
+		while (readl(USB_ENDPTFLUSH) & bits)
+			udelay(100);
+	} while (readl(USB_ENDPTSTAT) & bits);
+}
+
+static void flush_endpoint_sw(struct usb_endpoint *ept)
+{
+	struct usb_info *ui = ept->ui;
+	struct msm_request *req, *next;
+	unsigned long flags;
+
+	/* inactive endpoints have nothing to do here */
+	if (!ui || !ept->alloced || !ept->max_pkt)
+		return;
+
+	/* put the queue head in a sane state */
+	ept->head->info = 0;
+	ept->head->next = TERMINATE;
+
+	/* cancel any pending requests */
+	spin_lock_irqsave(&ui->lock, flags);
+	req = ept->req;
+	ept->req = 0;
+	ept->last = 0;
+	while (req != 0) {
+		next = req->next;
+
+		req->busy = 0;
+		req->live = 0;
+		req->req.status = -ENODEV;
+		req->req.actual = 0;
+		if (req->req.complete) {
+			spin_unlock_irqrestore(&ui->lock, flags);
+			req->req.complete(ept, &req->req);
+			spin_lock_irqsave(&ui->lock, flags);
+		}
+		if (req->dead)
+			do_free_req(ui, req);
+		req = next;
+	}
+	spin_unlock_irqrestore(&ui->lock, flags);
+}
+
+static void flush_endpoint(struct usb_endpoint *ept)
+{
+	if (!ept->ui)
+		return;
+
+	flush_endpoint_hw(ept->ui, (1 << ept->bit));
+	flush_endpoint_sw(ept);
+}
+
+static void flush_all_endpoints(struct usb_info *ui)
+{
+	unsigned n;
+
+	flush_endpoint_hw(ui, 0xffffffff);
+
+	for (n = 0; n < 32; n++)
+		flush_endpoint_sw(ui->ept + n);
+}
+
+#define HW_DELAY_FOR_LPM msecs_to_jiffies(1000)
+#define DELAY_FOR_USB_VBUS_STABILIZE msecs_to_jiffies(500)
+static irqreturn_t usb_interrupt(int irq, void *data)
+{
+	struct usb_info *ui = data;
+	unsigned n;
+	unsigned speed;
+
+	if (!ui->active)
+		return IRQ_HANDLED;
+
+	if (ui->in_lpm) {
+		usb_lpm_exit(ui);
+		return IRQ_HANDLED;
+	}
+
+	n = readl(USB_USBSTS);
+	writel(n, USB_USBSTS);
+
+	/* somehow we got an IRQ while in the reset sequence: ignore it */
+	if (ui->running == 0) {
+		pr_err("%s: ui->running is zero\n", __func__);
+		return IRQ_HANDLED;
+	}
+
+	if (n & STS_PCI) {
+		if (!(readl(USB_PORTSC) & PORTSC_PORT_RESET)) {
+			speed = (readl(USB_PORTSC) & PORTSC_PORT_SPEED_MASK);
+			switch (speed) {
+			case PORTSC_PORT_SPEED_HIGH:
+				pr_info("hsusb resume: speed = HIGH\n");
+				ui->speed = USB_SPEED_HIGH;
+				break;
+
+			case PORTSC_PORT_SPEED_FULL:
+				pr_info("hsusb resume: speed = FULL\n");
+				ui->speed = USB_SPEED_FULL;
+				break;
+
+			default:
+				pr_err("hsusb resume: Unknown Speed\n");
+				ui->speed = USB_SPEED_UNKNOWN;
+				break;
+			}
+		}
+
+		/* a port change (pci) interrupt is also generated when
+		 * resuming from bus suspend; the following check avoids
+		 * kick-starting the usb main thread for pci interrupts
+		 * that occur during enumeration
+		 */
+		if (ui->configured && ui->chg_type == USB_CHG_TYPE__SDP) {
+			ui->usb_state = USB_STATE_CONFIGURED;
+			ui->flags = USB_FLAG_RESUME;
+			queue_delayed_work(usb_work, &ui->work, 0);
+		}
+	}
+
+	if (n & STS_URI) {
+		pr_info("hsusb reset interrupt\n");
+		ui->usb_state = USB_STATE_DEFAULT;
+		ui->configured = 0;
+		schedule_work(&ui->chg_stop);
+
+		writel(readl(USB_ENDPTSETUPSTAT), USB_ENDPTSETUPSTAT);
+		writel(readl(USB_ENDPTCOMPLETE), USB_ENDPTCOMPLETE);
+		writel(0xffffffff, USB_ENDPTFLUSH);
+		writel(0, USB_ENDPTCTRL(1));
+
+		if (ui->online != 0) {
+			/* marking us offline will cause ept queue attempts to fail */
+			ui->online = 0;
+
+			flush_all_endpoints(ui);
+
+			/* XXX: we can't seem to detect going offline, so deconfigure
+			 * XXX: on reset for the time being
+			 */
+			set_configuration(ui, 0);
+		}
+	}
+
+	if (n & STS_SLI) {
+		pr_info("hsusb suspend interrupt\n");
+		ui->usb_state = USB_STATE_SUSPENDED;
+
+		/* stop usb charging */
+		schedule_work(&ui->chg_stop);
+	}
+
+	if (n & STS_UI) {
+		n = readl(USB_ENDPTSETUPSTAT);
+		if (n & EPT_RX(0))
+			handle_setup(ui);
+
+		n = readl(USB_ENDPTCOMPLETE);
+		writel(n, USB_ENDPTCOMPLETE);
+		while (n) {
+			unsigned bit = __ffs(n);
+			handle_endpoint(ui, bit);
+			n = n & (~(1 << bit));
+		}
+	}
+
+	n = readl(USB_OTGSC);
+	writel(n, USB_OTGSC);
+
+	if (n & OTGSC_BSVIS) {
+		/* Check the B Session Valid bit to determine vbus status */
+		if (B_SESSION_VALID & n) {
+			pr_info("usb cable connected\n");
+			ui->usb_state = USB_STATE_POWERED;
+			ui->flags = USB_FLAG_VBUS_ONLINE;
+			/* Wait for 100ms to stabilize VBUS before initializing
+			 * USB and detecting charger type
+			 */
+			queue_delayed_work(usb_work, &ui->work, 0);
+		} else {
+			int i;
+
+			usb_disable_pullup(ui);
+
+			printk(KERN_INFO "usb cable disconnected\n");
+			ui->usb_state = USB_STATE_NOTATTACHED;
+			ui->configured = 0;
+			for (i = 0; i < ui->num_funcs; i++) {
+				struct usb_function_info *fi = ui->func[i];
+				if (!fi ||
+				!(ui->composition->functions & (1 << i)))
+					continue;
+				if (fi->func->disconnect)
+					fi->func->disconnect
+						(fi->func->context);
+			}
+			ui->flags = USB_FLAG_VBUS_OFFLINE;
+			queue_delayed_work(usb_work, &ui->work, 0);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void usb_prepare(struct usb_info *ui)
+{
+	memset(ui->buf, 0, 4096);
+	ui->head = (void *) (ui->buf + 0);
+
+	/* only important for reset/reinit */
+	memset(ui->ept, 0, sizeof(ui->ept));
+	ui->next_item = 0;
+	ui->speed = USB_SPEED_UNKNOWN;
+
+	init_endpoints(ui);
+
+	ui->ep0in.max_pkt = 64;
+	ui->ep0in.ui = ui;
+	ui->ep0in.alloced = 1;
+	ui->ep0out.max_pkt = 64;
+	ui->ep0out.ui = ui;
+	ui->ep0out.alloced = 1;
+
+	ui->setup_req = usb_ept_alloc_req(&ui->ep0in, SETUP_BUF_SIZE);
+	ui->ep0out_req = usb_ept_alloc_req(&ui->ep0out, ui->ep0out.max_pkt);
+
+	INIT_WORK(&ui->chg_stop, usb_chg_stop);
+	INIT_WORK(&ui->li.wakeup_phy, usb_lpm_wakeup_phy);
+	INIT_DELAYED_WORK(&ui->work, usb_do_work);
+	INIT_DELAYED_WORK(&ui->chg_legacy_det, usb_chg_legacy_detect);
+}
+
+static int usb_is_online(struct usb_info *ui)
+{
+	/* continue lpm if bus is suspended or disconnected or stopped*/
+	if (((readl(USB_PORTSC) & PORTSC_SUSP) == PORTSC_SUSP) ||
+			((readl(USB_PORTSC) & PORTSC_CCS) == 0) ||
+			((readl(USB_USBCMD) & USBCMD_RS) == 0))
+		return 0;
+
+	pr_debug("usb is online\n");
+	pr_debug("usbcmd:(%08x) usbsts:(%08x) portsc:(%08x)\n",
+			readl(USB_USBCMD),
+			readl(USB_USBSTS),
+			readl(USB_PORTSC));
+	return -1;
+}
+
+static int usb_wakeup_phy(struct usb_info *ui)
+{
+	int i;
+
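+	/* stop blocking the ULPI STP signal before waking the phy */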
+	writel(readl(USB_USBCMD) & ~ULPI_STP_CTRL, USB_USBCMD);
+
+	/* some circuits automatically clear PHCD bit */
+	for (i = 0; i < 5 && (readl(USB_PORTSC) & PORTSC_PHCD); i++) {
+		writel(readl(USB_PORTSC) & ~PORTSC_PHCD, USB_PORTSC);
+		msleep(1);
+	}
+
+	if ((readl(USB_PORTSC) & PORTSC_PHCD)) {
+		pr_err("%s: cannot clear phcd bit\n", __func__);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int usb_suspend_phy(struct usb_info *ui)
+{
+	int i;
+	unsigned long flags;
+
+	if (usb_is_online(ui))
+		return -1;
+
+	/* The spec mentions the following bits for LPM with an external phy,
+	 * but they are ignored here because:
+	 * 1. disabling the interface protection circuit: with the
+	 * protection circuit disabled we cannot come out of lpm,
+	 * as async interrupts would be disabled
+	 * 2. setting the suspendM bit: this bit is set by the usb
+	 * controller once we set the phcd bit.
+	 */
+	switch (PHY_TYPE(ui->phy_info)) {
+	case USB_PHY_INTEGRATED:
+		if (!is_phy_45nm())
+			ulpi_read(ui, 0x14);
+
+		/* turn on/off otg comparators */
+		if (ui->vbus_sn_notif &&
+			ui->usb_state == USB_STATE_NOTATTACHED)
+			ulpi_write(ui, 0x00, 0x30);
+		else
+			ulpi_write(ui, 0x01, 0x30);
+
+		if (!is_phy_45nm())
+			ulpi_write(ui, 0x08, 0x09);
+
+		break;
+
+	case USB_PHY_UNDEFINED:
+		pr_err("%s: undefined phy type\n", __func__);
+		return -1;
+	}
+
+	/* loop for large amount of time */
+	for (i = 0; i < 500; i++) {
+		spin_lock_irqsave(&ui->lock, flags);
+		if (usb_is_online(ui)) {
+			spin_unlock_irqrestore(&ui->lock, flags);
+			return -1;
+		}
+		/* set phy to be in lpm */
+		writel(readl(USB_PORTSC) | PORTSC_PHCD, USB_PORTSC);
+		spin_unlock_irqrestore(&ui->lock, flags);
+
+		msleep(1);
+		if (readl(USB_PORTSC) & PORTSC_PHCD)
+			goto blk_stp_sig;
+	}
+
+	if (!(readl(USB_PORTSC) & PORTSC_PHCD)) {
+		pr_err("unable to set phcd of portsc reg\n");
+		pr_err("Reset HW link and phy to recover from phcd error\n");
+		usb_hw_reset(ui);
+		return -1;
+	}
+
+	/* we have to set this bit again to work-around h/w bug */
+	writel(readl(USB_PORTSC) | PORTSC_PHCD, USB_PORTSC);
+
+blk_stp_sig:
+	/* block the stop signal */
+	writel(readl(USB_USBCMD) | ULPI_STP_CTRL, USB_USBCMD);
+
+	return 0;
+}
+
+/* SW workarounds
+Issue#2		- Integrated PHY Calibration
+Symptom		- Electrical compliance failure in eye-diagram tests
+SW workaround		- Try to raise amplitude to 400mV
+
+Issue#3		- AHB Posted Writes
+Symptom		- USB stability
+SW workaround		- This programs xtor ON, BURST disabled and
+			unspecified length of INCR burst enabled
+*/
+static int usb_hw_reset(struct usb_info *ui)
+{
+	unsigned i;
+	struct msm_hsusb_platform_data *pdata;
+	unsigned long timeout;
+	unsigned val = 0;
+
+	pdata = ui->pdev->dev.platform_data;
+
+	clk_enable(ui->clk);
+	/* reset the phy before resetting link */
+	if (readl(USB_PORTSC) & PORTSC_PHCD)
+		usb_wakeup_phy(ui);
+	/* rpc call for phy_reset */
+	if (ui->pdata->phy_reset)
+		ui->pdata->phy_reset(ui->addr);
+	else
+		msm_hsusb_phy_reset();
+	/* Give some delay to settle phy after reset */
+	msleep(100);
+
+	/* RESET */
+	writel(USBCMD_RESET, USB_USBCMD);
+	timeout = jiffies + USB_LINK_RESET_TIMEOUT;
+	while (readl(USB_USBCMD) & USBCMD_RESET) {
+		if (time_after(jiffies, timeout)) {
+			dev_err(&ui->pdev->dev, "usb link reset timeout\n");
+			break;
+		}
+		msleep(1);
+	}
+
+	/* select DEVICE mode with SDIS active */
+	writel((USBMODE_SDIS | USBMODE_DEVICE), USB_USBMODE);
+	msleep(1);
+
+	/* select ULPI phy */
+	i = (readl(USB_PORTSC) & ~PORTSC_PTS);
+	writel(i | PORTSC_PTS_ULPI, USB_PORTSC);
+	/* set usb controller interrupt latency to zero*/
+	writel((readl(USB_USBCMD) & ~USBCMD_ITC_MASK) | USBCMD_ITC(0),
+							USB_USBCMD);
+
+	/* If the target is 7x01 and roc version is > 1.2, set
+	 * the AHB mode to 2 for maximum performance, else set
+	 * it to 1, to bypass the AHB transactor for stability.
+	 */
+	if (PHY_TYPE(ui->phy_info) == USB_PHY_EXTERNAL) {
+		if (pdata->soc_version >= SOC_ROC_2_0)
+			writel(0x02, USB_ROC_AHB_MODE);
+		else
+			writel(0x01, USB_ROC_AHB_MODE);
+	} else {
+		unsigned cfg_val;
+
+		/* Raise  amplitude to 400mV
+		 * SW workaround, Issue#2
+		 */
+		cfg_val = ulpi_read(ui, ULPI_CONFIG_REG);
+		cfg_val |= ULPI_AMPLITUDE_MAX;
+		ulpi_write(ui, cfg_val, ULPI_CONFIG_REG);
+
+		writel(0x0, USB_AHB_BURST);
+		writel(0x00, USB_AHB_MODE);
+	}
+
+	/* TBD: do we have to add DpRise, ChargerRise and
+	 * IdFloatRise for 45nm
+	 */
+	/* Disable VbusValid and SessionEnd comparators */
+	val = ULPI_VBUS_VALID | ULPI_SESS_END;
+
+	/* enable id interrupt only when transceiver is available */
+	if (ui->xceiv)
+		writel(readl(USB_OTGSC) | OTGSC_BSVIE | OTGSC_IDIE, USB_OTGSC);
+	else {
+		writel((readl(USB_OTGSC) | OTGSC_BSVIE) & ~OTGSC_IDPU,
+							USB_OTGSC);
+		ulpi_write(ui, ULPI_IDPU, ULPI_OTG_CTRL_CLR);
+		val |= ULPI_HOST_DISCONNECT | ULPI_ID_GND;
+	}
+	ulpi_write(ui, val, ULPI_INT_RISE_CLR);
+	ulpi_write(ui, val, ULPI_INT_FALL_CLR);
+
+	/* We are just setting the pointer in the hw block. Since the
+	 * endpoint isn't enabled, the hw block doesn't read the contents
+	 * of ui->dma - so we don't need a barrier here.
+	 */
+	writel(ui->dma, USB_ENDPOINTLISTADDR);
+
+	clk_disable(ui->clk);
+
+	return 0;
+}
+
+static void usb_reset(struct usb_info *ui)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	ui->running = 0;
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+#if 0
+	/* we should flush and shutdown cleanly if already running */
+	writel(0xffffffff, USB_ENDPTFLUSH);
+	msleep(2);
+#endif
+
+	if (usb_hw_reset(ui)) {
+		pr_info("%s: h/w reset failed\n", __func__);
+		return;
+	}
+
+	usb_configure_endpoint(&ui->ep0in, NULL);
+	usb_configure_endpoint(&ui->ep0out, NULL);
+
+	/* marking us offline will cause ept queue attempts to fail */
+	ui->online = 0;
+
+	/* terminate any pending transactions */
+	flush_all_endpoints(ui);
+
+	set_configuration(ui, 0);
+
+	spin_lock_irqsave(&ui->lock, flags);
+	ui->running = 1;
+	spin_unlock_irqrestore(&ui->lock, flags);
+}
+
+static void usb_enable(void *handle, int enable)
+{
+	struct usb_info *ui = handle;
+	unsigned long flags;
+	spin_lock_irqsave(&ui->lock, flags);
+
+	if (enable) {
+		ui->flags |= USB_FLAG_RESET;
+		ui->active = 1;
+		spin_unlock_irqrestore(&ui->lock, flags);
+		usb_do_work(&ui->work.work);
+	} else {
+		ui->active = 0;
+		spin_unlock_irqrestore(&ui->lock, flags);
+		usb_clk_disable(ui);
+		msm_hsusb_suspend_locks_acquire(ui, 0);
+	}
+}
+
+static struct msm_otg_ops dcd_ops = {
+	.request = usb_enable,
+};
+
+void usb_start(struct usb_info *ui)
+{
+	int i, ret;
+
+	for (i = 0; i < ui->num_funcs; i++) {
+		struct usb_function_info *fi = ui->func[i];
+		if (!fi || !(ui->composition->functions & (1<<i)))
+			continue;
+		if (fi->enabled) {
+			pr_info("usb_bind_func() (%s)\n", fi->func->name);
+			fi->func->bind(fi->func->context);
+		}
+	}
+
+	ui->clk_enabled = 0;
+	ui->vreg_enabled = 0;
+
+	ui->xceiv = msm_otg_get_transceiver();
+	if (ui->xceiv) {
+		ui->flags = USB_FLAG_REG_OTG;
+		queue_delayed_work(usb_work, &ui->work, 0);
+	} else {
+		/* Initialize pm app RPC */
+		ret = msm_pm_app_rpc_init();
+		if (ret) {
+			pr_err("%s: pm_app_rpc connect failed\n", __func__);
+			goto out;
+		}
+		pr_info("%s: pm_app_rpc connect success\n", __func__);
+
+		ret = msm_pm_app_register_vbus_sn(&msm_hsusb_set_vbus_state);
+		if (ret) {
+			pr_err("%s:PMIC VBUS SN notif not supported\n", \
+					__func__);
+			msm_pm_app_rpc_deinit();
+			goto out;
+		}
+		pr_info("%s:PMIC VBUS SN notif supported\n", \
+					__func__);
+
+		ret = msm_pm_app_enable_usb_ldo(1);
+		if (ret) {
+			pr_err("%s: unable to turn on internal LDO", \
+					__func__);
+			msm_pm_app_unregister_vbus_sn(
+					&msm_hsusb_set_vbus_state);
+			msm_pm_app_rpc_deinit();
+			goto out;
+		}
+		ui->vbus_sn_notif = 1;
+out:
+		ui->active = 1;
+		ui->flags |= (USB_FLAG_START | USB_FLAG_RESET);
+		queue_delayed_work(usb_work, &ui->work, 0);
+	}
+
+}
+
+static LIST_HEAD(usb_function_list);
+static DEFINE_MUTEX(usb_function_list_lock);
+
+
+static struct usb_function_info *usb_find_function(const char *name)
+{
+	struct list_head *entry;
+	list_for_each(entry, &usb_function_list) {
+		struct usb_function_info *fi =
+			list_entry(entry, struct usb_function_info, list);
+		if (fi) {
+			if (!strcmp(name, fi->func->name))
+				return fi;
+		}
+	}
+
+	return NULL;
+}
+
+static void usb_try_to_bind(void)
+{
+	struct usb_info *ui = the_usb_info;
+	unsigned long enabled_functions = 0;
+	int i;
+
+	if (!ui || ui->bound || !ui->pdev || !ui->composition)
+		return;
+
+	for (i = 0; i < ui->num_funcs; i++) {
+		if (ui->func[i])
+			enabled_functions |= (1 << i);
+	}
+	if ((enabled_functions & ui->composition->functions)
+					!= ui->composition->functions)
+		return;
+
+	usb_set_composition(ui->composition->product_id);
+	usb_configure_device_descriptor(ui);
+
+	/* we have found all the needed functions */
+	ui->bound = 1;
+	printk(KERN_INFO "msm_hsusb: functions bound. starting.\n");
+	usb_start(ui);
+}
+
+static int usb_get_function_index(const char *name)
+{
+	struct usb_info *ui = the_usb_info;
+	int i;
+
+	for (i = 0; i < ui->num_funcs; i++) {
+		if (!strcmp(name, ui->functions_map[i].name))
+			return i;
+	}
+	return -1;
+}
+
+int usb_function_register(struct usb_function *driver)
+{
+	struct usb_info *ui = the_usb_info;
+	struct usb_function_info *fi;
+	int ret = 0;
+	int index;
+
+	mutex_lock(&usb_function_list_lock);
+
+	index = usb_get_function_index(driver->name);
+	if (index < 0) {
+		pr_err("%s: unsupported function = %s\n",
+				__func__, driver->name);
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	fi = kzalloc(sizeof(*fi), GFP_KERNEL);
+	if (!fi) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	fi->func = driver;
+	list_add(&fi->list, &usb_function_list);
+	ui->func[index] = fi;
+	fi->func->ep0_out_req = ui->ep0out_req;
+	fi->func->ep0_in_req = ui->setup_req;
+	fi->func->ep0_out = &ui->ep0out;
+	fi->func->ep0_in = &ui->ep0in;
+	pr_info("%s: name = '%s',  map = %d\n", __func__, driver->name, index);
+
+	usb_try_to_bind();
+fail:
+	mutex_unlock(&usb_function_list_lock);
+	return ret;
+}
+EXPORT_SYMBOL(usb_function_register);
+
+static unsigned short usb_validate_product_id(unsigned short pid)
+{
+	struct usb_info *ui = the_usb_info;
+	int i;
+
+	if (!ui || !ui->pdata)
+		return 0;
+
+	/* set idProduct based on which functions are enabled */
+	for (i = 0; i < ui->pdata->num_compositions; i++) {
+		if (ui->pdata->compositions[i].product_id == pid)
+			break;
+	}
+
+	if (i < ui->pdata->num_compositions) {
+		struct usb_composition *comp = &ui->pdata->compositions[i];
+		for (i = 0; i < ui->num_funcs; i++) {
+			if (comp->functions & (1 << i)) {
+				if (!ui->func[i]) {
+					pr_err("%s: func(%d) not available\n",
+								__func__, i);
+					return 0;
+				}
+			}
+		}
+		return comp->product_id;
+	} else
+		pr_err("%s: Product id (%x) is not supported\n", __func__, pid);
+	return 0;
+}
+
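+/* Each bit in a composition's 'functions' mask selects one function by its
+ * index in the platform function map; a product id is returned only when the
+ * mask of enabled functions matches one composition exactly.
+ */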
+static unsigned short usb_get_product_id(unsigned long enabled_functions)
+{
+	struct usb_info *ui = the_usb_info;
+	int i;
+
+	if (!(ui && ui->pdata))
+		return 0;
+
+	/* set idProduct based on which functions are enabled */
+	for (i = 0; i < ui->pdata->num_compositions; i++) {
+		if (ui->pdata->compositions[i].functions == enabled_functions)
+			return ui->pdata->compositions[i].product_id;
+	}
+	return 0;
+}
+
+static void usb_uninit(struct usb_info *ui)
+{
+	int i;
+
+	for (i = 0; i < ui->strdesc_index; i++)
+		kfree(ui->strdesc[i]);
+	ui->strdesc_index = 1;
+	ui->next_ifc_num = 0;
+}
+
+static unsigned short usb_set_composition(unsigned short pid)
+{
+	struct usb_info *ui = the_usb_info;
+	int i;
+
+	if (!(ui && ui->pdata))
+		return 0;
+
+	/* Retrieve product id on enabled functions */
+	for (i = 0; i < ui->pdata->num_compositions; i++) {
+		if (ui->pdata->compositions[i].product_id == pid) {
+			ui->composition = &ui->pdata->compositions[i];
+			for (i = 0; i < ui->num_funcs; i++) {
+				struct usb_function_info *fi = ui->func[i];
+				if (ui->func && fi && fi->func) {
+					fi->enabled = (ui->composition->
+							functions >> i) & 1;
+				}
+			}
+			pr_info("%s: composition set to product id = %x\n",
+				__func__, ui->composition->product_id);
+			return ui->composition->product_id;
+		}
+	}
+	pr_err("%s: product id (%x) not supported\n", __func__, pid);
+	return 0;
+}
+
+static void usb_switch_composition(unsigned short pid)
+{
+	struct usb_info *ui = the_usb_info;
+	int i;
+	unsigned long flags;
+
+
+	if (!ui->active)
+		return;
+	if (!usb_validate_product_id(pid))
+		return;
+
+	disable_irq(ui->irq);
+	if (cancel_delayed_work_sync(&ui->work))
+		pr_info("%s: Removed work successfully\n", __func__);
+	if (ui->running) {
+		spin_lock_irqsave(&ui->lock, flags);
+		ui->running = 0;
+		ui->online = 0;
+		ui->bound = 0;
+		spin_unlock_irqrestore(&ui->lock, flags);
+		/* we should come out of lpm to access registers */
+		if (ui->in_lpm) {
+			if (PHY_TYPE(ui->phy_info) == USB_PHY_EXTERNAL) {
+				disable_irq(ui->gpio_irq[0]);
+				disable_irq(ui->gpio_irq[1]);
+			}
+
+			if (ui->usb_state == USB_STATE_NOTATTACHED
+						&& ui->vbus_sn_notif)
+				msm_pm_app_enable_usb_ldo(1);
+
+			usb_lpm_exit(ui);
+			if (cancel_work_sync(&ui->li.wakeup_phy))
+				usb_lpm_wakeup_phy(NULL);
+			ui->in_lpm = 0;
+		}
+		/* disable usb and session valid interrupts */
+		writel(0, USB_USBINTR);
+		writel(readl(USB_OTGSC) & ~OTGSC_BSVIE, USB_OTGSC);
+
+		/* stop the controller */
+		usb_disable_pullup(ui);
+		ui->usb_state = USB_STATE_NOTATTACHED;
+		switch_set_state(&ui->sdev, 0);
+		/* Before starting again, wait for 300ms
+		 * to make sure host detects soft disconnection
+		 */
+		msleep(300);
+	}
+
+	for (i = 0; i < ui->num_funcs; i++) {
+		struct usb_function_info *fi = ui->func[i];
+		if (!fi || !fi->func || !fi->enabled)
+			continue;
+		if (fi->func->configure)
+			fi->func->configure(0, fi->func->context);
+		if (fi->func->unbind)
+			fi->func->unbind(fi->func->context);
+	}
+
+	usb_uninit(ui);
+	usb_set_composition(pid);
+	usb_configure_device_descriptor(ui);
+
+	/* initialize functions */
+	for (i = 0; i < ui->num_funcs; i++) {
+		struct usb_function_info *fi = ui->func[i];
+		if (!fi || !(ui->composition->functions & (1 << i)))
+			continue;
+		if (fi->enabled) {
+			if (fi->func->bind)
+				fi->func->bind(fi->func->context);
+		}
+	}
+
+	ui->bound = 1;
+	ui->flags = USB_FLAG_RESET;
+	queue_delayed_work(usb_work, &ui->work, 0);
+	enable_irq(ui->irq);
+}
+
+void usb_function_enable(const char *function, int enable)
+{
+	struct usb_function_info *fi;
+	struct usb_info *ui = the_usb_info;
+	unsigned long functions_mask;
+	int curr_enable;
+	unsigned short pid;
+	int i;
+
+	if (!ui)
+		return;
+
+	pr_info("%s: name = %s, enable = %d\n", __func__, function, enable);
+
+	fi = usb_find_function(function);
+	if (!fi) {
+		pr_err("%s: function (%s) not registered with DCD\n",
+							__func__, function);
+		return;
+	}
+	if (fi->enabled == enable) {
+		pr_err("%s: function (%s) state is same\n",
+						__func__, function);
+		return;
+	}
+	functions_mask = 0;
+	curr_enable = fi->enabled;
+	fi->enabled = enable;
+	for (i = 0; i < ui->num_funcs; i++) {
+		struct usb_function_info *fi = ui->func[i];
+		if (fi && fi->enabled)
+			functions_mask |= (1 << i);
+	}
+
+	pid = usb_get_product_id(functions_mask);
+	if (!pid) {
+		fi->enabled = curr_enable;
+		pr_err("%s: mask (%lx) not matching with any products\n",
+						__func__, functions_mask);
+		pr_err("%s: continuing with current composition\n", __func__);
+		return;
+	}
+	usb_switch_composition(pid);
+}
+EXPORT_SYMBOL(usb_function_enable);
+
+static int usb_free(struct usb_info *ui, int ret)
+{
+	disable_irq_wake(ui->irq);
+	free_irq(ui->irq, ui);
+	if (ui->gpio_irq[0])
+		free_irq(ui->gpio_irq[0], NULL);
+	if (ui->gpio_irq[1])
+		free_irq(ui->gpio_irq[1], NULL);
+
+	dma_pool_destroy(ui->pool);
+	dma_free_coherent(&ui->pdev->dev, 4096, ui->buf, ui->dma);
+	kfree(ui->func);
+	kfree(ui->strdesc);
+	iounmap(ui->addr);
+	clk_put(ui->clk);
+	clk_put(ui->pclk);
+	clk_put(ui->cclk);
+	msm_hsusb_suspend_locks_init(ui, 0);
+	kfree(ui);
+
+	return ret;
+}
+
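+/* Sample the ULPI interrupt status to decide whether VBUS/session is
+ * currently valid; the raise/fall interrupt enables are cleared around the
+ * read so the poll does not generate events.  Bit 2 of the status register
+ * is assumed to be the session-valid indication.
+ */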
+static int usb_vbus_is_on(struct usb_info *ui)
+{
+	unsigned tmp;
+
+	/* disable session valid raising and falling interrupts */
+	ulpi_write(ui, ULPI_SESSION_VALID_RAISE, ULPI_USBINTR_ENABLE_RASING_C);
+	ulpi_write(ui, ULPI_SESSION_VALID_FALL, ULPI_USBINTR_ENABLE_FALLING_C);
+
+	tmp = ulpi_read(ui, ULPI_USBINTR_STATUS);
+
+	/* enable session valid raising and falling interrupts */
+	ulpi_write(ui, ULPI_SESSION_VALID_RAISE, ULPI_USBINTR_ENABLE_RASING_S);
+	ulpi_write(ui, ULPI_SESSION_VALID_FALL, ULPI_USBINTR_ENABLE_FALLING_S);
+
+	if (tmp & (1 << 2))
+		return 1;
+	return 0;
+}
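+
+/* Main state machine of the driver, run from the usb_work workqueue.  Flag
+ * bits posted by the ISR and the sysfs/OTG hooks drive transitions between
+ * IDLE, ONLINE and OFFLINE; any unhandled flag falls through to a reset and
+ * a return to IDLE.
+ */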
+static void usb_do_work(struct work_struct *w)
+{
+	struct usb_info *ui = container_of(w, struct usb_info, work.work);
+	unsigned long iflags;
+	unsigned long flags, ret;
+
+	for (;;) {
+		spin_lock_irqsave(&ui->lock, iflags);
+		flags = ui->flags;
+		ui->flags = 0;
+		spin_unlock_irqrestore(&ui->lock, iflags);
+
+		/* give up if we have nothing to do */
+		if (flags == 0)
+			break;
+
+		switch (ui->state) {
+		case USB_STATE_IDLE:
+			if (flags & USB_FLAG_REG_OTG) {
+				dcd_ops.handle = (void *) ui;
+				ret = ui->xceiv->set_peripheral(ui->xceiv,
+								&dcd_ops);
+				if (ret)
+					pr_err("%s: Can't register peripheral"
+						"driver with OTG", __func__);
+				break;
+			}
+			if ((flags & USB_FLAG_START) ||
+					(flags & USB_FLAG_RESET)) {
+				disable_irq(ui->irq);
+				if (ui->vbus_sn_notif)
+					msm_pm_app_enable_usb_ldo(1);
+				usb_clk_enable(ui);
+				usb_vreg_enable(ui);
+				usb_vbus_online(ui);
+
+				/* if VBUS is present move to ONLINE state
+				 * otherwise move to OFFLINE state
+				 */
+				if (usb_vbus_is_on(ui)) {
+					ui->usb_state = USB_STATE_POWERED;
+					msm_hsusb_suspend_locks_acquire(ui, 1);
+					ui->state = USB_STATE_ONLINE;
+					usb_enable_pullup(ui);
+					schedule_delayed_work(
+							&ui->chg_legacy_det,
+							USB_CHG_DET_DELAY);
+					pr_info("hsusb: IDLE -> ONLINE\n");
+				} else {
+					ui->usb_state = USB_STATE_NOTATTACHED;
+					ui->state = USB_STATE_OFFLINE;
+
+					msleep(500);
+					usb_lpm_enter(ui);
+					pr_info("hsusb: IDLE -> OFFLINE\n");
+					if (ui->vbus_sn_notif)
+						msm_pm_app_enable_usb_ldo(0);
+				}
+				enable_irq(ui->irq);
+				break;
+			}
+			goto reset;
+
+		case USB_STATE_ONLINE:
+			/* If at any point when we were online, we received
+			 * the signal to go offline, we must honor it
+			 */
+			if (flags & USB_FLAG_VBUS_OFFLINE) {
+				enum charger_type temp;
+				unsigned long f;
+
+				cancel_delayed_work_sync(&ui->chg_legacy_det);
+
+				spin_lock_irqsave(&ui->lock, f);
+				temp = ui->chg_type;
+				ui->chg_type = USB_CHG_TYPE__INVALID;
+				spin_unlock_irqrestore(&ui->lock, f);
+
+				if (temp != USB_CHG_TYPE__INVALID) {
+					/* re-acquire wakelock and restore axi
+					 * freq if they have been reduced by
+					 * charger work item
+					 */
+					msm_hsusb_suspend_locks_acquire(ui, 1);
+
+					msm_chg_usb_i_is_not_available();
+					msm_chg_usb_charger_disconnected();
+				}
+
+				/* reset usb core and usb phy */
+				disable_irq(ui->irq);
+				if (ui->in_lpm)
+					usb_lpm_exit(ui);
+				usb_vbus_offline(ui);
+				usb_lpm_enter(ui);
+				if ((ui->vbus_sn_notif) &&
+				(ui->usb_state == USB_STATE_NOTATTACHED))
+					msm_pm_app_enable_usb_ldo(0);
+				ui->state = USB_STATE_OFFLINE;
+				enable_irq(ui->irq);
+				switch_set_state(&ui->sdev, 0);
+				pr_info("hsusb: ONLINE -> OFFLINE\n");
+				break;
+			}
+			if (flags & USB_FLAG_SUSPEND) {
+				ui->usb_state = USB_STATE_SUSPENDED;
+				usb_lpm_enter(ui);
+				msm_hsusb_suspend_locks_acquire(ui, 1);
+				break;
+			}
+			if ((flags & USB_FLAG_RESUME) ||
+					(flags & USB_FLAG_CONFIGURE)) {
+				int maxpower = usb_get_max_power(ui);
+
+				if (maxpower > 0)
+					msm_chg_usb_i_is_available(maxpower);
+
+				if (flags & USB_FLAG_CONFIGURE)
+					switch_set_state(&ui->sdev, 1);
+
+				break;
+			}
+			goto reset;
+
+		case USB_STATE_OFFLINE:
+			/* If we were signaled to go online and vbus is still
+			 * present when we received the signal, go online.
+			 */
+			if ((flags & USB_FLAG_VBUS_ONLINE)) {
+				msm_hsusb_suspend_locks_acquire(ui, 1);
+				disable_irq(ui->irq);
+				ui->state = USB_STATE_ONLINE;
+				if (ui->in_lpm)
+					usb_lpm_exit(ui);
+				usb_vbus_online(ui);
+				if (!(B_SESSION_VALID & readl(USB_OTGSC))) {
+					writel(((readl(USB_OTGSC) &
+						~OTGSC_INTR_STS_MASK) |
+						OTGSC_BSVIS), USB_OTGSC);
+					enable_irq(ui->irq);
+					goto reset;
+				}
+				usb_enable_pullup(ui);
+				schedule_delayed_work(
+						&ui->chg_legacy_det,
+						USB_CHG_DET_DELAY);
+				pr_info("hsusb: OFFLINE -> ONLINE\n");
+				enable_irq(ui->irq);
+				break;
+			}
+			if (flags & USB_FLAG_SUSPEND) {
+				usb_lpm_enter(ui);
+				wake_unlock(&ui->wlock);
+				break;
+			}
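+			/* fall through: unhandled flags force a reset */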
+		default:
+reset:
+			/* For RESET or any unknown flag in a particular state
+			 * go to IDLE state and reset HW to bring to known state
+			 */
+			ui->flags = USB_FLAG_RESET;
+			ui->state = USB_STATE_IDLE;
+		}
+	}
+}
+
+void msm_hsusb_set_vbus_state(int online)
+{
+	struct usb_info *ui = the_usb_info;
+
+	if (ui && online) {
+		msm_pm_app_enable_usb_ldo(1);
+		usb_lpm_exit(ui);
+		/* Turn on PHY comparators */
+		if (!(ulpi_read(ui, 0x30) & 0x01))
+			ulpi_write(ui, 0x01, 0x30);
+	}
+}
+
+static irqreturn_t usb_lpm_gpio_isr(int irq, void *data)
+{
+	disable_irq(irq);
+
+	return IRQ_HANDLED;
+}
+
+static void usb_lpm_exit(struct usb_info *ui)
+{
+	if (ui->in_lpm == 0)
+		return;
+
+	if (usb_lpm_config_gpio)
+		usb_lpm_config_gpio(0);
+
+	wake_lock(&ui->wlock);
+	usb_clk_enable(ui);
+	usb_vreg_enable(ui);
+
+	writel(readl(USB_USBCMD) & ~ASYNC_INTR_CTRL, USB_USBCMD);
+	writel(readl(USB_USBCMD) & ~ULPI_STP_CTRL, USB_USBCMD);
+
+	if (readl(USB_PORTSC) & PORTSC_PHCD) {
+		disable_irq(ui->irq);
+		schedule_work(&ui->li.wakeup_phy);
+	} else {
+		ui->in_lpm = 0;
+		if (ui->xceiv)
+			ui->xceiv->set_suspend(ui->xceiv, 0);
+	}
+	pr_info("%s(): USB exited from low power mode\n", __func__);
+}
+
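+/* Enter low power mode: suspend the PHY, arm the asynchronous interrupt so a
+ * wakeup can be detected, then switch off the vreg and clocks and (where a
+ * gpio config hook exists) reroute the ULPI pins as wakeup GPIOs.  Returns
+ * -1 and aborts if the bus is still active or the PHY refuses to suspend.
+ */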
+static int usb_lpm_enter(struct usb_info *ui)
+{
+	unsigned long flags;
+	unsigned connected;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	if (ui->in_lpm) {
+		spin_unlock_irqrestore(&ui->lock, flags);
+		pr_debug("already in lpm, nothing to do\n");
+		return 0;
+	}
+
+	if (usb_is_online(ui)) {
+		spin_unlock_irqrestore(&ui->lock, flags);
+		pr_info("%s: lpm procedure aborted\n", __func__);
+		return -1;
+	}
+
+	ui->in_lpm = 1;
+	if (ui->xceiv)
+		ui->xceiv->set_suspend(ui->xceiv, 1);
+	disable_irq(ui->irq);
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	if (usb_suspend_phy(ui)) {
+		ui->in_lpm = 0;
+		ui->flags = USB_FLAG_RESET;
+		enable_irq(ui->irq);
+		pr_err("%s: phy suspend failed, lpm procedure aborted\n",
+				__func__);
+		return -1;
+	}
+
+	if ((B_SESSION_VALID & readl(USB_OTGSC)) &&
+				(ui->usb_state == USB_STATE_NOTATTACHED)) {
+		ui->in_lpm = 0;
+		writel(((readl(USB_OTGSC) & ~OTGSC_INTR_STS_MASK) |
+						OTGSC_BSVIS), USB_OTGSC);
+		ui->flags = USB_FLAG_VBUS_ONLINE;
+		ui->usb_state = USB_STATE_POWERED;
+		usb_wakeup_phy(ui);
+		enable_irq(ui->irq);
+		return -1;
+	}
+
+	/* enable async interrupt */
+	writel(readl(USB_USBCMD) | ASYNC_INTR_CTRL, USB_USBCMD);
+	connected = readl(USB_USBCMD) & USBCMD_RS;
+
+	usb_vreg_disable(ui);
+	usb_clk_disable(ui);
+
+	if (usb_lpm_config_gpio) {
+		if (usb_lpm_config_gpio(1)) {
+			spin_lock_irqsave(&ui->lock, flags);
+			usb_lpm_exit(ui);
+			spin_unlock_irqrestore(&ui->lock, flags);
+			enable_irq(ui->irq);
+			return -1;
+		}
+		enable_irq(ui->gpio_irq[0]);
+		enable_irq(ui->gpio_irq[1]);
+	}
+
+	enable_irq(ui->irq);
+	msm_hsusb_suspend_locks_acquire(ui, 0);
+	pr_info("%s: usb in low power mode\n", __func__);
+	return 0;
+}
+
+static void usb_enable_pullup(struct usb_info *ui)
+{
+	disable_irq(ui->irq);
+	writel(STS_URI | STS_SLI | STS_UI | STS_PCI, USB_USBINTR);
+	writel(readl(USB_USBCMD) | USBCMD_RS, USB_USBCMD);
+	enable_irq(ui->irq);
+}
+
+/* SW workarounds
+ * Issue #1	- USB Spoof Disconnect Failure
+ * Symptom	- Writing 0 to run/stop bit of USBCMD doesn't cause disconnect
+ * Workaround	- Make opmode non-driving and set SuspendM in the function
+ *		  register of the SMSC phy
+ */
+static void usb_disable_pullup(struct usb_info *ui)
+{
+	disable_irq(ui->irq);
+	writel(readl(USB_USBINTR) & ~(STS_URI | STS_SLI | STS_UI | STS_PCI),
+			USB_USBINTR);
+	writel(readl(USB_USBCMD) & ~USBCMD_RS, USB_USBCMD);
+
+	/* S/W workaround, Issue#1 */
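+	/* 0x04 is assumed to be the ULPI Function Control register; writing
+	 * 0x48 selects the non-driving OpMode with SuspendM set, per the
+	 * workaround description above.
+	 */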
+	if (!is_phy_external() && !is_phy_45nm())
+		ulpi_write(ui, 0x48, 0x04);
+
+	enable_irq(ui->irq);
+}
+
+static void usb_chg_stop(struct work_struct *w)
+{
+	struct usb_info *ui = the_usb_info;
+	enum charger_type temp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	temp = ui->chg_type;
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	if (temp == USB_CHG_TYPE__SDP)
+		msm_chg_usb_i_is_not_available();
+}
+
+static void usb_vbus_online(struct usb_info *ui)
+{
+	if (ui->in_lpm) {
+		if (usb_lpm_config_gpio)
+			usb_lpm_config_gpio(0);
+		usb_vreg_enable(ui);
+		usb_clk_enable(ui);
+		usb_wakeup_phy(ui);
+		ui->in_lpm = 0;
+	}
+
+	usb_reset(ui);
+}
+
+static void usb_vbus_offline(struct usb_info *ui)
+{
+	unsigned long timeout;
+	unsigned val = 0;
+
+	if (ui->online != 0) {
+		ui->online = 0;
+		flush_all_endpoints(ui);
+		set_configuration(ui, 0);
+	}
+
+	/* reset h/w at cable disconnection because
+	 * of h/w bugs and to flush any resources that
+	 * h/w might be holding
+	 */
+	clk_enable(ui->clk);
+
+	if (readl(USB_PORTSC) & PORTSC_PHCD)
+		usb_wakeup_phy(ui);
+
+	if (ui->pdata->phy_reset)
+		ui->pdata->phy_reset(ui->addr);
+	else
+		msm_hsusb_phy_reset();
+	/* Give some delay to settle phy after reset */
+	msleep(100);
+
+	writel(USBCMD_RESET, USB_USBCMD);
+	timeout = jiffies + USB_LINK_RESET_TIMEOUT;
+	while (readl(USB_USBCMD) & USBCMD_RESET) {
+		if (time_after(jiffies, timeout)) {
+			dev_err(&ui->pdev->dev, "usb link reset timeout\n");
+			break;
+		}
+		msleep(1);
+	}
+
+	/* Disable VbusValid and SessionEnd comparators */
+	val = ULPI_VBUS_VALID | ULPI_SESS_END;
+
+	/* enable id interrupt only when transceiver is available */
+	if (ui->xceiv)
+		writel(readl(USB_OTGSC) | OTGSC_BSVIE | OTGSC_IDIE, USB_OTGSC);
+	else {
+		writel((readl(USB_OTGSC) | OTGSC_BSVIE) & ~OTGSC_IDPU,
+							USB_OTGSC);
+		ulpi_write(ui, ULPI_IDPU, ULPI_OTG_CTRL_CLR);
+		val |= ULPI_HOST_DISCONNECT | ULPI_ID_GND;
+	}
+	ulpi_write(ui, val, ULPI_INT_RISE_CLR);
+	ulpi_write(ui, val, ULPI_INT_FALL_CLR);
+
+	clk_disable(ui->clk);
+}
+
+static void usb_lpm_wakeup_phy(struct work_struct *w)
+{
+	struct usb_info *ui = the_usb_info;
+	unsigned long flags;
+
+	if (usb_wakeup_phy(ui)) {
+		pr_err("fatal error: cannot bring phy out of lpm\n");
+		pr_err("%s: resetting controller\n", __func__);
+
+		spin_lock_irqsave(&ui->lock, flags);
+		usb_disable_pullup(ui);
+		ui->flags = USB_FLAG_RESET;
+		queue_delayed_work(usb_work, &ui->work, 0);
+		enable_irq(ui->irq);
+		spin_unlock_irqrestore(&ui->lock, flags);
+		return;
+	}
+
+	ui->in_lpm = 0;
+	if (ui->xceiv)
+		ui->xceiv->set_suspend(ui->xceiv, 0);
+	enable_irq(ui->irq);
+}
+
+void usb_function_reenumerate(void)
+{
+	struct usb_info *ui = the_usb_info;
+
+	/* disable and re-enable the D+ pullup */
+	pr_info("hsusb: disable pullup\n");
+	usb_disable_pullup(ui);
+
+	msleep(10);
+
+	pr_info("hsusb: enable pullup\n");
+	usb_enable_pullup(ui);
+}
+
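+/* Debugfs interface: a "usb" directory exposes a read-only "status" dump of
+ * controller registers and endpoint queues plus write-only "reset" and
+ * "cycle" hooks, e.g. (assuming debugfs is mounted at /sys/kernel/debug):
+ *
+ *	cat /sys/kernel/debug/usb/status
+ *	echo 1 > /sys/kernel/debug/usb/cycle	# force re-enumeration
+ *
+ * Only the directory and file names come from the code below; the mount
+ * point is the conventional one.
+ */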
+#if defined(CONFIG_DEBUG_FS)
+static char debug_buffer[PAGE_SIZE];
+
+static ssize_t debug_read_status(struct file *file, char __user *ubuf,
+				 size_t count, loff_t *ppos)
+{
+	struct usb_info *ui = file->private_data;
+	char *buf = debug_buffer;
+	unsigned long flags;
+	struct usb_endpoint *ept;
+	struct msm_request *req;
+	int n;
+	int i = 0;
+
+	spin_lock_irqsave(&ui->lock, flags);
+
+	i += scnprintf(buf + i, PAGE_SIZE - i,
+		       "regs: setup=%08x prime=%08x stat=%08x done=%08x\n",
+		       readl(USB_ENDPTSETUPSTAT),
+		       readl(USB_ENDPTPRIME),
+		       readl(USB_ENDPTSTAT),
+		       readl(USB_ENDPTCOMPLETE));
+	i += scnprintf(buf + i, PAGE_SIZE - i,
+		       "regs:   cmd=%08x   sts=%08x intr=%08x port=%08x\n\n",
+		       readl(USB_USBCMD),
+		       readl(USB_USBSTS),
+		       readl(USB_USBINTR),
+		       readl(USB_PORTSC));
+
+
+	for (n = 0; n < 32; n++) {
+		ept = ui->ept + n;
+		if (ept->max_pkt == 0)
+			continue;
+
+		i += scnprintf(buf + i, PAGE_SIZE - i,
+			       "ept%d %s cfg=%08x active=%08x next=%08x info=%08x\n",
+			       ept->num, (ept->flags & EPT_FLAG_IN) ? "in " : "out",
+			       ept->head->config, ept->head->active,
+			       ept->head->next, ept->head->info);
+
+		for (req = ept->req; req; req = req->next)
+			i += scnprintf(buf + i, PAGE_SIZE - i,
+				       "  req @%08x next=%08x info=%08x page0=%08x %c %c\n",
+				       req->item_dma, req->item->next,
+				       req->item->info, req->item->page0,
+				       req->busy ? 'B' : ' ',
+				       req->live ? 'L' : ' '
+				);
+	}
+
+	i += scnprintf(buf + i, PAGE_SIZE - i,
+		       "phy failure count: %d\n", ui->phy_fail_count);
+
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	return simple_read_from_buffer(ubuf, count, ppos, buf, i);
+}
+
+
+static ssize_t debug_write_reset(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	struct usb_info *ui = file->private_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	ui->flags |= USB_FLAG_RESET;
+	queue_delayed_work(usb_work, &ui->work, 0);
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	return count;
+}
+
+
+static ssize_t debug_write_cycle(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	usb_function_reenumerate();
+	return count;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+const struct file_operations debug_stat_ops = {
+	.open = debug_open,
+	.read = debug_read_status,
+};
+
+
+
+const struct file_operations debug_reset_ops = {
+	.open = debug_open,
+	.write = debug_write_reset,
+};
+
+const struct file_operations debug_cycle_ops = {
+	.open = debug_open,
+	.write = debug_write_cycle,
+};
+
+static struct dentry *debugfs_dent;
+static struct dentry *debugfs_status;
+static struct dentry *debugfs_reset;
+static struct dentry *debugfs_cycle;
+static void usb_debugfs_init(struct usb_info *ui)
+{
+	debugfs_dent = debugfs_create_dir("usb", 0);
+	if (IS_ERR(debugfs_dent))
+		return;
+
+	debugfs_status = debugfs_create_file("status", 0444,
+				debugfs_dent, ui, &debug_stat_ops);
+	debugfs_reset = debugfs_create_file("reset", 0222,
+				debugfs_dent, ui, &debug_reset_ops);
+	debugfs_cycle = debugfs_create_file("cycle", 0222,
+				debugfs_dent, ui, &debug_cycle_ops);
+}
+
+static void usb_debugfs_uninit(void)
+{
+	debugfs_remove(debugfs_status);
+	debugfs_remove(debugfs_reset);
+	debugfs_remove(debugfs_cycle);
+	debugfs_remove(debugfs_dent);
+}
+
+#else
+static void usb_debugfs_init(struct usb_info *ui) {}
+static void usb_debugfs_uninit(void) {}
+#endif
+
+static void usb_configure_device_descriptor(struct usb_info *ui)
+{
+	desc_device.idVendor = ui->pdata->vendor_id;
+	desc_device.idProduct = ui->composition->product_id;
+	desc_device.bcdDevice = ui->pdata->version;
+
+	if (ui->pdata->serial_number)
+		desc_device.iSerialNumber =
+			usb_msm_get_next_strdesc_id(ui->pdata->serial_number);
+	if (ui->pdata->product_name)
+		desc_device.iProduct =
+			usb_msm_get_next_strdesc_id(ui->pdata->product_name);
+	if (ui->pdata->manufacturer_name)
+		desc_device.iManufacturer =
+			usb_msm_get_next_strdesc_id(
+				ui->pdata->manufacturer_name);
+
+	/* Send Serial number to A9 for software download */
+	if (ui->pdata->serial_number) {
+		msm_hsusb_is_serial_num_null(FALSE);
+		msm_hsusb_send_serial_number(ui->pdata->serial_number);
+	} else
+		msm_hsusb_is_serial_num_null(TRUE);
+
+	msm_hsusb_send_productID(desc_device.idProduct);
+
+}
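+
+/* sysfs "func_enable" attribute: expects "<name>=<0|1>", e.g. writing
+ * "adb=1" enables the adb function and triggers a composition switch.  The
+ * attribute lives under the platform device's sysfs directory.
+ */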
+static ssize_t msm_hsusb_store_func_enable(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf, size_t size)
+{
+	char name[20];
+	int enable = 0;
+	int i;
+
+	for (i = 0; buf[i] != 0 && i < (int)sizeof(name) - 1; i++) {
+		if (buf[i] == '=')
+			break;
+		name[i] = buf[i];
+	}
+	name[i++] = 0;
+	if (buf[i] == '0' || buf[i] == '1')
+		enable = buf[i] - '0';
+	else
+		return size;
+
+	pr_info("%s: name = %s, enable = %d\n", __func__, name, enable);
+	usb_function_enable(name, enable);
+	return size;
+}
+static ssize_t msm_hsusb_show_compswitch(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct usb_info *ui = the_usb_info;
+	int i;
+
+	if (ui->composition)
+		i = scnprintf(buf, PAGE_SIZE,
+				"composition product id = %x\n",
+					ui->composition->product_id);
+	else
+		i = scnprintf(buf, PAGE_SIZE,
+				"composition product id = 0\n");
+	return i;
+}
+
+static ssize_t msm_hsusb_store_compswitch(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf, size_t size)
+{
+	unsigned long pid;
+
+	if (!strict_strtoul(buf, 16, &pid)) {
+		pr_info("%s: Requested New Product id = %lx\n", __func__, pid);
+		usb_switch_composition((unsigned short)pid);
+	} else
+		pr_info("%s: strict_strtoul conversion failed\n", __func__);
+
+	return size;
+}
+static ssize_t msm_hsusb_store_autoresume(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf, size_t size)
+{
+	usb_remote_wakeup();
+
+	return size;
+}
+
+static ssize_t msm_hsusb_show_state(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct usb_info *ui = the_usb_info;
+	int i;
+	char *state[] = {"USB_STATE_NOTATTACHED", "USB_STATE_ATTACHED",
+			"USB_STATE_POWERED", "USB_STATE_UNAUTHENTICATED",
+			"USB_STATE_RECONNECTING", "USB_STATE_DEFAULT",
+			"USB_STATE_ADDRESS", "USB_STATE_CONFIGURED",
+			"USB_STATE_SUSPENDED"
+	};
+
+	i = scnprintf(buf, PAGE_SIZE, "%s\n", state[ui->usb_state]);
+	return i;
+}
+
+static ssize_t msm_hsusb_show_lpm(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct usb_info *ui = the_usb_info;
+	int i;
+
+	i = scnprintf(buf, PAGE_SIZE, "%d\n", ui->in_lpm);
+	return i;
+}
+
+static ssize_t msm_hsusb_show_speed(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct usb_info *ui = the_usb_info;
+	int i;
+	char *speed[] = {"USB_SPEED_UNKNOWN", "USB_SPEED_LOW",
+			"USB_SPEED_FULL", "USB_SPEED_HIGH"};
+
+	i = scnprintf(buf, PAGE_SIZE, "%s\n", speed[ui->speed]);
+	return i;
+}
+
+static DEVICE_ATTR(composition, 0664,
+		msm_hsusb_show_compswitch, msm_hsusb_store_compswitch);
+static DEVICE_ATTR(func_enable, S_IWUSR,
+		NULL, msm_hsusb_store_func_enable);
+static DEVICE_ATTR(autoresume, 0222,
+		NULL, msm_hsusb_store_autoresume);
+static DEVICE_ATTR(state, 0444, msm_hsusb_show_state, NULL);
+static DEVICE_ATTR(lpm, 0444, msm_hsusb_show_lpm, NULL);
+static DEVICE_ATTR(speed, 0444, msm_hsusb_show_speed, NULL);
+
+static struct attribute *msm_hsusb_attrs[] = {
+	&dev_attr_composition.attr,
+	&dev_attr_func_enable.attr,
+	&dev_attr_autoresume.attr,
+	&dev_attr_state.attr,
+	&dev_attr_lpm.attr,
+	&dev_attr_speed.attr,
+	NULL,
+};
+static struct attribute_group msm_hsusb_attr_grp = {
+	.attrs = msm_hsusb_attrs,
+};
+
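+/* Generate a read-only sysfs attribute per function, grouped under the
+ * "functions" directory, reporting whether that function is enabled in the
+ * currently selected composition.
+ */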
+#define msm_hsusb_func_attr(function, index)				\
+static ssize_t  show_##function(struct device *dev,			\
+		struct device_attribute *attr, char *buf)		\
+{									\
+	struct usb_info *ui = the_usb_info;				\
+	struct usb_function_info *fi = ui->func[index];			\
+									\
+	return sprintf(buf, "%d", fi->enabled);				\
+									\
+}									\
+									\
+static DEVICE_ATTR(function, S_IRUGO, show_##function, NULL);
+
+msm_hsusb_func_attr(diag, 0);
+msm_hsusb_func_attr(adb, 1);
+msm_hsusb_func_attr(modem, 2);
+msm_hsusb_func_attr(nmea, 3);
+msm_hsusb_func_attr(mass_storage, 4);
+msm_hsusb_func_attr(ethernet, 5);
+msm_hsusb_func_attr(rmnet, 6);
+
+static struct attribute *msm_hsusb_func_attrs[] = {
+	&dev_attr_diag.attr,
+	&dev_attr_adb.attr,
+	&dev_attr_modem.attr,
+	&dev_attr_nmea.attr,
+	&dev_attr_mass_storage.attr,
+	&dev_attr_ethernet.attr,
+	&dev_attr_rmnet.attr,
+	NULL,
+};
+
+static struct attribute_group msm_hsusb_func_attr_grp = {
+	.name  = "functions",
+	.attrs = msm_hsusb_func_attrs,
+};
+
+static int __init usb_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct usb_info *ui;
+	int irq;
+	int ulpi_irq1 = 0;
+	int ulpi_irq2 = 0;
+	int i;
+	int ret = 0;
+
+	if (!pdev || !pdev->dev.platform_data) {
+		pr_err("%s:pdev or platform data is null\n", __func__);
+		return -ENODEV;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		pr_err("%s: failed to get irq num from platform_get_irq\n",
+				__func__);
+		return -ENODEV;
+	}
+
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		pr_err("%s: failed to get mem resource\n", __func__);
+		return -ENODEV;
+	}
+
+	ret = sysfs_create_group(&pdev->dev.kobj, &msm_hsusb_attr_grp);
+	if (ret) {
+		pr_err("%s: unable to create sysfs group\n", __func__);
+		return ret;
+	}
+
+	usb_work = create_singlethread_workqueue("usb_work");
+	if (!usb_work) {
+		pr_err("%s: unable to create work queue\n", __func__);
+		ret = -ENOMEM;
+		goto free_sysfs_grp;
+	}
+
+	ui = kzalloc(sizeof(struct usb_info), GFP_KERNEL);
+	if (!ui) {
+		pr_err("%s: unable to allocate memory for ui\n", __func__);
+		ret = -ENOMEM;
+		goto free_workqueue;
+	}
+
+	ui->pdev = pdev;
+	ui->pdata = pdev->dev.platform_data;
+
+	for (i = 0; i < ui->pdata->num_compositions; i++)
+		if (ui->pdata->compositions[i].product_id == pid) {
+			ui->composition = &ui->pdata->compositions[i];
+			break;
+		}
+	if (!ui->composition) {
+		pr_err("%s: unable to find the composition with pid:(%d)\n",
+				__func__, pid);
+		ret = -ENODEV;
+		goto free_ui;
+	}
+
+	ui->phy_info = ui->pdata->phy_info;
+	if (ui->phy_info == USB_PHY_UNDEFINED) {
+		pr_err("undefined phy_info: (%d)\n", ui->phy_info);
+		ret = -ENOMEM;
+		goto free_ui;
+	}
+
+	/* zero is reserved for language id */
+	ui->strdesc_index = 1;
+	ui->strdesc = kzalloc(sizeof(char *) * MAX_STRDESC_NUM, GFP_KERNEL);
+	if (!ui->strdesc) {
+		pr_err("%s: unable allocate mem for string descriptors\n",
+				__func__);
+		ret = -ENOMEM;
+		goto free_ui;
+	}
+
+	ui->num_funcs = ui->pdata->num_functions;
+	ui->func = kzalloc(sizeof(struct usb_function *) * ui->num_funcs,
+				GFP_KERNEL);
+	if (!ui->func) {
+		pr_err("%s: unable allocate mem for functions\n", __func__);
+		ret = -ENOMEM;
+		goto free_str_desc;
+	}
+
+	ret = sysfs_create_group(&pdev->dev.kobj, &msm_hsusb_func_attr_grp);
+	if (ret) {
+		pr_err("%s: unable to create functions sysfs group\n",
+				__func__);
+		goto free_func;
+	}
+
+	ui->addr = ioremap(res->start, resource_size(res));
+	if (!ui->addr) {
+		pr_err("%s: unable ioremap\n", __func__);
+		ret = -ENOMEM;
+		goto free_func_sysfs_grp;
+	}
+
+	ui->buf = dma_alloc_coherent(&pdev->dev, 4096, &ui->dma, GFP_KERNEL);
+	if (!ui->buf) {
+		pr_err("%s: failed allocate dma coherent memory\n", __func__);
+		ret = -ENOMEM;
+		goto free_iounmap;
+	}
+
+	ui->pool = dma_pool_create("hsusb", NULL, 32, 32, 0);
+	if (!ui->pool) {
+		pr_err("%s: unable to allocate dma pool\n", __func__);
+		ret = -ENOMEM;
+		goto free_dma_coherent;
+	}
+
+	ui->clk = clk_get(&pdev->dev, "usb_hs_clk");
+	if (IS_ERR(ui->clk)) {
+		pr_err("%s: unable get usb_hs_clk\n", __func__);
+		ret = PTR_ERR(ui->clk);
+		goto free_dma_pool;
+	}
+
+	ui->pclk = clk_get(&pdev->dev, "usb_hs_pclk");
+	if (IS_ERR(ui->pclk)) {
+		pr_err("%s: unable get usb_hs_pclk\n", __func__);
+		ret = PTR_ERR(ui->pclk);
+		goto free_hs_clk;
+	}
+
+	if (ui->pdata->core_clk) {
+		ui->cclk = clk_get(&pdev->dev, "usb_hs_core_clk");
+		if (IS_ERR(ui->cclk)) {
+			pr_err("%s: unable get usb_hs_core_clk\n", __func__);
+			ret = PTR_ERR(ui->cclk);
+			goto free_hs_pclk;
+		}
+	}
+
+	if (ui->pdata->vreg5v_required) {
+		ui->vreg = vreg_get(NULL, "boost");
+		if (IS_ERR(ui->vreg)) {
+			pr_err("%s: vreg get failed\n", __func__);
+			ui->vreg = NULL;
+			ret = PTR_ERR(ui->vreg);
+			goto free_hs_cclk;
+		}
+	}
+
+	/* disable interrupts before requesting irq */
+	usb_clk_enable(ui);
+	writel(0, USB_USBINTR);
+	writel(readl(USB_OTGSC) & ~OTGSC_INTR_MASK, USB_OTGSC);
+	usb_clk_disable(ui);
+
+	ret = request_irq(irq, usb_interrupt, IRQF_SHARED, pdev->name, ui);
+	if (ret) {
+		pr_err("%s: request_irq failed\n", __func__);
+		goto free_vreg5v;
+	}
+	ui->irq = irq;
+
+	if (ui->pdata->config_gpio) {
+		usb_lpm_config_gpio = ui->pdata->config_gpio;
+
+		ulpi_irq1 = platform_get_irq_byname(pdev, "vbus_interrupt");
+		if (ulpi_irq1 < 0) {
+			pr_err("%s: failed to get vbus gpio interrupt\n",
+					__func__);
+			return -ENODEV;
+		}
+
+		ulpi_irq2 = platform_get_irq_byname(pdev, "id_interrupt");
+		if (ulpi_irq2 < 0) {
+			pr_err("%s: failed to get id gpio interrupt\n",
+					__func__);
+			return -ENODEV;
+		}
+
+		ret = request_irq(ulpi_irq1,
+				&usb_lpm_gpio_isr,
+				IRQF_TRIGGER_HIGH,
+				"vbus_interrupt", NULL);
+		if (ret) {
+			pr_err("%s: failed to request vbus interrupt:(%d)\n",
+					__func__, ulpi_irq1);
+			goto free_irq;
+		}
+
+		ret = request_irq(ulpi_irq2,
+				&usb_lpm_gpio_isr,
+				IRQF_TRIGGER_RISING,
+				"usb_ulpi_data3", NULL);
+		if (ret) {
+			pr_err("%s: failed to request irq ulpi_data_3:(%d)\n",
+							__func__, ulpi_irq2);
+			goto free_ulpi_irq1;
+		}
+
+		ui->gpio_irq[0] = ulpi_irq1;
+		ui->gpio_irq[1] = ulpi_irq2;
+	}
+
+	ui->sdev.name = DRIVER_NAME;
+	ui->sdev.print_name = print_switch_name;
+	ui->sdev.print_state = print_switch_state;
+
+	ret = switch_dev_register(&ui->sdev);
+	if (ret < 0) {
+		pr_err("%s(): switch_dev_register failed ret = %d\n",
+				__func__, ret);
+		goto free_ulpi_irq2;
+	}
+
+	the_usb_info = ui;
+	ui->functions_map = ui->pdata->function_map;
+	ui->selfpowered = 0;
+	ui->remote_wakeup = 0;
+	ui->maxpower = 0xFA;
+	ui->chg_type = USB_CHG_TYPE__INVALID;
+	/* to allow swfi, the driver's latency requirement
+	 * must be above the listed swfi latency
+	 */
+	ui->pdata->swfi_latency += 1;
+
+	spin_lock_init(&ui->lock);
+	msm_hsusb_suspend_locks_init(ui, 1);
+	enable_irq_wake(irq);
+
+	/* memory barrier initialization in non-interrupt context */
+	dmb();
+
+	usb_debugfs_init(ui);
+	usb_prepare(ui);
+
+	pr_info("%s: io=%p, irq=%d, dma=%p(%x)\n",
+			__func__, ui->addr, ui->irq, ui->buf, ui->dma);
+	return 0;
+
+free_ulpi_irq2:
+	if (ulpi_irq2)
+		free_irq(ulpi_irq2, NULL);
+free_ulpi_irq1:
+	if (ulpi_irq1)
+		free_irq(ulpi_irq1, NULL);
+free_irq:
+	free_irq(ui->irq, ui);
+free_vreg5v:
+	if (ui->pdata->vreg5v_required)
+		vreg_put(ui->vreg);
+free_hs_cclk:
+	clk_put(ui->cclk);
+free_hs_pclk:
+	clk_put(ui->pclk);
+free_hs_clk:
+	clk_put(ui->clk);
+free_dma_pool:
+	dma_pool_destroy(ui->pool);
+free_dma_coherent:
+	dma_free_coherent(&pdev->dev, 4096, ui->buf, ui->dma);
+free_iounmap:
+	iounmap(ui->addr);
+free_func_sysfs_grp:
+	sysfs_remove_group(&pdev->dev.kobj, &msm_hsusb_func_attr_grp);
+free_func:
+	kfree(ui->func);
+free_str_desc:
+	kfree(ui->strdesc);
+free_ui:
+	kfree(ui);
+free_workqueue:
+	destroy_workqueue(usb_work);
+free_sysfs_grp:
+	sysfs_remove_group(&pdev->dev.kobj, &msm_hsusb_attr_grp);
+
+	return ret;
+}
+
+#ifdef CONFIG_PM
+static int usb_platform_suspend(struct platform_device *pdev,
+		pm_message_t state)
+{
+	struct usb_info *ui = the_usb_info;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&ui->lock, flags);
+
+	if (!ui->active) {
+		spin_unlock_irqrestore(&ui->lock, flags);
+		pr_info("%s: peripheral mode is not active"
+				"nothing to be done\n", __func__);
+		return 0;
+	}
+
+	if (ui->in_lpm) {
+		spin_unlock_irqrestore(&ui->lock, flags);
+		pr_info("%s: we are already in lpm, nothing to be done\n",
+					__func__);
+		return 0;
+	}
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	ret = usb_lpm_enter(ui);
+	if (ret)
+		pr_err("%s: failed to enter lpm\n", __func__);
+
+	return ret;
+}
+#endif
+
+static struct platform_driver usb_driver = {
+	.probe = usb_probe,
+#ifdef CONFIG_PM
+	.suspend = usb_platform_suspend,
+#endif
+	.driver = { .name = DRIVER_NAME, },
+};
+
+static int __init usb_module_init(void)
+{
+	/* rpc connect for phy_reset */
+	msm_hsusb_rpc_connect();
+	/* rpc connect for charging */
+	msm_chg_rpc_connect();
+
+	return platform_driver_register(&usb_driver);
+}
+
+static void free_usb_info(void)
+{
+	struct usb_info *ui = the_usb_info;
+	unsigned long flags;
+	int i;
+	if (ui) {
+		INIT_LIST_HEAD(&usb_function_list);
+
+		for (i = 0; i < ui->num_funcs; i++)
+			kfree(ui->func[i]);
+		ui->num_funcs = 0;
+		usb_uninit(ui);
+		kfree(ui->strdesc);
+		usb_ept_free_req(&ui->ep0in, ui->setup_req);
+		if (ui->ept[0].ui == ui)
+			flush_all_endpoints(ui);
+		spin_lock_irqsave(&ui->lock, flags);
+		usb_clk_disable(ui);
+		usb_vreg_disable(ui);
+		spin_unlock_irqrestore(&ui->lock, flags);
+		usb_free(ui, 0);
+		the_usb_info = NULL;
+	}
+}
+static void usb_exit(void)
+{
+	struct usb_info *ui = the_usb_info;
+	/* free the dev state structure */
+	if (!ui)
+		return;
+
+	if (ui->xceiv) {
+		ui->xceiv->set_peripheral(ui->xceiv, NULL);
+		msm_otg_put_transceiver(ui->xceiv);
+	}
+
+	cancel_work_sync(&ui->li.wakeup_phy);
+
+	destroy_workqueue(usb_work);
+	/* free the usb_info structure */
+	free_usb_info();
+	switch_dev_unregister(&ui->sdev);
+	sysfs_remove_group(&ui->pdev->dev.kobj, &msm_hsusb_func_attr_grp);
+	sysfs_remove_group(&ui->pdev->dev.kobj, &msm_hsusb_attr_grp);
+	usb_debugfs_uninit();
+	platform_driver_unregister(&usb_driver);
+	msm_hsusb_rpc_close();
+	msm_chg_rpc_close();
+	msm_pm_app_unregister_vbus_sn(&msm_hsusb_set_vbus_state);
+	msm_pm_app_rpc_deinit();
+}
+
+static void __exit usb_module_exit(void)
+{
+	usb_exit();
+}
+
+module_param(pid, int, 0);
+MODULE_PARM_DESC(pid, "Product ID of the desired composition");
+
+module_init(usb_module_init);
+module_exit(usb_module_exit);
+
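+/* Build a USB string descriptor from an ASCII string: bLength is
+ * 2 * strlen + 2 and every character is widened to UTF-16LE as the USB spec
+ * requires.
+ */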
+static void copy_string_descriptor(char *string, char *buffer)
+{
+	int length, i;
+
+	if (string) {
+		length = strlen(string);
+		buffer[0] = 2 * length + 2;
+		buffer[1] = USB_DT_STRING;
+		for (i = 0; i < length; i++) {
+			buffer[2 * i + 2] = string[i];
+			buffer[2 * i + 3] = 0;
+		}
+	}
+}
+static int get_qualifier_descriptor(struct usb_qualifier_descriptor *dq)
+{
+	struct usb_qualifier_descriptor *dev_qualifier = dq;
+	dev_qualifier->bLength = sizeof(struct usb_qualifier_descriptor),
+	dev_qualifier->bDescriptorType = USB_DT_DEVICE_QUALIFIER,
+	dev_qualifier->bcdUSB =  __constant_cpu_to_le16(0x0200),
+	dev_qualifier->bDeviceClass = USB_CLASS_PER_INTERFACE,
+	dev_qualifier->bDeviceSubClass = 0;
+	dev_qualifier->bDeviceProtocol = 0;
+	dev_qualifier->bMaxPacketSize0 = 64;
+	dev_qualifier->bNumConfigurations = 1;
+	dev_qualifier->bRESERVED = 0;
+	return sizeof(struct usb_qualifier_descriptor);
+}
+
+static int usb_fill_descriptors(void *ptr,
+		struct usb_descriptor_header **descriptors)
+{
+	unsigned char *buf = ptr;
+	struct usb_descriptor_header *item = descriptors[0];
+	unsigned cnt = 0;
+
+	while (NULL != item) {
+		unsigned len = item->bLength;
+		memcpy(buf, item, len);
+		buf += len;
+		cnt++;
+		item = descriptors[cnt];
+	}
+
+	return buf-(u8 *)ptr;
+}
+
+static int usb_find_descriptor(struct usb_info *ui, struct usb_ctrlrequest *ctl,
+				struct usb_request *req)
+{
+	int i;
+	unsigned short id = ctl->wValue;
+	unsigned short type = id >> 8;
+	id &= 0xff;
+
+	if ((type == USB_DT_DEVICE) && (id == 0)) {
+		req->length = sizeof(desc_device);
+		if (usb_msm_is_iad()) {
+			desc_device.bDeviceClass = 0xEF;
+			desc_device.bDeviceSubClass = 0x02;
+			desc_device.bDeviceProtocol = 0x01;
+		}
+		memcpy(req->buf, &desc_device, req->length);
+		return 0;
+	}
+	if ((type == USB_DT_DEVICE_QUALIFIER) && (id == 0)) {
+		struct usb_qualifier_descriptor dq;
+		req->length = get_qualifier_descriptor(&dq);
+		if (usb_msm_is_iad()) {
+			dq.bDeviceClass = 0xEF;
+			dq.bDeviceSubClass = 0x02;
+			dq.bDeviceProtocol = 0x01;
+		}
+		memcpy(req->buf, &dq, req->length);
+		return 0;
+	}
+
+	if ((type == USB_DT_OTHER_SPEED_CONFIG) && (id == 0))
+		goto get_config;
+
+	if ((type == USB_DT_CONFIG) && (id == 0)) {
+		struct usb_config_descriptor cfg;
+		unsigned ifc_count = 0;
+		char *ptr, *start;
+get_config:
+		ifc_count = 0;
+		start = req->buf;
+		ptr = start + USB_DT_CONFIG_SIZE;
+		ifc_count = ui->next_ifc_num;
+
+		for (i = 0; i < ui->num_funcs; i++) {
+			struct usb_function_info *fi = ui->func[i];
+			struct usb_descriptor_header **dh = NULL;
+
+			if (!fi || !(ui->composition->functions & (1 << i)))
+				continue;
+			switch (ui->speed) {
+			case USB_SPEED_HIGH:
+				if (type == USB_DT_OTHER_SPEED_CONFIG)
+					dh = fi->func->fs_descriptors;
+				else
+					dh = fi->func->hs_descriptors;
+				break;
+
+			case USB_SPEED_FULL:
+				if (type == USB_DT_OTHER_SPEED_CONFIG)
+					dh = fi->func->hs_descriptors;
+				else
+					dh = fi->func->fs_descriptors;
+				break;
+
+			default:
+				printk(KERN_ERR "Unsupported speed(%x)\n",
+						ui->speed);
+				return -1;
+			}
+			ptr += usb_fill_descriptors(ptr, dh);
+		}
+
+#define	USB_REMOTE_WAKEUP_SUPPORT	1
+		cfg.bLength = USB_DT_CONFIG_SIZE;
+		if (type == USB_DT_OTHER_SPEED_CONFIG)
+			cfg.bDescriptorType =  USB_DT_OTHER_SPEED_CONFIG;
+		else
+			cfg.bDescriptorType = USB_DT_CONFIG;
+		cfg.wTotalLength = ptr - start;
+		cfg.bNumInterfaces = ifc_count;
+		cfg.bConfigurationValue = 1;
+		cfg.iConfiguration = 0;
+		cfg.bmAttributes = USB_CONFIG_ATT_ONE |
+			ui->selfpowered << USB_CONFIG_ATT_SELFPOWER_POS |
+			USB_REMOTE_WAKEUP_SUPPORT << USB_CONFIG_ATT_WAKEUP_POS;
+		cfg.bMaxPower = ui->maxpower;
+
+		memcpy(start, &cfg, USB_DT_CONFIG_SIZE);
+
+		req->length = ptr - start;
+		return 0;
+	}
+
+	if (type == USB_DT_STRING) {
+		char *buffer = req->buf;
+
+		buffer[0] = 0;
+		if (id > ui->strdesc_index)
+			return -1;
+		 if (id == STRING_LANGUAGE_ID)
+			memcpy(buffer, str_lang_desc, str_lang_desc[0]);
+		 else
+			copy_string_descriptor(ui->strdesc[id], buffer);
+
+		if (buffer[0]) {
+			req->length = buffer[0];
+			return 0;
+		} else
+			return -1;
+	}
+	return -1;
+}
+
+/*****Gadget Framework Functions***/
+struct device *usb_get_device(void)
+{
+	if (the_usb_info) {
+		if (the_usb_info->pdev)
+			return &(the_usb_info->pdev->dev);
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(usb_get_device);
+
+int usb_ept_cancel_xfer(struct usb_endpoint *ept, struct usb_request *_req)
+{
+	struct usb_info 	*ui = the_usb_info;
+	struct msm_request      *req = to_msm_request(_req);
+	struct msm_request 	*temp_req, *prev_req;
+	unsigned long		flags;
+
+	if (!(ui && req && ept->req))
+		return -EINVAL;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	if (req->busy) {
+		req->req.status = 0;
+		req->busy = 0;
+
+		/* See if the request is the first request in the ept queue */
+		if (ept->req == req) {
+			/* Stop the transfer */
+			do {
+				writel((1 << ept->bit), USB_ENDPTFLUSH);
+				while (readl(USB_ENDPTFLUSH) & (1 << ept->bit))
+					udelay(100);
+			} while (readl(USB_ENDPTSTAT) & (1 << ept->bit));
+			if (!req->next)
+				ept->last = NULL;
+			ept->req = req->next;
+			ept->head->next = req->item->next;
+			goto cancel_req;
+		}
+		/* Request could be in the middle of ept queue */
+		prev_req = temp_req = ept->req;
+		do {
+			if (req == temp_req) {
+				if (req->live) {
+					/* Stop the transfer */
+					do {
+						writel((1 << ept->bit),
+							USB_ENDPTFLUSH);
+						while (readl(USB_ENDPTFLUSH) &
+							(1 << ept->bit))
+							udelay(100);
+					} while (readl(USB_ENDPTSTAT) &
+						(1 << ept->bit));
+				}
+				prev_req->next = temp_req->next;
+				prev_req->item->next = temp_req->item->next;
+				if (!req->next)
+					ept->last = prev_req;
+				goto cancel_req;
+			}
+			prev_req = temp_req;
+			temp_req = temp_req->next;
+		} while (temp_req != NULL);
+		goto error;
+cancel_req:
+	if (req->live) {
+		/* prepare the transaction descriptor item for the hardware */
+		req->item->next = TERMINATE;
+		req->item->info = 0;
+		req->live = 0;
+		dma_unmap_single(NULL, req->dma, req->req.length,
+				(ept->flags & EPT_FLAG_IN) ?
+				DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		/* Reprime the endpoint for the remaining transfers */
+		if (ept->req) {
+			temp_req = ept->req;
+			while (temp_req != NULL) {
+				temp_req->live = 0;
+				temp_req = temp_req->next;
+			}
+			usb_ept_start(ept);
+		}
+	} else
+		dma_unmap_single(NULL, req->dma, req->req.length,
+				(ept->flags & EPT_FLAG_IN) ?
+				DMA_TO_DEVICE : DMA_FROM_DEVICE);
+	spin_unlock_irqrestore(&ui->lock, flags);
+	return 0;
+	}
+error:
+	spin_unlock_irqrestore(&ui->lock, flags);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(usb_ept_cancel_xfer);
+
+int usb_ept_set_halt(struct usb_endpoint *ept)
+{
+	struct usb_info *ui = ept->ui;
+	int in = ept->flags & EPT_FLAG_IN;
+	unsigned n;
+
+	if (ui->in_lpm) {
+		pr_err("%s: controller is in lpm, cannot proceed\n", __func__);
+		return -1;
+	}
+
+	ept->ept_halted = 1;
+
+	n = readl(USB_ENDPTCTRL(ept->num));
+
+	if (in)
+		n |= CTRL_TXS;
+	else
+		n |= CTRL_RXS;
+
+	writel(n, USB_ENDPTCTRL(ept->num));
+
+	return 0;
+}
+EXPORT_SYMBOL(usb_ept_set_halt);
+
+int usb_ept_clear_halt(struct usb_endpoint *ept)
+{
+	struct usb_info *ui = ept->ui;
+	int in = ept->flags & EPT_FLAG_IN;
+	unsigned n;
+
+	if (ui->in_lpm) {
+		pr_err("%s: controller is in lpm, cannot proceed\n", __func__);
+		return -1;
+	}
+
+	if (ept->ept_halted)
+		ept->ept_halted = 0;
+
+	n = readl(USB_ENDPTCTRL(ept->num));
+
+	/*clear stall bit and set data toggle bit*/
+	if (in) {
+		n &= (~CTRL_TXS);
+		n |= (CTRL_TXR);
+	} else {
+		n &= ~(CTRL_RXS);
+		n |= (CTRL_RXR);
+	}
+
+	writel(n, USB_ENDPTCTRL(ept->num));
+
+	return 0;
+}
+EXPORT_SYMBOL(usb_ept_clear_halt);
+
+int usb_ept_is_stalled(struct usb_endpoint *ept)
+{
+	struct usb_info *ui = ept->ui;
+	int in = ept->flags & EPT_FLAG_IN;
+	unsigned n;
+
+	n = readl(USB_ENDPTCTRL(ept->num));
+
+	if (in && (n & CTRL_TXS))
+		return 1;
+	else if (n & CTRL_RXS)
+		return 1;
+	return 0;
+}
+
+void usb_ept_fifo_flush(struct usb_endpoint *ept)
+{
+	flush_endpoint(ept);
+}
+EXPORT_SYMBOL(usb_ept_fifo_flush);
+
+struct usb_function *usb_ept_get_function(struct usb_endpoint *ept)
+{
+	return NULL;
+}
+EXPORT_SYMBOL(usb_ept_get_function);
+
+
+void usb_free_endpoint_all_req(struct usb_endpoint *ep)
+{
+	struct msm_request *temp;
+	struct msm_request *req;
+	if (!ep)
+		return;
+	req = ep->req;
+	while (req) {
+		temp = req->next;
+		req->busy = 0;
+		if (&req->req)
+			usb_ept_free_req(ep, &req->req);
+		req = temp;
+	}
+}
+EXPORT_SYMBOL(usb_free_endpoint_all_req);
+
+int usb_function_unregister(struct usb_function *func)
+{
+	struct usb_info *ui = the_usb_info;
+	int i;
+	struct usb_function_info *fi;
+	unsigned long flags;
+
+	if (!func)
+		return -EINVAL;
+
+	fi = usb_find_function(func->name);
+	if (!fi)
+		return -EINVAL;
+
+	if (ui->running) {
+		disable_irq(ui->irq);
+		spin_lock_irqsave(&ui->lock, flags);
+		ui->running = 0;
+		ui->online = 0;
+		ui->bound = 0;
+		spin_unlock_irqrestore(&ui->lock, flags);
+		usb_uninit(ui);
+		/* we should come out of lpm to access registers */
+		if (ui->in_lpm) {
+			if (PHY_TYPE(ui->phy_info) == USB_PHY_EXTERNAL) {
+				disable_irq(ui->gpio_irq[0]);
+				disable_irq(ui->gpio_irq[1]);
+			}
+			usb_lpm_exit(ui);
+			if (cancel_work_sync(&ui->li.wakeup_phy))
+				usb_lpm_wakeup_phy(NULL);
+			ui->in_lpm = 0;
+		}
+		/* disable usb and session valid interrupts */
+		writel(0, USB_USBINTR);
+		writel(readl(USB_OTGSC) & ~OTGSC_BSVIE, USB_OTGSC);
+
+		/* stop the controller */
+		usb_disable_pullup(ui);
+		msleep(100);
+		enable_irq(ui->irq);
+	}
+
+	pr_info("%s: func->name = %s\n", __func__, func->name);
+
+	ui->composition = NULL;
+
+	if (func->configure)
+		func->configure(0, func->context);
+	if (func->unbind)
+		func->unbind(func->context);
+
+	list_del(&fi->list);
+	for (i = 0; i < ui->num_funcs; i++)
+		if (fi == ui->func[i])
+			ui->func[i] = NULL;
+	kfree(fi);
+	return 0;
+}
+EXPORT_SYMBOL(usb_function_unregister);
+
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/usb/function/msm_hsusb_hw.h b/drivers/usb/function/msm_hsusb_hw.h
new file mode 100644
index 0000000..c016c3f
--- /dev/null
+++ b/drivers/usb/function/msm_hsusb_hw.h
@@ -0,0 +1,163 @@
+/* drivers/usb/function/msm_hsusb_hw.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _USB_FUNCTION_MSM_HSUSB_HW_H
+#define _USB_FUNCTION_MSM_HSUSB_HW_H
+
+#define USB_ID               (MSM_USB_BASE + 0x0000)
+#define USB_HWGENERAL        (MSM_USB_BASE + 0x0004)
+#define USB_HWHOST           (MSM_USB_BASE + 0x0008)
+#define USB_HWDEVICE         (MSM_USB_BASE + 0x000C)
+#define USB_HWTXBUF          (MSM_USB_BASE + 0x0010)
+#define USB_HWRXBUF          (MSM_USB_BASE + 0x0014)
+#define USB_SBUSCFG          (MSM_USB_BASE + 0x0090)
+
+#define USB_CAPLENGTH        (MSM_USB_BASE + 0x0100) /* 8 bit */
+#define USB_HCIVERSION       (MSM_USB_BASE + 0x0102) /* 16 bit */
+#define USB_HCSPARAMS        (MSM_USB_BASE + 0x0104)
+#define USB_HCCPARAMS        (MSM_USB_BASE + 0x0108)
+#define USB_DCIVERSION       (MSM_USB_BASE + 0x0120) /* 16 bit */
+#define USB_USBCMD           (MSM_USB_BASE + 0x0140)
+#define USB_USBSTS           (MSM_USB_BASE + 0x0144)
+#define USB_USBINTR          (MSM_USB_BASE + 0x0148)
+#define USB_FRINDEX          (MSM_USB_BASE + 0x014C)
+#define USB_DEVICEADDR       (MSM_USB_BASE + 0x0154)
+#define USB_ENDPOINTLISTADDR (MSM_USB_BASE + 0x0158)
+#define USB_BURSTSIZE        (MSM_USB_BASE + 0x0160)
+#define USB_TXFILLTUNING     (MSM_USB_BASE + 0x0164)
+#define USB_ULPI_VIEWPORT    (MSM_USB_BASE + 0x0170)
+#define USB_ENDPTNAK         (MSM_USB_BASE + 0x0178)
+#define USB_ENDPTNAKEN       (MSM_USB_BASE + 0x017C)
+#define USB_PORTSC           (MSM_USB_BASE + 0x0184)
+#define USB_OTGSC            (MSM_USB_BASE + 0x01A4)
+#define USB_USBMODE          (MSM_USB_BASE + 0x01A8)
+#define USB_ENDPTSETUPSTAT   (MSM_USB_BASE + 0x01AC)
+#define USB_ENDPTPRIME       (MSM_USB_BASE + 0x01B0)
+#define USB_ENDPTFLUSH       (MSM_USB_BASE + 0x01B4)
+#define USB_ENDPTSTAT        (MSM_USB_BASE + 0x01B8)
+#define USB_ENDPTCOMPLETE    (MSM_USB_BASE + 0x01BC)
+#define USB_ENDPTCTRL(n)     (MSM_USB_BASE + 0x01C0 + (4 * (n)))
+
+
+#define USBCMD_RESET   2
+#define USBCMD_ATTACH  1
+#define USBCMD_ATDTW   (1 << 14)
+
+#define USBMODE_DEVICE 2
+#define USBMODE_HOST   3
+
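+/* Device-mode endpoint queue head, laid out as the controller expects
+ * (assumed to follow the ChipIdea-style dQH layout used by this core).
+ */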
+struct ept_queue_head
+{
+    unsigned config;
+    unsigned active; /* read-only */
+
+    unsigned next;
+    unsigned info;
+    unsigned page0;
+    unsigned page1;
+    unsigned page2;
+    unsigned page3;
+    unsigned page4;
+    unsigned reserved_0;
+
+    unsigned char setup_data[8];
+
+    unsigned reserved_1;
+    unsigned reserved_2;
+    unsigned reserved_3;
+    unsigned reserved_4;
+};
+
+#define CONFIG_MAX_PKT(n)     ((n) << 16)
+#define CONFIG_ZLT            (1 << 29)    /* stop on zero-len xfer */
+#define CONFIG_IOS            (1 << 15)    /* IRQ on setup */
+
+struct ept_queue_item
+{
+    unsigned next;
+    unsigned info;
+    unsigned page0;
+    unsigned page1;
+    unsigned page2;
+    unsigned page3;
+    unsigned page4;
+    unsigned reserved;
+};
+
+#define TERMINATE 1
+
+#define INFO_BYTES(n)         ((n) << 16)
+#define INFO_IOC              (1 << 15)
+#define INFO_ACTIVE           (1 << 7)
+#define INFO_HALTED           (1 << 6)
+#define INFO_BUFFER_ERROR     (1 << 5)
+#define INFO_TXN_ERROR        (1 << 3)
+
+
+#define STS_NAKI              (1 << 16)  /* NAK interrupt */
+#define STS_SLI               (1 << 8)   /* R/WC - suspend state entered */
+#define STS_SRI               (1 << 7)   /* R/WC - SOF recv'd */
+#define STS_URI               (1 << 6)   /* R/WC - RESET recv'd - write to clear */
+#define STS_FRI               (1 << 3)   /* R/WC - Frame List Rollover */
+#define STS_PCI               (1 << 2)   /* R/WC - Port Change Detect */
+#define STS_UEI               (1 << 1)   /* R/WC - USB Error */
+#define STS_UI                (1 << 0)   /* R/WC - USB Transaction Complete */
+
+
+/* bits used in all the endpoint status registers */
+#define EPT_TX(n) (1 << ((n) + 16))
+#define EPT_RX(n) (1 << (n))
+
+
+#define CTRL_TXE              (1 << 23)
+#define CTRL_TXR              (1 << 22)
+#define CTRL_TXI              (1 << 21)
+#define CTRL_TXD              (1 << 17)
+#define CTRL_TXS              (1 << 16)
+#define CTRL_RXE              (1 << 7)
+#define CTRL_RXR              (1 << 6)
+#define CTRL_RXI              (1 << 5)
+#define CTRL_RXD              (1 << 1)
+#define CTRL_RXS              (1 << 0)
+
+#define CTRL_TXT_MASK         (3 << 18)
+#define CTRL_TXT_CTRL         (0 << 18)
+#define CTRL_TXT_ISOCH        (1 << 18)
+#define CTRL_TXT_BULK         (2 << 18)
+#define CTRL_TXT_INT          (3 << 18)
+
+#define CTRL_RXT_MASK         (3 << 2)
+#define CTRL_RXT_CTRL         (0 << 2)
+#define CTRL_RXT_ISOCH        (1 << 2)
+#define CTRL_RXT_BULK         (2 << 2)
+#define CTRL_RXT_INT          (3 << 2)
+
+#define ULPI_WAKEUP           (1 << 31)
+#define ULPI_RUN              (1 << 30)
+#define ULPI_WRITE            (1 << 29)
+#define ULPI_READ             (0 << 29)
+#define ULPI_STATE_NORMAL     (1 << 27)
+#define ULPI_ADDR(n)          (((n) & 255) << 16)
+#define ULPI_DATA(n)          ((n) & 255)
+#define ULPI_DATA_READ(n)     (((n) >> 8) & 255)
+
+/* USB_PORTSC bits for determining port speed */
+#define PORTSC_PSPD_FS        (0 << 26)
+#define PORTSC_PSPD_LS        (1 << 26)
+#define PORTSC_PSPD_HS        (2 << 26)
+#define PORTSC_PSPD_MASK      (3 << 26)
+
+#endif
diff --git a/drivers/usb/function/msm_otg.c b/drivers/usb/function/msm_otg.c
new file mode 100644
index 0000000..c931290
--- /dev/null
+++ b/drivers/usb/function/msm_otg.c
@@ -0,0 +1,368 @@
+/* drivers/usb/otg/msm_otg.c
+ *
+ * OTG Driver for HighSpeed USB
+ *
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <mach/msm_otg.h>
+#include <mach/msm_hsusb.h>
+#include <mach/msm_hsusb_hw.h>
+#include <mach/board.h>
+
+#define MSM_USB_BASE (xceiv->regs)
+
+#define A_HOST 0
+#define B_DEVICE 1
+#define A_TO_B 0
+#define B_TO_A 1
+
+static struct msm_otg_transceiver *xceiv;
+
+struct msm_otg_transceiver *msm_otg_get_transceiver(void)
+{
+	if (xceiv)
+		get_device(xceiv->dev);
+	return xceiv;
+}
+EXPORT_SYMBOL(msm_otg_get_transceiver);
+
+void msm_otg_put_transceiver(struct msm_otg_transceiver *xceiv)
+{
+	if (xceiv)
+		put_device(xceiv->dev);
+}
+EXPORT_SYMBOL(msm_otg_put_transceiver);
+
+static void msm_otg_set_clk(int on)
+{
+	if (on) {
+		clk_enable(xceiv->clk);
+		clk_enable(xceiv->pclk);
+	} else {
+		clk_disable(xceiv->clk);
+		clk_disable(xceiv->pclk);
+	}
+}
+
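+/* The OTGSC ID bit mirrors the ID pin: a grounded ID (bit clear) means an
+ * A-plug is attached and the port should act as host, a floating ID means
+ * peripheral.
+ */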
+static inline int is_host(void)
+{
+	int ret;
+
+	ret = (OTGSC_ID & readl(USB_OTGSC)) ? 0 : 1;
+	return ret;
+}
+
+static void msm_otg_enable(void)
+{
+	msm_otg_set_clk(1);
+	/* Enable ID interrupts */
+	writel(readl(USB_OTGSC) | OTGSC_IDIE, USB_OTGSC);
+
+	if (is_host()) {
+		pr_info("%s: configuring USB in host mode\n", __func__);
+		xceiv->hcd_ops->request(xceiv->hcd_ops->handle, REQUEST_START);
+		xceiv->state = A_HOST;
+	} else {
+		pr_info("%s: configuring USB in device mode\n", __func__);
+		xceiv->dcd_ops->request(xceiv->dcd_ops->handle, REQUEST_START);
+		xceiv->state = B_DEVICE;
+	}
+	msm_otg_set_clk(0);
+	xceiv->active = 1;
+	wake_lock_timeout(&xceiv->wlock, HZ/2);
+	enable_irq(xceiv->irq);
+}
+
+static void msm_otg_disable(int mode)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&xceiv->lock, flags);
+	xceiv->active = 0;
+	spin_unlock_irqrestore(&xceiv->lock, flags);
+
+	pr_info("%s: OTG is disabled\n", __func__);
+
+	if (mode != xceiv->state)
+		return;
+	switch (mode) {
+	case A_HOST:
+		if (xceiv->state == A_HOST) {
+			pr_info("%s: configuring USB in device mode\n",
+					__func__);
+			xceiv->dcd_ops->request(xceiv->dcd_ops->handle,
+							REQUEST_START);
+			xceiv->state = B_DEVICE;
+		}
+		break;
+	case B_DEVICE:
+		if (xceiv->state == B_DEVICE) {
+			pr_info("%s: configuring USB in host mode\n",
+					__func__);
+			xceiv->hcd_ops->request(xceiv->hcd_ops->handle,
+							REQUEST_START);
+			xceiv->state = A_HOST;
+		}
+		break;
+	}
+
+}
+
+static void msm_otg_do_work(struct work_struct *w)
+{
+	switch (xceiv->state) {
+	case A_HOST:
+		if (xceiv->flags == A_TO_B) {
+			xceiv->hcd_ops->request(xceiv->hcd_ops->handle,
+							REQUEST_STOP);
+			pr_info("%s: configuring USB in device mode\n",
+					__func__);
+			xceiv->dcd_ops->request(xceiv->dcd_ops->handle,
+							REQUEST_START);
+			xceiv->state = B_DEVICE;
+		}
+		break;
+	case B_DEVICE:
+		if (xceiv->flags == B_TO_A) {
+			xceiv->dcd_ops->request(xceiv->dcd_ops->handle,
+							REQUEST_STOP);
+			pr_info("%s: configuring USB in host mode\n",
+					__func__);
+			xceiv->hcd_ops->request(xceiv->hcd_ops->handle,
+							REQUEST_START);
+			xceiv->state = A_HOST;
+		}
+		break;
+	}
+	wake_lock_timeout(&xceiv->wlock, HZ/2);
+	enable_irq(xceiv->irq);
+}
+
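+/*
+ * ID-change interrupt: record the direction of the switch, disable the
+ * IRQ line and defer the actual role change to the workqueue, which
+ * re-enables the interrupt once it is done.
+ */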
+static irqreturn_t msm_otg_irq(int irq, void *data)
+{
+	u32 otgsc;
+	u32 temp;
+
+	if (!xceiv->active)
+		return IRQ_HANDLED;
+
+	if (xceiv->in_lpm)
+		return IRQ_HANDLED;
+
+	otgsc = readl(USB_OTGSC);
+	temp = otgsc & ~OTGSC_INTR_STS_MASK;
+	if (otgsc & OTGSC_IDIS) {
+		wake_lock(&xceiv->wlock);
+		if (is_host()) {
+			xceiv->flags = B_TO_A;
+			schedule_work(&xceiv->work);
+		} else {
+			xceiv->flags = A_TO_B;
+			schedule_work(&xceiv->work);
+		}
+		disable_irq(xceiv->irq);
+		writel(temp | OTGSC_IDIS, USB_OTGSC);
+	}
+
+	return IRQ_HANDLED;
+
+}
+
+static DEFINE_MUTEX(otg_register_lock);
+
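+/*
+ * Registration hooks for the peripheral (gadget) and host controller
+ * drivers.  OTG operation is enabled only once both sides have
+ * registered; passing NULL ops deregisters a side again.
+ */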
+static int msm_otg_set_peripheral(struct msm_otg_transceiver *xceiv,
+					struct msm_otg_ops *ops)
+{
+	int ret = 0;
+
+	mutex_lock(&otg_register_lock);
+	if (!xceiv) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+	if (!ops) {
+		xceiv->dcd_ops = NULL;
+		pr_info("%s: Peripheral driver is deregistered with OTG\n",
+				__func__);
+		msm_otg_disable(B_DEVICE);
+		goto unlock;
+	}
+	if (xceiv->dcd_ops) {
+		ret = -EBUSY;
+		goto unlock;
+	}
+
+	xceiv->dcd_ops = ops;
+	xceiv->dcd_ops->request(xceiv->dcd_ops->handle, REQUEST_STOP);
+	if (xceiv->hcd_ops)
+		msm_otg_enable();
+unlock:
+	mutex_unlock(&otg_register_lock);
+	return ret;
+}
+
+static int msm_otg_set_host(struct msm_otg_transceiver *xceiv,
+				struct msm_otg_ops *hcd_ops)
+{
+	int ret = 0;
+
+	mutex_lock(&otg_register_lock);
+	if (!xceiv) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+	if (!hcd_ops) {
+		xceiv->hcd_ops = NULL;
+		pr_info("%s: Host driver is deregistered with OTG\n",
+				__func__);
+		msm_otg_disable(A_HOST);
+		goto unlock;
+	}
+	if (xceiv->hcd_ops) {
+		ret = -EBUSY;
+		goto unlock;
+	}
+
+	xceiv->hcd_ops = hcd_ops;
+	xceiv->hcd_ops->request(xceiv->hcd_ops->handle, REQUEST_STOP);
+	if (xceiv->dcd_ops)
+		msm_otg_enable();
+
+unlock:
+	mutex_unlock(&otg_register_lock);
+	return ret;
+}
+
+static int msm_otg_set_suspend(struct msm_otg_transceiver *otg, int suspend)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&xceiv->lock, flags);
+	xceiv->in_lpm = suspend;
+	spin_unlock_irqrestore(&xceiv->lock, flags);
+	return 0;
+}
+
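+/*
+ * Probe: claim the HS USB clocks, map the controller registers, mask
+ * the OTG interrupts and request the (initially disabled) IRQ, then
+ * publish the set_host/set_peripheral/set_suspend hooks.
+ */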
+static int __init msm_otg_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct resource *res;
+	xceiv = kzalloc(sizeof(struct msm_otg_transceiver), GFP_KERNEL);
+	if (!xceiv)
+		return -ENOMEM;
+
+	xceiv->clk = clk_get(NULL, "usb_hs_clk");
+	if (IS_ERR(xceiv->clk)) {
+		ret = PTR_ERR(xceiv->clk);
+		goto free_xceiv;
+	}
+	xceiv->pclk = clk_get(NULL, "usb_hs_pclk");
+	if (IS_ERR(xceiv->pclk)) {
+		ret = PTR_ERR(xceiv->pclk);
+		goto put_clk;
+	}
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		ret = -ENODEV;
+		goto put_pclk;
+	}
+
+	xceiv->regs = ioremap(res->start, resource_size(res));
+	if (!xceiv->regs) {
+		ret = -ENOMEM;
+		goto put_pclk;
+	}
+	xceiv->irq = platform_get_irq(pdev, 0);
+	if (xceiv->irq < 0) {
+		ret = -ENODEV;
+		goto free_regs;
+	}
+
+	/* disable interrupts before requesting irq */
+	msm_otg_set_clk(1);
+	writel(0, USB_USBINTR);
+	writel(readl(USB_OTGSC) & ~OTGSC_INTR_MASK, USB_OTGSC);
+	msm_otg_set_clk(0);
+
+	ret = request_irq(xceiv->irq, msm_otg_irq, IRQF_SHARED,
+					"msm_otg", pdev);
+	if (ret)
+		goto free_regs;
+	disable_irq(xceiv->irq);
+
+	INIT_WORK(&xceiv->work, msm_otg_do_work);
+	spin_lock_init(&xceiv->lock);
+	wake_lock_init(&xceiv->wlock, WAKE_LOCK_SUSPEND, "usb_otg");
+	wake_lock(&xceiv->wlock);
+
+	xceiv->set_host = msm_otg_set_host;
+	xceiv->set_peripheral = msm_otg_set_peripheral;
+	xceiv->set_suspend = msm_otg_set_suspend;
+
+	return 0;
+free_regs:
+	iounmap(xceiv->regs);
+put_pclk:
+	clk_put(xceiv->pclk);
+put_clk:
+	clk_put(xceiv->clk);
+free_xceiv:
+	kfree(xceiv);
+	return ret;
+
+}
+
+static int __exit msm_otg_remove(struct platform_device *pdev)
+{
+	cancel_work_sync(&xceiv->work);
+	free_irq(xceiv->irq, pdev);
+	iounmap(xceiv->regs);
+	clk_put(xceiv->pclk);
+	clk_put(xceiv->clk);
+	kfree(xceiv);
+	return 0;
+}
+
+static struct platform_driver msm_otg_driver = {
+	.remove = __exit_p(msm_otg_remove),
+	.driver = {
+		.name = "msm_hsusb_otg",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init msm_otg_init(void)
+{
+	return platform_driver_probe(&msm_otg_driver, msm_otg_probe);
+}
+
+static void __exit msm_otg_exit(void)
+{
+	platform_driver_unregister(&msm_otg_driver);
+}
+
+subsys_initcall(msm_otg_init);
+module_exit(msm_otg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM USB OTG driver");
+MODULE_VERSION("1.00");
diff --git a/drivers/usb/function/null.c b/drivers/usb/function/null.c
new file mode 100644
index 0000000..68f1e35
--- /dev/null
+++ b/drivers/usb/function/null.c
@@ -0,0 +1,118 @@
+/* drivers/usb/function/null.c
+ *
+ * Null Function Device - A Data Sink
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "usb_function.h"
+
+struct null_context
+{
+	struct usb_endpoint *out;
+	struct usb_request *req0;
+	struct usb_request *req1;
+};
+
+static struct null_context _context;
+
+static void null_bind(struct usb_endpoint **ept, void *_ctxt)
+{
+	struct null_context *ctxt = _ctxt;
+	ctxt->out = ept[0];
+	printk(KERN_INFO "null_bind() %p\n", ctxt->out);
+
+	ctxt->req0 = usb_ept_alloc_req(ctxt->out, 4096);
+	ctxt->req1 = usb_ept_alloc_req(ctxt->out, 4096);
+}
+
+static void null_unbind(void *_ctxt)
+{
+	struct null_context *ctxt = _ctxt;
+	printk(KERN_INFO "null_unbind()\n");
+	if (ctxt->req0) {
+		usb_ept_free_req(ctxt->out, ctxt->req0);
+		ctxt->req0 = NULL;
+	}
+	if (ctxt->req1) {
+		usb_ept_free_req(ctxt->out, ctxt->req1);
+		ctxt->req1 = NULL;
+	}
+	ctxt->out = NULL;
+}
+
+
+static void null_queue_out(struct null_context *ctxt, struct usb_request *req);
+
+static void null_out_complete(struct usb_endpoint *ept, struct usb_request *req)
+{
+	struct null_context *ctxt = req->context;
+
+	/* keep the endpoint primed unless it has been shut down */
+	if (req->status != -ENODEV)
+		null_queue_out(ctxt, req);
+}
+
+static void null_queue_out(struct null_context *ctxt, struct usb_request *req)
+{
+	req->complete = null_out_complete;
+	req->context = ctxt;
+	req->length = 4096;
+
+	usb_ept_queue_xfer(ctxt->out, req);
+}
+
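+/*
+ * When the configuration goes live, queue both OUT requests; each one
+ * re-queues itself from its completion handler, so the function just
+ * keeps sinking whatever the host sends.
+ */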
+static void null_configure(int configured, void *_ctxt)
+{
+	struct null_context *ctxt = _ctxt;
+	printk(KERN_INFO "null_configure() %d\n", configured);
+
+	if (configured) {
+		null_queue_out(ctxt, ctxt->req0);
+		null_queue_out(ctxt, ctxt->req1);
+	} else {
+		/* all pending requests will be canceled */
+	}
+}
+
+static struct usb_function usb_func_null = {
+	.bind = null_bind,
+	.unbind = null_unbind,
+	.configure = null_configure,
+
+	.name = "null",
+	.context = &_context,
+
+	.ifc_class = 0xff,
+	.ifc_subclass = 0xfe,
+	.ifc_protocol = 0x01,
+
+	.ifc_name = "null",
+
+	.ifc_ept_count = 1,
+	.ifc_ept_type = { EPT_BULK_OUT },
+};
+
+static int __init null_init(void)
+{
+	printk(KERN_INFO "null_init()\n");
+	usb_function_register(&usb_func_null);
+	return 0;
+}
+
+module_init(null_init);
diff --git a/drivers/usb/function/rmnet.c b/drivers/usb/function/rmnet.c
new file mode 100644
index 0000000..e618ec0
--- /dev/null
+++ b/drivers/usb/function/rmnet.c
@@ -0,0 +1,1086 @@
+/*
+ * rmnet.c -- RmNet function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+
+#include <mach/msm_smd.h>
+#include <linux/usb/cdc.h>
+
+#include "usb_function.h"
+
+static char *rmnet_ctl_ch = CONFIG_RMNET_SMD_CTL_CHANNEL;
+module_param(rmnet_ctl_ch, charp, S_IRUGO);
+MODULE_PARM_DESC(rmnet_ctl_ch, "RmNet control SMD channel");
+
+static char *rmnet_data_ch = CONFIG_RMNET_SMD_DATA_CHANNEL;
+module_param(rmnet_data_ch, charp, S_IRUGO);
+MODULE_PARM_DESC(rmnet_data_ch, "RmNet data SMD channel");
+
+#define RMNET_NOTIFY_INTERVAL	5
+#define RMNET_MAX_NOTIFY_SIZE	sizeof(struct usb_cdc_notification)
+
+#define QMI_REQ_MAX		4
+#define QMI_REQ_SIZE		2048
+#define QMI_RESP_MAX		8
+#define QMI_RESP_SIZE		2048
+
+#define RX_REQ_MAX		8
+#define RX_REQ_SIZE		2048
+#define TX_REQ_MAX		8
+#define TX_REQ_SIZE		2048
+
+#define TXN_MAX 		2048
+
+static struct usb_interface_descriptor rmnet_interface_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	/* .bInterfaceNumber = DYNAMIC */
+	.bNumEndpoints =	3,
+	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceProtocol =	USB_CLASS_VENDOR_SPEC,
+	/* .iInterface = DYNAMIC */
+};
+
+/* Full speed support */
+static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+	.bInterval =		1 << RMNET_NOTIFY_INTERVAL,
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_in_desc  = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(64),
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(64),
+};
+
+/* High speed support */
+static struct usb_endpoint_descriptor rmnet_hs_notify_desc  = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+	.bInterval =		RMNET_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+};
+
+/* QMI request/response buffer */
+struct qmi_buf {
+	void *buf;
+	int len;
+	struct list_head list;
+};
+
+/* Control & data SMD channel private data */
+struct rmnet_smd_info {
+	struct smd_channel 	*ch;
+	struct tasklet_struct	tx_tlet;
+	struct tasklet_struct	rx_tlet;
+#define CH_OPENED	0
+	unsigned long		flags;
+	/* pending rx packet length */
+	atomic_t		rx_pkt;
+	/* wait for smd open event */
+	wait_queue_head_t	wait;
+};
+
+struct rmnet_dev {
+	struct usb_endpoint	*epout;
+	struct usb_endpoint	*epin;
+	struct usb_endpoint	*epnotify;
+	struct usb_request 	*notify_req;
+
+	u8			ifc_id;
+	/* QMI lists */
+	struct list_head	qmi_req_pool;
+	struct list_head	qmi_resp_pool;
+	struct list_head	qmi_req_q;
+	struct list_head	qmi_resp_q;
+	/* Tx/Rx lists */
+	struct list_head 	tx_idle;
+	struct list_head 	rx_idle;
+	struct list_head	rx_queue;
+
+	spinlock_t		lock;
+	atomic_t		online;
+	atomic_t		notify_count;
+
+	struct rmnet_smd_info	smd_ctl;
+	struct rmnet_smd_info	smd_data;
+
+	struct workqueue_struct *wq;
+	struct work_struct connect_work;
+	struct work_struct disconnect_work;
+};
+
+static struct usb_function rmnet_function;
+
+struct qmi_buf *
+rmnet_alloc_qmi(unsigned len, gfp_t kmalloc_flags)
+{
+	struct qmi_buf *qmi;
+
+	qmi = kmalloc(sizeof(struct qmi_buf), kmalloc_flags);
+	if (qmi != NULL) {
+		qmi->buf = kmalloc(len, kmalloc_flags);
+		if (qmi->buf == NULL) {
+			kfree(qmi);
+			qmi = NULL;
+		}
+	}
+
+	return qmi ? qmi : ERR_PTR(-ENOMEM);
+}
+
+void rmnet_free_qmi(struct qmi_buf *qmi)
+{
+	kfree(qmi->buf);
+	kfree(qmi);
+}
+/*
+ * Allocate a usb_request and its buffer.  Returns a pointer to the
+ * usb_request or NULL if there is an error.
+ */
+struct usb_request *
+rmnet_alloc_req(struct usb_endpoint *ep, unsigned len, gfp_t kmalloc_flags)
+{
+	struct usb_request *req;
+
+	req = usb_ept_alloc_req(ep, 0);
+
+	if (req != NULL) {
+		req->length = len;
+		req->buf = kmalloc(len, kmalloc_flags);
+		if (req->buf == NULL) {
+			usb_ept_free_req(ep, req);
+			req = NULL;
+		}
+	}
+
+	return req ? req : ERR_PTR(-ENOMEM);
+}
+
+/*
+ * Free a usb_request and its buffer.
+ */
+void rmnet_free_req(struct usb_endpoint *ep, struct usb_request *req)
+{
+	kfree(req->buf);
+	usb_ept_free_req(ep, req);
+}
+
+static void rmnet_notify_complete(struct usb_endpoint *ep,
+		struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	int status = req->status;
+
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+	case -ENODEV:
+		/* connection gone */
+		atomic_set(&dev->notify_count, 0);
+		break;
+	default:
+		pr_err("%s: rmnet notify ep error %d\n", __func__, status);
+		/* FALLTHROUGH */
+	case 0:
+		if (ep != dev->epnotify)
+			break;
+
+		/* handle multiple pending QMI_RESPONSE_AVAILABLE
+		 * notifications by resending until we're done
+		 */
+		if (atomic_dec_and_test(&dev->notify_count))
+			break;
+
+		status = usb_ept_queue_xfer(dev->epnotify, dev->notify_req);
+		if (status) {
+			atomic_dec(&dev->notify_count);
+			pr_err("%s: rmnet notify ep enqueue error %d\n",
+					__func__, status);
+		}
+		break;
+	}
+}
+
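+/*
+ * notify_count tracks outstanding QMI_RESPONSE_AVAILABLE events.  Only
+ * the transition from 0 to 1 actually queues the interrupt request; the
+ * completion handler keeps re-queuing it until the count is drained.
+ */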
+static void qmi_response_available(struct rmnet_dev *dev)
+{
+	struct usb_request		*req = dev->notify_req;
+	struct usb_cdc_notification	*event = req->buf;
+	int status;
+
+	/* Response will be sent later */
+	if (atomic_inc_return(&dev->notify_count) != 1)
+		return;
+
+	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+			| USB_RECIP_INTERFACE;
+	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
+	event->wValue = cpu_to_le16(0);
+	event->wIndex = cpu_to_le16(dev->ifc_id);
+	event->wLength = cpu_to_le16(0);
+
+	status = usb_ept_queue_xfer(dev->epnotify, dev->notify_req);
+	if (status < 0) {
+		atomic_dec(&dev->notify_count);
+		pr_err("%s: rmnet notify ep enqueue error %d\n",
+				__func__, status);
+	}
+}
+
+/* TODO
+ * handle modem restart events
+ */
+static void rmnet_smd_notify(void *priv, unsigned event)
+{
+	struct rmnet_smd_info *smd_info = priv;
+	int len = atomic_read(&smd_info->rx_pkt);
+
+	switch (event) {
+	case SMD_EVENT_DATA: {
+
+		if (len && (smd_write_avail(smd_info->ch) >= len))
+			tasklet_schedule(&smd_info->rx_tlet);
+
+		if (smd_read_avail(smd_info->ch))
+			tasklet_schedule(&smd_info->tx_tlet);
+
+		break;
+	}
+	case SMD_EVENT_OPEN:
+		/* usb endpoints are not enabled until smd channels
+		 * are opened. wake up worker thread to continue
+		 * connection processing
+		 */
+		set_bit(CH_OPENED, &smd_info->flags);
+		wake_up(&smd_info->wait);
+		break;
+	case SMD_EVENT_CLOSE:
+		/* We will never come here; flags are reset after
+		 * closing the smd channel.
+		 */
+		clear_bit(CH_OPENED, &smd_info->flags);
+		break;
+	}
+}
+
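+/*
+ * In the tasklet names below, "tx" moves packets from the SMD channel
+ * towards the USB host, while "rx" drains host data into SMD once the
+ * channel has room again.
+ */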
+static void rmnet_control_tx_tlet(unsigned long arg)
+{
+	struct rmnet_dev *dev = (struct rmnet_dev *) arg;
+	struct qmi_buf *qmi_resp;
+	int sz;
+	unsigned long flags;
+
+	while (1) {
+		sz = smd_cur_packet_size(dev->smd_ctl.ch);
+		if (sz == 0)
+			break;
+		if (smd_read_avail(dev->smd_ctl.ch) < sz)
+			break;
+
+		spin_lock_irqsave(&dev->lock, flags);
+		if (list_empty(&dev->qmi_resp_pool)) {
+			pr_err("%s: rmnet QMI Tx buffers full\n", __func__);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			break;
+		}
+		qmi_resp = list_first_entry(&dev->qmi_resp_pool,
+				struct qmi_buf, list);
+		list_del(&qmi_resp->list);
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		qmi_resp->len = smd_read(dev->smd_ctl.ch, qmi_resp->buf, sz);
+
+		spin_lock_irqsave(&dev->lock, flags);
+		list_add_tail(&qmi_resp->list, &dev->qmi_resp_q);
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		qmi_response_available(dev);
+	}
+
+}
+
+static void rmnet_control_rx_tlet(unsigned long arg)
+{
+	struct rmnet_dev *dev = (struct rmnet_dev *) arg;
+	struct qmi_buf *qmi_req;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	while (1) {
+
+		if (list_empty(&dev->qmi_req_q)) {
+			atomic_set(&dev->smd_ctl.rx_pkt, 0);
+			break;
+		}
+		qmi_req = list_first_entry(&dev->qmi_req_q,
+				struct qmi_buf, list);
+		if (smd_write_avail(dev->smd_ctl.ch) < qmi_req->len) {
+			atomic_set(&dev->smd_ctl.rx_pkt, qmi_req->len);
+			pr_debug("%s: rmnet control smd channel full\n",
+					__func__);
+			break;
+		}
+
+		list_del(&qmi_req->list);
+		spin_unlock_irqrestore(&dev->lock, flags);
+		ret = smd_write(dev->smd_ctl.ch, qmi_req->buf, qmi_req->len);
+		spin_lock_irqsave(&dev->lock, flags);
+		if (ret != qmi_req->len) {
+			pr_err("%s: rmnet control smd write failed\n",
+					__func__);
+			break;
+		}
+
+		list_add_tail(&qmi_req->list, &dev->qmi_req_pool);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void rmnet_command_complete(struct usb_endpoint *ep,
+		struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	struct usb_function *func = &rmnet_function;
+	struct usb_request *in_req;
+	struct qmi_buf *qmi_req;
+	int ret;
+
+	if (req->status < 0) {
+		pr_err("%s: rmnet command error %d\n", __func__, req->status);
+		return;
+	}
+
+	spin_lock(&dev->lock);
+	/* no pending control rx packet */
+	if (!atomic_read(&dev->smd_ctl.rx_pkt)) {
+		if (smd_write_avail(dev->smd_ctl.ch) < req->actual) {
+			atomic_set(&dev->smd_ctl.rx_pkt, req->actual);
+			goto queue_req;
+		}
+		spin_unlock(&dev->lock);
+		ret = smd_write(dev->smd_ctl.ch, req->buf, req->actual);
+		/* This should never happen */
+		if (ret != req->actual)
+			pr_err("%s: rmnet control smd write failed\n",
+					__func__);
+		goto ep0_ack;
+	}
+queue_req:
+	if (list_empty(&dev->qmi_req_pool)) {
+		spin_unlock(&dev->lock);
+		pr_err("%s: rmnet QMI pool is empty\n", __func__);
+		return;
+	}
+
+	qmi_req = list_first_entry(&dev->qmi_req_pool, struct qmi_buf, list);
+	list_del(&qmi_req->list);
+	spin_unlock(&dev->lock);
+	memcpy(qmi_req->buf, req->buf, req->actual);
+	qmi_req->len = req->actual;
+	spin_lock(&dev->lock);
+	list_add_tail(&qmi_req->list, &dev->qmi_req_q);
+	spin_unlock(&dev->lock);
+ep0_ack:
+	/* Send ACK on EP0 IN */
+	in_req = func->ep0_in_req;
+	in_req->length = 0;
+	in_req->complete = NULL;
+	usb_ept_queue_xfer(func->ep0_in, in_req);
+}
+
+static int rmnet_setup(struct usb_ctrlrequest *ctrl, void *buf,
+				int len, void *context)
+{
+	struct rmnet_dev *dev = context;
+	struct usb_request *req = rmnet_function.ep0_out_req;
+	int			ret = -EOPNOTSUPP;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+	struct qmi_buf *resp;
+	int schedule = 0;
+
+	if (!atomic_read(&dev->online))
+		return -ENOTCONN;
+
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
+		if (w_value || w_index != dev->ifc_id)
+			goto invalid;
+		ret = w_length;
+		req->complete = rmnet_command_complete;
+		req->context = dev;
+		break;
+
+
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
+		if (w_value || w_index != dev->ifc_id)
+			goto invalid;
+		else {
+			spin_lock(&dev->lock);
+			if (list_empty(&dev->qmi_resp_q)) {
+				spin_unlock(&dev->lock);
+				goto invalid;
+			}
+			resp = list_first_entry(&dev->qmi_resp_q,
+					struct qmi_buf, list);
+			list_del(&resp->list);
+			spin_unlock(&dev->lock);
+			memcpy(buf, resp->buf, resp->len);
+			ret = resp->len;
+			spin_lock(&dev->lock);
+
+			if (list_empty(&dev->qmi_resp_pool))
+				schedule = 1;
+			list_add_tail(&resp->list, &dev->qmi_resp_pool);
+
+			if (schedule)
+				tasklet_schedule(&dev->smd_ctl.tx_tlet);
+			spin_unlock(&dev->lock);
+		}
+		break;
+	default:
+
+invalid:
+		pr_debug("%s: invalid control req%02x.%02x v%04x i%04x l%d\n",
+			__func__, ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	return ret;
+}
+
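+/*
+ * Queue every idle request on the bulk OUT endpoint.  Called when the
+ * function goes online and again after queued OUT data has been pushed
+ * into SMD.
+ */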
+static void rmnet_start_rx(struct rmnet_dev *dev)
+{
+	int status;
+	struct usb_request *req;
+	struct list_head *act, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_for_each_safe(act, tmp, &dev->rx_idle) {
+		req = list_entry(act, struct usb_request, list);
+		list_del(&req->list);
+
+		spin_unlock_irqrestore(&dev->lock, flags);
+		status = usb_ept_queue_xfer(dev->epout, req);
+		spin_lock_irqsave(&dev->lock, flags);
+
+		if (status) {
+			pr_err("%s: rmnet data rx enqueue err %d\n",
+					__func__, status);
+			list_add_tail(&req->list, &dev->rx_idle);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void rmnet_data_tx_tlet(unsigned long arg)
+{
+	struct rmnet_dev *dev = (struct rmnet_dev *) arg;
+	struct usb_request *req;
+	int status;
+	int sz;
+	unsigned long flags;
+
+	while (1) {
+
+		sz = smd_cur_packet_size(dev->smd_data.ch);
+		if (sz == 0)
+			break;
+		if (smd_read_avail(dev->smd_data.ch) < sz)
+			break;
+
+		spin_lock_irqsave(&dev->lock, flags);
+		if (list_empty(&dev->tx_idle)) {
+			spin_unlock_irqrestore(&dev->lock, flags);
+			pr_debug("%s: rmnet data Tx buffers full\n", __func__);
+			break;
+		}
+		req = list_first_entry(&dev->tx_idle, struct usb_request, list);
+		list_del(&req->list);
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		req->length = smd_read(dev->smd_data.ch, req->buf, sz);
+		status = usb_ept_queue_xfer(dev->epin, req);
+		if (status) {
+			pr_err("%s: rmnet tx data enqueue err %d\n",
+					__func__, status);
+			spin_lock_irqsave(&dev->lock, flags);
+			list_add_tail(&req->list, &dev->tx_idle);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			break;
+		}
+	}
+
+}
+
+static void rmnet_data_rx_tlet(unsigned long arg)
+{
+	struct rmnet_dev *dev = (struct rmnet_dev *) arg;
+	struct usb_request *req;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	while (1) {
+		if (list_empty(&dev->rx_queue)) {
+			atomic_set(&dev->smd_data.rx_pkt, 0);
+			break;
+		}
+		req = list_first_entry(&dev->rx_queue,
+			struct usb_request, list);
+		if (smd_write_avail(dev->smd_data.ch) < req->actual) {
+			atomic_set(&dev->smd_data.rx_pkt, req->actual);
+			pr_debug("%s: rmnet SMD data channel full\n", __func__);
+			break;
+		}
+
+		list_del(&req->list);
+		spin_unlock_irqrestore(&dev->lock, flags);
+		ret = smd_write(dev->smd_data.ch, req->buf, req->actual);
+		spin_lock_irqsave(&dev->lock, flags);
+		if (ret != req->actual) {
+			pr_err("%s: rmnet SMD data write failed\n", __func__);
+			break;
+		}
+		list_add_tail(&req->list, &dev->rx_idle);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	/* We have free rx data requests. */
+	rmnet_start_rx(dev);
+}
+
+/* If SMD has enough room to accommodate a data rx packet,
+ * write into SMD directly. Otherwise enqueue to rx_queue.
+ * We will not write into SMD directly until rx_queue is
+ * empty, to strictly preserve the ordering of requests.
+ */
+static void rmnet_complete_epout(struct usb_endpoint *ep,
+		struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	int status = req->status;
+	int ret;
+
+	switch (status) {
+	case 0:
+		/* normal completion */
+		break;
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+	case -ENODEV:
+		/* connection gone */
+		spin_lock(&dev->lock);
+		list_add_tail(&req->list, &dev->rx_idle);
+		spin_unlock(&dev->lock);
+		return;
+	default:
+		/* unexpected failure */
+		pr_err("%s: response error %d, %d/%d\n",
+			__func__, status, req->actual,
+			req->length);
+		spin_lock(&dev->lock);
+		list_add_tail(&req->list, &dev->rx_idle);
+		spin_unlock(&dev->lock);
+		return;
+	}
+
+	spin_lock(&dev->lock);
+	if (!atomic_read(&dev->smd_data.rx_pkt)) {
+		if (smd_write_avail(dev->smd_data.ch) < req->actual) {
+			atomic_set(&dev->smd_data.rx_pkt, req->actual);
+			goto queue_req;
+		}
+		spin_unlock(&dev->lock);
+		ret = smd_write(dev->smd_data.ch, req->buf, req->actual);
+		/* This should never happen */
+		if (ret != req->actual)
+			pr_err("%s: rmnet data smd write failed\n", __func__);
+		/* Restart Rx */
+		spin_lock(&dev->lock);
+		list_add_tail(&req->list, &dev->rx_idle);
+		spin_unlock(&dev->lock);
+		rmnet_start_rx(dev);
+		return;
+	}
+queue_req:
+	list_add_tail(&req->list, &dev->rx_queue);
+	spin_unlock(&dev->lock);
+}
+
+static void rmnet_complete_epin(struct usb_endpoint *ep,
+		struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	int status = req->status;
+	int schedule = 0;
+
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+	case -ENODEV:
+		/* connection gone */
+		spin_lock(&dev->lock);
+		list_add_tail(&req->list, &dev->tx_idle);
+		spin_unlock(&dev->lock);
+		break;
+	default:
+		pr_err("%s: rmnet data tx ep error %d\n", __func__, status);
+		/* FALLTHROUGH */
+	case 0:
+		spin_lock(&dev->lock);
+		if (list_empty(&dev->tx_idle))
+			schedule = 1;
+		list_add_tail(&req->list, &dev->tx_idle);
+
+		if (schedule)
+			tasklet_schedule(&dev->smd_data.tx_tlet);
+		spin_unlock(&dev->lock);
+		break;
+	}
+
+}
+
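+/*
+ * Disconnect cleanup, run from the workqueue: kill the tasklets, return
+ * every queued buffer to its idle pool and close both SMD channels.
+ */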
+static void rmnet_disconnect_work(struct work_struct *w)
+{
+	struct qmi_buf *qmi;
+	struct usb_request *req;
+	struct list_head *act, *tmp;
+	struct rmnet_dev *dev = container_of(w, struct rmnet_dev,
+					disconnect_work);
+
+	atomic_set(&dev->notify_count, 0);
+
+	tasklet_kill(&dev->smd_ctl.rx_tlet);
+	tasklet_kill(&dev->smd_ctl.tx_tlet);
+	tasklet_kill(&dev->smd_data.rx_tlet);
+	tasklet_kill(&dev->smd_data.tx_tlet);
+
+	list_for_each_safe(act, tmp, &dev->rx_queue) {
+		req = list_entry(act, struct usb_request, list);
+		list_del(&req->list);
+		list_add_tail(&req->list, &dev->rx_idle);
+	}
+
+	list_for_each_safe(act, tmp, &dev->qmi_req_q) {
+		qmi = list_entry(act, struct qmi_buf, list);
+		list_del(&qmi->list);
+		list_add_tail(&qmi->list, &dev->qmi_req_pool);
+	}
+
+	list_for_each_safe(act, tmp, &dev->qmi_resp_q) {
+		qmi = list_entry(act, struct qmi_buf, list);
+		list_del(&qmi->list);
+		list_add_tail(&qmi->list, &dev->qmi_resp_pool);
+	}
+
+	smd_close(dev->smd_ctl.ch);
+	dev->smd_ctl.flags = 0;
+
+	smd_close(dev->smd_data.ch);
+	dev->smd_data.flags = 0;
+}
+
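+/*
+ * Runs from the workqueue once the configuration is selected: open the
+ * control and data SMD channels (blocking until both report open), pick
+ * the speed-specific descriptors, enable the endpoints and prime the
+ * OUT requests.
+ */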
+static void rmnet_connect_work(struct work_struct *w)
+{
+	struct rmnet_dev *dev = container_of(w, struct rmnet_dev, connect_work);
+	int ret;
+
+	/* Control channel for QMI messages */
+	ret = smd_open(rmnet_ctl_ch, &dev->smd_ctl.ch,
+			&dev->smd_ctl, rmnet_smd_notify);
+	if (ret) {
+		pr_err("%s: Unable to open control smd channel\n", __func__);
+		return;
+	}
+	wait_event(dev->smd_ctl.wait, test_bit(CH_OPENED,
+				&dev->smd_ctl.flags));
+
+	/* Data channel for network packets */
+	ret = smd_open(rmnet_data_ch, &dev->smd_data.ch,
+			&dev->smd_data, rmnet_smd_notify);
+	if (ret) {
+		pr_err("%s: Unable to open data smd channel\n", __func__);
+		smd_close(dev->smd_ctl.ch);
+		dev->smd_ctl.flags = 0;
+		return;
+	}
+	wait_event(dev->smd_data.wait, test_bit(CH_OPENED,
+				&dev->smd_data.flags));
+
+	if (usb_msm_get_speed() == USB_SPEED_HIGH) {
+		usb_configure_endpoint(dev->epin, &rmnet_hs_in_desc);
+		usb_configure_endpoint(dev->epout, &rmnet_hs_out_desc);
+		usb_configure_endpoint(dev->epnotify, &rmnet_hs_notify_desc);
+	} else {
+		usb_configure_endpoint(dev->epin, &rmnet_fs_in_desc);
+		usb_configure_endpoint(dev->epout, &rmnet_fs_out_desc);
+		usb_configure_endpoint(dev->epnotify, &rmnet_fs_notify_desc);
+	}
+
+	usb_ept_enable(dev->epin,  1);
+	usb_ept_enable(dev->epout, 1);
+	usb_ept_enable(dev->epnotify, 1);
+
+	atomic_set(&dev->online, 1);
+	/* Queue Rx data requests */
+	rmnet_start_rx(dev);
+}
+
+static void rmnet_configure(int configured, void *context)
+
+{
+	struct rmnet_dev *dev = context;
+
+	if (configured) {
+		queue_work(dev->wq, &dev->connect_work);
+	} else {
+		/* all pending requests will be canceled */
+		if (!atomic_read(&dev->online))
+			return;
+
+		atomic_set(&dev->online, 0);
+
+		usb_ept_fifo_flush(dev->epnotify);
+		usb_ept_enable(dev->epnotify, 0);
+
+		usb_ept_fifo_flush(dev->epout);
+		usb_ept_enable(dev->epout, 0);
+
+		usb_ept_fifo_flush(dev->epin);
+		usb_ept_enable(dev->epin, 0);
+
+		/* cleanup work */
+		queue_work(dev->wq, &dev->disconnect_work);
+	}
+
+}
+
+static void rmnet_free_buf(struct rmnet_dev *dev)
+{
+	struct qmi_buf *qmi;
+	struct usb_request *req;
+	struct list_head *act, *tmp;
+
+	/* free all usb requests in tx pool */
+	list_for_each_safe(act, tmp, &dev->tx_idle) {
+		req = list_entry(act, struct usb_request, list);
+		list_del(&req->list);
+		rmnet_free_req(dev->epin, req);
+	}
+
+	/* free all usb requests in rx pool */
+	list_for_each_safe(act, tmp, &dev->rx_idle) {
+		req = list_entry(act, struct usb_request, list);
+		list_del(&req->list);
+		rmnet_free_req(dev->epout, req);
+	}
+
+	/* free all buffers in qmi request pool */
+	list_for_each_safe(act, tmp, &dev->qmi_req_pool) {
+		qmi = list_entry(act, struct qmi_buf, list);
+		list_del(&qmi->list);
+		rmnet_free_qmi(qmi);
+	}
+
+	/* free all buffers in qmi response pool */
+	list_for_each_safe(act, tmp, &dev->qmi_resp_pool) {
+		qmi = list_entry(act, struct qmi_buf, list);
+		list_del(&qmi->list);
+		rmnet_free_qmi(qmi);
+	}
+
+	rmnet_free_req(dev->epnotify, dev->notify_req);
+}
+
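+/*
+ * Bind: grab an interface number, allocate the bulk IN/OUT and notify
+ * endpoints, the notification request, and the QMI/data request pools.
+ */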
+static void rmnet_bind(void *context)
+{
+	struct rmnet_dev *dev = context;
+	int i, ret;
+	struct usb_request *req;
+	struct qmi_buf *qmi;
+
+	dev->ifc_id = usb_msm_get_next_ifc_number(&rmnet_function);
+	rmnet_interface_desc.bInterfaceNumber = dev->ifc_id;
+
+	/*Configuring IN Endpoint*/
+	dev->epin = usb_alloc_endpoint(USB_DIR_IN);
+	if (!dev->epin)
+		return;
+
+	rmnet_hs_in_desc.bEndpointAddress = USB_DIR_IN |
+					dev->epin->num;
+	rmnet_fs_in_desc.bEndpointAddress = USB_DIR_IN |
+					dev->epin->num;
+
+	/*Configuring OUT Endpoint*/
+	dev->epout = usb_alloc_endpoint(USB_DIR_OUT);
+	if (!dev->epout)
+		goto free_epin;
+
+	rmnet_hs_out_desc.bEndpointAddress = USB_DIR_OUT |
+					dev->epout->num;
+	rmnet_fs_out_desc.bEndpointAddress = USB_DIR_OUT |
+					dev->epout->num;
+
+	/*Configuring NOTIFY Endpoint*/
+	dev->epnotify = usb_alloc_endpoint(USB_DIR_IN);
+	if (!dev->epnotify)
+		goto free_epout;
+
+	rmnet_hs_notify_desc.bEndpointAddress = USB_DIR_IN |
+				dev->epnotify->num;
+	rmnet_fs_notify_desc.bEndpointAddress = USB_DIR_IN |
+				dev->epnotify->num;
+
+	dev->notify_req = usb_ept_alloc_req(dev->epnotify, 0);
+	if (!dev->notify_req)
+		goto free_epnotify;
+
+	dev->notify_req->buf = kmalloc(RMNET_MAX_NOTIFY_SIZE, GFP_KERNEL);
+	if (!dev->notify_req->buf)
+		goto free_buf;
+
+	dev->notify_req->complete = rmnet_notify_complete;
+	dev->notify_req->context = dev;
+	dev->notify_req->length = RMNET_MAX_NOTIFY_SIZE;
+
+	/* Allocate the qmi request and response buffers */
+	for (i = 0; i < QMI_REQ_MAX; i++) {
+		qmi = rmnet_alloc_qmi(QMI_REQ_SIZE, GFP_KERNEL);
+		if (IS_ERR(qmi)) {
+			ret = PTR_ERR(qmi);
+			goto free_buf;
+		}
+		list_add_tail(&qmi->list, &dev->qmi_req_pool);
+	}
+
+	for (i = 0; i < QMI_RESP_MAX; i++) {
+		qmi = rmnet_alloc_qmi(QMI_RESP_SIZE, GFP_KERNEL);
+		if (IS_ERR(qmi)) {
+			ret = PTR_ERR(qmi);
+			goto free_buf;
+		}
+		list_add_tail(&qmi->list, &dev->qmi_resp_pool);
+	}
+
+	/* Allocate bulk in/out requests for data transfer */
+	for (i = 0; i < RX_REQ_MAX; i++) {
+		req = rmnet_alloc_req(dev->epout, RX_REQ_SIZE, GFP_KERNEL);
+		if (IS_ERR(req)) {
+			ret = PTR_ERR(req);
+			goto free_buf;
+		}
+		req->length = TXN_MAX;
+		req->context = dev;
+		req->complete = rmnet_complete_epout;
+		list_add_tail(&req->list, &dev->rx_idle);
+	}
+
+	for (i = 0; i < TX_REQ_MAX; i++) {
+		req = rmnet_alloc_req(dev->epin, TX_REQ_SIZE, GFP_KERNEL);
+		if (IS_ERR(req)) {
+			ret = PTR_ERR(req);
+			goto free_buf;
+		}
+		req->context = dev;
+		req->complete = rmnet_complete_epin;
+		list_add_tail(&req->list, &dev->tx_idle);
+	}
+
+
+	pr_info("Rmnet function bind completed\n");
+
+	return;
+
+free_buf:
+	rmnet_free_buf(dev);
+free_epnotify:
+	usb_free_endpoint(dev->epnotify);
+free_epout:
+	usb_free_endpoint(dev->epout);
+free_epin:
+	usb_free_endpoint(dev->epin);
+
+}
+
+static void rmnet_unbind(void *context)
+{
+	struct rmnet_dev *dev = context;
+
+	tasklet_kill(&dev->smd_ctl.rx_tlet);
+	tasklet_kill(&dev->smd_ctl.tx_tlet);
+	tasklet_kill(&dev->smd_data.rx_tlet);
+	tasklet_kill(&dev->smd_data.tx_tlet);
+	flush_workqueue(dev->wq);
+
+	rmnet_free_buf(dev);
+	usb_free_endpoint(dev->epin);
+	usb_free_endpoint(dev->epout);
+	usb_free_endpoint(dev->epnotify);
+
+	kfree(dev);
+
+}
+static struct usb_function rmnet_function = {
+	.bind = rmnet_bind,
+	.configure = rmnet_configure,
+	.unbind = rmnet_unbind,
+	.setup  = rmnet_setup,
+	.name = "rmnet",
+};
+
+struct usb_descriptor_header *rmnet_hs_descriptors[5];
+struct usb_descriptor_header *rmnet_fs_descriptors[5];
+static int __init rmnet_init(void)
+{
+	struct rmnet_dev *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->wq = create_singlethread_workqueue("k_rmnet_work");
+	if (!dev->wq) {
+		ret = -ENOMEM;
+		goto free_dev;
+	}
+
+	spin_lock_init(&dev->lock);
+	atomic_set(&dev->notify_count, 0);
+	atomic_set(&dev->online, 0);
+	atomic_set(&dev->smd_ctl.rx_pkt, 0);
+	atomic_set(&dev->smd_data.rx_pkt, 0);
+
+	INIT_WORK(&dev->connect_work, rmnet_connect_work);
+	INIT_WORK(&dev->disconnect_work, rmnet_disconnect_work);
+
+	tasklet_init(&dev->smd_ctl.rx_tlet, rmnet_control_rx_tlet,
+					(unsigned long) dev);
+	tasklet_init(&dev->smd_ctl.tx_tlet, rmnet_control_tx_tlet,
+					(unsigned long) dev);
+	tasklet_init(&dev->smd_data.rx_tlet, rmnet_data_rx_tlet,
+					(unsigned long) dev);
+	tasklet_init(&dev->smd_data.tx_tlet, rmnet_data_tx_tlet,
+					(unsigned long) dev);
+
+	init_waitqueue_head(&dev->smd_ctl.wait);
+	init_waitqueue_head(&dev->smd_data.wait);
+
+	INIT_LIST_HEAD(&dev->qmi_req_pool);
+	INIT_LIST_HEAD(&dev->qmi_req_q);
+	INIT_LIST_HEAD(&dev->qmi_resp_pool);
+	INIT_LIST_HEAD(&dev->qmi_resp_q);
+	INIT_LIST_HEAD(&dev->rx_idle);
+	INIT_LIST_HEAD(&dev->rx_queue);
+	INIT_LIST_HEAD(&dev->tx_idle);
+
+	rmnet_hs_descriptors[0] =
+		(struct usb_descriptor_header *)&rmnet_interface_desc;
+	rmnet_hs_descriptors[1] =
+		(struct usb_descriptor_header *)&rmnet_hs_in_desc;
+	rmnet_hs_descriptors[2] =
+		(struct usb_descriptor_header *)&rmnet_hs_out_desc;
+	rmnet_hs_descriptors[3] =
+		(struct usb_descriptor_header *)&rmnet_hs_notify_desc;
+	rmnet_hs_descriptors[4] = NULL;
+
+	rmnet_fs_descriptors[0] =
+		(struct usb_descriptor_header *)&rmnet_interface_desc;
+	rmnet_fs_descriptors[1] =
+		(struct usb_descriptor_header *)&rmnet_fs_in_desc;
+	rmnet_fs_descriptors[2] =
+		(struct usb_descriptor_header *)&rmnet_fs_out_desc;
+	rmnet_fs_descriptors[3] =
+		(struct usb_descriptor_header *)&rmnet_fs_notify_desc;
+	rmnet_fs_descriptors[4] = NULL;
+
+	rmnet_function.hs_descriptors = rmnet_hs_descriptors;
+	rmnet_function.fs_descriptors = rmnet_fs_descriptors;
+	rmnet_function.context = dev;
+
+	ret = usb_function_register(&rmnet_function);
+	if (ret)
+		goto free_wq;
+
+	return 0;
+
+free_wq:
+	destroy_workqueue(dev->wq);
+free_dev:
+	kfree(dev);
+
+	return ret;
+}
+
+static void __exit rmnet_exit(void)
+{
+	usb_function_unregister(&rmnet_function);
+}
+
+module_init(rmnet_init);
+module_exit(rmnet_exit);
+MODULE_DESCRIPTION("RmNet usb function driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/function/serial.c b/drivers/usb/function/serial.c
new file mode 100644
index 0000000..0539351
--- /dev/null
+++ b/drivers/usb/function/serial.c
@@ -0,0 +1,2252 @@
+/*
+ * serial.c -- USB Serial Function driver
+ *
+ * Copyright 2003 (C) Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This code is based in part on the Gadget Zero driver, which
+ * is Copyright (C) 2003 by David Brownell, all rights reserved.
+ *
+ * This code also borrows from usbserial.c, which is
+ * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2000 Peter Berger (pberger@brimson.com)
+ * Copyright (C) 2000 Al Borchers (alborchers@steinerpoint.com)
+ *
+ * All source code in this file is licensed under the following license except
+ * where indicated.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can find it at http://www.fsf.org
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/utsname.h>
+#include <linux/wait.h>
+#include <linux/serial.h>
+#include <linux/proc_fs.h>
+#include <linux/device.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/uaccess.h>
+#include <asm/byteorder.h>
+#include <asm/system.h>
+#include <asm/unaligned.h>
+
+#include <linux/usb/cdc.h>
+#include "usb_function.h"
+
+#include <linux/workqueue.h>
+/* Defines */
+
+#define GS_VERSION_STR			"v2.2"
+#define GS_VERSION_NUM			0x0202
+
+#define GS_LONG_NAME			"Serial Function"
+#define GS_SHORT_NAME			"serial"
+
+static int instances = 2;
+#define MAX_INSTANCES 2
+
+#define GS_MAJOR			127
+#define GS_MINOR_START			0
+
+#define GS_NUM_PORTS			16
+
+#define GS_NO_CONFIG_ID			0
+#define GS_ACM_CONFIG_ID		2
+
+#define GS_MAX_DESC_LEN			256
+
+/* defines for maintaining serial states */
+#define	MSR_CTS		(1 << 4)
+#define	MSR_DSR		(1 << 5)
+#define	MSR_RI		(1 << 6)
+#define	MSR_CD		(1 << 7)
+#define	MCR_DTR		(1 << 0)
+#define	MCR_RTS		(1 << 1)
+#define	MCR_LOOP	(1 << 4)
+
+/* USB CDC control line state defines */
+#define USB_CDC_SET_CONTROL_LINE_STATE_DTR 0x1
+#define USB_CDC_SET_CONTROL_LINE_STATE_RTS 0x2
+
+#define GS_DEFAULT_READ_Q_SIZE		16
+#define GS_DEFAULT_WRITE_Q_SIZE		16
+#define GS_DEFAULT_INT_REQ		1
+
+#define GS_DEFAULT_WRITE_BUF_SIZE	8192
+#define GS_TMP_BUF_SIZE			8192
+
+#define GS_CLOSE_TIMEOUT		15
+
+#define GS_DEFAULT_USE_ACM		0
+
+#define GS_DEFAULT_DTE_RATE		9600
+#define GS_DEFAULT_DATA_BITS		8
+#define GS_DEFAULT_PARITY		USB_CDC_NO_PARITY
+#define GS_DEFAULT_CHAR_FORMAT		USB_CDC_1_STOP_BITS
+
+/* #define GS_DEBUG */
+
+/* debug settings */
+#ifdef GS_DEBUG
+static int debug = 1;
+
+#define gs_debug(format, arg...) \
+	do { if (debug) printk(KERN_DEBUG format, ## arg); } while (0)
+#define gs_debug_level(level, format, arg...) \
+	do { if (debug >= level) printk(KERN_DEBUG format, ## arg); } while (0)
+
+#else
+
+#define gs_debug(format, arg...) \
+	do { } while (0)
+#define gs_debug_level(level, format, arg...) \
+	do { } while (0)
+
+#endif /* GS_DEBUG */
+
+#define GS_LOG2_NOTIFY_INTERVAL		5	/* 1 << 5 == 32 msec */
+#define GS_NOTIFY_MAXPACKET		8
+#define SERIAL_CONFIGURED        1
+#define SERIAL_UNCONFIGURED      0
+
+/* Structures */
+
+struct gs_dev;
+
+/* circular buffer */
+struct gs_buf {
+	unsigned int buf_size;
+	char *buf_buf;
+	char *buf_get;
+	char *buf_put;
+};
+
+/* list of requests */
+struct gs_req_entry {
+	struct list_head re_entry;
+	struct usb_request *re_req;
+};
+
+/* the port structure holds info for each port, one for each minor number */
+struct gs_port {
+	struct gs_dev *port_dev;	/* pointer to device struct */
+	struct tty_struct *port_tty;	/* pointer to tty struct */
+	spinlock_t port_lock;
+	struct mutex	mutex_lock;	/* protect open/close */
+	int port_num;
+	int port_open_count;
+	int port_in_use;	/* open/close in progress */
+	wait_queue_head_t port_write_wait;	/* waiting to write */
+	struct gs_buf *port_write_buf;
+	struct usb_cdc_line_coding port_line_coding;
+	struct list_head        read_pool;
+	struct list_head        read_queue;
+	struct list_head	write_pool;
+	unsigned                n_read;
+	unsigned int msr;
+	unsigned int prev_msr;
+	unsigned int mcr;
+	struct work_struct push_work;
+};
+
+/*-------------------------------------------------------------*/
+/* Allocate DMA buffers in non-interrupt context (gs_bind) */
+
+struct gs_reqbuf {
+	void *buf;
+};
+
+/*-------------------------------------------------------------*/
+
+/* the device structure holds info for the USB device */
+struct gs_dev {
+	/* lock for set/reset config */
+	spinlock_t dev_lock;
+	/* configuration number */
+	int dev_config;
+	/* address of notify endpoint */
+	struct usb_endpoint *dev_notify_ep;
+	/* address of in endpoint */
+	struct usb_endpoint *dev_in_ep;
+	struct usb_request *notify_req;
+	unsigned long notify_queued;
+	/* address of out endpoint */
+	struct usb_endpoint *dev_out_ep;
+	/* list of write requests */
+	struct list_head dev_req_list;
+	/* round robin port scheduled */
+	int dev_sched_port;
+	struct gs_port *dev_port[GS_NUM_PORTS];	/* the ports */
+	struct gs_reqbuf statusreqbuf;
+	u16 interface_num;
+
+	/*interface, endpoint descriptors*/
+	struct usb_interface_descriptor gs_ifc_desc;
+	struct usb_endpoint_descriptor gs_hs_bulkin_desc, gs_fs_bulkin_desc;
+	struct usb_endpoint_descriptor gs_hs_bulkout_desc, gs_fs_bulkout_desc;
+	struct usb_endpoint_descriptor gs_hs_notifyin_desc, gs_fs_notifyin_desc;
+	struct usb_descriptor_header **gs_fullspeed_header;
+	struct usb_descriptor_header **gs_highspeed_header;
+
+	struct usb_function *func;
+	int configured;
+	int bound;
+};
+
+/* Functions */
+
+/* module */
+static int __init gs_module_init(void);
+static void __exit gs_module_exit(void);
+
+static void send_notify_data(struct usb_endpoint *ep, struct usb_request *req);
+/* tty driver */
+static int gs_open(struct tty_struct *tty, struct file *file);
+static void gs_close(struct tty_struct *tty, struct file *file);
+static int gs_write(struct tty_struct *tty,
+		    const unsigned char *buf, int count);
+static int gs_put_char(struct tty_struct *tty, unsigned char ch);
+static void gs_flush_chars(struct tty_struct *tty);
+static int gs_write_room(struct tty_struct *tty);
+static int gs_chars_in_buffer(struct tty_struct *tty);
+static void gs_throttle(struct tty_struct *tty);
+static void gs_unthrottle(struct tty_struct *tty);
+static int gs_break(struct tty_struct *tty, int break_state);
+static int gs_ioctl(struct tty_struct *tty, struct file *file,
+		    unsigned int cmd, unsigned long arg);
+static void gs_set_termios(struct tty_struct *tty, struct ktermios *old);
+static unsigned gs_start_rx(struct gs_dev *dev);
+
+static int gs_send(struct gs_dev *dev);
+static int gs_send_packet(struct gs_dev *dev, char *packet, unsigned int size);
+static void gs_read_complete(struct usb_endpoint *ep, struct usb_request *req);
+static void gs_write_complete(struct usb_endpoint *ep, struct usb_request *req);
+static int gs_tiocmget(struct tty_struct *tty, struct file *file);
+static int gs_tiocmset(struct tty_struct *tty, struct file *file,
+			unsigned int set, unsigned int clear);
+
+/* Function driver */
+static void gs_bind(void *);
+static void gs_unbind(void *);
+static int gs_setup(struct usb_ctrlrequest *req,
+		void *buf, int len, void *_ctxt);
+
+static void gs_configure(int config, void *_ctxt);
+static void gs_disconnect(void *_ctxt);
+static void gs_reset_config(struct gs_dev *dev);
+
+static struct usb_request *gs_alloc_req(struct usb_endpoint *ep,
+					unsigned int len);
+static void gs_free_req(struct usb_endpoint *ep, struct usb_request *req);
+
+static int gs_alloc_ports(struct gs_dev *dev, gfp_t kmalloc_flags);
+static void gs_free_ports(struct gs_dev *dev);
+
+/* circular buffer */
+static struct gs_buf *gs_buf_alloc(unsigned int size, gfp_t kmalloc_flags);
+static void gs_buf_free(struct gs_buf *gb);
+static void gs_buf_clear(struct gs_buf *gb);
+static unsigned int gs_buf_data_avail(struct gs_buf *gb);
+static unsigned int gs_buf_space_avail(struct gs_buf *gb);
+static unsigned int gs_buf_put(struct gs_buf *gb, const char *buf,
+			       unsigned int count);
+static unsigned int gs_buf_get(struct gs_buf *gb, char *buf,
+			       unsigned int count);
+
+/* Globals */
+static struct gs_dev **gs_devices;
+
+static struct semaphore gs_open_close_sem[GS_NUM_PORTS];
+
+static unsigned int read_q_size = GS_DEFAULT_READ_Q_SIZE;
+static unsigned int write_q_size = GS_DEFAULT_WRITE_Q_SIZE;
+
+static unsigned int write_buf_size = GS_DEFAULT_WRITE_BUF_SIZE;
+
+static struct workqueue_struct *gs_tty_wq;
+
+
+/* tty driver struct */
+static const struct tty_operations gs_tty_ops = {
+	.open = gs_open,
+	.close = gs_close,
+	.write = gs_write,
+	.put_char = gs_put_char,
+	.flush_chars = gs_flush_chars,
+	.write_room = gs_write_room,
+	.ioctl = gs_ioctl,
+	.set_termios = gs_set_termios,
+	.throttle = gs_throttle,
+	.unthrottle = gs_unthrottle,
+	.break_ctl = gs_break,
+	.chars_in_buffer = gs_chars_in_buffer,
+	.tiocmget = gs_tiocmget,
+	.tiocmset = gs_tiocmset,
+};
+static struct tty_driver *gs_tty_driver;
+
+/* Function  driver struct */
+static struct usb_function usb_function_serial[2];
+
+struct usb_function *global_func_serial;
+struct gs_dev **dum_device;
+
+/* Module */
+MODULE_DESCRIPTION(GS_LONG_NAME);
+MODULE_AUTHOR("Al Borchers");
+MODULE_LICENSE("GPL");
+
+#ifdef GS_DEBUG
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Enable debugging, 0=off, 1=on");
+#endif
+
+module_param(read_q_size, uint, S_IRUGO);
+MODULE_PARM_DESC(read_q_size, "Read request queue size, default=16");
+
+module_param(write_q_size, uint, S_IRUGO);
+MODULE_PARM_DESC(write_q_size, "Write request queue size, default=16");
+
+module_param(write_buf_size, uint, S_IRUGO);
+MODULE_PARM_DESC(write_buf_size, "Write buffer size, default=8192");
+
+module_param(instances, int, 0);
+MODULE_PARM_DESC(instances, "Number of serial instances");
+
+module_init(gs_module_init);
+module_exit(gs_module_exit);
+
+/******************************************************************************/
+
+/*
+ * CDC-ACM Class specific Descriptors
+ */
+
+static const struct usb_cdc_header_desc gs_header_desc = {
+	.bLength = sizeof(gs_header_desc),
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubType = USB_CDC_HEADER_TYPE,
+	.bcdCDC = __constant_cpu_to_le16(0x0110),
+};
+
+static const struct usb_cdc_call_mgmt_descriptor gs_call_mgmt_descriptor = {
+	.bLength = sizeof(gs_call_mgmt_descriptor),
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubType = USB_CDC_CALL_MANAGEMENT_TYPE,
+	.bmCapabilities = 0,
+	.bDataInterface = 0,
+};
+
+static struct usb_cdc_acm_descriptor gs_acm_descriptor = {
+	.bLength = sizeof(gs_acm_descriptor),
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubType = USB_CDC_ACM_TYPE,
+	.bmCapabilities = 3,  /* bits should be 00000011 (refer to 5.2.3.3) */
+};
+
+static const struct usb_cdc_union_desc gs_union_desc = {
+	.bLength = sizeof(gs_union_desc),
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubType = USB_CDC_UNION_TYPE,
+	.bMasterInterface0 = 0,
+	.bSlaveInterface0 = 0,
+};
+
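+/*
+ * Each serial instance registers as its own usb_function, so the
+ * interface and endpoint descriptors are built per device at init time
+ * rather than shared statically.
+ */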
+static void gs_init_ifc_desc(struct usb_interface_descriptor *ifc_desc)
+{
+	ifc_desc->bLength =		USB_DT_INTERFACE_SIZE;
+	ifc_desc->bDescriptorType =	USB_DT_INTERFACE;
+	ifc_desc->bNumEndpoints =	3;
+	ifc_desc->bInterfaceClass =	USB_CLASS_VENDOR_SPEC;
+	ifc_desc->bInterfaceSubClass =	USB_CLASS_VENDOR_SPEC;
+	ifc_desc->bInterfaceProtocol =	USB_CLASS_VENDOR_SPEC;
+	ifc_desc->iInterface =		0;
+}
+
+#define HIGHSPEED	1
+#define	FULLSPEED	2
+
+#define BULK	1
+#define INTERRUPT	2
+static void gs_init_ep_desc(struct usb_endpoint_descriptor *ep_desc,
+				unsigned type, unsigned speed)
+{
+	ep_desc->bLength =		USB_DT_ENDPOINT_SIZE;
+	ep_desc->bDescriptorType =	USB_DT_ENDPOINT;
+
+	if (type == BULK) {
+		ep_desc->bmAttributes = USB_ENDPOINT_XFER_BULK;
+		if (speed == HIGHSPEED)
+			ep_desc->wMaxPacketSize = 512;
+		else
+			ep_desc->wMaxPacketSize = 64;
+	} else {
+
+		ep_desc->bmAttributes = USB_ENDPOINT_XFER_INT;
+		ep_desc->wMaxPacketSize = 64;
+		ep_desc->bInterval = 4;
+	}
+}
+
+static void gs_init_header_desc(struct gs_dev *dev)
+{
+	dev->gs_highspeed_header[0] =
+		(struct usb_descriptor_header *)&dev->gs_ifc_desc;
+	dev->gs_highspeed_header[1] =
+		(struct usb_descriptor_header *)&dev->gs_hs_bulkin_desc;
+	dev->gs_highspeed_header[2] =
+		(struct usb_descriptor_header *)&dev->gs_hs_bulkout_desc;
+	dev->gs_highspeed_header[3] =
+		(struct usb_descriptor_header *)&dev->gs_hs_notifyin_desc;
+	dev->gs_highspeed_header[4] = NULL;
+
+	dev->gs_fullspeed_header[0] =
+		(struct usb_descriptor_header *)&dev->gs_ifc_desc;
+	dev->gs_fullspeed_header[1] =
+		(struct usb_descriptor_header *)&dev->gs_fs_bulkin_desc;
+	dev->gs_fullspeed_header[2] =
+		(struct usb_descriptor_header *)&dev->gs_fs_bulkout_desc;
+	dev->gs_fullspeed_header[3] =
+		(struct usb_descriptor_header *)&dev->gs_fs_notifyin_desc;
+	dev->gs_fullspeed_header[4] = NULL;
+}
+
+/*****************************************************************************/
+/*
+ *  gs_module_init
+ *
+ *  Register as a USB gadget driver and a tty driver.
+ */
+
+static char *a[] = {"modem", "nmea"};
+
+static int __init gs_module_init(void)
+{
+	int i, retval;
+	struct usb_function *func;
+
+	if (instances > MAX_INSTANCES || instances == 0) {
+		printk(KERN_ERR "Incorrect instances entered \n");
+		return -ENODEV;
+	}
+
+	gs_tty_wq = create_singlethread_workqueue("gs_tty");
+	if (!gs_tty_wq)
+		return -ENOMEM;
+	gs_tty_driver = alloc_tty_driver(GS_NUM_PORTS);
+	if (!gs_tty_driver) {
+		destroy_workqueue(gs_tty_wq);
+		return -ENOMEM;
+	}
+	gs_tty_driver->owner = THIS_MODULE;
+	gs_tty_driver->driver_name = GS_SHORT_NAME;
+	gs_tty_driver->name = "ttyHSUSB";
+	gs_tty_driver->major = GS_MAJOR;
+	gs_tty_driver->minor_start = GS_MINOR_START;
+	gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+	gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+	gs_tty_driver->flags =  TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV
+				| TTY_DRIVER_RESET_TERMIOS;
+	gs_tty_driver->init_termios = tty_std_termios;
+	gs_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL
+	    | CLOCAL;
+	tty_set_operations(gs_tty_driver, &gs_tty_ops);
+
+	for (i = 0; i < GS_NUM_PORTS; i++)
+		sema_init(&gs_open_close_sem[i], 1);
+
+	retval = tty_register_driver(gs_tty_driver);
+	if (retval) {
+		/*usb_function_unregister(&usb_func_serial); */
+		put_tty_driver(gs_tty_driver);
+		printk(KERN_ERR
+		       "gs_module_init: cannot register tty driver,ret = %d\n",
+		       retval);
+		return retval;
+	}
+	for (i = 0; i < MAX_INSTANCES; i++)
+		tty_register_device(gs_tty_driver, i, NULL);
+
+	gs_devices = kzalloc(sizeof(struct gs_dev *) * instances,
+				GFP_KERNEL);
+	if (!gs_devices)
+		return -ENOMEM;
+
+	for (i = 0; i < instances; i++) {
+		func = &usb_function_serial[i];
+
+		gs_devices[i] = kzalloc(sizeof(struct gs_dev), GFP_KERNEL);
+		if (!gs_devices[i])
+			return -ENOMEM;
+		spin_lock_init(&gs_devices[i]->dev_lock);
+		INIT_LIST_HEAD(&gs_devices[i]->dev_req_list);
+		gs_devices[i]->func = func;
+		/* 1 interface + 3 endpoints -> 4 headers + 1 NULL terminator */
+		gs_devices[i]->gs_fullspeed_header =
+		kmalloc(sizeof(struct usb_descriptor_header *) * 5, GFP_KERNEL);
+		gs_devices[i]->gs_highspeed_header =
+		kmalloc(sizeof(struct usb_descriptor_header *) * 5, GFP_KERNEL);
+
+		gs_init_ifc_desc(&gs_devices[i]->gs_ifc_desc);
+		gs_init_ep_desc(&gs_devices[i]->gs_hs_bulkin_desc, BULK,
+				HIGHSPEED);
+		gs_init_ep_desc(&gs_devices[i]->gs_hs_bulkout_desc, BULK,
+				HIGHSPEED);
+		gs_init_ep_desc(&gs_devices[i]->gs_hs_notifyin_desc, INTERRUPT,
+				HIGHSPEED);
+
+		gs_init_ep_desc(&gs_devices[i]->gs_fs_bulkin_desc, BULK,
+				FULLSPEED);
+		gs_init_ep_desc(&gs_devices[i]->gs_fs_bulkout_desc, BULK,
+				FULLSPEED);
+		gs_init_ep_desc(&gs_devices[i]->gs_fs_notifyin_desc, INTERRUPT,
+				FULLSPEED);
+		gs_init_header_desc(gs_devices[i]);
+
+		/*Initializing Directions*/
+		gs_devices[i]->gs_hs_bulkin_desc.bEndpointAddress = USB_DIR_IN;
+		gs_devices[i]->gs_hs_bulkout_desc.bEndpointAddress =
+								USB_DIR_OUT;
+		gs_devices[i]->gs_hs_notifyin_desc.bEndpointAddress =
+								USB_DIR_IN;
+		gs_devices[i]->gs_fs_bulkin_desc.bEndpointAddress = USB_DIR_IN;
+		gs_devices[i]->gs_fs_bulkout_desc.bEndpointAddress =
+								USB_DIR_OUT;
+		gs_devices[i]->gs_fs_notifyin_desc.bEndpointAddress =
+								USB_DIR_IN;
+
+		func->bind = gs_bind;
+		func->unbind = gs_unbind;
+		func->configure = gs_configure;
+		func->disconnect = gs_disconnect;
+		func->setup = gs_setup;
+		func->name = a[i];
+		func->context = gs_devices[i];
+		func->fs_descriptors = gs_devices[i]->gs_fullspeed_header;
+		func->hs_descriptors = gs_devices[i]->gs_highspeed_header;
+
+		retval = usb_function_register(func);
+		if (retval) {
+			printk(KERN_ERR
+	      "gs_module_init: cannot register Function driver, ret = %d\n",
+			       retval);
+			return retval;
+		}
+	}
+
+	return 0;
+}
+
+/*
+* gs_module_exit
+*
+* Unregister as a tty driver and a USB gadget driver.
+*/
+static void __exit gs_module_exit(void)
+{
+	int i;
+	for (i = 0; i < instances; i++)
+		usb_function_unregister(&usb_function_serial[i]);
+
+	for (i = 0; i < instances; ++i) {
+		kfree(gs_devices[i]->gs_fullspeed_header);
+		kfree(gs_devices[i]->gs_highspeed_header);
+		kfree(gs_devices[i]);
+	}
+	for (i = 0; i < MAX_INSTANCES; i++)
+		tty_unregister_device(gs_tty_driver, i);
+	tty_unregister_driver(gs_tty_driver);
+	put_tty_driver(gs_tty_driver);
+	printk(KERN_INFO "gs_module_exit: %s %s unloaded\n", GS_LONG_NAME,
+	       GS_VERSION_STR);
+}
+
+/* TTY Driver */
+/*
+ * gs_open
+ */
+static int gs_open(struct tty_struct *tty, struct file *file)
+{
+	int port_num;
+	unsigned long flags;
+	struct gs_port *port;
+	struct gs_dev *dev;
+	struct gs_buf *buf;
+	struct semaphore *sem;
+	int ret;
+
+	port_num = tty->index;
+
+	gs_debug("gs_open: (%d,%p,%p)\n", port_num, tty, file);
+
+	if (port_num < 0 || port_num >= GS_NUM_PORTS) {
+		printk(KERN_ERR "gs_open: (%d,%p,%p) invalid port number\n",
+		       port_num, tty, file);
+		return -ENODEV;
+	}
+
+	dev = gs_devices[tty->index];
+
+	if (dev == NULL) {
+		printk(KERN_ERR "gs_open: (%d,%p,%p) NULL device pointer\n",
+		       port_num, tty, file);
+		return -ENODEV;
+	}
+
+	sem = &gs_open_close_sem[port_num];
+	if (down_interruptible(sem)) {
+		printk(KERN_ERR
+	       "gs_open: (%d,%p,%p) interrupted waiting for semaphore\n",
+		       port_num, tty, file);
+		return -ERESTARTSYS;
+	}
+
+	spin_lock_irqsave(&dev->dev_lock, flags);
+	port = dev->dev_port[0];
+
+	if (port == NULL) {
+		printk(KERN_ERR "gs_open: (%d,%p,%p) NULL port pointer\n",
+		       port_num, tty, file);
+		ret = -ENODEV;
+		goto exit_unlock_dev;
+	}
+
+	spin_unlock_irqrestore(&dev->dev_lock, flags);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (port->port_dev == NULL) {
+		printk(KERN_ERR "gs_open: (%d,%p,%p) port disconnected (1)\n",
+		       port_num, tty, file);
+		ret = -EIO;
+		goto exit_unlock_port;
+	}
+
+	if (port->port_open_count > 0) {
+		++port->port_open_count;
+		gs_debug("gs_open: (%d,%p,%p) already open\n",
+			 port_num, tty, file);
+		ret = 0;
+		goto exit_unlock_port;
+	}
+
+	tty->driver_data = NULL;
+
+	/* mark port as in use, we can drop port lock and sleep if necessary */
+	port->port_in_use = 1;
+
+	/* allocate write buffer on first open */
+	if (port->port_write_buf == NULL) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		buf = gs_buf_alloc(write_buf_size, GFP_KERNEL);
+		spin_lock_irqsave(&port->port_lock, flags);
+
+		/* might have been disconnected while asleep, check */
+		if (port->port_dev == NULL) {
+			printk(KERN_ERR
+			       "gs_open: (%d,%p,%p) port disconnected (2)\n",
+			       port_num, tty, file);
+			port->port_in_use = 0;
+			ret = -EIO;
+			goto exit_unlock_port;
+		}
+
+		port->port_write_buf = buf;
+		if (port->port_write_buf == NULL) {
+			printk(KERN_ERR
+	       "gs_open: (%d,%p,%p) cannot allocate port write buffer\n",
+			       port_num, tty, file);
+			port->port_in_use = 0;
+			ret = -ENOMEM;
+			goto exit_unlock_port;
+		}
+
+	}
+
+	/* wait for carrier detect (not implemented) */
+
+	/* might have been disconnected while asleep, check */
+	if (port->port_dev == NULL) {
+		printk(KERN_ERR "gs_open: (%d,%p,%p) port disconnected (3)\n",
+		       port_num, tty, file);
+		port->port_in_use = 0;
+		ret = -EIO;
+		goto exit_unlock_port;
+	}
+
+	tty->driver_data = port;
+	port->port_tty = tty;
+	port->port_tty->low_latency = 1;
+	port->port_open_count = 1;
+	port->port_in_use = 0;
+
+	gs_debug("gs_open: (%d,%p,%p) completed\n", port_num, tty, file);
+	/* Queue RX requests */
+	port->n_read = 0;
+	gs_start_rx(dev);
+
+	ret = 0;
+
+exit_unlock_port:
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	up(sem);
+	return ret;
+
+exit_unlock_dev:
+	spin_unlock_irqrestore(&dev->dev_lock, flags);
+	up(sem);
+	return ret;
+
+}
+
+/*
+ * gs_close
+ */
+
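+/*
+ * Helper for the wait in gs_close(): evaluates "write buffer drained or
+ * port disconnected" while holding the port lock, so the test does not
+ * race with the write-completion path.
+ */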
+#define GS_WRITE_FINISHED_EVENT_SAFELY(p)			\
+({								\
+	int cond;						\
+								\
+	spin_lock_irq(&(p)->port_lock);				\
+	cond = !(p)->port_dev || !gs_buf_data_avail((p)->port_write_buf); \
+	spin_unlock_irq(&(p)->port_lock);			\
+	cond;							\
+})
+
+static void gs_close(struct tty_struct *tty, struct file *file)
+{
+	struct gs_port *port = tty->driver_data;
+	struct semaphore *sem;
+
+	if (port == NULL) {
+		printk(KERN_ERR "gs_close: NULL port pointer\n");
+		return;
+	}
+
+	gs_debug("gs_close: (%d,%p,%p)\n", port->port_num, tty, file);
+
+	sem = &gs_open_close_sem[port->port_num];
+	down(sem);
+
+	spin_lock_irq(&port->port_lock);
+
+	if (port->port_open_count == 0) {
+		printk(KERN_ERR
+		       "gs_close: (%d,%p,%p) port is already closed\n",
+		       port->port_num, tty, file);
+		goto exit;
+	}
+
+	if (port->port_open_count > 1) {
+		--port->port_open_count;
+		goto exit;
+	}
+
+	/* free disconnected port on final close */
+	if (port->port_dev == NULL)
+		goto exit;
+
+
+	/* mark port as closed but in use, we can drop port lock */
+	/* and sleep if necessary */
+	port->port_in_use = 1;
+	port->port_open_count = 0;
+
+	/* wait for write buffer to drain, or */
+	/* at most GS_CLOSE_TIMEOUT seconds */
+	if (gs_buf_data_avail(port->port_write_buf) > 0) {
+		spin_unlock_irq(&port->port_lock);
+		wait_event_interruptible_timeout(port->port_write_wait,
+						 GS_WRITE_FINISHED_EVENT_SAFELY
+						 (port), GS_CLOSE_TIMEOUT * HZ);
+		spin_lock_irq(&port->port_lock);
+	}
+
+	/* free disconnected port on final close */
+	/* (might have happened during the above sleep) */
+	if (port->port_dev == NULL)
+		goto exit;
+
+
+	gs_buf_clear(port->port_write_buf);
+
+	/* Flush bulk-out pipe */
+	usb_ept_fifo_flush(port->port_dev->dev_out_ep);
+	tty->driver_data = NULL;
+	port->port_tty = NULL;
+	port->port_in_use = 0;
+
+	gs_debug("gs_close: (%d,%p,%p) completed\n", port->port_num, tty, file);
+
+exit:
+	spin_unlock_irq(&port->port_lock);
+	up(sem);
+	if (port->port_dev == NULL)
+		kfree(port);
+}
+
+/*
+ * gs_write
+ */
+static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+	unsigned long flags;
+	struct gs_port *port = tty->driver_data;
+	int ret;
+
+	if (port == NULL) {
+		printk(KERN_ERR "gs_write: NULL port pointer\n");
+		return -EIO;
+	}
+
+	gs_debug("gs_write: (%d,%p) writing %d bytes\n", port->port_num, tty,
+		 count);
+
+	if (count == 0)
+		return 0;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (port->port_dev == NULL) {
+		printk(KERN_ERR "gs_write: (%d,%p) port is not connected\n",
+		       port->port_num, tty);
+		ret = -EIO;
+		goto exit;
+	}
+
+	if (port->port_open_count == 0) {
+		printk(KERN_ERR "gs_write: (%d,%p) port is closed\n",
+		       port->port_num, tty);
+		ret = -EBADF;
+		goto exit;
+	}
+
+	count = gs_buf_put(port->port_write_buf, buf, count);
+
+
+	if (port->port_dev->dev_config)
+		gs_send(gs_devices[tty->index]);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	gs_debug("gs_write: (%d,%p) wrote %d bytes\n", port->port_num, tty,
+		 count);
+
+	return count;
+
+exit:
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	return ret;
+}
+
+/*
+ * gs_put_char
+ */
+static int gs_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	unsigned long flags;
+	int ret = 0;
+	struct gs_port *port = tty->driver_data;
+
+	if (port == NULL) {
+		printk(KERN_ERR "gs_put_char: NULL port pointer\n");
+		goto out;
+	}
+
+	gs_debug("gs_put_char: (%d,%p) char=0x%x, called from %p, %p, %p\n",
+		 port->port_num, tty, ch, __builtin_return_address(0),
+		 __builtin_return_address(1), __builtin_return_address(2));
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (port->port_dev == NULL) {
+		printk(KERN_ERR "gs_put_char: (%d,%p) port is not connected\n",
+		       port->port_num, tty);
+		goto exit_unlock;
+	}
+
+	if (port->port_open_count == 0) {
+		printk(KERN_ERR "gs_put_char: (%d,%p) port is closed\n",
+		       port->port_num, tty);
+		goto exit_unlock;
+	}
+
+	ret = gs_buf_put(port->port_write_buf, &ch, 1);
+
+exit_unlock:
+	spin_unlock_irqrestore(&port->port_lock, flags);
+out:
+	return ret;
+}
+
+/*
+ * gs_flush_chars
+ */
+static void gs_flush_chars(struct tty_struct *tty)
+{
+	unsigned long flags;
+	struct gs_port *port = tty->driver_data;
+
+	if (port == NULL) {
+		printk(KERN_ERR "gs_flush_chars: NULL port pointer\n");
+		return;
+	}
+
+	gs_debug("gs_flush_chars: (%d,%p)\n", port->port_num, tty);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (port->port_dev == NULL) {
+		printk(KERN_ERR
+		       "gs_flush_chars: (%d,%p) port is not connected\n",
+		       port->port_num, tty);
+		goto exit;
+	}
+
+	if (port->port_open_count == 0) {
+		printk(KERN_ERR "gs_flush_chars: (%d,%p) port is closed\n",
+		       port->port_num, tty);
+		goto exit;
+	}
+
+	if (port->port_dev->dev_config)
+		gs_send(gs_devices[tty->index]);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+
+	return;
+
+exit:
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/*
+ * gs_write_room
+ */
+static int gs_write_room(struct tty_struct *tty)
+{
+
+	int room = 0;
+	unsigned long flags;
+	struct gs_port *port = tty->driver_data;
+
+	if (port == NULL)
+		return 0;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (port->port_dev != NULL && port->port_open_count > 0
+	    && port->port_write_buf != NULL)
+		room = gs_buf_space_avail(port->port_write_buf);
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	gs_debug("gs_write_room: (%d,%p) room=%d\n", port->port_num, tty, room);
+
+	return room;
+}
+
+/*
+ * gs_chars_in_buffer
+ */
+static int gs_chars_in_buffer(struct tty_struct *tty)
+{
+	int chars = 0;
+	unsigned long flags;
+	struct gs_port *port = tty->driver_data;
+
+	if (port == NULL)
+		return 0;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (port->port_dev != NULL && port->port_open_count > 0
+	    && port->port_write_buf != NULL)
+		chars = gs_buf_data_avail(port->port_write_buf);
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	gs_debug("gs_chars_in_buffer: (%d,%p) chars=%d\n",
+		 port->port_num, tty, chars);
+
+	return chars;
+}
+
+/*
+ * gs_throttle
+ */
+static void gs_throttle(struct tty_struct *tty)
+{
+}
+
+/*
+ * gs_unthrottle
+ */
+static void gs_unthrottle(struct tty_struct *tty)
+{
+	struct gs_port		*port = tty->driver_data;
+	unsigned long		flags;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	queue_work(gs_tty_wq, &port->push_work);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/*
+ * gs_break
+ */
+static int gs_break(struct tty_struct *tty, int break_state)
+{
+	return 0;
+}
+
+/*
+ * gs_ioctl
+ */
+static int gs_ioctl(struct tty_struct *tty, struct file *file,
+		    unsigned int cmd, unsigned long arg)
+{
+	/* could not handle ioctl */
+	return -ENOIOCTLCMD;
+}
+
+/*
+ * gs_set_termios
+ */
+static void gs_set_termios(struct tty_struct *tty, struct ktermios *old)
+{
+}
+
+/*
+* gs_send
+*
+* This function finds available write requests, calls
+* gs_send_packet to fill these packets with data, and
+* continues until either there are no more write requests
+* available or no more data to send.  This function is
+* run whenever data arrives or write requests are available.
+*/
+static int gs_send(struct gs_dev *dev)
+{
+	struct gs_port *port = dev->dev_port[0];
+	struct list_head *pool = &port->write_pool;
+	int status = 0;
+	static long prev_len;
+	bool do_tty_wake = false;
+	struct usb_endpoint *ep = dev->dev_in_ep;
+
+	while (!list_empty(pool)) {
+		struct usb_request *req;
+		int len;
+		req = list_entry(pool->next, struct usb_request, list);
+		len = gs_send_packet(dev, req->buf, usb_ept_get_max_packet(ep));
+		if (len == 0) {
+			/* Queue zero length packet */
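+			/* (the previous packet was exactly max-packet-sized,
+			 * so a ZLP tells the host that the transfer ended) */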
+			if (prev_len == usb_ept_get_max_packet(ep)) {
+				req->length = 0;
+				list_del(&req->list);
+
+				spin_unlock(&port->port_lock);
+				status = usb_ept_queue_xfer(ep, req);
+				spin_lock(&port->port_lock);
+				if (status) {
+					printk(KERN_ERR "%s: %s err %d\n",
+					__func__, "queue", status);
+					list_add(&req->list, pool);
+				}
+				prev_len = 0;
+			}
+			wake_up_interruptible(&port->port_write_wait);
+			break;
+		}
+		do_tty_wake = true;
+
+		req->length = len;
+		list_del(&req->list);
+
+		/* Drop lock while we call out of driver; completions
+		 * could be issued while we do so.  Disconnection may
+		 * happen too; maybe immediately before we queue this!
+		 * NOTE that we may keep sending data for a while after
+		 * the TTY closed (dev->ioport->port_tty is NULL).
+		 */
+		spin_unlock(&port->port_lock);
+		status = usb_ept_queue_xfer(ep, req);
+		spin_lock(&port->port_lock);
+
+		if (status) {
+			printk(KERN_ERR "%s: %s err %d\n",
+					__func__, "queue", status);
+			list_add(&req->list, pool);
+			break;
+		}
+		prev_len = req->length;
+
+	}
+
+	if (do_tty_wake && port->port_tty)
+		tty_wakeup(port->port_tty);
+	return status;
+
+}
+
+/*
+ * gs_send_packet
+ *
+ * If there is data to send, a packet is built in the given
+ * buffer and the size is returned.  If there is no data to
+ * send, 0 is returned.  If there is any error a negative
+ * error number is returned.
+ *
+ * Called during USB completion routine, on interrupt time.
+ *
+ * We assume that disconnect will not happen until all completion
+ * routines have completed, so we can assume that the dev_port
+ * array does not change during the lifetime of this function.
+ */
+static int gs_send_packet(struct gs_dev *dev, char *packet, unsigned int size)
+{
+	unsigned int len;
+	struct gs_port *port;
+
+	if (dev == NULL) {
+		printk(KERN_ERR "gs_send_packet: NULL device pointer\n");
+		return -EIO;
+	}
+
+	/* TEMPORARY -- only port 0 is supported right now */
+	port = dev->dev_port[0];
+	if (port == NULL) {
+		printk(KERN_ERR
+		       "gs_send_packet: port=%d, NULL port pointer\n", 0);
+		return -EIO;
+	}
+
+
+	len = gs_buf_data_avail(port->port_write_buf);
+	if (len < size)
+		size = len;
+	if (size != 0)
+		size = gs_buf_get(port->port_write_buf, packet, size);
+
+
+
+	if (port->port_tty)
+		tty_wakeup(port->port_tty);
+
+	return size;
+}
+
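+/*
+ * gs_rx_push
+ *
+ * Work handler: pushes completed OUT requests from read_queue into the
+ * TTY flip buffers, remembering partial pushes in port->n_read, then
+ * recycles the requests to the read pool and restarts reception.
+ */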
+static void gs_rx_push(struct work_struct *work)
+{
+	struct gs_port *port = container_of(work,
+					struct gs_port,
+					push_work);
+	struct tty_struct *tty;
+	struct list_head *queue = &port->read_queue;
+	bool do_push = false;
+	struct gs_dev *dev = port->port_dev;
+
+	/* hand any queued data to the tty */
+	spin_lock_irq(&port->port_lock);
+	tty = port->port_tty;
+	while (!list_empty(queue)) {
+		struct usb_request	*req;
+
+		req = list_first_entry(queue, struct usb_request, list);
+
+		/* discard data if tty was closed */
+		if (!tty)
+			goto recycle;
+
+		if (req->actual) {
+			char		*packet = req->buf;
+			unsigned	size = req->actual;
+			unsigned	n;
+			int		count;
+			/* we may have pushed part of this packet already... */
+			n = port->n_read;
+			if (n) {
+				packet += n;
+				size -= n;
+			}
+			/*printk(KERN_INFO "tty_push:%d\n",size);*/
+			count = tty_insert_flip_string(tty, packet, size);
+			if (count == 0)
+				printk(KERN_INFO "%s: tty buffer is full: throttle\n",
+							__func__);
+			if (count)
+				do_push = true;
+			if (count != size) {
+				/* stop pushing; TTY layer can't handle more */
+				port->n_read += count;
+				break;
+			}
+			port->n_read = 0;
+		}
+recycle:
+		list_move(&req->list, &port->read_pool);
+	}
+	if (tty && do_push) {
+		spin_unlock_irq(&port->port_lock);
+		tty_flip_buffer_push(tty);
+		wake_up_interruptible(&tty->read_wait);
+		spin_lock_irq(&port->port_lock);
+		/* tty may have been closed */
+		tty = port->port_tty;
+	}
+	if (!list_empty(queue) && tty) {
+		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
+			if (do_push)
+				queue_work(gs_tty_wq, &port->push_work);
+		}
+	}
+	gs_start_rx(dev);
+	spin_unlock_irq(&port->port_lock);
+}
+
+/*
+* gs_read_complete
+*/
+static void gs_read_complete(struct usb_endpoint *ep, struct usb_request *req)
+{
+	/* used global variable */
+	struct gs_dev *dev = (struct gs_dev *)req->device;
+	struct gs_port *port;
+	struct tty_struct *tty;
+
+	if (dev == NULL) {
+		printk(KERN_ERR "gs_read_complete: NULL device pointer\n");
+		return;
+	}
+
+	port = dev->dev_port[0];
+	tty = port->port_tty;
+	switch (req->status) {
+	case 0:
+		spin_lock(&port->port_lock);
+		list_add_tail(&req->list, &port->read_queue);
+		if (!test_bit(TTY_THROTTLED, &tty->flags))
+			queue_work(gs_tty_wq, &port->push_work);
+		spin_unlock(&port->port_lock);
+		break;
+
+	case -ESHUTDOWN:
+		/* disconnect */
+		gs_debug("gs_read_complete: shutdown\n");
+		gs_free_req(ep, req);
+		break;
+
+	case -ENODEV:
+		list_add_tail(&req->list, &port->read_pool);
+		/* Implement handling in future if needed */
+		break;
+	default:
+		list_add_tail(&req->list, &port->read_pool);
+		printk(KERN_ERR
+		"gs_read_complete: unexpected status error, status=%d\n",
+			req->status);
+		/* goto requeue; */
+		break;
+	}
+}
+
+/*
+* gs_write_complete
+*/
+static void gs_write_complete(struct usb_endpoint *ep, struct usb_request *req)
+{
+	struct gs_dev *dev = (struct gs_dev *)req->device;
+	struct gs_port *port;
+	unsigned long flags;
+
+	if (dev == NULL) {
+		printk(KERN_ERR "gs_write_complete: NULL device pointer\n");
+		return;
+	}
+	port = dev->dev_port[0];
+	spin_lock_irqsave(&port->port_lock, flags);
+	list_add(&req->list, &port->write_pool);
+
+	switch (req->status) {
+	default:
+		/* presumably a transient fault */
+		printk(KERN_ERR "%s: unexpected status %d\n",
+				__func__, req->status);
+		/* FALL THROUGH */
+	case 0:
+		/* normal completion */
+
+		if ((req->length == 0) &&
+			(gs_buf_data_avail(port->port_write_buf) == 0)) {
+			break;
+		}
+		if (dev->dev_config)
+			gs_send(dev);
+
+		break;
+
+	case -ESHUTDOWN:
+		/* disconnect */
+		printk(KERN_DEBUG "%s: shutdown\n", __func__);
+		break;
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/* Send Notification to host if Status changes */
+static void send_notify_data(struct usb_endpoint *ep, struct usb_request *req)
+{
+	struct gs_dev *dev = (struct gs_dev *)req->device;
+	struct usb_cdc_notification *notify;
+	struct gs_port *port;
+	unsigned int msr, ret;
+	__le16 *data;
+
+	if (dev == NULL) {
+		printk(KERN_ERR "send_notify_data: NULL device pointer\n");
+		return;
+	}
+
+	port = dev->dev_port[0];
+
+	if (port == NULL) {
+		printk(KERN_ERR"send_notify_data:port is NULL\n");
+		return;
+	}
+
+	if (test_bit(0, &dev->notify_queued))
+		usb_ept_cancel_xfer(dev->dev_notify_ep,
+		dev->notify_req);
+	notify = req->buf;
+	msr = port->msr;
+	notify->bmRequestType  = 0xA1;
+	notify->bNotificationType  = USB_CDC_NOTIFY_SERIAL_STATE;
+	notify->wValue  = __constant_cpu_to_le16(0);
+	notify->wIndex  = __constant_cpu_to_le16(dev->interface_num);
+	notify->wLength  = __constant_cpu_to_le16(2);
+	data = req->buf + sizeof *notify;
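+	/* CDC SERIAL_STATE bitmap: bit 0 = carrier detect (DCD),
+	 * bit 1 = DSR, bit 3 = ring indicator (RI) */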
+	data[0] = __constant_cpu_to_le16(((msr & MSR_CD) ? 1 : 0)
+			| ((msr & MSR_DSR) ? (1<<1) : (0<<1))
+			| ((msr & MSR_RI) ? (1<<3) : (0<<3)));
+
+	set_bit(0, &dev->notify_queued);
+	ret = usb_ept_queue_xfer(ep, req);
+	if (ret) {
+		clear_bit(0, &dev->notify_queued);
+		printk(KERN_ERR
+		"send_notify_data: cannot queue status request,ret = %d\n",
+			       ret);
+	}
+}
+
+/* Free request if -ESHUTDOWN */
+static void gs_status_complete(struct usb_endpoint *ep,
+				struct usb_request *req)
+{
+	struct gs_dev *dev = (struct gs_dev *)req->device;
+	struct gs_port *port;
+
+	if (dev == NULL) {
+		printk(KERN_ERR"gs_status_complete : NULL device pointer\n");
+		return;
+	}
+
+	port = dev->dev_port[0];
+
+	if (port == NULL) {
+		printk(KERN_ERR "gs_status_complete: NULL port pointer\n");
+		return;
+	}
+
+	clear_bit(0, &dev->notify_queued);
+	switch (req->status) {
+	case 0:
+
+		gs_debug("%s:port->msr=%x,dev=%p,ep=%p,req=%p", __func__,
+			port->msr, dev, dev->dev_notify_ep, dev->notify_req);
+		/* resend only if an update was missed because a request was
+		** already queued when the user changed state via tiocmset */
+		if (port->prev_msr != port->msr) {
+			send_notify_data(dev->dev_notify_ep, dev->notify_req);
+			port->prev_msr = port->msr;
+		}
+		break;
+
+	case -ESHUTDOWN:
+		/* disconnect */
+		gs_debug("gs_status_complete: shutdown\n");
+		gs_free_req(ep, req);
+		break;
+
+	default:
+		printk(KERN_ERR
+	       "gs_status_complete: unexpected status error, status=%d\n",
+		       req->status);
+		break;
+	}
+}
+
+/* Function Driver */
+/*
+ * gs_bind
+ *
+ * Called on module load.  Allocates and initializes the device
+ * structure and a control request.
+ */
+static void gs_bind(void *_ctxt)
+{
+	struct usb_endpoint *ep;
+	struct gs_dev *dev = _ctxt;
+	struct usb_function *func = dev->func;
+	int i = 0;
+	int ret;
+
+	if (func == NULL) {
+		pr_err("%s: NULL function pointer\n", __func__);
+		return;
+	}
+
+	ret = gs_alloc_ports(dev, GFP_KERNEL);
+	if (ret != 0) {
+		pr_err("%s: cannot allocate ports\n", __func__);
+		gs_unbind(_ctxt);
+		return;
+	}
+
+	ret = usb_msm_get_next_ifc_number(func);
+	dev->gs_ifc_desc.bInterfaceNumber = ret;
+	dev->gs_ifc_desc.iInterface = 0;
+
+	/* Configure the IN endpoint */
+	ep = dev->dev_in_ep = usb_alloc_endpoint(USB_DIR_IN);
+	if (!ep) {
+		pr_err("%s: in endpoint allocation failed\n", __func__);
+		return;
+	}
+	dev->gs_hs_bulkin_desc.bEndpointAddress = USB_DIR_IN | ep->num;
+	dev->gs_fs_bulkin_desc.bEndpointAddress = USB_DIR_IN | ep->num;
+	pr_debug("%s: bulk_in_endpoint Number = %d\n",
+						__func__, ep->num);
+
+	/* Configure the OUT endpoint */
+	ep = dev->dev_out_ep = usb_alloc_endpoint(USB_DIR_OUT);
+	if (!ep) {
+		pr_err("out endpoint allocation failed\n");
+		return;
+	}
+	dev->gs_hs_bulkout_desc.bEndpointAddress = USB_DIR_OUT | ep->num;
+	dev->gs_fs_bulkout_desc.bEndpointAddress = USB_DIR_OUT | ep->num;
+	pr_debug("%s: bulk_out_endpoint Number = %d\n",
+						__func__, ep->num);
+
+	/* Configure the NOTIFY endpoint */
+	ep = dev->dev_notify_ep = usb_alloc_endpoint(USB_DIR_IN);
+	if (!ep) {
+		pr_err("notify endpoint allocation failed\n");
+		return;
+	}
+	dev->gs_hs_notifyin_desc.bEndpointAddress = USB_DIR_IN | ep->num;
+	dev->gs_fs_notifyin_desc.bEndpointAddress = USB_DIR_IN | ep->num;
+	pr_debug("%s: notify_in_endpoint Number = %d\n",
+						__func__, ep->num);
+
+
+
+	for (i = 0; i < GS_DEFAULT_INT_REQ; ++i) {
+		struct gs_reqbuf *bh = &dev->statusreqbuf;
+		bh->buf = kmalloc(64, GFP_KERNEL);
+		if (!bh->buf)
+			return;
+	}
+
+	dev->bound = 1;
+	return;
+}
+
+/*
+ * gs_unbind
+ *
+ * Called on module unload.  Frees the control request and device
+ * structure.
+ */
+static void /* __init_or_exit */ gs_unbind(void *_ctxt)
+{
+	struct gs_dev *dev = _ctxt;
+
+	if (!dev) {
+		pr_err("%s: error: null device\n", __func__);
+		return;
+	}
+	if (!dev->bound)
+		return;
+
+	kfree(dev->statusreqbuf.buf);
+
+	if (dev->dev_in_ep) {
+		usb_ept_fifo_flush(dev->dev_in_ep);
+		usb_ept_enable(dev->dev_in_ep,  0);
+		usb_free_endpoint(dev->dev_in_ep);
+	}
+	if (dev->dev_out_ep) {
+		usb_ept_fifo_flush(dev->dev_out_ep);
+		usb_ept_enable(dev->dev_out_ep,  0);
+		usb_free_endpoint(dev->dev_out_ep);
+	}
+	if (dev->dev_notify_ep) {
+		usb_ept_fifo_flush(dev->dev_notify_ep);
+		usb_ept_enable(dev->dev_notify_ep,  0);
+		usb_free_endpoint(dev->dev_notify_ep);
+	}
+
+	gs_free_ports(dev);
+	dev->bound = 0;
+	pr_debug("%s: %s %s\n", __func__, GS_LONG_NAME, GS_VERSION_STR);
+}
+
+static void gser_complete_set_line_coding(struct usb_endpoint *ep,
+		struct usb_request *req)
+{
+	struct gs_dev *dev = (struct gs_dev *)req->device;
+	struct gs_port *port;
+	struct usb_cdc_line_coding *value;
+	struct usb_request *in_req;
+
+	if (!dev || !dev->dev_port[0]) {
+		printk(KERN_ERR "%s(): dev or dev_port is null\n", __func__);
+		if (dev && dev->func)
+			usb_ept_set_halt(dev->func->ep0_in);
+		return;
+	}
+	port = dev->dev_port[0];
+	if (req->actual != sizeof(port->port_line_coding)) {
+		printk(KERN_ERR "%s(): received wrong data\n", __func__);
+		usb_ept_set_halt(dev->func->ep0_in);
+		return;
+	}
+
+	/* Use Host assigned port_line setting */
+	value = req->buf;
+	port->port_line_coding = *value;
+
+	/* Send ACK on EP0 IN */
+	in_req = dev->func->ep0_in_req;
+	in_req->length = 0;
+	in_req->complete = 0;
+	usb_ept_queue_xfer(dev->func->ep0_in, in_req);
+}
+
+static int gs_setup(struct usb_ctrlrequest *ctrl,
+		void *buf, int len, void *_ctxt)
+{
+	int ret = -EOPNOTSUPP;
+	struct gs_dev *dev = _ctxt;
+	struct gs_port *port;/* ACM only has one port */
+	u16 wIndex = le16_to_cpu(ctrl->wIndex);
+	u16 wValue = le16_to_cpu(ctrl->wValue);
+	u16 wLength = le16_to_cpu(ctrl->wLength);
+
+	if (dev == NULL) {
+		printk(KERN_ERR"gs_setup:device pointer NULL\n");
+		return 0;
+	}
+	port = dev->dev_port[0];
+
+	if (port == NULL) {
+		printk(KERN_ERR"gs_setup: port pointer is NULL\n");
+		return 0;
+	}
+	switch (ctrl->bRequest) {
+
+	case USB_CDC_REQ_SET_LINE_CODING:
+		if (port) {
+			struct usb_request *req = dev->func->ep0_out_req;
+			ret = min(wLength,
+				(u16) sizeof(struct usb_cdc_line_coding));
+			if (ret != sizeof(struct usb_cdc_line_coding))
+				ret = -EOPNOTSUPP;
+			else {
+				req->device = dev;
+				req->complete = gser_complete_set_line_coding;
+				}
+		} else
+			ret = -ENODEV;
+		break;
+
+	case USB_CDC_REQ_GET_LINE_CODING:
+		port = dev->dev_port[0];/* ACM only has one port */
+		ret = min(wLength, (u16) sizeof(struct usb_cdc_line_coding));
+		if (port) {
+			spin_lock(&port->port_lock);
+			memcpy(buf, &port->port_line_coding, ret);
+			spin_unlock(&port->port_lock);
+		}
+		break;
+	case USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+		port = dev->dev_port[0];/* ACM only has one port */
+		if (wValue & USB_CDC_SET_CONTROL_LINE_STATE_DTR) {
+			port->mcr |= MCR_DTR;
+		} else	{
+			port->mcr &= ~MCR_DTR;
+		}
+		if (wValue & USB_CDC_SET_CONTROL_LINE_STATE_RTS)
+			port->mcr |= MCR_RTS;
+		else
+			port->mcr &= ~MCR_RTS;
+
+		dev->interface_num = wIndex;
+		ret = 0;
+		break;
+
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static void gs_disconnect(void *_ctxt)
+{
+	struct gs_dev *dev = _ctxt;
+	struct gs_port *port = dev->dev_port[0];
+	unsigned long flags;
+
+	/* tell the TTY glue not to do I/O here any more */
+	spin_lock_irqsave(&port->port_lock, flags);
+	dev->dev_config = 0;
+	if (port->port_open_count > 0 || port->port_in_use) {
+		wake_up_interruptible(&port->port_write_wait);
+		if (port->port_tty) {
+			wake_up_interruptible(&port->port_tty->read_wait);
+			wake_up_interruptible(&port->port_tty->write_wait);
+			tty_hangup(port->port_tty);
+		}
+	}
+	port->mcr = 0;
+	port->msr = 0;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+}
+/*
+ * gs_configure
+ *
+ * Configures the device by enabling device specific
+ * optimizations, setting up the endpoints, allocating
+ * read and write requests and queuing read requests.
+ *
+ * The device lock must be held when calling this function.
+ */
+static void gs_configure(int config, void *_ctxt)
+{
+	int i, ret = 0;
+	unsigned MaxPacketSize;
+	struct gs_dev *dev = _ctxt;
+	struct usb_endpoint *ep;
+	struct usb_request *req;
+	struct gs_port *port;
+	struct list_head *rhead;
+	struct list_head *whead;
+	unsigned started = 0;
+
+	if (dev == NULL) {
+		printk(KERN_ERR "gs_configure: NULL device pointer\n");
+		return;
+	}
+	if (!dev->bound)
+		return;
+
+	port = dev->dev_port[0];
+	if (port == NULL) {
+		printk(KERN_ERR "gs_configure: port is NULL\n");
+		return;
+	}
+	rhead = &port->read_pool;
+	whead = &port->write_pool;
+
+
+	if (!config) {
+		gs_debug("gs_configure: Deconfigure\n");
+		dev->configured = SERIAL_UNCONFIGURED;
+		gs_reset_config(dev);
+		return;
+	}
+	dev->dev_config = config;
+
+	if (dev->dev_in_ep == NULL || dev->dev_out_ep == NULL ||
+	    (dev->dev_notify_ep == NULL)) {
+		printk(KERN_ERR "gs_configure : cannot find endpoints\n");
+		ret = -ENODEV;
+		goto reset_config;
+	}
+
+	if (usb_msm_get_speed() == USB_SPEED_HIGH) {
+		usb_configure_endpoint(dev->dev_in_ep, &dev->gs_hs_bulkin_desc);
+		usb_configure_endpoint(dev->dev_out_ep,
+					&dev->gs_hs_bulkout_desc);
+		usb_configure_endpoint(dev->dev_notify_ep,
+					&dev->gs_hs_notifyin_desc);
+	} else {
+		usb_configure_endpoint(dev->dev_in_ep, &dev->gs_fs_bulkin_desc);
+		usb_configure_endpoint(dev->dev_out_ep,
+					&dev->gs_fs_bulkout_desc);
+		usb_configure_endpoint(dev->dev_notify_ep,
+					&dev->gs_fs_notifyin_desc);
+	}
+	usb_ept_enable(dev->dev_in_ep, 1);
+	usb_ept_enable(dev->dev_out_ep, 1);
+	usb_ept_enable(dev->dev_notify_ep, 1);
+
+	gs_debug("gs_configure: endpoint sizes and buffers\n");
+	/* allocate and queue read requests */
+	ep = dev->dev_out_ep;
+	MaxPacketSize = usb_ept_get_max_packet(ep);
+	for (i = 0; i < read_q_size; i++) {
+		req = gs_alloc_req(ep, MaxPacketSize);
+		if (req) {
+			req->device = (void *)dev;
+			req->length = MaxPacketSize;
+			req->complete = gs_read_complete;
+			list_add_tail(&req->list, rhead);
+			gs_debug("gs_configure: queuing read request(%d)\n", i);
+		} else {
+			printk(KERN_ERR
+			"gs_configure: cannot allocate read request(%d)\n", i);
+			goto reset_config;
+		}
+	}
+
+	/* allocate write requests, and put on free list */
+	ep = dev->dev_in_ep;
+	MaxPacketSize = usb_ept_get_max_packet(ep);
+	for (i = 0; i < write_q_size; i++) {
+		req = gs_alloc_req(ep, MaxPacketSize);
+		if (req) {
+			req->device = (void *)dev;
+			req->length = MaxPacketSize;
+			req->complete = gs_write_complete;
+			list_add_tail(&req->list, whead);
+		} else {
+			printk(KERN_ERR
+			"gs_configure: cannot allocate write request(%d)\n", i);
+			goto reset_config;
+		}
+	}
+
+	ep = dev->dev_notify_ep;
+	MaxPacketSize = usb_ept_get_max_packet(ep);
+	for (i = 0; i < GS_DEFAULT_INT_REQ; ++i) {
+		struct gs_reqbuf *bh = &dev->statusreqbuf;
+		dev->notify_req = req = gs_alloc_req(ep, 0);
+		if (req) {
+			req->device = (void *)dev;
+			req->buf = bh->buf;
+			req->length = MaxPacketSize;
+			req->complete = gs_status_complete;
+		}
+	}
+	if (port->port_open_count) {
+		unsigned long flags;
+		spin_lock_irqsave(&port->port_lock, flags);
+		started = gs_start_rx(dev);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		if (started)
+			tty_wakeup(port->port_tty);
+	}
+
+	dev->configured = SERIAL_CONFIGURED;
+
+	return;
+
+reset_config:
+	printk(KERN_ERR "gs_configure(end): error, calling gs_reset_config\n");
+	gs_reset_config(dev);
+	return;
+}
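+
+/*
+ * gs_start_rx
+ *
+ * Queue all idle read requests on the OUT endpoint.  Called with the
+ * port lock held; the lock is dropped around usb_ept_queue_xfer().
+ * Returns the number of requests queued.
+ */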
+static unsigned gs_start_rx(struct gs_dev *dev)
+{
+	struct gs_port *port = dev->dev_port[0];
+	struct list_head *pool = &port->read_pool;
+	unsigned ret = 0;
+	struct usb_endpoint *ep = dev->dev_out_ep;
+	unsigned started = 0;
+
+	while (!list_empty(pool)) {
+		struct usb_request	*req;
+		struct tty_struct	*tty;
+		tty = port->port_tty;
+		if (!tty) {
+			printk(KERN_ERR "%s: tty is null\n", __func__);
+			break;
+		}
+
+		req = list_entry(pool->next, struct usb_request, list);
+		list_del(&req->list);
+		spin_unlock(&port->port_lock);
+		ret = usb_ept_queue_xfer(ep, req);
+		spin_lock(&port->port_lock);
+		if (ret) {
+			list_add(&req->list, pool);
+			break;
+		}
+		started++;
+
+	}
+	return started;
+}
+/*
+ * gs_reset_config
+ *
+ * Mark the device as not configured, disable all endpoints,
+ * which forces completion of pending I/O and frees queued
+ * requests, and free the remaining write requests on the
+ * free list.
+ *
+ * The device lock must be held when calling this function.
+ */
+static void gs_reset_config(struct gs_dev *dev)
+{
+	struct gs_port *port;
+	struct usb_request *req;
+	unsigned long flags;
+
+	if (dev == NULL) {
+		printk(KERN_ERR "gs_reset_config: NULL device pointer\n");
+		return;
+	}
+
+	port = dev->dev_port[0];
+
+
+
+	if (dev->dev_out_ep)
+		usb_free_endpoint_all_req(dev->dev_out_ep);
+	if (dev->dev_in_ep)
+		usb_free_endpoint_all_req(dev->dev_in_ep);
+	if (dev->dev_notify_ep)
+		usb_free_endpoint_all_req(dev->dev_notify_ep);
+
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	dev->dev_config = GS_NO_CONFIG_ID;
+	/* free write requests on the free list */
+	while (!list_empty(&port->write_pool)) {
+		req = list_entry(port->write_pool.next,
+				       struct usb_request, list);
+		list_del(&req->list);
+		gs_free_req(dev->dev_in_ep, req);
+	}
+
+	/* free read requests from read pool */
+	while (!list_empty(&port->read_pool)) {
+		req = list_entry(port->read_pool.next,
+				       struct usb_request, list);
+		list_del(&req->list);
+		gs_free_req(dev->dev_out_ep, req);
+	}
+
+	/* free read requests from read queue */
+	while (!list_empty(&port->read_queue)) {
+		req = list_entry(port->read_queue.next,
+				       struct usb_request, list);
+		list_del(&req->list);
+		gs_free_req(dev->dev_out_ep, req);
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/*
+ * gs_alloc_req
+ *
+ * Allocate a usb_request and its buffer.  Returns a pointer to the
+ * usb_request or NULL if there is an error.
+ */
+static struct usb_request *gs_alloc_req(struct usb_endpoint *ep,
+					unsigned int len)
+{
+	struct usb_request *req;
+	if (ep == NULL)
+		return NULL;
+	req = usb_ept_alloc_req(ep, len);
+	return req;
+}
+
+/*
+ * gs_free_req
+ *
+ * Free a usb_request and its buffer.
+ */
+static void gs_free_req(struct usb_endpoint *ep, struct usb_request *req)
+{
+	if (ep != NULL && req != NULL)
+		usb_ept_free_req(ep, req);
+}
+
+/*
+ * gs_alloc_ports
+ *
+ * Allocate all ports and set the gs_dev struct to point to them.
+ * Return 0 if successful, or a negative error number.
+ *
+ * The device lock is normally held when calling this function.
+ */
+static int gs_alloc_ports(struct gs_dev *dev, gfp_t kmalloc_flags)
+{
+	int i;
+	struct gs_port *port;
+
+	if (dev == NULL)
+		return -EIO;
+
+	for (i = 0; i < GS_NUM_PORTS; i++) {
+		port = kzalloc(sizeof(struct gs_port), kmalloc_flags);
+		if (port == NULL)
+			return -ENOMEM;
+
+		INIT_WORK(&port->push_work, gs_rx_push);
+		INIT_LIST_HEAD(&port->read_pool);
+		INIT_LIST_HEAD(&port->read_queue);
+		INIT_LIST_HEAD(&port->write_pool);
+		port->msr = 0;
+		port->prev_msr = 0;
+		port->mcr = 0;
+		port->port_dev = dev;
+		port->port_num = i;
+		port->port_line_coding.dwDTERate =
+		    cpu_to_le32(GS_DEFAULT_DTE_RATE);
+		port->port_line_coding.bCharFormat = GS_DEFAULT_CHAR_FORMAT;
+		port->port_line_coding.bParityType = GS_DEFAULT_PARITY;
+		port->port_line_coding.bDataBits = GS_DEFAULT_DATA_BITS;
+		spin_lock_init(&port->port_lock);
+		mutex_init(&port->mutex_lock);
+		init_waitqueue_head(&port->port_write_wait);
+
+		dev->dev_port[i] = port;
+	}
+
+	return 0;
+}
+
+/*
+ * gs_free_ports
+ *
+ * Free all closed ports.  Open ports are instead disconnected: their
+ * write buffers are freed and their device pointers, as well as the
+ * device's pointers to them, are set to NULL.  Such ports are freed
+ * later, when they are closed.
+ *
+ * The device lock is normally held when calling this function.
+ */
+static void gs_free_ports(struct gs_dev *dev)
+{
+	int i;
+	unsigned long flags;
+	struct gs_port *port;
+
+	if (dev == NULL)
+		return;
+
+	for (i = 0; i < GS_NUM_PORTS; i++) {
+		port = dev->dev_port[i];
+		if (port != NULL) {
+			dev->dev_port[i] = NULL;
+
+			spin_lock_irqsave(&port->port_lock, flags);
+
+			if (port->port_write_buf != NULL) {
+				gs_buf_free(port->port_write_buf);
+				port->port_write_buf = NULL;
+			}
+
+			if (port->port_open_count > 0 || port->port_in_use) {
+				port->port_dev = NULL;
+				wake_up_interruptible(&port->port_write_wait);
+				if (port->port_tty) {
+					wake_up_interruptible
+					    (&port->port_tty->read_wait);
+					wake_up_interruptible
+					    (&port->port_tty->write_wait);
+				}
+				spin_unlock_irqrestore(&port->port_lock, flags);
+			} else {
+				spin_unlock_irqrestore(&port->port_lock, flags);
+				kfree(port);
+			}
+
+		}
+	}
+}
+
+/* Circular Buffer */
+
+/*
+ * gs_buf_alloc
+ *
+ * Allocate a circular buffer and all associated memory.
+ */
+static struct gs_buf *gs_buf_alloc(unsigned int size, gfp_t kmalloc_flags)
+{
+	struct gs_buf *gb;
+
+	if (size == 0)
+		return NULL;
+
+	gb = kmalloc(sizeof(struct gs_buf), kmalloc_flags);
+	if (gb == NULL)
+		return NULL;
+
+	gb->buf_buf = kmalloc(size, kmalloc_flags);
+	if (gb->buf_buf == NULL) {
+		kfree(gb);
+		return NULL;
+	}
+
+	gb->buf_size = size;
+	gb->buf_get = gb->buf_put = gb->buf_buf;
+
+	return gb;
+}
+
+/*
+ * gs_buf_free
+ *
+ * Free the buffer and all associated memory.
+ */
+void gs_buf_free(struct gs_buf *gb)
+{
+	if (gb) {
+		kfree(gb->buf_buf);
+		kfree(gb);
+	}
+}
+
+/*
+ * gs_buf_clear
+ *
+ * Clear out all data in the circular buffer.
+ */
+void gs_buf_clear(struct gs_buf *gb)
+{
+	if (gb != NULL)
+		gb->buf_get = gb->buf_put;
+	/* equivalent to a get of all data available */
+}
+
+/*
+ * gs_buf_data_avail
+ *
+ * Return the number of bytes of data available in the circular
+ * buffer.
+ */
+unsigned int gs_buf_data_avail(struct gs_buf *gb)
+{
+	if (gb != NULL)
+		return (gb->buf_size + gb->buf_put - gb->buf_get)
+		    % gb->buf_size;
+	else
+		return 0;
+}
+
+/*
+ * gs_buf_space_avail
+ *
+ * Return the number of bytes of space available in the circular
+ * buffer.
+ */
+unsigned int gs_buf_space_avail(struct gs_buf *gb)
+{
+	if (gb != NULL)
+		return (gb->buf_size + gb->buf_get - gb->buf_put - 1)
+		    % gb->buf_size;
+	else
+		return 0;
+}
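+
+/*
+ * Note: one byte of the buffer is always left unused, so that
+ * buf_get == buf_put unambiguously means "empty"; the usable
+ * capacity is therefore buf_size - 1.
+ */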
+
+/*
+ * gs_buf_put
+ *
+ * Copy data from a user buffer and put it into the circular buffer.
+ * Restrict to the amount of space available.
+ *
+ * Return the number of bytes copied.
+ */
+unsigned int gs_buf_put(struct gs_buf *gb, const char *buf, unsigned int count)
+{
+	unsigned int len;
+
+	if (gb == NULL)
+		return 0;
+
+	len = gs_buf_space_avail(gb);
+	if (count > len)
+		count = len;
+
+	if (count == 0)
+		return 0;
+
+	len = gb->buf_buf + gb->buf_size - gb->buf_put;
+	if (count > len) {
+		memcpy(gb->buf_put, buf, len);
+		memcpy(gb->buf_buf, buf + len, count - len);
+		gb->buf_put = gb->buf_buf + count - len;
+	} else {
+		memcpy(gb->buf_put, buf, count);
+		if (count < len)
+			gb->buf_put += count;
+		else		/* count == len */
+			gb->buf_put = gb->buf_buf;
+	}
+
+	return count;
+}
+
+/*
+ * gs_buf_get
+ *
+ * Get data from the circular buffer and copy to the given buffer.
+ * Restrict to the amount of data available.
+ *
+ * Return the number of bytes copied.
+ */
+unsigned int gs_buf_get(struct gs_buf *gb, char *buf, unsigned int count)
+{
+	unsigned int len;
+
+	if (gb == NULL)
+		return 0;
+
+	len = gs_buf_data_avail(gb);
+	if (count > len)
+		count = len;
+
+	if (count == 0)
+		return 0;
+
+	len = gb->buf_buf + gb->buf_size - gb->buf_get;
+	if (count > len) {
+		memcpy(buf, gb->buf_get, len);
+		memcpy(buf + len, gb->buf_buf, count - len);
+		gb->buf_get = gb->buf_buf + count - len;
+	} else {
+		memcpy(buf, gb->buf_get, count);
+		if (count < len)
+			gb->buf_get += count;
+		else		/* count == len */
+			gb->buf_get = gb->buf_buf;
+	}
+
+	return count;
+}
+
+/*
+* gs_tiocmget
+*/
+static int gs_tiocmget(struct tty_struct *tty, struct file *file)
+{
+	struct gs_port *port;
+	unsigned int mcr, msr;
+	unsigned int result = 0;
+	struct gs_dev *dev = gs_devices[tty->index];
+
+	if (dev == NULL)
+		return -EIO;
+
+	port = dev->dev_port[0];
+	if (port == NULL)
+		return -EIO;
+
+	mutex_lock(&port->mutex_lock);
+	mcr = port->mcr;
+	msr = port->msr;
+
+	result = ((mcr & MCR_RTS) ? TIOCM_RTS : 0)
+		| ((mcr & MCR_DTR) ? TIOCM_DTR : 0)
+		| ((mcr & MCR_LOOP) ? TIOCM_LOOP : 0)
+		| ((msr & MSR_CD) ? TIOCM_CD : 0)
+		| ((msr & MSR_RI) ? TIOCM_RI : 0)
+		| ((msr & MSR_DSR) ? TIOCM_DSR : 0)
+		| ((msr & MSR_CTS) ? TIOCM_CTS : 0);
+
+	mutex_unlock(&port->mutex_lock);
+	return result;
+}
+
+/*
+* gs_tiocmset
+*/
+static int gs_tiocmset(struct tty_struct *tty, struct file *file,
+	unsigned int set, unsigned int clear)
+{
+	struct gs_port *port;
+	unsigned int mcr;
+	unsigned int msr;
+	struct gs_dev *dev = gs_devices[tty->index];
+
+	if (dev == NULL)
+		return -EIO;
+	port = dev->dev_port[0];
+
+	if (port == NULL)
+		return -EIO;
+
+	mcr = port->mcr;
+	msr = port->msr;
+	if (dev->configured != SERIAL_CONFIGURED)
+		return -EIO;
+
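+	/* Only the input signals DSR, RI, CD and CTS may be changed here;
+	 * a change is propagated to the host below via send_notify_data(). */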
+	set &= TIOCM_DSR | TIOCM_RI | TIOCM_CD | TIOCM_CTS;
+
+	if (set & TIOCM_DSR)
+		msr |= MSR_DSR;
+	if (set & TIOCM_RI)
+		msr |= MSR_RI;
+	if (set & TIOCM_CD)
+		msr |= MSR_CD;
+	if (set & TIOCM_CTS)
+		msr |= MSR_CTS;
+
+	clear &= TIOCM_DSR | TIOCM_RI | TIOCM_CD | TIOCM_CTS;
+
+	if (clear & TIOCM_RI)
+		msr &= ~MSR_RI;
+	if (clear & TIOCM_DSR)
+		msr &= ~MSR_DSR;
+	if (clear & TIOCM_CD)
+		msr &= ~MSR_CD;
+	if (clear & TIOCM_CTS)
+		msr &= ~MSR_CTS;
+
+	mutex_lock(&port->mutex_lock);
+	port->mcr = mcr;
+	port->msr = msr;
+
+	if (port->prev_msr != port->msr) {
+		send_notify_data(dev->dev_notify_ep, dev->notify_req);
+		port->prev_msr = port->msr;
+	}
+	mutex_unlock(&port->mutex_lock);
+
+	return 0;
+}
diff --git a/drivers/usb/function/ums.c b/drivers/usb/function/ums.c
new file mode 100644
index 0000000..509387f
--- /dev/null
+++ b/drivers/usb/function/ums.c
@@ -0,0 +1,469 @@
+/* drivers/usb/function/ums.c
+ *
+ * Function Device for USB Mass Storage
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+
+#include <linux/wait.h>
+#include <linux/list.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb_usual.h>
+
+#include <asm/atomic.h>
+#include <asm/uaccess.h>
+
+#include "usb_function.h"
+
+#if 1
+#define DBG(x...) do {} while (0)
+#else
+#define DBG(x...) printk(x)
+#endif
+
+#define TXN_MAX 4096
+
+/* UMS setup class requests */
+#define USB_BULK_GET_MAX_LUN_REQUEST   0xFE
+#define USB_BULK_RESET_REQUEST         0xFF
+
+/* number of rx and tx requests to allocate */
+#define RX_REQ_MAX 4
+#define TX_REQ_MAX 4
+
+/* FIXME - add ioctl() support for LUN count */
+int lun_count = 1;
+
+struct ums_context
+{
+	int online;
+	int error;
+	
+	atomic_t read_excl;
+	atomic_t write_excl;
+	atomic_t open_excl;
+	spinlock_t lock;
+	
+	struct usb_endpoint *out;
+	struct usb_endpoint *in;
+
+	struct list_head tx_idle;
+	struct list_head rx_idle;
+	struct list_head rx_done;
+	
+	wait_queue_head_t read_wq;
+	wait_queue_head_t write_wq;
+
+	/* the request we're currently reading from */
+	struct usb_request *read_req;
+	unsigned char *read_buf;
+};
+
+static struct ums_context _context;
+
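+/*
+ * _lock()/_unlock() implement a non-blocking "try lock" on an atomic
+ * counter: only one opener, one reader and one writer at a time;
+ * contenders get -EBUSY instead of sleeping.
+ */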
+static inline int _lock(atomic_t *excl)
+{
+	if(atomic_inc_return(excl) == 1) {
+		return 0;
+	} else {
+		atomic_dec(excl);
+		return -1;
+	}
+}
+
+static inline void _unlock(atomic_t *excl)
+{
+	atomic_dec(excl);
+}
+
+/* add a request to the tail of a list */
+static void req_put(struct ums_context *ctxt, struct list_head *head, struct usb_request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+	list_add_tail(&req->list, head);
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+}
+
+/* remove a request from the head of a list */
+static struct usb_request *req_get(struct ums_context *ctxt, struct list_head *head)
+{
+	unsigned long flags;
+	struct usb_request *req;
+	
+	spin_lock_irqsave(&ctxt->lock, flags);
+	if(list_empty(head)) {
+		req = 0;
+	} else {
+		req = list_first_entry(head, struct usb_request, list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+	return req;
+}
+
+static void ums_complete_in(struct usb_endpoint *ept, struct usb_request *req)
+{
+	struct ums_context *ctxt = req->context;
+
+	DBG("ums_complete_in length: %d, actual: %d \n", req->length, req->actual);
+    
+	if(req->status != 0) 
+		ctxt->error = 1;
+
+	req_put(ctxt, &ctxt->tx_idle, req);
+
+	wake_up(&ctxt->write_wq);
+}
+
+static void ums_complete_out(struct usb_endpoint *ept, struct usb_request *req)
+{
+	struct ums_context *ctxt = req->context;
+
+	DBG("ums_complete_out length: %d, actual: %d \n", req->length, req->actual);
+
+	if(req->status != 0) {
+		ctxt->error = 1;
+		req_put(ctxt, &ctxt->rx_idle, req);
+	} else {
+		req_put(ctxt, &ctxt->rx_done, req);
+	}
+
+	wake_up(&ctxt->read_wq);
+}
+
+static ssize_t ums_read(struct file *fp, char __user *buf,
+                            size_t count, loff_t *pos)
+{
+	struct ums_context *ctxt = &_context;
+	struct usb_request *req;
+	int r = count, xfer;
+	int ret;
+
+	DBG("ums_read(%d)\n", count);
+	
+	if(_lock(&ctxt->read_excl))
+		return -EBUSY;
+	
+	/* we will block until we're online */
+	while(!(ctxt->online || ctxt->error)) {
+		DBG("ums_read: waiting for online state\n");
+		ret = wait_event_interruptible(ctxt->read_wq, (ctxt->online || ctxt->error));
+		if(ret < 0) {
+			_unlock(&ctxt->read_excl);
+			return ret;
+		}
+	}
+
+	if(ctxt->error) {
+		r = -EIO;
+		goto fail;
+	}
+
+		/* if we have idle read requests, get them queued */
+	if((req = req_get(ctxt, &ctxt->rx_idle))) {
+		req->length = count;
+		ret = usb_ept_queue_xfer(ctxt->out, req);
+		if(ret < 0) {
+			DBG("ums_read: failed to queue req %p (%d)\n", req, ret);
+			r = -EIO;
+			ctxt->error = 1;
+			req_put(ctxt, &ctxt->rx_idle, req);
+			goto fail;
+		} else {
+			DBG("rx %p queue\n", req);
+		}
+	} else {
+		DBG("req_get failed!\n");
+		r = -EIO;
+		goto fail;
+	}
+
+	/* wait for a request to complete */
+	req = 0;
+	ret = wait_event_interruptible(ctxt->read_wq, 
+				       ((req = req_get(ctxt, &ctxt->rx_done)) || ctxt->error));
+	
+	if(req != 0) {
+		ctxt->read_req = req;
+		ctxt->read_buf = req->buf;
+		DBG("rx %p %d\n", req, req->actual);
+
+		xfer = req->actual;
+		if (xfer > count) {
+			xfer = count;
+		}
+		r = xfer;
+
+		if (xfer > 0) {	
+			DBG("copy_to_user %d bytes\n", xfer); 
+			if(copy_to_user(buf, ctxt->read_buf, xfer)) {
+				r = -EFAULT;
+			}
+
+		}		
+		req_put(ctxt, &ctxt->rx_idle, ctxt->read_req);
+		ctxt->read_req = 0;
+	} else {
+		r = ret;
+	}
+
+fail:
+	_unlock(&ctxt->read_excl);
+	DBG("ums_read returning %d\n", r);
+	return r;
+} 
+
+static ssize_t ums_write(struct file *fp, const char __user *buf,
+                             size_t count, loff_t *pos)
+{
+	struct ums_context *ctxt = &_context;
+	struct usb_request *req = 0;
+	int r = count, xfer;
+	int ret;
+
+	DBG("ums_write(%d)\n", count);
+
+	if(_lock(&ctxt->write_excl))
+		return -EBUSY;
+
+	while(count >= 0) {
+		if(ctxt->error) {
+			r = -EIO;
+			break;
+		}
+
+		/* get an idle tx request to use */
+		req = 0;
+		ret = wait_event_interruptible(ctxt->write_wq, 
+					       ((req = req_get(ctxt, &ctxt->tx_idle)) || ctxt->error));
+		
+		if(ret < 0) {
+			r = ret;
+			break;
+		}
+
+		if(req != 0) {
+			xfer = count > TXN_MAX ? TXN_MAX : count;
+			if(copy_from_user(req->buf, buf, xfer)){
+				r = -EFAULT;
+				break;
+			}
+			
+			req->length = xfer;
+			ret = usb_ept_queue_xfer(ctxt->in, req);
+			if(ret < 0) {
+				DBG("ums_write: xfer error %d\n", ret);
+				ctxt->error = 1;
+				r = -EIO;
+				break;
+			}
+
+			buf += xfer;
+			count -= xfer;
+
+			/* zero this so we don't try to free it on error exit */
+			req = 0;
+			if (count == 0) {
+			    break;
+			}
+		}
+	}
+
+
+	if(req)
+		req_put(ctxt, &ctxt->tx_idle, req);
+
+	_unlock(&ctxt->write_excl);
+	DBG("ums_write returning %d\n", r);
+	return r;
+}
+
+static int ums_open(struct inode *ip, struct file *fp)
+{
+	struct ums_context *ctxt = &_context;
+	
+	if(_lock(&ctxt->open_excl))
+		return -EBUSY;
+
+	/* clear the error latch */
+	ctxt->error = 0;
+	
+	return 0;
+}
+
+static int ums_release(struct inode *ip, struct file *fp)
+{
+	struct ums_context *ctxt = &_context;
+
+	_unlock(&ctxt->open_excl);
+	return 0;
+}
+
+static struct file_operations ums_fops = {
+	.owner =   THIS_MODULE,
+	.read =    ums_read,
+	.write =   ums_write,
+	.open =    ums_open,
+	.release = ums_release,
+};
+	
+static struct miscdevice ums_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "android_ums",
+	.fops = &ums_fops,
+};
+
+static void ums_bind(struct usb_endpoint **ept, void *_ctxt)
+{
+	struct ums_context *ctxt = _ctxt;
+	struct usb_request *req;
+	int n;
+	
+	ctxt->out = ept[0];
+	ctxt->in = ept[1];
+
+	DBG("ums_bind() %p, %p\n", ctxt->out, ctxt->in);
+	
+	for(n = 0; n < RX_REQ_MAX; n++) {
+		req = usb_ept_alloc_req(ctxt->out, 4096);
+		if(req == 0) goto fail;
+		req->context = ctxt;
+		req->complete = ums_complete_out;
+		req_put(ctxt, &ctxt->rx_idle, req);
+	}
+
+	for(n = 0; n < TX_REQ_MAX; n++) {
+		req = usb_ept_alloc_req(ctxt->in, 4096);
+		if(req == 0) goto fail;
+		req->context = ctxt;
+		req->complete = ums_complete_in;
+		req_put(ctxt, &ctxt->tx_idle, req);
+	}
+
+	printk("ums_bind() allocated %d rx and %d tx requests\n",
+	       RX_REQ_MAX, TX_REQ_MAX);
+	
+	misc_register(&ums_device);
+	return;
+	
+fail:
+	printk("ums_bind() could not allocate requests\n");
+
+	/* XXX release any we did allocate */
+}
+
+static int ums_setup(struct usb_ctrlrequest* req, void* buf, int len, void *_ctxt)
+{
+	if ((req->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
+		if (req->bRequest == USB_BULK_GET_MAX_LUN_REQUEST) {
+			if ((req->bRequestType & USB_DIR_IN) != USB_DIR_IN 
+					|| req->wValue != 0 || req->wIndex != 0)
+			 	return -1;
+
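+			/* Get Max LUN returns the highest LUN index,
+			 * i.e. the LUN count minus one */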
+			((u8*)buf)[0] = lun_count - 1;
+			printk("USB_BULK_GET_MAX_LUN_REQUEST returning %d\n", lun_count - 1);
+			return 1;
+		} else if (req->bRequest == USB_BULK_RESET_REQUEST) {
+			if ((req->bRequestType & USB_DIR_IN)
+					|| req->wValue != 0 || req->wIndex != 0)
+			 	return -1;
+
+			/* FIXME - I'm not sure what to do here */
+			printk("USB_BULK_RESET_REQUEST\n");
+			return 0;
+		}
+	}
+
+	return -1;
+}
+
+static void ums_configure(int configured, void *_ctxt)
+{
+	struct ums_context *ctxt = _ctxt;
+	struct usb_request *req;
+	
+	DBG("ums_configure() %d\n", configured);
+
+	if(configured) {
+		ctxt->online = 1;
+
+		/* if we have a stale request being read, recycle it */
+		ctxt->read_buf = 0;
+		if(ctxt->read_req) {
+			req_put(ctxt, &ctxt->rx_idle, ctxt->read_req);
+			ctxt->read_req = 0;
+		}
+
+		/* retire any completed rx requests from previous session */
+		while((req = req_get(ctxt, &ctxt->rx_done))) {
+			req_put(ctxt, &ctxt->rx_idle, req);
+		}
+		
+	} else {
+		ctxt->online = 0;
+		ctxt->error = 1;
+	}
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&ctxt->read_wq);
+}
+
+static struct usb_function usb_func_ums = {
+	.bind = ums_bind,
+	.configure = ums_configure,
+	.setup = ums_setup,
+
+	.name = "ums",
+	.context = &_context,
+
+	.ifc_class = USB_CLASS_MASS_STORAGE,
+	.ifc_subclass = US_SC_SCSI,
+	.ifc_protocol = US_PR_BULK,
+
+	.ifc_name = "ums",
+	
+	.ifc_ept_count = 2,
+	.ifc_ept_type = { EPT_BULK_OUT, EPT_BULK_IN },
+};
+
+static int __init ums_init(void)
+{
+	struct ums_context *ctxt = &_context;
+	DBG("ums_init()\n");
+
+	spin_lock_init(&ctxt->lock);
+
+	init_waitqueue_head(&ctxt->read_wq);
+	init_waitqueue_head(&ctxt->write_wq);
+
+	atomic_set(&ctxt->open_excl, 0);
+	atomic_set(&ctxt->read_excl, 0);
+	atomic_set(&ctxt->write_excl, 0);
+	
+	INIT_LIST_HEAD(&ctxt->rx_idle);
+	INIT_LIST_HEAD(&ctxt->rx_done);
+	INIT_LIST_HEAD(&ctxt->tx_idle);
+	
+	return usb_function_register(&usb_func_ums);
+}
+
+module_init(ums_init);
diff --git a/drivers/usb/function/usb_function.h b/drivers/usb/function/usb_function.h
new file mode 100644
index 0000000..35eb257
--- /dev/null
+++ b/drivers/usb/function/usb_function.h
@@ -0,0 +1,187 @@
+/* drivers/usb/function/usb_function.h
+ *
+ * USB Function Device Interface
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DRIVERS_USB_FUNCTION_USB_FUNCTION_H_
+#define _DRIVERS_USB_FUNCTION_USB_FUNCTION_H_
+
+#include <linux/list.h>
+#include <linux/usb/ch9.h>
+
+#define EPT_BULK_IN   1
+#define EPT_BULK_OUT  2
+#define EPT_INT_IN  3
+
+#define USB_CONFIG_ATT_SELFPOWER_POS	(6)	/* self powered */
+#define USB_CONFIG_ATT_WAKEUP_POS	(5)	/* can wakeup */
+
+struct usb_endpoint {
+	struct usb_info *ui;
+	struct msm_request *req; /* head of pending requests */
+	struct msm_request *last;
+	unsigned flags;
+
+	/* bit number (0-31) in various status registers
+	** as well as the index into the usb_info's array
+	** of all endpoints
+	*/
+	unsigned char bit;
+	unsigned char num;
+
+	unsigned short max_pkt;
+
+	unsigned ept_halted;
+
+	/* pointers to DMA transfer list area */
+	/* these are allocated from the usb_info dma space */
+	struct ept_queue_head *head;
+	struct usb_endpoint_descriptor *ep_descriptor;
+	unsigned int alloced;
+};
+
+struct usb_request {
+	void *buf;          /* pointer to associated data buffer */
+	unsigned length;    /* requested transfer length */
+	int status;         /* status upon completion */
+	unsigned actual;    /* actual bytes transferred */
+
+	void (*complete)(struct usb_endpoint *ep, struct usb_request *req);
+	void *context;
+
+	void *device;
+
+	struct list_head list;
+};
+
+struct usb_function {
+	/* bind() is called once when the function has had its endpoints
+	** allocated, but before the bus is active.
+	**
+	** might be a good place to allocate some usb_request objects
+	*/
+	void (*bind)(void *);
+
+	/* unbind() is called when the function is being removed.
+	** it is illegal to call any usb_ept_* hooks at this point
+	** and all endpoints must be released.
+	*/
+	void (*unbind)(void *);
+
+	/* configure() is called when the usb client has been configured
+	** by the host and again when the device is unconfigured (or
+	** when the client is detached)
+	**
+	** currently called from interrupt context.
+	*/
+	void (*configure)(int configured, void *);
+	void (*disconnect)(void *);
+
+	/* setup() is called to allow functions to handle class and vendor
+	** setup requests.  If the request is unsupported or cannot be handled,
+	** setup() should return -1.
+	** For OUT requests, buf will point to a buffer containing the data
+	** received in the request's data phase, and len will contain the
+	** length of that data.
+	** setup() should return 0 after handling an OUT request successfully.
+	** For IN requests, buf will contain a pointer to a buffer for setup()
+	** to write data to, and len will be the maximum size of the data to
+	** be written back to the host.
+	** After successfully handling an IN request, setup() should return
+	** the number of bytes written to buf that should be sent in the
+	** response to the host.
+	*/
+	int (*setup)(struct usb_ctrlrequest *req, void *buf,
+			int len, void *);
+
+	int (*set_interface)(int ifc_num, int alt_set, void *_ctxt);
+	int (*get_interface)(int ifc_num, void *ctxt);
+	/* driver name */
+	const char *name;
+	void *context;
+
+	/* interface class/subclass/protocol for descriptor */
+	unsigned char ifc_class;
+	unsigned char ifc_subclass;
+	unsigned char ifc_protocol;
+
+	/* name string for descriptor */
+	const char *ifc_name;
+
+	/* number of needed endpoints and their types */
+	unsigned char ifc_ept_count;
+	unsigned char ifc_ept_type[8];
+
+	/* if the endpoint is disabled, its interface will not be
+	** included in the configuration descriptor
+	*/
+	unsigned char   disabled;
+
+	struct usb_descriptor_header **fs_descriptors;
+	struct usb_descriptor_header **hs_descriptors;
+
+	struct usb_request *ep0_out_req, *ep0_in_req;
+	struct usb_endpoint *ep0_out, *ep0_in;
+};
+
+int usb_function_register(struct usb_function *driver);
+int usb_function_unregister(struct usb_function *driver);
+
+int usb_msm_get_speed(void);
+void usb_configure_endpoint(struct usb_endpoint *ep,
+			struct usb_endpoint_descriptor *ep_desc);
+int usb_remote_wakeup(void);
+/* To allocate an endpoint from a function driver */
+struct usb_endpoint *usb_alloc_endpoint(unsigned direction);
+int usb_free_endpoint(struct usb_endpoint *ept);
+/* To enable an endpoint from a function driver */
+void usb_ept_enable(struct usb_endpoint *ept, int yes);
+int usb_msm_get_next_ifc_number(struct usb_function *);
+int usb_msm_get_next_strdesc_id(char *);
+void usb_msm_enable_iad(void);
+
+void usb_function_enable(const char *function, int enable);
+
+/* Allocate a USB request.
+** Must be called from a context that can sleep.
+** If bufsize is nonzero, req->buf will be allocated for
+** you and freed when the request is freed.  Otherwise
+** it is your responsibility to provide a buffer.
+*/
+struct usb_request *usb_ept_alloc_req(struct usb_endpoint *ept, unsigned bufsize);
+void usb_ept_free_req(struct usb_endpoint *ept, struct usb_request *req);
+
+/* safely callable from any context
+** returns 0 if successfully queued and sets req->status = -EBUSY
+** req->status will change to a different value upon completion
+** (0 for success, -EIO, -ENODEV, etc for error)
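+** note: the completion callback runs from the controller interrupt
+** path and therefore must not sleep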
+*/
+int usb_ept_queue_xfer(struct usb_endpoint *ept, struct usb_request *req);
+int usb_ept_flush(struct usb_endpoint *ept);
+int usb_ept_get_max_packet(struct usb_endpoint *ept);
+int usb_ept_cancel_xfer(struct usb_endpoint *ept, struct usb_request *_req);
+void usb_ept_fifo_flush(struct usb_endpoint *ept);
+int usb_ept_set_halt(struct usb_endpoint *ept);
+int usb_ept_clear_halt(struct usb_endpoint *ept);
+struct device *usb_get_device(void);
+struct usb_endpoint *usb_ept_find(struct usb_endpoint **ept, int type);
+struct usb_function *usb_ept_get_function(struct usb_endpoint *ept);
+int usb_ept_is_stalled(struct usb_endpoint *ept);
+void usb_request_set_buffer(struct usb_request *req, void *buf, dma_addr_t dma);
+void usb_free_endpoint_all_req(struct usb_endpoint *ep);
+void usb_remove_function_driver(struct usb_function *func);
+#endif
diff --git a/drivers/usb/function/zero.c b/drivers/usb/function/zero.c
new file mode 100644
index 0000000..449bcbf
--- /dev/null
+++ b/drivers/usb/function/zero.c
@@ -0,0 +1,120 @@
+/* driver/usb/function/zero.c
+ *
+ * Zero Function Device - A Trivial Data Source
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "usb_function.h"
+
+struct zero_context
+{
+	struct usb_endpoint *in;
+	struct usb_request *req0;
+	struct usb_request *req1;
+};
+
+static struct zero_context _context;
+
+static void zero_bind(struct usb_endpoint **ept, void *_ctxt)
+{
+	struct zero_context *ctxt = _ctxt;
+	ctxt->in = ept[0];
+	printk(KERN_INFO "zero_bind() %p\n", ctxt->in);
+
+	ctxt->req0 = usb_ept_alloc_req(ctxt->in, 4096);
+	ctxt->req1 = usb_ept_alloc_req(ctxt->in, 4096);
+
+	memset(ctxt->req0->buf, 0, 4096);
+	memset(ctxt->req1->buf, 0, 4096);
+}
+
+static void zero_unbind(void *_ctxt)
+{
+	struct zero_context *ctxt = _ctxt;
+	printk(KERN_INFO "zero_unbind()\n");
+	if (ctxt->req0) {
+		usb_ept_free_req(ctxt->in, ctxt->req0);
+		ctxt->req0 = 0;
+	}
+	if (ctxt->req1) {
+		usb_ept_free_req(ctxt->in, ctxt->req1);
+		ctxt->req1 = 0;
+	}
+	ctxt->in = 0;
+}
+
+static void zero_queue_in(struct zero_context *ctxt, struct usb_request *req);
+
+static void zero_in_complete(struct usb_endpoint *ept, struct usb_request *req)
+{
+	struct zero_context *ctxt = req->context;
+	unsigned char *data = req->buf;
+
+	if (req->status != -ENODEV)
+		zero_queue_in(ctxt, req);
+}
+
+static void zero_queue_in(struct zero_context *ctxt, struct usb_request *req)
+{
+	req->complete = zero_in_complete;
+	req->context = ctxt;
+	req->length = 4096;
+
+	usb_ept_queue_xfer(ctxt->in, req);
+}
+
+static void zero_configure(int configured, void *_ctxt)
+{
+	struct zero_context *ctxt = _ctxt;
+	printk(KERN_INFO "zero_configure() %d\n", configured);
+
+	if (configured) {
+		zero_queue_in(ctxt, ctxt->req0);
+		zero_queue_in(ctxt, ctxt->req1);
+	} else {
+		/* all pending requests will be canceled */
+	}
+}
+
+static struct usb_function usb_func_zero = {
+	.bind = zero_bind,
+	.unbind = zero_unbind,
+	.configure = zero_configure,
+
+	.name = "zero",
+	.context = &_context,
+
+	.ifc_class = 0xff,
+	.ifc_subclass = 0xfe,
+	.ifc_protocol = 0x02,
+
+	.ifc_name = "zero",
+
+	.ifc_ept_count = 1,
+	.ifc_ept_type = { EPT_BULK_IN },
+};
+
+static int __init zero_init(void)
+{
+	printk(KERN_INFO "zero_init()\n");
+	usb_function_register(&usb_func_zero);
+	return 0;
+}
+
+module_init(zero_init);
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 144a8c8..117d3bf 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -598,6 +598,24 @@
 # LAST -- dummy/emulated controller
 #
 
+config USB_GADGET_MSM_72K
+	boolean "MSM 72K Device Controller"
+	depends on ARCH_MSM
+	select USB_GADGET_SELECTED
+	select USB_GADGET_DUALSPEED
+	help
+	   USB gadget driver for Qualcomm MSM 72K architecture.
+
+	   Say "y" to link the driver statically, or "m" to build a
+	   dynamically linked module called "msm72k" and force all
+	   gadget drivers to also be dynamically linked.
+
+config USB_MSM_72K
+	tristate
+	depends on USB_GADGET_MSM_72K
+	default USB_GADGET
+	select USB_GADGET_SELECTED
+
 config USB_GADGET_DUMMY_HCD
 	boolean "Dummy HCD (DEVELOPMENT)"
 	depends on USB=y || (USB=m && USB_GADGET=m)
@@ -1064,4 +1082,84 @@
 
 endchoice
 
+config USB_CSW_HACK
+	boolean "USB mass storage CSW hack feature"
+	default y
+	help
+	  This feature improves mass storage write performance by sending
+	  the CSW (Command Status Wrapper) to the host before the data is
+	  actually written to the storage medium.
+
+config MODEM_SUPPORT
+	boolean "modem support in generic serial function driver"
+	depends on USB_G_ANDROID
+	default y
+	help
+	  This feature enables modem functionality in the generic serial
+	  function driver. It adds an interrupt endpoint for sending modem
+	  notifications to the host, CDC descriptors so that the generic
+	  serial port enumerates as a modem, and CDC class requests for
+	  configuring modem line settings.
+	  Say "y" to enable modem support in the generic serial driver.
+
+config RMNET_SMD_CTL_CHANNEL
+	string "RMNET control SMD channel name"
+	depends on USB_G_ANDROID && MSM_SMD
+	default ""
+	help
+	  Control SMD channel for transferring QMI messages
+
+config RMNET_SMD_DATA_CHANNEL
+	string "RMNET Data SMD channel name"
+	depends on USB_G_ANDROID && MSM_SMD
+	default ""
+	help
+	  Data SMD channel for transferring network data
+
+config RMNET_SDIO_CTL_CHANNEL
+	int "RMNET control SDIO channel id"
+	default 8
+	depends on MSM_SDIO_CMUX && MSM_SDIO_DMUX
+	help
+	  Control SDIO channel for transferring RMNET QMI messages
+
+config RMNET_SDIO_DATA_CHANNEL
+	int "RMNET Data SDIO channel id"
+	default 8
+	depends on MSM_SDIO_CMUX && MSM_SDIO_DMUX
+	help
+	  Data SDIO channel for transferring network data
+
+config RMNET_SMD_SDIO_CTL_CHANNEL
+	int "RMNET(sdio_smd) Control SDIO channel id"
+	depends on MSM_SDIO_CMUX && MSM_SDIO_DMUX
+	default 8
+	help
+	  Control SDIO channel for transferring QMI messages
+
+config RMNET_SMD_SDIO_DATA_CHANNEL
+	int "RMNET(sdio_smd) Data SDIO channel id"
+	default 8
+	depends on MSM_SDIO_CMUX && MSM_SDIO_DMUX
+	help
+	  Data SDIO channel for transferring network data
+
+config RMNET_SDIO_SMD_DATA_CHANNEL
+	string "RMNET(sdio_smd) Data SMD channel name"
+	depends on MSM_SDIO_CMUX && MSM_SDIO_DMUX
+	default "DATA40"
+	help
+	  Data SMD channel for transferring network data
+
+config USB_ANDROID_RMNET_CTRL_SMD
+	boolean "RmNet(BAM) control over SMD driver"
+	depends on MSM_SMD
+	help
+	  Enabling this option adds rmnet-control-over-SMD support to the
+	  android gadget. Rmnet is an alternative to CDC-ECM and Windows
+	  RNDIS. It uses the Qualcomm MSM Interface (QMI) for control
+	  transfers; only the control interface is enabled by this option.
+	  The data interface uses BAM.
+
 endif # USB_GADGET
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index ab17a4c..064960c 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -29,6 +29,7 @@
 mv_udc-y			:= mv_udc_core.o mv_udc_phy.o
 obj-$(CONFIG_USB_CI13XXX_MSM)	+= ci13xxx_msm.o
 obj-$(CONFIG_USB_FUSB300)	+= fusb300_udc.o
+obj-$(CONFIG_USB_MSM_72K)	+= msm72k_udc.o
 
 #
 # USB gadget drivers
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
index b13633b..8146af7 100644
--- a/drivers/usb/gadget/android.c
+++ b/drivers/usb/gadget/android.c
@@ -30,6 +30,7 @@
 #include <linux/usb/ch9.h>
 #include <linux/usb/composite.h>
 #include <linux/usb/gadget.h>
+#include <linux/usb/android.h>
 
 #include "gadget_chips.h"
 
@@ -45,9 +46,10 @@
 #include "epautoconf.c"
 #include "composite.c"
 
+#include "f_diag.c"
 #include "f_mass_storage.c"
-#include "u_serial.c"
-#include "f_acm.c"
+//#include "u_serial.c"
+//#include "f_acm.c"
 #include "f_adb.c"
 #include "f_mtp.c"
 #include "f_accessory.c"
@@ -99,6 +101,7 @@
 	struct list_head enabled_functions;
 	struct usb_composite_dev *cdev;
 	struct device *dev;
+	struct android_usb_platform_data *pdata;
 
 	bool enabled;
 	bool connected;
@@ -187,6 +190,68 @@
 /*-------------------------------------------------------------------------*/
 /* Supported functions initialization */
 
+char diag_clients[32];	    /* enabled DIAG clients - "diag[,diag_mdm]" */
+static ssize_t clients_store(
+		struct device *device, struct device_attribute *attr,
+		const char *buff, size_t size)
+{
+	strlcpy(diag_clients, buff, sizeof(diag_clients));
+
+	return size;
+}
+
+static DEVICE_ATTR(clients, S_IWUSR, NULL, clients_store);
+static struct device_attribute *diag_function_attributes[] =
+					 { &dev_attr_clients, NULL };
+
+static int diag_function_init(struct android_usb_function *f,
+				 struct usb_composite_dev *cdev)
+{
+	return diag_setup();
+}
+
+static void diag_function_cleanup(struct android_usb_function *f)
+{
+	diag_cleanup();
+}
+
+static int diag_function_bind_config(struct android_usb_function *f,
+					struct usb_configuration *c)
+{
+	char *name;
+	char buf[32], *b;
+	int once = 0, err = -1;
+	int (*notify)(uint32_t, const char *);
+
+	strncpy(buf, diag_clients, sizeof(buf));
+	b = strim(buf);
+
+	while (b) {
+		name = strsep(&b, ",");
+		/* Allow only first diag channel to update pid and serial no */
+		if (!once++)
+			notify = _android_dev->pdata->update_pid_and_serial_num;
+		else
+			notify = NULL;
+
+		if (name) {
+			err = diag_function_add(c, name, notify);
+			if (err)
+				pr_err("diag: Cannot open channel '%s'\n", name);
+		}
+	}
+
+	return err;
+}
+
+static struct android_usb_function diag_function = {
+	.name		= "diag",
+	.init		= diag_function_init,
+	.cleanup	= diag_function_cleanup,
+	.bind_config	= diag_function_bind_config,
+	.attributes	= diag_function_attributes,
+};
+
 static int adb_function_init(struct android_usb_function *f, struct usb_composite_dev *cdev)
 {
 	return adb_setup();
@@ -209,7 +274,7 @@
 	.bind_config	= adb_function_bind_config,
 };
 
-
+#if 0
 #define MAX_ACM_INSTANCES 4
 struct acm_function_config {
 	int instances;
@@ -280,7 +345,7 @@
 	.bind_config	= acm_function_bind_config,
 	.attributes	= acm_function_attributes,
 };
-
+#endif
 
 static int mtp_function_init(struct android_usb_function *f, struct usb_composite_dev *cdev)
 {
@@ -644,8 +709,9 @@
 
 
 static struct android_usb_function *supported_functions[] = {
+	&diag_function,
 	&adb_function,
-	&acm_function,
+//	&acm_function,
 	&mtp_function,
 	&ptp_function,
 	&rndis_function,
@@ -1104,6 +1170,19 @@
 	return 0;
 }
 
+static int __devinit android_probe(struct platform_device *pdev)
+{
+	struct android_usb_platform_data *pdata = pdev->dev.platform_data;
+	struct android_dev *dev = _android_dev;
+
+	dev->pdata = pdata;
+
+	return 0;
+}
+
+static struct platform_driver android_platform_driver = {
+	.driver = { .name = "android_usb"},
+};
 
 static int __init init(void)
 {
@@ -1135,6 +1214,8 @@
 	composite_driver.setup = android_setup;
 	composite_driver.disconnect = android_disconnect;
 
+	platform_driver_probe(&android_platform_driver, android_probe);
+
 	return usb_composite_probe(&android_usb_driver, android_bind);
 }
 module_init(init);
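
The android_probe() added above consumes exactly one field of the platform data, update_pid_and_serial_num (dereferenced later in diag_function_bind_config()), and the platform driver binds by name to "android_usb". A minimal board-level hookup could therefore look roughly like the sketch below; the full layout of struct android_usb_platform_data comes from <linux/usb/android.h>, which is not shown in this hunk, so the single-field initializer and the helper names here are assumptions, not part of the patch.

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/usb/android.h>

/* Illustrative only: stash the PID and serial number where the download
 * mode (dload) code can pick them up. */
static int board_update_pid_and_serial_num(uint32_t pid, const char *snum)
{
	pr_debug("android_usb: pid %x serial %s\n", pid, snum ? snum : "");
	return 0;
}

static struct android_usb_platform_data board_android_usb_pdata = {
	.update_pid_and_serial_num = board_update_pid_and_serial_num,
};

static struct platform_device board_android_usb_device = {
	.name	= "android_usb",
	.id	= -1,
	.dev	= { .platform_data = &board_android_usb_pdata },
};

/* Board init registers the device before the gadget's init() calls
 * platform_driver_probe():
 *	platform_device_register(&board_android_usb_device);
 */

Userspace then chooses which DIAG channels to expose by writing a comma-separated list (for example "diag") to the function's "clients" attribute handled by clients_store() above.
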
diff --git a/drivers/usb/gadget/ci13xxx_msm.c b/drivers/usb/gadget/ci13xxx_msm.c
index 139ac94..8a75420 100644
--- a/drivers/usb/gadget/ci13xxx_msm.c
+++ b/drivers/usb/gadget/ci13xxx_msm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -9,11 +9,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
  */
 
 #include <linux/module.h>
@@ -64,7 +59,8 @@
 	.flags			= CI13XXX_REGS_SHARED |
 				  CI13XXX_REQUIRE_TRANSCEIVER |
 				  CI13XXX_PULLUP_ON_VBUS |
-				  CI13XXX_DISABLE_STREAMING,
+				  CI13XXX_DISABLE_STREAMING |
+				  CI13XXX_ZERO_ITC,
 
 	.notify_event		= ci13xxx_msm_notify_event,
 };
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index baaf87e..9a03ca7 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -318,6 +318,17 @@
 	hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_DEVICE);
 	hw_cwrite(CAP_USBMODE, USBMODE_SLOM, USBMODE_SLOM);  /* HW >= 2.3 */
 
+	/*
+	 * The ITC (Interrupt Threshold Control) field sets the maximum rate
+	 * at which the device controller issues interrupts, i.e. the
+	 * maximum interrupt interval measured in micro-frames. Valid values
+	 * are 0, 1, 2, 4, 8, 16, 32 and 64; the default is 8 micro-frames.
+	 * If the CPU can handle interrupts at a faster rate, ITC can be set
+	 * to a lower value to gain performance.
+	 */
+	if (udc->udc_driver->flags & CI13XXX_ZERO_ITC)
+		hw_cwrite(CAP_USBCMD, USBCMD_ITC_MASK, USBCMD_ITC(0));
+
 	if (hw_cread(CAP_USBMODE, USBMODE_CM) != USBMODE_CM_DEVICE) {
 		pr_err("cannot enter in device mode");
 		pr_err("lpm = %i", hw_bank.lpm);
@@ -417,6 +428,10 @@
 		data |= ENDPTCTRL_RXE;
 	}
 	hw_cwrite(CAP_ENDPTCTRL + num * sizeof(u32), mask, data);
+
+	/* make sure endpoint is enabled before returning */
+	mb();
+
 	return 0;
 }
 
@@ -1219,7 +1234,7 @@
 {
 	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
 	unsigned long flags;
-	u32 dump[512];
+	u32 *dump;
 	unsigned i, k, n = 0;
 
 	dbg_trace("[%s] %p\n", __func__, buf);
@@ -1227,9 +1242,12 @@
 		dev_err(dev, "[%s] EINVAL\n", __func__);
 		return 0;
 	}
+	dump = kmalloc(2048, GFP_KERNEL);
+	if (dump == NULL)
+		return -ENOMEM;
 
 	spin_lock_irqsave(udc->lock, flags);
-	k = hw_register_read(dump, sizeof(dump)/sizeof(u32));
+	k = hw_register_read(dump, 512);
 	spin_unlock_irqrestore(udc->lock, flags);
 
 	for (i = 0; i < k; i++) {
@@ -1237,7 +1255,7 @@
 			       "reg[0x%04X] = 0x%08X\n",
 			       i * (unsigned)sizeof(u32), dump[i]);
 	}
-
+	kfree(dump);
 	return n;
 }
 
@@ -1317,6 +1335,42 @@
 }
 static DEVICE_ATTR(requests, S_IRUSR, show_requests, NULL);
 
+static int ci13xxx_wakeup(struct usb_gadget *_gadget)
+{
+	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
+	unsigned long flags;
+	int ret = 0;
+
+	trace();
+
+	spin_lock_irqsave(udc->lock, flags);
+	if (!udc->remote_wakeup) {
+		ret = -EOPNOTSUPP;
+		dbg_trace("remote wakeup feature is not enabled\n");
+		goto out;
+	}
+	if (!hw_cread(CAP_PORTSC, PORTSC_SUSP)) {
+		ret = -EINVAL;
+		dbg_trace("port is not suspended\n");
+		goto out;
+	}
+	hw_cwrite(CAP_PORTSC, PORTSC_FPR, PORTSC_FPR);
+out:
+	spin_unlock_irqrestore(udc->lock, flags);
+	return ret;
+}
+
+static ssize_t usb_remote_wakeup(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+
+	ci13xxx_wakeup(&udc->gadget);
+
+	return count;
+}
+static DEVICE_ATTR(wakeup, S_IWUSR, 0, usb_remote_wakeup);
+
 /**
  * dbg_create_files: initializes the attribute interface
  * @dev: device
@@ -1353,8 +1407,13 @@
 	retval = device_create_file(dev, &dev_attr_requests);
 	if (retval)
 		goto rm_registers;
+	retval = device_create_file(dev, &dev_attr_wakeup);
+	if (retval)
+		goto rm_requests;
 	return 0;
 
+rm_requests:
+	device_remove_file(dev, &dev_attr_requests);
  rm_registers:
 	device_remove_file(dev, &dev_attr_registers);
  rm_qheads:
@@ -1391,6 +1450,7 @@
 	device_remove_file(dev, &dev_attr_events);
 	device_remove_file(dev, &dev_attr_driver);
 	device_remove_file(dev, &dev_attr_device);
+	device_remove_file(dev, &dev_attr_wakeup);
 	return 0;
 }
 
@@ -1619,6 +1679,7 @@
 	udc->gadget.speed = USB_SPEED_UNKNOWN;
 	udc->remote_wakeup = 0;
 	udc->suspended = 0;
+	udc->configured = 0;
 	spin_unlock_irqrestore(udc->lock, flags);
 
 	/* flush all endpoints */
@@ -1930,6 +1991,8 @@
 		do {
 			hw_test_and_set_setup_guard();
 			memcpy(&req, &mEp->qh.ptr->setup, sizeof(req));
+			/* Ensure buffer is read before acknowledging to h/w */
+			mb();
 		} while (!hw_test_and_clear_setup_guard());
 
 		type = req.bRequestType;
@@ -1991,6 +2054,10 @@
 				break;
 			err = isr_setup_status_phase(udc);
 			break;
+		case USB_REQ_SET_CONFIGURATION:
+			if (type == (USB_DIR_OUT|USB_TYPE_STANDARD))
+				udc->configured = !!req.wValue;
+			goto delegate;
 		case USB_REQ_SET_FEATURE:
 			if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
 					le16_to_cpu(req.wValue) ==
@@ -2104,12 +2171,15 @@
 	else if (mEp->type == USB_ENDPOINT_XFER_ISOC)
 		mEp->qh.ptr->cap &= ~QH_MULT;
 	else
-		mEp->qh.ptr->cap &= ~QH_ZLT;
+		mEp->qh.ptr->cap |= QH_ZLT;
 
 	mEp->qh.ptr->cap |=
 		(mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
 	mEp->qh.ptr->td.next |= TD_TERMINATE;   /* needed? */
 
+	/* complete all the updates to ept->head before enabling endpoint*/
+	mb();
+
 	/*
 	 * Enable endpoints in the HW other than ep0 as ep0
 	 * is always enabled
@@ -2467,7 +2537,8 @@
 		if (is_active) {
 			pm_runtime_get_sync(&_gadget->dev);
 			hw_device_reset(udc);
-			hw_device_state(udc->ep0out.qh.dma);
+			if (udc->softconnect)
+				hw_device_state(udc->ep0out.qh.dma);
 		} else {
 			hw_device_state(0);
 			if (udc->udc_driver->notify_event)
@@ -2481,31 +2552,6 @@
 	return 0;
 }
 
-static int ci13xxx_wakeup(struct usb_gadget *_gadget)
-{
-	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
-	unsigned long flags;
-	int ret = 0;
-
-	trace();
-
-	spin_lock_irqsave(udc->lock, flags);
-	if (!udc->remote_wakeup) {
-		ret = -EOPNOTSUPP;
-		dbg_trace("remote wakeup feature is not enabled\n");
-		goto out;
-	}
-	if (!hw_cread(CAP_PORTSC, PORTSC_SUSP)) {
-		ret = -EINVAL;
-		dbg_trace("port is not suspended\n");
-		goto out;
-	}
-	hw_cwrite(CAP_PORTSC, PORTSC_FPR, PORTSC_FPR);
-out:
-	spin_unlock_irqrestore(udc->lock, flags);
-	return ret;
-}
-
 static int ci13xxx_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
 {
 	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
@@ -2515,6 +2561,32 @@
 	return -ENOTSUPP;
 }
 
+static int ci13xxx_pullup(struct usb_gadget *_gadget, int is_active)
+{
+	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
+	unsigned long flags;
+
+	spin_lock_irqsave(udc->lock, flags);
+	udc->softconnect = is_active;
+	if (((udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) &&
+			!udc->vbus_active) || !udc->driver) {
+		spin_unlock_irqrestore(udc->lock, flags);
+		return 0;
+	}
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	if (is_active) {
+		hw_device_state(udc->ep0out.qh.dma);
+	} else {
+		hw_device_state(0);
+		if (udc->udc_driver->notify_event)
+			udc->udc_driver->notify_event(udc,
+				CI13XXX_CONTROLLER_STOPPED_EVENT);
+	}
+	return 0;
+}
+
+
 /**
  * Device operations part of the API to the USB controller hardware,
  * which don't involve endpoints (or i/o)
@@ -2524,6 +2596,7 @@
 	.vbus_session	= ci13xxx_vbus_session,
 	.wakeup		= ci13xxx_wakeup,
 	.vbus_draw	= ci13xxx_vbus_draw,
+	.pullup		= ci13xxx_pullup,
 };
 
 /**
@@ -2627,6 +2700,7 @@
 	/* bind gadget */
 	driver->driver.bus     = NULL;
 	udc->gadget.dev.driver = &driver->driver;
+	udc->softconnect = 1;
 
 	spin_unlock_irqrestore(udc->lock, flags);
 	retval = bind(&udc->gadget);                /* MAY SLEEP */
@@ -2649,6 +2723,9 @@
 		}
 	}
 
+	if (!udc->softconnect)
+		goto done;
+
 	retval = hw_device_state(udc->ep0out.qh.dma);
 	if (retval)
 		pm_runtime_put_sync(&udc->gadget.dev);
diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h
index 2370777..27af8aa 100644
--- a/drivers/usb/gadget/ci13xxx_udc.h
+++ b/drivers/usb/gadget/ci13xxx_udc.h
@@ -108,6 +108,7 @@
 #define CI13XXX_REQUIRE_TRANSCEIVER	BIT(1)
 #define CI13XXX_PULLUP_ON_VBUS		BIT(2)
 #define CI13XXX_DISABLE_STREAMING	BIT(3)
+#define CI13XXX_ZERO_ITC		BIT(4)
 
 #define CI13XXX_CONTROLLER_RESET_EVENT		0
 #define CI13XXX_CONTROLLER_STOPPED_EVENT	1
@@ -131,11 +132,13 @@
 	u8                         remote_wakeup; /* Is remote wakeup feature
 							enabled by the host? */
 	u8                         suspended;  /* suspended by the host */
+	u8                         configured;  /* is device configured */
 	u8                         test_mode;  /* the selected test mode */
 
 	struct usb_gadget_driver  *driver;     /* 3rd party gadget driver */
 	struct ci13xxx_udc_driver *udc_driver; /* device controller driver */
 	int                        vbus_active; /* is VBUS active */
+	int                        softconnect; /* is pull-up enable allowed */
 	struct otg_transceiver    *transceiver; /* Transceiver struct */
 };
 
@@ -189,6 +192,8 @@
 #define    USBMODE_CM_HOST    (0x03UL <<  0)
 #define USBMODE_SLOM          BIT(3)
 #define USBMODE_SDIS          BIT(4)
+#define USBCMD_ITC(n)         ((n) << 16) /* n = 0, 1, 2, 4, 8, 16, 32, 64 */
+#define USBCMD_ITC_MASK       (0xFF << 16)
 
 /* ENDPTCTRL */
 #define ENDPTCTRL_RXS         BIT(0)
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
index 68b1a8e..3fd12b1 100644
--- a/drivers/usb/gadget/f_acm.c
+++ b/drivers/usb/gadget/f_acm.c
@@ -5,6 +5,7 @@
  * Copyright (C) 2008 by David Brownell
  * Copyright (C) 2008 by Nokia Corporation
  * Copyright (C) 2009 by Samsung Electronics
+ * Copyright (c) 2011 Code Aurora Forum. All rights reserved.
  * Author: Michal Nazarewicz (m.nazarewicz@samsung.com)
  *
  * This software is distributed under the terms of the GNU General
@@ -17,6 +18,7 @@
 #include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/device.h>
+#include <linux/usb/android_composite.h>
 
 #include "u_serial.h"
 #include "gadget_chips.h"
@@ -49,6 +51,7 @@
 	struct gserial			port;
 	u8				ctrl_id, data_id;
 	u8				port_num;
+	enum transport_type		transport;
 
 	u8				pending;
 
@@ -83,6 +86,17 @@
 #define ACM_CTRL_DCD		(1 << 0)
 };
 
+static unsigned int no_tty_ports;
+static unsigned int no_sdio_ports;
+static unsigned int no_smd_ports;
+static unsigned int nr_ports;
+
+static struct port_info {
+	enum transport_type	transport;
+	unsigned		port_num;
+	unsigned		client_port_num;
+} gacm_ports[GSERIAL_NO_PORTS];
+
 static inline struct f_acm *func_to_acm(struct usb_function *f)
 {
 	return container_of(f, struct f_acm, port.func);
@@ -93,6 +107,95 @@
 	return container_of(p, struct f_acm, port);
 }
 
+static char *transport_to_str(enum transport_type t)
+{
+	switch (t) {
+	case USB_GADGET_FSERIAL_TRANSPORT_TTY:
+		return "TTY";
+	case USB_GADGET_FSERIAL_TRANSPORT_SDIO:
+		return "SDIO";
+	case USB_GADGET_FSERIAL_TRANSPORT_SMD:
+		return "SMD";
+	}
+
+	return "NONE";
+}
+
+static int gport_setup(struct usb_configuration *c)
+{
+	int ret = 0;
+
+	pr_debug("%s: no_tty_ports:%u no_sdio_ports: %u nr_ports:%u\n",
+			__func__, no_tty_ports, no_sdio_ports, nr_ports);
+
+	if (no_tty_ports)
+		ret = gserial_setup(c->cdev->gadget, no_tty_ports);
+	if (no_sdio_ports)
+		ret = gsdio_setup(c->cdev->gadget, no_sdio_ports);
+	if (no_smd_ports)
+		ret = gsmd_setup(c->cdev->gadget, no_smd_ports);
+
+	return ret;
+}
+
+static int gport_connect(struct f_acm *acm)
+{
+	unsigned port_num;
+
+	port_num = gacm_ports[acm->port_num].client_port_num;
+
+
+	pr_debug("%s: transport:%s f_acm:%p gserial:%p port_num:%d cl_port_no:%d\n",
+			__func__, transport_to_str(acm->transport),
+			acm, &acm->port, acm->port_num, port_num);
+
+	switch (acm->transport) {
+	case USB_GADGET_FSERIAL_TRANSPORT_TTY:
+		gserial_connect(&acm->port, port_num);
+		break;
+	case USB_GADGET_FSERIAL_TRANSPORT_SDIO:
+		gsdio_connect(&acm->port, port_num);
+		break;
+	case USB_GADGET_FSERIAL_TRANSPORT_SMD:
+		gsmd_connect(&acm->port, port_num);
+		break;
+	default:
+		pr_err("%s: Un-supported transport: %s\n", __func__,
+				transport_to_str(acm->transport));
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int gport_disconnect(struct f_acm *acm)
+{
+	unsigned port_num;
+
+	port_num = gacm_ports[acm->port_num].client_port_num;
+
+	pr_debug("%s: transport:%s f_acm:%p gserial:%p port_num:%d cl_pno:%d\n",
+			__func__, transport_to_str(acm->transport),
+			acm, &acm->port, acm->port_num, port_num);
+
+	switch (acm->transport) {
+	case USB_GADGET_FSERIAL_TRANSPORT_TTY:
+		gserial_disconnect(&acm->port);
+		break;
+	case USB_GADGET_FSERIAL_TRANSPORT_SDIO:
+		gsdio_disconnect(&acm->port, port_num);
+		break;
+	case USB_GADGET_FSERIAL_TRANSPORT_SMD:
+		gsmd_disconnect(&acm->port, port_num);
+		break;
+	default:
+		pr_err("%s: Un-supported transport:%s\n", __func__,
+				transport_to_str(acm->transport));
+		return -ENODEV;
+	}
+
+	return 0;
+}
 /*-------------------------------------------------------------------------*/
 
 /* notification endpoint uses smallish and infrequent fixed-size messages */
@@ -333,8 +436,7 @@
 	/* SET_LINE_CODING ... just read and save what the host sends */
 	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
 			| USB_CDC_REQ_SET_LINE_CODING:
-		if (w_length != sizeof(struct usb_cdc_line_coding)
-				|| w_index != acm->ctrl_id)
+		if (w_length != sizeof(struct usb_cdc_line_coding))
 			goto invalid;
 
 		value = w_length;
@@ -345,8 +447,6 @@
 	/* GET_LINE_CODING ... return what host sent, or initial value */
 	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
 			| USB_CDC_REQ_GET_LINE_CODING:
-		if (w_index != acm->ctrl_id)
-			goto invalid;
 
 		value = min_t(unsigned, w_length,
 				sizeof(struct usb_cdc_line_coding));
@@ -356,9 +456,6 @@
 	/* SET_CONTROL_LINE_STATE ... save what the host sent */
 	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
 			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
-		if (w_index != acm->ctrl_id)
-			goto invalid;
-
 		value = 0;
 
 		/* FIXME we should not allow data to flow until the
@@ -366,6 +463,12 @@
 		 * that bit, we should return to that no-flow state.
 		 */
 		acm->port_handshake_bits = w_value;
+		if (acm->port.notify_modem) {
+			unsigned port_num =
+				gacm_ports[acm->port_num].client_port_num;
+
+			acm->port.notify_modem(&acm->port, port_num, w_value);
+		}
 		break;
 
 	default:
@@ -415,7 +518,7 @@
 	} else if (intf == acm->data_id) {
 		if (acm->port.in->driver_data) {
 			DBG(cdev, "reset acm ttyGS%d\n", acm->port_num);
-			gserial_disconnect(&acm->port);
+			gport_disconnect(acm);
 		} else {
 			DBG(cdev, "activate acm ttyGS%d\n", acm->port_num);
 		}
@@ -423,7 +526,7 @@
 				acm->hs.in, acm->fs.in);
 		acm->port.out_desc = ep_choose(cdev->gadget,
 				acm->hs.out, acm->fs.out);
-		gserial_connect(&acm->port, acm->port_num);
+		gport_connect(acm);
 
 	} else
 		return -EINVAL;
@@ -437,7 +540,7 @@
 	struct usb_composite_dev *cdev = f->config->cdev;
 
 	DBG(cdev, "acm ttyGS%d deactivated\n", acm->port_num);
-	gserial_disconnect(&acm->port);
+	gport_disconnect(acm);
 	usb_ep_disable(acm->notify);
 	acm->notify->driver_data = NULL;
 }
@@ -568,6 +671,15 @@
 	return acm_notify_serial_state(acm);
 }
 
+static int acm_send_modem_ctrl_bits(struct gserial *port, int ctrl_bits)
+{
+	struct f_acm *acm = port_to_acm(port);
+
+	acm->serial_state = ctrl_bits;
+
+	return acm_notify_serial_state(acm);
+}
+
 /*-------------------------------------------------------------------------*/
 
 /* ACM function driver setup/binding */
@@ -764,12 +876,14 @@
 	spin_lock_init(&acm->lock);
 
 	acm->port_num = port_num;
+	acm->transport = gacm_ports[port_num].transport;
 
 	acm->port.connect = acm_connect;
 	acm->port.disconnect = acm_disconnect;
 	acm->port.send_break = acm_send_break;
+	acm->port.send_modem_ctrl_bits = acm_send_modem_ctrl_bits;
 
-	acm->port.func.name = kasprintf(GFP_KERNEL, "acm%u", port_num);
+	acm->port.func.name = kasprintf(GFP_KERNEL, "acm%u", port_num + 1);
 	if (!acm->port.func.name) {
 		kfree(acm);
 		return -ENOMEM;
@@ -787,3 +901,117 @@
 		kfree(acm);
 	return status;
 }
+
+#ifdef CONFIG_USB_ANDROID_ACM
+#include <linux/platform_device.h>
+
+static struct acm_platform_data *acm_pdata;
+
+static int acm_probe(struct platform_device *pdev)
+{
+	acm_pdata = pdev->dev.platform_data;
+	return 0;
+}
+
+static struct platform_driver acm_platform_driver = {
+	.driver = { .name = "acm", },
+	.probe = acm_probe,
+};
+
+int acm1_function_bind_config(struct usb_configuration *c)
+{
+	int ret = acm_bind_config(c, 0);
+	if (ret == 0)
+		gport_setup(c);
+	return ret;
+}
+
+int acm2_function_bind_config(struct usb_configuration *c)
+{
+	int ret = acm_bind_config(c, 1);
+
+	return ret;
+}
+
+static struct android_usb_function acm1_function = {
+	.name = "acm1",
+	.bind_config = acm1_function_bind_config,
+};
+
+static struct android_usb_function acm2_function = {
+	.name = "acm2",
+	.bind_config = acm2_function_bind_config,
+};
+
+static int facm_remove(struct platform_device *pdev)
+{
+	gserial_cleanup();
+
+	return 0;
+}
+
+static struct platform_driver usb_facm = {
+	.remove		= facm_remove,
+	.driver = {
+		.name = "usb_facm",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init facm_probe(struct platform_device *pdev)
+{
+	struct usb_gadget_facm_pdata *pdata = pdev->dev.platform_data;
+	int i;
+
+	dev_dbg(&pdev->dev, "%s: probe\n", __func__);
+
+	if (!pdata)
+		goto probe_android_register;
+
+	for (i = 0; i < GSERIAL_NO_PORTS; i++) {
+		gacm_ports[i].transport = pdata->transport[i];
+		gacm_ports[i].port_num = i;
+
+		switch (gacm_ports[i].transport) {
+		case USB_GADGET_FSERIAL_TRANSPORT_TTY:
+			gacm_ports[i].client_port_num = no_tty_ports;
+			no_tty_ports++;
+			break;
+		case USB_GADGET_FSERIAL_TRANSPORT_SDIO:
+			gacm_ports[i].client_port_num = no_sdio_ports;
+			no_sdio_ports++;
+			break;
+		case USB_GADGET_FSERIAL_TRANSPORT_SMD:
+			gacm_ports[i].client_port_num = no_smd_ports;
+			no_smd_ports++;
+			break;
+		default:
+			pr_err("%s: Unsupported transport: %u\n",
+					__func__, gacm_ports[i].transport);
+			return -ENODEV;
+		}
+
+		nr_ports++;
+	}
+
+	pr_info("%s:gport:tty_ports:%u sdio_ports:%u "
+			"smd_ports:%u nr_ports:%u\n",
+			__func__, no_tty_ports, no_sdio_ports,
+			no_smd_ports, nr_ports);
+
+probe_android_register:
+	android_register_function(&acm1_function);
+	android_register_function(&acm2_function);
+
+	return 0;
+}
+
+static int __init init(void)
+{
+	printk(KERN_INFO "f_acm init\n");
+
+	return platform_driver_probe(&usb_facm, facm_probe);
+}
+module_init(init);
+
+#endif /* CONFIG_USB_ANDROID_ACM */
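
facm_probe() above only mirrors pdata->transport[i] into gacm_ports[], so the board side needs little more than a transport table and a platform device named "usb_facm". The sketch below assumes struct usb_gadget_facm_pdata (declared in one of the headers included by this file, not shown in the patch) carries a transport[] array sized GSERIAL_NO_PORTS; the routing chosen here is purely illustrative.

#include <linux/platform_device.h>
#include <linux/usb/android_composite.h>

/* Sketch: first ACM instance over a TTY, second over an SMD channel,
 * matching the acm1/acm2 android functions registered above. */
static struct usb_gadget_facm_pdata board_facm_pdata = {
	.transport = {
		USB_GADGET_FSERIAL_TRANSPORT_TTY,
		USB_GADGET_FSERIAL_TRANSPORT_SMD,
	},
};

static struct platform_device board_facm_device = {
	.name	= "usb_facm",
	.id	= -1,
	.dev	= { .platform_data = &board_facm_pdata },
};
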
diff --git a/drivers/usb/gadget/f_diag.c b/drivers/usb/gadget/f_diag.c
new file mode 100644
index 0000000..53660186
--- /dev/null
+++ b/drivers/usb/gadget/f_diag.c
@@ -0,0 +1,752 @@
+/* drivers/usb/gadget/f_diag.c
+ * Diag Function Device - Route ARM9 and ARM11 DIAG messages
+ * between HOST and DEVICE.
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <mach/usbdiag.h>
+#include <mach/rpc_hsusb.h>
+
+#include <linux/usb/composite.h>
+#include <linux/usb/gadget.h>
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+
+static DEFINE_SPINLOCK(ch_lock);
+static LIST_HEAD(usb_diag_ch_list);
+
+static struct usb_interface_descriptor intf_desc = {
+	.bLength            =	sizeof intf_desc,
+	.bDescriptorType    =	USB_DT_INTERFACE,
+	.bNumEndpoints      =	2,
+	.bInterfaceClass    =	0xFF,
+	.bInterfaceSubClass =	0xFF,
+	.bInterfaceProtocol =	0xFF,
+};
+
+static struct usb_endpoint_descriptor hs_bulk_in_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(512),
+	.bInterval        =	0,
+};
+static struct usb_endpoint_descriptor fs_bulk_in_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(64),
+	.bInterval        =	0,
+};
+
+static struct usb_endpoint_descriptor hs_bulk_out_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(512),
+	.bInterval        =	0,
+};
+
+static struct usb_endpoint_descriptor fs_bulk_out_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(64),
+	.bInterval        =	0,
+};
+
+static struct usb_descriptor_header *fs_diag_desc[] = {
+	(struct usb_descriptor_header *) &intf_desc,
+	(struct usb_descriptor_header *) &fs_bulk_in_desc,
+	(struct usb_descriptor_header *) &fs_bulk_out_desc,
+	NULL,
+};
+static struct usb_descriptor_header *hs_diag_desc[] = {
+	(struct usb_descriptor_header *) &intf_desc,
+	(struct usb_descriptor_header *) &hs_bulk_in_desc,
+	(struct usb_descriptor_header *) &hs_bulk_out_desc,
+	NULL,
+};
+
+/**
+ * struct diag_context - USB diag function driver private structure
+ * @function: function structure for USB interface
+ * @out: USB OUT endpoint struct
+ * @in: USB IN endpoint struct
+ * @in_desc: USB IN endpoint descriptor struct
+ * @out_desc: USB OUT endpoint descriptor struct
+ * @read_pool: List of requests used for Rx (OUT ep)
+ * @write_pool: List of requests used for Tx (IN ep)
+ * @config_work: Work item scheduled after the interface is configured; it
+ *               notifies the CONNECT event to the diag char driver and
+ *               updates the product id and serial number in MODEM/IMEM.
+ * @lock: Spinlock to protect the read_pool and write_pool lists
+ * @cdev: USB composite device struct
+ * @ch: USB diag channel
+ *
+ */
+struct diag_context {
+	struct usb_function function;
+	struct usb_ep *out;
+	struct usb_ep *in;
+	struct usb_endpoint_descriptor  *in_desc;
+	struct usb_endpoint_descriptor  *out_desc;
+	struct list_head read_pool;
+	struct list_head write_pool;
+	struct work_struct config_work;
+	spinlock_t lock;
+	unsigned configured;
+	struct usb_composite_dev *cdev;
+	int (*update_pid_and_serial_num)(uint32_t, const char *);
+	struct usb_diag_ch ch;
+
+	/* pkt counters */
+	unsigned long dpkts_tolaptop;
+	unsigned long dpkts_tomodem;
+	unsigned dpkts_tolaptop_pending;
+};
+
+static inline struct diag_context *func_to_diag(struct usb_function *f)
+{
+	return container_of(f, struct diag_context, function);
+}
+
+static void usb_config_work_func(struct work_struct *work)
+{
+	struct diag_context *ctxt = container_of(work,
+			struct diag_context, config_work);
+	struct usb_composite_dev *cdev = ctxt->cdev;
+	struct usb_gadget_strings *table;
+	struct usb_string *s;
+
+	if (ctxt->ch.notify)
+		ctxt->ch.notify(ctxt->ch.priv, USB_DIAG_CONNECT, NULL);
+
+	if (!ctxt->update_pid_and_serial_num)
+		return;
+
+	/* pass on product id and serial number to dload */
+	if (!cdev->desc.iSerialNumber) {
+		ctxt->update_pid_and_serial_num(
+					cdev->desc.idProduct, 0);
+		return;
+	}
+
+	/*
+	 * The serial number is filled in by the composite driver, so it is
+	 * reasonable to assume that it will always be found in the first
+	 * table of strings.
+	 */
+	table = *(cdev->driver->strings);
+	for (s = table->strings; s && s->s; s++)
+		if (s->id == cdev->desc.iSerialNumber) {
+			ctxt->update_pid_and_serial_num(
+					cdev->desc.idProduct, s->s);
+			break;
+		}
+}
+
+static void diag_write_complete(struct usb_ep *ep,
+		struct usb_request *req)
+{
+	struct diag_context *ctxt = ep->driver_data;
+	struct diag_request *d_req = req->context;
+	unsigned long flags;
+
+	ctxt->dpkts_tolaptop_pending--;
+
+	if (!req->status) {
+		if ((req->length >= ep->maxpacket) &&
+				((req->length % ep->maxpacket) == 0)) {
+			ctxt->dpkts_tolaptop_pending++;
+			req->length = 0;
+			d_req->actual = req->actual;
+			d_req->status = req->status;
+			/* Queue zero length packet */
+			usb_ep_queue(ctxt->in, req, GFP_ATOMIC);
+			return;
+		}
+	}
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+	list_add_tail(&req->list, &ctxt->write_pool);
+	if (req->length != 0) {
+		d_req->actual = req->actual;
+		d_req->status = req->status;
+	}
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+
+	if (ctxt->ch.notify)
+		ctxt->ch.notify(ctxt->ch.priv, USB_DIAG_WRITE_DONE, d_req);
+}
+
+static void diag_read_complete(struct usb_ep *ep,
+		struct usb_request *req)
+{
+	struct diag_context *ctxt = ep->driver_data;
+	struct diag_request *d_req = req->context;
+	unsigned long flags;
+
+	d_req->actual = req->actual;
+	d_req->status = req->status;
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+	list_add_tail(&req->list, &ctxt->read_pool);
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+
+	ctxt->dpkts_tomodem++;
+
+	if (ctxt->ch.notify)
+		ctxt->ch.notify(ctxt->ch.priv, USB_DIAG_READ_DONE, d_req);
+}
+
+/**
+ * usb_diag_open() - Open a diag channel over USB
+ * @name: Name of the channel
+ * @priv: Private structure pointer which will be passed in notify()
+ * @notify: Callback function to receive notifications
+ *
+ * This function iterates over the available channels and returns
+ * the channel handler if the name matches. The notify callback is called
+ * for CONNECT, DISCONNECT, READ_DONE and WRITE_DONE events.
+ *
+ */
+struct usb_diag_ch *usb_diag_open(const char *name, void *priv,
+		void (*notify)(void *, unsigned, struct diag_request *))
+{
+	struct usb_diag_ch *ch;
+	struct diag_context *ctxt;
+	unsigned long flags;
+	int found = 0;
+
+	spin_lock_irqsave(&ch_lock, flags);
+	/* Check if we already have a channel with this name */
+	list_for_each_entry(ch, &usb_diag_ch_list, list) {
+		if (!strcmp(name, ch->name)) {
+			found = 1;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&ch_lock, flags);
+
+	if (!found) {
+		ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+		if (!ctxt)
+			return ERR_PTR(-ENOMEM);
+
+		ch = &ctxt->ch;
+	}
+
+	ch->name = name;
+	ch->priv = priv;
+	ch->notify = notify;
+
+	spin_lock_irqsave(&ch_lock, flags);
+	/* only a newly allocated channel needs to be added to the list;
+	 * a re-opened channel is already on it */
+	if (!found)
+		list_add_tail(&ch->list, &usb_diag_ch_list);
+	spin_unlock_irqrestore(&ch_lock, flags);
+
+	return ch;
+}
+EXPORT_SYMBOL(usb_diag_open);
+
+/**
+ * usb_diag_close() - Close a diag channel over USB
+ * @ch: Channel handler
+ *
+ * This function closes the diag channel.
+ *
+ */
+void usb_diag_close(struct usb_diag_ch *ch)
+{
+	struct diag_context *dev = container_of(ch, struct diag_context, ch);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ch_lock, flags);
+	ch->priv = NULL;
+	ch->notify = NULL;
+	/* Free-up the resources if channel is no more active */
+	if (!ch->priv_usb) {
+		list_del(&ch->list);
+		kfree(dev);
+	}
+
+	spin_unlock_irqrestore(&ch_lock, flags);
+}
+EXPORT_SYMBOL(usb_diag_close);
+
+/**
+ * usb_diag_free_req() - Free USB requests
+ * @ch: Channel handler
+ *
+ * This function frees the read and write USB requests for the interface
+ * associated with this channel.
+ *
+ */
+void usb_diag_free_req(struct usb_diag_ch *ch)
+{
+	struct diag_context *ctxt = ch->priv_usb;
+	struct usb_request *req;
+	struct list_head *act, *tmp;
+
+	if (!ctxt)
+		return;
+
+	list_for_each_safe(act, tmp, &ctxt->write_pool) {
+		req = list_entry(act, struct usb_request, list);
+		list_del(&req->list);
+		usb_ep_free_request(ctxt->in, req);
+	}
+
+	list_for_each_safe(act, tmp, &ctxt->read_pool) {
+		req = list_entry(act, struct usb_request, list);
+		list_del(&req->list);
+		usb_ep_free_request(ctxt->out, req);
+	}
+}
+EXPORT_SYMBOL(usb_diag_free_req);
+
+/**
+ * usb_diag_alloc_req() - Allocate USB requests
+ * @ch: Channel handler
+ * @n_write: Number of requests for Tx
+ * @n_read: Number of requests for Rx
+ *
+ * This function allocates the read and write USB requests for the interface
+ * associated with this channel. The actual buffers are not allocated here;
+ * they are passed in by the diag char driver.
+ *
+ */
+int usb_diag_alloc_req(struct usb_diag_ch *ch, int n_write, int n_read)
+{
+	struct diag_context *ctxt = ch->priv_usb;
+	struct usb_request *req;
+	int i;
+
+	if (!ctxt)
+		return -ENODEV;
+
+	for (i = 0; i < n_write; i++) {
+		req = usb_ep_alloc_request(ctxt->in, GFP_ATOMIC);
+		if (!req)
+			goto fail;
+		req->complete = diag_write_complete;
+		list_add_tail(&req->list, &ctxt->write_pool);
+	}
+
+	for (i = 0; i < n_read; i++) {
+		req = usb_ep_alloc_request(ctxt->out, GFP_ATOMIC);
+		if (!req)
+			goto fail;
+		req->complete = diag_read_complete;
+		list_add_tail(&req->list, &ctxt->read_pool);
+	}
+
+	return 0;
+
+fail:
+	usb_diag_free_req(ch);
+	return -ENOMEM;
+
+}
+EXPORT_SYMBOL(usb_diag_alloc_req);
+
+/**
+ * usb_diag_read() - Read data from USB diag channel
+ * @ch: Channel handler
+ * @d_req: Diag request struct
+ *
+ * Enqueue a request on OUT endpoint of the interface corresponding to this
+ * channel. This function returns proper error code when interface is not
+ * in configured state, no Rx requests available and ep queue is failed.
+ *
+ * This function operates asynchronously. READ_DONE event is notified after
+ * completion of OUT request.
+ *
+ */
+int usb_diag_read(struct usb_diag_ch *ch, struct diag_request *d_req)
+{
+	struct diag_context *ctxt = ch->priv_usb;
+	unsigned long flags;
+	struct usb_request *req;
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+
+	if (!ctxt->configured) {
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+		return -EIO;
+	}
+
+	if (list_empty(&ctxt->read_pool)) {
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+		ERROR(ctxt->cdev, "%s: no requests available\n", __func__);
+		return -EAGAIN;
+	}
+
+	req = list_first_entry(&ctxt->read_pool, struct usb_request, list);
+	list_del(&req->list);
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+
+	req->buf = d_req->buf;
+	req->length = d_req->length;
+	req->context = d_req;
+	if (usb_ep_queue(ctxt->out, req, GFP_ATOMIC)) {
+		/* If error add the link to linked list again*/
+		spin_lock_irqsave(&ctxt->lock, flags);
+		list_add_tail(&req->list, &ctxt->read_pool);
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+		ERROR(ctxt->cdev, "%s: cannot queue"
+				" read request\n", __func__);
+		return -EIO;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(usb_diag_read);
+
+/**
+ * usb_diag_write() - Write data from USB diag channel
+ * @ch: Channel handler
+ * @d_req: Diag request struct
+ *
+ * Enqueue a request on the IN endpoint of the interface corresponding to
+ * this channel. This function returns an appropriate error code when the
+ * interface is not configured, no Tx requests are available, or queueing
+ * on the endpoint fails.
+ *
+ * This function operates asynchronously. WRITE_DONE event is notified after
+ * completion of IN request.
+ *
+ */
+int usb_diag_write(struct usb_diag_ch *ch, struct diag_request *d_req)
+{
+	struct diag_context *ctxt = ch->priv_usb;
+	unsigned long flags;
+	struct usb_request *req = NULL;
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+
+	if (!ctxt->configured) {
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+		return -EIO;
+	}
+
+	if (list_empty(&ctxt->write_pool)) {
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+		ERROR(ctxt->cdev, "%s: no requests available\n", __func__);
+		return -EAGAIN;
+	}
+
+	req = list_first_entry(&ctxt->write_pool, struct usb_request, list);
+	list_del(&req->list);
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+
+	req->buf = d_req->buf;
+	req->length = d_req->length;
+	req->context = d_req;
+	if (usb_ep_queue(ctxt->in, req, GFP_ATOMIC)) {
+		/* If error add the link to linked list again*/
+		spin_lock_irqsave(&ctxt->lock, flags);
+		list_add_tail(&req->list, &ctxt->write_pool);
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+		ERROR(ctxt->cdev, "%s: cannot queue"
+				" write request\n", __func__);
+		return -EIO;
+	}
+
+	ctxt->dpkts_tolaptop++;
+	ctxt->dpkts_tolaptop_pending++;
+
+	return 0;
+}
+EXPORT_SYMBOL(usb_diag_write);
+
+static void diag_function_disable(struct usb_function *f)
+{
+	struct diag_context  *dev = func_to_diag(f);
+	unsigned long flags;
+
+	DBG(dev->cdev, "diag_function_disable\n");
+
+	spin_lock_irqsave(&dev->lock, flags);
+	dev->configured = 0;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	if (dev->ch.notify)
+		dev->ch.notify(dev->ch.priv, USB_DIAG_DISCONNECT, NULL);
+
+	usb_ep_disable(dev->in);
+	dev->in->driver_data = NULL;
+
+	usb_ep_disable(dev->out);
+	dev->out->driver_data = NULL;
+
+}
+
+static int diag_function_set_alt(struct usb_function *f,
+		unsigned intf, unsigned alt)
+{
+	struct diag_context  *dev = func_to_diag(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	unsigned long flags;
+	struct usb_diag_ch *ch;
+	int rc = 0;
+
+	dev->in_desc = ep_choose(cdev->gadget,
+			&hs_bulk_in_desc, &fs_bulk_in_desc);
+	dev->out_desc = ep_choose(cdev->gadget,
+			&hs_bulk_out_desc, &fs_bulk_out_desc);
+	dev->in->driver_data = dev;
+	rc = usb_ep_enable(dev->in, dev->in_desc);
+	if (rc) {
+		ERROR(dev->cdev, "can't enable %s, result %d\n",
+						dev->in->name, rc);
+		return rc;
+	}
+	dev->out->driver_data = dev;
+	rc = usb_ep_enable(dev->out, dev->out_desc);
+	if (rc) {
+		ERROR(dev->cdev, "can't enable %s, result %d\n",
+						dev->out->name, rc);
+		usb_ep_disable(dev->in);
+		return rc;
+	}
+	schedule_work(&dev->config_work);
+
+	list_for_each_entry(ch, &usb_diag_ch_list, list) {
+		struct diag_context *ctxt;
+
+		ctxt = ch->priv_usb;
+		/* skip channels that are not claimed by a USB interface */
+		if (!ctxt)
+			continue;
+
+		ctxt->dpkts_tolaptop = 0;
+		ctxt->dpkts_tomodem = 0;
+		ctxt->dpkts_tolaptop_pending = 0;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+	dev->configured = 1;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return rc;
+}
+
+static void diag_function_unbind(struct usb_configuration *c,
+		struct usb_function *f)
+{
+	struct diag_context *ctxt = func_to_diag(f);
+
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+
+	usb_free_descriptors(f->descriptors);
+	ctxt->ch.priv_usb = NULL;
+}
+
+static int diag_function_bind(struct usb_configuration *c,
+		struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct diag_context *ctxt = func_to_diag(f);
+	struct usb_ep *ep;
+	int status = -ENODEV;
+
+	intf_desc.bInterfaceNumber =  usb_interface_id(c, f);
+
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_bulk_in_desc);
+	if (!ep)
+		goto fail;
+	ctxt->in = ep;
+	ep->driver_data = ctxt;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_bulk_out_desc);
+	if (!ep)
+		goto fail;
+	ctxt->out = ep;
+	ep->driver_data = ctxt;
+
+	/* copy descriptors, and track endpoint copies */
+	f->descriptors = usb_copy_descriptors(fs_diag_desc);
+	if (!f->descriptors)
+		goto fail;
+
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		hs_bulk_in_desc.bEndpointAddress =
+				fs_bulk_in_desc.bEndpointAddress;
+		hs_bulk_out_desc.bEndpointAddress =
+				fs_bulk_out_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(hs_diag_desc);
+	}
+	return 0;
+fail:
+	if (ctxt->out)
+		ctxt->out->driver_data = NULL;
+	if (ctxt->in)
+		ctxt->in->driver_data = NULL;
+	return status;
+
+}
+
+int diag_function_add(struct usb_configuration *c, const char *name,
+			int (*update_pid)(uint32_t, const char *))
+{
+	struct diag_context *dev;
+	struct usb_diag_ch *_ch;
+	int found = 0, ret;
+
+	DBG(c->cdev, "diag_function_add\n");
+
+	list_for_each_entry(_ch, &usb_diag_ch_list, list) {
+		if (!strcmp(name, _ch->name)) {
+			found = 1;
+			break;
+		}
+	}
+	if (!found) {
+		ERROR(c->cdev, "unable to get diag usb channel\n");
+		return -ENODEV;
+	}
+
+	dev = container_of(_ch, struct diag_context, ch);
+	/* claim the channel for this USB interface */
+	_ch->priv_usb = dev;
+
+	dev->update_pid_and_serial_num = update_pid;
+	dev->cdev = c->cdev;
+	dev->function.name = _ch->name;
+	dev->function.descriptors = fs_diag_desc;
+	dev->function.hs_descriptors = hs_diag_desc;
+	dev->function.bind = diag_function_bind;
+	dev->function.unbind = diag_function_unbind;
+	dev->function.set_alt = diag_function_set_alt;
+	dev->function.disable = diag_function_disable;
+	spin_lock_init(&dev->lock);
+	INIT_LIST_HEAD(&dev->read_pool);
+	INIT_LIST_HEAD(&dev->write_pool);
+	INIT_WORK(&dev->config_work, usb_config_work_func);
+
+	ret = usb_add_function(c, &dev->function);
+	if (ret) {
+		INFO(c->cdev, "usb_add_function failed\n");
+		_ch->priv_usb = NULL;
+	}
+
+	return ret;
+}
+
+
+#if defined(CONFIG_DEBUG_FS)
+static char debug_buffer[PAGE_SIZE];
+
+static ssize_t debug_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	char *buf = debug_buffer;
+	int temp = 0;
+	struct usb_diag_ch *ch;
+
+	list_for_each_entry(ch, &usb_diag_ch_list, list) {
+		struct diag_context *ctxt;
+
+		ctxt = ch->priv_usb;
+
+		temp += scnprintf(buf + temp, PAGE_SIZE - temp,
+				"---Name: %s---\n"
+				"dpkts_tolaptop: %lu\n"
+				"dpkts_tomodem:  %lu\n"
+				"pkts_tolaptop_pending: %u\n",
+				ch->name, ctxt->dpkts_tolaptop,
+				ctxt->dpkts_tomodem,
+				ctxt->dpkts_tolaptop_pending);
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, buf, temp);
+}
+
+static ssize_t debug_reset_stats(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	struct usb_diag_ch *ch;
+
+	list_for_each_entry(ch, &usb_diag_ch_list, list) {
+		struct diag_context *ctxt;
+
+		ctxt = ch->priv_usb;
+
+		ctxt->dpkts_tolaptop = 0;
+		ctxt->dpkts_tomodem = 0;
+		ctxt->dpkts_tolaptop_pending = 0;
+	}
+
+	return count;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static const struct file_operations debug_fdiag_ops = {
+	.open = debug_open,
+	.read = debug_read_stats,
+	.write = debug_reset_stats,
+};
+
+static void fdiag_debugfs_init(void)
+{
+	struct dentry *dent;
+
+	dent = debugfs_create_dir("usb_diag", 0);
+	if (IS_ERR(dent))
+		return;
+
+	debugfs_create_file("status", 0444, dent, 0, &debug_fdiag_ops);
+}
+#else
+static void fdiag_debugfs_init(void)
+{
+	return;
+}
+#endif
+
+static void diag_cleanup(void)
+{
+	struct diag_context *dev;
+	struct list_head *act, *tmp;
+	struct usb_diag_ch *_ch;
+	unsigned long flags;
+
+	list_for_each_safe(act, tmp, &usb_diag_ch_list) {
+		_ch = list_entry(act, struct usb_diag_ch, list);
+		dev = container_of(_ch, struct diag_context, ch);
+
+		spin_lock_irqsave(&ch_lock, flags);
+		/* Free if diagchar is not using the channel anymore */
+		if (!_ch->priv) {
+			list_del(&_ch->list);
+			kfree(dev);
+		}
+		spin_unlock_irqrestore(&ch_lock, flags);
+
+	}
+}
+
+static int diag_setup(void)
+{
+	fdiag_debugfs_init();
+
+	return 0;
+}
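
For reference, this is a minimal sketch of the client side of the channel API exported above (usb_diag_open(), usb_diag_alloc_req(), usb_diag_read()), roughly what the diag char driver mentioned in the kerneldoc is expected to do. The channel name "diag", the buffer size and the request counts are illustrative assumptions; struct diag_request and the USB_DIAG_* events come from <mach/usbdiag.h>.

#include <linux/err.h>
#include <linux/init.h>
#include <mach/usbdiag.h>

static struct usb_diag_ch *diag_ch;
static struct diag_request diag_read_req;
static char diag_read_buf[512];

static void diag_usb_notify(void *priv, unsigned event,
			    struct diag_request *d_req)
{
	switch (event) {
	case USB_DIAG_CONNECT:
		/* Interface configured: allocate one Tx and one Rx request
		 * (buffers are supplied by the client) and queue the first
		 * read.  A real client would also free these on DISCONNECT. */
		if (usb_diag_alloc_req(diag_ch, 1, 1))
			break;
		diag_read_req.buf = diag_read_buf;
		diag_read_req.length = sizeof(diag_read_buf);
		usb_diag_read(diag_ch, &diag_read_req);
		break;
	case USB_DIAG_READ_DONE:
		/* d_req->actual bytes arrived from the host; process them,
		 * then re-queue the request for the next packet. */
		usb_diag_read(diag_ch, d_req);
		break;
	case USB_DIAG_WRITE_DONE:
	case USB_DIAG_DISCONNECT:
	default:
		break;
	}
}

static int __init diag_client_init(void)
{
	diag_ch = usb_diag_open("diag", NULL, diag_usb_notify);

	return IS_ERR(diag_ch) ? PTR_ERR(diag_ch) : 0;
}
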
diff --git a/drivers/usb/gadget/f_diag.h b/drivers/usb/gadget/f_diag.h
new file mode 100644
index 0000000..82d9a25
--- /dev/null
+++ b/drivers/usb/gadget/f_diag.h
@@ -0,0 +1,24 @@
+/* drivers/usb/gadget/f_diag.h
+ *
+ * Diag Function Device - Route DIAG frames between SMD and USB
+ *
+ * Copyright (C) 2008-2009 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __F_DIAG_H
+#define __F_DIAG_H
+
+int diag_function_add(struct usb_configuration *c, const char *name,
+			int (*update_pid)(uint32_t, const char *));
+
+#endif /* __F_DIAG_H */
+
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index 5440c6d..ccd9c2d 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -312,7 +312,10 @@
 
 #include "storage_common.c"
 
-
+#ifdef CONFIG_USB_CSW_HACK
+static int write_error_after_csw_sent;
+static int csw_hack_sent;
+#endif
 /*-------------------------------------------------------------------------*/
 
 struct fsg_dev;
@@ -469,6 +472,7 @@
 }
 
 typedef void (*fsg_routine_t)(struct fsg_dev *);
+static int send_status(struct fsg_common *common);
 
 static int exception_in_progress(struct fsg_common *common)
 {
@@ -625,7 +629,7 @@
 		if (ctrl->bRequestType !=
 		    (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
 			break;
-		if (w_index != fsg->interface_number || w_value != 0)
+		if (w_value != 0)
 			return -EDOM;
 
 		/*
@@ -640,7 +644,7 @@
 		if (ctrl->bRequestType !=
 		    (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
 			break;
-		if (w_index != fsg->interface_number || w_value != 0)
+		if (w_value != 0)
 			return -EDOM;
 		VDBG(fsg, "get max LUN\n");
 		*(u8 *)req->buf = fsg->common->nluns - 1;
@@ -881,6 +885,9 @@
 	ssize_t			nwritten;
 	int			rc;
 
+#ifdef CONFIG_USB_CSW_HACK
+	int			i;
+#endif
 	if (curlun->ro) {
 		curlun->sense_data = SS_WRITE_PROTECTED;
 		return -EINVAL;
@@ -994,7 +1001,17 @@
 		bh = common->next_buffhd_to_drain;
 		if (bh->state == BUF_STATE_EMPTY && !get_some_more)
 			break;			/* We stopped early */
+#ifdef CONFIG_USB_CSW_HACK
+		/*
+		 * If the CSW packet has already been submitted to the
+		 * hardware, the buffer state is marked full; by also
+		 * checking the residue we make sure that the CSW packet
+		 * itself is not written to the storage medium.
+		 */
+		if (bh->state == BUF_STATE_FULL && common->residue) {
+#else
 		if (bh->state == BUF_STATE_FULL) {
+#endif
 			smp_rmb();
 			common->next_buffhd_to_drain = bh->next;
 			bh->state = BUF_STATE_EMPTY;
@@ -1045,9 +1062,36 @@
 				curlun->sense_data = SS_WRITE_ERROR;
 				curlun->sense_data_info = file_offset >> 9;
 				curlun->info_valid = 1;
+#ifdef CONFIG_USB_CSW_HACK
+				write_error_after_csw_sent = 1;
+				goto write_error;
+#endif
 				break;
 			}
 
+#ifdef CONFIG_USB_CSW_HACK
+write_error:
+			if ((nwritten == amount) && !csw_hack_sent) {
+				if (write_error_after_csw_sent)
+					break;
+				/*
+				 * Check whether any buffer is still in the
+				 * busy state; if so, the complete data has
+				 * not yet been received from the host, so
+				 * there is no point in sending the CSW early.
+				 */
+				for (i = 0; i < FSG_NUM_BUFFERS; i++) {
+					if (common->buffhds[i].state ==
+							BUF_STATE_BUSY)
+						break;
+				}
+				if (!amount_left_to_req && i == FSG_NUM_BUFFERS) {
+					csw_hack_sent = 1;
+					send_status(common);
+				}
+			}
+#endif
 			/* Did the host decide to stop early? */
 			if (bh->outreq->actual != bh->outreq->length) {
 				common->short_packet_received = 1;
@@ -1508,8 +1552,7 @@
 		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
 		return -EINVAL;
 	}
-
-	if (curlun->prevent_medium_removal && !prevent)
+	if (!curlun->nofua && curlun->prevent_medium_removal && !prevent)
 		fsg_lun_fsync_sub(curlun);
 	curlun->prevent_medium_removal = prevent;
 	return 0;
@@ -1790,6 +1833,19 @@
 	csw->Signature = cpu_to_le32(USB_BULK_CS_SIG);
 	csw->Tag = common->tag;
 	csw->Residue = cpu_to_le32(common->residue);
+#ifdef CONFIG_USB_CSW_HACK
+	/* Since the CSW is sent early, before the data is written to
+	 * the storage medium, set the residue to zero on the assumption
+	 * that the write will succeed.
+	 */
+	if (write_error_after_csw_sent) {
+		write_error_after_csw_sent = 0;
+		csw->Residue = cpu_to_le32(common->residue);
+	} else
+		csw->Residue = 0;
+#else
+	csw->Residue = cpu_to_le32(common->residue);
+#endif
 	csw->Status = status;
 
 	bh->inreq->length = USB_BULK_CS_WRAP_LEN;
@@ -2349,7 +2405,6 @@
 /* Reset interface setting and re-init endpoint state (toggle etc). */
 static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg)
 {
-	const struct usb_endpoint_descriptor *d;
 	struct fsg_dev *fsg;
 	int i, rc = 0;
 
@@ -2374,15 +2429,6 @@
 			}
 		}
 
-		/* Disable the endpoints */
-		if (fsg->bulk_in_enabled) {
-			usb_ep_disable(fsg->bulk_in);
-			fsg->bulk_in_enabled = 0;
-		}
-		if (fsg->bulk_out_enabled) {
-			usb_ep_disable(fsg->bulk_out);
-			fsg->bulk_out_enabled = 0;
-		}
 
 		common->fsg = NULL;
 		wake_up(&common->fsg_wait);
@@ -2395,22 +2441,6 @@
 	common->fsg = new_fsg;
 	fsg = common->fsg;
 
-	/* Enable the endpoints */
-	d = fsg_ep_desc(common->gadget,
-			&fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);
-	rc = enable_endpoint(common, fsg->bulk_in, d);
-	if (rc)
-		goto reset;
-	fsg->bulk_in_enabled = 1;
-
-	d = fsg_ep_desc(common->gadget,
-			&fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc);
-	rc = enable_endpoint(common, fsg->bulk_out, d);
-	if (rc)
-		goto reset;
-	fsg->bulk_out_enabled = 1;
-	common->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize);
-	clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
 
 	/* Allocate the requests */
 	for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
@@ -2440,6 +2470,29 @@
 static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
 {
 	struct fsg_dev *fsg = fsg_from_func(f);
+	struct fsg_common *common = fsg->common;
+	const struct usb_endpoint_descriptor *d;
+	int rc;
+
+	/* Enable the endpoints */
+	d = fsg_ep_desc(common->gadget,
+			&fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);
+	rc = enable_endpoint(common, fsg->bulk_in, d);
+	if (rc)
+		return rc;
+	fsg->bulk_in_enabled = 1;
+
+	d = fsg_ep_desc(common->gadget,
+			&fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc);
+	rc = enable_endpoint(common, fsg->bulk_out, d);
+	if (rc) {
+		usb_ep_disable(fsg->bulk_in);
+		fsg->bulk_in_enabled = 0;
+		return rc;
+	}
+	fsg->bulk_out_enabled = 1;
+	common->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize);
+	clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
 	fsg->common->new_fsg = fsg;
 	raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
 	return USB_GADGET_DELAYED_STATUS;
@@ -2448,6 +2501,18 @@
 static void fsg_disable(struct usb_function *f)
 {
 	struct fsg_dev *fsg = fsg_from_func(f);
+
+	/* Disable the endpoints */
+	if (fsg->bulk_in_enabled) {
+		usb_ep_disable(fsg->bulk_in);
+		fsg->bulk_in_enabled = 0;
+		fsg->bulk_in->driver_data = NULL;
+	}
+	if (fsg->bulk_out_enabled) {
+		usb_ep_disable(fsg->bulk_out);
+		fsg->bulk_out_enabled = 0;
+		fsg->bulk_out->driver_data = NULL;
+	}
 	fsg->common->new_fsg = NULL;
 	raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
 }
@@ -2559,6 +2624,7 @@
 		 */
 		if (!fsg_is_set(common))
 			break;
+		common->ep0req->length = 0;
 		if (test_and_clear_bit(IGNORE_BULK_OUT,
 				       &common->fsg->atomic_bitflags))
 			usb_ep_clear_halt(common->fsg->bulk_in);
@@ -2654,6 +2720,16 @@
 			common->state = FSG_STATE_STATUS_PHASE;
 		spin_unlock_irq(&common->lock);
 
+#ifdef CONFIG_USB_CSW_HACK
+		/* Since the status for a write SCSI command has already
+		 * been sent, skip sending it once again here.
+		 */
+		if (csw_hack_sent) {
+			csw_hack_sent = 0;
+			continue;
+		}
+#endif
 		if (send_status(common))
 			continue;
 
@@ -2779,6 +2855,7 @@
 		curlun->ro = lcfg->cdrom || lcfg->ro;
 		curlun->initially_ro = curlun->ro;
 		curlun->removable = lcfg->removable;
+		curlun->nofua = lcfg->nofua;
 		curlun->dev.release = fsg_lun_release;
 		curlun->dev.parent = &gadget->dev;
 		/* curlun->dev.driver = &fsg_driver.driver; XXX */
diff --git a/drivers/usb/gadget/f_rmnet.c b/drivers/usb/gadget/f_rmnet.c
new file mode 100644
index 0000000..770a225
--- /dev/null
+++ b/drivers/usb/gadget/f_rmnet.c
@@ -0,0 +1,819 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/usb/android_composite.h>
+#include <linux/spinlock.h>
+
+#include <linux/platform_data/usb_rmnet.h>
+#include "u_rmnet.h"
+#include "gadget_chips.h"
+
+
+#define RMNET_NOTIFY_INTERVAL	5
+#define RMNET_MAX_NOTIFY_SIZE	sizeof(struct usb_cdc_notification)
+
+struct rmnet_descs {
+	struct usb_endpoint_descriptor	*in;
+	struct usb_endpoint_descriptor	*out;
+	struct usb_endpoint_descriptor	*notify;
+};
+
+#define ACM_CTRL_DTR	(1 << 0)
+
+/* TODO: use separate structures for data and
+ * control paths
+ */
+struct f_rmnet {
+	struct grmnet			port;
+	int				ifc_id;
+	u8				port_num;
+	atomic_t			online;
+	struct usb_composite_dev	*cdev;
+
+	spinlock_t			lock;
+
+	/* usb descriptors */
+	struct rmnet_descs		fs;
+	struct rmnet_descs		hs;
+
+	/* usb eps */
+	struct usb_ep			*notify;
+	struct usb_endpoint_descriptor	*notify_desc;
+	struct usb_request		*notify_req;
+
+	/* control info */
+	struct list_head		cpkt_resp_q;
+	atomic_t			notify_count;
+	unsigned long			cpkts_len;
+};
+
+#define NR_PORTS	1
+static unsigned int nr_ports;
+static struct rmnet_ports {
+	unsigned			port_num;
+	struct f_rmnet			*port;
+#ifdef CONFIG_USB_ANDROID
+	struct android_usb_function	android_f;
+#endif
+} ports[NR_PORTS];
+
+static struct usb_interface_descriptor rmnet_interface_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bNumEndpoints =	3,
+	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceProtocol =	USB_CLASS_VENDOR_SPEC,
+	/* .iInterface = DYNAMIC */
+};
+
+/* Full speed support */
+static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+	.bInterval =		1 << RMNET_NOTIFY_INTERVAL,
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_in_desc  = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(64),
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(64),
+};
+
+static struct usb_descriptor_header *rmnet_fs_function[] = {
+	(struct usb_descriptor_header *) &rmnet_interface_desc,
+	(struct usb_descriptor_header *) &rmnet_fs_notify_desc,
+	(struct usb_descriptor_header *) &rmnet_fs_in_desc,
+	(struct usb_descriptor_header *) &rmnet_fs_out_desc,
+	NULL,
+};
+
+/* High speed support */
+static struct usb_endpoint_descriptor rmnet_hs_notify_desc  = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
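+	/* at high speed, bInterval encodes 2^(bInterval-1) microframes,
+	 * so RMNET_NOTIFY_INTERVAL + 4 (i.e. 9 -> 32 ms) matches the
+	 * full-speed polling interval of (1 << RMNET_NOTIFY_INTERVAL) ms
+	 */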
+	.bInterval =		RMNET_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *rmnet_hs_function[] = {
+	(struct usb_descriptor_header *) &rmnet_interface_desc,
+	(struct usb_descriptor_header *) &rmnet_hs_notify_desc,
+	(struct usb_descriptor_header *) &rmnet_hs_in_desc,
+	(struct usb_descriptor_header *) &rmnet_hs_out_desc,
+	NULL,
+};
+
+/* String descriptors */
+
+static struct usb_string rmnet_string_defs[] = {
+	[0].s = "RmNet",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings rmnet_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		rmnet_string_defs,
+};
+
+static struct usb_gadget_strings *rmnet_strings[] = {
+	&rmnet_string_table,
+	NULL,
+};
+
+/* ------- misc functions --------------------*/
+
+static inline struct f_rmnet *func_to_rmnet(struct usb_function *f)
+{
+	return container_of(f, struct f_rmnet, port.func);
+}
+
+static inline struct f_rmnet *port_to_rmnet(struct grmnet *r)
+{
+	return container_of(r, struct f_rmnet, port);
+}
+
+static struct usb_request *
+frmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
+{
+	struct usb_request *req;
+
+	req = usb_ep_alloc_request(ep, flags);
+	if (!req)
+		return ERR_PTR(-ENOMEM);
+
+	req->buf = kmalloc(len, flags);
+	if (!req->buf) {
+		usb_ep_free_request(ep, req);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	req->length = len;
+
+	return req;
+}
+
+void frmnet_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+	kfree(req->buf);
+	usb_ep_free_request(ep, req);
+}
+
+static struct rmnet_ctrl_pkt *rmnet_alloc_ctrl_pkt(unsigned len, gfp_t flags)
+{
+	struct rmnet_ctrl_pkt *pkt;
+
+	pkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags);
+	if (!pkt)
+		return ERR_PTR(-ENOMEM);
+
+	pkt->buf = kmalloc(len, flags);
+	if (!pkt->buf) {
+		kfree(pkt);
+		return ERR_PTR(-ENOMEM);
+	}
+	pkt->len = len;
+
+	return pkt;
+}
+
+static void rmnet_free_ctrl_pkt(struct rmnet_ctrl_pkt *pkt)
+{
+	kfree(pkt->buf);
+	kfree(pkt);
+}
+
+/* -------------------------------------------*/
+
+static int gport_setup(int no_ports)
+{
+	int ret;
+
+	pr_debug("%s: no_ports:%d\n", __func__, no_ports);
+
+	ret = gbam_setup(no_ports);
+	if (ret)
+		return ret;
+
+	ret = gsmd_ctrl_setup(no_ports);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int gport_connect(struct f_rmnet *dev)
+{
+	int ret;
+
+	pr_debug("%s:dev:%p portno:%d\n",
+			__func__, dev, dev->port_num);
+
+	ret = gsmd_ctrl_connect(&dev->port, dev->port_num);
+	if (ret) {
+		pr_err("%s: gsmd_ctrl_connect failed: err:%d\n",
+				__func__, ret);
+		return ret;
+	}
+
+	ret = gbam_connect(&dev->port, dev->port_num);
+	if (ret) {
+		pr_err("%s: gbam_connect failed: err:%d\n",
+				__func__, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int gport_disconnect(struct f_rmnet *dev)
+{
+	pr_debug("%s:dev:%p portno:%d\n",
+			__func__, dev, dev->port_num);
+
+	gbam_disconnect(&dev->port, dev->port_num);
+
+	gsmd_ctrl_disconnect(&dev->port, dev->port_num);
+
+	return 0;
+}
+
+static int frmnet_remove(struct platform_device *dev)
+{
+	/* TBD:
+	 *  1. Unregister android function
+	 *  2. Free name from ports
+	 *  3. Free rmnet device
+	 *  4. Free Copy Descriptors
+	 */
+	return 0;
+}
+
+static void frmnet_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_rmnet *dev = func_to_rmnet(f);
+
+	pr_debug("%s: portno:%d\n", __func__, dev->port_num);
+
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+	usb_free_descriptors(f->descriptors);
+
+	frmnet_free_req(dev->notify, dev->notify_req);
+
+	kfree(dev);
+}
+
+static void frmnet_disable(struct usb_function *f)
+{
+	struct f_rmnet *dev = func_to_rmnet(f);
+
+	pr_debug("%s: port#%d\n", __func__, dev->port_num);
+
+	usb_ep_disable(dev->notify);
+
+	atomic_set(&dev->online, 0);
+
+	gport_disconnect(dev);
+}
+
+static int
+frmnet_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_rmnet			*dev = func_to_rmnet(f);
+	struct usb_composite_dev	*cdev = dev->cdev;
+	int				ret;
+
+	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);
+
+	if (dev->notify->driver_data) {
+		pr_debug("%s: reset port:%d\n", __func__, dev->port_num);
+		usb_ep_disable(dev->notify);
+	}
+	dev->notify_desc = ep_choose(cdev->gadget,
+				dev->hs.notify,
+				dev->fs.notify);
+	ret = usb_ep_enable(dev->notify, dev->notify_desc);
+	if (ret) {
+		pr_err("%s: usb ep#%s enable failed, err#%d\n",
+				__func__, dev->notify->name, ret);
+		return ret;
+	}
+	dev->notify->driver_data = dev;
+
+	if (dev->port.in->driver_data) {
+		pr_debug("%s: reset port:%d\n", __func__, dev->port_num);
+		gport_disconnect(dev);
+	}
+
+	dev->port.in_desc = ep_choose(cdev->gadget,
+			dev->hs.in, dev->fs.in);
+	dev->port.out_desc = ep_choose(cdev->gadget,
+			dev->hs.out, dev->fs.out);
+
+	ret = gport_connect(dev);
+
+	atomic_set(&dev->online, 1);
+
+	return ret;
+}
+
+static void frmnet_ctrl_response_available(struct f_rmnet *dev)
+{
+	struct usb_request		*req = dev->notify_req;
+	struct usb_cdc_notification	*event;
+	unsigned long			flags;
+	int				ret;
+
+	pr_debug("%s:dev:%p portno#%d\n", __func__, dev, dev->port_num);
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (!atomic_read(&dev->online) || !req || !req->buf) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return;
+	}
+
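+	/* When a notification is already in flight, the queued response
+	 * will be picked up once that notification completes, so only
+	 * queue a new notification for the first pending response.
+	 */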
+	if (atomic_inc_return(&dev->notify_count) != 1) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return;
+	}
+
+	event = req->buf;
+	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+			| USB_RECIP_INTERFACE;
+	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
+	event->wValue = cpu_to_le16(0);
+	event->wIndex = cpu_to_le16(dev->ifc_id);
+	event->wLength = cpu_to_le16(0);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	ret = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
+	if (ret) {
+		atomic_dec(&dev->notify_count);
+		pr_debug("ep enqueue error %d\n", ret);
+	}
+}
+
+static int
+frmnet_send_cpkt_response(struct grmnet *gr, struct rmnet_ctrl_pkt *cpkt)
+{
+	struct f_rmnet		*dev;
+	unsigned long		flags;
+
+	if (!gr || !cpkt) {
+		pr_err("%s: Invalid grmnet/cpkt, grmnet:%p cpkt:%p\n",
+				__func__, gr, cpkt);
+		return -ENODEV;
+	}
+
+	dev = port_to_rmnet(gr);
+
+	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);
+
+	if (!atomic_read(&dev->online)) {
+		rmnet_free_ctrl_pkt(cpkt);
+		return 0;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_add(&cpkt->list, &dev->cpkt_resp_q);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	frmnet_ctrl_response_available(dev);
+
+	return 0;
+}
+
+static void
+frmnet_cmd_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_rmnet			*dev = req->context;
+	struct usb_composite_dev	*cdev;
+	struct rmnet_ctrl_pkt		*cpkt;
+
+	if (!dev) {
+		pr_err("%s: rmnet dev is null\n", __func__);
+		return;
+	}
+
+	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);
+
+	cdev = dev->cdev;
+
+	cpkt = rmnet_alloc_ctrl_pkt(req->actual, GFP_ATOMIC);
+	if (IS_ERR(cpkt)) {
+		pr_err("%s: Unable to allocate ctrl pkt\n", __func__);
+		return;
+	}
+
+	memcpy(cpkt->buf, req->buf, req->actual);
+
+	if (dev->port.send_cpkt_request)
+		dev->port.send_cpkt_request(&dev->port, dev->port_num, cpkt);
+}
+
+static void frmnet_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_rmnet *dev = req->context;
+	int status = req->status;
+
+	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);
+
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		atomic_set(&dev->notify_count, 0);
+		break;
+	default:
+		pr_err("rmnet notify ep error %d\n", status);
+		/* FALLTHROUGH */
+	case 0:
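+
+		/* handle multiple pending QMI_RESPONSE_AVAILABLE
+		 * notifications by resending until we're done
+		 */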
+		if (atomic_dec_and_test(&dev->notify_count))
+			break;
+
+		status = usb_ep_queue(dev->notify, req, GFP_ATOMIC);
+		if (status) {
+			atomic_dec(&dev->notify_count);
+			pr_debug("ep enqueue error %d\n", status);
+		}
+		break;
+	}
+}
+
+static int
+frmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct f_rmnet			*dev = func_to_rmnet(f);
+	struct usb_composite_dev	*cdev = dev->cdev;
+	struct usb_request		*req = cdev->req;
+	u16				w_index = le16_to_cpu(ctrl->wIndex);
+	u16				w_value = le16_to_cpu(ctrl->wValue);
+	u16				w_length = le16_to_cpu(ctrl->wLength);
+	int				ret = -EOPNOTSUPP;
+
+	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);
+
+	if (!atomic_read(&dev->online)) {
+		pr_debug("%s: usb cable is not connected\n", __func__);
+		return -ENOTCONN;
+	}
+
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
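+		/* the command payload arrives in the ep0 data stage and is
+		 * handed to the control port from frmnet_cmd_complete()
+		 */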
+		if (w_length > req->length)
+			goto invalid;
+		ret = w_length;
+		req->complete = frmnet_cmd_complete;
+		req->context = dev;
+		break;
+
+
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
+		if (w_value)
+			goto invalid;
+		else {
+			unsigned len;
+			struct rmnet_ctrl_pkt *cpkt;
+
+			spin_lock(&dev->lock);
+			if (list_empty(&dev->cpkt_resp_q)) {
+				pr_err("ctrl resp queue empty"
+					" req%02x.%02x v%04x i%04x l%d\n",
+					ctrl->bRequestType, ctrl->bRequest,
+					w_value, w_index, w_length);
+				spin_unlock(&dev->lock);
+				goto invalid;
+			}
+
+			cpkt = list_first_entry(&dev->cpkt_resp_q,
+					struct rmnet_ctrl_pkt, list);
+			list_del(&cpkt->list);
+			spin_unlock(&dev->lock);
+
+			len = min_t(unsigned, w_length, cpkt->len);
+			memcpy(req->buf, cpkt->buf, len);
+			ret = len;
+
+			rmnet_free_ctrl_pkt(cpkt);
+		}
+		break;
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+		if (dev->port.send_cbits_tomodem)
+			dev->port.send_cbits_tomodem(&dev->port,
+							dev->port_num,
+							w_value);
+		ret = 0;
+
+		break;
+	default:
+
+invalid:
+		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (ret >= 0) {
+		VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = (ret < w_length);
+		req->length = ret;
+		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (ret < 0)
+			ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
+	}
+
+	return ret;
+}
+
+static int frmnet_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_rmnet			*dev = func_to_rmnet(f);
+	struct usb_ep			*ep;
+	struct usb_composite_dev	*cdev = c->cdev;
+	int				ret = -ENODEV;
+
+	dev->ifc_id = usb_interface_id(c, f);
+	if (dev->ifc_id < 0) {
+		pr_err("%s: unable to allocate ifc id, err:%d\n",
+				__func__, dev->ifc_id);
+		return dev->ifc_id;
+	}
+	rmnet_interface_desc.bInterfaceNumber = dev->ifc_id;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc);
+	if (!ep) {
+		pr_err("%s: usb epin autoconfig failed\n", __func__);
+		return -ENODEV;
+	}
+	dev->port.in = ep;
+	ep->driver_data = cdev;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc);
+	if (!ep) {
+		pr_err("%s: usb epout autoconfig failed\n", __func__);
+		ret = -ENODEV;
+		goto ep_auto_out_fail;
+	}
+	dev->port.out = ep;
+	ep->driver_data = cdev;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc);
+	if (!ep) {
+		pr_err("%s: usb epnotify autoconfig failed\n", __func__);
+		ret = -ENODEV;
+		goto ep_auto_notify_fail;
+	}
+	dev->notify = ep;
+	ep->driver_data = cdev;
+
+	dev->notify_req = frmnet_alloc_req(ep,
+				sizeof(struct usb_cdc_notification) + 2,
+				GFP_KERNEL);
+	if (IS_ERR(dev->notify_req)) {
+		pr_err("%s: unable to allocate memory for notify req\n",
+				__func__);
+		ret = -ENOMEM;
+		goto ep_notify_alloc_fail;
+	}
+
+	dev->notify_req->complete = frmnet_notify_complete;
+	dev->notify_req->context = dev;
+
+	f->descriptors = usb_copy_descriptors(rmnet_fs_function);
+
+	dev->fs.in = usb_find_endpoint(rmnet_fs_function,
+					f->descriptors,
+					&rmnet_fs_in_desc);
+	dev->fs.out = usb_find_endpoint(rmnet_fs_function,
+					f->descriptors,
+					&rmnet_fs_out_desc);
+	dev->fs.notify = usb_find_endpoint(rmnet_fs_function,
+					f->descriptors,
+					&rmnet_fs_notify_desc);
+
+	if (gadget_is_dualspeed(cdev->gadget)) {
+		rmnet_hs_in_desc.bEndpointAddress =
+				rmnet_fs_in_desc.bEndpointAddress;
+		rmnet_hs_out_desc.bEndpointAddress =
+				rmnet_fs_out_desc.bEndpointAddress;
+		rmnet_hs_notify_desc.bEndpointAddress =
+				rmnet_fs_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(rmnet_hs_function);
+
+		dev->hs.in = usb_find_endpoint(rmnet_hs_function,
+				f->hs_descriptors, &rmnet_hs_in_desc);
+		dev->hs.out = usb_find_endpoint(rmnet_hs_function,
+				f->hs_descriptors, &rmnet_hs_out_desc);
+		dev->hs.notify = usb_find_endpoint(rmnet_hs_function,
+				f->hs_descriptors, &rmnet_hs_notify_desc);
+	}
+
+	pr_info("%s: RmNet(%d) %s Speed, IN:%s OUT:%s\n",
+			__func__, dev->port_num,
+			gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
+			dev->port.in->name, dev->port.out->name);
+
+	return 0;
+
+ep_notify_alloc_fail:
+	dev->notify->driver_data = NULL;
+	dev->notify = NULL;
+ep_auto_notify_fail:
+	dev->port.out->driver_data = NULL;
+	dev->port.out = NULL;
+ep_auto_out_fail:
+	dev->port.in->driver_data = NULL;
+	dev->port.in = NULL;
+
+	return ret;
+}
+
+#ifdef CONFIG_USB_ANDROID
+static int frmnet_bind_config(struct usb_configuration *c)
+{
+	static unsigned		portno;
+	int			status;
+	struct f_rmnet		*dev;
+	struct usb_function	*f;
+	unsigned long		flags;
+
+	pr_debug("%s: usb config:%p\n", __func__, c);
+
+	if (portno >= nr_ports) {
+		pr_err("%s: supporting ports#%u port_id:%u\n", __func__,
+				nr_ports, portno);
+		return -ENODEV;
+	}
+
+	if (rmnet_string_defs[0].id == 0) {
+		status = usb_string_id(c->cdev);
+		if (status < 0) {
+			pr_err("%s: failed to get string id, err:%d\n",
+					__func__, status);
+			return status;
+		}
+		rmnet_string_defs[0].id = status;
+	}
+
+	dev = ports[portno].port;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	dev->cdev = c->cdev;
+	f = &dev->port.func;
+	f->name = ports[portno].android_f.name;
+	portno++;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	f->strings = rmnet_strings;
+	f->bind = frmnet_bind;
+	f->unbind = frmnet_unbind;
+	f->disable = frmnet_disable;
+	f->set_alt = frmnet_set_alt;
+	f->setup = frmnet_setup;
+	dev->port.send_cpkt_response = frmnet_send_cpkt_response;
+
+	status = usb_add_function(c, f);
+	if (status) {
+		pr_err("%s: usb add function failed: %d\n",
+				__func__, status);
+		/* portno was already advanced above */
+		kfree(ports[portno - 1].android_f.name);
+		kfree(dev);
+		return status;
+	}
+
+	pr_debug("%s: complete\n", __func__);
+
+	return status;
+}
+
+static struct platform_driver usb_rmnet = {
+	.remove = frmnet_remove,
+	.driver = {
+		.name = "usb_rmnet",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __devinit frmnet_probe(struct platform_device *pdev)
+{
+	struct usb_rmnet_pdata *pdata = pdev->dev.platform_data;
+	int i;
+	struct f_rmnet *dev;
+	int ret;
+	int instances;
+
+	instances = 1;
+	if (pdata)
+		instances = pdata->num_instances;
+
+	pr_debug("%s: instances :%d\n", __func__, instances);
+
+	for (i = 0; i < instances; i++) {
+		dev = kzalloc(sizeof(struct f_rmnet), GFP_KERNEL);
+		if (!dev) {
+			pr_err("%s: Unable to allocate rmnet device\n",
+					__func__);
+			ret = -ENOMEM;
+			goto fail_probe;
+		}
+
+		dev->port_num = i;
+		spin_lock_init(&dev->lock);
+		INIT_LIST_HEAD(&dev->cpkt_resp_q);
+
+		ports[i].port = dev;
+		ports[i].port_num = i;
+		ports[i].android_f.name = kasprintf(GFP_KERNEL, "rmnet%d", i);
+		ports[i].android_f.bind_config = frmnet_bind_config;
+
+		pr_debug("%s: android f_name:%s\n", __func__,
+				ports[i].android_f.name);
+
+		nr_ports++;
+
+		android_register_function(&ports[i].android_f);
+	}
+
+	gport_setup(nr_ports);
+
+	return 0;
+
+fail_probe:
+	for (i = 0; i < nr_ports; i++) {
+		/* android_unregister_function(&ports[i].android_f); */
+		kfree(ports[i].android_f.name);
+		kfree(ports[i].port);
+	}
+
+	return ret;
+}
+
+static int __init frmnet_init(void)
+{
+	return platform_driver_probe(&usb_rmnet, frmnet_probe);
+}
+module_init(frmnet_init);
+
+static void __exit frmnet_exit(void)
+{
+	platform_driver_unregister(&usb_rmnet);
+}
+module_exit(frmnet_exit);
+
+MODULE_DESCRIPTION("rmnet function driver");
+MODULE_LICENSE("GPL v2");
+#endif
diff --git a/drivers/usb/gadget/f_rmnet.h b/drivers/usb/gadget/f_rmnet.h
new file mode 100644
index 0000000..2d816c6
--- /dev/null
+++ b/drivers/usb/gadget/f_rmnet.h
@@ -0,0 +1,19 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __F_RMNET_H
+#define __F_RMNET_H
+
+int rmnet_function_add(struct usb_configuration *c);
+
+#endif /* __F_RMNET_H */
diff --git a/drivers/usb/gadget/f_rmnet_sdio.c b/drivers/usb/gadget/f_rmnet_sdio.c
new file mode 100644
index 0000000..aa8fd3a
--- /dev/null
+++ b/drivers/usb/gadget/f_rmnet_sdio.c
@@ -0,0 +1,1314 @@
+/*
+ * f_rmnet_sdio.c -- RmNet SDIO function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/netdevice.h>
+
+#include <linux/usb/cdc.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/android_composite.h>
+#include <linux/termios.h>
+#include <linux/debugfs.h>
+
+#include <mach/sdio_cmux.h>
+#include <mach/sdio_dmux.h>
+
+static uint32_t rmnet_sdio_ctl_ch = CONFIG_RMNET_SDIO_CTL_CHANNEL;
+module_param(rmnet_sdio_ctl_ch, uint, S_IRUGO);
+MODULE_PARM_DESC(rmnet_sdio_ctl_ch, "RmNet control SDIO channel ID");
+
+static uint32_t rmnet_sdio_data_ch = CONFIG_RMNET_SDIO_DATA_CHANNEL;
+module_param(rmnet_sdio_data_ch, uint, S_IRUGO);
+MODULE_PARM_DESC(rmnet_sdio_data_ch, "RmNet data SDIO channel ID");
+
+#define ACM_CTRL_DTR	(1 << 0)
+
+#define SDIO_MUX_HDR           8
+#define RMNET_SDIO_NOTIFY_INTERVAL  5
+#define RMNET_SDIO_MAX_NFY_SZE  sizeof(struct usb_cdc_notification)
+
+#define RMNET_SDIO_RX_REQ_MAX             16
+#define RMNET_SDIO_RX_REQ_SIZE            2048
+#define RMNET_SDIO_TX_REQ_MAX             200
+
+#define TX_PKT_DROP_THRESHOLD			1000
+#define RX_PKT_FLOW_CTRL_EN_THRESHOLD		1000
+#define RX_PKT_FLOW_CTRL_DISABLE		500
+
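+/* Tx packets received from the SDIO mux are dropped once the USB Tx queue
+ * length exceeds tx_pkt_drop_thld. Submission of USB Rx (OUT) requests is
+ * paused once the packets pending at the data mux cross rx_fctrl_en_thld,
+ * and resumed when they drain below rx_fctrl_dis_thld.
+ */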
+unsigned int tx_pkt_drop_thld = TX_PKT_DROP_THRESHOLD;
+module_param(tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);
+
+unsigned int rx_fctrl_en_thld = RX_PKT_FLOW_CTRL_EN_THRESHOLD;
+module_param(rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);
+
+unsigned int rx_fctrl_dis_thld = RX_PKT_FLOW_CTRL_DISABLE;
+module_param(rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);
+
+/* QMI requests & responses buffer*/
+struct rmnet_sdio_qmi_buf {
+	void *buf;
+	int len;
+	struct list_head list;
+};
+
+struct rmnet_dev {
+	struct usb_function function;
+	struct usb_composite_dev *cdev;
+
+	struct usb_ep           *epout;
+	struct usb_ep           *epin;
+	struct usb_ep           *epnotify;
+	struct usb_request      *notify_req;
+
+	u8                      ifc_id;
+	/* QMI lists */
+	struct list_head        qmi_req_q;
+	struct list_head        qmi_resp_q;
+	/* Tx/Rx lists */
+	struct list_head        tx_idle;
+	struct sk_buff_head	tx_skb_queue;
+	struct list_head        rx_idle;
+	struct sk_buff_head	rx_skb_queue;
+
+	spinlock_t              lock;
+	atomic_t                online;
+	atomic_t                notify_count;
+
+	struct workqueue_struct *wq;
+	struct work_struct disconnect_work;
+
+	struct work_struct ctl_rx_work;
+	struct work_struct data_rx_work;
+
+	struct delayed_work sdio_open_work;
+	atomic_t sdio_open;
+
+	unsigned int dpkts_pending_atdmux;
+	int cbits_to_modem;
+	struct work_struct set_modem_ctl_bits_work;
+
+	/* pkt logging dpkt - data pkt; cpkt - control pkt*/
+	unsigned long dpkt_tolaptop;
+	unsigned long dpkt_tomodem;
+	unsigned long tx_drp_cnt;
+	unsigned long cpkt_tolaptop;
+	unsigned long cpkt_tomodem;
+};
+
+static struct usb_interface_descriptor rmnet_interface_desc = {
+	.bLength =              USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =      USB_DT_INTERFACE,
+	/* .bInterfaceNumber = DYNAMIC */
+	.bNumEndpoints =        3,
+	.bInterfaceClass =      USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass =   USB_CLASS_VENDOR_SPEC,
+	.bInterfaceProtocol =   USB_CLASS_VENDOR_SPEC,
+	/* .iInterface = DYNAMIC */
+};
+
+/* Full speed support */
+static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bEndpointAddress =     USB_DIR_IN,
+	.bmAttributes =         USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_SDIO_MAX_NFY_SZE),
+	.bInterval =            1 << RMNET_SDIO_NOTIFY_INTERVAL,
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_in_desc  = {
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bEndpointAddress =     USB_DIR_IN,
+	.bmAttributes =         USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(64),
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bEndpointAddress =     USB_DIR_OUT,
+	.bmAttributes =         USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize = __constant_cpu_to_le16(64),
+};
+
+static struct usb_descriptor_header *rmnet_fs_function[] = {
+	(struct usb_descriptor_header *) &rmnet_interface_desc,
+	(struct usb_descriptor_header *) &rmnet_fs_notify_desc,
+	(struct usb_descriptor_header *) &rmnet_fs_in_desc,
+	(struct usb_descriptor_header *) &rmnet_fs_out_desc,
+	NULL,
+};
+
+/* High speed support */
+static struct usb_endpoint_descriptor rmnet_hs_notify_desc  = {
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bEndpointAddress =     USB_DIR_IN,
+	.bmAttributes =         USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_SDIO_MAX_NFY_SZE),
+	.bInterval =            RMNET_SDIO_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bEndpointAddress =     USB_DIR_IN,
+	.bmAttributes =         USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bEndpointAddress =     USB_DIR_OUT,
+	.bmAttributes =         USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *rmnet_hs_function[] = {
+	(struct usb_descriptor_header *) &rmnet_interface_desc,
+	(struct usb_descriptor_header *) &rmnet_hs_notify_desc,
+	(struct usb_descriptor_header *) &rmnet_hs_in_desc,
+	(struct usb_descriptor_header *) &rmnet_hs_out_desc,
+	NULL,
+};
+
+/* String descriptors */
+
+static struct usb_string rmnet_string_defs[] = {
+	[0].s = "QMI RmNet",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings rmnet_string_table = {
+	.language =             0x0409, /* en-us */
+	.strings =              rmnet_string_defs,
+};
+
+static struct usb_gadget_strings *rmnet_strings[] = {
+	&rmnet_string_table,
+	NULL,
+};
+
+static struct rmnet_sdio_qmi_buf *
+rmnet_alloc_qmi(unsigned len, gfp_t kmalloc_flags)
+{
+	struct rmnet_sdio_qmi_buf *qmi;
+
+	qmi = kmalloc(sizeof(struct rmnet_sdio_qmi_buf), kmalloc_flags);
+	if (qmi != NULL) {
+		qmi->buf = kmalloc(len, kmalloc_flags);
+		if (qmi->buf == NULL) {
+			kfree(qmi);
+			qmi = NULL;
+		}
+	}
+
+	return qmi ? qmi : ERR_PTR(-ENOMEM);
+}
+
+static void rmnet_free_qmi(struct rmnet_sdio_qmi_buf *qmi)
+{
+	kfree(qmi->buf);
+	kfree(qmi);
+}
+/*
+ * Allocate a usb_request and its buffer.  Returns a pointer to the
+ * usb_request or a pointer with an error code if there is an error.
+ */
+static struct usb_request *
+rmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
+{
+	struct usb_request *req;
+
+	req = usb_ep_alloc_request(ep, kmalloc_flags);
+
+	if (len && req != NULL) {
+		req->length = len;
+		req->buf = kmalloc(len, kmalloc_flags);
+		if (req->buf == NULL) {
+			usb_ep_free_request(ep, req);
+			req = NULL;
+		}
+	}
+
+	return req ? req : ERR_PTR(-ENOMEM);
+}
+
+/*
+ * Free a usb_request and its buffer.
+ */
+static void rmnet_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+	kfree(req->buf);
+	usb_ep_free_request(ep, req);
+}
+
+static void rmnet_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	struct usb_composite_dev *cdev = dev->cdev;
+	int status = req->status;
+
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		atomic_set(&dev->notify_count, 0);
+		break;
+	default:
+		ERROR(cdev, "rmnet notifyep error %d\n", status);
+		/* FALLTHROUGH */
+	case 0:
+
+		/* handle multiple pending QMI_RESPONSE_AVAILABLE
+		 * notifications by resending until we're done
+		 */
+		if (atomic_dec_and_test(&dev->notify_count))
+			break;
+
+		status = usb_ep_queue(dev->epnotify, req, GFP_ATOMIC);
+		if (status) {
+			atomic_dec(&dev->notify_count);
+			ERROR(cdev, "rmnet notify ep enq error %d\n", status);
+		}
+		break;
+	}
+}
+
+static void qmi_response_available(struct rmnet_dev *dev)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request              *req = dev->notify_req;
+	struct usb_cdc_notification     *event = req->buf;
+	int status;
+
+	/* Response will be sent later */
+	if (atomic_inc_return(&dev->notify_count) != 1)
+		return;
+
+	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+			| USB_RECIP_INTERFACE;
+	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
+	event->wValue = cpu_to_le16(0);
+	event->wIndex = cpu_to_le16(dev->ifc_id);
+	event->wLength = cpu_to_le16(0);
+
+	status = usb_ep_queue(dev->epnotify, dev->notify_req, GFP_ATOMIC);
+	if (status < 0) {
+		atomic_dec(&dev->notify_count);
+		ERROR(cdev, "rmnet notify ep enqueue error %d\n", status);
+	}
+}
+
+#define MAX_CTRL_PKT_SIZE	4096
+static void rmnet_ctl_receive_cb(void *data, int size, void *priv)
+{
+	struct rmnet_dev *dev = priv;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct rmnet_sdio_qmi_buf *qmi_resp;
+	unsigned long flags;
+
+	if (!data || !size)
+		return;
+
+	if (size > MAX_CTRL_PKT_SIZE) {
+		ERROR(cdev, "ctrl pkt size:%d exceeds max pkt size:%d\n",
+				size, MAX_CTRL_PKT_SIZE);
+		return;
+	}
+
+	if (!atomic_read(&dev->online)) {
+		DBG(cdev, "USB disconnected\n");
+		return;
+	}
+
+	qmi_resp = rmnet_alloc_qmi(size, GFP_KERNEL);
+	if (IS_ERR(qmi_resp)) {
+		DBG(cdev, "unable to allocate memory for QMI resp\n");
+		return;
+	}
+	memcpy(qmi_resp->buf, data, size);
+	qmi_resp->len = size;
+	spin_lock_irqsave(&dev->lock, flags);
+	list_add_tail(&qmi_resp->list, &dev->qmi_resp_q);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	qmi_response_available(dev);
+}
+
+static void rmnet_ctl_write_done(void *data, int size, void *priv)
+{
+	struct rmnet_dev *dev = priv;
+	struct usb_composite_dev *cdev = dev->cdev;
+
+	VDBG(cdev, "rmnet control write done = %d bytes\n", size);
+}
+
+static void rmnet_sts_callback(int id, void *priv)
+{
+	struct rmnet_dev *dev = priv;
+	struct usb_composite_dev *cdev = dev->cdev;
+
+	DBG(cdev, "rmnet_sts_callback: id: %d\n", id);
+}
+
+static void rmnet_control_rx_work(struct work_struct *w)
+{
+	struct rmnet_dev *dev = container_of(w, struct rmnet_dev, ctl_rx_work);
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct rmnet_sdio_qmi_buf *qmi_req;
+	unsigned long flags;
+	int ret;
+
+	while (1) {
+		spin_lock_irqsave(&dev->lock, flags);
+		if (list_empty(&dev->qmi_req_q))
+			goto unlock;
+
+		qmi_req = list_first_entry(&dev->qmi_req_q,
+					struct rmnet_sdio_qmi_buf, list);
+		list_del(&qmi_req->list);
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		ret = sdio_cmux_write(rmnet_sdio_ctl_ch, qmi_req->buf,
+					qmi_req->len);
+		if (ret != qmi_req->len) {
+			ERROR(cdev, "rmnet control SDIO write failed\n");
+			return;
+		}
+
+		dev->cpkt_tomodem++;
+
+		/*
+		 * The cmux_write API copies the buffer and hands it to sdio_al,
+		 * so it is safe to free the memory before the write completes.
+		 */
+		rmnet_free_qmi(qmi_req);
+	}
+unlock:
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void rmnet_response_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	struct usb_composite_dev *cdev = dev->cdev;
+
+	switch (req->status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+	case 0:
+		return;
+	default:
+		INFO(cdev, "rmnet %s response error %d, %d/%d\n",
+			ep->name, req->status,
+			req->actual, req->length);
+	}
+}
+
+static void rmnet_command_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct rmnet_sdio_qmi_buf *qmi_req;
+	int len = req->actual;
+
+	if (req->status < 0) {
+		ERROR(cdev, "rmnet command error %d\n", req->status);
+		return;
+	}
+
+	/* discard the packet if sdio is not available */
+	if (!atomic_read(&dev->sdio_open))
+		return;
+
+	qmi_req = rmnet_alloc_qmi(len, GFP_ATOMIC);
+	if (IS_ERR(qmi_req)) {
+		ERROR(cdev, "unable to allocate memory for QMI req\n");
+		return;
+	}
+	memcpy(qmi_req->buf, req->buf, len);
+	qmi_req->len = len;
+	spin_lock(&dev->lock);
+	list_add_tail(&qmi_req->list, &dev->qmi_req_q);
+	spin_unlock(&dev->lock);
+	queue_work(dev->wq, &dev->ctl_rx_work);
+}
+
+static int
+rmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request      *req = cdev->req;
+	int                     ret = -EOPNOTSUPP;
+	u16                     w_index = le16_to_cpu(ctrl->wIndex);
+	u16                     w_value = le16_to_cpu(ctrl->wValue);
+	u16                     w_length = le16_to_cpu(ctrl->wLength);
+	struct rmnet_sdio_qmi_buf *resp;
+
+	if (!atomic_read(&dev->online))
+		return -ENOTCONN;
+
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
+		if (w_length > req->length)
+			goto invalid;
+		ret = w_length;
+		req->complete = rmnet_command_complete;
+		req->context = dev;
+		break;
+
+
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
+		if (w_value)
+			goto invalid;
+		else {
+			unsigned len;
+
+			spin_lock(&dev->lock);
+
+			if (list_empty(&dev->qmi_resp_q)) {
+				INFO(cdev, "qmi resp empty"
+					" req%02x.%02x v%04x i%04x l%d\n",
+					ctrl->bRequestType, ctrl->bRequest,
+					w_value, w_index, w_length);
+				spin_unlock(&dev->lock);
+				goto invalid;
+			}
+
+			resp = list_first_entry(&dev->qmi_resp_q,
+				struct rmnet_sdio_qmi_buf, list);
+			list_del(&resp->list);
+			spin_unlock(&dev->lock);
+
+			len = min_t(unsigned, w_length, resp->len);
+			memcpy(req->buf, resp->buf, len);
+			ret = len;
+			req->context = dev;
+			req->complete = rmnet_response_complete;
+			rmnet_free_qmi(resp);
+
+			/* check if this is the right place to count this packet */
+			dev->cpkt_tolaptop++;
+		}
+		break;
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+		/* This is a workaround for RmNet and is borrowed from the
+		 * CDC/ACM standard. The host driver will issue the above ACM
+		 * standard request to the RmNet interface in the following
+		 * scenario: Once the network adapter is disabled from device
+		 * manager, the above request will be sent from the qcusbnet
+		 * host driver, with DTR being '0'. Once network adapter is
+		 * enabled from device manager (or during enumeration), the
+		 * request will be sent with DTR being '1'.
+		 */
+		if (w_value & ACM_CTRL_DTR)
+			dev->cbits_to_modem |= TIOCM_DTR;
+		else
+			dev->cbits_to_modem &= ~TIOCM_DTR;
+		queue_work(dev->wq, &dev->set_modem_ctl_bits_work);
+
+		ret = 0;
+
+		break;
+	default:
+
+invalid:
+	DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+		ctrl->bRequestType, ctrl->bRequest,
+		w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (ret >= 0) {
+		VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = (ret < w_length);
+		req->length = ret;
+		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (ret < 0)
+			ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
+	}
+
+	return ret;
+}
+
+static int
+rmnet_rx_submit(struct rmnet_dev *dev, struct usb_request *req, gfp_t gfp_flags)
+{
+	struct sk_buff *skb;
+	int retval;
+
+	skb = alloc_skb(RMNET_SDIO_RX_REQ_SIZE + SDIO_MUX_HDR, gfp_flags);
+	if (skb == NULL)
+		return -ENOMEM;
+	skb_reserve(skb, SDIO_MUX_HDR);
+
+	req->buf = skb->data;
+	req->length = RMNET_SDIO_RX_REQ_SIZE;
+	req->context = skb;
+
+	retval = usb_ep_queue(dev->epout, req, gfp_flags);
+	if (retval)
+		dev_kfree_skb_any(skb);
+
+	return retval;
+}
+
+static void rmnet_start_rx(struct rmnet_dev *dev)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	int status;
+	struct usb_request *req;
+	struct list_head *act, *tmp;
+	unsigned long flags;
+
+	if (!atomic_read(&dev->online)) {
+		pr_err("%s: USB not connected\n", __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_for_each_safe(act, tmp, &dev->rx_idle) {
+		req = list_entry(act, struct usb_request, list);
+		list_del(&req->list);
+
+		spin_unlock_irqrestore(&dev->lock, flags);
+		status = rmnet_rx_submit(dev, req, GFP_ATOMIC);
+		spin_lock_irqsave(&dev->lock, flags);
+
+		if (status) {
+			ERROR(cdev, "rmnet data rx enqueue err %d\n", status);
+			list_add_tail(&req->list, &dev->rx_idle);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void usb_rmnet_sdio_start_tx(struct rmnet_dev *dev)
+{
+	unsigned long			flags;
+	int				status;
+	struct sk_buff			*skb;
+	struct usb_request		*req;
+	struct usb_composite_dev	*cdev = dev->cdev;
+
+	if (!atomic_read(&dev->online))
+		return;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	while (!list_empty(&dev->tx_idle)) {
+		skb = __skb_dequeue(&dev->tx_skb_queue);
+		if (!skb) {
+			spin_unlock_irqrestore(&dev->lock, flags);
+			return;
+		}
+
+		req = list_first_entry(&dev->tx_idle, struct usb_request, list);
+		req->context = skb;
+		req->buf = skb->data;
+		req->length = skb->len;
+
+		list_del(&req->list);
+		spin_unlock(&dev->lock);
+		status = usb_ep_queue(dev->epin, req, GFP_ATOMIC);
+		spin_lock(&dev->lock);
+		if (status) {
+			/* USB still online, queue requests back */
+			if (atomic_read(&dev->online)) {
+				ERROR(cdev, "rmnet tx data enqueue err %d\n",
+						status);
+				list_add_tail(&req->list, &dev->tx_idle);
+				__skb_queue_head(&dev->tx_skb_queue, skb);
+			} else {
+				req->buf = 0;
+				rmnet_free_req(dev->epin, req);
+				dev_kfree_skb_any(skb);
+			}
+			break;
+		}
+		dev->dpkt_tolaptop++;
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void rmnet_data_receive_cb(void *priv, struct sk_buff *skb)
+{
+	struct rmnet_dev *dev = priv;
+	unsigned long flags;
+
+	/* SDIO mux sends NULL SKB when link state changes */
+	if (!skb)
+		return;
+
+	if (!atomic_read(&dev->online)) {
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	if (dev->tx_skb_queue.qlen > tx_pkt_drop_thld) {
+		if (printk_ratelimit())
+			pr_err("%s: tx pkt dropped: tx_drop_cnt:%lu\n",
+					__func__, dev->tx_drp_cnt);
+		dev->tx_drp_cnt++;
+		spin_unlock_irqrestore(&dev->lock, flags);
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	__skb_queue_tail(&dev->tx_skb_queue, skb);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	usb_rmnet_sdio_start_tx(dev);
+}
+
+static void rmnet_data_write_done(void *priv, struct sk_buff *skb)
+{
+	struct rmnet_dev *dev = priv;
+
+	/* SDIO mux sends NULL SKB when link state changes */
+	if (!skb)
+		return;
+
+	dev_kfree_skb_any(skb);
+	/* the sdio mux calls this function from within
+	 * spin_lock_irqsave, so a plain spin_lock is used here
+	 */
+	spin_lock(&dev->lock);
+	dev->dpkts_pending_atdmux--;
+
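+	/* resume Rx submissions once the backlog at the data mux
+	 * drops below the disable threshold
+	 */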
+	if (dev->dpkts_pending_atdmux >= rx_fctrl_dis_thld) {
+		spin_unlock(&dev->lock);
+		return;
+	}
+	spin_unlock(&dev->lock);
+
+	rmnet_start_rx(dev);
+}
+
+static void rmnet_data_rx_work(struct work_struct *w)
+{
+	struct rmnet_dev *dev = container_of(w, struct rmnet_dev, data_rx_work);
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct sk_buff *skb;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	while ((skb = __skb_dequeue(&dev->rx_skb_queue))) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		ret = msm_sdio_dmux_write(rmnet_sdio_data_ch, skb);
+		spin_lock_irqsave(&dev->lock, flags);
+		if (ret < 0) {
+			ERROR(cdev, "rmnet SDIO data write failed\n");
+			dev_kfree_skb_any(skb);
+		} else {
+			dev->dpkt_tomodem++;
+			dev->dpkts_pending_atdmux++;
+		}
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void rmnet_complete_epout(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = ep->driver_data;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct sk_buff *skb = req->context;
+	int status = req->status;
+	int queue = 0;
+
+	switch (status) {
+	case 0:
+		/* successful completion */
+		skb_put(skb, req->actual);
+		queue = 1;
+		break;
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		dev_kfree_skb_any(skb);
+		req->buf = 0;
+		rmnet_free_req(ep, req);
+		return;
+	default:
+		/* unexpected failure */
+		ERROR(cdev, "RMNET %s response error %d, %d/%d\n",
+			ep->name, status,
+			req->actual, req->length);
+		dev_kfree_skb_any(skb);
+		break;
+	}
+
+	spin_lock(&dev->lock);
+	if (queue) {
+		__skb_queue_tail(&dev->rx_skb_queue, skb);
+		queue_work(dev->wq, &dev->data_rx_work);
+	}
+
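+	/* flow control: park this OUT request until the data mux drains */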
+	if (dev->dpkts_pending_atdmux >= rx_fctrl_en_thld) {
+		list_add_tail(&req->list, &dev->rx_idle);
+		spin_unlock(&dev->lock);
+		return;
+	}
+	spin_unlock(&dev->lock);
+
+	status = rmnet_rx_submit(dev, req, GFP_ATOMIC);
+	if (status) {
+		ERROR(cdev, "rmnet data rx enqueue err %d\n", status);
+		list_add_tail(&req->list, &dev->rx_idle);
+	}
+}
+
+static void rmnet_complete_epin(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = ep->driver_data;
+	struct sk_buff  *skb = req->context;
+	struct usb_composite_dev *cdev = dev->cdev;
+	int status = req->status;
+
+	switch (status) {
+	case 0:
+		/* successful completion */
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		break;
+	default:
+		ERROR(cdev, "rmnet data tx ep error %d\n", status);
+		break;
+	}
+
+	spin_lock(&dev->lock);
+	list_add_tail(&req->list, &dev->tx_idle);
+	spin_unlock(&dev->lock);
+	dev_kfree_skb_any(skb);
+
+	usb_rmnet_sdio_start_tx(dev);
+}
+
+static void rmnet_free_buf(struct rmnet_dev *dev)
+{
+	struct rmnet_sdio_qmi_buf *qmi;
+	struct usb_request *req;
+	struct list_head *act, *tmp;
+	struct sk_buff *skb;
+	unsigned long flags;
+
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	dev->dpkt_tolaptop = 0;
+	dev->dpkt_tomodem = 0;
+	dev->cpkt_tolaptop = 0;
+	dev->cpkt_tomodem = 0;
+	dev->dpkts_pending_atdmux = 0;
+	dev->tx_drp_cnt = 0;
+
+	/* free all usb requests in tx pool */
+	list_for_each_safe(act, tmp, &dev->tx_idle) {
+		req = list_entry(act, struct usb_request, list);
+		list_del(&req->list);
+		req->buf = NULL;
+		rmnet_free_req(dev->epout, req);
+	}
+
+	/* free all usb requests in rx pool */
+	list_for_each_safe(act, tmp, &dev->rx_idle) {
+		req = list_entry(act, struct usb_request, list);
+		list_del(&req->list);
+		req->buf = NULL;
+		rmnet_free_req(dev->epin, req);
+	}
+
+	/* free all buffers in qmi request pool */
+	list_for_each_safe(act, tmp, &dev->qmi_req_q) {
+		qmi = list_entry(act, struct rmnet_sdio_qmi_buf, list);
+		list_del(&qmi->list);
+		rmnet_free_qmi(qmi);
+	}
+
+	/* free all buffers in qmi response pool */
+	list_for_each_safe(act, tmp, &dev->qmi_resp_q) {
+		qmi = list_entry(act, struct rmnet_sdio_qmi_buf, list);
+		list_del(&qmi->list);
+		rmnet_free_qmi(qmi);
+	}
+
+	while ((skb = __skb_dequeue(&dev->tx_skb_queue)))
+		dev_kfree_skb_any(skb);
+
+	while ((skb = __skb_dequeue(&dev->rx_skb_queue)))
+		dev_kfree_skb_any(skb);
+
+	rmnet_free_req(dev->epnotify, dev->notify_req);
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void rmnet_set_modem_ctl_bits_work(struct work_struct *w)
+{
+	struct rmnet_dev *dev;
+
+	dev = container_of(w, struct rmnet_dev, set_modem_ctl_bits_work);
+
+	if (!atomic_read(&dev->sdio_open))
+		return;
+
+	pr_debug("%s: cbits_to_modem:%d\n",
+			__func__, dev->cbits_to_modem);
+
+	sdio_cmux_tiocmset(rmnet_sdio_ctl_ch,
+			dev->cbits_to_modem,
+			~dev->cbits_to_modem);
+}
+
+static void rmnet_disconnect_work(struct work_struct *w)
+{
+	/* REVISIT: Push all the data to sdio if anything is pending */
+}
+static void rmnet_suspend(struct usb_function *f)
+{
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+
+	if (!atomic_read(&dev->online))
+		return;
+	/* This is a workaround for a Windows host bug during suspend.
+	 * Windows 7/XP hosts are supposed to drop DTR when the host is
+	 * suspended. Since that is not being done, explicitly drop DTR
+	 * here in the function driver's suspend handler.
+	 */
+	dev->cbits_to_modem &= ~TIOCM_DTR;
+	queue_work(dev->wq, &dev->set_modem_ctl_bits_work);
+}
+static void rmnet_disable(struct usb_function *f)
+{
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+
+	if (!atomic_read(&dev->online))
+		return;
+
+	usb_ep_disable(dev->epnotify);
+	usb_ep_disable(dev->epout);
+	usb_ep_disable(dev->epin);
+
+	atomic_set(&dev->online, 0);
+	atomic_set(&dev->notify_count, 0);
+	rmnet_free_buf(dev);
+
+	/* cleanup work */
+	queue_work(dev->wq, &dev->disconnect_work);
+	dev->cbits_to_modem = 0;
+	queue_work(dev->wq, &dev->set_modem_ctl_bits_work);
+}
+
+#define SDIO_OPEN_RETRY_DELAY	msecs_to_jiffies(2000)
+#define SDIO_OPEN_MAX_RETRY	90
+static void rmnet_open_sdio_work(struct work_struct *w)
+{
+	struct rmnet_dev *dev =
+			container_of(w, struct rmnet_dev, sdio_open_work.work);
+	struct usb_composite_dev *cdev = dev->cdev;
+	int ret;
+	static int retry_cnt;
+	static bool ctl_ch_opened, data_ch_opened;
+
+	if (!ctl_ch_opened) {
+		/* Control channel for QMI messages */
+		ret = sdio_cmux_open(rmnet_sdio_ctl_ch, rmnet_ctl_receive_cb,
+				rmnet_ctl_write_done, rmnet_sts_callback, dev);
+		if (!ret)
+			ctl_ch_opened = true;
+	}
+	if (!data_ch_opened) {
+		/* Data channel for network packets */
+		ret = msm_sdio_dmux_open(rmnet_sdio_data_ch, dev,
+				rmnet_data_receive_cb,
+				rmnet_data_write_done);
+		if (!ret)
+			data_ch_opened = true;
+	}
+
+	if (ctl_ch_opened && data_ch_opened) {
+		atomic_set(&dev->sdio_open, 1);
+
+		/* if usb cable is connected, update DTR status to modem */
+		if (atomic_read(&dev->online))
+			queue_work(dev->wq, &dev->set_modem_ctl_bits_work);
+
+		pr_info("%s: usb rmnet sdio channels are open retry_cnt:%d\n",
+				__func__, retry_cnt);
+		return;
+	}
+
+	retry_cnt++;
+	pr_debug("%s: usb rmnet sdio open retry_cnt:%d\n",
+			__func__, retry_cnt);
+
+	if (retry_cnt > SDIO_OPEN_MAX_RETRY) {
+		if (!ctl_ch_opened)
+			ERROR(cdev, "Unable to open control SDIO channel\n");
+		else
+			sdio_cmux_close(rmnet_sdio_ctl_ch);
+
+		if (!data_ch_opened)
+			ERROR(cdev, "Unable to open DATA SDIO channel\n");
+		else
+			msm_sdio_dmux_close(rmnet_sdio_data_ch);
+
+	} else {
+		queue_delayed_work(dev->wq, &dev->sdio_open_work,
+				SDIO_OPEN_RETRY_DELAY);
+	}
+}
+
+static int rmnet_set_alt(struct usb_function *f,
+			unsigned intf, unsigned alt)
+{
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	int ret, i;
+
+	/* allocate notification */
+	dev->notify_req = rmnet_alloc_req(dev->epnotify,
+				RMNET_SDIO_MAX_NFY_SZE, GFP_ATOMIC);
+
+	if (IS_ERR(dev->notify_req)) {
+		ret = PTR_ERR(dev->notify_req);
+		goto free_buf;
+	}
+	for (i = 0; i < RMNET_SDIO_RX_REQ_MAX; i++) {
+		req = rmnet_alloc_req(dev->epout, 0, GFP_ATOMIC);
+		if (IS_ERR(req)) {
+			ret = PTR_ERR(req);
+			goto free_buf;
+		}
+		req->complete = rmnet_complete_epout;
+		list_add_tail(&req->list, &dev->rx_idle);
+	}
+	for (i = 0; i < RMNET_SDIO_TX_REQ_MAX; i++) {
+		req = rmnet_alloc_req(dev->epin, 0, GFP_ATOMIC);
+		if (IS_ERR(req)) {
+			ret = PTR_ERR(req);
+			goto free_buf;
+		}
+		req->complete = rmnet_complete_epin;
+		list_add_tail(&req->list, &dev->tx_idle);
+	}
+
+	dev->notify_req->complete = rmnet_notify_complete;
+	dev->notify_req->context = dev;
+	dev->notify_req->length = RMNET_SDIO_MAX_NFY_SZE;
+
+	dev->epin->driver_data = dev;
+	usb_ep_enable(dev->epin, ep_choose(cdev->gadget,
+				&rmnet_hs_in_desc,
+				&rmnet_fs_in_desc));
+	dev->epout->driver_data = dev;
+	usb_ep_enable(dev->epout, ep_choose(cdev->gadget,
+				&rmnet_hs_out_desc,
+				&rmnet_fs_out_desc));
+	usb_ep_enable(dev->epnotify, ep_choose(cdev->gadget,
+				&rmnet_hs_notify_desc,
+				&rmnet_fs_notify_desc));
+
+	atomic_set(&dev->online, 1);
+
+	/* Queue Rx data requests */
+	rmnet_start_rx(dev);
+
+	return 0;
+
+free_buf:
+	rmnet_free_buf(dev);
+	dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */
+	return ret;
+}
+
+static int rmnet_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+	int id;
+	struct usb_ep *ep;
+
+	dev->cdev = cdev;
+
+	/* allocate interface ID */
+	id = usb_interface_id(c, f);
+	if (id < 0)
+		return id;
+	dev->ifc_id = id;
+	rmnet_interface_desc.bInterfaceNumber = id;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc);
+	if (!ep)
+		goto out;
+	ep->driver_data = cdev; /* claim endpoint */
+	dev->epin = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc);
+	if (!ep)
+		goto out;
+	ep->driver_data = cdev; /* claim endpoint */
+	dev->epout = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc);
+	if (!ep)
+		goto out;
+	ep->driver_data = cdev; /* claim endpoint */
+	dev->epnotify = ep;
+
+	/* support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		rmnet_hs_in_desc.bEndpointAddress =
+			rmnet_fs_in_desc.bEndpointAddress;
+		rmnet_hs_out_desc.bEndpointAddress =
+			rmnet_fs_out_desc.bEndpointAddress;
+		rmnet_hs_notify_desc.bEndpointAddress =
+			rmnet_fs_notify_desc.bEndpointAddress;
+	}
+
+	queue_delayed_work(dev->wq, &dev->sdio_open_work, 0);
+
+	return 0;
+
+out:
+	if (dev->epnotify)
+		dev->epnotify->driver_data = NULL;
+	if (dev->epout)
+		dev->epout->driver_data = NULL;
+	if (dev->epin)
+		dev->epin->driver_data = NULL;
+
+	return -ENODEV;
+}
+
+static void
+rmnet_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+
+	destroy_workqueue(dev->wq);
+
+	rmnet_free_buf(dev);
+	dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */
+
+	msm_sdio_dmux_close(rmnet_sdio_data_ch);
+	sdio_cmux_close(rmnet_sdio_ctl_ch);
+
+	atomic_set(&dev->sdio_open, 0);
+
+	kfree(dev);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static ssize_t debug_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	struct rmnet_dev *dev = file->private_data;
+	char *buf;
+	unsigned long flags;
+	int ret;
+
+	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	ret = scnprintf(buf, PAGE_SIZE,
+			"dpkts_to_modem:  %lu\n"
+			"dpkts_to_laptop: %lu\n"
+			"cpkts_to_modem:  %lu\n"
+			"cpkts_to_laptop: %lu\n"
+			"cbits_to_modem:  %d\n"
+			"tx skb size:     %u\n"
+			"rx_skb_size:     %u\n"
+			"dpkts_pending_at_dmux: %u\n"
+			"tx drp cnt: %lu\n"
+			"cbits_tomodem: %d",
+			dev->dpkt_tomodem, dev->dpkt_tolaptop,
+			dev->cpkt_tomodem, dev->cpkt_tolaptop,
+			dev->cbits_to_modem,
+			dev->tx_skb_queue.qlen, dev->rx_skb_queue.qlen,
+			dev->dpkts_pending_atdmux, dev->tx_drp_cnt,
+			dev->cbits_to_modem);
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret);
+
+	kfree(buf);
+
+	return ret;
+}
+
+static ssize_t debug_reset_stats(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	struct rmnet_dev *dev = file->private_data;
+
+	dev->dpkt_tolaptop = 0;
+	dev->dpkt_tomodem = 0;
+	dev->cpkt_tolaptop = 0;
+	dev->cpkt_tomodem = 0;
+	dev->dpkts_pending_atdmux = 0;
+	dev->tx_drp_cnt = 0;
+
+	/* TBD: How do we reset the skb queue lengths?
+	 * Doing so might have side effects.
+	 */
+
+	return count;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+
+	return 0;
+}
+
+const struct file_operations debug_stats_ops = {
+	.open = debug_open,
+	.read = debug_read_stats,
+	.write = debug_reset_stats,
+};
+
+static void usb_debugfs_init(struct rmnet_dev *dev)
+{
+	struct dentry *dent;
+
+	dent = debugfs_create_dir("usb_rmnet", 0);
+	if (IS_ERR(dent))
+		return;
+
+	debugfs_create_file("status", 0444, dent, dev, &debug_stats_ops);
+}
+#else
+static void usb_debugfs_init(struct rmnet_dev *dev)
+{
+	return;
+}
+#endif
+
+int rmnet_sdio_function_add(struct usb_configuration *c)
+{
+	struct rmnet_dev *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->wq = create_singlethread_workqueue("k_rmnet_work");
+	if (!dev->wq) {
+		ret = -ENOMEM;
+		goto free_dev;
+	}
+
+	spin_lock_init(&dev->lock);
+	atomic_set(&dev->notify_count, 0);
+	atomic_set(&dev->online, 0);
+
+	INIT_WORK(&dev->disconnect_work, rmnet_disconnect_work);
+	INIT_WORK(&dev->set_modem_ctl_bits_work, rmnet_set_modem_ctl_bits_work);
+
+	INIT_WORK(&dev->ctl_rx_work, rmnet_control_rx_work);
+	INIT_WORK(&dev->data_rx_work, rmnet_data_rx_work);
+
+	INIT_DELAYED_WORK(&dev->sdio_open_work, rmnet_open_sdio_work);
+
+	INIT_LIST_HEAD(&dev->qmi_req_q);
+	INIT_LIST_HEAD(&dev->qmi_resp_q);
+
+	INIT_LIST_HEAD(&dev->rx_idle);
+	INIT_LIST_HEAD(&dev->tx_idle);
+	skb_queue_head_init(&dev->tx_skb_queue);
+	skb_queue_head_init(&dev->rx_skb_queue);
+
+	dev->function.name = "rmnet_sdio";
+	dev->function.strings = rmnet_strings;
+	dev->function.descriptors = rmnet_fs_function;
+	dev->function.hs_descriptors = rmnet_hs_function;
+	dev->function.bind = rmnet_bind;
+	dev->function.unbind = rmnet_unbind;
+	dev->function.setup = rmnet_setup;
+	dev->function.set_alt = rmnet_set_alt;
+	dev->function.disable = rmnet_disable;
+	dev->function.suspend = rmnet_suspend;
+
+	ret = usb_add_function(c, &dev->function);
+	if (ret)
+		goto free_wq;
+
+	usb_debugfs_init(dev);
+
+	return 0;
+
+free_wq:
+	destroy_workqueue(dev->wq);
+free_dev:
+	kfree(dev);
+
+	return ret;
+}
+
+#ifdef CONFIG_USB_ANDROID_RMNET_SDIO
+static struct android_usb_function rmnet_function = {
+	.name = "rmnet_sdio",
+	.bind_config = rmnet_sdio_function_add,
+};
+
+static int __init rmnet_init(void)
+{
+	android_register_function(&rmnet_function);
+	return 0;
+}
+module_init(rmnet_init);
+
+#endif /* CONFIG_USB_ANDROID_RMNET_SDIO */
diff --git a/drivers/usb/gadget/f_rmnet_smd.c b/drivers/usb/gadget/f_rmnet_smd.c
new file mode 100644
index 0000000..00925f9
--- /dev/null
+++ b/drivers/usb/gadget/f_rmnet_smd.c
@@ -0,0 +1,1333 @@
+/*
+ * f_rmnet_smd.c -- RmNet SMD function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+	ep->driver_data = cdev; /* claim endpoint */
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/termios.h>
+#include <linux/debugfs.h>
+
+#include <mach/msm_smd.h>
+#include <linux/usb/cdc.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/android_composite.h>
+
+#include "gadget_chips.h"
+
+static char *rmnet_ctl_ch = CONFIG_RMNET_SMD_CTL_CHANNEL;
+module_param(rmnet_ctl_ch, charp, S_IRUGO);
+MODULE_PARM_DESC(rmnet_ctl_ch, "RmNet control SMD channel");
+
+static char *rmnet_data_ch = CONFIG_RMNET_SMD_DATA_CHANNEL;
+module_param(rmnet_data_ch, charp, S_IRUGO);
+MODULE_PARM_DESC(rmnet_data_ch, "RmNet data SMD channel");
+
+#define ACM_CTRL_DTR	(1 << 0)
+
+#define RMNET_NOTIFY_INTERVAL	5
+#define RMNET_MAX_NOTIFY_SIZE	sizeof(struct usb_cdc_notification)
+
+#define QMI_REQ_MAX		4
+#define QMI_REQ_SIZE		2048
+#define QMI_RESP_MAX		8
+#define QMI_RESP_SIZE		2048
+
+#define RX_REQ_MAX		8
+#define RX_REQ_SIZE		2048
+#define TX_REQ_MAX		8
+#define TX_REQ_SIZE		2048
+
+#define TXN_MAX 		2048
+
+/* QMI request & response buffers */
+struct qmi_buf {
+	void *buf;
+	int len;
+	struct list_head list;
+};
+
+/* Control & data SMD channel private data */
+struct rmnet_smd_info {
+	struct smd_channel 	*ch;
+	struct tasklet_struct	tx_tlet;
+	struct tasklet_struct	rx_tlet;
+#define CH_OPENED	0
+	unsigned long		flags;
+	/* pending rx packet length */
+	atomic_t		rx_pkt;
+	/* wait for smd open event */
+	wait_queue_head_t	wait;
+};
+
+struct rmnet_dev {
+	struct usb_function function;
+	struct usb_composite_dev *cdev;
+
+	struct usb_ep		*epout;
+	struct usb_ep		*epin;
+	struct usb_ep		*epnotify;
+	struct usb_request 	*notify_req;
+
+	u8			ifc_id;
+	/* QMI lists */
+	struct list_head	qmi_req_pool;
+	struct list_head	qmi_resp_pool;
+	struct list_head	qmi_req_q;
+	struct list_head	qmi_resp_q;
+	/* Tx/Rx lists */
+	struct list_head 	tx_idle;
+	struct list_head 	rx_idle;
+	struct list_head	rx_queue;
+
+	spinlock_t		lock;
+	atomic_t		online;
+	atomic_t		notify_count;
+
+	struct rmnet_smd_info	smd_ctl;
+	struct rmnet_smd_info	smd_data;
+
+	struct workqueue_struct *wq;
+	struct work_struct connect_work;
+	struct work_struct disconnect_work;
+
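+	/* data (dpkts) and control (cpkts) packet counters, dumped via debugfs */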
+	unsigned long	dpkts_to_host;
+	unsigned long	dpkts_from_modem;
+	unsigned long	dpkts_from_host;
+	unsigned long	dpkts_to_modem;
+
+	unsigned long	cpkts_to_host;
+	unsigned long	cpkts_from_modem;
+	unsigned long	cpkts_from_host;
+	unsigned long	cpkts_to_modem;
+};
+
+static struct usb_interface_descriptor rmnet_interface_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	/* .bInterfaceNumber = DYNAMIC */
+	.bNumEndpoints =	3,
+	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceProtocol =	USB_CLASS_VENDOR_SPEC,
+	/* .iInterface = DYNAMIC */
+};
+
+/* Full speed support */
+static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+	.bInterval =		1 << RMNET_NOTIFY_INTERVAL,
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_in_desc  = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(64),
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(64),
+};
+
+static struct usb_descriptor_header *rmnet_fs_function[] = {
+	(struct usb_descriptor_header *) &rmnet_interface_desc,
+	(struct usb_descriptor_header *) &rmnet_fs_notify_desc,
+	(struct usb_descriptor_header *) &rmnet_fs_in_desc,
+	(struct usb_descriptor_header *) &rmnet_fs_out_desc,
+	NULL,
+};
+
+/* High speed support */
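+/*
+ * At high speed the interrupt bInterval encodes 2^(bInterval-1)
+ * microframes, so RMNET_NOTIFY_INTERVAL + 4 keeps roughly the same
+ * ~32 ms polling period as the full-speed descriptor above.
+ */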
+static struct usb_endpoint_descriptor rmnet_hs_notify_desc  = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+	.bInterval =		RMNET_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *rmnet_hs_function[] = {
+	(struct usb_descriptor_header *) &rmnet_interface_desc,
+	(struct usb_descriptor_header *) &rmnet_hs_notify_desc,
+	(struct usb_descriptor_header *) &rmnet_hs_in_desc,
+	(struct usb_descriptor_header *) &rmnet_hs_out_desc,
+	NULL,
+};
+
+/* String descriptors */
+
+static struct usb_string rmnet_string_defs[] = {
+	[0].s = "QMI RmNet",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings rmnet_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		rmnet_string_defs,
+};
+
+static struct usb_gadget_strings *rmnet_strings[] = {
+	&rmnet_string_table,
+	NULL,
+};
+
+static struct qmi_buf *
+rmnet_alloc_qmi(unsigned len, gfp_t kmalloc_flags)
+{
+	struct qmi_buf *qmi;
+
+	qmi = kmalloc(sizeof(struct qmi_buf), kmalloc_flags);
+	if (qmi != NULL) {
+		qmi->buf = kmalloc(len, kmalloc_flags);
+		if (qmi->buf == NULL) {
+			kfree(qmi);
+			qmi = NULL;
+		}
+	}
+
+	return qmi ? qmi : ERR_PTR(-ENOMEM);
+}
+
+static void rmnet_free_qmi(struct qmi_buf *qmi)
+{
+	kfree(qmi->buf);
+	kfree(qmi);
+}
+
+/*
+ * Allocate a usb_request and its buffer.  Returns a pointer to the
+ * usb_request or an error pointer if allocation fails.
+ */
+static struct usb_request *
+rmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
+{
+	struct usb_request *req;
+
+	req = usb_ep_alloc_request(ep, kmalloc_flags);
+
+	if (req != NULL) {
+		req->length = len;
+		req->buf = kmalloc(len, kmalloc_flags);
+		if (req->buf == NULL) {
+			usb_ep_free_request(ep, req);
+			req = NULL;
+		}
+	}
+
+	return req ? req : ERR_PTR(-ENOMEM);
+}
+
+/*
+ * Free a usb_request and its buffer.
+ */
+static void rmnet_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+	kfree(req->buf);
+	usb_ep_free_request(ep, req);
+}
+
+static void rmnet_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	struct usb_composite_dev *cdev = dev->cdev;
+	int status = req->status;
+
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		atomic_set(&dev->notify_count, 0);
+		break;
+	default:
+		ERROR(cdev, "rmnet notify ep error %d\n", status);
+		/* FALLTHROUGH */
+	case 0:
+		if (ep != dev->epnotify)
+			break;
+
+		/* handle multiple pending QMI_RESPONSE_AVAILABLE
+		 * notifications by resending until we're done
+		 */
+		if (atomic_dec_and_test(&dev->notify_count))
+			break;
+
+		status = usb_ep_queue(dev->epnotify, req, GFP_ATOMIC);
+		if (status) {
+			atomic_dec(&dev->notify_count);
+			ERROR(cdev, "rmnet notify ep enqueue error %d\n",
+					status);
+		}
+		break;
+	}
+}
+
+static void qmi_response_available(struct rmnet_dev *dev)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request		*req = dev->notify_req;
+	struct usb_cdc_notification	*event = req->buf;
+	int status;
+
+	/* A notification is already in flight; rmnet_notify_complete()
+	 * re-queues it until notify_count drops to zero, so this
+	 * response will be signalled later.
+	 */
+	if (atomic_inc_return(&dev->notify_count) != 1)
+		return;
+
+	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+			| USB_RECIP_INTERFACE;
+	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
+	event->wValue = cpu_to_le16(0);
+	event->wIndex = cpu_to_le16(dev->ifc_id);
+	event->wLength = cpu_to_le16(0);
+
+	status = usb_ep_queue(dev->epnotify, dev->notify_req, GFP_ATOMIC);
+	if (status < 0) {
+		atomic_dec(&dev->notify_count);
+		ERROR(cdev, "rmnet notify ep enqueue error %d\n", status);
+	}
+}
+
+/* TODO
+ * handle modem restart events
+ */
+static void rmnet_smd_notify(void *priv, unsigned event)
+{
+	struct rmnet_smd_info *smd_info = priv;
+	int len = atomic_read(&smd_info->rx_pkt);
+	struct rmnet_dev *dev = (struct rmnet_dev *) smd_info->tx_tlet.data;
+
+	switch (event) {
+	case SMD_EVENT_DATA: {
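+		/*
+		 * rx_pkt holds the length of a host packet that was deferred
+		 * because SMD was full; once enough write space is available
+		 * the rx tasklet retries it.  Data readable from the modem
+		 * schedules the tx tasklet toward the host.
+		 */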
+		if (!atomic_read(&dev->online))
+			break;
+		if (len && (smd_write_avail(smd_info->ch) >= len))
+			tasklet_schedule(&smd_info->rx_tlet);
+
+		if (smd_read_avail(smd_info->ch))
+			tasklet_schedule(&smd_info->tx_tlet);
+
+		break;
+	}
+	case SMD_EVENT_OPEN:
+		/* usb endpoints are not enabled until smd channels
+		 * are opened. wake up the worker thread to continue
+		 * connection processing
+		 */
+		set_bit(CH_OPENED, &smd_info->flags);
+		wake_up(&smd_info->wait);
+		break;
+	case SMD_EVENT_CLOSE:
+		/* We should never get here, but clear the flag
+		 * anyway once the smd channel is closed.
+		 */
+		clear_bit(CH_OPENED, &smd_info->flags);
+		break;
+	}
+}
+
+static void rmnet_control_tx_tlet(unsigned long arg)
+{
+	struct rmnet_dev *dev = (struct rmnet_dev *) arg;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct qmi_buf *qmi_resp;
+	int sz;
+	unsigned long flags;
+
+	while (1) {
+		sz = smd_cur_packet_size(dev->smd_ctl.ch);
+		if (sz == 0)
+			break;
+		if (smd_read_avail(dev->smd_ctl.ch) < sz)
+			break;
+
+		spin_lock_irqsave(&dev->lock, flags);
+		if (list_empty(&dev->qmi_resp_pool)) {
+			ERROR(cdev, "rmnet QMI Tx buffers full\n");
+			spin_unlock_irqrestore(&dev->lock, flags);
+			break;
+		}
+		qmi_resp = list_first_entry(&dev->qmi_resp_pool,
+				struct qmi_buf, list);
+		list_del(&qmi_resp->list);
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		qmi_resp->len = smd_read(dev->smd_ctl.ch, qmi_resp->buf, sz);
+
+		spin_lock_irqsave(&dev->lock, flags);
+		dev->cpkts_from_modem++;
+		list_add_tail(&qmi_resp->list, &dev->qmi_resp_q);
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		qmi_response_available(dev);
+	}
+
+}
+
+static void rmnet_control_rx_tlet(unsigned long arg)
+{
+	struct rmnet_dev *dev = (struct rmnet_dev *) arg;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct qmi_buf *qmi_req;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	while (1) {
+
+		if (list_empty(&dev->qmi_req_q)) {
+			atomic_set(&dev->smd_ctl.rx_pkt, 0);
+			break;
+		}
+		qmi_req = list_first_entry(&dev->qmi_req_q,
+				struct qmi_buf, list);
+		if (smd_write_avail(dev->smd_ctl.ch) < qmi_req->len) {
+			atomic_set(&dev->smd_ctl.rx_pkt, qmi_req->len);
+			DBG(cdev, "rmnet control smd channel full\n");
+			break;
+		}
+
+		list_del(&qmi_req->list);
+		dev->cpkts_from_host++;
+		spin_unlock_irqrestore(&dev->lock, flags);
+		ret = smd_write(dev->smd_ctl.ch, qmi_req->buf, qmi_req->len);
+		spin_lock_irqsave(&dev->lock, flags);
+		if (ret != qmi_req->len) {
+			ERROR(cdev, "rmnet control smd write failed\n");
+			break;
+		}
+		dev->cpkts_to_modem++;
+		list_add_tail(&qmi_req->list, &dev->qmi_req_pool);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void rmnet_command_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct qmi_buf *qmi_req;
+	int ret;
+
+	if (req->status < 0) {
+		ERROR(cdev, "rmnet command error %d\n", req->status);
+		return;
+	}
+
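+	/*
+	 * If nothing is queued and the control SMD channel has room,
+	 * write the QMI request straight through; otherwise park it on
+	 * qmi_req_q for the control rx tasklet to drain.
+	 */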
+	spin_lock(&dev->lock);
+	dev->cpkts_from_host++;
+	/* no pending control rx packet */
+	if (!atomic_read(&dev->smd_ctl.rx_pkt)) {
+		if (smd_write_avail(dev->smd_ctl.ch) < req->actual) {
+			atomic_set(&dev->smd_ctl.rx_pkt, req->actual);
+			goto queue_req;
+		}
+		spin_unlock(&dev->lock);
+		ret = smd_write(dev->smd_ctl.ch, req->buf, req->actual);
+		/* This should never happen */
+		if (ret != req->actual)
+			ERROR(cdev, "rmnet control smd write failed\n");
+		spin_lock(&dev->lock);
+		dev->cpkts_to_modem++;
+		spin_unlock(&dev->lock);
+		return;
+	}
+queue_req:
+	if (list_empty(&dev->qmi_req_pool)) {
+		spin_unlock(&dev->lock);
+		ERROR(cdev, "rmnet QMI pool is empty\n");
+		return;
+	}
+
+	qmi_req = list_first_entry(&dev->qmi_req_pool, struct qmi_buf, list);
+	list_del(&qmi_req->list);
+	spin_unlock(&dev->lock);
+	memcpy(qmi_req->buf, req->buf, req->actual);
+	qmi_req->len = req->actual;
+	spin_lock(&dev->lock);
+	list_add_tail(&qmi_req->list, &dev->qmi_req_q);
+	spin_unlock(&dev->lock);
+}
+
+static void rmnet_txcommand_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+
+	spin_lock(&dev->lock);
+	dev->cpkts_to_host++;
+	spin_unlock(&dev->lock);
+}
+
+static int
+rmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request	*req = cdev->req;
+	int			ret = -EOPNOTSUPP;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+	struct qmi_buf *resp;
+	int schedule = 0;
+
+	if (!atomic_read(&dev->online))
+		return -ENOTCONN;
+
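+	/*
+	 * QMI messages are tunnelled over the CDC encapsulated
+	 * command/response requests on ep0, and DTR changes from the
+	 * host are forwarded to the control SMD channel.
+	 */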
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
+		if (w_length > req->length)
+			goto invalid;
+		ret = w_length;
+		req->complete = rmnet_command_complete;
+		req->context = dev;
+		break;
+
+
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
+		if (w_value)
+			goto invalid;
+		else {
+			spin_lock(&dev->lock);
+			if (list_empty(&dev->qmi_resp_q)) {
+				INFO(cdev, "qmi resp empty "
+					"req%02x.%02x v%04x i%04x l%d\n",
+					ctrl->bRequestType, ctrl->bRequest,
+					w_value, w_index, w_length);
+				spin_unlock(&dev->lock);
+				goto invalid;
+			}
+			resp = list_first_entry(&dev->qmi_resp_q,
+					struct qmi_buf, list);
+			list_del(&resp->list);
+			spin_unlock(&dev->lock);
+			memcpy(req->buf, resp->buf, resp->len);
+			ret = resp->len;
+			spin_lock(&dev->lock);
+
+			if (list_empty(&dev->qmi_resp_pool))
+				schedule = 1;
+			list_add_tail(&resp->list, &dev->qmi_resp_pool);
+
+			if (schedule)
+				tasklet_schedule(&dev->smd_ctl.tx_tlet);
+			spin_unlock(&dev->lock);
+			req->complete = rmnet_txcommand_complete;
+			req->context = dev;
+		}
+		break;
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+		/* This is a workaround for RmNet and is borrowed from the
+		 * CDC/ACM standard. The host driver will issue the above ACM
+		 * standard request to the RmNet interface in the following
+		 * scenario: Once the network adapter is disabled from device
+		 * manager, the above request will be sent from the qcusbnet
+		 * host driver, with DTR being '0'. Once network adapter is
+		 * enabled from device manager (or during enumeration), the
+		 * request will be sent with DTR being '1'.
+		 */
+		if (w_value & ACM_CTRL_DTR)
+			ret = smd_tiocmset(dev->smd_ctl.ch, TIOCM_DTR, 0);
+		else
+			ret = smd_tiocmset(dev->smd_ctl.ch, 0, TIOCM_DTR);
+
+		break;
+	default:
+
+invalid:
+		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (ret >= 0) {
+		VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = 0;
+		req->length = ret;
+		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (ret < 0)
+			ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
+	}
+
+	return ret;
+}
+
+static void rmnet_start_rx(struct rmnet_dev *dev)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	int status;
+	struct usb_request *req;
+	struct list_head *pool = &dev->rx_idle;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	while (!list_empty(pool)) {
+		req = list_entry(pool->next, struct usb_request, list);
+		list_del(&req->list);
+
+		spin_unlock_irqrestore(&dev->lock, flags);
+		status = usb_ep_queue(dev->epout, req, GFP_ATOMIC);
+		spin_lock_irqsave(&dev->lock, flags);
+
+		if (status) {
+			ERROR(cdev, "rmnet data rx enqueue err %d\n", status);
+			list_add_tail(&req->list, pool);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void rmnet_data_tx_tlet(unsigned long arg)
+{
+	struct rmnet_dev *dev = (struct rmnet_dev *) arg;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	int status;
+	int sz;
+	unsigned long flags;
+
+	while (1) {
+
+		sz = smd_cur_packet_size(dev->smd_data.ch);
+		if (sz == 0)
+			break;
+		if (smd_read_avail(dev->smd_data.ch) < sz)
+			break;
+
+		spin_lock_irqsave(&dev->lock, flags);
+		if (list_empty(&dev->tx_idle)) {
+			spin_unlock_irqrestore(&dev->lock, flags);
+			DBG(cdev, "rmnet data Tx buffers full\n");
+			break;
+		}
+		req = list_first_entry(&dev->tx_idle, struct usb_request, list);
+		list_del(&req->list);
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		req->length = smd_read(dev->smd_data.ch, req->buf, sz);
+		status = usb_ep_queue(dev->epin, req, GFP_ATOMIC);
+		if (status) {
+			ERROR(cdev, "rmnet tx data enqueue err %d\n", status);
+			spin_lock_irqsave(&dev->lock, flags);
+			list_add_tail(&req->list, &dev->tx_idle);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			break;
+		}
+		spin_lock_irqsave(&dev->lock, flags);
+		dev->dpkts_from_modem++;
+		spin_unlock_irqrestore(&dev->lock, flags);
+	}
+
+}
+
+static void rmnet_data_rx_tlet(unsigned long arg)
+{
+	struct rmnet_dev *dev = (struct rmnet_dev *) arg;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	while (1) {
+		if (list_empty(&dev->rx_queue)) {
+			atomic_set(&dev->smd_data.rx_pkt, 0);
+			break;
+		}
+		req = list_first_entry(&dev->rx_queue,
+			struct usb_request, list);
+		if (smd_write_avail(dev->smd_data.ch) < req->actual) {
+			atomic_set(&dev->smd_data.rx_pkt, req->actual);
+			DBG(cdev, "rmnet SMD data channel full\n");
+			break;
+		}
+
+		list_del(&req->list);
+		spin_unlock_irqrestore(&dev->lock, flags);
+		ret = smd_write(dev->smd_data.ch, req->buf, req->actual);
+		spin_lock_irqsave(&dev->lock, flags);
+		if (ret != req->actual) {
+			ERROR(cdev, "rmnet SMD data write failed\n");
+			break;
+		}
+		dev->dpkts_to_modem++;
+		list_add_tail(&req->list, &dev->rx_idle);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	/* We have free rx data requests. */
+	rmnet_start_rx(dev);
+}
+
+/* If SMD has enough room to accommodate a data rx packet,
+ * write into SMD directly. Otherwise enqueue to rx_queue.
+ * We will not write into SMD directly until rx_queue is
+ * empty, to strictly preserve the ordering of requests.
+ */
+static void rmnet_complete_epout(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	struct usb_composite_dev *cdev = dev->cdev;
+	int status = req->status;
+	int ret;
+
+	switch (status) {
+	case 0:
+		/* normal completion */
+		break;
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		spin_lock(&dev->lock);
+		list_add_tail(&req->list, &dev->rx_idle);
+		spin_unlock(&dev->lock);
+		return;
+	default:
+		/* unexpected failure */
+		ERROR(cdev, "RMNET %s response error %d, %d/%d\n",
+			ep->name, status,
+			req->actual, req->length);
+		spin_lock(&dev->lock);
+		list_add_tail(&req->list, &dev->rx_idle);
+		spin_unlock(&dev->lock);
+		return;
+	}
+
+	spin_lock(&dev->lock);
+	dev->dpkts_from_host++;
+	if (!atomic_read(&dev->smd_data.rx_pkt)) {
+		if (smd_write_avail(dev->smd_data.ch) < req->actual) {
+			atomic_set(&dev->smd_data.rx_pkt, req->actual);
+			goto queue_req;
+		}
+		spin_unlock(&dev->lock);
+		ret = smd_write(dev->smd_data.ch, req->buf, req->actual);
+		/* This should never happen */
+		if (ret != req->actual)
+			ERROR(cdev, "rmnet data smd write failed\n");
+		/* Restart Rx */
+		spin_lock(&dev->lock);
+		dev->dpkts_to_modem++;
+		list_add_tail(&req->list, &dev->rx_idle);
+		spin_unlock(&dev->lock);
+		rmnet_start_rx(dev);
+		return;
+	}
+queue_req:
+	list_add_tail(&req->list, &dev->rx_queue);
+	spin_unlock(&dev->lock);
+}
+
+static void rmnet_complete_epin(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	struct usb_composite_dev *cdev = dev->cdev;
+	int status = req->status;
+	int schedule = 0;
+
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		spin_lock(&dev->lock);
+		list_add_tail(&req->list, &dev->tx_idle);
+		spin_unlock(&dev->lock);
+		break;
+	default:
+		ERROR(cdev, "rmnet data tx ep error %d\n", status);
+		/* FALLTHROUGH */
+	case 0:
+		spin_lock(&dev->lock);
+		if (list_empty(&dev->tx_idle))
+			schedule = 1;
+		list_add_tail(&req->list, &dev->tx_idle);
+		dev->dpkts_to_host++;
+		if (schedule)
+			tasklet_schedule(&dev->smd_data.tx_tlet);
+		spin_unlock(&dev->lock);
+		break;
+	}
+
+}
+
+static void rmnet_disconnect_work(struct work_struct *w)
+{
+	struct qmi_buf *qmi;
+	struct usb_request *req;
+	struct list_head *act, *tmp;
+	struct rmnet_dev *dev = container_of(w, struct rmnet_dev,
+					disconnect_work);
+
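+	/*
+	 * Kill the tasklets before closing the SMD channels so no
+	 * callback runs against a closed channel, then return every
+	 * queued buffer to its idle pool.
+	 */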
+	tasklet_kill(&dev->smd_ctl.rx_tlet);
+	tasklet_kill(&dev->smd_ctl.tx_tlet);
+	tasklet_kill(&dev->smd_data.rx_tlet);
+	tasklet_kill(&dev->smd_data.tx_tlet);
+
+	smd_close(dev->smd_ctl.ch);
+	dev->smd_ctl.flags = 0;
+
+	smd_close(dev->smd_data.ch);
+	dev->smd_data.flags = 0;
+
+	atomic_set(&dev->notify_count, 0);
+
+	list_for_each_safe(act, tmp, &dev->rx_queue) {
+		req = list_entry(act, struct usb_request, list);
+		list_del(&req->list);
+		list_add_tail(&req->list, &dev->rx_idle);
+	}
+
+	list_for_each_safe(act, tmp, &dev->qmi_req_q) {
+		qmi = list_entry(act, struct qmi_buf, list);
+		list_del(&qmi->list);
+		list_add_tail(&qmi->list, &dev->qmi_req_pool);
+	}
+
+	list_for_each_safe(act, tmp, &dev->qmi_resp_q) {
+		qmi = list_entry(act, struct qmi_buf, list);
+		list_del(&qmi->list);
+		list_add_tail(&qmi->list, &dev->qmi_resp_pool);
+	}
+
+}
+
+/* SMD close may sleep
+ * schedule a work to close smd channels
+ */
+static void rmnet_disable(struct usb_function *f)
+{
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+
+	if (!atomic_read(&dev->online))
+		return;
+
+	atomic_set(&dev->online, 0);
+
+	usb_ep_fifo_flush(dev->epnotify);
+	usb_ep_disable(dev->epnotify);
+	usb_ep_fifo_flush(dev->epout);
+	usb_ep_disable(dev->epout);
+
+	usb_ep_fifo_flush(dev->epin);
+	usb_ep_disable(dev->epin);
+
+	/* cleanup work */
+	queue_work(dev->wq, &dev->disconnect_work);
+}
+
+static void rmnet_connect_work(struct work_struct *w)
+{
+	struct rmnet_dev *dev = container_of(w, struct rmnet_dev, connect_work);
+	struct usb_composite_dev *cdev = dev->cdev;
+	int ret = 0;
+
+	/* Control channel for QMI messages */
+	ret = smd_open(rmnet_ctl_ch, &dev->smd_ctl.ch,
+			&dev->smd_ctl, rmnet_smd_notify);
+	if (ret) {
+		ERROR(cdev, "Unable to open control smd channel\n");
+		return;
+	}
+	wait_event(dev->smd_ctl.wait, test_bit(CH_OPENED,
+				&dev->smd_ctl.flags));
+
+	/* Data channel for network packets */
+	ret = smd_open(rmnet_data_ch, &dev->smd_data.ch,
+			&dev->smd_data, rmnet_smd_notify);
+	if (ret) {
+		ERROR(cdev, "Unable to open data smd channel\n");
+		smd_close(dev->smd_ctl.ch);
+		return;
+	}
+	wait_event(dev->smd_data.wait, test_bit(CH_OPENED,
+				&dev->smd_data.flags));
+
+	atomic_set(&dev->online, 1);
+	/* Queue Rx data requests */
+	rmnet_start_rx(dev);
+}
+
+/* SMD open may sleep.
+ * Schedule a work to open smd channels and enable
+ * endpoints if smd channels are opened successfully.
+ */
+static int rmnet_set_alt(struct usb_function *f,
+		unsigned intf, unsigned alt)
+{
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+	struct usb_composite_dev *cdev = dev->cdev;
+	int ret = 0;
+
+	ret = usb_ep_enable(dev->epin, ep_choose(cdev->gadget,
+				&rmnet_hs_in_desc,
+				&rmnet_fs_in_desc));
+	if (ret) {
+		ERROR(cdev, "can't enable %s, result %d\n",
+					dev->epin->name, ret);
+		return ret;
+	}
+	ret = usb_ep_enable(dev->epout, ep_choose(cdev->gadget,
+				&rmnet_hs_out_desc,
+				&rmnet_fs_out_desc));
+	if (ret) {
+		ERROR(cdev, "can't enable %s, result %d\n",
+					dev->epout->name, ret);
+		usb_ep_disable(dev->epin);
+		return ret;
+	}
+
+	ret = usb_ep_enable(dev->epnotify, ep_choose(cdev->gadget,
+				&rmnet_hs_notify_desc,
+				&rmnet_fs_notify_desc));
+	if (ret) {
+		ERROR(cdev, "can't enable %s, result %d\n",
+					dev->epnotify->name, ret);
+		usb_ep_disable(dev->epin);
+		usb_ep_disable(dev->epout);
+		return ret;
+	}
+
+	queue_work(dev->wq, &dev->connect_work);
+	return 0;
+}
+
+static void rmnet_free_buf(struct rmnet_dev *dev)
+{
+	struct qmi_buf *qmi;
+	struct usb_request *req;
+	struct list_head *act, *tmp;
+
+	dev->dpkts_to_host = 0;
+	dev->dpkts_from_modem = 0;
+	dev->dpkts_from_host = 0;
+	dev->dpkts_to_modem = 0;
+
+	dev->cpkts_to_host = 0;
+	dev->cpkts_from_modem = 0;
+	dev->cpkts_from_host = 0;
+	dev->cpkts_to_modem = 0;
+	/* free all usb requests in tx pool */
+	list_for_each_safe(act, tmp, &dev->tx_idle) {
+		req = list_entry(act, struct usb_request, list);
+		list_del(&req->list);
+		rmnet_free_req(dev->epout, req);
+	}
+
+	/* free all usb requests in rx pool */
+	list_for_each_safe(act, tmp, &dev->rx_idle) {
+		req = list_entry(act, struct usb_request, list);
+		list_del(&req->list);
+		rmnet_free_req(dev->epin, req);
+	}
+
+	/* free all buffers in qmi request pool */
+	list_for_each_safe(act, tmp, &dev->qmi_req_pool) {
+		qmi = list_entry(act, struct qmi_buf, list);
+		list_del(&qmi->list);
+		rmnet_free_qmi(qmi);
+	}
+
+	/* free all buffers in qmi request pool */
+	list_for_each_safe(act, tmp, &dev->qmi_resp_pool) {
+		qmi = list_entry(act, struct qmi_buf, list);
+		list_del(&qmi->list);
+		rmnet_free_qmi(qmi);
+	}
+
+	rmnet_free_req(dev->epnotify, dev->notify_req);
+}
+
+static int rmnet_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+	int i, id, ret;
+	struct qmi_buf *qmi;
+	struct usb_request *req;
+	struct usb_ep *ep;
+
+	dev->cdev = cdev;
+
+	/* allocate interface ID */
+	id = usb_interface_id(c, f);
+	if (id < 0)
+		return id;
+	dev->ifc_id = id;
+	rmnet_interface_desc.bInterfaceNumber = id;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc);
+	if (!ep)
+		return -ENODEV;
+	ep->driver_data = cdev; /* claim endpoint */
+	dev->epin = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc);
+	if (!ep)
+		return -ENODEV;
+	ep->driver_data = cdev; /* claim endpoint */
+	dev->epout = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc);
+	if (!ep)
+		return -ENODEV;
+	ep->driver_data = cdev; /* clain endpoint */
+	dev->epnotify = ep;
+
+	/* support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		rmnet_hs_in_desc.bEndpointAddress =
+				rmnet_fs_in_desc.bEndpointAddress;
+		rmnet_hs_out_desc.bEndpointAddress =
+				rmnet_fs_out_desc.bEndpointAddress;
+		rmnet_hs_notify_desc.bEndpointAddress =
+				rmnet_fs_notify_desc.bEndpointAddress;
+
+	}
+
+	/* allocate notification */
+	dev->notify_req = rmnet_alloc_req(dev->epnotify, RMNET_MAX_NOTIFY_SIZE,
+							GFP_KERNEL);
+	if (IS_ERR(dev->notify_req))
+		return PTR_ERR(dev->notify_req);
+
+	dev->notify_req->complete = rmnet_notify_complete;
+	dev->notify_req->context = dev;
+	dev->notify_req->length = RMNET_MAX_NOTIFY_SIZE;
+
+	/* Allocate the qmi request and response buffers */
+	for (i = 0; i < QMI_REQ_MAX; i++) {
+		qmi = rmnet_alloc_qmi(QMI_REQ_SIZE, GFP_KERNEL);
+		if (IS_ERR(qmi)) {
+			ret = PTR_ERR(qmi);
+			goto free_buf;
+		}
+		list_add_tail(&qmi->list, &dev->qmi_req_pool);
+	}
+
+	for (i = 0; i < QMI_RESP_MAX; i++) {
+		qmi = rmnet_alloc_qmi(QMI_RESP_SIZE, GFP_KERNEL);
+		if (IS_ERR(qmi)) {
+			ret = PTR_ERR(qmi);
+			goto free_buf;
+		}
+		list_add_tail(&qmi->list, &dev->qmi_resp_pool);
+	}
+
+	/* Allocate bulk in/out requests for data transfer */
+	for (i = 0; i < RX_REQ_MAX; i++) {
+		req = rmnet_alloc_req(dev->epout, RX_REQ_SIZE, GFP_KERNEL);
+		if (IS_ERR(req)) {
+			ret = PTR_ERR(req);
+			goto free_buf;
+		}
+		req->length = TXN_MAX;
+		req->context = dev;
+		req->complete = rmnet_complete_epout;
+		list_add_tail(&req->list, &dev->rx_idle);
+	}
+
+	for (i = 0; i < TX_REQ_MAX; i++) {
+		req = rmnet_alloc_req(dev->epin, TX_REQ_SIZE, GFP_KERNEL);
+		if (IS_ERR(req)) {
+			ret = PTR_ERR(req);
+			goto free_buf;
+		}
+		req->context = dev;
+		req->complete = rmnet_complete_epin;
+		list_add_tail(&req->list, &dev->tx_idle);
+	}
+
+	return 0;
+
+free_buf:
+	rmnet_free_buf(dev);
+	dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */
+	return ret;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static ssize_t debug_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	struct rmnet_dev *dev = file->private_data;
+	struct rmnet_smd_info smd_ctl_info = dev->smd_ctl;
+	struct rmnet_smd_info smd_data_info = dev->smd_data;
+	char *buf;
+	unsigned long flags;
+	int ret;
+
+	buf = kzalloc(sizeof(char) * 512, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	ret = scnprintf(buf, 512,
+			"smd_control_ch_opened: %lu\n"
+			"smd_data_ch_opened: %lu\n"
+			"usb online : %d\n"
+			"dpkts_from_modem: %lu\n"
+			"dpkts_to_host: %lu\n"
+			"pending_dpkts_to_host: %lu\n"
+			"dpkts_from_host: %lu\n"
+			"dpkts_to_modem: %lu\n"
+			"pending_dpkts_to_modem: %lu\n"
+			"cpkts_from_modem: %lu\n"
+			"cpkts_to_host: %lu\n"
+			"pending_cpkts_to_host: %lu\n"
+			"cpkts_from_host: %lu\n"
+			"cpkts_to_modem: %lu\n"
+			"pending_cpkts_to_modem: %lu\n"
+			"smd_read_avail_ctrl: %d\n"
+			"smd_write_avail_ctrl: %d\n"
+			"smd_read_avail_data: %d\n"
+			"smd_write_avail_data: %d\n",
+			smd_ctl_info.flags, smd_data_info.flags,
+			atomic_read(&dev->online),
+			dev->dpkts_from_modem, dev->dpkts_to_host,
+			(dev->dpkts_from_modem - dev->dpkts_to_host),
+			dev->dpkts_from_host, dev->dpkts_to_modem,
+			(dev->dpkts_from_host - dev->dpkts_to_modem),
+			dev->cpkts_from_modem, dev->cpkts_to_host,
+			(dev->cpkts_from_modem - dev->cpkts_to_host),
+			dev->cpkts_from_host, dev->cpkts_to_modem,
+			(dev->cpkts_from_host - dev->cpkts_to_modem),
+			smd_read_avail(dev->smd_ctl.ch),
+			smd_write_avail(dev->smd_ctl.ch),
+			smd_read_avail(dev->smd_data.ch),
+			smd_write_avail(dev->smd_data.ch));
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret);
+
+	kfree(buf);
+
+	return ret;
+}
+
+static ssize_t debug_reset_stats(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	struct rmnet_dev *dev = file->private_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	dev->dpkts_to_host = 0;
+	dev->dpkts_from_modem = 0;
+	dev->dpkts_from_host = 0;
+	dev->dpkts_to_modem = 0;
+
+	dev->cpkts_to_host = 0;
+	dev->cpkts_from_modem = 0;
+	dev->cpkts_from_host = 0;
+	dev->cpkts_to_modem = 0;
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return count;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+
+	return 0;
+}
+
+static const struct file_operations rmnet_debug_stats_ops = {
+	.open = debug_open,
+	.read = debug_read_stats,
+	.write = debug_reset_stats,
+};
+
+static struct dentry *dent;
+static struct dentry *dent_status;
+
+static void usb_debugfs_init(struct rmnet_dev *dev)
+{
+
+	dent = debugfs_create_dir("usb_rmnet", NULL);
+	if (IS_ERR(dent))
+		return;
+
+	dent_status = debugfs_create_file("status", 0444, dent, dev,
+			&rmnet_debug_stats_ops);
+
+	if (!dent_status) {
+		debugfs_remove(dent);
+		dent = NULL;
+		return;
+	}
+
+	return;
+}
+#else
+static void usb_debugfs_init(struct rmnet_dev *dev) {}
+#endif
+
+static void
+rmnet_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+
+	tasklet_kill(&dev->smd_ctl.rx_tlet);
+	tasklet_kill(&dev->smd_ctl.tx_tlet);
+	tasklet_kill(&dev->smd_data.rx_tlet);
+	tasklet_kill(&dev->smd_data.tx_tlet);
+
+	flush_workqueue(dev->wq);
+	rmnet_free_buf(dev);
+	dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */
+
+	destroy_workqueue(dev->wq);
+	debugfs_remove_recursive(dent);
+	kfree(dev);
+
+}
+
+int rmnet_function_add(struct usb_configuration *c)
+{
+	struct rmnet_dev *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->wq = create_singlethread_workqueue("k_rmnet_work");
+	if (!dev->wq) {
+		ret = -ENOMEM;
+		goto free_dev;
+	}
+
+	spin_lock_init(&dev->lock);
+	atomic_set(&dev->notify_count, 0);
+	atomic_set(&dev->online, 0);
+	atomic_set(&dev->smd_ctl.rx_pkt, 0);
+	atomic_set(&dev->smd_data.rx_pkt, 0);
+
+	INIT_WORK(&dev->connect_work, rmnet_connect_work);
+	INIT_WORK(&dev->disconnect_work, rmnet_disconnect_work);
+
+	tasklet_init(&dev->smd_ctl.rx_tlet, rmnet_control_rx_tlet,
+					(unsigned long) dev);
+	tasklet_init(&dev->smd_ctl.tx_tlet, rmnet_control_tx_tlet,
+					(unsigned long) dev);
+	tasklet_init(&dev->smd_data.rx_tlet, rmnet_data_rx_tlet,
+					(unsigned long) dev);
+	tasklet_init(&dev->smd_data.tx_tlet, rmnet_data_tx_tlet,
+					(unsigned long) dev);
+
+	init_waitqueue_head(&dev->smd_ctl.wait);
+	init_waitqueue_head(&dev->smd_data.wait);
+
+	INIT_LIST_HEAD(&dev->qmi_req_pool);
+	INIT_LIST_HEAD(&dev->qmi_req_q);
+	INIT_LIST_HEAD(&dev->qmi_resp_pool);
+	INIT_LIST_HEAD(&dev->qmi_resp_q);
+	INIT_LIST_HEAD(&dev->rx_idle);
+	INIT_LIST_HEAD(&dev->rx_queue);
+	INIT_LIST_HEAD(&dev->tx_idle);
+
+	dev->function.name = "rmnet";
+	dev->function.strings = rmnet_strings;
+	dev->function.descriptors = rmnet_fs_function;
+	dev->function.hs_descriptors = rmnet_hs_function;
+	dev->function.bind = rmnet_bind;
+	dev->function.unbind = rmnet_unbind;
+	dev->function.setup = rmnet_setup;
+	dev->function.set_alt = rmnet_set_alt;
+	dev->function.disable = rmnet_disable;
+
+	ret = usb_add_function(c, &dev->function);
+	if (ret)
+		goto free_wq;
+
+	usb_debugfs_init(dev);
+
+	return 0;
+
+free_wq:
+	destroy_workqueue(dev->wq);
+free_dev:
+	kfree(dev);
+
+	return ret;
+}
+
+#ifdef CONFIG_USB_ANDROID_RMNET
+static struct android_usb_function rmnet_function = {
+	.name = "rmnet",
+	.bind_config = rmnet_function_add,
+};
+
+static int __init init(void)
+{
+	android_register_function(&rmnet_function);
+	return 0;
+}
+module_init(init);
+
+#endif /* CONFIG_USB_ANDROID_RMNET */
diff --git a/drivers/usb/gadget/f_rmnet_smd_sdio.c b/drivers/usb/gadget/f_rmnet_smd_sdio.c
new file mode 100644
index 0000000..e99716b
--- /dev/null
+++ b/drivers/usb/gadget/f_rmnet_smd_sdio.c
@@ -0,0 +1,1995 @@
+/*
+ * f_rmnet_smd_sdio.c -- RmNet SMD & SDIO function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (c) 2011 Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <asm/ioctls.h>
+
+#include <linux/usb/cdc.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/android_composite.h>
+#include <linux/termios.h>
+#include <linux/debugfs.h>
+
+#include <mach/msm_smd.h>
+#include <mach/sdio_cmux.h>
+#include <mach/sdio_dmux.h>
+
+static uint32_t rmnet_sdio_ctl_ch = CONFIG_RMNET_SMD_SDIO_CTL_CHANNEL;
+module_param(rmnet_sdio_ctl_ch, uint, S_IRUGO);
+MODULE_PARM_DESC(rmnet_sdio_ctl_ch, "RmNet control SDIO channel ID");
+
+static uint32_t rmnet_sdio_data_ch = CONFIG_RMNET_SMD_SDIO_DATA_CHANNEL;
+module_param(rmnet_sdio_data_ch, uint, S_IRUGO);
+MODULE_PARM_DESC(rmnet_sdio_data_ch, "RmNet data SDIO channel ID");
+
+static char *rmnet_smd_data_ch = CONFIG_RMNET_SDIO_SMD_DATA_CHANNEL;
+module_param(rmnet_smd_data_ch, charp, S_IRUGO);
+MODULE_PARM_DESC(rmnet_smd_data_ch, "RmNet data SMD channel");
+
+#define ACM_CTRL_DTR	(1 << 0)
+
+#define SDIO_MUX_HDR           8
+#define RMNET_SDIO_NOTIFY_INTERVAL  5
+#define RMNET_SDIO_MAX_NFY_SZE  sizeof(struct usb_cdc_notification)
+
+#define RMNET_SDIO_RX_REQ_MAX             16
+#define RMNET_SDIO_RX_REQ_SIZE            2048
+#define RMNET_SDIO_TX_REQ_MAX             100
+
+#define RMNET_SDIO_TX_PKT_DROP_THRESHOLD		1000
+#define RMNET_SDIO_RX_PKT_FLOW_CTRL_EN_THRESHOLD	1000
+#define RMNET_SDIO_RX_PKT_FLOW_CTRL_DISABLE		500
+
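+/*
+ * Flow control: host-bound skbs are dropped once the USB tx backlog
+ * exceeds the drop threshold; OUT requests stop being requeued when
+ * the packets pending at the SDIO DMUX cross the enable threshold and
+ * resume once they fall below the disable threshold.
+ */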
+static uint32_t sdio_tx_pkt_drop_thld = RMNET_SDIO_TX_PKT_DROP_THRESHOLD;
+module_param(sdio_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);
+
+static uint32_t sdio_rx_fctrl_en_thld =
+		RMNET_SDIO_RX_PKT_FLOW_CTRL_EN_THRESHOLD;
+module_param(sdio_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);
+
+static uint32_t sdio_rx_fctrl_dis_thld = RMNET_SDIO_RX_PKT_FLOW_CTRL_DISABLE;
+module_param(sdio_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);
+
+#define RMNET_SMD_RX_REQ_MAX		8
+#define RMNET_SMD_RX_REQ_SIZE		2048
+#define RMNET_SMD_TX_REQ_MAX		8
+#define RMNET_SMD_TX_REQ_SIZE		2048
+#define RMNET_SMD_TXN_MAX		2048
+
+struct rmnet_ctrl_pkt {
+	void *buf;
+	int len;
+	struct list_head list;
+};
+
+enum usb_rmnet_xport_type {
+	USB_RMNET_XPORT_UNDEFINED,
+	USB_RMNET_XPORT_SDIO,
+	USB_RMNET_XPORT_SMD,
+};
+
+struct rmnet_ctrl_dev {
+	struct list_head tx_q;
+	wait_queue_head_t tx_wait_q;
+	unsigned long tx_len;
+
+	struct list_head rx_q;
+	unsigned long rx_len;
+
+	unsigned long cbits_to_modem;
+
+	unsigned	opened;
+};
+
+struct rmnet_sdio_dev {
+	/* Tx/Rx lists */
+	struct list_head tx_idle;
+	struct sk_buff_head    tx_skb_queue;
+	struct list_head rx_idle;
+	struct sk_buff_head    rx_skb_queue;
+
+	struct work_struct data_rx_work;
+
+	struct delayed_work open_work;
+	atomic_t sdio_open;
+
+	unsigned int dpkts_pending_atdmux;
+};
+
+/* Data SMD channel */
+struct rmnet_smd_info {
+	struct smd_channel *ch;
+	struct tasklet_struct tx_tlet;
+	struct tasklet_struct rx_tlet;
+#define CH_OPENED 0
+	unsigned long flags;
+	/* pending rx packet length */
+	atomic_t rx_pkt;
+	/* wait for smd open event */
+	wait_queue_head_t wait;
+};
+
+struct rmnet_smd_dev {
+	/* Tx/Rx lists */
+	struct list_head tx_idle;
+	struct list_head rx_idle;
+	struct list_head rx_queue;
+
+	struct rmnet_smd_info smd_data;
+};
+
+struct rmnet_dev {
+	struct usb_function function;
+	struct usb_composite_dev *cdev;
+
+	struct usb_ep *epout;
+	struct usb_ep *epin;
+	struct usb_ep *epnotify;
+	struct usb_request *notify_req;
+
+	struct rmnet_smd_dev smd_dev;
+	struct rmnet_sdio_dev sdio_dev;
+	struct rmnet_ctrl_dev ctrl_dev;
+
+	u8 ifc_id;
+	enum usb_rmnet_xport_type xport;
+	spinlock_t lock;
+	atomic_t online;
+	atomic_t notify_count;
+	struct workqueue_struct *wq;
+	struct work_struct disconnect_work;
+
+	/* pkt counters */
+	unsigned long dpkts_tomsm;
+	unsigned long dpkts_tomdm;
+	unsigned long dpkts_tolaptop;
+	unsigned long tx_drp_cnt;
+	unsigned long cpkts_tolaptop;
+	unsigned long cpkts_tomdm;
+	unsigned long cpkts_drp_cnt;
+};
+
+static struct rmnet_dev *_dev;
+
+static struct usb_interface_descriptor rmnet_interface_desc = {
+	.bLength =              USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =      USB_DT_INTERFACE,
+	.bNumEndpoints =        3,
+	.bInterfaceClass =      USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass =   USB_CLASS_VENDOR_SPEC,
+	.bInterfaceProtocol =   USB_CLASS_VENDOR_SPEC,
+};
+
+/* Full speed support */
+static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bEndpointAddress =     USB_DIR_IN,
+	.bmAttributes =         USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_SDIO_MAX_NFY_SZE),
+	.bInterval =            1 << RMNET_SDIO_NOTIFY_INTERVAL,
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_in_desc  = {
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bEndpointAddress =     USB_DIR_IN,
+	.bmAttributes =         USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(64),
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bEndpointAddress =     USB_DIR_OUT,
+	.bmAttributes =         USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize = __constant_cpu_to_le16(64),
+};
+
+static struct usb_descriptor_header *rmnet_fs_function[] = {
+	(struct usb_descriptor_header *) &rmnet_interface_desc,
+	(struct usb_descriptor_header *) &rmnet_fs_notify_desc,
+	(struct usb_descriptor_header *) &rmnet_fs_in_desc,
+	(struct usb_descriptor_header *) &rmnet_fs_out_desc,
+	NULL,
+};
+
+/* High speed support */
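+/*
+ * At high speed the interrupt bInterval encodes 2^(bInterval-1)
+ * microframes, so RMNET_SDIO_NOTIFY_INTERVAL + 4 keeps roughly the
+ * same ~32 ms polling period as the full-speed descriptor above.
+ */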
+static struct usb_endpoint_descriptor rmnet_hs_notify_desc  = {
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bEndpointAddress =     USB_DIR_IN,
+	.bmAttributes =         USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_SDIO_MAX_NFY_SZE),
+	.bInterval =            RMNET_SDIO_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bEndpointAddress =     USB_DIR_IN,
+	.bmAttributes =         USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bEndpointAddress =     USB_DIR_OUT,
+	.bmAttributes =         USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *rmnet_hs_function[] = {
+	(struct usb_descriptor_header *) &rmnet_interface_desc,
+	(struct usb_descriptor_header *) &rmnet_hs_notify_desc,
+	(struct usb_descriptor_header *) &rmnet_hs_in_desc,
+	(struct usb_descriptor_header *) &rmnet_hs_out_desc,
+	NULL,
+};
+
+/* String descriptors */
+
+static struct usb_string rmnet_string_defs[] = {
+	[0].s = "RmNet",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings rmnet_string_table = {
+	.language =             0x0409, /* en-us */
+	.strings =              rmnet_string_defs,
+};
+
+static struct usb_gadget_strings *rmnet_strings[] = {
+	&rmnet_string_table,
+	NULL,
+};
+
+static char *xport_to_str(enum usb_rmnet_xport_type t)
+{
+	switch (t) {
+	case USB_RMNET_XPORT_SDIO:
+		return "SDIO";
+	case USB_RMNET_XPORT_SMD:
+		return "SMD";
+	default:
+		return "UNDEFINED";
+	}
+}
+
+static struct rmnet_ctrl_pkt *rmnet_alloc_ctrl_pkt(unsigned len, gfp_t flags)
+{
+	struct rmnet_ctrl_pkt *cpkt;
+
+	cpkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags);
+	if (!cpkt)
+		return NULL;
+
+	cpkt->buf = kzalloc(len, flags);
+	if (!cpkt->buf) {
+		kfree(cpkt);
+		return NULL;
+	}
+
+	cpkt->len = len;
+
+	return cpkt;
+
+}
+
+static void rmnet_free_ctrl_pkt(struct rmnet_ctrl_pkt *cpkt)
+{
+	kfree(cpkt->buf);
+	kfree(cpkt);
+}
+
+/*
+ * Allocate a usb_request and its buffer.  Returns a pointer to the
+ * usb_request or a pointer with an error code if there is an error.
+ */
+static struct usb_request *
+rmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
+{
+	struct usb_request *req;
+
+	req = usb_ep_alloc_request(ep, kmalloc_flags);
+
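+	/*
+	 * A zero length means the caller attaches its own buffer later
+	 * (the SDIO path points req->buf at skb data), so only allocate
+	 * a buffer here when a length is given.
+	 */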
+	if (len && req != NULL) {
+		req->length = len;
+		req->buf = kmalloc(len, kmalloc_flags);
+		if (req->buf == NULL) {
+			usb_ep_free_request(ep, req);
+			req = NULL;
+		}
+	}
+
+	return req ? req : ERR_PTR(-ENOMEM);
+}
+
+/*
+ * Free a usb_request and its buffer.
+ */
+static void rmnet_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+	kfree(req->buf);
+	usb_ep_free_request(ep, req);
+}
+
+static int rmnet_sdio_rx_submit(struct rmnet_dev *dev, struct usb_request *req,
+				gfp_t gfp_flags)
+{
+	struct sk_buff *skb;
+	int retval;
+
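+
+	/* leave room for the SDIO mux header that the DMUX prepends */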
+	skb = alloc_skb(RMNET_SDIO_RX_REQ_SIZE + SDIO_MUX_HDR, gfp_flags);
+	if (skb == NULL)
+		return -ENOMEM;
+	skb_reserve(skb, SDIO_MUX_HDR);
+
+	req->buf = skb->data;
+	req->length = RMNET_SDIO_RX_REQ_SIZE;
+	req->context = skb;
+
+	retval = usb_ep_queue(dev->epout, req, gfp_flags);
+	if (retval)
+		dev_kfree_skb_any(skb);
+
+	return retval;
+}
+
+static void rmnet_sdio_start_rx(struct rmnet_dev *dev)
+{
+	struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+	struct usb_composite_dev *cdev = dev->cdev;
+	int status;
+	struct usb_request *req;
+	struct list_head *pool;
+	unsigned long flags;
+
+	if (!atomic_read(&dev->online)) {
+		pr_debug("%s: USB not connected\n", __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+	pool = &sdio_dev->rx_idle;
+	while (!list_empty(pool)) {
+		req = list_first_entry(pool, struct usb_request, list);
+		list_del(&req->list);
+
+		spin_unlock_irqrestore(&dev->lock, flags);
+		status = rmnet_sdio_rx_submit(dev, req, GFP_KERNEL);
+		spin_lock_irqsave(&dev->lock, flags);
+
+		if (status) {
+			ERROR(cdev, "rmnet data rx enqueue err %d\n", status);
+			list_add_tail(&req->list, &sdio_dev->rx_idle);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void rmnet_sdio_start_tx(struct rmnet_dev *dev)
+{
+	unsigned long			flags;
+	int				status;
+	struct sk_buff			*skb;
+	struct usb_request		*req;
+	struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+	struct usb_composite_dev	*cdev = dev->cdev;
+
+	if (!atomic_read(&dev->online))
+		return;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	while (!list_empty(&sdio_dev->tx_idle)) {
+		skb = __skb_dequeue(&sdio_dev->tx_skb_queue);
+		if (!skb) {
+			spin_unlock_irqrestore(&dev->lock, flags);
+			return;
+		}
+
+		req = list_first_entry(&sdio_dev->tx_idle,
+				struct usb_request, list);
+		req->context = skb;
+		req->buf = skb->data;
+		req->length = skb->len;
+
+		list_del(&req->list);
+		spin_unlock(&dev->lock);
+		status = usb_ep_queue(dev->epin, req, GFP_ATOMIC);
+		spin_lock(&dev->lock);
+		if (status) {
+			/* USB still online, queue requests back */
+			if (atomic_read(&dev->online)) {
+				ERROR(cdev, "rmnet tx data enqueue err %d\n",
+						status);
+				list_add_tail(&req->list, &sdio_dev->tx_idle);
+				__skb_queue_head(&sdio_dev->tx_skb_queue, skb);
+			} else {
+				req->buf = 0;
+				rmnet_free_req(dev->epin, req);
+				dev_kfree_skb_any(skb);
+			}
+			break;
+		}
+		dev->dpkts_tolaptop++;
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void rmnet_sdio_data_receive_cb(void *priv, struct sk_buff *skb)
+{
+	struct rmnet_dev *dev = priv;
+	struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+	unsigned long flags;
+
+	if (!skb)
+		return;
+	if (!atomic_read(&dev->online)) {
+		dev_kfree_skb_any(skb);
+		return;
+	}
+	spin_lock_irqsave(&dev->lock, flags);
+	if (sdio_dev->tx_skb_queue.qlen > sdio_tx_pkt_drop_thld) {
+		pr_err_ratelimited("%s: tx pkt dropped: tx_drop_cnt:%lu\n",
+			__func__, dev->tx_drp_cnt);
+		dev->tx_drp_cnt++;
+		spin_unlock_irqrestore(&dev->lock, flags);
+		dev_kfree_skb_any(skb);
+		return;
+	}
+	__skb_queue_tail(&sdio_dev->tx_skb_queue, skb);
+	spin_unlock_irqrestore(&dev->lock, flags);
+	rmnet_sdio_start_tx(dev);
+}
+
+static void rmnet_sdio_data_write_done(void *priv, struct sk_buff *skb)
+{
+	struct rmnet_dev *dev = priv;
+	struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+
+	if (!skb)
+		return;
+
+	dev_kfree_skb_any(skb);
+	/* the sdio mux invokes this callback with interrupts already
+	 * disabled (under spin_lock_irqsave), so a plain spin_lock
+	 * is sufficient here
+	 */
+	spin_lock(&dev->lock);
+	sdio_dev->dpkts_pending_atdmux--;
+
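+	/*
+	 * One fewer packet pending at the DMUX; resume queueing OUT
+	 * requests only once the backlog drops below the disable
+	 * threshold.
+	 */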
+	if (sdio_dev->dpkts_pending_atdmux >= sdio_rx_fctrl_dis_thld) {
+		spin_unlock(&dev->lock);
+		return;
+	}
+	spin_unlock(&dev->lock);
+
+	rmnet_sdio_start_rx(dev);
+}
+
+static void rmnet_sdio_data_rx_work(struct work_struct *w)
+{
+	struct rmnet_dev *dev = container_of(w, struct rmnet_dev,
+			sdio_dev.data_rx_work);
+	struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+	struct usb_composite_dev *cdev = dev->cdev;
+
+	struct sk_buff *skb;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	while ((skb = __skb_dequeue(&sdio_dev->rx_skb_queue))) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		ret = msm_sdio_dmux_write(rmnet_sdio_data_ch, skb);
+		spin_lock_irqsave(&dev->lock, flags);
+		if (ret < 0) {
+			ERROR(cdev, "rmnet SDIO data write failed\n");
+			dev_kfree_skb_any(skb);
+		} else {
+			dev->dpkts_tomdm++;
+			sdio_dev->dpkts_pending_atdmux++;
+		}
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void
+rmnet_sdio_complete_epout(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = ep->driver_data;
+	struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct sk_buff *skb = req->context;
+	int status = req->status;
+	int queue = 0;
+
+	if (dev->xport == USB_RMNET_XPORT_UNDEFINED) {
+		dev_kfree_skb_any(skb);
+		req->buf = 0;
+		rmnet_free_req(ep, req);
+		return;
+	}
+
+	switch (status) {
+	case 0:
+		/* successful completion */
+		skb_put(skb, req->actual);
+		queue = 1;
+		break;
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		dev_kfree_skb_any(skb);
+		req->buf = 0;
+		rmnet_free_req(ep, req);
+		return;
+	default:
+		/* unexpected failure */
+		ERROR(cdev, "RMNET %s response error %d, %d/%d\n",
+			ep->name, status,
+			req->actual, req->length);
+		dev_kfree_skb_any(skb);
+		break;
+	}
+
+	spin_lock(&dev->lock);
+	if (queue) {
+		__skb_queue_tail(&sdio_dev->rx_skb_queue, skb);
+		queue_work(dev->wq, &sdio_dev->data_rx_work);
+	}
+
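+	/*
+	 * Too many packets pending at the SDIO DMUX: park the request
+	 * on rx_idle until write_done drains the backlog.
+	 */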
+	if (sdio_dev->dpkts_pending_atdmux >= sdio_rx_fctrl_en_thld) {
+		list_add_tail(&req->list, &sdio_dev->rx_idle);
+		spin_unlock(&dev->lock);
+		return;
+	}
+	spin_unlock(&dev->lock);
+
+	status = rmnet_sdio_rx_submit(dev, req, GFP_ATOMIC);
+	if (status) {
+		ERROR(cdev, "rmnet data rx enqueue err %d\n", status);
+		list_add_tail(&req->list, &sdio_dev->rx_idle);
+	}
+}
+
+static void
+rmnet_sdio_complete_epin(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = ep->driver_data;
+	struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+	struct sk_buff  *skb = req->context;
+	struct usb_composite_dev *cdev = dev->cdev;
+	int status = req->status;
+
+	if (dev->xport == USB_RMNET_XPORT_UNDEFINED) {
+		dev_kfree_skb_any(skb);
+		req->buf = 0;
+		rmnet_free_req(ep, req);
+		return;
+	}
+
+	switch (status) {
+	case 0:
+		/* successful completion */
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		break;
+	default:
+		ERROR(cdev, "rmnet data tx ep error %d\n", status);
+		break;
+	}
+
+	spin_lock(&dev->lock);
+	list_add_tail(&req->list, &sdio_dev->tx_idle);
+	spin_unlock(&dev->lock);
+	dev_kfree_skb_any(skb);
+
+	rmnet_sdio_start_tx(dev);
+}
+
+static int rmnet_sdio_enable(struct rmnet_dev *dev)
+{
+	struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+	int i;
+	struct usb_request *req;
+
+	/*
+	 * If the memory allocation fails, all the allocated
+	 * requests will be freed upon cable disconnect.
+	 */
+	for (i = 0; i < RMNET_SDIO_RX_REQ_MAX; i++) {
+		req = rmnet_alloc_req(dev->epout, 0, GFP_KERNEL);
+		if (IS_ERR(req))
+			return PTR_ERR(req);
+		req->complete = rmnet_sdio_complete_epout;
+		list_add_tail(&req->list, &sdio_dev->rx_idle);
+	}
+	for (i = 0; i < RMNET_SDIO_TX_REQ_MAX; i++) {
+		req = rmnet_alloc_req(dev->epin, 0, GFP_KERNEL);
+		if (IS_ERR(req))
+			return PTR_ERR(req);
+		req->complete = rmnet_sdio_complete_epin;
+		list_add_tail(&req->list, &sdio_dev->tx_idle);
+	}
+
+	rmnet_sdio_start_rx(dev);
+	return 0;
+}
+
+static void rmnet_smd_start_rx(struct rmnet_dev *dev)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct rmnet_smd_dev *smd_dev = &dev->smd_dev;
+	int status;
+	struct usb_request *req;
+	struct list_head *pool = &smd_dev->rx_idle;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	while (!list_empty(pool)) {
+		req = list_entry(pool->next, struct usb_request, list);
+		list_del(&req->list);
+
+		spin_unlock_irqrestore(&dev->lock, flags);
+		status = usb_ep_queue(dev->epout, req, GFP_ATOMIC);
+		spin_lock_irqsave(&dev->lock, flags);
+
+		if (status) {
+			ERROR(cdev, "rmnet data rx enqueue err %d\n", status);
+			list_add_tail(&req->list, pool);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void rmnet_smd_data_tx_tlet(unsigned long arg)
+{
+	struct rmnet_dev *dev = (struct rmnet_dev *) arg;
+	struct rmnet_smd_dev *smd_dev = &dev->smd_dev;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	int status;
+	int sz;
+	unsigned long flags;
+
+	while (1) {
+		if (!atomic_read(&dev->online))
+			break;
+		sz = smd_cur_packet_size(smd_dev->smd_data.ch);
+		if (sz == 0)
+			break;
+		if (smd_read_avail(smd_dev->smd_data.ch) < sz)
+			break;
+
+		spin_lock_irqsave(&dev->lock, flags);
+		if (list_empty(&smd_dev->tx_idle)) {
+			spin_unlock_irqrestore(&dev->lock, flags);
+			DBG(cdev, "rmnet data Tx buffers full\n");
+			break;
+		}
+		req = list_first_entry(&smd_dev->tx_idle,
+				struct usb_request, list);
+		list_del(&req->list);
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		req->length = smd_read(smd_dev->smd_data.ch, req->buf, sz);
+		status = usb_ep_queue(dev->epin, req, GFP_ATOMIC);
+		if (status) {
+			ERROR(cdev, "rmnet tx data enqueue err %d\n", status);
+			spin_lock_irqsave(&dev->lock, flags);
+			list_add_tail(&req->list, &smd_dev->tx_idle);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			break;
+		}
+		dev->dpkts_tolaptop++;
+	}
+
+}
+
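+/* RX tasklet: drain requests parked on rx_queue into the SMD data channel
+ * once it has room, then restart USB OUT transfers.
+ */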
+static void rmnet_smd_data_rx_tlet(unsigned long arg)
+{
+	struct rmnet_dev *dev = (struct rmnet_dev *) arg;
+	struct rmnet_smd_dev *smd_dev = &dev->smd_dev;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	while (1) {
+		if (!atomic_read(&dev->online))
+			break;
+		if (list_empty(&smd_dev->rx_queue)) {
+			atomic_set(&smd_dev->smd_data.rx_pkt, 0);
+			break;
+		}
+		req = list_first_entry(&smd_dev->rx_queue,
+			struct usb_request, list);
+		if (smd_write_avail(smd_dev->smd_data.ch) < req->actual) {
+			atomic_set(&smd_dev->smd_data.rx_pkt, req->actual);
+			DBG(cdev, "rmnet SMD data channel full\n");
+			break;
+		}
+
+		list_del(&req->list);
+		spin_unlock_irqrestore(&dev->lock, flags);
+		ret = smd_write(smd_dev->smd_data.ch, req->buf, req->actual);
+		spin_lock_irqsave(&dev->lock, flags);
+		if (ret != req->actual) {
+			ERROR(cdev, "rmnet SMD data write failed\n");
+			break;
+		}
+		dev->dpkts_tomsm++;
+		list_add_tail(&req->list, &smd_dev->rx_idle);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	/* Idle rx requests are available again; restart USB rx. */
+	rmnet_smd_start_rx(dev);
+}
+
+/* If SMD has enough room to accommodate a data rx packet,
+ * write into SMD directly. Otherwise enqueue it on rx_queue.
+ * We do not write into SMD directly until rx_queue is empty,
+ * so that request ordering is strictly preserved.
+ */
+static void
+rmnet_smd_complete_epout(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	struct rmnet_smd_dev *smd_dev = &dev->smd_dev;
+	struct usb_composite_dev *cdev = dev->cdev;
+	int status = req->status;
+	int ret;
+
+	if (dev->xport == USB_RMNET_XPORT_UNDEFINED) {
+		rmnet_free_req(ep, req);
+		return;
+	}
+
+	switch (status) {
+	case 0:
+		/* normal completion */
+		break;
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		spin_lock(&dev->lock);
+		list_add_tail(&req->list, &smd_dev->rx_idle);
+		spin_unlock(&dev->lock);
+		return;
+	default:
+		/* unexpected failure */
+		ERROR(cdev, "RMNET %s response error %d, %d/%d\n",
+			ep->name, status,
+			req->actual, req->length);
+		spin_lock(&dev->lock);
+		list_add_tail(&req->list, &smd_dev->rx_idle);
+		spin_unlock(&dev->lock);
+		return;
+	}
+
+	spin_lock(&dev->lock);
+	if (!atomic_read(&smd_dev->smd_data.rx_pkt)) {
+		if (smd_write_avail(smd_dev->smd_data.ch) < req->actual) {
+			atomic_set(&smd_dev->smd_data.rx_pkt, req->actual);
+			goto queue_req;
+		}
+		spin_unlock(&dev->lock);
+		ret = smd_write(smd_dev->smd_data.ch, req->buf, req->actual);
+		/* This should never happen */
+		if (ret != req->actual)
+			ERROR(cdev, "rmnet data smd write failed\n");
+		/* Restart Rx */
+		dev->dpkts_tomsm++;
+		spin_lock(&dev->lock);
+		list_add_tail(&req->list, &smd_dev->rx_idle);
+		spin_unlock(&dev->lock);
+		rmnet_smd_start_rx(dev);
+		return;
+	}
+queue_req:
+	list_add_tail(&req->list, &smd_dev->rx_queue);
+	spin_unlock(&dev->lock);
+}
+
+static void rmnet_smd_complete_epin(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	struct rmnet_smd_dev *smd_dev = &dev->smd_dev;
+	struct usb_composite_dev *cdev = dev->cdev;
+	int status = req->status;
+	int schedule = 0;
+
+	if (dev->xport == USB_RMNET_XPORT_UNDEFINED) {
+		rmnet_free_req(ep, req);
+		return;
+	}
+
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		spin_lock(&dev->lock);
+		list_add_tail(&req->list, &smd_dev->tx_idle);
+		spin_unlock(&dev->lock);
+		break;
+	default:
+		ERROR(cdev, "rmnet data tx ep error %d\n", status);
+		/* FALLTHROUGH */
+	case 0:
+		spin_lock(&dev->lock);
+		if (list_empty(&smd_dev->tx_idle))
+			schedule = 1;
+		list_add_tail(&req->list, &smd_dev->tx_idle);
+
+		if (schedule)
+			tasklet_schedule(&smd_dev->smd_data.tx_tlet);
+		spin_unlock(&dev->lock);
+		break;
+	}
+
+}
+
+
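+/* SMD event callback: schedule the rx/tx tasklets when data or space becomes
+ * available and track the channel open state for the worker thread.
+ */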
+static void rmnet_smd_notify(void *priv, unsigned event)
+{
+	struct rmnet_dev *dev = priv;
+	struct rmnet_smd_info *smd_info = &dev->smd_dev.smd_data;
+	int len = atomic_read(&smd_info->rx_pkt);
+
+	switch (event) {
+	case SMD_EVENT_DATA: {
+		if (!atomic_read(&dev->online))
+			break;
+		if (len && (smd_write_avail(smd_info->ch) >= len))
+			tasklet_schedule(&smd_info->rx_tlet);
+
+		if (smd_read_avail(smd_info->ch))
+			tasklet_schedule(&smd_info->tx_tlet);
+
+		break;
+	}
+	case SMD_EVENT_OPEN:
+		/* USB endpoints are not enabled until the SMD channels
+		 * are opened. Wake up the worker thread to continue
+		 * connection processing.
+		 */
+		set_bit(CH_OPENED, &smd_info->flags);
+		wake_up(&smd_info->wait);
+		break;
+	case SMD_EVENT_CLOSE:
+		/* We should never get here; the flags are
+		 * reset after the SMD channel is closed.
+		 */
+		clear_bit(CH_OPENED, &smd_info->flags);
+		break;
+	}
+}
+
+static int rmnet_smd_enable(struct rmnet_dev *dev)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct rmnet_smd_dev *smd_dev = &dev->smd_dev;
+	int i, ret;
+	struct usb_request *req;
+
+	if (test_bit(CH_OPENED, &smd_dev->smd_data.flags))
+		goto smd_alloc_req;
+
+	ret = smd_open(rmnet_smd_data_ch, &smd_dev->smd_data.ch,
+			dev, rmnet_smd_notify);
+	if (ret) {
+		ERROR(cdev, "Unable to open data smd channel\n");
+		return ret;
+	}
+
+	wait_event(smd_dev->smd_data.wait, test_bit(CH_OPENED,
+				&smd_dev->smd_data.flags));
+
+	/* Allocate bulk in/out requests for data transfer.
+	 * If the memory allocation fails, all the allocated
+	 * requests will be freed upon cable disconnect.
+	 */
+smd_alloc_req:
+	for (i = 0; i < RMNET_SMD_RX_REQ_MAX; i++) {
+		req = rmnet_alloc_req(dev->epout, RMNET_SMD_RX_REQ_SIZE,
+				GFP_KERNEL);
+		if (IS_ERR(req))
+			return PTR_ERR(req);
+		req->length = RMNET_SMD_TXN_MAX;
+		req->context = dev;
+		req->complete = rmnet_smd_complete_epout;
+		list_add_tail(&req->list, &smd_dev->rx_idle);
+	}
+
+	for (i = 0; i < RMNET_SMD_TX_REQ_MAX; i++) {
+		req = rmnet_alloc_req(dev->epin, RMNET_SMD_TX_REQ_SIZE,
+				GFP_KERNEL);
+		if (IS_ERR(req))
+			return PTR_ERR(req);
+		req->context = dev;
+		req->complete = rmnet_smd_complete_epin;
+		list_add_tail(&req->list, &smd_dev->tx_idle);
+	}
+
+	rmnet_smd_start_rx(dev);
+	return 0;
+}
+
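+/* Completion handler for the interrupt (notify) endpoint: requeue the
+ * notification while control responses are still pending.
+ */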
+static void rmnet_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	struct usb_composite_dev *cdev = dev->cdev;
+	int status = req->status;
+
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		atomic_set(&dev->notify_count, 0);
+		break;
+	default:
+		ERROR(cdev, "rmnet notifyep error %d\n", status);
+		/* FALLTHROUGH */
+	case 0:
+
+		if (atomic_dec_and_test(&dev->notify_count))
+			break;
+
+		status = usb_ep_queue(dev->epnotify, req, GFP_ATOMIC);
+		if (status) {
+			atomic_dec(&dev->notify_count);
+			ERROR(cdev, "rmnet notify ep enq error %d\n", status);
+		}
+		break;
+	}
+}
+
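+/* Tell the host that an encapsulated response is available by queuing a
+ * CDC RESPONSE_AVAILABLE notification on the interrupt endpoint.
+ */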
+static void ctrl_response_available(struct rmnet_dev *dev)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request              *req = dev->notify_req;
+	struct usb_cdc_notification     *event = req->buf;
+	int status;
+
+	/* Response will be sent later */
+	if (atomic_inc_return(&dev->notify_count) != 1)
+		return;
+
+	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+			| USB_RECIP_INTERFACE;
+	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
+	event->wValue = cpu_to_le16(0);
+	event->wIndex = cpu_to_le16(dev->ifc_id);
+	event->wLength = cpu_to_le16(0);
+
+	status = usb_ep_queue(dev->epnotify, dev->notify_req, GFP_ATOMIC);
+	if (status < 0) {
+		atomic_dec(&dev->notify_count);
+		ERROR(cdev, "rmnet notify ep enqueue error %d\n", status);
+	}
+}
+
+#define MAX_CTRL_PKT_SIZE	4096
+
+static void rmnet_response_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	struct usb_composite_dev *cdev = dev->cdev;
+
+	switch (req->status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+	case 0:
+		return;
+	default:
+		INFO(cdev, "rmnet %s response error %d, %d/%d\n",
+			ep->name, req->status,
+			req->actual, req->length);
+	}
+}
+
+static void rmnet_command_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev		*dev = req->context;
+	struct usb_composite_dev	*cdev = dev->cdev;
+	struct rmnet_ctrl_dev		*ctrl_dev = &dev->ctrl_dev;
+	struct rmnet_ctrl_pkt		*cpkt;
+	int				len = req->actual;
+
+	if (req->status < 0) {
+		ERROR(cdev, "rmnet command error %d\n", req->status);
+		return;
+	}
+
+	cpkt = rmnet_alloc_ctrl_pkt(len, GFP_ATOMIC);
+	if (!cpkt) {
+		ERROR(cdev, "unable to allocate memory for ctrl req\n");
+		return;
+	}
+
+	spin_lock(&dev->lock);
+	if (!ctrl_dev->opened) {
+		spin_unlock(&dev->lock);
+		kfree(cpkt);
+		dev->cpkts_drp_cnt++;
+		pr_err_ratelimited(
+			"%s: ctrl pkts dropped: cpkts_drp_cnt: %lu\n",
+			__func__, dev->cpkts_drp_cnt);
+		return;
+	}
+
+	memcpy(cpkt->buf, req->buf, len);
+
+	list_add_tail(&cpkt->list, &ctrl_dev->tx_q);
+	ctrl_dev->tx_len++;
+	spin_unlock(&dev->lock);
+
+	/* wakeup read thread */
+	wake_up(&ctrl_dev->tx_wait_q);
+}
+
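+/* Handle class specific control requests: encapsulated commands from the
+ * host, encapsulated responses back to it and SET_CONTROL_LINE_STATE.
+ */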
+static int
+rmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+	struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request      *req = cdev->req;
+	int                     ret = -EOPNOTSUPP;
+	u16                     w_index = le16_to_cpu(ctrl->wIndex);
+	u16                     w_value = le16_to_cpu(ctrl->wValue);
+	u16                     w_length = le16_to_cpu(ctrl->wLength);
+	struct rmnet_ctrl_pkt	*cpkt;
+
+	if (!atomic_read(&dev->online))
+		return -ENOTCONN;
+
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
+		if (w_length > req->length)
+			goto invalid;
+		ret = w_length;
+		req->complete = rmnet_command_complete;
+		req->context = dev;
+		break;
+
+
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
+		if (w_value)
+			goto invalid;
+		else {
+			unsigned len;
+
+			spin_lock(&dev->lock);
+			if (list_empty(&ctrl_dev->rx_q)) {
+				DBG(cdev, "ctrl resp queue empty"
+					" %02x.%02x v%04x i%04x l%d\n",
+					ctrl->bRequestType, ctrl->bRequest,
+					w_value, w_index, w_length);
+				spin_unlock(&dev->lock);
+				goto invalid;
+
+			}
+			cpkt = list_first_entry(&ctrl_dev->rx_q,
+					struct rmnet_ctrl_pkt, list);
+			list_del(&cpkt->list);
+			ctrl_dev->rx_len--;
+			spin_unlock(&dev->lock);
+
+			len = min_t(unsigned, w_length, cpkt->len);
+			memcpy(req->buf, cpkt->buf, len);
+			ret = len;
+			req->complete = rmnet_response_complete;
+			req->context = dev;
+			rmnet_free_ctrl_pkt(cpkt);
+
+			dev->cpkts_tolaptop++;
+		}
+		break;
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+		/* This is a workaround for RmNet, borrowed from the CDC/ACM
+		 * standard. The host driver issues this ACM standard request
+		 * to the RmNet interface in the following scenario: once the
+		 * network adapter is disabled from device manager, the
+		 * qcusbnet host driver sends the request with DTR set to
+		 * '0'. Once the network adapter is enabled from device
+		 * manager (or during enumeration), the request is sent with
+		 * DTR set to '1'.
+		 */
+		if (w_value & ACM_CTRL_DTR)
+			ctrl_dev->cbits_to_modem |= TIOCM_DTR;
+		else
+			ctrl_dev->cbits_to_modem &= ~TIOCM_DTR;
+
+		ret = 0;
+
+		break;
+	default:
+
+invalid:
+	DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+		ctrl->bRequestType, ctrl->bRequest,
+		w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (ret >= 0) {
+		VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = (ret < w_length);
+		req->length = ret;
+		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (ret < 0)
+			ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
+	}
+
+	return ret;
+}
+
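+/* Free every USB request, skb and control packet still held by the SDIO,
+ * SMD and control sub-devices.
+ */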
+static void rmnet_free_buf(struct rmnet_dev *dev)
+{
+	struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+	struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+	struct rmnet_smd_dev *smd_dev = &dev->smd_dev;
+	struct rmnet_ctrl_pkt *cpkt;
+	struct usb_request *req;
+	struct list_head *pool;
+	struct sk_buff *skb;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	/* free all usb requests in SDIO tx pool */
+	pool = &sdio_dev->tx_idle;
+	while (!list_empty(pool)) {
+		req = list_first_entry(pool, struct usb_request, list);
+		list_del(&req->list);
+		req->buf = NULL;
+		rmnet_free_req(dev->epin, req);
+	}
+
+	pool = &sdio_dev->rx_idle;
+	/* free all usb requests in SDIO rx pool */
+	while (!list_empty(pool)) {
+		req = list_first_entry(pool, struct usb_request, list);
+		list_del(&req->list);
+		req->buf = NULL;
+		rmnet_free_req(dev->epout, req);
+	}
+
+	while ((skb = __skb_dequeue(&sdio_dev->tx_skb_queue)))
+		dev_kfree_skb_any(skb);
+
+	while ((skb = __skb_dequeue(&sdio_dev->rx_skb_queue)))
+		dev_kfree_skb_any(skb);
+
+	/* free all usb requests in SMD tx pool */
+	pool = &smd_dev->tx_idle;
+	while (!list_empty(pool)) {
+		req = list_first_entry(pool, struct usb_request, list);
+		list_del(&req->list);
+		rmnet_free_req(dev->epin, req);
+	}
+
+	pool = &smd_dev->rx_idle;
+	/* free all usb requests in SMD rx pool */
+	while (!list_empty(pool)) {
+		req = list_first_entry(pool, struct usb_request, list);
+		list_del(&req->list);
+		rmnet_free_req(dev->epout, req);
+	}
+
+	/* free all usb requests in SMD rx queue */
+	pool = &smd_dev->rx_queue;
+	while (!list_empty(pool)) {
+		req = list_first_entry(pool, struct usb_request, list);
+		list_del(&req->list);
+		rmnet_free_req(dev->epout, req);
+	}
+
+	pool = &ctrl_dev->tx_q;
+	while (!list_empty(pool)) {
+		cpkt = list_first_entry(pool, struct rmnet_ctrl_pkt, list);
+		list_del(&cpkt->list);
+		rmnet_free_ctrl_pkt(cpkt);
+		ctrl_dev->tx_len--;
+	}
+
+	pool = &ctrl_dev->rx_q;
+	while (!list_empty(pool)) {
+		cpkt = list_first_entry(pool, struct rmnet_ctrl_pkt, list);
+		list_del(&cpkt->list);
+		rmnet_free_ctrl_pkt(cpkt);
+		ctrl_dev->rx_len--;
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
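+/* Disconnect worker: kill the SMD tasklets if SMD was active, release all
+ * buffers and wake up any blocked control reader.
+ */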
+static void rmnet_disconnect_work(struct work_struct *w)
+{
+	struct rmnet_dev *dev = container_of(w, struct rmnet_dev,
+			disconnect_work);
+	struct rmnet_smd_dev *smd_dev = &dev->smd_dev;
+	struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+
+	if (dev->xport == USB_RMNET_XPORT_SMD) {
+		tasklet_kill(&smd_dev->smd_data.rx_tlet);
+		tasklet_kill(&smd_dev->smd_data.tx_tlet);
+	}
+
+	rmnet_free_buf(dev);
+	dev->xport = USB_RMNET_XPORT_UNDEFINED;
+
+	/* wakeup read thread */
+	wake_up(&ctrl_dev->tx_wait_q);
+}
+
+static void rmnet_suspend(struct usb_function *f)
+{
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+	struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+
+	if (!atomic_read(&dev->online))
+		return;
+	/* This is a workaround for a Windows host bug during suspend.
+	 * Windows 7/XP hosts are supposed to drop DTR when the host is
+	 * suspended, but they do not, so drop DTR explicitly from the
+	 * function driver's suspend handler.
+	 */
+	ctrl_dev->cbits_to_modem &= ~TIOCM_DTR;
+}
+
+static void rmnet_disable(struct usb_function *f)
+{
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+	struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+
+	if (!atomic_read(&dev->online))
+		return;
+
+	atomic_set(&dev->online, 0);
+
+	usb_ep_fifo_flush(dev->epnotify);
+	usb_ep_disable(dev->epnotify);
+	rmnet_free_req(dev->epnotify, dev->notify_req);
+
+	usb_ep_fifo_flush(dev->epout);
+	usb_ep_disable(dev->epout);
+
+	usb_ep_fifo_flush(dev->epin);
+	usb_ep_disable(dev->epin);
+
+	/* cleanup work */
+	ctrl_dev->cbits_to_modem = 0;
+	queue_work(dev->wq, &dev->disconnect_work);
+}
+
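+/* Worker that opens the SDIO DMUX data channel, retrying every 2 seconds up
+ * to SDIO_OPEN_MAX_RETRY times.
+ */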
+#define SDIO_OPEN_RETRY_DELAY	msecs_to_jiffies(2000)
+#define SDIO_OPEN_MAX_RETRY	90
+static void rmnet_open_sdio_work(struct work_struct *w)
+{
+	struct rmnet_dev *dev =
+		container_of(w, struct rmnet_dev, sdio_dev.open_work.work);
+	struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+	struct usb_composite_dev *cdev = dev->cdev;
+	int ret;
+	static int retry_cnt;
+
+	/* Data channel for network packets */
+	ret = msm_sdio_dmux_open(rmnet_sdio_data_ch, dev,
+				rmnet_sdio_data_receive_cb,
+				rmnet_sdio_data_write_done);
+	if (ret) {
+		if (retry_cnt > SDIO_OPEN_MAX_RETRY) {
+			ERROR(cdev, "Unable to open SDIO DATA channel\n");
+			return;
+		}
+		retry_cnt++;
+		queue_delayed_work(dev->wq, &sdio_dev->open_work,
+					SDIO_OPEN_RETRY_DELAY);
+		return;
+	}
+
+
+	atomic_set(&sdio_dev->sdio_open, 1);
+	pr_info("%s: usb rmnet sdio channels are open retry_cnt:%d\n",
+				__func__, retry_cnt);
+	retry_cnt = 0;
+	return;
+}
+
+static int rmnet_set_alt(struct usb_function *f,
+			unsigned intf, unsigned alt)
+{
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+	struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+	struct usb_composite_dev *cdev = dev->cdev;
+
+	/* allocate notification */
+	dev->notify_req = rmnet_alloc_req(dev->epnotify,
+				RMNET_SDIO_MAX_NFY_SZE, GFP_ATOMIC);
+
+	if (IS_ERR(dev->notify_req))
+		return PTR_ERR(dev->notify_req);
+
+	dev->notify_req->complete = rmnet_notify_complete;
+	dev->notify_req->context = dev;
+	dev->notify_req->length = RMNET_SDIO_MAX_NFY_SZE;
+	usb_ep_enable(dev->epnotify, ep_choose(cdev->gadget,
+				&rmnet_hs_notify_desc,
+				&rmnet_fs_notify_desc));
+
+	dev->epin->driver_data = dev;
+	usb_ep_enable(dev->epin, ep_choose(cdev->gadget,
+				&rmnet_hs_in_desc,
+				&rmnet_fs_in_desc));
+	dev->epout->driver_data = dev;
+	usb_ep_enable(dev->epout, ep_choose(cdev->gadget,
+				&rmnet_hs_out_desc,
+				&rmnet_fs_out_desc));
+
+	dev->dpkts_tolaptop = 0;
+	dev->cpkts_tolaptop = 0;
+	dev->cpkts_tomdm = 0;
+	dev->dpkts_tomdm = 0;
+	dev->dpkts_tomsm = 0;
+	dev->tx_drp_cnt = 0;
+	dev->cpkts_drp_cnt = 0;
+	sdio_dev->dpkts_pending_atdmux = 0;
+	atomic_set(&dev->online, 1);
+
+	return 0;
+}
+
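+/* sysfs "transport" store: switch the data path between SMD (0) and SDIO
+ * (non-zero) at runtime, flushing the endpoints and freeing the old
+ * transport's requests before enabling the new one.
+ */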
+static ssize_t transport_store(
+		struct device *device, struct device_attribute *attr,
+		const char *buf, size_t size)
+{
+	struct usb_function *f = dev_get_drvdata(device);
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+	int value;
+	enum usb_rmnet_xport_type given_xport;
+	enum usb_rmnet_xport_type t;
+	struct rmnet_smd_dev *smd_dev = &dev->smd_dev;
+	struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+	struct list_head *pool;
+	struct sk_buff_head *skb_pool;
+	struct sk_buff *skb;
+	struct usb_request *req;
+	unsigned long flags;
+
+	if (!atomic_read(&dev->online)) {
+		pr_err("%s: usb cable is not connected\n", __func__);
+		return -EINVAL;
+	}
+
+	sscanf(buf, "%d", &value);
+	if (value)
+		given_xport = USB_RMNET_XPORT_SDIO;
+	else
+		given_xport = USB_RMNET_XPORT_SMD;
+
+	if (given_xport == dev->xport) {
+		pr_err("%s: given_xport:%s cur_xport:%s doing nothing\n",
+				__func__, xport_to_str(given_xport),
+				xport_to_str(dev->xport));
+		return 0;
+	}
+
+	pr_debug("usb_rmnet: TransportRequested: %s\n",
+			xport_to_str(given_xport));
+
+	/* prevent any further packets to/from USB */
+	t = dev->xport;
+	dev->xport = USB_RMNET_XPORT_UNDEFINED;
+	if (t != USB_RMNET_XPORT_UNDEFINED) {
+		usb_ep_fifo_flush(dev->epin);
+		usb_ep_fifo_flush(dev->epout);
+	}
+
+	switch (t) {
+	case USB_RMNET_XPORT_SDIO:
+		spin_lock_irqsave(&dev->lock, flags);
+
+		sdio_dev->dpkts_pending_atdmux = 0;
+
+		/* free all usb requests in SDIO tx pool */
+		pool = &sdio_dev->tx_idle;
+		while (!list_empty(pool)) {
+			req = list_first_entry(pool, struct usb_request, list);
+			list_del(&req->list);
+			req->buf = NULL;
+			rmnet_free_req(dev->epin, req);
+		}
+
+		/* free all usb requests in SDIO rx pool */
+		pool = &sdio_dev->rx_idle;
+		while (!list_empty(pool)) {
+			req = list_first_entry(pool, struct usb_request, list);
+			list_del(&req->list);
+			req->buf = NULL;
+			rmnet_free_req(dev->epout, req);
+		}
+
+		/* tx_skb_queue */
+		skb_pool = &sdio_dev->tx_skb_queue;
+		while ((skb = __skb_dequeue(skb_pool)))
+			dev_kfree_skb_any(skb);
+		/* rx_skb_queue */
+		skb_pool = &sdio_dev->rx_skb_queue;
+		while ((skb = __skb_dequeue(skb_pool)))
+			dev_kfree_skb_any(skb);
+
+		spin_unlock_irqrestore(&dev->lock, flags);
+		break;
+	case USB_RMNET_XPORT_SMD:
+		/* close smd xport */
+		tasklet_kill(&smd_dev->smd_data.rx_tlet);
+		tasklet_kill(&smd_dev->smd_data.tx_tlet);
+
+		spin_lock_irqsave(&dev->lock, flags);
+		/* free all usb requests in SMD tx pool */
+		pool = &smd_dev->tx_idle;
+		while (!list_empty(pool)) {
+			req = list_first_entry(pool, struct usb_request, list);
+			list_del(&req->list);
+			rmnet_free_req(dev->epin, req);
+		}
+
+		pool = &smd_dev->rx_idle;
+		/* free all usb requests in SMD rx pool */
+		while (!list_empty(pool)) {
+			req = list_first_entry(pool, struct usb_request, list);
+			list_del(&req->list);
+			rmnet_free_req(dev->epout, req);
+		}
+
+		/* free all usb requests in SMD rx queue */
+		pool = &smd_dev->rx_queue;
+		while (!list_empty(pool)) {
+			req = list_first_entry(pool, struct usb_request, list);
+			list_del(&req->list);
+			rmnet_free_req(dev->epout, req);
+		}
+
+		spin_unlock_irqrestore(&dev->lock, flags);
+		break;
+	default:
+		pr_debug("%s: undefined xport, do nothing\n", __func__);
+	}
+
+	dev->xport = given_xport;
+
+	switch (dev->xport) {
+	case USB_RMNET_XPORT_SDIO:
+		rmnet_sdio_enable(dev);
+		break;
+	case USB_RMNET_XPORT_SMD:
+		rmnet_smd_enable(dev);
+		break;
+	default:
+		/* we should never come here */
+		pr_err("%s: undefined transport\n", __func__);
+	}
+
+	return size;
+}
+static DEVICE_ATTR(transport, S_IRUGO | S_IWUSR, NULL, transport_store);
+
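+/* Bind the function: allocate the interface id, claim bulk IN/OUT and
+ * interrupt endpoints, mirror their addresses into the high-speed
+ * descriptors, create the transport attribute and kick off the SDIO open.
+ */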
+static int rmnet_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+	struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+	int id, ret;
+	struct usb_ep *ep;
+
+	dev->cdev = cdev;
+
+	/* allocate interface ID */
+	id = usb_interface_id(c, f);
+	if (id < 0)
+		return id;
+	dev->ifc_id = id;
+	rmnet_interface_desc.bInterfaceNumber = id;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc);
+	if (!ep)
+		goto out;
+	ep->driver_data = cdev; /* claim endpoint */
+	dev->epin = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc);
+	if (!ep)
+		goto out;
+	ep->driver_data = cdev; /* claim endpoint */
+	dev->epout = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc);
+	if (!ep)
+		goto out;
+	ep->driver_data = cdev; /* claim endpoint */
+	dev->epnotify = ep;
+
+	/* support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		rmnet_hs_in_desc.bEndpointAddress =
+			rmnet_fs_in_desc.bEndpointAddress;
+		rmnet_hs_out_desc.bEndpointAddress =
+			rmnet_fs_out_desc.bEndpointAddress;
+		rmnet_hs_notify_desc.bEndpointAddress =
+			rmnet_fs_notify_desc.bEndpointAddress;
+	}
+
+	ret = device_create_file(f->dev, &dev_attr_transport);
+	if (ret)
+		goto out;
+
+	queue_delayed_work(dev->wq, &sdio_dev->open_work, 0);
+
+	return 0;
+
+out:
+	if (dev->epnotify)
+		dev->epnotify->driver_data = NULL;
+	if (dev->epout)
+		dev->epout->driver_data = NULL;
+	if (dev->epin)
+		dev->epin->driver_data = NULL;
+
+	return -ENODEV;
+}
+
+static void rmnet_smd_init(struct rmnet_smd_dev *smd_dev)
+{
+	struct rmnet_dev *dev = container_of(smd_dev,
+			struct rmnet_dev, smd_dev);
+
+	atomic_set(&smd_dev->smd_data.rx_pkt, 0);
+	tasklet_init(&smd_dev->smd_data.rx_tlet, rmnet_smd_data_rx_tlet,
+					(unsigned long) dev);
+	tasklet_init(&smd_dev->smd_data.tx_tlet, rmnet_smd_data_tx_tlet,
+					(unsigned long) dev);
+
+	init_waitqueue_head(&smd_dev->smd_data.wait);
+
+	INIT_LIST_HEAD(&smd_dev->rx_idle);
+	INIT_LIST_HEAD(&smd_dev->rx_queue);
+	INIT_LIST_HEAD(&smd_dev->tx_idle);
+}
+
+static void rmnet_sdio_init(struct rmnet_sdio_dev *sdio_dev)
+{
+	INIT_WORK(&sdio_dev->data_rx_work, rmnet_sdio_data_rx_work);
+
+	INIT_DELAYED_WORK(&sdio_dev->open_work, rmnet_open_sdio_work);
+
+	INIT_LIST_HEAD(&sdio_dev->rx_idle);
+	INIT_LIST_HEAD(&sdio_dev->tx_idle);
+	skb_queue_head_init(&sdio_dev->tx_skb_queue);
+	skb_queue_head_init(&sdio_dev->rx_skb_queue);
+}
+
+static void
+rmnet_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+	struct rmnet_smd_dev *smd_dev = &dev->smd_dev;
+
+	smd_close(smd_dev->smd_data.ch);
+	smd_dev->smd_data.flags = 0;
+
+}
+
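+/* debugfs support: expose packet counters, queue lengths and transport state
+ * via usb_rmnet/status; writing to the file resets the counters.
+ */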
+#if defined(CONFIG_DEBUG_FS)
+#define DEBUG_BUF_SIZE	1024
+static ssize_t debug_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	struct rmnet_dev *dev = file->private_data;
+	struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+	struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+	char *debug_buf;
+	unsigned long flags;
+	int ret;
+
+	debug_buf = kmalloc(DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (!debug_buf)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	ret = scnprintf(debug_buf, DEBUG_BUF_SIZE,
+			"dpkts_tomsm:  %lu\n"
+			"dpkts_tomdm: %lu\n"
+			"cpkts_tomdm: %lu\n"
+			"dpkts_tolaptop: %lu\n"
+			"cpkts_tolaptop:  %lu\n"
+			"cbits_to_modem: %lu\n"
+			"tx skb size:     %u\n"
+			"rx_skb_size:     %u\n"
+			"dpkts_pending_at_dmux: %u\n"
+			"tx drp cnt: %lu\n"
+			"cpkts_drp_cnt: %lu\n"
+			"cpkt_tx_qlen: %lu\n"
+			"cpkt_rx_qlen_to_modem: %lu\n"
+			"xport: %s\n"
+			"ctrl_ch_opened:	%d\n",
+			dev->dpkts_tomsm, dev->dpkts_tomdm,
+			dev->cpkts_tomdm, dev->dpkts_tolaptop,
+			dev->cpkts_tolaptop, ctrl_dev->cbits_to_modem,
+			sdio_dev->tx_skb_queue.qlen,
+			sdio_dev->rx_skb_queue.qlen,
+			sdio_dev->dpkts_pending_atdmux, dev->tx_drp_cnt,
+			dev->cpkts_drp_cnt,
+			ctrl_dev->tx_len, ctrl_dev->rx_len,
+			xport_to_str(dev->xport), ctrl_dev->opened);
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	ret = simple_read_from_buffer(ubuf, count, ppos, debug_buf, ret);
+
+	kfree(debug_buf);
+
+	return ret;
+}
+
+static ssize_t debug_reset_stats(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	struct rmnet_dev *dev = file->private_data;
+	struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+
+	dev->dpkts_tolaptop = 0;
+	dev->cpkts_tolaptop = 0;
+	dev->cpkts_tomdm = 0;
+	dev->dpkts_tomdm = 0;
+	dev->dpkts_tomsm = 0;
+	sdio_dev->dpkts_pending_atdmux = 0;
+	dev->tx_drp_cnt = 0;
+	dev->cpkts_drp_cnt = 0;
+	return count;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+
+	return 0;
+}
+
+const struct file_operations rmnet_svlte_debug_stats_ops = {
+	.open = debug_open,
+	.read = debug_read_stats,
+	.write = debug_reset_stats,
+};
+
+static void usb_debugfs_init(struct rmnet_dev *dev)
+{
+	struct dentry *dent;
+
+	dent = debugfs_create_dir("usb_rmnet", 0);
+	if (IS_ERR(dent))
+		return;
+
+	debugfs_create_file("status", 0444, dent, dev,
+			&rmnet_svlte_debug_stats_ops);
+}
+#else
+static void usb_debugfs_init(struct rmnet_dev *dev) {}
+#endif
+
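+/* Character device interface (misc device "rmnet_ctrl") used by user space
+ * to read and write RmNet control packets.
+ */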
+int usb_rmnet_ctrl_open(struct inode *inode, struct file *fp)
+{
+	struct rmnet_dev *dev = _dev;
+	struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (ctrl_dev->opened) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		pr_err("%s: device is already opened\n", __func__);
+		return -EBUSY;
+	}
+
+	ctrl_dev->opened = 1;
+	fp->private_data = dev;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return 0;
+}
+
+
+int usb_rmnet_ctrl_release(struct inode *inode, struct file *fp)
+{
+	struct rmnet_dev *dev = fp->private_data;
+	struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	ctrl_dev->opened = 0;
+	fp->private_data = 0;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return 0;
+}
+
+ssize_t usb_rmnet_ctrl_read(struct file *fp,
+		      char __user *buf,
+		      size_t count,
+		      loff_t *ppos)
+{
+	struct rmnet_dev *dev = fp->private_data;
+	struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+	struct rmnet_ctrl_pkt *cpkt;
+	unsigned long flags;
+	int ret = 0;
+
+ctrl_read:
+	if (!atomic_read(&dev->online)) {
+		pr_debug("%s: USB cable not connected\n", __func__);
+		return -ENODEV;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (list_empty(&ctrl_dev->tx_q)) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		/* Block until a ctrl packet arrives or cable is removed */
+		ret = wait_event_interruptible(ctrl_dev->tx_wait_q,
+					!list_empty(&ctrl_dev->tx_q) ||
+					!atomic_read(&dev->online));
+		if (ret < 0)
+			return ret;
+
+		goto ctrl_read;
+	}
+
+	cpkt = list_first_entry(&ctrl_dev->tx_q, struct rmnet_ctrl_pkt, list);
+	if (cpkt->len > count) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		pr_err("%s: cpkt size:%d > buf size:%d\n",
+				__func__, cpkt->len, count);
+		return -ENOMEM;
+	}
+	list_del(&cpkt->list);
+	ctrl_dev->tx_len--;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	count = cpkt->len;
+
+	ret = copy_to_user(buf, cpkt->buf, count);
+	dev->cpkts_tomdm++;
+
+	rmnet_free_ctrl_pkt(cpkt);
+
+	if (ret)
+		return -EFAULT;
+
+	return count;
+}
+
+ssize_t usb_rmnet_ctrl_write(struct file *fp,
+		       const char __user *buf,
+		       size_t count,
+		       loff_t *ppos)
+{
+	struct rmnet_dev *dev = fp->private_data;
+	struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+	struct rmnet_ctrl_pkt *cpkt;
+	unsigned long flags;
+	int ret = 0;
+
+	if (!atomic_read(&dev->online)) {
+		pr_debug("%s: USB cable not connected\n", __func__);
+		return -ENODEV;
+	}
+
+	if (!count) {
+		pr_err("%s: zero length ctrl pkt\n", __func__);
+		return -ENODEV;
+	}
+
+	if (count > MAX_CTRL_PKT_SIZE) {
+		pr_err("%s: max_pkt_size:%d given_pkt_size:%d\n",
+				__func__, MAX_CTRL_PKT_SIZE, count);
+		return -ENOMEM;
+	}
+
+	cpkt = rmnet_alloc_ctrl_pkt(count, GFP_KERNEL);
+	if (!cpkt) {
+		pr_err("%s: cannot allocate rmnet ctrl pkt\n", __func__);
+		return -ENOMEM;
+	}
+
+	ret = copy_from_user(cpkt->buf, buf, count);
+	if (ret) {
+		pr_err("%s: copy_from_user failed err:%d\n",
+				__func__, ret);
+		rmnet_free_ctrl_pkt(cpkt);
+		return -EFAULT;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+	ctrl_dev->rx_len++;
+	list_add(&cpkt->list, &ctrl_dev->rx_q);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	ctrl_response_available(dev);
+
+	return count;
+}
+
+
+#define RMNET_CTRL_GET_DTR	_IOR(0xFE, 0, int)
+static long
+usb_rmnet_ctrl_ioctl(struct file *fp, unsigned c, unsigned long value)
+{
+	struct rmnet_dev *dev = fp->private_data;
+	struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+	unsigned long *temp = (unsigned long *)value;
+	int ret = 0;
+
+	if (c != RMNET_CTRL_GET_DTR)
+		return -ENODEV;
+
+	ret = copy_to_user(temp,
+			&ctrl_dev->cbits_to_modem,
+			sizeof(*temp));
+	if (ret)
+		return -EFAULT;
+
+	return 0;
+}
+
+static const struct file_operations rmnet_ctrl_fops = {
+	.owner		= THIS_MODULE,
+	.open		= usb_rmnet_ctrl_open,
+	.release	= usb_rmnet_ctrl_release,
+	.read		= usb_rmnet_ctrl_read,
+	.write		= usb_rmnet_ctrl_write,
+	.unlocked_ioctl	= usb_rmnet_ctrl_ioctl,
+};
+
+static struct miscdevice rmnet_ctrl_dev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "rmnet_ctrl",
+	.fops = &rmnet_ctrl_fops,
+};
+
+static int rmnet_ctrl_device_init(struct rmnet_dev *dev)
+{
+	int ret;
+	struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+
+	INIT_LIST_HEAD(&ctrl_dev->tx_q);
+	INIT_LIST_HEAD(&ctrl_dev->rx_q);
+	init_waitqueue_head(&ctrl_dev->tx_wait_q);
+
+	ret = misc_register(&rmnet_ctrl_dev);
+	if (ret) {
+		pr_err("%s: failed to register misc device\n", __func__);
+		return ret;
+	}
+
+	return 0;
+}
+
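+/* Allocate and initialise the rmnet function device and register it with
+ * the composite configuration.
+ */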
+static int rmnet_function_add(struct usb_configuration *c)
+{
+	struct rmnet_dev *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	_dev = dev;
+
+	dev->wq = create_singlethread_workqueue("k_rmnet_work");
+	if (!dev->wq) {
+		ret = -ENOMEM;
+		goto free_dev;
+	}
+
+	spin_lock_init(&dev->lock);
+	atomic_set(&dev->notify_count, 0);
+	atomic_set(&dev->online, 0);
+	INIT_WORK(&dev->disconnect_work, rmnet_disconnect_work);
+	rmnet_smd_init(&dev->smd_dev);
+	rmnet_sdio_init(&dev->sdio_dev);
+
+	ret = rmnet_ctrl_device_init(dev);
+	if (ret) {
+		pr_debug("%s: rmnet_ctrl_device_init failed, err:%d\n",
+				__func__, ret);
+		goto free_wq;
+	}
+
+	dev->function.name = "rmnet_smd_sdio";
+	dev->function.strings = rmnet_strings;
+	dev->function.descriptors = rmnet_fs_function;
+	dev->function.hs_descriptors = rmnet_hs_function;
+	dev->function.bind = rmnet_bind;
+	dev->function.unbind = rmnet_unbind;
+	dev->function.setup = rmnet_setup;
+	dev->function.set_alt = rmnet_set_alt;
+	dev->function.disable = rmnet_disable;
+	dev->function.suspend = rmnet_suspend;
+
+	ret = usb_add_function(c, &dev->function);
+	if (ret)
+		goto free_wq;
+
+	usb_debugfs_init(dev);
+
+	return 0;
+
+free_wq:
+	destroy_workqueue(dev->wq);
+free_dev:
+	kfree(dev);
+
+	return ret;
+}
+
+#ifdef CONFIG_USB_ANDROID_RMNET_SMD_SDIO
+static struct android_usb_function rmnet_function = {
+	.name = "rmnet_smd_sdio",
+	.bind_config = rmnet_function_add,
+};
+
+static int __init rmnet_init(void)
+{
+	android_register_function(&rmnet_function);
+	return 0;
+}
+module_init(rmnet_init);
+
+#endif /* CONFIG_USB_ANDROID_RMNET_SMD_SDIO */
diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c
index 490b00b..0c31544 100644
--- a/drivers/usb/gadget/f_serial.c
+++ b/drivers/usb/gadget/f_serial.c
@@ -13,6 +13,8 @@
 #include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/device.h>
+#include <linux/usb/android_composite.h>
+#include <mach/usb_gadget_fserial.h>
 
 #include "u_serial.h"
 #include "gadget_chips.h"
@@ -30,6 +32,9 @@
 struct gser_descs {
 	struct usb_endpoint_descriptor	*in;
 	struct usb_endpoint_descriptor	*out;
+#ifdef CONFIG_MODEM_SUPPORT
+	struct usb_endpoint_descriptor	*notify;
+#endif
 };
 
 struct f_gser {
@@ -39,29 +44,129 @@
 
 	struct gser_descs		fs;
 	struct gser_descs		hs;
+	u8				online;
+	enum transport_type		transport;
+
+#ifdef CONFIG_MODEM_SUPPORT
+	u8				pending;
+	spinlock_t			lock;
+	struct usb_ep			*notify;
+	struct usb_endpoint_descriptor	*notify_desc;
+	struct usb_request		*notify_req;
+
+	struct usb_cdc_line_coding	port_line_coding;
+
+	/* SetControlLineState request */
+	u16				port_handshake_bits;
+#define ACM_CTRL_RTS	(1 << 1)	/* unused with full duplex */
+#define ACM_CTRL_DTR	(1 << 0)	/* host is ready for data r/w */
+
+	/* SerialState notification */
+	u16				serial_state;
+#define ACM_CTRL_OVERRUN	(1 << 6)
+#define ACM_CTRL_PARITY		(1 << 5)
+#define ACM_CTRL_FRAMING	(1 << 4)
+#define ACM_CTRL_RI		(1 << 3)
+#define ACM_CTRL_BRK		(1 << 2)
+#define ACM_CTRL_DSR		(1 << 1)
+#define ACM_CTRL_DCD		(1 << 0)
+#endif
 };
 
+#ifdef CONFIG_USB_F_SERIAL
+static unsigned int no_tty_ports;
+static unsigned int no_sdio_ports;
+static unsigned int no_smd_ports;
+static unsigned int nr_ports;
+#endif
+
+static struct port_info {
+	enum transport_type	transport;
+	unsigned		port_num;
+	unsigned		client_port_num;
+} gserial_ports[GSERIAL_NO_PORTS];
+
+static inline bool is_transport_sdio(enum transport_type t)
+{
+	if (t == USB_GADGET_FSERIAL_TRANSPORT_SDIO)
+		return 1;
+	return 0;
+}
+
 static inline struct f_gser *func_to_gser(struct usb_function *f)
 {
 	return container_of(f, struct f_gser, port.func);
 }
 
+#ifdef CONFIG_MODEM_SUPPORT
+static inline struct f_gser *port_to_gser(struct gserial *p)
+{
+	return container_of(p, struct f_gser, port);
+}
+#define GS_LOG2_NOTIFY_INTERVAL		5	/* 1 << 5 == 32 msec */
+#define GS_NOTIFY_MAXPACKET		10	/* notification + 2 bytes */
+#endif
 /*-------------------------------------------------------------------------*/
 
 /* interface descriptor: */
 
-static struct usb_interface_descriptor gser_interface_desc __initdata = {
+static struct usb_interface_descriptor gser_interface_desc = {
 	.bLength =		USB_DT_INTERFACE_SIZE,
 	.bDescriptorType =	USB_DT_INTERFACE,
 	/* .bInterfaceNumber = DYNAMIC */
+#ifdef CONFIG_MODEM_SUPPORT
+	.bNumEndpoints =	3,
+#else
 	.bNumEndpoints =	2,
+#endif
 	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
 	.bInterfaceSubClass =	0,
 	.bInterfaceProtocol =	0,
 	/* .iInterface = DYNAMIC */
 };
+#ifdef CONFIG_MODEM_SUPPORT
+static struct usb_cdc_header_desc gser_header_desc  = {
+	.bLength =		sizeof(gser_header_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,
+	.bcdCDC =		__constant_cpu_to_le16(0x0110),
+};
 
+static struct usb_cdc_call_mgmt_descriptor
+gser_call_mgmt_descriptor  = {
+	.bLength =		sizeof(gser_call_mgmt_descriptor),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_CALL_MANAGEMENT_TYPE,
+	.bmCapabilities =	0,
+	/* .bDataInterface = DYNAMIC */
+};
+
+static struct usb_cdc_acm_descriptor gser_descriptor  = {
+	.bLength =		sizeof(gser_descriptor),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_ACM_TYPE,
+	.bmCapabilities =	USB_CDC_CAP_LINE,
+};
+
+static struct usb_cdc_union_desc gser_union_desc  = {
+	.bLength =		sizeof(gser_union_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
+	/* .bMasterInterface0 =	DYNAMIC */
+	/* .bSlaveInterface0 =	DYNAMIC */
+};
+#endif
 /* full speed support: */
+#ifdef CONFIG_MODEM_SUPPORT
+static struct usb_endpoint_descriptor gser_fs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(GS_NOTIFY_MAXPACKET),
+	.bInterval =		1 << GS_LOG2_NOTIFY_INTERVAL,
+};
+#endif
 
 static struct usb_endpoint_descriptor gser_fs_in_desc __initdata = {
 	.bLength =		USB_DT_ENDPOINT_SIZE,
@@ -79,29 +184,53 @@
 
 static struct usb_descriptor_header *gser_fs_function[] __initdata = {
 	(struct usb_descriptor_header *) &gser_interface_desc,
+#ifdef CONFIG_MODEM_SUPPORT
+	(struct usb_descriptor_header *) &gser_header_desc,
+	(struct usb_descriptor_header *) &gser_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &gser_descriptor,
+	(struct usb_descriptor_header *) &gser_union_desc,
+	(struct usb_descriptor_header *) &gser_fs_notify_desc,
+#endif
 	(struct usb_descriptor_header *) &gser_fs_in_desc,
 	(struct usb_descriptor_header *) &gser_fs_out_desc,
 	NULL,
 };
 
 /* high speed support: */
+#ifdef CONFIG_MODEM_SUPPORT
+static struct usb_endpoint_descriptor gser_hs_notify_desc  = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(GS_NOTIFY_MAXPACKET),
+	.bInterval =		GS_LOG2_NOTIFY_INTERVAL+4,
+};
+#endif
 
 static struct usb_endpoint_descriptor gser_hs_in_desc __initdata = {
 	.bLength =		USB_DT_ENDPOINT_SIZE,
 	.bDescriptorType =	USB_DT_ENDPOINT,
 	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
-	.wMaxPacketSize =	cpu_to_le16(512),
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
 };
 
-static struct usb_endpoint_descriptor gser_hs_out_desc __initdata = {
+static struct usb_endpoint_descriptor gser_hs_out_desc = {
 	.bLength =		USB_DT_ENDPOINT_SIZE,
 	.bDescriptorType =	USB_DT_ENDPOINT,
 	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
-	.wMaxPacketSize =	cpu_to_le16(512),
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
 };
 
 static struct usb_descriptor_header *gser_hs_function[] __initdata = {
 	(struct usb_descriptor_header *) &gser_interface_desc,
+#ifdef CONFIG_MODEM_SUPPORT
+	(struct usb_descriptor_header *) &gser_header_desc,
+	(struct usb_descriptor_header *) &gser_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &gser_descriptor,
+	(struct usb_descriptor_header *) &gser_union_desc,
+	(struct usb_descriptor_header *) &gser_hs_notify_desc,
+#endif
 	(struct usb_descriptor_header *) &gser_hs_in_desc,
 	(struct usb_descriptor_header *) &gser_hs_out_desc,
 	NULL,
@@ -124,27 +253,232 @@
 	NULL,
 };
 
+static char *transport_to_str(enum transport_type t)
+{
+	switch (t) {
+	case USB_GADGET_FSERIAL_TRANSPORT_TTY:
+		return "TTY";
+	case USB_GADGET_FSERIAL_TRANSPORT_SDIO:
+		return "SDIO";
+	case USB_GADGET_FSERIAL_TRANSPORT_SMD:
+		return "SMD";
+	}
+
+	return "NONE";
+}
+
+#ifdef CONFIG_USB_F_SERIAL
+static int gport_setup(struct usb_configuration *c)
+{
+	int ret = 0;
+
+	pr_debug("%s: no_tty_ports:%u no_sdio_ports: %u nr_ports:%u\n",
+			__func__, no_tty_ports, no_sdio_ports, nr_ports);
+
+	if (no_tty_ports)
+		ret = gserial_setup(c->cdev->gadget, no_tty_ports);
+	if (no_sdio_ports)
+		ret = gsdio_setup(c->cdev->gadget, no_sdio_ports);
+	if (no_smd_ports)
+		ret = gsmd_setup(c->cdev->gadget, no_smd_ports);
+
+	return ret;
+}
+#endif
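+/* Route the connect to the transport specific backend (TTY, SDIO or SMD)
+ * configured for this port.
+ */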
+static int gport_connect(struct f_gser *gser)
+{
+	unsigned port_num;
+
+	pr_debug("%s: transport:%s f_gser:%p gserial:%p port_num:%d\n",
+			__func__, transport_to_str(gser->transport),
+			gser, &gser->port, gser->port_num);
+
+	port_num = gserial_ports[gser->port_num].client_port_num;
+
+	switch (gser->transport) {
+	case USB_GADGET_FSERIAL_TRANSPORT_TTY:
+		gserial_connect(&gser->port, port_num);
+		break;
+	case USB_GADGET_FSERIAL_TRANSPORT_SDIO:
+		gsdio_connect(&gser->port, port_num);
+		break;
+	case USB_GADGET_FSERIAL_TRANSPORT_SMD:
+		gsmd_connect(&gser->port, port_num);
+		break;
+	default:
+		pr_err("%s: Un-supported transport: %s\n", __func__,
+				transport_to_str(gser->transport));
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int gport_disconnect(struct f_gser *gser)
+{
+	unsigned port_num;
+
+	pr_debug("%s: transport:%s f_gser:%p gserial:%p port_num:%d\n",
+			__func__, transport_to_str(gser->transport),
+			gser, &gser->port, gser->port_num);
+
+	port_num = gserial_ports[gser->port_num].client_port_num;
+
+	switch (gser->transport) {
+	case USB_GADGET_FSERIAL_TRANSPORT_TTY:
+		gserial_disconnect(&gser->port);
+		break;
+	case USB_GADGET_FSERIAL_TRANSPORT_SDIO:
+		gsdio_disconnect(&gser->port, port_num);
+		break;
+	case USB_GADGET_FSERIAL_TRANSPORT_SMD:
+		gsmd_disconnect(&gser->port, port_num);
+		break;
+	default:
+		pr_err("%s: Un-supported transport:%s\n", __func__,
+				transport_to_str(gser->transport));
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_MODEM_SUPPORT
+static void gser_complete_set_line_coding(struct usb_ep *ep,
+		struct usb_request *req)
+{
+	struct f_gser            *gser = ep->driver_data;
+	struct usb_composite_dev *cdev = gser->port.func.config->cdev;
+
+	if (req->status != 0) {
+		DBG(cdev, "gser ttyGS%d completion, err %d\n",
+				gser->port_num, req->status);
+		return;
+	}
+
+	/* normal completion */
+	if (req->actual != sizeof(gser->port_line_coding)) {
+		DBG(cdev, "gser ttyGS%d short resp, len %d\n",
+				gser->port_num, req->actual);
+		usb_ep_set_halt(ep);
+	} else {
+		struct usb_cdc_line_coding	*value = req->buf;
+		gser->port_line_coding = *value;
+	}
+}
 /*-------------------------------------------------------------------------*/
 
+static int
+gser_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct f_gser            *gser = func_to_gser(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request	 *req = cdev->req;
+	int			 value = -EOPNOTSUPP;
+	u16			 w_index = le16_to_cpu(ctrl->wIndex);
+	u16			 w_value = le16_to_cpu(ctrl->wValue);
+	u16			 w_length = le16_to_cpu(ctrl->wLength);
+
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	/* SET_LINE_CODING ... just read and save what the host sends */
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_REQ_SET_LINE_CODING:
+		if (w_length != sizeof(struct usb_cdc_line_coding))
+			goto invalid;
+
+		value = w_length;
+		cdev->gadget->ep0->driver_data = gser;
+		req->complete = gser_complete_set_line_coding;
+		break;
+
+	/* GET_LINE_CODING ... return what host sent, or initial value */
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_REQ_GET_LINE_CODING:
+		value = min_t(unsigned, w_length,
+				sizeof(struct usb_cdc_line_coding));
+		memcpy(req->buf, &gser->port_line_coding, value);
+		break;
+
+	/* SET_CONTROL_LINE_STATE ... save what the host sent */
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+
+		value = 0;
+		gser->port_handshake_bits = w_value;
+		if (gser->port.notify_modem) {
+			unsigned port_num =
+				gserial_ports[gser->port_num].client_port_num;
+
+			gser->port.notify_modem(&gser->port,
+					port_num, w_value);
+		}
+		break;
+
+	default:
+invalid:
+		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		DBG(cdev, "gser ttyGS%d req%02x.%02x v%04x i%04x l%d\n",
+			gser->port_num, ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = 0;
+		req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			ERROR(cdev, "gser response on ttyGS%d, err %d\n",
+					gser->port_num, value);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+#endif
 static int gser_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
 {
 	struct f_gser		*gser = func_to_gser(f);
 	struct usb_composite_dev *cdev = f->config->cdev;
+	int rc = 0;
 
 	/* we know alt == 0, so this is an activation or a reset */
 
-	if (gser->port.in->driver_data) {
-		DBG(cdev, "reset generic ttyGS%d\n", gser->port_num);
-		gserial_disconnect(&gser->port);
-	} else {
-		DBG(cdev, "activate generic ttyGS%d\n", gser->port_num);
-		gser->port.in_desc = ep_choose(cdev->gadget,
-				gser->hs.in, gser->fs.in);
-		gser->port.out_desc = ep_choose(cdev->gadget,
-				gser->hs.out, gser->fs.out);
+#ifdef CONFIG_MODEM_SUPPORT
+	if (gser->notify->driver_data) {
+		DBG(cdev, "reset generic ctl ttyGS%d\n", gser->port_num);
+		usb_ep_disable(gser->notify);
 	}
-	gserial_connect(&gser->port, gser->port_num);
-	return 0;
+	gser->notify_desc = ep_choose(cdev->gadget,
+			gser->hs.notify,
+			gser->fs.notify);
+	rc = usb_ep_enable(gser->notify, gser->notify_desc);
+	if (rc) {
+		ERROR(cdev, "can't enable %s, result %d\n",
+					gser->notify->name, rc);
+		return rc;
+	}
+	gser->notify->driver_data = gser;
+#endif
+
+	if (gser->port.in->driver_data) {
+		DBG(cdev, "reset generic data ttyGS%d\n", gser->port_num);
+		gport_disconnect(gser);
+	} else {
+		DBG(cdev, "activate generic data ttyGS%d\n", gser->port_num);
+	}
+	gser->port.in_desc = ep_choose(cdev->gadget,
+			gser->hs.in, gser->fs.in);
+	gser->port.out_desc = ep_choose(cdev->gadget,
+			gser->hs.out, gser->fs.out);
+
+	gport_connect(gser);
+
+	gser->online = 1;
+	return rc;
 }
 
 static void gser_disable(struct usb_function *f)
@@ -153,9 +487,180 @@
 	struct usb_composite_dev *cdev = f->config->cdev;
 
 	DBG(cdev, "generic ttyGS%d deactivated\n", gser->port_num);
-	gserial_disconnect(&gser->port);
+
+	gport_disconnect(gser);
+
+#ifdef CONFIG_MODEM_SUPPORT
+	usb_ep_fifo_flush(gser->notify);
+	usb_ep_disable(gser->notify);
+#endif
+	gser->online = 0;
+}
+#ifdef CONFIG_MODEM_SUPPORT
+static int gser_notify(struct f_gser *gser, u8 type, u16 value,
+		void *data, unsigned length)
+{
+	struct usb_ep			*ep = gser->notify;
+	struct usb_request		*req;
+	struct usb_cdc_notification	*notify;
+	const unsigned			len = sizeof(*notify) + length;
+	void				*buf;
+	int				status;
+	struct usb_composite_dev *cdev = gser->port.func.config->cdev;
+
+	req = gser->notify_req;
+	gser->notify_req = NULL;
+	gser->pending = false;
+
+	req->length = len;
+	notify = req->buf;
+	buf = notify + 1;
+
+	notify->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+			| USB_RECIP_INTERFACE;
+	notify->bNotificationType = type;
+	notify->wValue = cpu_to_le16(value);
+	notify->wIndex = cpu_to_le16(gser->data_id);
+	notify->wLength = cpu_to_le16(length);
+	memcpy(buf, data, length);
+
+	status = usb_ep_queue(ep, req, GFP_ATOMIC);
+	if (status < 0) {
+		ERROR(cdev, "gser ttyGS%d can't notify serial state, %d\n",
+				gser->port_num, status);
+		gser->notify_req = req;
+	}
+
+	return status;
 }
 
+static int gser_notify_serial_state(struct f_gser *gser)
+{
+	int			 status;
+	unsigned long flags;
+	struct usb_composite_dev *cdev = gser->port.func.config->cdev;
+
+	spin_lock_irqsave(&gser->lock, flags);
+	if (gser->notify_req) {
+		DBG(cdev, "gser ttyGS%d serial state %04x\n",
+				gser->port_num, gser->serial_state);
+		status = gser_notify(gser, USB_CDC_NOTIFY_SERIAL_STATE,
+				0, &gser->serial_state,
+					sizeof(gser->serial_state));
+	} else {
+		gser->pending = true;
+		status = 0;
+	}
+	spin_unlock_irqrestore(&gser->lock, flags);
+	return status;
+}
+
+static void gser_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_gser *gser = req->context;
+	u8	      doit = false;
+	unsigned long flags;
+
+	/* on this call path we do NOT hold the port spinlock,
+	 * which is why ACM needs its own spinlock
+	 */
+	spin_lock_irqsave(&gser->lock, flags);
+	if (req->status != -ESHUTDOWN)
+		doit = gser->pending;
+	gser->notify_req = req;
+	spin_unlock_irqrestore(&gser->lock, flags);
+
+	if (doit && gser->online)
+		gser_notify_serial_state(gser);
+}
+static void gser_connect(struct gserial *port)
+{
+	struct f_gser *gser = port_to_gser(port);
+
+	gser->serial_state |= ACM_CTRL_DSR | ACM_CTRL_DCD;
+	gser_notify_serial_state(gser);
+}
+
+unsigned int gser_get_dtr(struct gserial *port)
+{
+	struct f_gser *gser = port_to_gser(port);
+
+	if (gser->port_handshake_bits & ACM_CTRL_DTR)
+		return 1;
+	else
+		return 0;
+}
+
+unsigned int gser_get_rts(struct gserial *port)
+{
+	struct f_gser *gser = port_to_gser(port);
+
+	if (gser->port_handshake_bits & ACM_CTRL_RTS)
+		return 1;
+	else
+		return 0;
+}
+
+unsigned int gser_send_carrier_detect(struct gserial *port, unsigned int yes)
+{
+	struct f_gser *gser = port_to_gser(port);
+	u16			state;
+
+	state = gser->serial_state;
+	state &= ~ACM_CTRL_DCD;
+	if (yes)
+		state |= ACM_CTRL_DCD;
+
+	gser->serial_state = state;
+	return gser_notify_serial_state(gser);
+
+}
+
+unsigned int gser_send_ring_indicator(struct gserial *port, unsigned int yes)
+{
+	struct f_gser *gser = port_to_gser(port);
+	u16			state;
+
+	state = gser->serial_state;
+	state &= ~ACM_CTRL_RI;
+	if (yes)
+		state |= ACM_CTRL_RI;
+
+	gser->serial_state = state;
+	return gser_notify_serial_state(gser);
+
+}
+static void gser_disconnect(struct gserial *port)
+{
+	struct f_gser *gser = port_to_gser(port);
+
+	gser->serial_state &= ~(ACM_CTRL_DSR | ACM_CTRL_DCD);
+	gser_notify_serial_state(gser);
+}
+
+static int gser_send_break(struct gserial *port, int duration)
+{
+	struct f_gser *gser = port_to_gser(port);
+	u16			state;
+
+	state = gser->serial_state;
+	state &= ~ACM_CTRL_BRK;
+	if (duration)
+		state |= ACM_CTRL_BRK;
+
+	gser->serial_state = state;
+	return gser_notify_serial_state(gser);
+}
+
+static int gser_send_modem_ctrl_bits(struct gserial *port, int ctrl_bits)
+{
+	struct f_gser *gser = port_to_gser(port);
+
+	gser->serial_state = ctrl_bits;
+
+	return gser_notify_serial_state(gser);
+}
+#endif
 /*-------------------------------------------------------------------------*/
 
 /* serial function driver setup/binding */
@@ -190,6 +695,23 @@
 	gser->port.out = ep;
 	ep->driver_data = cdev;	/* claim */
 
+#ifdef CONFIG_MODEM_SUPPORT
+	ep = usb_ep_autoconfig(cdev->gadget, &gser_fs_notify_desc);
+	if (!ep)
+		goto fail;
+	gser->notify = ep;
+	ep->driver_data = cdev;	/* claim */
+	/* allocate notification */
+	gser->notify_req = gs_alloc_req(ep,
+			sizeof(struct usb_cdc_notification) + 2,
+			GFP_KERNEL);
+	if (!gser->notify_req)
+		goto fail;
+
+	gser->notify_req->complete = gser_notify_complete;
+	gser->notify_req->context = gser;
+#endif
+
 	/* copy descriptors, and track endpoint copies */
 	f->descriptors = usb_copy_descriptors(gser_fs_function);
 
@@ -197,6 +719,10 @@
 			f->descriptors, &gser_fs_in_desc);
 	gser->fs.out = usb_find_endpoint(gser_fs_function,
 			f->descriptors, &gser_fs_out_desc);
+#ifdef CONFIG_MODEM_SUPPORT
+	gser->fs.notify = usb_find_endpoint(gser_fs_function,
+			f->descriptors, &gser_fs_notify_desc);
+#endif
 
 
 	/* support all relevant hardware speeds... we expect that when
@@ -208,6 +734,10 @@
 				gser_fs_in_desc.bEndpointAddress;
 		gser_hs_out_desc.bEndpointAddress =
 				gser_fs_out_desc.bEndpointAddress;
+#ifdef CONFIG_MODEM_SUPPORT
+		gser_hs_notify_desc.bEndpointAddress =
+				gser_fs_notify_desc.bEndpointAddress;
+#endif
 
 		/* copy descriptors, and track endpoint copies */
 		f->hs_descriptors = usb_copy_descriptors(gser_hs_function);
@@ -216,6 +746,10 @@
 				f->hs_descriptors, &gser_hs_in_desc);
 		gser->hs.out = usb_find_endpoint(gser_hs_function,
 				f->hs_descriptors, &gser_hs_out_desc);
+#ifdef CONFIG_MODEM_SUPPORT
+		gser->hs.notify = usb_find_endpoint(gser_hs_function,
+				f->hs_descriptors, &gser_hs_notify_desc);
+#endif
 	}
 
 	DBG(cdev, "generic ttyGS%d: %s speed IN/%s OUT/%s\n",
@@ -225,6 +759,14 @@
 	return 0;
 
 fail:
+#ifdef CONFIG_MODEM_SUPPORT
+	if (gser->notify_req)
+		gs_free_req(gser->notify, gser->notify_req);
+
+	/* we might as well release our claims on endpoints */
+	if (gser->notify)
+		gser->notify->driver_data = NULL;
+#endif
 	/* we might as well release our claims on endpoints */
 	if (gser->port.out)
 		gser->port.out->driver_data = NULL;
@@ -239,9 +781,15 @@
 static void
 gser_unbind(struct usb_configuration *c, struct usb_function *f)
 {
+#ifdef CONFIG_MODEM_SUPPORT
+	struct f_gser *gser = func_to_gser(f);
+#endif
 	if (gadget_is_dualspeed(c->cdev->gadget))
 		usb_free_descriptors(f->hs_descriptors);
 	usb_free_descriptors(f->descriptors);
+#ifdef CONFIG_MODEM_SUPPORT
+	gs_free_req(gser->notify, gser->notify_req);
+#endif
 	kfree(func_to_gser(f));
 }
 
@@ -279,6 +827,9 @@
 	if (!gser)
 		return -ENOMEM;
 
+#ifdef CONFIG_MODEM_SUPPORT
+	spin_lock_init(&gser->lock);
+#endif
 	gser->port_num = port_num;
 
 	gser->port.func.name = "gser";
@@ -287,9 +838,130 @@
 	gser->port.func.unbind = gser_unbind;
 	gser->port.func.set_alt = gser_set_alt;
 	gser->port.func.disable = gser_disable;
+	gser->transport		= gserial_ports[port_num].transport;
+#ifdef CONFIG_MODEM_SUPPORT
+	/* We support only two ports for now */
+	if (port_num == 0)
+		gser->port.func.name = "modem";
+	else
+		gser->port.func.name = "nmea";
+	gser->port.func.setup = gser_setup;
+	gser->port.connect = gser_connect;
+	gser->port.get_dtr = gser_get_dtr;
+	gser->port.get_rts = gser_get_rts;
+	gser->port.send_carrier_detect = gser_send_carrier_detect;
+	gser->port.send_ring_indicator = gser_send_ring_indicator;
+	gser->port.send_modem_ctrl_bits = gser_send_modem_ctrl_bits;
+	gser->port.disconnect = gser_disconnect;
+	gser->port.send_break = gser_send_break;
+#endif
 
 	status = usb_add_function(c, &gser->port.func);
 	if (status)
 		kfree(gser);
 	return status;
 }
+
+#ifdef CONFIG_USB_F_SERIAL
+
+int fserial_nmea_bind_config(struct usb_configuration *c)
+{
+	return gser_bind_config(c, 1);
+}
+
+static struct android_usb_function nmea_function = {
+	.name = "nmea",
+	.bind_config = fserial_nmea_bind_config,
+};
+
+int fserial_modem_bind_config(struct usb_configuration *c)
+{
+	int ret;
+
+	/* Let the composite driver set up the underlying
+	 * serial ports. For now two ports are allocated,
+	 * one for modem and one for nmea.
+	 */
+	ret = gport_setup(c);
+
+	if (ret)
+		return ret;
+	return gser_bind_config(c, 0);
+}
+
+static struct android_usb_function modem_function = {
+	.name = "modem",
+	.bind_config = fserial_modem_bind_config,
+};
+
+static int fserial_remove(struct platform_device *dev)
+{
+	gserial_cleanup();
+
+	return 0;
+}
+
+static struct platform_driver usb_fserial = {
+	.remove		= fserial_remove,
+	.driver = {
+		.name = "usb_fserial",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init fserial_probe(struct platform_device *pdev)
+{
+	struct usb_gadget_fserial_platform_data	*pdata =
+					pdev->dev.platform_data;
+	int i;
+
+	dev_dbg(&pdev->dev, "%s: probe\n", __func__);
+
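+	/* Without platform data, skip the transport setup below and just
+	 * register the android gadget functions.
+	 */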
+	if (!pdata)
+		goto probe_android_register;
+
+	for (i = 0; i < GSERIAL_NO_PORTS; i++) {
+		gserial_ports[i].transport = pdata->transport[i];
+		gserial_ports[i].port_num = i;
+
+		switch (gserial_ports[i].transport) {
+		case USB_GADGET_FSERIAL_TRANSPORT_TTY:
+			gserial_ports[i].client_port_num = no_tty_ports;
+			no_tty_ports++;
+			break;
+		case USB_GADGET_FSERIAL_TRANSPORT_SDIO:
+			gserial_ports[i].client_port_num = no_sdio_ports;
+			no_sdio_ports++;
+			break;
+		case USB_GADGET_FSERIAL_TRANSPORT_SMD:
+			gserial_ports[i].client_port_num = no_smd_ports;
+			no_smd_ports++;
+			break;
+		default:
+			pr_err("%s: Unsupported transport: %u\n",
+					__func__, gserial_ports[i].transport);
+			return -ENODEV;
+		}
+
+		nr_ports++;
+	}
+
+	pr_info("%s:gport:tty_ports:%u sdio_ports:%u "
+			"smd_ports:%u nr_ports:%u\n",
+			__func__, no_tty_ports, no_sdio_ports,
+			no_smd_ports, nr_ports);
+
+probe_android_register:
+	android_register_function(&modem_function);
+	android_register_function(&nmea_function);
+
+	return 0;
+}
+
+static int __init fserial_init(void)
+{
+	return platform_driver_probe(&usb_fserial, fserial_probe);
+}
+module_init(fserial_init);
+
+#endif /* CONFIG_USB_F_SERIAL */
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index bcdac7c..05692bb 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -120,6 +120,12 @@
 #define gadget_is_ci13xxx_pci(g)	0
 #endif
 
+#ifdef CONFIG_USB_GADGET_MSM_72K
+#define	gadget_is_msm72k(g)	!strcmp("msm72k_udc", (g)->name)
+#else
+#define	gadget_is_msm72k(g)	0
+#endif
+
 // CONFIG_USB_GADGET_SX2
 // CONFIG_USB_GADGET_AU1X00
 // ...
@@ -223,6 +229,8 @@
 		return 0x29;
 	else if (gadget_is_s3c_hsudc(gadget))
 		return 0x30;
+	else if (gadget_is_msm72k(gadget))
+		return 0x31;
 
 	return -ENOENT;
 }
diff --git a/drivers/usb/gadget/msm72k_udc.c b/drivers/usb/gadget/msm72k_udc.c
new file mode 100644
index 0000000..24ba619
--- /dev/null
+++ b/drivers/usb/gadget/msm72k_udc.c
@@ -0,0 +1,2653 @@
+/*
+ * Driver for HighSpeed USB Client Controller in MSM7K
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *         Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/workqueue.h>
+#include <linux/switch.h>
+#include <linux/pm_runtime.h>
+
+#include <mach/msm72k_otg.h>
+#include <linux/io.h>
+
+#include <asm/mach-types.h>
+
+#include <mach/board.h>
+#include <mach/msm_hsusb.h>
+#include <linux/device.h>
+#include <mach/msm_hsusb_hw.h>
+#include <mach/clk.h>
+#include <linux/uaccess.h>
+#include <linux/wakelock.h>
+
+static const char driver_name[] = "msm72k_udc";
+
+/* #define DEBUG */
+/* #define VERBOSE */
+
+#define MSM_USB_BASE ((unsigned) ui->addr)
+
+#define	DRIVER_DESC		"MSM 72K USB Peripheral Controller"
+#define	DRIVER_NAME		"MSM72K_UDC"
+
+#define EPT_FLAG_IN        0x0001
+
+#define SETUP_BUF_SIZE     8
+
+
+static const char *const ep_name[] = {
+	"ep0out", "ep1out", "ep2out", "ep3out",
+	"ep4out", "ep5out", "ep6out", "ep7out",
+	"ep8out", "ep9out", "ep10out", "ep11out",
+	"ep12out", "ep13out", "ep14out", "ep15out",
+	"ep0in", "ep1in", "ep2in", "ep3in",
+	"ep4in", "ep5in", "ep6in", "ep7in",
+	"ep8in", "ep9in", "ep10in", "ep11in",
+	"ep12in", "ep13in", "ep14in", "ep15in"
+};
+
+/* To release the wakelock from debugfs */
+static int release_wlocks;
+
+struct msm_request {
+	struct usb_request req;
+
+	/* saved copy of req.complete */
+	void	(*gadget_complete)(struct usb_ep *ep,
+					struct usb_request *req);
+
+
+	struct usb_info *ui;
+	struct msm_request *next;
+	struct msm_request *prev;
+
+	unsigned busy:1;
+	unsigned live:1;
+	unsigned alloced:1;
+
+	dma_addr_t dma;
+	dma_addr_t item_dma;
+
+	struct ept_queue_item *item;
+};
+
+#define to_msm_request(r) container_of(r, struct msm_request, req)
+#define to_msm_endpoint(r) container_of(r, struct msm_endpoint, ep)
+#define to_msm_otg(xceiv)  container_of(xceiv, struct msm_otg, otg)
+#define is_b_sess_vld()	((OTGSC_BSV & readl(USB_OTGSC)) ? 1 : 0)
+#define is_usb_online(ui) (ui->usb_state != USB_STATE_NOTATTACHED)
+
+struct msm_endpoint {
+	struct usb_ep ep;
+	struct usb_info *ui;
+	struct msm_request *req; /* head of pending requests */
+	struct msm_request *last;
+	unsigned flags;
+
+	/* bit number (0-31) in various status registers
+	** as well as the index into the usb_info's array
+	** of all endpoints
+	*/
+	unsigned char bit;
+	unsigned char num;
+
+	unsigned wedged:1;
+	/* pointers to DMA transfer list area */
+	/* these are allocated from the usb_info dma space */
+	struct ept_queue_head *head;
+};
+
+/* PHY status check timer to monitor phy stuck up on reset */
+static struct timer_list phy_status_timer;
+
+static void usb_do_work(struct work_struct *w);
+static void usb_do_remote_wakeup(struct work_struct *w);
+
+
+#define USB_STATE_IDLE    0
+#define USB_STATE_ONLINE  1
+#define USB_STATE_OFFLINE 2
+
+#define USB_FLAG_START          0x0001
+#define USB_FLAG_VBUS_ONLINE    0x0002
+#define USB_FLAG_VBUS_OFFLINE   0x0004
+#define USB_FLAG_RESET          0x0008
+#define USB_FLAG_SUSPEND        0x0010
+#define USB_FLAG_CONFIGURED     0x0020
+
+#define USB_CHG_DET_DELAY	msecs_to_jiffies(1000)
+#define REMOTE_WAKEUP_DELAY	msecs_to_jiffies(1000)
+#define PHY_STATUS_CHECK_DELAY	(jiffies + msecs_to_jiffies(1000))
+
+struct usb_info {
+	/* lock for register/queue/device state changes */
+	spinlock_t lock;
+
+	/* single request used for handling setup transactions */
+	struct usb_request *setup_req;
+
+	struct platform_device *pdev;
+	int irq;
+	void *addr;
+
+	unsigned state;
+	unsigned flags;
+
+	atomic_t configured;
+	atomic_t running;
+
+	struct dma_pool *pool;
+
+	/* dma page to back the queue heads and items */
+	unsigned char *buf;
+	dma_addr_t dma;
+
+	struct ept_queue_head *head;
+
+	/* used for allocation */
+	unsigned next_item;
+	unsigned next_ifc_num;
+
+	/* endpoints are ordered based on their status bits,
+	** so they are OUT0, OUT1, ... OUT15, IN0, IN1, ... IN15
+	*/
+	struct msm_endpoint ept[32];
+
+
+	/* max power requested by selected configuration */
+	unsigned b_max_pow;
+	unsigned chg_current;
+	struct delayed_work chg_det;
+	struct delayed_work chg_stop;
+	struct msm_hsusb_gadget_platform_data *pdata;
+	struct work_struct phy_status_check;
+
+	struct work_struct work;
+	unsigned phy_status;
+	unsigned phy_fail_count;
+
+	struct usb_gadget		gadget;
+	struct usb_gadget_driver	*driver;
+	struct switch_dev sdev;
+
+#define ep0out ept[0]
+#define ep0in  ept[16]
+
+	atomic_t ep0_dir;
+	atomic_t test_mode;
+	atomic_t offline_pending;
+	atomic_t softconnect;
+#ifdef CONFIG_USB_OTG
+	u8 hnp_avail;
+#endif
+
+	atomic_t remote_wakeup;
+	atomic_t self_powered;
+	struct delayed_work rw_work;
+
+	struct otg_transceiver *xceiv;
+	enum usb_device_state usb_state;
+	struct wake_lock	wlock;
+};
+
+static const struct usb_ep_ops msm72k_ep_ops;
+static struct usb_info *the_usb_info;
+
+static int msm72k_wakeup(struct usb_gadget *_gadget);
+static int msm72k_pullup_internal(struct usb_gadget *_gadget, int is_active);
+static int msm72k_set_halt(struct usb_ep *_ep, int value);
+static void flush_endpoint(struct msm_endpoint *ept);
+static void usb_reset(struct usb_info *ui);
+static int usb_ept_set_halt(struct usb_ep *_ep, int value);
+
+static void msm_hsusb_set_speed(struct usb_info *ui)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	switch (readl(USB_PORTSC) & PORTSC_PSPD_MASK) {
+	case PORTSC_PSPD_FS:
+		dev_dbg(&ui->pdev->dev, "portchange USB_SPEED_FULL\n");
+		ui->gadget.speed = USB_SPEED_FULL;
+		break;
+	case PORTSC_PSPD_LS:
+		dev_dbg(&ui->pdev->dev, "portchange USB_SPEED_LOW\n");
+		ui->gadget.speed = USB_SPEED_LOW;
+		break;
+	case PORTSC_PSPD_HS:
+		dev_dbg(&ui->pdev->dev, "portchange USB_SPEED_HIGH\n");
+		ui->gadget.speed = USB_SPEED_HIGH;
+		break;
+	}
+	spin_unlock_irqrestore(&ui->lock, flags);
+}
+
+static void msm_hsusb_set_state(enum usb_device_state state)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&the_usb_info->lock, flags);
+	the_usb_info->usb_state = state;
+	spin_unlock_irqrestore(&the_usb_info->lock, flags);
+}
+
+static enum usb_device_state msm_hsusb_get_state(void)
+{
+	unsigned long flags;
+	enum usb_device_state state;
+
+	spin_lock_irqsave(&the_usb_info->lock, flags);
+	state = the_usb_info->usb_state;
+	spin_unlock_irqrestore(&the_usb_info->lock, flags);
+
+	return state;
+}
+
+static ssize_t print_switch_name(struct switch_dev *sdev, char *buf)
+{
+	return sprintf(buf, "%s\n", DRIVER_NAME);
+}
+
+static ssize_t print_switch_state(struct switch_dev *sdev, char *buf)
+{
+	return sprintf(buf, "%s\n", sdev->state ? "online" : "offline");
+}
+
+static inline enum chg_type usb_get_chg_type(struct usb_info *ui)
+{
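+	/* If all line-state bits in PORTSC are set, treat the supply as a
+	 * dedicated wall charger; otherwise assume a standard downstream
+	 * port (SDP).
+	 */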
+	if ((readl(USB_PORTSC) & PORTSC_LS) == PORTSC_LS)
+		return USB_CHG_TYPE__WALLCHARGER;
+	else
+		return USB_CHG_TYPE__SDP;
+}
+
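+/* Current (in mA) drawn when a dedicated wall charger is detected */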
+#define USB_WALLCHARGER_CHG_CURRENT 1800
+static int usb_get_max_power(struct usb_info *ui)
+{
+	struct msm_otg *otg = to_msm_otg(ui->xceiv);
+	unsigned long flags;
+	enum chg_type temp;
+	int suspended;
+	int configured;
+	unsigned bmaxpow;
+
+	if (ui->gadget.is_a_peripheral)
+		return -EINVAL;
+
+	temp = atomic_read(&otg->chg_type);
+	spin_lock_irqsave(&ui->lock, flags);
+	suspended = ui->usb_state == USB_STATE_SUSPENDED ? 1 : 0;
+	configured = atomic_read(&ui->configured);
+	bmaxpow = ui->b_max_pow;
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	if (temp == USB_CHG_TYPE__INVALID)
+		return -ENODEV;
+
+	if (temp == USB_CHG_TYPE__WALLCHARGER)
+		return USB_WALLCHARGER_CHG_CURRENT;
+
+	if (suspended || !configured)
+		return 0;
+
+	return bmaxpow;
+}
+
+static int usb_phy_stuck_check(struct usb_info *ui)
+{
+	/*
+	 * Write a test value (0xAA) into the scratch register (0x16) and
+	 * read it back. If the value read matches the value written, the
+	 * PHY is working normally; otherwise the PHY appears to be stuck.
+	 */
+
+	if (otg_io_write(ui->xceiv, 0xAA, 0x16) == -1) {
+		dev_dbg(&ui->pdev->dev,
+				"%s(): ulpi write timeout\n", __func__);
+		return -EIO;
+	}
+
+	if (otg_io_read(ui->xceiv, 0x16) != 0xAA) {
+		dev_dbg(&ui->pdev->dev,
+				"%s(): read value is incorrect\n", __func__);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * Check the PHY status by reading and writing the PHY scratch register.
+ * If the PHY is stuck, reset the hardware to recover it.
+ */
+static void usb_phy_stuck_recover(struct work_struct *w)
+{
+	struct usb_info *ui = the_usb_info;
+	struct msm_otg *otg = to_msm_otg(ui->xceiv);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	if (ui->gadget.speed != USB_SPEED_UNKNOWN ||
+			ui->usb_state == USB_STATE_NOTATTACHED ||
+			ui->driver == NULL) {
+		spin_unlock_irqrestore(&ui->lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	disable_irq(otg->irq);
+	if (usb_phy_stuck_check(ui)) {
+#ifdef CONFIG_USB_MSM_ACA
+		del_timer_sync(&otg->id_timer);
+#endif
+		ui->phy_fail_count++;
+		dev_err(&ui->pdev->dev,
+				"%s():PHY stuck, resetting HW\n", __func__);
+		/*
+		 * The PHY appears to be stuck; reset the PHY and the
+		 * HW link to recover it.
+		 */
+		usb_reset(ui);
+#ifdef CONFIG_USB_MSM_ACA
+		mod_timer(&otg->id_timer, jiffies +
+				 msecs_to_jiffies(OTG_ID_POLL_MS));
+#endif
+		msm72k_pullup_internal(&ui->gadget, 1);
+	}
+	enable_irq(otg->irq);
+}
+
+static void usb_phy_status_check_timer(unsigned long data)
+{
+	struct usb_info *ui = the_usb_info;
+
+	schedule_work(&ui->phy_status_check);
+}
+
+static void usb_chg_stop(struct work_struct *w)
+{
+	struct usb_info *ui = container_of(w, struct usb_info, chg_stop.work);
+	struct msm_otg *otg = to_msm_otg(ui->xceiv);
+	enum chg_type temp;
+
+	temp = atomic_read(&otg->chg_type);
+
+	if (temp == USB_CHG_TYPE__SDP)
+		otg_set_power(ui->xceiv, 0);
+}
+
+static void usb_chg_detect(struct work_struct *w)
+{
+	struct usb_info *ui = container_of(w, struct usb_info, chg_det.work);
+	struct msm_otg *otg = to_msm_otg(ui->xceiv);
+	enum chg_type temp = USB_CHG_TYPE__INVALID;
+	unsigned long flags;
+	int maxpower;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	if (ui->usb_state == USB_STATE_NOTATTACHED) {
+		spin_unlock_irqrestore(&ui->lock, flags);
+		return;
+	}
+
+	temp = usb_get_chg_type(ui);
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	atomic_set(&otg->chg_type, temp);
+	maxpower = usb_get_max_power(ui);
+	if (maxpower > 0)
+		otg_set_power(ui->xceiv, maxpower);
+
+	/* The USB driver prevents idle and suspend power collapse (PC)
+	 * while a USB cable is connected, but when a dedicated charger is
+	 * attached it can vote for both. The OTG driver handles idle PC as
+	 * part of the otg_set_power() call above when a wall charger is
+	 * attached. To allow suspend PC, release the wakelock; it will be
+	 * re-acquired on any subsequent USB interrupt.
+	 */
+	if (temp == USB_CHG_TYPE__WALLCHARGER) {
+		pm_runtime_put_sync(&ui->pdev->dev);
+		wake_unlock(&ui->wlock);
+	}
+}
+
+static int usb_ep_get_stall(struct msm_endpoint *ept)
+{
+	unsigned int n;
+	struct usb_info *ui = ept->ui;
+
+	n = readl(USB_ENDPTCTRL(ept->num));
+	if (ept->flags & EPT_FLAG_IN)
+		return (CTRL_TXS & n) ? 1 : 0;
+	else
+		return (CTRL_RXS & n) ? 1 : 0;
+}
+
+static void init_endpoints(struct usb_info *ui)
+{
+	unsigned n;
+
+	for (n = 0; n < 32; n++) {
+		struct msm_endpoint *ept = ui->ept + n;
+
+		ept->ui = ui;
+		ept->bit = n;
+		ept->num = n & 15;
+		ept->ep.name = ep_name[n];
+		ept->ep.ops = &msm72k_ep_ops;
+
+		if (ept->bit > 15) {
+			/* IN endpoint */
+			ept->head = ui->head + (ept->num << 1) + 1;
+			ept->flags = EPT_FLAG_IN;
+		} else {
+			/* OUT endpoint */
+			ept->head = ui->head + (ept->num << 1);
+			ept->flags = 0;
+		}
+
+	}
+}
+
+static void config_ept(struct msm_endpoint *ept)
+{
+	struct usb_info *ui = ept->ui;
+	unsigned cfg = CONFIG_MAX_PKT(ept->ep.maxpacket) | CONFIG_ZLT;
+
+	/* ep0 out needs interrupt-on-setup */
+	if (ept->bit == 0)
+		cfg |= CONFIG_IOS;
+
+	ept->head->config = cfg;
+	ept->head->next = TERMINATE;
+
+	if (ept->ep.maxpacket)
+		dev_dbg(&ui->pdev->dev,
+			"ept #%d %s max:%d head:%p bit:%d\n",
+		       ept->num,
+		       (ept->flags & EPT_FLAG_IN) ? "in" : "out",
+		       ept->ep.maxpacket, ept->head, ept->bit);
+}
+
+static void configure_endpoints(struct usb_info *ui)
+{
+	unsigned n;
+
+	for (n = 0; n < 32; n++)
+		config_ept(ui->ept + n);
+}
+
+struct usb_request *usb_ept_alloc_req(struct msm_endpoint *ept,
+			unsigned bufsize, gfp_t gfp_flags)
+{
+	struct usb_info *ui = ept->ui;
+	struct msm_request *req;
+
+	req = kzalloc(sizeof(*req), gfp_flags);
+	if (!req)
+		goto fail1;
+
+	req->item = dma_pool_alloc(ui->pool, gfp_flags, &req->item_dma);
+	if (!req->item)
+		goto fail2;
+
+	if (bufsize) {
+		req->req.buf = kmalloc(bufsize, gfp_flags);
+		if (!req->req.buf)
+			goto fail3;
+		req->alloced = 1;
+	}
+
+	return &req->req;
+
+fail3:
+	dma_pool_free(ui->pool, req->item, req->item_dma);
+fail2:
+	kfree(req);
+fail1:
+	return 0;
+}
+
+static void usb_ept_enable(struct msm_endpoint *ept, int yes,
+		unsigned char ep_type)
+{
+	struct usb_info *ui = ept->ui;
+	int in = ept->flags & EPT_FLAG_IN;
+	unsigned n;
+
+	n = readl(USB_ENDPTCTRL(ept->num));
+
+	if (in) {
+		if (yes) {
+			n = (n & (~CTRL_TXT_MASK)) |
+				(ep_type << CTRL_TXT_EP_TYPE_SHIFT);
+			n |= CTRL_TXE | CTRL_TXR;
+		} else
+			n &= (~CTRL_TXE);
+	} else {
+		if (yes) {
+			n = (n & (~CTRL_RXT_MASK)) |
+				(ep_type << CTRL_RXT_EP_TYPE_SHIFT);
+			n |= CTRL_RXE | CTRL_RXR;
+		} else
+			n &= ~(CTRL_RXE);
+	}
+	/* complete all the updates to ept->head before enabling endpoint*/
+	mb();
+	writel(n, USB_ENDPTCTRL(ept->num));
+
+	/* Ensure endpoint is enabled before returning */
+	mb();
+
+	dev_dbg(&ui->pdev->dev, "ept %d %s %s\n",
+	       ept->num, in ? "in" : "out", yes ? "enabled" : "disabled");
+}
+
+static void usb_ept_start(struct msm_endpoint *ept)
+{
+	struct usb_info *ui = ept->ui;
+	struct msm_request *req = ept->req;
+	struct msm_request *f_req = ept->req;
+	unsigned n = 1 << ept->bit;
+	unsigned info;
+	int reprime_cnt = 0;
+
+	BUG_ON(req->live);
+
+	while (req) {
+		req->live = 1;
+		/* prepare the transaction descriptor item for the hardware */
+		req->item->info =
+			INFO_BYTES(req->req.length) | INFO_IOC | INFO_ACTIVE;
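+		/* page0 holds the buffer's DMA address; page1..page3 point
+		 * at the following 4 KiB pages of the buffer.
+		 */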
+		req->item->page0 = req->dma;
+		req->item->page1 = (req->dma + 0x1000) & 0xfffff000;
+		req->item->page2 = (req->dma + 0x2000) & 0xfffff000;
+		req->item->page3 = (req->dma + 0x3000) & 0xfffff000;
+
+		if (req->next == NULL) {
+			req->item->next = TERMINATE;
+			break;
+		}
+		req->item->next = req->next->item_dma;
+		req = req->next;
+	}
+
+	rmb();
+	/* link the hw queue head to the request's transaction item */
+	ept->head->next = ept->req->item_dma;
+	ept->head->info = 0;
+
+reprime_ept:
+	/* flush buffers before priming ept */
+	mb();
+	/* During high-throughput testing it was observed that the endpoint
+	 * STAT bit is sometimes not set even though all the data structures
+	 * are updated properly and the endpoint PRIME bit is set. To work
+	 * around this, use the dTD INFO bit to decide whether to re-prime.
+	 */
+	writel_relaxed(n, USB_ENDPTPRIME);
+	/* busy wait till endptprime gets clear */
+	while ((readl_relaxed(USB_ENDPTPRIME) & n))
+		;
+	if (readl_relaxed(USB_ENDPTSTAT) & n)
+		return;
+
+	rmb();
+	info = f_req->item->info;
+	if (info & INFO_ACTIVE) {
+		if (reprime_cnt++ < 3)
+			goto reprime_ept;
+		else
+			pr_err("%s(): ept%d%s prime failed. ept: config: %x"
+				"active: %x next: %x info: %x\n"
+				" req@ %x next: %x info: %x\n",
+				__func__, ept->num,
+				ept->flags & EPT_FLAG_IN ? "in" : "out",
+				ept->head->config, ept->head->active,
+				ept->head->next, ept->head->info,
+				f_req->item_dma, f_req->item->next, info);
+	}
+}
+
+int usb_ept_queue_xfer(struct msm_endpoint *ept, struct usb_request *_req)
+{
+	unsigned long flags;
+	struct msm_request *req = to_msm_request(_req);
+	struct msm_request *last;
+	struct usb_info *ui = ept->ui;
+	unsigned length = req->req.length;
+
+	if (length > 0x4000)
+		return -EMSGSIZE;
+
+	spin_lock_irqsave(&ui->lock, flags);
+
+	if (req->busy) {
+		req->req.status = -EBUSY;
+		spin_unlock_irqrestore(&ui->lock, flags);
+		dev_err(&ui->pdev->dev,
+			"usb_ept_queue_xfer() tried to queue busy request\n");
+		return -EBUSY;
+	}
+
+	if (!atomic_read(&ui->configured) && (ept->num != 0)) {
+		req->req.status = -ESHUTDOWN;
+		spin_unlock_irqrestore(&ui->lock, flags);
+		if (printk_ratelimit())
+			dev_err(&ui->pdev->dev,
+				"%s: called while offline\n", __func__);
+		return -ESHUTDOWN;
+	}
+
+	if (ui->usb_state == USB_STATE_SUSPENDED) {
+		if (!atomic_read(&ui->remote_wakeup)) {
+			req->req.status = -EAGAIN;
+			spin_unlock_irqrestore(&ui->lock, flags);
+			if (printk_ratelimit())
+				dev_err(&ui->pdev->dev,
+				"%s: cannot queue as bus is suspended "
+				"ept #%d %s max:%d head:%p bit:%d\n",
+				__func__, ept->num,
+				(ept->flags & EPT_FLAG_IN) ? "in" : "out",
+				ept->ep.maxpacket, ept->head, ept->bit);
+
+			return -EAGAIN;
+		}
+
+		wake_lock(&ui->wlock);
+		otg_set_suspend(ui->xceiv, 0);
+		schedule_delayed_work(&ui->rw_work, REMOTE_WAKEUP_DELAY);
+	}
+
+	req->busy = 1;
+	req->live = 0;
+	req->next = 0;
+	req->req.status = -EBUSY;
+
+	req->dma = dma_map_single(NULL, req->req.buf, length,
+				  (ept->flags & EPT_FLAG_IN) ?
+				  DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+
+	/* Add the new request to the end of the queue */
+	last = ept->last;
+	if (last) {
+		/* There are already requests in the queue; add this one to
+		 * the end, but let the completion interrupt actually start
+		 * things going, to avoid HW issues.
+		 */
+		last->next = req;
+		req->prev = last;
+
+	} else {
+		/* queue was empty -- kick the hardware */
+		ept->req = req;
+		req->prev = NULL;
+		usb_ept_start(ept);
+	}
+	ept->last = req;
+
+	spin_unlock_irqrestore(&ui->lock, flags);
+	return 0;
+}
+
+/* --- endpoint 0 handling --- */
+
+static void ep0_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct msm_request *r = to_msm_request(req);
+	struct msm_endpoint *ept = to_msm_endpoint(ep);
+	struct usb_info *ui = ept->ui;
+
+	req->complete = r->gadget_complete;
+	r->gadget_complete = 0;
+	if	(req->complete)
+		req->complete(&ui->ep0in.ep, req);
+}
+
+static void ep0_status_complete(struct usb_ep *ep, struct usb_request *_req)
+{
+	struct usb_request *req = _req->context;
+	struct msm_request *r;
+	struct msm_endpoint *ept;
+	struct usb_info *ui;
+
+	pr_debug("%s:\n", __func__);
+	if (!req)
+		return;
+
+	r = to_msm_request(req);
+	ept = to_msm_endpoint(ep);
+	ui = ept->ui;
+	_req->context = 0;
+
+	req->complete = r->gadget_complete;
+	req->zero = 0;
+	r->gadget_complete = 0;
+	if (req->complete)
+		req->complete(&ui->ep0in.ep, req);
+
+}
+
+static void ep0_status_phase(struct usb_ep *ep, struct usb_request *req)
+{
+	struct msm_endpoint *ept = to_msm_endpoint(ep);
+	struct usb_info *ui = ept->ui;
+
+	pr_debug("%s:\n", __func__);
+
+	req->length = 0;
+	req->complete = ep0_status_complete;
+
+	/* status phase */
+	if (atomic_read(&ui->ep0_dir) == USB_DIR_IN)
+		usb_ept_queue_xfer(&ui->ep0out, req);
+	else
+		usb_ept_queue_xfer(&ui->ep0in, req);
+}
+
+static void ep0in_send_zero_leng_pkt(struct msm_endpoint *ept)
+{
+	struct usb_info *ui = ept->ui;
+	struct usb_request *req = ui->setup_req;
+
+	pr_debug("%s:\n", __func__);
+
+	req->length = 0;
+	req->complete = ep0_status_phase;
+	usb_ept_queue_xfer(&ui->ep0in, req);
+}
+
+static void ep0_queue_ack_complete(struct usb_ep *ep,
+	struct usb_request *_req)
+{
+	struct msm_endpoint *ept = to_msm_endpoint(ep);
+	struct usb_info *ui = ept->ui;
+	struct usb_request *req = ui->setup_req;
+
+	pr_debug("%s: _req:%p actual:%d length:%d zero:%d\n",
+			__func__, _req, _req->actual,
+			_req->length, _req->zero);
+
+	/* queue up the receive of the ACK response from the host */
+	if (_req->status == 0 && _req->actual == _req->length) {
+		req->context = _req;
+		if (atomic_read(&ui->ep0_dir) == USB_DIR_IN) {
+			if (_req->zero && _req->length &&
+					!(_req->length % ep->maxpacket)) {
+				ep0in_send_zero_leng_pkt(&ui->ep0in);
+				return;
+			}
+		}
+		ep0_status_phase(ep, req);
+	} else
+		ep0_complete(ep, _req);
+}
+
+static void ep0_setup_ack_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct msm_endpoint *ept = to_msm_endpoint(ep);
+	struct usb_info *ui = ept->ui;
+	unsigned int temp;
+	int test_mode = atomic_read(&ui->test_mode);
+
+	if (!test_mode)
+		return;
+
+	switch (test_mode) {
+	case J_TEST:
+		dev_info(&ui->pdev->dev, "usb electrical test mode: (J)\n");
+		temp = readl(USB_PORTSC) & (~PORTSC_PTC);
+		writel(temp | PORTSC_PTC_J_STATE, USB_PORTSC);
+		break;
+
+	case K_TEST:
+		dev_info(&ui->pdev->dev, "usb electrical test mode: (K)\n");
+		temp = readl(USB_PORTSC) & (~PORTSC_PTC);
+		writel(temp | PORTSC_PTC_K_STATE, USB_PORTSC);
+		break;
+
+	case SE0_NAK_TEST:
+		dev_info(&ui->pdev->dev,
+			"usb electrical test mode: (SE0-NAK)\n");
+		temp = readl(USB_PORTSC) & (~PORTSC_PTC);
+		writel(temp | PORTSC_PTC_SE0_NAK, USB_PORTSC);
+		break;
+
+	case TST_PKT_TEST:
+		dev_info(&ui->pdev->dev,
+			"usb electrical test mode: (TEST_PKT)\n");
+		temp = readl(USB_PORTSC) & (~PORTSC_PTC);
+		writel(temp | PORTSC_PTC_TST_PKT, USB_PORTSC);
+		break;
+	}
+}
+
+static void ep0_setup_ack(struct usb_info *ui)
+{
+	struct usb_request *req = ui->setup_req;
+	req->length = 0;
+	req->complete = ep0_setup_ack_complete;
+	usb_ept_queue_xfer(&ui->ep0in, req);
+}
+
+static void ep0_setup_stall(struct usb_info *ui)
+{
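+	/* Set both the TX (bit 16) and RX (bit 0) stall bits on ep0 */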
+	writel((1<<16) | (1<<0), USB_ENDPTCTRL(0));
+}
+
+static void ep0_setup_send(struct usb_info *ui, unsigned length)
+{
+	struct usb_request *req = ui->setup_req;
+	struct msm_request *r = to_msm_request(req);
+	struct msm_endpoint *ept = &ui->ep0in;
+
+	req->length = length;
+	req->complete = ep0_queue_ack_complete;
+	r->gadget_complete = 0;
+	usb_ept_queue_xfer(ept, req);
+}
+
+static void handle_setup(struct usb_info *ui)
+{
+	struct usb_ctrlrequest ctl;
+	struct usb_request *req = ui->setup_req;
+	int ret;
+#ifdef CONFIG_USB_OTG
+	u8 hnp;
+	unsigned long flags;
+#endif
+
+	memcpy(&ctl, ui->ep0out.head->setup_data, sizeof(ctl));
+	/* Ensure buffer is read before acknowledging to h/w */
+	mb();
+
+	writel(EPT_RX(0), USB_ENDPTSETUPSTAT);
+
+	if (ctl.bRequestType & USB_DIR_IN)
+		atomic_set(&ui->ep0_dir, USB_DIR_IN);
+	else
+		atomic_set(&ui->ep0_dir, USB_DIR_OUT);
+
+	/* any pending ep0 transactions must be canceled */
+	flush_endpoint(&ui->ep0out);
+	flush_endpoint(&ui->ep0in);
+
+	dev_dbg(&ui->pdev->dev,
+		"setup: type=%02x req=%02x val=%04x idx=%04x len=%04x\n",
+	       ctl.bRequestType, ctl.bRequest, ctl.wValue,
+	       ctl.wIndex, ctl.wLength);
+
+	if ((ctl.bRequestType & (USB_DIR_IN | USB_TYPE_MASK)) ==
+					(USB_DIR_IN | USB_TYPE_STANDARD)) {
+		if (ctl.bRequest == USB_REQ_GET_STATUS) {
+			/* OTG supplement Rev 2.0 introduces another device
+			 * GET_STATUS request for HNP polling with length = 1.
+			 */
+			u8 len = 2;
+			switch (ctl.bRequestType & USB_RECIP_MASK) {
+			case USB_RECIP_ENDPOINT:
+			{
+				struct msm_endpoint *ept;
+				unsigned num =
+					ctl.wIndex & USB_ENDPOINT_NUMBER_MASK;
+				u16 temp = 0;
+
+				if (num == 0) {
+					memset(req->buf, 0, 2);
+					break;
+				}
+				if (ctl.wIndex & USB_ENDPOINT_DIR_MASK)
+					num += 16;
+				ept = &ui->ep0out + num;
+				temp = usb_ep_get_stall(ept);
+				temp = temp << USB_ENDPOINT_HALT;
+				memcpy(req->buf, &temp, 2);
+				break;
+			}
+			case USB_RECIP_DEVICE:
+			{
+				u16 temp = 0;
+
+				if (ctl.wIndex == OTG_STATUS_SELECTOR) {
+#ifdef CONFIG_USB_OTG
+					spin_lock_irqsave(&ui->lock, flags);
+					hnp = (ui->gadget.host_request <<
+							HOST_REQUEST_FLAG);
+					ui->hnp_avail = 1;
+					spin_unlock_irqrestore(&ui->lock,
+							flags);
+					memcpy(req->buf, &hnp, 1);
+					len = 1;
+#else
+					goto stall;
+#endif
+				} else {
+					temp = (atomic_read(&ui->self_powered)
+						<< USB_DEVICE_SELF_POWERED);
+					temp |= (atomic_read(&ui->remote_wakeup)
+						<< USB_DEVICE_REMOTE_WAKEUP);
+					memcpy(req->buf, &temp, 2);
+				}
+				break;
+			}
+			case USB_RECIP_INTERFACE:
+				memset(req->buf, 0, 2);
+				break;
+			default:
+				goto stall;
+			}
+			ep0_setup_send(ui, len);
+			return;
+		}
+	}
+	if (ctl.bRequestType ==
+		    (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT)) {
+		if ((ctl.bRequest == USB_REQ_CLEAR_FEATURE) ||
+				(ctl.bRequest == USB_REQ_SET_FEATURE)) {
+			if ((ctl.wValue == 0) && (ctl.wLength == 0)) {
+				unsigned num = ctl.wIndex & 0x0f;
+
+				if (num != 0) {
+					struct msm_endpoint *ept;
+
+					if (ctl.wIndex & 0x80)
+						num += 16;
+					ept = &ui->ep0out + num;
+
+					if (ept->wedged)
+						goto ack;
+					if (ctl.bRequest == USB_REQ_SET_FEATURE)
+						usb_ept_set_halt(&ept->ep, 1);
+					else
+						usb_ept_set_halt(&ept->ep, 0);
+				}
+				goto ack;
+			}
+		}
+	}
+	if (ctl.bRequestType == (USB_DIR_OUT | USB_TYPE_STANDARD)) {
+		if (ctl.bRequest == USB_REQ_SET_CONFIGURATION) {
+			atomic_set(&ui->configured, !!ctl.wValue);
+			msm_hsusb_set_state(USB_STATE_CONFIGURED);
+		} else if (ctl.bRequest == USB_REQ_SET_ADDRESS) {
+			/*
+			 * Gadget speed should be set when the port change
+			 * (PCI) interrupt occurs, but sometimes that
+			 * interrupt does not occur after reset. Hence
+			 * update the gadget speed here as well.
+			 */
+			if (ui->gadget.speed == USB_SPEED_UNKNOWN) {
+				dev_info(&ui->pdev->dev,
+					"PCI intr missed; "
+					"set speed explicitly\n");
+				msm_hsusb_set_speed(ui);
+			}
+			msm_hsusb_set_state(USB_STATE_ADDRESS);
+
+			/* write address delayed (will take effect
+			** after the next IN txn)
+			*/
+			writel((ctl.wValue << 25) | (1 << 24), USB_DEVICEADDR);
+			goto ack;
+		} else if (ctl.bRequest == USB_REQ_SET_FEATURE) {
+			switch (ctl.wValue) {
+			case USB_DEVICE_TEST_MODE:
+				switch (ctl.wIndex) {
+				case J_TEST:
+				case K_TEST:
+				case SE0_NAK_TEST:
+				case TST_PKT_TEST:
+					atomic_set(&ui->test_mode, ctl.wIndex);
+					goto ack;
+				}
+				goto stall;
+			case USB_DEVICE_REMOTE_WAKEUP:
+				atomic_set(&ui->remote_wakeup, 1);
+				goto ack;
+#ifdef CONFIG_USB_OTG
+			case USB_DEVICE_B_HNP_ENABLE:
+				ui->gadget.b_hnp_enable = 1;
+				goto ack;
+			case USB_DEVICE_A_HNP_SUPPORT:
+			case USB_DEVICE_A_ALT_HNP_SUPPORT:
+				/* B-devices compliant with OTG spec
+				 * Rev 2.0 are not required to
+				 * support these features.
+				 */
+				goto stall;
+#endif
+			}
+		} else if ((ctl.bRequest == USB_REQ_CLEAR_FEATURE) &&
+				(ctl.wValue == USB_DEVICE_REMOTE_WAKEUP)) {
+			atomic_set(&ui->remote_wakeup, 0);
+			goto ack;
+		}
+	}
+
+	/* delegate if we get here */
+	if (ui->driver) {
+		ret = ui->driver->setup(&ui->gadget, &ctl);
+		if (ret >= 0)
+			return;
+	}
+
+stall:
+	/* stall ep0 on error */
+	ep0_setup_stall(ui);
+	return;
+
+ack:
+	ep0_setup_ack(ui);
+}
+
+static void handle_endpoint(struct usb_info *ui, unsigned bit)
+{
+	struct msm_endpoint *ept = ui->ept + bit;
+	struct msm_request *req;
+	unsigned long flags;
+	unsigned info;
+
+	/*
+	INFO("handle_endpoint() %d %s req=%p(%08x)\n",
+		ept->num, (ept->flags & EPT_FLAG_IN) ? "in" : "out",
+		ept->req, ept->req ? ept->req->item_dma : 0);
+	*/
+
+	/* expire all requests that are no longer active */
+	spin_lock_irqsave(&ui->lock, flags);
+	while ((req = ept->req)) {
+		/* if we've processed all live requests, time to
+		 * restart the hardware on the next non-live request
+		 */
+		if (!req->live) {
+			usb_ept_start(ept);
+			break;
+		}
+
+		/* clean speculative fetches on req->item->info */
+		dma_coherent_post_ops();
+		info = req->item->info;
+		/* if the transaction is still in-flight, stop here */
+		if (info & INFO_ACTIVE)
+			break;
+
+		/* advance ept queue to the next request */
+		ept->req = req->next;
+		if (ept->req == 0)
+			ept->last = 0;
+
+		dma_unmap_single(NULL, req->dma, req->req.length,
+				 (ept->flags & EPT_FLAG_IN) ?
+				 DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+		if (info & (INFO_HALTED | INFO_BUFFER_ERROR | INFO_TXN_ERROR)) {
+			/* XXX pass on more specific error code */
+			req->req.status = -EIO;
+			req->req.actual = 0;
+			dev_err(&ui->pdev->dev,
+				"ept %d %s error. info=%08x\n",
+			       ept->num,
+			       (ept->flags & EPT_FLAG_IN) ? "in" : "out",
+			       info);
+		} else {
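+			/* bits 30:16 of the dTD info word hold the number
+			 * of bytes left untransferred
+			 */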
+			req->req.status = 0;
+			req->req.actual =
+				req->req.length - ((info >> 16) & 0x7FFF);
+		}
+		req->busy = 0;
+		req->live = 0;
+
+		if (req->req.complete) {
+			spin_unlock_irqrestore(&ui->lock, flags);
+			req->req.complete(&ept->ep, &req->req);
+			spin_lock_irqsave(&ui->lock, flags);
+		}
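+/* software connection states driven by usb_do_work() */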
+	}
+	spin_unlock_irqrestore(&ui->lock, flags);
+}
+
+static void flush_endpoint_hw(struct usb_info *ui, unsigned bits)
+{
+	/* flush endpoint, canceling transactions
+	** - this can take a "large amount of time" (per databook)
+	** - the flush can fail in some cases, thus we check STAT
+	**   and repeat if we're still operating
+	**   (does the fact that this doesn't use the tripwire matter?!)
+	*/
+	do {
+		writel(bits, USB_ENDPTFLUSH);
+		while (readl(USB_ENDPTFLUSH) & bits)
+			udelay(100);
+	} while (readl(USB_ENDPTSTAT) & bits);
+}
+
+static void flush_endpoint_sw(struct msm_endpoint *ept)
+{
+	struct usb_info *ui = ept->ui;
+	struct msm_request *req, *next_req = NULL;
+	unsigned long flags;
+
+	/* inactive endpoints have nothing to do here */
+	if (ept->ep.maxpacket == 0)
+		return;
+
+	/* put the queue head in a sane state */
+	ept->head->info = 0;
+	ept->head->next = TERMINATE;
+
+	/* cancel any pending requests */
+	spin_lock_irqsave(&ui->lock, flags);
+	req = ept->req;
+	ept->req = 0;
+	ept->last = 0;
+	while (req != 0) {
+		req->busy = 0;
+		req->live = 0;
+		req->req.status = -ESHUTDOWN;
+		req->req.actual = 0;
+
+		/* Gadget driver may free the request in completion
+		 * handler. So keep a copy of next req pointer
+		 * before calling completion handler.
+		 */
+		next_req = req->next;
+		if (req->req.complete) {
+			spin_unlock_irqrestore(&ui->lock, flags);
+			req->req.complete(&ept->ep, &req->req);
+			spin_lock_irqsave(&ui->lock, flags);
+		}
+		req = next_req;
+	}
+	spin_unlock_irqrestore(&ui->lock, flags);
+}
+
+static void flush_endpoint(struct msm_endpoint *ept)
+{
+	flush_endpoint_hw(ept->ui, (1 << ept->bit));
+	flush_endpoint_sw(ept);
+}
+
+static irqreturn_t usb_interrupt(int irq, void *data)
+{
+	struct usb_info *ui = data;
+	unsigned n;
+	unsigned long flags;
+
+	n = readl(USB_USBSTS);
+	writel(n, USB_USBSTS);
+
+	/* somehow we got an IRQ while in the reset sequence: ignore it */
+	if (!atomic_read(&ui->running))
+		return IRQ_HANDLED;
+
+	if (n & STS_PCI) {
+		msm_hsusb_set_speed(ui);
+		if (atomic_read(&ui->configured)) {
+			wake_lock(&ui->wlock);
+
+			spin_lock_irqsave(&ui->lock, flags);
+			ui->usb_state = USB_STATE_CONFIGURED;
+			ui->flags = USB_FLAG_CONFIGURED;
+			spin_unlock_irqrestore(&ui->lock, flags);
+
+			ui->driver->resume(&ui->gadget);
+			schedule_work(&ui->work);
+		} else {
+			msm_hsusb_set_state(USB_STATE_DEFAULT);
+		}
+
+#ifdef CONFIG_USB_OTG
+		/* notify otg to clear A_BIDL_ADIS timer */
+		if (ui->gadget.is_a_peripheral)
+			otg_set_suspend(ui->xceiv, 0);
+#endif
+	}
+
+	if (n & STS_URI) {
+		dev_dbg(&ui->pdev->dev, "reset\n");
+		spin_lock_irqsave(&ui->lock, flags);
+		ui->gadget.speed = USB_SPEED_UNKNOWN;
+		spin_unlock_irqrestore(&ui->lock, flags);
+#ifdef CONFIG_USB_OTG
+		/* notify otg to clear A_BIDL_ADIS timer */
+		if (ui->gadget.is_a_peripheral)
+			otg_set_suspend(ui->xceiv, 0);
+		spin_lock_irqsave(&ui->lock, flags);
+		/* Host request is persistent across reset */
+		ui->gadget.b_hnp_enable = 0;
+		ui->hnp_avail = 0;
+		spin_unlock_irqrestore(&ui->lock, flags);
+#endif
+		msm_hsusb_set_state(USB_STATE_DEFAULT);
+		atomic_set(&ui->remote_wakeup, 0);
+		if (!ui->gadget.is_a_peripheral)
+			schedule_delayed_work(&ui->chg_stop, 0);
+
+		writel(readl(USB_ENDPTSETUPSTAT), USB_ENDPTSETUPSTAT);
+		writel(readl(USB_ENDPTCOMPLETE), USB_ENDPTCOMPLETE);
+		writel(0xffffffff, USB_ENDPTFLUSH);
+		writel(0, USB_ENDPTCTRL(1));
+
+		wake_lock(&ui->wlock);
+		if (atomic_read(&ui->configured)) {
+			/* marking us offline will cause ept queue attempts
+			** to fail
+			*/
+			atomic_set(&ui->configured, 0);
+			/* Defer sending offline uevent to userspace */
+			atomic_set(&ui->offline_pending, 1);
+
+			/* XXX: we can't seem to detect going offline,
+			 * XXX:  so deconfigure on reset for the time being
+			 */
+			if (ui->driver) {
+				dev_dbg(&ui->pdev->dev,
+					"usb: notify offline\n");
+				ui->driver->disconnect(&ui->gadget);
+			}
+			/* cancel pending ep0 transactions */
+			flush_endpoint(&ui->ep0out);
+			flush_endpoint(&ui->ep0in);
+
+		}
+		/* Start phy stuck timer */
+		if (ui->pdata && ui->pdata->is_phy_status_timer_on)
+			mod_timer(&phy_status_timer, PHY_STATUS_CHECK_DELAY);
+	}
+
+	if (n & STS_SLI) {
+		dev_dbg(&ui->pdev->dev, "suspend\n");
+
+		spin_lock_irqsave(&ui->lock, flags);
+		ui->usb_state = USB_STATE_SUSPENDED;
+		ui->flags = USB_FLAG_SUSPEND;
+		spin_unlock_irqrestore(&ui->lock, flags);
+
+		ui->driver->suspend(&ui->gadget);
+		schedule_work(&ui->work);
+#ifdef CONFIG_USB_OTG
+		/* notify otg for
+		 * 1. kicking A_BIDL_ADIS timer in case of A-peripheral
+		 * 2. disabling pull-up and kicking B_ASE0_RST timer
+		 */
+		if (ui->gadget.b_hnp_enable || ui->gadget.is_a_peripheral)
+			otg_set_suspend(ui->xceiv, 1);
+#endif
+	}
+
+	if (n & STS_UI) {
+		n = readl(USB_ENDPTSETUPSTAT);
+		if (n & EPT_RX(0))
+			handle_setup(ui);
+
+		n = readl(USB_ENDPTCOMPLETE);
+		writel(n, USB_ENDPTCOMPLETE);
+		while (n) {
+			unsigned bit = __ffs(n);
+			handle_endpoint(ui, bit);
+			n = n & (~(1 << bit));
+		}
+	}
+	return IRQ_HANDLED;
+}
+
+static void usb_prepare(struct usb_info *ui)
+{
+	spin_lock_init(&ui->lock);
+
+	memset(ui->buf, 0, 4096);
+	ui->head = (void *) (ui->buf + 0);
+
+	/* only important for reset/reinit */
+	memset(ui->ept, 0, sizeof(ui->ept));
+	ui->next_item = 0;
+	ui->next_ifc_num = 0;
+
+	init_endpoints(ui);
+
+	ui->ep0in.ep.maxpacket = 64;
+	ui->ep0out.ep.maxpacket = 64;
+
+	ui->setup_req =
+		usb_ept_alloc_req(&ui->ep0in, SETUP_BUF_SIZE, GFP_KERNEL);
+
+	INIT_WORK(&ui->work, usb_do_work);
+	INIT_DELAYED_WORK(&ui->chg_det, usb_chg_detect);
+	INIT_DELAYED_WORK(&ui->chg_stop, usb_chg_stop);
+	INIT_DELAYED_WORK(&ui->rw_work, usb_do_remote_wakeup);
+	if (ui->pdata && ui->pdata->is_phy_status_timer_on)
+		INIT_WORK(&ui->phy_status_check, usb_phy_stuck_recover);
+}
+
+static void usb_reset(struct usb_info *ui)
+{
+	struct msm_otg *otg = to_msm_otg(ui->xceiv);
+
+	dev_dbg(&ui->pdev->dev, "reset controller\n");
+
+	atomic_set(&ui->running, 0);
+
+	/*
+	 * PHY reset takes a minimum of 100 msec, so reset only the link
+	 * during HNP (A-peripheral mode); reset both the PHY and the link
+	 * in B-peripheral mode.
+	 */
+	if (ui->gadget.is_a_peripheral)
+		otg->reset(ui->xceiv, 0);
+	else
+		otg->reset(ui->xceiv, 1);
+
+	/* set usb controller interrupt threshold to zero*/
+	writel((readl(USB_USBCMD) & ~USBCMD_ITC_MASK) | USBCMD_ITC(0),
+							USB_USBCMD);
+
+	writel(ui->dma, USB_ENDPOINTLISTADDR);
+
+	configure_endpoints(ui);
+
+	/* marking us offline will cause ept queue attempts to fail */
+	atomic_set(&ui->configured, 0);
+
+	if (ui->driver) {
+		dev_dbg(&ui->pdev->dev, "usb: notify offline\n");
+		ui->driver->disconnect(&ui->gadget);
+	}
+
+	/* cancel pending ep0 transactions */
+	flush_endpoint(&ui->ep0out);
+	flush_endpoint(&ui->ep0in);
+
+	/* enable interrupts */
+	writel(STS_URI | STS_SLI | STS_UI | STS_PCI, USB_USBINTR);
+
+	/* Ensure that h/w RESET is completed before returning */
+	mb();
+
+	atomic_set(&ui->running, 1);
+}
+
+static void usb_start(struct usb_info *ui)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	ui->flags |= USB_FLAG_START;
+	schedule_work(&ui->work);
+	spin_unlock_irqrestore(&ui->lock, flags);
+}
+
+static int usb_free(struct usb_info *ui, int ret)
+{
+	dev_dbg(&ui->pdev->dev, "usb_free(%d)\n", ret);
+
+	if (ui->xceiv)
+		otg_put_transceiver(ui->xceiv);
+
+	if (ui->irq)
+		free_irq(ui->irq, 0);
+	if (ui->pool)
+		dma_pool_destroy(ui->pool);
+	if (ui->dma)
+		dma_free_coherent(&ui->pdev->dev, 4096, ui->buf, ui->dma);
+	kfree(ui);
+	return ret;
+}
+
+static void usb_do_work_check_vbus(struct usb_info *ui)
+{
+	unsigned long iflags;
+
+	spin_lock_irqsave(&ui->lock, iflags);
+	if (is_usb_online(ui))
+		ui->flags |= USB_FLAG_VBUS_ONLINE;
+	else
+		ui->flags |= USB_FLAG_VBUS_OFFLINE;
+	spin_unlock_irqrestore(&ui->lock, iflags);
+}
+
+static void usb_do_work(struct work_struct *w)
+{
+	struct usb_info *ui = container_of(w, struct usb_info, work);
+	struct msm_otg *otg = to_msm_otg(ui->xceiv);
+	unsigned long iflags;
+	unsigned flags, _vbus;
+
+	for (;;) {
+		spin_lock_irqsave(&ui->lock, iflags);
+		flags = ui->flags;
+		ui->flags = 0;
+		_vbus = is_usb_online(ui);
+		spin_unlock_irqrestore(&ui->lock, iflags);
+
+		/* give up if we have nothing to do */
+		if (flags == 0)
+			break;
+
+		switch (ui->state) {
+		case USB_STATE_IDLE:
+			if (flags & USB_FLAG_START) {
+				int ret;
+
+				if (!_vbus) {
+					ui->state = USB_STATE_OFFLINE;
+					break;
+				}
+
+				pm_runtime_get_noresume(&ui->pdev->dev);
+				pm_runtime_resume(&ui->pdev->dev);
+				dev_dbg(&ui->pdev->dev,
+					"msm72k_udc: IDLE -> ONLINE\n");
+				usb_reset(ui);
+				ret = request_irq(otg->irq, usb_interrupt,
+							IRQF_SHARED,
+							ui->pdev->name, ui);
+				/* FIXME: should we call BUG_ON when
+				 * request_irq fails
+				 */
+				if (ret) {
+					dev_err(&ui->pdev->dev,
+						"hsusb: peripheral: request irq"
+						" failed:(%d)", ret);
+					break;
+				}
+				ui->irq = otg->irq;
+				ui->state = USB_STATE_ONLINE;
+				usb_do_work_check_vbus(ui);
+
+				if (!atomic_read(&ui->softconnect))
+					break;
+
+				msm72k_pullup_internal(&ui->gadget, 1);
+
+				if (!ui->gadget.is_a_peripheral)
+					schedule_delayed_work(
+							&ui->chg_det,
+							USB_CHG_DET_DELAY);
+
+			}
+			break;
+		case USB_STATE_ONLINE:
+			if (atomic_read(&ui->offline_pending)) {
+				switch_set_state(&ui->sdev, 0);
+				atomic_set(&ui->offline_pending, 0);
+			}
+
+			/* If at any point when we were online, we received
+			 * the signal to go offline, we must honor it
+			 */
+			if (flags & USB_FLAG_VBUS_OFFLINE) {
+
+				ui->chg_current = 0;
+				/* wait in case chg_detect is running */
+				if (!ui->gadget.is_a_peripheral)
+					cancel_delayed_work_sync(&ui->chg_det);
+
+				dev_dbg(&ui->pdev->dev,
+					"msm72k_udc: ONLINE -> OFFLINE\n");
+
+				atomic_set(&ui->running, 0);
+				atomic_set(&ui->remote_wakeup, 0);
+				atomic_set(&ui->configured, 0);
+
+				if (ui->driver) {
+					dev_dbg(&ui->pdev->dev,
+						"usb: notify offline\n");
+					ui->driver->disconnect(&ui->gadget);
+				}
+				/* cancel pending ep0 transactions */
+				flush_endpoint(&ui->ep0out);
+				flush_endpoint(&ui->ep0in);
+
+				/* synchronize with irq context */
+				spin_lock_irqsave(&ui->lock, iflags);
+#ifdef CONFIG_USB_OTG
+				ui->gadget.host_request = 0;
+				ui->gadget.b_hnp_enable = 0;
+				ui->hnp_avail = 0;
+#endif
+				msm72k_pullup_internal(&ui->gadget, 0);
+				spin_unlock_irqrestore(&ui->lock, iflags);
+
+
+				/* if charger is initialized to known type
+				 * we must let modem know about charger
+				 * disconnection
+				 */
+				otg_set_power(ui->xceiv, 0);
+
+				if (ui->irq) {
+					free_irq(ui->irq, ui);
+					ui->irq = 0;
+				}
+
+
+				switch_set_state(&ui->sdev, 0);
+
+				ui->state = USB_STATE_OFFLINE;
+				usb_do_work_check_vbus(ui);
+				pm_runtime_put_noidle(&ui->pdev->dev);
+				pm_runtime_suspend(&ui->pdev->dev);
+				wake_unlock(&ui->wlock);
+				break;
+			}
+			if (flags & USB_FLAG_SUSPEND) {
+				int maxpower = usb_get_max_power(ui);
+
+				if (maxpower < 0)
+					break;
+
+				otg_set_power(ui->xceiv, 0);
+				/* To support TCXO during bus suspend.
+				 * This may be a dummy check since bus suspend
+				 * is not implemented as of now.
+				 */
+				if (release_wlocks)
+					wake_unlock(&ui->wlock);
+
+				/* TBD: Initiate LPM at usb bus suspend */
+				break;
+			}
+			if (flags & USB_FLAG_CONFIGURED) {
+				int maxpower = usb_get_max_power(ui);
+
+				/* We may come here even when no configuration
+				 * is selected. Send online/offline event
+				 * accordingly.
+				 */
+				switch_set_state(&ui->sdev,
+						atomic_read(&ui->configured));
+
+				if (maxpower < 0)
+					break;
+
+				ui->chg_current = maxpower;
+				otg_set_power(ui->xceiv, maxpower);
+				break;
+			}
+			if (flags & USB_FLAG_RESET) {
+				dev_dbg(&ui->pdev->dev,
+					"msm72k_udc: ONLINE -> RESET\n");
+				msm72k_pullup_internal(&ui->gadget, 0);
+				usb_reset(ui);
+				msm72k_pullup_internal(&ui->gadget, 1);
+				dev_dbg(&ui->pdev->dev,
+					"msm72k_udc: RESET -> ONLINE\n");
+				break;
+			}
+			break;
+		case USB_STATE_OFFLINE:
+			/* If we were signaled to go online and vbus is still
+			 * present when we received the signal, go online.
+			 */
+			if ((flags & USB_FLAG_VBUS_ONLINE) && _vbus) {
+				int ret;
+
+				pm_runtime_get_noresume(&ui->pdev->dev);
+				pm_runtime_resume(&ui->pdev->dev);
+				dev_dbg(&ui->pdev->dev,
+					"msm72k_udc: OFFLINE -> ONLINE\n");
+
+				usb_reset(ui);
+				ui->state = USB_STATE_ONLINE;
+				usb_do_work_check_vbus(ui);
+				ret = request_irq(otg->irq, usb_interrupt,
+							IRQF_SHARED,
+							ui->pdev->name, ui);
+				/* FIXME: should we call BUG_ON when
+				 * request_irq fails
+				 */
+				if (ret) {
+					dev_err(&ui->pdev->dev,
+						"hsusb: peripheral: request irq"
+						" failed:(%d)", ret);
+					break;
+				}
+				ui->irq = otg->irq;
+				enable_irq_wake(otg->irq);
+
+				if (!atomic_read(&ui->softconnect))
+					break;
+				msm72k_pullup_internal(&ui->gadget, 1);
+
+				if (!ui->gadget.is_a_peripheral)
+					schedule_delayed_work(
+							&ui->chg_det,
+							USB_CHG_DET_DELAY);
+			}
+			break;
+		}
+	}
+}
+
+/* FIXME - the callers of this function should use a gadget API instead.
+ * This is called from htc_battery.c and board-halibut.c
+ * WARNING - this can get called before this driver is initialized.
+ */
+void msm_hsusb_set_vbus_state(int online)
+{
+	unsigned long flags;
+	struct usb_info *ui = the_usb_info;
+
+	if (!ui) {
+		pr_err("%s called before driver initialized\n", __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&ui->lock, flags);
+
+	if (is_usb_online(ui) ==  online)
+		goto out;
+
+	if (online) {
+		ui->usb_state = USB_STATE_POWERED;
+		ui->flags |= USB_FLAG_VBUS_ONLINE;
+	} else {
+		ui->gadget.speed = USB_SPEED_UNKNOWN;
+		ui->usb_state = USB_STATE_NOTATTACHED;
+		ui->flags |= USB_FLAG_VBUS_OFFLINE;
+	}
+	if (in_interrupt()) {
+		schedule_work(&ui->work);
+	} else {
+		spin_unlock_irqrestore(&ui->lock, flags);
+		usb_do_work(&ui->work);
+		return;
+	}
+out:
+	spin_unlock_irqrestore(&ui->lock, flags);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+void usb_function_reenumerate(void)
+{
+	struct usb_info *ui = the_usb_info;
+
+	/* disable and re-enable the D+ pullup */
+	dev_dbg(&ui->pdev->dev, "disable pullup\n");
+	writel(readl(USB_USBCMD) & ~USBCMD_RS, USB_USBCMD);
+
+	msleep(10);
+
+	dev_dbg(&ui->pdev->dev, "enable pullup\n");
+	writel(readl(USB_USBCMD) | USBCMD_RS, USB_USBCMD);
+}
+
+static char debug_buffer[PAGE_SIZE];
+
+static ssize_t debug_read_status(struct file *file, char __user *ubuf,
+				 size_t count, loff_t *ppos)
+{
+	struct usb_info *ui = file->private_data;
+	char *buf = debug_buffer;
+	unsigned long flags;
+	struct msm_endpoint *ept;
+	struct msm_request *req;
+	int n;
+	int i = 0;
+
+	spin_lock_irqsave(&ui->lock, flags);
+
+	i += scnprintf(buf + i, PAGE_SIZE - i,
+		   "regs: setup=%08x prime=%08x stat=%08x done=%08x\n",
+		   readl(USB_ENDPTSETUPSTAT),
+		   readl(USB_ENDPTPRIME),
+		   readl(USB_ENDPTSTAT),
+		   readl(USB_ENDPTCOMPLETE));
+	i += scnprintf(buf + i, PAGE_SIZE - i,
+		   "regs:   cmd=%08x   sts=%08x intr=%08x port=%08x\n\n",
+		   readl(USB_USBCMD),
+		   readl(USB_USBSTS),
+		   readl(USB_USBINTR),
+		   readl(USB_PORTSC));
+
+
+	for (n = 0; n < 32; n++) {
+		ept = ui->ept + n;
+		if (ept->ep.maxpacket == 0)
+			continue;
+
+		i += scnprintf(buf + i, PAGE_SIZE - i,
+			"ept%d %s cfg=%08x active=%08x next=%08x info=%08x\n",
+			ept->num, (ept->flags & EPT_FLAG_IN) ? "in " : "out",
+			ept->head->config, ept->head->active,
+			ept->head->next, ept->head->info);
+
+		for (req = ept->req; req; req = req->next)
+			i += scnprintf(buf + i, PAGE_SIZE - i,
+			"  req @%08x next=%08x info=%08x page0=%08x %c %c\n",
+				req->item_dma, req->item->next,
+				req->item->info, req->item->page0,
+				req->busy ? 'B' : ' ',
+				req->live ? 'L' : ' ');
+	}
+
+	i += scnprintf(buf + i, PAGE_SIZE - i,
+			   "phy failure count: %d\n", ui->phy_fail_count);
+
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	return simple_read_from_buffer(ubuf, count, ppos, buf, i);
+}
+
+static ssize_t debug_write_reset(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	struct usb_info *ui = file->private_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	ui->flags |= USB_FLAG_RESET;
+	schedule_work(&ui->work);
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	return count;
+}
+
+static ssize_t debug_write_cycle(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	usb_function_reenumerate();
+	return count;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+const struct file_operations debug_stat_ops = {
+	.open = debug_open,
+	.read = debug_read_status,
+};
+
+const struct file_operations debug_reset_ops = {
+	.open = debug_open,
+	.write = debug_write_reset,
+};
+
+const struct file_operations debug_cycle_ops = {
+	.open = debug_open,
+	.write = debug_write_cycle,
+};
+
+static ssize_t debug_read_release_wlocks(struct file *file, char __user *ubuf,
+				 size_t count, loff_t *ppos)
+{
+	char kbuf[10];
+	size_t c = 0;
+
+	memset(kbuf, 0, 10);
+
+	c = scnprintf(kbuf, 10, "%d", release_wlocks);
+
+	if (copy_to_user(ubuf, kbuf, c))
+		return -EFAULT;
+
+	return c;
+}
+static ssize_t debug_write_release_wlocks(struct file *file,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	char kbuf[10];
+	long temp;
+
+	memset(kbuf, 0, 10);
+
+	if (copy_from_user(kbuf, buf, count > 10 ? 10 : count))
+		return -EFAULT;
+
+	if (strict_strtol(kbuf, 10, &temp))
+		return -EINVAL;
+
+	if (temp)
+		release_wlocks = 1;
+	else
+		release_wlocks = 0;
+
+	return count;
+}
+static int debug_wake_lock_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+const struct file_operations debug_wlocks_ops = {
+	.open = debug_wake_lock_open,
+	.read = debug_read_release_wlocks,
+	.write = debug_write_release_wlocks,
+};
+static void usb_debugfs_init(struct usb_info *ui)
+{
+	struct dentry *dent;
+	dent = debugfs_create_dir(dev_name(&ui->pdev->dev), 0);
+	if (IS_ERR(dent))
+		return;
+
+	debugfs_create_file("status", 0444, dent, ui, &debug_stat_ops);
+	debugfs_create_file("reset", 0222, dent, ui, &debug_reset_ops);
+	debugfs_create_file("cycle", 0222, dent, ui, &debug_cycle_ops);
+	debugfs_create_file("release_wlocks", 0666, dent, ui,
+						&debug_wlocks_ops);
+}
+#else
+static void usb_debugfs_init(struct usb_info *ui) {}
+#endif
+
+static int
+msm72k_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
+{
+	struct msm_endpoint *ept = to_msm_endpoint(_ep);
+	unsigned char ep_type =
+			desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+	_ep->maxpacket = le16_to_cpu(desc->wMaxPacketSize);
+	config_ept(ept);
+	ept->wedged = 0;
+	usb_ept_enable(ept, 1, ep_type);
+	return 0;
+}
+
+static int msm72k_disable(struct usb_ep *_ep)
+{
+	struct msm_endpoint *ept = to_msm_endpoint(_ep);
+
+	usb_ept_enable(ept, 0, 0);
+	flush_endpoint(ept);
+	return 0;
+}
+
+static struct usb_request *
+msm72k_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
+{
+	return usb_ept_alloc_req(to_msm_endpoint(_ep), 0, gfp_flags);
+}
+
+static void
+msm72k_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct msm_request *req = to_msm_request(_req);
+	struct msm_endpoint *ept = to_msm_endpoint(_ep);
+	struct usb_info *ui = ept->ui;
+
+	/* request should not be busy */
+	BUG_ON(req->busy);
+	if (req->alloced)
+		kfree(req->req.buf);
+	dma_pool_free(ui->pool, req->item, req->item_dma);
+	kfree(req);
+}
+
+static int
+msm72k_queue(struct usb_ep *_ep, struct usb_request *req, gfp_t gfp_flags)
+{
+	struct msm_endpoint *ep = to_msm_endpoint(_ep);
+	struct usb_info *ui = ep->ui;
+
+	if (ep == &ui->ep0in) {
+		struct msm_request *r = to_msm_request(req);
+		if (!req->length)
+			goto ep_queue_done;
+		r->gadget_complete = req->complete;
+		/* ep0_queue_ack_complete queues a receive for the ACK before
+		** calling req->complete
+		*/
+		req->complete = ep0_queue_ack_complete;
+		if (atomic_read(&ui->ep0_dir) == USB_DIR_OUT)
+			ep = &ui->ep0out;
+		goto ep_queue_done;
+	}
+
+ep_queue_done:
+	return usb_ept_queue_xfer(ep, req);
+}
+
+static int msm72k_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct msm_endpoint *ep = to_msm_endpoint(_ep);
+	struct msm_request *req = to_msm_request(_req);
+	struct usb_info *ui = ep->ui;
+
+	struct msm_request *temp_req;
+	unsigned long flags;
+
+	if (!(ui && req && ep->req))
+		return -EINVAL;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	if (!req->busy) {
+		dev_dbg(&ui->pdev->dev, "%s: !req->busy\n", __func__);
+		spin_unlock_irqrestore(&ui->lock, flags);
+		return -EINVAL;
+	}
+	/* Stop the transfer */
+	do {
+		writel((1 << ep->bit), USB_ENDPTFLUSH);
+		while (readl(USB_ENDPTFLUSH) & (1 << ep->bit))
+			udelay(100);
+	} while (readl(USB_ENDPTSTAT) & (1 << ep->bit));
+
+	req->req.status = 0;
+	req->busy = 0;
+
+	if (ep->req == req) {
+		ep->req = req->next;
+		ep->head->next = req->item->next;
+	} else {
+		req->prev->next = req->next;
+		if (req->next)
+			req->next->prev = req->prev;
+		req->prev->item->next = req->item->next;
+	}
+
+	if (!req->next)
+		ep->last = req->prev;
+
+	/* initialize request to default */
+	req->item->next = TERMINATE;
+	req->item->info = 0;
+	req->live = 0;
+	dma_unmap_single(NULL, req->dma, req->req.length,
+		(ep->flags & EPT_FLAG_IN) ?
+		DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+	if (req->req.complete) {
+		req->req.status = -ECONNRESET;
+		spin_unlock_irqrestore(&ui->lock, flags);
+		req->req.complete(&ep->ep, &req->req);
+		spin_lock_irqsave(&ui->lock, flags);
+	}
+
+	if (!req->live) {
+		/* Reprime the endpoint for the remaining transfers */
+		for (temp_req = ep->req ; temp_req ; temp_req = temp_req->next)
+			temp_req->live = 0;
+		if (ep->req)
+			usb_ept_start(ep);
+		spin_unlock_irqrestore(&ui->lock, flags);
+		return 0;
+	}
+	spin_unlock_irqrestore(&ui->lock, flags);
+	return 0;
+}
+
+static int
+usb_ept_set_halt(struct usb_ep *_ep, int value)
+{
+	struct msm_endpoint *ept = to_msm_endpoint(_ep);
+	struct usb_info *ui = ept->ui;
+	unsigned int in = ept->flags & EPT_FLAG_IN;
+	unsigned int n;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ui->lock, flags);
+
+	n = readl(USB_ENDPTCTRL(ept->num));
+
+	if (in) {
+		if (value)
+			n |= CTRL_TXS;
+		else {
+			n &= ~CTRL_TXS;
+			n |= CTRL_TXR;
+		}
+	} else {
+		if (value)
+			n |= CTRL_RXS;
+		else {
+			n &= ~CTRL_RXS;
+			n |= CTRL_RXR;
+		}
+	}
+	writel(n, USB_ENDPTCTRL(ept->num));
+	if (!value)
+		ept->wedged = 0;
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	return 0;
+}
+
+static int
+msm72k_set_halt(struct usb_ep *_ep, int value)
+{
+	struct msm_endpoint *ept = to_msm_endpoint(_ep);
+	unsigned int in = ept->flags & EPT_FLAG_IN;
+
+	if (value && in && ept->req)
+		return -EAGAIN;
+
+	usb_ept_set_halt(_ep, value);
+
+	return 0;
+}
+
+static int
+msm72k_fifo_status(struct usb_ep *_ep)
+{
+	return -EOPNOTSUPP;
+}
+
+static void
+msm72k_fifo_flush(struct usb_ep *_ep)
+{
+	flush_endpoint(to_msm_endpoint(_ep));
+}
+static int msm72k_set_wedge(struct usb_ep *_ep)
+{
+	struct msm_endpoint *ept = to_msm_endpoint(_ep);
+
+	if (ept->num == 0)
+		return -EINVAL;
+
+	ept->wedged = 1;
+
+	return msm72k_set_halt(_ep, 1);
+}
+
+static const struct usb_ep_ops msm72k_ep_ops = {
+	.enable		= msm72k_enable,
+	.disable	= msm72k_disable,
+
+	.alloc_request	= msm72k_alloc_request,
+	.free_request	= msm72k_free_request,
+
+	.queue		= msm72k_queue,
+	.dequeue	= msm72k_dequeue,
+
+	.set_halt	= msm72k_set_halt,
+	.set_wedge	= msm72k_set_wedge,
+	.fifo_status	= msm72k_fifo_status,
+	.fifo_flush	= msm72k_fifo_flush,
+};
+
+static int msm72k_get_frame(struct usb_gadget *_gadget)
+{
+	struct usb_info *ui = container_of(_gadget, struct usb_info, gadget);
+
+	/* frame number is in bits 13:3 */
+	return (readl(USB_FRINDEX) >> 3) & 0x000007FF;
+}
+
+/* VBUS reporting logically comes from a transceiver */
+static int msm72k_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
+{
+	struct usb_info *ui = container_of(_gadget, struct usb_info, gadget);
+	struct msm_otg *otg = to_msm_otg(ui->xceiv);
+
+	if (is_active || atomic_read(&otg->chg_type)
+					 == USB_CHG_TYPE__WALLCHARGER)
+		wake_lock(&ui->wlock);
+
+	msm_hsusb_set_vbus_state(is_active);
+	return 0;
+}
+
+/* SW workarounds
+ * Issue #1	- USB spoof disconnect failure
+ * Symptom	- Writing 0 to the run/stop bit of USBCMD does not cause
+ *		  a disconnect
+ * Workaround	- Set opmode to non-driving and set SuspendM in the
+ *		  function register of the SMSC PHY
+ */
+/* drivers may have software control over D+ pullup */
+static int msm72k_pullup_internal(struct usb_gadget *_gadget, int is_active)
+{
+	struct usb_info *ui = container_of(_gadget, struct usb_info, gadget);
+	unsigned long flags;
+
+	if (is_active) {
+		spin_lock_irqsave(&ui->lock, flags);
+		if (is_usb_online(ui) && ui->driver)
+			writel(readl(USB_USBCMD) | USBCMD_RS, USB_USBCMD);
+		spin_unlock_irqrestore(&ui->lock, flags);
+	} else {
+		writel(readl(USB_USBCMD) & ~USBCMD_RS, USB_USBCMD);
+		/* S/W workaround, Issue#1 */
+		otg_io_write(ui->xceiv, 0x48, 0x04);
+	}
+
+	/* Ensure pull-up operation is completed before returning */
+	mb();
+
+	return 0;
+}
+
+static int msm72k_pullup(struct usb_gadget *_gadget, int is_active)
+{
+	struct usb_info *ui = container_of(_gadget, struct usb_info, gadget);
+	unsigned long flags;
+
+
+	atomic_set(&ui->softconnect, is_active);
+
+	spin_lock_irqsave(&ui->lock, flags);
+	if (ui->usb_state == USB_STATE_NOTATTACHED || ui->driver == NULL) {
+		spin_unlock_irqrestore(&ui->lock, flags);
+		return 0;
+	}
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	msm72k_pullup_internal(_gadget, is_active);
+
+	if (is_active && !ui->gadget.is_a_peripheral)
+		schedule_delayed_work(&ui->chg_det, USB_CHG_DET_DELAY);
+
+	return 0;
+}
+
+static int msm72k_wakeup(struct usb_gadget *_gadget)
+{
+	struct usb_info *ui = container_of(_gadget, struct usb_info, gadget);
+	struct msm_otg *otg = to_msm_otg(ui->xceiv);
+
+	if (!atomic_read(&ui->remote_wakeup)) {
+		dev_err(&ui->pdev->dev,
+			"%s: remote wakeup not supported\n", __func__);
+		return -ENOTSUPP;
+	}
+
+	if (!atomic_read(&ui->configured)) {
+		dev_err(&ui->pdev->dev,
+			"%s: device is not configured\n", __func__);
+		return -ENODEV;
+	}
+	otg_set_suspend(ui->xceiv, 0);
+
+	disable_irq(otg->irq);
+
+	if (!is_usb_active())
+		writel(readl(USB_PORTSC) | PORTSC_FPR, USB_PORTSC);
+
+	/* Ensure that USB port is resumed before enabling the IRQ */
+	mb();
+
+	enable_irq(otg->irq);
+
+	return 0;
+}
+
+/* when the gadget is configured, it indicates how much power
+ * can be drawn from VBUS, as specified in the configuration descriptor
+ */
+static int msm72k_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
+{
+	struct usb_info *ui = container_of(_gadget, struct usb_info, gadget);
+	unsigned long flags;
+
+
+	spin_lock_irqsave(&ui->lock, flags);
+	ui->b_max_pow = mA;
+	ui->flags = USB_FLAG_CONFIGURED;
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	schedule_work(&ui->work);
+
+	return 0;
+}
+
+static int msm72k_set_selfpowered(struct usb_gadget *_gadget, int set)
+{
+	struct usb_info *ui = container_of(_gadget, struct usb_info, gadget);
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	if (set) {
+		if (ui->pdata && ui->pdata->self_powered)
+			atomic_set(&ui->self_powered, 1);
+		else
+			ret = -EOPNOTSUPP;
+	} else {
+		/* We can always work as a bus powered device */
+		atomic_set(&ui->self_powered, 0);
+	}
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	return ret;
+
+}
+
+static const struct usb_gadget_ops msm72k_ops = {
+	.get_frame	= msm72k_get_frame,
+	.vbus_session	= msm72k_udc_vbus_session,
+	.vbus_draw	= msm72k_udc_vbus_draw,
+	.pullup		= msm72k_pullup,
+	.wakeup		= msm72k_wakeup,
+	.set_selfpowered = msm72k_set_selfpowered,
+};
+
+static void usb_do_remote_wakeup(struct work_struct *w)
+{
+	struct usb_info *ui = the_usb_info;
+
+	msm72k_wakeup(&ui->gadget);
+}
+
+static ssize_t usb_remote_wakeup(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct usb_info *ui = the_usb_info;
+
+	msm72k_wakeup(&ui->gadget);
+
+	return count;
+}
+
+static ssize_t show_usb_state(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	size_t i;
+	/* indexed by enum usb_device_state */
+	char *state[] = {"USB_STATE_NOTATTACHED", "USB_STATE_ATTACHED",
+			"USB_STATE_POWERED", "USB_STATE_RECONNECTING",
+			"USB_STATE_UNAUTHENTICATED", "USB_STATE_DEFAULT",
+			"USB_STATE_ADDRESS", "USB_STATE_CONFIGURED",
+			"USB_STATE_SUSPENDED"
+	};
+
+	i = scnprintf(buf, PAGE_SIZE, "%s\n", state[msm_hsusb_get_state()]);
+	return i;
+}
+
+static ssize_t show_usb_speed(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct usb_info *ui = the_usb_info;
+	size_t i;
+	char *speed[] = {"USB_SPEED_UNKNOWN", "USB_SPEED_LOW",
+			"USB_SPEED_FULL", "USB_SPEED_HIGH"};
+
+	i = scnprintf(buf, PAGE_SIZE, "%s\n", speed[ui->gadget.speed]);
+	return i;
+}
+
+static ssize_t store_usb_chg_current(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct usb_info *ui = the_usb_info;
+	unsigned long mA;
+
+	if (ui->gadget.is_a_peripheral)
+		return -EINVAL;
+
+	if (strict_strtoul(buf, 10, &mA))
+		return -EINVAL;
+
+	ui->chg_current = mA;
+	otg_set_power(ui->xceiv, mA);
+
+	return count;
+}
+
+static ssize_t show_usb_chg_current(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usb_info *ui = the_usb_info;
+	size_t count;
+
+	count = sprintf(buf, "%d", ui->chg_current);
+
+	return count;
+}
+
+static ssize_t show_usb_chg_type(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usb_info *ui = the_usb_info;
+	struct msm_otg *otg = to_msm_otg(ui->xceiv);
+	size_t count;
+	char *chg_type[] = {"STD DOWNSTREAM PORT",
+			"CARKIT",
+			"DEDICATED CHARGER",
+			"INVALID"};
+
+	count = sprintf(buf, "%s",
+			chg_type[atomic_read(&otg->chg_type)]);
+
+	return count;
+}
+
+static DEVICE_ATTR(wakeup, S_IWUSR, 0, usb_remote_wakeup);
+static DEVICE_ATTR(usb_state, S_IRUSR, show_usb_state, 0);
+static DEVICE_ATTR(usb_speed, S_IRUSR, show_usb_speed, 0);
+static DEVICE_ATTR(chg_type, S_IRUSR, show_usb_chg_type, 0);
+static DEVICE_ATTR(chg_current, S_IWUSR | S_IRUSR,
+		show_usb_chg_current, store_usb_chg_current);
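+
+/*
+ * Summary of the sysfs attributes created on the gadget device in
+ * usb_gadget_probe_driver() below:
+ *   wakeup      (write-only) - any write triggers a remote-wakeup attempt
+ *   usb_state   (read-only)  - current USB device state as a string
+ *   usb_speed   (read-only)  - negotiated bus speed as a string
+ *   chg_type    (read-only)  - detected charger type
+ *   chg_current (read/write) - charger current in mA
+ */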
+
+#ifdef CONFIG_USB_OTG
+static ssize_t store_host_req(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct usb_info *ui = the_usb_info;
+	unsigned long val, flags;
+
+	if (strict_strtoul(buf, 10, &val))
+		return -EINVAL;
+
+	dev_dbg(&ui->pdev->dev, "%s host request\n",
+			val ? "set" : "clear");
+
+	spin_lock_irqsave(&ui->lock, flags);
+	if (ui->hnp_avail)
+		ui->gadget.host_request = !!val;
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	return count;
+}
+static DEVICE_ATTR(host_request, S_IWUSR, NULL, store_host_req);
+
+/* How do we notify user space about HNP availability?
+ * As we are compliant with OTG Rev 2.0, the host will not set a_hnp_support.
+ * Instead, the hnp_avail flag is set when an HNP polling request arrives.
+ * User space is expected to check HNP availability before requesting the
+ * host role via the sysfs node above.
+ */
+static ssize_t show_host_avail(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usb_info *ui = the_usb_info;
+	size_t count;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	count = sprintf(buf, "%d\n", ui->hnp_avail);
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	return count;
+}
+static DEVICE_ATTR(host_avail, S_IRUSR, show_host_avail, NULL);
+
+static struct attribute *otg_attrs[] = {
+	&dev_attr_host_request.attr,
+	&dev_attr_host_avail.attr,
+	NULL,
+};
+
+static struct attribute_group otg_attr_grp = {
+	.name  = "otg",
+	.attrs = otg_attrs,
+};
+#endif
+
+static int msm72k_probe(struct platform_device *pdev)
+{
+	struct usb_info *ui;
+	struct msm_otg *otg;
+	int retval;
+
+	dev_dbg(&pdev->dev, "msm72k_probe\n");
+	ui = kzalloc(sizeof(struct usb_info), GFP_KERNEL);
+	if (!ui)
+		return -ENOMEM;
+
+	ui->pdev = pdev;
+	ui->pdata = pdev->dev.platform_data;
+
+	ui->buf = dma_alloc_coherent(&pdev->dev, 4096, &ui->dma, GFP_KERNEL);
+	if (!ui->buf)
+		return usb_free(ui, -ENOMEM);
+
+	ui->pool = dma_pool_create("msm72k_udc", NULL, 32, 32, 0);
+	if (!ui->pool)
+		return usb_free(ui, -ENOMEM);
+
+	ui->xceiv = otg_get_transceiver();
+	if (!ui->xceiv)
+		return usb_free(ui, -ENODEV);
+
+	otg = to_msm_otg(ui->xceiv);
+	ui->addr = otg->regs;
+
+	ui->gadget.ops = &msm72k_ops;
+	ui->gadget.is_dualspeed = 1;
+	device_initialize(&ui->gadget.dev);
+	dev_set_name(&ui->gadget.dev, "gadget");
+	ui->gadget.dev.parent = &pdev->dev;
+	ui->gadget.dev.dma_mask = pdev->dev.dma_mask;
+
+#ifdef CONFIG_USB_OTG
+	ui->gadget.is_otg = 1;
+#endif
+
+	ui->sdev.name = DRIVER_NAME;
+	ui->sdev.print_name = print_switch_name;
+	ui->sdev.print_state = print_switch_state;
+
+	retval = switch_dev_register(&ui->sdev);
+	if (retval)
+		return usb_free(ui, retval);
+
+	the_usb_info = ui;
+
+	wake_lock_init(&ui->wlock,
+			WAKE_LOCK_SUSPEND, "usb_bus_active");
+
+	usb_debugfs_init(ui);
+
+	usb_prepare(ui);
+
+#ifdef CONFIG_USB_OTG
+	retval = sysfs_create_group(&pdev->dev.kobj, &otg_attr_grp);
+	if (retval) {
+		dev_err(&ui->pdev->dev,
+			"failed to create otg sysfs directory: "
+			"err:(%d)\n", retval);
+	}
+#endif
+
+	retval = otg_set_peripheral(ui->xceiv, &ui->gadget);
+	if (retval) {
+		dev_err(&ui->pdev->dev,
+			"%s: Cannot bind the transceiver, retval:(%d)\n",
+			__func__, retval);
+		switch_dev_unregister(&ui->sdev);
+		wake_lock_destroy(&ui->wlock);
+		return usb_free(ui, retval);
+	}
+
+	pm_runtime_enable(&pdev->dev);
+
+	/* Setup phy stuck timer */
+	if (ui->pdata && ui->pdata->is_phy_status_timer_on)
+		setup_timer(&phy_status_timer, usb_phy_status_check_timer, 0);
+	return 0;
+}
+
+int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+			    int (*bind)(struct usb_gadget *))
+{
+	struct usb_info *ui = the_usb_info;
+	int			retval, n;
+
+	if (!driver
+			|| driver->speed < USB_SPEED_FULL
+			|| !bind
+			|| !driver->disconnect
+			|| !driver->setup)
+		return -EINVAL;
+	if (!ui)
+		return -ENODEV;
+	if (ui->driver)
+		return -EBUSY;
+
+	/* first hook up the driver ... */
+	ui->driver = driver;
+	ui->gadget.dev.driver = &driver->driver;
+	ui->gadget.name = driver_name;
+	INIT_LIST_HEAD(&ui->gadget.ep_list);
+	ui->gadget.ep0 = &ui->ep0in.ep;
+	INIT_LIST_HEAD(&ui->gadget.ep0->ep_list);
+	ui->gadget.speed = USB_SPEED_UNKNOWN;
+	atomic_set(&ui->softconnect, 1);
+
+	for (n = 1; n < 16; n++) {
+		struct msm_endpoint *ept = ui->ept + n;
+		list_add_tail(&ept->ep.ep_list, &ui->gadget.ep_list);
+		ept->ep.maxpacket = 512;
+	}
+	for (n = 17; n < 32; n++) {
+		struct msm_endpoint *ept = ui->ept + n;
+		list_add_tail(&ept->ep.ep_list, &ui->gadget.ep_list);
+		ept->ep.maxpacket = 512;
+	}
+
+	retval = device_add(&ui->gadget.dev);
+	if (retval)
+		goto fail;
+
+	retval = bind(&ui->gadget);
+	if (retval) {
+		dev_err(&ui->pdev->dev, "bind to driver %s --> error %d\n",
+				driver->driver.name, retval);
+		device_del(&ui->gadget.dev);
+		goto fail;
+	}
+
+	retval = device_create_file(&ui->gadget.dev, &dev_attr_wakeup);
+	if (retval != 0)
+		dev_err(&ui->pdev->dev, "failed to create sysfs entry:"
+			" (wakeup) error: (%d)\n", retval);
+	retval = device_create_file(&ui->gadget.dev, &dev_attr_usb_state);
+	if (retval != 0)
+		dev_err(&ui->pdev->dev, "failed to create sysfs entry:"
+			" (usb_state) error: (%d)\n", retval);
+
+	retval = device_create_file(&ui->gadget.dev, &dev_attr_usb_speed);
+	if (retval != 0)
+		dev_err(&ui->pdev->dev, "failed to create sysfs entry:"
+			" (usb_speed) error: (%d)\n", retval);
+
+	retval = device_create_file(&ui->gadget.dev, &dev_attr_chg_type);
+	if (retval != 0)
+		dev_err(&ui->pdev->dev,
+			"failed to create sysfs entry (chg_type): err:(%d)\n",
+					retval);
+	retval = device_create_file(&ui->gadget.dev, &dev_attr_chg_current);
+	if (retval != 0)
+		dev_err(&ui->pdev->dev,
+			"failed to create sysfs entry (chg_current):"
+			" err:(%d)\n", retval);
+
+	dev_dbg(&ui->pdev->dev, "registered gadget driver '%s'\n",
+			driver->driver.name);
+	usb_start(ui);
+
+	return 0;
+
+fail:
+	ui->driver = NULL;
+	ui->gadget.dev.driver = NULL;
+	return retval;
+}
+EXPORT_SYMBOL(usb_gadget_probe_driver);
+
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+	struct usb_info *dev = the_usb_info;
+
+	if (!dev)
+		return -ENODEV;
+	if (!driver || driver != dev->driver || !driver->unbind)
+		return -EINVAL;
+
+	msm72k_pullup_internal(&dev->gadget, 0);
+	if (dev->irq) {
+		free_irq(dev->irq, dev);
+		dev->irq = 0;
+	}
+	dev->state = USB_STATE_IDLE;
+	atomic_set(&dev->configured, 0);
+	switch_set_state(&dev->sdev, 0);
+	/* cancel pending ep0 transactions */
+	flush_endpoint(&dev->ep0out);
+	flush_endpoint(&dev->ep0in);
+
+	device_remove_file(&dev->gadget.dev, &dev_attr_wakeup);
+	device_remove_file(&dev->gadget.dev, &dev_attr_usb_state);
+	device_remove_file(&dev->gadget.dev, &dev_attr_usb_speed);
+	device_remove_file(&dev->gadget.dev, &dev_attr_chg_type);
+	device_remove_file(&dev->gadget.dev, &dev_attr_chg_current);
+	driver->disconnect(&dev->gadget);
+	driver->unbind(&dev->gadget);
+	dev->gadget.dev.driver = NULL;
+	dev->driver = NULL;
+
+	device_del(&dev->gadget.dev);
+
+	dev_dbg(&dev->pdev->dev,
+		"unregistered gadget driver '%s'\n", driver->driver.name);
+	return 0;
+}
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+
+static int msm72k_udc_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: suspending...\n");
+	return 0;
+}
+
+static int msm72k_udc_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: resuming...\n");
+	return 0;
+}
+
+static int msm72k_udc_runtime_idle(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: idling...\n");
+	return 0;
+}
+
+static struct dev_pm_ops msm72k_udc_dev_pm_ops = {
+	.runtime_suspend = msm72k_udc_runtime_suspend,
+	.runtime_resume = msm72k_udc_runtime_resume,
+	.runtime_idle = msm72k_udc_runtime_idle
+};
+
+static struct platform_driver usb_driver = {
+	.probe = msm72k_probe,
+	.driver = { .name = "msm_hsusb",
+		    .pm = &msm72k_udc_dev_pm_ops, },
+};
+
+static int __init init(void)
+{
+	return platform_driver_register(&usb_driver);
+}
+module_init(init);
+
+static void __exit cleanup(void)
+{
+	platform_driver_unregister(&usb_driver);
+}
+module_exit(cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Mike Lockwood, Brian Swetland");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/qcom_maemo.c b/drivers/usb/gadget/qcom_maemo.c
new file mode 100644
index 0000000..39686c4
--- /dev/null
+++ b/drivers/usb/gadget/qcom_maemo.c
@@ -0,0 +1,304 @@
+/*
+ * Qualcomm Maemo Composite driver
+ *
+ * Copyright (C) 2008 David Brownell
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (C) 2009 Samsung Electronics
+ * Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program from the Code Aurora Forum is free software; you can
+ * redistribute it and/or modify it under the GNU General Public License
+ * version 2 and only version 2 as published by the Free Software Foundation.
+ * The original work available from [git.kernel.org] is subject to the
+ * notice below.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/kdev_t.h>
+#include <linux/delay.h>
+
+
+#define DRIVER_DESC		"Qcom Maemo Composite Gadget"
+#define VENDOR_ID		0x05c6
+#define PRODUCT_ID		0x902E
+
+/*
+ * kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module.  So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+
+#define USB_ETH
+
+#define USB_ETH_RNDIS
+#ifdef USB_ETH_RNDIS
+#  include "f_rndis.c"
+#  include "rndis.c"
+#endif
+
+
+#include "u_serial.c"
+#include "f_serial.c"
+
+#include "u_ether.c"
+
+#undef DBG     /* u_ether.c has broken idea about macros */
+#undef VDBG    /* so clean up after it */
+#undef ERROR
+#undef INFO
+
+#include "f_mass_storage.c"
+#include "f_diag.c"
+#include "f_rmnet.c"
+
+/*-------------------------------------------------------------------------*/
+/* string IDs are assigned dynamically */
+
+#define STRING_MANUFACTURER_IDX         0
+#define STRING_PRODUCT_IDX              1
+#define STRING_SERIAL_IDX               2
+
+/* String Table */
+static struct usb_string strings_dev[] = {
+	/* These dummy values should be overridden by platform data */
+	[STRING_MANUFACTURER_IDX].s = "Qualcomm Incorporated",
+	[STRING_PRODUCT_IDX].s = "Usb composition",
+	[STRING_SERIAL_IDX].s = "0123456789ABCDEF",
+	{  }                    /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+	.language       = 0x0409,       /* en-us */
+	.strings        = strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+	&stringtab_dev,
+	NULL,
+};
+
+static struct usb_device_descriptor device_desc = {
+	.bLength              = sizeof(device_desc),
+	.bDescriptorType      = USB_DT_DEVICE,
+	.bcdUSB               = __constant_cpu_to_le16(0x0200),
+	.bDeviceClass         = USB_CLASS_PER_INTERFACE,
+	.bDeviceSubClass      =      0,
+	.bDeviceProtocol      =      0,
+	.idVendor             = __constant_cpu_to_le16(VENDOR_ID),
+	.idProduct            = __constant_cpu_to_le16(PRODUCT_ID),
+	.bcdDevice            = __constant_cpu_to_le16(0xffff),
+	.bNumConfigurations   = 1,
+};
+
+static u8 hostaddr[ETH_ALEN];
+static struct usb_diag_ch *diag_ch;
+static struct usb_diag_platform_data usb_diag_pdata = {
+	.ch_name = DIAG_LEGACY,
+};
+
+/****************************** Configurations ******************************/
+static struct fsg_module_parameters mod_data = {
+	.stall = 0
+};
+FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
+
+static struct fsg_common *fsg_common;
+static int maemo_setup_config(struct usb_configuration *c,
+			const struct usb_ctrlrequest *ctrl);
+
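+/*
+ * Single configuration containing RNDIS, DIAG, two serial ports, RMNET
+ * and mass storage, bound in that order.
+ */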
+static int maemo_do_config(struct usb_configuration *c)
+{
+	int ret;
+
+	ret = rndis_bind_config(c, hostaddr);
+	if (ret < 0)
+		return ret;
+
+	ret = diag_function_add(c);
+	if (ret < 0)
+		return ret;
+
+	ret = gser_bind_config(c, 0);
+	if (ret < 0)
+		return ret;
+
+	ret = gser_bind_config(c, 1);
+	if (ret < 0)
+		return ret;
+
+	ret = rmnet_function_add(c);
+	if (ret < 0)
+		return ret;
+
+	ret = fsg_add(c->cdev, c, fsg_common);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static struct usb_configuration maemo_config_driver = {
+	.label			= "Qcom Maemo Gadget",
+	.bind			= maemo_do_config,
+	.setup			= maemo_setup_config,
+	.bConfigurationValue	= 1,
+	.bMaxPower		= 0xFA,	/* 500 mA (expressed in 2 mA units) */
+};
+static int maemo_setup_config(struct usb_configuration *c,
+		const struct usb_ctrlrequest *ctrl)
+{
+	int i;
+	int ret = -EOPNOTSUPP;
+
+	for (i = 0; i < maemo_config_driver.next_interface_id; i++) {
+		if (maemo_config_driver.interface[i]->setup) {
+			ret = maemo_config_driver.interface[i]->setup(
+				maemo_config_driver.interface[i], ctrl);
+			if (ret >= 0)
+				return ret;
+		}
+	}
+
+	return ret;
+}
+
+static int maemo_bind(struct usb_composite_dev *cdev)
+{
+	struct usb_gadget *gadget = cdev->gadget;
+	int status, gcnum;
+
+	/* set up diag channel */
+	diag_ch = diag_setup(&usb_diag_pdata);
+	if (IS_ERR(diag_ch))
+		return PTR_ERR(diag_ch);
+
+	/* set up network link layer */
+	status = gether_setup(cdev->gadget, hostaddr);
+	if (status < 0)
+		goto diag_clean;
+
+	/* set up serial link layer */
+	status = gserial_setup(cdev->gadget, 2);
+	if (status < 0)
+		goto fail0;
+
+	/* set up mass storage function */
+	fsg_common = fsg_common_from_params(0, cdev, &mod_data);
+	if (IS_ERR(fsg_common)) {
+		status = PTR_ERR(fsg_common);
+		goto fail1;
+	}
+
+	gcnum = usb_gadget_controller_number(gadget);
+	if (gcnum >= 0)
+		device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum);
+	else {
+		/* gadget zero is so simple (for now, no altsettings) that
+		 * it SHOULD NOT have problems with bulk-capable hardware.
+		 * so just warn about unrecognized controllers -- don't panic.
+		 *
+		 * things like configuration and altsetting numbering
+		 * can need hardware-specific attention though.
+		 */
+		WARNING(cdev, "controller '%s' not recognized\n",
+			gadget->name);
+		device_desc.bcdDevice = __constant_cpu_to_le16(0x9999);
+	}
+
+	/* Allocate string descriptor numbers ... note that string
+	 * contents can be overridden by the composite_dev glue.
+	 */
+
+	status = usb_string_id(cdev);
+	if (status < 0)
+		goto fail2;
+	strings_dev[STRING_MANUFACTURER_IDX].id = status;
+	device_desc.iManufacturer = status;
+
+	status = usb_string_id(cdev);
+	if (status < 0)
+		goto fail2;
+	strings_dev[STRING_PRODUCT_IDX].id = status;
+	device_desc.iProduct = status;
+
+	if (!usb_gadget_set_selfpowered(gadget))
+		maemo_config_driver.bmAttributes |= USB_CONFIG_ATT_SELFPOWER;
+
+	if (gadget->ops->wakeup)
+		maemo_config_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+
+	/* register our first configuration */
+	status = usb_add_config(cdev, &maemo_config_driver);
+	if (status < 0)
+		goto fail2;
+
+	usb_gadget_set_selfpowered(gadget);
+	dev_info(&gadget->dev, DRIVER_DESC "\n");
+	fsg_common_put(fsg_common);
+	return 0;
+
+fail2:
+	fsg_common_put(fsg_common);
+fail1:
+	gserial_cleanup();
+fail0:
+	gether_cleanup();
+diag_clean:
+	diag_cleanup(diag_ch);
+
+	return status;
+}
+
+static int __exit maemo_unbind(struct usb_composite_dev *cdev)
+{
+	gserial_cleanup();
+	gether_cleanup();
+	diag_cleanup(diag_ch);
+	return 0;
+}
+
+static struct usb_composite_driver qcom_maemo_driver = {
+	.name		= "Qcom Maemo Gadget",
+	.dev		= &device_desc,
+	.strings	= dev_strings,
+	.bind		= maemo_bind,
+	.unbind		= __exit_p(maemo_unbind),
+};
+
+static int __init qcom_maemo_usb_init(void)
+{
+	return usb_composite_register(&qcom_maemo_driver);
+}
+module_init(qcom_maemo_usb_init);
+
+static void __exit qcom_maemo_usb_cleanup(void)
+{
+	usb_composite_unregister(&qcom_maemo_driver);
+}
+module_exit(qcom_maemo_usb_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("1.0");
diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c
index a872248..c3ccb72 100644
--- a/drivers/usb/gadget/storage_common.c
+++ b/drivers/usb/gadget/storage_common.c
@@ -262,8 +262,13 @@
 #define EP0_BUFSIZE	256
 #define DELAYED_STATUS	(EP0_BUFSIZE + 999)	/* An impossibly large value */
 
-/* Number of buffers we will use.  2 is enough for double-buffering */
-#define FSG_NUM_BUFFERS	2
+/* Number of buffers for CBW, DATA and CSW */
+#ifdef CONFIG_USB_CSW_HACK
+#define FSG_NUM_BUFFERS    4
+#else
+#define FSG_NUM_BUFFERS    2
+#endif
+
 
 /* Default size of buffer length. */
 #define FSG_BUFLEN	((u32)16384)
diff --git a/drivers/usb/gadget/u_bam.c b/drivers/usb/gadget/u_bam.c
new file mode 100644
index 0000000..a047cfc
--- /dev/null
+++ b/drivers/usb/gadget/u_bam.c
@@ -0,0 +1,812 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <mach/msm_smd.h>
+#include <linux/netdevice.h>
+#include <mach/bam_dmux.h>
+#include <linux/debugfs.h>
+#include <linux/bitops.h>
+#include <linux/termios.h>
+
+#include "u_rmnet.h"
+
+#define BAM_N_PORTS	1
+
+static struct workqueue_struct *gbam_wq;
+static int n_bam_ports;
+static unsigned bam_ch_ids[] = { 8 };
+
+#define TX_PKT_DROP_THRESHOLD			1000
+#define RX_PKT_FLOW_CTRL_EN_THRESHOLD		1000
+#define RX_PKT_FLOW_CTRL_DISABLE		500
+#define RX_PKT_FLOW_CTRL_SUPPORT		1
+
+#define BAM_MUX_HDR				8
+
+#define RX_Q_SIZE				16
+#define TX_Q_SIZE				200
+#define RX_REQ_SIZE				(2048 - BAM_MUX_HDR)
+
+unsigned int tx_pkt_drop_thld = TX_PKT_DROP_THRESHOLD;
+module_param(tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);
+
+unsigned int rx_fctrl_en_thld = RX_PKT_FLOW_CTRL_EN_THRESHOLD;
+module_param(rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);
+
+unsigned int rx_fctrl_support = RX_PKT_FLOW_CTRL_SUPPORT;
+module_param(rx_fctrl_support, uint, S_IRUGO | S_IWUSR);
+
+unsigned int rx_fctrl_dis_thld = RX_PKT_FLOW_CTRL_DISABLE;
+module_param(rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);
+
+unsigned int tx_q_size = TX_Q_SIZE;
+module_param(tx_q_size, uint, S_IRUGO | S_IWUSR);
+
+unsigned int rx_q_size = RX_Q_SIZE;
+module_param(rx_q_size, uint, S_IRUGO | S_IWUSR);
+
+unsigned int rx_req_size = RX_REQ_SIZE;
+module_param(rx_req_size, uint, S_IRUGO | S_IWUSR);
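+
+/*
+ * Flow control overview (see the data path below):
+ *  - packets headed to the host are dropped once tx_skb_q grows beyond
+ *    tx_pkt_drop_thld;
+ *  - with rx_fctrl_support enabled, OUT requests stop being re-queued once
+ *    pending_with_bam reaches rx_fctrl_en_thld, and rx is restarted from
+ *    the BAM write-done callback once it drops below rx_fctrl_dis_thld.
+ */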
+
+struct bam_ch_info {
+	atomic_t		opened;
+	unsigned		id;
+
+	struct list_head        tx_idle;
+	struct sk_buff_head	tx_skb_q;
+
+	struct list_head        rx_idle;
+	struct sk_buff_head	rx_skb_q;
+
+	struct gbam_port	*port;
+	struct work_struct	write_tobam_w;
+
+	/* stats */
+	unsigned int		pending_with_bam;
+	unsigned int		tohost_drp_cnt;
+	unsigned int		tomodem_drp_cnt;
+	unsigned int		tx_len;
+	unsigned int		rx_len;
+	unsigned long		to_modem;
+	unsigned long		to_host;
+};
+
+struct gbam_port {
+	unsigned		port_num;
+	spinlock_t		port_lock;
+
+	struct grmnet		*port_usb;
+
+	struct bam_ch_info	data_ch;
+
+	struct work_struct	connect_w;
+};
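+
+/*
+ * port_lock protects port_usb, the tx/rx idle request lists, the skb
+ * queues and the statistics in bam_ch_info.
+ */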
+
+static struct bam_portmaster {
+	struct gbam_port *port;
+} bam_ports[BAM_N_PORTS];
+
+static void gbam_start_rx(struct gbam_port *port);
+
+/*---------------misc functions---------------- */
+static void gbam_free_requests(struct usb_ep *ep, struct list_head *head)
+{
+	struct usb_request	*req;
+
+	while (!list_empty(head)) {
+		req = list_entry(head->next, struct usb_request, list);
+		list_del(&req->list);
+		usb_ep_free_request(ep, req);
+	}
+}
+
+static int gbam_alloc_requests(struct usb_ep *ep, struct list_head *head,
+		int num,
+		void (*cb)(struct usb_ep *ep, struct usb_request *),
+		gfp_t flags)
+{
+	int i;
+	struct usb_request *req;
+
+	pr_debug("%s: ep:%p head:%p num:%d cb:%p", __func__,
+			ep, head, num, cb);
+
+	for (i = 0; i < num; i++) {
+		req = usb_ep_alloc_request(ep, flags);
+		if (!req) {
+			pr_debug("%s: req allocated:%d\n", __func__, i);
+			return list_empty(head) ? -ENOMEM : 0;
+		}
+		req->complete = cb;
+		list_add(&req->list, head);
+	}
+
+	return 0;
+}
+/*--------------------------------------------- */
+
+/*------------data_path----------------------------*/
+static void gbam_write_data_tohost(struct gbam_port *port)
+{
+	unsigned long			flags;
+	struct bam_ch_info		*d = &port->data_ch;
+	struct sk_buff			*skb;
+	int				ret;
+	struct usb_request		*req;
+	struct usb_ep			*ep;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (!port->port_usb) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+
+	ep = port->port_usb->in;
+
+	while (!list_empty(&d->tx_idle)) {
+		skb = __skb_dequeue(&d->tx_skb_q);
+		if (!skb) {
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			return;
+		}
+		req = list_first_entry(&d->tx_idle,
+				struct usb_request,
+				list);
+		req->context = skb;
+		req->buf = skb->data;
+		req->length = skb->len;
+
+		list_del(&req->list);
+
+		spin_unlock(&port->port_lock);
+		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
+		spin_lock(&port->port_lock);
+		if (ret) {
+			pr_err("%s: usb epIn failed\n", __func__);
+			list_add(&req->list, &d->tx_idle);
+			dev_kfree_skb_any(skb);
+			break;
+		}
+		d->to_host++;
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+void gbam_data_recv_cb(void *p, struct sk_buff *skb)
+{
+	struct gbam_port	*port = p;
+	struct bam_ch_info	*d = &port->data_ch;
+	unsigned long		flags;
+
+	if (!skb)
+		return;
+
+	pr_debug("%s: p:%p#%d d:%p skb_len:%d\n", __func__,
+			port, port->port_num, d, skb->len);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (!port->port_usb) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	if (d->tx_skb_q.qlen > tx_pkt_drop_thld) {
+		d->tohost_drp_cnt++;
+		if (printk_ratelimit())
+			pr_err("%s: tx pkt dropped: tx_drop_cnt:%u\n",
+					__func__, d->tohost_drp_cnt);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	__skb_queue_tail(&d->tx_skb_q, skb);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	gbam_write_data_tohost(port);
+}
+
+void gbam_data_write_done(void *p, struct sk_buff *skb)
+{
+	struct gbam_port	*port = p;
+	struct bam_ch_info	*d = &port->data_ch;
+	unsigned long		flags;
+
+	if (!skb)
+		return;
+
+	dev_kfree_skb_any(skb);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	d->pending_with_bam--;
+
+	pr_debug("%s: port:%p d:%p tom:%lu pbam:%u, pno:%d\n", __func__,
+			port, d, d->to_modem,
+			d->pending_with_bam, port->port_num);
+
+	if (rx_fctrl_support &&
+			d->pending_with_bam >= rx_fctrl_dis_thld) {
+
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	gbam_start_rx(port);
+}
+
+static void gbam_data_write_tobam(struct work_struct *w)
+{
+	struct gbam_port	*port;
+	struct bam_ch_info	*d;
+	struct sk_buff		*skb;
+	unsigned long		flags;
+	int			ret;
+
+	d = container_of(w, struct bam_ch_info, write_tobam_w);
+	port = d->port;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (!port->port_usb) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+
+	while ((skb = __skb_dequeue(&d->rx_skb_q))) {
+		d->pending_with_bam++;
+		d->to_modem++;
+
+		pr_debug("%s: port:%p d:%p tom:%lu pbam:%u pno:%d\n", __func__,
+				port, d, d->to_modem, d->pending_with_bam,
+				port->port_num);
+
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		ret = msm_bam_dmux_write(d->id, skb);
+		spin_lock_irqsave(&port->port_lock, flags);
+		if (ret) {
+			pr_debug("%s: write error:%d\n", __func__, ret);
+			d->pending_with_bam--;
+			d->to_modem--;
+			d->tomodem_drp_cnt++;
+			dev_kfree_skb_any(skb);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+/*-------------------------------------------------------------*/
+
+static void gbam_epin_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct gbam_port	*port = ep->driver_data;
+	struct bam_ch_info	*d;
+	struct sk_buff		*skb = req->context;
+	int			status = req->status;
+
+	switch (status) {
+	case 0:
+		/* successful completion */
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		break;
+	default:
+		pr_err("%s: data tx ep error %d\n",
+				__func__, status);
+		break;
+	}
+
+	dev_kfree_skb_any(skb);
+
+	if (!port)
+		return;
+
+	spin_lock(&port->port_lock);
+	d = &port->data_ch;
+	list_add_tail(&req->list, &d->tx_idle);
+	spin_unlock(&port->port_lock);
+
+	gbam_write_data_tohost(port);
+}
+
+static void
+gbam_epout_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct gbam_port	*port = ep->driver_data;
+	struct bam_ch_info	*d = &port->data_ch;
+	struct sk_buff		*skb = req->context;
+	int			status = req->status;
+	int			queue = 0;
+
+	switch (status) {
+	case 0:
+		skb_put(skb, req->actual);
+		queue = 1;
+		break;
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* cable disconnection */
+		dev_kfree_skb_any(skb);
+		req->buf = 0;
+		usb_ep_free_request(ep, req);
+		return;
+	default:
+		if (printk_ratelimit())
+			pr_err("%s: %s response error %d, %d/%d\n",
+				__func__, ep->name, status,
+				req->actual, req->length);
+		dev_kfree_skb_any(skb);
+		break;
+	}
+
+	spin_lock(&port->port_lock);
+	if (queue) {
+		__skb_queue_tail(&d->rx_skb_q, skb);
+		queue_work(gbam_wq, &d->write_tobam_w);
+	}
+
+	/* TODO: Handle flow control gracefully by having
+	 * a callback mechanism from the bam driver
+	 */
+	if (rx_fctrl_support &&
+		d->pending_with_bam >= rx_fctrl_en_thld) {
+
+		list_add_tail(&req->list, &d->rx_idle);
+		spin_unlock(&port->port_lock);
+		return;
+	}
+	spin_unlock(&port->port_lock);
+
+	skb = alloc_skb(rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
+	if (!skb) {
+		spin_lock(&port->port_lock);
+		list_add_tail(&req->list, &d->rx_idle);
+		spin_unlock(&port->port_lock);
+		return;
+	}
+	skb_reserve(skb, BAM_MUX_HDR);
+
+	req->buf = skb->data;
+	req->length = rx_req_size;
+	req->context = skb;
+
+	status = usb_ep_queue(ep, req, GFP_ATOMIC);
+	if (status) {
+		dev_kfree_skb_any(skb);
+
+		if (printk_ratelimit())
+			pr_err("%s: data rx enqueue err %d\n",
+					__func__, status);
+
+		spin_lock(&port->port_lock);
+		list_add_tail(&req->list, &d->rx_idle);
+		spin_unlock(&port->port_lock);
+	}
+}
+
+static void gbam_start_rx(struct gbam_port *port)
+{
+	struct usb_request		*req;
+	struct bam_ch_info		*d;
+	struct usb_ep			*ep;
+	unsigned long			flags;
+	int				ret;
+	struct sk_buff			*skb;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (!port->port_usb) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+
+	d = &port->data_ch;
+	ep = port->port_usb->out;
+
+	while (port->port_usb && !list_empty(&d->rx_idle)) {
+		req = list_first_entry(&d->rx_idle, struct usb_request, list);
+
+		skb = alloc_skb(rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
+		if (!skb)
+			break;
+		skb_reserve(skb, BAM_MUX_HDR);
+
+		list_del(&req->list);
+		req->buf = skb->data;
+		req->length = rx_req_size;
+		req->context = skb;
+
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
+		spin_lock_irqsave(&port->port_lock, flags);
+		if (ret) {
+			dev_kfree_skb_any(skb);
+
+			if (printk_ratelimit())
+				pr_err("%s: rx queue failed\n", __func__);
+
+			if (port->port_usb)
+				list_add(&req->list, &d->rx_idle);
+			else
+				usb_ep_free_request(ep, req);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static void gbam_start_io(struct gbam_port *port)
+{
+	unsigned long		flags;
+	struct usb_ep		*ep;
+	int			ret;
+	struct bam_ch_info	*d;
+
+	pr_debug("%s: port:%p\n", __func__, port);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (!port->port_usb) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+
+	d = &port->data_ch;
+	ep = port->port_usb->out;
+	ret = gbam_alloc_requests(ep, &d->rx_idle, rx_q_size,
+			gbam_epout_complete, GFP_ATOMIC);
+	if (ret) {
+		pr_err("%s: rx req allocation failed\n", __func__);
+		return;
+	}
+
+	ep = port->port_usb->in;
+	ret = gbam_alloc_requests(ep, &d->tx_idle, tx_q_size,
+			gbam_epin_complete, GFP_ATOMIC);
+	if (ret) {
+		pr_err("%s: tx req allocation failed\n", __func__);
+		gbam_free_requests(ep, &d->rx_idle);
+		return;
+	}
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	/* queue out requests */
+	gbam_start_rx(port);
+}
+
+static void gbam_connect_work(struct work_struct *w)
+{
+	struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
+	struct bam_ch_info *d = &port->data_ch;
+	int ret;
+
+	ret = msm_bam_dmux_open(d->id, port,
+				gbam_data_recv_cb,
+				gbam_data_write_done);
+	if (ret) {
+		pr_err("%s: unable to open bam ch:%d err:%d\n",
+				__func__, d->id, ret);
+		return;
+	}
+	atomic_set(&d->opened, 1);
+
+	gbam_start_io(port);
+
+	pr_debug("%s: done\n", __func__);
+}
+
+static void gbam_port_free(int portno)
+{
+	struct gbam_port *port = bam_ports[portno].port;
+
+	kfree(port);
+}
+
+static int gbam_port_alloc(int portno)
+{
+	struct gbam_port	*port;
+	struct bam_ch_info	*d;
+
+	port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	port->port_num = portno;
+
+	/* port initialization */
+	spin_lock_init(&port->port_lock);
+	INIT_WORK(&port->connect_w, gbam_connect_work);
+
+	/* data ch */
+	d = &port->data_ch;
+	d->port = port;
+	INIT_LIST_HEAD(&d->tx_idle);
+	INIT_LIST_HEAD(&d->rx_idle);
+	INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam);
+	skb_queue_head_init(&d->tx_skb_q);
+	skb_queue_head_init(&d->rx_skb_q);
+	d->id = bam_ch_ids[portno];
+
+	bam_ports[portno].port = port;
+
+	pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);
+
+	return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+#define DEBUG_BUF_SIZE	1024
+static ssize_t gbam_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	struct gbam_port	*port;
+	struct bam_ch_info	*d;
+	char			*buf;
+	unsigned long		flags;
+	int			ret;
+	int			i;
+	int			temp = 0;
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	for (i = 0; i < n_bam_ports; i++) {
+		port = bam_ports[i].port;
+		if (!port)
+			continue;
+		spin_lock_irqsave(&port->port_lock, flags);
+
+		d = &port->data_ch;
+
+		temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
+				"#PORT:%d port:%p data_ch:%p#\n"
+				"dpkts_to_usbhost: %lu\n"
+				"dpkts_to_modem:  %lu\n"
+				"dpkts_pwith_bam: %u\n"
+				"to_usbhost_dcnt:  %u\n"
+				"tomodem__dcnt:  %u\n"
+				"tx_buf_len:	 %u\n"
+				"data_ch_opened: %d\n",
+				i, port, &port->data_ch,
+				d->to_host, d->to_modem,
+				d->pending_with_bam,
+				d->tohost_drp_cnt, d->tomodem_drp_cnt,
+				d->tx_skb_q.qlen, atomic_read(&d->opened));
+
+		spin_unlock_irqrestore(&port->port_lock, flags);
+	}
+
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);
+
+	kfree(buf);
+
+	return ret;
+}
+
+static ssize_t gbam_reset_stats(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	struct gbam_port	*port;
+	struct bam_ch_info	*d;
+	int			i;
+	unsigned long		flags;
+
+	for (i = 0; i < n_bam_ports; i++) {
+		port = bam_ports[i].port;
+		if (!port)
+			continue;
+
+		spin_lock_irqsave(&port->port_lock, flags);
+
+		d = &port->data_ch;
+
+		d->to_host = 0;
+		d->to_modem = 0;
+		d->pending_with_bam = 0;
+		d->tohost_drp_cnt = 0;
+		d->tomodem_drp_cnt = 0;
+
+		spin_unlock_irqrestore(&port->port_lock, flags);
+	}
+	return count;
+}
+
+const struct file_operations gbam_stats_ops = {
+	.read = gbam_read_stats,
+	.write = gbam_reset_stats,
+};
+
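+/* Exposes the per-port counters above at <debugfs>/usb_rmnet/status
+ * (typically /sys/kernel/debug/usb_rmnet/status).
+ */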
+static void gbam_debugfs_init(void)
+{
+	struct dentry *dent;
+	struct dentry *dfile;
+
+	dent = debugfs_create_dir("usb_rmnet", 0);
+	if (IS_ERR(dent))
+		return;
+
+	/* TODO: Implement cleanup function to remove created file */
+	dfile = debugfs_create_file("status", 0444, dent, 0, &gbam_stats_ops);
+	if (!dfile || IS_ERR(dfile))
+		debugfs_remove(dent);
+}
+#else
+static void gbam_debugfs_init(void) { }
+#endif
+
+static void gbam_free_buffers(struct gbam_port *port)
+{
+	struct sk_buff		*skb;
+	unsigned long		flags;
+	struct bam_ch_info	*d;
+
+	if (!port)
+		return;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (!port->port_usb)
+		goto free_buf_out;
+
+	d = &port->data_ch;
+
+	gbam_free_requests(port->port_usb->in, &d->tx_idle);
+	gbam_free_requests(port->port_usb->out, &d->rx_idle);
+
+	while ((skb = __skb_dequeue(&d->tx_skb_q)))
+		dev_kfree_skb_any(skb);
+
+	while ((skb = __skb_dequeue(&d->rx_skb_q)))
+		dev_kfree_skb_any(skb);
+
+free_buf_out:
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+void gbam_disconnect(struct grmnet *gr, u8 port_num)
+{
+	struct gbam_port	*port;
+	unsigned long		flags;
+	struct bam_ch_info	*d;
+
+	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);
+
+	if (port_num >= n_bam_ports) {
+		pr_err("%s: invalid portno#%d\n", __func__, port_num);
+		return;
+	}
+
+	if (!gr) {
+		pr_err("%s: grmnet port is null\n", __func__);
+		return;
+	}
+
+	port = bam_ports[port_num].port;
+	d = &port->data_ch;
+
+	gbam_free_buffers(port);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->port_usb = 0;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	/* disable endpoints */
+	usb_ep_disable(gr->out);
+	usb_ep_disable(gr->in);
+
+	if (atomic_read(&d->opened))
+		msm_bam_dmux_close(d->id);
+
+	atomic_set(&d->opened, 0);
+}
+
+int gbam_connect(struct grmnet *gr, u8 port_num)
+{
+	struct gbam_port	*port;
+	struct bam_ch_info	*d;
+	int			ret;
+	unsigned long		flags;
+
+	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);
+
+	if (port_num >= n_bam_ports) {
+		pr_err("%s: invalid portno#%d\n", __func__, port_num);
+		return -ENODEV;
+	}
+
+	if (!gr) {
+		pr_err("%s: grmnet port is null\n", __func__);
+		return -ENODEV;
+	}
+
+	port = bam_ports[port_num].port;
+	d = &port->data_ch;
+
+	ret = usb_ep_enable(gr->in, gr->in_desc);
+	if (ret) {
+		pr_err("%s: usb_ep_enable failed eptype:IN ep:%p",
+				__func__, gr->in);
+		return ret;
+	}
+	gr->in->driver_data = port;
+
+	ret = usb_ep_enable(gr->out, gr->out_desc);
+	if (ret) {
+		pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p",
+				__func__, gr->out);
+		gr->in->driver_data = 0;
+		return ret;
+	}
+	gr->out->driver_data = port;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->port_usb = gr;
+
+	d->to_host = 0;
+	d->to_modem = 0;
+	d->pending_with_bam = 0;
+	d->tohost_drp_cnt = 0;
+	d->tomodem_drp_cnt = 0;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+
+	queue_work(gbam_wq, &port->connect_w);
+
+	return 0;
+}
+
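+/*
+ * Expected to be called once by the rmnet data function driver before any
+ * gbam_connect()/gbam_disconnect() calls: it allocates the per-port state
+ * and the shared workqueue.
+ */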
+int gbam_setup(unsigned int count)
+{
+	int	i;
+	int	ret;
+
+	pr_debug("%s: requested ports:%d\n", __func__, count);
+
+	if (!count || count > BAM_N_PORTS) {
+		pr_err("%s: Invalid num of ports count:%d\n",
+				__func__, count);
+		return -EINVAL;
+	}
+
+	gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+	if (!gbam_wq) {
+		pr_err("%s: Unable to create workqueue gbam_wq\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < count; i++) {
+		ret = gbam_port_alloc(i);
+		if (ret) {
+			pr_err("%s: Unable to alloc port:%d\n", __func__, i);
+			goto free_bam_ports;
+		}
+		n_bam_ports++;
+	}
+
+	gbam_debugfs_init();
+
+	return 0;
+free_bam_ports:
+	for (i = 0; i < n_bam_ports; i++)
+		gbam_port_free(i);
+
+	destroy_workqueue(gbam_wq);
+
+	return ret;
+}
diff --git a/drivers/usb/gadget/u_rmnet.h b/drivers/usb/gadget/u_rmnet.h
new file mode 100644
index 0000000..aeaddee
--- /dev/null
+++ b/drivers/usb/gadget/u_rmnet.h
@@ -0,0 +1,61 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __U_RMNET_H
+#define __U_RMNET_H
+
+#include <linux/usb/composite.h>
+#include <linux/usb/cdc.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+struct rmnet_ctrl_pkt {
+	void			*buf;
+	int			len;
+	struct list_head	list;
+};
+
+struct grmnet {
+	struct usb_function		func;
+
+	struct usb_ep			*in;
+	struct usb_ep			*out;
+	struct usb_endpoint_descriptor	*in_desc;
+	struct usb_endpoint_descriptor	*out_desc;
+
+	/* to the USB host (laptop, Windows PC, etc.); filled in
+	 * by the USB rmnet function driver
+	 */
+	int (*send_cpkt_response)(struct grmnet *g,
+				struct rmnet_ctrl_pkt *pkt);
+
+	/* to the modem; filled in by the driver implementing
+	 * the control function
+	 */
+	int (*send_cpkt_request)(struct grmnet *g,
+				u8 port_num,
+				struct rmnet_ctrl_pkt *pkt);
+
+	void (*send_cbits_tomodem)(struct grmnet *g,
+				u8 port_num,
+				int cbits);
+};
+
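+/*
+ * gbam_* implement the data path over BAM-DMUX (u_bam.c); gsmd_ctrl_*
+ * implement the control path over an SMD channel (u_rmnet_ctrl_smd.c).
+ */
+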
+int gbam_setup(unsigned int count);
+int gbam_connect(struct grmnet *, u8 port_num);
+void gbam_disconnect(struct grmnet *, u8 port_num);
+
+int gsmd_ctrl_connect(struct grmnet *gr, int port_num);
+void gsmd_ctrl_disconnect(struct grmnet *gr, u8 port_num);
+int gsmd_ctrl_setup(unsigned int count);
+
+#endif /* __U_RMNET_H*/
diff --git a/drivers/usb/gadget/u_rmnet_ctrl_smd.c b/drivers/usb/gadget/u_rmnet_ctrl_smd.c
new file mode 100644
index 0000000..4449d9e
--- /dev/null
+++ b/drivers/usb/gadget/u_rmnet_ctrl_smd.c
@@ -0,0 +1,652 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <mach/msm_smd.h>
+#include <linux/debugfs.h>
+#include <linux/bitops.h>
+#include <linux/termios.h>
+
+#include "u_rmnet.h"
+
+#define NR_PORTS	1
+static int n_ports;
+static char *rmnet_ctrl_names[] = { "DATA40_CNTL" };
+static struct workqueue_struct *grmnet_ctrl_wq;
+
+#define SMD_CH_MAX_LEN	20
+#define CH_OPENED	0
+#define CH_READY	1
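+
+/*
+ * CH_READY is set once the SMD platform device for the channel has probed
+ * (the channel exists); CH_OPENED is set after smd_open() succeeds and
+ * SMD_EVENT_OPEN is received, i.e. the channel is usable.
+ */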
+struct smd_ch_info {
+	struct smd_channel	*ch;
+	char			*name;
+	unsigned long		flags;
+	wait_queue_head_t	wait;
+	unsigned		dtr;
+
+	struct list_head	tx_q;
+	unsigned long		tx_len;
+
+	struct work_struct	read_w;
+	struct work_struct	write_w;
+
+	struct rmnet_ctrl_port	*port;
+
+	int			cbits_tomodem;
+	/* stats */
+	unsigned long		to_modem;
+	unsigned long		to_host;
+};
+
+struct rmnet_ctrl_port {
+	struct smd_ch_info	ctrl_ch;
+	unsigned int		port_num;
+	struct grmnet		*port_usb;
+
+	spinlock_t		port_lock;
+	struct work_struct	connect_w;
+};
+
+static struct rmnet_ctrl_ports {
+	struct rmnet_ctrl_port *port;
+	struct platform_driver pdrv;
+} ports[NR_PORTS];
+
+
+/*---------------misc functions---------------- */
+
+static struct rmnet_ctrl_pkt *rmnet_alloc_ctrl_pkt(unsigned len, gfp_t flags)
+{
+	struct rmnet_ctrl_pkt *pkt;
+
+	pkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags);
+	if (!pkt)
+		return ERR_PTR(-ENOMEM);
+
+	pkt->buf = kmalloc(len, flags);
+	if (!pkt->buf) {
+		kfree(pkt);
+		return ERR_PTR(-ENOMEM);
+	}
+	pkt->len = len;
+
+	return pkt;
+}
+
+static void rmnet_ctrl_pkt_free(struct rmnet_ctrl_pkt *pkt)
+{
+	kfree(pkt->buf);
+	kfree(pkt);
+}
+
+/*--------------------------------------------- */
+
+/*---------------control/smd channel functions---------------- */
+
+static void grmnet_ctrl_smd_read_w(struct work_struct *w)
+{
+	struct smd_ch_info *c = container_of(w, struct smd_ch_info, read_w);
+	struct rmnet_ctrl_port *port = c->port;
+	int sz;
+	struct rmnet_ctrl_pkt *cpkt;
+	unsigned long flags;
+
+	while (1) {
+		sz = smd_cur_packet_size(c->ch);
+		if (sz == 0)
+			break;
+
+		if (smd_read_avail(c->ch) < sz)
+			break;
+
+		cpkt = rmnet_alloc_ctrl_pkt(sz, GFP_KERNEL);
+		if (IS_ERR(cpkt)) {
+			pr_err("%s: unable to allocate rmnet control pkt\n",
+					__func__);
+			return;
+		}
+		cpkt->len = smd_read(c->ch, cpkt->buf, sz);
+
+		/* send it to USB here */
+		spin_lock_irqsave(&port->port_lock, flags);
+		if (port->port_usb && port->port_usb->send_cpkt_response) {
+			port->port_usb->send_cpkt_response(
+							port->port_usb,
+							cpkt);
+			c->to_host++;
+		}
+		spin_unlock_irqrestore(&port->port_lock, flags);
+	}
+}
+
+static void grmnet_ctrl_smd_write_w(struct work_struct *w)
+{
+	struct smd_ch_info *c = container_of(w, struct smd_ch_info, write_w);
+	struct rmnet_ctrl_port *port = c->port;
+	unsigned long flags;
+	struct rmnet_ctrl_pkt *cpkt;
+	int ret;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	while (1) {
+		if (list_empty(&c->tx_q))
+			break;
+
+		cpkt = list_first_entry(&c->tx_q, struct rmnet_ctrl_pkt, list);
+
+		if (smd_write_avail(c->ch) < cpkt->len)
+			break;
+
+		list_del(&cpkt->list);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		ret = smd_write(c->ch, cpkt->buf, cpkt->len);
+		spin_lock_irqsave(&port->port_lock, flags);
+		if (ret != cpkt->len) {
+			pr_err("%s: smd_write failed err:%d\n",
+					__func__, ret);
+			rmnet_ctrl_pkt_free(cpkt);
+			break;
+		}
+		rmnet_ctrl_pkt_free(cpkt);
+		c->to_modem++;
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static int
+grmnet_ctrl_smd_send_cpkt_tomodem(struct grmnet *gr, u8 portno,
+			struct rmnet_ctrl_pkt *cpkt)
+{
+	unsigned long		flags;
+	struct rmnet_ctrl_port	*port;
+	struct smd_ch_info	*c;
+
+	if (portno >= n_ports) {
+		pr_err("%s: Invalid portno#%d\n", __func__, portno);
+		return -ENODEV;
+	}
+
+	if (!gr) {
+		pr_err("%s: grmnet is null\n", __func__);
+		return -ENODEV;
+	}
+
+	port = ports[portno].port;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	c = &port->ctrl_ch;
+
+	/* drop cpkt if ch is not open */
+	if (!test_bit(CH_OPENED, &c->flags)) {
+		rmnet_ctrl_pkt_free(cpkt);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return 0;
+	}
+
+	list_add_tail(&cpkt->list, &c->tx_q);
+	queue_work(grmnet_ctrl_wq, &c->write_w);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return 0;
+}
+
+#define ACM_CTRL_DTR		0x01
+static void
+gsmd_ctrl_send_cbits_tomodem(struct grmnet *gr, u8 portno, int cbits)
+{
+	struct rmnet_ctrl_port	*port;
+	struct smd_ch_info	*c;
+	int			set_bits = 0;
+	int			clear_bits = 0;
+	int			temp = 0;
+
+	if (portno >= n_ports) {
+		pr_err("%s: Invalid portno#%d\n", __func__, portno);
+		return;
+	}
+
+	if (!gr) {
+		pr_err("%s: grmnet is null\n", __func__);
+		return;
+	}
+
+	port = ports[portno].port;
+	cbits = cbits & ACM_CTRL_DTR;
+	c = &port->ctrl_ch;
+
+	/* the host driver only sends DTR, but keep a generic
+	 * set/clear-bit implementation with two separate checks
+	 */
+	if (cbits & ACM_CTRL_DTR)
+		set_bits |= TIOCM_DTR;
+	else
+		clear_bits |= TIOCM_DTR;
+
+	temp |= set_bits;
+	temp &= ~clear_bits;
+
+	if (temp == c->cbits_tomodem)
+		return;
+
+	c->cbits_tomodem = temp;
+
+	if (!test_bit(CH_OPENED, &c->flags))
+		return;
+
+	pr_debug("%s: ctrl_tomodem:%d ctrl_bits:%d setbits:%d clearbits:%d\n",
+			__func__, temp, cbits, set_bits, clear_bits);
+
+	smd_tiocmset(c->ch, set_bits, clear_bits);
+}
+
+static char *get_smd_event(unsigned event)
+{
+	switch (event) {
+	case SMD_EVENT_DATA:
+		return "DATA";
+	case SMD_EVENT_OPEN:
+		return "OPEN";
+	case SMD_EVENT_CLOSE:
+		return "CLOSE";
+	}
+
+	return "UNDEFINED";
+}
+
+static void grmnet_ctrl_smd_notify(void *p, unsigned event)
+{
+	struct rmnet_ctrl_port	*port = p;
+	struct smd_ch_info	*c = &port->ctrl_ch;
+
+	pr_debug("%s: EVENT_(%s)\n", __func__, get_smd_event(event));
+
+	switch (event) {
+	case SMD_EVENT_DATA:
+		if (smd_read_avail(c->ch))
+			queue_work(grmnet_ctrl_wq, &c->read_w);
+		if (smd_write_avail(c->ch))
+			queue_work(grmnet_ctrl_wq, &c->write_w);
+		break;
+	case SMD_EVENT_OPEN:
+		set_bit(CH_OPENED, &c->flags);
+		wake_up(&c->wait);
+		break;
+	case SMD_EVENT_CLOSE:
+		clear_bit(CH_OPENED, &c->flags);
+		break;
+	}
+}
+/*------------------------------------------------------------ */
+
+static void grmnet_ctrl_smd_connect_w(struct work_struct *w)
+{
+	struct rmnet_ctrl_port *port =
+			container_of(w, struct rmnet_ctrl_port, connect_w);
+	struct smd_ch_info *c = &port->ctrl_ch;
+	unsigned long flags;
+	int ret;
+
+	pr_debug("%s:\n", __func__);
+
+	if (!test_bit(CH_READY, &c->flags))
+		return;
+
+	ret = smd_open(c->name, &c->ch, port, grmnet_ctrl_smd_notify);
+	if (ret) {
+		pr_err("%s: Unable to open smd ch:%s err:%d\n",
+				__func__, c->name, ret);
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (port->port_usb)
+		smd_tiocmset(c->ch, c->cbits_tomodem, ~c->cbits_tomodem);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+int gsmd_ctrl_connect(struct grmnet *gr, int port_num)
+{
+	struct rmnet_ctrl_port	*port;
+	struct smd_ch_info	*c;
+	unsigned long		flags;
+
+	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);
+
+	if (port_num >= n_ports) {
+		pr_err("%s: invalid portno#%d\n", __func__, port_num);
+		return -ENODEV;
+	}
+
+	if (!gr) {
+		pr_err("%s: grmnet port is null\n", __func__);
+		return -ENODEV;
+	}
+
+	port = ports[port_num].port;
+	c = &port->ctrl_ch;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->port_usb = gr;
+	gr->send_cpkt_request = grmnet_ctrl_smd_send_cpkt_tomodem;
+	gr->send_cbits_tomodem = gsmd_ctrl_send_cbits_tomodem;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	queue_work(grmnet_ctrl_wq, &port->connect_w);
+
+	return 0;
+}
+
+void gsmd_ctrl_disconnect(struct grmnet *gr, u8 port_num)
+{
+	struct rmnet_ctrl_port	*port;
+	unsigned long		flags;
+	struct smd_ch_info	*c;
+
+	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);
+
+	if (port_num >= n_ports) {
+		pr_err("%s: invalid portno#%d\n", __func__, port_num);
+		return;
+	}
+
+	if (!gr) {
+		pr_err("%s: grmnet port is null\n", __func__);
+		return;
+	}
+
+	port = ports[port_num].port;
+	c = &port->ctrl_ch;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->port_usb = 0;
+	gr->send_cpkt_request = 0;
+	gr->send_cbits_tomodem = 0;
+	c->cbits_tomodem = 0;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	if (test_bit(CH_OPENED, &c->flags)) {
+		/* this should send the dtr zero */
+		smd_close(c->ch);
+		clear_bit(CH_OPENED, &c->flags);
+	}
+}
+
+static int grmnet_ctrl_smd_ch_probe(struct platform_device *pdev)
+{
+	struct rmnet_ctrl_port	*port;
+	struct smd_ch_info	*c;
+	int			i;
+	unsigned long		flags;
+
+	pr_debug("%s: name:%s\n", __func__, pdev->name);
+
+	for (i = 0; i < n_ports; i++) {
+		port = ports[i].port;
+		c = &port->ctrl_ch;
+
+		if (!strncmp(c->name, pdev->name, SMD_CH_MAX_LEN)) {
+			set_bit(CH_READY, &c->flags);
+
+			/* if usb is online, try opening smd_ch */
+			spin_lock_irqsave(&port->port_lock, flags);
+			if (port->port_usb)
+				queue_work(grmnet_ctrl_wq, &port->connect_w);
+			spin_unlock_irqrestore(&port->port_lock, flags);
+
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int grmnet_ctrl_smd_ch_remove(struct platform_device *pdev)
+{
+	struct rmnet_ctrl_port	*port;
+	struct smd_ch_info	*c;
+	int			i;
+
+	pr_debug("%s: name:%s\n", __func__, pdev->name);
+
+	for (i = 0; i < n_ports; i++) {
+		port = ports[i].port;
+		c = &port->ctrl_ch;
+
+		if (!strncmp(c->name, pdev->name, SMD_CH_MAX_LEN)) {
+			clear_bit(CH_READY, &c->flags);
+			clear_bit(CH_OPENED, &c->flags);
+			smd_close(c->ch);
+			break;
+		}
+	}
+
+	return 0;
+}
+
+
+static void grmnet_ctrl_smd_port_free(int portno)
+{
+	struct rmnet_ctrl_port	*port = ports[portno].port;
+
+	kfree(port);
+}
+
+static int grmnet_ctrl_smd_port_alloc(int portno)
+{
+	struct rmnet_ctrl_port	*port;
+	struct smd_ch_info	*c;
+	struct platform_driver	*pdrv;
+
+	port = kzalloc(sizeof(struct rmnet_ctrl_port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	port->port_num = portno;
+
+	spin_lock_init(&port->port_lock);
+	INIT_WORK(&port->connect_w, grmnet_ctrl_smd_connect_w);
+
+	c = &port->ctrl_ch;
+	c->name = rmnet_ctrl_names[portno];
+	c->port = port;
+	init_waitqueue_head(&c->wait);
+	INIT_LIST_HEAD(&c->tx_q);
+	INIT_WORK(&c->read_w, grmnet_ctrl_smd_read_w);
+	INIT_WORK(&c->write_w, grmnet_ctrl_smd_write_w);
+
+	ports[portno].port = port;
+
+	pdrv = &ports[portno].pdrv;
+	pdrv->probe = grmnet_ctrl_smd_ch_probe;
+	pdrv->remove = grmnet_ctrl_smd_ch_remove;
+	pdrv->driver.name = c->name;
+	pdrv->driver.owner = THIS_MODULE;
+
+	platform_driver_register(pdrv);
+
+	pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);
+
+	return 0;
+}
+
+int gsmd_ctrl_setup(unsigned int count)
+{
+	int	i;
+	int	ret;
+
+	pr_debug("%s: requested ports:%d\n", __func__, count);
+
+	if (!count || count > NR_PORTS) {
+		pr_err("%s: Invalid num of ports count:%d\n",
+				__func__, count);
+		return -EINVAL;
+	}
+
+	grmnet_ctrl_wq = alloc_workqueue("gsmd_ctrl",
+				WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+	if (!grmnet_ctrl_wq) {
+		pr_err("%s: Unable to create workqueue grmnet_ctrl\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < count; i++) {
+		ret = grmnet_ctrl_smd_port_alloc(i);
+		if (ret) {
+			pr_err("%s: Unable to alloc port:%d\n", __func__, i);
+			goto free_ports;
+		}
+		n_ports++;
+	}
+
+	return 0;
+
+free_ports:
+	for (i = 0; i < n_ports; i++)
+		grmnet_ctrl_smd_port_free(i);
+
+	destroy_workqueue(grmnet_ctrl_wq);
+
+	return ret;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+#define DEBUG_BUF_SIZE	1024
+static ssize_t gsmd_ctrl_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	struct rmnet_ctrl_port	*port;
+	struct smd_ch_info	*c;
+	char			*buf;
+	unsigned long		flags;
+	int			ret;
+	int			i;
+	int			temp = 0;
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	for (i = 0; i < n_ports; i++) {
+		port = ports[i].port;
+		if (!port)
+			continue;
+		spin_lock_irqsave(&port->port_lock, flags);
+
+		c = &port->ctrl_ch;
+
+		temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
+				"#PORT:%d port:%p ctrl_ch:%p#\n"
+				"to_usbhost: %lu\n"
+				"to_modem:   %lu\n"
+				"DTR:        %s\n"
+				"ch_open:    %d\n"
+				"ch_ready:   %d\n"
+				"read_avail: %d\n"
+				"write_avail:%d\n",
+				i, port, &port->ctrl_ch,
+				c->to_host, c->to_modem,
+				c->cbits_tomodem ? "HIGH" : "LOW",
+				test_bit(CH_OPENED, &c->flags),
+				test_bit(CH_READY, &c->flags),
+				smd_read_avail(c->ch),
+				smd_write_avail(c->ch));
+
+		spin_unlock_irqrestore(&port->port_lock, flags);
+	}
+
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);
+
+	kfree(buf);
+
+	return ret;
+}
+
+static ssize_t gsmd_ctrl_reset_stats(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	struct rmnet_ctrl_port	*port;
+	struct smd_ch_info	*c;
+	int			i;
+	unsigned long		flags;
+
+	for (i = 0; i < n_ports; i++) {
+		port = ports[i].port;
+		if (!port)
+			continue;
+
+		spin_lock_irqsave(&port->port_lock, flags);
+
+		c = &port->ctrl_ch;
+
+		c->to_host = 0;
+		c->to_modem = 0;
+
+		spin_unlock_irqrestore(&port->port_lock, flags);
+	}
+	return count;
+}
+
+const struct file_operations gsmd_ctrl_stats_ops = {
+	.read = gsmd_ctrl_read_stats,
+	.write = gsmd_ctrl_reset_stats,
+};
+
+struct dentry *smd_ctrl_dent;
+struct dentry *smd_ctrl_dfile;
+static void gsmd_ctrl_debugfs_init(void)
+{
+	smd_ctrl_dent = debugfs_create_dir("usb_rmnet_ctrl_smd", 0);
+	if (IS_ERR(smd_ctrl_dent))
+		return;
+
+	smd_ctrl_dfile = debugfs_create_file("status", 0444, smd_ctrl_dent, 0,
+			&gsmd_ctrl_stats_ops);
+	if (!smd_ctrl_dfile || IS_ERR(smd_ctrl_dfile))
+		debugfs_remove(smd_ctrl_dent);
+}
+
+static void gsmd_ctrl_debugfs_exit(void)
+{
+	debugfs_remove(smd_ctrl_dfile);
+	debugfs_remove(smd_ctrl_dent);
+}
+
+#else
+static void gsmd_ctrl_debugfs_init(void) { }
+static void gsmd_ctrl_debugfs_exit(void) { }
+#endif
+
+static int __init gsmd_ctrl_init(void)
+{
+	gsmd_ctrl_debugfs_init();
+
+	return 0;
+}
+module_init(gsmd_ctrl_init);
+
+static void __exit gsmd_ctrl_exit(void)
+{
+	gsmd_ctrl_debugfs_exit();
+}
+module_exit(gsmd_ctrl_exit);
+MODULE_DESCRIPTION("smd control driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/u_sdio.c b/drivers/usb/gadget/u_sdio.c
new file mode 100644
index 0000000..09d898f
--- /dev/null
+++ b/drivers/usb/gadget/u_sdio.c
@@ -0,0 +1,1097 @@
+/*
+ * u_sdio.c - utilities for USB gadget serial over sdio
+ *
+ * This code also borrows from drivers/usb/gadget/u_serial.c, which is
+ * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (C) 2008 David Brownell
+ * Copyright (C) 2008 by Nokia Corporation
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program from the Code Aurora Forum is free software; you can
+ * redistribute it and/or modify it under the GNU General Public License
+ * version 2 and only version 2 as published by the Free Software Foundation.
+ * The original work available from [kernel.org] is subject to the notice below.
+ *
+ * This software is distributed under the terms of the GNU General
+ * Public License ("GPL") as published by the Free Software Foundation,
+ * either version 2 of that License or (at your option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <linux/debugfs.h>
+
+#include <mach/sdio_al.h>
+#include <mach/sdio_cmux.h>
+#include "u_serial.h"
+
+#define SDIO_RX_QUEUE_SIZE		8
+#define SDIO_RX_BUF_SIZE		2048
+
+#define SDIO_TX_QUEUE_SIZE		8
+#define SDIO_TX_BUF_SIZE		2048
+
+/* 1 - DUN, 2 - NMEA/GPS */
+#define SDIO_N_PORTS	2
+static struct sdio_portmaster {
+	struct mutex lock;
+	struct gsdio_port *port;
+	struct platform_driver gsdio_ch;
+} sdio_ports[SDIO_N_PORTS];
+static unsigned n_sdio_ports;
+
+struct sdio_port_info {
+	/* data channel info */
+	char *data_ch_name;
+	struct sdio_channel *ch;
+
+	/* control channel info */
+	int ctrl_ch_id;
+};
+
+struct sdio_port_info sport_info[SDIO_N_PORTS] = {
+	{
+		.data_ch_name = "SDIO_DUN",
+		.ctrl_ch_id = 9,
+	},
+	{
+		.data_ch_name = "SDIO_NMEA",
+		.ctrl_ch_id = 10,
+	},
+};
+
+static struct workqueue_struct *gsdio_wq;
+
+struct gsdio_port {
+	unsigned			port_num;
+	spinlock_t			port_lock;
+
+	unsigned			n_read;
+	struct list_head		read_pool;
+	struct list_head		read_queue;
+	struct work_struct		push;
+	unsigned long			rp_len;
+	unsigned long			rq_len;
+
+	struct list_head		write_pool;
+	struct work_struct		pull;
+	unsigned long			wp_len;
+
+	struct work_struct		notify_modem;
+
+	struct gserial			*port_usb;
+	struct usb_cdc_line_coding	line_coding;
+
+	int				sdio_open;
+	int				ctrl_ch_err;
+	struct sdio_port_info		*sport_info;
+	struct delayed_work		sdio_open_work;
+
+#define SDIO_ACM_CTRL_RI		(1 << 3)
+#define SDIO_ACM_CTRL_DSR		(1 << 1)
+#define SDIO_ACM_CTRL_DCD		(1 << 0)
+	int				cbits_to_laptop;
+
+#define SDIO_ACM_CTRL_RTS	(1 << 1)	/* unused with full duplex */
+#define SDIO_ACM_CTRL_DTR	(1 << 0)	/* host is ready for data r/w */
+	int				cbits_to_modem;
+
+	/* pkt logging */
+	unsigned long			nbytes_tolaptop;
+	unsigned long			nbytes_tomodem;
+};
+
+void gsdio_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+	kfree(req->buf);
+	usb_ep_free_request(ep, req);
+}
+
+struct usb_request *
+gsdio_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
+{
+	struct usb_request *req;
+
+	req = usb_ep_alloc_request(ep, flags);
+	if (!req) {
+		pr_err("%s: usb alloc request failed\n", __func__);
+		return NULL;
+	}
+
+	req->length = len;
+	req->buf = kmalloc(len, flags);
+	if (!req->buf) {
+		pr_err("%s: request buf allocation failed\n", __func__);
+		usb_ep_free_request(ep, req);
+		return NULL;
+	}
+
+	return req;
+}
+
+void gsdio_free_requests(struct usb_ep *ep, struct list_head *head)
+{
+	struct usb_request	*req;
+
+	while (!list_empty(head)) {
+		req = list_entry(head->next, struct usb_request, list);
+		list_del(&req->list);
+		gsdio_free_req(ep, req);
+	}
+}
+
+int gsdio_alloc_requests(struct usb_ep *ep, struct list_head *head,
+		int num, int size,
+		void (*cb)(struct usb_ep *ep, struct usb_request *))
+{
+	int i;
+	struct usb_request *req;
+
+	pr_debug("%s: ep:%p head:%p num:%d size:%d cb:%p", __func__,
+			ep, head, num, size, cb);
+
+	for (i = 0; i < num; i++) {
+		req = gsdio_alloc_req(ep, size, GFP_ATOMIC);
+		if (!req) {
+			pr_debug("%s: req allocated:%d\n", __func__, i);
+			return list_empty(head) ? -ENOMEM : 0;
+		}
+		req->complete = cb;
+		list_add(&req->list, head);
+	}
+
+	return 0;
+}
+
+void gsdio_start_rx(struct gsdio_port *port)
+{
+	struct list_head	*pool;
+	struct usb_ep		*out;
+	int ret;
+
+	if (!port) {
+		pr_err("%s: port is null\n", __func__);
+		return;
+	}
+
+	pr_debug("%s: port:%p port#%d\n", __func__, port, port->port_num);
+
+	spin_lock_irq(&port->port_lock);
+
+	if (!port->port_usb) {
+		pr_debug("%s: usb is disconnected\n", __func__);
+		goto start_rx_end;
+	}
+
+	pool = &port->read_pool;
+	out = port->port_usb->out;
+
+	while (!list_empty(pool)) {
+		struct usb_request	*req;
+
+		req = list_entry(pool->next, struct usb_request, list);
+		list_del(&req->list);
+		req->length = SDIO_RX_BUF_SIZE;
+		port->rp_len--;
+
+		spin_unlock_irq(&port->port_lock);
+		ret = usb_ep_queue(out, req, GFP_ATOMIC);
+		spin_lock_irq(&port->port_lock);
+		if (ret) {
+			pr_err("%s: usb ep out queue failed "
+					"port:%p, port#%d\n",
+					__func__, port, port->port_num);
+			list_add_tail(&req->list, pool);
+			port->rp_len++;
+			break;
+		}
+
+		/* usb could have disconnected while we released spin lock */
+		if (!port->port_usb) {
+			pr_debug("%s: usb is disconnected\n", __func__);
+			goto start_rx_end;
+		}
+	}
+
+start_rx_end:
+	spin_unlock_irq(&port->port_lock);
+}
+
+int gsdio_write(struct gsdio_port *port, struct usb_request *req)
+{
+	unsigned	avail;
+	char		*packet = req->buf;
+	unsigned	size = req->actual;
+	unsigned	n;
+	int		ret = 0;
+
+	if (!port) {
+		pr_err("%s: port is null\n", __func__);
+		return -ENODEV;
+	}
+
+	if (!req) {
+		pr_err("%s: usb request is null port#%d\n",
+				__func__, port->port_num);
+		return -ENODEV;
+	}
+
+	pr_debug("%s: port:%p port#%d req:%p actual:%d n_read:%d\n",
+			__func__, port, port->port_num, req,
+			req->actual, port->n_read);
+
+	if (!port->sdio_open) {
+		pr_debug("%s: sdio channel is not open\n", __func__);
+		return -ENODEV;
+	}
+
+	avail = sdio_write_avail(port->sport_info->ch);
+
+	pr_debug("%s: sdio_write_avail:%d", __func__, avail);
+
+	if (!avail)
+		return -EBUSY;
+
+	if (!req->actual) {
+		pr_debug("%s: req->actual is already zero, reset bytes read\n",
+				__func__);
+		port->n_read = 0;
+		return -ENODEV;
+	}
+
+	packet = req->buf;
+	n = port->n_read;
+	if (n) {
+		packet += n;
+		size -= n;
+	}
+
+	if (size > avail)
+		size = avail;
+
+	spin_unlock_irq(&port->port_lock);
+	ret = sdio_write(port->sport_info->ch, packet, size);
+	spin_lock_irq(&port->port_lock);
+	if (ret) {
+		pr_err("%s: port#%d sdio write failed err:%d",
+				__func__, port->port_num, ret);
+		/* try again later */
+		return ret;
+	}
+
+	port->nbytes_tomodem += size;
+
+	if (size + n == req->actual)
+		port->n_read = 0;
+	else
+		port->n_read += size;
+
+	return ret;
+}
+
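+/* "rx" here is data received from the USB host: gsdio_read_complete queues
+ * OUT requests on read_queue, and this work drains them into the SDIO data
+ * channel via gsdio_write(), re-queuing partially written requests.
+ */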
+void gsdio_rx_push(struct work_struct *w)
+{
+	struct gsdio_port *port = container_of(w, struct gsdio_port, push);
+	struct list_head *q = &port->read_queue;
+	struct usb_ep		*out;
+	int ret;
+
+	pr_debug("%s: port:%p port#%d read_queue:%p", __func__,
+			port, port->port_num, q);
+
+	spin_lock_irq(&port->port_lock);
+
+	if (!port->port_usb) {
+		pr_debug("%s: usb cable is disconnected\n", __func__);
+		spin_unlock_irq(&port->port_lock);
+		return;
+	}
+
+	out = port->port_usb->out;
+
+	while (!list_empty(q)) {
+		struct usb_request *req;
+
+		req = list_first_entry(q, struct usb_request, list);
+
+		switch (req->status) {
+		case -ESHUTDOWN:
+			pr_debug("%s: req status shutdown portno#%d port:%p",
+					__func__, port->port_num, port);
+			goto rx_push_end;
+		default:
+			pr_warning("%s: port:%p port#%d"
+					" Unexpected Rx Status:%d\n", __func__,
+					port, port->port_num, req->status);
+			/* FALL THROUGH */
+		case 0:
+			/* normal completion */
+			break;
+		}
+
+		if (!port->sdio_open) {
+			pr_err("%s: sdio channel is not open\n", __func__);
+			list_move(&req->list, &port->read_pool);
+			port->rp_len++;
+			port->rq_len--;
+			goto rx_push_end;
+		}
+
+		list_del(&req->list);
+		port->rq_len--;
+
+		ret = gsdio_write(port, req);
+		/* gsdio_write drops the spinlock while writing data to
+		 * sdio, so the usb cable may have been disconnected in
+		 * the meantime
+		 */
+		if (!port->port_usb) {
+			port->n_read = 0;
+			gsdio_free_req(out, req);
+			spin_unlock_irq(&port->port_lock);
+			return;
+		}
+
+		if (ret || port->n_read) {
+			list_add(&req->list, &port->read_queue);
+			port->rq_len++;
+			goto rx_push_end;
+		}
+
+		list_add(&req->list, &port->read_pool);
+		port->rp_len++;
+	}
+
+	if (port->sdio_open && !list_empty(q)) {
+		if (sdio_write_avail(port->sport_info->ch))
+			queue_work(gsdio_wq, &port->push);
+	}
+rx_push_end:
+	spin_unlock_irq(&port->port_lock);
+
+	/* start queuing out requests again to host */
+	gsdio_start_rx(port);
+}
+
+void gsdio_read_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct gsdio_port *port = ep->driver_data;
+	unsigned long flags;
+
+	pr_debug("%s: ep:%p port:%p\n", __func__, ep, port);
+
+	if (!port) {
+		pr_err("%s: port is null\n", __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	list_add_tail(&req->list, &port->read_queue);
+	port->rq_len++;
+	queue_work(gsdio_wq, &port->push);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return;
+}
+
+void gsdio_write_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct gsdio_port *port = ep->driver_data;
+	unsigned long flags;
+
+	pr_debug("%s: ep:%p port:%p\n", __func__, ep, port);
+
+	if (!port) {
+		pr_err("%s: port is null\n", __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	list_add(&req->list, &port->write_pool);
+	port->wp_len++;
+
+	switch (req->status) {
+	default:
+		pr_warning("%s: port:%p port#%d unexpected %s status %d\n",
+				__func__, port, port->port_num,
+				ep->name, req->status);
+		/* FALL THROUGH */
+	case 0:
+		queue_work(gsdio_wq, &port->pull);
+		break;
+
+	case -ESHUTDOWN:
+		/* disconnect */
+		pr_debug("%s: %s shutdown\n", __func__, ep->name);
+		break;
+	}
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return;
+}
+
+void gsdio_read_pending(struct gsdio_port *port)
+{
+	struct sdio_channel *ch;
+	char buf[1024];
+	int avail;
+
+	if (!port) {
+		pr_err("%s: port is null\n", __func__);
+		return;
+	}
+
+	ch = port->sport_info->ch;
+
+	if (!ch)
+		return;
+
+	while ((avail = sdio_read_avail(ch))) {
+		if (avail > 1024)
+			avail = 1024;
+		sdio_read(ch, buf, avail);
+
+		pr_debug("%s: flushed out %d bytes\n", __func__, avail);
+	}
+}
+
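+/* "tx" is data headed to the USB host: pull whatever is available from the
+ * SDIO data channel into requests from write_pool and queue them on the IN
+ * endpoint. If USB is disconnected, the pending SDIO data is discarded.
+ */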
+void gsdio_tx_pull(struct work_struct *w)
+{
+	struct gsdio_port *port = container_of(w, struct gsdio_port, pull);
+	struct list_head *pool = &port->write_pool;
+
+	pr_debug("%s: port:%p port#%d pool:%p\n", __func__,
+			port, port->port_num, pool);
+
+	if (!port->port_usb) {
+		pr_err("%s: usb disconnected\n", __func__);
+
+		/* take out all the pending data from sdio */
+		gsdio_read_pending(port);
+
+		return;
+	}
+
+	spin_lock_irq(&port->port_lock);
+
+	while (!list_empty(pool)) {
+		int avail;
+		struct usb_ep *in = port->port_usb->in;
+		struct sdio_channel *ch = port->sport_info->ch;
+		struct usb_request *req;
+		unsigned len = SDIO_TX_BUF_SIZE;
+		int ret;
+
+		req = list_entry(pool->next, struct usb_request, list);
+
+		if (!port->sdio_open) {
+			pr_debug("%s: SDIO channel is not open\n", __func__);
+			goto tx_pull_end;
+		}
+
+		avail = sdio_read_avail(ch);
+		if (!avail) {
+			/* REVISIT: for ZLP */
+			pr_debug("%s: read_avail:%d port:%p port#%d\n",
+					__func__, avail, port, port->port_num);
+			goto tx_pull_end;
+		}
+
+		if (avail > len)
+			avail = len;
+
+		list_del(&req->list);
+		port->wp_len--;
+
+		spin_unlock_irq(&port->port_lock);
+		ret = sdio_read(ch, req->buf, avail);
+		spin_lock_irq(&port->port_lock);
+		if (ret) {
+			pr_err("%s: port:%p port#%d sdio read failed err:%d",
+					__func__, port, port->port_num, ret);
+
+			/* check if usb is still active */
+			if (!port->port_usb) {
+				gsdio_free_req(in, req);
+			} else {
+				list_add(&req->list, pool);
+				port->wp_len++;
+			}
+			goto tx_pull_end;
+		}
+
+		req->length = avail;
+
+		spin_unlock_irq(&port->port_lock);
+		ret = usb_ep_queue(in, req, GFP_KERNEL);
+		spin_lock_irq(&port->port_lock);
+		if (ret) {
+			pr_err("%s: usb ep in queue failed "
+					"port:%p, port#%d err:%d\n",
+					__func__, port, port->port_num, ret);
+
+			/* could be usb disconnected */
+			if (!port->port_usb) {
+				gsdio_free_req(in, req);
+			} else {
+				list_add(&req->list, pool);
+				port->wp_len++;
+			}
+			goto tx_pull_end;
+		}
+
+		port->nbytes_tolaptop += avail;
+	}
+tx_pull_end:
+	spin_unlock_irq(&port->port_lock);
+}
+
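+/* Allocate the OUT/IN request pools and kick off I/O in both directions;
+ * called once both the USB cable and the SDIO channel are up.
+ */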
+int gsdio_start_io(struct gsdio_port *port)
+{
+	int			ret;
+	unsigned long		flags;
+
+	pr_debug("%s:\n", __func__);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (!port->port_usb) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return -ENODEV;
+	}
+
+	/* start usb out queue */
+	ret = gsdio_alloc_requests(port->port_usb->out,
+				&port->read_pool,
+				SDIO_RX_QUEUE_SIZE, SDIO_RX_BUF_SIZE,
+				gsdio_read_complete);
+	if (ret) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		pr_err("%s: unable to allocate out reqs\n", __func__);
+		return ret;
+	}
+	port->rp_len = SDIO_RX_QUEUE_SIZE;
+
+	ret = gsdio_alloc_requests(port->port_usb->in,
+				&port->write_pool,
+				SDIO_TX_QUEUE_SIZE, SDIO_TX_BUF_SIZE,
+				gsdio_write_complete);
+	if (ret) {
+		gsdio_free_requests(port->port_usb->out, &port->read_pool);
+		port->rp_len = 0;
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		pr_err("%s: unable to allocate in reqs\n", __func__);
+		return ret;
+	}
+	port->wp_len = SDIO_TX_QUEUE_SIZE;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	gsdio_start_rx(port);
+	queue_work(gsdio_wq, &port->pull);
+
+	return 0;
+}
+
+void gsdio_port_free(unsigned portno)
+{
+	struct gsdio_port *port = sdio_ports[portno].port;
+	struct platform_driver *pdriver = &sdio_ports[portno].gsdio_ch;
+
+	if (!port) {
+		pr_err("%s: invalid portno#%d\n", __func__, portno);
+		return;
+	}
+
+	platform_driver_unregister(pdriver);
+
+	kfree(port);
+}
+
+void gsdio_ctrl_wq(struct work_struct *w)
+{
+	struct gsdio_port *port;
+
+	port = container_of(w, struct gsdio_port, notify_modem);
+
+	if (!port) {
+		pr_err("%s: port is null\n", __func__);
+		return;
+	}
+
+	if (!port->sdio_open || port->ctrl_ch_err)
+		return;
+
+	sdio_cmux_tiocmset(port->sport_info->ctrl_ch_id,
+			port->cbits_to_modem, ~(port->cbits_to_modem));
+}
+
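+/* Invoked through gser->notify_modem when the host changes the ACM control
+ * lines: translate DTR into TIOCM_DTR, remember it, and schedule work to
+ * push the new bits to the modem over the cmux control channel.
+ */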
+void gsdio_ctrl_notify_modem(struct gserial *gser, u8 portno, int ctrl_bits)
+{
+	struct gsdio_port *port;
+	int temp;
+
+	if (portno >= n_sdio_ports) {
+		pr_err("%s: invalid portno#%d\n", __func__, portno);
+		return;
+	}
+
+	if (!gser) {
+		pr_err("%s: gser is null\n", __func__);
+		return;
+	}
+
+	port = sdio_ports[portno].port;
+
+	temp = ctrl_bits & SDIO_ACM_CTRL_DTR ? TIOCM_DTR : 0;
+
+	if (port->cbits_to_modem == temp)
+		return;
+
+	port->cbits_to_modem = temp;
+
+	/* TIOCM_DTR - 0x002 - bit(1) */
+	pr_debug("%s: port:%p port#%d ctrl_bits:%08x\n", __func__,
+		port, port->port_num, ctrl_bits);
+
+	if (!port->sdio_open) {
+		pr_err("%s: port:%p port#%d sdio not connected\n",
+				__func__, port, port->port_num);
+		return;
+	}
+
+	/* whenever DTR goes high, let the laptop know the current modem status */
+	if (port->cbits_to_modem && gser->send_modem_ctrl_bits)
+		gser->send_modem_ctrl_bits(gser, port->cbits_to_laptop);
+
+	queue_work(gsdio_wq, &port->notify_modem);
+}
+
+void gsdio_ctrl_modem_status(int ctrl_bits, void *_dev)
+{
+	struct gsdio_port *port = _dev;
+
+	/* TIOCM_CD - 0x040 - bit(6)
+	 * TIOCM_RI - 0x080 - bit(7)
+	 * TIOCM_DSR- 0x100 - bit(8)
+	 */
+	pr_debug("%s: port:%p port#%d event:%08x\n", __func__,
+		port, port->port_num, ctrl_bits);
+
+	port->cbits_to_laptop = 0;
+	ctrl_bits &= TIOCM_RI | TIOCM_CD | TIOCM_DSR;
+	if (ctrl_bits & TIOCM_RI)
+		port->cbits_to_laptop |= SDIO_ACM_CTRL_RI;
+	if (ctrl_bits & TIOCM_CD)
+		port->cbits_to_laptop |= SDIO_ACM_CTRL_DCD;
+	if (ctrl_bits & TIOCM_DSR)
+		port->cbits_to_laptop |= SDIO_ACM_CTRL_DSR;
+
+	if (port->port_usb && port->port_usb->send_modem_ctrl_bits)
+		port->port_usb->send_modem_ctrl_bits(port->port_usb,
+					port->cbits_to_laptop);
+}
+
+void gsdio_ch_notify(void *_dev, unsigned event)
+{
+	struct gsdio_port *port = _dev;
+
+	pr_debug("%s: port:%p port#%d event:%s\n", __func__,
+		port, port->port_num,
+		event == 1 ? "READ_AVAIL" : "WRITE_AVAIL");
+
+	if (event == SDIO_EVENT_DATA_WRITE_AVAIL)
+		queue_work(gsdio_wq, &port->push);
+	if (event == SDIO_EVENT_DATA_READ_AVAIL)
+		queue_work(gsdio_wq, &port->pull);
+}
+
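+/* Delayed work scheduled from gsdio_ch_probe: open the SDIO data channel
+ * and the cmux control channel, pick up the modem's current control bits,
+ * and start I/O if the USB side is already connected.
+ */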
+static void gsdio_open_work(struct work_struct *w)
+{
+	struct gsdio_port *port =
+			container_of(w, struct gsdio_port, sdio_open_work.work);
+	struct sdio_port_info *pi = port->sport_info;
+	struct gserial *gser;
+	int ret;
+	int ctrl_bits;
+	int startio;
+
+	ret = sdio_open(pi->data_ch_name, &pi->ch, port, gsdio_ch_notify);
+	if (ret) {
+		pr_err("%s: port:%p port#%d unable to open sdio ch:%s\n",
+				__func__, port, port->port_num,
+				pi->data_ch_name);
+		return;
+	}
+
+	ret = sdio_cmux_open(pi->ctrl_ch_id, 0, 0,
+			gsdio_ctrl_modem_status, port);
+	if (ret) {
+		pr_err("%s: port:%p port#%d unable to open ctrl ch:%d\n",
+				__func__, port, port->port_num, pi->ctrl_ch_id);
+		port->ctrl_ch_err = 1;
+	}
+
+	/* check for latest status update from modem */
+	if (!port->ctrl_ch_err) {
+		ctrl_bits = sdio_cmux_tiocmget(pi->ctrl_ch_id);
+		gsdio_ctrl_modem_status(ctrl_bits, port);
+	}
+
+	pr_debug("%s: SDIO data:%s ctrl:%d are open\n", __func__,
+					pi->data_ch_name,
+					pi->ctrl_ch_id);
+
+	port->sdio_open = 1;
+
+	/* start tx if usb is open already */
+	spin_lock_irq(&port->port_lock);
+	startio = port->port_usb ? 1 : 0;
+	gser = port->port_usb;
+	spin_unlock_irq(&port->port_lock);
+
+	if (startio) {
+		pr_debug("%s: USB is already open, start io\n", __func__);
+		gsdio_start_io(port);
+		if (gser->send_modem_ctrl_bits)
+			gser->send_modem_ctrl_bits(gser, port->cbits_to_laptop);
+	}
+}
+
+#define SDIO_CH_NAME_MAX_LEN	9
+#define SDIO_OPEN_DELAY		msecs_to_jiffies(10000)
+static int gsdio_ch_probe(struct platform_device *dev)
+{
+	struct gsdio_port	*port;
+	struct sdio_port_info	*pi;
+	int i;
+
+	pr_debug("%s: name:%s\n", __func__, dev->name);
+
+	for (i = 0; i < n_sdio_ports; i++) {
+		port = sdio_ports[i].port;
+		pi = port->sport_info;
+
+		pr_debug("%s: sdio_ch_name:%s dev_name:%s\n", __func__,
+				pi->data_ch_name, dev->name);
+
+		/* unfortunately the cmux channel might not be ready even
+		 * if the sdio channel is ready. as we don't have a good
+		 * notification mechanism, schedule a delayed work
+		 */
+		if (!strncmp(pi->data_ch_name, dev->name,
+					SDIO_CH_NAME_MAX_LEN)) {
+			queue_delayed_work(gsdio_wq,
+				&port->sdio_open_work, SDIO_OPEN_DELAY);
+			return 0;
+		}
+	}
+
+	pr_info("%s: name:%s not found\n", __func__, dev->name);
+
+	return -ENODEV;
+}
+
+int gsdio_port_alloc(unsigned portno,
+		struct usb_cdc_line_coding *coding,
+		struct sdio_port_info *pi)
+{
+	struct gsdio_port *port;
+	struct platform_driver *pdriver;
+
+	port = kzalloc(sizeof(struct gsdio_port), GFP_KERNEL);
+	if (!port) {
+		pr_err("%s: port allocation failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	port->port_num = portno;
+	spin_lock_init(&port->port_lock);
+	port->line_coding = *coding;
+
+	/* READ: read from usb and write into sdio */
+	INIT_LIST_HEAD(&port->read_pool);
+	INIT_LIST_HEAD(&port->read_queue);
+	INIT_WORK(&port->push, gsdio_rx_push);
+
+	INIT_LIST_HEAD(&port->write_pool);
+	INIT_WORK(&port->pull, gsdio_tx_pull);
+
+	INIT_WORK(&port->notify_modem, gsdio_ctrl_wq);
+
+	INIT_DELAYED_WORK(&port->sdio_open_work, gsdio_open_work);
+
+	sdio_ports[portno].port = port;
+
+	port->sport_info = pi;
+	pdriver = &sdio_ports[portno].gsdio_ch;
+
+	pdriver->probe = gsdio_ch_probe;
+	pdriver->driver.name = pi->data_ch_name;
+	pdriver->driver.owner = THIS_MODULE;
+
+	pr_debug("%s: port:%p port#%d sdio_name: %s\n", __func__,
+			port, port->port_num, pi->data_ch_name);
+
+	platform_driver_register(pdriver);
+
+	pr_debug("%s: port:%p port#%d\n", __func__, port, port->port_num);
+
+	return 0;
+}
+
+int gsdio_connect(struct gserial *gser, u8 portno)
+{
+	struct gsdio_port *port;
+	int ret = 0;
+	unsigned long flags;
+
+	if (portno >= n_sdio_ports) {
+		pr_err("%s: invalid portno#%d\n", __func__, portno);
+		return -EINVAL;
+	}
+
+	if (!gser) {
+		pr_err("%s: gser is null\n", __func__);
+		return -EINVAL;
+	}
+
+	port = sdio_ports[portno].port;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->port_usb = gser;
+	gser->notify_modem = gsdio_ctrl_notify_modem;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	ret = usb_ep_enable(gser->in, gser->in_desc);
+	if (ret) {
+		pr_err("%s: failed to enable in ep w/ err:%d\n",
+					__func__, ret);
+		port->port_usb = 0;
+		return ret;
+	}
+	gser->in->driver_data = port;
+
+	ret = usb_ep_enable(gser->out, gser->out_desc);
+	if (ret) {
+		pr_err("%s: failed to enable out ep w/ err:%d\n",
+					__func__, ret);
+		usb_ep_disable(gser->in);
+		port->port_usb = 0;
+		gser->in->driver_data = 0;
+		return ret;
+	}
+	gser->out->driver_data = port;
+
+	if (port->sdio_open) {
+		pr_debug("%s: sdio is already open, start io\n", __func__);
+		gsdio_start_io(port);
+		if (gser->send_modem_ctrl_bits)
+			gser->send_modem_ctrl_bits(gser, port->cbits_to_laptop);
+	}
+
+	return 0;
+}
+
+void gsdio_disconnect(struct gserial *gser, u8 portno)
+{
+	unsigned long flags;
+	struct gsdio_port *port;
+
+	if (portno >= n_sdio_ports) {
+		pr_err("%s: invalid portno#%d\n", __func__, portno);
+		return;
+	}
+
+	if (!gser) {
+		pr_err("%s: gser is null\n", __func__);
+		return;
+	}
+
+	port = sdio_ports[portno].port;
+
+	/* send dtr zero to modem to notify disconnect */
+	port->cbits_to_modem = 0;
+	queue_work(gsdio_wq, &port->notify_modem);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->port_usb = 0;
+	port->nbytes_tomodem = 0;
+	port->nbytes_tolaptop = 0;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	/* disable endpoints, aborting down any active I/O */
+	usb_ep_disable(gser->out);
+
+	usb_ep_disable(gser->in);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	gsdio_free_requests(gser->out, &port->read_pool);
+	gsdio_free_requests(gser->out, &port->read_queue);
+	gsdio_free_requests(gser->in, &port->write_pool);
+
+	port->rp_len = 0;
+	port->rq_len = 0;
+	port->wp_len = 0;
+	port->n_read = 0;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static char debug_buffer[PAGE_SIZE];
+
+static ssize_t debug_sdio_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	struct gsdio_port *port;
+	char *buf = debug_buffer;
+	unsigned long flags;
+	int i = 0;
+	int temp = 0;
+
+	while (i < n_sdio_ports) {
+		port = sdio_ports[i].port;
+		spin_lock_irqsave(&port->port_lock, flags);
+		temp += scnprintf(buf + temp, PAGE_SIZE - temp,
+				"###PORT:%d port:%p###\n"
+				"nbytes_tolaptop: %lu\n"
+				"nbytes_tomodem:  %lu\n"
+				"cbits_to_modem:  %u\n"
+				"cbits_to_laptop: %u\n"
+				"read_pool_len:   %lu\n"
+				"read_queue_len:  %lu\n"
+				"write_pool_len:  %lu\n"
+				"n_read:          %u\n",
+				i, port,
+				port->nbytes_tolaptop, port->nbytes_tomodem,
+				port->cbits_to_modem, port->cbits_to_laptop,
+				port->rp_len, port->rq_len, port->wp_len,
+				port->n_read);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		i++;
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, buf, temp);
+}
+
+static ssize_t debug_sdio_reset_stats(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	struct gsdio_port *port;
+	unsigned long flags;
+	int i = 0;
+
+	while (i < n_sdio_ports) {
+		port = sdio_ports[i].port;
+
+		spin_lock_irqsave(&port->port_lock, flags);
+		port->nbytes_tolaptop = 0;
+		port->nbytes_tomodem = 0;
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		i++;
+	}
+
+	return count;
+}
+
+static int debug_sdio_open(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static const struct file_operations debug_gsdio_ops = {
+	.open = debug_sdio_open,
+	.read = debug_sdio_read_stats,
+	.write = debug_sdio_reset_stats,
+};
+
+static void gsdio_debugfs_init(void)
+{
+	struct dentry *dent;
+
+	dent = debugfs_create_dir("usb_gsdio", 0);
+	if (IS_ERR(dent))
+		return;
+
+	debugfs_create_file("status", 0444, dent, 0, &debug_gsdio_ops);
+}
+#else
+static void gsdio_debugfs_init(void)
+{
+	return;
+}
+#endif
+
+/* connect, disconnect, alloc_requests, free_requests */
+int gsdio_setup(struct usb_gadget *g, unsigned count)
+{
+	struct usb_cdc_line_coding	coding;
+	int i;
+	int ret = 0;
+
+	pr_debug("%s: gadget:(%p) count:%d\n", __func__, g, count);
+
+	if (count == 0 || count > SDIO_N_PORTS) {
+		pr_err("%s: invalid number of ports count:%d max_ports:%d\n",
+				__func__, count, SDIO_N_PORTS);
+		return -EINVAL;
+	}
+
+	coding.dwDTERate = cpu_to_le32(9600);
+	coding.bCharFormat = USB_CDC_1_STOP_BITS;
+	coding.bParityType = USB_CDC_NO_PARITY;
+	coding.bDataBits = 8;
+
+	gsdio_wq = create_singlethread_workqueue("k_gserial");
+	if (!gsdio_wq) {
+		pr_err("%s: unable to create workqueue gsdio_wq\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < count; i++) {
+		mutex_init(&sdio_ports[i].lock);
+		ret = gsdio_port_alloc(i, &coding, sport_info + i);
+		if (ret) {
+			pr_err("%s: sdio logical port allocation failed\n",
+					__func__);
+			goto free_sdio_ports;
+		}
+		n_sdio_ports++;
+
+#ifdef DEBUG
+		/* REVISIT: create one file per port
+		 * or do not create any file
+		 */
+		if (i == 0) {
+			ret = device_create_file(&g->dev, &dev_attr_input);
+			if (ret)
+				pr_err("%s: unable to create device file\n",
+						__func__);
+		}
+#endif
+
+	}
+
+	gsdio_debugfs_init();
+
+	return 0;
+
+free_sdio_ports:
+	for (i = 0; i < n_sdio_ports; i++)
+		gsdio_port_free(i);
+	destroy_workqueue(gsdio_wq);
+
+	return ret;
+}
+
+/* TODO: Add gserial_cleanup */
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index 3fdcc9a..7bd9f33 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -25,6 +25,7 @@
 #include <linux/tty.h>
 #include <linux/tty_flip.h>
 #include <linux/slab.h>
+#include <linux/debugfs.h>
 
 #include "u_serial.h"
 
@@ -77,9 +78,14 @@
  * next layer of buffering.  For TX that's a circular buffer; for RX
  * consider it a NOP.  A third layer is provided by the TTY code.
  */
-#define QUEUE_SIZE		16
+#define TX_QUEUE_SIZE		8
+#define TX_BUF_SIZE		4096
 #define WRITE_BUF_SIZE		8192		/* TX only */
 
+#define RX_QUEUE_SIZE		8
+#define RX_BUF_SIZE		4096
+
 /* circular buffer */
 struct gs_buf {
 	unsigned		buf_size;
@@ -109,7 +115,7 @@
 	int read_allocated;
 	struct list_head	read_queue;
 	unsigned		n_read;
-	struct tasklet_struct	push;
+	struct work_struct	push;
 
 	struct list_head	write_pool;
 	int write_started;
@@ -119,6 +125,10 @@
 
 	/* REVISIT this state ... */
 	struct usb_cdc_line_coding port_line_coding;	/* 8-N-1 etc */
+	unsigned long           nbytes_from_host;
+	unsigned long           nbytes_to_tty;
+	unsigned long           nbytes_from_tty;
+	unsigned long           nbytes_to_host;
 };
 
 /* increase N_PORTS if you need more */
@@ -129,6 +139,8 @@
 } ports[N_PORTS];
 static unsigned	n_ports;
 
+static struct workqueue_struct *gserial_wq;
+
 #define GS_CLOSE_TIMEOUT		15		/* seconds */
 
 
@@ -361,18 +373,37 @@
 	struct list_head	*pool = &port->write_pool;
 	struct usb_ep		*in = port->port_usb->in;
 	int			status = 0;
+	static long 		prev_len;
 	bool			do_tty_wake = false;
 
 	while (!list_empty(pool)) {
 		struct usb_request	*req;
 		int			len;
 
-		if (port->write_started >= QUEUE_SIZE)
+		if (port->write_started >= TX_QUEUE_SIZE)
 			break;
 
 		req = list_entry(pool->next, struct usb_request, list);
-		len = gs_send_packet(port, req->buf, in->maxpacket);
+		len = gs_send_packet(port, req->buf, TX_BUF_SIZE);
 		if (len == 0) {
+			/* Queue zero length packet */
+			if (prev_len && (prev_len % in->maxpacket == 0)) {
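+				/* the previous write filled an exact multiple
+				 * of maxpacket, so queue a ZLP to tell the
+				 * host the transfer is complete
+				 */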
+				req->length = 0;
+				list_del(&req->list);
+				spin_unlock(&port->port_lock);
+				status = usb_ep_queue(in, req, GFP_ATOMIC);
+				spin_lock(&port->port_lock);
+				if (!port->port_usb) {
+					gs_free_req(in, req);
+					break;
+				}
+				if (status) {
+					printk(KERN_ERR "%s: %s err %d\n",
+					__func__, "queue", status);
+					list_add(&req->list, pool);
+				}
+				prev_len = 0;
+			}
 			wake_up_interruptible(&port->drain_wait);
 			break;
 		}
@@ -396,19 +427,25 @@
 		spin_unlock(&port->port_lock);
 		status = usb_ep_queue(in, req, GFP_ATOMIC);
 		spin_lock(&port->port_lock);
-
+		/*
+		 * If port_usb is NULL, gserial disconnect is called
+		 * while the spinlock is dropped and all requests are
+		 * freed. Free the current request here.
+		 */
+		if (!port->port_usb) {
+			do_tty_wake = false;
+			gs_free_req(in, req);
+			break;
+		}
 		if (status) {
 			pr_debug("%s: %s %s err %d\n",
 					__func__, "queue", in->name, status);
 			list_add(&req->list, pool);
 			break;
 		}
+		prev_len = req->length;
+		port->nbytes_from_tty += req->length;
 
-		port->write_started++;
-
-		/* abort immediately after disconnect */
-		if (!port->port_usb)
-			break;
 	}
 
 	if (do_tty_wake && port->port_tty)
@@ -427,6 +464,7 @@
 {
 	struct list_head	*pool = &port->read_pool;
 	struct usb_ep		*out = port->port_usb->out;
+	unsigned		started = 0;
 
 	while (!list_empty(pool)) {
 		struct usb_request	*req;
@@ -438,12 +476,12 @@
 		if (!tty)
 			break;
 
-		if (port->read_started >= QUEUE_SIZE)
+		if (port->read_started >= RX_QUEUE_SIZE)
 			break;
 
 		req = list_entry(pool->next, struct usb_request, list);
 		list_del(&req->list);
-		req->length = out->maxpacket;
+		req->length = RX_BUF_SIZE;
 
 		/* drop lock while we call out; the controller driver
 		 * may need to call us back (e.g. for disconnect)
@@ -451,7 +489,16 @@
 		spin_unlock(&port->port_lock);
 		status = usb_ep_queue(out, req, GFP_ATOMIC);
 		spin_lock(&port->port_lock);
-
+		/*
+		 * If port_usb is NULL, gserial disconnect is called
+		 * while the spinlock is dropped and all requests are
+		 * freed. Free the current request here.
+		 */
+		if (!port->port_usb) {
+			started = 0;
+			gs_free_req(out, req);
+			break;
+		}
 		if (status) {
 			pr_debug("%s: %s %s err %d\n",
 					__func__, "queue", out->name, status);
@@ -460,9 +507,6 @@
 		}
 		port->read_started++;
 
-		/* abort immediately after disconnect */
-		if (!port->port_usb)
-			break;
 	}
 	return port->read_started;
 }
@@ -477,9 +521,9 @@
  * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
  * can be buffered before the TTY layer's buffers (currently 64 KB).
  */
-static void gs_rx_push(unsigned long _port)
+static void gs_rx_push(struct work_struct *w)
 {
-	struct gs_port		*port = (void *)_port;
+	struct gs_port		*port = container_of(w, struct gs_port, push);
 	struct tty_struct	*tty;
 	struct list_head	*queue = &port->read_queue;
 	bool			disconnect = false;
@@ -532,6 +576,7 @@
 			}
 
 			count = tty_insert_flip_string(tty, packet, size);
+			port->nbytes_to_tty += count;
 			if (count)
 				do_push = true;
 			if (count != size) {
@@ -549,11 +594,17 @@
 		port->read_started--;
 	}
 
-	/* Push from tty to ldisc; without low_latency set this is handled by
-	 * a workqueue, so we won't get callbacks and can hold port_lock
+	/* Push from tty to ldisc; this is immediate with low_latency, and
+	 * may trigger callbacks to this driver ... so drop the spinlock.
 	 */
 	if (tty && do_push) {
+		spin_unlock_irq(&port->port_lock);
 		tty_flip_buffer_push(tty);
+		wake_up_interruptible(&tty->read_wait);
+		spin_lock_irq(&port->port_lock);
+
+		/* tty may have been closed */
+		tty = port->port_tty;
 	}
 
 
@@ -562,13 +613,13 @@
 	 * this time around, there may be trouble unless there's an
 	 * implicit tty_unthrottle() call on its way...
 	 *
-	 * REVISIT we should probably add a timer to keep the tasklet
+	 * REVISIT we should probably add a timer to keep the work queue
 	 * from starving ... but it's not clear that case ever happens.
 	 */
 	if (!list_empty(queue) && tty) {
 		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
 			if (do_push)
-				tasklet_schedule(&port->push);
+				queue_work(gserial_wq, &port->push);
 			else
 				pr_warning(PREFIX "%d: RX not scheduled?\n",
 					port->port_num);
@@ -585,19 +636,23 @@
 static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
 {
 	struct gs_port	*port = ep->driver_data;
+	unsigned long flags;
 
 	/* Queue all received data until the tty layer is ready for it. */
-	spin_lock(&port->port_lock);
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->nbytes_from_host += req->actual;
 	list_add_tail(&req->list, &port->read_queue);
-	tasklet_schedule(&port->push);
-	spin_unlock(&port->port_lock);
+	queue_work(gserial_wq, &port->push);
+	spin_unlock_irqrestore(&port->port_lock, flags);
 }
 
 static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
 {
 	struct gs_port	*port = ep->driver_data;
+	unsigned long flags;
 
-	spin_lock(&port->port_lock);
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->nbytes_to_host += req->actual;
 	list_add(&req->list, &port->write_pool);
 	port->write_started--;
 
@@ -609,7 +664,8 @@
 		/* FALL THROUGH */
 	case 0:
 		/* normal completion */
-		gs_start_tx(port);
+		if (port->port_usb)
+			gs_start_tx(port);
 		break;
 
 	case -ESHUTDOWN:
@@ -618,7 +674,7 @@
 		break;
 	}
 
-	spin_unlock(&port->port_lock);
+	spin_unlock_irqrestore(&port->port_lock, flags);
 }
 
 static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
@@ -636,19 +692,18 @@
 }
 
 static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
-		void (*fn)(struct usb_ep *, struct usb_request *),
+		int num, int size, void (*fn)(struct usb_ep *, struct usb_request *),
 		int *allocated)
 {
 	int			i;
 	struct usb_request	*req;
-	int n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE;
 
 	/* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
 	 * do quite that many this time, don't fail ... we just won't
 	 * be as speedy as we might otherwise be.
 	 */
-	for (i = 0; i < n; i++) {
-		req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
+	for (i = 0; i < num; i++) {
+		req = gs_alloc_req(ep, size, GFP_ATOMIC);
 		if (!req)
 			return list_empty(head) ? -ENOMEM : 0;
 		req->complete = fn;
@@ -681,13 +736,13 @@
 	 * configurations may use different endpoints with a given port;
 	 * and high speed vs full speed changes packet sizes too.
 	 */
-	status = gs_alloc_requests(ep, head, gs_read_complete,
-		&port->read_allocated);
+	status = gs_alloc_requests(ep, head, RX_QUEUE_SIZE, RX_BUF_SIZE,
+			 gs_read_complete, &port->read_allocated);
 	if (status)
 		return status;
 
 	status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
-			gs_write_complete, &port->write_allocated);
+			TX_QUEUE_SIZE, TX_BUF_SIZE, gs_write_complete, &port->write_allocated);
 	if (status) {
 		gs_free_requests(ep, head, &port->read_allocated);
 		return status;
@@ -697,6 +752,8 @@
 	port->n_read = 0;
 	started = gs_start_rx(port);
 
+	if (!port->port_usb)
+		return -EIO;
 	/* unblock any pending writes into our circular buffer */
 	if (started) {
 		tty_wakeup(port->port_tty);
@@ -801,6 +858,13 @@
 	port->open_count = 1;
 	port->openclose = false;
 
+	/* low_latency means the ldisc work runs in the same context as
+	 * tty_flip_buffer_push. With low_latency = 0 it may instead be
+	 * called from IRQ context, so it is better to push the data from
+	 * a dedicated worker thread.
+	 */
+	tty->low_latency = 1;
+
 	/* if connected, start the I/O stream */
 	if (port->port_usb) {
 		struct gserial	*gser = port->port_usb;
@@ -874,7 +938,7 @@
 
 	/* Iff we're disconnected, there can be no I/O in flight so it's
 	 * ok to free the circular buffer; else just scrub it.  And don't
-	 * let the push tasklet fire again until we're re-opened.
+	 * let the push work queue fire again until we're re-opened.
 	 */
 	if (gser == NULL)
 		gs_buf_free(&port->port_write_buf);
@@ -890,6 +954,22 @@
 			port->port_num, tty, file);
 
 	wake_up_interruptible(&port->close_wait);
+
+	/*
+	 * Freeing the previously queued requests as they are
+	 * allocated again as a part of gs_open()
+	 */
+	if (port->port_usb) {
+		spin_unlock_irq(&port->port_lock);
+		usb_ep_fifo_flush(gser->out);
+		usb_ep_fifo_flush(gser->in);
+		spin_lock_irq(&port->port_lock);
+		gs_free_requests(gser->out, &port->read_queue, NULL);
+		gs_free_requests(gser->out, &port->read_pool, NULL);
+		gs_free_requests(gser->in, &port->write_pool, NULL);
+	}
+	port->read_allocated = port->read_started =
+		port->write_allocated = port->write_started = 0;
 exit:
 	spin_unlock_irq(&port->port_lock);
 }
@@ -988,7 +1068,7 @@
 		 * rts/cts, or other handshaking with the host, but if the
 		 * read queue backs up enough we'll be NAKing OUT packets.
 		 */
-		tasklet_schedule(&port->push);
+		queue_work(gserial_wq, &port->push);
 		pr_vdebug(PREFIX "%d: unthrottle\n", port->port_num);
 	}
 	spin_unlock_irqrestore(&port->port_lock, flags);
@@ -1012,6 +1092,77 @@
 	return status;
 }
 
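+/* Map TIOCM_* modem-control bits onto the gadget-side callbacks: DTR/RTS
+ * are read through get_dtr/get_rts, while RI and CD are tracked in
+ * gser->serial_state and driven via send_ring_indicator and
+ * send_carrier_detect.
+ */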
+static int gs_tiocmget(struct tty_struct *tty)
+{
+	struct gs_port	*port = tty->driver_data;
+	struct gserial	*gser;
+	int result = 0;
+
+	spin_lock_irq(&port->port_lock);
+	gser = port->port_usb;
+	if (!gser) {
+		result = -ENODEV;
+		goto fail;
+	}
+
+	if (gser->get_dtr)
+		result |= (gser->get_dtr(gser) ? TIOCM_DTR : 0);
+
+	if (gser->get_rts)
+		result |= (gser->get_rts(gser) ? TIOCM_RTS : 0);
+
+	if (gser->serial_state & TIOCM_CD)
+		result |= TIOCM_CD;
+
+	if (gser->serial_state & TIOCM_RI)
+		result |= TIOCM_RI;
+fail:
+	spin_unlock_irq(&port->port_lock);
+	return result;
+}
+
+static int gs_tiocmset(struct tty_struct *tty,
+	unsigned int set, unsigned int clear)
+{
+	struct gs_port	*port = tty->driver_data;
+	struct gserial *gser;
+	int	status = 0;
+
+	spin_lock_irq(&port->port_lock);
+	gser = port->port_usb;
+	if (!gser) {
+		status = -ENODEV;
+		goto fail;
+	}
+
+	if (set & TIOCM_RI) {
+		if (gser->send_ring_indicator) {
+			gser->serial_state |= TIOCM_RI;
+			status = gser->send_ring_indicator(gser, 1);
+		}
+	}
+	if (clear & TIOCM_RI) {
+		if (gser->send_ring_indicator) {
+			gser->serial_state &= ~TIOCM_RI;
+			status = gser->send_ring_indicator(gser, 0);
+		}
+	}
+	if (set & TIOCM_CD) {
+		if (gser->send_carrier_detect) {
+			gser->serial_state |= TIOCM_CD;
+			status = gser->send_carrier_detect(gser, 1);
+		}
+	}
+	if (clear & TIOCM_CD) {
+		if (gser->send_carrier_detect) {
+			gser->serial_state &= ~TIOCM_CD;
+			status = gser->send_carrier_detect(gser, 0);
+		}
+	}
+fail:
+	spin_unlock_irq(&port->port_lock);
+	return status;
+}
+
 static const struct tty_operations gs_tty_ops = {
 	.open =			gs_open,
 	.close =		gs_close,
@@ -1022,6 +1173,8 @@
 	.chars_in_buffer =	gs_chars_in_buffer,
 	.unthrottle =		gs_unthrottle,
 	.break_ctl =		gs_break_ctl,
+	.tiocmget  =		gs_tiocmget,
+	.tiocmset  =		gs_tiocmset,
 };
 
 /*-------------------------------------------------------------------------*/
@@ -1041,7 +1194,7 @@
 	init_waitqueue_head(&port->close_wait);
 	init_waitqueue_head(&port->drain_wait);
 
-	tasklet_init(&port->push, gs_rx_push, (unsigned long) port);
+	INIT_WORK(&port->push, gs_rx_push);
 
 	INIT_LIST_HEAD(&port->read_pool);
 	INIT_LIST_HEAD(&port->read_queue);
@@ -1055,6 +1208,116 @@
 	return 0;
 }
 
+
+#if defined(CONFIG_DEBUG_FS)
+
+#define BUF_SIZE	512
+
+static ssize_t debug_read_status(struct file *file, char __user *ubuf,
+					size_t count, loff_t *ppos)
+{
+	struct gs_port *ui_dev = file->private_data;
+	struct tty_struct       *tty;
+	struct gserial		*gser;
+	char *buf;
+	unsigned long flags;
+	int i = 0;
+	int ret;
+	int result = 0;
+
+	tty = ui_dev->port_tty;
+	gser = ui_dev->port_usb;
+
+	buf = kzalloc(sizeof(char) * BUF_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&ui_dev->port_lock, flags);
+
+	i += scnprintf(buf + i, BUF_SIZE - i,
+		"nbytes_from_host: %lu\n", ui_dev->nbytes_from_host);
+
+	i += scnprintf(buf + i, BUF_SIZE - i,
+		"nbytes_to_tty: %lu\n", ui_dev->nbytes_to_tty);
+
+	i += scnprintf(buf + i, BUF_SIZE - i, "nbytes_with_usb_OUT_txr: %lu\n",
+			(ui_dev->nbytes_from_host - ui_dev->nbytes_to_tty));
+
+	i += scnprintf(buf + i, BUF_SIZE - i,
+		"nbytes_from_tty: %lu\n", ui_dev->nbytes_from_tty);
+
+	i += scnprintf(buf + i, BUF_SIZE - i,
+		"nbytes_to_host: %lu\n", ui_dev->nbytes_to_host);
+
+	i += scnprintf(buf + i, BUF_SIZE - i, "nbytes_with_usb_IN_txr: %lu\n",
+			(ui_dev->nbytes_from_tty - ui_dev->nbytes_to_host));
+
+	if (tty)
+		i += scnprintf(buf + i, BUF_SIZE - i,
+			"tty_flags: %lu\n", tty->flags);
+
+	if (gser && gser->get_dtr) {
+		result |= (gser->get_dtr(gser) ? TIOCM_DTR : 0);
+		i += scnprintf(buf + i, BUF_SIZE - i,
+			"DTR_status: %d\n", result);
+	}
+
+	spin_unlock_irqrestore(&ui_dev->port_lock, flags);
+
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, i);
+
+	kfree(buf);
+
+	return ret;
+}
+
+static ssize_t debug_write_reset(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	struct gs_port *ui_dev = file->private_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ui_dev->port_lock, flags);
+	ui_dev->nbytes_from_host = ui_dev->nbytes_to_tty =
+			ui_dev->nbytes_from_tty = ui_dev->nbytes_to_host = 0;
+	spin_unlock_irqrestore(&ui_dev->port_lock, flags);
+
+	return count;
+}
+
+static int serial_debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+const struct file_operations debug_rst_ops = {
+	.open = serial_debug_open,
+	.write = debug_write_reset,
+};
+
+const struct file_operations debug_adb_ops = {
+	.open = serial_debug_open,
+	.read = debug_read_status,
+};
+
+static void usb_debugfs_init(struct gs_port *ui_dev, int port_num)
+{
+	struct dentry *dent;
+	char buf[48];
+
+	snprintf(buf, 48, "usb_serial%d", port_num);
+	dent = debugfs_create_dir(buf, 0);
+	if (IS_ERR(dent))
+		return;
+
+	debugfs_create_file("readstatus", 0444, dent, ui_dev, &debug_adb_ops);
+	debugfs_create_file("reset", 0222, dent, ui_dev, &debug_rst_ops);
+}
+#else
+static void usb_debugfs_init(struct gs_port *ui_dev, int port_num) {}
+#endif
+
 /**
  * gserial_setup - initialize TTY driver for one or more ports
  * @g: gadget to associate with these ports
@@ -1094,7 +1357,8 @@
 
 	gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
 	gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
-	gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+	gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV
+				| TTY_DRIVER_RESET_TERMIOS;
 	gs_tty_driver->init_termios = tty_std_termios;
 
 	/* 9600-8-N-1 ... matches defaults expected by "usbser.sys" on
@@ -1113,6 +1377,12 @@
 
 	tty_set_operations(gs_tty_driver, &gs_tty_ops);
 
+	gserial_wq = create_singlethread_workqueue("k_gserial");
+	if (!gserial_wq) {
+		status = -ENOMEM;
+		goto fail;
+	}
+
 	/* make devices be openable */
 	for (i = 0; i < count; i++) {
 		mutex_init(&ports[i].lock);
@@ -1127,6 +1397,7 @@
 	/* export the driver ... */
 	status = tty_register_driver(gs_tty_driver);
 	if (status) {
 		pr_err("%s: cannot register, err %d\n",
 				__func__, status);
 		goto fail;
@@ -1142,6 +1413,9 @@
 				__func__, i, PTR_ERR(tty_dev));
 	}
 
+	for (i = 0; i < count; i++)
+		usb_debugfs_init(ports[i].port, i);
+
 	pr_debug("%s: registered %d ttyGS* device%s\n", __func__,
 			count, (count == 1) ? "" : "s");
 
@@ -1149,6 +1423,7 @@
 fail:
 	while (count--)
 		kfree(ports[count].port);
+	destroy_workqueue(gserial_wq);
 	put_tty_driver(gs_tty_driver);
 	gs_tty_driver = NULL;
 	return status;
@@ -1195,7 +1470,7 @@
 		ports[i].port = NULL;
 		mutex_unlock(&ports[i].lock);
 
-		tasklet_kill(&port->push);
+		cancel_work_sync(&port->push);
 
 		/* wait for old opens to finish */
 		wait_event(port->close_wait, gs_closed(port));
@@ -1206,6 +1481,7 @@
 	}
 	n_ports = 0;
 
+	destroy_workqueue(gserial_wq);
 	tty_unregister_driver(gs_tty_driver);
 	put_tty_driver(gs_tty_driver);
 	gs_tty_driver = NULL;
@@ -1344,5 +1620,8 @@
 	port->read_allocated = port->read_started =
 		port->write_allocated = port->write_started = 0;
 
+	port->nbytes_from_host = port->nbytes_to_tty =
+		port->nbytes_from_tty = port->nbytes_to_host = 0;
+
 	spin_unlock_irqrestore(&port->port_lock, flags);
 }
diff --git a/drivers/usb/gadget/u_serial.h b/drivers/usb/gadget/u_serial.h
index 300f0ed..fea53d8 100644
--- a/drivers/usb/gadget/u_serial.h
+++ b/drivers/usb/gadget/u_serial.h
@@ -40,11 +40,22 @@
 
 	/* REVISIT avoid this CDC-ACM support harder ... */
 	struct usb_cdc_line_coding port_line_coding;	/* 9600-8-N-1 etc */
+	u16				serial_state;
+
+	/* control signal callbacks */
+	unsigned int (*get_dtr)(struct gserial *p);
+	unsigned int (*get_rts)(struct gserial *p);
 
 	/* notification callbacks */
 	void (*connect)(struct gserial *p);
 	void (*disconnect)(struct gserial *p);
 	int (*send_break)(struct gserial *p, int duration);
+	unsigned int (*send_carrier_detect)(struct gserial *p, unsigned int);
+	unsigned int (*send_ring_indicator)(struct gserial *p, unsigned int);
+	int (*send_modem_ctrl_bits)(struct gserial *p, int ctrl_bits);
+
+	/* notify modem of control signal changes */
+	void (*notify_modem)(struct gserial *gser, u8 portno, int ctrl_bits);
 };
 
 /* utilities to allocate/free request and buffer */
@@ -59,6 +70,15 @@
 int gserial_connect(struct gserial *, u8 port_num);
 void gserial_disconnect(struct gserial *);
 
+/* sdio related functions */
+int gsdio_setup(struct usb_gadget *g, unsigned n_ports);
+int gsdio_connect(struct gserial *, u8 port_num);
+void gsdio_disconnect(struct gserial *, u8 portno);
+
+int gsmd_setup(struct usb_gadget *g, unsigned n_ports);
+int gsmd_connect(struct gserial *, u8 port_num);
+void gsmd_disconnect(struct gserial *, u8 portno);
+
 /* functions are bound to configurations by a config or gadget driver */
 int acm_bind_config(struct usb_configuration *c, u8 port_num);
 int gser_bind_config(struct usb_configuration *c, u8 port_num);
diff --git a/drivers/usb/gadget/u_smd.c b/drivers/usb/gadget/u_smd.c
new file mode 100644
index 0000000..0e8f247
--- /dev/null
+++ b/drivers/usb/gadget/u_smd.c
@@ -0,0 +1,887 @@
+/*
+ * u_smd.c - utilities for USB gadget serial over smd
+ *
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This code also borrows from drivers/usb/gadget/u_serial.c, which is
+ * Copyright (C) 2000 - 2003 Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (C) 2008 David Brownell
+ * Copyright (C) 2008 by Nokia Corporation
+ * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2000 Peter Berger (pberger@brimson.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <mach/msm_smd.h>
+#include <linux/debugfs.h>
+
+#include "u_serial.h"
+
+#define SMD_RX_QUEUE_SIZE		8
+#define SMD_RX_BUF_SIZE			2048
+
+#define SMD_TX_QUEUE_SIZE		8
+#define SMD_TX_BUF_SIZE			2048
+
+static struct workqueue_struct *gsmd_wq;
+
+#define SMD_N_PORTS	2
+#define CH_OPENED	0
+struct smd_port_info {
+	struct smd_channel	*ch;
+	char			*name;
+	unsigned long		flags;
+	wait_queue_head_t	wait;
+};
+
+struct smd_port_info smd_pi[SMD_N_PORTS] = {
+	{
+		.name = "DS",
+	},
+	{
+		.name = "UNUSED",
+	},
+};
+
+struct gsmd_port {
+	unsigned		port_num;
+	spinlock_t		port_lock;
+
+	unsigned		n_read;
+	struct list_head	read_pool;
+	struct list_head	read_queue;
+	struct work_struct	push;
+
+	struct list_head	write_pool;
+	struct work_struct	pull;
+
+	struct gserial		*port_usb;
+
+	struct smd_port_info	*pi;
+	struct work_struct	connect_work;
+
+	/* At present, smd does not notify
+	 * control bit change info from modem
+	 */
+	struct work_struct	update_modem_ctrl_sig;
+
+#define SMD_ACM_CTRL_DTR		0x01
+#define SMD_ACM_CTRL_RTS		0x02
+	unsigned		cbits_to_modem;
+
+#define SMD_ACM_CTRL_DCD		0x01
+#define SMD_ACM_CTRL_DSR		0x02
+#define SMD_ACM_CTRL_BRK		0x04
+#define SMD_ACM_CTRL_RI		0x08
+	unsigned		cbits_to_laptop;
+
+	/* pkt counters */
+	unsigned long		nbytes_tomodem;
+	unsigned long		nbytes_tolaptop;
+};
+
+static struct smd_portmaster {
+	struct mutex lock;
+	struct gsmd_port *port;
+} smd_ports[SMD_N_PORTS];
+static unsigned n_smd_ports;
+
+static void gsmd_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+	kfree(req->buf);
+	usb_ep_free_request(ep, req);
+}
+
+static void gsmd_free_requests(struct usb_ep *ep, struct list_head *head)
+{
+	struct usb_request	*req;
+
+	while (!list_empty(head)) {
+		req = list_entry(head->next, struct usb_request, list);
+		list_del(&req->list);
+		gsmd_free_req(ep, req);
+	}
+}
+
+static struct usb_request *
+gsmd_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
+{
+	struct usb_request *req;
+
+	req = usb_ep_alloc_request(ep, flags);
+	if (!req) {
+		pr_err("%s: usb alloc request failed\n", __func__);
+		return 0;
+	}
+
+	req->length = len;
+	req->buf = kmalloc(len, flags);
+	if (!req->buf) {
+		pr_err("%s: request buf allocation failed\n", __func__);
+		usb_ep_free_request(ep, req);
+		return 0;
+	}
+
+	return req;
+}
+
+static int gsmd_alloc_requests(struct usb_ep *ep, struct list_head *head,
+		int num, int size,
+		void (*cb)(struct usb_ep *ep, struct usb_request *))
+{
+	int i;
+	struct usb_request *req;
+
+	pr_debug("%s: ep:%p head:%p num:%d size:%d cb:%p", __func__,
+			ep, head, num, size, cb);
+
+	for (i = 0; i < num; i++) {
+		req = gsmd_alloc_req(ep, size, GFP_ATOMIC);
+		if (!req) {
+			pr_debug("%s: req allocated:%d\n", __func__, i);
+			return list_empty(head) ? -ENOMEM : 0;
+		}
+		req->complete = cb;
+		list_add(&req->list, head);
+	}
+
+	return 0;
+}
+
+static void gsmd_start_rx(struct gsmd_port *port)
+{
+	struct list_head	*pool;
+	struct usb_ep		*out;
+	int ret;
+
+	if (!port) {
+		pr_err("%s: port is null\n", __func__);
+		return;
+	}
+
+	spin_lock_irq(&port->port_lock);
+
+	if (!port->port_usb) {
+		pr_debug("%s: USB disconnected\n", __func__);
+		goto start_rx_end;
+	}
+
+	pool = &port->read_pool;
+	out = port->port_usb->out;
+
+	while (!list_empty(pool)) {
+		struct usb_request	*req;
+
+		req = list_entry(pool->next, struct usb_request, list);
+		list_del(&req->list);
+		req->length = SMD_RX_BUF_SIZE;
+
+		spin_unlock_irq(&port->port_lock);
+		ret = usb_ep_queue(out, req, GFP_KERNEL);
+		spin_lock_irq(&port->port_lock);
+		if (ret) {
+			pr_err("%s: usb ep out queue failed "
+					"port:%p, port#%d\n",
+					 __func__, port, port->port_num);
+			list_add_tail(&req->list, pool);
+			break;
+		}
+	}
+start_rx_end:
+	spin_unlock_irq(&port->port_lock);
+}
+
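+/* Drain OUT requests queued by gsmd_read_complete into the SMD channel;
+ * partially written requests are resumed from n_read on the next pass.
+ */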
+static void gsmd_rx_push(struct work_struct *w)
+{
+	struct gsmd_port *port = container_of(w, struct gsmd_port, push);
+	struct list_head *q;
+
+	pr_debug("%s: port:%p port#%d", __func__, port, port->port_num);
+
+	spin_lock_irq(&port->port_lock);
+
+	q = &port->read_queue;
+	while (!list_empty(q)) {
+		struct usb_request *req;
+		int avail;
+		struct smd_port_info *pi = port->pi;
+
+		req = list_first_entry(q, struct usb_request, list);
+
+		switch (req->status) {
+		case -ESHUTDOWN:
+			pr_debug("%s: req status shutdown portno#%d port:%p\n",
+					__func__, port->port_num, port);
+			goto rx_push_end;
+		default:
+			pr_warning("%s: port:%p port#%d"
+					" Unexpected Rx Status:%d\n", __func__,
+					port, port->port_num, req->status);
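+			/* FALL THROUGH */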
+		case 0:
+			/* normal completion */
+			break;
+		}
+
+		avail = smd_write_avail(pi->ch);
+		if (!avail)
+			goto rx_push_end;
+
+		if (req->actual) {
+			char		*packet = req->buf;
+			unsigned	size = req->actual;
+			unsigned	n;
+			int		count;
+
+			n = port->n_read;
+			if (n) {
+				packet += n;
+				size -= n;
+			}
+
+			count = smd_write(pi->ch, packet, size);
+			if (count < 0) {
+				pr_err("%s: smd write failed err:%d\n",
+						__func__, count);
+				goto rx_push_end;
+			}
+
+			if (count != size) {
+				port->n_read += count;
+				goto rx_push_end;
+			}
+
+			port->nbytes_tomodem += count;
+		}
+
+		port->n_read = 0;
+		list_move(&req->list, &port->read_pool);
+	}
+
+rx_push_end:
+	spin_unlock_irq(&port->port_lock);
+
+	gsmd_start_rx(port);
+}
+
+static void gsmd_read_pending(struct gsmd_port *port)
+{
+	int avail;
+
+	if (!port || !port->pi->ch)
+		return;
+
+	/* passing null buffer discards the data */
+	while ((avail = smd_read_avail(port->pi->ch)))
+		smd_read(port->pi->ch, 0, avail);
+
+	return;
+}
+
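+/* Move data from the SMD channel to the USB host: read into requests from
+ * write_pool and queue them on the IN endpoint; if USB is disconnected the
+ * pending SMD data is simply discarded.
+ */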
+static void gsmd_tx_pull(struct work_struct *w)
+{
+	struct gsmd_port *port = container_of(w, struct gsmd_port, pull);
+	struct list_head *pool = &port->write_pool;
+
+	pr_debug("%s: port:%p port#%d pool:%p\n", __func__,
+			port, port->port_num, pool);
+
+	if (!port->port_usb) {
+		pr_debug("%s: usb is disconnected\n", __func__);
+		gsmd_read_pending(port);
+		return;
+	}
+
+	spin_lock_irq(&port->port_lock);
+	while (!list_empty(pool)) {
+		struct usb_request *req;
+		struct usb_ep *in = port->port_usb->in;
+		struct smd_port_info *pi = port->pi;
+		int avail;
+		int ret;
+
+		avail = smd_read_avail(pi->ch);
+		if (!avail)
+			break;
+
+		avail = avail > SMD_TX_BUF_SIZE ? SMD_TX_BUF_SIZE : avail;
+
+		req = list_entry(pool->next, struct usb_request, list);
+		list_del(&req->list);
+		req->length = smd_read(pi->ch, req->buf, avail);
+
+		spin_unlock_irq(&port->port_lock);
+		ret = usb_ep_queue(in, req, GFP_KERNEL);
+		spin_lock_irq(&port->port_lock);
+		if (ret) {
+			pr_err("%s: usb ep in queue failed "
+					"port:%p, port#%d err:%d\n",
+					__func__, port, port->port_num, ret);
+			/* could be usb disconnected */
+			if (!port->port_usb)
+				gsmd_free_req(in, req);
+			else
+				list_add(&req->list, pool);
+			goto tx_pull_end;
+		}
+
+		port->nbytes_tolaptop += req->length;
+	}
+
+tx_pull_end:
+	/* TBD: Check how code behaves on USB bus suspend */
+	if (port->port_usb && smd_read_avail(port->pi->ch) && !list_empty(pool))
+		queue_work(gsmd_wq, &port->pull);
+
+	spin_unlock_irq(&port->port_lock);
+
+	return;
+}
+
+static void gsmd_read_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct gsmd_port *port = ep->driver_data;
+	unsigned long flags;
+
+	pr_debug("%s: ep:%p port:%p\n", __func__, ep, port);
+
+	if (!port) {
+		pr_err("%s: port is null\n", __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	list_add_tail(&req->list, &port->read_queue);
+	queue_work(gsmd_wq, &port->push);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return;
+}
+
+static void gsmd_write_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct gsmd_port *port = ep->driver_data;
+	unsigned long flags;
+
+	pr_debug("%s: ep:%p port:%p\n", __func__, ep, port);
+
+	if (!port) {
+		pr_err("%s: port is null\n", __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	list_add(&req->list, &port->write_pool);
+
+	switch (req->status) {
+	default:
+		pr_warning("%s: port:%p port#%d unexpected %s status %d\n",
+				__func__, port, port->port_num,
+				ep->name, req->status);
+		/* FALL THROUGH */
+	case 0:
+		queue_work(gsmd_wq, &port->pull);
+		break;
+
+	case -ESHUTDOWN:
+		/* disconnect */
+		pr_debug("%s: %s shutdown\n", __func__, ep->name);
+		gsmd_free_req(ep, req);
+		break;
+	}
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return;
+}
+
+static void gsmd_start_io(struct gsmd_port *port)
+{
+	int		ret = -ENODEV;
+	unsigned long	flags;
+
+	pr_debug("%s: port: %p\n", __func__, port);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (!port->port_usb)
+		goto start_io_out;
+
+	ret = gsmd_alloc_requests(port->port_usb->out,
+				&port->read_pool,
+				SMD_RX_QUEUE_SIZE, SMD_RX_BUF_SIZE,
+				gsmd_read_complete);
+	if (ret) {
+		pr_err("%s: unable to allocate out requests\n",
+				__func__);
+		goto start_io_out;
+	}
+
+	ret = gsmd_alloc_requests(port->port_usb->in,
+				&port->write_pool,
+				SMD_TX_QUEUE_SIZE, SMD_TX_BUF_SIZE,
+				gsmd_write_complete);
+	if (ret) {
+		gsmd_free_requests(port->port_usb->out, &port->read_pool);
+		pr_err("%s: unable to allocate IN requests\n",
+				__func__);
+		goto start_io_out;
+	}
+
+start_io_out:
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	if (ret)
+		return;
+
+	gsmd_start_rx(port);
+}
+
+static unsigned int convert_uart_sigs_to_acm(unsigned uart_sig)
+{
+	unsigned int acm_sig = 0;
+
+	/* should this be done in the calling functions? */
+	uart_sig &= (TIOCM_RI | TIOCM_CD | TIOCM_DSR);
+
+	if (uart_sig & TIOCM_RI)
+		acm_sig |= SMD_ACM_CTRL_RI;
+	if (uart_sig & TIOCM_CD)
+		acm_sig |= SMD_ACM_CTRL_DCD;
+	if (uart_sig & TIOCM_DSR)
+		acm_sig |= SMD_ACM_CTRL_DSR;
+
+	return acm_sig;
+}
+
+static unsigned int convert_acm_sigs_to_uart(unsigned acm_sig)
+{
+	unsigned int uart_sig = 0;
+
+	/* should this be done in the calling functions? */
+	acm_sig &= (SMD_ACM_CTRL_DTR | SMD_ACM_CTRL_RTS);
+
+	if (acm_sig & SMD_ACM_CTRL_DTR)
+		uart_sig |= TIOCM_DTR;
+	if (acm_sig & SMD_ACM_CTRL_RTS)
+		uart_sig |= TIOCM_RTS;
+
+	return uart_sig;
+}
+
+static void gsmd_notify(void *priv, unsigned event)
+{
+	struct gsmd_port *port = priv;
+	struct smd_port_info *pi = port->pi;
+	int i;
+
+	switch (event) {
+	case SMD_EVENT_DATA:
+		pr_debug("%s: Event data\n", __func__);
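+		/* data readable from the modem feeds the USB IN path (pull);
+		 * free SMD write space lets queued USB OUT data drain (push)
+		 */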
+		if (smd_read_avail(pi->ch))
+			queue_work(gsmd_wq, &port->pull);
+		if (smd_write_avail(pi->ch))
+			queue_work(gsmd_wq, &port->push);
+		break;
+	case SMD_EVENT_OPEN:
+		pr_debug("%s: Event Open\n", __func__);
+		set_bit(CH_OPENED, &pi->flags);
+		wake_up(&pi->wait);
+		break;
+	case SMD_EVENT_CLOSE:
+		pr_debug("%s: Event Close\n", __func__);
+		clear_bit(CH_OPENED, &pi->flags);
+		break;
+	case SMD_EVENT_STATUS:
+		i = smd_tiocmget(port->pi->ch);
+		port->cbits_to_laptop = convert_uart_sigs_to_acm(i);
+		if (port->port_usb && port->port_usb->send_modem_ctrl_bits)
+			port->port_usb->send_modem_ctrl_bits(port->port_usb,
+						port->cbits_to_laptop);
+		break;
+	}
+}
+
+#define MAX_SMD_RETRY_CNT	20
+static void gsmd_connect_work(struct work_struct *w)
+{
+	struct gsmd_port *port;
+	struct smd_port_info *pi;
+	int ret;
+	int retry_cnt = 0;
+
+	port = container_of(w, struct gsmd_port, connect_work);
+	pi = port->pi;
+
+	pr_debug("%s: port:%p port#%d\n", __func__, port, port->port_num);
+
+	/* The SMD driver comes online, gets initialized and loads the modem
+	 * about 10 seconds after boot-up. If the USB cable is connected at
+	 * boot-up, the smd open below may fail. To work around this, retry
+	 * opening the port multiple times.
+	 */
+	do {
+		if (!port->port_usb)
+			return;
+
+		ret = smd_named_open_on_edge(pi->name, SMD_APPS_MODEM,
+					&pi->ch, port, gsmd_notify);
+		if (!ret)
+			break;
+
+		retry_cnt++;
+		msleep(1000);
+	} while (retry_cnt < MAX_SMD_RETRY_CNT);
+
+	if (ret) {
+		pr_err("%s: unable to open smd port:%s err:%d\n",
+				__func__, pi->name, ret);
+		return;
+	}
+
+	pr_debug("%s: SMD port open successful retrycnt:%d\n",
+			__func__, retry_cnt);
+
+	wait_event(pi->wait, test_bit(CH_OPENED, &pi->flags));
+
+	if (!port->port_usb)
+		return;
+
+	/* update usb control signals to modem */
+	if (port->cbits_to_modem)
+		smd_tiocmset(port->pi->ch,
+			port->cbits_to_modem,
+			~port->cbits_to_modem);
+
+	gsmd_start_io(port);
+}
+
+static void gsmd_notify_modem(struct gserial *gser, u8 portno, int ctrl_bits)
+{
+	struct gsmd_port *port;
+	int temp;
+
+	if (portno >= n_smd_ports) {
+		pr_err("%s: invalid portno#%d\n", __func__, portno);
+		return;
+	}
+
+	if (!gser) {
+		pr_err("%s: gser is null\n", __func__);
+		return;
+	}
+
+	port = smd_ports[portno].port;
+
+	temp = convert_acm_sigs_to_uart(ctrl_bits);
+
+	if (temp == port->cbits_to_modem)
+		return;
+
+	port->cbits_to_modem = temp;
+
+	/* usb could send control signal before smd is ready */
+	if (!test_bit(CH_OPENED, &port->pi->flags))
+		return;
+
+	/* if DTR is high, update latest modem info to laptop */
+	if (port->cbits_to_modem & TIOCM_DTR) {
+		unsigned i;
+
+		i = smd_tiocmget(port->pi->ch);
+		port->cbits_to_laptop = convert_uart_sigs_to_acm(i);
+
+		if (gser->send_modem_ctrl_bits)
+			gser->send_modem_ctrl_bits(
+					port->port_usb,
+					port->cbits_to_laptop);
+	}
+
+	smd_tiocmset(port->pi->ch,
+			port->cbits_to_modem,
+			~port->cbits_to_modem);
+}
+
+int gsmd_connect(struct gserial *gser, u8 portno)
+{
+	unsigned long flags;
+	int ret;
+	struct gsmd_port *port;
+
+	pr_debug("%s: gserial:%p portno:%u\n", __func__, gser, portno);
+
+	if (portno >= n_smd_ports) {
+		pr_err("%s: Invalid portno#%d\n", __func__, portno);
+		return -EINVAL;
+	}
+
+	if (!gser) {
+		pr_err("%s: gser is null\n", __func__);
+		return -EINVAL;
+	}
+
+	port = smd_ports[portno].port;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->port_usb = gser;
+	gser->notify_modem = gsmd_notify_modem;
+	port->nbytes_tomodem = 0;
+	port->nbytes_tolaptop = 0;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	ret = usb_ep_enable(gser->in, gser->in_desc);
+	if (ret) {
+		pr_err("%s: usb_ep_enable failed eptype:IN ep:%p\n",
+				__func__, gser->in);
+		port->port_usb = 0;
+		return ret;
+	}
+	gser->in->driver_data = port;
+
+	ret = usb_ep_enable(gser->out, gser->out_desc);
+	if (ret) {
+		pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p\n",
+				__func__, gser->out);
+		port->port_usb = 0;
+		gser->in->driver_data = 0;
+		return ret;
+	}
+	gser->out->driver_data = port;
+
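+	/* the SMD open can fail and is retried with delays (see
+	 * gsmd_connect_work), so it is done from the workqueue rather
+	 * than in this context
+	 */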
+	queue_work(gsmd_wq, &port->connect_work);
+
+	return 0;
+}
+
+void gsmd_disconnect(struct gserial *gser, u8 portno)
+{
+	unsigned long flags;
+	struct gsmd_port *port;
+
+	pr_debug("%s: gserial:%p portno:%u\n", __func__, gser, portno);
+
+	if (portno >= n_smd_ports) {
+		pr_err("%s: invalid portno#%d\n", __func__, portno);
+		return;
+	}
+
+	if (!gser) {
+		pr_err("%s: gser is null\n", __func__);
+		return;
+	}
+
+	port = smd_ports[portno].port;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->port_usb = 0;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	/* disable endpoints, aborting down any active I/O */
+	usb_ep_disable(gser->out);
+	usb_ep_disable(gser->in);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	gsmd_free_requests(gser->out, &port->read_pool);
+	gsmd_free_requests(gser->out, &port->read_queue);
+	gsmd_free_requests(gser->in, &port->write_pool);
+	port->n_read = 0;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	if (!test_bit(CH_OPENED, &port->pi->flags))
+		return;
+
+	/* lower the dtr */
+	port->cbits_to_modem = 0;
+	smd_tiocmset(port->pi->ch,
+			port->cbits_to_modem,
+			~port->cbits_to_modem);
+
+	smd_close(port->pi->ch);
+	port->pi->flags = 0;
+}
+
+static void gsmd_port_free(int portno)
+{
+	struct gsmd_port *port = smd_ports[portno].port;
+
+	if (!port)
+		return;
+
+	kfree(port);
+}
+
+static int gsmd_port_alloc(int portno, struct usb_cdc_line_coding *coding)
+{
+	struct gsmd_port *port;
+
+	port = kzalloc(sizeof(struct gsmd_port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	port->port_num = portno;
+	port->pi = &smd_pi[portno];
+
+	spin_lock_init(&port->port_lock);
+
+	INIT_LIST_HEAD(&port->read_pool);
+	INIT_LIST_HEAD(&port->read_queue);
+	INIT_WORK(&port->push, gsmd_rx_push);
+
+	INIT_LIST_HEAD(&port->write_pool);
+	INIT_WORK(&port->pull, gsmd_tx_pull);
+
+	INIT_WORK(&port->connect_work, gsmd_connect_work);
+	init_waitqueue_head(&port->pi->wait);
+
+	smd_ports[portno].port = port;
+
+	pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);
+
+	return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static ssize_t debug_smd_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	struct gsmd_port *port;
+	char *buf;
+	unsigned long flags;
+	int temp = 0;
+	int i;
+	int ret;
+
+	buf = kzalloc(sizeof(char) * 512, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	for (i = 0; i < n_smd_ports; i++) {
+		port = smd_ports[i].port;
+		spin_lock_irqsave(&port->port_lock, flags);
+		temp += scnprintf(buf + temp, 512 - temp,
+				"###PORT:%d###\n"
+				"nbytes_tolaptop: %lu\n"
+				"nbytes_tomodem:  %lu\n"
+				"cbits_to_modem:  %u\n"
+				"cbits_to_laptop: %u\n"
+				"n_read: %u\n",
+				i, port->nbytes_tolaptop, port->nbytes_tomodem,
+				port->cbits_to_modem, port->cbits_to_laptop,
+				port->n_read);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+	}
+
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);
+
+	kfree(buf);
+
+	return ret;
+}
+
+static ssize_t debug_smd_reset_stats(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	struct gsmd_port *port;
+	unsigned long flags;
+	int i;
+
+	for (i = 0; i < n_smd_ports; i++) {
+		port = smd_ports[i].port;
+
+		spin_lock_irqsave(&port->port_lock, flags);
+		port->nbytes_tolaptop = 0;
+		port->nbytes_tomodem = 0;
+		spin_unlock_irqrestore(&port->port_lock, flags);
+	}
+
+	return count;
+}
+
+static int debug_smd_open(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static const struct file_operations debug_gsmd_ops = {
+	.open = debug_smd_open,
+	.read = debug_smd_read_stats,
+	.write = debug_smd_reset_stats,
+};
+
+static void gsmd_debugfs_init(void)
+{
+	struct dentry *dent;
+
+	dent = debugfs_create_dir("usb_gsmd", 0);
+	if (IS_ERR(dent))
+		return;
+
+	debugfs_create_file("status", 0444, dent, 0, &debug_gsmd_ops);
+}
+#else
+static void gsmd_debugfs_init(void) {}
+#endif
+
+int gsmd_setup(struct usb_gadget *g, unsigned count)
+{
+	struct usb_cdc_line_coding	coding;
+	int ret;
+	int i;
+
+	pr_debug("%s: g:%p count: %d\n", __func__, g, count);
+
+	if (!count || count > SMD_N_PORTS) {
+		pr_err("%s: Invalid num of ports count:%d gadget:%p\n",
+				__func__, count, g);
+		return -EINVAL;
+	}
+
+	coding.dwDTERate = cpu_to_le32(9600);
+	coding.bCharFormat = 8;
+	coding.bParityType = USB_CDC_NO_PARITY;
+	coding.bDataBits = USB_CDC_1_STOP_BITS;
+
+	gsmd_wq = create_singlethread_workqueue("k_gsmd");
+	if (!gsmd_wq) {
+		pr_err("%s: Unable to create workqueue gsmd_wq\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < count; i++) {
+		mutex_init(&smd_ports[i].lock);
+		ret = gsmd_port_alloc(i, &coding);
+		if (ret) {
+			pr_err("%s: Unable to alloc port:%d\n", __func__, i);
+			goto free_smd_ports;
+		}
+		n_smd_ports++;
+	}
+
+	gsmd_debugfs_init();
+
+	return 0;
+free_smd_ports:
+	for (i = 0; i < n_smd_ports; i++)
+		gsmd_port_free(i);
+
+	destroy_workqueue(gsmd_wq);
+
+	return ret;
+}
+
+void gsmd_cleanup(struct usb_gadget *g, unsigned count)
+{
+	/* TBD */
+}
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index ab085f1..4447b0f 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -59,6 +59,19 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called ehci-hcd.
 
+config USB_EHCI_EHSET
+	bool "Embedded High-speed Host Electrical Test Support"
+	depends on USB_EHCI_HCD
+	---help---
+	  This option is required for EHSET Host Compliance Tests support on an
+	  embedded Hi-speed USB Host or OTG port.
+
+	  This enables the software support for the "Single Step Set Feature" test.
+	  Apart from this test, other EHSET tests TEST_SE0/J/K/PACKET are part
+	  of EHCI specification and their support already exists in the EHCI driver.
+
+	  If unsure, say N.
+
 config USB_EHCI_ROOT_HUB_TT
 	bool "Root Hub Transaction Translators"
 	depends on USB_EHCI_HCD
@@ -230,6 +243,22 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called oxu210hp-hcd.
 
+config USB_EHCI_MSM_72K
+	bool "Support for Legacy Qualcomm on-chip EHCI USB controller"
+	depends on USB_EHCI_HCD && USB_MSM_OTG_72K && ARCH_MSM
+	---help---
+	  This driver enables support for the USB host controller
+	  in pre-8660 Qualcomm chipsets (8660, 7X30, 8X50 and 7X27).
+
+config USB_FS_HOST
+	bool "Support for Full Speed Host Mode"
+	depends on USB_EHCI_MSM_72K && ARCH_QSD8X50
+	default n
+	---help---
+	  Enables support for the full speed USB controller core present
+	  on the Qualcomm chipsets.
+
 config USB_ISP116X_HCD
 	tristate "ISP116X HCD support"
 	depends on USB
@@ -528,6 +557,15 @@
 	  To compile this driver a module, choose M here: the module
 	  will be called "whci-hcd".
 
+config USB_PEHCI_HCD
+	tristate "ST-E ISP1763A Host Controller"
+	depends on USB
+	help
+	  Driver for ST-E isp1763A USB Host 2.0 Controllers.
+
+	  To compile this driver a module, choose M here: the module
+	  will be called "pehci".
+
 config USB_HWA_HCD
 	tristate "Host Wire Adapter (HWA) driver (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 624a362..6f5b0e1 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -15,6 +15,7 @@
 xhci-hcd-y += xhci-ring.o xhci-hub.o xhci-dbg.o
 
 obj-$(CONFIG_USB_WHCI_HCD)	+= whci/
+obj-$(CONFIG_USB_PEHCI_HCD)	+= pehci/
 
 obj-$(CONFIG_PCI)		+= pci-quirks.o
 
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 40a844c..dd67cad 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -39,7 +39,7 @@
  * (host controller _Structural_ parameters)
  * see EHCI spec, Table 2-4 for each value
  */
-static void dbg_hcs_params (struct ehci_hcd *ehci, char *label)
+static void __maybe_unused dbg_hcs_params (struct ehci_hcd *ehci, char *label)
 {
 	u32	params = ehci_readl(ehci, &ehci->caps->hcs_params);
 
@@ -83,7 +83,7 @@
  * (host controller _Capability_ parameters)
  * see EHCI Spec, Table 2-5 for each value
  * */
-static void dbg_hcc_params (struct ehci_hcd *ehci, char *label)
+static void __maybe_unused dbg_hcc_params (struct ehci_hcd *ehci, char *label)
 {
 	u32	params = ehci_readl(ehci, &ehci->caps->hcc_params);
 
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 9ff9abc..8e6ef28 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -451,7 +451,7 @@
 	spin_unlock_irq(&ehci->lock);
 }
 
-static void ehci_port_power (struct ehci_hcd *ehci, int is_on)
+static void __maybe_unused ehci_port_power (struct ehci_hcd *ehci, int is_on)
 {
 	unsigned port;
 
@@ -667,7 +667,7 @@
 }
 
 /* start HC running; it's halted, ehci_init() has been run (once) */
-static int ehci_run (struct usb_hcd *hcd)
+static int __maybe_unused ehci_run (struct usb_hcd *hcd)
 {
 	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
 	int			retval;
@@ -1115,7 +1115,7 @@
 	spin_unlock_irqrestore (&ehci->lock, flags);
 }
 
-static void
+static void __maybe_unused
 ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
 {
 	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
@@ -1254,6 +1254,11 @@
 #define PLATFORM_DRIVER		spear_ehci_hcd_driver
 #endif
 
+#ifdef CONFIG_USB_EHCI_MSM_72K
+#include "ehci-msm72k.c"
+#define PLATFORM_DRIVER		ehci_msm_driver
+#endif
+
 #ifdef CONFIG_USB_EHCI_MSM
 #include "ehci-msm.c"
 #define PLATFORM_DRIVER		ehci_msm_driver
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 88cfb8f..15cac20 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -735,6 +735,151 @@
 }
 
 /*-------------------------------------------------------------------------*/
+#ifdef CONFIG_USB_EHCI_EHSET
+
+#define EHSET_TEST_SINGLE_STEP_SET_FEATURE 0x06
+
+static void usb_ehset_completion(struct urb *urb)
+{
+	struct completion  *done = urb->context;
+
+	complete(done);
+}
+static int submit_single_step_set_feature(
+	struct usb_hcd  *hcd,
+	struct urb      *urb,
+	int 		is_setup
+);
+
+/* Allocate a URB and initialize its various fields.
+ * This is used by the EHSET single_step_set_feature test, in which the
+ * IN stage of the GetDescriptor request is sent 15 seconds after the
+ * SETUP packet.
+ * Returns NULL on failure.
+ */
+static struct urb *
+request_single_step_set_feature_urb(
+	struct usb_device 	*udev,
+	void 			*dr,
+	void 			*buf,
+	struct completion 	*done
+) {
+	struct urb *urb;
+	struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+	struct usb_host_endpoint	*ep;
+
+	urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!urb)
+		return NULL;
+
+	urb->pipe = usb_rcvctrlpipe(udev, 0);
+	ep = (usb_pipein(urb->pipe) ? udev->ep_in : udev->ep_out)
+			[usb_pipeendpoint(urb->pipe)];
+	if (!ep) {
+		usb_free_urb(urb);
+		return NULL;
+	}
+
+	/* Initialize the various URB fields, as these are used by the
+	 * HCD driver both to queue the URB and to handle its completion.
+	 */
+	urb->ep = ep;
+	urb->dev = udev;
+	urb->setup_packet = (void *)dr;
+	urb->transfer_buffer = buf;
+	urb->transfer_buffer_length = USB_DT_DEVICE_SIZE;
+	urb->complete = usb_ehset_completion;
+	urb->status = -EINPROGRESS;
+	urb->actual_length = 0;
+	urb->transfer_flags = (urb->transfer_flags & ~URB_DIR_MASK)
+				| URB_DIR_IN;
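+	/* do the bookkeeping and DMA mapping that usb_submit_urb() would
+	 * normally do, since this URB is handed straight to the HCD
+	 */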
+	usb_get_urb(urb);
+	atomic_inc(&urb->use_count);
+	atomic_inc(&urb->dev->urbnum);
+	urb->setup_dma = dma_map_single(
+			hcd->self.controller,
+			urb->setup_packet,
+			sizeof(struct usb_ctrlrequest),
+			DMA_TO_DEVICE);
+	urb->transfer_dma = dma_map_single(
+			hcd->self.controller,
+			urb->transfer_buffer,
+			urb->transfer_buffer_length,
+			DMA_FROM_DEVICE);
+	urb->context = done;
+	return urb;
+}
+
+static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
+{
+	int retval = -ENOMEM;
+	struct usb_ctrlrequest *dr;
+	struct urb *urb;
+	struct usb_device *udev;
+	struct ehci_hcd	*ehci = hcd_to_ehci(hcd);
+	struct usb_device_descriptor *buf;
+	DECLARE_COMPLETION_ONSTACK(done);
+
+	/* Obtain udev of the root hub's child port */
+	udev = hcd->self.root_hub->children[port];
+	if (!udev) {
+		ehci_err(ehci, "No device attached to the RootHub\n");
+		return -ENODEV;
+	}
+	buf = kmalloc(USB_DT_DEVICE_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
+	if (!dr) {
+		kfree(buf);
+		return -ENOMEM;
+	}
+
+	/* Fill Setup packet for GetDescriptor */
+	dr->bRequestType = USB_DIR_IN;
+	dr->bRequest = USB_REQ_GET_DESCRIPTOR;
+	dr->wValue = cpu_to_le16(USB_DT_DEVICE << 8);
+	dr->wIndex = 0;
+	dr->wLength = cpu_to_le16(USB_DT_DEVICE_SIZE);
+	urb = request_single_step_set_feature_urb(udev, dr, buf, &done);
+	if (!urb)
+		goto cleanup;
+
+	/* Now complete just the SETUP stage */
+	retval = submit_single_step_set_feature(hcd, urb, 1);
+	if (retval)
+		goto out1;
+	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(2000))) {
+		usb_kill_urb(urb);
+		retval = -ETIMEDOUT;
+		ehci_err(ehci, "%s SETUP stage timed out on ep0\n", __func__);
+		goto out1;
+	}
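+	/* the single-step test requires a 15 second gap between the SETUP
+	 * stage and the deferred IN/STATUS stages
+	 */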
+	msleep(15 * 1000);
+	/* Complete remaining DATA and status stages */
+	/* No need to free the URB, we can reuse the same */
+	urb->status = -EINPROGRESS;
+	usb_get_urb(urb);
+	atomic_inc(&urb->use_count);
+	atomic_inc(&urb->dev->urbnum);
+	retval = submit_single_step_set_feature(hcd, urb, 0);
+	if (!retval && !wait_for_completion_timeout(&done,
+						msecs_to_jiffies(2000))) {
+		usb_kill_urb(urb);
+		retval = -ETIMEDOUT;
+		ehci_err(ehci, "%s IN stage timed out on ep0\n", __func__);
+	}
+out1:
+	usb_free_urb(urb);
+cleanup:
+	kfree(dr);
+	kfree(buf);
+	return retval;
+}
+#endif
+/*-------------------------------------------------------------------------*/
 
 static int ehci_hub_control (
 	struct usb_hcd	*hcd,
@@ -1056,6 +1201,16 @@
 					|| (temp & PORT_RESET) != 0)
 				goto error;
 
+			ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
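+			/* on the OTG port with HNP enabled, hand the bus
+			 * over to the peripheral rather than just
+			 * suspending the port
+			 */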
+#ifdef	CONFIG_USB_OTG
+			if (hcd->self.otg_port == (wIndex + 1) &&
+					hcd->self.b_hnp_enable &&
+					ehci->start_hnp) {
+				set_bit(wIndex, &ehci->suspended_ports);
+				ehci->start_hnp(ehci);
+				break;
+			}
+#endif
 			/* After above check the port must be connected.
 			 * Set appropriate bit thus could put phy into low power
 			 * mode if we have hostpc feature
@@ -1118,12 +1273,23 @@
 		 * about the EHCI-specific stuff.
 		 */
 		case USB_PORT_FEAT_TEST:
-			if (!selector || selector > 5)
+			if (selector && selector <= 5) {
+				ehci_quiesce(ehci);
+				ehci_halt(ehci);
+				temp |= selector << 16;
+				ehci_writel(ehci, temp, status_reg);
+			}
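+			/* selector 6 requests the EHSET "Single Step Set
+			 * Feature" test, which is driven in software rather
+			 * than via the port test-mode bits
+			 */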
+#ifdef CONFIG_USB_EHCI_EHSET
+			else if (selector
+				  == EHSET_TEST_SINGLE_STEP_SET_FEATURE) {
+				spin_unlock_irqrestore(&ehci->lock, flags);
+				retval = ehset_single_step_set_feature(hcd,
+								   wIndex);
+				spin_lock_irqsave(&ehci->lock, flags);
+			}
+#endif
+			else
 				goto error;
-			ehci_quiesce(ehci);
-			ehci_halt(ehci);
-			temp |= selector << 16;
-			ehci_writel(ehci, temp, status_reg);
 			break;
 
 		default:
@@ -1151,7 +1317,7 @@
 	set_owner(ehci, --portnum, PORT_OWNER);
 }
 
-static int ehci_port_handed_over(struct usb_hcd *hcd, int portnum)
+static int __maybe_unused ehci_port_handed_over(struct usb_hcd *hcd, int portnum)
 {
 	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
 	u32 __iomem		*reg;
diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c
index b5a0bf6..411fa97 100644
--- a/drivers/usb/host/ehci-msm.c
+++ b/drivers/usb/host/ehci-msm.c
@@ -220,6 +220,9 @@
 
 	dev_dbg(dev, "ehci-msm PM suspend\n");
 
+	if (!hcd->rh_registered)
+		return 0;
+
 	/*
 	 * EHCI helper function has also the same check before manipulating
 	 * port wakeup flags.  We do check here the same condition before
@@ -241,6 +244,10 @@
 	struct usb_hcd *hcd = dev_get_drvdata(dev);
 
 	dev_dbg(dev, "ehci-msm PM resume\n");
+
+	if (!hcd->rh_registered)
+		return 0;
+
 	ehci_prepare_ports_for_controller_resume(hcd_to_ehci(hcd));
 
 	return 0;
diff --git a/drivers/usb/host/ehci-msm72k.c b/drivers/usb/host/ehci-msm72k.c
new file mode 100644
index 0000000..e550e2b
--- /dev/null
+++ b/drivers/usb/host/ehci-msm72k.c
@@ -0,0 +1,823 @@
+/* ehci-msm.c - HSUSB Host Controller Driver Implementation
+ *
+ * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ *
+ * Partly derived from ehci-fsl.c and ehci-hcd.c
+ * Copyright (c) 2000-2004 by David Brownell
+ * Copyright (c) 2005 MontaVista Software
+ *
+ * All source code in this file is licensed under the following license except
+ * where indicated.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can find it at http://www.fsf.org
+ */
+
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/clk.h>
+#include <linux/spinlock.h>
+
+#include <mach/board.h>
+#include <mach/rpc_hsusb.h>
+#include <mach/msm_hsusb.h>
+#include <mach/msm_hsusb_hw.h>
+#include <mach/msm_otg.h>
+#include <mach/clk.h>
+#include <linux/wakelock.h>
+#include <linux/pm_runtime.h>
+
+#include <mach/msm72k_otg.h>
+
+#define MSM_USB_BASE (hcd->regs)
+
+struct msmusb_hcd {
+	struct ehci_hcd ehci;
+	struct clk *clk;
+	struct clk *pclk;
+	unsigned in_lpm;
+	struct work_struct lpm_exit_work;
+	spinlock_t lock;
+	struct wake_lock wlock;
+	unsigned int clk_enabled;
+	struct msm_usb_host_platform_data *pdata;
+	unsigned running;
+	struct otg_transceiver *xceiv;
+	struct work_struct otg_work;
+	unsigned flags;
+	struct msm_otg_ops otg_ops;
+};
+
+static inline struct msmusb_hcd *hcd_to_mhcd(struct usb_hcd *hcd)
+{
+	return (struct msmusb_hcd *) (hcd->hcd_priv);
+}
+
+static inline struct usb_hcd *mhcd_to_hcd(struct msmusb_hcd *mhcd)
+{
+	return container_of((void *) mhcd, struct usb_hcd, hcd_priv);
+}
+
+static void msm_xusb_pm_qos_update(struct msmusb_hcd *mhcd, int vote)
+{
+	struct msm_usb_host_platform_data *pdata = mhcd->pdata;
+
+	/* if otg driver is available, it would take
+	 * care of voting for appropriate pclk source
+	 */
+	if (mhcd->xceiv)
+		return;
+
+	if (vote)
+		clk_enable(pdata->ebi1_clk);
+	else
+		clk_disable(pdata->ebi1_clk);
+}
+
+static void msm_xusb_enable_clks(struct msmusb_hcd *mhcd)
+{
+	struct msm_usb_host_platform_data *pdata = mhcd->pdata;
+
+	if (mhcd->clk_enabled)
+		return;
+
+	switch (PHY_TYPE(pdata->phy_info)) {
+	case USB_PHY_INTEGRATED:
+		/* OTG driver takes care of clock management */
+		break;
+	case USB_PHY_SERIAL_PMIC:
+		clk_enable(mhcd->clk);
+		clk_enable(mhcd->pclk);
+		break;
+	default:
+		pr_err("%s: undefined phy type ( %X ) \n", __func__,
+						pdata->phy_info);
+		return;
+	}
+	mhcd->clk_enabled = 1;
+}
+
+static void msm_xusb_disable_clks(struct msmusb_hcd *mhcd)
+{
+	struct msm_usb_host_platform_data *pdata = mhcd->pdata;
+
+	if (!mhcd->clk_enabled)
+		return;
+
+	switch (PHY_TYPE(pdata->phy_info)) {
+	case USB_PHY_INTEGRATED:
+		/* OTG driver takes care of clock management */
+		break;
+	case USB_PHY_SERIAL_PMIC:
+		clk_disable(mhcd->clk);
+		clk_disable(mhcd->pclk);
+		break;
+	default:
+		pr_err("%s: undefined phy type ( %X ) \n", __func__,
+						pdata->phy_info);
+		return;
+	}
+	mhcd->clk_enabled = 0;
+}
+
+static int usb_wakeup_phy(struct usb_hcd *hcd)
+{
+	struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
+	struct msm_usb_host_platform_data *pdata = mhcd->pdata;
+	int ret = -ENODEV;
+
+	switch (PHY_TYPE(pdata->phy_info)) {
+	case USB_PHY_INTEGRATED:
+		break;
+	case USB_PHY_SERIAL_PMIC:
+		ret = msm_fsusb_resume_phy();
+		break;
+	default:
+		pr_err("%s: undefined phy type ( %X ) \n", __func__,
+						pdata->phy_info);
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_PM
+static int usb_suspend_phy(struct usb_hcd *hcd)
+{
+	int ret = 0;
+	struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
+	struct msm_usb_host_platform_data *pdata = mhcd->pdata;
+
+	switch (PHY_TYPE(pdata->phy_info)) {
+	case USB_PHY_INTEGRATED:
+		break;
+	case USB_PHY_SERIAL_PMIC:
+		ret = msm_fsusb_set_remote_wakeup();
+		ret = msm_fsusb_suspend_phy();
+		break;
+	default:
+		pr_err("%s: undefined phy type ( %X ) \n", __func__,
+						pdata->phy_info);
+		ret = -ENODEV;
+		break;
+	}
+
+	return ret;
+}
+
+static int usb_lpm_enter(struct usb_hcd *hcd)
+{
+	struct device *dev = container_of((void *)hcd, struct device,
+							platform_data);
+	struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
+
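+	/* keep the controller interrupt disabled for the duration of the
+	 * LPM entry sequence
+	 */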
+	disable_irq(hcd->irq);
+	if (mhcd->in_lpm) {
+		pr_info("%s: already in lpm. nothing to do\n", __func__);
+		enable_irq(hcd->irq);
+		return 0;
+	}
+
+	if (HC_IS_RUNNING(hcd->state)) {
+		pr_info("%s: can't enter into lpm. controller is running\n",
+			__func__);
+		enable_irq(hcd->irq);
+		return -1;
+	}
+
+	pr_info("%s: lpm enter procedure started\n", __func__);
+
+	mhcd->in_lpm = 1;
+
+	if (usb_suspend_phy(hcd)) {
+		mhcd->in_lpm = 0;
+		enable_irq(hcd->irq);
+		pr_info("phy suspend failed\n");
+		pr_info("%s: lpm enter procedure end\n", __func__);
+		return -1;
+	}
+
+	msm_xusb_disable_clks(mhcd);
+
+	if (mhcd->xceiv && mhcd->xceiv->set_suspend)
+		mhcd->xceiv->set_suspend(mhcd->xceiv, 1);
+
+	if (device_may_wakeup(dev))
+		enable_irq_wake(hcd->irq);
+	enable_irq(hcd->irq);
+	pr_info("%s: lpm enter procedure end\n", __func__);
+	return 0;
+}
+#endif
+
+void usb_lpm_exit_w(struct work_struct *work)
+{
+	struct msmusb_hcd *mhcd = container_of((void *) work,
+			struct msmusb_hcd, lpm_exit_work);
+
+	struct usb_hcd *hcd = mhcd_to_hcd(mhcd);
+
+	struct device *dev = container_of((void *)hcd, struct device,
+							platform_data);
+	msm_xusb_enable_clks(mhcd);
+
+	if (usb_wakeup_phy(hcd)) {
+		pr_err("fatal error: cannot bring phy out of lpm\n");
+		return;
+	}
+
+	/* If resume signalling finishes before lpm exit, PCD is not set in
+	 * USBSTS register. Drive resume signal to the downstream device now
+	 * so that EHCI can process the upcoming port change interrupt.*/
+
+	writel(readl(USB_PORTSC) | PORTSC_FPR, USB_PORTSC);
+
+	if (mhcd->xceiv && mhcd->xceiv->set_suspend)
+		mhcd->xceiv->set_suspend(mhcd->xceiv, 0);
+
+	if (device_may_wakeup(dev))
+		disable_irq_wake(hcd->irq);
+	enable_irq(hcd->irq);
+}
+
+static void usb_lpm_exit(struct usb_hcd *hcd)
+{
+	unsigned long flags;
+	struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
+
+	spin_lock_irqsave(&mhcd->lock, flags);
+	if (!mhcd->in_lpm) {
+		spin_unlock_irqrestore(&mhcd->lock, flags);
+		return;
+	}
+	mhcd->in_lpm = 0;
+	disable_irq_nosync(hcd->irq);
+	schedule_work(&mhcd->lpm_exit_work);
+	spin_unlock_irqrestore(&mhcd->lock, flags);
+}
+
+static irqreturn_t ehci_msm_irq(struct usb_hcd *hcd)
+{
+	struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
+	struct msm_otg *otg = container_of(mhcd->xceiv, struct msm_otg, otg);
+
+	/*
+	 * OTG has scheduled a work item to bring the integrated PHY out of
+	 * LPM; ignore controller interrupts until that completes.
+	 */
+	if (PHY_TYPE(mhcd->pdata->phy_info) == USB_PHY_INTEGRATED)
+		if (atomic_read(&otg->in_lpm))
+			return IRQ_HANDLED;
+
+	return ehci_irq(hcd);
+}
+
+#ifdef CONFIG_PM
+
+static int ehci_msm_bus_suspend(struct usb_hcd *hcd)
+{
+	int ret;
+	struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
+	struct device *dev = hcd->self.controller;
+
+	ret = ehci_bus_suspend(hcd);
+	if (ret) {
+		pr_err("ehci_bus_suspend failed\n");
+		return ret;
+	}
+	if (PHY_TYPE(mhcd->pdata->phy_info) == USB_PHY_INTEGRATED)
+		ret = otg_set_suspend(mhcd->xceiv, 1);
+	else
+		ret = usb_lpm_enter(hcd);
+
+	pm_runtime_put_noidle(dev);
+	pm_runtime_suspend(dev);
+	wake_unlock(&mhcd->wlock);
+	return ret;
+}
+
+static int ehci_msm_bus_resume(struct usb_hcd *hcd)
+{
+	struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
+	struct device *dev = hcd->self.controller;
+
+	wake_lock(&mhcd->wlock);
+	pm_runtime_get_noresume(dev);
+	pm_runtime_resume(dev);
+
+	if (PHY_TYPE(mhcd->pdata->phy_info) == USB_PHY_INTEGRATED) {
+		otg_set_suspend(mhcd->xceiv, 0);
+	} else { /* PMIC serial phy */
+		usb_lpm_exit(hcd);
+		if (cancel_work_sync(&(mhcd->lpm_exit_work)))
+			usb_lpm_exit_w(&mhcd->lpm_exit_work);
+	}
+
+	return ehci_bus_resume(hcd);
+}
+
+#else
+
+#define ehci_msm_bus_suspend NULL
+#define ehci_msm_bus_resume NULL
+
+#endif	/* CONFIG_PM */
+
+static int ehci_msm_reset(struct usb_hcd *hcd)
+{
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	int retval;
+
+	ehci->caps = USB_CAPLENGTH;
+	ehci->regs = USB_CAPLENGTH +
+		HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
+
+	/* cache the data to minimize the chip reads*/
+	ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+
+	retval = ehci_init(hcd);
+	if (retval)
+		return retval;
+
+	hcd->has_tt = 1;
+	ehci->sbrn = HCD_USB2;
+
+	retval = ehci_reset(ehci);
+
+	/* SW workaround for USB stability issues */
+	writel(0x0, USB_AHB_MODE);
+	writel(0x0, USB_AHB_BURST);
+
+	return retval;
+}
+
+#define PTS_VAL(x) (PHY_TYPE(x) == USB_PHY_SERIAL_PMIC) ? PORTSC_PTS_SERIAL : \
+							PORTSC_PTS_ULPI
+
+static int ehci_msm_run(struct usb_hcd *hcd)
+{
+	struct ehci_hcd *ehci  = hcd_to_ehci(hcd);
+	struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
+	int             retval = 0;
+	int     	port   = HCS_N_PORTS(ehci->hcs_params);
+	u32 __iomem     *reg_ptr;
+	u32             hcc_params;
+	struct msm_usb_host_platform_data *pdata = mhcd->pdata;
+
+	hcd->uses_new_polling = 1;
+	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+
+	/* set hostmode */
+	reg_ptr = (u32 __iomem *)(((u8 __iomem *)ehci->regs) + USBMODE);
+	ehci_writel(ehci, (USBMODE_VBUS | USBMODE_SDIS), reg_ptr);
+
+	/* port configuration - phy, port speed, port power, port enable */
+	while (port--)
+		ehci_writel(ehci, (PTS_VAL(pdata->phy_info) | PORT_POWER |
+				PORT_PE), &ehci->regs->port_status[port]);
+
+	ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
+	ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next);
+
+	hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
+	if (HCC_64BIT_ADDR(hcc_params))
+		ehci_writel(ehci, 0, &ehci->regs->segment);
+
+	ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
+	ehci->command |= CMD_RUN;
+	ehci_writel(ehci, ehci->command, &ehci->regs->command);
+	ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
+
+	hcd->state = HC_STATE_RUNNING;
+
+	/* Enable appropriate interrupts */
+	ehci_writel(ehci, INTR_MASK, &ehci->regs->intr_enable);
+
+	return retval;
+}
+
+static struct hc_driver msm_hc_driver = {
+	.description		= hcd_name,
+	.product_desc 		= "Qualcomm On-Chip EHCI Host Controller",
+	.hcd_priv_size 		= sizeof(struct msmusb_hcd),
+
+	/*
+	 * generic hardware linkage
+	 */
+	.irq 			= ehci_msm_irq,
+	.flags 			= HCD_USB2,
+
+	.reset 			= ehci_msm_reset,
+	.start 			= ehci_msm_run,
+
+	.stop			= ehci_stop,
+	.shutdown		= ehci_shutdown,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue		= ehci_urb_enqueue,
+	.urb_dequeue		= ehci_urb_dequeue,
+	.endpoint_disable	= ehci_endpoint_disable,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number	= ehci_get_frame,
+
+	/*
+	 * root hub support
+	 */
+	.hub_status_data	= ehci_hub_status_data,
+	.hub_control		= ehci_hub_control,
+	.bus_suspend		= ehci_msm_bus_suspend,
+	.bus_resume		= ehci_msm_bus_resume,
+	.relinquish_port	= ehci_relinquish_port,
+
+	.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+};
+
+static void msm_hsusb_request_host(void *handle, int request)
+{
+	struct msmusb_hcd *mhcd = handle;
+	struct usb_hcd *hcd = mhcd_to_hcd(mhcd);
+	struct msm_usb_host_platform_data *pdata = mhcd->pdata;
+	struct msm_otg *otg = container_of(mhcd->xceiv, struct msm_otg, otg);
+#ifdef CONFIG_USB_OTG
+	struct usb_device *udev = hcd->self.root_hub;
+#endif
+	struct device *dev = hcd->self.controller;
+
+	switch (request) {
+#ifdef CONFIG_USB_OTG
+	case REQUEST_HNP_SUSPEND:
+		/* Disable root hub autosuspend. As the hardware is configured
+		 * for peripheral mode, mark the hardware as not available.
+		 */
+		if (PHY_TYPE(pdata->phy_info) == USB_PHY_INTEGRATED) {
+			pm_runtime_disable(&udev->dev);
+			/* Mark the root hub as disconnected. This guards
+			 * against suspend/resume being triggered via sysfs.
+			 */
+			udev->state = USB_STATE_NOTATTACHED;
+			clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+			hcd->state = HC_STATE_HALT;
+			pm_runtime_put_noidle(dev);
+			pm_runtime_suspend(dev);
+		}
+		break;
+	case REQUEST_HNP_RESUME:
+		if (PHY_TYPE(pdata->phy_info) == USB_PHY_INTEGRATED) {
+			pm_runtime_get_noresume(dev);
+			pm_runtime_resume(dev);
+			disable_irq(hcd->irq);
+			ehci_msm_reset(hcd);
+			ehci_msm_run(hcd);
+			set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+			pm_runtime_enable(&udev->dev);
+			udev->state = USB_STATE_CONFIGURED;
+			enable_irq(hcd->irq);
+		}
+		break;
+#endif
+	case REQUEST_RESUME:
+		usb_hcd_resume_root_hub(hcd);
+		break;
+	case REQUEST_START:
+		if (mhcd->running)
+			break;
+		pm_runtime_get_noresume(dev);
+		pm_runtime_resume(dev);
+		wake_lock(&mhcd->wlock);
+		msm_xusb_pm_qos_update(mhcd, 1);
+		msm_xusb_enable_clks(mhcd);
+		if (PHY_TYPE(pdata->phy_info) == USB_PHY_INTEGRATED)
+			if (otg->set_clk)
+				otg->set_clk(mhcd->xceiv, 1);
+		if (pdata->vbus_power)
+			pdata->vbus_power(pdata->phy_info, 1);
+		if (pdata->config_gpio)
+			pdata->config_gpio(1);
+		usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
+		mhcd->running = 1;
+		if (PHY_TYPE(pdata->phy_info) == USB_PHY_INTEGRATED)
+			if (otg->set_clk)
+				otg->set_clk(mhcd->xceiv, 0);
+		break;
+	case REQUEST_STOP:
+		if (!mhcd->running)
+			break;
+		mhcd->running = 0;
+		/* come out of lpm before deregistration */
+		if (PHY_TYPE(pdata->phy_info) == USB_PHY_SERIAL_PMIC) {
+			usb_lpm_exit(hcd);
+			if (cancel_work_sync(&(mhcd->lpm_exit_work)))
+				usb_lpm_exit_w(&mhcd->lpm_exit_work);
+		}
+		usb_remove_hcd(hcd);
+		if (pdata->config_gpio)
+			pdata->config_gpio(0);
+		if (pdata->vbus_power)
+			pdata->vbus_power(pdata->phy_info, 0);
+		msm_xusb_disable_clks(mhcd);
+		wake_lock_timeout(&mhcd->wlock, HZ/2);
+		msm_xusb_pm_qos_update(mhcd, 0);
+		pm_runtime_put_noidle(dev);
+		pm_runtime_suspend(dev);
+		break;
+	}
+}
+
+static void msm_hsusb_otg_work(struct work_struct *work)
+{
+	struct msmusb_hcd *mhcd;
+
+	mhcd = container_of(work, struct msmusb_hcd, otg_work);
+	msm_hsusb_request_host((void *)mhcd, mhcd->flags);
+}
+static void msm_hsusb_start_host(struct usb_bus *bus, int start)
+{
+	struct usb_hcd *hcd = bus_to_hcd(bus);
+	struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
+
+	mhcd->flags = start;
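+	/* usb_add_hcd()/usb_remove_hcd() can sleep; when called from
+	 * interrupt context defer the request to the workqueue
+	 */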
+	if (in_interrupt())
+		schedule_work(&mhcd->otg_work);
+	else
+		msm_hsusb_request_host((void *)mhcd, mhcd->flags);
+
+}
+
+static int msm_xusb_init_phy(struct msmusb_hcd *mhcd)
+{
+	int ret = -ENODEV;
+	struct usb_hcd *hcd = mhcd_to_hcd(mhcd);
+	struct msm_usb_host_platform_data *pdata = mhcd->pdata;
+
+	switch (PHY_TYPE(pdata->phy_info)) {
+	case USB_PHY_INTEGRATED:
+		ret = 0;
+		break;
+	case USB_PHY_SERIAL_PMIC:
+		msm_xusb_enable_clks(mhcd);
+		writel(0, USB_USBINTR);
+		ret = msm_fsusb_rpc_init(&mhcd->otg_ops);
+		if (!ret)
+			msm_fsusb_init_phy();
+		msm_xusb_disable_clks(mhcd);
+		break;
+	default:
+		pr_err("%s: undefined phy type ( %X ) \n", __func__,
+						pdata->phy_info);
+	}
+
+	return ret;
+}
+
+static int msm_xusb_rpc_close(struct msmusb_hcd *mhcd)
+{
+	int retval = -ENODEV;
+	struct msm_usb_host_platform_data *pdata = mhcd->pdata;
+
+	switch (PHY_TYPE(pdata->phy_info)) {
+	case USB_PHY_INTEGRATED:
+		if (!mhcd->xceiv)
+			retval = msm_hsusb_rpc_close();
+		break;
+	case USB_PHY_SERIAL_PMIC:
+		retval = msm_fsusb_reset_phy();
+		msm_fsusb_rpc_deinit();
+		break;
+	default:
+		pr_err("%s: undefined phy type ( %X ) \n", __func__,
+						pdata->phy_info);
+	}
+	return retval;
+}
+
+#ifdef	CONFIG_USB_OTG
+static void ehci_msm_start_hnp(struct ehci_hcd *ehci)
+{
+	struct usb_hcd *hcd = ehci_to_hcd(ehci);
+	struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
+
+	/* OTG driver handles HNP */
+	otg_start_hnp(mhcd->xceiv);
+}
+#else
+#define ehci_msm_start_hnp	NULL
+#endif
+
+static int msm_xusb_init_host(struct msmusb_hcd *mhcd)
+{
+	int ret = 0;
+	struct msm_otg *otg;
+	struct usb_hcd *hcd = mhcd_to_hcd(mhcd);
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	struct msm_usb_host_platform_data *pdata = mhcd->pdata;
+
+	switch (PHY_TYPE(pdata->phy_info)) {
+	case USB_PHY_INTEGRATED:
+		msm_hsusb_rpc_connect();
+
+		if (pdata->vbus_init)
+			pdata->vbus_init(1);
+
+		/* VBUS might be present. Turn off vbus */
+		if (pdata->vbus_power)
+			pdata->vbus_power(pdata->phy_info, 0);
+
+		INIT_WORK(&mhcd->otg_work, msm_hsusb_otg_work);
+		mhcd->xceiv = otg_get_transceiver();
+		if (!mhcd->xceiv)
+			return -ENODEV;
+		otg = container_of(mhcd->xceiv, struct msm_otg, otg);
+		hcd->regs = otg->regs;
+		otg->start_host = msm_hsusb_start_host;
+		ehci->start_hnp = ehci_msm_start_hnp;
+
+		ret = otg_set_host(mhcd->xceiv, &hcd->self);
+		break;
+	case USB_PHY_SERIAL_PMIC:
+		hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+
+		if (!hcd->regs)
+			return -EFAULT;
+		/* get usb clocks */
+		mhcd->clk = clk_get(NULL, "usb_hs2_clk");
+		if (IS_ERR(mhcd->clk)) {
+			iounmap(hcd->regs);
+			return PTR_ERR(mhcd->clk);
+		}
+
+		mhcd->pclk = clk_get(NULL, "usb_hs2_pclk");
+		if (IS_ERR(mhcd->pclk)) {
+			iounmap(hcd->regs);
+			clk_put(mhcd->clk);
+			return PTR_ERR(mhcd->pclk);
+		}
+		mhcd->otg_ops.request = msm_hsusb_request_host;
+		mhcd->otg_ops.handle = (void *) mhcd;
+		ret = msm_xusb_init_phy(mhcd);
+		if (ret < 0) {
+			iounmap(hcd->regs);
+			clk_put(mhcd->clk);
+			clk_put(mhcd->pclk);
+		}
+		break;
+	default:
+		pr_err("phy type is bad\n");
+	}
+	return ret;
+}
+
+static int __devinit ehci_msm_probe(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd;
+	struct resource *res;
+	struct msm_usb_host_platform_data *pdata;
+	int retval;
+	struct msmusb_hcd *mhcd;
+
+	hcd = usb_create_hcd(&msm_hc_driver, &pdev->dev, dev_name(&pdev->dev));
+	if (!hcd)
+		return  -ENOMEM;
+
+	hcd->irq = platform_get_irq(pdev, 0);
+	if (hcd->irq < 0) {
+		usb_put_hcd(hcd);
+		return hcd->irq;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		usb_put_hcd(hcd);
+		return -ENODEV;
+	}
+
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);
+
+	mhcd = hcd_to_mhcd(hcd);
+	spin_lock_init(&mhcd->lock);
+	mhcd->in_lpm = 0;
+	mhcd->running = 0;
+	device_init_wakeup(&pdev->dev, 1);
+
+	pdata = pdev->dev.platform_data;
+	if (PHY_TYPE(pdata->phy_info) == USB_PHY_UNDEFINED) {
+		usb_put_hcd(hcd);
+		return -ENODEV;
+	}
+	hcd->power_budget = pdata->power_budget;
+	mhcd->pdata = pdata;
+	INIT_WORK(&mhcd->lpm_exit_work, usb_lpm_exit_w);
+
+	wake_lock_init(&mhcd->wlock, WAKE_LOCK_SUSPEND, dev_name(&pdev->dev));
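+	/* ebi1_usb_clk is treated as optional (NULL if unavailable); it is
+	 * toggled by msm_xusb_pm_qos_update() while the host is active
+	 */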
+	pdata->ebi1_clk = clk_get(NULL, "ebi1_usb_clk");
+	if (IS_ERR(pdata->ebi1_clk))
+		pdata->ebi1_clk = NULL;
+	else
+		clk_set_rate(pdata->ebi1_clk, INT_MAX);
+
+	retval = msm_xusb_init_host(mhcd);
+
+	if (retval < 0) {
+		wake_lock_destroy(&mhcd->wlock);
+		usb_put_hcd(hcd);
+		clk_put(pdata->ebi1_clk);
+	}
+
+	pm_runtime_enable(&pdev->dev);
+
+	return retval;
+}
+
+static void msm_xusb_uninit_host(struct msmusb_hcd *mhcd)
+{
+	struct usb_hcd *hcd = mhcd_to_hcd(mhcd);
+	struct msm_usb_host_platform_data *pdata = mhcd->pdata;
+
+	switch (PHY_TYPE(pdata->phy_info)) {
+	case USB_PHY_INTEGRATED:
+		if (pdata->vbus_init)
+			pdata->vbus_init(0);
+		otg_set_host(mhcd->xceiv, NULL);
+		otg_put_transceiver(mhcd->xceiv);
+		cancel_work_sync(&mhcd->otg_work);
+		break;
+	case USB_PHY_SERIAL_PMIC:
+		iounmap(hcd->regs);
+		clk_put(mhcd->clk);
+		clk_put(mhcd->pclk);
+		msm_fsusb_reset_phy();
+		msm_fsusb_rpc_deinit();
+		break;
+	default:
+		pr_err("phy type is bad\n");
+	}
+}
+static int __exit ehci_msm_remove(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
+	struct msm_usb_host_platform_data *pdata;
+	int retval = 0;
+
+	pdata = pdev->dev.platform_data;
+	device_init_wakeup(&pdev->dev, 0);
+
+	msm_hsusb_request_host((void *)mhcd, REQUEST_STOP);
+	msm_xusb_uninit_host(mhcd);
+	retval = msm_xusb_rpc_close(mhcd);
+
+	wake_lock_destroy(&mhcd->wlock);
+	usb_put_hcd(hcd);
+	clk_put(pdata->ebi1_clk);
+
+	pm_runtime_disable(&pdev->dev);
+	pm_runtime_set_suspended(&pdev->dev);
+
+	return retval;
+}
+
+static int ehci_msm_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: suspending...\n");
+	return 0;
+}
+
+static int ehci_msm_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: resuming...\n");
+	return 0;
+}
+
+static int ehci_msm_runtime_idle(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: idling...\n");
+	return 0;
+}
+
+static const struct dev_pm_ops ehci_msm_dev_pm_ops = {
+	.runtime_suspend = ehci_msm_runtime_suspend,
+	.runtime_resume = ehci_msm_runtime_resume,
+	.runtime_idle = ehci_msm_runtime_idle
+};
+
+static struct platform_driver ehci_msm_driver = {
+	.probe	= ehci_msm_probe,
+	.remove	= __exit_p(ehci_msm_remove),
+	.driver	= {.name = "msm_hsusb_host",
+		    .pm = &ehci_msm_dev_pm_ops, },
+};
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 0917e3a..a5bb387 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -1144,7 +1144,110 @@
 		qtd_list_free (ehci, urb, qtd_list);
 	return rc;
 }
+/*-------------------------------------------------------------------------*/
+/* This function creates the qtds and submits them for the
+ * SINGLE_STEP_SET_FEATURE test.
+ * This is done in two parts: first the SETUP request for GetDescriptor is
+ * sent; then, 15 seconds later, the IN stage of GetDescriptor requests the
+ * data from the device.
+ *
+ * is_setup: input argument that selects which of the two stages is
+ * performed; TRUE - SETUP and FALSE - IN+STATUS
+ * Returns 0 on success
+ */
+#ifdef CONFIG_USB_EHCI_EHSET
+static int
+submit_single_step_set_feature(
+	struct usb_hcd  *hcd,
+	struct urb      *urb,
+	int 		is_setup
+) {
+	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
+	struct list_head	qtd_list;
+	struct list_head	*head;
 
+	struct ehci_qtd		*qtd, *qtd_prev;
+	dma_addr_t		buf;
+	int			len, maxpacket;
+	u32			token;
+
+	INIT_LIST_HEAD(&qtd_list);
+	head = &qtd_list;
+
+	/*
+	 * URBs map to sequences of QTDs:  one logical transaction
+	 */
+	qtd = ehci_qtd_alloc(ehci, GFP_KERNEL);
+	if (unlikely(!qtd))
+		return -1;
+	list_add_tail(&qtd->qtd_list, head);
+	qtd->urb = urb;
+
+	token = QTD_STS_ACTIVE;
+	token |= (EHCI_TUNE_CERR << 10);
+
+	len = urb->transfer_buffer_length;
+	/* Check if the request is to perform just the SETUP stage (GetDesc);
+	 * in the SINGLE_STEP_SET_FEATURE test the DATA stage (IN) happens
+	 * 15 seconds after the SETUP.
+	 */
+	if (is_setup) {
+		/* SETUP pid */
+		qtd_fill(ehci, qtd, urb->setup_dma,
+				sizeof(struct usb_ctrlrequest),
+				token | (2 /* "setup" */ << 8), 8);
+
+		submit_async(ehci, urb, &qtd_list, GFP_ATOMIC);
+		return 0; /*Return now; we shall come back after 15 seconds*/
+	}
+
+	/*---------------------------------------------------------------------
+	 * IN: data transfer stage: buffer setup; start the IN transaction
+	 * phase for the GetDesc SETUP that was sent 15 seconds ago.
+	 */
+	token ^= QTD_TOGGLE;   /*We need to start IN with DATA-1 Pid-sequence*/
+	buf = urb->transfer_dma;
+
+	token |= (1 /* "in" */ << 8);  /*This is IN stage*/
+
+	maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, 0));
+
+	qtd_fill(ehci, qtd, buf, len, token, maxpacket);
+
+	/* Our IN phase shall always be a short read; so keep the queue running
+	 * and let it advance to the next qtd, which is the zero length OUT
+	 * status.
+	 */
+
+	qtd->hw_alt_next = EHCI_LIST_END(ehci);
+
+	/*----------------------------------------------------------------------
+	 * STATUS stage for GetDesc control request
+	 */
+	token ^= 0x0100;	/* "in" <--> "out"  */
+	token |= QTD_TOGGLE;	/* force DATA1 */
+
+	qtd_prev = qtd;
+	qtd = ehci_qtd_alloc(ehci, GFP_ATOMIC);
+	if (unlikely(!qtd))
+		goto cleanup;
+	qtd->urb = urb;
+	qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
+	list_add_tail(&qtd->qtd_list, head);
+
+	/* don't fill any data in such packets */
+	qtd_fill(ehci, qtd, 0, 0, token, 0);
+
+	/* by default, enable interrupt on urb completion */
+	if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT)))
+		qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
+
+	submit_async(ehci, urb, &qtd_list, GFP_KERNEL);
+
+	return 0;
+
+cleanup:
+	qtd_list_free(ehci, urb, head);
+	return -1;
+}
+#endif
 /*-------------------------------------------------------------------------*/
 
 /* the async qh for the qtds being reclaimed are now unlinked from the HC */
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 989e0a8..95802d9 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -124,6 +124,8 @@
 	ktime_t			last_periodic_enable;
 	u32			command;
 
+	void (*start_hnp)(struct ehci_hcd *ehci);
+
 	/* SILICON QUIRKS */
 	unsigned		no_selective_suspend:1;
 	unsigned		has_fsl_port_bug:1; /* FreeScale */
diff --git a/drivers/usb/host/pehci/Makefile b/drivers/usb/host/pehci/Makefile
new file mode 100644
index 0000000..8c0d17f
--- /dev/null
+++ b/drivers/usb/host/pehci/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the pehci driver (if driver is inside kernel tree).
+#
+
+obj-$(CONFIG_USB_PEHCI_HCD) += hal/ host/
+
diff --git a/drivers/usb/host/pehci/hal/Makefile b/drivers/usb/host/pehci/hal/Makefile
new file mode 100644
index 0000000..91408e5
--- /dev/null
+++ b/drivers/usb/host/pehci/hal/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the pehci driver (if driver is inside kernel tree).
+#
+
+obj-$(CONFIG_USB_PEHCI_HCD) += hal_msm.o
+
diff --git a/drivers/usb/host/pehci/hal/hal_intf.h b/drivers/usb/host/pehci/hal/hal_intf.h
new file mode 100644
index 0000000..2d66e57
--- /dev/null
+++ b/drivers/usb/host/pehci/hal/hal_intf.h
@@ -0,0 +1,313 @@
+/* 
+* Copyright (C) ST-Ericsson AP Pte Ltd 2010 
+*
+* ISP1763 Linux OTG Controller driver : hal
+* 
+* This program is free software; you can redistribute it and/or modify it under the terms of 
+* the GNU General Public License as published by the Free Software Foundation; version 
+* 2 of the License. 
+* 
+* This program is distributed in the hope that it will be useful, but WITHOUT ANY  
+* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS  
+* FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more  
+* details. 
+* 
+* You should have received a copy of the GNU General Public License 
+* along with this program; if not, write to the Free Software 
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. 
+* 
+* This is a hardware abstraction layer header file.
+* 
+* Author : wired support <wired.support@stericsson.com>
+*
+*/
+
+#ifndef HAL_INTF_H
+#define HAL_INTF_H
+
+
+/* Specify package here instead of including package.h */
+/* #include "package.h" */
+#define HCD_PACKAGE
+
+#define NON_PCI
+//#define PXA300
+
+//#define MSEC_INT_BASED
+#ifdef MSEC_INT_BASED
+#define THREAD_BASED 
+#endif
+
+#ifndef DATABUS_WIDTH_16
+#define DATABUS_WIDTH_16
+#endif
+
+#ifdef	DATABUS_WIDTH_16
+/*DMA SUPPORT */
+/* #define	ENABLE_PLX_DMA */
+//#undef	ENABLE_PLX_DMA//PXA300
+#endif
+
+//#define	EDGE_INTERRUPT
+//#define 	POL_HIGH_INTERRUPT
+
+#define	DMA_BUF_SIZE	(4096 * 2)
+
+#define ISP1763_CHIPID  0x176320
+
+/* Values for id_flags filed of isp1763_driver_t */
+#define ISP1763_HC				0	/* Host Controller Driver */
+#define ISP1763_DC				1	/* Device Controller Driver */
+#define ISP1763_OTG				2	/* Otg Controller Driver */
+#define ISP1763_LAST_DEV			(ISP1763_OTG + 1)
+#define ISP1763_1ST_DEV				(ISP1763_HC)
+
+#ifdef PXA300
+#define HC_SPARAMS_REG					(0x04<<1)	/* Structural Parameters Register */
+#define HC_CPARAMS_REG					(0x08<<1)	/* Capability Parameters Register */
+
+#define HC_USBCMD_REG						(0x8C<<1)	/* USB Command Register */
+#define HC_USBSTS_REG						(0x90<<1)	/* USB Status Register */
+#define HC_INTERRUPT_REG_EHCI				(0x94<<1)	/* Interrupt Enable Register */
+#define HC_FRINDEX_REG						(0x98<<1)	/* Frame Index Register */
+
+#define HC_CONFIGFLAG_REG					(0x9C<<1)	/* Configured Flag Register */
+#define HC_PORTSC1_REG					(0xA0<<1)	/* Port Status Control for Port1 */
+
+/*ISO Transfer Registers */
+#define HC_ISO_PTD_DONEMAP_REG			(0xA4<<1)	/* ISO PTD Done Map Register */
+#define HC_ISO_PTD_SKIPMAP_REG			(0xA6<<1)	/* ISO PTD Skip Map Register */
+#define HC_ISO_PTD_LASTPTD_REG				(0xA8<<1)	/* ISO PTD Last PTD Register */
+
+/*INT Transfer Registers */
+#define HC_INT_PTD_DONEMAP_REG			(0xAA<<1)	/* INT PTD Done Map Register */
+#define HC_INT_PTD_SKIPMAP_REG				(0xAC<<1)	/* INT PTD Skip Map Register */
+#define HC_INT_PTD_LASTPTD_REG				(0xAE<<1)	/* INT PTD Last PTD Register  */
+
+/*ATL Transfer Registers */
+#define HC_ATL_PTD_DONEMAP_REG			(0xB0<<1)	/* ATL PTD Last PTD Register  */
+#define HC_ATL_PTD_SKIPMAP_REG				(0xB2<<1)	/* ATL PTD Last PTD Register  */
+#define HC_ATL_PTD_LASTPTD_REG				(0xB4<<1)	/* ATL PTD Last PTD Register  */
+
+/*General Purpose Registers */
+#define HC_HW_MODE_REG					(0x0C<<1)	/* H/W Mode Register  */
+#define HC_CHIP_ID_REG						(0x70<<1)	/* Chip ID Register */
+#define HC_SCRATCH_REG					(0x78<<1)	/* Scratch Register */
+#define HC_RESET_REG						(0xB8<<1)	/* HC Reset Register */
+#define HC_HWMODECTRL_REG				(0xB6<<1)
+#define HC_UNLOCK_DEVICE					(0x7C<<1)
+
+/* Interrupt Registers */
+#define HC_INTERRUPT_REG					(0xD4<<1)	/* Interrupt Register */
+#define HC_INTENABLE_REG					(0xD6<<1)	/* Interrupt enable Register */
+#define HC_ISO_IRQ_MASK_OR_REG			(0xD8<<1)	/* ISO Mask OR Register */
+#define HC_INT_IRQ_MASK_OR_REG			(0xDA<<1)	/* INT Mask OR Register */
+#define HC_ATL_IRQ_MASK_OR_REG			(0xDC<<1)	/* ATL Mask OR Register */
+#define HC_ISO_IRQ_MASK_AND_REG			(0xDE<<1)	/* ISO Mask AND Register */
+#define HC_INT_IRQ_MASK_AND_REG			(0xE0<<1)	/* INT Mask AND Register */
+#define HC_ATL_IRQ_MASK_AND_REG			(0xE2<<1)	/* ATL Mask AND Register */
+
+/*power control reg */
+#define HC_POWER_DOWN_CONTROL_REG		(0xD0<<1)
+
+/*RAM Registers */
+#define HC_DMACONFIG_REG					(0xBC<<1)	/* DMA Config Register */
+#define HC_MEM_READ_REG					(0xC4<<1)	/* Memory Register */
+#define HC_DATA_REG						(0xC6<<1)	/* Data Register */
+
+#define OTG_CTRL_SET_REG					(0xE4<<1)
+#define OTG_CTRL_CLEAR_REG					(0xE6<<1)
+#define OTG_SOURCE_REG					(0xE8<<1)
+
+#define OTG_INTR_EN_F_SET_REG				(0xF0<<1)
+#define OTG_INTR_EN_R_SET_REG				(0xF4<<1)	/* OTG Interrupt Enable Rise register */
+
+#else
+#define HC_SPARAMS_REG					0x04	/* Structural Parameters Register */
+#define HC_CPARAMS_REG					0x08	/* Capability Parameters Register */
+
+#define HC_USBCMD_REG					0x8C	/* USB Command Register */
+#define HC_USBSTS_REG					0x90	/* USB Status Register */
+#define HC_INTERRUPT_REG_EHCI			0x94	/* Interrupt Enable Register */
+#define HC_FRINDEX_REG					0x98	/* Frame Index Register */
+
+#define HC_CONFIGFLAG_REG				0x9C	/* Configured Flag Register */
+#define HC_PORTSC1_REG					0xA0	/* Port Status Control for Port1 */
+
+/*ISO Transfer Registers */
+#define HC_ISO_PTD_DONEMAP_REG			0xA4	/* ISO PTD Done Map Register */
+#define HC_ISO_PTD_SKIPMAP_REG			0xA6	/* ISO PTD Skip Map Register */
+#define HC_ISO_PTD_LASTPTD_REG			0xA8	/* ISO PTD Last PTD Register */
+
+/*INT Transfer Registers */
+#define HC_INT_PTD_DONEMAP_REG			0xAA	/* INT PTD Done Map Register */
+#define HC_INT_PTD_SKIPMAP_REG			0xAC	/* INT PTD Skip Map Register */
+#define HC_INT_PTD_LASTPTD_REG			0xAE	/* INT PTD Last PTD Register  */
+
+/*ATL Transfer Registers */
+#define HC_ATL_PTD_DONEMAP_REG			0xB0	/* ATL PTD Last PTD Register  */
+#define HC_ATL_PTD_SKIPMAP_REG			0xB2	/* ATL PTD Last PTD Register  */
+#define HC_ATL_PTD_LASTPTD_REG			0xB4	/* ATL PTD Last PTD Register  */
+
+/*General Purpose Registers */
+#define HC_HW_MODE_REG					0x0C //0xB6	/* H/W Mode Register  */
+#define HC_CHIP_ID_REG					0x70	/* Chip ID Register */
+#define HC_SCRATCH_REG					0x78	/* Scratch Register */
+#define HC_RESET_REG					0xB8	/* HC Reset Register */
+#define HC_HWMODECTRL_REG				0xB6 //0x0C /* H/W Mode control Register  */
+#define HC_UNLOCK_DEVICE				0x7C
+
+/* Interrupt Registers */
+#define HC_INTERRUPT_REG				0xD4	/* Interrupt Register */
+#define HC_INTENABLE_REG				0xD6	/* Interrupt enable Register */
+#define HC_ISO_IRQ_MASK_OR_REG			0xD8	/* ISO Mask OR Register */
+#define HC_INT_IRQ_MASK_OR_REG			0xDA	/* INT Mask OR Register */
+#define HC_ATL_IRQ_MASK_OR_REG			0xDC	/* ATL Mask OR Register */
+#define HC_ISO_IRQ_MASK_AND_REG			0xDE	/* ISO Mask AND Register */
+#define HC_INT_IRQ_MASK_AND_REG			0xE0	/* INT Mask AND Register */
+#define HC_ATL_IRQ_MASK_AND_REG			0xE2	/* ATL Mask AND Register */
+
+/*power control reg */
+#define HC_POWER_DOWN_CONTROL_REG		0xD0
+
+/*RAM Registers */
+#define HC_DMACONFIG_REG				0xBC	/* DMA Config Register */
+#define HC_MEM_READ_REG					0xC4	/* Memory Register */
+#define HC_DATA_REG						0xC6	/* Data Register */
+
+#define OTG_CTRL_SET_REG				0xE4
+#define OTG_CTRL_CLEAR_REG				0xE6
+#define OTG_SOURCE_REG					0xE8
+
+#define OTG_INTR_EN_F_SET_REG			0xF0	/* OTG Interrupt Enable Fall register */
+#define OTG_INTR_EN_R_SET_REG			0xF4	/* OTG Interrupt Enable Rise register */
+
+#endif
+
+#define	OTG_CTRL_DPPULLUP				0x0001
+#define	OTG_CTRL_DPPULLDOWN				0x0002
+#define	OTG_CTRL_DMPULLDOWN				0x0004
+#define	OTG_CTRL_VBUS_DRV				0x0010
+#define	OTG_CTRL_VBUS_DISCHRG			0x0020
+#define	OTG_CTRL_VBUS_CHRG				0x0040
+#define	OTG_CTRL_SW_SEL_HC_DC			0x0080
+#define	OTG_CTRL_BDIS_ACON_EN			0x0100
+#define	OTG_CTRL_OTG_SE0_EN				0x0200
+#define	OTG_CTRL_OTG_DISABLE			0x0400
+#define	OTG_CTRL_VBUS_DRV_PORT2			0x1000
+#define	OTG_CTRL_SW_SEL_HC_2			0x8000
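+
+/*
+ * Illustrative sketch (not exercised in this header): a client would
+ * typically assert and de-assert individual OTG control bits through the
+ * OTG_CTRL_SET_REG/OTG_CTRL_CLEAR_REG pair, e.g. for VBUS drive:
+ *
+ *	isp1763_reg_write16(dev, OTG_CTRL_SET_REG, OTG_CTRL_VBUS_DRV);
+ *	isp1763_reg_write16(dev, OTG_CTRL_CLEAR_REG, OTG_CTRL_VBUS_DRV);
+ */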
+
+/*interrupt count and buffer status register*/
+
+
+#ifdef PXA300
+#define HC_BUFFER_STATUS_REG			(0xBA<<1)
+#define HC_INT_THRESHOLD_REG			(0xC8<<1)
+#else
+#define HC_BUFFER_STATUS_REG			0xBA
+#define HC_INT_THRESHOLD_REG			0xC8
+#endif
+
+#define HC_OTG_INTERRUPT				0x400
+
+#ifdef PXA300
+#define DC_CHIPID						(0x70<<1)
+#else
+#define DC_CHIPID						0x70
+#endif
+
+
+#ifdef PXA300
+#define FPGA_CONFIG_REG				(0x100<<1)
+#else
+#define FPGA_CONFIG_REG					0x100
+#endif
+
+#define HC_HW_MODE_GOBAL_INTR_ENABLE	0x01
+#define HC_HW_MODE_INTR_EDGE			0x02
+#define HC_HW_MODE_INTR_POLARITY_HIGH	0x04
+#define HC_HW_MODE_LOCK				0x08
+#define HC_HW_MODE_DATABUSWIDTH_8	0x10
+#define HC_HW_MODE_DREQ_POL_HIGH		0x20
+#define HC_HW_MODE_DACK_POL_HIGH		0x40
+#define HC_HW_MODE_COMN_INT			0x80
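+
+/*
+ * Sketch, modeled on the probe-time bus-width setup in hal_msm.c (the
+ * interrupt-enable bit here is only an example): the mode flags above are
+ * OR-ed into the current hardware-mode value and written back, e.g.
+ *
+ *	hwmode = isp1763_reg_read16(dev, HC_HWMODECTRL_REG, hwmode);
+ *	hwmode |= HC_HW_MODE_DATABUSWIDTH_8 | HC_HW_MODE_GOBAL_INTR_ENABLE;
+ *	isp1763_reg_write16(dev, HC_HWMODECTRL_REG, hwmode);
+ */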
+
+struct isp1763_driver;
+typedef struct _isp1763_id {
+	u16 idVendor;
+	u16 idProduct;
+	u32 driver_info;
+} isp1763_id;
+
+typedef struct isp1763_dev {
+	/*added for pci device */
+#ifdef  NON_PCI 
+		struct platform_device *dev;
+#else /*PCI*/
+	struct pci_dev *pcidev;
+#endif
+	struct isp1763_driver *driver;	/* which driver has allocated this device */
+	void *driver_data;	/* data private to the host controller driver */
+	void *otg_driver_data;	/*data private for otg controller */
+	unsigned char index;	/* local controller (HC/DC/OTG) */
+	unsigned int irq;	/*Interrupt Channel allocated for this device */
+	void (*handler) (struct isp1763_dev * dev, void *isr_data);	/* Interrupt Service Routine */
+	void *isr_data;		/* isr data of the driver */
+	unsigned long int_reg;	/* Interrupt register */
+	unsigned long alt_int_reg;	/* Interrupt register 2 */
+	unsigned long start;
+	unsigned long length;
+	struct resource *mem_res;
+	unsigned long io_base;	/* Start Io address space for this device */
+	unsigned long io_len;	/* IO address space length for this device */
+
+	unsigned long chip_id;	/* Chip Id */
+
+	char name[80];		/* device name */
+	int active;		/* device status */
+
+	/* DMA resources should come here */
+	unsigned long dma;
+	u8 *baseaddress;	/*base address for i/o ops */
+	u8 *dmabase;
+	isp1763_id *id;
+} isp1763_dev_t;
+
+
+typedef struct isp1763_driver {
+	char *name;
+	unsigned long index;	/* HC or DC or OTG */
+	isp1763_id *id;		/*device ids */
+	int (*probe) (struct isp1763_dev * dev, isp1763_id * id);	/* New device inserted */
+	void (*remove) (struct isp1763_dev * dev);	/* Device removed (NULL if not a hot-plug capable driver) */
+	
+	void (*suspend) (struct isp1763_dev * dev);	/* Device suspended */
+	void (*resume) (struct isp1763_dev * dev);	/* Device woken up */
+	void (*remotewakeup) (struct isp1763_dev *dev);  /* Remote Wakeup */
+	void (*powerup) (struct isp1763_dev *dev);  /* Device power-up mode */
+	void (*powerdown)	(struct isp1763_dev *dev); /* Device power down mode */
+} isp_1763_driver_t;
+
+struct usb_device *phci_register_otg_device(struct isp1763_dev *dev);
+
+/*otg exported function from host*/
+int phci_suspend_otg_port(struct isp1763_dev *dev, u32 command);
+int phci_enumerate_otg_port(struct isp1763_dev *dev, u32 command);
+
+extern int isp1763_register_driver(struct isp1763_driver *drv);
+extern void isp1763_unregister_driver(struct isp1763_driver *drv);
+extern int isp1763_request_irq(void (*handler)(struct isp1763_dev * dev, void *isr_data),
+		      struct isp1763_dev *dev, void *isr_data);
+extern void isp1763_free_irq(struct isp1763_dev *dev, void *isr_data);
+
+extern u32 isp1763_reg_read32(isp1763_dev_t * dev, u16 reg, u32 data);
+extern u16 isp1763_reg_read16(isp1763_dev_t * dev, u16 reg, u16 data);
+extern u8 isp1763_reg_read8(struct isp1763_dev *dev, u16 reg, u8 data);
+extern void isp1763_reg_write32(isp1763_dev_t * dev, u16 reg, u32 data);
+extern void isp1763_reg_write16(isp1763_dev_t * dev, u16 reg, u16 data);
+extern void isp1763_reg_write8(struct isp1763_dev *dev, u16 reg, u8 data);
+extern int isp1763_mem_read(isp1763_dev_t * dev, u32 start_add,
+		     u32 end_add, u32 * buffer, u32 length, u16 dir);
+extern int isp1763_mem_write(isp1763_dev_t * dev, u32 start_add,
+		      u32 end_add, u32 * buffer, u32 length, u16 dir);
+#endif /* __HAL_INTF_H__ */
diff --git a/drivers/usb/host/pehci/hal/hal_msm.c b/drivers/usb/host/pehci/hal/hal_msm.c
new file mode 100644
index 0000000..35c0203
--- /dev/null
+++ b/drivers/usb/host/pehci/hal/hal_msm.c
@@ -0,0 +1,748 @@
+/* 
+* Copyright (C) ST-Ericsson AP Pte Ltd 2010 
+*
+* ISP1763 Linux HCD Controller driver : hal
+* 
+* This program is free software; you can redistribute it and/or modify it under the terms of 
+* the GNU General Public License as published by the Free Software Foundation; version 
+* 2 of the License. 
+* 
+* This program is distributed in the hope that it will be useful, but WITHOUT ANY  
+* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS  
+* FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more  
+* details. 
+* 
+* You should have received a copy of the GNU General Public License 
+* along with this program; if not, write to the Free Software 
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. 
+* 
+* This is the main hardware abstraction layer file. Hardware initialization, interrupt
+* processing and read/write routines are handled here.
+* 
+* Author : wired support <wired.support@stericsson.com>
+*
+*/
+
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/usb.h>
+#include <linux/gpio.h>
+#include <mach/board.h>
+#include <linux/poll.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/system.h>
+#include <asm/unaligned.h>
+
+
+/*--------------------------------------------------------------*
+ *               linux system include files
+ *--------------------------------------------------------------*/
+#include "hal_msm.h"
+#include "../hal/hal_intf.h"
+#include "../hal/isp1763.h"
+
+
+/*--------------------------------------------------------------*
+ *               Local variable Definitions
+ *--------------------------------------------------------------*/
+struct isp1763_dev isp1763_loc_dev[ISP1763_LAST_DEV];
+
+
+/*--------------------------------------------------------------*
+ *               Local # Definitions
+ *--------------------------------------------------------------*/
+#define         PCI_ACCESS_RETRY_COUNT  20
+#define         ISP1763_DRIVER_NAME     "isp1763_usb"
+
+/*--------------------------------------------------------------*
+ *               Local Function
+ *--------------------------------------------------------------*/
+
+static int __devexit isp1763_remove(struct platform_device *pdev);
+static int __devinit isp1763_probe(struct platform_device *pdev);
+
+
+/*--------------------------------------------------------------*
+ *               Platform Driver Interface Functions
+ *--------------------------------------------------------------*/
+
+static struct platform_driver isp1763_usb_driver = {
+	.remove = __devexit_p(isp1763_remove),
+	.driver = {
+		.name = ISP1763_DRIVER_NAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+
+/*--------------------------------------------------------------*
+ *               ISP1763 Read write routine
+ *--------------------------------------------------------------*/
+/*
+ * EBI2 on 8660 ignores the first bit and shifts the address by
+ * one bit to the right.
+ * Hence, shift left all the register addresses before accessing
+ * them over EBI2.
+ * This logic applies only for the register read/writes, for
+ * read/write from ISP memory this conversion is not needed
+ * as the ISP obtains the memory address from 'memory' register
+ */
+
+/* Write a 32 bit Register of isp1763 */
+void
+isp1763_reg_write32(struct isp1763_dev *dev, u16 reg, u32 data)
+{
+	/* Write the 32bit to the register address given to us */
+
+	reg <<= 1;
+#ifdef DATABUS_WIDTH_16
+	writew((u16) data, dev->baseaddress + ((reg)));
+	writew((u16) (data >> 16), dev->baseaddress + (((reg + 4))));
+#else
+	writeb((u8) data, dev->baseaddress + (reg));
+	writeb((u8) (data >> 8), dev->baseaddress + ((reg + 1)));
+	writeb((u8) (data >> 16), dev->baseaddress + ((reg + 2)));
+	writeb((u8) (data >> 24), dev->baseaddress + ((reg + 3)));
+#endif
+
+}
+EXPORT_SYMBOL(isp1763_reg_write32);
+
+
+/* Read a 32 bit Register of isp1763 */
+u32
+isp1763_reg_read32(struct isp1763_dev *dev, u16 reg, u32 data)
+{
+
+#ifdef DATABUS_WIDTH_16
+	u16 wvalue1, wvalue2;
+#else
+	u8 bval1, bval2, bval3, bval4;
+#endif
+	data = 0;
+	reg <<= 1;
+#ifdef DATABUS_WIDTH_16
+	wvalue1 = readw(dev->baseaddress + ((reg)));
+	wvalue2 = readw(dev->baseaddress + (((reg + 4))));
+	data |= wvalue2;
+	data <<= 16;
+	data |= wvalue1;
+#else
+
+	bval1 = readb(dev->baseaddress + (reg));
+	bval2 = readb(dev->baseaddress + (reg + 1));
+	bval3 = readb(dev->baseaddress + (reg + 2));
+	bval4 = readb(dev->baseaddress + (reg + 3));
+	data = 0;
+	data |= bval4;
+	data <<= 8;
+	data |= bval3;
+	data <<= 8;
+	data |= bval2;
+	data <<= 8;
+	data |= bval1;
+
+#endif
+
+	return data;
+}
+EXPORT_SYMBOL(isp1763_reg_read32);
+
+
+/* Read a 16 bit Register of isp1763 */
+u16
+isp1763_reg_read16(struct isp1763_dev * dev, u16 reg, u16 data)
+{
+	reg <<= 1;
+#ifdef DATABUS_WIDTH_16
+	data = readw(dev->baseaddress + ((reg)));
+#else
+	u8 bval1, bval2;
+	bval1 = readb(dev->baseaddress + (reg));
+	if (reg == HC_DATA_REG){
+		bval2 = readb(dev->baseaddress + (reg));
+	} else {
+		bval2 = readb(dev->baseaddress + ((reg + 1)));
+	}
+	data = 0;
+	data |= bval2;
+	data <<= 8;
+	data |= bval1;
+
+#endif
+	return data;
+}
+EXPORT_SYMBOL(isp1763_reg_read16);
+
+/* Write a 16 bit Register of isp1763 */
+void
+isp1763_reg_write16(struct isp1763_dev *dev, u16 reg, u16 data)
+{
+	reg <<= 1;
+#ifdef DATABUS_WIDTH_16
+	writew(data, dev->baseaddress + ((reg)));
+#else
+	writeb((u8) data, dev->baseaddress + (reg));
+	if (reg == HC_DATA_REG){
+		writeb((u8) (data >> 8), dev->baseaddress + (reg));
+	}else{
+		writeb((u8) (data >> 8), dev->baseaddress + ((reg + 1)));
+	}
+
+#endif
+}
+EXPORT_SYMBOL(isp1763_reg_write16);
+
+/* Read a 8 bit Register of isp1763 */
+u8
+isp1763_reg_read8(struct isp1763_dev *dev, u16 reg, u8 data)
+{
+	reg <<= 1;
+	data = readb((dev->baseaddress + (reg)));
+	return data;
+}
+EXPORT_SYMBOL(isp1763_reg_read8);
+
+/* Write a 8 bit Register of isp1763 */
+void
+isp1763_reg_write8(struct isp1763_dev *dev, u16 reg, u8 data)
+{
+	reg <<= 1;
+	writeb(data, (dev->baseaddress + (reg)));
+}
+EXPORT_SYMBOL(isp1763_reg_write8);
+
+
+/*--------------------------------------------------------------*
+ *
+ * Module details: isp1763_mem_read
+ *
+ * Memory read using the PIO method.
+ *
+ *  Input:  struct isp1763_dev *dev   --> Device structure
+ *          u32 start_add             --> Starting address in ISP memory
+ *          u32 end_add               --> End address
+ *          u32 *buffer               --> Buffer pointer
+ *          u32 length                --> Length in bytes
+ *          u16 dir                   --> Direction (Inc or Dec)
+ *
+ *  Output: int                       --> 0 on success, -1 on error
+ *
+ *  Called by: system function
+ *
+ *
+ *--------------------------------------------------------------*/
+/* Memory read function PIO */
+
+int
+isp1763_mem_read(struct isp1763_dev *dev, u32 start_add,
+	u32 end_add, u32 * buffer, u32 length, u16 dir)
+{
+	u8 *one = (u8 *) buffer;
+	u16 *two = (u16 *) buffer;
+	u32 a = (u32) length;
+	u32 w;
+	u32 w2;
+
+	if (buffer == 0) {
+		printk("Buffer address zero\n");
+		return 0;
+	}
+
+
+	isp1763_reg_write16(dev, HC_MEM_READ_REG, start_add);
+	/* This delay requirement comes from the ISP1763A programming guide */
+	ndelay(100);
+last:
+	w = isp1763_reg_read16(dev, HC_DATA_REG, w);
+	w2 = isp1763_reg_read16(dev, HC_DATA_REG, w);
+	w2 <<= 16;
+	w = w | w2;
+	if (a == 1) {
+		*one = (u8) w;
+		return 0;
+	}
+	if (a == 2) {
+		*two = (u16) w;
+		return 0;
+	}
+
+	if (a == 3) {
+		*two = (u16) w;
+		one = (u8 *) (two + 1);
+		w >>= 16;
+		/* only one byte remains; avoid writing past the caller's buffer */
+		*one = (u8) w;
+		return 0;
+	}
+	while (a > 0) {
+		*buffer = w;
+		a -= 4;
+		if (a <= 0) {
+			break;
+		}
+		if (a < 4) {
+			buffer += 1;
+			one = (u8 *) buffer;
+			two = (u16 *) buffer;
+			goto last;
+		}
+		buffer += 1;
+		w = isp1763_reg_read16(dev, HC_DATA_REG, w);
+		w2 = isp1763_reg_read16(dev, HC_DATA_REG, w);
+		w2 <<= 16;
+		w = w | w2;
+	}
+	return ((a < 0) || (a == 0)) ? 0 : (-1);
+
+}
+EXPORT_SYMBOL(isp1763_mem_read);
+
+
+/*--------------------------------------------------------------*
+ *
+ * Module details: isp1763_mem_write
+ *
+ * Memory write using the PIO method.
+ *
+ *  Input:  struct isp1763_dev *dev   --> Device structure
+ *          u32 start_add             --> Starting address in ISP memory
+ *          u32 end_add               --> End address
+ *          u32 *buffer               --> Buffer pointer
+ *          u32 length                --> Length in bytes
+ *          u16 dir                   --> Direction (Inc or Dec)
+ *
+ *  Output: int                       --> 0 on success, -1 on error
+ *
+ *  Called by: system function
+ *
+ *
+ *--------------------------------------------------------------*/
+
+/* Memory write function PIO */
+
+int
+isp1763_mem_write(struct isp1763_dev *dev,
+	u32 start_add, u32 end_add, u32 * buffer, u32 length, u16 dir)
+{
+	int a = length;
+	u8 one = (u8) (*buffer);
+	u16 two = (u16) (*buffer);
+
+
+	isp1763_reg_write16(dev, HC_MEM_READ_REG, start_add);
+	/* This delay requirement comes from the ISP1763A programming guide */
+	ndelay(100);
+
+	if (a == 1) {
+		isp1763_reg_write16(dev, HC_DATA_REG, one);
+		return 0;
+	}
+	if (a == 2) {
+		isp1763_reg_write16(dev, HC_DATA_REG, two);
+		return 0;
+	}
+
+	while (a > 0) {
+		isp1763_reg_write16(dev, HC_DATA_REG, (u16) (*buffer));
+		if (a >= 3)
+			isp1763_reg_write16(dev, HC_DATA_REG,
+					    (u16) ((*buffer) >> 16));
+		start_add += 4;
+		a -= 4;
+		if (a <= 0)
+			break;
+		buffer += 1;
+
+	}
+
+	return ((a < 0) || (a == 0)) ? 0 : (-1);
+
+}
+EXPORT_SYMBOL(isp1763_mem_write);
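+
+/*
+ * Usage sketch (address and size are illustrative, not taken from the
+ * driver): PTD payload data is moved to and from ISP1763 on-chip memory
+ * with the PIO helpers above, e.g.
+ *
+ *	u32 payload[16];
+ *	isp1763_mem_write(dev, 0x0400, 0, payload, sizeof(payload), 0);
+ *	isp1763_mem_read(dev, 0x0400, 0, payload, sizeof(payload), 0);
+ *
+ * The end_add and dir arguments are accepted for interface compatibility
+ * but are not used by these PIO implementations.
+ */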
+
+
+/*--------------------------------------------------------------*
+ *
+ * Module details: isp1763_register_driver
+ *
+ * This function is used by top driver (OTG, HCD, DCD) to register
+ * their communication functions (probe, remove, suspend, resume) using
+ * the drv data structure.
+ * This function will call the probe function of the driver if the ISP1763
+ * corresponding to the driver is enabled
+ *
+ *  Input: struct isp1763_driver *drv  --> Driver structure.
+ *  Output result
+ *         0 or positive= complete
+ *         negative errno= error.
+ *
+ *  Called by: system function module_init
+ *
+ *
+ *--------------------------------------------------------------*/
+
+int
+isp1763_register_driver(struct isp1763_driver *drv)
+{
+	struct isp1763_dev *dev;
+	int result = -EINVAL;
+
+	hal_entry("%s: Entered\n", __FUNCTION__);
+	info("isp1763_register_driver(drv=%p)\n", drv);
+
+	if (!drv) {
+		return -EINVAL;
+	}
+
+	dev = &isp1763_loc_dev[drv->index];
+	if (!dev->baseaddress)
+		return -EINVAL;
+
+	dev->active = 1;	/* set the driver as active*/
+
+	if (drv->probe) {
+		result = drv->probe(dev, drv->id);
+	} else {
+		printk("%s no probe function for index %d\n", __FUNCTION__,
+			(int)drv->index);
+	}
+
+	if (result >= 0) {
+		pr_debug(KERN_INFO __FILE__ ": Registered Driver %s\n",
+			drv->name);
+		dev->driver = drv;
+	}
+	hal_entry("%s: Exit\n", __FUNCTION__);
+	return result;
+}				/* End of isp1763_register_driver */
+EXPORT_SYMBOL(isp1763_register_driver);
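+
+/*
+ * Usage sketch (hypothetical client, names are illustrative): a host
+ * controller driver fills in an isp1763_driver structure and registers it
+ * from its own module_init, e.g.
+ *
+ *	static struct isp1763_driver pehci_driver = {
+ *		.name   = "pehci",
+ *		.index  = ISP1763_HC,
+ *		.probe  = pehci_probe,
+ *		.remove = pehci_remove,
+ *	};
+ *
+ *	ret = isp1763_register_driver(&pehci_driver);
+ */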
+
+
+/*--------------------------------------------------------------*
+ *
+ * Module details: isp1763_unregister_driver
+ *
+ * This function is used by top driver (OTG, HCD, DCD) to de-register
+ * their communication functions (probe, remove, suspend, resume) using
+ * the drv data structure.
+ * This function will check whether the driver is registered or not and
+ * call the remove function of the driver if registered
+ *
+ *  Input: struct isp1763_driver *drv  --> Driver structure.
+ *  Output: void
+ *
+ *  Called by: system function module_cleanup
+ *
+ *
+ *--------------------------------------------------------------*/
+
+void
+isp1763_unregister_driver(struct isp1763_driver *drv)
+{
+	struct isp1763_dev *dev;
+	hal_entry("%s: Entered\n", __FUNCTION__);
+
+	info("isp1763_unregister_driver(drv=%p)\n", drv);
+	dev = &isp1763_loc_dev[drv->index];
+	if (dev->driver == drv) {
+		/* driver registered is the same as the requesting driver */
+		drv->remove(dev);
+		dev->driver = NULL;
+		info(": De-registered Driver %s\n", drv->name);
+		return;
+	}
+	hal_entry("%s: Exit\n", __FUNCTION__);
+}				/* End of isp1763_unregister_driver */
+EXPORT_SYMBOL(isp1763_unregister_driver);
+
+
+/*--------------------------------------------------------------*
+ *               ISP1763 Platform driver interface routine.
+ *--------------------------------------------------------------*/
+
+
+/*--------------------------------------------------------------*
+ *
+ *  Module details: isp1763_module_init
+ *
+ *  This is the module initialization function. It registers the driver
+ *  for an isp1763 platform device and also resets the internal data
+ *  structures.
+ *
+ *  Input: void
+ *  Output result
+ *         0= complete
+ *         negative errno= error.
+ *
+ *  Called by: system function module_init
+ *
+ *
+ *
+ -------------------------------------------------------------------*/
+static int __init
+isp1763_module_init(void)
+{
+	int result = 0;
+	hal_entry("%s: Entered\n", __FUNCTION__);
+	pr_debug(KERN_NOTICE "+isp1763_module_init\n");
+	memset(isp1763_loc_dev, 0, sizeof(isp1763_loc_dev));
+
+	result = platform_driver_probe(&isp1763_usb_driver, isp1763_probe);
+
+	pr_debug(KERN_NOTICE "-isp1763_module_init\n");
+	hal_entry("%s: Exit\n", __FUNCTION__);
+	return result;
+}
+
+/*--------------------------------------------------------------*
+ *
+ *  Module details: isp1763_module_cleanup
+ *
+ * This is the module cleanup function. It de-registers the
+ * platform driver and resets the internal data structures.
+ *
+ *  Input: void
+ *  Output void
+ *
+ *  Called by: system function module_cleanup
+ *
+ *
+ *
+ --------------------------------------------------------------*/
+
+static void __exit
+isp1763_module_cleanup(void)
+{
+	pr_debug("Hal Module Cleanup\n");
+	platform_driver_unregister(&isp1763_usb_driver);
+
+	memset(isp1763_loc_dev, 0, sizeof(isp1763_loc_dev));
+}
+
+void dummy_mem_read(struct isp1763_dev *dev)
+{
+	u32 w = 0;
+	isp1763_reg_write16(dev, HC_MEM_READ_REG, 0x0400);
+	w = isp1763_reg_read16(dev, HC_DATA_REG, w);
+
+	pr_debug("dummy_read DONE: %x\n", w);
+	msleep(10);
+}
+/*--------------------------------------------------------------*
+ *
+ *  Module details: isp1763_probe
+ *
+ * Probe function of ISP1763.
+ * This function is called from module_init if the corresponding platform
+ * device is present. It initializes the Host Controller information with
+ * the assigned resources, tests register access to the controller, and
+ * performs a software reset so that the controller is ready for the driver.
+ * It also calls setup_gpio passed from pdata to set up the GPIOs
+ * (e.g. used for the IRQ and RST lines).
+ *
+ *  Input:
+ *              struct platform_device *pdev   ----> Platform Device structure
+ *  Output: int, 0 on success or a negative error code
+ *
+ *  Called by: platform driver core (via module_init)
+ *
+ *
+ *
+ --------------------------------------------------------------**/
+
+static int __devinit
+isp1763_probe(struct platform_device *pdev)
+{
+	u32 reg_data = 0;
+	struct isp1763_dev *loc_dev;
+	int status = 1;
+	u32 hwmodectrl = 0;
+	u16 us_reset_hc = 0;
+	u32 chipid = 0;
+	struct isp1763_platform_data *pdata = pdev->dev.platform_data;
+
+	hal_entry("%s: Entered\n", __FUNCTION__);
+
+	hal_init(("isp1763_probe(pdev=%p)\n", pdev));
+
+	loc_dev = &(isp1763_loc_dev[ISP1763_HC]);
+	loc_dev->dev = pdev;
+
+	/* Get the Host Controller IO and INT resources */
+	loc_dev->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!loc_dev->mem_res) {
+		pr_err("%s: failed to get platform resource mem\n", __func__);
+		return -ENODEV;
+	}
+
+	loc_dev->baseaddress = ioremap_nocache(loc_dev->mem_res->start,
+					resource_size(loc_dev->mem_res));
+	if (!loc_dev->baseaddress) {
+		pr_err("%s: ioremap failed\n", __func__);
+		status = -ENOMEM;
+		goto put_mem_res;
+	}
+	pr_info("%s: ioremap done at: %p\n", __func__,
+					loc_dev->baseaddress);
+	loc_dev->irq = platform_get_irq(pdev, 0);
+	if (!loc_dev->irq) {
+		pr_err("%s: platform_get_irq failed\n", __func__);
+		status = -ENODEV;
+		goto free_regs;
+	}
+
+	loc_dev->index = ISP1763_HC;	/*zero */
+	loc_dev->length = resource_size(loc_dev->mem_res);
+
+	hal_init(("isp1763 HC MEM Base= %p irq = %d\n",
+		loc_dev->baseaddress, loc_dev->irq));
+
+	/* Setup GPIOs and issue RESET_N to Controller */
+	if (pdata->setup_gpio)
+		if (pdata->setup_gpio(1))
+			pr_err("%s: Failed to setup GPIOs for isp1763\n",
+								 __func__);
+	if (pdata->reset_gpio) {
+		gpio_set_value(pdata->reset_gpio, 0);
+		msleep(10);
+		gpio_set_value(pdata->reset_gpio, 1);
+	} else {
+		pr_err("%s: Failed to issue RESET_N to isp1763\n", __func__);
+	}
+
+	dummy_mem_read(loc_dev);
+
+	chipid = isp1763_reg_read32(loc_dev, DC_CHIPID, chipid);
+	pr_info("START: chip id:%x\n", chipid);
+
+	/*reset the host controller  */
+	pr_debug("RESETTING\n");
+	us_reset_hc |= 0x1;
+	isp1763_reg_write16(loc_dev, 0xB8, us_reset_hc);
+	msleep(20);
+	us_reset_hc = 0;
+	us_reset_hc |= 0x2;
+	isp1763_reg_write16(loc_dev, 0xB8, us_reset_hc);
+
+	chipid = isp1763_reg_read32(loc_dev, DC_CHIPID, chipid);
+	pr_info("after HC reset, chipid:%x\n", chipid);
+
+	msleep(20);
+	hwmodectrl = isp1763_reg_read16(loc_dev, HC_HWMODECTRL_REG, hwmodectrl);
+	pr_debug("Mode Ctrl Value before setting buswidth: %x\n", hwmodectrl);
+#ifdef DATABUS_WIDTH_16
+	hwmodectrl &= 0xFFEF;	/*enable the 16 bit bus */
+#else
+	pr_debug("Setting 8-BIT mode\n");
+	hwmodectrl |= 0x0010;	/*enable the 8 bit bus */
+#endif
+	isp1763_reg_write16(loc_dev, HC_HWMODECTRL_REG, hwmodectrl);
+	pr_debug("writing 0x%x to hw mode reg\n", hwmodectrl);
+
+	hwmodectrl = isp1763_reg_read16(loc_dev, HC_HWMODECTRL_REG, hwmodectrl);
+	msleep(100);
+
+	pr_debug("Mode Ctrl Value after setting buswidth: %x\n", hwmodectrl);
+
+
+	chipid = isp1763_reg_read32(loc_dev, DC_CHIPID, chipid);
+	pr_debug("after setting HW MODE to 8bit, chipid:%x\n", chipid);
+
+
+
+	hal_init(("isp1763 DC MEM Base= %lx irq = %d\n",
+		loc_dev->io_base, loc_dev->irq));
+	reg_data = isp1763_reg_read16(loc_dev, HC_SCRATCH_REG, reg_data);
+	pr_debug("Scratch register is 0x%x\n", reg_data);
+	reg_data = 0xABCD;
+	isp1763_reg_write16(loc_dev, HC_SCRATCH_REG, reg_data);
+	reg_data = isp1763_reg_read16(loc_dev, HC_SCRATCH_REG, reg_data);
+	pr_debug("After write, Scratch register is 0x%x\n", reg_data);
+
+	if (reg_data != 0xABCD) {
+		pr_err("%s: Scratch register write mismatch!!\n", __func__);
+		status = -ENODEV;
+		goto free_gpios;
+	}
+
+	memcpy(loc_dev->name, ISP1763_DRIVER_NAME, sizeof(ISP1763_DRIVER_NAME));
+	loc_dev->name[sizeof(ISP1763_DRIVER_NAME)] = 0;
+
+	pr_debug(KERN_NOTICE "-isp1763_probe\n");
+	hal_entry("%s: Exit\n", __FUNCTION__);
+	return 0;
+
+free_gpios:
+	if (pdata->setup_gpio)
+		pdata->setup_gpio(0);
+free_regs:
+	iounmap(loc_dev->baseaddress);
+put_mem_res:
+	loc_dev->baseaddress = NULL;
+	hal_entry("%s: Exit\n", __FUNCTION__);
+	return status;
+}				/* End of isp1763_probe */
+
+
+/*--------------------------------------------------------------*
+ *
+ *  Module details: isp1763_remove
+ *
+ * cleanup function of ISP1763
+ * This function de-initializes the local variables, frees the GPIOs,
+ * and releases the memory resource.
+ *
+ *  Input:
+ *              struct platform_device *dev    ----> Platform Device structure
+ *
+ *  Output void
+ *
+ *  Called by: system function module_cleanup
+ *
+ *
+ *
+ --------------------------------------------------------------*/
+static int __devexit
+isp1763_remove(struct platform_device *pdev)
+{
+	struct isp1763_dev *loc_dev;
+	struct isp1763_platform_data *pdata = pdev->dev.platform_data;
+
+	hal_init(("isp1763_remove(pdev=%p)\n", pdev));
+
+	loc_dev = &isp1763_loc_dev[ISP1763_HC];
+	iounmap(loc_dev->baseaddress);
+	loc_dev->baseaddress = NULL;
+	if (pdata->setup_gpio)
+		return pdata->setup_gpio(0);
+
+	return 0;
+}				/* End of isp1763_remove */
+
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
+module_init(isp1763_module_init);
+module_exit(isp1763_module_cleanup);
diff --git a/drivers/usb/host/pehci/hal/hal_msm.h b/drivers/usb/host/pehci/hal/hal_msm.h
new file mode 100644
index 0000000..a7a65b7
--- /dev/null
+++ b/drivers/usb/host/pehci/hal/hal_msm.h
@@ -0,0 +1,85 @@
+/* 
+* Copyright (C) ST-Ericsson AP Pte Ltd 2010 
+*
+* ISP1763 Linux OTG Controller driver : hal
+* 
+* This program is free software; you can redistribute it and/or modify it under the terms of 
+* the GNU General Public License as published by the Free Software Foundation; version 
+* 2 of the License. 
+* 
+* This program is distributed in the hope that it will be useful, but WITHOUT ANY  
+* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS  
+* FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more  
+* details. 
+* 
+* You should have received a copy of the GNU General Public License 
+* along with this program; if not, write to the Free Software 
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. 
+* 
+* This is a hardware abstraction layer header file.
+* 
+* Author : wired support <wired.support@stericsson.com>
+*
+*/
+
+#ifndef	HAL_X86_H
+#define	HAL_X86_H
+
+#define	DRIVER_AUTHOR	"ST-ERICSSON	  "
+#define	DRIVER_DESC	"ISP1763 bus driver"
+
+/* Driver tuning, per ST-ERICSSON requirements:	*/
+
+#define	MEM_TO_CHECK		4096	/*bytes, must be multiple of 2 */
+
+/* BIT defines */
+#define	BIT0	(1 << 0)
+#define	BIT1	(1 << 1)
+#define	BIT2	(1 << 2)
+#define	BIT3	(1 << 3)
+#define	BIT4	(1 << 4)
+#define	BIT5	(1 << 5)
+#define	BIT6	(1 << 6)
+#define	BIT7	(1 << 7)
+#define	BIT8	(1 << 8)
+#define	BIT9	(1 << 9)
+#define	BIT10	(1 << 10)
+#define	BIT11	(1 << 11)
+#define	BIT12	(1 << 12)
+#define	BIT13	(1 << 13)
+#define	BIT14	(1 << 14)
+#define	BIT15	(1 << 15)
+#define	BIT16	(1 << 16)
+#define	BIT17	(1 << 17)
+#define	BIT18	(1 << 18)
+#define	BIT19	(1 << 19)
+#define	BIT20	(1 << 20)
+#define	BIT21	(1 << 21)
+#define	BIT22	(1 << 22)
+#define	BIT23	(1 << 23)
+#define	BIT24	(1 << 24)
+#define	BIT25	(1 << 25)
+#define	BIT26	(1 << 26)
+#define	BIT27	(1 << 27)
+#define	BIT28	(1 << 28)
+#define	BIT29	(1 << 29)
+#define	BIT30	(1 << 30)
+#define	BIT31	(1 << 31)
+
+/* Definitions Related to Chip Address and CPU Physical	Address
+ * cpu_phy_add:	CPU Physical Address , it uses 32 bit data per address
+ * chip_add   :	Chip Address, it uses double word(64) bit data per address
+ */
+#define	chip_add(cpu_phy_add)		(((cpu_phy_add)	- 0x400) / 8)
+#define	cpu_phy_add(chip_add)		((8 * (chip_add)) + 0x400)
+
+/* for getting end add,	and start add, provided	we have	one address with us */
+/* IMPORTANT length  hex(base16) and dec(base10) works fine*/
+#define	end_add(start_add, length)	((start_add) + ((length) - 4))
+#define	start_add(end_add, length)	((end_add) - ((length) - 4))
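+
+/* Worked example (illustrative values): a payload buffer at CPU physical
+ * address 0x0410 corresponds to chip address chip_add(0x0410) =
+ * (0x0410 - 0x400) / 8 = 2, and cpu_phy_add(2) = (8 * 2) + 0x400 = 0x0410
+ * recovers it; for a 16-byte buffer starting at 0x0410,
+ * end_add(0x0410, 16) = 0x0410 + (16 - 4) = 0x041C.
+ */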
+
+/* Device Registers*/
+#define	DEV_UNLOCK_REGISTER		0x7C
+#define	DEV_INTERRUPT_REGISTER		0x18
+#define	INT_ENABLE_REGISTER		0x14
+
+#endif /* HAL_X86_H */
diff --git a/drivers/usb/host/pehci/hal/isp1763.h b/drivers/usb/host/pehci/hal/isp1763.h
new file mode 100644
index 0000000..7355185
--- /dev/null
+++ b/drivers/usb/host/pehci/hal/isp1763.h
@@ -0,0 +1,227 @@
+/* 
+* Copyright (C) ST-Ericsson AP Pte Ltd 2010 
+*
+* ISP1763 Linux OTG Controller driver : hal
+* 
+* This program is free software; you can redistribute it and/or modify it under the terms of 
+* the GNU General Public License as published by the Free Software Foundation; version 
+* 2 of the License. 
+* 
+* This program is distributed in the hope that it will be useful, but WITHOUT ANY  
+* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS  
+* FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more  
+* details. 
+* 
+* You should have received a copy of the GNU General Public License 
+* along with this program; if not, write to the Free Software 
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. 
+* 
+* This is a hardware abstraction layer header file.
+* 
+* Author : wired support <wired.support@stericsson.com>
+*
+*/
+
+#ifndef	ISP1763_H
+#define	ISP1763_H
+
+
+
+/* For debugging option: ------------------- */
+#define PTD_DUMP_SCHEDULE
+#undef  PTD_DUMP_SCHEDULE
+
+#define PTD_DUMP_COMPLETE
+#undef  PTD_DUMP_COMPLETE
+/* ------------------------------------*/
+#define CONFIG_ISO_SUPPORT 
+
+#ifdef CONFIG_ISO_SUPPORT
+
+#define	ISO_DBG_ENTRY 1
+#define	ISO_DBG_EXIT  1
+#define	ISO_DBG_ADDR 1
+#define	ISO_DBG_DATA 1
+#define	ISO_DBG_ERR  1
+#define	ISO_DBG_INFO 1
+
+#if 0				/* Set to 1 to enable isochronous debugging */
+#define	iso_dbg(category, format, arg...) \
+do \
+{ \
+	if(category) \
+	{ \
+		printk(format, ## arg);	\
+	} \
+} while(0)
+#else
+#define	iso_dbg(category, format, arg...) do { } while(0)
+#endif
+
+#endif /* CONFIG_ISO_SUPPORT */
+
+/*Debug	For Entry/Exit of the functions	*/
+//#define HCD_DEBUG_LEVEL1 
+#ifdef HCD_DEBUG_LEVEL1
+#define	pehci_entry(format, args... ) printk(format, ##args)
+#else
+#define	pehci_entry(format, args...) do	{ } while(0)
+#endif
+
+/*Debug	for Port Info and Errors */
+//#define HCD_DEBUG_LEVEL2 
+#ifdef HCD_DEBUG_LEVEL2
+#define	pehci_print(format, args... ) printk(format, ##args)
+#else
+#define	pehci_print(format, args...) do	{ } while(0)
+#endif
+
+/*Debug	For the	Port changes and Enumeration */
+//#define HCD_DEBUG_LEVEL3 
+#ifdef HCD_DEBUG_LEVEL3
+#define	pehci_info(format,arg...) printk(format, ##arg)
+#else
+#define	pehci_info(format,arg...) do {}	while (0)
+#endif
+
+/*Debug	For Transfer flow  */
+// #define HCD_DEBUG_LEVEL4 
+#ifdef HCD_DEBUG_LEVEL4
+#define	pehci_check(format,args...) printk(format, ##args)
+#else
+#define	pehci_check(format,args...)
+#endif
+/*******************END	HOST CONTROLLER**********************************/
+
+
+
+/*******************START DEVICE CONTROLLER******************************/
+
+/* For MTP support */
+#undef MTP_ENABLE		/* Enable to add MTP support; But requires MTP class driver to be present to work */
+/*For CHAPTER8 TEST */
+#undef	CHAPTER8_TEST		/* Enable to Pass Chapter 8 Test */
+
+/* Debug Entry/Exit of Function as well as some other Info */
+//#define DEV_DEBUG_LEVEL2
+#ifdef DEV_DEBUG_LEVEL2
+#define	dev_print(format,arg...) printk(format,	##arg)
+#else
+#define	dev_print(format,arg...) do {} while (0)
+#endif
+
+/*Debug	for Interrupt ,	Registers , device Enable/Disable and some other info */
+//#define DEV_DEBUG_LEVEL3
+#undef dev_info
+#ifdef DEV_DEBUG_LEVEL3
+#define	dev_info(format,arg...)	printk(format, ##arg)
+#else
+#define	dev_info(format,arg...)	do {} while (0)
+#endif
+
+/*Debug for Transfer flow, Enumeration and Packet info */
+//#define DEV_DEBUG_LEVEL4
+#ifdef DEV_DEBUG_LEVEL4
+#define	dev_check(format,args...) printk(format, ##args)
+#else
+#define	dev_check(format,args...) do{}while(0)
+#endif
+/*******************END	DEVICE CONTROLLER********************************/
+
+
+/*******************START MSCD*******************************************/
+/*Debug Entry/Exit of Function as well as some other Information*/
+//#define MSCD_DEBUG_LEVEL2
+#ifdef MSCD_DEBUG_LEVEL2
+#define	mscd_print(format,arg...) printk(format, ##arg)
+#else
+#define	mscd_print(format,arg...) do {}	while (0)
+#endif
+
+/*Debug	for Info */
+//#define MSCD_DEBUG_LEVEL3
+#ifdef MSCD_DEBUG_LEVEL3
+#define	mscd_info(format,arg...) printk(format,	##arg)
+#else
+#define	mscd_info(format,arg...) do {} while (0)
+#endif
+/*******************END	MSCD*********************************************/
+
+
+/*******************START OTG CONTROLLER*********************************/
+/*#define	OTG */			/*undef	for Device only	and Host only */
+#define	ALL_FSM_FLAGS
+/*Debug	for Entry/Exit and Info	*/
+/* #define OTG_DEBUG_LEVEL1 */
+#ifdef OTG_DEBUG_LEVEL1
+#define	otg_entry(format, args... ) printk(format, ##args)
+#else
+#define	otg_entry(format, args...) do {	} while(0)
+#endif
+
+/*Debug	for State Machine Flow */
+/* #define OTG_DEBUG_LEVEL2 */
+#ifdef OTG_DEBUG_LEVEL2
+#define	otg_print(format,arg...) printk(format,	##arg)
+#else
+#define	otg_print(format,arg...) do {} while (0)
+#endif
+/*Debug	for Info */
+/* #define OTG_DEBUG_LEVEL3 */
+#ifdef OTG_DEBUG_LEVEL3
+#define	otg_info(format,arg...)	printk(format, ##arg)
+#else
+#define	otg_info(format,arg...)	do {} while (0)
+#endif
+
+/* #define OTG_DEBUG_LEVEL4 */
+#ifdef OTG_DEBUG_LEVEL4
+#define	otg_printB(format,arg...) printk(format, ##arg)
+#else
+#define	otg_printB(format,arg...) do {}	while (0)
+#endif
+/*******************END	OTG CONTROLLER***********************************/
+
+
+
+/*******************START FOR HAL ***************************************/
+#define info pr_debug
+#define warn pr_warn
+/*Debug For Entry and Exit of the functions */
+#undef HAL_DEBUG_LEVEL1
+#ifdef HAL_DEBUG_LEVEL1
+#define	hal_entry(format, args... ) printk(format, ##args)
+#else
+#define	hal_entry(format, args...) do {	} while(0)
+#endif
+
+/*Debug	For Interrupt information */
+#undef HAL_DEBUG_LEVEL2
+#ifdef HAL_DEBUG_LEVEL2
+#define	hal_int(format,	args...	) printk(format, ##args)
+#else
+#define	hal_int(format,	args...) do { }	while(0)
+#endif
+
+/*Debug	For HAL	Initialisation and Mem Initialisation */
+#undef HAL_DEBUG_LEVEL3
+#ifdef HAL_DEBUG_LEVEL3
+#define	hal_init(format, args... ) printk(format, ##args)
+#else
+#define	hal_init(format, args...) do { } while(0)
+#endif
+/*******************END	FOR HAL*******************************************/
+
+
+
+/*******************START FOR ALL CONTROLLERS*****************************/
+/*#define	CONFIG_USB_OTG */	/*undef	for Device only	and Host only */
+/*#define	ISP1763_DEVICE */
+
+#ifdef CONFIG_USB_DEBUG
+#define	DEBUG
+#else
+#undef DEBUG
+#endif
+/*******************END	FOR ALL	CONTROLLERS*******************************/
+#endif
diff --git a/drivers/usb/host/pehci/host/Makefile b/drivers/usb/host/pehci/host/Makefile
new file mode 100644
index 0000000..0c8552e
--- /dev/null
+++ b/drivers/usb/host/pehci/host/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the pehci driver (if driver is inside kernel tree).
+#
+
+obj-$(CONFIG_USB_PEHCI_HCD) += pehci.o
+
diff --git a/drivers/usb/host/pehci/host/itdptd.c b/drivers/usb/host/pehci/host/itdptd.c
new file mode 100644
index 0000000..6699c3a
--- /dev/null
+++ b/drivers/usb/host/pehci/host/itdptd.c
@@ -0,0 +1,2156 @@
+/* 
+* Copyright (C) ST-Ericsson AP Pte Ltd 2010 
+*
+* ISP1763 Linux OTG Controller driver : host
+* 
+* This program is free software; you can redistribute it and/or modify it under the terms of 
+* the GNU General Public License as published by the Free Software Foundation; version 
+* 2 of the License. 
+* 
+* This program is distributed in the hope that it will be useful, but WITHOUT ANY  
+* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS  
+* FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more  
+* details. 
+* 
+* You should have received a copy of the GNU General Public License 
+* along with this program; if not, write to the Free Software 
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. 
+* 
+* This is a host controller driver file. Isochronous event processing is handled here.
+* 
+* Author : wired support <wired.support@stericsson.com>
+*
+*/
+#ifdef CONFIG_ISO_SUPPORT
+void phcd_clean_periodic_ep(void);
+#endif
+
+#ifdef CONFIG_ISO_SUPPORT
+
+#define MAX_URBS		8
+#define MAX_EPS			2	/* maximum 2 endpoints supported in ISO transfers */
+/* Number of microframes per frame that are scheduled. For a high-speed
+ * device NUMMICROFRAME should be 8, but microframe #7 fails, so only
+ * 4 microframes (#0 -> #4) are used.
+ * Writer : LyNguyen - 25Nov09
+ */
+#define NUMMICROFRAME		8
+struct urb *gstUrb_pending[MAX_URBS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+
+struct usb_host_endpoint *periodic_ep[MAX_EPS];
+
+int giUrbCount = 0;		/* count the pending urb*/
+int giUrbIndex = 0;		/*the index of urb need to be scheduled next*/
+/*
+ * phcd_iso_sitd_to_ptd - convert an SITD into a PTD
+ *
+ * phci_hcd *hcd
+ *      - Main host controller driver structure
+ * struct ehci_sitd *sitd
+ *  - Isochronous Transfer Descriptor, contains elements as defined by the
+ *        EHCI standard plus a few more specific elements.
+ * struct urb *urb
+ *  - USB Request Block, contains information regarding the type and how much data
+ *    is requested to be transferred.
+ * void  * ptd
+ *  - Points to the ISO ptd structure that needs to be initialized
+ *
+ * API Description
+ * This is mainly responsible for:
+ *  -Initializing the PTD that will be used for the ISO transfer
+ */
+void *
+phcd_iso_sitd_to_ptd(phci_hcd * hcd,
+	struct ehci_sitd *sitd, struct urb *urb, void *ptd)
+{
+	struct _isp1763_isoptd *iso_ptd;
+	struct isp1763_mem_addr *mem_addr;
+
+	unsigned long max_packet, mult, length, td_info1, td_info3;
+	unsigned long token, port_num, hub_num, data_addr;
+	unsigned long frame_number;
+
+	iso_dbg(ISO_DBG_ENTRY, "phcd_iso_sitd_to_ptd entry\n");
+
+	/* Variable initialization */
+	iso_ptd = (struct _isp1763_isoptd *) ptd;
+	mem_addr = &sitd->mem_addr;
+
+	/*
+	 * For both ISO and INT endpoint descriptors, new bit fields were added to
+	 * specify whether or not the endpoint supports high bandwidth, and if so
+	 * the number of additional packets that the endpoint can support during a
+	 * single microframe.
+	 * Bits 12:11 specify whether the endpoint supports high-bandwidth transfers
+	 * Valid values:
+	 *             00 None (1 transaction/uFrame)
+	 *             01 1 additional transaction
+	 *             10 2 additional transactions
+	 *             11 reserved
+	 */
+	max_packet = usb_maxpacket(urb->dev, urb->pipe,usb_pipeout(urb->pipe));
+
+	/*
+	 * We need to add 1 since our Multi starts with 1 instead of the USB specs defined
+	 * zero (0).
+	 */
+	mult = 1 + ((max_packet >> 11) & 0x3);
+	max_packet &= 0x7ff;
+
+	/* This is the size of the request (bytes to write or bytes to read) */
+	length = sitd->length;
+
+	/*
+	 * Set V bit to indicate that there is payload to be sent or received. And
+	 * indicate that the current PTD is active.
+	 */
+	td_info1 = QHA_VALID;
+
+	/*
+	 * Set the number of bytes that can be transferred by this PTD. This indicates
+	 * the depth of the data field.
+	 */
+	td_info1 |= (length << 3);
+
+	/*
+	 * Set the maximum packet length which indicates the maximum number of bytes that
+	 * can be sent to or received from the endpoint in a single data packet.
+	 */
+	if (urb->dev->speed != USB_SPEED_HIGH) {
+		/*
+		 * According to the ISP1763 specs for sITDs, OUT token max packet should
+		 * not be more  than 188 bytes, while IN token max packet not more than
+		 * 192 bytes (ISP1763 Rev 3.01, Table 72, page 79)
+		 */
+		if (usb_pipein(urb->pipe) && (max_packet > 192)) {
+			iso_dbg(ISO_DBG_INFO,
+				"IN Max packet over maximum\n");
+			max_packet = 192;
+		}
+
+		if ((!usb_pipein(urb->pipe)) && (max_packet > 188)) {
+			iso_dbg(ISO_DBG_INFO,
+				"OUT Max packet over maximum\n");
+			max_packet = 188;
+		}
+	}
+	td_info1 |= (max_packet << 18);
+
+	/*
+	 * Place the FIRST BIT of the endpoint number here.
+	 */
+	td_info1 |= (usb_pipeendpoint(urb->pipe) << 31);
+
+	/*
+	 * Set the number of successive packets the HC can submit to the endpoint.
+	 */
+	if (urb->dev->speed == USB_SPEED_HIGH) {
+		td_info1 |= MULTI(mult);
+	}
+
+	/* Set the first DWORD */
+	iso_ptd->td_info1 = td_info1;
+	iso_dbg(ISO_DBG_DATA, "[phcd_iso_itd_to_ptd]: DWORD0 = 0x%08x\n",
+		iso_ptd->td_info1);
+
+	/*
+	 * Since the first bit has already been added on the first DWORD of the PTD
+	 * we only need to add the last 3-bits of the endpoint number.
+	 */
+	token = (usb_pipeendpoint(urb->pipe) & 0xE) >> 1;
+
+	/*
+	 * Get the device address and set it accordingly to its assigned bits of the 2nd
+	 * DWORD.
+	 */
+	token |= usb_pipedevice(urb->pipe) << 3;
+
+	/* See a split transaction is needed */
+	if (urb->dev->speed != USB_SPEED_HIGH) {
+		/*
+		 * If we are performing a SPLIT transaction indicate that it is so by setting
+		 * the S bit of the second DWORD.
+		 */
+		token |= 1 << 14;
+
+		port_num = urb->dev->ttport;
+		hub_num = urb->dev->tt->hub->devnum;
+
+		/* Set the port number of the hub or embedded TT */
+		token |= port_num << 18;
+
+		/*
+		 * Set the hub address, this should be zero for the internal or
+		 * embedded hub
+		 */
+		token |= hub_num << 25;
+	}
+
+	/* if(urb->dev->speed != USB_SPEED_HIGH) */
+	/*
+	 * Determine if the direction of this pipe is IN, if so set the Token bit of
+	 * the second DWORD to indicate it as IN. Since it is initialized to zero and
+	 * zero indicates an OUT token, then we do not need anything to the Token bit
+	 * if it is an OUT token.
+	 */
+	if (usb_pipein(urb->pipe)) {
+		token |= (IN_PID << 10);
+	}
+
+	/* Set endpoint type to Isochronous */
+	token |= EPTYPE_ISO;
+
+	/* Set the second DWORD */
+	iso_ptd->td_info2 = token;
+	iso_dbg(ISO_DBG_DATA, "[phcd_iso_itd_to_ptd]: DWORD1 = 0x%08x\n",
+		iso_ptd->td_info2);
+
+	/*
+	 * Get the physical address of the memory location that was allocated for this PTD
+	 * in the PAYLOAD region, using the formula indicated in section 7.2.2 of the ISP1763 specs
+	 * rev 3.01 page 17 to 18.
+	 */
+	data_addr = ((unsigned long) (mem_addr->phy_addr) & 0xffff) - 0x400;
+	data_addr >>= 3;
+
+	/*  Set it to its location in the third DWORD */
+	td_info3 =( 0xffff&data_addr) << 8;
+
+	/*
+	 * Set the frame number when this PTD will be sent for ISO OUT or IN
+	 * Bits 0 to 2 are don't care, only bits 3 to 7.
+	 */
+	frame_number = sitd->framenumber;
+	frame_number = sitd->start_frame;
+	td_info3 |= (0xff& ((frame_number) << 3));
+
+	/* Set the third DWORD */
+	iso_ptd->td_info3 = td_info3;
+	iso_dbg(ISO_DBG_DATA, "[phcd_iso_itd_to_ptd]: DWORD2 = 0x%08x\n",
+		iso_ptd->td_info3);
+
+	/*
+	 * Set the A bit of the fourth DWORD to 1 to indicate that this PTD is active.
+	 * This has the same functionality as the V bit of DWORD0
+	 */
+	iso_ptd->td_info4 = QHA_ACTIVE;
+	iso_dbg(ISO_DBG_DATA, "[phcd_iso_itd_to_ptd]: DWORD3 = 0x%08x\n",
+		iso_ptd->td_info4);
+
+	/* Set the fourth DWORD to specify which uSOFs the start split needs to be placed */
+	if (usb_pipein(urb->pipe)){
+		iso_ptd->td_info5 = (sitd->ssplit);
+	}else{
+		iso_ptd->td_info5 = (sitd->ssplit << 2);
+	}
+	iso_dbg(ISO_DBG_DATA, "[phcd_iso_itd_to_ptd]: DWORD4 = 0x%08x\n",
+		iso_ptd->td_info5);
+
+	/*
+	 * Set the fifth DWORD to specify which uSOFs the complete split needs to be sent.
+	 * This is VALID only for IN (since ISO transfers don't have handshake stages)
+	 */
+	iso_ptd->td_info6 = sitd->csplit;
+	iso_dbg(ISO_DBG_DATA, "[phcd_iso_itd_to_ptd]: DWORD5 = 0x%08x\n",
+		iso_ptd->td_info6);
+
+	/*printk(" [phcd_iso_itd_to_ptd]: DWORD0 = 0x%08x\n",iso_ptd->td_info1);
+	printk(" [phcd_iso_itd_to_ptd]: DWORD1 = 0x%08x\n",iso_ptd->td_info2);
+	printk(" [phcd_iso_itd_to_ptd]: DWORD2 = 0x%08x\n",iso_ptd->td_info3);
+	printk(" [phcd_iso_itd_to_ptd]: DWORD3 = 0x%08x\n",iso_ptd->td_info4);
+	printk(" [phcd_iso_itd_to_ptd]: DWORD4 = 0x%08x\n",iso_ptd->td_info5);
+	printk(" [phcd_iso_itd_to_ptd]: DWORD5 = 0x%08x\n",iso_ptd->td_info6);*/
+	iso_dbg(ISO_DBG_EXIT, "phcd_iso_itd_to_ptd exit\n");
+	return iso_ptd;
+}
+
+
+/*
+ * phcd_iso_itd_to_ptd - convert an ITD into a PTD
+ *
+ * phci_hcd *hcd
+ *      - Main host controller driver structure
+ * struct ehci_itd *itd
+ *  - Isochronous Transfer Descriptor, contains elements as defined by the
+ *        EHCI standard plus a few more ST-ERICSSON specific elements.
+ * struct urb *urb
+ *  - USB Request Block, contains information regarding the type and how much data
+ *    is requested to be transferred.
+ * void  * ptd
+ *  - Points to the ISO ptd structure that needs to be initialized
+ *
+ * API Description
+ * This is mainly responsible for:
+ *  -Initializing the PTD that will be used for the ISO transfer
+ */
+void *
+phcd_iso_itd_to_ptd(phci_hcd * hcd,
+	struct ehci_itd *itd, struct urb *urb, void *ptd)
+{
+	struct _isp1763_isoptd *iso_ptd;
+	struct isp1763_mem_addr *mem_addr;
+
+	unsigned long max_packet, mult, length, td_info1, td_info3;
+	unsigned long token, port_num, hub_num, data_addr;
+	unsigned long frame_number;
+	int maxpacket;
+	iso_dbg(ISO_DBG_ENTRY, "phcd_iso_itd_to_ptd entry\n");
+
+	/* Variable initialization */
+	iso_ptd = (struct _isp1763_isoptd *) ptd;
+	mem_addr = &itd->mem_addr;
+
+	/*
+	 * For both ISO and INT endpoint descriptors, new bit fields were added to
+	 * specify whether or not the endpoint supports high bandwidth, and if so
+	 * the number of additional packets that the endpoint can support during a
+	 * single microframe.
+	 * Bits 12:11 specify whether the endpoint supports high-bandwidth transfers
+	 * Valid values:
+	 *             00 None (1 transaction/uFrame)
+	 *             01 1 additional transaction
+	 *             10 2 additional transactions
+	 *             11 reserved
+	 */
+	max_packet = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
+
+	maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));	
+
+	/*
+	 * We need to add 1 since our Multi starts with 1 instead of the USB specs defined
+	 * zero (0).
+	 */
+	maxpacket &= 0x7ff;
+	mult = 1 + ((max_packet >> 11) & 0x3);
+
+
+	max_packet &= 0x7ff;
+
+	/* This is the size of the request (bytes to write or bytes to read) */
+	length = itd->length;
+
+	/*
+	 * Set V bit to indicate that there is payload to be sent or received. And
+	 * indicate that the current PTD is active.
+	 */
+	td_info1 = QHA_VALID;
+
+	/*
+	 * Set the number of bytes that can be transferred by this PTD. This indicates
+	 * the depth of the data field.
+	 */
+	td_info1 |= (length << 3);
+
+	/*
+	 * Set the maximum packet length which indicates the maximum number of bytes that
+	 * can be sent to or received from the endpoint in a single data packet.
+	 */
+	if (urb->dev->speed != USB_SPEED_HIGH) {
+		/*
+		 * According to the ISP1763 specs for sITDs, OUT token max packet should
+		 * not be more  than 188 bytes, while IN token max packet not more than
+		 * 192 bytes (ISP1763 Rev 3.01, Table 72, page 79)
+		 */
+		if (usb_pipein(urb->pipe) && (max_packet > 192)) {
+			iso_dbg(ISO_DBG_INFO,
+				"[phcd_iso_itd_to_ptd]: IN Max packet over maximum\n");
+			max_packet = 192;
+		}
+
+		if ((!usb_pipein(urb->pipe)) && (max_packet > 188)) {
+			iso_dbg(ISO_DBG_INFO,
+				"[phcd_iso_itd_to_ptd]: OUT Max packet over maximum\n");
+			max_packet = 188;
+		}
+	} else {		/*HIGH SPEED */
+
+		if (max_packet > 1024){
+			max_packet = 1024;
+		}
+	}
+	td_info1 |= (max_packet << 18);
+
+	/*
+	 * Place the FIRST BIT of the endpoint number here.
+	 */
+	td_info1 |= (usb_pipeendpoint(urb->pipe) << 31);
+
+	/*
+	 * Set the number of successive packets the HC can submit to the endpoint.
+	 */
+	if (urb->dev->speed == USB_SPEED_HIGH) {
+		td_info1 |= MULTI(mult);
+	}
+
+	/* Set the first DWORD */
+	iso_ptd->td_info1 = td_info1;
+	iso_dbg(ISO_DBG_DATA, "[phcd_iso_itd_to_ptd]: DWORD0 = 0x%08x\n",
+		iso_ptd->td_info1);
+
+	/*
+	 * Since the first bit has already been added on the first DWORD of the PTD
+	 * we only need to add the last 3-bits of the endpoint number.
+	 */
+	token = (usb_pipeendpoint(urb->pipe) & 0xE) >> 1;
+
+	/*
+	 * Get the device address and set it accordingly to its assigned bits of the 2nd
+	 * DWORD.
+	 */
+	token |= usb_pipedevice(urb->pipe) << 3;
+
+	/* See a split transaction is needed */
+	if (urb->dev->speed != USB_SPEED_HIGH) {
+		/*
+		 * If we are performing a SPLIT transaction indicate that it is so by setting
+		 * the S bit of the second DWORD.
+		 */
+		token |= 1 << 14;
+
+		port_num = urb->dev->ttport;
+		hub_num = urb->dev->tt->hub->devnum;
+
+		/* Set the port number of the hub or embedded TT */
+		token |= port_num << 18;
+
+		/*
+		 * Set the hub address, this should be zero for the internal or
+		 * embedded hub
+		 */
+		token |= hub_num << 25;
+	}
+
+	/* if(urb->dev->speed != USB_SPEED_HIGH) */
+	/*
+	 * Determine if the direction of this pipe is IN, if so set the Token bit of
+	 * the second DWORD to indicate it as IN. Since it is initialized to zero and
+	 * zero indicates an OUT token, then we do not need anything to the Token bit
+	 * if it is an OUT token.
+	 */
+	if (usb_pipein(urb->pipe)){
+		token |= (IN_PID << 10);
+	}
+
+	/* Set endpoint type to Isochronous */
+	token |= EPTYPE_ISO;
+
+	/* Set the second DWORD */
+	iso_ptd->td_info2 = token;
+	iso_dbg(ISO_DBG_DATA, "[phcd_iso_itd_to_ptd]: DWORD1 = 0x%08x\n",
+		iso_ptd->td_info2);
+
+	/*
+	 * Get the physical address of the memory location that was allocated for this PTD
+	 * in the PAYLOAD region, using the formula indicated in section 7.2.2 of the ISP1763 specs
+	 * rev 3.01 page 17 to 18.
+	 */
+	data_addr = ((unsigned long) (mem_addr->phy_addr) & 0xffff) - 0x400;
+	data_addr >>= 3;
+
+	/*  Set it to its location in the third DWORD */
+	td_info3 = (data_addr&0xffff) << 8;
+
+	/*
+	 * Set the frame number when this PTD will be sent for ISO OUT or IN
+	 * Bits 0 to 2 are don't care, only bits 3 to 7.
+	 */
+	frame_number = itd->framenumber;
+	td_info3 |= (0xff&(frame_number << 3));
+
+	/* Set the third DWORD */
+	iso_ptd->td_info3 = td_info3;
+	iso_dbg(ISO_DBG_DATA, "[phcd_iso_itd_to_ptd]: DWORD2 = 0x%08x\n",
+		iso_ptd->td_info3);
+
+	/*
+	 * Set the A bit of the fourth DWORD to 1 to indicate that this PTD is active.
+	 * This has the same functionality as the V bit of DWORD0
+	 */
+	iso_ptd->td_info4 = QHA_ACTIVE;
+	iso_dbg(ISO_DBG_DATA, "[phcd_iso_itd_to_ptd]: DWORD3 = 0x%08x\n",
+		iso_ptd->td_info4);
+
+	/* Set the fourth DWORD to specify which uSOFs the start split needs to be placed */
+	iso_ptd->td_info5 = itd->ssplit;
+	iso_dbg(ISO_DBG_DATA, "[phcd_iso_itd_to_ptd]: DWORD4 = 0x%08x\n",
+		iso_ptd->td_info5);
+
+	/*
+	 * Set the fifth DWORD to specify which uSOFs the complete split needs to be sent.
+	 * This is VALID only for IN (since ISO transfers don't have handshake stages)
+	 */
+	iso_ptd->td_info6 = itd->csplit;
+	iso_dbg(ISO_DBG_DATA, "[phcd_iso_itd_to_ptd]: DWORD5 = 0x%08x\n",
+		iso_ptd->td_info6);
+
+	iso_dbg(ISO_DBG_EXIT, "phcd_iso_itd_to_ptd exit\n");
+	return iso_ptd;
+}				/* phcd_iso_itd_to_ptd */
+
+/*
+ * phcd_iso_scheduling_info - Initializing the start split and complete split.
+ *
+ * phci_hcd *hcd
+ *      - Main host controller driver structure
+ * struct ehci_qh *qhead
+ *  - Contains information about the endpoint.
+ * unsigned long max_pkt
+ *  - Maximum packet size that the endpoint is capable of handling
+ * unsigned long high_speed
+ *  - Indicates if the bus is a high speed bus
+ * unsigned long ep_in
+ *  - Indicates if the endpoint is an IN endpoint
+ *
+ * API Description
+ * This is mainly responsible for:
+ *  - Determining the number of start split needed during an OUT transaction or
+ *    the number of complete splits needed during an IN transaction.
+ */
+unsigned long
+phcd_iso_scheduling_info(phci_hcd * hcd,
+	struct ehci_qh *qhead,
+	unsigned long max_pkt,
+	unsigned long high_speed, unsigned long ep_in)
+{
+	unsigned long count, usof, temp;
+
+	/* Local variable initialization */
+	usof = 0x1;
+
+	if (high_speed) {
+		qhead->csplit = 0;
+
+		/* Always send high speed transfers in first uframes */
+		qhead->ssplit = 0x1;
+		return 0;
+	}
+
+	/* Determine how many 188 byte-transfers are needed to send all data */
+	count = max_pkt / 188;
+
+	/*
+	 * Check if the data is not a multiple of 188; if it is not, then we need
+	 * one more 188-byte transfer to move the last chunk of less than 188 bytes.
+	 */
+	if (max_pkt % 188){
+		count += 1;
+	}
+
+	/*
+	 * Remember that usof was initialized to 0x1 so that means
+	 * that usof is always guaranteed a value of 0x1 and then
+	 * depending on the maxp, other bits of usof will also be set.
+	 */
+	for (temp = 0; temp < count; temp++){
+		usof |= (0x1 << temp);
+	}
+
+	if (ep_in) {
+		/*
+		 * Send start split into first frame.
+		 */
+		qhead->ssplit = 0x1;
+
+		/*
+		 * Indicate that we can send a complete split starting from
+		 * the third uFrame to how much complete split is needed to
+		 * retrieve all data.
+		 *
+		 * Of course, the first uFrame is reserved for the start split, the
+		 * second is reserved for the TT to send the request and get some
+		 * data.
+		 */
+		qhead->csplit = (usof << 2);
+	} else {
+		/*
+		 * For ISO OUT we don't need to send out a complete split
+		 * since we do not require any data coming in to us (since ISO
+		 * do not have integrity checking/handshake).
+		 *
+		 * For start split we indicate that we send a start split from the
+		 * first uFrame up to the last uFrame needed to retrieve all
+		 * data
+		 */
+		qhead->ssplit = usof;
+		qhead->csplit = 0;
+	}	/* else for if(ep_in) */
+	return 0;
+}				/* phcd_iso_scheduling_info */
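+
+/*
+ * Worked example (illustrative numbers): for a full-speed IN endpoint with
+ * max_pkt = 400, count = 400 / 188 = 2 with a remainder, so three 188-byte
+ * chunks are needed and usof = 0x7; the start split goes in the first
+ * uFrame (ssplit = 0x1) and complete splits are scheduled from the third
+ * uFrame onwards (csplit = 0x7 << 2 = 0x1C).
+ */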
+
+/*
+ * phcd_iso_sitd_fill - Allocate memory from the PAYLOAD memory region
+ *
+ * phci_hcd *pHcd_st
+ *  - Main host controller driver structure
+ * struct ehci_sitd *sitd
+ *  - Isochronous Transfer Descriptor, contains elements as defined by the
+ *        EHCI standard plus a few more  specific elements.
+ * struct urb *urb
+ *  - USB Request Block, contains information regarding the type and how much data
+ *    is requested to be transferred.
+ * unsigned long packets
+ *  - Total number of packets to completely transfer this ISO transfer request.
+ *
+ * API Description
+ * This is mainly responsible for:
+ * - Initialize the following elements of the ITS structure
+ *       > sitd->length = length;        -- the size of the request
+ *       > sitd->multi = multi;          -- the number of transactions for
+ *                                         this EP per micro frame
+ *       > sitd->hw_bufp[0] = buf_dma;   -- The base address of the buffer where
+ *                                         to put the data (this base address was
+ *                                         the buffer provided plus the offset)
+ * - Allocating memory from the PAYLOAD memory area, where the data coming from
+ *   the requesting party will be placed or data requested by the requesting party will
+ *   be retrieved when it is available.
+ */
+unsigned long
+phcd_iso_sitd_fill(phci_hcd * hcd,
+	struct ehci_sitd *sitd,
+	struct urb *urb, unsigned long packets)
+{
+	unsigned long length, offset, pipe;
+	unsigned long max_pkt;
+	dma_addr_t buff_dma;
+	struct isp1763_mem_addr *mem_addr;
+
+#ifdef COMMON_MEMORY
+	struct ehci_qh *qhead = NULL;
+#endif
+
+	iso_dbg(ISO_DBG_ENTRY, "phcd_iso_sitd_fill entry\n");
+	/*
+	 * The value for both these variables are supplied by the one
+	 * who submitted the URB.
+	 */
+	length = urb->iso_frame_desc[packets].length;
+	offset = urb->iso_frame_desc[packets].offset;
+
+	/* Initialize the status and actual length of this packet */
+	urb->iso_frame_desc[packets].actual_length = 0;
+	urb->iso_frame_desc[packets].status = -EXDEV;
+
+	/* Buffer for this packet */
+	buff_dma = (u32) ((unsigned char *) urb->transfer_buffer + offset);
+
+	/* Memory for this packet */
+	mem_addr = &sitd->mem_addr;
+
+	pipe = urb->pipe;
+	max_pkt = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe));
+
+	max_pkt = max_pkt & 0x7FF;
+
+	if ((length < 0) || (max_pkt < length)) {
+		iso_dbg(ISO_DBG_ERR,
+			"[phcd_iso_sitd_fill Error]: length exceeds max packet size\n");
+		return -ENOSPC;
+	}
+	sitd->buf_dma = buff_dma;
+
+
+#ifndef COMMON_MEMORY
+	/*
+	 * Allocate memory in the PAYLOAD memory region for the
+	 * data buffer for this SITD
+	 */
+	phci_hcd_mem_alloc(length, mem_addr, 0);
+	if (length && ((mem_addr->phy_addr == 0) || (mem_addr->virt_addr == 0))) {
+		mem_addr = 0;
+		iso_dbg(ISO_DBG_ERR,
+			"[phcd_iso_sitd_fill Error]: No payload memory available\n");
+		return -ENOMEM;
+	}
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	qhead=urb->hcpriv;
+#else
+	qhead = urb->ep->hcpriv;
+#endif
+	if (qhead) {
+
+		mem_addr->phy_addr = qhead->memory_addr.phy_addr + offset;
+
+		mem_addr->virt_addr = qhead->memory_addr.phy_addr + offset;
+	} else {
+		iso_dbg(ISO_DBG_ERR,
+			"[phcd_iso_sitd_fill Error]: No payload memory available\n");
+		return -ENOMEM;
+	}
+
+
+#endif
+	/* Length of this packet */
+	sitd->length = length;
+
+	/* Buffer address, one ptd per packet */
+	sitd->hw_bufp[0] = buff_dma;
+
+	iso_dbg(ISO_DBG_EXIT, "phcd_iso_sitd_fill exit\n");
+	return 0;
+}
+
+/*
+ * phcd_iso_itd_fill - Allocate memory from the PAYLOAD memory region
+ *
+ * phci_hcd *hcd
+ *  - Main host controller driver structure
+ * struct ehci_itd *itd
+ *  - Isochronous Transfer Descriptor, contains elements as defined by the
+ *        EHCI standard plus a few more IC specific elements.
+ * struct urb *urb
+ *  - USB Request Block, contains information regarding the type and how much data
+ *    is requested to be transferred.
+ * unsigned long packets
+ *  - Index of the first isochronous packet handled by this ITD.
+ * unsigned char numofPkts
+ *  - Number of packets this ITD covers.
+ *
+ * API Description
+ * This is mainly responsible for:
+ * - Initialize the following elements of the ITD structure
+ *       > itd->length = length;        -- the size of the request
+ *       > itd->multi = multi;          -- the number of transactions for
+ *                                         this EP per micro frame
+ *       > itd->hw_bufp[0] = buf_dma;   -- The base address of the buffer where
+ *                                         to put the data (this base address was
+ *                                         the buffer provided plus the offset)
+ * - Allocating memory from the PAYLOAD memory area, where the data coming from
+ *   the requesting party will be placed or data requested by the requesting party will
+ *   be retrieved when it is available.
+ */
+unsigned long
+phcd_iso_itd_fill(phci_hcd * hcd,
+	struct ehci_itd *itd,
+	struct urb *urb,
+	unsigned long packets, unsigned char numofPkts)
+{
+	unsigned long length, offset, pipe;
+	unsigned long max_pkt, mult;
+	dma_addr_t buff_dma;
+	struct isp1763_mem_addr *mem_addr;
+#ifdef COMMON_MEMORY
+	struct ehci_qh *qhead = NULL;
+#endif
+	int i = 0;
+
+	iso_dbg(ISO_DBG_ENTRY, "phcd_iso_itd_fill entry\n");
+	for (i = 0; i < 8; i++){
+		itd->hw_transaction[i] = 0;
+	}
+	/*
+	 * The value for both these variables are supplied by the one
+	 * who submitted the URB.
+	 */
+	length = urb->iso_frame_desc[packets].length;
+	offset = urb->iso_frame_desc[packets].offset;
+
+	/* Initialize the status and actual length of this packet */
+	urb->iso_frame_desc[packets].actual_length = 0;
+	urb->iso_frame_desc[packets].status = -EXDEV;
+
+	/* Buffer for this packet */
+	buff_dma = cpu_to_le32((unsigned char *) urb->transfer_buffer + offset);
+
+	/* Memory for this packet */
+	mem_addr = &itd->mem_addr;
+
+	pipe = urb->pipe;
+	max_pkt = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe));
+
+	mult = 1 + ((max_pkt >> 11) & 0x3);
+	max_pkt = max_pkt & 0x7FF;
+	max_pkt *= mult;
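+	/*
+	 * Illustrative example (not part of the original code): a high-bandwidth
+	 * endpoint reporting wMaxPacketSize = 0x1400 has 2 additional transactions
+	 * encoded in bits 12:11, so mult = 3; the base packet size is
+	 * 0x1400 & 0x7FF = 1024, giving at most 3 * 1024 = 3072 bytes per microframe.
+	 */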
+
+	if (max_pkt < length) {
+		iso_dbg(ISO_DBG_ERR,
+			"[phcd_iso_itd_fill Error]: Packet length exceeds max packet size.\n");
+		return -ENOSPC;
+	}
+	itd->buf_dma = buff_dma;
+	for (i = packets + 1; i < numofPkts + packets; i++)
+		length += urb->iso_frame_desc[i].length;
+
+	/*
+	 * Allocate memory in the PAYLOAD memory region for the
+	 * data buffer for this ITD
+	 */
+#ifndef COMMON_MEMORY
+
+	phci_hcd_mem_alloc(length, mem_addr, 0);
+	if (length && ((mem_addr->phy_addr == 0) || (mem_addr->virt_addr == 0))) {
+		mem_addr = 0;
+		iso_dbg(ISO_DBG_ERR,
+			"[phcd_iso_itd_fill Error]: No payload memory available\n");
+		return -ENOMEM;
+	}
+#else
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+	qhead = urb->ep->hcpriv;
+#else
+	qhead=urb->hcpriv;
+#endif
+	if (qhead) {
+
+		mem_addr->phy_addr = qhead->memory_addr.phy_addr + offset;
+
+		mem_addr->virt_addr = qhead->memory_addr.phy_addr + offset;
+	} else {
+		iso_dbg(ISO_DBG_ERR,
+			"[phcd_iso_itd_fill Error]: No payload memory available\n");
+		return -ENOMEM;
+	}
+
+
+#endif
+	/* Length of this packet */
+	itd->length = length;
+
+	/* Number of transaction per uframe */
+	itd->multi = mult;
+
+	/* Buffer address, one ptd per packet */
+	itd->hw_bufp[0] = buff_dma;
+
+	iso_dbg(ISO_DBG_EXIT, "phcd_iso_itd_fill exit\n");
+	return 0;
+}				/* phcd_iso_itd_fill */
+
+/*
+ * phcd_iso_get_sitd_ptd_index - Allocate an ISO PTD from the ISO PTD map list
+ *
+ * phci_hcd *hcd
+ *      - Main host controller driver structure
+ * struct ehci_sitd *sitd
+ *  - Isochronous Transfer Descriptor, contains elements as defined by the
+ *        EHCI standard plus a few more IC specific elements.
+ *
+ * API Description
+ * This is mainly responsible for:
+ * - Allocating an ISO PTD from the ISO PTD map list
+ * - Set the equivalent bit of the allocated PTD to active
+ *   in the bitmap so that this PTD will be included into
+ *   the periodic schedule
+ */
+void
+phcd_iso_get_sitd_ptd_index(phci_hcd * hcd, struct ehci_sitd *sitd)
+{
+	td_ptd_map_buff_t *ptd_map_buff;
+	unsigned long buff_type, max_ptds;
+	unsigned char sitd_index, bitmap;
+
+	/* Local variable initialization */
+	bitmap = 0x1;
+	buff_type = td_ptd_pipe_x_buff_type[TD_PTD_BUFF_TYPE_ISTL];
+	ptd_map_buff = (td_ptd_map_buff_t *) & (td_ptd_map_buff[buff_type]);
+	max_ptds = ptd_map_buff->max_ptds;
+	sitd->sitd_index = TD_PTD_INV_PTD_INDEX;
+
+	for (sitd_index = 0; sitd_index < max_ptds; sitd_index++) {
+		/*
+		 * ISO has 32 PTDs; the first thing to do is look for a free PTD.
+		 */
+		if (ptd_map_buff->map_list[sitd_index].state == TD_PTD_NEW) {
+			iso_dbg(ISO_DBG_INFO,
+				"[phcd_iso_get_sitd_ptd_index] There's a free PTD No. %d\n",
+				sitd_index);
+			/*
+			 * Determine if this is a newly allocated SITD by checking the
+			 * sitd_index, since it was set to TD_PTD_INV_PTD_INDEX during
+			 * initialization
+			 */
+			if (sitd->sitd_index == TD_PTD_INV_PTD_INDEX) {
+				sitd->sitd_index = sitd_index;
+			}
+
+			/* Once there is a free slot, indicate that it is already taken */
+			ptd_map_buff->map_list[sitd_index].datatoggle = 0;
+			ptd_map_buff->map_list[sitd_index].state =
+				TD_PTD_ACTIVE;
+			ptd_map_buff->map_list[sitd_index].qtd = NULL;
+
+			/* Put a connection to the SITD with the PTD maplist */
+			ptd_map_buff->map_list[sitd_index].sitd = sitd;
+			ptd_map_buff->map_list[sitd_index].itd = NULL;
+			ptd_map_buff->map_list[sitd_index].qh = NULL;
+
+			/* ptd_bitmap just holds the bit assigned to this PTD. */
+			ptd_map_buff->map_list[sitd_index].ptd_bitmap =
+				bitmap << sitd_index;
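+			/*
+			 * Example (illustrative): if this SITD is assigned PTD
+			 * slot 5, ptd_bitmap becomes 0x1 << 5 = 0x20; the same
+			 * bit is also set in active_ptd_bitmap and
+			 * pending_ptd_bitmap below.
+			 */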
+
+			phci_hcd_fill_ptd_addresses(&ptd_map_buff->
+				map_list[sitd_index], sitd->sitd_index,
+				buff_type);
+
+			/*
+			 * Indicate that this SITD is the last in the list and update
+			 * the number of active PTDs
+			 */
+			ptd_map_buff->map_list[sitd_index].lasttd = 0;
+			ptd_map_buff->total_ptds++;
+
+
+			ptd_map_buff->active_ptd_bitmap |=
+				(bitmap << sitd_index);
+			ptd_map_buff->pending_ptd_bitmap |= (bitmap << sitd_index);	
+			break;
+		}		/* if(ptd_map_buff->map_list[sitd_index].state == TD_PTD_NEW) */
+	}			/* for(itd_index = 0; itd_index < max_ptds; itd_index++) */
+	return;
+}
+
+/*
+ * phcd_iso_get_itd_ptd_index - Allocate an ISO PTD from the ISO PTD map list
+ *
+ * phci_hcd *hcd
+ *      - Main host controller driver structure
+ * struct ehci_itd *itd
+ *  - Isochronous Transfer Descriptor, contains elements as defined by the
+ *        EHCI standard plus a few more IC specific elements.
+ *
+ * API Description
+ * This is mainly responsible for:
+ * - Allocating an ISO PTD from the ISO PTD map list
+ * - Set the equivalent bit of the allocated PTD to active
+ *   in the bitmap so that this PTD will be included into
+ *   the periodic schedule
+ */
+void
+phcd_iso_get_itd_ptd_index(phci_hcd * hcd, struct ehci_itd *itd)
+{
+	td_ptd_map_buff_t *ptd_map_buff;
+	unsigned long buff_type, max_ptds;
+	unsigned char itd_index, bitmap;
+
+	/* Local variable initialization */
+	bitmap = 0x1;
+	buff_type = td_ptd_pipe_x_buff_type[TD_PTD_BUFF_TYPE_ISTL];
+	ptd_map_buff = (td_ptd_map_buff_t *) & (td_ptd_map_buff[buff_type]);
+	max_ptds = ptd_map_buff->max_ptds;
+
+	itd->itd_index = TD_PTD_INV_PTD_INDEX;
+
+	for (itd_index = 0; itd_index < max_ptds; itd_index++) {
+		/*
+		 * ISO has 32 PTDs; the first thing to do is look for a free PTD.
+		 */
+		if (ptd_map_buff->map_list[itd_index].state == TD_PTD_NEW) {
+			/*
+			 * Determine if this is a newly allocated ITD by checking the
+			 * itd_index, since it was set to TD_PTD_INV_PTD_INDEX during
+			 * initialization
+			 */
+			if (itd->itd_index == TD_PTD_INV_PTD_INDEX) {
+				itd->itd_index = itd_index;
+			}
+
+			/* Once there is a free slot, indicate that it is already taken */
+			ptd_map_buff->map_list[itd_index].datatoggle = 0;
+			ptd_map_buff->map_list[itd_index].state = TD_PTD_ACTIVE;
+			ptd_map_buff->map_list[itd_index].qtd = NULL;
+
+			/* Put a connection to the ITD with the PTD maplist */
+			ptd_map_buff->map_list[itd_index].itd = itd;
+			ptd_map_buff->map_list[itd_index].qh = NULL;
+
+			/* ptd_bitmap just holds the bit assigned to this PTD. */
+			ptd_map_buff->map_list[itd_index].ptd_bitmap =
+				bitmap << itd_index;
+
+			phci_hcd_fill_ptd_addresses(&ptd_map_buff->
+				map_list[itd_index],
+				itd->itd_index, buff_type);
+
+			/*
+			 * Indicate that this ITD is the last in the list and update
+			 * the number of active PTDs
+			 */
+			ptd_map_buff->map_list[itd_index].lasttd = 0;
+			ptd_map_buff->total_ptds++;
+
+			ptd_map_buff->active_ptd_bitmap |=
+				(bitmap << itd_index);
+			ptd_map_buff->pending_ptd_bitmap |= (bitmap << itd_index);	
+			break;
+		}		/* if(ptd_map_buff->map_list[itd_index].state == TD_PTD_NEW) */
+	}			/* for(itd_index = 0; itd_index < max_ptds; itd_index++) */
+	return;
+}				/* phcd_iso_get_itd_ptd_index */
+
+/*
+ * phcd_iso_sitd_free_list - Free memory used by SITDs in SITD list
+ *
+ * phci_hcd *hcd
+ *      - Main host controller driver structure
+ * struct urb *urb
+ *  - USB Request Block, contains information regarding the type and how much data
+ *    is requested to be transferred.
+ * unsigned long status
+ *  - Variable provided by the calling routine that contains the status of the
+ *        SITD list.
+ *
+ * API Description
+ * This is mainly responsible for:
+ *  - Cleaning up memory used by each SITD in the SITD list
+ */
+void
+phcd_iso_sitd_free_list(phci_hcd * hcd, struct urb *urb, unsigned long status)
+{
+	td_ptd_map_buff_t *ptd_map_buff;
+	struct ehci_sitd *first_sitd, *next_sitd, *sitd;
+	td_ptd_map_t *td_ptd_map;
+
+	/* Local variable initialization */
+	ptd_map_buff = &(td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL]);
+	first_sitd = (struct ehci_sitd *) urb->hcpriv;
+	sitd = first_sitd;
+
+	/*
+	 * Check if there is only one SITD, if so immediately
+	 * go and clean it up.
+	 */
+	if (sitd->hw_next == EHCI_LIST_END) {
+		if (sitd->sitd_index != TD_PTD_INV_PTD_INDEX) {
+			td_ptd_map = &ptd_map_buff->map_list[sitd->sitd_index];
+			td_ptd_map->state = TD_PTD_NEW;
+		}
+
+		if (status != -ENOMEM) {
+			phci_hcd_mem_free(&sitd->mem_addr);
+		}
+
+		list_del(&sitd->sitd_list);
+		qha_free(qha_cache, sitd);
+
+		urb->hcpriv = 0;
+		return;
+	}
+	/* if(sitd->hw_next == EHCI_LIST_END) */
+	while (1) {
+		/* Get the SITD following the head SITD */
+		next_sitd = (struct ehci_sitd *) (sitd->hw_next);
+		if (next_sitd->hw_next == EHCI_LIST_END) {
+			/*
+			 * If the next SITD is the end of the list, check if space has
+			 * already been allocated in the PTD array.
+			 */
+			if (next_sitd->sitd_index != TD_PTD_INV_PTD_INDEX) {
+				/* Free up its allocation */
+				td_ptd_map =
+					&ptd_map_buff->map_list[next_sitd->
+					sitd_index];
+				td_ptd_map->state = TD_PTD_NEW;
+			}
+
+			/*
+			 * If the error is not about memory allocation problems, then
+			 * free up the memory used.
+			 */
+			if (status != -ENOMEM) {
+				iso_dbg(ISO_DBG_ERR,
+					"[phcd_iso_sitd_free_list Error]: Memory not available\n");
+				phci_hcd_mem_free(&next_sitd->mem_addr);
+			}
+
+			/* Remove from the SITD list and free up space allocated for SITD structure */
+			list_del(&next_sitd->sitd_list);
+			qha_free(qha_cache, next_sitd);
+			break;
+		}
+
+		/* if(next_itd->hw_next == EHCI_LIST_END) */
+		/*
+		 * If this SITD is not the end of the list, it already has everything allocated,
+		 * and there is no need to check which procedure failed. So just free all resources immediately.
+		 */
+		sitd->hw_next = next_sitd->hw_next;
+
+		td_ptd_map = &ptd_map_buff->map_list[next_sitd->sitd_index];
+		td_ptd_map->state = TD_PTD_NEW;
+		phci_hcd_mem_free(&next_sitd->mem_addr);
+		list_del(&next_sitd->sitd_list);
+		qha_free(qha_cache, next_sitd);
+	}			/*  while(1) */
+
+	/* Now work on the head SITD, it is the last one processed. */
+	if (first_sitd->sitd_index != TD_PTD_INV_PTD_INDEX) {
+		td_ptd_map = &ptd_map_buff->map_list[first_sitd->sitd_index];
+		td_ptd_map->state = TD_PTD_NEW;
+	}
+
+	if (status != -ENOMEM) {
+		iso_dbg(ISO_DBG_ERR,
+			"[phcd_iso_sitd_free_list Error]: No memory\n");
+		phci_hcd_mem_free(&first_sitd->mem_addr);
+	}
+
+	list_del(&first_sitd->sitd_list);
+	qha_free(qha_cache, first_sitd);
+	urb->hcpriv = 0;
+	return;
+}
+
+/*
+ * phcd_iso_itd_free_list - Free memory used by ITDs in ITD list
+ *
+ * phci_hcd *hcd
+ *      - Main host controller driver structure
+ * struct urb *urb
+ *  - USB Request Block, contains information regarding the type and how much data
+ *    is requested to be transferred.
+ * unsigned long status
+ *  - Variable provided by the calling routine that contains the status of the
+ *        ITD list.
+ *
+ * API Description
+ * This is mainly responsible for:
+ *  - Cleaning up memory used by each ITD in the ITD list
+ */
+void
+phcd_iso_itd_free_list(phci_hcd * hcd, struct urb *urb, unsigned long status)
+{
+	td_ptd_map_buff_t *ptd_map_buff;
+	struct ehci_itd *first_itd, *next_itd, *itd;
+	td_ptd_map_t *td_ptd_map;
+
+	/* Local variable initialization */
+	ptd_map_buff = &(td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL]);
+	first_itd = (struct ehci_itd *) urb->hcpriv;
+	itd = first_itd;
+
+	/*
+	 * Check if there is only one ITD, if so immediately
+	 * go and clean it up.
+	 */
+	if (itd->hw_next == EHCI_LIST_END) {
+		if (itd->itd_index != TD_PTD_INV_PTD_INDEX) {
+			td_ptd_map = &ptd_map_buff->map_list[itd->itd_index];
+			td_ptd_map->state = TD_PTD_NEW;
+		}
+
+		if (status != -ENOMEM) {
+			phci_hcd_mem_free(&itd->mem_addr);
+		}
+
+		list_del(&itd->itd_list);
+		qha_free(qha_cache, itd);
+
+		urb->hcpriv = 0;
+		return;
+	}
+	/* if(itd->hw_next == EHCI_LIST_END) */
+	while (1) {
+		/* Get the ITD following the head ITD */
+		next_itd = (struct ehci_itd *) le32_to_cpu(itd->hw_next);
+		if (next_itd->hw_next == EHCI_LIST_END) {
+			/*
+			 * If the next ITD is the end of the list, check if space has
+			 * already been allocated in the PTD array.
+			 */
+			if (next_itd->itd_index != TD_PTD_INV_PTD_INDEX) {
+				/* Free up its allocation */
+				td_ptd_map =
+					&ptd_map_buff->map_list[next_itd->
+					itd_index];
+				td_ptd_map->state = TD_PTD_NEW;
+			}
+
+			/*
+			 * If the error is not about memory allocation problems, then
+			 * free up the memory used.
+			 */
+			if (status != -ENOMEM) {
+				iso_dbg(ISO_DBG_ERR,
+					"[phcd_iso_itd_free_list Error]: Memory not available\n");
+				phci_hcd_mem_free(&next_itd->mem_addr);
+			}
+
+			/* Remove from the ITD list and free up space allocated for ITD structure */
+			list_del(&next_itd->itd_list);
+			qha_free(qha_cache, next_itd);
+			break;
+		}
+
+		/* if(next_itd->hw_next == EHCI_LIST_END) */
+		/*
+		 * If this ITD is not the end of the list, it already has everything allocated,
+		 * and there is no need to check which procedure failed. So just free all resources immediately.
+		 */
+		itd->hw_next = next_itd->hw_next;
+
+		td_ptd_map = &ptd_map_buff->map_list[next_itd->itd_index];
+		td_ptd_map->state = TD_PTD_NEW;
+		phci_hcd_mem_free(&next_itd->mem_addr);
+		list_del(&next_itd->itd_list);
+		qha_free(qha_cache, next_itd);
+	}			/*  while(1) */
+
+	/* Now work on the head ITD, it is the last one processed. */
+	if (first_itd->itd_index != TD_PTD_INV_PTD_INDEX) {
+		td_ptd_map = &ptd_map_buff->map_list[first_itd->itd_index];
+		td_ptd_map->state = TD_PTD_NEW;
+	}
+
+	if (status != -ENOMEM) {
+		iso_dbg(ISO_DBG_ERR,
+			"[phcd_iso_itd_free_list Error]: No memory\n");
+		phci_hcd_mem_free(&first_itd->mem_addr);
+	}
+
+	list_del(&first_itd->itd_list);
+	qha_free(qha_cache, first_itd);
+	urb->hcpriv = 0;
+	return;
+}				/* phcd_iso_itd_free_list */
+
+void
+phcd_clean_iso_qh(phci_hcd * hcd, struct ehci_qh *qh)
+{
+	unsigned int i = 0;
+	u16 skipmap=0;
+	struct ehci_sitd *sitd;
+	struct ehci_itd *itd;
+
+	iso_dbg(ISO_DBG_ERR, "phcd_clean_iso_qh \n");
+	if (!qh){
+		return;
+	}
+	skipmap = isp1763_reg_read16(hcd->dev, hcd->regs.isotdskipmap, skipmap);
+	skipmap |= qh->periodic_list.ptdlocation;
+	isp1763_reg_write16(hcd->dev, hcd->regs.isotdskipmap, skipmap);
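+	/*
+	 * Illustrative example: if this queue head occupies PTD slots 0 and 1,
+	 * ptdlocation is 0x0003 and both bits get set in the ISO skip map, so
+	 * the controller stops processing those PTDs.
+	 */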
+#ifdef COMMON_MEMORY
+	phci_hcd_mem_free(&qh->memory_addr);
+#endif
+	for (i = 0; i < 16 && qh->periodic_list.ptdlocation; i++) {
+		if (qh->periodic_list.ptdlocation & (0x1 << i)) {
+			printk("[phcd_clean_iso_qh] : %x \n",
+				qh->periodic_list.high_speed);
+
+			qh->periodic_list.ptdlocation &= ~(0x1 << i);
+
+			if (qh->periodic_list.high_speed == 0) {
+				if (td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL].
+					map_list[i].sitd) {
+
+					printk("SITD found \n");
+					sitd = td_ptd_map_buff
+						[TD_PTD_BUFF_TYPE_ISTL].
+						map_list[i].sitd;
+#ifndef COMMON_MEMORY
+					phci_hcd_mem_free(&sitd->mem_addr);
+#endif
+					/*
+					if(sitd->urb)
+						urb=sitd->urb;
+					*/
+					sitd->urb = NULL;
+					td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL].
+						map_list[i].state = TD_PTD_NEW;
+					td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL].
+						map_list[i].sitd = NULL;
+					qha_free(qha_cache, sitd);
+				}
+			} else {
+				if (td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL].
+					map_list[i].itd) {
+
+					printk("ITD found \n");
+					itd = td_ptd_map_buff
+						[TD_PTD_BUFF_TYPE_ISTL].
+						map_list[i].itd;
+#ifdef COMMON_MEMORY
+					phci_hcd_mem_free(&itd->mem_addr);
+#endif
+
+					/*
+					if(itd->urb)
+					urb=itd->urb;
+					*/
+					itd->urb = NULL;
+					td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL].
+						map_list[i].state = TD_PTD_NEW;
+					td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL].
+						map_list[i].itd = NULL;
+					qha_free(qha_cache, itd);
+				}
+			}
+
+		}
+	}
+
+
+}
+
+
+/*
+ * phcd_store_urb_pending - store requested URB into a queue
+ *
+ * phci_hcd *hcd
+ *      - Main host controller driver structure
+ * struct urb *urb
+ *  - USB Request Block, contains information regarding the type and how much data
+ *    is requested to be transferred.
+ * unsigned long *status
+ *  - Variable provided by the calling routine that will contain the status of the
+ *        phcd_submit_iso actions
+ *
+ * API Description
+ * This is mainly responsible for:
+ *  - Store the URB into a queue
+ *  - If there are enough free PTD slots, prepare and schedule the PTDs
+ */
+void phcd_clean_periodic_ep(void){
+	periodic_ep[0] = NULL;
+	periodic_ep[1] = NULL;
+}
+
+int
+phcd_clean_urb_pending(phci_hcd * hcd, struct urb *urb)
+{
+	unsigned int i = 0;
+	struct ehci_qh *qhead;
+	struct ehci_sitd *sitd;
+	struct ehci_itd *itd;
+	u16 skipmap = 0;
+
+	iso_dbg(ISO_DBG_ENTRY, "[phcd_clean_urb_pending] : Enter\n");
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	qhead=urb->hcpriv;
+	if (periodic_ep[0] == qhead->ep) {
+		periodic_ep[0] = NULL;
+
+	}
+
+	if (periodic_ep[1] == qhead->ep) {
+		periodic_ep[1] = NULL;
+	}
+#else	
+	qhead = urb->ep->hcpriv;
+	if (periodic_ep[0] == urb->ep) {
+		periodic_ep[0] = NULL;
+
+	}
+
+	if (periodic_ep[1] == urb->ep) {
+		periodic_ep[1] = NULL;
+	}
+#endif	
+	if (!qhead) {
+		return 0;
+	}
+	skipmap = isp1763_reg_read16(hcd->dev, hcd->regs.isotdskipmap, skipmap);
+	skipmap |= qhead->periodic_list.ptdlocation;
+	isp1763_reg_write16(hcd->dev, hcd->regs.isotdskipmap, skipmap);
+#ifdef COMMON_MEMORY
+	phci_hcd_mem_free(&qhead->memory_addr);
+#endif
+
+	for (i = 0; i < 16 && qhead->periodic_list.ptdlocation; i++) {
+
+		if (qhead->periodic_list.ptdlocation & (0x1 << i)) {
+
+			qhead->periodic_list.ptdlocation &= ~(0x1 << i);
+
+			printk("[phcd_clean_urb_pending] : %x \n",
+				qhead->periodic_list.high_speed);
+
+			if (qhead->periodic_list.high_speed == 0) {
+
+				if (td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL].
+					map_list[i].sitd) {
+
+					sitd = td_ptd_map_buff
+						[TD_PTD_BUFF_TYPE_ISTL].
+						map_list[i].sitd;
+#ifndef COMMON_MEMORY
+					phci_hcd_mem_free(&sitd->mem_addr);
+#endif
+					td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL].
+						map_list[i].state = TD_PTD_NEW;
+					td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL].
+						map_list[i].sitd = NULL;
+					qha_free(qha_cache, sitd);
+				}
+			} else {
+
+				if (td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL].
+					map_list[i].itd) {
+
+					itd = td_ptd_map_buff
+						[TD_PTD_BUFF_TYPE_ISTL].
+						map_list[i].itd;
+#ifdef COMMON_MEMORY
+					phci_hcd_mem_free(&itd->mem_addr);
+#endif
+					td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL].
+						map_list[i].state = TD_PTD_NEW;
+					td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL].
+						map_list[i].itd = NULL;
+					qha_free(qha_cache, itd);
+				}
+			}
+
+		}
+
+	}
+	INIT_LIST_HEAD(&qhead->periodic_list.sitd_itd_head);
+	iso_dbg(ISO_DBG_ENTRY, "[phcd_clean_urb_pending] : Exit\n");
+	return 0;
+}
+
+
+
+int
+phcd_store_urb_pending(phci_hcd * hcd, int index, struct urb *urb, int *status)
+{
+	unsigned int uiNumofPTDs = 0;
+	unsigned int uiNumofSlots = 0;
+	unsigned int uiMult = 0;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+	iso_dbg(ISO_DBG_ENTRY, "[phcd_store_urb_pending] : Enter\n");
+	if (urb != NULL) {
+		if (periodic_ep[0] != urb->ep && periodic_ep[1] != urb->ep) {
+			if (periodic_ep[0] == NULL) {
+			//	printk("storing in 0 %x %x\n",urb,urb->pipe);
+				periodic_ep[0] = urb->ep;
+			} else if (periodic_ep[1] == NULL) {
+				printk("storing in 1\n");
+				periodic_ep[1] = urb->ep;
+				usb_hcd_link_urb_to_ep(&(hcd->usb_hcd), urb);
+				return -1;
+			} else {
+				iso_dbg(ISO_DBG_ERR,
+					"Support only 2 ISO endpoints simultaneously \n");
+				*status = -1;
+				return -1;
+			}
+		}
+		usb_hcd_link_urb_to_ep(&(hcd->usb_hcd), urb);
+		iso_dbg(ISO_DBG_DATA,
+			"[phcd_store_urb_pending] : Add an urb into gstUrb_pending array at index : %d\n",
+			giUrbCount);
+		giUrbCount++;
+	} else {
+
+		iso_dbg(ISO_DBG_ENTRY,
+			"[phcd_store_urb_pending] : getting urb from list \n");
+		if (index > 0 && index < 2) {
+			if (periodic_ep[index - 1]){
+				urb = container_of(periodic_ep[index - 1]->
+					urb_list.next, struct urb,
+					urb_list);
+			}
+		} else {
+			iso_dbg(ISO_DBG_ERR, " Unknown endpoint Error \n");
+			*status = -1;
+			return -1;
+		}
+
+	}
+
+
+	if ((urb != NULL && (urb->ep->urb_list.next == &urb->urb_list))){
+		iso_dbg(ISO_DBG_DATA,
+			"[phcd_store_urb_pending] : periodic_sched : %d\n",
+			hcd->periodic_sched);
+		iso_dbg(ISO_DBG_DATA,
+			"[phcd_store_urb_pending] : number_of_packets : %d\n",
+			urb->number_of_packets);
+		iso_dbg(ISO_DBG_DATA,
+			"[phcd_store_urb_pending] : Maximum PacketSize : %d\n",
+			usb_maxpacket(urb->dev,urb->pipe, usb_pipeout(urb->pipe)));
+		/*if enough free slots */
+		if (urb->dev->speed == USB_SPEED_FULL) {	/*for FULL SPEED */
+	//		if (hcd->periodic_sched < 
+		//		MAX_PERIODIC_SIZE - urb->number_of_packets) {
+			if(1){
+				if (phcd_submit_iso(hcd, 
+					#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+						struct usb_host_endpoint *ep,
+					#endif
+						urb,
+						( unsigned long *) &status) == 0) {
+					pehci_hcd_iso_schedule(hcd, urb);
+				} else{
+				//*status = 0;
+				}
+			}
+		} else if (urb->dev->speed == USB_SPEED_HIGH) {	/*for HIGH SPEED */
+			/*number of slots for 1 PTD */
+			uiNumofSlots = NUMMICROFRAME / urb->interval;
+			/*max packets size */
+			uiMult = usb_maxpacket(urb->dev, urb->pipe,
+					usb_pipeout(urb->pipe));
+			/*mult */
+			uiMult = 1 + ((uiMult >> 11) & 0x3);
+			/*number of PTDs need to schedule for this PTD */
+			uiNumofPTDs =
+				(urb->number_of_packets / uiMult) /
+				uiNumofSlots;
+			if ((urb->number_of_packets / uiMult) % uiNumofSlots != 0){
+				uiNumofPTDs += 1;
+			}
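+			/*
+			 * Worked example (illustrative, assuming NUMMICROFRAME is 8):
+			 * with interval = 1 there are 8 slots per PTD; for 30 packets
+			 * and uiMult = 1 this gives 30 / 8 = 3 full PTDs plus a
+			 * remainder, so uiNumofPTDs = 4.
+			 */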
+
+			iso_dbg(ISO_DBG_DATA,
+				"[phcd_store_urb_pending] : interval : %d\n",
+				urb->interval);
+			iso_dbg(ISO_DBG_DATA,
+				"[phcd_store_urb_pending] : uiMult : %d\n",
+				uiMult);
+			iso_dbg(ISO_DBG_DATA,
+				"[phcd_store_urb_pending] : uiNumofPTDs : %d\n",
+				uiNumofPTDs);
+
+			if (hcd->periodic_sched <=
+				MAX_PERIODIC_SIZE - uiNumofPTDs) {
+
+				if (phcd_submit_iso(hcd,
+					#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+						struct usb_host_endpoint *ep,
+					#endif
+					urb, (unsigned long *) &status)== 0) {
+
+					pehci_hcd_iso_schedule(hcd, urb);
+				}
+			} else{
+				*status = 0;
+			}
+		}
+	} else{
+		iso_dbg(ISO_DBG_DATA,
+			"[phcd_store_urb_pending] : nextUrb is NULL\n");
+	}
+#endif
+	iso_dbg(ISO_DBG_ENTRY, "[phcd_store_urb_pending] : Exit\n");
+	return 0;
+}
+
+/*
+ * phcd_submit_iso - ISO transfer URB submit routine
+ *
+ * phci_hcd *hcd
+ *      - Main host controller driver structure
+ * struct urb *urb
+ *  - USB Request Block, contains information regarding the type and how much data
+ *    is requested to be transferred.
+ * unsigned long *status
+ *  - Variable provided by the calling routine that will contain the status of the
+ *        phcd_submit_iso actions
+ *
+ * API Description
+ * This is mainly responsible for:
+ *  - Allocating memory for the endpoint information structure (pQHead_st)
+ *  - Requesting for bus bandwidth from the USB core
+ *  - Allocating and initializing Payload and PTD memory
+ */
+unsigned long
+phcd_submit_iso(phci_hcd * hcd,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	struct usb_host_endpoint *ep,
+#else
+#endif
+		struct urb *urb, unsigned long *status)
+{
+	struct _periodic_list *periodic_list;
+	struct hcd_dev *dev;
+	struct ehci_qh *qhead;
+	struct ehci_itd *itd, *prev_itd;
+	struct ehci_sitd *sitd, *prev_sitd;
+	struct list_head *sitd_itd_list;
+	unsigned long ep_in, max_pkt, mult;
+	unsigned long bus_time, high_speed, start_frame;
+	unsigned long temp;
+	unsigned long packets;
+	/*for high speed device */
+	unsigned int iMicroIndex = 0;
+	unsigned int iNumofSlots = 0;
+	unsigned int iNumofPTDs = 0;
+	unsigned int iPTDIndex = 0;
+	unsigned int iNumofPks = 0;
+	int iPG = 0;
+	dma_addr_t buff_dma;
+	unsigned long length, offset;
+	int i = 0;
+
+	iso_dbg(ISO_DBG_ENTRY, "phcd_submit_iso Entry\n");
+
+	*status = 0;
+	/* Local variable initialization */
+	high_speed = 0;
+	periodic_list = &hcd->periodic_list[0];
+	dev = (struct hcd_dev *) urb->hcpriv;
+	urb->hcpriv = (void *) 0;
+	prev_itd = (struct ehci_itd *) 0;
+	itd = (struct ehci_itd *) 0;
+	prev_sitd = (struct ehci_sitd *) 0;
+	sitd = (struct ehci_sitd *) 0;
+	start_frame = 0;
+
+	ep_in = usb_pipein(urb->pipe);
+
+	/*
+	 * Take the endpoint, if there is still no memory allocated
+	 * for it allocate some and indicate this is for ISO.
+	 */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	qhead = ep->hcpriv;
+#else
+	qhead = urb->ep->hcpriv;
+#endif
+	if (!qhead) {
+
+		qhead = phci_hcd_qh_alloc(hcd);
+		if (qhead == 0) {
+			iso_dbg(ISO_DBG_ERR,
+				"[phcd_submit_iso Error]: Not enough memory\n");
+			return -ENOMEM;
+		}
+
+		qhead->type = TD_PTD_BUFF_TYPE_ISTL;
+		INIT_LIST_HEAD(&qhead->periodic_list.sitd_itd_head);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+		qhead->ep=ep;
+		ep->hcpriv = qhead;
+		urb->hcpriv=qhead;
+#else
+		urb->ep->hcpriv = qhead;
+#endif
+	}
+
+	urb->hcpriv = qhead;
+
+	/* if(!qhead) */
+	/*
+	 * Get the number of additional packets that the endpoint can support during a
+	 * single microframe.
+	 */
+	max_pkt = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
+
+	/*
+	 * We need to add 1 since our Multi starts at 1, whereas the USB spec encodes
+	 * the additional-transactions field starting from zero (0).
+	 */
+	mult = 1 + ((max_pkt >> 11) & 0x3);
+
+	/* This is the actual maximum length for the whole transaction in one microframe */
+	max_pkt *= mult;
+
+	/* Check bandwidth */
+	bus_time = 0;
+
+	if (urb->dev->speed == USB_SPEED_FULL) {
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+		if (urb->bandwidth == 0) {
+			bus_time = usb_check_bandwidth(urb->dev, urb);
+			if (bus_time < 0) {
+				usb_dec_dev_use(urb->dev);
+				*status = bus_time;
+				return *status;
+			}
+		}
+#else
+#endif
+	} else {			/*HIGH SPEED */
+
+		high_speed = 1;
+
+		/*
+		 * Calculate bustime as dictated by the USB Specs Section 5.11.3
+		 * for high speed ISO
+		 */
+		bus_time = 633232L;
+		bus_time +=
+			(2083L * ((3167L + BitTime(max_pkt) * 1000L) / 1000L));
+		bus_time = bus_time / 1000L;
+		bus_time += BW_HOST_DELAY;
+		bus_time = NS_TO_US(bus_time);
+	}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	usb_claim_bandwidth(urb->dev, urb, bus_time, 1);
+#else
+#endif
+
+	qhead->periodic_list.ptdlocation = 0;
+	/* Initialize the start split (ssplit) and complete split (csplit) variables of qhead */
+	if (phcd_iso_scheduling_info(hcd, qhead, max_pkt, high_speed, ep_in) <
+		0) {
+
+		iso_dbg(ISO_DBG_ERR,
+			"[phcd_submit_iso Error]: No space available\n");
+		return -ENOSPC;
+	}
+
+	if (urb->dev->speed == USB_SPEED_HIGH) {
+		iNumofSlots = NUMMICROFRAME / urb->interval;
+		/*number of PTDs need to schedule for this PTD */
+		iNumofPTDs = (urb->number_of_packets / mult) / iNumofSlots;
+		if ((urb->number_of_packets / mult) % iNumofSlots != 0){	
+			/*get remainder */
+			iNumofPTDs += 1;
+		}
+	}
+	if (urb->iso_frame_desc[0].offset != 0) {
+		*status = -EINVAL;
+		iso_dbg(ISO_DBG_ERR,
+			"[phcd_submit_iso Error]: Invalid value\n");
+		return *status;
+	}
+	if (1) {
+		/* Calculate the current frame number */
+		if (0){
+			if (urb->transfer_flags & URB_ISO_ASAP){
+				start_frame =
+					isp1763_reg_read16(hcd->dev,
+						hcd->regs.frameindex,
+						start_frame);
+			} else {
+				start_frame = urb->start_frame;
+			}
+		}
+
+		start_frame =
+			isp1763_reg_read16(hcd->dev, hcd->regs.frameindex,
+				start_frame);
+
+		/* The only valid bits of the frame index are the lower 14 bits. */
+
+		/*
+		 * Remove the count for the micro frame (uSOF) and just keep the
+		 * count for the frame (SOF). Since 1 SOF is equal to 8 uSOFs,
+		 * shifting right by three divides the index by 8 (each shift divides by two).
+		 */
+		start_frame >>= 3;
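+		/*
+		 * Example (illustrative): a raw frame index of 0x123 (uframe count)
+		 * becomes 0x123 >> 3 = 0x24, i.e. SOF frame number 36.
+		 */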
+		if (urb->dev->speed != USB_SPEED_HIGH){
+			start_frame += 1;
+		}else{
+			start_frame += 2;
+		}
+		start_frame = start_frame & PTD_FRAME_MASK;
+		temp = start_frame;
+		if (urb->dev->speed != USB_SPEED_HIGH) {
+			qhead->next_uframe =
+				start_frame + urb->number_of_packets;
+		} else {
+			qhead->next_uframe = start_frame + iNumofPTDs;
+		}
+		qhead->next_uframe %= PTD_FRAME_MASK;
+		iso_dbg(ISO_DBG_DATA, "[phcd_submit_iso]: startframe = %ld\n",
+			start_frame);
+	} else {
+		/*
+		 * The periodic frame list is only 32 elements deep, so the frame
+		 * index must stay below 32 (0 through 31).
+		 */
+		start_frame = (qhead->next_uframe) % PTD_FRAME_MASK;
+		if (urb->dev->speed != USB_SPEED_HIGH){
+			qhead->next_uframe =
+				start_frame + urb->number_of_packets;
+				iNumofPTDs=urb->number_of_packets;
+		} else {
+			qhead->next_uframe = start_frame + iNumofPTDs;
+		}
+
+		qhead->next_uframe %= PTD_FRAME_MASK;
+	}
+
+
+	iso_dbg(ISO_DBG_DATA, "[phcd_submit_iso]: Start frame index: %ld\n",
+		start_frame);
+	iso_dbg(ISO_DBG_DATA, "[phcd_submit_iso]: Max packet: %d\n",
+		(int) max_pkt);
+
+#ifdef COMMON_MEMORY
+	if(urb->number_of_packets>8 && urb->dev->speed!=USB_SPEED_HIGH)
+		phci_hcd_mem_alloc(8*max_pkt, &qhead->memory_addr, 0);
+	else
+	phci_hcd_mem_alloc(urb->transfer_buffer_length, &qhead->memory_addr, 0);
+	if (urb->transfer_buffer_length && ((qhead->memory_addr.phy_addr == 0)
+		|| (qhead->memory_addr.virt_addr ==0))) {
+		iso_dbg(ISO_DBG_ERR,
+			"[URB FILL MEMORY Error]: No payload memory available\n");
+		return -ENOMEM;
+	}
+#endif
+
+	if (urb->dev->speed != USB_SPEED_HIGH) {
+		iNumofPks = urb->number_of_packets;
+		qhead->totalptds=urb->number_of_packets;
+		qhead->actualptds=0;
+
+		/* Make as many tds as number of packets */
+		for (packets = 0; packets < urb->number_of_packets; packets++) {
+			/*
+			 * Allocate memory for the SITD data structure and initialize it.
+			 *
+			 * This data structure follows the format of the SITD
+			 * structure defined by the EHCI standard on the top part
+			 * but also contains specific elements in the bottom
+			 * part
+			 */
+			sitd = kmalloc(sizeof(*sitd), GFP_ATOMIC);
+			if (!sitd) {
+				*status = -ENOMEM;
+				if (((int)(qhead->next_uframe -
+					urb->number_of_packets)) < 0){
+					/*plus max PTDs*/
+					qhead->next_uframe = qhead->next_uframe + PTD_PERIODIC_SIZE;	
+					
+				}
+				qhead->next_uframe -= urb->number_of_packets;
+
+				/* Handle SITD list cleanup */
+				if (urb->hcpriv) {
+					phcd_iso_sitd_free_list(hcd, urb, 
+						*status);
+				}
+				iso_dbg(ISO_DBG_ERR,
+					"[phcd_submit_iso Error]: No memory available\n");
+				return *status;
+			}
+
+			memset(sitd, 0, sizeof(struct ehci_sitd));
+
+			INIT_LIST_HEAD(&sitd->sitd_list);
+
+			sitd->sitd_dma = (u32) (sitd);
+			sitd->urb = urb;
+
+			/*
+			 * Indicate that this SITD is the last in the list.
+			 *
+			 * Also set the itd_index to TD_PTD_INV_PTD_INDEX
+			 * (0xFFFFFFFF). This would indicate when we allocate
+			 * a PTD that this SITD did not have a PTD allocated
+			 * before.
+			 */
+
+			sitd->hw_next = EHCI_LIST_END;
+			sitd->sitd_index = TD_PTD_INV_PTD_INDEX;
+
+			/* This SITD will go into this frame */
+			sitd->framenumber = start_frame + packets;
+			sitd->start_frame = temp + packets;
+
+			/* Number of the packet */
+			sitd->index = packets;
+
+			sitd->framenumber = sitd->framenumber & PTD_FRAME_MASK;
+			sitd->ssplit = qhead->ssplit;
+			sitd->csplit = qhead->csplit;
+
+			/* Initialize the following elements of the SITD structure
+			 *      > sitd->length = length;                 -- the size of the request
+			 *      > sitd->multi = multi;                   -- the number of transactions for
+			 *                                         this EP per micro frame
+			 *      > sitd->hw_bufp[0] = buf_dma;    -- The base address of the buffer where
+			 *                                         to put the data (this base address was
+			 *                                         the buffer provided plus the offset)
+			 * And then, allocating memory from the PAYLOAD memory area, where the data
+			 * coming from the requesting party will be placed or data requested by the
+			 * requesting party will be retrieved when it is available.
+			 */
+			*status = phcd_iso_sitd_fill(hcd, sitd, urb, packets);
+
+			if (*status != 0) {
+				if (((int)(qhead->next_uframe - 
+					urb->number_of_packets)) < 0){
+					/*plus max PTDs*/
+					qhead->next_uframe = qhead->next_uframe + 
+						PTD_PERIODIC_SIZE;	
+				}
+				qhead->next_uframe -= urb->number_of_packets;
+
+				/* Handle SITD list cleanup */
+				if (urb->hcpriv) {
+					phcd_iso_sitd_free_list(hcd, urb,
+						*status);
+				}
+				iso_dbg(ISO_DBG_ERR,
+					"[phcd_submit_iso Error]: Error in filling up SITD\n");
+				return *status;
+			}
+
+			/*
+			 * If this SITD is not the head/root SITD, link this SITD to the SITD
+			 * that came before it.
+			 */
+			if (prev_sitd) {
+				prev_sitd->hw_next = (u32) (sitd);
+			}
+
+			prev_sitd = sitd;
+
+			if (packets < 8) {	/* because of memory constraints, only the first 8 PTDs are used when number_of_packets exceeds 8 */
+			/*
+			 * Allocate an ISO PTD from the ISO PTD map list and
+			 * set the equivalent bit of the allocated PTD to active
+			 * in the bitmap so that this PTD will be included into
+			 * the periodic schedule
+			 */
+			phcd_iso_get_sitd_ptd_index(hcd, sitd);
+			iso_dbg(ISO_DBG_DATA,
+				"[phcd_submit_iso]: SITD index %d\n",
+				sitd->sitd_index);
+
+			/*if we dont have any space left */
+			if (sitd->sitd_index == TD_PTD_INV_PTD_INDEX) {
+				*status = -ENOSPC;
+				if (((int) (qhead->next_uframe -
+					urb->number_of_packets)) < 0){
+					/*plus max PTDs*/
+					qhead->next_uframe = qhead->next_uframe + PTD_PERIODIC_SIZE;	
+				}
+				qhead->next_uframe -= urb->number_of_packets;
+
+				/* Handle SITD list cleanup */
+				if (urb->hcpriv) {
+					phcd_iso_sitd_free_list(hcd, urb,
+						*status);
+				}
+				return *status;
+			}
+					qhead->actualptds++;
+			}
+			/* Insert this td into the periodic list */
+
+			sitd_itd_list = &qhead->periodic_list.sitd_itd_head;
+			list_add_tail(&sitd->sitd_list, sitd_itd_list);
+			qhead->periodic_list.high_speed = 0;
+			if(sitd->sitd_index!=TD_PTD_INV_PTD_INDEX)
+			qhead->periodic_list.ptdlocation |=
+				0x1 << sitd->sitd_index;
+			/* Indicate that a new SITD has been scheduled */
+			hcd->periodic_sched++;
+
+			/* Determine if there are any SITDs scheduled before this one. */
+			if (urb->hcpriv == 0){
+				urb->hcpriv = sitd;
+			}
+		}	/* for(packets = 0; packets... */
+	} else if (urb->dev->speed == USB_SPEED_HIGH) {	
+		iNumofPks = iNumofPTDs;
+
+		packets = 0;
+		iPTDIndex = 0;
+		while (packets < urb->number_of_packets) {
+			iNumofSlots = NUMMICROFRAME / urb->interval;
+			/*
+			 * Allocate memory for the ITD data structure and initialize it.
+			 *
+			 * This data structure follows the format of the ITD
+			 * structure defined by the EHCI standard on the top part
+			 * but also contains specific elements in the bottom
+			 * part
+			 */
+			itd = kmalloc(sizeof(*itd), GFP_ATOMIC);
+			if (!itd) {
+				*status = -ENOMEM;
+				if(((int) (qhead->next_uframe - iNumofPTDs))<0){
+					/*plus max PTDs*/
+					qhead->next_uframe = qhead->next_uframe + 
+						PTD_PERIODIC_SIZE;	
+				}
+				qhead->next_uframe -= iNumofPTDs;
+
+				/* Handle ITD list cleanup */
+				if (urb->hcpriv) {
+					phcd_iso_itd_free_list(hcd, urb,
+							       *status);
+				}
+				iso_dbg(ISO_DBG_ERR,
+					"[phcd_submit_iso Error]: No memory available\n");
+				return *status;
+			}
+			memset(itd, 0, sizeof(struct ehci_itd));
+
+			INIT_LIST_HEAD(&itd->itd_list);
+
+			itd->itd_dma = (u32) (itd);
+			itd->urb = urb;
+			/*
+			 * Indicate that this ITD is the last in the list.
+			 *
+			 * Also set the itd_index to TD_PTD_INV_PTD_INDEX
+			 * (0xFFFFFFFF). This would indicate when we allocate
+			 * a PTD that this SITD did not have a PTD allocated
+			 * before.
+			 */
+
+			itd->hw_next = EHCI_LIST_END;
+			itd->itd_index = TD_PTD_INV_PTD_INDEX;
+			/* This ITD will go into this frame */
+			itd->framenumber = start_frame + iPTDIndex;
+			/* Number of the packet */
+			itd->index = packets;
+
+			itd->framenumber = itd->framenumber & 0x1F;
+
+			itd->ssplit = qhead->ssplit;
+			itd->csplit = qhead->csplit;
+
+			/* Calculate the number of packets for this ITD */
+			itd->num_of_pkts = iNumofSlots * mult;
+			/* Handle the case where urb->number_of_packets is less than (number of slots * mult) */
+			if (itd->num_of_pkts >= urb->number_of_packets)
+			{
+				itd->num_of_pkts = urb->number_of_packets;
+			}
+			else {
+				if (itd->num_of_pkts >
+					urb->number_of_packets - packets){
+					itd->num_of_pkts =
+						urb->number_of_packets -
+						packets;
+				}
+			}
+
+			/* Initialize the following elements of the ITD structure
+			 *      > itd->length = length;                 -- the size of the request
+			 *      > itd->multi = multi;                   -- the number of transactions for
+			 *                                         this EP per micro frame
+			 *      > itd->hw_bufp[0] = buf_dma;    -- The base address of the buffer where
+			 *                                         to put the data (this base address was
+			 *                                         the buffer provided plus the offset)
+			 * And then, allocating memory from the PAYLOAD memory area, where the data
+			 * coming from the requesting party will be placed or data requested by the
+			 * requesting party will be retrieved when it is available.
+			 */
+			iso_dbg(ISO_DBG_DATA,
+				"[phcd_submit_iso] packets index = %ld itd->num_of_pkts = %d\n",
+				packets, itd->num_of_pkts);
+			*status =
+				phcd_iso_itd_fill(hcd, itd, urb, packets,
+						itd->num_of_pkts);
+			if (*status != 0) {
+				if (((int) (qhead->next_uframe - iNumofPTDs)) <
+					0) {
+					qhead->next_uframe = qhead->next_uframe + PTD_PERIODIC_SIZE;	/*plus max PTDs*/
+				}
+				qhead->next_uframe -= iNumofPTDs;
+
+				/* Handle SITD list cleanup */
+				if (urb->hcpriv) {
+					phcd_iso_itd_free_list(hcd, urb,
+						*status);
+				}
+				iso_dbg(ISO_DBG_ERR,
+					"[phcd_submit_iso Error]: Error in filling up ITD\n");
+				return *status;
+			}
+
+			iPG = 0;
+			iMicroIndex = 0;
+			while (iNumofSlots > 0) {
+				offset = urb->iso_frame_desc[packets].offset;
+				/* Buffer for this packet */
+				buff_dma =
+					(u32) ((unsigned char *) urb->
+						transfer_buffer + offset);
+
+				/*for the case mult is 2 or 3 */
+				length = 0;
+				for (i = packets; i < packets + mult; i++) {
+					length += urb->iso_frame_desc[i].length;
+				}
+				itd->hw_transaction[iMicroIndex] =
+					EHCI_ISOC_ACTIVE | (length & 
+					EHCI_ITD_TRANLENGTH)
+					<< 16 | iPG << 12 | buff_dma;
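+				/*
+				 * The word built above is assumed to follow the
+				 * standard EHCI iTD transaction layout: status in
+				 * bits 31:28, transaction length in 27:16, PG
+				 * (buffer page select) in 14:12 and the buffer
+				 * offset in the low 12 bits.
+				 */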
+					
+				if (itd->hw_bufp[iPG] != buff_dma){
+					itd->hw_bufp[++iPG] = buff_dma;
+				}
+
+				iso_dbg(ISO_DBG_DATA,
+					"[%s] offset : %ld buff_dma : 0x%08x length : %ld\n",
+					__FUNCTION__, offset,
+					(unsigned int) buff_dma, length);
+
+				itd->ssplit |= 1 << iMicroIndex;
+				packets++;
+				iMicroIndex += urb->interval;
+				iNumofSlots--;
+
+				/*last packets or last slot */
+				if (packets == urb->number_of_packets
+					|| iNumofSlots == 0) {
+
+					itd->hw_transaction[iMicroIndex] |=
+						EHCI_ITD_IOC;
+
+					break;
+					
+				}
+			}
+
+			/*
+			 * If this ITD is not the head/root ITD, link this ITD to the ITD
+			 * that came before it.
+			 */
+			if (prev_itd) {
+				prev_itd->hw_next = (u32) (itd);
+			}
+
+			prev_itd = itd;
+
+			/*
+			 * Allocate an ISO PTD from the ISO PTD map list and
+			 * set the equivalent bit of the allocated PTD to active
+			 * in the bitmap so that this PTD will be included into
+			 * the periodic schedule
+			 */
+
+
+			iso_dbg(ISO_DBG_DATA,
+				"[phcd_submit_iso]: ITD index %d\n",
+				itd->framenumber);
+			phcd_iso_get_itd_ptd_index(hcd, itd);
+			iso_dbg(ISO_DBG_DATA,
+				"[phcd_submit_iso]: ITD index %d\n",
+				itd->itd_index);
+
+			/*if we dont have any space left */
+			if (itd->itd_index == TD_PTD_INV_PTD_INDEX) {
+				*status = -ENOSPC;
+				if (((int) (qhead->next_uframe - iNumofPTDs)) <
+					0){
+					/*plus max PTDs*/
+					qhead->next_uframe = qhead->next_uframe + PTD_PERIODIC_SIZE;	
+				}
+				qhead->next_uframe -= iNumofPTDs;
+
+				/* Handle SITD list cleanup */
+				if (urb->hcpriv) {
+					phcd_iso_itd_free_list(hcd, urb,
+							       *status);
+				}
+				return *status;
+			}
+
+			sitd_itd_list = &qhead->periodic_list.sitd_itd_head;
+			list_add_tail(&itd->itd_list, sitd_itd_list);
+			qhead->periodic_list.high_speed = 1;
+			qhead->periodic_list.ptdlocation |=
+				0x1 << itd->itd_index;
+
+			/* Indicate that a new ITD has been scheduled */
+			hcd->periodic_sched++;
+
+			/* Determine if there are any ITDs scheduled before this one. */
+			if (urb->hcpriv == 0){
+				urb->hcpriv = itd;
+			}
+			iPTDIndex++;
+
+		}		/*end of while */
+	}
+
+	/*end of HIGH SPEED */
+	/* Last td of current transaction */
+	if (high_speed == 0){
+		sitd->hw_next = EHCI_LIST_END;
+	}
+	urb->error_count = 0;
+	return *status;
+}				/* phcd_submit_iso */
+#endif /* CONFIG_ISO_SUPPORT */
diff --git a/drivers/usb/host/pehci/host/mem.c b/drivers/usb/host/pehci/host/mem.c
new file mode 100644
index 0000000..dbf28a9
--- /dev/null
+++ b/drivers/usb/host/pehci/host/mem.c
@@ -0,0 +1,355 @@
+/* 
+* Copyright (C) ST-Ericsson AP Pte Ltd 2010 
+*
+* ISP1763 Linux OTG Controller driver : host
+* 
+* This program is free software; you can redistribute it and/or modify it under the terms of 
+* the GNU General Public License as published by the Free Software Foundation; version 
+* 2 of the License. 
+* 
+* This program is distributed in the hope that it will be useful, but WITHOUT ANY  
+* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS  
+* FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more  
+* details. 
+* 
+* You should have received a copy of the GNU General Public License 
+* along with this program; if not, write to the Free Software 
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. 
+* 
+* This is a host controller driver file. Memory initialization, allocation, and 
+* deallocation are handled here.
+* 
+* Author : wired support <wired.support@stericsson.com>
+*
+*/
+
+#ifdef CONFIG_ISO_SUPPORT
+
+/* memory utilization functions */
+void
+phci_hcd_mem_init(void)
+{
+	int i = 0;
+	u32 start_addr = 0x1000;
+	struct isp1763_mem_addr *memaddr;
+	for (i = 0; i < BLK_TOTAL; i++) {
+		memaddr = &memalloc[i];
+		memset(memaddr, 0, sizeof *memaddr);
+	}
+	/*initialize block of 128bytes */
+	for (i = 0; i < BLK_128_; i++) {
+		memaddr = &memalloc[i];
+		memaddr->blk_num = i;
+		memaddr->used = 0;
+		memaddr->blk_size = BLK_SIZE_128;
+		memaddr->phy_addr = start_addr;
+		start_addr += BLK_SIZE_128;
+	}
+	/*initialize block of 256bytes */
+	for (i = BLK_128_; i < BLK_128_ + BLK_256_; i++) {
+		memaddr = &memalloc[i];
+		memaddr->blk_num = i;
+		memaddr->used = 0;
+		memaddr->blk_size = BLK_SIZE_256;
+		memaddr->phy_addr = start_addr;
+		start_addr += BLK_SIZE_256;
+	}
+	/*initialize block of 1024bytes */
+	for (i = BLK_128_ + BLK_256_; i < (BLK_128_ + BLK_256_ + BLK_1024_);
+		i++) {
+		memaddr = &memalloc[i];
+		memaddr->blk_num = i;
+		memaddr->used = 0;
+		memaddr->blk_size = BLK_SIZE_1024;
+		memaddr->phy_addr = start_addr;
+		start_addr += BLK_SIZE_1024;
+	}
+
+	/*initialize block of  2kbytes */
+	for (i = (BLK_128_ + BLK_256_ + BLK_1024_);
+		i < (BLK_128_ + BLK_256_ + BLK_1024_ + BLK_2048_); i++) {
+		memaddr = &memalloc[i];
+		memaddr->blk_num = i;
+		memaddr->used = 0;
+		memaddr->blk_size = BLK_SIZE_2048;
+		memaddr->phy_addr = start_addr;
+		start_addr += BLK_SIZE_2048;
+	}
+	/* initialize block of 4kbytes */
+	for (i = (BLK_128_ + BLK_256_ + BLK_1024_ + BLK_2048_);
+		i < (BLK_128_ + BLK_256_ + BLK_1024_ + BLK_2048_ + BLK_4096_); 
+		i++){
+		memaddr = &memalloc[i];
+		memaddr->blk_num = i;
+		memaddr->used = 0;
+		memaddr->blk_size = BLK_SIZE_4096;
+		memaddr->phy_addr = start_addr;
+		start_addr += BLK_SIZE_4096;
+	}
+	/* initialize block of 8kbytes */
+	for (i = (BLK_128_ + BLK_256_ + BLK_1024_ + BLK_2048_ + BLK_4096_); i <
+		(BLK_128_ + BLK_256_ + BLK_1024_ + BLK_2048_ + BLK_4096_ +
+		BLK_8196_); i++) {
+		memaddr = &memalloc[i];
+		memaddr->blk_num = i;
+		memaddr->used = 0;
+		memaddr->blk_size = BLK_SIZE_8192;
+		memaddr->phy_addr = start_addr;
+		start_addr += BLK_SIZE_8192;
+	}
+
+}
+
+
+/*free memory*/
+static void
+phci_hcd_mem_free(struct isp1763_mem_addr *memptr)
+{
+	/*block number to be freed */
+	int block = memptr->blk_num;
+
+	if (block < BLK_TOTAL){
+		if ((memptr->blk_size) && (memalloc[block].used != 0)) {
+			memalloc[block].used = 0;
+			memptr->used = 0;
+		}
+	}
+}
+
+
+/*allocate memory*/
+static void
+phci_hcd_mem_alloc(u32 size, struct isp1763_mem_addr *memptr, u32 flag)
+{
+	u32 blk_size = size;
+	u16 i;
+	u32 nextblk1 = 0, nextblk4 = 0;
+	u32 start = 0, end = 0;
+	struct isp1763_mem_addr *memaddr = 0;
+
+	memset(memptr, 0, sizeof *memptr);
+
+	pehci_print("phci_hcd_mem_alloc(size = %d)\n", size);
+
+	if (blk_size == 0) {
+		memptr->phy_addr = 0;
+		memptr->virt_addr = 0;
+		memptr->blk_size = 0;
+		memptr->num_alloc = 0;
+		memptr->blk_num = 0;
+		return;
+	}
+
+	for (i = 0; i < BLK_TOTAL; i++) {
+		memaddr = &memalloc[i];
+		if (!memaddr->used && size <= memaddr->blk_size) {
+			memaddr->used = 1;
+			memptr->used = 1;
+			memptr->blk_num = i;
+			memptr->blk_size = memaddr->blk_size;
+			memptr->phy_addr = memaddr->phy_addr;
+			memptr->virt_addr = memptr->phy_addr;
+			return;
+		}
+	}
+
+	return;
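+	/*
+	 * NOTE: the first-fit loop above always returns, so the size-class
+	 * allocator below is never reached.
+	 */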
+	/*end of the 1k blocks */
+	nextblk1 = BLK_256_ + BLK_1024_;
+	/*end of the 4k blocks */
+	nextblk4 = nextblk1 + BLK_4096_;
+
+	if (blk_size <= BLK_SIZE_128) {
+		blk_size = BLK_SIZE_128;
+		start = 0;
+		end = BLK_256_;
+	}
+	if (blk_size <= BLK_SIZE_256) {
+		blk_size = BLK_SIZE_256;
+		start = 0;
+		end = BLK_256_;
+	} else if (blk_size <= BLK_SIZE_1024) {
+		blk_size = BLK_SIZE_1024;
+		start = BLK_256_;
+		end = start + BLK_1024_;
+	} else if (blk_size > BLK_SIZE_1024) {
+		blk_size = BLK_SIZE_4096;
+		start = BLK_256_ + BLK_1024_;
+		end = start + BLK_4096_;
+	}
+
+	for (i = start; i < end; i++) {
+		memaddr = &memalloc[i];
+		if (!memaddr->used) {
+			memaddr->used = 1;
+			memptr->blk_num = i;
+			memptr->used = 1;
+			memptr->blk_size = blk_size;
+			memptr->phy_addr = memaddr->phy_addr;
+			memptr->virt_addr = memptr->phy_addr;
+			return;
+		}
+	}
+
+	/*look for in the next block if memory is free */
+	/*start from the first place of the next block */
+	start = end;
+
+	/*for 1k and 256 size request only 4k can be returned */
+	end = nextblk4;
+
+	for (i = start; i < end; i++) {
+		memaddr = &memalloc[i];
+		if (!memaddr->used) {
+			memaddr->used = 1;
+			memptr->used = 1;
+			memptr->blk_num = i;
+			memptr->blk_size = blk_size;
+			memptr->phy_addr = memaddr->phy_addr;
+			memptr->virt_addr = memptr->phy_addr;
+			return;
+		}
+	}
+
+}
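+
+/*
+ * Usage sketch (illustrative only): a PTD payload buffer is obtained and
+ * released as a pair, for example:
+ *
+ *	struct isp1763_mem_addr payload;
+ *	phci_hcd_mem_alloc(512, &payload, 0);	// first free block that is large enough
+ *	if (payload.phy_addr != 0)		// phy_addr stays 0 when no block is free
+ *		... program payload.phy_addr into the PTD ...
+ *	phci_hcd_mem_free(&payload);		// mark the block free again
+ */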
+
+#else
+
+void
+phci_hcd_mem_init(void)
+{
+	int i = 0;
+	u32 start_addr = 0x1000;
+	struct isp1763_mem_addr *memaddr;
+	for (i = 0; i < BLK_TOTAL; i++) {
+		memaddr = &memalloc[i];
+		memset(memaddr, 0, sizeof *memaddr);
+	}
+
+	/*initialize block of 256bytes */
+	for (i = 0; i < BLK_256_; i++) {
+		memaddr = &memalloc[i];
+		memaddr->blk_num = i;
+		memaddr->used = 0;
+		memaddr->blk_size = BLK_SIZE_256;
+		memaddr->phy_addr = start_addr;
+		start_addr += BLK_SIZE_256;
+	}
+	/*initialize block of 1024bytes */
+	for (i = BLK_256_; i < (BLK_256_ + BLK_1024_); i++) {
+		memaddr = &memalloc[i];
+		memaddr->blk_num = i;
+		memaddr->used = 0;
+		memaddr->blk_size = BLK_SIZE_1024;
+		memaddr->phy_addr = start_addr;
+		start_addr += BLK_SIZE_1024;
+	}
+
+	/*initialize block of  4kbytes */
+	for (i = (BLK_256_ + BLK_1024_); i < (BLK_256_ + BLK_1024_ + BLK_4096_);
+		i++) {
+		memaddr = &memalloc[i];
+		memaddr->blk_num = i;
+		memaddr->used = 0;
+		memaddr->blk_size = BLK_SIZE_4096;
+		memaddr->phy_addr = start_addr;
+		start_addr += BLK_SIZE_4096;
+	}
+
+}
+
+
+/*free memory*/
+static void
+phci_hcd_mem_free(struct isp1763_mem_addr *memptr)
+{
+	/*block number to be freed */
+	int block = memptr->blk_num;
+
+	if (block < BLK_TOTAL)
+		if ((memptr->blk_size) && (memalloc[block].used != 0)) {
+			memalloc[block].used = 0;
+			memptr->used = 0;
+		}
+}
+
+
+/*allocate memory*/
+static void
+phci_hcd_mem_alloc(u32 size, struct isp1763_mem_addr *memptr, u32 flag)
+{
+	u32 blk_size = size;
+	u16 i;
+	u32 nextblk1 = 0, nextblk4 = 0;
+	u32 start = 0, end = 0;
+	struct isp1763_mem_addr *memaddr = 0;
+
+	memset(memptr, 0, sizeof *memptr);
+
+	pehci_print("phci_hcd_mem_alloc(size = %d)\n", size);
+
+	if (blk_size == 0) {
+		memptr->phy_addr = 0;
+		memptr->virt_addr = 0;
+		memptr->blk_size = 0;
+		memptr->num_alloc = 0;
+		memptr->blk_num = 0;
+		return;
+	}
+
+	/*end of the 1k blocks */
+	nextblk1 = BLK_256_ + BLK_1024_;
+	/*end of the 4k blocks */
+	nextblk4 = nextblk1 + BLK_4096_;
+
+
+	if (blk_size <= BLK_SIZE_256) {
+		blk_size = BLK_SIZE_256;
+		start = 0;
+		end = BLK_256_;
+	} else if (blk_size <= BLK_SIZE_1024) {
+		blk_size = BLK_SIZE_1024;
+		start = BLK_256_;
+		end = start + BLK_1024_;
+	} else if (blk_size > BLK_SIZE_1024) {
+		blk_size = BLK_SIZE_4096;
+		start = BLK_256_ + BLK_1024_;
+		end = start + BLK_4096_;
+	}
+
+	for (i = start; i < end; i++) {
+		memaddr = &memalloc[i];
+		if (!memaddr->used) {
+			memaddr->used = 1;
+			memptr->blk_num = i;
+			memptr->used = 1;
+			memptr->blk_size = blk_size;
+			memptr->phy_addr = memaddr->phy_addr;
+			memptr->virt_addr = memptr->phy_addr;
+			return;
+		}
+	}
+
+	/*look for in the next block if memory is free */
+	/*start from the first place of the next block */
+	start = end;
+
+	/*for 1k and 256 size request only 4k can be returned */
+	end = nextblk4;
+
+	for (i = start; i < end; i++) {
+		memaddr = &memalloc[i];
+		if (!memaddr->used) {
+			memaddr->used = 1;
+			memptr->used = 1;
+			memptr->blk_num = i;
+			memptr->blk_size = blk_size;
+			memptr->phy_addr = memaddr->phy_addr;
+			memptr->virt_addr = memptr->phy_addr;
+			return;
+		}
+	}
+
+}
+
+#endif
diff --git a/drivers/usb/host/pehci/host/otg.c b/drivers/usb/host/pehci/host/otg.c
new file mode 100755
index 0000000..546d9e9
--- /dev/null
+++ b/drivers/usb/host/pehci/host/otg.c
@@ -0,0 +1,189 @@
+/* 
+* Copyright (C) ST-Ericsson AP Pte Ltd 2010 
+*
+* ISP1763 Linux OTG Controller driver : host
+* 
+* This program is free software; you can redistribute it and/or modify it under the terms of 
+* the GNU General Public License as published by the Free Software Foundation; version 
+* 2 of the License. 
+* 
+* This program is distributed in the hope that it will be useful, but WITHOUT ANY  
+* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS  
+* FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more  
+* details. 
+* 
+* You should have received a copy of the GNU General Public License 
+* along with this program; if not, write to the Free Software 
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. 
+* 
+* This is a host controller driver file. OTG related events are handled here.
+* 
+* Author : wired support <wired.support@stericsson.com>
+*
+*/
+
+
+/*hub device which connected with root port*/
+struct usb_device *hubdev = 0;
+/* hub interrupt urb*/
+struct urb *huburb;
+
+/*return otghub from here*/
+struct usb_device *
+phci_register_otg_device(struct isp1763_dev *dev)
+{
+	if (hubdev && hubdev->devnum >= 0x2) {
+		printk("OTG dev %x %d\n", (u32) hubdev, hubdev->devnum);
+		return hubdev;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(phci_register_otg_device);
+
+/*suspend the otg port(0)
+ * needed when port is switching
+ * from host to device
+ * */
+int
+phci_suspend_otg_port(struct isp1763_dev *dev, u32 command)
+{
+	int status = 0;
+	hubdev->otgstate = USB_OTG_SUSPEND;
+	if (huburb->status == -EINPROGRESS) {
+		huburb->status = 0;
+	}
+
+	huburb->status = 0;
+	huburb->complete(huburb);
+	return status;
+}
+EXPORT_SYMBOL(phci_suspend_otg_port);
+
+/*set the flag to enumerate the device*/
+int
+phci_enumerate_otg_port(struct isp1763_dev *dev, u32 command)
+{
+	/*set the flag to enumerate */
+	/*connect change interrupt will happen from
+	 * phci_intl_worker only
+	 * */
+	hubdev->otgstate = USB_OTG_ENUMERATE;
+	if (huburb->status == -EINPROGRESS) {
+		huburb->status = 0;
+	}
+	/*complete the urb */
+
+	huburb->complete(huburb);
+
+	/*reset the otghub urb status */
+	huburb->status = -EINPROGRESS;
+	return 0;
+}
+EXPORT_SYMBOL(phci_enumerate_otg_port);
+
+/*host controller resume sequence at otg port*/
+int
+phci_resume_otg_port(struct isp1763_dev *dev, u32 command)
+{
+	printk("Resume is called\n");
+	hubdev->otgstate = USB_OTG_RESUME;
+	if (huburb->status == -EINPROGRESS) {
+		huburb->status = 0;
+	}
+	/*complete the urb */
+
+	huburb->complete(huburb);
+
+	/*reset the otghub urb status */
+	huburb->status = -EINPROGRESS;
+	return 0;
+}
+EXPORT_SYMBOL(phci_resume_otg_port);
+/*host controller remote wakeup sequence at otg port*/
+int
+phci_remotewakeup(struct isp1763_dev *dev)
+{
+    printk("phci_remotewakeup_otg_port is called\n");
+    hubdev->otgstate = USB_OTG_REMOTEWAKEUP;
+    if(huburb->status == -EINPROGRESS)
+        huburb->status = 0;
+    /*complete the urb*/
+#if ((defined LINUX_269) || defined (LINUX_2611))
+    huburb->complete(huburb,NULL);      
+#else
+	 huburb->complete(huburb);
+#endif
+    /*reset the otghub urb status*/
+    huburb->status = -EINPROGRESS;
+    return 0;
+}
+EXPORT_SYMBOL(phci_remotewakeup);
+
+/*host controller wakeup sequence at otg port*/
+int
+phci_resume_wakeup(struct isp1763_dev *dev)
+{
+    printk("phci_wakeup_otg_port is called\n");
+#if 0
+    hubdev->otgstate = USB_OTG_WAKEUP_ALL;
+    if(huburb->status == -EINPROGRESS)
+#endif
+        huburb->status = 0;
+    /*complete the urb*/
+#if ((defined LINUX_269) || defined (LINUX_2611))
+    huburb->complete(huburb,NULL);      
+#else
+	 huburb->complete(huburb);
+#endif
+    /*reset the otghub urb status*/
+    huburb->status = -EINPROGRESS;
+    return 0;
+}
+EXPORT_SYMBOL(phci_resume_wakeup);
+
+struct isp1763_driver *host_driver;
+struct isp1763_driver *device_driver;
+
+void
+pehci_delrhtimer(struct isp1763_dev *dev)
+{
+
+	struct usb_hcd *usb_hcd =
+		container_of(huburb->dev->parent->bus, struct usb_hcd, self);
+	del_timer_sync(&usb_hcd->rh_timer);
+	del_timer(&usb_hcd->rh_timer);
+
+}
+EXPORT_SYMBOL(pehci_delrhtimer);
+
+int
+pehci_Deinitialize(struct isp1763_dev *dev)
+{
+	dev -= 2;
+	if (dev->index == 0) {
+		if (dev->driver) {
+			if (dev->driver->powerdown) {
+				dev->driver->powerdown(dev);
+			}
+		}
+	}
+	return 0;
+}
+EXPORT_SYMBOL(pehci_Deinitialize);
+
+int
+pehci_Reinitialize(struct isp1763_dev *dev)
+{
+
+	dev -= 2;
+	if (dev->index == 0) {
+		if(dev->driver->powerup){
+			dev->driver->powerup(dev);
+		}
+	}
+	return 0;
+}
+EXPORT_SYMBOL(pehci_Reinitialize);
+
+
diff --git a/drivers/usb/host/pehci/host/pehci.c b/drivers/usb/host/pehci/host/pehci.c
new file mode 100644
index 0000000..19e9441
--- /dev/null
+++ b/drivers/usb/host/pehci/host/pehci.c
@@ -0,0 +1,6567 @@
+/* 
+* Copyright (C) ST-Ericsson AP Pte Ltd 2010 
+*
+* ISP1763 Linux OTG Controller driver : host
+* 
+* This program is free software; you can redistribute it and/or modify it under the terms of 
+* the GNU General Public License as published by the Free Software Foundation; version 
+* 2 of the License. 
+* 
+* This program is distributed in the hope that it will be useful, but WITHOUT ANY  
+* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS  
+* FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more  
+* details. 
+* 
+* You should have received a copy of the GNU General Public License 
+* along with this program; if not, write to the Free Software 
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. 
+* 
+* Refer to the following files in ~/drivers/usb/host for copyright owners:
+* ehci-dbg.c, ehci-hcd.c, ehci-hub.c, ehci-mem.c, ehci-q.c and ehci-sched.c (kernel version 2.6.9)
+* Code is modified for ST-Ericsson product 
+* 
+* Author : wired support <wired.support@stericsson.com>
+*
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/usb.h>
+#include <linux/version.h>
+#include <stdarg.h>
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/system.h>
+#include <asm/unaligned.h>
+#include <linux/version.h>
+
+#include "../hal/isp1763.h"
+#include "pehci.h"
+#include "../hal/hal_intf.h"
+#include <linux/platform_device.h>
+#include <linux/wakelock.h>
+
+extern int HostComplianceTest;
+extern int HostTest;
+extern int No_Data_Phase;
+extern int No_Status_Phase;
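+/*
+ * Tuning values mirrored from the EHCI core driver: CERR is the
+ * per-transaction error retry count, RL the NAK reload count and
+ * MULT the transactions-per-microframe multiplier, for transfers
+ * behind a TT and for direct high-speed transfers respectively.
+ */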
+#define	EHCI_TUNE_CERR		3
+#define	URB_NO_INTERRUPT	0x0080
+#define	EHCI_TUNE_RL_TT		0
+#define	EHCI_TUNE_MULT_TT	1
+#define	EHCI_TUNE_RL_HS		0
+#define	EHCI_TUNE_MULT_HS	1
+
+
+#define POWER_DOWN_CTRL_NORMAL_VALUE	0xffff1ba0
+#define POWER_DOWN_CTRL_SUSPEND_VALUE	0xffff08b0
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
+/* USB_PORT_FEAT_HIGHSPEED is no longer defined in linux-2.6.35 and later */
+#define	USB_PORT_FEAT_HIGHSPEED 10
+#endif
+
+#ifdef CONFIG_ISO_SUPPORT
+
+#define	FALSE 0
+#define	TRUE (!FALSE)
+extern void *phcd_iso_sitd_to_ptd(phci_hcd * hcd,
+	struct ehci_sitd *sitd,
+	struct urb *urb, void *ptd);
+extern void *phcd_iso_itd_to_ptd(phci_hcd * hcd,
+	struct	ehci_itd *itd,
+	struct	urb *urb, void *ptd);
+
+extern unsigned	long phcd_submit_iso(phci_hcd *	hcd,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	struct usb_host_endpoint *ep,
+#else
+#endif
+	struct urb *urb, unsigned long *status);
+void pehci_hcd_iso_schedule(phci_hcd * hcd, struct urb *);
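+/* ISO scheduling bookkeeping used by the ISO scheduler/worker below:
+ * the current frame index, the index of the last scheduled PTD and a
+ * running packet count */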
+unsigned long lgFrameIndex = 0;
+unsigned long lgScheduledPTDIndex = 0;
+int igNumOfPkts = 0;
+#endif /* CONFIG_ISO_SUPPORT */
+
+struct isp1763_dev *isp1763_hcd;
+
+#ifdef HCD_PACKAGE
+/*file operation*/
+struct fasync_struct *fasync_q;
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+static void
+pehci_hcd_urb_complete(phci_hcd * hcd, struct ehci_qh *qh, struct urb *urb,
+	td_ptd_map_t * td_ptd_map, struct pt_regs *regs);
+#else
+static void
+pehci_hcd_urb_complete(phci_hcd * hcd, struct ehci_qh *qh, struct urb *urb,
+	td_ptd_map_t * td_ptd_map);
+#endif
+
+#include "otg.c"  /*OTG and HCD package needs it */
+
+
+int hcdpowerdown = 0;
+int portchange=0; //for remotewakeup
+EXPORT_SYMBOL(hcdpowerdown);
+unsigned char otg_se0_enable;
+EXPORT_SYMBOL(otg_se0_enable);
+
+
+/*Enable all other interrupt.*/
+
+#ifdef MSEC_INT_BASED
+#ifdef THREAD_BASED	/* used to test the interrupt mapping problem */
+//#define	INTR_ENABLE_MASK (HC_OPR_REG_INT|HC_CLK_RDY_INT )
+#define INTR_ENABLE_MASK (/*HC_MSEC_INT |*/ HC_INTL_INT | HC_ATL_INT| HC_ISO_INT /*| HC_EOT_INT | HC_ISO_INT*/)
+#else
+#define	INTR_ENABLE_MASK (HC_MSEC_INT|HC_OPR_REG_INT|HC_CLK_RDY_INT )
+#endif
+#else
+#define	INTR_ENABLE_MASK ( HC_INTL_INT | HC_ATL_INT |HC_ISO_INT| HC_EOT_INT|HC_OPR_REG_INT|HC_CLK_RDY_INT)
+#endif
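+/*
+ * INTR_ENABLE_MASK selects which ISP1763 interrupt sources are unmasked:
+ * the MSEC_INT_BASED build relies on the millisecond SOF interrupt plus
+ * register/clock-ready events (or on the per-buffer done interrupts in the
+ * THREAD_BASED test variant), while the default build enables the
+ * INTL/ATL/ISO done interrupts together with EOT and the register/clock
+ * events directly.
+ */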
+
+
+
+#ifdef THREAD_BASED
+
+#define NO_SOF_REQ_IN_TSK 		0x1
+#define NO_SOF_REQ_IN_ISR 		0x2
+#define NO_SOF_REQ_IN_REQ 	0x3
+#define MSEC_INTERVAL_CHECKING 5
+
+typedef struct _st_UsbIt_Msg_Struc {
+	struct usb_hcd 		*usb_hcd;
+	u8				uIntStatus;
+	struct list_head 		list;
+} st_UsbIt_Msg_Struc, *pst_UsbIt_Msg_Struc ;
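+/* each message queued for the interrupt thread carries the hcd that
+ * raised the interrupt and the latched interrupt status to process */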
+
+typedef struct _st_UsbIt_Thread {
+    wait_queue_head_t       	ulThrdWaitQhead;
+    int                           		lThrdWakeUpNeeded;
+    struct task_struct           	*phThreadTask;
+    spinlock_t              lock;
+} st_UsbIt_Thread, *pst_UsbIt_Thread;
+
+st_UsbIt_Thread g_stUsbItThreadHandler;
+
+st_UsbIt_Msg_Struc 	g_messList;
+st_UsbIt_Msg_Struc 	g_enqueueMessList;
+spinlock_t              	enqueue_lock;
+
+int pehci_hcd_process_irq_it_handle(struct usb_hcd* usb_hcd_);
+int pehci_hcd_process_irq_in_thread(struct usb_hcd *usb_hcd_);
+
+#endif /*THREAD_BASED*/
+
+#ifdef THREAD_BASED
+phci_hcd *g_pehci_hcd;
+#endif
+
+
+struct wake_lock pehci_wake_lock;
+
+/*---------------------------------------------------
+ *    Globals for EHCI
+ -----------------------------------------------------*/
+
+/* used	when updating hcd data */
+static spinlock_t hcd_data_lock	= SPIN_LOCK_UNLOCKED;
+
+static const char hcd_name[] = "ST-Ericsson ISP1763";
+static td_ptd_map_buff_t td_ptd_map_buff[TD_PTD_TOTAL_BUFF_TYPES];	/* td-ptd map buffers for all ISP1763 buffer types */
+
+static u8 td_ptd_pipe_x_buff_type[TD_PTD_TOTAL_BUFF_TYPES] = {
+	TD_PTD_BUFF_TYPE_ATL,
+	TD_PTD_BUFF_TYPE_INTL,
+	TD_PTD_BUFF_TYPE_ISTL
+};
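+/* the three PTD buffer types map to the ISP1763 transfer lists:
+ * ATL (asynchronous: control/bulk), INTL (interrupt) and ISTL (isochronous) */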
+
+
+/*global memory	blocks*/
+isp1763_mem_addr_t memalloc[BLK_TOTAL];
+#include "mem.c"
+#include "qtdptd.c"
+
+#ifdef CONFIG_ISO_SUPPORT
+#include "itdptd.c"
+#endif /* CONFIG_ISO_SUPPORT */
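+/* mem.c, qtdptd.c and (with CONFIG_ISO_SUPPORT) itdptd.c are compiled
+ * directly into this translation unit rather than as separate objects */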
+
+static int
+pehci_rh_control(struct	usb_hcd	*usb_hcd, u16 typeReq,
+		 u16 wValue, u16 wIndex, char *buf, u16	wLength);
+
+static int pehci_bus_suspend(struct usb_hcd *usb_hcd);
+static int pehci_bus_resume(struct usb_hcd *usb_hcd);
+/*----------------------------------------------------*/
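+/*
+ * Called when a device goes away: releases the PTD slot and qh resources
+ * for the endpoint; for an interrupt endpoint the transfer is skipped in
+ * hardware and the pending URB is completed before the qh is freed.
+ */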
+static void
+pehci_complete_device_removal(phci_hcd * hcd, struct ehci_qh *qh)
+{
+	td_ptd_map_t *td_ptd_map;
+	td_ptd_map_buff_t *td_ptd_buff;
+	struct urb * urb;
+	urb_priv_t *urb_priv;
+	struct ehci_qtd	*qtd = 0;
+//	struct usb_hcd *usb_hcd=&hcd->usb_hcd;
+	u16 skipmap=0;
+
+	if (qh->type ==	TD_PTD_BUFF_TYPE_ISTL) {
+#ifdef COMMON_MEMORY
+		phci_hcd_mem_free(&qh->memory_addr);
+#endif
+		return;
+	}
+
+	td_ptd_buff = &td_ptd_map_buff[qh->type];
+	td_ptd_map = &td_ptd_buff->map_list[qh->qtd_ptd_index];
+
+	/*this flag should only be set when the device is going away */
+	td_ptd_map->state = TD_PTD_REMOVE;
+	/*if nothing there */
+	if (list_empty(&qh->qtd_list)) {
+		if (td_ptd_map->state != TD_PTD_NEW) {
+			phci_hcd_release_td_ptd_index(qh);
+		}
+		qha_free(qha_cache, qh);
+		qh = 0;
+		return;
+	} else {
+	
+		if(!list_empty(&qh->qtd_list)){
+				qtd=NULL;
+				qtd = list_entry(qh->qtd_list.next, struct ehci_qtd, qtd_list);
+				if(qtd){
+					urb=qtd->urb;
+					urb_priv= urb->hcpriv;
+					
+					if(urb)
+					switch (usb_pipetype(urb->pipe)) {
+						case PIPE_CONTROL:
+						case PIPE_BULK:
+							break;
+						case PIPE_INTERRUPT:
+							td_ptd_buff = &td_ptd_map_buff[TD_PTD_BUFF_TYPE_INTL];
+							td_ptd_map = &td_ptd_buff->map_list[qh->qtd_ptd_index];
+
+							/*urb is already been removed */
+						//	if (td_ptd_map->state == TD_PTD_NEW) {
+						//		kfree(urb_priv);
+						//		break;
+						//	}
+
+							/* These TDs are not pending anymore */
+							td_ptd_buff->pending_ptd_bitmap &= ~td_ptd_map->ptd_bitmap;
+
+							td_ptd_map->state = TD_PTD_REMOVE;
+							urb_priv->state	|= DELETE_URB;
+
+							/*read the skipmap, to see if this transfer has	to be rescheduled */
+							skipmap	=
+							isp1763_reg_read16(hcd->dev, hcd->regs.inttdskipmap,
+							skipmap);
+
+							isp1763_reg_write16(hcd->dev, hcd->regs.inttdskipmap,
+							skipmap | td_ptd_map->ptd_bitmap);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+							pehci_hcd_urb_complete(hcd, qh, urb, td_ptd_map, NULL);
+#else
+							pehci_hcd_urb_complete(hcd, qh, urb, td_ptd_map);
+#endif
+							break;
+					}
+
+					
+				}else{
+					//break;
+				}
+		}
+		qha_free(qha_cache, qh);
+		qh = 0;
+		return;
+	}
+	/*MUST not come	down below this	*/
+	err("Never Error: Should not come to this portion of code\n");
+
+	return;
+}
+
+/* Polls the register at ptr: if the register value masked with mask
+ * equals done, the operation succeeded; otherwise it fails with a
+ * timeout after roughly usec microseconds. */
+static int
+pehci_hcd_handshake(phci_hcd * hcd, u32	ptr, u32 mask, u32 done, int usec)
+{
+	u32 result = 0;
+	do {
+		result = isp1763_reg_read16(hcd->dev, ptr, result);
+		printk(KERN_NOTICE "Registr %x val is %x\n", ptr, result);
+		if (result == ~(u32) 0)	{/* card removed */
+			return -ENODEV;
+		}
+		result &= mask;
+		if (result == done) {
+			return 0;
+		}
+		udelay(1);
+		usec--;
+	} while	(usec >	0);
+
+	return -ETIMEDOUT;
+}
+
+#ifndef	MSEC_INT_BASED
+/*schedule atl and interrupt tds,
+  only when we are not running on sof interrupt
+ */
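+/*
+ * Rough flow: read the skip/interrupt-mask and buffer-status registers for
+ * the buffer type, take the first NEW qtd of the qh, allocate payload
+ * memory, convert the qtd into a PTD header (phci_hcd_qha_from_qtd), write
+ * the header (and any SETUP/OUT payload) into controller memory, then clear
+ * the skip bit and enable the corresponding buffer so the controller
+ * processes it.
+ */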
+static void
+pehci_hcd_td_ptd_submit_urb(phci_hcd * hcd, struct ehci_qh *qh,	u8 bufftype)
+{
+	unsigned long flags=0;
+	struct ehci_qtd	*qtd = 0;
+	struct urb *urb	= 0;
+	struct _isp1763_qha *qha = 0;
+	u16 location = 0;
+	u16 skipmap = 0;
+	u16 buffstatus = 0;
+	u16 ormask = 0;
+	u16 intormask =	0;
+	u32 length = 0;
+	struct list_head *head;
+
+	td_ptd_map_t *td_ptd_map;
+	td_ptd_map_buff_t *ptd_map_buff;
+	struct isp1763_mem_addr	*mem_addr = 0;
+
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+	pehci_print("Buuffer type %d\n", bufftype);
+
+	spin_lock_irqsave(&hcd->lock, flags);
+	ptd_map_buff = &td_ptd_map_buff[bufftype];
+
+	qha = &hcd->qha;
+
+	switch (bufftype) {
+	case TD_PTD_BUFF_TYPE_ATL:
+
+		skipmap	=
+			isp1763_reg_read16(hcd->dev, hcd->regs.atltdskipmap,
+					   skipmap);
+
+		ormask = isp1763_reg_read16(hcd->dev, hcd->regs.atl_irq_mask_or,
+					    ormask);
+		break;
+	case TD_PTD_BUFF_TYPE_INTL:
+
+		skipmap	=
+			isp1763_reg_read16(hcd->dev, hcd->regs.inttdskipmap,
+					   skipmap);
+
+		intormask =
+			isp1763_reg_read16(hcd->dev, hcd->regs.int_irq_mask_or,
+					   intormask);
+		break;
+	default:
+
+		skipmap	=
+			isp1763_reg_read16(hcd->dev, hcd->regs.isotdskipmap,
+					   skipmap);
+		break;
+
+	}
+
+
+	buffstatus =
+		isp1763_reg_read16(hcd->dev, hcd->regs.buffer_status,
+				   buffstatus);
+
+	/*header, qtd, and urb of current transfer */
+	location = qh->qtd_ptd_index;
+	td_ptd_map = &ptd_map_buff->map_list[location];
+
+	if (!(qh->qh_state & QH_STATE_TAKE_NEXT)) {
+		pehci_check("qh	will schdule from interrupt routine,map	%x\n",
+			    td_ptd_map->ptd_bitmap);
+		spin_unlock_irqrestore(&hcd->lock, flags);
+		return;
+	}
+	head = &qh->qtd_list;
+	qtd = list_entry(head->next, struct ehci_qtd, qtd_list);
+
+	/*already scheduled, may be from interrupt */
+	if (!(qtd->state & QTD_STATE_NEW)) {
+		pehci_check("qtd already in, state %x\n", qtd->state);
+		spin_unlock_irqrestore(&hcd->lock, flags);
+		return;
+	}
+
+	qtd->state &= ~QTD_STATE_NEW;
+	qtd->state |= QTD_STATE_SCHEDULED;
+
+	qh->qh_state &=	~QH_STATE_TAKE_NEXT;
+	/*take the first td */
+	td_ptd_map->qtd	= qtd;
+	/*take the urb */
+	urb = qtd->urb;
+	ptd_map_buff->active_ptds++;
+
+	/*trust the atl worker, at this location there won't be any td */
+	/*if this td is	the last one */
+	if (qtd->state & QTD_STATE_LAST) {
+		qh->hw_current = cpu_to_le32(0);
+		/*else update the hw_next of qh	to the next td */
+	} else {
+		qh->hw_current = qtd->hw_next;
+	}
+	memset(qha, 0, sizeof(isp1763_qha));
+
+	pehci_check("td	being scheduled	: length: %d, device: %d, map: %x\n",
+		    qtd->length, urb->dev->devnum, td_ptd_map->ptd_bitmap);
+	/*NEW, now need	to get the memory for this transfer */
+	length = qtd->length;
+	mem_addr = &qtd->mem_addr;
+	phci_hcd_mem_alloc(length, mem_addr, 0);
+	if (length && ((mem_addr->phy_addr == 0) || (mem_addr->virt_addr == 0))) {
+		err("Never Error: Can not allocate memory for the current td,length %d\n", length);
+		/*should not happen */
+		/*can only happen when we exceed the limit of devices we
+		   support (max 4 mass-storage devices at a time) */
+	}
+	phci_hcd_qha_from_qtd(hcd, qtd, qtd->urb, (void *) qha,
+		td_ptd_map->ptd_ram_data_addr, qh);
+	if (qh->type ==	TD_PTD_BUFF_TYPE_INTL) {
+		phci_hcd_qhint_schedule(hcd, qh, qtd, (isp1763_qhint *)	qha,
+					qtd->urb);
+	}
+	/*write	qha into the header of the host	controller */
+	isp1763_mem_write(hcd->dev, td_ptd_map->ptd_header_addr, 0,
+			  (u32 *) (qha), PHCI_QHA_LENGTH, 0);
+
+	/*if this is SETUP/OUT token , then need to write into the buffer */
+	/*length should	be valid and supported by the ptd */
+	if (qtd->length && (qtd->length <= HC_ATL_PL_SIZE)){
+		switch (PTD_PID(qha->td_info2))	{
+		case OUT_PID:
+		case SETUP_PID:
+
+			isp1763_mem_write(hcd->dev, (u32) mem_addr->phy_addr, 0,
+					  (void	*) qtd->hw_buf[0], length, 0);
+
+
+#if 0
+					int i=0;
+					int *data_addr= qtd->hw_buf[0];
+					printk("\n");
+					for(i=0;i<length;i+=4) printk("[0x%X] ",*data_addr++);
+					printk("\n");
+#endif
+
+			
+
+			break;
+		}
+	}
+
+	/*unskip the tds at this location */
+	switch (bufftype) {
+	case TD_PTD_BUFF_TYPE_ATL:
+		skipmap	&= ~td_ptd_map->ptd_bitmap;
+		/*enable atl interrupts	on donemap */
+		ormask |= td_ptd_map->ptd_bitmap;
+
+		isp1763_reg_write16(hcd->dev, hcd->regs.atl_irq_mask_or,
+				    ormask);
+		break;
+
+	case TD_PTD_BUFF_TYPE_INTL:
+		skipmap	&= ~td_ptd_map->ptd_bitmap;
+		intormask |= td_ptd_map->ptd_bitmap;
+
+		isp1763_reg_write16(hcd->dev, hcd->regs.int_irq_mask_or,
+				    intormask);
+		break;
+
+	case TD_PTD_BUFF_TYPE_ISTL:
+		skipmap	&= ~td_ptd_map->ptd_bitmap;
+
+		isp1763_reg_write16(hcd->dev, hcd->regs.isotdskipmap, skipmap);
+		break;
+	}
+
+	/*if any new schedule, enable the atl buffer */
+	switch (bufftype) {
+	case TD_PTD_BUFF_TYPE_ATL:
+
+		isp1763_reg_write16(hcd->dev, hcd->regs.buffer_status,
+				    buffstatus | ATL_BUFFER);
+
+		isp1763_reg_write16(hcd->dev, hcd->regs.atltdskipmap, skipmap);
+		buffstatus |= ATL_BUFFER;
+		break;
+	case TD_PTD_BUFF_TYPE_INTL:
+
+		isp1763_reg_write16(hcd->dev, hcd->regs.buffer_status,
+				    buffstatus | INT_BUFFER);
+
+		isp1763_reg_write16(hcd->dev, hcd->regs.inttdskipmap, skipmap);
+		break;
+	case TD_PTD_BUFF_TYPE_ISTL:
+		/*not supposed to be seen here */
+
+		isp1763_reg_write16(hcd->dev, hcd->regs.buffer_status,
+				    buffstatus | ISO_BUFFER);
+		break;
+	}
+	spin_unlock_irqrestore(&hcd->lock, flags);
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+	return;
+
+}
+#endif
+
+
+
+#ifdef MSEC_INT_BASED
+/*schedule next	(atl/int)tds and any pending tds*/
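+/*
+ * For every bit set in donemap this walks the td/ptd map, skips slots that
+ * are new or being removed, picks the next qtd of the endpoint (or reloads
+ * a pending one), converts it to a PTD, writes the header and any
+ * SETUP/OUT payload into controller memory, and finally unskips the
+ * scheduled slots and re-enables the ATL/INT buffer.
+ */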
+static void
+pehci_hcd_schedule_pending_ptds(phci_hcd * hcd, u16 donemap, u8 bufftype,
+				u16 only)
+{
+	struct ehci_qtd	*qtd = 0;
+	struct ehci_qh *qh = 0;
+	struct list_head *qtd_list = 0;
+	struct _isp1763_qha allqha;
+	struct _isp1763_qha *qha = 0;
+	u16 mask = 0x1,	index =	0;
+	u16 location = 0;
+	u16 skipmap = 0;
+	u32 newschedule	= 0;
+	u16 buffstatus = 0;
+	u16 schedulemap	= 0;
+#ifndef	CONFIG_ISO_SUPPORT
+	u16 lasttd = 1;
+#endif
+	u16 lastmap = 0;
+	struct urb *urb	= 0;
+	urb_priv_t *urbpriv = 0;
+	int length = 0;
+	u16 ormask = 0,	andmask	= 0;
+	u16 intormask =	0;
+	td_ptd_map_t *td_ptd_map;
+	td_ptd_map_buff_t *ptd_map_buff;
+	struct isp1763_mem_addr	*mem_addr = 0;
+
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+	pehci_print("Buffer type %d\n",	bufftype);
+
+	/*need to hold this lock if another interrupt is coming
+	   for a previously scheduled transfer while scheduling new tds
+	 */
+	spin_lock(&hcd_data_lock);
+	ptd_map_buff = &td_ptd_map_buff[bufftype];
+	qha = &allqha;
+	switch (bufftype) {
+	case TD_PTD_BUFF_TYPE_ATL:
+
+		skipmap	=
+			isp1763_reg_read16(hcd->dev, hcd->regs.atltdskipmap,
+					   skipmap);
+		rmb();
+
+		ormask = isp1763_reg_read16(hcd->dev, hcd->regs.atl_irq_mask_or,
+					    ormask);
+
+		andmask	=
+			isp1763_reg_read16(hcd->dev, hcd->regs.atl_irq_mask_and,
+					   andmask);
+		break;
+	case TD_PTD_BUFF_TYPE_INTL:
+
+		skipmap	=
+			isp1763_reg_read16(hcd->dev, hcd->regs.inttdskipmap,
+					   skipmap);
+		/*read the interrupt mask registers */
+
+		intormask =
+			isp1763_reg_read16(hcd->dev, hcd->regs.int_irq_mask_or,
+					   intormask);
+		break;
+	default:
+		err("Never Error: Bogus	type of	bufer\n");
+		return;
+	}
+
+	buffstatus =
+		isp1763_reg_read16(hcd->dev, hcd->regs.buffer_status,
+				   buffstatus);
+	/*td headers need attention */
+	schedulemap = donemap;
+	while (schedulemap) {
+		index =	schedulemap & mask;
+		schedulemap &= ~mask;
+		mask <<= 1;
+
+		if (!index) {
+			location++;
+			continue;
+		}
+
+		td_ptd_map = &ptd_map_buff->map_list[location];
+		/*	can happen if donemap comes after
+		   removal of the urb and associated tds
+		 */
+		if ((td_ptd_map->state == TD_PTD_NEW) ||
+			(td_ptd_map->state == TD_PTD_REMOVE)) {
+			qh = td_ptd_map->qh;
+			pehci_check
+				("should not come here,	map %x,pending map %x\n",
+				 td_ptd_map->ptd_bitmap,
+				 ptd_map_buff->pending_ptd_bitmap);
+
+			pehci_check("buffer type %s\n",
+				(bufftype == 0) ? "ATL" : "INTL");
+			donemap	&= ~td_ptd_map->ptd_bitmap;
+			/*clear	the pending map	*/
+			ptd_map_buff->pending_ptd_bitmap &=
+				~td_ptd_map->ptd_bitmap;
+			location++;
+			continue;
+		}
+
+		/*no endpoint at this location */
+		if (!(td_ptd_map->qh)) {
+			err("queue head	can not	be null	here\n");
+			/*move to the next location */
+			ptd_map_buff->pending_ptd_bitmap &=
+				~td_ptd_map->ptd_bitmap;
+			location++;
+			continue;
+		}
+
+		/*current endpoint */
+		qh = td_ptd_map->qh;
+		if (!(skipmap &	td_ptd_map->ptd_bitmap)) {
+			/*should not happen, if	happening, then	*/
+			pehci_check("buffertype	%d,td_ptd_map %x,skipnap %x\n",
+				    bufftype, td_ptd_map->ptd_bitmap, skipmap);
+			lastmap	= td_ptd_map->ptd_bitmap;
+			donemap	&= ~td_ptd_map->ptd_bitmap;
+			ptd_map_buff->pending_ptd_bitmap &=
+				~td_ptd_map->ptd_bitmap;
+			location++;
+			continue;
+		}
+
+		/*if we processed all the tds in this transfer */
+		if (td_ptd_map->lasttd)	{
+			err("should not	show  map %x,qtd %p\n",
+			td_ptd_map->ptd_bitmap, td_ptd_map->qtd);
+			/*this can happen in case the transfer is not being
+			 * processed by the host, though the transfer is there
+			 */
+			qh->hw_current = cpu_to_le32(td_ptd_map->qtd);
+			ptd_map_buff->pending_ptd_bitmap &=
+				~td_ptd_map->ptd_bitmap;
+			location++;
+			continue;
+		}
+
+		/*if we	have ptd that is going for reload */
+		if ((td_ptd_map->qtd) && (td_ptd_map->state & TD_PTD_RELOAD)) {
+			warn("%s: reload td\n",	__FUNCTION__);
+			td_ptd_map->state &= ~TD_PTD_RELOAD;
+			qtd = td_ptd_map->qtd;
+			goto loadtd;
+		}
+
+		/* qh is there but no qtd so it	means fresh transfer */
+		if ((td_ptd_map->qh) &&	!(td_ptd_map->qtd)) {
+			if (list_empty(&qh->qtd_list)) {
+				/*should not happen again, as it comes here
+				   when it has a td in its map
+				 */
+				pehci_check
+					("must not come	here any more, td map %x\n",
+					 td_ptd_map->ptd_bitmap);
+				/*this location is idle and can be freed next time if
+				   no new transfers are coming for it */
+				donemap	&= ~td_ptd_map->ptd_bitmap;
+				td_ptd_map->state |= TD_PTD_IDLE;
+				ptd_map_buff->pending_ptd_bitmap &=
+					~td_ptd_map->ptd_bitmap;
+				location++;
+				continue;
+			}
+			qtd_list = &qh->qtd_list;
+			qtd = td_ptd_map->qtd =
+				list_entry(qtd_list->next, struct ehci_qtd,
+					   qtd_list);
+			/*got the td, now goto reload */
+			goto loadtd;
+		}
+
+		/*if there is already one qtd there in the transfer */
+		if (td_ptd_map->qtd) {
+			/*new schedule */
+			qtd = td_ptd_map->qtd;
+		}
+		loadtd:
+		/*should not happen */
+		if (!qtd) {
+			err("this piece	of code	should not be executed\n");
+			ptd_map_buff->pending_ptd_bitmap &=
+				~td_ptd_map->ptd_bitmap;
+			location++;
+			continue;
+		}
+
+		ptd_map_buff->active_ptds++;
+		/*clear	the pending map	here */
+		ptd_map_buff->pending_ptd_bitmap &= ~td_ptd_map->ptd_bitmap;
+
+
+
+		/*if this td is	the last one */
+		if (qtd->state & QTD_STATE_LAST) {
+			/*no qtd anymore */
+			qh->hw_current = cpu_to_le32(0);
+
+			/*else update the hw_next of qh	to the next td */
+		} else {
+			qh->hw_current = qtd->hw_next;
+		}
+
+		if (location !=	qh->qtd_ptd_index) {
+			err("Never Error: Endpoint header location and scheduling information are not same\n");
+		}
+
+		/*next location	*/
+		location++;
+		/*found	new transfer */
+		newschedule = 1;
+		/*take the urb */
+		urb = qtd->urb;
+		/*sometimes we miss a td due to the skipmap,
+		   so make sure we don't submit the
+		   same one again
+		 */
+		if (!(qtd->state & QTD_STATE_NEW)) {
+			err("Never Error: We should not submit the same td again\n");
+			continue;
+		}
+
+		urbpriv	= (urb_priv_t *) urb->hcpriv;
+		urbpriv->timeout = 0;
+
+		/*no more new */
+		qtd->state &= ~QTD_STATE_NEW;
+		qtd->state |= QTD_STATE_SCHEDULED;
+
+
+
+		/*NEW, now need	to get the memory for this transfer */
+		length = qtd->length;
+		mem_addr = &qtd->mem_addr;
+		phci_hcd_mem_alloc(length, mem_addr, 0);
+		if (length && ((mem_addr->phy_addr == 0)
+			       || (mem_addr->virt_addr == 0))) {
+
+			err("Never Error: Can not allocate memory for the current td,length %d\n", length);
+			location++;
+			continue;
+		}
+
+		pehci_check("qtd being scheduled %p, device %d,map %x\n", qtd,
+			    urb->dev->devnum, td_ptd_map->ptd_bitmap);
+
+
+		memset(qha, 0, sizeof(isp1763_qha));
+		/*convert qtd to qha */
+		phci_hcd_qha_from_qtd(hcd, qtd,	qtd->urb, (void	*) qha,
+			td_ptd_map->ptd_ram_data_addr, qh);
+
+		if (qh->type ==	TD_PTD_BUFF_TYPE_INTL) {
+			phci_hcd_qhint_schedule(hcd, qh, qtd,
+				(isp1763_qhint *) qha,
+				qtd->urb);
+
+		}
+
+
+		length = PTD_XFERRED_LENGTH(qha->td_info1 >> 3);
+		if (length > HC_ATL_PL_SIZE) {
+			err("Never Error: Bogus	length,length %d(max %d)\n",
+			qtd->length, HC_ATL_PL_SIZE);
+		}
+
+		/*write	qha into the header of the host	controller */
+		isp1763_mem_write(hcd->dev, td_ptd_map->ptd_header_addr, 0,
+			(u32 *) (qha), PHCI_QHA_LENGTH, 0);
+
+#ifdef PTD_DUMP_SCHEDULE
+		printk("SCHEDULE next (atl/int)tds PTD header\n");
+		printk("DW0: 0x%08X\n", qha->td_info1);
+		printk("DW1: 0x%08X\n", qha->td_info2);
+		printk("DW2: 0x%08X\n", qha->td_info3);
+		printk("DW3: 0x%08X\n", qha->td_info4);
+#endif
+		
+		/*if this is SETUP/OUT token , then need to write into the buffer */
+		/*length should	be valid */
+		if (qtd->length && (length <= HC_ATL_PL_SIZE)){
+			switch (PTD_PID(qha->td_info2))	{
+			case OUT_PID:
+			case SETUP_PID:
+
+				isp1763_mem_write(hcd->dev,
+					(u32)	mem_addr->phy_addr, 0,
+					(void	*) qtd->hw_buf[0],
+					length, 0);
+#if 0
+					int i=0;
+					int *data_addr= qtd->hw_buf[0];
+					printk("\n");
+					for(i=0;i<length;i+=4) printk("[0x%X] ",*data_addr++);
+					printk("\n");
+#endif
+
+
+
+				break;
+			}
+		}
+
+		/*unskip the tds at this location */
+		switch (bufftype) {
+		case TD_PTD_BUFF_TYPE_ATL:
+			skipmap	&= ~td_ptd_map->ptd_bitmap;
+			lastmap	= td_ptd_map->ptd_bitmap;
+			/*try to reduce	the interrupts */
+			ormask |= td_ptd_map->ptd_bitmap;
+
+			isp1763_reg_write16(hcd->dev, hcd->regs.atl_irq_mask_or,
+					    ormask);
+			break;
+
+		case TD_PTD_BUFF_TYPE_INTL:
+			skipmap	&= ~td_ptd_map->ptd_bitmap;
+			lastmap	= td_ptd_map->ptd_bitmap;
+			intormask |= td_ptd_map->ptd_bitmap;
+			;
+			isp1763_reg_write16(hcd->dev, hcd->regs.int_irq_mask_or,
+					    intormask);
+			break;
+
+		case TD_PTD_BUFF_TYPE_ISTL:
+#ifdef CONFIG_ISO_SUPPORT
+			iso_dbg(ISO_DBG_INFO,
+				"Never Error: Should not come here\n");
+#else
+			skipmap	&= ~td_ptd_map->ptd_bitmap;
+
+			isp1763_reg_write16(hcd->dev, hcd->regs.isotdskipmap,
+					    skipmap);
+
+			isp1763_reg_write16(hcd->dev, hcd->regs.isotdlastmap,
+				lasttd);
+#endif /* CONFIG_ISO_SUPPORT */
+			break;
+		}
+
+
+	}
+	/*if any new schedule, enable the atl buffer */
+
+	if (newschedule) {
+		switch (bufftype) {
+		case TD_PTD_BUFF_TYPE_ATL:
+
+			isp1763_reg_write16(hcd->dev, hcd->regs.buffer_status,
+					    buffstatus | ATL_BUFFER);
+			/*we only come here for those tds that have to be scheduled */
+			/*so skip map must be in place */
+			if (skipmap & donemap) {
+				pehci_check
+					("must be both ones compliment of each other\n");
+				pehci_check
+					("problem, skipmap %x, donemap %x,\n",
+					 skipmap, donemap);
+
+			}
+			skipmap	&= ~donemap;
+
+			isp1763_reg_write16(hcd->dev, hcd->regs.atltdskipmap,
+					    skipmap);
+
+			break;
+		case TD_PTD_BUFF_TYPE_INTL:
+
+			isp1763_reg_write16(hcd->dev, hcd->regs.buffer_status,
+					    buffstatus | INT_BUFFER);
+			skipmap	&= ~donemap;
+
+			isp1763_reg_write16(hcd->dev, hcd->regs.inttdskipmap,
+					    skipmap);
+			break;
+		case TD_PTD_BUFF_TYPE_ISTL:
+#ifndef	CONFIG_ISO_SUPPORT
+
+			isp1763_reg_write16(hcd->dev, hcd->regs.buffer_status,
+					    buffstatus | ISO_BUFFER);
+#endif
+			break;
+		}
+	}
+	spin_unlock(&hcd_data_lock);
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+}
+#endif
+
+
+
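+/*
+ * Schedule a single qtd on the PTD slot already owned by its qh: allocate
+ * payload memory, build the PTD header from the qtd, write the header and
+ * any SETUP/OUT payload into controller memory and mark the qtd as
+ * scheduled.
+ */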
+static void
+pehci_hcd_qtd_schedule(phci_hcd	* hcd, struct ehci_qtd *qtd,
+		       struct ehci_qh *qh, td_ptd_map_t	* td_ptd_map)
+{
+	struct urb *urb;
+	urb_priv_t *urbpriv = 0;
+	u32 length=0;
+	struct isp1763_mem_addr	*mem_addr = 0;
+	struct _isp1763_qha *qha, qhtemp;
+
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+
+	if (qtd->state & QTD_STATE_SCHEDULED) {
+		return;
+	}
+	/*redundant */
+	qha = &qhtemp;
+
+	/*if this td is	the last one */
+	if (qtd->state & QTD_STATE_LAST) {
+		/*no qtd anymore */
+		qh->hw_current = cpu_to_le32(0);
+
+		/*else update the hw_next of qh	to the next td */
+	} else {
+		qh->hw_current = qtd->hw_next;
+	}
+
+	urb = qtd->urb;
+	urbpriv	= (urb_priv_t *) urb->hcpriv;
+	urbpriv->timeout = 0;
+
+	/*NEW, now need	to get the memory for this transfer */
+	length = qtd->length;
+	mem_addr = &qtd->mem_addr;
+	phci_hcd_mem_alloc(length, mem_addr, 0);
+	if (length && ((mem_addr->phy_addr == 0) || (mem_addr->virt_addr == 0))) {
+		err("Never Error: Cannot allocate memory for the current td,length %d\n", length);
+		return;
+	}
+
+	pehci_check("newqtd being scheduled, device: %d,map: %x\n",
+		    urb->dev->devnum, td_ptd_map->ptd_bitmap);
+
+	//udelay(100);
+
+	memset(qha, 0, sizeof(isp1763_qha));
+	/*convert qtd to qha */
+	phci_hcd_qha_from_qtd(hcd, qtd,	qtd->urb, (void	*) qha,
+			      td_ptd_map->ptd_ram_data_addr, qh
+			      /*td_ptd_map->datatoggle */ );
+
+	if (qh->type ==	TD_PTD_BUFF_TYPE_INTL) {
+		phci_hcd_qhint_schedule(hcd, qh, qtd, (isp1763_qhint *)	qha,
+					qtd->urb);
+	}
+
+
+	length = PTD_XFERRED_LENGTH(qha->td_info1 >> 3);
+	if (length > HC_ATL_PL_SIZE) {
+		err("Never Error: Bogus	length,length %d(max %d)\n",
+		qtd->length, HC_ATL_PL_SIZE);
+	}
+
+	/*write	qha into the header of the host	controller */
+	isp1763_mem_write(hcd->dev, td_ptd_map->ptd_header_addr, 0,
+			  (u32 *) (qha), PHCI_QHA_LENGTH, 0);
+	
+#if 0 //def PTD_DUMP_SCHEDULE
+		printk("SCHEDULE Next qtd\n");
+		printk("DW0: 0x%08X\n", qha->td_info1);
+		printk("DW1: 0x%08X\n", qha->td_info2);
+		printk("DW2: 0x%08X\n", qha->td_info3);
+		printk("DW3: 0x%08X\n", qha->td_info4);
+#endif
+	
+	/*if this is SETUP/OUT token , then need to write into the buffer */
+	/*length should	be valid */
+	if (qtd->length && (length <= HC_ATL_PL_SIZE)){
+		switch (PTD_PID(qha->td_info2))	{
+		case OUT_PID:
+		case SETUP_PID:
+
+			isp1763_mem_write(hcd->dev, (u32) mem_addr->phy_addr, 0,
+				(void	*) qtd->hw_buf[0], length, 0);
+
+#if 0
+					int i=0;
+					int *data_addr= qtd->hw_buf[0];
+					printk("\n");
+					for(i=0;i<length;i+=4) printk("[0x%X] ",*data_addr++);
+					printk("\n");
+#endif
+
+
+			break;
+		}
+	}
+	/*qtd is scheduled */
+	qtd->state &= ~QTD_STATE_NEW;
+	qtd->state |= QTD_STATE_SCHEDULED;
+
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+	return;
+}
+#ifdef USBNET 
+
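+/*
+ * Deferred URB completion used in the USBNET configuration: instead of
+ * giving the URB back immediately, it is queued on hcd->cleanup_urb and the
+ * millisecond SOF interrupt is enabled so the giveback happens later from
+ * the interrupt path (presumably to avoid resubmission from the network
+ * driver's completion handler in this context).
+ */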
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+static void
+pehci_hcd_urb_delayed_complete(phci_hcd * hcd, struct ehci_qh *qh, struct urb *urb,
+	td_ptd_map_t * td_ptd_map, struct pt_regs *regs)
+#else
+static void
+pehci_hcd_urb_delayed_complete(phci_hcd * hcd, struct ehci_qh *qh, struct urb *urb,
+	td_ptd_map_t * td_ptd_map)
+#endif
+{
+	static u32 remove = 0;
+	static u32 qh_state = 0;
+
+	urb_priv_t *urb_priv = (urb_priv_t *) urb->hcpriv;
+
+#ifdef USBNET 
+	struct isp1763_async_cleanup_urb *urb_st = 0;
+#endif
+
+
+
+	urb_priv->timeout = 0;
+
+	if ((td_ptd_map->state == TD_PTD_REMOVE) ||
+	    (urb_priv->state == DELETE_URB) ||
+	    !HCD_IS_RUNNING(hcd->state)) {
+		remove = 1;
+	}
+	qh_state=qh->qh_state;
+	qh->qh_state = QH_STATE_COMPLETING;
+	/*remove the done tds */
+	spin_lock(&hcd_data_lock);
+	phci_hcd_urb_free_priv(hcd, urb_priv, qh);
+	spin_unlock(&hcd_data_lock);
+
+	urb_priv->timeout = 0;
+	kfree(urb_priv);
+	urb->hcpriv = 0;
+
+
+	/*if normal completion */
+	if (urb->status	== -EINPROGRESS) {
+		urb->status = 0;
+	}
+
+	if (remove && list_empty(&qh->qtd_list)) {
+		phci_hcd_release_td_ptd_index(qh);
+	}
+	remove = 0;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+		if(!usb_hcd_check_unlink_urb(&hcd->usb_hcd, urb,0))
+					usb_hcd_unlink_urb_from_ep(&hcd->usb_hcd,urb);
+#endif
+
+//if(qh_state!=QH_STATE_COMPLETING)
+{
+//	spin_unlock(&hcd->lock);
+	/* assume interrupt has been disabled and has acquired hcd->lock */
+	urb_st = (struct isp1763_async_cleanup_urb *)kmalloc(sizeof(struct isp1763_async_cleanup_urb), GFP_ATOMIC);
+	urb_st->urb = urb;
+	list_add_tail(&urb_st->urb_list, &(hcd->cleanup_urb.urb_list));
+
+//	isp1763_reg_write16(hcd->dev, hcd->regs.interruptenable, INTR_ENABLE_MASK | HC_SOF_INT);
+	isp1763_reg_write16(hcd->dev, hcd->regs.interruptenable, HC_MSOF_INT);
+//	spin_lock(&hcd->lock);
+}
+
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+}
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+static void
+pehci_hcd_urb_complete(phci_hcd * hcd, struct ehci_qh *qh, struct urb *urb,
+	td_ptd_map_t * td_ptd_map, struct pt_regs *regs)
+#else
+static void
+pehci_hcd_urb_complete(phci_hcd * hcd, struct ehci_qh *qh, struct urb *urb,
+	td_ptd_map_t * td_ptd_map)
+#endif
+{
+	static u32 remove = 0;
+	static u32 qh_state = 0;
+	urb_priv_t *urb_priv = (urb_priv_t *) urb->hcpriv;
+	
+	if (urb_priv == NULL) {
+		printk("urb_priv is NULL in %s\n", __FUNCTION__);
+		goto exit;
+	}
+	pehci_check("complete the td , length: %d\n", td_ptd_map->qtd->length);
+	urb_priv->timeout = 0;
+
+	if ((td_ptd_map->state == TD_PTD_REMOVE) ||
+	    (urb_priv->state == DELETE_URB) ||
+	    !HCD_IS_RUNNING(hcd->state)) {
+		remove = 1;
+	}
+
+
+	qh_state=qh->qh_state;
+
+	qh->qh_state = QH_STATE_COMPLETING;
+	/*remove the done tds */
+	spin_lock(&hcd_data_lock);
+	phci_hcd_urb_free_priv(hcd, urb_priv, qh);
+	spin_unlock(&hcd_data_lock);
+
+	urb_priv->timeout = 0;
+	kfree(urb_priv);
+	urb->hcpriv = 0;
+
+
+	/*if normal completion */
+	if (urb->status	== -EINPROGRESS) {
+		urb->status = 0;
+	}
+
+	if (remove && list_empty(&qh->qtd_list)) {
+		phci_hcd_release_td_ptd_index(qh);
+	}
+	remove = 0;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+	if(!usb_hcd_check_unlink_urb(&hcd->usb_hcd, urb,0))
+	{
+		usb_hcd_unlink_urb_from_ep(&hcd->usb_hcd,urb);
+	}
+#endif
+	spin_unlock(&hcd->lock);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	usb_hcd_giveback_urb(&hcd->usb_hcd, urb);
+#else
+	usb_hcd_giveback_urb(&hcd->usb_hcd, urb, urb->status);
+#endif
+	spin_lock(&hcd->lock);
+exit:
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+	
+}
+
+/*update the error status of the td*/
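+/*
+ * Mapping used below: a transaction error maps to -EPIPE while retries
+ * remain and -EPROTO once they are exhausted, babble maps to -EOVERFLOW,
+ * and any other halted state is reported as -EPIPE.
+ */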
+static void
+pehci_hcd_update_error_status(u32 ptdstatus, struct urb	*urb)
+{
+	/*if ptd status	is halted */
+	if (ptdstatus &	PTD_STATUS_HALTED) {
+		if (ptdstatus &	PTD_XACT_ERROR)	{
+			/*transaction error resulting from the retry count going to zero */
+			if (PTD_RETRY(ptdstatus)) {
+				/*halt the endpoint */
+				printk("transaction error , retries %d\n",
+					PTD_RETRY(ptdstatus));
+				urb->status = -EPIPE;
+			} else {
+				printk("transaction error , retries %d\n",
+					PTD_RETRY(ptdstatus));
+				/*protocol error */
+				urb->status = -EPROTO;
+			}
+		} else if (ptdstatus & PTD_BABBLE) {
+			printk("babble error, qha %x\n", ptdstatus);
+			/*babble error */
+			urb->status = -EOVERFLOW;
+		} else if (PTD_RETRY(ptdstatus)) {
+			printk("endpoint halted with retrie remaining %d\n",
+				PTD_RETRY(ptdstatus));
+			urb->status = -EPIPE;
+		} else {	/*unknown error; report it as halted, as the xact error bit is never expected here */
+			printk("protocol error, qha %x\n", ptdstatus);
+			urb->status = -EPIPE;
+		}
+
+		/*if halted need to recover */
+		if (urb->status	== -EPIPE) {
+		}
+	}
+}
+
+#ifdef CONFIG_ISO_SUPPORT	/* New code for	ISO support */
+
+/*******************************************************************
+ * pehci_hcd_iso_sitd_schedule - schedule a single SITD
+ *
+ * phci_hcd *hcd,
+ *	Host controller	driver structure which contains	almost all data
+ *	needed by the host controller driver to	process	data and interact
+ *	with the host controller.
+ *
+ * struct urb *urb, struct ehci_sitd *sitd
+ *
+ * API Description
+ * Converts the given SITD into a PTD, writes the PTD header (and any
+ * OUT payload) into the controller memory allocated for its slot, and
+ * updates the ISO skip, last-map and buffer-status registers so the
+ * PTD will be processed.
+ ************************************************************************/
+void
+pehci_hcd_iso_sitd_schedule(phci_hcd *hcd, struct urb *urb, struct ehci_sitd *sitd)
+{
+		td_ptd_map_t *td_ptd_map;
+		td_ptd_map_buff_t *ptd_map_buff;
+		struct _isp1763_isoptd *iso_ptd;
+		u32 ormask = 0, skip_map = 0,last_map=0,buff_stat=0;
+		struct isp1763_mem_addr *mem_addr;
+		ptd_map_buff = &(td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL]);
+		
+		/* Get the PTD allocated for this SITD. */
+		td_ptd_map =
+				&ptd_map_buff->map_list[sitd->
+					sitd_index];
+		iso_ptd = &hcd->isotd;
+		
+		memset(iso_ptd, 0,	sizeof(struct _isp1763_isoptd));
+		/* Read buffer status register to check later if the ISO buffer is
+		filled or not */
+		buff_stat =
+			isp1763_reg_read16(hcd->dev, hcd->regs.buffer_status,buff_stat);
+
+		/* Read the contents of the ISO skipmap register */
+		skip_map =
+			isp1763_reg_read16(hcd->dev, hcd->regs.isotdskipmap,
+				skip_map);
+		iso_dbg(ISO_DBG_DATA,
+			"[pehci_hcd_iso_sitd_schedule]: Read skip map: 0x%08x\n",
+			(unsigned int) skip_map);
+
+		/* Read the contents of the ISO lastmap  register */
+		last_map =
+			isp1763_reg_read16(hcd->dev, hcd->regs.isotdlastmap,
+			last_map);
+
+		/* Read the contents of the ISO ormask  register */
+		ormask = isp1763_reg_read16(hcd->dev, hcd->regs.iso_irq_mask_or,
+			ormask);
+		
+		/* Create a PTD from an SITD */
+		phcd_iso_sitd_to_ptd(hcd, sitd, sitd->urb,
+				(void *) iso_ptd);	
+		/* Indicate that this SITD's PTD has been
+		filled up */
+		ptd_map_buff->pending_ptd_bitmap &=
+			~td_ptd_map->ptd_bitmap;		
+
+				/*
+				 * Place the newly initialized ISO PTD structure into
+				 the location allocated for this PTD in the ISO PTD
+				 memory region.
+				 */
+#ifdef SWAP
+				isp1763_mem_write(hcd->dev,
+					td_ptd_map->ptd_header_addr, 0,
+					(u32 *) iso_ptd, PHCI_QHA_LENGTH, 0,
+					PTD_HED);
+#else /* NO_SWAP */
+				isp1763_mem_write(hcd->dev,
+					td_ptd_map->ptd_header_addr, 0,
+					(u32 *) iso_ptd,PHCI_QHA_LENGTH, 0);
+#endif
+
+				/*
+ 				* Set this flag to avoid unlinking before
+ 				schedule at particular frame number
+				 */
+				td_ptd_map->state = TD_PTD_IN_SCHEDULE;
+
+				/*
+				 * If the length is not zero and the direction is
+				 OUT then  copy the  data to be transferred
+				 into the PAYLOAD memory area.
+				 */
+				if (sitd->length) {
+					switch (PTD_PID(iso_ptd->td_info2)) {
+					case OUT_PID:
+						/* Get the Payload memory
+						allocated for this PTD */
+						mem_addr = &sitd->mem_addr;
+#ifdef SWAP
+						isp1763_mem_write(hcd->dev,
+							(unsigned long)
+							mem_addr-> phy_addr,
+							0, (u32*)
+							((sitd->hw_bufp[0])),
+							sitd->length, 0,
+							PTD_PAY);
+#else /* NO_SWAP */
+						isp1763_mem_write(hcd->dev,
+							(unsigned long)
+							mem_addr->phy_addr,
+							0, (u32 *)
+							sitd->hw_bufp[0],
+							sitd->length, 0);
+#endif
+						break;
+					}
+					/* switch(PTD_PID(iso_ptd->td_info2))*/
+				}
+
+				/* if(sitd->length) */
+				/* If this is the last td, indicate to complete
+				the URB */
+				if (sitd->hw_next == EHCI_LIST_END) {
+					td_ptd_map->lasttd = 1;
+				}
+
+				/*
+				 * Clear the bit corresponding to this PTD in
+				 the skip map so that it will be processed on
+				 the next schedule traversal.
+				 */
+				skip_map &= ~td_ptd_map->ptd_bitmap;
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_sitd_schedule]: Skip map:0x%08x\n",(unsigned int) skip_map);
+
+				/*
+				 * Update the last map register to indicate
+				 that the newly created PTD is the last PTD
+				 added only if it is larger than the previous
+				 bitmap.
+				 */
+				if (last_map < td_ptd_map->ptd_bitmap) {
+					isp1763_reg_write16(hcd->dev,
+						hcd->regs.isotdlastmap,
+						td_ptd_map->ptd_bitmap);
+					iso_dbg(ISO_DBG_DATA,
+						"[pehci_hcd_iso_sitd_schedule]:Last Map: 0x%08x\n",
+						td_ptd_map->ptd_bitmap);
+				}
+
+				/*
+				 * Set the ISO_BUF_FILL bit to 1 to indicate
+				 that there is a PTD for ISO that needs to
+				 * be processed.
+				 */
+				isp1763_reg_write16(hcd->dev,
+					hcd->regs.buffer_status,
+					(buff_stat | ISO_BUFFER));
+				
+				isp1763_reg_write16(hcd->dev, hcd->regs.isotdskipmap,skip_map);
+		
+}
+
+/*******************************************************************
+ * pehci_hcd_iso_schedule - ISOCHRONOUS transfer scheduler
+ *
+ * phci_hcd *hcd,
+ *	Host controller	driver structure which contains	almost all data
+ *	needed by the host controller driver to	process	data and interact
+ *	with the host controller.
+ *
+ * struct urb *urb
+ *
+ * API Description
+ * Walks the periodic list of this URB's endpoint and, for every SITD
+ * (full speed) or ITD (high speed) found, converts it into a PTD, which
+ * is the data structure that the host controller can understand and
+ * process, writes it (and any OUT payload) into controller memory, and
+ * updates the ISO skip, last-map and buffer-status registers.
+ ************************************************************************/
+void
+pehci_hcd_iso_schedule(phci_hcd * hcd, struct urb *urb)
+{
+	struct list_head *sitd_itd_sched, *position;
+	struct ehci_itd *itd;
+	struct ehci_sitd *sitd;
+	td_ptd_map_t *td_ptd_map;
+	unsigned long last_map;
+	td_ptd_map_buff_t *ptd_map_buff;
+	struct _isp1763_isoptd *iso_ptd;
+	unsigned long buff_stat;
+	struct isp1763_mem_addr *mem_addr;
+	u32 ormask = 0, skip_map = 0;
+	u32 iNumofPkts;
+	unsigned int iNumofSlots = 0, mult = 0;
+	struct ehci_qh *qhead;
+
+	buff_stat = 0;
+	iso_dbg(ISO_DBG_ENTRY, "[pehci_hcd_iso_schedule]: Enter\n");
+	iso_ptd = &hcd->isotd;
+
+	last_map = 0;
+	/* Check if there are any ITDs scheduled  for processing */
+	if (hcd->periodic_sched == 0) {
+		return;
+	}
+	if (urb->dev->speed == USB_SPEED_HIGH) {
+		mult = usb_maxpacket(urb->dev, urb->pipe,
+				usb_pipeout(urb->pipe));
+		mult = 1 + ((mult >> 11) & 0x3);
+		iNumofSlots = NUMMICROFRAME / urb->interval;
+		/*number of PTDs need to schedule for this PTD */
+		iNumofPkts = (urb->number_of_packets / mult) / iNumofSlots;
+		if ((urb->number_of_packets / mult) % iNumofSlots != 0){
+			/*get remainder */
+			iNumofPkts += 1;
+		}
+	} else{
+		iNumofPkts = urb->number_of_packets;
+	}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	qhead = urb->hcpriv;
+#else
+	qhead = urb->ep->hcpriv;
+#endif
+	if (!qhead) {
+		iso_dbg(ISO_DBG_ENTRY,
+			"[pehci_hcd_iso_schedule]: Qhead==NULL\n");
+		return ;
+	}
+	ptd_map_buff = &(td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL]);
+
+	while (iNumofPkts > 0) {
+		/* Read buffer status register to check later if the ISO buffer is
+		filled or not */
+		buff_stat =
+			isp1763_reg_read16(hcd->dev, hcd->regs.buffer_status,
+				buff_stat);
+
+		/* Read the contents of the ISO skipmap register */
+		skip_map =
+			isp1763_reg_read16(hcd->dev, hcd->regs.isotdskipmap,
+				skip_map);
+		iso_dbg(ISO_DBG_DATA,
+			"[pehci_hcd_iso_schedule]: Read skip map: 0x%08x\n",
+			(unsigned int) skip_map);
+
+		/* Read the contents of the ISO lastmap  register */
+		last_map =
+			isp1763_reg_read16(hcd->dev, hcd->regs.isotdlastmap,
+			last_map);
+
+		/* Read the contents of the ISO ormask  register */
+		ormask = isp1763_reg_read16(hcd->dev, hcd->regs.iso_irq_mask_or,
+			ormask);
+
+		/* Process ITDs linked to this frame, checking if there are any that need to
+		be scheduled */
+		sitd_itd_sched = &qhead->periodic_list.sitd_itd_head;
+		if (list_empty(sitd_itd_sched)) {
+			iso_dbg(ISO_DBG_INFO,
+				"[pehci_hcd_iso_schedule]: ISO schedule list's empty. Nothing to schedule.\n");
+			return;
+		}
+
+		list_for_each(position, sitd_itd_sched) {
+			if (qhead->periodic_list.high_speed == 0){
+				/* Get an SITD in the list for processing */
+				sitd = list_entry(position, struct ehci_sitd,
+					sitd_list);
+				iNumofPkts--;
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_schedule]: SITD Index:%d\n", sitd->sitd_index);
+				if(sitd->sitd_index==TD_PTD_INV_PTD_INDEX)
+					continue;
+				/* Get the PTD allocated for this SITD. */
+				td_ptd_map =
+					&ptd_map_buff->map_list[sitd->
+					sitd_index];
+				memset(iso_ptd, 0,
+					sizeof(struct _isp1763_isoptd));
+
+				/* Create a PTD from an SITD */
+				phcd_iso_sitd_to_ptd(hcd, sitd, sitd->urb,
+					(void *) iso_ptd);
+
+				/* Indicate that this SITD's PTD has been
+				filled up */
+				ptd_map_buff->pending_ptd_bitmap &=
+					~td_ptd_map->ptd_bitmap;
+
+				/*
+				 * Place the newly initialized ISO PTD structure into
+				 the location allocated for this PTD in the ISO PTD
+				 memory region.
+				 */
+#ifdef SWAP
+				isp1763_mem_write(hcd->dev,
+					td_ptd_map->ptd_header_addr, 0,
+					(u32 *) iso_ptd, PHCI_QHA_LENGTH, 0,
+					PTD_HED);
+#else /* NO_SWAP */
+				isp1763_mem_write(hcd->dev,
+					td_ptd_map->ptd_header_addr, 0,
+					(u32 *) iso_ptd,PHCI_QHA_LENGTH, 0);
+#endif
+
+				/*
+ 				* Set this flag to avoid unlinking before
+ 				schedule at particular frame number
+				 */
+				td_ptd_map->state = TD_PTD_IN_SCHEDULE;
+
+				/*
+				 * If the length is not zero and the direction is
+				 OUT then  copy the  data to be transferred
+				 into the PAYLOAD memory area.
+				 */
+				if (sitd->length) {
+					switch (PTD_PID(iso_ptd->td_info2)) {
+					case OUT_PID:
+						/* Get the Payload memory
+						allocated for this PTD */
+						mem_addr = &sitd->mem_addr;
+#ifdef SWAP
+						isp1763_mem_write(hcd->dev,
+							(unsigned long)
+							mem_addr-> phy_addr,
+							0, (u32*)
+							((sitd->hw_bufp[0])),
+							sitd->length, 0,
+							PTD_PAY);
+#else /* NO_SWAP */
+						isp1763_mem_write(hcd->dev,
+							(unsigned long)
+							mem_addr->phy_addr,
+							0, (u32 *)
+							sitd->hw_bufp[0],
+							sitd->length, 0);
+#endif
+						break;
+					}
+					/* switch(PTD_PID(iso_ptd->td_info2))*/
+				}
+
+				/* if(sitd->length) */
+				/* If this is the last td, indicate to complete
+				the URB */
+				if (sitd->hw_next == EHCI_LIST_END) {
+					td_ptd_map->lasttd = 1;
+				}
+
+				/*
+				 * Clear the bit corresponding to this PTD in
+				 the skip map so that it will be processed on
+				 the next schedule traversal.
+				 */
+				skip_map &= ~td_ptd_map->ptd_bitmap;
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_schedule]: Skip map:0x%08x\n",(unsigned int) skip_map);
+
+				/*
+				 * Update the last map register to indicate
+				 that the newly created PTD is the last PTD
+				 added only if it is larger than the previous
+				 bitmap.
+				 */
+				if (last_map < td_ptd_map->ptd_bitmap) {
+					isp1763_reg_write16(hcd->dev,
+						hcd->regs.isotdlastmap,
+						td_ptd_map->ptd_bitmap);
+					iso_dbg(ISO_DBG_DATA,
+						"[pehci_hcd_iso_schedule]:Last Map: 0x%08x\n",
+						td_ptd_map->ptd_bitmap);
+				}
+
+				/*
+				 * Set the ISO_BUF_FILL bit to 1 to indicate
+				 that there is a PTD for ISO that needs to
+				 * be processed.
+				 */
+				isp1763_reg_write16(hcd->dev,
+					hcd->regs.buffer_status,
+					(buff_stat | ISO_BUFFER));
+
+			} else {	/*HIGH SPEED */
+
+				/* Get an ITD in the list for processing */
+				itd = list_entry(position, struct ehci_itd,
+					itd_list);
+				iNumofPkts--;
+
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_schedule]: ITD Index: %d\n",	itd->itd_index);
+				/* Get the PTD allocated for this ITD. */
+				td_ptd_map =
+					&ptd_map_buff->map_list[itd->itd_index];
+				memset(iso_ptd, 0,
+					sizeof(struct _isp1763_isoptd));
+
+				/* Create a PTD from an ITD */
+				phcd_iso_itd_to_ptd(hcd, itd, itd->urb,
+					(void *) iso_ptd);
+
+				/* Indicate that this ITD's PTD has been
+				filled up */
+				ptd_map_buff->pending_ptd_bitmap &=
+					~td_ptd_map->ptd_bitmap;
+
+				/*
+				 * Place the newly initialized ISO PTD
+				 structure into the location allocated
+				 * for this PTD in the ISO PTD memory region.
+				 */
+#ifdef SWAP
+				isp1763_mem_write(hcd->dev,
+					td_ptd_map->ptd_header_addr, 0,
+					(u32 *) iso_ptd,PHCI_QHA_LENGTH, 0,
+					PTD_HED);
+#else /* NO_SWAP */
+				isp1763_mem_write(hcd->dev,
+					td_ptd_map->ptd_header_addr, 0,
+					(u32 *) iso_ptd,PHCI_QHA_LENGTH, 0);
+#endif
+				/*
+				 * Set this flag to avoid unlinking before schedule
+				 * at particular frame number
+				 */
+				td_ptd_map->state = TD_PTD_IN_SCHEDULE;
+
+				/*
+				 * If the length is not zero and the direction
+				 is OUT then copy the data to be transferred
+				 into the PAYLOAD memory area.
+				 */
+				if (itd->length) {
+					switch (PTD_PID(iso_ptd->td_info2)) {
+					case OUT_PID:
+						/* Get the Payload memory
+						allocated for this PTD */
+						mem_addr = &itd->mem_addr;
+#ifdef SWAP
+						isp1763_mem_write(hcd->dev,
+							(unsigned long)
+							mem_addr->phy_addr, 0,
+							(u32*)
+							((itd->hw_bufp[0])),
+							itd->length, 0,
+							PTD_PAY);
+#else /* NO_SWAP */
+						isp1763_mem_write(hcd->dev,
+							(unsigned long)
+							mem_addr->phy_addr, 0,
+							(u32 *)itd->hw_bufp[0],
+							itd->length, 0);
+#endif
+						break;
+					}
+					/* switch(PTD_PID(iso_ptd->td_info2)) */
+				}
+
+				
+				/* If this is the last td, indicate to
+				complete the URB */
+				if (itd->hw_next == EHCI_LIST_END) {
+					td_ptd_map->lasttd = 1;
+				}
+
+				/*
+				 * Clear the bit corresponding to this PTD
+				 in the skip map so that it will be processed
+				 on the next schedule traversal.
+				 */
+				skip_map &= ~td_ptd_map->ptd_bitmap;
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_schedule]: Skip map:0x%08x\n",(unsigned int) skip_map);
+				isp1763_reg_write16(hcd->dev,
+					hcd->regs.isotdskipmap,
+					skip_map);
+
+				/*
+				 * Update the last map register to indicate
+				 that the newly created PTD is the last PTD
+				 added only if it is larger than the previous
+				 bitmap.
+				 */
+				if (last_map < td_ptd_map->ptd_bitmap) {
+					isp1763_reg_write16(hcd->dev,
+						hcd->regs.isotdlastmap,
+						td_ptd_map->ptd_bitmap);
+					iso_dbg(ISO_DBG_DATA,
+						"[pehci_hcd_iso_schedule]:Last Map: 0x%08x\n",
+						td_ptd_map->ptd_bitmap);
+				}
+
+				/*
+				 * Set the ISO_BUF_FILL bit to 1 to indicate
+				 that there is a PTD for ISO that needs to
+				 * be processed.
+				 */
+				isp1763_reg_write16(hcd->dev,
+					hcd->regs.buffer_status,
+					(buff_stat | ISO_BUFFER));
+			}
+		}		/* list_for_each(position, itd_sched) */
+		isp1763_reg_write16(hcd->dev, hcd->regs.isotdskipmap,skip_map);
+	}	/* end of while (iNumofPkts > 0) */
+
+	iso_dbg(ISO_DBG_INFO,
+		"[pehci_hcd_iso_schedule]: ISO-Frame scheduling done\n");
+	iso_dbg(ISO_DBG_ENTRY, "[pehci_hcd_iso_schedule]: Exit\n");
+}
+
+/*******************************************************************
+ * pehci_hcd_iso_worker - ISOCHRONOUS transfer completion handler
+ *
+ * phci_hcd *hcd,
+ *      Host controller driver structure which contains almost all data
+ *      needed by the host controller driver to process data and interact
+ *      with the host controller.
+ *
+ * API Description
+ * This is the ISOCHRONOUS transfer handler, mainly responsible for:
+ *  - Checking the periodic list if there are any ITDs for scheduling or
+ *    removal.
+ *  - For ITD scheduling, converting an ITD into a PTD, which is the data
+ *    structure that the host controller can understand and process.
+ *  - For ITD completion, checking the transfer status and performing the
+ *    required actions depending on status.
+ *  - Freeing up memory used by ITDs once they are not needed anymore.
+ ************************************************************************/
+
+int debugiso = 0;
+
+void
+pehci_hcd_iso_worker(phci_hcd * hcd)
+{
+	u32 donemap = 0, skipmap = 0; /*ormask = 0,  buff_stat = 0;*/
+	u32 pendingmap = 0;
+	u32 mask = 0x1, index = 0, donetoclear = 0;
+	u32 uFrIndex = 0;
+	unsigned char last_td = FALSE, iReject = 0;
+	struct isp1763_mem_addr *mem_addr;
+	struct _isp1763_isoptd *iso_ptd;
+	unsigned long length = 0, uframe_cnt, usof_stat;
+	struct ehci_qh *qhead;
+	struct ehci_itd *itd, *current_itd;
+	struct ehci_sitd *sitd=0, *current_sitd=0;
+	td_ptd_map_t *td_ptd_map;
+	td_ptd_map_buff_t *ptd_map_buff;
+	struct list_head *sitd_itd_remove, *position;// *lst_temp;	
+	struct urb *urb;
+	u8 i = 0;
+	unsigned long startAdd = 0;
+	int ret = 0;
+
+
+	iso_ptd = &hcd->isotd;
+
+	/* Check if there are any ITDs scheduled  for processing */
+	if (hcd->periodic_sched == 0) {
+		goto exit;
+	}
+	ptd_map_buff = &(td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL]);
+	pendingmap = ptd_map_buff->pending_ptd_bitmap;
+
+
+	/*read the done map for interrupt transfers */
+	donemap = isp1763_reg_read16(hcd->dev, hcd->regs.isotddonemap, donemap);
+
+	iso_dbg(ISO_DBG_ENTRY, "[pehci_hcd_iso_worker]: Enter %x \n", donemap);
+	if (!donemap) {		/*there isn't any completed PTD */
+		goto exit;
+	}
+	donetoclear = donemap;
+	uFrIndex = 0;
+	while (donetoclear) {
+		mask = 0x1 << uFrIndex;
+		index = uFrIndex;
+		uFrIndex++;
+		if (!(donetoclear & mask))
+			continue;
+		donetoclear &= ~mask;
+		iso_dbg(ISO_DBG_DATA, "[pehci_hcd_iso_worker]: uFrIndex = %d\n", index);
+		iso_dbg(ISO_DBG_DATA,
+			"[pehci_hcd_iso_worker]:donetoclear = 0x%x mask = 0x%x\n",
+			donetoclear, mask);
+
+
+		if (ptd_map_buff->map_list[index].sitd) {
+			urb = ptd_map_buff->map_list[index].sitd->urb;
+			if (!urb) {
+				printk("ERROR : URB is NULL \n");
+				continue;
+			}
+			sitd = ptd_map_buff->map_list[index].sitd;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+			qhead=urb->hcpriv;
+#else
+			qhead = urb->ep->hcpriv;
+#endif
+			if (!qhead) {
+				printk("ERROR : Qhead is NULL \n");
+				continue;
+			}
+
+			sitd_itd_remove = &qhead->periodic_list.sitd_itd_head;
+		} else if (ptd_map_buff->map_list[index].itd) {
+			urb = ptd_map_buff->map_list[index].itd->urb;
+			if (!urb) {
+				printk("ERROR : URB is NULL \n");
+				continue;
+			}
+			itd = ptd_map_buff->map_list[index].itd;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+			qhead=urb->hcpriv;
+#else
+			qhead = urb->ep->hcpriv;
+#endif
+			if (!qhead) {
+				printk("ERROR : Qhead is NULL \n");
+				continue;
+			}
+
+			sitd_itd_remove = &qhead->periodic_list.sitd_itd_head;
+
+		} else {
+			printk("ERROR : NO sitd in that PTD location : \n");
+			continue;
+		}
+		/* Process ITDs linked to this frame, checking for completed ITDs */
+		iso_dbg(ISO_DBG_DATA,
+			"[pehci_hcd_iso_worker]: Removal Frame number: %d\n",
+			(int) index);
+		if (list_empty(sitd_itd_remove)) {
+			continue;
+		}
+
+		if (urb) {
+			last_td = FALSE;
+			if (qhead->periodic_list.high_speed == 0)/*FULL SPEED*/
+			{
+
+				/* Get the PTD that was allocated for this
+				particular SITD*/
+				td_ptd_map =
+					&ptd_map_buff->map_list[sitd->
+								sitd_index];
+
+				iso_dbg(ISO_DBG_INFO,
+					"[pehci_hcd_iso_worker]: PTD is done,%d\n",index);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: SITD Index: %d\n",sitd->sitd_index);
+				urb = sitd->urb;
+
+				/*
+				 * Get the base address of the memory allocated
+				 in the PAYLOAD region for this SITD
+				 */
+				mem_addr = &sitd->mem_addr;
+				memset(iso_ptd, 0,
+					sizeof(struct _isp1763_isoptd));
+
+				/*
+				 * Read this ptd from the ram address,
+				 address is in the td_ptd_map->ptd_header_addr
+				 */
+
+				isp1763_mem_read(hcd->dev,
+					td_ptd_map->ptd_header_addr,
+					0, (u32 *) iso_ptd,
+					PHCI_QHA_LENGTH, 0);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD0 = 0x%08x\n", iso_ptd->td_info1);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD1 = 0x%08x\n", iso_ptd->td_info2);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD2 = 0x%08x\n", iso_ptd->td_info3);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD3 = 0x%08x\n", iso_ptd->td_info4);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD4 = 0x%08x\n", iso_ptd->td_info5);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD5 = 0x%08x\n", iso_ptd->td_info6);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD6 = 0x%08x\n", iso_ptd->td_info7);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD7 = 0x%08x\n", iso_ptd->td_info8);
+
+				/* Go over the status of each of the 8 Micro Frames */
+				for (uframe_cnt = 0; uframe_cnt < 8;
+					uframe_cnt++) {
+					/*
+					 * We go over the status one at a time. The status bits and their
+					 * equivalent status are:
+					 * Bit 0 - Transaction Error (IN and OUT)
+					 * Bit 1 - Babble (IN token only)
+					 * Bit 2 - Underrun (OUT token only)
+					 */
+					usof_stat =
+						iso_ptd->td_info5 >> (8 +
+						(uframe_cnt * 3));
+
+					switch (usof_stat & 0x7) {
+					case INT_UNDERRUN:
+						iso_dbg(ISO_DBG_ERR,
+							"[pehci_hcd_iso_worker Error]: Buffer underrun\n");
+							urb->error_count++;
+						break;
+					case INT_EXACT:
+						iso_dbg(ISO_DBG_ERR,
+							"[pehci_hcd_iso_worker Error]: Transaction error\n");
+							printk("[pehci_hcd_iso_worker Error]: Transaction error\n");
+							urb->error_count++;
+						break;
+					case INT_BABBLE:
+						iso_dbg(ISO_DBG_ERR,
+							"[pehci_hcd_iso_worker Error]: Babble error\n");
+							printk("[pehci_hcd_iso_worker Error]: Babble error\n");
+						urb->iso_frame_desc[sitd->sitd_index].status
+							= -EOVERFLOW;
+						urb->error_count++;
+						break;
+					}	/* switch(usof_stat & 0x7) */
+				}	/* end of for( ulMicroFrmCnt = 0; ulMicroFrmCnt < 8; ulMicroFrmCnt++) */
+
+				/*
+				 * Get the number of bytes transferred. This indicates the number of
+				 * bytes sent or received for this transaction.
+				 */
+				if (urb->dev->speed != USB_SPEED_HIGH) {
+					/* Length is 1K for full/low speed device */
+					length = PTD_XFERRED_NONHSLENGTH
+						(iso_ptd->td_info4);
+				} else {
+					/* Length is 32K for high speed device */
+					length = PTD_XFERRED_LENGTH(iso_ptd->
+						td_info4);
+				}
+
+				/* Halted, need to finish all the transfer on this endpoint */
+				if (iso_ptd->td_info4 & PTD_STATUS_HALTED) {
+					iso_dbg(ISO_DBG_ERR,
+						"[pehci_hcd_iso_worker Error] PTD Halted\n");
+						printk("[pehci_hcd_iso_worker Error] PTD Halted\n");
+					/*
+					 * When there is an error, do not process the other PTDs.
+					 * Stop at the PTD with the error and remove all other PTDs.
+					 */
+					td_ptd_map->lasttd = 1;
+
+					/*
+					 * In case of halt, next transfer will start with toggle zero,
+					 * USB specs, 5.8.5
+					 */
+					td_ptd_map->datatoggle = 0;
+				}
+
+				/* if(iso_ptd->td_info4 & PTD_STATUS_HALTED) */
+				/* Update the actual length of the transfer from the data we got earlier */
+				urb->iso_frame_desc[sitd->index].actual_length =
+					length;
+
+				/* If the PTD has been executed properly, the V bit should be cleared */
+				if (iso_ptd->td_info1 & QHA_VALID) {
+					iso_dbg(ISO_DBG_ERR,
+						"[pehci_hcd_iso_worker Error]: Valid bit not cleared\n");
+					printk("[pehci_hcd_iso_worker Error]: Valid bit not cleared\n");
+					urb->iso_frame_desc[sitd->index].
+						status = -ENOSPC;
+				} else {
+					urb->iso_frame_desc[sitd->index].
+						status = 0;
+				}
+
+				/* Check if this is the last SITD either due to some error or normal completion */
+				if ((td_ptd_map->lasttd)
+					|| (sitd->hw_next == EHCI_LIST_END)) {
+					last_td = TRUE;
+				}
+
+				/* Copy data to/from */
+				if (length && (length <= MAX_PTD_BUFFER_SIZE)) {
+					switch (PTD_PID(iso_ptd->td_info2)) {
+					case IN_PID:
+						/*
+						 * Get the data from the PAYLOAD area and place it into
+						 * the buffer provided by the requestor.
+						 */
+
+						isp1763_mem_read(hcd->dev,
+							(unsigned long)mem_addr->
+							phy_addr, 0,(u32 *)
+							sitd->hw_bufp[0],
+							length, 0);
+
+					case OUT_PID:
+						/*
+						 * urb->actual_length was initialized to zero, so for the first
+						 * uFrame having it incremented immediately is not a problem.
+						 */
+						urb->actual_length += length;
+						break;
+					}/* switch(PTD_PID(iso_ptd->td_info2)) */
+				}
+				/* if(length && (length <= MAX_PTD_BUFFER_SIZE)) */
+//				removesitd:
+				/*read skip-map */
+				skipmap =
+					isp1763_reg_read16(hcd->dev,
+						hcd->regs.isotdskipmap,
+						skipmap);
+				iso_dbg(ISO_DBG_DATA,
+					"[%s] : read skipmap =0x%x\n",
+					__FUNCTION__, skipmap);
+				if (last_td == TRUE) {
+					/* Start removing the ITDs in the list */
+					while (1) {
+						/*
+						 * This indicates that we are processing the tail PTD.
+						 * Perform cleanup procedure on this last PTD
+						 */
+						if (sitd->hw_next == EHCI_LIST_END) {
+							td_ptd_map =
+								&ptd_map_buff->
+								map_list[sitd->
+								sitd_index];
+
+							/*
+							 * Free up our allocation in the PAYLOAD area so that others can use
+							 * it.
+							 */
+#ifndef COMMON_MEMORY
+							phci_hcd_mem_free
+								(&sitd->
+								mem_addr);
+#endif
+							/* Remove this SITD entry in the SITD list */
+							list_del(&sitd->
+								sitd_list);
+
+							/* Free up the memory allocated for the SITD structure */
+							qha_free(qha_cache,
+								sitd);
+
+							/* Indicate that the PTD we have used is now free */
+							td_ptd_map->state =
+								TD_PTD_NEW;
+							td_ptd_map->sitd = NULL;
+							td_ptd_map->itd = NULL;
+
+							/* Decrease the number of active PTDs scheduled */
+							hcd->periodic_sched--;
+
+							/* Skip this PTD during the next PTD processing. */
+							skipmap |=
+								td_ptd_map->ptd_bitmap;
+							isp1763_reg_write16
+								(hcd->dev,
+								hcd->regs.
+								isotdskipmap,
+								skipmap);
+
+							/* All ITDs in this list have been successfully removed. */
+							break;
+						} else {
+							/*
+							 * This indicates that we stopped due to an error on a PTD that is
+							 * not the last in the list. We need to free up this PTD as well as
+							 * the PTDs after it.
+							 */
+							/*
+							 * Save the SITD that hit the error in this variable.
+							 * We will be unlinking it from the list and freeing up its
+							 * resources later.
+							 */
+							current_sitd = sitd;
+
+							td_ptd_map =
+								&ptd_map_buff->
+								map_list[sitd->
+								sitd_index];
+
+							/*
+							 * Get the next SITD, and place it to the sitd variable.
+							 * In a way we are moving forward in the SITD list.
+							 */
+							sitd = (struct ehci_sitd
+								*)
+								(current_sitd->
+								hw_next);
+							/* Free up the current SITD's resources */
+#ifndef COMMON_MEMORY
+							phci_hcd_mem_free
+								(&current_sitd->
+								 mem_addr);
+#endif
+							/* Remove this SITD entry in the SITD list */
+							list_del(&current_sitd->
+								sitd_list);
+
+							/* Free up the memory allocated for the SITD structure */
+							qha_free(qha_cache,
+								current_sitd);
+
+							/* Indicate that the PTD we have used is now free */
+							td_ptd_map->state =
+								TD_PTD_NEW;
+							td_ptd_map->sitd = NULL;
+							td_ptd_map->itd = NULL;
+
+							/* Decrease the number of active PTDs scheduled */
+							hcd->periodic_sched--;
+
+							/* Since it is done, skip this PTD during the next PTD processing. */
+							skipmap |=
+								td_ptd_map->
+								ptd_bitmap;
+							isp1763_reg_write16
+								(hcd->dev,
+								hcd->regs.
+								isotdskipmap,
+								skipmap);
+							/*
+							 * Start all over again until it gets to the tail of the
+							 * list of PTDs/ITDs
+							 */
+							continue;
+						}	/* else of if(sitd->hw_next == EHCI_LIST_END) */
+
+						/* It should never get here, but I put this as a precaution */
+						break;
+					}	/*end of while(1) */
+
+					/* Check if there were ITDs that were not processed due to the error */
+					if (urb->status == -EINPROGRESS) {
+						if ((urb->actual_length !=
+							urb->transfer_buffer_length)
+							&& (urb->transfer_flags &
+							URB_SHORT_NOT_OK)) {
+							iso_dbg(ISO_DBG_ERR,
+								"[pehci_hcd_iso_worker Error]: Short Packet\n");
+							urb->status =
+								-EREMOTEIO;
+						} else {
+							urb->status = 0;
+						}
+					}
+
+					urb->hcpriv = 0;
+					iso_dbg(ISO_DBG_DATA,
+						"[%s] : remain skipmap =0x%x\n",
+						__FUNCTION__, skipmap);
+#ifdef COMMON_MEMORY
+					phci_hcd_mem_free(&qhead->memory_addr);
+#endif
+					/* We need to unlock this here, since this was locked when we are called
+					 * from the interrupt handler */
+					spin_unlock(&hcd->lock);
+					/* Perform URB cleanup */
+					iso_dbg(ISO_DBG_INFO,
+						"[pehci_hcd_iso_worker] Complete a URB\n");
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+		if(!usb_hcd_check_unlink_urb(&hcd->usb_hcd, urb,0))
+					usb_hcd_unlink_urb_from_ep(&hcd->usb_hcd,
+						urb);
+#endif
+					hcd->periodic_more_urb = 0;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+						qhead=urb->hcpriv;
+					if (!list_empty(&qhead->ep->urb_list))
+#else
+					if (!list_empty(&urb->ep->urb_list))
+#endif
+						hcd->periodic_more_urb = 1;
+					
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+				usb_hcd_giveback_urb(&hcd->usb_hcd, urb);
+#else
+				usb_hcd_giveback_urb(&hcd->usb_hcd, urb, urb->status);
+#endif
+
+					spin_lock(&hcd->lock);
+					continue;
+				}
+
+				/* if( last_td == TRUE ) */
+				/*
+				 * If the last_td is not set then we do not need to check for errors and directly
+				 * proceed with the cleaning sequence.
+				 */
+				iso_dbg(ISO_DBG_INFO,
+					"[pehci_hcd_iso_worker]: last_td is not set\n");
+				/*update skipmap */
+				skipmap |= td_ptd_map->ptd_bitmap;
+				isp1763_reg_write16(hcd->dev,
+					hcd->regs.isotdskipmap,
+					skipmap);
+				iso_dbg(ISO_DBG_DATA,
+					"%s : remain skipmap =0x%x\n",
+					__FUNCTION__, skipmap);
+
+				/* Decrement the count of active PTDs */
+				hcd->periodic_sched--;
+				/*schedule next PTD for this URB */
+				if (qhead->actualptds < qhead->totalptds) {
+					sitd_itd_remove = &qhead->periodic_list.sitd_itd_head;
+					/* find sitd to schedule */
+					list_for_each(position, sitd_itd_remove) {
+						
+						if (qhead->periodic_list.high_speed == 0){
+						/* Get an SITD in the list for processing */
+							current_sitd= list_entry(position, struct ehci_sitd,
+									sitd_list);		
+							if(current_sitd->sitd_index==TD_PTD_INV_PTD_INDEX)
+								break;
+						}	
+					}
+					if (current_sitd->sitd_index == TD_PTD_INV_PTD_INDEX) {
+						qhead->actualptds++;
+						/*allocate memory and PTD index */
+						memcpy(&current_sitd->mem_addr, &sitd->mem_addr,
+							sizeof(struct isp1763_mem_addr));
+//						printk("current %x\n", sitd->sitd_index);
+						current_sitd->sitd_index = sitd->sitd_index;
+						/*schedule PTD */
+						td_ptd_map->sitd = current_sitd;
+						hcd->periodic_sched++;
+						pehci_hcd_iso_sitd_schedule(hcd, urb, current_sitd);
+					}
+
+				/* Remove this SITD from the list of active ITDs */
+				list_del(&sitd->sitd_list);
+
+				/* Free up the memory we allocated for the SITD structure */
+				qha_free(qha_cache, sitd);
+
+					
+				}else{
+#ifndef COMMON_MEMORY
+				phci_hcd_mem_free(&sitd->mem_addr);
+#endif
+				/* Remove this SITD from the list of active ITDs */
+				list_del(&sitd->sitd_list);
+
+				/* Free up the memory we allocated for the SITD structure */
+				qha_free(qha_cache, sitd);
+
+				/*
+				 * Clear the bit associated with this PTD from the grouptdmap and
+				 * make this PTD available for other transfers
+				 */
+				td_ptd_map->state = TD_PTD_NEW;
+				td_ptd_map->sitd = NULL;
+				td_ptd_map->itd = NULL;
+
+				}
+
+			}	else {	/*HIGH SPEED */
+
+				/* Get an ITD in the list for processing */
+				itd = ptd_map_buff->map_list[index].itd;
+
+				/* Get the PTD that was allocated for this particular ITD. */
+				td_ptd_map =
+					&ptd_map_buff->map_list[itd->itd_index];
+
+				iso_dbg(ISO_DBG_INFO,
+					"[pehci_hcd_iso_worker]: PTD is done , %d\n",
+					index);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: ITD Index: %d\n",
+					itd->itd_index);
+
+				urb = itd->urb;
+
+				/*
+				 * Get the base address of the memory allocated in the
+				 * PAYLOAD region for this ITD
+				 */
+				mem_addr = &itd->mem_addr;
+				memset(iso_ptd, 0,
+					sizeof(struct _isp1763_isoptd));
+
+				/*
+				 * Read this ptd from the ram address,address is in the
+				 * td_ptd_map->ptd_header_addr
+				 */
+
+				isp1763_mem_read(hcd->dev,
+					td_ptd_map->ptd_header_addr,
+					0, (u32 *) iso_ptd,
+					PHCI_QHA_LENGTH, 0);
+
+				/*
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD0 = 0x%08x\n", iso_ptd->td_info1);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD1 = 0x%08x\n", iso_ptd->td_info2);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD2 = 0x%08x\n", iso_ptd->td_info3);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD3 = 0x%08x\n", iso_ptd->td_info4);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD4 = 0x%08x\n", iso_ptd->td_info5);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD5 = 0x%08x\n", iso_ptd->td_info6);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD6 = 0x%08x\n", iso_ptd->td_info7);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD7 = 0x%08x\n", iso_ptd->td_info8);
+				*/
+
+
+				/* If the PTD has been executed properly,
+				 * the V bit should be cleared */
+				if (iso_ptd->td_info1 & QHA_VALID) {
+					iso_dbg(ISO_DBG_ERR,
+						"[pehci_hcd_iso_worker Error]: Valid bit not cleared\n");
+					for(i = 0; i<itd->num_of_pkts; i++){
+						urb->iso_frame_desc[itd->index
+							+ i].status = -ENOSPC;
+					}
+				} else {
+					for (i = 0; i<itd->num_of_pkts; i++){
+						urb->iso_frame_desc[itd->index
+							+i].status = 0;
+					}
+				}
+
+				/* Go over the status of each of the 8 Micro Frames */
+				for (uframe_cnt = 0; (uframe_cnt < 8)
+					&& (uframe_cnt < itd->num_of_pkts);
+					uframe_cnt++) {
+					/*
+					 * We go over the status one at a time. The status bits and their
+					 * equivalent status are:
+					 * Bit 0 - Transaction Error (IN and OUT)
+					 * Bit 1 - Babble (IN token only)
+					 * Bit 2 - Underrun (OUT token only)
+					 */
+					usof_stat =
+						iso_ptd->td_info5 >> (8 +
+						(uframe_cnt * 3));
+
+					switch (usof_stat & 0x7) {
+					case INT_UNDERRUN:
+						iso_dbg(ISO_DBG_ERR,
+							"[pehci_hcd_iso_worker Error]: Buffer underrun\n");
+						urb->iso_frame_desc[itd->index +
+							uframe_cnt].
+						status = -ECOMM;
+						urb->error_count++;
+						break;
+					case INT_EXACT:
+						iso_dbg(ISO_DBG_ERR,
+							"[pehci_hcd_iso_worker Error]: %p Transaction error\n",
+							urb);
+						urb->iso_frame_desc[itd->index +
+							uframe_cnt].
+							status = -EPROTO;
+						urb->error_count++;
+						debugiso = 25;
+						break;
+					case INT_BABBLE:
+						iso_dbg(ISO_DBG_ERR,
+							"[pehci_hcd_iso_worker Error]: Babble error\n");
+						urb->iso_frame_desc[itd->index +
+							uframe_cnt].
+							status = -EOVERFLOW;
+						urb->error_count++;
+						break;
+					}/* switch(usof_stat & 0x7) */
+				}/* end of for( ulMicroFrmCnt = 0; ulMicroFrmCnt < 8; ulMicroFrmCnt++) */
+
+				/*
+				 * Get the number of bytes transferred. This indicates the number of
+				 * bytes sent or received for this transaction.
+				 */
+
+				/* Length is 32K for high speed device */
+				length = PTD_XFERRED_LENGTH(iso_ptd->td_info4);
+
+				/* Halted, need to finish all the transfer on this endpoint */
+				if (iso_ptd->td_info4 & PTD_STATUS_HALTED) {
+
+					iso_dbg(ISO_DBG_ERR,
+						"[pehci_hcd_iso_worker Error] PTD Halted\n");
+					printk("[pehci_hcd_iso_worker Error] PTD Halted===============\n");
+					/*
+					 * When there is an error, do not process the other PTDs.
+					 * Stop at the PTD with the error and remove all other PTDs.
+					 */
+					td_ptd_map->lasttd = 1;
+
+					/*
+					 * In case of halt, next transfer will start with toggle zero,
+					 * USB specs, 5.8.5
+					 */
+					td_ptd_map->datatoggle = 0;
+				}
+				/* if(iso_ptd->td_info4 & PTD_STATUS_HALTED) */
+				/* Update the actual length of the transfer from the data we got earlier */
+				if (PTD_PID(iso_ptd->td_info2) == OUT_PID) {
+					for (i = 0; i < itd->num_of_pkts; i++){
+						urb->iso_frame_desc[itd->index +
+						i].actual_length =(unsigned int)
+						length / itd->num_of_pkts;
+					}
+				} else{
+					iso_dbg(ISO_DBG_DATA,
+						"itd->num_of_pkts = %d, itd->ssplit = %x\n",
+						itd->num_of_pkts, itd->ssplit);
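+					/*
+					 * For split ISO IN transfers the per-packet transferred
+					 * lengths come back as consecutive 12-bit fields packed
+					 * across td_info6..td_info8 (DW5..DW7). Packet 0 is read
+					 * directly; packets 1..7 are decoded only when the
+					 * matching start-split bit in itd->ssplit is set and the
+					 * frame completed with status 0.
+					 */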
+					urb->iso_frame_desc[itd->index +
+						0].actual_length =
+						iso_ptd->td_info6 & 0x00000FFF;
+					iso_dbg(ISO_DBG_DATA,
+						"actual length[0] = %d\n",
+						urb->iso_frame_desc[itd->index +0].
+						actual_length);
+
+					if((itd->num_of_pkts > 1)
+						&& ((itd->ssplit & 0x2) == 0x2)
+						&& (urb->iso_frame_desc[itd->index +
+						1].status ==0)) {
+						
+						urb->iso_frame_desc[itd->index +1].
+							actual_length =	(iso_ptd->
+							td_info6 & 0x00FFF000)>> 12;
+
+						iso_dbg(ISO_DBG_DATA,
+							"actual length[1] = %d\n",
+							urb->
+							iso_frame_desc[itd->
+							index + 1].
+							actual_length);
+					}else{
+						urb->iso_frame_desc[itd->index +1].
+							actual_length = 0;
+					}
+
+					if ((itd->num_of_pkts > 2)
+						&& ((itd->ssplit & 0x4) == 0x4)
+						&& (urb->
+						iso_frame_desc[itd->index +
+						2].status ==0)) {
+						
+						urb->iso_frame_desc[itd->index +
+							2].actual_length =
+							((iso_ptd->td_info6 &
+							0xFF000000 )>> 24)
+							| ((iso_ptd->td_info7
+							& 0x0000000F)<< 8);
+						
+						iso_dbg(ISO_DBG_DATA,
+							"actual length[2] = %d\n",
+							urb->iso_frame_desc[itd->
+							index + 2].actual_length);
+					} else{
+						urb->iso_frame_desc[itd->index +2].
+							actual_length = 0;
+					}
+
+					if ((itd->num_of_pkts > 3)
+						&& ((itd->ssplit & 0x8) == 0x8)
+						&& (urb->iso_frame_desc[itd->index +
+						3].status == 0)) {
+
+						urb->iso_frame_desc[itd->index + 3].
+							actual_length =(iso_ptd->
+							td_info7 & 0x0000FFF0)>> 4;
+
+						iso_dbg(ISO_DBG_DATA,
+							"actual length[3] = %d\n",
+							urb->iso_frame_desc[itd->
+							index + 3].actual_length);
+					} else {
+						urb->iso_frame_desc[itd->index +3].
+							actual_length = 0;
+					}
+
+					if ((itd->num_of_pkts > 4)
+						&& ((itd->ssplit & 0x10) == 0x10)
+						&& (urb->
+						iso_frame_desc[itd->index +
+						4].status ==0)) {
+
+						urb->iso_frame_desc[itd->index +
+							4].actual_length =
+							(iso_ptd->
+							td_info7 & 0x0FFF0000) >> 16;
+
+						iso_dbg(ISO_DBG_DATA,
+							"actual length[4] = %d\n",
+							urb->iso_frame_desc[itd->index +
+							4].actual_length);
+					} else {
+						urb->iso_frame_desc[itd->index +
+							4].actual_length = 0;
+					}
+
+					if ((itd->num_of_pkts > 5)
+						&& ((itd->ssplit & 0x20) == 0x20)
+						&& (urb->
+						iso_frame_desc[itd->index +
+						5].status ==
+						0)) {
+
+						urb->iso_frame_desc[itd->index +
+							5].actual_length =
+							((iso_ptd->
+							td_info7 & 0xF0000000) >> 28) | 
+							((iso_ptd->td_info8 &
+							0x000000FF)
+							<< 4);
+
+						iso_dbg(ISO_DBG_DATA,
+							"actual length[5] = %d\n",
+							urb->
+							iso_frame_desc[itd->
+							index +
+							5].actual_length);
+					} else {
+						urb->iso_frame_desc[itd->index +
+							5].actual_length = 0;
+					}
+
+					if ((itd->num_of_pkts > 6)
+						&& ((itd->ssplit & 0x40) == 0x40)
+						&& (urb->
+						iso_frame_desc[itd->index +
+						6].status ==0)) {
+
+						urb->iso_frame_desc[itd->index +
+							6].actual_length =
+							(iso_ptd->
+							td_info8 & 0x000FFF00)
+							>> 8;
+						
+						iso_dbg(ISO_DBG_DATA,
+							"actual length[6] = %d\n",
+							urb->
+							iso_frame_desc[itd->
+							index +
+							6].actual_length);
+					} else {
+						urb->iso_frame_desc[itd->index +
+							6].actual_length = 0;
+					}
+
+					if ((itd->num_of_pkts > 7)
+						&& ((itd->ssplit & 0x80) == 0x80)
+						&& (urb->
+						iso_frame_desc[itd->index +
+						7].status ==
+						0)) {
+
+						urb->iso_frame_desc[itd->index +
+							7].actual_length =
+							(iso_ptd->
+							td_info8 & 0xFFF00000) >> 20;
+
+						iso_dbg(ISO_DBG_DATA,
+							"actual length[7] = %d\n",
+							urb->
+							iso_frame_desc[itd->
+							index +
+							7].actual_length);
+					} else {
+						urb->iso_frame_desc[itd->index +
+							7].actual_length = 0;
+					}
+				}
+				/* Check if this is the last ITD either due to some error or normal completion */
+				if ((td_ptd_map->lasttd)
+					|| (itd->hw_next == EHCI_LIST_END)) {
+
+					last_td = TRUE;
+
+				}
+
+				/* Copy data to/from */
+				if (length && (length <= MAX_PTD_BUFFER_SIZE)) {
+					switch (PTD_PID(iso_ptd->td_info2)) {
+					case IN_PID:
+						/*
+						 * Get the data from the PAYLOAD area and place it into
+						 * the buffer provided by the requestor.
+						 */
+						/*for first packet*/
+						startAdd = mem_addr->phy_addr;
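+						/*
+						 * The payloads for all packets of this ITD sit
+						 * back-to-back in the PAYLOAD region, so startAdd
+						 * is advanced by each packet's actual_length as the
+						 * packets are copied out below.
+						 */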
+						iso_dbg(ISO_DBG_DATA,
+							"start add = %ld hw_bufp[0] = 0x%08x length = %d\n",
+							startAdd,
+							itd->hw_bufp[0],
+							urb->
+							iso_frame_desc[itd->
+							index].actual_length);
+						if (urb->
+							iso_frame_desc[itd->index].
+							status == 0) {
+
+							if (itd->hw_bufp[0] ==0) {
+								dma_addr_t
+									buff_dma;
+
+								buff_dma =
+									(u32) ((unsigned char *) urb->transfer_buffer +
+									urb->iso_frame_desc[itd->index].offset);
+								itd->buf_dma =
+									buff_dma;
+								itd->hw_bufp[0]
+									=
+									buff_dma;
+							}
+							if (itd->hw_bufp[0] !=0) {
+
+								ret = isp1763_mem_read(hcd->dev, (unsigned long)
+									startAdd,
+									0,(u32*)itd->
+									hw_bufp[0],
+									urb->
+									iso_frame_desc
+									[itd->
+									index].
+									actual_length,
+									0);
+
+							} else {
+								printk("isp1763_mem_read data payload fail\n");
+								printk("start add = %ld hw_bufp[0] = 0x%08x length = %d\n",
+									startAdd, itd->hw_bufp[0],
+									urb->iso_frame_desc[itd->index].actual_length);
+								urb->iso_frame_desc[itd->index].status = -EPROTO;
+								urb->error_count++;
+							}
+						}
+
+
+						for (i = 1;
+							i < itd->num_of_pkts;
+							i++) {
+							startAdd +=
+								(unsigned
+								long) (urb->
+								iso_frame_desc
+								[itd->
+								index +
+								i - 1].
+								actual_length);
+
+							iso_dbg(ISO_DBG_DATA,
+								"start add = %ld hw_bufp[%d] = 0x%08x length = %d\n",
+								startAdd, i,
+								itd->hw_bufp[i],
+								urb->
+								iso_frame_desc
+								[itd->index +
+								i].
+								actual_length);
+							if (urb->
+								iso_frame_desc[itd->
+								index + i].
+								status == 0) {
+
+								ret = isp1763_mem_read(
+									hcd->dev,
+									startAdd, 0,
+									(u32 *) itd->hw_bufp[i],
+									urb->iso_frame_desc
+									[itd->index + i].
+									actual_length, 0);
+
+								if (ret == -EINVAL){
+									printk("isp1763_mem_read data payload fail %d\n", i);
+								}
+							}
+						}
+
+					case OUT_PID:
+						/*
+						 * urb->actual_length was initialized to zero, so for the first
+						 * uFrame having it incremented immediately is not a problem.
+						 */
+						urb->actual_length += length;
+						break;
+					}	/* switch(PTD_PID(iso_ptd->td_info2)) */
+				}
+
+				/* if(length && (length <= MAX_PTD_BUFFER_SIZE)) */
+//				removeitd:
+				/*read skip-map */
+				skipmap =
+					isp1763_reg_read16(hcd->dev,
+						hcd->regs.isotdskipmap,
+						skipmap);
+
+				iso_dbg(ISO_DBG_DATA,
+					"[%s] : read skipmap =0x%x\n",
+					__FUNCTION__, skipmap);
+				if (last_td == TRUE) {
+					/* Start removing the ITDs in the list */
+					while (1) {
+						/*
+						 * This indicates that we are processing the tail PTD.
+						 * Perform cleanup procedure on this last PTD
+						 */
+						if (itd->hw_next ==
+							EHCI_LIST_END) {
+							td_ptd_map =
+							&ptd_map_buff->
+							map_list[itd->
+							itd_index];
+
+							/*
+							 * Free up our allocation in the PAYLOAD area so that others can use
+							 * it.
+							 */
+#ifndef COMMON_MEMORY
+							phci_hcd_mem_free(&itd->
+								mem_addr);
+#endif
+
+							/* Remove this ITD entry in the ITD list */
+							list_del(&itd->
+								itd_list);
+
+							/* Free up the memory allocated for the ITD structure */
+							qha_free(qha_cache,
+								itd);
+
+							/* Indicate that the PTD we have used is now free */
+							td_ptd_map->state =
+								TD_PTD_NEW;
+							td_ptd_map->sitd = NULL;
+							td_ptd_map->itd = NULL;
+
+							/* Decrease the number of active PTDs scheduled */
+							hcd->periodic_sched--;
+
+							/* Skip this PTD during the next PTD processing. */
+							skipmap |=
+								td_ptd_map->
+								ptd_bitmap;
+
+							isp1763_reg_write16
+								(hcd->dev,
+								hcd->regs.
+								isotdskipmap,
+								skipmap);
+
+							/* All ITDs in this list have been successfully removed. */
+							break;
+						}
+						/* if(itd->hw_next == EHCI_LIST_END) */
+						/*
+						 * This indicates that we stopped due to an error on a PTD that is
+						 * not the last in the list. We need to free up this PTD as well as
+						 * the PTDs after it.
+						 */
+						else {
+							/*
+							 * Save the ITD that hit the error in this variable.
+							 * We will be unlinking it from the list and freeing up its
+							 * resources later.
+							 */
+							current_itd = itd;
+
+							td_ptd_map =
+								&ptd_map_buff->
+								map_list[itd->
+								itd_index];
+
+							/*
+							 * Get the next ITD, and place it to the itd variable.
+							 * In a way we are moving forward in the ITD list.
+							 */
+							itd = (struct ehci_itd
+								*) (current_itd->
+								hw_next);
+#ifndef COMMON_MEMORY
+							/* Free up the current ITD's resources */
+							phci_hcd_mem_free
+								(&current_itd->
+								mem_addr);
+#endif
+
+							/* Remove this ITD entry in the ITD list */
+							list_del(&current_itd->
+								itd_list);
+
+							/* Free up the memory allocated for the ITD structure */
+							qha_free(qha_cache,
+								current_itd);
+
+							/* Indicate that the PTD we have used is now free */
+							td_ptd_map->state =
+								TD_PTD_NEW;
+							td_ptd_map->sitd = NULL;
+							td_ptd_map->itd = NULL;
+
+							/* Decrease the number of active PTDs scheduled */
+							hcd->periodic_sched--;
+
+							/* Since it is done, skip this PTD during the next PTD processing. */
+							skipmap |=
+								td_ptd_map->
+								ptd_bitmap;
+							isp1763_reg_write16
+								(hcd->dev,
+								hcd->regs.
+								isotdskipmap,
+								skipmap);
+							/*
+							 * Start all over again until it gets to the tail of the
+							 * list of PTDs/ITDs
+							 */
+							continue;
+						}/* else of if(itd->hw_next == EHCI_LIST_END) */
+						/* It should never get here, but I put this as a precaution */
+						break;
+					}	/*end of while(1) */
+					/* Check if there were ITDs that were not processed due to the error */
+					if (urb->status == -EINPROGRESS) {
+						if ((urb->actual_length !=
+							urb->transfer_buffer_length)
+							&& (urb->
+							transfer_flags &
+							URB_SHORT_NOT_OK)) {
+
+							iso_dbg(ISO_DBG_ERR,
+							"[pehci_hcd_iso_worker Error]: Short Packet\n");
+
+							urb->status =
+								-EREMOTEIO;
+						} else {
+							urb->status = 0;
+						}
+					}
+
+					urb->hcpriv = 0;
+					iso_dbg(ISO_DBG_DATA,
+						"[%s] : remain skipmap =0x%x\n",
+						__FUNCTION__, skipmap);
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)
+//					if (urb->reject.counter) {
+					if (unlikely(atomic_read(&urb->reject))) {// kernel reference code hcd.c
+						iso_dbg(ISO_DBG_INFO, "[%s] urb reject\n", __FUNCTION__);
+						iReject = 1;
+					}
+#else
+					if (unlikely(urb->reject)) {
+						iso_dbg(ISO_DBG_INFO, "[%s] urb reject\n", __FUNCTION__);
+						iReject = 1;
+					}
+#endif
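+					/*
+					 * A rejected URB is not given back here; iReject only
+					 * flags the condition so that, once the done-map walk
+					 * finishes, the flush loop below can complete every URB
+					 * still queued on the periodic endpoint.
+					 */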
+
+/*
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,28)
+
+					if (urb->reject.counter) {
+						iso_dbg("ISO_DBG_INFO, [%s] urb reject\n", __FUNCTION__);
+						iReject = 1;
+					}
+#else
+				        if (unlikely(urb->reject)) {					       
+				
+					
+						iso_dbg("ISO_DBG_INFO, [%s] urb reject\n", __FUNCTION__);
+						iReject = 1;
+					}
+#endif
+*/
+
+#ifdef COMMON_MEMORY
+					phci_hcd_mem_free(&qhead->memory_addr);
+#endif
+					/* We need to unlock this here, since this was locked when we are called */
+					/* from the interrupt handler */
+					spin_unlock(&hcd->lock);
+					/* Perform URB cleanup */
+					iso_dbg(ISO_DBG_INFO,
+						"[pehci_hcd_iso_worker] Complete a URB\n");
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+		if(!usb_hcd_check_unlink_urb(&hcd->usb_hcd, urb,0))
+					usb_hcd_unlink_urb_from_ep(&hcd->usb_hcd, urb);
+#endif
+					hcd->periodic_more_urb = 0;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+						qhead=urb->hcpriv;
+					if (!list_empty(&qhead->ep->urb_list)){
+
+#else
+					if (!list_empty(&urb->ep->urb_list)){
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+						if (urb->hcpriv== periodic_ep[0]){
+#else
+						if (urb->ep == periodic_ep[0]){
+#endif
+							hcd->periodic_more_urb =
+							1;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+						} else if (urb->hcpriv==
+							 periodic_ep[1]){
+#else
+						} else if (urb->ep ==
+							 periodic_ep[1]){
+#endif							 
+							hcd->periodic_more_urb =
+							2;
+						} else {
+							hcd->periodic_more_urb =
+							0;
+						}
+
+
+					}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+				usb_hcd_giveback_urb(&hcd->usb_hcd, urb);
+#else
+				usb_hcd_giveback_urb(&hcd->usb_hcd, urb, 
+										urb->status);
+#endif
+
+					spin_lock(&hcd->lock);
+					continue;
+				}
+				/* if( last_td == TRUE ) */
+				/*
+				 * If the last_td is not set then we do not need to check for errors and directly
+				 * proceed with the cleaning sequence.
+				 */
+				iso_dbg(ISO_DBG_INFO,
+					"[pehci_hcd_iso_worker]: last_td is not set\n");
+				/*update skipmap */
+				skipmap |= td_ptd_map->ptd_bitmap;
+				isp1763_reg_write16(hcd->dev,
+					hcd->regs.isotdskipmap,
+					skipmap);
+				iso_dbg(ISO_DBG_DATA,
+					"%s : remain skipmap =0x%x\n",
+					__FUNCTION__, skipmap);
+
+				/* Decrement the count of active PTDs */
+				hcd->periodic_sched--;
+#ifndef COMMON_MEMORY
+				/* Free up the memory we allocated in the PAYLOAD area */
+				phci_hcd_mem_free(&itd->mem_addr);
+#endif
+				/* Remove this ITD from the list of active ITDs */
+				list_del(&itd->itd_list);
+
+				/* Free up the memory we allocated for the ITD structure */
+				qha_free(qha_cache, itd);
+				/*
+				 * Clear the bit associated with this PTD from the grouptdmap and
+				 * make this PTD available for other transfers
+				 */
+				td_ptd_map->state = TD_PTD_NEW;
+				td_ptd_map->sitd = NULL;
+				td_ptd_map->itd = NULL;
+			}	/*end of HIGH SPEED */
+		}		/* end of list_for_each_safe(position, lst_temp, itd_remove) */
+		iso_dbg(ISO_DBG_INFO,
+			"[pehci_hcd_iso_worker]: ISO-Frame removal done\n");
+
+
+	}			/* while donetoclear */
+
+
+	if (iReject) {
+		spin_unlock(&hcd->lock);
+		if (hcd->periodic_more_urb) {
+
+			if (periodic_ep[hcd->periodic_more_urb - 1])
+			while (!list_empty(&periodic_ep[hcd->periodic_more_urb - 1]->
+				urb_list)) {
+
+				urb = container_of(periodic_ep
+					[hcd->periodic_more_urb -
+					1]->urb_list.next,
+					struct urb, urb_list);
+				
+				if (urb) {
+					urb->status = -ENOENT;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+		if(!usb_hcd_check_unlink_urb(&hcd->usb_hcd, urb,0))
+					usb_hcd_unlink_urb_from_ep(&hcd->
+					usb_hcd,urb);
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+					usb_hcd_giveback_urb(&hcd->usb_hcd, urb);
+#else
+					usb_hcd_giveback_urb(&hcd->usb_hcd, urb,
+						urb->status);
+#endif
+				}
+			}
+		}
+
+		spin_lock(&hcd->lock);
+	}
+
+	/* When there is no more PTDs queued for scheduling or removal
+	 * clear the buffer status to indicate there are no more PTDs for
+	 * processing and set the skip map to 1 to indicate that the first
+	 * PTD is also the last PTD.
+	 */
+
+	if (hcd->periodic_more_urb) {
+		int status = 0;
+		iso_dbg(ISO_DBG_INFO,
+			"[phcd_iso_handler]: No more PTDs queued\n");
+		hcd->periodic_sched = 0;
+		phcd_store_urb_pending(hcd, hcd->periodic_more_urb, NULL,
+				       &status);
+		hcd->periodic_more_urb = 0;
+	}
+exit:
+	iso_dbg(ISO_DBG_ENTRY, "-- %s: Exit\n", __FUNCTION__);
+}				/* end of pehci_hcd_iso_worker */
+
+#endif /* CONFIG_ISO_SUPPORT */
+
+/*interrupt transfer handler*/
+/********************************************************
+  1. read done map
+  2. read the ptd to see any errors
+  3. copy the payload to and from
+  4. update ehci td
+  5. make new ptd if transfer there and earlier done
+  6. schedule
+ *********************************************************/
+static void
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+pehci_hcd_intl_worker(phci_hcd * hcd, struct pt_regs *regs)
+#else
+pehci_hcd_intl_worker(phci_hcd * hcd)
+#endif
+{
+	int i =	0;
+	u16 donemap = 0, donetoclear;
+	u16 mask = 0x1,	index =	0;
+	u16 pendingmap = 0;
+	u16 location = 0;
+	u32 length = 0;
+	u16 skipmap = 0;
+	u16 ormask = 0;
+	u32 usofstatus = 0;
+	struct urb *urb;
+	struct ehci_qtd	*qtd = 0;
+	struct ehci_qh *qh = 0;
+
+	struct _isp1763_qhint *qhint = &hcd->qhint;
+
+	td_ptd_map_t *td_ptd_map;
+	td_ptd_map_buff_t *ptd_map_buff;
+	struct isp1763_mem_addr	*mem_addr = 0;
+	u16 dontschedule = 0;
+
+	ptd_map_buff = &(td_ptd_map_buff[TD_PTD_BUFF_TYPE_INTL]);
+	pendingmap = ptd_map_buff->pending_ptd_bitmap;
+
+	/*read the done	map for	interrupt transfers */
+	donetoclear = donemap =
+		isp1763_reg_read16(hcd->dev, hcd->regs.inttddonemap, donemap);
+	if (donemap) {
+		/*skip done tds	*/
+		skipmap	=
+			isp1763_reg_read16(hcd->dev, hcd->regs.inttdskipmap,
+			skipmap);
+		skipmap	|= donemap;
+		isp1763_reg_write16(hcd->dev, hcd->regs.inttdskipmap, skipmap);
+		donemap	|= pendingmap;
+	}
+	/*if sof interrupt is enabled */
+#ifdef MSEC_INT_BASED
+	else {
+		/*if there is something	pending	, put this transfer in */
+		if (ptd_map_buff->pending_ptd_bitmap) {
+			pehci_hcd_schedule_pending_ptds(hcd, pendingmap, (u8)
+				TD_PTD_BUFF_TYPE_INTL,
+				1);
+		}
+		//return 0;
+		goto exit;
+	}
+#else
+	else {
+	goto exit;	
+	//return 0;
+	}
+
+#endif
+
+
+	ormask = isp1763_reg_read16(hcd->dev, hcd->regs.int_irq_mask_or,
+		ormask);
+	/*process all the endpoints first those	are done */
+	donetoclear = donemap;
+	while (donetoclear) {
+		/* index is non-zero when the PTD at this bit position is done */
+		index =	donetoclear & mask;
+		donetoclear &= ~mask;
+		mask <<= 1;
+		/*what if we are in the	middle of schedule
+		   where nothing is done */
+		if (!index) {
+			location++;
+			continue;
+		}
+
+		/*read our td_ptd_map */
+		td_ptd_map = &ptd_map_buff->map_list[location];
+
+		/*if this one is already in the	removal	*/
+		if (td_ptd_map->state == TD_PTD_REMOVE ||
+			td_ptd_map->state == TD_PTD_NEW) {
+			pehci_check("interrupt td is being removed\n");
+			/*this will be handled by urb_remove */
+			/*if this is last urb no need to complete it again */
+			donemap	&= ~td_ptd_map->ptd_bitmap;
+			/*if there is something	pending	*/
+			ptd_map_buff->pending_ptd_bitmap &=
+				~td_ptd_map->ptd_bitmap;
+			continue;
+		}
+
+
+		/*if we	found something	already	in */
+		if (!(skipmap &	td_ptd_map->ptd_bitmap)) {
+			pehci_check("intr td_ptd_map %x, skipmap %x\n",
+			td_ptd_map->ptd_bitmap, skipmap);
+			donemap	&= ~td_ptd_map->ptd_bitmap;
+			/*in case pending */
+			ptd_map_buff->pending_ptd_bitmap &=
+				~td_ptd_map->ptd_bitmap;
+			location++;
+			continue;
+		}
+
+
+		if (td_ptd_map->state == TD_PTD_NEW) {
+			pehci_check
+				("interrupt should not come here, map %x, location %d\n",
+				 td_ptd_map->ptd_bitmap, location);
+			donemap	&= ~td_ptd_map->ptd_bitmap;
+			/*in case pending */
+			ptd_map_buff->pending_ptd_bitmap &=
+				~td_ptd_map->ptd_bitmap;
+			donemap	&= ~td_ptd_map->ptd_bitmap;
+			location++;
+			continue;
+		}
+
+		/*move to the next schedule */
+		location++;
+		/*endpoint, td,	urb and	memory
+		 * for current transfer*/
+		qh = td_ptd_map->qh;
+		qtd = td_ptd_map->qtd;
+		if (qtd->state & QTD_STATE_NEW)	{
+			/*we need to schedule it */
+			goto schedule;
+		}
+		urb = qtd->urb;
+		mem_addr = &qtd->mem_addr;
+
+		/*clear	the irq	mask for this transfer */
+		ormask &= ~td_ptd_map->ptd_bitmap;
+		isp1763_reg_write16(hcd->dev, hcd->regs.int_irq_mask_or,
+			ormask);
+
+		ptd_map_buff->active_ptds--;
+		memset(qhint, 0, sizeof(struct _isp1763_qhint));
+
+		/*read this ptd	from the ram address,address is	in the
+		   td_ptd_map->ptd_header_addr */
+		isp1763_mem_read(hcd->dev, td_ptd_map->ptd_header_addr,	0,
+				 (u32 *) (qhint), PHCI_QHA_LENGTH, 0);
+
+#ifdef PTD_DUMP_COMPLETE
+		printk("INTL PTD header after COMPLETION\n");
+		printk("CDW0: 0x%08X\n", qhint->td_info1);
+		printk("CDW1: 0x%08X\n", qhint->td_info2);
+		printk("CDW2: 0x%08X\n", qhint->td_info3);
+		printk("CDW3: 0x%08X\n", qhint->td_info4);
+#endif
+
+		/* status of 8 uframes */
+		for (i = 0; i <	8; i++)	{
+			/*take care of errors */
+			usofstatus = qhint->td_info5 >>	(8 + i * 3);
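+			/* each micro-frame has a 3-bit status field in td_info5,
+			 * starting at bit 8, decoded one at a time below */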
+			switch (usofstatus & 0x7) {
+			case INT_UNDERRUN:
+				pehci_print("under run , %x\n",	usofstatus);
+				break;
+			case INT_EXACT:
+				pehci_print("transaction error,	%x\n",
+					    usofstatus);
+				break;
+			case INT_BABBLE:
+				pehci_print("babble error, %x\n", usofstatus);
+				break;
+			}
+		}
+
+		if (urb->dev->speed != USB_SPEED_HIGH) {
+			/*length is 1K for full/low speed device */
+			length = PTD_XFERRED_NONHSLENGTH(qhint->td_info4);
+		} else {
+			/*length is 32K	for high speed device */
+			length = PTD_XFERRED_LENGTH(qhint->td_info4);
+		}
+
+		pehci_hcd_update_error_status(qhint->td_info4, urb);
+		/*halted, need to finish all the transfer on this endpoint */
+		if (qhint->td_info4 & PTD_STATUS_HALTED) {
+			qtd->state |= QTD_STATE_LAST;
+			/*in case of halt, next	transfer will start with toggle	zero,
+			 *USB spec, 5.8.5*/
+			qh->datatoggle = td_ptd_map->datatoggle	= 0;
+			donemap	&= ~td_ptd_map->ptd_bitmap;
+			ptd_map_buff->pending_ptd_bitmap &=
+				~td_ptd_map->ptd_bitmap;
+			dontschedule = 1;
+			goto copylength;
+		}
+
+
+		copylength:
+		/*preserve the current data toggle */
+		qh->datatoggle = td_ptd_map->datatoggle	=
+			PTD_NEXTTOGGLE(qhint->td_info4);
+		/*copy data from the host */
+		switch (PTD_PID(qhint->td_info2)) {
+		case IN_PID:
+			if (length && (length <= MAX_PTD_BUFFER_SIZE))
+				/*do read only when there is somedata */
+				isp1763_mem_read(hcd->dev,
+					(u32) mem_addr->phy_addr, 0,
+					urb->transfer_buffer +
+					urb->actual_length, length, 0);
+
+		case OUT_PID:
+			urb->actual_length += length;
+			qh->hw_current = qtd->hw_next;
+			phci_hcd_mem_free(&qtd->mem_addr);
+			qtd->state &= ~QTD_STATE_NEW;
+			qtd->state |= QTD_STATE_DONE;
+			break;
+		}
+
+		if (qtd->state & QTD_STATE_LAST) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+			pehci_hcd_urb_complete(hcd, qh, urb, td_ptd_map, regs);
+#else
+			pehci_hcd_urb_complete(hcd, qh, urb, td_ptd_map);
+#endif
+			if (dontschedule) {	/*cleanup will start from drivers */
+				dontschedule = 0;
+				continue;
+			}
+
+			/*take the next	if in the queue	*/
+			if (!list_empty(&qh->qtd_list))	{
+				struct list_head *head;
+				/*last td of previous urb */
+				head = &qh->qtd_list;
+				qtd = list_entry(head->next, struct ehci_qtd,
+					qtd_list);
+				td_ptd_map->qtd	= qtd;
+				qh->hw_current = cpu_to_le32(qtd);
+				qh->qh_state = QH_STATE_LINKED;
+
+			} else {
+				td_ptd_map->qtd	=
+						 (struct ehci_qtd *) le32_to_cpu(0);
+				qh->hw_current = cpu_to_le32(0);
+				qh->qh_state = QH_STATE_IDLE;
+				donemap	&= ~td_ptd_map->ptd_bitmap;
+				ptd_map_buff->pending_ptd_bitmap &= 
+						~td_ptd_map->ptd_bitmap;
+	       			td_ptd_map->state=TD_PTD_NEW;
+				continue;
+			}
+
+		}
+
+		schedule:
+		{
+			/*current td comes from	qh->hw_current */
+			ptd_map_buff->pending_ptd_bitmap &=
+				~td_ptd_map->ptd_bitmap;
+			ormask |= td_ptd_map->ptd_bitmap;
+			ptd_map_buff->active_ptds++;
+			pehci_check
+				("inter	schedule next qtd %p, active tds %d\n",
+				 qtd, ptd_map_buff->active_ptds);
+			pehci_hcd_qtd_schedule(hcd, qtd, qh, td_ptd_map);
+		}
+
+	}			/*end of while */
+
+
+	/*clear	all the	tds inside this	routine	*/
+	skipmap	&= ~donemap;
+	isp1763_reg_write16(hcd->dev, hcd->regs.inttdskipmap, skipmap);
+	ormask |= donemap;
+	isp1763_reg_write16(hcd->dev, hcd->regs.int_irq_mask_or, ormask);
+exit:
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+	
+//	return (int)0;
+}
+
+/*atl(bulk/control) transfer handler*/
+/*1. read done map
+  2. read the ptd to see any errors
+  3. copy the payload to and from
+  4. update ehci td
+  5. make new ptd if transfer there and	earlier	done
+  6. schedule
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+static void
+pehci_hcd_atl_worker(phci_hcd * hcd, struct pt_regs *regs)
+#else
+static void
+pehci_hcd_atl_worker(phci_hcd * hcd)
+#endif
+{
+	u16 donemap = 0, donetoclear = 0;
+	u16 pendingmap = 0;
+	u32 rl = 0;
+	u16 mask = 0x1,	index =	0;
+	u16 location = 0;
+	u32 nakcount = 0;
+	u32 active = 0;
+	u32 length = 0;
+	u16 skipmap = 0;
+	u16 tempskipmap	= 0;
+	u16 ormask = 0;
+	struct urb *urb;
+	struct ehci_qtd	*qtd = 0;
+	struct ehci_qh *qh;
+	struct _isp1763_qha atlqha;
+	struct _isp1763_qha *qha;
+	td_ptd_map_t *td_ptd_map;
+	td_ptd_map_buff_t *ptd_map_buff;
+	urb_priv_t *urbpriv = 0;
+	struct isp1763_mem_addr	*mem_addr = 0;
+	u16 dontschedule = 0;
+	ptd_map_buff = &(td_ptd_map_buff[TD_PTD_BUFF_TYPE_ATL]);
+	pendingmap = ptd_map_buff->pending_ptd_bitmap;
+
+#ifdef MSEC_INT_BASED
+	/*running on skipmap rather than donemap,
+	   since in some cases donemap may not be set
+	   for a complete transfer
+	 */
+	skipmap	= isp1763_reg_read16(hcd->dev, hcd->regs.atltdskipmap, skipmap);
+	tempskipmap = ~skipmap;
+	tempskipmap &= 0xffff;
+
+	if (tempskipmap) {
+		donemap	=
+			isp1763_reg_read16(hcd->dev, hcd->regs.atltddonemap,
+					   donemap);
+		skipmap	|= donemap;
+		isp1763_reg_write16(hcd->dev, hcd->regs.atltdskipmap, skipmap);
+		qha = &atlqha;
+		donemap	|= pendingmap;
+		tempskipmap &= ~donemap;
+	}  else {
+
+	/*if sof interrupt enabled */
+
+		/*if there is something	pending	, put this transfer in */
+		if (pendingmap)	{
+			pehci_hcd_schedule_pending_ptds(hcd, pendingmap, (u8)
+				TD_PTD_BUFF_TYPE_ATL,
+				1);
+		}
+		goto exit;
+	}
+#else
+
+	donemap	= isp1763_reg_read16(hcd->dev, hcd->regs.atltddonemap, donemap);
+	if (donemap) {
+
+
+		pehci_info("DoneMap Value in ATL Worker	%x\n", donemap);
+		skipmap	=
+			isp1763_reg_read16(hcd->dev, hcd->regs.atltdskipmap,
+					   skipmap);
+		skipmap	|= donemap;
+		isp1763_reg_write16(hcd->dev, hcd->regs.atltdskipmap, skipmap);
+		qha = &atlqha;
+	} else {
+		pehci_info("Done Map Value is 0x%X \n",	donemap);
+		pehci_entry("--	%s: Exit abnormally with DoneMap all zero \n",
+			    __FUNCTION__);
+		goto exit;
+
+	}
+#endif
+
+	/*read the interrupt mask registers */
+	ormask = isp1763_reg_read16(hcd->dev, hcd->regs.atl_irq_mask_or,
+				    ormask);
+
+
+	/*this map is used only for updating and
+	 * scheduling the tds that are not yet
+	 * complete. for the tds that are complete,
+	 * a new schedule will happen from the
+	 * td_ptd_submit_urb routine
+	 * */
+	donetoclear = donemap;
+	/*we will be processing	skipped	tds also */
+	donetoclear |= tempskipmap;
+	/*process all the endpoints first those	are done */
+	while (donetoclear) {
+		/* index is non-zero when the PTD at this bit position is done */
+		index =	donetoclear & mask;
+		donetoclear &= ~mask;
+		mask <<= 1;
+		/*what if we are in the	middle of schedule
+		   where nothing is done
+		 */
+		if (!index) {
+			location++;
+			continue;
+		}
+
+		/*read our td_ptd_map */
+		td_ptd_map = &ptd_map_buff->map_list[location];
+
+		/*urb is in remove */
+		if (td_ptd_map->state == TD_PTD_NEW ||
+			td_ptd_map->state == TD_PTD_REMOVE)	{
+			pehci_check
+				("atl td is being removed,map %x, skipmap %x\n",
+				 td_ptd_map->ptd_bitmap, skipmap);
+			pehci_check("temp skipmap %x, pending map %x, done %x\n",
+				    tempskipmap, pendingmap, donemap);
+
+			/*unlink urb will take care of this */
+			donemap	&= ((~td_ptd_map->ptd_bitmap) &	0xffff);
+			/*in case pending */
+			ptd_map_buff->pending_ptd_bitmap &=
+				((~td_ptd_map->ptd_bitmap) & 0xffff);
+			location++;
+			continue;
+		}
+
+
+		/*move to the next endpoint */
+		location++;
+		/*endpoint, td,	urb and	memory
+		 * for current endpoint*/
+		qh = td_ptd_map->qh;
+		qtd = td_ptd_map->qtd;
+		if (!qh	|| !qtd) {
+			donemap	&= ((~td_ptd_map->ptd_bitmap) &	0xffff);
+			/*in case pending */
+			ptd_map_buff->pending_ptd_bitmap &=
+				((~td_ptd_map->ptd_bitmap) & 0xffff);
+			continue;
+		}
+#ifdef MSEC_INT_BASED
+		/*new td must be scheduled */
+		if ((qtd->state	& QTD_STATE_NEW)	/*&&
+							   (pendingmap & td_ptd_map->ptd_bitmap) */ ) {
+			/*this td will come here first time from
+			 *pending tds, so its qh->hw_current needs to
+			 * adjusted
+			 */
+			qh->hw_current = QTD_NEXT(qtd->qtd_dma);
+			goto schedule;
+		}
+#endif
+		urb = qtd->urb;
+		if (urb	== NULL) {
+			donemap	&= ((~td_ptd_map->ptd_bitmap) &	0xffff);
+			/*in case pending */
+			ptd_map_buff->pending_ptd_bitmap &=
+				((~td_ptd_map->ptd_bitmap) & 0xffff);
+			continue;
+		}
+		urbpriv	= (urb_priv_t *) urb->hcpriv;
+		mem_addr = &qtd->mem_addr;
+
+#ifdef MSEC_INT_BASED
+		/*check	here for the td	if its done */
+		if (donemap & td_ptd_map->ptd_bitmap) {
+			/*nothing to do	*/
+			;
+		} else {
+			/*if td	is not done, lets check	how long
+			   its been scheduled
+			 */
+			if (tempskipmap	& td_ptd_map->ptd_bitmap) {
+				/*i will give 20 msec to complete */
+				if (urbpriv->timeout < 20) {
+					urbpriv->timeout++;
+					continue;
+				}
+				urbpriv->timeout++;
+				/*otherwise check its status */
+			}
+
+		}
+#endif
+		memset(qha, 0, sizeof(struct _isp1763_qha));
+
+		/*read this ptd	from the ram address,address is	in the
+		   td_ptd_map->ptd_header_addr */
+		isp1763_mem_read(hcd->dev, td_ptd_map->ptd_header_addr,	0,
+				 (u32 *) (qha),	PHCI_QHA_LENGTH, 0);
+
+#ifdef PTD_DUMP_COMPLETE
+		printk("ATL PTD header after COMPLETION\n");
+		printk("CDW0: 0x%08X\n", qha->td_info1);
+		printk("CDW1: 0x%08X\n", qha->td_info2);
+		printk("CDW2: 0x%08X\n", qha->td_info3);
+		printk("CDW3: 0x%08X\n", qha->td_info4);
+#endif
+
+#ifdef MSEC_INT_BASED
+		/*since	we are running on skipmap
+		   tds will be checked for completion state
+		 */
+		if ((qha->td_info1 & QHA_VALID)) {
+
+			pehci_check
+				("pending map %x, donemap %x, tempskipmap %x\n",
+				 pendingmap, donemap, tempskipmap);
+			/*this could be	one of the unprotected urbs, clear it */
+			ptd_map_buff->pending_ptd_bitmap &=
+				((~td_ptd_map->ptd_bitmap) & 0xffff);
+			/*here also we need to increment the tds timeout count */
+			urbpriv->timeout++;
+			continue;
+		} else {
+			/*this td is going to be done,
+			   this	td could be the	one un-skipped but no donemap or
+			   maybe it could be one of those where	we get unprotected urbs,
+			   so checking against tempskipmap may not give	us correct td
+			 */
+
+			skipmap	|= td_ptd_map->ptd_bitmap;
+			isp1763_reg_write16(hcd->dev, hcd->regs.atltdskipmap,
+					    skipmap);
+
+			/*of course this is going to be	as good
+			   as td that is done and donemap is set
+			   also	skipmap	is set
+			 */
+			donemap	|= td_ptd_map->ptd_bitmap;
+		}
+#endif
+		/*clear the corresponding mask register */
+		ormask &= ((~td_ptd_map->ptd_bitmap) & 0xffff);
+		isp1763_reg_write16(hcd->dev, hcd->regs.atl_irq_mask_or,
+			ormask);
+
+		ptd_map_buff->active_ptds--;
+
+		urbpriv->timeout = 0;
+
+		/*take care of errors */
+		pehci_hcd_update_error_status(qha->td_info4, urb);
+		/*halted, need to finish all the transfer on this endpoint */
+		if (qha->td_info4 & PTD_STATUS_HALTED) {
+
+			printk(KERN_NOTICE "Endpoint is	halted\n");
+			qtd->state |= QTD_STATE_LAST;
+
+			donemap	&= ((~td_ptd_map->ptd_bitmap) &	0xffff);
+			/*in case pending */
+			ptd_map_buff->pending_ptd_bitmap &=
+				((~td_ptd_map->ptd_bitmap) & 0xffff);
+			/*in case of halt, next	transfer will start with toggle
+			   zero, USB spec, 5.8.5 */
+			qh->datatoggle = td_ptd_map->datatoggle	= 0;
+			/*cleanup the ping */
+			qh->ping = 0;
+			/*force	cleanup	after this */
+			dontschedule = 1;
+			goto copylength;
+		}
+
+
+
+		/*read the reload count	*/
+		rl = (qha->td_info3 >> 23);
+		rl &= 0xf;
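+		/*
+		 * rl now holds this PTD's reload count (td_info3 bits 26:23).
+		 * Whenever the PTD is re-armed below, rl is written back into
+		 * the retry-count field of td_info4 (bits 22:19).
+		 */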
+
+
+
+		/*if there is a transaction error and the status is not halted,
+		 * process whatever length we got. if the length is what we
+		 * expected, complete the transfer*/
+		if ((qha->td_info4 & PTD_XACT_ERROR) &&
+			!(qha->td_info4 & PTD_STATUS_HALTED) &&
+			(qha->td_info4 & QHA_ACTIVE)) {
+
+			if (PTD_XFERRED_LENGTH(qha->td_info4) == qtd->length) {
+				;	/*nothing to do, it's fake */
+			} else {
+
+				pehci_print
+					("xact error, info1 0x%08x,info4 0x%08x\n",
+					 qha->td_info1,	qha->td_info4);
+
+				/*if this is the case then we need to
+				   resubmit the	td again */
+				qha->td_info1 |= QHA_VALID;
+				skipmap	&= ~td_ptd_map->ptd_bitmap;
+				ormask |= td_ptd_map->ptd_bitmap;
+				donemap	&= ((~td_ptd_map->ptd_bitmap) &	0xffff);
+
+				/*set the retry	count to 3 again */
+				qha->td_info4 |= (rl <<	19);
+				/*set the active bit, if cleared, will be cleared if we	have some length */
+				qha->td_info4 |= QHA_ACTIVE;
+
+				/*clear	the xact error */
+				qha->td_info4 &= ~PTD_XACT_ERROR;
+				isp1763_reg_write16(hcd->dev,
+						    hcd->regs.atl_irq_mask_or,
+						    ormask);
+
+				/*copy back into the header, payload is	already
+				 * present no need to write again
+				 */
+				isp1763_mem_write(hcd->dev,
+						  td_ptd_map->ptd_header_addr,
+						  0, (u32 *) (qha),
+						  PHCI_QHA_LENGTH, 0);
+				/*unskip this td */
+				isp1763_reg_write16(hcd->dev,
+						    hcd->regs.atltdskipmap,
+						    skipmap);
+				continue;
+			}
+			goto copylength;
+		}
+
+		/*check	for the	nak count and active condition
+		 * to reload the ptd if	needed*/
+		nakcount = qha->td_info4 >> 19;
+		nakcount &= 0xf;
+		active = qha->td_info4 & QHA_ACTIVE;
+		/*if nak count is zero and the active bit is set, it
+		 *means that the device is NAKing and we need to reload
+		 *the same td*/
+		if (!nakcount && active) {
+			pehci_info("%s:	ptd is going for reload,length %d\n",
+				   __FUNCTION__, length);
+			/*make this td valid */
+			qha->td_info1 |= QHA_VALID;
+			donemap	&= ((~td_ptd_map->ptd_bitmap & 0xffff));
+			/*just like fresh td */
+
+			/*set the retry	count to 3 again */
+			qha->td_info4 |= (rl <<	19);
+			qha->td_info4 &= ~0x3;
+			qha->td_info4 |= (0x2 << 23);
+			ptd_map_buff->active_ptds++;
+			skipmap	&= ((~td_ptd_map->ptd_bitmap) &	0xffff);
+			ormask |= td_ptd_map->ptd_bitmap;
+			isp1763_reg_write16(hcd->dev, hcd->regs.atl_irq_mask_or,
+					    ormask);
+			/*copy back into the header, payload is	already
+			 * present no need to write again */
+			isp1763_mem_write(hcd->dev, td_ptd_map->ptd_header_addr,
+					  0, (u32 *) (qha), PHCI_QHA_LENGTH, 0);
+			/*unskip this td */
+			isp1763_reg_write16(hcd->dev, hcd->regs.atltdskipmap,
+					    skipmap);
+			continue;
+		}
+
+		copylength:
+		/*read the length transferred */
+		length = PTD_XFERRED_LENGTH(qha->td_info4);
+
+
+		/*short	complete in case of BULK only */
+		if ((length < qtd->length) && usb_pipebulk(urb->pipe)) {
+
+			/*if the current ptd was not able to fetch as much data as
+			 * was asked for, the device has no more data, so complete this transfer
+			 * */
+			/*can we complete our transfer here */
+			if ((urb->transfer_flags & URB_SHORT_NOT_OK)) {
+				pehci_check
+					("short	read, length %d(expected %d)\n",
+					 length, qtd->length);
+				urb->status = -EREMOTEIO;
+				/*if this is the only td,donemap will be cleared
+				   at completion, otherwise take the next one
+				 */
+				donemap	&= ((~td_ptd_map->ptd_bitmap) &	0xffff);
+				ptd_map_buff->pending_ptd_bitmap &=
+					((~td_ptd_map->ptd_bitmap) & 0xffff);
+				/*force	the cleanup from here */
+				dontschedule = 1;
+			}
+
+			/*this will be the last	td,in case of short read/write */
+			/*donemap, pending maps	will be	handled	at the while scheduling	or completion */
+			qtd->state |= QTD_STATE_LAST;
+
+		}
+		/*preserve the current data toggle */
+		qh->datatoggle = td_ptd_map->datatoggle	=
+			PTD_NEXTTOGGLE(qha->td_info4);
+		qh->ping = PTD_PING_STATE(qha->td_info4);
+		/*copy data from */
+		switch (PTD_PID(qha->td_info2))	{
+		case IN_PID:
+			qh->ping = 0;
+			/*do read only when there is some data */
+			if (length && (length <= HC_ATL_PL_SIZE)) {
+				isp1763_mem_read(hcd->dev,
+						 (u32) mem_addr->phy_addr, 0,
+						 (u32*) (le32_to_cpu(qtd->hw_buf[0])), length, 0);
+#if 0
+			//	printk("IN PayLoad length:%d\n", length); 
+			if(length<=4)	{
+					int i=0;
+					int *data_addr= qtd->hw_buf[0];
+					printk("\n");
+					for(i=0;i<length;i+=4) printk("[0x%X] ",*data_addr++);
+					printk("\n");
+				}
+#endif
+			}
+
+		case OUT_PID:
+			urb->actual_length += length;
+			qh->hw_current = qtd->hw_next;
+			phci_hcd_mem_free(&qtd->mem_addr);
+			qtd->state |= QTD_STATE_DONE;
+
+			break;
+		case SETUP_PID:
+			qh->hw_current = qtd->hw_next;
+			phci_hcd_mem_free(&qtd->mem_addr);
+			qtd->state |= QTD_STATE_DONE;
+			break;
+		}
+
+		if (qtd->state & QTD_STATE_LAST) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+			pehci_hcd_urb_complete(hcd, qh, urb, td_ptd_map, regs);
+#else
+			pehci_hcd_urb_complete(hcd, qh, urb, td_ptd_map);
+#endif
+			if (dontschedule) {	/*cleanup will start from drivers */
+				dontschedule = 0;
+				/*so that we can take next one */
+				qh->qh_state = QH_STATE_TAKE_NEXT;
+				continue;
+			}
+			/*take the next	if in the queue	*/
+			if (!list_empty(&qh->qtd_list))	{
+				struct list_head *head;
+				/*last td of previous urb */
+				head = &qh->qtd_list;
+				qtd = list_entry(head->next, struct ehci_qtd,
+						 qtd_list);
+				td_ptd_map->qtd	= qtd;
+				qh->hw_current = cpu_to_le32(qtd);
+				qh->qh_state = QH_STATE_LINKED;
+
+			} else {
+				td_ptd_map->qtd	=
+					(struct	ehci_qtd *) le32_to_cpu(0);
+				qh->hw_current = cpu_to_le32(0);
+				qh->qh_state = QH_STATE_TAKE_NEXT;
+				donemap	&= ((~td_ptd_map->ptd_bitmap & 0xffff));
+				ptd_map_buff->pending_ptd_bitmap &=
+					((~td_ptd_map->ptd_bitmap) & 0xffff);
+				continue;
+			}
+		}
+
+#ifdef MSEC_INT_BASED
+		schedule:
+#endif
+		{
+			/*current td comes from	qh->hw_current */
+			ptd_map_buff->pending_ptd_bitmap &=
+				((~td_ptd_map->ptd_bitmap) & 0xffff);
+			td_ptd_map->qtd	=
+				(struct	ehci_qtd
+				 *) (le32_to_cpu(qh->hw_current));
+			qtd = td_ptd_map->qtd;
+			ormask |= td_ptd_map->ptd_bitmap;
+			ptd_map_buff->active_ptds++;
+			pehci_hcd_qtd_schedule(hcd, qtd, qh, td_ptd_map);
+		}
+
+	}			/*end of while */
+
+/*clear	all the	tds inside this	routine*/
+	skipmap	&= ((~donemap) & 0xffff);
+	isp1763_reg_write16(hcd->dev, hcd->regs.atltdskipmap, skipmap);
+	ormask |= donemap;
+	isp1763_reg_write16(hcd->dev, hcd->regs.atl_irq_mask_or, ormask);
+exit:
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+}
+
+/*--------------------------------------------------------*
+  root hub functions
+ *--------------------------------------------------------*/
+
+/*return root hub descriptor, can not fail*/
+static void
+pehci_hub_descriptor(phci_hcd *	hcd, struct usb_hub_descriptor *desc)
+{
+	u32 ports = 0;
+	u16 temp = 0;
+
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+
+	ports =	0x11;
+	ports =	ports &	0xf;
+
+	pehci_info("%s:	number of ports	%d\n", __FUNCTION__, ports);
+
+	desc->bDescriptorType =	0x29;
+	desc->bPwrOn2PwrGood = 10;
+
+	desc->bHubContrCurrent = 0;
+
+	desc->bNbrPorts	= ports;
+	temp = 1 + (ports / 8);
+	desc->bDescLength = 7 +	2 * temp;
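+	/* e.g. with a single downstream port: temp = 1 + 0 = 1,
+	 * giving a 9-byte hub descriptor */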
+	/* two bitmaps:	 ports removable, and usb 1.0 legacy PortPwrCtrlMask */
+
+	memset(&desc->DeviceRemovable[0], 0, temp);
+	memset(&desc->PortPwrCtrlMask[temp], 0xff, temp);
+
+	temp = 0x0008;		/* per-port overcurrent	reporting */
+	temp |=	0x0001;		/* per-port power control */
+	temp |=	0x0080;		/* per-port indicators (LEDs) */
+	desc->wHubCharacteristics = cpu_to_le16(temp);
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+}
+
+/*after	reset on root hub,
+ * device high speed or	non-high speed
+ * */
+static int
+phci_check_reset_complete(phci_hcd * hcd, int index, int port_status)
+{
+	pehci_print("check reset complete\n");
+	if (!(port_status & PORT_CONNECT)) {
+		hcd->reset_done[index] = 0;
+		return port_status;
+	}
+
+	/* if reset finished and it's still not	enabled	-- handoff */
+	if (!(port_status & PORT_PE)) {
+		printk("port %d	full speed --> companion\n", index + 1);
+		port_status |= PORT_OWNER;
+		isp1763_reg_write32(hcd->dev, hcd->regs.ports[index],
+				    port_status);
+
+	} else {
+		pehci_print("port %d high speed\n", index + 1);
+	}
+
+	return port_status;
+
+}
+
+/*----------------------------------------------*
+  host controller initialization, removal functions
+ *----------------------------------------------*/
+
+
+/*initialize all three buffer(iso/atl/int) type	headers*/
+static void
+pehci_hcd_init_map_buffers(phci_hcd * phci)
+{
+	td_ptd_map_buff_t *ptd_map_buff;
+	u8 buff_type, ptd_index;
+	u32 bitmap;
+
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+	pehci_print("phci_init_map_buffers(phci	= 0x%p)\n", phci);
+	/* initialize for each buffer type */
+	for (buff_type = 0; buff_type <	TD_PTD_TOTAL_BUFF_TYPES; buff_type++) {
+		ptd_map_buff = &(td_ptd_map_buff[buff_type]);
+		ptd_map_buff->buffer_type = buff_type;
+		ptd_map_buff->active_ptds = 0;
+		ptd_map_buff->total_ptds = 0;
+		/*each buffer type can have at least 32 ptds */
+		ptd_map_buff->max_ptds = 16;
+		ptd_map_buff->active_ptd_bitmap	= 0;
+		/*everything skipped */
+		/*nothing is pending */
+		ptd_map_buff->pending_ptd_bitmap = 0x00000000;
+
+		/* For each ptd index of this buffer, set the fields */
+		bitmap = 0x00000001;
+		for (ptd_index = 0; ptd_index <	TD_PTD_MAX_BUFF_TDS;
+			ptd_index++) {
+			/*datatoggle zero */
+			ptd_map_buff->map_list[ptd_index].datatoggle = 0;
+			/*td state is not used */
+			ptd_map_buff->map_list[ptd_index].state	= TD_PTD_NEW;
+			/*no endpoint, no qtd */
+			ptd_map_buff->map_list[ptd_index].qh = NULL;
+			ptd_map_buff->map_list[ptd_index].qtd =	NULL;
+			ptd_map_buff->map_list[ptd_index].ptd_header_addr =
+				0xFFFF;
+		}		/* for(	ptd_index */
+	}			/* for(buff_type */
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+}				/* phci_init_map_buffers */
+
+
+/*put the host controller into operational mode
+ * called phci_hcd_start routine,
+ * return 0, success else
+ * timeout, fails*/
+
+static int
+pehci_hcd_start_controller(phci_hcd * hcd)
+{
+	u32 temp = 0;
+	u32 command = 0;
+	int retval = 0;
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+	printk(KERN_NOTICE "++ %s: Entered\n", __FUNCTION__);
+
+
+	command	= isp1763_reg_read16(hcd->dev, hcd->regs.command, command);
+	printk(KERN_NOTICE "HC Command Reg val ...1 %x\n", command);
+
+	/*initialize the host controller */
+	command	|= CMD_RUN;
+
+	isp1763_reg_write16(hcd->dev, hcd->regs.command, command);
+
+
+	command	&= 0;
+
+	command	= isp1763_reg_read16(hcd->dev, hcd->regs.command, command);
+	printk(KERN_NOTICE "HC Command Reg val ...2 %x\n", command);
+
+	/*should be in operation in 1000 usecs */
+	if ((retval =
+		pehci_hcd_handshake(hcd, hcd->regs.command, CMD_RUN, CMD_RUN,
+		100000))) {
+		err("Host is not up(CMD_RUN) in	1000 usecs\n");
+		return retval;
+	}
+
+	printk(KERN_NOTICE "ISP1763 HC is running \n");
+
+
+	/*put the host controller to ehci mode */
+	command	&= 0;
+	command	|= 1;
+
+	isp1763_reg_write16(hcd->dev, hcd->regs.configflag, command);
+	mdelay(5);
+
+	temp = isp1763_reg_read16(hcd->dev, hcd->regs.configflag, temp);
+	pehci_print("%s: Config	Flag reg value:	0x%08x\n", __FUNCTION__, temp);
+
+	/*check	if ehci	mode switching is correct or not */
+	if ((retval =
+		pehci_hcd_handshake(hcd, hcd->regs.configflag, 1, 1, 100))) {
+		err("Host is not into ehci mode	in 100 usecs\n");
+		return retval;
+	}
+
+	mdelay(5);
+
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+	printk(KERN_NOTICE "-- %s: Exit\n", __FUNCTION__);
+	return retval;
+}
+
+
+/* enable the interrupts.
+ * Called from the phci_1763_start routine; returns void.
+ */
+static void
+pehci_hcd_enable_interrupts(phci_hcd * hcd)
+{
+	u32 temp = 0;
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+	printk(KERN_NOTICE "++ %s: Entered\n", __FUNCTION__);
+	/* start from zero, then write the full mask back to the interrupt
+	 * status register to clear any interrupts that may be pending */
+	temp &= 0;
+	temp |= INTR_ENABLE_MASK;
+	isp1763_reg_write16(hcd->dev, hcd->regs.interrupt, temp);
+
+	/*enable interrupts */
+	temp = 0;
+	
+#ifdef OTG_PACKAGE
+	temp |= INTR_ENABLE_MASK | HC_OTG_INT;
+#else
+	temp |= INTR_ENABLE_MASK;
+#endif	
+	pehci_print("%s: enabled mask 0x%08x\n", __FUNCTION__, temp);
+	isp1763_reg_write16(hcd->dev, hcd->regs.interruptenable, temp);
+
+	temp = isp1763_reg_read16(hcd->dev, hcd->regs.interruptenable, temp);
+	pehci_print("%s: Intr enable reg value:	0x%08x\n", __FUNCTION__, temp);
+	
+#ifdef HCD_PACKAGE
+	temp = 0;
+	temp = isp1763_reg_read32(hcd->dev, HC_INT_THRESHOLD_REG, temp);
+//	temp |= 0x0800000F;
+	/* 125 us minimum width between two edge interrupts; the interrupt
+	 * line stays low for 500 ns (15 cycles / 30 MHz) */
+	temp |= 0x0100000F;
+	isp1763_reg_write32(hcd->dev, HC_INT_THRESHOLD_REG, temp);
+#endif
+	/*enable the global interrupt */
+	temp &=	0;
+	temp = isp1763_reg_read16(hcd->dev, hcd->regs.hwmodecontrol, temp);
+	temp |=	0x01;		/*enable the global interrupt */
+#ifdef EDGE_INTERRUPT
+	temp |=	0x02;		/*enable the edge interrupt */
+#endif
+
+#ifdef POL_HIGH_INTERRUPT
+	temp |=	0x04;		/* enable interrupt polarity high */
+#endif
+
+	isp1763_reg_write16(hcd->dev, hcd->regs.hwmodecontrol, temp);
+
+	/*maximum rate is one msec */
+	/*enable the atl interrupts OR and AND mask */
+	temp = 0;
+	isp1763_reg_write16(hcd->dev, hcd->regs.atl_irq_mask_and, temp);
+	temp = 0;
+	isp1763_reg_write16(hcd->dev, hcd->regs.atl_irq_mask_or, temp);
+	temp = 0;
+	isp1763_reg_write16(hcd->dev, hcd->regs.int_irq_mask_and, temp);
+	temp = 0x0;
+	isp1763_reg_write16(hcd->dev, hcd->regs.int_irq_mask_or, temp);
+	temp = 0;
+	isp1763_reg_write16(hcd->dev, hcd->regs.iso_irq_mask_and, temp);
+	temp = 0xffff;
+	isp1763_reg_write16(hcd->dev, hcd->regs.iso_irq_mask_or, temp);
+
+	temp = isp1763_reg_read16(hcd->dev, hcd->regs.iso_irq_mask_or, temp);
+	pehci_print("%s:Iso irq	mask reg value:	0x%08x\n", __FUNCTION__, temp);
+
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+}
+
+/*initialize the host controller register map from Isp1763 to EHCI */
+static void
+pehci_hcd_init_reg(phci_hcd * hcd)
+{
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+	/* scratch pad for the test */
+	hcd->regs.scratch = HC_SCRATCH_REG;
+
+	/*make a copy of our interrupt locations */
+	hcd->regs.command = HC_USBCMD_REG;
+	hcd->regs.usbstatus = HC_USBSTS_REG;
+	hcd->regs.usbinterrupt = HC_INTERRUPT_REG_EHCI;
+
+	hcd->regs.hcsparams = HC_SPARAMS_REG;
+	hcd->regs.frameindex = HC_FRINDEX_REG;
+
+	/*transfer specific registers */
+	hcd->regs.hwmodecontrol	= HC_HWMODECTRL_REG;
+	hcd->regs.interrupt = HC_INTERRUPT_REG;
+	hcd->regs.interruptenable = HC_INTENABLE_REG;
+	hcd->regs.atl_irq_mask_and = HC_ATL_IRQ_MASK_AND_REG;
+	hcd->regs.atl_irq_mask_or = HC_ATL_IRQ_MASK_OR_REG;
+
+	hcd->regs.int_irq_mask_and = HC_INT_IRQ_MASK_AND_REG;
+	hcd->regs.int_irq_mask_or = HC_INT_IRQ_MASK_OR_REG;
+	hcd->regs.iso_irq_mask_and = HC_ISO_IRQ_MASK_AND_REG;
+	hcd->regs.iso_irq_mask_or = HC_ISO_IRQ_MASK_OR_REG;
+	hcd->regs.buffer_status	= HC_BUFFER_STATUS_REG;
+	hcd->regs.interruptthreshold = HC_INT_THRESHOLD_REG;
+	/*initialization specific */
+	hcd->regs.reset	= HC_RESET_REG;
+	hcd->regs.configflag = HC_CONFIGFLAG_REG;
+	hcd->regs.ports[0] = HC_PORTSC1_REG;
+	hcd->regs.ports[1] = 0;	/*port1,port2,port3 status reg are removed */
+	hcd->regs.ports[2] = 0;
+	hcd->regs.ports[3] = 0;
+	hcd->regs.pwrdwn_ctrl =	HC_POWER_DOWN_CONTROL_REG;
+	/*transfer registers */
+	hcd->regs.isotddonemap = HC_ISO_PTD_DONEMAP_REG;
+	hcd->regs.isotdskipmap = HC_ISO_PTD_SKIPMAP_REG;
+	hcd->regs.isotdlastmap = HC_ISO_PTD_LASTPTD_REG;
+
+	hcd->regs.inttddonemap = HC_INT_PTD_DONEMAP_REG;
+
+	hcd->regs.inttdskipmap = HC_INT_PTD_SKIPMAP_REG;
+	hcd->regs.inttdlastmap = HC_INT_PTD_LASTPTD_REG;
+
+	hcd->regs.atltddonemap = HC_ATL_PTD_DONEMAP_REG;
+	hcd->regs.atltdskipmap = HC_ATL_PTD_SKIPMAP_REG;
+	hcd->regs.atltdlastmap = HC_ATL_PTD_LASTPTD_REG;
+
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+}
+
+
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+static void
+pehci_interrupt_handler(phci_hcd * hcd, struct pt_regs *regs)
+{
+	spin_lock(&hcd->lock);
+#ifdef CONFIG_ISO_SUPPORT
+	phcd_iso_handler(hcd, regs);
+#endif
+	pehci_hcd_intl_worker(hcd, regs);
+	pehci_hcd_atl_worker(hcd, regs);
+	spin_unlock(&hcd->lock);
+	return;
+}
+#else
+static void
+pehci_interrupt_handler(phci_hcd * hcd)
+{
+	spin_lock(&hcd->lock);
+#ifdef CONFIG_ISO_SUPPORT
+	pehci_hcd_iso_worker(hcd);
+#endif
+	pehci_hcd_intl_worker(hcd);
+	pehci_hcd_atl_worker(hcd);
+	spin_unlock(&hcd->lock);
+	return;
+}
+#endif
+irqreturn_t pehci_hcd_irq(struct usb_hcd *usb_hcd)
+{
+
+	int work = 0;
+	phci_hcd *pehci_hcd;
+	struct isp1763_dev *dev;
+	u32 intr = 0;
+	u32 resume=0;
+	u32 temp=0;
+	u32 irq_mask = 0;
+
+	if (!(usb_hcd->state & USB_STATE_READY)) {
+		info("interrupt	handler	state not ready	yet\n");
+	usb_hcd->state=USB_STATE_READY;
+	//	return IRQ_NONE;
+	}
+
+	/*our host */
+	pehci_hcd = usb_hcd_to_pehci_hcd(usb_hcd);
+	dev = pehci_hcd->dev;
+
+	spin_lock(&pehci_hcd->lock);
+	dev->int_reg = isp1763_reg_read16(dev, HC_INTERRUPT_REG, dev->int_reg);
+	/*Clear the interrupt*/
+	isp1763_reg_write16(dev, HC_INTERRUPT_REG, dev->int_reg);
+
+	irq_mask = isp1763_reg_read16(dev, HC_INTENABLE_REG, irq_mask);
+	dev->int_reg &= irq_mask;
+
+	intr = dev->int_reg;
+
+
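+	/* nuofsofs guards against re-entering the handler: if a previous
+	 * invocation is still in progress, the interrupt has already been
+	 * acknowledged above, so just return */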
+	if (atomic_read(&pehci_hcd->nuofsofs)) {
+		spin_unlock(&pehci_hcd->lock);
+		return IRQ_HANDLED;
+	}
+	atomic_inc(&pehci_hcd->nuofsofs);
+
+	irq_mask=isp1763_reg_read32(dev,HC_USBSTS_REG,0);
+	isp1763_reg_write32(dev,HC_USBSTS_REG,irq_mask);
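+	/* remote-wakeup handling: USBSTS bit 2 signals a port change; when the
+	 * OPR-change bits are set in the interrupt status and PORTSC1 reports
+	 * force-resume, let the platform driver resume the device first */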
+	if(irq_mask & 0x4){  // port status register.
+		if(intr & 0x50) {   // OPR register change
+			temp=isp1763_reg_read32(dev,HC_PORTSC1_REG,0);
+			if(temp & 0x4){   // Force resume bit is set
+				if (dev) {
+					if (dev->driver) {
+						if (dev->driver->resume) {
+						dev->driver->resume(dev);
+							resume=1;
+						}
+					}
+				}
+			}
+		}
+	}
+
+	set_bit(HCD_FLAG_SAW_IRQ, &usb_hcd->flags);
+
+#ifndef THREAD_BASED
+/*-----------------------------------------------------------*/
+#ifdef MSEC_INT_BASED
+	work = 1;
+#else
+	if (intr & (HC_MSEC_INT	& INTR_ENABLE_MASK)) {
+		work = 1;	/* phci_iso_worker(hcd); */
+	}
+
+#ifdef USBNET 
+	if (intr & HC_MSOF_INT ) {
+		struct list_head *pos, *q;
+	
+		list_for_each_safe(pos, q, &pehci_hcd->cleanup_urb.urb_list) {
+		struct isp1763_async_cleanup_urb *tmp;
+		
+			tmp = list_entry(pos, struct isp1763_async_cleanup_urb, urb_list);
+			if (tmp) {
+				spin_unlock(&pehci_hcd->lock);
+				usb_hcd_giveback_urb(usb_hcd, tmp->urb, tmp->urb->status);
+				spin_lock(&pehci_hcd->lock);
+
+				list_del(pos);
+				if(tmp)
+				kfree(tmp);
+			}
+		}
+		isp1763_reg_write16(dev, HC_INTENABLE_REG, INTR_ENABLE_MASK );
+	}
+#endif
+
+
+	if (intr & (HC_INTL_INT	& INTR_ENABLE_MASK)) {
+	//	spin_lock(&pehci_hcd->lock);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+		pehci_hcd_intl_worker(pehci_hcd, regs);
+#else
+		pehci_hcd_intl_worker(pehci_hcd);
+#endif
+	//	spin_unlock(&pehci_hcd->lock);
+		work = 0;	/*phci_intl_worker(hcd); */
+	}
+	
+	if (intr & (HC_ATL_INT & INTR_ENABLE_MASK)) {
+	//	spin_lock(&pehci_hcd->lock);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+		pehci_hcd_atl_worker(pehci_hcd, regs);
+#else
+		pehci_hcd_atl_worker(pehci_hcd);
+#endif
+	//	spin_unlock(&pehci_hcd->lock);
+		work = 0;	/*phci_atl_worker(hcd);	*/
+	}
+#ifdef CONFIG_ISO_SUPPORT
+	if (intr & (HC_ISO_INT & INTR_ENABLE_MASK)) {
+	//	spin_lock(&pehci_hcd->lock);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+		pehci_hcd_iso_worker(pehci_hcd);
+#else
+		pehci_hcd_iso_worker(pehci_hcd);
+#endif
+	//	spin_unlock(&pehci_hcd->lock);
+		work = 0;	/*phci_atl_worker(hcd); */
+	}
+#endif
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	if (work){
+		spin_unlock(&pehci_hcd->lock);
+		pehci_interrupt_handler(pehci_hcd, regs);
+		spin_lock(&pehci_hcd->lock);
+	}
+#else
+	if (work){
+		spin_unlock(&pehci_hcd->lock);
+		pehci_interrupt_handler(pehci_hcd);
+		spin_lock(&pehci_hcd->lock);
+	}
+#endif
+
+/*-----------------------------------------------------------*/
+#else
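+	/* THREAD_BASED mode: instead of running the ATL/INT workers here,
+	 * queue a message for the interrupt worker thread and wake it up */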
+	if ((intr & (HC_INTL_INT & INTR_ENABLE_MASK)) ||(intr & (HC_ATL_INT & INTR_ENABLE_MASK)))
+	{ //send
+		st_UsbIt_Msg_Struc *stUsbItMsgSnd ;
+		
+		stUsbItMsgSnd = (st_UsbIt_Msg_Struc *)kmalloc(sizeof(st_UsbIt_Msg_Struc), GFP_ATOMIC);
+		if (!stUsbItMsgSnd) {
+			/* drop the event on allocation failure; this is an irqreturn_t path */
+			atomic_dec(&pehci_hcd->nuofsofs);
+			spin_unlock(&pehci_hcd->lock);
+			return IRQ_HANDLED;
+		}
+		memset(stUsbItMsgSnd, 0, sizeof(st_UsbIt_Msg_Struc));
+		
+		stUsbItMsgSnd->usb_hcd = usb_hcd;
+		stUsbItMsgSnd->uIntStatus = NO_SOF_REQ_IN_ISR;
+		list_add_tail(&(stUsbItMsgSnd->list), &(g_messList.list));
+
+		pehci_print("\n------------- send mess : %d------------\n",stUsbItMsgSnd->uIntStatus);
+		if ((g_stUsbItThreadHandler.phThreadTask != NULL) && (g_stUsbItThreadHandler.lThrdWakeUpNeeded == 0))
+		{
+			pehci_print("\n------- wake up thread : %d-----\n",stUsbItMsgSnd->uIntStatus);
+			g_stUsbItThreadHandler.lThrdWakeUpNeeded = 1;
+			wake_up(&(g_stUsbItThreadHandler.ulThrdWaitQhead));
+		}
+	}
+/*-----------------------------------------------------------*/
+#endif
+
+	atomic_dec(&pehci_hcd->nuofsofs);
+	spin_unlock(&pehci_hcd->lock);
+		if(resume){
+			usb_hcd_poll_rh_status(usb_hcd);
+	}
+	return IRQ_HANDLED;
+}
+
+/*reset	the host controller
+ *called phci_hcd_start	routine
+ *return 0, success else
+ *timeout, fails*/
+static int
+pehci_hcd_reset(struct usb_hcd *usb_hcd)
+{
+	u32 command = 0;
+	u32 temp = 0;
+	phci_hcd *hcd =	usb_hcd_to_pehci_hcd(usb_hcd);
+	printk(KERN_NOTICE "++ %s: Entered\n", __FUNCTION__);
+	pehci_hcd_init_reg(hcd);
+	printk("chipid %x \n", isp1763_reg_read32(hcd->dev, HC_CHIP_ID_REG, temp)); //0x70
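+	/* reset sequence: first the ATX (transceiver) block via bit 3 of the
+	 * reset register, then the whole controller via bit 0, then the EHCI
+	 * register block via bit 1, and finally an EHCI CMD_RESET through the
+	 * command register */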
+
+	/*reset	the atx controller */
+	temp &=	0;
+	temp |=	8;
+	isp1763_reg_write16(hcd->dev, hcd->regs.reset, temp);
+	mdelay(10);
+	
+	/*reset	the host controller */
+	temp &=	0;
+	temp |=	1;
+	isp1763_reg_write16(hcd->dev, hcd->regs.reset, temp);
+
+	command	= 0;
+	do {
+
+		temp = isp1763_reg_read16(hcd->dev, hcd->regs.reset, temp);
+		mdelay(10);
+		command++;
+		if (command > 100) {
+			printk("not able to reset\n");
+			break;
+		}
+	} while	(temp &	0x01);
+
+
+	/*reset	the ehci controller registers */
+	temp = 0;
+	temp |=	(1 << 1);
+	isp1763_reg_write16(hcd->dev, hcd->regs.reset, temp);
+	command	= 0;
+	do {
+		temp = isp1763_reg_read16(hcd->dev, hcd->regs.reset, temp);
+		mdelay(10);
+		command++;
+		if (command > 100) {
+			printk("not able to reset\n");
+			break;
+		}
+	} while	(temp &	0x02);
+
+	/*read the command register */
+	command	= isp1763_reg_read16(hcd->dev, hcd->regs.command, command);
+
+	command	|= CMD_RESET;
+	/* write back and wait for the reset to complete */
+	isp1763_reg_write16(hcd->dev, hcd->regs.command, command);
+	/* wait up to 200 msecs */
+	mdelay(200);
+	printk("command	%x\n",
+		isp1763_reg_read16(hcd->dev, hcd->regs.command, command));
+	printk(KERN_NOTICE "-- %s: Exit	\n", __FUNCTION__);
+	return 0;
+}
+
+/*host controller initialize routine,
+ *called by phci_hcd_probe
+ * */
+static int
+pehci_hcd_start(struct usb_hcd *usb_hcd)
+{
+
+	int retval;
+	int count = 0;
+	phci_hcd *pehci_hcd = NULL;
+	u32 temp = 0;
+	u32 hwmodectrl = 0;
+	u32 ul_scratchval = 0;
+
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+	pehci_hcd = usb_hcd_to_pehci_hcd(usb_hcd);
+
+	spin_lock_init(&pehci_hcd->lock);
+	atomic_set(&pehci_hcd->nuofsofs, 0);
+	atomic_set(&pehci_hcd->missedsofs, 0);
+
+	/*Initialize host controller registers */
+	pehci_hcd_init_reg(pehci_hcd);
+
+	/*reset	the host controller */
+	retval = pehci_hcd_reset(usb_hcd);
+	if (retval) {
+		err("phci_1763_start: controller reset failed with status %x\n", retval);
+		return retval;
+	}
+
+	hwmodectrl =
+		isp1763_reg_read16(pehci_hcd->dev,
+				   pehci_hcd->regs.hwmodecontrol, hwmodectrl);
+#ifdef DATABUS_WIDTH_16
+	printk(KERN_NOTICE "Mode Ctrl Value before 16width: %x\n", hwmodectrl);
+	hwmodectrl &= 0xFFEF;	/*enable the 16	bit bus	*/
+	hwmodectrl |= 0x0400;	/*enable common	int */
+#else
+	printk(KERN_NOTICE "Mode Ctrl Value before 8width : %x\n", hwmodectrl);
+	hwmodectrl |= 0x0010;	/*enable the 8 bit bus */
+	hwmodectrl |= 0x0400;	/*enable common	int */
+#endif
+	isp1763_reg_write16(pehci_hcd->dev, pehci_hcd->regs.hwmodecontrol,
+			    hwmodectrl);
+
+	hwmodectrl =
+		isp1763_reg_read16(pehci_hcd->dev,
+				   pehci_hcd->regs.hwmodecontrol, hwmodectrl);
+	hwmodectrl |=0x9;  //lock interface and enable global interrupt.
+	isp1763_reg_write16(pehci_hcd->dev, pehci_hcd->regs.hwmodecontrol,
+		hwmodectrl);	
+	printk(KERN_NOTICE "Mode Ctrl Value after buswidth: %x\n", hwmodectrl);
+
+	isp1763_reg_write16(pehci_hcd->dev, pehci_hcd->regs.scratch, 0x3344);
+
+	ul_scratchval =
+		isp1763_reg_read16(pehci_hcd->dev, pehci_hcd->regs.scratch,
+				   ul_scratchval);
+	printk(KERN_NOTICE "Scratch Reg	Value :	%x\n", ul_scratchval);
+	if (ul_scratchval != 0x3344) {
+		printk(KERN_NOTICE "Scratch Reg	Value Mismatch:	%x\n",
+		       ul_scratchval);
+
+	}
+
+
+	/*initialize the host controller initial values	*/
+	/*disable all the buffer */
+	isp1763_reg_write16(pehci_hcd->dev, pehci_hcd->regs.buffer_status, 0);
+	/*skip all the transfers */
+	isp1763_reg_write16(pehci_hcd->dev, pehci_hcd->regs.atltdskipmap,
+			    NO_TRANSFER_ACTIVE);
+	isp1763_reg_write16(pehci_hcd->dev, pehci_hcd->regs.inttdskipmap,
+			    NO_TRANSFER_ACTIVE);
+	isp1763_reg_write16(pehci_hcd->dev, pehci_hcd->regs.isotdskipmap,
+			    NO_TRANSFER_ACTIVE);
+	/*clear	done map */
+	isp1763_reg_write16(pehci_hcd->dev, pehci_hcd->regs.atltddonemap,
+			    NO_TRANSFER_DONE);
+	isp1763_reg_write16(pehci_hcd->dev, pehci_hcd->regs.inttddonemap,
+			    NO_TRANSFER_DONE);
+	isp1763_reg_write16(pehci_hcd->dev, pehci_hcd->regs.isotddonemap,
+			    NO_TRANSFER_DONE);
+	
+#ifdef HCD_PACKAGE
+	/*port1 as Host */
+	isp1763_reg_write16(pehci_hcd->dev, OTG_CTRL_SET_REG, 0x0400);
+	isp1763_reg_write16(pehci_hcd->dev, OTG_CTRL_CLEAR_REG, 0x0080);
+	/*port2 as Host */
+	isp1763_reg_write16(pehci_hcd->dev, OTG_CTRL_SET_REG, 0x0000);
+	isp1763_reg_write16(pehci_hcd->dev, OTG_CTRL_CLEAR_REG, 0x8000);
+	
+	#if 0 /* do not use bit 1&2 for pure host application */
+	ul_scratchval =	isp1763_reg_read32(pehci_hcd->dev, HC_POWER_DOWN_CONTROL_REG,0);
+	ul_scratchval |= 0x006;	
+	isp1763_reg_write32(pehci_hcd->dev, HC_POWER_DOWN_CONTROL_REG,ul_scratchval);
+	#endif
+	
+#elif defined(HCD_DCD_PACKAGE)
+
+	/*port1 as device */
+	isp1763_reg_write16(pehci_hcd->dev,OTG_CTRL_SET_REG, 
+			OTG_CTRL_DMPULLDOWN |OTG_CTRL_DPPULLDOWN | 
+			OTG_CTRL_SW_SEL_HC_DC |OTG_CTRL_OTG_DISABLE);	/* pure	Device Mode and	OTG disabled */
+
+	isp1763_reg_write16(pehci_hcd->dev, OTG_CTRL_SET_REG, 0x0480);
+	/*port2 as host */
+	isp1763_reg_write16(pehci_hcd->dev, OTG_CTRL_SET_REG, 0x0000);
+	isp1763_reg_write16(pehci_hcd->dev, OTG_CTRL_CLEAR_REG, 0x8000);
+	ul_scratchval =
+		isp1763_reg_read32(pehci_hcd->dev, HC_POWER_DOWN_CONTROL_REG,
+		0);
+#endif
+
+	/*enable interrupts */
+	pehci_hcd_enable_interrupts(pehci_hcd);
+
+	/*put controller into operational mode */
+	retval = pehci_hcd_start_controller(pehci_hcd);
+	if (retval) {
+		err("phci_1763_start: failed to start the controller, status %x\n", retval);
+		return retval;
+	}
+
+	/*Init the phci	qtd <->	ptd map	buffers	*/
+	pehci_hcd_init_map_buffers(pehci_hcd);
+
+	/* set the last-PTD maps: bit 15 for ATL, bit 7 for INT, bit 0 for ISO */
+	isp1763_reg_write16(pehci_hcd->dev, pehci_hcd->regs.atltdlastmap,
+			    0x8000);
+	isp1763_reg_write16(pehci_hcd->dev, pehci_hcd->regs.inttdlastmap, 0x80);
+	isp1763_reg_write16(pehci_hcd->dev, pehci_hcd->regs.isotdlastmap, 0x01);
+	/*iso transfers	are not	active */
+	pehci_hcd->next_uframe = -1;
+	pehci_hcd->periodic_sched = 0;
+	hwmodectrl =
+		isp1763_reg_read16(pehci_hcd->dev,
+				   pehci_hcd->regs.hwmodecontrol, hwmodectrl);
+
+	/*initialize the periodic list */
+	for (count = 0; count < PTD_PERIODIC_SIZE; count++) {
+		pehci_hcd->periodic_list[count].framenumber = 0;
+		INIT_LIST_HEAD(&pehci_hcd->periodic_list[count].sitd_itd_head);
+	}
+
+
+	/*set the state	of the host to ready,
+	 * start processing interrupts
+	 * */
+
+	usb_hcd->state = HC_STATE_RUNNING;
+	pehci_hcd->state = HC_STATE_RUNNING;
+
+
+	/*initialize root hub timer */
+	init_timer(&pehci_hcd->rh_timer);
+	/*initialize watchdog */
+	init_timer(&pehci_hcd->watchdog);
+
+	temp = isp1763_reg_read32(pehci_hcd->dev, HC_POWER_DOWN_CONTROL_REG,
+				  temp);
+	
+	temp = 0x3e81bA0;
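+	/* magic power-down control value carried over from the reference
+	 * driver; the individual bit meanings are not spelled out here */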
+#if 0
+	temp |=	0x306;
+#endif
+	isp1763_reg_write32(pehci_hcd->dev, HC_POWER_DOWN_CONTROL_REG, temp);
+	temp = isp1763_reg_read32(pehci_hcd->dev, HC_POWER_DOWN_CONTROL_REG,
+				  temp);
+	printk(" Powerdown Reg Val: %x\n", temp);
+
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+
+	return 0;
+}
+
+static void
+pehci_hcd_stop(struct usb_hcd *usb_hcd)
+{
+
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+
+	/* no more interrupts ... */
+	if (usb_hcd->state == USB_STATE_RUNNING) {
+		mdelay(2);
+	}
+	if (in_interrupt()) {	/* must	not happen!! */
+		pehci_info("stopped in_interrupt!\n");
+
+		return;
+	}
+
+	/*power	off our	root hub */
+	pehci_rh_control(usb_hcd, ClearPortFeature, USB_PORT_FEAT_POWER,
+			 1, NULL, 0);
+
+	/*let the roothub power	go off */
+	mdelay(20);
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+
+	return;
+}
+
+
+/* submit an urb destined for anything other than the root hub */
+static int
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+pehci_hcd_urb_enqueue(struct usb_hcd *usb_hcd, struct usb_host_endpoint *ep,
+		struct urb *urb, gfp_t mem_flags)
+#else
+pehci_hcd_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb, gfp_t mem_flags)
+#endif
+{
+
+	struct list_head qtd_list;
+	struct ehci_qh *qh = 0;
+	phci_hcd *pehci_hcd = usb_hcd_to_pehci_hcd(usb_hcd);
+	int status = 0;
+	int temp = 0, max = 0, num_tds = 0, mult = 0;
+	urb_priv_t *urb_priv = NULL;
+	unsigned long  flags;
+	
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+
+	
+	if (unlikely(atomic_read(&urb->reject))) 
+		return -EINVAL;
+	
+	INIT_LIST_HEAD(&qtd_list);
+	urb->transfer_flags &= ~EHCI_STATE_UNLINK;
+
+
+	temp = usb_pipetype(urb->pipe);
+	max = usb_maxpacket(urb->dev, urb->pipe, !usb_pipein(urb->pipe));
+	
+
+	if (hcdpowerdown == 1) {
+		printk("Enqueue	hcd power down\n");
+		return -EINVAL;
+	}
+
+
+	/* patch to get the otg device */
+	if (!hubdev || 
+		(urb->dev->parent==usb_hcd->self.root_hub && 
+		hubdev!=urb->dev)) {
+		if(urb->dev->parent== usb_hcd->self.root_hub) {
+			hubdev = urb->dev;
+		}
+	}
+
+	switch (temp) {
+	case PIPE_INTERRUPT:
+		/*only one td */
+		num_tds	= 1;
+		mult = 1 + ((max >> 11)	& 0x03);
+		max &= 0x07ff;
+		max *= mult;
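+		/* high-bandwidth endpoints encode 1-3 transactions per
+		 * microframe in bits 12:11 of wMaxPacketSize, so the usable
+		 * payload per interval is the base size times that count */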
+
+		if (urb->transfer_buffer_length	> max) {
+			err("interrupt urb length is greater than %d\n", max);
+			return -EINVAL;
+		}
+
+		if (hubdev && urb->dev->parent == usb_hcd->self.root_hub) {
+			huburb = urb;
+		}
+
+		break;
+
+	case PIPE_CONTROL:
+		/* calculate the number of TDs needed for each phase pattern */
+		if (No_Data_Phase && No_Status_Phase) {
+			printk("Only SetUP Phase\n");
+			num_tds	= (urb->transfer_buffer_length == 0) ? 1 :
+				((urb->transfer_buffer_length -
+				  1) / HC_ATL_PL_SIZE +	1);
+		} else if (!No_Data_Phase && No_Status_Phase) {
+			printk("SetUP Phase and	Data Phase\n");
+			num_tds	= (urb->transfer_buffer_length == 0) ? 2 :
+				((urb->transfer_buffer_length -
+				  1) / HC_ATL_PL_SIZE +	3);
+		} else if (!No_Data_Phase && !No_Status_Phase) {
+			num_tds	= (urb->transfer_buffer_length == 0) ? 2 :
+				((urb->transfer_buffer_length -
+				  1) / HC_ATL_PL_SIZE +	3);
+		}
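+		/* the counts above are one setup TD, plus one TD per
+		 * HC_ATL_PL_SIZE chunk of data, plus a status TD whenever the
+		 * corresponding phase is used */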
+		
+		break;
+		
+	case PIPE_BULK:
+		num_tds	=
+			(urb->transfer_buffer_length - 1) / HC_ATL_PL_SIZE + 1;
+		if ((urb->transfer_flags & URB_ZERO_PACKET)
+			&& !(urb->transfer_buffer_length % max)) {
+			num_tds++;
+		}
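+		/* URB_ZERO_PACKET on an exact multiple of the max packet size
+		 * needs one extra TD for the terminating zero-length packet */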
+		
+		break;
+		
+#ifdef CONFIG_ISO_SUPPORT
+	case PIPE_ISOCHRONOUS:
+		/* Don't need to do anything here */
+		break;
+#endif
+	default:
+		return -EINVAL;	/* unsupported pipe type (isoc when CONFIG_ISO_SUPPORT is off) */
+
+
+	}
+
+#ifdef CONFIG_ISO_SUPPORT
+	if (temp != PIPE_ISOCHRONOUS) {
+#endif
+		/*make number of tds required */
+		urb_priv = kmalloc(sizeof(urb_priv_t) +
+				   num_tds * sizeof(struct ehci_qtd),
+				   mem_flags);
+		if (!urb_priv) {
+			err("memory allocation error\n");
+			return -ENOMEM;
+		}
+
+		memset(urb_priv, 0, sizeof(urb_priv_t) +
+			num_tds * sizeof(struct ehci_qtd));
+		INIT_LIST_HEAD(&urb_priv->qtd_list);
+		urb_priv->qtd[0] = NULL;
+		urb_priv->length = num_tds;
+		{
+			int i =	0;
+			/*allocate number of tds here. better to do this in qtd_make routine */
+			for (i = 0; i <	num_tds; i++) {
+				urb_priv->qtd[i] =
+					phci_hcd_qtd_allocate(mem_flags);
+				if (!urb_priv->qtd[i]) {
+					phci_hcd_urb_free_priv(pehci_hcd,
+							       urb_priv, NULL);
+					return -ENOMEM;
+				}
+			}
+		}
+		/*keep a copy of this */
+		urb->hcpriv = urb_priv;
+#ifdef CONFIG_ISO_SUPPORT
+	}
+#endif
+
+	switch (temp) {
+	case PIPE_INTERRUPT:
+		phci_hcd_make_qtd(pehci_hcd, &urb_priv->qtd_list,	urb, &status);
+		if (status < 0)	{
+			return status;
+		}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+		qh = phci_hcd_submit_interrupt(pehci_hcd, ep, &urb_priv->qtd_list, urb,
+			&status);
+#else
+		qh = phci_hcd_submit_interrupt(pehci_hcd, &urb_priv->qtd_list, urb,
+			&status);
+#endif
+		if (status < 0)
+			return status;
+		break;
+
+	case PIPE_CONTROL:
+	case PIPE_BULK:
+
+#ifdef THREAD_BASED
+	spin_lock_irqsave (&pehci_hcd->lock, flags);
+#endif
+		phci_hcd_make_qtd(pehci_hcd, &qtd_list,	urb, &status);
+		if (status < 0) {
+			return status;
+		}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+		qh = phci_hcd_submit_async(pehci_hcd, ep, &qtd_list, urb,
+			&status);
+#else
+		qh = phci_hcd_submit_async(pehci_hcd, &qtd_list, urb, &status);
+#endif
+
+#ifdef THREAD_BASED
+	spin_unlock_irqrestore (&pehci_hcd->lock, flags);
+#endif
+
+		if (status < 0) {
+			return status;
+		}
+		break;
+#ifdef CONFIG_ISO_SUPPORT
+	case PIPE_ISOCHRONOUS:
+		iso_dbg(ISO_DBG_DATA,
+			"[pehci_hcd_urb_enqueue]: URB Transfer buffer: 0x%08x\n",
+			(long) urb->transfer_buffer);
+		iso_dbg(ISO_DBG_DATA,
+			"[pehci_hcd_urb_enqueue]: URB Buffer Length: %d\n",
+			(long) urb->transfer_buffer_length);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+		phcd_submit_iso(pehci_hcd, ep, urb, (unsigned long *) &status);
+#else
+		spin_lock_irqsave(&pehci_hcd->lock, flags);
+		phcd_store_urb_pending(pehci_hcd, 0, urb, (int *) &status);
+		spin_unlock_irqrestore(&pehci_hcd->lock, flags);
+#endif
+
+		return status;
+
+		break;
+#endif
+	default:
+		return -ENODEV;
+	}			/*end of switch	*/
+
+#if (defined MSEC_INT_BASED)
+	return 0;
+#elif (defined THREAD_BASED)
+{ //send
+		st_UsbIt_Msg_Struc *stUsbItMsgSnd ;
+		unsigned long flags;
+		spin_lock_irqsave(&pehci_hcd->lock,flags);
+	
+		//local_irq_save(flags); /*disable interrupt*/
+		stUsbItMsgSnd = (st_UsbIt_Msg_Struc *)kmalloc(sizeof(st_UsbIt_Msg_Struc), GFP_ATOMIC);
+		if (!stUsbItMsgSnd)
+		{
+			/* release the lock taken above before bailing out */
+			spin_unlock_irqrestore(&pehci_hcd->lock, flags);
+			return -ENOMEM;
+		}
+
+		memset(stUsbItMsgSnd, 0, sizeof(st_UsbIt_Msg_Struc));
+		
+		stUsbItMsgSnd->usb_hcd = usb_hcd;
+		stUsbItMsgSnd->uIntStatus = NO_SOF_REQ_IN_REQ;
+		spin_lock(&enqueue_lock);
+		if(list_empty(&g_enqueueMessList.list))
+			list_add_tail(&(stUsbItMsgSnd->list), &(g_enqueueMessList.list));
+		spin_unlock(&enqueue_lock);
+		
+		pehci_print("\n------------- send mess : %d------------\n",stUsbItMsgSnd->uIntStatus);
+
+		//local_irq_restore(flags); /*disable interrupt*/
+		
+		spin_lock(&g_stUsbItThreadHandler.lock);
+		if ((g_stUsbItThreadHandler.phThreadTask != NULL) && (g_stUsbItThreadHandler.lThrdWakeUpNeeded == 0))
+		{
+			pehci_print("\n------- wake up thread : %d-----\n",stUsbItMsgSnd->uIntStatus);
+			g_stUsbItThreadHandler.lThrdWakeUpNeeded = 1;
+			wake_up(&(g_stUsbItThreadHandler.ulThrdWaitQhead));
+		}
+		spin_unlock(&g_stUsbItThreadHandler.lock);
+
+		spin_unlock_irqrestore(&pehci_hcd->lock,flags);
+	}
+	pehci_entry("-- %s: Exit\n",__FUNCTION__);
+    return 0;
+#else
+	/* submit the TDs for everything except isochronous transfers */
+	if (temp != PIPE_ISOCHRONOUS)
+		pehci_hcd_td_ptd_submit_urb(pehci_hcd, qh, qh->type);
+#endif
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+	return 0;
+
+}
+
+/*---------------------------------------------*
+  io request handlers
+ *---------------------------------------------*/
+
+/*unlink urb*/
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+static int
+pehci_hcd_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb)
+#else
+static int
+pehci_hcd_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb, int status)
+#endif
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	int status = 0;
+#endif
+	int retval = 0;
+	td_ptd_map_buff_t *td_ptd_buf;
+	td_ptd_map_t *td_ptd_map;
+	struct ehci_qh *qh = 0;
+	u32 skipmap = 0;
+	u32 buffstatus = 0;
+	unsigned long flags;
+	struct ehci_qtd	*qtd = 0;
+	struct usb_host_endpoint *ep;
+
+	struct ehci_qtd	*cancel_qtd = 0;	/*added	for stopping ptd*/
+	struct urb *cancel_urb = 0;	/*added	for stopping ptd*/
+	urb_priv_t *cancel_urb_priv = 0;	/* added for stopping ptd */
+	struct _isp1763_qha atlqha;
+	struct _isp1763_qha *qha;
+	struct isp1763_mem_addr	*mem_addr = 0;
+	u32 ormask = 0;
+	struct list_head *qtd_list = 0;
+	urb_priv_t *urb_priv = (urb_priv_t *) urb->hcpriv;
+	phci_hcd *hcd =	usb_hcd_to_pehci_hcd(usb_hcd);
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+
+	pehci_info("device %d\n", urb->dev->devnum);
+
+	if(urb_priv==NULL){
+		printk("*******urb_priv is NULL*******	%s: Entered\n",	__FUNCTION__);
+		return 0;
+		}
+	spin_lock_irqsave(&hcd->lock, flags);
+
+
+	switch (usb_pipetype(urb->pipe)) {
+	case PIPE_CONTROL:
+	case PIPE_BULK:
+	//	status = 0;
+		qh = urb_priv->qh;
+		if(qh==NULL)
+			break;
+
+		td_ptd_buf = &td_ptd_map_buff[TD_PTD_BUFF_TYPE_ATL];
+		td_ptd_map = &td_ptd_buf->map_list[qh->qtd_ptd_index];
+
+		/*if its already been removed */
+		if (td_ptd_map->state == TD_PTD_NEW) {
+			break;
+		}
+/* patch added for stopping a full-speed PTD */
+/* patch starts here */
+		if (urb->dev->speed != USB_SPEED_HIGH) {
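+			/* a full/low-speed (split) PTD cannot simply be skipped:
+			 * re-write it as valid/active with bit 15 of td_info2 set
+			 * so the controller retires it, then poll until it is no
+			 * longer valid/active */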
+
+			cancel_qtd = td_ptd_map->qtd;
+			if (!qh	|| !cancel_qtd)	{
+				err("Never Error: QH and QTD must not be NULL\n");
+			} else {
+				cancel_urb = cancel_qtd->urb;
+				cancel_urb_priv	=
+					(urb_priv_t *) cancel_urb->hcpriv;
+				mem_addr = &cancel_qtd->mem_addr;
+				qha = &atlqha;
+				memset(qha, 0, sizeof(struct _isp1763_qha));
+
+				skipmap	=
+					isp1763_reg_read16(hcd->dev,
+							   hcd->regs.
+							   atltdskipmap,
+							   skipmap);
+				skipmap	|= td_ptd_map->ptd_bitmap;
+				isp1763_reg_write16(hcd->dev,
+						    hcd->regs.atltdskipmap,
+						    skipmap);
+
+				/*read this ptd	from the ram address,address is	in the
+				   td_ptd_map->ptd_header_addr */
+				isp1763_mem_read(hcd->dev,
+						 td_ptd_map->ptd_header_addr, 0,
+						 (u32 *) (qha),	PHCI_QHA_LENGTH,
+						 0);
+				if ((qha->td_info1 & QHA_VALID)
+					|| (qha->td_info4 &	QHA_ACTIVE)) {
+
+					qha->td_info2 |= 0x00008000;
+					qha->td_info1 |= QHA_VALID;
+					qha->td_info4 |= QHA_ACTIVE;
+					skipmap	&= ~td_ptd_map->ptd_bitmap;
+					ormask |= td_ptd_map->ptd_bitmap;
+					isp1763_reg_write16(hcd->dev,
+						hcd->regs.
+						atl_irq_mask_or,
+						ormask);
+					/* copy back into the header, payload is	already
+					 * present no need to write again */
+					isp1763_mem_write(hcd->dev,
+						td_ptd_map->
+						ptd_header_addr, 0,
+						(u32 *) (qha),
+						PHCI_QHA_LENGTH, 0);
+					/*unskip this td */
+					isp1763_reg_write16(hcd->dev,
+						hcd->regs.
+						atltdskipmap,
+						skipmap);
+					udelay(100);
+				}
+
+				isp1763_mem_read(hcd->dev,
+					td_ptd_map->ptd_header_addr, 0,
+					(u32 *) (qha),	PHCI_QHA_LENGTH,
+					0);
+				if (!(qha->td_info1 & QHA_VALID)
+					&& !(qha->td_info4 & QHA_ACTIVE)) {
+					printk(KERN_NOTICE
+					"ptd has	been retired \n");
+				}
+
+			}
+		}
+
+/*   Patch Ends	*/
+		/* These TDs are not pending anymore */
+		td_ptd_buf->pending_ptd_bitmap &= ~td_ptd_map->ptd_bitmap;
+
+		/*tell atl worker this urb is going to be removed */
+		td_ptd_map->state = TD_PTD_REMOVE;
+		urb_priv->state	|= DELETE_URB;
+
+		/*read the skipmap, to see if this transfer has	to be rescheduled */
+		skipmap	=
+			isp1763_reg_read16(hcd->dev, hcd->regs.atltdskipmap,
+			skipmap);
+		pehci_check("remove skip map %x, ptd map %x\n",	skipmap,
+			td_ptd_map->ptd_bitmap);
+
+		buffstatus =
+			isp1763_reg_read16(hcd->dev, hcd->regs.buffer_status,
+			buffstatus);
+
+
+		isp1763_reg_write16(hcd->dev, hcd->regs.atltdskipmap,
+			skipmap | td_ptd_map->ptd_bitmap);
+
+		while (!(skipmap & td_ptd_map->ptd_bitmap)) {
+			udelay(125);
+
+			skipmap	= isp1763_reg_read16(hcd->dev,
+				hcd->regs.atltdskipmap,
+				skipmap);
+		}
+
+		/* if all  transfers skipped,
+		 * then	disable	the atl	buffer,
+		 * so that new transfer	can come in
+		 * need	to see the side	effects
+		 * */
+		if (skipmap == NO_TRANSFER_ACTIVE) {
+			/*disable the buffer */
+			pehci_info("disable the	atl buffer\n");
+			buffstatus &= ~ATL_BUFFER;
+			isp1763_reg_write16(hcd->dev, hcd->regs.buffer_status,
+				buffstatus);
+		}
+
+		qtd_list = &qh->qtd_list;
+		/*this should remove all pending transfers */
+		pehci_check("num tds %d, urb length %d,device %d\n",
+			urb_priv->length, urb->transfer_buffer_length,
+			urb->dev->devnum);
+
+		pehci_check("remove first qtd address %p\n", urb_priv->qtd[0]);
+		pehci_check("length of the urb %d, completed %d\n",
+			urb->transfer_buffer_length, urb->actual_length);
+		qtd = urb_priv->qtd[urb_priv->length - 1];
+		pehci_check("qtd state is %x\n", qtd->state);
+
+
+		urb->status=status;
+		status = 0;
+#ifdef USBNET 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+		pehci_hcd_urb_delayed_complete(hcd, qh, urb, td_ptd_map, NULL);
+#else
+		pehci_hcd_urb_delayed_complete(hcd, qh, urb, td_ptd_map);
+#endif
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+		pehci_hcd_urb_complete(hcd, qh, urb, td_ptd_map, NULL);
+#else
+		pehci_hcd_urb_complete(hcd, qh, urb, td_ptd_map);
+#endif
+
+#endif
+		break;
+
+	case PIPE_INTERRUPT:
+		pehci_check("phci_1763_urb_dequeue: INTR needs to be done\n");
+		urb->status = status;	/* not -ENOENT; this allows the system to suspend in autosuspend mode */
+		status = 0;
+		qh = urb_priv->qh;
+		if(qh==NULL)
+			break;
+
+		td_ptd_buf = &td_ptd_map_buff[TD_PTD_BUFF_TYPE_INTL];
+		td_ptd_map = &td_ptd_buf->map_list[qh->qtd_ptd_index];
+
+		/* the urb has already been removed */
+		if (td_ptd_map->state == TD_PTD_NEW) {
+			kfree(urb_priv);
+			break;
+		}
+
+		/* These TDs are not pending anymore */
+		td_ptd_buf->pending_ptd_bitmap &= ~td_ptd_map->ptd_bitmap;
+
+		td_ptd_map->state = TD_PTD_REMOVE;
+		urb_priv->state	|= DELETE_URB;
+
+		/*read the skipmap, to see if this transfer has	to be rescheduled */
+		skipmap	=
+			isp1763_reg_read16(hcd->dev, hcd->regs.inttdskipmap,
+			skipmap);
+
+		isp1763_reg_write16(hcd->dev, hcd->regs.inttdskipmap,
+			skipmap | td_ptd_map->ptd_bitmap);
+		qtd_list = &qh->qtd_list;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+		pehci_hcd_urb_complete(hcd, qh, urb, td_ptd_map, NULL);
+#else
+		pehci_hcd_urb_complete(hcd, qh, urb, td_ptd_map);
+#endif
+		break;
+#ifdef CONFIG_ISO_SUPPORT
+	case PIPE_ISOCHRONOUS:
+		pehci_info("urb dequeue %x %x\n", urb,urb->pipe);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+	if(urb->dev->speed==USB_SPEED_HIGH){
+		retval = usb_hcd_check_unlink_urb(usb_hcd, urb, status);
+		if (!retval) {
+			pehci_info("[pehci_hcd_urb_dequeue] usb_hcd_unlink_urb_from_ep with status = %d\n", status);
+			usb_hcd_unlink_urb_from_ep(usb_hcd, urb);
+
+
+		}
+	}
+#endif
+
+		
+		status = 0;
+		ep=urb->ep;
+		spin_unlock_irqrestore(&hcd->lock, flags);
+		mdelay(100);
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+						if (urb->hcpriv!= periodic_ep[0]){
+#else
+						if (urb->ep != periodic_ep[0]){
+#endif
+	if(!list_empty(&ep->urb_list)){	
+		while(!list_empty(&ep->urb_list)){
+			urb=container_of(ep->urb_list.next,struct urb,urb_list);
+			pehci_info("list is not empty %x %x\n",urb,urb->dev->state);
+			if(urb){
+		retval = usb_hcd_check_unlink_urb(usb_hcd, urb,0);
+		if (!retval) {
+			pehci_info("[pehci_hcd_urb_dequeue] usb_hcd_unlink_urb_from_ep with status = %d\n", status);
+			usb_hcd_unlink_urb_from_ep(usb_hcd, urb);
+		}
+			urb->status=-ESHUTDOWN;
+	#if LINUX_VERSION_CODE <KERNEL_VERSION(2,6,24)
+			usb_hcd_giveback_urb(usb_hcd,urb);
+	#else
+			usb_hcd_giveback_urb(usb_hcd,urb,urb->status);
+	#endif
+				
+			}
+		}
+		}else{
+	if(urb){
+		pehci_info("list empty %x\n",urb->dev->state);
+		phcd_clean_urb_pending(hcd, urb);
+		retval = usb_hcd_check_unlink_urb(usb_hcd, urb,0);
+		if (!retval) {
+			pehci_info("[pehci_hcd_urb_dequeue] usb_hcd_unlink_urb_from_ep with status = %d\n", status);
+			usb_hcd_unlink_urb_from_ep(usb_hcd, urb);
+		}
+			urb->status=-ESHUTDOWN;
+	#if LINUX_VERSION_CODE <KERNEL_VERSION(2,6,24)
+			usb_hcd_giveback_urb(usb_hcd,urb);
+	#else
+			usb_hcd_giveback_urb(usb_hcd,urb,urb->status);
+	#endif
+				
+			}
+			
+		}
+	}	
+#endif
+		return 0;
+		/*nothing to do	here, wait till	all transfers are done in iso worker */
+		break;
+	}
+
+	spin_unlock_irqrestore(&hcd->lock, flags);
+	pehci_info("status %d\n", status);
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+	return status;
+}
+
+/* bulk	qh holds the data toggle */
+
+static void
+pehci_hcd_endpoint_disable(struct usb_hcd *usb_hcd,
+			   struct usb_host_endpoint *ep)
+{
+	phci_hcd *ehci = usb_hcd_to_pehci_hcd(usb_hcd);
+	struct urb *urb;
+
+	unsigned long flags;
+	struct ehci_qh *qh;
+
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+	/* ASSERT:  any	requests/urbs are being	unlinked */
+	/* ASSERT:  nobody can be submitting urbs for this any more */
+	
+#ifdef CONFIG_ISO_SUPPORT
+	mdelay(100);  //delay for ISO
+#endif
+	spin_lock_irqsave(&ehci->lock, flags);
+
+	qh = ep->hcpriv;
+
+	if (!qh) {
+		goto done;
+	} else {
+#ifdef CONFIG_ISO_SUPPORT
+		pehci_info("disable endpoint %x %x\n", ep->desc.bEndpointAddress,qh->type);
+
+		
+		if (qh->type == TD_PTD_BUFF_TYPE_ISTL) {
+
+			/*wait for urb to get complete*/
+			pehci_info("disable %x \n", list_empty(&ep->urb_list));
+			while (!list_empty(&ep->urb_list)) {
+			
+				urb = container_of(ep->urb_list.next,
+					struct urb, urb_list);
+				if (urb) {
+					phcd_clean_urb_pending(ehci, urb);
+					spin_unlock_irqrestore(&ehci->lock,
+						flags);
+
+					urb->status = -ESHUTDOWN;
+					
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+					usb_hcd_giveback_urb(usb_hcd, urb);
+#else
+					usb_hcd_giveback_urb(usb_hcd, urb,
+						urb->status);
+#endif
+					spin_lock_irqsave(&ehci->lock, flags);
+
+				}
+
+			}
+		}
+#endif
+		/* complete whatever is left on this endpoint */
+		pehci_complete_device_removal(ehci, qh);
+#ifdef CONFIG_ISO_SUPPORT
+		phcd_clean_periodic_ep();
+#endif
+		ep->hcpriv = NULL;
+
+		goto done;
+	}
+	done:
+
+	ep->hcpriv = NULL;
+
+	spin_unlock_irqrestore(&ehci->lock, flags);
+	printk("disable endpoint exit\n");
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+	return;
+}
+
+/*called by core, for current frame number*/
+static int
+pehci_hcd_get_frame_number(struct usb_hcd *usb_hcd)
+{
+	u32 framenumber	= 0;
+	phci_hcd *pehci_hcd = usb_hcd_to_pehci_hcd(usb_hcd);
+	framenumber =
+		isp1763_reg_read16(pehci_hcd->dev, pehci_hcd->regs.frameindex,
+		framenumber);
+	return framenumber;
+}
+
+/* root hub status data, called by the root hub timer.
+ * Returns 0 if there is no change, else 1.
+ */
+static int
+pehci_rh_status_data(struct usb_hcd *usb_hcd, char *buf)
+{
+
+	u32 temp = 0, status = 0;
+	u32 ports = 0, i, retval = 1;
+	unsigned long flags;
+	phci_hcd *hcd =	usb_hcd_to_pehci_hcd(usb_hcd);
+
+	if (hcdpowerdown == 1)
+		return 0;
+
+	buf[0] = 0;
+	if(portchange==1){
+		printk("Remotewakeup-enumerate again \n");
+		buf[0] |= 2;
+		hcd->reset_done[0] = 0;
+		return 1;
+	}
+	/* init	status to no-changes */
+	buf[0] = 0;
+	/*number of ports */
+	ports =	0x1;
+	spin_lock_irqsave(&hcd->lock, flags);
+	/*read the port	status registers */
+	for (i = 0; i <	ports; i++) {
+		temp = isp1763_reg_read32(hcd->dev, hcd->regs.ports[i],	temp);
+		if (temp & PORT_OWNER) {
+			/* don't report the port status change when the port is
+			 * owned by the companion HCD, but clear it if set */
+			if (temp & PORT_CSC) {
+				temp &=	~PORT_CSC;
+				isp1763_reg_write32(hcd->dev,
+						    hcd->regs.ports[i],	temp);
+				continue;
+			}
+		}
+
+		if (!(temp & PORT_CONNECT)) {
+			hcd->reset_done[i] = 0;
+		}
+		if ((temp & (PORT_CSC |	PORT_PEC | PORT_OCC)) != 0) {
+			if (i <	7) {
+				buf[0] |= 1 << (i + 1);
+			} else {
+				buf[1] |= 1 << (i - 7);
+			}
+			status = STS_PCD;
+		}
+	}
+
+	spin_unlock_irqrestore(&hcd->lock, flags);
+	return status ?	retval : 0;
+}
+
+/*root hub control requests*/
+static int
+pehci_rh_control(struct	usb_hcd	*usb_hcd, u16 typeReq, u16 wValue,
+		 u16 wIndex, char *buf,	u16 wLength)
+{
+	u32 ports = 0;
+	u32 temp = 0, status;
+	unsigned long flags;
+	int retval = 0;
+	phci_hcd *hcd =	usb_hcd_to_pehci_hcd(usb_hcd);
+
+	ports =	0x11;
+
+	printk("%s: request %x, wValue:0x%x, wIndex:0x%x\n", __func__, typeReq, wValue, wIndex);
+	
+	spin_lock_irqsave(&hcd->lock, flags);
+	switch (typeReq) {
+	case ClearHubFeature:
+		switch (wValue)	{
+		case C_HUB_LOCAL_POWER:
+		case C_HUB_OVER_CURRENT:
+			/* no hub-wide feature/status flags */
+			break;
+		default:
+			goto error;
+		}
+		break;
+	case ClearPortFeature:
+		pehci_print("ClearPortFeature:0x%x\n", ClearPortFeature);
+		if (!wIndex || wIndex >	(ports & 0xf)) {
+			pehci_info
+				("ClearPortFeature not valid port number %d, should be %d\n",
+				 wIndex, (ports	& 0xf));
+			goto error;
+		}
+		wIndex--;
+		temp = isp1763_reg_read32(hcd->dev, hcd->regs.ports[wIndex],
+					  temp);
+		if (temp & PORT_OWNER) {
+			printk("port is	owned by the CC	host\n");
+			break;
+		}
+
+		switch (wValue)	{
+		case USB_PORT_FEAT_ENABLE:
+			pehci_print("enable the	port\n");
+			isp1763_reg_write32(hcd->dev, hcd->regs.ports[wIndex],
+					    temp & ~PORT_PE);
+
+			break;
+		case USB_PORT_FEAT_C_ENABLE:
+			printk("clear the port enable change bit\n");
+			isp1763_reg_write32(hcd->dev, hcd->regs.ports[wIndex],
+					    temp | PORT_PEC);
+			break;
+		case USB_PORT_FEAT_SUSPEND:
+		case USB_PORT_FEAT_C_SUSPEND:
+			printk("clear feature suspend  \n");
+			break;
+		case USB_PORT_FEAT_POWER:
+			if (ports & 0x10) {	/* port has power control switches */
+				isp1763_reg_write32(hcd->dev,
+						    hcd->regs.ports[wIndex],
+						    temp & ~PORT_POWER);
+			}
+			break;
+		case USB_PORT_FEAT_C_CONNECTION:
+			pehci_print("connect change, status is 0x%08x\n", temp);
+			isp1763_reg_write32(hcd->dev, hcd->regs.ports[wIndex],
+					    temp | PORT_CSC);
+			break;
+		case USB_PORT_FEAT_C_OVER_CURRENT:
+			isp1763_reg_write32(hcd->dev, hcd->regs.ports[wIndex],
+					    temp | PORT_OCC);
+			break;
+		default:
+			goto error;
+
+		}
+		break;
+
+	case GetHubDescriptor:
+		pehci_hub_descriptor(hcd, (struct usb_hub_descriptor *)	buf);
+		break;
+
+	case GetHubStatus:
+		pehci_print("GetHubStatus:0x%x\n", GetHubStatus);
+		/* no hub-wide feature/status flags */
+		memset(buf, 0, 4);
+		break;
+	case GetPortStatus:
+		pehci_print("GetPortStatus:0x%x\n", GetPortStatus);
+		if (!wIndex || wIndex >	(ports & 0xf)) {
+			pehci_info
+				("GetPortStatus,not valid port number %d, should be %d\n",
+				 wIndex, (ports	& 0xf));
+			goto error;
+		}
+		wIndex--;
+		status = 0;
+		temp = isp1763_reg_read32(hcd->dev, hcd->regs.ports[wIndex],
+					  temp);
+		printk("root port status:0x%x\n", temp);
+		/* connect status change */
+		if (temp & PORT_CSC) {
+			status |= 1 << USB_PORT_FEAT_C_CONNECTION;
+			pehci_print("feature CSC 0x%08x	and status 0x%08x  \n",
+				    temp, status);
+		}
+		if(portchange){
+			portchange=0;
+			status |= 1 << USB_PORT_FEAT_C_CONNECTION;
+		}
+		/*port enable change */
+		if (temp & PORT_PEC) {
+			status |= 1 << USB_PORT_FEAT_C_ENABLE;
+			pehci_print("feature PEC  0x%08x and status 0x%08x  \n",
+				    temp, status);
+		}
+		/*port over-current */
+		if (temp & PORT_OCC) {
+			status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT;
+			pehci_print("feature OCC 0x%08x	and status 0x%08x  \n",
+				    temp, status);
+		}
+
+		/* whoever resets must GetPortStatus to	complete it!! */
+		if ((temp & PORT_RESET)	&& jiffies > hcd->reset_done[wIndex]) {
+			status |= 1 << USB_PORT_FEAT_C_RESET;
+			pehci_print("feature reset 0x%08x and status 0x%08x\n",
+				temp, status);
+			printk(KERN_NOTICE
+				"feature	reset 0x%08x and status	0x%08x\n", temp,
+				status);
+			/* force reset to complete */
+			isp1763_reg_write32(hcd->dev, hcd->regs.ports[wIndex],
+					    temp & ~PORT_RESET);
+			do {
+				mdelay(20);
+				temp = isp1763_reg_read32(hcd->dev,
+							  hcd->regs.
+							  ports[wIndex], temp);
+			} while	(temp &	PORT_RESET);
+
+			/* see what we found out */
+			printk(KERN_NOTICE "after portreset: %x\n", temp);
+
+			temp = phci_check_reset_complete(hcd, wIndex, temp);
+			printk(KERN_NOTICE "after checkportreset: %x\n", temp);
+		}
+
+		/* don't show wPortStatus if it's owned	by a companion hc */
+
+		if (!(temp & PORT_OWNER)) {
+
+			if (temp & PORT_CONNECT) {
+				status |= 1 << USB_PORT_FEAT_CONNECTION;
+				status |= 1 << USB_PORT_FEAT_HIGHSPEED;
+			}
+			if (temp & PORT_PE) {
+				status |= 1 << USB_PORT_FEAT_ENABLE;
+			}
+			if (temp & PORT_SUSPEND) {
+				status |= 1 << USB_PORT_FEAT_SUSPEND;
+			}
+			if (temp & PORT_OC) {
+				status |= 1 << USB_PORT_FEAT_OVER_CURRENT;
+			}
+			if (temp & PORT_RESET) {
+				status |= 1 << USB_PORT_FEAT_RESET;
+			}
+			if (temp & PORT_POWER) {
+				status |= 1 << USB_PORT_FEAT_POWER;
+			}
+		}
+
+		/* This	alignment is good, caller used kmalloc() */
+		*((u32 *) buf) = cpu_to_le32(status);
+		break;
+
+	case SetHubFeature:
+		pehci_print("SetHubFeature:0x%x\n", SetHubFeature);
+		switch (wValue)	{
+		case C_HUB_LOCAL_POWER:
+		case C_HUB_OVER_CURRENT:
+			/* no hub-wide feature/status flags */
+			break;
+		default:
+			goto error;
+		}
+		break;
+	case SetPortFeature:
+		pehci_print("SetPortFeature:%x\n", SetPortFeature);
+		if (!wIndex || wIndex >	(ports & 0xf)) {
+			pehci_info
+				("SetPortFeature not valid port	number %d, should be %d\n",
+				 wIndex, (ports	& 0xf));
+			goto error;
+		}
+		wIndex--;
+		temp = isp1763_reg_read32(hcd->dev, hcd->regs.ports[wIndex],
+					  temp);
+		pehci_print("SetPortFeature:PortSc Val 0x%x\n",	temp);
+		if (temp & PORT_OWNER) {
+			break;
+		}
+		switch (wValue)	{
+		case USB_PORT_FEAT_ENABLE:
+			/*enable the port */
+			isp1763_reg_write32(hcd->dev, hcd->regs.ports[wIndex],
+				temp | PORT_PE);
+			break;
+		case USB_PORT_FEAT_SUSPEND:
+			
+			#if 0 /* Port suspend will be added in suspend function */
+			isp1763_reg_write32(hcd->dev, hcd->regs.ports[wIndex],
+				temp | PORT_SUSPEND);
+			#endif
+			
+			break;
+		case USB_PORT_FEAT_POWER:
+			pehci_print("Set Port Power 0x%x and Ports %x\n",
+				USB_PORT_FEAT_POWER, ports);
+			if (ports & 0x10) {
+				printk(KERN_NOTICE
+					"PortSc Reg %x and Value %x\n",
+					hcd->regs.ports[wIndex],
+					(temp | PORT_POWER));
+
+				isp1763_reg_write32(hcd->dev,
+					hcd->regs.ports[wIndex],
+					temp | PORT_POWER);
+			}
+			break;
+		case USB_PORT_FEAT_RESET:
+			pehci_print("Set Port Reset 0x%x\n",
+				USB_PORT_FEAT_RESET);
+			if ((temp & (PORT_PE | PORT_CONNECT)) == PORT_CONNECT
+				&& PORT_USB11(temp)) {
+				printk("error:port %d low speed	--> companion\n", wIndex + 1);
+				temp |=	PORT_OWNER;
+			} else {
+				temp |=	PORT_RESET;
+				temp &=	~PORT_PE;
+
+				/*
+				 * caller must wait, then call GetPortStatus
+				 * usb 2.0 spec	says 50	ms resets on root
+				 */
+				hcd->reset_done[wIndex]	= jiffies
+					+ ((50 /* msec */  * HZ) / 1000);
+			}
+			isp1763_reg_write32(hcd->dev, hcd->regs.ports[wIndex],
+				temp);
+			break;
+		default:
+			goto error;
+		}
+		break;
+	default:
+		pehci_print("this request doesn't fit anywhere\n");
+	error:
+		/* "stall" on error */
+		pehci_info
+			("unhandled root hub request: typereq 0x%08x, wValue %d, wIndex	%d\n",
+			 typeReq, wValue, wIndex);
+		retval = -EPIPE;
+	}
+
+	pehci_info("rh_control:exit\n");
+	spin_unlock_irqrestore(&hcd->lock, flags);
+	return retval;
+}
+
+
+
+/*-------------------------------------------------------------------------*/
+
+static const struct hc_driver pehci_driver = {
+	.description = hcd_name,
+	.product_desc =	"ST-ERICSSON ISP1763",
+	.hcd_priv_size = sizeof(phci_hcd),
+#ifdef LINUX_2620
+	.irq = NULL,
+#else
+	.irq = pehci_hcd_irq,
+#endif
+	/*
+	 * generic hardware linkage
+	 */
+	.flags = HCD_USB2 | HCD_MEMORY,
+
+	/*
+	 * basic lifecycle operations
+	 */
+	.reset = pehci_hcd_reset,
+	.start = pehci_hcd_start,
+	.bus_suspend = pehci_bus_suspend,
+	.bus_resume  = pehci_bus_resume,
+	.stop =	pehci_hcd_stop,
+	/*
+	 * managing i/o	requests and associated	device resources
+	 */
+	.urb_enqueue = pehci_hcd_urb_enqueue,
+	.urb_dequeue = pehci_hcd_urb_dequeue,
+	.endpoint_disable = pehci_hcd_endpoint_disable,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number = pehci_hcd_get_frame_number,
+
+	/*
+	 * root	hub support
+	 */
+	.hub_status_data = pehci_rh_status_data,
+	.hub_control = pehci_rh_control,
+};
+
+/* interrupt worker thread support (THREAD_BASED) */
+
+#ifdef THREAD_BASED
+int pehci_hcd_process_irq_it_handle(struct usb_hcd* usb_hcd_)
+{
+	int istatus;
+	
+	struct usb_hcd 		*usb_hcd;
+	char					uIntStatus;
+	phci_hcd    *pehci_hcd;
+
+	struct list_head *pos, *lst_tmp;
+	st_UsbIt_Msg_Struc *mess;
+	unsigned long flags;
+	
+	g_stUsbItThreadHandler.phThreadTask = current;
+	siginitsetinv(&((g_stUsbItThreadHandler.phThreadTask)->blocked), sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGTERM));		
+	pehci_info("pehci_hcd_process_irq_it_thread ID : %d\n", g_stUsbItThreadHandler.phThreadTask->pid);
+	
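+	/* worker loop: wait (up to MSEC_INTERVAL_CHECKING ms) for a wakeup,
+	 * drain the ISR message list first, then the enqueue message list;
+	 * on a timeout just run the interrupt handler once as a poll */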
+	while (1)
+	{
+		if (signal_pending(g_stUsbItThreadHandler.phThreadTask))
+		{
+	       	printk("thread handler:  Thread received signal\n");
+	       	break;
+		}
+
+		spin_lock(&g_stUsbItThreadHandler.lock);
+		g_stUsbItThreadHandler.lThrdWakeUpNeeded = 0;
+		spin_unlock(&g_stUsbItThreadHandler.lock);
+		
+		/* Wait until a signal arrives or we are woken up or timeout (5second)*/
+		istatus = wait_event_interruptible_timeout(g_stUsbItThreadHandler.ulThrdWaitQhead, (g_stUsbItThreadHandler.lThrdWakeUpNeeded== 1), msecs_to_jiffies(MSEC_INTERVAL_CHECKING));
+
+		local_irq_save(flags); /*disable interrupt*/
+		spin_lock(&g_stUsbItThreadHandler.lock);
+		g_stUsbItThreadHandler.lThrdWakeUpNeeded = 1;
+		spin_unlock(&g_stUsbItThreadHandler.lock);
+		//receive mess	
+		if (!list_empty(&g_messList.list)) //mess list not empty
+		{
+
+			list_for_each_safe(pos, lst_tmp, &(g_messList.list))
+			{
+				mess = list_entry(pos, st_UsbIt_Msg_Struc, list);
+
+				usb_hcd = mess->usb_hcd;
+				uIntStatus = mess->uIntStatus;
+				//pehci_print("-------------receive mess : %d------------\n",uIntStatus);
+				pehci_hcd = usb_hcd_to_pehci_hcd(usb_hcd);
+				if((uIntStatus & NO_SOF_REQ_IN_TSK)  || (uIntStatus & NO_SOF_REQ_IN_ISR) || (uIntStatus & NO_SOF_REQ_IN_REQ))
+					pehci_interrupt_handler(pehci_hcd);
+				spin_lock(&g_stUsbItThreadHandler.lock);
+				list_del(pos);
+				kfree(mess);
+				spin_unlock(&g_stUsbItThreadHandler.lock);				
+			}
+		}
+		else if(!list_empty(&g_enqueueMessList.list))
+		{
+			mess = list_first_entry(&(g_enqueueMessList.list), st_UsbIt_Msg_Struc, list);
+			usb_hcd = mess->usb_hcd;
+			uIntStatus = mess->uIntStatus;
+
+			pehci_print("-------------receive mess : %d------------\n",uIntStatus);
+			pehci_hcd = usb_hcd_to_pehci_hcd(usb_hcd);
+			if((uIntStatus & NO_SOF_REQ_IN_REQ))
+			{
+				pehci_interrupt_handler(pehci_hcd);
+			}	
+
+			{
+				spin_lock(&enqueue_lock);
+				list_del((g_enqueueMessList.list).next);
+				kfree(mess);
+				spin_unlock(&enqueue_lock);
+			}	
+		}
+		else if(istatus == 0) //timeout
+		{
+			pehci_hcd = NULL;
+			pehci_hcd = usb_hcd_to_pehci_hcd(usb_hcd_);
+			pehci_interrupt_handler(pehci_hcd);
+
+		}
+		local_irq_restore(flags);  /*enable interrupt*/
+	}
+
+	flush_signals(g_stUsbItThreadHandler.phThreadTask);
+	g_stUsbItThreadHandler.phThreadTask = NULL;
+	return 0;
+	
+}
+
+int pehci_hcd_process_irq_in_thread(struct usb_hcd* usb_hcd_)
+{
+	
+	//status = msgq_create("usb_it_queue", 10, sizeof(st_UsbIt_Msg_Struc), &uUsbIt_MsgQueId);
+	INIT_LIST_HEAD(&g_messList.list);
+	INIT_LIST_HEAD(&g_enqueueMessList.list);
+	spin_lock_init(&enqueue_lock);
+
+	memset(&g_stUsbItThreadHandler, 0, sizeof(st_UsbIt_Thread));
+	init_waitqueue_head(&(g_stUsbItThreadHandler.ulThrdWaitQhead));
+	g_stUsbItThreadHandler.lThrdWakeUpNeeded = 0;
+	spin_lock_init(&g_stUsbItThreadHandler.lock);
+	kernel_thread(pehci_hcd_process_irq_it_handle, usb_hcd_, 0);
+	
+    return 0;
+}
+#endif
+
+
+/*probe	the PCI	host*/
+int
+pehci_hcd_probe(struct isp1763_dev *isp1763_dev, isp1763_id * ids)
+{
+#ifdef NON_PCI
+    struct platform_device *dev = isp1763_dev->dev;
+#else /* PCI */
+	struct pci_dev *dev = isp1763_dev->pcidev;
+#endif
+	struct usb_hcd *usb_hcd;
+	phci_hcd *pehci_hcd;
+	int status = 0;
+
+#ifndef NON_PCI
+	u32 intcsr=0;
+#endif
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+	if (usb_disabled()) {
+		return -ENODEV;
+	}
+
+	usb_hcd	= usb_create_hcd(&pehci_driver,&dev->dev, "ISP1763");
+
+	if (usb_hcd == NULL) {
+		status = -ENOMEM;
+		goto clean;
+	}
+
+	/* this	is our host */
+	pehci_hcd = usb_hcd_to_pehci_hcd(usb_hcd);
+	pehci_hcd->dev = isp1763_dev;
+	pehci_hcd->iobase = (u8	*) isp1763_dev->baseaddress;
+	pehci_hcd->iolength = isp1763_dev->length;
+
+
+	/* lets	keep our host here */
+	isp1763_dev->driver_data = usb_hcd;
+#ifdef NON_PCI
+//Do nothing
+#else
+	/* Enable the interrupts from PLX to PCI */
+	/* CONFIGURE PCI/PLX interrupt */
+#ifdef DATABUS_WIDTH_16
+	wvalue1	= readw(pehci_hcd->plxiobase + 0x68);
+	wvalue2	= readw(pehci_hcd->plxiobase + 0x68 + 2);
+	intcsr |= wvalue2;
+	intcsr <<= 16;
+	intcsr |= wvalue1;
+	printk(KERN_NOTICE "Enable PCI Intr: %x	\n", intcsr);
+	intcsr |= 0x900;
+	writew((u16) intcsr, pehci_hcd->plxiobase + 0x68);
+	writew((u16) (intcsr >>	16), pehci_hcd->plxiobase + 0x68 + 2);
+#else
+	bvalue1	= readb(pehci_hcd->plxiobase + 0x68);
+	bvalue2	= readb(pehci_hcd->plxiobase + 0x68 + 1);
+	bvalue3	= readb(pehci_hcd->plxiobase + 0x68 + 2);
+	bvalue4	= readb(pehci_hcd->plxiobase + 0x68 + 3);
+	intcsr |= bvalue4;
+	intcsr <<= 8;
+	intcsr |= bvalue3;
+	intcsr <<= 8;
+	intcsr |= bvalue2;
+	intcsr <<= 8;
+	intcsr |= bvalue1;
+	writeb((u8) intcsr, pehci_hcd->plxiobase + 0x68);
+	writeb((u8) (intcsr >> 8), pehci_hcd->plxiobase	+ 0x68 + 1);
+	writeb((u8) (intcsr >> 16), pehci_hcd->plxiobase + 0x68	+ 2);
+	writeb((u8) (intcsr >> 24), pehci_hcd->plxiobase + 0x68	+ 3);
+#endif
+#endif
+
+	No_Data_Phase =	0;
+	No_Status_Phase	= 0;
+	usb_hcd->self.controller->dma_mask = 0;
+	usb_hcd->self.otg_port = 1;
+#if 0
+#ifndef THREAD_BASED 	
+	status = isp1763_request_irq(pehci_hcd_irq, isp1763_dev, usb_hcd);
+#endif
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	if (status == 0) {
+		status = usb_add_hcd(usb_hcd, isp1763_dev->irq, SA_SHIRQ);
+	}
+#else /* Linux 2.6.28*/
+	usb_hcd->self.uses_dma = 0;
+	if (status == 0){
+		status = usb_add_hcd(usb_hcd, isp1763_dev->irq,
+		IRQF_SHARED | IRQF_DISABLED | IRQF_TRIGGER_LOW);
+	}
+#endif
+
+#ifdef THREAD_BASED 	
+	g_pehci_hcd = pehci_hcd;
+#endif
+
+#ifdef USBNET 
+	// initialize clean up urb list
+	INIT_LIST_HEAD(&(pehci_hcd->cleanup_urb.urb_list));
+#endif
+	enable_irq_wake(isp1763_dev->irq);
+	wake_lock_init(&pehci_wake_lock, WAKE_LOCK_SUSPEND,
+						dev_name(&dev->dev));
+	wake_lock(&pehci_wake_lock);
+
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+	isp1763_hcd=isp1763_dev;
+	return status;
+
+	clean:
+	return status;
+
+}
+/*--------------------------------------------------------------*
+ *
+ *  Module details: pehci_hcd_powerup
+ *
+ *  This function powers the chip back up after a power-down and
+ *  re-runs the driver probe to bring the host controller online.
+ *
+ *  Input: struct isp1763_dev *
+ *
+ *  Called by: IOCTL function
+ *
+ *--------------------------------------------------------------*/
+void 
+pehci_hcd_powerup(struct	isp1763_dev *dev)
+{
+	printk("%s\n", __FUNCTION__);
+	hcdpowerdown = 0;
+	dev->driver->probe(dev,dev->driver->id);
+
+	
+}
+void
+pehci_hcd_powerdown(struct	isp1763_dev *dev)
+{
+	struct usb_hcd *usb_hcd;
+
+	phci_hcd *hcd = NULL;
+	u32 temp;
+	usb_hcd = (struct usb_hcd *) dev->driver_data;
+	if (!usb_hcd) {
+		return;
+	}
+	
+	printk("%s\n", __FUNCTION__);
+	hcd = usb_hcd_to_pehci_hcd(usb_hcd);
+
+	temp = isp1763_reg_read16(dev, HC_USBCMD_REG, 0);
+	temp &= ~0x01;		/* stop the controller first */
+	isp1763_reg_write16(dev, HC_USBCMD_REG, temp);
+	printk("++ %s: Entered\n", __FUNCTION__);
+
+//	isp1763_free_irq(dev,usb_hcd);
+	usb_remove_hcd(usb_hcd);
+	dev->driver_data = NULL;
+	
+
+	temp = isp1763_reg_read16(dev, HC_INTENABLE_REG, temp); //0xD6
+	temp &= ~0x400;		/*disable otg interrupt*/
+	isp1763_reg_write16(dev, HC_INTENABLE_REG, temp); //0xD6
+
+	isp1763_reg_write16(dev, HC_UNLOCK_DEVICE, 0xAA37);	/*unlock the device 0x7c*/
+	mdelay(1);
+	temp = isp1763_reg_read32(dev, HC_PORTSC1_REG, 0);
+
+
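+	/* if the port reports powered, enabled and connected (0x1005), step it
+	 * through the PORTSC1 write sequence below before forcing suspend; the
+	 * values follow the reference driver rather than a documented
+	 * procedure */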
+	if ((temp & 0x1005) == 0x1005) {
+		isp1763_reg_write32(dev, HC_PORTSC1_REG, 0x1000);
+		temp = isp1763_reg_read32(dev, HC_PORTSC1_REG, 0);
+		mdelay(10);
+		isp1763_reg_write32(dev, HC_PORTSC1_REG, 0x1104);
+		mdelay(10);
+		isp1763_reg_write32(dev, HC_PORTSC1_REG, 0x1007);
+		temp = isp1763_reg_read32(dev, HC_PORTSC1_REG, 0);
+		mdelay(10);
+		isp1763_reg_write32(dev, HC_PORTSC1_REG, 0x1005);
+
+		temp = isp1763_reg_read32(dev, HC_PORTSC1_REG, 0);
+	}
+	
+	printk("port status %x\n ", temp);
+	temp &= ~0x2;
+	temp &= ~0x40;		/*force port resume*/
+	temp |= 0x80;		/*suspend*/
+
+	isp1763_reg_write32(dev, HC_PORTSC1_REG, temp);
+	printk("port status %x\n ", temp);
+	mdelay(200);
+
+	temp = isp1763_reg_read16(dev, HC_HW_MODE_REG, 0);	/*suspend the device first 0xc*/
+	temp |= 0x2c;
+	isp1763_reg_write16(dev, HC_HW_MODE_REG, temp); //0xc
+	mdelay(20);
+
+	temp = isp1763_reg_read16(dev, HC_HW_MODE_REG, 0); //0xc
+	temp = 0xc;
+	isp1763_reg_write16(dev, HC_HW_MODE_REG, temp); //0xc
+
+	isp1763_reg_write32(dev, HC_POWER_DOWN_CONTROL_REG, 0xffff0800);
+
+	wake_unlock(&pehci_wake_lock);
+	wake_lock_destroy(&pehci_wake_lock);
+
+	hcdpowerdown = 1;
+	
+}
+
+static int pehci_bus_suspend(struct usb_hcd *usb_hcd)
+{
+	u32 temp=0;
+	unsigned long flags;
+	phci_hcd *pehci_hcd = NULL;
+	struct isp1763_dev *dev = NULL;
+
+	
+	if (!usb_hcd) {
+		return -EBUSY;
+	}
+	
+	printk("++ %s \n",__FUNCTION__);
+	pehci_hcd = usb_hcd_to_pehci_hcd(usb_hcd);
+
+	dev = pehci_hcd->dev;
+	
+	spin_lock_irqsave(&pehci_hcd->lock, flags);
+	if(hcdpowerdown){
+		spin_unlock_irqrestore(&pehci_hcd->lock, flags);
+		return 0;
+	}
+
+
+	isp1763_reg_write32(dev, HC_USBSTS_REG, 0x4); //0x90
+	isp1763_reg_write32(dev, HC_INTERRUPT_REG_EHCI, 0x4); //0x94
+	isp1763_reg_write16(dev, HC_INTERRUPT_REG, INTR_ENABLE_MASK); //0xd4
+
+	temp=isp1763_reg_read16(dev, HC_INTERRUPT_REG, 0); //0xd4
+
+	isp1763_reg_write16(dev,HC_INTENABLE_REG,INTR_ENABLE_MASK);
+	temp=isp1763_reg_read16(dev,HC_INTENABLE_REG,0);
+
+	hcdpowerdown = 1;
+	
+	/* stop the controller first */
+	temp = isp1763_reg_read16(dev, HC_USBCMD_REG, 0);
+	temp &= ~0x01;		
+	isp1763_reg_write16(dev, HC_USBCMD_REG, temp);
+
+	/* suspend root port which will suspend host controller of the ISP1763A */
+	temp = isp1763_reg_read32(dev, HC_PORTSC1_REG, 0);
+	temp |= (PORT_SUSPEND);//0x80
+	isp1763_reg_write32(dev, HC_PORTSC1_REG, temp);
+	
+	/* suspend device controller of the ISP1763a*/
+	temp = isp1763_reg_read16(dev, HC_HW_MODE_REG, 0);
+	temp |= 0x20;
+	isp1763_reg_write16(dev, HC_HW_MODE_REG, temp);
+	mdelay(1); /* keep this delay short; 1 ms is the maximum needed here */
+	temp &= ~0x20;
+	isp1763_reg_write16(dev, HC_HW_MODE_REG, temp);
+	/* put host controller into low power mode */
+	isp1763_reg_write32(dev, HC_POWER_DOWN_CONTROL_REG, POWER_DOWN_CTRL_SUSPEND_VALUE);
+
+//	usb_hcd->state = HC_STATE_SUSPENDED;
+
+	spin_unlock_irqrestore(&pehci_hcd->lock, flags);
+
+	printk("-- %s \n",__FUNCTION__);
+
+	wake_unlock(&pehci_wake_lock);
+
+	return 0;
+	
+
+}
+
+static int pehci_bus_resume(struct usb_hcd *usb_hcd)
+{
+	u32 temp,i;
+	phci_hcd *pehci_hcd = NULL;
+	struct isp1763_dev *dev = NULL;
+	unsigned long flags;
+	u32 portsc1;
+
+	printk("%s Enter \n",__func__);
+
+	if (!usb_hcd) {
+		return -EBUSY;
+	}
+
+	if(hcdpowerdown ==0){
+		printk("%s already executed\n ",__func__);
+		return 0;
+	}
+
+	pehci_hcd = usb_hcd_to_pehci_hcd(usb_hcd);
+	dev = pehci_hcd->dev;
+	spin_lock_irqsave(&pehci_hcd->lock, flags);
+
+	for (temp = 0; temp < 100; temp++)
+	{
+		i = isp1763_reg_read32(dev, HC_CHIP_ID_REG, 0);
+		if(i==0x176320)
+			break;
+		mdelay(2);
+	}
+	printk("temp=%d, chipid:0x%x \n",temp,i);
+	mdelay(10);
+	isp1763_reg_write16(dev, HC_UNLOCK_DEVICE, 0xAA37);	/*unlock the device 0x7c*/
+	i = isp1763_reg_read32(dev, HC_POWER_DOWN_CONTROL_REG, 0);
+	printk("POWER DOWN CTRL REG value during suspend =0x%x\n", i);
+	for (temp = 0; temp < 100; temp++) {
+		mdelay(1);
+		isp1763_reg_write32(dev, HC_POWER_DOWN_CONTROL_REG, POWER_DOWN_CTRL_NORMAL_VALUE);
+		mdelay(1);
+		i = isp1763_reg_read32(dev, HC_POWER_DOWN_CONTROL_REG, 0);
+		if(i==POWER_DOWN_CTRL_NORMAL_VALUE)
+			break;
+	}
+	if (temp == 100) {
+		spin_unlock_irqrestore(&pehci_hcd->lock, flags);
+		pr_err("%s:isp1763a failed to resume\n", __func__);
+		return -1;
+	}
+
+	wake_lock(&pehci_wake_lock);
+
+	printk("%s: Powerdown Reg Val: 0x%08x -- %d\n", __func__, i, temp);
+
+	isp1763_reg_write32(dev, HC_USBSTS_REG,0x0); //0x90
+	isp1763_reg_write32(dev, HC_INTERRUPT_REG_EHCI, 0x0); //0x94
+	isp1763_reg_write16(dev, HC_INTENABLE_REG,0); //0xD6
+
+	portsc1 = isp1763_reg_read32(dev, HC_PORTSC1_REG, 0);
+	printk("%s PORTSC1: 0x%x\n", __func__, portsc1);
+
+	temp = isp1763_reg_read16(dev, HC_USBCMD_REG, 0);
+	temp |= 0x01;		/* Start the controller */
+	isp1763_reg_write16(dev, HC_USBCMD_REG, temp);
+	mdelay(10);
+
+	temp = isp1763_reg_read32(dev, HC_PORTSC1_REG, 0);
+	if (temp & PORT_SUSPEND)
+		pr_err("%s: HC_PORTSC1_REG: 0x%08x\n", __func__, temp);
+	temp |= PORT_SUSPEND;    //0x80;
+	isp1763_reg_write32(dev, HC_PORTSC1_REG, temp);
+	mdelay(50);
+	temp = isp1763_reg_read32(dev, HC_PORTSC1_REG, 0);
+	temp |= PORT_RESUME;     //0x40;
+	temp &= ~(PORT_SUSPEND); //0x80;		/*suspend*/
+	isp1763_reg_write32(dev, HC_PORTSC1_REG, temp);
+	temp = isp1763_reg_read32(dev, HC_PORTSC1_REG, 0);
+	temp &= ~(PORT_RESUME);  //0x40;
+	isp1763_reg_write32(dev, HC_PORTSC1_REG, temp);
+
+	temp = INTR_ENABLE_MASK;
+	isp1763_reg_write16(dev, HC_INTENABLE_REG, temp); //0xD6
+	temp = isp1763_reg_read32(dev, HC_PORTSC1_REG, 0);
+	printk("%s resume port status: 0x%x\n", __func__, temp);
+	if(!(temp & 0x4)){ //port is disabled
+		isp1763_reg_write16(dev, HC_INTENABLE_REG, 0x1005); //0xD6
+		mdelay(10);
+	}
+//	phci_resume_wakeup(dev);
+
+	hcdpowerdown = 0;
+	if(hubdev){
+		hubdev->hcd_priv    = NULL;
+		hubdev->hcd_suspend = NULL;
+	}
+
+	spin_unlock_irqrestore(&pehci_hcd->lock, flags);
+	printk("%s Leave\n",__func__);
+
+	return 0;
+}
+
+void
+pehci_hcd_resume(struct	isp1763_dev *dev)
+{
+	struct usb_hcd *usb_hcd;
+	u32 temp,i;
+	usb_hcd = (struct usb_hcd *) dev->driver_data;
+	if (!usb_hcd) {
+		return;
+	}
+
+	if(hcdpowerdown ==0){
+		return ;
+	}
+
+	printk("%s \n",__FUNCTION__);
+
+	for (temp = 0; temp < 10; temp++) {
+		i = isp1763_reg_read32(dev, HC_CHIP_ID_REG, 0);
+		printk("temp=%d, chipid:0x%x\n", temp, i);
+		if (i == 0x176320)
+			break;
+		mdelay(1);
+	}
+
+	/* Start the controller */
+	temp = 0x01;		
+	isp1763_reg_write16(dev, HC_USBCMD_REG, temp);
+
+	/* update power down control reg value */
+	for (temp = 0; temp < 100; temp++) {
+		isp1763_reg_write32(dev, HC_POWER_DOWN_CONTROL_REG, POWER_DOWN_CTRL_NORMAL_VALUE);
+		i = isp1763_reg_read32(dev, HC_POWER_DOWN_CONTROL_REG, 0);
+		if (i == POWER_DOWN_CTRL_NORMAL_VALUE)
+			break;
+	}
+	
+	if (temp == 100) {
+		pr_err("%s:isp1763a failed to resume\n", __func__);
+		return;
+	}
+
+	wake_lock(&pehci_wake_lock);
+
+	isp1763_reg_write16(dev, HC_INTENABLE_REG,0); //0xD6
+	isp1763_reg_write32(dev,HC_INTERRUPT_REG_EHCI,0x4); //0x94 
+	isp1763_reg_write32(dev, HC_INTERRUPT_REG, 0xFFFF); //0xd4
+	/* clear suspend bit and resume bit */	
+	temp = isp1763_reg_read32(dev, HC_PORTSC1_REG, 0);
+	temp &= ~(PORT_SUSPEND); //0x80;		/*suspend*/
+	temp &= ~(PORT_RESUME);  // 0x40;
+	isp1763_reg_write32(dev, HC_PORTSC1_REG, temp);
+	
+	isp1763_reg_write16(dev, HC_INTENABLE_REG, INTR_ENABLE_MASK); //0xD6
+	/* this is just to make sure the port is resumed back */
+	mdelay(1);
+	temp = isp1763_reg_read32(dev, HC_PORTSC1_REG, 0);
+	printk("after hcd resume :port status %x\n ", temp);
+	
+	hcdpowerdown = 0;	
+
+	phci_resume_wakeup(dev);
+
+	if(hubdev){
+		hubdev->hcd_priv=NULL;
+		hubdev->hcd_suspend=NULL;
+	}
+//	usb_hcd->state = HC_STATE_RUNNING;
+
+}
+
+
+void
+pehci_hcd_suspend(struct isp1763_dev *dev)
+{
+	struct usb_hcd *usb_hcd;
+	u32 temp;
+	usb_hcd = (struct usb_hcd *) dev->driver_data;
+	if (!usb_hcd) {
+		return;
+	}
+	printk("%s \n",__FUNCTION__);
+	if(hcdpowerdown){
+		return ;
+	}
+
+	temp = isp1763_reg_read16(dev, HC_USBCMD_REG, 0);
+	temp &= ~0x01;		/* stop the controller first */
+	isp1763_reg_write16(dev, HC_USBCMD_REG, temp);
+
+	isp1763_reg_write32(dev, HC_USBSTS_REG, 0x4); //0x90
+	isp1763_reg_write32(dev, HC_INTERRUPT_REG_EHCI, 0x4); //0x94
+	isp1763_reg_write16(dev, HC_INTERRUPT_REG, INTR_ENABLE_MASK); //0xd4
+	
+	temp=isp1763_reg_read16(dev, HC_INTERRUPT_REG, 0); //0xd4
+
+	printk("suspend :Interrupt Status %x\n",temp);
+	isp1763_reg_write16(dev,HC_INTENABLE_REG,INTR_ENABLE_MASK);
+	temp=isp1763_reg_read16(dev,HC_INTENABLE_REG,0);
+	printk("suspend :Interrupt Enable %x\n",temp);
+	temp = isp1763_reg_read32(dev, HC_PORTSC1_REG, 0);
+	
+	printk("suspend :port status %x\n ", temp);
+	temp &= ~0x2;
+	temp &= ~0x40;		/*force port resume*/
+	temp |= 0x80;		/*suspend*/
+//	temp |= 0x700000;	/*WKCNNT_E,WKDSCNNT_E,WKOC_E*/
+	isp1763_reg_write32(dev, HC_PORTSC1_REG, temp);
+  //  mdelay(10);
+	temp = isp1763_reg_read32(dev, HC_PORTSC1_REG, 0);
+	printk("suspend :port status %x\n ", temp);
+	hcdpowerdown = 1;
+
+
+	temp = isp1763_reg_read16(dev,HC_HW_MODE_REG, 0);	/*suspend the device first 0xc*/
+	temp&=0xff7b;
+	isp1763_reg_write16(dev, HC_HW_MODE_REG, temp); //0xc
+
+
+	temp = isp1763_reg_read16(dev, HC_HW_MODE_REG, 0);	/*suspend the device first 0xc*/
+	temp |= 0x20;
+	isp1763_reg_write16(dev, HC_HW_MODE_REG, temp);//0xc
+	mdelay(2);
+	temp = isp1763_reg_read16(dev, HC_HW_MODE_REG, 0);//0xc
+	temp &= 0xffdf;
+	temp &= ~0x20;
+	isp1763_reg_write16(dev, HC_HW_MODE_REG, temp);//0xc
+
+	isp1763_reg_write32(dev, HC_POWER_DOWN_CONTROL_REG, 0xffff0830);
+
+	wake_unlock(&pehci_wake_lock);
+	
+}
+
+void 
+pehci_hcd_remotewakeup(struct isp1763_dev *dev){
+	if(hubdev){
+		hubdev->hcd_priv=dev;
+		hubdev->hcd_suspend=(void *)pehci_hcd_suspend;
+		}
+	phci_remotewakeup(dev);
+}
+
+/*remove the host controller*/
+static void
+pehci_hcd_remove(struct	isp1763_dev *isp1763_dev)
+{
+
+	struct usb_hcd *usb_hcd;
+	
+#ifdef NON_PCI
+#else	/* PCI */
+//	struct pci_dev *dev = isp1763_dev->pcidev;
+#endif
+
+	phci_hcd *hcd =	NULL;
+	u32 temp;
+	usb_hcd	= (struct usb_hcd *) isp1763_dev->driver_data;
+	if (!usb_hcd) {
+		return;
+	}
+	hcd=usb_hcd_to_pehci_hcd(usb_hcd);
+	isp1763_reg_write32(hcd->dev,hcd->regs.hwmodecontrol,0);
+	isp1763_reg_write32(hcd->dev,hcd->regs.interruptenable,0);
+	hubdev=0;
+	huburb=0;
+	temp = isp1763_reg_read16(hcd->dev, HC_USBCMD_REG, 0);
+	temp &= ~0x01;		/* stop the controller first */
+	isp1763_reg_write16(hcd->dev, HC_USBCMD_REG, temp);
+//	isp1763_free_irq(isp1763_dev,usb_hcd);
+	usb_remove_hcd(usb_hcd);
+
+	wake_unlock(&pehci_wake_lock);
+	wake_lock_destroy(&pehci_wake_lock);
+
+	return ;
+}
+
+
+static isp1763_id ids =	{
+	.idVendor = 0x04CC,	/* ST-Ericsson ISP1763 vendor_id */
+	.idProduct = 0x1A64,	/* ST-Ericsson ISP1763 product_id */
+	.driver_info = (unsigned long) &pehci_driver,
+};
+
+/* isp1763 driver glue; registered via isp1763_register_driver() */
+static struct isp1763_driver pehci_hcd_pci_driver = {
+	.name =	(char *) hcd_name,
+	.index = 0,
+	.id = &ids,
+	.probe = pehci_hcd_probe,
+	.remove	= pehci_hcd_remove,
+	.suspend = pehci_hcd_suspend,
+	.resume	= pehci_hcd_resume,
+	.remotewakeup=pehci_hcd_remotewakeup,
+	.powerup	=	pehci_hcd_powerup,
+	.powerdown	=	pehci_hcd_powerdown,
+};
+
+#ifdef HCD_PACKAGE
+int
+usb_hcddev_open(struct inode *inode, struct file *fp)
+{
+
+	return 0;
+}
+
+int
+usb_hcddev_close(struct inode *inode, struct file *fp)
+{
+
+	return 0;
+}
+
+int
+usb_hcddev_fasync(int fd, struct file *fp, int mode)
+{
+
+	return fasync_helper(fd, fp, mode, &fasync_q);
+}
+
+long
+usb_hcddev_ioctl(struct file *fp,
+		 unsigned int cmd, unsigned long arg)
+{
+
+	switch (cmd) {
+	case HCD_IOC_POWERDOWN:	/* SET HCD DEEP SUSPEND MODE */
+		printk("HCD IOC POWERDOWN MODE\n");
+		if(isp1763_hcd->driver->powerdown)
+			isp1763_hcd->driver->powerdown(isp1763_hcd);
+
+		break;
+
+	case HCD_IOC_POWERUP:	/* Set HCD POWER UP */
+		printk("HCD IOC POWERUP MODE\n");
+		if(isp1763_hcd->driver->powerup)
+			isp1763_hcd->driver->powerup(isp1763_hcd);
+
+		break;
+	case HCD_IOC_TESTSE0_NACK:
+		HostComplianceTest = HOST_COMPILANCE_TEST_ENABLE;
+		HostTest = HOST_COMP_TEST_SE0_NAK;
+		break;
+	case   HCD_IOC_TEST_J:		
+		HostComplianceTest = HOST_COMPILANCE_TEST_ENABLE;
+		HostTest = HOST_COMP_TEST_J;
+		break;
+	case    HCD_IOC_TEST_K:
+		HostComplianceTest = HOST_COMPILANCE_TEST_ENABLE;
+		HostTest = HOST_COMP_TEST_K;
+		break;
+		
+	case   HCD_IOC_TEST_TESTPACKET:
+		HostComplianceTest = HOST_COMPILANCE_TEST_ENABLE;
+		HostTest = HOST_COMP_TEST_PACKET;
+		break;
+	case HCD_IOC_TEST_FORCE_ENABLE:
+		HostComplianceTest = HOST_COMPILANCE_TEST_ENABLE;
+		HostTest = HOST_COMP_TEST_FORCE_ENABLE;
+		break;
+	case	HCD_IOC_TEST_SUSPEND_RESUME:
+		HostComplianceTest = HOST_COMPILANCE_TEST_ENABLE;
+		HostTest = HOST_COMP_HS_HOST_PORT_SUSPEND_RESUME;
+		break;
+	case HCD_IOC_TEST_SINGLE_STEP_GET_DEV_DESC:
+		HostComplianceTest = HOST_COMPILANCE_TEST_ENABLE;
+		HostTest = HOST_COMP_SINGLE_STEP_GET_DEV_DESC;		
+		break;
+	case HCD_IOC_TEST_SINGLE_STEP_SET_FEATURE:
+		HostComplianceTest = HOST_COMPILANCE_TEST_ENABLE;
+		HostTest = HOST_COMP_SINGLE_STEP_SET_FEATURE;		
+		break;
+	case HCD_IOC_TEST_STOP:
+		HostComplianceTest = 0;
+		HostTest = 0;		
+		break;
+	case     HCD_IOC_SUSPEND_BUS:
+		printk("isp1763:SUSPEND bus\n");
+		if(isp1763_hcd->driver->suspend)
+			isp1763_hcd->driver->suspend(isp1763_hcd);
+		break;
+	case	HCD_IOC_RESUME_BUS:
+		printk("isp1763:RESUME bus\n");
+		if(isp1763_hcd->driver->resume)
+			isp1763_hcd->driver->resume(isp1763_hcd);		
+		break;
+	case     HCD_IOC_REMOTEWAKEUP_BUS:
+		printk("isp1763:SUSPEND bus\n");
+		if(isp1763_hcd->driver->remotewakeup)
+			isp1763_hcd->driver->remotewakeup(isp1763_hcd);
+		break;		
+	default:
+
+		break;
+
+	}
+	return 0;
+}
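+
+/*
+ * Usage sketch (hypothetical userspace side, not part of this driver):
+ * the ioctl codes handled above are issued against the character device
+ * registered in pehci_module_init() under USB_HCD_MODULE_NAME, e.g.
+ *
+ *	int fd = open("/dev/isp1763hcd", O_RDWR);
+ *	if (fd >= 0)
+ *		ioctl(fd, HCD_IOC_SUSPEND_BUS);
+ *
+ * The device node path and its creation are assumptions; only the ioctl
+ * codes themselves are defined by this driver.
+ */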
+
+
+/* HCD file operations */
+static struct file_operations usb_hcddev_fops = {
+	.owner		= THIS_MODULE,
+	.read		= NULL,
+	.write		= NULL,
+	.poll		= NULL,
+	.unlocked_ioctl	= usb_hcddev_ioctl,
+	.open		= usb_hcddev_open,
+	.release	= usb_hcddev_close,
+	.fasync		= usb_hcddev_fasync,
+};
+
+#endif
+
+
+static int __init
+pehci_module_init(void)
+{
+	int result = 0;
+	phci_hcd_mem_init();
+
+	/*register driver */
+	result = isp1763_register_driver(&pehci_hcd_pci_driver);
+	if (!result) {
+		info("Host Driver has been Registered");
+	} else {
+		err("Host Driver has not been Registered with errors : %x",
+			result);
+	}
+
+#ifdef THREAD_BASED 	
+	pehci_hcd_process_irq_in_thread(&(g_pehci_hcd->usb_hcd));
+   	printk("kernel_thread() Enter\n"); 
+#endif
+	
+#ifdef HCD_PACKAGE
+	printk("Register Char Driver for HCD\n");
+	result = register_chrdev(USB_HCD_MAJOR, USB_HCD_MODULE_NAME,
+		&usb_hcddev_fops);
+	
+#endif
+	return result;
+
+}
+
+static void __exit
+pehci_module_cleanup(void)
+{
+#ifdef THREAD_BASED	
+	printk("module exit:  Sending signal to stop thread\n");
+	if (g_stUsbItThreadHandler.phThreadTask != NULL)
+	{
+		send_sig(SIGKILL, g_stUsbItThreadHandler.phThreadTask, 1);
+		mdelay(6);
+	}
+#endif
+
+#ifdef HCD_PACKAGE
+	unregister_chrdev(USB_HCD_MAJOR, USB_HCD_MODULE_NAME);
+#endif
+	isp1763_unregister_driver(&pehci_hcd_pci_driver);
+}
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE("GPL");
+module_init(pehci_module_init);
+module_exit(pehci_module_cleanup);
diff --git a/drivers/usb/host/pehci/host/pehci.h b/drivers/usb/host/pehci/host/pehci.h
new file mode 100644
index 0000000..cc6a06b
--- /dev/null
+++ b/drivers/usb/host/pehci/host/pehci.h
@@ -0,0 +1,752 @@
+/* 
+* Copyright (C) ST-Ericsson AP Pte Ltd 2010 
+*
+* ISP1763 Linux OTG Controller driver : host
+* 
+* This program is free software; you can redistribute it and/or modify it under the terms of 
+* the GNU General Public License as published by the Free Software Foundation; version 
+* 2 of the License. 
+* 
+* This program is distributed in the hope that it will be useful, but WITHOUT ANY  
+* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS  
+* FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more  
+* details. 
+* 
+* You should have received a copy of the GNU General Public License 
+* along with this program; if not, write to the Free Software 
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. 
+* 
+* Refer to file ~/drivers/usb/host/ehci-dbg.h for copyright owners (kernel version 2.6.9)
+* Code is modified for ST-Ericsson product 
+* 
+* Author : wired support <wired.support@stericsson.com>
+*
+*/
+
+#ifndef	__PEHCI_H__
+#define	__PEHCI_H__
+
+
+#define	DRIVER_AUTHOR	"ST-ERICSSON	  "
+#define	DRIVER_DESC "ISP1763 'Enhanced'	Host Controller	(EHCI) Driver"
+
+/*    bus related stuff	*/
+#define	__ACTIVE		0x01
+#define	__SLEEPY		0x02
+#define	__SUSPEND		0x04
+#define	__TRANSIENT		0x80
+
+#define	USB_STATE_HALT		0
+#define	USB_STATE_RUNNING	(__ACTIVE)
+#define	USB_STATE_READY		(__ACTIVE|__SLEEPY)
+#define	USB_STATE_QUIESCING	(__SUSPEND|__TRANSIENT|__ACTIVE)
+#define	USB_STATE_RESUMING	(__SUSPEND|__TRANSIENT)
+#define	USB_STATE_SUSPENDED	(__SUSPEND)
+
+/* System flags	 */
+#define	HCD_MEMORY		0x0001
+#define	HCD_USB2		0x0020
+#define	HCD_USB11		0x0010
+
+#define	HCD_IS_RUNNING(state) ((state) & __ACTIVE)
+#define	HCD_IS_SUSPENDED(state)	((state) & __SUSPEND)
+
+
+/*---------------------------------------------------
+ *    Host controller related
+ -----------------------------------------------------*/
+/* IRQ line for	the ISP1763 */
+#define	HCD_IRQ			IRQ_GPIO(25)
+#define	CMD_RESET		(1<<1)	/* reset HC not	bus */
+#define	CMD_RUN			(1<<0)	/* start/stop HC */
+#define	STS_PCD			(1<<2)	/* port	change detect */
+/* NOTE:  urb->transfer_flags expected to not use this bit !!! */
+#define	EHCI_STATE_UNLINK	0x8000	/* urb being unlinked */
+
+/*  Bit definitions for qha */
+/* Bits	PID*/
+#define	SETUP_PID		(2)
+#define	OUT_PID			(0)
+#define	IN_PID			(1)
+
+/* Bits	MULTI*/
+#define	MULTI(x)		((x)<< 29)
+#define	XFER_PER_UFRAME(x)	(((x) >> 29) & 0x3)
+
+/*Active, EP type and speed bits */
+#define	QHA_VALID		(1<<0)
+#define	QHA_ACTIVE		(1<<31)
+
+/*1763 error bit maps*/
+#define	HC_MSOF_INT		(1<< 0)
+#define	HC_MSEC_INT		(1 << 1)
+#define	HC_EOT_INT		(1 << 3)
+#define     HC_OPR_REG_INT	(1<<4)
+#define     HC_CLK_RDY_INT	(1<<6)
+#define	HC_INTL_INT		(1 << 7)
+#define	HC_ATL_INT		(1 << 8)
+#define	HC_ISO_INT		(1 << 9)
+#define	HC_OTG_INT		(1 << 10)
+
+/*PTD error codes*/
+#define	PTD_STATUS_HALTED	(1 << 30)
+#define	PTD_XACT_ERROR		(1 << 28)
+#define	PTD_BABBLE		(1 << 29)
+#define PTD_ERROR		(PTD_STATUS_HALTED | PTD_XACT_ERROR | PTD_BABBLE)
+/*ep types*/
+#define	EPTYPE_BULK		(2 << 12)
+#define	EPTYPE_CONTROL		(0 << 12)
+#define	EPTYPE_INT		(3 << 12)
+#define	EPTYPE_ISO		(1 << 12)
+
+#define	PHCI_QHA_LENGTH		32
+
+#define usb_inc_dev_use		usb_get_dev
+#define usb_dec_dev_use		usb_put_dev
+#define usb_free_dev		usb_put_dev
+/*1763 host controller periodic	size*/
+#define PTD_PERIODIC_SIZE	16
+#define MAX_PERIODIC_SIZE	16
+#define PTD_FRAME_MASK		0x1f
+/*periodic list*/
+struct _periodic_list {
+	int framenumber;
+	struct list_head sitd_itd_head;
+	char high_speed;	/*1 - HS ; 0 - FS*/
+	u16 ptdlocation;
+};
+typedef	struct _periodic_list periodic_list;
+
+
+/*iso ptd*/
+struct _isp1763_isoptd {
+	u32 td_info1;
+	u32 td_info2;
+	u32 td_info3;
+	u32 td_info4;
+	u32 td_info5;
+	u32 td_info6;
+	u32 td_info7;
+	u32 td_info8;
+} __attribute__	((aligned(32)));
+
+typedef	struct _isp1763_isoptd isp1763_isoptd;
+
+struct _isp1763_qhint {
+	u32 td_info1;
+	u32 td_info2;
+	u32 td_info3;
+	u32 td_info4;
+	u32 td_info5;
+#define	INT_UNDERRUN (1	<< 2)
+#define	INT_BABBLE    (1 << 1)
+#define	INT_EXACT     (1 << 0)
+	u32 td_info6;
+	u32 td_info7;
+	u32 td_info8;
+} __attribute__	((aligned(32)));
+
+typedef	struct _isp1763_qhint isp1763_qhint;
+
+
+struct _isp1763_qha {
+	u32 td_info1;		/* First 32 bit	*/
+	u32 td_info2;		/* Second 32 bit */
+	u32 td_info3;		/* third 32 bit	*/
+	u32 td_info4;		/* fourth 32 bit */
+	u32 reserved[4];
+};
+typedef	struct _isp1763_qha isp1763_qha, *pisp1763_qha;
+
+
+
+
+/*this does not	cover all interrupts in	1763 chip*/
+typedef	struct _ehci_regs {
+
+	/*standard ehci	registers */
+	u32 command;
+	u32 usbinterrupt;
+	u32 usbstatus;
+	u32 hcsparams;
+	u32 frameindex;
+
+	/*isp1763 interrupt specific registers */
+	u16 hwmodecontrol;
+	u16 interrupt;
+	u16 interruptenable;
+	u32 interruptthreshold;
+	u16 iso_irq_mask_or;
+	u16 int_irq_mask_or;
+	u16 atl_irq_mask_or;
+	u16 iso_irq_mask_and;
+	u16 int_irq_mask_and;
+	u16 atl_irq_mask_and;
+	u16 buffer_status;
+
+	/*isp1763 initialization registers */
+	u32 reset;
+	u32 configflag;
+	u32 ports[4];
+	u32 pwrdwn_ctrl;
+
+	/*isp1763 transfer specific registers */
+	u16 isotddonemap;
+	u16 inttddonemap;
+	u16 atltddonemap;
+	u16 isotdskipmap;
+	u16 inttdskipmap;
+	u16 atltdskipmap;
+	u16 isotdlastmap;
+	u16 inttdlastmap;
+	u16 atltdlastmap;
+	u16 scratch;
+
+} ehci_regs, *pehci_regs;
+
+/*memory management structures*/
+#define MEM_KV
+#ifdef MEM_KV
+typedef struct isp1763_mem_addr {
+	u32 phy_addr;		/* Physical address of the memory */
+	u32 virt_addr;		/* after ioremap() function call */
+	u8 num_alloc;		/* number of smaller sub-blocks allocated from this block, used when clearing */
+	u32 blk_size;		/*block size */
+	u8 blk_num;		/* number of the block */
+	u8 used;		/*used/free */
+} isp1763_mem_addr_t;
+#else
+typedef struct isp1763_mem_addr {
+	void *phy_addr;		/* Physical address of the memory */
+	void *virt_addr;	/* after ioremap() function call */
+	u8 usage;
+	u32 blk_size;		/*block size */
+} isp1763_mem_addr_t;
+
+#endif
+/* type	tag from {qh,itd,sitd,fstn}->hw_next */
+#define	Q_NEXT_TYPE(dma) ((dma)	& __constant_cpu_to_le32 (3 << 1))
+
+/* values for that type	tag */
+#define	Q_TYPE_ITD	__constant_cpu_to_le32 (0 << 1)
+#define	Q_TYPE_QH	__constant_cpu_to_le32 (1 << 1)
+#define	Q_TYPE_SITD	__constant_cpu_to_le32 (2 << 1)
+#define	Q_TYPE_FSTN	__constant_cpu_to_le32 (3 << 1)
+
+/*next queuehead in execution*/
+#define	QH_NEXT(dma)	cpu_to_le32((u32)dma)
+
+struct ehci_qh {
+	/* first part defined by EHCI spec */
+	u32 hw_next;		/* see EHCI 3.6.1 */
+	u32 hw_info1;		/* see EHCI 3.6.2 */
+
+	u32 hw_info2;		/* see EHCI 3.6.2 */
+	u32 hw_current;		/* qtd list - see EHCI 3.6.4 */
+
+	/* qtd overlay (hardware parts of a struct ehci_qtd) */
+	u32 hw_qtd_next;
+	u32 hw_alt_next;
+	u32 hw_token;
+	u32 hw_buf[5];
+	u32 hw_buf_hi[5];
+	
+	/* the rest is HCD-private */
+	dma_addr_t qh_dma;	/* address of qh */
+	struct list_head qtd_list;	/* sw qtd list */
+	struct ehci_qtd	*dummy;
+	struct ehci_qh *reclaim;	/* next	to reclaim */
+
+	atomic_t refcount;
+	wait_queue_head_t waitforcomplete;
+	unsigned stamp;
+
+	u8 qh_state;
+
+	/* periodic schedule info */
+	u8 usecs;		/* intr	bandwidth */
+	u8 gap_uf;		/* uframes split/csplit	gap */
+	u8 c_usecs;		/* ... split completion	bw */
+	unsigned short period;	/* polling interval */
+	unsigned short start;	/* where polling starts	*/
+	u8 datatoggle;		/*data toggle */
+
+	/*handling the ping stuffs */
+	u8 ping;		/*ping bit */
+
+	/*qtd <-> ptd management */
+
+	u32 qtd_ptd_index;	/* Td-PTD map index for	this ptd */
+	u32 type;		/* endpoint type */
+
+	/*iso stuffs */
+	struct usb_host_endpoint *ep;
+	int next_uframe;	/*next uframe for this endpoint	*/
+	struct list_head itd_list;	/*list of tds to this endpoint */
+	isp1763_mem_addr_t memory_addr;
+	struct _periodic_list periodic_list;
+	/*scheduling requirements for this endpoint */
+	u32 ssplit;
+	u32 csplit;
+	u8 totalptds;   // total number of PTDs needed for current URB
+	u8 actualptds;	// scheduled PTDs until now for current URB
+};
+
+/* urb private part for	the driver. */
+typedef	struct {
+	struct ehci_qh *qh;
+	u16 length;		/* number of tds associated with this request */
+	u16 td_cnt;		/* number of tds already serviced */
+	int state;		/* State machine state when URB	is deleted  */
+	int timeout;		/* timeout for bulk transfers */
+	wait_queue_head_t wait;	/* wait	State machine state when URB is	deleted	*/
+	/*FIX solve the	full speed dying */
+	struct timer_list urb_timer;
+	struct list_head qtd_list;
+	struct ehci_qtd	*qtd[0];	/* list	pointer	to all corresponding TDs associated with this request */
+
+} urb_priv_t;
+
+/*
+ * EHCI	Specification 0.95 Section 3.6
+ * QH: describes control/bulk/interrupt	endpoints
+ * See Fig 3-7 "Queue Head Structure Layout".
+ *
+ * These appear	in both	the async and (for interrupt) periodic schedules.
+ */
+
+
+/*Definitions required for the ehci Queuehead */
+#define	QH_HEAD			0x00008000
+#define	QH_STATE_LINKED		1	/* HC sees this	*/
+#define	QH_STATE_UNLINK		2	/* HC may still	see this */
+#define	QH_STATE_IDLE		3	/* HC doesn't see this */
+#define	QH_STATE_UNLINK_WAIT	4	/* LINKED and on reclaim q */
+#define	QH_STATE_COMPLETING	5	/* don't touch token.HALT */
+#define	QH_STATE_TAKE_NEXT	8	/*take the new transfer	from */
+#define	NO_FRAME ((unsigned short)~0)	/* pick	new start */
+
+
+#define EHCI_ITD_TRANLENGTH	0x0fff0000	/*transaction length */
+#define EHCI_ITD_PG		0x00007000	/*page select */
+#define EHCI_ITD_TRANOFFSET	0x00000fff	/*transaction offset */
+#define EHCI_ITD_BUFFPTR	0xfffff000	/*buffer pointer */
+
+struct ehci_sitd {
+	/* first part defined by EHCI spec */
+	u32 hw_next;		/* see EHCI 3.3.1 */
+	u32 hw_transaction[8];	/* see EHCI 3.3.2 */
+#define EHCI_ISOC_ACTIVE	(1<<31)	/* activate transfer this slot */
+#define EHCI_ISOC_BUF_ERR	(1<<30)	/* Data buffer error */
+#define EHCI_ISOC_BABBLE	(1<<29)	/* babble detected */
+#define EHCI_ISOC_XACTERR	(1<<28)	/* XactErr - transaction error */
+
+#define EHCI_ITD_LENGTH(tok)	(((tok)>>16) & 0x7fff)
+#define EHCI_ITD_IOC		(1 << 15)	/* interrupt on complete */
+
+	u32 hw_bufp[7];		/* see EHCI 3.3.3 */
+	u32 hw_bufp_hi[7];	/* Appendix B */
+
+	/* the rest is HCD-private */
+	dma_addr_t sitd_dma;	/* for this itd */
+	struct urb *urb;
+	struct list_head sitd_list;	/* list of urb frames' itds */
+	dma_addr_t buf_dma;	/* frame's buffer address */
+
+	/* for now, only one hw_transaction per itd */
+	u32 transaction;
+	u16 index;		/* in urb->iso_frame_desc */
+	u16 uframe;		/* in periodic schedule */
+	u16 usecs;
+	/*memory address */
+	struct isp1763_mem_addr mem_addr;
+	int length;
+	u32 framenumber;
+	u32 ptdframe;
+	int sitd_index;
+	/*scheduling fields */
+	u32 ssplit;
+	u32 csplit;
+	u32 start_frame;
+};
+
+struct ehci_itd	{
+	/* first part defined by EHCI spec */
+	u32 hw_next;		/* see EHCI 3.3.1 */
+	u32 hw_transaction[8];	/* see EHCI 3.3.2 */
+#define	EHCI_ISOC_ACTIVE	(1<<31)	/* activate transfer this slot */
+#define	EHCI_ISOC_BUF_ERR	(1<<30)	/* Data	buffer error */
+#define	EHCI_ISOC_BABBLE	(1<<29)	/* babble detected */
+#define	EHCI_ISOC_XACTERR	(1<<28)	/* XactErr - transaction error */
+
+#define	EHCI_ITD_LENGTH(tok)	(((tok)>>16) & 0x7fff)
+#define	EHCI_ITD_IOC		(1 << 15)	/* interrupt on	complete */
+
+	u32 hw_bufp[7];		/* see EHCI 3.3.3 */
+	u32 hw_bufp_hi[7];	/* Appendix B */
+
+	/* the rest is HCD-private */
+	dma_addr_t itd_dma;	/* for this itd	*/
+	struct urb *urb;
+	struct list_head itd_list;	/* list	of urb frames' itds */
+	dma_addr_t buf_dma;	/* frame's buffer address */
+	u8 num_of_pkts;		/*number of packets for this ITD */
+	/* for now, only one hw_transaction per	itd */
+	u32 transaction;
+	u16 index;		/* in urb->iso_frame_desc */
+	u16 uframe;		/* in periodic schedule	*/
+	u16 usecs;
+	/*memory address */
+	struct isp1763_mem_addr	mem_addr;
+	int length;
+	u32 multi;
+	u32 framenumber;
+	u32 ptdframe;
+	int itd_index;
+	/*scheduling fields */
+	u32 ssplit;
+	u32 csplit;
+};
+
+/*
+ * EHCI	Specification 0.95 Section 3.5
+ * QTD:	describe data transfer components (buffer, direction, ...)
+ * See Fig 3-6 "Queue Element Transfer Descriptor Block	Diagram".
+ *
+ * These are associated	only with "QH" (Queue Head) structures,
+ * used	with control, bulk, and	interrupt transfers.
+ */
+struct ehci_qtd	{
+	/* first part defined by EHCI spec */
+	u32 hw_next;		/* see EHCI 3.5.1 */
+	u32 hw_alt_next;	/* see EHCI 3.5.2 */
+	u32 hw_token;		/* see EHCI 3.5.3 */
+
+	u32 hw_buf[5];		/* see EHCI 3.5.4 */
+	u32 hw_buf_hi[5];	/* Appendix B */
+
+	/* the rest is HCD-private */
+	dma_addr_t qtd_dma;	/* qtd address */
+	struct list_head qtd_list;	/* sw qtd list */
+	struct urb *urb;	/* qtd's urb */
+	size_t length;		/* length of buffer */
+	u32 state;		/*state	of the qtd */
+#define	QTD_STATE_NEW			0x100
+#define	QTD_STATE_DONE			0x200
+#define	QTD_STATE_SCHEDULED		0x400
+#define	QTD_STATE_LAST			0x800
+	struct isp1763_mem_addr	mem_addr;
+};
+
+#define	QTD_TOGGLE			(1 << 31)	/* data	toggle */
+#define	QTD_LENGTH(tok)			(((tok)>>16) & 0x7fff)
+#define	QTD_IOC				(1 << 15)	/* interrupt on	complete */
+#define	QTD_CERR(tok)			(((tok)>>10) & 0x3)
+#define	QTD_PID(tok)			(((tok)>>8) & 0x3)
+#define	QTD_STS_ACTIVE			(1 << 7)	/* HC may execute this */
+#define	QTD_STS_HALT			(1 << 6)	/* halted on error */
+#define	QTD_STS_DBE			(1 << 5)	/* data	buffer error (in HC) */
+#define	QTD_STS_BABBLE			(1 << 4)	/* device was babbling (qtd halted) */
+#define	QTD_STS_XACT			(1 << 3)	/* device gave illegal response	*/
+#define	QTD_STS_MMF			(1 << 2)	/* incomplete split transaction	*/
+#define	QTD_STS_STS			(1 << 1)	/* split transaction state */
+#define	QTD_STS_PING			(1 << 0)	/* issue PING? */
+
+/* for periodic/async schedules	and qtd	lists, mark end	of list	*/
+#define	EHCI_LIST_END	__constant_cpu_to_le32(1)	/* "null pointer" to hw	*/
+#define	QTD_NEXT(dma)	cpu_to_le32((u32)dma)
+
+struct _phci_driver;
+struct _isp1763_hcd;
+#define	EHCI_MAX_ROOT_PORTS 1
+
+#include <linux/usb/hcd.h>
+
+#define USBNET
+#ifdef USBNET 
+struct isp1763_async_cleanup_urb {
+        struct list_head urb_list;
+        struct urb *urb;
+};
+#endif
+
+
+/*host controller*/
+typedef	struct _phci_hcd {
+
+	struct usb_hcd usb_hcd;
+	spinlock_t lock;
+
+	/* async schedule support */
+	struct ehci_qh *async;
+	struct ehci_qh *reclaim;
+	/* periodic schedule support */
+	unsigned periodic_size;
+	int next_uframe;	/* scan	periodic, start	here */
+	int periodic_sched;	/* periodic activity count */
+	int periodic_more_urb;
+	struct usb_device *otgdev;	/*otg device, with address 2 */
+	struct timer_list rh_timer;	/* drives root hub */
+	struct list_head dev_list;	/* devices on this bus */
+	struct list_head urb_list;	/*iso testing */
+
+	/*msec break in	interrupts */
+	atomic_t nuofsofs;
+	atomic_t missedsofs;
+
+	struct isp1763_dev *dev;
+	/*hw info */
+	u8 *iobase;
+	u32 iolength;
+	u8 *plxiobase;
+	u32 plxiolength;
+
+	int irq;		/* irq allocated */
+	int state;		/*state	of the host controller */
+	unsigned long reset_done[EHCI_MAX_ROOT_PORTS];
+	ehci_regs regs;
+
+	struct _isp1763_qha qha;
+	struct _isp1763_qhint qhint;
+	struct _isp1763_isoptd isotd;
+
+	struct tasklet_struct tasklet;
+	/*this timer is	going to run every 20 msec */
+	struct timer_list watchdog;
+	void (*worker_function)	(struct	_phci_hcd * hcd);
+	struct _periodic_list periodic_list[PTD_PERIODIC_SIZE];
+#ifdef USBNET 
+	struct isp1763_async_cleanup_urb cleanup_urb;
+#endif
+} phci_hcd, *pphci_hcd;
+
+/*usb_device->hcpriv, points to	this structure*/
+typedef	struct hcd_dev {
+	struct list_head dev_list;
+	struct list_head urb_list;
+} hcd_dev;
+
+#define	usb_hcd_to_pehci_hcd(hcd)   container_of(hcd, struct _phci_hcd,	usb_hcd)
+
+/*td allocation*/
+#ifdef CONFIG_PHCI_MEM_SLAB
+
+#define	qha_alloc(t,c) kmem_cache_alloc(c,ALLOC_FLAGS)
+#define	qha_free(c,x) kmem_cache_free(c,x)
+static kmem_cache_t *qha_cache,	*qh_cache, *qtd_cache;
+static int
+phci_hcd_mem_init(void)
+{
+	/* qha TDs accessed by controllers and host */
+	qha_cache = kmem_cache_create("phci_ptd", sizeof(isp1763_qha), 0,
+				      SLAB_HWCACHE_ALIGN, NULL,	NULL);
+	if (!qha_cache)	{
+		printk("no TD cache?");
+		return -ENOMEM;
+	}
+
+	/* qh TDs accessed by controllers and host */
+	qh_cache = kmem_cache_create("phci_ptd", sizeof(isp1763_qha), 0,
+				     SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (!qh_cache) {
+		printk("no TD cache?");
+		return -ENOMEM;
+	}
+
+	/* qtd	accessed by controllers	and host */
+	qtd_cache = kmem_cache_create("phci_ptd", sizeof(isp1763_qha), 0,
+				      SLAB_HWCACHE_ALIGN, NULL,	NULL);
+	if (!qtd_cache)	{
+		printk("no TD cache?");
+		return -ENOMEM;
+	}
+	return 0;
+}
+static void
+phci_mem_cleanup(void)
+{
+	if (qha_cache && kmem_cache_destroy(qha_cache))
+		err("td_cache remained");
+	qha_cache = 0;
+}
+#else
+
+#define	qha_alloc(t,c)			kmalloc(t,ALLOC_FLAGS)
+#define	qha_free(c,x)			kfree(x)
+#define	qha_cache			0
+
+
+#ifdef CONFIG_ISO_SUPPORT
+/*memory constants*/
+#define BLK_128_	2
+#define BLK_256_	3
+#define BLK_1024_	1
+#define BLK_2048_	3
+#define BLK_4096_	3 //1
+#define BLK_8196_	0 //1
+#define BLK_TOTAL	(BLK_128_+BLK_256_ + BLK_1024_ +BLK_2048_+ BLK_4096_+BLK_8196_)
+
+#define BLK_SIZE_128	128
+#define BLK_SIZE_256	256
+#define BLK_SIZE_1024	1024
+#define BLK_SIZE_2048	2048
+#define BLK_SIZE_4096	4096
+#define BLK_SIZE_8192	8192
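+
+/*
+ * With the block counts above this works out to
+ * BLK_TOTAL = 2 + 3 + 1 + 3 + 3 + 0 = 12 payload blocks, i.e.
+ * 2*128 + 3*256 + 1*1024 + 3*2048 + 3*4096 + 0*8192 = 20480 bytes (20 KB)
+ * of payload memory carved into these pools.
+ */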
+
+#define  COMMON_MEMORY	1
+
+#else
+#define BLK_256_	8
+#define BLK_1024_	6
+#define BLK_4096_	3
+#define BLK_TOTAL	(BLK_256_ + BLK_1024_ + BLK_4096_)
+#define BLK_SIZE_256	256
+#define BLK_SIZE_1024	1024
+#define BLK_SIZE_4096	4096
+#endif
+static void phci_hcd_mem_init(void);
+static inline void
+phci_mem_cleanup(void)
+{
+	return;
+}
+
+#endif
+
+#define	PORT_WKOC_E			(1<<22)	/* wake	on overcurrent (enable)	*/
+#define	PORT_WKDISC_E			(1<<21)	/* wake	on disconnect (enable) */
+#define	PORT_WKCONN_E			(1<<20)	/* wake	on connect (enable) */
+/* 19:16 for port testing */
+/* 15:14 for using port	indicator leds (if HCS_INDICATOR allows) */
+#define	PORT_OWNER			(1<<13)	/* true: companion hc owns this	port */
+#define	PORT_POWER			(1<<12)	/* true: has power (see	PPC) */
+#define	PORT_USB11(x)			(((x)&(3<<10))==(1<<10))	/* USB 1.1 device */
+/* 11:10 for detecting lowspeed	devices	(reset vs release ownership) */
+/* 9 reserved */
+#define	PORT_RESET			(1<<8)	/* reset port */
+#define	PORT_SUSPEND			(1<<7)	/* suspend port	*/
+#define	PORT_RESUME			(1<<6)	/* resume it */
+#define	PORT_OCC			(1<<5)	/* over	current	change */
+
+#define	PORT_OC				(1<<4)	/* over	current	active */
+#define	PORT_PEC			(1<<3)	/* port	enable change */
+#define	PORT_PE				(1<<2)	/* port	enable */
+#define	PORT_CSC			(1<<1)	/* connect status change */
+#define	PORT_CONNECT			(1<<0)	/* device connected */
+#define PORT_RWC_BITS	(PORT_CSC | PORT_PEC | PORT_OCC)	
+/*Legends,
+ * ATL	  control, bulk	transfer
+ * INTL	  interrupt transfer
+ * ISTL	  iso transfer
+ * */
+
+/*buffer(transfer) bitmaps*/
+#define	ATL_BUFFER			0x1
+#define	INT_BUFFER			0x2
+#define	ISO_BUFFER			0x4
+#define	BUFFER_MAP			0x7
+
+/* buffer type for ST-ERICSSON HC */
+#define	TD_PTD_BUFF_TYPE_ATL		0	/* ATL buffer */
+#define	TD_PTD_BUFF_TYPE_INTL		1	/* INTL	buffer */
+#define	TD_PTD_BUFF_TYPE_ISTL		2	/* ISO buffer */
+#define	TD_PTD_TOTAL_BUFF_TYPES		(TD_PTD_BUFF_TYPE_ISTL +1)
+/*maximum number of tds	per transfer type*/
+#define	TD_PTD_MAX_BUFF_TDS		16
+
+/*invalid td index in the headers*/
+#define	TD_PTD_INV_PTD_INDEX		0xFFFF
+/*Host controller buffer definition*/
+#define	INVALID_FRAME_NUMBER		0xFFFFFFFF
+/*per td transfer size*/
+#define	HC_ATL_PL_SIZE			4096
+#define	HC_ISTL_PL_SIZE			1024
+#define	HC_INTL_PL_SIZE			1024
+
+/*TD_PTD_MAP states*/
+#define	TD_PTD_NEW			0x0000
+#define	TD_PTD_ACTIVE			0x0001
+#define	TD_PTD_IDLE			0x0002
+#define	TD_PTD_REMOVE			0x0004
+#define	TD_PTD_RELOAD			0x0008
+#define	TD_PTD_IN_SCHEDULE		0x0010
+#define	TD_PTD_DONE			0x0020
+
+#define	PTD_RETRY(x)			(((x) >> 23) & 0x3)
+#define	PTD_PID(x)			(((x) >> 10) & (0x3))
+#define	PTD_NEXTTOGGLE(x)		(((x) >> 25) & (0x1))
+#define	PTD_XFERRED_LENGTH(x)		((x) & 0x7fff)
+#define	PTD_XFERRED_NONHSLENGTH(x)	((x) & 0x7ff)
+#define	PTD_PING_STATE(x)		(((x) >> 26) & (0x1))
+
+/* urb state*/
+#define	DELETE_URB			0x0008
+#define	NO_TRANSFER_ACTIVE		0xFFFF
+#define	NO_TRANSFER_DONE		0x0000
+#define	MAX_PTD_BUFFER_SIZE		4096	/*max ptd size */
+
+/*information of the td	in headers of host memory*/
+typedef	struct td_ptd_map {
+	u32 state;		/* ACTIVE, NEW,	TO_BE_REMOVED */
+	u8 datatoggle;		/*to preserve the data toggle for ATL/ISTL transfers */
+	u32 ptd_bitmap;		/* Bitmap of this ptd in HC headers */
+	u32 ptd_header_addr;	/* headers address of  this td */
+	u32 ptd_data_addr;	/*data address of this td to write in and read from */
+	/*this address is the actual RAM address, not the CPU address:
+	 * RAM address = (CPU ADDRESS - 0x400) >> 3
+	 * e.g. a payload at CPU address 0x1000 has RAM address (0x1000 - 0x400) >> 3 = 0x180
+	 * */
+	u32 ptd_ram_data_addr;
+	u8 lasttd;		/*last td , complete the transfer */
+	struct ehci_qh *qh;	/* endpoint */
+	struct ehci_qtd	*qtd;	/* qtds	for this endpoint */
+	struct ehci_itd	*itd;	/*itd pointer */
+	struct ehci_sitd *sitd;	/*itd pointer */
+	/*iso specific only */
+	u32 grouptdmap;		/*if td	need to	complete with error, then process all the tds
+				   in the groupmap    */
+} td_ptd_map_t;
+
+/*buffer(ATL/ISTL/INTL) management*/
+typedef	struct td_ptd_map_buff {
+	u8 buffer_type;		/* Buffer type:	BUFF_TYPE_ATL/INTL/ISTL0/ISTL1 */
+	u8 active_ptds;		/* number of active td's in the	buffer */
+	u8 total_ptds;		/* Total number of td's present in the buffer (active + to be removed + skip) */
+	u8 max_ptds;		/* Maximum number of ptd's(32) this buffer can withstand */
+	u16 active_ptd_bitmap;	/* Active PTD's	bitmap */
+	u16 pending_ptd_bitmap;	/* skip	PTD's bitmap */
+	td_ptd_map_t map_list[TD_PTD_MAX_BUFF_TDS];	/* td_ptd_map list */
+} td_ptd_map_buff_t;
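+
+/*
+ * Note on the bitmaps above: each bit position corresponds to one slot in
+ * map_list[].  phci_hcd_get_qtd_ptd_index() sets a slot's bit in
+ * active_ptd_bitmap when the slot is claimed for an endpoint, and
+ * phci_hcd_release_td_ptd_index() clears it (along with any pending bit)
+ * when the endpoint is released.
+ */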
+
+
+#define     USB_HCD_MAJOR           0
+#define     USB_HCD_MODULE_NAME     "isp1763hcd"
+/* static char devpath[] = "/dev/isp1763hcd"; */
+
+#define HCD_IOC_MAGIC	'h'
+
+#define	HCD_IOC_POWERDOWN			_IO(HCD_IOC_MAGIC, 1)
+#define	HCD_IOC_POWERUP				_IO(HCD_IOC_MAGIC, 2)
+#define	HCD_IOC_TESTSE0_NACK			_IO(HCD_IOC_MAGIC, 3)
+#define	HCD_IOC_TEST_J				_IO(HCD_IOC_MAGIC, 4)
+#define	HCD_IOC_TEST_K				_IO(HCD_IOC_MAGIC, 5)
+#define	HCD_IOC_TEST_TESTPACKET			_IO(HCD_IOC_MAGIC, 6)
+#define	HCD_IOC_TEST_FORCE_ENABLE		_IO(HCD_IOC_MAGIC, 7)
+#define	HCD_IOC_TEST_SUSPEND_RESUME		_IO(HCD_IOC_MAGIC, 8)
+#define	HCD_IOC_TEST_SINGLE_STEP_GET_DEV_DESC	_IO(HCD_IOC_MAGIC, 9)
+#define	HCD_IOC_TEST_SINGLE_STEP_SET_FEATURE	_IO(HCD_IOC_MAGIC, 10)
+#define	HCD_IOC_TEST_STOP			_IO(HCD_IOC_MAGIC, 11)
+#define	HCD_IOC_SUSPEND_BUS			_IO(HCD_IOC_MAGIC, 12)
+#define	HCD_IOC_RESUME_BUS			_IO(HCD_IOC_MAGIC, 13)
+#define	HCD_IOC_REMOTEWAKEUP_BUS		_IO(HCD_IOC_MAGIC, 14)
+
+#define HOST_COMPILANCE_TEST_ENABLE	1
+#define HOST_COMP_TEST_SE0_NAK	1
+#define HOST_COMP_TEST_J	2
+#define HOST_COMP_TEST_K	3
+#define HOST_COMP_TEST_PACKET		4
+#define HOST_COMP_TEST_FORCE_ENABLE	5
+#define HOST_COMP_HS_HOST_PORT_SUSPEND_RESUME	6
+#define HOST_COMP_SINGLE_STEP_GET_DEV_DESC	7
+#define HOST_COMP_SINGLE_STEP_SET_FEATURE	8
+
+#endif
diff --git a/drivers/usb/host/pehci/host/qtdptd.c b/drivers/usb/host/pehci/host/qtdptd.c
new file mode 100644
index 0000000..093800e
--- /dev/null
+++ b/drivers/usb/host/pehci/host/qtdptd.c
@@ -0,0 +1,1315 @@
+/* 
+* Copyright (C) ST-Ericsson AP Pte Ltd 2010 
+*
+* ISP1763 Linux OTG Controller driver : host
+* 
+* This program is free software; you can redistribute it and/or modify it under the terms of 
+* the GNU General Public License as published by the Free Software Foundation; version 
+* 2 of the License. 
+* 
+* This program is distributed in the hope that it will be useful, but WITHOUT ANY  
+* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS  
+* FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more  
+* details. 
+* 
+* You should have received a copy of the GNU General Public License 
+* along with this program; if not, write to the Free Software 
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. 
+* 
+* This is a host controller driver file.  QTD processing is handled here.
+* 
+* Author : wired support <wired.support@stericsson.com>
+*
+*/
+
+
+/*   Td management routines  */
+
+#define	QUEUE_HEAD_NOT_EMPTY	0x001
+
+
+/*free the location used by removed urb/endpoint*/
+static void
+phci_hcd_release_td_ptd_index(struct ehci_qh *qh)
+{
+	td_ptd_map_buff_t *td_ptd_buff = &td_ptd_map_buff[qh->type];
+	td_ptd_map_t *td_ptd_map = &td_ptd_buff->map_list[qh->qtd_ptd_index];
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+	/*hold the global lock here */
+	td_ptd_map->state = TD_PTD_NEW;
+	qh->qh_state = QH_STATE_IDLE;
+	/*
+	   set these values to NULL as schedule
+	   is based on these values,
+	   rather td_ptd_map state
+	 */
+	td_ptd_map->qh = NULL;
+	td_ptd_map->qtd	= NULL;
+
+	td_ptd_buff->active_ptd_bitmap &= ~td_ptd_map->ptd_bitmap;
+
+	/* Only	pending	transfers on current QH	must be	cleared	*/
+	td_ptd_buff->pending_ptd_bitmap	&= ~td_ptd_map->ptd_bitmap;
+
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+
+}
+
+/*print	ehciqtd*/
+static void
+print_ehci_qtd(struct ehci_qtd *qtd)
+{
+	pehci_print("hwnext 0x%08x, altnext 0x%08x,token 0x%08x, length	%d\n",
+		    qtd->hw_next, qtd->hw_alt_next,
+		    le32_to_cpu(qtd->hw_token),	qtd->length);
+
+	pehci_print("buf[0] 0x%08x\n", qtd->hw_buf[0]);
+
+}
+
+/*delete all qtds linked with this urb*/
+static void
+phci_hcd_qtd_list_free(phci_hcd	* ehci,
+		       struct urb *urb,	struct list_head *qtd_list)
+{
+	struct list_head *entry, *temp;
+
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+
+	list_for_each_safe(entry, temp,	qtd_list) {
+		struct ehci_qtd	*qtd;
+		qtd = list_entry(entry,	struct ehci_qtd, qtd_list);
+		if (!list_empty(&qtd->qtd_list))
+			list_del_init(&qtd->qtd_list);
+		qha_free(qha_cache, qtd);
+	}
+
+	pehci_entry("--	%s: Exit \n", __FUNCTION__);
+}
+
+
+/*
+ * free	all the	qtds for this transfer,	also
+ * free	the Host memory	to be reused
+ */
+static void
+phci_hcd_urb_free_priv(phci_hcd	* hcd,
+		       urb_priv_t * urb_priv_to_remove,	struct ehci_qh *qh)
+{
+	int i =	0;
+	struct ehci_qtd	*qtd;
+	for (i = 0; i <	urb_priv_to_remove->length; i++) {
+		if (urb_priv_to_remove->qtd[i])	{
+			qtd = urb_priv_to_remove->qtd[i];
+
+			if(!list_empty(&qtd->qtd_list))
+				list_del_init(&qtd->qtd_list);
+
+			/* This	is required when the device is abruptly	disconnected and the
+			 * PTDs	are not	completely processed
+			 */
+			if (qtd->length)
+				phci_hcd_mem_free(&qtd->mem_addr);
+
+			qha_free(qha_cache, qtd);
+			urb_priv_to_remove->qtd[i] = 0;
+			qtd = 0;
+		}
+
+	}
+	
+	return;
+}
+
+
+/*allocate the qtd*/
+struct ehci_qtd	*
+phci_hcd_qtd_allocate(int mem_flags)
+{
+
+	struct ehci_qtd	*qtd = 0;
+	qtd = kmalloc(sizeof *qtd, mem_flags);
+	if (!qtd)
+	{
+		return 0;
+	}
+	
+	memset(qtd, 0, sizeof *qtd);
+	qtd->qtd_dma = cpu_to_le32(qtd);
+	qtd->hw_next = EHCI_LIST_END;
+	qtd->hw_alt_next = EHCI_LIST_END;
+	qtd->state = QTD_STATE_NEW;
+	INIT_LIST_HEAD(&qtd->qtd_list);
+	return qtd;
+}
+
+/*
+ * calculates host memory for current length transfer td,
+ * maximum td length is	4K(custom made)
+ * */
+static int
+phci_hcd_qtd_fill(struct urb *urb,
+		  struct ehci_qtd *qtd,
+		  dma_addr_t buf, size_t len, int token, int *status)
+{
+	int count = 0;
+
+	qtd->hw_buf[0] = (u32) buf;
+	/*max length is HC_ATL_PL_SIZE	*/
+	if (len	> HC_ATL_PL_SIZE) {
+		count =	HC_ATL_PL_SIZE;
+	} else {
+		count =	len;
+	}
+	qtd->hw_token =	cpu_to_le32((count << 16) | token);
+	qtd->length = count;
+
+	pehci_print("%s:qtd %p,	token %8x bytes	%d dma %x\n",
+		__FUNCTION__, qtd, le32_to_cpu(qtd->hw_token), count,
+		qtd->hw_buf[0]);
+
+	return count;
+}
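+
+/*
+ * Worked example (illustrative only): with HC_ATL_PL_SIZE = 4096, a
+ * 10000-byte bulk transfer is split by the qtd loop in phci_hcd_make_qtd()
+ * below into three qtds carrying 4096, 4096 and 1808 bytes, since each
+ * call to this function caps the qtd length at HC_ATL_PL_SIZE.
+ */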
+
+
+/*
+ * makes number	of qtds	required for
+ * interrupt/bulk/control transfer length
+ * and initializes the qtds
+ * */
+struct list_head *
+phci_hcd_make_qtd(phci_hcd * hcd,
+		  struct list_head *head, struct urb *urb, int *status)
+{
+
+	struct ehci_qtd	*qtd, *qtd_prev;
+	dma_addr_t buf,	map_buf;
+	int len, maxpacket;
+	int is_input;
+	u32 token;
+	int cnt	= 0;
+	urb_priv_t *urb_priv = (urb_priv_t *) urb->hcpriv;
+
+	pehci_entry("++	%s, Entered\n",	__FUNCTION__);
+
+	/*take the qtd from already allocated
+	   structure from hcd_submit_urb
+	 */
+	qtd = urb_priv->qtd[cnt];
+	if (unlikely(!qtd)) {
+		*status	= -ENOMEM;
+		return 0;
+	}
+
+	qtd_prev = 0;
+	list_add_tail(&qtd->qtd_list, head);
+
+	qtd->urb = urb;
+
+	token =	QTD_STS_ACTIVE;
+	token |= (EHCI_TUNE_CERR << 10);
+
+	len = urb->transfer_buffer_length;
+
+	is_input = usb_pipein(urb->pipe);
+
+	if (usb_pipecontrol(urb->pipe))	{
+		/* SETUP pid */
+		if (phci_hcd_qtd_fill(urb, qtd,	cpu_to_le32(urb->setup_packet),
+			sizeof(struct usb_ctrlrequest),
+			token |	(2 /* "setup" */	<< 8),
+			status)	<	0) {
+			goto cleanup;
+		}
+
+		cnt++;		/* increment the index */
+		print_ehci_qtd(qtd);
+		/* ... and always at least one more pid	*/
+		token ^= QTD_TOGGLE;
+		qtd_prev = qtd;
+		qtd = urb_priv->qtd[cnt];
+		if (unlikely(!qtd)) {
+			*status	= -ENOMEM;
+			goto cleanup;
+		}
+		qtd->urb = urb;
+		qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
+		list_add_tail(&qtd->qtd_list, head);
+	}
+
+	/*
+	 * data	transfer stage:	 buffer	setup
+	 */
+	len = urb->transfer_buffer_length;
+	if (likely(len > 0)) {
+		/*update the buffer address */
+		buf = cpu_to_le32(urb->transfer_buffer);
+	} else {
+		buf = map_buf =	cpu_to_le32(0);	/*set-up stage has no data. */
+	}
+
+	/* are we waiting for the ack only, or is there a data stage as well? */
+	if (!buf || usb_pipein(urb->pipe)) {
+		token |= (1 /* "in" */	<< 8);
+	}
+	/* else	it's already initted to	"out" pid (0 <<	8) */
+	maxpacket = usb_maxpacket(urb->dev, urb->pipe,
+				  usb_pipeout(urb->pipe)) & 0x07ff;
+
+
+	/*
+	 * buffer gets wrapped in one or more qtds;
+	 * last	one may	be "short" (including zero len)
+	 * and may serve as a control status ack
+	 */
+
+	for (;;) {
+		int this_qtd_len;
+		this_qtd_len =
+			phci_hcd_qtd_fill(urb, qtd, buf, len, token, status);
+		if (this_qtd_len < 0)
+			goto cleanup;
+		print_ehci_qtd(qtd);
+		len -= this_qtd_len;
+		buf += this_qtd_len;
+		cnt++;
+		/* qh makes control packets use	qtd toggle; maybe switch it */
+		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0) {
+			token ^= QTD_TOGGLE;
+		}
+
+		if (likely(len <= 0)) {
+			break;
+		}
+		qtd_prev = qtd;
+		qtd = urb_priv->qtd[cnt];
+		if (unlikely(!qtd)) {
+			goto cleanup;
+		}
+		qtd->urb = urb;
+		qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
+		list_add_tail(&qtd->qtd_list, head);
+	}
+
+	/*
+	 * control requests may	need a terminating data	"status" ack;
+	 * bulk	ones may need a	terminating short packet (zero length).
+	 */
+	if (likely(buf != 0)) {
+		int one_more = 0;
+		if (usb_pipecontrol(urb->pipe))	{
+			one_more = 1;
+			token ^= 0x0100;	/* "in"	<--> "out"  */
+			token |= QTD_TOGGLE;	/* force DATA1 */
+
+		} else if (usb_pipebulk(urb->pipe)	/* bulk data exactly terminated on zero length */
+			&&(urb->transfer_flags & URB_ZERO_PACKET)
+			&& !(urb->transfer_buffer_length % maxpacket)) {
+			one_more = 1;
+		}
+		if (one_more) {
+			qtd_prev = qtd;
+			qtd = urb_priv->qtd[cnt];
+			if (unlikely(!qtd)) {
+				goto cleanup;
+			}
+
+			qtd->urb = urb;
+			qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
+			list_add_tail(&qtd->qtd_list, head);
+			phci_hcd_qtd_fill(urb, qtd, 0, 0, token, status);
+			print_ehci_qtd(qtd);
+			cnt++;
+		}
+	}
+
+	/*this is our last td for current transfer */
+	qtd->state |= QTD_STATE_LAST;
+
+	/*number of tds	*/
+	if (urb_priv->length !=	cnt) {
+		err("Never Error: number of tds	allocated %d exceeding %d\n",
+		    urb_priv->length, cnt);
+	}
+	/* by default, enable interrupt	on urb completion */
+	if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT))) {
+		qtd->hw_token |= __constant_cpu_to_le32(QTD_IOC);
+	}
+
+	pehci_entry("--	%s, Exit\n", __FUNCTION__);
+	return head;
+
+	cleanup:
+	phci_hcd_qtd_list_free(hcd, urb, head);
+	return 0;
+}
+
+/*allocates a queue head (endpoint)*/
+struct ehci_qh *
+phci_hcd_qh_alloc(phci_hcd * hcd)
+{
+
+	struct ehci_qh *qh = kmalloc(sizeof(struct ehci_qh), GFP_ATOMIC);
+	if (!qh)
+	{
+		return qh;
+	}
+	
+	memset(qh, 0, sizeof *qh);
+	atomic_set(&qh->refcount, 1);
+	init_waitqueue_head(&qh->waitforcomplete);
+	qh->qh_dma = (u32) qh;
+	INIT_LIST_HEAD(&qh->qtd_list);
+	INIT_LIST_HEAD(&qh->itd_list);
+	qh->next_uframe	= -1;
+	return qh;
+}
+
+/* calculates header address for the tds*/
+static int
+phci_hcd_fill_ptd_addresses(td_ptd_map_t * td_ptd_map, int index, int bufftype)
+{
+	int i =	0;
+	unsigned long tdlocation = 0;
+	/*
+	 * the below payloadlocation and
+	 * payloadsize are redundant
+	 * */
+	unsigned long payloadlocation =	0;
+	unsigned long payloadsize = 0;
+	pehci_entry("++	%s: enter\n", __FUNCTION__);
+	switch (bufftype) {
+		/*atl header starts at 0xc00 */
+	case TD_PTD_BUFF_TYPE_ATL:
+		tdlocation = 0x0c00;
+		/*redundant */
+		payloadsize = 0x1000;
+		payloadlocation	= 0x1000;
+		break;
+	case TD_PTD_BUFF_TYPE_INTL:
+		/*interrupt header
+		 * starts at 0x800
+		 * */
+		tdlocation = 0x0800;
+		/*redundant */
+		payloadlocation	= 0x1000;
+		payloadsize = 0x1000;
+		break;
+
+	case TD_PTD_BUFF_TYPE_ISTL:
+		/*iso header starts
+		 * at 0x400*/
+
+		tdlocation = 0x0400;
+		/*redundant */
+		payloadlocation	= 0x1000;
+		payloadsize = 0x1000;
+
+		break;
+	}
+
+
+	i = index;
+	payloadlocation	+= (i) * payloadsize;	/*each payload is of 4096 bytes	*/
+	tdlocation += (i) * PHCI_QHA_LENGTH;	/*each td is of	32 bytes */
+	td_ptd_map->ptd_header_addr = tdlocation;
+	td_ptd_map->ptd_data_addr = payloadlocation;
+	td_ptd_map->ptd_ram_data_addr =	((payloadlocation - 0x0400) >> 3);
+	pehci_print
+		("Index: %d, Header: 0x%08x, Payload: 0x%08x,Data start	address: 0x%08x\n",
+		 index,	td_ptd_map->ptd_header_addr, td_ptd_map->ptd_data_addr,
+		 td_ptd_map->ptd_ram_data_addr);
+	pehci_entry("--	%s: Exit", __FUNCTION__);
+	return payloadlocation;
+}
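+
+/*
+ * Worked example (illustrative only): for an ATL transfer placed at
+ * index 2, the function above yields header address 0x0c00 + 2 * 32 =
+ * 0x0c40, payload address 0x1000 + 2 * 0x1000 = 0x3000, and RAM data
+ * address (0x3000 - 0x0400) >> 3 = 0x580.
+ */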
+
+
+/*--------------------------------------------------------------*
+ * calculate the header	location for the current
+ * endpoint, if	found returns a	valid index
+ * else	invalid
+ -----------------------------------------------------------*/
+static void
+phci_hcd_get_qtd_ptd_index(struct ehci_qh *qh,
+			   struct ehci_qtd *qtd, struct	ehci_itd *itd)
+{
+	u8 buff_type = td_ptd_pipe_x_buff_type[qh->type];
+	u8 qtd_ptd_index;	/*, index; */
+	/*this is the location of the ptd's skip map/done map, also
+	   calculating the td header, payload, data start address
+	   location */
+	u8 bitmap = 0x1;
+	u8 max_ptds;
+
+	td_ptd_map_buff_t *ptd_map_buff	= &(td_ptd_map_buff[buff_type]);
+	pehci_entry("++	%s, Entered, buffer type %d\n",	__FUNCTION__,
+		    buff_type);
+
+	/* ATL PTDs can	wait */
+	max_ptds = (buff_type == TD_PTD_BUFF_TYPE_ATL)
+		? TD_PTD_MAX_BUFF_TDS :	ptd_map_buff->max_ptds;
+
+	for (qtd_ptd_index = 0;	qtd_ptd_index <	max_ptds; qtd_ptd_index++) {	/* Find	the first free slot */
+		if (ptd_map_buff->map_list[qtd_ptd_index].state	== TD_PTD_NEW) {
+			/* Found a free	slot */
+			if (qh->qtd_ptd_index == TD_PTD_INV_PTD_INDEX) {
+				qh->qtd_ptd_index = qtd_ptd_index;
+			}
+			ptd_map_buff->map_list[qtd_ptd_index].datatoggle = 0;
+			/*put the ptd_index into operational state */
+			ptd_map_buff->map_list[qtd_ptd_index].state =
+				TD_PTD_ACTIVE;
+			ptd_map_buff->map_list[qtd_ptd_index].qtd = qtd;
+			/* No td transfer is in	progress */
+			ptd_map_buff->map_list[qtd_ptd_index].itd = itd;
+			/*initialize endpoint(queuehead) */
+			ptd_map_buff->map_list[qtd_ptd_index].qh = qh;
+			ptd_map_buff->map_list[qtd_ptd_index].ptd_bitmap =
+				bitmap << qtd_ptd_index;
+			phci_hcd_fill_ptd_addresses(&ptd_map_buff->
+				map_list[qtd_ptd_index],
+				qh->qtd_ptd_index,
+				buff_type);
+			ptd_map_buff->map_list[qtd_ptd_index].lasttd = 0;
+			ptd_map_buff->total_ptds++;	/* update # of total td's */
+			/*make the queuehead map, to process in	the phci_schedule_ptds */
+			ptd_map_buff->active_ptd_bitmap	|=
+				(bitmap	<< qtd_ptd_index);
+			break;
+		}
+	}
+	pehci_entry("--	%s, Exit\n", __FUNCTION__);
+	return;
+
+}				/* phci_get_td_ptd_index */
+
+
+
+/*
+ * calculate the header	location for the endpoint and
+ * all tds on this endpoint will use the same
+ * header location for all transfers on	this endpoint.
+ * also	puts the endpoint into the linked state
+ * */
+static void
+phci_hcd_qh_link_async(phci_hcd	* hcd, struct ehci_qh *qh, int *status)
+{
+	struct ehci_qtd	*qtd = 0;
+	struct list_head *qtd_list = &qh->qtd_list;
+
+#ifdef MSEC_INT_BASED
+	td_ptd_map_buff_t *ptd_map_buff;
+	td_ptd_map_t *td_ptd_map;
+#endif
+
+	/*  take the first td, in case we are not able to schedule the new td
+	   and this is going for remove
+	 */
+	qtd = list_entry(qtd_list->next, struct	ehci_qtd, qtd_list);
+
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+
+	/* Assign a td-ptd index for this ed so	that we	can put	ptd's in the HC	buffers	*/
+
+	qh->qtd_ptd_index = TD_PTD_INV_PTD_INDEX;
+	phci_hcd_get_qtd_ptd_index(qh, qtd, NULL);	/* Get a td-ptd	index */
+	if (qh->qtd_ptd_index == TD_PTD_INV_PTD_INDEX) {
+		err("can not find the location in our buffer\n");
+		*status	= -ENOSPC;
+		return;
+	}
+#ifdef MSEC_INT_BASED
+	/*first	transfers in sof interrupt goes	into pending */
+	ptd_map_buff = &(td_ptd_map_buff[qh->type]);
+	td_ptd_map = &ptd_map_buff->map_list[qh->qtd_ptd_index];
+	ptd_map_buff->pending_ptd_bitmap |= td_ptd_map->ptd_bitmap;
+
+#endif
+	/* clear the halt so that it can be accessed */
+	qh->hw_token &=	~__constant_cpu_to_le32(QTD_STS_HALT);
+	qh->qh_state = QH_STATE_LINKED;
+	qh->qh_state |=	QH_STATE_TAKE_NEXT;
+	pehci_entry("--	%s: Exit , qh %p\n", __FUNCTION__, qh);
+
+
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * mainly used for setting up current td on current
+ * endpoint(queuehead),	endpoint may be	new or
+ * halted one
+ * */
+
+static inline void
+phci_hcd_qh_update(phci_hcd * ehci, struct ehci_qh *qh,	struct ehci_qtd	*qtd)
+{
+	/*make this current td */
+	qh->hw_current = QTD_NEXT(qtd->qtd_dma);
+	qh->hw_qtd_next	= QTD_NEXT(qtd->qtd_dma);
+	qh->hw_alt_next	= EHCI_LIST_END;
+	/* HC must see latest qtd and qh data before we	clear ACTIVE+HALT */
+	wmb();
+	qh->hw_token &=	__constant_cpu_to_le32(QTD_TOGGLE | QTD_STS_PING);
+}
+
+/*
+ * used	for ATL, INT transfers
+ * function creates new	endpoint,
+ * calculates bandwidth	for interrupt transfers,
+ * and initialize the qh based on endpoint type/speed
+ * */
+struct ehci_qh *
+phci_hcd_make_qh(phci_hcd * hcd,
+		 struct	urb *urb, struct list_head *qtd_list, int *status)
+{
+	struct ehci_qh *qh = 0;
+	u32 info1 = 0, info2 = 0;
+	int is_input, type;
+	int maxp = 0;
+	int mult = 0;
+	int bustime = 0;
+	struct ehci_qtd	*qtd =
+		list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
+
+
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+
+	qh = phci_hcd_qh_alloc(hcd);
+	if (!qh) {
+		*status	= -ENOMEM;
+		return 0;
+	}
+
+	/*
+	 * init	endpoint/device	data for this QH
+	 */
+	info1 |= usb_pipeendpoint(urb->pipe) <<	8;
+	info1 |= usb_pipedevice(urb->pipe) << 0;
+
+	is_input = usb_pipein(urb->pipe);
+	type = usb_pipetype(urb->pipe);
+	maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input);
+	mult = 1 + ((maxp >> 11) & 0x3);
+
+	/*set this queueheads index to invalid */
+	qh->qtd_ptd_index = TD_PTD_INV_PTD_INDEX;
+
+	switch (type) {
+	case PIPE_CONTROL:
+	case PIPE_BULK:
+		qh->type = TD_PTD_BUFF_TYPE_ATL;
+		break;
+
+	case PIPE_INTERRUPT:
+		qh->type = TD_PTD_BUFF_TYPE_INTL;
+		break;
+	case PIPE_ISOCHRONOUS:
+		qh->type = TD_PTD_BUFF_TYPE_ISTL;
+		break;
+
+	}
+
+
+
+	if (type == PIPE_INTERRUPT) {
+		/*for this interrupt transfer check how	much bustime in	usecs required */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+		bustime = usb_check_bandwidth(urb->dev, urb);
+
+		if (bustime < 0) {
+			*status = -ENOSPC;
+			goto done;
+		}
+
+		usb_claim_bandwidth(urb->dev, urb, bustime,
+			usb_pipeisoc(urb->pipe));
+#else
+#endif
+		qh->usecs = bustime;
+
+		qh->start = NO_FRAME;
+
+		if (urb->dev->speed == USB_SPEED_HIGH) {
+			qh->c_usecs = 0;
+			qh->gap_uf = 0;
+			/*after	how many uframes this interrupt	is to be executed */
+			qh->period = urb->interval >> 3;
+			if (qh->period < 1) {
+				printk("intr period %d uframes,\n",
+				urb->interval);
+			}
+			/*restore the original urb->interval in	qh->period */
+			qh->period = urb->interval;
+
+		} else {
+			/* gap is f(FS/LS transfer times) */
+			qh->gap_uf = 1 + 7;	/*usb_calc_bus_time (urb->dev->speed,
+						   is_input, 0,	maxp) /	(125 * 1000); */
+
+			if (is_input) {	/* SPLIT, gap, CSPLIT+DATA */
+
+				qh->c_usecs = qh->usecs	+ 1;	/*HS_USECS (0);	*/
+				qh->usecs = 10;	/*HS_USECS (1);	*/
+			} else {	/* SPLIT+DATA, gap, CSPLIT */
+				qh->usecs += 10;	/*HS_USECS (1);	*/
+				qh->c_usecs = 1;	/*HS_USECS (0);	*/
+			}
+
+
+			/*take the period ss/cs	scheduling will	be
+			   handled by submit urb
+			 */
+			qh->period = urb->interval;
+		}
+	}
+
+	/* using TT? */
+	switch (urb->dev->speed) {
+	case USB_SPEED_LOW:
+		info1 |= (1 << 12);	/* EPS "low" */
+		/* FALL	THROUGH	*/
+
+	case USB_SPEED_FULL:
+		/* EPS 0 means "full" */
+		if (type != PIPE_INTERRUPT) {
+			info1 |= (EHCI_TUNE_RL_TT << 28);
+		}
+		if (type == PIPE_CONTROL) {
+			info1 |= (1 << 27);	/* for TT */
+			info1 |= 1 << 14;	/* toggle from qtd */
+		}
+		info1 |= maxp << 16;
+
+		info2 |= (EHCI_TUNE_MULT_TT << 30);
+		info2 |= urb->dev->ttport << 23;
+		info2 |= urb->dev->tt->hub->devnum << 16;
+		break;
+
+
+	case USB_SPEED_HIGH:	/* no TT involved */
+		info1 |= (2 << 12);	/* EPS "high" */
+		if (type == PIPE_CONTROL) {
+			info1 |= (EHCI_TUNE_RL_HS << 28);
+			info1 |= 64 << 16;	/* usb2	fixed maxpacket	*/
+
+			info1 |= 1 << 14;	/* toggle from qtd */
+			info2 |= (EHCI_TUNE_MULT_HS << 30);
+		} else if (type	== PIPE_BULK) {
+			info1 |= (EHCI_TUNE_RL_HS << 28);
+			info1 |= 512 <<	16;	/* usb2	fixed maxpacket	*/
+			info2 |= (EHCI_TUNE_MULT_HS << 30);
+		} else {	/* PIPE_INTERRUPT */
+			info1 |= (maxp & 0x7ff)	/*max_packet (maxp) */ <<16;
+			info2 |= mult /*hb_mult	(maxp) */  << 30;
+		}
+		break;
+
+	default:
+		pehci_print("bogus dev %p speed	%d", urb->dev, urb->dev->speed);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	done:
+#else
+#endif
+		qha_free(qha_cache, qh);
+		return 0;
+	}			/*end of switch	*/
+
+	/* NOTE:  if (PIPE_INTERRUPT) {	scheduler sets s-mask }	*/
+
+	/* init	as halted, toggle clear, advance to dummy */
+	qh->qh_state = QH_STATE_IDLE;
+	qh->hw_info1 = cpu_to_le32(info1);
+	qh->hw_info2 = cpu_to_le32(info2);
+	/*link the tds here */
+	list_splice(qtd_list, &qh->qtd_list);
+	phci_hcd_qh_update(hcd,	qh, qtd);
+	qh->hw_token = cpu_to_le32(QTD_STS_HALT);
+	if (!usb_pipecontrol(urb->pipe)) {
+		usb_settoggle(urb->dev,	usb_pipeendpoint(urb->pipe), !is_input,
+			1);
+	}
+	pehci_entry("--	%s: Exit, qh %p\n", __FUNCTION__, qh);
+	return qh;
+}
+
+
+/*-----------------------------------------------------------*/
+/*
+ * Hardware maintains data toggle (like	OHCI) ... here we (re)initialize
+ * the hardware	data toggle in the QH, and set the pseudo-toggle in udev
+ * so we can see if usb_clear_halt() was called.  NOP for control, since
+ * we set up qh->hw_info1 to always use	the QTD	toggle bits.
+ */
+static inline void
+phci_hcd_clear_toggle(struct usb_device	*udev, int ep, int is_out,
+		      struct ehci_qh *qh)
+{
+	pehci_print("clear toggle, dev %d ep 0x%x-%s\n",
+		    udev->devnum, ep, is_out ? "out" : "in");
+	qh->hw_token &=	~__constant_cpu_to_le32(QTD_TOGGLE);
+	usb_settoggle(udev, ep,	is_out,	1);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * For control/bulk/interrupt, return QH with these TDs	appended.
+ * Allocates and initializes the QH if necessary.
+ * Returns null	if it can't allocate a QH it needs to.
+ * If the QH has TDs (urbs) already, that's great.
+ */
+struct ehci_qh *
+phci_hcd_qh_append_tds(phci_hcd	* hcd,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	struct usb_host_endpoint *ep,
+#else
+#endif
+	struct urb *urb,	struct list_head *qtd_list,
+	void **ptr, int *status)
+{
+
+	int epnum;
+
+	struct ehci_qh *qh = 0;
+	struct ehci_qtd	*qtd =
+		list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
+	td_ptd_map_buff_t *ptd_map_buff;
+	td_ptd_map_t *td_ptd_map;
+
+
+
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	epnum = ep->desc.bEndpointAddress;
+#else
+	epnum = urb->ep->desc.bEndpointAddress;
+#endif
+
+	qh = (struct ehci_qh *)	*ptr;
+	if (likely(qh != 0)) {
+		u32 hw_next = QTD_NEXT(qtd->qtd_dma);
+		pehci_print("Queue head already %p\n", qh);
+
+		ptd_map_buff = &(td_ptd_map_buff[qh->type]);
+		td_ptd_map = &ptd_map_buff->map_list[qh->qtd_ptd_index];
+
+		/* maybe patch the qh used for set_address */
+		if (unlikely
+			(epnum == 0	&& le32_to_cpu(qh->hw_info1 & 0x7f) == 0)) {
+			qh->hw_info1 |=	cpu_to_le32(usb_pipedevice(urb->pipe));
+		}
+
+		/* is an URB queued to this qh already? */
+		if (unlikely(!list_empty(&qh->qtd_list))) {
+			struct ehci_qtd	*last_qtd;
+			/* update the last qtd's "next"	pointer	*/
+			last_qtd = list_entry(qh->qtd_list.prev,
+				struct ehci_qtd, qtd_list);
+
+			/* queue head is not empty; just add the
+			   td at the end of it and return from here
+			 */
+			last_qtd->hw_next = hw_next;
+
+			/*set the status as positive */
+			*status	= (u32)	QUEUE_HEAD_NOT_EMPTY;
+
+			/* no URB queued */
+		} else {
+
+	//		qh->qh_state = QH_STATE_IDLE;
+
+
+			/* usb_clear_halt() means qh data toggle gets reset */
+			if (usb_pipebulk(urb->pipe)
+				&& unlikely(!usb_gettoggle(urb->dev, (epnum	& 0x0f),
+				!(epnum & 0x80)))) {
+
+				phci_hcd_clear_toggle(urb->dev,
+					epnum & 0x0f,
+					!(epnum &	0x80), qh);
+
+				/*reset	our data toggle	*/
+
+				qh->datatoggle = 0;
+				qh->ping = 0;
+
+			}
+			phci_hcd_qh_update(hcd,	qh, qtd);
+		}
+		/* put everything in pending; will be cleared during scheduling */
+		ptd_map_buff->pending_ptd_bitmap |= td_ptd_map->ptd_bitmap;
+		list_splice(qtd_list, qh->qtd_list.prev);
+	} else {
+		qh = phci_hcd_make_qh(hcd, urb,	qtd_list, status);
+		*ptr = qh;
+	}
+	pehci_entry("--	%s: Exit qh %p\n", __FUNCTION__, qh);
+	return qh;
+}
+
+/*link qtds to endpoint(qh)*/
+struct ehci_qh *
+phci_hcd_submit_async(phci_hcd * hcd,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	struct usb_host_endpoint *ep,
+#else
+#endif
+		      struct list_head *qtd_list, struct urb *urb, int *status)
+{
+	struct ehci_qtd	*qtd;
+	struct hcd_dev *dev;
+	int epnum;
+
+#ifndef THREAD_BASED
+	unsigned long flags;
+#endif
+
+	
+	struct ehci_qh *qh = 0;
+
+	urb_priv_t *urb_priv = urb->hcpriv;
+
+	qtd = list_entry(qtd_list->next, struct	ehci_qtd, qtd_list);
+	dev = (struct hcd_dev *) urb->hcpriv;
+	epnum =	usb_pipeendpoint(urb->pipe);
+	if (usb_pipein(urb->pipe) && !usb_pipecontrol(urb->pipe)) {
+		epnum |= 0x10;
+	}
+
+	pehci_entry("++	%s, enter\n", __FUNCTION__);
+
+	/* ehci_hcd->lock guards shared	data against other CPUs:
+	 *   ehci_hcd:	    async, reclaim, periodic (and shadow), ...
+	 *   hcd_dev:	    ep[]
+	 *   ehci_qh:	    qh_next, qtd_list
+	 *   ehci_qtd:	    qtd_list
+	 *
+	 * Also, hold this lock	when talking to	HC registers or
+	 * when	updating hw_* fields in	shared qh/qtd/... structures.
+	 */
+#ifndef THREAD_BASED
+	spin_lock_irqsave(&hcd->lock, flags);
+#endif
+
+	spin_lock(&hcd_data_lock);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+	usb_hcd_link_urb_to_ep(&hcd->usb_hcd, urb);
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	qh = phci_hcd_qh_append_tds(hcd, ep, urb, qtd_list, &ep->hcpriv,
+		status);
+#else
+	qh = phci_hcd_qh_append_tds(hcd, urb, qtd_list, &urb->ep->hcpriv,
+		status);
+#endif
+	if (!qh	|| *status < 0) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+		usb_hcd_unlink_urb_from_ep(&hcd->usb_hcd, urb);
+#endif
+		goto cleanup;
+	}
+	/* Control/bulk	operations through TTs don't need scheduling,
+	 * the HC and TT handle	it when	the TT has a buffer ready.
+	 */
+
+	/* now the quehead can not be in the unlink state */
+
+//	printk("qh->qh_state:0x%x \n",qh->qh_state);
+	if (qh->qh_state == QH_STATE_UNLINK) {
+		pehci_info("%s:	free the urb,qh->state %x\n", __FUNCTION__,
+			   qh->qh_state);
+		phci_hcd_qtd_list_free(hcd, urb, &qh->qtd_list);
+		spin_unlock(&hcd_data_lock);
+		
+#ifndef THREAD_BASED			
+		spin_unlock_irqrestore(&hcd->lock, flags);
+#endif
+		*status	= -ENODEV;
+		return 0;
+	}
+
+	if (likely(qh != 0)) {
+		urb_priv->qh = qh;
+		if (likely(qh->qh_state	== QH_STATE_IDLE))
+			phci_hcd_qh_link_async(hcd, qh,	status);
+	}
+
+	cleanup:
+	spin_unlock(&hcd_data_lock);
+
+#ifndef THREAD_BASED			
+	/* released the lock; the system can sleep now */
+	spin_unlock_irqrestore(&hcd->lock, flags);
+#endif
+	
+	/* could not get the QH	terminate and clean. */
+	if (unlikely(qh	== 0) || *status < 0) {
+		phci_hcd_qtd_list_free(hcd, urb, qtd_list);
+		return qh;
+	}
+	return qh;
+}
+
+/*
+ * initialize the s-mask/c-mask for
+ * interrupt transfers.
+ */
+static int
+phci_hcd_qhint_schedule(phci_hcd * hcd,
+			struct ehci_qh *qh,
+			struct ehci_qtd	*qtd,
+			struct _isp1763_qhint *qha, struct urb *urb)
+{
+	int i =	0;
+	u32 td_info3 = 0;
+	u32 td_info5 = 0;
+	u32 period = 0;
+	u32 usofmask = 1;
+	u32 usof = 0;
+	u32 ssplit = 0,	csplit = 0xFF;
+	int maxpacket;
+	u32 numberofusofs = 0;
+
+	/* and since the whole msec frame is empty, we can schedule in any uframe */
+	maxpacket = usb_maxpacket(urb->dev, urb->pipe, !usb_pipein(urb->pipe));
+	maxpacket &= 0x7ff;
+	/*length of the	data per uframe	*/
+	maxpacket = XFER_PER_UFRAME(qha->td_info1) * maxpacket;
+
+	/* calculate the number of uframes required */
+	numberofusofs =	urb->transfer_buffer_length / maxpacket;
+	/*if something left */
+	if (urb->transfer_buffer_length	% maxpacket) {
+		numberofusofs += 1;
+	}
+
+	for (i = 0; i <	numberofusofs; i++) {
+		usofmask <<= i;
+		usof |=	usofmask;
+
+	}
+
+	/*
+	   for full/low speed devices, as we
+	   have a separate location for all the endpoints,
+	   let the start split go to the first uframe, i.e. uframe 0
+	 */
+	if (urb->dev->speed != USB_SPEED_HIGH && usb_pipeint(urb->pipe)) {
+		/*set the complete splits */
+		/* set all the bits and let's see what happens */
+		/*but this will	be set based on	the maximum packet size	*/
+		ssplit = usof;
+		/*  need to fix	it */
+		csplit = 0x1C;
+		qha->td_info6 =	csplit;
+		period = qh->period;
+		if (period >= 32) {
+			period = qh->period / 2;
+		}
+		td_info3 = period;
+		goto done;
+
+	} else {
+		if (qh->period >= 8) {
+			period = qh->period / 8;
+		} else {
+			period = qh->period;
+		}
+	}
+	/* our limitation is a maximum of 32, i.e. 31 (5 bits) */
+	if (period >= 32) {
+		period = 32;
+		/* divide by 2 */
+		period >>= 1;
+	}
+	if (qh->period >= 8) {
+		/*millisecond period */
+		td_info3 = (period << 3);
+	} else {
+		/* usof based transfers */
+		/*minimum 4 usofs */
+		td_info3 = period;
+		usof = 0x11;
+	}
+
+	done:
+	td_info5 = usof;
+	qha->td_info3 |= td_info3;
+	qha->td_info5 |= usof;
+	return numberofusofs;
+}
+
+/*link interrupts qtds to endpoint*/
+struct ehci_qh *
+phci_hcd_submit_interrupt(phci_hcd * hcd,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	struct usb_host_endpoint *ep,
+#else
+#endif
+			  struct list_head *qtd_list,
+			  struct urb *urb, int *status)
+{
+	struct ehci_qtd	*qtd;
+	struct _hcd_dev	*dev;
+	int epnum;
+	unsigned long flags;
+	struct ehci_qh *qh = 0;
+	urb_priv_t *urb_priv = (urb_priv_t *) urb->hcpriv;
+
+	qtd = list_entry(qtd_list->next, struct	ehci_qtd, qtd_list);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	dev = (struct hcd_dev *) urb->hcpriv;
+	epnum = ep->desc.bEndpointAddress;
+
+	pehci_entry("++ %s, enter\n", __FUNCTION__);
+
+
+	/*check for more than one urb queued for this endpoint */
+	qh = ep->hcpriv;
+#else
+	dev = (struct _hcd_dev *) (urb->hcpriv);
+	epnum = urb->ep->desc.bEndpointAddress;
+
+	pehci_entry("++ %s, enter\n", __FUNCTION__);
+
+
+	/*check for more than one urb queued for this endpoint */
+	qh = (struct ehci_qh *) urb->ep->hcpriv;
+#endif
+
+	spin_lock_irqsave(&hcd->lock, flags);
+	if (unlikely(qh	!= 0)) {
+		if (!list_empty(&qh->qtd_list))	{
+			*status	= -EBUSY;
+			goto done;
+		} else {
+			td_ptd_map_buff_t *ptd_map_buff;
+			td_ptd_map_t *td_ptd_map;
+			ptd_map_buff = &(td_ptd_map_buff[qh->type]);
+			td_ptd_map = &ptd_map_buff->map_list[qh->qtd_ptd_index];
+			ptd_map_buff->pending_ptd_bitmap |=
+				td_ptd_map->ptd_bitmap;
+			 /*NEW*/ td_ptd_map->qtd = qtd;
+			/* maybe reset hardware's data toggle in the qh	*/
+			if (unlikely(!usb_gettoggle(urb->dev, epnum & 0x0f,
+				!(epnum & 0x80)))) {
+
+				/*reset	our data toggle	*/
+				td_ptd_map->datatoggle = 0;
+				usb_settoggle(urb->dev,	epnum &	0x0f,
+					!(epnum &	0x80), 1);
+				qh->datatoggle = 0;
+			}
+			/* trust the QH	was set	up as interrupt	... */
+			list_splice(qtd_list, &qh->qtd_list);
+		}
+	}
+
+
+	if (!qh) {
+		qh = phci_hcd_make_qh(hcd, urb,	qtd_list, status);
+		if (likely(qh == 0)) {
+			*status	= -ENOMEM;
+			goto done;
+		}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+		ep->hcpriv = qh;
+#else
+		urb->ep->hcpriv = qh;
+#endif
+	}
+
+	if (likely(qh != 0)) {
+		urb_priv->qh = qh;
+		if (likely(qh->qh_state	== QH_STATE_IDLE)) {
+			phci_hcd_qh_link_async(hcd, qh,	status);
+		}
+	}
+
+
+	done:
+	/* released the lock; the system can sleep now */
+	spin_unlock_irqrestore(&hcd->lock, flags);
+	/* could not get the QH	terminate and clean. */
+	if (unlikely(qh	== 0) || *status < 0) {
+		phci_hcd_qtd_list_free(hcd, urb, qtd_list);
+		return qh;
+	}
+	return qh;
+}
+
+
+
+
+/*
+ * converts an original EHCI QTD into a PTD (Proprietary Transfer Descriptor);
+ * we also refer to the PTD as a qha.
+ * Used for ATL and INT transfers.
+ */
+void *
+phci_hcd_qha_from_qtd(phci_hcd * hcd,
+	struct ehci_qtd *qtd,
+	struct urb *urb,
+	void *ptd, u32 ptd_data_addr, struct ehci_qh *qh)
+{
+	u8 toggle = qh->datatoggle;
+	u32 token = 0;
+	u32 td_info1 = 0;
+	u32 td_info3 = 0;
+	u32 td_info4 = 0;
+	int maxpacket =	0;
+	u32 length = 0,	temp = 0;
+	/*for non high speed devices */
+	u32 portnum = 0;
+	u32 hubnum = 0;
+	u32 se = 0, rl = 0x0, nk = 0x0;
+	u8 datatoggle =	0;
+	struct isp1763_mem_addr	*mem_addr = &qtd->mem_addr;
+	u32 data_addr =	0;
+	u32 multi = 0;
+	struct _isp1763_qha *qha = (isp1763_qha	*) ptd;
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+
+	maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
+
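+	/*
+	 * Bits [12:11] of wMaxPacketSize encode the number of additional
+	 * transactions per microframe for high-bandwidth endpoints, so
+	 * "multi" below is the total transactions per microframe (1..3).
+	 */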
+	multi =	1 + ((maxpacket	>> 11) & 0x3);
+
+	maxpacket &= 0x7ff;
+
+	/************************first word*********************************/
+	length = qtd->length;
+	td_info1 = QHA_VALID;
+	td_info1 |= (length << 3);
+	td_info1 |= (maxpacket << 18);
+	td_info1 |= (usb_pipeendpoint(urb->pipe) << 31);
+	td_info1 |= MULTI(multi);
+	/*set the first	dword */
+	qha->td_info1 =	td_info1;
+
+	pehci_print("%s: length	%d, 1st	word 0x%08x\n",	__FUNCTION__, length,
+		    qha->td_info1);
+
+	/*******************second word***************************************/
+	temp = qtd->hw_token;
+
+	/* take the pid; that's all we need from the qtd
+	 */
+
+	temp = temp & 0x0300;
+	temp = temp >> 8;
+	/* take the endpoint's upper 3 bits (EP[3:1]); EP[0] went into DW0 bit 31 */
+	token =	(usb_pipeendpoint(urb->pipe) & 0xE) >> 1;
+	token |= usb_pipedevice(urb->pipe) << 3;
+
+	if (urb->dev->speed != USB_SPEED_HIGH) {
+		pehci_print("device is full/low	speed, %d\n", urb->dev->speed);
+		token |= 1 << 14;
+		portnum	= urb->dev->ttport;
+		 /*IMMED*/ hubnum = urb->dev->tt->hub->devnum;
+		token |= portnum << 18;
+		token |= hubnum	<< 25;
+		/*for non-high speed transfer
+		   reload and nak counts are zero
+		 */
+		rl = 0x0;
+		nk = 0x0;
+
+	}
+
+	/* se should be 0x2 only for low speed devices */
+	if (urb->dev->speed == USB_SPEED_LOW) {
+		se = 0x2;
+	}
+
+	if (usb_pipeint(urb->pipe)) {
+		/* reload count and nak count are
+		   required only for async transfers
+		 */
+		rl = 0x0;
+	}
+
+	/*set the se field, should be zero for all
+	   but low speed devices
+	 */
+	token |= se << 16;
+	/*take the pid */
+	token |= temp << 10;
+
+	if (usb_pipebulk(urb->pipe)) {
+		token |= EPTYPE_BULK;
+	} else if (usb_pipeint(urb->pipe)) {
+		token |= EPTYPE_INT;
+	} else if (usb_pipeisoc(urb->pipe)) {
+		token |= EPTYPE_ISO;
+	}
+
+
+	qha->td_info2 =	token;
+
+	pehci_print("%s: second	word 0x%08x, qtd token 0x%08x\n",
+		    __FUNCTION__, qha->td_info2, temp);
+
+	/***********************Third word*************************************/
+
+	/*calculate the	data start address from	mem_addr for qha */
+
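+	/*
+	 * The PTD data start address is the buffer's offset past 0x400 in
+	 * the controller's payload memory, expressed in 8-byte units;
+	 * hence the subtraction and the shift by 3 below.
+	 */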
+	data_addr = ((u32) (mem_addr->phy_addr)	& 0xffff) - 0x400;
+	data_addr >>= 3;
+	pehci_print("data start	address	%x\n", data_addr);
+	/*use this field only if there
+	 * is something	to transfer
+	 * */
+	if (length) {
+		td_info3 = data_addr <<	8;
+	}
+	/*RL Count, 16 */
+	td_info3 |= (rl	<< 25);
+	qha->td_info3 =	td_info3;
+
+	pehci_print("%s: third word 0x%08x, tdinfo 0x%08x\n",
+		__FUNCTION__, qha->td_info3, td_info3);
+
+
+	/**************************fourth word*************************************/
+
+	if (usb_pipecontrol(urb->pipe))	{
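+		/* the EHCI qTD token keeps the data toggle in bit 31 */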
+		datatoggle = qtd->hw_token >> 31;
+	} else {
+		/* take the data toggle from the previously completed transfer,
+		   or zero in case of a fresh transfer */
+		datatoggle = toggle;
+	}
+
+	td_info4 = QHA_ACTIVE;
+	/*dt */
+	td_info4 |= datatoggle << 25;	/*QHA_DATA_TOGGLE; */
+	/* 3 retries for setup, else retry forever */
+	if (PTD_PID(qha->td_info2) == SETUP_PID) {
+		td_info4 |= (3 << 23);
+	} else {
+		td_info4 |= (0 << 23);
+	}
+	
+	/*nak count */
+	td_info4 |= (nk	<< 19);
+
+	td_info4 |= (qh->ping << 26);
+	qha->td_info4 =	td_info4;
+#ifdef PTD_DUMP_SCHEDULE
+	printk("SCHEDULE PTD DUMP\n");
+	printk("SDW0: 0x%08x\n",qha->td_info1);
+	printk("SDW1: 0x%08x\n",qha->td_info2);
+	printk("SDW2: 0x%08x\n",qha->td_info3);
+	printk("SDW3: 0x%08x\n",qha->td_info4);
+#endif
+	pehci_print("%s: fourth word 0x%08x\n", __FUNCTION__, qha->td_info4);
+	pehci_entry("--	%s: Exit, qha %p\n", __FUNCTION__, qha);
+	return qha;
+
+}
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index 1bfcd02..cf5d452 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -218,6 +218,21 @@
 	  See <http://www.linux-usb.org/usbtest/> for more information,
 	  including sample test device firmware and "how to use it".
 
+config USB_EHSET_TEST_FIXTURE
+	tristate "USB EHSET Test Fixture Driver"
+	depends on USB && USB_EHCI_EHSET
+	default n
+	help
+	  Say Y here if you want to use EHSET Test Fixture device for host
+	  compliance testing.
+
+	  This driver initiates test modes on the downstream port to which the
+	  test fixture is attached.
+
+	  See <http://www.usb.org/developers/onthego/EHSET_v1.01.pdf>
+	  for more information.
+
+
 config USB_ISIGHTFW
 	tristate "iSight firmware loading support"
 	depends on USB
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index 796ce7e..c8e777a 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -21,6 +21,7 @@
 obj-$(CONFIG_USB_LEGOTOWER)		+= legousbtower.o
 obj-$(CONFIG_USB_RIO500)		+= rio500.o
 obj-$(CONFIG_USB_TEST)			+= usbtest.o
+obj-$(CONFIG_USB_EHSET_TEST_FIXTURE)	+= ehset.o
 obj-$(CONFIG_USB_TRANCEVIBRATOR)	+= trancevibrator.o
 obj-$(CONFIG_USB_USS720)		+= uss720.o
 obj-$(CONFIG_USB_SEVSEG)		+= usbsevseg.o
diff --git a/drivers/usb/misc/ehset.c b/drivers/usb/misc/ehset.c
new file mode 100644
index 0000000..30879e0
--- /dev/null
+++ b/drivers/usb/misc/ehset.c
@@ -0,0 +1,147 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/slab.h>
+#include <linux/usb/ch11.h>
+#include <linux/usb/hcd.h>
+
+#define TEST_SE0_NAK_PID		0x0101
+#define TEST_J_PID			0x0102
+#define TEST_K_PID			0x0103
+#define TEST_PACKET_PID			0x0104
+#define TEST_HS_HOST_PORT_SUSPEND_RESUME 0x0106
+#define TEST_SINGLE_STEP_GET_DEV_DESC	0x0107
+#define TEST_SINGLE_STEP_SET_FEATURE	0x0108
+
+static int ehset_probe(struct usb_interface *intf,
+		       const struct usb_device_id *id)
+{
+	int status = -1;
+	struct usb_device *dev = interface_to_usbdev(intf);
+	struct usb_device *rh_udev = dev->bus->root_hub;
+	struct usb_device *hub_udev = dev->parent;
+	int port1 = dev->portnum;
+	int test_mode = le16_to_cpu(dev->descriptor.idProduct);
+
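+	/*
+	 * SetPortFeature(PORT_TEST) carries the test selector in the high
+	 * byte of wIndex and the port number in the low byte:
+	 * 1 = Test_J, 2 = Test_K, 3 = Test_SE0_NAK, 4 = Test_Packet.
+	 * Selector 6, used further below, is the EHSET single-step
+	 * SetFeature test handled by the EHCI root-hub code.
+	 */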
+	switch (test_mode) {
+	case TEST_SE0_NAK_PID:
+		status = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
+			USB_REQ_SET_FEATURE, USB_RT_PORT, USB_PORT_FEAT_TEST,
+			(3 << 8) | port1, NULL, 0, 1000);
+		break;
+	case TEST_J_PID:
+		status = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
+			USB_REQ_SET_FEATURE, USB_RT_PORT, USB_PORT_FEAT_TEST,
+			(1 << 8) | port1, NULL, 0, 1000);
+		break;
+	case TEST_K_PID:
+		status = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
+			USB_REQ_SET_FEATURE, USB_RT_PORT, USB_PORT_FEAT_TEST,
+			(2 << 8) | port1, NULL, 0, 1000);
+		break;
+	case TEST_PACKET_PID:
+		status = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
+			USB_REQ_SET_FEATURE, USB_RT_PORT, USB_PORT_FEAT_TEST,
+			(4 << 8) | port1, NULL, 0, 1000);
+		break;
+	case TEST_HS_HOST_PORT_SUSPEND_RESUME:
+		/* Test: wait for 15secs -> suspend -> 15secs delay -> resume */
+		msleep(15 * 1000);
+		status = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
+			USB_REQ_SET_FEATURE, USB_RT_PORT,
+			USB_PORT_FEAT_SUSPEND, port1, NULL, 0, 1000);
+		if (status < 0)
+			break;
+		msleep(15 * 1000);
+		status = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
+			USB_REQ_CLEAR_FEATURE, USB_RT_PORT,
+			USB_PORT_FEAT_SUSPEND, port1, NULL, 0, 1000);
+		break;
+	case TEST_SINGLE_STEP_GET_DEV_DESC:
+		/* Test: wait for 15secs -> GetDescriptor request */
+		msleep(15 * 1000);
+		{
+			struct usb_device_descriptor *buf;
+			buf = kmalloc(USB_DT_DEVICE_SIZE, GFP_KERNEL);
+			if (!buf)
+				return -ENOMEM;
+
+			status = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+				USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
+				USB_DT_DEVICE << 8, 0,
+				buf, USB_DT_DEVICE_SIZE,
+				USB_CTRL_GET_TIMEOUT);
+			kfree(buf);
+		}
+		break;
+	case TEST_SINGLE_STEP_SET_FEATURE:
+		/* GetDescriptor's SETUP request -> 15secs delay -> IN & STATUS
+		 * Issue request to ehci root hub driver with portnum = 1
+		 */
+		status = usb_control_msg(rh_udev, usb_sndctrlpipe(rh_udev, 0),
+			USB_REQ_SET_FEATURE, USB_RT_PORT, USB_PORT_FEAT_TEST,
+			(6 << 8) | 1, NULL, 0, 60 * 1000);
+
+		break;
+	default:
+		pr_err("%s: undefined test mode ( %X )\n", __func__, test_mode);
+		return -EINVAL;
+	}
+
+	return (status < 0) ? status : 0;
+}
+
+static void ehset_disconnect(struct usb_interface *intf)
+{
+}
+
+static struct usb_device_id ehset_id_table[] = {
+	{ USB_DEVICE(0x1a0a, TEST_SE0_NAK_PID) },
+	{ USB_DEVICE(0x1a0a, TEST_J_PID) },
+	{ USB_DEVICE(0x1a0a, TEST_K_PID) },
+	{ USB_DEVICE(0x1a0a, TEST_PACKET_PID) },
+	{ USB_DEVICE(0x1a0a, TEST_HS_HOST_PORT_SUSPEND_RESUME) },
+	{ USB_DEVICE(0x1a0a, TEST_SINGLE_STEP_GET_DEV_DESC) },
+	{ USB_DEVICE(0x1a0a, TEST_SINGLE_STEP_SET_FEATURE) },
+	{ }			/* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, ehset_id_table);
+
+static struct usb_driver ehset_driver = {
+	.name =		"usb_ehset_test",
+	.probe =	ehset_probe,
+	.disconnect =	ehset_disconnect,
+	.id_table =	ehset_id_table,
+};
+
+static int __init ehset_init(void)
+{
+	return usb_register(&ehset_driver);
+}
+
+static void __exit ehset_exit(void)
+{
+	usb_deregister(&ehset_driver);
+}
+
+module_init(ehset_init);
+module_exit(ehset_exit);
+
+MODULE_DESCRIPTION("USB Driver for EHSET Test Fixture");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 1309348..9cc6cb0 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -14,7 +14,7 @@
 	select TWL4030_USB if MACH_OMAP_3430SDP
 	select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA
 	select USB_OTG_UTILS
-	tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
+	bool 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
 	help
 	  Say Y here if your system has a dual role high speed USB
 	  controller based on the Mentor Graphics silicon IP.  Then
@@ -30,8 +30,8 @@
 
 	  If you do not know what this is, please say N.
 
-	  To compile this driver as a module, choose M here; the
-	  module will be called "musb-hdrc".
+#	  To compile this driver as a module, choose M here; the
+#	  module will be called "musb-hdrc".
 
 choice
 	prompt "Platform Glue Layer"
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index cd77719..0081182 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -74,6 +74,34 @@
 	  This transceiver supports high and full speed devices plus,
 	  in host mode, low speed.
 
+config USB_MSM_OTG_72K
+	bool "OTG support for Legacy Qualcomm on-chip USB controller"
+	depends on ARCH_MSM
+	select USB_OTG_UTILS
+	default USB_MSM_72K
+	help
+	  Enable this to support the USB OTG transceiver on MSM chips. It
+	  handles PHY initialization, clock management, low power mode and
+	  workarounds required after resetting the hardware. This driver is
+	  required even for peripheral-only or host-only mode configurations.
+	  It supports SRP and HNP when both gadget and host are selected.
+
+config MSM_OTG_ENABLE_A_WAIT_BCON_TIMEOUT
+	bool "Enable A-device timeout for B-device connection"
+	depends on USB_MSM_OTG_72K
+	default n
+	help
+	   The OTG specification allows the A-device to turn off VBUS if the
+	   B-device fails to signal a connect event before TA_WAIT_BCON
+	   (1.1 - 30 sec) expires. SRP detection is enabled and the hardware
+	   is put into low power mode upon this timeout.
+
+	   If you say yes, VBUS will be turned off if the B-device does not
+	   signal a connect within 30 sec. Otherwise VBUS is not turned off
+	   when a Micro-A cable is connected, but the hardware is still put
+	   into LPM. Say no if the leakage currents in your system are minimal.
+
+
 config TWL6030_USB
 	tristate "TWL6030 USB Transceiver Driver"
 	depends on TWL4030_CORE
@@ -121,6 +149,15 @@
 	  This driver is not supported on boards like trout which
 	  has an external PHY.
 
+config USB_MSM_ACA
+	bool "Support for Accessory Charger Adapter (ACA)"
+	depends on (USB_MSM_OTG || USB_MSM_OTG_72K) && ARCH_MSM
+	default n
+	help
+	  Accessory Charger Adapter (ACA) is a charger specified in the USB
+	  Battery Charging Specification (1.1). It enables OTG devices to
+	  charge while operating as a host or peripheral at the same time.
+
 config AB8500_USB
         tristate "AB8500 USB Transceiver Driver"
         depends on AB8500_CORE
diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile
index d2c0a7b..2984ee1 100644
--- a/drivers/usb/otg/Makefile
+++ b/drivers/usb/otg/Makefile
@@ -19,6 +19,7 @@
 obj-$(CONFIG_NOP_USB_XCEIV)	+= nop-usb-xceiv.o
 obj-$(CONFIG_USB_ULPI)		+= ulpi.o
 obj-$(CONFIG_USB_ULPI_VIEWPORT)	+= ulpi_viewport.o
+obj-$(CONFIG_USB_MSM_OTG_72K)	+= msm72k_otg.o
 obj-$(CONFIG_USB_MSM_OTG)	+= msm_otg.o
 obj-$(CONFIG_AB8500_USB)	+= ab8500-usb.o
 fsl_usb2_otg-objs		:= fsl_otg.o otg_fsm.o
diff --git a/drivers/usb/otg/msm72k_otg.c b/drivers/usb/otg/msm72k_otg.c
new file mode 100644
index 0000000..dddfa33
--- /dev/null
+++ b/drivers/usb/otg/msm72k_otg.c
@@ -0,0 +1,2957 @@
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/device.h>
+#include <linux/pm_qos_params.h>
+#include <mach/msm_hsusb_hw.h>
+#include <mach/msm72k_otg.h>
+#include <mach/msm_hsusb.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <mach/clk.h>
+#include <mach/msm_xo.h>
+
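+/*
+ * The USB_* register macros from msm_hsusb_hw.h are defined relative to
+ * MSM_USB_BASE, so map it to this controller's register base.
+ */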
+#define MSM_USB_BASE	(dev->regs)
+#define USB_LINK_RESET_TIMEOUT	(msecs_to_jiffies(10))
+#define DRIVER_NAME	"msm_otg"
+static void otg_reset(struct otg_transceiver *xceiv, int phy_reset);
+static void msm_otg_set_vbus_state(int online);
+static void msm_otg_set_id_state(int online);
+
+struct msm_otg *the_msm_otg;
+
+static int is_host(void)
+{
+	struct msm_otg *dev = the_msm_otg;
+
+	if (dev->pmic_id_notif_supp)
+		return dev->pmic_id_status ? 0 : 1;
+	else if (dev->pdata->otg_mode == OTG_ID)
+		return (OTGSC_ID & readl(USB_OTGSC)) ? 0 : 1;
+	else
+		return !test_bit(ID, &dev->inputs);
+}
+
+static int is_b_sess_vld(void)
+{
+	struct msm_otg *dev = the_msm_otg;
+
+	if (dev->pdata->otg_mode == OTG_ID)
+		return (OTGSC_BSV & readl(USB_OTGSC)) ? 1 : 0;
+	else
+		return test_bit(B_SESS_VLD, &dev->inputs);
+}
+
+static unsigned ulpi_read(struct msm_otg *dev, unsigned reg)
+{
+	unsigned ret, timeout = 100000;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/* initiate read operation */
+	writel(ULPI_RUN | ULPI_READ | ULPI_ADDR(reg),
+	       USB_ULPI_VIEWPORT);
+
+	/* wait for completion */
+	while ((readl(USB_ULPI_VIEWPORT) & ULPI_RUN) && (--timeout))
+		cpu_relax();
+
+	if (timeout == 0) {
+		pr_err("%s: timeout %08x\n", __func__,
+				 readl(USB_ULPI_VIEWPORT));
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return 0xffffffff;
+	}
+	ret = ULPI_DATA_READ(readl(USB_ULPI_VIEWPORT));
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return ret;
+}
+
+static int ulpi_write(struct msm_otg *dev, unsigned val, unsigned reg)
+{
+	unsigned timeout = 10000;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/* initiate write operation */
+	writel(ULPI_RUN | ULPI_WRITE |
+	       ULPI_ADDR(reg) | ULPI_DATA(val),
+	       USB_ULPI_VIEWPORT);
+
+	/* wait for completion */
+	while ((readl(USB_ULPI_VIEWPORT) & ULPI_RUN) && (--timeout))
+		;
+
+	if (timeout == 0) {
+		pr_err("%s: timeout\n", __func__);
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return -1;
+	}
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return 0;
+}
+
+static int usb_ulpi_write(struct otg_transceiver *xceiv, u32 val, u32 reg)
+{
+	struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg);
+
+	return ulpi_write(dev, val, reg);
+}
+
+static int usb_ulpi_read(struct otg_transceiver *xceiv, u32 reg)
+{
+	struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg);
+
+	return ulpi_read(dev, reg);
+}
+
+#ifdef CONFIG_USB_EHCI_MSM_72K
+static void enable_idgnd(struct msm_otg *dev)
+{
+	/* Do nothing if instead of ID pin, USER controls mode switch */
+	if (dev->pdata->otg_mode == OTG_USER_CONTROL)
+		return;
+
+	ulpi_write(dev, (1<<4), 0x0E);
+	ulpi_write(dev, (1<<4), 0x11);
+	writel(readl(USB_OTGSC) | OTGSC_IDIE, USB_OTGSC);
+}
+
+static void disable_idgnd(struct msm_otg *dev)
+{
+	/* Do nothing if instead of ID pin, USER controls mode switch */
+	if (dev->pdata->otg_mode == OTG_USER_CONTROL)
+		return;
+
+	ulpi_write(dev, (1<<4), 0x0F);
+	ulpi_write(dev, (1<<4), 0x12);
+	writel(readl(USB_OTGSC) & ~OTGSC_IDIE, USB_OTGSC);
+}
+#endif
+
+static void enable_idabc(struct msm_otg *dev)
+{
+#ifdef CONFIG_USB_MSM_ACA
+	ulpi_write(dev, (1<<5), 0x0E);
+	ulpi_write(dev, (1<<5), 0x11);
+#endif
+}
+static void disable_idabc(struct msm_otg *dev)
+{
+#ifdef CONFIG_USB_MSM_ACA
+	ulpi_write(dev, (1<<5), 0x0F);
+	ulpi_write(dev, (1<<5), 0x12);
+#endif
+}
+
+static void enable_sess_valid(struct msm_otg *dev)
+{
+	/* Do nothing if instead of ID pin, USER controls mode switch */
+	if (dev->pdata->otg_mode == OTG_USER_CONTROL)
+		return;
+
+	ulpi_write(dev, (1<<2), 0x0E);
+	ulpi_write(dev, (1<<2), 0x11);
+	writel(readl(USB_OTGSC) | OTGSC_BSVIE, USB_OTGSC);
+}
+
+static void disable_sess_valid(struct msm_otg *dev)
+{
+	/* Do nothing if instead of ID pin, USER controls mode switch */
+	if (dev->pdata->otg_mode == OTG_USER_CONTROL)
+		return;
+
+	ulpi_write(dev, (1<<2), 0x0F);
+	ulpi_write(dev, (1<<2), 0x12);
+	writel(readl(USB_OTGSC) & ~OTGSC_BSVIE, USB_OTGSC);
+}
+#ifdef CONFIG_USB_MSM_ACA
+static void set_aca_id_inputs(struct msm_otg *dev)
+{
+	u8		phy_ints;
+
+	phy_ints = ulpi_read(dev, 0x13);
+	if (phy_ints == -ETIMEDOUT)
+		return;
+
+	pr_debug("phy_ints = %x\n", phy_ints);
+	clear_bit(ID_A, &dev->inputs);
+	clear_bit(ID_B, &dev->inputs);
+	clear_bit(ID_C, &dev->inputs);
+	if (phy_id_state_a(phy_ints)) {
+		pr_debug("ID_A set\n");
+		set_bit(ID_A, &dev->inputs);
+		set_bit(A_BUS_REQ, &dev->inputs);
+	} else if (phy_id_state_b(phy_ints)) {
+		pr_debug("ID_B set\n");
+		set_bit(ID_B, &dev->inputs);
+	} else if (phy_id_state_c(phy_ints)) {
+		pr_debug("ID_C set\n");
+		set_bit(ID_C, &dev->inputs);
+	}
+	if (is_b_sess_vld())
+		set_bit(B_SESS_VLD, &dev->inputs);
+	else
+		clear_bit(B_SESS_VLD, &dev->inputs);
+}
+#define get_aca_bmaxpower(dev)		(dev->b_max_power)
+#define set_aca_bmaxpower(dev, power)	(dev->b_max_power = power)
+#else
+#define get_aca_bmaxpower(dev)		0
+#define set_aca_bmaxpower(dev, power)
+#endif
+static inline void set_pre_emphasis_level(struct msm_otg *dev)
+{
+	unsigned res = 0;
+
+	if (!dev->pdata || dev->pdata->pemp_level == PRE_EMPHASIS_DEFAULT)
+		return;
+
+	res = ulpi_read(dev, ULPI_CONFIG_REG3);
+	res &= ~(ULPI_PRE_EMPHASIS_MASK);
+	if (dev->pdata->pemp_level != PRE_EMPHASIS_DISABLE)
+		res |= dev->pdata->pemp_level;
+	ulpi_write(dev, res, ULPI_CONFIG_REG3);
+}
+
+static inline void set_hsdrv_slope(struct msm_otg *dev)
+{
+	unsigned res = 0;
+
+	if (!dev->pdata || dev->pdata->hsdrvslope == HS_DRV_SLOPE_DEFAULT)
+		return;
+
+	res = ulpi_read(dev, ULPI_CONFIG_REG3);
+	res &= ~(ULPI_HSDRVSLOPE_MASK);
+	res |= (dev->pdata->hsdrvslope & ULPI_HSDRVSLOPE_MASK);
+	ulpi_write(dev, res, ULPI_CONFIG_REG3);
+}
+
+static inline void set_cdr_auto_reset(struct msm_otg *dev)
+{
+	unsigned res = 0;
+
+	if (!dev->pdata || dev->pdata->cdr_autoreset == CDR_AUTO_RESET_DEFAULT)
+		return;
+
+	res = ulpi_read(dev, ULPI_DIGOUT_CTRL);
+	if (dev->pdata->cdr_autoreset == CDR_AUTO_RESET_ENABLE)
+		res &=  ~ULPI_CDR_AUTORESET;
+	else
+		res |=  ULPI_CDR_AUTORESET;
+	ulpi_write(dev, res, ULPI_DIGOUT_CTRL);
+}
+
+static inline void set_se1_gating(struct msm_otg *dev)
+{
+	unsigned res = 0;
+
+	if (!dev->pdata || dev->pdata->se1_gating == SE1_GATING_DEFAULT)
+		return;
+
+	res = ulpi_read(dev, ULPI_DIGOUT_CTRL);
+	if (dev->pdata->se1_gating == SE1_GATING_ENABLE)
+		res &=  ~ULPI_SE1_GATE;
+	else
+		res |=  ULPI_SE1_GATE;
+	ulpi_write(dev, res, ULPI_DIGOUT_CTRL);
+}
+static inline void set_driver_amplitude(struct msm_otg *dev)
+{
+	unsigned res = 0;
+
+	if (!dev->pdata || dev->pdata->drv_ampl == HS_DRV_AMPLITUDE_DEFAULT)
+		return;
+
+	res = ulpi_read(dev, ULPI_CONFIG_REG2);
+	res &= ~ULPI_DRV_AMPL_MASK;
+	if (dev->pdata->drv_ampl != HS_DRV_AMPLITUDE_ZERO_PERCENT)
+		res |= dev->pdata->drv_ampl;
+	ulpi_write(dev, res, ULPI_CONFIG_REG2);
+}
+
+static const char *state_string(enum usb_otg_state state)
+{
+	switch (state) {
+	case OTG_STATE_A_IDLE:		return "a_idle";
+	case OTG_STATE_A_WAIT_VRISE:	return "a_wait_vrise";
+	case OTG_STATE_A_WAIT_BCON:	return "a_wait_bcon";
+	case OTG_STATE_A_HOST:		return "a_host";
+	case OTG_STATE_A_SUSPEND:	return "a_suspend";
+	case OTG_STATE_A_PERIPHERAL:	return "a_peripheral";
+	case OTG_STATE_A_WAIT_VFALL:	return "a_wait_vfall";
+	case OTG_STATE_A_VBUS_ERR:	return "a_vbus_err";
+	case OTG_STATE_B_IDLE:		return "b_idle";
+	case OTG_STATE_B_SRP_INIT:	return "b_srp_init";
+	case OTG_STATE_B_PERIPHERAL:	return "b_peripheral";
+	case OTG_STATE_B_WAIT_ACON:	return "b_wait_acon";
+	case OTG_STATE_B_HOST:		return "b_host";
+	default:			return "UNDEFINED";
+	}
+}
+
+static const char *timer_string(int bit)
+{
+	switch (bit) {
+	case A_WAIT_VRISE:		return "a_wait_vrise";
+	case A_WAIT_VFALL:		return "a_wait_vfall";
+	case B_SRP_FAIL:		return "b_srp_fail";
+	case A_WAIT_BCON:		return "a_wait_bcon";
+	case A_AIDL_BDIS:		return "a_aidl_bdis";
+	case A_BIDL_ADIS:		return "a_bidl_adis";
+	case B_ASE0_BRST:		return "b_ase0_brst";
+	default:			return "UNDEFINED";
+	}
+}
+
+/* Prevent idle power collapse(pc) while operating in peripheral mode */
+static void otg_pm_qos_update_latency(struct msm_otg *dev, int vote)
+{
+	struct msm_otg_platform_data *pdata = dev->pdata;
+	u32 swfi_latency = 0;
+
+	if (pdata)
+		swfi_latency = pdata->swfi_latency + 1;
+
+	if (vote)
+		pm_qos_update_request(&pdata->pm_qos_req_dma,
+				swfi_latency);
+	else
+		pm_qos_update_request(&pdata->pm_qos_req_dma,
+				PM_QOS_DEFAULT_VALUE);
+}
+
+/* If USB Core is running its protocol engine based on PCLK,
+ * PCLK must be running at >60 MHz for correct HSUSB operation, and the
+ * USB core cannot tolerate frequency changes on PCLK. For such
+ * USB cores, vote for the maximum clk frequency on the pclk source.
+ */
+static void msm_otg_vote_for_pclk_source(struct msm_otg *dev, int vote)
+{
+	if (dev->pclk_src && pclk_requires_voting(&dev->otg)) {
+
+		if (vote)
+			clk_enable(dev->pclk_src);
+		else
+			clk_disable(dev->pclk_src);
+	}
+}
+
+/* Controller gives an interrupt every 1 msec if 1MSIE is set in OTGSC.
+ * This interrupt can be used as a timer source and OTG timers can be
+ * implemented. But hrtimers on MSM hardware can give at least 1/32 kHz
+ * precision. This precision is more than enough for OTG timers.
+ */
+static enum hrtimer_restart msm_otg_timer_func(struct hrtimer *_timer)
+{
+	struct msm_otg *dev = container_of(_timer, struct msm_otg, timer);
+
+	/* Phy lockup issues are observed when VBUS Valid interrupt is
+	 * enabled. Hence set A_VBUS_VLD upon timer expiration.
+	 */
+	if (dev->active_tmout == A_WAIT_VRISE)
+		set_bit(A_VBUS_VLD, &dev->inputs);
+	else
+		set_bit(dev->active_tmout, &dev->tmouts);
+
+	pr_debug("expired %s timer\n", timer_string(dev->active_tmout));
+	queue_work(dev->wq, &dev->sm_work);
+	return HRTIMER_NORESTART;
+}
+
+static void msm_otg_del_timer(struct msm_otg *dev)
+{
+	int bit = dev->active_tmout;
+
+	pr_debug("deleting %s timer. remaining %lld msec \n", timer_string(bit),
+			div_s64(ktime_to_us(hrtimer_get_remaining(&dev->timer)),
+					1000));
+	hrtimer_cancel(&dev->timer);
+	clear_bit(bit, &dev->tmouts);
+}
+
+static void msm_otg_start_timer(struct msm_otg *dev, int time, int bit)
+{
+	clear_bit(bit, &dev->tmouts);
+	dev->active_tmout = bit;
+	pr_debug("starting %s timer\n", timer_string(bit));
+	hrtimer_start(&dev->timer,
+			ktime_set(time / 1000, (time % 1000) * 1000000),
+			HRTIMER_MODE_REL);
+}
+
+/* No two otg timers run in parallel. So one hrtimer is sufficient */
+static void msm_otg_init_timer(struct msm_otg *dev)
+{
+	hrtimer_init(&dev->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	dev->timer.function = msm_otg_timer_func;
+}
+
+static const char *event_string(enum usb_otg_event event)
+{
+	switch (event) {
+	case OTG_EVENT_DEV_CONN_TMOUT:
+		return "DEV_CONN_TMOUT";
+	case OTG_EVENT_NO_RESP_FOR_HNP_ENABLE:
+		return "NO_RESP_FOR_HNP_ENABLE";
+	case OTG_EVENT_HUB_NOT_SUPPORTED:
+		return "HUB_NOT_SUPPORTED";
+	case OTG_EVENT_DEV_NOT_SUPPORTED:
+		return "DEV_NOT_SUPPORTED";
+	case OTG_EVENT_HNP_FAILED:
+		return "HNP_FAILED";
+	case OTG_EVENT_NO_RESP_FOR_SRP:
+		return "NO_RESP_FOR_SRP";
+	default:
+		return "UNDEFINED";
+	}
+}
+
+static int msm_otg_send_event(struct otg_transceiver *xceiv,
+				enum usb_otg_event event)
+{
+	char module_name[16];
+	char udev_event[128];
+	char *envp[] = { module_name, udev_event, NULL };
+	int ret;
+
+	pr_debug("sending %s event\n", event_string(event));
+
+	snprintf(module_name, 16, "MODULE=%s", DRIVER_NAME);
+	snprintf(udev_event, 128, "EVENT=%s", event_string(event));
+	ret = kobject_uevent_env(&xceiv->dev->kobj, KOBJ_CHANGE, envp);
+	if (ret < 0)
+		pr_info("uevent sending failed with ret = %d\n", ret);
+	return ret;
+}
+
+static int msm_otg_start_hnp(struct otg_transceiver *xceiv)
+{
+	struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg);
+	enum usb_otg_state state;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	state = dev->otg.state;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	if (state != OTG_STATE_A_HOST) {
+		pr_err("HNP can not be initiated in %s state\n",
+				state_string(state));
+		return -EINVAL;
+	}
+
+	pr_debug("A-Host: HNP initiated\n");
+	clear_bit(A_BUS_REQ, &dev->inputs);
+	wake_lock(&dev->wlock);
+	queue_work(dev->wq, &dev->sm_work);
+	return 0;
+}
+
+static int msm_otg_start_srp(struct otg_transceiver *xceiv)
+{
+	struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg);
+	u32	val;
+	int ret = 0;
+	enum usb_otg_state state;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	state = dev->otg.state;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	if (state != OTG_STATE_B_IDLE) {
+		pr_err("SRP can not be initiated in %s state\n",
+				state_string(state));
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if ((jiffies - dev->b_last_se0_sess) < msecs_to_jiffies(TB_SRP_INIT)) {
+		pr_debug("initial conditions of SRP are not met. Try again "
+				"after some time\n");
+		ret = -EAGAIN;
+		goto out;
+	}
+
+	/* Hardware auto-assist data pulsing: data pulse is given
+	 * for 7 msec; wait for vbus
+	 */
+	val = readl(USB_OTGSC);
+	writel((val & ~OTGSC_INTR_STS_MASK) | OTGSC_HADP, USB_OTGSC);
+
+	/* VBUS pulsing is obsolete in the OTG 2.0 supplement */
+out:
+	return ret;
+}
+
+static int msm_otg_set_power(struct otg_transceiver *xceiv, unsigned mA)
+{
+	static enum chg_type 	curr_chg = USB_CHG_TYPE__INVALID;
+	struct msm_otg		*dev = container_of(xceiv, struct msm_otg, otg);
+	struct msm_otg_platform_data *pdata = dev->pdata;
+	enum chg_type 		new_chg = atomic_read(&dev->chg_type);
+	unsigned 		charge = mA;
+
+	/* Call chg_connected only if the charger has changed */
+	if (new_chg != curr_chg && pdata->chg_connected) {
+		curr_chg = new_chg;
+		pdata->chg_connected(new_chg);
+	}
+
+	/* Always use USB_IDCHG_MAX for charging in ID_B and ID_C */
+	if (test_bit(ID_C, &dev->inputs) ||
+				test_bit(ID_B, &dev->inputs))
+		charge = USB_IDCHG_MAX;
+
+	pr_debug("Charging with %dmA current\n", charge);
+	/* Call vbus_draw only if the charger is of known type and also
+	 * ignore request to stop charging as a result of suspend interrupt
+	 * when wall-charger is used.
+	 */
+	if (pdata->chg_vbus_draw && new_chg != USB_CHG_TYPE__INVALID &&
+		(charge || new_chg != USB_CHG_TYPE__WALLCHARGER))
+			pdata->chg_vbus_draw(charge);
+
+	if (new_chg == USB_CHG_TYPE__WALLCHARGER) {
+		wake_lock(&dev->wlock);
+		queue_work(dev->wq, &dev->sm_work);
+	}
+
+	return 0;
+}
+
+static int msm_otg_set_clk(struct otg_transceiver *xceiv, int on)
+{
+	struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg);
+
+	if (!dev || (dev != the_msm_otg))
+		return -ENODEV;
+
+	if (on)
+		/* enable clocks */
+		clk_enable(dev->hs_clk);
+	else
+		clk_disable(dev->hs_clk);
+
+	return 0;
+}
+static void msm_otg_start_peripheral(struct otg_transceiver *xceiv, int on)
+{
+	struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg);
+	struct msm_otg_platform_data *pdata = dev->pdata;
+
+	if (!xceiv->gadget)
+		return;
+
+	if (on) {
+		if (pdata->setup_gpio)
+			pdata->setup_gpio(USB_SWITCH_PERIPHERAL);
+		/* vote for minimum dma_latency to prevent idle
+		 * power collapse(pc) while running in peripheral mode.
+		 */
+		otg_pm_qos_update_latency(dev, 1);
+
+		/* increment the clk reference count so that
+		 * it would be still on when disabled from
+		 * low power mode routine
+		 */
+		if (dev->pdata->pclk_required_during_lpm)
+			clk_enable(dev->hs_pclk);
+
+		usb_gadget_vbus_connect(xceiv->gadget);
+	} else {
+		atomic_set(&dev->chg_type, USB_CHG_TYPE__INVALID);
+		usb_gadget_vbus_disconnect(xceiv->gadget);
+
+		/* decrement the clk reference count so that
+		 * it would be off when disabled from
+		 * low power mode routine
+		 */
+		if (dev->pdata->pclk_required_during_lpm)
+			clk_disable(dev->hs_pclk);
+
+		otg_pm_qos_update_latency(dev, 0);
+		if (pdata->setup_gpio)
+			pdata->setup_gpio(USB_SWITCH_DISABLE);
+	}
+}
+
+static void msm_otg_start_host(struct otg_transceiver *xceiv, int on)
+{
+	struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg);
+	struct msm_otg_platform_data *pdata = dev->pdata;
+
+	if (!xceiv->host)
+		return;
+
+	if (dev->start_host) {
+		/* Some targets, e.g. ST1.5, use GPIO to choose b/w connector */
+		if (on && pdata->setup_gpio)
+			pdata->setup_gpio(USB_SWITCH_HOST);
+
+		/* increment or decrement the clk reference count
+		 * to avoid usb h/w lockup issues when low power
+		 * mode is initiated and vbus is on.
+		 */
+		if (dev->pdata->pclk_required_during_lpm) {
+			if (on)
+				clk_enable(dev->hs_pclk);
+			else
+				clk_disable(dev->hs_pclk);
+		}
+
+		dev->start_host(xceiv->host, on);
+
+		if (!on && pdata->setup_gpio)
+			pdata->setup_gpio(USB_SWITCH_DISABLE);
+	}
+}
+
+static int msm_otg_suspend(struct msm_otg *dev)
+{
+	unsigned long timeout;
+	bool host_bus_suspend;
+	unsigned ret;
+	enum chg_type chg_type = atomic_read(&dev->chg_type);
+	unsigned long flags;
+
+	disable_irq(dev->irq);
+	if (atomic_read(&dev->in_lpm))
+		goto out;
+#ifdef CONFIG_USB_MSM_ACA
+	/*
+	 * ACA interrupts are disabled before entering into LPM.
+	 * If LPM is allowed in host mode with accessory charger
+	 * connected or only accessory charger is connected,
+	 * there is a chance that charger is removed and we will
+	 * not know about it.
+	 *
+	 * REVISIT
+	 *
+	 * Allowing LPM in case of gadget bus suspend is tricky.
+	 * Bus suspend can happen in two states.
+	 * 1. ID_float:  Allowing LPM has pros and cons. If LPM is allowed
+	 * and accessory charger is connected, we miss ID_float --> ID_C
+	 * transition where we could draw large amount of current
+	 * compared to the suspend current.
+	 * 2. ID_C: We can not allow LPM. If accessory charger is removed
+	 * we should not draw more than what host could supply which will
+	 * be less compared to accessory charger.
+	 *
+	 * For simplicity, LPM is not allowed in bus suspend.
+	 */
+#ifndef CONFIG_USB_MSM_STANDARD_ACA
+	/*
+	 * RID_A and IdGnd states are only possible with standard ACA.  We can
+	 * exit from low power mode with !BSV or IdGnd interrupt.  Hence LPM
+	 * is allowed.
+	 */
+	if ((test_bit(ID, &dev->inputs) && test_bit(B_SESS_VLD, &dev->inputs) &&
+			chg_type != USB_CHG_TYPE__WALLCHARGER) ||
+			test_bit(ID_A, &dev->inputs))
+		goto out;
+#endif
+	/* Disable ID_abc interrupts, otherwise they cause spurious interrupts */
+	disable_idabc(dev);
+#endif
+	ulpi_read(dev, 0x14);/* clear PHY interrupt latch register */
+
+	/*
+	 * Turn on PHY comparators if,
+	 * 1. USB wall charger is connected (bus suspend is not supported)
+	 * 2. Host bus suspend
+	 * 3. host is supported, but, id is not routed to pmic
+	 * 4. peripheral is supported, but, vbus is not routed to pmic
+	 */
+	host_bus_suspend = dev->otg.host && is_host();
+	if ((dev->otg.gadget && chg_type == USB_CHG_TYPE__WALLCHARGER) ||
+		host_bus_suspend ||
+		(dev->otg.host && !dev->pmic_id_notif_supp) ||
+		(dev->otg.gadget && !dev->pmic_vbus_notif_supp)) {
+		ulpi_write(dev, 0x01, 0x30);
+	}
+
+	ulpi_write(dev, 0x08, 0x09);/* turn off PLL on integrated phy */
+
+	timeout = jiffies + msecs_to_jiffies(500);
+	disable_phy_clk();
+	while (!is_phy_clk_disabled()) {
+		if (time_after(jiffies, timeout)) {
+			pr_err("%s: Unable to suspend phy\n", __func__);
+			/*
+			 * Start otg state machine in default state upon
+			 * phy suspend failure
+			 */
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_UNDEFINED;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			queue_work(dev->wq, &dev->sm_work);
+			goto out;
+		}
+		msleep(1);
+		/* check if there are any pending interrupts*/
+		if (((readl(USB_OTGSC) & OTGSC_INTR_MASK) >> 8) &
+				readl(USB_OTGSC)) {
+			enable_idabc(dev);
+			goto out;
+		}
+	}
+
+	writel(readl(USB_USBCMD) | ASYNC_INTR_CTRL | ULPI_STP_CTRL, USB_USBCMD);
+	/* Ensure that above operation is completed before turning off clocks */
+	mb();
+
+	if (dev->hs_pclk)
+		clk_disable(dev->hs_pclk);
+	if (dev->hs_cclk)
+		clk_disable(dev->hs_cclk);
+	/* usb phy no longer requires the TCXO clock, hence vote to disable TCXO */
+	ret = msm_xo_mode_vote(dev->xo_handle, MSM_XO_MODE_OFF);
+	if (ret)
+		pr_err("%s failed to devote for "
+			"TCXO D1 buffer %d\n", __func__, ret);
+
+	if (device_may_wakeup(dev->otg.dev)) {
+		enable_irq_wake(dev->irq);
+		if (dev->vbus_on_irq)
+			enable_irq_wake(dev->vbus_on_irq);
+	}
+
+	msm_otg_vote_for_pclk_source(dev, 0);
+
+	atomic_set(&dev->in_lpm, 1);
+
+	if (!host_bus_suspend && dev->pmic_vbus_notif_supp) {
+		pr_debug("phy can power collapse: (%d)\n",
+			can_phy_power_collapse(dev));
+		if (can_phy_power_collapse(dev) && dev->pdata->ldo_enable) {
+			pr_debug("disabling the regulators\n");
+			dev->pdata->ldo_enable(0);
+		}
+	}
+
+	/* phy can interrupt when vddcx is at 0.75V, so irrespective
+	 * of pmic notification support, configure vddcx @ 0.75V
+	 */
+	if (dev->pdata->config_vddcx)
+		dev->pdata->config_vddcx(0);
+	pr_info("%s: usb in low power mode\n", __func__);
+
+out:
+	enable_irq(dev->irq);
+
+	return 0;
+}
+
+static int msm_otg_resume(struct msm_otg *dev)
+{
+	unsigned temp;
+	unsigned ret;
+
+	if (!atomic_read(&dev->in_lpm))
+		return 0;
+	/* vote for vddcx, as PHY cannot tolerate vddcx below 1.0V */
+	if (dev->pdata->config_vddcx) {
+		ret = dev->pdata->config_vddcx(1);
+		if (ret) {
+			pr_err("%s: unable to enable vddcx digital core:%d\n",
+				__func__, ret);
+		}
+	}
+	if (dev->pdata->ldo_set_voltage)
+		dev->pdata->ldo_set_voltage(3400);
+
+	/* Vote for TCXO when waking up the phy */
+	ret = msm_xo_mode_vote(dev->xo_handle, MSM_XO_MODE_ON);
+	if (ret)
+		pr_err("%s failed to vote for "
+			"TCXO D1 buffer %d\n", __func__, ret);
+
+	msm_otg_vote_for_pclk_source(dev, 1);
+
+	if (dev->hs_pclk)
+		clk_enable(dev->hs_pclk);
+	if (dev->hs_cclk)
+		clk_enable(dev->hs_cclk);
+
+	temp = readl(USB_USBCMD);
+	temp &= ~ASYNC_INTR_CTRL;
+	temp &= ~ULPI_STP_CTRL;
+	writel(temp, USB_USBCMD);
+
+	if (device_may_wakeup(dev->otg.dev)) {
+		disable_irq_wake(dev->irq);
+		if (dev->vbus_on_irq)
+			disable_irq_wake(dev->vbus_on_irq);
+	}
+
+	atomic_set(&dev->in_lpm, 0);
+
+	pr_info("%s: usb exited from low power mode\n", __func__);
+
+	return 0;
+}
+
+static void msm_otg_get_resume(struct msm_otg *dev)
+{
+#ifdef CONFIG_PM_RUNTIME
+	pm_runtime_get_noresume(dev->otg.dev);
+	pm_runtime_resume(dev->otg.dev);
+#else
+	msm_otg_resume(dev);
+#endif
+}
+
+static void msm_otg_put_suspend(struct msm_otg *dev)
+{
+#ifdef CONFIG_PM_RUNTIME
+	pm_runtime_put_sync(dev->otg.dev);
+#else
+	msm_otg_suspend(dev);
+#endif
+}
+
+static void msm_otg_resume_w(struct work_struct *w)
+{
+	struct msm_otg	*dev = container_of(w, struct msm_otg, otg_resume_work);
+	unsigned long timeout;
+
+	msm_otg_get_resume(dev);
+
+	if (!is_phy_clk_disabled())
+		goto phy_resumed;
+
+	timeout = jiffies + usecs_to_jiffies(100);
+	enable_phy_clk();
+	while (is_phy_clk_disabled() || !is_phy_active()) {
+		if (time_after(jiffies, timeout)) {
+			pr_err("%s: Unable to wakeup phy. is_phy_active: %x\n",
+				 __func__, !!is_phy_active());
+			/* Reset both phy and link */
+			otg_reset(&dev->otg, 1);
+			break;
+		}
+		udelay(10);
+	}
+
+phy_resumed:
+	/* Enable Idabc interrupts as these were disabled before entering LPM */
+	enable_idabc(dev);
+
+	/* If resume signalling finishes before lpm exit, PCD is not set in
+	 * USBSTS register. Drive resume signal to the downstream device now
+	 * so that host driver can process the upcoming port change interrupt.*/
+	if (is_host() || test_bit(ID_A, &dev->inputs)) {
+		writel(readl(USB_PORTSC) | PORTSC_FPR, USB_PORTSC);
+		msm_otg_start_host(&dev->otg, REQUEST_RESUME);
+	}
+
+	/* Enable irq which was disabled before scheduling this work.
+	 * But don't release wake_lock, as we got async interrupt and
+	 * there will be some work pending for OTG state machine.
+	 */
+	enable_irq(dev->irq);
+}
+
+static int msm_otg_set_suspend(struct otg_transceiver *xceiv, int suspend)
+{
+	struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg);
+	enum usb_otg_state state;
+	unsigned long flags;
+
+	if (!dev || (dev != the_msm_otg))
+		return -ENODEV;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	state = dev->otg.state;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	pr_debug("suspend request in state: %s\n",
+			state_string(state));
+
+	if (suspend) {
+		switch (state) {
+#ifndef CONFIG_MSM_OTG_ENABLE_A_WAIT_BCON_TIMEOUT
+		case OTG_STATE_A_WAIT_BCON:
+			if (test_bit(ID_A, &dev->inputs))
+				msm_otg_set_power(xceiv, USB_IDCHG_MIN - 100);
+			msm_otg_put_suspend(dev);
+			break;
+#endif
+		case OTG_STATE_A_HOST:
+			clear_bit(A_BUS_REQ, &dev->inputs);
+			wake_lock(&dev->wlock);
+			queue_work(dev->wq, &dev->sm_work);
+			break;
+		case OTG_STATE_B_PERIPHERAL:
+			if (xceiv->gadget->b_hnp_enable) {
+				set_bit(A_BUS_SUSPEND, &dev->inputs);
+				set_bit(B_BUS_REQ, &dev->inputs);
+				wake_lock(&dev->wlock);
+				queue_work(dev->wq, &dev->sm_work);
+			}
+			break;
+		case OTG_STATE_A_PERIPHERAL:
+			msm_otg_start_timer(dev, TA_BIDL_ADIS,
+					A_BIDL_ADIS);
+			break;
+		default:
+			break;
+		}
+	} else {
+		unsigned long timeout;
+
+		switch (state) {
+		case OTG_STATE_A_PERIPHERAL:
+			/* A-peripheral observed activity on bus.
+			 * clear A_BIDL_ADIS timer.
+			 */
+			msm_otg_del_timer(dev);
+			break;
+		case OTG_STATE_A_SUSPEND:
+			/* Remote wakeup or resume */
+			set_bit(A_BUS_REQ, &dev->inputs);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_HOST;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			if (test_bit(ID_A, &dev->inputs) &&
+				(get_aca_bmaxpower(dev) < USB_IDCHG_MIN))
+				msm_otg_set_power(xceiv,
+					USB_IDCHG_MIN - get_aca_bmaxpower(dev));
+			break;
+		default:
+			break;
+		}
+
+		if (suspend == atomic_read(&dev->in_lpm))
+			return 0;
+
+		disable_irq(dev->irq);
+		if (dev->pmic_vbus_notif_supp)
+			if (can_phy_power_collapse(dev) &&
+					dev->pdata->ldo_enable)
+				dev->pdata->ldo_enable(1);
+
+		msm_otg_get_resume(dev);
+
+		if (!is_phy_clk_disabled())
+			goto out;
+
+		timeout = jiffies + usecs_to_jiffies(100);
+		enable_phy_clk();
+		while (is_phy_clk_disabled() || !is_phy_active()) {
+			if (time_after(jiffies, timeout)) {
+				pr_err("%s: Unable to wakeup phy. "
+					"is_phy_active: %x\n",
+					__func__, !!is_phy_active());
+				/* Reset both phy and link */
+				otg_reset(&dev->otg, 1);
+				break;
+			}
+			udelay(10);
+		}
+out:
+		enable_idabc(dev);
+		enable_irq(dev->irq);
+
+	}
+
+	return 0;
+}
+
+static int msm_otg_set_peripheral(struct otg_transceiver *xceiv,
+			struct usb_gadget *gadget)
+{
+	struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg);
+
+	if (!dev || (dev != the_msm_otg))
+		return -ENODEV;
+
+	if (!gadget) {
+		msm_otg_start_peripheral(xceiv, 0);
+		dev->otg.gadget = 0;
+		disable_sess_valid(dev);
+		if (!dev->otg.host)
+			disable_idabc(dev);
+		return 0;
+	}
+	dev->otg.gadget = gadget;
+	pr_info("peripheral driver registered w/ transceiver\n");
+
+	wake_lock(&dev->wlock);
+	queue_work(dev->wq, &dev->sm_work);
+	return 0;
+}
+
+#ifdef CONFIG_USB_EHCI_MSM_72K
+static int usbdev_notify(struct notifier_block *self,
+			unsigned long action, void *device)
+{
+	enum usb_otg_state state;
+	struct msm_otg *dev = container_of(self, struct msm_otg, usbdev_nb);
+	struct usb_device *udev = device;
+	int work = 1;
+	unsigned long flags;
+
+	/* Only interested in devices connected directly
+	 * to the root hub.
+	 */
+	if (!udev->parent || udev->parent->parent)
+		goto out;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	state = dev->otg.state;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	switch (state) {
+	case OTG_STATE_A_WAIT_BCON:
+		if (action == USB_DEVICE_ADD) {
+			pr_debug("B_CONN set\n");
+			set_bit(B_CONN, &dev->inputs);
+			if (udev->actconfig) {
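+				/* bMaxPower is in units of 2 mA */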
+				set_aca_bmaxpower(dev,
+					udev->actconfig->desc.bMaxPower * 2);
+				goto do_work;
+			}
+			if (udev->portnum == udev->bus->otg_port)
+				set_aca_bmaxpower(dev, USB_IB_UNCFG);
+			else
+				set_aca_bmaxpower(dev, 100);
+		}
+		break;
+	case OTG_STATE_A_HOST:
+		if (action == USB_DEVICE_REMOVE) {
+			pr_debug("B_CONN clear\n");
+			clear_bit(B_CONN, &dev->inputs);
+			set_aca_bmaxpower(dev, 0);
+		}
+		break;
+	default:
+		work = 0;
+		break;
+	}
+do_work:
+	if (work) {
+		wake_lock(&dev->wlock);
+		queue_work(dev->wq, &dev->sm_work);
+	}
+out:
+	return NOTIFY_OK;
+}
+
+static int msm_otg_set_host(struct otg_transceiver *xceiv, struct usb_bus *host)
+{
+	struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg);
+
+	if (!dev || (dev != the_msm_otg))
+		return -ENODEV;
+
+	if (!dev->start_host)
+		return -ENODEV;
+
+	if (!host) {
+		msm_otg_start_host(xceiv, REQUEST_STOP);
+		usb_unregister_notify(&dev->usbdev_nb);
+		dev->otg.host = 0;
+		dev->start_host = 0;
+		disable_idgnd(dev);
+		if (!dev->otg.gadget)
+			disable_idabc(dev);
+		return 0;
+	}
+#ifdef CONFIG_USB_OTG
+	host->otg_port = 1;
+#endif
+	dev->usbdev_nb.notifier_call = usbdev_notify;
+	usb_register_notify(&dev->usbdev_nb);
+	dev->otg.host = host;
+	pr_info("host driver registered w/ transceiver\n");
+
+#ifndef CONFIG_USB_MSM_72K
+	wake_lock(&dev->wlock);
+	queue_work(dev->wq, &dev->sm_work);
+#endif
+	return 0;
+}
+#endif
+
+void msm_otg_set_id_state(int id)
+{
+	struct msm_otg *dev = the_msm_otg;
+	unsigned long flags;
+
+	if (id == dev->pmic_id_status)
+		return;
+
+	if (id) {
+		set_bit(ID, &dev->inputs);
+		dev->pmic_id_status = 1;
+	} else {
+		clear_bit(ID, &dev->inputs);
+		set_bit(A_BUS_REQ, &dev->inputs);
+		dev->pmic_id_status = 0;
+	}
+	spin_lock_irqsave(&dev->lock, flags);
+	if (dev->otg.state != OTG_STATE_UNDEFINED) {
+		wake_lock(&dev->wlock);
+		queue_work(dev->wq, &dev->sm_work);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+void msm_otg_set_vbus_state(int online)
+{
+	struct msm_otg *dev = the_msm_otg;
+
+	if (!atomic_read(&dev->in_lpm) || !online)
+		return;
+
+	wake_lock(&dev->wlock);
+	set_bit(B_SESS_VLD, &dev->inputs);
+	queue_work(dev->wq, &dev->sm_work);
+}
+
+static irqreturn_t msm_otg_irq(int irq, void *data)
+{
+	struct msm_otg *dev = data;
+	u32 otgsc, sts, pc, sts_mask;
+	irqreturn_t ret = IRQ_HANDLED;
+	int work = 0;
+	enum usb_otg_state state;
+	unsigned long flags;
+
+	if (atomic_read(&dev->in_lpm)) {
+		disable_irq_nosync(dev->irq);
+		wake_lock(&dev->wlock);
+		queue_work(dev->wq, &dev->otg_resume_work);
+		goto out;
+	}
+
+	/* Return immediately if instead of ID pin, USER controls mode switch */
+	if (dev->pdata->otg_mode == OTG_USER_CONTROL)
+		return IRQ_NONE;
+
+
+	otgsc = readl(USB_OTGSC);
+	sts = readl(USB_USBSTS);
+
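+	/*
+	 * In OTGSC the interrupt-enable bits sit 8 bits above their
+	 * corresponding interrupt-status bits, so shifting the enabled
+	 * mask right by 8 aligns it with the status bits tested below.
+	 */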
+	sts_mask = (otgsc & OTGSC_INTR_MASK) >> 8;
+
+	if (!((otgsc & sts_mask) || (sts & STS_PCI))) {
+		ret = IRQ_NONE;
+		goto out;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+	state = dev->otg.state;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	pr_debug("IRQ state: %s\n", state_string(state));
+	pr_debug("otgsc = %x\n", otgsc);
+
+	if ((otgsc & OTGSC_IDIE) && (otgsc & OTGSC_IDIS)) {
+		if (otgsc & OTGSC_ID) {
+			pr_debug("Id set\n");
+			set_bit(ID, &dev->inputs);
+		} else {
+			pr_debug("Id clear\n");
+			/* Assert a_bus_req to supply power on
+			 * VBUS when Micro/Mini-A cable is connected
+			 * without user intervention.
+			 */
+			set_bit(A_BUS_REQ, &dev->inputs);
+			clear_bit(ID, &dev->inputs);
+		}
+		writel(otgsc, USB_OTGSC);
+		work = 1;
+	} else if (otgsc & OTGSC_BSVIS) {
+		writel(otgsc, USB_OTGSC);
+		/* BSV interrupt comes when operating as an A-device
+		 * (VBUS on/off).
+		 * But, handle BSV when charger is removed from ACA in ID_A
+		 */
+		if ((state >= OTG_STATE_A_IDLE) &&
+			!test_bit(ID_A, &dev->inputs))
+			goto out;
+		if (otgsc & OTGSC_BSV) {
+			pr_debug("BSV set\n");
+			set_bit(B_SESS_VLD, &dev->inputs);
+		} else {
+			pr_debug("BSV clear\n");
+			clear_bit(B_SESS_VLD, &dev->inputs);
+		}
+		work = 1;
+	} else if (otgsc & OTGSC_DPIS) {
+		pr_debug("DPIS detected\n");
+		writel(otgsc, USB_OTGSC);
+		set_bit(A_SRP_DET, &dev->inputs);
+		set_bit(A_BUS_REQ, &dev->inputs);
+		work = 1;
+	} else if (sts & STS_PCI) {
+		pc = readl(USB_PORTSC);
+		pr_debug("portsc = %x\n", pc);
+		ret = IRQ_NONE;
+		/* HCD Acks PCI interrupt. We use this to switch
+		 * between different OTG states.
+		 */
+		work = 1;
+		switch (state) {
+		case OTG_STATE_A_SUSPEND:
+			if (dev->otg.host->b_hnp_enable && (pc & PORTSC_CSC) &&
+					!(pc & PORTSC_CCS)) {
+				pr_debug("B_CONN clear\n");
+				clear_bit(B_CONN, &dev->inputs);
+			}
+			break;
+		case OTG_STATE_B_WAIT_ACON:
+			if ((pc & PORTSC_CSC) && (pc & PORTSC_CCS)) {
+				pr_debug("A_CONN set\n");
+				set_bit(A_CONN, &dev->inputs);
+				/* Clear ASE0_BRST timer */
+				msm_otg_del_timer(dev);
+			}
+			break;
+		case OTG_STATE_B_HOST:
+			if ((pc & PORTSC_CSC) && !(pc & PORTSC_CCS)) {
+				pr_debug("A_CONN clear\n");
+				clear_bit(A_CONN, &dev->inputs);
+			}
+			break;
+		default:
+			work = 0;
+			break;
+		}
+	}
+	if (work) {
+#ifdef CONFIG_USB_MSM_ACA
+		/* With ACA, ID can change because of BSVIS as well, so update */
+		if ((otgsc & OTGSC_IDIS) || (otgsc & OTGSC_BSVIS))
+			set_aca_id_inputs(dev);
+#endif
+		wake_lock(&dev->wlock);
+		queue_work(dev->wq, &dev->sm_work);
+	}
+out:
+	return ret;
+}
+
+#define ULPI_VERIFY_MAX_LOOP_COUNT  5
+#define PHY_CALIB_RETRY_COUNT 10
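+/*
+ * Pulse the PHY reset clock: assert, wait ~1 ms, de-assert, wait ~1 ms.
+ * The assert/de-assert polarity follows pdata->phy_reset_sig_inverted as
+ * some boards invert the PHY reset signal.
+ */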
+static void phy_clk_reset(struct msm_otg *dev)
+{
+	unsigned rc;
+	enum clk_reset_action assert = CLK_RESET_ASSERT;
+
+	if (dev->pdata->phy_reset_sig_inverted)
+		assert = CLK_RESET_DEASSERT;
+
+	rc = clk_reset(dev->phy_reset_clk, assert);
+	if (rc) {
+		pr_err("%s: phy clk assert failed\n", __func__);
+		return;
+	}
+
+	msleep(1);
+
+	rc = clk_reset(dev->phy_reset_clk, !assert);
+	if (rc) {
+		pr_err("%s: phy clk deassert failed\n", __func__);
+		return;
+	}
+
+	msleep(1);
+}
+
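+/*
+ * ULPI accessors with recovery: on a failed read/write the PHY reset clock
+ * is pulsed and the access is retried, up to ULPI_VERIFY_MAX_LOOP_COUNT
+ * times, before giving up.
+ */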
+static unsigned ulpi_read_with_reset(struct msm_otg *dev, unsigned reg)
+{
+	int temp;
+	unsigned res;
+
+	for (temp = 0; temp < ULPI_VERIFY_MAX_LOOP_COUNT; temp++) {
+		res = ulpi_read(dev, reg);
+		if (res != 0xffffffff)
+			return res;
+
+		phy_clk_reset(dev);
+	}
+
+	pr_err("%s: ulpi read failed %d times\n",
+			__func__, ULPI_VERIFY_MAX_LOOP_COUNT);
+
+	return -1;
+}
+
+static int ulpi_write_with_reset(struct msm_otg *dev,
+unsigned val, unsigned reg)
+{
+	int temp, res;
+
+	for (temp = 0; temp < ULPI_VERIFY_MAX_LOOP_COUNT; temp++) {
+		res = ulpi_write(dev, val, reg);
+		if (!res)
+			return 0;
+		phy_clk_reset(dev);
+	}
+	pr_err("%s: ulpi write failed %d times\n",
+		__func__, ULPI_VERIFY_MAX_LOOP_COUNT);
+
+	return -1;
+}
+
+/* Some of the older targets do not turn off the PLL
+ * if the OnClock bit is set and the ClockSuspendM bit is on;
+ * hence clear them too and initiate suspend mode
+ * by clearing the SuspendM bit.
+ */
+static inline int turn_off_phy_pll(struct msm_otg *dev)
+{
+	unsigned res;
+
+	res = ulpi_read_with_reset(dev, ULPI_CONFIG_REG1);
+	if (res == 0xffffffff)
+		return -ETIMEDOUT;
+
+	res = ulpi_write_with_reset(dev,
+		res & ~(ULPI_ONCLOCK), ULPI_CONFIG_REG1);
+	if (res)
+		return -ETIMEDOUT;
+
+	res = ulpi_write_with_reset(dev,
+		ULPI_CLOCK_SUSPENDM, ULPI_IFC_CTRL_CLR);
+	if (res)
+		return -ETIMEDOUT;
+
+	/* Clear SuspendM bit to initiate suspend mode */
+	res = ulpi_write_with_reset(dev,
+		ULPI_SUSPENDM, ULPI_FUNC_CTRL_CLR);
+	if (res)
+		return -ETIMEDOUT;
+
+	return res;
+}
+
+static inline int check_phy_caliberation(struct msm_otg *dev)
+{
+	unsigned res;
+
+	res = ulpi_read_with_reset(dev, ULPI_DEBUG);
+
+	if (res == 0xffffffff)
+		return -ETIMEDOUT;
+
+	if (!(res & ULPI_CALIB_STS) && ULPI_CALIB_VAL(res))
+		return 0;
+
+	return -1;
+}
+
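+/*
+ * PHY calibration loop: turn off the PHY PLL and put the PHY in suspend,
+ * pulse the PHY reset clock to bring it back out of suspend, then check the
+ * calibration status. Retried up to PHY_CALIB_RETRY_COUNT times.
+ */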
+static int msm_otg_phy_caliberate(struct msm_otg *dev)
+{
+	int i = 0;
+	unsigned long res;
+
+	do {
+		res = turn_off_phy_pll(dev);
+		if (res)
+			return -ETIMEDOUT;
+
+		/* bring phy out of suspend */
+		phy_clk_reset(dev);
+
+		res = check_phy_caliberation(dev);
+		if (!res)
+			return res;
+		i++;
+
+	} while (i < PHY_CALIB_RETRY_COUNT);
+
+	return res;
+}
+
+static int msm_otg_phy_reset(struct msm_otg *dev)
+{
+	unsigned rc;
+	unsigned temp;
+	unsigned long timeout;
+
+	rc = clk_reset(dev->hs_clk, CLK_RESET_ASSERT);
+	if (rc) {
+		pr_err("%s: usb hs clk assert failed\n", __func__);
+		return -1;
+	}
+
+	phy_clk_reset(dev);
+
+	rc = clk_reset(dev->hs_clk, CLK_RESET_DEASSERT);
+	if (rc) {
+		pr_err("%s: usb hs clk deassert failed\n", __func__);
+		return -1;
+	}
+	/* ULPI timeouts are observed as part of PHY calibration. Explicitly
+	 * resetting the HW link by setting the RESET bit in the USBCMD
+	 * register before PHY calibration fixes the ULPI timeout issue.
+	 * This workaround is required for the unicorn target.
+	 */
+	writel_relaxed(USBCMD_RESET, USB_USBCMD);
+	timeout = jiffies + USB_LINK_RESET_TIMEOUT;
+	do {
+		if (time_after(jiffies, timeout)) {
+			pr_err("msm_otg: usb link reset timeout\n");
+			break;
+		}
+		usleep_range(1000, 1200);
+	} while (readl_relaxed(USB_USBCMD) & USBCMD_RESET);
+
+	/* select ULPI phy */
+	temp = (readl(USB_PORTSC) & ~PORTSC_PTS);
+	writel(temp | PORTSC_PTS_ULPI, USB_PORTSC);
+
+	if (atomic_read(&dev->chg_type) !=
+				USB_CHG_TYPE__WALLCHARGER) {
+		rc = msm_otg_phy_caliberate(dev);
+		if (rc)
+			return rc;
+	}
+
+	/* TBD: There are two link resets. One is below and the other
+	 * is done immediately after this function. See if we can
+	 * eliminate one of them.
+	 */
+	writel(USBCMD_RESET, USB_USBCMD);
+	timeout = jiffies + USB_LINK_RESET_TIMEOUT;
+	do {
+		if (time_after(jiffies, timeout)) {
+			pr_err("msm_otg: usb link reset timeout\n");
+			break;
+		}
+		msleep(1);
+	} while (readl(USB_USBCMD) & USBCMD_RESET);
+
+	if (readl(USB_USBCMD) & USBCMD_RESET) {
+		pr_err("%s: usb core reset failed\n", __func__);
+		return -1;
+	}
+
+	return 0;
+}
+
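+/*
+ * Full controller reset: optionally reset the PHY (board callback or the
+ * generic sequence above), reset the link, reprogram the PHY tuning
+ * parameters and AHB settings, select host or device mode based on the
+ * current inputs, and re-arm the session-valid/ID interrupts. Any BSV or
+ * ID_GND events missed while interrupts were disabled are recovered here.
+ */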
+static void otg_reset(struct otg_transceiver *xceiv, int phy_reset)
+{
+	struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg);
+	unsigned long timeout;
+	u32 mode, work = 0;
+
+	clk_enable(dev->hs_clk);
+
+	if (!phy_reset)
+		goto reset_link;
+
+	if (dev->pdata->phy_reset)
+		dev->pdata->phy_reset(dev->regs);
+	else
+		msm_otg_phy_reset(dev);
+
+	/* Disable all PHY interrupts */
+	ulpi_write(dev, 0xFF, 0x0F);
+	ulpi_write(dev, 0xFF, 0x12);
+	msleep(100);
+
+reset_link:
+	writel(USBCMD_RESET, USB_USBCMD);
+	timeout = jiffies + USB_LINK_RESET_TIMEOUT;
+	do {
+		if (time_after(jiffies, timeout)) {
+			pr_err("msm_otg: usb link reset timeout\n");
+			break;
+		}
+		msleep(1);
+	} while (readl(USB_USBCMD) & USBCMD_RESET);
+
+	/* select ULPI phy */
+	writel(0x80000000, USB_PORTSC);
+
+	set_pre_emphasis_level(dev);
+	set_hsdrv_slope(dev);
+	set_cdr_auto_reset(dev);
+	set_driver_amplitude(dev);
+	set_se1_gating(dev);
+
+	writel(0x0, USB_AHB_BURST);
+	writel(0x00, USB_AHB_MODE);
+	/* Ensure that RESET operation is completed before turning off clock */
+	mb();
+
+	clk_disable(dev->hs_clk);
+
+	if ((xceiv->gadget && xceiv->gadget->is_a_peripheral) ||
+			test_bit(ID, &dev->inputs))
+		mode = USBMODE_SDIS | USBMODE_DEVICE;
+	else
+		mode = USBMODE_SDIS | USBMODE_HOST;
+	writel(mode, USB_USBMODE);
+
+	writel_relaxed((readl_relaxed(USB_OTGSC) | OTGSC_IDPU), USB_OTGSC);
+	if (dev->otg.gadget) {
+		enable_sess_valid(dev);
+		/* Due to the above 100ms delay, interrupts from PHY are
+		 * sometimes missed during fast plug-in/plug-out of cable.
+		 * Check for such cases here.
+		 */
+		if (is_b_sess_vld() && !test_bit(B_SESS_VLD, &dev->inputs)) {
+			pr_debug("%s: handle missing BSV event\n", __func__);
+			set_bit(B_SESS_VLD, &dev->inputs);
+			work = 1;
+		} else if (!is_b_sess_vld() && test_bit(B_SESS_VLD,
+				&dev->inputs)) {
+			pr_debug("%s: handle missing !BSV event\n", __func__);
+			clear_bit(B_SESS_VLD, &dev->inputs);
+			work = 1;
+		}
+	}
+
+#ifdef CONFIG_USB_EHCI_MSM_72K
+	if (dev->otg.host && !dev->pmic_id_notif_supp) {
+		enable_idgnd(dev);
+		/* Handle missing ID_GND interrupts during fast PIPO */
+		if (is_host() && test_bit(ID, &dev->inputs)) {
+			pr_debug("%s: handle missing ID_GND event\n", __func__);
+			clear_bit(ID, &dev->inputs);
+			work = 1;
+		} else if (!is_host() && !test_bit(ID, &dev->inputs)) {
+			pr_debug("%s: handle missing !ID_GND event\n",
+						__func__);
+			set_bit(ID, &dev->inputs);
+			work = 1;
+		}
+	} else {
+		disable_idgnd(dev);
+	}
+#endif
+
+	enable_idabc(dev);
+
+	if (work) {
+		wake_lock(&dev->wlock);
+		queue_work(dev->wq, &dev->sm_work);
+	}
+}
+
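+/*
+ * OTG state machine worker. Runs with the controller out of LPM, reads the
+ * current OTG state under the spinlock and acts on the accumulated inputs
+ * (ID, B_SESS_VLD, ACA IDs, timeouts, ...), moving between the B-device and
+ * A-device states, starting/stopping the host and peripheral controllers and
+ * updating the charging current along the way.
+ */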
+static void msm_otg_sm_work(struct work_struct *w)
+{
+	struct msm_otg	*dev = container_of(w, struct msm_otg, sm_work);
+	enum chg_type	chg_type = atomic_read(&dev->chg_type);
+	int ret;
+	int work = 0;
+	enum usb_otg_state state;
+	unsigned long flags;
+
+	if (atomic_read(&dev->in_lpm))
+		msm_otg_set_suspend(&dev->otg, 0);
+
+	spin_lock_irqsave(&dev->lock, flags);
+	state = dev->otg.state;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	switch (state) {
+	case OTG_STATE_UNDEFINED:
+
+		/*
+		 * We can come here when LPM fails with wall charger
+		 * connected. Increment the PM usage counter to reflect
+		 * the actual device state. Change the state to
+		 * B_PERIPHERAL and schedule the work which takes care
+		 * of resetting the PHY and putting the hardware in
+		 * low power mode.
+		 */
+		if (atomic_read(&dev->chg_type) ==
+				USB_CHG_TYPE__WALLCHARGER) {
+			msm_otg_get_resume(dev);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_B_PERIPHERAL;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			work = 1;
+			break;
+		}
+
+		/* Reset both phy and link */
+		otg_reset(&dev->otg, 1);
+
+#ifdef CONFIG_USB_MSM_ACA
+		set_aca_id_inputs(dev);
+#endif
+		if (dev->pdata->otg_mode == OTG_USER_CONTROL) {
+			if ((dev->pdata->usb_mode == USB_PERIPHERAL_MODE) ||
+					!dev->otg.host) {
+				set_bit(ID, &dev->inputs);
+				set_bit(B_SESS_VLD, &dev->inputs);
+			}
+		} else {
+			if (!dev->otg.host || !is_host())
+				set_bit(ID, &dev->inputs);
+
+			if (dev->otg.gadget && is_b_sess_vld())
+				set_bit(B_SESS_VLD, &dev->inputs);
+		}
+		spin_lock_irqsave(&dev->lock, flags);
+		if ((test_bit(ID, &dev->inputs)) &&
+				!test_bit(ID_A, &dev->inputs)) {
+			dev->otg.state = OTG_STATE_B_IDLE;
+		} else {
+			set_bit(A_BUS_REQ, &dev->inputs);
+			dev->otg.state = OTG_STATE_A_IDLE;
+		}
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		work = 1;
+		break;
+	case OTG_STATE_B_IDLE:
+		dev->otg.default_a = 0;
+		if (!test_bit(ID, &dev->inputs) ||
+				test_bit(ID_A, &dev->inputs)) {
+			pr_debug("!id || id_A\n");
+			clear_bit(B_BUS_REQ, &dev->inputs);
+			otg_reset(&dev->otg, 0);
+
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_IDLE;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			msm_otg_set_power(&dev->otg, 0);
+			work = 1;
+		} else if (test_bit(B_SESS_VLD, &dev->inputs) &&
+				!test_bit(ID_B, &dev->inputs)) {
+			pr_debug("b_sess_vld\n");
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_B_PERIPHERAL;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			msm_otg_set_power(&dev->otg, 0);
+			msm_otg_start_peripheral(&dev->otg, 1);
+		} else if (test_bit(B_BUS_REQ, &dev->inputs)) {
+			pr_debug("b_sess_end && b_bus_req\n");
+			ret = msm_otg_start_srp(&dev->otg);
+			if (ret < 0) {
+				/* notify user space */
+				clear_bit(B_BUS_REQ, &dev->inputs);
+				work = 1;
+				break;
+			}
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_B_SRP_INIT;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			msm_otg_start_timer(dev, TB_SRP_FAIL, B_SRP_FAIL);
+			break;
+		} else if (test_bit(ID_B, &dev->inputs)) {
+			atomic_set(&dev->chg_type, USB_CHG_TYPE__SDP);
+			msm_otg_set_power(&dev->otg, USB_IDCHG_MAX);
+		} else {
+			msm_otg_set_power(&dev->otg, 0);
+			pr_debug("entering into lpm\n");
+			msm_otg_put_suspend(dev);
+
+			if (dev->pdata->ldo_set_voltage)
+				dev->pdata->ldo_set_voltage(3075);
+		}
+		break;
+	case OTG_STATE_B_SRP_INIT:
+		if (!test_bit(ID, &dev->inputs) ||
+				test_bit(ID_A, &dev->inputs) ||
+				test_bit(ID_C, &dev->inputs) ||
+				(test_bit(B_SESS_VLD, &dev->inputs) &&
+				!test_bit(ID_B, &dev->inputs))) {
+			pr_debug("!id || id_a/c || b_sess_vld+!id_b\n");
+			msm_otg_del_timer(dev);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_B_IDLE;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			work = 1;
+		} else if (test_bit(B_SRP_FAIL, &dev->tmouts)) {
+			pr_debug("b_srp_fail\n");
+			/* notify user space */
+			msm_otg_send_event(&dev->otg,
+				OTG_EVENT_NO_RESP_FOR_SRP);
+			clear_bit(B_BUS_REQ, &dev->inputs);
+			clear_bit(B_SRP_FAIL, &dev->tmouts);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_B_IDLE;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			dev->b_last_se0_sess = jiffies;
+			work = 1;
+		}
+		break;
+	case OTG_STATE_B_PERIPHERAL:
+		if (!test_bit(ID, &dev->inputs) ||
+				test_bit(ID_A, &dev->inputs) ||
+				test_bit(ID_B, &dev->inputs) ||
+				!test_bit(B_SESS_VLD, &dev->inputs)) {
+			pr_debug("!id  || id_a/b || !b_sess_vld\n");
+			clear_bit(B_BUS_REQ, &dev->inputs);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_B_IDLE;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			msm_otg_start_peripheral(&dev->otg, 0);
+			dev->b_last_se0_sess = jiffies;
+
+			/* Workaround: Reset phy after session */
+			otg_reset(&dev->otg, 1);
+			work = 1;
+		} else if (test_bit(B_BUS_REQ, &dev->inputs) &&
+				dev->otg.gadget->b_hnp_enable &&
+				test_bit(A_BUS_SUSPEND, &dev->inputs)) {
+			pr_debug("b_bus_req && b_hnp_en && a_bus_suspend\n");
+			msm_otg_start_timer(dev, TB_ASE0_BRST, B_ASE0_BRST);
+			msm_otg_start_peripheral(&dev->otg, 0);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_B_WAIT_ACON;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			/* Start HCD even before the A-device enables
+			 * the pull-up, to meet HNP timings.
+			 */
+			dev->otg.host->is_b_host = 1;
+			msm_otg_start_host(&dev->otg, REQUEST_START);
+
+		} else if (test_bit(ID_C, &dev->inputs)) {
+			atomic_set(&dev->chg_type, USB_CHG_TYPE__SDP);
+			msm_otg_set_power(&dev->otg, USB_IDCHG_MAX);
+		} else if (chg_type == USB_CHG_TYPE__WALLCHARGER) {
+#ifdef CONFIG_USB_MSM_ACA
+			del_timer_sync(&dev->id_timer);
+#endif
+			/* Workaround: Reset PHY in SE1 state */
+			otg_reset(&dev->otg, 1);
+			pr_debug("entering into lpm with wall-charger\n");
+			msm_otg_put_suspend(dev);
+			/* Allow idle power collapse */
+			otg_pm_qos_update_latency(dev, 0);
+		}
+		break;
+	case OTG_STATE_B_WAIT_ACON:
+		if (!test_bit(ID, &dev->inputs) ||
+				test_bit(ID_A, &dev->inputs) ||
+				test_bit(ID_B, &dev->inputs) ||
+				!test_bit(B_SESS_VLD, &dev->inputs)) {
+			pr_debug("!id || id_a/b || !b_sess_vld\n");
+			msm_otg_del_timer(dev);
+			/* A-device is physically disconnected during
+			 * HNP. Remove HCD.
+			 */
+			msm_otg_start_host(&dev->otg, REQUEST_STOP);
+			dev->otg.host->is_b_host = 0;
+
+			clear_bit(B_BUS_REQ, &dev->inputs);
+			clear_bit(A_BUS_SUSPEND, &dev->inputs);
+			dev->b_last_se0_sess = jiffies;
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_B_IDLE;
+			spin_unlock_irqrestore(&dev->lock, flags);
+
+			/* Workaround: Reset phy after session */
+			otg_reset(&dev->otg, 1);
+			work = 1;
+		} else if (test_bit(A_CONN, &dev->inputs)) {
+			pr_debug("a_conn\n");
+			clear_bit(A_BUS_SUSPEND, &dev->inputs);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_B_HOST;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			if (test_bit(ID_C, &dev->inputs)) {
+				atomic_set(&dev->chg_type, USB_CHG_TYPE__SDP);
+				msm_otg_set_power(&dev->otg, USB_IDCHG_MAX);
+			}
+		} else if (test_bit(B_ASE0_BRST, &dev->tmouts)) {
+			/* TODO: A-device may send reset after
+			 * enabling HNP; a_bus_resume case is
+			 * not handled for now.
+			 */
+			pr_debug("b_ase0_brst_tmout\n");
+			msm_otg_send_event(&dev->otg,
+				OTG_EVENT_HNP_FAILED);
+			msm_otg_start_host(&dev->otg, REQUEST_STOP);
+			dev->otg.host->is_b_host = 0;
+			clear_bit(B_ASE0_BRST, &dev->tmouts);
+			clear_bit(A_BUS_SUSPEND, &dev->inputs);
+			clear_bit(B_BUS_REQ, &dev->inputs);
+
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_B_PERIPHERAL;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			msm_otg_start_peripheral(&dev->otg, 1);
+		} else if (test_bit(ID_C, &dev->inputs)) {
+			atomic_set(&dev->chg_type, USB_CHG_TYPE__SDP);
+			msm_otg_set_power(&dev->otg, USB_IDCHG_MAX);
+		}
+		break;
+	case OTG_STATE_B_HOST:
+		/* B_BUS_REQ is not exposed to user space. So
+		 * it must be A_CONN for now.
+		 */
+		if (!test_bit(B_BUS_REQ, &dev->inputs) ||
+				!test_bit(A_CONN, &dev->inputs)) {
+			pr_debug("!b_bus_req || !a_conn\n");
+			clear_bit(A_CONN, &dev->inputs);
+			clear_bit(B_BUS_REQ, &dev->inputs);
+
+			msm_otg_start_host(&dev->otg, REQUEST_STOP);
+			dev->otg.host->is_b_host = 0;
+
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_B_IDLE;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			/* Workaround: Reset phy after session */
+			otg_reset(&dev->otg, 1);
+			work = 1;
+		} else if (test_bit(ID_C, &dev->inputs)) {
+			atomic_set(&dev->chg_type, USB_CHG_TYPE__SDP);
+			msm_otg_set_power(&dev->otg, USB_IDCHG_MAX);
+		}
+		break;
+	case OTG_STATE_A_IDLE:
+		dev->otg.default_a = 1;
+		if (test_bit(ID, &dev->inputs) &&
+				!test_bit(ID_A, &dev->inputs)) {
+			pr_debug("id && !id_a\n");
+			dev->otg.default_a = 0;
+			otg_reset(&dev->otg, 0);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_B_IDLE;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			msm_otg_set_power(&dev->otg, 0);
+			work = 1;
+		} else if (!test_bit(A_BUS_DROP, &dev->inputs) &&
+				(test_bit(A_SRP_DET, &dev->inputs) ||
+				 test_bit(A_BUS_REQ, &dev->inputs))) {
+			pr_debug("!a_bus_drop && (a_srp_det || a_bus_req)\n");
+
+			clear_bit(A_SRP_DET, &dev->inputs);
+			/* Disable SRP detection */
+			writel((readl(USB_OTGSC) & ~OTGSC_INTR_STS_MASK) &
+					~OTGSC_DPIE, USB_OTGSC);
+
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_WAIT_VRISE;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			/* ACA: ID_A: Stop charging until enumeration */
+			if (test_bit(ID_A, &dev->inputs))
+				msm_otg_set_power(&dev->otg, 0);
+			else
+				dev->pdata->vbus_power(USB_PHY_INTEGRATED, 1);
+			msm_otg_start_timer(dev, TA_WAIT_VRISE, A_WAIT_VRISE);
+			/* no need to schedule work now */
+		} else {
+			pr_debug("No session requested\n");
+
+			/* A-device is not providing power on VBUS.
+			 * Enable SRP detection.
+			 */
+			writel((readl(USB_OTGSC) & ~OTGSC_INTR_STS_MASK) |
+					OTGSC_DPIE, USB_OTGSC);
+			msm_otg_put_suspend(dev);
+
+		}
+		break;
+	case OTG_STATE_A_WAIT_VRISE:
+		if ((test_bit(ID, &dev->inputs) &&
+				!test_bit(ID_A, &dev->inputs)) ||
+				test_bit(A_BUS_DROP, &dev->inputs) ||
+				test_bit(A_WAIT_VRISE, &dev->tmouts)) {
+			pr_debug("id || a_bus_drop || a_wait_vrise_tmout\n");
+			clear_bit(A_BUS_REQ, &dev->inputs);
+			msm_otg_del_timer(dev);
+			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_WAIT_VFALL;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			msm_otg_start_timer(dev, TA_WAIT_VFALL, A_WAIT_VFALL);
+		} else if (test_bit(A_VBUS_VLD, &dev->inputs)) {
+			pr_debug("a_vbus_vld\n");
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_WAIT_BCON;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			if (TA_WAIT_BCON > 0)
+				msm_otg_start_timer(dev, TA_WAIT_BCON,
+					A_WAIT_BCON);
+			/* Start HCD to detect peripherals. */
+			msm_otg_start_host(&dev->otg, REQUEST_START);
+		}
+		break;
+	case OTG_STATE_A_WAIT_BCON:
+		if ((test_bit(ID, &dev->inputs) &&
+				!test_bit(ID_A, &dev->inputs)) ||
+				test_bit(A_BUS_DROP, &dev->inputs) ||
+				test_bit(A_WAIT_BCON, &dev->tmouts)) {
+			pr_debug("id_f/b/c || a_bus_drop || "
+					"a_wait_bcon_tmout\n");
+			if (test_bit(A_WAIT_BCON, &dev->tmouts))
+				msm_otg_send_event(&dev->otg,
+					OTG_EVENT_DEV_CONN_TMOUT);
+			msm_otg_del_timer(dev);
+			clear_bit(A_BUS_REQ, &dev->inputs);
+			msm_otg_start_host(&dev->otg, REQUEST_STOP);
+			/* Reset both phy and link */
+			otg_reset(&dev->otg, 1);
+			/* ACA: ID_A with NO accessory, just the A plug is
+			 * attached to ACA: Use IDCHG_MAX for charging
+			 */
+			if (test_bit(ID_A, &dev->inputs))
+				msm_otg_set_power(&dev->otg, USB_IDCHG_MAX);
+			else
+				dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_WAIT_VFALL;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			msm_otg_start_timer(dev, TA_WAIT_VFALL, A_WAIT_VFALL);
+		} else if (test_bit(B_CONN, &dev->inputs)) {
+			pr_debug("b_conn\n");
+			msm_otg_del_timer(dev);
+			/* HCD is added already. Just move to
+			 * A_HOST state.
+			 */
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_HOST;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			if (test_bit(ID_A, &dev->inputs)) {
+				atomic_set(&dev->chg_type, USB_CHG_TYPE__SDP);
+				msm_otg_set_power(&dev->otg,
+					USB_IDCHG_MIN - get_aca_bmaxpower(dev));
+			}
+		} else if (!test_bit(A_VBUS_VLD, &dev->inputs)) {
+			pr_debug("!a_vbus_vld\n");
+			msm_otg_del_timer(dev);
+			msm_otg_start_host(&dev->otg, REQUEST_STOP);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_VBUS_ERR;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			/* Reset both phy and link */
+			otg_reset(&dev->otg, 1);
+		} else if (test_bit(ID_A, &dev->inputs)) {
+			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
+		} else if (!test_bit(ID, &dev->inputs)) {
+			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 1);
+		}
+		break;
+	case OTG_STATE_A_HOST:
+		if ((test_bit(ID, &dev->inputs) &&
+				!test_bit(ID_A, &dev->inputs)) ||
+				test_bit(A_BUS_DROP, &dev->inputs)) {
+			pr_debug("id_f/b/c || a_bus_drop\n");
+			clear_bit(B_CONN, &dev->inputs);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_WAIT_VFALL;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			msm_otg_start_host(&dev->otg, REQUEST_STOP);
+			/* Reset both phy and link */
+			otg_reset(&dev->otg, 1);
+			if (!test_bit(ID_A, &dev->inputs))
+				dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
+			msm_otg_start_timer(dev, TA_WAIT_VFALL, A_WAIT_VFALL);
+			msm_otg_set_power(&dev->otg, 0);
+		} else if (!test_bit(A_VBUS_VLD, &dev->inputs)) {
+			pr_debug("!a_vbus_vld\n");
+			clear_bit(B_CONN, &dev->inputs);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_VBUS_ERR;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			msm_otg_start_host(&dev->otg, REQUEST_STOP);
+			/* Reset both phy and link */
+			otg_reset(&dev->otg, 1);
+			/* no work */
+		} else if (!test_bit(A_BUS_REQ, &dev->inputs)) {
+			/* a_bus_req is de-asserted when root hub is
+			 * suspended or HNP is in progress.
+			 */
+			pr_debug("!a_bus_req\n");
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_SUSPEND;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			if (dev->otg.host->b_hnp_enable) {
+				msm_otg_start_timer(dev, TA_AIDL_BDIS,
+						A_AIDL_BDIS);
+			} else {
+				/* No HNP. Root hub suspended */
+				msm_otg_put_suspend(dev);
+			}
+			if (test_bit(ID_A, &dev->inputs))
+				msm_otg_set_power(&dev->otg,
+						USB_IDCHG_MIN - USB_IB_UNCFG);
+		} else if (!test_bit(B_CONN, &dev->inputs)) {
+			pr_debug("!b_conn\n");
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_WAIT_BCON;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			if (TA_WAIT_BCON > 0)
+				msm_otg_start_timer(dev, TA_WAIT_BCON,
+					A_WAIT_BCON);
+		} else if (test_bit(ID_A, &dev->inputs)) {
+			atomic_set(&dev->chg_type, USB_CHG_TYPE__SDP);
+			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
+			msm_otg_set_power(&dev->otg,
+					USB_IDCHG_MIN - get_aca_bmaxpower(dev));
+		} else if (!test_bit(ID, &dev->inputs)) {
+			atomic_set(&dev->chg_type, USB_CHG_TYPE__INVALID);
+			msm_otg_set_power(&dev->otg, 0);
+			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 1);
+		}
+		break;
+	case OTG_STATE_A_SUSPEND:
+		if ((test_bit(ID, &dev->inputs) &&
+				!test_bit(ID_A, &dev->inputs)) ||
+				test_bit(A_BUS_DROP, &dev->inputs) ||
+				test_bit(A_AIDL_BDIS, &dev->tmouts)) {
+			pr_debug("id_f/b/c || a_bus_drop || "
+					"a_aidl_bdis_tmout\n");
+			if (test_bit(A_AIDL_BDIS, &dev->tmouts))
+				msm_otg_send_event(&dev->otg,
+					OTG_EVENT_HNP_FAILED);
+			msm_otg_del_timer(dev);
+			clear_bit(B_CONN, &dev->inputs);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_WAIT_VFALL;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			msm_otg_start_host(&dev->otg, REQUEST_STOP);
+			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
+			/* Reset both phy and link */
+			otg_reset(&dev->otg, 1);
+			if (!test_bit(ID_A, &dev->inputs))
+				dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
+			msm_otg_start_timer(dev, TA_WAIT_VFALL, A_WAIT_VFALL);
+			msm_otg_set_power(&dev->otg, 0);
+		} else if (!test_bit(A_VBUS_VLD, &dev->inputs)) {
+			pr_debug("!a_vbus_vld\n");
+			msm_otg_del_timer(dev);
+			clear_bit(B_CONN, &dev->inputs);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_VBUS_ERR;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			msm_otg_start_host(&dev->otg, REQUEST_STOP);
+			/* Reset both phy and link */
+			otg_reset(&dev->otg, 1);
+		} else if (!test_bit(B_CONN, &dev->inputs) &&
+				dev->otg.host->b_hnp_enable) {
+			pr_debug("!b_conn && b_hnp_enable\n");
+			/* Clear AIDL_BDIS timer */
+			msm_otg_del_timer(dev);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_PERIPHERAL;
+			spin_unlock_irqrestore(&dev->lock, flags);
+
+			msm_otg_start_host(&dev->otg, REQUEST_HNP_SUSPEND);
+
+			/* We may come here even when B-dev is physically
+			 * disconnected during HNP. We go back to host
+			 * role if bus is idle for BIDL_ADIS time.
+			 */
+			dev->otg.gadget->is_a_peripheral = 1;
+			msm_otg_start_peripheral(&dev->otg, 1);
+			/* If ID_A: we can charge in a_peripheral as well */
+			if (test_bit(ID_A, &dev->inputs)) {
+				atomic_set(&dev->chg_type, USB_CHG_TYPE__SDP);
+				msm_otg_set_power(&dev->otg,
+					 USB_IDCHG_MIN - USB_IB_UNCFG);
+			}
+		} else if (!test_bit(B_CONN, &dev->inputs) &&
+				!dev->otg.host->b_hnp_enable) {
+			pr_debug("!b_conn && !b_hnp_enable\n");
+			/* Bus request is dropped during suspend;
+			 * acquire it again for the next device.
+			 */
+			set_bit(A_BUS_REQ, &dev->inputs);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_WAIT_BCON;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			if (TA_WAIT_BCON > 0)
+				msm_otg_start_timer(dev, TA_WAIT_BCON,
+					A_WAIT_BCON);
+			msm_otg_set_power(&dev->otg, 0);
+		} else if (test_bit(ID_A, &dev->inputs)) {
+			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
+			atomic_set(&dev->chg_type, USB_CHG_TYPE__SDP);
+			msm_otg_set_power(&dev->otg,
+					 USB_IDCHG_MIN - USB_IB_UNCFG);
+		} else if (!test_bit(ID, &dev->inputs)) {
+			msm_otg_set_power(&dev->otg, 0);
+			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 1);
+		}
+		break;
+	case OTG_STATE_A_PERIPHERAL:
+		if ((test_bit(ID, &dev->inputs) &&
+				!test_bit(ID_A, &dev->inputs)) ||
+				test_bit(A_BUS_DROP, &dev->inputs)) {
+			pr_debug("id_f/b/c || a_bus_drop\n");
+			/* Clear BIDL_ADIS timer */
+			msm_otg_del_timer(dev);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_WAIT_VFALL;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			msm_otg_start_peripheral(&dev->otg, 0);
+			dev->otg.gadget->is_a_peripheral = 0;
+			/* HCD was suspended before. Stop it now */
+			msm_otg_start_host(&dev->otg, REQUEST_STOP);
+
+			/* Reset both phy and link */
+			otg_reset(&dev->otg, 1);
+			if (!test_bit(ID_A, &dev->inputs))
+				dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
+			msm_otg_start_timer(dev, TA_WAIT_VFALL, A_WAIT_VFALL);
+			msm_otg_set_power(&dev->otg, 0);
+		} else if (!test_bit(A_VBUS_VLD, &dev->inputs)) {
+			pr_debug("!a_vbus_vld\n");
+			/* Clear BIDL_ADIS timer */
+			msm_otg_del_timer(dev);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_VBUS_ERR;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			msm_otg_start_peripheral(&dev->otg, 0);
+			dev->otg.gadget->is_a_peripheral = 0;
+			/* HCD was suspended before. Stop it now */
+			msm_otg_start_host(&dev->otg, REQUEST_STOP);
+		} else if (test_bit(A_BIDL_ADIS, &dev->tmouts)) {
+			pr_debug("a_bidl_adis_tmout\n");
+			msm_otg_start_peripheral(&dev->otg, 0);
+			dev->otg.gadget->is_a_peripheral = 0;
+
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_WAIT_BCON;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			set_bit(A_BUS_REQ, &dev->inputs);
+			msm_otg_start_host(&dev->otg, REQUEST_HNP_RESUME);
+			if (TA_WAIT_BCON > 0)
+				msm_otg_start_timer(dev, TA_WAIT_BCON,
+					A_WAIT_BCON);
+			msm_otg_set_power(&dev->otg, 0);
+		} else if (test_bit(ID_A, &dev->inputs)) {
+			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
+			atomic_set(&dev->chg_type, USB_CHG_TYPE__SDP);
+			msm_otg_set_power(&dev->otg,
+					 USB_IDCHG_MIN - USB_IB_UNCFG);
+		} else if (!test_bit(ID, &dev->inputs)) {
+			msm_otg_set_power(&dev->otg, 0);
+			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 1);
+		}
+		break;
+	case OTG_STATE_A_WAIT_VFALL:
+		if (test_bit(A_WAIT_VFALL, &dev->tmouts)) {
+			clear_bit(A_VBUS_VLD, &dev->inputs);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_IDLE;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			work = 1;
+		}
+		break;
+	case OTG_STATE_A_VBUS_ERR:
+		if ((test_bit(ID, &dev->inputs) &&
+				!test_bit(ID_A, &dev->inputs)) ||
+				test_bit(A_BUS_DROP, &dev->inputs) ||
+				test_bit(A_CLR_ERR, &dev->inputs)) {
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_WAIT_VFALL;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			if (!test_bit(ID_A, &dev->inputs))
+				dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
+			msm_otg_start_timer(dev, TA_WAIT_VFALL, A_WAIT_VFALL);
+			msm_otg_set_power(&dev->otg, 0);
+		}
+		break;
+	default:
+		pr_err("invalid OTG state\n");
+	}
+
+	if (work)
+		queue_work(dev->wq, &dev->sm_work);
+
+#ifdef CONFIG_USB_MSM_ACA
+	/* Start id_polling if (ID_FLOAT&BSV) || ID_A/B/C */
+	if ((test_bit(ID, &dev->inputs) &&
+			test_bit(B_SESS_VLD, &dev->inputs) &&
+			chg_type != USB_CHG_TYPE__WALLCHARGER) ||
+			test_bit(ID_A, &dev->inputs)) {
+		mod_timer(&dev->id_timer, jiffies +
+				 msecs_to_jiffies(OTG_ID_POLL_MS));
+		return;
+	}
+	del_timer(&dev->id_timer);
+#endif
+	/* IRQ/sysfs may queue work. Check work_pending; otherwise
+	 * we might end up releasing the wakelock after it is acquired
+	 * in IRQ/sysfs.
+	 */
+	if (!work_pending(&dev->sm_work) && !hrtimer_active(&dev->timer) &&
+			!work_pending(&dev->otg_resume_work))
+		wake_unlock(&dev->wlock);
+}
+
+#ifdef CONFIG_USB_MSM_ACA
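+/*
+ * ACA ID polling timer. The link cannot interrupt on the ID_A/B/C
+ * resistance levels, so the PHY ID state is polled: if it changed, the ACA
+ * inputs are updated and the state machine is scheduled, otherwise the
+ * timer is re-armed after OTG_ID_POLL_MS.
+ */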
+static void msm_otg_id_func(unsigned long _dev)
+{
+	struct msm_otg	*dev = (struct msm_otg *) _dev;
+	u8		phy_ints;
+
+#ifdef CONFIG_USB_MSM_STANDARD_ACA
+	/*
+	 * When standard ACA is attached RID_A and RID_GND states are only
+	 * possible.  RID_A-->RID_GND transition generates IdGnd interrupt
+	 * from PHY.  Hence polling is disabled.
+	 */
+	if (test_bit(ID_A, &dev->inputs))
+		goto out;
+#endif
+
+	if (atomic_read(&dev->in_lpm))
+		msm_otg_set_suspend(&dev->otg, 0);
+
+	phy_ints = ulpi_read(dev, 0x13);
+
+	/*
+	 * ACA timer will be kicked again after the PHY
+	 * state is recovered.
+	 */
+	if (phy_ints == -ETIMEDOUT)
+		return;
+
+
+	/* If id_gnd happened then stop and let isr take care of this */
+	if (phy_id_state_gnd(phy_ints))
+		goto out;
+
+	if ((test_bit(ID_A, &dev->inputs) == phy_id_state_a(phy_ints)) &&
+	    (test_bit(ID_B, &dev->inputs) == phy_id_state_b(phy_ints)) &&
+	    (test_bit(ID_C, &dev->inputs) == phy_id_state_c(phy_ints))) {
+		mod_timer(&dev->id_timer,
+				jiffies + msecs_to_jiffies(OTG_ID_POLL_MS));
+		goto out;
+	} else {
+		set_aca_id_inputs(dev);
+	}
+	wake_lock(&dev->wlock);
+	queue_work(dev->wq, &dev->sm_work);
+out:
+	/* OOPS: running while !BSV, schedule work to initiate LPM */
+	if (!is_b_sess_vld()) {
+		clear_bit(B_SESS_VLD, &dev->inputs);
+		wake_lock(&dev->wlock);
+		queue_work(dev->wq, &dev->sm_work);
+	}
+	return;
+}
+#endif
+#ifdef CONFIG_USB_OTG
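+/*
+ * sysfs controls exposed to user space: pwr_down drops/restores VBUS on the
+ * A-device (a_bus_drop), srp_req initiates SRP from B_IDLE (b_bus_req), and
+ * clr_err clears an A_VBUS_ERR condition. Each one kicks the state machine.
+ */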
+static ssize_t
+set_pwr_down(struct device *_dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct msm_otg *dev = the_msm_otg;
+	int value;
+	enum usb_otg_state state;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	state = dev->otg.state;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	/* Applicable only for the A-device */
+	if (state <= OTG_STATE_A_IDLE)
+		return -EINVAL;
+
+	sscanf(buf, "%d", &value);
+
+	if (test_bit(A_BUS_DROP, &dev->inputs) != !!value) {
+		change_bit(A_BUS_DROP, &dev->inputs);
+		wake_lock(&dev->wlock);
+		queue_work(dev->wq, &dev->sm_work);
+	}
+
+	return count;
+}
+static DEVICE_ATTR(pwr_down, S_IRUGO | S_IWUSR, NULL, set_pwr_down);
+
+static ssize_t
+set_srp_req(struct device *_dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct msm_otg *dev = the_msm_otg;
+	enum usb_otg_state state;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	state = dev->otg.state;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	if (state != OTG_STATE_B_IDLE)
+		return -EINVAL;
+
+	set_bit(B_BUS_REQ, &dev->inputs);
+	wake_lock(&dev->wlock);
+	queue_work(dev->wq, &dev->sm_work);
+
+	return count;
+}
+static DEVICE_ATTR(srp_req, S_IRUGO | S_IWUSR, NULL, set_srp_req);
+
+static ssize_t
+set_clr_err(struct device *_dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct msm_otg *dev = the_msm_otg;
+	enum usb_otg_state state;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	state = dev->otg.state;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	if (state == OTG_STATE_A_VBUS_ERR) {
+		set_bit(A_CLR_ERR, &dev->inputs);
+		wake_lock(&dev->wlock);
+		queue_work(dev->wq, &dev->sm_work);
+	}
+
+	return count;
+}
+static DEVICE_ATTR(clr_err, S_IRUGO | S_IWUSR, NULL, set_clr_err);
+
+static struct attribute *msm_otg_attrs[] = {
+	&dev_attr_pwr_down.attr,
+	&dev_attr_srp_req.attr,
+	&dev_attr_clr_err.attr,
+	NULL,
+};
+
+static struct attribute_group msm_otg_attr_grp = {
+	.attrs = msm_otg_attrs,
+};
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+static int otg_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
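+/*
+ * debugfs "mode" write handler: switches the driver to user-controlled mode
+ * and forces the inputs for "none", "peripheral" or "host", then schedules
+ * the state machine.
+ */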
+static ssize_t otg_mode_write(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	struct msm_otg *dev = file->private_data;
+	int ret = count;
+	int work = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	dev->pdata->otg_mode = OTG_USER_CONTROL;
+	if (!memcmp(buf, "none", count - 1)) {
+		clear_bit(B_SESS_VLD, &dev->inputs);
+		set_bit(ID, &dev->inputs);
+		work = 1;
+	} else if (!memcmp(buf, "peripheral", count - 1)) {
+		set_bit(B_SESS_VLD, &dev->inputs);
+		set_bit(ID, &dev->inputs);
+		work = 1;
+	} else if (!memcmp(buf, "host", count - 1)) {
+		clear_bit(B_SESS_VLD, &dev->inputs);
+		clear_bit(ID, &dev->inputs);
+		set_bit(A_BUS_REQ, &dev->inputs);
+		work = 1;
+	} else {
+		pr_info("%s: unknown mode specified\n", __func__);
+		ret = -EINVAL;
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	if (work) {
+		wake_lock(&dev->wlock);
+		queue_work(dev->wq, &dev->sm_work);
+	}
+
+	return ret;
+}
+const struct file_operations otgfs_fops = {
+	.open	= otg_open,
+	.write	= otg_mode_write,
+};
+
+#define OTG_INFO_SIZE 512
+static ssize_t otg_info_read(struct file *file, char __user *ubuf,
+				size_t count, loff_t *ppos)
+{
+	char *buf;
+	int temp = 0;
+	int ret;
+	struct msm_otg *dev = file->private_data;
+
+	buf = kzalloc(OTG_INFO_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	temp += scnprintf(buf + temp, OTG_INFO_SIZE - temp,
+			"OTG State:             %s\n"
+			"OTG Mode:              %d\n"
+			"OTG Inputs:            0x%lx\n"
+			"Charger Type:          %d\n"
+			"PMIC VBUS Support:     %u\n"
+			"PMIC ID Support:       %u\n"
+			"Core Clock:            %u\n"
+			"USB In SPS:            %d\n"
+			"pre_emphasis_level:    0x%x\n"
+			"cdr_auto_reset:        0x%x\n"
+			"hs_drv_amplitude:      0x%x\n"
+			"se1_gate_state:        0x%x\n"
+			"swfi_latency:          0x%x\n"
+			"PHY Powercollapse:     0x%x\n"
+			"PCLK Voting:           0x%x\n",
+			state_string(dev->otg.state),
+			dev->pdata->otg_mode,
+			dev->inputs,
+			atomic_read(&dev->chg_type),
+			dev->pmic_vbus_notif_supp,
+			dev->pmic_id_notif_supp,
+			dev->pdata->core_clk,
+			dev->pdata->usb_in_sps,
+			dev->pdata->pemp_level,
+			dev->pdata->cdr_autoreset,
+			dev->pdata->drv_ampl,
+			dev->pdata->se1_gating,
+			dev->pdata->swfi_latency,
+			dev->pdata->phy_can_powercollapse,
+			pclk_requires_voting(&dev->otg));
+
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);
+
+	kfree(buf);
+
+	return ret;
+}
+
+const struct file_operations otgfs_info_fops = {
+	.open	= otg_open,
+	.read	= otg_info_read,
+};
+
+struct dentry *otg_debug_root;
+struct dentry *otg_debug_mode;
+struct dentry *otg_debug_info;
+#endif
+
+static int otg_debugfs_init(struct msm_otg *dev)
+{
+#ifdef CONFIG_DEBUG_FS
+	otg_debug_root = debugfs_create_dir("otg", NULL);
+	if (!otg_debug_root)
+		return -ENOENT;
+
+	otg_debug_mode = debugfs_create_file("mode", 0222,
+						otg_debug_root, dev,
+						&otgfs_fops);
+	if (!otg_debug_mode)
+		goto free_root;
+
+	otg_debug_info = debugfs_create_file("info", 0444,
+						otg_debug_root, dev,
+						&otgfs_info_fops);
+	if (!otg_debug_info)
+		goto free_mode;
+
+	return 0;
+
+free_mode:
+	debugfs_remove(otg_debug_mode);
+	otg_debug_mode = NULL;
+
+free_root:
+	debugfs_remove(otg_debug_root);
+	otg_debug_root = NULL;
+	return -ENOENT;
+#endif
+	return 0;
+}
+
+static void otg_debugfs_cleanup(void)
+{
+#ifdef CONFIG_DEBUG_FS
+	debugfs_remove(otg_debug_info);
+	debugfs_remove(otg_debug_mode);
+	debugfs_remove(otg_debug_root);
+#endif
+}
+
+struct otg_io_access_ops msm_otg_io_ops = {
+	.read = usb_ulpi_read,
+	.write = usb_ulpi_write,
+};
+
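+/*
+ * Probe: acquire and configure clocks, vote for the TCXO D1 buffer, map the
+ * controller registers, set up the PMIC VBUS/ID notifications, LDOs and
+ * VDDCX, request the IRQ, register the transceiver and create the
+ * debugfs/sysfs entries. Each step is unwound in reverse order on failure.
+ */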
+static int __init msm_otg_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct resource *res;
+	struct msm_otg *dev;
+
+	dev = kzalloc(sizeof(struct msm_otg), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	the_msm_otg = dev;
+	dev->otg.dev = &pdev->dev;
+	dev->pdata = pdev->dev.platform_data;
+
+	if (!dev->pdata) {
+		ret = -ENODEV;
+		goto free_dev;
+	}
+
+#ifdef CONFIG_USB_EHCI_MSM_72K
+	if (!dev->pdata->vbus_power) {
+		ret = -ENODEV;
+		goto free_dev;
+	} else
+		dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
+
+#endif
+
+
+	if (dev->pdata->rpc_connect) {
+		ret = dev->pdata->rpc_connect(1);
+		pr_debug("%s: rpc_connect(%d)\n", __func__, ret);
+		if (ret) {
+			pr_err("%s: rpc connect failed\n", __func__);
+			ret = -ENODEV;
+			goto free_dev;
+		}
+	}
+
+	dev->hs_clk = clk_get(&pdev->dev, "usb_hs_clk");
+	if (IS_ERR(dev->hs_clk)) {
+		pr_err("%s: failed to get usb_hs_clk\n", __func__);
+		ret = PTR_ERR(dev->hs_clk);
+		goto rpc_fail;
+	}
+	clk_set_rate(dev->hs_clk, 60000000);
+
+	/* pm qos request to prevent apps idle power collapse */
+	pm_qos_add_request(&dev->pdata->pm_qos_req_dma, PM_QOS_CPU_DMA_LATENCY,
+			   PM_QOS_DEFAULT_VALUE);
+
+	/* If the USB core is running its protocol engine based on PCLK,
+	 * PCLK must be running at >60MHz for correct HSUSB operation and
+	 * the USB core cannot tolerate frequency changes on PCLK. For such
+	 * USB cores, vote for the maximum clock frequency on the pclk source.
+	 */
+	if (dev->pdata->pclk_src_name) {
+		dev->pclk_src = clk_get(0, dev->pdata->pclk_src_name);
+		if (IS_ERR(dev->pclk_src))
+			goto put_hs_clk;
+		clk_set_rate(dev->pclk_src, INT_MAX);
+		msm_otg_vote_for_pclk_source(dev, 1);
+	}
+
+	if (!dev->pdata->pclk_is_hw_gated) {
+		dev->hs_pclk = clk_get(&pdev->dev, "usb_hs_pclk");
+		if (IS_ERR(dev->hs_pclk)) {
+			pr_err("%s: failed to get usb_hs_pclk\n", __func__);
+			ret = PTR_ERR(dev->hs_pclk);
+			goto put_pclk_src;
+		}
+		clk_enable(dev->hs_pclk);
+	}
+
+	if (dev->pdata->core_clk) {
+		dev->hs_cclk = clk_get(&pdev->dev, "usb_hs_core_clk");
+		if (IS_ERR(dev->hs_cclk)) {
+			pr_err("%s: failed to get usb_hs_core_clk\n", __func__);
+			ret = PTR_ERR(dev->hs_cclk);
+			goto put_hs_pclk;
+		}
+		clk_enable(dev->hs_cclk);
+	}
+
+	if (!dev->pdata->phy_reset) {
+		dev->phy_reset_clk = clk_get(&pdev->dev, "usb_phy_clk");
+		if (IS_ERR(dev->phy_reset_clk)) {
+			pr_err("%s: failed to get usb_phy_clk\n", __func__);
+			ret = PTR_ERR(dev->phy_reset_clk);
+			goto put_hs_cclk;
+		}
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		pr_err("%s: failed to get platform resource mem\n", __func__);
+		ret = -ENODEV;
+		goto put_phy_clk;
+	}
+
+	dev->regs = ioremap(res->start, resource_size(res));
+	if (!dev->regs) {
+		pr_err("%s: ioremap failed\n", __func__);
+		ret = -ENOMEM;
+		goto put_phy_clk;
+	}
+	dev->irq = platform_get_irq(pdev, 0);
+	if (!dev->irq) {
+		pr_err("%s: platform_get_irq failed\n", __func__);
+		ret = -ENODEV;
+		goto free_regs;
+	}
+	dev->xo_handle = msm_xo_get(MSM_XO_TCXO_D1, "usb");
+	if (IS_ERR(dev->xo_handle)) {
+		pr_err("%s: not able to get the handle "
+			"to vote for TCXO D1 buffer\n", __func__);
+		ret = PTR_ERR(dev->xo_handle);
+		goto free_regs;
+	}
+
+	ret = msm_xo_mode_vote(dev->xo_handle, MSM_XO_MODE_ON);
+	if (ret) {
+		pr_err("%s: failed to vote for TCXO "
+			"D1 buffer, err:%d\n", __func__, ret);
+		goto free_xo_handle;
+	}
+
+
+	msm_otg_init_timer(dev);
+	INIT_WORK(&dev->sm_work, msm_otg_sm_work);
+	INIT_WORK(&dev->otg_resume_work, msm_otg_resume_w);
+	spin_lock_init(&dev->lock);
+	wake_lock_init(&dev->wlock, WAKE_LOCK_SUSPEND, "msm_otg");
+
+	dev->wq = alloc_workqueue("k_otg", WQ_NON_REENTRANT, 0);
+	if (!dev->wq) {
+		ret = -ENOMEM;
+		goto free_wlock;
+	}
+
+	if (dev->pdata->init_gpio) {
+		ret = dev->pdata->init_gpio(1);
+		if (ret) {
+			pr_err("%s: gpio init failed with err:%d\n",
+					__func__, ret);
+			goto free_wq;
+		}
+	}
+	/* To reduce phy power consumption and to avoid external LDO
+	 * on the board, PMIC comparators can be used to detect VBUS
+	 * session change.
+	 */
+	if (dev->pdata->pmic_vbus_notif_init) {
+		ret = dev->pdata->pmic_vbus_notif_init
+			(&msm_otg_set_vbus_state, 1);
+		if (!ret) {
+			dev->pmic_vbus_notif_supp = 1;
+		} else if (ret != -ENOTSUPP) {
+			pr_err("%s: pmic_vbus_notif_init() failed, err:%d\n",
+					__func__, ret);
+			goto free_gpio;
+		}
+	}
+
+	if (dev->pdata->pmic_id_notif_init) {
+		ret = dev->pdata->pmic_id_notif_init(&msm_otg_set_id_state, 1);
+		if (!ret) {
+			dev->pmic_id_notif_supp = 1;
+		} else if (ret != -ENOTSUPP) {
+			pr_err("%s: pmic_id_notif_init failed, err:%d\n",
+					__func__, ret);
+			goto free_pmic_vbus_notif;
+		}
+	}
+
+	if (dev->pdata->pmic_vbus_irq)
+		dev->vbus_on_irq = dev->pdata->pmic_vbus_irq;
+
+	/* vote for vddcx, as PHY cannot tolerate vddcx below 1.0V */
+	if (dev->pdata->init_vddcx) {
+		ret = dev->pdata->init_vddcx(1);
+		if (ret) {
+			pr_err("%s: unable to enable vddcx digital core:%d\n",
+				__func__, ret);
+			goto free_pmic_id_notif;
+		}
+	}
+
+	if (dev->pdata->ldo_init) {
+		ret = dev->pdata->ldo_init(1);
+		if (ret) {
+			pr_err("%s: ldo_init failed with err:%d\n",
+					__func__, ret);
+			goto free_config_vddcx;
+		}
+	}
+
+	if (dev->pdata->ldo_enable) {
+		ret = dev->pdata->ldo_enable(1);
+		if (ret) {
+			pr_err("%s: ldo_enable failed with err:%d\n",
+					__func__, ret);
+			goto free_ldo_init;
+		}
+	}
+
+
+	/* Ack all pending interrupts and clear interrupt enable registers */
+	writel((readl(USB_OTGSC) & ~OTGSC_INTR_MASK), USB_OTGSC);
+	writel(readl(USB_USBSTS), USB_USBSTS);
+	writel(0, USB_USBINTR);
+	/* Ensure that above STOREs are completed before enabling interrupts */
+	mb();
+
+	ret = request_irq(dev->irq, msm_otg_irq, IRQF_SHARED,
+					"msm_otg", dev);
+	if (ret) {
+		pr_err("%s: request irq failed\n", __func__);
+		goto free_ldo_enable;
+	}
+
+	dev->otg.set_peripheral = msm_otg_set_peripheral;
+#ifdef CONFIG_USB_EHCI_MSM_72K
+	dev->otg.set_host = msm_otg_set_host;
+#endif
+	dev->otg.set_suspend = msm_otg_set_suspend;
+	dev->otg.start_hnp = msm_otg_start_hnp;
+	dev->otg.send_event = msm_otg_send_event;
+	dev->otg.set_power = msm_otg_set_power;
+	dev->set_clk = msm_otg_set_clk;
+	dev->reset = otg_reset;
+	dev->otg.io_ops = &msm_otg_io_ops;
+	if (otg_set_transceiver(&dev->otg)) {
+		WARN_ON(1);
+		goto free_otg_irq;
+	}
+#ifdef CONFIG_USB_MSM_ACA
+	/* The link doesn't support id_a/b/c interrupts, hence polling
+	 * needs to be done to support the ACA charger.
+	 */
+	init_timer(&dev->id_timer);
+	dev->id_timer.function = msm_otg_id_func;
+	dev->id_timer.data = (unsigned long) dev;
+#endif
+
+	atomic_set(&dev->chg_type, USB_CHG_TYPE__INVALID);
+	if (dev->pdata->chg_init && dev->pdata->chg_init(1))
+		pr_err("%s: chg_init failed\n", __func__);
+
+	device_init_wakeup(&pdev->dev, 1);
+
+	ret = pm_runtime_set_active(&pdev->dev);
+	if (ret < 0)
+		pr_err("%s: pm_runtime: Fail to set active\n", __func__);
+
+	ret = 0;
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_get(&pdev->dev);
+
+
+	ret = otg_debugfs_init(dev);
+	if (ret) {
+		pr_err("%s: otg_debugfs_init failed\n", __func__);
+		goto chg_deinit;
+	}
+
+#ifdef CONFIG_USB_OTG
+	ret = sysfs_create_group(&pdev->dev.kobj, &msm_otg_attr_grp);
+	if (ret < 0) {
+		pr_err("%s: Failed to create the sysfs entry\n", __func__);
+		otg_debugfs_cleanup();
+		goto chg_deinit;
+	}
+#endif
+
+
+	return 0;
+
+chg_deinit:
+	if (dev->pdata->chg_init)
+		dev->pdata->chg_init(0);
+free_otg_irq:
+	free_irq(dev->irq, dev);
+free_ldo_enable:
+	if (dev->pdata->ldo_enable)
+		dev->pdata->ldo_enable(0);
+	if (dev->pdata->setup_gpio)
+		dev->pdata->setup_gpio(USB_SWITCH_DISABLE);
+free_ldo_init:
+	if (dev->pdata->ldo_init)
+		dev->pdata->ldo_init(0);
+free_config_vddcx:
+	if (dev->pdata->init_vddcx)
+		dev->pdata->init_vddcx(0);
+free_pmic_id_notif:
+	if (dev->pdata->pmic_id_notif_init && dev->pmic_id_notif_supp)
+		dev->pdata->pmic_id_notif_init(&msm_otg_set_id_state, 0);
+free_pmic_vbus_notif:
+	if (dev->pdata->pmic_vbus_notif_init && dev->pmic_vbus_notif_supp)
+		dev->pdata->pmic_vbus_notif_init(&msm_otg_set_vbus_state, 0);
+free_gpio:
+	if (dev->pdata->init_gpio)
+		dev->pdata->init_gpio(0);
+free_wq:
+	destroy_workqueue(dev->wq);
+free_wlock:
+	wake_lock_destroy(&dev->wlock);
+free_xo_handle:
+	msm_xo_put(dev->xo_handle);
+free_regs:
+	iounmap(dev->regs);
+put_phy_clk:
+	if (dev->phy_reset_clk)
+		clk_put(dev->phy_reset_clk);
+put_hs_cclk:
+	if (dev->hs_cclk) {
+		clk_disable(dev->hs_cclk);
+		clk_put(dev->hs_cclk);
+	}
+put_hs_pclk:
+	if (dev->hs_pclk) {
+		clk_disable(dev->hs_pclk);
+		clk_put(dev->hs_pclk);
+	}
+put_pclk_src:
+	if (dev->pclk_src) {
+		msm_otg_vote_for_pclk_source(dev, 0);
+		clk_put(dev->pclk_src);
+	}
+put_hs_clk:
+	if (dev->hs_clk)
+		clk_put(dev->hs_clk);
+rpc_fail:
+	if (dev->pdata->rpc_connect)
+		dev->pdata->rpc_connect(0);
+free_dev:
+	kfree(dev);
+	return ret;
+}
+
+static int __exit msm_otg_remove(struct platform_device *pdev)
+{
+	struct msm_otg *dev = the_msm_otg;
+
+	otg_debugfs_cleanup();
+#ifdef CONFIG_USB_OTG
+	sysfs_remove_group(&pdev->dev.kobj, &msm_otg_attr_grp);
+#endif
+	destroy_workqueue(dev->wq);
+	wake_lock_destroy(&dev->wlock);
+
+	if (dev->pdata->setup_gpio)
+		dev->pdata->setup_gpio(USB_SWITCH_DISABLE);
+
+	if (dev->pdata->init_vddcx)
+		dev->pdata->init_vddcx(0);
+	if (dev->pdata->ldo_enable)
+		dev->pdata->ldo_enable(0);
+
+	if (dev->pdata->ldo_init)
+		dev->pdata->ldo_init(0);
+
+	if (dev->pmic_vbus_notif_supp)
+		dev->pdata->pmic_vbus_notif_init(&msm_otg_set_vbus_state, 0);
+
+	if (dev->pmic_id_notif_supp)
+		dev->pdata->pmic_id_notif_init(&msm_otg_set_id_state, 0);
+
+#ifdef CONFIG_USB_MSM_ACA
+	del_timer_sync(&dev->id_timer);
+#endif
+	if (dev->pdata->chg_init)
+		dev->pdata->chg_init(0);
+	free_irq(dev->irq, dev);
+	iounmap(dev->regs);
+	if (dev->hs_cclk) {
+		clk_disable(dev->hs_cclk);
+		clk_put(dev->hs_cclk);
+	}
+	if (dev->hs_pclk) {
+		clk_disable(dev->hs_pclk);
+		clk_put(dev->hs_pclk);
+	}
+	if (dev->hs_clk)
+		clk_put(dev->hs_clk);
+	if (dev->phy_reset_clk)
+		clk_put(dev->phy_reset_clk);
+	if (dev->pdata->rpc_connect)
+		dev->pdata->rpc_connect(0);
+	msm_xo_put(dev->xo_handle);
+
+	pm_runtime_put(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	pm_qos_remove_request(&dev->pdata->pm_qos_req_dma);
+	if (dev->pclk_src)
+		clk_put(dev->pclk_src);
+	kfree(dev);
+	return 0;
+}
+
+static int msm_otg_runtime_suspend(struct device *dev)
+{
+	struct msm_otg *otg = the_msm_otg;
+
+	dev_dbg(dev, "pm_runtime: suspending...\n");
+	msm_otg_suspend(otg);
+	return  0;
+}
+
+static int msm_otg_runtime_resume(struct device *dev)
+{
+	struct msm_otg *otg = the_msm_otg;
+
+	dev_dbg(dev, "pm_runtime: resuming...\n");
+	msm_otg_resume(otg);
+	return  0;
+}
+
+static int msm_otg_runtime_idle(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: idling...\n");
+	return  0;
+}
+
+static struct dev_pm_ops msm_otg_dev_pm_ops = {
+	.runtime_suspend = msm_otg_runtime_suspend,
+	.runtime_resume = msm_otg_runtime_resume,
+	.runtime_idle = msm_otg_runtime_idle,
+};
+
+static struct platform_driver msm_otg_driver = {
+	.remove = __exit_p(msm_otg_remove),
+	.driver = {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.pm = &msm_otg_dev_pm_ops,
+	},
+};
+
+static int __init msm_otg_init(void)
+{
+	return platform_driver_probe(&msm_otg_driver, msm_otg_probe);
+}
+
+static void __exit msm_otg_exit(void)
+{
+	platform_driver_unregister(&msm_otg_driver);
+}
+
+module_init(msm_otg_init);
+module_exit(msm_otg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM USB transceiver driver");
+MODULE_VERSION("1.00");
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index b276f8f..ead2976 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -9,11 +9,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
  */
 
 #include <linux/module.h>
@@ -39,12 +34,34 @@
 #include <linux/usb/msm_hsusb.h>
 #include <linux/usb/msm_hsusb_hw.h>
 #include <linux/regulator/consumer.h>
+#include <linux/mfd/pm8xxx/pm8921-charger.h>
 
 #include <mach/clk.h>
 
 #define MSM_USB_BASE	(motg->regs)
 #define DRIVER_NAME	"msm_otg"
 
+#ifdef CONFIG_USB_MSM_ACA
+static void msm_chg_enable_aca_det(struct msm_otg *motg);
+static void msm_chg_enable_aca_intr(struct msm_otg *motg);
+#else
+static inline bool msm_chg_aca_detect(struct msm_otg *motg)
+{
+	return false;
+}
+
+static inline void msm_chg_enable_aca_det(struct msm_otg *motg)
+{
+}
+static inline void msm_chg_enable_aca_intr(struct msm_otg *motg)
+{
+}
+static inline bool msm_chg_check_aca_intr(struct msm_otg *motg)
+{
+	return false;
+}
+#endif
+
 #define ULPI_IO_TIMEOUT_USEC	(10 * 1000)
 
 #define USB_PHY_3P3_VOL_MIN	3050000 /* uV */
@@ -60,6 +77,8 @@
 #define USB_PHY_VDD_DIG_VOL_MIN	1000000 /* uV */
 #define USB_PHY_VDD_DIG_VOL_MAX	1320000 /* uV */
 
+static struct msm_otg *the_msm_otg;
+
 static struct regulator *hsusb_3p3;
 static struct regulator *hsusb_1p8;
 static struct regulator *hsusb_vddcx;
@@ -87,18 +106,28 @@
 
 		ret = regulator_enable(hsusb_vddcx);
 		if (ret) {
-			dev_err(motg->otg.dev, "unable to enable hsusb vddcx\n");
+			regulator_set_voltage(hsusb_vddcx, 0,
+			USB_PHY_VDD_DIG_VOL_MIN);
 			regulator_put(hsusb_vddcx);
+			dev_err(motg->otg.dev, "unable to enable the hsusb vddcx\n");
+			return ret;
 		}
+
 	} else {
-		ret = regulator_set_voltage(hsusb_vddcx, 0,
-			USB_PHY_VDD_DIG_VOL_MAX);
-		if (ret)
-			dev_err(motg->otg.dev, "unable to set the voltage "
-					"for hsusb vddcx\n");
+
 		ret = regulator_disable(hsusb_vddcx);
-		if (ret)
+		if (ret) {
 			dev_err(motg->otg.dev, "unable to disable hsusb vddcx\n");
+			return ret;
+		}
+
+		ret = regulator_set_voltage(hsusb_vddcx, 0,
+			USB_PHY_VDD_DIG_VOL_MIN);
+		if (ret) {
+			dev_err(motg->otg.dev, "unable to set the voltage "
+					"for hsusb vddcx\n");
+			return ret;
+		}
 
 		regulator_put(hsusb_vddcx);
 	}
@@ -120,42 +149,32 @@
 		rc = regulator_set_voltage(hsusb_3p3, USB_PHY_3P3_VOL_MIN,
 				USB_PHY_3P3_VOL_MAX);
 		if (rc) {
-			dev_err(motg->otg.dev, "unable to set voltage level "
-					"for hsusb 3p3\n");
-			goto put_3p3;
-		}
-		rc = regulator_enable(hsusb_3p3);
-		if (rc) {
-			dev_err(motg->otg.dev, "unable to enable the hsusb 3p3\n");
+			dev_err(motg->otg.dev, "unable to set voltage level for "
+					"hsusb 3p3\n");
 			goto put_3p3;
 		}
 		hsusb_1p8 = regulator_get(motg->otg.dev, "HSUSB_1p8");
 		if (IS_ERR(hsusb_1p8)) {
 			dev_err(motg->otg.dev, "unable to get hsusb 1p8\n");
 			rc = PTR_ERR(hsusb_1p8);
-			goto disable_3p3;
+			goto put_3p3_lpm;
 		}
 		rc = regulator_set_voltage(hsusb_1p8, USB_PHY_1P8_VOL_MIN,
 				USB_PHY_1P8_VOL_MAX);
 		if (rc) {
-			dev_err(motg->otg.dev, "unable to set voltage level "
-					"for hsusb 1p8\n");
-			goto put_1p8;
-		}
-		rc = regulator_enable(hsusb_1p8);
-		if (rc) {
-			dev_err(motg->otg.dev, "unable to enable the hsusb 1p8\n");
+			dev_err(motg->otg.dev, "unable to set voltage level for "
+					"hsusb 1p8\n");
 			goto put_1p8;
 		}
 
 		return 0;
 	}
 
-	regulator_disable(hsusb_1p8);
 put_1p8:
+	regulator_set_voltage(hsusb_1p8, 0, USB_PHY_1P8_VOL_MAX);
 	regulator_put(hsusb_1p8);
-disable_3p3:
-	regulator_disable(hsusb_3p3);
+put_3p3_lpm:
+	regulator_set_voltage(hsusb_3p3, 0, USB_PHY_3P3_VOL_MAX);
 put_3p3:
 	regulator_put(hsusb_3p3);
 	return rc;
@@ -187,7 +206,7 @@
 }
 #endif
 
-static int msm_hsusb_ldo_set_mode(int on)
+static int msm_hsusb_ldo_enable(struct msm_otg *motg, int on)
 {
 	int ret = 0;
 
@@ -205,29 +224,61 @@
 		ret = regulator_set_optimum_mode(hsusb_1p8,
 				USB_PHY_1P8_HPM_LOAD);
 		if (ret < 0) {
-			pr_err("%s: Unable to set HPM of the regulator "
+			pr_err("%s: Unable to set HPM of the regulator: "
 				"HSUSB_1p8\n", __func__);
 			return ret;
 		}
+
+		ret = regulator_enable(hsusb_1p8);
+		if (ret) {
+			dev_err(motg->otg.dev, "%s: unable to enable the hsusb 1p8\n",
+				__func__);
+			regulator_set_optimum_mode(hsusb_1p8, 0);
+			return ret;
+		}
+
 		ret = regulator_set_optimum_mode(hsusb_3p3,
 				USB_PHY_3P3_HPM_LOAD);
 		if (ret < 0) {
-			pr_err("%s: Unable to set HPM of the regulator "
+			pr_err("%s: Unable to set HPM of the regulator: "
 				"HSUSB_3p3\n", __func__);
-			regulator_set_optimum_mode(hsusb_1p8,
-				USB_PHY_1P8_LPM_LOAD);
+			regulator_set_optimum_mode(hsusb_1p8, 0);
+			regulator_disable(hsusb_1p8);
 			return ret;
 		}
+
+		ret = regulator_enable(hsusb_3p3);
+		if (ret) {
+			dev_err(motg->otg.dev, "%s: unable to enable the hsusb 3p3\n",
+				__func__);
+			regulator_set_optimum_mode(hsusb_3p3, 0);
+			regulator_set_optimum_mode(hsusb_1p8, 0);
+			regulator_disable(hsusb_1p8);
+			return ret;
+		}
+
 	} else {
-		ret = regulator_set_optimum_mode(hsusb_1p8,
-				USB_PHY_1P8_LPM_LOAD);
+		ret = regulator_disable(hsusb_1p8);
+		if (ret) {
+			dev_err(motg->otg.dev, "%s: unable to disable the hsusb 1p8\n",
+				__func__);
+			return ret;
+		}
+
+		ret = regulator_set_optimum_mode(hsusb_1p8, 0);
 		if (ret < 0)
-			pr_err("%s: Unable to set LPM of the regulator "
+			pr_err("%s: Unable to set LPM of the regulator: "
 				"HSUSB_1p8\n", __func__);
-		ret = regulator_set_optimum_mode(hsusb_3p3,
-				USB_PHY_3P3_LPM_LOAD);
+
+		ret = regulator_disable(hsusb_3p3);
+		if (ret) {
+			dev_err(motg->otg.dev, "%s: unable to disable the hsusb 3p3\n",
+				 __func__);
+			return ret;
+		}
+		ret = regulator_set_optimum_mode(hsusb_3p3, 0);
 		if (ret < 0)
-			pr_err("%s: Unable to set LPM of the regulator "
+			pr_err("%s: Unable to set LPM of the regulator: "
 				"HSUSB_3p3\n", __func__);
 	}
 
@@ -399,6 +450,7 @@
 	u32 val = 0;
 	u32 ulpi_val = 0;
 
+	clk_enable(motg->clk);
 	ret = msm_otg_phy_reset(motg);
 	if (ret) {
 		dev_err(otg->dev, "phy_reset failed\n");
@@ -425,19 +477,24 @@
 	writel(0x0, USB_AHBBURST);
 	writel(0x00, USB_AHBMODE);
 
-	if (pdata->otg_control == OTG_PHY_CONTROL) {
-		val = readl(USB_OTGSC);
-		if (pdata->mode == USB_OTG) {
-			ulpi_val = ULPI_INT_IDGRD | ULPI_INT_SESS_VALID;
-			val |= OTGSC_IDIE | OTGSC_BSVIE;
-		} else if (pdata->mode == USB_PERIPHERAL) {
-			ulpi_val = ULPI_INT_SESS_VALID;
-			val |= OTGSC_BSVIE;
-		}
-		writel(val, USB_OTGSC);
-		ulpi_write(otg, ulpi_val, ULPI_USB_INT_EN_RISE);
-		ulpi_write(otg, ulpi_val, ULPI_USB_INT_EN_FALL);
+	/* Ensure that RESET operation is completed before turning off clock */
+	mb();
+	clk_disable(motg->clk);
+
+	val = readl_relaxed(USB_OTGSC);
+	if (pdata->mode == USB_OTG) {
+		ulpi_val = ULPI_INT_IDGRD | ULPI_INT_SESS_VALID;
+		val |= OTGSC_IDIE | OTGSC_BSVIE;
+	} else if (pdata->mode == USB_PERIPHERAL) {
+		ulpi_val = ULPI_INT_SESS_VALID;
+		val |= OTGSC_BSVIE;
 	}
+	writel_relaxed(val, USB_OTGSC);
+	ulpi_write(otg, ulpi_val, ULPI_USB_INT_EN_RISE);
+	ulpi_write(otg, ulpi_val, ULPI_USB_INT_EN_FALL);
+
+	msm_chg_enable_aca_det(motg);
+	msm_chg_enable_aca_intr(motg);
 
 	return 0;
 }
@@ -452,11 +509,14 @@
 	struct usb_bus *bus = otg->host;
 	struct msm_otg_platform_data *pdata = motg->pdata;
 	int cnt = 0;
+	bool session_active;
 
 	if (atomic_read(&motg->in_lpm))
 		return 0;
 
 	disable_irq(motg->irq);
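+	/* Session is active if we are host (ID grounded) or VBUS is valid */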
+	session_active = (otg->host && !test_bit(ID, &motg->inputs)) ||
+				test_bit(B_SESS_VLD, &motg->inputs);
 	/*
 	 * Chipidea 45-nm PHY suspend sequence:
 	 *
@@ -482,6 +542,16 @@
 	}
 
 	/*
+	 * Turn off the OTG comparators if we depend on the PMIC for
+	 * VBUS and ID notifications.
+	 */
+	if ((motg->caps & ALLOW_PHY_COMP_DISABLE) && !session_active) {
+		ulpi_write(otg, OTG_COMP_DISABLE,
+			ULPI_SET(ULPI_PWR_CLK_MNG_REG));
+		motg->lpm_flags |= PHY_OTG_COMP_DISABLED;
+	}
+
+	/*
 	 * PHY may take some time or even fail to enter into low power
 	 * mode (LPM). Hence poll for 500 msec and reset the PHY and link
 	 * in failure case.
@@ -510,31 +580,40 @@
 	 */
 	writel(readl(USB_USBCMD) | ASYNC_INTR_CTRL | ULPI_STP_CTRL, USB_USBCMD);
 
-	if (motg->pdata->phy_type == SNPS_28NM_INTEGRATED_PHY &&
-			motg->pdata->otg_control == OTG_PMIC_CONTROL)
-		writel(readl(USB_PHY_CTRL) | PHY_RETEN, USB_PHY_CTRL);
+	if (motg->caps & ALLOW_PHY_RETENTION && !session_active) {
+		writel_relaxed(readl_relaxed(USB_PHY_CTRL) & ~PHY_RETEN,
+				USB_PHY_CTRL);
+		motg->lpm_flags |= PHY_RETENTIONED;
+	}
 
+	/* Ensure that above operation is completed before turning off clocks */
+	mb();
 	clk_disable(motg->pclk);
-	clk_disable(motg->clk);
 	if (motg->core_clk)
 		clk_disable(motg->core_clk);
 
 	if (!IS_ERR(motg->pclk_src))
 		clk_disable(motg->pclk_src);
 
-	if (motg->pdata->phy_type == SNPS_28NM_INTEGRATED_PHY &&
-			motg->pdata->otg_control == OTG_PMIC_CONTROL) {
-		msm_hsusb_ldo_set_mode(0);
-		msm_hsusb_config_vddcx(0);
+	if (motg->caps & ALLOW_PHY_POWER_COLLAPSE && !session_active) {
+		msm_hsusb_ldo_enable(motg, 0);
+		motg->lpm_flags |= PHY_PWR_COLLAPSED;
 	}
 
-	if (device_may_wakeup(otg->dev))
+	if (motg->lpm_flags & PHY_RETENTIONED)
+		msm_hsusb_config_vddcx(0);
+
+	if (device_may_wakeup(otg->dev)) {
 		enable_irq_wake(motg->irq);
+		if (motg->pdata->pmic_id_irq)
+			enable_irq_wake(motg->pdata->pmic_id_irq);
+	}
 	if (bus)
 		clear_bit(HCD_FLAG_HW_ACCESSIBLE, &(bus_to_hcd(bus))->flags);
 
 	atomic_set(&motg->in_lpm, 1);
 	enable_irq(motg->irq);
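+	/* Allow the system to suspend now that the controller is in LPM */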
+	wake_unlock(&motg->wlock);
 
 	dev_info(otg->dev, "USB in low power mode\n");
 
@@ -551,19 +630,24 @@
 	if (!atomic_read(&motg->in_lpm))
 		return 0;
 
+	wake_lock(&motg->wlock);
 	if (!IS_ERR(motg->pclk_src))
 		clk_enable(motg->pclk_src);
 
 	clk_enable(motg->pclk);
-	clk_enable(motg->clk);
 	if (motg->core_clk)
 		clk_enable(motg->core_clk);
 
-	if (motg->pdata->phy_type == SNPS_28NM_INTEGRATED_PHY &&
-			motg->pdata->otg_control == OTG_PMIC_CONTROL) {
-		msm_hsusb_ldo_set_mode(1);
+	if (motg->lpm_flags & PHY_PWR_COLLAPSED) {
+		msm_hsusb_ldo_enable(motg, 1);
+		motg->lpm_flags &= ~PHY_PWR_COLLAPSED;
+	}
+
+	if (motg->lpm_flags & PHY_RETENTIONED) {
 		msm_hsusb_config_vddcx(1);
-		writel(readl(USB_PHY_CTRL) & ~PHY_RETEN, USB_PHY_CTRL);
+		writel_relaxed(readl_relaxed(USB_PHY_CTRL) | PHY_RETEN,
+				USB_PHY_CTRL);
+		motg->lpm_flags &= ~PHY_RETENTIONED;
 	}
 
 	temp = readl(USB_USBCMD);
@@ -598,8 +682,17 @@
 	}
 
 skip_phy_resume:
-	if (device_may_wakeup(otg->dev))
+	/* Turn on the OTG comparators on resume */
+	if (motg->lpm_flags & PHY_OTG_COMP_DISABLED) {
+		ulpi_write(otg, OTG_COMP_DISABLE,
+			ULPI_CLR(ULPI_PWR_CLK_MNG_REG));
+		motg->lpm_flags &= ~PHY_OTG_COMP_DISABLED;
+	}
+	if (device_may_wakeup(otg->dev)) {
 		disable_irq_wake(motg->irq);
+		if (motg->pdata->pmic_id_irq)
+			disable_irq_wake(motg->pdata->pmic_id_irq);
+	}
 	if (bus)
 		set_bit(HCD_FLAG_HW_ACCESSIBLE, &(bus_to_hcd(bus))->flags);
 
@@ -622,8 +715,8 @@
 	if (motg->cur_power == mA)
 		return;
 
-	/* TODO: Notify PMIC about available current */
 	dev_info(motg->otg.dev, "Avail curr from USB = %u\n", mA);
+	pm8921_charger_vbus_draw(mA);
 	motg->cur_power = mA;
 }
 
@@ -658,8 +751,6 @@
 	if (on) {
 		dev_dbg(otg->dev, "host on\n");
 
-		if (pdata->vbus_power)
-			pdata->vbus_power(1);
 		/*
 		 * Some boards have a switch controlled by gpio
 		 * to enable/disable internal HUB. Enable internal
@@ -667,22 +758,49 @@
 		 */
 		if (pdata->setup_gpio)
 			pdata->setup_gpio(OTG_STATE_A_HOST);
-#ifdef CONFIG_USB
 		usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
-#endif
 	} else {
 		dev_dbg(otg->dev, "host off\n");
 
-#ifdef CONFIG_USB
 		usb_remove_hcd(hcd);
-#endif
 		if (pdata->setup_gpio)
 			pdata->setup_gpio(OTG_STATE_UNDEFINED);
-		if (pdata->vbus_power)
-			pdata->vbus_power(0);
 	}
 }
 
+static int msm_otg_usbdev_notify(struct notifier_block *self,
+			unsigned long action, void *priv)
+{
+	struct msm_otg *motg = container_of(self, struct msm_otg, usbdev_nb);
+	struct usb_device *udev;
+
+	switch (action) {
+	case USB_DEVICE_ADD:
+	case USB_DEVICE_CONFIG:
+		udev = priv;
+		/*
+		 * We are only interested in devices connected directly to
+		 * the root hub. An ACA dock can supply IDEV_CHG irrespective
+		 * of the devices connected on its accessory port.
+		 */
+		if (!udev->parent || udev->parent->parent ||
+				motg->chg_type == USB_ACA_DOCK_CHARGER)
+			break;
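+		/* bMaxPower is expressed in units of 2 mA */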
+		if (udev->actconfig)
+			motg->mA_port = udev->actconfig->desc.bMaxPower * 2;
+		else
+			motg->mA_port = IUNIT;
+
+		if (test_bit(ID_A, &motg->inputs))
+			msm_otg_notify_charger(motg, IDEV_CHG_MIN -
+					motg->mA_port);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
 static int msm_otg_set_host(struct otg_transceiver *otg, struct usb_bus *host)
 {
 	struct msm_otg *motg = container_of(otg, struct msm_otg, otg);
@@ -700,7 +818,10 @@
 	if (!host) {
 		if (otg->state == OTG_STATE_A_HOST) {
 			pm_runtime_get_sync(otg->dev);
+			usb_unregister_notify(&motg->usbdev_nb);
 			msm_otg_start_host(otg, 0);
+			if (motg->pdata->vbus_power)
+				motg->pdata->vbus_power(0);
 			otg->host = NULL;
 			otg->state = OTG_STATE_UNDEFINED;
 			schedule_work(&motg->sm_work);
@@ -714,6 +835,8 @@
 	hcd = bus_to_hcd(host);
 	hcd->power_budget = motg->pdata->power_budget;
 
+	motg->usbdev_nb.notifier_call = msm_otg_usbdev_notify;
+	usb_register_notify(&motg->usbdev_nb);
 	otg->host = host;
 	dev_dbg(otg->dev, "host driver registered w/ transceiver\n");
 
@@ -798,6 +921,108 @@
 	return 0;
 }
 
+#ifdef CONFIG_USB_MSM_ACA
+static bool msm_chg_aca_detect(struct msm_otg *motg)
+{
+	struct otg_transceiver *otg = &motg->otg;
+	u32 int_sts;
+	bool ret = false;
+
+	if (motg->pdata->phy_type == CI_45NM_INTEGRATED_PHY)
+		goto out;
+
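+	/* Decode the ID-pin resistance (RID) state into ID_A/B/C below */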
+	int_sts = ulpi_read(otg, 0x87);
+	switch (int_sts & 0x1C) {
+	case 0x08:
+		if (!test_and_set_bit(ID_A, &motg->inputs)) {
+			dev_dbg(otg->dev, "ID_A\n");
+			motg->chg_type = USB_ACA_A_CHARGER;
+			motg->chg_state = USB_CHG_STATE_DETECTED;
+			clear_bit(ID_B, &motg->inputs);
+			clear_bit(ID_C, &motg->inputs);
+			ret = true;
+		}
+		break;
+	case 0x0C:
+		if (!test_and_set_bit(ID_B, &motg->inputs)) {
+			dev_dbg(otg->dev, "ID_B\n");
+			motg->chg_type = USB_ACA_B_CHARGER;
+			motg->chg_state = USB_CHG_STATE_DETECTED;
+			clear_bit(ID_A, &motg->inputs);
+			clear_bit(ID_C, &motg->inputs);
+			ret = true;
+		}
+		break;
+	case 0x10:
+		if (!test_and_set_bit(ID_C, &motg->inputs)) {
+			dev_dbg(otg->dev, "ID_C\n");
+			motg->chg_type = USB_ACA_C_CHARGER;
+			motg->chg_state = USB_CHG_STATE_DETECTED;
+			clear_bit(ID_A, &motg->inputs);
+			clear_bit(ID_B, &motg->inputs);
+			ret = true;
+		}
+		break;
+	default:
+		ret = test_and_clear_bit(ID_A, &motg->inputs) |
+			test_and_clear_bit(ID_B, &motg->inputs) |
+			test_and_clear_bit(ID_C, &motg->inputs);
+		if (ret) {
+			dev_dbg(otg->dev, "ID A/B/C is no more\n");
+			motg->chg_type = USB_INVALID_CHARGER;
+			motg->chg_state = USB_CHG_STATE_UNDEFINED;
+		}
+	}
+out:
+	return ret;
+}
+
+static void msm_chg_enable_aca_det(struct msm_otg *motg)
+{
+	struct otg_transceiver *otg = &motg->otg;
+
+	switch (motg->pdata->phy_type) {
+	case SNPS_28NM_INTEGRATED_PHY:
+		/* ACA ID pin resistance detection enable */
+		ulpi_write(otg, 0x20, 0x85);
+		break;
+	default:
+		break;
+	}
+}
+
+static void msm_chg_enable_aca_intr(struct msm_otg *motg)
+{
+	struct otg_transceiver *otg = &motg->otg;
+
+	switch (motg->pdata->phy_type) {
+	case SNPS_28NM_INTEGRATED_PHY:
+		/* Enables ACA Detection interrupt (on any RID change) */
+		ulpi_write(otg, 0x20, 0x94);
+		break;
+	default:
+		break;
+	}
+}
+
+static bool msm_chg_check_aca_intr(struct msm_otg *motg)
+{
+	struct otg_transceiver *otg = &motg->otg;
+	bool ret = false;
+
+	switch (motg->pdata->phy_type) {
+	case SNPS_28NM_INTEGRATED_PHY:
+		if (ulpi_read(otg, 0x91) & 1) {
+			dev_dbg(otg->dev, "RID change\n");
+			ulpi_write(otg, 0x01, 0x92);
+			ret = msm_chg_aca_detect(motg);
+		}
+	default:
+		break;
+	}
+	return ret;
+}
+#endif
 static bool msm_chg_check_secondary_det(struct msm_otg *motg)
 {
 	struct otg_transceiver *otg = &motg->otg;
@@ -1039,7 +1264,7 @@
 {
 	struct msm_otg *motg = container_of(w, struct msm_otg, chg_work.work);
 	struct otg_transceiver *otg = &motg->otg;
-	bool is_dcd, tmout, vout;
+	bool is_dcd, tmout, vout, is_aca;
 	unsigned long delay;
 
 	dev_dbg(otg->dev, "chg detection work\n");
@@ -1048,11 +1273,25 @@
 		pm_runtime_get_sync(otg->dev);
 		msm_chg_block_on(motg);
 		msm_chg_enable_dcd(motg);
+		msm_chg_enable_aca_det(motg);
 		motg->chg_state = USB_CHG_STATE_WAIT_FOR_DCD;
 		motg->dcd_retries = 0;
 		delay = MSM_CHG_DCD_POLL_TIME;
 		break;
 	case USB_CHG_STATE_WAIT_FOR_DCD:
+		is_aca = msm_chg_aca_detect(motg);
+		if (is_aca) {
+			/*
+			 * ID_A can also be an ACA dock. Continue
+			 * primary detection after DCD.
+			 */
+			if (test_bit(ID_A, &motg->inputs)) {
+				motg->chg_state = USB_CHG_STATE_WAIT_FOR_DCD;
+			} else {
+				delay = 0;
+				break;
+			}
+		}
 		is_dcd = msm_chg_check_dcd(motg);
 		tmout = ++motg->dcd_retries == MSM_CHG_DCD_MAX_RETRIES;
 		if (is_dcd || tmout) {
@@ -1066,6 +1305,13 @@
 		break;
 	case USB_CHG_STATE_DCD_DONE:
 		vout = msm_chg_check_primary_det(motg);
+		is_aca = msm_chg_aca_detect(motg);
+		if (is_aca) {
+			if (vout && test_bit(ID_A, &motg->inputs))
+				motg->chg_type = USB_ACA_DOCK_CHARGER;
+			delay = 0;
+			break;
+		}
 		if (vout) {
 			msm_chg_enable_secondary_det(motg);
 			delay = MSM_CHG_SECONDARY_DET_TIME;
@@ -1088,6 +1334,8 @@
 		motg->chg_state = USB_CHG_STATE_DETECTED;
 	case USB_CHG_STATE_DETECTED:
 		msm_chg_block_off(motg);
+		msm_chg_enable_aca_det(motg);
+		msm_chg_enable_aca_intr(motg);
 		dev_dbg(otg->dev, "charger = %d\n", motg->chg_type);
 		schedule_work(&motg->sm_work);
 		return;
@@ -1112,17 +1360,7 @@
 
 	switch (pdata->mode) {
 	case USB_OTG:
-		if (pdata->otg_control == OTG_PHY_CONTROL) {
-			if (otgsc & OTGSC_ID)
-				set_bit(ID, &motg->inputs);
-			else
-				clear_bit(ID, &motg->inputs);
-
-			if (otgsc & OTGSC_BSV)
-				set_bit(B_SESS_VLD, &motg->inputs);
-			else
-				clear_bit(B_SESS_VLD, &motg->inputs);
-		} else if (pdata->otg_control == OTG_USER_CONTROL) {
+		if (pdata->otg_control == OTG_USER_CONTROL) {
 			if (pdata->default_mode == USB_HOST) {
 				clear_bit(ID, &motg->inputs);
 			} else if (pdata->default_mode == USB_PERIPHERAL) {
@@ -1132,6 +1370,16 @@
 				set_bit(ID, &motg->inputs);
 				clear_bit(B_SESS_VLD, &motg->inputs);
 			}
+		} else {
+			if (otgsc & OTGSC_ID)
+				set_bit(ID, &motg->inputs);
+			else
+				clear_bit(ID, &motg->inputs);
+
+			if (otgsc & OTGSC_BSV)
+				set_bit(B_SESS_VLD, &motg->inputs);
+			else
+				clear_bit(B_SESS_VLD, &motg->inputs);
 		}
 		break;
 	case USB_HOST:
@@ -1163,9 +1411,16 @@
 		/* FALL THROUGH */
 	case OTG_STATE_B_IDLE:
 		dev_dbg(otg->dev, "OTG_STATE_B_IDLE state\n");
-		if (!test_bit(ID, &motg->inputs) && otg->host) {
+		if ((!test_bit(ID, &motg->inputs) ||
+				test_bit(ID_A, &motg->inputs)) && otg->host) {
 			/* disable BSV bit */
 			writel(readl(USB_OTGSC) & ~OTGSC_BSVIE, USB_OTGSC);
+			if (motg->chg_type == USB_ACA_DOCK_CHARGER)
+				msm_otg_notify_charger(motg,
+						IDEV_CHG_MAX);
+			else if (!test_bit(ID_A, &motg->inputs) &&
+					motg->pdata->vbus_power)
+				motg->pdata->vbus_power(1);
 			msm_otg_start_host(otg, 1);
 			otg->state = OTG_STATE_A_HOST;
 		} else if (test_bit(B_SESS_VLD, &motg->inputs)) {
@@ -1176,10 +1431,12 @@
 			case USB_CHG_STATE_DETECTED:
 				switch (motg->chg_type) {
 				case USB_DCP_CHARGER:
+				case USB_ACA_B_CHARGER:
 					msm_otg_notify_charger(motg,
 							IDEV_CHG_MAX);
 					break;
 				case USB_CDP_CHARGER:
+				case USB_ACA_C_CHARGER:
 					msm_otg_notify_charger(motg,
 							IDEV_CHG_MAX);
 					msm_otg_start_peripheral(otg, 1);
@@ -1216,23 +1473,51 @@
 	case OTG_STATE_B_PERIPHERAL:
 		dev_dbg(otg->dev, "OTG_STATE_B_PERIPHERAL state\n");
 		if (!test_bit(B_SESS_VLD, &motg->inputs) ||
-				!test_bit(ID, &motg->inputs)) {
+				!test_bit(ID, &motg->inputs) ||
+				!test_bit(ID_C, &motg->inputs)) {
 			msm_otg_notify_charger(motg, 0);
 			msm_otg_start_peripheral(otg, 0);
+			if (!test_bit(ID_B, &motg->inputs) &&
+				!test_bit(ID_A, &motg->inputs)) {
+				motg->chg_state = USB_CHG_STATE_UNDEFINED;
+				motg->chg_type = USB_INVALID_CHARGER;
+			}
+			otg->state = OTG_STATE_B_IDLE;
+			msm_otg_reset(otg);
+			schedule_work(w);
+		} else if (test_bit(ID_C, &motg->inputs)) {
+			msm_otg_notify_charger(motg, IDEV_CHG_MAX);
+			pm_runtime_put_sync(otg->dev);
+		}
+		break;
+	case OTG_STATE_A_HOST:
+		dev_dbg(otg->dev, "OTG_STATE_A_HOST state\n");
+		if (test_bit(ID, &motg->inputs) &&
+				!test_bit(ID_A, &motg->inputs)) {
+			msm_otg_start_host(otg, 0);
+			writel(readl(USB_OTGSC) & ~OTGSC_BSVIE, USB_OTGSC);
+			if (motg->pdata->vbus_power)
+				motg->pdata->vbus_power(0);
 			motg->chg_state = USB_CHG_STATE_UNDEFINED;
 			motg->chg_type = USB_INVALID_CHARGER;
 			otg->state = OTG_STATE_B_IDLE;
 			msm_otg_reset(otg);
 			schedule_work(w);
-		}
-		break;
-	case OTG_STATE_A_HOST:
-		dev_dbg(otg->dev, "OTG_STATE_A_HOST state\n");
-		if (test_bit(ID, &motg->inputs)) {
-			msm_otg_start_host(otg, 0);
-			otg->state = OTG_STATE_B_IDLE;
-			msm_otg_reset(otg);
-			schedule_work(w);
+		} else if (test_bit(ID_A, &motg->inputs)) {
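+			/* ACA ID_A: stop driving VBUS; draw what the dock can spare */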
+			writel(readl(USB_OTGSC) & ~OTGSC_BSVIE, USB_OTGSC);
+			if (motg->pdata->vbus_power)
+				motg->pdata->vbus_power(0);
+			msm_otg_notify_charger(motg,
+					IDEV_CHG_MIN - motg->mA_port);
+			pm_runtime_put_sync(otg->dev);
+		} else if (!test_bit(ID, &motg->inputs)) {
+			motg->chg_state = USB_CHG_STATE_UNDEFINED;
+			motg->chg_type = USB_INVALID_CHARGER;
+			msm_otg_notify_charger(motg, 0);
+			writel(readl(USB_OTGSC) & ~OTGSC_BSVIE, USB_OTGSC);
+			if (motg->pdata->vbus_power)
+				motg->pdata->vbus_power(1);
+			pm_runtime_put_sync(otg->dev);
 		}
 		break;
 	default:
@@ -1244,7 +1529,7 @@
 {
 	struct msm_otg *motg = data;
 	struct otg_transceiver *otg = &motg->otg;
-	u32 otgsc = 0;
+	u32 otgsc = 0, usbsts;
 
 	if (atomic_read(&motg->in_lpm)) {
 		disable_irq_nosync(irq);
@@ -1253,6 +1538,16 @@
 		return IRQ_HANDLED;
 	}
 
+	usbsts = readl(USB_USBSTS);
+	if ((usbsts & PHY_ALT_INT)) {
+		writel(PHY_ALT_INT, USB_USBSTS);
+		if (msm_chg_check_aca_intr(motg)) {
+			pm_runtime_get_noresume(otg->dev);
+			schedule_work(&motg->sm_work);
+		}
+		return IRQ_HANDLED;
+	}
+
 	otgsc = readl(USB_OTGSC);
 	if (!(otgsc & (OTGSC_IDIS | OTGSC_BSVIS)))
 		return IRQ_NONE;
@@ -1263,6 +1558,7 @@
 		else
 			clear_bit(ID, &motg->inputs);
 		dev_dbg(otg->dev, "ID set/clear\n");
+		schedule_work(&motg->sm_work);
 		pm_runtime_get_noresume(otg->dev);
 	} else if ((otgsc & OTGSC_BSVIS) && (otgsc & OTGSC_BSVIE)) {
 		if (otgsc & OTGSC_BSV)
@@ -1270,11 +1566,36 @@
 		else
 			clear_bit(B_SESS_VLD, &motg->inputs);
 		dev_dbg(otg->dev, "BSV set/clear\n");
+		schedule_work(&motg->sm_work);
 		pm_runtime_get_noresume(otg->dev);
 	}
 
 	writel(otgsc, USB_OTGSC);
-	schedule_work(&motg->sm_work);
+	return IRQ_HANDLED;
+}
+
+static void msm_otg_set_vbus_state(int online)
+{
+	struct msm_otg *motg = the_msm_otg;
+
+	/* We depend on the PMIC only for the VBUS ON interrupt */
+	if (!atomic_read(&motg->in_lpm) || !online)
+		return;
+
+	/*
+	 * Let interrupt handler take care of resuming
+	 * the hardware.
+	 */
+	msm_otg_irq(motg->irq, (void *) motg);
+}
+
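+/* PMIC ID interrupt: let the main IRQ handler bring the controller out of LPM */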
+static irqreturn_t msm_pmic_id_irq(int irq, void *data)
+{
+	struct msm_otg *motg = data;
+
+	if (atomic_read(&motg->in_lpm) && !motg->async_int)
+		msm_otg_irq(motg->irq, motg);
+
 	return IRQ_HANDLED;
 }
 
@@ -1428,6 +1749,7 @@
 		return -ENOMEM;
 	}
 
+	the_msm_otg = motg;
 	motg->pdata = pdev->dev.platform_data;
 	otg = &motg->otg;
 	otg->dev = &pdev->dev;
@@ -1503,24 +1825,30 @@
 		goto free_regs;
 	}
 
-	clk_enable(motg->clk);
 	clk_enable(motg->pclk);
 
 	ret = msm_hsusb_init_vddcx(motg, 1);
 	if (ret) {
-		dev_err(&pdev->dev, "hsusb vddcx configuration failed\n");
+		dev_err(&pdev->dev, "hsusb vddcx init failed\n");
 		goto free_regs;
 	}
 
+	ret = msm_hsusb_config_vddcx(1);
+	if (ret) {
+		dev_err(&pdev->dev, "hsusb vddcx configuration failed\n");
+		goto free_init_vddcx;
+	}
+
 	ret = msm_hsusb_ldo_init(motg, 1);
 	if (ret) {
 		dev_err(&pdev->dev, "hsusb vreg configuration failed\n");
-		goto vddcx_exit;
+		goto free_init_vddcx;
 	}
-	ret = msm_hsusb_ldo_set_mode(1);
+
+	ret = msm_hsusb_ldo_enable(motg, 1);
 	if (ret) {
 		dev_err(&pdev->dev, "hsusb vreg enable failed\n");
-		goto ldo_exit;
+		goto free_ldo_init;
 	}
 
 	if (motg->core_clk)
@@ -1528,14 +1856,17 @@
 
 	writel(0, USB_USBINTR);
 	writel(0, USB_OTGSC);
+	/* Ensure that above STOREs are completed before enabling interrupts */
+	mb();
 
+	wake_lock_init(&motg->wlock, WAKE_LOCK_SUSPEND, "msm_otg");
 	INIT_WORK(&motg->sm_work, msm_otg_sm_work);
 	INIT_DELAYED_WORK(&motg->chg_work, msm_chg_detect_work);
 	ret = request_irq(motg->irq, msm_otg_irq, IRQF_SHARED,
 					"msm_otg", motg);
 	if (ret) {
 		dev_err(&pdev->dev, "request irq failed\n");
-		goto disable_clks;
+		goto destroy_wlock;
 	}
 
 	otg->init = msm_otg_reset;
@@ -1551,8 +1882,27 @@
 		goto free_irq;
 	}
 
+	if (motg->pdata->otg_control == OTG_PMIC_CONTROL) {
+		if (motg->pdata->pmic_id_irq) {
+			ret = request_irq(motg->pdata->pmic_id_irq,
+						msm_pmic_id_irq,
+						IRQF_TRIGGER_RISING |
+						IRQF_TRIGGER_FALLING,
+						"msm_otg", motg);
+			if (ret) {
+				dev_err(&pdev->dev, "request irq failed for PMIC ID\n");
+				goto remove_otg;
+			}
+		} else {
+			ret = -ENODEV;
+			dev_err(&pdev->dev, "PMIC IRQ for ID notifications doesn't exist\n");
+			goto remove_otg;
+		}
+	}
+
 	platform_set_drvdata(pdev, motg);
 	device_init_wakeup(&pdev->dev, 1);
+	motg->mA_port = IUNIT;
 
 	if (motg->pdata->mode == USB_OTG &&
 			motg->pdata->otg_control == OTG_USER_CONTROL) {
@@ -1562,25 +1912,39 @@
 					"not available\n");
 	}
 
+	if (motg->pdata->otg_control == OTG_PMIC_CONTROL)
+		pm8921_charger_register_vbus_sn(&msm_otg_set_vbus_state);
+
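+	/* PHY power collapse/retention relies on PMIC VBUS and ID notifications */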
+	if (motg->pdata->phy_type == SNPS_28NM_INTEGRATED_PHY &&
+			motg->pdata->otg_control == OTG_PMIC_CONTROL &&
+			motg->pdata->pmic_id_irq)
+		motg->caps = ALLOW_PHY_POWER_COLLAPSE |
+				ALLOW_PHY_RETENTION |
+				ALLOW_PHY_COMP_DISABLE;
+
+	wake_lock(&motg->wlock);
 	pm_runtime_set_active(&pdev->dev);
 	pm_runtime_enable(&pdev->dev);
 
 	return 0;
+
+remove_otg:
+	otg_set_transceiver(NULL);
 free_irq:
 	free_irq(motg->irq, motg);
-disable_clks:
+destroy_wlock:
+	wake_lock_destroy(&motg->wlock);
 	clk_disable(motg->pclk);
-	clk_disable(motg->clk);
-ldo_exit:
+	msm_hsusb_ldo_enable(motg, 0);
+free_ldo_init:
 	msm_hsusb_ldo_init(motg, 0);
-vddcx_exit:
+free_init_vddcx:
 	msm_hsusb_init_vddcx(motg, 0);
 free_regs:
 	iounmap(motg->regs);
 put_core_clk:
 	if (motg->core_clk)
 		clk_put(motg->core_clk);
-	clk_put(motg->pclk);
 put_pclk_src:
 	if (!IS_ERR(motg->pclk_src)) {
 		clk_disable(motg->pclk_src);
@@ -1604,6 +1968,8 @@
 	if (otg->host || otg->gadget)
 		return -EBUSY;
 
+	if (motg->pdata->otg_control == OTG_PMIC_CONTROL)
+		pm8921_charger_unregister_vbus_sn(0);
 	msm_otg_debugfs_cleanup();
 	cancel_delayed_work_sync(&motg->chg_work);
 	cancel_work_sync(&motg->sm_work);
@@ -1612,7 +1978,10 @@
 
 	device_init_wakeup(&pdev->dev, 0);
 	pm_runtime_disable(&pdev->dev);
+	wake_lock_destroy(&motg->wlock);
 
+	if (motg->pdata->pmic_id_irq)
+		free_irq(motg->pdata->pmic_id_irq, motg);
 	otg_set_transceiver(NULL);
 	free_irq(motg->irq, motg);
 
@@ -1633,14 +2002,15 @@
 		dev_err(otg->dev, "Unable to suspend PHY\n");
 
 	clk_disable(motg->pclk);
-	clk_disable(motg->clk);
 	if (motg->core_clk)
 		clk_disable(motg->core_clk);
 	if (!IS_ERR(motg->pclk_src)) {
 		clk_disable(motg->pclk_src);
 		clk_put(motg->pclk_src);
 	}
+	msm_hsusb_ldo_enable(motg, 0);
 	msm_hsusb_ldo_init(motg, 0);
+	msm_hsusb_init_vddcx(motg, 0);
 
 	iounmap(motg->regs);
 	pm_runtime_set_suspended(&pdev->dev);
diff --git a/drivers/usb/otg/otg.c b/drivers/usb/otg/otg.c
index fb7adef..40a34ec 100644
--- a/drivers/usb/otg/otg.c
+++ b/drivers/usb/otg/otg.c
@@ -99,3 +99,18 @@
 	}
 }
 EXPORT_SYMBOL(otg_state_string);
+
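+/* Forward an OTG event to the registered transceiver; -ENOTSUPP if unsupported */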
+int otg_send_event(enum usb_otg_event event)
+{
+	struct otg_transceiver *otg = otg_get_transceiver();
+	int ret = -ENOTSUPP;
+
+	if (otg && otg->send_event)
+		ret = otg->send_event(otg, event);
+
+	if (otg)
+		otg_put_transceiver(otg);
+
+	return ret;
+}
+EXPORT_SYMBOL(otg_send_event);
\ No newline at end of file
diff --git a/drivers/usb/otg/otg_id.c b/drivers/usb/otg/otg_id.c
index ce22b46..64e1bd4 100644
--- a/drivers/usb/otg/otg_id.c
+++ b/drivers/usb/otg/otg_id.c
@@ -42,7 +42,7 @@
 
 static void __otg_id_notify(void)
 {
-	int ret;
+	int ret = 0;
 	struct otg_id_notifier_block *otg_id_nb;
 	bool proxy_wait = false;
 	if (plist_head_empty(&otg_id_plist))
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 4c85a4b..a564a03 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -25,6 +25,8 @@
 
 source "drivers/gpu/ion/Kconfig"
 
+source "drivers/gpu/msm/Kconfig"
+
 config VGASTATE
        tristate
        default n
@@ -2323,13 +2325,6 @@
 	  Select this option if display contents should be inherited as set by
 	  the bootloader.
 
-config FB_MSM
-	tristate "MSM Framebuffer support"
-	depends on FB && ARCH_MSM
-	select FB_CFB_FILLRECT
-	select FB_CFB_COPYAREA
-	select FB_CFB_IMAGEBLIT
-
 config FB_MX3
 	tristate "MX3 Framebuffer support"
 	depends on FB && MX3_IPU
@@ -2385,6 +2380,8 @@
 	  Choose this option if you want to use the Unigfx device as a
 	  framebuffer device. Without the support of PCI & AGP.
 
+source "drivers/video/msm/Kconfig"
+
 source "drivers/video/omap/Kconfig"
 source "drivers/video/omap2/Kconfig"
 
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 5aac00e..a024064 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1169,14 +1169,11 @@
 		unlock_fb_info(info);
 		break;
 	default:
-		if (!lock_fb_info(info))
-			return -ENODEV;
 		fb = info->fbops;
 		if (fb->fb_ioctl)
 			ret = fb->fb_ioctl(info, cmd, arg);
 		else
 			ret = -ENOTTY;
-		unlock_fb_info(info);
 	}
 	return ret;
 }
diff --git a/drivers/video/msm/Kconfig b/drivers/video/msm/Kconfig
new file mode 100644
index 0000000..677863d
--- /dev/null
+++ b/drivers/video/msm/Kconfig
@@ -0,0 +1,661 @@
+
+source "drivers/video/msm/vidc/Kconfig"
+
+config FB_MSM
+	tristate "MSM Framebuffer support"
+	depends on FB && ARCH_MSM
+	select FB_BACKLIGHT if FB_MSM_BACKLIGHT
+	select NEW_LEDS
+	select LEDS_CLASS
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	---help---
+	  Support for MSM Framebuffer.
+
+if FB_MSM
+
+config FB_MSM_BACKLIGHT
+	bool "Support for backlight control"
+	default y
+	---help---
+	  Say Y here if you want to control the backlight of your display.
+
+config FB_MSM_LOGO
+	bool "MSM Frame Buffer Logo"
+	default n
+	---help---
+	  Show /initlogo.rle during boot.
+
+config FB_MSM_LCDC_HW
+	bool
+	default n
+
+config FB_MSM_TRIPLE_BUFFER
+	bool "Support for triple frame buffer"
+	default n
+
+choice
+	prompt "MDP HW version"
+	default FB_MSM_MDP22
+
+config FB_MSM_MDP22
+	bool "MDP HW ver2.2"
+	---help---
+	  Support for MSM MDP HW revision 2.2
+	  Say Y here if this is msm7201 variant platform.
+
+config FB_MSM_MDP30
+	select FB_MSM_LCDC_HW
+	bool "MDP HW ver3.0"
+	---help---
+	  Support for MSM MDP HW revision 3.0
+	  Say Y here if this is msm7x25 variant platform.
+
+config FB_MSM_MDP303
+	depends on FB_MSM_MDP30
+	bool "MDP HW ver3.03"
+	default n
+	---help---
+	  Support for MSM MDP HW revision 3.03. This is a newer version of
+	  MDP3.0 that adds the functionality required for the msm7x2xA
+	  platform.
+	  Say Y here if this is msm7x2xA variant platform.
+
+config FB_MSM_MDP31
+	select FB_MSM_LCDC_HW
+	bool "MDP HW ver3.1"
+	---help---
+	  Support for MSM MDP HW revision 3.1
+	  Say Y here if this is msm8x50 variant platform.
+
+config FB_MSM_MDP40
+	select FB_MSM_LCDC_HW
+	bool "MDP HW ver4.0"
+	---help---
+	  Support for MSM MDP HW revision 4.0
+	  Say Y here if this is msm7x30 variant platform.
+endchoice
+
+config FB_MSM_EBI2
+	bool
+	default n
+
+config FB_MSM_MDDI
+	bool
+	default n
+
+config FB_MSM_MIPI_DSI
+	bool
+	default n
+
+config FB_MSM_LCDC
+	bool
+	default n
+
+config FB_MSM_OVERLAY
+	depends on FB_MSM_MDP40 && ANDROID_PMEM
+	bool "MDP4 overlay support"
+	default n
+
+config FB_MSM_DTV
+	depends on FB_MSM_OVERLAY
+	bool
+	default n
+
+config FB_MSM_EXTMDDI
+	bool
+	default n
+
+config FB_MSM_TVOUT
+	bool
+	default n
+
+config FB_MSM_MDDI_TOSHIBA_COMMON
+	bool
+	select FB_MSM_MDDI
+	default n
+
+config FB_MSM_MDDI_TOSHIBA_COMMON_VGA
+	bool
+	select FB_MSM_MDDI_TOSHIBA_COMMON
+	default n
+
+config FB_MSM_MDDI_ORISE
+	bool
+	select FB_MSM_MDDI
+	default n
+
+config FB_MSM_MDDI_QUICKVX
+	bool
+	select FB_MSM_MDDI
+	default n
+
+config FB_MSM_MDDI_AUTO_DETECT
+	bool
+	select FB_MSM_MDDI
+	default n
+
+config FB_MSM_LCDC_AUTO_DETECT
+	bool
+	select FB_MSM_LCDC
+	default n
+
+config FB_MSM_LCDC_PANEL
+	bool
+	select FB_MSM_LCDC
+	default n
+
+config FB_MSM_MIPI_DSI_TOSHIBA
+	bool
+	select FB_MSM_MIPI_DSI
+	default n
+
+config FB_MSM_MIPI_DSI_RENESAS
+	bool
+	select FB_MSM_MIPI_DSI
+	default n
+
+config FB_MSM_MIPI_DSI_SIMULATOR
+	bool
+	select FB_MSM_MIPI_DSI
+	default n
+
+config FB_MSM_MIPI_DSI_NOVATEK
+	bool
+	select FB_MSM_MIPI_DSI
+	default n
+
+config FB_MSM_LCDC_ST15_WXGA
+	bool
+	select FB_MSM_LCDC_PANEL
+	default n
+
+config FB_MSM_LCDC_ST15_PANEL
+	depends on FB_MSM_LCDC_HW
+	bool "LCDC ST1.5 Panel"
+	select FB_MSM_LCDC_ST15_WXGA
+	---help---
+	  Support for ST1.5 WXGA (1366x768) panel
+
+config FB_MSM_LCDC_PRISM_WVGA
+	bool
+	select FB_MSM_LCDC_PANEL
+	default n
+
+config FB_MSM_LCDC_SAMSUNG_WSVGA
+	bool
+	select FB_MSM_LCDC_PANEL
+	default n
+
+config FB_MSM_LCDC_CHIMEI_WXGA
+	bool
+	select FB_MSM_LCDC_PANEL
+	default n
+
+config FB_MSM_LCDC_GORDON_VGA
+	bool
+	select FB_MSM_LCDC_PANEL
+	default n
+
+config FB_MSM_LCDC_TOSHIBA_WVGA_PT
+	bool
+	select FB_MSM_LCDC_PANEL
+	default n
+
+config FB_MSM_LCDC_TOSHIBA_FWVGA_PT
+	bool
+	select FB_MSM_LCDC_PANEL
+	default n
+
+config FB_MSM_LCDC_SHARP_WVGA_PT
+	bool
+	select FB_MSM_LCDC_PANEL
+	default n
+
+config FB_MSM_LCDC_AUO_WVGA
+	bool
+	select FB_MSM_LCDC_PANEL
+	default n
+
+config FB_MSM_LCDC_SAMSUNG_OLED_PT
+	bool
+	select FB_MSM_LCDC_PANEL
+	default n
+
+config FB_MSM_LCDC_WXGA
+	bool
+	select FB_MSM_LCDC_PANEL
+	default n
+
+config FB_MSM_MIPI_TOSHIBA_VIDEO_WVGA_PT
+	bool
+	select FB_MSM_MIPI_DSI_TOSHIBA
+	default n
+
+config FB_MSM_MIPI_TOSHIBA_VIDEO_WSVGA_PT
+	bool
+	select FB_MSM_MIPI_DSI_TOSHIBA
+	default n
+
+config FB_MSM_MIPI_NOVATEK_VIDEO_QHD_PT
+	bool
+	select FB_MSM_MIPI_DSI_NOVATEK
+	default n
+
+config FB_MSM_MIPI_NOVATEK_CMD_QHD_PT
+	bool
+	select FB_MSM_MIPI_DSI_NOVATEK
+	default n
+
+config FB_MSM_MIPI_RENESAS_VIDEO_FWVGA_PT
+	bool
+	select FB_MSM_MIPI_DSI_RENESAS
+	default n
+
+config FB_MSM_MIPI_RENESAS_CMD_FWVGA_PT
+	bool
+	select FB_MSM_MIPI_DSI_RENESAS
+	default n
+
+config FB_MSM_MIPI_SIMULATOR_VIDEO
+	bool
+	select FB_MSM_MIPI_DSI_SIMULATOR
+	default n
+
+
+config FB_MSM_OVERLAY_WRITEBACK
+	depends on FB_MSM_OVERLAY
+	bool "MDP overlay write back mode enable"
+	---help---
+	  Support for MDP4 OVERLAY write back mode
+
+choice
+	prompt "LCD Panel"
+	default FB_MSM_MDDI_AUTO_DETECT
+
+config FB_MSM_LCDC_PRISM_WVGA_PANEL
+	depends on FB_MSM_LCDC_HW
+	bool "LCDC Prism WVGA Panel"
+	select FB_MSM_LCDC_PRISM_WVGA
+	---help---
+	  Support for LCDC Prism WVGA (800x480) panel
+
+config FB_MSM_LCDC_SAMSUNG_WSVGA_PANEL
+	depends on FB_MSM_LCDC_HW
+	bool "LCDC Samsung WSVGA Panel"
+	select FB_MSM_LCDC_SAMSUNG_WSVGA
+	---help---
+	  Support for LCDC Samsung WSVGA (1024x600) panel
+
+config FB_MSM_LCDC_CHIMEI_WXGA_PANEL
+	depends on FB_MSM_LCDC_HW
+	bool "LCDC Chimei WXGA Panel"
+	select FB_MSM_LCDC_CHIMEI_WXGA
+	---help---
+	  Support for LCDC Chimei WXGA (1366x768) panel
+
+config FB_MSM_LCDC_GORDON_VGA_PANEL
+	depends on FB_MSM_LCDC_HW
+	bool "LCDC Gordon VGA Panel"
+	select FB_MSM_LCDC_GORDON_VGA
+	---help---
+	  Support for LCDC Gordon VGA (480x640) panel
+
+config FB_MSM_LCDC_TOSHIBA_WVGA_PT_PANEL
+	depends on FB_MSM_LCDC_HW
+	bool "LCDC Toshiba WVGA PT Panel"
+	select FB_MSM_LCDC_TOSHIBA_WVGA_PT
+	---help---
+	  Support for LCDC Toshiba WVGA PT (480x800) panel
+
+config FB_MSM_LCDC_TOSHIBA_FWVGA_PT_PANEL
+	depends on FB_MSM_LCDC_HW
+	bool "LCDC Toshiba FWVGA PT Panel"
+	select FB_MSM_LCDC_TOSHIBA_FWVGA_PT
+	---help---
+	  Support for LCDC Toshiba FWVGA PT (480x864) panel. This
+	  configuration has to be selected to support the Toshiba
+	  FWVGA (480x864) portrait panel.
+
+config FB_MSM_LCDC_SHARP_WVGA_PT_PANEL
+	depends on FB_MSM_LCDC_HW
+	bool "LCDC Sharp WVGA PT Panel"
+	select FB_MSM_LCDC_SHARP_WVGA_PT
+	---help---
+	  Support for LCDC Sharp WVGA PT (480x800) panel
+
+config FB_MSM_LCDC_AUO_WVGA_PANEL
+	depends on FB_MSM_LCDC_HW
+	bool "LCDC AUO WVGA Panel"
+	select FB_MSM_LCDC_AUO_WVGA
+	---help---
+	  Support for LCDC AUO WVGA (480x800) panel
+
+config FB_MSM_LCDC_SAMSUNG_OLED_PT_PANEL
+	depends on FB_MSM_LCDC_HW
+	bool "LCDC Samsung OLED PT Panel"
+	select FB_MSM_LCDC_SAMSUNG_OLED_PT
+	---help---
+	  Support for LCDC Samsung OLED PT (480x800) panel
+
+config FB_MSM_TRY_MDDI_CATCH_LCDC_PRISM
+	depends on FB_MSM_LCDC_HW
+	bool "MDDI Panel Auto Detect + LCDC Prism WVGA"
+	select FB_MSM_MDDI_AUTO_DETECT
+	select FB_MSM_LCDC_PRISM_WVGA
+	select FB_MSM_LCDC_GORDON_VGA
+	select FB_MSM_LCDC_WXGA
+	select FB_MSM_LCDC_TOSHIBA_WVGA_PT
+	select FB_MSM_LCDC_TOSHIBA_FWVGA_PT
+	select FB_MSM_LCDC_SHARP_WVGA_PT
+	select FB_MSM_LCDC_ST15_WXGA
+	---help---
+	  Support for MDDI panel auto detect.
+	  If it can't find any MDDI panel, it will load an LCDC panel.
+
+config FB_MSM_MIPI_PANEL_DETECT
+	depends on FB_MSM_LCDC_HW
+	bool "MIPI Panel Detect + LCDC Panel Auto Detect"
+	select FB_MSM_MIPI_TOSHIBA_VIDEO_WVGA_PT
+	select FB_MSM_MIPI_TOSHIBA_VIDEO_WSVGA_PT
+	select FB_MSM_LCDC_AUTO_DETECT
+	select FB_MSM_MIPI_RENESAS_VIDEO_FWVGA_PT
+	select FB_MSM_MIPI_SIMULATOR_VIDEO
+	select FB_MSM_LCDC_SAMSUNG_WSVGA
+	select FB_MSM_LCDC_AUO_WVGA
+	select FB_MSM_LCDC_SAMSUNG_OLED_PT
+
+config FB_MSM_MDDI_PANEL_AUTO_DETECT
+	bool "MDDI Panel Auto Detect"
+	select FB_MSM_MDDI_AUTO_DETECT
+	---help---
+	  Support for MDDI panel auto detect
+
+config FB_MSM_LCDC_PANEL_AUTO_DETECT
+	bool "LCDC Panel Auto Detect"
+	select FB_MSM_LCDC_AUTO_DETECT
+	select FB_MSM_LCDC_SAMSUNG_WSVGA
+	select FB_MSM_LCDC_AUO_WVGA
+	select FB_MSM_LCDC_SAMSUNG_OLED_PT
+	---help---
+	  Support for LCDC panel auto detect
+
+config FB_MSM_MDDI_PRISM_WVGA
+	bool "MDDI Prism WVGA Panel"
+	select FB_MSM_MDDI
+	---help---
+	  Support for MDDI Prism WVGA (800x480) panel
+
+config FB_MSM_MDDI_TOSHIBA_WVGA_PORTRAIT
+	bool "MDDI Toshiba WVGA Portrait Panel"
+	select FB_MSM_MDDI_TOSHIBA_COMMON
+	---help---
+	  Support for MDDI Toshiba WVGA (480x800) panel
+
+config FB_MSM_MDDI_TOSHIBA_VGA
+	bool "MDDI Toshiba VGA Panel"
+	select FB_MSM_MDDI_TOSHIBA_COMMON_VGA
+	---help---
+	  Support for MDDI Toshiba VGA (480x640) and QCIF (176x220) panel
+
+config FB_MSM_MDDI_TOSHIBA_WVGA
+	bool "MDDI Toshiba WVGA panel"
+	select FB_MSM_MDDI_TOSHIBA_COMMON
+	---help---
+	  Support for MDDI Toshiba (800x480) WVGA panel
+
+config FB_MSM_MDDI_SHARP_QVGA_128x128
+	bool "MDDI Sharp QVGA Dual Panel"
+	select FB_MSM_MDDI
+	---help---
+	  Support for MDDI Sharp QVGA (240x320) and 128x128 dual panel
+
+config FB_MSM_MIPI_TOSHIBA_VIDEO_WVGA_PT_PANEL
+	bool "MIPI Toshiba WVGA PT Panel"
+	select FB_MSM_MIPI_TOSHIBA_VIDEO_WVGA_PT
+
+config FB_MSM_MIPI_TOSHIBA_VIDEO_WSVGA_PT_PANEL
+	bool "MIPI Toshiba WSVGA PT Panel"
+	select FB_MSM_MIPI_TOSHIBA_VIDEO_WSVGA_PT
+
+config FB_MSM_MIPI_NOVATEK_VIDEO_QHD_PT_PANEL
+	bool "MIPI NOVATEK VIDEO QHD PT Panel"
+	select FB_MSM_MIPI_NOVATEK_VIDEO_QHD_PT
+
+config FB_MSM_MIPI_NOVATEK_CMD_QHD_PT_PANEL
+	bool "MIPI NOVATEK CMD QHD PT Panel"
+	select FB_MSM_MIPI_NOVATEK_CMD_QHD_PT
+
+config FB_MSM_MIPI_RENESAS_VIDEO_FWVGA_PT_PANEL
+	bool "MIPI Renesas Video FWVGA PT Panel"
+	select FB_MSM_MIPI_RENESAS_VIDEO_FWVGA_PT
+
+config FB_MSM_MIPI_RENESAS_CMD_FWVGA_PT_PANEL
+	bool "MIPI Renesas Command FWVGA PT Panel"
+	select FB_MSM_MIPI_RENESAS_CMD_FWVGA_PT
+
+config FB_MSM_MIPI_SIMULATOR_VIDEO_PANEL
+	bool "MIPI Simulator Video Panel"
+	select FB_MSM_MIPI_SIMULATOR_VIDEO
+
+config FB_MSM_EBI2_TMD_QVGA_EPSON_QCIF
+	bool "EBI2 TMD QVGA Epson QCIF Dual Panel"
+	select FB_MSM_EBI2
+	---help---
+	  Support for EBI2 TMD QVGA (240x320) and Epson QCIF (176x220) panel
+
+config FB_MSM_PANEL_NONE
+	bool "NONE"
+	---help---
+	  This will disable LCD panel
+endchoice
+
+choice
+	prompt "Secondary LCD Panel"
+	depends on  FB_MSM_MDP31
+	default FB_MSM_SECONDARY_PANEL_NONE
+
+config FB_MSM_LCDC_EXTERNAL_WXGA
+	depends on FB_MSM_MDP31
+	bool "External WXGA on LCDC"
+	select FB_MSM_LCDC_PANEL
+	---help---
+	  Support for external WXGA display (1280x720)
+
+config FB_MSM_HDMI_SII_EXTERNAL_720P
+	depends on FB_MSM_MDP31
+	bool "External SiI9022 HDMI 720P"
+	select FB_MSM_LCDC_PANEL
+	---help---
+	  Support for external HDMI 720p display (1280x720p)
+	  Using SiI9022 chipset
+
+config FB_MSM_SECONDARY_PANEL_NONE
+	bool "NONE"
+	---help---
+	  No secondary panel
+endchoice
+
+config FB_MSM_LCDC_DSUB
+	depends on FB_MSM_LCDC_SAMSUNG_WSVGA && FB_MSM_MDP40 && FB_MSM_LCDC_HW
+	bool "External DSUB support"
+	default n
+	---help---
+	  Support for external DSUB (VGA) display up to 1440x900.  The DSUB
+	  display shares the same video bus as the primary LCDC attached display.
+	  Typically only one of the two displays can be used at one time.
+
+config FB_MSM_EXT_INTERFACE_COMMON
+	bool
+	default n
+
+config FB_MSM_HDMI_COMMON
+	bool
+	default n
+
+config FB_MSM_HDMI_3D
+	bool
+	default n
+
+config FB_MSM_HDMI_ADV7520_PANEL
+	depends on FB_MSM_MDP40 && FB_MSM_OVERLAY
+        bool "LCDC HDMI ADV7520 720p Panel"
+	bool "LCDC HDMI ADV7520 720p Panel"
+	select FB_MSM_DTV
+	select FB_MSM_EXT_INTERFACE_COMMON
+	select FB_MSM_HDMI_COMMON
+	default n
+	---help---
+	  Support for LCDC 720p HDMI panel attached to ADV7520
+config FB_MSM_HDMI_ADV7520_PANEL_HDCP_SUPPORT
+	depends on FB_MSM_HDMI_ADV7520_PANEL
+	bool "Use HDCP mode"
+	default y
+	---help---
+	  Support for HDCP mode for ADV7520 HDMI 720p Panel
+	  Choose to enable HDCP
+
+
+config FB_MSM_HDMI_MSM_PANEL
+	depends on FB_MSM_MDP40
+	bool "MSM HDMI 1080p Panel"
+	select FB_MSM_DTV
+	select FB_MSM_EXT_INTERFACE_COMMON
+	select FB_MSM_HDMI_COMMON
+	select FB_MSM_HDMI_3D
+	default n
+	---help---
+	  Support for 480p/720p/1080i/1080p output through MSM HDMI
+
+config FB_MSM_HDMI_MSM_PANEL_DVI_SUPPORT
+	depends on FB_MSM_HDMI_MSM_PANEL
+	bool "Use DVI mode"
+	default n
+	---help---
+	  Support for DVI mode for MSM HDMI 1080p Panel
+
+config FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
+	depends on FB_MSM_HDMI_MSM_PANEL
+	bool "Use HDCP mode"
+	default y
+	---help---
+	  Support for HDCP mode for MSM HDMI 1080p Panel
+	  Choose to enable HDCP
+
+choice
+	depends on  (FB_MSM_MDP22 || FB_MSM_MDP31 || FB_MSM_MDP40)
+	prompt "TVOut Region"
+	default FB_MSM_TVOUT_NONE
+
+config FB_MSM_TVOUT_NTSC_M
+	bool "NTSC M"
+	select FB_MSM_TVOUT
+	select FB_MSM_EXT_INTERFACE_COMMON
+	---help---
+	  Support for NTSC M region (North America and Korea)
+
+config FB_MSM_TVOUT_NTSC_J
+	bool "NTSC J"
+	select FB_MSM_TVOUT
+	select FB_MSM_EXT_INTERFACE_COMMON
+	---help---
+	  Support for NTSC J region (Japan)
+
+config FB_MSM_TVOUT_PAL_BDGHIN
+	bool "PAL BDGHIN"
+	select FB_MSM_TVOUT
+	select FB_MSM_EXT_INTERFACE_COMMON
+	---help---
+	  Support for PAL BDGHIN region (non-Argentina PAL-N)
+
+config FB_MSM_TVOUT_PAL_M
+	bool "PAL M"
+	select FB_MSM_TVOUT
+	select FB_MSM_EXT_INTERFACE_COMMON
+	---help---
+	  Support for PAL M region
+
+config FB_MSM_TVOUT_PAL_N
+	bool "PAL N"
+	select FB_MSM_TVOUT
+	select FB_MSM_EXT_INTERFACE_COMMON
+	---help---
+	  Support for PAL N region (Argentina PAL-N)
+
+config FB_MSM_TVOUT_NONE
+	bool "NONE"
+	---help---
+	  This will disable TV Out functionality.
+endchoice
+
+config FB_MSM_TVOUT_SVIDEO
+	bool "TVOut on S-video"
+	depends on FB_MSM_TVOUT
+	default n
+	---help---
+	  Selects whether the TVOut signal uses S-video.
+	  Choose n for composite output.
+
+choice
+	depends on  FB_MSM_MDP22
+	prompt "External MDDI"
+	default FB_MSM_EXTMDDI_SVGA
+
+config FB_MSM_EXTMDDI_SVGA
+	bool "External MDDI SVGA"
+	select FB_MSM_MDDI
+	select FB_MSM_EXTMDDI
+	---help---
+	  Support for MSM SVGA (800x600) external MDDI panel
+
+config FB_MSM_EXTMDDI_NONE
+	bool "NONE"
+	---help---
+	  This will disable External MDDI functionality.
+endchoice
+
+choice
+	prompt "Default framebuffer color depth"
+	depends on FB_MSM_MDP40 || FB_MSM_MDP31
+	default FB_MSM_DEFAULT_DEPTH_RGBA8888
+
+config FB_MSM_DEFAULT_DEPTH_RGB565
+	bool "16 bits per pixel (RGB565)"
+
+config FB_MSM_DEFAULT_DEPTH_ARGB8888
+	bool "32 bits per pixel (ARGB8888)"
+
+config FB_MSM_DEFAULT_DEPTH_RGBA8888
+	bool "32 bits per pixel (RGBA8888)"
+
+endchoice
+
+endif
diff --git a/drivers/video/msm/Makefile b/drivers/video/msm/Makefile
index 802d6ae..280e528 100644
--- a/drivers/video/msm/Makefile
+++ b/drivers/video/msm/Makefile
@@ -1,19 +1,150 @@
-
-# core framebuffer
-#
 obj-y := msm_fb.o
 
-# MDP DMA/PPP engine
-#
-obj-y += mdp.o mdp_scale_tables.o mdp_ppp.o
+obj-$(CONFIG_FB_MSM_LOGO) += logo.o
+obj-$(CONFIG_FB_BACKLIGHT) += msm_fb_bl.o
 
-# MDDI interface
-#
-obj-y += mddi.o
+# MDP
+obj-y += mdp.o
 
-# MDDI client/panel drivers
-#
-obj-y += mddi_client_dummy.o
-obj-y += mddi_client_toshiba.o
-obj-y += mddi_client_nt35399.o
+obj-$(CONFIG_DEBUG_FS) += mdp_debugfs.o
 
+ifeq ($(CONFIG_FB_MSM_MDP40),y)
+obj-y += mdp4_util.o
+else
+obj-y += mdp_hw_init.o
+obj-y += mdp_ppp.o
+ifeq ($(CONFIG_FB_MSM_MDP31),y)
+obj-y += mdp_ppp_v31.o
+else
+obj-y += mdp_ppp_v20.o
+endif
+endif
+
+ifeq ($(CONFIG_FB_MSM_OVERLAY),y)
+obj-y += mdp4_overlay.o
+obj-y += mdp4_overlay_lcdc.o
+ifeq ($(CONFIG_FB_MSM_MIPI_DSI),y)
+obj-y += mdp4_overlay_dsi_video.o
+obj-y += mdp4_overlay_dsi_cmd.o
+else
+obj-y += mdp4_overlay_mddi.o
+endif
+else
+obj-y += mdp_dma_lcdc.o
+endif
+
+obj-$(CONFIG_FB_MSM_MDP303) += mdp_dma_dsi_video.o
+
+ifeq ($(CONFIG_FB_MSM_DTV),y)
+obj-y += mdp4_dtv.o
+obj-y += mdp4_overlay_dtv.o
+endif
+
+obj-y += mdp_dma.o
+obj-y += mdp_dma_s.o
+obj-y += mdp_vsync.o
+obj-y += mdp_cursor.o
+obj-y += mdp_dma_tv.o
+obj-$(CONFIG_ARCH_MSM7X27A) += msm_dss_io_7x27a.o
+obj-$(CONFIG_ARCH_MSM8X60) += msm_dss_io_8x60.o
+obj-$(CONFIG_ARCH_MSM8960) += msm_dss_io_8960.o
+
+# EBI2
+obj-$(CONFIG_FB_MSM_EBI2) += ebi2_lcd.o
+
+# LCDC
+obj-$(CONFIG_FB_MSM_LCDC) += lcdc.o
+
+# MDDI
+msm_mddi-objs := mddi.o mddihost.o mddihosti.o
+obj-$(CONFIG_FB_MSM_MDDI) += msm_mddi.o
+
+# External MDDI
+msm_mddi_ext-objs := mddihost_e.o mddi_ext.o
+obj-$(CONFIG_FB_MSM_EXTMDDI) += msm_mddi_ext.o
+
+# MIPI generic
+msm_mipi-objs := mipi_dsi.o mipi_dsi_host.o
+obj-$(CONFIG_FB_MSM_MIPI_DSI) += msm_mipi.o
+
+# MIPI manufacturer
+obj-$(CONFIG_FB_MSM_MIPI_DSI_TOSHIBA) += mipi_toshiba.o
+obj-$(CONFIG_FB_MSM_MIPI_DSI_NOVATEK) += mipi_novatek.o
+obj-$(CONFIG_FB_MSM_MIPI_DSI_RENESAS) += mipi_renesas.o
+obj-$(CONFIG_FB_MSM_MIPI_DSI_SIMULATOR) += mipi_simulator.o
+
+# TVEnc
+obj-$(CONFIG_FB_MSM_TVOUT) += tvenc.o
+ifeq ($(CONFIG_FB_MSM_OVERLAY),y)
+obj-$(CONFIG_FB_MSM_TVOUT) += mdp4_overlay_atv.o
+endif
+
+# MSM FB Panel
+obj-y += msm_fb_panel.o
+obj-$(CONFIG_FB_MSM_EBI2_TMD_QVGA_EPSON_QCIF) += ebi2_tmd20.o
+obj-$(CONFIG_FB_MSM_EBI2_TMD_QVGA_EPSON_QCIF) += ebi2_l2f.o
+
+ifeq ($(CONFIG_FB_MSM_MDDI_AUTO_DETECT),y)
+obj-y += mddi_prism.o
+obj-y += mddi_toshiba.o
+obj-y += mddi_toshiba_vga.o
+obj-y += mddi_toshiba_wvga_pt.o
+obj-y += mddi_toshiba_wvga.o
+obj-y += mddi_sharp.o
+obj-y += mddi_orise.o
+obj-y += mddi_quickvx.o
+else
+obj-$(CONFIG_FB_MSM_MDDI_PRISM_WVGA) += mddi_prism.o
+obj-$(CONFIG_FB_MSM_MDDI_TOSHIBA_COMMON) += mddi_toshiba.o
+obj-$(CONFIG_FB_MSM_MDDI_TOSHIBA_COMMON_VGA) += mddi_toshiba_vga.o
+obj-$(CONFIG_FB_MSM_MDDI_TOSHIBA_WVGA_PORTRAIT) += mddi_toshiba_wvga_pt.o
+obj-$(CONFIG_FB_MSM_MDDI_TOSHIBA_WVGA) += mddi_toshiba_wvga.o
+obj-$(CONFIG_FB_MSM_MDDI_SHARP_QVGA_128x128) += mddi_sharp.o
+obj-$(CONFIG_FB_MSM_MDDI_ORISE) += mddi_orise.o
+obj-$(CONFIG_FB_MSM_MDDI_QUICKVX) += mddi_quickvx.o
+endif
+
+ifeq ($(CONFIG_FB_MSM_MIPI_PANEL_DETECT),y)
+obj-y += mipi_toshiba_video_wvga_pt.o mipi_toshiba_video_wsvga_pt.o
+obj-y += mipi_novatek_video_qhd_pt.o mipi_novatek_cmd_qhd_pt.o
+obj-y += mipi_renesas_video_fwvga_pt.o mipi_renesas_cmd_fwvga_pt.o
+else
+obj-$(CONFIG_FB_MSM_MIPI_TOSHIBA_VIDEO_WVGA_PT) += mipi_toshiba_video_wvga_pt.o
+obj-$(CONFIG_FB_MSM_MIPI_TOSHIBA_VIDEO_WSVGA_PT) += mipi_toshiba_video_wsvga_pt.o
+obj-$(CONFIG_FB_MSM_MIPI_NOVATEK_VIDEO_QHD_PT) += mipi_novatek_video_qhd_pt.o
+obj-$(CONFIG_FB_MSM_MIPI_NOVATEK_CMD_QHD_PT) += mipi_novatek_cmd_qhd_pt.o
+obj-$(CONFIG_FB_MSM_MIPI_RENESAS_VIDEO_FWVGA_PT) += mipi_renesas_video_fwvga_pt.o
+obj-$(CONFIG_FB_MSM_MIPI_RENESAS_CMD_FWVGA_PT) += mipi_renesas_cmd_fwvga_pt.o
+obj-$(CONFIG_FB_MSM_MIPI_SIMULATOR_VIDEO) += mipi_simulator_video.o
+endif
+
+obj-$(CONFIG_FB_MSM_LCDC_PANEL) += lcdc_panel.o
+obj-$(CONFIG_FB_MSM_LCDC_PRISM_WVGA) += lcdc_prism.o
+obj-$(CONFIG_FB_MSM_LCDC_SAMSUNG_WSVGA) += lcdc_samsung_wsvga.o
+obj-$(CONFIG_FB_MSM_LCDC_CHIMEI_WXGA) += lcdc_chimei_wxga.o
+obj-$(CONFIG_FB_MSM_LCDC_EXTERNAL_WXGA) += lcdc_external.o
+obj-$(CONFIG_FB_MSM_HDMI_SII_EXTERNAL_720P) += hdmi_sii9022.o
+obj-$(CONFIG_FB_MSM_LCDC_GORDON_VGA) += lcdc_gordon.o
+obj-$(CONFIG_FB_MSM_LCDC_WXGA) += lcdc_wxga.o
+obj-$(CONFIG_FB_MSM_LCDC_TOSHIBA_WVGA_PT) += lcdc_toshiba_wvga_pt.o
+obj-$(CONFIG_FB_MSM_LCDC_TOSHIBA_FWVGA_PT) += lcdc_toshiba_fwvga_pt.o
+obj-$(CONFIG_FB_MSM_LCDC_SHARP_WVGA_PT) += lcdc_sharp_wvga_pt.o
+obj-$(CONFIG_FB_MSM_LCDC_AUO_WVGA) += lcdc_auo_wvga.o
+obj-$(CONFIG_FB_MSM_LCDC_SAMSUNG_OLED_PT) += lcdc_samsung_oled_pt.o
+obj-$(CONFIG_FB_MSM_HDMI_ADV7520_PANEL) += adv7520.o
+obj-$(CONFIG_FB_MSM_LCDC_ST15_WXGA) += lcdc_st15.o
+obj-$(CONFIG_FB_MSM_HDMI_MSM_PANEL) += hdmi_msm.o
+obj-$(CONFIG_FB_MSM_EXT_INTERFACE_COMMON) += external_common.o
+
+obj-$(CONFIG_FB_MSM_TVOUT) += tvout_msm.o
+
+obj-$(CONFIG_FB_MSM_EXTMDDI_SVGA) += mddi_ext_lcd.o
+
+obj-$(CONFIG_MSM_VIDC_1080P) += vidc/
+obj-$(CONFIG_MSM_VIDC_720P) += vidc/
+
+clean:
+	rm *.o .*cmd
diff --git a/drivers/video/msm/adv7520.c b/drivers/video/msm/adv7520.c
new file mode 100644
index 0000000..0900f23
--- /dev/null
+++ b/drivers/video/msm/adv7520.c
@@ -0,0 +1,1005 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/i2c.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/adv7520.h>
+#include <linux/time.h>
+#include <linux/completion.h>
+#include <linux/wakelock.h>
+#include <asm/atomic.h>
+#include "msm_fb.h"
+
+#define DEBUG
+#define DEV_DBG_PREFIX "HDMI: "
+
+#include "external_common.h"
+
+/* #define PORT_DEBUG */
+/* #define TESTING_FORCE_480p */
+
+#define HPD_DUTY_CYCLE	4 /*secs*/
+
+static struct external_common_state_type hdmi_common;
+
+static struct i2c_client *hclient;
+
+static bool chip_power_on = FALSE;	/* For chip power on/off */
+static bool enable_5v_on = FALSE;
+static bool hpd_power_on = FALSE;
+static atomic_t comm_power_on;	/* For dtv power on/off (I2C) */
+static int suspend_count;
+
+static u8 reg[256];	/* HDMI panel registers */
+
+struct hdmi_data {
+	struct msm_hdmi_platform_data *pd;
+	struct work_struct isr_work;
+};
+static struct hdmi_data *dd;
+static struct work_struct hpd_timer_work;
+
+#ifdef CONFIG_FB_MSM_HDMI_ADV7520_PANEL_HDCP_SUPPORT
+static struct work_struct hdcp_handle_work;
+static int hdcp_activating;
+static DEFINE_MUTEX(hdcp_state_mutex);
+static int has_hdcp_hw_support = true;
+#endif
+
+static struct timer_list hpd_timer;
+static struct timer_list hpd_duty_timer;
+static struct work_struct hpd_duty_work;
+static unsigned int monitor_sense;
+static boolean hpd_cable_chg_detected;
+
+struct wake_lock wlock;
+
+/* Change HDMI state */
+static void change_hdmi_state(int online)
+{
+	if (!external_common_state)
+		return;
+
+	mutex_lock(&external_common_state_hpd_mutex);
+	external_common_state->hpd_state = online;
+	mutex_unlock(&external_common_state_hpd_mutex);
+
+	if (!external_common_state->uevent_kobj)
+		return;
+
+	if (online)
+		kobject_uevent(external_common_state->uevent_kobj,
+			KOBJ_ONLINE);
+	else
+		kobject_uevent(external_common_state->uevent_kobj,
+			KOBJ_OFFLINE);
+	DEV_INFO("adv7520_uevent: %d [suspend# %d]\n", online, suspend_count);
+}
+
+
+/*
+ * Read a value from a register on the ADV7520 device.
+ * Returns the value read if successful, otherwise an error.
+ */
+static u8 adv7520_read_reg(struct i2c_client *client, u8 reg)
+{
+	int err;
+	struct i2c_msg msg[2];
+	u8 reg_buf[] = { reg };
+	u8 data_buf[] = { 0 };
+
+	if (!client->adapter)
+		return -ENODEV;
+	if (!atomic_read(&comm_power_on)) {
+		DEV_WARN("%s: WARN: missing GPIO power\n", __func__);
+		return -ENODEV;
+	}
+
+	msg[0].addr = client->addr;
+	msg[0].flags = 0;
+	msg[0].len = 1;
+	msg[0].buf = reg_buf;
+
+	msg[1].addr = client->addr;
+	msg[1].flags = I2C_M_RD;
+	msg[1].len = 1;
+	msg[1].buf = data_buf;
+
+	err = i2c_transfer(client->adapter, msg, 2);
+
+	if (err < 0) {
+		DEV_INFO("%s: I2C err: %d\n", __func__, err);
+		return err;
+	}
+
+#ifdef PORT_DEBUG
+	DEV_INFO("HDMI[%02x] [R] %02x\n", reg, *data_buf);
+#endif
+	return *data_buf;
+}
+
+/*
+ * Write a value to a register on adv7520 device.
+ * Returns zero if successful, or non-zero otherwise.
+ */
+static int adv7520_write_reg(struct i2c_client *client, u8 reg, u8 val)
+{
+	int err;
+	struct i2c_msg msg[1];
+	unsigned char data[2];
+
+	if (!client->adapter)
+		return -ENODEV;
+	if (!atomic_read(&comm_power_on)) {
+		DEV_WARN("%s: WARN: missing GPIO power\n", __func__);
+		return -ENODEV;
+	}
+
+	msg->addr = client->addr;
+	msg->flags = 0;
+	msg->len = 2;
+	msg->buf = data;
+	data[0] = reg;
+	data[1] = val;
+
+	err = i2c_transfer(client->adapter, msg, 1);
+	if (err >= 0)
+		return 0;
+#ifdef PORT_DEBUG
+	DEV_INFO("HDMI[%02x] [W] %02x [%d]\n", reg, val, err);
+#endif
+	return err;
+}
+
+#ifdef CONFIG_FB_MSM_HDMI_ADV7520_PANEL_HDCP_SUPPORT
+static void adv7520_close_hdcp_link(void)
+{
+	if (!external_common_state->hdcp_active && !hdcp_activating)
+		return;
+
+	DEV_INFO("HDCP: Close link\n");
+
+	reg[0xD5] = adv7520_read_reg(hclient, 0xD5);
+	reg[0xD5] &= 0xFE;
+	adv7520_write_reg(hclient, 0xD5, (u8)reg[0xD5]);
+
+	reg[0x16] = adv7520_read_reg(hclient, 0x16);
+	reg[0x16] &= 0xFE;
+	adv7520_write_reg(hclient, 0x16, (u8)reg[0x16]);
+
+	/* UnMute Audio */
+	adv7520_write_reg(hclient, 0x0C, (u8)0x84);
+
+	external_common_state->hdcp_active = FALSE;
+	mutex_lock(&hdcp_state_mutex);
+	hdcp_activating = FALSE;
+	mutex_unlock(&hdcp_state_mutex);
+}
+
+static void adv7520_comm_power(int on, int show);
+static void adv7520_hdcp_enable(struct work_struct *work)
+{
+	DEV_INFO("HDCP: Start reg[0xaf]=%02x (mute audio)\n", reg[0xaf]);
+
+	adv7520_comm_power(1, 1);
+
+	/* Mute Audio */
+	adv7520_write_reg(hclient, 0x0C, (u8)0xC3);
+
+	msleep(200);
+	/* Wait for BKSV ready interrupt */
+	/* Read BKSV's keys from HDTV */
+	reg[0xBF] = adv7520_read_reg(hclient, 0xBF);
+	reg[0xC0] = adv7520_read_reg(hclient, 0xC0);
+	reg[0xC1] = adv7520_read_reg(hclient, 0xC1);
+	reg[0xC2] = adv7520_read_reg(hclient, 0xC2);
+	reg[0xc3] = adv7520_read_reg(hclient, 0xC3);
+
+	DEV_DBG("HDCP: BKSV={%02x,%02x,%02x,%02x,%02x}\n", reg[0xbf], reg[0xc0],
+		reg[0xc1], reg[0xc2], reg[0xc3]);
+
+	/* Is the sink a repeater? */
+	reg[0xBE] = adv7520_read_reg(hclient, 0xBE);
+	if (!(reg[0xBE] & 0x40)) {
+		; /* compare with revocation list */
+		/* Check for 20 ones and 20 zeros */
+	} else {
+		/* Don't implement HDCP if the sink is a repeater */
+		adv7520_write_reg(hclient, 0x0C, (u8)0x84);
+		mutex_lock(&hdcp_state_mutex);
+		hdcp_activating = FALSE;
+		mutex_unlock(&hdcp_state_mutex);
+		DEV_WARN("HDCP: Sink Repeater (%02x), (unmute audio)\n",
+			reg[0xbe]);
+
+		adv7520_comm_power(0, 1);
+		return;
+	}
+
+	msleep(200);
+	reg[0xB8] = adv7520_read_reg(hclient, 0xB8);
+	DEV_INFO("HDCP: Status reg[0xB8] is %02x\n", reg[0xb8]);
+	if (reg[0xb8] & 0x40) {
+		/* UnMute Audio */
+		adv7520_write_reg(hclient, 0x0C, (u8)0x84);
+		DEV_INFO("HDCP: A/V content Encrypted (unmute audio)\n");
+		external_common_state->hdcp_active = TRUE;
+	}
+	adv7520_comm_power(0, 1);
+
+	mutex_lock(&hdcp_state_mutex);
+	hdcp_activating = FALSE;
+	mutex_unlock(&hdcp_state_mutex);
+}
+#endif
+
+static int adv7520_read_edid_block(int block, uint8 *edid_buf)
+{
+	u8 r = 0;
+	int ret;
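+	/* Read the full 256-byte EDID block from the monitor address in reg[0x43] */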
+	struct i2c_msg msg[] = {
+		{ .addr = reg[0x43] >> 1,
+		  .flags = 0,
+		  .len = 1,
+		  .buf = &r },
+		{ .addr = reg[0x43] >> 1,
+		  .flags = I2C_M_RD,
+		  .len = 0x100,
+		  .buf = edid_buf } };
+
+	if (block > 0)
+		return 0;
+	ret = i2c_transfer(hclient->adapter, msg, 2);
+	DEV_DBG("EDID block: addr=%02x, ret=%d\n", reg[0x43] >> 1, ret);
+	return (ret < 2) ? -ENODEV : 0;
+}
+
+static void adv7520_read_edid(void)
+{
+	external_common_state->read_edid_block = adv7520_read_edid_block;
+	if (hdmi_common_read_edid()) {
+		u8 timeout;
+		DEV_INFO("%s: retry\n", __func__);
+		adv7520_write_reg(hclient, 0xc9, 0x13);
+		msleep(500);
+		timeout = (adv7520_read_reg(hclient, 0x96) & (1 << 2));
+		if (timeout) {
+			hdmi_common_read_edid();
+		}
+	}
+}
+
+static void adv7520_chip_on(void)
+{
+	if (!chip_power_on) {
+		/* Get the current register holding the power bit. */
+		unsigned long reg0xaf = adv7520_read_reg(hclient, 0xaf);
+
+		dd->pd->core_power(1, 1);
+
+		/* Set the HDMI select bit. */
+		set_bit(1, &reg0xaf);
+		DEV_INFO("%s: turn on chip power\n", __func__);
+		adv7520_write_reg(hclient, 0x41, 0x10);
+		adv7520_write_reg(hclient, 0xaf, (u8)reg0xaf);
+		chip_power_on = TRUE;
+	} else
+		DEV_INFO("%s: chip already has power\n", __func__);
+}
+
+static void adv7520_chip_off(void)
+{
+	if (chip_power_on) {
+#ifdef CONFIG_FB_MSM_HDMI_ADV7520_PANEL_HDCP_SUPPORT
+		if (has_hdcp_hw_support)
+			adv7520_close_hdcp_link();
+#endif
+
+		DEV_INFO("%s: turn off chip power\n", __func__);
+		adv7520_write_reg(hclient, 0x41, 0x50);
+		dd->pd->core_power(0, 1);
+		chip_power_on = FALSE;
+	} else
+		DEV_INFO("%s: chip is already off\n", __func__);
+
+	monitor_sense = 0;
+	hpd_cable_chg_detected = FALSE;
+
+	if (enable_5v_on) {
+		dd->pd->enable_5v(0);
+		enable_5v_on = FALSE;
+	}
+}
+
+/*  Power ON/OFF  ADV7520 chip */
+static void adv7520_isr_w(struct work_struct *work);
+static void adv7520_comm_power(int on, int show)
+{
+	if (!on)
+		atomic_dec(&comm_power_on);
+	dd->pd->comm_power(on, 0/*show*/);
+	if (on)
+		atomic_inc(&comm_power_on);
+}
+
+#ifdef CONFIG_FB_MSM_HDMI_ADV7520_PANEL_HDCP_SUPPORT
+static void adv7520_start_hdcp(void);
+#endif
+static int adv7520_power_on(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
+
+	external_common_state->dev = &pdev->dev;
+	if (mfd != NULL) {
+		DEV_INFO("adv7520_power: ON (%dx%d %d)\n",
+			mfd->var_xres, mfd->var_yres, mfd->var_pixclock);
+		hdmi_common_get_video_format_from_drv_data(mfd);
+	}
+
+	adv7520_comm_power(1, 1);
+	/* Check if HPD is signaled */
+	if (adv7520_read_reg(hclient, 0x42) & (1 << 6)) {
+		DEV_INFO("power_on: cable detected\n");
+		monitor_sense = adv7520_read_reg(hclient, 0xC6);
+#ifdef CONFIG_FB_MSM_HDMI_ADV7520_PANEL_HDCP_SUPPORT
+		if (has_hdcp_hw_support) {
+			if (!hdcp_activating)
+				adv7520_start_hdcp();
+		}
+#endif
+	} else
+		DEV_INFO("power_on: cable NOT detected\n");
+	adv7520_comm_power(0, 1);
+	wake_lock(&wlock);
+
+	return 0;
+}
+
+static int adv7520_power_off(struct platform_device *pdev)
+{
+	DEV_INFO("power_off\n");
+	adv7520_comm_power(1, 1);
+	adv7520_chip_off();
+	wake_unlock(&wlock);
+	adv7520_comm_power(0, 1);
+
+	return 0;
+}
+
+
+/* AV7520 chip specific initialization */
+static void adv7520_chip_init(void)
+{
+	/* Initialize the variables used to read/write the ADV7520 chip. */
+	memset(&reg, 0xff, sizeof(reg));
+
+	/* Get the values from the "Fixed Registers That Must Be Set". */
+	reg[0x98] = adv7520_read_reg(hclient, 0x98);
+	reg[0x9c] = adv7520_read_reg(hclient, 0x9c);
+	reg[0x9d] = adv7520_read_reg(hclient, 0x9d);
+	reg[0xa2] = adv7520_read_reg(hclient, 0xa2);
+	reg[0xa3] = adv7520_read_reg(hclient, 0xa3);
+	reg[0xde] = adv7520_read_reg(hclient, 0xde);
+
+	/* Get the "HDMI/DVI Selection" register. */
+	reg[0xaf] = adv7520_read_reg(hclient, 0xaf);
+
+	/* Read Packet Memory I2C Address */
+	reg[0x45] = adv7520_read_reg(hclient, 0x45);
+
+	/* Hard coded values provided by ADV7520 data sheet. */
+	reg[0x98] = 0x03;
+	reg[0x9c] = 0x38;
+	reg[0x9d] = 0x61;
+	reg[0xa2] = 0x94;
+	reg[0xa3] = 0x94;
+	reg[0xde] = 0x88;
+
+	/* Set the HDMI select bit. */
+	reg[0xaf] |= 0x16;
+
+	/* Set the audio related registers. */
+	reg[0x01] = 0x00;
+	reg[0x02] = 0x2d;
+	reg[0x03] = 0x80;
+	reg[0x0a] = 0x4d;
+	reg[0x0b] = 0x0e;
+	reg[0x0c] = 0x84;
+	reg[0x0d] = 0x10;
+	reg[0x12] = 0x00;
+	reg[0x14] = 0x00;
+	reg[0x15] = 0x20;
+	reg[0x44] = 0x79;
+	reg[0x73] = 0x01;
+	reg[0x76] = 0x00;
+
+	/* Set 720p display related registers */
+	reg[0x16] = 0x00;
+
+	reg[0x18] = 0x46;
+	reg[0x55] = 0x00;
+	reg[0x3c] = 0x04;
+
+	/* Set Interrupt Mask register for HPD/HDCP */
+	reg[0x94] = 0xC0;
+#ifdef CONFIG_FB_MSM_HDMI_ADV7520_PANEL_HDCP_SUPPORT
+	if (has_hdcp_hw_support)
+		reg[0x95] = 0xC0;
+	else
+		reg[0x95] = 0x00;
+#else
+	reg[0x95] = 0x00;
+#endif
+	adv7520_write_reg(hclient, 0x94, reg[0x94]);
+	adv7520_write_reg(hclient, 0x95, reg[0x95]);
+
+	/* Set Packet Memory I2C Address */
+	reg[0x45] = 0x74;
+
+	/* Set the values from the "Fixed Registers That Must Be Set". */
+	adv7520_write_reg(hclient, 0x98, reg[0x98]);
+	adv7520_write_reg(hclient, 0x9c, reg[0x9c]);
+	adv7520_write_reg(hclient, 0x9d, reg[0x9d]);
+	adv7520_write_reg(hclient, 0xa2, reg[0xa2]);
+	adv7520_write_reg(hclient, 0xa3, reg[0xa3]);
+	adv7520_write_reg(hclient, 0xde, reg[0xde]);
+
+	/* Set the "HDMI/DVI Selection" register. */
+	adv7520_write_reg(hclient, 0xaf, reg[0xaf]);
+
+	/* Set EDID Monitor address */
+	reg[0x43] = 0x7E;
+	adv7520_write_reg(hclient, 0x43, reg[0x43]);
+
+	/* Enable the i2s audio input. */
+	adv7520_write_reg(hclient, 0x01, reg[0x01]);
+	adv7520_write_reg(hclient, 0x02, reg[0x02]);
+	adv7520_write_reg(hclient, 0x03, reg[0x03]);
+	adv7520_write_reg(hclient, 0x0a, reg[0x0a]);
+	adv7520_write_reg(hclient, 0x0b, reg[0x0b]);
+	adv7520_write_reg(hclient, 0x0c, reg[0x0c]);
+	adv7520_write_reg(hclient, 0x0d, reg[0x0d]);
+	adv7520_write_reg(hclient, 0x12, reg[0x12]);
+	adv7520_write_reg(hclient, 0x14, reg[0x14]);
+	adv7520_write_reg(hclient, 0x15, reg[0x15]);
+	adv7520_write_reg(hclient, 0x44, reg[0x44]);
+	adv7520_write_reg(hclient, 0x73, reg[0x73]);
+	adv7520_write_reg(hclient, 0x76, reg[0x76]);
+
+	/* Enable 720p display */
+	adv7520_write_reg(hclient, 0x16, reg[0x16]);
+	adv7520_write_reg(hclient, 0x18, reg[0x18]);
+	adv7520_write_reg(hclient, 0x55, reg[0x55]);
+	adv7520_write_reg(hclient, 0x3c, reg[0x3c]);
+
+	/*
+	 * Set Packet Memory address to avoid conflict with the
+	 * Bosch accelerometer.
+	 */
+	adv7520_write_reg(hclient, 0x45, reg[0x45]);
+
+	/* Ensure chip is in low-power state */
+	adv7520_write_reg(hclient, 0x41, 0x50);
+}
+
+#ifdef CONFIG_FB_MSM_HDMI_ADV7520_PANEL_HDCP_SUPPORT
+static void adv7520_start_hdcp(void)
+{
+	mutex_lock(&hdcp_state_mutex);
+	if (hdcp_activating) {
+		DEV_WARN("adv7520_timer: HDCP already"
+			" activating, skipping\n");
+		mutex_unlock(&hdcp_state_mutex);
+		return;
+	}
+	hdcp_activating = TRUE;
+	mutex_unlock(&hdcp_state_mutex);
+
+	del_timer(&hpd_duty_timer);
+
+	adv7520_comm_power(1, 1);
+
+	if (!enable_5v_on) {
+		dd->pd->enable_5v(1);
+		enable_5v_on = TRUE;
+		adv7520_chip_on();
+	}
+
+	/* request for HDCP */
+	reg[0xaf] = adv7520_read_reg(hclient, 0xaf);
+	reg[0xaf] |= 0x90;
+	adv7520_write_reg(hclient, 0xaf, reg[0xaf]);
+	reg[0xaf] = adv7520_read_reg(hclient, 0xaf);
+
+	reg[0xba] = adv7520_read_reg(hclient, 0xba);
+	reg[0xba] |= 0x10;
+	adv7520_write_reg(hclient, 0xba, reg[0xba]);
+	reg[0xba] = adv7520_read_reg(hclient, 0xba);
+	adv7520_comm_power(0, 1);
+
+	DEV_INFO("HDCP: reg[0xaf]=0x%02x, reg[0xba]=0x%02x, waiting for BKSV\n",
+				reg[0xaf], reg[0xba]);
+
+	/* will check for HDCP Error or BKSV ready */
+	mod_timer(&hpd_duty_timer, jiffies + HZ/2);
+}
+#endif
+
+static void adv7520_hpd_timer_w(struct work_struct *work)
+{
+	if (!external_common_state->hpd_feature_on) {
+		DEV_INFO("adv7520_timer: skipping, feature off\n");
+		return;
+	}
+
+	if ((monitor_sense & 0x4) && !external_common_state->hpd_state) {
+		int timeout;
+		DEV_DBG("adv7520_timer: Cable Detected\n");
+		adv7520_comm_power(1, 1);
+		adv7520_chip_on();
+
+		if (hpd_cable_chg_detected) {
+			hpd_cable_chg_detected = FALSE;
+			/* Ensure 5V to read EDID */
+			if (!enable_5v_on) {
+				dd->pd->enable_5v(1);
+				enable_5v_on = TRUE;
+			}
+			msleep(500);
+			timeout = (adv7520_read_reg(hclient, 0x96) & (1 << 2));
+			if (timeout) {
+				DEV_DBG("adv7520_timer: EDID-Ready..\n");
+				adv7520_read_edid();
+			} else
+				DEV_DBG("adv7520_timer: EDID TIMEOUT (C9=%02x)"
+					"\n", adv7520_read_reg(hclient, 0xC9));
+		}
+#ifdef TESTING_FORCE_480p
+		external_common_state->disp_mode_list.num_of_elements = 1;
+		external_common_state->disp_mode_list.disp_mode_list[0] =
+			HDMI_VFRMT_720x480p60_16_9;
+#endif
+		adv7520_comm_power(0, 1);
+#ifndef CONFIG_FB_MSM_HDMI_ADV7520_PANEL_HDCP_SUPPORT
+		/* HDMI_5V_EN not needed anymore */
+		if (enable_5v_on) {
+			DEV_DBG("adv7520_timer: EDID done, no HDCP, 5V not "
+				"needed anymore\n");
+			dd->pd->enable_5v(0);
+			enable_5v_on = FALSE;
+		}
+#endif
+		change_hdmi_state(1);
+	} else if (external_common_state->hpd_state) {
+		adv7520_comm_power(1, 1);
+		adv7520_chip_off();
+		adv7520_comm_power(0, 1);
+		DEV_DBG("adv7520_timer: Cable Removed\n");
+		change_hdmi_state(0);
+	}
+}
+
+static void adv7520_hpd_timer_f(unsigned long data)
+{
+	schedule_work(&hpd_timer_work);
+}
+
+static void adv7520_isr_w(struct work_struct *work)
+{
+	static int state_count;
+	static u8 last_reg0x96;
+	u8 reg0xc8;
+	u8 reg0x96;
+#ifdef CONFIG_FB_MSM_HDMI_ADV7520_PANEL_HDCP_SUPPORT
+	static u8 last_reg0x97;
+	u8 reg0x97 = 0;
+#endif
+	if (!external_common_state->hpd_feature_on) {
+		DEV_DBG("adv7520_irq: skipping, hpd off\n");
+		return;
+	}
+
+	adv7520_comm_power(1, 1);
+	reg0x96 = adv7520_read_reg(hclient, 0x96);
+#ifdef CONFIG_FB_MSM_HDMI_ADV7520_PANEL_HDCP_SUPPORT
+	if (has_hdcp_hw_support) {
+		reg0x97 = adv7520_read_reg(hclient, 0x97);
+		/* Clearing the Interrupts */
+		adv7520_write_reg(hclient, 0x97, reg0x97);
+	}
+#endif
+	/* Clearing the Interrupts */
+	adv7520_write_reg(hclient, 0x96, reg0x96);
+
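+	/*
+	 * reg 0x96 == 0xC0 or bit 6 set indicates an HPD/monitor-sense
+	 * change: latch the monitor sense and debounce it via the HPD timer.
+	 */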
+	if ((reg0x96 == 0xC0) || (reg0x96 & 0x40)) {
+#ifdef DEBUG
+		unsigned int hpd_state = adv7520_read_reg(hclient, 0x42);
+#endif
+		monitor_sense = adv7520_read_reg(hclient, 0xC6);
+		DEV_DBG("adv7520_irq: reg[0x42]=%02x && reg[0xC6]=%02x\n",
+			hpd_state, monitor_sense);
+
+		if (!enable_5v_on) {
+			dd->pd->enable_5v(1);
+			enable_5v_on = TRUE;
+		}
+		if (!hpd_power_on) {
+			dd->pd->core_power(1, 1);
+			hpd_power_on = TRUE;
+		}
+
+		/* Debounce the interrupt: re-check in 0.5 s */
+		DEV_DBG("adv7520_irq: Timer in .5sec\n");
+		hpd_cable_chg_detected = TRUE;
+		mod_timer(&hpd_timer, jiffies + HZ/2);
+	}
+#ifdef CONFIG_FB_MSM_HDMI_ADV7520_PANEL_HDCP_SUPPORT
+	if (has_hdcp_hw_support) {
+		if (hdcp_activating) {
+			/* HDCP controller error Interrupt */
+			if (reg0x97 & 0x80) {
+				DEV_ERR("adv7520_irq: HDCP_ERROR\n");
+				state_count = 0;
+				adv7520_close_hdcp_link();
+			/* BKSV Ready interrupts */
+			} else if (reg0x97 & 0x40) {
+				DEV_INFO("adv7520_irq: BKSV keys ready, Begin"
+					" HDCP encryption\n");
+				state_count = 0;
+				schedule_work(&hdcp_handle_work);
+			} else if (++state_count > 2 && (monitor_sense & 0x4)) {
+				DEV_INFO("adv7520_irq: Still waiting for BKSV,"
+				"restart HDCP\n");
+				hdcp_activating = FALSE;
+				state_count = 0;
+				adv7520_chip_off();
+				adv7520_start_hdcp();
+			}
+			reg0xc8 = adv7520_read_reg(hclient, 0xc8);
+			DEV_INFO("adv7520_irq: DDC controller reg[0xC8]=0x%02x,"
+				"state_count=%d, monitor_sense=%x\n",
+				reg0xc8, state_count, monitor_sense);
+		} else if (!external_common_state->hdcp_active
+			&& (monitor_sense & 0x4)) {
+			DEV_INFO("adv7520_irq: start HDCP with"
+				" monitor sense\n");
+			state_count = 0;
+			adv7520_start_hdcp();
+		} else
+			state_count = 0;
+		if (last_reg0x97 != reg0x97 || last_reg0x96 != reg0x96)
+			DEV_DBG("adv7520_irq: reg[0x96]=%02x "
+				"reg[0x97]=%02x: HDCP: %d\n", reg0x96, reg0x97,
+				external_common_state->hdcp_active);
+		last_reg0x97 = reg0x97;
+	} else {
+		if (last_reg0x96 != reg0x96)
+			DEV_DBG("adv7520_irq: reg[0x96]=%02x\n", reg0x96);
+	}
+#else
+	if (last_reg0x96 != reg0x96)
+		DEV_DBG("adv7520_irq: reg[0x96]=%02x\n", reg0x96);
+#endif
+	last_reg0x96 = reg0x96;
+	adv7520_comm_power(0, 1);
+}
+
+static void adv7520_hpd_duty_work(struct work_struct *work)
+{
+	if (!external_common_state->hpd_feature_on) {
+		DEV_WARN("%s: hpd feature is off, skipping\n", __func__);
+		return;
+	}
+
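+	/*
+	 * Briefly power the core so the chip registers can be read, run the
+	 * ISR worker inline, then drop core power again.
+	 */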
+	dd->pd->core_power(1, 0);
+	msleep(10);
+	adv7520_isr_w(NULL);
+	dd->pd->core_power(0, 0);
+}
+
+static void adv7520_hpd_duty_timer_f(unsigned long data)
+{
+	if (!external_common_state->hpd_feature_on) {
+		DEV_WARN("%s: hpd feature is off, skipping\n", __func__);
+		return;
+	}
+
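+	/* Re-arm the polling timer and defer register access to the workqueue */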
+	mod_timer(&hpd_duty_timer, jiffies + HPD_DUTY_CYCLE*HZ);
+	schedule_work(&hpd_duty_work);
+}
+
+static const struct i2c_device_id adv7520_id[] = {
+	{ ADV7520_DRV_NAME, 0 },
+	{}
+};
+
+static struct msm_fb_panel_data hdmi_panel_data = {
+	.on  = adv7520_power_on,
+	.off = adv7520_power_off,
+};
+
+static struct platform_device hdmi_device = {
+	.name = ADV7520_DRV_NAME,
+	.id   = 2,
+	.dev  = {
+		.platform_data = &hdmi_panel_data,
+		}
+};
+
+static void adv7520_ensure_init(void)
+{
+	static boolean init_done;
+	if (!init_done) {
+		int rc = dd->pd->init_irq();
+		if (rc) {
+			DEV_ERR("adv7520_init: init_irq: %d\n", rc);
+			return;
+		}
+
+		init_done = TRUE;
+	}
+	DEV_INFO("adv7520_init: chip init\n");
+	adv7520_comm_power(1, 1);
+	adv7520_chip_init();
+	adv7520_comm_power(0, 1);
+}
+
+static int adv7520_hpd_feature(int on)
+{
+	int rc = 0;
+
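+	/*
+	 * Feature turned off: drop 5V and core power, stop both timers and
+	 * clear the cached HPD state.
+	 */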
+	if (!on) {
+		if (enable_5v_on) {
+			dd->pd->enable_5v(0);
+			enable_5v_on = FALSE;
+		}
+		if (hpd_power_on) {
+			dd->pd->core_power(0, 1);
+			hpd_power_on = FALSE;
+		}
+
+		DEV_DBG("adv7520_hpd: %d: stop duty timer\n", on);
+		del_timer(&hpd_timer);
+		del_timer(&hpd_duty_timer);
+		external_common_state->hpd_state = 0;
+	}
+
+	if (on) {
+		dd->pd->core_power(1, 0);
+		adv7520_ensure_init();
+
+		adv7520_comm_power(1, 1);
+		monitor_sense = adv7520_read_reg(hclient, 0xC6);
+		DEV_DBG("adv7520_irq: reg[0xC6]=%02x\n", monitor_sense);
+		adv7520_comm_power(0, 1);
+		dd->pd->core_power(0, 0);
+
+		if (monitor_sense & 0x4) {
+			if (!enable_5v_on) {
+				dd->pd->enable_5v(1);
+				enable_5v_on = TRUE;
+			}
+			if (!hpd_power_on) {
+				dd->pd->core_power(1, 1);
+				hpd_power_on = TRUE;
+			}
+
+			hpd_cable_chg_detected = TRUE;
+			mod_timer(&hpd_timer, jiffies + HZ/2);
+		}
+
+		DEV_DBG("adv7520_hpd: %d start duty timer\n", on);
+		mod_timer(&hpd_duty_timer, jiffies + HZ/100);
+	}
+
+	DEV_INFO("adv7520_hpd: %d\n", on);
+	return rc;
+}
+
+static int __devinit
+	adv7520_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+	int rc;
+	struct platform_device *fb_dev;
+
+	dd = kzalloc(sizeof *dd, GFP_KERNEL);
+	if (!dd) {
+		rc = -ENOMEM;
+		goto probe_exit;
+	}
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		rc = -ENODEV;
+		goto probe_free;
+	}
+
+	external_common_state->dev = &client->dev;
+
+	/* Init real i2c_client */
+	hclient = client;
+
+	i2c_set_clientdata(client, dd);
+	dd->pd = client->dev.platform_data;
+	if (!dd->pd) {
+		rc = -ENODEV;
+		goto probe_free;
+	}
+
+	INIT_WORK(&dd->isr_work, adv7520_isr_w);
+	INIT_WORK(&hpd_timer_work, adv7520_hpd_timer_w);
+#ifdef CONFIG_FB_MSM_HDMI_ADV7520_PANEL_HDCP_SUPPORT
+	if (dd->pd->check_hdcp_hw_support)
+		has_hdcp_hw_support = dd->pd->check_hdcp_hw_support();
+
+	if (has_hdcp_hw_support)
+		INIT_WORK(&hdcp_handle_work, adv7520_hdcp_enable);
+	else
+		DEV_INFO("%s: no hdcp hw support.\n", __func__);
+#endif
+
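+	/*
+	 * Both timers are armed far in the future here; they only fire after
+	 * being re-armed with mod_timer().
+	 */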
+	init_timer(&hpd_timer);
+	hpd_timer.function = adv7520_hpd_timer_f;
+	hpd_timer.data = (unsigned long)NULL;
+	hpd_timer.expires = 0xffffffff;
+	add_timer(&hpd_timer);
+
+	external_common_state->hpd_feature = adv7520_hpd_feature;
+	DEV_INFO("adv7520_probe: HPD detection on request\n");
+	init_timer(&hpd_duty_timer);
+	hpd_duty_timer.function = adv7520_hpd_duty_timer_f;
+	hpd_duty_timer.data = (unsigned long)NULL;
+	hpd_duty_timer.expires = 0xffffffff;
+	add_timer(&hpd_duty_timer);
+	INIT_WORK(&hpd_duty_work, adv7520_hpd_duty_work);
+	DEV_INFO("adv7520_probe: HPD detection ON (duty)\n");
+
+	fb_dev = msm_fb_add_device(&hdmi_device);
+
+	if (fb_dev) {
+		rc = external_common_state_create(fb_dev);
+		if (rc)
+			goto probe_free;
+	} else
+		DEV_ERR("adv7520_probe: failed to add fb device\n");
+
+	return 0;
+
+probe_free:
+	kfree(dd);
+	dd = NULL;
+probe_exit:
+	return rc;
+
+}
+
+static int __devexit adv7520_remove(struct i2c_client *client)
+{
+	if (!client->adapter) {
+		DEV_ERR("%s: No HDMI Device\n", __func__);
+		return -ENODEV;
+	}
+	wake_lock_destroy(&wlock);
+	kfree(dd);
+	dd = NULL;
+	return 0;
+}
+
+#ifdef CONFIG_SUSPEND
+static int adv7520_i2c_suspend(struct device *dev)
+{
+	DEV_INFO("%s\n", __func__);
+
+	++suspend_count;
+
+	if (external_common_state->hpd_feature_on) {
+		DEV_DBG("%s: stop duty timer\n", __func__);
+		del_timer(&hpd_duty_timer);
+		del_timer(&hpd_timer);
+	}
+
+	/* Turn off LDO8 and go into low-power state */
+	if (chip_power_on) {
+		DEV_DBG("%s: turn off power\n", __func__);
+		adv7520_comm_power(1, 1);
+		adv7520_write_reg(hclient, 0x41, 0x50);
+		adv7520_comm_power(0, 1);
+		dd->pd->core_power(0, 1);
+	}
+
+	return 0;
+}
+
+static int adv7520_i2c_resume(struct device *dev)
+{
+	DEV_INFO("%s\n", __func__);
+
+	/* Turn on LDO8 and go into normal-power state */
+	if (chip_power_on) {
+		DEV_DBG("%s: turn on power\n", __func__);
+		dd->pd->core_power(1, 1);
+		adv7520_comm_power(1, 1);
+		adv7520_write_reg(hclient, 0x41, 0x10);
+		adv7520_comm_power(0, 1);
+	}
+
+	if (external_common_state->hpd_feature_on) {
+		DEV_DBG("%s: start duty timer\n", __func__);
+		mod_timer(&hpd_duty_timer, jiffies + HPD_DUTY_CYCLE*HZ);
+	}
+
+	return 0;
+}
+#else
+#define adv7520_i2c_suspend	NULL
+#define adv7520_i2c_resume	NULL
+#endif
+
+static const struct dev_pm_ops adv7520_device_pm_ops = {
+	.suspend = adv7520_i2c_suspend,
+	.resume = adv7520_i2c_resume,
+};
+
+static struct i2c_driver hdmi_i2c_driver = {
+	.driver		= {
+		.name   = ADV7520_DRV_NAME,
+		.owner  = THIS_MODULE,
+		.pm     = &adv7520_device_pm_ops,
+	},
+	.probe		= adv7520_probe,
+	.id_table	= adv7520_id,
+	.remove		= __devexit_p(adv7520_remove),
+};
+
+static int __init adv7520_init(void)
+{
+	int rc;
+
+	pr_info("%s\n", __func__);
+	external_common_state = &hdmi_common;
+	external_common_state->video_resolution = HDMI_VFRMT_1280x720p60_16_9;
+	HDMI_SETUP_LUT(640x480p60_4_3);		/* 25.20MHz */
+	HDMI_SETUP_LUT(720x480p60_16_9);	/* 27.03MHz */
+	HDMI_SETUP_LUT(1280x720p60_16_9);	/* 74.25MHz */
+
+	HDMI_SETUP_LUT(720x576p50_16_9);	/* 27.00MHz */
+	HDMI_SETUP_LUT(1280x720p50_16_9);	/* 74.25MHz */
+
+	hdmi_common_init_panel_info(&hdmi_panel_data.panel_info);
+
+	rc = i2c_add_driver(&hdmi_i2c_driver);
+	if (rc) {
+		pr_err("hdmi_init FAILED: i2c_add_driver rc=%d\n", rc);
+		goto init_exit;
+	}
+
+	if (machine_is_msm7x30_surf() || machine_is_msm8x55_surf()) {
+		/* Board-specific HDTV mux setup on SURF targets */
+		short *hdtv_mux = (short *)ioremap(0x8e000170, 0x100);
+		if (hdtv_mux) {
+			hdtv_mux[0] = 0x020b;
+			hdtv_mux[1] = 0x8000;
+			/* pass ioremap()'s return value to iounmap() */
+			iounmap(hdtv_mux);
+		}
+	}
+	wake_lock_init(&wlock, WAKE_LOCK_IDLE, "hdmi_active");
+
+	return 0;
+
+init_exit:
+	return rc;
+}
+
+static void __exit adv7520_exit(void)
+{
+	i2c_del_driver(&hdmi_i2c_driver);
+}
+
+module_init(adv7520_init);
+module_exit(adv7520_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.1");
+MODULE_AUTHOR("Qualcomm Innovation Center, Inc.");
+MODULE_DESCRIPTION("ADV7520 HDMI driver");
diff --git a/drivers/video/msm/ebi2_l2f.c b/drivers/video/msm/ebi2_l2f.c
new file mode 100644
index 0000000..767b802
--- /dev/null
+++ b/drivers/video/msm/ebi2_l2f.c
@@ -0,0 +1,566 @@
+/* Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_fb.h"
+
+#include <linux/memory.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include "linux/proc_fs.h"
+
+#include <linux/delay.h>
+
+#include <mach/hardware.h>
+#include <linux/io.h>
+
+#include <asm/system.h>
+#include <asm/mach-types.h>
+
+/* The following are for MSM5100 on Gator */
+#ifdef FEATURE_PM1000
+#include "pm1000.h"
+#endif /* FEATURE_PM1000 */
+/* The following are for MSM6050 on Bambi */
+#ifdef FEATURE_PMIC_LCDKBD_LED_DRIVER
+#include "pm.h"
+#endif /* FEATURE_PMIC_LCDKBD_LED_DRIVER */
+
+#ifdef DISP_DEVICE_18BPP
+#undef DISP_DEVICE_18BPP
+#define DISP_DEVICE_16BPP
+#endif
+
+#define QCIF_WIDTH        176
+#define QCIF_HEIGHT       220
+
+static void *DISP_CMD_PORT;
+static void *DISP_DATA_PORT;
+
+#define DISP_CMD_DISON    0xaf
+#define DISP_CMD_DISOFF   0xae
+#define DISP_CMD_DISNOR   0xa6
+#define DISP_CMD_DISINV   0xa7
+#define DISP_CMD_DISCTL   0xca
+#define DISP_CMD_GCP64    0xcb
+#define DISP_CMD_GCP16    0xcc
+#define DISP_CMD_GSSET    0xcd
+#define DISP_GS_2       0x02
+#define DISP_GS_16      0x01
+#define DISP_GS_64      0x00
+#define DISP_CMD_SLPIN    0x95
+#define DISP_CMD_SLPOUT   0x94
+#define DISP_CMD_SD_PSET  0x75
+#define DISP_CMD_MD_PSET  0x76
+#define DISP_CMD_SD_CSET  0x15
+#define DISP_CMD_MD_CSET  0x16
+#define DISP_CMD_DATCTL   0xbc
+#define DISP_DATCTL_666 0x08
+#define DISP_DATCTL_565 0x28
+#define DISP_DATCTL_444 0x38
+#define DISP_CMD_RAMWR    0x5c
+#define DISP_CMD_RAMRD    0x5d
+#define DISP_CMD_PTLIN    0xa8
+#define DISP_CMD_PTLOUT   0xa9
+#define DISP_CMD_ASCSET   0xaa
+#define DISP_CMD_SCSTART  0xab
+#define DISP_CMD_VOLCTL   0xc6
+#define DISP_VOLCTL_TONE 0x80
+#define DISP_CMD_NOp      0x25
+#define DISP_CMD_OSSEL    0xd0
+#define DISP_CMD_3500KSET 0xd1
+#define DISP_CMD_3500KEND 0xd2
+#define DISP_CMD_14MSET   0xd3
+#define DISP_CMD_14MEND   0xd4
+
+#define DISP_CMD_OUT(cmd) outpw(DISP_CMD_PORT, cmd);
+
+#define DISP_DATA_OUT(data) outpw(DISP_DATA_PORT, data);
+
+#define DISP_DATA_IN() inpw(DISP_DATA_PORT);
+
+/* Epson device column number starts at 2
+*/
+#define DISP_SET_RECT(ulhc_row, lrhc_row, ulhc_col, lrhc_col) \
+	  DISP_CMD_OUT(DISP_CMD_SD_PSET) \
+	  DISP_DATA_OUT((ulhc_row) & 0xFF) \
+	  DISP_DATA_OUT((ulhc_row) >> 8) \
+	  DISP_DATA_OUT((lrhc_row) & 0xFF) \
+	  DISP_DATA_OUT((lrhc_row) >> 8) \
+	  DISP_CMD_OUT(DISP_CMD_SD_CSET) \
+	  DISP_DATA_OUT(((ulhc_col)+2) & 0xFF) \
+	  DISP_DATA_OUT(((ulhc_col)+2) >> 8) \
+	  DISP_DATA_OUT(((lrhc_col)+2) & 0xFF) \
+	  DISP_DATA_OUT(((lrhc_col)+2) >> 8)
+
+#define DISP_MIN_CONTRAST      0
+#define DISP_MAX_CONTRAST      127
+#define DISP_DEFAULT_CONTRAST  80
+
+#define DISP_MIN_BACKLIGHT     0
+#define DISP_MAX_BACKLIGHT     15
+#define DISP_DEFAULT_BACKLIGHT 2
+
+#define WAIT_SEC(sec) mdelay((sec)/1000)
+
+static word disp_area_start_row;
+static word disp_area_end_row;
+static byte disp_contrast = DISP_DEFAULT_CONTRAST;
+static boolean disp_powered_up;
+static boolean disp_initialized = FALSE;
+/*
+ * For some reason the contrast set at init time is not good, so it is
+ * set again when the display is turned on.
+ */
+static boolean display_on = FALSE;
+static void epsonQcif_disp_init(struct platform_device *pdev);
+static void epsonQcif_disp_set_contrast(word contrast);
+static void epsonQcif_disp_set_display_area(word start_row, word end_row);
+static int epsonQcif_disp_off(struct platform_device *pdev);
+static int epsonQcif_disp_on(struct platform_device *pdev);
+static void epsonQcif_disp_set_rect(int x, int y, int xres, int yres);
+
+volatile word databack;
+static void epsonQcif_disp_init(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+
+	int i;
+
+	if (disp_initialized)
+		return;
+
+	mfd = platform_get_drvdata(pdev);
+
+	DISP_CMD_PORT = mfd->cmd_port;
+	DISP_DATA_PORT = mfd->data_port;
+
+	/* Sleep in */
+	DISP_CMD_OUT(DISP_CMD_SLPIN);
+
+	/* Display off */
+	DISP_CMD_OUT(DISP_CMD_DISOFF);
+
+	/* Display normal */
+	DISP_CMD_OUT(DISP_CMD_DISNOR);
+
+	/* Set data mode */
+	DISP_CMD_OUT(DISP_CMD_DATCTL);
+	DISP_DATA_OUT(DISP_DATCTL_565);
+
+	/* Set display timing */
+	DISP_CMD_OUT(DISP_CMD_DISCTL);
+	DISP_DATA_OUT(0x1c);	/* p1 */
+	DISP_DATA_OUT(0x02);	/* p1 */
+	DISP_DATA_OUT(0x82);	/* p2 */
+	DISP_DATA_OUT(0x00);	/* p3 */
+	DISP_DATA_OUT(0x00);	/* p4 */
+	DISP_DATA_OUT(0xe0);	/* p5 */
+	DISP_DATA_OUT(0x00);	/* p5 */
+	DISP_DATA_OUT(0xdc);	/* p6 */
+	DISP_DATA_OUT(0x00);	/* p6 */
+	DISP_DATA_OUT(0x02);	/* p7 */
+	DISP_DATA_OUT(0x00);	/* p8 */
+
+	/* Set 64 gray scale level */
+	DISP_CMD_OUT(DISP_CMD_GCP64);
+	DISP_DATA_OUT(0x08);	/* p01 */
+	DISP_DATA_OUT(0x00);
+	DISP_DATA_OUT(0x2a);	/* p02 */
+	DISP_DATA_OUT(0x00);
+	DISP_DATA_OUT(0x4e);	/* p03 */
+	DISP_DATA_OUT(0x00);
+	DISP_DATA_OUT(0x6b);	/* p04 */
+	DISP_DATA_OUT(0x00);
+	DISP_DATA_OUT(0x88);	/* p05 */
+	DISP_DATA_OUT(0x00);
+	DISP_DATA_OUT(0xa3);	/* p06 */
+	DISP_DATA_OUT(0x00);
+	DISP_DATA_OUT(0xba);	/* p07 */
+	DISP_DATA_OUT(0x00);
+	DISP_DATA_OUT(0xd1);	/* p08 */
+	DISP_DATA_OUT(0x00);
+	DISP_DATA_OUT(0xe5);	/* p09 */
+	DISP_DATA_OUT(0x00);
+	DISP_DATA_OUT(0xf3);	/* p10 */
+	DISP_DATA_OUT(0x00);
+	DISP_DATA_OUT(0x03);	/* p11 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0x13);	/* p12 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0x22);	/* p13 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0x2f);	/* p14 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0x3b);	/* p15 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0x46);	/* p16 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0x51);	/* p17 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0x5b);	/* p18 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0x64);	/* p19 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0x6c);	/* p20 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0x74);	/* p21 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0x7c);	/* p22 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0x83);	/* p23 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0x8a);	/* p24 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0x91);	/* p25 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0x98);	/* p26 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0x9f);	/* p27 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0xa6);	/* p28 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0xac);	/* p29 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0xb2);	/* p30 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0xb7);	/* p31 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0xbc);	/* p32 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0xc1);	/* p33 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0xc6);	/* p34 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0xcb);	/* p35 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0xd0);	/* p36 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0xd4);	/* p37 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0xd8);	/* p38 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0xdc);	/* p39 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0xe0);	/* p40 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0xe4);	/* p41 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0xe8);	/* p42 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0xec);	/* p43 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0xf0);	/* p44 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0xf4);	/* p45 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0xf8);	/* p46 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0xfb);	/* p47 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0xfe);	/* p48 */
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0x01);	/* p49 */
+	DISP_DATA_OUT(0x02);
+	DISP_DATA_OUT(0x03);	/* p50 */
+	DISP_DATA_OUT(0x02);
+	DISP_DATA_OUT(0x05);	/* p51 */
+	DISP_DATA_OUT(0x02);
+	DISP_DATA_OUT(0x07);	/* p52 */
+	DISP_DATA_OUT(0x02);
+	DISP_DATA_OUT(0x09);	/* p53 */
+	DISP_DATA_OUT(0x02);
+	DISP_DATA_OUT(0x0b);	/* p54 */
+	DISP_DATA_OUT(0x02);
+	DISP_DATA_OUT(0x0d);	/* p55 */
+	DISP_DATA_OUT(0x02);
+	DISP_DATA_OUT(0x0f);	/* p56 */
+	DISP_DATA_OUT(0x02);
+	DISP_DATA_OUT(0x11);	/* p57 */
+	DISP_DATA_OUT(0x02);
+	DISP_DATA_OUT(0x13);	/* p58 */
+	DISP_DATA_OUT(0x02);
+	DISP_DATA_OUT(0x15);	/* p59 */
+	DISP_DATA_OUT(0x02);
+	DISP_DATA_OUT(0x17);	/* p60 */
+	DISP_DATA_OUT(0x02);
+	DISP_DATA_OUT(0x19);	/* p61 */
+	DISP_DATA_OUT(0x02);
+	DISP_DATA_OUT(0x1b);	/* p62 */
+	DISP_DATA_OUT(0x02);
+	DISP_DATA_OUT(0x1c);	/* p63 */
+	DISP_DATA_OUT(0x02);
+
+	/* Set 16 gray scale level */
+	DISP_CMD_OUT(DISP_CMD_GCP16);
+	DISP_DATA_OUT(0x1a);	/* p01 */
+	DISP_DATA_OUT(0x32);	/* p02 */
+	DISP_DATA_OUT(0x42);	/* p03 */
+	DISP_DATA_OUT(0x4c);	/* p04 */
+	DISP_DATA_OUT(0x58);	/* p05 */
+	DISP_DATA_OUT(0x5f);	/* p06 */
+	DISP_DATA_OUT(0x66);	/* p07 */
+	DISP_DATA_OUT(0x6b);	/* p08 */
+	DISP_DATA_OUT(0x70);	/* p09 */
+	DISP_DATA_OUT(0x74);	/* p10 */
+	DISP_DATA_OUT(0x78);	/* p11 */
+	DISP_DATA_OUT(0x7b);	/* p12 */
+	DISP_DATA_OUT(0x7e);	/* p13 */
+	DISP_DATA_OUT(0x80);	/* p14 */
+	DISP_DATA_OUT(0x82);	/* p15 */
+
+	/* Set DSP column */
+	DISP_CMD_OUT(DISP_CMD_MD_CSET);
+	DISP_DATA_OUT(0xff);
+	DISP_DATA_OUT(0x03);
+	DISP_DATA_OUT(0xff);
+	DISP_DATA_OUT(0x03);
+
+	/* Set DSP page */
+	DISP_CMD_OUT(DISP_CMD_MD_PSET);
+	DISP_DATA_OUT(0xff);
+	DISP_DATA_OUT(0x01);
+	DISP_DATA_OUT(0xff);
+	DISP_DATA_OUT(0x01);
+
+	/* Set ARM column */
+	DISP_CMD_OUT(DISP_CMD_SD_CSET);
+	DISP_DATA_OUT(0x02);
+	DISP_DATA_OUT(0x00);
+	DISP_DATA_OUT((QCIF_WIDTH + 1) & 0xFF);
+	DISP_DATA_OUT((QCIF_WIDTH + 1) >> 8);
+
+	/* Set ARM page */
+	DISP_CMD_OUT(DISP_CMD_SD_PSET);
+	DISP_DATA_OUT(0x00);
+	DISP_DATA_OUT(0x00);
+	DISP_DATA_OUT((QCIF_HEIGHT - 1) & 0xFF);
+	DISP_DATA_OUT((QCIF_HEIGHT - 1) >> 8);
+
+	/* Set 64 gray scales */
+	DISP_CMD_OUT(DISP_CMD_GSSET);
+	DISP_DATA_OUT(DISP_GS_64);
+
+	DISP_CMD_OUT(DISP_CMD_OSSEL);
+	DISP_DATA_OUT(0);
+
+	/* Sleep out */
+	DISP_CMD_OUT(DISP_CMD_SLPOUT);
+
+	WAIT_SEC(40000);
+
+	/* Initialize power IC */
+	DISP_CMD_OUT(DISP_CMD_VOLCTL);
+	DISP_DATA_OUT(DISP_VOLCTL_TONE);
+
+	WAIT_SEC(40000);
+
+	/* Set electronic volume, d'xx */
+	DISP_CMD_OUT(DISP_CMD_VOLCTL);
+	DISP_DATA_OUT(DISP_DEFAULT_CONTRAST);	/* value from 0 to 127 */
+
+	/* Initialize display data */
+	DISP_SET_RECT(0, (QCIF_HEIGHT - 1), 0, (QCIF_WIDTH - 1));
+	DISP_CMD_OUT(DISP_CMD_RAMWR);
+	for (i = 0; i < QCIF_HEIGHT * QCIF_WIDTH; i++)
+		DISP_DATA_OUT(0xffff);
+
+	DISP_CMD_OUT(DISP_CMD_RAMRD);
+	databack = DISP_DATA_IN();
+	databack = DISP_DATA_IN();
+	databack = DISP_DATA_IN();
+	databack = DISP_DATA_IN();
+
+	WAIT_SEC(80000);
+
+	DISP_CMD_OUT(DISP_CMD_DISON);
+
+	disp_area_start_row = 0;
+	disp_area_end_row = QCIF_HEIGHT - 1;
+	disp_powered_up = TRUE;
+	disp_initialized = TRUE;
+	epsonQcif_disp_set_display_area(0, QCIF_HEIGHT - 1);
+	display_on = TRUE;
+}
+
+static void epsonQcif_disp_set_rect(int x, int y, int xres, int yres)
+{
+	if (!disp_initialized)
+		return;
+
+	DISP_SET_RECT(y, y + yres - 1, x, x + xres - 1);
+	DISP_CMD_OUT(DISP_CMD_RAMWR);
+}
+
+static void epsonQcif_disp_set_display_area(word start_row, word end_row)
+{
+	if (!disp_initialized)
+		return;
+
+	if ((start_row == disp_area_start_row)
+	    && (end_row == disp_area_end_row))
+		return;
+	disp_area_start_row = start_row;
+	disp_area_end_row = end_row;
+
+	/* Range checking */
+	if (end_row >= QCIF_HEIGHT)
+		end_row = QCIF_HEIGHT - 1;
+	if (start_row > end_row)
+		start_row = end_row;
+
+	/*
+	 * When the display is not the full screen, gray scale is set to
+	 * 2; otherwise it is set to 64.
+	 */
+	if ((start_row == 0) && (end_row == (QCIF_HEIGHT - 1))) {
+		/* The whole screen */
+		DISP_CMD_OUT(DISP_CMD_PTLOUT);
+		WAIT_SEC(10000);
+		DISP_CMD_OUT(DISP_CMD_DISOFF);
+		WAIT_SEC(100000);
+		DISP_CMD_OUT(DISP_CMD_GSSET);
+		DISP_DATA_OUT(DISP_GS_64);
+		WAIT_SEC(100000);
+		DISP_CMD_OUT(DISP_CMD_DISON);
+	} else {
+		/* partial screen */
+		DISP_CMD_OUT(DISP_CMD_PTLIN);
+		DISP_DATA_OUT(start_row);
+		DISP_DATA_OUT(start_row >> 8);
+		DISP_DATA_OUT(end_row);
+		DISP_DATA_OUT(end_row >> 8);
+		DISP_CMD_OUT(DISP_CMD_GSSET);
+		DISP_DATA_OUT(DISP_GS_2);
+	}
+}
+
+static int epsonQcif_disp_off(struct platform_device *pdev)
+{
+	if (!disp_initialized)
+		epsonQcif_disp_init(pdev);
+
+	if (display_on) {
+		DISP_CMD_OUT(DISP_CMD_DISOFF);
+		DISP_CMD_OUT(DISP_CMD_SLPIN);
+		display_on = FALSE;
+	}
+
+	return 0;
+}
+
+static int epsonQcif_disp_on(struct platform_device *pdev)
+{
+	if (!disp_initialized)
+		epsonQcif_disp_init(pdev);
+
+	if (!display_on) {
+		DISP_CMD_OUT(DISP_CMD_SLPOUT);
+		WAIT_SEC(40000);
+		DISP_CMD_OUT(DISP_CMD_DISON);
+		epsonQcif_disp_set_contrast(disp_contrast);
+		display_on = TRUE;
+	}
+
+	return 0;
+}
+
+static void epsonQcif_disp_set_contrast(word contrast)
+{
+	if (!disp_initialized)
+		return;
+
+	/* Initialize power IC, d'24 */
+	DISP_CMD_OUT(DISP_CMD_VOLCTL);
+	DISP_DATA_OUT(DISP_VOLCTL_TONE);
+
+	WAIT_SEC(40000);
+
+	/* Set electronic volume, d'xx */
+	DISP_CMD_OUT(DISP_CMD_VOLCTL);
+	if (contrast > 127)
+		contrast = 127;
+	DISP_DATA_OUT(contrast);	/* value from 0 to 127 */
+	disp_contrast = (byte) contrast;
+}				/* End disp_set_contrast */
+
+static void epsonQcif_disp_clear_screen_area(
+	word start_row, word end_row, word start_column, word end_column)
+{
+	int32 i;
+
+	/* Clear the display screen */
+	DISP_SET_RECT(start_row, end_row, start_column, end_column);
+	DISP_CMD_OUT(DISP_CMD_RAMWR);
+	i = (end_row - start_row + 1) * (end_column - start_column + 1);
+	for (; i > 0; i--)
+		DISP_DATA_OUT(0xffff);
+}
+
+static int __init epsonQcif_probe(struct platform_device *pdev)
+{
+	msm_fb_add_device(pdev);
+
+	return 0;
+}
+
+static struct platform_driver this_driver = {
+	.probe  = epsonQcif_probe,
+	.driver = {
+		.name   = "ebi2_epson_qcif",
+	},
+};
+
+static struct msm_fb_panel_data epsonQcif_panel_data = {
+	.on = epsonQcif_disp_on,
+	.off = epsonQcif_disp_off,
+	.set_rect = epsonQcif_disp_set_rect,
+};
+
+static struct platform_device this_device = {
+	.name   = "ebi2_epson_qcif",
+	.id	= 0,
+	.dev	= {
+		.platform_data = &epsonQcif_panel_data,
+	}
+};
+
+static int __init epsonQcif_init(void)
+{
+	int ret;
+	struct msm_panel_info *pinfo;
+
+	ret = platform_driver_register(&this_driver);
+	if (!ret) {
+		pinfo = &epsonQcif_panel_data.panel_info;
+		pinfo->xres = QCIF_WIDTH;
+		pinfo->yres = QCIF_HEIGHT;
+		MSM_FB_SINGLE_MODE_PANEL(pinfo);
+		pinfo->type = EBI2_PANEL;
+		pinfo->pdest = DISPLAY_2;
+		pinfo->wait_cycle = 0x808000;
+		pinfo->bpp = 16;
+		pinfo->fb_num = 2;
+		pinfo->lcd.vsync_enable = FALSE;
+
+		ret = platform_device_register(&this_device);
+		if (ret)
+			platform_driver_unregister(&this_driver);
+	}
+
+	return ret;
+}
+
+module_init(epsonQcif_init);
diff --git a/drivers/video/msm/ebi2_lcd.c b/drivers/video/msm/ebi2_lcd.c
new file mode 100644
index 0000000..68590af
--- /dev/null
+++ b/drivers/video/msm/ebi2_lcd.c
@@ -0,0 +1,268 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <linux/proc_fs.h>
+#include <linux/vmalloc.h>
+#include <linux/debugfs.h>
+
+#include "msm_fb.h"
+
+static int ebi2_lcd_probe(struct platform_device *pdev);
+static int ebi2_lcd_remove(struct platform_device *pdev);
+
+static int ebi2_lcd_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: suspending...\n");
+	return 0;
+}
+
+static int ebi2_lcd_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: resuming...\n");
+	return 0;
+}
+
+static struct dev_pm_ops ebi2_lcd_dev_pm_ops = {
+	.runtime_suspend = ebi2_lcd_runtime_suspend,
+	.runtime_resume = ebi2_lcd_runtime_resume,
+};
+
+static struct platform_driver ebi2_lcd_driver = {
+	.probe = ebi2_lcd_probe,
+	.remove = ebi2_lcd_remove,
+	.suspend = NULL,
+	.suspend_late = NULL,
+	.resume_early = NULL,
+	.resume = NULL,
+	.shutdown = NULL,
+	.driver = {
+		   .name = "ebi2_lcd",
+		   .pm = &ebi2_lcd_dev_pm_ops,
+		   },
+};
+
+static void *ebi2_base;
+static void *ebi2_lcd_cfg0;
+static void *ebi2_lcd_cfg1;
+static void __iomem *lcd01_base;
+static void __iomem *lcd02_base;
+static int ebi2_lcd_resource_initialized;
+
+static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
+static int pdev_list_cnt;
+
+static int ebi2_lcd_probe(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+	struct platform_device *mdp_dev = NULL;
+	struct msm_fb_panel_data *pdata = NULL;
+	int rc, i;
+
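+	/*
+	 * pdev id 0 carries only the EBI2 register resources; map them once
+	 * here and let the later panel probes use them.
+	 */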
+	if (pdev->id == 0) {
+		for (i = 0; i < pdev->num_resources; i++) {
+			if (!strncmp(pdev->resource[i].name, "base", 4)) {
+				ebi2_base = ioremap(pdev->resource[i].start,
+						pdev->resource[i].end -
+						pdev->resource[i].start + 1);
+				if (!ebi2_base) {
+					printk(KERN_ERR
+						"ebi2_base ioremap failed!\n");
+					return -ENOMEM;
+				}
+				ebi2_lcd_cfg0 = (void *)(ebi2_base + 0x20);
+				ebi2_lcd_cfg1 = (void *)(ebi2_base + 0x24);
+			} else if (!strncmp(pdev->resource[i].name,
+						"lcd01", 5)) {
+				lcd01_base = ioremap(pdev->resource[i].start,
+						pdev->resource[i].end -
+						pdev->resource[i].start + 1);
+				if (!lcd01_base) {
+					printk(KERN_ERR
+						"lcd01_base ioremap failed!\n");
+					return -ENOMEM;
+				}
+			} else if (!strncmp(pdev->resource[i].name,
+						"lcd02", 5)) {
+				lcd02_base = ioremap(pdev->resource[i].start,
+						pdev->resource[i].end -
+						pdev->resource[i].start + 1);
+				if (!lcd02_base) {
+					printk(KERN_ERR
+						"lcd02_base ioremap failed!\n");
+					return -ENOMEM;
+				}
+			}
+		}
+		ebi2_lcd_resource_initialized = 1;
+		return 0;
+	}
+
+	if (!ebi2_lcd_resource_initialized)
+		return -EPERM;
+
+	mfd = platform_get_drvdata(pdev);
+
+	if (!mfd)
+		return -ENODEV;
+
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
+		return -ENOMEM;
+
+	if (ebi2_base == NULL)
+		return -ENOMEM;
+
+	mdp_dev = platform_device_alloc("mdp", pdev->id);
+	if (!mdp_dev)
+		return -ENOMEM;
+
+	/* link to the latest pdev */
+	mfd->pdev = mdp_dev;
+	mfd->dest = DISPLAY_LCD;
+
+	/* add panel data */
+	if (platform_device_add_data
+	    (mdp_dev, pdev->dev.platform_data,
+	     sizeof(struct msm_fb_panel_data))) {
+		printk(KERN_ERR "ebi2_lcd_probe: platform_device_add_data failed!\n");
+		platform_device_put(mdp_dev);
+		return -ENOMEM;
+	}
+
+	/* data chain */
+	pdata = mdp_dev->dev.platform_data;
+	pdata->on = panel_next_on;
+	pdata->off = panel_next_off;
+	pdata->next = pdev;
+
+	/* get/set panel specific fb info */
+	mfd->panel_info = pdata->panel_info;
+
+	if (mfd->panel_info.bpp == 24)
+		mfd->fb_imgType = MDP_RGB_888;
+	else
+		mfd->fb_imgType = MDP_RGB_565;
+
+	/* config msm ebi2 lcd register */
+	if (mfd->panel_info.pdest == DISPLAY_1) {
+		outp32(ebi2_base,
+		       (inp32(ebi2_base) & (~(EBI2_PRIM_LCD_CLR))) |
+		       EBI2_PRIM_LCD_SEL);
+		/*
+		 * The current design has one set of CFG0/CFG1 registers
+		 * controlling both EBI2 channels, so the PRIM channel is
+		 * used to configure both.
+		 */
+		outp32(ebi2_lcd_cfg0, mfd->panel_info.wait_cycle);
+		if (mfd->panel_info.bpp == 18)
+			outp32(ebi2_lcd_cfg1, 0x01000000);
+		else
+			outp32(ebi2_lcd_cfg1, 0x0);
+	} else {
+#ifdef DEBUG_EBI2_LCD
+		/*
+		 * Conflicts with the QCOM SURF FPGA chip select.
+		 * OEMs should enable the code below for their CS mapping.
+		 */
+		 outp32(ebi2_base, (inp32(ebi2_base)&(~(EBI2_SECD_LCD_CLR)))
+					|EBI2_SECD_LCD_SEL);
+#endif
+	}
+
+	/*
+	 * map cs (chip select) address
+	 */
+	if (mfd->panel_info.pdest == DISPLAY_1) {
+		mfd->cmd_port = lcd01_base;
+		mfd->data_port =
+		    (void *)((uint32) mfd->cmd_port + EBI2_PRIM_LCD_RS_PIN);
+		mfd->data_port_phys =
+		    (void *)(LCD_PRIM_BASE_PHYS + EBI2_PRIM_LCD_RS_PIN);
+	} else {
+		mfd->cmd_port = lcd01_base;
+		mfd->data_port =
+		    (void *)((uint32) mfd->cmd_port + EBI2_SECD_LCD_RS_PIN);
+		mfd->data_port_phys =
+		    (void *)(LCD_SECD_BASE_PHYS + EBI2_SECD_LCD_RS_PIN);
+	}
+
+	/*
+	 * set driver data
+	 */
+	platform_set_drvdata(mdp_dev, mfd);
+
+	/*
+	 * register in mdp driver
+	 */
+	rc = platform_device_add(mdp_dev);
+	if (rc)
+		goto ebi2_lcd_probe_err;
+
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	pdev_list[pdev_list_cnt++] = pdev;
+	return 0;
+
+ebi2_lcd_probe_err:
+	platform_device_put(mdp_dev);
+	return rc;
+}
+
+static int ebi2_lcd_remove(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+
+	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
+
+	if (!mfd)
+		return 0;
+
+	if (mfd->key != MFD_KEY)
+		return 0;
+
+	iounmap(mfd->cmd_port);
+	pm_runtime_disable(&pdev->dev);
+	return 0;
+}
+
+static int ebi2_lcd_register_driver(void)
+{
+	return platform_driver_register(&ebi2_lcd_driver);
+}
+
+static int __init ebi2_lcd_driver_init(void)
+{
+	return ebi2_lcd_register_driver();
+}
+
+module_init(ebi2_lcd_driver_init);
diff --git a/drivers/video/msm/ebi2_tmd20.c b/drivers/video/msm/ebi2_tmd20.c
new file mode 100644
index 0000000..280373f
--- /dev/null
+++ b/drivers/video/msm/ebi2_tmd20.c
@@ -0,0 +1,1120 @@
+/* Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_fb.h"
+
+#include <linux/memory.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include "linux/proc_fs.h"
+
+#include <linux/delay.h>
+
+#include <mach/hardware.h>
+#include <linux/io.h>
+
+#include <asm/system.h>
+#include <asm/mach-types.h>
+
+/* #define TMD20QVGA_LCD_18BPP */
+#define QVGA_WIDTH        240
+#define QVGA_HEIGHT       320
+
+#ifdef TMD20QVGA_LCD_18BPP
+#define DISP_QVGA_18BPP(x)  ((((x)<<2) & 0x3FC00)|(( (x)<<1)& 0x1FE))
+#define DISP_REG(name)  uint32 register_##name;
+#define OUTPORT(x, y)  outpdw(x, y)
+#define INPORT(x)   inpdw(x)
+#else
+#define DISP_QVGA_18BPP(x)  (x)
+#define DISP_REG(name)  uint16 register_##name;
+#define OUTPORT(x, y)  outpw(x, y)
+#define INPORT(x)   inpw(x)
+#endif
+
+static void *DISP_CMD_PORT;
+static void *DISP_DATA_PORT;
+
+#define DISP_RNTI         0x10
+
+#define DISP_CMD_OUT(cmd) OUTPORT(DISP_CMD_PORT, DISP_QVGA_18BPP(cmd))
+#define DISP_DATA_OUT(data) OUTPORT(DISP_DATA_PORT, data)
+#define DISP_DATA_IN() INPORT(DISP_DATA_PORT)
+
+#if (defined(TMD20QVGA_LCD_18BPP))
+#define DISP_DATA_OUT_16TO18BPP(x) \
+	DISP_DATA_OUT((((x)&0xf800)<<2|((x)&0x80000)>>3) \
+		     | (((x)&0x7e0)<<1) \
+		     | (((x)&0x1F)<<1|((x)&0x10)>>4))
+#else
+#define DISP_DATA_OUT_16TO18BPP(x) \
+	DISP_DATA_OUT(x)
+#endif
+
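+/*
+ * Write a register: cache the value in the register_<addr> shadow variable,
+ * then issue the index write followed by the data write.
+ */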
+#define DISP_WRITE_OUT(addr, data) \
+   register_##addr = DISP_QVGA_18BPP(data); \
+   DISP_CMD_OUT(addr); \
+   DISP_DATA_OUT(register_##addr);
+
+#define DISP_UPDATE_VALUE(addr, bitmask, data) \
+   DISP_WRITE_OUT(addr, (register_##addr & ~(bitmask)) | (data));
+
+#define DISP_VAL_IF(bitvalue, bitmask) \
+   ((bitvalue) ? (bitmask) : 0)
+
+/* QVGA = 256 x 320 */
+/* actual display is 240 x 320...offset by 0x10 */
+#define DISP_ROW_COL_TO_ADDR(row, col) ((row) * 0x100 + col)
+#define DISP_SET_RECT(ulhc_row, lrhc_row, ulhc_col, lrhc_col) \
+   { \
+   DISP_WRITE_OUT(DISP_HORZ_RAM_ADDR_POS_1_ADDR, (ulhc_col) + tmd20qvga_panel_offset); \
+   DISP_WRITE_OUT(DISP_HORZ_RAM_ADDR_POS_2_ADDR, (lrhc_col) + tmd20qvga_panel_offset); \
+   DISP_WRITE_OUT(DISP_VERT_RAM_ADDR_POS_1_ADDR, (ulhc_row)); \
+   DISP_WRITE_OUT(DISP_VERT_RAM_ADDR_POS_2_ADDR, (lrhc_row)); \
+   DISP_WRITE_OUT(DISP_RAM_ADDR_SET_1_ADDR, (ulhc_col) + tmd20qvga_panel_offset); \
+   DISP_WRITE_OUT(DISP_RAM_ADDR_SET_2_ADDR, (ulhc_row)); \
+   }
+
+#define WAIT_MSEC(msec) mdelay(msec)
+
+/*
+ * TMD QVGA Address
+ */
+/* Display Control */
+#define DISP_START_OSCILLATION_ADDR     0x000
+DISP_REG(DISP_START_OSCILLATION_ADDR)
+#define DISP_DRIVER_OUTPUT_CTL_ADDR     0x001
+    DISP_REG(DISP_DRIVER_OUTPUT_CTL_ADDR)
+#define DISP_LCD_DRIVING_SIG_ADDR     0x002
+    DISP_REG(DISP_LCD_DRIVING_SIG_ADDR)
+#define DISP_ENTRY_MODE_ADDR            0x003
+    DISP_REG(DISP_ENTRY_MODE_ADDR)
+#define DISP_DISPLAY_CTL_1_ADDR         0x007
+    DISP_REG(DISP_DISPLAY_CTL_1_ADDR)
+#define DISP_DISPLAY_CTL_2_ADDR         0x008
+    DISP_REG(DISP_DISPLAY_CTL_2_ADDR)
+
+/* DISPLAY MODE 0x009 partial display not supported */
+#define DISP_POWER_SUPPLY_INTF_ADDR     0x00A
+    DISP_REG(DISP_POWER_SUPPLY_INTF_ADDR)
+
+/* DISPLAY MODE 0x00B xZoom feature is not supported */
+#define DISP_EXT_DISPLAY_CTL_1_ADDR     0x00C
+    DISP_REG(DISP_EXT_DISPLAY_CTL_1_ADDR)
+
+#define DISP_FRAME_CYCLE_CTL_ADDR       0x00D
+    DISP_REG(DISP_FRAME_CYCLE_CTL_ADDR)
+
+#define DISP_EXT_DISPLAY_CTL_2_ADDR     0x00E
+    DISP_REG(DISP_EXT_DISPLAY_CTL_2_ADDR)
+
+#define DISP_EXT_DISPLAY_CTL_3_ADDR     0x00F
+    DISP_REG(DISP_EXT_DISPLAY_CTL_3_ADDR)
+
+#define DISP_LTPS_CTL_1_ADDR            0x012
+    DISP_REG(DISP_LTPS_CTL_1_ADDR)
+#define DISP_LTPS_CTL_2_ADDR            0x013
+    DISP_REG(DISP_LTPS_CTL_2_ADDR)
+#define DISP_LTPS_CTL_3_ADDR            0x014
+    DISP_REG(DISP_LTPS_CTL_3_ADDR)
+#define DISP_LTPS_CTL_4_ADDR            0x018
+    DISP_REG(DISP_LTPS_CTL_4_ADDR)
+#define DISP_LTPS_CTL_5_ADDR            0x019
+    DISP_REG(DISP_LTPS_CTL_5_ADDR)
+#define DISP_LTPS_CTL_6_ADDR            0x01A
+    DISP_REG(DISP_LTPS_CTL_6_ADDR)
+#define DISP_AMP_SETTING_ADDR           0x01C
+    DISP_REG(DISP_AMP_SETTING_ADDR)
+#define DISP_MODE_SETTING_ADDR          0x01D
+    DISP_REG(DISP_MODE_SETTING_ADDR)
+#define DISP_POFF_LN_SETTING_ADDR       0x01E
+    DISP_REG(DISP_POFF_LN_SETTING_ADDR)
+/* Power Control */
+#define DISP_POWER_CTL_1_ADDR           0x100
+    DISP_REG(DISP_POWER_CTL_1_ADDR)
+#define DISP_POWER_CTL_2_ADDR           0x101
+    DISP_REG(DISP_POWER_CTL_2_ADDR)
+#define DISP_POWER_CTL_3_ADDR           0x102
+    DISP_REG(DISP_POWER_CTL_3_ADDR)
+#define DISP_POWER_CTL_4_ADDR           0x103
+    DISP_REG(DISP_POWER_CTL_4_ADDR)
+#define DISP_POWER_CTL_5_ADDR           0x104
+    DISP_REG(DISP_POWER_CTL_5_ADDR)
+#define DISP_POWER_CTL_6_ADDR           0x105
+    DISP_REG(DISP_POWER_CTL_6_ADDR)
+#define DISP_POWER_CTL_7_ADDR           0x106
+    DISP_REG(DISP_POWER_CTL_7_ADDR)
+/* RAM Access */
+#define DISP_RAM_ADDR_SET_1_ADDR        0x200
+    DISP_REG(DISP_RAM_ADDR_SET_1_ADDR)
+#define DISP_RAM_ADDR_SET_2_ADDR        0x201
+    DISP_REG(DISP_RAM_ADDR_SET_2_ADDR)
+#define DISP_CMD_RAMRD                  DISP_CMD_RAMWR
+#define DISP_CMD_RAMWR                  0x202
+    DISP_REG(DISP_CMD_RAMWR)
+#define DISP_RAM_DATA_MASK_1_ADDR       0x203
+    DISP_REG(DISP_RAM_DATA_MASK_1_ADDR)
+#define DISP_RAM_DATA_MASK_2_ADDR       0x204
+    DISP_REG(DISP_RAM_DATA_MASK_2_ADDR)
+/* Gamma Control, Contrast, Gray Scale Setting */
+#define DISP_GAMMA_CONTROL_1_ADDR       0x300
+    DISP_REG(DISP_GAMMA_CONTROL_1_ADDR)
+#define DISP_GAMMA_CONTROL_2_ADDR       0x301
+    DISP_REG(DISP_GAMMA_CONTROL_2_ADDR)
+#define DISP_GAMMA_CONTROL_3_ADDR       0x302
+    DISP_REG(DISP_GAMMA_CONTROL_3_ADDR)
+#define DISP_GAMMA_CONTROL_4_ADDR       0x303
+    DISP_REG(DISP_GAMMA_CONTROL_4_ADDR)
+#define DISP_GAMMA_CONTROL_5_ADDR       0x304
+    DISP_REG(DISP_GAMMA_CONTROL_5_ADDR)
+/* Coordinate Control */
+#define DISP_VERT_SCROLL_CTL_1_ADDR     0x400
+    DISP_REG(DISP_VERT_SCROLL_CTL_1_ADDR)
+#define DISP_VERT_SCROLL_CTL_2_ADDR     0x401
+    DISP_REG(DISP_VERT_SCROLL_CTL_2_ADDR)
+#define DISP_SCREEN_1_DRV_POS_1_ADDR    0x402
+    DISP_REG(DISP_SCREEN_1_DRV_POS_1_ADDR)
+#define DISP_SCREEN_1_DRV_POS_2_ADDR    0x403
+    DISP_REG(DISP_SCREEN_1_DRV_POS_2_ADDR)
+#define DISP_SCREEN_2_DRV_POS_1_ADDR    0x404
+    DISP_REG(DISP_SCREEN_2_DRV_POS_1_ADDR)
+#define DISP_SCREEN_2_DRV_POS_2_ADDR    0x405
+    DISP_REG(DISP_SCREEN_2_DRV_POS_2_ADDR)
+#define DISP_HORZ_RAM_ADDR_POS_1_ADDR   0x406
+    DISP_REG(DISP_HORZ_RAM_ADDR_POS_1_ADDR)
+#define DISP_HORZ_RAM_ADDR_POS_2_ADDR   0x407
+    DISP_REG(DISP_HORZ_RAM_ADDR_POS_2_ADDR)
+#define DISP_VERT_RAM_ADDR_POS_1_ADDR   0x408
+    DISP_REG(DISP_VERT_RAM_ADDR_POS_1_ADDR)
+#define DISP_VERT_RAM_ADDR_POS_2_ADDR   0x409
+    DISP_REG(DISP_VERT_RAM_ADDR_POS_2_ADDR)
+#define DISP_TMD_700_ADDR               0x700	/*  0x700 */
+    DISP_REG(DISP_TMD_700_ADDR)
+#define DISP_TMD_015_ADDR               0x015
+    DISP_REG(DISP_TMD_015_ADDR)
+#define DISP_TMD_305_ADDR               0x305
+    DISP_REG(DISP_TMD_305_ADDR)
+
+/*
+ * TMD QVGA Bit Definitions
+ */
+
+#define DISP_BIT_IB15              0x8000
+#define DISP_BIT_IB14              0x4000
+#define DISP_BIT_IB13              0x2000
+#define DISP_BIT_IB12              0x1000
+#define DISP_BIT_IB11              0x0800
+#define DISP_BIT_IB10              0x0400
+#define DISP_BIT_IB09              0x0200
+#define DISP_BIT_IB08              0x0100
+#define DISP_BIT_IB07              0x0080
+#define DISP_BIT_IB06              0x0040
+#define DISP_BIT_IB05              0x0020
+#define DISP_BIT_IB04              0x0010
+#define DISP_BIT_IB03              0x0008
+#define DISP_BIT_IB02              0x0004
+#define DISP_BIT_IB01              0x0002
+#define DISP_BIT_IB00              0x0001
+/*
+ * Display Control
+ * DISP_START_OSCILLATION_ADDR     Start Oscillation
+ * DISP_DRIVER_OUTPUT_CTL_ADDR     Driver Output Control
+ */
+#define DISP_BITMASK_SS            DISP_BIT_IB08
+#define DISP_BITMASK_NL5           DISP_BIT_IB05
+#define DISP_BITMASK_NL4           DISP_BIT_IB04
+#define DISP_BITMASK_NL3           DISP_BIT_IB03
+#define DISP_BITMASK_NL2           DISP_BIT_IB02
+#define DISP_BITMASK_NL1           DISP_BIT_IB01
+#define DISP_BITMASK_NL0           DISP_BIT_IB00
+/* DISP_LCD_DRIVING_SIG_ADDR       LCD Driving Signal Setting */
+#define DISP_BITMASK_BC            DISP_BIT_IB09
+/* DISP_ENTRY_MODE_ADDR            Entry Mode */
+#define DISP_BITMASK_TRI           DISP_BIT_IB15
+#define DISP_BITMASK_DFM1          DISP_BIT_IB14
+#define DISP_BITMASK_DFM0          DISP_BIT_IB13
+#define DISP_BITMASK_BGR           DISP_BIT_IB12
+#define DISP_BITMASK_HWM0          DISP_BIT_IB08
+#define DISP_BITMASK_ID1           DISP_BIT_IB05
+#define DISP_BITMASK_ID0           DISP_BIT_IB04
+#define DISP_BITMASK_AM            DISP_BIT_IB03
+/* DISP_DISPLAY_CTL_1_ADDR         Display Control (1) */
+#define DISP_BITMASK_COL1          DISP_BIT_IB15
+#define DISP_BITMASK_COL0          DISP_BIT_IB14
+#define DISP_BITMASK_VLE2          DISP_BIT_IB10
+#define DISP_BITMASK_VLE1          DISP_BIT_IB09
+#define DISP_BITMASK_SPT           DISP_BIT_IB08
+#define DISP_BITMASK_PT1           DISP_BIT_IB07
+#define DISP_BITMASK_PT0           DISP_BIT_IB06
+#define DISP_BITMASK_REV           DISP_BIT_IB02
+/* DISP_DISPLAY_CTL_2_ADDR         Display Control (2) */
+#define DISP_BITMASK_FP3           DISP_BIT_IB11
+#define DISP_BITMASK_FP2           DISP_BIT_IB10
+#define DISP_BITMASK_FP1           DISP_BIT_IB09
+#define DISP_BITMASK_FP0           DISP_BIT_IB08
+#define DISP_BITMASK_BP3           DISP_BIT_IB03
+#define DISP_BITMASK_BP2           DISP_BIT_IB02
+#define DISP_BITMASK_BP1           DISP_BIT_IB01
+#define DISP_BITMASK_BP0           DISP_BIT_IB00
+/* DISP_POWER_SUPPLY_INTF_ADDR     Power Supply IC Interface Control */
+#define DISP_BITMASK_CSE           DISP_BIT_IB12
+#define DISP_BITMASK_TE            DISP_BIT_IB08
+#define DISP_BITMASK_IX3           DISP_BIT_IB03
+#define DISP_BITMASK_IX2           DISP_BIT_IB02
+#define DISP_BITMASK_IX1           DISP_BIT_IB01
+#define DISP_BITMASK_IX0           DISP_BIT_IB00
+/* DISP_EXT_DISPLAY_CTL_1_ADDR     External Display Interface Control (1) */
+#define DISP_BITMASK_RM            DISP_BIT_IB08
+#define DISP_BITMASK_DM1           DISP_BIT_IB05
+#define DISP_BITMASK_DM0           DISP_BIT_IB04
+#define DISP_BITMASK_RIM1          DISP_BIT_IB01
+#define DISP_BITMASK_RIM0          DISP_BIT_IB00
+/* DISP_FRAME_CYCLE_CTL_ADDR       Frame Frequency Adjustment Control */
+#define DISP_BITMASK_DIVI1         DISP_BIT_IB09
+#define DISP_BITMASK_DIVI0         DISP_BIT_IB08
+#define DISP_BITMASK_RTNI4         DISP_BIT_IB04
+#define DISP_BITMASK_RTNI3         DISP_BIT_IB03
+#define DISP_BITMASK_RTNI2         DISP_BIT_IB02
+#define DISP_BITMASK_RTNI1         DISP_BIT_IB01
+#define DISP_BITMASK_RTNI0         DISP_BIT_IB00
+/* DISP_EXT_DISPLAY_CTL_2_ADDR     External Display Interface Control (2) */
+#define DISP_BITMASK_DIVE1         DISP_BIT_IB09
+#define DISP_BITMASK_DIVE0         DISP_BIT_IB08
+#define DISP_BITMASK_RTNE7         DISP_BIT_IB07
+#define DISP_BITMASK_RTNE6         DISP_BIT_IB06
+#define DISP_BITMASK_RTNE5         DISP_BIT_IB05
+#define DISP_BITMASK_RTNE4         DISP_BIT_IB04
+#define DISP_BITMASK_RTNE3         DISP_BIT_IB03
+#define DISP_BITMASK_RTNE2         DISP_BIT_IB02
+#define DISP_BITMASK_RTNE1         DISP_BIT_IB01
+#define DISP_BITMASK_RTNE0         DISP_BIT_IB00
+/* DISP_EXT_DISPLAY_CTL_3_ADDR     External Display Interface Control (3) */
+#define DISP_BITMASK_VSPL          DISP_BIT_IB04
+#define DISP_BITMASK_HSPL          DISP_BIT_IB03
+#define DISP_BITMASK_VPL           DISP_BIT_IB02
+#define DISP_BITMASK_EPL           DISP_BIT_IB01
+#define DISP_BITMASK_DPL           DISP_BIT_IB00
+/* DISP_LTPS_CTL_1_ADDR            LTPS Interface Control (1) */
+#define DISP_BITMASK_CLWI3         DISP_BIT_IB11
+#define DISP_BITMASK_CLWI2         DISP_BIT_IB10
+#define DISP_BITMASK_CLWI1         DISP_BIT_IB09
+#define DISP_BITMASK_CLWI0         DISP_BIT_IB08
+#define DISP_BITMASK_CLTI1         DISP_BIT_IB01
+#define DISP_BITMASK_CLTI0         DISP_BIT_IB00
+/* DISP_LTPS_CTL_2_ADDR            LTPS Interface Control (2) */
+#define DISP_BITMASK_OEVBI1        DISP_BIT_IB09
+#define DISP_BITMASK_OEVBI0        DISP_BIT_IB08
+#define DISP_BITMASK_OEVFI1        DISP_BIT_IB01
+#define DISP_BITMASK_OEVFI0        DISP_BIT_IB00
+/* DISP_LTPS_CTL_3_ADDR            LTPS Interface Control (3) */
+#define DISP_BITMASK_SHI1          DISP_BIT_IB01
+#define DISP_BITMASK_SHI0          DISP_BIT_IB00
+/* DISP_LTPS_CTL_4_ADDR            LTPS Interface Control (4) */
+#define DISP_BITMASK_CLWE5         DISP_BIT_IB13
+#define DISP_BITMASK_CLWE4         DISP_BIT_IB12
+#define DISP_BITMASK_CLWE3         DISP_BIT_IB11
+#define DISP_BITMASK_CLWE2         DISP_BIT_IB10
+#define DISP_BITMASK_CLWE1         DISP_BIT_IB09
+#define DISP_BITMASK_CLWE0         DISP_BIT_IB08
+#define DISP_BITMASK_CLTE3         DISP_BIT_IB03
+#define DISP_BITMASK_CLTE2         DISP_BIT_IB02
+#define DISP_BITMASK_CLTE1         DISP_BIT_IB01
+#define DISP_BITMASK_CLTE0         DISP_BIT_IB00
+/* DISP_LTPS_CTL_5_ADDR            LTPS Interface Control (5) */
+#define DISP_BITMASK_OEVBE3        DISP_BIT_IB11
+#define DISP_BITMASK_OEVBE2        DISP_BIT_IB10
+#define DISP_BITMASK_OEVBE1        DISP_BIT_IB09
+#define DISP_BITMASK_OEVBE0        DISP_BIT_IB08
+#define DISP_BITMASK_OEVFE3        DISP_BIT_IB03
+#define DISP_BITMASK_OEVFE2        DISP_BIT_IB02
+#define DISP_BITMASK_OEVFE1        DISP_BIT_IB01
+#define DISP_BITMASK_OEVFE0        DISP_BIT_IB00
+/* DISP_LTPS_CTL_6_ADDR            LTPS Interface Control (6) */
+#define DISP_BITMASK_SHE3          DISP_BIT_IB03
+#define DISP_BITMASK_SHE2          DISP_BIT_IB02
+#define DISP_BITMASK_SHE1          DISP_BIT_IB01
+#define DISP_BITMASK_SHE0          DISP_BIT_IB00
+/* DISP_AMP_SETTING_ADDR           Amplify Setting */
+#define DISP_BITMASK_ABSW1         DISP_BIT_IB01
+#define DISP_BITMASK_ABSW0         DISP_BIT_IB00
+/* DISP_MODE_SETTING_ADDR          Mode Setting */
+#define DISP_BITMASK_DSTB          DISP_BIT_IB02
+#define DISP_BITMASK_STB           DISP_BIT_IB00
+/* DISP_POFF_LN_SETTING_ADDR       Power Off Line Setting */
+#define DISP_BITMASK_POFH3         DISP_BIT_IB03
+#define DISP_BITMASK_POFH2         DISP_BIT_IB02
+#define DISP_BITMASK_POFH1         DISP_BIT_IB01
+#define DISP_BITMASK_POFH0         DISP_BIT_IB00
+
+/* Power Control */
+/* DISP_POWER_CTL_1_ADDR           Power Control (1) */
+#define DISP_BITMASK_PO            DISP_BIT_IB11
+#define DISP_BITMASK_VCD           DISP_BIT_IB09
+#define DISP_BITMASK_VSC           DISP_BIT_IB08
+#define DISP_BITMASK_CON           DISP_BIT_IB07
+#define DISP_BITMASK_ASW1          DISP_BIT_IB06
+#define DISP_BITMASK_ASW0          DISP_BIT_IB05
+#define DISP_BITMASK_OEV           DISP_BIT_IB04
+#define DISP_BITMASK_OEVE          DISP_BIT_IB03
+#define DISP_BITMASK_FR            DISP_BIT_IB02
+#define DISP_BITMASK_D1            DISP_BIT_IB01
+#define DISP_BITMASK_D0            DISP_BIT_IB00
+/* DISP_POWER_CTL_2_ADDR           Power Control (2) */
+#define DISP_BITMASK_DC4           DISP_BIT_IB15
+#define DISP_BITMASK_DC3           DISP_BIT_IB14
+#define DISP_BITMASK_SAP2          DISP_BIT_IB13
+#define DISP_BITMASK_SAP1          DISP_BIT_IB12
+#define DISP_BITMASK_SAP0          DISP_BIT_IB11
+#define DISP_BITMASK_BT2           DISP_BIT_IB10
+#define DISP_BITMASK_BT1           DISP_BIT_IB09
+#define DISP_BITMASK_BT0           DISP_BIT_IB08
+#define DISP_BITMASK_DC2           DISP_BIT_IB07
+#define DISP_BITMASK_DC1           DISP_BIT_IB06
+#define DISP_BITMASK_DC0           DISP_BIT_IB05
+#define DISP_BITMASK_AP2           DISP_BIT_IB04
+#define DISP_BITMASK_AP1           DISP_BIT_IB03
+#define DISP_BITMASK_AP0           DISP_BIT_IB02
+/* DISP_POWER_CTL_3_ADDR           Power Control (3) */
+#define DISP_BITMASK_VGL4          DISP_BIT_IB10
+#define DISP_BITMASK_VGL3          DISP_BIT_IB09
+#define DISP_BITMASK_VGL2          DISP_BIT_IB08
+#define DISP_BITMASK_VGL1          DISP_BIT_IB07
+#define DISP_BITMASK_VGL0          DISP_BIT_IB06
+#define DISP_BITMASK_VGH4          DISP_BIT_IB04
+#define DISP_BITMASK_VGH3          DISP_BIT_IB03
+#define DISP_BITMASK_VGH2          DISP_BIT_IB02
+#define DISP_BITMASK_VGH1          DISP_BIT_IB01
+#define DISP_BITMASK_VGH0          DISP_BIT_IB00
+/* DISP_POWER_CTL_4_ADDR           Power Control (4) */
+#define DISP_BITMASK_VC2           DISP_BIT_IB02
+#define DISP_BITMASK_VC1           DISP_BIT_IB01
+#define DISP_BITMASK_VC0           DISP_BIT_IB00
+/* DISP_POWER_CTL_5_ADDR           Power Control (5) */
+#define DISP_BITMASK_VRL3          DISP_BIT_IB11
+#define DISP_BITMASK_VRL2          DISP_BIT_IB10
+#define DISP_BITMASK_VRL1          DISP_BIT_IB09
+#define DISP_BITMASK_VRL0          DISP_BIT_IB08
+#define DISP_BITMASK_PON           DISP_BIT_IB04
+#define DISP_BITMASK_VRH3          DISP_BIT_IB03
+#define DISP_BITMASK_VRH2          DISP_BIT_IB02
+#define DISP_BITMASK_VRH1          DISP_BIT_IB01
+#define DISP_BITMASK_VRH0          DISP_BIT_IB00
+/* DISP_POWER_CTL_6_ADDR           Power Control (6) */
+#define DISP_BITMASK_VCOMG         DISP_BIT_IB13
+#define DISP_BITMASK_VDV4          DISP_BIT_IB12
+#define DISP_BITMASK_VDV3          DISP_BIT_IB11
+#define DISP_BITMASK_VDV2          DISP_BIT_IB10
+#define DISP_BITMASK_VDV1          DISP_BIT_IB09
+#define DISP_BITMASK_VDV0          DISP_BIT_IB08
+#define DISP_BITMASK_VCM4          DISP_BIT_IB04
+#define DISP_BITMASK_VCM3          DISP_BIT_IB03
+#define DISP_BITMASK_VCM2          DISP_BIT_IB02
+#define DISP_BITMASK_VCM1          DISP_BIT_IB01
+#define DISP_BITMASK_VCM0          DISP_BIT_IB00
+/* RAM Access */
+/* DISP_RAM_ADDR_SET_1_ADDR        RAM Address Set (1) */
+#define DISP_BITMASK_AD7           DISP_BIT_IB07
+#define DISP_BITMASK_AD6           DISP_BIT_IB06
+#define DISP_BITMASK_AD5           DISP_BIT_IB05
+#define DISP_BITMASK_AD4           DISP_BIT_IB04
+#define DISP_BITMASK_AD3           DISP_BIT_IB03
+#define DISP_BITMASK_AD2           DISP_BIT_IB02
+#define DISP_BITMASK_AD1           DISP_BIT_IB01
+#define DISP_BITMASK_AD0           DISP_BIT_IB00
+/* DISP_RAM_ADDR_SET_2_ADDR        RAM Address Set (2) */
+#define DISP_BITMASK_AD16          DISP_BIT_IB08
+#define DISP_BITMASK_AD15          DISP_BIT_IB07
+#define DISP_BITMASK_AD14          DISP_BIT_IB06
+#define DISP_BITMASK_AD13          DISP_BIT_IB05
+#define DISP_BITMASK_AD12          DISP_BIT_IB04
+#define DISP_BITMASK_AD11          DISP_BIT_IB03
+#define DISP_BITMASK_AD10          DISP_BIT_IB02
+#define DISP_BITMASK_AD9           DISP_BIT_IB01
+#define DISP_BITMASK_AD8           DISP_BIT_IB00
+/*
+ * DISP_CMD_RAMWR       RAM Data Read/Write
+ * Use Data Bit Configuration
+ */
+/* DISP_RAM_DATA_MASK_1_ADDR       RAM Write Data Mask (1) */
+#define DISP_BITMASK_WM11          DISP_BIT_IB13
+#define DISP_BITMASK_WM10          DISP_BIT_IB12
+#define DISP_BITMASK_WM9           DISP_BIT_IB11
+#define DISP_BITMASK_WM8           DISP_BIT_IB10
+#define DISP_BITMASK_WM7           DISP_BIT_IB09
+#define DISP_BITMASK_WM6           DISP_BIT_IB08
+#define DISP_BITMASK_WM5           DISP_BIT_IB05
+#define DISP_BITMASK_WM4           DISP_BIT_IB04
+#define DISP_BITMASK_WM3           DISP_BIT_IB03
+#define DISP_BITMASK_WM2           DISP_BIT_IB02
+#define DISP_BITMASK_WM1           DISP_BIT_IB01
+#define DISP_BITMASK_WM0           DISP_BIT_IB00
+/* DISP_RAM_DATA_MASK_2_ADDR       RAM Write Data Mask (2) */
+#define DISP_BITMASK_WM17          DISP_BIT_IB05
+#define DISP_BITMASK_WM16          DISP_BIT_IB04
+#define DISP_BITMASK_WM15          DISP_BIT_IB03
+#define DISP_BITMASK_WM14          DISP_BIT_IB02
+#define DISP_BITMASK_WM13          DISP_BIT_IB01
+#define DISP_BITMASK_WM12          DISP_BIT_IB00
+/*Gamma Control */
+/* DISP_GAMMA_CONTROL_1_ADDR       Gamma Control (1) */
+#define DISP_BITMASK_PKP12         DISP_BIT_IB10
+#define DISP_BITMASK_PKP11         DISP_BIT_IB08
+#define DISP_BITMASK_PKP10         DISP_BIT_IB09
+#define DISP_BITMASK_PKP02         DISP_BIT_IB02
+#define DISP_BITMASK_PKP01         DISP_BIT_IB01
+#define DISP_BITMASK_PKP00         DISP_BIT_IB00
+/* DISP_GAMMA_CONTROL_2_ADDR       Gamma Control (2) */
+#define DISP_BITMASK_PKP32         DISP_BIT_IB10
+#define DISP_BITMASK_PKP31         DISP_BIT_IB09
+#define DISP_BITMASK_PKP30         DISP_BIT_IB08
+#define DISP_BITMASK_PKP22         DISP_BIT_IB02
+#define DISP_BITMASK_PKP21         DISP_BIT_IB01
+#define DISP_BITMASK_PKP20         DISP_BIT_IB00
+/* DISP_GAMMA_CONTROL_3_ADDR       Gamma Control (3) */
+#define DISP_BITMASK_PKP52         DISP_BIT_IB10
+#define DISP_BITMASK_PKP51         DISP_BIT_IB09
+#define DISP_BITMASK_PKP50         DISP_BIT_IB08
+#define DISP_BITMASK_PKP42         DISP_BIT_IB02
+#define DISP_BITMASK_PKP41         DISP_BIT_IB01
+#define DISP_BITMASK_PKP40         DISP_BIT_IB00
+/* DISP_GAMMA_CONTROL_4_ADDR       Gamma Control (4) */
+#define DISP_BITMASK_PRP12         DISP_BIT_IB10
+#define DISP_BITMASK_PRP11         DISP_BIT_IB08
+#define DISP_BITMASK_PRP10         DISP_BIT_IB09
+#define DISP_BITMASK_PRP02         DISP_BIT_IB02
+#define DISP_BITMASK_PRP01         DISP_BIT_IB01
+#define DISP_BITMASK_PRP00         DISP_BIT_IB00
+/* DISP_GAMMA_CONTROL_5_ADDR       Gamma Control (5) */
+#define DISP_BITMASK_VRP14         DISP_BIT_IB12
+#define DISP_BITMASK_VRP13         DISP_BIT_IB11
+#define DISP_BITMASK_VRP12         DISP_BIT_IB10
+#define DISP_BITMASK_VRP11         DISP_BIT_IB08
+#define DISP_BITMASK_VRP10         DISP_BIT_IB09
+#define DISP_BITMASK_VRP03         DISP_BIT_IB03
+#define DISP_BITMASK_VRP02         DISP_BIT_IB02
+#define DISP_BITMASK_VRP01         DISP_BIT_IB01
+#define DISP_BITMASK_VRP00         DISP_BIT_IB00
+/* DISP_GAMMA_CONTROL_6_ADDR       Gamma Control (6) */
+#define DISP_BITMASK_PKN12         DISP_BIT_IB10
+#define DISP_BITMASK_PKN11         DISP_BIT_IB08
+#define DISP_BITMASK_PKN10         DISP_BIT_IB09
+#define DISP_BITMASK_PKN02         DISP_BIT_IB02
+#define DISP_BITMASK_PKN01         DISP_BIT_IB01
+#define DISP_BITMASK_PKN00         DISP_BIT_IB00
+/* DISP_GAMMA_CONTROL_7_ADDR       Gamma Control (7) */
+#define DISP_BITMASK_PKN32         DISP_BIT_IB10
+#define DISP_BITMASK_PKN31         DISP_BIT_IB08
+#define DISP_BITMASK_PKN30         DISP_BIT_IB09
+#define DISP_BITMASK_PKN22         DISP_BIT_IB02
+#define DISP_BITMASK_PKN21         DISP_BIT_IB01
+#define DISP_BITMASK_PKN20         DISP_BIT_IB00
+/* DISP_GAMMA_CONTROL_8_ADDR       Gamma Control (8) */
+#define DISP_BITMASK_PKN52         DISP_BIT_IB10
+#define DISP_BITMASK_PKN51         DISP_BIT_IB08
+#define DISP_BITMASK_PKN50         DISP_BIT_IB09
+#define DISP_BITMASK_PKN42         DISP_BIT_IB02
+#define DISP_BITMASK_PKN41         DISP_BIT_IB01
+#define DISP_BITMASK_PKN40         DISP_BIT_IB00
+/* DISP_GAMMA_CONTROL_9_ADDR       Gamma Control (9) */
+#define DISP_BITMASK_PRN12         DISP_BIT_IB10
+#define DISP_BITMASK_PRN11         DISP_BIT_IB08
+#define DISP_BITMASK_PRN10         DISP_BIT_IB09
+#define DISP_BITMASK_PRN02         DISP_BIT_IB02
+#define DISP_BITMASK_PRN01         DISP_BIT_IB01
+#define DISP_BITMASK_PRN00         DISP_BIT_IB00
+/* DISP_GAMMA_CONTROL_10_ADDR      Gamma Control (10) */
+#define DISP_BITMASK_VRN14         DISP_BIT_IB12
+#define DISP_BITMASK_VRN13         DISP_BIT_IB11
+#define DISP_BITMASK_VRN12         DISP_BIT_IB10
+#define DISP_BITMASK_VRN11         DISP_BIT_IB08
+#define DISP_BITMASK_VRN10         DISP_BIT_IB09
+#define DISP_BITMASK_VRN03         DISP_BIT_IB03
+#define DISP_BITMASK_VRN02         DISP_BIT_IB02
+#define DISP_BITMASK_VRN01         DISP_BIT_IB01
+#define DISP_BITMASK_VRN00         DISP_BIT_IB00
+/* Coordinate Control */
+/* DISP_VERT_SCROLL_CTL_1_ADDR     Vertical Scroll Control (1) */
+#define DISP_BITMASK_VL18          DISP_BIT_IB08
+#define DISP_BITMASK_VL17          DISP_BIT_IB07
+#define DISP_BITMASK_VL16          DISP_BIT_IB06
+#define DISP_BITMASK_VL15          DISP_BIT_IB05
+#define DISP_BITMASK_VL14          DISP_BIT_IB04
+#define DISP_BITMASK_VL13          DISP_BIT_IB03
+#define DISP_BITMASK_VL12          DISP_BIT_IB02
+#define DISP_BITMASK_VL11          DISP_BIT_IB01
+#define DISP_BITMASK_VL10          DISP_BIT_IB00
+/* DISP_VERT_SCROLL_CTL_2_ADDR     Vertical Scroll Control (2) */
+#define DISP_BITMASK_VL28          DISP_BIT_IB08
+#define DISP_BITMASK_VL27          DISP_BIT_IB07
+#define DISP_BITMASK_VL26          DISP_BIT_IB06
+#define DISP_BITMASK_VL25          DISP_BIT_IB05
+#define DISP_BITMASK_VL24          DISP_BIT_IB04
+#define DISP_BITMASK_VL23          DISP_BIT_IB03
+#define DISP_BITMASK_VL22          DISP_BIT_IB02
+#define DISP_BITMASK_VL21          DISP_BIT_IB01
+#define DISP_BITMASK_VL20          DISP_BIT_IB00
+/* DISP_SCREEN_1_DRV_POS_1_ADDR    First Screen Driving Position (1) */
+#define DISP_BITMASK_SS18          DISP_BIT_IB08
+#define DISP_BITMASK_SS17          DISP_BIT_IB07
+#define DISP_BITMASK_SS16          DISP_BIT_IB06
+#define DISP_BITMASK_SS15          DISP_BIT_IB05
+#define DISP_BITMASK_SS14          DISP_BIT_IB04
+#define DISP_BITMASK_SS13          DISP_BIT_IB03
+#define DISP_BITMASK_SS12          DISP_BIT_IB02
+#define DISP_BITMASK_SS11          DISP_BIT_IB01
+#define DISP_BITMASK_SS10          DISP_BIT_IB00
+/* DISP_SCREEN_1_DRV_POS_2_ADDR    First Screen Driving Position (2) */
+#define DISP_BITMASK_SE18          DISP_BIT_IB08
+#define DISP_BITMASK_SE17          DISP_BIT_IB07
+#define DISP_BITMASK_SE16          DISP_BIT_IB06
+#define DISP_BITMASK_SE15          DISP_BIT_IB05
+#define DISP_BITMASK_SE14          DISP_BIT_IB04
+#define DISP_BITMASK_SE13          DISP_BIT_IB03
+#define DISP_BITMASK_SE12          DISP_BIT_IB02
+#define DISP_BITMASK_SE11          DISP_BIT_IB01
+#define DISP_BITMASK_SE10          DISP_BIT_IB00
+/* DISP_SCREEN_2_DRV_POS_1_ADDR    Second Screen Driving Position (1) */
+#define DISP_BITMASK_SS28          DISP_BIT_IB08
+#define DISP_BITMASK_SS27          DISP_BIT_IB07
+#define DISP_BITMASK_SS26          DISP_BIT_IB06
+#define DISP_BITMASK_SS25          DISP_BIT_IB05
+#define DISP_BITMASK_SS24          DISP_BIT_IB04
+#define DISP_BITMASK_SS23          DISP_BIT_IB03
+#define DISP_BITMASK_SS22          DISP_BIT_IB02
+#define DISP_BITMASK_SS21          DISP_BIT_IB01
+#define DISP_BITMASK_SS20          DISP_BIT_IB00
+/* DISP_SCREEN_3_DRV_POS_2_ADDR    Second Screen Driving Position (2) */
+#define DISP_BITMASK_SE28          DISP_BIT_IB08
+#define DISP_BITMASK_SE27          DISP_BIT_IB07
+#define DISP_BITMASK_SE26          DISP_BIT_IB06
+#define DISP_BITMASK_SE25          DISP_BIT_IB05
+#define DISP_BITMASK_SE24          DISP_BIT_IB04
+#define DISP_BITMASK_SE23          DISP_BIT_IB03
+#define DISP_BITMASK_SE22          DISP_BIT_IB02
+#define DISP_BITMASK_SE21          DISP_BIT_IB01
+#define DISP_BITMASK_SE20          DISP_BIT_IB00
+/* DISP_HORZ_RAM_ADDR_POS_1_ADDR   Horizontal RAM Address Position (1) */
+#define DISP_BITMASK_HSA7          DISP_BIT_IB07
+#define DISP_BITMASK_HSA6          DISP_BIT_IB06
+#define DISP_BITMASK_HSA5          DISP_BIT_IB05
+#define DISP_BITMASK_HSA4          DISP_BIT_IB04
+#define DISP_BITMASK_HSA3          DISP_BIT_IB03
+#define DISP_BITMASK_HSA2          DISP_BIT_IB02
+#define DISP_BITMASK_HSA1          DISP_BIT_IB01
+#define DISP_BITMASK_HSA0          DISP_BIT_IB00
+/* DISP_HORZ_RAM_ADDR_POS_2_ADDR   Horizontal RAM Address Position (2) */
+#define DISP_BITMASK_HEA7          DISP_BIT_IB07
+#define DISP_BITMASK_HEA6          DISP_BIT_IB06
+#define DISP_BITMASK_HEA5          DISP_BIT_IB05
+#define DISP_BITMASK_HEA4          DISP_BIT_IB04
+#define DISP_BITMASK_HEA3          DISP_BIT_IB03
+#define DISP_BITMASK_HEA2          DISP_BIT_IB02
+#define DISP_BITMASK_HEA1          DISP_BIT_IB01
+#define DISP_BITMASK_HEA0          DISP_BIT_IB00
+/* DISP_VERT_RAM_ADDR_POS_1_ADDR   Vertical RAM Address Position (1) */
+#define DISP_BITMASK_VSA8          DISP_BIT_IB08
+#define DISP_BITMASK_VSA7          DISP_BIT_IB07
+#define DISP_BITMASK_VSA6          DISP_BIT_IB06
+#define DISP_BITMASK_VSA5          DISP_BIT_IB05
+#define DISP_BITMASK_VSA4          DISP_BIT_IB04
+#define DISP_BITMASK_VSA3          DISP_BIT_IB03
+#define DISP_BITMASK_VSA2          DISP_BIT_IB02
+#define DISP_BITMASK_VSA1          DISP_BIT_IB01
+#define DISP_BITMASK_VSA0          DISP_BIT_IB00
+/* DISP_VERT_RAM_ADDR_POS_2_ADDR   Vertical RAM Address Position (2) */
+#define DISP_BITMASK_VEA8          DISP_BIT_IB08
+#define DISP_BITMASK_VEA7          DISP_BIT_IB07
+#define DISP_BITMASK_VEA6          DISP_BIT_IB06
+#define DISP_BITMASK_VEA5          DISP_BIT_IB05
+#define DISP_BITMASK_VEA4          DISP_BIT_IB04
+#define DISP_BITMASK_VEA3          DISP_BIT_IB03
+#define DISP_BITMASK_VEA2          DISP_BIT_IB02
+#define DISP_BITMASK_VEA1          DISP_BIT_IB01
+#define DISP_BITMASK_VEA0          DISP_BIT_IB00
+static word disp_area_start_row;
+static word disp_area_end_row;
+static boolean disp_initialized = FALSE;
+/*
+ * For some reason the contrast set at init time is not good.
+ * Need to do it again.
+ */
+static boolean display_on = FALSE;
+
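+/*
+ * Panel revision (1 or 2) and the RAM address offset it implies (0x10 on
+ * rev 1, 0 otherwise); presumably consumed by the DISP_SET_RECT() macro.
+ */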
+static uint32 tmd20qvga_lcd_rev;
+uint16 tmd20qvga_panel_offset;
+
+#ifdef DISP_DEVICE_8BPP
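+/*
+ * Expands an 8bpp pixel value to the panel's 16-bit data word; the exact
+ * output bit layout is inferred from the table contents (assumption, not
+ * documented in the original source).
+ */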
+static word convert_8_to_16_tbl[256] = {
+	0x0000, 0x2000, 0x4000, 0x6000, 0x8000, 0xA000, 0xC000, 0xE000,
+	0x0100, 0x2100, 0x4100, 0x6100, 0x8100, 0xA100, 0xC100, 0xE100,
+	0x0200, 0x2200, 0x4200, 0x6200, 0x8200, 0xA200, 0xC200, 0xE200,
+	0x0300, 0x2300, 0x4300, 0x6300, 0x8300, 0xA300, 0xC300, 0xE300,
+	0x0400, 0x2400, 0x4400, 0x6400, 0x8400, 0xA400, 0xC400, 0xE400,
+	0x0500, 0x2500, 0x4500, 0x6500, 0x8500, 0xA500, 0xC500, 0xE500,
+	0x0600, 0x2600, 0x4600, 0x6600, 0x8600, 0xA600, 0xC600, 0xE600,
+	0x0700, 0x2700, 0x4700, 0x6700, 0x8700, 0xA700, 0xC700, 0xE700,
+	0x0008, 0x2008, 0x4008, 0x6008, 0x8008, 0xA008, 0xC008, 0xE008,
+	0x0108, 0x2108, 0x4108, 0x6108, 0x8108, 0xA108, 0xC108, 0xE108,
+	0x0208, 0x2208, 0x4208, 0x6208, 0x8208, 0xA208, 0xC208, 0xE208,
+	0x0308, 0x2308, 0x4308, 0x6308, 0x8308, 0xA308, 0xC308, 0xE308,
+	0x0408, 0x2408, 0x4408, 0x6408, 0x8408, 0xA408, 0xC408, 0xE408,
+	0x0508, 0x2508, 0x4508, 0x6508, 0x8508, 0xA508, 0xC508, 0xE508,
+	0x0608, 0x2608, 0x4608, 0x6608, 0x8608, 0xA608, 0xC608, 0xE608,
+	0x0708, 0x2708, 0x4708, 0x6708, 0x8708, 0xA708, 0xC708, 0xE708,
+	0x0010, 0x2010, 0x4010, 0x6010, 0x8010, 0xA010, 0xC010, 0xE010,
+	0x0110, 0x2110, 0x4110, 0x6110, 0x8110, 0xA110, 0xC110, 0xE110,
+	0x0210, 0x2210, 0x4210, 0x6210, 0x8210, 0xA210, 0xC210, 0xE210,
+	0x0310, 0x2310, 0x4310, 0x6310, 0x8310, 0xA310, 0xC310, 0xE310,
+	0x0410, 0x2410, 0x4410, 0x6410, 0x8410, 0xA410, 0xC410, 0xE410,
+	0x0510, 0x2510, 0x4510, 0x6510, 0x8510, 0xA510, 0xC510, 0xE510,
+	0x0610, 0x2610, 0x4610, 0x6610, 0x8610, 0xA610, 0xC610, 0xE610,
+	0x0710, 0x2710, 0x4710, 0x6710, 0x8710, 0xA710, 0xC710, 0xE710,
+	0x0018, 0x2018, 0x4018, 0x6018, 0x8018, 0xA018, 0xC018, 0xE018,
+	0x0118, 0x2118, 0x4118, 0x6118, 0x8118, 0xA118, 0xC118, 0xE118,
+	0x0218, 0x2218, 0x4218, 0x6218, 0x8218, 0xA218, 0xC218, 0xE218,
+	0x0318, 0x2318, 0x4318, 0x6318, 0x8318, 0xA318, 0xC318, 0xE318,
+	0x0418, 0x2418, 0x4418, 0x6418, 0x8418, 0xA418, 0xC418, 0xE418,
+	0x0518, 0x2518, 0x4518, 0x6518, 0x8518, 0xA518, 0xC518, 0xE518,
+	0x0618, 0x2618, 0x4618, 0x6618, 0x8618, 0xA618, 0xC618, 0xE618,
+	0x0718, 0x2718, 0x4718, 0x6718, 0x8718, 0xA718, 0xC718, 0xE718
+};
+#endif /* DISP_DEVICE_8BPP */
+
+static void tmd20qvga_disp_set_rect(int x, int y, int xres, int yres);
+static void tmd20qvga_disp_init(struct platform_device *pdev);
+static void tmd20qvga_disp_set_contrast(void);
+static void tmd20qvga_disp_set_display_area(word start_row, word end_row);
+static int tmd20qvga_disp_off(struct platform_device *pdev);
+static int tmd20qvga_disp_on(struct platform_device *pdev);
+static void tmd20qvga_set_revId(int);
+
+/* future use */
+void tmd20qvga_disp_clear_screen_area(word start_row, word end_row,
+				      word start_column, word end_column);
+
+static void tmd20qvga_set_revId(int id)
+{
+
+	tmd20qvga_lcd_rev = id;
+
+	if (tmd20qvga_lcd_rev == 1)
+		tmd20qvga_panel_offset = 0x10;
+	else
+		tmd20qvga_panel_offset = 0;
+}
+
+static void tmd20qvga_disp_init(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+
+	if (disp_initialized)
+		return;
+
+	mfd = platform_get_drvdata(pdev);
+
+	DISP_CMD_PORT = mfd->cmd_port;
+	DISP_DATA_PORT = mfd->data_port;
+
+#ifdef TMD20QVGA_LCD_18BPP
+	tmd20qvga_set_revId(2);
+#else
+	tmd20qvga_set_revId(1);
+#endif
+
+	disp_initialized = TRUE;
+	tmd20qvga_disp_set_contrast();
+	tmd20qvga_disp_set_display_area(0, QVGA_HEIGHT - 1);
+}
+
+static void tmd20qvga_disp_set_rect(int x, int y, int xres, int yres)
+{
+	if (!disp_initialized)
+		return;
+
+	DISP_SET_RECT(y, y + yres - 1, x, x + xres - 1);
+
+	DISP_CMD_OUT(DISP_CMD_RAMWR);
+}
+
+static void tmd20qvga_disp_set_display_area(word start_row, word end_row)
+{
+	word start_driving = start_row;
+	word end_driving = end_row;
+
+	if (!disp_initialized)
+		return;
+
+	/* Range checking */
+	if (end_driving >= QVGA_HEIGHT)
+		end_driving = QVGA_HEIGHT - 1;
+	if (start_driving > end_driving) {
+		/* Probably Backwards Switch */
+		start_driving = end_driving;
+		end_driving = start_row;	/* Has not changed */
+		if (end_driving >= QVGA_HEIGHT)
+			end_driving = QVGA_HEIGHT - 1;
+	}
+
+	if ((start_driving == disp_area_start_row)
+	    && (end_driving == disp_area_end_row))
+		return;
+
+	disp_area_start_row = start_driving;
+	disp_area_end_row = end_driving;
+
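+	/*
+	 * The 9-bit start/end line values are scattered across individual
+	 * register bits, hence the per-bit DISP_VAL_IF() masking below.
+	 */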
+	DISP_WRITE_OUT(DISP_SCREEN_1_DRV_POS_1_ADDR,
+		       DISP_VAL_IF(start_driving & 0x100,
+				   DISP_BITMASK_SS18) |
+		       DISP_VAL_IF(start_driving & 0x080,
+				   DISP_BITMASK_SS17) |
+		       DISP_VAL_IF(start_driving & 0x040,
+				   DISP_BITMASK_SS16) |
+		       DISP_VAL_IF(start_driving & 0x020,
+				   DISP_BITMASK_SS15) |
+		       DISP_VAL_IF(start_driving & 0x010,
+				   DISP_BITMASK_SS14) |
+		       DISP_VAL_IF(start_driving & 0x008,
+				   DISP_BITMASK_SS13) |
+		       DISP_VAL_IF(start_driving & 0x004,
+				   DISP_BITMASK_SS12) |
+		       DISP_VAL_IF(start_driving & 0x002,
+				   DISP_BITMASK_SS11) |
+		       DISP_VAL_IF(start_driving & 0x001, DISP_BITMASK_SS10));
+
+	DISP_WRITE_OUT(DISP_SCREEN_1_DRV_POS_2_ADDR,
+			DISP_VAL_IF(end_driving & 0x100, DISP_BITMASK_SE18) |
+			DISP_VAL_IF(end_driving & 0x080, DISP_BITMASK_SE17) |
+			DISP_VAL_IF(end_driving & 0x040, DISP_BITMASK_SE16) |
+			DISP_VAL_IF(end_driving & 0x020, DISP_BITMASK_SE15) |
+			DISP_VAL_IF(end_driving & 0x010, DISP_BITMASK_SE14) |
+			DISP_VAL_IF(end_driving & 0x008, DISP_BITMASK_SE13) |
+			DISP_VAL_IF(end_driving & 0x004, DISP_BITMASK_SE12) |
+			DISP_VAL_IF(end_driving & 0x002, DISP_BITMASK_SE11) |
+			DISP_VAL_IF(end_driving & 0x001, DISP_BITMASK_SE10));
+}
+
+static int tmd20qvga_disp_off(struct platform_device *pdev)
+{
+	if (!disp_initialized)
+		tmd20qvga_disp_init(pdev);
+
+	if (display_on) {
+		if (tmd20qvga_lcd_rev == 2) {
+			DISP_WRITE_OUT(DISP_POFF_LN_SETTING_ADDR, 0x000A);
+			DISP_WRITE_OUT(DISP_POWER_CTL_1_ADDR, 0xFFEE);
+			WAIT_MSEC(40);
+			DISP_WRITE_OUT(DISP_POWER_CTL_1_ADDR, 0xF812);
+			WAIT_MSEC(40);
+			DISP_WRITE_OUT(DISP_POWER_CTL_1_ADDR, 0xE811);
+			WAIT_MSEC(40);
+			DISP_WRITE_OUT(DISP_POWER_CTL_1_ADDR, 0xC011);
+			WAIT_MSEC(40);
+			DISP_WRITE_OUT(DISP_POWER_CTL_1_ADDR, 0x4011);
+			WAIT_MSEC(20);
+			DISP_WRITE_OUT(DISP_POWER_CTL_1_ADDR, 0x0010);
+
+		} else {
+			DISP_WRITE_OUT(DISP_POFF_LN_SETTING_ADDR, 0x000F);
+			DISP_WRITE_OUT(DISP_POWER_CTL_1_ADDR, 0x0BFE);
+			DISP_WRITE_OUT(DISP_POWER_SUPPLY_INTF_ADDR, 0x0100);
+			WAIT_MSEC(40);
+			DISP_WRITE_OUT(DISP_POWER_CTL_1_ADDR, 0x0BED);
+			DISP_WRITE_OUT(DISP_POWER_SUPPLY_INTF_ADDR, 0x0100);
+			WAIT_MSEC(40);
+			DISP_WRITE_OUT(DISP_POWER_CTL_1_ADDR, 0x00CD);
+			DISP_WRITE_OUT(DISP_POWER_SUPPLY_INTF_ADDR, 0x0100);
+			WAIT_MSEC(20);
+			DISP_WRITE_OUT(DISP_START_OSCILLATION_ADDR, 0x0);
+		}
+
+		DISP_WRITE_OUT(DISP_MODE_SETTING_ADDR, 0x0004);
+		DISP_WRITE_OUT(DISP_MODE_SETTING_ADDR, 0x0000);
+
+		display_on = FALSE;
+	}
+
+	return 0;
+}
+
+static int tmd20qvga_disp_on(struct platform_device *pdev)
+{
+	if (!disp_initialized)
+		tmd20qvga_disp_init(pdev);
+
+	if (!display_on) {
+		/* Deep Stand-by -> Stand-by */
+		DISP_CMD_OUT(DISP_START_OSCILLATION_ADDR);
+		WAIT_MSEC(1);
+		DISP_CMD_OUT(DISP_START_OSCILLATION_ADDR);
+		WAIT_MSEC(1);
+		DISP_CMD_OUT(DISP_START_OSCILLATION_ADDR);
+		WAIT_MSEC(1);
+
+		/* OFF -> Deep Stand-by -> Stand-by */
+		/* let's change the state from "Stand-by" to "Sleep" */
+		DISP_WRITE_OUT(DISP_MODE_SETTING_ADDR, 0x0005);
+		WAIT_MSEC(1);
+
+		/* Sleep -> Displaying */
+		DISP_WRITE_OUT(DISP_START_OSCILLATION_ADDR, 0x0001);
+		DISP_WRITE_OUT(DISP_DRIVER_OUTPUT_CTL_ADDR, 0x0127);
+		DISP_WRITE_OUT(DISP_LCD_DRIVING_SIG_ADDR, 0x200);
+		/* fast write mode */
+		DISP_WRITE_OUT(DISP_ENTRY_MODE_ADDR, 0x0130);
+		if (tmd20qvga_lcd_rev == 2)
+			DISP_WRITE_OUT(DISP_TMD_700_ADDR, 0x0003);
+		/* back porch = 14 + front porch = 2 --> 16 lines */
+		if (tmd20qvga_lcd_rev == 2) {
+#ifdef TMD20QVGA_LCD_18BPP
+			/* 256k color */
+			DISP_WRITE_OUT(DISP_DISPLAY_CTL_1_ADDR, 0x0000);
+#else
+			/* 65k color */
+			DISP_WRITE_OUT(DISP_DISPLAY_CTL_1_ADDR, 0x4000);
+#endif
+			DISP_WRITE_OUT(DISP_DISPLAY_CTL_2_ADDR, 0x0302);
+		} else {
+#ifdef TMD20QVGA_LCD_18BPP
+			/* 256k color */
+			DISP_WRITE_OUT(DISP_DISPLAY_CTL_1_ADDR, 0x0004);
+#else
+			/* 65k color */
+			DISP_WRITE_OUT(DISP_DISPLAY_CTL_1_ADDR, 0x4004);
+#endif
+			DISP_WRITE_OUT(DISP_DISPLAY_CTL_2_ADDR, 0x020E);
+		}
+		/* 16 bit one transfer */
+		if (tmd20qvga_lcd_rev == 2) {
+			DISP_WRITE_OUT(DISP_EXT_DISPLAY_CTL_1_ADDR, 0x0000);
+			DISP_WRITE_OUT(DISP_FRAME_CYCLE_CTL_ADDR, 0x0010);
+			DISP_WRITE_OUT(DISP_LTPS_CTL_1_ADDR, 0x0302);
+			DISP_WRITE_OUT(DISP_LTPS_CTL_2_ADDR, 0x0102);
+			DISP_WRITE_OUT(DISP_LTPS_CTL_3_ADDR, 0x0000);
+			DISP_WRITE_OUT(DISP_TMD_015_ADDR, 0x2000);
+
+			DISP_WRITE_OUT(DISP_AMP_SETTING_ADDR, 0x0000);
+			DISP_WRITE_OUT(DISP_GAMMA_CONTROL_1_ADDR, 0x0403);
+			DISP_WRITE_OUT(DISP_GAMMA_CONTROL_2_ADDR, 0x0304);
+			DISP_WRITE_OUT(DISP_GAMMA_CONTROL_3_ADDR, 0x0403);
+			DISP_WRITE_OUT(DISP_GAMMA_CONTROL_4_ADDR, 0x0303);
+			DISP_WRITE_OUT(DISP_GAMMA_CONTROL_5_ADDR, 0x0101);
+			DISP_WRITE_OUT(DISP_TMD_305_ADDR, 0);
+
+			DISP_WRITE_OUT(DISP_SCREEN_1_DRV_POS_1_ADDR, 0x0000);
+			DISP_WRITE_OUT(DISP_SCREEN_1_DRV_POS_2_ADDR, 0x013F);
+
+			DISP_WRITE_OUT(DISP_POWER_CTL_3_ADDR, 0x077D);
+
+			DISP_WRITE_OUT(DISP_POWER_CTL_4_ADDR, 0x0005);
+			DISP_WRITE_OUT(DISP_POWER_CTL_5_ADDR, 0x0000);
+			DISP_WRITE_OUT(DISP_POWER_CTL_6_ADDR, 0x0015);
+			DISP_WRITE_OUT(DISP_POWER_CTL_1_ADDR, 0xC010);
+			WAIT_MSEC(1);
+
+			DISP_WRITE_OUT(DISP_POWER_CTL_2_ADDR, 0x0001);
+			DISP_WRITE_OUT(DISP_POWER_CTL_1_ADDR, 0xFFFE);
+			WAIT_MSEC(60);
+		} else {
+			DISP_WRITE_OUT(DISP_EXT_DISPLAY_CTL_1_ADDR, 0x0001);
+			DISP_WRITE_OUT(DISP_FRAME_CYCLE_CTL_ADDR, 0x0010);
+			DISP_WRITE_OUT(DISP_LTPS_CTL_1_ADDR, 0x0301);
+			DISP_WRITE_OUT(DISP_LTPS_CTL_2_ADDR, 0x0001);
+			DISP_WRITE_OUT(DISP_LTPS_CTL_3_ADDR, 0x0000);
+			DISP_WRITE_OUT(DISP_AMP_SETTING_ADDR, 0x0000);
+			DISP_WRITE_OUT(DISP_GAMMA_CONTROL_1_ADDR, 0x0507);
+			DISP_WRITE_OUT(DISP_GAMMA_CONTROL_2_ADDR, 0x0405);
+			DISP_WRITE_OUT(DISP_GAMMA_CONTROL_3_ADDR, 0x0607);
+			DISP_WRITE_OUT(DISP_GAMMA_CONTROL_4_ADDR, 0x0502);
+			DISP_WRITE_OUT(DISP_GAMMA_CONTROL_5_ADDR, 0x0301);
+			DISP_WRITE_OUT(DISP_SCREEN_1_DRV_POS_1_ADDR, 0x0000);
+			DISP_WRITE_OUT(DISP_SCREEN_1_DRV_POS_2_ADDR, 0x013F);
+			DISP_WRITE_OUT(DISP_POWER_CTL_3_ADDR, 0x0795);
+
+			DISP_WRITE_OUT(DISP_POWER_SUPPLY_INTF_ADDR, 0x0102);
+			WAIT_MSEC(1);
+
+			DISP_WRITE_OUT(DISP_POWER_CTL_4_ADDR, 0x0450);
+			DISP_WRITE_OUT(DISP_POWER_SUPPLY_INTF_ADDR, 0x0103);
+			WAIT_MSEC(1);
+
+			DISP_WRITE_OUT(DISP_POWER_CTL_5_ADDR, 0x0008);
+			DISP_WRITE_OUT(DISP_POWER_SUPPLY_INTF_ADDR, 0x0104);
+			WAIT_MSEC(1);
+
+			DISP_WRITE_OUT(DISP_POWER_CTL_6_ADDR, 0x0C00);
+			DISP_WRITE_OUT(DISP_POWER_SUPPLY_INTF_ADDR, 0x0105);
+			WAIT_MSEC(1);
+
+			DISP_WRITE_OUT(DISP_POWER_CTL_7_ADDR, 0x0000);
+			DISP_WRITE_OUT(DISP_POWER_SUPPLY_INTF_ADDR, 0x0106);
+			WAIT_MSEC(1);
+
+			DISP_WRITE_OUT(DISP_POWER_CTL_1_ADDR, 0x0801);
+			DISP_WRITE_OUT(DISP_POWER_SUPPLY_INTF_ADDR, 0x0100);
+			WAIT_MSEC(1);
+
+			DISP_WRITE_OUT(DISP_POWER_CTL_2_ADDR, 0x001F);
+			DISP_WRITE_OUT(DISP_POWER_SUPPLY_INTF_ADDR, 0x0101);
+			WAIT_MSEC(60);
+
+			DISP_WRITE_OUT(DISP_POWER_CTL_2_ADDR, 0x009F);
+			DISP_WRITE_OUT(DISP_POWER_SUPPLY_INTF_ADDR, 0x0101);
+			WAIT_MSEC(10);
+
+			DISP_WRITE_OUT(DISP_HORZ_RAM_ADDR_POS_1_ADDR, 0x0010);
+			DISP_WRITE_OUT(DISP_HORZ_RAM_ADDR_POS_2_ADDR, 0x00FF);
+			DISP_WRITE_OUT(DISP_VERT_RAM_ADDR_POS_1_ADDR, 0x0000);
+			DISP_WRITE_OUT(DISP_VERT_RAM_ADDR_POS_2_ADDR, 0x013F);
+			/* RAM starts at address 0x10 */
+			DISP_WRITE_OUT(DISP_RAM_ADDR_SET_1_ADDR, 0x0010);
+			DISP_WRITE_OUT(DISP_RAM_ADDR_SET_2_ADDR, 0x0000);
+
+			/* lcd controller uses internal clock, not ext. vsync */
+			DISP_CMD_OUT(DISP_CMD_RAMWR);
+
+			DISP_WRITE_OUT(DISP_POWER_CTL_1_ADDR, 0x0881);
+			DISP_WRITE_OUT(DISP_POWER_SUPPLY_INTF_ADDR, 0x0100);
+			WAIT_MSEC(40);
+
+			DISP_WRITE_OUT(DISP_POWER_CTL_1_ADDR, 0x0BE1);
+			DISP_WRITE_OUT(DISP_POWER_SUPPLY_INTF_ADDR, 0x0100);
+			WAIT_MSEC(40);
+
+			DISP_WRITE_OUT(DISP_POWER_CTL_1_ADDR, 0x0BFF);
+			DISP_WRITE_OUT(DISP_POWER_SUPPLY_INTF_ADDR, 0x0100);
+		}
+		display_on = TRUE;
+	}
+
+	return 0;
+}
+
+static void tmd20qvga_disp_set_contrast(void)
+{
+#if (defined(TMD20QVGA_LCD_18BPP))
+
+	DISP_WRITE_OUT(DISP_GAMMA_CONTROL_1_ADDR, 0x0403);
+	DISP_WRITE_OUT(DISP_GAMMA_CONTROL_2_ADDR, 0x0302);
+	DISP_WRITE_OUT(DISP_GAMMA_CONTROL_3_ADDR, 0x0403);
+	DISP_WRITE_OUT(DISP_GAMMA_CONTROL_4_ADDR, 0x0303);
+	DISP_WRITE_OUT(DISP_GAMMA_CONTROL_5_ADDR, 0x0F07);
+
+#else
+	int newcontrast = 0x46;
+
+	DISP_WRITE_OUT(DISP_GAMMA_CONTROL_1_ADDR, 0x0403);
+
+	DISP_WRITE_OUT(DISP_GAMMA_CONTROL_2_ADDR,
+			DISP_VAL_IF(newcontrast & 0x0001, DISP_BITMASK_PKP20) |
+			DISP_VAL_IF(newcontrast & 0x0002, DISP_BITMASK_PKP21) |
+			DISP_VAL_IF(newcontrast & 0x0004, DISP_BITMASK_PKP22) |
+			DISP_VAL_IF(newcontrast & 0x0010, DISP_BITMASK_PKP30) |
+			DISP_VAL_IF(newcontrast & 0x0020, DISP_BITMASK_PKP31) |
+			DISP_VAL_IF(newcontrast & 0x0040, DISP_BITMASK_PKP32));
+
+	DISP_WRITE_OUT(DISP_GAMMA_CONTROL_3_ADDR,
+			DISP_VAL_IF(newcontrast & 0x0010, DISP_BITMASK_PKP40) |
+			DISP_VAL_IF(newcontrast & 0x0020, DISP_BITMASK_PKP41) |
+			DISP_VAL_IF(newcontrast & 0x0040, DISP_BITMASK_PKP42) |
+			DISP_VAL_IF(newcontrast & 0x0001, DISP_BITMASK_PKP50) |
+			DISP_VAL_IF(newcontrast & 0x0002, DISP_BITMASK_PKP51) |
+			DISP_VAL_IF(newcontrast & 0x0004, DISP_BITMASK_PKP52));
+
+	DISP_WRITE_OUT(DISP_GAMMA_CONTROL_4_ADDR, 0x0303);
+	DISP_WRITE_OUT(DISP_GAMMA_CONTROL_5_ADDR, 0x0F07);
+
+#endif /* defined(TMD20QVGA_LCD_18BPP) */
+
+}	/* End tmd20qvga_disp_set_contrast */
+
+void tmd20qvga_disp_clear_screen_area(word start_row, word end_row,
+				      word start_column, word end_column)
+{
+	int32 i;
+
+	/* Clear the display screen */
+	DISP_SET_RECT(start_row, end_row, start_column, end_column);
+	DISP_CMD_OUT(DISP_CMD_RAMWR);
+	i = (end_row - start_row + 1) * (end_column - start_column + 1);
+	for (; i > 0; i--)
+		DISP_DATA_OUT_16TO18BPP(0x0);
+}
+
+static int __init tmd20qvga_probe(struct platform_device *pdev)
+{
+	msm_fb_add_device(pdev);
+
+	return 0;
+}
+
+static struct platform_driver this_driver = {
+	.probe  = tmd20qvga_probe,
+	.driver = {
+		.name   = "ebi2_tmd_qvga",
+	},
+};
+
+static struct msm_fb_panel_data tmd20qvga_panel_data = {
+	.on = tmd20qvga_disp_on,
+	.off = tmd20qvga_disp_off,
+	.set_rect = tmd20qvga_disp_set_rect,
+};
+
+static struct platform_device this_device = {
+	.name   = "ebi2_tmd_qvga",
+	.id	= 0,
+	.dev	= {
+		.platform_data = &tmd20qvga_panel_data,
+	}
+};
+
+static int __init tmd20qvga_init(void)
+{
+	int ret;
+	struct msm_panel_info *pinfo;
+
+	ret = platform_driver_register(&this_driver);
+	if (!ret) {
+		pinfo = &tmd20qvga_panel_data.panel_info;
+		pinfo->xres = 240;
+		pinfo->yres = 320;
+		MSM_FB_SINGLE_MODE_PANEL(pinfo);
+		pinfo->type = EBI2_PANEL;
+		pinfo->pdest = DISPLAY_1;
+		pinfo->wait_cycle = 0x808000;
+#ifdef TMD20QVGA_LCD_18BPP
+		pinfo->bpp = 18;
+#else
+		pinfo->bpp = 16;
+#endif
+		pinfo->fb_num = 2;
+		pinfo->lcd.vsync_enable = TRUE;
+		pinfo->lcd.refx100 = 6000;
+		pinfo->lcd.v_back_porch = 16;
+		pinfo->lcd.v_front_porch = 4;
+		pinfo->lcd.v_pulse_width = 0;
+		pinfo->lcd.hw_vsync_mode = FALSE;
+		pinfo->lcd.vsync_notifier_period = 0;
+
+		ret = platform_device_register(&this_device);
+		if (ret)
+			platform_driver_unregister(&this_driver);
+	}
+
+	return ret;
+}
+
+module_init(tmd20qvga_init);
+
diff --git a/drivers/video/msm/external_common.c b/drivers/video/msm/external_common.c
new file mode 100644
index 0000000..694450a
--- /dev/null
+++ b/drivers/video/msm/external_common.c
@@ -0,0 +1,1229 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/mutex.h>
+
+#define DEBUG
+#define DEV_DBG_PREFIX "EXT_COMMON: "
+
+#include "msm_fb.h"
+#include "external_common.h"
+
+struct external_common_state_type *external_common_state;
+EXPORT_SYMBOL(external_common_state);
+DEFINE_MUTEX(external_common_state_hpd_mutex);
+EXPORT_SYMBOL(external_common_state_hpd_mutex);
+
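+/* Minimal decimal parser for sysfs input; stops at the first non-digit. */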
+static int atoi(const char *name)
+{
+	int val = 0;
+
+	for (;; name++) {
+		switch (*name) {
+		case '0' ... '9':
+			val = 10*val+(*name-'0');
+			break;
+		default:
+			return val;
+		}
+	}
+}
+
+const char *video_format_2string(uint32 format)
+{
+	switch (format) {
+	default:
+#ifdef CONFIG_FB_MSM_HDMI_COMMON
+	case HDMI_VFRMT_640x480p60_4_3:    return " 640x 480 p60  4/3";
+	case HDMI_VFRMT_720x480p60_4_3:    return " 720x 480 p60  4/3";
+	case HDMI_VFRMT_720x480p60_16_9:   return " 720x 480 p60 16/9";
+	case HDMI_VFRMT_1280x720p60_16_9:  return "1280x 720 p60 16/9";
+	case HDMI_VFRMT_1920x1080i60_16_9: return "1920x1080 i60 16/9";
+	case HDMI_VFRMT_1440x480i60_4_3:   return "1440x 480 i60  4/3";
+	case HDMI_VFRMT_1440x480i60_16_9:  return "1440x 480 i60 16/9";
+	case HDMI_VFRMT_1440x240p60_4_3:   return "1440x 240 p60  4/3";
+	case HDMI_VFRMT_1440x240p60_16_9:  return "1440x 240 p60 16/9";
+	case HDMI_VFRMT_2880x480i60_4_3:   return "2880x 480 i60  4/3";
+	case HDMI_VFRMT_2880x480i60_16_9:  return "2880x 480 i60 16/9";
+	case HDMI_VFRMT_2880x240p60_4_3:   return "2880x 240 p60  4/3";
+	case HDMI_VFRMT_2880x240p60_16_9:  return "2880x 240 p60 16/9";
+	case HDMI_VFRMT_1440x480p60_4_3:   return "1440x 480 p60  4/3";
+	case HDMI_VFRMT_1440x480p60_16_9:  return "1440x 480 p60 16/9";
+	case HDMI_VFRMT_1920x1080p60_16_9: return "1920x1080 p60 16/9";
+	case HDMI_VFRMT_720x576p50_4_3:    return " 720x 576 p50  4/3";
+	case HDMI_VFRMT_720x576p50_16_9:   return " 720x 576 p50 16/9";
+	case HDMI_VFRMT_1280x720p50_16_9:  return "1280x 720 p50 16/9";
+	case HDMI_VFRMT_1920x1080i50_16_9: return "1920x1080 i50 16/9";
+	case HDMI_VFRMT_1440x576i50_4_3:   return "1440x 576 i50  4/3";
+	case HDMI_VFRMT_1440x576i50_16_9:  return "1440x 576 i50 16/9";
+	case HDMI_VFRMT_1440x288p50_4_3:   return "1440x 288 p50  4/3";
+	case HDMI_VFRMT_1440x288p50_16_9:  return "1440x 288 p50 16/9";
+	case HDMI_VFRMT_2880x576i50_4_3:   return "2880x 576 i50  4/3";
+	case HDMI_VFRMT_2880x576i50_16_9:  return "2880x 576 i50 16/9";
+	case HDMI_VFRMT_2880x288p50_4_3:   return "2880x 288 p50  4/3";
+	case HDMI_VFRMT_2880x288p50_16_9:  return "2880x 288 p50 16/9";
+	case HDMI_VFRMT_1440x576p50_4_3:   return "1440x 576 p50  4/3";
+	case HDMI_VFRMT_1440x576p50_16_9:  return "1440x 576 p50 16/9";
+	case HDMI_VFRMT_1920x1080p50_16_9: return "1920x1080 p50 16/9";
+	case HDMI_VFRMT_1920x1080p24_16_9: return "1920x1080 p24 16/9";
+	case HDMI_VFRMT_1920x1080p25_16_9: return "1920x1080 p25 16/9";
+	case HDMI_VFRMT_1920x1080p30_16_9: return "1920x1080 p30 16/9";
+	case HDMI_VFRMT_2880x480p60_4_3:   return "2880x 480 p60  4/3";
+	case HDMI_VFRMT_2880x480p60_16_9:  return "2880x 480 p60 16/9";
+	case HDMI_VFRMT_2880x576p50_4_3:   return "2880x 576 p50  4/3";
+	case HDMI_VFRMT_2880x576p50_16_9:  return "2880x 576 p50 16/9";
+	case HDMI_VFRMT_1920x1250i50_16_9: return "1920x1250 i50 16/9";
+	case HDMI_VFRMT_1920x1080i100_16_9: return "1920x1080 i100 16/9";
+	case HDMI_VFRMT_1280x720p100_16_9: return "1280x 720 p100 16/9";
+	case HDMI_VFRMT_720x576p100_4_3:   return " 720x 576 p100  4/3";
+	case HDMI_VFRMT_720x576p100_16_9:  return " 720x 576 p100 16/9";
+	case HDMI_VFRMT_1440x576i100_4_3:  return "1440x 576 i100  4/3";
+	case HDMI_VFRMT_1440x576i100_16_9: return "1440x 576 i100 16/9";
+	case HDMI_VFRMT_1920x1080i120_16_9: return "1920x1080 i120 16/9";
+	case HDMI_VFRMT_1280x720p120_16_9: return "1280x 720 p120 16/9";
+	case HDMI_VFRMT_720x480p120_4_3:   return " 720x 480 p120  4/3";
+	case HDMI_VFRMT_720x480p120_16_9:  return " 720x 480 p120 16/9";
+	case HDMI_VFRMT_1440x480i120_4_3:  return "1440x 480 i120  4/3";
+	case HDMI_VFRMT_1440x480i120_16_9: return "1440x 480 i120 16/9";
+	case HDMI_VFRMT_720x576p200_4_3:   return " 720x 576 p200  4/3";
+	case HDMI_VFRMT_720x576p200_16_9:  return " 720x 576 p200 16/9";
+	case HDMI_VFRMT_1440x576i200_4_3:  return "1440x 576 i200  4/3";
+	case HDMI_VFRMT_1440x576i200_16_9: return "1440x 576 i200 16/9";
+	case HDMI_VFRMT_720x480p240_4_3:   return " 720x 480 p240  4/3";
+	case HDMI_VFRMT_720x480p240_16_9:  return " 720x 480 p240 16/9";
+	case HDMI_VFRMT_1440x480i240_4_3:  return "1440x 480 i240  4/3";
+	case HDMI_VFRMT_1440x480i240_16_9: return "1440x 480 i240 16/9";
+#elif defined(CONFIG_FB_MSM_TVOUT)
+	case TVOUT_VFRMT_NTSC_M_720x480i:     return "NTSC_M_720x480i";
+	case TVOUT_VFRMT_NTSC_J_720x480i:     return "NTSC_J_720x480i";
+	case TVOUT_VFRMT_PAL_BDGHIN_720x576i: return "PAL_BDGHIN_720x576i";
+	case TVOUT_VFRMT_PAL_M_720x480i:      return "PAL_M_720x480i";
+	case TVOUT_VFRMT_PAL_N_720x480i:      return "PAL_N_720x480i";
+#endif
+
+	}
+}
+EXPORT_SYMBOL(video_format_2string);
+
+static ssize_t external_common_rda_video_mode_str(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = snprintf(buf, PAGE_SIZE, "%s\n",
+		video_format_2string(external_common_state->video_resolution));
+	DEV_DBG("%s: '%s'\n", __func__,
+		video_format_2string(external_common_state->video_resolution));
+	return ret;
+}
+
+#ifdef CONFIG_FB_MSM_HDMI_COMMON
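+/*
+ * Timing LUT indexed by HDMI_VFRMT_* code.  Only 640x480p60 carries full
+ * timings here; every other entry is flagged as not supported.
+ */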
+struct hdmi_disp_mode_timing_type
+	hdmi_common_supported_video_mode_lut[HDMI_VFRMT_MAX] = {
+	HDMI_SETTINGS_640x480p60_4_3,
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_720x480p60_4_3),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_720x480p60_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1280x720p60_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1920x1080i60_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1440x480i60_4_3),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1440x480i60_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1440x240p60_4_3),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1440x240p60_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_2880x480i60_4_3),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_2880x480i60_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_2880x240p60_4_3),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_2880x240p60_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1440x480p60_4_3),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1440x480p60_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1920x1080p60_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_720x576p50_4_3),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_720x576p50_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1280x720p50_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1920x1080i50_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1440x576i50_4_3),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1440x576i50_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1440x288p50_4_3),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1440x288p50_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_2880x576i50_4_3),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_2880x576i50_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_2880x288p50_4_3),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_2880x288p50_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1440x576p50_4_3),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1440x576p50_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1920x1080p50_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1920x1080p24_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1920x1080p25_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1920x1080p30_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_2880x480p60_4_3),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_2880x480p60_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_2880x576p50_4_3),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_2880x576p50_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1920x1250i50_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1920x1080i100_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1280x720p100_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_720x576p100_4_3),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_720x576p100_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1440x576i100_4_3),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1440x576i100_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1920x1080i120_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1280x720p120_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_720x480p120_4_3),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_720x480p120_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1440x480i120_4_3),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1440x480i120_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_720x576p200_4_3),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_720x576p200_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1440x576i200_4_3),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1440x576i200_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_720x480p240_4_3),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_720x480p240_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1440x480i240_4_3),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1440x480i240_16_9),
+};
+EXPORT_SYMBOL(hdmi_common_supported_video_mode_lut);
+
+static ssize_t hdmi_common_rda_edid_modes(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = 0;
+	int i;
+
+	buf[0] = 0;
+	if (external_common_state->disp_mode_list.num_of_elements) {
+		uint32 *video_mode = external_common_state->disp_mode_list
+			.disp_mode_list;
+		for (i = 0; i < external_common_state->disp_mode_list
+			.num_of_elements; ++i) {
+			if (ret > 0)
+				ret += snprintf(buf+ret, PAGE_SIZE-ret, ",%d",
+					*video_mode++ + 1);
+			else
+				ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d",
+					*video_mode++ + 1);
+		}
+	} else
+		ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d",
+			external_common_state->video_resolution+1);
+
+	DEV_DBG("%s: '%s'\n", __func__, buf);
+	ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
+	return ret;
+}
+
+static ssize_t hdmi_common_rda_hdcp(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = snprintf(buf, PAGE_SIZE, "%d\n",
+		external_common_state->hdcp_active);
+	DEV_DBG("%s: '%d'\n", __func__,
+		external_common_state->hdcp_active);
+	return ret;
+}
+
+static ssize_t hdmi_common_rda_hpd(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret;
+	if (external_common_state->hpd_feature) {
+		ret = snprintf(buf, PAGE_SIZE, "%d\n",
+			external_common_state->hpd_feature_on);
+		DEV_DBG("%s: '%d'\n", __func__,
+			external_common_state->hpd_feature_on);
+	} else {
+		ret = snprintf(buf, PAGE_SIZE, "-1\n");
+		DEV_DBG("%s: 'not supported'\n", __func__);
+	}
+	return ret;
+}
+
+static ssize_t hdmi_common_wta_hpd(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	ssize_t ret = strnlen(buf, PAGE_SIZE);
+	int hpd = atoi(buf);
+
+	if (external_common_state->hpd_feature) {
+		if (hpd == 0 && external_common_state->hpd_feature_on) {
+			external_common_state->hpd_feature(0);
+			external_common_state->hpd_feature_on = 0;
+			DEV_DBG("%s: '%d'\n", __func__,
+				external_common_state->hpd_feature_on);
+		} else if (hpd == 1 && !external_common_state->hpd_feature_on) {
+			external_common_state->hpd_feature(1);
+			external_common_state->hpd_feature_on = 1;
+			DEV_DBG("%s: '%d'\n", __func__,
+				external_common_state->hpd_feature_on);
+		} else {
+			DEV_DBG("%s: '%d' (unchanged)\n", __func__,
+				external_common_state->hpd_feature_on);
+		}
+	} else {
+		DEV_DBG("%s: 'not supported'\n", __func__);
+	}
+
+	return ret;
+}
+
+static ssize_t hdmi_common_rda_3d_present(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = snprintf(buf, PAGE_SIZE, "%d\n",
+		external_common_state->present_3d);
+	DEV_DBG("%s: '%d'\n", __func__,
+			external_common_state->present_3d);
+	return ret;
+}
+
+static ssize_t hdmi_common_rda_hdcp_present(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = snprintf(buf, PAGE_SIZE, "%d\n",
+		external_common_state->present_hdcp);
+	DEV_DBG("%s: '%d'\n", __func__,
+			external_common_state->present_hdcp);
+	return ret;
+}
+#endif
+
+#ifdef CONFIG_FB_MSM_HDMI_3D
+static ssize_t hdmi_3d_rda_format_3d(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = snprintf(buf, PAGE_SIZE, "%d\n",
+		external_common_state->format_3d);
+	DEV_DBG("%s: '%d'\n", __func__,
+		external_common_state->format_3d);
+	return ret;
+}
+
+static ssize_t hdmi_3d_wta_format_3d(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	ssize_t ret = strnlen(buf, PAGE_SIZE);
+	int format_3d = atoi(buf);
+
+	if (format_3d >= 0 && format_3d <= 2) {
+		if (format_3d != external_common_state->format_3d) {
+			external_common_state->format_3d = format_3d;
+			if (external_common_state->switch_3d)
+				external_common_state->switch_3d(format_3d);
+			DEV_DBG("%s: '%d'\n", __func__,
+				external_common_state->format_3d);
+		} else {
+			DEV_DBG("%s: '%d' (unchanged)\n", __func__,
+				external_common_state->format_3d);
+		}
+	} else {
+		DEV_DBG("%s: '%d' (unknown)\n", __func__, format_3d);
+	}
+
+	return ret;
+}
+#endif
+
+static ssize_t external_common_rda_video_mode(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = snprintf(buf, PAGE_SIZE, "%d\n",
+		external_common_state->video_resolution+1);
+	DEV_DBG("%s: '%d'\n", __func__,
+			external_common_state->video_resolution+1);
+	return ret;
+}
+
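+/*
+ * sysfs write handler: switch the external interface to the 1-based mode
+ * number written by userspace, sending OFFLINE/ONLINE uevents around the
+ * change.
+ */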
+static ssize_t external_common_wta_video_mode(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	ssize_t ret = strnlen(buf, PAGE_SIZE);
+	uint32 video_mode;
+#ifdef CONFIG_FB_MSM_HDMI_COMMON
+	const struct hdmi_disp_mode_timing_type *disp_mode;
+#endif
+	mutex_lock(&external_common_state_hpd_mutex);
+	if (!external_common_state->hpd_state) {
+		mutex_unlock(&external_common_state_hpd_mutex);
+		DEV_INFO("%s: FAILED: display off or cable disconnected\n",
+			__func__);
+		return ret;
+	}
+	mutex_unlock(&external_common_state_hpd_mutex);
+
+	video_mode = atoi(buf)-1;
+	kobject_uevent(external_common_state->uevent_kobj, KOBJ_OFFLINE);
+#ifdef CONFIG_FB_MSM_HDMI_COMMON
+	disp_mode = hdmi_common_get_supported_mode(video_mode);
+	if (!disp_mode) {
+		DEV_INFO("%s: FAILED: mode not supported (%d)\n",
+			__func__, video_mode);
+		return ret;
+	}
+	external_common_state->disp_mode_list.num_of_elements = 1;
+	external_common_state->disp_mode_list.disp_mode_list[0] = video_mode;
+#elif defined(CONFIG_FB_MSM_TVOUT)
+	external_common_state->video_resolution = video_mode;
+#endif
+	DEV_DBG("%s: 'mode=%d %s' successful (sending OFF/ONLINE)\n", __func__,
+		video_mode, video_format_2string(video_mode));
+	kobject_uevent(external_common_state->uevent_kobj, KOBJ_ONLINE);
+	return ret;
+}
+
+static ssize_t external_common_rda_connected(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret;
+	mutex_lock(&external_common_state_hpd_mutex);
+	ret = snprintf(buf, PAGE_SIZE, "%d\n",
+		external_common_state->hpd_state);
+	DEV_DBG("%s: '%d'\n", __func__,
+		external_common_state->hpd_state);
+	mutex_unlock(&external_common_state_hpd_mutex);
+	return ret;
+}
+
+static DEVICE_ATTR(video_mode, S_IRUGO | S_IWUGO,
+	external_common_rda_video_mode, external_common_wta_video_mode);
+static DEVICE_ATTR(video_mode_str, S_IRUGO, external_common_rda_video_mode_str,
+	NULL);
+static DEVICE_ATTR(connected, S_IRUGO, external_common_rda_connected, NULL);
+#ifdef CONFIG_FB_MSM_HDMI_COMMON
+static DEVICE_ATTR(edid_modes, S_IRUGO, hdmi_common_rda_edid_modes, NULL);
+static DEVICE_ATTR(hpd, S_IRUGO | S_IWUGO, hdmi_common_rda_hpd,
+	hdmi_common_wta_hpd);
+static DEVICE_ATTR(hdcp, S_IRUGO, hdmi_common_rda_hdcp, NULL);
+static DEVICE_ATTR(3d_present, S_IRUGO, hdmi_common_rda_3d_present, NULL);
+static DEVICE_ATTR(hdcp_present, S_IRUGO, hdmi_common_rda_hdcp_present, NULL);
+#endif
+#ifdef CONFIG_FB_MSM_HDMI_3D
+static DEVICE_ATTR(format_3d, S_IRUGO | S_IWUGO, hdmi_3d_rda_format_3d,
+	hdmi_3d_wta_format_3d);
+#endif
+
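+/* sysfs attributes exposed on the external display's framebuffer device */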
+static struct attribute *external_common_fs_attrs[] = {
+	&dev_attr_video_mode.attr,
+	&dev_attr_video_mode_str.attr,
+	&dev_attr_connected.attr,
+#ifdef CONFIG_FB_MSM_HDMI_COMMON
+	&dev_attr_edid_modes.attr,
+	&dev_attr_hdcp.attr,
+	&dev_attr_hpd.attr,
+	&dev_attr_3d_present.attr,
+	&dev_attr_hdcp_present.attr,
+#endif
+#ifdef CONFIG_FB_MSM_HDMI_3D
+	&dev_attr_format_3d.attr,
+#endif
+	NULL,
+};
+static struct attribute_group external_common_fs_attr_group = {
+	.attrs = external_common_fs_attrs,
+};
+
+/* create external interface kobject and initialize */
+int external_common_state_create(struct platform_device *pdev)
+{
+	int rc;
+	struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
+	if (!mfd) {
+		DEV_ERR("%s: mfd not found\n", __func__);
+		return -ENODEV;
+	}
+	if (!mfd->fbi) {
+		DEV_ERR("%s: mfd->fbi not found\n", __func__);
+		return -ENODEV;
+	}
+	if (!mfd->fbi->dev) {
+		DEV_ERR("%s: mfd->fbi->dev not found\n", __func__);
+		return -ENODEV;
+	}
+	rc = sysfs_create_group(&mfd->fbi->dev->kobj,
+		&external_common_fs_attr_group);
+	if (rc) {
+		DEV_ERR("%s: sysfs group creation failed, rc=%d\n", __func__,
+			rc);
+		return rc;
+	}
+	external_common_state->uevent_kobj = &mfd->fbi->dev->kobj;
+	DEV_ERR("%s: sysfs group %p\n", __func__,
+		external_common_state->uevent_kobj);
+
+	kobject_uevent(external_common_state->uevent_kobj, KOBJ_ADD);
+	DEV_DBG("%s: kobject_uevent(KOBJ_ADD)\n", __func__);
+	return 0;
+}
+EXPORT_SYMBOL(external_common_state_create);
+
+void external_common_state_remove(void)
+{
+	if (external_common_state->uevent_kobj)
+		sysfs_remove_group(external_common_state->uevent_kobj,
+			&external_common_fs_attr_group);
+	external_common_state->uevent_kobj = NULL;
+}
+EXPORT_SYMBOL(external_common_state_remove);
+
+#ifdef CONFIG_FB_MSM_HDMI_COMMON
+/* The logic ID for the HDMI TX core. Only one HDMI TX core is supported. */
+struct hdmi_edid_video_mode_property_type {
+	uint32	video_code;
+	uint32	active_h;
+	uint32	active_v;
+	boolean	interlaced;
+	uint32	total_h;
+	uint32	total_blank_h;
+	uint32	total_v;
+	uint32	total_blank_v;
+	/* Must divide by 1000 to get the frequency */
+	uint32	freq_h;
+	/* Must divide by 1000 to get the frequency */
+	uint32	freq_v;
+	/* Must divide by 1000 to get the frequency */
+	uint32	pixel_freq;
+	/* Must divide by 1000 to get the frequency */
+	uint32	refresh_rate;
+	boolean	aspect_ratio_4_3;
+};
+
+/* LUT is sorted from lowest to highest Active H to ease searching */
+static struct hdmi_edid_video_mode_property_type
+	hdmi_edid_disp_mode_lut[] = {
+
+	/* All 640 H Active */
+	{HDMI_VFRMT_640x480p60_4_3, 640, 480, FALSE, 800, 160, 525, 45,
+	 31465, 59940, 25175, 59940, TRUE},
+	{HDMI_VFRMT_640x480p60_4_3, 640, 480, FALSE, 800, 160, 525, 45,
+	 31500, 60000, 25200, 60000, TRUE},
+
+	/* All 720 H Active */
+	{HDMI_VFRMT_720x576p50_4_3,  720, 576, FALSE, 864, 144, 625, 49,
+	 31250, 50000, 27000, 50000, TRUE},
+	{HDMI_VFRMT_720x480p60_4_3,  720, 480, FALSE, 858, 138, 525, 45,
+	 31465, 59940, 27000, 59940, TRUE},
+	{HDMI_VFRMT_720x480p60_4_3,  720, 480, FALSE, 858, 138, 525, 45,
+	 31500, 60000, 27030, 60000, TRUE},
+	{HDMI_VFRMT_720x576p100_4_3, 720, 576, FALSE, 864, 144, 625, 49,
+	 62500, 100000, 54000, 100000, TRUE},
+	{HDMI_VFRMT_720x480p120_4_3, 720, 480, FALSE, 858, 138, 525, 45,
+	 62937, 119880, 54000, 119880, TRUE},
+	{HDMI_VFRMT_720x480p120_4_3, 720, 480, FALSE, 858, 138, 525, 45,
+	 63000, 120000, 54054, 120000, TRUE},
+	{HDMI_VFRMT_720x576p200_4_3, 720, 576, FALSE, 864, 144, 625, 49,
+	 125000, 200000, 108000, 200000, TRUE},
+	{HDMI_VFRMT_720x480p240_4_3, 720, 480, FALSE, 858, 138, 525, 45,
+	 125874, 239760, 108000, 239000, TRUE},
+	{HDMI_VFRMT_720x480p240_4_3, 720, 480, FALSE, 858, 138, 525, 45,
+	 126000, 240000, 108108, 240000, TRUE},
+
+	/* All 1280 H Active */
+	{HDMI_VFRMT_1280x720p50_16_9,  1280, 720, FALSE, 1980, 700, 750, 30,
+	 37500, 50000, 74250, 50000, FALSE},
+	{HDMI_VFRMT_1280x720p60_16_9,  1280, 720, FALSE, 1650, 370, 750, 30,
+	 44955, 59940, 74176, 59940, FALSE},
+	{HDMI_VFRMT_1280x720p60_16_9,  1280, 720, FALSE, 1650, 370, 750, 30,
+	 45000, 60000, 74250, 60000, FALSE},
+	{HDMI_VFRMT_1280x720p100_16_9, 1280, 720, FALSE, 1980, 700, 750, 30,
+	 75000, 100000, 148500, 100000, FALSE},
+	{HDMI_VFRMT_1280x720p120_16_9, 1280, 720, FALSE, 1650, 370, 750, 30,
+	 89909, 119880, 148352, 119880, FALSE},
+	{HDMI_VFRMT_1280x720p120_16_9, 1280, 720, FALSE, 1650, 370, 750, 30,
+	 90000, 120000, 148500, 120000, FALSE},
+
+	/* All 1440 H Active */
+	{HDMI_VFRMT_1440x576i50_4_3, 1440, 576, TRUE,  1728, 288, 625, 24,
+	 15625, 50000, 27000, 50000, TRUE},
+	{HDMI_VFRMT_720x288p50_4_3,  1440, 288, FALSE, 1728, 288, 312, 24,
+	 15625, 50080, 27000, 50000, TRUE},
+	{HDMI_VFRMT_720x288p50_4_3,  1440, 288, FALSE, 1728, 288, 313, 25,
+	 15625, 49920, 27000, 50000, TRUE},
+	{HDMI_VFRMT_720x288p50_4_3,  1440, 288, FALSE, 1728, 288, 314, 26,
+	 15625, 49761, 27000, 50000, TRUE},
+	{HDMI_VFRMT_1440x576p50_4_3, 1440, 576, FALSE, 1728, 288, 625, 49,
+	 31250, 50000, 54000, 50000, TRUE},
+	{HDMI_VFRMT_1440x480i60_4_3, 1440, 480, TRUE,  1716, 276, 525, 22,
+	 15734, 59940, 27000, 59940, TRUE},
+	{HDMI_VFRMT_1440x240p60_4_3, 1440, 240, FALSE, 1716, 276, 262, 22,
+	 15734, 60054, 27000, 59940, TRUE},
+	{HDMI_VFRMT_1440x240p60_4_3, 1440, 240, FALSE, 1716, 276, 263, 23,
+	 15734, 59826, 27000, 59940, TRUE},
+	{HDMI_VFRMT_1440x480p60_4_3, 1440, 480, FALSE, 1716, 276, 525, 45,
+	 31469, 59940, 54000, 59940, TRUE},
+	{HDMI_VFRMT_1440x480i60_4_3, 1440, 480, TRUE,  1716, 276, 525, 22,
+	 15750, 60000, 27027, 60000, TRUE},
+	{HDMI_VFRMT_1440x240p60_4_3, 1440, 240, FALSE, 1716, 276, 262, 22,
+	 15750, 60115, 27027, 60000, TRUE},
+	{HDMI_VFRMT_1440x240p60_4_3, 1440, 240, FALSE, 1716, 276, 263, 23,
+	 15750, 59886, 27027, 60000, TRUE},
+	{HDMI_VFRMT_1440x480p60_4_3, 1440, 480, FALSE, 1716, 276, 525, 45,
+	 31500, 60000, 54054, 60000, TRUE},
+	{HDMI_VFRMT_1440x576i100_4_3, 1440, 576, TRUE,  1728, 288, 625, 24,
+	 31250, 100000, 54000, 100000, TRUE},
+	{HDMI_VFRMT_1440x480i120_4_3, 1440, 480, TRUE,  1716, 276, 525, 22,
+	 31469, 119880, 54000, 119880, TRUE},
+	{HDMI_VFRMT_1440x480i120_4_3, 1440, 480, TRUE,  1716, 276, 525, 22,
+	 31500, 120000, 54054, 120000, TRUE},
+	{HDMI_VFRMT_1440x576i200_4_3, 1440, 576, TRUE,  1728, 288, 625, 24,
+	 62500, 200000, 108000, 200000, TRUE},
+	{HDMI_VFRMT_1440x480i240_4_3, 1440, 480, TRUE,  1716, 276, 525, 22,
+	 62937, 239760, 108000, 239000, TRUE},
+	{HDMI_VFRMT_1440x480i240_4_3, 1440, 480, TRUE,  1716, 276, 525, 22,
+	 63000, 240000, 108108, 240000, TRUE},
+
+	/* All 1920 H Active */
+	{HDMI_VFRMT_1920x1080p60_16_9, 1920, 1080, FALSE, 2200, 280, 1125,
+	 45, 67433, 59940, 148352, 59940, FALSE},
+	{HDMI_VFRMT_1920x1080p60_16_9, 1920, 1080, FALSE, 2200, 280, 1125,
+	 45, 67500, 60000, 148500, 60000, FALSE},
+	{HDMI_VFRMT_1920x1080p50_16_9, 1920, 1080, FALSE, 2640, 720, 1125,
+	 45, 56250, 50000, 148500, 50000, FALSE},
+	{HDMI_VFRMT_1920x1080p24_16_9, 1920, 1080, FALSE, 2750, 830, 1125,
+	 45, 26973, 23976, 74176, 24000, FALSE},
+	{HDMI_VFRMT_1920x1080p24_16_9, 1920, 1080, FALSE, 2750, 830, 1125,
+	 45, 27000, 24000, 74250, 24000, FALSE},
+	{HDMI_VFRMT_1920x1080p25_16_9, 1920, 1080, FALSE, 2640, 720, 1125,
+	 45, 28125, 25000, 74250, 25000, FALSE},
+	{HDMI_VFRMT_1920x1080p30_16_9, 1920, 1080, FALSE, 2200, 280, 1125,
+	 45, 33716, 29970, 74176, 30000, FALSE},
+	{HDMI_VFRMT_1920x1080p30_16_9, 1920, 1080, FALSE, 2200, 280, 1125,
+	 45, 33750, 30000, 74250, 30000, FALSE},
+	{HDMI_VFRMT_1920x1080i50_16_9, 1920, 1080, TRUE,  2304, 384, 1250,
+	 85, 31250, 50000, 72000, 50000, FALSE},
+	{HDMI_VFRMT_1920x1080i60_16_9, 1920, 1080, TRUE,  2200, 280, 1125,
+	 22, 33716, 59940, 74176, 59940, FALSE},
+	{HDMI_VFRMT_1920x1080i60_16_9, 1920, 1080, TRUE,  2200, 280, 1125,
+	 22, 33750, 60000, 74250, 60000, FALSE},
+	{HDMI_VFRMT_1920x1080i100_16_9, 1920, 1080, TRUE,  2640, 720, 1125,
+	 22, 56250, 100000, 148500, 100000, FALSE},
+	{HDMI_VFRMT_1920x1080i120_16_9, 1920, 1080, TRUE,  2200, 280, 1125,
+	 22, 67432, 119880, 148352, 119980, FALSE},
+	{HDMI_VFRMT_1920x1080i120_16_9, 1920, 1080, TRUE,  2200, 280, 1125,
+	 22, 67500, 120000, 148500, 120000, FALSE},
+
+	/* All 2880 H Active */
+	{HDMI_VFRMT_2880x576i50_4_3, 2880, 576, TRUE,  3456, 576, 625, 24,
+	 15625, 50000, 54000, 50000, TRUE},
+	{HDMI_VFRMT_2880x288p50_4_3, 2880, 576, FALSE, 3456, 576, 312, 24,
+	 15625, 50080, 54000, 50000, TRUE},
+	{HDMI_VFRMT_2880x288p50_4_3, 2880, 576, FALSE, 3456, 576, 313, 25,
+	 15625, 49920, 54000, 50000, TRUE},
+	{HDMI_VFRMT_2880x288p50_4_3, 2880, 576, FALSE, 3456, 576, 314, 26,
+	 15625, 49761, 54000, 50000, TRUE},
+	{HDMI_VFRMT_2880x576p50_4_3, 2880, 576, FALSE, 3456, 576, 625, 49,
+	 31250, 50000, 108000, 50000, TRUE},
+	{HDMI_VFRMT_2880x480i60_4_3, 2880, 480, TRUE,  3432, 552, 525, 22,
+	 15734, 59940, 54000, 59940, TRUE},
+	{HDMI_VFRMT_2880x240p60_4_3, 2880, 480, FALSE, 3432, 552, 262, 22,
+	 15734, 60054, 54000, 59940, TRUE},
+	{HDMI_VFRMT_2880x240p60_4_3, 2880, 480, FALSE, 3432, 552, 263, 23,
+	 15734, 59940, 54000, 59940, TRUE},
+	{HDMI_VFRMT_2880x480p60_4_3, 2880, 480, FALSE, 3432, 552, 525, 45,
+	 31469, 59940, 108000, 59940, TRUE},
+	{HDMI_VFRMT_2880x480i60_4_3, 2880, 480, TRUE,  3432, 552, 525, 22,
+	 15750, 60000, 54054, 60000, TRUE},
+	{HDMI_VFRMT_2880x240p60_4_3, 2880, 240, FALSE, 3432, 552, 262, 22,
+	 15750, 60115, 54054, 60000, TRUE},
+	{HDMI_VFRMT_2880x240p60_4_3, 2880, 240, FALSE, 3432, 552, 262, 23,
+	 15750, 59886, 54054, 60000, TRUE},
+	{HDMI_VFRMT_2880x480p60_4_3, 2880, 480, FALSE, 3432, 552, 525, 45,
+	 31500, 60000, 108108, 60000, TRUE},
+};
+
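+/*
+ * Walk the CEA-861 data block collection in the EDID extension: each block
+ * starts with a header byte whose bits 7:5 hold the tag (type) and bits 4:0
+ * the payload length.
+ */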
+static const uint8 *hdmi_edid_find_block(const uint8 *in_buf, uint8 type,
+	uint8 *len)
+{
+	/* the start of data block collection, start of Video Data Block */
+	uint32 offset = 4;
+
+	*len = 0;
+	if (in_buf[2] == 4) { /* no non-DTD data present */
+		DEV_WARN("EDID: no non-DTD data present\n");
+		return NULL;
+	}
+	while (offset < 0x80) {
+		uint8 block_len = in_buf[offset] & 0x1F;
+		if ((in_buf[offset] >> 5) == type) {
+			*len = block_len;
+			DEV_DBG("EDID: block=%d found @ %d with length=%d\n",
+				type, offset, block_len);
+			return in_buf+offset;
+		}
+		offset += 1 + block_len;
+	}
+	DEV_WARN("EDID: block=%d not found in EDID block\n", type);
+	return NULL;
+}
+
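+/*
+ * EDID bytes 8-9 pack the three-letter manufacturer ID as 5-bit letters,
+ * with 'A' encoded as 1.
+ */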
+static void hdmi_edid_extract_vendor_id(const uint8 *in_buf,
+	char *vendor_id)
+{
+	uint32 id_codes = ((uint32)in_buf[8] << 8) + in_buf[9];
+
+	vendor_id[0] = 'A' - 1 + ((id_codes >> 10) & 0x1F);
+	vendor_id[1] = 'A' - 1 + ((id_codes >> 5) & 0x1F);
+	vendor_id[2] = 'A' - 1 + (id_codes & 0x1F);
+	vendor_id[3] = 0;
+}
+
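+/*
+ * The IEEE registration identifier occupies bytes 1-3 of the Vendor
+ * Specific Data Block, least significant byte first.
+ */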
+static uint32 hdmi_edid_extract_ieee_reg_id(const uint8 *in_buf)
+{
+	uint8 len;
+	const uint8 *vsd = hdmi_edid_find_block(in_buf, 3, &len);
+
+	if (vsd == NULL)
+		return 0;
+
+	DEV_DBG("EDID: VSD PhyAddr=%04x, MaxTMDS=%dMHz\n",
+		((uint32)vsd[6] << 8) + (uint32)vsd[5], (uint32)vsd[7] * 5);
+	return ((uint32)vsd[3] << 16) + ((uint32)vsd[2] << 8) + (uint32)vsd[1];
+}
+
+static void hdmi_edid_extract_3d_present(const uint8 *in_buf)
+{
+	uint8 len, offset;
+	const uint8 *vsd = hdmi_edid_find_block(in_buf, 3, &len);
+
+	external_common_state->present_3d = 0;
+	if (vsd == NULL || len < 9) {
+		DEV_DBG("EDID[3D]: block-id 3 not found or not long enough\n");
+		return;
+	}
+
+	offset = !(vsd[8] & BIT(7)) ? 9 : 13;
+	DEV_DBG("EDID: 3D present @ %d = %02x\n", offset, vsd[offset]);
+	if (vsd[offset] >> 7) { /* 3D format indication present */
+		DEV_INFO("EDID: 3D present, 3D-len=%d\n", vsd[offset+1] & 0x1F);
+		external_common_state->present_3d = 1;
+	}
+}
+
+
+static void hdmi_edid_extract_latency_fields(const uint8 *in_buf)
+{
+	uint8 len;
+	const uint8 *vsd = hdmi_edid_find_block(in_buf, 3, &len);
+
+	if (vsd == NULL || len < 12 || !(vsd[8] & BIT(7))) {
+		external_common_state->video_latency = (uint16)-1;
+		external_common_state->audio_latency = (uint16)-1;
+		DEV_DBG("EDID: No audio/video latency present\n");
+	} else {
+		external_common_state->video_latency = vsd[9];
+		external_common_state->audio_latency = vsd[10];
+		DEV_DBG("EDID: video-latency=%04x, audio-latency=%04x\n",
+			external_common_state->video_latency,
+			external_common_state->audio_latency);
+	}
+}
+
+static void hdmi_edid_extract_speaker_allocation_data(const uint8 *in_buf)
+{
+	uint8 len;
+	const uint8 *sad = hdmi_edid_find_block(in_buf, 4, &len);
+
+	if (sad == NULL)
+		return;
+
+	external_common_state->speaker_allocation_block = sad[1];
+	DEV_DBG("EDID: speaker allocation data=%s%s%s%s%s%s%s\n",
+		(sad[1] & BIT(0)) ? "FL/FR," : "",
+		(sad[1] & BIT(1)) ? "LFE," : "",
+		(sad[1] & BIT(2)) ? "FC," : "",
+		(sad[1] & BIT(3)) ? "RL/RR," : "",
+		(sad[1] & BIT(4)) ? "RC," : "",
+		(sad[1] & BIT(5)) ? "FLC/FRC," : "",
+		(sad[1] & BIT(6)) ? "LFE," : "");
+}
+
+static void hdmi_edid_extract_audio_data_blocks(const uint8 *in_buf)
+{
+	uint8 len;
+	const uint8 *sad = hdmi_edid_find_block(in_buf, 1, &len);
+	uint32 *adb = external_common_state->audio_data_blocks;
+
+	if (sad == NULL)
+		return;
+
+	external_common_state->audio_data_block_cnt = 0;
+	while (len >= 3 && external_common_state->audio_data_block_cnt < 16) {
+		DEV_DBG("EDID: Audio Data Block=<ch=%d, format=%d "
+			"sampling=0x%02x bit-depth=0x%02x>\n",
+			(sad[1] & 0x7)+1, sad[1] >> 3, sad[2], sad[3]);
+		*adb++ = (uint32)sad[1] + ((uint32)sad[2] << 8)
+			+ ((uint32)sad[3] << 16);
+		++external_common_state->audio_data_block_cnt;
+		len -= 3;
+		sad += 3;
+	}
+}
+
+
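+/*
+ * Parse one 18-byte EDID detailed timing descriptor and map it to a CEA
+ * video format code via hdmi_edid_disp_mode_lut.
+ */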
+static void hdmi_edid_detail_desc(const uint8 *data_buf, uint32 *disp_mode)
+{
+	boolean	aspect_ratio_4_3    = FALSE;
+	boolean	interlaced          = FALSE;
+	uint32	active_h            = 0;
+	uint32	active_v            = 0;
+	uint32	blank_h             = 0;
+	uint32	blank_v             = 0;
+	uint32	ndx                 = 0;
+	uint32	max_num_of_elements = 0;
+	uint32	img_size_h          = 0;
+	uint32	img_size_v          = 0;
+
+	/* See VESA Spec */
+	/* EDID_TIMING_DESC_UPPER_H_NIBBLE[0x4]: Relative Offset to the EDID
+	 *   detailed timing descriptors - Upper 4 bit for each H active/blank
+	 *   field */
+	/* EDID_TIMING_DESC_H_ACTIVE[0x2]: Relative Offset to the EDID detailed
+	 *   timing descriptors - H active */
+	active_h = ((((uint32)data_buf[0x4] >> 0x4) & 0xF) << 8)
+		| data_buf[0x2];
+
+	/* EDID_TIMING_DESC_H_BLANK[0x3]: Relative Offset to the EDID detailed
+	 *   timing descriptors - H blank */
+	blank_h = (((uint32)data_buf[0x4] & 0xF) << 8)
+		| data_buf[0x3];
+
+	/* EDID_TIMING_DESC_UPPER_V_NIBBLE[0x7]: Relative Offset to the EDID
+	 *   detailed timing descriptors - Upper 4 bit for each V active/blank
+	 *   field */
+	/* EDID_TIMING_DESC_V_ACTIVE[0x5]: Relative Offset to the EDID detailed
+	 *   timing descriptors - V active */
+	active_v = ((((uint32)data_buf[0x7] >> 0x4) & 0xF) << 8)
+		| data_buf[0x5];
+
+	/* EDID_TIMING_DESC_V_BLANK[0x6]: Relative Offset to the EDID detailed
+	 *   timing descriptors - V blank */
+	blank_v = (((uint32)data_buf[0x7] & 0xF) << 8)
+		| data_buf[0x6];
+
+	/* EDID_TIMING_DESC_IMAGE_SIZE_UPPER_NIBBLE[0xE]: Relative Offset to the
+	 *   EDID detailed timing descriptors - Image Size upper nibble
+	 *   V and H */
+	/* EDID_TIMING_DESC_H_IMAGE_SIZE[0xC]: Relative Offset to the EDID
+	 *   detailed timing descriptors - H image size */
+	/* EDID_TIMING_DESC_V_IMAGE_SIZE[0xD]: Relative Offset to the EDID
+	 *   detailed timing descriptors - V image size */
+	img_size_h = ((((uint32)data_buf[0xE] >> 0x4) & 0xF) << 8)
+		| data_buf[0xC];
+	img_size_v = (((uint32)data_buf[0xE] & 0xF) << 8)
+		| data_buf[0xD];
+
+	aspect_ratio_4_3 = (img_size_h * 3 == img_size_v * 4);
+
+	max_num_of_elements  = sizeof(hdmi_edid_disp_mode_lut)
+		/ sizeof(*hdmi_edid_disp_mode_lut);
+
+	/* Break table in half and search using H Active */
+	ndx = active_h < hdmi_edid_disp_mode_lut[max_num_of_elements / 2]
+		.active_h ? 0 : max_num_of_elements / 2;
+
+	/* EDID_TIMING_DESC_INTERLACE[0xD:8]: Relative Offset to the EDID
+	 *   detailed timing descriptors - Interlace flag */
+	interlaced = (data_buf[0xD] & 0x80) >> 7;
+
+	DEV_DBG("%s: A[%ux%u] B[%ux%u] V[%ux%u] %s\n", __func__,
+		active_h, active_v, blank_h, blank_v, img_size_h, img_size_v,
+		interlaced ? "i" : "p");
+
+	*disp_mode = HDMI_VFRMT_FORCE_32BIT;
+	while (ndx < max_num_of_elements) {
+		const struct hdmi_edid_video_mode_property_type *edid =
+			hdmi_edid_disp_mode_lut+ndx;
+
+		if ((interlaced    == edid->interlaced)    &&
+			(active_h  == edid->active_h)      &&
+			(blank_h   == edid->total_blank_h) &&
+			(blank_v   == edid->total_blank_v) &&
+			((active_v == edid->active_v) ||
+			 (active_v == (edid->active_v + 1)))
+		) {
+			if (edid->aspect_ratio_4_3 && !aspect_ratio_4_3)
+				/* Aspect ratio 16:9 */
+				*disp_mode = edid->video_code + 1;
+			else
+				/* Aspect ratio 4:3 */
+				*disp_mode = edid->video_code;
+
+			DEV_DBG("%s: mode found:%d\n", __func__, *disp_mode);
+			break;
+		}
+		++ndx;
+	}
+	if (ndx == max_num_of_elements)
+		DEV_INFO("%s: *no mode* found\n", __func__);
+}
+
+static void add_supported_video_format(
+	struct hdmi_disp_mode_list_type *disp_mode_list,
+	uint32 video_format)
+{
+	const struct hdmi_disp_mode_timing_type *timing =
+		hdmi_common_get_supported_mode(video_format);
+	boolean supported = timing != NULL;
+
+	if (video_format >= HDMI_VFRMT_MAX)
+		return;
+
+	DEV_DBG("EDID: format: %d [%s], %s\n",
+		video_format, video_format_2string(video_format),
+		supported ? "Supported" : "Not-Supported");
+	if (supported)
+		disp_mode_list->disp_mode_list[
+			disp_mode_list->num_of_elements++] = video_format;
+}
+
+static void hdmi_edid_get_display_mode(const uint8 *data_buf,
+	struct hdmi_disp_mode_list_type *disp_mode_list,
+	uint32 num_og_cea_blocks)
+{
+	uint8 i			= 0;
+	uint32 video_format	= HDMI_VFRMT_640x480p60_4_3;
+	boolean has480p		= FALSE;
+	uint8 len;
+	const uint8 *svd = num_og_cea_blocks ?
+		hdmi_edid_find_block(data_buf+0x80, 2, &len) : NULL;
+
+	disp_mode_list->num_of_elements = 0;
+	if (svd != NULL) {
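+		/* Skip the CEA data block header byte; the Short Video
+		 * Descriptors follow it. */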
+		++svd;
+		for (i = 0; i < len; ++i, ++svd) {
+			/* Subtract 1 because it is zero based in the driver,
+			 * while the Video identification code is 1 based in the
+			 * CEA_861D spec */
+			video_format = (*svd & 0x7F) - 1;
+			add_supported_video_format(disp_mode_list,
+				video_format);
+			if (video_format == HDMI_VFRMT_640x480p60_4_3)
+				has480p = TRUE;
+		}
+	} else if (!num_og_cea_blocks) {
+		/* Detailed timing descriptors */
+		uint32 desc_offset = 0;
+		/* Maximum 4 timing descriptor in block 0 - No CEA
+		 * extension in this case */
+		/* EDID_FIRST_TIMING_DESC[0x36] - 1st detailed timing
+		 *   descriptor */
+		/* EDID_DETAIL_TIMING_DESC_BLCK_SZ[0x12] - Each detailed timing
+		 *   descriptor has block size of 18 */
+		while (4 > i && 0 != data_buf[0x36+desc_offset]) {
+			hdmi_edid_detail_desc(data_buf+0x36+desc_offset,
+				&video_format);
+			add_supported_video_format(disp_mode_list,
+				video_format);
+			if (video_format == HDMI_VFRMT_640x480p60_4_3)
+				has480p = TRUE;
+			desc_offset += 0x12;
+			++i;
+		}
+	} else if (1 == num_og_cea_blocks) {
+		uint32 desc_offset = 0;
+		/* Parse block 1 - CEA extension byte offset of first
+		 * detailed timing generation - offset is relevant to
+		 * the offset of block 1 */
+
+		/* EDID_CEA_EXTENSION_FIRST_DESC[0x82]: Offset to CEA
+		 * extension first timing desc - indicate the offset of
+		 * the first detailed timing descriptor */
+		 /* EDID_BLOCK_SIZE = 0x80  Each page size in the EDID ROM */
+		desc_offset = data_buf[0x82];
+		while (0 != data_buf[0x80 + desc_offset]) {
+			hdmi_edid_detail_desc(data_buf+0x80+desc_offset,
+				&video_format);
+			add_supported_video_format(disp_mode_list,
+				video_format);
+			if (video_format == HDMI_VFRMT_640x480p60_4_3)
+				has480p = TRUE;
+			desc_offset += 0x12;
+			++i;
+		}
+	}
+
+	if (!has480p)
+		/* Need to add default 640 by 480 timings, in case not described
+		 * in the EDID structure.
+		 * All DTV sink devices should support this mode */
+		add_supported_video_format(disp_mode_list,
+			HDMI_VFRMT_640x480p60_4_3);
+}
+
+static int hdmi_common_read_edid_block(int block, uint8 *edid_buf)
+{
+	uint32 ndx, check_sum;
+	int status = external_common_state->read_edid_block(block, edid_buf);
+	if (status || block > 0)
+		goto error;
+
+	/* Calculate checksum */
+	check_sum = 0;
+	for (ndx = 0; ndx < 0x80; ++ndx)
+		check_sum += edid_buf[ndx];
+
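+	/* A valid EDID block's 128 bytes sum to 0 modulo 256; the last
+	 * byte (offset 0x7F) is the checksum. */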
+	if (check_sum & 0xFF) {
+#ifdef DEBUG
+		const u8 *b = edid_buf;
+#endif
+		DEV_ERR("%s: failed CHECKSUM (read:%x, expected:%x)\n",
+			__func__, (uint8)edid_buf[0x7F], (uint8)check_sum);
+
+#ifdef DEBUG
+		for (ndx = 0; ndx < 0x100; ndx += 16)
+			DEV_DBG("EDID[%02x-%02x] %02x %02x %02x %02x  "
+				"%02x %02x %02x %02x    %02x %02x %02x %02x  "
+				"%02x %02x %02x %02x\n", ndx, ndx+15,
+				b[ndx+0], b[ndx+1], b[ndx+2], b[ndx+3],
+				b[ndx+4], b[ndx+5], b[ndx+6], b[ndx+7],
+				b[ndx+8], b[ndx+9], b[ndx+10], b[ndx+11],
+				b[ndx+12], b[ndx+13], b[ndx+14], b[ndx+15]);
+#endif
+		status = -EPROTO;
+		goto error;
+	}
+
+error:
+	return status;
+}
+
+static boolean check_edid_header(const uint8 *edid_buf)
+{
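+	/* A valid EDID block 0 begins with the fixed 8-byte header
+	 * 00 FF FF FF FF FF FF 00. */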
+	return (edid_buf[0] == 0x00) && (edid_buf[1] == 0xff)
+		&& (edid_buf[2] == 0xff) && (edid_buf[3] == 0xff)
+		&& (edid_buf[4] == 0xff) && (edid_buf[5] == 0xff)
+		&& (edid_buf[6] == 0xff) && (edid_buf[7] == 0x00);
+}
+
+int hdmi_common_read_edid(void)
+{
+	int status = 0;
+	uint32 cea_extension_ver = 0;
+	uint32 num_og_cea_blocks  = 0;
+	uint32 ieee_reg_id = 0;
+	uint32 i = 1;
+	char vendor_id[5];
+	/* EDID_BLOCK_SIZE[0x80] Each page size in the EDID ROM */
+	uint8 edid_buf[0x80 * 4];
+
+	external_common_state->present_3d = 0;
+	memset(&external_common_state->disp_mode_list, 0,
+		sizeof(external_common_state->disp_mode_list));
+	memset(edid_buf, 0, sizeof(edid_buf));
+
+	status = hdmi_common_read_edid_block(0, edid_buf);
+	if (status || !check_edid_header(edid_buf)) {
+		if (!status)
+			status = -EPROTO;
+		DEV_ERR("%s: edid read block(0) failed: %d "
+			"[%02x%02x%02x%02x%02x%02x%02x%02x]\n", __func__,
+			status,
+			edid_buf[0], edid_buf[1], edid_buf[2], edid_buf[3],
+			edid_buf[4], edid_buf[5], edid_buf[6], edid_buf[7]);
+		goto error;
+	}
+	hdmi_edid_extract_vendor_id(edid_buf, vendor_id);
+
+	/* EDID_CEA_EXTENSION_FLAG[0x7E] - number of CEA extension blocks */
+	num_og_cea_blocks = edid_buf[0x7E];
+
+	DEV_DBG("%s: No. of CEA blocks is [%u]\n", __func__,
+		num_og_cea_blocks);
+	/* Find out any CEA extension blocks following block 0 */
+	switch (num_og_cea_blocks) {
+	case 0: /* No CEA extension */
+		external_common_state->hdmi_sink = false;
+		DEV_DBG("HDMI DVI mode: %s\n",
+			external_common_state->hdmi_sink ? "no" : "yes");
+		break;
+	case 1: /* Read block 1 */
+		status = hdmi_common_read_edid_block(1, edid_buf+0x80);
+		if (status) {
+			DEV_ERR("%s: ddc read block(1) failed: %d\n", __func__,
+				status);
+			goto error;
+		}
+		if (edid_buf[0x80] != 2)
+			num_og_cea_blocks = 0;
+		if (num_og_cea_blocks) {
+			ieee_reg_id =
+				hdmi_edid_extract_ieee_reg_id(edid_buf+0x80);
+			if (ieee_reg_id == 0x0c03)
+				external_common_state->hdmi_sink = TRUE;
+			else
+				external_common_state->hdmi_sink = FALSE;
+			hdmi_edid_extract_latency_fields(edid_buf+0x80);
+			hdmi_edid_extract_speaker_allocation_data(
+				edid_buf+0x80);
+			hdmi_edid_extract_audio_data_blocks(edid_buf+0x80);
+			hdmi_edid_extract_3d_present(edid_buf+0x80);
+		}
+		break;
+	case 2:
+	case 3:
+	case 4:
+		for (i = 1; i <= num_og_cea_blocks; i++) {
+			if (!(i % 2)) {
+					status = hdmi_common_read_edid_block(i,
+								edid_buf+0x00);
+					if (status) {
+						DEV_ERR("%s: ddc read block(%d) "
+							"failed: %d\n", __func__,
+							i, status);
+						goto error;
+					}
+			} else {
+				status = hdmi_common_read_edid_block(i,
+							edid_buf+0x80);
+				if (status) {
+					DEV_ERR("%s: ddc read block(%d) "
+						"failed: %d\n", __func__, i,
+						status);
+					goto error;
+				}
+			}
+		}
+		break;
+	default:
+		DEV_ERR("%s: ddc read failed, unsupported number of CEA blocks: %d\n",
+			__func__, num_og_cea_blocks);
+		status = -EPROTO;
+		goto error;
+	}
+
+	if (num_og_cea_blocks) {
+		/* EDID_CEA_EXTENSION_VERSION[0x81]: Offset to CEA extension
+		 * version number - v1,v2,v3 (v1 is seldom, v2 is obsolete,
+		 * v3 most common) */
+		cea_extension_ver = edid_buf[0x81];
+	}
+
+	/* EDID_VERSION[0x12] - EDID Version */
+	/* EDID_REVISION[0x13] - EDID Revision */
+	DEV_INFO("EDID (V=%d.%d, #CEABlocks=%d[V%d], ID=%s, IEEE=%04x, "
+		"EDID-Ext=0x%02x)\n", edid_buf[0x12], edid_buf[0x13],
+		num_og_cea_blocks, cea_extension_ver, vendor_id, ieee_reg_id,
+		edid_buf[0x80]);
+
+	hdmi_edid_get_display_mode(edid_buf,
+		&external_common_state->disp_mode_list, num_og_cea_blocks);
+
+	return 0;
+
+error:
+	external_common_state->disp_mode_list.num_of_elements = 1;
+	external_common_state->disp_mode_list.disp_mode_list[0] =
+		external_common_state->video_resolution;
+	return status;
+}
+EXPORT_SYMBOL(hdmi_common_read_edid);
+
+bool hdmi_common_get_video_format_from_drv_data(struct msm_fb_data_type *mfd)
+{
+	uint32 format;
+	struct fb_var_screeninfo *var = &mfd->fbi->var;
+	bool changed = TRUE;
+
+	if (var->reserved[3]) {
+		format = var->reserved[3]-1;
+	} else {
+		DEV_DBG("detecting resolution from %dx%d; use var->reserved[3]"
+			" to specify the mode\n", mfd->var_xres, mfd->var_yres);
+		switch (mfd->var_xres) {
+		default:
+		case  640:
+			format = HDMI_VFRMT_640x480p60_4_3;
+			break;
+		case  720:
+			format = (mfd->var_yres == 480)
+				? HDMI_VFRMT_720x480p60_16_9
+				: HDMI_VFRMT_720x576p50_16_9;
+			break;
+		case 1280:
+			format = HDMI_VFRMT_1280x720p60_16_9;
+			break;
+		case 1440:
+			format = (mfd->var_yres == 480)
+				? HDMI_VFRMT_1440x480i60_16_9
+				: HDMI_VFRMT_1440x576i50_16_9;
+			break;
+		case 1920:
+			format = HDMI_VFRMT_1920x1080p60_16_9;
+			break;
+		}
+	}
+
+	changed = external_common_state->video_resolution != format;
+	if (changed)
+		DEV_DBG("switching %s => %s\n", video_format_2string(
+			external_common_state->video_resolution),
+			video_format_2string(format));
+	else
+		DEV_DBG("resolution %s\n", video_format_2string(
+			external_common_state->video_resolution));
+	external_common_state->video_resolution = format;
+	return changed;
+}
+EXPORT_SYMBOL(hdmi_common_get_video_format_from_drv_data);
+
+const struct hdmi_disp_mode_timing_type *hdmi_common_get_mode(uint32 mode)
+{
+	if (mode >= HDMI_VFRMT_MAX)
+		return NULL;
+
+	return &hdmi_common_supported_video_mode_lut[mode];
+}
+EXPORT_SYMBOL(hdmi_common_get_mode);
+
+const struct hdmi_disp_mode_timing_type *hdmi_common_get_supported_mode(
+	uint32 mode)
+{
+	const struct hdmi_disp_mode_timing_type *ret
+		= hdmi_common_get_mode(mode);
+
+	if (ret == NULL || !ret->supported)
+		return NULL;
+	return ret;
+}
+EXPORT_SYMBOL(hdmi_common_get_supported_mode);
+
+void hdmi_common_init_panel_info(struct msm_panel_info *pinfo)
+{
+	const struct hdmi_disp_mode_timing_type *timing =
+		hdmi_common_get_supported_mode(
+		external_common_state->video_resolution);
+
+	if (timing == NULL)
+		return;
+
+	pinfo->xres = timing->active_h;
+	pinfo->yres = timing->active_v;
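+	/* pixel_freq is stored in kHz; the LCDC clock rate is in Hz */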
+	pinfo->clk_rate = timing->pixel_freq*1000;
+
+	pinfo->lcdc.h_back_porch = timing->back_porch_h;
+	pinfo->lcdc.h_front_porch = timing->front_porch_h;
+	pinfo->lcdc.h_pulse_width = timing->pulse_width_h;
+	pinfo->lcdc.v_back_porch = timing->back_porch_v;
+	pinfo->lcdc.v_front_porch = timing->front_porch_v;
+	pinfo->lcdc.v_pulse_width = timing->pulse_width_v;
+
+	pinfo->type = DTV_PANEL;
+	pinfo->pdest = DISPLAY_2;
+	pinfo->wait_cycle = 0;
+	pinfo->bpp = 24;
+	pinfo->fb_num = 1;
+
+	/* blk */
+	pinfo->lcdc.border_clr = 0;
+	/* blue */
+	pinfo->lcdc.underflow_clr = 0xff;
+	pinfo->lcdc.hsync_skew = 0;
+}
+EXPORT_SYMBOL(hdmi_common_init_panel_info);
+#endif
diff --git a/drivers/video/msm/external_common.h b/drivers/video/msm/external_common.h
new file mode 100644
index 0000000..30a8f48
--- /dev/null
+++ b/drivers/video/msm/external_common.h
@@ -0,0 +1,251 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __EXTERNAL_COMMON_H__
+#define __EXTERNAL_COMMON_H__
+
+#ifdef DEBUG
+#ifndef DEV_DBG_PREFIX
+#define DEV_DBG_PREFIX "EXT_INTERFACE: "
+#endif
+#define DEV_DBG(args...)	pr_debug(DEV_DBG_PREFIX args)
+#else
+#define DEV_DBG(args...)	(void)0
+#endif /* DEBUG */
+#define DEV_INFO(args...)	dev_info(external_common_state->dev, args)
+#define DEV_WARN(args...)	dev_warn(external_common_state->dev, args)
+#define DEV_ERR(args...)	dev_err(external_common_state->dev, args)
+
+#ifdef CONFIG_FB_MSM_TVOUT
+#define TVOUT_VFRMT_NTSC_M_720x480i		0
+#define TVOUT_VFRMT_NTSC_J_720x480i		1
+#define TVOUT_VFRMT_PAL_BDGHIN_720x576i		2
+#define TVOUT_VFRMT_PAL_M_720x480i		3
+#define TVOUT_VFRMT_PAL_N_720x480i		4
+#elif defined(CONFIG_FB_MSM_HDMI_COMMON)
+/* all video formats defined by EIA CEA 861D */
+#define HDMI_VFRMT_640x480p60_4_3	0
+#define HDMI_VFRMT_720x480p60_4_3	1
+#define HDMI_VFRMT_720x480p60_16_9	2
+#define HDMI_VFRMT_1280x720p60_16_9	3
+#define HDMI_VFRMT_1920x1080i60_16_9	4
+#define HDMI_VFRMT_720x480i60_4_3	5
+#define HDMI_VFRMT_1440x480i60_4_3	HDMI_VFRMT_720x480i60_4_3
+#define HDMI_VFRMT_720x480i60_16_9	6
+#define HDMI_VFRMT_1440x480i60_16_9	HDMI_VFRMT_720x480i60_16_9
+#define HDMI_VFRMT_720x240p60_4_3	7
+#define HDMI_VFRMT_1440x240p60_4_3	HDMI_VFRMT_720x240p60_4_3
+#define HDMI_VFRMT_720x240p60_16_9	8
+#define HDMI_VFRMT_1440x240p60_16_9	HDMI_VFRMT_720x240p60_16_9
+#define HDMI_VFRMT_2880x480i60_4_3	9
+#define HDMI_VFRMT_2880x480i60_16_9	10
+#define HDMI_VFRMT_2880x240p60_4_3	11
+#define HDMI_VFRMT_2880x240p60_16_9	12
+#define HDMI_VFRMT_1440x480p60_4_3	13
+#define HDMI_VFRMT_1440x480p60_16_9	14
+#define HDMI_VFRMT_1920x1080p60_16_9	15
+#define HDMI_VFRMT_720x576p50_4_3	16
+#define HDMI_VFRMT_720x576p50_16_9	17
+#define HDMI_VFRMT_1280x720p50_16_9	18
+#define HDMI_VFRMT_1920x1080i50_16_9	19
+#define HDMI_VFRMT_720x576i50_4_3	20
+#define HDMI_VFRMT_1440x576i50_4_3	HDMI_VFRMT_720x576i50_4_3
+#define HDMI_VFRMT_720x576i50_16_9	21
+#define HDMI_VFRMT_1440x576i50_16_9	HDMI_VFRMT_720x576i50_16_9
+#define HDMI_VFRMT_720x288p50_4_3	22
+#define HDMI_VFRMT_1440x288p50_4_3	HDMI_VFRMT_720x288p50_4_3
+#define HDMI_VFRMT_720x288p50_16_9	23
+#define HDMI_VFRMT_1440x288p50_16_9	HDMI_VFRMT_720x288p50_16_9
+#define HDMI_VFRMT_2880x576i50_4_3	24
+#define HDMI_VFRMT_2880x576i50_16_9	25
+#define HDMI_VFRMT_2880x288p50_4_3	26
+#define HDMI_VFRMT_2880x288p50_16_9	27
+#define HDMI_VFRMT_1440x576p50_4_3	28
+#define HDMI_VFRMT_1440x576p50_16_9	29
+#define HDMI_VFRMT_1920x1080p50_16_9	30
+#define HDMI_VFRMT_1920x1080p24_16_9	31
+#define HDMI_VFRMT_1920x1080p25_16_9	32
+#define HDMI_VFRMT_1920x1080p30_16_9	33
+#define HDMI_VFRMT_2880x480p60_4_3	34
+#define HDMI_VFRMT_2880x480p60_16_9	35
+#define HDMI_VFRMT_2880x576p50_4_3	36
+#define HDMI_VFRMT_2880x576p50_16_9	37
+#define HDMI_VFRMT_1920x1250i50_16_9	38
+#define HDMI_VFRMT_1920x1080i100_16_9	39
+#define HDMI_VFRMT_1280x720p100_16_9	40
+#define HDMI_VFRMT_720x576p100_4_3	41
+#define HDMI_VFRMT_720x576p100_16_9	42
+#define HDMI_VFRMT_720x576i100_4_3	43
+#define HDMI_VFRMT_1440x576i100_4_3	HDMI_VFRMT_720x576i100_4_3
+#define HDMI_VFRMT_720x576i100_16_9	44
+#define HDMI_VFRMT_1440x576i100_16_9	HDMI_VFRMT_720x576i100_16_9
+#define HDMI_VFRMT_1920x1080i120_16_9	45
+#define HDMI_VFRMT_1280x720p120_16_9	46
+#define HDMI_VFRMT_720x480p120_4_3	47
+#define HDMI_VFRMT_720x480p120_16_9	48
+#define HDMI_VFRMT_720x480i120_4_3	49
+#define HDMI_VFRMT_1440x480i120_4_3	HDMI_VFRMT_720x480i120_4_3
+#define HDMI_VFRMT_720x480i120_16_9	50
+#define HDMI_VFRMT_1440x480i120_16_9	HDMI_VFRMT_720x480i120_16_9
+#define HDMI_VFRMT_720x576p200_4_3	51
+#define HDMI_VFRMT_720x576p200_16_9	52
+#define HDMI_VFRMT_720x576i200_4_3	53
+#define HDMI_VFRMT_1440x576i200_4_3	HDMI_VFRMT_720x576i200_4_3
+#define HDMI_VFRMT_720x576i200_16_9	54
+#define HDMI_VFRMT_1440x576i200_16_9	HDMI_VFRMT_720x576i200_16_9
+#define HDMI_VFRMT_720x480p240_4_3	55
+#define HDMI_VFRMT_720x480p240_16_9	56
+#define HDMI_VFRMT_720x480i240_4_3	57
+#define HDMI_VFRMT_1440x480i240_4_3	HDMI_VFRMT_720x480i240_4_3
+#define HDMI_VFRMT_720x480i240_16_9	58
+#define HDMI_VFRMT_1440x480i240_16_9	HDMI_VFRMT_720x480i240_16_9
+#define HDMI_VFRMT_MAX			59
+#define HDMI_VFRMT_FORCE_32BIT		0x7FFFFFFF
+
+struct hdmi_disp_mode_timing_type {
+	uint32	video_format;
+	uint32	active_h;
+	uint32	front_porch_h;
+	uint32	pulse_width_h;
+	uint32	back_porch_h;
+	boolean	active_low_h;
+	uint32	active_v;
+	uint32	front_porch_v;
+	uint32	pulse_width_v;
+	uint32	back_porch_v;
+	boolean	active_low_v;
+	/* Must divide by 1000 to get the actual frequency in MHz */
+	uint32	pixel_freq;
+	/* Must divide by 1000 to get the actual frequency in Hz */
+	uint32	refresh_rate;
+	boolean	interlaced;
+	boolean	supported;
+};
+
+#define HDMI_SETTINGS_640x480p60_4_3					\
+	{HDMI_VFRMT_640x480p60_4_3,      640,  16,  96,  48,  TRUE,	\
+	 480, 10, 2, 33, TRUE, 25200, 60000, FALSE, TRUE}
+#define HDMI_SETTINGS_720x480p60_4_3					\
+	{HDMI_VFRMT_720x480p60_4_3,      720,  16,  62,  60,  TRUE,	\
+	 480, 9, 6, 30,  TRUE, 27030, 60000, FALSE, TRUE}
+#define HDMI_SETTINGS_720x480p60_16_9					\
+	{HDMI_VFRMT_720x480p60_16_9,     720,  16,  62,  60,  TRUE,	\
+	 480, 9, 6, 30,  TRUE, 27030, 60000, FALSE, TRUE}
+#define HDMI_SETTINGS_1280x720p60_16_9					\
+	{HDMI_VFRMT_1280x720p60_16_9,    1280, 110, 40,  220, FALSE,	\
+	 720, 5, 5, 20, FALSE, 74250, 60000, FALSE, TRUE}
+#define HDMI_SETTINGS_1920x1080i60_16_9					\
+	{HDMI_VFRMT_1920x1080i60_16_9,   1920, 88,  44,  148, FALSE,	\
+	 540, 2, 5, 5, FALSE, 74250, 60000, FALSE, TRUE}
+#define HDMI_SETTINGS_1440x480i60_4_3					\
+	{HDMI_VFRMT_1440x480i60_4_3,     1440, 38,  124, 114, TRUE,	\
+	 240, 4, 3, 15, TRUE, 27000, 60000, TRUE, TRUE}
+#define HDMI_SETTINGS_1440x480i60_16_9					\
+	{HDMI_VFRMT_1440x480i60_16_9,    1440, 38,  124, 114, TRUE,	\
+	 240, 4, 3, 15, TRUE, 27000, 60000, TRUE, TRUE}
+#define HDMI_SETTINGS_1920x1080p60_16_9					\
+	{HDMI_VFRMT_1920x1080p60_16_9,   1920, 88,  44,  148,  FALSE,	\
+	 1080, 4, 5, 36, FALSE, 148500, 60000, FALSE, TRUE}
+#define HDMI_SETTINGS_720x576p50_4_3					\
+	{HDMI_VFRMT_720x576p50_4_3,      720,  12,  64,  68,   TRUE,	\
+	 576,  5, 5, 39, TRUE, 27000, 50000, FALSE, TRUE}
+#define HDMI_SETTINGS_720x576p50_16_9					\
+	{HDMI_VFRMT_720x576p50_16_9,     720,  12,  64,  68,   TRUE,	\
+	 576,  5, 5, 39, TRUE, 27000, 50000, FALSE, TRUE}
+#define HDMI_SETTINGS_1280x720p50_16_9					\
+	{HDMI_VFRMT_1280x720p50_16_9,    1280, 440, 40,  220,  FALSE,	\
+	 720,  5, 5, 20, FALSE, 74250, 50000, FALSE, TRUE}
+#define HDMI_SETTINGS_1440x576i50_4_3					\
+	{HDMI_VFRMT_1440x576i50_4_3,     1440, 24,  126, 138,  TRUE,	\
+	 288,  2, 3, 19, TRUE, 27000, 50000, TRUE, TRUE}
+#define HDMI_SETTINGS_1440x576i50_16_9					\
+	{HDMI_VFRMT_1440x576i50_16_9,    1440, 24,  126, 138,  TRUE,	\
+	 288,  2, 3, 19, TRUE, 27000, 50000, TRUE, TRUE}
+#define HDMI_SETTINGS_1920x1080p50_16_9					\
+	{HDMI_VFRMT_1920x1080p50_16_9,   1920,  528,  44,  148,  FALSE,	\
+	 1080, 4, 5, 36, FALSE, 148500, 50000, FALSE, TRUE}
+#define HDMI_SETTINGS_1920x1080p24_16_9					\
+	{HDMI_VFRMT_1920x1080p24_16_9,   1920,  638,  44,  148,  FALSE,	\
+	 1080, 4, 5, 36, FALSE, 74250, 24000, FALSE, TRUE}
+#define HDMI_SETTINGS_1920x1080p25_16_9					\
+	{HDMI_VFRMT_1920x1080p25_16_9,   1920,  528,  44,  148,  FALSE,	\
+	 1080, 4, 5, 36, FALSE, 74250, 25000, FALSE, TRUE}
+#define HDMI_SETTINGS_1920x1080p30_16_9					\
+	{HDMI_VFRMT_1920x1080p30_16_9,   1920,  88,   44,  148,  FALSE,	\
+	 1080, 4, 5, 36, FALSE, 74250, 30000, FALSE, TRUE}
+
+/* A lookup table for all the supported display modes by the HDMI
+ * hardware and driver.  Use HDMI_SETUP_LUT in the module init to
+ * setup the LUT with the supported modes. */
+extern struct hdmi_disp_mode_timing_type
+	hdmi_common_supported_video_mode_lut[HDMI_VFRMT_MAX];
+
+/* Structure that encapsulates all the supported display modes by the HDMI sink
+ * device */
+struct hdmi_disp_mode_list_type {
+	uint32	disp_mode_list[HDMI_VFRMT_MAX];
+	uint32	num_of_elements;
+};
+#endif
+
+struct external_common_state_type {
+	boolean hpd_state;
+	struct kobject *uevent_kobj;
+	uint32 video_resolution;
+	struct device *dev;
+#ifdef CONFIG_FB_MSM_HDMI_3D
+	boolean format_3d;
+	void (*switch_3d)(boolean on);
+#endif
+#ifdef CONFIG_FB_MSM_HDMI_COMMON
+	boolean hdcp_active;
+	boolean hpd_feature_on;
+	boolean hdmi_sink;
+	struct hdmi_disp_mode_list_type disp_mode_list;
+	uint8 speaker_allocation_block;
+	uint16 video_latency, audio_latency;
+	uint8 audio_data_block_cnt;
+	boolean present_3d;
+	boolean present_hdcp;
+	uint32 audio_data_blocks[16];
+	int (*read_edid_block)(int block, uint8 *edid_buf);
+	int (*hpd_feature)(int on);
+#endif
+};
+
+/* The external interface driver needs to initialize the common state. */
+extern struct external_common_state_type *external_common_state;
+extern struct mutex external_common_state_hpd_mutex;
+
+#ifdef CONFIG_FB_MSM_HDMI_COMMON
+#define VFRMT_NOT_SUPPORTED(VFRMT) \
+	{VFRMT, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, FALSE}
+#define HDMI_SETUP_LUT(MODE) do {					\
+		struct hdmi_disp_mode_timing_type mode			\
+			= HDMI_SETTINGS_ ## MODE;			\
+		hdmi_common_supported_video_mode_lut[mode.video_format]	\
+			= mode;						\
+	} while (0)
+
+int hdmi_common_read_edid(void);
+const char *video_format_2string(uint32 format);
+bool hdmi_common_get_video_format_from_drv_data(struct msm_fb_data_type *mfd);
+const struct hdmi_disp_mode_timing_type *hdmi_common_get_mode(uint32 mode);
+const struct hdmi_disp_mode_timing_type *hdmi_common_get_supported_mode(
+	uint32 mode);
+void hdmi_common_init_panel_info(struct msm_panel_info *pinfo);
+#endif
+
+int external_common_state_create(struct platform_device *pdev);
+void external_common_state_remove(void);
+
+#endif /* __EXTERNAL_COMMON_H__ */
diff --git a/drivers/video/msm/hdmi_msm.c b/drivers/video/msm/hdmi_msm.c
new file mode 100644
index 0000000..53dc911
--- /dev/null
+++ b/drivers/video/msm/hdmi_msm.c
@@ -0,0 +1,3731 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+#define DEV_DBG_PREFIX "HDMI: "
+/* #define REG_DUMP */
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/mutex.h>
+#include <mach/msm_hdmi_audio.h>
+#include <mach/clk.h>
+#include <mach/msm_iomap.h>
+
+#include "msm_fb.h"
+#include "hdmi_msm.h"
+
+/* Supported HDMI Audio channels */
+#define MSM_HDMI_AUDIO_CHANNEL_2		0
+#define MSM_HDMI_AUDIO_CHANNEL_4		1
+#define MSM_HDMI_AUDIO_CHANNEL_6		2
+#define MSM_HDMI_AUDIO_CHANNEL_8		3
+#define MSM_HDMI_AUDIO_CHANNEL_MAX		4
+#define MSM_HDMI_AUDIO_CHANNEL_FORCE_32BIT	0x7FFFFFFF
+
+/* Supported HDMI Audio sample rates */
+#define MSM_HDMI_SAMPLE_RATE_32KHZ		0
+#define MSM_HDMI_SAMPLE_RATE_44_1KHZ		1
+#define MSM_HDMI_SAMPLE_RATE_48KHZ		2
+#define MSM_HDMI_SAMPLE_RATE_88_2KHZ		3
+#define MSM_HDMI_SAMPLE_RATE_96KHZ		4
+#define MSM_HDMI_SAMPLE_RATE_176_4KHZ		5
+#define MSM_HDMI_SAMPLE_RATE_192KHZ		6
+#define MSM_HDMI_SAMPLE_RATE_MAX		7
+#define MSM_HDMI_SAMPLE_RATE_FORCE_32BIT	0x7FFFFFFF
+
+struct workqueue_struct *hdmi_work_queue;
+struct hdmi_msm_state_type *hdmi_msm_state;
+
+static DEFINE_MUTEX(hdmi_msm_state_mutex);
+static DEFINE_MUTEX(hdcp_auth_state_mutex);
+
+#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
+static void hdmi_msm_hdcp_enable(void);
+#else
+static inline void hdmi_msm_hdcp_enable(void) {}
+#endif
+
+uint32 hdmi_msm_get_io_base(void)
+{
+	return (uint32)MSM_HDMI_BASE;
+}
+EXPORT_SYMBOL(hdmi_msm_get_io_base);
+
+/* Table indicating the video format supported by the HDMI TX Core v1.0 */
+/* Valid Pixel-Clock rates: 25.2MHz, 27MHz, 27.03MHz, 74.25MHz, 148.5MHz */
+static void hdmi_msm_setup_video_mode_lut(void)
+{
+	HDMI_SETUP_LUT(640x480p60_4_3);
+	HDMI_SETUP_LUT(720x480p60_4_3);
+	HDMI_SETUP_LUT(720x480p60_16_9);
+	HDMI_SETUP_LUT(1280x720p60_16_9);
+	HDMI_SETUP_LUT(1920x1080i60_16_9);
+	HDMI_SETUP_LUT(1440x480i60_4_3);
+	HDMI_SETUP_LUT(1440x480i60_16_9);
+	HDMI_SETUP_LUT(1920x1080p60_16_9);
+	HDMI_SETUP_LUT(720x576p50_4_3);
+	HDMI_SETUP_LUT(720x576p50_16_9);
+	HDMI_SETUP_LUT(1280x720p50_16_9);
+	HDMI_SETUP_LUT(1440x576i50_4_3);
+	HDMI_SETUP_LUT(1440x576i50_16_9);
+	HDMI_SETUP_LUT(1920x1080p50_16_9);
+	HDMI_SETUP_LUT(1920x1080p24_16_9);
+	HDMI_SETUP_LUT(1920x1080p25_16_9);
+	HDMI_SETUP_LUT(1920x1080p30_16_9);
+}
+
+#ifdef PORT_DEBUG
+const char *hdmi_msm_name(uint32 offset)
+{
+	switch (offset) {
+	case 0x0000: return "CTRL";
+	case 0x0020: return "AUDIO_PKT_CTRL1";
+	case 0x0024: return "ACR_PKT_CTRL";
+	case 0x0028: return "VBI_PKT_CTRL";
+	case 0x002C: return "INFOFRAME_CTRL0";
+#ifdef CONFIG_FB_MSM_HDMI_3D
+	case 0x0034: return "GEN_PKT_CTRL";
+#endif
+	case 0x003C: return "ACP";
+	case 0x0040: return "GC";
+	case 0x0044: return "AUDIO_PKT_CTRL2";
+	case 0x0048: return "ISRC1_0";
+	case 0x004C: return "ISRC1_1";
+	case 0x0050: return "ISRC1_2";
+	case 0x0054: return "ISRC1_3";
+	case 0x0058: return "ISRC1_4";
+	case 0x005C: return "ISRC2_0";
+	case 0x0060: return "ISRC2_1";
+	case 0x0064: return "ISRC2_2";
+	case 0x0068: return "ISRC2_3";
+	case 0x006C: return "AVI_INFO0";
+	case 0x0070: return "AVI_INFO1";
+	case 0x0074: return "AVI_INFO2";
+	case 0x0078: return "AVI_INFO3";
+#ifdef CONFIG_FB_MSM_HDMI_3D
+	case 0x0084: return "GENERIC0_HDR";
+	case 0x0088: return "GENERIC0_0";
+	case 0x008C: return "GENERIC0_1";
+#endif
+	case 0x00C4: return "ACR_32_0";
+	case 0x00C8: return "ACR_32_1";
+	case 0x00CC: return "ACR_44_0";
+	case 0x00D0: return "ACR_44_1";
+	case 0x00D4: return "ACR_48_0";
+	case 0x00D8: return "ACR_48_1";
+	case 0x00E4: return "AUDIO_INFO0";
+	case 0x00E8: return "AUDIO_INFO1";
+#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
+	case 0x0110: return "HDCP_CTRL";
+	case 0x0114: return "HDCP_DEBUG_CTRL";
+	case 0x0118: return "HDCP_INT_CTRL";
+	case 0x011C: return "HDCP_LINK0_STATUS";
+	case 0x012C: return "HDCP_ENTROPY_CTRL0";
+	case 0x0130: return "HDCP_RESET";
+	case 0x0134: return "HDCP_RCVPORT_DATA0";
+	case 0x0138: return "HDCP_RCVPORT_DATA1";
+	case 0x013C: return "HDCP_RCVPORT_DATA2";
+	case 0x0144: return "HDCP_RCVPORT_DATA3";
+	case 0x0148: return "HDCP_RCVPORT_DATA4";
+	case 0x014C: return "HDCP_RCVPORT_DATA5";
+	case 0x0150: return "HDCP_RCVPORT_DATA6";
+	case 0x0168: return "HDCP_RCVPORT_DATA12";
+#endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT */
+	case 0x01D0: return "AUDIO_CFG";
+	case 0x0208: return "USEC_REFTIMER";
+	case 0x020C: return "DDC_CTRL";
+	case 0x0214: return "DDC_INT_CTRL";
+	case 0x0218: return "DDC_SW_STATUS";
+	case 0x021C: return "DDC_HW_STATUS";
+	case 0x0220: return "DDC_SPEED";
+	case 0x0224: return "DDC_SETUP";
+	case 0x0228: return "DDC_TRANS0";
+	case 0x022C: return "DDC_TRANS1";
+	case 0x0238: return "DDC_DATA";
+	case 0x0250: return "HPD_INT_STATUS";
+	case 0x0254: return "HPD_INT_CTRL";
+	case 0x0258: return "HPD_CTRL";
+#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
+	case 0x025C: return "HDCP_ENTROPY_CTRL1";
+#endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT */
+	case 0x027C: return "DDC_REF";
+#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
+	case 0x0284: return "HDCP_SW_UPPER_AKSV";
+	case 0x0288: return "HDCP_SW_LOWER_AKSV";
+#endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT */
+	case 0x02B4: return "ACTIVE_H";
+	case 0x02B8: return "ACTIVE_V";
+	case 0x02BC: return "ACTIVE_V_F2";
+	case 0x02C0: return "TOTAL";
+	case 0x02C4: return "V_TOTAL_F2";
+	case 0x02C8: return "FRAME_CTRL";
+	case 0x02CC: return "AUD_INT";
+	case 0x0300: return "PHY_REG0";
+	case 0x0304: return "PHY_REG1";
+	case 0x0308: return "PHY_REG2";
+	case 0x030C: return "PHY_REG3";
+	case 0x0310: return "PHY_REG4";
+	case 0x0314: return "PHY_REG5";
+	case 0x0318: return "PHY_REG6";
+	case 0x031C: return "PHY_REG7";
+	case 0x0320: return "PHY_REG8";
+	case 0x0324: return "PHY_REG9";
+	case 0x0328: return "PHY_REG10";
+	case 0x032C: return "PHY_REG11";
+	case 0x0330: return "PHY_REG12";
+	default: return "???";
+	}
+}
+
+void hdmi_outp(uint32 offset, uint32 value)
+{
+	uint32 in_val;
+
+	outpdw(MSM_HDMI_BASE+offset, value);
+	in_val = inpdw(MSM_HDMI_BASE+offset);
+	DEV_DBG("HDMI[%04x] => %08x [%08x] %s\n",
+		offset, value, in_val, hdmi_msm_name(offset));
+}
+
+uint32 hdmi_inp(uint32 offset)
+{
+	uint32 value = inpdw(MSM_HDMI_BASE+offset);
+	DEV_DBG("HDMI[%04x] <= %08x %s\n",
+		offset, value, hdmi_msm_name(offset));
+	return value;
+}
+#endif /* PORT_DEBUG */
+
+static void hdmi_msm_turn_on(void);
+static int hdmi_msm_audio_off(void);
+static int hdmi_msm_read_edid(void);
+static void hdmi_msm_hpd_off(void);
+
+static void hdmi_msm_hpd_state_work(struct work_struct *work)
+{
+	boolean hpd_state;
+	char *envp[2];
+
+	if (!hdmi_msm_state || !hdmi_msm_state->hpd_initialized ||
+		!MSM_HDMI_BASE) {
+		DEV_DBG("%s: ignored, probe failed\n", __func__);
+		return;
+	}
+#ifdef CONFIG_SUSPEND
+	mutex_lock(&hdmi_msm_state_mutex);
+	if (hdmi_msm_state->pm_suspended) {
+		mutex_unlock(&hdmi_msm_state_mutex);
+		DEV_WARN("%s: ignored, pm_suspended\n", __func__);
+		return;
+	}
+	mutex_unlock(&hdmi_msm_state_mutex);
+#endif
+
+	/* HPD_INT_STATUS[0x0250] */
+	hpd_state = (HDMI_INP(0x0250) & 0x2) >> 1;
+	mutex_lock(&external_common_state_hpd_mutex);
+	mutex_lock(&hdmi_msm_state_mutex);
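+	/* Debounce: act only once two consecutive readouts (the IRQ and the
+	 * HZ/2 timer) agree on the same HPD state. */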
+	if ((external_common_state->hpd_state != hpd_state) || (hdmi_msm_state->
+			hpd_prev_state != external_common_state->hpd_state)) {
+		external_common_state->hpd_state = hpd_state;
+		hdmi_msm_state->hpd_prev_state =
+				external_common_state->hpd_state;
+		DEV_DBG("%s: state not stable yet, wait again (%d|%d|%d)\n",
+			__func__, hdmi_msm_state->hpd_prev_state,
+			external_common_state->hpd_state, hpd_state);
+		mutex_unlock(&external_common_state_hpd_mutex);
+		hdmi_msm_state->hpd_stable = 0;
+		mutex_unlock(&hdmi_msm_state_mutex);
+		mod_timer(&hdmi_msm_state->hpd_state_timer, jiffies + HZ/2);
+		return;
+	}
+	mutex_unlock(&external_common_state_hpd_mutex);
+
+	if (hdmi_msm_state->hpd_stable++) {
+		mutex_unlock(&hdmi_msm_state_mutex);
+		DEV_DBG("%s: no more timer, depending on IRQ now\n",
+			__func__);
+		return;
+	}
+
+	hdmi_msm_state->hpd_stable = 1;
+	DEV_INFO("HDMI HPD: event detected\n");
+
+	if (!hdmi_msm_state->hpd_cable_chg_detected) {
+		mutex_unlock(&hdmi_msm_state_mutex);
+		if (hpd_state) {
+			if (!external_common_state->
+					disp_mode_list.num_of_elements)
+				hdmi_msm_read_edid();
+			hdmi_msm_turn_on();
+		}
+	} else {
+		hdmi_msm_state->hpd_cable_chg_detected = FALSE;
+		mutex_unlock(&hdmi_msm_state_mutex);
+		if (hpd_state) {
+			hdmi_msm_read_edid();
+#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
+			hdmi_msm_state->reauth = FALSE ;
+#endif
+			/* Build EDID table */
+			envp[0] = "HDCP_STATE=FAIL";
+			envp[1] = NULL;
+			DEV_INFO("HDMI HPD: QDSP OFF\n");
+			kobject_uevent_env(external_common_state->uevent_kobj,
+				KOBJ_CHANGE, envp);
+			hdmi_msm_turn_on();
+			DEV_INFO("HDMI HPD: sense CONNECTED: send ONLINE\n");
+			kobject_uevent(external_common_state->uevent_kobj,
+				KOBJ_ONLINE);
+			hdmi_msm_hdcp_enable();
+		} else {
+			DEV_INFO("HDMI HPD: sense DISCONNECTED: send OFFLINE\n"
+				);
+			kobject_uevent(external_common_state->uevent_kobj,
+				KOBJ_OFFLINE);
+		}
+	}
+
+	/* HPD_INT_CTRL[0x0254]
+	 *   31:10 Reserved
+	 *   9     RCV_PLUGIN_DET_MASK	receiver plug in interrupt mask.
+	 *                              When programmed to 1,
+	 *                              RCV_PLUGIN_DET_INT will toggle
+	 *                              the interrupt line
+	 *   8:6   Reserved
+	 *   5     RX_INT_EN		Panel RX interrupt enable
+	 *         0: Disable
+	 *         1: Enable
+	 *   4     RX_INT_ACK		WRITE ONLY. Panel RX interrupt
+	 *                              ack
+	 *   3     Reserved
+	 *   2     INT_EN		Panel interrupt control
+	 *         0: Disable
+	 *         1: Enable
+	 *   1     INT_POLARITY		Panel interrupt polarity
+	 *         0: generate interrupt on disconnect
+	 *         1: generate interrupt on connect
+	 *   0     INT_ACK		WRITE ONLY. Panel interrupt ack */
+	/* Set IRQ for HPD */
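+	/* INT_POLARITY is set to the opposite of the current state so the
+	 * next interrupt fires on a connect when disconnected, and on a
+	 * disconnect when connected. */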
+	HDMI_OUTP(0x0254, 4 | (hpd_state ? 0 : 2));
+}
+
+#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
+static void hdcp_deauthenticate(void);
+static void hdmi_msm_hdcp_reauth_work(struct work_struct *work)
+{
+#ifdef CONFIG_SUSPEND
+	mutex_lock(&hdmi_msm_state_mutex);
+	if (hdmi_msm_state->pm_suspended) {
+		mutex_unlock(&hdmi_msm_state_mutex);
+		DEV_WARN("HDCP: deauthenticating skipped, pm_suspended\n");
+		return;
+	}
+	mutex_unlock(&hdmi_msm_state_mutex);
+#endif
+
+	/* Don't process recursive actions */
+	mutex_lock(&hdmi_msm_state_mutex);
+	if (hdmi_msm_state->hdcp_activating) {
+		mutex_unlock(&hdmi_msm_state_mutex);
+		return;
+	}
+	mutex_unlock(&hdmi_msm_state_mutex);
+
+	/*
+	 * Reauth => deauth, hdcp_auth.
+	 * hdcp_auth => turn_on(), which resets the HDMI core without
+	 * informing the Audio QDSP; this can do bad things to video
+	 * playback on the HDTV.
+	 * Therefore, as surprising as it may sound, do the reauth
+	 * only if the device is HDCP-capable.
+	 */
+	if (external_common_state->present_hdcp) {
+		hdcp_deauthenticate();
+		mod_timer(&hdmi_msm_state->hdcp_timer, jiffies + HZ/2);
+	}
+}
+
+static void hdmi_msm_hdcp_work(struct work_struct *work)
+{
+#ifdef CONFIG_SUSPEND
+	mutex_lock(&hdmi_msm_state_mutex);
+	if (hdmi_msm_state->pm_suspended) {
+		mutex_unlock(&hdmi_msm_state_mutex);
+		DEV_WARN("HDCP: Re-enable skipped, pm_suspended\n");
+		return;
+	}
+	mutex_unlock(&hdmi_msm_state_mutex);
+#endif
+
+	/* Only re-enable if cable still connected */
+	mutex_lock(&external_common_state_hpd_mutex);
+	if (external_common_state->hpd_state &&
+	    !(hdmi_msm_state->full_auth_done)) {
+		mutex_unlock(&external_common_state_hpd_mutex);
+		hdmi_msm_state->reauth = TRUE;
+		hdmi_msm_turn_on();
+	} else
+		mutex_unlock(&external_common_state_hpd_mutex);
+}
+#endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT */
+
+static irqreturn_t hdmi_msm_isr(int irq, void *dev_id)
+{
+	uint32 hpd_int_status;
+	uint32 hpd_int_ctrl;
+	uint32 ddc_int_ctrl;
+	uint32 audio_int_val;
+#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
+	uint32 hdcp_int_val;
+	char *envp[2];
+#endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT */
+	static uint32 fifo_urun_int_occurred;
+	static uint32 sample_drop_int_occurred;
+	const uint32 occurrence_limit = 5;
+
+	if (!hdmi_msm_state || !hdmi_msm_state->hpd_initialized ||
+		!MSM_HDMI_BASE) {
+		DEV_DBG("ISR ignored, probe failed\n");
+		return IRQ_HANDLED;
+	}
+#ifdef CONFIG_SUSPEND
+	mutex_lock(&hdmi_msm_state_mutex);
+	if (hdmi_msm_state->pm_suspended) {
+		mutex_unlock(&hdmi_msm_state_mutex);
+		DEV_WARN("ISR ignored, pm_suspended\n");
+		return IRQ_HANDLED;
+	}
+	mutex_unlock(&hdmi_msm_state_mutex);
+#endif
+
+	/* Process HPD Interrupt */
+	/* HDMI_HPD_INT_STATUS[0x0250] */
+	hpd_int_status = HDMI_INP_ND(0x0250);
+	/* HDMI_HPD_INT_CTRL[0x0254] */
+	hpd_int_ctrl = HDMI_INP_ND(0x0254);
+	if ((hpd_int_ctrl & (1 << 2)) && (hpd_int_status & (1 << 0))) {
+		boolean cable_detected = (hpd_int_status & 2) >> 1;
+
+		/* HDMI_HPD_INT_CTRL[0x0254] */
+		/* Clear all interrupts, timer will turn IRQ back on */
+		HDMI_OUTP(0x0254, 1 << 0);
+
+		DEV_DBG("%s: HPD IRQ, Ctrl=%04x, State=%04x\n", __func__,
+			hpd_int_ctrl, hpd_int_status);
+		mutex_lock(&hdmi_msm_state_mutex);
+		hdmi_msm_state->hpd_cable_chg_detected = TRUE;
+
+		/* ensure 2 readouts */
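+		/* Set hpd_prev_state opposite to hpd_state so the state work
+		 * function sees the change and re-checks before acting. */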
+		hdmi_msm_state->hpd_prev_state = cable_detected ? 0 : 1;
+		external_common_state->hpd_state = cable_detected ? 1 : 0;
+		hdmi_msm_state->hpd_stable = 0;
+		mod_timer(&hdmi_msm_state->hpd_state_timer, jiffies + HZ/2);
+		mutex_unlock(&hdmi_msm_state_mutex);
+		/*
+		 * HDCP Compliance 1A-01:
+		 * The Quantum Data Box 882 triggers two consecutive
+		 * HPD events very close to each other as part of this
+		 * test, which can trigger two parallel HDCP auth threads.
+		 * If HDCP authentication is in progress when this ISR
+		 * fires, stop the authentication rather than
+		 * re-authenticating again.
+		 */
+		if (!(hdmi_msm_state->full_auth_done)) {
+			DEV_DBG("%s: got HPD while authenticating\n",
+			    __func__);
+			mutex_lock(&hdcp_auth_state_mutex);
+			hdmi_msm_state->hpd_during_auth = TRUE;
+			mutex_unlock(&hdcp_auth_state_mutex);
+		}
+		return IRQ_HANDLED;
+	}
+
+	/* Process DDC Interrupts */
+	/* HDMI_DDC_INT_CTRL[0x0214] */
+	ddc_int_ctrl = HDMI_INP_ND(0x0214);
+	if ((ddc_int_ctrl & (1 << 2)) && (ddc_int_ctrl & (1 << 0))) {
+		/* SW_DONE INT occurred, clear it */
+		HDMI_OUTP_ND(0x0214, ddc_int_ctrl | (1 << 1));
+		complete(&hdmi_msm_state->ddc_sw_done);
+		return IRQ_HANDLED;
+	}
+
+	/* FIFO Underrun Int is enabled */
+	/* HDMI_AUD_INT[0x02CC]
+	 *   [3] AUD_SAM_DROP_MASK [R/W]
+	 *   [2] AUD_SAM_DROP_ACK [W], AUD_SAM_DROP_INT [R]
+	 *   [1] AUD_FIFO_URUN_MASK [R/W]
+	 *   [0] AUD_FIFO_URUN_ACK [W], AUD_FIFO_URUN_INT [R] */
+	audio_int_val = HDMI_INP_ND(0x02CC);
+	if ((audio_int_val & (1 << 1)) && (audio_int_val & (1 << 0))) {
+		/* FIFO Underrun occurred, clear it */
+		HDMI_OUTP(0x02CC, audio_int_val | (1 << 0));
+
+		++fifo_urun_int_occurred;
+		DEV_INFO("HDMI AUD_FIFO_URUN: %d\n", fifo_urun_int_occurred);
+
+		if (fifo_urun_int_occurred >= occurrence_limit) {
+			HDMI_OUTP(0x02CC, HDMI_INP(0x02CC) & ~(1 << 1));
+			DEV_INFO("HDMI AUD_FIFO_URUN: INT has been disabled "
+				"by the ISR after %d occurrences...\n",
+				fifo_urun_int_occurred);
+		}
+		return IRQ_HANDLED;
+	}
+
+	/* Audio Sample Drop int is enabled */
+	if ((audio_int_val & (1 << 3)) && (audio_int_val & (1 << 2))) {
+		/* Audio Sample Drop occurred, clear it */
+		HDMI_OUTP(0x02CC, audio_int_val | (1 << 2));
+		DEV_DBG("%s: AUD_SAM_DROP\n", __func__);
+
+		++sample_drop_int_occurred;
+		if (sample_drop_int_occurred >= occurrence_limit) {
+			HDMI_OUTP(0x02CC, HDMI_INP(0x02CC) & ~(1 << 3));
+			DEV_INFO("HDMI AUD_SAM_DROP: INT has been disabled "
+				"by the ISR after %d occurrences...\n",
+				sample_drop_int_occurred);
+		}
+		return IRQ_HANDLED;
+	}
+
+#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
+	/* HDCP_INT_CTRL[0x0118]
+	 *    [0] AUTH_SUCCESS_INT	[R]	HDCP Authentication Success
+	 *		interrupt status
+	 *    [1] AUTH_SUCCESS_ACK	[W]	Acknowledge bit for HDCP
+	 *		Authentication Success bit - write 1 to clear
+	 *    [2] AUTH_SUCCESS_MASK	[R/W]	Mask bit for HDCP Authentication
+	 *		Success interrupt - set to 1 to enable interrupt */
+	hdcp_int_val = HDMI_INP_ND(0x0118);
+	if ((hdcp_int_val & (1 << 2)) && (hdcp_int_val & (1 << 0))) {
+		/* AUTH_SUCCESS_INT */
+		HDMI_OUTP(0x0118, (hdcp_int_val | (1 << 1)) & ~(1 << 0));
+		DEV_INFO("HDCP: AUTH_SUCCESS_INT received\n");
+		complete_all(&hdmi_msm_state->hdcp_success_done);
+		return IRQ_HANDLED;
+	}
+	/*    [4] AUTH_FAIL_INT		[R]	HDCP Authentication Lost
+	 *		interrupt Status
+	 *    [5] AUTH_FAIL_ACK		[W]	Acknowledge bit for HDCP
+	 *		Authentication Lost bit - write 1 to clear
+	 *    [6] AUTH_FAIL_MASK	[R/W]	Mask bit for HDCP Authentication
+	 *		Lost interrupt set to 1 to enable interrupt
+	 *    [7] AUTH_FAIL_INFO_ACK	[W]	Acknowledge bit for HDCP
+	 *		Authentication Failure Info field - write 1 to clear */
+	if ((hdcp_int_val & (1 << 6)) && (hdcp_int_val & (1 << 4))) {
+		/* AUTH_FAIL_INT */
+		/* Clear and Disable */
+		HDMI_OUTP(0x0118, (hdcp_int_val | (1 << 5))
+			& ~((1 << 6) | (1 << 4)));
+		DEV_INFO("HDCP: AUTH_FAIL_INT received, LINK0_STATUS=0x%08x\n",
+			HDMI_INP_ND(0x011C));
+		if (hdmi_msm_state->full_auth_done) {
+			envp[0] = "HDCP_STATE=FAIL";
+			envp[1] = NULL;
+			DEV_INFO("HDMI HPD: QDSP OFF\n");
+			kobject_uevent_env(external_common_state->uevent_kobj,
+			KOBJ_CHANGE, envp);
+			mutex_lock(&hdcp_auth_state_mutex);
+			hdmi_msm_state->full_auth_done = FALSE;
+			mutex_unlock(&hdcp_auth_state_mutex);
+			/* Call reauth only when authentication was
+			 * successful, or else we always go into
+			 * the reauth loop
+			 */
+			queue_work(hdmi_work_queue,
+			    &hdmi_msm_state->hdcp_reauth_work);
+		}
+		mutex_lock(&hdcp_auth_state_mutex);
+		/* This flag prevents other threads from re-authenticating
+		 * after we've just authenticated (i.e., finished part3)
+		 */
+		hdmi_msm_state->full_auth_done = FALSE;
+
+		mutex_unlock(&hdcp_auth_state_mutex);
+		DEV_DBG("calling reauthenticate from %s HDCP FAIL INT ",
+		    __func__);
+
+		return IRQ_HANDLED;
+	}
+	/*    [8] DDC_XFER_REQ_INT	[R]	HDCP DDC Transfer Request
+	 *		interrupt status
+	 *    [9] DDC_XFER_REQ_ACK	[W]	Acknowledge bit for HDCP DDC
+	 *		Transfer Request bit - write 1 to clear
+	 *   [10] DDC_XFER_REQ_MASK	[R/W]	Mask bit for HDCP DDC Transfer
+	 *		Request interrupt - set to 1 to enable interrupt */
+	if ((hdcp_int_val & (1 << 10)) && (hdcp_int_val & (1 << 8))) {
+		/* DDC_XFER_REQ_INT */
+		HDMI_OUTP_ND(0x0118, (hdcp_int_val | (1 << 9)) & ~(1 << 8));
+		if (!(hdcp_int_val & (1 << 12)))
+			return IRQ_HANDLED;
+	}
+	/*   [12] DDC_XFER_DONE_INT	[R]	HDCP DDC Transfer done interrupt
+	 *		status
+	 *   [13] DDC_XFER_DONE_ACK	[W]	Acknowledge bit for HDCP DDC
+	 *		Transfer done bit - write 1 to clear
+	 *   [14] DDC_XFER_DONE_MASK	[R/W]	Mask bit for HDCP DDC Transfer
+	 *		done interrupt - set to 1 to enable interrupt */
+	if ((hdcp_int_val & (1 << 14)) && (hdcp_int_val & (1 << 12))) {
+		/* DDC_XFER_DONE_INT */
+		HDMI_OUTP_ND(0x0118, (hdcp_int_val | (1 << 13)) & ~(1 << 12));
+		DEV_INFO("HDCP: DDC_XFER_DONE received\n");
+		return IRQ_HANDLED;
+	}
+#endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT */
+
+	DEV_DBG("%s: HPD<Ctrl=%04x, State=%04x>, ddc_int_ctrl=%04x, "
+		"aud_int=%04x, cec_int=%04x\n", __func__, hpd_int_ctrl,
+		hpd_int_status, ddc_int_ctrl, audio_int_val,
+		HDMI_INP_ND(0x029C));
+
+	return IRQ_HANDLED;
+}
+
+static int check_hdmi_features(void)
+{
+	/* RAW_FEAT_CONFIG_ROW0_LSB */
+	uint32 val = inpdw(QFPROM_BASE + 0x0238);
+	/* HDMI_DISABLE */
+	boolean hdmi_disabled = (val & 0x00200000) >> 21;
+	/* HDCP_DISABLE */
+	boolean hdcp_disabled = (val & 0x00400000) >> 22;
+
+	DEV_DBG("Features <val:0x%08x, HDMI:%s, HDCP:%s>\n", val,
+		hdmi_disabled ? "OFF" : "ON", hdcp_disabled ? "OFF" : "ON");
+	if (hdmi_disabled) {
+		DEV_ERR("ERROR: HDMI disabled\n");
+		return -ENODEV;
+	}
+
+	if (hdcp_disabled)
+		DEV_WARN("WARNING: HDCP disabled\n");
+
+	return 0;
+}
+
+static boolean hdmi_msm_has_hdcp(void)
+{
+	/* RAW_FEAT_CONFIG_ROW0_LSB, HDCP_DISABLE */
+	return (inpdw(QFPROM_BASE + 0x0238) & 0x00400000) ? FALSE : TRUE;
+}
+
+static boolean hdmi_msm_is_power_on(void)
+{
+	/* HDMI_CTRL, ENABLE */
+	return (HDMI_INP_ND(0x0000) & 0x00000001) ? TRUE : FALSE;
+}
+
+/* 1.2.1.2.1 DVI Operation
+ * HDMI compliance requires the HDMI core to support DVI as well. The
+ * HDMI core also supports DVI. In DVI operation there are no preambles
+ * and guardbands transmitted. The TMDS encoding of video data remains
+ * the same as HDMI. There are no VBI or audio packets transmitted. In
+ * order to enable DVI mode in HDMI core, HDMI_DVI_SEL field of
+ * HDMI_CTRL register needs to be programmed to 0. */
+static boolean hdmi_msm_is_dvi_mode(void)
+{
+	/* HDMI_CTRL, HDMI_DVI_SEL */
+	return (HDMI_INP_ND(0x0000) & 0x00000002) ? FALSE : TRUE;
+}
+
+static void hdmi_msm_set_mode(boolean power_on)
+{
+	uint32 reg_val = 0;
+	if (power_on) {
+		/* ENABLE */
+		reg_val |= 0x00000001; /* Enable the block */
+		if (external_common_state->hdmi_sink == 0) {
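+			/* For a DVI sink the final write below leaves
+			 * HDMI_DVI_SEL cleared (DVI mode); the intermediate
+			 * write with the bit set appears intended to toggle
+			 * the mode while the core is enabled. */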
+			/* HDMI_DVI_SEL */
+			reg_val |= 0x00000002;
+			/* HDMI_CTRL */
+			HDMI_OUTP(0x0000, reg_val);
+			/* HDMI_DVI_SEL */
+			reg_val &= ~0x00000002;
+		} else
+			reg_val |= 0x00000002;
+	} else
+		reg_val = 0x00000002;
+
+	/* HDMI_CTRL */
+	HDMI_OUTP(0x0000, reg_val);
+	DEV_DBG("HDMI Core: %s\n", power_on ? "Enable" : "Disable");
+}
+
+static void msm_hdmi_init_ddc(void)
+{
+	/* 0x0220 HDMI_DDC_SPEED
+	   [31:16] PRESCALE prescale = (m * xtal_frequency) /
+		(desired_i2c_speed), where m is multiply
+		factor, default: m = 1
+	   [1:0]   THRESHOLD Select threshold to use to determine whether value
+		sampled on SDA is a 1 or 0. Specified in terms of the ratio
+		between the number of sampled ones and the total number of times
+		SDA is sampled.
+		* 0x0: >0
+		* 0x1: 1/4 of total samples
+		* 0x2: 1/2 of total samples
+		* 0x3: 3/4 of total samples */
+	/* Configure the Pre-Scale multiplier
+	 * Configure the Threshold */
+	HDMI_OUTP_ND(0x0220, (10 << 16) | (2 << 0));
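+	/* The value above selects PRESCALE = 10 and THRESHOLD = 0x2, i.e. a
+	 * bit reads as 1 when at least half of the SDA samples are 1. */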
+
+	/* 0x0224 HDMI_DDC_SETUP */
+	HDMI_OUTP_ND(0x0224, 0);
+
+	/* 0x027C HDMI_DDC_REF
+	   [16] REFTIMER_ENABLE	Enable the timer
+		* 0: Disable
+		* 1: Enable
+	   [15:0] REFTIMER	Value to set the register in order to generate
+		DDC strobe. This register counts on HDCP application clock */
+	/* Enable reference timer
+	 * 27 micro-seconds */
+	HDMI_OUTP_ND(0x027C, (1 << 16) | (27 << 0));
+}
+
+static int hdmi_msm_ddc_clear_irq(const char *what)
+{
+	const uint32 time_out = 0xFFFF;
+	uint32 time_out_count, reg_val;
+
+	/* clear pending and enable interrupt */
+	time_out_count = time_out;
+	do {
+		--time_out_count;
+		/* HDMI_DDC_INT_CTRL[0x0214]
+		   [2] SW_DONE_MK Mask bit for SW_DONE_INT. Set to 1 to enable
+		       interrupt.
+		   [1] SW_DONE_ACK WRITE ONLY. Acknowledge bit for SW_DONE_INT.
+		       Write 1 to clear interrupt.
+		   [0] SW_DONE_INT READ ONLY. SW_DONE interrupt status */
+		/* Clear and Enable DDC interrupt */
+		/* Write */
+		HDMI_OUTP_ND(0x0214, (1 << 2) | (1 << 1));
+		/* Read back */
+		reg_val = HDMI_INP_ND(0x0214);
+	} while ((reg_val & 0x1) && time_out_count);
+	if (!time_out_count) {
+		DEV_ERR("%s[%s]: timed out\n", __func__, what);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
+static int hdmi_msm_ddc_write(uint32 dev_addr, uint32 offset,
+	const uint8 *data_buf, uint32 data_len, const char *what)
+{
+	uint32 reg_val, ndx;
+	int status = 0, retry = 10;
+	uint32 time_out_count;
+
+	if (NULL == data_buf) {
+		status = -EINVAL;
+		DEV_ERR("%s[%s]: invalid input parameter\n", __func__, what);
+		goto error;
+	}
+
+again:
+	status = hdmi_msm_ddc_clear_irq(what);
+	if (status)
+		goto error;
+
+	/* Ensure Device Address has LSB set to 0 to indicate a slave addr write */
+	dev_addr &= 0xFE;
+
+	/* 0x0238 HDMI_DDC_DATA
+	   [31] INDEX_WRITE WRITE ONLY. To write index field, set this bit to
+		1 while writing HDMI_DDC_DATA.
+	   [23:16] INDEX Use to set index into DDC buffer for next read or
+		current write, or to read index of current read or next write.
+		Writable only when INDEX_WRITE=1.
+	   [15:8] DATA Use to fill or read the DDC buffer
+	   [0] DATA_RW Select whether buffer access will be a read or write.
+		For writes, address auto-increments on write to HDMI_DDC_DATA.
+		For reads, address autoincrements on reads to HDMI_DDC_DATA.
+		* 0: Write
+		* 1: Read */
+
+	/* 1. Write to HDMI_I2C_DATA with the following fields set in order to
+	 *    handle portion #1
+	 *    DATA_RW = 0x0 (write)
+	 *    DATA = linkAddress (primary link address and writing)
+	 *    INDEX = 0x0 (initial offset into buffer)
+	 *    INDEX_WRITE = 0x1 (setting initial offset) */
+	HDMI_OUTP_ND(0x0238, (0x1UL << 31) | (dev_addr << 8));
+
+	/* 2. Write to HDMI_I2C_DATA with the following fields set in order to
+	 *    handle portion #2
+	 *    DATA_RW = 0x0 (write)
+	 *    DATA = offsetAddress
+	 *    INDEX = 0x0
+	 *    INDEX_WRITE = 0x0 (auto-increment by hardware) */
+	HDMI_OUTP_ND(0x0238, offset << 8);
+
+	/* 3. Write to HDMI_I2C_DATA with the following fields set in order to
+	 *    handle portion #3
+	 *    DATA_RW = 0x0 (write)
+	 *    DATA = data_buf[ndx]
+	 *    INDEX = 0x0
+	 *    INDEX_WRITE = 0x0 (auto-increment by hardware) */
+	for (ndx = 0; ndx < data_len; ++ndx)
+		HDMI_OUTP_ND(0x0238, ((uint32)data_buf[ndx]) << 8);
+
+	/* Data setup is complete, now setup the transaction characteristics */
+
+	/* 0x0228 HDMI_DDC_TRANS0
+	   [23:16] CNT0 Byte count for first transaction (excluding the first
+		byte, which is usually the address).
+	   [13] STOP0 Determines whether a stop bit will be sent after the first
+		transaction
+		* 0: NO STOP
+		* 1: STOP
+	   [12] START0 Determines whether a start bit will be sent before the
+		first transaction
+		* 0: NO START
+		* 1: START
+	   [8] STOP_ON_NACK0 Determines whether the current transfer will stop
+		if a NACK is received during the first transaction (current
+		transaction always stops).
+		* 0: STOP CURRENT TRANSACTION, GO TO NEXT TRANSACTION
+		* 1: STOP ALL TRANSACTIONS, SEND STOP BIT
+	   [0] RW0 Read/write indicator for first transaction - set to 0 for
+		write, 1 for read. This bit only controls HDMI_DDC behaviour -
+		the R/W bit in the transaction is programmed into the DDC buffer
+		as the LSB of the address byte.
+		* 0: WRITE
+		* 1: READ */
+
+	/* 4. Write to HDMI_I2C_TRANSACTION0 with the following fields set in
+	      order to handle characteristics of portion #1 and portion #2
+	 *    RW0 = 0x0 (write)
+	 *    START0 = 0x1 (insert START bit)
+	 *    STOP0 = 0x0 (do NOT insert STOP bit)
+	 *    CNT0 = 0x1 (single byte transaction excluding address) */
+	HDMI_OUTP_ND(0x0228, (1 << 12) | (1 << 16));
+
+	/* 0x022C HDMI_DDC_TRANS1
+	  [23:16] CNT1 Byte count for second transaction (excluding the first
+		byte, which is usually the address).
+	  [13] STOP1 Determines whether a stop bit will be sent after the second
+		transaction
+		* 0: NO STOP
+		* 1: STOP
+	  [12] START1 Determines whether a start bit will be sent before the
+		second transaction
+		* 0: NO START
+		* 1: START
+	  [8] STOP_ON_NACK1 Determines whether the current transfer will stop if
+		a NACK is received during the second transaction (current
+		transaction always stops).
+		* 0: STOP CURRENT TRANSACTION, GO TO NEXT TRANSACTION
+		* 1: STOP ALL TRANSACTIONS, SEND STOP BIT
+	  [0] RW1 Read/write indicator for second transaction - set to 0 for
+		write, 1 for read. This bit only controls HDMI_DDC behaviour -
+		the R/W bit in the transaction is programmed into the DDC buffer
+		as the LSB of the address byte.
+		* 0: WRITE
+		* 1: READ */
+
+	/* 5. Write to HDMI_I2C_TRANSACTION1 with the following fields set in
+	      order to handle characteristics of portion #3
+	 *    RW1 = 0x0 (write)
+	 *    STOP1 = 0x1 (insert STOP bit)
+	 *    CNT1 = data_len-1 (write the remaining data bytes)
+	 *    Byte count for the second transaction (excluding the first
+	 *    byte, which is usually the address) */
+	HDMI_OUTP_ND(0x022C, (1 << 13) | ((data_len-1) << 16));
+
+	/* Trigger the I2C transfer */
+	/* 0x020C HDMI_DDC_CTRL
+	   [21:20] TRANSACTION_CNT
+		Number of transactions to be done in current transfer.
+		* 0x0: transaction0 only
+		* 0x1: transaction0, transaction1
+		* 0x2: transaction0, transaction1, transaction2
+		* 0x3: transaction0, transaction1, transaction2, transaction3
+	   [3] SW_STATUS_RESET
+		Write 1 to reset HDMI_DDC_SW_STATUS flags, will reset SW_DONE,
+		ABORTED, TIMEOUT, SW_INTERRUPTED, BUFFER_OVERFLOW,
+		STOPPED_ON_NACK, NACK0, NACK1, NACK2, NACK3
+	   [2] SEND_RESET Set to 1 to send reset sequence (9 clocks with no
+		data) at start of transfer.  This sequence is sent after GO is
+		written to 1, before the first transaction only.
+	   [1] SOFT_RESET Write 1 to reset DDC controller
+	   [0] GO WRITE ONLY. Write 1 to start DDC transfer. */
+
+	/* 6. Write to HDMI_I2C_CONTROL to kick off the hardware.
+	 *    Note that NOTHING has been transmitted on the DDC lines up to this
+	 *    point.
+	 *    TRANSACTION_CNT = 0x1 (execute transaction0 followed by
+	 *    transaction1)
+	 *    GO = 0x1 (kicks off hardware) */
+	INIT_COMPLETION(hdmi_msm_state->ddc_sw_done);
+	HDMI_OUTP_ND(0x020C, (1 << 0) | (1 << 20));
+
+	time_out_count = wait_for_completion_interruptible_timeout(
+		&hdmi_msm_state->ddc_sw_done, HZ/2);
+	HDMI_OUTP_ND(0x0214, 0x2);
+	if (!time_out_count) {
+		if (retry-- > 0) {
+			DEV_INFO("%s[%s]: timed out, retry=%d\n", __func__,
+				what, retry);
+			goto again;
+		}
+		status = -ETIMEDOUT;
+		DEV_ERR("%s[%s]: timed out, DDC SW Status=%08x, HW "
+			"Status=%08x, Int Ctrl=%08x\n", __func__, what,
+			HDMI_INP_ND(0x0218), HDMI_INP_ND(0x021C),
+			HDMI_INP_ND(0x0214));
+		goto error;
+	}
+
+	/* Read DDC status */
+	reg_val = HDMI_INP_ND(0x0218);
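+	/* Bits 12..15 of DDC_SW_STATUS are the NACK0..NACK3 flags for the
+	 * four possible transactions. */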
+	reg_val &= 0x00001000 | 0x00002000 | 0x00004000 | 0x00008000;
+
+	/* Check if any NACK occurred */
+	if (reg_val) {
+		if (retry > 1)
+			HDMI_OUTP_ND(0x020C, BIT(3)); /* SW_STATUS_RESET */
+		else
+			HDMI_OUTP_ND(0x020C, BIT(1)); /* SOFT_RESET */
+		if (retry-- > 0) {
+			DEV_DBG("%s[%s]: failed NACK=%08x, retry=%d\n",
+				__func__, what, reg_val, retry);
+			msleep(100);
+			goto again;
+		}
+		status = -EIO;
+		DEV_ERR("%s[%s]: failed NACK: %08x\n", __func__, what, reg_val);
+		goto error;
+	}
+
+	DEV_DBG("%s[%s] success\n", __func__, what);
+
+error:
+	return status;
+}
+#endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT */
+
+static int hdmi_msm_ddc_read_retry(uint32 dev_addr, uint32 offset,
+	uint8 *data_buf, uint32 data_len, uint32 request_len, int retry,
+	const char *what)
+{
+	uint32 reg_val, ndx;
+	int status = 0;
+	uint32 time_out_count;
+	int log_retry_fail = retry != 1;
+
+	if (NULL == data_buf) {
+		status = -EINVAL;
+		DEV_ERR("%s: invalid input parameter\n", __func__);
+		goto error;
+	}
+
+again:
+	status = hdmi_msm_ddc_clear_irq(what);
+	if (status)
+		goto error;
+
+	/* Ensure Device Address has LSB set to 0 to indicate a slave addr write */
+	dev_addr &= 0xFE;
+
+	/* 0x0238 HDMI_DDC_DATA
+	   [31] INDEX_WRITE WRITE ONLY. To write index field, set this bit to
+		1 while writing HDMI_DDC_DATA.
+	   [23:16] INDEX Use to set index into DDC buffer for next read or
+		current write, or to read index of current read or next write.
+		Writable only when INDEX_WRITE=1.
+	   [15:8] DATA Use to fill or read the DDC buffer
+	   [0] DATA_RW Select whether buffer access will be a read or write.
+		For writes, address auto-increments on write to HDMI_DDC_DATA.
+		For reads, address autoincrements on reads to HDMI_DDC_DATA.
+		* 0: Write
+		* 1: Read */
+
+	/* 1. Write to HDMI_I2C_DATA with the following fields set in order to
+	 *    handle portion #1
+	 *    DATA_RW = 0x0 (write)
+	 *    DATA = linkAddress (primary link address and writing)
+	 *    INDEX = 0x0 (initial offset into buffer)
+	 *    INDEX_WRITE = 0x1 (setting initial offset) */
+	HDMI_OUTP_ND(0x0238, (0x1UL << 31) | (dev_addr << 8));
+
+	/* 2. Write to HDMI_I2C_DATA with the following fields set in order to
+	 *    handle portion #2
+	 *    DATA_RW = 0x0 (write)
+	 *    DATA = offsetAddress
+	 *    INDEX = 0x0
+	 *    INDEX_WRITE = 0x0 (auto-increment by hardware) */
+	HDMI_OUTP_ND(0x0238, offset << 8);
+
+	/* 3. Write to HDMI_I2C_DATA with the following fields set in order to
+	 *    handle portion #3
+	 *    DATA_RW = 0x0 (write)
+	 *    DATA = linkAddress + 1 (primary link address 0x74 and reading)
+	 *    INDEX = 0x0
+	 *    INDEX_WRITE = 0x0 (auto-increment by hardware) */
+	HDMI_OUTP_ND(0x0238, (dev_addr | 1) << 8);
+
+	/* Data setup is complete, now setup the transaction characteristics */
+
+	/* 0x0228 HDMI_DDC_TRANS0
+	   [23:16] CNT0 Byte count for first transaction (excluding the first
+		byte, which is usually the address).
+	   [13] STOP0 Determines whether a stop bit will be sent after the first
+		transaction
+		* 0: NO STOP
+		* 1: STOP
+	   [12] START0 Determines whether a start bit will be sent before the
+		first transaction
+		* 0: NO START
+		* 1: START
+	   [8] STOP_ON_NACK0 Determines whether the current transfer will stop
+		if a NACK is received during the first transaction (current
+		transaction always stops).
+		* 0: STOP CURRENT TRANSACTION, GO TO NEXT TRANSACTION
+		* 1: STOP ALL TRANSACTIONS, SEND STOP BIT
+	   [0] RW0 Read/write indicator for first transaction - set to 0 for
+		write, 1 for read. This bit only controls HDMI_DDC behaviour -
+		the R/W bit in the transaction is programmed into the DDC buffer
+		as the LSB of the address byte.
+		* 0: WRITE
+		* 1: READ */
+
+	/* 4. Write to HDMI_I2C_TRANSACTION0 with the following fields set in
+	      order to handle characteristics of portion #1 and portion #2
+	 *    RW0 = 0x0 (write)
+	 *    START0 = 0x1 (insert START bit)
+	 *    STOP0 = 0x0 (do NOT insert STOP bit)
+	 *    CNT0 = 0x1 (single byte transaction excluding address) */
+	HDMI_OUTP_ND(0x0228, (1 << 12) | (1 << 16));
+
+	/* 0x022C HDMI_DDC_TRANS1
+	  [23:16] CNT1 Byte count for second transaction (excluding the first
+		byte, which is usually the address).
+	  [13] STOP1 Determines whether a stop bit will be sent after the second
+		transaction
+		* 0: NO STOP
+		* 1: STOP
+	  [12] START1 Determines whether a start bit will be sent before the
+		second transaction
+		* 0: NO START
+		* 1: START
+	  [8] STOP_ON_NACK1 Determines whether the current transfer will stop if
+		a NACK is received during the second transaction (current
+		transaction always stops).
+		* 0: STOP CURRENT TRANSACTION, GO TO NEXT TRANSACTION
+		* 1: STOP ALL TRANSACTIONS, SEND STOP BIT
+	  [0] RW1 Read/write indicator for second transaction - set to 0 for
+		write, 1 for read. This bit only controls HDMI_DDC behaviour -
+		the R/W bit in the transaction is programmed into the DDC buffer
+		as the LSB of the address byte.
+		* 0: WRITE
+		* 1: READ */
+
+	/* 5. Write to HDMI_I2C_TRANSACTION1 with the following fields set in
+	      order to handle characteristics of portion #3
+	 *    RW1 = 0x1 (read)
+	 *    START1 = 0x1 (insert START bit)
+	 *    STOP1 = 0x1 (insert STOP bit)
+	 *    CNT1 = data_len   (it's 128 (0x80) for a blk read) */
+	HDMI_OUTP_ND(0x022C, 1 | (1 << 12) | (1 << 13) | (request_len << 16));
+
+	/* Trigger the I2C transfer */
+	/* 0x020C HDMI_DDC_CTRL
+	   [21:20] TRANSACTION_CNT
+		Number of transactions to be done in current transfer.
+		* 0x0: transaction0 only
+		* 0x1: transaction0, transaction1
+		* 0x2: transaction0, transaction1, transaction2
+		* 0x3: transaction0, transaction1, transaction2, transaction3
+	   [3] SW_STATUS_RESET
+		Write 1 to reset HDMI_DDC_SW_STATUS flags, will reset SW_DONE,
+		ABORTED, TIMEOUT, SW_INTERRUPTED, BUFFER_OVERFLOW,
+		STOPPED_ON_NACK, NACK0, NACK1, NACK2, NACK3
+	   [2] SEND_RESET Set to 1 to send reset sequence (9 clocks with no
+		data) at start of transfer.  This sequence is sent after GO is
+		written to 1, before the first transaction only.
+	   [1] SOFT_RESET Write 1 to reset DDC controller
+	   [0] GO WRITE ONLY. Write 1 to start DDC transfer. */
+
+	/* 6. Write to HDMI_I2C_CONTROL to kick off the hardware.
+	 *    Note that NOTHING has been transmitted on the DDC lines up to this
+	 *    point.
+	 *    TRANSACTION_CNT = 0x1 (execute transaction0 followed by
+	 *    transaction1)
+	 *    SEND_RESET = Set to 1 to send reset sequence
+	 *    GO = 0x1 (kicks off hardware) */
+	INIT_COMPLETION(hdmi_msm_state->ddc_sw_done);
+	HDMI_OUTP_ND(0x020C, (1 << 0) | (1 << 20));
+
+	time_out_count = wait_for_completion_interruptible_timeout(
+		&hdmi_msm_state->ddc_sw_done, HZ/2);
+	HDMI_OUTP_ND(0x0214, 0x2);
+	if (!time_out_count) {
+		if (retry-- > 0) {
+			DEV_INFO("%s: timed out, retry=%d\n", __func__,
+				retry);
+			goto again;
+		}
+		status = -ETIMEDOUT;
+		DEV_ERR("%s: timed out(7), DDC SW Status=%08x, HW "
+			"Status=%08x, Int Ctrl=%08x\n", __func__,
+			HDMI_INP(0x0218), HDMI_INP(0x021C), HDMI_INP(0x0214));
+		goto error;
+	}
+
+	/* Read DDC status */
+	reg_val = HDMI_INP_ND(0x0218);
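+	/* Keep only bits 12-15 of HDMI_DDC_SW_STATUS, assumed here to be the
+	 * NACK0..NACK3 flags for the individual transactions */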
+	reg_val &= 0x00001000 | 0x00002000 | 0x00004000 | 0x00008000;
+
+	/* Check if any NACK occurred */
+	if (reg_val) {
+		HDMI_OUTP_ND(0x020C, BIT(3)); /* SW_STATUS_RESET */
+		if (retry == 1)
+			HDMI_OUTP_ND(0x020C, BIT(1)); /* SOFT_RESET */
+		if (retry-- > 0) {
+			DEV_DBG("%s(%s): failed NACK=0x%08x, retry=%d, "
+				"dev-addr=0x%02x, offset=0x%02x, "
+				"length=%d\n", __func__, what,
+				reg_val, retry, dev_addr,
+				offset, data_len);
+			goto again;
+		}
+		status = -EIO;
+		if (log_retry_fail)
+			DEV_ERR("%s(%s): failed NACK=0x%08x, dev-addr=0x%02x, "
+				"offset=0x%02x, length=%d\n", __func__, what,
+				reg_val, dev_addr, offset, data_len);
+		goto error;
+	}
+
+	/* 0x0238 HDMI_DDC_DATA
+	   [31] INDEX_WRITE WRITE ONLY. To write index field, set this bit to 1
+		while writing HDMI_DDC_DATA.
+	   [23:16] INDEX Use to set index into DDC buffer for next read or
+		current write, or to read index of current read or next write.
+		Writable only when INDEX_WRITE=1.
+	   [15:8] DATA Use to fill or read the DDC buffer
+	   [0] DATA_RW Select whether buffer access will be a read or write.
+		For writes, address auto-increments on write to HDMI_DDC_DATA.
+		For reads, address autoincrements on reads to HDMI_DDC_DATA.
+		* 0: Write
+		* 1: Read */
+
+	/* 8. ALL data is now available and waiting in the DDC buffer.
+	 *    Read HDMI_I2C_DATA with the following fields set
+	 *    RW = 0x1 (read)
+	 *    DATA = BCAPS (this is field where data is pulled from)
+	 *    INDEX = 0x3 (where the data has been placed in buffer by hardware)
+	 *    INDEX_WRITE = 0x1 (explicitly define offset) */
+	/* Write this data to DDC buffer */
+	HDMI_OUTP_ND(0x0238, 0x1 | (3 << 16) | (1 << 31));
+
+	/* Discard first byte */
+	HDMI_INP_ND(0x0238);
+	for (ndx = 0; ndx < data_len; ++ndx) {
+		reg_val = HDMI_INP_ND(0x0238);
+		data_buf[ndx] = (uint8) ((reg_val & 0x0000FF00) >> 8);
+	}
+
+	DEV_DBG("%s[%s] success\n", __func__, what);
+
+error:
+	return status;
+}
+
+static int hdmi_msm_ddc_read_edid_seg(uint32 dev_addr, uint32 offset,
+	uint8 *data_buf, uint32 data_len, uint32 request_len, int retry,
+	const char *what)
+{
+	uint32 reg_val, ndx;
+	int status = 0;
+	uint32 time_out_count;
+	int log_retry_fail = retry != 1;
+	int seg_addr = 0x60, seg_num = 0x01;
+
+	if (NULL == data_buf) {
+		status = -EINVAL;
+		DEV_ERR("%s: invalid input parameter\n", __func__);
+		goto error;
+	}
+
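+	/* EDID blocks beyond the first two live in E-DDC segments.  This
+	 * routine first writes the segment pointer (slave address 0x60,
+	 * segment number 1, i.e. blocks 2 and 3) and then performs the usual
+	 * offset write plus block read on the EDID slave address. */
+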
+again:
+	status = hdmi_msm_ddc_clear_irq(what);
+	if (status)
+		goto error;
+
+	/* Use the write form of the device address (LSB cleared); the read
+	   address is formed below by setting the LSB back to 1 */
+	dev_addr &= 0xFE;
+
+	/* 0x0238 HDMI_DDC_DATA
+	   [31] INDEX_WRITE WRITE ONLY. To write index field, set this bit to
+		1 while writing HDMI_DDC_DATA.
+	   [23:16] INDEX Use to set index into DDC buffer for next read or
+		current write, or to read index of current read or next write.
+		Writable only when INDEX_WRITE=1.
+	   [15:8] DATA Use to fill or read the DDC buffer
+	   [0] DATA_RW Select whether buffer access will be a read or write.
+		For writes, address auto-increments on write to HDMI_DDC_DATA.
+		For reads, address autoincrements on reads to HDMI_DDC_DATA.
+		* 0: Write
+		* 1: Read */
+
+	/* 1. Write to HDMI_I2C_DATA with the following fields set in order to
+	 *    handle portion #1 (the E-DDC segment pointer)
+	 *    DATA_RW = 0x0 (write)
+	 *    DATA = segment pointer slave address (0x60)
+	 *    INDEX = 0x0 (initial offset into buffer)
+	 *    INDEX_WRITE = 0x1 (setting initial offset) */
+	HDMI_OUTP_ND(0x0238, (0x1UL << 31) | (seg_addr << 8));
+
+	/* 2. Write to HDMI_I2C_DATA with the following fields set in order to
+	 *    handle portion #2 (the segment number)
+	 *    DATA_RW = 0x0 (write)
+	 *    DATA = segment number (0x01)
+	 *    INDEX = 0x0
+	 *    INDEX_WRITE = 0x0 (auto-increment by hardware) */
+	HDMI_OUTP_ND(0x0238, seg_num << 8);
+
+	/* 3. Write to HDMI_I2C_DATA three more times to queue the bytes for
+	 *    portion #3 (device write address and byte offset) and portion #4
+	 *    (device address with the read bit set)
+	 *    DATA_RW = 0x0 (write)
+	 *    INDEX = 0x0
+	 *    INDEX_WRITE = 0x0 (auto-increment by hardware) */
+	HDMI_OUTP_ND(0x0238, dev_addr << 8);
+	HDMI_OUTP_ND(0x0238, offset << 8);
+	HDMI_OUTP_ND(0x0238, (dev_addr | 1) << 8);
+
+	/* Data setup is complete, now setup the transaction characteristics */
+
+	/* 0x0228 HDMI_DDC_TRANS0
+	   [23:16] CNT0 Byte count for first transaction (excluding the first
+		byte, which is usually the address).
+	   [13] STOP0 Determines whether a stop bit will be sent after the first
+		transaction
+		* 0: NO STOP
+		* 1: STOP
+	   [12] START0 Determines whether a start bit will be sent before the
+		first transaction
+		* 0: NO START
+		* 1: START
+	   [8] STOP_ON_NACK0 Determines whether the current transfer will stop
+		if a NACK is received during the first transaction (current
+		transaction always stops).
+		* 0: STOP CURRENT TRANSACTION, GO TO NEXT TRANSACTION
+		* 1: STOP ALL TRANSACTIONS, SEND STOP BIT
+	   [0] RW0 Read/write indicator for first transaction - set to 0 for
+		write, 1 for read. This bit only controls HDMI_DDC behaviour -
+		the R/W bit in the transaction is programmed into the DDC buffer
+		as the LSB of the address byte.
+		* 0: WRITE
+		* 1: READ */
+
+	/* 4. Write to HDMI_I2C_TRANSACTION0 with the following fields set in
+	      order to handle characteristics of portion #1 and portion #2
+	 *    RW0 = 0x0 (write)
+	 *    START0 = 0x1 (insert START bit)
+	 *    STOP0 = 0x0 (do NOT insert STOP bit)
+	 *    CNT0 = 0x1 (single byte transaction excluding address) */
+	HDMI_OUTP_ND(0x0228, (1 << 12) | (1 << 16));
+
+	/* 0x022C HDMI_DDC_TRANS1
+	  [23:16] CNT1 Byte count for second transaction (excluding the first
+		byte, which is usually the address).
+	  [13] STOP1 Determines whether a stop bit will be sent after the second
+		transaction
+		* 0: NO STOP
+		* 1: STOP
+	  [12] START1 Determines whether a start bit will be sent before the
+		second transaction
+		* 0: NO START
+		* 1: START
+	  [8] STOP_ON_NACK1 Determines whether the current transfer will stop if
+		a NACK is received during the second transaction (current
+		transaction always stops).
+		* 0: STOP CURRENT TRANSACTION, GO TO NEXT TRANSACTION
+		* 1: STOP ALL TRANSACTIONS, SEND STOP BIT
+	  [0] RW1 Read/write indicator for second transaction - set to 0 for
+		write, 1 for read. This bit only controls HDMI_DDC behaviour -
+		the R/W bit in the transaction is programmed into the DDC buffer
+		as the LSB of the address byte.
+		* 0: WRITE
+		* 1: READ */
+
+	/* 5. Write to HDMI_I2C_TRANSACTION1 with the following fields set in
+	      order to handle characteristics of portion #3 (the device address
+	      and byte offset write)
+	 *    RW1 = 0x0 (write)
+	 *    START1 = 0x1 (insert repeated START bit)
+	 *    STOP1 = 0x0 (do NOT insert STOP bit)
+	 *    CNT1 = 0x1 (single byte transaction excluding address) */
+	HDMI_OUTP_ND(0x022C, (1 << 12) | (1 << 16));
+
+	/* 0x0230 HDMI_DDC_TRANS2
+	  [23:16] CNT2 Byte count for third transaction (excluding the first
+		byte, which is usually the address).
+	  [13] STOP2 Determines whether a stop bit will be sent after the third
+		transaction
+		* 0: NO STOP
+		* 1: STOP
+	  [12] START2 Determines whether a start bit will be sent before the
+		third transaction
+		* 0: NO START
+		* 1: START
+	  [8] STOP_ON_NACK2 Determines whether the current transfer will stop if
+		a NACK is received during the third transaction (current
+		transaction always stops).
+		* 0: STOP CURRENT TRANSACTION, GO TO NEXT TRANSACTION
+		* 1: STOP ALL TRANSACTIONS, SEND STOP BIT
+	  [0] RW2 Read/write indicator for third transaction - set to 0 for
+		write, 1 for read. This bit only controls HDMI_DDC behaviour -
+		the R/W bit in the transaction is programmed into the DDC buffer
+		as the LSB of the address byte.
+		* 0: WRITE
+		* 1: READ */
+
+	/* 6. Write to HDMI_I2C_TRANSACTION2 with the following fields set in
+	      order to handle characteristics of portion #4 (the EDID data read)
+	 *    RW2 = 0x1 (read)
+	 *    START2 = 0x1 (insert repeated START bit)
+	 *    STOP2 = 0x1 (insert STOP bit)
+	 *    CNT2 = request_len (0x80 for a full EDID block read) */
+	HDMI_OUTP_ND(0x0230, 1 | (1 << 12) | (1 << 13) | (request_len << 16));
+
+	/* Trigger the I2C transfer */
+	/* 0x020C HDMI_DDC_CTRL
+	   [21:20] TRANSACTION_CNT
+		Number of transactions to be done in current transfer.
+		* 0x0: transaction0 only
+		* 0x1: transaction0, transaction1
+		* 0x2: transaction0, transaction1, transaction2
+		* 0x3: transaction0, transaction1, transaction2, transaction3
+	   [3] SW_STATUS_RESET
+		Write 1 to reset HDMI_DDC_SW_STATUS flags, will reset SW_DONE,
+		ABORTED, TIMEOUT, SW_INTERRUPTED, BUFFER_OVERFLOW,
+		STOPPED_ON_NACK, NACK0, NACK1, NACK2, NACK3
+	   [2] SEND_RESET Set to 1 to send reset sequence (9 clocks with no
+		data) at start of transfer.  This sequence is sent after GO is
+		written to 1, before the first transaction only.
+	   [1] SOFT_RESET Write 1 to reset DDC controller
+	   [0] GO WRITE ONLY. Write 1 to start DDC transfer. */
+
+	/* 7. Write to HDMI_I2C_CONTROL to kick off the hardware.
+	 *    Note that NOTHING has been transmitted on the DDC lines up to this
+	 *    point.
+	 *    TRANSACTION_CNT = 0x2 (execute transaction0, transaction1 and
+	 *    transaction2)
+	 *    GO = 0x1 (kicks off hardware) */
+	INIT_COMPLETION(hdmi_msm_state->ddc_sw_done);
+	HDMI_OUTP_ND(0x020C, (1 << 0) | (2 << 20));
+
+	time_out_count = wait_for_completion_interruptible_timeout(
+		&hdmi_msm_state->ddc_sw_done, HZ/2);
+	HDMI_OUTP_ND(0x0214, 0x2);
+	if (!time_out_count) {
+		if (retry-- > 0) {
+			DEV_INFO("%s: timed out, retry=%d\n", __func__,
+				retry);
+			goto again;
+		}
+		status = -ETIMEDOUT;
+		DEV_ERR("%s: timed out(7), DDC SW Status=%08x, HW "
+			"Status=%08x, Int Ctrl=%08x\n", __func__,
+			HDMI_INP(0x0218), HDMI_INP(0x021C), HDMI_INP(0x0214));
+		goto error;
+	}
+
+	/* Read DDC status */
+	reg_val = HDMI_INP_ND(0x0218);
+	reg_val &= 0x00001000 | 0x00002000 | 0x00004000 | 0x00008000;
+
+	/* Check if any NACK occurred */
+	if (reg_val) {
+		HDMI_OUTP_ND(0x020C, BIT(3)); /* SW_STATUS_RESET */
+		if (retry == 1)
+			HDMI_OUTP_ND(0x020C, BIT(1)); /* SOFT_RESET */
+		if (retry-- > 0) {
+			DEV_DBG("%s(%s): failed NACK=0x%08x, retry=%d, "
+				"dev-addr=0x%02x, offset=0x%02x, "
+				"length=%d\n", __func__, what,
+				reg_val, retry, dev_addr,
+				offset, data_len);
+			goto again;
+		}
+		status = -EIO;
+		if (log_retry_fail)
+			DEV_ERR("%s(%s): failed NACK=0x%08x, dev-addr=0x%02x, "
+				"offset=0x%02x, length=%d\n", __func__, what,
+				reg_val, dev_addr, offset, data_len);
+		goto error;
+	}
+
+	/* 0x0238 HDMI_DDC_DATA
+	   [31] INDEX_WRITE WRITE ONLY. To write index field, set this bit to 1
+		while writing HDMI_DDC_DATA.
+	   [23:16] INDEX Use to set index into DDC buffer for next read or
+		current write, or to read index of current read or next write.
+		Writable only when INDEX_WRITE=1.
+	   [15:8] DATA Use to fill or read the DDC buffer
+	   [0] DATA_RW Select whether buffer access will be a read or write.
+		For writes, address auto-increments on write to HDMI_DDC_DATA.
+		For reads, address autoincrements on reads to HDMI_DDC_DATA.
+		* 0: Write
+		* 1: Read */
+
+	/* 8. ALL data is now available and waiting in the DDC buffer.
+	 *    Read HDMI_I2C_DATA with the following fields set
+	 *    RW = 0x1 (read)
+	 *    DATA = BCAPS (this is field where data is pulled from)
+	 *    INDEX = 0x3 (where the data has been placed in buffer by hardware)
+	 *    INDEX_WRITE = 0x1 (explicitly define offset) */
+	/* Write this data to DDC buffer */
+	HDMI_OUTP_ND(0x0238, 0x1 | (3 << 16) | (1 << 31));
+
+	/* Discard first byte */
+	HDMI_INP_ND(0x0238);
+	for (ndx = 0; ndx < data_len; ++ndx) {
+		reg_val = HDMI_INP_ND(0x0238);
+		data_buf[ndx] = (uint8) ((reg_val & 0x0000FF00) >> 8);
+	}
+
+	DEV_DBG("%s[%s] success\n", __func__, what);
+
+error:
+	return status;
+}
+
+
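+/* Try the read with the exact length first; if it fails, retry either with
+ * the same length (no_align) or with the request length rounded up to the
+ * next multiple of 32 bytes (e.g. a 5-byte read is reissued as a 32-byte
+ * request while still copying back only data_len bytes). */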
+static int hdmi_msm_ddc_read(uint32 dev_addr, uint32 offset, uint8 *data_buf,
+	uint32 data_len, int retry, const char *what, boolean no_align)
+{
+	int ret = hdmi_msm_ddc_read_retry(dev_addr, offset, data_buf, data_len,
+		data_len, retry, what);
+	if (!ret)
+		return 0;
+	if (no_align) {
+		return hdmi_msm_ddc_read_retry(dev_addr, offset, data_buf,
+			data_len, data_len, retry, what);
+	} else {
+		return hdmi_msm_ddc_read_retry(dev_addr, offset, data_buf,
+			data_len, 32 * ((data_len + 31) / 32), retry, what);
+	}
+}
+
+
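+/* Read one 128-byte EDID block.  On failure the block is re-read in
+ * progressively smaller chunks (64, 32, then 16 bytes) before giving up. */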
+static int hdmi_msm_read_edid_block(int block, uint8 *edid_buf)
+{
+	int i, rc = 0;
+	int block_size = 0x80;
+
+	do {
+		DEV_DBG("EDID: reading block(%d) with block-size=%d\n",
+			block, block_size);
+		for (i = 0; i < 0x80; i += block_size) {
+			/* Read EDID, retrying with a 32-byte aligned length on failure */
+			if (block < 2) {
+				rc = hdmi_msm_ddc_read(0xA0, block*0x80 + i,
+					edid_buf+i, block_size, 1,
+					"EDID", FALSE);
+			} else {
+				rc = hdmi_msm_ddc_read_edid_seg(0xA0,
+				block*0x80 + i, edid_buf+i, block_size,
+				block_size, 1, "EDID");
+			}
+			if (rc)
+				break;
+		}
+
+		block_size /= 2;
+	} while (rc && (block_size >= 16));
+
+	return rc;
+}
+
+static int hdmi_msm_read_edid(void)
+{
+	int status;
+
+	msm_hdmi_init_ddc();
+	/* Looks like we need to turn on HDMI engine before any
+	 * DDC transaction */
+	if (!hdmi_msm_is_power_on()) {
+		DEV_ERR("%s: failed: HDMI power is off", __func__);
+		status = -ENXIO;
+		goto error;
+	}
+
+	external_common_state->read_edid_block = hdmi_msm_read_edid_block;
+	status = hdmi_common_read_edid();
+	if (!status)
+		DEV_DBG("EDID: successfully read\n");
+
+error:
+	return status;
+}
+
+#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
+static void hdcp_auth_info(uint32 auth_info)
+{
+	switch (auth_info) {
+	case 0:
+		DEV_INFO("%s: None", __func__);
+		break;
+	case 1:
+		DEV_INFO("%s: Software Disabled Authentication", __func__);
+		break;
+	case 2:
+		DEV_INFO("%s: An Written", __func__);
+		break;
+	case 3:
+		DEV_INFO("%s: Invalid Aksv", __func__);
+		break;
+	case 4:
+		DEV_INFO("%s: Invalid Bksv", __func__);
+		break;
+	case 5:
+		DEV_INFO("%s: RI Mismatch (including RO)", __func__);
+		break;
+	case 6:
+		DEV_INFO("%s: consecutive Pj Mismatches", __func__);
+		break;
+	case 7:
+		DEV_INFO("%s: HPD Disconnect", __func__);
+		break;
+	case 8:
+	default:
+		DEV_INFO("%s: Reserved", __func__);
+		break;
+	}
+}
+
+static void hdcp_key_state(uint32 key_state)
+{
+	switch (key_state) {
+	case 0:
+		DEV_WARN("%s: No HDCP Keys", __func__);
+		break;
+	case 1:
+		DEV_WARN("%s: Not Checked", __func__);
+		break;
+	case 2:
+		DEV_DBG("%s: Checking", __func__);
+		break;
+	case 3:
+		DEV_DBG("%s: HDCP Keys Valid", __func__);
+		break;
+	case 4:
+		DEV_WARN("%s: AKSV not valid", __func__);
+		break;
+	case 5:
+		DEV_WARN("%s: Checksum Mismatch", __func__);
+		break;
+	case 6:
+		DEV_DBG("%s: Production AKSV "
+			"with ENABLE_USER_DEFINED_AN=1", __func__);
+		break;
+	case 7:
+	default:
+		DEV_INFO("%s: Reserved", __func__);
+		break;
+	}
+}
+
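+/* Count the set bits in a byte array.  A valid HDCP KSV is 40 bits with
+ * exactly 20 ones and 20 zeros, which is what the callers check for. */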
+static int hdmi_msm_count_one(uint8 *array, uint8 len)
+{
+	int i, j, count = 0;
+	for (i = 0; i < len; i++)
+		for (j = 0; j < 8; j++)
+			count += (((array[i] >> j) & 0x1) ? 1 : 0);
+	return count;
+}
+
+static void hdcp_deauthenticate(void)
+{
+	int hdcp_link_status = HDMI_INP(0x011C);
+
+	external_common_state->hdcp_active = FALSE;
+	/* 0x0130 HDCP_RESET
+	  [0] LINK0_DEAUTHENTICATE */
+	HDMI_OUTP(0x0130, 0x1);
+
+	/* 0x0110 HDCP_CTRL
+	  [8] ENCRYPTION_ENABLE
+	  [0] ENABLE */
+	/* encryption_enable = 0 | hdcp block enable = 1 */
+	HDMI_OUTP(0x0110, 0x0);
+
+	if (hdcp_link_status & 0x00000004)
+		hdcp_auth_info((hdcp_link_status & 0x000000F0) >> 4);
+}
+
+static int hdcp_authentication_part1(void)
+{
+	int ret = 0;
+	boolean is_match;
+	boolean is_part1_done = FALSE;
+	uint32 timeout_count;
+	uint8 bcaps;
+	uint8 aksv[5];
+	uint32 qfprom_aksv_0, qfprom_aksv_1, link0_aksv_0, link0_aksv_1;
+	uint8 bksv[5];
+	uint32 link0_bksv_0, link0_bksv_1;
+	uint8 an[8];
+	uint32 link0_an_0, link0_an_1;
+	uint32 hpd_int_status, hpd_int_ctrl;
+
+
+	static uint8 buf[0xFF];
+	memset(buf, 0, sizeof(buf));
+
+	if (!is_part1_done) {
+		is_part1_done = TRUE;
+
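+		/* HDCP part I: read the transmitter's AKSV from the QFPROM
+		 * fuses, read BKSV and Bcaps from the sink over DDC, write
+		 * An and AKSV to the sink, then read back R0' and check that
+		 * it matches the locally computed Ri. */
+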
+		/* Fetch aksv from QFprom, this info should be public. */
+		qfprom_aksv_0 = inpdw(QFPROM_BASE + 0x000060D8);
+		qfprom_aksv_1 = inpdw(QFPROM_BASE + 0x000060DC);
+
+		/* copy an and aksv to byte arrays for transmission */
+		aksv[0] =  qfprom_aksv_0        & 0xFF;
+		aksv[1] = (qfprom_aksv_0 >> 8)  & 0xFF;
+		aksv[2] = (qfprom_aksv_0 >> 16) & 0xFF;
+		aksv[3] = (qfprom_aksv_0 >> 24) & 0xFF;
+		aksv[4] =  qfprom_aksv_1        & 0xFF;
+		/* check there are 20 ones in AKSV */
+		if (hdmi_msm_count_one(aksv, 5) != 20) {
+			DEV_ERR("HDCP: AKSV read from QFPROM doesn't have "
+				"20 1's and 20 0's, FAIL (AKSV=%02x%08x)\n",
+				qfprom_aksv_1, qfprom_aksv_0);
+			ret = -EINVAL;
+			goto error;
+		}
+		DEV_DBG("HDCP: AKSV=%02x%08x\n", qfprom_aksv_1, qfprom_aksv_0);
+
+		/* 0x0288 HDCP_SW_LOWER_AKSV
+			[31:0] LOWER_AKSV */
+		/* 0x0284 HDCP_SW_UPPER_AKSV
+			[7:0] UPPER_AKSV */
+
+		/* This is the lower 32 bits of the SW
+		 * injected AKSV value(AKSV[31:0]) read
+		 * from the EFUSE. It is needed for HDCP
+		 * authentication and must be written
+		 * before enabling HDCP. */
+		HDMI_OUTP(0x0288, qfprom_aksv_0);
+		HDMI_OUTP(0x0284, qfprom_aksv_1);
+
+		msm_hdmi_init_ddc();
+
+		/* Read Bksv 5 bytes at 0x00 in HDCP port */
+		ret = hdmi_msm_ddc_read(0x74, 0x00, bksv, 5, 5, "Bksv", TRUE);
+		if (ret) {
+			DEV_ERR("%s(%d): Read BKSV failed", __func__, __LINE__);
+			goto error;
+		}
+		/* check there are 20 ones in BKSV */
+		if (hdmi_msm_count_one(bksv, 5) != 20) {
+			DEV_ERR("HDCP: BKSV read from Sink doesn't have "
+				"20 1's and 20 0's, FAIL "
+				"(BKSV=%02x%02x%02x%02x%02x)\n",
+				bksv[4], bksv[3], bksv[2], bksv[1], bksv[0]);
+			ret = -EINVAL;
+			goto error;
+		}
+
+		link0_bksv_0 = bksv[3];
+		link0_bksv_0 = (link0_bksv_0 << 8) | bksv[2];
+		link0_bksv_0 = (link0_bksv_0 << 8) | bksv[1];
+		link0_bksv_0 = (link0_bksv_0 << 8) | bksv[0];
+		link0_bksv_1 = bksv[4];
+		DEV_DBG("HDCP: BKSV=%02x%08x\n", link0_bksv_1, link0_bksv_0);
+
+		/* read Bcaps at 0x40 in HDCP Port */
+		ret = hdmi_msm_ddc_read(0x74, 0x40, &bcaps, 1, 5, "Bcaps",
+			TRUE);
+		if (ret) {
+			DEV_ERR("%s(%d): Read Bcaps failed", __func__,
+			    __LINE__);
+			goto error;
+		}
+		DEV_DBG("HDCP: Bcaps=%02x\n", bcaps);
+
+		/* HDCP setup prior to HDCP enabled */
+
+		/* 0x0148 HDCP_RCVPORT_DATA4
+			[15:8] LINK0_AINFO
+			[7:0] LINK0_AKSV_1 */
+		/* LINK0_AINFO	= 0x2 FEATURE 1.1 on.
+		 *		= 0x0 FEATURE 1.1 off*/
+		HDMI_OUTP(0x0148, 0x2 << 8);
+
+		/* 0x012C HDCP_ENTROPY_CTRL0
+			[31:0] BITS_OF_INFLUENCE_0 */
+		/* 0x025C HDCP_ENTROPY_CTRL1
+			[31:0] BITS_OF_INFLUENCE_1 */
+		HDMI_OUTP(0x012C, 0xB1FFB0FF);
+		HDMI_OUTP(0x025C, 0xF00DFACE);
+
+		/* 0x0114 HDCP_DEBUG_CTRL
+			[2]	DEBUG_RNG_CIPHER
+			else default 0 */
+		HDMI_OUTP(0x0114, HDMI_INP(0x0114) & 0xFFFFFFFB);
+
+		/* 0x0110 HDCP_CTRL
+			[8] ENCRYPTION_ENABLE
+			[0] ENABLE */
+		/* encryption_enable | enable  */
+		HDMI_OUTP(0x0110, (1 << 8) | (1 << 0));
+
+		/* 0x0118 HDCP_INT_CTRL
+		 *    [2] AUTH_SUCCESS_MASK	[R/W]	Mask bit for\
+		 *					HDCP Authentication
+		 *		Success interrupt - set to 1 to enable interrupt
+		 *
+		 *    [6] AUTH_FAIL_MASK	[R/W]	Mask bit for HDCP
+		 *					Authentication
+		 *		Lost interrupt set to 1 to enable interrupt
+		 *
+		 *    [7] AUTH_FAIL_INFO_ACK	[W]	Acknowledge bit for HDCP
+		 *		Auth Failure Info field - write 1 to clear
+		 *
+		 *   [10] DDC_XFER_REQ_MASK	[R/W]	Mask bit for HDCP\
+		 *					DDC Transfer
+		 *		Request interrupt - set to 1 to enable interrupt
+		 *
+		 *   [14] DDC_XFER_DONE_MASK	[R/W]	Mask bit for HDCP\
+		 *					DDC Transfer
+		 *		done interrupt - set to 1 to enable interrupt */
+		/* enable all HDCP ints */
+		HDMI_OUTP(0x0118, (1 << 2) | (1 << 6) | (1 << 7));
+
+		/* 0x011C HDCP_LINK0_STATUS
+		[8] AN_0_READY
+		[9] AN_1_READY */
+		/* wait for an0 and an1 ready bits to be set in LINK0_STATUS */
+		timeout_count = 100;
+		while (((HDMI_INP_ND(0x011C) & (0x3 << 8)) != (0x3 << 8))
+			&& timeout_count--)
+			msleep(20);
+		if (!timeout_count) {
+			ret = -ETIMEDOUT;
+			DEV_ERR("%s(%d): timed out, An0=%d, An1=%d\n",
+				__func__, __LINE__,
+			(HDMI_INP_ND(0x011C) & BIT(8)) >> 8,
+			(HDMI_INP_ND(0x011C) & BIT(9)) >> 9);
+			goto error;
+		}
+
+		/* 0x0168 HDCP_RCVPORT_DATA12
+		   [23:8] BSTATUS
+		   [7:0] BCAPS */
+		HDMI_OUTP(0x0168, bcaps);
+
+		/* 0x014C HDCP_RCVPORT_DATA5
+		   [31:0] LINK0_AN_0 */
+		/* read an0 calculation */
+		link0_an_0 = HDMI_INP(0x014C);
+
+		/* 0x0150 HDCP_RCVPORT_DATA6
+		   [31:0] LINK0_AN_1 */
+		/* read an1 calculation */
+		link0_an_1 = HDMI_INP(0x0150);
+
+		/* three bits 28..30 */
+		hdcp_key_state((HDMI_INP(0x011C) >> 28) & 0x7);
+
+		/* 0x0144 HDCP_RCVPORT_DATA3
+		[31:0] LINK0_AKSV_0 public key
+		0x0148 HDCP_RCVPORT_DATA4
+		[15:8] LINK0_AINFO
+		[7:0]  LINK0_AKSV_1 public key */
+		link0_aksv_0 = HDMI_INP(0x0144);
+		link0_aksv_1 = HDMI_INP(0x0148);
+
+		/* copy an and aksv to byte arrays for transmission */
+		aksv[0] =  link0_aksv_0        & 0xFF;
+		aksv[1] = (link0_aksv_0 >> 8)  & 0xFF;
+		aksv[2] = (link0_aksv_0 >> 16) & 0xFF;
+		aksv[3] = (link0_aksv_0 >> 24) & 0xFF;
+		aksv[4] =  link0_aksv_1        & 0xFF;
+
+		an[0] =  link0_an_0        & 0xFF;
+		an[1] = (link0_an_0 >> 8)  & 0xFF;
+		an[2] = (link0_an_0 >> 16) & 0xFF;
+		an[3] = (link0_an_0 >> 24) & 0xFF;
+		an[4] =  link0_an_1        & 0xFF;
+		an[5] = (link0_an_1 >> 8)  & 0xFF;
+		an[6] = (link0_an_1 >> 16) & 0xFF;
+		an[7] = (link0_an_1 >> 24) & 0xFF;
+
+		/* Write An 8 bytes to offset 0x18 */
+		ret = hdmi_msm_ddc_write(0x74, 0x18, an, 8, "An");
+		if (ret) {
+			DEV_ERR("%s(%d): Write An failed", __func__, __LINE__);
+			goto error;
+		}
+
+		/* Write Aksv 5 bytes to offset 0x10 */
+		ret = hdmi_msm_ddc_write(0x74, 0x10, aksv, 5, "Aksv");
+		if (ret) {
+			DEV_ERR("%s(%d): Write Aksv failed", __func__,
+			    __LINE__);
+			goto error;
+		}
+		DEV_DBG("HDCP: Link0-AKSV=%02x%08x\n",
+			link0_aksv_1 & 0xFF, link0_aksv_0);
+
+		/* 0x0134 HDCP_RCVPORT_DATA0
+		   [31:0] LINK0_BKSV_0 */
+		HDMI_OUTP(0x0134, link0_bksv_0);
+		/* 0x0138 HDCP_RCVPORT_DATA1
+		   [31:0] LINK0_BKSV_1 */
+		HDMI_OUTP(0x0138, link0_bksv_1);
+		DEV_DBG("HDCP: Link0-BKSV=%02x%08x\n", link0_bksv_1,
+		    link0_bksv_0);
+
+		/* HDMI_HPD_INT_STATUS[0x0250] */
+		hpd_int_status = HDMI_INP_ND(0x0250);
+		/* HDMI_HPD_INT_CTRL[0x0254] */
+		hpd_int_ctrl = HDMI_INP_ND(0x0254);
+		DEV_DBG("[SR-DEBUG]: HPD_INTR_CTRL=[%u] HPD_INTR_STATUS=[%u] "
+		    "before reading R0'\n", hpd_int_ctrl, hpd_int_status);
+
+		/*
+		* HDCP Compliance Test case 1B-01:
+		* Wait here until all the ksv bytes have been
+		* read from the KSV FIFO register.
+		*/
+		msleep(125);
+
+		/* Reading R0' 2 bytes at offset 0x08 */
+		ret = hdmi_msm_ddc_read(0x74, 0x08, buf, 2, 5, "R0'", TRUE);
+		if (ret) {
+			DEV_ERR("%s(%d): Read R0' failed", __func__,
+			    __LINE__);
+			goto error;
+		}
+
+		/* 0x013C HDCP_RCVPORT_DATA2_0
+		[15:0] LINK0_RI */
+		HDMI_OUTP(0x013C, (((uint32)buf[1]) << 8) | buf[0]);
+		DEV_DBG("HDCP: R0'=%02x%02x\n", buf[1], buf[0]);
+
+		INIT_COMPLETION(hdmi_msm_state->hdcp_success_done);
+		timeout_count = wait_for_completion_interruptible_timeout(
+			&hdmi_msm_state->hdcp_success_done, HZ*2);
+
+		if (!timeout_count) {
+			ret = -ETIMEDOUT;
+			is_match = HDMI_INP(0x011C) & BIT(12);
+			DEV_ERR("%s(%d): timed out, Link0=<%s>\n", __func__,
+			  __LINE__,
+			  is_match ? "RI_MATCH" : "No RI Match INTR in time");
+			if (!is_match)
+				goto error;
+		}
+
+		/* 0x011C HDCP_LINK0_STATUS
+		[12] RI_MATCHES	[0] MISMATCH, [1] MATCH
+		[0] AUTH_SUCCESS */
+		/* Checking for RI, R0 Match */
+		/* RI_MATCHES */
+		if ((HDMI_INP(0x011C) & BIT(12)) != BIT(12)) {
+			ret = -EINVAL;
+			DEV_ERR("%s: HDCP_LINK0_STATUS[RI_MATCHES]: MISMATCH\n",
+			    __func__);
+			goto error;
+		}
+
+		DEV_INFO("HDCP: authentication part I, successful\n");
+		is_part1_done = FALSE;
+		return 0;
+error:
+		DEV_ERR("[%s]: HDCP Reauthentication\n", __func__);
+		is_part1_done = FALSE;
+		return ret;
+	} else {
+		return 1;
+	}
+}
+
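+/* Read the repeater's SHA-1 digest V' (five 32-bit words H0..H4 at DDC
+ * offsets 0x20..0x30) and program it into HDCP_RCVPORT_DATA7..11 so the
+ * HDCP block can compare it against its own V computation. */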
+static int hdmi_msm_transfer_v_h(void)
+{
+	/* Read V'.H0, 4 bytes at offset 0x20 */
+	char what[20];
+	int ret;
+	uint8 buf[4];
+
+	snprintf(what, sizeof(what), "V' H0");
+	ret = hdmi_msm_ddc_read(0x74, 0x20, buf, 4, 5, what, TRUE);
+	if (ret) {
+		DEV_ERR("%s: Read %s failed", __func__, what);
+		return ret;
+	}
+	DEV_DBG("buf[0]= %x , buf[1] = %x , buf[2] = %x , buf[3] = %x\n ",
+			buf[0] , buf[1] , buf[2] , buf[3]);
+
+	/* 0x0154 HDCP_RCVPORT_DATA7
+	   [31:0] V_HO */
+	HDMI_OUTP(0x0154 ,
+		(buf[3] << 24 | buf[2] << 16 | buf[1] << 8 | buf[0]));
+
+	snprintf(what, sizeof(what), "V' H1");
+	ret = hdmi_msm_ddc_read(0x74, 0x24, buf, 4, 5, what, TRUE);
+	if (ret) {
+		DEV_ERR("%s: Read %s failed", __func__, what);
+		return ret;
+	}
+	DEV_DBG("buf[0]= %x , buf[1] = %x , buf[2] = %x , buf[3] = %x\n ",
+			buf[0] , buf[1] , buf[2] , buf[3]);
+
+	/* 0x0158 HDCP_RCVPORT_DATA8
+	   [31:0] V_H1 */
+	HDMI_OUTP(0x0158,
+		(buf[3] << 24 | buf[2] << 16 | buf[1] << 8 | buf[0]));
+
+
+	snprintf(what, sizeof(what), "V' H2");
+	ret = hdmi_msm_ddc_read(0x74, 0x28, buf, 4, 5, what, TRUE);
+	if (ret) {
+		DEV_ERR("%s: Read %s failed", __func__, what);
+		return ret;
+	}
+	DEV_DBG("buf[0]= %x , buf[1] = %x , buf[2] = %x , buf[3] = %x\n ",
+			buf[0] , buf[1] , buf[2] , buf[3]);
+
+	/* 0x015c HDCP_RCVPORT_DATA9
+	   [31:0] V_H2 */
+	HDMI_OUTP(0x015c ,
+		(buf[3] << 24 | buf[2] << 16 | buf[1] << 8 | buf[0]));
+
+	snprintf(what, sizeof(what), "V' H3");
+	ret = hdmi_msm_ddc_read(0x74, 0x2c, buf, 4, 5, what, TRUE);
+	if (ret) {
+		DEV_ERR("%s: Read %s failed", __func__, what);
+		return ret;
+	}
+	DEV_DBG("buf[0]= %x , buf[1] = %x , buf[2] = %x , buf[3] = %x\n ",
+			buf[0] , buf[1] , buf[2] , buf[3]);
+
+	/* 0x0160 HDCP_RCVPORT_DATA10
+	   [31:0] V_H3 */
+	HDMI_OUTP(0x0160,
+		(buf[3] << 24 | buf[2] << 16 | buf[1] << 8 | buf[0]));
+
+	snprintf(what, sizeof(what), "V' H4");
+	ret = hdmi_msm_ddc_read(0x74, 0x30, buf, 4, 5, what, TRUE);
+	if (ret) {
+		DEV_ERR("%s: Read %s failed", __func__, what);
+		return ret;
+	}
+	DEV_DBG("buf[0]= %x , buf[1] = %x , buf[2] = %x , buf[3] = %x\n ",
+			buf[0] , buf[1] , buf[2] , buf[3]);
+	/* 0x0164 HDCP_RCVPORT_DATA11
+	   [31:0] V_H4 */
+	HDMI_OUTP(0x0164,
+		(buf[3] << 24 | buf[2] << 16 | buf[1] << 8 | buf[0]));
+
+	return 0;
+}
+
+static int hdcp_authentication_part2(void)
+{
+	int ret = 0;
+	uint32 timeout_count;
+	int i = 0;
+	int cnt = 0;
+	uint bstatus;
+	uint8 bcaps;
+	uint32 down_stream_devices;
+	uint32 ksv_bytes;
+
+	static uint8 buf[0xFF];
+	static uint8 ksv_fifo[5 * 127];
+
+	boolean max_devs_exceeded = 0;
+	boolean max_cascade_exceeded = 0;
+
+	boolean ksv_done = FALSE;
+
+	memset(buf, 0, sizeof(buf));
+	memset(ksv_fifo, 0, sizeof(ksv_fifo));
+
+	/* wait until READY bit is set in bcaps */
+	timeout_count = 50;
+	do {
+		timeout_count--;
+		/* read bcaps 1 Byte at offset 0x40 */
+		ret = hdmi_msm_ddc_read(0x74, 0x40, &bcaps, 1, 1,
+		    "Bcaps", FALSE);
+		if (ret) {
+			DEV_ERR("%s(%d): Read Bcaps failed", __func__,
+			    __LINE__);
+			goto error;
+		}
+		msleep(100);
+	} while ((0 == (bcaps & 0x20)) && timeout_count); /* READY (Bit 5) */
+	if (!timeout_count) {
+		ret = -ETIMEDOUT;
+		DEV_ERR("%s: timed out(1)", __func__);
+		goto error;
+	}
+
+	/* read bstatus 2 bytes at offset 0x41 */
+
+	ret = hdmi_msm_ddc_read(0x74, 0x41, buf, 2, 5, "Bstatus", FALSE);
+	if (ret) {
+		DEV_ERR("%s(%d): Read Bstatus failed", __func__, __LINE__);
+		goto error;
+	}
+	bstatus = buf[1];
+	bstatus = (bstatus << 8) | buf[0];
+	/* 0x0168 HDCP_RCVPORT_DATA12
+	[7:0] BCAPS
+	[23:8] BSTATUS */
+	HDMI_OUTP(0x0168, bcaps | (bstatus << 8));
+	/* BSTATUS [6:0] DEVICE_COUNT Number of HDMI devices attached to the
+	* repeater - see HDCP spec */
+	down_stream_devices = bstatus & 0x7F;
+
+	if (down_stream_devices == 0x0) {
+		/* No downstream devices are attached to the repeater */
+		DEV_ERR("%s: no downstream devices attached to the "
+		    "repeater\n", __func__);
+		ret = -EINVAL;
+		goto error;
+	}
+
+	/*
+	* HDCP Compliance 1B-05:
+	* Check if the number of devices connected to the repeater
+	* exceeds max_devices_connected, from bit 7 of Bstatus.
+	*/
+	max_devs_exceeded = (bstatus & 0x80) >> 7;
+	if (max_devs_exceeded == 0x01) {
+		DEV_ERR("%s: Number of devs connected to repeater "
+		    "exceeds max_devs\n", __func__);
+		ret = -EINVAL;
+		goto hdcp_error;
+	}
+
+	/*
+	* HDCP Compliance 1B-06:
+	* Check if the cascade depth reported by the repeater
+	* exceeds max_cascade_connected, from bit 11 of Bstatus.
+	*/
+	max_cascade_exceeded = (bstatus & 0x800) >> 11;
+	if (max_cascade_exceeded == 0x01) {
+		DEV_ERR("%s: repeater cascade depth "
+		    "exceeds max_cascade\n", __func__);
+		ret = -EINVAL;
+		goto hdcp_error;
+	}
+
+	/* Read KSV FIFO over DDC
+	* Key Selection vector FIFO
+	* Used to pull downstream KSVs from HDCP Repeaters.
+	* All bytes (DEVICE_COUNT * 5) must be read in a single,
+	*   auto incrementing access.
+	* All bytes read as 0x00 for HDCP Receivers that are not
+	*   HDCP Repeaters (REPEATER == 0). */
+	ksv_bytes = 5 * down_stream_devices;
+	/* Reading KSV FIFO / KSV FIFO */
+	ksv_done = FALSE;
+
+	do {
+		ret = hdmi_msm_ddc_read(0x74, 0x43, ksv_fifo, ksv_bytes, 5,
+		    "KSV FIFO", TRUE);
+		if (ret) {
+			DEV_ERR("%s(%d): Read KSV FIFO failed",
+			    __func__, __LINE__);
+			/*
+			* HDCP Compliance Test case 1B-01:
+			* Wait here until all the ksv bytes have been
+			* read from the KSV FIFO register, retrying the
+			* read if it fails.
+			*/
+			msleep(25);
+		} else {
+			ksv_done = TRUE;
+		}
+		cnt++;
+	} while (!ksv_done && cnt != 20);
+
+	if (ksv_done == FALSE)
+		goto error;
+
+	ret = hdmi_msm_transfer_v_h();
+	if (ret)
+		goto error;
+
+	/* Next: Write KSV FIFO to HDCP_SHA_DATA.
+	* This is done 1 byte at a time starting with the LSB.
+	* On the very last byte write, also set
+	* the HDCP_SHA_DATA_DONE bit[0].
+	*/
+
+	/* 0x023C HDCP_SHA_CTRL
+	[0] RESET	[0] Enable, [1] Reset
+	[4] SELECT	[0] DIGA_HDCP, [1] DIGB_HDCP */
+	/* reset SHA engine */
+	HDMI_OUTP(0x023C, 1);
+	/* enable SHA engine, SEL=DIGA_HDCP */
+	HDMI_OUTP(0x023C, 0);
+
+	for (i = 0; i < ksv_bytes - 1; i++) {
+		/* Write KSV byte and do not set DONE bit[0] */
+		HDMI_OUTP_ND(0x0244, ksv_fifo[i] << 16);
+	}
+	/* Write 1 to DONE bit[0] */
+	HDMI_OUTP_ND(0x0244, (ksv_fifo[ksv_bytes - 1] << 16) | 0x1);
+
+	/* 0x0240 HDCP_SHA_STATUS
+	[4] COMP_DONE */
+	/* Now wait for HDCP_SHA_COMP_DONE */
+	timeout_count = 100;
+	while ((0x10 != (HDMI_INP_ND(0x0240) & 0x10)) && timeout_count--)
+		msleep(20);
+	if (!timeout_count) {
+		ret = -ETIMEDOUT;
+		DEV_ERR("%s(%d): timed out", __func__, __LINE__);
+		goto error;
+	}
+
+	/* 0x011C HDCP_LINK0_STATUS
+	[20] V_MATCHES */
+	timeout_count = 100;
+	while (((HDMI_INP_ND(0x011C) & (1 << 20)) != (1 << 20))
+	    && timeout_count--)
+		msleep(20);
+	if (!timeout_count) {
+		ret = -ETIMEDOUT;
+		DEV_ERR("%s(%d): timed out", __func__, __LINE__);
+		goto error;
+	}
+
+	DEV_INFO("HDCP: authentication part II, successful\n");
+
+hdcp_error:
+error:
+	return ret;
+}
+
+static int hdcp_authentication_part3(uint32 found_repeater)
+{
+	int ret = 0;
+	int poll = 3000;
+	while (poll) {
+		/* 0x011C HDCP_LINK0_STATUS
+		    [30:28]  KEYS_STATE = 3 = "Valid"
+		    [24] RO_COMPUTATION_DONE	[0] Not Done, [1] Done
+		    [20] V_MATCHES		[0] Mismatch, [1] Match
+		    [12] RI_MATCHES		[0] Mismatch, [1] Match
+		    [0] AUTH_SUCCESS */
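+		/* 0x31001001 = KEYS_STATE "Valid" | RO_COMPUTATION_DONE |
+		 * RI_MATCHES | AUTH_SUCCESS; bit 20 (V_MATCHES) is OR'ed in
+		 * only when a repeater was found */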
+		if (HDMI_INP_ND(0x011C) != (0x31001001 |
+		    (found_repeater << 20))) {
+			DEV_ERR("HDCP: authentication part III, FAILED, "
+			    "Link Status=%08x\n", HDMI_INP(0x011C));
+			ret = -EINVAL;
+			goto error;
+		}
+		poll--;
+	}
+
+	DEV_INFO("HDCP: authentication part III, successful\n");
+
+error:
+	return ret;
+}
+
+static void hdmi_msm_hdcp_enable(void)
+{
+	int ret = 0;
+	uint8 bcaps;
+	uint32 found_repeater = 0x0;
+	char *envp[2];
+
+	if (!hdmi_msm_has_hdcp())
+		return;
+
+	mutex_lock(&hdmi_msm_state_mutex);
+	hdmi_msm_state->hdcp_activating = TRUE;
+	mutex_unlock(&hdmi_msm_state_mutex);
+
+	fill_black_screen();
+
+	mutex_lock(&hdcp_auth_state_mutex);
+	/*
+	 * Initialize this to zero here to make
+	 * sure HPD has not happened yet
+	 */
+	hdmi_msm_state->hpd_during_auth = FALSE;
+	/* This flag prevents other threads from re-authenticating
+	* after we've just authenticated (i.e., finished part3)
+	* We probably need to protect this in a mutex lock */
+	hdmi_msm_state->full_auth_done = FALSE;
+	mutex_unlock(&hdcp_auth_state_mutex);
+
+	/* PART I Authentication*/
+	ret = hdcp_authentication_part1();
+	if (ret)
+		goto error;
+
+	/* PART II Authentication*/
+	/* read Bcaps at 0x40 in HDCP Port */
+	ret = hdmi_msm_ddc_read(0x74, 0x40, &bcaps, 1, 5, "Bcaps", FALSE);
+	if (ret) {
+		DEV_ERR("%s(%d): Read Bcaps failed\n", __func__, __LINE__);
+		goto error;
+	}
+	DEV_DBG("HDCP: Bcaps=0x%02x (%s)\n", bcaps,
+		(bcaps & BIT(6)) ? "repeater" : "no repeater");
+
+	/* if REPEATER (Bit 6), perform Part2 Authentication */
+	if (bcaps & BIT(6)) {
+		found_repeater = 0x1;
+		ret = hdcp_authentication_part2();
+		if (ret)
+			goto error;
+	} else
+		DEV_INFO("HDCP: authentication part II skipped, no repeater\n");
+
+	/* PART III Authentication*/
+	ret = hdcp_authentication_part3(found_repeater);
+	if (ret)
+		goto error;
+
+	unfill_black_screen();
+
+	external_common_state->hdcp_active = TRUE;
+	mutex_lock(&hdmi_msm_state_mutex);
+	hdmi_msm_state->hdcp_activating = FALSE;
+	mutex_unlock(&hdmi_msm_state_mutex);
+
+	mutex_lock(&hdcp_auth_state_mutex);
+	/*
+	 * This flag prevents other threads from re-authenticating
+	 * after we've just authenticated (i.e., finished part3)
+	 */
+	hdmi_msm_state->full_auth_done = TRUE;
+	mutex_unlock(&hdcp_auth_state_mutex);
+
+	if (!hdmi_msm_is_dvi_mode()) {
+		DEV_INFO("HDMI HPD: sense : send HDCP_PASS\n");
+		envp[0] = "HDCP_STATE=PASS";
+		envp[1] = NULL;
+		kobject_uevent_env(external_common_state->uevent_kobj,
+		    KOBJ_CHANGE, envp);
+	}
+	return;
+
+error:
+	mutex_lock(&hdmi_msm_state_mutex);
+	hdmi_msm_state->hdcp_activating = FALSE;
+	mutex_unlock(&hdmi_msm_state_mutex);
+	if (hdmi_msm_state->hpd_during_auth) {
+		DEV_WARN("Calling Deauthentication: HPD occurred during "
+		    "authentication from [%s]\n", __func__);
+		hdcp_deauthenticate();
+		mutex_lock(&hdcp_auth_state_mutex);
+		hdmi_msm_state->hpd_during_auth = FALSE;
+		mutex_unlock(&hdcp_auth_state_mutex);
+	} else {
+		DEV_WARN("[DEV_DBG]: Calling reauth from [%s]\n", __func__);
+		if (hdmi_msm_state->panel_power_on)
+			queue_work(hdmi_work_queue,
+			    &hdmi_msm_state->hdcp_reauth_work);
+	}
+}
+#endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT */
+
+static void hdmi_msm_video_setup(int video_format)
+{
+	uint32 total_v   = 0;
+	uint32 total_h   = 0;
+	uint32 start_h   = 0;
+	uint32 end_h     = 0;
+	uint32 start_v   = 0;
+	uint32 end_v     = 0;
+	const struct hdmi_disp_mode_timing_type *timing =
+		hdmi_common_get_supported_mode(video_format);
+
+	/* timing register setup */
+	if (timing == NULL) {
+		DEV_ERR("video format not supported: %d\n", video_format);
+		return;
+	}
+
+	/* Hsync Total and Vsync Total */
+	total_h = timing->active_h + timing->front_porch_h
+		+ timing->back_porch_h + timing->pulse_width_h - 1;
+	total_v = timing->active_v + timing->front_porch_v
+		+ timing->back_porch_v + timing->pulse_width_v - 1;
+	/* 0x02C0 HDMI_TOTAL
+	   [27:16] V_TOTAL Vertical Total
+	   [11:0]  H_TOTAL Horizontal Total */
+	HDMI_OUTP(0x02C0, ((total_v << 16) & 0x0FFF0000)
+		| ((total_h << 0) & 0x00000FFF));
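+	/* For example, assuming the standard CEA-861 1080p60 timing (1920x1080
+	 * active, horizontal front porch/pulse/back porch = 88/44/148 and
+	 * vertical = 4/5/36), this yields H_TOTAL = 2199 and V_TOTAL = 1124. */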
+
+	/* Hsync Start and Hsync End */
+	start_h = timing->back_porch_h + timing->pulse_width_h;
+	end_h   = (total_h + 1) - timing->front_porch_h;
+	/* 0x02B4 HDMI_ACTIVE_H
+	   [27:16] END Horizontal end
+	   [11:0]  START Horizontal start */
+	HDMI_OUTP(0x02B4, ((end_h << 16) & 0x0FFF0000)
+		| ((start_h << 0) & 0x00000FFF));
+
+	start_v = timing->back_porch_v + timing->pulse_width_v - 1;
+	end_v   = total_v - timing->front_porch_v;
+	/* 0x02B8 HDMI_ACTIVE_V
+	   [27:16] END Vertical end
+	   [11:0]  START Vertical start */
+	HDMI_OUTP(0x02B8, ((end_v << 16) & 0x0FFF0000)
+		| ((start_v << 0) & 0x00000FFF));
+
+	if (timing->interlaced) {
+		/* 0x02C4 HDMI_V_TOTAL_F2
+		   [11:0] V_TOTAL_F2 Vertical total for field2 */
+		HDMI_OUTP(0x02C4, ((total_v + 1) << 0) & 0x00000FFF);
+
+		/* 0x02BC HDMI_ACTIVE_V_F2
+		   [27:16] END_F2 Vertical end for field2
+		   [11:0]  START_F2 Vertical start for Field2 */
+		HDMI_OUTP(0x02BC,
+			  (((start_v + 1) << 0) & 0x00000FFF)
+			| (((end_v + 1) << 16) & 0x0FFF0000));
+	} else {
+		/* HDMI_V_TOTAL_F2 */
+		HDMI_OUTP(0x02C4, 0);
+		/* HDMI_ACTIVE_V_F2 */
+		HDMI_OUTP(0x02BC, 0);
+	}
+
+	hdmi_frame_ctrl_cfg(timing);
+}
+
+struct hdmi_msm_audio_acr {
+	uint32 n;	/* N parameter for clock regeneration */
+	uint32 cts;	/* CTS parameter for clock regeneration */
+};
+
+struct hdmi_msm_audio_arcs {
+	uint32 pclk;
+	struct hdmi_msm_audio_acr lut[MSM_HDMI_SAMPLE_RATE_MAX];
+};
+
+#define HDMI_MSM_AUDIO_ARCS(pclk, ...) { pclk, __VA_ARGS__ }
+
+/* Audio constants lookup table for hdmi_msm_audio_acr_setup */
+/* Valid Pixel-Clock rates: 25.2MHz, 27MHz, 27.03MHz, 74.25MHz, 148.5MHz */
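+/* Per the HDMI spec the sink regenerates the audio clock from the TMDS
+ * clock as 128 * audio_sample_rate = pixel_clock * N / CTS.  For example,
+ * at 25.200 MHz and 48 kHz: 25,200,000 * 6144 / 25,200 = 6,144,000, which
+ * is 128 * 48,000.  Each entry below lists {N, CTS} pairs indexed by the
+ * sample-rate enum. */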
+static const struct hdmi_msm_audio_arcs hdmi_msm_audio_acr_lut[] = {
+	/*  25.200MHz  */
+	HDMI_MSM_AUDIO_ARCS(25200, {
+		{4096, 25200}, {6272, 28000}, {6144, 25200}, {12544, 28000},
+		{12288, 25200}, {25088, 28000}, {24576, 25200} }),
+	/*  27.000MHz  */
+	HDMI_MSM_AUDIO_ARCS(27000, {
+		{4096, 27000}, {6272, 30000}, {6144, 27000}, {12544, 30000},
+		{12288, 27000}, {25088, 30000}, {24576, 27000} }),
+	/*  27.030MHz */
+	HDMI_MSM_AUDIO_ARCS(27030, {
+		{4096, 27030}, {6272, 30030}, {6144, 27030}, {12544, 30030},
+		{12288, 27030}, {25088, 30030}, {24576, 27030} }),
+	/*  74.250MHz */
+	HDMI_MSM_AUDIO_ARCS(74250, {
+		{4096, 74250}, {6272, 82500}, {6144, 74250}, {12544, 82500},
+		{12288, 74250}, {25088, 82500}, {24576, 74250} }),
+	/* 148.500MHz */
+	HDMI_MSM_AUDIO_ARCS(148500, {
+		{4096, 148500}, {6272, 165000}, {6144, 148500}, {12544, 165000},
+		{12288, 148500}, {25088, 165000}, {24576, 148500} }),
+};
+
+static void hdmi_msm_audio_acr_setup(boolean enabled, int video_format,
+	int audio_sample_rate, int num_of_channels)
+{
+	/* Read first before writing */
+	/* HDMI_ACR_PKT_CTRL[0x0024] */
+	uint32 acr_pck_ctrl_reg = HDMI_INP(0x0024);
+
+	if (enabled) {
+		const struct hdmi_disp_mode_timing_type *timing =
+			hdmi_common_get_supported_mode(video_format);
+		const struct hdmi_msm_audio_arcs *audio_arc =
+			&hdmi_msm_audio_acr_lut[0];
+		const int lut_size = sizeof(hdmi_msm_audio_acr_lut)
+			/sizeof(*hdmi_msm_audio_acr_lut);
+		uint32 i, n, cts, layout, multiplier, aud_pck_ctrl_2_reg;
+
+		if (timing == NULL) {
+			DEV_WARN("%s: video format %d not supported\n",
+				__func__, video_format);
+			return;
+		}
+
+		for (i = 0; i < lut_size;
+			audio_arc = &hdmi_msm_audio_acr_lut[++i]) {
+			if (audio_arc->pclk == timing->pixel_freq)
+				break;
+		}
+		if (i >= lut_size) {
+			DEV_WARN("%s: pixel clock %d not supported\n", __func__,
+				timing->pixel_freq);
+			return;
+		}
+
+		n = audio_arc->lut[audio_sample_rate].n;
+		cts = audio_arc->lut[audio_sample_rate].cts;
+		layout = (MSM_HDMI_AUDIO_CHANNEL_2 == num_of_channels) ? 0 : 1;
+
+		if ((MSM_HDMI_SAMPLE_RATE_192KHZ == audio_sample_rate) ||
+		    (MSM_HDMI_SAMPLE_RATE_176_4KHZ == audio_sample_rate)) {
+			multiplier = 4;
+			n >>= 2; /* divide N by 4 and use multiplier */
+		} else if ((MSM_HDMI_SAMPLE_RATE_96KHZ == audio_sample_rate) ||
+			  (MSM_HDMI_SAMPLE_RATE_88_2KHZ == audio_sample_rate)) {
+			multiplier = 2;
+			n >>= 1; /* divide N by 2 and use multiplier */
+		} else {
+			multiplier = 1;
+		}
+		DEV_DBG("%s: n=%u, cts=%u, layout=%u\n", __func__, n, cts,
+			layout);
+
+		/* AUDIO_PRIORITY | SOURCE */
+		acr_pck_ctrl_reg |= 0x80000100;
+		/* N_MULTIPLE(multiplier) */
+		acr_pck_ctrl_reg |= (multiplier & 7) << 16;
+
+		if ((MSM_HDMI_SAMPLE_RATE_48KHZ == audio_sample_rate) ||
+		    (MSM_HDMI_SAMPLE_RATE_96KHZ == audio_sample_rate) ||
+		    (MSM_HDMI_SAMPLE_RATE_192KHZ == audio_sample_rate)) {
+			/* SELECT(3) */
+			acr_pck_ctrl_reg |= 3 << 4;
+			/* CTS_48 */
+			cts <<= 12;
+
+			/* CTS: need to determine how many fractional bits */
+			/* HDMI_ACR_48_0 */
+			HDMI_OUTP(0x00D4, cts);
+			/* N */
+			/* HDMI_ACR_48_1 */
+			HDMI_OUTP(0x00D8, n);
+		} else if ((MSM_HDMI_SAMPLE_RATE_44_1KHZ == audio_sample_rate)
+			   || (MSM_HDMI_SAMPLE_RATE_88_2KHZ ==
+			       audio_sample_rate)
+			   || (MSM_HDMI_SAMPLE_RATE_176_4KHZ ==
+			       audio_sample_rate)) {
+			/* SELECT(2) */
+			acr_pck_ctrl_reg |= 2 << 4;
+			/* CTS_44 */
+			cts <<= 12;
+
+			/* CTS: need to determine how many fractional bits */
+			/* HDMI_ACR_44_0 */
+			HDMI_OUTP(0x00CC, cts);
+			/* N */
+			/* HDMI_ACR_44_1 */
+			HDMI_OUTP(0x00D0, n);
+		} else {	/* default to 32k */
+			/* SELECT(1) */
+			acr_pck_ctrl_reg |= 1 << 4;
+			/* CTS_32 */
+			cts <<= 12;
+
+			/* CTS: need to determine how many fractional bits */
+			/* HDMI_ACR_32_0 */
+			HDMI_OUTP(0x00C4, cts);
+			/* N */
+			/* HDMI_ACR_32_1 */
+			HDMI_OUTP(0x00C8, n);
+		}
+		/* Payload layout depends on number of audio channels */
+		/* LAYOUT_SEL(layout) */
+		aud_pck_ctrl_2_reg = 1 | (layout << 1);
+		/* override | layout */
+		/* HDMI_AUDIO_PKT_CTRL2[0x00044] */
+		HDMI_OUTP(0x00044, aud_pck_ctrl_2_reg);
+
+		/* SEND | CONT */
+		acr_pck_ctrl_reg |= 0x00000003;
+	} else {
+		/* ~(SEND | CONT) */
+		acr_pck_ctrl_reg &= ~0x00000003;
+	}
+	/* HDMI_ACR_PKT_CTRL[0x0024] */
+	HDMI_OUTP(0x0024, acr_pck_ctrl_reg);
+}
+
+static void hdmi_msm_outpdw_chk(uint32 offset, uint32 data)
+{
+	uint32 check, i = 0;
+
+#ifdef DEBUG
+	HDMI_OUTP(offset, data);
+#endif
+	do {
+		outpdw(MSM_HDMI_BASE+offset, data);
+		check = inpdw(MSM_HDMI_BASE+offset);
+	} while (check != data && i++ < 10);
+
+	if (check != data)
+		DEV_ERR("%s: failed addr=%08x, data=%x, check=%x",
+			__func__, offset, data, check);
+}
+
+static void hdmi_msm_rmw32or(uint32 offset, uint32 data)
+{
+	uint32 reg_data;
+	reg_data = inpdw(MSM_HDMI_BASE+offset);
+	reg_data = inpdw(MSM_HDMI_BASE+offset);
+	hdmi_msm_outpdw_chk(offset, reg_data | data);
+}
+
+
+#define HDMI_AUDIO_CFG				0x01D0
+#define HDMI_AUDIO_ENGINE_ENABLE		1
+#define HDMI_AUDIO_FIFO_MASK			0x000000F0
+#define HDMI_AUDIO_FIFO_WATERMARK_SHIFT		4
+#define HDMI_AUDIO_FIFO_MAX_WATER_MARK		8
+
+
+int hdmi_audio_enable(bool on , u32 fifo_water_mark)
+{
+	u32 hdmi_audio_config;
+
+	hdmi_audio_config = HDMI_INP(HDMI_AUDIO_CFG);
+
+	if (on) {
+
+		if (fifo_water_mark > HDMI_AUDIO_FIFO_MAX_WATER_MARK) {
+			pr_err("%s : HDMI audio fifo water mark can not be more"
+				" than %u\n", __func__,
+				HDMI_AUDIO_FIFO_MAX_WATER_MARK);
+			return -EINVAL;
+		}
+
+		/*
+		 *  Enable HDMI Audio engine.
+		 *  MUST be enabled after Audio DMA is enabled.
+		*/
+		hdmi_audio_config &= ~(HDMI_AUDIO_FIFO_MASK);
+
+		hdmi_audio_config |= (HDMI_AUDIO_ENGINE_ENABLE |
+			 (fifo_water_mark << HDMI_AUDIO_FIFO_WATERMARK_SHIFT));
+
+	} else
+		 hdmi_audio_config &= ~(HDMI_AUDIO_ENGINE_ENABLE);
+
+	HDMI_OUTP(HDMI_AUDIO_CFG, hdmi_audio_config);
+
+	return 0;
+}
+EXPORT_SYMBOL(hdmi_audio_enable);
+
+static void hdmi_msm_audio_info_setup(boolean enabled, int num_of_channels,
+	int level_shift, boolean down_mix)
+{
+	uint32 channel_allocation = 0;	/* Default to FR,FL */
+	uint32 channel_count = 1;	/* Default to 2 channels
+					   -> See Table 17 in CEA-861-D spec */
+	uint32 check_sum, audio_info_0_reg, audio_info_1_reg;
+	uint32 audio_info_ctrl_reg;
+
+	/* Please see table 20 Audio InfoFrame in HDMI spec
+	   FL  = front left
+	   FC  = front Center
+	   FR  = front right
+	   FLC = front left center
+	   FRC = front right center
+	   RL  = rear left
+	   RC  = rear center
+	   RR  = rear right
+	   RLC = rear left center
+	   RRC = rear right center
+	   LFE = low frequency effect
+	 */
+
+	/* Read first then write because it is bundled with other controls */
+	/* HDMI_INFOFRAME_CTRL0[0x002C] */
+	audio_info_ctrl_reg = HDMI_INP(0x002C);
+
+	if (enabled) {
+		switch (num_of_channels) {
+		case MSM_HDMI_AUDIO_CHANNEL_2:
+			break;
+		case MSM_HDMI_AUDIO_CHANNEL_4:
+			channel_count = 3;
+			/* FC,LFE,FR,FL */
+			channel_allocation = 0x3;
+			break;
+		case MSM_HDMI_AUDIO_CHANNEL_6:
+			channel_count = 5;
+			/* RR,RL,FC,LFE,FR,FL */
+			channel_allocation = 0xB;
+			break;
+		case MSM_HDMI_AUDIO_CHANNEL_8:
+			channel_count = 7;
+			/* FRC,FLC,RR,RL,FC,LFE,FR,FL */
+			channel_allocation = 0x1f;
+			break;
+		default:
+			break;
+		}
+
+		/* Program the Channel-Speaker allocation */
+		audio_info_1_reg = 0;
+		/* CA(channel_allocation) */
+		audio_info_1_reg |= channel_allocation & 0xff;
+		/* Program the Level shifter */
+		/* LSV(level_shift) */
+		audio_info_1_reg |= (level_shift << 11) & 0x00007800;
+		/* Program the Down-mix Inhibit Flag */
+		/* DM_INH(down_mix) */
+		audio_info_1_reg |= (down_mix << 15) & 0x00008000;
+
+		/* HDMI_AUDIO_INFO1[0x00E8] */
+		HDMI_OUTP(0x00E8, audio_info_1_reg);
+
+		/* Calculate CheckSum
+		   Sum of all the bytes in the Audio Info Packet bytes
+		   (See table 8.4 in HDMI spec) */
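+		/* For example, in the default 2-channel case (CC=1, CA=0,
+		 * LSV=0, DM_INH=0): 0x84 + 0x01 + 0x0A + 0x01 = 0x90, so the
+		 * checksum byte written below is 0x100 - 0x90 = 0x70. */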
+		check_sum = 0;
+		/* HDMI_AUDIO_INFO_FRAME_PACKET_HEADER_TYPE[0x84] */
+		check_sum += 0x84;
+		/* HDMI_AUDIO_INFO_FRAME_PACKET_HEADER_VERSION[0x01] */
+		check_sum += 1;
+		/* HDMI_AUDIO_INFO_FRAME_PACKET_LENGTH[0x0A] */
+		check_sum += 0x0A;
+		check_sum += channel_count;
+		check_sum += channel_allocation;
+		/* See Table 8.5 in HDMI spec */
+		check_sum += (level_shift & 0xF) << 3 | (down_mix & 0x1) << 7;
+		check_sum &= 0xFF;
+		check_sum = (uint8) (256 - check_sum);
+
+		audio_info_0_reg = 0;
+		/* CHECKSUM(check_sum) */
+		audio_info_0_reg |= check_sum & 0xff;
+		/* CC(channel_count) */
+		audio_info_0_reg |= (channel_count << 8) & 0x00000700;
+
+		/* HDMI_AUDIO_INFO0[0x00E4] */
+		HDMI_OUTP(0x00E4, audio_info_0_reg);
+
+		/* Set these flags */
+		/* AUDIO_INFO_UPDATE | AUDIO_INFO_SOURCE | AUDIO_INFO_CONT
+		 | AUDIO_INFO_SEND */
+		audio_info_ctrl_reg |= 0x000000F0;
+	} else {
+		/* Clear these flags */
+		/* ~(AUDIO_INFO_UPDATE | AUDIO_INFO_SOURCE | AUDIO_INFO_CONT
+		   | AUDIO_INFO_SEND) */
+		audio_info_ctrl_reg &= ~0x000000F0;
+	}
+	/* HDMI_INFOFRAME_CTRL0[0x002C] */
+	HDMI_OUTP(0x002C, audio_info_ctrl_reg);
+}
+
+static void hdmi_msm_audio_ctrl_setup(boolean enabled, int delay)
+{
+	uint32 audio_pkt_ctrl_reg = 0;
+
+	/* Enable Packet Transmission */
+	audio_pkt_ctrl_reg |= enabled ? 0x00000001 : 0;
+	audio_pkt_ctrl_reg |= (delay << 4);
+
+	/* HDMI_AUDIO_PKT_CTRL1[0x0020] */
+	HDMI_OUTP(0x0020, audio_pkt_ctrl_reg);
+}
+
+static void hdmi_msm_en_gc_packet(boolean av_mute_is_requested)
+{
+	/* HDMI_GC[0x0040] */
+	HDMI_OUTP(0x0040, av_mute_is_requested ? 1 : 0);
+
+	/* GC packet enable (every frame) */
+	/* HDMI_VBI_PKT_CTRL[0x0028] */
+	hdmi_msm_rmw32or(0x0028, 3 << 4);
+}
+
+static void hdmi_msm_en_isrc_packet(boolean isrc_is_continued)
+{
+	static const char isrc_pseudo_data[] =
+					"ISRC1:0123456789isrc2=ABCDEFGHIJ";
+	const uint32 *isrc_data = (const uint32 *) isrc_pseudo_data;
+
+	/* ISRC_STATUS =0b010 | ISRC_CONTINUE | ISRC_VALID */
+	/* HDMI_ISRC1_0[0x00048] */
+	HDMI_OUTP(0x00048, 2 | (isrc_is_continued ? 1 : 0) << 6 | 0 << 7);
+
+	/* HDMI_ISRC1_1[0x004C] */
+	HDMI_OUTP(0x004C, *isrc_data++);
+	/* HDMI_ISRC1_2[0x0050] */
+	HDMI_OUTP(0x0050, *isrc_data++);
+	/* HDMI_ISRC1_3[0x0054] */
+	HDMI_OUTP(0x0054, *isrc_data++);
+	/* HDMI_ISRC1_4[0x0058] */
+	HDMI_OUTP(0x0058, *isrc_data++);
+
+	/* HDMI_ISRC2_0[0x005C] */
+	HDMI_OUTP(0x005C, *isrc_data++);
+	/* HDMI_ISRC2_1[0x0060] */
+	HDMI_OUTP(0x0060, *isrc_data++);
+	/* HDMI_ISRC2_2[0x0064] */
+	HDMI_OUTP(0x0064, *isrc_data++);
+	/* HDMI_ISRC2_3[0x0068] */
+	HDMI_OUTP(0x0068, *isrc_data);
+
+	/* HDMI_VBI_PKT_CTRL[0x0028] */
+	/* ISRC Send + Continuous */
+	hdmi_msm_rmw32or(0x0028, 3 << 8);
+}
+
+static void hdmi_msm_en_acp_packet(uint32 byte1)
+{
+	/* HDMI_ACP[0x003C] */
+	HDMI_OUTP(0x003C, 2 | 1 << 8 | byte1 << 16);
+
+	/* HDMI_VBI_PKT_CTRL[0x0028] */
+	/* ACP send, s/w source */
+	hdmi_msm_rmw32or(0x0028, 3 << 12);
+}
+
+static void hdmi_msm_audio_setup(void)
+{
+	const int channels = MSM_HDMI_AUDIO_CHANNEL_2;
+
+	/* (0) for clr_avmute, (1) for set_avmute */
+	hdmi_msm_en_gc_packet(0);
+	/* (0) for isrc1 only, (1) for isrc1 and isrc2 */
+	hdmi_msm_en_isrc_packet(1);
+	/* arbitrary bit pattern for byte1 */
+	hdmi_msm_en_acp_packet(0x5a);
+
+	hdmi_msm_audio_acr_setup(TRUE,
+		external_common_state->video_resolution,
+		MSM_HDMI_SAMPLE_RATE_48KHZ, channels);
+	hdmi_msm_audio_info_setup(TRUE, channels, 0, FALSE);
+	hdmi_msm_audio_ctrl_setup(TRUE, 1);
+
+	/* Turn on Audio FIFO and SAM DROP ISR */
+	HDMI_OUTP(0x02CC, HDMI_INP(0x02CC) | BIT(1) | BIT(3));
+	DEV_INFO("HDMI Audio: Enabled\n");
+}
+
+static int hdmi_msm_audio_off(void)
+{
+	uint32 audio_pkt_ctrl, audio_cfg;
+	 /* Number of wait iterations */
+	int i = 10;
+	audio_pkt_ctrl = HDMI_INP_ND(0x0020);
+	audio_cfg = HDMI_INP_ND(0x01D0);
+
+	/* Checking BIT[0] of AUDIO PACKET CONTROL and */
+	/* AUDIO CONFIGURATION register */
+	while (((audio_pkt_ctrl & 0x00000001) || (audio_cfg & 0x00000001))
+		&& (i--)) {
+		audio_pkt_ctrl = HDMI_INP_ND(0x0020);
+		audio_cfg = HDMI_INP_ND(0x01D0);
+		DEV_DBG("%d times :: HDMI AUDIO PACKET is %08x and "
+		"AUDIO CFG is %08x", i, audio_pkt_ctrl, audio_cfg);
+		msleep(100);
+		if (!i) {
+			DEV_ERR("%s: failed to clear BIT[0] of AUDIO PACKET "
+			"CONTROL or AUDIO CONFIGURATION register\n",
+				__func__);
+			return -ETIMEDOUT;
+		}
+	}
+	hdmi_msm_audio_info_setup(FALSE, 0, 0, FALSE);
+	hdmi_msm_audio_ctrl_setup(FALSE, 0);
+	hdmi_msm_audio_acr_setup(FALSE, 0, 0, 0);
+	DEV_INFO("HDMI Audio: Disabled\n");
+	return 0;
+}
+
+
+#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
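+/* Each row of the LUT below supplies one AVI InfoFrame data byte (DB1..DB13)
+ * and each column one supported video format, indexed by 'mode' in
+ * hdmi_msm_avi_info_frame() below. */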
+static uint8 hdmi_msm_avi_iframe_lut[][14] = {
+/*	480p60	480i60	576p50	576i50	720p60	720p50	1080p60	1080i60	1080p50
+	1080i50	1080p24	1080p30	1080p25	640x480p */
+	{0x10,	0x10,	0x10,	0x10,	0x10,	0x10,	0x10,	0x10,	0x10,
+	 0x10,	0x10,	0x10,	0x10,	0x10},
+	{0x18,	0x18,	0x28,	0x28,	0x28,	0x28,	0x28,	0x28,	0x28,
+	 0x28,	0x28,	0x28,	0x28,	0x18},
+	{0x04,	0x04,	0x04,	0x04,	0x04,	0x04,	0x04,	0x04,	0x04,
+	 0x04,	0x04,	0x04,	0x04,	0x88},
+	{0x02,	0x06,	0x11,	0x15,	0x04,	0x13,	0x10,	0x05,	0x1F,
+	 0x14,	0x20,	0x22,	0x21,	0x01},
+	{0x00,	0x01,	0x00,	0x01,	0x00,	0x00,	0x00,	0x00,	0x00,
+	 0x00,	0x00,	0x00,	0x00,	0x00},
+	{0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,
+	 0x00,	0x00,	0x00,	0x00,	0x00},
+	{0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,
+	 0x00,	0x00,	0x00,	0x00,	0x00},
+	{0xE1,	0xE1,	0x41,	0x41,	0xD1,	0xd1,	0x39,	0x39,	0x39,
+	 0x39,	0x39,	0x39,	0x39,	0xe1},
+	{0x01,	0x01,	0x02,	0x02,	0x02,	0x02,	0x04,	0x04,	0x04,
+	 0x04,	0x04,	0x04,	0x04,	0x01},
+	{0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,
+	 0x00,	0x00,	0x00,	0x00,	0x00},
+	{0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,
+	 0x00,	0x00,	0x00,	0x00,	0x00},
+	{0xD1,	0xD1,	0xD1,	0xD1,	0x01,	0x01,	0x81,	0x81,	0x81,
+	 0x81,	0x81,	0x81,	0x81,	0x81},
+	{0x02,	0x02,	0x02,	0x02,	0x05,	0x05,	0x07,	0x07,	0x07,
+	 0x07,	0x07,	0x07,	0x07,	0x02}
+};
+
+static void hdmi_msm_avi_info_frame(void)
+{
+	/* two header + length + 13 data */
+	uint8 aviInfoFrame[16];
+	uint8 checksum;
+	uint32 sum;
+	uint32 regVal;
+	int i;
+	int mode = 0;
+
+	switch (external_common_state->video_resolution) {
+	case HDMI_VFRMT_720x480p60_16_9:
+		mode = 0;
+		break;
+	case HDMI_VFRMT_720x480i60_16_9:
+		mode = 1;
+		break;
+	case HDMI_VFRMT_720x576p50_16_9:
+		mode = 2;
+		break;
+	case HDMI_VFRMT_720x576i50_16_9:
+		mode = 3;
+		break;
+	case HDMI_VFRMT_1280x720p60_16_9:
+		mode = 4;
+		break;
+	case HDMI_VFRMT_1280x720p50_16_9:
+		mode = 5;
+		break;
+	case HDMI_VFRMT_1920x1080p60_16_9:
+		mode = 6;
+		break;
+	case HDMI_VFRMT_1920x1080i60_16_9:
+		mode = 7;
+		break;
+	case HDMI_VFRMT_1920x1080p50_16_9:
+		mode = 8;
+		break;
+	case HDMI_VFRMT_1920x1080i50_16_9:
+		mode = 9;
+		break;
+	case HDMI_VFRMT_1920x1080p24_16_9:
+		mode = 10;
+		break;
+	case HDMI_VFRMT_1920x1080p30_16_9:
+		mode = 11;
+		break;
+	case HDMI_VFRMT_1920x1080p25_16_9:
+		mode = 12;
+		break;
+	case HDMI_VFRMT_640x480p60_4_3:
+		mode = 13;
+		break;
+	default:
+		DEV_INFO("%s: mode %d not supported\n", __func__,
+			external_common_state->video_resolution);
+		return;
+	}
+
+	/* InfoFrame Type = 82 */
+	aviInfoFrame[0]  = 0x82;
+	/* Version = 2 */
+	aviInfoFrame[1]  = 2;
+	/* Length of AVI InfoFrame = 13 */
+	aviInfoFrame[2]  = 13;
+
+	/* Data Byte 01: 0 Y1 Y0 A0 B1 B0 S1 S0 */
+	aviInfoFrame[3]  = hdmi_msm_avi_iframe_lut[0][mode];
+	/* Data Byte 02: C1 C0 M1 M0 R3 R2 R1 R0 */
+	aviInfoFrame[4]  = hdmi_msm_avi_iframe_lut[1][mode];
+	/* Data Byte 03: ITC EC2 EC1 EC0 Q1 Q0 SC1 SC0 */
+	aviInfoFrame[5]  = hdmi_msm_avi_iframe_lut[2][mode];
+	/* Data Byte 04: 0 VIC6 VIC5 VIC4 VIC3 VIC2 VIC1 VIC0 */
+	aviInfoFrame[6]  = hdmi_msm_avi_iframe_lut[3][mode];
+	/* Data Byte 05: 0 0 0 0 PR3 PR2 PR1 PR0 */
+	aviInfoFrame[7]  = hdmi_msm_avi_iframe_lut[4][mode];
+	/* Data Byte 06: LSB Line No of End of Top Bar */
+	aviInfoFrame[8]  = hdmi_msm_avi_iframe_lut[5][mode];
+	/* Data Byte 07: MSB Line No of End of Top Bar */
+	aviInfoFrame[9]  = hdmi_msm_avi_iframe_lut[6][mode];
+	/* Data Byte 08: LSB Line No of Start of Bottom Bar */
+	aviInfoFrame[10] = hdmi_msm_avi_iframe_lut[7][mode];
+	/* Data Byte 09: MSB Line No of Start of Bottom Bar */
+	aviInfoFrame[11] = hdmi_msm_avi_iframe_lut[8][mode];
+	/* Data Byte 10: LSB Pixel Number of End of Left Bar */
+	aviInfoFrame[12] = hdmi_msm_avi_iframe_lut[9][mode];
+	/* Data Byte 11: MSB Pixel Number of End of Left Bar */
+	aviInfoFrame[13] = hdmi_msm_avi_iframe_lut[10][mode];
+	/* Data Byte 12: LSB Pixel Number of Start of Right Bar */
+	aviInfoFrame[14] = hdmi_msm_avi_iframe_lut[11][mode];
+	/* Data Byte 13: MSB Pixel Number of Start of Right Bar */
+	aviInfoFrame[15] = hdmi_msm_avi_iframe_lut[12][mode];
+
+	sum = 0;
+	for (i = 0; i < 16; i++)
+		sum += aviInfoFrame[i];
+	sum &= 0xFF;
+	sum = 256 - sum;
+	checksum = (uint8) sum;
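+	/* The checksum is chosen so that the three header bytes, the 13 data
+	 * bytes and the checksum itself sum to zero modulo 256, as required
+	 * for HDMI InfoFrames. */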
+
+	regVal = aviInfoFrame[5];
+	regVal = regVal << 8 | aviInfoFrame[4];
+	regVal = regVal << 8 | aviInfoFrame[3];
+	regVal = regVal << 8 | checksum;
+	HDMI_OUTP(0x006C, regVal);
+
+	regVal = aviInfoFrame[9];
+	regVal = regVal << 8 | aviInfoFrame[8];
+	regVal = regVal << 8 | aviInfoFrame[7];
+	regVal = regVal << 8 | aviInfoFrame[6];
+	HDMI_OUTP(0x0070, regVal);
+
+	regVal = aviInfoFrame[13];
+	regVal = regVal << 8 | aviInfoFrame[12];
+	regVal = regVal << 8 | aviInfoFrame[11];
+	regVal = regVal << 8 | aviInfoFrame[10];
+	HDMI_OUTP(0x0074, regVal);
+
+	regVal = aviInfoFrame[1];
+	regVal = regVal << 16 | aviInfoFrame[15];
+	regVal = regVal << 8 | aviInfoFrame[14];
+	HDMI_OUTP(0x0078, regVal);
+
+	/* INFOFRAME_CTRL0[0x002C] */
+	/* 0x3 for AVI InfoFrame enable (every frame) */
+	HDMI_OUTP(0x002C, HDMI_INP(0x002C) | 0x00000003L);
+}
+#endif
+
+#ifdef CONFIG_FB_MSM_HDMI_3D
+static void hdmi_msm_vendor_infoframe_packetsetup(void)
+{
+	uint32 packet_header      = 0;
+	uint32 check_sum          = 0;
+	uint32 packet_payload     = 0;
+
+	if (!external_common_state->format_3d) {
+		HDMI_OUTP(0x0034, 0);
+		return;
+	}
+
+	/* 0x0084 GENERIC0_HDR
+	 *   HB0             7:0  NUM
+	 *   HB1            15:8  NUM
+	 *   HB2           23:16  NUM */
+	/* Setup Packet header and payload */
+	/* 0x81 VS_INFO_FRAME_ID
+	   0x01 VS_INFO_FRAME_VERSION
+	   0x1B VS_INFO_FRAME_PAYLOAD_LENGTH */
+	packet_header  = 0x81 | (0x01 << 8) | (0x1B << 16);
+	HDMI_OUTP(0x0084, packet_header);
+
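+	/* Accumulate the packet checksum over the header bytes; the payload
+	 * and IEEE OUI bytes are added below before taking the complement. */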
+	check_sum  = packet_header & 0xff;
+	check_sum += (packet_header >> 8) & 0xff;
+	check_sum += (packet_header >> 16) & 0xff;
+
+	/* 0x008C GENERIC0_1
+	 *   BYTE4           7:0  NUM
+	 *   BYTE5          15:8  NUM
+	 *   BYTE6         23:16  NUM
+	 *   BYTE7         31:24  NUM */
+	/* 0x02 VS_INFO_FRAME_3D_PRESENT */
+	packet_payload  = 0x02 << 5;
+	switch (external_common_state->format_3d) {
+	case 1:
+		/* 0b1000 VIDEO_3D_FORMAT_SIDE_BY_SIDE_HALF */
+		packet_payload |= (0x08 << 8) << 4;
+		break;
+	case 2:
+		/* 0b0110 VIDEO_3D_FORMAT_TOP_AND_BOTTOM_HALF */
+		packet_payload |= (0x06 << 8) << 4;
+		break;
+	}
+	HDMI_OUTP(0x008C, packet_payload);
+
+	check_sum += packet_payload & 0xff;
+	check_sum += (packet_payload >> 8) & 0xff;
+
+	#define IEEE_REGISTRATION_ID	0xC03
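+	/* 0x000C03 is the IEEE OUI registered to HDMI Licensing, LLC */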
+	/* Next 3 bytes are IEEE Registration Identification */
+	/* 0x0088 GENERIC0_0
+	 *   BYTE0           7:0  NUM (checksum)
+	 *   BYTE1          15:8  NUM
+	 *   BYTE2         23:16  NUM
+	 *   BYTE3         31:24  NUM */
+	check_sum += IEEE_REGISTRATION_ID & 0xff;
+	check_sum += (IEEE_REGISTRATION_ID >> 8) & 0xff;
+	check_sum += (IEEE_REGISTRATION_ID >> 16) & 0xff;
+
+	HDMI_OUTP(0x0088, (0x100 - (0xff & check_sum))
+		| ((IEEE_REGISTRATION_ID & 0xff) << 8)
+		| (((IEEE_REGISTRATION_ID >> 8) & 0xff) << 16)
+		| (((IEEE_REGISTRATION_ID >> 16) & 0xff) << 24));
+
+	/* 0x0034 GEN_PKT_CTRL
+	 *   GENERIC0_SEND   0      0 = Disable Generic0 Packet Transmission
+	 *                          1 = Enable Generic0 Packet Transmission
+	 *   GENERIC0_CONT   1      0 = Send Generic0 Packet on next frame only
+	 *                          1 = Send Generic0 Packet on every frame
+	 *   GENERIC0_UPDATE 2      NUM
+	 *   GENERIC1_SEND   4      0 = Disable Generic1 Packet Transmission
+	 *                          1 = Enable Generic1 Packet Transmission
+	 *   GENERIC1_CONT   5      0 = Send Generic1 Packet on next frame only
+	 *                          1 = Send Generic1 Packet on every frame
+	 *   GENERIC0_LINE   21:16  NUM
+	 *   GENERIC1_LINE   29:24  NUM
+	 */
+	/* GENERIC0_LINE | GENERIC0_UPDATE | GENERIC0_CONT | GENERIC0_SEND
+	 * Setup HDMI TX generic packet control:
+	 * send the packet on line 1, latch the updated packet contents,
+	 * transmit it on every frame,
+	 * and enable the HDMI TX engine to transmit Generic packet 0 */
+	HDMI_OUTP(0x0034, (1 << 16) | (1 << 2) | BIT(1) | BIT(0));
+}
+
+static void hdmi_msm_switch_3d(boolean on)
+{
+	mutex_lock(&external_common_state_hpd_mutex);
+	if (external_common_state->hpd_state)
+		hdmi_msm_vendor_infoframe_packetsetup();
+	mutex_unlock(&external_common_state_hpd_mutex);
+}
+#endif
+
+static int hdmi_msm_clk(int on)
+{
+	int rc;
+
+	DEV_DBG("HDMI Clk: %s\n", on ? "Enable" : "Disable");
+	if (on) {
+		rc = clk_enable(hdmi_msm_state->hdmi_app_clk);
+		if (rc) {
+			DEV_ERR("'hdmi_app_clk' clock enable failed, rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		rc = clk_enable(hdmi_msm_state->hdmi_m_pclk);
+		if (rc) {
+			DEV_ERR("'hdmi_m_pclk' clock enable failed, rc=%d\n",
+				rc);
+			/* undo the clock enabled above */
+			clk_disable(hdmi_msm_state->hdmi_app_clk);
+			return rc;
+		}
+
+		rc = clk_enable(hdmi_msm_state->hdmi_s_pclk);
+		if (rc) {
+			DEV_ERR("'hdmi_s_pclk' clock enable failed, rc=%d\n",
+				rc);
+			/* undo the clocks enabled above */
+			clk_disable(hdmi_msm_state->hdmi_m_pclk);
+			clk_disable(hdmi_msm_state->hdmi_app_clk);
+			return rc;
+		}
+	} else {
+		clk_disable(hdmi_msm_state->hdmi_app_clk);
+		clk_disable(hdmi_msm_state->hdmi_m_pclk);
+		clk_disable(hdmi_msm_state->hdmi_s_pclk);
+	}
+
+	return 0;
+}
+
+static void hdmi_msm_reset_core(void)
+{
+	hdmi_msm_set_mode(FALSE);
+	hdmi_msm_clk(0);
+	udelay(5);
+	hdmi_msm_clk(1);
+
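+	/* Assert the block resets, hold them briefly, then release them to
+	 * bring the HDMI core back to a known state. */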
+	clk_reset(hdmi_msm_state->hdmi_app_clk, CLK_RESET_ASSERT);
+	clk_reset(hdmi_msm_state->hdmi_m_pclk, CLK_RESET_ASSERT);
+	clk_reset(hdmi_msm_state->hdmi_s_pclk, CLK_RESET_ASSERT);
+	udelay(20);
+	clk_reset(hdmi_msm_state->hdmi_app_clk, CLK_RESET_DEASSERT);
+	clk_reset(hdmi_msm_state->hdmi_m_pclk, CLK_RESET_DEASSERT);
+	clk_reset(hdmi_msm_state->hdmi_s_pclk, CLK_RESET_DEASSERT);
+}
+
+static void hdmi_msm_turn_on(void)
+{
+	uint32 hpd_ctrl;
+
+	hdmi_msm_reset_core();
+	hdmi_msm_init_phy(external_common_state->video_resolution);
+	/* HDMI_USEC_REFTIMER[0x0208] */
+	HDMI_OUTP(0x0208, 0x0001001B);
+
+	hdmi_msm_video_setup(external_common_state->video_resolution);
+	if (!hdmi_msm_is_dvi_mode())
+		hdmi_msm_audio_setup();
+#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
+	hdmi_msm_avi_info_frame();
+#endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT */
+#ifdef CONFIG_FB_MSM_HDMI_3D
+	hdmi_msm_vendor_infoframe_packetsetup();
+#endif
+
+	/* set timeout to 4.1ms (max) for hardware debounce */
+	hpd_ctrl = (HDMI_INP(0x0258) & ~0xFFF) | 0xFFF;
+
+	/* Toggle HPD circuit to trigger HPD sense */
+	HDMI_OUTP(0x0258, ~(1 << 28) & hpd_ctrl);
+	HDMI_OUTP(0x0258, (1 << 28) | hpd_ctrl);
+
+	hdmi_msm_set_mode(TRUE);
+
+	/* Setup HPD IRQ */
+	HDMI_OUTP(0x0254, 4 | (external_common_state->hpd_state ? 0 : 2));
+
+#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
+	if (hdmi_msm_state->reauth) {
+		hdmi_msm_hdcp_enable();
+		hdmi_msm_state->reauth = FALSE;
+	}
+#endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT */
+	DEV_INFO("HDMI Core: Initialized\n");
+}
+
+static void hdmi_msm_hpd_state_timer(unsigned long data)
+{
+	queue_work(hdmi_work_queue, &hdmi_msm_state->hpd_state_work);
+}
+
+#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
+static void hdmi_msm_hdcp_timer(unsigned long data)
+{
+	queue_work(hdmi_work_queue, &hdmi_msm_state->hdcp_work);
+}
+#endif
+
+static void hdmi_msm_hpd_read_work(struct work_struct *work)
+{
+	uint32 hpd_ctrl;
+
+	clk_enable(hdmi_msm_state->hdmi_app_clk);
+	hdmi_msm_state->pd->core_power(1, 1);
+	hdmi_msm_state->pd->enable_5v(1);
+	hdmi_msm_set_mode(FALSE);
+	hdmi_msm_init_phy(external_common_state->video_resolution);
+	/* HDMI_USEC_REFTIMER[0x0208] */
+	HDMI_OUTP(0x0208, 0x0001001B);
+	hpd_ctrl = (HDMI_INP(0x0258) & ~0xFFF) | 0xFFF;
+
+	/* Toggle HPD circuit to trigger HPD sense */
+	HDMI_OUTP(0x0258, ~(1 << 28) & hpd_ctrl);
+	HDMI_OUTP(0x0258, (1 << 28) | hpd_ctrl);
+
+	hdmi_msm_set_mode(TRUE);
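+	/* allow the HPD line to settle before sampling the cable-sense bit */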
+	msleep(1000);
+	external_common_state->hpd_state = (HDMI_INP(0x0250) & 0x2) >> 1;
+	if (external_common_state->hpd_state) {
+		hdmi_msm_read_edid();
+		DEV_DBG("%s: sense CONNECTED: send ONLINE\n", __func__);
+		kobject_uevent(external_common_state->uevent_kobj,
+			KOBJ_ONLINE);
+	}
+	hdmi_msm_hpd_off();
+	hdmi_msm_set_mode(FALSE);
+	hdmi_msm_state->pd->core_power(0, 1);
+	hdmi_msm_state->pd->enable_5v(0);
+	clk_disable(hdmi_msm_state->hdmi_app_clk);
+}
+
+static void hdmi_msm_hpd_off(void)
+{
+	DEV_DBG("%s: (timer, clk, 5V, core, IRQ off)\n", __func__);
+	del_timer(&hdmi_msm_state->hpd_state_timer);
+	disable_irq(hdmi_msm_state->irq);
+
+	hdmi_msm_set_mode(FALSE);
+	HDMI_OUTP_ND(0x0308, 0x7F); /*0b01111111*/
+	hdmi_msm_state->hpd_initialized = FALSE;
+	hdmi_msm_state->pd->enable_5v(0);
+	hdmi_msm_state->pd->core_power(0, 1);
+	hdmi_msm_clk(0);
+}
+
+static void hdmi_msm_dump_regs(const char *prefix)
+{
+#ifdef REG_DUMP
+	print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 32, 4,
+		(void *)MSM_HDMI_BASE, 0x0334, false);
+#endif
+}
+
+static int hdmi_msm_hpd_on(bool trigger_handler)
+{
+	static int phy_reset_done;
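+	/* the PHY reset sequence only needs to run once after boot */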
+
+	hdmi_msm_clk(1);
+	hdmi_msm_state->pd->core_power(1, 1);
+	hdmi_msm_state->pd->enable_5v(1);
+	hdmi_msm_dump_regs("HDMI-INIT: ");
+	hdmi_msm_set_mode(FALSE);
+
+	if (!phy_reset_done) {
+		hdmi_phy_reset();
+		phy_reset_done = 1;
+	}
+
+	hdmi_msm_init_phy(external_common_state->video_resolution);
+	/* HDMI_USEC_REFTIMER[0x0208] */
+	HDMI_OUTP(0x0208, 0x0001001B);
+
+	/* Check HPD State */
+	if (!hdmi_msm_state->hpd_initialized) {
+		uint32 hpd_ctrl;
+		enable_irq(hdmi_msm_state->irq);
+
+		/* set timeout to 4.1ms (max) for hardware debounce */
+		hpd_ctrl = (HDMI_INP(0x0258) & ~0xFFF) | 0xFFF;
+
+		/* Toggle HPD circuit to trigger HPD sense */
+		HDMI_OUTP(0x0258, ~(1 << 28) & hpd_ctrl);
+		HDMI_OUTP(0x0258, (1 << 28) | hpd_ctrl);
+
+		DEV_DBG("%s: (clk, 5V, core, IRQ on) <trigger:%s>\n", __func__,
+			trigger_handler ? "true" : "false");
+
+		if (trigger_handler) {
+			/* Set HPD state machine: ensure at least 2 readouts */
+			mutex_lock(&hdmi_msm_state_mutex);
+			hdmi_msm_state->hpd_stable = 0;
+			hdmi_msm_state->hpd_prev_state = TRUE;
+			mutex_lock(&external_common_state_hpd_mutex);
+			external_common_state->hpd_state = FALSE;
+			mutex_unlock(&external_common_state_hpd_mutex);
+			hdmi_msm_state->hpd_cable_chg_detected = TRUE;
+			mutex_unlock(&hdmi_msm_state_mutex);
+			mod_timer(&hdmi_msm_state->hpd_state_timer,
+				jiffies + HZ/2);
+		}
+
+		hdmi_msm_state->hpd_initialized = TRUE;
+	}
+	hdmi_msm_set_mode(TRUE);
+
+	return 0;
+}
+
+static int hdmi_msm_power_on(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
+	bool changed;
+
+	if (!hdmi_msm_state || !hdmi_msm_state->hdmi_app_clk || !MSM_HDMI_BASE)
+		return -ENODEV;
+#ifdef CONFIG_SUSPEND
+	mutex_lock(&hdmi_msm_state_mutex);
+	if (hdmi_msm_state->pm_suspended) {
+		mutex_unlock(&hdmi_msm_state_mutex);
+		DEV_WARN("%s: ignored, pm_suspended\n", __func__);
+		return -ENODEV;
+	}
+	mutex_unlock(&hdmi_msm_state_mutex);
+#endif
+
+	DEV_INFO("power: ON (%dx%d %d)\n", mfd->var_xres, mfd->var_yres,
+		mfd->var_pixclock);
+
+#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
+	mutex_lock(&hdmi_msm_state_mutex);
+	if (hdmi_msm_state->hdcp_activating) {
+		hdmi_msm_state->panel_power_on = TRUE;
+		DEV_INFO("HDCP: activating, continuing power on\n");
+	}
+	mutex_unlock(&hdmi_msm_state_mutex);
+#endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT */
+
+	changed = hdmi_common_get_video_format_from_drv_data(mfd);
+	if (!external_common_state->hpd_feature_on) {
+		int rc = hdmi_msm_hpd_on(true);
+		DEV_INFO("HPD: panel power without 'hpd' feature on\n");
+		if (rc) {
+			DEV_WARN("HPD: activation failed: rc=%d\n", rc);
+			return rc;
+		}
+	}
+	hdmi_msm_audio_info_setup(TRUE, 0, 0, FALSE);
+
+	mutex_lock(&external_common_state_hpd_mutex);
+	hdmi_msm_state->panel_power_on = TRUE;
+	if ((external_common_state->hpd_state && !hdmi_msm_is_power_on())
+		|| changed) {
+		mutex_unlock(&external_common_state_hpd_mutex);
+		hdmi_msm_turn_on();
+	} else
+		mutex_unlock(&external_common_state_hpd_mutex);
+
+	hdmi_msm_dump_regs("HDMI-ON: ");
+
+	DEV_INFO("power=%s DVI=%s\n",
+		hdmi_msm_is_power_on() ? "ON" : "OFF",
+		hdmi_msm_is_dvi_mode() ? "ON" : "OFF");
+	return 0;
+}
+
+/* Note that power-off will also be called when the cable-remove event is
+ * processed on the user-space and as a result the framebuffer is powered
+ * down.  However, we are still required to be able to detect a cable-insert
+ * event; so for now leave the HDMI engine running; so that the HPD IRQ is
+ * still being processed.
+ */
+static int hdmi_msm_power_off(struct platform_device *pdev)
+{
+	if (!hdmi_msm_state->hdmi_app_clk)
+		return -ENODEV;
+#ifdef CONFIG_SUSPEND
+	mutex_lock(&hdmi_msm_state_mutex);
+	if (hdmi_msm_state->pm_suspended) {
+		mutex_unlock(&hdmi_msm_state_mutex);
+		DEV_WARN("%s: ignored, pm_suspended\n", __func__);
+		return -ENODEV;
+	}
+	mutex_unlock(&hdmi_msm_state_mutex);
+#endif
+
+#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
+	mutex_lock(&hdmi_msm_state_mutex);
+	if (hdmi_msm_state->hdcp_activating) {
+		hdmi_msm_state->panel_power_on = FALSE;
+		mutex_unlock(&hdmi_msm_state_mutex);
+		DEV_INFO("HDCP: activating, returning\n");
+		return 0;
+	}
+	mutex_unlock(&hdmi_msm_state_mutex);
+#endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT */
+
+	DEV_INFO("power: OFF (audio off, Reset Core)\n");
+	hdmi_msm_audio_off();
+#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
+	hdcp_deauthenticate();
+#endif
+	hdmi_msm_hpd_off();
+	hdmi_msm_powerdown_phy();
+	hdmi_msm_dump_regs("HDMI-OFF: ");
+	hdmi_msm_hpd_on(false);
+
+	mutex_lock(&external_common_state_hpd_mutex);
+	if (!external_common_state->hpd_feature_on)
+		hdmi_msm_hpd_off();
+	mutex_unlock(&external_common_state_hpd_mutex);
+
+	hdmi_msm_state->panel_power_on = FALSE;
+	return 0;
+}
+
+static int __devinit hdmi_msm_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct platform_device *fb_dev;
+
+	if (!hdmi_msm_state) {
+		pr_err("%s: hdmi_msm_state is NULL\n", __func__);
+		return -ENOMEM;
+	}
+
+	external_common_state->dev = &pdev->dev;
+	DEV_DBG("probe\n");
+	if (pdev->id == 0) {
+		struct resource *res;
+
+		#define GET_RES(name, mode) do {			\
+			res = platform_get_resource_byname(pdev, mode, name); \
+			if (!res) {					\
+				DEV_ERR("'" name "' resource not found\n"); \
+				rc = -ENODEV;				\
+				goto error;				\
+			}						\
+		} while (0)
+
+		#define IO_REMAP(var, name) do {			\
+			GET_RES(name, IORESOURCE_MEM);			\
+			var = ioremap(res->start, resource_size(res));	\
+			if (!var) {					\
+				DEV_ERR("'" name "' ioremap failed\n");	\
+				rc = -ENOMEM;				\
+				goto error;				\
+			}						\
+		} while (0)
+
+		#define GET_IRQ(var, name) do {				\
+			GET_RES(name, IORESOURCE_IRQ);			\
+			var = res->start;				\
+		} while (0)
+
+		IO_REMAP(hdmi_msm_state->qfprom_io, "hdmi_msm_qfprom_addr");
+		hdmi_msm_state->hdmi_io = MSM_HDMI_BASE;
+		GET_IRQ(hdmi_msm_state->irq, "hdmi_msm_irq");
+
+		hdmi_msm_state->pd = pdev->dev.platform_data;
+
+		#undef GET_RES
+		#undef IO_REMAP
+		#undef GET_IRQ
+		return 0;
+	}
+
+	hdmi_msm_state->hdmi_app_clk = clk_get(NULL, "hdmi_app_clk");
+	if (IS_ERR(hdmi_msm_state->hdmi_app_clk)) {
+		DEV_ERR("'hdmi_app_clk' clk not found\n");
+		rc = PTR_ERR(hdmi_msm_state->hdmi_app_clk);
+		goto error;
+	}
+
+	hdmi_msm_state->hdmi_m_pclk = clk_get(NULL, "hdmi_m_pclk");
+	if (IS_ERR(hdmi_msm_state->hdmi_m_pclk)) {
+		DEV_ERR("'hdmi_m_pclk' clk not found\n");
+		rc = PTR_ERR(hdmi_msm_state->hdmi_m_pclk);
+		goto error;
+	}
+
+	hdmi_msm_state->hdmi_s_pclk = clk_get(NULL, "hdmi_s_pclk");
+	if (IS_ERR(hdmi_msm_state->hdmi_s_pclk)) {
+		DEV_ERR("'hdmi_s_pclk' clk not found\n");
+		rc = PTR_ERR(hdmi_msm_state->hdmi_s_pclk);
+		goto error;
+	}
+
+	rc = check_hdmi_features();
+	if (rc) {
+		DEV_ERR("Init FAILED: check_hdmi_features rc=%d\n", rc);
+		goto error;
+	}
+
+	if (!hdmi_msm_state->pd->core_power) {
+		DEV_ERR("Init FAILED: core_power function missing\n");
+		rc = -ENODEV;
+		goto error;
+	}
+	if (!hdmi_msm_state->pd->enable_5v) {
+		DEV_ERR("Init FAILED: enable_5v function missing\n");
+		rc = -ENODEV;
+		goto error;
+	}
+
+	rc = request_threaded_irq(hdmi_msm_state->irq, NULL, &hdmi_msm_isr,
+		IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "hdmi_msm_isr", NULL);
+	if (rc) {
+		DEV_ERR("Init FAILED: IRQ request, rc=%d\n", rc);
+		goto error;
+	}
+	disable_irq(hdmi_msm_state->irq);
+
+	init_timer(&hdmi_msm_state->hpd_state_timer);
+	hdmi_msm_state->hpd_state_timer.function =
+		hdmi_msm_hpd_state_timer;
+	hdmi_msm_state->hpd_state_timer.data = (uint32)NULL;
+
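+	/* arm far in the future; re-armed via mod_timer() once HPD is enabled */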
+	hdmi_msm_state->hpd_state_timer.expires = 0xffffffffL;
+	add_timer(&hdmi_msm_state->hpd_state_timer);
+
+#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
+	init_timer(&hdmi_msm_state->hdcp_timer);
+	hdmi_msm_state->hdcp_timer.function =
+		hdmi_msm_hdcp_timer;
+	hdmi_msm_state->hdcp_timer.data = (uint32)NULL;
+
+	hdmi_msm_state->hdcp_timer.expires = 0xffffffffL;
+	add_timer(&hdmi_msm_state->hdcp_timer);
+#endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT */
+
+	fb_dev = msm_fb_add_device(pdev);
+	if (fb_dev) {
+		rc = external_common_state_create(fb_dev);
+		if (rc) {
+			DEV_ERR("Init FAILED: external_common_state_create, rc=%d\n",
+				rc);
+			goto error;
+		}
+	} else
+		DEV_ERR("Init FAILED: failed to add fb device\n");
+
+	DEV_INFO("HDMI HPD: ON\n");
+
+	rc = hdmi_msm_hpd_on(true);
+	if (rc)
+		goto error;
+
+	if (hdmi_msm_has_hdcp())
+		external_common_state->present_hdcp = TRUE;
+	else {
+		external_common_state->present_hdcp = FALSE;
+#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
+		/*
+		 * If the device is not hdcp capable do
+		 * not start hdcp timer.
+		 */
+		del_timer(&hdmi_msm_state->hdcp_timer);
+#endif
+	}
+
+	queue_work(hdmi_work_queue, &hdmi_msm_state->hpd_read_work);
+	return 0;
+
+error:
+	if (hdmi_msm_state->qfprom_io)
+		iounmap(hdmi_msm_state->qfprom_io);
+	hdmi_msm_state->qfprom_io = NULL;
+
+	if (hdmi_msm_state->hdmi_io)
+		iounmap(hdmi_msm_state->hdmi_io);
+	hdmi_msm_state->hdmi_io = NULL;
+
+	external_common_state_remove();
+
+	if (hdmi_msm_state->hdmi_app_clk)
+		clk_put(hdmi_msm_state->hdmi_app_clk);
+	if (hdmi_msm_state->hdmi_m_pclk)
+		clk_put(hdmi_msm_state->hdmi_m_pclk);
+	if (hdmi_msm_state->hdmi_s_pclk)
+		clk_put(hdmi_msm_state->hdmi_s_pclk);
+
+	hdmi_msm_state->hdmi_app_clk = NULL;
+	hdmi_msm_state->hdmi_m_pclk = NULL;
+	hdmi_msm_state->hdmi_s_pclk = NULL;
+
+	return rc;
+}
+
+static int __devexit hdmi_msm_remove(struct platform_device *pdev)
+{
+	DEV_INFO("HDMI device: remove\n");
+
+	DEV_INFO("HDMI HPD: OFF\n");
+	hdmi_msm_hpd_off();
+	free_irq(hdmi_msm_state->irq, NULL);
+
+	if (hdmi_msm_state->qfprom_io)
+		iounmap(hdmi_msm_state->qfprom_io);
+	hdmi_msm_state->qfprom_io = NULL;
+
+	if (hdmi_msm_state->hdmi_io)
+		iounmap(hdmi_msm_state->hdmi_io);
+	hdmi_msm_state->hdmi_io = NULL;
+
+	external_common_state_remove();
+
+	if (hdmi_msm_state->hdmi_app_clk)
+		clk_put(hdmi_msm_state->hdmi_app_clk);
+	if (hdmi_msm_state->hdmi_m_pclk)
+		clk_put(hdmi_msm_state->hdmi_m_pclk);
+	if (hdmi_msm_state->hdmi_s_pclk)
+		clk_put(hdmi_msm_state->hdmi_s_pclk);
+
+	hdmi_msm_state->hdmi_app_clk = NULL;
+	hdmi_msm_state->hdmi_m_pclk = NULL;
+	hdmi_msm_state->hdmi_s_pclk = NULL;
+
+	kfree(hdmi_msm_state);
+	hdmi_msm_state = NULL;
+
+	return 0;
+}
+
+static int hdmi_msm_hpd_feature(int on)
+{
+	int rc = 0;
+
+	DEV_INFO("%s: %d\n", __func__, on);
+	if (on)
+		rc = hdmi_msm_hpd_on(true);
+	else
+		hdmi_msm_hpd_off();
+
+	return rc;
+}
+
+#ifdef CONFIG_SUSPEND
+static int hdmi_msm_device_pm_suspend(struct device *dev)
+{
+	mutex_lock(&hdmi_msm_state_mutex);
+	if (hdmi_msm_state->pm_suspended) {
+		mutex_unlock(&hdmi_msm_state_mutex);
+		return 0;
+	}
+
+	DEV_DBG("pm_suspend\n");
+
+	del_timer(&hdmi_msm_state->hpd_state_timer);
+#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
+	del_timer(&hdmi_msm_state->hdcp_timer);
+#endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT */
+
+	disable_irq(hdmi_msm_state->irq);
+	clk_disable(hdmi_msm_state->hdmi_app_clk);
+	clk_disable(hdmi_msm_state->hdmi_m_pclk);
+	clk_disable(hdmi_msm_state->hdmi_s_pclk);
+
+	hdmi_msm_state->pm_suspended = TRUE;
+	mutex_unlock(&hdmi_msm_state_mutex);
+
+	hdmi_msm_powerdown_phy();
+	hdmi_msm_state->pd->enable_5v(0);
+	hdmi_msm_state->pd->core_power(0, 1);
+	return 0;
+}
+
+static int hdmi_msm_device_pm_resume(struct device *dev)
+{
+	mutex_lock(&hdmi_msm_state_mutex);
+	if (!hdmi_msm_state->pm_suspended) {
+		mutex_unlock(&hdmi_msm_state_mutex);
+		return 0;
+	}
+
+	DEV_DBG("pm_resume\n");
+
+	hdmi_msm_state->pd->core_power(1, 1);
+	hdmi_msm_state->pd->enable_5v(1);
+	clk_enable(hdmi_msm_state->hdmi_app_clk);
+	clk_enable(hdmi_msm_state->hdmi_m_pclk);
+	clk_enable(hdmi_msm_state->hdmi_s_pclk);
+
+	hdmi_msm_state->pm_suspended = FALSE;
+	mutex_unlock(&hdmi_msm_state_mutex);
+	enable_irq(hdmi_msm_state->irq);
+	return 0;
+}
+#else
+#define hdmi_msm_device_pm_suspend	NULL
+#define hdmi_msm_device_pm_resume	NULL
+#endif
+
+static const struct dev_pm_ops hdmi_msm_device_pm_ops = {
+	.suspend = hdmi_msm_device_pm_suspend,
+	.resume = hdmi_msm_device_pm_resume,
+};
+
+static struct platform_driver this_driver = {
+	.probe = hdmi_msm_probe,
+	.remove = hdmi_msm_remove,
+	.driver.name = "hdmi_msm",
+	.driver.pm = &hdmi_msm_device_pm_ops,
+};
+
+static struct msm_fb_panel_data hdmi_msm_panel_data = {
+	.on = hdmi_msm_power_on,
+	.off = hdmi_msm_power_off,
+};
+
+static struct platform_device this_device = {
+	.name = "hdmi_msm",
+	.id = 1,
+	.dev.platform_data = &hdmi_msm_panel_data,
+};
+
+static int __init hdmi_msm_init(void)
+{
+	int rc;
+
+	hdmi_msm_setup_video_mode_lut();
+	hdmi_msm_state = kzalloc(sizeof(*hdmi_msm_state), GFP_KERNEL);
+	if (!hdmi_msm_state) {
+		pr_err("hdmi_msm_init FAILED: out of memory\n");
+		rc = -ENOMEM;
+		goto init_exit;
+	}
+
+	external_common_state = &hdmi_msm_state->common;
+	external_common_state->video_resolution = HDMI_VFRMT_1920x1080p60_16_9;
+#ifdef CONFIG_FB_MSM_HDMI_3D
+	external_common_state->switch_3d = hdmi_msm_switch_3d;
+#endif
+
+	/*
+	 * Create the HDMI work queue
+	 * (allocates and returns a pointer)
+	 */
+	hdmi_work_queue = create_workqueue("hdmi_hdcp");
+	external_common_state->hpd_feature = hdmi_msm_hpd_feature;
+
+	rc = platform_driver_register(&this_driver);
+	if (rc) {
+		pr_err("hdmi_msm_init FAILED: platform_driver_register rc=%d\n",
+		       rc);
+		goto init_exit;
+	}
+
+	hdmi_common_init_panel_info(&hdmi_msm_panel_data.panel_info);
+	init_completion(&hdmi_msm_state->ddc_sw_done);
+	INIT_WORK(&hdmi_msm_state->hpd_state_work, hdmi_msm_hpd_state_work);
+	INIT_WORK(&hdmi_msm_state->hpd_read_work, hdmi_msm_hpd_read_work);
+#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
+	init_completion(&hdmi_msm_state->hdcp_success_done);
+	INIT_WORK(&hdmi_msm_state->hdcp_reauth_work, hdmi_msm_hdcp_reauth_work);
+	INIT_WORK(&hdmi_msm_state->hdcp_work, hdmi_msm_hdcp_work);
+#endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT */
+
+	rc = platform_device_register(&this_device);
+	if (rc) {
+		pr_err("hdmi_msm_init FAILED: platform_device_register rc=%d\n",
+		       rc);
+		platform_driver_unregister(&this_driver);
+		goto init_exit;
+	}
+
+	pr_debug("%s: success:"
+#ifdef DEBUG
+		" DEBUG"
+#else
+		" RELEASE"
+#endif
+		" AUDIO EDID HPD HDCP"
+#ifndef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
+		":0"
+#endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT */
+		" DVI"
+#ifndef CONFIG_FB_MSM_HDMI_MSM_PANEL_DVI_SUPPORT
+		":0"
+#endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_DVI_SUPPORT */
+		"\n", __func__);
+
+	return 0;
+
+init_exit:
+	kfree(hdmi_msm_state);
+	hdmi_msm_state = NULL;
+
+	return rc;
+}
+
+static void __exit hdmi_msm_exit(void)
+{
+	platform_device_unregister(&this_device);
+	platform_driver_unregister(&this_driver);
+}
+
+module_init(hdmi_msm_init);
+module_exit(hdmi_msm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.3");
+MODULE_AUTHOR("Qualcomm Innovation Center, Inc.");
+MODULE_DESCRIPTION("HDMI MSM TX driver");
diff --git a/drivers/video/msm/hdmi_msm.h b/drivers/video/msm/hdmi_msm.h
new file mode 100644
index 0000000..41c756f
--- /dev/null
+++ b/drivers/video/msm/hdmi_msm.h
@@ -0,0 +1,84 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __HDMI_MSM_H__
+#define __HDMI_MSM_H__
+
+#include <mach/msm_iomap.h>
+#include "external_common.h"
+/* #define PORT_DEBUG */
+
+#ifdef PORT_DEBUG
+const char *hdmi_msm_name(uint32 offset);
+void hdmi_outp(uint32 offset, uint32 value);
+uint32 hdmi_inp(uint32 offset);
+
+#define HDMI_OUTP_ND(offset, value)	outpdw(MSM_HDMI_BASE+(offset), (value))
+#define HDMI_OUTP(offset, value)	hdmi_outp((offset), (value))
+#define HDMI_INP_ND(offset)		inpdw(MSM_HDMI_BASE+(offset))
+#define HDMI_INP(offset)		hdmi_inp((offset))
+#else
+#define HDMI_OUTP_ND(offset, value)	outpdw(MSM_HDMI_BASE+(offset), (value))
+#define HDMI_OUTP(offset, value)	outpdw(MSM_HDMI_BASE+(offset), (value))
+#define HDMI_INP_ND(offset)		inpdw(MSM_HDMI_BASE+(offset))
+#define HDMI_INP(offset)		inpdw(MSM_HDMI_BASE+(offset))
+#endif
+
+#define QFPROM_BASE		((uint32)hdmi_msm_state->qfprom_io)
+
+struct hdmi_msm_state_type {
+	boolean panel_power_on;
+	boolean hpd_initialized;
+#ifdef CONFIG_SUSPEND
+	boolean pm_suspended;
+#endif
+	int hpd_stable;
+	boolean hpd_prev_state;
+	boolean hpd_cable_chg_detected;
+	boolean full_auth_done;
+	boolean hpd_during_auth;
+	struct work_struct hpd_state_work, hpd_read_work;
+	struct timer_list hpd_state_timer;
+	struct completion ddc_sw_done;
+
+#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
+	boolean hdcp_activating;
+	boolean reauth;
+	struct work_struct hdcp_reauth_work, hdcp_work;
+	struct completion hdcp_success_done;
+	struct timer_list hdcp_timer;
+#endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT */
+
+	int irq;
+	struct msm_hdmi_platform_data *pd;
+	struct clk *hdmi_app_clk;
+	struct clk *hdmi_m_pclk;
+	struct clk *hdmi_s_pclk;
+	void __iomem *qfprom_io;
+	void __iomem *hdmi_io;
+
+	struct external_common_state_type common;
+};
+
+extern struct hdmi_msm_state_type *hdmi_msm_state;
+
+uint32 hdmi_msm_get_io_base(void);
+
+#ifdef CONFIG_FB_MSM_HDMI_COMMON
+void hdmi_phy_reset(void);
+void hdmi_msm_init_phy(int video_format);
+void hdmi_msm_powerdown_phy(void);
+void hdmi_frame_ctrl_cfg(const struct hdmi_disp_mode_timing_type *timing);
+void hdmi_msm_phy_status_poll(void);
+#endif
+
+#endif /* __HDMI_MSM_H__ */
diff --git a/drivers/video/msm/hdmi_sii9022.c b/drivers/video/msm/hdmi_sii9022.c
new file mode 100644
index 0000000..3d27488
--- /dev/null
+++ b/drivers/video/msm/hdmi_sii9022.c
@@ -0,0 +1,245 @@
+/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include "msm_fb.h"
+
+#define DEVICE_NAME "sii9022"
+#define SII9022_DEVICE_ID   0xB0
+
+struct sii9022_i2c_addr_data{
+	u8 addr;
+	u8 data;
+};
+
+/* video mode data */
+static u8 video_mode_data[] = {
+	0x00,
+	0xF9, 0x1C, 0x70, 0x17, 0x72, 0x06, 0xEE, 0x02,
+};
+
+static u8 avi_io_format[] = {
+	0x09,
+	0x00, 0x00,
+};
+
+/* power state */
+static struct sii9022_i2c_addr_data regset0[] = {
+	{ 0x60, 0x04 },
+	{ 0x63, 0x00 },
+	{ 0x1E, 0x00 },
+};
+
+static u8 video_infoframe[] = {
+	0x0C,
+	0xF0, 0x00, 0x68, 0x00, 0x04, 0x00, 0x19, 0x00,
+	0xE9, 0x02, 0x04, 0x01, 0x04, 0x06,
+};
+
+/* configure audio */
+static struct sii9022_i2c_addr_data regset1[] = {
+	{ 0x26, 0x90 },
+	{ 0x20, 0x90 },
+	{ 0x1F, 0x80 },
+	{ 0x26, 0x80 },
+	{ 0x24, 0x02 },
+	{ 0x25, 0x0B },
+	{ 0xBC, 0x02 },
+	{ 0xBD, 0x24 },
+	{ 0xBE, 0x02 },
+};
+
+/* enable audio */
+static u8 misc_infoframe[] = {
+	0xBF,
+	0xC2, 0x84, 0x01, 0x0A, 0x6F, 0x02, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+/* set HDMI, active */
+static struct sii9022_i2c_addr_data regset2[] = {
+	{ 0x1A, 0x01 },
+	{ 0x3D, 0x00 },
+};
+
+static int send_i2c_data(struct i2c_client *client,
+			 struct sii9022_i2c_addr_data *regset,
+			 int size)
+{
+	int i;
+	int rc = 0;
+
+	for (i = 0; i < size; i++) {
+		rc = i2c_smbus_write_byte_data(
+			client,
+			regset[i].addr, regset[i].data);
+		if (rc)
+			break;
+	}
+	return rc;
+}
+
+static int hdmi_sii_enable(struct i2c_client *client)
+{
+	int rc;
+	int retries = 10;
+	int count;
+
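+	/* Writing 0x00 to register 0xC7 requests TPI mode on the SiI9022;
+	 * poll the TPI device ID register (0x1B) until it reads back 0xB0. */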
+	rc = i2c_smbus_write_byte_data(client, 0xC7, 0x00);
+	if (rc)
+		goto enable_exit;
+
+	do {
+		msleep(1);
+		rc = i2c_smbus_read_byte_data(client, 0x1B);
+	} while ((rc != SII9022_DEVICE_ID) && retries--);
+
+	if (rc != SII9022_DEVICE_ID)
+		return -ENODEV;
+
+	rc = i2c_smbus_write_byte_data(client, 0x1A, 0x11);
+	if (rc)
+		goto enable_exit;
+
+	count = ARRAY_SIZE(video_mode_data);
+	rc = i2c_master_send(client, video_mode_data, count);
+	if (rc != count) {
+		rc = -EIO;
+		goto enable_exit;
+	}
+
+	rc = i2c_smbus_write_byte_data(client, 0x08, 0x20);
+	if (rc)
+		goto enable_exit;
+	count = ARRAY_SIZE(avi_io_format);
+	rc = i2c_master_send(client, avi_io_format, count);
+	if (rc != count) {
+		rc = -EIO;
+		goto enable_exit;
+	}
+
+	rc = send_i2c_data(client, regset0, ARRAY_SIZE(regset0));
+	if (rc)
+		goto enable_exit;
+
+	count = ARRAY_SIZE(video_infoframe);
+	rc = i2c_master_send(client, video_infoframe, count);
+	if (rc != count) {
+		rc = -EIO;
+		goto enable_exit;
+	}
+
+	rc = send_i2c_data(client, regset1, ARRAY_SIZE(regset1));
+	if (rc)
+		goto enable_exit;
+
+	count = ARRAY_SIZE(misc_infoframe);
+	rc = i2c_master_send(client, misc_infoframe, count);
+	if (rc != count) {
+		rc = -EIO;
+		goto enable_exit;
+	}
+
+	rc = send_i2c_data(client, regset2, ARRAY_SIZE(regset2));
+	if (rc)
+		goto enable_exit;
+
+	return 0;
+enable_exit:
+	printk(KERN_ERR "%s: exited rc=%d\n", __func__, rc);
+	return rc;
+}
+
+static const struct i2c_device_id hdmi_sii_id[] = {
+	{ DEVICE_NAME, 0 },
+	{ }
+};
+
+static int hdmi_sii_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	int rc;
+
+	if (!i2c_check_functionality(client->adapter,
+				     I2C_FUNC_SMBUS_BYTE | I2C_FUNC_I2C))
+		return -ENODEV;
+	rc = hdmi_sii_enable(client);
+	return rc;
+}
+
+static struct i2c_driver hdmi_sii_i2c_driver = {
+	.driver = {
+		.name = DEVICE_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = hdmi_sii_probe,
+	.remove =  __exit_p(hdmi_sii_remove),
+	.id_table = hdmi_sii_id,
+};
+
+static int __init hdmi_sii_init(void)
+{
+	int ret;
+	struct msm_panel_info pinfo;
+
+	if (msm_fb_detect_client("hdmi_sii9022"))
+		return 0;
+
+	pinfo.xres = 1280;
+	pinfo.yres = 720;
+	MSM_FB_SINGLE_MODE_PANEL(&pinfo);
+	pinfo.type = HDMI_PANEL;
+	pinfo.pdest = DISPLAY_1;
+	pinfo.wait_cycle = 0;
+	pinfo.bpp = 18;
+	pinfo.fb_num = 2;
+	pinfo.clk_rate = 74250000;
+
+	pinfo.lcdc.h_back_porch = 124;
+	pinfo.lcdc.h_front_porch = 110;
+	pinfo.lcdc.h_pulse_width = 136;
+	pinfo.lcdc.v_back_porch = 19;
+	pinfo.lcdc.v_front_porch = 5;
+	pinfo.lcdc.v_pulse_width = 6;
+	pinfo.lcdc.border_clr = 0;
+	pinfo.lcdc.underflow_clr = 0xff;
+	pinfo.lcdc.hsync_skew = 0;
+
+	ret = lcdc_device_register(&pinfo);
+	if (ret) {
+		printk(KERN_ERR "%s: failed to register device\n", __func__);
+		goto init_exit;
+	}
+
+	ret = i2c_add_driver(&hdmi_sii_i2c_driver);
+	if (ret)
+		printk(KERN_ERR "%s: failed to add i2c driver\n", __func__);
+
+init_exit:
+	return ret;
+}
+
+static void __exit hdmi_sii_exit(void)
+{
+	i2c_del_driver(&hdmi_sii_i2c_driver);
+}
+
+module_init(hdmi_sii_init);
+module_exit(hdmi_sii_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.1");
+MODULE_AUTHOR("Qualcomm Innovation Center, Inc.");
+MODULE_DESCRIPTION("SiI9022 HDMI driver");
+MODULE_ALIAS("platform:hdmi-sii9022");
diff --git a/drivers/video/msm/lcdc.c b/drivers/video/msm/lcdc.c
new file mode 100644
index 0000000..e3f1907
--- /dev/null
+++ b/drivers/video/msm/lcdc.c
@@ -0,0 +1,294 @@
+/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <mach/hardware.h>
+#include <linux/io.h>
+
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <mach/msm_reqs.h>
+
+#include "msm_fb.h"
+
+static int lcdc_probe(struct platform_device *pdev);
+static int lcdc_remove(struct platform_device *pdev);
+
+static int lcdc_off(struct platform_device *pdev);
+static int lcdc_on(struct platform_device *pdev);
+
+static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
+static int pdev_list_cnt;
+
+static struct clk *pixel_mdp_clk; /* drives the lcdc block in mdp */
+static struct clk *pixel_lcdc_clk; /* drives the lcdc interface */
+
+static struct platform_driver lcdc_driver = {
+	.probe = lcdc_probe,
+	.remove = lcdc_remove,
+	.suspend = NULL,
+	.resume = NULL,
+	.shutdown = NULL,
+	.driver = {
+		   .name = "lcdc",
+		   },
+};
+
+static struct lcdc_platform_data *lcdc_pdata;
+
+static int lcdc_off(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct msm_fb_data_type *mfd;
+
+	mfd = platform_get_drvdata(pdev);
+	ret = panel_next_off(pdev);
+
+	clk_disable(pixel_mdp_clk);
+	clk_disable(pixel_lcdc_clk);
+
+	if (lcdc_pdata && lcdc_pdata->lcdc_power_save)
+		lcdc_pdata->lcdc_power_save(0);
+
+	if (lcdc_pdata && lcdc_pdata->lcdc_gpio_config)
+		ret = lcdc_pdata->lcdc_gpio_config(0);
+
+#ifndef CONFIG_MSM_BUS_SCALING
+	if (mdp_rev != MDP_REV_303) {
+		if (mfd->ebi1_clk)
+			clk_disable(mfd->ebi1_clk);
+	}
+#else
+	mdp_bus_scale_update_request(0);
+#endif
+
+	return ret;
+}
+
+static int lcdc_on(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct msm_fb_data_type *mfd;
+	unsigned long panel_pixclock_freq = 0;
+#ifndef CONFIG_MSM_BUS_SCALING
+	unsigned long pm_qos_rate;
+#endif
+	mfd = platform_get_drvdata(pdev);
+
+	if (lcdc_pdata && lcdc_pdata->lcdc_get_clk)
+		panel_pixclock_freq = lcdc_pdata->lcdc_get_clk();
+
+	if (!panel_pixclock_freq)
+		panel_pixclock_freq = mfd->fbi->var.pixclock;
+#ifdef CONFIG_MSM_BUS_SCALING
+	mdp_bus_scale_update_request(2);
+#else
+#ifdef CONFIG_MSM_NPA_SYSTEM_BUS
+	pm_qos_rate = MSM_AXI_FLOW_MDP_LCDC_WVGA_2BPP;
+#else
+	if (panel_pixclock_freq > 65000000)
+		/* pm_qos_rate should be in kHz */
+		pm_qos_rate = panel_pixclock_freq / 1000;
+	else
+		pm_qos_rate = 65000;
+#endif
+
+	if (mdp_rev != MDP_REV_303) {
+		if (mfd->ebi1_clk) {
+			clk_set_rate(mfd->ebi1_clk, pm_qos_rate * 1000);
+			clk_enable(mfd->ebi1_clk);
+		}
+	}
+#endif
+	mfd = platform_get_drvdata(pdev);
+
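+	/* round the requested pixel clock to a rate the MDP LCDC clock
+	 * can actually generate before programming it */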
+	mfd->fbi->var.pixclock = clk_round_rate(pixel_mdp_clk,
+					mfd->fbi->var.pixclock);
+	ret = clk_set_rate(pixel_mdp_clk, mfd->fbi->var.pixclock);
+	if (ret) {
+		pr_err("%s: Can't set MDP LCDC pixel clock to rate %u\n",
+			__func__, mfd->fbi->var.pixclock);
+		goto out;
+	}
+
+	clk_enable(pixel_mdp_clk);
+	clk_enable(pixel_lcdc_clk);
+
+	if (lcdc_pdata && lcdc_pdata->lcdc_power_save)
+		lcdc_pdata->lcdc_power_save(1);
+	if (lcdc_pdata && lcdc_pdata->lcdc_gpio_config)
+		ret = lcdc_pdata->lcdc_gpio_config(1);
+
+	ret = panel_next_on(pdev);
+
+out:
+	return ret;
+}
+
+static int lcdc_probe(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+	struct fb_info *fbi;
+	struct platform_device *mdp_dev = NULL;
+	struct msm_fb_panel_data *pdata = NULL;
+	int rc;
+
+	if (pdev->id == 0) {
+		lcdc_pdata = pdev->dev.platform_data;
+		return 0;
+	}
+
+	mfd = platform_get_drvdata(pdev);
+
+	if (!mfd)
+		return -ENODEV;
+
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
+		return -ENOMEM;
+
+	mdp_dev = platform_device_alloc("mdp", pdev->id);
+	if (!mdp_dev)
+		return -ENOMEM;
+
+	/*
+	 * link to the latest pdev
+	 */
+	mfd->pdev = mdp_dev;
+	mfd->dest = DISPLAY_LCDC;
+
+	/*
+	 * alloc panel device data
+	 */
+	if (platform_device_add_data
+	    (mdp_dev, pdev->dev.platform_data,
+	     sizeof(struct msm_fb_panel_data))) {
+		printk(KERN_ERR "lcdc_probe: platform_device_add_data failed!\n");
+		platform_device_put(mdp_dev);
+		return -ENOMEM;
+	}
+	/*
+	 * data chain
+	 */
+	pdata = (struct msm_fb_panel_data *)mdp_dev->dev.platform_data;
+	pdata->on = lcdc_on;
+	pdata->off = lcdc_off;
+	pdata->next = pdev;
+
+	/*
+	 * get/set panel specific fb info
+	 */
+	mfd->panel_info = pdata->panel_info;
+
+	if (mfd->index == 0)
+		mfd->fb_imgType = MSMFB_DEFAULT_TYPE;
+	else
+		mfd->fb_imgType = MDP_RGB_565;
+
+	fbi = mfd->fbi;
+	fbi->var.pixclock = clk_round_rate(pixel_mdp_clk,
+					mfd->panel_info.clk_rate);
+	fbi->var.left_margin = mfd->panel_info.lcdc.h_back_porch;
+	fbi->var.right_margin = mfd->panel_info.lcdc.h_front_porch;
+	fbi->var.upper_margin = mfd->panel_info.lcdc.v_back_porch;
+	fbi->var.lower_margin = mfd->panel_info.lcdc.v_front_porch;
+	fbi->var.hsync_len = mfd->panel_info.lcdc.h_pulse_width;
+	fbi->var.vsync_len = mfd->panel_info.lcdc.v_pulse_width;
+
+#ifndef CONFIG_MSM_BUS_SCALING
+	mfd->ebi1_clk = clk_get(NULL, "ebi1_lcdc_clk");
+	if (IS_ERR(mfd->ebi1_clk))
+		return PTR_ERR(mfd->ebi1_clk);
+#endif
+	/*
+	 * set driver data
+	 */
+	platform_set_drvdata(mdp_dev, mfd);
+	/*
+	 * register in mdp driver
+	 */
+	rc = platform_device_add(mdp_dev);
+	if (rc)
+		goto lcdc_probe_err;
+
+	pdev_list[pdev_list_cnt++] = pdev;
+
+	return 0;
+
+lcdc_probe_err:
+	platform_device_put(mdp_dev);
+	return rc;
+}
+
+static int lcdc_remove(struct platform_device *pdev)
+{
+#ifndef CONFIG_MSM_BUS_SCALING
+	struct msm_fb_data_type *mfd;
+
+	mfd = platform_get_drvdata(pdev);
+
+	clk_put(mfd->ebi1_clk);
+#endif
+	return 0;
+}
+
+static int lcdc_register_driver(void)
+{
+	return platform_driver_register(&lcdc_driver);
+}
+
+static int __init lcdc_driver_init(void)
+{
+
+	pixel_mdp_clk = clk_get(NULL, "pixel_mdp_clk");
+	if (IS_ERR(pixel_mdp_clk))
+		pixel_mdp_clk = NULL;
+
+	if (pixel_mdp_clk) {
+		pixel_lcdc_clk = clk_get(NULL, "pixel_lcdc_clk");
+		if (IS_ERR(pixel_lcdc_clk)) {
+			printk(KERN_ERR "Couldn't find pixel_lcdc_clk\n");
+			return -EINVAL;
+		}
+	} else {
+		pixel_mdp_clk = clk_get(NULL, "mdp_lcdc_pclk_clk");
+		if (IS_ERR(pixel_mdp_clk)) {
+			printk(KERN_ERR "Couldn't find mdp_lcdc_pclk_clk\n");
+			return -EINVAL;
+		}
+
+		pixel_lcdc_clk = clk_get(NULL, "mdp_lcdc_pad_pclk_clk");
+		if (IS_ERR(pixel_lcdc_clk)) {
+			printk(KERN_ERR "Couldn't find mdp_lcdc_pad_pclk_clk\n");
+			return -EINVAL;
+		}
+	}
+
+	return lcdc_register_driver();
+}
+
+module_init(lcdc_driver_init);
diff --git a/drivers/video/msm/lcdc_auo_wvga.c b/drivers/video/msm/lcdc_auo_wvga.c
new file mode 100644
index 0000000..b1c3af0
--- /dev/null
+++ b/drivers/video/msm/lcdc_auo_wvga.c
@@ -0,0 +1,413 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/pwm.h>
+#ifdef CONFIG_SPI_QUP
+#include <linux/spi/spi.h>
+#else
+#include <mach/gpio.h>
+#endif
+#include "msm_fb.h"
+
+#define MAX_BACKLIGHT_LEVEL			15
+#define PANEL_CMD_BACKLIGHT_LEVEL	0x6A18
+#define PANEL_CMD_FORMAT			0x3A00
+#define PANEL_CMD_RGBCTRL			0x3B00
+#define PANEL_CMD_BCTRL				0x5300
+#define PANEL_CMD_PWM_EN			0x6A17
+
+#define PANEL_CMD_SLEEP_OUT			0x1100
+#define PANEL_CMD_DISP_ON			0x2900
+#define PANEL_CMD_DISP_OFF			0x2800
+#define PANEL_CMD_SLEEP_IN			0x1000
+
+#define LCDC_AUO_PANEL_NAME			"lcdc_auo_wvga"
+
+#ifdef CONFIG_SPI_QUP
+#define LCDC_AUO_SPI_DEVICE_NAME	"lcdc_auo_nt35582"
+static struct spi_device *lcdc_spi_client;
+#else
+static int spi_cs;
+static int spi_sclk;
+static int spi_mosi;
+#endif
+
+struct auo_state_type {
+	boolean display_on;
+	int bl_level;
+};
+
+static struct auo_state_type auo_state = { .bl_level = 10 };
+static struct msm_panel_common_pdata *lcdc_auo_pdata;
+
+#ifndef CONFIG_SPI_QUP
+static void auo_spi_write_byte(u8 data)
+{
+	uint32 bit;
+	int bnum;
+
+	bnum = 8;			/* 8 data bits */
+	bit = 0x80;
+	while (bnum--) {
+		gpio_set_value(spi_sclk, 0); /* clk low */
+		gpio_set_value(spi_mosi, (data & bit) ? 1 : 0);
+		udelay(1);
+		gpio_set_value(spi_sclk, 1); /* clk high */
+		udelay(1);
+		bit >>= 1;
+	}
+	gpio_set_value(spi_mosi, 0);
+}
+
+static void auo_spi_read_byte(u16 cmd_16, u8 *data)
+{
+	int bnum;
+	u8 cmd_hi = (u8)(cmd_16 >> 8);
+	u8 cmd_low = (u8)(cmd_16);
+
+	/* Chip Select - low */
+	gpio_set_value(spi_cs, 0);
+	udelay(2);
+
+	/* command byte first */
+	auo_spi_write_byte(0x20);
+	udelay(2);
+	auo_spi_write_byte(cmd_hi);
+	udelay(2);
+	auo_spi_write_byte(0x00);
+	udelay(2);
+	auo_spi_write_byte(cmd_low);
+	udelay(2);
+	auo_spi_write_byte(0xc0);
+	udelay(2);
+
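+	/* the data line is bidirectional: switch it to input to clock
+	 * the response byte back in */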
+	gpio_direction_input(spi_mosi);
+
+	/* followed by data bytes */
+	bnum = 1 * 8;	/* number of bits */
+	*data = 0;
+	while (bnum) {
+		gpio_set_value(spi_sclk, 0); /* clk low */
+		udelay(1);
+		*data <<= 1;
+		*data |= gpio_get_value(spi_mosi) ? 1 : 0;
+		gpio_set_value(spi_sclk, 1); /* clk high */
+		udelay(1);
+		--bnum;
+		if ((bnum % 8) == 0)
+			++data;
+	}
+
+	gpio_direction_output(spi_mosi, 0);
+
+	/* Chip Select - high */
+	udelay(2);
+	gpio_set_value(spi_cs, 1);
+}
+#endif
+
+static int auo_serigo(u8 *input_data, int input_len)
+{
+#ifdef CONFIG_SPI_QUP
+	int                 rc;
+	struct spi_message  m;
+	struct spi_transfer t;
+
+	if (!lcdc_spi_client) {
+		pr_err("%s lcdc_spi_client is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	memset(&t, 0, sizeof t);
+
+	t.tx_buf = input_data;
+	t.len = input_len;
+	t.bits_per_word = 16;
+
+	spi_setup(lcdc_spi_client);
+	spi_message_init(&m);
+	spi_message_add_tail(&t, &m);
+
+	rc = spi_sync(lcdc_spi_client, &m);
+
+	return rc;
+#else
+	int i;
+
+	/* Chip Select - low */
+	gpio_set_value(spi_cs, 0);
+	udelay(2);
+
+	for (i = 0; i < input_len; ++i) {
+		auo_spi_write_byte(input_data[i]);
+		udelay(2);
+	}
+
+	/* Chip Select - high */
+	gpio_set_value(spi_cs, 1);
+
+	return 0;
+#endif
+}
+
+#ifndef CONFIG_SPI_QUP
+static void auo_spi_init(void)
+{
+	spi_sclk = *(lcdc_auo_pdata->gpio_num);
+	spi_cs   = *(lcdc_auo_pdata->gpio_num + 1);
+	spi_mosi = *(lcdc_auo_pdata->gpio_num + 2);
+
+	/* Set the output so that we don't disturb the slave device */
+	gpio_set_value(spi_sclk, 1);
+	gpio_set_value(spi_mosi, 0);
+
+	/* Set the Chip Select deasserted (active low) */
+	gpio_set_value(spi_cs, 1);
+}
+#endif
+
+static struct work_struct disp_on_delayed_work;
+static void auo_write_cmd(u16  cmd)
+{
+	u8  local_data[4];
+
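+	/* 0x20 and 0x00 prefix the command high/low bytes on the serial bus
+	 * (see auo_spi_read_byte above for the same framing) */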
+	local_data[0] = 0x20;
+	local_data[1] = (u8)(cmd >> 8);
+	local_data[2] = 0;
+	local_data[3] = (u8)cmd;
+	auo_serigo(local_data, 4);
+}
+static void auo_write_cmd_1param(u16  cmd, u8  para1)
+{
+	u8  local_data[6];
+
+	local_data[0] = 0x20;
+	local_data[1] = (u8)(cmd >> 8);
+	local_data[2] = 0;
+	local_data[3] = (u8)cmd;
+	local_data[4] = 0x40;
+	local_data[5] = para1;
+	auo_serigo(local_data, 6);
+}
+static void lcdc_auo_set_backlight(struct msm_fb_data_type *mfd)
+{
+	int bl_level;
+
+	bl_level = mfd->bl_level;
+	if (auo_state.display_on) {
+		auo_write_cmd_1param(PANEL_CMD_BACKLIGHT_LEVEL,
+			bl_level * 255 / MAX_BACKLIGHT_LEVEL);
+		auo_state.bl_level = bl_level;
+	}
+}
+static void auo_disp_on_delayed_work(struct work_struct *work_ptr)
+{
+	/* 0x1100: Sleep Out */
+	auo_write_cmd(PANEL_CMD_SLEEP_OUT);
+
+	msleep(180);
+
+	/* SET_PIXEL_FORMAT: Set how many bits per pixel are used (3A00h)*/
+	auo_write_cmd_1param(PANEL_CMD_FORMAT, 0x66); /* 18 bits */
+
+	/* RGBCTRL: RGB Interface Signal Control (3B00h) */
+	auo_write_cmd_1param(PANEL_CMD_RGBCTRL, 0x2B);
+
+	/* Display ON command */
+	auo_write_cmd(PANEL_CMD_DISP_ON);
+	msleep(20);
+
+	/*Backlight on */
+	auo_write_cmd_1param(PANEL_CMD_BCTRL, 0x24); /*BCTRL, BL */
+	auo_write_cmd_1param(PANEL_CMD_PWM_EN, 0x01); /*Enable PWM Level */
+
+	msleep(20);
+}
+
+static void auo_disp_on(void)
+{
+	if (!auo_state.display_on) {
+		INIT_WORK(&disp_on_delayed_work, auo_disp_on_delayed_work);
+#ifdef CONFIG_SPI_QUP
+		if (lcdc_spi_client)
+#endif
+			schedule_work(&disp_on_delayed_work);
+		auo_state.display_on = TRUE;
+	}
+}
+
+static int lcdc_auo_panel_on(struct platform_device *pdev)
+{
+	pr_info("%s\n", __func__);
+	if (!auo_state.display_on) {
+#ifndef CONFIG_SPI_QUP
+		lcdc_auo_pdata->panel_config_gpio(1);
+		auo_spi_init();
+#endif
+		auo_disp_on();
+	}
+	return 0;
+}
+
+static int lcdc_auo_panel_off(struct platform_device *pdev)
+{
+	pr_info("%s\n", __func__);
+	if (auo_state.display_on) {
+		/* 0x2800: Display Off */
+		auo_write_cmd(PANEL_CMD_DISP_OFF);
+		msleep(120);
+		/* 0x1000: Sleep In */
+		auo_write_cmd(PANEL_CMD_SLEEP_IN);
+		msleep(120);
+
+		auo_state.display_on = FALSE;
+	}
+	return 0;
+}
+
+static int auo_probe(struct platform_device *pdev)
+{
+	pr_info("%s: id=%d\n", __func__, pdev->id);
+	if (pdev->id == 0) {
+		lcdc_auo_pdata = pdev->dev.platform_data;
+		return 0;
+	}
+
+	msm_fb_add_device(pdev);
+
+	return 0;
+}
+
+#ifdef CONFIG_SPI_QUP
+static int __devinit lcdc_auo_spi_probe(struct spi_device *spi)
+{
+	pr_info("%s\n", __func__);
+	lcdc_spi_client = spi;
+	lcdc_spi_client->bits_per_word = 32;
+	if (auo_state.display_on)
+		schedule_work(&disp_on_delayed_work);
+	return 0;
+}
+static int __devexit lcdc_auo_spi_remove(struct spi_device *spi)
+{
+	lcdc_spi_client = NULL;
+	return 0;
+}
+static struct spi_driver lcdc_auo_spi_driver = {
+	.driver.name   = LCDC_AUO_SPI_DEVICE_NAME,
+	.driver.owner  = THIS_MODULE,
+	.probe         = lcdc_auo_spi_probe,
+	.remove        = __devexit_p(lcdc_auo_spi_remove),
+};
+#endif
+
+static struct platform_driver this_driver = {
+	.probe		= auo_probe,
+	.driver.name	= LCDC_AUO_PANEL_NAME,
+};
+
+static struct msm_fb_panel_data auo_panel_data = {
+	.on = lcdc_auo_panel_on,
+	.off = lcdc_auo_panel_off,
+	.set_backlight = lcdc_auo_set_backlight,
+};
+
+static struct platform_device this_device = {
+	.name	= LCDC_AUO_PANEL_NAME,
+	.id	= 1,
+	.dev.platform_data = &auo_panel_data,
+};
+
+static int __init lcdc_auo_panel_init(void)
+{
+	int ret;
+	struct msm_panel_info *pinfo;
+
+#ifdef CONFIG_FB_MSM_LCDC_AUTO_DETECT
+	if (msm_fb_detect_client(LCDC_AUO_PANEL_NAME)) {
+		pr_err("%s: detect failed\n", __func__);
+		return 0;
+	}
+#endif
+
+	ret = platform_driver_register(&this_driver);
+	if (ret) {
+		pr_err("%s: driver register failed, rc=%d\n", __func__, ret);
+		return ret;
+	}
+
+	pinfo = &auo_panel_data.panel_info;
+	pinfo->xres = 480;
+	pinfo->yres = 800;
+	pinfo->type = LCDC_PANEL;
+	pinfo->pdest = DISPLAY_1;
+	pinfo->wait_cycle = 0;
+	pinfo->bpp = 18;
+	pinfo->fb_num = 2;
+	pinfo->clk_rate = 25600000;
+	pinfo->bl_max = MAX_BACKLIGHT_LEVEL;
+	pinfo->bl_min = 1;
+
+	pinfo->lcdc.h_back_porch = 16-2;	/* HBP-HLW */
+	pinfo->lcdc.h_front_porch = 16;
+	pinfo->lcdc.h_pulse_width = 2;
+
+	pinfo->lcdc.v_back_porch = 3-2;		/* VBP-VLW */
+	pinfo->lcdc.v_front_porch = 28;
+	pinfo->lcdc.v_pulse_width = 2;
+
+	pinfo->lcdc.border_clr = 0;
+	pinfo->lcdc.underflow_clr = 0xff;
+	pinfo->lcdc.hsync_skew = 0;
+
+	ret = platform_device_register(&this_device);
+	if (ret) {
+		pr_err("%s: device register failed, rc=%d\n", __func__, ret);
+		goto fail_driver;
+	}
+#ifdef CONFIG_SPI_QUP
+	ret = spi_register_driver(&lcdc_auo_spi_driver);
+
+	if (ret) {
+		pr_err("%s: spi register failed: rc=%d\n", __func__, ret);
+		goto fail_device;
+	}
+	pr_info("%s: SUCCESS (SPI)\n", __func__);
+#else
+	pr_info("%s: SUCCESS (BitBang)\n", __func__);
+#endif
+	return ret;
+
+#ifdef CONFIG_SPI_QUP
+fail_device:
+	platform_device_unregister(&this_device);
+#endif
+fail_driver:
+	platform_driver_unregister(&this_driver);
+
+	return ret;
+}
+
+module_init(lcdc_auo_panel_init);
+static void __exit lcdc_auo_panel_exit(void)
+{
+	pr_info("%s\n", __func__);
+	platform_device_unregister(&this_device);
+	platform_driver_unregister(&this_driver);
+#ifdef CONFIG_SPI_QUP
+	spi_unregister_driver(&lcdc_auo_spi_driver);
+#endif
+}
+module_exit(lcdc_auo_panel_exit);
diff --git a/drivers/video/msm/lcdc_chimei_wxga.c b/drivers/video/msm/lcdc_chimei_wxga.c
new file mode 100644
index 0000000..b5cce2c
--- /dev/null
+++ b/drivers/video/msm/lcdc_chimei_wxga.c
@@ -0,0 +1,233 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/pwm.h>
+#ifdef CONFIG_PMIC8058_PWM
+#include <linux/mfd/pmic8058.h>
+#include <linux/pmic8058-pwm.h>
+#endif
+#include <mach/gpio.h>
+#include "msm_fb.h"
+
+static struct pwm_device *bl_pwm;
+
+#define PWM_FREQ_HZ 210
+#define PWM_PERIOD_USEC (USEC_PER_SEC / PWM_FREQ_HZ)
+#define PWM_DUTY_LEVEL (PWM_PERIOD_USEC / PWM_LEVEL)
+#define PWM_LEVEL 15
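+/* duty (usec) = level * PWM_DUTY_LEVEL, so level == PWM_LEVEL is ~100% duty */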
+
+static struct msm_panel_common_pdata *cm_pdata;
+static struct platform_device *cm_fbpdev;
+static int led_pwm;		/* pm8058 gpio 24, channel 0 */
+static int led_en;		/* pm8058 gpio 1 */
+static int lvds_pwr_down;	/* msm gpio 30 */
+static int chimei_bl_level = 1;
+
+static void lcdc_chimei_set_backlight(int level)
+{
+	int ret;
+
+	if (bl_pwm) {
+		ret = pwm_config(bl_pwm, PWM_DUTY_LEVEL * level,
+			PWM_PERIOD_USEC);
+		if (ret) {
+			pr_err("%s: pwm_config on pwm failed %d\n",
+					__func__, ret);
+			return;
+		}
+
+		ret = pwm_enable(bl_pwm);
+		if (ret) {
+			pr_err("%s: pwm_enable on pwm failed %d\n",
+					__func__, ret);
+			return;
+		}
+	}
+
+	chimei_bl_level = level;
+}
+
+static int lcdc_chimei_panel_on(struct platform_device *pdev)
+{
+	int ret;
+
+	/* panel powered on here */
+
+	ret = gpio_request(lvds_pwr_down, "lvds_pwr_down");
+	if (ret == 0) {
+		/* output, pull high to enable */
+		gpio_direction_output(lvds_pwr_down, 1);
+	} else {
+		pr_err("%s: lvds_pwr_down=%d, gpio_request failed\n",
+			__func__, lvds_pwr_down);
+	}
+
+	msleep(200);
+	/* power on led pwm power >= 200 ms */
+
+	if (chimei_bl_level == 0)
+		chimei_bl_level = 1;
+	lcdc_chimei_set_backlight(chimei_bl_level);
+
+	msleep(10);
+
+	ret = gpio_request(led_en, "led_en");
+	if (ret == 0) {
+		/* output, pull high */
+		gpio_direction_output(led_en, 1);
+	} else {
+		pr_err("%s: led_en=%d, gpio_request failed\n",
+			__func__, led_en);
+	}
+	return ret;
+}
+
+static int lcdc_chimei_panel_off(struct platform_device *pdev)
+{
+	/* pull low to disable */
+	gpio_set_value_cansleep(led_en, 0);
+	gpio_free(led_en);
+
+	msleep(10);
+
+	lcdc_chimei_set_backlight(0);
+
+	msleep(200);
+	/* power off led pwm power >= 200 ms */
+
+	/* pull low to shut down lvds */
+	gpio_set_value_cansleep(lvds_pwr_down, 0);
+	gpio_free(lvds_pwr_down);
+
+	/* panel power off here */
+
+	return 0;
+}
+
+static void lcdc_chimei_panel_backlight(struct msm_fb_data_type *mfd)
+{
+	lcdc_chimei_set_backlight(mfd->bl_level);
+}
+
+static int __devinit chimei_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+
+	if (pdev->id == 0) {
+		cm_pdata = pdev->dev.platform_data;
+		if (cm_pdata == NULL) {
+			pr_err("%s: no PWM gpio specified\n", __func__);
+			return 0;
+		}
+		led_pwm = cm_pdata->gpio_num[0];
+		led_en = cm_pdata->gpio_num[1];
+		lvds_pwr_down = cm_pdata->gpio_num[2];
+		pr_info("%s: led_pwm=%d led_en=%d lvds_pwr_down=%d\n",
+			__func__, led_pwm, led_en, lvds_pwr_down);
+		return 0;
+	}
+
+	if (cm_pdata == NULL)
+		return -ENODEV;
+
+	bl_pwm = pwm_request(led_pwm, "backlight");
+	if (bl_pwm == NULL || IS_ERR(bl_pwm)) {
+		pr_err("%s pwm_request() failed\n", __func__);
+		bl_pwm = NULL;
+	}
+
+	cm_fbpdev = msm_fb_add_device(pdev);
+	if (!cm_fbpdev) {
+		dev_err(&pdev->dev, "failed to add msm_fb device\n");
+		rc = -ENODEV;
+		goto probe_exit;
+	}
+
+probe_exit:
+	return rc;
+}
+
+static struct platform_driver this_driver = {
+	.probe  = chimei_probe,
+	.driver = {
+		.name   = "lcdc_chimei_lvds_wxga",
+	},
+};
+
+static struct msm_fb_panel_data chimei_panel_data = {
+	.on = lcdc_chimei_panel_on,
+	.off = lcdc_chimei_panel_off,
+	.set_backlight = lcdc_chimei_panel_backlight,
+};
+
+static struct platform_device this_device = {
+	.name   = "lcdc_chimei_lvds_wxga",
+	.id	= 1,
+	.dev	= {
+		.platform_data = &chimei_panel_data,
+	}
+};
+
+static int __init lcdc_chimei_lvds_panel_init(void)
+{
+	int ret;
+	struct msm_panel_info *pinfo;
+
+#ifdef CONFIG_FB_MSM_MIPI_PANEL_DETECT
+	if (msm_fb_detect_client("lcdc_chimei_lvds_wxga"))
+		return 0;
+#endif
+
+	ret = platform_driver_register(&this_driver);
+	if (ret)
+		return ret;
+
+	pinfo = &chimei_panel_data.panel_info;
+	pinfo->xres = 1366;
+	pinfo->yres = 768;
+	MSM_FB_SINGLE_MODE_PANEL(pinfo);
+	pinfo->type = LCDC_PANEL;
+	pinfo->pdest = DISPLAY_1;
+	pinfo->wait_cycle = 0;
+	pinfo->bpp = 18;
+	pinfo->fb_num = 2;
+	pinfo->clk_rate = 69300000;
+	pinfo->bl_max = PWM_LEVEL;
+	pinfo->bl_min = 1;
+
+	/*
+	 * this panel is driven by DE (data enable);
+	 * vsync and hsync are ignored
+	 */
+	pinfo->lcdc.h_back_porch = 108;
+	pinfo->lcdc.h_front_porch = 0;
+	pinfo->lcdc.h_pulse_width = 1;
+	pinfo->lcdc.v_back_porch = 0;
+	pinfo->lcdc.v_front_porch = 16;
+	pinfo->lcdc.v_pulse_width = 1;
+	pinfo->lcdc.border_clr = 0;
+	pinfo->lcdc.underflow_clr = 0xff;
+	pinfo->lcdc.hsync_skew = 0;
+
+	ret = platform_device_register(&this_device);
+	if (ret)
+		platform_driver_unregister(&this_driver);
+
+	return ret;
+}
+
+module_init(lcdc_chimei_lvds_panel_init);
diff --git a/drivers/video/msm/lcdc_external.c b/drivers/video/msm/lcdc_external.c
new file mode 100644
index 0000000..ca82def
--- /dev/null
+++ b/drivers/video/msm/lcdc_external.c
@@ -0,0 +1,51 @@
+/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_fb.h"
+
+static int __init lcdc_external_init(void)
+{
+	int ret;
+	struct msm_panel_info pinfo;
+
+	if (msm_fb_detect_client("lcdc_external"))
+		return 0;
+
+	pinfo.xres = 1280;
+	pinfo.yres = 720;
+	MSM_FB_SINGLE_MODE_PANEL(&pinfo);
+	pinfo.type = LCDC_PANEL;
+	pinfo.pdest = DISPLAY_1;
+	pinfo.wait_cycle = 0;
+	pinfo.bpp = 24;
+	pinfo.fb_num = 2;
+	pinfo.clk_rate = 74250000;
+
+	pinfo.lcdc.h_back_porch = 124;
+	pinfo.lcdc.h_front_porch = 110;
+	pinfo.lcdc.h_pulse_width = 136;
+	pinfo.lcdc.v_back_porch = 19;
+	pinfo.lcdc.v_front_porch = 5;
+	pinfo.lcdc.v_pulse_width = 6;
+	pinfo.lcdc.border_clr = 0;	/* blk */
+	pinfo.lcdc.underflow_clr = 0xff;	/* blue */
+	pinfo.lcdc.hsync_skew = 0;
+
+	ret = lcdc_device_register(&pinfo);
+	if (ret)
+		printk(KERN_ERR "%s: failed to register device!\n", __func__);
+
+	return ret;
+}
+
+module_init(lcdc_external_init);
diff --git a/drivers/video/msm/lcdc_gordon.c b/drivers/video/msm/lcdc_gordon.c
new file mode 100644
index 0000000..f9532b4
--- /dev/null
+++ b/drivers/video/msm/lcdc_gordon.c
@@ -0,0 +1,443 @@
+/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <mach/gpio.h>
+#include "msm_fb.h"
+
+/* registers */
+#define GORDON_REG_NOP          0x00
+#define GORDON_REG_IMGCTL1      0x10
+#define GORDON_REG_IMGCTL2      0x11
+#define GORDON_REG_IMGSET1      0x12
+#define GORDON_REG_IMGSET2      0x13
+#define GORDON_REG_IVBP1        0x14
+#define GORDON_REG_IHBP1        0x15
+#define GORDON_REG_IVNUM1       0x16
+#define GORDON_REG_IHNUM1       0x17
+#define GORDON_REG_IVBP2        0x18
+#define GORDON_REG_IHBP2        0x19
+#define GORDON_REG_IVNUM2       0x1A
+#define GORDON_REG_IHNUM2       0x1B
+#define GORDON_REG_LCDIFCTL1    0x30
+#define GORDON_REG_VALTRAN      0x31
+#define GORDON_REG_AVCTL        0x33
+#define GORDON_REG_LCDIFCTL2    0x34
+#define GORDON_REG_LCDIFCTL3    0x35
+#define GORDON_REG_LCDIFSET1    0x36
+#define GORDON_REG_PCCTL        0x3C
+#define GORDON_REG_TPARAM1      0x40
+#define GORDON_REG_TLCDIF1      0x41
+#define GORDON_REG_TSSPB_ST1    0x42
+#define GORDON_REG_TSSPB_ED1    0x43
+#define GORDON_REG_TSCK_ST1     0x44
+#define GORDON_REG_TSCK_WD1     0x45
+#define GORDON_REG_TGSPB_VST1   0x46
+#define GORDON_REG_TGSPB_VED1   0x47
+#define GORDON_REG_TGSPB_CH1    0x48
+#define GORDON_REG_TGCK_ST1     0x49
+#define GORDON_REG_TGCK_ED1     0x4A
+#define GORDON_REG_TPCTL_ST1    0x4B
+#define GORDON_REG_TPCTL_ED1    0x4C
+#define GORDON_REG_TPCHG_ED1    0x4D
+#define GORDON_REG_TCOM_CH1     0x4E
+#define GORDON_REG_THBP1        0x4F
+#define GORDON_REG_TPHCTL1      0x50
+#define GORDON_REG_EVPH1        0x51
+#define GORDON_REG_EVPL1        0x52
+#define GORDON_REG_EVNH1        0x53
+#define GORDON_REG_EVNL1        0x54
+#define GORDON_REG_TBIAS1       0x55
+#define GORDON_REG_TPARAM2      0x56
+#define GORDON_REG_TLCDIF2      0x57
+#define GORDON_REG_TSSPB_ST2    0x58
+#define GORDON_REG_TSSPB_ED2    0x59
+#define GORDON_REG_TSCK_ST2     0x5A
+#define GORDON_REG_TSCK_WD2     0x5B
+#define GORDON_REG_TGSPB_VST2   0x5C
+#define GORDON_REG_TGSPB_VED2   0x5D
+#define GORDON_REG_TGSPB_CH2    0x5E
+#define GORDON_REG_TGCK_ST2     0x5F
+#define GORDON_REG_TGCK_ED2     0x60
+#define GORDON_REG_TPCTL_ST2    0x61
+#define GORDON_REG_TPCTL_ED2    0x62
+#define GORDON_REG_TPCHG_ED2    0x63
+#define GORDON_REG_TCOM_CH2     0x64
+#define GORDON_REG_THBP2        0x65
+#define GORDON_REG_TPHCTL2      0x66
+#define GORDON_REG_POWCTL       0x80
+
+static int lcdc_gordon_panel_off(struct platform_device *pdev);
+
+static int spi_cs;
+static int spi_sclk;
+static int spi_sdo;
+static int spi_sdi;
+static int spi_dac;
+static unsigned char bit_shift[8] = { (1 << 7),	/* MSB */
+	(1 << 6),
+	(1 << 5),
+	(1 << 4),
+	(1 << 3),
+	(1 << 2),
+	(1 << 1),
+	(1 << 0)		               /* LSB */
+};
+
+struct gordon_state_type{
+	boolean disp_initialized;
+	boolean display_on;
+	boolean disp_powered_up;
+};
+
+static struct gordon_state_type gordon_state = { 0 };
+static struct msm_panel_common_pdata *lcdc_gordon_pdata;
+
+static void serigo(uint16 reg, uint8 data)
+{
+	unsigned int tx_val = ((0x00FF & reg) << 8) | data;
+	unsigned char i, val = 0;
+
+	/* Enable the Chip Select */
+	gpio_set_value(spi_cs, 1);
+	udelay(33);
+
+	/* Transmit it in two parts, Higher Byte first, then Lower Byte */
+	val = (unsigned char)((tx_val & 0xFF00) >> 8);
+
+	/* The clock should be low before entering! */
+	for (i = 0; i < 8; i++) {
+		/* #1: Drive the Data (High or Low) */
+		if (val & bit_shift[i])
+			gpio_set_value(spi_sdi, 1);
+		else
+			gpio_set_value(spi_sdi, 0);
+
+		/* #2: Drive the Clk High and then Low */
+		udelay(33);
+		gpio_set_value(spi_sclk, 1);
+		udelay(33);
+		gpio_set_value(spi_sclk, 0);
+	}
+
+	/* Idle state of SDO (MOSI) is Low */
+	gpio_set_value(spi_sdi, 0);
+	/* ...then the lower byte */
+	val = (uint8) (tx_val & 0x00FF);
+	/* The clock should be low before entering this loop. */
+
+	for (i = 0; i < 8; i++) {
+		/* #1: Drive the Data (High or Low) */
+		if (val & bit_shift[i])
+			gpio_set_value(spi_sdi, 1);
+		else
+			gpio_set_value(spi_sdi, 0);
+
+		/* #2: Drive the Clk High and then Low */
+		udelay(33);
+
+		gpio_set_value(spi_sclk, 1);
+		udelay(33);
+		gpio_set_value(spi_sclk, 0);
+	}
+
+	/* Idle state of SDO (MOSI) is Low */
+	gpio_set_value(spi_sdi, 0);
+
+	/* Now Disable the Chip Select */
+	udelay(33);
+	gpio_set_value(spi_cs, 0);
+}
+
+static void spi_init(void)
+{
+	/* Setting the Default GPIO's */
+	spi_sclk = *(lcdc_gordon_pdata->gpio_num);
+	spi_cs   = *(lcdc_gordon_pdata->gpio_num + 1);
+	spi_sdi  = *(lcdc_gordon_pdata->gpio_num + 2);
+	spi_sdo  = *(lcdc_gordon_pdata->gpio_num + 3);
+
+	/* Set the output so that we don't disturb the slave device */
+	gpio_set_value(spi_sclk, 0);
+	gpio_set_value(spi_sdi, 0);
+
+	/* Set the Chip Select De-asserted */
+	gpio_set_value(spi_cs, 0);
+
+}
+
+static void gordon_disp_powerup(void)
+{
+	if (!gordon_state.disp_powered_up && !gordon_state.display_on) {
+		/* Reset the hardware first */
+		/* Include DAC power up implementation here */
+		gordon_state.disp_powered_up = TRUE;
+	}
+}
+
+static void gordon_init(void)
+{
+	/* Image interface settings */
+	serigo(GORDON_REG_IMGCTL2, 0x00);
+	serigo(GORDON_REG_IMGSET1, 0x00);
+
+	/* Exchange the RGB signals for the J510 (SoftBank mobile) */
+	serigo(GORDON_REG_IMGSET2, 0x12);
+	serigo(GORDON_REG_LCDIFSET1, 0x00);
+
+	/* Pre-charge settings */
+	serigo(GORDON_REG_PCCTL, 0x09);
+	serigo(GORDON_REG_LCDIFCTL2, 0x7B);
+
+	mdelay(1);
+}
+
+static void gordon_disp_on(void)
+{
+	if (gordon_state.disp_powered_up && !gordon_state.display_on) {
+		gordon_init();
+		mdelay(20);
+		/* gordon_dispmode setting */
+		serigo(GORDON_REG_TPARAM1, 0x30);
+		serigo(GORDON_REG_TLCDIF1, 0x00);
+		serigo(GORDON_REG_TSSPB_ST1, 0x8B);
+		serigo(GORDON_REG_TSSPB_ED1, 0x93);
+		serigo(GORDON_REG_TSCK_ST1, 0x88);
+		serigo(GORDON_REG_TSCK_WD1, 0x00);
+		serigo(GORDON_REG_TGSPB_VST1, 0x01);
+		serigo(GORDON_REG_TGSPB_VED1, 0x02);
+		serigo(GORDON_REG_TGSPB_CH1, 0x5E);
+		serigo(GORDON_REG_TGCK_ST1, 0x80);
+		serigo(GORDON_REG_TGCK_ED1, 0x3C);
+		serigo(GORDON_REG_TPCTL_ST1, 0x50);
+		serigo(GORDON_REG_TPCTL_ED1, 0x74);
+		serigo(GORDON_REG_TPCHG_ED1, 0x78);
+		serigo(GORDON_REG_TCOM_CH1, 0x50);
+		serigo(GORDON_REG_THBP1, 0x84);
+		serigo(GORDON_REG_TPHCTL1, 0x00);
+		serigo(GORDON_REG_EVPH1, 0x70);
+		serigo(GORDON_REG_EVPL1, 0x64);
+		serigo(GORDON_REG_EVNH1, 0x56);
+		serigo(GORDON_REG_EVNL1, 0x48);
+		serigo(GORDON_REG_TBIAS1, 0x88);
+
+		/* QVGA settings */
+		serigo(GORDON_REG_TPARAM2, 0x28);
+		serigo(GORDON_REG_TLCDIF2, 0x14);
+		serigo(GORDON_REG_TSSPB_ST2, 0x49);
+		serigo(GORDON_REG_TSSPB_ED2, 0x4B);
+		serigo(GORDON_REG_TSCK_ST2, 0x4A);
+		serigo(GORDON_REG_TSCK_WD2, 0x02);
+		serigo(GORDON_REG_TGSPB_VST2, 0x02);
+		serigo(GORDON_REG_TGSPB_VED2, 0x03);
+		serigo(GORDON_REG_TGSPB_CH2, 0x2F);
+		serigo(GORDON_REG_TGCK_ST2, 0x40);
+		serigo(GORDON_REG_TGCK_ED2, 0x1E);
+		serigo(GORDON_REG_TPCTL_ST2, 0x2C);
+		serigo(GORDON_REG_TPCTL_ED2, 0x3A);
+		serigo(GORDON_REG_TPCHG_ED2, 0x3C);
+		serigo(GORDON_REG_TCOM_CH2, 0x28);
+		serigo(GORDON_REG_THBP2, 0x4D);
+		serigo(GORDON_REG_TPHCTL2, 0x1A);
+
+		/* VGA settings */
+		serigo(GORDON_REG_IVBP1, 0x02);
+		serigo(GORDON_REG_IHBP1, 0x90);
+		serigo(GORDON_REG_IVNUM1, 0xA0);
+		serigo(GORDON_REG_IHNUM1, 0x78);
+
+		/* QVGA settings */
+		serigo(GORDON_REG_IVBP2, 0x02);
+		serigo(GORDON_REG_IHBP2, 0x48);
+		serigo(GORDON_REG_IVNUM2, 0x50);
+		serigo(GORDON_REG_IHNUM2, 0x3C);
+
+		/* Gordon Charge pump settings and ON */
+		serigo(GORDON_REG_POWCTL, 0x03);
+		mdelay(15);
+		serigo(GORDON_REG_POWCTL, 0x07);
+		mdelay(15);
+
+		serigo(GORDON_REG_POWCTL, 0x0F);
+		mdelay(15);
+
+		serigo(GORDON_REG_AVCTL, 0x03);
+		mdelay(15);
+
+		serigo(GORDON_REG_POWCTL, 0x1F);
+		mdelay(15);
+
+		serigo(GORDON_REG_POWCTL, 0x5F);
+		mdelay(15);
+
+		serigo(GORDON_REG_POWCTL, 0x7F);
+		mdelay(15);
+
+		serigo(GORDON_REG_LCDIFCTL1, 0x02);
+		mdelay(15);
+
+		serigo(GORDON_REG_IMGCTL1, 0x00);
+		mdelay(15);
+
+		serigo(GORDON_REG_LCDIFCTL3, 0x00);
+		mdelay(15);
+
+		serigo(GORDON_REG_VALTRAN, 0x01);
+		mdelay(15);
+
+		serigo(GORDON_REG_LCDIFCTL1, 0x03);
+		mdelay(1);
+		gordon_state.display_on = TRUE;
+	}
+}
+
+static int lcdc_gordon_panel_on(struct platform_device *pdev)
+{
+	if (!gordon_state.disp_initialized) {
+		/* Configure reset GPIO that drives DAC */
+		lcdc_gordon_pdata->panel_config_gpio(1);
+		spi_dac = *(lcdc_gordon_pdata->gpio_num + 4);
+		gpio_set_value(spi_dac, 0);
+		udelay(15);
+		gpio_set_value(spi_dac, 1);
+		spi_init();	/* LCD needs SPI */
+		gordon_disp_powerup();
+		gordon_disp_on();
+		gordon_state.disp_initialized = TRUE;
+	}
+	return 0;
+}
+
+static int lcdc_gordon_panel_off(struct platform_device *pdev)
+{
+	if (gordon_state.disp_powered_up && gordon_state.display_on) {
+		serigo(GORDON_REG_LCDIFCTL2, 0x7B);
+		serigo(GORDON_REG_VALTRAN, 0x01);
+		serigo(GORDON_REG_LCDIFCTL1, 0x02);
+		serigo(GORDON_REG_LCDIFCTL3, 0x01);
+		mdelay(20);
+		serigo(GORDON_REG_VALTRAN, 0x01);
+		serigo(GORDON_REG_IMGCTL1, 0x01);
+		serigo(GORDON_REG_LCDIFCTL1, 0x00);
+		mdelay(20);
+
+		serigo(GORDON_REG_POWCTL, 0x1F);
+		mdelay(40);
+
+		serigo(GORDON_REG_POWCTL, 0x07);
+		mdelay(40);
+
+		serigo(GORDON_REG_POWCTL, 0x03);
+		mdelay(40);
+
+		serigo(GORDON_REG_POWCTL, 0x00);
+		mdelay(40);
+		lcdc_gordon_pdata->panel_config_gpio(0);
+		gordon_state.display_on = FALSE;
+		gordon_state.disp_initialized = FALSE;
+	}
+	return 0;
+}
+
+static void lcdc_gordon_set_backlight(struct msm_fb_data_type *mfd)
+{
+	int bl_level = mfd->bl_level;
+
+	if (bl_level <= 1) {
+		/* keep the backlight OFF */
+		serigo(GORDON_REG_LCDIFCTL2, 0x0B);
+		udelay(15);
+		serigo(GORDON_REG_VALTRAN, 0x01);
+	} else {
+		/* keep the backlight ON */
+		serigo(GORDON_REG_LCDIFCTL2, 0x7B);
+		udelay(15);
+		serigo(GORDON_REG_VALTRAN, 0x01);
+	}
+}
+
+static int __devinit gordon_probe(struct platform_device *pdev)
+{
+	if (pdev->id == 0) {
+		lcdc_gordon_pdata = pdev->dev.platform_data;
+		return 0;
+	}
+	msm_fb_add_device(pdev);
+	return 0;
+}
+
+static struct platform_driver this_driver = {
+	.probe  = gordon_probe,
+	.driver = {
+		.name   = "lcdc_gordon_vga",
+	},
+};
+
+static struct msm_fb_panel_data gordon_panel_data = {
+	.on = lcdc_gordon_panel_on,
+	.off = lcdc_gordon_panel_off,
+	.set_backlight = lcdc_gordon_set_backlight,
+};
+
+static struct platform_device this_device = {
+	.name   = "lcdc_gordon_vga",
+	.id	= 1,
+	.dev	= {
+		.platform_data = &gordon_panel_data,
+	}
+};
+
+static int __init lcdc_gordon_panel_init(void)
+{
+	int ret;
+	struct msm_panel_info *pinfo;
+
+#ifdef CONFIG_FB_MSM_TRY_MDDI_CATCH_LCDC_PRISM
+	if (msm_fb_detect_client("lcdc_gordon_vga"))
+		return 0;
+#endif
+	ret = platform_driver_register(&this_driver);
+	if (ret)
+		return ret;
+
+	pinfo = &gordon_panel_data.panel_info;
+	pinfo->xres = 480;
+	pinfo->yres = 640;
+	MSM_FB_SINGLE_MODE_PANEL(pinfo);
+	pinfo->type = LCDC_PANEL;
+	pinfo->pdest = DISPLAY_1;
+	pinfo->wait_cycle = 0;
+	pinfo->bpp = 24;
+	pinfo->fb_num = 2;
+	pinfo->clk_rate = 24500000;
+	pinfo->bl_max = 4;
+	pinfo->bl_min = 1;
+
+	pinfo->lcdc.h_back_porch = 84;
+	pinfo->lcdc.h_front_porch = 33;
+	pinfo->lcdc.h_pulse_width = 60;
+	pinfo->lcdc.v_back_porch = 0;
+	pinfo->lcdc.v_front_porch = 2;
+	pinfo->lcdc.v_pulse_width = 2;
+	pinfo->lcdc.border_clr = 0;     /* blk */
+	pinfo->lcdc.underflow_clr = 0xff;       /* blue */
+	pinfo->lcdc.hsync_skew = 0;
+
+	ret = platform_device_register(&this_device);
+	if (ret)
+		platform_driver_unregister(&this_driver);
+
+	return ret;
+}
+
+module_init(lcdc_gordon_panel_init);
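serigo() above frames every register write as a 16-bit word, with the 8-bit register address in the high byte and the 8-bit data in the low byte, and clocks each byte out MSB-first with one clock pulse per bit. A minimal host-side sketch of that framing, with printf standing in for the GPIO toggling (the example register write mirrors the GORDON_REG_PCCTL write in gordon_init(); everything else is illustrative):

#include <stdio.h>
#include <stdint.h>

/* stand-in for gpio_set_value(spi_sdi, level) plus the clock pulse */
static void emit_bit(int level)
{
	printf("%d", level);
}

/* build the 16-bit frame the way serigo() does and shift it out MSB-first */
static void serigo_sketch(uint8_t reg, uint8_t data)
{
	uint16_t tx_val = ((uint16_t)reg << 8) | data;
	int i;

	for (i = 15; i >= 0; i--)	/* high byte first, then low byte */
		emit_bit((tx_val >> i) & 1);
	printf("  (reg 0x%02X, data 0x%02X)\n", reg, data);
}

int main(void)
{
	serigo_sketch(0x3C, 0x09);	/* GORDON_REG_PCCTL write from gordon_init() */
	return 0;
}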
diff --git a/drivers/video/msm/lcdc_panel.c b/drivers/video/msm/lcdc_panel.c
new file mode 100644
index 0000000..5705325
--- /dev/null
+++ b/drivers/video/msm/lcdc_panel.c
@@ -0,0 +1,84 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_fb.h"
+
+static int lcdc_panel_on(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static int lcdc_panel_off(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static int __devinit lcdc_panel_probe(struct platform_device *pdev)
+{
+	msm_fb_add_device(pdev);
+
+	return 0;
+}
+
+static struct platform_driver this_driver = {
+	.probe  = lcdc_panel_probe,
+	.driver = {
+		.name   = "lcdc_panel",
+	},
+};
+
+static struct msm_fb_panel_data lcdc_panel_data = {
+	.on = lcdc_panel_on,
+	.off = lcdc_panel_off,
+};
+
+static int lcdc_dev_id;
+
+int lcdc_device_register(struct msm_panel_info *pinfo)
+{
+	struct platform_device *pdev = NULL;
+	int ret;
+
+	pdev = platform_device_alloc("lcdc_panel", ++lcdc_dev_id);
+	if (!pdev)
+		return -ENOMEM;
+
+	lcdc_panel_data.panel_info = *pinfo;
+	ret = platform_device_add_data(pdev, &lcdc_panel_data,
+		sizeof(lcdc_panel_data));
+	if (ret) {
+		printk(KERN_ERR
+		  "%s: platform_device_add_data failed!\n", __func__);
+		goto err_device_put;
+	}
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		printk(KERN_ERR
+		  "%s: platform_device_register failed!\n", __func__);
+		goto err_device_put;
+	}
+
+	return 0;
+
+err_device_put:
+	platform_device_put(pdev);
+	return ret;
+}
+
+static int __init lcdc_panel_init(void)
+{
+	return platform_driver_register(&this_driver);
+}
+
+module_init(lcdc_panel_init);
diff --git a/drivers/video/msm/lcdc_prism.c b/drivers/video/msm/lcdc_prism.c
new file mode 100644
index 0000000..d127f63
--- /dev/null
+++ b/drivers/video/msm/lcdc_prism.c
@@ -0,0 +1,61 @@
+/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_fb.h"
+
+#ifdef CONFIG_FB_MSM_TRY_MDDI_CATCH_LCDC_PRISM
+#include "mddihosti.h"
+#endif
+
+static int __init lcdc_prism_init(void)
+{
+	int ret;
+	struct msm_panel_info pinfo;
+
+#ifdef CONFIG_FB_MSM_TRY_MDDI_CATCH_LCDC_PRISM
+	ret = msm_fb_detect_client("lcdc_prism_wvga");
+	if (ret == -ENODEV)
+		return 0;
+
+	if (ret && (mddi_get_client_id() != 0))
+		return 0;
+#endif
+
+	pinfo.xres = 800;
+	pinfo.yres = 480;
+	MSM_FB_SINGLE_MODE_PANEL(&pinfo);
+	pinfo.type = LCDC_PANEL;
+	pinfo.pdest = DISPLAY_1;
+	pinfo.wait_cycle = 0;
+	pinfo.bpp = 24;
+	pinfo.fb_num = 2;
+	pinfo.clk_rate = 30720000;
+
+	pinfo.lcdc.h_back_porch = 21;
+	pinfo.lcdc.h_front_porch = 81;
+	pinfo.lcdc.h_pulse_width = 60;
+	pinfo.lcdc.v_back_porch = 18;
+	pinfo.lcdc.v_front_porch = 27;
+	pinfo.lcdc.v_pulse_width = 2;
+	pinfo.lcdc.border_clr = 0;	/* blk */
+	pinfo.lcdc.underflow_clr = 0xff;	/* blue */
+	pinfo.lcdc.hsync_skew = 0;
+
+	ret = lcdc_device_register(&pinfo);
+	if (ret)
+		printk(KERN_ERR "%s: failed to register device!\n", __func__);
+
+	return ret;
+}
+
+module_init(lcdc_prism_init);
diff --git a/drivers/video/msm/lcdc_samsung_oled_pt.c b/drivers/video/msm/lcdc_samsung_oled_pt.c
new file mode 100644
index 0000000..dccc997
--- /dev/null
+++ b/drivers/video/msm/lcdc_samsung_oled_pt.c
@@ -0,0 +1,590 @@
+/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/pwm.h>
+#ifdef CONFIG_SPI_QUP
+#include <linux/spi/spi.h>
+#else
+#include <mach/gpio.h>
+#endif
+#include "msm_fb.h"
+
+#define DEBUG
+/* #define SYSFS_DEBUG_CMD */
+
+#ifdef CONFIG_SPI_QUP
+#define LCDC_SAMSUNG_SPI_DEVICE_NAME	"lcdc_samsung_ams367pe02"
+static struct spi_device *lcdc_spi_client;
+#else
+static int spi_cs;
+static int spi_sclk;
+static int spi_mosi;
+#endif
+
+struct samsung_state_type {
+	boolean disp_initialized;
+	boolean display_on;
+	boolean disp_powered_up;
+	int brightness;
+};
+
+struct samsung_spi_data {
+	u8 addr;
+	u8 len;
+	u8 data[22];
+};
+
+static struct samsung_spi_data panel_sequence[] = {
+	{ .addr = 0xf8, .len = 14, .data = { 0x01, 0x27, 0x27, 0x07, 0x07,
+	 0x54, 0x9f, 0x63, 0x86, 0x1a, 0x33, 0x0d, 0x00, 0x00 } },
+};
+static struct samsung_spi_data display_sequence[] = {
+	{ .addr = 0xf2, .len = 5, .data = { 0x02, 0x03, 0x1c, 0x10, 0x10 } },
+	{ .addr = 0xf7, .len = 3, .data = { 0x00, 0x00, 0x30 } },
+};
+
+/* lum=300 cd/m2 */
+static struct samsung_spi_data gamma_sequence_300[] = {
+	{ .addr = 0xfa, .len = 22, .data = { 0x02, 0x18, 0x08, 0x24, 0x7d, 0x77,
+	 0x5b, 0xbe, 0xc1, 0xb1, 0xb3, 0xb7, 0xa6, 0xc3, 0xc5, 0xb9, 0x00, 0xb3,
+	 0x00, 0xaf, 0x00, 0xe8 } },
+	{ .addr = 0xFA, .len = 1, .data = { 0x03 } },
+};
+/* lum = 180 cd/m2*/
+static struct samsung_spi_data gamma_sequence_180[] = {
+	{ .addr = 0xfa, .len = 22, .data = { 0x02, 0x18, 0x08, 0x24, 0x83, 0x78,
+	 0x60, 0xc5, 0xc6, 0xb8, 0xba, 0xbe, 0xad, 0xcb, 0xcd, 0xc2, 0x00, 0x92,
+	 0x00, 0x8e, 0x00, 0xbc } },
+	{ .addr = 0xFA, .len = 1, .data = { 0x03 } },
+};
+/* lum = 80 cd/m2*/
+static struct samsung_spi_data gamma_sequence_80[] = {
+	{ .addr = 0xfa, .len = 22, .data = { 0x02, 0x18, 0x08, 0x24, 0x94, 0x73,
+	 0x6c, 0xcb, 0xca, 0xbe, 0xc4, 0xc7, 0xb8, 0xd3, 0xd5, 0xcb, 0x00, 0x6d,
+	 0x00, 0x69, 0x00, 0x8b } },
+	{ .addr = 0xFA, .len = 1, .data = { 0x03 } },
+};
+
+static struct samsung_spi_data etc_sequence[] = {
+	{ .addr = 0xF6, .len = 3, .data = { 0x00, 0x8e, 0x07 } },
+	{ .addr = 0xB3, .len = 1, .data = { 0x0C } },
+};
+
+static struct samsung_state_type samsung_state = { .brightness = 180 };
+static struct msm_panel_common_pdata *lcdc_samsung_pdata;
+
+#ifndef CONFIG_SPI_QUP
+static void samsung_spi_write_byte(boolean dc, u8 data)
+{
+	uint32 bit;
+	int bnum;
+
+	gpio_set_value(spi_sclk, 0);
+	gpio_set_value(spi_mosi, dc ? 1 : 0);
+	udelay(1);			/* at least 20 ns */
+	gpio_set_value(spi_sclk, 1);	/* clk high */
+	udelay(1);			/* at least 20 ns */
+
+	bnum = 8;			/* 8 data bits */
+	bit = 0x80;
+	while (bnum--) {
+		gpio_set_value(spi_sclk, 0); /* clk low */
+		gpio_set_value(spi_mosi, (data & bit) ? 1 : 0);
+		udelay(1);
+		gpio_set_value(spi_sclk, 1); /* clk high */
+		udelay(1);
+		bit >>= 1;
+	}
+	gpio_set_value(spi_mosi, 0);
+
+}
+
+static void samsung_spi_read_bytes(u8 cmd, u8 *data, int num)
+{
+	int bnum;
+
+	/* Chip Select - low */
+	gpio_set_value(spi_cs, 0);
+	udelay(2);
+
+	/* command byte first */
+	samsung_spi_write_byte(0, cmd);
+	udelay(2);
+
+	gpio_direction_input(spi_mosi);
+
+	if (num > 1) {
+		/* extra dummy clock */
+		gpio_set_value(spi_sclk, 0);
+		udelay(1);
+		gpio_set_value(spi_sclk, 1);
+		udelay(1);
+	}
+
+	/* followed by data bytes */
+	bnum = num * 8;	/* number of bits */
+	*data = 0;
+	while (bnum) {
+		gpio_set_value(spi_sclk, 0); /* clk low */
+		udelay(1);
+		*data <<= 1;
+		*data |= gpio_get_value(spi_mosi) ? 1 : 0;
+		gpio_set_value(spi_sclk, 1); /* clk high */
+		udelay(1);
+		--bnum;
+		if ((bnum % 8) == 0)
+			++data;
+	}
+
+	gpio_direction_output(spi_mosi, 0);
+
+	/* Chip Select - high */
+	udelay(2);
+	gpio_set_value(spi_cs, 1);
+}
+#endif
+
+#ifdef DEBUG
+static const char *byte_to_binary(const u8 *buf, int len)
+{
+	static char b[32*8+1];
+	char *p = b;
+	int i, z;
+
+	for (i = 0; i < len; ++i) {
+		u8 val = *buf++;
+		for (z = 1 << 7; z > 0; z >>= 1)
+			*p++ = (val & z) ? '1' : '0';
+	}
+	*p = 0;
+
+	return b;
+}
+#endif
+
+#define BIT_OFFSET	(bit_size % 8)
+#define ADD_BIT(val) do { \
+		tx_buf[bit_size / 8] |= \
+			(u8)((val ? 1 : 0) << (7 - BIT_OFFSET)); \
+		++bit_size; \
+	} while (0)
+
+#define ADD_BYTE(data) do { \
+		tx_buf[bit_size / 8] |= (u8)(data >> BIT_OFFSET); \
+		bit_size += 8; \
+		if (BIT_OFFSET != 0) \
+			tx_buf[bit_size / 8] |= (u8)(data << (8 - BIT_OFFSET));\
+	} while (0)
+
+static int samsung_serigo(struct samsung_spi_data data)
+{
+#ifdef CONFIG_SPI_QUP
+	char                tx_buf[32];
+	int                 bit_size = 0, i, rc;
+	struct spi_message  m;
+	struct spi_transfer t;
+
+	if (!lcdc_spi_client) {
+		pr_err("%s lcdc_spi_client is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	memset(&t, 0, sizeof t);
+	memset(tx_buf, 0, sizeof tx_buf);
+	t.tx_buf = tx_buf;
+	spi_setup(lcdc_spi_client);
+	spi_message_init(&m);
+	spi_message_add_tail(&t, &m);
+
+	ADD_BIT(FALSE);
+	ADD_BYTE(data.addr);
+	for (i = 0; i < data.len; ++i) {
+		ADD_BIT(TRUE);
+		ADD_BYTE(data.data[i]);
+	}
+
+	/* add padding bits so we round to next byte */
+	t.len = (bit_size+7) / 8;
+	if (t.len <= 4)
+		t.bits_per_word = bit_size;
+
+	rc = spi_sync(lcdc_spi_client, &m);
+#ifdef DEBUG
+	pr_info("%s: addr=0x%02x, #args=%d[%d] [%s], rc=%d\n",
+		__func__, data.addr, t.len, t.bits_per_word,
+		byte_to_binary(tx_buf, t.len), rc);
+#endif
+	return rc;
+#else
+	int i;
+
+	/* Chip Select - low */
+	gpio_set_value(spi_cs, 0);
+	udelay(2);
+
+	samsung_spi_write_byte(FALSE, data.addr);
+	udelay(2);
+
+	for (i = 0; i < data.len; ++i) {
+		samsung_spi_write_byte(TRUE, data.data[i]);
+		udelay(2);
+	}
+
+	/* Chip Select - high */
+	gpio_set_value(spi_cs, 1);
+#ifdef DEBUG
+	pr_info("%s: cmd=0x%02x, #args=%d\n", __func__, data.addr, data.len);
+#endif
+	return 0;
+#endif
+}
+
+static int samsung_write_cmd(u8 cmd)
+{
+#ifdef CONFIG_SPI_QUP
+	char                tx_buf[2];
+	int                 bit_size = 0, rc;
+	struct spi_message  m;
+	struct spi_transfer t;
+
+	if (!lcdc_spi_client) {
+		pr_err("%s lcdc_spi_client is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	memset(&t, 0, sizeof t);
+	memset(tx_buf, 0, sizeof tx_buf);
+	t.tx_buf = tx_buf;
+	spi_setup(lcdc_spi_client);
+	spi_message_init(&m);
+	spi_message_add_tail(&t, &m);
+
+	ADD_BIT(FALSE);
+	ADD_BYTE(cmd);
+
+	t.len = 2;
+	t.bits_per_word = 9;
+
+	rc = spi_sync(lcdc_spi_client, &m);
+#ifdef DEBUG
+	pr_info("%s: addr=0x%02x, #args=%d[%d] [%s], rc=%d\n",
+		__func__, cmd, t.len, t.bits_per_word,
+		byte_to_binary(tx_buf, t.len), rc);
+#endif
+	return rc;
+#else
+	/* Chip Select - low */
+	gpio_set_value(spi_cs, 0);
+	udelay(2);
+
+	samsung_spi_write_byte(FALSE, cmd);
+
+	/* Chip Select - high */
+	udelay(2);
+	gpio_set_value(spi_cs, 1);
+#ifdef DEBUG
+	pr_info("%s: cmd=0x%02x\n", __func__, cmd);
+#endif
+	return 0;
+#endif
+}
+
+static int samsung_serigo_list(struct samsung_spi_data *data, int count)
+{
+	int i, rc;
+	for (i = 0; i < count; ++i, ++data) {
+		rc = samsung_serigo(*data);
+		if (rc)
+			return rc;
+		msleep(10);
+	}
+	return 0;
+}
+
+#ifndef CONFIG_SPI_QUP
+static void samsung_spi_init(void)
+{
+	spi_sclk = *(lcdc_samsung_pdata->gpio_num);
+	spi_cs   = *(lcdc_samsung_pdata->gpio_num + 1);
+	spi_mosi = *(lcdc_samsung_pdata->gpio_num + 2);
+
+	/* Set the output so that we don't disturb the slave device */
+	gpio_set_value(spi_sclk, 1);
+	gpio_set_value(spi_mosi, 0);
+
+	/* Set the Chip Select deasserted (active low) */
+	gpio_set_value(spi_cs, 1);
+}
+#endif
+
+static void samsung_disp_powerup(void)
+{
+	if (!samsung_state.disp_powered_up && !samsung_state.display_on)
+		samsung_state.disp_powered_up = TRUE;
+}
+
+static struct work_struct disp_on_delayed_work;
+static void samsung_disp_on_delayed_work(struct work_struct *work_ptr)
+{
+	/* 0x01: Software Reset */
+	samsung_write_cmd(0x01);
+	msleep(120);
+
+	msleep(300);
+	samsung_serigo_list(panel_sequence,
+		sizeof(panel_sequence)/sizeof(*panel_sequence));
+	samsung_serigo_list(display_sequence,
+		sizeof(display_sequence)/sizeof(*display_sequence));
+
+	switch (samsung_state.brightness) {
+	case 300:
+		samsung_serigo_list(gamma_sequence_300,
+			sizeof(gamma_sequence_300)/sizeof(*gamma_sequence_300));
+		break;
+	case 180:
+	default:
+		samsung_serigo_list(gamma_sequence_180,
+			sizeof(gamma_sequence_180)/sizeof(*gamma_sequence_180));
+		break;
+	case 80:
+		samsung_serigo_list(gamma_sequence_80,
+			sizeof(gamma_sequence_80)/sizeof(*gamma_sequence_80));
+		break;
+	}
+
+	samsung_serigo_list(etc_sequence,
+		sizeof(etc_sequence)/sizeof(*etc_sequence));
+
+	/* 0x11: Sleep Out */
+	samsung_write_cmd(0x11);
+	msleep(120);
+	/* 0x13: Normal Mode On */
+	samsung_write_cmd(0x13);
+
+#ifndef CONFIG_SPI_QUP
+	{
+		u8 data;
+
+		msleep(120);
+		/* 0x0A: Read Display Power Mode */
+		samsung_spi_read_bytes(0x0A, &data, 1);
+		pr_info("%s: power=[%s]\n", __func__,
+			byte_to_binary(&data, 1));
+
+		msleep(120);
+		/* 0x0C: Read Display Pixel Format */
+		samsung_spi_read_bytes(0x0C, &data, 1);
+		pr_info("%s: pixel-format=[%s]\n", __func__,
+			byte_to_binary(&data, 1));
+	}
+#endif
+	msleep(120);
+	/* 0x29: Display On */
+	samsung_write_cmd(0x29);
+}
+
+static void samsung_disp_on(void)
+{
+	if (samsung_state.disp_powered_up && !samsung_state.display_on) {
+		INIT_WORK(&disp_on_delayed_work, samsung_disp_on_delayed_work);
+		schedule_work(&disp_on_delayed_work);
+
+		samsung_state.display_on = TRUE;
+	}
+}
+
+static int lcdc_samsung_panel_on(struct platform_device *pdev)
+{
+	pr_info("%s\n", __func__);
+	if (!samsung_state.disp_initialized) {
+#ifndef CONFIG_SPI_QUP
+		lcdc_samsung_pdata->panel_config_gpio(1);
+		samsung_spi_init();
+#endif
+		samsung_disp_powerup();
+		samsung_disp_on();
+		samsung_state.disp_initialized = TRUE;
+	}
+	return 0;
+}
+
+static int lcdc_samsung_panel_off(struct platform_device *pdev)
+{
+	pr_info("%s\n", __func__);
+	if (samsung_state.disp_powered_up && samsung_state.display_on) {
+		/* 0x10: Sleep In */
+		samsung_write_cmd(0x10);
+		msleep(120);
+
+		samsung_state.display_on = FALSE;
+		samsung_state.disp_initialized = FALSE;
+	}
+	return 0;
+}
+
+#ifdef SYSFS_DEBUG_CMD
+static ssize_t samsung_rda_cmd(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = snprintf(buf, PAGE_SIZE, "n/a\n");
+	pr_info("%s: 'n/a'\n", __func__);
+	return ret;
+}
+
+static ssize_t samsung_wta_cmd(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	ssize_t ret = strnlen(buf, PAGE_SIZE);
+	uint32 cmd;
+
+	sscanf(buf, "%x", &cmd);
+	samsung_write_cmd((u8)cmd);
+
+	return ret;
+}
+
+static DEVICE_ATTR(cmd, S_IRUGO | S_IWUGO, samsung_rda_cmd, samsung_wta_cmd);
+static struct attribute *fs_attrs[] = {
+	&dev_attr_cmd.attr,
+	NULL,
+};
+static struct attribute_group fs_attr_group = {
+	.attrs = fs_attrs,
+};
+#endif
+
+static struct msm_fb_panel_data samsung_panel_data = {
+	.on = lcdc_samsung_panel_on,
+	.off = lcdc_samsung_panel_off,
+};
+
+static int __devinit samsung_probe(struct platform_device *pdev)
+{
+	struct msm_panel_info *pinfo;
+#ifdef SYSFS_DEBUG_CMD
+	struct platform_device *fb_dev;
+	struct msm_fb_data_type *mfd;
+	int rc;
+#endif
+
+	pr_info("%s: id=%d\n", __func__, pdev->id);
+	lcdc_samsung_pdata = pdev->dev.platform_data;
+
+	pinfo = &samsung_panel_data.panel_info;
+	pinfo->xres = 480;
+	pinfo->yres = 800;
+	pinfo->type = LCDC_PANEL;
+	pinfo->pdest = DISPLAY_1;
+	pinfo->wait_cycle = 0;
+	pinfo->bpp = 24;
+	pinfo->fb_num = 2;
+	pinfo->clk_rate = 25600000; /* Max 27.77MHz */
+	pinfo->bl_max = 15;
+	pinfo->bl_min = 1;
+
+	/* AMS367PE02 Operation Manual, Page 7 */
+	pinfo->lcdc.h_back_porch = 16-2;	/* HBP-HLW */
+	pinfo->lcdc.h_front_porch = 16;
+	pinfo->lcdc.h_pulse_width = 2;
+	/* AMS367PE02 Operation Manual, Page 6 */
+	pinfo->lcdc.v_back_porch = 3-2;		/* VBP-VLW */
+	pinfo->lcdc.v_front_porch = 28;
+	pinfo->lcdc.v_pulse_width = 2;
+
+	pinfo->lcdc.border_clr = 0;
+	pinfo->lcdc.underflow_clr = 0xff;
+	pinfo->lcdc.hsync_skew = 0;
+	pdev->dev.platform_data = &samsung_panel_data;
+
+#ifndef SYSFS_DEBUG_CMD
+	msm_fb_add_device(pdev);
+#else
+	fb_dev = msm_fb_add_device(pdev);
+	mfd = platform_get_drvdata(fb_dev);
+	rc = sysfs_create_group(&mfd->fbi->dev->kobj, &fs_attr_group);
+	if (rc) {
+		pr_err("%s: sysfs group creation failed, rc=%d\n", __func__,
+			rc);
+		return rc;
+	}
+#endif
+	return 0;
+}
+
+#ifdef CONFIG_SPI_QUP
+static int __devinit lcdc_samsung_spi_probe(struct spi_device *spi)
+{
+	pr_info("%s\n", __func__);
+	lcdc_spi_client = spi;
+	lcdc_spi_client->bits_per_word = 32;
+	return 0;
+}
+static int __devexit lcdc_samsung_spi_remove(struct spi_device *spi)
+{
+	lcdc_spi_client = NULL;
+	return 0;
+}
+static struct spi_driver lcdc_samsung_spi_driver = {
+	.driver.name   = LCDC_SAMSUNG_SPI_DEVICE_NAME,
+	.driver.owner  = THIS_MODULE,
+	.probe         = lcdc_samsung_spi_probe,
+	.remove        = __devexit_p(lcdc_samsung_spi_remove),
+};
+#endif
+
+static struct platform_driver this_driver = {
+	.probe		= samsung_probe,
+	.driver.name	= "lcdc_samsung_oled",
+};
+
+static int __init lcdc_samsung_panel_init(void)
+{
+	int ret;
+
+#ifdef CONFIG_FB_MSM_LCDC_AUTO_DETECT
+	if (msm_fb_detect_client("lcdc_samsung_oled")) {
+		pr_err("%s: detect failed\n", __func__);
+		return 0;
+	}
+#endif
+
+	ret = platform_driver_register(&this_driver);
+	if (ret) {
+		pr_err("%s: driver register failed, rc=%d\n", __func__, ret);
+		return ret;
+	}
+
+#ifdef CONFIG_SPI_QUP
+	ret = spi_register_driver(&lcdc_samsung_spi_driver);
+
+	if (ret) {
+		pr_err("%s: spi register failed: rc=%d\n", __func__, ret);
+		platform_driver_unregister(&this_driver);
+	} else
+		pr_info("%s: SUCCESS (SPI)\n", __func__);
+#else
+	pr_info("%s: SUCCESS (BitBang)\n", __func__);
+#endif
+	return ret;
+}
+
+module_init(lcdc_samsung_panel_init);
+static void __exit lcdc_samsung_panel_exit(void)
+{
+	pr_info("%s\n", __func__);
+#ifdef CONFIG_SPI_QUP
+	spi_unregister_driver(&lcdc_samsung_spi_driver);
+#endif
+	platform_driver_unregister(&this_driver);
+}
+module_exit(lcdc_samsung_panel_exit);
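In the CONFIG_SPI_QUP path above, ADD_BIT()/ADD_BYTE() pack each byte of a panel command into a 9-bit word: a leading D/C flag (0 for the command address, 1 for parameter data) followed by the byte, written MSB-first into the transmit buffer. A standalone sketch of the same packing, assuming nothing beyond the macro behaviour shown above (the 0xF7 sequence mirrors display_sequence[]; the buffer size and printout are illustrative):

#include <stdio.h>
#include <stdint.h>

/*
 * Pack one 9-bit word (D/C flag followed by 8 data bits) into buf,
 * MSB-first, starting at bit offset *pos -- the same layout the
 * ADD_BIT()/ADD_BYTE() macros produce for the QUP SPI transfer.
 */
static void add_word9(uint8_t *buf, int *pos, int dc, uint8_t byte)
{
	int bits = (dc << 8) | byte;	/* the D/C flag is the MSB of the 9 */
	int i;

	for (i = 8; i >= 0; i--, (*pos)++)
		if ((bits >> i) & 1)
			buf[*pos / 8] |= 1u << (7 - (*pos % 8));
}

int main(void)
{
	uint8_t tx[8] = { 0 };
	int pos = 0, i;

	add_word9(tx, &pos, 0, 0xF7);	/* command byte:   D/C = 0 */
	add_word9(tx, &pos, 1, 0x00);	/* parameter data: D/C = 1 */
	add_word9(tx, &pos, 1, 0x00);
	add_word9(tx, &pos, 1, 0x30);

	for (i = 0; i < (pos + 7) / 8; i++)
		printf("%02X ", tx[i]);
	printf(" (%d bits before padding)\n", pos);
	return 0;
}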
diff --git a/drivers/video/msm/lcdc_samsung_wsvga.c b/drivers/video/msm/lcdc_samsung_wsvga.c
new file mode 100644
index 0000000..b4bf8cf
--- /dev/null
+++ b/drivers/video/msm/lcdc_samsung_wsvga.c
@@ -0,0 +1,272 @@
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/pwm.h>
+#ifdef CONFIG_PMIC8058_PWM
+#include <linux/mfd/pmic8058.h>
+#include <linux/pmic8058-pwm.h>
+#endif
+#include <mach/gpio.h>
+#include "msm_fb.h"
+
+#ifdef CONFIG_PMIC8058_PWM
+static struct pwm_device *bl_pwm0;
+static struct pwm_device *bl_pwm1;
+#endif
+
+/* For the Samsung panel, 300 Hz was the minimum PWM frequency at which no
+ * flickering was observed as the screen was dimmed.
+ */
+#define PWM_FREQ_HZ 300
+#define PWM_PERIOD_USEC (USEC_PER_SEC / PWM_FREQ_HZ)
+#define PWM_LEVEL 100
+#define PWM_DUTY_LEVEL (PWM_PERIOD_USEC / PWM_LEVEL)
+
+struct lcdc_samsung_data {
+	struct msm_panel_common_pdata *pdata;
+#ifdef CONFIG_FB_MSM_LCDC_DSUB
+	int vga_enabled;
+#endif
+	struct platform_device *fbpdev;
+};
+
+static struct lcdc_samsung_data *dd;
+
+
+{
+	int bl_level;
+	int ret;
+
+	bl_level = mfd->bl_level;
+
+#ifdef CONFIG_PMIC8058_PWM
+	if (bl_pwm0) {
+		ret = pwm_config(bl_pwm0, PWM_DUTY_LEVEL * bl_level,
+			PWM_PERIOD_USEC);
+		if (ret)
+			printk(KERN_ERR "pwm_config on pwm 0 failed %d\n", ret);
+	}
+
+	if (bl_pwm1) {
+		ret = pwm_config(bl_pwm1,
+			PWM_PERIOD_USEC - (PWM_DUTY_LEVEL * bl_level),
+			PWM_PERIOD_USEC);
+		if (ret)
+			printk(KERN_ERR "pwm_config on pwm 1 failed %d\n", ret);
+	}
+
+	if (bl_pwm0) {
+		ret = pwm_enable(bl_pwm0);
+		if (ret)
+			printk(KERN_ERR "pwm_enable on pwm 0 failed %d\n", ret);
+	}
+
+	if (bl_pwm1) {
+		ret = pwm_enable(bl_pwm1);
+		if (ret)
+			printk(KERN_ERR "pwm_enable on pwm 1 failed %d\n", ret);
+	}
+#endif
+
+}
+
+#ifdef CONFIG_FB_MSM_LCDC_DSUB
+static ssize_t show_vga_enable(struct device *device,
+			       struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", dd->vga_enabled);
+}
+
+static ssize_t store_vga_enable(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	unsigned long enable;
+	int rc;
+
+	rc = strict_strtoul(buf, 10, &enable);
+	if (rc)
+		return -EINVAL;
+
+	if (dd->pdata && dd->pdata->vga_switch)
+		rc = dd->pdata->vga_switch(enable);
+	else
+		rc = -ENODEV;
+	if (!rc) {
+		dd->vga_enabled = enable;
+		rc = count;
+	}
+	return rc;
+}
+
+static DEVICE_ATTR(vga_enable, S_IRUGO|S_IWUSR, show_vga_enable,
+		   store_vga_enable);
+static struct attribute *attrs[] = {
+	&dev_attr_vga_enable.attr,
+	NULL,
+};
+static struct attribute_group attr_group = {
+	.attrs = attrs,
+};
+#endif
+
+static int __devinit samsung_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+#ifdef CONFIG_FB_MSM_LCDC_DSUB
+	struct msm_fb_data_type *mfd;
+#endif
+
+	if (pdev->id == 0) {
+		dd = kzalloc(sizeof *dd, GFP_KERNEL);
+		if (!dd)
+			return -ENOMEM;
+#ifdef CONFIG_FB_MSM_LCDC_DSUB
+		dd->vga_enabled = 0;
+#endif
+		dd->pdata = pdev->dev.platform_data;
+		return 0;
+	} else if (!dd)
+		return -ENODEV;
+
+#ifdef CONFIG_PMIC8058_PWM
+	bl_pwm0 = pwm_request(dd->pdata->gpio_num[0], "backlight1");
+	if (bl_pwm0 == NULL || IS_ERR(bl_pwm0)) {
+		pr_err("%s pwm_request() failed\n", __func__);
+		bl_pwm0 = NULL;
+	}
+
+	bl_pwm1 = pwm_request(dd->pdata->gpio_num[1], "backlight2");
+	if (bl_pwm1 == NULL || IS_ERR(bl_pwm1)) {
+		pr_err("%s pwm_request() failed\n", __func__);
+		bl_pwm1 = NULL;
+	}
+
+	pr_debug("samsung_probe: bl_pwm0=%p LPG_chan0=%d "
+			"bl_pwm1=%p LPG_chan1=%d\n",
+			bl_pwm0, (int)dd->pdata->gpio_num[0],
+			bl_pwm1, (int)dd->pdata->gpio_num[1]
+			);
+#endif
+
+	dd->fbpdev = msm_fb_add_device(pdev);
+	if (!dd->fbpdev) {
+		dev_err(&pdev->dev, "failed to add msm_fb device\n");
+		rc = -ENODEV;
+		goto probe_exit;
+	}
+
+#ifdef CONFIG_FB_MSM_LCDC_DSUB
+	mfd = platform_get_drvdata(dd->fbpdev);
+	if (mfd && mfd->fbi && mfd->fbi->dev) {
+		rc = sysfs_create_group(&mfd->fbi->dev->kobj, &attr_group);
+		if (rc)
+			dev_err(&pdev->dev, "failed to create sysfs group\n");
+	} else {
+		dev_err(&pdev->dev, "no dev to create sysfs group\n");
+		rc = -ENODEV;
+	}
+#endif
+
+probe_exit:
+	return rc;
+}
+
+#ifdef CONFIG_FB_MSM_LCDC_DSUB
+static int __devexit samsung_remove(struct platform_device *pdev)
+{
+	sysfs_remove_group(&dd->fbpdev->dev.kobj, &attr_group);
+	return 0;
+}
+#endif
+
+static struct platform_driver this_driver = {
+	.probe  = samsung_probe,
+#ifdef CONFIG_FB_MSM_LCDC_DSUB
+	.remove = samsung_remove,
+#endif
+	.driver = {
+		.name   = "lcdc_samsung_wsvga",
+	},
+};
+
+static struct msm_fb_panel_data samsung_panel_data = {
+	.set_backlight = lcdc_samsung_panel_set_backlight,
+};
+
+static struct platform_device this_device = {
+	.name   = "lcdc_samsung_wsvga",
+	.id	= 1,
+	.dev	= {
+		.platform_data = &samsung_panel_data,
+	}
+};
+
+static int __init lcdc_samsung_panel_init(void)
+{
+	int ret;
+	struct msm_panel_info *pinfo;
+
+#ifdef CONFIG_FB_MSM_LCDC_AUTO_DETECT
+	if (msm_fb_detect_client("lcdc_samsung_wsvga"))
+		return 0;
+#endif
+
+	ret = platform_driver_register(&this_driver);
+	if (ret)
+		return ret;
+
+	pinfo = &samsung_panel_data.panel_info;
+	pinfo->xres = 1024;
+	pinfo->yres = 600;
+#ifdef CONFIG_FB_MSM_LCDC_DSUB
+	/* D-SUB (VGA) is on the same bus; this lets us allocate for the
+	 * maximum resolution of the D-SUB display. */
+	pinfo->mode2_xres = 1440;
+	pinfo->mode2_yres = 900;
+	pinfo->mode2_bpp = 16;
+#else
+	MSM_FB_SINGLE_MODE_PANEL(pinfo);
+#endif
+	pinfo->type = LCDC_PANEL;
+	pinfo->pdest = DISPLAY_1;
+	pinfo->wait_cycle = 0;
+	pinfo->bpp = 18;
+	pinfo->fb_num = 2;
+	pinfo->clk_rate = 43192000;
+	pinfo->bl_max = PWM_LEVEL;
+	pinfo->bl_min = 1;
+
+	pinfo->lcdc.h_back_porch = 80;
+	pinfo->lcdc.h_front_porch = 48;
+	pinfo->lcdc.h_pulse_width = 32;
+	pinfo->lcdc.v_back_porch = 4;
+	pinfo->lcdc.v_front_porch = 3;
+	pinfo->lcdc.v_pulse_width = 1;
+	pinfo->lcdc.border_clr = 0;
+	pinfo->lcdc.underflow_clr = 0xff;
+	pinfo->lcdc.hsync_skew = 0;
+
+	ret = platform_device_register(&this_device);
+	if (ret)
+		platform_driver_unregister(&this_driver);
+
+	return ret;
+}
+
+module_init(lcdc_samsung_panel_init);
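The backlight path above divides a 300 Hz PWM period into PWM_LEVEL steps and programs the second PWM channel with the complementary duty cycle. A small standalone sketch of that arithmetic, using the same constants as the driver (the loop and printout are illustrative only):

#include <stdio.h>

/* same constants as the driver above */
#define PWM_FREQ_HZ	300
#define PWM_PERIOD_USEC	(1000000 / PWM_FREQ_HZ)		/* ~3333 us */
#define PWM_LEVEL	100
#define PWM_DUTY_LEVEL	(PWM_PERIOD_USEC / PWM_LEVEL)	/* ~33 us per step */

int main(void)
{
	int bl_level;

	for (bl_level = 0; bl_level <= PWM_LEVEL; bl_level += 25) {
		int duty0 = PWM_DUTY_LEVEL * bl_level;	/* pwm 0 */
		int duty1 = PWM_PERIOD_USEC - duty0;	/* pwm 1, complementary */

		printf("bl=%3d  pwm0=%4d us  pwm1=%4d us  (period %d us)\n",
		       bl_level, duty0, duty1, PWM_PERIOD_USEC);
	}
	return 0;
}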
diff --git a/drivers/video/msm/lcdc_sharp_wvga_pt.c b/drivers/video/msm/lcdc_sharp_wvga_pt.c
new file mode 100644
index 0000000..2ba2618
--- /dev/null
+++ b/drivers/video/msm/lcdc_sharp_wvga_pt.c
@@ -0,0 +1,414 @@
+/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/pwm.h>
+#ifdef CONFIG_PMIC8058_PWM
+#include <linux/mfd/pmic8058.h>
+#include <linux/pmic8058-pwm.h>
+#endif
+#ifdef CONFIG_SPI_QSD
+#include <linux/spi/spi.h>
+#endif
+#include <mach/gpio.h>
+#include "msm_fb.h"
+
+#ifdef CONFIG_SPI_QSD
+#define LCDC_SHARP_SPI_DEVICE_NAME	"lcdc_sharp_ls038y7dx01"
+static struct spi_device *lcdc_spi_client;
+#endif
+static int lcdc_sharp_panel_off(struct platform_device *pdev);
+
+#define BL_MAX		16
+
+#ifdef CONFIG_PMIC8058_PWM
+static struct pwm_device *bl_pwm;
+
+#define PWM_PERIOD	1000	/* us, period of 1Khz */
+#define DUTY_LEVEL	(PWM_PERIOD / BL_MAX)
+#endif
+
+#ifndef CONFIG_SPI_QSD
+static int spi_cs;
+static int spi_sclk;
+static int spi_mosi;
+static int spi_miso;
+static unsigned char bit_shift[8] = { (1 << 7),	/* MSB */
+	(1 << 6),
+	(1 << 5),
+	(1 << 4),
+	(1 << 3),
+	(1 << 2),
+	(1 << 1),
+	(1 << 0)		               /* LSB */
+};
+#endif
+
+struct sharp_state_type {
+	boolean disp_initialized;
+	boolean display_on;
+	boolean disp_powered_up;
+};
+
+struct sharp_spi_data {
+	u8 addr;
+	u8 data;
+};
+
+static struct sharp_spi_data init_sequence[] = {
+	{  15, 0x01 },
+	{   5, 0x01 },
+	{   7, 0x10 },
+	{   9, 0x1E },
+	{  10, 0x04 },
+	{  17, 0xFF },
+	{  21, 0x8A },
+	{  22, 0x00 },
+	{  23, 0x82 },
+	{  24, 0x24 },
+	{  25, 0x22 },
+	{  26, 0x6D },
+	{  27, 0xEB },
+	{  28, 0xB9 },
+	{  29, 0x3A },
+	{  49, 0x1A },
+	{  50, 0x16 },
+	{  51, 0x05 },
+	{  55, 0x7F },
+	{  56, 0x15 },
+	{  57, 0x7B },
+	{  60, 0x05 },
+	{  61, 0x0C },
+	{  62, 0x80 },
+	{  63, 0x00 },
+	{  92, 0x90 },
+	{  97, 0x01 },
+	{  98, 0xFF },
+	{ 113, 0x11 },
+	{ 114, 0x02 },
+	{ 115, 0x08 },
+	{ 123, 0xAB },
+	{ 124, 0x04 },
+	{   6, 0x02 },
+	{ 133, 0x00 },
+	{ 134, 0xFE },
+	{ 135, 0x22 },
+	{ 136, 0x0B },
+	{ 137, 0xFF },
+	{ 138, 0x0F },
+	{ 139, 0x00 },
+	{ 140, 0xFE },
+	{ 141, 0x22 },
+	{ 142, 0x0B },
+	{ 143, 0xFF },
+	{ 144, 0x0F },
+	{ 145, 0x00 },
+	{ 146, 0xFE },
+	{ 147, 0x22 },
+	{ 148, 0x0B },
+	{ 149, 0xFF },
+	{ 150, 0x0F },
+	{ 202, 0x30 },
+	{  30, 0x01 },
+	{   4, 0x01 },
+	{  31, 0x41 },
+};
+
+static struct sharp_state_type sharp_state = { 0 };
+static struct msm_panel_common_pdata *lcdc_sharp_pdata;
+
+#ifndef CONFIG_SPI_QSD
+static void sharp_spi_write_byte(u8 val)
+{
+	int i;
+
+	/* Clock should be Low before entering */
+	for (i = 0; i < 8; i++) {
+		/* #1: Drive the Data (High or Low) */
+		if (val & bit_shift[i])
+			gpio_set_value(spi_mosi, 1);
+		else
+			gpio_set_value(spi_mosi, 0);
+
+		/* #2: Drive the Clk High and then Low */
+		gpio_set_value(spi_sclk, 1);
+		gpio_set_value(spi_sclk, 0);
+	}
+}
+#endif
+
+static int serigo(u8 reg, u8 data)
+{
+#ifdef CONFIG_SPI_QSD
+	char                tx_buf[2];
+	int                 rc;
+	struct spi_message  m;
+	struct spi_transfer t;
+
+	if (!lcdc_spi_client) {
+		printk(KERN_ERR "%s lcdc_spi_client is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	memset(&t, 0, sizeof t);
+	t.tx_buf = tx_buf;
+	spi_setup(lcdc_spi_client);
+	spi_message_init(&m);
+	spi_message_add_tail(&t, &m);
+
+	tx_buf[0] = reg;
+	tx_buf[1] = data;
+	t.rx_buf = NULL;
+	t.len = 2;
+	rc = spi_sync(lcdc_spi_client, &m);
+	return rc;
+#else
+	/* Enable the Chip Select - low */
+	gpio_set_value(spi_cs, 0);
+	udelay(1);
+
+	/* Transmit register address first, then data */
+	sharp_spi_write_byte(reg);
+
+	/* Idle state of MOSI is Low */
+	gpio_set_value(spi_mosi, 0);
+	udelay(1);
+	sharp_spi_write_byte(data);
+
+	gpio_set_value(spi_mosi, 0);
+	gpio_set_value(spi_cs, 1);
+	return 0;
+#endif
+}
+
+#ifndef CONFIG_SPI_QSD
+static void sharp_spi_init(void)
+{
+	spi_sclk = *(lcdc_sharp_pdata->gpio_num);
+	spi_cs   = *(lcdc_sharp_pdata->gpio_num + 1);
+	spi_mosi = *(lcdc_sharp_pdata->gpio_num + 2);
+	spi_miso = *(lcdc_sharp_pdata->gpio_num + 3);
+
+	/* Set the output so that we don't disturb the slave device */
+	gpio_set_value(spi_sclk, 0);
+	gpio_set_value(spi_mosi, 0);
+
+	/* Set the Chip Select deasserted (active low) */
+	gpio_set_value(spi_cs, 1);
+}
+#endif
+
+static void sharp_disp_powerup(void)
+{
+	if (!sharp_state.disp_powered_up && !sharp_state.display_on)
+		sharp_state.disp_powered_up = TRUE;
+}
+
+static void sharp_disp_on(void)
+{
+	int i;
+
+	if (sharp_state.disp_powered_up && !sharp_state.display_on) {
+		for (i = 0; i < ARRAY_SIZE(init_sequence); i++) {
+			serigo(init_sequence[i].addr,
+			       init_sequence[i].data);
+		}
+		mdelay(10);
+		serigo(31, 0xC1);
+		mdelay(10);
+		serigo(31, 0xD9);
+		serigo(31, 0xDF);
+
+		sharp_state.display_on = TRUE;
+	}
+}
+
+static int lcdc_sharp_panel_on(struct platform_device *pdev)
+{
+	if (!sharp_state.disp_initialized) {
+#ifndef CONFIG_SPI_QSD
+		lcdc_sharp_pdata->panel_config_gpio(1);
+		sharp_spi_init();
+#endif
+		sharp_disp_powerup();
+		sharp_disp_on();
+		sharp_state.disp_initialized = TRUE;
+	}
+	return 0;
+}
+
+static int lcdc_sharp_panel_off(struct platform_device *pdev)
+{
+	if (sharp_state.disp_powered_up && sharp_state.display_on) {
+		serigo(4, 0x00);
+		mdelay(40);
+		serigo(31, 0xC1);
+		mdelay(40);
+		serigo(31, 0x00);
+		msleep(16);
+		sharp_state.display_on = FALSE;
+		sharp_state.disp_initialized = FALSE;
+	}
+	return 0;
+}
+
+static void lcdc_sharp_panel_set_backlight(struct msm_fb_data_type *mfd)
+{
+	int bl_level;
+
+	bl_level = mfd->bl_level;
+
+#ifdef CONFIG_PMIC8058_PWM
+	if (bl_pwm) {
+		pwm_config(bl_pwm, DUTY_LEVEL * bl_level, PWM_PERIOD);
+		pwm_enable(bl_pwm);
+	}
+#endif
+}
+
+static int __devinit sharp_probe(struct platform_device *pdev)
+{
+	if (pdev->id == 0) {
+		lcdc_sharp_pdata = pdev->dev.platform_data;
+		return 0;
+	}
+
+#ifdef CONFIG_PMIC8058_PWM
+	bl_pwm = pwm_request(lcdc_sharp_pdata->gpio, "backlight");
+	if (bl_pwm == NULL || IS_ERR(bl_pwm)) {
+		pr_err("%s pwm_request() failed\n", __func__);
+		bl_pwm = NULL;
+	}
+
+	printk(KERN_INFO "sharp_probe: bl_pwm=%p LPG_chan=%d\n",
+			bl_pwm, (int)lcdc_sharp_pdata->gpio);
+#endif
+
+	msm_fb_add_device(pdev);
+
+	return 0;
+}
+
+#ifdef CONFIG_SPI_QSD
+static int __devinit lcdc_sharp_spi_probe(struct spi_device *spi)
+{
+	lcdc_spi_client = spi;
+	lcdc_spi_client->bits_per_word = 32;
+	return 0;
+}
+static int __devexit lcdc_sharp_spi_remove(struct spi_device *spi)
+{
+	lcdc_spi_client = NULL;
+	return 0;
+}
+static struct spi_driver lcdc_sharp_spi_driver = {
+	.driver = {
+		.name  = LCDC_SHARP_SPI_DEVICE_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe         = lcdc_sharp_spi_probe,
+	.remove        = __devexit_p(lcdc_sharp_spi_remove),
+};
+#endif
+static struct platform_driver this_driver = {
+	.probe  = sharp_probe,
+	.driver = {
+		.name   = "lcdc_sharp_wvga",
+	},
+};
+
+static struct msm_fb_panel_data sharp_panel_data = {
+	.on = lcdc_sharp_panel_on,
+	.off = lcdc_sharp_panel_off,
+	.set_backlight = lcdc_sharp_panel_set_backlight,
+};
+
+static struct platform_device this_device = {
+	.name   = "lcdc_sharp_wvga",
+	.id	= 1,
+	.dev	= {
+		.platform_data = &sharp_panel_data,
+	}
+};
+
+static int __init lcdc_sharp_panel_init(void)
+{
+	int ret;
+	struct msm_panel_info *pinfo;
+
+#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
+	if (msm_fb_detect_client("lcdc_sharp_wvga_pt"))
+		return 0;
+#endif
+
+	ret = platform_driver_register(&this_driver);
+	if (ret)
+		return ret;
+
+	pinfo = &sharp_panel_data.panel_info;
+	pinfo->xres = 480;
+	pinfo->yres = 800;
+	MSM_FB_SINGLE_MODE_PANEL(pinfo);
+	pinfo->type = LCDC_PANEL;
+	pinfo->pdest = DISPLAY_1;
+	pinfo->wait_cycle = 0;
+	pinfo->bpp = 18;
+	pinfo->fb_num = 2;
+	pinfo->clk_rate = 24500000;
+	pinfo->bl_max = BL_MAX;
+	pinfo->bl_min = 1;
+
+	pinfo->lcdc.h_back_porch = 20;
+	pinfo->lcdc.h_front_porch = 10;
+	pinfo->lcdc.h_pulse_width = 10;
+	pinfo->lcdc.v_back_porch = 2;
+	pinfo->lcdc.v_front_porch = 2;
+	pinfo->lcdc.v_pulse_width = 2;
+	pinfo->lcdc.border_clr = 0;
+	pinfo->lcdc.underflow_clr = 0xff;
+	pinfo->lcdc.hsync_skew = 0;
+
+	ret = platform_device_register(&this_device);
+	if (ret) {
+		printk(KERN_ERR "%s not able to register the device\n",
+			__func__);
+		goto fail_driver;
+	}
+#ifdef CONFIG_SPI_QSD
+	ret = spi_register_driver(&lcdc_sharp_spi_driver);
+
+	if (ret) {
+		printk(KERN_ERR "%s not able to register spi\n", __func__);
+		goto fail_device;
+	}
+#endif
+	return ret;
+#ifdef CONFIG_SPI_QSD
+fail_device:
+	platform_device_unregister(&this_device);
+#endif
+fail_driver:
+	platform_driver_unregister(&this_driver);
+
+	return ret;
+}
+
+module_init(lcdc_sharp_panel_init);
+#ifdef CONFIG_SPI_QSD
+static void __exit lcdc_sharp_panel_exit(void)
+{
+	spi_unregister_driver(&lcdc_sharp_spi_driver);
+}
+module_exit(lcdc_sharp_panel_exit);
+#endif
+
diff --git a/drivers/video/msm/lcdc_st15.c b/drivers/video/msm/lcdc_st15.c
new file mode 100644
index 0000000..cdae358
--- /dev/null
+++ b/drivers/video/msm/lcdc_st15.c
@@ -0,0 +1,413 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include "msm_fb.h"
+
+#define DEVICE_NAME "sii9022"
+#define SII9022_DEVICE_ID   0xB0
+#define SII9022_ISR                   0x3D
+#define SII9022_ISR_RXS_STATUS        0x08
+
+static int lcdc_sii9022_panel_on(struct platform_device *pdev);
+static int lcdc_sii9022_panel_off(struct platform_device *pdev);
+
+static struct i2c_client *sii9022_i2c_client;
+
+struct sii9022_data {
+	struct msm_hdmi_platform_data *pd;
+	struct platform_device *pdev;
+	struct work_struct work;
+	int x_res;
+	int y_res;
+	int sysfs_entry_created;
+	int hdmi_attached;
+};
+static struct sii9022_data *dd;
+
+struct sii9022_i2c_addr_data{
+	u8 addr;
+	u8 data;
+};
+
+/* video mode data */
+static u8 video_mode_data[] = {
+	0x00,
+	0xF9, 0x1C, 0x70, 0x17, 0x72, 0x06, 0xEE, 0x02,
+};
+
+static u8 avi_io_format[] = {
+	0x09,
+	0x00, 0x00,
+};
+
+/* power state */
+static struct sii9022_i2c_addr_data regset0[] = {
+	{ 0x60, 0x04 },
+	{ 0x63, 0x00 },
+	{ 0x1E, 0x00 },
+};
+
+static u8 video_infoframe[] = {
+	0x0C,
+	0xF0, 0x00, 0x68, 0x00, 0x04, 0x00, 0x19, 0x00,
+	0xE9, 0x02, 0x04, 0x01, 0x04, 0x06,
+};
+
+/* configure audio */
+static struct sii9022_i2c_addr_data regset1[] = {
+	{ 0x26, 0x90 },
+	{ 0x20, 0x90 },
+	{ 0x1F, 0x80 },
+	{ 0x26, 0x80 },
+	{ 0x24, 0x02 },
+	{ 0x25, 0x0B },
+	{ 0xBC, 0x02 },
+	{ 0xBD, 0x24 },
+	{ 0xBE, 0x02 },
+};
+
+/* enable audio */
+static u8 misc_infoframe[] = {
+	0xBF,
+	0xC2, 0x84, 0x01, 0x0A, 0x6F, 0x02, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+/* set HDMI, active */
+static struct sii9022_i2c_addr_data regset2[] = {
+	{ 0x1A, 0x01 },
+	{ 0x3D, 0x00 },
+	{ 0x3C, 0x02 },
+};
+
+static struct msm_fb_panel_data sii9022_panel_data = {
+	.on = lcdc_sii9022_panel_on,
+	.off = lcdc_sii9022_panel_off,
+};
+
+static struct platform_device sii9022_device = {
+	.name   = DEVICE_NAME,
+	.id	= 1,
+	.dev	= {
+		.platform_data = &sii9022_panel_data,
+	}
+};
+
+static int send_i2c_data(struct i2c_client *client,
+			 struct sii9022_i2c_addr_data *regset,
+			 int size)
+{
+	int i;
+	int rc = 0;
+
+	for (i = 0; i < size; i++) {
+		rc = i2c_smbus_write_byte_data(
+			client,
+			regset[i].addr, regset[i].data);
+		if (rc)
+			break;
+	}
+	return rc;
+}
+
+static void sii9022_work_f(struct work_struct *work)
+{
+	int isr;
+
+	isr = i2c_smbus_read_byte_data(sii9022_i2c_client, SII9022_ISR);
+	if (isr < 0) {
+		dev_err(&sii9022_i2c_client->dev,
+			"i2c read of isr failed rc = 0x%x\n", isr);
+		return;
+	}
+	if (isr == 0)
+		return;
+
+	/* reset any set bits */
+	i2c_smbus_write_byte_data(sii9022_i2c_client, SII9022_ISR, isr);
+	dd->hdmi_attached = isr & SII9022_ISR_RXS_STATUS;
+	if (dd->pd->cable_detect)
+		dd->pd->cable_detect(dd->hdmi_attached);
+	if (dd->hdmi_attached) {
+		dd->x_res = 1280;
+		dd->y_res = 720;
+	} else {
+		dd->x_res = sii9022_panel_data.panel_info.xres;
+		dd->y_res = sii9022_panel_data.panel_info.yres;
+	}
+}
+
+static irqreturn_t sii9022_interrupt(int irq, void *dev_id)
+{
+	struct sii9022_data *dd = dev_id;
+
+	schedule_work(&dd->work);
+	return IRQ_HANDLED;
+}
+
+static int hdmi_sii_enable(struct i2c_client *client)
+{
+	int rc;
+	int retries = 10;
+	int count;
+
+	rc = i2c_smbus_write_byte_data(client, 0xC7, 0x00);
+	if (rc)
+		goto enable_exit;
+
+	do {
+		msleep(1);
+		rc = i2c_smbus_read_byte_data(client, 0x1B);
+	} while ((rc != SII9022_DEVICE_ID) && retries--);
+
+	if (rc != SII9022_DEVICE_ID)
+		return -ENODEV;
+
+	rc = i2c_smbus_write_byte_data(client, 0x1A, 0x11);
+	if (rc)
+		goto enable_exit;
+
+	count = ARRAY_SIZE(video_mode_data);
+	rc = i2c_master_send(client, video_mode_data, count);
+	if (rc != count) {
+		rc = -EIO;
+		goto enable_exit;
+	}
+
+	rc = i2c_smbus_write_byte_data(client, 0x08, 0x20);
+	if (rc)
+		goto enable_exit;
+	count = ARRAY_SIZE(avi_io_format);
+	rc = i2c_master_send(client, avi_io_format, count);
+	if (rc != count) {
+		rc = -EIO;
+		goto enable_exit;
+	}
+
+	rc = send_i2c_data(client, regset0, ARRAY_SIZE(regset0));
+	if (rc)
+		goto enable_exit;
+
+	count = ARRAY_SIZE(video_infoframe);
+	rc = i2c_master_send(client, video_infoframe, count);
+	if (rc != count) {
+		rc = -EIO;
+		goto enable_exit;
+	}
+
+	rc = send_i2c_data(client, regset1, ARRAY_SIZE(regset1));
+	if (rc)
+		goto enable_exit;
+
+	count = ARRAY_SIZE(misc_infoframe);
+	rc = i2c_master_send(client, misc_infoframe, count);
+	if (rc != count) {
+		rc = -EIO;
+		goto enable_exit;
+	}
+
+	rc = send_i2c_data(client, regset2, ARRAY_SIZE(regset2));
+	if (rc)
+		goto enable_exit;
+
+	return 0;
+enable_exit:
+	printk(KERN_ERR "%s: exited rc=%d\n", __func__, rc);
+	return rc;
+}
+
+static ssize_t show_res(struct device *device,
+			 struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%dx%d\n", dd->x_res, dd->y_res);
+}
+
+static struct device_attribute device_attrs[] = {
+	__ATTR(screen_resolution, S_IRUGO|S_IWUSR, show_res, NULL),
+};
+
+static int lcdc_sii9022_panel_on(struct platform_device *pdev)
+{
+	int rc;
+	if (!dd->sysfs_entry_created) {
+		dd->pdev = pdev;
+		rc = device_create_file(&pdev->dev, &device_attrs[0]);
+		if (!rc)
+			dd->sysfs_entry_created = 1;
+	}
+
+	rc = hdmi_sii_enable(sii9022_i2c_client);
+	if (rc) {
+		dd->hdmi_attached = 0;
+		dd->x_res = sii9022_panel_data.panel_info.xres;
+		dd->y_res = sii9022_panel_data.panel_info.yres;
+	}
+	if (dd->pd->irq)
+		enable_irq(dd->pd->irq);
+	/* Don't return the value from hdmi_sii_enable().
+	 * It may fail on some ST1.5s, but we must return 0 from this
+	 * function in order for the on-board display to turn on.
+	 */
+	return 0;
+}
+
+static int lcdc_sii9022_panel_off(struct platform_device *pdev)
+{
+	if (dd->pd->irq)
+		disable_irq(dd->pd->irq);
+	return 0;
+}
+
+static const struct i2c_device_id hdmi_sii_id[] = {
+	{ DEVICE_NAME, 0 },
+	{ }
+};
+
+static int hdmi_sii_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	int rc;
+
+	if (!i2c_check_functionality(client->adapter,
+				     I2C_FUNC_SMBUS_BYTE | I2C_FUNC_I2C))
+		return -ENODEV;
+
+	dd = kzalloc(sizeof *dd, GFP_KERNEL);
+	if (!dd) {
+		rc = -ENOMEM;
+		goto probe_exit;
+	}
+	sii9022_i2c_client = client;
+	i2c_set_clientdata(client, dd);
+	dd->pd = client->dev.platform_data;
+	if (!dd->pd) {
+		rc = -ENODEV;
+		goto probe_free;
+	}
+	if (dd->pd->irq) {
+		INIT_WORK(&dd->work, sii9022_work_f);
+		rc = request_irq(dd->pd->irq,
+				 &sii9022_interrupt,
+				 IRQF_TRIGGER_FALLING,
+				 "sii9022_cable", dd);
+		if (rc)
+			goto probe_free;
+		disable_irq(dd->pd->irq);
+	}
+	msm_fb_add_device(&sii9022_device);
+	dd->x_res = sii9022_panel_data.panel_info.xres;
+	dd->y_res = sii9022_panel_data.panel_info.yres;
+
+	return 0;
+
+probe_free:
+	i2c_set_clientdata(client, NULL);
+	kfree(dd);
+probe_exit:
+	return rc;
+}
+
+static int __devexit hdmi_sii_remove(struct i2c_client *client)
+{
+	int err = 0;
+	struct msm_hdmi_platform_data *pd;
+
+	if (dd->sysfs_entry_created)
+		device_remove_file(&dd->pdev->dev, &device_attrs[0]);
+	pd = client->dev.platform_data;
+	if (pd && pd->irq)
+		free_irq(pd->irq, dd);
+	i2c_set_clientdata(client, NULL);
+	kfree(dd);
+
+	return err;
+}
+
+#ifdef CONFIG_PM
+static int sii9022_suspend(struct device *dev)
+{
+	if (dd && dd->pd && dd->pd->irq)
+		disable_irq(dd->pd->irq);
+	return 0;
+}
+
+static int sii9022_resume(struct device *dev)
+{
+	if (dd && dd->pd && dd->pd->irq)
+		enable_irq(dd->pd->irq);
+	return 0;
+}
+
+static struct dev_pm_ops sii9022_pm_ops = {
+	.suspend = sii9022_suspend,
+	.resume = sii9022_resume,
+};
+#endif
+
+static struct i2c_driver hdmi_sii_i2c_driver = {
+	.driver = {
+		.name = DEVICE_NAME,
+		.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm     = &sii9022_pm_ops,
+#endif
+	},
+	.probe = hdmi_sii_probe,
+	.remove = __devexit_p(hdmi_sii_remove),
+	.id_table = hdmi_sii_id,
+};
+
+static int __init lcdc_st15_init(void)
+{
+	int ret;
+	struct msm_panel_info *pinfo;
+
+	if (msm_fb_detect_client("lcdc_st15"))
+		return 0;
+
+	pinfo = &sii9022_panel_data.panel_info;
+	pinfo->xres = 1366;
+	pinfo->yres = 768;
+	MSM_FB_SINGLE_MODE_PANEL(pinfo);
+	pinfo->type = LCDC_PANEL;
+	pinfo->pdest = DISPLAY_1;
+	pinfo->wait_cycle = 0;
+	pinfo->bpp = 24;
+	pinfo->fb_num = 2;
+	pinfo->clk_rate = 74250000;
+
+	pinfo->lcdc.h_back_porch = 120;
+	pinfo->lcdc.h_front_porch = 20;
+	pinfo->lcdc.h_pulse_width = 40;
+	pinfo->lcdc.v_back_porch = 25;
+	pinfo->lcdc.v_front_porch = 1;
+	pinfo->lcdc.v_pulse_width = 7;
+	pinfo->lcdc.border_clr = 0;      /* blk */
+	pinfo->lcdc.underflow_clr = 0xff;        /* blue */
+	pinfo->lcdc.hsync_skew = 0;
+
+	ret = i2c_add_driver(&hdmi_sii_i2c_driver);
+	if (ret)
+		printk(KERN_ERR "%s: failed to add i2c driver\n", __func__);
+
+	return ret;
+}
+
+static void __exit hdmi_sii_exit(void)
+{
+	i2c_del_driver(&hdmi_sii_i2c_driver);
+}
+
+module_init(lcdc_st15_init);
+module_exit(hdmi_sii_exit);
diff --git a/drivers/video/msm/lcdc_toshiba_fwvga_pt.c b/drivers/video/msm/lcdc_toshiba_fwvga_pt.c
new file mode 100644
index 0000000..3e81471
--- /dev/null
+++ b/drivers/video/msm/lcdc_toshiba_fwvga_pt.c
@@ -0,0 +1,471 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <mach/gpio.h>
+#include <mach/pmic.h>
+#include <mach/socinfo.h>
+#include "msm_fb.h"
+
+static int spi_cs0_N;
+static int spi_sclk;
+static int spi_mosi;
+static int spi_miso;
+
+struct toshiba_state_type {
+	boolean disp_initialized;
+	boolean display_on;
+	boolean disp_powered_up;
+};
+
+static struct toshiba_state_type toshiba_state = { 0 };
+static struct msm_panel_common_pdata *lcdc_toshiba_pdata;
+
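+/*
+ * Bit-bang one 24-bit frame to the panel: the top byte selects command
+ * (0x70, rs = 0) or parameter (0x72, rs = 1) and is followed by
+ * data1/data2.  Bits go out MSB first with roughly 1 us per clock edge.
+ */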
+static int toshiba_spi_write(char data1, char data2, int rs)
+{
+	uint32 bitdata = 0, bnum = 24, bmask = 0x800000;
+
+	gpio_set_value_cansleep(spi_cs0_N, 0);	/* cs* low */
+	udelay(1);
+
+	if (rs)
+		bitdata = (0x72 << 16);
+	else
+		bitdata = (0x70 << 16);
+
+	bitdata |= ((data1 << 8) | data2);
+
+	while (bnum) {
+		gpio_set_value_cansleep(spi_sclk, 0); /* clk low */
+		udelay(1);
+
+		if (bitdata & bmask)
+			gpio_set_value_cansleep(spi_mosi, 1);
+		else
+			gpio_set_value_cansleep(spi_mosi, 0);
+
+		udelay(1);
+		gpio_set_value_cansleep(spi_sclk, 1); /* clk high */
+		udelay(1);
+		bmask >>= 1;
+		bnum--;
+	}
+
+	gpio_set_value_cansleep(spi_cs0_N, 1);	/* cs* high */
+	udelay(1);
+	return 0;
+}
+
+static void spi_pin_assign(void)
+{
+	/* Set the default GPIOs */
+	spi_mosi  = *(lcdc_toshiba_pdata->gpio_num);
+	spi_miso  = *(lcdc_toshiba_pdata->gpio_num + 1);
+	spi_sclk  = *(lcdc_toshiba_pdata->gpio_num + 2);
+	spi_cs0_N = *(lcdc_toshiba_pdata->gpio_num + 3);
+}
+
+static void toshiba_disp_powerup(void)
+{
+	if (!toshiba_state.disp_powered_up && !toshiba_state.display_on) {
+		/* Reset the hardware first */
+		/* Include DAC power up implementation here */
+		toshiba_state.disp_powered_up = TRUE;
+	}
+}
+
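+/*
+ * Panel init sequence: each step writes a register address (rs = 0)
+ * followed by its value (rs = 1), with ~500 us between steps.  The
+ * extended table is compiled in only when TOSHIBA_FWVGA_FULL_INIT is
+ * defined.
+ */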
+static void toshiba_disp_on(void)
+{
+	if (toshiba_state.disp_powered_up && !toshiba_state.display_on) {
+		toshiba_spi_write(0x01, 0x00, 0);
+		toshiba_spi_write(0x30, 0x00, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x01, 0x01, 0);
+		toshiba_spi_write(0x40, 0x10, 1);
+
+#ifdef TOSHIBA_FWVGA_FULL_INIT
+		udelay(500);
+		toshiba_spi_write(0x01, 0x06, 0);
+		toshiba_spi_write(0x00, 0x00, 1);
+		msleep(20);
+
+		toshiba_spi_write(0x00, 0x01, 0);
+		toshiba_spi_write(0x03, 0x10, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x00, 0x02, 0);
+		toshiba_spi_write(0x01, 0x00, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x00, 0x03, 0);
+		toshiba_spi_write(0x00, 0x00, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x00, 0x07, 0);
+		toshiba_spi_write(0x00, 0x00, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x00, 0x08, 0);
+		toshiba_spi_write(0x00, 0x04, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x00, 0x09, 0);
+		toshiba_spi_write(0x00, 0x0c, 1);
+#endif
+		udelay(500);
+		toshiba_spi_write(0x00, 0x0c, 0);
+		toshiba_spi_write(0x40, 0x10, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x00, 0x0e, 0);
+		toshiba_spi_write(0x00, 0x00, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x00, 0x20, 0);
+		toshiba_spi_write(0x01, 0x3f, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x00, 0x22, 0);
+		toshiba_spi_write(0x76, 0x00, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x00, 0x23, 0);
+		toshiba_spi_write(0x1c, 0x0a, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x00, 0x24, 0);
+		toshiba_spi_write(0x1c, 0x2c, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x00, 0x25, 0);
+		toshiba_spi_write(0x1c, 0x4e, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x00, 0x27, 0);
+		toshiba_spi_write(0x00, 0x00, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x00, 0x28, 0);
+		toshiba_spi_write(0x76, 0x0c, 1);
+
+#ifdef TOSHIBA_FWVGA_FULL_INIT
+		udelay(500);
+		toshiba_spi_write(0x03, 0x00, 0);
+		toshiba_spi_write(0x00, 0x00, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x03, 0x01, 0);
+		toshiba_spi_write(0x05, 0x02, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x03, 0x02, 0);
+		toshiba_spi_write(0x07, 0x05, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x03, 0x03, 0);
+		toshiba_spi_write(0x00, 0x00, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x03, 0x04, 0);
+		toshiba_spi_write(0x02, 0x00, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x03, 0x05, 0);
+		toshiba_spi_write(0x07, 0x07, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x03, 0x06, 0);
+		toshiba_spi_write(0x10, 0x10, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x03, 0x07, 0);
+		toshiba_spi_write(0x02, 0x02, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x03, 0x08, 0);
+		toshiba_spi_write(0x07, 0x04, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x03, 0x09, 0);
+		toshiba_spi_write(0x07, 0x07, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x03, 0x0a, 0);
+		toshiba_spi_write(0x00, 0x00, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x03, 0x0b, 0);
+		toshiba_spi_write(0x00, 0x00, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x03, 0x0c, 0);
+		toshiba_spi_write(0x07, 0x07, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x03, 0x0d, 0);
+		toshiba_spi_write(0x10, 0x10, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x03, 0x10, 0);
+		toshiba_spi_write(0x01, 0x04, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x03, 0x11, 0);
+		toshiba_spi_write(0x05, 0x03, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x03, 0x12, 0);
+		toshiba_spi_write(0x03, 0x04, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x03, 0x15, 0);
+		toshiba_spi_write(0x03, 0x04, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x03, 0x16, 0);
+		toshiba_spi_write(0x03, 0x1c, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x03, 0x17, 0);
+		toshiba_spi_write(0x02, 0x04, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x03, 0x18, 0);
+		toshiba_spi_write(0x04, 0x02, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x03, 0x19, 0);
+		toshiba_spi_write(0x03, 0x05, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x03, 0x1c, 0);
+		toshiba_spi_write(0x07, 0x07, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x03, 0x1d, 0);
+		toshiba_spi_write(0x02, 0x1f, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x03, 0x20, 0);
+		toshiba_spi_write(0x05, 0x07, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x03, 0x21, 0);
+		toshiba_spi_write(0x06, 0x04, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x03, 0x22, 0);
+		toshiba_spi_write(0x04, 0x05, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x03, 0x27, 0);
+		toshiba_spi_write(0x02, 0x03, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x03, 0x28, 0);
+		toshiba_spi_write(0x03, 0x00, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x03, 0x29, 0);
+		toshiba_spi_write(0x00, 0x02, 1);
+
+#endif
+		udelay(500);
+		toshiba_spi_write(0x01, 0x00, 0);
+		toshiba_spi_write(0x36, 0x3c, 1);
+		udelay(500);
+
+		toshiba_spi_write(0x01, 0x01, 0);
+		toshiba_spi_write(0x40, 0x03, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x01, 0x02, 0);
+		toshiba_spi_write(0x00, 0x01, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x01, 0x03, 0);
+		toshiba_spi_write(0x3c, 0x58, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x01, 0x0c, 0);
+		toshiba_spi_write(0x01, 0x35, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x01, 0x06, 0);
+		toshiba_spi_write(0x00, 0x02, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x00, 0x29, 0);
+		toshiba_spi_write(0x03, 0xbf, 1);
+
+		udelay(500);
+		toshiba_spi_write(0x01, 0x06, 0);
+		toshiba_spi_write(0x00, 0x03, 1);
+		msleep(32);
+
+		toshiba_spi_write(0x01, 0x01, 0);
+		toshiba_spi_write(0x40, 0x10, 1);
+		msleep(80);
+
+		toshiba_state.display_on = TRUE;
+	}
+}
+
+static int lcdc_toshiba_panel_on(struct platform_device *pdev)
+{
+	if (!toshiba_state.disp_initialized) {
+		/* Configure reset GPIO that drives DAC */
+		if (lcdc_toshiba_pdata->panel_config_gpio)
+			lcdc_toshiba_pdata->panel_config_gpio(1);
+		toshiba_disp_powerup();
+		toshiba_disp_on();
+		toshiba_state.disp_initialized = TRUE;
+	}
+	return 0;
+}
+
+static int lcdc_toshiba_panel_off(struct platform_device *pdev)
+{
+	if (toshiba_state.disp_powered_up && toshiba_state.display_on) {
+		toshiba_spi_write(0x01, 0x06, 1);
+		toshiba_spi_write(0x00, 0x02, 1);
+		msleep(80);
+
+		toshiba_spi_write(0x01, 0x06, 1);
+		toshiba_spi_write(0x00, 0x00, 1);
+
+		toshiba_spi_write(0x00, 0x29, 1);
+		toshiba_spi_write(0x00, 0x02, 1);
+
+		toshiba_spi_write(0x01, 0x00, 1);
+		toshiba_spi_write(0x30, 0x00, 1);
+
+		if (lcdc_toshiba_pdata->panel_config_gpio)
+			lcdc_toshiba_pdata->panel_config_gpio(0);
+		toshiba_state.display_on = FALSE;
+		toshiba_state.disp_initialized = FALSE;
+	}
+
+	return 0;
+}
+
+static void lcdc_toshiba_set_backlight(struct msm_fb_data_type *mfd)
+{
+	int ret;
+	int bl_level;
+
+	bl_level = mfd->bl_level;
+
+	if (lcdc_toshiba_pdata && lcdc_toshiba_pdata->pmic_backlight)
+		ret = lcdc_toshiba_pdata->pmic_backlight(bl_level);
+	else
+		pr_err("%s(): Backlight level set failed\n", __func__);
+
+	return;
+}
+
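+/*
+ * pdev->id == 0 supplies the common platform data (SPI GPIO numbers);
+ * any other id is a panel instance handed to the framebuffer core.
+ */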
+static int __devinit toshiba_probe(struct platform_device *pdev)
+{
+	if (pdev->id == 0) {
+		lcdc_toshiba_pdata = pdev->dev.platform_data;
+		spi_pin_assign();
+		return 0;
+	}
+	msm_fb_add_device(pdev);
+	return 0;
+}
+
+static struct platform_driver this_driver = {
+	.probe  = toshiba_probe,
+	.driver = {
+		.name   = "lcdc_toshiba_fwvga_pt",
+	},
+};
+
+static struct msm_fb_panel_data toshiba_panel_data = {
+	.on = lcdc_toshiba_panel_on,
+	.off = lcdc_toshiba_panel_off,
+	.set_backlight = lcdc_toshiba_set_backlight,
+};
+
+static struct platform_device this_device = {
+	.name   = "lcdc_toshiba_fwvga_pt",
+	.id	= 1,
+	.dev	= {
+		.platform_data = &toshiba_panel_data,
+	}
+};
+
+static int __init lcdc_toshiba_panel_init(void)
+{
+	int ret;
+	struct msm_panel_info *pinfo;
+
+	ret = msm_fb_detect_client("lcdc_toshiba_fwvga_pt");
+	if (ret)
+		return 0;
+
+
+	ret = platform_driver_register(&this_driver);
+	if (ret)
+		return ret;
+
+	pinfo = &toshiba_panel_data.panel_info;
+	pinfo->xres = 480;
+	pinfo->yres = 864;
+	MSM_FB_SINGLE_MODE_PANEL(pinfo);
+	pinfo->type = LCDC_PANEL;
+	pinfo->pdest = DISPLAY_1;
+	pinfo->wait_cycle = 0;
+	pinfo->bpp = 18;
+	pinfo->fb_num = 2;
+	/* 30Mhz mdp_lcdc_pclk and mdp_lcdc_pad_pcl */
+	pinfo->clk_rate = 30720000;
+	pinfo->bl_max = 100;
+	pinfo->bl_min = 1;
+
+	if (cpu_is_msm7x25a() || cpu_is_msm7x25aa()) {
+		pinfo->yres = 320;
+		pinfo->lcdc.h_back_porch = 10;
+		pinfo->lcdc.h_front_porch = 21;
+		pinfo->lcdc.h_pulse_width = 5;
+		pinfo->lcdc.v_back_porch = 8;
+		pinfo->lcdc.v_front_porch = 540;
+		pinfo->lcdc.v_pulse_width = 42;
+		pinfo->lcdc.border_clr = 0;     /* blk */
+		pinfo->lcdc.underflow_clr = 0xff;       /* blue */
+		pinfo->lcdc.hsync_skew = 0;
+	} else {
+		pinfo->lcdc.h_back_porch = 8;
+		pinfo->lcdc.h_front_porch = 16;
+		pinfo->lcdc.h_pulse_width = 8;
+		pinfo->lcdc.v_back_porch = 2;
+		pinfo->lcdc.v_front_porch = 2;
+		pinfo->lcdc.v_pulse_width = 2;
+		pinfo->lcdc.border_clr = 0;     /* blk */
+		pinfo->lcdc.underflow_clr = 0xff;       /* blue */
+		pinfo->lcdc.hsync_skew = 0;
+	}
+
+	ret = platform_device_register(&this_device);
+	if (ret) {
+		printk(KERN_ERR "%s: unable to register the device\n",
+			 __func__);
+		platform_driver_unregister(&this_driver);
+	}
+	return ret;
+}
+
+device_initcall(lcdc_toshiba_panel_init);
diff --git a/drivers/video/msm/lcdc_toshiba_wvga_pt.c b/drivers/video/msm/lcdc_toshiba_wvga_pt.c
new file mode 100644
index 0000000..f0aa8f5
--- /dev/null
+++ b/drivers/video/msm/lcdc_toshiba_wvga_pt.c
@@ -0,0 +1,519 @@
+/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#ifdef CONFIG_SPI_QSD
+#include <linux/spi/spi.h>
+#endif
+#include <mach/gpio.h>
+#include <mach/pmic.h>
+#include "msm_fb.h"
+
+#ifdef CONFIG_FB_MSM_TRY_MDDI_CATCH_LCDC_PRISM
+#include "mddihosti.h"
+#endif
+
+#ifdef CONFIG_SPI_QSD
+#define LCDC_TOSHIBA_SPI_DEVICE_NAME "lcdc_toshiba_ltm030dd40"
+static struct spi_device *lcdc_toshiba_spi_client;
+#else
+static int spi_cs;
+static int spi_sclk;
+static int spi_mosi;
+static int spi_miso;
+#endif
+struct toshiba_state_type {
+	boolean disp_initialized;
+	boolean display_on;
+	boolean disp_powered_up;
+};
+
+static struct toshiba_state_type toshiba_state = { 0 };
+static struct msm_panel_common_pdata *lcdc_toshiba_pdata;
+
+#ifndef CONFIG_SPI_QSD
+static void toshiba_spi_write_byte(char dc, uint8 data)
+{
+	uint32 bit;
+	int bnum;
+
+	gpio_set_value(spi_sclk, 0); /* clk low */
+	/* dc: 0 for command, 1 for parameter */
+	gpio_set_value(spi_mosi, dc);
+	udelay(1);	/* at least 20 ns */
+	gpio_set_value(spi_sclk, 1); /* clk high */
+	udelay(1);	/* at least 20 ns */
+	bnum = 8;	/* 8 data bits */
+	bit = 0x80;
+	while (bnum) {
+		gpio_set_value(spi_sclk, 0); /* clk low */
+		if (data & bit)
+			gpio_set_value(spi_mosi, 1);
+		else
+			gpio_set_value(spi_mosi, 0);
+		udelay(1);
+		gpio_set_value(spi_sclk, 1); /* clk high */
+		udelay(1);
+		bit >>= 1;
+		bnum--;
+	}
+}
+#endif
+
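+/*
+ * Write a command byte plus up to three parameter bytes.  With
+ * CONFIG_SPI_QSD the 9-bit frames (dc bit + data byte) are packed into a
+ * single word and sent via spi_sync(); otherwise the same framing is
+ * bit-banged through toshiba_spi_write_byte().
+ */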
+static int toshiba_spi_write(char cmd, uint32 data, int num)
+{
+	char *bp;
+#ifdef CONFIG_SPI_QSD
+	char                tx_buf[4];
+	int                 rc, i;
+	struct spi_message  m;
+	struct spi_transfer t;
+	uint32 final_data = 0;
+
+	if (!lcdc_toshiba_spi_client) {
+		printk(KERN_ERR "%s lcdc_toshiba_spi_client is NULL\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	memset(&t, 0, sizeof t);
+	t.tx_buf = tx_buf;
+	spi_setup(lcdc_toshiba_spi_client);
+	spi_message_init(&m);
+	spi_message_add_tail(&t, &m);
+
+	/* command byte first */
+	final_data |= cmd << 23;
+	t.len = num + 2;
+	if (t.len < 4)
+		t.bits_per_word = 8 * t.len;
+	/* followed by parameter bytes */
+	if (num) {
+		bp = (char *)&data;
+		bp += (num - 1);
+		i = 1;
+		while (num) {
+			final_data |= 1 << (((4 - i) << 3) - i - 1);
+			final_data |= *bp << (((4 - i - 1) << 3) - i - 1);
+			num--;
+			bp--;
+			i++;
+		}
+	}
+
+	bp = (char *)&final_data;
+	for (i = 0; i < t.len; i++)
+		tx_buf[i] = bp[3 - i];
+	t.rx_buf = NULL;
+	rc = spi_sync(lcdc_toshiba_spi_client, &m);
+	if (rc)
+		printk(KERN_ERR "spi_sync _write failed %d\n", rc);
+	return rc;
+#else
+	gpio_set_value(spi_cs, 1);	/* cs high */
+
+	/* command byte first */
+	toshiba_spi_write_byte(0, cmd);
+
+	/* followed by parameter bytes */
+	if (num) {
+		bp = (char *)&data;
+		bp += (num - 1);
+		while (num) {
+			toshiba_spi_write_byte(1, *bp);
+			num--;
+			bp--;
+		}
+	}
+
+	gpio_set_value(spi_cs, 0);	/* cs low */
+	udelay(1);
+	return 0;
+#endif
+}
+
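+/*
+ * Read 'num' bytes back from the panel.  The GPIO path clocks out the
+ * command byte and then samples spi_miso MSB first (an extra dc bit is
+ * skipped for multi-byte reads); the SPI_QSD path issues a 5-byte
+ * transfer and realigns the reply, which comes back offset by the 9-bit
+ * command framing.
+ */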
+static int toshiba_spi_read_bytes(char cmd, uint32 *data, int num)
+{
+#ifdef CONFIG_SPI_QSD
+	char            tx_buf[5];
+	char		    rx_buf[5];
+	int                 rc;
+	struct spi_message  m;
+	struct spi_transfer t;
+
+	if (!lcdc_toshiba_spi_client) {
+		printk(KERN_ERR "%s lcdc_toshiba_spi_client is NULL\n",
+			 __func__);
+		return -EINVAL;
+	}
+
+	memset(&t, 0, sizeof t);
+	t.tx_buf = tx_buf;
+	t.rx_buf = rx_buf;
+	spi_setup(lcdc_toshiba_spi_client);
+	spi_message_init(&m);
+	spi_message_add_tail(&t, &m);
+
+	/* command byte first */
+	tx_buf[0] = 0 | ((cmd >> 1) & 0x7f);
+	tx_buf[1] = (cmd & 0x01) << 7;
+	tx_buf[2] = 0;
+	tx_buf[3] = 0;
+	tx_buf[4] = 0;
+
+	t.len = 5;
+
+	rc = spi_sync(lcdc_toshiba_spi_client, &m);
+	*data = 0;
+	*data = ((rx_buf[1] & 0x1f) << 19) | (rx_buf[2] << 11) |
+		(rx_buf[3] << 3) | ((rx_buf[4] & 0xe0) >> 5);
+	if (rc)
+		printk(KERN_ERR "spi_sync _read failed %d\n", rc);
+	return rc;
+#else
+	uint32 dbit, bits;
+	int bnum;
+
+	gpio_set_value(spi_cs, 1);	/* cs high */
+
+	/* command byte first */
+	toshiba_spi_write_byte(0, cmd);
+
+	if (num > 1) {
+		/* extra dc bit */
+		gpio_set_value(spi_sclk, 0); /* clk low */
+		udelay(1);
+		dbit = gpio_get_value(spi_miso);/* dc bit */
+		udelay(1);
+		gpio_set_value(spi_sclk, 1); /* clk high */
+	}
+
+	/* followed by data bytes */
+	bnum = num * 8;	/* number of bits */
+	bits = 0;
+	while (bnum) {
+		bits <<= 1;
+		gpio_set_value(spi_sclk, 0); /* clk low */
+		udelay(1);
+		dbit = gpio_get_value(spi_miso);
+		udelay(1);
+		gpio_set_value(spi_sclk, 1); /* clk high */
+		bits |= dbit;
+		bnum--;
+	}
+
+	*data = bits;
+
+	udelay(1);
+	gpio_set_value(spi_cs, 0);	/* cs low */
+	udelay(1);
+	return 0;
+#endif
+}
+
+#ifndef CONFIG_SPI_QSD
+static void spi_pin_assign(void)
+{
+	/* Set the default GPIOs */
+	spi_sclk = *(lcdc_toshiba_pdata->gpio_num);
+	spi_cs   = *(lcdc_toshiba_pdata->gpio_num + 1);
+	spi_mosi  = *(lcdc_toshiba_pdata->gpio_num + 2);
+	spi_miso  = *(lcdc_toshiba_pdata->gpio_num + 3);
+}
+#endif
+
+static void toshiba_disp_powerup(void)
+{
+	if (!toshiba_state.disp_powered_up && !toshiba_state.display_on) {
+		/* Reset the hardware first */
+		/* Include DAC power up implementation here */
+		toshiba_state.disp_powered_up = TRUE;
+	}
+}
+
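+/*
+ * Program the panel register set over SPI and turn the display on.  Once
+ * the sequence completes, the panel ID register (0x04) is read back and
+ * logged.
+ */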
+static void toshiba_disp_on(void)
+{
+	uint32	data;
+
+#ifndef CONFIG_SPI_QSD
+	gpio_set_value(spi_cs, 0);	/* low */
+	gpio_set_value(spi_sclk, 1);	/* high */
+	gpio_set_value(spi_mosi, 0);
+	gpio_set_value(spi_miso, 0);
+#endif
+
+	if (toshiba_state.disp_powered_up && !toshiba_state.display_on) {
+		toshiba_spi_write(0, 0, 0);
+		mdelay(7);
+		toshiba_spi_write(0, 0, 0);
+		mdelay(7);
+		toshiba_spi_write(0, 0, 0);
+		mdelay(7);
+		toshiba_spi_write(0xba, 0x11, 1);
+		toshiba_spi_write(0x36, 0x00, 1);
+		mdelay(1);
+		toshiba_spi_write(0x3a, 0x60, 1);
+		toshiba_spi_write(0xb1, 0x5d, 1);
+		mdelay(1);
+		toshiba_spi_write(0xb2, 0x33, 1);
+		toshiba_spi_write(0xb3, 0x22, 1);
+		mdelay(1);
+		toshiba_spi_write(0xb4, 0x02, 1);
+		toshiba_spi_write(0xb5, 0x1e, 1); /* vcs -- adjust brightness */
+		mdelay(1);
+		toshiba_spi_write(0xb6, 0x27, 1);
+		toshiba_spi_write(0xb7, 0x03, 1);
+		mdelay(1);
+		toshiba_spi_write(0xb9, 0x24, 1);
+		toshiba_spi_write(0xbd, 0xa1, 1);
+		mdelay(1);
+		toshiba_spi_write(0xbb, 0x00, 1);
+		toshiba_spi_write(0xbf, 0x01, 1);
+		mdelay(1);
+		toshiba_spi_write(0xbe, 0x00, 1);
+		toshiba_spi_write(0xc0, 0x11, 1);
+		mdelay(1);
+		toshiba_spi_write(0xc1, 0x11, 1);
+		toshiba_spi_write(0xc2, 0x11, 1);
+		mdelay(1);
+		toshiba_spi_write(0xc3, 0x3232, 2);
+		mdelay(1);
+		toshiba_spi_write(0xc4, 0x3232, 2);
+		mdelay(1);
+		toshiba_spi_write(0xc5, 0x3232, 2);
+		mdelay(1);
+		toshiba_spi_write(0xc6, 0x3232, 2);
+		mdelay(1);
+		toshiba_spi_write(0xc7, 0x6445, 2);
+		mdelay(1);
+		toshiba_spi_write(0xc8, 0x44, 1);
+		toshiba_spi_write(0xc9, 0x52, 1);
+		mdelay(1);
+		toshiba_spi_write(0xca, 0x00, 1);
+		mdelay(1);
+		toshiba_spi_write(0xec, 0x02a4, 2);	/* 0x02a4 */
+		mdelay(1);
+		toshiba_spi_write(0xcf, 0x01, 1);
+		mdelay(1);
+		toshiba_spi_write(0xd0, 0xc003, 2);	/* c003 */
+		mdelay(1);
+		toshiba_spi_write(0xd1, 0x01, 1);
+		mdelay(1);
+		toshiba_spi_write(0xd2, 0x0028, 2);
+		mdelay(1);
+		toshiba_spi_write(0xd3, 0x0028, 2);
+		mdelay(1);
+		toshiba_spi_write(0xd4, 0x26a4, 2);
+		mdelay(1);
+		toshiba_spi_write(0xd5, 0x20, 1);
+		mdelay(1);
+		toshiba_spi_write(0xef, 0x3200, 2);
+		mdelay(32);
+		toshiba_spi_write(0xbc, 0x80, 1);	/* wvga pass through */
+		toshiba_spi_write(0x3b, 0x00, 1);
+		mdelay(1);
+		toshiba_spi_write(0xb0, 0x16, 1);
+		mdelay(1);
+		toshiba_spi_write(0xb8, 0xfff5, 2);
+		mdelay(1);
+		toshiba_spi_write(0x11, 0, 0);
+		mdelay(5);
+		toshiba_spi_write(0x29, 0, 0);
+		mdelay(5);
+		toshiba_state.display_on = TRUE;
+	}
+
+	data = 0;
+	toshiba_spi_read_bytes(0x04, &data, 3);
+	printk(KERN_INFO "toshiba_disp_on: id=%x\n", data);
+
+}
+
+static int lcdc_toshiba_panel_on(struct platform_device *pdev)
+{
+	if (!toshiba_state.disp_initialized) {
+		/* Configure reset GPIO that drives DAC */
+		if (lcdc_toshiba_pdata->panel_config_gpio)
+			lcdc_toshiba_pdata->panel_config_gpio(1);
+		toshiba_disp_powerup();
+		toshiba_disp_on();
+		toshiba_state.disp_initialized = TRUE;
+	}
+	return 0;
+}
+
+static int lcdc_toshiba_panel_off(struct platform_device *pdev)
+{
+	if (toshiba_state.disp_powered_up && toshiba_state.display_on) {
+		/* Main panel power off (Deep standby in) */
+
+		toshiba_spi_write(0x28, 0, 0);	/* display off */
+		mdelay(1);
+		toshiba_spi_write(0xb8, 0x8002, 2);	/* output control */
+		mdelay(1);
+		toshiba_spi_write(0x10, 0x00, 1);	/* sleep mode in */
+		mdelay(85);		/* wait 85 msec */
+		toshiba_spi_write(0xb0, 0x00, 1);	/* deep standby in */
+		mdelay(1);
+		if (lcdc_toshiba_pdata->panel_config_gpio)
+			lcdc_toshiba_pdata->panel_config_gpio(0);
+		toshiba_state.display_on = FALSE;
+		toshiba_state.disp_initialized = FALSE;
+	}
+	return 0;
+}
+
+static void lcdc_toshiba_set_backlight(struct msm_fb_data_type *mfd)
+{
+	int bl_level;
+	int ret = -EPERM;
+	int i = 0;
+
+	bl_level = mfd->bl_level;
+
+	while (i++ < 3) {
+		ret = pmic_set_led_intensity(LED_LCD, bl_level);
+		if (ret == 0)
+			return;
+		msleep(10);
+	}
+
+	printk(KERN_WARNING "%s: can't set lcd backlight!\n",
+				__func__);
+}
+
+static int __devinit toshiba_probe(struct platform_device *pdev)
+{
+	if (pdev->id == 0) {
+		lcdc_toshiba_pdata = pdev->dev.platform_data;
+#ifndef CONFIG_SPI_QSD
+		spi_pin_assign();
+#endif
+		return 0;
+	}
+	msm_fb_add_device(pdev);
+	return 0;
+}
+
+#ifdef CONFIG_SPI_QSD
+static int __devinit lcdc_toshiba_spi_probe(struct spi_device *spi)
+{
+	lcdc_toshiba_spi_client = spi;
+	lcdc_toshiba_spi_client->bits_per_word = 32;
+	return 0;
+}
+static int __devexit lcdc_toshiba_spi_remove(struct spi_device *spi)
+{
+	lcdc_toshiba_spi_client = NULL;
+	return 0;
+}
+
+static struct spi_driver lcdc_toshiba_spi_driver = {
+	.driver = {
+		.name  = LCDC_TOSHIBA_SPI_DEVICE_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe         = lcdc_toshiba_spi_probe,
+	.remove        = __devexit_p(lcdc_toshiba_spi_remove),
+};
+#endif
+static struct platform_driver this_driver = {
+	.probe  = toshiba_probe,
+	.driver = {
+		.name   = "lcdc_toshiba_wvga",
+	},
+};
+
+static struct msm_fb_panel_data toshiba_panel_data = {
+	.on = lcdc_toshiba_panel_on,
+	.off = lcdc_toshiba_panel_off,
+	.set_backlight = lcdc_toshiba_set_backlight,
+};
+
+static struct platform_device this_device = {
+	.name   = "lcdc_toshiba_wvga",
+	.id	= 1,
+	.dev	= {
+		.platform_data = &toshiba_panel_data,
+	}
+};
+
+static int __init lcdc_toshiba_panel_init(void)
+{
+	int ret;
+	struct msm_panel_info *pinfo;
+#ifdef CONFIG_FB_MSM_TRY_MDDI_CATCH_LCDC_PRISM
+	if (mddi_get_client_id() != 0)
+		return 0;
+
+	ret = msm_fb_detect_client("lcdc_toshiba_wvga_pt");
+	if (ret)
+		return 0;
+
+#endif
+
+	ret = platform_driver_register(&this_driver);
+	if (ret)
+		return ret;
+
+	pinfo = &toshiba_panel_data.panel_info;
+	pinfo->xres = 480;
+	pinfo->yres = 800;
+	MSM_FB_SINGLE_MODE_PANEL(pinfo);
+	pinfo->type = LCDC_PANEL;
+	pinfo->pdest = DISPLAY_1;
+	pinfo->wait_cycle = 0;
+	pinfo->bpp = 18;
+	pinfo->fb_num = 2;
+	/* 30Mhz mdp_lcdc_pclk and mdp_lcdc_pad_pcl */
+	pinfo->clk_rate = 30720000;
+	pinfo->bl_max = 15;
+	pinfo->bl_min = 1;
+
+	pinfo->lcdc.h_back_porch = 184;	/* hsw = 8 + hbp=184 */
+	pinfo->lcdc.h_front_porch = 4;
+	pinfo->lcdc.h_pulse_width = 8;
+	pinfo->lcdc.v_back_porch = 2;	/* vsw=1 + vbp = 2 */
+	pinfo->lcdc.v_front_porch = 3;
+	pinfo->lcdc.v_pulse_width = 1;
+	pinfo->lcdc.border_clr = 0;     /* blk */
+	pinfo->lcdc.underflow_clr = 0xff;       /* blue */
+	pinfo->lcdc.hsync_skew = 0;
+
+	ret = platform_device_register(&this_device);
+	if (ret) {
+		printk(KERN_ERR "%s: unable to register the device\n",
+			 __func__);
+		goto fail_driver;
+	}
+#ifdef CONFIG_SPI_QSD
+	ret = spi_register_driver(&lcdc_toshiba_spi_driver);
+
+	if (ret) {
+		printk(KERN_ERR "%s: unable to register SPI driver\n", __func__);
+		goto fail_device;
+	}
+#endif
+	return ret;
+
+#ifdef CONFIG_SPI_QSD
+fail_device:
+	platform_device_unregister(&this_device);
+#endif
+fail_driver:
+	platform_driver_unregister(&this_driver);
+	return ret;
+}
+
+device_initcall(lcdc_toshiba_panel_init);
diff --git a/drivers/video/msm/lcdc_wxga.c b/drivers/video/msm/lcdc_wxga.c
new file mode 100644
index 0000000..3204704
--- /dev/null
+++ b/drivers/video/msm/lcdc_wxga.c
@@ -0,0 +1,53 @@
+/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_fb.h"
+
+static int __init lcdc_wxga_init(void)
+{
+	int ret;
+	struct msm_panel_info pinfo;
+
+#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
+	if (msm_fb_detect_client("lcdc_wxga"))
+		return 0;
+#endif
+
+	pinfo.xres = 1280;
+	pinfo.yres = 720;
+	MSM_FB_SINGLE_MODE_PANEL(&pinfo);
+	pinfo.type = LCDC_PANEL;
+	pinfo.pdest = DISPLAY_1;
+	pinfo.wait_cycle = 0;
+	pinfo.bpp = 24;
+	pinfo.fb_num = 2;
+	pinfo.clk_rate = 74250000;
+
+	pinfo.lcdc.h_back_porch = 124;
+	pinfo.lcdc.h_front_porch = 110;
+	pinfo.lcdc.h_pulse_width = 136;
+	pinfo.lcdc.v_back_porch = 19;
+	pinfo.lcdc.v_front_porch = 5;
+	pinfo.lcdc.v_pulse_width = 6;
+	pinfo.lcdc.border_clr = 0;	/* blk */
+	pinfo.lcdc.underflow_clr = 0xff;	/* blue */
+	pinfo.lcdc.hsync_skew = 0;
+
+	ret = lcdc_device_register(&pinfo);
+	if (ret)
+		printk(KERN_ERR "%s: failed to register device!\n", __func__);
+
+	return ret;
+}
+
+module_init(lcdc_wxga_init);
diff --git a/drivers/video/msm/logo.c b/drivers/video/msm/logo.c
new file mode 100644
index 0000000..c061e86
--- /dev/null
+++ b/drivers/video/msm/logo.c
@@ -0,0 +1,97 @@
+/* drivers/video/msm/logo.c
+ *
+ * Show Logo in RLE 565 format
+ *
+ * Copyright (C) 2008 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/fb.h>
+#include <linux/vt_kern.h>
+#include <linux/unistd.h>
+#include <linux/syscalls.h>
+
+#include <linux/irq.h>
+#include <asm/system.h>
+
+#define fb_width(fb)	((fb)->var.xres)
+#define fb_height(fb)	((fb)->var.yres)
+#define fb_size(fb)	((fb)->var.xres * (fb)->var.yres * 2)
+
+static void memset16(void *_ptr, unsigned short val, unsigned count)
+{
+	unsigned short *ptr = _ptr;
+	count >>= 1;
+	while (count--)
+		*ptr++ = val;
+}
+
+/* 565RLE image format: [count(2 bytes), rle(2 bytes)] */
+int load_565rle_image(char *filename)
+{
+	struct fb_info *info;
+	int fd, count, err = 0;
+	unsigned max;
+	unsigned short *data, *bits, *ptr;
+
+	info = registered_fb[0];
+	if (!info) {
+		printk(KERN_WARNING "%s: Can not access framebuffer\n",
+			__func__);
+		return -ENODEV;
+	}
+
+	fd = sys_open(filename, O_RDONLY, 0);
+	if (fd < 0) {
+		printk(KERN_WARNING "%s: Can not open %s\n",
+			__func__, filename);
+		return -ENOENT;
+	}
+	count = sys_lseek(fd, (off_t)0, 2);
+	if (count <= 0) {
+		err = -EIO;
+		goto err_logo_close_file;
+	}
+	sys_lseek(fd, (off_t)0, 0);
+	data = kmalloc(count, GFP_KERNEL);
+	if (!data) {
+		printk(KERN_WARNING "%s: Can not alloc data\n", __func__);
+		err = -ENOMEM;
+		goto err_logo_close_file;
+	}
+	if (sys_read(fd, (char *)data, count) != count) {
+		err = -EIO;
+		goto err_logo_free_data;
+	}
+
+	max = fb_width(info) * fb_height(info);
+	ptr = data;
+	bits = (unsigned short *)(info->screen_base);
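+	/* Each record is a 16-bit run length followed by a 16-bit RGB565
+	 * value; memset16() takes a byte count, so the run is passed as
+	 * n << 1. */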
+	while (count > 3) {
+		unsigned n = ptr[0];
+		if (n > max)
+			break;
+		memset16(bits, ptr[1], n << 1);
+		bits += n;
+		max -= n;
+		ptr += 2;
+		count -= 4;
+	}
+
+err_logo_free_data:
+	kfree(data);
+err_logo_close_file:
+	sys_close(fd);
+	return err;
+}
+EXPORT_SYMBOL(load_565rle_image);
diff --git a/drivers/video/msm/mddi.c b/drivers/video/msm/mddi.c
index 178b072..9b9ee94 100644
--- a/drivers/video/msm/mddi.c
+++ b/drivers/video/msm/mddi.c
@@ -2,7 +2,7 @@
  * MSM MDDI Transport
  *
  * Copyright (C) 2007 Google Incorporated
- * Copyright (C) 2007 QUALCOMM Incorporated
+ * Copyright (c) 2007-2011, Code Aurora Forum. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -10,816 +10,578 @@
  *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
  */
 
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/gfp.h>
-#include <linux/spinlock.h>
-#include <linux/clk.h>
-#include <linux/io.h>
 #include <linux/sched.h>
-#include <mach/msm_iomap.h>
-#include <mach/irqs.h>
-#include <mach/board.h>
-#include <mach/msm_fb.h>
-#include "mddi_hw.h"
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <mach/hardware.h>
+#include <asm/io.h>
 
-#define FLAG_DISABLE_HIBERNATION 0x0001
-#define FLAG_HAVE_CAPS		 0x0002
-#define FLAG_HAS_VSYNC_IRQ	 0x0004
-#define FLAG_HAVE_STATUS	 0x0008
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include "msm_fb.h"
+#include "mddihosti.h"
+#include "mddihost.h"
+#include <mach/gpio.h>
+#include <mach/clk.h>
 
-#define CMD_GET_CLIENT_CAP     0x0601
-#define CMD_GET_CLIENT_STATUS  0x0602
+static int mddi_probe(struct platform_device *pdev);
+static int mddi_remove(struct platform_device *pdev);
 
-union mddi_rev {
-	unsigned char raw[MDDI_REV_BUFFER_SIZE];
-	struct mddi_rev_packet hdr;
-	struct mddi_client_status status;
-	struct mddi_client_caps caps;
-	struct mddi_register_access reg;
-};
+static int mddi_off(struct platform_device *pdev);
+static int mddi_on(struct platform_device *pdev);
 
-struct reg_read_info {
-	struct completion done;
-	uint32_t reg;
-	uint32_t status;
-	uint32_t result;
-};
+#ifdef CONFIG_PM
+static int mddi_suspend(struct platform_device *pdev, pm_message_t state);
+static int mddi_resume(struct platform_device *pdev);
+#endif
 
-struct mddi_info {
-	uint16_t flags;
-	uint16_t version;
-	char __iomem *base;
-	int irq;
-	struct clk *clk;
-	struct msm_mddi_client_data client_data;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void mddi_early_suspend(struct early_suspend *h);
+static void mddi_early_resume(struct early_suspend *h);
+#endif
 
-	/* buffer for rev encap packets */
-	void *rev_data;
-	dma_addr_t rev_addr;
-	struct mddi_llentry *reg_write_data;
-	dma_addr_t reg_write_addr;
-	struct mddi_llentry *reg_read_data;
-	dma_addr_t reg_read_addr;
-	size_t rev_data_curr;
+static void pmdh_clk_disable(void);
+static void pmdh_clk_enable(void);
+static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
+static int pdev_list_cnt;
+static struct clk *mddi_clk;
+static struct clk *mddi_pclk;
+static struct mddi_platform_data *mddi_pdata;
 
-	spinlock_t int_lock;
-	uint32_t int_enable;
-	uint32_t got_int;
-	wait_queue_head_t int_wait;
+DEFINE_MUTEX(mddi_timer_lock);
 
-	struct mutex reg_write_lock;
-	struct mutex reg_read_lock;
-	struct reg_read_info *reg_read;
-
-	struct mddi_client_caps caps;
-	struct mddi_client_status status;
-
-	void (*power_client)(struct msm_mddi_client_data *, int);
-
-	/* client device published to bind us to the
-	 * appropriate mddi_client driver
-	 */
-	char client_name[20];
-
-	struct platform_device client_pdev;
-};
-
-static void mddi_init_rev_encap(struct mddi_info *mddi);
-
-#define mddi_readl(r) readl(mddi->base + (MDDI_##r))
-#define mddi_writel(v, r) writel((v), mddi->base + (MDDI_##r))
-
-void mddi_activate_link(struct msm_mddi_client_data *cdata)
+static int mddi_runtime_suspend(struct device *dev)
 {
-	struct mddi_info *mddi = container_of(cdata, struct mddi_info,
-					      client_data);
-
-	mddi_writel(MDDI_CMD_LINK_ACTIVE, CMD);
-}
-
-static void mddi_handle_link_list_done(struct mddi_info *mddi)
-{
-}
-
-static void mddi_reset_rev_encap_ptr(struct mddi_info *mddi)
-{
-	printk(KERN_INFO "mddi: resetting rev ptr\n");
-	mddi->rev_data_curr = 0;
-	mddi_writel(mddi->rev_addr, REV_PTR);
-	mddi_writel(mddi->rev_addr, REV_PTR);
-	mddi_writel(MDDI_CMD_FORCE_NEW_REV_PTR, CMD);
-}
-
-static void mddi_handle_rev_data(struct mddi_info *mddi, union mddi_rev *rev)
-{
-	int i;
-	struct reg_read_info *ri;
-
-	if ((rev->hdr.length <= MDDI_REV_BUFFER_SIZE - 2) &&
-	   (rev->hdr.length >= sizeof(struct mddi_rev_packet) - 2)) {
-
-		switch (rev->hdr.type) {
-		case TYPE_CLIENT_CAPS:
-			memcpy(&mddi->caps, &rev->caps,
-			       sizeof(struct mddi_client_caps));
-			mddi->flags |= FLAG_HAVE_CAPS;
-			wake_up(&mddi->int_wait);
-			break;
-		case TYPE_CLIENT_STATUS:
-			memcpy(&mddi->status, &rev->status,
-			       sizeof(struct mddi_client_status));
-			mddi->flags |= FLAG_HAVE_STATUS;
-			wake_up(&mddi->int_wait);
-			break;
-		case TYPE_REGISTER_ACCESS:
-			ri = mddi->reg_read;
-			if (ri == 0) {
-				printk(KERN_INFO "rev: got reg %x = %x without "
-						 " pending read\n",
-				       rev->reg.register_address,
-				       rev->reg.register_data_list);
-				break;
-			}
-			if (ri->reg != rev->reg.register_address) {
-				printk(KERN_INFO "rev: got reg %x = %x for "
-						 "wrong register, expected "
-						 "%x\n",
-				       rev->reg.register_address,
-				       rev->reg.register_data_list, ri->reg);
-				break;
-			}
-			mddi->reg_read = NULL;
-			ri->status = 0;
-			ri->result = rev->reg.register_data_list;
-			complete(&ri->done);
-			break;
-		default:
-			printk(KERN_INFO "rev: unknown reverse packet: "
-					 "len=%04x type=%04x CURR_REV_PTR=%x\n",
-			       rev->hdr.length, rev->hdr.type,
-			       mddi_readl(CURR_REV_PTR));
-			for (i = 0; i < rev->hdr.length + 2; i++) {
-				if ((i % 16) == 0)
-					printk(KERN_INFO "\n");
-				printk(KERN_INFO " %02x", rev->raw[i]);
-			}
-			printk(KERN_INFO "\n");
-			mddi_reset_rev_encap_ptr(mddi);
-		}
-	} else {
-		printk(KERN_INFO "bad rev length, %d, CURR_REV_PTR %x\n",
-		       rev->hdr.length, mddi_readl(CURR_REV_PTR));
-		mddi_reset_rev_encap_ptr(mddi);
-	}
-}
-
-static void mddi_wait_interrupt(struct mddi_info *mddi, uint32_t intmask);
-
-static void mddi_handle_rev_data_avail(struct mddi_info *mddi)
-{
-	uint32_t rev_data_count;
-	uint32_t rev_crc_err_count;
-	struct reg_read_info *ri;
-	size_t prev_offset;
-	uint16_t length;
-
-	union mddi_rev *crev = mddi->rev_data + mddi->rev_data_curr;
-
-	/* clear the interrupt */
-	mddi_writel(MDDI_INT_REV_DATA_AVAIL, INT);
-	rev_data_count = mddi_readl(REV_PKT_CNT);
-	rev_crc_err_count = mddi_readl(REV_CRC_ERR);
-	if (rev_data_count > 1)
-		printk(KERN_INFO "rev_data_count %d\n", rev_data_count);
-
-	if (rev_crc_err_count) {
-		printk(KERN_INFO "rev_crc_err_count %d, INT %x\n",
-		       rev_crc_err_count,  mddi_readl(INT));
-		ri = mddi->reg_read;
-		if (ri == 0) {
-			printk(KERN_INFO "rev: got crc error without pending "
-			       "read\n");
-		} else {
-			mddi->reg_read = NULL;
-			ri->status = -EIO;
-			ri->result = -1;
-			complete(&ri->done);
-		}
-	}
-
-	if (rev_data_count == 0)
-		return;
-
-	prev_offset = mddi->rev_data_curr;
-
-	length = *((uint8_t *)mddi->rev_data + mddi->rev_data_curr);
-	mddi->rev_data_curr++;
-	if (mddi->rev_data_curr == MDDI_REV_BUFFER_SIZE)
-		mddi->rev_data_curr = 0;
-	length += *((uint8_t *)mddi->rev_data + mddi->rev_data_curr) << 8;
-	mddi->rev_data_curr += 1 + length;
-	if (mddi->rev_data_curr >= MDDI_REV_BUFFER_SIZE)
-		mddi->rev_data_curr =
-			mddi->rev_data_curr % MDDI_REV_BUFFER_SIZE;
-
-	if (length > MDDI_REV_BUFFER_SIZE - 2) {
-		printk(KERN_INFO "mddi: rev data length greater than buffer"
-			"size\n");
-		mddi_reset_rev_encap_ptr(mddi);
-		return;
-	}
-
-	if (prev_offset + 2 + length >= MDDI_REV_BUFFER_SIZE) {
-		union mddi_rev tmprev;
-		size_t rem = MDDI_REV_BUFFER_SIZE - prev_offset;
-		memcpy(&tmprev.raw[0], mddi->rev_data + prev_offset, rem);
-		memcpy(&tmprev.raw[rem], mddi->rev_data, 2 + length - rem);
-		mddi_handle_rev_data(mddi, &tmprev);
-	} else {
-		mddi_handle_rev_data(mddi, crev);
-	}
-
-	if (prev_offset < MDDI_REV_BUFFER_SIZE / 2 &&
-	    mddi->rev_data_curr >= MDDI_REV_BUFFER_SIZE / 2) {
-		mddi_writel(mddi->rev_addr, REV_PTR);
-	}
-}
-
-static irqreturn_t mddi_isr(int irq, void *data)
-{
-	struct msm_mddi_client_data *cdata = data;
-	struct mddi_info *mddi = container_of(cdata, struct mddi_info,
-					      client_data);
-	uint32_t active, status;
-
-	spin_lock(&mddi->int_lock);
-
-	active = mddi_readl(INT);
-	status = mddi_readl(STAT);
-
-	mddi_writel(active, INT);
-
-	/* ignore any interrupts we have disabled */
-	active &= mddi->int_enable;
-
-	mddi->got_int |= active;
-	wake_up(&mddi->int_wait);
-
-	if (active & MDDI_INT_PRI_LINK_LIST_DONE) {
-		mddi->int_enable &= (~MDDI_INT_PRI_LINK_LIST_DONE);
-		mddi_handle_link_list_done(mddi);
-	}
-	if (active & MDDI_INT_REV_DATA_AVAIL)
-		mddi_handle_rev_data_avail(mddi);
-
-	if (active & ~MDDI_INT_NEED_CLEAR)
-		mddi->int_enable &= ~(active & ~MDDI_INT_NEED_CLEAR);
-
-	if (active & MDDI_INT_LINK_ACTIVE) {
-		mddi->int_enable &= (~MDDI_INT_LINK_ACTIVE);
-		mddi->int_enable |= MDDI_INT_IN_HIBERNATION;
-	}
-
-	if (active & MDDI_INT_IN_HIBERNATION) {
-		mddi->int_enable &= (~MDDI_INT_IN_HIBERNATION);
-		mddi->int_enable |= MDDI_INT_LINK_ACTIVE;
-	}
-
-	mddi_writel(mddi->int_enable, INTEN);
-	spin_unlock(&mddi->int_lock);
-
-	return IRQ_HANDLED;
-}
-
-static long mddi_wait_interrupt_timeout(struct mddi_info *mddi,
-					uint32_t intmask, int timeout)
-{
-	unsigned long irq_flags;
-
-	spin_lock_irqsave(&mddi->int_lock, irq_flags);
-	mddi->got_int &= ~intmask;
-	mddi->int_enable |= intmask;
-	mddi_writel(mddi->int_enable, INTEN);
-	spin_unlock_irqrestore(&mddi->int_lock, irq_flags);
-	return wait_event_timeout(mddi->int_wait, mddi->got_int & intmask,
-				  timeout);
-}
-
-static void mddi_wait_interrupt(struct mddi_info *mddi, uint32_t intmask)
-{
-	if (mddi_wait_interrupt_timeout(mddi, intmask, HZ/10) == 0)
-		printk(KERN_INFO "mddi_wait_interrupt %d, timeout "
-		       "waiting for %x, INT = %x, STAT = %x gotint = %x\n",
-		       current->pid, intmask, mddi_readl(INT), mddi_readl(STAT),
-		       mddi->got_int);
-}
-
-static void mddi_init_rev_encap(struct mddi_info *mddi)
-{
-	memset(mddi->rev_data, 0xee, MDDI_REV_BUFFER_SIZE);
-	mddi_writel(mddi->rev_addr, REV_PTR);
-	mddi_writel(MDDI_CMD_FORCE_NEW_REV_PTR, CMD);
-	mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
-}
-
-void mddi_set_auto_hibernate(struct msm_mddi_client_data *cdata, int on)
-{
-	struct mddi_info *mddi = container_of(cdata, struct mddi_info,
-					      client_data);
-	mddi_writel(MDDI_CMD_POWERDOWN, CMD);
-	mddi_wait_interrupt(mddi, MDDI_INT_IN_HIBERNATION);
-	mddi_writel(MDDI_CMD_HIBERNATE | !!on, CMD);
-	mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
-}
-
-
-static uint16_t mddi_init_registers(struct mddi_info *mddi)
-{
-	mddi_writel(0x0001, VERSION);
-	mddi_writel(MDDI_HOST_BYTES_PER_SUBFRAME, BPS);
-	mddi_writel(0x0003, SPM); /* subframes per media */
-	mddi_writel(0x0005, TA1_LEN);
-	mddi_writel(MDDI_HOST_TA2_LEN, TA2_LEN);
-	mddi_writel(0x0096, DRIVE_HI);
-	/* 0x32 normal, 0x50 for Toshiba display */
-	mddi_writel(0x0050, DRIVE_LO);
-	mddi_writel(0x003C, DISP_WAKE); /* wakeup counter */
-	mddi_writel(MDDI_HOST_REV_RATE_DIV, REV_RATE_DIV);
-
-	mddi_writel(MDDI_REV_BUFFER_SIZE, REV_SIZE);
-	mddi_writel(MDDI_MAX_REV_PKT_SIZE, REV_ENCAP_SZ);
-
-	/* disable periodic rev encap */
-	mddi_writel(MDDI_CMD_PERIODIC_REV_ENCAP, CMD);
-	mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
-
-	if (mddi_readl(PAD_CTL) == 0) {
-		/* If we are turning on band gap, need to wait 5us before
-		 * turning on the rest of the PAD */
-		mddi_writel(0x08000, PAD_CTL);
-		udelay(5);
-	}
-
-	/* Recommendation from PAD hw team */
-	mddi_writel(0xa850f, PAD_CTL);
-
-
-	/* Need an even number for counts */
-	mddi_writel(0x60006, DRIVER_START_CNT);
-
-	mddi_set_auto_hibernate(&mddi->client_data, 0);
-
-	mddi_writel(MDDI_CMD_DISP_IGNORE, CMD);
-	mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
-
-	mddi_init_rev_encap(mddi);
-	return mddi_readl(CORE_VER) & 0xffff;
-}
-
-static void mddi_suspend(struct msm_mddi_client_data *cdata)
-{
-	struct mddi_info *mddi = container_of(cdata, struct mddi_info,
-					      client_data);
-	/* turn off the client */
-	if (mddi->power_client)
-		mddi->power_client(&mddi->client_data, 0);
-	/* turn off the link */
-	mddi_writel(MDDI_CMD_RESET, CMD);
-	mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
-	/* turn off the clock */
-	clk_disable(mddi->clk);
-}
-
-static void mddi_resume(struct msm_mddi_client_data *cdata)
-{
-	struct mddi_info *mddi = container_of(cdata, struct mddi_info,
-					      client_data);
-	mddi_set_auto_hibernate(&mddi->client_data, 0);
-	/* turn on the client */
-	if (mddi->power_client)
-		mddi->power_client(&mddi->client_data, 1);
-	/* turn on the clock */
-	clk_enable(mddi->clk);
-	/* set up the local registers */
-	mddi->rev_data_curr = 0;
-	mddi_init_registers(mddi);
-	mddi_writel(mddi->int_enable, INTEN);
-	mddi_writel(MDDI_CMD_LINK_ACTIVE, CMD);
-	mddi_writel(MDDI_CMD_SEND_RTD, CMD);
-	mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
-	mddi_set_auto_hibernate(&mddi->client_data, 1);
-}
-
-static int __init mddi_get_client_caps(struct mddi_info *mddi)
-{
-	int i, j;
-
-	/* clear any stale interrupts */
-	mddi_writel(0xffffffff, INT);
-
-	mddi->int_enable = MDDI_INT_LINK_ACTIVE |
-			   MDDI_INT_IN_HIBERNATION |
-			   MDDI_INT_PRI_LINK_LIST_DONE |
-			   MDDI_INT_REV_DATA_AVAIL |
-			   MDDI_INT_REV_OVERFLOW |
-			   MDDI_INT_REV_OVERWRITE |
-			   MDDI_INT_RTD_FAILURE;
-	mddi_writel(mddi->int_enable, INTEN);
-
-	mddi_writel(MDDI_CMD_LINK_ACTIVE, CMD);
-	mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
-
-	for (j = 0; j < 3; j++) {
-		/* the toshiba vga panel does not respond to get
-		 * caps unless you SEND_RTD, but the first SEND_RTD
-		 * will fail...
-		 */
-		for (i = 0; i < 4; i++) {
-			uint32_t stat;
-
-			mddi_writel(MDDI_CMD_SEND_RTD, CMD);
-			mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
-			stat = mddi_readl(STAT);
-			printk(KERN_INFO "mddi cmd send rtd: int %x, stat %x, "
-					"rtd val %x\n", mddi_readl(INT), stat,
-					mddi_readl(RTD_VAL));
-			if ((stat & MDDI_STAT_RTD_MEAS_FAIL) == 0)
-				break;
-			msleep(1);
-		}
-
-		mddi_writel(CMD_GET_CLIENT_CAP, CMD);
-		mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
-		wait_event_timeout(mddi->int_wait, mddi->flags & FLAG_HAVE_CAPS,
-				   HZ / 100);
-
-		if (mddi->flags & FLAG_HAVE_CAPS)
-			break;
-		printk(KERN_INFO "mddi_init, timeout waiting for caps\n");
-	}
-	return mddi->flags & FLAG_HAVE_CAPS;
-}
-
-/* link must be active when this is called */
-int mddi_check_status(struct mddi_info *mddi)
-{
-	int ret = -1, retry = 3;
-	mutex_lock(&mddi->reg_read_lock);
-	mddi_writel(MDDI_CMD_PERIODIC_REV_ENCAP | 1, CMD);
-	mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
-
-	do {
-		mddi->flags &= ~FLAG_HAVE_STATUS;
-		mddi_writel(CMD_GET_CLIENT_STATUS, CMD);
-		mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
-		wait_event_timeout(mddi->int_wait,
-				   mddi->flags & FLAG_HAVE_STATUS,
-				   HZ / 100);
-
-		if (mddi->flags & FLAG_HAVE_STATUS) {
-			if (mddi->status.crc_error_count)
-				printk(KERN_INFO "mddi status: crc_error "
-					"count: %d\n",
-					mddi->status.crc_error_count);
-			else
-				ret = 0;
-			break;
-		} else
-			printk(KERN_INFO "mddi status: failed to get client "
-				"status\n");
-		mddi_writel(MDDI_CMD_SEND_RTD, CMD);
-		mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
-	} while (--retry);
-
-	mddi_writel(MDDI_CMD_PERIODIC_REV_ENCAP | 0, CMD);
-	mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
-	mutex_unlock(&mddi->reg_read_lock);
-	return ret;
-}
-
-
-void mddi_remote_write(struct msm_mddi_client_data *cdata, uint32_t val,
-		       uint32_t reg)
-{
-	struct mddi_info *mddi = container_of(cdata, struct mddi_info,
-					      client_data);
-	struct mddi_llentry *ll;
-	struct mddi_register_access *ra;
-
-	mutex_lock(&mddi->reg_write_lock);
-
-	ll = mddi->reg_write_data;
-
-	ra = &(ll->u.r);
-	ra->length = 14 + 4;
-	ra->type = TYPE_REGISTER_ACCESS;
-	ra->client_id = 0;
-	ra->read_write_info = MDDI_WRITE | 1;
-	ra->crc16 = 0;
-
-	ra->register_address = reg;
-	ra->register_data_list = val;
-
-	ll->flags = 1;
-	ll->header_count = 14;
-	ll->data_count = 4;
-	ll->data = mddi->reg_write_addr + offsetof(struct mddi_llentry,
-						   u.r.register_data_list);
-	ll->next = 0;
-	ll->reserved = 0;
-
-	mddi_writel(mddi->reg_write_addr, PRI_PTR);
-
-	mddi_wait_interrupt(mddi, MDDI_INT_PRI_LINK_LIST_DONE);
-	mutex_unlock(&mddi->reg_write_lock);
-}
-
-uint32_t mddi_remote_read(struct msm_mddi_client_data *cdata, uint32_t reg)
-{
-	struct mddi_info *mddi = container_of(cdata, struct mddi_info,
-					      client_data);
-	struct mddi_llentry *ll;
-	struct mddi_register_access *ra;
-	struct reg_read_info ri;
-	unsigned s;
-	int retry_count = 2;
-	unsigned long irq_flags;
-
-	mutex_lock(&mddi->reg_read_lock);
-
-	ll = mddi->reg_read_data;
-
-	ra = &(ll->u.r);
-	ra->length = 14;
-	ra->type = TYPE_REGISTER_ACCESS;
-	ra->client_id = 0;
-	ra->read_write_info = MDDI_READ | 1;
-	ra->crc16 = 0;
-
-	ra->register_address = reg;
-
-	ll->flags = 0x11;
-	ll->header_count = 14;
-	ll->data_count = 0;
-	ll->data = 0;
-	ll->next = 0;
-	ll->reserved = 0;
-
-	s = mddi_readl(STAT);
-
-	ri.reg = reg;
-	ri.status = -1;
-
-	do {
-		init_completion(&ri.done);
-		mddi->reg_read = &ri;
-		mddi_writel(mddi->reg_read_addr, PRI_PTR);
-
-		mddi_wait_interrupt(mddi, MDDI_INT_PRI_LINK_LIST_DONE);
-
-		/* Enable Periodic Reverse Encapsulation. */
-		mddi_writel(MDDI_CMD_PERIODIC_REV_ENCAP | 1, CMD);
-		mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
-		if (wait_for_completion_timeout(&ri.done, HZ/10) == 0 &&
-		    !ri.done.done) {
-			printk(KERN_INFO "mddi_remote_read(%x) timeout "
-					 "(%d %d %d)\n",
-			       reg, ri.status, ri.result, ri.done.done);
-			spin_lock_irqsave(&mddi->int_lock, irq_flags);
-			mddi->reg_read = NULL;
-			spin_unlock_irqrestore(&mddi->int_lock, irq_flags);
-			ri.status = -1;
-			ri.result = -1;
-		}
-		if (ri.status == 0)
-			break;
-
-		mddi_writel(MDDI_CMD_SEND_RTD, CMD);
-		mddi_writel(MDDI_CMD_LINK_ACTIVE, CMD);
-		mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
-		printk(KERN_INFO "mddi_remote_read: failed, sent "
-		       "MDDI_CMD_SEND_RTD: int %x, stat %x, rtd val %x "
-		       "curr_rev_ptr %x\n", mddi_readl(INT), mddi_readl(STAT),
-		       mddi_readl(RTD_VAL), mddi_readl(CURR_REV_PTR));
-	} while (retry_count-- > 0);
-	/* Disable Periodic Reverse Encapsulation. */
-	mddi_writel(MDDI_CMD_PERIODIC_REV_ENCAP | 0, CMD);
-	mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
-	mddi->reg_read = NULL;
-	mutex_unlock(&mddi->reg_read_lock);
-	return ri.result;
-}
-
-static struct mddi_info mddi_info[2];
-
-static int __init mddi_clk_setup(struct platform_device *pdev,
-				 struct mddi_info *mddi,
-				 unsigned long clk_rate)
-{
-	int ret;
-
-	/* set up the clocks */
-	mddi->clk = clk_get(&pdev->dev, "mddi_clk");
-	if (IS_ERR(mddi->clk)) {
-		printk(KERN_INFO "mddi: failed to get clock\n");
-		return PTR_ERR(mddi->clk);
-	}
-	ret =  clk_enable(mddi->clk);
-	if (ret)
-		goto fail;
-	ret = clk_set_rate(mddi->clk, clk_rate);
-	if (ret)
-		goto fail;
-	return 0;
-
-fail:
-	clk_put(mddi->clk);
-	return ret;
-}
-
-static int __init mddi_rev_data_setup(struct mddi_info *mddi)
-{
-	void *dma;
-	dma_addr_t dma_addr;
-
-	/* set up dma buffer */
-	dma = dma_alloc_coherent(NULL, 0x1000, &dma_addr, GFP_KERNEL);
-	if (dma == 0)
-		return -ENOMEM;
-	mddi->rev_data = dma;
-	mddi->rev_data_curr = 0;
-	mddi->rev_addr = dma_addr;
-	mddi->reg_write_data = dma + MDDI_REV_BUFFER_SIZE;
-	mddi->reg_write_addr = dma_addr + MDDI_REV_BUFFER_SIZE;
-	mddi->reg_read_data = mddi->reg_write_data + 1;
-	mddi->reg_read_addr = mddi->reg_write_addr +
-			      sizeof(*mddi->reg_write_data);
+	dev_dbg(dev, "pm_runtime: suspending...\n");
 	return 0;
 }
 
-static int __devinit mddi_probe(struct platform_device *pdev)
+static int mddi_runtime_resume(struct device *dev)
 {
-	struct msm_mddi_platform_data *pdata = pdev->dev.platform_data;
-	struct mddi_info *mddi = &mddi_info[pdev->id];
-	struct resource *resource;
-	int ret, i;
-
-	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!resource) {
-		printk(KERN_ERR "mddi: no associated mem resource!\n");
-		return -ENOMEM;
-	}
-	mddi->base = ioremap(resource->start, resource_size(resource));
-	if (!mddi->base) {
-		printk(KERN_ERR "mddi: failed to remap base!\n");
-		ret = -EINVAL;
-		goto error_ioremap;
-	}
-	resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-	if (!resource) {
-		printk(KERN_ERR "mddi: no associated irq resource!\n");
-		ret = -EINVAL;
-		goto error_get_irq_resource;
-	}
-	mddi->irq = resource->start;
-	printk(KERN_INFO "mddi: init() base=0x%p irq=%d\n", mddi->base,
-	       mddi->irq);
-	mddi->power_client = pdata->power_client;
-
-	mutex_init(&mddi->reg_write_lock);
-	mutex_init(&mddi->reg_read_lock);
-	spin_lock_init(&mddi->int_lock);
-	init_waitqueue_head(&mddi->int_wait);
-
-	ret = mddi_clk_setup(pdev, mddi, pdata->clk_rate);
-	if (ret) {
-		printk(KERN_ERR "mddi: failed to setup clock!\n");
-		goto error_clk_setup;
-	}
-
-	ret = mddi_rev_data_setup(mddi);
-	if (ret) {
-		printk(KERN_ERR "mddi: failed to setup rev data!\n");
-		goto error_rev_data;
-	}
-
-	mddi->int_enable = 0;
-	mddi_writel(mddi->int_enable, INTEN);
-	ret = request_irq(mddi->irq, mddi_isr, IRQF_DISABLED, "mddi",
-			  &mddi->client_data);
-	if (ret) {
-		printk(KERN_ERR "mddi: failed to request enable irq!\n");
-		goto error_request_irq;
-	}
-
-	/* turn on the mddi client bridge chip */
-	if (mddi->power_client)
-		mddi->power_client(&mddi->client_data, 1);
-
-	/* initialize the mddi registers */
-	mddi_set_auto_hibernate(&mddi->client_data, 0);
-	mddi_writel(MDDI_CMD_RESET, CMD);
-	mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
-	mddi->version = mddi_init_registers(mddi);
-	if (mddi->version < 0x20) {
-		printk(KERN_ERR "mddi: unsupported version 0x%x\n",
-		       mddi->version);
-		ret = -ENODEV;
-		goto error_mddi_version;
-	}
-
-	/* read the capabilities off the client */
-	if (!mddi_get_client_caps(mddi)) {
-		printk(KERN_INFO "mddi: no client found\n");
-		/* power down the panel */
-		mddi_writel(MDDI_CMD_POWERDOWN, CMD);
-		printk(KERN_INFO "mddi powerdown: stat %x\n", mddi_readl(STAT));
-		msleep(100);
-		printk(KERN_INFO "mddi powerdown: stat %x\n", mddi_readl(STAT));
-		return 0;
-	}
-	mddi_set_auto_hibernate(&mddi->client_data, 1);
-
-	if (mddi->caps.Mfr_Name == 0 && mddi->caps.Product_Code == 0)
-		pdata->fixup(&mddi->caps.Mfr_Name, &mddi->caps.Product_Code);
-
-	mddi->client_pdev.id = 0;
-	for (i = 0; i < pdata->num_clients; i++) {
-		if (pdata->client_platform_data[i].product_id ==
-		    (mddi->caps.Mfr_Name << 16 | mddi->caps.Product_Code)) {
-			mddi->client_data.private_client_data =
-				pdata->client_platform_data[i].client_data;
-			mddi->client_pdev.name =
-				pdata->client_platform_data[i].name;
-			mddi->client_pdev.id =
-				pdata->client_platform_data[i].id;
-			/* XXX: possibly set clock */
-			break;
-		}
-	}
-
-	if (i >= pdata->num_clients)
-		mddi->client_pdev.name = "mddi_c_dummy";
-	printk(KERN_INFO "mddi: registering panel %s\n",
-		mddi->client_pdev.name);
-
-	mddi->client_data.suspend = mddi_suspend;
-	mddi->client_data.resume = mddi_resume;
-	mddi->client_data.activate_link = mddi_activate_link;
-	mddi->client_data.remote_write = mddi_remote_write;
-	mddi->client_data.remote_read = mddi_remote_read;
-	mddi->client_data.auto_hibernate = mddi_set_auto_hibernate;
-	mddi->client_data.fb_resource = pdata->fb_resource;
-	if (pdev->id == 0)
-		mddi->client_data.interface_type = MSM_MDDI_PMDH_INTERFACE;
-	else if (pdev->id == 1)
-		mddi->client_data.interface_type = MSM_MDDI_EMDH_INTERFACE;
-	else {
-		printk(KERN_ERR "mddi: can not determine interface %d!\n",
-		       pdev->id);
-		ret = -EINVAL;
-		goto error_mddi_interface;
-	}
-
-	mddi->client_pdev.dev.platform_data = &mddi->client_data;
-	printk(KERN_INFO "mddi: publish: %s\n", mddi->client_name);
-	platform_device_register(&mddi->client_pdev);
+	dev_dbg(dev, "pm_runtime: resuming...\n");
 	return 0;
-
-error_mddi_interface:
-error_mddi_version:
-	free_irq(mddi->irq, 0);
-error_request_irq:
-	dma_free_coherent(NULL, 0x1000, mddi->rev_data, mddi->rev_addr);
-error_rev_data:
-error_clk_setup:
-error_get_irq_resource:
-	iounmap(mddi->base);
-error_ioremap:
-
-	printk(KERN_INFO "mddi: mddi_init() failed (%d)\n", ret);
-	return ret;
 }
 
+static int mddi_runtime_idle(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: idling...\n");
+	return 0;
+}
+
+static struct dev_pm_ops mddi_dev_pm_ops = {
+	.runtime_suspend = mddi_runtime_suspend,
+	.runtime_resume = mddi_runtime_resume,
+	.runtime_idle = mddi_runtime_idle,
+};
+
+static int pmdh_clk_status;
+int irq_enabled;
+unsigned char mddi_timer_shutdown_flag;
 
 static struct platform_driver mddi_driver = {
 	.probe = mddi_probe,
-	.driver = { .name = "msm_mddi" },
+	.remove = mddi_remove,
+#ifndef CONFIG_HAS_EARLYSUSPEND
+#ifdef CONFIG_PM
+	.suspend = mddi_suspend,
+	.resume = mddi_resume,
+#endif
+#endif
+	.shutdown = NULL,
+	.driver = {
+		.name = "mddi",
+		.pm = &mddi_dev_pm_ops,
+		   },
 };
 
-static int __init _mddi_init(void)
+extern int int_mddi_pri_flag;
+DEFINE_MUTEX(pmdh_clk_lock);
+
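+/*
+ * pmdh_clk_func(0) disables the PMDH clocks, pmdh_clk_func(1) enables
+ * them, and any other value returns the current clock status.
+ */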
+int pmdh_clk_func(int value)
+{
+	int ret = 0;
+
+	switch (value) {
+	case 0:
+		pmdh_clk_disable();
+		break;
+	case 1:
+		pmdh_clk_enable();
+		break;
+	case 2:
+	default:
+		mutex_lock(&pmdh_clk_lock);
+		ret = pmdh_clk_status;
+		mutex_unlock(&pmdh_clk_lock);
+		break;
+	}
+	return ret;
+}
+
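+/*
+ * Stop the MDDI host timer (using the shutdown-flag handshake), mask the
+ * primary MDDI interrupt and gate mddi_clk/mddi_pclk, all under
+ * pmdh_clk_lock.
+ */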
+static void pmdh_clk_disable(void)
+{
+	mutex_lock(&pmdh_clk_lock);
+	if (pmdh_clk_status == 0) {
+		mutex_unlock(&pmdh_clk_lock);
+		return;
+	}
+
+	if (mddi_host_timer.function) {
+		mutex_lock(&mddi_timer_lock);
+		mddi_timer_shutdown_flag = 1;
+		mutex_unlock(&mddi_timer_lock);
+		del_timer_sync(&mddi_host_timer);
+		mutex_lock(&mddi_timer_lock);
+		mddi_timer_shutdown_flag = 0;
+		mutex_unlock(&mddi_timer_lock);
+	}
+	if (int_mddi_pri_flag && irq_enabled) {
+		disable_irq(INT_MDDI_PRI);
+		irq_enabled = 0;
+	}
+
+	if (mddi_clk) {
+		clk_disable(mddi_clk);
+		pmdh_clk_status = 0;
+	}
+	if (mddi_pclk)
+		clk_disable(mddi_pclk);
+	mutex_unlock(&pmdh_clk_lock);
+}
+
+static void pmdh_clk_enable(void)
+{
+	mutex_lock(&pmdh_clk_lock);
+	if (pmdh_clk_status == 1) {
+		mutex_unlock(&pmdh_clk_lock);
+		return;
+	}
+
+	if (mddi_clk) {
+		clk_enable(mddi_clk);
+		pmdh_clk_status = 1;
+	}
+	if (mddi_pclk)
+		clk_enable(mddi_pclk);
+
+	if (int_mddi_pri_flag && !irq_enabled) {
+		enable_irq(INT_MDDI_PRI);
+		irq_enabled = 1;
+	}
+
+	if (mddi_host_timer.function)
+		mddi_host_timer_service(0);
+
+	mutex_unlock(&pmdh_clk_lock);
+}
+
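+/*
+ * Wait briefly for any in-flight DMA update to complete, then run the
+ * panel-off sequence with the PMDH clocks temporarily enabled.
+ */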
+static int mddi_off(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+	boolean dma_pending, dma_update_flag;
+	int ret, i;
+
+	mfd = platform_get_drvdata(pdev);
+
+	for (i = 0; i < 6; i++) {
+		dma_update_flag = mfd->dma_update_flag;
+		dma_pending = mfd->dma->busy;
+		if (dma_update_flag && !dma_pending)
+			break;
+		msleep(5);
+	}
+
+	pmdh_clk_enable();
+	ret = panel_next_off(pdev);
+	pmdh_clk_disable();
+
+	if (mddi_pdata && mddi_pdata->mddi_power_save)
+		mddi_pdata->mddi_power_save(0);
+#ifdef CONFIG_MSM_BUS_SCALING
+	mdp_bus_scale_update_request(0);
+#else
+	if (mfd->ebi1_clk)
+		clk_disable(mfd->ebi1_clk);
+#endif
+	pm_runtime_put(&pdev->dev);
+	return ret;
+}
+
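+/*
+ * Panel-on path: re-enable the PMDH clocks, optionally run forward-link
+ * skew calibration below 50 MHz (when ENABLE_FWD_LINK_SKEW_CALIBRATION
+ * is defined), then program the MDDI clock from the framebuffer pixel
+ * clock, capped at panel_info.clk_max.
+ */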
+static int mddi_on(struct platform_device *pdev)
+{
+	int ret = 0;
+	u32 clk_rate;
+	struct msm_fb_data_type *mfd;
+#ifdef ENABLE_FWD_LINK_SKEW_CALIBRATION
+	mddi_host_type host_idx = MDDI_HOST_PRIM;
+	u32 stat_reg;
+#endif
+
+	mfd = platform_get_drvdata(pdev);
+	pm_runtime_get(&pdev->dev);
+	if (mddi_pdata && mddi_pdata->mddi_power_save)
+		mddi_pdata->mddi_power_save(1);
+
+	pmdh_clk_enable();
+#ifdef ENABLE_FWD_LINK_SKEW_CALIBRATION
+	if (mddi_client_type < 2) {
+		/* For skew calibration, clock should be less than 50MHz */
+		if (!clk_set_min_rate(mddi_clk, 49000000)) {
+			stat_reg = mddi_host_reg_in(STAT);
+			printk(KERN_DEBUG "\n stat_reg = 0x%x", stat_reg);
+			mddi_host_reg_out(CMD, MDDI_CMD_HIBERNATE);
+			if (stat_reg & (0x1 << 4))
+				mddi_host_reg_out(CMD, MDDI_CMD_LINK_ACTIVE);
+
+			mddi_host_reg_out(CMD, MDDI_CMD_SEND_RTD);
+			mddi_send_fw_link_skew_cal(host_idx);
+			mddi_host_reg_out(CMD, MDDI_CMD_SEND_RTD);
+			mddi_host_reg_out(CMD, MDDI_CMD_HIBERNATE | 1);
+		} else {
+			printk(KERN_ERR "%s: clk_set_min_rate failed\n",
+				__func__);
+		}
+	}
+#endif
+
+	clk_rate = mfd->fbi->var.pixclock;
+	clk_rate = min(clk_rate, mfd->panel_info.clk_max);
+
+	if (mddi_pdata &&
+	    mddi_pdata->mddi_sel_clk &&
+	    mddi_pdata->mddi_sel_clk(&clk_rate))
+			printk(KERN_ERR
+			  "%s: can't select mddi io clk target rate = %d\n",
+			  __func__, clk_rate);
+
+	if (clk_set_min_rate(mddi_clk, clk_rate) < 0)
+		printk(KERN_ERR "%s: clk_set_min_rate failed\n",
+			__func__);
+
+#ifdef CONFIG_MSM_BUS_SCALING
+	mdp_bus_scale_update_request(2);
+#else
+	if (mfd->ebi1_clk)
+		clk_enable(mfd->ebi1_clk);
+#endif
+	ret = panel_next_on(pdev);
+
+	return ret;
+}
+
+static int mddi_resource_initialized;
+
+static int mddi_probe(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+	struct platform_device *mdp_dev = NULL;
+	struct msm_fb_panel_data *pdata = NULL;
+	int rc;
+	resource_size_t size;
+	u32 clk_rate;
+
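+	/*
+	 * The id 0 device carries the MDDI host resources: map the register
+	 * block and cache the platform data.  Later devices describe panels
+	 * and are chained to an "mdp" child device below.
+	 */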
+	if ((pdev->id == 0) && (pdev->num_resources >= 0)) {
+		mddi_pdata = pdev->dev.platform_data;
+
+		size = resource_size(&pdev->resource[0]);
+		msm_pmdh_base = ioremap(pdev->resource[0].start, size);
+
+		MSM_FB_INFO("primary mddi base phy_addr = 0x%x virt = 0x%x\n",
+				pdev->resource[0].start, (int) msm_pmdh_base);
+
+		if (unlikely(!msm_pmdh_base))
+			return -ENOMEM;
+
+		if (mddi_pdata && mddi_pdata->mddi_power_save)
+			mddi_pdata->mddi_power_save(1);
+
+		mddi_resource_initialized = 1;
+		return 0;
+	}
+
+	if (!mddi_resource_initialized)
+		return -EPERM;
+
+	mfd = platform_get_drvdata(pdev);
+
+	if (!mfd)
+		return -ENODEV;
+
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
+		return -ENOMEM;
+
+	mdp_dev = platform_device_alloc("mdp", pdev->id);
+	if (!mdp_dev)
+		return -ENOMEM;
+
+	/*
+	 * link to the latest pdev
+	 */
+	mfd->pdev = mdp_dev;
+	mfd->dest = DISPLAY_LCD;
+
+	/*
+	 * alloc panel device data
+	 */
+	if (platform_device_add_data
+	    (mdp_dev, pdev->dev.platform_data,
+	     sizeof(struct msm_fb_panel_data))) {
+		printk(KERN_ERR "mddi_probe: platform_device_add_data failed!\n");
+		platform_device_put(mdp_dev);
+		return -ENOMEM;
+	}
+	/*
+	 * data chain
+	 */
+	pdata = mdp_dev->dev.platform_data;
+	pdata->on = mddi_on;
+	pdata->off = mddi_off;
+	pdata->next = pdev;
+	pdata->clk_func = pmdh_clk_func;
+	/*
+	 * get/set panel specific fb info
+	 */
+	mfd->panel_info = pdata->panel_info;
+
+	if (mfd->index == 0)
+		mfd->fb_imgType = MSMFB_DEFAULT_TYPE;
+	else
+		mfd->fb_imgType = MDP_RGB_565;
+
+	clk_rate = mfd->panel_info.clk_max;
+	if (mddi_pdata &&
+	    mddi_pdata->mddi_sel_clk &&
+	    mddi_pdata->mddi_sel_clk(&clk_rate))
+			printk(KERN_ERR
+			  "%s: can't select mddi io clk target rate = %d\n",
+			  __func__, clk_rate);
+
+	if (clk_set_max_rate(mddi_clk, clk_rate) < 0)
+		printk(KERN_ERR "%s: clk_set_max_rate failed\n", __func__);
+	mfd->panel_info.clk_rate = mfd->panel_info.clk_min;
+
+	if (!mddi_client_type)
+		mddi_client_type = mfd->panel_info.lcd.rev;
+	else if (!mfd->panel_info.lcd.rev)
+		printk(KERN_ERR
+		"%s: mddi client is trying to revert back to type 1 !!!\n",
+		__func__);
+
+	/*
+	 * set driver data
+	 */
+	platform_set_drvdata(mdp_dev, mfd);
+	rc = pm_runtime_set_active(&pdev->dev);
+	if (rc < 0)
+		printk(KERN_ERR "pm_runtime: fail to set active\n");
+
+	rc = 0;
+	pm_runtime_enable(&pdev->dev);
+#ifndef CONFIG_MSM_BUS_SCALING
+	mfd->ebi1_clk = clk_get(NULL, "ebi1_mddi_clk");
+	if (IS_ERR(mfd->ebi1_clk))
+		return PTR_ERR(mfd->ebi1_clk);
+	clk_set_rate(mfd->ebi1_clk, 65000000);
+#endif
+	/*
+	 * register in mdp driver
+	 */
+	rc = platform_device_add(mdp_dev);
+	if (rc)
+		goto mddi_probe_err;
+
+	pdev_list[pdev_list_cnt++] = pdev;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	mfd->mddi_early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB;
+	mfd->mddi_early_suspend.suspend = mddi_early_suspend;
+	mfd->mddi_early_suspend.resume = mddi_early_resume;
+	register_early_suspend(&mfd->mddi_early_suspend);
+#endif
+
+	return 0;
+
+mddi_probe_err:
+	platform_device_put(mdp_dev);
+	return rc;
+}
+
+static int mddi_pad_ctrl;
+static int mddi_power_locked;
+
+int mddi_client_power(unsigned int client_id)
+{
+	int ret = 0;
+	if (mddi_pdata && mddi_pdata->mddi_client_power)
+		ret = mddi_pdata->mddi_client_power(client_id);
+	return ret;
+}
+
+void mddi_disable(int lock)
+{
+	mddi_host_type host_idx = MDDI_HOST_PRIM;
+
+	if (mddi_power_locked)
+		return;
+
+	if (lock)
+		mddi_power_locked = 1;
+	pmdh_clk_enable();
+
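+	/* Park the pads and drop the link clock floor to 0 before gating. */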
+	mddi_pad_ctrl = mddi_host_reg_in(PAD_CTL);
+	mddi_host_reg_out(PAD_CTL, 0x0);
+
+	if (clk_set_min_rate(mddi_clk, 0) < 0)
+		printk(KERN_ERR "%s: clk_set_min_rate failed\n", __func__);
+
+	pmdh_clk_disable();
+
+	if (mddi_pdata && mddi_pdata->mddi_power_save)
+		mddi_pdata->mddi_power_save(0);
+}
+
+#ifdef CONFIG_PM
+static int mddi_is_in_suspend;
+
+static int mddi_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	mddi_host_type host_idx = MDDI_HOST_PRIM;
+	if (mddi_is_in_suspend)
+		return 0;
+
+	mddi_is_in_suspend = 1;
+
+	if (mddi_power_locked)
+		return 0;
+
+	pmdh_clk_enable();
+
+	mddi_pad_ctrl = mddi_host_reg_in(PAD_CTL);
+	mddi_host_reg_out(PAD_CTL, 0x0);
+
+	if (clk_set_min_rate(mddi_clk, 0) < 0)
+		printk(KERN_ERR "%s: clk_set_min_rate failed\n", __func__);
+
+	pmdh_clk_disable();
+
+	return 0;
+}
+
+static int mddi_resume(struct platform_device *pdev)
+{
+	mddi_host_type host_idx = MDDI_HOST_PRIM;
+
+	if (!mddi_is_in_suspend)
+		return 0;
+
+	mddi_is_in_suspend = 0;
+
+	if (mddi_power_locked)
+		return 0;
+
+	pmdh_clk_enable();
+
+	mddi_host_reg_out(PAD_CTL, mddi_pad_ctrl);
+
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void mddi_early_suspend(struct early_suspend *h)
+{
+	pm_message_t state;
+	struct msm_fb_data_type *mfd = container_of(h, struct msm_fb_data_type,
+							mddi_early_suspend);
+
+	state.event = PM_EVENT_SUSPEND;
+	mddi_suspend(mfd->pdev, state);
+}
+
+static void mddi_early_resume(struct early_suspend *h)
+{
+	struct msm_fb_data_type *mfd = container_of(h, struct msm_fb_data_type,
+							mddi_early_suspend);
+	mddi_resume(mfd->pdev);
+}
+#endif
+
+static int mddi_remove(struct platform_device *pdev)
+{
+	pm_runtime_disable(&pdev->dev);
+	if (mddi_host_timer.function) {
+		mutex_lock(&mddi_timer_lock);
+		mddi_timer_shutdown_flag = 1;
+		mutex_unlock(&mddi_timer_lock);
+		del_timer_sync(&mddi_host_timer);
+		mutex_lock(&mddi_timer_lock);
+		mddi_timer_shutdown_flag = 0;
+		mutex_unlock(&mddi_timer_lock);
+	}
+
+	iounmap(msm_pmdh_base);
+
+	return 0;
+}
+
+static int mddi_register_driver(void)
 {
 	return platform_driver_register(&mddi_driver);
 }
 
-module_init(_mddi_init);
+static int __init mddi_driver_init(void)
+{
+	int ret;
+	pmdh_clk_status = 0;
+
+	mddi_clk = clk_get(NULL, "mddi_clk");
+	if (IS_ERR(mddi_clk)) {
+		printk(KERN_ERR "can't find mddi_clk\n");
+		return PTR_ERR(mddi_clk);
+	}
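+	/* Start with a 49MHz link clock floor; panels adjust it in mddi_on(). */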
+	ret = clk_set_min_rate(mddi_clk, 49000000);
+	if (ret)
+		printk(KERN_ERR "Can't set mddi_clk min rate to 49000000\n");
+
+	printk(KERN_INFO "mddi_clk init rate is %lu\n",
+		clk_get_rate(mddi_clk));
+	mddi_pclk = clk_get(NULL, "mddi_pclk");
+	if (IS_ERR(mddi_pclk))
+		mddi_pclk = NULL;
+	pmdh_clk_enable();
+
+	ret = mddi_register_driver();
+	if (ret) {
+		pmdh_clk_disable();
+		clk_put(mddi_clk);
+		if (mddi_pclk)
+			clk_put(mddi_pclk);
+		printk(KERN_ERR "mddi_register_driver() failed!\n");
+		return ret;
+	}
+
+	mddi_init();
+
+	return ret;
+}
+
+module_init(mddi_driver_init);
diff --git a/drivers/video/msm/mddi_client_dummy.c b/drivers/video/msm/mddi_client_dummy.c
index d2a091c..ebbae87 100644
--- a/drivers/video/msm/mddi_client_dummy.c
+++ b/drivers/video/msm/mddi_client_dummy.c
@@ -15,7 +15,6 @@
  * GNU General Public License for more details.
  */
 
-#include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
diff --git a/drivers/video/msm/mddi_client_nt35399.c b/drivers/video/msm/mddi_client_nt35399.c
index f239f4a..c9e9349 100644
--- a/drivers/video/msm/mddi_client_nt35399.c
+++ b/drivers/video/msm/mddi_client_nt35399.c
@@ -21,7 +21,6 @@
 #include <linux/interrupt.h>
 #include <linux/sched.h>
 #include <linux/gpio.h>
-#include <linux/slab.h>
 #include <mach/msm_fb.h>
 
 static DECLARE_WAIT_QUEUE_HEAD(nt35399_vsync_wait);
diff --git a/drivers/video/msm/mddi_client_toshiba.c b/drivers/video/msm/mddi_client_toshiba.c
index f9bc932..8868781 100644
--- a/drivers/video/msm/mddi_client_toshiba.c
+++ b/drivers/video/msm/mddi_client_toshiba.c
@@ -21,7 +21,6 @@
 #include <linux/interrupt.h>
 #include <linux/gpio.h>
 #include <linux/sched.h>
-#include <linux/slab.h>
 #include <mach/msm_fb.h>
 
 
@@ -60,6 +59,7 @@
 	struct msm_panel_data panel_data;
 	struct msmfb_callback *toshiba_callback;
 	int toshiba_got_int;
+	int irq;
 };
 
 
@@ -175,47 +175,6 @@
 	return IRQ_HANDLED;
 }
 
-static int setup_vsync(struct panel_info *panel,
-		       int init)
-{
-	int ret;
-	int gpio = 97;
-	unsigned int irq;
-
-	if (!init) {
-		ret = 0;
-		goto uninit;
-	}
-	ret = gpio_request(gpio, "vsync");
-	if (ret)
-		goto err_request_gpio_failed;
-
-	ret = gpio_direction_input(gpio);
-	if (ret)
-		goto err_gpio_direction_input_failed;
-
-	ret = irq = gpio_to_irq(gpio);
-	if (ret < 0)
-		goto err_get_irq_num_failed;
-
-	ret = request_irq(irq, toshiba_vsync_interrupt, IRQF_TRIGGER_RISING,
-			  "vsync", panel);
-	if (ret)
-		goto err_request_irq_failed;
-	printk(KERN_INFO "vsync on gpio %d now %d\n",
-	       gpio, gpio_get_value(gpio));
-	return 0;
-
-uninit:
-	free_irq(gpio_to_irq(gpio), panel);
-err_request_irq_failed:
-err_get_irq_num_failed:
-err_gpio_direction_input_failed:
-	gpio_free(gpio);
-err_request_gpio_failed:
-	return ret;
-}
-
 static int mddi_toshiba_probe(struct platform_device *pdev)
 {
 	int ret;
@@ -232,10 +191,16 @@
 	client_data->remote_write(client_data, GPIOSEL_VWAKEINT, GPIOSEL);
 	client_data->remote_write(client_data, INTMASK_VWAKEOUT, INTMASK);
 
-	ret = setup_vsync(panel, 1);
+	ret = platform_get_irq_byname(pdev, "vsync");
+	if (ret < 0)
+		goto err_plat_get_irq;
+
+	panel->irq = ret;
+	ret = request_irq(panel->irq, toshiba_vsync_interrupt,
+			  IRQF_TRIGGER_RISING, "vsync", panel);
 	if (ret) {
 		dev_err(&pdev->dev, "mddi_bridge_setup_vsync failed\n");
-		return ret;
+		goto err_req_irq;
 	}
 
 	panel->client_data = client_data;
@@ -258,13 +223,19 @@
 	platform_device_register(&panel->pdev);
 
 	return 0;
+
+err_req_irq:
+err_plat_get_irq:
+	kfree(panel);
+	return ret;
 }
 
 static int mddi_toshiba_remove(struct platform_device *pdev)
 {
 	struct panel_info *panel = platform_get_drvdata(pdev);
 
-	setup_vsync(panel, 0);
+	platform_set_drvdata(pdev, NULL);
+	free_irq(panel->irq, panel);
 	kfree(panel);
 	return 0;
 }
diff --git a/drivers/video/msm/mddi_ext.c b/drivers/video/msm/mddi_ext.c
new file mode 100644
index 0000000..0ecd593
--- /dev/null
+++ b/drivers/video/msm/mddi_ext.c
@@ -0,0 +1,363 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <mach/hardware.h>
+#include <asm/io.h>
+
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <linux/clk.h>
+#include <mach/clk.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "msm_fb.h"
+#include "mddihosti.h"
+
+static int mddi_ext_probe(struct platform_device *pdev);
+static int mddi_ext_remove(struct platform_device *pdev);
+
+static int mddi_ext_off(struct platform_device *pdev);
+static int mddi_ext_on(struct platform_device *pdev);
+
+static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
+static int pdev_list_cnt;
+
+static int mddi_ext_suspend(struct platform_device *pdev, pm_message_t state);
+static int mddi_ext_resume(struct platform_device *pdev);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void mddi_ext_early_suspend(struct early_suspend *h);
+static void mddi_ext_early_resume(struct early_suspend *h);
+#endif
+
+static int mddi_ext_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: suspending...\n");
+	return 0;
+}
+
+static int mddi_ext_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: resuming...\n");
+	return 0;
+}
+
+static int mddi_ext_runtime_idle(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: idling...\n");
+	return 0;
+}
+static struct dev_pm_ops mddi_ext_dev_pm_ops = {
+	.runtime_suspend = mddi_ext_runtime_suspend,
+	.runtime_resume = mddi_ext_runtime_resume,
+	.runtime_idle = mddi_ext_runtime_idle,
+};
+
+static struct platform_driver mddi_ext_driver = {
+	.probe = mddi_ext_probe,
+	.remove = mddi_ext_remove,
+#ifndef CONFIG_HAS_EARLYSUSPEND
+#ifdef CONFIG_PM
+	.suspend = mddi_ext_suspend,
+	.resume = mddi_ext_resume,
+#endif
+#endif
+	.shutdown = NULL,
+	.driver = {
+		.name = "mddi_ext",
+		.pm = &mddi_ext_dev_pm_ops,
+		   },
+};
+
+static struct clk *mddi_ext_clk;
+static struct clk *mddi_ext_pclk;
+static struct mddi_platform_data *mddi_ext_pdata;
+
+extern int int_mddi_ext_flag;
+
+static int mddi_ext_off(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	ret = panel_next_off(pdev);
+	mddi_host_stop_ext_display();
+	pm_runtime_put(&pdev->dev);
+	return ret;
+}
+
+static int mddi_ext_on(struct platform_device *pdev)
+{
+	int ret = 0;
+	u32 clk_rate;
+	struct msm_fb_data_type *mfd;
+
+	mfd = platform_get_drvdata(pdev);
+	pm_runtime_get(&pdev->dev);
+	clk_rate = mfd->fbi->var.pixclock;
+	clk_rate = min(clk_rate, mfd->panel_info.clk_max);
+
+	if (mddi_ext_pdata &&
+	    mddi_ext_pdata->mddi_sel_clk &&
+	    mddi_ext_pdata->mddi_sel_clk(&clk_rate))
+		printk(KERN_ERR
+			  "%s: can't select mddi io clk target rate = %d\n",
+			  __func__, clk_rate);
+
+	if (clk_set_min_rate(mddi_ext_clk, clk_rate) < 0)
+		printk(KERN_ERR "%s: clk_set_min_rate failed\n",
+			__func__);
+
+	mddi_host_start_ext_display();
+	ret = panel_next_on(pdev);
+
+	return ret;
+}
+
+static int mddi_ext_resource_initialized;
+
+static int mddi_ext_probe(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+	struct platform_device *mdp_dev = NULL;
+	struct msm_fb_panel_data *pdata = NULL;
+	int rc;
+	resource_size_t size;
+	u32 clk_rate;
+
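+	/*
+	 * The id 0 device maps the external (EMDH) register block; later
+	 * devices describe panels and are chained to an "mdp" child device.
+	 */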
+	if ((pdev->id == 0) && (pdev->num_resources >= 0)) {
+		mddi_ext_pdata = pdev->dev.platform_data;
+
+		size = resource_size(&pdev->resource[0]);
+		msm_emdh_base = ioremap(pdev->resource[0].start, size);
+
+		MSM_FB_INFO("external mddi base address = 0x%x\n",
+				pdev->resource[0].start);
+
+		if (unlikely(!msm_emdh_base))
+			return -ENOMEM;
+
+		mddi_ext_resource_initialized = 1;
+		return 0;
+	}
+
+	if (!mddi_ext_resource_initialized)
+		return -EPERM;
+
+	mfd = platform_get_drvdata(pdev);
+
+	if (!mfd)
+		return -ENODEV;
+
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
+		return -ENOMEM;
+
+	mdp_dev = platform_device_alloc("mdp", pdev->id);
+	if (!mdp_dev)
+		return -ENOMEM;
+
+	/*
+	 * link to the latest pdev
+	 */
+	mfd->pdev = mdp_dev;
+	mfd->dest = DISPLAY_EXT_MDDI;
+
+	/*
+	 * alloc panel device data
+	 */
+	if (platform_device_add_data
+	    (mdp_dev, pdev->dev.platform_data,
+	     sizeof(struct msm_fb_panel_data))) {
+		printk(KERN_ERR "mddi_ext_probe: platform_device_add_data failed!\n");
+		platform_device_put(mdp_dev);
+		return -ENOMEM;
+	}
+	/*
+	 * data chain
+	 */
+	pdata = mdp_dev->dev.platform_data;
+	pdata->on = mddi_ext_on;
+	pdata->off = mddi_ext_off;
+	pdata->next = pdev;
+
+	/*
+	 * get/set panel specific fb info
+	 */
+	mfd->panel_info = pdata->panel_info;
+	mfd->fb_imgType = MDP_RGB_565;
+
+	clk_rate = mfd->panel_info.clk_max;
+	if (mddi_ext_pdata &&
+	    mddi_ext_pdata->mddi_sel_clk &&
+	    mddi_ext_pdata->mddi_sel_clk(&clk_rate))
+			printk(KERN_ERR
+			  "%s: can't select mddi io clk target rate = %d\n",
+			  __func__, clk_rate);
+
+	if (clk_set_max_rate(mddi_ext_clk, clk_rate) < 0)
+		printk(KERN_ERR "%s: clk_set_max_rate failed\n", __func__);
+	mfd->panel_info.clk_rate = mfd->panel_info.clk_min;
+
+	/*
+	 * set driver data
+	 */
+	platform_set_drvdata(mdp_dev, mfd);
+	rc = pm_runtime_set_active(&pdev->dev);
+	if (rc < 0)
+		printk(KERN_ERR "pm_runtime: fail to set active\n");
+
+	rc = 0;
+	pm_runtime_enable(&pdev->dev);
+	/*
+	 * register in mdp driver
+	 */
+	rc = platform_device_add(mdp_dev);
+	if (rc)
+		goto mddi_ext_probe_err;
+
+	pdev_list[pdev_list_cnt++] = pdev;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	mfd->mddi_ext_early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB;
+	mfd->mddi_ext_early_suspend.suspend = mddi_ext_early_suspend;
+	mfd->mddi_ext_early_suspend.resume = mddi_ext_early_resume;
+	register_early_suspend(&mfd->mddi_ext_early_suspend);
+#endif
+
+	return 0;
+
+mddi_ext_probe_err:
+	platform_device_put(mdp_dev);
+	return rc;
+}
+
+static int mddi_ext_is_in_suspend;
+
+static int mddi_ext_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	if (mddi_ext_is_in_suspend)
+		return 0;
+
+	mddi_ext_is_in_suspend = 1;
+
+	if (clk_set_min_rate(mddi_ext_clk, 0) < 0)
+		printk(KERN_ERR "%s: clk_set_min_rate failed\n", __func__);
+
+	clk_disable(mddi_ext_clk);
+	if (mddi_ext_pclk)
+		clk_disable(mddi_ext_pclk);
+
+	disable_irq(INT_MDDI_EXT);
+
+	return 0;
+}
+
+static int mddi_ext_resume(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+
+	mfd = platform_get_drvdata(pdev);
+
+	if (!mddi_ext_is_in_suspend)
+		return 0;
+
+	mddi_ext_is_in_suspend = 0;
+	enable_irq(INT_MDDI_EXT);
+
+	clk_enable(mddi_ext_clk);
+	if (mddi_ext_pclk)
+		clk_enable(mddi_ext_pclk);
+
+	return 0;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void mddi_ext_early_suspend(struct early_suspend *h)
+{
+	pm_message_t state;
+	struct msm_fb_data_type *mfd = container_of(h, struct msm_fb_data_type,
+							mddi_ext_early_suspend);
+
+	state.event = PM_EVENT_SUSPEND;
+	mddi_ext_suspend(mfd->pdev, state);
+}
+
+static void mddi_ext_early_resume(struct early_suspend *h)
+{
+	struct msm_fb_data_type *mfd = container_of(h, struct msm_fb_data_type,
+							mddi_ext_early_suspend);
+	mddi_ext_resume(mfd->pdev);
+}
+#endif
+
+static int mddi_ext_remove(struct platform_device *pdev)
+{
+	pm_runtime_disable(&pdev->dev);
+	iounmap(msm_emdh_base);
+	return 0;
+}
+
+static int mddi_ext_register_driver(void)
+{
+	return platform_driver_register(&mddi_ext_driver);
+}
+
+static int __init mddi_ext_driver_init(void)
+{
+	int ret;
+
+	mddi_ext_clk = clk_get(NULL, "emdh_clk");
+	if (IS_ERR(mddi_ext_clk)) {
+		printk(KERN_ERR "can't find emdh_clk\n");
+		return PTR_ERR(mddi_ext_clk);
+	}
+	clk_enable(mddi_ext_clk);
+
+	mddi_ext_pclk = clk_get(NULL, "emdh_pclk");
+	if (IS_ERR(mddi_ext_pclk))
+		mddi_ext_pclk = NULL;
+	else
+		clk_enable(mddi_ext_pclk);
+
+	ret = mddi_ext_register_driver();
+	if (ret) {
+		clk_disable(mddi_ext_clk);
+		clk_put(mddi_ext_clk);
+		if (mddi_ext_pclk) {
+			clk_disable(mddi_ext_pclk);
+			clk_put(mddi_ext_pclk);
+		}
+		printk(KERN_ERR "mddi_ext_register_driver() failed!\n");
+		return ret;
+	}
+	mddi_init();
+
+	return ret;
+}
+
+module_init(mddi_ext_driver_init);
diff --git a/drivers/video/msm/mddi_ext_lcd.c b/drivers/video/msm/mddi_ext_lcd.c
new file mode 100644
index 0000000..da79513
--- /dev/null
+++ b/drivers/video/msm/mddi_ext_lcd.c
@@ -0,0 +1,90 @@
+/* Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_fb.h"
+#include "mddihost.h"
+#include "mddihosti.h"
+
+static int mddi_ext_lcd_on(struct platform_device *pdev);
+static int mddi_ext_lcd_off(struct platform_device *pdev);
+
+static int mddi_ext_lcd_on(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static int mddi_ext_lcd_off(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static int __devinit mddi_ext_lcd_probe(struct platform_device *pdev)
+{
+	msm_fb_add_device(pdev);
+
+	return 0;
+}
+
+static struct platform_driver this_driver = {
+	.probe  = mddi_ext_lcd_probe,
+	.driver = {
+		.name   = "extmddi_svga",
+	},
+};
+
+static struct msm_fb_panel_data mddi_ext_lcd_panel_data = {
+	.panel_info.xres = 800,
+	.panel_info.yres = 600,
+	.panel_info.mode2_xres = 0,
+	.panel_info.mode2_yres = 0,
+	.panel_info.mode2_bpp = 0,
+	.panel_info.type = EXT_MDDI_PANEL,
+	.panel_info.pdest = DISPLAY_1,
+	.panel_info.wait_cycle = 0,
+	.panel_info.bpp = 18,
+	.panel_info.fb_num = 2,
+	.panel_info.clk_rate = 122880000,
+	.panel_info.clk_min  = 120000000,
+	.panel_info.clk_max  = 125000000,
+	.on = mddi_ext_lcd_on,
+	.off = mddi_ext_lcd_off,
+};
+
+static struct platform_device this_device = {
+	.name   = "extmddi_svga",
+	.id	= 0,
+	.dev	= {
+		.platform_data = &mddi_ext_lcd_panel_data,
+	}
+};
+
+static int __init mddi_ext_lcd_init(void)
+{
+	int ret;
+	struct msm_panel_info *pinfo;
+
+	ret = platform_driver_register(&this_driver);
+	if (!ret) {
+		pinfo = &mddi_ext_lcd_panel_data.panel_info;
+		pinfo->lcd.vsync_enable = FALSE;
+		pinfo->mddi.vdopkt = MDDI_DEFAULT_PRIM_PIX_ATTR;
+
+		ret = platform_device_register(&this_device);
+		if (ret)
+			platform_driver_unregister(&this_driver);
+	}
+
+	return ret;
+}
+
+module_init(mddi_ext_lcd_init);
diff --git a/drivers/video/msm/mddi_hw.h b/drivers/video/msm/mddi_hw.h
index 45cc01f..47bb449 100644
--- a/drivers/video/msm/mddi_hw.h
+++ b/drivers/video/msm/mddi_hw.h
@@ -53,6 +53,9 @@
 #define MDDI_MF_CNT             0x0084
 #define MDDI_CURR_REV_PTR       0x0088
 #define MDDI_CORE_VER           0x008c
+#define MDDI_FIFO_ALLOC         0x0090
+#define MDDI_PAD_IO_CTL         0x00a0
+#define MDDI_PAD_CAL            0x00a4
 
 #define MDDI_INT_PRI_PTR_READ       0x0001
 #define MDDI_INT_SEC_PTR_READ       0x0002
@@ -125,8 +128,14 @@
 /* MDP sends 256 pixel packets, so lower value hibernates more without
  * significantly increasing latency of waiting for next subframe */
 #define MDDI_HOST_BYTES_PER_SUBFRAME  0x3C00
+
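+/* MDP 3.1/4.0 hosts use a longer turn-around count and reverse-link divisor. */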
+#if defined(CONFIG_MSM_MDP31) || defined(CONFIG_MSM_MDP40)
+#define MDDI_HOST_TA2_LEN       0x001a
+#define MDDI_HOST_REV_RATE_DIV  0x0004
+#else
 #define MDDI_HOST_TA2_LEN       0x000c
 #define MDDI_HOST_REV_RATE_DIV  0x0002
+#endif
 
 
 struct __attribute__((packed)) mddi_rev_packet {
diff --git a/drivers/video/msm/mddi_orise.c b/drivers/video/msm/mddi_orise.c
new file mode 100644
index 0000000..dc913e6
--- /dev/null
+++ b/drivers/video/msm/mddi_orise.c
@@ -0,0 +1,127 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_fb.h"
+#include "mddihost.h"
+#include "mddihosti.h"
+
+#define MDDI_ORISE_1_2 1
+#define write_client_reg(__X, __Y, __Z) {\
+	mddi_queue_register_write(__X, __Y, TRUE, 0);\
+}
+
+static int mddi_orise_lcd_on(struct platform_device *pdev);
+static int mddi_orise_lcd_off(struct platform_device *pdev);
+static int __init mddi_orise_probe(struct platform_device *pdev);
+static int __init mddi_orise_init(void);
+
+/* function used to turn on the display */
+static void mddi_orise_prim_lcd_init(void)
+{
+	write_client_reg(0x00110000, 0, TRUE);
+	mddi_wait(150);
+	write_client_reg(0x00290000, 0, TRUE);
+}
+
+static struct platform_driver this_driver = {
+	.driver = {
+		.name   = "mddi_orise",
+	},
+};
+
+static struct msm_fb_panel_data mddi_orise_panel_data = {
+	.on = mddi_orise_lcd_on,
+	.off = mddi_orise_lcd_off,
+};
+
+static struct platform_device this_device = {
+	.name	= "mddi_orise",
+	.id	= MDDI_ORISE_1_2,
+	.dev	= {
+		.platform_data = &mddi_orise_panel_data,
+	}
+};
+
+static int mddi_orise_lcd_on(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+	mfd = platform_get_drvdata(pdev);
+	if (!mfd)
+		return -ENODEV;
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	mddi_orise_prim_lcd_init();
+
+	return 0;
+}
+
+static int mddi_orise_lcd_off(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static int __init mddi_orise_probe(struct platform_device *pdev)
+{
+	msm_fb_add_device(pdev);
+	return 0;
+}
+
+static int __init mddi_orise_init(void)
+{
+	int ret;
+	struct msm_panel_info *pinfo;
+
+#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
+	u32 id;
+	ret = msm_fb_detect_client("mddi_orise");
+	if (ret == -ENODEV)
+		return 0;
+
+	if (ret) {
+		id = mddi_get_client_id();
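+		/* Only register when the MDDI client ID matches this panel. */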
+		if (((id >> 16) != 0xbe8d) || ((id & 0xffff) != 0x8031))
+			return 0;
+	}
+#endif
+	ret = platform_driver_probe(&this_driver, mddi_orise_probe);
+	if (!ret) {
+		pinfo = &mddi_orise_panel_data.panel_info;
+		pinfo->xres = 480;
+		pinfo->yres = 800;
+		MSM_FB_SINGLE_MODE_PANEL(pinfo);
+		pinfo->type = MDDI_PANEL;
+		pinfo->pdest = DISPLAY_1;
+		pinfo->mddi.vdopkt = MDDI_DEFAULT_PRIM_PIX_ATTR;
+		pinfo->wait_cycle = 0;
+		pinfo->bpp = 18;
+		pinfo->fb_num = 2;
+		pinfo->clk_rate = 192000000;
+		pinfo->clk_min = 192000000;
+		pinfo->clk_max = 192000000;
+		pinfo->lcd.rev = 2;
+		pinfo->lcd.vsync_enable = FALSE;
+		pinfo->lcd.refx100 = 6050;
+		pinfo->lcd.v_back_porch = 2;
+		pinfo->lcd.v_front_porch = 2;
+		pinfo->lcd.v_pulse_width = 105;
+		pinfo->lcd.hw_vsync_mode = TRUE;
+		pinfo->lcd.vsync_notifier_period = 0;
+
+		ret = platform_device_register(&this_device);
+		if (ret)
+			platform_driver_unregister(&this_driver);
+	}
+	return ret;
+}
+module_init(mddi_orise_init);
diff --git a/drivers/video/msm/mddi_prism.c b/drivers/video/msm/mddi_prism.c
new file mode 100644
index 0000000..5269b22
--- /dev/null
+++ b/drivers/video/msm/mddi_prism.c
@@ -0,0 +1,111 @@
+/* Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_fb.h"
+#include "mddihost.h"
+#include "mddihosti.h"
+
+static int prism_lcd_on(struct platform_device *pdev);
+static int prism_lcd_off(struct platform_device *pdev);
+
+static int prism_lcd_on(struct platform_device *pdev)
+{
+	/* Set the MDP pixel data attributes for Primary Display */
+	mddi_host_write_pix_attr_reg(0x00C3);
+
+	return 0;
+}
+
+static int prism_lcd_off(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static int __devinit prism_probe(struct platform_device *pdev)
+{
+	msm_fb_add_device(pdev);
+
+	return 0;
+}
+
+static struct platform_driver this_driver = {
+	.probe  = prism_probe,
+	.driver = {
+		.name   = "mddi_prism_wvga",
+	},
+};
+
+static struct msm_fb_panel_data prism_panel_data = {
+	.on = prism_lcd_on,
+	.off = prism_lcd_off,
+};
+
+static struct platform_device this_device = {
+	.name   = "mddi_prism_wvga",
+	.id	= 0,
+	.dev	= {
+		.platform_data = &prism_panel_data,
+	}
+};
+
+static int __init prism_init(void)
+{
+	int ret;
+	struct msm_panel_info *pinfo;
+
+#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
+	u32 id;
+
+	ret = msm_fb_detect_client("mddi_prism_wvga");
+	if (ret == -ENODEV)
+		return 0;
+
+	if (ret) {
+		id = mddi_get_client_id();
+
+		if (((id >> 16) != 0x4474) || ((id & 0xffff) == 0x8960))
+			return 0;
+	}
+#endif
+	ret = platform_driver_register(&this_driver);
+	if (!ret) {
+		pinfo = &prism_panel_data.panel_info;
+		pinfo->xres = 800;
+		pinfo->yres = 480;
+		MSM_FB_SINGLE_MODE_PANEL(pinfo);
+		pinfo->type = MDDI_PANEL;
+		pinfo->pdest = DISPLAY_1;
+		pinfo->mddi.vdopkt = MDDI_DEFAULT_PRIM_PIX_ATTR;
+		pinfo->wait_cycle = 0;
+		pinfo->bpp = 18;
+		pinfo->fb_num = 2;
+		pinfo->clk_rate = 153600000;
+		pinfo->clk_min = 140000000;
+		pinfo->clk_max = 160000000;
+		pinfo->lcd.vsync_enable = TRUE;
+		pinfo->lcd.refx100 = 6050;
+		pinfo->lcd.v_back_porch = 23;
+		pinfo->lcd.v_front_porch = 20;
+		pinfo->lcd.v_pulse_width = 105;
+		pinfo->lcd.hw_vsync_mode = TRUE;
+		pinfo->lcd.vsync_notifier_period = 0;
+
+		ret = platform_device_register(&this_device);
+		if (ret)
+			platform_driver_unregister(&this_driver);
+	}
+
+	return ret;
+}
+
+module_init(prism_init);
diff --git a/drivers/video/msm/mddi_quickvx.c b/drivers/video/msm/mddi_quickvx.c
new file mode 100644
index 0000000..330e679
--- /dev/null
+++ b/drivers/video/msm/mddi_quickvx.c
@@ -0,0 +1,718 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <mach/pmic.h>
+#include "msm_fb.h"
+#include "mddihost.h"
+#include "mddihosti.h"
+
+/* WVGA Primary Display */
+#define MDDI_QUICKVX_1_2		1
+/* MDDI Manufacturer Code */
+#define QUICKVX_MDDI_MFR_CODE	0xc583
+/* MDDI Product Code */
+#define QUICKVX_MDDI_PRD_CODE	0x5800
+
+/* Register Address Maps */
+/* MDDI Address Anti-fuse values for bits [31:22] */
+#define QUICKVX_ADDR_31_22_AF	(0X000 << 22)
+
+/* MDDI Address Maps */
+/* VEE Block Address Base */
+#define QUICKVX_VEE_BASE		(QUICKVX_ADDR_31_22_AF | 0x00000000)
+/* SPI Block Address Base */
+#define QUICKVX_SPI_BASE		(QUICKVX_ADDR_31_22_AF | 0x00010000)
+/* Clock and Reset (CAR) Address Base */
+#define QUICKVX_CAR_BASE		(QUICKVX_ADDR_31_22_AF | 0x00020000)
+/* Register Control Block (RCB) Address Base */
+#define QUICKVX_RCB_BASE		(QUICKVX_ADDR_31_22_AF | 0x00030000)
+/* Cellular RAM Address Base */
+#define QUICKVX_CELLRAM_BASE	(QUICKVX_ADDR_31_22_AF | 0x00100000)
+/* FB through A2F Address Base */
+#define QUICKVX_FB_A2F_BASE		(QUICKVX_ADDR_31_22_AF | 0x00200000)
+
+
+/***************************************************
+ * Common Registers in Register Control Block (RCB) Registers
+ ***************************************************/
+ /* CellRAM Configuration RCR Register */
+#define QUICKVX_RCB_RCR_REG			(QUICKVX_RCB_BASE | 0x00000000)
+/* Image Effect Register */
+#define QUICKVX_RCB_IER_REG			(QUICKVX_RCB_BASE | 0x00000004)
+/* Row Number Register */
+#define QUICKVX_RCB_ROWNUM_REG		(QUICKVX_RCB_BASE | 0x00000008)
+/* TCON Timing0 Register */
+#define QUICKVX_RCB_TCON0_REG		(QUICKVX_RCB_BASE | 0x0000000C)
+/* TCON Timing1 Register */
+#define QUICKVX_RCB_TCON1_REG		(QUICKVX_RCB_BASE | 0x00000010)
+/* TCON Timing2 Register */
+#define QUICKVX_RCB_TCON2_REG		(QUICKVX_RCB_BASE | 0x00000014)
+/* PWM Control Register */
+#define QUICKVX_RCB_PWMC_REG		(QUICKVX_RCB_BASE | 0x00000018)
+/* PWM Width Register */
+#define QUICKVX_RCB_PWMW_REG		(QUICKVX_RCB_BASE | 0x0000001C)
+/* VEE Configuration Register */
+#define QUICKVX_RCB_VEECONF_REG		(QUICKVX_RCB_BASE | 0x00000020)
+/* CellRAM Configuration BCR Register */
+#define QUICKVX_RCB_CELLBCR_REG		(QUICKVX_RCB_BASE | 0x00000024)
+/* CellRAM Configuration Control Register */
+#define QUICKVX_RCB_CELLCC_REG		(QUICKVX_RCB_BASE | 0x00000028)
+/* Use Case Register */
+#define QUICKVX_RCB_USECASE_REG		(QUICKVX_RCB_BASE | 0x00000100)
+/* Video Parameter Register */
+#define QUICKVX_RCB_VPARM_REG		(QUICKVX_RCB_BASE | 0x00000104)
+/* MDDI Client Wake-up Register */
+#define QUICKVX_RCB_MCW_REG			(QUICKVX_RCB_BASE | 0x00000108)
+/* Burst Length Register */
+#define QUICKVX_RCB_BURSTLN_REG		(QUICKVX_RCB_BASE | 0x0000010C)
+/* Display Attributes Register */
+#define QUICKVX_RCB_DISPATTR_REG	(QUICKVX_RCB_BASE | 0x00000110)
+/* Error Status Register */
+#define QUICKVX_RCB_ERRSTAT_REG		(QUICKVX_RCB_BASE | 0x00000114)
+/* Error Mask Register */
+#define QUICKVX_RCB_ERRMSK_REG		(QUICKVX_RCB_BASE | 0x00000118)
+/* MDDI ASSP FIFO Overflow Address Register */
+#define QUICKVX_RCB_ASSPFOA_REG		(QUICKVX_RCB_BASE | 0x0000011C)
+/* MDDI Fabric FIFO Overflow Address Register */
+#define QUICKVX_RCB_FABFOA_REG		(QUICKVX_RCB_BASE | 0x00000120)
+/* Incoming RGB FIFO Overflow Address Register */
+#define QUICKVX_RCB_IRFOA_REG		(QUICKVX_RCB_BASE | 0x00000124)
+/* SPI Overflow Address Register */
+#define QUICKVX_RCB_SPIOA_REG		(QUICKVX_RCB_BASE | 0x00000128)
+/* Ping Buffer Address Register */
+#define QUICKVX_RCB_PINGBA_REG		(QUICKVX_RCB_BASE | 0x0000012C)
+/* Pong Buffer Address Register */
+#define QUICKVX_RCB_PONGBA_REG		(QUICKVX_RCB_BASE | 0x00000130)
+/* Configuration Done Register */
+#define QUICKVX_RCB_CONFDONE_REG	(QUICKVX_RCB_BASE | 0x00000134)
+/* FIFO Flush Register */
+#define QUICKVX_RCB_FFLUSH_REG		(QUICKVX_RCB_BASE | 0x00000138)
+
+
+/***************************************************
+ * SPI Block Registers
+ ***************************************************/
+/* SPI Rx0 Register */
+#define QUICKVX_SPI_RX0_REG			(QUICKVX_SPI_BASE | 0x00000000)
+/* SPI Rx1 Register */
+#define QUICKVX_SPI_RX1_REG			(QUICKVX_SPI_BASE | 0x00000004)
+/* SPI Rx2 Register */
+#define QUICKVX_SPI_RX2_REG			(QUICKVX_SPI_BASE | 0x00000008)
+/* SPI Rx3 Register */
+#define QUICKVX_SPI_RX3_REG			(QUICKVX_SPI_BASE | 0x0000000C)
+/* SPI Rx4 Register */
+#define QUICKVX_SPI_RX4_REG			(QUICKVX_SPI_BASE | 0x00000010)
+/* SPI Rx5 Register */
+#define QUICKVX_SPI_RX5_REG			(QUICKVX_SPI_BASE | 0x00000014)
+/* SPI Rx6 Register */
+#define QUICKVX_SPI_RX6_REG			(QUICKVX_SPI_BASE | 0x00000018)
+/* SPI Rx7 Register */
+#define QUICKVX_SPI_RX7_REG			(QUICKVX_SPI_BASE | 0x0000001C)
+/* SPI Tx0 Register */
+#define QUICKVX_SPI_TX0_REG			(QUICKVX_SPI_BASE | 0x00000020)
+/* SPI Tx1 Register */
+#define QUICKVX_SPI_TX1_REG			(QUICKVX_SPI_BASE | 0x00000024)
+/* SPI Tx2 Register */
+#define QUICKVX_SPI_TX2_REG			(QUICKVX_SPI_BASE | 0x00000028)
+/* SPI Tx3 Register */
+#define QUICKVX_SPI_TX3_REG			(QUICKVX_SPI_BASE | 0x0000002C)
+/* SPI Tx4 Register */
+#define QUICKVX_SPI_TX4_REG			(QUICKVX_SPI_BASE | 0x00000030)
+/* SPI Tx5 Register */
+#define QUICKVX_SPI_TX5_REG			(QUICKVX_SPI_BASE | 0x00000034)
+/* SPI Tx6 Register */
+#define QUICKVX_SPI_TX6_REG			(QUICKVX_SPI_BASE | 0x00000038)
+/* SPI Tx7 Register */
+#define QUICKVX_SPI_TX7_REG			(QUICKVX_SPI_BASE | 0x0000003C)
+/* SPI Control Register */
+#define QUICKVX_SPI_CTRL_REG		(QUICKVX_SPI_BASE | 0x00000040)
+/* SPI Transfer Length Register */
+#define QUICKVX_SPI_TLEN_REG		(QUICKVX_SPI_BASE | 0x00000044)
+
+
+/***************************************************
+ * Clock and Reset (CAR) Block Registers
+ ***************************************************/
+/* ASSP Global Clock Enable Register */
+#define QUICKVX_CAR_ASSP_GCE_REG	(QUICKVX_CAR_BASE | 0x00000000)
+/* VLP Control1 Register */
+#define QUICKVX_CAR_VLPCTRL1_REG	(QUICKVX_CAR_BASE | 0x00000004)
+/* VLP Control2 Register */
+#define QUICKVX_CAR_VLPCTRL2_REG	(QUICKVX_CAR_BASE | 0x00000008)
+/* Clock Selection Register */
+#define QUICKVX_CAR_CLKSEL_REG		(QUICKVX_CAR_BASE | 0x0000000C)
+/* PLL Control Register */
+#define QUICKVX_CAR_PLLCTRL_REG		(QUICKVX_CAR_BASE | 0x00000010)
+/* PLL Clock Ratio Register */
+#define QUICKVX_CAR_PLLCLKRATIO_REG	(QUICKVX_CAR_BASE | 0x00000014)
+
+
+/***************************************************
+ * VEE Block Registers
+ ***************************************************/
+/* VEE Control Register */
+#define QUICKVX_VEE_VEECTRL_REG		(QUICKVX_VEE_BASE | 0x00000000)
+/* Strength Register */
+#define QUICKVX_VEE_STRENGTH_REG	(QUICKVX_VEE_BASE | 0x0000000C)
+/* Variance Register */
+#define QUICKVX_VEE_VARIANCE_REG	(QUICKVX_VEE_BASE | 0x00000010)
+/* Slope Register */
+#define QUICKVX_VEE_SLOPE_REG		(QUICKVX_VEE_BASE | 0x00000014)
+/* Sharpen Control0 Register */
+#define QUICKVX_VEE_SHRPCTRL0_REG	(QUICKVX_VEE_BASE | 0x0000001C)
+/* Sharpen Control1 Register */
+#define QUICKVX_VEE_SHRPCTRL1_REG	(QUICKVX_VEE_BASE | 0x00000020)
+/* Upper Horizontal Position Register */
+#define QUICKVX_VEE_UHPOS_REG		(QUICKVX_VEE_BASE | 0x00000024)
+/* Lower Horizontal Position Register */
+#define QUICKVX_VEE_LHPOS_REG		(QUICKVX_VEE_BASE | 0x00000028)
+/* Upper Vertical Position Register */
+#define QUICKVX_VEE_UVPOS_REG		(QUICKVX_VEE_BASE | 0x0000002C)
+/* Lower Vertical Position Register */
+#define QUICKVX_VEE_LVPOS_REG		(QUICKVX_VEE_BASE | 0x00000030)
+/* Upper Frame Width Register */
+#define QUICKVX_VEE_UFWDTH_REG		(QUICKVX_VEE_BASE | 0x00000034)
+/* Lower Frame Width Register */
+#define QUICKVX_VEE_LFWDTH_REG		(QUICKVX_VEE_BASE | 0x00000038)
+/* Upper Frame Height Register */
+#define QUICKVX_VEE_UFHGHT_REG		(QUICKVX_VEE_BASE | 0x0000003C)
+/* Lower Frame Height Register */
+#define QUICKVX_VEE_LFHGHT_REG		(QUICKVX_VEE_BASE | 0x00000040)
+/* Control0 Register */
+#define QUICKVX_VEE_CTRL0_REG		(QUICKVX_VEE_BASE | 0x00000044)
+/* Control1 Register */
+#define QUICKVX_VEE_CTRL1_REG		(QUICKVX_VEE_BASE | 0x00000048)
+/* Video Enhancement Enable Register */
+#define QUICKVX_VEE_VDOEEN_REG		(QUICKVX_VEE_BASE | 0x0000004C)
+/* Black Level Register */
+#define QUICKVX_VEE_BLCKLEV_REG		(QUICKVX_VEE_BASE | 0x00000050)
+/* White Level Register */
+#define QUICKVX_VEE_WHTLEV_REG		(QUICKVX_VEE_BASE | 0x00000054)
+/* Amplification Limits Register */
+#define QUICKVX_VEE_AMPLMTS_REG		(QUICKVX_VEE_BASE | 0x00000060)
+/* Dithering Mode Register */
+#define QUICKVX_VEE_DITHMOD_REG		(QUICKVX_VEE_BASE | 0x00000064)
+/* Upper Look-up Data Register */
+#define QUICKVX_VEE_ULUD_REG		(QUICKVX_VEE_BASE | 0x00000080)
+/* Lower Look-up Data Register */
+#define QUICKVX_VEE_LLUD_REG		(QUICKVX_VEE_BASE | 0x00000084)
+/* Look-up Address Register */
+#define QUICKVX_VEE_LUADDR_REG		(QUICKVX_VEE_BASE | 0x00000088)
+/* Look-up Write Enable Register */
+#define QUICKVX_VEE_LUWREN_REG		(QUICKVX_VEE_BASE | 0x0000008C)
+/* VEE ID Register */
+#define QUICKVX_VEE_VEEID_REG		(QUICKVX_VEE_BASE | 0x000003FC)
+/* M_11 Register */
+#define QUICKVX_VEE_M_11_REG		(QUICKVX_VEE_BASE | 0x000000C0)
+/* M_12 Register */
+#define QUICKVX_VEE_M_12_REG		(QUICKVX_VEE_BASE | 0x000000C4)
+/* M_13 Register */
+#define QUICKVX_VEE_M_13_REG		(QUICKVX_VEE_BASE | 0x000000C8)
+/* M_21 Register */
+#define QUICKVX_VEE_M_21_REG		(QUICKVX_VEE_BASE | 0x000000CC)
+/* M_22 Register */
+#define QUICKVX_VEE_M_22_REG		(QUICKVX_VEE_BASE | 0x000000D0)
+/* M_23 Register */
+#define QUICKVX_VEE_M_23_REG		(QUICKVX_VEE_BASE | 0x000000D4)
+/* M_31 Register */
+#define QUICKVX_VEE_M_31_REG		(QUICKVX_VEE_BASE | 0x000000D8)
+/* M_32 Register */
+#define QUICKVX_VEE_M_32_REG		(QUICKVX_VEE_BASE | 0x000000DC)
+/* M_33 Register */
+#define QUICKVX_VEE_M_33_REG		(QUICKVX_VEE_BASE | 0x000000E0)
+/* R Offset Register */
+#define QUICKVX_VEE_OFFSET_R_REG	(QUICKVX_VEE_BASE | 0x000000E8)
+/* G Offset Register */
+#define QUICKVX_VEE_OFFSET_G_REG	(QUICKVX_VEE_BASE | 0x000000EC)
+/* B Offset Register */
+#define QUICKVX_VEE_OFFSET_B_REG	(QUICKVX_VEE_BASE | 0x000000F0)
+
+/* LCD Reset Register */
+#define QUICKVX_FB_A2F_LCD_RESET_REG (QUICKVX_FB_A2F_BASE | 0x00000000)
+
+/* Register bit defines */
+/* PLL Lock bit in the PLL Control Register */
+#define QUICKVX_PLL_LOCK_BIT		(1 << 7)
+
+#define QL_SPI_CTRL_rSPISTart(x) (x)
+#define QL_SPI_CTRL_rCPHA(x) (x << 1)
+#define QL_SPI_CTRL_rCPOL(x) (x << 2)
+#define QL_SPI_CTRL_rLSB(x) (x << 3)
+#define QL_SPI_CTRL_rSLVSEL(x) (x << 4)
+#define QL_SPI_CTRL_MASK_rTxDone (1 << 9)
+
+#define QL_SPI_LCD_DEV_ID 0x1c
+#define QL_SPI_LCD_RS(x) (x << 1)
+#define QL_SPI_LCD_RW(x) (x)
+#define QL_SPI_LCD_INDEX_START_BYTE ((QL_SPI_LCD_DEV_ID << 2) | \
+	QL_SPI_LCD_RS(0) | QL_SPI_LCD_RW(0))
+#define QL_SPI_LCD_CMD_START_BYTE ((QL_SPI_LCD_DEV_ID << 2) | \
+	QL_SPI_LCD_RS(1) | QL_SPI_LCD_RW(0))
+#define QL_SPI_CTRL_LCD_START (QL_SPI_CTRL_rSPISTart(1) | \
+	QL_SPI_CTRL_rCPHA(1) | QL_SPI_CTRL_rCPOL(1) | \
+	QL_SPI_CTRL_rLSB(0) | QL_SPI_CTRL_rSLVSEL(0))
+
+int ql_mddi_write(uint32 address, uint32 value)
+{
+	uint32 regval = 0;
+	int ret = 0;
+
+	ret = mddi_queue_register_write(address, value, TRUE, 0);
+
+	if (!ret) {
+		ret = mddi_queue_register_read(address, &regval, TRUE, 0);
+		if (regval != value) {
+			MDDI_MSG_DEBUG("\nMismatch: ql_mddi_write[0x%x]->0x%x "
+				"r0x%x\n", address, value, regval);
+		} else {
+			MDDI_MSG_DEBUG("\nMatch: ql_mddi_write[0x%x]->0x%x "
+				"r0x%x\n", address, value, regval);
+		}
+	}
+
+	return ret;
+}
+
+int ql_mddi_read(uint32 address, uint32 *regval)
+{
+	int ret = 0;
+
+	ret = mddi_queue_register_read(address, regval, TRUE, 0);
+	MDDI_MSG_DEBUG("\nql_mddi_read[0x%x]=0x%x", address, *regval);
+
+	return ret;
+}
+
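+/*
+ * LCD register writes go through the bridge's SPI master in two phases,
+ * index then command: each phase is framed by a start byte (device ID plus
+ * RS/RW bits) and a 16-bit value shifted out of TX0, then polled for TxDone.
+ */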
+int ql_send_spi_cmd_to_lcd(uint32 index, uint32 cmd)
+{
+	int retry, ret;
+	uint32 readval;
+
+	MDDI_MSG_DEBUG("\n %s(): index 0x%x, cmd 0x%x", __func__, index, cmd);
+	/* do the index phase */
+	/* send 24 bits in the index phase */
+	ql_mddi_write(QUICKVX_SPI_TLEN_REG, 23);
+
+	/* send 24 bits in the index phase, starting at bit 23 of TX0 reg */
+	ql_mddi_write(QUICKVX_SPI_TX0_REG,
+		(QL_SPI_LCD_INDEX_START_BYTE << 16) | index);
+
+	/* set start */
+	ql_mddi_write(QUICKVX_SPI_CTRL_REG,  QL_SPI_CTRL_LCD_START);
+	retry = 0;
+
+	do {
+		ret = ql_mddi_read(QUICKVX_SPI_CTRL_REG, &readval);
+
+		if (ret || ++retry > 5) {
+			MDDI_MSG_DEBUG("\n ql_send_spi_cmd_to_lcd: retry "
+				"timeout at index phase, ret = %d", ret);
+			return -EIO;
+		}
+		mddi_wait(1);
+	} while ((readval & QL_SPI_CTRL_MASK_rTxDone) == 0);
+
+	/* do the command phase */
+	/* send 24 bits in the cmd phase */
+	ql_mddi_write(QUICKVX_SPI_TLEN_REG, 23);
+
+	/* send 24 bits in the cmd phase, starting at bit 23 of TX0 reg. */
+	ql_mddi_write(QUICKVX_SPI_TX0_REG,
+		(QL_SPI_LCD_CMD_START_BYTE << 16) | cmd);
+
+	/* set start */
+	ql_mddi_write(QUICKVX_SPI_CTRL_REG,  QL_SPI_CTRL_LCD_START);
+	retry = 0;
+
+	do {
+		ret = ql_mddi_read(QUICKVX_SPI_CTRL_REG, &readval);
+
+		if (ret || ++retry > 5) {
+			MDDI_MSG_DEBUG("\n ql_send_spi_cmd_to_lcd: retry "
+				"timeout at cmd phase, ret = %d", ret);
+			return -EIO;
+		}
+		mddi_wait(1);
+	} while ((readval & QL_SPI_CTRL_MASK_rTxDone) == 0);
+
+	return 0;
+}
+
+
+int ql_send_spi_data_from_lcd(uint32 index, uint32 *value)
+{
+	int retry, ret;
+	uint32 readval;
+
+	MDDI_MSG_DEBUG("\n %s(): index 0x%x", __func__, index);
+	/* do the index phase */
+	/* send 24 bits in the index phase */
+	ql_mddi_write(QUICKVX_SPI_TLEN_REG, 23);
+
+	/* send 24 bits in the index phase, starting at bit 23 of TX0 reg */
+	ql_mddi_write(QUICKVX_SPI_TX0_REG,
+		(QL_SPI_LCD_INDEX_START_BYTE << 16) | index);
+
+	/* set start */
+	ql_mddi_write(QUICKVX_SPI_CTRL_REG,  QL_SPI_CTRL_LCD_START);
+	retry = 0;
+
+	do {
+		ret = ql_mddi_read(QUICKVX_SPI_CTRL_REG, &readval);
+
+		if (ret || ++retry > 5) {
+			MDDI_MSG_DEBUG("\n ql_send_spi_data_from_lcd: retry "
+				"timeout at index phase, ret = %d", ret);
+			return -EIO;
+		}
+		mddi_wait(1);
+	} while ((readval & QL_SPI_CTRL_MASK_rTxDone) == 0);
+
+	/* do the command phase */
+	/* send 8 bits  and read 24 bits in the cmd phase, so total 32 bits */
+	ql_mddi_write(QUICKVX_SPI_TLEN_REG, 31);
+
+	/* start byte sits in bits 31:24 of TX0; the remaining 24 clocks read back the data */
+	ql_mddi_write(QUICKVX_SPI_TX0_REG,
+		(QL_SPI_LCD_CMD_START_BYTE << 16) << 8);
+
+	/* set start */
+	ql_mddi_write(QUICKVX_SPI_CTRL_REG,  QL_SPI_CTRL_LCD_START);
+	retry = 0;
+
+	do {
+		ret = ql_mddi_read(QUICKVX_SPI_CTRL_REG, &readval);
+
+		if (ret || ++retry > 5) {
+			MDDI_MSG_DEBUG("\n ql_send_spi_data_from_lcd: retry "
+				"timeout at cmd phase, ret = %d", ret);
+			return -EIO;
+		}
+		mddi_wait(1);
+	} while ((readval & QL_SPI_CTRL_MASK_rTxDone) == 0);
+
+	/* value will appear at lower 16 bits */
+	ret = ql_mddi_read(QUICKVX_SPI_RX0_REG, value);
+
+	if (!ret) {
+		*value = *value & 0xffff;
+		MDDI_MSG_DEBUG("\n QUICKVX_SPI_RX0_REG value = 0x%x", *value);
+	} else
+		MDDI_MSG_DEBUG("\n Read QUICKVX_SPI_RX0_REG Failed");
+
+	return ret;
+}
+
+/* Global Variables */
+static uint32 mddi_quickvx_rows_per_second;
+static uint32 mddi_quickvx_usecs_per_refresh;
+static uint32 mddi_quickvx_rows_per_refresh;
+
+void mddi_quickvx_configure_registers(void)
+{
+	MDDI_MSG_DEBUG("\n%s(): ", __func__);
+	ql_mddi_write(QUICKVX_CAR_CLKSEL_REG, 0x00007000);
+
+	ql_mddi_write(QUICKVX_RCB_PWMW_REG, 0x0000FFFF);
+
+	ql_mddi_write(QUICKVX_RCB_PWMC_REG, 0x00000001);
+
+	ql_mddi_write(QUICKVX_RCB_CONFDONE_REG, 0x00000000);
+
+	/* display is x width = 480, y width = 864 */
+	ql_mddi_write(QUICKVX_RCB_TCON0_REG, 0x035f01df);
+
+	/* VFP=2, VBP=4, HFP=16, HBP=16 */
+	ql_mddi_write(QUICKVX_RCB_TCON1_REG, 0x01e301e1);
+
+	/* VSW =2, HSW=8 */
+	ql_mddi_write(QUICKVX_RCB_TCON2_REG, 0x000000e1);
+
+	ql_mddi_write(QUICKVX_RCB_DISPATTR_REG, 0x00000000);
+
+	ql_mddi_write(QUICKVX_RCB_USECASE_REG, 0x00000025);
+
+	ql_mddi_write(QUICKVX_RCB_VPARM_REG, 0x00000888);
+
+	ql_mddi_write(QUICKVX_RCB_VEECONF_REG, 0x00000001);
+
+	ql_mddi_write(QUICKVX_RCB_IER_REG, 0x00000000);
+
+	ql_mddi_write(QUICKVX_RCB_RCR_REG, 0x80000010);
+
+	ql_mddi_write(QUICKVX_RCB_CELLBCR_REG, 0x8008746F);
+
+	ql_mddi_write(QUICKVX_RCB_CELLCC_REG, 0x800000A3);
+
+	ql_mddi_write(QUICKVX_RCB_CONFDONE_REG, 0x00000001);
+}
+
+void mddi_quickvx_prim_lcd_init(void)
+{
+	uint32 value;
+
+	MDDI_MSG_DEBUG("\n%s(): ", __func__);
+	ql_send_spi_data_from_lcd(0, &value);
+
+	ql_send_spi_cmd_to_lcd(0x0100, 0x3000); /* power control1 */
+	ql_send_spi_cmd_to_lcd(0x0101, 0x4010); /* power control2 */
+	ql_send_spi_cmd_to_lcd(0x0106, 0x0000); /* auto seq setting */
+	mddi_wait(3);
+
+	ql_mddi_write(QUICKVX_FB_A2F_LCD_RESET_REG, 0x00000001);
+	mddi_wait(1);
+	ql_mddi_write(QUICKVX_FB_A2F_LCD_RESET_REG, 0x00000000);
+	mddi_wait(1);
+	ql_mddi_write(QUICKVX_FB_A2F_LCD_RESET_REG, 0x00000001);
+	mddi_wait(10);
+
+	ql_send_spi_cmd_to_lcd(0x0001, 0x0310); /* driver out control */
+	ql_send_spi_cmd_to_lcd(0x0002, 0x0100); /* lcd ac control */
+	ql_send_spi_cmd_to_lcd(0x0003, 0x0000); /* entry mode */
+	ql_send_spi_cmd_to_lcd(0x0007, 0x0000); /* disp cont1 */
+	ql_send_spi_cmd_to_lcd(0x0008, 0x0004); /* disp cont2 */
+	ql_send_spi_cmd_to_lcd(0x0009, 0x000C); /* disp cont3 */
+	ql_send_spi_cmd_to_lcd(0x000C, 0x4010); /* disp if cont1 */
+	ql_send_spi_cmd_to_lcd(0x000E, 0x0000); /* disp if cont2 */
+	ql_send_spi_cmd_to_lcd(0x0020, 0x013F); /* panel if cont1 */
+	ql_send_spi_cmd_to_lcd(0x0022, 0x7600); /* panel if cont3 */
+	ql_send_spi_cmd_to_lcd(0x0023, 0x1C0A); /* panel if cont4 */
+	ql_send_spi_cmd_to_lcd(0x0024, 0x1C2C); /* panel if cont5 */
+	ql_send_spi_cmd_to_lcd(0x0025, 0x1C4E); /* panel if cont6 */
+	ql_send_spi_cmd_to_lcd(0x0027, 0x0000); /* panel if cont8 */
+	ql_send_spi_cmd_to_lcd(0x0028, 0x760C); /* panel if cont9 */
+	ql_send_spi_cmd_to_lcd(0x0300, 0x0000); /* gamma adj0 */
+	ql_send_spi_cmd_to_lcd(0x0301, 0x0502); /* gamma adj1 */
+	ql_send_spi_cmd_to_lcd(0x0302, 0x0705); /* gamma adj2 */
+	ql_send_spi_cmd_to_lcd(0x0303, 0x0000); /* gamma adj3 */
+	ql_send_spi_cmd_to_lcd(0x0304, 0x0200); /* gamma adj4 */
+	ql_send_spi_cmd_to_lcd(0x0305, 0x0707); /* gamma adj5 */
+	ql_send_spi_cmd_to_lcd(0x0306, 0x1010); /* gamma adj6 */
+	ql_send_spi_cmd_to_lcd(0x0307, 0x0202); /* gamma adj7 */
+	ql_send_spi_cmd_to_lcd(0x0308, 0x0704); /* gamma adj8 */
+	ql_send_spi_cmd_to_lcd(0x0309, 0x0707); /* gamma adj9 */
+	ql_send_spi_cmd_to_lcd(0x030A, 0x0000); /* gamma adja */
+	ql_send_spi_cmd_to_lcd(0x030B, 0x0000); /* gamma adjb */
+	ql_send_spi_cmd_to_lcd(0x030C, 0x0707); /* gamma adjc */
+	ql_send_spi_cmd_to_lcd(0x030D, 0x1010); /* gamma adjd */
+	ql_send_spi_cmd_to_lcd(0x0310, 0x0104); /* gamma adj10 */
+	ql_send_spi_cmd_to_lcd(0x0311, 0x0503); /* gamma adj11 */
+	ql_send_spi_cmd_to_lcd(0x0312, 0x0304); /* gamma adj12 */
+	ql_send_spi_cmd_to_lcd(0x0315, 0x0304); /* gamma adj15 */
+	ql_send_spi_cmd_to_lcd(0x0316, 0x031C); /* gamma adj16 */
+	ql_send_spi_cmd_to_lcd(0x0317, 0x0204); /* gamma adj17 */
+	ql_send_spi_cmd_to_lcd(0x0318, 0x0402); /* gamma adj18 */
+	ql_send_spi_cmd_to_lcd(0x0319, 0x0305); /* gamma adj19 */
+	ql_send_spi_cmd_to_lcd(0x031C, 0x0707); /* gamma adj1c */
+	ql_send_spi_cmd_to_lcd(0x031D, 0x021F); /* gamma adj1d */
+	ql_send_spi_cmd_to_lcd(0x0320, 0x0507); /* gamma adj20 */
+	ql_send_spi_cmd_to_lcd(0x0321, 0x0604); /* gamma adj21 */
+	ql_send_spi_cmd_to_lcd(0x0322, 0x0405); /* gamma adj22 */
+	ql_send_spi_cmd_to_lcd(0x0327, 0x0203); /* gamma adj27 */
+	ql_send_spi_cmd_to_lcd(0x0328, 0x0300); /* gamma adj28 */
+	ql_send_spi_cmd_to_lcd(0x0329, 0x0002); /* gamma adj29 */
+	ql_send_spi_cmd_to_lcd(0x0100, 0x363C); /* power cont1 */
+	mddi_wait(1);
+	ql_send_spi_cmd_to_lcd(0x0101, 0x4003); /* power cont2 */
+	ql_send_spi_cmd_to_lcd(0x0102, 0x0001); /* power cont3 */
+	ql_send_spi_cmd_to_lcd(0x0103, 0x3C58); /* power cont4 */
+	ql_send_spi_cmd_to_lcd(0x010C, 0x0135); /* power cont6 */
+	ql_send_spi_cmd_to_lcd(0x0106, 0x0002); /* auto seq */
+	ql_send_spi_cmd_to_lcd(0x0029, 0x03BF); /* panel if cont10 */
+	ql_send_spi_cmd_to_lcd(0x0106, 0x0003); /* auto seq */
+	mddi_wait(5);
+	ql_send_spi_cmd_to_lcd(0x0101, 0x4010); /* power cont2 */
+	mddi_wait(10);
+}
+
+/* Function to Power On the Primary and Secondary LCD panels */
+static int mddi_quickvx_lcd_on(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+
+	MDDI_MSG_DEBUG("\n%s(): ", __func__);
+	mfd = platform_get_drvdata(pdev);
+
+	if (!mfd) {
+		MDDI_MSG_DEBUG("\n mddi_quickvx_lcd_on: Device not found!");
+		return -ENODEV;
+	}
+
+	if (mfd->key != MFD_KEY) {
+		MDDI_MSG_DEBUG("\n mddi_quickvx_lcd_on: Invalid MFD key!");
+		return -EINVAL;
+	}
+
+	mddi_host_client_cnt_reset();
+	mddi_quickvx_configure_registers();
+	mddi_quickvx_prim_lcd_init();
+
+	return 0;
+}
+
+
+/* Function to Power Off the Primary and Secondary LCD panels */
+static int mddi_quickvx_lcd_off(struct platform_device *pdev)
+{
+	MDDI_MSG_DEBUG("\n%s(): ", __func__);
+	mddi_wait(1);
+	ql_send_spi_cmd_to_lcd(0x0106, 0x0002); /* Auto Sequencer setting */
+	mddi_wait(10);
+	ql_send_spi_cmd_to_lcd(0x0106, 0x0000); /* Auto Sequencer setting */
+	ql_send_spi_cmd_to_lcd(0x0029, 0x0002); /* Panel IF control 10 */
+	ql_send_spi_cmd_to_lcd(0x0100, 0x300D); /* Power Control 1 */
+	mddi_wait(1);
+
+	return 0;
+}
+
+/* Function to set the Backlight brightness level */
+static void mddi_quickvx_lcd_set_backlight(struct msm_fb_data_type *mfd)
+{
+	int32 level, i = 0, ret;
+
+	MDDI_MSG_DEBUG("%s(): ", __func__);
+
+	level = mfd->bl_level;
+	MDDI_MSG_DEBUG("\n level = %d", level);
+	if (level < 0) {
+		MDDI_MSG_DEBUG("mddi_quickvx_lcd_set_backlight: "
+			"Invalid backlight level (%d)!\n", level);
+		return;
+	}
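+	/* The PMIC call can fail transiently; retry up to three times. */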
+	while (i++ < 3) {
+		ret = pmic_set_led_intensity(LED_LCD, level);
+		if (ret == 0)
+			return;
+		msleep(10);
+	}
+
+	MDDI_MSG_DEBUG("%s: can't set lcd backlight!\n",
+				__func__);
+}
+
+/* Driver Probe function */
+static int __devinit mddi_quickvx_lcd_probe(struct platform_device *pdev)
+{
+	MDDI_MSG_DEBUG("\n%s(): id is %d", __func__, pdev->id);
+	msm_fb_add_device(pdev);
+	return 0;
+}
+
+/* Driver data structure */
+static struct platform_driver this_driver = {
+	.probe  = mddi_quickvx_lcd_probe,
+	.driver	= {
+		.name	= "mddi_quickvx",
+	},
+};
+
+
+/* Primary LCD panel data structure */
+static struct msm_fb_panel_data mddi_quickvx_panel_data0 = {
+	.on					= mddi_quickvx_lcd_on,
+	.off				= mddi_quickvx_lcd_off,
+	.set_backlight		= mddi_quickvx_lcd_set_backlight,
+};
+
+
+/* Primary LCD panel device structure */
+static struct platform_device this_device0 = {
+	.name   = "mddi_quickvx",
+	.id		= MDDI_QUICKVX_1_2,
+	.dev	= {
+		.platform_data = &mddi_quickvx_panel_data0,
+	}
+};
+
+/* Module init - driver main entry point */
+static int __init mddi_quickvx_lcd_init(void)
+{
+	int ret;
+	struct msm_panel_info *pinfo;
+
+#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
+	u32 cid;
+	MDDI_MSG_DEBUG("\n%s(): ", __func__);
+
+	ret = msm_fb_detect_client("mddi_quickvx");
+
+	if (ret == -ENODEV)	{
+		/* Device not found */
+		MDDI_MSG_DEBUG("\n mddi_quickvx_lcd_init: No device found!");
+		return 0;
+	}
+
+	if (ret) {
+		cid = mddi_get_client_id();
+
+		MDDI_MSG_DEBUG("\n cid = 0x%x", cid);
+		if (((cid >> 16) != QUICKVX_MDDI_MFR_CODE) ||
+			((cid & 0xFFFF) != QUICKVX_MDDI_PRD_CODE)) {
+			/* MDDI Client ID not matching */
+			MDDI_MSG_DEBUG("\n mddi_quickvx_lcd_init: "
+				"Client ID mismatch!");
+
+			return 0;
+		}
+		MDDI_MSG_DEBUG("\n mddi_quickvx_lcd_init: "
+			"QuickVX LCD panel detected!");
+	}
+
+#endif /* CONFIG_FB_MSM_MDDI_AUTO_DETECT */
+
+	mddi_quickvx_rows_per_refresh = 872;
+	mddi_quickvx_rows_per_second = 52364;
+	mddi_quickvx_usecs_per_refresh = 16574;
+
+	ret = platform_driver_register(&this_driver);
+
+	if (!ret) {
+		pinfo = &mddi_quickvx_panel_data0.panel_info;
+		pinfo->xres = 480;
+		pinfo->yres = 864;
+		MSM_FB_SINGLE_MODE_PANEL(pinfo);
+		pinfo->type = MDDI_PANEL;
+		pinfo->pdest = DISPLAY_1;
+		pinfo->mddi.vdopkt = MDDI_DEFAULT_PRIM_PIX_ATTR;
+		pinfo->wait_cycle = 0;
+		pinfo->bpp = 24;
+		pinfo->fb_num = 2;
+
+		pinfo->clk_rate = 192000000;
+		pinfo->clk_min = 192000000;
+		pinfo->clk_max = 200000000;
+		pinfo->lcd.rev = 1;
+		pinfo->lcd.vsync_enable = TRUE;
+		pinfo->lcd.refx100 = (mddi_quickvx_rows_per_second \
+			* 100)/mddi_quickvx_rows_per_refresh;
+		pinfo->lcd.v_back_porch = 4;
+		pinfo->lcd.v_front_porch = 2;
+		pinfo->lcd.v_pulse_width = 2;
+		pinfo->lcd.hw_vsync_mode = TRUE;
+		pinfo->lcd.vsync_notifier_period = (1 * HZ);
+		pinfo->bl_max = 10;
+		pinfo->bl_min = 0;
+
+		ret = platform_device_register(&this_device0);
+		if (ret) {
+			platform_driver_unregister(&this_driver);
+			MDDI_MSG_DEBUG("mddi_quickvx_lcd_init: "
+				"Primary device registration failed!\n");
+		}
+	}
+
+	return ret;
+}
+
+module_init(mddi_quickvx_lcd_init);
+
diff --git a/drivers/video/msm/mddi_sharp.c b/drivers/video/msm/mddi_sharp.c
new file mode 100644
index 0000000..b2a7188
--- /dev/null
+++ b/drivers/video/msm/mddi_sharp.c
@@ -0,0 +1,900 @@
+/* Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_fb.h"
+#include "mddihost.h"
+#include "mddihosti.h"
+
+#define SHARP_QVGA_PRIM 1
+#define SHARP_128X128_SECD 2
+
+extern uint32 mddi_host_core_version;
+static boolean mddi_debug_prim_wait = FALSE;
+static boolean mddi_sharp_vsync_wake = TRUE;
+static boolean mddi_sharp_monitor_refresh_value = TRUE;
+static boolean mddi_sharp_report_refresh_measurements = FALSE;
+static uint32 mddi_sharp_rows_per_second = 13830;	/* 5200000/376 */
+static uint32 mddi_sharp_rows_per_refresh = 338;
+static uint32 mddi_sharp_usecs_per_refresh = 24440;	/* (376+338)/5200000 */
+static boolean mddi_sharp_debug_60hz_refresh = FALSE;
+
+extern mddi_gpio_info_type mddi_gpio;
+extern boolean mddi_vsync_detect_enabled;
+static msm_fb_vsync_handler_type mddi_sharp_vsync_handler;
+static void *mddi_sharp_vsync_handler_arg;
+static uint16 mddi_sharp_vsync_attempts;
+
+static void mddi_sharp_prim_lcd_init(void);
+static void mddi_sharp_sub_lcd_init(void);
+static void mddi_sharp_lcd_set_backlight(struct msm_fb_data_type *mfd);
+static void mddi_sharp_vsync_set_handler(msm_fb_vsync_handler_type handler,
+					 void *);
+static void mddi_sharp_lcd_vsync_detected(boolean detected);
+static struct msm_panel_common_pdata *mddi_sharp_pdata;
+
+#define REG_SYSCTL    0x0000
+#define REG_INTR    0x0006
+#define REG_CLKCNF    0x000C
+#define REG_CLKDIV1    0x000E
+#define REG_CLKDIV2    0x0010
+
+#define REG_GIOD    0x0040
+#define REG_GIOA    0x0042
+
+#define REG_AGM      0x010A
+#define REG_FLFT    0x0110
+#define REG_FRGT    0x0112
+#define REG_FTOP    0x0114
+#define REG_FBTM    0x0116
+#define REG_FSTRX    0x0118
+#define REG_FSTRY    0x011A
+#define REG_VRAM    0x0202
+#define REG_SSDCTL    0x0330
+#define REG_SSD0    0x0332
+#define REG_PSTCTL1    0x0400
+#define REG_PSTCTL2    0x0402
+#define REG_PTGCTL    0x042A
+#define REG_PTHP    0x042C
+#define REG_PTHB    0x042E
+#define REG_PTHW    0x0430
+#define REG_PTHF    0x0432
+#define REG_PTVP    0x0434
+#define REG_PTVB    0x0436
+#define REG_PTVW    0x0438
+#define REG_PTVF    0x043A
+#define REG_VBLKS    0x0458
+#define REG_VBLKE    0x045A
+#define REG_SUBCTL    0x0700
+#define REG_SUBTCMD    0x0702
+#define REG_SUBTCMDD  0x0704
+#define REG_REVBYTE    0x0A02
+#define REG_REVCNT    0x0A04
+#define REG_REVATTR    0x0A06
+#define REG_REVFMT    0x0A08
+
+#define SHARP_SUB_UNKNOWN 0xffffffff
+#define SHARP_SUB_HYNIX 1
+#define SHARP_SUB_ROHM  2
+
+static uint32 sharp_subpanel_type = SHARP_SUB_UNKNOWN;
+
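+/*
+ * Helpers that bit-bang a transfer to/from the sub panel through the
+ * bridge's REG_SUBTCMD/REG_SUBTCMDD pair.  Going by the per-step comments
+ * below, the command word carries the CS/RD/WE strobes plus RS in bit 0;
+ * data is latched in REG_SUBTCMDD while the relevant strobe is pulsed.
+ */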
+static void sub_through_write(int sub_rs, uint32 sub_data)
+{
+	mddi_queue_register_write(REG_SUBTCMDD, sub_data, FALSE, 0);
+
+	/* CS=1,RD=1,WE=1,RS=sub_rs */
+	mddi_queue_register_write(REG_SUBTCMD, 0x000e | sub_rs, FALSE, 0);
+
+	/* CS=0,RD=1,WE=1,RS=sub_rs */
+	mddi_queue_register_write(REG_SUBTCMD, 0x0006 | sub_rs, FALSE, 0);
+
+	/* CS=0,RD=1,WE=0,RS=sub_rs */
+	mddi_queue_register_write(REG_SUBTCMD, 0x0004 | sub_rs, FALSE, 0);
+
+	/* CS=0,RD=1,WE=1,RS=sub_rs */
+	mddi_queue_register_write(REG_SUBTCMD, 0x0006 | sub_rs, FALSE, 0);
+
+	/* CS=1,RD=1,WE=1,RS=sub_rs */
+	mddi_queue_register_write(REG_SUBTCMD, 0x000e | sub_rs, TRUE, 0);
+}
+
+static uint32 sub_through_read(int sub_rs)
+{
+	uint32 sub_data;
+
+	/* CS=1,RD=1,WE=1,RS=sub_rs */
+	mddi_queue_register_write(REG_SUBTCMD, 0x000e | sub_rs, FALSE, 0);
+
+	/* CS=0,RD=1,WE=1,RS=sub_rs */
+	mddi_queue_register_write(REG_SUBTCMD, 0x0006 | sub_rs, FALSE, 0);
+
+	/* CS=0,RD=1,WE=0,RS=sub_rs */
+	mddi_queue_register_write(REG_SUBTCMD, 0x0002 | sub_rs, TRUE, 0);
+
+	mddi_queue_register_read(REG_SUBTCMDD, &sub_data, TRUE, 0);
+
+	/* CS=0,RD=1,WE=1,RS=sub_rs */
+	mddi_queue_register_write(REG_SUBTCMD, 0x0006 | sub_rs, FALSE, 0);
+
+	/* CS=1,RD=1,WE=1,RS=sub_rs */
+	mddi_queue_register_write(REG_SUBTCMD, 0x000e | sub_rs, TRUE, 0);
+
+	return sub_data;
+}
+
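+/*
+ * Push one 16-bit word to the primary panel over the bridge's serial
+ * interface: load REG_SSD0, start the transfer by setting bit 1 of
+ * REG_SSDCTL, then poll that bit until the transfer completes.
+ */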
+static void serigo(uint32 ssd)
+{
+	uint32 ssdctl;
+
+	mddi_queue_register_read(REG_SSDCTL, &ssdctl, TRUE, 0);
+	ssdctl = ((ssdctl & 0xE7) | 0x02);
+
+	mddi_queue_register_write(REG_SSD0, ssd, FALSE, 0);
+	mddi_queue_register_write(REG_SSDCTL, ssdctl, TRUE, 0);
+
+	do {
+		mddi_queue_register_read(REG_SSDCTL, &ssdctl, TRUE, 0);
+	} while ((ssdctl & 0x0002) != 0);
+
+	if (mddi_debug_prim_wait)
+		mddi_wait(2);
+}
+
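+/*
+ * Power the primary panel down: run the panel's serial shutdown sequence,
+ * then quiesce the bridge (REG_PSTCTL1, REG_SYSCTL) and reset its serial
+ * block (the SSDRESET write below).
+ */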
+static void mddi_sharp_lcd_powerdown(void)
+{
+	serigo(0x0131);
+	serigo(0x0300);
+	mddi_wait(40);
+	serigo(0x0135);
+	mddi_wait(20);
+	serigo(0x2122);
+	mddi_wait(20);
+	serigo(0x0201);
+	mddi_wait(20);
+	serigo(0x2100);
+	mddi_wait(20);
+	serigo(0x2000);
+	mddi_wait(20);
+
+	mddi_queue_register_write(REG_PSTCTL1, 0x1, TRUE, 0);
+	mddi_wait(100);
+	mddi_queue_register_write(REG_PSTCTL1, 0x0, TRUE, 0);
+	mddi_wait(2);
+	mddi_queue_register_write(REG_SYSCTL, 0x1, TRUE, 0);
+	mddi_wait(2);
+	mddi_queue_register_write(REG_CLKDIV1, 0x3, TRUE, 0);
+	mddi_wait(2);
+	mddi_queue_register_write(REG_SSDCTL, 0x0000, TRUE, 0);	/* SSDRESET */
+	mddi_queue_register_write(REG_SYSCTL, 0x0, TRUE, 0);
+	mddi_wait(2);
+}
+
+static void mddi_sharp_lcd_set_backlight(struct msm_fb_data_type *mfd)
+{
+	uint32 regdata;
+	int32 level;
+	int max = mfd->panel_info.bl_max;
+	int min = mfd->panel_info.bl_min;
+
+	if (mddi_sharp_pdata && mddi_sharp_pdata->backlight_level) {
+		level = mddi_sharp_pdata->backlight_level(mfd->bl_level,
+							  max,
+							  min);
+
+		if (level < 0)
+			return;
+
+		/* use Rodem GPIO(2:0) to give 8 levels of backlight (7-0) */
+		/* Set lower 3 GPIOs as Outputs (set to 0) */
+		mddi_queue_register_read(REG_GIOA, &regdata, TRUE, 0);
+		mddi_queue_register_write(REG_GIOA, regdata & 0xfff8, TRUE, 0);
+
+		/* Set lower 3 GPIOs as level */
+		mddi_queue_register_read(REG_GIOD, &regdata, TRUE, 0);
+		mddi_queue_register_write(REG_GIOD,
+			  (regdata & 0xfff8) | (0x07 & level), TRUE, 0);
+	}
+}
+
+static void mddi_sharp_prim_lcd_init(void)
+{
+	mddi_queue_register_write(REG_SYSCTL, 0x4000, TRUE, 0);
+	mddi_wait(1);
+	mddi_queue_register_write(REG_SYSCTL, 0x0000, TRUE, 0);
+	mddi_wait(5);
+	mddi_queue_register_write(REG_SYSCTL, 0x0001, FALSE, 0);
+	mddi_queue_register_write(REG_CLKDIV1, 0x000b, FALSE, 0);
+
+	/* new reg write below */
+	if (mddi_sharp_debug_60hz_refresh)
+		mddi_queue_register_write(REG_CLKCNF, 0x070d, FALSE, 0);
+	else
+		mddi_queue_register_write(REG_CLKCNF, 0x0708, FALSE, 0);
+
+	mddi_queue_register_write(REG_SYSCTL, 0x0201, FALSE, 0);
+	mddi_queue_register_write(REG_PTGCTL, 0x0010, FALSE, 0);
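+	/* Horizontal/vertical timing: the active widths programmed below
+	 * (PTHW = 240, PTVW = 320) match the QVGA geometry registered in
+	 * mddi_sharp_init(). */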
+	mddi_queue_register_write(REG_PTHP, 4, FALSE, 0);
+	mddi_queue_register_write(REG_PTHB, 40, FALSE, 0);
+	mddi_queue_register_write(REG_PTHW, 240, FALSE, 0);
+	if (mddi_sharp_debug_60hz_refresh)
+		mddi_queue_register_write(REG_PTHF, 12, FALSE, 0);
+	else
+		mddi_queue_register_write(REG_PTHF, 92, FALSE, 0);
+
+	mddi_wait(1);
+
+	mddi_queue_register_write(REG_PTVP, 1, FALSE, 0);
+	mddi_queue_register_write(REG_PTVB, 2, FALSE, 0);
+	mddi_queue_register_write(REG_PTVW, 320, FALSE, 0);
+	mddi_queue_register_write(REG_PTVF, 15, FALSE, 0);
+
+	mddi_wait(1);
+
+	/* vram_color set REG_AGM???? */
+	mddi_queue_register_write(REG_AGM, 0x0000, TRUE, 0);
+
+	mddi_queue_register_write(REG_SSDCTL, 0x0000, FALSE, 0);
+	mddi_queue_register_write(REG_SSDCTL, 0x0001, TRUE, 0);
+	mddi_wait(1);
+	mddi_queue_register_write(REG_PSTCTL1, 0x0001, TRUE, 0);
+	mddi_wait(10);
+
+	serigo(0x0701);
+	/* software reset */
+	mddi_wait(1);
+	/* Wait over 50us */
+
+	serigo(0x0400);
+	/* DCLK~ACHSYNC~ACVSYNC polarity setting */
+	serigo(0x2900);
+	/* EEPROM start read address setting */
+	serigo(0x2606);
+	/* EEPROM start read register setting */
+	mddi_wait(20);
+	/* Wait over 20ms */
+
+	serigo(0x0503);
+	/* Horizontal timing setting */
+	serigo(0x062C);
+	/* Veritical timing setting */
+	serigo(0x2001);
+	/* power initialize setting(VDC2) */
+	mddi_wait(20);
+	/* Wait over 20ms */
+
+	serigo(0x2120);
+	/* Initialize power setting(CPS) */
+	mddi_wait(20);
+	/* Wait over 20ms */
+
+	serigo(0x2130);
+	/* Initialize power setting(CPS) */
+	mddi_wait(20);
+	/* Wait over 20ms */
+
+	serigo(0x2132);
+	/* Initialize power setting(CPS) */
+	mddi_wait(10);
+	/* Wait over 10ms */
+
+	serigo(0x2133);
+	/* Initialize power setting(CPS) */
+	mddi_wait(20);
+	/* Wait over 20ms */
+
+	serigo(0x0200);
+	/* Panel initialize release(INIT) */
+	mddi_wait(1);
+	/* Wait over 1ms */
+
+	serigo(0x0131);
+	/* Panel setting(CPS) */
+	mddi_wait(1);
+	/* Wait over 1ms */
+
+	mddi_queue_register_write(REG_PSTCTL1, 0x0003, TRUE, 0);
+
+	/* if (FFA LCD is upside down) -> serigo(0x0100); */
+	serigo(0x0130);
+
+	/* Black mask release(display ON) */
+	mddi_wait(1);
+	/* Wait over 1ms */
+
+	if (mddi_sharp_vsync_wake) {
+		mddi_queue_register_write(REG_VBLKS, 0x1001, TRUE, 0);
+		mddi_queue_register_write(REG_VBLKE, 0x1002, TRUE, 0);
+	}
+
+	/* Set the MDP pixel data attributes for Primary Display */
+	mddi_host_write_pix_attr_reg(0x00C3);
+}
+
+static void mddi_sharp_sub_lcd_init(void)
+{
+
+	mddi_queue_register_write(REG_SYSCTL, 0x4000, FALSE, 0);
+	mddi_queue_register_write(REG_SYSCTL, 0x0000, TRUE, 0);
+	mddi_wait(100);
+
+	mddi_queue_register_write(REG_SYSCTL, 0x0001, FALSE, 0);
+	mddi_queue_register_write(REG_CLKDIV1, 0x000b, FALSE, 0);
+	mddi_queue_register_write(REG_CLKCNF, 0x0708, FALSE, 0);
+	mddi_queue_register_write(REG_SYSCTL, 0x0201, FALSE, 0);
+	mddi_queue_register_write(REG_PTGCTL, 0x0010, FALSE, 0);
+	mddi_queue_register_write(REG_PTHP, 4, FALSE, 0);
+	mddi_queue_register_write(REG_PTHB, 40, FALSE, 0);
+	mddi_queue_register_write(REG_PTHW, 128, FALSE, 0);
+	mddi_queue_register_write(REG_PTHF, 92, FALSE, 0);
+	mddi_queue_register_write(REG_PTVP, 1, FALSE, 0);
+	mddi_queue_register_write(REG_PTVB, 2, FALSE, 0);
+	mddi_queue_register_write(REG_PTVW, 128, FALSE, 0);
+	mddi_queue_register_write(REG_PTVF, 15, FALSE, 0);
+
+	/* Now the sub display..... */
+	/* Reset High */
+	mddi_queue_register_write(REG_SUBCTL, 0x0200, FALSE, 0);
+	/* CS=1,RD=1,WE=1,RS=1 */
+	mddi_queue_register_write(REG_SUBTCMD, 0x000f, TRUE, 0);
+	mddi_wait(1);
+	/* Wait 5us */
+
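+	/* Probe which sub-panel controller is fitted: a 0x6A read-back
+	 * identifies the Hynix part, otherwise a 0x54 response to command
+	 * 0x09 identifies the Rohm part. */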
+	if (sharp_subpanel_type == SHARP_SUB_UNKNOWN) {
+		uint32 data;
+
+		sub_through_write(1, 0x05);
+		sub_through_write(1, 0x6A);
+		sub_through_write(1, 0x1D);
+		sub_through_write(1, 0x05);
+		data = sub_through_read(1);
+		if (data == 0x6A) {
+			sharp_subpanel_type = SHARP_SUB_HYNIX;
+		} else {
+			sub_through_write(0, 0x36);
+			sub_through_write(1, 0xA8);
+			sub_through_write(0, 0x09);
+			data = sub_through_read(1);
+			data = sub_through_read(1);
+			if (data == 0x54) {
+				sub_through_write(0, 0x36);
+				sub_through_write(1, 0x00);
+				sharp_subpanel_type = SHARP_SUB_ROHM;
+			}
+		}
+	}
+
+	if (sharp_subpanel_type == SHARP_SUB_HYNIX) {
+		sub_through_write(1, 0x00);	/* Display setting 1 */
+		sub_through_write(1, 0x04);
+		sub_through_write(1, 0x01);
+		sub_through_write(1, 0x05);
+		sub_through_write(1, 0x0280);
+		sub_through_write(1, 0x0301);
+		sub_through_write(1, 0x0402);
+		sub_through_write(1, 0x0500);
+		sub_through_write(1, 0x0681);
+		sub_through_write(1, 0x077F);
+		sub_through_write(1, 0x08C0);
+		sub_through_write(1, 0x0905);
+		sub_through_write(1, 0x0A02);
+		sub_through_write(1, 0x0B00);
+		sub_through_write(1, 0x0C00);
+		sub_through_write(1, 0x0D00);
+		sub_through_write(1, 0x0E00);
+		sub_through_write(1, 0x0F00);
+
+		sub_through_write(1, 0x100B);	/* Display setting 2 */
+		sub_through_write(1, 0x1103);
+		sub_through_write(1, 0x1237);
+		sub_through_write(1, 0x1300);
+		sub_through_write(1, 0x1400);
+		sub_through_write(1, 0x1500);
+		sub_through_write(1, 0x1605);
+		sub_through_write(1, 0x1700);
+		sub_through_write(1, 0x1800);
+		sub_through_write(1, 0x192E);
+		sub_through_write(1, 0x1A00);
+		sub_through_write(1, 0x1B00);
+		sub_through_write(1, 0x1C00);
+
+		sub_through_write(1, 0x151A);	/* Power setting */
+
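+		/* Gradation palette: three 32-entry ramps (registers
+		 * 0x20-0x3F, 0x40-0x5F and 0x60-0x7F), presumably one per
+		 * colour component. */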
+		sub_through_write(1, 0x2002);	/* Gradation Palette setting */
+		sub_through_write(1, 0x2107);
+		sub_through_write(1, 0x220C);
+		sub_through_write(1, 0x2310);
+		sub_through_write(1, 0x2414);
+		sub_through_write(1, 0x2518);
+		sub_through_write(1, 0x261C);
+		sub_through_write(1, 0x2720);
+		sub_through_write(1, 0x2824);
+		sub_through_write(1, 0x2928);
+		sub_through_write(1, 0x2A2B);
+		sub_through_write(1, 0x2B2E);
+		sub_through_write(1, 0x2C31);
+		sub_through_write(1, 0x2D34);
+		sub_through_write(1, 0x2E37);
+		sub_through_write(1, 0x2F3A);
+		sub_through_write(1, 0x303C);
+		sub_through_write(1, 0x313E);
+		sub_through_write(1, 0x323F);
+		sub_through_write(1, 0x3340);
+		sub_through_write(1, 0x3441);
+		sub_through_write(1, 0x3543);
+		sub_through_write(1, 0x3646);
+		sub_through_write(1, 0x3749);
+		sub_through_write(1, 0x384C);
+		sub_through_write(1, 0x394F);
+		sub_through_write(1, 0x3A52);
+		sub_through_write(1, 0x3B59);
+		sub_through_write(1, 0x3C60);
+		sub_through_write(1, 0x3D67);
+		sub_through_write(1, 0x3E6E);
+		sub_through_write(1, 0x3F7F);
+		sub_through_write(1, 0x4001);
+		sub_through_write(1, 0x4107);
+		sub_through_write(1, 0x420C);
+		sub_through_write(1, 0x4310);
+		sub_through_write(1, 0x4414);
+		sub_through_write(1, 0x4518);
+		sub_through_write(1, 0x461C);
+		sub_through_write(1, 0x4720);
+		sub_through_write(1, 0x4824);
+		sub_through_write(1, 0x4928);
+		sub_through_write(1, 0x4A2B);
+		sub_through_write(1, 0x4B2E);
+		sub_through_write(1, 0x4C31);
+		sub_through_write(1, 0x4D34);
+		sub_through_write(1, 0x4E37);
+		sub_through_write(1, 0x4F3A);
+		sub_through_write(1, 0x503C);
+		sub_through_write(1, 0x513E);
+		sub_through_write(1, 0x523F);
+		sub_through_write(1, 0x5340);
+		sub_through_write(1, 0x5441);
+		sub_through_write(1, 0x5543);
+		sub_through_write(1, 0x5646);
+		sub_through_write(1, 0x5749);
+		sub_through_write(1, 0x584C);
+		sub_through_write(1, 0x594F);
+		sub_through_write(1, 0x5A52);
+		sub_through_write(1, 0x5B59);
+		sub_through_write(1, 0x5C60);
+		sub_through_write(1, 0x5D67);
+		sub_through_write(1, 0x5E6E);
+		sub_through_write(1, 0x5F7E);
+		sub_through_write(1, 0x6000);
+		sub_through_write(1, 0x6107);
+		sub_through_write(1, 0x620C);
+		sub_through_write(1, 0x6310);
+		sub_through_write(1, 0x6414);
+		sub_through_write(1, 0x6518);
+		sub_through_write(1, 0x661C);
+		sub_through_write(1, 0x6720);
+		sub_through_write(1, 0x6824);
+		sub_through_write(1, 0x6928);
+		sub_through_write(1, 0x6A2B);
+		sub_through_write(1, 0x6B2E);
+		sub_through_write(1, 0x6C31);
+		sub_through_write(1, 0x6D34);
+		sub_through_write(1, 0x6E37);
+		sub_through_write(1, 0x6F3A);
+		sub_through_write(1, 0x703C);
+		sub_through_write(1, 0x713E);
+		sub_through_write(1, 0x723F);
+		sub_through_write(1, 0x7340);
+		sub_through_write(1, 0x7441);
+		sub_through_write(1, 0x7543);
+		sub_through_write(1, 0x7646);
+		sub_through_write(1, 0x7749);
+		sub_through_write(1, 0x784C);
+		sub_through_write(1, 0x794F);
+		sub_through_write(1, 0x7A52);
+		sub_through_write(1, 0x7B59);
+		sub_through_write(1, 0x7C60);
+		sub_through_write(1, 0x7D67);
+		sub_through_write(1, 0x7E6E);
+		sub_through_write(1, 0x7F7D);
+
+		sub_through_write(1, 0x1851);	/* Display on */
+
+		mddi_queue_register_write(REG_AGM, 0x0000, TRUE, 0);
+
+		/* 1 pixel / 1 post clock */
+		mddi_queue_register_write(REG_CLKDIV2, 0x3b00, FALSE, 0);
+
+		/* SUB LCD select */
+		mddi_queue_register_write(REG_PSTCTL2, 0x0080, FALSE, 0);
+
+		/* RS=0,command initiate number=0,select master mode */
+		mddi_queue_register_write(REG_SUBCTL, 0x0202, FALSE, 0);
+
+		/* Sub LCD Data transform start */
+		mddi_queue_register_write(REG_PSTCTL1, 0x0003, FALSE, 0);
+
+	} else if (sharp_subpanel_type == SHARP_SUB_ROHM) {
+
+		sub_through_write(0, 0x01);	/* Display setting */
+		sub_through_write(1, 0x00);
+
+		mddi_wait(1);
+		/* Wait 100us  <----- ******* Update 2005/01/24 */
+
+		sub_through_write(0, 0xB6);
+		sub_through_write(1, 0x0C);
+		sub_through_write(1, 0x4A);
+		sub_through_write(1, 0x20);
+		sub_through_write(0, 0x3A);
+		sub_through_write(1, 0x05);
+		sub_through_write(0, 0xB7);
+		sub_through_write(1, 0x01);
+		sub_through_write(0, 0xBA);
+		sub_through_write(1, 0x20);
+		sub_through_write(1, 0x02);
+		sub_through_write(0, 0x25);
+		sub_through_write(1, 0x4F);
+		sub_through_write(0, 0xBB);
+		sub_through_write(1, 0x00);
+		sub_through_write(0, 0x36);
+		sub_through_write(1, 0x00);
+		sub_through_write(0, 0xB1);
+		sub_through_write(1, 0x05);
+		sub_through_write(0, 0xBE);
+		sub_through_write(1, 0x80);
+		sub_through_write(0, 0x26);
+		sub_through_write(1, 0x01);
+		sub_through_write(0, 0x2A);
+		sub_through_write(1, 0x02);
+		sub_through_write(1, 0x81);
+		sub_through_write(0, 0x2B);
+		sub_through_write(1, 0x00);
+		sub_through_write(1, 0x7F);
+
+		sub_through_write(0, 0x2C);
+		sub_through_write(0, 0x11);	/* Sleep mode off */
+
+		mddi_wait(1);
+		/* Wait 100 ms <----- ******* Update 2005/01/24 */
+
+		sub_through_write(0, 0x29);	/* Display on */
+		sub_through_write(0, 0xB3);
+		sub_through_write(1, 0x20);
+		sub_through_write(1, 0xAA);
+		sub_through_write(1, 0xA0);
+		sub_through_write(1, 0x20);
+		sub_through_write(1, 0x30);
+		sub_through_write(1, 0xA6);
+		sub_through_write(1, 0xFF);
+		sub_through_write(1, 0x9A);
+		sub_through_write(1, 0x9F);
+		sub_through_write(1, 0xAF);
+		sub_through_write(1, 0xBC);
+		sub_through_write(1, 0xCF);
+		sub_through_write(1, 0xDF);
+		sub_through_write(1, 0x20);
+		sub_through_write(1, 0x9C);
+		sub_through_write(1, 0x8A);
+
+		sub_through_write(0, 0x002C);	/* Display on */
+
+		/* 1 pixel / 2 post clock */
+		mddi_queue_register_write(REG_CLKDIV2, 0x7b00, FALSE, 0);
+
+		/* SUB LCD select */
+		mddi_queue_register_write(REG_PSTCTL2, 0x0080, FALSE, 0);
+
+		/* RS=1,command initiate number=0,select master mode */
+		mddi_queue_register_write(REG_SUBCTL, 0x0242, FALSE, 0);
+
+		/* Sub LCD Data transform start */
+		mddi_queue_register_write(REG_PSTCTL1, 0x0003, FALSE, 0);
+
+	}
+
+	/* Set the MDP pixel data attributes for Sub Display */
+	mddi_host_write_pix_attr_reg(0x00C0);
+}
+
+static void mddi_sharp_lcd_vsync_detected(boolean detected)
+{
+	/* static timetick_type start_time = 0; */
+	static struct timeval start_time;
+	static boolean first_time = TRUE;
+	/* uint32 mdp_cnt_val = 0; */
+	/* timetick_type elapsed_us; */
+	struct timeval now;
+	uint32 elapsed_us;
+	uint32 num_vsyncs;
+
+	if ((detected) || (mddi_sharp_vsync_attempts > 5)) {
+		if ((detected) && (mddi_sharp_monitor_refresh_value)) {
+			/* if (start_time != 0) */
+			if (!first_time) {
+				jiffies_to_timeval(jiffies, &now);
+				elapsed_us =
+				    (now.tv_sec - start_time.tv_sec) * 1000000 +
+				    now.tv_usec - start_time.tv_usec;
+				/*
+				 * LCD is configured for a refresh every
+				 * mddi_sharp_usecs_per_refresh usecs, so to
+				 * determine the number of vsyncs that have
+				 * occurred since the last measurement add
+				 * half that to the time difference and
+				 * divide by the refresh period.
+				 */
+				num_vsyncs = (elapsed_us +
+					      (mddi_sharp_usecs_per_refresh >>
+					       1)) /
+				    mddi_sharp_usecs_per_refresh;
+				/*
+				 * LCD is configured for
+				 * mddi_sharp_rows_per_refresh hsyncs (rows)
+				 * per refresh cycle. Calculate a new
+				 * rows_per_second value from these
+				 * measurements so the MDP can update with it.
+				 */
+				mddi_sharp_rows_per_second =
+				    (mddi_sharp_rows_per_refresh * 1000 *
+				     num_vsyncs) / (elapsed_us / 1000);
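+				/* Illustrative numbers (not measured): with
+				 * usecs_per_refresh = 24440 and elapsed_us =
+				 * 100000, num_vsyncs = 4 and the recomputed
+				 * rows_per_second = 338*1000*4/100 = 13520. */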
+			}
+			/* start_time = timetick_get(); */
+			first_time = FALSE;
+			jiffies_to_timeval(jiffies, &start_time);
+			if (mddi_sharp_report_refresh_measurements) {
+				/* mdp_cnt_val = MDP_LINE_COUNT; */
+			}
+		}
+		/* if detected = TRUE, client initiated wakeup was detected */
+		if (mddi_sharp_vsync_handler != NULL) {
+			(*mddi_sharp_vsync_handler)
+			    (mddi_sharp_vsync_handler_arg);
+			mddi_sharp_vsync_handler = NULL;
+		}
+		mddi_vsync_detect_enabled = FALSE;
+		mddi_sharp_vsync_attempts = 0;
+		/* need to clear this vsync wakeup */
+		if (!mddi_queue_register_write_int(REG_INTR, 0x0000)) {
+			MDDI_MSG_ERR("Vsync interrupt clear failed!\n");
+		}
+		if (!detected) {
+			/* give up after 5 failed attempts but show error */
+			MDDI_MSG_NOTICE("Vsync detection failed!\n");
+		} else if ((mddi_sharp_monitor_refresh_value) &&
+			(mddi_sharp_report_refresh_measurements)) {
+			MDDI_MSG_NOTICE("  Lines Per Second=%d!\n",
+				mddi_sharp_rows_per_second);
+		}
+	} else
+		/* if detected = FALSE, we woke up from hibernation, but did not
+		 * detect client initiated wakeup.
+		 */
+		mddi_sharp_vsync_attempts++;
+}
+
+/* ISR to be executed */
+static void mddi_sharp_vsync_set_handler(msm_fb_vsync_handler_type handler,
+					 void *arg)
+{
+	boolean error = FALSE;
+	unsigned long flags;
+
+	/* Disable interrupts */
+	spin_lock_irqsave(&mddi_host_spin_lock, flags);
+	/* INTLOCK(); */
+
+	if (mddi_sharp_vsync_handler != NULL)
+		error = TRUE;
+
+	/* Register the handler for this particular GROUP interrupt source */
+	mddi_sharp_vsync_handler = handler;
+	mddi_sharp_vsync_handler_arg = arg;
+
+	/* Restore interrupts */
+	spin_unlock_irqrestore(&mddi_host_spin_lock, flags);
+	/* INTFREE(); */
+
+	if (error)
+		MDDI_MSG_ERR("MDDI: Previous Vsync handler never called\n");
+
+	/* Enable the vsync wakeup */
+	mddi_queue_register_write(REG_INTR, 0x8100, FALSE, 0);
+
+	mddi_sharp_vsync_attempts = 1;
+	mddi_vsync_detect_enabled = TRUE;
+}				/* mddi_sharp_vsync_set_handler */
+
+static int mddi_sharp_lcd_on(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+
+	mfd = platform_get_drvdata(pdev);
+
+	if (!mfd)
+		return -ENODEV;
+
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	mddi_host_client_cnt_reset();
+
+	if (mfd->panel.id == SHARP_QVGA_PRIM)
+		mddi_sharp_prim_lcd_init();
+	else
+		mddi_sharp_sub_lcd_init();
+
+	return 0;
+}
+
+static int mddi_sharp_lcd_off(struct platform_device *pdev)
+{
+	if (mddi_sharp_vsync_handler != NULL) {
+		(*mddi_sharp_vsync_handler)
+			    (mddi_sharp_vsync_handler_arg);
+		mddi_sharp_vsync_handler = NULL;
+		printk(KERN_INFO "%s: clean up vsync_handler=%x\n", __func__,
+				(int)mddi_sharp_vsync_handler);
+	}
+
+	mddi_sharp_lcd_powerdown();
+	return 0;
+}
+
+static int __devinit mddi_sharp_probe(struct platform_device *pdev)
+{
+	if (pdev->id == 0) {
+		mddi_sharp_pdata = pdev->dev.platform_data;
+		return 0;
+	}
+
+	msm_fb_add_device(pdev);
+
+	return 0;
+}
+
+static struct platform_driver this_driver = {
+	.probe  = mddi_sharp_probe,
+	.driver = {
+		.name   = "mddi_sharp_qvga",
+	},
+};
+
+static struct msm_fb_panel_data mddi_sharp_panel_data0 = {
+	.on = mddi_sharp_lcd_on,
+	.off = mddi_sharp_lcd_off,
+	.set_backlight = mddi_sharp_lcd_set_backlight,
+	.set_vsync_notifier = mddi_sharp_vsync_set_handler,
+};
+
+static struct platform_device this_device_0 = {
+	.name   = "mddi_sharp_qvga",
+	.id	= SHARP_QVGA_PRIM,
+	.dev	= {
+		.platform_data = &mddi_sharp_panel_data0,
+	}
+};
+
+static struct msm_fb_panel_data mddi_sharp_panel_data1 = {
+	.on = mddi_sharp_lcd_on,
+	.off = mddi_sharp_lcd_off,
+};
+
+static struct platform_device this_device_1 = {
+	.name   = "mddi_sharp_qvga",
+	.id	= SHARP_128X128_SECD,
+	.dev	= {
+		.platform_data = &mddi_sharp_panel_data1,
+	}
+};
+
+static int __init mddi_sharp_init(void)
+{
+	int ret;
+	struct msm_panel_info *pinfo;
+
+#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
+	u32 id;
+
+	ret = msm_fb_detect_client("mddi_sharp_qvga");
+	if (ret == -ENODEV)
+		return 0;
+
+	if (ret) {
+		id = mddi_get_client_id();
+
+		if (((id >> 16) != 0x0) || ((id & 0xffff) != 0x8835))
+			return 0;
+	}
+#endif
+	if (mddi_host_core_version > 8) {
+		/* can use faster refresh with newer hw revisions */
+		mddi_sharp_debug_60hz_refresh = TRUE;
+
+		/* Timing variables for tracking vsync */
+		/* dot_clock = 6.00MHz
+		 * horizontal count = 296
+		 * vertical count = 338
+		 * refresh rate = 6000000/(296*338) = 60Hz
+		 */
+		mddi_sharp_rows_per_second = 20270;	/* 6000000/296 */
+		mddi_sharp_rows_per_refresh = 338;
+		mddi_sharp_usecs_per_refresh = 16674;	/* (296*338)/6000000 */
+	} else {
+		/* Timing variables for tracking vsync */
+		/* dot_clock = 5.20MHz
+		 * horizontal count = 376
+		 * vertical count = 338
+		 * refresh rate = 5200000/(376*338) = 41Hz
+		 */
+		mddi_sharp_rows_per_second = 13830;	/* 5200000/376 */
+		mddi_sharp_rows_per_refresh = 338;
+		mddi_sharp_usecs_per_refresh = 24440;	/* (376*338)/5200000 */
+	}
+
+	ret = platform_driver_register(&this_driver);
+	if (!ret) {
+		pinfo = &mddi_sharp_panel_data0.panel_info;
+		pinfo->xres = 240;
+		pinfo->yres = 320;
+		MSM_FB_SINGLE_MODE_PANEL(pinfo);
+		pinfo->type = MDDI_PANEL;
+		pinfo->pdest = DISPLAY_1;
+		pinfo->mddi.vdopkt = MDDI_DEFAULT_PRIM_PIX_ATTR;
+		pinfo->wait_cycle = 0;
+		pinfo->bpp = 18;
+		pinfo->fb_num = 2;
+		pinfo->clk_rate = 122880000;
+		pinfo->clk_min = 120000000;
+		pinfo->clk_max = 125000000;
+		pinfo->lcd.vsync_enable = TRUE;
+		pinfo->lcd.refx100 =
+			(mddi_sharp_rows_per_second * 100) /
+			mddi_sharp_rows_per_refresh;
+		pinfo->lcd.v_back_porch = 12;
+		pinfo->lcd.v_front_porch = 6;
+		pinfo->lcd.v_pulse_width = 0;
+		pinfo->lcd.hw_vsync_mode = FALSE;
+		pinfo->lcd.vsync_notifier_period = (1 * HZ);
+		pinfo->bl_max = 7;
+		pinfo->bl_min = 1;
+
+		ret = platform_device_register(&this_device_0);
+		if (ret)
+			platform_driver_unregister(&this_driver);
+
+		pinfo = &mddi_sharp_panel_data1.panel_info;
+		pinfo->xres = 128;
+		pinfo->yres = 128;
+		MSM_FB_SINGLE_MODE_PANEL(pinfo);
+		pinfo->type = MDDI_PANEL;
+		pinfo->pdest = DISPLAY_2;
+		pinfo->mddi.vdopkt = 0x400;
+		pinfo->wait_cycle = 0;
+		pinfo->bpp = 18;
+		pinfo->clk_rate = 122880000;
+		pinfo->clk_min = 120000000;
+		pinfo->clk_max = 125000000;
+		pinfo->fb_num = 2;
+
+		ret = platform_device_register(&this_device_1);
+		if (ret) {
+			platform_device_unregister(&this_device_0);
+			platform_driver_unregister(&this_driver);
+		}
+	}
+
+	if (!ret)
+		mddi_lcd.vsync_detected = mddi_sharp_lcd_vsync_detected;
+
+	return ret;
+}
+
+module_init(mddi_sharp_init);
diff --git a/drivers/video/msm/mddi_toshiba.c b/drivers/video/msm/mddi_toshiba.c
new file mode 100644
index 0000000..9727453
--- /dev/null
+++ b/drivers/video/msm/mddi_toshiba.c
@@ -0,0 +1,1753 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_fb.h"
+#include "mddihost.h"
+#include "mddihosti.h"
+#include "mddi_toshiba.h"
+
+#define TM_GET_DID(id) ((id) & 0xff)
+#define TM_GET_PID(id) (((id) & 0xff00)>>8)
+
+#define MDDI_CLIENT_CORE_BASE  0x108000
+#define LCD_CONTROL_BLOCK_BASE 0x110000
+#define SPI_BLOCK_BASE         0x120000
+#define PWM_BLOCK_BASE         0x140000
+#define SYSTEM_BLOCK1_BASE     0x160000
+
+#define TTBUSSEL    (MDDI_CLIENT_CORE_BASE|0x18)
+#define DPSET0      (MDDI_CLIENT_CORE_BASE|0x1C)
+#define DPSET1      (MDDI_CLIENT_CORE_BASE|0x20)
+#define DPSUS       (MDDI_CLIENT_CORE_BASE|0x24)
+#define DPRUN       (MDDI_CLIENT_CORE_BASE|0x28)
+#define SYSCKENA    (MDDI_CLIENT_CORE_BASE|0x2C)
+
+#define BITMAP0     (MDDI_CLIENT_CORE_BASE|0x44)
+#define BITMAP1     (MDDI_CLIENT_CORE_BASE|0x48)
+#define BITMAP2     (MDDI_CLIENT_CORE_BASE|0x4C)
+#define BITMAP3     (MDDI_CLIENT_CORE_BASE|0x50)
+#define BITMAP4     (MDDI_CLIENT_CORE_BASE|0x54)
+
+#define SRST        (LCD_CONTROL_BLOCK_BASE|0x00)
+#define PORT_ENB    (LCD_CONTROL_BLOCK_BASE|0x04)
+#define START       (LCD_CONTROL_BLOCK_BASE|0x08)
+#define PORT        (LCD_CONTROL_BLOCK_BASE|0x0C)
+
+#define INTFLG      (LCD_CONTROL_BLOCK_BASE|0x18)
+#define INTMSK      (LCD_CONTROL_BLOCK_BASE|0x1C)
+#define MPLFBUF     (LCD_CONTROL_BLOCK_BASE|0x20)
+
+#define PXL         (LCD_CONTROL_BLOCK_BASE|0x30)
+#define HCYCLE      (LCD_CONTROL_BLOCK_BASE|0x34)
+#define HSW         (LCD_CONTROL_BLOCK_BASE|0x38)
+#define HDE_START   (LCD_CONTROL_BLOCK_BASE|0x3C)
+#define HDE_SIZE    (LCD_CONTROL_BLOCK_BASE|0x40)
+#define VCYCLE      (LCD_CONTROL_BLOCK_BASE|0x44)
+#define VSW         (LCD_CONTROL_BLOCK_BASE|0x48)
+#define VDE_START   (LCD_CONTROL_BLOCK_BASE|0x4C)
+#define VDE_SIZE    (LCD_CONTROL_BLOCK_BASE|0x50)
+#define WAKEUP      (LCD_CONTROL_BLOCK_BASE|0x54)
+#define REGENB      (LCD_CONTROL_BLOCK_BASE|0x5C)
+#define VSYNIF      (LCD_CONTROL_BLOCK_BASE|0x60)
+#define WRSTB       (LCD_CONTROL_BLOCK_BASE|0x64)
+#define RDSTB       (LCD_CONTROL_BLOCK_BASE|0x68)
+#define ASY_DATA    (LCD_CONTROL_BLOCK_BASE|0x6C)
+#define ASY_DATB    (LCD_CONTROL_BLOCK_BASE|0x70)
+#define ASY_DATC    (LCD_CONTROL_BLOCK_BASE|0x74)
+#define ASY_DATD    (LCD_CONTROL_BLOCK_BASE|0x78)
+#define ASY_DATE    (LCD_CONTROL_BLOCK_BASE|0x7C)
+#define ASY_DATF    (LCD_CONTROL_BLOCK_BASE|0x80)
+#define ASY_DATG    (LCD_CONTROL_BLOCK_BASE|0x84)
+#define ASY_DATH    (LCD_CONTROL_BLOCK_BASE|0x88)
+#define ASY_CMDSET  (LCD_CONTROL_BLOCK_BASE|0x8C)
+#define MONI        (LCD_CONTROL_BLOCK_BASE|0xB0)
+#define VPOS        (LCD_CONTROL_BLOCK_BASE|0xC0)
+
+#define SSICTL      (SPI_BLOCK_BASE|0x00)
+#define SSITIME     (SPI_BLOCK_BASE|0x04)
+#define SSITX       (SPI_BLOCK_BASE|0x08)
+#define SSIINTS     (SPI_BLOCK_BASE|0x14)
+
+#define TIMER0LOAD    (PWM_BLOCK_BASE|0x00)
+#define TIMER0CTRL    (PWM_BLOCK_BASE|0x08)
+#define PWM0OFF       (PWM_BLOCK_BASE|0x1C)
+#define TIMER1LOAD    (PWM_BLOCK_BASE|0x20)
+#define TIMER1CTRL    (PWM_BLOCK_BASE|0x28)
+#define PWM1OFF       (PWM_BLOCK_BASE|0x3C)
+#define TIMER2LOAD    (PWM_BLOCK_BASE|0x40)
+#define TIMER2CTRL    (PWM_BLOCK_BASE|0x48)
+#define PWM2OFF       (PWM_BLOCK_BASE|0x5C)
+#define PWMCR         (PWM_BLOCK_BASE|0x68)
+
+#define GPIOIS      (GPIO_BLOCK_BASE|0x08)
+#define GPIOIEV     (GPIO_BLOCK_BASE|0x10)
+#define GPIOIC      (GPIO_BLOCK_BASE|0x20)
+
+#define WKREQ       (SYSTEM_BLOCK1_BASE|0x00)
+#define CLKENB      (SYSTEM_BLOCK1_BASE|0x04)
+#define DRAMPWR     (SYSTEM_BLOCK1_BASE|0x08)
+#define INTMASK     (SYSTEM_BLOCK1_BASE|0x0C)
+#define CNT_DIS     (SYSTEM_BLOCK1_BASE|0x10)
+
+typedef enum {
+	TOSHIBA_STATE_OFF,
+	TOSHIBA_STATE_PRIM_SEC_STANDBY,
+	TOSHIBA_STATE_PRIM_SEC_READY,
+	TOSHIBA_STATE_PRIM_NORMAL_MODE,
+	TOSHIBA_STATE_SEC_NORMAL_MODE
+} mddi_toshiba_state_t;
+
+static uint32 mddi_toshiba_curr_vpos;
+static boolean mddi_toshiba_monitor_refresh_value = FALSE;
+static boolean mddi_toshiba_report_refresh_measurements = FALSE;
+
+boolean mddi_toshiba_61Hz_refresh = TRUE;
+
+/* Modifications to timing to increase refresh rate to > 60Hz.
+ *   20MHz dot clock.
+ *   646 total rows.
+ *   506 total columns.
+ *   refresh rate = 61.19Hz
+ */
+static uint32 mddi_toshiba_rows_per_second = 39526;
+static uint32 mddi_toshiba_usecs_per_refresh = 16344;
+static uint32 mddi_toshiba_rows_per_refresh = 646;
+extern boolean mddi_vsync_detect_enabled;
+
+static msm_fb_vsync_handler_type mddi_toshiba_vsync_handler;
+static void *mddi_toshiba_vsync_handler_arg;
+static uint16 mddi_toshiba_vsync_attempts;
+
+static mddi_toshiba_state_t toshiba_state = TOSHIBA_STATE_OFF;
+
+static struct msm_panel_common_pdata *mddi_toshiba_pdata;
+
+static int mddi_toshiba_lcd_on(struct platform_device *pdev);
+static int mddi_toshiba_lcd_off(struct platform_device *pdev);
+
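+/*
+ * Record a panel state change and warn if the driver was not in the state
+ * the caller expected; the new state is applied unconditionally.
+ */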
+static void mddi_toshiba_state_transition(mddi_toshiba_state_t a,
+					  mddi_toshiba_state_t b)
+{
+	if (toshiba_state != a) {
+		MDDI_MSG_ERR("toshiba state trans. (%d->%d) found %d\n", a, b,
+			     toshiba_state);
+	}
+	toshiba_state = b;
+}
+
+#define GORDON_REG_IMGCTL1      0x10	/* Image interface control 1   */
+#define GORDON_REG_IMGCTL2      0x11	/* Image interface control 2   */
+#define GORDON_REG_IMGSET1      0x12	/* Image interface settings 1  */
+#define GORDON_REG_IMGSET2      0x13	/* Image interface settings 2  */
+#define GORDON_REG_IVBP1        0x14	/* DM0: Vert back porch        */
+#define GORDON_REG_IHBP1        0x15	/* DM0: Horiz back porch       */
+#define GORDON_REG_IVNUM1       0x16	/* DM0: Num of vert lines      */
+#define GORDON_REG_IHNUM1       0x17	/* DM0: Num of pixels per line */
+#define GORDON_REG_IVBP2        0x18	/* DM1: Vert back porch        */
+#define GORDON_REG_IHBP2        0x19	/* DM1: Horiz back porch       */
+#define GORDON_REG_IVNUM2       0x1A	/* DM1: Num of vert lines      */
+#define GORDON_REG_IHNUM2       0x1B	/* DM1: Num of pixels per line */
+#define GORDON_REG_LCDIFCTL1    0x30	/* LCD interface control 1     */
+#define GORDON_REG_VALTRAN      0x31	/* LCD IF ctl: VALTRAN sync flag */
+#define GORDON_REG_AVCTL        0x33
+#define GORDON_REG_LCDIFCTL2    0x34	/* LCD interface control 2     */
+#define GORDON_REG_LCDIFCTL3    0x35	/* LCD interface control 3     */
+#define GORDON_REG_LCDIFSET1    0x36	/* LCD interface settings 1    */
+#define GORDON_REG_PCCTL        0x3C
+#define GORDON_REG_TPARAM1      0x40
+#define GORDON_REG_TLCDIF1      0x41
+#define GORDON_REG_TSSPB_ST1    0x42
+#define GORDON_REG_TSSPB_ED1    0x43
+#define GORDON_REG_TSCK_ST1     0x44
+#define GORDON_REG_TSCK_WD1     0x45
+#define GORDON_REG_TGSPB_VST1   0x46
+#define GORDON_REG_TGSPB_VED1   0x47
+#define GORDON_REG_TGSPB_CH1    0x48
+#define GORDON_REG_TGCK_ST1     0x49
+#define GORDON_REG_TGCK_ED1     0x4A
+#define GORDON_REG_TPCTL_ST1    0x4B
+#define GORDON_REG_TPCTL_ED1    0x4C
+#define GORDON_REG_TPCHG_ED1    0x4D
+#define GORDON_REG_TCOM_CH1     0x4E
+#define GORDON_REG_THBP1        0x4F
+#define GORDON_REG_TPHCTL1      0x50
+#define GORDON_REG_EVPH1        0x51
+#define GORDON_REG_EVPL1        0x52
+#define GORDON_REG_EVNH1        0x53
+#define GORDON_REG_EVNL1        0x54
+#define GORDON_REG_TBIAS1       0x55
+#define GORDON_REG_TPARAM2      0x56
+#define GORDON_REG_TLCDIF2      0x57
+#define GORDON_REG_TSSPB_ST2    0x58
+#define GORDON_REG_TSSPB_ED2    0x59
+#define GORDON_REG_TSCK_ST2     0x5A
+#define GORDON_REG_TSCK_WD2     0x5B
+#define GORDON_REG_TGSPB_VST2   0x5C
+#define GORDON_REG_TGSPB_VED2   0x5D
+#define GORDON_REG_TGSPB_CH2    0x5E
+#define GORDON_REG_TGCK_ST2     0x5F
+#define GORDON_REG_TGCK_ED2     0x60
+#define GORDON_REG_TPCTL_ST2    0x61
+#define GORDON_REG_TPCTL_ED2    0x62
+#define GORDON_REG_TPCHG_ED2    0x63
+#define GORDON_REG_TCOM_CH2     0x64
+#define GORDON_REG_THBP2        0x65
+#define GORDON_REG_TPHCTL2      0x66
+#define GORDON_REG_EVPH2        0x67
+#define GORDON_REG_EVPL2        0x68
+#define GORDON_REG_EVNH2        0x69
+#define GORDON_REG_EVNL2        0x6A
+#define GORDON_REG_TBIAS2       0x6B
+#define GORDON_REG_POWCTL       0x80
+#define GORDON_REG_POWOSC1      0x81
+#define GORDON_REG_POWOSC2      0x82
+#define GORDON_REG_POWSET       0x83
+#define GORDON_REG_POWTRM1      0x85
+#define GORDON_REG_POWTRM2      0x86
+#define GORDON_REG_POWTRM3      0x87
+#define GORDON_REG_POWTRMSEL    0x88
+#define GORDON_REG_POWHIZ       0x89
+
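+/*
+ * Send one register/data byte pair to the Gordon panel controller through
+ * the Toshiba bridge's SPI block: if SSIINTS still flags a pending transfer
+ * (bit 8), pause briefly, then queue the frame via SSITX.  The 0x9xxxx word
+ * carries the register index in bits 15:8 and the data byte in bits 7:0.
+ */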
+void serigo(uint16 reg, uint8 data)
+{
+	uint32 mddi_val = 0;
+	mddi_queue_register_read(SSIINTS, &mddi_val, TRUE, 0);
+	if (mddi_val & (1 << 8))
+		mddi_wait(1);
+	/* No De-assert of CS and send 2 bytes */
+	mddi_val = 0x90000 | ((0x00FF & reg) << 8) | data;
+	mddi_queue_register_write(SSITX, mddi_val, TRUE, 0);
+}
+
+void gordon_init(void)
+{
+	/* Image interface settings */
+	serigo(GORDON_REG_IMGCTL2, 0x00);
+	serigo(GORDON_REG_IMGSET1, 0x01);
+
+	/* Exchange the RGB signal for J510(Softbank mobile) */
+	serigo(GORDON_REG_IMGSET2, 0x12);
+	serigo(GORDON_REG_LCDIFSET1, 0x00);
+	mddi_wait(2);
+
+	/* Pre-charge settings */
+	serigo(GORDON_REG_PCCTL, 0x09);
+	serigo(GORDON_REG_LCDIFCTL2, 0x1B);
+	mddi_wait(1);
+}
+
+void gordon_disp_on(void)
+{
+	/*gordon_dispmode setting */
+	/*VGA settings */
+	serigo(GORDON_REG_TPARAM1, 0x30);
+	serigo(GORDON_REG_TLCDIF1, 0x00);
+	serigo(GORDON_REG_TSSPB_ST1, 0x8B);
+	serigo(GORDON_REG_TSSPB_ED1, 0x93);
+	mddi_wait(2);
+	serigo(GORDON_REG_TSCK_ST1, 0x88);
+	serigo(GORDON_REG_TSCK_WD1, 0x00);
+	serigo(GORDON_REG_TGSPB_VST1, 0x01);
+	serigo(GORDON_REG_TGSPB_VED1, 0x02);
+	mddi_wait(2);
+	serigo(GORDON_REG_TGSPB_CH1, 0x5E);
+	serigo(GORDON_REG_TGCK_ST1, 0x80);
+	serigo(GORDON_REG_TGCK_ED1, 0x3C);
+	serigo(GORDON_REG_TPCTL_ST1, 0x50);
+	mddi_wait(2);
+	serigo(GORDON_REG_TPCTL_ED1, 0x74);
+	serigo(GORDON_REG_TPCHG_ED1, 0x78);
+	serigo(GORDON_REG_TCOM_CH1, 0x50);
+	serigo(GORDON_REG_THBP1, 0x84);
+	mddi_wait(2);
+	serigo(GORDON_REG_TPHCTL1, 0x00);
+	serigo(GORDON_REG_EVPH1, 0x70);
+	serigo(GORDON_REG_EVPL1, 0x64);
+	serigo(GORDON_REG_EVNH1, 0x56);
+	mddi_wait(2);
+	serigo(GORDON_REG_EVNL1, 0x48);
+	serigo(GORDON_REG_TBIAS1, 0x88);
+	mddi_wait(2);
+	serigo(GORDON_REG_TPARAM2, 0x28);
+	serigo(GORDON_REG_TLCDIF2, 0x14);
+	serigo(GORDON_REG_TSSPB_ST2, 0x49);
+	serigo(GORDON_REG_TSSPB_ED2, 0x4B);
+	mddi_wait(2);
+	serigo(GORDON_REG_TSCK_ST2, 0x4A);
+	serigo(GORDON_REG_TSCK_WD2, 0x02);
+	serigo(GORDON_REG_TGSPB_VST2, 0x02);
+	serigo(GORDON_REG_TGSPB_VED2, 0x03);
+	mddi_wait(2);
+	serigo(GORDON_REG_TGSPB_CH2, 0x2F);
+	serigo(GORDON_REG_TGCK_ST2, 0x40);
+	serigo(GORDON_REG_TGCK_ED2, 0x1E);
+	serigo(GORDON_REG_TPCTL_ST2, 0x2C);
+	mddi_wait(2);
+	serigo(GORDON_REG_TPCTL_ED2, 0x3A);
+	serigo(GORDON_REG_TPCHG_ED2, 0x3C);
+	serigo(GORDON_REG_TCOM_CH2, 0x28);
+	serigo(GORDON_REG_THBP2, 0x4D);
+	mddi_wait(2);
+	serigo(GORDON_REG_TPHCTL2, 0x1A);
+	mddi_wait(2);
+	serigo(GORDON_REG_IVBP1, 0x02);
+	serigo(GORDON_REG_IHBP1, 0x90);
+	serigo(GORDON_REG_IVNUM1, 0xA0);
+	serigo(GORDON_REG_IHNUM1, 0x78);
+	mddi_wait(2);
+	serigo(GORDON_REG_IVBP2, 0x02);
+	serigo(GORDON_REG_IHBP2, 0x48);
+	serigo(GORDON_REG_IVNUM2, 0x50);
+	serigo(GORDON_REG_IHNUM2, 0x3C);
+	mddi_wait(2);
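+	/* Power-on ramp: POWCTL supply-enable bits are switched on one step
+	 * at a time with ~15 ms settling between writes, after which the LCD
+	 * interface is enabled. */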
+	serigo(GORDON_REG_POWCTL, 0x03);
+	mddi_wait(15);
+	serigo(GORDON_REG_POWCTL, 0x07);
+	mddi_wait(15);
+	serigo(GORDON_REG_POWCTL, 0x0F);
+	mddi_wait(15);
+	serigo(GORDON_REG_AVCTL, 0x03);
+	mddi_wait(15);
+	serigo(GORDON_REG_POWCTL, 0x1F);
+	mddi_wait(15);
+	serigo(GORDON_REG_POWCTL, 0x5F);
+	mddi_wait(15);
+	serigo(GORDON_REG_POWCTL, 0x7F);
+	mddi_wait(15);
+	serigo(GORDON_REG_LCDIFCTL1, 0x02);
+	mddi_wait(15);
+	serigo(GORDON_REG_IMGCTL1, 0x00);
+	mddi_wait(15);
+	serigo(GORDON_REG_LCDIFCTL3, 0x00);
+	mddi_wait(15);
+	serigo(GORDON_REG_VALTRAN, 0x01);
+	mddi_wait(15);
+	serigo(GORDON_REG_LCDIFCTL1, 0x03);
+	serigo(GORDON_REG_LCDIFCTL1, 0x03);
+	mddi_wait(1);
+}
+
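+/*
+ * Reverse of gordon_disp_on(): shut the LCD interface down first, then step
+ * POWCTL back to zero with ~40 ms settling between writes.
+ */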
+void gordon_disp_off(void)
+{
+	serigo(GORDON_REG_LCDIFCTL2, 0x7B);
+	serigo(GORDON_REG_VALTRAN, 0x01);
+	serigo(GORDON_REG_LCDIFCTL1, 0x02);
+	serigo(GORDON_REG_LCDIFCTL3, 0x01);
+	mddi_wait(20);
+	serigo(GORDON_REG_VALTRAN, 0x01);
+	serigo(GORDON_REG_IMGCTL1, 0x01);
+	serigo(GORDON_REG_LCDIFCTL1, 0x00);
+	mddi_wait(20);
+	serigo(GORDON_REG_POWCTL, 0x1F);
+	mddi_wait(40);
+	serigo(GORDON_REG_POWCTL, 0x07);
+	mddi_wait(40);
+	serigo(GORDON_REG_POWCTL, 0x03);
+	mddi_wait(40);
+	serigo(GORDON_REG_POWCTL, 0x00);
+	mddi_wait(40);
+}
+
+void gordon_disp_init(void)
+{
+	gordon_init();
+	mddi_wait(20);
+	gordon_disp_on();
+}
+
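+/*
+ * One-time bridge and panel bring-up.  The sequence below first configures
+ * the bridge clocking (DPSET0/1, DPRUN, SYSCKENA, CLKENB) and GPIO routing,
+ * then issues the panel-specific initialisation over SSI or the async
+ * command interface, selected by TM_GET_PID(mfd->panel.id).
+ */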
+static void toshiba_common_initial_setup(struct msm_fb_data_type *mfd)
+{
+	if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT) {
+		write_client_reg(DPSET0    , 0x4bec0066, TRUE);
+		write_client_reg(DPSET1    , 0x00000113, TRUE);
+		write_client_reg(DPSUS     , 0x00000000, TRUE);
+		write_client_reg(DPRUN     , 0x00000001, TRUE);
+		mddi_wait(5);
+		write_client_reg(SYSCKENA  , 0x00000001, TRUE);
+		write_client_reg(CLKENB    , 0x0000a0e9, TRUE);
+
+		write_client_reg(GPIODATA  , 0x03FF0000, TRUE);
+		write_client_reg(GPIODIR   , 0x0000024D, TRUE);
+		write_client_reg(GPIOSEL   , 0x00000173, TRUE);
+		write_client_reg(GPIOPC    , 0x03C300C0, TRUE);
+		write_client_reg(WKREQ     , 0x00000000, TRUE);
+		write_client_reg(GPIOIS    , 0x00000000, TRUE);
+		write_client_reg(GPIOIEV   , 0x00000001, TRUE);
+		write_client_reg(GPIOIC    , 0x000003FF, TRUE);
+		write_client_reg(GPIODATA  , 0x00040004, TRUE);
+
+		write_client_reg(GPIODATA  , 0x00080008, TRUE);
+		write_client_reg(DRAMPWR   , 0x00000001, TRUE);
+		write_client_reg(CLKENB    , 0x0000a0eb, TRUE);
+		write_client_reg(PWMCR     , 0x00000000, TRUE);
+		mddi_wait(1);
+
+		write_client_reg(SSICTL    , 0x00060399, TRUE);
+		write_client_reg(SSITIME   , 0x00000100, TRUE);
+		write_client_reg(CNT_DIS   , 0x00000002, TRUE);
+		write_client_reg(SSICTL    , 0x0006039b, TRUE);
+
+		write_client_reg(SSITX     , 0x00000000, TRUE);
+		mddi_wait(7);
+		write_client_reg(SSITX     , 0x00000000, TRUE);
+		mddi_wait(7);
+		write_client_reg(SSITX     , 0x00000000, TRUE);
+		mddi_wait(7);
+
+		write_client_reg(SSITX     , 0x000800BA, TRUE);
+		write_client_reg(SSITX     , 0x00000111, TRUE);
+		write_client_reg(SSITX     , 0x00080036, TRUE);
+		write_client_reg(SSITX     , 0x00000100, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX     , 0x0008003A, TRUE);
+		write_client_reg(SSITX     , 0x00000160, TRUE);
+		write_client_reg(SSITX     , 0x000800B1, TRUE);
+		write_client_reg(SSITX     , 0x0000015D, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX     , 0x000800B2, TRUE);
+		write_client_reg(SSITX     , 0x00000133, TRUE);
+		write_client_reg(SSITX     , 0x000800B3, TRUE);
+		write_client_reg(SSITX     , 0x00000122, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX     , 0x000800B4, TRUE);
+		write_client_reg(SSITX     , 0x00000102, TRUE);
+		write_client_reg(SSITX     , 0x000800B5, TRUE);
+		write_client_reg(SSITX     , 0x0000011E, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX     , 0x000800B6, TRUE);
+		write_client_reg(SSITX     , 0x00000127, TRUE);
+		write_client_reg(SSITX     , 0x000800B7, TRUE);
+		write_client_reg(SSITX     , 0x00000103, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX     , 0x000800B9, TRUE);
+		write_client_reg(SSITX     , 0x00000124, TRUE);
+		write_client_reg(SSITX     , 0x000800BD, TRUE);
+		write_client_reg(SSITX     , 0x000001A1, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX     , 0x000800BB, TRUE);
+		write_client_reg(SSITX     , 0x00000100, TRUE);
+		write_client_reg(SSITX     , 0x000800BF, TRUE);
+		write_client_reg(SSITX     , 0x00000101, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX     , 0x000800BE, TRUE);
+		write_client_reg(SSITX     , 0x00000100, TRUE);
+		write_client_reg(SSITX     , 0x000800C0, TRUE);
+		write_client_reg(SSITX     , 0x00000111, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX     , 0x000800C1, TRUE);
+		write_client_reg(SSITX     , 0x00000111, TRUE);
+		write_client_reg(SSITX     , 0x000800C2, TRUE);
+		write_client_reg(SSITX     , 0x00000111, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX     , 0x000800C3, TRUE);
+		write_client_reg(SSITX     , 0x00080132, TRUE);
+		write_client_reg(SSITX     , 0x00000132, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX     , 0x000800C4, TRUE);
+		write_client_reg(SSITX     , 0x00080132, TRUE);
+		write_client_reg(SSITX     , 0x00000132, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX     , 0x000800C5, TRUE);
+		write_client_reg(SSITX     , 0x00080132, TRUE);
+		write_client_reg(SSITX     , 0x00000132, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX     , 0x000800C6, TRUE);
+		write_client_reg(SSITX     , 0x00080132, TRUE);
+		write_client_reg(SSITX     , 0x00000132, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX     , 0x000800C7, TRUE);
+		write_client_reg(SSITX     , 0x00080164, TRUE);
+		write_client_reg(SSITX     , 0x00000145, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX     , 0x000800C8, TRUE);
+		write_client_reg(SSITX     , 0x00000144, TRUE);
+		write_client_reg(SSITX     , 0x000800C9, TRUE);
+		write_client_reg(SSITX     , 0x00000152, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX     , 0x000800CA, TRUE);
+		write_client_reg(SSITX     , 0x00000100, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX     , 0x000800EC, TRUE);
+		write_client_reg(SSITX     , 0x00080101, TRUE);
+		write_client_reg(SSITX     , 0x000001FC, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX     , 0x000800CF, TRUE);
+		write_client_reg(SSITX     , 0x00000101, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX     , 0x000800D0, TRUE);
+		write_client_reg(SSITX     , 0x00080110, TRUE);
+		write_client_reg(SSITX     , 0x00000104, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX     , 0x000800D1, TRUE);
+		write_client_reg(SSITX     , 0x00000101, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX     , 0x000800D2, TRUE);
+		write_client_reg(SSITX     , 0x00080100, TRUE);
+		write_client_reg(SSITX     , 0x00000128, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX     , 0x000800D3, TRUE);
+		write_client_reg(SSITX     , 0x00080100, TRUE);
+		write_client_reg(SSITX     , 0x00000128, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX     , 0x000800D4, TRUE);
+		write_client_reg(SSITX     , 0x00080126, TRUE);
+		write_client_reg(SSITX     , 0x000001A4, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX     , 0x000800D5, TRUE);
+		write_client_reg(SSITX     , 0x00000120, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX     , 0x000800EF, TRUE);
+		write_client_reg(SSITX     , 0x00080132, TRUE);
+		write_client_reg(SSITX     , 0x00000100, TRUE);
+		mddi_wait(1);
+
+		write_client_reg(BITMAP0   , 0x032001E0, TRUE);
+		write_client_reg(BITMAP1   , 0x032001E0, TRUE);
+		write_client_reg(BITMAP2   , 0x014000F0, TRUE);
+		write_client_reg(BITMAP3   , 0x014000F0, TRUE);
+		write_client_reg(BITMAP4   , 0x014000F0, TRUE);
+		write_client_reg(CLKENB    , 0x0000A1EB, TRUE);
+		write_client_reg(PORT_ENB  , 0x00000001, TRUE);
+		write_client_reg(PORT      , 0x00000004, TRUE);
+		write_client_reg(PXL       , 0x00000002, TRUE);
+		write_client_reg(MPLFBUF   , 0x00000000, TRUE);
+		write_client_reg(HCYCLE    , 0x000000FD, TRUE);
+		write_client_reg(HSW       , 0x00000003, TRUE);
+		write_client_reg(HDE_START , 0x00000007, TRUE);
+		write_client_reg(HDE_SIZE  , 0x000000EF, TRUE);
+		write_client_reg(VCYCLE    , 0x00000325, TRUE);
+		write_client_reg(VSW       , 0x00000001, TRUE);
+		write_client_reg(VDE_START , 0x00000003, TRUE);
+		write_client_reg(VDE_SIZE  , 0x0000031F, TRUE);
+		write_client_reg(START     , 0x00000001, TRUE);
+		mddi_wait(32);
+		write_client_reg(SSITX     , 0x000800BC, TRUE);
+		write_client_reg(SSITX     , 0x00000180, TRUE);
+		write_client_reg(SSITX     , 0x0008003B, TRUE);
+		write_client_reg(SSITX     , 0x00000100, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX     , 0x000800B0, TRUE);
+		write_client_reg(SSITX     , 0x00000116, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX     , 0x000800B8, TRUE);
+		write_client_reg(SSITX     , 0x000801FF, TRUE);
+		write_client_reg(SSITX     , 0x000001F5, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX     , 0x00000011, TRUE);
+		mddi_wait(5);
+		write_client_reg(SSITX     , 0x00000029, TRUE);
+		return;
+	}
+
+	if (TM_GET_PID(mfd->panel.id) == LCD_SHARP_2P4_VGA) {
+		write_client_reg(DPSET0, 0x4BEC0066, TRUE);
+		write_client_reg(DPSET1, 0x00000113, TRUE);
+		write_client_reg(DPSUS, 0x00000000, TRUE);
+		write_client_reg(DPRUN, 0x00000001, TRUE);
+		mddi_wait(14);
+		write_client_reg(SYSCKENA, 0x00000001, TRUE);
+		write_client_reg(CLKENB, 0x000000EF, TRUE);
+		write_client_reg(GPIO_BLOCK_BASE, 0x03FF0000, TRUE);
+		write_client_reg(GPIODIR, 0x0000024D, TRUE);
+		write_client_reg(SYSTEM_BLOCK2_BASE, 0x00000173, TRUE);
+		write_client_reg(GPIOPC, 0x03C300C0, TRUE);
+		write_client_reg(SYSTEM_BLOCK1_BASE, 0x00000000, TRUE);
+		write_client_reg(GPIOIS, 0x00000000, TRUE);
+		write_client_reg(GPIOIEV, 0x00000001, TRUE);
+		write_client_reg(GPIOIC, 0x000003FF, TRUE);
+		write_client_reg(GPIO_BLOCK_BASE, 0x00060006, TRUE);
+		write_client_reg(GPIO_BLOCK_BASE, 0x00080008, TRUE);
+		write_client_reg(GPIO_BLOCK_BASE, 0x02000200, TRUE);
+		write_client_reg(DRAMPWR, 0x00000001, TRUE);
+		write_client_reg(TIMER0CTRL, 0x00000060, TRUE);
+		write_client_reg(PWM_BLOCK_BASE, 0x00001388, TRUE);
+		write_client_reg(PWM0OFF, 0x00001387, TRUE);
+		write_client_reg(TIMER1CTRL, 0x00000060, TRUE);
+		write_client_reg(TIMER1LOAD, 0x00001388, TRUE);
+		write_client_reg(PWM1OFF, 0x00001387, TRUE);
+		write_client_reg(TIMER0CTRL, 0x000000E0, TRUE);
+		write_client_reg(TIMER1CTRL, 0x000000E0, TRUE);
+		write_client_reg(PWMCR, 0x00000003, TRUE);
+		mddi_wait(1);
+		write_client_reg(SPI_BLOCK_BASE, 0x00063111, TRUE);
+		write_client_reg(SSITIME, 0x00000100, TRUE);
+		write_client_reg(SPI_BLOCK_BASE, 0x00063113, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX, 0x00000000, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX, 0x00000000, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX, 0x00000000, TRUE);
+		mddi_wait(1);
+		write_client_reg(CLKENB, 0x0000A1EF, TRUE);
+		write_client_reg(START, 0x00000000, TRUE);
+		write_client_reg(WRSTB, 0x0000003F, TRUE);
+		write_client_reg(RDSTB, 0x00000432, TRUE);
+		write_client_reg(PORT_ENB, 0x00000002, TRUE);
+		write_client_reg(VSYNIF, 0x00000000, TRUE);
+		write_client_reg(ASY_DATA, 0x80000000, TRUE);
+		write_client_reg(ASY_DATB, 0x00000001, TRUE);
+		write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
+		write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
+		mddi_wait(10);
+		write_client_reg(ASY_DATA, 0x80000000, TRUE);
+		write_client_reg(ASY_DATB, 0x80000000, TRUE);
+		write_client_reg(ASY_DATC, 0x80000000, TRUE);
+		write_client_reg(ASY_DATD, 0x80000000, TRUE);
+		write_client_reg(ASY_CMDSET, 0x00000009, TRUE);
+		write_client_reg(ASY_CMDSET, 0x00000008, TRUE);
+		write_client_reg(ASY_DATA, 0x80000007, TRUE);
+		write_client_reg(ASY_DATB, 0x00004005, TRUE);
+		write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
+		write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
+		mddi_wait(20);
+		write_client_reg(ASY_DATA, 0x80000059, TRUE);
+		write_client_reg(ASY_DATB, 0x00000000, TRUE);
+		write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
+		write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
+
+		write_client_reg(VSYNIF, 0x00000001, TRUE);
+		write_client_reg(PORT_ENB, 0x00000001, TRUE);
+	} else {
+		write_client_reg(DPSET0, 0x4BEC0066, TRUE);
+		write_client_reg(DPSET1, 0x00000113, TRUE);
+		write_client_reg(DPSUS, 0x00000000, TRUE);
+		write_client_reg(DPRUN, 0x00000001, TRUE);
+		mddi_wait(14);
+		write_client_reg(SYSCKENA, 0x00000001, TRUE);
+		write_client_reg(CLKENB, 0x000000EF, TRUE);
+		write_client_reg(GPIODATA, 0x03FF0000, TRUE);
+		write_client_reg(GPIODIR, 0x0000024D, TRUE);
+		write_client_reg(GPIOSEL, 0x00000173, TRUE);
+		write_client_reg(GPIOPC, 0x03C300C0, TRUE);
+		write_client_reg(WKREQ, 0x00000000, TRUE);
+		write_client_reg(GPIOIS, 0x00000000, TRUE);
+		write_client_reg(GPIOIEV, 0x00000001, TRUE);
+		write_client_reg(GPIOIC, 0x000003FF, TRUE);
+		write_client_reg(GPIODATA, 0x00060006, TRUE);
+		write_client_reg(GPIODATA, 0x00080008, TRUE);
+		write_client_reg(GPIODATA, 0x02000200, TRUE);
+
+		if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA) {
+			mddi_wait(400);
+			write_client_reg(DRAMPWR, 0x00000001, TRUE);
+
+			write_client_reg(CNT_DIS, 0x00000002, TRUE);
+			write_client_reg(BITMAP0, 0x01E00320, TRUE);
+			write_client_reg(PORT_ENB, 0x00000001, TRUE);
+			write_client_reg(PORT, 0x00000004, TRUE);
+			write_client_reg(PXL, 0x0000003A, TRUE);
+			write_client_reg(MPLFBUF, 0x00000000, TRUE);
+			write_client_reg(HCYCLE, 0x00000253, TRUE);
+			write_client_reg(HSW, 0x00000003, TRUE);
+			write_client_reg(HDE_START, 0x00000017, TRUE);
+			write_client_reg(HDE_SIZE, 0x0000018F, TRUE);
+			write_client_reg(VCYCLE, 0x000001FF, TRUE);
+			write_client_reg(VSW, 0x00000001, TRUE);
+			write_client_reg(VDE_START, 0x00000003, TRUE);
+			write_client_reg(VDE_SIZE, 0x000001DF, TRUE);
+			write_client_reg(START, 0x00000001, TRUE);
+			mddi_wait(1);
+			write_client_reg(TIMER0CTRL, 0x00000060, TRUE);
+			write_client_reg(TIMER0LOAD, 0x00001388, TRUE);
+			write_client_reg(TIMER1CTRL, 0x00000060, TRUE);
+			write_client_reg(TIMER1LOAD, 0x00001388, TRUE);
+			write_client_reg(PWM1OFF, 0x00000087, TRUE);
+		} else {
+			write_client_reg(DRAMPWR, 0x00000001, TRUE);
+			write_client_reg(TIMER0CTRL, 0x00000060, TRUE);
+			write_client_reg(TIMER0LOAD, 0x00001388, TRUE);
+			write_client_reg(TIMER1CTRL, 0x00000060, TRUE);
+			write_client_reg(TIMER1LOAD, 0x00001388, TRUE);
+			write_client_reg(PWM1OFF, 0x00001387, TRUE);
+		}
+
+		write_client_reg(TIMER0CTRL, 0x000000E0, TRUE);
+		write_client_reg(TIMER1CTRL, 0x000000E0, TRUE);
+		write_client_reg(PWMCR, 0x00000003, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSICTL, 0x00000799, TRUE);
+		write_client_reg(SSITIME, 0x00000100, TRUE);
+		write_client_reg(SSICTL, 0x0000079b, TRUE);
+		write_client_reg(SSITX, 0x00000000, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX, 0x00000000, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX, 0x00000000, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX, 0x000800BA, TRUE);
+		write_client_reg(SSITX, 0x00000111, TRUE);
+		write_client_reg(SSITX, 0x00080036, TRUE);
+		write_client_reg(SSITX, 0x00000100, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800BB, TRUE);
+		write_client_reg(SSITX, 0x00000100, TRUE);
+		write_client_reg(SSITX, 0x0008003A, TRUE);
+		write_client_reg(SSITX, 0x00000160, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800BF, TRUE);
+		write_client_reg(SSITX, 0x00000100, TRUE);
+		write_client_reg(SSITX, 0x000800B1, TRUE);
+		write_client_reg(SSITX, 0x0000015D, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800B2, TRUE);
+		write_client_reg(SSITX, 0x00000133, TRUE);
+		write_client_reg(SSITX, 0x000800B3, TRUE);
+		write_client_reg(SSITX, 0x00000122, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800B4, TRUE);
+		write_client_reg(SSITX, 0x00000102, TRUE);
+		write_client_reg(SSITX, 0x000800B5, TRUE);
+		write_client_reg(SSITX, 0x0000011F, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800B6, TRUE);
+		write_client_reg(SSITX, 0x00000128, TRUE);
+		write_client_reg(SSITX, 0x000800B7, TRUE);
+		write_client_reg(SSITX, 0x00000103, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800B9, TRUE);
+		write_client_reg(SSITX, 0x00000120, TRUE);
+		write_client_reg(SSITX, 0x000800BD, TRUE);
+		write_client_reg(SSITX, 0x00000102, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800BE, TRUE);
+		write_client_reg(SSITX, 0x00000100, TRUE);
+		write_client_reg(SSITX, 0x000800C0, TRUE);
+		write_client_reg(SSITX, 0x00000111, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800C1, TRUE);
+		write_client_reg(SSITX, 0x00000111, TRUE);
+		write_client_reg(SSITX, 0x000800C2, TRUE);
+		write_client_reg(SSITX, 0x00000111, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800C3, TRUE);
+		write_client_reg(SSITX, 0x0008010A, TRUE);
+		write_client_reg(SSITX, 0x0000010A, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800C4, TRUE);
+		write_client_reg(SSITX, 0x00080160, TRUE);
+		write_client_reg(SSITX, 0x00000160, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800C5, TRUE);
+		write_client_reg(SSITX, 0x00080160, TRUE);
+		write_client_reg(SSITX, 0x00000160, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800C6, TRUE);
+		write_client_reg(SSITX, 0x00080160, TRUE);
+		write_client_reg(SSITX, 0x00000160, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800C7, TRUE);
+		write_client_reg(SSITX, 0x00080133, TRUE);
+		write_client_reg(SSITX, 0x00000143, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800C8, TRUE);
+		write_client_reg(SSITX, 0x00000144, TRUE);
+		write_client_reg(SSITX, 0x000800C9, TRUE);
+		write_client_reg(SSITX, 0x00000133, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800CA, TRUE);
+		write_client_reg(SSITX, 0x00000100, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800EC, TRUE);
+		write_client_reg(SSITX, 0x00080102, TRUE);
+		write_client_reg(SSITX, 0x00000118, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800CF, TRUE);
+		write_client_reg(SSITX, 0x00000101, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800D0, TRUE);
+		write_client_reg(SSITX, 0x00080110, TRUE);
+		write_client_reg(SSITX, 0x00000104, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800D1, TRUE);
+		write_client_reg(SSITX, 0x00000101, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800D2, TRUE);
+		write_client_reg(SSITX, 0x00080100, TRUE);
+		write_client_reg(SSITX, 0x0000013A, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800D3, TRUE);
+		write_client_reg(SSITX, 0x00080100, TRUE);
+		write_client_reg(SSITX, 0x0000013A, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800D4, TRUE);
+		write_client_reg(SSITX, 0x00080124, TRUE);
+		write_client_reg(SSITX, 0x0000016E, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX, 0x000800D5, TRUE);
+		write_client_reg(SSITX, 0x00000124, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800ED, TRUE);
+		write_client_reg(SSITX, 0x00080101, TRUE);
+		write_client_reg(SSITX, 0x0000010A, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800D6, TRUE);
+		write_client_reg(SSITX, 0x00000101, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800D7, TRUE);
+		write_client_reg(SSITX, 0x00080110, TRUE);
+		write_client_reg(SSITX, 0x0000010A, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800D8, TRUE);
+		write_client_reg(SSITX, 0x00000101, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800D9, TRUE);
+		write_client_reg(SSITX, 0x00080100, TRUE);
+		write_client_reg(SSITX, 0x00000114, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800DE, TRUE);
+		write_client_reg(SSITX, 0x00080100, TRUE);
+		write_client_reg(SSITX, 0x00000114, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800DF, TRUE);
+		write_client_reg(SSITX, 0x00080112, TRUE);
+		write_client_reg(SSITX, 0x0000013F, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800E0, TRUE);
+		write_client_reg(SSITX, 0x0000010B, TRUE);
+		write_client_reg(SSITX, 0x000800E2, TRUE);
+		write_client_reg(SSITX, 0x00000101, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800E3, TRUE);
+		write_client_reg(SSITX, 0x00000136, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800E4, TRUE);
+		write_client_reg(SSITX, 0x00080100, TRUE);
+		write_client_reg(SSITX, 0x00000103, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800E5, TRUE);
+		write_client_reg(SSITX, 0x00080102, TRUE);
+		write_client_reg(SSITX, 0x00000104, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800E6, TRUE);
+		write_client_reg(SSITX, 0x00000103, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800E7, TRUE);
+		write_client_reg(SSITX, 0x00080104, TRUE);
+		write_client_reg(SSITX, 0x0000010A, TRUE);
+		mddi_wait(2);
+		write_client_reg(SSITX, 0x000800E8, TRUE);
+		write_client_reg(SSITX, 0x00000104, TRUE);
+		write_client_reg(CLKENB, 0x000001EF, TRUE);
+		write_client_reg(START, 0x00000000, TRUE);
+		write_client_reg(WRSTB, 0x0000003F, TRUE);
+		write_client_reg(RDSTB, 0x00000432, TRUE);
+		write_client_reg(PORT_ENB, 0x00000002, TRUE);
+		write_client_reg(VSYNIF, 0x00000000, TRUE);
+		write_client_reg(ASY_DATA, 0x80000000, TRUE);
+		write_client_reg(ASY_DATB, 0x00000001, TRUE);
+		write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
+		write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
+		mddi_wait(10);
+		write_client_reg(ASY_DATA, 0x80000000, TRUE);
+		write_client_reg(ASY_DATB, 0x80000000, TRUE);
+		write_client_reg(ASY_DATC, 0x80000000, TRUE);
+		write_client_reg(ASY_DATD, 0x80000000, TRUE);
+		write_client_reg(ASY_CMDSET, 0x00000009, TRUE);
+		write_client_reg(ASY_CMDSET, 0x00000008, TRUE);
+		write_client_reg(ASY_DATA, 0x80000007, TRUE);
+		write_client_reg(ASY_DATB, 0x00004005, TRUE);
+		write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
+		write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
+		mddi_wait(20);
+		write_client_reg(ASY_DATA, 0x80000059, TRUE);
+		write_client_reg(ASY_DATB, 0x00000000, TRUE);
+		write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
+		write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
+		write_client_reg(VSYNIF, 0x00000001, TRUE);
+		write_client_reg(PORT_ENB, 0x00000001, TRUE);
+	}
+
+	mddi_toshiba_state_transition(TOSHIBA_STATE_PRIM_SEC_STANDBY,
+				      TOSHIBA_STATE_PRIM_SEC_READY);
+}
+
+static void toshiba_prim_start(struct msm_fb_data_type *mfd)
+{
+	if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
+		return;
+
+	if (TM_GET_PID(mfd->panel.id) == LCD_SHARP_2P4_VGA) {
+		write_client_reg(BITMAP1, 0x01E000F0, TRUE);
+		write_client_reg(BITMAP2, 0x01E000F0, TRUE);
+		write_client_reg(BITMAP3, 0x01E000F0, TRUE);
+		write_client_reg(BITMAP4, 0x00DC00B0, TRUE);
+		write_client_reg(CLKENB, 0x000001EF, TRUE);
+		write_client_reg(PORT_ENB, 0x00000001, TRUE);
+		write_client_reg(PORT, 0x00000016, TRUE);
+		write_client_reg(PXL, 0x00000002, TRUE);
+		write_client_reg(MPLFBUF, 0x00000000, TRUE);
+		write_client_reg(HCYCLE, 0x00000185, TRUE);
+		write_client_reg(HSW, 0x00000018, TRUE);
+		write_client_reg(HDE_START, 0x0000004A, TRUE);
+		write_client_reg(HDE_SIZE, 0x000000EF, TRUE);
+		write_client_reg(VCYCLE, 0x0000028E, TRUE);
+		write_client_reg(VSW, 0x00000004, TRUE);
+		write_client_reg(VDE_START, 0x00000009, TRUE);
+		write_client_reg(VDE_SIZE, 0x0000027F, TRUE);
+		write_client_reg(START, 0x00000001, TRUE);
+		write_client_reg(SYSTEM_BLOCK1_BASE, 0x00000002, TRUE);
+	} else {
+		write_client_reg(VSYNIF, 0x00000001, TRUE);
+		write_client_reg(PORT_ENB, 0x00000001, TRUE);
+		write_client_reg(BITMAP1, 0x01E000F0, TRUE);
+		write_client_reg(BITMAP2, 0x01E000F0, TRUE);
+		write_client_reg(BITMAP3, 0x01E000F0, TRUE);
+		write_client_reg(BITMAP4, 0x00DC00B0, TRUE);
+		write_client_reg(CLKENB, 0x000001EF, TRUE);
+		write_client_reg(PORT_ENB, 0x00000001, TRUE);
+		write_client_reg(PORT, 0x00000004, TRUE);
+		write_client_reg(PXL, 0x00000002, TRUE);
+		write_client_reg(MPLFBUF, 0x00000000, TRUE);
+
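+		/*
+		 * HCYCLE sets the row rate: 39526 rows/s over 646 rows per
+		 * refresh is ~61 Hz, while 37313 rows/s is ~58 Hz, matching
+		 * the usecs_per_refresh values used below.
+		 */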
+		if (mddi_toshiba_61Hz_refresh) {
+			write_client_reg(HCYCLE, 0x000000FC, TRUE);
+			mddi_toshiba_rows_per_second = 39526;
+			mddi_toshiba_rows_per_refresh = 646;
+			mddi_toshiba_usecs_per_refresh = 16344;
+		} else {
+			write_client_reg(HCYCLE, 0x0000010b, TRUE);
+			mddi_toshiba_rows_per_second = 37313;
+			mddi_toshiba_rows_per_refresh = 646;
+			mddi_toshiba_usecs_per_refresh = 17313;
+		}
+
+		write_client_reg(HSW, 0x00000003, TRUE);
+		write_client_reg(HDE_START, 0x00000007, TRUE);
+		write_client_reg(HDE_SIZE, 0x000000EF, TRUE);
+		write_client_reg(VCYCLE, 0x00000285, TRUE);
+		write_client_reg(VSW, 0x00000001, TRUE);
+		write_client_reg(VDE_START, 0x00000003, TRUE);
+		write_client_reg(VDE_SIZE, 0x0000027F, TRUE);
+		write_client_reg(START, 0x00000001, TRUE);
+		mddi_wait(10);
+		write_client_reg(SSITX, 0x000800BC, TRUE);
+		write_client_reg(SSITX, 0x00000180, TRUE);
+		write_client_reg(SSITX, 0x0008003B, TRUE);
+		write_client_reg(SSITX, 0x00000100, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX, 0x000800B0, TRUE);
+		write_client_reg(SSITX, 0x00000116, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX, 0x000800B8, TRUE);
+		write_client_reg(SSITX, 0x000801FF, TRUE);
+		write_client_reg(SSITX, 0x000001F5, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX, 0x00000011, TRUE);
+		write_client_reg(SSITX, 0x00000029, TRUE);
+		write_client_reg(WKREQ, 0x00000000, TRUE);
+		write_client_reg(WAKEUP, 0x00000000, TRUE);
+		write_client_reg(INTMSK, 0x00000001, TRUE);
+	}
+
+	mddi_toshiba_state_transition(TOSHIBA_STATE_PRIM_SEC_READY,
+				      TOSHIBA_STATE_PRIM_NORMAL_MODE);
+}
+
+static void toshiba_sec_start(struct msm_fb_data_type *mfd)
+{
+	if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
+		return;
+
+	write_client_reg(VSYNIF, 0x00000000, TRUE);
+	write_client_reg(PORT_ENB, 0x00000002, TRUE);
+	write_client_reg(CLKENB, 0x000011EF, TRUE);
+	write_client_reg(BITMAP0, 0x028001E0, TRUE);
+	write_client_reg(BITMAP1, 0x00000000, TRUE);
+	write_client_reg(BITMAP2, 0x00000000, TRUE);
+	write_client_reg(BITMAP3, 0x00000000, TRUE);
+	write_client_reg(BITMAP4, 0x00DC00B0, TRUE);
+	write_client_reg(PORT, 0x00000000, TRUE);
+	write_client_reg(PXL, 0x00000000, TRUE);
+	write_client_reg(MPLFBUF, 0x00000004, TRUE);
+	write_client_reg(HCYCLE, 0x0000006B, TRUE);
+	write_client_reg(HSW, 0x00000003, TRUE);
+	write_client_reg(HDE_START, 0x00000007, TRUE);
+	write_client_reg(HDE_SIZE, 0x00000057, TRUE);
+	write_client_reg(VCYCLE, 0x000000E6, TRUE);
+	write_client_reg(VSW, 0x00000001, TRUE);
+	write_client_reg(VDE_START, 0x00000003, TRUE);
+	write_client_reg(VDE_SIZE, 0x000000DB, TRUE);
+	write_client_reg(ASY_DATA, 0x80000001, TRUE);
+	write_client_reg(ASY_DATB, 0x0000011B, TRUE);
+	write_client_reg(ASY_DATC, 0x80000002, TRUE);
+	write_client_reg(ASY_DATD, 0x00000700, TRUE);
+	write_client_reg(ASY_DATE, 0x80000003, TRUE);
+	write_client_reg(ASY_DATF, 0x00000230, TRUE);
+	write_client_reg(ASY_DATG, 0x80000008, TRUE);
+	write_client_reg(ASY_DATH, 0x00000402, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
+	write_client_reg(ASY_DATA, 0x80000009, TRUE);
+	write_client_reg(ASY_DATB, 0x00000000, TRUE);
+	write_client_reg(ASY_DATC, 0x8000000B, TRUE);
+	write_client_reg(ASY_DATD, 0x00000000, TRUE);
+	write_client_reg(ASY_DATE, 0x8000000C, TRUE);
+	write_client_reg(ASY_DATF, 0x00000000, TRUE);
+	write_client_reg(ASY_DATG, 0x8000000D, TRUE);
+	write_client_reg(ASY_DATH, 0x00000409, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
+	write_client_reg(ASY_DATA, 0x8000000E, TRUE);
+	write_client_reg(ASY_DATB, 0x00000409, TRUE);
+	write_client_reg(ASY_DATC, 0x80000030, TRUE);
+	write_client_reg(ASY_DATD, 0x00000000, TRUE);
+	write_client_reg(ASY_DATE, 0x80000031, TRUE);
+	write_client_reg(ASY_DATF, 0x00000100, TRUE);
+	write_client_reg(ASY_DATG, 0x80000032, TRUE);
+	write_client_reg(ASY_DATH, 0x00000104, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
+	write_client_reg(ASY_DATA, 0x80000033, TRUE);
+	write_client_reg(ASY_DATB, 0x00000400, TRUE);
+	write_client_reg(ASY_DATC, 0x80000034, TRUE);
+	write_client_reg(ASY_DATD, 0x00000306, TRUE);
+	write_client_reg(ASY_DATE, 0x80000035, TRUE);
+	write_client_reg(ASY_DATF, 0x00000706, TRUE);
+	write_client_reg(ASY_DATG, 0x80000036, TRUE);
+	write_client_reg(ASY_DATH, 0x00000707, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
+	write_client_reg(ASY_DATA, 0x80000037, TRUE);
+	write_client_reg(ASY_DATB, 0x00000004, TRUE);
+	write_client_reg(ASY_DATC, 0x80000038, TRUE);
+	write_client_reg(ASY_DATD, 0x00000000, TRUE);
+	write_client_reg(ASY_DATE, 0x80000039, TRUE);
+	write_client_reg(ASY_DATF, 0x00000000, TRUE);
+	write_client_reg(ASY_DATG, 0x8000003A, TRUE);
+	write_client_reg(ASY_DATH, 0x00000001, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
+	write_client_reg(ASY_DATA, 0x80000044, TRUE);
+	write_client_reg(ASY_DATB, 0x0000AF00, TRUE);
+	write_client_reg(ASY_DATC, 0x80000045, TRUE);
+	write_client_reg(ASY_DATD, 0x0000DB00, TRUE);
+	write_client_reg(ASY_DATE, 0x08000042, TRUE);
+	write_client_reg(ASY_DATF, 0x0000DB00, TRUE);
+	write_client_reg(ASY_DATG, 0x80000021, TRUE);
+	write_client_reg(ASY_DATH, 0x00000000, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
+	write_client_reg(PXL, 0x0000000C, TRUE);
+	write_client_reg(VSYNIF, 0x00000001, TRUE);
+	write_client_reg(ASY_DATA, 0x80000022, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000003, TRUE);
+	write_client_reg(START, 0x00000001, TRUE);
+	mddi_wait(60);
+	write_client_reg(PXL, 0x00000000, TRUE);
+	write_client_reg(VSYNIF, 0x00000000, TRUE);
+	write_client_reg(START, 0x00000000, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
+	write_client_reg(ASY_DATA, 0x80000050, TRUE);
+	write_client_reg(ASY_DATB, 0x00000000, TRUE);
+	write_client_reg(ASY_DATC, 0x80000051, TRUE);
+	write_client_reg(ASY_DATD, 0x00000E00, TRUE);
+	write_client_reg(ASY_DATE, 0x80000052, TRUE);
+	write_client_reg(ASY_DATF, 0x00000D01, TRUE);
+	write_client_reg(ASY_DATG, 0x80000053, TRUE);
+	write_client_reg(ASY_DATH, 0x00000000, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
+	write_client_reg(ASY_DATA, 0x80000058, TRUE);
+	write_client_reg(ASY_DATB, 0x00000000, TRUE);
+	write_client_reg(ASY_DATC, 0x8000005A, TRUE);
+	write_client_reg(ASY_DATD, 0x00000E01, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000009, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000008, TRUE);
+	write_client_reg(ASY_DATA, 0x80000011, TRUE);
+	write_client_reg(ASY_DATB, 0x00000812, TRUE);
+	write_client_reg(ASY_DATC, 0x80000012, TRUE);
+	write_client_reg(ASY_DATD, 0x00000003, TRUE);
+	write_client_reg(ASY_DATE, 0x80000013, TRUE);
+	write_client_reg(ASY_DATF, 0x00000909, TRUE);
+	write_client_reg(ASY_DATG, 0x80000010, TRUE);
+	write_client_reg(ASY_DATH, 0x00000040, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
+	mddi_wait(40);
+	write_client_reg(ASY_DATA, 0x80000010, TRUE);
+	write_client_reg(ASY_DATB, 0x00000340, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
+	mddi_wait(60);
+	write_client_reg(ASY_DATA, 0x80000010, TRUE);
+	write_client_reg(ASY_DATB, 0x00003340, TRUE);
+	write_client_reg(ASY_DATC, 0x80000007, TRUE);
+	write_client_reg(ASY_DATD, 0x00004007, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000009, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000008, TRUE);
+	mddi_wait(1);
+	write_client_reg(ASY_DATA, 0x80000007, TRUE);
+	write_client_reg(ASY_DATB, 0x00004017, TRUE);
+	write_client_reg(ASY_DATC, 0x8000005B, TRUE);
+	write_client_reg(ASY_DATD, 0x00000000, TRUE);
+	write_client_reg(ASY_DATE, 0x80000059, TRUE);
+	write_client_reg(ASY_DATF, 0x00000011, TRUE);
+	write_client_reg(ASY_CMDSET, 0x0000000D, TRUE);
+	write_client_reg(ASY_CMDSET, 0x0000000C, TRUE);
+	mddi_wait(20);
+	write_client_reg(ASY_DATA, 0x80000059, TRUE);
+	/* LTPS I/F control */
+	write_client_reg(ASY_DATB, 0x00000019, TRUE);
+	/* Direct cmd transfer enable */
+	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
+	/* Direct cmd transfer disable */
+	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
+	mddi_wait(20);
+	/* Index setting of SUB LCD */
+	write_client_reg(ASY_DATA, 0x80000059, TRUE);
+	/* LTPS I/F control */
+	write_client_reg(ASY_DATB, 0x00000079, TRUE);
+	/* Direct cmd transfer enable */
+	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
+	/* Direct cmd transfer disable */
+	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
+	mddi_wait(20);
+	/* Index setting of SUB LCD */
+	write_client_reg(ASY_DATA, 0x80000059, TRUE);
+	/* LTPS I/F control */
+	write_client_reg(ASY_DATB, 0x000003FD, TRUE);
+	/* Direct cmd transfer enable */
+	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
+	/* Direct cmd transfer disable */
+	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
+	mddi_wait(20);
+	mddi_toshiba_state_transition(TOSHIBA_STATE_PRIM_SEC_READY,
+				      TOSHIBA_STATE_SEC_NORMAL_MODE);
+}
+
+static void toshiba_prim_lcd_off(struct msm_fb_data_type *mfd)
+{
+	if (TM_GET_PID(mfd->panel.id) == LCD_SHARP_2P4_VGA) {
+		gordon_disp_off();
+	} else {
+		/* Main panel power off (Deep standby in) */
+		write_client_reg(SSITX, 0x000800BC, TRUE);
+		write_client_reg(SSITX, 0x00000100, TRUE);
+		write_client_reg(SSITX, 0x00000028, TRUE);
+		mddi_wait(1);
+		write_client_reg(SSITX, 0x000800B8, TRUE);
+		write_client_reg(SSITX, 0x00000180, TRUE);
+		write_client_reg(SSITX, 0x00000102, TRUE);
+		write_client_reg(SSITX, 0x00000010, TRUE);
+	}
+	write_client_reg(PORT, 0x00000003, TRUE);
+	write_client_reg(REGENB, 0x00000001, TRUE);
+	mddi_wait(1);
+	write_client_reg(PXL, 0x00000000, TRUE);
+	write_client_reg(START, 0x00000000, TRUE);
+	write_client_reg(REGENB, 0x00000001, TRUE);
+	mddi_wait(3);
+	if (TM_GET_PID(mfd->panel.id) != LCD_SHARP_2P4_VGA) {
+		write_client_reg(SSITX, 0x000800B0, TRUE);
+		write_client_reg(SSITX, 0x00000100, TRUE);
+	}
+	mddi_toshiba_state_transition(TOSHIBA_STATE_PRIM_NORMAL_MODE,
+				      TOSHIBA_STATE_PRIM_SEC_STANDBY);
+}
+
+static void toshiba_sec_lcd_off(struct msm_fb_data_type *mfd)
+{
+	if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
+		return;
+
+	write_client_reg(VSYNIF, 0x00000000, TRUE);
+	write_client_reg(PORT_ENB, 0x00000002, TRUE);
+	write_client_reg(ASY_DATA, 0x80000007, TRUE);
+	write_client_reg(ASY_DATB, 0x00004016, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
+	mddi_wait(2);
+	write_client_reg(ASY_DATA, 0x80000059, TRUE);
+	write_client_reg(ASY_DATB, 0x00000019, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
+	mddi_wait(2);
+	write_client_reg(ASY_DATA, 0x80000059, TRUE);
+	write_client_reg(ASY_DATB, 0x0000000B, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
+	mddi_wait(2);
+	write_client_reg(ASY_DATA, 0x80000059, TRUE);
+	write_client_reg(ASY_DATB, 0x00000002, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
+	mddi_wait(4);
+	write_client_reg(ASY_DATA, 0x80000010, TRUE);
+	write_client_reg(ASY_DATB, 0x00000300, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
+	mddi_wait(4);
+	write_client_reg(ASY_DATA, 0x80000059, TRUE);
+	write_client_reg(ASY_DATB, 0x00000000, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
+	mddi_wait(2);
+	write_client_reg(ASY_DATA, 0x80000007, TRUE);
+	write_client_reg(ASY_DATB, 0x00004004, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
+	mddi_wait(2);
+	write_client_reg(PORT, 0x00000000, TRUE);
+	write_client_reg(PXL, 0x00000000, TRUE);
+	write_client_reg(START, 0x00000000, TRUE);
+	write_client_reg(VSYNIF, 0x00000001, TRUE);
+	write_client_reg(PORT_ENB, 0x00000001, TRUE);
+	write_client_reg(REGENB, 0x00000001, TRUE);
+	mddi_toshiba_state_transition(TOSHIBA_STATE_SEC_NORMAL_MODE,
+				      TOSHIBA_STATE_PRIM_SEC_STANDBY);
+}
+
+static void toshiba_sec_cont_update_start(struct msm_fb_data_type *mfd)
+{
+	if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
+		return;
+
+	write_client_reg(VSYNIF, 0x00000000, TRUE);
+	write_client_reg(PORT_ENB, 0x00000002, TRUE);
+	write_client_reg(INTMASK, 0x00000001, TRUE);
+	write_client_reg(TTBUSSEL, 0x0000000B, TRUE);
+	write_client_reg(MONI, 0x00000008, TRUE);
+	write_client_reg(CLKENB, 0x000000EF, TRUE);
+	write_client_reg(CLKENB, 0x000010EF, TRUE);
+	write_client_reg(CLKENB, 0x000011EF, TRUE);
+	write_client_reg(BITMAP4, 0x00DC00B0, TRUE);
+	write_client_reg(HCYCLE, 0x0000006B, TRUE);
+	write_client_reg(HSW, 0x00000003, TRUE);
+	write_client_reg(HDE_START, 0x00000002, TRUE);
+	write_client_reg(HDE_SIZE, 0x00000057, TRUE);
+	write_client_reg(VCYCLE, 0x000000E6, TRUE);
+	write_client_reg(VSW, 0x00000001, TRUE);
+	write_client_reg(VDE_START, 0x00000003, TRUE);
+	write_client_reg(VDE_SIZE, 0x000000DB, TRUE);
+	write_client_reg(WRSTB, 0x00000015, TRUE);
+	write_client_reg(MPLFBUF, 0x00000004, TRUE);
+	write_client_reg(ASY_DATA, 0x80000021, TRUE);
+	write_client_reg(ASY_DATB, 0x00000000, TRUE);
+	write_client_reg(ASY_DATC, 0x80000022, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000007, TRUE);
+	write_client_reg(PXL, 0x00000089, TRUE);
+	write_client_reg(VSYNIF, 0x00000001, TRUE);
+	mddi_wait(2);
+}
+
+static void toshiba_sec_cont_update_stop(struct msm_fb_data_type *mfd)
+{
+	if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
+		return;
+
+	write_client_reg(PXL, 0x00000000, TRUE);
+	write_client_reg(VSYNIF, 0x00000000, TRUE);
+	write_client_reg(START, 0x00000000, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
+	mddi_wait(3);
+	write_client_reg(SRST, 0x00000002, TRUE);
+	mddi_wait(3);
+	write_client_reg(SRST, 0x00000003, TRUE);
+}
+
+static void toshiba_sec_backlight_on(struct msm_fb_data_type *mfd)
+{
+	if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
+		return;
+
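+	/*
+	 * Program both backlight PWM timers with a 5000-tick (0x1388)
+	 * period and their initial off-points (PWM0 at 1, PWM1 at 0x1387),
+	 * then kick them via TIMERxCTRL and PWMCR.
+	 */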
+	write_client_reg(TIMER0CTRL, 0x00000060, TRUE);
+	write_client_reg(TIMER0LOAD, 0x00001388, TRUE);
+	write_client_reg(PWM0OFF, 0x00000001, TRUE);
+	write_client_reg(TIMER1CTRL, 0x00000060, TRUE);
+	write_client_reg(TIMER1LOAD, 0x00001388, TRUE);
+	write_client_reg(PWM1OFF, 0x00001387, TRUE);
+	write_client_reg(TIMER0CTRL, 0x000000E0, TRUE);
+	write_client_reg(TIMER1CTRL, 0x000000E0, TRUE);
+	write_client_reg(PWMCR, 0x00000003, TRUE);
+}
+
+static void toshiba_sec_sleep_in(struct msm_fb_data_type *mfd)
+{
+	if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
+		return;
+
+	write_client_reg(VSYNIF, 0x00000000, TRUE);
+	write_client_reg(PORT_ENB, 0x00000002, TRUE);
+	write_client_reg(ASY_DATA, 0x80000007, TRUE);
+	write_client_reg(ASY_DATB, 0x00004016, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
+	mddi_wait(2);
+	write_client_reg(ASY_DATA, 0x80000059, TRUE);
+	write_client_reg(ASY_DATB, 0x00000019, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
+	mddi_wait(2);
+	write_client_reg(ASY_DATA, 0x80000059, TRUE);
+	write_client_reg(ASY_DATB, 0x0000000B, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
+	mddi_wait(2);
+	write_client_reg(ASY_DATA, 0x80000059, TRUE);
+	write_client_reg(ASY_DATB, 0x00000002, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
+	mddi_wait(4);
+	write_client_reg(ASY_DATA, 0x80000010, TRUE);
+	write_client_reg(ASY_DATB, 0x00000300, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
+	mddi_wait(4);
+	write_client_reg(ASY_DATA, 0x80000059, TRUE);
+	write_client_reg(ASY_DATB, 0x00000000, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
+	mddi_wait(2);
+	write_client_reg(ASY_DATA, 0x80000007, TRUE);
+	write_client_reg(ASY_DATB, 0x00004004, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
+	mddi_wait(2);
+	write_client_reg(PORT, 0x00000000, TRUE);
+	write_client_reg(PXL, 0x00000000, TRUE);
+	write_client_reg(START, 0x00000000, TRUE);
+	write_client_reg(REGENB, 0x00000001, TRUE);
+	/* Sleep in sequence */
+	write_client_reg(ASY_DATA, 0x80000010, TRUE);
+	write_client_reg(ASY_DATB, 0x00000302, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
+}
+
+static void toshiba_sec_sleep_out(struct msm_fb_data_type *mfd)
+{
+	if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
+		return;
+
+	write_client_reg(VSYNIF, 0x00000000, TRUE);
+	write_client_reg(PORT_ENB, 0x00000002, TRUE);
+	write_client_reg(ASY_DATA, 0x80000010, TRUE);
+	write_client_reg(ASY_DATB, 0x00000300, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
+	/*  Display ON sequence */
+	write_client_reg(ASY_DATA, 0x80000011, TRUE);
+	write_client_reg(ASY_DATB, 0x00000812, TRUE);
+	write_client_reg(ASY_DATC, 0x80000012, TRUE);
+	write_client_reg(ASY_DATD, 0x00000003, TRUE);
+	write_client_reg(ASY_DATE, 0x80000013, TRUE);
+	write_client_reg(ASY_DATF, 0x00000909, TRUE);
+	write_client_reg(ASY_DATG, 0x80000010, TRUE);
+	write_client_reg(ASY_DATH, 0x00000040, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
+	mddi_wait(4);
+	write_client_reg(ASY_DATA, 0x80000010, TRUE);
+	write_client_reg(ASY_DATB, 0x00000340, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
+	mddi_wait(6);
+	write_client_reg(ASY_DATA, 0x80000010, TRUE);
+	write_client_reg(ASY_DATB, 0x00003340, TRUE);
+	write_client_reg(ASY_DATC, 0x80000007, TRUE);
+	write_client_reg(ASY_DATD, 0x00004007, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000009, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000008, TRUE);
+	mddi_wait(1);
+	write_client_reg(ASY_DATA, 0x80000007, TRUE);
+	write_client_reg(ASY_DATB, 0x00004017, TRUE);
+	write_client_reg(ASY_DATC, 0x8000005B, TRUE);
+	write_client_reg(ASY_DATD, 0x00000000, TRUE);
+	write_client_reg(ASY_DATE, 0x80000059, TRUE);
+	write_client_reg(ASY_DATF, 0x00000011, TRUE);
+	write_client_reg(ASY_CMDSET, 0x0000000D, TRUE);
+	write_client_reg(ASY_CMDSET, 0x0000000C, TRUE);
+	mddi_wait(2);
+	write_client_reg(ASY_DATA, 0x80000059, TRUE);
+	write_client_reg(ASY_DATB, 0x00000019, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
+	mddi_wait(2);
+	write_client_reg(ASY_DATA, 0x80000059, TRUE);
+	write_client_reg(ASY_DATB, 0x00000079, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
+	mddi_wait(2);
+	write_client_reg(ASY_DATA, 0x80000059, TRUE);
+	write_client_reg(ASY_DATB, 0x000003FD, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
+	write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
+	mddi_wait(2);
+}
+
+static void mddi_toshiba_lcd_set_backlight(struct msm_fb_data_type *mfd)
+{
+	int32 level;
+	int ret = -EPERM;
+	int max = mfd->panel_info.bl_max;
+	int min = mfd->panel_info.bl_min;
+	int i = 0;
+
+	if (mddi_toshiba_pdata && mddi_toshiba_pdata->pmic_backlight) {
+		while (i++ < 3) {
+			ret = mddi_toshiba_pdata->pmic_backlight(mfd->bl_level);
+			if (!ret)
+				return;
+			msleep(10);
+		}
+		printk(KERN_WARNING "%s: pmic_backlight Failed\n", __func__);
+	}
+
+	if (ret && mddi_toshiba_pdata && mddi_toshiba_pdata->backlight_level) {
+		level = mddi_toshiba_pdata->backlight_level(mfd->bl_level,
+								max, min);
+
+		if (level < 0)
+			return;
+
+		if (TM_GET_PID(mfd->panel.id) == LCD_SHARP_2P4_VGA)
+			write_client_reg(TIMER0LOAD, 0x00001388, TRUE);
+	} else {
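+		/*
+		 * No usable platform hook: scale bl_level linearly into
+		 * the 0..4999 range written to PWM0OFF (TIMER0LOAD is
+		 * 0x1388, i.e. 5000 ticks).
+		 */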
+		if (!max)
+			level = 0;
+		else
+			level = (mfd->bl_level * 4999) / max;
+	}
+
+	write_client_reg(PWM0OFF, level, TRUE);
+}
+
+/* handler: ISR to be executed once the client vsync is detected */
+static void mddi_toshiba_vsync_set_handler(msm_fb_vsync_handler_type handler,
+					   void *arg)
+{
+	boolean error = FALSE;
+	unsigned long flags;
+
+	/* Disable interrupts */
+	spin_lock_irqsave(&mddi_host_spin_lock, flags);
+	/* INTLOCK(); */
+
+	if (mddi_toshiba_vsync_handler != NULL) {
+		error = TRUE;
+	} else {
+		/* Register the handler for this particular GROUP interrupt source */
+		mddi_toshiba_vsync_handler = handler;
+		mddi_toshiba_vsync_handler_arg = arg;
+	}
+
+	/* Restore interrupts */
+	spin_unlock_irqrestore(&mddi_host_spin_lock, flags);
+	/* MDDI_INTFREE(); */
+	if (error) {
+		MDDI_MSG_ERR("MDDI: Previous Vsync handler never called\n");
+	} else {
+		/* Enable the vsync wakeup */
+		mddi_queue_register_write(INTMSK, 0x0000, FALSE, 0);
+
+		mddi_toshiba_vsync_attempts = 1;
+		mddi_vsync_detect_enabled = TRUE;
+	}
+}				/* mddi_toshiba_vsync_set_handler */
+
+static void mddi_toshiba_lcd_vsync_detected(boolean detected)
+{
+	/* static timetick_type start_time = 0; */
+	static struct timeval start_time;
+	static boolean first_time = TRUE;
+	/* uint32 mdp_cnt_val = 0; */
+	/* timetick_type elapsed_us; */
+	struct timeval now;
+	uint32 elapsed_us;
+	uint32 num_vsyncs;
+
+	if ((detected) || (mddi_toshiba_vsync_attempts > 5)) {
+		if ((detected) && (mddi_toshiba_monitor_refresh_value)) {
+			/* if (start_time != 0) */
+			if (!first_time) {
+				jiffies_to_timeval(jiffies, &now);
+				elapsed_us =
+				    (now.tv_sec - start_time.tv_sec) * 1000000 +
+				    now.tv_usec - start_time.tv_usec;
+				/*
+				 * The LCD refreshes once every
+				 * mddi_toshiba_usecs_per_refresh microseconds,
+				 * so the number of vsyncs since the last
+				 * measurement is the elapsed time plus half a
+				 * refresh period, divided by the refresh period.
+				 */
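+				/*
+				 * e.g. at ~61 Hz (usecs_per_refresh = 16344)
+				 * and elapsed_us of ~49000, this yields
+				 * (49000 + 8172) / 16344 = 3 vsyncs.
+				 */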
+				num_vsyncs = (elapsed_us +
+					      (mddi_toshiba_usecs_per_refresh >>
+					       1)) /
+				    mddi_toshiba_usecs_per_refresh;
+				/*
+				 * The LCD is configured for
+				 * mddi_toshiba_rows_per_refresh hsyncs (rows)
+				 * per refresh cycle; calculate a new
+				 * rows_per_second value from these
+				 * measurements so the MDP can update with it.
+				 */
+				mddi_toshiba_rows_per_second =
+				    (mddi_toshiba_rows_per_refresh * 1000 *
+				     num_vsyncs) / (elapsed_us / 1000);
+			}
+			/* start_time = timetick_get(); */
+			first_time = FALSE;
+			jiffies_to_timeval(jiffies, &start_time);
+			if (mddi_toshiba_report_refresh_measurements) {
+				(void)mddi_queue_register_read_int(VPOS,
+								   &mddi_toshiba_curr_vpos);
+				/* mdp_cnt_val = MDP_LINE_COUNT; */
+			}
+		}
+		/* if detected = TRUE, client initiated wakeup was detected */
+		if (mddi_toshiba_vsync_handler != NULL) {
+			(*mddi_toshiba_vsync_handler)
+			    (mddi_toshiba_vsync_handler_arg);
+			mddi_toshiba_vsync_handler = NULL;
+		}
+		mddi_vsync_detect_enabled = FALSE;
+		mddi_toshiba_vsync_attempts = 0;
+		/* need to disable the interrupt wakeup */
+		if (!mddi_queue_register_write_int(INTMSK, 0x0001))
+			MDDI_MSG_ERR("Vsync interrupt disable failed!\n");
+		if (!detected) {
+			/* give up after 5 failed attempts but show error */
+			MDDI_MSG_NOTICE("Vsync detection failed!\n");
+		} else if ((mddi_toshiba_monitor_refresh_value) &&
+			   (mddi_toshiba_report_refresh_measurements)) {
+			MDDI_MSG_NOTICE("  Last Line Counter=%d!\n",
+					mddi_toshiba_curr_vpos);
+		/* MDDI_MSG_NOTICE("  MDP Line Counter=%d!\n",mdp_cnt_val); */
+			MDDI_MSG_NOTICE("  Lines Per Second=%d!\n",
+					mddi_toshiba_rows_per_second);
+		}
+		/* clear the interrupt */
+		if (!mddi_queue_register_write_int(INTFLG, 0x0001))
+			MDDI_MSG_ERR("Vsync interrupt clear failed!\n");
+	} else {
+		/* if detected = FALSE, we woke up from hibernation, but did not
+		 * detect client initiated wakeup.
+		 */
+		mddi_toshiba_vsync_attempts++;
+	}
+}
+
+static void mddi_toshiba_prim_init(struct msm_fb_data_type *mfd)
+{
+	switch (toshiba_state) {
+	case TOSHIBA_STATE_PRIM_SEC_READY:
+		break;
+	case TOSHIBA_STATE_OFF:
+		toshiba_state = TOSHIBA_STATE_PRIM_SEC_STANDBY;
+		toshiba_common_initial_setup(mfd);
+		break;
+	case TOSHIBA_STATE_PRIM_SEC_STANDBY:
+		toshiba_common_initial_setup(mfd);
+		break;
+	case TOSHIBA_STATE_SEC_NORMAL_MODE:
+		toshiba_sec_cont_update_stop(mfd);
+		toshiba_sec_sleep_in(mfd);
+		toshiba_sec_sleep_out(mfd);
+		toshiba_sec_lcd_off(mfd);
+		toshiba_common_initial_setup(mfd);
+		break;
+	default:
+		MDDI_MSG_ERR("mddi_toshiba_prim_init from state %d\n",
+			     toshiba_state);
+	}
+
+	toshiba_prim_start(mfd);
+	if (TM_GET_PID(mfd->panel.id) == LCD_SHARP_2P4_VGA)
+		gordon_disp_init();
+	mddi_host_write_pix_attr_reg(0x00C3);
+}
+
+static void mddi_toshiba_sec_init(struct msm_fb_data_type *mfd)
+{
+	switch (toshiba_state) {
+	case TOSHIBA_STATE_PRIM_SEC_READY:
+		break;
+	case TOSHIBA_STATE_PRIM_SEC_STANDBY:
+		toshiba_common_initial_setup(mfd);
+		break;
+	case TOSHIBA_STATE_PRIM_NORMAL_MODE:
+		toshiba_prim_lcd_off(mfd);
+		toshiba_common_initial_setup(mfd);
+		break;
+	default:
+		MDDI_MSG_ERR("mddi_toshiba_sec_init from state %d\n",
+			     toshiba_state);
+	}
+
+	toshiba_sec_start(mfd);
+	toshiba_sec_backlight_on(mfd);
+	toshiba_sec_cont_update_start(mfd);
+	mddi_host_write_pix_attr_reg(0x0400);
+}
+
+static void mddi_toshiba_lcd_powerdown(struct msm_fb_data_type *mfd)
+{
+	switch (toshiba_state) {
+	case TOSHIBA_STATE_PRIM_SEC_READY:
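+		/*
+		 * A panel still in the READY state is first brought up to
+		 * normal mode so the regular power-down path can run.
+		 */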
+		mddi_toshiba_prim_init(mfd);
+		mddi_toshiba_lcd_powerdown(mfd);
+		return;
+	case TOSHIBA_STATE_PRIM_SEC_STANDBY:
+		break;
+	case TOSHIBA_STATE_PRIM_NORMAL_MODE:
+		toshiba_prim_lcd_off(mfd);
+		break;
+	case TOSHIBA_STATE_SEC_NORMAL_MODE:
+		toshiba_sec_cont_update_stop(mfd);
+		toshiba_sec_sleep_in(mfd);
+		toshiba_sec_sleep_out(mfd);
+		toshiba_sec_lcd_off(mfd);
+		break;
+	default:
+		MDDI_MSG_ERR("mddi_toshiba_lcd_powerdown from state %d\n",
+			     toshiba_state);
+	}
+}
+
+static int mddi_sharpgordon_firsttime = 1;
+
+static int mddi_toshiba_lcd_on(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+	mfd = platform_get_drvdata(pdev);
+	if (!mfd)
+		return -ENODEV;
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	mddi_host_client_cnt_reset();
+
+	if (TM_GET_DID(mfd->panel.id) == TOSHIBA_VGA_PRIM)
+		mddi_toshiba_prim_init(mfd);
+	else
+		mddi_toshiba_sec_init(mfd);
+	if (TM_GET_PID(mfd->panel.id) == LCD_SHARP_2P4_VGA) {
+		if (mddi_sharpgordon_firsttime) {
+			mddi_sharpgordon_firsttime = 0;
+			write_client_reg(REGENB, 0x00000001, TRUE);
+		}
+	}
+	return 0;
+}
+
+static int mddi_toshiba_lcd_off(struct platform_device *pdev)
+{
+	if (mddi_toshiba_vsync_handler != NULL) {
+		(*mddi_toshiba_vsync_handler)
+			    (mddi_toshiba_vsync_handler_arg);
+		mddi_toshiba_vsync_handler = NULL;
+		printk(KERN_INFO "%s: clean up vsync_handler=%p\n", __func__,
+				mddi_toshiba_vsync_handler);
+	}
+
+	mddi_toshiba_lcd_powerdown(platform_get_drvdata(pdev));
+	return 0;
+}
+
+static int __devinit mddi_toshiba_lcd_probe(struct platform_device *pdev)
+{
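+	/*
+	 * id 0 is the device that only carries platform data; panel
+	 * instances (id = (panel << 8) | channel, created in
+	 * mddi_toshiba_device_register()) get a framebuffer device.
+	 */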
+	if (pdev->id == 0) {
+		mddi_toshiba_pdata = pdev->dev.platform_data;
+		return 0;
+	}
+
+	msm_fb_add_device(pdev);
+
+	return 0;
+}
+
+static struct platform_driver this_driver = {
+	.probe  = mddi_toshiba_lcd_probe,
+	.driver = {
+		.name   = "mddi_toshiba",
+	},
+};
+
+static struct msm_fb_panel_data toshiba_panel_data = {
+	.on 		= mddi_toshiba_lcd_on,
+	.off 		= mddi_toshiba_lcd_off,
+};
+
+static int ch_used[3];
+
+int mddi_toshiba_device_register(struct msm_panel_info *pinfo,
+					u32 channel, u32 panel)
+{
+	struct platform_device *pdev = NULL;
+	int ret;
+
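+	/* Only channels 0..2 exist, and each may be registered once. */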
+	if ((channel >= 3) || ch_used[channel])
+		return -ENODEV;
+
+	if ((channel != TOSHIBA_VGA_PRIM) &&
+	    mddi_toshiba_pdata && mddi_toshiba_pdata->panel_num)
+		if (mddi_toshiba_pdata->panel_num() < 2)
+			return -ENODEV;
+
+	ch_used[channel] = TRUE;
+
+	pdev = platform_device_alloc("mddi_toshiba", (panel << 8)|channel);
+	if (!pdev)
+		return -ENOMEM;
+
+	if (channel == TOSHIBA_VGA_PRIM) {
+		toshiba_panel_data.set_backlight =
+				mddi_toshiba_lcd_set_backlight;
+
+		if (pinfo->lcd.vsync_enable) {
+			toshiba_panel_data.set_vsync_notifier =
+				mddi_toshiba_vsync_set_handler;
+			mddi_lcd.vsync_detected =
+				mddi_toshiba_lcd_vsync_detected;
+		}
+	} else {
+		toshiba_panel_data.set_backlight = NULL;
+		toshiba_panel_data.set_vsync_notifier = NULL;
+	}
+
+	toshiba_panel_data.panel_info = *pinfo;
+
+	ret = platform_device_add_data(pdev, &toshiba_panel_data,
+		sizeof(toshiba_panel_data));
+	if (ret) {
+		printk(KERN_ERR
+		  "%s: platform_device_add_data failed!\n", __func__);
+		goto err_device_put;
+	}
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		printk(KERN_ERR
+		  "%s: platform_device_register failed!\n", __func__);
+		goto err_device_put;
+	}
+
+	return 0;
+
+err_device_put:
+	platform_device_put(pdev);
+	return ret;
+}
+
+static int __init mddi_toshiba_lcd_init(void)
+{
+	return platform_driver_register(&this_driver);
+}
+
+module_init(mddi_toshiba_lcd_init);
diff --git a/drivers/video/msm/mddi_toshiba.h b/drivers/video/msm/mddi_toshiba.h
new file mode 100644
index 0000000..646f5e9
--- /dev/null
+++ b/drivers/video/msm/mddi_toshiba.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDDI_TOSHIBA_H
+#define MDDI_TOSHIBA_H
+
+#define TOSHIBA_VGA_PRIM 1
+#define TOSHIBA_VGA_SECD 2
+
+#define LCD_TOSHIBA_2P4_VGA 	0
+#define LCD_TOSHIBA_2P4_WVGA 	1
+#define LCD_TOSHIBA_2P4_WVGA_PT	2
+#define LCD_SHARP_2P4_VGA 	3
+
+#define GPIO_BLOCK_BASE        0x150000
+#define SYSTEM_BLOCK2_BASE     0x170000
+
+#define GPIODIR     (GPIO_BLOCK_BASE|0x04)
+#define GPIOSEL     (SYSTEM_BLOCK2_BASE|0x00)
+#define GPIOPC      (GPIO_BLOCK_BASE|0x28)
+#define GPIODATA    (GPIO_BLOCK_BASE|0x00)
+
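+/* Note: the __Z argument is ignored by this wrapper. */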
+#define write_client_reg(__X, __Y, __Z) do { \
+	mddi_queue_register_write(__X, __Y, TRUE, 0); \
+} while (0)
+
+#endif /* MDDI_TOSHIBA_H */
diff --git a/drivers/video/msm/mddi_toshiba_vga.c b/drivers/video/msm/mddi_toshiba_vga.c
new file mode 100644
index 0000000..794edff
--- /dev/null
+++ b/drivers/video/msm/mddi_toshiba_vga.c
@@ -0,0 +1,133 @@
+/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_fb.h"
+#include "mddihost.h"
+#include "mddihosti.h"
+#include "mddi_toshiba.h"
+
+static uint32 read_client_reg(uint32 addr)
+{
+	uint32 val;
+	mddi_queue_register_read(addr, &val, TRUE, 0);
+	return val;
+}
+
+static uint32 toshiba_lcd_gpio_read(void)
+{
+	uint32 val;
+
+	write_client_reg(GPIODIR, 0x0000000C, TRUE);
+	write_client_reg(GPIOSEL, 0x00000000, TRUE);
+	write_client_reg(GPIOSEL, 0x00000000, TRUE);
+	write_client_reg(GPIOPC, 0x03CF00C0, TRUE);
+	val = read_client_reg(GPIODATA) & 0x2C0;
+
+	return val;
+}
+
+static u32 mddi_toshiba_panel_detect(void)
+{
+	mddi_host_type host_idx = MDDI_HOST_PRIM;
+	uint32 lcd_gpio;
+	u32 mddi_toshiba_lcd = LCD_TOSHIBA_2P4_VGA;
+
+	/* Toshiba display requires larger drive_lo value */
+	mddi_host_reg_out(DRIVE_LO, 0x0050);
+
+	lcd_gpio = toshiba_lcd_gpio_read();
+	switch (lcd_gpio) {
+	case 0x0080:
+		mddi_toshiba_lcd = LCD_SHARP_2P4_VGA;
+		break;
+
+	case 0x00C0:
+	default:
+		mddi_toshiba_lcd = LCD_TOSHIBA_2P4_VGA;
+		break;
+	}
+
+	return mddi_toshiba_lcd;
+}
+
+static int __init mddi_toshiba_vga_init(void)
+{
+	int ret;
+	struct msm_panel_info pinfo;
+	u32 panel;
+
+#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
+	u32 id;
+
+	ret = msm_fb_detect_client("mddi_toshiba_vga");
+	if (ret == -ENODEV)
+		return 0;
+
+	if (ret) {
+		id = mddi_get_client_id();
+		if ((id >> 16) != 0xD263)
+			return 0;
+	}
+#endif
+
+	panel = mddi_toshiba_panel_detect();
+
+	pinfo.xres = 480;
+	pinfo.yres = 640;
+	pinfo.type = MDDI_PANEL;
+	pinfo.pdest = DISPLAY_1;
+	pinfo.mddi.vdopkt = MDDI_DEFAULT_PRIM_PIX_ATTR;
+	pinfo.wait_cycle = 0;
+	pinfo.bpp = 18;
+	pinfo.lcd.vsync_enable = TRUE;
+	pinfo.lcd.refx100 = 6118;
+	pinfo.lcd.v_back_porch = 6;
+	pinfo.lcd.v_front_porch = 0;
+	pinfo.lcd.v_pulse_width = 0;
+	pinfo.lcd.hw_vsync_mode = FALSE;
+	pinfo.lcd.vsync_notifier_period = (1 * HZ);
+	pinfo.bl_max = 99;
+	pinfo.bl_min = 1;
+	pinfo.clk_rate = 122880000;
+	pinfo.clk_min =  120000000;
+	pinfo.clk_max =  200000000;
+	pinfo.fb_num = 2;
+
+	ret = mddi_toshiba_device_register(&pinfo, TOSHIBA_VGA_PRIM, panel);
+	if (ret) {
+		printk(KERN_ERR "%s: failed to register device!\n", __func__);
+		return ret;
+	}
+
+	pinfo.xres = 176;
+	pinfo.yres = 220;
+	MSM_FB_SINGLE_MODE_PANEL(&pinfo);
+	pinfo.type = MDDI_PANEL;
+	pinfo.pdest = DISPLAY_2;
+	pinfo.mddi.vdopkt = 0x400;
+	pinfo.wait_cycle = 0;
+	pinfo.bpp = 18;
+	pinfo.clk_rate = 122880000;
+	pinfo.clk_min =  120000000;
+	pinfo.clk_max =  200000000;
+	pinfo.fb_num = 2;
+
+	ret = mddi_toshiba_device_register(&pinfo, TOSHIBA_VGA_SECD, panel);
+	if (ret)
+		printk(KERN_WARNING
+			"%s: failed to register device!\n", __func__);
+
+	return ret;
+}
+
+module_init(mddi_toshiba_vga_init);
diff --git a/drivers/video/msm/mddi_toshiba_wvga.c b/drivers/video/msm/mddi_toshiba_wvga.c
new file mode 100644
index 0000000..ad4ce46
--- /dev/null
+++ b/drivers/video/msm/mddi_toshiba_wvga.c
@@ -0,0 +1,60 @@
+/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_fb.h"
+#include "mddihost.h"
+#include "mddi_toshiba.h"
+
+static int __init mddi_toshiba_wvga_init(void)
+{
+	int ret;
+	struct msm_panel_info pinfo;
+
+#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
+	if (msm_fb_detect_client("mddi_toshiba_wvga"))
+		return 0;
+#endif
+
+	pinfo.xres = 800;
+	pinfo.yres = 480;
+	MSM_FB_SINGLE_MODE_PANEL(&pinfo);
+	pinfo.pdest = DISPLAY_2;
+	pinfo.type = MDDI_PANEL;
+	pinfo.mddi.vdopkt = MDDI_DEFAULT_PRIM_PIX_ATTR;
+	pinfo.wait_cycle = 0;
+	pinfo.bpp = 18;
+	pinfo.lcd.vsync_enable = TRUE;
+	pinfo.lcd.refx100 = 6118;
+	pinfo.lcd.v_back_porch = 6;
+	pinfo.lcd.v_front_porch = 0;
+	pinfo.lcd.v_pulse_width = 0;
+	pinfo.lcd.hw_vsync_mode = FALSE;
+	pinfo.lcd.vsync_notifier_period = (1 * HZ);
+	pinfo.bl_max = 4;
+	pinfo.bl_min = 1;
+	pinfo.clk_rate = 192000000;
+	pinfo.clk_min =  190000000;
+	pinfo.clk_max =  200000000;
+	pinfo.fb_num = 2;
+
+	ret = mddi_toshiba_device_register(&pinfo, TOSHIBA_VGA_PRIM,
+					   LCD_TOSHIBA_2P4_WVGA);
+	if (ret) {
+		printk(KERN_ERR "%s: failed to register device!\n", __func__);
+		return ret;
+	}
+
+	return ret;
+}
+
+module_init(mddi_toshiba_wvga_init);
diff --git a/drivers/video/msm/mddi_toshiba_wvga_pt.c b/drivers/video/msm/mddi_toshiba_wvga_pt.c
new file mode 100644
index 0000000..edf739d
--- /dev/null
+++ b/drivers/video/msm/mddi_toshiba_wvga_pt.c
@@ -0,0 +1,68 @@
+/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_fb.h"
+#include "mddihost.h"
+#include "mddihosti.h"
+#include "mddi_toshiba.h"
+
+static struct msm_panel_info pinfo;
+
+static int __init mddi_toshiba_wvga_pt_init(void)
+{
+	int ret;
+#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
+	uint id;
+
+	ret = msm_fb_detect_client("mddi_toshiba_wvga_pt");
+	if (ret == -ENODEV)
+		return 0;
+
+	if (ret) {
+		id = mddi_get_client_id();
+		if (id != 0xd2638722)
+			return 0;
+	}
+#endif
+
+	pinfo.xres = 480;
+	pinfo.yres = 800;
+	MSM_FB_SINGLE_MODE_PANEL(&pinfo);
+	pinfo.type = MDDI_PANEL;
+	pinfo.pdest = DISPLAY_1;
+	pinfo.mddi.vdopkt = MDDI_DEFAULT_PRIM_PIX_ATTR;
+	pinfo.wait_cycle = 0;
+	pinfo.bpp = 18;
+	pinfo.lcd.vsync_enable = TRUE;
+	pinfo.lcd.refx100 = 6096; /* adjust refx100 to prevent tearing */
+	pinfo.lcd.v_back_porch = 2;     /* vsw=1 + vbp = 2 */
+	pinfo.lcd.v_front_porch = 3;
+	pinfo.lcd.v_pulse_width = 1;
+	pinfo.lcd.hw_vsync_mode = FALSE;
+	pinfo.lcd.vsync_notifier_period = (1 * HZ);
+	pinfo.bl_max = 15;
+	pinfo.bl_min = 1;
+	pinfo.clk_rate = 222750000;
+	pinfo.clk_min =  200000000;
+	pinfo.clk_max =  240000000;
+	pinfo.fb_num = 2;
+
+	ret = mddi_toshiba_device_register(&pinfo, TOSHIBA_VGA_PRIM,
+						LCD_TOSHIBA_2P4_WVGA_PT);
+	if (ret)
+		printk(KERN_ERR "%s: failed to register device!\n", __func__);
+
+	return ret;
+}
+
+module_init(mddi_toshiba_wvga_pt_init);
diff --git a/drivers/video/msm/mddihost.c b/drivers/video/msm/mddihost.c
new file mode 100644
index 0000000..c6acf9f
--- /dev/null
+++ b/drivers/video/msm/mddihost.c
@@ -0,0 +1,626 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+
+#include "msm_fb.h"
+#include "mddihost.h"
+#include "mddihosti.h"
+
+#include <linux/clk.h>
+#include <mach/clk.h>
+
+struct semaphore mddi_host_mutex;
+
+struct clk *mddi_io_clk;
+static boolean mddi_host_powered = FALSE;
+static boolean mddi_host_initialized = FALSE;
+extern uint32 *mddi_reg_read_value_ptr;
+
+mddi_lcd_func_type mddi_lcd;
+
+extern mddi_client_capability_type mddi_client_capability_pkt;
+
+#ifdef MDDI_HOST_WINDOW_WORKAROUND
+/* Tables showing number of rows that would cause a packet length
+ * ending in 0x02, for each number of columns. These tables have
+ * been generated for MDDI packets that have 16 and 18 bits-per-pixel.
+ * This is a work-around for MDDI clients that declare a CRC error
+ * on MDDI packets where ((length & 0x00ff) == 0x02).
+ */
+static uint16 error_vals_16bpp[] = {
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 12, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 10, 0, 0, 0, 14, 0, 0, 0, 2, 0, 0, 4, 6, 12, 0,
+0, 0, 0, 13, 0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0,
+0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 11, 4, 0, 12, 0,
+0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0,
+0, 10, 0, 1, 0, 14, 0, 0, 0, 2, 0, 3, 4, 6, 12, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 12, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 10, 0, 0, 0, 14, 0, 0, 0, 2, 0, 0, 4, 6, 12, 0,
+0, 0, 0, 13, 0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0,
+0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 11, 4, 0, 12, 0,
+0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0,
+};
+
+static uint16 error_vals_18bpp[] = {
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 14,
+0, 0, 0, 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 9, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 7,
+0, 0, 0, 0, 0, 0, 1, 0, 0, 16, 0, 0, 0, 0, 0, 6,
+14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+7, 0, 0, 0, 0, 0, 0, 4, 0, 16, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
+0, 7, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 9, 0
+};
+#endif
+
+#ifdef FEATURE_MDDI_HITACHI
+extern void mddi_hitachi_window_adjust(uint16 x1,
+				       uint16 x2, uint16 y1, uint16 y2);
+#endif
+
+extern void mddi_toshiba_lcd_init(void);
+
+#ifdef FEATURE_MDDI_S6D0142
+extern void mddi_s6d0142_lcd_init(void);
+extern void mddi_s6d0142_window_adjust(uint16 x1,
+				       uint16 x2,
+				       uint16 y1,
+				       uint16 y2,
+				       mddi_llist_done_cb_type done_cb);
+#endif
+
+void mddi_init(void)
+{
+	if (mddi_host_initialized)
+		return;
+
+	mddi_host_initialized = TRUE;
+
+	sema_init(&mddi_host_mutex, 1);
+
+	if (!mddi_host_powered) {
+		down(&mddi_host_mutex);
+		mddi_host_init(MDDI_HOST_PRIM);
+		mddi_host_powered = TRUE;
+		up(&mddi_host_mutex);
+		mdelay(10);
+	}
+}
+
+int mddi_host_register_read(uint32 reg_addr,
+     uint32 *reg_value_ptr, boolean wait, mddi_host_type host) {
+	mddi_linked_list_type *curr_llist_ptr;
+	mddi_register_access_packet_type *regacc_pkt_ptr;
+	uint16 curr_llist_idx;
+	int ret = 0;
+
+	if (in_interrupt())
+		MDDI_MSG_CRIT("Called from ISR context\n");
+
+	if (!mddi_host_powered) {
+		MDDI_MSG_ERR("MDDI powered down!\n");
+		mddi_init();
+	}
+
+	down(&mddi_host_mutex);
+
+	mddi_reg_read_value_ptr = reg_value_ptr;
+	curr_llist_idx = mddi_get_reg_read_llist_item(host, TRUE);
+	if (curr_llist_idx == UNASSIGNED_INDEX) {
+		up(&mddi_host_mutex);
+
+		/* need to change this to some sort of wait */
+		MDDI_MSG_ERR("Attempting to queue up more than 1 reg read\n");
+		return -EINVAL;
+	}
+
+	curr_llist_ptr = &llist_extern[host][curr_llist_idx];
+	curr_llist_ptr->link_controller_flags = 0x11;
+	curr_llist_ptr->packet_header_count = 14;
+	curr_llist_ptr->packet_data_count = 0;
+
+	curr_llist_ptr->next_packet_pointer = NULL;
+	curr_llist_ptr->packet_data_pointer = NULL;
+	curr_llist_ptr->reserved = 0;
+
+	regacc_pkt_ptr = &curr_llist_ptr->packet_header.register_pkt;
+
+	regacc_pkt_ptr->packet_length = curr_llist_ptr->packet_header_count;
+	regacc_pkt_ptr->packet_type = 146;	/* register access packet */
+	regacc_pkt_ptr->bClient_ID = 0;
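+	/* Bit 15 requests a read; the low bits give the register count
+	 * (cf. mddi_host_register_multiread, which uses 0x8000 | count).
+	 */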
+	regacc_pkt_ptr->read_write_info = 0x8001;
+	regacc_pkt_ptr->register_address = reg_addr;
+
+	/* now adjust pointers */
+	mddi_queue_forward_packets(curr_llist_idx, curr_llist_idx, wait,
+				   NULL, host);
+	/* need to check if we can write the pointer or not */
+
+	up(&mddi_host_mutex);
+
+	if (wait) {
+		int wait_ret;
+
+		mddi_linked_list_notify_type *llist_notify_ptr;
+		llist_notify_ptr = &llist_extern_notify[host][curr_llist_idx];
+		wait_ret = wait_for_completion_timeout(
+					&(llist_notify_ptr->done_comp), 5 * HZ);
+
+		if (wait_ret <= 0)
+			ret = -EBUSY;
+
+		if (wait_ret < 0)
+			printk(KERN_ERR "%s: failed to wait for completion!\n",
+				__func__);
+		else if (!wait_ret)
+			printk(KERN_ERR "%s: Timed out waiting!\n", __func__);
+
+		if (!ret && (mddi_reg_read_value_ptr == reg_value_ptr) &&
+			(*reg_value_ptr == -EBUSY)) {
+			printk(KERN_ERR "%s: failed to get data from client\n",
+				   __func__);
+			mddi_reg_read_value_ptr = NULL;
+			ret = -EBUSY;
+		}
+	}
+
+	MDDI_MSG_DEBUG("Reg Read value=0x%x\n", *reg_value_ptr);
+
+	return ret;
+}				/* mddi_host_register_read */
+
+int mddi_host_register_write(uint32 reg_addr,
+     uint32 reg_val, enum mddi_data_packet_size_type packet_size,
+     boolean wait, mddi_llist_done_cb_type done_cb, mddi_host_type host) {
+	mddi_linked_list_type *curr_llist_ptr;
+	mddi_linked_list_type *curr_llist_dma_ptr;
+	mddi_register_access_packet_type *regacc_pkt_ptr;
+	uint16 curr_llist_idx;
+	int ret = 0;
+
+	if (in_interrupt())
+		MDDI_MSG_CRIT("Called from ISR context\n");
+
+	if (!mddi_host_powered) {
+		MDDI_MSG_ERR("MDDI powered down!\n");
+		mddi_init();
+	}
+
+	down(&mddi_host_mutex);
+
+	curr_llist_idx = mddi_get_next_free_llist_item(host, TRUE);
+	curr_llist_ptr = &llist_extern[host][curr_llist_idx];
+	curr_llist_dma_ptr = &llist_dma_extern[host][curr_llist_idx];
+
+	curr_llist_ptr->link_controller_flags = 1;
+	curr_llist_ptr->packet_header_count = 14;
+	curr_llist_ptr->packet_data_count = 4;
+
+	curr_llist_ptr->next_packet_pointer = NULL;
+	curr_llist_ptr->reserved = 0;
+
+	regacc_pkt_ptr = &curr_llist_ptr->packet_header.register_pkt;
+
+	regacc_pkt_ptr->packet_length = curr_llist_ptr->packet_header_count +
+					(uint16)packet_size;
+	regacc_pkt_ptr->packet_type = 146;	/* register access packet */
+	regacc_pkt_ptr->bClient_ID = 0;
+	regacc_pkt_ptr->read_write_info = 0x0001;
+	regacc_pkt_ptr->register_address = reg_addr;
+	regacc_pkt_ptr->register_data_list[0] = reg_val;
+
+	MDDI_MSG_DEBUG("Reg Access write reg=0x%x, value=0x%x\n",
+		       regacc_pkt_ptr->register_address,
+		       regacc_pkt_ptr->register_data_list[0]);
+
+	regacc_pkt_ptr = &curr_llist_dma_ptr->packet_header.register_pkt;
+	curr_llist_ptr->packet_data_pointer =
+	    (void *)(&regacc_pkt_ptr->register_data_list[0]);
+
+	/* now adjust pointers */
+	mddi_queue_forward_packets(curr_llist_idx, curr_llist_idx, wait,
+				   done_cb, host);
+
+	up(&mddi_host_mutex);
+
+	if (wait) {
+		int wait_ret;
+
+		mddi_linked_list_notify_type *llist_notify_ptr;
+		llist_notify_ptr = &llist_extern_notify[host][curr_llist_idx];
+		wait_ret = wait_for_completion_timeout(
+					&(llist_notify_ptr->done_comp), 5 * HZ);
+
+		if (wait_ret <= 0)
+			ret = -EBUSY;
+
+		if (wait_ret < 0)
+			printk(KERN_ERR "%s: failed to wait for completion!\n",
+				__func__);
+		else if (!wait_ret)
+			printk(KERN_ERR "%s: Timed out waiting!\n", __func__);
+	}
+
+	return ret;
+}				/* mddi_host_register_write */
+
+boolean mddi_host_register_read_int
+    (uint32 reg_addr, uint32 *reg_value_ptr, mddi_host_type host) {
+	mddi_linked_list_type *curr_llist_ptr;
+	mddi_register_access_packet_type *regacc_pkt_ptr;
+	uint16 curr_llist_idx;
+
+	if (!in_interrupt())
+		MDDI_MSG_CRIT("Called from TASK context\n");
+
+	if (!mddi_host_powered) {
+		MDDI_MSG_ERR("MDDI powered down!\n");
+		return FALSE;
+	}
+
+	if (down_trylock(&mddi_host_mutex) != 0)
+		return FALSE;
+
+	mddi_reg_read_value_ptr = reg_value_ptr;
+	curr_llist_idx = mddi_get_reg_read_llist_item(host, FALSE);
+	if (curr_llist_idx == UNASSIGNED_INDEX) {
+		up(&mddi_host_mutex);
+		return FALSE;
+	}
+
+	curr_llist_ptr = &llist_extern[host][curr_llist_idx];
+	curr_llist_ptr->link_controller_flags = 0x11;
+	curr_llist_ptr->packet_header_count = 14;
+	curr_llist_ptr->packet_data_count = 0;
+
+	curr_llist_ptr->next_packet_pointer = NULL;
+	curr_llist_ptr->packet_data_pointer = NULL;
+	curr_llist_ptr->reserved = 0;
+
+	regacc_pkt_ptr = &curr_llist_ptr->packet_header.register_pkt;
+
+	regacc_pkt_ptr->packet_length = curr_llist_ptr->packet_header_count;
+	regacc_pkt_ptr->packet_type = 146;	/* register access packet */
+	regacc_pkt_ptr->bClient_ID = 0;
+	regacc_pkt_ptr->read_write_info = 0x8001;
+	regacc_pkt_ptr->register_address = reg_addr;
+
+	/* now adjust pointers */
+	mddi_queue_forward_packets(curr_llist_idx, curr_llist_idx, FALSE,
+				   NULL, host);
+	/* need to check if we can write the pointer or not */
+
+	up(&mddi_host_mutex);
+
+	return TRUE;
+}				/* mddi_host_register_read_int */
+
+boolean mddi_host_register_write_int
+    (uint32 reg_addr,
+     uint32 reg_val, mddi_llist_done_cb_type done_cb, mddi_host_type host) {
+	mddi_linked_list_type *curr_llist_ptr;
+	mddi_linked_list_type *curr_llist_dma_ptr;
+	mddi_register_access_packet_type *regacc_pkt_ptr;
+	uint16 curr_llist_idx;
+
+	if (!in_interrupt())
+		MDDI_MSG_CRIT("Called from TASK context\n");
+
+	if (!mddi_host_powered) {
+		MDDI_MSG_ERR("MDDI powered down!\n");
+		return FALSE;
+	}
+
+	if (down_trylock(&mddi_host_mutex) != 0)
+		return FALSE;
+
+	curr_llist_idx = mddi_get_next_free_llist_item(host, FALSE);
+	if (curr_llist_idx == UNASSIGNED_INDEX) {
+		up(&mddi_host_mutex);
+		return FALSE;
+	}
+
+	curr_llist_ptr = &llist_extern[host][curr_llist_idx];
+	curr_llist_dma_ptr = &llist_dma_extern[host][curr_llist_idx];
+
+	curr_llist_ptr->link_controller_flags = 1;
+	curr_llist_ptr->packet_header_count = 14;
+	curr_llist_ptr->packet_data_count = 4;
+
+	curr_llist_ptr->next_packet_pointer = NULL;
+	curr_llist_ptr->reserved = 0;
+
+	regacc_pkt_ptr = &curr_llist_ptr->packet_header.register_pkt;
+
+	regacc_pkt_ptr->packet_length = curr_llist_ptr->packet_header_count + 4;
+	regacc_pkt_ptr->packet_type = 146;	/* register access packet */
+	regacc_pkt_ptr->bClient_ID = 0;
+	regacc_pkt_ptr->read_write_info = 0x0001;
+	regacc_pkt_ptr->register_address = reg_addr;
+	regacc_pkt_ptr->register_data_list[0] = reg_val;
+
+	regacc_pkt_ptr = &curr_llist_dma_ptr->packet_header.register_pkt;
+	curr_llist_ptr->packet_data_pointer =
+	    (void *)(&(regacc_pkt_ptr->register_data_list[0]));
+
+	/* now adjust pointers */
+	mddi_queue_forward_packets(curr_llist_idx, curr_llist_idx, FALSE,
+				   done_cb, host);
+	up(&mddi_host_mutex);
+
+	return TRUE;
+}				/* mddi_host_register_write_int */
+
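+/*
+ * Used by the panel init/power sequences for their millisecond delays;
+ * note that this busy-waits via mdelay().
+ */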
+void mddi_wait(uint16 time_ms)
+{
+	mdelay(time_ms);
+}
+
+void mddi_client_lcd_vsync_detected(boolean detected)
+{
+	if (mddi_lcd.vsync_detected)
+		(*mddi_lcd.vsync_detected) (detected);
+}
+
+/* extended version of function includes done callback */
+void mddi_window_adjust_ext(struct msm_fb_data_type *mfd,
+			    uint16 x1,
+			    uint16 x2,
+			    uint16 y1,
+			    uint16 y2, mddi_llist_done_cb_type done_cb)
+{
+#ifdef FEATURE_MDDI_HITACHI
+	if (mfd->panel.id == HITACHI)
+		mddi_hitachi_window_adjust(x1, x2, y1, y2);
+#elif defined(FEATURE_MDDI_S6D0142)
+	if (mfd->panel.id == MDDI_LCD_S6D0142)
+		mddi_s6d0142_window_adjust(x1, x2, y1, y2, done_cb);
+#else
+	/* Do nothing then... except avoid lint/compiler warnings */
+	(void)x1;
+	(void)x2;
+	(void)y1;
+	(void)y2;
+	(void)done_cb;
+#endif
+}
+
+void mddi_window_adjust(struct msm_fb_data_type *mfd,
+			uint16 x1, uint16 x2, uint16 y1, uint16 y2)
+{
+	mddi_window_adjust_ext(mfd, x1, x2, y1, y2, NULL);
+}
+
+#ifdef MDDI_HOST_WINDOW_WORKAROUND
+uint16 mddi_assign_pkt_height(uint16 pkt_width,
+	uint16 pkt_height, uint16 bpp)
+{
+	uint16 new_pkt_height;
+	uint16 problem_height = 0;
+
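+	/*
+	 * Look up the row count that would make the packet length end in
+	 * 0x02 for this width; if the caller asked for exactly that
+	 * height, drop one row to avoid the client CRC errata described
+	 * above.
+	 */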
+	if (pkt_width <= 240) {
+		if (bpp == 16)
+			problem_height = error_vals_16bpp[pkt_width-1];
+		else if (bpp == 18)
+			problem_height = error_vals_18bpp[pkt_width-1];
+		else {
+			printk(KERN_ERR "Invalid bpp value\n");
+			return -EINVAL;
+		}
+	}
+	if (problem_height == pkt_height)
+		new_pkt_height = problem_height - 1;
+	else
+		new_pkt_height = pkt_height;
+
+	return new_pkt_height;
+}
+#endif
+
+#ifdef ENABLE_MDDI_MULTI_READ_WRITE
+int mddi_host_register_multiwrite(uint32 reg_addr,
+	uint32 *value_list_ptr,
+	uint32 value_count, boolean wait, mddi_llist_done_cb_type done_cb,
+	mddi_host_type host)
+{
+	mddi_linked_list_type *curr_llist_ptr;
+	mddi_linked_list_type *curr_llist_dma_ptr;
+	mddi_register_access_packet_type *regacc_pkt_ptr;
+	uint16 curr_llist_idx;
+	int ret = 0;
+
+	if (!value_list_ptr || !value_count ||
+		value_count > MDDI_HOST_MAX_CLIENT_REG_IN_SAME_ADDR) {
+		MDDI_MSG_ERR("Invalid value_list or value_count\n");
+		return -EINVAL;
+	}
+
+	if (in_interrupt())
+		MDDI_MSG_CRIT("Called from ISR context\n");
+
+	if (!mddi_host_powered) {
+		MDDI_MSG_ERR("MDDI powered down!\n");
+		mddi_init();
+	}
+
+	down(&mddi_host_mutex);
+
+	curr_llist_idx = mddi_get_next_free_llist_item(host, TRUE);
+	curr_llist_ptr = &llist_extern[host][curr_llist_idx];
+	curr_llist_dma_ptr = &llist_dma_extern[host][curr_llist_idx];
+
+	curr_llist_ptr->link_controller_flags = 1;
+	curr_llist_ptr->packet_header_count = 14;
+	curr_llist_ptr->packet_data_count =
+		(uint16)(value_count * 4);
+
+	curr_llist_ptr->next_packet_pointer = NULL;
+	curr_llist_ptr->reserved = 0;
+
+	regacc_pkt_ptr = &curr_llist_ptr->packet_header.register_pkt;
+
+	regacc_pkt_ptr->packet_length = curr_llist_ptr->packet_header_count
+		+ curr_llist_ptr->packet_data_count;
+	regacc_pkt_ptr->packet_type = 146;	/* register access packet */
+	regacc_pkt_ptr->bClient_ID = 0;
+	regacc_pkt_ptr->read_write_info = value_count;
+	regacc_pkt_ptr->register_address = reg_addr;
+	memcpy((void *)&regacc_pkt_ptr->register_data_list[0], value_list_ptr,
+		   curr_llist_ptr->packet_data_count);
+
+	regacc_pkt_ptr = &curr_llist_dma_ptr->packet_header.register_pkt;
+	curr_llist_ptr->packet_data_pointer =
+		(void *)(&regacc_pkt_ptr->register_data_list[0]);
+	MDDI_MSG_DEBUG("MultiReg Access write reg=0x%x, value[0]=0x%x\n",
+		       regacc_pkt_ptr->register_address,
+		       regacc_pkt_ptr->register_data_list[0]);
+
+	/* now adjust pointers */
+	mddi_queue_forward_packets(curr_llist_idx, curr_llist_idx, wait,
+				   done_cb, host);
+
+	up(&mddi_host_mutex);
+
+	if (wait) {
+		int wait_ret;
+
+		mddi_linked_list_notify_type *llist_notify_ptr;
+		llist_notify_ptr = &llist_extern_notify[host][curr_llist_idx];
+		wait_ret = wait_for_completion_timeout(
+					&(llist_notify_ptr->done_comp), 5 * HZ);
+
+		if (wait_ret <= 0)
+			ret = -EBUSY;
+
+		if (wait_ret < 0)
+			printk(KERN_ERR "%s: failed to wait for completion!\n",
+				__func__);
+		else if (!wait_ret)
+			printk(KERN_ERR "%s: Timed out waiting!\n", __func__);
+	}
+
+	return ret;
+}
+
+int mddi_host_register_multiread(uint32 reg_addr,
+	uint32 *value_list_ptr, uint32 value_count,
+	boolean wait, mddi_host_type host)
+{
+	mddi_linked_list_type *curr_llist_ptr;
+	mddi_register_access_packet_type *regacc_pkt_ptr;
+	uint16 curr_llist_idx;
+	int ret = 0;
+
+	if (!value_list_ptr || !value_count ||
+		value_count >= MDDI_HOST_MAX_CLIENT_REG_IN_SAME_ADDR) {
+		MDDI_MSG_ERR("Invalid value_list or value_count\n");
+		return -EINVAL;
+	}
+
+	if (in_interrupt())
+		MDDI_MSG_CRIT("Called from ISR context\n");
+
+	if (!mddi_host_powered) {
+		MDDI_MSG_ERR("MDDI powered down!\n");
+		mddi_init();
+	}
+
+	down(&mddi_host_mutex);
+
+	mddi_reg_read_value_ptr = value_list_ptr;
+	curr_llist_idx = mddi_get_reg_read_llist_item(host, TRUE);
+	if (curr_llist_idx == UNASSIGNED_INDEX) {
+		up(&mddi_host_mutex);
+
+		/* need to change this to some sort of wait */
+		MDDI_MSG_ERR("Attempting to queue up more than 1 reg read\n");
+		return -EINVAL;
+	}
+
+	curr_llist_ptr = &llist_extern[host][curr_llist_idx];
+	curr_llist_ptr->link_controller_flags = 0x11;
+	curr_llist_ptr->packet_header_count = 14;
+	curr_llist_ptr->packet_data_count = 0;
+
+	curr_llist_ptr->next_packet_pointer = NULL;
+	curr_llist_ptr->packet_data_pointer = NULL;
+	curr_llist_ptr->reserved = 0;
+
+	regacc_pkt_ptr = &curr_llist_ptr->packet_header.register_pkt;
+
+	regacc_pkt_ptr->packet_length = curr_llist_ptr->packet_header_count;
+	regacc_pkt_ptr->packet_type = 146;	/* register access packet */
+	regacc_pkt_ptr->bClient_ID = 0;
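+	/* read request: bit 15 set, low bits give the number of registers to read */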
+	regacc_pkt_ptr->read_write_info = 0x8000 | value_count;
+	regacc_pkt_ptr->register_address = reg_addr;
+
+	/* now adjust pointers */
+	mddi_queue_forward_packets(curr_llist_idx, curr_llist_idx, wait,
+				   NULL, host);
+	/* need to check if we can write the pointer or not */
+
+	up(&mddi_host_mutex);
+
+	if (wait) {
+		int wait_ret;
+
+		mddi_linked_list_notify_type *llist_notify_ptr;
+		llist_notify_ptr = &llist_extern_notify[host][curr_llist_idx];
+		wait_ret = wait_for_completion_timeout(
+					&(llist_notify_ptr->done_comp), 5 * HZ);
+
+		if (wait_ret <= 0)
+			ret = -EBUSY;
+
+		if (wait_ret < 0)
+			printk(KERN_ERR "%s: failed to wait for completion!\n",
+				__func__);
+		else if (!wait_ret)
+			printk(KERN_ERR "%s: Timed out waiting!\n", __func__);
+
+		if (!ret && (mddi_reg_read_value_ptr == value_list_ptr) &&
+			(*value_list_ptr == -EBUSY)) {
+			printk(KERN_ERR "%s - failed to get data from client",
+				   __func__);
+			mddi_reg_read_value_ptr = NULL;
+			ret = -EBUSY;
+		}
+	}
+
+	MDDI_MSG_DEBUG("MultiReg Read value[0]=0x%x\n", *value_list_ptr);
+
+	return ret;
+}
+#endif
diff --git a/drivers/video/msm/mddihost.h b/drivers/video/msm/mddihost.h
new file mode 100644
index 0000000..52bc67c
--- /dev/null
+++ b/drivers/video/msm/mddihost.h
@@ -0,0 +1,231 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDDIHOST_H
+#define MDDIHOST_H
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include "linux/proc_fs.h"
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+
+#include <mach/hardware.h>
+#include <linux/io.h>
+
+#include <asm/system.h>
+#include <asm/mach-types.h>
+
+#include "msm_fb_panel.h"
+
+#undef FEATURE_MDDI_MC4
+#undef FEATURE_MDDI_S6D0142
+#undef FEATURE_MDDI_HITACHI
+#define FEATURE_MDDI_SHARP
+#define FEATURE_MDDI_TOSHIBA
+#undef FEATURE_MDDI_E751
+#define FEATURE_MDDI_CORONA
+#define FEATURE_MDDI_PRISM
+
+#define T_MSM7500
+
+typedef enum {
+	format_16bpp,
+	format_18bpp,
+	format_24bpp
+} mddi_video_format;
+
+typedef enum {
+	MDDI_LCD_NONE = 0,
+	MDDI_LCD_MC4,
+	MDDI_LCD_S6D0142,
+	MDDI_LCD_SHARP,
+	MDDI_LCD_E751,
+	MDDI_LCD_CORONA,
+	MDDI_LCD_HITACHI,
+	MDDI_LCD_TOSHIBA,
+	MDDI_LCD_PRISM,
+	MDDI_LCD_TP2,
+	MDDI_NUM_LCD_TYPES,
+	MDDI_LCD_DEFAULT = MDDI_LCD_TOSHIBA
+} mddi_lcd_type;
+
+typedef enum {
+	MDDI_HOST_PRIM = 0,
+	MDDI_HOST_EXT,
+	MDDI_NUM_HOST_CORES
+} mddi_host_type;
+
+typedef enum {
+	MDDI_DRIVER_RESET,	/* host core registers have not been written. */
+	MDDI_DRIVER_DISABLED,	/* registers written, interrupts disabled. */
+	MDDI_DRIVER_ENABLED	/* registers written, interrupts enabled. */
+} mddi_host_driver_state_type;
+
+typedef enum {
+	MDDI_GPIO_INT_0 = 0,
+	MDDI_GPIO_INT_1,
+	MDDI_GPIO_INT_2,
+	MDDI_GPIO_INT_3,
+	MDDI_GPIO_INT_4,
+	MDDI_GPIO_INT_5,
+	MDDI_GPIO_INT_6,
+	MDDI_GPIO_INT_7,
+	MDDI_GPIO_INT_8,
+	MDDI_GPIO_INT_9,
+	MDDI_GPIO_INT_10,
+	MDDI_GPIO_INT_11,
+	MDDI_GPIO_INT_12,
+	MDDI_GPIO_INT_13,
+	MDDI_GPIO_INT_14,
+	MDDI_GPIO_INT_15,
+	MDDI_GPIO_NUM_INTS
+} mddi_gpio_int_type;
+
+enum mddi_data_packet_size_type {
+	MDDI_DATA_PACKET_4_BYTES  = 4,
+	MDDI_DATA_PACKET_8_BYTES  = 8,
+	MDDI_DATA_PACKET_12_BYTES = 12,
+	MDDI_DATA_PACKET_16_BYTES = 16,
+	MDDI_DATA_PACKET_24_BYTES = 24
+};
+
+typedef struct {
+	uint32 addr;
+	uint32 value;
+} mddi_reg_write_type;
+
+boolean mddi_vsync_set_handler(msm_fb_vsync_handler_type handler, void *arg);
+
+typedef void (*mddi_llist_done_cb_type) (void);
+
+typedef void (*mddi_rev_handler_type) (void *);
+
+boolean mddi_set_rev_handler(mddi_rev_handler_type handler, uint16 pkt_type);
+
+#define MDDI_DEFAULT_PRIM_PIX_ATTR 0xC3
+#define MDDI_DEFAULT_SECD_PIX_ATTR 0xC0
+
+typedef int gpio_int_polarity_type;
+typedef int gpio_int_handler_type;
+
+typedef struct {
+	void (*vsync_detected) (boolean);
+} mddi_lcd_func_type;
+
+extern mddi_lcd_func_type mddi_lcd;
+extern int irq_enabled;
+extern unsigned char mddi_timer_shutdown_flag;
+extern struct mutex mddi_timer_lock;
+
+void mddi_init(void);
+void mddi_powerdown(void);
+
+void mddi_host_start_ext_display(void);
+void mddi_host_stop_ext_display(void);
+
+extern spinlock_t mddi_host_spin_lock;
+#ifdef T_MSM7500
+void mddi_reset(void);
+#ifdef FEATURE_DUAL_PROC_MODEM_DISPLAY
+void mddi_host_switch_proc_control(boolean on);
+#endif
+#endif
+void mddi_host_exit_power_collapse(void);
+
+void mddi_queue_splash_screen
+    (void *buf_ptr,
+     boolean clear_area,
+     int16 src_width,
+     int16 src_starting_row,
+     int16 src_starting_column,
+     int16 num_of_rows,
+     int16 num_of_columns, int16 dst_starting_row, int16 dst_starting_column);
+
+void mddi_queue_image
+    (void *buf_ptr,
+     uint8 stereo_video,
+     boolean clear_area,
+     int16 src_width,
+     int16 src_starting_row,
+     int16 src_starting_column,
+     int16 num_of_rows,
+     int16 num_of_columns, int16 dst_starting_row, int16 dst_starting_column);
+
+int mddi_host_register_read
+    (uint32 reg_addr,
+     uint32 *reg_value_ptr, boolean wait, mddi_host_type host_idx);
+int mddi_host_register_write
+    (uint32 reg_addr, uint32 reg_val,
+     enum mddi_data_packet_size_type packet_size,
+     boolean wait, mddi_llist_done_cb_type done_cb, mddi_host_type host);
+boolean mddi_host_register_write_int
+    (uint32 reg_addr,
+     uint32 reg_val, mddi_llist_done_cb_type done_cb, mddi_host_type host);
+boolean mddi_host_register_read_int
+    (uint32 reg_addr, uint32 *reg_value_ptr, mddi_host_type host_idx);
+void mddi_queue_register_write_static
+    (uint32 reg_addr,
+     uint32 reg_val, boolean wait, mddi_llist_done_cb_type done_cb);
+void mddi_queue_static_window_adjust
+    (const mddi_reg_write_type *reg_write,
+     uint16 num_writes, mddi_llist_done_cb_type done_cb);
+
+#ifdef ENABLE_MDDI_MULTI_READ_WRITE
+int mddi_host_register_multiwrite(uint32 reg_addr,
+	uint32 *value_list_ptr, uint32 value_count,
+	boolean wait, mddi_llist_done_cb_type done_cb,
+	mddi_host_type host);
+int mddi_host_register_multiread(uint32 reg_addr,
+	uint32 *value_list_ptr, uint32 value_count,
+	boolean wait, mddi_host_type host);
+#endif
+
+#define mddi_queue_register_read(reg, val_ptr, wait, sig) \
+	mddi_host_register_read(reg, val_ptr, wait, MDDI_HOST_PRIM)
+#define mddi_queue_register_write(reg, val, wait, sig) \
+	mddi_host_register_write(reg, val, MDDI_DATA_PACKET_4_BYTES,\
+	wait, NULL, MDDI_HOST_PRIM)
+#define mddi_queue_register_write_extn(reg, val, pkt_size, wait, sig) \
+	mddi_host_register_write(reg, val, pkt_size, \
+	wait, NULL, MDDI_HOST_PRIM)
+#define mddi_queue_register_write_int(reg, val) \
+	mddi_host_register_write_int(reg, val, NULL, MDDI_HOST_PRIM)
+#define mddi_queue_register_read_int(reg, val_ptr) \
+	mddi_host_register_read_int(reg, val_ptr, MDDI_HOST_PRIM)
+#define mddi_queue_register_writes(reg_ptr, val, wait, sig) \
+	mddi_host_register_writes(reg_ptr, val, wait, sig, MDDI_HOST_PRIM)
+
+void mddi_wait(uint16 time_ms);
+void mddi_assign_max_pkt_dimensions(uint16 image_cols,
+				    uint16 image_rows,
+				    uint16 bpp,
+				    uint16 *max_cols, uint16 *max_rows);
+#ifdef MDDI_HOST_WINDOW_WORKAROUND
+uint16 mddi_assign_pkt_height(uint16 pkt_width, uint16 pkt_height, uint16 bpp);
+#endif
+void mddi_queue_reverse_encapsulation(boolean wait);
+int mddi_client_power(unsigned int client_id);
+void mddi_disable(int lock);
+void mddi_window_adjust(struct msm_fb_data_type *mfd,
+	uint16 x1, uint16 x2, uint16 y1, uint16 y2);
+void mddi_send_fw_link_skew_cal(mddi_host_type host_idx);
+int pmdh_clk_func(int enable);
+
+#endif /* MDDIHOST_H */
diff --git a/drivers/video/msm/mddihost_e.c b/drivers/video/msm/mddihost_e.c
new file mode 100644
index 0000000..d53aa6f
--- /dev/null
+++ b/drivers/video/msm/mddihost_e.c
@@ -0,0 +1,59 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+
+#include "msm_fb.h"
+#include "mddihost.h"
+#include "mddihosti.h"
+
+#include <linux/clk.h>
+#include <mach/clk.h>
+
+extern struct semaphore mddi_host_mutex;
+static boolean mddi_host_ext_powered = FALSE;
+
+void mddi_host_start_ext_display(void)
+{
+	down(&mddi_host_mutex);
+
+	if (!mddi_host_ext_powered) {
+		mddi_host_init(MDDI_HOST_EXT);
+
+		mddi_host_ext_powered = TRUE;
+	}
+
+	up(&mddi_host_mutex);
+}
+
+void mddi_host_stop_ext_display(void)
+{
+	down(&mddi_host_mutex);
+
+	if (mddi_host_ext_powered) {
+		mddi_host_powerdown(MDDI_HOST_EXT);
+
+		mddi_host_ext_powered = FALSE;
+	}
+
+	up(&mddi_host_mutex);
+}
diff --git a/drivers/video/msm/mddihosti.c b/drivers/video/msm/mddihosti.c
new file mode 100644
index 0000000..4989d35
--- /dev/null
+++ b/drivers/video/msm/mddihosti.c
@@ -0,0 +1,2304 @@
+/* Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+
+#include "msm_fb_panel.h"
+#include "mddihost.h"
+#include "mddihosti.h"
+
+#define FEATURE_MDDI_UNDERRUN_RECOVERY
+#ifndef FEATURE_MDDI_DISABLE_REVERSE
+static void mddi_read_rev_packet(byte *data_ptr);
+#endif
+
+struct timer_list mddi_host_timer;
+
+#define MDDI_DEFAULT_TIMER_LENGTH 5000	/* 5 seconds */
+uint32 mddi_rtd_frequency = 60000;	/* send RTD every 60 seconds */
+uint32 mddi_client_status_frequency = 60000;	/* get status pkt every 60 secs */
+
+boolean mddi_vsync_detect_enabled = FALSE;
+mddi_gpio_info_type mddi_gpio;
+
+uint32 mddi_host_core_version;
+boolean mddi_debug_log_statistics = FALSE;
+/* #define FEATURE_MDDI_HOST_ENABLE_EARLY_HIBERNATION */
+/* default to TRUE in case MDP does not vote */
+static boolean mddi_host_mdp_active_flag = TRUE;
+static uint32 mddi_log_stats_counter;
+uint32 mddi_log_stats_frequency = 4000;
+int32 mddi_client_type;
+
+#define MDDI_DEFAULT_REV_PKT_SIZE            0x20
+
+#ifndef FEATURE_MDDI_DISABLE_REVERSE
+static boolean mddi_rev_ptr_workaround = TRUE;
+static uint32 mddi_reg_read_retry;
+static uint32 mddi_reg_read_retry_max = 20;
+static boolean mddi_enable_reg_read_retry = TRUE;
+static boolean mddi_enable_reg_read_retry_once = FALSE;
+
+#define MDDI_MAX_REV_PKT_SIZE                0x60
+
+#define MDDI_CLIENT_CAPABILITY_REV_PKT_SIZE  0x60
+
+#define MDDI_VIDEO_REV_PKT_SIZE              0x40
+#define MDDI_REV_BUFFER_SIZE  MDDI_MAX_REV_PKT_SIZE
+static byte rev_packet_data[MDDI_MAX_REV_PKT_SIZE];
+#endif /* FEATURE_MDDI_DISABLE_REVERSE */
+/* leave these variables so graphics will compile */
+
+#define MDDI_MAX_REV_DATA_SIZE  128
+/*lint -d__align(x) */
+boolean mddi_debug_clear_rev_data = TRUE;
+
+uint32 *mddi_reg_read_value_ptr;
+
+mddi_client_capability_type mddi_client_capability_pkt;
+static boolean mddi_client_capability_request = FALSE;
+
+#ifndef FEATURE_MDDI_DISABLE_REVERSE
+
+#define MAX_MDDI_REV_HANDLERS 2
+#define INVALID_PKT_TYPE 0xFFFF
+
+typedef struct {
+	mddi_rev_handler_type handler;	/* ISR to be executed */
+	uint16 pkt_type;
+} mddi_rev_pkt_handler_type;
+static mddi_rev_pkt_handler_type mddi_rev_pkt_handler[MAX_MDDI_REV_HANDLERS] =
+    { {NULL, INVALID_PKT_TYPE}, {NULL, INVALID_PKT_TYPE} };
+
+static boolean mddi_rev_encap_user_request = FALSE;
+static mddi_linked_list_notify_type mddi_rev_user;
+
+spinlock_t mddi_host_spin_lock;
+extern uint32 mdp_in_processing;
+#endif
+
+typedef enum {
+	MDDI_REV_IDLE
+#ifndef FEATURE_MDDI_DISABLE_REVERSE
+	    , MDDI_REV_REG_READ_ISSUED,
+	MDDI_REV_REG_READ_SENT,
+	MDDI_REV_ENCAP_ISSUED,
+	MDDI_REV_STATUS_REQ_ISSUED,
+	MDDI_REV_CLIENT_CAP_ISSUED
+#endif
+} mddi_rev_link_state_type;
+
+typedef enum {
+	MDDI_LINK_DISABLED,
+	MDDI_LINK_HIBERNATING,
+	MDDI_LINK_ACTIVATING,
+	MDDI_LINK_ACTIVE
+} mddi_host_link_state_type;
+
+typedef struct {
+	uint32 count;
+	uint32 in_count;
+	uint32 disp_req_count;
+	uint32 state_change_count;
+	uint32 ll_done_count;
+	uint32 rev_avail_count;
+	uint32 error_count;
+	uint32 rev_encap_count;
+	uint32 llist_ptr_write_1;
+	uint32 llist_ptr_write_2;
+} mddi_host_int_type;
+
+typedef struct {
+	uint32 fwd_crc_count;
+	uint32 rev_crc_count;
+	uint32 pri_underflow;
+	uint32 sec_underflow;
+	uint32 rev_overflow;
+	uint32 pri_overwrite;
+	uint32 sec_overwrite;
+	uint32 rev_overwrite;
+	uint32 dma_failure;
+	uint32 rtd_failure;
+	uint32 reg_read_failure;
+#ifdef FEATURE_MDDI_UNDERRUN_RECOVERY
+	uint32 pri_underrun_detected;
+#endif
+} mddi_host_stat_type;
+
+typedef struct {
+	uint32 rtd_cnt;
+	uint32 rev_enc_cnt;
+	uint32 vid_cnt;
+	uint32 reg_acc_cnt;
+	uint32 cli_stat_cnt;
+	uint32 cli_cap_cnt;
+	uint32 reg_read_cnt;
+	uint32 link_active_cnt;
+	uint32 link_hibernate_cnt;
+	uint32 vsync_response_cnt;
+	uint32 fwd_crc_cnt;
+	uint32 rev_crc_cnt;
+} mddi_log_params_struct_type;
+
+typedef struct {
+	uint32 rtd_value;
+	uint32 rtd_counter;
+	uint32 client_status_cnt;
+	boolean rev_ptr_written;
+	uint8 *rev_ptr_start;
+	uint8 *rev_ptr_curr;
+	uint32 mddi_rev_ptr_write_val;
+	dma_addr_t rev_data_dma_addr;
+	uint16 rev_pkt_size;
+	mddi_rev_link_state_type rev_state;
+	mddi_host_link_state_type link_state;
+	mddi_host_driver_state_type driver_state;
+	boolean disable_hibernation;
+	uint32 saved_int_reg;
+	uint32 saved_int_en;
+	mddi_linked_list_type *llist_ptr;
+	dma_addr_t llist_dma_addr;
+	mddi_linked_list_type *llist_dma_ptr;
+	uint32 *rev_data_buf;
+	struct completion mddi_llist_avail_comp;
+	boolean mddi_waiting_for_llist_avail;
+	mddi_host_int_type int_type;
+	mddi_host_stat_type stats;
+	mddi_log_params_struct_type log_parms;
+	mddi_llist_info_type llist_info;
+	mddi_linked_list_notify_type llist_notify[MDDI_MAX_NUM_LLIST_ITEMS];
+} mddi_host_cntl_type;
+
+static mddi_host_type mddi_curr_host = MDDI_HOST_PRIM;
+static mddi_host_cntl_type mhctl[MDDI_NUM_HOST_CORES];
+mddi_linked_list_type *llist_extern[MDDI_NUM_HOST_CORES];
+mddi_linked_list_type *llist_dma_extern[MDDI_NUM_HOST_CORES];
+mddi_linked_list_notify_type *llist_extern_notify[MDDI_NUM_HOST_CORES];
+static mddi_log_params_struct_type prev_parms[MDDI_NUM_HOST_CORES];
+
+extern uint32 mdp_total_vdopkts;
+
+static boolean mddi_host_io_clock_on = FALSE;
+static boolean mddi_host_hclk_on = FALSE;
+
+int int_mddi_pri_flag = FALSE;
+int int_mddi_ext_flag = FALSE;
+
+static void mddi_report_errors(uint32 int_reg)
+{
+	mddi_host_type host_idx = mddi_curr_host;
+	mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
+
+	if (int_reg & MDDI_INT_PRI_UNDERFLOW) {
+		pmhctl->stats.pri_underflow++;
+		MDDI_MSG_ERR("!!! MDDI Primary Underflow !!!\n");
+	}
+	if (int_reg & MDDI_INT_SEC_UNDERFLOW) {
+		pmhctl->stats.sec_underflow++;
+		MDDI_MSG_ERR("!!! MDDI Secondary Underflow !!!\n");
+	}
+#ifndef FEATURE_MDDI_DISABLE_REVERSE
+	if (int_reg & MDDI_INT_REV_OVERFLOW) {
+		pmhctl->stats.rev_overflow++;
+		MDDI_MSG_ERR("!!! MDDI Reverse Overflow !!!\n");
+		pmhctl->rev_ptr_curr = pmhctl->rev_ptr_start;
+		mddi_host_reg_out(REV_PTR, pmhctl->mddi_rev_ptr_write_val);
+
+	}
+	if (int_reg & MDDI_INT_CRC_ERROR)
+		MDDI_MSG_ERR("!!! MDDI Reverse CRC Error !!!\n");
+#endif
+	if (int_reg & MDDI_INT_PRI_OVERWRITE) {
+		pmhctl->stats.pri_overwrite++;
+		MDDI_MSG_ERR("!!! MDDI Primary Overwrite !!!\n");
+	}
+	if (int_reg & MDDI_INT_SEC_OVERWRITE) {
+		pmhctl->stats.sec_overwrite++;
+		MDDI_MSG_ERR("!!! MDDI Secondary Overwrite !!!\n");
+	}
+#ifndef FEATURE_MDDI_DISABLE_REVERSE
+	if (int_reg & MDDI_INT_REV_OVERWRITE) {
+		pmhctl->stats.rev_overwrite++;
+		/* This will show up normally and is not a problem */
+		MDDI_MSG_DEBUG("MDDI Reverse Overwrite!\n");
+	}
+	if (int_reg & MDDI_INT_RTD_FAILURE) {
+		mddi_host_reg_outm(INTEN, MDDI_INT_RTD_FAILURE, 0);
+		pmhctl->stats.rtd_failure++;
+		MDDI_MSG_ERR("!!! MDDI RTD Failure !!!\n");
+	}
+#endif
+	if (int_reg & MDDI_INT_DMA_FAILURE) {
+		pmhctl->stats.dma_failure++;
+		MDDI_MSG_ERR("!!! MDDI DMA Abort !!!\n");
+	}
+}
+
+static void mddi_host_enable_io_clock(void)
+{
+	if (!MDDI_HOST_IS_IO_CLOCK_ON)
+		MDDI_HOST_ENABLE_IO_CLOCK;
+}
+
+static void mddi_host_enable_hclk(void)
+{
+
+	if (!MDDI_HOST_IS_HCLK_ON)
+		MDDI_HOST_ENABLE_HCLK;
+}
+
+static void mddi_host_disable_io_clock(void)
+{
+#ifndef FEATURE_MDDI_HOST_IO_CLOCK_CONTROL_DISABLE
+	if (MDDI_HOST_IS_IO_CLOCK_ON)
+		MDDI_HOST_DISABLE_IO_CLOCK;
+#endif
+}
+
+static void mddi_host_disable_hclk(void)
+{
+#ifndef FEATURE_MDDI_HOST_HCLK_CONTROL_DISABLE
+	if (MDDI_HOST_IS_HCLK_ON)
+		MDDI_HOST_DISABLE_HCLK;
+#endif
+}
+
+static void mddi_vote_to_sleep(mddi_host_type host_idx, boolean sleep)
+{
+	uint16 vote_mask;
+
+	if (host_idx == MDDI_HOST_PRIM)
+		vote_mask = 0x01;
+	else
+		vote_mask = 0x02;
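+
+	/* note: vote_mask is computed but no sleep vote is issued from here */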
+}
+
+static void mddi_report_state_change(uint32 int_reg)
+{
+	mddi_host_type host_idx = mddi_curr_host;
+	mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
+
+	if ((pmhctl->saved_int_reg & MDDI_INT_IN_HIBERNATION) &&
+	    (pmhctl->saved_int_reg & MDDI_INT_LINK_ACTIVE)) {
+		/* recover from condition where the io_clock was turned off by the
+		   clock driver during a transition to hibernation. The io_clock
+		   disable is to prevent MDP/MDDI underruns when changing ARM
+		   clock speeds. In the process of halting the ARM, the hclk
+		   divider needs to be set to 1. When it is set to 1, there is
+		   a small time (usecs) when hclk is off or slow, and this can
+		   cause an underrun. To prevent the underrun, clock driver turns
+		   off the MDDI io_clock before making the change. */
+		mddi_host_reg_out(CMD, MDDI_CMD_POWERUP);
+	}
+
+	if (int_reg & MDDI_INT_LINK_ACTIVE) {
+		pmhctl->link_state = MDDI_LINK_ACTIVE;
+		pmhctl->log_parms.link_active_cnt++;
+		pmhctl->rtd_value = mddi_host_reg_in(RTD_VAL);
+		MDDI_MSG_DEBUG("!!! MDDI Active RTD:0x%x!!!\n",
+			       pmhctl->rtd_value);
+		/* now interrupt on hibernation */
+		mddi_host_reg_outm(INTEN,
+				   (MDDI_INT_IN_HIBERNATION |
+				    MDDI_INT_LINK_ACTIVE),
+				   MDDI_INT_IN_HIBERNATION);
+
+#ifdef DEBUG_MDDIHOSTI
+		/* if gpio interrupt is enabled, start polling at fastest
+		 * registered rate
+		 */
+		if (mddi_gpio.polling_enabled) {
+			timer_reg(&mddi_gpio_poll_timer,
+		mddi_gpio_poll_timer_cb, 0, mddi_gpio.polling_interval, 0);
+		}
+#endif
+#ifndef FEATURE_MDDI_DISABLE_REVERSE
+		if (mddi_rev_ptr_workaround) {
+			/* HW CR: need to reset reverse register stuff */
+			pmhctl->rev_ptr_written = FALSE;
+			pmhctl->rev_ptr_curr = pmhctl->rev_ptr_start;
+		}
+#endif
+		/* vote on sleep */
+		mddi_vote_to_sleep(host_idx, FALSE);
+
+		if (host_idx == MDDI_HOST_PRIM) {
+			if (mddi_vsync_detect_enabled) {
+				/*
+				 * Indicate to client specific code that vsync
+				 * was enabled, but we did not detect a client
+				 * initiated wakeup. The client specific
+				 * handler can either reassert vsync detection,
+				 * or treat this as a valid vsync.
+				 */
+				mddi_client_lcd_vsync_detected(FALSE);
+				pmhctl->log_parms.vsync_response_cnt++;
+			}
+		}
+	}
+	if (int_reg & MDDI_INT_IN_HIBERNATION) {
+		pmhctl->link_state = MDDI_LINK_HIBERNATING;
+		pmhctl->log_parms.link_hibernate_cnt++;
+		MDDI_MSG_DEBUG("!!! MDDI Hibernating !!!\n");
+
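+		/* re-program pad control/calibration and turn-around lengths
+		 * for client type 2 on entry to hibernation */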
+		if (mddi_client_type == 2) {
+			mddi_host_reg_out(PAD_CTL, 0x402a850f);
+			mddi_host_reg_out(PAD_CAL, 0x10220020);
+			mddi_host_reg_out(TA1_LEN, 0x0010);
+			mddi_host_reg_out(TA2_LEN, 0x0040);
+		}
+		/* now interrupt on link_active */
+#ifdef FEATURE_MDDI_DISABLE_REVERSE
+		mddi_host_reg_outm(INTEN,
+				   (MDDI_INT_MDDI_IN |
+				    MDDI_INT_IN_HIBERNATION |
+				    MDDI_INT_LINK_ACTIVE),
+				   MDDI_INT_LINK_ACTIVE);
+#else
+		mddi_host_reg_outm(INTEN,
+				   (MDDI_INT_MDDI_IN |
+				    MDDI_INT_IN_HIBERNATION |
+				    MDDI_INT_LINK_ACTIVE),
+				   (MDDI_INT_MDDI_IN | MDDI_INT_LINK_ACTIVE));
+
+		pmhctl->rtd_counter = mddi_rtd_frequency;
+
+		if (pmhctl->rev_state != MDDI_REV_IDLE) {
+			/* a rev_encap will not wake up the link, so we do that here */
+			pmhctl->link_state = MDDI_LINK_ACTIVATING;
+			mddi_host_reg_out(CMD, MDDI_CMD_LINK_ACTIVE);
+		}
+#endif
+
+		if (pmhctl->disable_hibernation) {
+			mddi_host_reg_out(CMD, MDDI_CMD_HIBERNATE);
+			mddi_host_reg_out(CMD, MDDI_CMD_LINK_ACTIVE);
+			pmhctl->link_state = MDDI_LINK_ACTIVATING;
+		}
+#ifdef FEATURE_MDDI_UNDERRUN_RECOVERY
+		if ((pmhctl->llist_info.transmitting_start_idx !=
+		     UNASSIGNED_INDEX) &&
+		    ((pmhctl->saved_int_reg &
+		      (MDDI_INT_PRI_LINK_LIST_DONE | MDDI_INT_PRI_PTR_READ)) ==
+		     MDDI_INT_PRI_PTR_READ)) {
+			mddi_linked_list_type *llist_dma;
+			llist_dma = pmhctl->llist_dma_ptr;
+			/*
+			 * All indications are that we have not received a
+			 * linked list done interrupt, due to an underrun
+			 * condition. Recovery attempt is to send again.
+			 */
+			dma_coherent_pre_ops();
+			/* Write to primary pointer register again */
+			mddi_host_reg_out(PRI_PTR,
+					  &llist_dma[pmhctl->llist_info.
+						     transmitting_start_idx]);
+			pmhctl->stats.pri_underrun_detected++;
+		}
+#endif
+
+		/* vote on sleep */
+		if (pmhctl->link_state == MDDI_LINK_HIBERNATING) {
+			mddi_vote_to_sleep(host_idx, TRUE);
+		}
+
+#ifdef DEBUG_MDDIHOSTI
+		/* need to stop polling timer */
+		if (mddi_gpio.polling_enabled) {
+			(void) timer_clr(&mddi_gpio_poll_timer, T_NONE);
+		}
+#endif
+	}
+}
+
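+/*
+ * Periodic housekeeping: bumps the RTD/status counters, kicks the link or
+ * the ISR when client status polling is overdue, optionally dumps link
+ * statistics, and re-arms itself unless a shutdown is in progress.
+ */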
+void mddi_host_timer_service(unsigned long data)
+{
+#ifndef FEATURE_MDDI_DISABLE_REVERSE
+	unsigned long flags;
+#endif
+	mddi_host_type host_idx;
+	mddi_host_cntl_type *pmhctl;
+
+	unsigned long time_ms = MDDI_DEFAULT_TIMER_LENGTH;
+	init_timer(&mddi_host_timer);
+	for (host_idx = MDDI_HOST_PRIM; host_idx < MDDI_NUM_HOST_CORES;
+	     host_idx++) {
+		pmhctl = &(mhctl[host_idx]);
+		mddi_log_stats_counter += (uint32) time_ms;
+#ifndef FEATURE_MDDI_DISABLE_REVERSE
+		pmhctl->rtd_counter += (uint32) time_ms;
+		pmhctl->client_status_cnt += (uint32) time_ms;
+
+		if (host_idx == MDDI_HOST_PRIM) {
+			if (pmhctl->client_status_cnt >=
+			    mddi_client_status_frequency) {
+				if ((pmhctl->link_state ==
+				     MDDI_LINK_HIBERNATING)
+				    && (pmhctl->client_status_cnt >
+					mddi_client_status_frequency)) {
+					/*
+					 * special case where we are hibernating
+					 * and mddi_host_isr is not firing, so
+					 * kick the link so that the status can
+					 * be retrieved
+					 */
+
+					/* need to wake up link before issuing
+					 * rev encap command
+					 */
+					MDDI_MSG_INFO("wake up link!\n");
+					spin_lock_irqsave(&mddi_host_spin_lock,
+							  flags);
+					mddi_host_enable_hclk();
+					mddi_host_enable_io_clock();
+					pmhctl->link_state =
+					    MDDI_LINK_ACTIVATING;
+					mddi_host_reg_out(CMD,
+							  MDDI_CMD_LINK_ACTIVE);
+					spin_unlock_irqrestore
+					    (&mddi_host_spin_lock, flags);
+				} else
+				    if ((pmhctl->link_state == MDDI_LINK_ACTIVE)
+					&& pmhctl->disable_hibernation) {
+					/*
+					 * special case where we have disabled
+					 * hibernation and mddi_host_isr
+					 * is not firing, so enable interrupt
+					 * for no pkts pending, which will
+					 * generate an interrupt
+					 */
+					MDDI_MSG_INFO("kick isr!\n");
+					spin_lock_irqsave(&mddi_host_spin_lock,
+							  flags);
+					mddi_host_enable_hclk();
+					mddi_host_reg_outm(INTEN,
+							   MDDI_INT_NO_CMD_PKTS_PEND,
+							   MDDI_INT_NO_CMD_PKTS_PEND);
+					spin_unlock_irqrestore
+					    (&mddi_host_spin_lock, flags);
+				}
+			}
+		}
+#endif /* #ifndef FEATURE_MDDI_DISABLE_REVERSE */
+	}
+
+	/* Check if logging is turned on */
+	for (host_idx = MDDI_HOST_PRIM; host_idx < MDDI_NUM_HOST_CORES;
+	     host_idx++) {
+		mddi_log_params_struct_type *prev_ptr = &(prev_parms[host_idx]);
+		pmhctl = &(mhctl[host_idx]);
+
+		if (mddi_debug_log_statistics) {
+
+			/* get video pkt count from MDP, since MDDI sw cannot know this */
+			pmhctl->log_parms.vid_cnt = mdp_total_vdopkts;
+
+			if (mddi_log_stats_counter >= mddi_log_stats_frequency) {
+				/* mddi_log_stats_counter = 0; */
+				if (mddi_debug_log_statistics) {
+					MDDI_MSG_NOTICE
+					    ("MDDI Statistics since last report:\n");
+					MDDI_MSG_NOTICE("  Packets sent:\n");
+					MDDI_MSG_NOTICE
+					    ("    %d RTD packet(s)\n",
+					     pmhctl->log_parms.rtd_cnt -
+					     prev_ptr->rtd_cnt);
+					if (prev_ptr->rtd_cnt !=
+					    pmhctl->log_parms.rtd_cnt) {
+						unsigned long flags;
+						spin_lock_irqsave
+						    (&mddi_host_spin_lock,
+						     flags);
+						mddi_host_enable_hclk();
+						pmhctl->rtd_value =
+						    mddi_host_reg_in(RTD_VAL);
+						spin_unlock_irqrestore
+						    (&mddi_host_spin_lock,
+						     flags);
+						MDDI_MSG_NOTICE
+						    ("      RTD value=%d\n",
+						     pmhctl->rtd_value);
+					}
+					MDDI_MSG_NOTICE
+					    ("    %d VIDEO packets\n",
+					     pmhctl->log_parms.vid_cnt -
+					     prev_ptr->vid_cnt);
+					MDDI_MSG_NOTICE
+					    ("    %d Register Access packets\n",
+					     pmhctl->log_parms.reg_acc_cnt -
+					     prev_ptr->reg_acc_cnt);
+					MDDI_MSG_NOTICE
+					    ("    %d Reverse Encapsulation packet(s)\n",
+					     pmhctl->log_parms.rev_enc_cnt -
+					     prev_ptr->rev_enc_cnt);
+					if (prev_ptr->rev_enc_cnt !=
+					    pmhctl->log_parms.rev_enc_cnt) {
+						/* report # of reverse CRC errors */
+						MDDI_MSG_NOTICE
+						    ("      %d reverse CRC errors detected\n",
+						     pmhctl->log_parms.
+						     rev_crc_cnt -
+						     prev_ptr->rev_crc_cnt);
+					}
+					MDDI_MSG_NOTICE
+					    ("  Packets received:\n");
+					MDDI_MSG_NOTICE
+					    ("    %d Client Status packets",
+					     pmhctl->log_parms.cli_stat_cnt -
+					     prev_ptr->cli_stat_cnt);
+					if (prev_ptr->cli_stat_cnt !=
+					    pmhctl->log_parms.cli_stat_cnt) {
+						MDDI_MSG_NOTICE
+						    ("      %d forward CRC errors reported\n",
+						     pmhctl->log_parms.
+						     fwd_crc_cnt -
+						     prev_ptr->fwd_crc_cnt);
+					}
+					MDDI_MSG_NOTICE
+					    ("    %d Register Access Read packets\n",
+					     pmhctl->log_parms.reg_read_cnt -
+					     prev_ptr->reg_read_cnt);
+
+					if (pmhctl->link_state ==
+					    MDDI_LINK_ACTIVE) {
+						MDDI_MSG_NOTICE
+						    ("  Current Link Status: Active\n");
+					} else
+					    if ((pmhctl->link_state ==
+						 MDDI_LINK_HIBERNATING)
+						|| (pmhctl->link_state ==
+						    MDDI_LINK_ACTIVATING)) {
+						MDDI_MSG_NOTICE
+						    ("  Current Link Status: Hibernation\n");
+					} else {
+						MDDI_MSG_NOTICE
+						    ("  Current Link Status: Inactive\n");
+					}
+					MDDI_MSG_NOTICE
+					    ("    Active state entered %d times\n",
+					     pmhctl->log_parms.link_active_cnt -
+					     prev_ptr->link_active_cnt);
+					MDDI_MSG_NOTICE
+					    ("    Hibernation state entered %d times\n",
+					     pmhctl->log_parms.
+					     link_hibernate_cnt -
+					     prev_ptr->link_hibernate_cnt);
+				}
+			}
+			prev_parms[host_idx] = pmhctl->log_parms;
+		}
+	}
+	if (mddi_log_stats_counter >= mddi_log_stats_frequency)
+		mddi_log_stats_counter = 0;
+
+	mutex_lock(&mddi_timer_lock);
+	if (!mddi_timer_shutdown_flag) {
+		mddi_host_timer.function = mddi_host_timer_service;
+		mddi_host_timer.data = 0;
+		mddi_host_timer.expires = jiffies + ((time_ms * HZ) / 1000);
+		add_timer(&mddi_host_timer);
+	}
+	mutex_unlock(&mddi_timer_lock);
+
+	return;
+}				/* mddi_host_timer_service */
+
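+/*
+ * Forward linked-list completion: walk the just-transmitted list, complete
+ * or call back any waiters, release the list items (a pending register read
+ * item is held until the reverse encapsulation finishes), and wake anyone
+ * blocked waiting for a free item.
+ */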
+static void mddi_process_link_list_done(void)
+{
+	mddi_host_type host_idx = mddi_curr_host;
+	mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
+
+	/* normal forward linked list packet(s) were sent */
+	if (pmhctl->llist_info.transmitting_start_idx == UNASSIGNED_INDEX) {
+		MDDI_MSG_ERR("**** getting LL done, but no list ****\n");
+	} else {
+		uint16 idx;
+
+#ifndef FEATURE_MDDI_DISABLE_REVERSE
+		if (pmhctl->rev_state == MDDI_REV_REG_READ_ISSUED) {
+			/* special case where a register read packet was sent */
+			pmhctl->rev_state = MDDI_REV_REG_READ_SENT;
+			if (pmhctl->llist_info.reg_read_idx == UNASSIGNED_INDEX) {
+				MDDI_MSG_ERR
+				    ("**** getting LL done, but no list ****\n");
+			}
+		}
+#endif
+		for (idx = pmhctl->llist_info.transmitting_start_idx;;) {
+			uint16 next_idx = pmhctl->llist_notify[idx].next_idx;
+			/* with reg read we don't release the waiting tcb until after
+			 * the reverse encapsulation has completed.
+			 */
+			if (idx != pmhctl->llist_info.reg_read_idx) {
+				/* notify task that may be waiting on this completion */
+				if (pmhctl->llist_notify[idx].waiting) {
+					complete(&
+						 (pmhctl->llist_notify[idx].
+						  done_comp));
+				}
+				if (pmhctl->llist_notify[idx].done_cb != NULL) {
+					(*(pmhctl->llist_notify[idx].done_cb))
+					    ();
+				}
+
+				pmhctl->llist_notify[idx].in_use = FALSE;
+				pmhctl->llist_notify[idx].waiting = FALSE;
+				pmhctl->llist_notify[idx].done_cb = NULL;
+				if (idx < MDDI_NUM_DYNAMIC_LLIST_ITEMS) {
+					/* static LLIST items are configured only once */
+					pmhctl->llist_notify[idx].next_idx =
+					    UNASSIGNED_INDEX;
+				}
+				/*
+				 * currently, all linked list packets are
+				 * register access, so we can increment the
+				 * counter for that packet type here.
+				 */
+				pmhctl->log_parms.reg_acc_cnt++;
+			}
+			if (idx == pmhctl->llist_info.transmitting_end_idx)
+				break;
+			idx = next_idx;
+			if (idx == UNASSIGNED_INDEX)
+				MDDI_MSG_CRIT("MDDI linked list corruption!\n");
+		}
+
+		pmhctl->llist_info.transmitting_start_idx = UNASSIGNED_INDEX;
+		pmhctl->llist_info.transmitting_end_idx = UNASSIGNED_INDEX;
+
+		if (pmhctl->mddi_waiting_for_llist_avail) {
+			if (!
+			    (pmhctl->
+			     llist_notify[pmhctl->llist_info.next_free_idx].
+			     in_use)) {
+				pmhctl->mddi_waiting_for_llist_avail = FALSE;
+				complete(&(pmhctl->mddi_llist_avail_comp));
+			}
+		}
+	}
+
+	/* Turn off MDDI_INT_PRI_LINK_LIST_DONE interrupt */
+	mddi_host_reg_outm(INTEN, MDDI_INT_PRI_LINK_LIST_DONE, 0);
+
+}
+
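+/*
+ * If the forward path is free, move the packets queued on the waiting list
+ * to the transmitting list and write the head of the list to the primary
+ * pointer register; a pending register read is only started once the
+ * reverse path is idle.
+ */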
+static void mddi_queue_forward_linked_list(void)
+{
+	uint16 first_pkt_index;
+	mddi_linked_list_type *llist_dma;
+	mddi_host_type host_idx = mddi_curr_host;
+	mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
+	llist_dma = pmhctl->llist_dma_ptr;
+
+	first_pkt_index = UNASSIGNED_INDEX;
+
+	if (pmhctl->llist_info.transmitting_start_idx == UNASSIGNED_INDEX) {
+#ifndef FEATURE_MDDI_DISABLE_REVERSE
+		if (pmhctl->llist_info.reg_read_waiting) {
+			if (pmhctl->rev_state == MDDI_REV_IDLE) {
+				/*
+				 * we have a register read to send and
+				 * can send it now
+				 */
+				pmhctl->rev_state = MDDI_REV_REG_READ_ISSUED;
+				mddi_reg_read_retry = 0;
+				first_pkt_index =
+				    pmhctl->llist_info.waiting_start_idx;
+				pmhctl->llist_info.reg_read_waiting = FALSE;
+			}
+		} else
+#endif
+		{
+			/*
+			 * no register read to worry about, go ahead and write
+			 * anything that may be on the waiting list.
+			 */
+			first_pkt_index = pmhctl->llist_info.waiting_start_idx;
+		}
+	}
+
+	if (first_pkt_index != UNASSIGNED_INDEX) {
+		pmhctl->llist_info.transmitting_start_idx =
+		    pmhctl->llist_info.waiting_start_idx;
+		pmhctl->llist_info.transmitting_end_idx =
+		    pmhctl->llist_info.waiting_end_idx;
+		pmhctl->llist_info.waiting_start_idx = UNASSIGNED_INDEX;
+		pmhctl->llist_info.waiting_end_idx = UNASSIGNED_INDEX;
+
+		/* write to the primary pointer register */
+		MDDI_MSG_DEBUG("MDDI writing primary ptr with idx=%d\n",
+			       first_pkt_index);
+
+		pmhctl->int_type.llist_ptr_write_2++;
+
+		dma_coherent_pre_ops();
+		mddi_host_reg_out(PRI_PTR, &llist_dma[first_pkt_index]);
+
+		/* enable interrupt when complete */
+		mddi_host_reg_outm(INTEN, MDDI_INT_PRI_LINK_LIST_DONE,
+				   MDDI_INT_PRI_LINK_LIST_DONE);
+
+	}
+
+}
+
+#ifndef FEATURE_MDDI_DISABLE_REVERSE
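+/*
+ * Copy one packet out of the circular reverse buffer into data_ptr as a
+ * length-prefixed blob, wrapping at the end of the buffer; a NULL data_ptr
+ * just skips over the packet.
+ */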
+static void mddi_read_rev_packet(byte *data_ptr)
+{
+	uint16 i, length;
+	mddi_host_type host_idx = mddi_curr_host;
+	mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
+
+	uint8 *rev_ptr_overflow =
+	    (pmhctl->rev_ptr_start + MDDI_REV_BUFFER_SIZE);
+
+	/* first determine the length and handle invalid lengths */
+	length = *pmhctl->rev_ptr_curr++;
+	if (pmhctl->rev_ptr_curr >= rev_ptr_overflow)
+		pmhctl->rev_ptr_curr = pmhctl->rev_ptr_start;
+	length |= ((*pmhctl->rev_ptr_curr++) << 8);
+	if (pmhctl->rev_ptr_curr >= rev_ptr_overflow)
+		pmhctl->rev_ptr_curr = pmhctl->rev_ptr_start;
+	if (length > (pmhctl->rev_pkt_size - 2)) {
+		MDDI_MSG_ERR("Invalid rev pkt length %d\n", length);
+		/* rev_pkt_size should always be <= rev_ptr_size so limit to packet size */
+		length = pmhctl->rev_pkt_size - 2;
+	}
+
+	/* If the data pointer is NULL, just increment the pmhctl->rev_ptr_curr.
+	 * Loop around if necessary. Don't bother reading the data.
+	 */
+	if (data_ptr == NULL) {
+		pmhctl->rev_ptr_curr += length;
+		if (pmhctl->rev_ptr_curr >= rev_ptr_overflow)
+			pmhctl->rev_ptr_curr -= MDDI_REV_BUFFER_SIZE;
+		return;
+	}
+
+	data_ptr[0] = length & 0x0ff;
+	data_ptr[1] = length >> 8;
+	data_ptr += 2;
+	/* copy the data to data_ptr byte-at-a-time */
+	for (i = 0; (i < length) && (pmhctl->rev_ptr_curr < rev_ptr_overflow);
+	     i++)
+		*data_ptr++ = *pmhctl->rev_ptr_curr++;
+	if (pmhctl->rev_ptr_curr >= rev_ptr_overflow)
+		pmhctl->rev_ptr_curr = pmhctl->rev_ptr_start;
+	for (; (i < length) && (pmhctl->rev_ptr_curr < rev_ptr_overflow); i++)
+		*data_ptr++ = *pmhctl->rev_ptr_curr++;
+}
+
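+/*
+ * Handle MDDI_INT_REV_DATA_AVAIL: drain the reverse buffer, parse each
+ * reverse packet (client capability, client status, register access read
+ * data, or a registered handler), then advance the reverse state machine
+ * and re-enable the reverse-data interrupt.
+ */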
+static void mddi_process_rev_packets(void)
+{
+	uint32 rev_packet_count;
+	word i;
+	uint32 crc_errors;
+	boolean mddi_reg_read_successful = FALSE;
+	mddi_host_type host_idx = mddi_curr_host;
+	mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
+
+	pmhctl->log_parms.rev_enc_cnt++;
+	if ((pmhctl->rev_state != MDDI_REV_ENCAP_ISSUED) &&
+	    (pmhctl->rev_state != MDDI_REV_STATUS_REQ_ISSUED) &&
+	    (pmhctl->rev_state != MDDI_REV_CLIENT_CAP_ISSUED)) {
+		MDDI_MSG_ERR("Wrong state %d for reverse int\n",
+			     pmhctl->rev_state);
+	}
+	/* Turn off MDDI_INT_REV_AVAIL interrupt */
+	mddi_host_reg_outm(INTEN, MDDI_INT_REV_DATA_AVAIL, 0);
+
+	/* Clear rev data avail int */
+	mddi_host_reg_out(INT, MDDI_INT_REV_DATA_AVAIL);
+
+	/* Get Number of packets */
+	rev_packet_count = mddi_host_reg_in(REV_PKT_CNT);
+
+#ifndef T_MSM7500
+	/* Clear out rev packet counter */
+	mddi_host_reg_out(REV_PKT_CNT, 0x0000);
+#endif
+
+#if defined(CONFIG_FB_MSM_MDP31) || defined(CONFIG_FB_MSM_MDP40)
+	if ((pmhctl->rev_state == MDDI_REV_CLIENT_CAP_ISSUED) &&
+	    (rev_packet_count > 0) &&
+	    (mddi_host_core_version == 0x28 ||
+	     mddi_host_core_version == 0x30)) {
+
+		uint32 int_reg;
+		uint32 max_count = 0;
+
+		mddi_host_reg_out(REV_PTR, pmhctl->mddi_rev_ptr_write_val);
+		int_reg = mddi_host_reg_in(INT);
+		while ((int_reg & 0x100000) == 0) {
+			udelay(3);
+			int_reg = mddi_host_reg_in(INT);
+			if (++max_count > 100)
+				break;
+		}
+	}
+#endif
+
+	/* Get CRC error count */
+	crc_errors = mddi_host_reg_in(REV_CRC_ERR);
+	if (crc_errors != 0) {
+		pmhctl->log_parms.rev_crc_cnt += crc_errors;
+		pmhctl->stats.rev_crc_count += crc_errors;
+		MDDI_MSG_ERR("!!! MDDI %d Reverse CRC Error(s) !!!\n",
+			     crc_errors);
+#ifndef T_MSM7500
+		/* Clear CRC error count */
+		mddi_host_reg_out(REV_CRC_ERR, 0x0000);
+#endif
+		/* also issue an RTD to attempt recovery */
+		pmhctl->rtd_counter = mddi_rtd_frequency;
+	}
+
+	pmhctl->rtd_value = mddi_host_reg_in(RTD_VAL);
+
+	MDDI_MSG_DEBUG("MDDI rev pkt cnt=%d, ptr=0x%x, RTD:0x%x\n",
+		       rev_packet_count,
+		       pmhctl->rev_ptr_curr - pmhctl->rev_ptr_start,
+		       pmhctl->rtd_value);
+
+	if (rev_packet_count >= 1) {
+		mddi_invalidate_cache_lines((uint32 *) pmhctl->rev_ptr_start,
+					    MDDI_REV_BUFFER_SIZE);
+	} else {
+		MDDI_MSG_ERR("Reverse pkt sent, no data rxd\n");
+		if (mddi_reg_read_value_ptr)
+			*mddi_reg_read_value_ptr = -EBUSY;
+	}
+	/* order the reads */
+	dma_coherent_post_ops();
+	for (i = 0; i < rev_packet_count; i++) {
+		mddi_rev_packet_type *rev_pkt_ptr;
+
+		mddi_read_rev_packet(rev_packet_data);
+
+		rev_pkt_ptr = (mddi_rev_packet_type *) rev_packet_data;
+
+		if (rev_pkt_ptr->packet_length > pmhctl->rev_pkt_size) {
+			MDDI_MSG_ERR("!!!invalid packet size: %d\n",
+				     rev_pkt_ptr->packet_length);
+		}
+
+		MDDI_MSG_DEBUG("MDDI rev pkt 0x%x size 0x%x\n",
+			       rev_pkt_ptr->packet_type,
+			       rev_pkt_ptr->packet_length);
+
+		/* handle the packet based on its reverse packet type */
+		switch (rev_pkt_ptr->packet_type) {
+		case 66:	/* Client Capability */
+			{
+				mddi_client_capability_type
+				    *client_capability_pkt_ptr;
+
+				client_capability_pkt_ptr =
+				    (mddi_client_capability_type *)
+				    rev_packet_data;
+				MDDI_MSG_NOTICE
+				    ("Client Capability: Week=%d, Year=%d\n",
+				     client_capability_pkt_ptr->
+				     Week_of_Manufacture,
+				     client_capability_pkt_ptr->
+				     Year_of_Manufacture);
+				memcpy((void *)&mddi_client_capability_pkt,
+				       (void *)rev_packet_data,
+				       sizeof(mddi_client_capability_type));
+				pmhctl->log_parms.cli_cap_cnt++;
+			}
+			break;
+
+		case 70:	/* Display Status */
+			{
+				mddi_client_status_type *client_status_pkt_ptr;
+
+				client_status_pkt_ptr =
+				    (mddi_client_status_type *) rev_packet_data;
+				if ((client_status_pkt_ptr->crc_error_count !=
+				     0)
+				    || (client_status_pkt_ptr->
+					reverse_link_request != 0)) {
+					MDDI_MSG_ERR
+					    ("Client Status: RevReq=%d, CrcErr=%d\n",
+					     client_status_pkt_ptr->
+					     reverse_link_request,
+					     client_status_pkt_ptr->
+					     crc_error_count);
+				} else {
+					MDDI_MSG_DEBUG
+					    ("Client Status: RevReq=%d, CrcErr=%d\n",
+					     client_status_pkt_ptr->
+					     reverse_link_request,
+					     client_status_pkt_ptr->
+					     crc_error_count);
+				}
+				pmhctl->log_parms.fwd_crc_cnt +=
+				    client_status_pkt_ptr->crc_error_count;
+				pmhctl->stats.fwd_crc_count +=
+				    client_status_pkt_ptr->crc_error_count;
+				pmhctl->log_parms.cli_stat_cnt++;
+			}
+			break;
+
+		case 146:	/* register access packet */
+			{
+				mddi_register_access_packet_type
+				    * regacc_pkt_ptr;
+				uint32 data_count;
+
+				regacc_pkt_ptr =
+				    (mddi_register_access_packet_type *)
+				    rev_packet_data;
+
+				/* Bits[0:13] - read data count */
+				data_count = regacc_pkt_ptr->read_write_info
+					& 0x3FFF;
+				MDDI_MSG_DEBUG("\n MDDI rev read: 0x%x",
+					regacc_pkt_ptr->read_write_info);
+				MDDI_MSG_DEBUG("Reg Acc parse reg=0x%x,"
+					"value=0x%x\n", regacc_pkt_ptr->
+					register_address, regacc_pkt_ptr->
+					register_data_list[0]);
+
+				/* Copy register value to location passed in */
+				if (mddi_reg_read_value_ptr) {
+#if defined(T_MSM6280) && !defined(T_MSM7200)
+					/* only least significant 16 bits are valid with 6280 */
+					*mddi_reg_read_value_ptr =
+					    regacc_pkt_ptr->
+					    register_data_list[0] & 0x0000ffff;
+					mddi_reg_read_successful = TRUE;
+					mddi_reg_read_value_ptr = NULL;
+#else
+				if (data_count && data_count <=
+					MDDI_HOST_MAX_CLIENT_REG_IN_SAME_ADDR) {
+					memcpy(mddi_reg_read_value_ptr,
+						(void *)&regacc_pkt_ptr->
+						register_data_list[0],
+						data_count * 4);
+					mddi_reg_read_successful = TRUE;
+					mddi_reg_read_value_ptr = NULL;
+				}
+#endif
+				}
+
+#ifdef DEBUG_MDDIHOSTI
+				if ((mddi_gpio.polling_enabled) &&
+				    (regacc_pkt_ptr->register_address ==
+				     mddi_gpio.polling_reg)) {
+					/*
+					 * ToDo: need to call Linux GPIO call
+					 * here...
+					 */
+					 mddi_client_lcd_gpio_poll(
+					 regacc_pkt_ptr->register_data_list[0]);
+				}
+#endif
+				pmhctl->log_parms.reg_read_cnt++;
+			}
+			break;
+
+		case INVALID_PKT_TYPE:	/* 0xFFFF */
+			MDDI_MSG_ERR("!!!INVALID_PKT_TYPE rcvd\n");
+			break;
+
+		default:	/* any other packet */
+			{
+				uint16 hdlr;
+
+				for (hdlr = 0; hdlr < MAX_MDDI_REV_HANDLERS;
+				     hdlr++) {
+					if (mddi_rev_pkt_handler[hdlr].
+							handler == NULL)
+						continue;
+					if (mddi_rev_pkt_handler[hdlr].
+					    pkt_type ==
+					    rev_pkt_ptr->packet_type) {
+						(*(mddi_rev_pkt_handler[hdlr].
+						  handler)) (rev_pkt_ptr);
+					/* pmhctl->rev_state = MDDI_REV_IDLE; */
+						break;
+					}
+				}
+				if (hdlr >= MAX_MDDI_REV_HANDLERS)
+					MDDI_MSG_ERR("MDDI unknown rev pkt\n");
+			}
+			break;
+		}
+	}
+	if ((pmhctl->rev_ptr_curr + pmhctl->rev_pkt_size) >=
+	    (pmhctl->rev_ptr_start + MDDI_REV_BUFFER_SIZE)) {
+		pmhctl->rev_ptr_written = FALSE;
+	}
+
+	if (pmhctl->rev_state == MDDI_REV_ENCAP_ISSUED) {
+		pmhctl->rev_state = MDDI_REV_IDLE;
+		if (mddi_rev_user.waiting) {
+			mddi_rev_user.waiting = FALSE;
+			complete(&(mddi_rev_user.done_comp));
+		} else if (pmhctl->llist_info.reg_read_idx == UNASSIGNED_INDEX) {
+			MDDI_MSG_ERR
+			    ("Reverse Encap state, but no reg read in progress\n");
+		} else {
+			if ((!mddi_reg_read_successful) &&
+			    (mddi_reg_read_retry < mddi_reg_read_retry_max) &&
+			    (mddi_enable_reg_read_retry)) {
+				/*
+				 * There is a race condition that can happen
+				 * where the reverse encapsulation message is
+				 * sent out by the MDDI host before the register
+				 * read packet is sent. As a work-around for
+				 * that problem we issue the reverse
+				 * encapsulation one more time before giving up.
+				 */
+				if (mddi_enable_reg_read_retry_once)
+					mddi_reg_read_retry =
+					    mddi_reg_read_retry_max;
+				else
+					mddi_reg_read_retry++;
+				pmhctl->rev_state = MDDI_REV_REG_READ_SENT;
+				pmhctl->stats.reg_read_failure++;
+			} else {
+				uint16 reg_read_idx =
+				    pmhctl->llist_info.reg_read_idx;
+
+				mddi_reg_read_retry = 0;
+				if (pmhctl->llist_notify[reg_read_idx].waiting) {
+					complete(&
+						 (pmhctl->
+						  llist_notify[reg_read_idx].
+						  done_comp));
+				}
+				pmhctl->llist_info.reg_read_idx =
+				    UNASSIGNED_INDEX;
+				if (pmhctl->llist_notify[reg_read_idx].
+				    done_cb != NULL) {
+					(*
+					 (pmhctl->llist_notify[reg_read_idx].
+					  done_cb)) ();
+				}
+				pmhctl->llist_notify[reg_read_idx].next_idx =
+				    UNASSIGNED_INDEX;
+				pmhctl->llist_notify[reg_read_idx].in_use =
+				    FALSE;
+				pmhctl->llist_notify[reg_read_idx].waiting =
+				    FALSE;
+				pmhctl->llist_notify[reg_read_idx].done_cb =
+				    NULL;
+				if (!mddi_reg_read_successful)
+					pmhctl->stats.reg_read_failure++;
+			}
+		}
+	} else if (pmhctl->rev_state == MDDI_REV_CLIENT_CAP_ISSUED) {
+#if defined(CONFIG_FB_MSM_MDP31) || defined(CONFIG_FB_MSM_MDP40)
+		if (mddi_host_core_version == 0x28 ||
+		    mddi_host_core_version == 0x30) {
+			mddi_host_reg_out(FIFO_ALLOC, 0x00);
+			pmhctl->rev_ptr_written = TRUE;
+			mddi_host_reg_out(REV_PTR,
+				pmhctl->mddi_rev_ptr_write_val);
+			pmhctl->rev_ptr_curr = pmhctl->rev_ptr_start;
+			mddi_host_reg_out(CMD, 0xC00);
+		}
+#endif
+
+		if (mddi_rev_user.waiting) {
+			mddi_rev_user.waiting = FALSE;
+			complete(&(mddi_rev_user.done_comp));
+		}
+		pmhctl->rev_state = MDDI_REV_IDLE;
+	} else {
+		pmhctl->rev_state = MDDI_REV_IDLE;
+	}
+
+	/* pmhctl->rev_state = MDDI_REV_IDLE; */
+
+	/* Re-enable interrupt */
+	mddi_host_reg_outm(INTEN, MDDI_INT_REV_DATA_AVAIL,
+			   MDDI_INT_REV_DATA_AVAIL);
+
+}
+
+static void mddi_issue_reverse_encapsulation(void)
+{
+	mddi_host_type host_idx = mddi_curr_host;
+	mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
+	/* Only issue a reverse encapsulation packet if:
+	 * 1) the reverse path is idle (MDDI_REV_IDLE) or a register read has
+	 *    already been sent (MDDI_REV_REG_READ_SENT), and
+	 * 2) no forward transfer is in progress, because of a hw bug in the
+	 *    client that causes forward crc errors on the packet immediately
+	 *    following a rev encap.
+	 */
+	if (((pmhctl->rev_state == MDDI_REV_IDLE) ||
+	     (pmhctl->rev_state == MDDI_REV_REG_READ_SENT)) &&
+	    (pmhctl->llist_info.transmitting_start_idx == UNASSIGNED_INDEX) &&
+	    (!mdp_in_processing)) {
+		uint32 mddi_command = MDDI_CMD_SEND_REV_ENCAP;
+
+		if ((pmhctl->rev_state == MDDI_REV_REG_READ_SENT) ||
+		    (mddi_rev_encap_user_request == TRUE)) {
+			mddi_host_enable_io_clock();
+			if (pmhctl->link_state == MDDI_LINK_HIBERNATING) {
+				/* need to wake up link before issuing rev encap command */
+				MDDI_MSG_DEBUG("wake up link!\n");
+				pmhctl->link_state = MDDI_LINK_ACTIVATING;
+				mddi_host_reg_out(CMD, MDDI_CMD_LINK_ACTIVE);
+			} else {
+				if (pmhctl->rtd_counter >= mddi_rtd_frequency) {
+					MDDI_MSG_DEBUG
+					    ("mddi sending RTD command!\n");
+					mddi_host_reg_out(CMD,
+							  MDDI_CMD_SEND_RTD);
+					pmhctl->rtd_counter = 0;
+					pmhctl->log_parms.rtd_cnt++;
+				}
+				if (pmhctl->rev_state != MDDI_REV_REG_READ_SENT) {
+					/* this is a generic reverse request by
+					 * the user, so reset the waiting flag. */
+					mddi_rev_encap_user_request = FALSE;
+				}
+				/* link is active so send reverse encap to get register read results */
+				pmhctl->rev_state = MDDI_REV_ENCAP_ISSUED;
+				mddi_command = MDDI_CMD_SEND_REV_ENCAP;
+				MDDI_MSG_DEBUG("sending rev encap!\n");
+			}
+		} else
+		    if ((pmhctl->client_status_cnt >=
+			 mddi_client_status_frequency)
+			|| mddi_client_capability_request) {
+			mddi_host_enable_io_clock();
+			if (pmhctl->link_state == MDDI_LINK_HIBERNATING) {
+				/* only wake up the link if client status is overdue */
+				if ((pmhctl->client_status_cnt >=
+				     (mddi_client_status_frequency * 2))
+				    || mddi_client_capability_request) {
+					/* need to wake up link before issuing rev encap command */
+					MDDI_MSG_DEBUG("wake up link!\n");
+					pmhctl->link_state =
+					    MDDI_LINK_ACTIVATING;
+					mddi_host_reg_out(CMD,
+							  MDDI_CMD_LINK_ACTIVE);
+				}
+			} else {
+				if (pmhctl->rtd_counter >= mddi_rtd_frequency) {
+					MDDI_MSG_DEBUG
+					    ("mddi sending RTD command!\n");
+					mddi_host_reg_out(CMD,
+							  MDDI_CMD_SEND_RTD);
+					pmhctl->rtd_counter = 0;
+					pmhctl->log_parms.rtd_cnt++;
+				}
+				/* periodically get client status */
+				MDDI_MSG_DEBUG
+				    ("mddi sending rev enc! (get status)\n");
+				if (mddi_client_capability_request) {
+					pmhctl->rev_state =
+					    MDDI_REV_CLIENT_CAP_ISSUED;
+					mddi_command = MDDI_CMD_GET_CLIENT_CAP;
+					mddi_client_capability_request = FALSE;
+				} else {
+					pmhctl->rev_state =
+					    MDDI_REV_STATUS_REQ_ISSUED;
+					pmhctl->client_status_cnt = 0;
+					mddi_command =
+					    MDDI_CMD_GET_CLIENT_STATUS;
+				}
+			}
+		}
+		if ((pmhctl->rev_state == MDDI_REV_ENCAP_ISSUED) ||
+		    (pmhctl->rev_state == MDDI_REV_STATUS_REQ_ISSUED) ||
+		    (pmhctl->rev_state == MDDI_REV_CLIENT_CAP_ISSUED)) {
+			pmhctl->int_type.rev_encap_count++;
+#if defined(T_MSM6280) && !defined(T_MSM7200)
+			mddi_rev_pointer_written = TRUE;
+			mddi_host_reg_out(REV_PTR, mddi_rev_ptr_write_val);
+			mddi_rev_ptr_curr = mddi_rev_ptr_start;
+			/* force new rev ptr command */
+			mddi_host_reg_out(CMD, 0xC00);
+#else
+			if (!pmhctl->rev_ptr_written) {
+				MDDI_MSG_DEBUG("writing reverse pointer!\n");
+				pmhctl->rev_ptr_written = TRUE;
+#if defined(CONFIG_FB_MSM_MDP31) || defined(CONFIG_FB_MSM_MDP40)
+				if ((pmhctl->rev_state ==
+				     MDDI_REV_CLIENT_CAP_ISSUED) &&
+				    (mddi_host_core_version == 0x28 ||
+				     mddi_host_core_version == 0x30)) {
+					pmhctl->rev_ptr_written = FALSE;
+					mddi_host_reg_out(FIFO_ALLOC, 0x02);
+				} else
+					mddi_host_reg_out(REV_PTR,
+						  pmhctl->
+						  mddi_rev_ptr_write_val);
+#else
+				mddi_host_reg_out(REV_PTR,
+						  pmhctl->
+						  mddi_rev_ptr_write_val);
+#endif
+			}
+#endif
+			if (mddi_debug_clear_rev_data) {
+				uint16 i;
+				for (i = 0; i < MDDI_MAX_REV_DATA_SIZE / 4; i++)
+					pmhctl->rev_data_buf[i] = 0xdddddddd;
+				/* clean cache */
+				mddi_flush_cache_lines(pmhctl->rev_data_buf,
+						       MDDI_MAX_REV_DATA_SIZE);
+			}
+
+			/* send reverse encapsulation to get needed data */
+			mddi_host_reg_out(CMD, mddi_command);
+		}
+	}
+
+}
+
+static void mddi_process_client_initiated_wakeup(void)
+{
+	mddi_host_type host_idx = mddi_curr_host;
+	mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
+
+	/* Disable MDDI_INT Interrupt, we detect client initiated wakeup one
+	 * time for each entry into hibernation */
+	mddi_host_reg_outm(INTEN, MDDI_INT_MDDI_IN, 0);
+
+	if (host_idx == MDDI_HOST_PRIM) {
+		if (mddi_vsync_detect_enabled) {
+			mddi_host_enable_io_clock();
+#ifndef MDDI_HOST_DISP_LISTEN
+			/* issue command to bring up link */
+			/* need to do this to clear the vsync condition */
+			if (pmhctl->link_state == MDDI_LINK_HIBERNATING) {
+				pmhctl->link_state = MDDI_LINK_ACTIVATING;
+				mddi_host_reg_out(CMD, MDDI_CMD_LINK_ACTIVE);
+			}
+#endif
+			/*
+			 * Indicate to client specific code that vsync was
+			 * enabled, and we did not detect a client initiated
+			 * wakeup. The client specific handler can clear the
+			 * condition if necessary to prevent subsequent
+			 * client initiated wakeups.
+			 */
+			mddi_client_lcd_vsync_detected(TRUE);
+			pmhctl->log_parms.vsync_response_cnt++;
+			MDDI_MSG_NOTICE("MDDI_INT_IN condition\n");
+
+		}
+	}
+
+	if (mddi_gpio.polling_enabled) {
+		mddi_host_enable_io_clock();
+		/* check interrupt status now */
+		(void)mddi_queue_register_read_int(mddi_gpio.polling_reg,
+						   &mddi_gpio.polling_val);
+	}
+}
+#endif /* FEATURE_MDDI_DISABLE_REVERSE */
+
+static void mddi_host_isr(void)
+{
+	uint32 int_reg, int_en;
+#ifndef FEATURE_MDDI_DISABLE_REVERSE
+	uint32 status_reg;
+#endif
+	mddi_host_type host_idx = mddi_curr_host;
+	mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
+
+	if (!MDDI_HOST_IS_HCLK_ON) {
+		MDDI_HOST_ENABLE_HCLK;
+	}
+	int_reg = mddi_host_reg_in(INT);
+	int_en = mddi_host_reg_in(INTEN);
+	pmhctl->saved_int_reg = int_reg;
+	pmhctl->saved_int_en = int_en;
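+	/* only service interrupt sources that are both asserted and enabled */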
+	int_reg = int_reg & int_en;
+	pmhctl->int_type.count++;
+
+
+#ifndef FEATURE_MDDI_DISABLE_REVERSE
+	status_reg = mddi_host_reg_in(STAT);
+
+	if ((int_reg & MDDI_INT_MDDI_IN) ||
+	    ((int_en & MDDI_INT_MDDI_IN) &&
+	     ((int_reg == 0) || (status_reg & MDDI_STAT_CLIENT_WAKEUP_REQ)))) {
+		/*
+		 * The MDDI_IN condition will clear itself, and so it is
+		 * possible that MDDI_IN was the reason for the isr firing,
+		 * even though the interrupt register does not have the
+		 * MDDI_IN bit set. To check if this was the case we need to
+		 * look at the status register bit that signifies a client
+		 * initiated wakeup. If the status register bit is set, as well
+		 * as the MDDI_IN interrupt enabled, then we treat this as a
+		 * client initiated wakeup.
+		 */
+		if (int_reg & MDDI_INT_MDDI_IN)
+			pmhctl->int_type.in_count++;
+		mddi_process_client_initiated_wakeup();
+	}
+#endif
+
+	if (int_reg & MDDI_INT_LINK_STATE_CHANGES) {
+		pmhctl->int_type.state_change_count++;
+		mddi_report_state_change(int_reg);
+	}
+
+	if (int_reg & MDDI_INT_PRI_LINK_LIST_DONE) {
+		pmhctl->int_type.ll_done_count++;
+		mddi_process_link_list_done();
+	}
+#ifndef FEATURE_MDDI_DISABLE_REVERSE
+	if (int_reg & MDDI_INT_REV_DATA_AVAIL) {
+		pmhctl->int_type.rev_avail_count++;
+		mddi_process_rev_packets();
+	}
+#endif
+
+	if (int_reg & MDDI_INT_ERROR_CONDITIONS) {
+		pmhctl->int_type.error_count++;
+		mddi_report_errors(int_reg);
+
+		mddi_host_reg_out(INT, int_reg & MDDI_INT_ERROR_CONDITIONS);
+	}
+#ifndef FEATURE_MDDI_DISABLE_REVERSE
+	mddi_issue_reverse_encapsulation();
+
+	if ((pmhctl->rev_state != MDDI_REV_ENCAP_ISSUED) &&
+	    (pmhctl->rev_state != MDDI_REV_STATUS_REQ_ISSUED))
+#endif
+		/* don't want simultaneous reverse and forward with Eagle */
+		mddi_queue_forward_linked_list();
+
+	if (int_reg & MDDI_INT_NO_CMD_PKTS_PEND) {
+		/* this interrupt is used to kick the isr when hibernation is disabled */
+		mddi_host_reg_outm(INTEN, MDDI_INT_NO_CMD_PKTS_PEND, 0);
+	}
+
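+	/*
+	 * With nothing queued, vsync detection off and the reverse path idle,
+	 * the host clocks are released once the link has hibernated.
+	 */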
+	if ((!mddi_host_mdp_active_flag) &&
+	    (!mddi_vsync_detect_enabled) &&
+	    (pmhctl->llist_info.transmitting_start_idx == UNASSIGNED_INDEX) &&
+	    (pmhctl->llist_info.waiting_start_idx == UNASSIGNED_INDEX) &&
+	    (pmhctl->rev_state == MDDI_REV_IDLE)) {
+		if (pmhctl->link_state == MDDI_LINK_HIBERNATING) {
+			mddi_host_disable_io_clock();
+			mddi_host_disable_hclk();
+		}
+#ifdef FEATURE_MDDI_HOST_ENABLE_EARLY_HIBERNATION
+		else if ((pmhctl->link_state == MDDI_LINK_ACTIVE) &&
+			 (!pmhctl->disable_hibernation)) {
+			mddi_host_reg_out(CMD, MDDI_CMD_POWERDOWN);
+		}
+#endif
+	}
+}
+
+static void mddi_host_isr_primary(void)
+{
+	mddi_curr_host = MDDI_HOST_PRIM;
+	mddi_host_isr();
+}
+
+irqreturn_t mddi_pmdh_isr_proxy(int irq, void *ptr)
+{
+	mddi_host_isr_primary();
+	return IRQ_HANDLED;
+}
+
+static void mddi_host_isr_external(void)
+{
+	mddi_curr_host = MDDI_HOST_EXT;
+	mddi_host_isr();
+	mddi_curr_host = MDDI_HOST_PRIM;
+}
+
+irqreturn_t mddi_emdh_isr_proxy(int irq, void *ptr)
+{
+	mddi_host_isr_external();
+	return IRQ_HANDLED;
+}
+
+static void mddi_host_initialize_registers(mddi_host_type host_idx)
+{
+	uint32 pad_reg_val;
+	mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
+
+	if (pmhctl->driver_state == MDDI_DRIVER_ENABLED)
+		return;
+
+	/* turn on HCLK to MDDI host core */
+	mddi_host_enable_hclk();
+
+	/* MDDI Reset command */
+	mddi_host_reg_out(CMD, MDDI_CMD_RESET);
+
+	/* Version register (= 0x01) */
+	mddi_host_reg_out(VERSION, 0x0001);
+
+	/* Bytes per subframe register */
+	mddi_host_reg_out(BPS, MDDI_HOST_BYTES_PER_SUBFRAME);
+
+	/* Subframes per media frames register (= 0x03) */
+	mddi_host_reg_out(SPM, 0x0003);
+
+	/* Turn Around 1 register (= 0x05) */
+	mddi_host_reg_out(TA1_LEN, 0x0005);
+
+	/* Turn Around 2 register (= 0x0C) */
+	mddi_host_reg_out(TA2_LEN, MDDI_HOST_TA2_LEN);
+
+	/* Drive hi register (= 0x96) */
+	mddi_host_reg_out(DRIVE_HI, 0x0096);
+
+	/* Drive lo register (= 0x32) */
+	mddi_host_reg_out(DRIVE_LO, 0x0032);
+
+	/* Display wakeup count register (= 0x3c) */
+	mddi_host_reg_out(DISP_WAKE, 0x003c);
+
+	/* Reverse Rate Divisor register (= 0x2) */
+	mddi_host_reg_out(REV_RATE_DIV, MDDI_HOST_REV_RATE_DIV);
+
+#ifndef FEATURE_MDDI_DISABLE_REVERSE
+	/* Reverse Pointer Size */
+	mddi_host_reg_out(REV_SIZE, MDDI_REV_BUFFER_SIZE);
+
+	/* Rev Encap Size */
+	mddi_host_reg_out(REV_ENCAP_SZ, pmhctl->rev_pkt_size);
+#endif
+
+	/* Periodic Rev Encap */
+	/* don't send periodically */
+	mddi_host_reg_out(CMD, MDDI_CMD_PERIODIC_REV_ENCAP);
+
+	pad_reg_val = mddi_host_reg_in(PAD_CTL);
+	if (pad_reg_val == 0) {
+		/* If we are turning on the band gap, we need to wait 5 us
+		 * before turning on the rest of the PAD */
+		mddi_host_reg_out(PAD_CTL, 0x08000);
+		udelay(5);
+	}
+#ifdef T_MSM7200
+	/* Recommendation from PAD hw team */
+	mddi_host_reg_out(PAD_CTL, 0xa850a);
+#else
+	/* Recommendation from PAD hw team */
+	mddi_host_reg_out(PAD_CTL, 0xa850f);
+#endif
+
+	pad_reg_val = 0x00220020;
+
+#if defined(CONFIG_FB_MSM_MDP31) || defined(CONFIG_FB_MSM_MDP40)
+	mddi_host_reg_out(PAD_IO_CTL, 0x00320000);
+	mddi_host_reg_out(PAD_CAL, pad_reg_val);
+#endif
+
+	mddi_host_core_version = mddi_host_reg_inm(CORE_VER, 0xffff);
+
+#ifndef FEATURE_MDDI_DISABLE_REVERSE
+	if (mddi_host_core_version >= 8)
+		mddi_rev_ptr_workaround = FALSE;
+	pmhctl->rev_ptr_curr = pmhctl->rev_ptr_start;
+#endif
+
+	if ((mddi_host_core_version > 8) && (mddi_host_core_version < 0x19))
+		mddi_host_reg_out(TEST, 0x2);
+
+	/* Need an even number for counts */
+	mddi_host_reg_out(DRIVER_START_CNT, 0x60006);
+
+#ifndef T_MSM7500
+	/* Setup defaults for MDP related register */
+	mddi_host_reg_out(MDP_VID_FMT_DES, 0x5666);
+	mddi_host_reg_out(MDP_VID_PIX_ATTR, 0x00C3);
+	mddi_host_reg_out(MDP_VID_CLIENTID, 0);
+#endif
+
+	/* automatically hibernate after 1 empty subframe */
+	if (pmhctl->disable_hibernation)
+		mddi_host_reg_out(CMD, MDDI_CMD_HIBERNATE);
+	else
+		mddi_host_reg_out(CMD, MDDI_CMD_HIBERNATE | 1);
+
+	/* Bring up link if display (client) requests it */
+#ifdef MDDI_HOST_DISP_LISTEN
+	mddi_host_reg_out(CMD, MDDI_CMD_DISP_LISTEN);
+#else
+	mddi_host_reg_out(CMD, MDDI_CMD_DISP_IGNORE);
+#endif
+
+}
+
+void mddi_host_configure_interrupts(mddi_host_type host_idx, boolean enable)
+{
+	unsigned long flags;
+	mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
+
+	spin_lock_irqsave(&mddi_host_spin_lock, flags);
+
+	/* turn on HCLK to MDDI host core if it has been disabled */
+	mddi_host_enable_hclk();
+	/* Clear MDDI Interrupt enable reg */
+	mddi_host_reg_out(INTEN, 0);
+
+	spin_unlock_irqrestore(&mddi_host_spin_lock, flags);
+
+	if (enable) {
+		pmhctl->driver_state = MDDI_DRIVER_ENABLED;
+
+		if (host_idx == MDDI_HOST_PRIM) {
+			if (request_irq
+			    (INT_MDDI_PRI, mddi_pmdh_isr_proxy, IRQF_DISABLED,
+			     "PMDH", 0) != 0)
+				printk(KERN_ERR
+				       "mddi: PMDH unable to request_irq\n");
+			else {
+				int_mddi_pri_flag = TRUE;
+				irq_enabled = 1;
+			}
+		} else {
+			if (request_irq
+			    (INT_MDDI_EXT, mddi_emdh_isr_proxy, IRQF_DISABLED,
+			     "EMDH", 0) != 0)
+				printk(KERN_ERR
+				       "mddi: EMDH unable to request_irq\n");
+			else
+				int_mddi_ext_flag = TRUE;
+		}
+
+		/* Set MDDI Interrupt enable reg -- Enable Reverse data avail */
+#ifdef FEATURE_MDDI_DISABLE_REVERSE
+		mddi_host_reg_out(INTEN,
+				  MDDI_INT_ERROR_CONDITIONS |
+				  MDDI_INT_LINK_STATE_CHANGES);
+#else
+		/* Reverse Pointer register */
+		pmhctl->rev_ptr_written = FALSE;
+
+		mddi_host_reg_out(INTEN,
+				  MDDI_INT_REV_DATA_AVAIL |
+				  MDDI_INT_ERROR_CONDITIONS |
+				  MDDI_INT_LINK_STATE_CHANGES);
+		pmhctl->rtd_counter = mddi_rtd_frequency;
+		pmhctl->client_status_cnt = 0;
+#endif
+	} else {
+		if (pmhctl->driver_state == MDDI_DRIVER_ENABLED)
+			pmhctl->driver_state = MDDI_DRIVER_DISABLED;
+	}
+
+}
+
+/*
+ * mddi_host_client_cnt_reset:
+ * Reset client_status_cnt to 0 so the host does not send an RTD command
+ * to the client right after resume, before the MDDI client has been
+ * powered up.  This fixes the "MDDI RTD Failure" problem.
+ */
+void mddi_host_client_cnt_reset(void)
+{
+	unsigned long flags;
+	mddi_host_cntl_type *pmhctl;
+
+	pmhctl = &(mhctl[MDDI_HOST_PRIM]);
+	spin_lock_irqsave(&mddi_host_spin_lock, flags);
+	pmhctl->client_status_cnt = 0;
+	spin_unlock_irqrestore(&mddi_host_spin_lock, flags);
+}
+
+static void mddi_host_powerup(mddi_host_type host_idx)
+{
+	mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
+
+	if (pmhctl->link_state != MDDI_LINK_DISABLED)
+		return;
+
+	/* enable IO_CLK and hclk to MDDI host core */
+	mddi_host_enable_io_clock();
+
+	mddi_host_initialize_registers(host_idx);
+	mddi_host_configure_interrupts(host_idx, TRUE);
+
+	pmhctl->link_state = MDDI_LINK_ACTIVATING;
+
+	/* Link activate command */
+	mddi_host_reg_out(CMD, MDDI_CMD_LINK_ACTIVE);
+
+#ifdef CLKRGM_MDDI_IO_CLOCK_IN_MHZ
+	MDDI_MSG_NOTICE("MDDI Host: Activating Link %d Mbps\n",
+			CLKRGM_MDDI_IO_CLOCK_IN_MHZ * 2);
+#else
+	MDDI_MSG_NOTICE("MDDI Host: Activating Link\n");
+#endif
+
+	/* Initialize the timer */
+	if (host_idx == MDDI_HOST_PRIM)
+		mddi_host_timer_service(0);
+}
+
+void mddi_send_fw_link_skew_cal(mddi_host_type host_idx)
+{
+	mddi_host_reg_out(CMD, MDDI_CMD_FW_LINK_SKEW_CAL);
+	MDDI_MSG_DEBUG("%s: Skew Calibration done!!\n", __func__);
+}
+
+
+/* Write out the MDDI configuration registers */
+void mddi_host_init(mddi_host_type host_idx)
+{
+	static boolean initialized = FALSE;
+	mddi_host_cntl_type *pmhctl;
+
+	if (host_idx >= MDDI_NUM_HOST_CORES) {
+		MDDI_MSG_ERR("Invalid host core index\n");
+		return;
+	}
+
+	if (!initialized) {
+		uint16 idx;
+		mddi_host_type host;
+
+		for (host = MDDI_HOST_PRIM; host < MDDI_NUM_HOST_CORES; host++) {
+			pmhctl = &(mhctl[host]);
+			initialized = TRUE;
+
+			pmhctl->llist_ptr =
+			    dma_alloc_coherent(NULL, MDDI_LLIST_POOL_SIZE,
+					       &(pmhctl->llist_dma_addr),
+					       GFP_KERNEL);
+			pmhctl->llist_dma_ptr =
+			    (mddi_linked_list_type *) (void *)pmhctl->
+			    llist_dma_addr;
+#ifdef FEATURE_MDDI_DISABLE_REVERSE
+			pmhctl->rev_data_buf = NULL;
+			if (pmhctl->llist_ptr == NULL)
+#else
+			mddi_rev_user.waiting = FALSE;
+			init_completion(&(mddi_rev_user.done_comp));
+			pmhctl->rev_data_buf =
+			    dma_alloc_coherent(NULL, MDDI_MAX_REV_DATA_SIZE,
+					       &(pmhctl->rev_data_dma_addr),
+					       GFP_KERNEL);
+			if ((pmhctl->llist_ptr == NULL)
+			    || (pmhctl->rev_data_buf == NULL))
+#endif
+			{
+				MDDI_MSG_CRIT
+				    ("unable to alloc non-cached memory\n");
+			}
+			llist_extern[host] = pmhctl->llist_ptr;
+			llist_dma_extern[host] = pmhctl->llist_dma_ptr;
+			llist_extern_notify[host] = pmhctl->llist_notify;
+
+			for (idx = 0; idx < UNASSIGNED_INDEX; idx++) {
+				init_completion(
+					&pmhctl->llist_notify[idx].done_comp);
+			}
+			init_completion(&(pmhctl->mddi_llist_avail_comp));
+			spin_lock_init(&mddi_host_spin_lock);
+			pmhctl->mddi_waiting_for_llist_avail = FALSE;
+			pmhctl->mddi_rev_ptr_write_val =
+			    (uint32) (void *)(pmhctl->rev_data_dma_addr);
+			pmhctl->rev_ptr_start = (void *)pmhctl->rev_data_buf;
+
+			pmhctl->rev_pkt_size = MDDI_DEFAULT_REV_PKT_SIZE;
+			pmhctl->rev_state = MDDI_REV_IDLE;
+#ifdef IMAGE_MODEM_PROC
+			/* assume hibernation state is last state from APPS proc, so that
+			 * we don't reinitialize the host core */
+			pmhctl->link_state = MDDI_LINK_HIBERNATING;
+#else
+			pmhctl->link_state = MDDI_LINK_DISABLED;
+#endif
+			pmhctl->driver_state = MDDI_DRIVER_DISABLED;
+			pmhctl->disable_hibernation = FALSE;
+
+			/* initialize llist variables */
+			pmhctl->llist_info.transmitting_start_idx =
+			    UNASSIGNED_INDEX;
+			pmhctl->llist_info.transmitting_end_idx =
+			    UNASSIGNED_INDEX;
+			pmhctl->llist_info.waiting_start_idx = UNASSIGNED_INDEX;
+			pmhctl->llist_info.waiting_end_idx = UNASSIGNED_INDEX;
+			pmhctl->llist_info.reg_read_idx = UNASSIGNED_INDEX;
+			pmhctl->llist_info.next_free_idx =
+			    MDDI_FIRST_DYNAMIC_LLIST_IDX;
+			pmhctl->llist_info.reg_read_waiting = FALSE;
+
+			mddi_vsync_detect_enabled = FALSE;
+			mddi_gpio.polling_enabled = FALSE;
+
+			pmhctl->int_type.count = 0;
+			pmhctl->int_type.in_count = 0;
+			pmhctl->int_type.disp_req_count = 0;
+			pmhctl->int_type.state_change_count = 0;
+			pmhctl->int_type.ll_done_count = 0;
+			pmhctl->int_type.rev_avail_count = 0;
+			pmhctl->int_type.error_count = 0;
+			pmhctl->int_type.rev_encap_count = 0;
+			pmhctl->int_type.llist_ptr_write_1 = 0;
+			pmhctl->int_type.llist_ptr_write_2 = 0;
+
+			pmhctl->stats.fwd_crc_count = 0;
+			pmhctl->stats.rev_crc_count = 0;
+			pmhctl->stats.pri_underflow = 0;
+			pmhctl->stats.sec_underflow = 0;
+			pmhctl->stats.rev_overflow = 0;
+			pmhctl->stats.pri_overwrite = 0;
+			pmhctl->stats.sec_overwrite = 0;
+			pmhctl->stats.rev_overwrite = 0;
+			pmhctl->stats.dma_failure = 0;
+			pmhctl->stats.rtd_failure = 0;
+			pmhctl->stats.reg_read_failure = 0;
+#ifdef FEATURE_MDDI_UNDERRUN_RECOVERY
+			pmhctl->stats.pri_underrun_detected = 0;
+#endif
+
+			pmhctl->log_parms.rtd_cnt = 0;
+			pmhctl->log_parms.rev_enc_cnt = 0;
+			pmhctl->log_parms.vid_cnt = 0;
+			pmhctl->log_parms.reg_acc_cnt = 0;
+			pmhctl->log_parms.cli_stat_cnt = 0;
+			pmhctl->log_parms.cli_cap_cnt = 0;
+			pmhctl->log_parms.reg_read_cnt = 0;
+			pmhctl->log_parms.link_active_cnt = 0;
+			pmhctl->log_parms.link_hibernate_cnt = 0;
+			pmhctl->log_parms.fwd_crc_cnt = 0;
+			pmhctl->log_parms.rev_crc_cnt = 0;
+			pmhctl->log_parms.vsync_response_cnt = 0;
+
+			prev_parms[host_idx] = pmhctl->log_parms;
+			mddi_client_capability_pkt.packet_length = 0;
+		}
+
+#ifndef T_MSM7500
+		/* tell clock driver we are user of this PLL */
+		MDDI_HOST_ENABLE_IO_CLOCK;
+#endif
+	}
+
+	mddi_host_powerup(host_idx);
+	pmhctl = &(mhctl[host_idx]);
+}
+
+#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
+static uint32 mddi_client_id;
+
+uint32 mddi_get_client_id(void)
+{
+
+#ifndef FEATURE_MDDI_DISABLE_REVERSE
+	mddi_host_type host_idx = MDDI_HOST_PRIM;
+	static boolean client_detection_try = FALSE;
+	mddi_host_cntl_type *pmhctl;
+	unsigned long flags;
+	uint16 saved_rev_pkt_size;
+	int ret;
+
+	if (!client_detection_try) {
+		/* Toshiba display requires larger drive_lo value */
+		mddi_host_reg_out(DRIVE_LO, 0x0050);
+
+		pmhctl = &(mhctl[MDDI_HOST_PRIM]);
+
+		saved_rev_pkt_size = pmhctl->rev_pkt_size;
+
+		/* Increase Rev Encap Size */
+		pmhctl->rev_pkt_size = MDDI_CLIENT_CAPABILITY_REV_PKT_SIZE;
+		mddi_host_reg_out(REV_ENCAP_SZ, pmhctl->rev_pkt_size);
+
+		/* disable hibernation temporarily */
+		if (!pmhctl->disable_hibernation)
+			mddi_host_reg_out(CMD, MDDI_CMD_HIBERNATE);
+
+		mddi_rev_user.waiting = TRUE;
+		INIT_COMPLETION(mddi_rev_user.done_comp);
+
+		spin_lock_irqsave(&mddi_host_spin_lock, flags);
+
+		/* turn on clock(s), if they have been disabled */
+		mddi_host_enable_hclk();
+		mddi_host_enable_io_clock();
+
+		mddi_client_capability_request = TRUE;
+
+		if (pmhctl->rev_state == MDDI_REV_IDLE) {
+			/* attempt to send the reverse encapsulation now */
+			mddi_issue_reverse_encapsulation();
+		}
+		spin_unlock_irqrestore(&mddi_host_spin_lock, flags);
+
+		wait_for_completion_killable(&(mddi_rev_user.done_comp));
+
+		/* Set Rev Encap Size back to its original value */
+		pmhctl->rev_pkt_size = saved_rev_pkt_size;
+		mddi_host_reg_out(REV_ENCAP_SZ, pmhctl->rev_pkt_size);
+
+		/* reenable auto-hibernate */
+		if (!pmhctl->disable_hibernation)
+			mddi_host_reg_out(CMD, MDDI_CMD_HIBERNATE | 1);
+
+		mddi_host_reg_out(DRIVE_LO, 0x0032);
+		client_detection_try = TRUE;
+
+		mddi_client_id = (mddi_client_capability_pkt.Mfr_Name<<16) |
+				mddi_client_capability_pkt.Product_Code;
+
+		if (!mddi_client_id)
+			mddi_disable(1);
+
+		ret = mddi_client_power(mddi_client_id);
+		if (ret < 0)
+			MDDI_MSG_ERR("mddi_client_power return %d", ret);
+	}
+
+#if 0
+	switch (mddi_client_capability_pkt.Mfr_Name) {
+	case 0x4474:
+		if ((mddi_client_capability_pkt.Product_Code != 0x8960) &&
+		    (target == DISPLAY_1)) {
+			ret = PRISM_WVGA;
+		}
+		break;
+
+	case 0xD263:
+		if (target == DISPLAY_1)
+			ret = TOSHIBA_VGA_PRIM;
+		else if (target == DISPLAY_2)
+			ret = TOSHIBA_QCIF_SECD;
+		break;
+
+	case 0:
+		if (mddi_client_capability_pkt.Product_Code == 0x8835) {
+			if (target == DISPLAY_1)
+				ret = SHARP_QVGA_PRIM;
+			else if (target == DISPLAY_2)
+				ret = SHARP_128x128_SECD;
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	if ((!client_detection_try) && (ret != TOSHIBA_VGA_PRIM)
+	    && (ret != TOSHIBA_QCIF_SECD)) {
+		/* Not a Toshiba display, so change drive_lo back to default value */
+		mddi_host_reg_out(DRIVE_LO, 0x0032);
+	}
+#endif
+
+#endif
+
+	return mddi_client_id;
+}
+#endif
+
+void mddi_host_powerdown(mddi_host_type host_idx)
+{
+	mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
+
+	if (host_idx >= MDDI_NUM_HOST_CORES) {
+		MDDI_MSG_ERR("Invalid host core index\n");
+		return;
+	}
+
+	if (pmhctl->driver_state == MDDI_DRIVER_RESET) {
+		return;
+	}
+
+	if (host_idx == MDDI_HOST_PRIM) {
+		/* disable timer */
+		del_timer(&mddi_host_timer);
+	}
+
+	mddi_host_configure_interrupts(host_idx, FALSE);
+
+	/* turn on HCLK to MDDI host core if it has been disabled */
+	mddi_host_enable_hclk();
+
+	/* MDDI Reset command */
+	mddi_host_reg_out(CMD, MDDI_CMD_RESET);
+
+	/* Pad Control Register */
+	mddi_host_reg_out(PAD_CTL, 0x0);
+
+	/* disable IO_CLK and hclk to MDDI host core */
+	mddi_host_disable_io_clock();
+	mddi_host_disable_hclk();
+
+	pmhctl->link_state = MDDI_LINK_DISABLED;
+	pmhctl->driver_state = MDDI_DRIVER_RESET;
+
+	MDDI_MSG_NOTICE("MDDI Host: Disabling Link\n");
+
+}
+
+uint16 mddi_get_next_free_llist_item(mddi_host_type host_idx, boolean wait)
+{
+	unsigned long flags;
+	uint16 ret_idx;
+	boolean forced_wait = FALSE;
+	mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
+
+	ret_idx = pmhctl->llist_info.next_free_idx;
+
+	pmhctl->llist_info.next_free_idx++;
+	if (pmhctl->llist_info.next_free_idx >= MDDI_NUM_DYNAMIC_LLIST_ITEMS)
+		pmhctl->llist_info.next_free_idx = MDDI_FIRST_DYNAMIC_LLIST_IDX;
+	spin_lock_irqsave(&mddi_host_spin_lock, flags);
+	if (pmhctl->llist_notify[ret_idx].in_use) {
+		if (!wait) {
+			pmhctl->llist_info.next_free_idx = ret_idx;
+			ret_idx = UNASSIGNED_INDEX;
+		} else {
+			forced_wait = TRUE;
+			INIT_COMPLETION(pmhctl->mddi_llist_avail_comp);
+		}
+	}
+	spin_unlock_irqrestore(&mddi_host_spin_lock, flags);
+
+	if (forced_wait) {
+		wait_for_completion_killable(
+			&pmhctl->mddi_llist_avail_comp);
+		MDDI_MSG_ERR("task waiting on mddi llist item\n");
+	}
+
+	if (ret_idx != UNASSIGNED_INDEX) {
+		pmhctl->llist_notify[ret_idx].waiting = FALSE;
+		pmhctl->llist_notify[ret_idx].done_cb = NULL;
+		pmhctl->llist_notify[ret_idx].in_use = TRUE;
+		pmhctl->llist_notify[ret_idx].next_idx = UNASSIGNED_INDEX;
+	}
+
+	return ret_idx;
+}
+
+uint16 mddi_get_reg_read_llist_item(mddi_host_type host_idx, boolean wait)
+{
+#ifdef FEATURE_MDDI_DISABLE_REVERSE
+	MDDI_MSG_CRIT("No reverse link available\n");
+	(void)wait;
+	return FALSE;
+#else
+	unsigned long flags;
+	uint16 ret_idx;
+	boolean error = FALSE;
+	mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
+
+	spin_lock_irqsave(&mddi_host_spin_lock, flags);
+	if (pmhctl->llist_info.reg_read_idx != UNASSIGNED_INDEX) {
+		/* need to block here or is this an error condition? */
+		error = TRUE;
+		ret_idx = UNASSIGNED_INDEX;
+	}
+	spin_unlock_irqrestore(&mddi_host_spin_lock, flags);
+
+	if (!error) {
+		ret_idx = pmhctl->llist_info.reg_read_idx =
+		    mddi_get_next_free_llist_item(host_idx, wait);
+		/* clear the reg_read_waiting flag */
+		pmhctl->llist_info.reg_read_waiting = FALSE;
+	}
+
+	if (error)
+		MDDI_MSG_ERR("***** Reg read still in progress! ****\n");
+	return ret_idx;
+#endif
+
+}
+
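+/*
+ * Queue a chain of linked-list items for forward transmission.  If nothing
+ * is transmitting or waiting and the reverse link is idle, the chain is
+ * written to the primary pointer register immediately.  If only the
+ * waiting slot is free, the chain becomes the waiting chain.  Otherwise it
+ * is appended to the tail of the existing waiting chain by patching the
+ * previous last packet's next_packet_pointer.
+ */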
+void mddi_queue_forward_packets(uint16 first_llist_idx,
+				uint16 last_llist_idx,
+				boolean wait,
+				mddi_llist_done_cb_type llist_done_cb,
+				mddi_host_type host_idx)
+{
+	unsigned long flags;
+	mddi_linked_list_type *llist;
+	mddi_linked_list_type *llist_dma;
+	mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
+
+	if ((first_llist_idx >= UNASSIGNED_INDEX) ||
+	    (last_llist_idx >= UNASSIGNED_INDEX)) {
+		MDDI_MSG_ERR("MDDI queueing invalid linked list\n");
+		return;
+	}
+
+	if (pmhctl->link_state == MDDI_LINK_DISABLED)
+		MDDI_MSG_CRIT("MDDI host powered down!\n");
+
+	llist = pmhctl->llist_ptr;
+	llist_dma = pmhctl->llist_dma_ptr;
+
+	/* clean cache so MDDI host can read data */
+	memory_barrier();
+
+	pmhctl->llist_notify[last_llist_idx].waiting = wait;
+	if (wait)
+		INIT_COMPLETION(pmhctl->llist_notify[last_llist_idx].done_comp);
+	pmhctl->llist_notify[last_llist_idx].done_cb = llist_done_cb;
+
+	spin_lock_irqsave(&mddi_host_spin_lock, flags);
+
+	if ((pmhctl->llist_info.transmitting_start_idx == UNASSIGNED_INDEX) &&
+	    (pmhctl->llist_info.waiting_start_idx == UNASSIGNED_INDEX) &&
+	    (pmhctl->rev_state == MDDI_REV_IDLE)) {
+		/* no packets are currently transmitting */
+#ifndef FEATURE_MDDI_DISABLE_REVERSE
+		if (first_llist_idx == pmhctl->llist_info.reg_read_idx) {
+			/* This is the special case where the packet is a register read. */
+			pmhctl->rev_state = MDDI_REV_REG_READ_ISSUED;
+			mddi_reg_read_retry = 0;
+			/* mddi_rev_reg_read_attempt = 1; */
+		}
+#endif
+		/* assign transmitting index values */
+		pmhctl->llist_info.transmitting_start_idx = first_llist_idx;
+		pmhctl->llist_info.transmitting_end_idx = last_llist_idx;
+
+		/* turn on clock(s), if they have been disabled */
+		mddi_host_enable_hclk();
+		mddi_host_enable_io_clock();
+		pmhctl->int_type.llist_ptr_write_1++;
+		/* Write to primary pointer register */
+		dma_coherent_pre_ops();
+		mddi_host_reg_out(PRI_PTR, &llist_dma[first_llist_idx]);
+
+		/* enable interrupt when complete */
+		mddi_host_reg_outm(INTEN, MDDI_INT_PRI_LINK_LIST_DONE,
+				   MDDI_INT_PRI_LINK_LIST_DONE);
+
+	} else if (pmhctl->llist_info.waiting_start_idx == UNASSIGNED_INDEX) {
+#ifndef FEATURE_MDDI_DISABLE_REVERSE
+		if (first_llist_idx == pmhctl->llist_info.reg_read_idx) {
+			/*
+			 * We have a register read to send, but must wait
+			 * because reverse activity is still in progress or
+			 * packets are currently transmitting.
+			 */
+			/* mddi_rev_reg_read_attempt = 0; */
+			pmhctl->llist_info.reg_read_waiting = TRUE;
+		}
+#endif
+
+		/* assign waiting index values */
+		pmhctl->llist_info.waiting_start_idx = first_llist_idx;
+		pmhctl->llist_info.waiting_end_idx = last_llist_idx;
+	} else {
+		uint16 prev_end_idx = pmhctl->llist_info.waiting_end_idx;
+#ifndef FEATURE_MDDI_DISABLE_REVERSE
+		if (first_llist_idx == pmhctl->llist_info.reg_read_idx) {
+			/*
+			 * We have a register read to send, but must wait
+			 * because reverse activity is still in progress or
+			 * packets are currently transmitting.
+			 */
+			/* mddi_rev_reg_read_attempt = 0; */
+			pmhctl->llist_info.reg_read_waiting = TRUE;
+		}
+#endif
+
+		llist = pmhctl->llist_ptr;
+
+		/* clear end flag in previous last packet */
+		llist[prev_end_idx].link_controller_flags = 0;
+		pmhctl->llist_notify[prev_end_idx].next_idx = first_llist_idx;
+
+		/* set the next_packet_pointer of the previous last packet */
+		llist[prev_end_idx].next_packet_pointer =
+		    (void *)(&llist_dma[first_llist_idx]);
+
+		/* clean cache so MDDI host can read data */
+		memory_barrier();
+
+		/* assign new waiting last index value */
+		pmhctl->llist_info.waiting_end_idx = last_llist_idx;
+	}
+
+	spin_unlock_irqrestore(&mddi_host_spin_lock, flags);
+
+}
+
+void mddi_host_write_pix_attr_reg(uint32 value)
+{
+	(void)value;
+}
+
+void mddi_queue_reverse_encapsulation(boolean wait)
+{
+#ifdef FEATURE_MDDI_DISABLE_REVERSE
+	MDDI_MSG_CRIT("No reverse link available\n");
+	(void)wait;
+#else
+	unsigned long flags;
+	boolean error = FALSE;
+	mddi_host_type host_idx = MDDI_HOST_PRIM;
+	mddi_host_cntl_type *pmhctl = &(mhctl[MDDI_HOST_PRIM]);
+
+	spin_lock_irqsave(&mddi_host_spin_lock, flags);
+
+	/* turn on clock(s), if they have been disabled */
+	mddi_host_enable_hclk();
+	mddi_host_enable_io_clock();
+
+	if (wait) {
+		if (!mddi_rev_user.waiting) {
+			mddi_rev_user.waiting = TRUE;
+			INIT_COMPLETION(mddi_rev_user.done_comp);
+		} else
+			error = TRUE;
+	}
+	mddi_rev_encap_user_request = TRUE;
+
+	if (pmhctl->rev_state == MDDI_REV_IDLE) {
+		/* attempt to send the reverse encapsulation now */
+		mddi_host_type orig_host_idx = mddi_curr_host;
+		mddi_curr_host = host_idx;
+		mddi_issue_reverse_encapsulation();
+		mddi_curr_host = orig_host_idx;
+	}
+	spin_unlock_irqrestore(&mddi_host_spin_lock, flags);
+
+	if (error) {
+		MDDI_MSG_ERR("Reverse Encap request already in progress\n");
+	} else if (wait)
+		wait_for_completion_killable(&(mddi_rev_user.done_comp));
+#endif
+}
+
+/* Register a handler for a reverse packet type; the handler is executed in ISR context */
+boolean mddi_set_rev_handler(mddi_rev_handler_type handler, uint16 pkt_type)
+{
+#ifdef FEATURE_MDDI_DISABLE_REVERSE
+	MDDI_MSG_CRIT("No reverse link available\n");
+	(void)handler;
+	(void)pkt_type;
+	return (FALSE);
+#else
+	unsigned long flags;
+	uint16 hdlr;
+	boolean handler_set = FALSE;
+	boolean overwrite = FALSE;
+	mddi_host_type host_idx = MDDI_HOST_PRIM;
+	mddi_host_cntl_type *pmhctl = &(mhctl[MDDI_HOST_PRIM]);
+
+	/* Disable interrupts */
+	spin_lock_irqsave(&mddi_host_spin_lock, flags);
+
+	for (hdlr = 0; hdlr < MAX_MDDI_REV_HANDLERS; hdlr++) {
+		if (mddi_rev_pkt_handler[hdlr].pkt_type == pkt_type) {
+			mddi_rev_pkt_handler[hdlr].handler = handler;
+			if (handler == NULL) {
+				/* clearing handler from table */
+				mddi_rev_pkt_handler[hdlr].pkt_type =
+				    INVALID_PKT_TYPE;
+				handler_set = TRUE;
+				if (pkt_type == 0x10) {	/* video stream packet */
+					/* ensure HCLK on to MDDI host core before register write */
+					mddi_host_enable_hclk();
+					/* No longer getting video, so reset rev encap size to default */
+					pmhctl->rev_pkt_size =
+					    MDDI_DEFAULT_REV_PKT_SIZE;
+					mddi_host_reg_out(REV_ENCAP_SZ,
+							  pmhctl->rev_pkt_size);
+				}
+			} else {
+				/* already a handler for this packet */
+				overwrite = TRUE;
+			}
+			break;
+		}
+	}
+	if ((hdlr >= MAX_MDDI_REV_HANDLERS) && (handler != NULL)) {
+		/* assigning new handler */
+		for (hdlr = 0; hdlr < MAX_MDDI_REV_HANDLERS; hdlr++) {
+			if (mddi_rev_pkt_handler[hdlr].pkt_type ==
+			    INVALID_PKT_TYPE) {
+				if ((pkt_type == 0x10) &&	/* video stream packet */
+				    (pmhctl->rev_pkt_size <
+				     MDDI_VIDEO_REV_PKT_SIZE)) {
+					/* ensure HCLK on to MDDI host core before register write */
+					mddi_host_enable_hclk();
+					/* Increase Rev Encap Size */
+					pmhctl->rev_pkt_size =
+					    MDDI_VIDEO_REV_PKT_SIZE;
+					mddi_host_reg_out(REV_ENCAP_SZ,
+							  pmhctl->rev_pkt_size);
+				}
+				mddi_rev_pkt_handler[hdlr].handler = handler;
+				mddi_rev_pkt_handler[hdlr].pkt_type = pkt_type;
+				handler_set = TRUE;
+				break;
+			}
+		}
+	}
+
+	/* Restore interrupts */
+	spin_unlock_irqrestore(&mddi_host_spin_lock, flags);
+
+	if (overwrite)
+		MDDI_MSG_ERR("Overwriting previous rev packet handler\n");
+
+	return handler_set;
+
+#endif
+}				/* mddi_set_rev_handler */
+
+void mddi_host_disable_hibernation(boolean disable)
+{
+	mddi_host_type host_idx = MDDI_HOST_PRIM;
+	mddi_host_cntl_type *pmhctl = &(mhctl[MDDI_HOST_PRIM]);
+
+	if (disable) {
+		pmhctl->disable_hibernation = TRUE;
+		/* hibernation will be turned off by isr next time it is entered */
+	} else {
+		if (pmhctl->disable_hibernation) {
+			unsigned long flags;
+			spin_lock_irqsave(&mddi_host_spin_lock, flags);
+			if (!MDDI_HOST_IS_HCLK_ON)
+				MDDI_HOST_ENABLE_HCLK;
+			mddi_host_reg_out(CMD, MDDI_CMD_HIBERNATE | 1);
+			spin_unlock_irqrestore(&mddi_host_spin_lock, flags);
+			pmhctl->disable_hibernation = FALSE;
+		}
+	}
+}
+
+void mddi_mhctl_remove(mddi_host_type host_idx)
+{
+	mddi_host_cntl_type *pmhctl;
+
+	pmhctl = &(mhctl[host_idx]);
+
+	dma_free_coherent(NULL, MDDI_LLIST_POOL_SIZE, (void *)pmhctl->llist_ptr,
+			  pmhctl->llist_dma_addr);
+
+	dma_free_coherent(NULL, MDDI_MAX_REV_DATA_SIZE,
+			  (void *)pmhctl->rev_data_buf,
+			  pmhctl->rev_data_dma_addr);
+}
diff --git a/drivers/video/msm/mddihosti.h b/drivers/video/msm/mddihosti.h
new file mode 100644
index 0000000..166d15c
--- /dev/null
+++ b/drivers/video/msm/mddihosti.h
@@ -0,0 +1,552 @@
+/* Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDDIHOSTI_H
+#define MDDIHOSTI_H
+
+#include "msm_fb.h"
+#include "mddihost.h"
+#include <linux/clk.h>
+
+/* Register offsets in MDDI; these apply to both msm_pmdh_base and
+ * msm_emdh_base. */
+#define MDDI_CMD   		0x0000
+#define MDDI_VERSION   		0x0004
+#define MDDI_PRI_PTR		0x0008
+#define MDDI_BPS		0x0010
+#define MDDI_SPM		0x0014
+#define MDDI_INT		0x0018
+#define MDDI_INTEN		0x001c
+#define MDDI_REV_PTR		0x0020
+#define MDDI_REV_SIZE		0x0024
+#define MDDI_STAT		0x0028
+#define MDDI_REV_RATE_DIV	0x002c
+#define MDDI_REV_CRC_ERR	0x0030
+#define MDDI_TA1_LEN		0x0034
+#define MDDI_TA2_LEN		0x0038
+#define MDDI_TEST		0x0040
+#define MDDI_REV_PKT_CNT	0x0044
+#define MDDI_DRIVE_HI		0x0048
+#define MDDI_DRIVE_LO		0x004c
+#define MDDI_DISP_WAKE		0x0050
+#define MDDI_REV_ENCAP_SZ	0x0054
+#define MDDI_RTD_VAL		0x0058
+#define MDDI_PAD_CTL		0x0068
+#define MDDI_DRIVER_START_CNT	0x006c
+#define MDDI_CORE_VER		0x008c
+#define MDDI_FIFO_ALLOC         0x0090
+#define MDDI_PAD_IO_CTL         0x00a0
+#define MDDI_PAD_CAL            0x00a4
+
+#ifdef ENABLE_MDDI_MULTI_READ_WRITE
+#define MDDI_HOST_MAX_CLIENT_REG_IN_SAME_ADDR 128
+#else
+#define MDDI_HOST_MAX_CLIENT_REG_IN_SAME_ADDR 1
+#endif
+
+extern int32 mddi_client_type;
+extern u32 mddi_msg_level;
+
+/* No longer need to write to clear these registers */
+#define xxxx_mddi_host_reg_outm(reg, mask, val)  \
+do { \
+	if (host_idx == MDDI_HOST_PRIM) \
+		mddi_host_reg_outm_pmdh(reg, mask, val); \
+	else \
+		mddi_host_reg_outm_emdh(reg, mask, val); \
+} while (0)
+
+#define mddi_host_reg_outm(reg, mask, val) \
+do { \
+	unsigned long __addr; \
+	if (host_idx == MDDI_HOST_PRIM) \
+		__addr = (u32)msm_pmdh_base + MDDI_##reg; \
+	else \
+		__addr = (u32)msm_emdh_base + MDDI_##reg; \
+	writel((readl(__addr) & ~(mask)) | ((val) & (mask)), __addr); \
+} while (0)
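+
+/*
+ * mddi_host_reg_outm() is a read-modify-write: only the bits selected by
+ * 'mask' are replaced with the corresponding bits of 'val'.  For example,
+ * the ISR path uses
+ *   mddi_host_reg_outm(INTEN, MDDI_INT_PRI_LINK_LIST_DONE,
+ *                      MDDI_INT_PRI_LINK_LIST_DONE);
+ * to set a single enable bit without disturbing the rest of INTEN.
+ */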
+
+#define xxxx_mddi_host_reg_out(reg, val) \
+do { \
+	if (host_idx == MDDI_HOST_PRIM)  \
+		mddi_host_reg_out_pmdh(reg, val); \
+	else \
+		mddi_host_reg_out_emdh(reg, val); \
+	} while (0)
+
+#define mddi_host_reg_out(reg, val) \
+do { \
+	if (host_idx == MDDI_HOST_PRIM) \
+		writel(val, (u32)msm_pmdh_base + MDDI_##reg); \
+	else \
+		writel(val, (u32)msm_emdh_base + MDDI_##reg); \
+} while (0)
+
+#define xxxx_mddi_host_reg_in(reg)  \
+  ((host_idx) ? \
+     mddi_host_reg_in_emdh(reg) : mddi_host_reg_in_pmdh(reg));
+
+#define mddi_host_reg_in(reg) \
+((host_idx) ? \
+	readl((u32)msm_emdh_base + MDDI_##reg) : \
+	readl((u32)msm_pmdh_base + MDDI_##reg)) \
+
+#define xxxx_mddi_host_reg_inm(reg, mask)  \
+  ((host_idx) ? \
+    mddi_host_reg_inm_emdh(reg, mask) : \
+    mddi_host_reg_inm_pmdh(reg, mask))
+
+#define mddi_host_reg_inm(reg, mask) \
+((host_idx) ? \
+	readl((u32)msm_emdh_base + MDDI_##reg) & (mask) : \
+	readl((u32)msm_pmdh_base + MDDI_##reg) & (mask)) \
+
+/* Using non-cacheable pmem, so do nothing */
+#define mddi_invalidate_cache_lines(addr_start, num_bytes)
+/*
+ * Using non-cacheable pmem, so do nothing with cache
+ * but, ensure write goes out to memory
+ */
+#define mddi_flush_cache_lines(addr_start, num_bytes)  \
+do { \
+    (void) (addr_start); \
+    (void) (num_bytes);  \
+    memory_barrier(); \
+} while (0)
+
+/* Since checking the clock status translates to Remote Procedure Calls,
+ * just use a local variable to keep track of io_clock */
+#define MDDI_HOST_IS_IO_CLOCK_ON mddi_host_io_clock_on
+#define MDDI_HOST_ENABLE_IO_CLOCK
+#define MDDI_HOST_DISABLE_IO_CLOCK
+#define MDDI_HOST_IS_HCLK_ON mddi_host_hclk_on
+#define MDDI_HOST_ENABLE_HCLK
+#define MDDI_HOST_DISABLE_HCLK
+#define FEATURE_MDDI_HOST_IO_CLOCK_CONTROL_DISABLE
+#define FEATURE_MDDI_HOST_HCLK_CONTROL_DISABLE
+
+#define TRAMP_MDDI_HOST_ISR TRAMP_MDDI_PRI_ISR
+#define TRAMP_MDDI_HOST_EXT_ISR TRAMP_MDDI_EXT_ISR
+#define MDP_LINE_COUNT_BMSK  0x3ff
+#define MDP_SYNC_STATUS  0x000c
+#define MDP_LINE_COUNT      \
+(readl(msm_mdp_base + MDP_SYNC_STATUS) & MDP_LINE_COUNT_BMSK)
+
+/* MDP sends 256-pixel packets, so a lower value lets the link hibernate
+ * more often without significantly increasing the latency of waiting for
+ * the next subframe */
+#define MDDI_HOST_BYTES_PER_SUBFRAME  0x3C00
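+/* 0x3C00 is 15360 bytes per subframe; with SPM programmed to 3 subframes
+ * per media frame (see mddi_host_initialize_registers()), one media frame
+ * carries 3 * 15360 = 46080 bytes. */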
+
+#if defined(CONFIG_FB_MSM_MDP31) || defined(CONFIG_FB_MSM_MDP40)
+#define MDDI_HOST_TA2_LEN       0x001a
+#define MDDI_HOST_REV_RATE_DIV  0x0004
+#else
+#define MDDI_HOST_TA2_LEN       0x000c
+#define MDDI_HOST_REV_RATE_DIV  0x0002
+#endif
+
+#define MDDI_MSG_EMERG(msg, ...)    \
+	if (mddi_msg_level > 0)  \
+		printk(KERN_EMERG msg, ## __VA_ARGS__);
+#define MDDI_MSG_ALERT(msg, ...)    \
+	if (mddi_msg_level > 1)  \
+		printk(KERN_ALERT msg, ## __VA_ARGS__);
+#define MDDI_MSG_CRIT(msg, ...)    \
+	if (mddi_msg_level > 2)  \
+		printk(KERN_CRIT msg, ## __VA_ARGS__);
+#define MDDI_MSG_ERR(msg, ...)    \
+	if (mddi_msg_level > 3)  \
+		printk(KERN_ERR msg, ## __VA_ARGS__);
+#define MDDI_MSG_WARNING(msg, ...)    \
+	if (mddi_msg_level > 4)  \
+		printk(KERN_WARNING msg, ## __VA_ARGS__);
+#define MDDI_MSG_NOTICE(msg, ...)    \
+	if (mddi_msg_level > 5)  \
+		printk(KERN_NOTICE msg, ## __VA_ARGS__);
+#define MDDI_MSG_INFO(msg, ...)    \
+	if (mddi_msg_level > 6)  \
+		printk(KERN_INFO msg, ## __VA_ARGS__);
+#define MDDI_MSG_DEBUG(msg, ...)    \
+	if (mddi_msg_level > 7)  \
+		printk(KERN_DEBUG msg, ## __VA_ARGS__);
+
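+/* The mddi_msg_level thresholds map to severities as follows: a level
+ * greater than 0 enables EMERG, >1 ALERT, >2 CRIT, >3 ERR, >4 WARNING,
+ * >5 NOTICE, >6 INFO, and >7 DEBUG. */
+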
+#define GCC_PACKED __attribute__((packed))
+typedef struct GCC_PACKED {
+	uint16 packet_length;
+	/* total # of bytes in the packet not including
+		the packet_length field. */
+
+	uint16 packet_type;
+	/* A Packet Type of 70 identifies the packet as
+		a Client status Packet. */
+
+	uint16 bClient_ID;
+	/* This field is reserved for future use and shall
+		be set to zero. */
+
+} mddi_rev_packet_type;
+
+typedef struct GCC_PACKED {
+	uint16 packet_length;
+	/* total # of bytes in the packet not including
+		the packet_length field. */
+
+	uint16 packet_type;
+	/* A Packet Type of 70 identifies the packet as
+		a Client status Packet. */
+
+	uint16 bClient_ID;
+	/* This field is reserved for future use and shall
+		be set to zero. */
+
+	uint16 reverse_link_request;
+	/* 16-bit unsigned integer giving the number of bytes the client
+		needs in the reverse encapsulation message
+		to transmit data. */
+
+	uint8 crc_error_count;
+	uint8 capability_change;
+	uint16 graphics_busy_flags;
+
+	uint16 parameter_CRC;
+	/* 16-bit CRC of all the bytes in the packet
+		including Packet Length. */
+
+} mddi_client_status_type;
+
+typedef struct GCC_PACKED {
+	uint16 packet_length;
+	/* total # of bytes in the packet not including
+		the packet_length field. */
+
+	uint16 packet_type;
+	/* A Packet Type of 66 identifies the packet as
+		a Client Capability Packet. */
+
+	uint16 bClient_ID;
+	/* This field is reserved for future use and
+		shall be set to zero. */
+
+	uint16 Protocol_Version;
+	uint16 Minimum_Protocol_Version;
+	uint16 Data_Rate_Capability;
+	uint8 Interface_Type_Capability;
+	uint8 Number_of_Alt_Displays;
+	uint16 PostCal_Data_Rate;
+	uint16 Bitmap_Width;
+	uint16 Bitmap_Height;
+	uint16 Display_Window_Width;
+	uint16 Display_Window_Height;
+	uint32 Color_Map_Size;
+	uint16 Color_Map_RGB_Width;
+	uint16 RGB_Capability;
+	uint8 Monochrome_Capability;
+	uint8 Reserved_1;
+	uint16 Y_Cb_Cr_Capability;
+	uint16 Bayer_Capability;
+	uint16 Alpha_Cursor_Image_Planes;
+	uint32 Client_Feature_Capability_Indicators;
+	uint8 Maximum_Video_Frame_Rate_Capability;
+	uint8 Minimum_Video_Frame_Rate_Capability;
+	uint16 Minimum_Sub_frame_Rate;
+	uint16 Audio_Buffer_Depth;
+	uint16 Audio_Channel_Capability;
+	uint16 Audio_Sample_Rate_Capability;
+	uint8 Audio_Sample_Resolution;
+	uint8 Mic_Audio_Sample_Resolution;
+	uint16 Mic_Sample_Rate_Capability;
+	uint8 Keyboard_Data_Format;
+	uint8 pointing_device_data_format;
+	uint16 content_protection_type;
+	uint16 Mfr_Name;
+	uint16 Product_Code;
+	uint16 Reserved_3;
+	uint32 Serial_Number;
+	uint8 Week_of_Manufacture;
+	uint8 Year_of_Manufacture;
+
+	uint16 parameter_CRC;
+	/* 16-bit CRC of all the bytes in the packet including Packet Length. */
+
+} mddi_client_capability_type;
+
+typedef struct GCC_PACKED {
+	uint16 packet_length;
+	/* total # of bytes in the packet not including the packet_length field. */
+
+	uint16 packet_type;
+	/* A Packet Type of 16 identifies the packet as a Video Stream Packet. */
+
+	uint16 bClient_ID;
+	/* This field is reserved for future use and shall be set to zero. */
+
+	uint16 video_data_format_descriptor;
+	/* format of each pixel in the Pixel Data in the present stream in the
+	 * present packet.
+	 * If bits [15:13] = 000 monochrome
+	 * If bits [15:13] = 001 color pixels (palette).
+	 * If bits [15:13] = 010 color pixels in raw RGB
+	 * If bits [15:13] = 011 data in 4:2:2 Y Cb Cr format
+	 * If bits [15:13] = 100 Bayer pixels
+	 */
+
+	uint16 pixel_data_attributes;
+	/* interpreted as follows:
+	 * Bits [1:0] = 11  pixel data is displayed to both eyes
+	 * Bits [1:0] = 10  pixel data is routed to the left eye only.
+	 * Bits [1:0] = 01  pixel data is routed to the right eye only.
+	 * Bits [1:0] = 00  pixel data is routed to the alternate display.
+	 * Bit 2 is 0  Pixel Data is in the standard progressive format.
+	 * Bit 2 is 1  Pixel Data is in interlace format.
+	 * Bit 3 is 0  Pixel Data is in the standard progressive format.
+	 * Bit 3 is 1  Pixel Data is in alternate pixel format.
+	 * Bit 4 is 0  Pixel Data is to or from the display frame buffer.
+	 * Bit 4 is 1  Pixel Data is to or from the camera.
+	 * Bit 5 is 0  pixel data contains the next consecutive row of pixels.
+	 * Bit 5 is 1  X Left Edge, Y Top Edge, X Right Edge, Y Bottom Edge,
+	 *             X Start, and Y Start parameters are not defined and
+	 *             shall be ignored by the client.
+	 * Bits [7:6] = 01  Pixel data is written to the offline image buffer.
+	 * Bits [7:6] = 00  Pixel data is written to the buffer to refresh display.
+	 * Bits [7:6] = 11  Pixel data is written to all image buffers.
+	 * Bits [7:6] = 10  Invalid. Reserved for future use.
+	 * Bits 8 through 11 alternate display number.
+	 * Bits 12 through 14 are reserved for future use and shall be set to zero.
+	 * Bit 15 is 1 the row of pixels is the last row of pixels in a frame.
+	 */
+
+	uint16 x_left_edge;
+	uint16 y_top_edge;
+	/* X,Y coordinate of the top left edge of the screen window */
+
+	uint16 x_right_edge;
+	uint16 y_bottom_edge;
+	/*  X,Y coordinate of the bottom right edge of the window being updated. */
+
+	uint16 x_start;
+	uint16 y_start;
+	/*  (X Start, Y Start) is the first pixel in the Pixel Data field below. */
+
+	uint16 pixel_count;
+	/*  number of pixels in the Pixel Data field below. */
+
+	uint16 parameter_CRC;
+	/*  16-bit CRC of all bytes from the Packet Length to the Pixel Count. */
+
+	uint16 reserved;
+	/* 16-bit variable to make structure align on 4 byte boundary */
+
+} mddi_video_stream_packet_type;
+
+typedef struct GCC_PACKED {
+	uint16 packet_length;
+	/* total # of bytes in the packet not including the packet_length field. */
+
+	uint16 packet_type;
+	/* A Packet Type of 146 identifies the packet as a Register Access Packet. */
+
+	uint16 bClient_ID;
+	/* This field is reserved for future use and shall be set to zero. */
+
+	uint16 read_write_info;
+	/* Bits 13:0  a 14-bit unsigned integer that specifies the number of
+	 *            32-bit Register Data List items to be transferred in the
+	 *            Register Data List field.
+	 * Bits[15:14] = 00  Write to register(s);
+	 * Bits[15:14] = 10  Read from register(s);
+	 * Bits[15:14] = 11  Response to a Read.
+	 * Bits[15:14] = 01  this value is reserved for future use. */
+
+	uint32 register_address;
+	/* the register address that is to be written to or read from. */
+
+	uint16 parameter_CRC;
+	/* 16-bit CRC of all bytes from the Packet Length to the Register Address. */
+
+	uint32 register_data_list[MDDI_HOST_MAX_CLIENT_REG_IN_SAME_ADDR];
+	/* list of 4-byte register data values for/from client registers */
+	/* For multi-read/write, 512(128 * 4) bytes of data available */
+
+} mddi_register_access_packet_type;
+
+typedef union GCC_PACKED {
+	mddi_video_stream_packet_type video_pkt;
+	mddi_register_access_packet_type register_pkt;
+#ifdef ENABLE_MDDI_MULTI_READ_WRITE
+	/* add 1008 byte pad to ensure 1024 byte llist struct, that can be
+	 * manipulated easily with cache */
+	uint32 alignment_pad[252];	/* 1008 bytes */
+#else
+	/* add 48 byte pad to ensure 64 byte llist struct, that can be
+	 * manipulated easily with cache */
+	uint32 alignment_pad[12];	/* 48 bytes */
+#endif
+} mddi_packet_header_type;
+
+typedef struct GCC_PACKED mddi_host_llist_struct {
+	uint16 link_controller_flags;
+	uint16 packet_header_count;
+	uint16 packet_data_count;
+	void *packet_data_pointer;
+	struct mddi_host_llist_struct *next_packet_pointer;
+	uint16 reserved;
+	mddi_packet_header_type packet_header;
+} mddi_linked_list_type;
+
+typedef struct {
+	struct completion done_comp;
+	mddi_llist_done_cb_type done_cb;
+	uint16 next_idx;
+	boolean waiting;
+	boolean in_use;
+} mddi_linked_list_notify_type;
+
+#ifdef ENABLE_MDDI_MULTI_READ_WRITE
+#define MDDI_LLIST_POOL_SIZE 0x10000
+#else
+#define MDDI_LLIST_POOL_SIZE 0x1000
+#endif
+#define MDDI_MAX_NUM_LLIST_ITEMS (MDDI_LLIST_POOL_SIZE / \
+		 sizeof(mddi_linked_list_type))
+#define UNASSIGNED_INDEX MDDI_MAX_NUM_LLIST_ITEMS
+#define MDDI_FIRST_DYNAMIC_LLIST_IDX 0
+
+/* Static llist items can be used for applications that frequently send
+ * the same set of packets using the linked list interface. */
+/* Here we configure 6 static linked list items:
+ *  the 1st is used for the adaptive backlight setting,
+ *  and the remaining 5 are used for sending window adjustments for
+ *  MDDI clients that need windowing info sent separately from video
+ *  packets. */
+#define MDDI_NUM_STATIC_ABL_ITEMS 1
+#define MDDI_NUM_STATIC_WINDOW_ITEMS 5
+#define MDDI_NUM_STATIC_LLIST_ITEMS (MDDI_NUM_STATIC_ABL_ITEMS + \
+				MDDI_NUM_STATIC_WINDOW_ITEMS)
+#define MDDI_NUM_DYNAMIC_LLIST_ITEMS (MDDI_MAX_NUM_LLIST_ITEMS - \
+				MDDI_NUM_STATIC_LLIST_ITEMS)
+
+#define MDDI_FIRST_STATIC_LLIST_IDX  MDDI_NUM_DYNAMIC_LLIST_ITEMS
+#define MDDI_FIRST_STATIC_ABL_IDX  MDDI_FIRST_STATIC_LLIST_IDX
+#define MDDI_FIRST_STATIC_WINDOW_IDX  (MDDI_FIRST_STATIC_LLIST_IDX + \
+				MDDI_NUM_STATIC_ABL_ITEMS)
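+
+/*
+ * Index layout in the default (non ENABLE_MDDI_MULTI_READ_WRITE) build,
+ * assuming the alignment pad keeps each llist item at 64 bytes: the
+ * 0x1000-byte pool holds 64 items, so MDDI_MAX_NUM_LLIST_ITEMS and
+ * UNASSIGNED_INDEX are both 64.  With 6 static items, dynamic indices run
+ * 0..57, the ABL item is index 58, and the window items are indices 59..63.
+ */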
+
+/* GPIO registers */
+#define VSYNC_WAKEUP_REG          0x80
+#define GPIO_REG                  0x81
+#define GPIO_OUTPUT_REG           0x82
+#define GPIO_INTERRUPT_REG        0x83
+#define GPIO_INTERRUPT_ENABLE_REG 0x84
+#define GPIO_POLARITY_REG         0x85
+
+/* Interrupt Bits */
+#define MDDI_INT_PRI_PTR_READ       0x0001
+#define MDDI_INT_SEC_PTR_READ       0x0002
+#define MDDI_INT_REV_DATA_AVAIL     0x0004
+#define MDDI_INT_DISP_REQ           0x0008
+#define MDDI_INT_PRI_UNDERFLOW      0x0010
+#define MDDI_INT_SEC_UNDERFLOW      0x0020
+#define MDDI_INT_REV_OVERFLOW       0x0040
+#define MDDI_INT_CRC_ERROR          0x0080
+#define MDDI_INT_MDDI_IN            0x0100
+#define MDDI_INT_PRI_OVERWRITE      0x0200
+#define MDDI_INT_SEC_OVERWRITE      0x0400
+#define MDDI_INT_REV_OVERWRITE      0x0800
+#define MDDI_INT_DMA_FAILURE        0x1000
+#define MDDI_INT_LINK_ACTIVE        0x2000
+#define MDDI_INT_IN_HIBERNATION     0x4000
+#define MDDI_INT_PRI_LINK_LIST_DONE 0x8000
+#define MDDI_INT_SEC_LINK_LIST_DONE 0x10000
+#define MDDI_INT_NO_CMD_PKTS_PEND   0x20000
+#define MDDI_INT_RTD_FAILURE        0x40000
+
+#define MDDI_INT_ERROR_CONDITIONS ( \
+	MDDI_INT_PRI_UNDERFLOW | MDDI_INT_SEC_UNDERFLOW | \
+	MDDI_INT_REV_OVERFLOW | MDDI_INT_CRC_ERROR | \
+	MDDI_INT_PRI_OVERWRITE | MDDI_INT_SEC_OVERWRITE | \
+	MDDI_INT_RTD_FAILURE | \
+	MDDI_INT_REV_OVERWRITE | MDDI_INT_DMA_FAILURE)
+
+#define MDDI_INT_LINK_STATE_CHANGES ( \
+	MDDI_INT_LINK_ACTIVE | MDDI_INT_IN_HIBERNATION)
+
+/* Status Bits */
+#define MDDI_STAT_LINK_ACTIVE        0x0001
+#define MDDI_STAT_NEW_REV_PTR        0x0002
+#define MDDI_STAT_NEW_PRI_PTR        0x0004
+#define MDDI_STAT_NEW_SEC_PTR        0x0008
+#define MDDI_STAT_IN_HIBERNATION     0x0010
+#define MDDI_STAT_PRI_LINK_LIST_DONE 0x0020
+#define MDDI_STAT_SEC_LINK_LIST_DONE 0x0040
+#define MDDI_STAT_PENDING_TIMING_PKT 0x0080
+#define MDDI_STAT_PENDING_REV_ENCAP  0x0100
+#define MDDI_STAT_PENDING_POWERDOWN  0x0200
+#define MDDI_STAT_RTD_MEAS_FAIL      0x0800
+#define MDDI_STAT_CLIENT_WAKEUP_REQ  0x1000
+
+/* Command Bits */
+#define MDDI_CMD_POWERDOWN           0x0100
+#define MDDI_CMD_POWERUP             0x0200
+#define MDDI_CMD_HIBERNATE           0x0300
+#define MDDI_CMD_RESET               0x0400
+#define MDDI_CMD_DISP_IGNORE         0x0501
+#define MDDI_CMD_DISP_LISTEN         0x0500
+#define MDDI_CMD_SEND_REV_ENCAP      0x0600
+#define MDDI_CMD_GET_CLIENT_CAP      0x0601
+#define MDDI_CMD_GET_CLIENT_STATUS   0x0602
+#define MDDI_CMD_SEND_RTD            0x0700
+#define MDDI_CMD_LINK_ACTIVE         0x0900
+#define MDDI_CMD_PERIODIC_REV_ENCAP  0x0A00
+#define MDDI_CMD_FW_LINK_SKEW_CAL    0x0D00
+
+extern void mddi_host_init(mddi_host_type host);
+extern void mddi_host_powerdown(mddi_host_type host);
+extern uint16 mddi_get_next_free_llist_item(mddi_host_type host, boolean wait);
+extern uint16 mddi_get_reg_read_llist_item(mddi_host_type host, boolean wait);
+extern void mddi_queue_forward_packets(uint16 first_llist_idx,
+				       uint16 last_llist_idx,
+				       boolean wait,
+				       mddi_llist_done_cb_type llist_done_cb,
+				       mddi_host_type host);
+
+extern void mddi_host_write_pix_attr_reg(uint32 value);
+extern void mddi_client_lcd_gpio_poll(uint32 poll_reg_val);
+extern void mddi_client_lcd_vsync_detected(boolean detected);
+extern void mddi_host_disable_hibernation(boolean disable);
+
+extern mddi_linked_list_type *llist_extern[];
+extern mddi_linked_list_type *llist_dma_extern[];
+extern mddi_linked_list_notify_type *llist_extern_notify[];
+extern struct timer_list mddi_host_timer;
+
+typedef struct {
+	uint16 transmitting_start_idx;
+	uint16 transmitting_end_idx;
+	uint16 waiting_start_idx;
+	uint16 waiting_end_idx;
+	uint16 reg_read_idx;
+	uint16 next_free_idx;
+	boolean reg_read_waiting;
+} mddi_llist_info_type;
+
+extern mddi_llist_info_type mddi_llist;
+
+#define MDDI_GPIO_DEFAULT_POLLING_INTERVAL 200
+typedef struct {
+	uint32 polling_reg;
+	uint32 polling_val;
+	uint32 polling_interval;
+	boolean polling_enabled;
+} mddi_gpio_info_type;
+
+uint32 mddi_get_client_id(void);
+void mddi_mhctl_remove(mddi_host_type host_idx);
+void mddi_host_timer_service(unsigned long data);
+void mddi_host_client_cnt_reset(void);
+#endif /* MDDIHOSTI_H */
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index c3636d5..8df57ae 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -2,7 +2,7 @@
  *
  * MSM MDP Interface (used by framebuffer core)
  *
- * Copyright (C) 2007 QUALCOMM Incorporated
+ * Copyright (c) 2007-2011, Code Aurora Forum. All rights reserved.
  * Copyright (C) 2007 Google Incorporated
  *
  * This software is licensed under the terms of the GNU General Public
@@ -15,507 +15,1659 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/fb.h>
-#include <linux/msm_mdp.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
 #include <linux/interrupt.h>
-#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/hrtimer.h>
 #include <linux/clk.h>
-#include <linux/file.h>
-#include <linux/major.h>
-#include <linux/slab.h>
+#include <mach/hardware.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
 
-#include <mach/msm_iomap.h>
-#include <mach/msm_fb.h>
-#include <linux/platform_device.h>
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <mach/clk.h>
+#include "mdp.h"
+#include "msm_fb.h"
+#ifdef CONFIG_FB_MSM_MDP40
+#include "mdp4.h"
+#endif
+#include "mipi_dsi.h"
 
-#include "mdp_hw.h"
+uint32 mdp4_extn_disp;
 
-struct class *mdp_class;
+static struct clk *mdp_clk;
+static struct clk *mdp_pclk;
+static struct clk *mdp_axi_clk;
+static struct clk *mdp_lut_clk;
+int mdp_rev;
 
-#define MDP_CMD_DEBUG_ACCESS_BASE (0x10000)
+struct regulator *footswitch;
 
-static uint16_t mdp_default_ccs[] = {
-	0x254, 0x000, 0x331, 0x254, 0xF38, 0xE61, 0x254, 0x409, 0x000,
-	0x010, 0x080, 0x080
-};
+struct completion mdp_ppp_comp;
+struct semaphore mdp_ppp_mutex;
+struct semaphore mdp_pipe_ctrl_mutex;
 
-static DECLARE_WAIT_QUEUE_HEAD(mdp_dma2_waitqueue);
-static DECLARE_WAIT_QUEUE_HEAD(mdp_ppp_waitqueue);
-static struct msmfb_callback *dma_callback;
-static struct clk *clk;
-static unsigned int mdp_irq_mask;
-static DEFINE_SPINLOCK(mdp_lock);
-DEFINE_MUTEX(mdp_mutex);
+unsigned long mdp_timer_duration = (HZ/20);   /* 50 msec */
 
-static int enable_mdp_irq(struct mdp_info *mdp, uint32_t mask)
+boolean mdp_ppp_waiting = FALSE;
+uint32 mdp_tv_underflow_cnt;
+uint32 mdp_lcdc_underflow_cnt;
+
+boolean mdp_current_clk_on = FALSE;
+boolean mdp_is_in_isr = FALSE;
+
+/*
+ * Legacy flag: mdp_in_processing is used only for the DMA2-to-MDDI path,
+ * so it applies to the DMA2 block only.
+ */
+uint32 mdp_in_processing = FALSE;
+
+#ifdef CONFIG_FB_MSM_MDP40
+uint32 mdp_intr_mask = MDP4_ANY_INTR_MASK;
+#else
+uint32 mdp_intr_mask = MDP_ANY_INTR_MASK;
+#endif
+
+MDP_BLOCK_TYPE mdp_debug[MDP_MAX_BLOCK];
+
+atomic_t mdp_block_power_cnt[MDP_MAX_BLOCK];
+
+spinlock_t mdp_spin_lock;
+struct workqueue_struct *mdp_dma_wq;	/*mdp dma wq */
+struct workqueue_struct *mdp_vsync_wq;	/*mdp vsync wq */
+
+static struct workqueue_struct *mdp_pipe_ctrl_wq; /* mdp pipe ctrl wq */
+static struct delayed_work mdp_pipe_ctrl_worker;
+
+static boolean mdp_suspended = FALSE;
+DEFINE_MUTEX(mdp_suspend_mutex);
+
+#ifdef CONFIG_FB_MSM_MDP40
+struct mdp_dma_data dma2_data;
+struct mdp_dma_data dma_s_data;
+struct mdp_dma_data dma_e_data;
+ulong mdp4_display_intf;
+#else
+static struct mdp_dma_data dma2_data;
+static struct mdp_dma_data dma_s_data;
+#ifndef CONFIG_FB_MSM_MDP303
+static struct mdp_dma_data dma_e_data;
+#endif
+#endif
+static struct mdp_dma_data dma3_data;
+
+extern ktime_t mdp_dma2_last_update_time;
+
+extern uint32 mdp_dma2_update_time_in_usec;
+extern int mdp_lcd_rd_cnt_offset_slow;
+extern int mdp_lcd_rd_cnt_offset_fast;
+extern int mdp_usec_diff_threshold;
+
+#ifdef CONFIG_FB_MSM_LCDC
+extern int first_pixel_start_x;
+extern int first_pixel_start_y;
+#endif
+
+#ifdef MSM_FB_ENABLE_DBGFS
+struct dentry *mdp_dir;
+#endif
+
+#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
+static int mdp_suspend(struct platform_device *pdev, pm_message_t state);
+#else
+#define mdp_suspend NULL
+#endif
+
+struct timeval mdp_dma2_timeval;
+struct timeval mdp_ppp_timeval;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static struct early_suspend early_suspend;
+#endif
+
+static u32 mdp_irq;
+
+static uint32 mdp_prim_panel_type = NO_PANEL;
+#ifndef CONFIG_FB_MSM_MDP22
+DEFINE_MUTEX(mdp_lut_push_sem);
+static int mdp_lut_i;
+static int mdp_lut_hw_update(struct fb_cmap *cmap)
 {
-	unsigned long irq_flags;
-	int ret = 0;
+	int i;
+	u16 *c[3];
+	u16 r, g, b;
 
-	BUG_ON(!mask);
+	c[0] = cmap->green;
+	c[1] = cmap->blue;
+	c[2] = cmap->red;
 
-	spin_lock_irqsave(&mdp_lock, irq_flags);
-	/* if the mask bits are already set return an error, this interrupt
-	 * is already enabled */
-	if (mdp_irq_mask & mask) {
-		printk(KERN_ERR "mdp irq already on already on %x %x\n",
-		       mdp_irq_mask, mask);
-		ret = -1;
-	}
-	/* if the mdp irq is not already enabled enable it */
-	if (!mdp_irq_mask) {
-		if (clk)
-			clk_enable(clk);
-		enable_irq(mdp->irq);
+	for (i = 0; i < cmap->len; i++) {
+		if (copy_from_user(&r, cmap->red++, sizeof(r)) ||
+		    copy_from_user(&g, cmap->green++, sizeof(g)) ||
+		    copy_from_user(&b, cmap->blue++, sizeof(b)))
+			return -EFAULT;
+
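+		/* pack the 8-bit G, B and R components into one 32-bit LUT
+		 * entry: G in bits [7:0], B in [15:8], R in [23:16] */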
+#ifdef CONFIG_FB_MSM_MDP40
+		MDP_OUTP(MDP_BASE + 0x94800 +
+#else
+		MDP_OUTP(MDP_BASE + 0x93800 +
+#endif
+			(0x400*mdp_lut_i) + cmap->start*4 + i*4,
+				((g & 0xff) |
+				 ((b & 0xff) << 8) |
+				 ((r & 0xff) << 16)));
 	}
 
-	/* update the irq mask to reflect the fact that the interrupt is
-	 * enabled */
-	mdp_irq_mask |= mask;
-	spin_unlock_irqrestore(&mdp_lock, irq_flags);
-	return ret;
+	return 0;
 }
 
-static int locked_disable_mdp_irq(struct mdp_info *mdp, uint32_t mask)
+static int mdp_lut_push;
+static int mdp_lut_push_i;
+static int mdp_lut_update_nonlcdc(struct fb_info *info, struct fb_cmap *cmap)
 {
-	/* this interrupt is already disabled! */
-	if (!(mdp_irq_mask & mask)) {
-		printk(KERN_ERR "mdp irq already off %x %x\n",
-		       mdp_irq_mask, mask);
-		return -1;
+	int ret;
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	ret = mdp_lut_hw_update(cmap);
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+	if (ret)
+		return ret;
+
+	mutex_lock(&mdp_lut_push_sem);
+	mdp_lut_push = 1;
+	mdp_lut_push_i = mdp_lut_i;
+	mutex_unlock(&mdp_lut_push_sem);
+
+	mdp_lut_i = (mdp_lut_i + 1)%2;
+
+	return 0;
+}
+
+static int mdp_lut_update_lcdc(struct fb_info *info, struct fb_cmap *cmap)
+{
+	int ret;
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	ret = mdp_lut_hw_update(cmap);
+
+	if (ret) {
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+		return ret;
 	}
-	/* update the irq mask to reflect the fact that the interrupt is
-	 * disabled */
-	mdp_irq_mask &= ~(mask);
-	/* if no one is waiting on the interrupt, disable it */
-	if (!mdp_irq_mask) {
-		disable_irq_nosync(mdp->irq);
-		if (clk)
-			clk_disable(clk);
+
+	MDP_OUTP(MDP_BASE + 0x90070, (mdp_lut_i << 10) | 0x17);
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	mdp_lut_i = (mdp_lut_i + 1)%2;
+
+	return 0;
+}
+
+static void mdp_lut_enable(void)
+{
+	if (mdp_lut_push) {
+		mutex_lock(&mdp_lut_push_sem);
+		mdp_lut_push = 0;
+		MDP_OUTP(MDP_BASE + 0x90070,
+				(mdp_lut_push_i << 10) | 0x17);
+		mutex_unlock(&mdp_lut_push_sem);
+	}
+}
+
+#define MDP_HIST_MAX_BIN 32
+static __u32 mdp_hist_r[MDP_HIST_MAX_BIN];
+static __u32 mdp_hist_g[MDP_HIST_MAX_BIN];
+static __u32 mdp_hist_b[MDP_HIST_MAX_BIN];
+
+#ifdef CONFIG_FB_MSM_MDP40
+struct mdp_histogram mdp_hist;
+struct completion mdp_hist_comp;
+boolean mdp_is_hist_start = FALSE;
+#else
+static struct mdp_histogram mdp_hist;
+static struct completion mdp_hist_comp;
+static boolean mdp_is_hist_start = FALSE;
+#endif
+static DEFINE_MUTEX(mdp_hist_mutex);
+
+int mdp_histogram_ctrl(boolean en)
+{
+	unsigned long flag;
+	boolean hist_start;
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	hist_start = mdp_is_hist_start;
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+
+	if (hist_start == TRUE) {
+		if (en == TRUE) {
+			mdp_enable_irq(MDP_HISTOGRAM_TERM);
+			mdp_hist.frame_cnt = 1;
+			mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+#ifdef CONFIG_FB_MSM_MDP40
+			MDP_OUTP(MDP_BASE + 0x95010, 1);
+			MDP_OUTP(MDP_BASE + 0x9501c, INTR_HIST_DONE);
+			MDP_OUTP(MDP_BASE + 0x95004, 1);
+			MDP_OUTP(MDP_BASE + 0x95000, 1);
+#else
+			MDP_OUTP(MDP_BASE + 0x94004, 1);
+			MDP_OUTP(MDP_BASE + 0x94000, 1);
+#endif
+			mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF,
+					FALSE);
+		} else
+			mdp_disable_irq(MDP_HISTOGRAM_TERM);
 	}
 	return 0;
 }
 
-static int disable_mdp_irq(struct mdp_info *mdp, uint32_t mask)
+int mdp_start_histogram(struct fb_info *info)
 {
-	unsigned long irq_flags;
-	int ret;
+	unsigned long flag;
 
-	spin_lock_irqsave(&mdp_lock, irq_flags);
-	ret = locked_disable_mdp_irq(mdp, mask);
-	spin_unlock_irqrestore(&mdp_lock, irq_flags);
+	int ret = 0;
+	mutex_lock(&mdp_hist_mutex);
+	if (mdp_is_hist_start == TRUE) {
+		printk(KERN_ERR "%s histogram already started\n", __func__);
+		ret = -EPERM;
+		goto mdp_hist_start_err;
+	}
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	mdp_is_hist_start = TRUE;
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+	mdp_enable_irq(MDP_HISTOGRAM_TERM);
+	mdp_hist.frame_cnt = 1;
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+#ifdef CONFIG_FB_MSM_MDP40
+	MDP_OUTP(MDP_BASE + 0x95004, 1);
+	MDP_OUTP(MDP_BASE + 0x95000, 1);
+#else
+	MDP_OUTP(MDP_BASE + 0x94004, 1);
+	MDP_OUTP(MDP_BASE + 0x94000, 1);
+#endif
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+mdp_hist_start_err:
+	mutex_unlock(&mdp_hist_mutex);
+	return ret;
+
+}
+int mdp_stop_histogram(struct fb_info *info)
+{
+	unsigned long flag;
+	int ret = 0;
+	mutex_lock(&mdp_hist_mutex);
+	if (!mdp_is_hist_start) {
+		printk(KERN_ERR "%s histogram already stopped\n", __func__);
+		ret = -EPERM;
+		goto mdp_hist_stop_err;
+	}
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	mdp_is_hist_start = FALSE;
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+	/* disable the histogram irq; it has already been handled
+	   by the time control reaches here */
+	mdp_disable_irq(MDP_HISTOGRAM_TERM);
+
+mdp_hist_stop_err:
+	mutex_unlock(&mdp_hist_mutex);
 	return ret;
 }
 
-static irqreturn_t mdp_isr(int irq, void *data)
+static int mdp_do_histogram(struct fb_info *info, struct mdp_histogram *hist)
 {
-	uint32_t status;
-	unsigned long irq_flags;
-	struct mdp_info *mdp = data;
+	int ret = 0;
 
-	spin_lock_irqsave(&mdp_lock, irq_flags);
+	if (!hist->frame_cnt || (hist->bin_cnt == 0) ||
+				 (hist->bin_cnt > MDP_HIST_MAX_BIN))
+		return -EINVAL;
+	mutex_lock(&mdp_hist_mutex);
+	if (!mdp_is_hist_start) {
+		printk(KERN_ERR "%s histogram not started\n", __func__);
+		mutex_unlock(&mdp_hist_mutex);
+		return -EPERM;
+	}
+	mutex_unlock(&mdp_hist_mutex);
 
-	status = mdp_readl(mdp, MDP_INTR_STATUS);
-	mdp_writel(mdp, status, MDP_INTR_CLEAR);
+	INIT_COMPLETION(mdp_hist_comp);
 
-	status &= mdp_irq_mask;
-	if (status & DL0_DMA2_TERM_DONE) {
-		if (dma_callback) {
-			dma_callback->func(dma_callback);
-			dma_callback = NULL;
-		}
-		wake_up(&mdp_dma2_waitqueue);
+	mdp_hist.bin_cnt = hist->bin_cnt;
+	mdp_hist.frame_cnt = hist->frame_cnt;
+	mdp_hist.r = (hist->r) ? mdp_hist_r : 0;
+	mdp_hist.g = (hist->g) ? mdp_hist_g : 0;
+	mdp_hist.b = (hist->b) ? mdp_hist_b : 0;
+
+	wait_for_completion_killable(&mdp_hist_comp);
+
+	if (hist->r) {
+		ret = copy_to_user(hist->r, mdp_hist.r, hist->bin_cnt*4);
+		if (ret)
+			goto hist_err;
+	}
+	if (hist->g) {
+		ret = copy_to_user(hist->g, mdp_hist.g, hist->bin_cnt*4);
+		if (ret)
+			goto hist_err;
+	}
+	if (hist->b) {
+		ret = copy_to_user(hist->b, mdp_hist.b, hist->bin_cnt*4);
+		if (ret)
+			goto hist_err;
+	}
+	return 0;
+
+hist_err:
+	printk(KERN_ERR "%s: invalid hist buffer\n", __func__);
+	return ret;
+}
+#endif
+
+/* Returns < 0 on error, 0 on timeout, or > 0 on successful wait */
+
+int mdp_ppp_pipe_wait(void)
+{
+	int ret = 1;
+
+	/* wait 5 seconds for the operation to complete before declaring
+	 * the MDP hung */
+
+	if (mdp_ppp_waiting == TRUE) {
+		ret = wait_for_completion_interruptible_timeout(&mdp_ppp_comp,
+								5 * HZ);
+
+		if (!ret)
+			printk(KERN_ERR "%s: Timed out waiting for the MDP.\n",
+					__func__);
 	}
 
-	if (status & DL0_ROI_DONE)
-		wake_up(&mdp_ppp_waitqueue);
+	return ret;
+}
 
-	if (status)
-		locked_disable_mdp_irq(mdp, status);
+static DEFINE_SPINLOCK(mdp_lock);
+static int mdp_irq_mask;
+static int mdp_irq_enabled;
 
+/*
+ * mdp_enable_irq: can not be called from isr
+ */
+void mdp_enable_irq(uint32 term)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&mdp_lock, irq_flags);
+	if (mdp_irq_mask & term) {
+		printk(KERN_ERR "%s: MDP IRQ term-0x%x is already set, mask=%x irq=%d\n",
+				__func__, term, mdp_irq_mask, mdp_irq_enabled);
+	} else {
+		mdp_irq_mask |= term;
+		if (mdp_irq_mask && !mdp_irq_enabled) {
+			mdp_irq_enabled = 1;
+			enable_irq(mdp_irq);
+		}
+	}
 	spin_unlock_irqrestore(&mdp_lock, irq_flags);
+}
+
+/*
+ * mdp_disable_irq: can not be called from isr
+ */
+void mdp_disable_irq(uint32 term)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&mdp_lock, irq_flags);
+	if (!(mdp_irq_mask & term)) {
+		printk(KERN_ERR "%s: MDP IRQ term-0x%x is NOT set, mask=%x irq=%d\n",
+				__func__, term, mdp_irq_mask, mdp_irq_enabled);
+	} else {
+		mdp_irq_mask &= ~term;
+		if (!mdp_irq_mask && mdp_irq_enabled) {
+			mdp_irq_enabled = 0;
+			disable_irq(mdp_irq);
+		}
+	}
+	spin_unlock_irqrestore(&mdp_lock, irq_flags);
+}
+
+void mdp_disable_irq_nosync(uint32 term)
+{
+	spin_lock(&mdp_lock);
+	if (!(mdp_irq_mask & term)) {
+		printk(KERN_ERR "%s: MDP IRQ term-0x%x is NOT set, mask=%x irq=%d\n",
+				__func__, term, mdp_irq_mask, mdp_irq_enabled);
+	} else {
+		mdp_irq_mask &= ~term;
+		if (!mdp_irq_mask && mdp_irq_enabled) {
+			mdp_irq_enabled = 0;
+			disable_irq_nosync(mdp_irq);
+		}
+	}
+	spin_unlock(&mdp_lock);
+}
+
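+/*
+ * mdp_pipe_kickoff: starts the MDP engine selected by 'term' by writing its
+ * kick-off register.  The PPP path additionally powers up the PPP block,
+ * enables the PPP interrupt and blocks on mdp_ppp_comp until the PPP-done
+ * ISR completes it.
+ */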
+void mdp_pipe_kickoff(uint32 term, struct msm_fb_data_type *mfd)
+{
+	/* complete all the writes before starting */
+	wmb();
+
+	/* kick off PPP engine */
+	if (term == MDP_PPP_TERM) {
+		if (mdp_debug[MDP_PPP_BLOCK])
+			jiffies_to_timeval(jiffies, &mdp_ppp_timeval);
+
+		/* let's turn on PPP block */
+		mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+		mdp_enable_irq(term);
+		INIT_COMPLETION(mdp_ppp_comp);
+		mdp_ppp_waiting = TRUE;
+		outpdw(MDP_BASE + 0x30, 0x1000);
+		wait_for_completion_killable(&mdp_ppp_comp);
+		mdp_disable_irq(term);
+
+		if (mdp_debug[MDP_PPP_BLOCK]) {
+			struct timeval now;
+
+			jiffies_to_timeval(jiffies, &now);
+			mdp_ppp_timeval.tv_usec =
+			    now.tv_usec - mdp_ppp_timeval.tv_usec;
+			MSM_FB_DEBUG("MDP-PPP: %d\n",
+				    (int)mdp_ppp_timeval.tv_usec);
+		}
+	} else if (term == MDP_DMA2_TERM) {
+		if (mdp_debug[MDP_DMA2_BLOCK]) {
+			MSM_FB_DEBUG("MDP-DMA2: %d\n",
+				    (int)mdp_dma2_timeval.tv_usec);
+			jiffies_to_timeval(jiffies, &mdp_dma2_timeval);
+		}
+		/* DMA update timestamp */
+		mdp_dma2_last_update_time = ktime_get_real();
+		/* let's turn on DMA2 block */
+#if 0
+		mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+#endif
+#ifdef CONFIG_FB_MSM_MDP22
+		outpdw(MDP_CMD_DEBUG_ACCESS_BASE + 0x0044, 0x0);/* start DMA */
+#else
+		mdp_lut_enable();
+
+#ifdef CONFIG_FB_MSM_MDP40
+		outpdw(MDP_BASE + 0x000c, 0x0);	/* start DMA */
+#else
+		outpdw(MDP_BASE + 0x0044, 0x0);	/* start DMA */
+
+#ifdef CONFIG_FB_MSM_MDP303
+
+#ifdef CONFIG_FB_MSM_MIPI_DSI
+		mipi_dsi_cmd_mdp_sw_trigger();
+#endif
+
+#endif
+
+#endif
+#endif
+#ifdef CONFIG_FB_MSM_MDP40
+	} else if (term == MDP_DMA_S_TERM) {
+		mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+		outpdw(MDP_BASE + 0x0010, 0x0);	/* start DMA */
+	} else if (term == MDP_DMA_E_TERM) {
+		mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+		outpdw(MDP_BASE + 0x0014, 0x0);	/* start DMA */
+	} else if (term == MDP_OVERLAY0_TERM) {
+		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+		mdp_lut_enable();
+		outpdw(MDP_BASE + 0x0004, 0);
+	} else if (term == MDP_OVERLAY1_TERM) {
+		mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+		mdp_lut_enable();
+		outpdw(MDP_BASE + 0x0008, 0);
+	}
+#else
+	} else if (term == MDP_DMA_S_TERM) {
+		mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+		outpdw(MDP_BASE + 0x0048, 0x0);	/* start DMA */
+	} else if (term == MDP_DMA_E_TERM) {
+		mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+		outpdw(MDP_BASE + 0x004C, 0x0);
+	}
+#endif
+}
+static int mdp_clk_rate;
+static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
+static int pdev_list_cnt;
+
+static void mdp_pipe_ctrl_workqueue_handler(struct work_struct *work)
+{
+	mdp_pipe_ctrl(MDP_MASTER_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
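+/*
+ * mdp_pipe_ctrl: reference-counts the power state of each MDP block and
+ * switches the MDP clocks when the overall state changes.  When called
+ * from the ISR the clock-off request is deferred to mdp_pipe_ctrl_wq;
+ * in process context the clocks are switched directly under
+ * mdp_pipe_ctrl_mutex.
+ */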
+void mdp_pipe_ctrl(MDP_BLOCK_TYPE block, MDP_BLOCK_POWER_STATE state,
+		   boolean isr)
+{
+	boolean mdp_all_blocks_off = TRUE;
+	int i;
+	unsigned long flag;
+	struct msm_fb_panel_data *pdata;
+
+	/*
+	 * It is assumed that if isr == TRUE then state == OFF.
+	 * If state == ON while isr == TRUE, the user context could turn
+	 * off the clocks while the interrupt is updating the power to ON.
+	 */
+	WARN_ON(isr == TRUE && state == MDP_BLOCK_POWER_ON);
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	if (MDP_BLOCK_POWER_ON == state) {
+		atomic_inc(&mdp_block_power_cnt[block]);
+
+		if (MDP_DMA2_BLOCK == block)
+			mdp_in_processing = TRUE;
+	} else {
+		atomic_dec(&mdp_block_power_cnt[block]);
+
+		if (atomic_read(&mdp_block_power_cnt[block]) < 0) {
+			/*
+			 * The master block always has to serve a power-off
+			 * request, and it also has a power-off timer.  If
+			 * the timer expires first and DMA2 finishes later,
+			 * the master has to power off twice.  There should
+			 * not be multiple power-off requests for any other
+			 * block.
+			 */
+			if (block != MDP_MASTER_BLOCK) {
+				MSM_FB_INFO("mdp_block_power_cnt[block=%d] "
+					"multiple power-off request\n", block);
+			}
+			atomic_set(&mdp_block_power_cnt[block], 0);
+		}
+
+		if (MDP_DMA2_BLOCK == block)
+			mdp_in_processing = FALSE;
+	}
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+
+	/*
+	 * If we are in the isr, send the request to the workqueue.
+	 * Otherwise, processing happens in the current context.
+	 */
+	if (isr) {
+		if (mdp_current_clk_on) {
+			/* checking all blocks power state */
+			for (i = 0; i < MDP_MAX_BLOCK; i++) {
+				if (atomic_read(&mdp_block_power_cnt[i]) > 0) {
+					mdp_all_blocks_off = FALSE;
+					break;
+				}
+			}
+
+			if (mdp_all_blocks_off) {
+				/* send workqueue to turn off mdp power */
+				queue_delayed_work(mdp_pipe_ctrl_wq,
+						   &mdp_pipe_ctrl_worker,
+						   mdp_timer_duration);
+			}
+		}
+	} else {
+		down(&mdp_pipe_ctrl_mutex);
+		/* checking all blocks power state */
+		for (i = 0; i < MDP_MAX_BLOCK; i++) {
+			if (atomic_read(&mdp_block_power_cnt[i]) > 0) {
+				mdp_all_blocks_off = FALSE;
+				break;
+			}
+		}
+
+		/*
+		 * find out whether a delayable work item is currently
+		 * pending
+		 */
+
+		if (delayed_work_pending(&mdp_pipe_ctrl_worker)) {
+			/*
+			 * Try to cancel the pending work.  If the cancel
+			 * fails (del_timer can't remove it from the list
+			 * because it is about to expire and run), we have
+			 * to let it run.  queue_delayed_work won't accept
+			 * a new job that is the same as the pending one,
+			 * i.e. queue_delayed_work(mdp_timer_duration = 0).
+			 */
+			cancel_delayed_work(&mdp_pipe_ctrl_worker);
+		}
+
+		if ((mdp_all_blocks_off) && (mdp_current_clk_on)) {
+			mutex_lock(&mdp_suspend_mutex);
+			if (block == MDP_MASTER_BLOCK || mdp_suspended) {
+				mdp_current_clk_on = FALSE;
+				mb();
+				/* turn off MDP clks */
+				mdp_vsync_clk_disable();
+				for (i = 0; i < pdev_list_cnt; i++) {
+					pdata = (struct msm_fb_panel_data *)
+						pdev_list[i]->dev.platform_data;
+					if (pdata && pdata->clk_func)
+						pdata->clk_func(0);
+				}
+				if (mdp_clk != NULL) {
+					mdp_clk_rate = clk_get_rate(mdp_clk);
+					clk_disable(mdp_clk);
+					if (mdp_hw_revision <=
+						MDP4_REVISION_V2_1 &&
+						mdp_clk_rate > 122880000) {
+						clk_set_rate(mdp_clk,
+							 122880000);
+					}
+					MSM_FB_DEBUG("MDP CLK OFF\n");
+				}
+				if (mdp_pclk != NULL) {
+					clk_disable(mdp_pclk);
+					MSM_FB_DEBUG("MDP PCLK OFF\n");
+				}
+				if (mdp_axi_clk != NULL)
+					clk_disable(mdp_axi_clk);
+				if (mdp_lut_clk != NULL)
+					clk_disable(mdp_lut_clk);
+			} else {
+				/* send workqueue to turn off mdp power */
+				queue_delayed_work(mdp_pipe_ctrl_wq,
+						   &mdp_pipe_ctrl_worker,
+						   mdp_timer_duration);
+			}
+			mutex_unlock(&mdp_suspend_mutex);
+		} else if ((!mdp_all_blocks_off) && (!mdp_current_clk_on)) {
+			mdp_current_clk_on = TRUE;
+			/* turn on MDP clks */
+			for (i = 0; i < pdev_list_cnt; i++) {
+				pdata = (struct msm_fb_panel_data *)
+					pdev_list[i]->dev.platform_data;
+				if (pdata && pdata->clk_func)
+					pdata->clk_func(1);
+			}
+			if (mdp_clk != NULL) {
+				if (mdp_hw_revision <=
+					MDP4_REVISION_V2_1 &&
+					mdp_clk_rate > 122880000) {
+					clk_set_rate(mdp_clk,
+						 mdp_clk_rate);
+				}
+				clk_enable(mdp_clk);
+				MSM_FB_DEBUG("MDP CLK ON\n");
+			}
+			if (mdp_pclk != NULL) {
+				clk_enable(mdp_pclk);
+				MSM_FB_DEBUG("MDP PCLK ON\n");
+			}
+			if (mdp_axi_clk != NULL)
+				clk_enable(mdp_axi_clk);
+			if (mdp_lut_clk != NULL)
+				clk_enable(mdp_lut_clk);
+			mdp_vsync_clk_enable();
+		}
+		up(&mdp_pipe_ctrl_mutex);
+	}
+}
+
+#ifndef CONFIG_FB_MSM_MDP40
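+/*
+ * mdp_isr: pre-MDP4 interrupt handler.  Loops reading and clearing
+ * MDP_INTR_STATUS and services each asserted source (TV-out/DMA3,
+ * histogram done, LCDC underflow/frame start, DMA_S, DMA_E, DMA_P and
+ * PPP) until no enabled interrupt remains pending.
+ */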
+irqreturn_t mdp_isr(int irq, void *ptr)
+{
+	uint32 mdp_interrupt = 0;
+	struct mdp_dma_data *dma;
+
+	mdp_is_in_isr = TRUE;
+	do {
+		mdp_interrupt = inp32(MDP_INTR_STATUS);
+		outp32(MDP_INTR_CLEAR, mdp_interrupt);
+
+		mdp_interrupt &= mdp_intr_mask;
+
+		if (mdp_interrupt & TV_ENC_UNDERRUN) {
+			mdp_interrupt &= ~(TV_ENC_UNDERRUN);
+			mdp_tv_underflow_cnt++;
+		}
+
+		if (!mdp_interrupt)
+			break;
+
+		/* DMA3 TV-Out Start */
+		if (mdp_interrupt & TV_OUT_DMA3_START) {
+			/* let's disable TV out interrupt */
+			mdp_intr_mask &= ~TV_OUT_DMA3_START;
+			outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+
+			dma = &dma3_data;
+			if (dma->waiting) {
+				dma->waiting = FALSE;
+				complete(&dma->comp);
+			}
+		}
+#ifndef CONFIG_FB_MSM_MDP22
+		if (mdp_interrupt & MDP_HIST_DONE) {
+			outp32(MDP_BASE + 0x94018, 0x3);
+			outp32(MDP_INTR_CLEAR, MDP_HIST_DONE);
+			if (mdp_hist.r)
+				memcpy(mdp_hist.r, MDP_BASE + 0x94100,
+						mdp_hist.bin_cnt*4);
+			if (mdp_hist.g)
+				memcpy(mdp_hist.g, MDP_BASE + 0x94200,
+						mdp_hist.bin_cnt*4);
+			if (mdp_hist.b)
+				memcpy(mdp_hist.b, MDP_BASE + 0x94300,
+						mdp_hist.bin_cnt*4);
+			complete(&mdp_hist_comp);
+			if (mdp_is_hist_start == TRUE) {
+				MDP_OUTP(MDP_BASE + 0x94004,
+						 mdp_hist.frame_cnt);
+				MDP_OUTP(MDP_BASE + 0x94000, 1);
+			}
+		}
+
+		/* LCDC UnderFlow */
+		if (mdp_interrupt & LCDC_UNDERFLOW) {
+			mdp_lcdc_underflow_cnt++;
+			/*
+			 * When an underflow happens, the HW resets all the
+			 * histogram registers that were set before, so
+			 * restore them back to normal.
+			 */
+			MDP_OUTP(MDP_BASE + 0x94010, 1);
+			MDP_OUTP(MDP_BASE + 0x9401c, 2);
+			if (mdp_is_hist_start == TRUE) {
+				MDP_OUTP(MDP_BASE + 0x94004,
+						 mdp_hist.frame_cnt);
+				MDP_OUTP(MDP_BASE + 0x94000, 1);
+			}
+		}
+		/* LCDC Frame Start */
+		if (mdp_interrupt & LCDC_FRAME_START) {
+			/* let's disable LCDC interrupt */
+			mdp_intr_mask &= ~LCDC_FRAME_START;
+			outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+
+			dma = &dma2_data;
+			if (dma->waiting) {
+				dma->waiting = FALSE;
+				complete(&dma->comp);
+			}
+		}
+
+		/* DMA_S LCD-Out Complete */
+		if (mdp_interrupt & MDP_DMA_S_DONE) {
+			dma = &dma_s_data;
+			dma->busy = FALSE;
+			mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_OFF,
+				      TRUE);
+			complete(&dma->comp);
+		}
+		/* DMA_E LCD-Out Complete */
+		if (mdp_interrupt & MDP_DMA_E_DONE) {
+			dma = &dma_s_data;
+			dma->busy = FALSE;
+			mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_OFF,
+				TRUE);
+			complete(&dma->comp);
+		}
+
+#endif
+
+		/* DMA2 LCD-Out Complete */
+		if (mdp_interrupt & MDP_DMA_P_DONE) {
+			struct timeval now;
+
+			mdp_dma2_last_update_time = ktime_sub(ktime_get_real(),
+				mdp_dma2_last_update_time);
+			if (mdp_debug[MDP_DMA2_BLOCK]) {
+				jiffies_to_timeval(jiffies, &now);
+				mdp_dma2_timeval.tv_usec =
+				    now.tv_usec - mdp_dma2_timeval.tv_usec;
+			}
+#ifndef CONFIG_FB_MSM_MDP303
+			dma = &dma2_data;
+			dma->busy = FALSE;
+			mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_OFF,
+				      TRUE);
+			complete(&dma->comp);
+#else
+			if (mdp_prim_panel_type == MIPI_CMD_PANEL) {
+				dma = &dma2_data;
+				dma->busy = FALSE;
+				mdp_pipe_ctrl(MDP_DMA2_BLOCK,
+					MDP_BLOCK_POWER_OFF, TRUE);
+				complete(&dma->comp);
+			}
+#endif
+		}
+		/* PPP Complete */
+		if (mdp_interrupt & MDP_PPP_DONE) {
+#ifdef	CONFIG_FB_MSM_MDP31
+			MDP_OUTP(MDP_BASE + 0x00100, 0xFFFF);
+#endif
+			mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
+			if (mdp_ppp_waiting) {
+				mdp_ppp_waiting = FALSE;
+				complete(&mdp_ppp_comp);
+			}
+		}
+	} while (1);
+
+	mdp_is_in_isr = FALSE;
+
 	return IRQ_HANDLED;
 }
+#endif
 
-static uint32_t mdp_check_mask(uint32_t mask)
+static void mdp_drv_init(void)
 {
-	uint32_t ret;
-	unsigned long irq_flags;
+	int i;
 
-	spin_lock_irqsave(&mdp_lock, irq_flags);
-	ret = mdp_irq_mask & mask;
-	spin_unlock_irqrestore(&mdp_lock, irq_flags);
-	return ret;
-}
-
-static int mdp_wait(struct mdp_info *mdp, uint32_t mask, wait_queue_head_t *wq)
-{
-	int ret = 0;
-	unsigned long irq_flags;
-
-	wait_event_timeout(*wq, !mdp_check_mask(mask), HZ);
-
-	spin_lock_irqsave(&mdp_lock, irq_flags);
-	if (mdp_irq_mask & mask) {
-		locked_disable_mdp_irq(mdp, mask);
-		printk(KERN_WARNING "timeout waiting for mdp to complete %x\n",
-		       mask);
-		ret = -ETIMEDOUT;
-	}
-	spin_unlock_irqrestore(&mdp_lock, irq_flags);
-
-	return ret;
-}
-
-void mdp_dma_wait(struct mdp_device *mdp_dev)
-{
-#define MDP_MAX_TIMEOUTS 20
-	static int timeout_count;
-	struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
-
-	if (mdp_wait(mdp, DL0_DMA2_TERM_DONE, &mdp_dma2_waitqueue) == -ETIMEDOUT)
-		timeout_count++;
-	else
-		timeout_count = 0;
-
-	if (timeout_count > MDP_MAX_TIMEOUTS) {
-		printk(KERN_ERR "mdp: dma failed %d times, somethings wrong!\n",
-		       MDP_MAX_TIMEOUTS);
-		BUG();
-	}
-}
-
-static int mdp_ppp_wait(struct mdp_info *mdp)
-{
-	return mdp_wait(mdp, DL0_ROI_DONE, &mdp_ppp_waitqueue);
-}
-
-void mdp_dma_to_mddi(struct mdp_info *mdp, uint32_t addr, uint32_t stride,
-		     uint32_t width, uint32_t height, uint32_t x, uint32_t y,
-		     struct msmfb_callback *callback)
-{
-	uint32_t dma2_cfg;
-	uint16_t ld_param = 0; /* 0=PRIM, 1=SECD, 2=EXT */
-
-	if (enable_mdp_irq(mdp, DL0_DMA2_TERM_DONE)) {
-		printk(KERN_ERR "mdp_dma_to_mddi: busy\n");
-		return;
+	for (i = 0; i < MDP_MAX_BLOCK; i++) {
+		mdp_debug[i] = 0;
 	}
 
-	dma_callback = callback;
+	/* initialize spin lock and workqueue */
+	spin_lock_init(&mdp_spin_lock);
+	mdp_dma_wq = create_singlethread_workqueue("mdp_dma_wq");
+	mdp_vsync_wq = create_singlethread_workqueue("mdp_vsync_wq");
+	mdp_pipe_ctrl_wq = create_singlethread_workqueue("mdp_pipe_ctrl_wq");
+	INIT_DELAYED_WORK(&mdp_pipe_ctrl_worker,
+			  mdp_pipe_ctrl_workqueue_handler);
 
-	dma2_cfg = DMA_PACK_TIGHT |
-		DMA_PACK_ALIGN_LSB |
-		DMA_PACK_PATTERN_RGB |
-		DMA_OUT_SEL_AHB |
-		DMA_IBUF_NONCONTIGUOUS;
+	/* initialize semaphore */
+	init_completion(&mdp_ppp_comp);
+	sema_init(&mdp_ppp_mutex, 1);
+	sema_init(&mdp_pipe_ctrl_mutex, 1);
 
-	dma2_cfg |= DMA_IBUF_FORMAT_RGB565;
+	dma2_data.busy = FALSE;
+	dma2_data.dmap_busy = FALSE;
+	dma2_data.waiting = FALSE;
+	init_completion(&dma2_data.comp);
+	init_completion(&dma2_data.dmap_comp);
+	sema_init(&dma2_data.mutex, 1);
+	mutex_init(&dma2_data.ov_mutex);
 
-	dma2_cfg |= DMA_OUT_SEL_MDDI;
+	dma3_data.busy = FALSE;
+	dma3_data.waiting = FALSE;
+	init_completion(&dma3_data.comp);
+	sema_init(&dma3_data.mutex, 1);
 
-	dma2_cfg |= DMA_MDDI_DMAOUT_LCD_SEL_PRIMARY;
+	dma_s_data.busy = FALSE;
+	dma_s_data.waiting = FALSE;
+	init_completion(&dma_s_data.comp);
+	sema_init(&dma_s_data.mutex, 1);
 
-	dma2_cfg |= DMA_DITHER_EN;
+#ifndef CONFIG_FB_MSM_MDP303
+	dma_e_data.busy = FALSE;
+	dma_e_data.waiting = FALSE;
+	init_completion(&dma_e_data.comp);
+	mutex_init(&dma_e_data.ov_mutex);
+#endif
 
-	/* setup size, address, and stride */
-	mdp_writel(mdp, (height << 16) | (width),
-		   MDP_CMD_DEBUG_ACCESS_BASE + 0x0184);
-	mdp_writel(mdp, addr, MDP_CMD_DEBUG_ACCESS_BASE + 0x0188);
-	mdp_writel(mdp, stride, MDP_CMD_DEBUG_ACCESS_BASE + 0x018C);
+#ifndef CONFIG_FB_MSM_MDP22
+	init_completion(&mdp_hist_comp);
+#endif
 
-	/* 666 18BPP */
-	dma2_cfg |= DMA_DSTC0G_6BITS | DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;
-
-	/* set y & x offset and MDDI transaction parameters */
-	mdp_writel(mdp, (y << 16) | (x), MDP_CMD_DEBUG_ACCESS_BASE + 0x0194);
-	mdp_writel(mdp, ld_param, MDP_CMD_DEBUG_ACCESS_BASE + 0x01a0);
-	mdp_writel(mdp, (MDDI_VDO_PACKET_DESC << 16) | MDDI_VDO_PACKET_PRIM,
-		   MDP_CMD_DEBUG_ACCESS_BASE + 0x01a4);
-
-	mdp_writel(mdp, dma2_cfg, MDP_CMD_DEBUG_ACCESS_BASE + 0x0180);
-
-	/* start DMA2 */
-	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0044);
-}
-
-void mdp_dma(struct mdp_device *mdp_dev, uint32_t addr, uint32_t stride,
-	     uint32_t width, uint32_t height, uint32_t x, uint32_t y,
-	     struct msmfb_callback *callback, int interface)
-{
-	struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
-
-	if (interface == MSM_MDDI_PMDH_INTERFACE) {
-		mdp_dma_to_mddi(mdp, addr, stride, width, height, x, y,
-				callback);
-	}
-}
-
-int get_img(struct mdp_img *img, struct fb_info *info,
-	    unsigned long *start, unsigned long *len,
-	    struct file **filep)
-{
-	int put_needed, ret = 0;
-	struct file *file;
-
-	file = fget_light(img->memory_id, &put_needed);
-	if (file == NULL)
-		return -1;
-
-	if (MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
-		*start = info->fix.smem_start;
-		*len = info->fix.smem_len;
-	} else
-		ret = -1;
-	fput_light(file, put_needed);
-
-	return ret;
-}
-
-void put_img(struct file *src_file, struct file *dst_file)
-{
-}
-
-int mdp_blit(struct mdp_device *mdp_dev, struct fb_info *fb,
-	     struct mdp_blit_req *req)
-{
-	int ret;
-	unsigned long src_start = 0, src_len = 0, dst_start = 0, dst_len = 0;
-	struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
-	struct file *src_file = 0, *dst_file = 0;
-
-	/* WORKAROUND FOR HARDWARE BUG IN BG TILE FETCH */
-	if (unlikely(req->src_rect.h == 0 ||
-		     req->src_rect.w == 0)) {
-		printk(KERN_ERR "mpd_ppp: src img of zero size!\n");
-		return -EINVAL;
-	}
-	if (unlikely(req->dst_rect.h == 0 ||
-		     req->dst_rect.w == 0))
-		return -EINVAL;
-
-	/* do this first so that if this fails, the caller can always
-	 * safely call put_img */
-	if (unlikely(get_img(&req->src, fb, &src_start, &src_len, &src_file))) {
-		printk(KERN_ERR "mpd_ppp: could not retrieve src image from "
-				"memory\n");
-		return -EINVAL;
+	/* initializing mdp power block counter to 0 */
+	for (i = 0; i < MDP_MAX_BLOCK; i++) {
+		atomic_set(&mdp_block_power_cnt[i], 0);
 	}
 
-	if (unlikely(get_img(&req->dst, fb, &dst_start, &dst_len, &dst_file))) {
-		printk(KERN_ERR "mpd_ppp: could not retrieve dst image from "
-				"memory\n");
-		return -EINVAL;
-	}
-	mutex_lock(&mdp_mutex);
+#ifdef MSM_FB_ENABLE_DBGFS
+	{
+		struct dentry *root;
+		char sub_name[] = "mdp";
 
-	/* transp_masking unimplemented */
-	req->transp_mask = MDP_TRANSP_NOP;
-	if (unlikely((req->transp_mask != MDP_TRANSP_NOP ||
-		      req->alpha != MDP_ALPHA_NOP ||
-		      HAS_ALPHA(req->src.format)) &&
-		     (req->flags & MDP_ROT_90 &&
-		      req->dst_rect.w <= 16 && req->dst_rect.h >= 16))) {
-		int i;
-		unsigned int tiles = req->dst_rect.h / 16;
-		unsigned int remainder = req->dst_rect.h % 16;
-		req->src_rect.w = 16*req->src_rect.w / req->dst_rect.h;
-		req->dst_rect.h = 16;
-		for (i = 0; i < tiles; i++) {
-			enable_mdp_irq(mdp, DL0_ROI_DONE);
-			ret = mdp_ppp_blit(mdp, req, src_file, src_start,
-					   src_len, dst_file, dst_start,
-					   dst_len);
-			if (ret)
-				goto err_bad_blit;
-			ret = mdp_ppp_wait(mdp);
-			if (ret)
-				goto err_wait_failed;
-			req->dst_rect.y += 16;
-			req->src_rect.x += req->src_rect.w;
+		root = msm_fb_get_debugfs_root();
+		if (root != NULL) {
+			mdp_dir = debugfs_create_dir(sub_name, root);
+
+			if (mdp_dir) {
+				msm_fb_debugfs_file_create(mdp_dir,
+					"dma2_update_time_in_usec",
+					(u32 *) &mdp_dma2_update_time_in_usec);
+				msm_fb_debugfs_file_create(mdp_dir,
+					"vs_rdcnt_slow",
+					(u32 *) &mdp_lcd_rd_cnt_offset_slow);
+				msm_fb_debugfs_file_create(mdp_dir,
+					"vs_rdcnt_fast",
+					(u32 *) &mdp_lcd_rd_cnt_offset_fast);
+				msm_fb_debugfs_file_create(mdp_dir,
+					"mdp_usec_diff_threshold",
+					(u32 *) &mdp_usec_diff_threshold);
+				msm_fb_debugfs_file_create(mdp_dir,
+					"mdp_current_clk_on",
+					(u32 *) &mdp_current_clk_on);
+#ifdef CONFIG_FB_MSM_LCDC
+				msm_fb_debugfs_file_create(mdp_dir,
+					"lcdc_start_x",
+					(u32 *) &first_pixel_start_x);
+				msm_fb_debugfs_file_create(mdp_dir,
+					"lcdc_start_y",
+					(u32 *) &first_pixel_start_y);
+#endif
+			}
 		}
-		if (!remainder)
-			goto end;
-		req->src_rect.w = remainder*req->src_rect.w / req->dst_rect.h;
-		req->dst_rect.h = remainder;
 	}
-	enable_mdp_irq(mdp, DL0_ROI_DONE);
-	ret = mdp_ppp_blit(mdp, req, src_file, src_start, src_len, dst_file,
-			   dst_start,
-			   dst_len);
-	if (ret)
-		goto err_bad_blit;
-	ret = mdp_ppp_wait(mdp);
-	if (ret)
-		goto err_wait_failed;
-end:
-	put_img(src_file, dst_file);
-	mutex_unlock(&mdp_mutex);
+#endif
+}
+
+static int mdp_probe(struct platform_device *pdev);
+static int mdp_remove(struct platform_device *pdev);
+
+static int mdp_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: suspending...\n");
 	return 0;
-err_bad_blit:
-	disable_mdp_irq(mdp, DL0_ROI_DONE);
-err_wait_failed:
-	put_img(src_file, dst_file);
-	mutex_unlock(&mdp_mutex);
-	return ret;
 }
 
-void mdp_set_grp_disp(struct mdp_device *mdp_dev, unsigned disp_id)
+static int mdp_runtime_resume(struct device *dev)
 {
-	struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
-
-	disp_id &= 0xf;
-	mdp_writel(mdp, disp_id, MDP_FULL_BYPASS_WORD43);
-}
-
-int register_mdp_client(struct class_interface *cint)
-{
-	if (!mdp_class) {
-		pr_err("mdp: no mdp_class when registering mdp client\n");
-		return -ENODEV;
-	}
-	cint->class = mdp_class;
-	return class_interface_register(cint);
-}
-
-#include "mdp_csc_table.h"
-#include "mdp_scale_tables.h"
-
-int mdp_probe(struct platform_device *pdev)
-{
-	struct resource *resource;
-	int ret;
-	int n;
-	struct mdp_info *mdp;
-
-	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!resource) {
-		pr_err("mdp: can not get mdp mem resource!\n");
-		return -ENOMEM;
-	}
-
-	mdp = kzalloc(sizeof(struct mdp_info), GFP_KERNEL);
-	if (!mdp)
-		return -ENOMEM;
-
-	mdp->irq = platform_get_irq(pdev, 0);
-	if (mdp->irq < 0) {
-		pr_err("mdp: can not get mdp irq\n");
-		ret = mdp->irq;
-		goto error_get_irq;
-	}
-
-	mdp->base = ioremap(resource->start,
-			    resource->end - resource->start);
-	if (mdp->base == 0) {
-		printk(KERN_ERR "msmfb: cannot allocate mdp regs!\n");
-		ret = -ENOMEM;
-		goto error_ioremap;
-	}
-
-	mdp->mdp_dev.dma = mdp_dma;
-	mdp->mdp_dev.dma_wait = mdp_dma_wait;
-	mdp->mdp_dev.blit = mdp_blit;
-	mdp->mdp_dev.set_grp_disp = mdp_set_grp_disp;
-
-	clk = clk_get(&pdev->dev, "mdp_clk");
-	if (IS_ERR(clk)) {
-		printk(KERN_INFO "mdp: failed to get mdp clk");
-		return PTR_ERR(clk);
-	}
-
-	ret = request_irq(mdp->irq, mdp_isr, IRQF_DISABLED, "msm_mdp", mdp);
-	if (ret)
-		goto error_request_irq;
-	disable_irq(mdp->irq);
-	mdp_irq_mask = 0;
-
-	/* debug interface write access */
-	mdp_writel(mdp, 1, 0x60);
-
-	mdp_writel(mdp, MDP_ANY_INTR_MASK, MDP_INTR_ENABLE);
-	mdp_writel(mdp, 1, MDP_EBI2_PORTMAP_MODE);
-
-	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01f8);
-	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01fc);
-
-	for (n = 0; n < ARRAY_SIZE(csc_table); n++)
-		mdp_writel(mdp, csc_table[n].val, csc_table[n].reg);
-
-	/* clear up unused fg/main registers */
-	/* comp.plane 2&3 ystride */
-	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0120);
-
-	/* unpacked pattern */
-	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x012c);
-	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0130);
-	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0134);
-	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0158);
-	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x015c);
-	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0160);
-	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0170);
-	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0174);
-	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x017c);
-
-	/* comp.plane 2 & 3 */
-	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0114);
-	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0118);
-
-	/* clear unused bg registers */
-	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01c8);
-	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01d0);
-	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01dc);
-	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01e0);
-	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01e4);
-
-	for (n = 0; n < ARRAY_SIZE(mdp_upscale_table); n++)
-		mdp_writel(mdp, mdp_upscale_table[n].val,
-		       mdp_upscale_table[n].reg);
-
-	for (n = 0; n < 9; n++)
-		mdp_writel(mdp, mdp_default_ccs[n], 0x40440 + 4 * n);
-	mdp_writel(mdp, mdp_default_ccs[9], 0x40500 + 4 * 0);
-	mdp_writel(mdp, mdp_default_ccs[10], 0x40500 + 4 * 0);
-	mdp_writel(mdp, mdp_default_ccs[11], 0x40500 + 4 * 0);
-
-	/* register mdp device */
-	mdp->mdp_dev.dev.parent = &pdev->dev;
-	mdp->mdp_dev.dev.class = mdp_class;
-	dev_set_name(&mdp->mdp_dev.dev, "mdp%d", pdev->id);
-
-	/* if you can remove the platform device you'd have to implement
-	 * this:
-	mdp_dev.release = mdp_class; */
-
-	ret = device_register(&mdp->mdp_dev.dev);
-	if (ret)
-		goto error_device_register;
+	dev_dbg(dev, "pm_runtime: resuming...\n");
 	return 0;
-
-error_device_register:
-	free_irq(mdp->irq, mdp);
-error_request_irq:
-	iounmap(mdp->base);
-error_get_irq:
-error_ioremap:
-	kfree(mdp);
-	return ret;
 }
 
-static struct platform_driver msm_mdp_driver = {
-	.probe = mdp_probe,
-	.driver = {.name = "msm_mdp"},
+static struct dev_pm_ops mdp_dev_pm_ops = {
+	.runtime_suspend = mdp_runtime_suspend,
+	.runtime_resume = mdp_runtime_resume,
 };
 
-static int __init mdp_init(void)
+
+static struct platform_driver mdp_driver = {
+	.probe = mdp_probe,
+	.remove = mdp_remove,
+#ifndef CONFIG_HAS_EARLYSUSPEND
+	.suspend = mdp_suspend,
+	.resume = NULL,
+#endif
+	.shutdown = NULL,
+	.driver = {
+		/*
+		 * Driver name must match the device name added in
+		 * platform.c.
+		 */
+		.name = "mdp",
+		.pm = &mdp_dev_pm_ops,
+	},
+};
+
+static int mdp_off(struct platform_device *pdev)
 {
-	mdp_class = class_create(THIS_MODULE, "msm_mdp");
-	if (IS_ERR(mdp_class)) {
-		printk(KERN_ERR "Error creating mdp class\n");
-		return PTR_ERR(mdp_class);
-	}
-	return platform_driver_register(&msm_mdp_driver);
+	int ret = 0;
+	mdp_histogram_ctrl(FALSE);
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	ret = panel_next_off(pdev);
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+	return ret;
 }
 
-subsys_initcall(mdp_init);
+static int mdp_on(struct platform_device *pdev)
+{
+	int ret = 0;
+#ifdef CONFIG_FB_MSM_MDP40
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	if (is_mdp4_hw_reset()) {
+		mdp4_hw_init();
+		outpdw(MDP_BASE + 0x0038, mdp4_display_intf);
+	}
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+#endif
+	mdp_histogram_ctrl(TRUE);
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	ret = panel_next_on(pdev);
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	return ret;
+}
+
+static int mdp_resource_initialized;
+static struct msm_panel_common_pdata *mdp_pdata;
+
+uint32 mdp_hw_revision;
+
+/*
+ * mdp_hw_revision:
+ * 0 == V1
+ * 1 == V2
+ * 2 == V2.1
+ *
+ */
+void mdp_hw_version(void)
+{
+	char *cp;
+	uint32 *hp;
+
+	if (mdp_pdata == NULL)
+		return;
+
+	mdp_hw_revision = MDP4_REVISION_NONE;
+	if (mdp_pdata->hw_revision_addr == 0)
+		return;
+
+	/* tlmmgpio2 shadow */
+	cp = (char *)ioremap(mdp_pdata->hw_revision_addr, 0x16);
+
+	if (cp == NULL)
+		return;
+
+	hp = (uint32 *)cp;	/* HW_REVISION_NUMBER */
+	mdp_hw_revision = *hp;
+	iounmap(cp);
+
+	mdp_hw_revision >>= 28;	/* bit 31:28 */
+	mdp_hw_revision &= 0x0f;
+
+	MSM_FB_DEBUG("%s: mdp_hw_revision=%x\n",
+				__func__, mdp_hw_revision);
+}
+
+#ifdef CONFIG_FB_MSM_MDP40
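+/*
+ * configure_mdp_core_clk_table: raises the minimum MDP core clock to
+ * min_clk_rate and bumps any platform clock-table entry that now sits
+ * below the resulting rate up to the current rate.
+ */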
+static void configure_mdp_core_clk_table(uint32 min_clk_rate)
+{
+	uint8 count;
+	uint32 current_rate;
+	if (mdp_clk && mdp_pdata
+		&& mdp_pdata->mdp_core_clk_table) {
+		if (clk_set_min_rate(mdp_clk,
+				 min_clk_rate) < 0)
+			printk(KERN_ERR "%s: clk_set_min_rate failed\n",
+							 __func__);
+		else {
+			count = 0;
+			current_rate = clk_get_rate(mdp_clk);
+			while (count < mdp_pdata->num_mdp_clk) {
+				if (mdp_pdata->mdp_core_clk_table[count]
+						< current_rate) {
+					mdp_pdata->
+					mdp_core_clk_table[count] =
+							current_rate;
+				}
+				count++;
+			}
+		}
+	}
+}
+#endif
+
+#ifdef CONFIG_MSM_BUS_SCALING
+static uint32_t mdp_bus_scale_handle;
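+/*
+ * mdp_bus_scale_update_request: asks the msm_bus client registered in
+ * mdp_probe() to switch to the bus-scale use case selected by 'index'.
+ */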
+int mdp_bus_scale_update_request(uint32_t index)
+{
+	if (!mdp_pdata && (!mdp_pdata->mdp_bus_scale_table
+	     || index > (mdp_pdata->mdp_bus_scale_table->num_usecases - 1))) {
+		printk(KERN_ERR "%s invalid table or index\n", __func__);
+		return -EINVAL;
+	}
+	if (mdp_bus_scale_handle < 1) {
+		printk(KERN_ERR "%s invalid bus handle\n", __func__);
+		return -EINVAL;
+	}
+	return msm_bus_scale_client_update_request(mdp_bus_scale_handle,
+							index);
+}
+#endif
+DEFINE_MUTEX(mdp_clk_lock);
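+/*
+ * mdp_set_core_clk: maps 'perf_level' to an entry in the platform
+ * mdp_core_clk_table (index num_mdp_clk - perf_level) and programs
+ * mdp_clk to that rate under mdp_clk_lock.
+ */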
+int mdp_set_core_clk(uint16 perf_level)
+{
+	int ret = -EINVAL;
+	if (mdp_clk && mdp_pdata
+		 && mdp_pdata->mdp_core_clk_table) {
+		if (perf_level > mdp_pdata->num_mdp_clk)
+			printk(KERN_ERR "%s invalid perf level\n", __func__);
+		else {
+			mutex_lock(&mdp_clk_lock);
+			if (mdp4_extn_disp)
+				perf_level = 1;
+			ret = clk_set_rate(mdp_clk,
+				mdp_pdata->
+				mdp_core_clk_table[mdp_pdata->num_mdp_clk
+						 - perf_level]);
+			mutex_unlock(&mdp_clk_lock);
+			if (ret) {
+				printk(KERN_ERR "%s unable to set mdp_core_clk rate\n",
+					__func__);
+			}
+		}
+	}
+	return ret;
+}
+
+unsigned long mdp_get_core_clk(void)
+{
+	unsigned long clk_rate = 0;
+	if (mdp_clk) {
+		mutex_lock(&mdp_clk_lock);
+		clk_rate = clk_get_rate(mdp_clk);
+		mutex_unlock(&mdp_clk_lock);
+	}
+
+	return clk_rate;
+}
+
+unsigned long mdp_perf_level2clk_rate(uint32 perf_level)
+{
+	unsigned long clk_rate = 0;
+
+	if (mdp_pdata && mdp_pdata->mdp_core_clk_table) {
+		if (perf_level > mdp_pdata->num_mdp_clk) {
+			printk(KERN_ERR "%s invalid perf level\n", __func__);
+			clk_rate = mdp_get_core_clk();
+		} else {
+			if (mdp4_extn_disp)
+				perf_level = 1;
+			clk_rate = mdp_pdata->
+				mdp_core_clk_table[mdp_pdata->num_mdp_clk
+					- perf_level];
+		}
+	} else
+		clk_rate = mdp_get_core_clk();
+
+	return clk_rate;
+}
+
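+/*
+ * mdp_irq_clk_setup: registers the MDP interrupt handler (left disabled
+ * until a block needs it), grabs the MDP footswitch regulator, the core
+ * and pixel clocks (plus the AXI and LUT clocks on MDP rev 4.2), and, on
+ * MDP4 targets, applies the platform core clock rate.
+ */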
+static int mdp_irq_clk_setup(void)
+{
+	int ret;
+
+#ifdef CONFIG_FB_MSM_MDP40
+	ret = request_irq(mdp_irq, mdp4_isr, IRQF_DISABLED, "MDP", 0);
+#else
+	ret = request_irq(mdp_irq, mdp_isr, IRQF_DISABLED, "MDP", 0);
+#endif
+	if (ret) {
+		printk(KERN_ERR "mdp request_irq() failed!\n");
+		return ret;
+	}
+	disable_irq(mdp_irq);
+
+	footswitch = regulator_get(NULL, "fs_mdp");
+	if (IS_ERR(footswitch))
+		footswitch = NULL;
+	else
+		regulator_enable(footswitch);
+
+	mdp_clk = clk_get(NULL, "mdp_clk");
+	if (IS_ERR(mdp_clk)) {
+		ret = PTR_ERR(mdp_clk);
+		printk(KERN_ERR "can't get mdp_clk error:%d!\n", ret);
+		free_irq(mdp_irq, 0);
+		return ret;
+	}
+
+	mdp_pclk = clk_get(NULL, "mdp_pclk");
+	if (IS_ERR(mdp_pclk))
+		mdp_pclk = NULL;
+
+	if (mdp_rev == MDP_REV_42) {
+		mdp_axi_clk = clk_get(NULL, "mdp_axi_clk");
+		if (IS_ERR(mdp_axi_clk)) {
+			ret = PTR_ERR(mdp_axi_clk);
+			clk_put(mdp_clk);
+			pr_err("can't get mdp_axi_clk error:%d!\n", ret);
+			return ret;
+		}
+
+		mdp_lut_clk = clk_get(NULL, "lut_mdp");
+		if (IS_ERR(mdp_lut_clk)) {
+			ret = PTR_ERR(mdp_lut_clk);
+			pr_err("can't get mdp_lut_clk error:%d!\n", ret);
+			clk_put(mdp_clk);
+			clk_put(mdp_axi_clk);
+			free_irq(mdp_irq, 0);
+			return ret;
+		}
+	} else {
+		mdp_axi_clk = NULL;
+		mdp_lut_clk = NULL;
+	}
+
+#ifdef CONFIG_FB_MSM_MDP40
+	/*
+	 * mdp_clk should always be greater than mdp_pclk
+	 */
+	if (mdp_pdata && mdp_pdata->mdp_core_clk_rate) {
+		mutex_lock(&mdp_clk_lock);
+		clk_set_rate(mdp_clk, mdp_pdata->mdp_core_clk_rate);
+		if (mdp_lut_clk != NULL)
+			clk_set_rate(mdp_lut_clk, mdp_pdata->mdp_core_clk_rate);
+		mutex_unlock(&mdp_clk_lock);
+	}
+	MSM_FB_DEBUG("mdp_clk: mdp_clk=%d\n", (int)clk_get_rate(mdp_clk));
+#endif
+	return 0;
+}
+
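+/*
+ * mdp_probe: handles two kinds of devices.  For the MDP core device
+ * (pdev->id == 0 with resources) it maps the register space, sets up the
+ * IRQ and clocks and runs the hardware init.  For each panel device it
+ * allocates an "msm_fb" platform device, chains the panel on/off callbacks
+ * and selects the DMA update path that matches the panel type.
+ */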
+static int mdp_probe(struct platform_device *pdev)
+{
+	struct platform_device *msm_fb_dev = NULL;
+	struct msm_fb_data_type *mfd;
+	struct msm_fb_panel_data *pdata = NULL;
+	int rc;
+	resource_size_t size;
+#ifdef CONFIG_FB_MSM_MDP40
+	int intf, if_no;
+#else
+	unsigned long flag;
+#endif
+#if defined(CONFIG_FB_MSM_MIPI_DSI) && defined(CONFIG_FB_MSM_MDP40)
+	struct mipi_panel_info *mipi;
+#endif
+
+	if ((pdev->id == 0) && (pdev->num_resources > 0)) {
+		mdp_pdata = pdev->dev.platform_data;
+
+		size =  resource_size(&pdev->resource[0]);
+		msm_mdp_base = ioremap(pdev->resource[0].start, size);
+
+		MSM_FB_DEBUG("MDP HW Base phy_Address = 0x%x virt = 0x%x\n",
+			(int)pdev->resource[0].start, (int)msm_mdp_base);
+
+		if (unlikely(!msm_mdp_base))
+			return -ENOMEM;
+
+		mdp_irq = platform_get_irq(pdev, 0);
+		if (mdp_irq < 0) {
+			pr_err("mdp: can not get mdp irq\n");
+			return -ENOMEM;
+		}
+
+		mdp_rev = mdp_pdata->mdp_rev;
+		rc = mdp_irq_clk_setup();
+
+		if (rc)
+			return rc;
+
+		mdp_hw_version();
+
+		/* initializing mdp hw */
+#ifdef CONFIG_FB_MSM_MDP40
+		mdp4_hw_init();
+		mdp4_fetch_cfg(clk_get_rate(mdp_clk));
+#else
+		mdp_hw_init();
+#endif
+
+#ifdef CONFIG_FB_MSM_OVERLAY
+		mdp_hw_cursor_init();
+#endif
+
+		mdp_resource_initialized = 1;
+		return 0;
+	}
+
+	if (!mdp_resource_initialized)
+		return -EPERM;
+
+	mfd = platform_get_drvdata(pdev);
+
+	if (!mfd)
+		return -ENODEV;
+
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
+		return -ENOMEM;
+
+	msm_fb_dev = platform_device_alloc("msm_fb", pdev->id);
+	if (!msm_fb_dev)
+		return -ENOMEM;
+
+	/* link to the latest pdev */
+	mfd->pdev = msm_fb_dev;
+
+	/* add panel data */
+	if (platform_device_add_data
+	    (msm_fb_dev, pdev->dev.platform_data,
+	     sizeof(struct msm_fb_panel_data))) {
+		printk(KERN_ERR "mdp_probe: platform_device_add_data failed!\n");
+		rc = -ENOMEM;
+		goto mdp_probe_err;
+	}
+	/* data chain */
+	pdata = msm_fb_dev->dev.platform_data;
+	pdata->on = mdp_on;
+	pdata->off = mdp_off;
+	pdata->next = pdev;
+
+	mdp_prim_panel_type = mfd->panel.type;
+	switch (mfd->panel.type) {
+	case EXT_MDDI_PANEL:
+	case MDDI_PANEL:
+	case EBI2_PANEL:
+		INIT_WORK(&mfd->dma_update_worker,
+			  mdp_lcd_update_workqueue_handler);
+		INIT_WORK(&mfd->vsync_resync_worker,
+			  mdp_vsync_resync_workqueue_handler);
+		mfd->hw_refresh = FALSE;
+
+		if (mfd->panel.type == EXT_MDDI_PANEL) {
+			/* 15 fps -> 66 msec */
+			mfd->refresh_timer_duration = (66 * HZ / 1000);
+		} else {
+			/* 24 fps -> 42 msec */
+			mfd->refresh_timer_duration = (42 * HZ / 1000);
+		}
+
+#ifdef CONFIG_FB_MSM_MDP22
+		mfd->dma_fnc = mdp_dma2_update;
+		mfd->dma = &dma2_data;
+#else
+		if (mfd->panel_info.pdest == DISPLAY_1) {
+#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDDI)
+			mfd->dma_fnc = mdp4_mddi_overlay;
+			mfd->cursor_update = mdp4_mddi_overlay_cursor;
+#else
+			mfd->dma_fnc = mdp_dma2_update;
+#endif
+			mfd->dma = &dma2_data;
+			mfd->lut_update = mdp_lut_update_nonlcdc;
+			mfd->do_histogram = mdp_do_histogram;
+		} else {
+			mfd->dma_fnc = mdp_dma_s_update;
+			mfd->dma = &dma_s_data;
+		}
+#endif
+		if (mdp_pdata)
+			mfd->vsync_gpio = mdp_pdata->gpio;
+		else
+			mfd->vsync_gpio = -1;
+
+#ifdef CONFIG_FB_MSM_MDP40
+		if (mfd->panel.type == EBI2_PANEL)
+			intf = EBI2_INTF;
+		else
+			intf = MDDI_INTF;
+
+		if (mfd->panel_info.pdest == DISPLAY_1)
+			if_no = PRIMARY_INTF_SEL;
+		else
+			if_no = SECONDARY_INTF_SEL;
+
+		mdp4_display_intf_sel(if_no, intf);
+#endif
+		mdp_config_vsync(mfd);
+		break;
+
+#ifdef CONFIG_FB_MSM_MIPI_DSI
+	case MIPI_VIDEO_PANEL:
+#ifndef CONFIG_FB_MSM_MDP303
+		pdata->on = mdp4_dsi_video_on;
+		pdata->off = mdp4_dsi_video_off;
+		mfd->hw_refresh = TRUE;
+		mfd->dma_fnc = mdp4_dsi_video_overlay;
+		if (mfd->panel_info.pdest == DISPLAY_1) {
+			if_no = PRIMARY_INTF_SEL;
+			mfd->dma = &dma2_data;
+		} else {
+			if_no = EXTERNAL_INTF_SEL;
+			mfd->dma = &dma_e_data;
+		}
+		mdp4_display_intf_sel(if_no, DSI_VIDEO_INTF);
+#else
+		pdata->on = mdp_dsi_video_on;
+		pdata->off = mdp_dsi_video_off;
+		mfd->hw_refresh = TRUE;
+		mfd->dma_fnc = mdp_dsi_video_update;
+		mfd->do_histogram = mdp_do_histogram;
+		if (mfd->panel_info.pdest == DISPLAY_1)
+			mfd->dma = &dma2_data;
+		else {
+			printk(KERN_ERR "Invalid Selection of destination panel\n");
+			rc = -ENODEV;
+			goto mdp_probe_err;
+		}
+
+#endif
+		break;
+
+	case MIPI_CMD_PANEL:
+#ifndef CONFIG_FB_MSM_MDP303
+		mfd->dma_fnc = mdp4_dsi_cmd_overlay;
+#ifdef CONFIG_FB_MSM_MDP40
+		mipi = &mfd->panel_info.mipi;
+		configure_mdp_core_clk_table((mipi->dsi_pclk_rate) * 3 / 2);
+#endif
+		if (mfd->panel_info.pdest == DISPLAY_1) {
+			if_no = PRIMARY_INTF_SEL;
+			mfd->dma = &dma2_data;
+		} else {
+			if_no = SECONDARY_INTF_SEL;
+			mfd->dma = &dma_s_data;
+		}
+		mdp4_display_intf_sel(if_no, DSI_CMD_INTF);
+#else
+		mfd->dma_fnc = mdp_dma2_update;
+		mfd->do_histogram = mdp_do_histogram;
+		if (mfd->panel_info.pdest == DISPLAY_1)
+			mfd->dma = &dma2_data;
+		else {
+			printk(KERN_ERR "Invalid Selection of destination panel\n");
+			rc = -ENODEV;
+			goto mdp_probe_err;
+		}
+#endif
+		mdp_config_vsync(mfd);
+		break;
+#endif
+
+#ifdef CONFIG_FB_MSM_DTV
+	case DTV_PANEL:
+		pdata->on = mdp4_dtv_on;
+		pdata->off = mdp4_dtv_off;
+		mfd->hw_refresh = TRUE;
+		mfd->cursor_update = mdp_hw_cursor_update;
+		mfd->dma_fnc = mdp4_dtv_overlay;
+		mfd->dma = &dma_e_data;
+		mdp4_display_intf_sel(EXTERNAL_INTF_SEL, DTV_INTF);
+		break;
+#endif
+	case HDMI_PANEL:
+	case LCDC_PANEL:
+		pdata->on = mdp_lcdc_on;
+		pdata->off = mdp_lcdc_off;
+		mfd->hw_refresh = TRUE;
+#if	defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDP40)
+		mfd->cursor_update = mdp_hw_cursor_sync_update;
+#else
+		mfd->cursor_update = mdp_hw_cursor_update;
+#endif
+#ifndef CONFIG_FB_MSM_MDP22
+		mfd->lut_update = mdp_lut_update_lcdc;
+		mfd->do_histogram = mdp_do_histogram;
+#endif
+#ifdef CONFIG_FB_MSM_OVERLAY
+		mfd->dma_fnc = mdp4_lcdc_overlay;
+#else
+		mfd->dma_fnc = mdp_lcdc_update;
+#endif
+
+#ifdef CONFIG_FB_MSM_MDP40
+		configure_mdp_core_clk_table((mfd->panel_info.clk_rate)
+								* 23 / 20);
+		if (mfd->panel.type == HDMI_PANEL) {
+			mfd->dma = &dma_e_data;
+			mdp4_display_intf_sel(EXTERNAL_INTF_SEL, LCDC_RGB_INTF);
+		} else {
+			mfd->dma = &dma2_data;
+			mdp4_display_intf_sel(PRIMARY_INTF_SEL, LCDC_RGB_INTF);
+		}
+#else
+		mfd->dma = &dma2_data;
+		spin_lock_irqsave(&mdp_spin_lock, flag);
+		mdp_intr_mask &= ~MDP_DMA_P_DONE;
+		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+		spin_unlock_irqrestore(&mdp_spin_lock, flag);
+#endif
+		break;
+
+	case TV_PANEL:
+#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_TVOUT)
+		pdata->on = mdp4_atv_on;
+		pdata->off = mdp4_atv_off;
+		mfd->dma_fnc = mdp4_atv_overlay;
+		mfd->dma = &dma_e_data;
+		mdp4_display_intf_sel(EXTERNAL_INTF_SEL, TV_INTF);
+#else
+		pdata->on = mdp_dma3_on;
+		pdata->off = mdp_dma3_off;
+		mfd->hw_refresh = TRUE;
+		mfd->dma_fnc = mdp_dma3_update;
+		mfd->dma = &dma3_data;
+#endif
+		break;
+
+	default:
+		printk(KERN_ERR "mdp_probe: unknown device type!\n");
+		rc = -ENODEV;
+		goto mdp_probe_err;
+	}
+#ifdef CONFIG_FB_MSM_MDP40
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	mdp4_display_intf = inpdw(MDP_BASE + 0x0038);
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+#endif
+
+#ifdef CONFIG_MSM_BUS_SCALING
+	if (!mdp_bus_scale_handle && mdp_pdata &&
+		mdp_pdata->mdp_bus_scale_table) {
+		mdp_bus_scale_handle =
+			msm_bus_scale_register_client(
+					mdp_pdata->mdp_bus_scale_table);
+		if (!mdp_bus_scale_handle) {
+			printk(KERN_ERR "%s not able to get bus scale\n",
+				__func__);
+			return -ENOMEM;
+		}
+	}
+#endif
+	/* set driver data */
+	platform_set_drvdata(msm_fb_dev, mfd);
+
+	rc = platform_device_add(msm_fb_dev);
+	if (rc) {
+		goto mdp_probe_err;
+	}
+
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	pdev_list[pdev_list_cnt++] = pdev;
+	mdp4_extn_disp = 0;
+	return 0;
+
+mdp_probe_err:
+	platform_device_put(msm_fb_dev);
+#ifdef CONFIG_MSM_BUS_SCALING
+	if (mdp_pdata && mdp_pdata->mdp_bus_scale_table &&
+		mdp_bus_scale_handle > 0)
+		msm_bus_scale_unregister_client(mdp_bus_scale_handle);
+#endif
+	return rc;
+}
+
+#ifdef CONFIG_PM
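+/*
+ * mdp_suspend_sub: cancels/flushes the clock-off worker, waits for any
+ * in-flight PPP operation to release its power count, issues a master
+ * power-off request and marks the driver suspended so mdp_pipe_ctrl()
+ * can drop the clocks.
+ */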
+static void mdp_suspend_sub(void)
+{
+	/* cancel pipe ctrl worker */
+	cancel_delayed_work(&mdp_pipe_ctrl_worker);
+
+	/* flush in case the worker could not be cancelled */
+	flush_workqueue(mdp_pipe_ctrl_wq);
+
+	/* let's wait for PPP completion */
+	while (atomic_read(&mdp_block_power_cnt[MDP_PPP_BLOCK]) > 0)
+		cpu_relax();
+
+	/* try to power down */
+	mdp_pipe_ctrl(MDP_MASTER_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+	mutex_lock(&mdp_suspend_mutex);
+	mdp_suspended = TRUE;
+	mutex_unlock(&mdp_suspend_mutex);
+}
+#endif
+
+#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
+static int mdp_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	if (pdev->id == 0) {
+		mdp_suspend_sub();
+		if (mdp_current_clk_on) {
+			printk(KERN_WARNING"MDP suspend failed\n");
+			return -EBUSY;
+		}
+	}
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void mdp_early_suspend(struct early_suspend *h)
+{
+	mdp_suspend_sub();
+}
+
+static void mdp_early_resume(struct early_suspend *h)
+{
+	mutex_lock(&mdp_suspend_mutex);
+	mdp_suspended = FALSE;
+	mutex_unlock(&mdp_suspend_mutex);
+}
+#endif
+
+static int mdp_remove(struct platform_device *pdev)
+{
+	if (footswitch != NULL)
+		regulator_put(footswitch);
+	iounmap(msm_mdp_base);
+	pm_runtime_disable(&pdev->dev);
+#ifdef CONFIG_MSM_BUS_SCALING
+	if (mdp_pdata && mdp_pdata->mdp_bus_scale_table &&
+		mdp_bus_scale_handle > 0)
+		msm_bus_scale_unregister_client(mdp_bus_scale_handle);
+#endif
+	return 0;
+}
+
+static int mdp_register_driver(void)
+{
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB - 1;
+	early_suspend.suspend = mdp_early_suspend;
+	early_suspend.resume = mdp_early_resume;
+	register_early_suspend(&early_suspend);
+#endif
+
+	return platform_driver_register(&mdp_driver);
+}
+
+static int __init mdp_driver_init(void)
+{
+	int ret;
+
+	mdp_drv_init();
+
+	ret = mdp_register_driver();
+	if (ret) {
+		printk(KERN_ERR "mdp_register_driver() failed!\n");
+		return ret;
+	}
+
+#if defined(CONFIG_DEBUG_FS)
+	mdp_debugfs_init();
+#endif
+
+	return 0;
+
+}
+
+module_init(mdp_driver_init);
diff --git a/drivers/video/msm/mdp.h b/drivers/video/msm/mdp.h
new file mode 100644
index 0000000..165502c
--- /dev/null
+++ b/drivers/video/msm/mdp.h
@@ -0,0 +1,727 @@
+/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDP_H
+#define MDP_H
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/fb.h>
+#include <linux/hrtimer.h>
+#include <linux/msm_mdp.h>
+
+#include <mach/hardware.h>
+
+#ifdef CONFIG_MSM_BUS_SCALING
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
+#endif
+
+#include <linux/io.h>
+
+#include <asm/system.h>
+#include <asm/mach-types.h>
+
+#include "msm_fb_panel.h"
+
+extern uint32 mdp_hw_revision;
+extern ulong mdp4_display_intf;
+extern spinlock_t mdp_spin_lock;
+extern int mdp_rev;
+
+#define MDP4_REVISION_V1		0
+#define MDP4_REVISION_V2		1
+#define MDP4_REVISION_V2_1	2
+#define MDP4_REVISION_NONE	0xffffffff
+
+#ifdef BIT
+#undef BIT
+#endif
+
+#define BIT(x)  (1<<(x))
+
+#define MDPOP_NOP               0
+#define MDPOP_LR                BIT(0)	/* left to right flip */
+#define MDPOP_UD                BIT(1)	/* up and down flip */
+#define MDPOP_ROT90             BIT(2)	/* rotate image to 90 degree */
+#define MDPOP_ROT180            (MDPOP_UD|MDPOP_LR)
+#define MDPOP_ROT270            (MDPOP_ROT90|MDPOP_UD|MDPOP_LR)
+#define MDPOP_ASCALE            BIT(7)
+#define MDPOP_ALPHAB            BIT(8)	/* enable alpha blending */
+#define MDPOP_TRANSP            BIT(9)	/* enable transparency */
+#define MDPOP_DITHER            BIT(10)	/* enable dither */
+#define MDPOP_SHARPENING	BIT(11) /* enable sharpening */
+#define MDPOP_BLUR		BIT(12) /* enable blur */
+#define MDPOP_FG_PM_ALPHA       BIT(13)
+
+struct mdp_table_entry {
+	uint32_t reg;
+	uint32_t val;
+};
+
+extern struct mdp_ccs mdp_ccs_yuv2rgb ;
+extern struct mdp_ccs mdp_ccs_rgb2yuv ;
+
+/*
+ * MDP Image Structure
+ */
+typedef struct mdpImg_ {
+	uint32 imgType;		/* Image type */
+	uint32 *bmy_addr;	/* bitmap or y addr */
+	uint32 *cbcr_addr;	/* cbcr addr */
+	uint32 width;		/* image width */
+	uint32 mdpOp;		/* image operation (rotation, flip up/down, alpha/tp) */
+	uint32 tpVal;		/* transparency color */
+	uint32 alpha;		/* alpha percentage 0%(0x0) ~ 100%(0x100) */
+	int    sp_value;        /* sharpening strength */
+} MDPIMG;
+
+#define MDP_OUTP(addr, data) outpdw((addr), (data))
+
+#define MDP_BASE msm_mdp_base
+
+typedef enum {
+	MDP_BC_SCALE_POINT2_POINT4,
+	MDP_BC_SCALE_POINT4_POINT6,
+	MDP_BC_SCALE_POINT6_POINT8,
+	MDP_BC_SCALE_POINT8_1,
+	MDP_BC_SCALE_UP,
+	MDP_PR_SCALE_POINT2_POINT4,
+	MDP_PR_SCALE_POINT4_POINT6,
+	MDP_PR_SCALE_POINT6_POINT8,
+	MDP_PR_SCALE_POINT8_1,
+	MDP_PR_SCALE_UP,
+	MDP_SCALE_BLUR,
+	MDP_INIT_SCALE
+} MDP_SCALE_MODE;
+
+typedef enum {
+	MDP_BLOCK_POWER_OFF,
+	MDP_BLOCK_POWER_ON
+} MDP_BLOCK_POWER_STATE;
+
+typedef enum {
+	MDP_CMD_BLOCK,
+	MDP_OVERLAY0_BLOCK,
+	MDP_MASTER_BLOCK,
+	MDP_PPP_BLOCK,
+	MDP_DMA2_BLOCK,
+	MDP_DMA3_BLOCK,
+	MDP_DMA_S_BLOCK,
+	MDP_DMA_E_BLOCK,
+	MDP_OVERLAY1_BLOCK,
+	MDP_MAX_BLOCK
+} MDP_BLOCK_TYPE;
+
+/* Let's keep Q Factor power of 2 for optimization */
+#define MDP_SCALE_Q_FACTOR 512
+
+#ifdef CONFIG_FB_MSM_MDP31
+#define MDP_MAX_X_SCALE_FACTOR (MDP_SCALE_Q_FACTOR*8)
+#define MDP_MIN_X_SCALE_FACTOR (MDP_SCALE_Q_FACTOR/8)
+#define MDP_MAX_Y_SCALE_FACTOR (MDP_SCALE_Q_FACTOR*8)
+#define MDP_MIN_Y_SCALE_FACTOR (MDP_SCALE_Q_FACTOR/8)
+#else
+#define MDP_MAX_X_SCALE_FACTOR (MDP_SCALE_Q_FACTOR*4)
+#define MDP_MIN_X_SCALE_FACTOR (MDP_SCALE_Q_FACTOR/4)
+#define MDP_MAX_Y_SCALE_FACTOR (MDP_SCALE_Q_FACTOR*4)
+#define MDP_MIN_Y_SCALE_FACTOR (MDP_SCALE_Q_FACTOR/4)
+#endif
+
+/* SHIM Q Factor */
+#define PHI_Q_FACTOR          29
+#define PQF_PLUS_5            (PHI_Q_FACTOR + 5)	/* due to 32 phases */
+#define PQF_PLUS_4            (PHI_Q_FACTOR + 4)
+#define PQF_PLUS_2            (PHI_Q_FACTOR + 2)	/* to get 4.0 */
+#define PQF_MINUS_2           (PHI_Q_FACTOR - 2)	/* to get 0.25 */
+#define PQF_PLUS_5_PLUS_2     (PQF_PLUS_5 + 2)
+#define PQF_PLUS_5_MINUS_2    (PQF_PLUS_5 - 2)
+
+#define MDP_CONVTP(tpVal) (((tpVal&0xF800)<<8)|((tpVal&0x7E0)<<5)|((tpVal&0x1F)<<3))
+
+#define MDPOP_ROTATION (MDPOP_ROT90|MDPOP_LR|MDPOP_UD)
+#define MDP_CHKBIT(val, bit) ((bit) == ((val) & (bit)))
+
+/* overlay interface API defines */
+typedef enum {
+	MORE_IBUF,
+	FINAL_IBUF,
+	COMPLETE_IBUF
+} MDP_IBUF_STATE;
+
+struct mdp_dirty_region {
+	__u32 xoffset;		/* source origin in the x-axis */
+	__u32 yoffset;		/* source origin in the y-axis */
+	__u32 width;		/* number of pixels in the x-axis */
+	__u32 height;		/* number of pixels in the y-axis */
+};
+
+/*
+ * MDP extended data types
+ */
+typedef struct mdp_roi_s {
+	uint32 x;
+	uint32 y;
+	uint32 width;
+	uint32 height;
+	int32 lcd_x;
+	int32 lcd_y;
+	uint32 dst_width;
+	uint32 dst_height;
+} MDP_ROI;
+
+typedef struct mdp_ibuf_s {
+	uint8 *buf;
+	uint32 bpp;
+	uint32 ibuf_type;
+	uint32 ibuf_width;
+	uint32 ibuf_height;
+
+	MDP_ROI roi;
+	MDPIMG mdpImg;
+
+	int32 dma_x;
+	int32 dma_y;
+	uint32 dma_w;
+	uint32 dma_h;
+
+	uint32 vsync_enable;
+} MDPIBUF;
+
+struct mdp_dma_data {
+	boolean busy;
+	boolean dmap_busy;
+	boolean waiting;
+	struct mutex ov_mutex;
+	struct semaphore mutex;
+	struct completion comp;
+	struct completion dmap_comp;
+};
+
+#define MDP_CMD_DEBUG_ACCESS_BASE   (MDP_BASE+0x10000)
+
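+/*
+ * Per-engine "term" bits used by mdp_enable_irq()/mdp_disable_irq() and
+ * mdp_pipe_kickoff() to identify which MDP engine is being started.
+ */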
+#define MDP_DMA2_TERM 0x1
+#define MDP_DMA3_TERM 0x2
+#define MDP_PPP_TERM 0x4
+#define MDP_DMA_S_TERM 0x8
+#define MDP_DMA_E_TERM 0x10
+#ifdef CONFIG_FB_MSM_MDP40
+#define MDP_OVERLAY0_TERM 0x20
+#define MDP_OVERLAY1_TERM 0x40
+#endif
+#define MDP_HISTOGRAM_TERM 0x80
+
+#define ACTIVE_START_X_EN BIT(31)
+#define ACTIVE_START_Y_EN BIT(31)
+#define ACTIVE_HIGH 0
+#define ACTIVE_LOW 1
+#define MDP_DMA_S_DONE  BIT(2)
+#define MDP_DMA_E_DONE  BIT(3)
+#define LCDC_FRAME_START    BIT(15)
+#define LCDC_UNDERFLOW      BIT(16)
+
+#ifdef CONFIG_FB_MSM_MDP22
+#define MDP_DMA_P_DONE 	BIT(2)
+#else
+#define MDP_DMA_P_DONE 	BIT(14)
+#endif
+
+#define MDP_PPP_DONE 				BIT(0)
+#define TV_OUT_DMA3_DONE    BIT(6)
+#define TV_ENC_UNDERRUN     BIT(7)
+#define TV_OUT_DMA3_START   BIT(13)
+#define MDP_HIST_DONE       BIT(20)
+
+#ifdef CONFIG_FB_MSM_MDP22
+#define MDP_ANY_INTR_MASK (MDP_PPP_DONE| \
+			MDP_DMA_P_DONE| \
+			TV_ENC_UNDERRUN)
+#else
+#define MDP_ANY_INTR_MASK (MDP_PPP_DONE| \
+			MDP_DMA_P_DONE| \
+			MDP_DMA_S_DONE| \
+			MDP_DMA_E_DONE| \
+			LCDC_UNDERFLOW| \
+			MDP_HIST_DONE| \
+			TV_ENC_UNDERRUN)
+#endif
+
+#define MDP_TOP_LUMA       16
+#define MDP_TOP_CHROMA     0
+#define MDP_BOTTOM_LUMA    19
+#define MDP_BOTTOM_CHROMA  3
+#define MDP_LEFT_LUMA      22
+#define MDP_LEFT_CHROMA    6
+#define MDP_RIGHT_LUMA     25
+#define MDP_RIGHT_CHROMA   9
+
+#define CLR_G 0x0
+#define CLR_B 0x1
+#define CLR_R 0x2
+#define CLR_ALPHA 0x3
+
+#define CLR_Y  CLR_G
+#define CLR_CB CLR_B
+#define CLR_CR CLR_R
+
+/* from lsb to msb */
+#define MDP_GET_PACK_PATTERN(a,x,y,z,bit) (((a)<<(bit*3))|((x)<<(bit*2))|((y)<<bit)|(z))
+
+/*
+ * 0x0000 0x0004 0x0008 MDP sync config
+ */
+#ifdef CONFIG_FB_MSM_MDP22
+#define MDP_SYNCFG_HGT_LOC 22
+#define MDP_SYNCFG_VSYNC_EXT_EN BIT(21)
+#define MDP_SYNCFG_VSYNC_INT_EN BIT(20)
+#else
+#define MDP_SYNCFG_HGT_LOC 21
+#define MDP_SYNCFG_VSYNC_EXT_EN BIT(20)
+#define MDP_SYNCFG_VSYNC_INT_EN BIT(19)
+#define MDP_HW_VSYNC
+#endif
+
+/*
+ * 0x0018 MDP VSYNC THRESHOLD
+ */
+#define MDP_PRIM_BELOW_LOC 0
+#define MDP_PRIM_ABOVE_LOC 8
+
+/*
+ * MDP_PRIMARY_VSYNC_OUT_CTRL
+ * 0x0080,84,88 internal vsync pulse config
+ */
+#define VSYNC_PULSE_EN BIT(31)
+#define VSYNC_PULSE_INV BIT(30)
+
+/*
+ * 0x008c MDP VSYNC CONTROL
+ */
+#define DISP0_VSYNC_MAP_VSYNC0 0
+#define DISP0_VSYNC_MAP_VSYNC1 BIT(0)
+#define DISP0_VSYNC_MAP_VSYNC2 (BIT(0)|BIT(1))
+
+#define DISP1_VSYNC_MAP_VSYNC0 0
+#define DISP1_VSYNC_MAP_VSYNC1 BIT(2)
+#define DISP1_VSYNC_MAP_VSYNC2 (BIT(2)|BIT(3))
+
+#define PRIMARY_LCD_SYNC_EN BIT(4)
+#define PRIMARY_LCD_SYNC_DISABLE 0
+
+#define SECONDARY_LCD_SYNC_EN BIT(5)
+#define SECONDARY_LCD_SYNC_DISABLE 0
+
+#define EXTERNAL_LCD_SYNC_EN BIT(6)
+#define EXTERNAL_LCD_SYNC_DISABLE 0
+
+/*
+ * 0x101f0 MDP VSYNC Threshold
+ */
+#define VSYNC_THRESHOLD_ABOVE_LOC 0
+#define VSYNC_THRESHOLD_BELOW_LOC 16
+#define VSYNC_ANTI_TEAR_EN BIT(31)
+
+/*
+ * 0x10004 command config
+ */
+#define MDP_CMD_DBGBUS_EN BIT(0)
+
+/*
+ * 0x10124 or 0x101d4 PPP source config
+ */
+#define PPP_SRC_C0G_8BITS (BIT(1)|BIT(0))
+#define PPP_SRC_C1B_8BITS (BIT(3)|BIT(2))
+#define PPP_SRC_C2R_8BITS (BIT(5)|BIT(4))
+#define PPP_SRC_C3A_8BITS (BIT(7)|BIT(6))
+
+#define PPP_SRC_C0G_6BITS BIT(1)
+#define PPP_SRC_C1B_6BITS BIT(3)
+#define PPP_SRC_C2R_6BITS BIT(5)
+
+#define PPP_SRC_C0G_5BITS BIT(0)
+#define PPP_SRC_C1B_5BITS BIT(2)
+#define PPP_SRC_C2R_5BITS BIT(4)
+
+#define PPP_SRC_C3_ALPHA_EN BIT(8)
+
+#define PPP_SRC_BPP_INTERLVD_1BYTES 0
+#define PPP_SRC_BPP_INTERLVD_2BYTES BIT(9)
+#define PPP_SRC_BPP_INTERLVD_3BYTES BIT(10)
+#define PPP_SRC_BPP_INTERLVD_4BYTES (BIT(10)|BIT(9))
+
+#define PPP_SRC_BPP_ROI_ODD_X BIT(11)
+#define PPP_SRC_BPP_ROI_ODD_Y BIT(12)
+#define PPP_SRC_INTERLVD_2COMPONENTS BIT(13)
+#define PPP_SRC_INTERLVD_3COMPONENTS BIT(14)
+#define PPP_SRC_INTERLVD_4COMPONENTS (BIT(14)|BIT(13))
+
+/*
+ * RGB666 unpack format
+ * TIGHT means R6+G6+B6 together
+ * LOOSE means R6+2 +G6+2+ B6+2 (with MSB)
+ * or 2+R6 +2+G6 +2+B6 (with LSB)
+ */
+#define PPP_SRC_UNPACK_TIGHT BIT(17)
+#define PPP_SRC_UNPACK_LOOSE 0
+#define PPP_SRC_UNPACK_ALIGN_LSB 0
+#define PPP_SRC_UNPACK_ALIGN_MSB BIT(18)
+
+#define PPP_SRC_FETCH_PLANES_INTERLVD 0
+#define PPP_SRC_FETCH_PLANES_PSEUDOPLNR BIT(20)
+
+#define PPP_SRC_WMV9_MODE BIT(21)	/* Windows Media Video 9 */
+
+/*
+ * 0x10138 PPP operation config
+ */
+#define PPP_OP_SCALE_X_ON BIT(0)
+#define PPP_OP_SCALE_Y_ON BIT(1)
+
+#define PPP_OP_CONVERT_RGB2YCBCR 0
+#define PPP_OP_CONVERT_YCBCR2RGB BIT(2)
+#define PPP_OP_CONVERT_ON BIT(3)
+
+#define PPP_OP_CONVERT_MATRIX_PRIMARY 0
+#define PPP_OP_CONVERT_MATRIX_SECONDARY BIT(4)
+
+#define PPP_OP_LUT_C0_ON BIT(5)
+#define PPP_OP_LUT_C1_ON BIT(6)
+#define PPP_OP_LUT_C2_ON BIT(7)
+
+/* rotate or blend enable */
+#define PPP_OP_ROT_ON BIT(8)
+
+#define PPP_OP_ROT_90 BIT(9)
+#define PPP_OP_FLIP_LR BIT(10)
+#define PPP_OP_FLIP_UD BIT(11)
+
+#define PPP_OP_BLEND_ON BIT(12)
+
+#define PPP_OP_BLEND_SRCPIXEL_ALPHA 0
+#define PPP_OP_BLEND_DSTPIXEL_ALPHA BIT(13)
+#define PPP_OP_BLEND_CONSTANT_ALPHA BIT(14)
+#define PPP_OP_BLEND_SRCPIXEL_TRANSP (BIT(13)|BIT(14))
+
+#define PPP_OP_BLEND_ALPHA_BLEND_NORMAL 0
+#define PPP_OP_BLEND_ALPHA_BLEND_REVERSE BIT(15)
+
+#define PPP_OP_DITHER_EN BIT(16)
+
+#define PPP_OP_COLOR_SPACE_RGB 0
+#define PPP_OP_COLOR_SPACE_YCBCR BIT(17)
+
+#define PPP_OP_SRC_CHROMA_RGB 0
+#define PPP_OP_SRC_CHROMA_H2V1 BIT(18)
+#define PPP_OP_SRC_CHROMA_H1V2 BIT(19)
+#define PPP_OP_SRC_CHROMA_420 (BIT(18)|BIT(19))
+#define PPP_OP_SRC_CHROMA_COSITE 0
+#define PPP_OP_SRC_CHROMA_OFFSITE BIT(20)
+
+#define PPP_OP_DST_CHROMA_RGB 0
+#define PPP_OP_DST_CHROMA_H2V1 BIT(21)
+#define PPP_OP_DST_CHROMA_H1V2 BIT(22)
+#define PPP_OP_DST_CHROMA_420 (BIT(21)|BIT(22))
+#define PPP_OP_DST_CHROMA_COSITE 0
+#define PPP_OP_DST_CHROMA_OFFSITE BIT(23)
+
+#define PPP_BLEND_CALPHA_TRNASP BIT(24)
+
+#define PPP_OP_BG_CHROMA_RGB 0
+#define PPP_OP_BG_CHROMA_H2V1 BIT(25)
+#define PPP_OP_BG_CHROMA_H1V2 BIT(26)
+#define PPP_OP_BG_CHROMA_420 (BIT(25)|BIT(26))
+#define PPP_OP_BG_CHROMA_SITE_COSITE 0
+#define PPP_OP_BG_CHROMA_SITE_OFFSITE BIT(27)
+#define PPP_OP_DEINT_EN BIT(28)
+
+#define PPP_BLEND_BG_USE_ALPHA_SEL      (1 << 0)
+#define PPP_BLEND_BG_ALPHA_REVERSE      (1 << 3)
+#define PPP_BLEND_BG_SRCPIXEL_ALPHA     (0 << 1)
+#define PPP_BLEND_BG_DSTPIXEL_ALPHA     (1 << 1)
+#define PPP_BLEND_BG_CONSTANT_ALPHA     (2 << 1)
+#define PPP_BLEND_BG_CONST_ALPHA_VAL(x) ((x) << 24)
+
+#define PPP_OP_DST_RGB 0
+#define PPP_OP_DST_YCBCR BIT(30)
+/*
+ * 0x10150 PPP destination config
+ */
+#define PPP_DST_C0G_8BIT (BIT(0)|BIT(1))
+#define PPP_DST_C1B_8BIT (BIT(3)|BIT(2))
+#define PPP_DST_C2R_8BIT (BIT(5)|BIT(4))
+#define PPP_DST_C3A_8BIT (BIT(7)|BIT(6))
+
+#define PPP_DST_C0G_6BIT BIT(1)
+#define PPP_DST_C1B_6BIT BIT(3)
+#define PPP_DST_C2R_6BIT BIT(5)
+
+#define PPP_DST_C0G_5BIT BIT(0)
+#define PPP_DST_C1B_5BIT BIT(2)
+#define PPP_DST_C2R_5BIT BIT(4)
+
+#define PPP_DST_C3A_8BIT (BIT(7)|BIT(6))
+#define PPP_DST_C3ALPHA_EN BIT(8)
+
+#define PPP_DST_PACKET_CNT_INTERLVD_2ELEM BIT(9)
+#define PPP_DST_PACKET_CNT_INTERLVD_3ELEM BIT(10)
+#define PPP_DST_PACKET_CNT_INTERLVD_4ELEM (BIT(10)|BIT(9))
+#define PPP_DST_PACKET_CNT_INTERLVD_6ELEM (BIT(11)|BIT(9))
+
+#define PPP_DST_PACK_LOOSE 0
+#define PPP_DST_PACK_TIGHT BIT(13)
+#define PPP_DST_PACK_ALIGN_LSB 0
+#define PPP_DST_PACK_ALIGN_MSB BIT(14)
+
+#define PPP_DST_OUT_SEL_AXI 0
+#define PPP_DST_OUT_SEL_MDDI BIT(15)
+
+#define PPP_DST_BPP_2BYTES BIT(16)
+#define PPP_DST_BPP_3BYTES BIT(17)
+#define PPP_DST_BPP_4BYTES (BIT(17)|BIT(16))
+
+#define PPP_DST_PLANE_INTERLVD 0
+#define PPP_DST_PLANE_PLANAR BIT(18)
+#define PPP_DST_PLANE_PSEUDOPLN BIT(19)
+
+#define PPP_DST_TO_TV BIT(20)
+
+#define PPP_DST_MDDI_PRIMARY 0
+#define PPP_DST_MDDI_SECONDARY BIT(21)
+#define PPP_DST_MDDI_EXTERNAL BIT(22)
+
+/*
+ * 0x10180 DMA config
+ */
+#define DMA_DSTC0G_8BITS (BIT(1)|BIT(0))
+#define DMA_DSTC1B_8BITS (BIT(3)|BIT(2))
+#define DMA_DSTC2R_8BITS (BIT(5)|BIT(4))
+
+#define DMA_DSTC0G_6BITS BIT(1)
+#define DMA_DSTC1B_6BITS BIT(3)
+#define DMA_DSTC2R_6BITS BIT(5)
+
+#define DMA_DSTC0G_5BITS BIT(0)
+#define DMA_DSTC1B_5BITS BIT(2)
+#define DMA_DSTC2R_5BITS BIT(4)
+
+#define DMA_PACK_TIGHT                      BIT(6)
+#define DMA_PACK_LOOSE                      0
+#define DMA_PACK_ALIGN_LSB                  0
+/*
+ * use DMA_PACK_ALIGN_MSB if the upper 6 bits of the 8-bit output
+ * from the LCDC block map onto the 6 pins out to the panel
+ */
+#define DMA_PACK_ALIGN_MSB                  BIT(7)
+#define DMA_PACK_PATTERN_RGB \
+       (MDP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 2)<<8)
+#define DMA_PACK_PATTERN_BGR \
+       (MDP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 2)<<8)
+#define DMA_OUT_SEL_AHB                     0
+#define DMA_OUT_SEL_LCDC                    BIT(20)
+#define DMA_IBUF_FORMAT_RGB888              0
+#define DMA_IBUF_FORMAT_xRGB8888_OR_ARGB8888  BIT(26)
+
+#ifdef CONFIG_FB_MSM_MDP303
+#define DMA_OUT_SEL_DSI_CMD                  BIT(19)
+#define DMA_OUT_SEL_DSI_VIDEO               (3 << 19)
+#endif
+
+#ifdef CONFIG_FB_MSM_MDP22
+#define DMA_OUT_SEL_MDDI BIT(14)
+#define DMA_AHBM_LCD_SEL_PRIMARY 0
+#define DMA_AHBM_LCD_SEL_SECONDARY BIT(15)
+#define DMA_IBUF_C3ALPHA_EN BIT(16)
+#define DMA_DITHER_EN BIT(17)
+#define DMA_MDDI_DMAOUT_LCD_SEL_PRIMARY 0
+#define DMA_MDDI_DMAOUT_LCD_SEL_SECONDARY BIT(18)
+#define DMA_MDDI_DMAOUT_LCD_SEL_EXTERNAL BIT(19)
+#define DMA_IBUF_FORMAT_RGB565 BIT(20)
+#define DMA_IBUF_FORMAT_RGB888_OR_ARGB8888 0
+#define DMA_IBUF_NONCONTIGUOUS BIT(21)
+#else
+#define DMA_OUT_SEL_MDDI                    BIT(19)
+#define DMA_AHBM_LCD_SEL_PRIMARY            0
+#define DMA_AHBM_LCD_SEL_SECONDARY          0
+#define DMA_IBUF_C3ALPHA_EN                 0
+#define DMA_BUF_FORMAT_RGB565		BIT(25)
+#define DMA_DITHER_EN                       BIT(24)	/* dma_p */
+#define DMA_DEFLKR_EN                       BIT(24)	/* dma_e */
+#define DMA_MDDI_DMAOUT_LCD_SEL_PRIMARY     0
+#define DMA_MDDI_DMAOUT_LCD_SEL_SECONDARY   0
+#define DMA_MDDI_DMAOUT_LCD_SEL_EXTERNAL    0
+#define DMA_IBUF_FORMAT_RGB565              BIT(25)
+#define DMA_IBUF_NONCONTIGUOUS 0
+#endif
+
+/*
+ * MDDI Register
+ */
+#define MDDI_VDO_PACKET_DESC_16  0x5565
+#define MDDI_VDO_PACKET_DESC	 0x5666	/* 18 bits */
+#define MDDI_VDO_PACKET_DESC_24  0x5888
+
+#ifdef CONFIG_FB_MSM_MDP40
+#define MDP_INTR_ENABLE		(msm_mdp_base + 0x0050)
+#define MDP_INTR_STATUS		(msm_mdp_base + 0x0054)
+#define MDP_INTR_CLEAR		(msm_mdp_base + 0x0058)
+#define MDP_EBI2_LCD0		(msm_mdp_base + 0x0060)
+#define MDP_EBI2_LCD1		(msm_mdp_base + 0x0064)
+#define MDP_EBI2_PORTMAP_MODE	(msm_mdp_base + 0x0070)
+
+#define MDP_DMA_P_HIST_INTR_STATUS 	(msm_mdp_base + 0x95014)
+#define MDP_DMA_P_HIST_INTR_CLEAR 	(msm_mdp_base + 0x95018)
+#define MDP_DMA_P_HIST_INTR_ENABLE 	(msm_mdp_base + 0x9501C)
+#else
+#define MDP_INTR_ENABLE		(msm_mdp_base + 0x0020)
+#define MDP_INTR_STATUS		(msm_mdp_base + 0x0024)
+#define MDP_INTR_CLEAR		(msm_mdp_base + 0x0028)
+#define MDP_EBI2_LCD0		(msm_mdp_base + 0x003c)
+#define MDP_EBI2_LCD1		(msm_mdp_base + 0x0040)
+#define MDP_EBI2_PORTMAP_MODE	(msm_mdp_base + 0x005c)
+#endif
+
+#define MDP_FULL_BYPASS_WORD43  (msm_mdp_base + 0x101ac)
+
+#define MDP_CSC_PFMVn(n)	(msm_mdp_base + 0x40400 + 4 * (n))
+#define MDP_CSC_PRMVn(n)	(msm_mdp_base + 0x40440 + 4 * (n))
+#define MDP_CSC_PRE_BV1n(n)	(msm_mdp_base + 0x40500 + 4 * (n))
+#define MDP_CSC_PRE_BV2n(n)	(msm_mdp_base + 0x40540 + 4 * (n))
+#define MDP_CSC_POST_BV1n(n)	(msm_mdp_base + 0x40580 + 4 * (n))
+#define MDP_CSC_POST_BV2n(n)	(msm_mdp_base + 0x405c0 + 4 * (n))
+
+#ifdef CONFIG_FB_MSM_MDP31
+#define MDP_CSC_PRE_LV1n(n)	(msm_mdp_base + 0x40600 + 4 * (n))
+#define MDP_CSC_PRE_LV2n(n)	(msm_mdp_base + 0x40640 + 4 * (n))
+#define MDP_CSC_POST_LV1n(n)	(msm_mdp_base + 0x40680 + 4 * (n))
+#define MDP_CSC_POST_LV2n(n)	(msm_mdp_base + 0x406c0 + 4 * (n))
+#define MDP_PPP_SCALE_COEFF_LSBn(n)	(msm_mdp_base + 0x50400 + 8 * (n))
+#define MDP_PPP_SCALE_COEFF_MSBn(n)	(msm_mdp_base + 0x50404 + 8 * (n))
+
+#define SCALE_D0_SET  0
+#define SCALE_D1_SET  BIT(0)
+#define SCALE_D2_SET  BIT(1)
+#define SCALE_U1_SET  (BIT(0)|BIT(1))
+
+#else
+#define MDP_CSC_PRE_LV1n(n)	(msm_mdp_base + 0x40580 + 4 * (n))
+#endif
+
+#define MDP_CURSOR_WIDTH 64
+#define MDP_CURSOR_HEIGHT 64
+#define MDP_CURSOR_SIZE (MDP_CURSOR_WIDTH*MDP_CURSOR_HEIGHT*4)
+
+#define MDP_DMA_P_LUT_C0_EN   BIT(0)
+#define MDP_DMA_P_LUT_C1_EN   BIT(1)
+#define MDP_DMA_P_LUT_C2_EN   BIT(2)
+#define MDP_DMA_P_LUT_POST    BIT(4)
+
+void mdp_hw_init(void);
+int mdp_ppp_pipe_wait(void);
+void mdp_pipe_kickoff(uint32 term, struct msm_fb_data_type *mfd);
+void mdp_pipe_ctrl(MDP_BLOCK_TYPE block, MDP_BLOCK_POWER_STATE state,
+		   boolean isr);
+void mdp_set_dma_pan_info(struct fb_info *info, struct mdp_dirty_region *dirty,
+			  boolean sync);
+void mdp_dma_pan_update(struct fb_info *info);
+void mdp_refresh_screen(unsigned long data);
+int mdp_ppp_blit(struct fb_info *info, struct mdp_blit_req *req);
+void mdp_lcd_update_workqueue_handler(struct work_struct *work);
+void mdp_vsync_resync_workqueue_handler(struct work_struct *work);
+void mdp_dma2_update(struct msm_fb_data_type *mfd);
+void mdp_config_vsync(struct msm_fb_data_type *);
+uint32 mdp_get_lcd_line_counter(struct msm_fb_data_type *mfd);
+enum hrtimer_restart mdp_dma2_vsync_hrtimer_handler(struct hrtimer *ht);
+void mdp_set_scale(MDPIBUF *iBuf,
+		   uint32 dst_roi_width,
+		   uint32 dst_roi_height,
+		   boolean inputRGB, boolean outputRGB, uint32 *pppop_reg_ptr);
+void mdp_init_scale_table(void);
+void mdp_adjust_start_addr(uint8 **src0,
+			   uint8 **src1,
+			   int v_slice,
+			   int h_slice,
+			   int x,
+			   int y,
+			   uint32 width,
+			   uint32 height, int bpp, MDPIBUF *iBuf, int layer);
+void mdp_set_blend_attr(MDPIBUF *iBuf,
+			uint32 *alpha,
+			uint32 *tpVal,
+			uint32 perPixelAlpha, uint32 *pppop_reg_ptr);
+
+int mdp_dma3_on(struct platform_device *pdev);
+int mdp_dma3_off(struct platform_device *pdev);
+void mdp_dma3_update(struct msm_fb_data_type *mfd);
+
+int mdp_lcdc_on(struct platform_device *pdev);
+int mdp_lcdc_off(struct platform_device *pdev);
+void mdp_lcdc_update(struct msm_fb_data_type *mfd);
+
+#ifdef CONFIG_FB_MSM_MDP303
+int mdp_dsi_video_on(struct platform_device *pdev);
+int mdp_dsi_video_off(struct platform_device *pdev);
+void mdp_dsi_video_update(struct msm_fb_data_type *mfd);
+void mdp3_dsi_cmd_dma_busy_wait(struct msm_fb_data_type *mfd);
+#endif
+
+int mdp_hw_cursor_update(struct fb_info *info, struct fb_cursor *cursor);
+int mdp_hw_cursor_sync_update(struct fb_info *info, struct fb_cursor *cursor);
+void mdp_enable_irq(uint32 term);
+void mdp_disable_irq(uint32 term);
+void mdp_disable_irq_nosync(uint32 term);
+int mdp_get_bytes_per_pixel(uint32_t format,
+				 struct msm_fb_data_type *mfd);
+int mdp_set_core_clk(uint16 perf_level);
+unsigned long mdp_get_core_clk(void);
+unsigned long mdp_perf_level2clk_rate(uint32 perf_level);
+
+#ifdef CONFIG_MSM_BUS_SCALING
+int mdp_bus_scale_update_request(uint32_t index);
+#endif
+
+#ifdef MDP_HW_VSYNC
+void mdp_hw_vsync_clk_enable(struct msm_fb_data_type *mfd);
+void mdp_hw_vsync_clk_disable(struct msm_fb_data_type *mfd);
+void mdp_vsync_clk_disable(void);
+void mdp_vsync_clk_enable(void);
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+int mdp_debugfs_init(void);
+#endif
+
+void mdp_dma_s_update(struct msm_fb_data_type *mfd);
+int mdp_start_histogram(struct fb_info *info);
+int mdp_stop_histogram(struct fb_info *info);
+int mdp_histogram_ctrl(boolean en);
+
+#ifdef CONFIG_FB_MSM_MDP303
+static inline void mdp4_dsi_cmd_dma_busy_wait(struct msm_fb_data_type *mfd)
+{
+	/* empty */
+}
+
+static inline void mdp4_dsi_blt_dmap_busy_wait(struct msm_fb_data_type *mfd)
+{
+	/* empty */
+}
+static inline void mdp4_overlay_dsi_state_set(int state)
+{
+	/* empty */
+}
+#endif
+
+#endif /* MDP_H */
diff --git a/drivers/video/msm/mdp4.h b/drivers/video/msm/mdp4.h
new file mode 100644
index 0000000..d6cf2d3
--- /dev/null
+++ b/drivers/video/msm/mdp4.h
@@ -0,0 +1,565 @@
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDP4_H
+#define MDP4_H
+
+extern struct mdp_dma_data dma2_data;
+extern struct mdp_dma_data dma_s_data;
+extern struct mdp_dma_data dma_e_data;
+extern struct mdp_histogram mdp_hist;
+extern struct completion mdp_hist_comp;
+extern boolean mdp_is_hist_start;
+extern boolean mdp_is_in_isr;
+extern uint32 mdp_intr_mask;
+extern spinlock_t mdp_spin_lock;
+extern struct mdp4_statistic mdp4_stat;
+extern uint32 mdp4_extn_disp;
+
+#define MDP4_OVERLAYPROC0_BASE	0x10000
+#define MDP4_OVERLAYPROC1_BASE	0x18000
+
+#define MDP4_VIDEO_BASE 0x20000
+#define MDP4_VIDEO_OFF 0x10000
+
+#define MDP4_RGB_BASE 0x40000
+#define MDP4_RGB_OFF 0x10000
+
+enum mdp4_overlay_status {
+	MDP4_OVERLAY_TYPE_UNSET,
+	MDP4_OVERLAY_TYPE_SET,
+	MDP4_OVERLAY_TYPE_MAX
+};
+
+typedef int (*cmd_fxn_t)(struct platform_device *pdev);
+
+enum {		/* display */
+	PRIMARY_INTF_SEL,
+	SECONDARY_INTF_SEL,
+	EXTERNAL_INTF_SEL
+};
+
+enum {
+	LCDC_RGB_INTF,			/* 0 */
+	DTV_INTF = LCDC_RGB_INTF,	/* 0 */
+	MDDI_LCDC_INTF,			/* 1 */
+	MDDI_INTF,			/* 2 */
+	EBI2_INTF,			/* 3 */
+	TV_INTF = EBI2_INTF,		/* 3 */
+	DSI_VIDEO_INTF,
+	DSI_CMD_INTF
+};
+
+enum {
+	MDDI_PRIMARY_SET,
+	MDDI_SECONDARY_SET,
+	MDDI_EXTERNAL_SET
+};
+
+enum {
+	EBI2_LCD0,
+	EBI2_LCD1
+};
+
+#define MDP4_3D_NONE		0
+#define MDP4_3D_SIDE_BY_SIDE	1
+#define MDP4_3D_TOP_DOWN	2
+
+#define MDP4_PANEL_MDDI		BIT(0)
+#define MDP4_PANEL_LCDC		BIT(1)
+#define MDP4_PANEL_DTV		BIT(2)
+#define MDP4_PANEL_ATV		BIT(3)
+#define MDP4_PANEL_DSI_VIDEO	BIT(4)
+#define MDP4_PANEL_DSI_CMD	BIT(5)
+
+enum {
+	OVERLAY_MODE_NONE,
+	OVERLAY_MODE_BLT
+};
+
+enum {
+	OVERLAY_REFRESH_ON_DEMAND,
+	OVERLAY_REFRESH_VSYNC,
+	OVERLAY_REFRESH_VSYNC_HALF,
+	OVERLAY_REFRESH_VSYNC_QUARTER
+};
+
+enum {
+	OVERLAY_FRAMEBUF,
+	OVERLAY_DIRECTOUT
+};
+
+/* system interrupts */
+#define INTR_OVERLAY0_DONE		BIT(0)
+#define INTR_OVERLAY1_DONE		BIT(1)
+#define INTR_DMA_S_DONE			BIT(2)
+#define INTR_DMA_E_DONE			BIT(3)
+#define INTR_DMA_P_DONE			BIT(4)
+#define INTR_VG1_HISTOGRAM		BIT(5)
+#define INTR_VG2_HISTOGRAM		BIT(6)
+#define INTR_PRIMARY_VSYNC		BIT(7)
+#define INTR_PRIMARY_INTF_UDERRUN	BIT(8)
+#define INTR_EXTERNAL_VSYNC		BIT(9)
+#define INTR_EXTERNAL_INTF_UDERRUN	BIT(10)
+#define INTR_PRIMARY_READ_PTR		BIT(11)
+#define INTR_DMA_P_HISTOGRAM		BIT(17)
+
+/* histogram interrupts */
+#define INTR_HIST_DONE			BIT(1)
+#define INTR_HIST_RESET_SEQ_DONE	BIT(0)
+
+
+#ifdef CONFIG_FB_MSM_OVERLAY
+#define MDP4_ANY_INTR_MASK	(INTR_OVERLAY0_DONE|INTR_DMA_S_DONE | \
+					INTR_DMA_P_HISTOGRAM)
+#else
+#define MDP4_ANY_INTR_MASK	(INTR_DMA_P_DONE| \
+				INTR_DMA_P_HISTOGRAM)
+#endif
+
+enum {
+	OVERLAY_PIPE_RGB1,
+	OVERLAY_PIPE_RGB2,
+	OVERLAY_PIPE_VG1,	/* video/graphic */
+	OVERLAY_PIPE_VG2,
+	OVERLAY_PIPE_MAX
+};
+
+/* 2 VG pipes can be shared by RGB and VIDEO */
+#define MDP4_MAX_PIPE 	(OVERLAY_PIPE_MAX + 2)
+
+#define OVERLAY_TYPE_RGB	0x01
+#define	OVERLAY_TYPE_VIDEO	0x02
+
+enum {
+	MDP4_MIXER0,
+	MDP4_MIXER1,
+	MDP4_MIXER_MAX
+};
+
+#define MDP4_MAX_MIXER	2
+
+enum {
+	OVERLAY_PLANE_INTERLEAVED,
+	OVERLAY_PLANE_PLANAR,
+	OVERLAY_PLANE_PSEUDO_PLANAR
+};
+
+enum {
+	MDP4_MIXER_STAGE_UNUNSED,	/* pipe not used */
+	MDP4_MIXER_STAGE_BASE,
+	MDP4_MIXER_STAGE0,	/* zorder 0 */
+	MDP4_MIXER_STAGE1,	/* zorder 1 */
+	MDP4_MIXER_STAGE2	/* zorder 2 */
+};
+
+#define MDP4_MAX_STAGE	4
+
+enum {
+	MDP4_FRAME_FORMAT_LINEAR,
+	MDP4_FRAME_FORMAT_ARGB_TILE,
+	MDP4_FRAME_FORMAT_VIDEO_SUPERTILE
+};
+
+enum {
+	MDP4_CHROMA_RGB,
+	MDP4_CHROMA_H2V1,
+	MDP4_CHROMA_H1V2,
+	MDP4_CHROMA_420
+};
+
+#define MDP4_BLEND_BG_TRANSP_EN		BIT(9)
+#define MDP4_BLEND_FG_TRANSP_EN		BIT(8)
+#define MDP4_BLEND_BG_MOD_ALPHA		BIT(7)
+#define MDP4_BLEND_BG_INV_ALPHA		BIT(6)
+#define MDP4_BLEND_BG_ALPHA_FG_CONST	(0 << 4)
+#define MDP4_BLEND_BG_ALPHA_BG_CONST	(1 << 4)
+#define MDP4_BLEND_BG_ALPHA_FG_PIXEL	(2 << 4)
+#define MDP4_BLEND_BG_ALPHA_BG_PIXEL	(3 << 4)
+#define MDP4_BLEND_FG_MOD_ALPHA		BIT(3)
+#define MDP4_BLEND_FG_INV_ALPHA		BIT(2)
+#define MDP4_BLEND_FG_ALPHA_FG_CONST	(0 << 0)
+#define MDP4_BLEND_FG_ALPHA_BG_CONST	(1 << 0)
+#define MDP4_BLEND_FG_ALPHA_FG_PIXEL	(2 << 0)
+#define MDP4_BLEND_FG_ALPHA_BG_PIXEL	(3 << 0)
+
+#define MDP4_FORMAT_SOLID_FILL		BIT(22)
+#define MDP4_FORMAT_UNPACK_ALIGN_MSB	BIT(18)
+#define MDP4_FORMAT_UNPACK_TIGHT	BIT(17)
+#define MDP4_FORMAT_90_ROTATED		BIT(12)
+#define MDP4_FORMAT_ALPHA_ENABLE	BIT(8)
+
+#define MDP4_OP_DEINT_ODD_REF  	BIT(19)
+#define MDP4_OP_DEINT_EN	BIT(18)
+#define MDP4_OP_IGC_LUT_EN	BIT(16)
+#define MDP4_OP_DITHER_EN     	BIT(15)
+#define MDP4_OP_FLIP_UD		BIT(14)
+#define MDP4_OP_FLIP_LR		BIT(13)
+#define MDP4_OP_CSC_EN		BIT(11)
+#define MDP4_OP_SRC_DATA_YCBCR	BIT(9)
+#define MDP4_OP_SCALEY_FIR 		(0 << 4)
+#define MDP4_OP_SCALEY_MN_PHASE 	(1 << 4)
+#define MDP4_OP_SCALEY_PIXEL_RPT	(2 << 4)
+#define MDP4_OP_SCALEX_FIR 		(0 << 2)
+#define MDP4_OP_SCALEX_MN_PHASE 	(1 << 2)
+#define MDP4_OP_SCALEX_PIXEL_RPT 	(2 << 2)
+#define MDP4_OP_SCALEY_EN	BIT(1)
+#define MDP4_OP_SCALEX_EN	BIT(0)
+
+#define MDP4_PIPE_PER_MIXER	2
+
+#define MDP4_MAX_PLANE		4
+
+
+struct mdp4_overlay_pipe {
+	uint32 pipe_used;
+	uint32 pipe_type;		/* rgb, video/graphic */
+	uint32 pipe_num;
+	uint32 pipe_ndx;
+	uint32 pipe_share;
+	uint32 mixer_num;		/* which mixer used */
+	uint32 mixer_stage;		/* which stage of mixer used */
+	uint32 src_format;
+	uint32 src_width;	/* source img width */
+	uint32 src_height;	/* source img height */
+	uint32 is_3d;
+	uint32 src_width_3d;	/* source img width */
+	uint32 src_height_3d;	/* source img height */
+	uint32 src_w;		/* roi */
+	uint32 src_h;		/* roi */
+	uint32 src_x;		/* roi */
+	uint32 src_y;		/* roi */
+	uint32 dst_w;		/* roi */
+	uint32 dst_h;		/* roi */
+	uint32 dst_x;		/* roi */
+	uint32 dst_y;		/* roi */
+	uint32 flags;
+	uint32 op_mode;
+	uint32 transp;
+	uint32 blend_op;
+	uint32 phasex_step;
+	uint32 phasey_step;
+	uint32 alpha;
+	uint32 is_fg;		/* control alpha & color key */
+	uint32 srcp0_addr;	/* interleave, luma */
+	uint32 srcp0_ystride;
+	uint32 srcp1_addr;	/* pseudoplanar, chroma plane */
+	uint32 srcp1_ystride;
+	uint32 srcp2_addr;	/* planar color 2*/
+	uint32 srcp2_ystride;
+	uint32 srcp3_addr;	/* alpha/color 3 */
+	uint32 srcp3_ystride;
+	uint32 fetch_plane;
+	uint32 frame_format;		/* video */
+	uint32 chroma_site;		/* video */
+	uint32 chroma_sample;		/* video */
+	uint32 solid_fill;
+	uint32 vc1_reduce;		/* video */
+	uint32 unpack_align_msb;/* 0 to LSB, 1 to MSB */
+	uint32 unpack_tight;/* 0 for loose, 1 for tight */
+	uint32 unpack_count;/* 0 = 1 component, 1 = 2 component ... */
+	uint32 rotated_90; /* has been rotated 90 degree */
+	uint32 bpp;	/* byte per pixel */
+	uint32 alpha_enable;/*  source has alpha */
+	/*
+	 * number of bits for source component,
+	 * 0 = 1 bit, 1 = 2 bits, 2 = 6 bits, 3 = 8 bits
+	 */
+	uint32 a_bit;	/* component 3, alpha */
+	uint32 r_bit;	/* component 2, R_Cr */
+	uint32 b_bit;	/* component 1, B_Cb */
+	uint32 g_bit;	/* component 0, G_lumz */
+	/*
+	 * unpack pattern
+	 * A = C3, R = C2, B = C1, G = C0
+	 */
+	uint32 element3; /* 0 = C0, 1 = C1, 2 = C2, 3 = C3 */
+	uint32 element2; /* 0 = C0, 1 = C1, 2 = C2, 3 = C3 */
+	uint32 element1; /* 0 = C0, 1 = C1, 2 = C2, 3 = C3 */
+	uint32 element0; /* 0 = C0, 1 = C1, 2 = C2, 3 = C3 */
+	struct completion comp;
+	ulong blt_addr; /* blt mode addr */
+	ulong blt_base;
+	ulong blt_offset;
+	uint32 blt_cnt;
+	uint32 ov_cnt;
+	uint32 dmap_cnt;
+	uint32 blt_end;
+	uint32 luma_align_size;
+	struct completion dmas_comp;
+	struct mdp_overlay req_data;
+};
+
+#define MDP4_MAX_SHARE	2
+
+struct mdp4_pipe_desc {
+	int share;
+	int ref_cnt;
+	int ndx_list[MDP4_MAX_SHARE];
+	struct mdp4_overlay_pipe *player;
+};
+
+struct mdp4_statistic {
+	ulong intr_tot;
+	ulong intr_dma_p;
+	ulong intr_dma_s;
+	ulong intr_dma_e;
+	ulong intr_overlay0;
+	ulong intr_overlay1;
+	ulong intr_underrun_p;	/* Primary interface */
+	ulong intr_underrun_e;	/* external interface */
+	ulong intr_dsi;
+	ulong kickoff_mddi;
+	ulong kickoff_lcdc;
+	ulong kickoff_dtv;
+	ulong kickoff_atv;
+	ulong kickoff_dsi;
+	ulong writeback;	/* blt */
+	ulong overlay_set[MDP4_MIXER_MAX];
+	ulong overlay_unset[MDP4_MIXER_MAX];
+	ulong overlay_play[MDP4_MIXER_MAX];
+	ulong pipe[MDP4_MAX_PIPE];
+	ulong dsi_clkoff;
+	ulong err_mixer;
+	ulong err_zorder;
+	ulong err_size;
+	ulong err_scale;
+	ulong err_format;
+};
+
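+/*
+ * The blt (writeback) buffer is carved out of the buffer passed in:
+ * the offset skips the two mmap'ed framebuffer pages plus a
+ * 1920x1080, 2-byte-per-pixel reservation for HDMI.
+ */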
+#ifdef CONFIG_FB_MSM_OVERLAY_WRITEBACK
+static inline int mdp4_overlay_writeback_setup(struct fb_info *fbi,
+		struct mdp4_overlay_pipe *pipe, uint8 *buf, int bpp)
+{
+	int off;
+
+	pipe->blt_base = (ulong) buf;
+	off = ALIGN(fbi->var.xres, 32) * fbi->var.yres * bpp * 2;
+	off += (1920 * 1080 * 2 * 1); /* hdmi */
+	pipe->blt_base += off;
+
+	pr_info("%s: base=%x offset=%x\n",
+			__func__, (int) pipe->blt_base, (int)off);
+
+	return off;
+
+}
+#else
+static inline int mdp4_overlay_writeback_setup(struct fb_info *fbi,
+		struct mdp4_overlay_pipe *pipe, uint8 *buf, int bpp)
+{
+	return 0;
+}
+#endif
+
+void mdp4_sw_reset(unsigned long bits);
+void mdp4_display_intf_sel(int output, unsigned long intf);
+void mdp4_overlay_cfg(int layer, int blt_mode, int refresh, int direct_out);
+void mdp4_ebi2_lcd_setup(int lcd, unsigned long base, int ystride);
+void mdp4_mddi_setup(int which, unsigned long id);
+unsigned long mdp4_display_status(void);
+void mdp4_enable_clk_irq(void);
+void mdp4_disable_clk_irq(void);
+void mdp4_dma_p_update(struct msm_fb_data_type *mfd);
+void mdp4_dma_s_update(struct msm_fb_data_type *mfd);
+void mdp_pipe_ctrl(MDP_BLOCK_TYPE block, MDP_BLOCK_POWER_STATE state,
+		   boolean isr);
+void mdp4_pipe_kickoff(uint32 pipe, struct msm_fb_data_type *mfd);
+int mdp4_lcdc_on(struct platform_device *pdev);
+int mdp4_lcdc_off(struct platform_device *pdev);
+void mdp4_lcdc_update(struct msm_fb_data_type *mfd);
+void mdp4_intr_clear_set(ulong clear, ulong set);
+void mdp4_dma_p_cfg(void);
+unsigned is_mdp4_hw_reset(void);
+void mdp4_hw_init(void);
+void mdp4_isr_read(int);
+void mdp4_clear_lcdc(void);
+void mdp4_mixer_blend_init(int mixer_num);
+void mdp4_vg_qseed_init(int vg_num);
+void mdp4_vg_csc_mv_setup(int vp_num);
+void mdp4_vg_csc_pre_bv_setup(int vp_num);
+void mdp4_vg_csc_post_bv_setup(int vp_num);
+void mdp4_vg_csc_pre_lv_setup(int vp_num);
+void mdp4_vg_csc_post_lv_setup(int vp_num);
+void mdp4_mixer1_csc_mv_setup(void);
+void mdp4_mixer1_csc_pre_bv_setup(void);
+void mdp4_mixer1_csc_post_bv_setup(void);
+void mdp4_mixer1_csc_pre_lv_setup(void);
+void mdp4_mixer1_csc_post_lv_setup(void);
+irqreturn_t mdp4_isr(int irq, void *ptr);
+void mdp4_overlay_format_to_pipe(uint32 format, struct mdp4_overlay_pipe *pipe);
+uint32 mdp4_overlay_format(struct mdp4_overlay_pipe *pipe);
+uint32 mdp4_overlay_unpack_pattern(struct mdp4_overlay_pipe *pipe);
+uint32 mdp4_overlay_op_mode(struct mdp4_overlay_pipe *pipe);
+void mdp4_lcdc_overlay(struct msm_fb_data_type *mfd);
+void mdp4_overlay_dtv_ov_done_push(struct msm_fb_data_type *mfd,
+			struct mdp4_overlay_pipe *pipe);
+void mdp4_overlay_dtv_vsync_push(struct msm_fb_data_type *mfd,
+			struct mdp4_overlay_pipe *pipe);
+void mdp4_dtv_overlay(struct msm_fb_data_type *mfd);
+int mdp4_dtv_on(struct platform_device *pdev);
+int mdp4_dtv_off(struct platform_device *pdev);
+void mdp4_atv_overlay(struct msm_fb_data_type *mfd);
+int mdp4_atv_on(struct platform_device *pdev);
+int mdp4_atv_off(struct platform_device *pdev);
+void mdp4_dsi_video_fxn_register(cmd_fxn_t fxn);
+void mdp4_dsi_video_overlay(struct msm_fb_data_type *mfd);
+int mdp4_dsi_video_on(struct platform_device *pdev);
+int mdp4_dsi_video_off(struct platform_device *pdev);
+void mdp4_overlay0_done_dsi_video(void);
+void mdp4_overlay0_done_dsi_cmd(struct mdp_dma_data *dma);
+void mdp4_dsi_cmd_overlay(struct msm_fb_data_type *mfd);
+void mdp4_overlay_dsi_state_set(int state);
+void mdp4_overlay_rgb_setup(struct mdp4_overlay_pipe *pipe);
+void mdp4_overlay_reg_flush(struct mdp4_overlay_pipe *pipe, int all);
+void mdp4_mixer_blend_setup(struct mdp4_overlay_pipe *pipe);
+struct mdp4_overlay_pipe *mdp4_overlay_stage_pipe(int mixer, int stage);
+void mdp4_mixer_stage_up(struct mdp4_overlay_pipe *pipe);
+void mdp4_mixer_stage_down(struct mdp4_overlay_pipe *pipe);
+int mdp4_mixer_stage_can_run(struct mdp4_overlay_pipe *pipe);
+void mdp4_overlayproc_cfg(struct mdp4_overlay_pipe *pipe);
+void mdp4_mddi_overlay(struct msm_fb_data_type *mfd);
+int mdp4_overlay_format2type(uint32 format);
+int mdp4_overlay_format2pipe(struct mdp4_overlay_pipe *pipe);
+int mdp4_overlay_get(struct fb_info *info, struct mdp_overlay *req);
+int mdp4_overlay_set(struct fb_info *info, struct mdp_overlay *req);
+int mdp4_overlay_unset(struct fb_info *info, int ndx);
+int mdp4_overlay_play(struct fb_info *info, struct msmfb_overlay_data *req,
+		struct file **pp_src_file, struct file **pp_src_plane1_file,
+		struct file **pp_src_plane2_file);
+struct mdp4_overlay_pipe *mdp4_overlay_pipe_alloc(int ptype, int mixer,
+				int req_share);
+void mdp4_overlay_pipe_free(struct mdp4_overlay_pipe *pipe);
+void mdp4_overlay_dmap_cfg(struct msm_fb_data_type *mfd, int lcdc);
+void mdp4_overlay_dmap_xy(struct mdp4_overlay_pipe *pipe);
+void mdp4_overlay_dmae_cfg(struct msm_fb_data_type *mfd, int atv);
+void mdp4_overlay_dmae_xy(struct mdp4_overlay_pipe *pipe);
+int mdp4_overlay_pipe_staged(int mixer);
+void mdp4_lcdc_primary_vsyn(void);
+void mdp4_overlay0_done_lcdc(void);
+void mdp4_overlay0_done_mddi(struct mdp_dma_data *dma);
+void mdp4_dma_s_done_mddi(void);
+void mdp4_dma_p_done_mddi(void);
+void mdp4_dma_p_done_dsi(struct mdp_dma_data *dma);
+void mdp4_overlay1_done_dtv(void);
+void mdp4_overlay1_done_atv(void);
+void mdp4_primary_vsync_lcdc(void);
+void mdp4_external_vsync_dtv(void);
+void mdp4_mddi_overlay_restore(void);
+void mdp4_overlay_lcdc_wait4vsync(struct msm_fb_data_type *mfd);
+void mdp4_overlay_lcdc_vsync_push(struct msm_fb_data_type *mfd,
+				struct mdp4_overlay_pipe *pipe);
+void mdp4_mddi_overlay_dmas_restore(void);
+void mdp4_mddi_dma_busy_wait(struct msm_fb_data_type *mfd);
+void mdp4_mddi_overlay_kickoff(struct msm_fb_data_type *mfd,
+				struct mdp4_overlay_pipe *pipe);
+void mdp4_rgb_igc_lut_setup(int num);
+void mdp4_vg_igc_lut_setup(int num);
+void mdp4_mixer_gc_lut_setup(int mixer_num);
+void mdp4_fetch_cfg(uint32 clk);
+uint32 mdp4_rgb_igc_lut_cvt(uint32 ndx);
+void mdp4_vg_qseed_init(int);
+int mdp4_overlay_blt(struct fb_info *info, struct msmfb_overlay_blt *req);
+int mdp4_overlay_blt_offset(struct fb_info *info,
+					struct msmfb_overlay_blt *req);
+
+int mdp4_dsi_overlay_blt_start(struct msm_fb_data_type *mfd);
+int mdp4_dsi_overlay_blt_stop(struct msm_fb_data_type *mfd);
+
+#ifdef CONFIG_FB_MSM_MIPI_DSI
+void mdp4_dsi_overlay_blt(struct msm_fb_data_type *mfd,
+					struct msmfb_overlay_blt *req);
+int mdp4_dsi_overlay_blt_offset(struct msm_fb_data_type *mfd,
+					struct msmfb_overlay_blt *req);
+
+void mdp4_dsi_video_overlay_blt(struct msm_fb_data_type *mfd,
+					struct msmfb_overlay_blt *req);
+int mdp4_dsi_video_overlay_blt_offset(struct msm_fb_data_type *mfd,
+					struct msmfb_overlay_blt *req);
+
+#ifdef CONFIG_FB_MSM_MDP40
+static inline void mdp3_dsi_cmd_dma_busy_wait(struct msm_fb_data_type *mfd)
+{
+	/* empty */
+}
+#endif
+#else
+static inline void mdp4_dsi_overlay_blt(
+	struct msm_fb_data_type *mfd, struct msmfb_overlay_blt *req)
+{
+}
+static inline int mdp4_dsi_overlay_blt_offset(
+	struct msm_fb_data_type *mfd, struct msmfb_overlay_blt *req)
+{
+	return -ENODEV;
+}
+static inline void mdp4_dsi_video_overlay_blt(
+	struct msm_fb_data_type *mfd, struct msmfb_overlay_blt *req)
+{
+}
+static inline int mdp4_dsi_video_overlay_blt_offset(
+	struct msm_fb_data_type *mfd, struct msmfb_overlay_blt *req)
+{
+	return -ENODEV;
+}
+#endif
+
+void mdp4_lcdc_overlay_blt(struct msm_fb_data_type *mfd,
+					struct msmfb_overlay_blt *req);
+int mdp4_lcdc_overlay_blt_offset(struct msm_fb_data_type *mfd,
+					struct msmfb_overlay_blt *req);
+
+int mdp4_mddi_overlay_blt_offset(int *off);
+void mdp4_mddi_overlay_blt(ulong addr);
+void mdp4_overlay_panel_mode(int mixer_num, uint32 mode);
+int mdp4_overlay_mixer_play(int mixer_num);
+uint32 mdp4_overlay_panel_list(void);
+void mdp4_lcdc_overlay_kickoff(struct msm_fb_data_type *mfd,
+			struct mdp4_overlay_pipe *pipe);
+
+void mdp4_mddi_kickoff_video(struct msm_fb_data_type *mfd,
+				struct mdp4_overlay_pipe *pipe);
+
+void mdp4_mddi_read_ptr_intr(void);
+
+void mdp4_dsi_cmd_dma_busy_check(void);
+void mdp4_dsi_cmd_dma_busy_wait(struct msm_fb_data_type *mfd);
+void mdp4_dsi_blt_dmap_busy_wait(struct msm_fb_data_type *mfd);
+void mdp4_dsi_cmd_kickoff_ui(struct msm_fb_data_type *mfd,
+				struct mdp4_overlay_pipe *pipe);
+void mdp4_dsi_cmd_kickoff_video(struct msm_fb_data_type *mfd,
+				struct mdp4_overlay_pipe *pipe);
+void mdp4_dsi_cmd_overlay_kickoff(struct msm_fb_data_type *mfd,
+				struct mdp4_overlay_pipe *pipe);
+void mdp4_dsi_cmd_overlay_restore(void);
+
+void mdp4_overlay_panel_3d(int mixer_num, uint32 panel_3d);
+int mdp4_overlay_3d(struct fb_info *info, struct msmfb_overlay_3d *req);
+void mdp4_dsi_cmd_3d(struct msm_fb_data_type *mfd,
+			 struct msmfb_overlay_3d *r3d);
+
+void mdp_dmap_vsync_set(int enable);
+int mdp_dmap_vsync_get(void);
+void mdp_hw_cursor_done(void);
+void mdp_hw_cursor_init(void);
+int mdp4_mddi_overlay_cursor(struct fb_info *info, struct fb_cursor *cursor);
+int mdp_ppp_blit(struct fb_info *info, struct mdp_blit_req *req);
+void mdp4_overlay_resource_release(void);
+void mdp4_overlay_dsi_video_wait4vsync(struct msm_fb_data_type *mfd);
+void mdp4_overlay_dsi_video_vsync_push(struct msm_fb_data_type *mfd,
+				struct mdp4_overlay_pipe *pipe);
+void mdp4_primary_vsync_dsi_video(void);
+uint32_t mdp4_ss_table_value(int8_t param, int8_t index);
+void mdp4_overlay_status_write(enum mdp4_overlay_status type, bool val);
+bool mdp4_overlay_status_read(enum mdp4_overlay_status type);
+#endif /* MDP4_H */
diff --git a/drivers/video/msm/mdp4_dtv.c b/drivers/video/msm/mdp4_dtv.c
new file mode 100644
index 0000000..b039da8
--- /dev/null
+++ b/drivers/video/msm/mdp4_dtv.c
@@ -0,0 +1,329 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <mach/hardware.h>
+#include <mach/msm_reqs.h>
+#include <linux/pm_runtime.h>
+#include <mach/clk.h>
+
+#include "msm_fb.h"
+#include "mdp4.h"
+
+static int dtv_probe(struct platform_device *pdev);
+static int dtv_remove(struct platform_device *pdev);
+
+static int dtv_off(struct platform_device *pdev);
+static int dtv_on(struct platform_device *pdev);
+
+static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
+static int pdev_list_cnt;
+
+static struct clk *tv_src_clk;
+static struct clk *hdmi_clk;
+static struct clk *mdp_tv_clk;
+
+
+static int mdp4_dtv_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: suspending...\n");
+	return 0;
+}
+
+static int mdp4_dtv_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: resuming...\n");
+	return 0;
+}
+
+static const struct dev_pm_ops mdp4_dtv_dev_pm_ops = {
+	.runtime_suspend = mdp4_dtv_runtime_suspend,
+	.runtime_resume = mdp4_dtv_runtime_resume,
+};
+
+static struct platform_driver dtv_driver = {
+	.probe = dtv_probe,
+	.remove = dtv_remove,
+	.suspend = NULL,
+	.resume = NULL,
+	.shutdown = NULL,
+	.driver = {
+		   .name = "dtv",
+		   .pm = &mdp4_dtv_dev_pm_ops,
+		   },
+};
+
+static struct lcdc_platform_data *dtv_pdata;
+#ifdef CONFIG_MSM_BUS_SCALING
+static uint32_t dtv_bus_scale_handle;
+#else
+static struct clk *ebi1_clk;
+#endif
+
+static int dtv_off(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	ret = panel_next_off(pdev);
+
+	pr_info("%s\n", __func__);
+
+	clk_disable(hdmi_clk);
+	if (mdp_tv_clk)
+		clk_disable(mdp_tv_clk);
+
+	if (dtv_pdata && dtv_pdata->lcdc_power_save)
+		dtv_pdata->lcdc_power_save(0);
+
+	if (dtv_pdata && dtv_pdata->lcdc_gpio_config)
+		ret = dtv_pdata->lcdc_gpio_config(0);
+#ifdef CONFIG_MSM_BUS_SCALING
+	if (dtv_bus_scale_handle > 0)
+		msm_bus_scale_client_update_request(dtv_bus_scale_handle,
+							0);
+#else
+	if (ebi1_clk)
+		clk_disable(ebi1_clk);
+#endif
+	mdp4_extn_disp = 0;
+	return ret;
+}
+
+static int dtv_on(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct msm_fb_data_type *mfd;
+	unsigned long panel_pixclock_freq, pm_qos_rate;
+
+	mfd = platform_get_drvdata(pdev);
+	panel_pixclock_freq = mfd->fbi->var.pixclock;
+
+#ifdef CONFIG_MSM_NPA_SYSTEM_BUS
+	pm_qos_rate = MSM_AXI_FLOW_MDP_DTV_720P_2BPP;
+#else
+	if (panel_pixclock_freq > 58000000)
+		/* pm_qos_rate should be in kHz */
+		pm_qos_rate = panel_pixclock_freq / 1000;
+	else
+		pm_qos_rate = 58000;
+#endif
+	mdp_set_core_clk(1);
+	mdp4_extn_disp = 1;
+#ifdef CONFIG_MSM_BUS_SCALING
+	if (dtv_bus_scale_handle > 0)
+		msm_bus_scale_client_update_request(dtv_bus_scale_handle,
+							1);
+#else
+	if (ebi1_clk) {
+		clk_set_rate(ebi1_clk, pm_qos_rate * 1000);
+		clk_enable(ebi1_clk);
+	}
+#endif
+	mfd = platform_get_drvdata(pdev);
+
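+	/*
+	 * Program the TV source clock to the panel pixel clock; if the
+	 * exact 27.03MHz rate is rejected, fall back to plain 27MHz.
+	 */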
+	ret = clk_set_rate(tv_src_clk, mfd->fbi->var.pixclock);
+	if (ret) {
+		pr_info("%s: clk_set_rate(%d) failed\n", __func__,
+			mfd->fbi->var.pixclock);
+		if (mfd->fbi->var.pixclock == 27030000)
+			mfd->fbi->var.pixclock = 27000000;
+		ret = clk_set_rate(tv_src_clk, mfd->fbi->var.pixclock);
+	}
+	pr_info("%s: tv_src_clk=%dkHz, pm_qos_rate=%ldkHz, [%d]\n", __func__,
+		mfd->fbi->var.pixclock/1000, pm_qos_rate, ret);
+
+	clk_enable(hdmi_clk);
+	clk_reset(hdmi_clk, CLK_RESET_ASSERT);
+	udelay(20);
+	clk_reset(hdmi_clk, CLK_RESET_DEASSERT);
+
+	if (mdp_tv_clk)
+		clk_enable(mdp_tv_clk);
+
+	if (dtv_pdata && dtv_pdata->lcdc_power_save)
+		dtv_pdata->lcdc_power_save(1);
+	if (dtv_pdata && dtv_pdata->lcdc_gpio_config)
+		ret = dtv_pdata->lcdc_gpio_config(1);
+
+	ret = panel_next_on(pdev);
+	return ret;
+}
+
+static int dtv_probe(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+	struct fb_info *fbi;
+	struct platform_device *mdp_dev = NULL;
+	struct msm_fb_panel_data *pdata = NULL;
+	int rc;
+
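+	/* the id 0 device only supplies the board's lcdc/dtv platform data */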
+	if (pdev->id == 0) {
+		dtv_pdata = pdev->dev.platform_data;
+		return 0;
+	}
+
+	mfd = platform_get_drvdata(pdev);
+
+	if (!mfd)
+		return -ENODEV;
+
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
+		return -ENOMEM;
+
+	mdp_dev = platform_device_alloc("mdp", pdev->id);
+	if (!mdp_dev)
+		return -ENOMEM;
+
+	/*
+	 * link to the latest pdev
+	 */
+	mfd->pdev = mdp_dev;
+	mfd->dest = DISPLAY_LCDC;
+
+	/*
+	 * alloc panel device data
+	 */
+	if (platform_device_add_data
+	    (mdp_dev, pdev->dev.platform_data,
+	     sizeof(struct msm_fb_panel_data))) {
+		pr_err("dtv_probe: platform_device_add_data failed!\n");
+		platform_device_put(mdp_dev);
+		return -ENOMEM;
+	}
+	/*
+	 * data chain
+	 */
+	pdata = (struct msm_fb_panel_data *)mdp_dev->dev.platform_data;
+	pdata->on = dtv_on;
+	pdata->off = dtv_off;
+	pdata->next = pdev;
+
+	/*
+	 * get/set panel specific fb info
+	 */
+	mfd->panel_info = pdata->panel_info;
+	mfd->fb_imgType = MDP_RGB_565;
+
+	fbi = mfd->fbi;
+	fbi->var.pixclock = mfd->panel_info.clk_rate;
+	fbi->var.left_margin = mfd->panel_info.lcdc.h_back_porch;
+	fbi->var.right_margin = mfd->panel_info.lcdc.h_front_porch;
+	fbi->var.upper_margin = mfd->panel_info.lcdc.v_back_porch;
+	fbi->var.lower_margin = mfd->panel_info.lcdc.v_front_porch;
+	fbi->var.hsync_len = mfd->panel_info.lcdc.h_pulse_width;
+	fbi->var.vsync_len = mfd->panel_info.lcdc.v_pulse_width;
+
+#ifdef CONFIG_MSM_BUS_SCALING
+	if (!dtv_bus_scale_handle && dtv_pdata &&
+		dtv_pdata->bus_scale_table) {
+		dtv_bus_scale_handle =
+			msm_bus_scale_register_client(
+					dtv_pdata->bus_scale_table);
+		if (!dtv_bus_scale_handle) {
+			pr_err("%s not able to get bus scale\n",
+				__func__);
+		}
+	}
+#else
+	ebi1_clk = clk_get(NULL, "ebi1_dtv_clk");
+	if (IS_ERR(ebi1_clk)) {
+		ebi1_clk = NULL;
+		pr_warning("%s: Couldn't get ebi1 clock\n", __func__);
+	}
+#endif
+	/*
+	 * set driver data
+	 */
+	platform_set_drvdata(mdp_dev, mfd);
+
+	/*
+	 * register in mdp driver
+	 */
+	rc = platform_device_add(mdp_dev);
+	if (rc)
+		goto dtv_probe_err;
+
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	pdev_list[pdev_list_cnt++] = pdev;
+	return 0;
+
+dtv_probe_err:
+#ifdef CONFIG_MSM_BUS_SCALING
+	if (dtv_pdata && dtv_pdata->bus_scale_table &&
+		dtv_bus_scale_handle > 0)
+		msm_bus_scale_unregister_client(dtv_bus_scale_handle);
+#endif
+	platform_device_put(mdp_dev);
+	return rc;
+}
+
+static int dtv_remove(struct platform_device *pdev)
+{
+#ifdef CONFIG_MSM_BUS_SCALING
+	if (dtv_pdata && dtv_pdata->bus_scale_table &&
+		dtv_bus_scale_handle > 0)
+		msm_bus_scale_unregister_client(dtv_bus_scale_handle);
+#else
+	if (ebi1_clk)
+		clk_put(ebi1_clk);
+#endif
+	pm_runtime_disable(&pdev->dev);
+	return 0;
+}
+
+static int dtv_register_driver(void)
+{
+	return platform_driver_register(&dtv_driver);
+}
+
+static int __init dtv_driver_init(void)
+{
+	tv_src_clk = clk_get(NULL, "tv_src_clk");
+	if (IS_ERR(tv_src_clk)) {
+		pr_err("error: can't get tv_src_clk!\n");
+		return PTR_ERR(tv_src_clk);
+	}
+
+	hdmi_clk = clk_get(NULL, "hdmi_clk");
+	if (IS_ERR(hdmi_clk)) {
+		pr_err("error: can't get hdmi_clk!\n");
+		return PTR_ERR(hdmi_clk);
+	}
+
+	mdp_tv_clk = clk_get(NULL, "mdp_tv_clk");
+	if (IS_ERR(mdp_tv_clk))
+		mdp_tv_clk = NULL;
+
+	return dtv_register_driver();
+}
+
+module_init(dtv_driver_init);
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
new file mode 100644
index 0000000..cc3bd1b
--- /dev/null
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -0,0 +1,2290 @@
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/hrtimer.h>
+#include <linux/clk.h>
+#include <mach/hardware.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/fb.h>
+#include <linux/msm_mdp.h>
+#include <linux/file.h>
+#include <linux/android_pmem.h>
+#include <linux/major.h>
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <linux/mutex.h>
+#include <linux/msm_kgsl.h>
+#include "mdp.h"
+#include "msm_fb.h"
+#include "mdp4.h"
+
+#define VERSION_KEY_MASK	0xFFFFFF00
+
+struct mdp4_overlay_ctrl {
+	struct mdp4_pipe_desc ov_pipe[OVERLAY_PIPE_MAX];/* 4 */
+	struct mdp4_overlay_pipe plist[MDP4_MAX_PIPE];	/* 4 + 2 */
+	struct mdp4_overlay_pipe *stage[MDP4_MAX_MIXER][MDP4_MAX_STAGE + 2];
+	uint32 panel_3d;
+	uint32 panel_mode;
+	uint32 mixer0_played;
+	uint32 mixer1_played;
+} mdp4_overlay_db = {
+	.ov_pipe = {
+			{
+				.share = 0,	/* RGB 1 */
+			},
+			{
+				.share = 0,	/* RGB 2 */
+			},
+			{
+				.share = 1,	/* VG 1 */
+			},
+			{
+				.share = 1,	/* VG 2 */
+			},
+		},
+	.plist = {
+		{
+			.pipe_type = OVERLAY_TYPE_RGB,
+			.pipe_num = OVERLAY_PIPE_RGB1,
+			.pipe_ndx = 1,
+		},
+		{
+			.pipe_type = OVERLAY_TYPE_RGB,
+			.pipe_num = OVERLAY_PIPE_RGB2,
+			.pipe_ndx = 2,
+		},
+		{
+			.pipe_type = OVERLAY_TYPE_RGB, /* shared */
+			.pipe_num = OVERLAY_PIPE_VG1,
+			.pipe_ndx = 3,
+		},
+		{
+			.pipe_type = OVERLAY_TYPE_RGB, /* shared */
+			.pipe_num = OVERLAY_PIPE_VG2,
+			.pipe_ndx = 4,
+		},
+		{
+			.pipe_type = OVERLAY_TYPE_VIDEO, /* shared */
+			.pipe_num = OVERLAY_PIPE_VG1,
+			.pipe_ndx = 5,
+		},
+		{
+			.pipe_type = OVERLAY_TYPE_VIDEO, /* shared */
+			.pipe_num = OVERLAY_PIPE_VG2,
+			.pipe_ndx = 6,
+		},
+	},
+};
+
+static struct mdp4_overlay_ctrl *ctrl = &mdp4_overlay_db;
+static uint32 perf_level;
+static uint32 mdp4_del_res_rel;
+/* static array with index 0 for unset status and 1 for set status */
+static bool overlay_status[MDP4_OVERLAY_TYPE_MAX];
+
+void mdp4_overlay_status_write(enum mdp4_overlay_status type, bool val)
+{
+	overlay_status[type] = val;
+}
+
+bool mdp4_overlay_status_read(enum mdp4_overlay_status type)
+{
+	return overlay_status[type];
+}
+
+int mdp4_overlay_mixer_play(int mixer_num)
+{
+	if (mixer_num == MDP4_MIXER1)
+		return ctrl->mixer1_played;
+	else
+		return ctrl->mixer0_played;
+}
+
+void mdp4_overlay_panel_3d(int mixer_num, uint32 panel_3d)
+{
+	ctrl->panel_3d = panel_3d;
+}
+
+void mdp4_overlay_panel_mode(int mixer_num, uint32 mode)
+{
+	ctrl->panel_mode |= mode;
+}
+
+uint32 mdp4_overlay_panel_list(void)
+{
+	return ctrl->panel_mode;
+}
+
+void mdp4_overlay_dmae_cfg(struct msm_fb_data_type *mfd, int atv)
+{
+	uint32	dmae_cfg_reg;
+
+	if (atv)
+		dmae_cfg_reg = DMA_DEFLKR_EN;
+	else
+		dmae_cfg_reg = 0;
+
+	if (mfd->fb_imgType == MDP_BGR_565)
+		dmae_cfg_reg |= DMA_PACK_PATTERN_BGR;
+	else
+		dmae_cfg_reg |= DMA_PACK_PATTERN_RGB;
+
+
+	if (mfd->panel_info.bpp == 18) {
+		dmae_cfg_reg |= DMA_DSTC0G_6BITS |	/* 666 18BPP */
+		    DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;
+	} else if (mfd->panel_info.bpp == 16) {
+		dmae_cfg_reg |= DMA_DSTC0G_6BITS |	/* 565 16BPP */
+		    DMA_DSTC1B_5BITS | DMA_DSTC2R_5BITS;
+	} else {
+		dmae_cfg_reg |= DMA_DSTC0G_8BITS |	/* 888 24BPP */
+		    DMA_DSTC1B_8BITS | DMA_DSTC2R_8BITS;
+	}
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	/* dma2 config register */
+	MDP_OUTP(MDP_BASE + 0xb0000, dmae_cfg_reg);
+	if (atv) {
+		MDP_OUTP(MDP_BASE + 0xb0070, 0xeb0010);
+		MDP_OUTP(MDP_BASE + 0xb0074, 0xf00010);
+		MDP_OUTP(MDP_BASE + 0xb0078, 0xf00010);
+		MDP_OUTP(MDP_BASE + 0xb3000, 0x80);
+		MDP_OUTP(MDP_BASE + 0xb3010, 0x1800040);
+		MDP_OUTP(MDP_BASE + 0xb3014, 0x1000080);
+		MDP_OUTP(MDP_BASE + 0xb4004, 0x67686970);
+	} else {
+		MDP_OUTP(MDP_BASE + 0xb0070, 0xff0000);
+		MDP_OUTP(MDP_BASE + 0xb0074, 0xff0000);
+		MDP_OUTP(MDP_BASE + 0xb0078, 0xff0000);
+	}
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+
+void unfill_black_screen(void)
+{
+	uint32 temp_src_format;
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	/*
+	 * MDP_VG2_SRC_FORMAT: clear the solid fill (constant color) bit
+	 */
+	temp_src_format = inpdw(MDP_BASE + 0x30050);
+	MDP_OUTP(MDP_BASE + 0x30050, temp_src_format & ~BIT(22));
+	/*
+	 * MDP_OVERLAY_REG_FLUSH
+	 */
+	MDP_OUTP(MDP_BASE + 0x18000, BIT(3));
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+
+void fill_black_screen(void)
+{
+	/*Black color*/
+	uint32 color = 0x00000000;
+	uint32 temp_src_format;
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	/*
+	 * VG2 Constant Color
+	 */
+	MDP_OUTP(MDP_BASE + 0x31008, color);
+	/*
+	 * MDP_VG2_SRC_FORMAT
+	 */
+	temp_src_format = inpdw(MDP_BASE + 0x30050);
+	MDP_OUTP(MDP_BASE + 0x30050, temp_src_format | BIT(22));
+	/*
+	 * MDP_OVERLAY_REG_FLUSH
+	 */
+	MDP_OUTP(MDP_BASE + 0x18000, BIT(3));
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+
+void mdp4_overlay_dmae_xy(struct mdp4_overlay_pipe *pipe)
+{
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	/* dma_p source */
+	MDP_OUTP(MDP_BASE + 0xb0004,
+			(pipe->src_height << 16 | pipe->src_width));
+	MDP_OUTP(MDP_BASE + 0xb0008, pipe->srcp0_addr);
+	MDP_OUTP(MDP_BASE + 0xb000c, pipe->srcp0_ystride);
+
+	/* dma_p dest */
+	MDP_OUTP(MDP_BASE + 0xb0010, (pipe->dst_y << 16 | pipe->dst_x));
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+
+void mdp4_overlay_dmap_cfg(struct msm_fb_data_type *mfd, int lcdc)
+{
+	uint32	dma2_cfg_reg;
+
+	dma2_cfg_reg = DMA_DITHER_EN;
+#ifdef BLT_RGB565
+	/* RGB888 is 0 */
+	dma2_cfg_reg |= DMA_BUF_FORMAT_RGB565; /* blt only */
+#endif
+
+	if (mfd->fb_imgType == MDP_BGR_565)
+		dma2_cfg_reg |= DMA_PACK_PATTERN_BGR;
+	else
+		dma2_cfg_reg |= DMA_PACK_PATTERN_RGB;
+
+
+	if (mfd->panel_info.bpp == 18) {
+		dma2_cfg_reg |= DMA_DSTC0G_6BITS |	/* 666 18BPP */
+		    DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;
+	} else if (mfd->panel_info.bpp == 16) {
+		dma2_cfg_reg |= DMA_DSTC0G_6BITS |	/* 565 16BPP */
+		    DMA_DSTC1B_5BITS | DMA_DSTC2R_5BITS;
+	} else {
+		dma2_cfg_reg |= DMA_DSTC0G_8BITS |	/* 888 24BPP */
+		    DMA_DSTC1B_8BITS | DMA_DSTC2R_8BITS;
+	}
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+#ifndef CONFIG_FB_MSM_LCDC_CHIMEI_WXGA_PANEL
+	if (lcdc)
+		dma2_cfg_reg |= DMA_PACK_ALIGN_MSB;
+#endif
+
+	/* dma2 config register */
+	MDP_OUTP(MDP_BASE + 0x90000, dma2_cfg_reg);
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+
+/*
+ * mdp4_overlay_dmap_xy: called form baselayer only
+ */
+void mdp4_overlay_dmap_xy(struct mdp4_overlay_pipe *pipe)
+{
+	uint32 off, bpp;
+
+	if (mdp_is_in_isr == FALSE)
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	/* dma_p source */
+	MDP_OUTP(MDP_BASE + 0x90004,
+			(pipe->src_height << 16 | pipe->src_width));
+	if (pipe->blt_addr) {
+#ifdef BLT_RGB565
+		bpp = 2; /* overlay output is RGB565 */
+#else
+		bpp = 3; /* overlay output is RGB888 */
+#endif
+		off = 0;
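+		/* ping-pong between the two blt buffers on odd/even dmap counts */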
+		if (pipe->dmap_cnt & 0x01)
+			off = pipe->src_height * pipe->src_width * bpp;
+		MDP_OUTP(MDP_BASE + 0x90008, pipe->blt_addr + off);
+		/* RGB888, output of overlay blending */
+		MDP_OUTP(MDP_BASE + 0x9000c, pipe->src_width * bpp);
+	} else {
+		MDP_OUTP(MDP_BASE + 0x90008, pipe->srcp0_addr);
+		MDP_OUTP(MDP_BASE + 0x9000c, pipe->srcp0_ystride);
+	}
+
+	/* dma_p dest */
+	MDP_OUTP(MDP_BASE + 0x90010, (pipe->dst_y << 16 | pipe->dst_x));
+
+	if (mdp_is_in_isr == FALSE)
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+
+#define MDP4_VG_PHASE_STEP_DEFAULT	0x20000000
+#define MDP4_VG_PHASE_STEP_SHIFT	29
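+/* default (1:1) phase step: 1.0 with 29 fractional bits */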
+
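+/* count leading zero bits of num (returns 32 when num == 0) */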
+static int mdp4_leading_0(uint32 num)
+{
+	uint32 bit = 0x80000000;
+	int i;
+
+	for (i = 0; i < 32; i++) {
+		if (bit & num)
+			return i;
+		bit >>= 1;
+	}
+
+	return i;
+}
+
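+/*
+ * src/dst ratio as a fixed point value with f_num fractional bits:
+ * shift src up by its leading zeros (capped at f_num) before dividing
+ * to keep as much precision as the 32-bit intermediate allows.
+ */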
+static uint32 mdp4_scale_phase_step(int f_num, uint32 src, uint32 dst)
+{
+	uint32 val;
+	int	n;
+
+	n = mdp4_leading_0(src);
+	if (n > f_num)
+		n = f_num;
+	val = src << n;	/* shift up as much as possible to reduce loss of resolution */
+	val /= dst;
+	if (n < f_num) {
+		n = f_num - n;
+		val <<= n;
+	}
+
+	return val;
+}
+
+static void mdp4_scale_setup(struct mdp4_overlay_pipe *pipe)
+{
+	int ptype;
+
+	pipe->phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
+	pipe->phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
+	ptype = mdp4_overlay_format2type(pipe->src_format);
+
+	if (pipe->dst_h && pipe->src_h != pipe->dst_h) {
+		if (pipe->dst_h > pipe->src_h * 8)	/* too much */
+			return;
+		pipe->op_mode |= MDP4_OP_SCALEY_EN;
+
+		if (pipe->pipe_num >= OVERLAY_PIPE_VG1) {
+			if (pipe->dst_h <= (pipe->src_h / 4))
+				pipe->op_mode |= MDP4_OP_SCALEY_MN_PHASE;
+			else
+				pipe->op_mode |= MDP4_OP_SCALEY_FIR;
+		}
+
+		pipe->phasey_step = mdp4_scale_phase_step(29,
+					pipe->src_h, pipe->dst_h);
+	}
+
+	if (pipe->dst_w && pipe->src_w != pipe->dst_w) {
+		if (pipe->dst_w > pipe->src_w * 8)	/* too much */
+			return;
+		pipe->op_mode |= MDP4_OP_SCALEX_EN;
+
+		if (pipe->pipe_num >= OVERLAY_PIPE_VG1) {
+			if (pipe->dst_w <= (pipe->src_w / 4))
+				pipe->op_mode |= MDP4_OP_SCALEX_MN_PHASE;
+			else
+				pipe->op_mode |= MDP4_OP_SCALEX_FIR;
+		}
+
+		pipe->phasex_step = mdp4_scale_phase_step(29,
+					pipe->src_w, pipe->dst_w);
+	}
+}
+
+void mdp4_overlay_rgb_setup(struct mdp4_overlay_pipe *pipe)
+{
+	char *rgb_base;
+	uint32 src_size, src_xy, dst_size, dst_xy;
+	uint32 format, pattern;
+
+	rgb_base = MDP_BASE + MDP4_RGB_BASE;
+	rgb_base += (MDP4_RGB_OFF * pipe->pipe_num);
+
+	src_size = ((pipe->src_h << 16) | pipe->src_w);
+	src_xy = ((pipe->src_y << 16) | pipe->src_x);
+	dst_size = ((pipe->dst_h << 16) | pipe->dst_w);
+	dst_xy = ((pipe->dst_y << 16) | pipe->dst_x);
+
+	format = mdp4_overlay_format(pipe);
+	pattern = mdp4_overlay_unpack_pattern(pipe);
+
+#ifdef MDP4_IGC_LUT_ENABLE
+	pipe->op_mode |= MDP4_OP_IGC_LUT_EN;
+#endif
+
+	mdp4_scale_setup(pipe);
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	outpdw(rgb_base + 0x0000, src_size);	/* MDP_RGB_SRC_SIZE */
+	outpdw(rgb_base + 0x0004, src_xy);	/* MDP_RGB_SRC_XY */
+	outpdw(rgb_base + 0x0008, dst_size);	/* MDP_RGB_DST_SIZE */
+	outpdw(rgb_base + 0x000c, dst_xy);	/* MDP_RGB_DST_XY */
+
+	outpdw(rgb_base + 0x0010, pipe->srcp0_addr);
+	outpdw(rgb_base + 0x0040, pipe->srcp0_ystride);
+
+	outpdw(rgb_base + 0x0050, format);/* MDP_RGB_SRC_FORMAT */
+	outpdw(rgb_base + 0x0054, pattern);/* MDP_RGB_SRC_UNPACK_PATTERN */
+	outpdw(rgb_base + 0x0058, pipe->op_mode);/* MDP_RGB_OP_MODE */
+	outpdw(rgb_base + 0x005c, pipe->phasex_step);
+	outpdw(rgb_base + 0x0060, pipe->phasey_step);
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+	mdp4_stat.pipe[pipe->pipe_num]++;
+}
+
+void mdp4_overlay_vg_setup(struct mdp4_overlay_pipe *pipe)
+{
+	char *vg_base;
+	uint32 frame_size, src_size, src_xy, dst_size, dst_xy;
+	uint32 format, pattern;
+	int pnum;
+
+	pnum = pipe->pipe_num - OVERLAY_PIPE_VG1; /* start from 0 */
+	vg_base = MDP_BASE + MDP4_VIDEO_BASE;
+	vg_base += (MDP4_VIDEO_OFF * pnum);
+
+	frame_size = ((pipe->src_height << 16) | pipe->src_width);
+	src_size = ((pipe->src_h << 16) | pipe->src_w);
+	src_xy = ((pipe->src_y << 16) | pipe->src_x);
+	dst_size = ((pipe->dst_h << 16) | pipe->dst_w);
+	dst_xy = ((pipe->dst_y << 16) | pipe->dst_x);
+
+	format = mdp4_overlay_format(pipe);
+	pattern = mdp4_overlay_unpack_pattern(pipe);
+
+	/* only a pure VG (video) pipe needs CSC; RGB routed through a VG pipe does not */
+	if (pipe->pipe_type != OVERLAY_TYPE_RGB)
+#ifdef MDP4_IGC_LUT_ENABLE
+		pipe->op_mode |= (MDP4_OP_CSC_EN | MDP4_OP_SRC_DATA_YCBCR |
+				MDP4_OP_IGC_LUT_EN);
+#else
+		pipe->op_mode |= (MDP4_OP_CSC_EN | MDP4_OP_SRC_DATA_YCBCR);
+#endif
+
+	mdp4_scale_setup(pipe);
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	outpdw(vg_base + 0x0000, src_size);	/* MDP_RGB_SRC_SIZE */
+	outpdw(vg_base + 0x0004, src_xy);	/* MDP_RGB_SRC_XY */
+	outpdw(vg_base + 0x0008, dst_size);	/* MDP_RGB_DST_SIZE */
+	outpdw(vg_base + 0x000c, dst_xy);	/* MDP_RGB_DST_XY */
+	outpdw(vg_base + 0x0048, frame_size);	/* TILE frame size */
+
+	/* luma component plane */
+	outpdw(vg_base + 0x0010, pipe->srcp0_addr);
+
+	/* chroma component plane or  planar color 1 */
+	outpdw(vg_base + 0x0014, pipe->srcp1_addr);
+
+	/* planar color 2 */
+	outpdw(vg_base + 0x0018, pipe->srcp2_addr);
+
+	outpdw(vg_base + 0x0040,
+			pipe->srcp1_ystride << 16 | pipe->srcp0_ystride);
+
+	outpdw(vg_base + 0x0044,
+			pipe->srcp3_ystride << 16 | pipe->srcp2_ystride);
+
+	outpdw(vg_base + 0x0050, format);	/* MDP_RGB_SRC_FORMAT */
+	outpdw(vg_base + 0x0054, pattern);	/* MDP_RGB_SRC_UNPACK_PATTERN */
+	outpdw(vg_base + 0x0058, pipe->op_mode);/* MDP_RGB_OP_MODE */
+	outpdw(vg_base + 0x005c, pipe->phasex_step);
+	outpdw(vg_base + 0x0060, pipe->phasey_step);
+
+	if (pipe->op_mode & MDP4_OP_DITHER_EN) {
+		outpdw(vg_base + 0x0068,
+			pipe->r_bit << 4 | pipe->b_bit << 2 | pipe->g_bit);
+	}
+
+	if (pipe->flags & MDP_SHARPENING) {
+		outpdw(vg_base + 0x8200,
+			mdp4_ss_table_value(pipe->req_data.dpp.sharp_strength,
+									0));
+		outpdw(vg_base + 0x8204,
+			mdp4_ss_table_value(pipe->req_data.dpp.sharp_strength,
+									1));
+	}
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+	mdp4_stat.pipe[pipe->pipe_num]++;
+}
+
+int mdp4_overlay_format2type(uint32 format)
+{
+	switch (format) {
+	case MDP_RGB_565:
+	case MDP_RGB_888:
+	case MDP_BGR_565:
+	case MDP_XRGB_8888:
+	case MDP_ARGB_8888:
+	case MDP_RGBA_8888:
+	case MDP_BGRA_8888:
+	case MDP_RGBX_8888:
+		return OVERLAY_TYPE_RGB;
+	case MDP_YCRYCB_H2V1:
+	case MDP_Y_CRCB_H2V1:
+	case MDP_Y_CBCR_H2V1:
+	case MDP_Y_CRCB_H2V2:
+	case MDP_Y_CBCR_H2V2:
+	case MDP_Y_CBCR_H2V2_TILE:
+	case MDP_Y_CRCB_H2V2_TILE:
+	case MDP_Y_CR_CB_H2V2:
+	case MDP_Y_CB_CR_H2V2:
+	case MDP_Y_CRCB_H1V1:
+	case MDP_Y_CBCR_H1V1:
+		return OVERLAY_TYPE_VIDEO;
+	default:
+		mdp4_stat.err_format++;
+		return -ERANGE;
+	}
+
+}
+
+#define C3_ALPHA	3	/* alpha */
+#define C2_R_Cr		2	/* R/Cr */
+#define C1_B_Cb		1	/* B/Cb */
+#define C0_G_Y		0	/* G/luma */
+#define YUV_444_MAX_WIDTH		1280	/* Max width for YUV 444 */
+
+int mdp4_overlay_format2pipe(struct mdp4_overlay_pipe *pipe)
+{
+	switch (pipe->src_format) {
+	case MDP_RGB_565:
+		pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
+		pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
+		pipe->a_bit = 0;
+		pipe->r_bit = 1;	/* R, 5 bits */
+		pipe->b_bit = 1;	/* B, 5 bits */
+		pipe->g_bit = 2;	/* G, 6 bits */
+		pipe->alpha_enable = 0;
+		pipe->unpack_tight = 1;
+		pipe->unpack_align_msb = 0;
+		pipe->unpack_count = 2;
+		pipe->element2 = C2_R_Cr;	/* R */
+		pipe->element1 = C0_G_Y;	/* G */
+		pipe->element0 = C1_B_Cb;	/* B */
+		pipe->bpp = 2;	/* 2 bpp */
+		break;
+	case MDP_RGB_888:
+		pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
+		pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
+		pipe->a_bit = 0;
+		pipe->r_bit = 3;	/* R, 8 bits */
+		pipe->b_bit = 3;	/* B, 8 bits */
+		pipe->g_bit = 3;	/* G, 8 bits */
+		pipe->alpha_enable = 0;
+		pipe->unpack_tight = 1;
+		pipe->unpack_align_msb = 0;
+		pipe->unpack_count = 2;
+		pipe->element2 = C2_R_Cr;	/* R */
+		pipe->element1 = C0_G_Y;	/* G */
+		pipe->element0 = C1_B_Cb;	/* B */
+		pipe->bpp = 3;	/* 3 bpp */
+		break;
+	case MDP_BGR_565:
+		pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
+		pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
+		pipe->a_bit = 0;
+		pipe->r_bit = 1;	/* R, 5 bits */
+		pipe->b_bit = 1;	/* B, 5 bits */
+		pipe->g_bit = 2;	/* G, 6 bits */
+		pipe->alpha_enable = 0;
+		pipe->unpack_tight = 1;
+		pipe->unpack_align_msb = 0;
+		pipe->unpack_count = 2;
+		pipe->element2 = C1_B_Cb;	/* B */
+		pipe->element1 = C0_G_Y;	/* G */
+		pipe->element0 = C2_R_Cr;	/* R */
+		pipe->bpp = 2;	/* 2 bpp */
+		break;
+	case MDP_XRGB_8888:
+		pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
+		pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
+		pipe->a_bit = 3;	/* alpha, 8 bits */
+		pipe->r_bit = 3;	/* R, 8 bits */
+		pipe->b_bit = 3;	/* B, 8 bits */
+		pipe->g_bit = 3;	/* G, 8 bits */
+		pipe->alpha_enable = 0;
+		pipe->unpack_tight = 1;
+		pipe->unpack_align_msb = 0;
+		pipe->unpack_count = 3;
+		pipe->element3 = C3_ALPHA;	/* alpha */
+		pipe->element2 = C2_R_Cr;	/* R */
+		pipe->element1 = C0_G_Y;	/* G */
+		pipe->element0 = C1_B_Cb;	/* B */
+		pipe->bpp = 4;		/* 4 bpp */
+		break;
+	case MDP_ARGB_8888:
+		pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
+		pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
+		pipe->a_bit = 3;	/* alpha, 8 bits */
+		pipe->r_bit = 3;	/* R, 8 bits */
+		pipe->b_bit = 3;	/* B, 8 bits */
+		pipe->g_bit = 3;	/* G, 8 bits */
+		pipe->alpha_enable = 1;
+		pipe->unpack_tight = 1;
+		pipe->unpack_align_msb = 0;
+		pipe->unpack_count = 3;
+		pipe->element3 = C3_ALPHA;	/* alpha */
+		pipe->element2 = C2_R_Cr;	/* R */
+		pipe->element1 = C0_G_Y;	/* G */
+		pipe->element0 = C1_B_Cb;	/* B */
+		pipe->bpp = 4;		/* 4 bpp */
+		break;
+	case MDP_RGBA_8888:
+		pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
+		pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
+		pipe->a_bit = 3;	/* alpha, 8 bits */
+		pipe->r_bit = 3;	/* R, 8 bits */
+		pipe->b_bit = 3;	/* B, 8 bits */
+		pipe->g_bit = 3;	/* G, 8 bits */
+		pipe->alpha_enable = 1;
+		pipe->unpack_tight = 1;
+		pipe->unpack_align_msb = 0;
+		pipe->unpack_count = 3;
+		pipe->element3 = C3_ALPHA;	/* alpha */
+		pipe->element2 = C1_B_Cb;	/* B */
+		pipe->element1 = C0_G_Y;	/* G */
+		pipe->element0 = C2_R_Cr;	/* R */
+		pipe->bpp = 4;		/* 4 bpp */
+		break;
+	case MDP_RGBX_8888:
+		pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
+		pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
+		pipe->a_bit = 3;
+		pipe->r_bit = 3;	/* R, 8 bits */
+		pipe->b_bit = 3;	/* B, 8 bits */
+		pipe->g_bit = 3;	/* G, 8 bits */
+		pipe->alpha_enable = 0;
+		pipe->unpack_tight = 1;
+		pipe->unpack_align_msb = 0;
+		pipe->unpack_count = 3;
+		pipe->element3 = C3_ALPHA;	/* alpha */
+		pipe->element2 = C1_B_Cb;	/* B */
+		pipe->element1 = C0_G_Y;	/* G */
+		pipe->element0 = C2_R_Cr;	/* R */
+		pipe->bpp = 4;		/* 4 bpp */
+		break;
+	case MDP_BGRA_8888:
+		pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
+		pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
+		pipe->a_bit = 3;	/* alpha, 8 bits */
+		pipe->r_bit = 3;	/* R, 8 bits */
+		pipe->b_bit = 3;	/* B, 8 bits */
+		pipe->g_bit = 3;	/* G, 8 bits */
+		pipe->alpha_enable = 1;
+		pipe->unpack_tight = 1;
+		pipe->unpack_align_msb = 0;
+		pipe->unpack_count = 3;
+		pipe->element3 = C3_ALPHA;	/* alpha */
+		pipe->element2 = C2_R_Cr;	/* R */
+		pipe->element1 = C0_G_Y;	/* G */
+		pipe->element0 = C1_B_Cb;	/* B */
+		pipe->bpp = 4;		/* 4 bpp */
+		break;
+	case MDP_YCRYCB_H2V1:
+		pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
+		pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
+		pipe->a_bit = 0;	/* no alpha */
+		pipe->r_bit = 3;	/* R, 8 bits */
+		pipe->b_bit = 3;	/* B, 8 bits */
+		pipe->g_bit = 3;	/* G, 8 bits */
+		pipe->alpha_enable = 0;
+		pipe->unpack_tight = 1;
+		pipe->unpack_align_msb = 0;
+		pipe->unpack_count = 3;
+		pipe->element3 = C0_G_Y;	/* G */
+		pipe->element2 = C2_R_Cr;	/* R */
+		pipe->element1 = C0_G_Y;	/* G */
+		pipe->element0 = C1_B_Cb;	/* B */
+		pipe->bpp = 2;		/* 2 bpp */
+		pipe->chroma_sample = MDP4_CHROMA_H2V1;
+		break;
+	case MDP_Y_CRCB_H2V1:
+	case MDP_Y_CBCR_H2V1:
+	case MDP_Y_CRCB_H2V2:
+	case MDP_Y_CBCR_H2V2:
+	case MDP_Y_CRCB_H1V1:
+	case MDP_Y_CBCR_H1V1:
+		pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
+		pipe->fetch_plane = OVERLAY_PLANE_PSEUDO_PLANAR;
+		pipe->a_bit = 0;
+		pipe->r_bit = 3;	/* R, 8 bits */
+		pipe->b_bit = 3;	/* B, 8 bits */
+		pipe->g_bit = 3;	/* G, 8 bits */
+		pipe->alpha_enable = 0;
+		pipe->unpack_tight = 1;
+		pipe->unpack_align_msb = 0;
+		pipe->unpack_count = 1;		/* 2 */
+		pipe->element3 = C0_G_Y;	/* not used */
+		pipe->element2 = C0_G_Y;	/* not used */
+		if (pipe->src_format == MDP_Y_CRCB_H2V1) {
+			pipe->element1 = C2_R_Cr;	/* R */
+			pipe->element0 = C1_B_Cb;	/* B */
+			pipe->chroma_sample = MDP4_CHROMA_H2V1;
+		} else if (pipe->src_format == MDP_Y_CRCB_H1V1) {
+			pipe->element1 = C2_R_Cr;	/* R */
+			pipe->element0 = C1_B_Cb;	/* B */
+			if (pipe->src_width > YUV_444_MAX_WIDTH)
+				pipe->chroma_sample = MDP4_CHROMA_H1V2;
+			else
+				pipe->chroma_sample = MDP4_CHROMA_RGB;
+		} else if (pipe->src_format == MDP_Y_CBCR_H2V1) {
+			pipe->element1 = C1_B_Cb;	/* B */
+			pipe->element0 = C2_R_Cr;	/* R */
+			pipe->chroma_sample = MDP4_CHROMA_H2V1;
+		} else if (pipe->src_format == MDP_Y_CBCR_H1V1) {
+			pipe->element1 = C1_B_Cb;	/* B */
+			pipe->element0 = C2_R_Cr;	/* R */
+			if (pipe->src_width > YUV_444_MAX_WIDTH)
+				pipe->chroma_sample = MDP4_CHROMA_H1V2;
+			else
+				pipe->chroma_sample = MDP4_CHROMA_RGB;
+		} else if (pipe->src_format == MDP_Y_CRCB_H2V2) {
+			pipe->element1 = C2_R_Cr;	/* R */
+			pipe->element0 = C1_B_Cb;	/* B */
+			pipe->chroma_sample = MDP4_CHROMA_420;
+		} else if (pipe->src_format == MDP_Y_CBCR_H2V2) {
+			pipe->element1 = C1_B_Cb;	/* B */
+			pipe->element0 = C2_R_Cr;	/* R */
+			pipe->chroma_sample = MDP4_CHROMA_420;
+		}
+		pipe->bpp = 2;	/* 2 bpp */
+		break;
+	case MDP_Y_CBCR_H2V2_TILE:
+	case MDP_Y_CRCB_H2V2_TILE:
+		pipe->frame_format = MDP4_FRAME_FORMAT_VIDEO_SUPERTILE;
+		pipe->fetch_plane = OVERLAY_PLANE_PSEUDO_PLANAR;
+		pipe->a_bit = 0;
+		pipe->r_bit = 3;	/* R, 8 bits */
+		pipe->b_bit = 3;	/* B, 8 bits */
+		pipe->g_bit = 3;	/* G, 8 bits */
+		pipe->alpha_enable = 0;
+		pipe->unpack_tight = 1;
+		pipe->unpack_align_msb = 0;
+		pipe->unpack_count = 1;		/* 2 */
+		pipe->element3 = C0_G_Y;	/* not used */
+		pipe->element2 = C0_G_Y;	/* not used */
+		if (pipe->src_format == MDP_Y_CRCB_H2V2_TILE) {
+			pipe->element1 = C2_R_Cr;	/* R */
+			pipe->element0 = C1_B_Cb;	/* B */
+			pipe->chroma_sample = MDP4_CHROMA_420;
+		} else if (pipe->src_format == MDP_Y_CBCR_H2V2_TILE) {
+			pipe->element1 = C1_B_Cb;	/* B */
+			pipe->element0 = C2_R_Cr;	/* R */
+			pipe->chroma_sample = MDP4_CHROMA_420;
+		}
+		pipe->bpp = 2;	/* 2 bpp */
+		break;
+	case MDP_Y_CR_CB_H2V2:
+	case MDP_Y_CB_CR_H2V2:
+		pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
+		pipe->fetch_plane = OVERLAY_PLANE_PLANAR;
+		pipe->a_bit = 0;
+		pipe->r_bit = 3;	/* R, 8 bits */
+		pipe->b_bit = 3;	/* B, 8 bits */
+		pipe->g_bit = 3;	/* G, 8 bits */
+		pipe->alpha_enable = 0;
+		pipe->unpack_tight = 1;
+		pipe->unpack_align_msb = 0;
+		pipe->unpack_count = 1;		/* 2 */
+		pipe->element3 = C0_G_Y;	/* not used */
+		pipe->element2 = C0_G_Y;	/* not used */
+		if (pipe->src_format == MDP_Y_CR_CB_H2V2) {
+			pipe->element1 = C2_R_Cr;	/* R */
+			pipe->element0 = C1_B_Cb;	/* B */
+			pipe->chroma_sample = MDP4_CHROMA_420;
+		} else if (pipe->src_format == MDP_Y_CB_CR_H2V2) {
+			pipe->element1 = C1_B_Cb;	/* B */
+			pipe->element0 = C2_R_Cr;	/* R */
+			pipe->chroma_sample = MDP4_CHROMA_420;
+		}
+		pipe->bpp = 2;	/* 2 bpp */
+		break;
+	default:
+		/* not likely */
+		mdp4_stat.err_format++;
+		return -ERANGE;
+	}
+
+	return 0;
+}
+
+/*
+ * color_key_convert: output with 12 bits color key
+ */
+static uint32 color_key_convert(int start, int num, uint32 color)
+{
+	uint32 data;
+
+	data = (color >> start) & ((1 << num) - 1);
+
+	/* convert to 8 bits */
+	if (num == 5)
+		data = ((data << 3) | (data >> 2));
+	else if (num == 6)
+		data = ((data << 2) | (data >> 4));
+
+	/* convert 8 bits to 12 bits */
+	data = (data << 4) | (data >> 4);
+
+	return data;
+}
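+
+/*
+ * Illustrative walk-through of color_key_convert() (values derived from the
+ * shifts above, shown here only as a worked example):
+ *   5-bit field 0x1f -> (0x1f << 3) | (0x1f >> 2) = 0xff -> 0xff0 | 0x0f = 0xfff
+ *   6-bit field 0x20 -> (0x20 << 2) | (0x20 >> 4) = 0x82 -> 0x820 | 0x08 = 0x828
+ *   8-bit field 0x80 ->                             0x80 -> 0x800 | 0x08 = 0x808
+ * i.e. the upper bits are replicated into the low bits so that a full-scale
+ * input maps to a full-scale 12-bit output.
+ */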
+
+void transp_color_key(int format, uint32 transp,
+			uint32 *c0, uint32 *c1, uint32 *c2)
+{
+	int b_start, g_start, r_start;
+	int b_num, g_num, r_num;
+
+	switch (format) {
+	case MDP_RGB_565:
+		b_start = 0;
+		g_start = 5;
+		r_start = 11;
+		r_num = 5;
+		g_num = 6;
+		b_num = 5;
+		break;
+	case MDP_RGB_888:
+	case MDP_XRGB_8888:
+	case MDP_ARGB_8888:
+	case MDP_BGRA_8888:
+		b_start = 0;
+		g_start = 8;
+		r_start = 16;
+		r_num = 8;
+		g_num = 8;
+		b_num = 8;
+		break;
+	case MDP_RGBA_8888:
+	case MDP_RGBX_8888:
+		b_start = 16;
+		g_start = 8;
+		r_start = 0;
+		r_num = 8;
+		g_num = 8;
+		b_num = 8;
+		break;
+	case MDP_BGR_565:
+		b_start = 11;
+		g_start = 5;
+		r_start = 0;
+		r_num = 5;
+		g_num = 6;
+		b_num = 5;
+		break;
+	case MDP_Y_CB_CR_H2V2:
+	case MDP_Y_CBCR_H2V2:
+	case MDP_Y_CBCR_H2V1:
+		b_start = 8;
+		g_start = 16;
+		r_start = 0;
+		r_num = 8;
+		g_num = 8;
+		b_num = 8;
+		break;
+	case MDP_Y_CR_CB_H2V2:
+	case MDP_Y_CRCB_H2V2:
+	case MDP_Y_CRCB_H2V1:
+	case MDP_Y_CRCB_H1V1:
+	case MDP_Y_CBCR_H1V1:
+		b_start = 0;
+		g_start = 16;
+		r_start = 8;
+		r_num = 8;
+		g_num = 8;
+		b_num = 8;
+		break;
+	default:
+		b_start = 0;
+		g_start = 8;
+		r_start = 16;
+		r_num = 8;
+		g_num = 8;
+		b_num = 8;
+		break;
+	}
+
+	*c0 = color_key_convert(g_start, g_num, transp);
+	*c1 = color_key_convert(b_start, b_num, transp);
+	*c2 = color_key_convert(r_start, r_num, transp);
+}
+
+uint32 mdp4_overlay_format(struct mdp4_overlay_pipe *pipe)
+{
+	uint32	format;
+
+	format = 0;
+
+	if (pipe->solid_fill)
+		format |= MDP4_FORMAT_SOLID_FILL;
+
+	if (pipe->unpack_align_msb)
+		format |= MDP4_FORMAT_UNPACK_ALIGN_MSB;
+
+	if (pipe->unpack_tight)
+		format |= MDP4_FORMAT_UNPACK_TIGHT;
+
+	if (pipe->alpha_enable)
+		format |= MDP4_FORMAT_ALPHA_ENABLE;
+
+	if (pipe->flags & MDP_SOURCE_ROTATED_90)
+		format |= MDP4_FORMAT_90_ROTATED;
+	format |= (pipe->unpack_count << 13);
+	format |= ((pipe->bpp - 1) << 9);
+	format |= (pipe->a_bit << 6);
+	format |= (pipe->r_bit << 4);
+	format |= (pipe->b_bit << 2);
+	format |= pipe->g_bit;
+
+	format |= (pipe->frame_format << 29);
+
+	if (pipe->fetch_plane == OVERLAY_PLANE_PSEUDO_PLANAR ||
+			pipe->fetch_plane == OVERLAY_PLANE_PLANAR) {
+		/* video/graphic */
+		format |= (pipe->fetch_plane << 19);
+		format |= (pipe->chroma_site << 28);
+		format |= (pipe->chroma_sample << 26);
+	}
+
+	return format;
+}
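+
+/*
+ * Reading aid for the SRC_FORMAT word assembled above.  Field positions are
+ * taken directly from the shifts in mdp4_overlay_format(); the exact field
+ * widths of fetch_plane and unpack_count are assumed to be two bits:
+ *   [31:29] frame_format   [28] chroma_site   [27:26] chroma_sample
+ *   [20:19] fetch_plane    [14:13] unpack_count   [10:9] bpp - 1
+ *   [7:6] a_bit   [5:4] r_bit   [3:2] b_bit   [1:0] g_bit
+ * plus the SOLID_FILL/UNPACK/ALPHA/ROTATED flag bits OR'ed in at the top.
+ */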
+
+uint32 mdp4_overlay_unpack_pattern(struct mdp4_overlay_pipe *pipe)
+{
+	return (pipe->element3 << 24) | (pipe->element2 << 16) |
+			(pipe->element1 << 8) | pipe->element0;
+}
+
+/*
+ * mdp4_overlayproc_cfg: only be called from base layer
+ */
+void mdp4_overlayproc_cfg(struct mdp4_overlay_pipe *pipe)
+{
+	uint32 data, intf;
+	char *overlay_base;
+
+	intf = 0;
+	if (pipe->mixer_num == MDP4_MIXER1) {
+		overlay_base = MDP_BASE + MDP4_OVERLAYPROC1_BASE;/* 0x18000 */
+		intf = inpdw(MDP_BASE + 0x0038); /* MDP_DISP_INTF_SEL */
+		intf >>= 4;
+		intf &= 0x03;
+	} else
+		overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
+
+	if (mdp_is_in_isr == FALSE)
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	/*
+	 * BLT is only supported on the primary display
+	 */
+	if (pipe->mixer_num == MDP4_MIXER0 && pipe->blt_addr) {
+		int off, bpp;
+#ifdef BLT_RGB565
+		bpp = 2;  /* overlay output is RGB565 */
+#else
+		bpp = 3;  /* overlay output is RGB888 */
+#endif
+		data = pipe->src_height;
+		data <<= 16;
+		data |= pipe->src_width;
+		outpdw(overlay_base + 0x0008, data); /* ROI, height + width */
+		if (ctrl->panel_mode & MDP4_PANEL_LCDC ||
+				ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO) {
+			outpdw(overlay_base + 0x000c, pipe->blt_addr);
+			outpdw(overlay_base + 0x0010, pipe->src_width * bpp);
+			off = pipe->src_height * pipe->src_width * bpp;
+			outpdw(overlay_base + 0x001c, pipe->blt_addr + off);
+			/* LCDC - FRAME BUFFER + vsync rate */
+			outpdw(overlay_base + 0x0004, 0x02);
+		} else {	/* MDDI */
+			off = 0;
+			if (pipe->ov_cnt & 0x01)
+				off = pipe->src_height * pipe->src_width * bpp;
+
+			outpdw(overlay_base + 0x000c, pipe->blt_addr + off);
+			/* overlay output is RGB888 */
+			outpdw(overlay_base + 0x0010, pipe->src_width * bpp);
+			outpdw(overlay_base + 0x001c, pipe->blt_addr + off);
+			/* MDDI - BLT + on demand */
+			outpdw(overlay_base + 0x0004, 0x08);
+		}
+#ifdef BLT_RGB565
+		outpdw(overlay_base + 0x0014, 0x1); /* RGB565 */
+#else
+		outpdw(overlay_base + 0x0014, 0x0); /* RGB888 */
+#endif
+	} else {
+		data = pipe->src_height;
+		data <<= 16;
+		data |= pipe->src_width;
+		outpdw(overlay_base + 0x0008, data); /* ROI, height + width */
+		outpdw(overlay_base + 0x000c, pipe->srcp0_addr);
+		outpdw(overlay_base + 0x0010, pipe->srcp0_ystride);
+		outpdw(overlay_base + 0x0004, 0x01); /* directout */
+	}
+
+	if (pipe->mixer_num == MDP4_MIXER1) {
+		if (intf == TV_INTF) {
+			outpdw(overlay_base + 0x0014, 0x02); /* yuv422 */
+			/* overlay1 CSC config */
+			outpdw(overlay_base + 0x0200, 0x05); /* rgb->yuv */
+		}
+	}
+
+#ifdef MDP4_IGC_LUT_ENABLE
+	outpdw(overlay_base + 0x0014, 0x4);	/* GC_LUT_EN, 888 */
+#endif
+
+	if (mdp_is_in_isr == FALSE)
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+
+int mdp4_overlay_pipe_staged(int mixer)
+{
+	uint32 data, mask, i;
+	int p1, p2;
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	data = inpdw(MDP_BASE + 0x10100);
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	p1 = 0;
+	p2 = 0;
+	for (i = 0; i < 8; i++) {
+		mask = data & 0x0f;
+		if (mask) {
+			if (mask <= 4)
+				p1++;
+			else
+				p2++;
+		}
+		data >>= 4;
+	}
+
+	if (mixer)
+		return p2;
+	else
+		return p1;
+}
+
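+/*
+ * MDP_LAYERMIXER_IN_CFG packs one 4-bit stage field per source pipe, as
+ * implied by the shift math in stage_up/stage_down below: VG1/VG2 occupy
+ * nibbles 0-1 (snum = 0, 4) and RGB1/RGB2 occupy nibbles 2-3 (snum = 8, 12).
+ * Stage values for mixer1 are offset by 8 so both mixers share the register.
+ */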
+void mdp4_mixer_stage_up(struct mdp4_overlay_pipe *pipe)
+{
+	uint32 data, mask, snum, stage, mixer, pnum;
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	stage = pipe->mixer_stage;
+	mixer = pipe->mixer_num;
+	pnum = pipe->pipe_num;
+
+	/* MDP_LAYERMIXER_IN_CFG, shared by both mixer 0 and 1 */
+	data = inpdw(MDP_BASE + 0x10100);
+
+	if (mixer == MDP4_MIXER1)
+		stage += 8;
+
+	if (pipe->pipe_num >= OVERLAY_PIPE_VG1) {/* VG1 and VG2 */
+		pnum -= OVERLAY_PIPE_VG1; /* start from 0 */
+		snum = 0;
+		snum += (4 * pnum);
+	} else {
+		snum = 8;
+		snum += (4 * pnum);	/* RGB1 and RGB2 */
+	}
+
+	mask = 0x0f;
+	mask <<= snum;
+	stage <<= snum;
+	data &= ~mask;	/* clear old bits */
+
+	data |= stage;
+
+	outpdw(MDP_BASE + 0x10100, data); /* MDP_LAYERMIXER_IN_CFG */
+
+	data = inpdw(MDP_BASE + 0x10100);
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+	ctrl->stage[pipe->mixer_num][pipe->mixer_stage] = pipe;	/* keep it */
+}
+
+void mdp4_mixer_stage_down(struct mdp4_overlay_pipe *pipe)
+{
+	uint32 data, mask, snum, stage, mixer, pnum;
+
+	stage = pipe->mixer_stage;
+	mixer = pipe->mixer_num;
+	pnum = pipe->pipe_num;
+
+	if (pipe != ctrl->stage[mixer][stage])	/* not running */
+		return;
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	/* MDP_LAYERMIXER_IN_CFG, shared by both mixer 0 and 1 */
+	data = inpdw(MDP_BASE + 0x10100);
+
+	if (mixer == MDP4_MIXER1)
+		stage += 8;
+
+	if (pipe->pipe_num >= OVERLAY_PIPE_VG1) {/* VG1 and VG2 */
+		pnum -= OVERLAY_PIPE_VG1; /* start from 0 */
+		snum = 0;
+		snum += (4 * pnum);
+	} else {
+		snum = 8;
+		snum += (4 * pnum);	/* RGB1 and RGB2 */
+	}
+
+	mask = 0x0f;
+	mask <<= snum;
+	data &= ~mask;	/* clear old bits */
+
+	outpdw(MDP_BASE + 0x10100, data); /* MDP_LAYERMIXER_IN_CFG */
+
+	data = inpdw(MDP_BASE + 0x10100);
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+	ctrl->stage[pipe->mixer_num][pipe->mixer_stage] = NULL;	/* clear it */
+}
+
+void mdp4_mixer_blend_setup(struct mdp4_overlay_pipe *pipe)
+{
+	struct mdp4_overlay_pipe *bg_pipe;
+	unsigned char *overlay_base, *rgb_base;
+	uint32 c0, c1, c2, blend_op, constant_color = 0, rgb_src_format;
+	int off;
+
+	if (pipe->mixer_num) 	/* mixer number, /dev/fb0, /dev/fb1 */
+		overlay_base = MDP_BASE + MDP4_OVERLAYPROC1_BASE;/* 0x18000 */
+	else
+		overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
+
+	/* stage 0 to stage 2 */
+	off = 0x20 * (pipe->mixer_stage - MDP4_MIXER_STAGE0);
+
+	bg_pipe = mdp4_overlay_stage_pipe(pipe->mixer_num,
+					MDP4_MIXER_STAGE_BASE);
+	if (bg_pipe == NULL) {
+		pr_err("%s: Error: no bg_pipe\n", __func__);
+		return;
+	}
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	blend_op = 0;
+
+	if (pipe->is_fg) {
+		blend_op |= (MDP4_BLEND_FG_ALPHA_FG_CONST |
+				MDP4_BLEND_BG_ALPHA_BG_CONST);
+		outpdw(overlay_base + off + 0x108, pipe->alpha);
+		outpdw(overlay_base + off + 0x10c, 0xff - pipe->alpha);
+		if (pipe->alpha == 0xff) {
+			rgb_base = MDP_BASE + MDP4_RGB_BASE;
+			rgb_base += MDP4_RGB_OFF * bg_pipe->pipe_num;
+			rgb_src_format = inpdw(rgb_base + 0x50);
+			rgb_src_format |= MDP4_FORMAT_SOLID_FILL;
+			outpdw(rgb_base + 0x50, rgb_src_format);
+			outpdw(rgb_base + 0x1008, constant_color);
+		}
+	} else {
+		if (bg_pipe->alpha_enable && pipe->alpha_enable) {
+			/* both pipes have alpha */
+			blend_op |= (MDP4_BLEND_FG_ALPHA_BG_PIXEL |
+				MDP4_BLEND_FG_INV_ALPHA |
+				MDP4_BLEND_BG_ALPHA_BG_PIXEL);
+		} else if (bg_pipe->alpha_enable && pipe->alpha_enable == 0) {
+			/* bg pipe has alpha, fg pipe does not */
+			blend_op = (MDP4_BLEND_BG_ALPHA_BG_PIXEL |
+				MDP4_BLEND_FG_ALPHA_BG_PIXEL |
+				MDP4_BLEND_FG_INV_ALPHA);
+		}
+	}
+
+
+	if (pipe->transp != MDP_TRANSP_NOP) {
+		if (pipe->is_fg) {
+			transp_color_key(pipe->src_format, pipe->transp,
+					&c0, &c1, &c2);
+			/* Fg blocked */
+			blend_op |= MDP4_BLEND_FG_TRANSP_EN;
+			/* lower limit */
+			outpdw(overlay_base + off + 0x110,
+					(c1 << 16 | c0));/* low */
+			outpdw(overlay_base + off + 0x114, c2);/* low */
+			/* upper limit */
+			outpdw(overlay_base + off + 0x118,
+					(c1 << 16 | c0));
+			outpdw(overlay_base + off + 0x11c, c2);
+		} else {
+			transp_color_key(bg_pipe->src_format,
+				pipe->transp, &c0, &c1, &c2);
+			/* bg blocked */
+			blend_op |= MDP4_BLEND_BG_TRANSP_EN;
+			/* lower limit */
+			outpdw(overlay_base + 0x180,
+					(c1 << 16 | c0));/* low */
+			outpdw(overlay_base + 0x184, c2);/* low */
+			/* upper limit */
+			outpdw(overlay_base + 0x188,
+					(c1 << 16 | c0));/* high */
+			outpdw(overlay_base + 0x18c, c2);/* high */
+		}
+	}
+
+	outpdw(overlay_base + off + 0x104, blend_op);
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+
+void mdp4_overlay_reg_flush(struct mdp4_overlay_pipe *pipe, int all)
+{
+	struct mdp4_overlay_pipe *bg_pipe;
+	uint32 bits = 0;
+
+	if (pipe->mixer_num == MDP4_MIXER1)
+		bits |= 0x02;
+	else
+		bits |= 0x01;
+
+	if (all) {
+		if (pipe->pipe_num <= OVERLAY_PIPE_RGB2) {
+			if (pipe->pipe_num == OVERLAY_PIPE_RGB2)
+				bits |= 0x20;
+			else
+				bits |= 0x10;
+		} else {
+			if (pipe->is_fg && pipe->alpha == 0xFF) {
+				bg_pipe = mdp4_overlay_stage_pipe(
+							pipe->mixer_num,
+							MDP4_MIXER_STAGE_BASE);
+				if (bg_pipe->pipe_num <= OVERLAY_PIPE_RGB2) {
+					if (bg_pipe->pipe_num ==
+							OVERLAY_PIPE_RGB2)
+						bits |= 0x20;
+					else
+						bits |= 0x10;
+				}
+			}
+			if (pipe->pipe_num == OVERLAY_PIPE_VG2)
+				bits |= 0x08;
+			else
+				bits |= 0x04;
+		}
+	}
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	outpdw(MDP_BASE + 0x18000, bits);	/* MDP_OVERLAY_REG_FLUSH */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
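+
+/*
+ * Flush bit assignment as used above (inferred from this function only):
+ * bits 0/1 select overlay engine 0/1, bits 2/3 flush VG1/VG2 and bits 4/5
+ * flush RGB1/RGB2, so a full-alpha foreground update also flushes the
+ * staged base-layer RGB pipe that was switched to solid fill.
+ */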
+
+struct mdp4_overlay_pipe *mdp4_overlay_stage_pipe(int mixer, int stage)
+{
+	return ctrl->stage[mixer][stage];
+}
+
+struct mdp4_overlay_pipe *mdp4_overlay_ndx2pipe(int ndx)
+{
+	struct mdp4_overlay_pipe *pipe;
+
+	if (ndx <= 0 || ndx > MDP4_MAX_PIPE)
+		return NULL;
+
+	pipe = &ctrl->plist[ndx - 1];	/* ndx start from 1 */
+
+	if (pipe->pipe_used == 0)
+		return NULL;
+
+	return pipe;
+}
+
+struct mdp4_overlay_pipe *mdp4_overlay_pipe_alloc(
+		int ptype, int mixer, int req_share)
+{
+	int i, j, ndx, found;
+	struct mdp4_overlay_pipe *pipe, *opipe;
+	struct mdp4_pipe_desc  *pd;
+
+	found = 0;
+	pipe = &ctrl->plist[0];
+
+	for (i = 0; i < MDP4_MAX_PIPE; i++) {
+		if (pipe->pipe_type == ptype && pipe->pipe_used == 0) {
+			pd = &ctrl->ov_pipe[pipe->pipe_num];
+			if (pd->share) { /* pipe can be shared */
+				if (pd->ref_cnt == 0) {
+					/* not yet been used */
+					found++;
+					break;
+				}
+				/* pipe occupied already */
+				if (req_share && pd->ref_cnt < MDP4_MAX_SHARE) {
+					for (j = 0; j < MDP4_MAX_SHARE; j++) {
+						ndx = pd->ndx_list[j];
+						if (ndx != 0)
+							break;
+					}
+					/* ndx starts from 1 */
+					opipe = &ctrl->plist[ndx - 1];
+					/*
+					 * occupied pipe is willing to share
+					 * and is on the same mixer
+					 */
+					if (opipe->pipe_share &&
+						opipe->mixer_num == mixer) {
+						found++;
+						break;
+					}
+				}
+			} else {	/* not a shared pipe */
+				if (req_share == 0  && pd->ref_cnt == 0) {
+					found++;
+					break;
+				}
+			}
+		}
+		pipe++;
+	}
+
+	if (found) {
+		init_completion(&pipe->comp);
+		init_completion(&pipe->dmas_comp);
+		pr_info("%s: pipe=%x ndx=%d num=%d share=%d cnt=%d\n",
+			__func__, (int)pipe, pipe->pipe_ndx, pipe->pipe_num,
+			pd->share, pd->ref_cnt);
+		return pipe;
+	}
+
+	pr_debug("%s: ptype=%d mixer=%d req_share=%d FAILED\n",
+			__func__, ptype, mixer, req_share);
+
+	return NULL;
+}
+
+
+void mdp4_overlay_pipe_free(struct mdp4_overlay_pipe *pipe)
+{
+	int i;
+	uint32 ptype, num, ndx;
+	struct mdp4_pipe_desc  *pd;
+
+	pr_debug("%s: pipe=%x ndx=%d\n", __func__,
+				(int)pipe, pipe->pipe_ndx);
+	pd = &ctrl->ov_pipe[pipe->pipe_num];
+	if (pd->ref_cnt) {
+		pd->ref_cnt--;
+		for (i = 0; i < MDP4_MAX_SHARE; i++) {
+			if (pd->ndx_list[i] == pipe->pipe_ndx) {
+				pd->ndx_list[i] = 0;
+				break;
+			}
+		}
+	}
+
+	pd->player = NULL;
+
+	ptype = pipe->pipe_type;
+	num = pipe->pipe_num;
+	ndx = pipe->pipe_ndx;
+
+	memset(pipe, 0, sizeof(*pipe));
+
+	pipe->pipe_type = ptype;
+	pipe->pipe_num = num;
+	pipe->pipe_ndx = ndx;
+}
+
+int mdp4_overlay_req_check(uint32 id, uint32 z_order, uint32 mixer)
+{
+	struct mdp4_overlay_pipe *pipe;
+
+	pipe = ctrl->stage[mixer][z_order];
+
+	if (pipe == NULL)
+		return 0;
+
+	if (pipe->pipe_ndx == id)	/* same req, recycle */
+		return 0;
+
+	if (id == MSMFB_NEW_REQUEST) {  /* new request */
+		if (pipe->pipe_num >= OVERLAY_PIPE_VG1) /* share pipe */
+			return 0;
+	}
+
+	return -EPERM;
+}
+
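+/*
+ * Worked example of the check below (numbers are purely illustrative):
+ * with a 25 MHz panel clock, a 100 MHz MDP clock and 520 pixel clocks per
+ * hsync line, mdp_clks_per_hsync ~= 520 * 100/25 = 2080.  Downscaling an
+ * 800x600 source to 480x300 on a 480-wide panel gives
+ *   fill_rate_y_dir = 800*600/300 = 1600,
+ *   fill_rate_x_dir = (480-480)+800 = 800,
+ *   fillratex100 = 100*1600/480 = 333,
+ *   mdp_pixels_produced = 100*2080/333 = 624,
+ * which exceeds xres (480), so no LCDC underflow is reported.
+ */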
+static int mdp4_overlay_validate_downscale(struct mdp_overlay *req,
+	struct msm_fb_data_type *mfd, uint32 perf_level, uint32 pclk_rate)
+{
+	__u32 panel_clk_khz, mdp_clk_khz;
+	__u32 num_hsync_pix_clks, mdp_clks_per_hsync, src_wh;
+	__u32 hsync_period_ps, mdp_period_ps, total_hsync_period_ps;
+	unsigned long fill_rate_y_dir, fill_rate_x_dir;
+	unsigned long fillratex100, mdp_pixels_produced;
+	unsigned long mdp_clk_hz;
+
+	pr_debug("%s: LCDC Mode Downscale validation with MDP Core"
+		" Clk rate\n", __func__);
+	pr_debug("src_w %u, src_h %u, dst_w %u, dst_h %u\n",
+		req->src_rect.w, req->src_rect.h, req->dst_rect.w,
+		req->dst_rect.h);
+
+
+	panel_clk_khz = pclk_rate/1000;
+	mdp_clk_hz = mdp_perf_level2clk_rate(perf_level);
+
+	if (!mdp_clk_hz) {
+		pr_debug("mdp_perf_level2clk_rate returned 0,"
+				 "Downscale Validation incomplete\n");
+		return 0;
+	}
+
+	mdp_clk_khz = mdp_clk_hz/1000;
+
+	num_hsync_pix_clks = mfd->panel_info.lcdc.h_back_porch +
+		mfd->panel_info.lcdc.h_front_porch +
+		mfd->panel_info.lcdc.h_pulse_width +
+		mfd->panel_info.xres;
+
+	hsync_period_ps = 1000000000/panel_clk_khz;
+	mdp_period_ps = 1000000000/mdp_clk_khz;
+
+	total_hsync_period_ps = num_hsync_pix_clks * hsync_period_ps;
+	mdp_clks_per_hsync = total_hsync_period_ps/mdp_period_ps;
+
+	pr_debug("hsync_period_ps %u, mdp_period_ps %u,"
+		"total_hsync_period_ps %u\n", hsync_period_ps,
+		mdp_period_ps, total_hsync_period_ps);
+
+	src_wh = req->src_rect.w * req->src_rect.h;
+	if (src_wh % req->dst_rect.h)
+		fill_rate_y_dir = (src_wh / req->dst_rect.h) + 1;
+	else
+		fill_rate_y_dir = (src_wh / req->dst_rect.h);
+
+	fill_rate_x_dir = (mfd->panel_info.xres - req->dst_rect.w)
+		+ req->src_rect.w;
+
+	if (fill_rate_y_dir >= fill_rate_x_dir)
+		fillratex100 = 100 * fill_rate_y_dir / mfd->panel_info.xres;
+	else
+		fillratex100 = 100 * fill_rate_x_dir / mfd->panel_info.xres;
+
+	pr_debug("mdp_clks_per_hsync %u, fill_rate_y_dir %lu,"
+		"fill_rate_x_dir %lu\n", mdp_clks_per_hsync,
+		fill_rate_y_dir, fill_rate_x_dir);
+
+	mdp_pixels_produced = 100 * mdp_clks_per_hsync/fillratex100;
+	pr_debug("fillratex100 %lu, mdp_pixels_produced %lu\n",
+		fillratex100, mdp_pixels_produced);
+	if (mdp_pixels_produced <= mfd->panel_info.xres) {
+		pr_err("%s(): LCDC underflow detected during downscale\n",
+			__func__);
+		return -ERANGE;
+	}
+
+	return 0;
+}
+
+static int mdp4_overlay_req2pipe(struct mdp_overlay *req, int mixer,
+			struct mdp4_overlay_pipe **ppipe,
+			struct msm_fb_data_type *mfd)
+{
+	struct mdp4_overlay_pipe *pipe;
+	struct mdp4_pipe_desc  *pd;
+	int ret, ptype, req_share;
+	int j;
+
+	if (mfd == NULL) {
+		pr_err("%s: mfd == NULL, -ENODEV\n", __func__);
+		return -ENODEV;
+	}
+
+	if (mixer >= MDP4_MAX_MIXER) {
+		pr_err("%s: mixer out of range!\n", __func__);
+		mdp4_stat.err_mixer++;
+		return -ERANGE;
+	}
+
+	if (req->z_order < 0 || req->z_order > 2) {
+		pr_err("%s: z_order=%d out of range!\n", __func__,
+				req->z_order);
+		mdp4_stat.err_zorder++;
+		return -ERANGE;
+	}
+
+	if (req->src_rect.h == 0 || req->src_rect.w == 0) {
+		pr_err("%s: src img of zero size!\n", __func__);
+		mdp4_stat.err_size++;
+		return -EINVAL;
+	}
+
+
+	if (req->dst_rect.h > (req->src_rect.h * 8)) {	/* too much */
+		mdp4_stat.err_scale++;
+		pr_err("%s: scale up, too much (h)!\n", __func__);
+		return -ERANGE;
+	}
+
+	if (req->src_rect.h > (req->dst_rect.h * 8)) {	/* too little */
+		mdp4_stat.err_scale++;
+		pr_err("%s: scale down, too little (h)!\n", __func__);
+		return -ERANGE;
+	}
+
+	if (req->dst_rect.w > (req->src_rect.w * 8)) {	/* too much */
+		mdp4_stat.err_scale++;
+		pr_err("%s: scale up, too much (w)!\n", __func__);
+		return -ERANGE;
+	}
+
+	if (req->src_rect.w > (req->dst_rect.w * 8)) {	/* too little */
+		mdp4_stat.err_scale++;
+		pr_err("%s: scale down, too little (w)!\n", __func__);
+		return -ERANGE;
+	}
+
+	if (mdp_hw_revision == MDP4_REVISION_V1) {
+		/* non-integer downscaling ratios smaller than 1/4
+		 * are not supported
+		 */
+		if (req->src_rect.h > (req->dst_rect.h * 4)) {
+			if (req->src_rect.h % req->dst_rect.h) {
+				mdp4_stat.err_scale++;
+				pr_err("%s: need integer (h)!\n", __func__);
+				return -ERANGE;
+			}
+		}
+
+		if (req->src_rect.w > (req->dst_rect.w * 4)) {
+			if (req->src_rect.w % req->dst_rect.w) {
+				mdp4_stat.err_scale++;
+				pr_err("%s: need integer (w)!\n", __func__);
+				return -ERANGE;
+			}
+		}
+	}
+
+	if (((req->src_rect.x + req->src_rect.w) > req->src.width) ||
+		((req->src_rect.y + req->src_rect.h) > req->src.height)) {
+		mdp4_stat.err_size++;
+		pr_err("%s invalid src rectangle\n", __func__);
+		return -ERANGE;
+	}
+
+	if (ctrl->panel_3d != MDP4_3D_SIDE_BY_SIDE) {
+		int xres;
+		int yres;
+
+		xres = mfd->panel_info.xres;
+		yres = mfd->panel_info.yres;
+
+		if (((req->dst_rect.x + req->dst_rect.w) > xres) ||
+			((req->dst_rect.y + req->dst_rect.h) > yres)) {
+			mdp4_stat.err_size++;
+			pr_err("%s invalid dst rectangle\n", __func__);
+			return -ERANGE;
+		}
+	}
+
+	ptype = mdp4_overlay_format2type(req->src.format);
+	if (ptype < 0) {
+		pr_err("%s: mdp4_overlay_format2type!\n", __func__);
+		return ptype;
+	}
+
+	req_share = (req->flags & MDP_OV_PIPE_SHARE);
+
+	if (req->id == MSMFB_NEW_REQUEST)  /* new request */
+		pipe = mdp4_overlay_pipe_alloc(ptype, mixer, req_share);
+	else
+		pipe = mdp4_overlay_ndx2pipe(req->id);
+
+	if (pipe == NULL) {
+		pr_err("%s: pipe == NULL!\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* no down scale at rgb pipe */
+	if (pipe->pipe_num <= OVERLAY_PIPE_RGB2) {
+		if ((req->src_rect.h > req->dst_rect.h) ||
+			(req->src_rect.w > req->dst_rect.w)) {
+			pr_err("%s: downscaling not supported on RGB pipe\n",
+								__func__);
+			return -ERANGE;
+		}
+	}
+
+	pipe->src_format = req->src.format;
+	ret = mdp4_overlay_format2pipe(pipe);
+	if (ret < 0) {
+		pr_err("%s: mdp4_overlay_format2pipe!\n", __func__);
+		return ret;
+	}
+
+	/*
+	 * base layer == 1, reserved for frame buffer
+	 * zorder 0 == stage 0 == 2
+	 * zorder 1 == stage 1 == 3
+	 * zorder 2 == stage 2 == 4
+	 */
+	if (req->id == MSMFB_NEW_REQUEST) {  /* new request */
+		pd = &ctrl->ov_pipe[pipe->pipe_num];
+		for (j = 0; j < MDP4_MAX_SHARE; j++) {
+			if (pd->ndx_list[j] == 0) {
+				pd->ndx_list[j] = pipe->pipe_ndx;
+				break;
+			}
+		}
+		pipe->pipe_share = req_share;
+		pd->ref_cnt++;
+		pipe->pipe_used++;
+		pipe->mixer_num = mixer;
+		pipe->mixer_stage = req->z_order + MDP4_MIXER_STAGE0;
+		pr_debug("%s: zorder=%d pipe ndx=%d num=%d\n", __func__,
+			req->z_order, pipe->pipe_ndx, pipe->pipe_num);
+
+	}
+
+	pipe->src_width = req->src.width & 0x07ff;	/* source img width */
+	pipe->src_height = req->src.height & 0x07ff;	/* source img height */
+	pipe->src_h = req->src_rect.h & 0x07ff;
+	pipe->src_w = req->src_rect.w & 0x07ff;
+	pipe->src_y = req->src_rect.y & 0x07ff;
+	pipe->src_x = req->src_rect.x & 0x07ff;
+	pipe->dst_h = req->dst_rect.h & 0x07ff;
+	pipe->dst_w = req->dst_rect.w & 0x07ff;
+	pipe->dst_y = req->dst_rect.y & 0x07ff;
+	pipe->dst_x = req->dst_rect.x & 0x07ff;
+
+	pipe->op_mode = 0;
+
+	if (req->flags & MDP_FLIP_LR)
+		pipe->op_mode |= MDP4_OP_FLIP_LR;
+
+	if (req->flags & MDP_FLIP_UD)
+		pipe->op_mode |= MDP4_OP_FLIP_UD;
+
+	if (req->flags & MDP_DITHER)
+		pipe->op_mode |= MDP4_OP_DITHER_EN;
+
+	if (req->flags & MDP_DEINTERLACE)
+		pipe->op_mode |= MDP4_OP_DEINT_EN;
+
+	if (req->flags & MDP_DEINTERLACE_ODD)
+		pipe->op_mode |= MDP4_OP_DEINT_ODD_REF;
+
+	pipe->is_fg = req->is_fg;/* control alpha and color key */
+
+	pipe->alpha = req->alpha & 0x0ff;
+
+	pipe->transp = req->transp_mask;
+
+	*ppipe = pipe;
+
+	return 0;
+}
+
+static int get_img(struct msmfb_data *img, struct fb_info *info,
+	unsigned long *start, unsigned long *len, struct file **pp_file)
+{
+	int put_needed, ret = 0, fb_num;
+	struct file *file;
+#ifdef CONFIG_ANDROID_PMEM
+	unsigned long vstart;
+#endif
+
+	if (img->flags & MDP_BLIT_SRC_GEM) {
+		*pp_file = NULL;
+		return kgsl_gem_obj_addr(img->memory_id, (int) img->priv,
+					 start, len);
+	}
+
+#ifdef CONFIG_ANDROID_PMEM
+	if (!get_pmem_file(img->memory_id, start, &vstart, len, pp_file))
+		return 0;
+#endif
+	file = fget_light(img->memory_id, &put_needed);
+	if (file == NULL)
+		return -1;
+
+	if (MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
+		fb_num = MINOR(file->f_dentry->d_inode->i_rdev);
+		if (get_fb_phys_info(start, len, fb_num))
+			ret = -1;
+		else
+			*pp_file = file;
+	} else
+		ret = -1;
+	if (ret)
+		fput_light(file, put_needed);
+	return ret;
+}
+
+int mdp4_overlay_3d(struct fb_info *info, struct msmfb_overlay_3d *req)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	int ret = -EPERM;
+
+	if (mutex_lock_interruptible(&mfd->dma->ov_mutex))
+		return -EINTR;
+
+#ifdef CONFIG_FB_MSM_MIPI_DSI
+	/* Only dsi_cmd panel support 3D */
+	if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
+		mdp4_dsi_cmd_3d(mfd, req);
+		ret = 0;
+	}
+#endif
+	mutex_unlock(&mfd->dma->ov_mutex);
+
+	return ret;
+}
+
+#ifdef CONFIG_FB_MSM_OVERLAY_WRITEBACK
+int mdp4_overlay_blt(struct fb_info *info, struct msmfb_overlay_blt *req)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+
+	if (mfd == NULL)
+		return -ENODEV;
+
+	if (mutex_lock_interruptible(&mfd->dma->ov_mutex))
+		return -EINTR;
+
+	if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
+		mdp4_dsi_overlay_blt(mfd, req);
+	else if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
+		mdp4_dsi_video_overlay_blt(mfd, req);
+	else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
+		mdp4_lcdc_overlay_blt(mfd, req);
+
+	mutex_unlock(&mfd->dma->ov_mutex);
+
+	return 0;
+}
+
+int mdp4_overlay_blt_offset(struct fb_info *info, struct msmfb_overlay_blt *req)
+{
+	int ret = 0;
+
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+
+	if (mutex_lock_interruptible(&mfd->dma->ov_mutex))
+		return -EINTR;
+
+	if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
+		ret = mdp4_dsi_overlay_blt_offset(mfd, req);
+	else if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
+		ret = mdp4_dsi_video_overlay_blt_offset(mfd, req);
+	else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
+		ret = mdp4_lcdc_overlay_blt_offset(mfd, req);
+
+	mutex_unlock(&mfd->dma->ov_mutex);
+
+	return ret;
+}
+#endif
+
+int mdp4_overlay_get(struct fb_info *info, struct mdp_overlay *req)
+{
+	struct mdp4_overlay_pipe *pipe;
+
+	pipe = mdp4_overlay_ndx2pipe(req->id);
+	if (pipe == NULL)
+		return -ENODEV;
+
+	*req = pipe->req_data;
+
+	return 0;
+}
+
+#define OVERLAY_VGA_SIZE	0x04B000
+#define OVERLAY_720P_TILE_SIZE  0x0E6000
+#define OVERLAY_WSVGA_SIZE 0x98000 /* 1024x608, 600 rounded up to a multiple of 32 */
+#define OVERLAY_PERF_LEVEL1	1
+#define OVERLAY_PERF_LEVEL2	2
+#define OVERLAY_PERF_LEVEL3	3
+#define OVERLAY_PERF_LEVEL4	4
+
+#ifdef CONFIG_MSM_BUS_SCALING
+#define OVERLAY_BUS_SCALE_TABLE_BASE	6
+#endif
+
+static int mdp4_overlay_is_rgb_type(int format)
+{
+	switch (format) {
+	case MDP_RGB_565:
+	case MDP_RGB_888:
+	case MDP_BGR_565:
+	case MDP_XRGB_8888:
+	case MDP_ARGB_8888:
+	case MDP_RGBA_8888:
+	case MDP_BGRA_8888:
+	case MDP_RGBX_8888:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
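+/*
+ * Rough summary of the level selection below (level 1 = highest MDP clock):
+ * deinterlaced sources, large or non-foreground RGB sources, and the case
+ * where both VG pipes are in use get level 1; a small full-alpha foreground
+ * RGB source gets level 4; remaining video sources fall to level 3 (<= VGA)
+ * or level 2 (<= 720p tile), otherwise level 1.
+ */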
+static uint32 mdp4_overlay_get_perf_level(struct mdp_overlay *req)
+{
+	int is_fg = 0;
+
+	if (req->is_fg && ((req->alpha & 0x0ff) == 0xff))
+		is_fg = 1;
+
+	if (req->flags & MDP_DEINTERLACE)
+		return OVERLAY_PERF_LEVEL1;
+
+	if (mdp4_overlay_is_rgb_type(req->src.format) && is_fg &&
+		((req->src.width * req->src.height) <= OVERLAY_WSVGA_SIZE))
+		return OVERLAY_PERF_LEVEL4;
+	else if (mdp4_overlay_is_rgb_type(req->src.format))
+		return OVERLAY_PERF_LEVEL1;
+
+	if (ctrl->ov_pipe[OVERLAY_PIPE_VG1].ref_cnt &&
+		ctrl->ov_pipe[OVERLAY_PIPE_VG2].ref_cnt)
+		return OVERLAY_PERF_LEVEL1;
+
+	if (req->src.width*req->src.height <= OVERLAY_VGA_SIZE)
+		return OVERLAY_PERF_LEVEL3;
+	else if (req->src.width*req->src.height <= OVERLAY_720P_TILE_SIZE)
+		return OVERLAY_PERF_LEVEL2;
+	else
+		return OVERLAY_PERF_LEVEL1;
+}
+
+int mdp4_overlay_set(struct fb_info *info, struct mdp_overlay *req)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	int ret, mixer;
+	struct mdp4_overlay_pipe *pipe;
+
+	if (mfd == NULL) {
+		pr_err("%s: mfd == NULL, -ENODEV\n", __func__);
+		return -ENODEV;
+	}
+
+	if (!mfd->panel_power_on)	/* suspended */
+		return -EPERM;
+
+	if (req->src.format == MDP_FB_FORMAT)
+		req->src.format = mfd->fb_imgType;
+
+	if (mutex_lock_interruptible(&mfd->dma->ov_mutex)) {
+		pr_err("%s: mutex_lock_interruptible, -EINTR\n", __func__);
+		return -EINTR;
+	}
+
+	perf_level = mdp4_overlay_get_perf_level(req);
+
+	if ((mfd->panel_info.type == LCDC_PANEL) &&
+	    (req->src_rect.h >
+		req->dst_rect.h || req->src_rect.w > req->dst_rect.w)) {
+		if (mdp4_overlay_validate_downscale(req, mfd,
+			perf_level, mfd->panel_info.clk_rate)) {
+			mutex_unlock(&mfd->dma->ov_mutex);
+			return -ERANGE;
+		}
+	}
+	if ((mfd->panel_info.type == MIPI_VIDEO_PANEL) &&
+	    (req->src_rect.h >
+		req->dst_rect.h || req->src_rect.w > req->dst_rect.w)) {
+		if (mdp4_overlay_validate_downscale(req, mfd,
+			perf_level, (&mfd->panel_info.mipi)->dsi_pclk_rate)) {
+			mutex_unlock(&mfd->dma->ov_mutex);
+			return -ERANGE;
+		}
+	}
+	mixer = mfd->panel_info.pdest;	/* DISPLAY_1 or DISPLAY_2 */
+
+	ret = mdp4_overlay_req2pipe(req, mixer, &pipe, mfd);
+	if (ret < 0) {
+		mutex_unlock(&mfd->dma->ov_mutex);
+		pr_err("%s: mdp4_overlay_req2pipe, ret=%d\n", __func__, ret);
+		return ret;
+	}
+
+#ifdef CONFIG_FB_MSM_MIPI_DSI
+	/*
+	 * writeback (blt) mode to provide work around for
+	 * dsi cmd mode interface hardware bug.
+	 */
+	if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
+		if (mixer == MDP4_MIXER0 && req->dst_rect.x != 0) {
+			mdp4_dsi_blt_dmap_busy_wait(mfd);
+			mdp4_dsi_overlay_blt_start(mfd);
+		}
+	}
+#endif
+
+	/* return id back to user */
+	req->id = pipe->pipe_ndx;	/* pipe_ndx starts from 1 */
+	pipe->req_data = *req;		/* keep original req */
+
+	pipe->flags = req->flags;
+
+	if (pipe->flags & MDP_SHARPENING) {
+		bool test = ((pipe->req_data.dpp.sharp_strength > 0) &&
+			((req->src_rect.w > req->dst_rect.w) &&
+			 (req->src_rect.h > req->dst_rect.h)));
+		if (test) {
+			pr_warn("%s: No sharpening while downscaling.\n",
+								__func__);
+			pipe->flags &= ~MDP_SHARPENING;
+		}
+	}
+
+	mdp4_stat.overlay_set[pipe->mixer_num]++;
+
+	if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
+		if (mdp_hw_revision == MDP4_REVISION_V2_1 &&
+			pipe->mixer_num == MDP4_MIXER0)
+			mdp4_overlay_status_write(MDP4_OVERLAY_TYPE_SET, true);
+	}
+
+	mdp4_del_res_rel = 0;
+	mutex_unlock(&mfd->dma->ov_mutex);
+	mdp_set_core_clk(perf_level);
+
+#ifdef CONFIG_MSM_BUS_SCALING
+	if (pipe->mixer_num == MDP4_MIXER0) {
+		mdp_bus_scale_update_request(OVERLAY_BUS_SCALE_TABLE_BASE
+						- perf_level);
+	}
+#endif
+
+	return 0;
+}
+
+void  mdp4_overlay_resource_release(void)
+{
+	if (mdp4_del_res_rel) {
+		mdp_set_core_clk(OVERLAY_PERF_LEVEL4);
+		mdp4_del_res_rel = 0;
+	}
+}
+
+int mdp4_overlay_unset(struct fb_info *info, int ndx)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	struct mdp4_overlay_pipe *pipe;
+	uint32 flags;
+
+	if (mfd == NULL)
+		return -ENODEV;
+
+	if (mutex_lock_interruptible(&mfd->dma->ov_mutex))
+		return -EINTR;
+
+	pipe = mdp4_overlay_ndx2pipe(ndx);
+
+	if (pipe == NULL) {
+		mutex_unlock(&mfd->dma->ov_mutex);
+		return -ENODEV;
+	}
+
+	if (pipe->mixer_num == MDP4_MIXER1)
+		ctrl->mixer1_played = 0;
+	else {
+		/* mixer 0 */
+		ctrl->mixer0_played = 0;
+#ifdef CONFIG_FB_MSM_MIPI_DSI
+		if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
+			if (mfd->panel_power_on) {
+				mdp4_dsi_blt_dmap_busy_wait(mfd);
+			}
+		}
+#else
+		if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
+			if (mfd->panel_power_on)
+				mdp4_mddi_dma_busy_wait(mfd);
+		}
+#endif
+	}
+
+	mdp4_mixer_stage_down(pipe);
+
+	if (pipe->mixer_num == MDP4_MIXER0) {
+#ifdef CONFIG_FB_MSM_MIPI_DSI
+		if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
+			if (mfd->panel_power_on)
+				if (mdp4_dsi_overlay_blt_stop(mfd) == 0)
+					mdp4_dsi_cmd_overlay_restore();
+		}  else if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO) {
+			flags = pipe->flags;
+			pipe->flags &= ~MDP_OV_PLAY_NOWAIT;
+			mdp4_overlay_dsi_video_vsync_push(mfd, pipe);
+			pipe->flags = flags;
+		}
+#else
+		if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
+			if (mdp_hw_revision == MDP4_REVISION_V2_1)
+				mdp4_overlay_status_write(
+					MDP4_OVERLAY_TYPE_UNSET, true);
+			if (mfd->panel_power_on)
+				mdp4_mddi_overlay_restore();
+		}
+#endif
+		else if (ctrl->panel_mode & MDP4_PANEL_LCDC) {
+			flags = pipe->flags;
+			pipe->flags &= ~MDP_OV_PLAY_NOWAIT;
+			mdp4_overlay_lcdc_vsync_push(mfd, pipe);
+			pipe->flags = flags;
+		}
+	}
+#ifdef CONFIG_FB_MSM_DTV
+	else {	/* mixer1, DTV, ATV */
+		flags = pipe->flags;
+		pipe->flags &= ~MDP_OV_PLAY_NOWAIT;
+		mdp4_overlay_dtv_vsync_push(mfd, pipe);
+		pipe->flags = flags;
+	}
+#endif
+
+	mdp4_stat.overlay_unset[pipe->mixer_num]++;
+
+	mdp4_overlay_pipe_free(pipe);
+
+	if (!(ctrl->ov_pipe[OVERLAY_PIPE_VG1].ref_cnt +
+		ctrl->ov_pipe[OVERLAY_PIPE_VG2].ref_cnt))
+		mdp4_del_res_rel = 1;
+
+	mutex_unlock(&mfd->dma->ov_mutex);
+
+#ifdef CONFIG_MSM_BUS_SCALING
+	if (pipe->mixer_num == MDP4_MIXER0)
+		if (mfd->panel_power_on)
+			mdp_bus_scale_update_request(2);
+#endif
+	return 0;
+}
+
+struct tile_desc {
+	uint32 width;  /* tile's width */
+	uint32 height; /* tile's height */
+	uint32 row_tile_w; /* tiles per row's width */
+	uint32 row_tile_h; /* tiles per row's height */
+};
+
+void tile_samsung(struct tile_desc *tp)
+{
+	/*
+	 * Each row of a samsung tile consists of two tiles in height
+	 * and two tiles in width, which means width should align to
+	 * 64 x 2 bytes and height should align to 32 x 2 bytes.
+	 * The video decoder generates two tiles in width and one tile
+	 * in height, so height ends up aligned to 32 x 1 bytes.
+	 */
+	tp->width = 64;		/* 64 bytes */
+	tp->row_tile_w = 2;	/* 2 tiles per row's width */
+	tp->height = 32;	/* 32 bytes */
+	tp->row_tile_h = 1;	/* 1 tiles per row's height */
+}
+
+uint32 tile_mem_size(struct mdp4_overlay_pipe *pipe, struct tile_desc *tp)
+{
+	uint32 tile_w, tile_h;
+	uint32 row_num_w, row_num_h;
+
+
+	tile_w = tp->width * tp->row_tile_w;
+	tile_h = tp->height * tp->row_tile_h;
+
+	row_num_w = (pipe->src_width + tile_w - 1) / tile_w;
+	row_num_h = (pipe->src_height + tile_h - 1) / tile_h;
+	return ((row_num_w * row_num_h * tile_w * tile_h) + 8191) & ~8191;
+}
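+
+/*
+ * Example (values computed from tile_samsung()/tile_mem_size() above):
+ * a 1280x720 tiled surface uses 128x32 byte macro-tiles, so
+ * row_num_w = 10, row_num_h = 23 and the 8 KB-rounded size is
+ * 10 * 23 * 128 * 32 = 0xE6000 bytes, matching OVERLAY_720P_TILE_SIZE.
+ */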
+
+int mdp4_overlay_play(struct fb_info *info, struct msmfb_overlay_data *req,
+		struct file **pp_src_file, struct file **pp_src_plane1_file,
+		struct file **pp_src_plane2_file)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	struct msmfb_data *img;
+	struct mdp4_overlay_pipe *pipe;
+	struct mdp4_pipe_desc *pd;
+	ulong start, addr;
+	ulong len = 0;
+	struct file *p_src_file = 0;
+	struct file *p_src_plane1_file = 0, *p_src_plane2_file = 0;
+	uint32_t overlay_version = 0;
+
+	if (mfd == NULL)
+		return -ENODEV;
+
+	if (!mfd->panel_power_on) /* suspended */
+		return -EPERM;
+
+	pipe = mdp4_overlay_ndx2pipe(req->id);
+	if (pipe == NULL) {
+		pr_err("%s: req_id=%d Error\n", __func__, req->id);
+		return -ENODEV;
+	}
+
+	if (mutex_lock_interruptible(&mfd->dma->ov_mutex))
+		return -EINTR;
+
+	pd = &ctrl->ov_pipe[pipe->pipe_num];
+	if (pd->player && pipe != pd->player) {
+		if (pipe->pipe_type == OVERLAY_TYPE_RGB) {
+			mutex_unlock(&mfd->dma->ov_mutex);
+			return 0; /* ignore it, kicked out already */
+		}
+	}
+
+	pd->player = pipe;	/* keep */
+
+	img = &req->data;
+	get_img(img, info, &start, &len, &p_src_file);
+	if (len == 0) {
+		mutex_unlock(&mfd->dma->ov_mutex);
+		pr_err("%s: pmem Error\n", __func__);
+		return -1;
+	}
+	*pp_src_file = p_src_file;
+
+	addr = start + img->offset;
+	pipe->srcp0_addr = addr;
+	pipe->srcp0_ystride = pipe->src_width * pipe->bpp;
+
+	if ((req->version_key & VERSION_KEY_MASK) == 0xF9E8D700)
+		overlay_version = (req->version_key & ~VERSION_KEY_MASK);
+
+	if (pipe->fetch_plane == OVERLAY_PLANE_PSEUDO_PLANAR) {
+		if (overlay_version > 0) {
+			img = &req->plane1_data;
+			get_img(img, info, &start, &len, &p_src_plane1_file);
+			if (len == 0) {
+				mutex_unlock(&mfd->dma->ov_mutex);
+				pr_err("%s: Error to get plane1\n", __func__);
+				return -EINVAL;
+			}
+			pipe->srcp1_addr = start + img->offset;
+			*pp_src_plane1_file = p_src_plane1_file;
+		} else if (pipe->frame_format ==
+				MDP4_FRAME_FORMAT_VIDEO_SUPERTILE) {
+			struct tile_desc tile;
+
+			tile_samsung(&tile);
+			pipe->srcp1_addr = addr + tile_mem_size(pipe, &tile);
+		} else {
+			pipe->srcp1_addr = addr + (pipe->src_width *
+						pipe->src_height);
+		}
+		pipe->srcp0_ystride = pipe->src_width;
+		if ((pipe->src_format == MDP_Y_CRCB_H1V1) ||
+			(pipe->src_format == MDP_Y_CBCR_H1V1)) {
+			if (pipe->src_width > YUV_444_MAX_WIDTH)
+				pipe->srcp1_ystride = pipe->src_width << 2;
+			else
+				pipe->srcp1_ystride = pipe->src_width << 1;
+		} else
+			pipe->srcp1_ystride = pipe->src_width;
+
+	} else if (pipe->fetch_plane == OVERLAY_PLANE_PLANAR) {
+		if (overlay_version > 0) {
+			img = &req->plane1_data;
+			get_img(img, info, &start, &len, &p_src_plane1_file);
+			if (len == 0) {
+				mutex_unlock(&mfd->dma->ov_mutex);
+				pr_err("%s: Error to get plane1\n", __func__);
+				return -EINVAL;
+			}
+			pipe->srcp1_addr = start + img->offset;
+			*pp_src_plane1_file = p_src_plane1_file;
+
+			img = &req->plane2_data;
+			get_img(img, info, &start, &len, &p_src_plane2_file);
+			if (len == 0) {
+				mutex_unlock(&mfd->dma->ov_mutex);
+				pr_err("%s: Error to get plane2\n", __func__);
+				return -EINVAL;
+			}
+			pipe->srcp2_addr = start + img->offset;
+			*pp_src_plane2_file = p_src_plane2_file;
+		} else {
+			addr += (pipe->src_width * pipe->src_height);
+			pipe->srcp1_addr = addr;
+			addr += ((pipe->src_width / 2) *
+					(pipe->src_height / 2));
+			pipe->srcp2_addr = addr;
+		}
+		pipe->srcp0_ystride = pipe->src_width;
+		pipe->srcp1_ystride = pipe->src_width / 2;
+		pipe->srcp2_ystride = pipe->src_width / 2;
+	}
+
+	if (pipe->pipe_num >= OVERLAY_PIPE_VG1)
+		mdp4_overlay_vg_setup(pipe);	/* video/graphic pipe */
+	else {
+		if (pipe->flags & MDP_SHARPENING) {
+			pr_warn(
+			"%s: Sharpening/Smoothing not supported on RGB pipe\n",
+								     __func__);
+			pipe->flags &= ~MDP_SHARPENING;
+		}
+		mdp4_overlay_rgb_setup(pipe);	/* rgb pipe */
+	}
+
+	mdp4_mixer_blend_setup(pipe);
+	mdp4_mixer_stage_up(pipe);
+
+	if (pipe->mixer_num == MDP4_MIXER1) {
+		ctrl->mixer1_played++;
+		/* external interface */
+		if (ctrl->panel_mode & MDP4_PANEL_DTV)
+#ifdef CONFIG_FB_MSM_DTV
+			mdp4_overlay_dtv_ov_done_push(mfd, pipe);
+#else
+			mdp4_overlay_reg_flush(pipe, 1);
+#endif
+		else if (ctrl->panel_mode & MDP4_PANEL_ATV)
+			mdp4_overlay_reg_flush(pipe, 1);
+	} else {
+		/* primary interface */
+		ctrl->mixer0_played++;
+		if (ctrl->panel_mode & MDP4_PANEL_LCDC)
+			mdp4_overlay_lcdc_vsync_push(mfd, pipe);
+#ifdef CONFIG_FB_MSM_MIPI_DSI
+		else if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
+			mdp4_overlay_dsi_video_vsync_push(mfd, pipe);
+#endif
+		else {
+			/* mddi & mipi dsi cmd mode */
+			if (pipe->flags & MDP_OV_PLAY_NOWAIT) {
+				mdp4_stat.overlay_play[pipe->mixer_num]++;
+				mutex_unlock(&mfd->dma->ov_mutex);
+				return 0;
+			}
+#ifdef CONFIG_FB_MSM_MIPI_DSI
+			if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
+				mdp4_dsi_cmd_dma_busy_wait(mfd);
+				mdp4_dsi_cmd_kickoff_video(mfd, pipe);
+			}
+#else
+			if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
+				mdp4_mddi_dma_busy_wait(mfd);
+				mdp4_mddi_kickoff_video(mfd, pipe);
+			}
+#endif
+		}
+	}
+
+	mdp4_stat.overlay_play[pipe->mixer_num]++;
+
+	mutex_unlock(&mfd->dma->ov_mutex);
+
+	return 0;
+}
diff --git a/drivers/video/msm/mdp4_overlay_atv.c b/drivers/video/msm/mdp4_overlay_atv.c
new file mode 100644
index 0000000..8dcdec0
--- /dev/null
+++ b/drivers/video/msm/mdp4_overlay_atv.c
@@ -0,0 +1,189 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/hrtimer.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+#include <linux/fb.h>
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <mach/hardware.h>
+
+#include "mdp.h"
+#include "msm_fb.h"
+#include "mdp4.h"
+
+
+static struct mdp4_overlay_pipe *atv_pipe;
+
+int mdp4_atv_on(struct platform_device *pdev)
+{
+	uint8 *buf;
+	int bpp, ptype;
+	struct fb_info *fbi;
+	struct fb_var_screeninfo *var;
+	struct msm_fb_data_type *mfd;
+	struct mdp4_overlay_pipe *pipe;
+	int ret;
+
+	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
+
+	if (!mfd)
+		return -ENODEV;
+
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	fbi = mfd->fbi;
+	var = &fbi->var;
+
+	bpp = fbi->var.bits_per_pixel / 8;
+	buf = (uint8 *) fbi->fix.smem_start;
+	buf += fbi->var.xoffset * bpp +
+		fbi->var.yoffset * fbi->fix.line_length;
+
+	if (atv_pipe == NULL) {
+		ptype = mdp4_overlay_format2type(mfd->fb_imgType);
+		pipe = mdp4_overlay_pipe_alloc(ptype, MDP4_MIXER1, 0);
+		if (pipe == NULL)
+			return -EBUSY;
+		pipe->pipe_used++;
+		pipe->mixer_stage  = MDP4_MIXER_STAGE_BASE;
+		pipe->mixer_num  = MDP4_MIXER1;
+		pipe->src_format = mfd->fb_imgType;
+		mdp4_overlay_panel_mode(pipe->mixer_num, MDP4_PANEL_ATV);
+		mdp4_overlay_format2pipe(pipe);
+
+		atv_pipe = pipe; /* keep it */
+	} else {
+		pipe = atv_pipe;
+	}
+
+	printk(KERN_INFO "mdp4_atv_overlay: pipe=%x ndx=%d\n",
+					(int)pipe, pipe->pipe_ndx);
+
+	/* MDP cmd block enable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	/* Turn the next panel on, get correct resolution
+		before configuring overlay pipe */
+	ret = panel_next_on(pdev);
+
+	pr_info("%s: fbi->var.yres: %d | fbi->var.xres: %d",
+			__func__, fbi->var.yres, fbi->var.xres);
+
+	/* MDP4 Config */
+	pipe->src_height = fbi->var.yres;
+	pipe->src_width = fbi->var.xres;
+	pipe->src_h = fbi->var.yres;
+	pipe->src_w = fbi->var.xres;
+	pipe->src_y = 0;
+	pipe->src_x = 0;
+	pipe->srcp0_addr = (uint32) buf;
+	pipe->srcp0_ystride = fbi->fix.line_length;
+
+	mdp4_overlay_dmae_xy(pipe);	/* dma_e */
+	mdp4_overlay_dmae_cfg(mfd, 1);
+
+	mdp4_overlay_rgb_setup(pipe);
+
+	mdp4_mixer_stage_up(pipe);
+
+	mdp4_overlayproc_cfg(pipe);
+
+	if (ret == 0)
+		mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	/* MDP cmd block disable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+	return ret;
+}
+
+int mdp4_atv_off(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+	ret = panel_next_off(pdev);
+
+	/* delay to make sure the last frame finishes */
+	msleep(100);
+
+	/* dis-engage rgb2 from mixer1 */
+	if (atv_pipe)
+		mdp4_mixer_stage_down(atv_pipe);
+
+	return ret;
+}
+
+/*
+ * mdp4_overlay1_done_atv: called from isr
+ */
+void mdp4_overlay1_done_atv(void)
+{
+	complete(&atv_pipe->comp);
+}
+
+void mdp4_atv_overlay(struct msm_fb_data_type *mfd)
+{
+	struct fb_info *fbi = mfd->fbi;
+	uint8 *buf;
+	int bpp;
+	unsigned long flag;
+	struct mdp4_overlay_pipe *pipe;
+
+	if (!mfd->panel_power_on)
+		return;
+
+	/* no need to power on cmd block since it's lcdc mode */
+	bpp = fbi->var.bits_per_pixel / 8;
+	buf = (uint8 *) fbi->fix.smem_start;
+	buf += fbi->var.xoffset * bpp +
+		fbi->var.yoffset * fbi->fix.line_length;
+
+	mutex_lock(&mfd->dma->ov_mutex);
+
+	pipe = atv_pipe;
+	pipe->srcp0_addr = (uint32) buf;
+	mdp4_overlay_rgb_setup(pipe);
+	mdp4_overlay_reg_flush(pipe, 1); /* rgb2 and mixer1 */
+
+	printk(KERN_INFO "mdp4_atv_overlay: pipe=%x ndx=%d\n",
+					(int)pipe, pipe->pipe_ndx);
+
+	/* enable irq */
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	mdp_enable_irq(MDP_OVERLAY1_TERM);
+	INIT_COMPLETION(atv_pipe->comp);
+	mfd->dma->waiting = TRUE;
+	outp32(MDP_INTR_CLEAR, INTR_OVERLAY1_DONE);
+	mdp_intr_mask |= INTR_OVERLAY1_DONE;
+	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+	wait_for_completion_killable(&atv_pipe->comp);
+	mdp_disable_irq(MDP_OVERLAY1_TERM);
+
+	mdp4_stat.kickoff_atv++;
+	mdp4_overlay_resource_release();
+	mutex_unlock(&mfd->dma->ov_mutex);
+}
diff --git a/drivers/video/msm/mdp4_overlay_dsi_cmd.c b/drivers/video/msm/mdp4_overlay_dsi_cmd.c
new file mode 100644
index 0000000..22d9d3b
--- /dev/null
+++ b/drivers/video/msm/mdp4_overlay_dsi_cmd.c
@@ -0,0 +1,672 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/hrtimer.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+#include <linux/fb.h>
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <mach/hardware.h>
+
+#include "mdp.h"
+#include "msm_fb.h"
+#include "mdp4.h"
+#include "mipi_dsi.h"
+
+static struct mdp4_overlay_pipe *dsi_pipe;
+static struct msm_fb_data_type *dsi_mfd;
+static int busy_wait_cnt;
+static int dsi_state;
+static unsigned long  tout_expired;
+
+#define TOUT_PERIOD	HZ	/* 1 second */
+#define MS_100		(HZ/10)	/* 100 ms */
+
+static int vsync_start_y_adjust = 4;
+
+struct timer_list dsi_clock_timer;
+
+static int writeback_offset;
+
+void mdp4_overlay_dsi_state_set(int state)
+{
+	unsigned long flag;
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	dsi_state = state;
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+}
+
+static void dsi_clock_tout(unsigned long data)
+{
+	if (mipi_dsi_clk_on) {
+		if (dsi_state == ST_DSI_PLAYING) {
+			mdp4_stat.dsi_clkoff++;
+			mipi_dsi_clk_disable();
+			mdp4_overlay_dsi_state_set(ST_DSI_CLK_OFF);
+		}
+	}
+}
+
+static __u32 msm_fb_line_length(__u32 fb_index, __u32 xres, int bpp)
+{
+	/*
+	 * The adreno GPU hardware requires that the pitch be aligned to
+	 * 32 pixels for color buffers, so for the cases where the GPU
+	 * is writing directly to fb0, the framebuffer pitch
+	 * also needs to be 32 pixel aligned
+	 */
+
+	if (fb_index == 0)
+		return ALIGN(xres, 32) * bpp;
+	else
+		return xres * bpp;
+}
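+
+/*
+ * Example of the alignment above: a 540-pixel-wide fb0 at 32 bpp gets a
+ * pitch of ALIGN(540, 32) * 4 = 544 * 4 = 2176 bytes, while any other
+ * framebuffer index keeps the natural 540 * 4 = 2160 byte pitch.
+ */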
+
+void mdp4_mipi_vsync_enable(struct msm_fb_data_type *mfd,
+		struct mdp4_overlay_pipe *pipe, int which)
+{
+	uint32 start_y, data, tear_en;
+
+	tear_en = (1 << which);
+
+	if ((mfd->use_mdp_vsync) && (mfd->ibuf.vsync_enable) &&
+		(mfd->panel_info.lcd.vsync_enable)) {
+
+		if (vsync_start_y_adjust <= pipe->dst_y)
+			start_y = pipe->dst_y - vsync_start_y_adjust;
+		else
+			start_y = (mfd->total_lcd_lines - 1) -
+				(vsync_start_y_adjust - pipe->dst_y);
+		if (which == 0)
+			MDP_OUTP(MDP_BASE + 0x210, start_y);	/* primary */
+		else
+			MDP_OUTP(MDP_BASE + 0x214, start_y);	/* secondary */
+
+		data = inpdw(MDP_BASE + 0x20c);
+		data |= tear_en;
+		MDP_OUTP(MDP_BASE + 0x20c, data);
+	} else {
+		data = inpdw(MDP_BASE + 0x20c);
+		data &= ~tear_en;
+		MDP_OUTP(MDP_BASE + 0x20c, data);
+	}
+}
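+
+/*
+ * Note on the start_y math above: the tear-check line is programmed
+ * vsync_start_y_adjust lines before dst_y; when dst_y is smaller than the
+ * adjust, the value wraps to the bottom of the panel
+ * (total_lcd_lines - 1 - remainder), so the check line still precedes the
+ * update region in scan order.
+ */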
+
+void mdp4_overlay_update_dsi_cmd(struct msm_fb_data_type *mfd)
+{
+	MDPIBUF *iBuf = &mfd->ibuf;
+	struct fb_info *fbi;
+	uint8 *src;
+	int ptype;
+	struct mdp4_overlay_pipe *pipe;
+	int bpp;
+	int ret;
+
+	if (mfd->key != MFD_KEY)
+		return;
+
+	dsi_mfd = mfd;		/* keep it */
+
+	/* MDP cmd block enable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	if (dsi_pipe == NULL) {
+		ptype = mdp4_overlay_format2type(mfd->fb_imgType);
+		if (ptype < 0)
+			printk(KERN_INFO "%s: format2type failed\n", __func__);
+		pipe = mdp4_overlay_pipe_alloc(ptype, MDP4_MIXER0, 0);
+		if (pipe == NULL) {
+			printk(KERN_INFO "%s: pipe_alloc failed\n", __func__);
+			return;
+		}
+		pipe->pipe_used++;
+		pipe->mixer_stage  = MDP4_MIXER_STAGE_BASE;
+		pipe->mixer_num  = MDP4_MIXER0;
+		pipe->src_format = mfd->fb_imgType;
+		mdp4_overlay_panel_mode(pipe->mixer_num, MDP4_PANEL_DSI_CMD);
+		ret = mdp4_overlay_format2pipe(pipe);
+		if (ret < 0)
+			printk(KERN_INFO "%s: format2type failed\n", __func__);
+
+		init_timer(&dsi_clock_timer);
+		dsi_clock_timer.function = dsi_clock_tout;
+		dsi_clock_timer.data = (unsigned long) mfd;
+		dsi_clock_timer.expires = 0xffffffff;
+		add_timer(&dsi_clock_timer);
+		tout_expired = jiffies;
+
+		dsi_pipe = pipe; /* keep it */
+
+		fbi = mfd->fbi;
+		bpp = fbi->var.bits_per_pixel / 8;
+		src = (uint8 *) iBuf->buf;
+		writeback_offset = mdp4_overlay_writeback_setup(
+						fbi, pipe, src, bpp);
+
+		/*
+		 * configure dsi stream id
+		 * dma_p = 0, dma_s = 1
+		 */
+		MDP_OUTP(MDP_BASE + 0x000a0, 0x10);
+		/* enable dsi trigger on dma_p */
+		MDP_OUTP(MDP_BASE + 0x000a4, 0x01);
+	} else {
+		pipe = dsi_pipe;
+	}
+
+	/* whole screen for base layer */
+	src = (uint8 *) iBuf->buf;
+
+
+	{
+		struct fb_info *fbi;
+
+		fbi = mfd->fbi;
+		if (pipe->is_3d) {
+			bpp = fbi->var.bits_per_pixel / 8;
+			pipe->src_height = pipe->src_height_3d;
+			pipe->src_width = pipe->src_width_3d;
+			pipe->src_h = pipe->src_height_3d;
+			pipe->src_w = pipe->src_width_3d;
+			pipe->dst_h = pipe->src_height_3d;
+			pipe->dst_w = pipe->src_width_3d;
+			pipe->srcp0_ystride = msm_fb_line_length(0,
+						pipe->src_width, bpp);
+		} else {
+			 /* 2D */
+			pipe->src_height = fbi->var.yres;
+			pipe->src_width = fbi->var.xres;
+			pipe->src_h = fbi->var.yres;
+			pipe->src_w = fbi->var.xres;
+			pipe->dst_h = fbi->var.yres;
+			pipe->dst_w = fbi->var.xres;
+			pipe->srcp0_ystride = fbi->fix.line_length;
+		}
+		pipe->src_y = 0;
+		pipe->src_x = 0;
+		pipe->dst_y = 0;
+		pipe->dst_x = 0;
+		pipe->srcp0_addr = (uint32)src;
+	}
+
+
+	mdp4_overlay_rgb_setup(pipe);
+
+	mdp4_mixer_stage_up(pipe);
+
+	mdp4_overlayproc_cfg(pipe);
+
+	mdp4_overlay_dmap_xy(pipe);
+
+	mdp4_overlay_dmap_cfg(mfd, 0);
+
+	mdp4_mipi_vsync_enable(mfd, pipe, 0);
+
+	/* MDP cmd block disable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+	wmb();
+}
+
+void mdp4_dsi_cmd_3d(struct msm_fb_data_type *mfd, struct msmfb_overlay_3d *r3d)
+{
+	struct fb_info *fbi;
+	struct mdp4_overlay_pipe *pipe;
+	int bpp;
+	uint8 *src = NULL;
+
+	if (dsi_pipe == NULL)
+		return;
+
+	dsi_pipe->is_3d = r3d->is_3d;
+	dsi_pipe->src_height_3d = r3d->height;
+	dsi_pipe->src_width_3d = r3d->width;
+
+	pipe = dsi_pipe;
+
+	if (pipe->is_3d)
+		mdp4_overlay_panel_3d(pipe->mixer_num, MDP4_3D_SIDE_BY_SIDE);
+	else
+		mdp4_overlay_panel_3d(pipe->mixer_num, MDP4_3D_NONE);
+
+	if (mfd->panel_power_on) {
+		mdp4_dsi_cmd_dma_busy_wait(mfd);
+		mdp4_dsi_blt_dmap_busy_wait(mfd);
+	}
+
+	fbi = mfd->fbi;
+	if (pipe->is_3d) {
+		bpp = fbi->var.bits_per_pixel / 8;
+		pipe->src_height = pipe->src_height_3d;
+		pipe->src_width = pipe->src_width_3d;
+		pipe->src_h = pipe->src_height_3d;
+		pipe->src_w = pipe->src_width_3d;
+		pipe->dst_h = pipe->src_height_3d;
+		pipe->dst_w = pipe->src_width_3d;
+		pipe->srcp0_ystride = msm_fb_line_length(0,
+					pipe->src_width, bpp);
+	} else {
+		 /* 2D */
+		pipe->src_height = fbi->var.yres;
+		pipe->src_width = fbi->var.xres;
+		pipe->src_h = fbi->var.yres;
+		pipe->src_w = fbi->var.xres;
+		pipe->dst_h = fbi->var.yres;
+		pipe->dst_w = fbi->var.xres;
+		pipe->srcp0_ystride = fbi->fix.line_length;
+	}
+	pipe->src_y = 0;
+	pipe->src_x = 0;
+	pipe->dst_y = 0;
+	pipe->dst_x = 0;
+	pipe->srcp0_addr = (uint32)src;
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	mdp4_overlay_rgb_setup(pipe);
+
+	mdp4_mixer_stage_up(pipe);
+
+	mdp4_overlayproc_cfg(pipe);
+
+	mdp4_overlay_dmap_xy(pipe);
+
+	mdp4_overlay_dmap_cfg(mfd, 0);
+
+	/* MDP cmd block disable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+
+
+#ifdef CONFIG_FB_MSM_OVERLAY_WRITEBACK
+int mdp4_dsi_overlay_blt_start(struct msm_fb_data_type *mfd)
+{
+	unsigned long flag;
+
+	pr_debug("%s: blt_end=%d blt_addr=%x pid=%d\n",
+	__func__, dsi_pipe->blt_end, (int)dsi_pipe->blt_addr, current->pid);
+
+	if (dsi_pipe->blt_addr == 0) {
+		mdp4_dsi_cmd_dma_busy_wait(mfd);
+		spin_lock_irqsave(&mdp_spin_lock, flag);
+		dsi_pipe->blt_end = 0;
+		dsi_pipe->blt_cnt = 0;
+		dsi_pipe->ov_cnt = 0;
+		dsi_pipe->dmap_cnt = 0;
+		dsi_pipe->blt_addr = dsi_pipe->blt_base;
+		mdp4_stat.writeback++;
+		spin_unlock_irqrestore(&mdp_spin_lock, flag);
+		return 0;
+	}
+
+	return -EBUSY;
+}
+
+int mdp4_dsi_overlay_blt_stop(struct msm_fb_data_type *mfd)
+{
+	unsigned long flag;
+
+
+	pr_debug("%s: blt_end=%d blt_addr=%x\n",
+		 __func__, dsi_pipe->blt_end, (int)dsi_pipe->blt_addr);
+
+	if ((dsi_pipe->blt_end == 0) && dsi_pipe->blt_addr) {
+		spin_lock_irqsave(&mdp_spin_lock, flag);
+		dsi_pipe->blt_end = 1;	/* mark as end */
+		spin_unlock_irqrestore(&mdp_spin_lock, flag);
+		return 0;
+	}
+
+	return -EBUSY;
+}
+
+int mdp4_dsi_overlay_blt_offset(struct msm_fb_data_type *mfd,
+					struct msmfb_overlay_blt *req)
+{
+	req->offset = writeback_offset;
+	req->width = dsi_pipe->src_width;
+	req->height = dsi_pipe->src_height;
+	req->bpp = dsi_pipe->bpp;
+
+	return sizeof(*req);
+}
+
+void mdp4_dsi_overlay_blt(struct msm_fb_data_type *mfd,
+					struct msmfb_overlay_blt *req)
+{
+	if (req->enable)
+		mdp4_dsi_overlay_blt_start(mfd);
+	else if (req->enable == 0)
+		mdp4_dsi_overlay_blt_stop(mfd);
+
+}
+#else
+int mdp4_dsi_overlay_blt_offset(struct msm_fb_data_type *mfd,
+					struct msmfb_overlay_blt *req)
+{
+	return 0;
+}
+int mdp4_dsi_overlay_blt_start(struct msm_fb_data_type *mfd)
+{
+	return -EBUSY;
+}
+int mdp4_dsi_overlay_blt_stop(struct msm_fb_data_type *mfd)
+{
+	return -EBUSY;
+}
+#endif
+
+void mdp4_blt_xy_update(struct mdp4_overlay_pipe *pipe)
+{
+	uint32 off, addr, addr2;
+	int bpp;
+	char *overlay_base;
+
+
+	if (pipe->blt_addr == 0)
+		return;
+
+
+#ifdef BLT_RGB565
+	bpp = 2; /* overlay ouput is RGB565 */
+#else
+	bpp = 3; /* overlay ouput is RGB888 */
+#endif
+	off = 0;
+	if (pipe->dmap_cnt & 0x01)
+		off = pipe->src_height * pipe->src_width * bpp;
+	addr = pipe->blt_addr + off;
+
+	/* dmap */
+	MDP_OUTP(MDP_BASE + 0x90008, addr);
+
+	off = 0;
+	if (pipe->ov_cnt & 0x01)
+		off = pipe->src_height * pipe->src_width * bpp;
+	addr2 = pipe->blt_addr + off;
+	/* overlay 0 */
+	overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
+	outpdw(overlay_base + 0x000c, addr2);
+	outpdw(overlay_base + 0x001c, addr2);
+}
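+
+/*
+ * The writeback (blt) buffer is treated as two frames: the low bit of
+ * dmap_cnt/ov_cnt picks which half is used, so while DMA_P scans one half
+ * out to the panel the overlay engine can compose the next frame into the
+ * other half (see mdp4_overlay0_done_dsi_cmd()/mdp4_dma_p_done_dsi() below).
+ */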
+
+
+/*
+ * mdp4_dma_p_done_dsi: called from isr
+ * DMA_P_DONE is only used when blt is enabled
+ */
+void mdp4_dma_p_done_dsi(struct mdp_dma_data *dma)
+{
+	int diff;
+
+	dsi_pipe->dmap_cnt++;
+	diff = dsi_pipe->ov_cnt - dsi_pipe->dmap_cnt;
+	pr_debug("%s: ov_cnt=%d dmap_cnt=%d\n",
+			__func__, dsi_pipe->ov_cnt, dsi_pipe->dmap_cnt);
+
+	if (diff <= 0) {
+		spin_lock(&mdp_spin_lock);
+		dma->dmap_busy = FALSE;
+		complete(&dma->dmap_comp);
+		spin_unlock(&mdp_spin_lock);
+		if (dsi_pipe->blt_end) {
+			dsi_pipe->blt_end = 0;
+			dsi_pipe->blt_addr = 0;
+			pr_debug("%s: END, ov_cnt=%d dmap_cnt=%d\n",
+				__func__, dsi_pipe->ov_cnt, dsi_pipe->dmap_cnt);
+			mdp_intr_mask &= ~INTR_DMA_P_DONE;
+			outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+		}
+		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
+		mdp_disable_irq_nosync(MDP_DMA2_TERM);  /* disable intr */
+		return;
+	}
+
+	spin_lock(&mdp_spin_lock);
+	dma->busy = FALSE;
+	spin_unlock(&mdp_spin_lock);
+	complete(&dma->comp);
+	if (busy_wait_cnt)
+		busy_wait_cnt--;
+
+	pr_debug("%s: kickoff dmap\n", __func__);
+
+	mdp4_blt_xy_update(dsi_pipe);
+	/* kick off dmap */
+	outpdw(MDP_BASE + 0x000c, 0x0);
+	/* trigger dsi cmd engine */
+	mipi_dsi_cmd_mdp_sw_trigger();
+
+	mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
+}
+
+
+/*
+ * mdp4_overlay0_done_dsi_cmd: called from isr
+ */
+void mdp4_overlay0_done_dsi_cmd(struct mdp_dma_data *dma)
+{
+	int diff;
+
+	if (dsi_pipe->blt_addr == 0) {
+		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
+		spin_lock(&mdp_spin_lock);
+		dma->busy = FALSE;
+		spin_unlock(&mdp_spin_lock);
+		complete(&dma->comp);
+		if (busy_wait_cnt)
+			busy_wait_cnt--;
+		mdp_disable_irq_nosync(MDP_OVERLAY0_TERM);
+		return;
+	}
+
+	/* blt enabled */
+	if (dsi_pipe->blt_end == 0)
+		dsi_pipe->ov_cnt++;
+
+	pr_debug("%s: ov_cnt=%d dmap_cnt=%d\n",
+			__func__, dsi_pipe->ov_cnt, dsi_pipe->dmap_cnt);
+
+	if (dsi_pipe->blt_cnt == 0) {
+		/* first kickoff since blt enabled */
+		mdp_intr_mask |= INTR_DMA_P_DONE;
+		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+	}
+	dsi_pipe->blt_cnt++;
+
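+	/*
+	 * If two or more frames are already queued ahead of dmap, let the
+	 * DMA_P done isr kick the next dmap transfer and release waiters.
+	 */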
+	diff = dsi_pipe->ov_cnt - dsi_pipe->dmap_cnt;
+	if (diff >= 2) {
+		mdp_disable_irq_nosync(MDP_OVERLAY0_TERM);
+		return;
+	}
+
+	spin_lock(&mdp_spin_lock);
+	dma->busy = FALSE;
+	dma->dmap_busy = TRUE;
+	spin_unlock(&mdp_spin_lock);
+	complete(&dma->comp);
+	if (busy_wait_cnt)
+		busy_wait_cnt--;
+
+	pr_debug("%s: kickoff dmap\n", __func__);
+
+	mdp4_blt_xy_update(dsi_pipe);
+	mdp_enable_irq(MDP_DMA2_TERM);	/* enable intr */
+	/* kick off dmap */
+	outpdw(MDP_BASE + 0x000c, 0x0);
+	/* trigger dsi cmd engine */
+	mipi_dsi_cmd_mdp_sw_trigger();
+	mdp_disable_irq_nosync(MDP_OVERLAY0_TERM);
+}
+
+void mdp4_dsi_cmd_overlay_restore(void)
+{
+	/* mutex held by caller */
+	if (dsi_mfd && dsi_pipe) {
+		mdp4_dsi_cmd_dma_busy_wait(dsi_mfd);
+		mdp4_overlay_update_dsi_cmd(dsi_mfd);
+
+		if (dsi_pipe->blt_addr)
+			mdp4_dsi_blt_dmap_busy_wait(dsi_mfd);
+		mdp4_dsi_cmd_overlay_kickoff(dsi_mfd, dsi_pipe);
+	}
+}
+
+void mdp4_dsi_blt_dmap_busy_wait(struct msm_fb_data_type *mfd)
+{
+	unsigned long flag;
+	int need_wait = 0;
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	if (mfd->dma->dmap_busy == TRUE) {
+		INIT_COMPLETION(mfd->dma->dmap_comp);
+		need_wait++;
+	}
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+
+	if (need_wait) {
+		/* wait until DMA finishes the current job */
+		wait_for_completion(&mfd->dma->dmap_comp);
+	}
+}
+
+/*
+ * mdp4_dsi_cmd_dma_busy_wait: check dsi link activity
+ * The dsi link is a shared resource and can only be used
+ * while it is in the idle state.
+ * ov_mutex needs to be acquired before calling this function.
+ */
+void mdp4_dsi_cmd_dma_busy_wait(struct msm_fb_data_type *mfd)
+{
+	unsigned long flag;
+	int need_wait = 0;
+
+	if (dsi_clock_timer.function) {
+		if (time_after(jiffies, tout_expired)) {
+			tout_expired = jiffies + TOUT_PERIOD;
+			mod_timer(&dsi_clock_timer, tout_expired);
+			tout_expired -= MS_100;
+		}
+	}
+
+	pr_debug("%s: start pid=%d dsi_clk_on=%d\n",
+			__func__, current->pid, mipi_dsi_clk_on);
+
+	/* start dsi clock if necessary */
+	if (mipi_dsi_clk_on == 0) {
+		local_bh_disable();
+		mipi_dsi_clk_enable();
+		local_bh_enable();
+	}
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	if (mfd->dma->busy == TRUE) {
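+		/* only the first waiter re-arms the completion */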
+		if (busy_wait_cnt == 0)
+			INIT_COMPLETION(mfd->dma->comp);
+		busy_wait_cnt++;
+		need_wait++;
+	}
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+
+	if (need_wait) {
+		/* wait until DMA finishes the current job */
+		pr_debug("%s: pending pid=%d dsi_clk_on=%d\n",
+				__func__, current->pid, mipi_dsi_clk_on);
+		wait_for_completion(&mfd->dma->comp);
+	}
+	pr_debug("%s: done pid=%d dsi_clk_on=%d\n",
+			 __func__, current->pid, mipi_dsi_clk_on);
+}
+
+void mdp4_dsi_cmd_kickoff_video(struct msm_fb_data_type *mfd,
+				struct mdp4_overlay_pipe *pipe)
+{
+	if (dsi_pipe->blt_addr && dsi_pipe->blt_cnt == 0)
+		mdp4_overlay_update_dsi_cmd(mfd);
+
+	pr_debug("%s: pid=%d\n", __func__, current->pid);
+
+	if (dsi_pipe->blt_addr)
+		mdp4_dsi_blt_dmap_busy_wait(dsi_mfd);
+
+	mdp4_dsi_cmd_overlay_kickoff(mfd, pipe);
+}
+
+void mdp4_dsi_cmd_kickoff_ui(struct msm_fb_data_type *mfd,
+				struct mdp4_overlay_pipe *pipe)
+{
+
+	pr_debug("%s: pid=%d\n", __func__, current->pid);
+	mdp4_dsi_cmd_overlay_kickoff(mfd, pipe);
+}
+
+
+void mdp4_dsi_cmd_overlay_kickoff(struct msm_fb_data_type *mfd,
+				struct mdp4_overlay_pipe *pipe)
+{
+	unsigned long flag;
+
+
+	mdp4_overlay_dsi_state_set(ST_DSI_PLAYING);
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	mdp_enable_irq(MDP_OVERLAY0_TERM);
+	mfd->dma->busy = TRUE;
+	if (dsi_pipe->blt_addr)
+		mfd->dma->dmap_busy = TRUE;
+	/* start OVERLAY pipe */
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
+
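+	/*
+	 * With blt enabled, the dsi trigger is issued from the overlay0
+	 * and DMA_P done isrs once the writeback frame is handed to dmap.
+	 */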
+	if (dsi_pipe->blt_addr == 0) {
+		/* trigger dsi cmd engine */
+		mipi_dsi_cmd_mdp_sw_trigger();
+	}
+}
+
+void mdp4_dsi_cmd_overlay(struct msm_fb_data_type *mfd)
+{
+	mutex_lock(&mfd->dma->ov_mutex);
+
+	if (mfd && mfd->panel_power_on) {
+		mdp4_dsi_cmd_dma_busy_wait(mfd);
+
+		if (dsi_pipe && dsi_pipe->blt_addr)
+			mdp4_dsi_blt_dmap_busy_wait(mfd);
+
+		mdp4_overlay_update_dsi_cmd(mfd);
+
+		mdp4_dsi_cmd_kickoff_ui(mfd, dsi_pipe);
+
+
+		mdp4_stat.kickoff_dsi++;
+
+		/* signal if pan function is waiting for the update completion */
+		if (mfd->pan_waiting) {
+			mfd->pan_waiting = FALSE;
+			complete(&mfd->pan_comp);
+		}
+	}
+	mdp4_overlay_resource_release();
+	mutex_unlock(&mfd->dma->ov_mutex);
+}
diff --git a/drivers/video/msm/mdp4_overlay_dsi_video.c b/drivers/video/msm/mdp4_overlay_dsi_video.c
new file mode 100644
index 0000000..88b81e7
--- /dev/null
+++ b/drivers/video/msm/mdp4_overlay_dsi_video.c
@@ -0,0 +1,404 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/hrtimer.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+#include <linux/fb.h>
+#include <linux/msm_mdp.h>
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <mach/hardware.h>
+#include "mdp.h"
+#include "msm_fb.h"
+#include "mdp4.h"
+
+#define DSI_VIDEO_BASE	0xE0000
+
+static int first_pixel_start_x;
+static int first_pixel_start_y;
+
+static int writeback_offset;
+
+static struct mdp4_overlay_pipe *dsi_pipe;
+
+static cmd_fxn_t display_on;
+
+void mdp4_dsi_video_fxn_register(cmd_fxn_t fxn)
+{
+	display_on = fxn;
+}
+
+int mdp4_dsi_video_on(struct platform_device *pdev)
+{
+	int dsi_width;
+	int dsi_height;
+	int dsi_bpp;
+	int dsi_border_clr;
+	int dsi_underflow_clr;
+	int dsi_hsync_skew;
+
+	int hsync_period;
+	int hsync_ctrl;
+	int vsync_period;
+	int display_hctl;
+	int display_v_start;
+	int display_v_end;
+	int active_hctl;
+	int active_h_start;
+	int active_h_end;
+	int active_v_start;
+	int active_v_end;
+	int ctrl_polarity;
+	int h_back_porch;
+	int h_front_porch;
+	int v_back_porch;
+	int v_front_porch;
+	int hsync_pulse_width;
+	int vsync_pulse_width;
+	int hsync_polarity;
+	int vsync_polarity;
+	int data_en_polarity;
+	int hsync_start_x;
+	int hsync_end_x;
+	uint8 *buf;
+	int bpp, ptype;
+	struct fb_info *fbi;
+	struct fb_var_screeninfo *var;
+	struct msm_fb_data_type *mfd;
+	struct mdp4_overlay_pipe *pipe;
+	int ret;
+
+	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
+
+	if (!mfd)
+		return -ENODEV;
+
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	fbi = mfd->fbi;
+	var = &fbi->var;
+
+	bpp = fbi->var.bits_per_pixel / 8;
+	buf = (uint8 *) fbi->fix.smem_start;
+	buf += fbi->var.xoffset * bpp +
+		fbi->var.yoffset * fbi->fix.line_length;
+
+	if (dsi_pipe == NULL) {
+		ptype = mdp4_overlay_format2type(mfd->fb_imgType);
+		if (ptype < 0)
+			printk(KERN_INFO "%s: format2type failed\n", __func__);
+		pipe = mdp4_overlay_pipe_alloc(ptype, MDP4_MIXER0, 0);
+		if (pipe == NULL) {
+			printk(KERN_INFO "%s: pipe_alloc failed\n", __func__);
+			return -EBUSY;
+		}
+		pipe->pipe_used++;
+		pipe->mixer_stage  = MDP4_MIXER_STAGE_BASE;
+		pipe->mixer_num  = MDP4_MIXER0;
+		pipe->src_format = mfd->fb_imgType;
+		mdp4_overlay_panel_mode(pipe->mixer_num, MDP4_PANEL_DSI_VIDEO);
+		ret = mdp4_overlay_format2pipe(pipe);
+		if (ret < 0)
+			printk(KERN_INFO "%s: format2pipe failed\n", __func__);
+
+		dsi_pipe = pipe; /* keep it */
+
+		writeback_offset = mdp4_overlay_writeback_setup(
+						fbi, pipe, buf, bpp);
+	} else {
+		pipe = dsi_pipe;
+	}
+
+	/* MDP cmd block enable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	if (is_mdp4_hw_reset()) {
+		mdp4_hw_init();
+		outpdw(MDP_BASE + 0x0038, mdp4_display_intf);
+	}
+
+	pipe->src_height = fbi->var.yres;
+	pipe->src_width = fbi->var.xres;
+	pipe->src_h = fbi->var.yres;
+	pipe->src_w = fbi->var.xres;
+	pipe->src_y = 0;
+	pipe->src_x = 0;
+	pipe->srcp0_addr = (uint32) buf;
+	pipe->srcp0_ystride = fbi->fix.line_length;
+	pipe->bpp = bpp;
+
+	pipe->dst_h = fbi->var.yres;
+	pipe->dst_w = fbi->var.xres;
+
+	mdp4_overlay_dmap_xy(pipe);	/* dma_p */
+	mdp4_overlay_dmap_cfg(mfd, 1);
+
+	mdp4_overlay_rgb_setup(pipe);
+
+	mdp4_mixer_stage_up(pipe);
+
+	mdp4_overlayproc_cfg(pipe);
+
+	/*
+	 * DSI timing setting
+	 */
+	h_back_porch = var->left_margin;
+	h_front_porch = var->right_margin;
+	v_back_porch = var->upper_margin;
+	v_front_porch = var->lower_margin;
+	hsync_pulse_width = var->hsync_len;
+	vsync_pulse_width = var->vsync_len;
+	dsi_border_clr = mfd->panel_info.lcdc.border_clr;
+	dsi_underflow_clr = mfd->panel_info.lcdc.underflow_clr;
+	dsi_hsync_skew = mfd->panel_info.lcdc.hsync_skew;
+	dsi_width = mfd->panel_info.xres +
+		mfd->panel_info.mipi.xres_pad;
+	dsi_height = mfd->panel_info.yres +
+		mfd->panel_info.mipi.yres_pad;
+	dsi_bpp = mfd->panel_info.bpp;
+
+	hsync_period = hsync_pulse_width + h_back_porch + dsi_width
+				+ h_front_porch;
+	hsync_ctrl = (hsync_period << 16) | hsync_pulse_width;
+	hsync_start_x = h_back_porch + hsync_pulse_width;
+	hsync_end_x = hsync_period - h_front_porch - 1;
+	display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+	vsync_period =
+	    (vsync_pulse_width + v_back_porch + dsi_height + v_front_porch);
+	display_v_start = ((vsync_pulse_width + v_back_porch) * hsync_period)
+				+ dsi_hsync_skew;
+	display_v_end =
+	  ((vsync_period - v_front_porch) * hsync_period) + dsi_hsync_skew - 1;
+
+	if (dsi_width != var->xres) {
+		active_h_start = hsync_start_x + first_pixel_start_x;
+		active_h_end = active_h_start + var->xres - 1;
+		active_hctl =
+		    ACTIVE_START_X_EN | (active_h_end << 16) | active_h_start;
+	} else {
+		active_hctl = 0;
+	}
+
+	if (dsi_height != var->yres) {
+		active_v_start =
+		    display_v_start + first_pixel_start_y * hsync_period;
+		active_v_end = active_v_start + (var->yres) * hsync_period - 1;
+		active_v_start |= ACTIVE_START_Y_EN;
+	} else {
+		active_v_start = 0;
+		active_v_end = 0;
+	}
+
+	dsi_underflow_clr |= 0x80000000;	/* enable recovery */
+	hsync_polarity = 0;
+	vsync_polarity = 0;
+	data_en_polarity = 0;
+
+	ctrl_polarity =
+	    (data_en_polarity << 2) | (vsync_polarity << 1) | (hsync_polarity);
+
+	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x4, hsync_ctrl);
+	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x8, vsync_period * hsync_period);
+	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0xc,
+				vsync_pulse_width * hsync_period);
+	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x10, display_hctl);
+	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x14, display_v_start);
+	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x18, display_v_end);
+	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x1c, active_hctl);
+	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x20, active_v_start);
+	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x24, active_v_end);
+	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x28, dsi_border_clr);
+	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x2c, dsi_underflow_clr);
+	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x30, dsi_hsync_skew);
+	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x38, ctrl_polarity);
+	mdp4_overlay_reg_flush(pipe, 1);
+	mdp_histogram_ctrl(TRUE);
+
+	ret = panel_next_on(pdev);
+	if (ret == 0) {
+		/* enable DSI block */
+		MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 1);
+		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+		if (display_on != NULL) {
+			msleep(50);
+			display_on(pdev);
+		}
+	}
+	/* MDP cmd block disable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+	return ret;
+}
+
+int mdp4_dsi_video_off(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	/* MDP cmd block enable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 0);
+	/* MDP cmd block disable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	mdp_histogram_ctrl(FALSE);
+	ret = panel_next_off(pdev);
+
+#ifdef MIPI_DSI_RGB_UNSTAGE
+	/* delay to make sure the last frame finishes */
+	msleep(100);
+
+	/* dis-engage rgb0 from mixer0 */
+	if (dsi_pipe)
+		mdp4_mixer_stage_down(dsi_pipe);
+#endif
+
+	return ret;
+}
+
+#ifdef CONFIG_FB_MSM_OVERLAY_WRITEBACK
+int mdp4_dsi_video_overlay_blt_offset(struct msm_fb_data_type *mfd,
+					struct msmfb_overlay_blt *req)
+{
+	req->offset = writeback_offset;
+	req->width = dsi_pipe->src_width;
+	req->height = dsi_pipe->src_height;
+	req->bpp = dsi_pipe->bpp;
+
+	return sizeof(*req);
+}
+
+void mdp4_dsi_video_overlay_blt(struct msm_fb_data_type *mfd,
+					struct msmfb_overlay_blt *req)
+{
+	unsigned long flag;
+	int change = 0;
+
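+	/* switch blt on/off atomically with respect to the display isr */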
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	if (req->enable && dsi_pipe->blt_addr == 0) {
+		dsi_pipe->blt_addr = dsi_pipe->blt_base;
+		change++;
+	} else if (req->enable == 0 && dsi_pipe->blt_addr) {
+		dsi_pipe->blt_addr = 0;
+		change++;
+	}
+	pr_debug("%s: blt_addr=%x\n", __func__, (int)dsi_pipe->blt_addr);
+	dsi_pipe->blt_cnt = 0;
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+
+	if (!change)
+		return;
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	/*
+	 * unlike LCDC, it does not work to turn the dsi video timing
+	 * generator off, apply the new configuration, and turn it back on.
+	 */
+	mdp4_overlayproc_cfg(dsi_pipe);
+	mdp4_overlay_dmap_xy(dsi_pipe);
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+#else
+int mdp4_dsi_video_overlay_blt_offset(struct msm_fb_data_type *mfd,
+					struct msmfb_overlay_blt *req)
+{
+	return 0;
+}
+void mdp4_dsi_video_overlay_blt(struct msm_fb_data_type *mfd,
+					struct msmfb_overlay_blt *req)
+{
+	return;
+}
+#endif
+
+void mdp4_overlay_dsi_video_wait4vsync(struct msm_fb_data_type *mfd)
+{
+	unsigned long flag;
+
+	 /* enable irq */
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	mdp_enable_irq(MDP_DMA2_TERM);	/* enable intr */
+	INIT_COMPLETION(dsi_pipe->comp);
+	mfd->dma->waiting = TRUE;
+	outp32(MDP_INTR_CLEAR, INTR_PRIMARY_VSYNC);
+	mdp_intr_mask |= INTR_PRIMARY_VSYNC;
+	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+	wait_for_completion_killable(&dsi_pipe->comp);
+	mdp_disable_irq(MDP_DMA2_TERM);
+}
+
+void mdp4_overlay_dsi_video_vsync_push(struct msm_fb_data_type *mfd,
+			struct mdp4_overlay_pipe *pipe)
+{
+
+	mdp4_overlay_reg_flush(pipe, 1);
+	if (pipe->flags & MDP_OV_PLAY_NOWAIT)
+		return;
+
+	mdp4_overlay_dsi_video_wait4vsync(mfd);
+}
+
+/*
+ * mdp4_primary_vsync_dsi_video: called from isr
+ */
+void mdp4_primary_vsync_dsi_video(void)
+{
+	complete_all(&dsi_pipe->comp);
+}
+
+/*
+ * mdp4_overlay0_done_dsi_video: called from isr
+ */
+void mdp4_overlay0_done_dsi_video(void)
+{
+	complete(&dsi_pipe->comp);
+}
+
+
+void mdp4_dsi_video_overlay(struct msm_fb_data_type *mfd)
+{
+	struct fb_info *fbi = mfd->fbi;
+	uint8 *buf;
+	int bpp;
+	struct mdp4_overlay_pipe *pipe;
+
+	if (!mfd->panel_power_on)
+		return;
+
+	/* no need to power on cmd block since it's dsi video mode */
+	bpp = fbi->var.bits_per_pixel / 8;
+	buf = (uint8 *) fbi->fix.smem_start;
+	buf += fbi->var.xoffset * bpp +
+		fbi->var.yoffset * fbi->fix.line_length;
+
+	mutex_lock(&mfd->dma->ov_mutex);
+
+	pipe = dsi_pipe;
+	pipe->srcp0_addr = (uint32) buf;
+	mdp4_overlay_rgb_setup(pipe);
+	mutex_unlock(&mfd->dma->ov_mutex);
+	mdp4_overlay_dsi_video_vsync_push(mfd, pipe);
+	mdp4_stat.kickoff_dsi++;
+	mdp4_overlay_resource_release();
+}
diff --git a/drivers/video/msm/mdp4_overlay_dtv.c b/drivers/video/msm/mdp4_overlay_dtv.c
new file mode 100644
index 0000000..71b460c
--- /dev/null
+++ b/drivers/video/msm/mdp4_overlay_dtv.c
@@ -0,0 +1,404 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/hrtimer.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+#include <linux/fb.h>
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <mach/hardware.h>
+
+#include "mdp.h"
+#include "msm_fb.h"
+#include "mdp4.h"
+
+#define DTV_BASE	0xD0000
+
+/*#define DEBUG*/
+#ifdef DEBUG
+static void __mdp_outp(uint32 port, uint32 value)
+{
+	uint32 in_val;
+
+	outpdw(port, value);
+	in_val = inpdw(port);
+	printk(KERN_INFO "MDP-DTV[%04x] => %08x [%08x]\n",
+		port-(uint32)(MDP_BASE + DTV_BASE), value, in_val);
+}
+
+#undef MDP_OUTP
+#define MDP_OUTP(port, value)	__mdp_outp((uint32)(port), (value))
+#endif
+
+static int first_pixel_start_x;
+static int first_pixel_start_y;
+
+static struct mdp4_overlay_pipe *dtv_pipe;
+
+int mdp4_dtv_on(struct platform_device *pdev)
+{
+	int dtv_width;
+	int dtv_height;
+	int dtv_bpp;
+	int dtv_border_clr;
+	int dtv_underflow_clr;
+	int dtv_hsync_skew;
+
+	int hsync_period;
+	int hsync_ctrl;
+	int vsync_period;
+	int display_hctl;
+	int display_v_start;
+	int display_v_end;
+	int active_hctl;
+	int active_h_start;
+	int active_h_end;
+	int active_v_start;
+	int active_v_end;
+	int ctrl_polarity;
+	int h_back_porch;
+	int h_front_porch;
+	int v_back_porch;
+	int v_front_porch;
+	int hsync_pulse_width;
+	int vsync_pulse_width;
+	int hsync_polarity;
+	int vsync_polarity;
+	int data_en_polarity;
+	int hsync_start_x;
+	int hsync_end_x;
+	uint8 *buf;
+	int bpp, ptype;
+	uint32 format;
+	struct fb_info *fbi;
+	struct fb_var_screeninfo *var;
+	struct msm_fb_data_type *mfd;
+	struct mdp4_overlay_pipe *pipe;
+	int ret;
+
+	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
+
+	if (!mfd)
+		return -ENODEV;
+
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	fbi = mfd->fbi;
+	var = &fbi->var;
+
+	bpp = fbi->var.bits_per_pixel / 8;
+	buf = (uint8 *) fbi->fix.smem_start;
+	buf += fbi->var.xoffset * bpp +
+		fbi->var.yoffset * fbi->fix.line_length;
+
+	if (bpp == 2)
+		format = MDP_RGB_565;
+	else if (bpp == 3)
+		format = MDP_RGB_888;
+	else
+		format = MDP_ARGB_8888;
+
+	if (dtv_pipe == NULL) {
+		ptype = mdp4_overlay_format2type(format);
+		if (ptype < 0)
+			printk(KERN_INFO "%s: format2type failed\n", __func__);
+		pipe = mdp4_overlay_pipe_alloc(ptype, MDP4_MIXER1, 0);
+		if (pipe == NULL) {
+			printk(KERN_INFO "%s: pipe_alloc failed\n", __func__);
+			return -EBUSY;
+		}
+		pipe->pipe_used++;
+		pipe->mixer_stage  = MDP4_MIXER_STAGE_BASE;
+		pipe->mixer_num  = MDP4_MIXER1;
+		pipe->src_format = format;
+		mdp4_overlay_panel_mode(pipe->mixer_num, MDP4_PANEL_DTV);
+		ret = mdp4_overlay_format2pipe(pipe);
+		if (ret < 0)
+			printk(KERN_INFO "%s: format2pipe failed\n", __func__);
+
+		dtv_pipe = pipe; /* keep it */
+	} else {
+		pipe = dtv_pipe;
+	}
+
+	/* MDP cmd block enable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	pipe->src_height = fbi->var.yres;
+	pipe->src_width = fbi->var.xres;
+	pipe->src_h = fbi->var.yres;
+	pipe->src_w = fbi->var.xres;
+	pipe->src_y = 0;
+	pipe->src_x = 0;
+	pipe->srcp0_addr = (uint32) buf;
+	pipe->srcp0_ystride = fbi->fix.line_length;
+
+	mdp4_overlay_dmae_xy(pipe);	/* dma_e */
+	mdp4_overlay_dmae_cfg(mfd, 0);
+
+	mdp4_overlay_rgb_setup(pipe);
+
+	mdp4_mixer_stage_up(pipe);
+
+	mdp4_overlayproc_cfg(pipe);
+
+	/*
+	 * DTV timing setting
+	 */
+	h_back_porch = var->left_margin;
+	h_front_porch = var->right_margin;
+	v_back_porch = var->upper_margin;
+	v_front_porch = var->lower_margin;
+	hsync_pulse_width = var->hsync_len;
+	vsync_pulse_width = var->vsync_len;
+	dtv_border_clr = mfd->panel_info.lcdc.border_clr;
+	dtv_underflow_clr = mfd->panel_info.lcdc.underflow_clr;
+	dtv_hsync_skew = mfd->panel_info.lcdc.hsync_skew;
+
+	pr_info("%s: <ID=%d %dx%d (%d,%d,%d), (%d,%d,%d) %dMHz>\n", __func__,
+		var->reserved[3], var->xres, var->yres,
+		var->right_margin, var->hsync_len, var->left_margin,
+		var->lower_margin, var->vsync_len, var->upper_margin,
+		var->pixclock/1000/1000);
+
+	dtv_width = var->xres;
+	dtv_height = var->yres;
+	dtv_bpp = mfd->panel_info.bpp;
+
+	hsync_period =
+	    hsync_pulse_width + h_back_porch + dtv_width + h_front_porch;
+	hsync_ctrl = (hsync_period << 16) | hsync_pulse_width;
+	hsync_start_x = hsync_pulse_width + h_back_porch;
+	hsync_end_x = hsync_period - h_front_porch - 1;
+	display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+	vsync_period =
+	    (vsync_pulse_width + v_back_porch + dtv_height +
+	     v_front_porch) * hsync_period;
+	display_v_start =
+	    (vsync_pulse_width + v_back_porch) * hsync_period + dtv_hsync_skew;
+	display_v_end =
+	    vsync_period - (v_front_porch * hsync_period) + dtv_hsync_skew - 1;
+
+	if (dtv_width != var->xres) {
+		active_h_start = hsync_start_x + first_pixel_start_x;
+		active_h_end = active_h_start + var->xres - 1;
+		active_hctl =
+		    ACTIVE_START_X_EN | (active_h_end << 16) | active_h_start;
+	} else {
+		active_hctl = 0;
+	}
+
+	if (dtv_height != var->yres) {
+		active_v_start =
+		    display_v_start + first_pixel_start_y * hsync_period;
+		active_v_end = active_v_start + (var->yres) * hsync_period - 1;
+		active_v_start |= ACTIVE_START_Y_EN;
+	} else {
+		active_v_start = 0;
+		active_v_end = 0;
+	}
+
+	dtv_underflow_clr |= 0x80000000;	/* enable recovery */
+	hsync_polarity = fbi->var.yres >= 720 ? 0 : 1;
+	vsync_polarity = fbi->var.yres >= 720 ? 0 : 1;
+	data_en_polarity = 0;
+
+	ctrl_polarity =
+	    (data_en_polarity << 2) | (vsync_polarity << 1) | (hsync_polarity);
+
+
+	MDP_OUTP(MDP_BASE + DTV_BASE + 0x4, hsync_ctrl);
+	MDP_OUTP(MDP_BASE + DTV_BASE + 0x8, vsync_period);
+	MDP_OUTP(MDP_BASE + DTV_BASE + 0xc, vsync_pulse_width * hsync_period);
+	MDP_OUTP(MDP_BASE + DTV_BASE + 0x18, display_hctl);
+	MDP_OUTP(MDP_BASE + DTV_BASE + 0x1c, display_v_start);
+	MDP_OUTP(MDP_BASE + DTV_BASE + 0x20, display_v_end);
+	MDP_OUTP(MDP_BASE + DTV_BASE + 0x40, dtv_border_clr);
+	MDP_OUTP(MDP_BASE + DTV_BASE + 0x44, dtv_underflow_clr);
+	MDP_OUTP(MDP_BASE + DTV_BASE + 0x48, dtv_hsync_skew);
+	MDP_OUTP(MDP_BASE + DTV_BASE + 0x50, ctrl_polarity);
+	MDP_OUTP(MDP_BASE + DTV_BASE + 0x2c, active_hctl);
+	MDP_OUTP(MDP_BASE + DTV_BASE + 0x30, active_v_start);
+	MDP_OUTP(MDP_BASE + DTV_BASE + 0x38, active_v_end);
+
+	/* Test pattern 8 x 8 pixel */
+	/* MDP_OUTP(MDP_BASE + DTV_BASE + 0x4C, 0x80000808); */
+
+	ret = panel_next_on(pdev);
+	if (ret == 0) {
+		/* enable DTV block */
+		MDP_OUTP(MDP_BASE + DTV_BASE, 1);
+		mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+		dev_info(&pdev->dev, "mdp4_overlay_dtv: on");
+	} else {
+		dev_warn(&pdev->dev, "mdp4_overlay_dtv: panel_next_on failed");
+	}
+	/* MDP cmd block disable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+	return ret;
+}
+
+int mdp4_dtv_off(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	/* MDP cmd block enable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	MDP_OUTP(MDP_BASE + DTV_BASE, 0);
+	/* MDP cmd block disable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+	/*
+	 * wait for vsync == 16.6 ms to make sure
+	 * the last frame finishes
+	*/
+	msleep(20);
+	pr_info("%s\n", __func__);
+
+	ret = panel_next_off(pdev);
+
+	/* dis-engage rgb2 from mixer1 */
+	if (dtv_pipe)
+		mdp4_mixer_stage_down(dtv_pipe);
+
+	/*
+	 * wait for another vsync == 16.6 ms to make sure
+	 * rgb2 dis-engaged
+	*/
+	msleep(20);
+
+	return ret;
+}
+
+static void mdp4_overlay_dtv_wait4vsync(struct msm_fb_data_type *mfd)
+{
+	unsigned long flag;
+
+	/* enable irq */
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	mdp_enable_irq(MDP_OVERLAY1_TERM);
+	INIT_COMPLETION(dtv_pipe->comp);
+	mfd->dma->waiting = TRUE;
+	outp32(MDP_INTR_CLEAR, INTR_EXTERNAL_VSYNC);
+	mdp_intr_mask |= INTR_EXTERNAL_VSYNC;
+	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+	wait_for_completion_killable(&dtv_pipe->comp);
+	mdp_disable_irq(MDP_OVERLAY1_TERM);
+}
+
+void mdp4_overlay_dtv_vsync_push(struct msm_fb_data_type *mfd,
+			struct mdp4_overlay_pipe *pipe)
+{
+
+	mdp4_overlay_reg_flush(pipe, 1);
+	if (pipe->flags & MDP_OV_PLAY_NOWAIT)
+		return;
+
+	mdp4_overlay_dtv_wait4vsync(mfd);
+}
+
+static void mdp4_overlay_dtv_wait4_ov_done(struct msm_fb_data_type *mfd)
+{
+	unsigned long flag;
+
+	/* enable irq */
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	mdp_enable_irq(MDP_OVERLAY1_TERM);
+	INIT_COMPLETION(dtv_pipe->comp);
+	mfd->dma->waiting = TRUE;
+	outp32(MDP_INTR_CLEAR, INTR_OVERLAY1_DONE);
+	mdp_intr_mask |= INTR_OVERLAY1_DONE;
+	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+	wait_for_completion_killable(&dtv_pipe->comp);
+	mdp_disable_irq(MDP_OVERLAY1_TERM);
+}
+
+void mdp4_overlay_dtv_ov_done_push(struct msm_fb_data_type *mfd,
+			struct mdp4_overlay_pipe *pipe)
+{
+
+	mdp4_overlay_reg_flush(pipe, 1);
+	if (pipe->flags & MDP_OV_PLAY_NOWAIT)
+		return;
+
+	mdp4_overlay_dtv_wait4_ov_done(mfd);
+}
+
+void mdp4_external_vsync_dtv(void)
+{
+	complete(&dtv_pipe->comp);
+}
+
+/*
+ * mdp4_overlay1_done_dtv: called from isr
+ */
+void mdp4_overlay1_done_dtv(void)
+{
+	complete(&dtv_pipe->comp);
+}
+
+void mdp4_dtv_overlay(struct msm_fb_data_type *mfd)
+{
+	struct fb_info *fbi = mfd->fbi;
+	uint8 *buf;
+	int bpp;
+	unsigned long flag;
+	struct mdp4_overlay_pipe *pipe;
+
+	if (!mfd->panel_power_on)
+		return;
+
+	/* no need to power on cmd block since it's dtv mode */
+	bpp = fbi->var.bits_per_pixel / 8;
+	buf = (uint8 *) fbi->fix.smem_start;
+	buf += fbi->var.xoffset * bpp +
+		fbi->var.yoffset * fbi->fix.line_length;
+
+	mutex_lock(&mfd->dma->ov_mutex);
+
+	pipe = dtv_pipe;
+	pipe->srcp0_addr = (uint32) buf;
+	mdp4_overlay_rgb_setup(pipe);
+	mdp4_overlay_reg_flush(pipe, 1); /* rgb2 and mixer1 */
+
+	/* enable irq */
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	mdp_enable_irq(MDP_OVERLAY1_TERM);
+	INIT_COMPLETION(dtv_pipe->comp);
+	mfd->dma->waiting = TRUE;
+	outp32(MDP_INTR_CLEAR, INTR_OVERLAY1_DONE);
+	mdp_intr_mask |= INTR_OVERLAY1_DONE;
+	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+	wait_for_completion_killable(&dtv_pipe->comp);
+	mdp_disable_irq(MDP_OVERLAY1_TERM);
+
+	mdp4_stat.kickoff_dtv++;
+	mdp4_overlay_resource_release();
+	mutex_unlock(&mfd->dma->ov_mutex);
+}
diff --git a/drivers/video/msm/mdp4_overlay_lcdc.c b/drivers/video/msm/mdp4_overlay_lcdc.c
new file mode 100644
index 0000000..ed44d54
--- /dev/null
+++ b/drivers/video/msm/mdp4_overlay_lcdc.c
@@ -0,0 +1,395 @@
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/hrtimer.h>
+#include <linux/delay.h>
+#include <mach/hardware.h>
+#include <linux/io.h>
+
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+
+#include <linux/fb.h>
+
+#include "mdp.h"
+#include "msm_fb.h"
+#include "mdp4.h"
+
+#ifdef CONFIG_FB_MSM_MDP40
+#define LCDC_BASE	0xC0000
+#else
+#define LCDC_BASE	0xE0000
+#endif
+
+int first_pixel_start_x;
+int first_pixel_start_y;
+
+static int writeback_offset;
+
+static struct mdp4_overlay_pipe *lcdc_pipe;
+static struct completion lcdc_comp;
+
+int mdp_lcdc_on(struct platform_device *pdev)
+{
+	int lcdc_width;
+	int lcdc_height;
+	int lcdc_bpp;
+	int lcdc_border_clr;
+	int lcdc_underflow_clr;
+	int lcdc_hsync_skew;
+
+	int hsync_period;
+	int hsync_ctrl;
+	int vsync_period;
+	int display_hctl;
+	int display_v_start;
+	int display_v_end;
+	int active_hctl;
+	int active_h_start;
+	int active_h_end;
+	int active_v_start;
+	int active_v_end;
+	int ctrl_polarity;
+	int h_back_porch;
+	int h_front_porch;
+	int v_back_porch;
+	int v_front_porch;
+	int hsync_pulse_width;
+	int vsync_pulse_width;
+	int hsync_polarity;
+	int vsync_polarity;
+	int data_en_polarity;
+	int hsync_start_x;
+	int hsync_end_x;
+	uint8 *buf;
+	int bpp, ptype;
+	struct fb_info *fbi;
+	struct fb_var_screeninfo *var;
+	struct msm_fb_data_type *mfd;
+	struct mdp4_overlay_pipe *pipe;
+	int ret;
+
+	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
+
+	if (!mfd)
+		return -ENODEV;
+
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	fbi = mfd->fbi;
+	var = &fbi->var;
+
+	/* MDP cmd block enable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	if (is_mdp4_hw_reset()) {
+		mdp4_hw_init();
+		outpdw(MDP_BASE + 0x0038, mdp4_display_intf);
+	}
+
+	bpp = fbi->var.bits_per_pixel / 8;
+	buf = (uint8 *) fbi->fix.smem_start;
+	buf += fbi->var.xoffset * bpp +
+		fbi->var.yoffset * fbi->fix.line_length;
+
+	if (lcdc_pipe == NULL) {
+		ptype = mdp4_overlay_format2type(mfd->fb_imgType);
+		if (ptype < 0)
+			printk(KERN_INFO "%s: format2type failed\n", __func__);
+		pipe = mdp4_overlay_pipe_alloc(ptype, MDP4_MIXER0, 0);
+		if (pipe == NULL) {
+			printk(KERN_INFO "%s: pipe_alloc failed\n", __func__);
+			mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF,
+					FALSE);
+			return -EBUSY;
+		}
+		pipe->pipe_used++;
+		pipe->mixer_stage  = MDP4_MIXER_STAGE_BASE;
+		pipe->mixer_num  = MDP4_MIXER0;
+		pipe->src_format = mfd->fb_imgType;
+		mdp4_overlay_panel_mode(pipe->mixer_num, MDP4_PANEL_LCDC);
+		ret = mdp4_overlay_format2pipe(pipe);
+		if (ret < 0)
+			printk(KERN_INFO "%s: format2pipe failed\n", __func__);
+		lcdc_pipe = pipe; /* keep it */
+		init_completion(&lcdc_comp);
+
+		writeback_offset = mdp4_overlay_writeback_setup(
+						fbi, pipe, buf, bpp);
+	} else {
+		pipe = lcdc_pipe;
+	}
+
+	pipe->src_height = fbi->var.yres;
+	pipe->src_width = fbi->var.xres;
+	pipe->src_h = fbi->var.yres;
+	pipe->src_w = fbi->var.xres;
+	pipe->src_y = 0;
+	pipe->src_x = 0;
+	pipe->srcp0_addr = (uint32) buf;
+	pipe->srcp0_ystride = fbi->fix.line_length;
+	pipe->bpp = bpp;
+
+	mdp4_overlay_dmap_xy(pipe);
+	mdp4_overlay_dmap_cfg(mfd, 1);
+
+	mdp4_overlay_rgb_setup(pipe);
+
+	mdp4_mixer_stage_up(pipe);
+
+	mdp4_overlayproc_cfg(pipe);
+
+	/*
+	 * LCDC timing setting
+	 */
+	h_back_porch = var->left_margin;
+	h_front_porch = var->right_margin;
+	v_back_porch = var->upper_margin;
+	v_front_porch = var->lower_margin;
+	hsync_pulse_width = var->hsync_len;
+	vsync_pulse_width = var->vsync_len;
+	lcdc_border_clr = mfd->panel_info.lcdc.border_clr;
+	lcdc_underflow_clr = mfd->panel_info.lcdc.underflow_clr;
+	lcdc_hsync_skew = mfd->panel_info.lcdc.hsync_skew;
+
+	lcdc_width = var->xres;
+	lcdc_height = var->yres;
+	lcdc_bpp = mfd->panel_info.bpp;
+
+	hsync_period =
+	    hsync_pulse_width + h_back_porch + lcdc_width + h_front_porch;
+	hsync_ctrl = (hsync_period << 16) | hsync_pulse_width;
+	hsync_start_x = hsync_pulse_width + h_back_porch;
+	hsync_end_x = hsync_period - h_front_porch - 1;
+	display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+	vsync_period =
+	    (vsync_pulse_width + v_back_porch + lcdc_height +
+	     v_front_porch) * hsync_period;
+	display_v_start =
+	    (vsync_pulse_width + v_back_porch) * hsync_period + lcdc_hsync_skew;
+	display_v_end =
+	    vsync_period - (v_front_porch * hsync_period) + lcdc_hsync_skew - 1;
+
+	if (lcdc_width != var->xres) {
+		active_h_start = hsync_start_x + first_pixel_start_x;
+		active_h_end = active_h_start + var->xres - 1;
+		active_hctl =
+		    ACTIVE_START_X_EN | (active_h_end << 16) | active_h_start;
+	} else {
+		active_hctl = 0;
+	}
+
+	if (lcdc_height != var->yres) {
+		active_v_start =
+		    display_v_start + first_pixel_start_y * hsync_period;
+		active_v_end = active_v_start + (var->yres) * hsync_period - 1;
+		active_v_start |= ACTIVE_START_Y_EN;
+	} else {
+		active_v_start = 0;
+		active_v_end = 0;
+	}
+
+
+#ifdef CONFIG_FB_MSM_MDP40
+	hsync_polarity = 1;
+	vsync_polarity = 1;
+	lcdc_underflow_clr |= 0x80000000;	/* enable recovery */
+#else
+	hsync_polarity = 0;
+	vsync_polarity = 0;
+#endif
+	data_en_polarity = 0;
+
+	ctrl_polarity =
+	    (data_en_polarity << 2) | (vsync_polarity << 1) | (hsync_polarity);
+
+	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x4, hsync_ctrl);
+	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x8, vsync_period);
+	MDP_OUTP(MDP_BASE + LCDC_BASE + 0xc, vsync_pulse_width * hsync_period);
+	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x10, display_hctl);
+	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x14, display_v_start);
+	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x18, display_v_end);
+	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x28, lcdc_border_clr);
+	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x2c, lcdc_underflow_clr);
+	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x30, lcdc_hsync_skew);
+	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x38, ctrl_polarity);
+	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x1c, active_hctl);
+	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x20, active_v_start);
+	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x24, active_v_end);
+
+	mdp4_overlay_reg_flush(pipe, 1);
+#ifdef CONFIG_MSM_BUS_SCALING
+	mdp_bus_scale_update_request(2);
+#endif
+	mdp_histogram_ctrl(TRUE);
+
+	ret = panel_next_on(pdev);
+	if (ret == 0) {
+		/* enable LCDC block */
+		MDP_OUTP(MDP_BASE + LCDC_BASE, 1);
+		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	}
+	/* MDP cmd block disable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+	return ret;
+}
+
+int mdp_lcdc_off(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	/* MDP cmd block enable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	MDP_OUTP(MDP_BASE + LCDC_BASE, 0);
+	/* MDP cmd block disable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+	mdp_histogram_ctrl(FALSE);
+	ret = panel_next_off(pdev);
+
+	/* delay to make sure the last frame finishes */
+	msleep(16);
+
+#ifdef LCDC_RGB_UNSTAGE
+	/* dis-engage rgb0 from mixer0 */
+	if (lcdc_pipe)
+		mdp4_mixer_stage_down(lcdc_pipe);
+#endif
+#ifdef CONFIG_MSM_BUS_SCALING
+	mdp_bus_scale_update_request(0);
+#endif
+
+	return ret;
+}
+
+
+#ifdef CONFIG_FB_MSM_OVERLAY_WRITEBACK
+int mdp4_lcdc_overlay_blt_offset(struct msm_fb_data_type *mfd,
+					struct msmfb_overlay_blt *req)
+{
+	req->offset = writeback_offset;
+	req->width = lcdc_pipe->src_width;
+	req->height = lcdc_pipe->src_height;
+	req->bpp = lcdc_pipe->bpp;
+
+	return sizeof(*req);
+}
+
+void mdp4_lcdc_overlay_blt(struct msm_fb_data_type *mfd,
+					struct msmfb_overlay_blt *req)
+{
+	unsigned long flag;
+	int change = 0;
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	if (req->enable && lcdc_pipe->blt_addr == 0) {
+		lcdc_pipe->blt_addr = lcdc_pipe->blt_base;
+		change++;
+	} else if (req->enable == 0 && lcdc_pipe->blt_addr) {
+		lcdc_pipe->blt_addr = 0;
+		change++;
+	}
+	pr_debug("%s: blt_addr=%x\n", __func__, (int)lcdc_pipe->blt_addr);
+	lcdc_pipe->blt_cnt = 0;
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+
+	if (!change)
+		return;
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	MDP_OUTP(MDP_BASE + LCDC_BASE, 0);	/* stop lcdc */
+	msleep(50);
+	mdp4_overlayproc_cfg(lcdc_pipe);
+	mdp4_overlay_dmap_xy(lcdc_pipe);
+	MDP_OUTP(MDP_BASE + LCDC_BASE, 1);	/* start lcdc */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+#endif
+
+void mdp4_overlay_lcdc_wait4vsync(struct msm_fb_data_type *mfd)
+{
+	unsigned long flag;
+
+	 /* enable irq */
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	mdp_enable_irq(MDP_DMA2_TERM);	/* enable intr */
+	INIT_COMPLETION(lcdc_comp);
+	mfd->dma->waiting = TRUE;
+	outp32(MDP_INTR_CLEAR, INTR_PRIMARY_VSYNC);
+	mdp_intr_mask |= INTR_PRIMARY_VSYNC;
+	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+	wait_for_completion_killable(&lcdc_comp);
+	mdp_disable_irq(MDP_DMA2_TERM);
+}
+
+void mdp4_overlay_lcdc_vsync_push(struct msm_fb_data_type *mfd,
+			struct mdp4_overlay_pipe *pipe)
+{
+
+	mdp4_overlay_reg_flush(pipe, 1);
+	if (pipe->flags & MDP_OV_PLAY_NOWAIT)
+		return;
+
+	mdp4_overlay_lcdc_wait4vsync(mfd);
+}
+
+/*
+ * mdp4_primary_vsync_lcdc: called from isr
+ */
+void mdp4_primary_vsync_lcdc(void)
+{
+	complete_all(&lcdc_comp);
+}
+
+/*
+ * mdp4_overlay0_done_lcdc: called from isr
+ */
+void mdp4_overlay0_done_lcdc(void)
+{
+	complete_all(&lcdc_comp);
+}
+
+void mdp4_lcdc_overlay(struct msm_fb_data_type *mfd)
+{
+	struct fb_info *fbi = mfd->fbi;
+	uint8 *buf;
+	int bpp;
+	struct mdp4_overlay_pipe *pipe;
+
+	if (!mfd->panel_power_on)
+		return;
+
+	/* no need to power on cmd block since it's lcdc mode */
+	bpp = fbi->var.bits_per_pixel / 8;
+	buf = (uint8 *) fbi->fix.smem_start;
+	buf += fbi->var.xoffset * bpp +
+		fbi->var.yoffset * fbi->fix.line_length;
+
+	mutex_lock(&mfd->dma->ov_mutex);
+
+	pipe = lcdc_pipe;
+	pipe->srcp0_addr = (uint32) buf;
+	mdp4_overlay_rgb_setup(pipe);
+	mutex_unlock(&mfd->dma->ov_mutex);
+	mdp4_overlay_lcdc_vsync_push(mfd, pipe);
+	mdp4_stat.kickoff_lcdc++;
+	mdp4_overlay_resource_release();
+}
diff --git a/drivers/video/msm/mdp4_overlay_mddi.c b/drivers/video/msm/mdp4_overlay_mddi.c
new file mode 100644
index 0000000..2bf9faf
--- /dev/null
+++ b/drivers/video/msm/mdp4_overlay_mddi.c
@@ -0,0 +1,612 @@
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/hrtimer.h>
+#include <linux/delay.h>
+#include <mach/hardware.h>
+#include <linux/io.h>
+
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+
+#include <linux/fb.h>
+
+#include "mdp.h"
+#include "msm_fb.h"
+#include "mdp4.h"
+
+static struct mdp4_overlay_pipe *mddi_pipe;
+static struct msm_fb_data_type *mddi_mfd;
+static int busy_wait_cnt;
+
+static int vsync_start_y_adjust = 4;
+
+static int dmap_vsync_enable;
+
+void mdp_dmap_vsync_set(int enable)
+{
+	dmap_vsync_enable = enable;
+}
+
+int mdp_dmap_vsync_get(void)
+{
+	return dmap_vsync_enable;
+}
+
+void mdp4_mddi_vsync_enable(struct msm_fb_data_type *mfd,
+		struct mdp4_overlay_pipe *pipe, int which)
+{
+	uint32 start_y, data, tear_en;
+
+	tear_en = (1 << which);
+
+	if ((mfd->use_mdp_vsync) && (mfd->ibuf.vsync_enable) &&
+		(mfd->panel_info.lcd.vsync_enable)) {
+
+		if (mdp_hw_revision < MDP4_REVISION_V2_1) {
+			/* need dmas dmap switch */
+			if (which == 0 && dmap_vsync_enable == 0 &&
+				mfd->panel_info.lcd.rev < 2) /* dma_p */
+				return;
+		}
+
+		if (vsync_start_y_adjust <= pipe->dst_y)
+			start_y = pipe->dst_y - vsync_start_y_adjust;
+		else
+			start_y = (mfd->total_lcd_lines - 1) -
+				(vsync_start_y_adjust - pipe->dst_y);
+		if (which == 0)
+			MDP_OUTP(MDP_BASE + 0x210, start_y);	/* primary */
+		else
+			MDP_OUTP(MDP_BASE + 0x214, start_y);	/* secondary */
+
+		data = inpdw(MDP_BASE + 0x20c);
+		data |= tear_en;
+		MDP_OUTP(MDP_BASE + 0x20c, data);
+	} else {
+		data = inpdw(MDP_BASE + 0x20c);
+		data &= ~tear_en;
+		MDP_OUTP(MDP_BASE + 0x20c, data);
+	}
+}
+
+#define WHOLESCREEN
+
+void mdp4_overlay_update_lcd(struct msm_fb_data_type *mfd)
+{
+	MDPIBUF *iBuf = &mfd->ibuf;
+	uint8 *src;
+	int ptype;
+	uint32 mddi_ld_param;
+	uint16 mddi_vdo_packet_reg;
+	struct mdp4_overlay_pipe *pipe;
+	int ret;
+
+	if (mfd->key != MFD_KEY)
+		return;
+
+	mddi_mfd = mfd;		/* keep it */
+
+	/* MDP cmd block enable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	if (mddi_pipe == NULL) {
+		ptype = mdp4_overlay_format2type(mfd->fb_imgType);
+		if (ptype < 0)
+			printk(KERN_INFO "%s: format2type failed\n", __func__);
+		pipe = mdp4_overlay_pipe_alloc(ptype, MDP4_MIXER0, 0);
+		if (pipe == NULL) {
+			printk(KERN_INFO "%s: pipe_alloc failed\n", __func__);
+			mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF,
+					FALSE);
+			return;
+		}
+		pipe->pipe_used++;
+		pipe->mixer_num  = MDP4_MIXER0;
+		pipe->src_format = mfd->fb_imgType;
+		mdp4_overlay_panel_mode(pipe->mixer_num, MDP4_PANEL_MDDI);
+		ret = mdp4_overlay_format2pipe(pipe);
+		if (ret < 0)
+			printk(KERN_INFO "%s: format2pipe failed\n", __func__);
+
+		mddi_pipe = pipe; /* keep it */
+		mddi_pipe->blt_end = 1;	/* mark as end */
+		mddi_ld_param = 0;
+		mddi_vdo_packet_reg = mfd->panel_info.mddi.vdopkt;
+
+		if (mdp_hw_revision == MDP4_REVISION_V2_1) {
+			uint32	data;
+
+			data = inpdw(MDP_BASE + 0x0028);
+			data &= ~0x0300;	/* bit 8, 9, MASTER4 */
+			if (mfd->fbi->var.xres == 540) /* qHD, 540x960 */
+				data |= 0x0200;
+			else
+				data |= 0x0100;
+
+			MDP_OUTP(MDP_BASE + 0x00028, data);
+		}
+
+		if (mfd->panel_info.type == MDDI_PANEL) {
+			if (mfd->panel_info.pdest == DISPLAY_1)
+				mddi_ld_param = 0;
+			else
+				mddi_ld_param = 1;
+		} else {
+			mddi_ld_param = 2;
+		}
+
+		MDP_OUTP(MDP_BASE + 0x00090, mddi_ld_param);
+
+		if (mfd->panel_info.bpp == 24)
+			MDP_OUTP(MDP_BASE + 0x00094,
+			 (MDDI_VDO_PACKET_DESC_24 << 16) | mddi_vdo_packet_reg);
+		else if (mfd->panel_info.bpp == 16)
+			MDP_OUTP(MDP_BASE + 0x00094,
+			 (MDDI_VDO_PACKET_DESC_16 << 16) | mddi_vdo_packet_reg);
+		else
+			MDP_OUTP(MDP_BASE + 0x00094,
+			 (MDDI_VDO_PACKET_DESC << 16) | mddi_vdo_packet_reg);
+
+		MDP_OUTP(MDP_BASE + 0x00098, 0x01);
+	} else {
+		pipe = mddi_pipe;
+	}
+
+	/* 0 for dma_p, client_id = 0 */
+	MDP_OUTP(MDP_BASE + 0x00090, 0);
+
+
+	src = (uint8 *) iBuf->buf;
+
+#ifdef WHOLESCREEN
+
+	{
+		struct fb_info *fbi;
+
+		fbi = mfd->fbi;
+		pipe->src_height = fbi->var.yres;
+		pipe->src_width = fbi->var.xres;
+		pipe->src_h = fbi->var.yres;
+		pipe->src_w = fbi->var.xres;
+		pipe->src_y = 0;
+		pipe->src_x = 0;
+		pipe->dst_h = fbi->var.yres;
+		pipe->dst_w = fbi->var.xres;
+		pipe->dst_y = 0;
+		pipe->dst_x = 0;
+		pipe->srcp0_addr = (uint32)src;
+		pipe->srcp0_ystride = fbi->fix.line_length;
+	}
+
+#else
+	if (mdp4_overlay_active(MDP4_MIXER0)) {
+		struct fb_info *fbi;
+
+		fbi = mfd->fbi;
+		pipe->src_height = fbi->var.yres;
+		pipe->src_width = fbi->var.xres;
+		pipe->src_h = fbi->var.yres;
+		pipe->src_w = fbi->var.xres;
+		pipe->src_y = 0;
+		pipe->src_x = 0;
+		pipe->dst_h = fbi->var.yres;
+		pipe->dst_w = fbi->var.xres;
+		pipe->dst_y = 0;
+		pipe->dst_x = 0;
+		pipe->srcp0_addr = (uint32) src;
+		pipe->srcp0_ystride = fbi->fix.line_length;
+	} else {
+		/* starting input address */
+		src += (iBuf->dma_x + iBuf->dma_y * iBuf->ibuf_width)
+					* iBuf->bpp;
+
+		pipe->src_height = iBuf->dma_h;
+		pipe->src_width = iBuf->dma_w;
+		pipe->src_h = iBuf->dma_h;
+		pipe->src_w = iBuf->dma_w;
+		pipe->src_y = 0;
+		pipe->src_x = 0;
+		pipe->dst_h = iBuf->dma_h;
+		pipe->dst_w = iBuf->dma_w;
+		pipe->dst_y = iBuf->dma_y;
+		pipe->dst_x = iBuf->dma_x;
+		pipe->srcp0_addr = (uint32) src;
+		pipe->srcp0_ystride = iBuf->ibuf_width * iBuf->bpp;
+	}
+#endif
+
+	pipe->mixer_stage  = MDP4_MIXER_STAGE_BASE;
+
+	mdp4_overlay_rgb_setup(pipe);
+
+	mdp4_mixer_stage_up(pipe);
+
+	mdp4_overlayproc_cfg(pipe);
+
+	mdp4_overlay_dmap_xy(pipe);
+
+	mdp4_overlay_dmap_cfg(mfd, 0);
+
+	mdp4_mddi_vsync_enable(mfd, pipe, 0);
+
+	/* MDP cmd block disable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+}
+
+int mdp4_mddi_overlay_blt_offset(int *off)
+{
+	if (mdp_hw_revision < MDP4_REVISION_V2_1) { /* need dmas dmap switch */
+		if (mddi_pipe->blt_end ||
+			(mdp4_overlay_mixer_play(mddi_pipe->mixer_num) == 0)) {
+			*off = -1;
+			return -EINVAL;
+		}
+	} else {
+		/* no dmas dmap switch */
+		if (mddi_pipe->blt_end) {
+			*off = -1;
+			return -EINVAL;
+		}
+	}
+
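+	/* the blt buffer is double buffered; odd frames use the second half */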
+	if (mddi_pipe->blt_cnt & 0x01)
+		*off = mddi_pipe->src_height * mddi_pipe->src_width * 3;
+	else
+		*off = 0;
+
+	return 0;
+}
+
+void mdp4_mddi_overlay_blt(ulong addr)
+{
+	unsigned long flag;
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	if (addr) {
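+		/* enable blt: arm DMA_P done intr and reset the blt state */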
+		mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+		mdp_intr_mask |= INTR_DMA_P_DONE;
+		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+		mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+		mddi_pipe->blt_cnt = 0;
+		mddi_pipe->blt_end = 0;
+		mddi_pipe->blt_addr = addr;
+	} else {
+		mddi_pipe->blt_end = 1;	/* mark as end */
+	}
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+}
+
+void mdp4_blt_xy_update(struct mdp4_overlay_pipe *pipe)
+{
+	uint32 off, addr;
+	int bpp;
+	char *overlay_base;
+
+
+	if (pipe->blt_addr == 0)
+		return;
+
+
+#ifdef BLT_RGB565
+	bpp = 2; /* overlay output is RGB565 */
+#else
+	bpp = 3; /* overlay output is RGB888 */
+#endif
+	off = 0;
+	if (pipe->dmap_cnt & 0x01)
+		off = pipe->src_height * pipe->src_width * bpp;
+
+	addr = pipe->blt_addr + off;
+
+	/* dmap */
+	MDP_OUTP(MDP_BASE + 0x90008, addr);
+
+	/* overlay 0 */
+	overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
+	outpdw(overlay_base + 0x000c, addr);
+	outpdw(overlay_base + 0x001c, addr);
+}
+
+/*
+ * mdp4_dma_p_done_mddi: called from isr
+ */
+void mdp4_dma_p_done_mddi(void)
+{
+	if (mddi_pipe->blt_end) {
+		mddi_pipe->blt_addr = 0;
+		mdp_intr_mask &= ~INTR_DMA_P_DONE;
+		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+		mdp4_overlayproc_cfg(mddi_pipe);
+		mdp4_overlay_dmap_xy(mddi_pipe);
+	}
+
+	/*
+	 * single buffer, no need to increase
+	 * mddi_pipe->dmap_cnt here
+	 */
+}
+
+/*
+ * mdp4_overlay0_done_mddi: called from isr
+ */
+void mdp4_overlay0_done_mddi(struct mdp_dma_data *dma)
+{
+	mdp_disable_irq_nosync(MDP_OVERLAY0_TERM);
+
+	dma->busy = FALSE;
+	complete(&dma->comp);
+	mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK,
+			MDP_BLOCK_POWER_OFF, TRUE);
+
+	if (busy_wait_cnt)
+		busy_wait_cnt--;
+
+	pr_debug("%s: ISR-done\n", __func__);
+
+	if (mddi_pipe->blt_addr) {
+		if (mddi_pipe->blt_cnt == 0) {
+			mdp4_overlayproc_cfg(mddi_pipe);
+			mdp4_overlay_dmap_xy(mddi_pipe);
+			mddi_pipe->ov_cnt = 0;
+			mddi_pipe->dmap_cnt = 0;
+			/* BLT start from next frame */
+		} else {
+			mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON,
+						FALSE);
+			mdp4_blt_xy_update(mddi_pipe);
+			outpdw(MDP_BASE + 0x000c, 0x0); /* start DMAP */
+		}
+		mddi_pipe->blt_cnt++;
+		mddi_pipe->ov_cnt++;
+	}
+}
+
+void mdp4_mddi_overlay_restore(void)
+{
+	if (mddi_mfd == NULL)
+		return;
+
+	pr_debug("%s: resotre, pid=%d\n", __func__, current->pid);
+
+	if (mddi_mfd->panel_power_on == 0)
+		return;
+	if (mddi_mfd && mddi_pipe) {
+		mdp4_mddi_dma_busy_wait(mddi_mfd);
+		mdp4_overlay_update_lcd(mddi_mfd);
+		mdp4_mddi_overlay_kickoff(mddi_mfd, mddi_pipe);
+		mddi_mfd->dma_update_flag = 1;
+	}
+	if (mdp_hw_revision < MDP4_REVISION_V2_1) /* need dmas dmap switch */
+		mdp4_mddi_overlay_dmas_restore();
+}
+
+/*
+ * mdp4_mddi_dma_busy_wait: check mddi link activity
+ * The mddi link is a shared resource and can only be used
+ * while it is in the idle state.
+ * ov_mutex needs to be acquired before calling this function.
+ */
+void mdp4_mddi_dma_busy_wait(struct msm_fb_data_type *mfd)
+{
+	unsigned long flag;
+	int need_wait = 0;
+
+	pr_debug("%s: START, pid=%d\n", __func__, current->pid);
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	if (mfd->dma->busy == TRUE) {
+		if (busy_wait_cnt == 0)
+			INIT_COMPLETION(mfd->dma->comp);
+		busy_wait_cnt++;
+		need_wait++;
+	}
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+
+
+	if (need_wait) {
+		/* wait until DMA finishes the current job */
+		pr_debug("%s: PENDING, pid=%d\n", __func__, current->pid);
+		wait_for_completion(&mfd->dma->comp);
+	}
+	pr_debug("%s: DONE, pid=%d\n", __func__, current->pid);
+}
+
+void mdp4_mddi_kickoff_video(struct msm_fb_data_type *mfd,
+				struct mdp4_overlay_pipe *pipe)
+{
+	pr_debug("%s: pid=%d\n", __func__, current->pid);
+	mdp4_mddi_overlay_kickoff(mfd, pipe);
+}
+
+void mdp4_mddi_kickoff_ui(struct msm_fb_data_type *mfd,
+				struct mdp4_overlay_pipe *pipe)
+{
+	pr_debug("%s: pid=%d\n", __func__, current->pid);
+	mdp4_mddi_overlay_kickoff(mfd, pipe);
+}
+
+
+void mdp4_mddi_overlay_kickoff(struct msm_fb_data_type *mfd,
+				struct mdp4_overlay_pipe *pipe)
+{
+	if (mdp_hw_revision == MDP4_REVISION_V2_1) {
+		if (mdp4_overlay_status_read(MDP4_OVERLAY_TYPE_UNSET)) {
+			uint32  data;
+			data = inpdw(MDP_BASE + 0x0028);
+			data &= ~0x0300;        /* bit 8, 9, MASTER4 */
+			if (mfd->fbi->var.xres == 540) /* qHD, 540x960 */
+				data |= 0x0200;
+			else
+				data |= 0x0100;
+			MDP_OUTP(MDP_BASE + 0x00028, data);
+			mdp4_overlay_status_write(MDP4_OVERLAY_TYPE_UNSET,
+				false);
+		}
+		if (mdp4_overlay_status_read(MDP4_OVERLAY_TYPE_SET)) {
+			uint32  data;
+			data = inpdw(MDP_BASE + 0x0028);
+			data &= ~0x0300;        /* bit 8, 9, MASTER4 */
+			MDP_OUTP(MDP_BASE + 0x00028, data);
+			mdp4_overlay_status_write(MDP4_OVERLAY_TYPE_SET, false);
+		}
+	}
+	mdp_enable_irq(MDP_OVERLAY0_TERM);
+	mfd->dma->busy = TRUE;
+	/* start OVERLAY pipe */
+	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
+}
+
+void mdp4_dma_s_update_lcd(struct msm_fb_data_type *mfd,
+				struct mdp4_overlay_pipe *pipe)
+{
+	MDPIBUF *iBuf = &mfd->ibuf;
+	uint32 outBpp = iBuf->bpp;
+	uint16 mddi_vdo_packet_reg;
+	uint32 dma_s_cfg_reg;
+
+	dma_s_cfg_reg = 0;
+
+	if (mfd->fb_imgType == MDP_RGBA_8888)
+		dma_s_cfg_reg |= DMA_PACK_PATTERN_BGR; /* on purpose */
+	else if (mfd->fb_imgType == MDP_BGR_565)
+		dma_s_cfg_reg |= DMA_PACK_PATTERN_BGR;
+	else
+		dma_s_cfg_reg |= DMA_PACK_PATTERN_RGB;
+
+	if (outBpp == 4)
+		dma_s_cfg_reg |= (1 << 26); /* xRGB8888 */
+	else if (outBpp == 2)
+		dma_s_cfg_reg |= DMA_IBUF_FORMAT_RGB565;
+
+	dma_s_cfg_reg |= DMA_DITHER_EN;
+
+	/* MDP cmd block enable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	/* PIXELSIZE */
+	MDP_OUTP(MDP_BASE + 0xa0004, (pipe->dst_h << 16 | pipe->dst_w));
+	MDP_OUTP(MDP_BASE + 0xa0008, pipe->srcp0_addr);	/* ibuf address */
+	MDP_OUTP(MDP_BASE + 0xa000c, pipe->srcp0_ystride);/* ystride */
+
+	if (mfd->panel_info.bpp == 24) {
+		dma_s_cfg_reg |= DMA_DSTC0G_8BITS |	/* 888 24BPP */
+		    DMA_DSTC1B_8BITS | DMA_DSTC2R_8BITS;
+	} else if (mfd->panel_info.bpp == 18) {
+		dma_s_cfg_reg |= DMA_DSTC0G_6BITS |	/* 666 18BPP */
+		    DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;
+	} else {
+		dma_s_cfg_reg |= DMA_DSTC0G_6BITS |	/* 565 16BPP */
+		    DMA_DSTC1B_5BITS | DMA_DSTC2R_5BITS;
+	}
+
+	MDP_OUTP(MDP_BASE + 0xa0010, (pipe->dst_y << 16) | pipe->dst_x);
+
+	/* 1 for dma_s, client_id = 0 */
+	MDP_OUTP(MDP_BASE + 0x00090, 1);
+
+	mddi_vdo_packet_reg = mfd->panel_info.mddi.vdopkt;
+
+	if (mfd->panel_info.bpp == 24)
+		MDP_OUTP(MDP_BASE + 0x00094,
+			(MDDI_VDO_PACKET_DESC_24 << 16) | mddi_vdo_packet_reg);
+	else if (mfd->panel_info.bpp == 16)
+		MDP_OUTP(MDP_BASE + 0x00094,
+			 (MDDI_VDO_PACKET_DESC_16 << 16) | mddi_vdo_packet_reg);
+	else
+		MDP_OUTP(MDP_BASE + 0x00094,
+			 (MDDI_VDO_PACKET_DESC << 16) | mddi_vdo_packet_reg);
+
+	MDP_OUTP(MDP_BASE + 0x00098, 0x01);
+
+	MDP_OUTP(MDP_BASE + 0xa0000, dma_s_cfg_reg);
+
+	mdp4_mddi_vsync_enable(mfd, pipe, 1);
+
+	/* MDP cmd block disable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+
+void mdp4_mddi_dma_s_kickoff(struct msm_fb_data_type *mfd,
+				struct mdp4_overlay_pipe *pipe)
+{
+	mdp_enable_irq(MDP_DMA_S_TERM);
+	mfd->dma->busy = TRUE;
+	mfd->ibuf_flushed = TRUE;
+	/* start dma_s pipe */
+	mdp_pipe_kickoff(MDP_DMA_S_TERM, mfd);
+
+	/* wait until DMA finishes the current job */
+	wait_for_completion(&mfd->dma->comp);
+	mdp_disable_irq(MDP_DMA_S_TERM);
+}
+
+void mdp4_mddi_overlay_dmas_restore(void)
+{
+	/* mutex held by caller */
+	if (mddi_mfd && mddi_pipe) {
+		mdp4_mddi_dma_busy_wait(mddi_mfd);
+		mdp4_dma_s_update_lcd(mddi_mfd, mddi_pipe);
+		mdp4_mddi_dma_s_kickoff(mddi_mfd, mddi_pipe);
+		mddi_mfd->dma_update_flag = 1;
+	}
+}
+
+void mdp4_mddi_overlay(struct msm_fb_data_type *mfd)
+{
+	mutex_lock(&mfd->dma->ov_mutex);
+
+	if (mfd && mfd->panel_power_on) {
+		mdp4_mddi_dma_busy_wait(mfd);
+		mdp4_overlay_update_lcd(mfd);
+
+		if (mdp_hw_revision < MDP4_REVISION_V2_1) {
+			/* dmas dmap switch */
+			if (mdp4_overlay_mixer_play(mddi_pipe->mixer_num)
+						== 0) {
+				mdp4_dma_s_update_lcd(mfd, mddi_pipe);
+				mdp4_mddi_dma_s_kickoff(mfd, mddi_pipe);
+			} else
+				mdp4_mddi_kickoff_ui(mfd, mddi_pipe);
+		} else	/* no dmas dmap switch */
+			mdp4_mddi_kickoff_ui(mfd, mddi_pipe);
+
+		mdp4_stat.kickoff_mddi++;
+
+		/* signal if pan function is waiting for the update completion */
+		if (mfd->pan_waiting) {
+			mfd->pan_waiting = FALSE;
+			complete(&mfd->pan_comp);
+		}
+	}
+	mdp4_overlay_resource_release();
+	mutex_unlock(&mfd->dma->ov_mutex);
+}
+
+int mdp4_mddi_overlay_cursor(struct fb_info *info, struct fb_cursor *cursor)
+{
+	struct msm_fb_data_type *mfd = info->par;
+	mutex_lock(&mfd->dma->ov_mutex);
+	if (mfd && mfd->panel_power_on) {
+		mdp4_mddi_dma_busy_wait(mfd);
+		mdp_hw_cursor_update(info, cursor);
+	}
+	mutex_unlock(&mfd->dma->ov_mutex);
+	return 0;
+}
diff --git a/drivers/video/msm/mdp4_util.c b/drivers/video/msm/mdp4_util.c
new file mode 100644
index 0000000..52e4a82
--- /dev/null
+++ b/drivers/video/msm/mdp4_util.c
@@ -0,0 +1,2101 @@
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/hrtimer.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <mach/hardware.h>
+#include "mdp.h"
+#include "msm_fb.h"
+#include "mdp4.h"
+
+struct mdp4_statistic mdp4_stat;
+
+unsigned is_mdp4_hw_reset(void)
+{
+	unsigned hw_reset = 0;
+
+	/* Only revisions > v2.1 may be reset or powered off/on at runtime */
+	if (mdp_hw_revision > MDP4_REVISION_V2_1) {
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+		hw_reset = !inpdw(MDP_BASE + 0x003c);
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	}
+
+	return hw_reset;
+}
+
+void mdp4_sw_reset(ulong bits)
+{
+	/* MDP cmd block enable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	bits &= 0x1f;	/* 5 bits */
+	outpdw(MDP_BASE + 0x001c, bits);	/* MDP_SW_RESET */
+
+	while (inpdw(MDP_BASE + 0x001c) & bits) /* self clear when complete */
+		;
+	/* MDP cmd block disable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+	MSM_FB_DEBUG("mdp4_sw_reset: 0x%x\n", (int)bits);
+}
+
+void mdp4_overlay_cfg(int overlayer, int blt_mode, int refresh, int direct_out)
+{
+	ulong bits = 0;
+
+	if (blt_mode)
+		bits |= (1 << 3);
+	refresh &= 0x03;	/* 2 bits */
+	bits |= (refresh << 1);
+	direct_out &= 0x01;
+	bits |= direct_out;
+	/* MDP cmd block enable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+
+	if (overlayer == MDP4_MIXER0)
+		outpdw(MDP_BASE + 0x10004, bits); /* MDP_OVERLAY0_CFG */
+	else
+		outpdw(MDP_BASE + 0x18004, bits); /* MDP_OVERLAY1_CFG */
+
+	MSM_FB_DEBUG("mdp4_overlay_cfg: 0x%x\n",
+		(int)inpdw(MDP_BASE + 0x10004));
+	/* MDP cmd block disable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+
+void mdp4_display_intf_sel(int output, ulong intf)
+{
+	ulong bits, mask, data;
+	/* MDP cmd block enable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	bits = inpdw(MDP_BASE + 0x0038);	/* MDP_DISP_INTF_SEL */
+
+	if (intf == DSI_VIDEO_INTF) {
+		data = 0x40;	/* bit 6 */
+		intf = MDDI_LCDC_INTF;
+		if (output == SECONDARY_INTF_SEL) {
+			MSM_FB_INFO("%s: Illegal INTF selected, output=%d intf=%d\n",
+				__func__, output, (int)intf);
+		}
+	} else if (intf == DSI_CMD_INTF) {
+		data = 0x80;	/* bit 7 */
+		intf = MDDI_INTF;
+		if (output == EXTERNAL_INTF_SEL) {
+			MSM_FB_INFO("%s: Illegal INTF selected, output=%d intf=%d\n",
+				__func__, output, (int)intf);
+		}
+	} else
+		data = 0;
+
+	mask = 0x03;	/* 2 bits */
+	intf &= 0x03;	/* 2 bits */
+
+	switch (output) {
+	case EXTERNAL_INTF_SEL:
+		intf <<= 4;
+		mask <<= 4;
+		break;
+	case SECONDARY_INTF_SEL:
+		intf &= 0x02;	/* only MDDI and EBI2 support */
+		intf <<= 2;
+		mask <<= 2;
+		break;
+	default:
+		break;
+	}
+
+	intf |= data;
+	mask |= data;
+
+	bits &= ~mask;
+	bits |= intf;
+
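+	/*
+	 * MDP_DISP_INTF_SEL layout implied by the shifts above:
+	 * bits [1:0] primary, bits [3:2] secondary, bits [5:4] external,
+	 * bit 6 selects DSI video mode, bit 7 selects DSI command mode.
+	 */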
+	outpdw(MDP_BASE + 0x0038, bits);	/* MDP_DISP_INTF_SEL */
+	/* MDP cmd block disable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+	MSM_FB_DEBUG("mdp4_display_intf_sel: 0x%x\n",
+		(int)inpdw(MDP_BASE + 0x0038));
+}
+
+unsigned long mdp4_display_status(void)
+{
+	ulong status;
+	/* MDP cmd block enable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	status = inpdw(MDP_BASE + 0x0018) & 0x3ff;	/* MDP_DISPLAY_STATUS */
+
+	/* MDP cmd block disable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	return status;
+}
+
+void mdp4_ebi2_lcd_setup(int lcd, ulong base, int ystride)
+{
+	/* always use memory map */
+	ystride &= 0x01fff;	/* 13 bits */
+	/* MDP cmd block enable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	if (lcd == EBI2_LCD0) {
+		outpdw(MDP_BASE + 0x0060, base);/* MDP_EBI2_LCD0 */
+		outpdw(MDP_BASE + 0x0068, ystride);/* MDP_EBI2_LCD0_YSTRIDE */
+	} else {
+		outpdw(MDP_BASE + 0x0064, base);/* MDP_EBI2_LCD1 */
+		outpdw(MDP_BASE + 0x006c, ystride);/* MDP_EBI2_LCD1_YSTRIDE */
+	}
+	/* MDP cmd block disable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+
+void mdp4_mddi_setup(int mddi, unsigned long id)
+{
+	ulong bits;
+
+	if (mddi == MDDI_EXTERNAL_SET)
+		bits = 0x02;
+	else if (mddi == MDDI_SECONDARY_SET)
+		bits = 0x01;
+	else
+		bits = 0;	/* PRIMARY_SET */
+
+	id <<= 16;
+
+	bits |= id;
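+	/*
+	 * MDP_MDDI_PARAM_WR_SEL, as implied by the packing above: bits
+	 * [31:16] carry the MDDI id, bits [1:0] select the set
+	 * (0 = primary, 1 = secondary, 2 = external).
+	 */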
+	/* MDP cmd block enable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	outpdw(MDP_BASE + 0x0090, bits); /* MDP_MDDI_PARAM_WR_SEL */
+	/* MDP cmd block disable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+
+int mdp_ppp_blit(struct fb_info *info, struct mdp_blit_req *req)
+{
+
+	/* not implemented yet */
+	return -1;
+}
+
+void mdp4_fetch_cfg(uint32 core_clk)
+{
+
+	uint32 dmap_data, vg_data;
+	char *base;
+	int i;
+	/* MDP cmd block enable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	if (core_clk >= 90000000) { /* 90 MHz */
+		dmap_data = 0x47; /* 16 bytes-burst x 8 req */
+		vg_data = 0x47; /* 16 bytes-burst x 8 req */
+	} else {
+		dmap_data = 0x27; /* 8 bytes-burst x 8 req */
+		vg_data = 0x43; /* 16 bytes-burst x 4 req */
+	}
+
+	MSM_FB_DEBUG("mdp4_fetch_cfg: dmap=%x vg=%x\n",
+			dmap_data, vg_data);
+
+	/* dma_p fetch config */
+	outpdw(MDP_BASE + 0x91004, dmap_data);
+	/* dma_e fetch config */
+	outpdw(MDP_BASE + 0xB1004, dmap_data);
+
+	/*
+	 * set up two vg pipes and two rgb pipes
+	 */
+	base = MDP_BASE + MDP4_VIDEO_BASE;
+	for (i = 0; i < 4; i++) {
+		outpdw(base + 0x1004, vg_data);
+		base += MDP4_VIDEO_OFF;
+	}
+	/* MDP cmd block disable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+
+void mdp4_hw_init(void)
+{
+	ulong bits;
+
+	/* MDP cmd block enable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+#ifdef MDP4_ERROR
+	/*
+	 * Issuing a software reset on DMA_P causes the DMA_P DMA engine to
+	 * stall in LCDC mode. However, DMA_P does not stall in MDDI mode.
+	 * This needs further investigation.
+	 */
+	mdp4_sw_reset(0x17);
+#endif
+
+	mdp4_clear_lcdc();
+
+	mdp4_mixer_blend_init(0);
+	mdp4_mixer_blend_init(1);
+	mdp4_vg_qseed_init(0);
+	mdp4_vg_qseed_init(1);
+
+	/* yuv2rgb */
+	mdp4_vg_csc_mv_setup(0);
+	mdp4_vg_csc_mv_setup(1);
+	mdp4_vg_csc_pre_bv_setup(0);
+	mdp4_vg_csc_pre_bv_setup(1);
+	mdp4_vg_csc_post_bv_setup(0);
+	mdp4_vg_csc_post_bv_setup(1);
+	mdp4_vg_csc_pre_lv_setup(0);
+	mdp4_vg_csc_pre_lv_setup(1);
+	mdp4_vg_csc_post_lv_setup(0);
+	mdp4_vg_csc_post_lv_setup(1);
+
+	/* rgb2yuv */
+	mdp4_mixer1_csc_mv_setup();
+	mdp4_mixer1_csc_pre_bv_setup();
+	mdp4_mixer1_csc_post_bv_setup();
+	mdp4_mixer1_csc_pre_lv_setup();
+	mdp4_mixer1_csc_post_lv_setup();
+
+	mdp4_vg_igc_lut_setup(0);
+	mdp4_vg_igc_lut_setup(1);
+
+	mdp4_rgb_igc_lut_setup(0);
+	mdp4_rgb_igc_lut_setup(1);
+
+	outp32(MDP_EBI2_PORTMAP_MODE, 0x3);
+
+	/* system interrupts */
+
+	bits =  mdp_intr_mask;
+	outpdw(MDP_BASE + 0x0050, bits);/* enable specified interrupts */
+
+	/* histogram */
+	MDP_OUTP(MDP_BASE + 0x95010, 1);	/* auto clear HIST */
+
+	/* enable histogram interrupts */
+	outpdw(MDP_BASE + 0x9501c, INTR_HIST_DONE);
+
+	/*
+	 * For the max read pending cmd config below, if the MDP clock is
+	 * less than the AXI clock, then we must use 3 pending requests.
+	 * Otherwise, we should use 8 pending requests. In the future we
+	 * should do this detection automatically.
+	 */
+
+	/* max read pending cmd config */
+	outpdw(MDP_BASE + 0x004c, 0x02222);	/* 3 pending requests */
+
+#ifndef CONFIG_FB_MSM_OVERLAY
+	/* both REFRESH_MODE and DIRECT_OUT are ignored at BLT mode */
+	mdp4_overlay_cfg(MDP4_MIXER0, OVERLAY_MODE_BLT, 0, 0);
+	mdp4_overlay_cfg(MDP4_MIXER1, OVERLAY_MODE_BLT, 0, 0);
+#endif
+
+	/* MDP cmd block disable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+	/* Mark hardware as initialized. Only revisions > v2.1 have a register
+	 * for tracking core reset status. */
+	if (mdp_hw_revision > MDP4_REVISION_V2_1)
+		outpdw(MDP_BASE + 0x003c, 1);
+}
+
+
+void mdp4_clear_lcdc(void)
+{
+	uint32 bits;
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	bits = inpdw(MDP_BASE + 0xc0000);
+	if (bits & 0x01) { /* enabled already */
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+		return;
+	}
+
+	outpdw(MDP_BASE + 0xc0004, 0);	/* vsync ctrl out */
+	outpdw(MDP_BASE + 0xc0008, 0);	/* vsync period */
+	outpdw(MDP_BASE + 0xc000c, 0);	/* vsync pulse width */
+	outpdw(MDP_BASE + 0xc0010, 0);	/* lcdc display HCTL */
+	outpdw(MDP_BASE + 0xc0014, 0);	/* lcdc display v start */
+	outpdw(MDP_BASE + 0xc0018, 0);	/* lcdc display v end */
+	outpdw(MDP_BASE + 0xc001c, 0);	/* lcdc active hctl */
+	outpdw(MDP_BASE + 0xc0020, 0);	/* lcdc active v start */
+	outpdw(MDP_BASE + 0xc0024, 0);	/* lcdc active v end */
+	outpdw(MDP_BASE + 0xc0028, 0);	/* lcdc border color */
+	outpdw(MDP_BASE + 0xc002c, 0);	/* lcdc underflow ctrl */
+	outpdw(MDP_BASE + 0xc0030, 0);	/* lcdc hsync skew */
+	outpdw(MDP_BASE + 0xc0034, 0);	/* lcdc test ctl */
+	outpdw(MDP_BASE + 0xc0038, 0);	/* lcdc ctl polarity */
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+
+irqreturn_t mdp4_isr(int irq, void *ptr)
+{
+	uint32 isr, mask, panel;
+	struct mdp_dma_data *dma;
+
+	mdp_is_in_isr = TRUE;
+
+	/*
+	 * Complete all reads before reading the interrupt status register -
+	 * eliminate the effects of speculative reads by the CPU.
+	 */
+	rmb();
+	isr = inpdw(MDP_INTR_STATUS);
+	if (isr == 0)
+		goto out;
+
+	mdp4_stat.intr_tot++;
+	mask = inpdw(MDP_INTR_ENABLE);
+	outpdw(MDP_INTR_CLEAR, isr);
+
+	if (isr & INTR_PRIMARY_INTF_UDERRUN) {
+		mdp4_stat.intr_underrun_p++;
+		/*
+		 * When an underrun occurs the MDP clears the histogram
+		 * registers programmed in hw_init, so restore them here to
+		 * keep the histogram working.
+		 */
+		MDP_OUTP(MDP_BASE + 0x95010, 1);
+		outpdw(MDP_BASE + 0x9501c, INTR_HIST_DONE);
+		if (mdp_is_hist_start == TRUE) {
+			MDP_OUTP(MDP_BASE + 0x95004,
+					mdp_hist.frame_cnt);
+			MDP_OUTP(MDP_BASE + 0x95000, 1);
+		}
+	}
+
+	if (isr & INTR_EXTERNAL_INTF_UDERRUN)
+		mdp4_stat.intr_underrun_e++;
+
+	isr &= mask;
+
+	if (isr == 0)
+		goto out;
+
+	panel = mdp4_overlay_panel_list();
+	if (isr & INTR_PRIMARY_VSYNC) {
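+		/*
+		 * Vsync interrupts are treated as one-shot: the source is
+		 * masked here and presumably re-armed by the next
+		 * wait-for-vsync request.
+		 */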
+		dma = &dma2_data;
+		spin_lock(&mdp_spin_lock);
+		mdp_intr_mask &= ~INTR_PRIMARY_VSYNC;
+		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+		dma->waiting = FALSE;
+		spin_unlock(&mdp_spin_lock);
+		if (panel & MDP4_PANEL_LCDC)
+			mdp4_primary_vsync_lcdc();
+#ifdef CONFIG_FB_MSM_MIPI_DSI
+		else if (panel & MDP4_PANEL_DSI_VIDEO)
+			mdp4_primary_vsync_dsi_video();
+#endif
+	}
+#ifdef CONFIG_FB_MSM_DTV
+	if (isr & INTR_EXTERNAL_VSYNC) {
+		dma = &dma_e_data;
+		spin_lock(&mdp_spin_lock);
+		mdp_intr_mask &= ~INTR_EXTERNAL_VSYNC;
+		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+		dma->waiting = FALSE;
+		spin_unlock(&mdp_spin_lock);
+		if (panel & MDP4_PANEL_DTV)
+			mdp4_external_vsync_dtv();
+	}
+#endif
+	if (isr & INTR_DMA_P_DONE) {
+		mdp4_stat.intr_dma_p++;
+		dma = &dma2_data;
+		if (panel & MDP4_PANEL_LCDC) {
+			/* disable LCDC interrupt */
+			spin_lock(&mdp_spin_lock);
+			mdp_intr_mask &= ~INTR_DMA_P_DONE;
+			outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+			dma->waiting = FALSE;
+			spin_unlock(&mdp_spin_lock);
+		} else { /* MDDI */
+#ifdef CONFIG_FB_MSM_OVERLAY
+#ifdef CONFIG_FB_MSM_MIPI_DSI
+			mdp4_dma_p_done_dsi(dma);
+#else
+			mdp4_dma_p_done_mddi();
+			mdp_pipe_ctrl(MDP_DMA2_BLOCK,
+				MDP_BLOCK_POWER_OFF, TRUE);
+#endif
+#else
+			spin_lock(&mdp_spin_lock);
+			dma->busy = FALSE;
+			spin_unlock(&mdp_spin_lock);
+#endif
+		}
+#ifndef CONFIG_FB_MSM_MIPI_DSI
+		complete(&dma->comp);
+#endif
+	}
+	if (isr & INTR_DMA_S_DONE) {
+		mdp4_stat.intr_dma_s++;
+#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDDI)
+		dma = &dma2_data;
+#else
+		dma = &dma_s_data;
+#endif
+
+		dma->busy = FALSE;
+		mdp_pipe_ctrl(MDP_DMA_S_BLOCK,
+				MDP_BLOCK_POWER_OFF, TRUE);
+		complete(&dma->comp);
+	}
+	if (isr & INTR_DMA_E_DONE) {
+		mdp4_stat.intr_dma_e++;
+		dma = &dma_e_data;
+		spin_lock(&mdp_spin_lock);
+		mdp_intr_mask &= ~INTR_DMA_E_DONE;
+		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+		dma->busy = FALSE;
+
+		if (dma->waiting) {
+			dma->waiting = FALSE;
+			complete(&dma->comp);
+		}
+		spin_unlock(&mdp_spin_lock);
+	}
+#ifdef CONFIG_FB_MSM_OVERLAY
+	if (isr & INTR_OVERLAY0_DONE) {
+		mdp4_stat.intr_overlay0++;
+		dma = &dma2_data;
+		if (panel & (MDP4_PANEL_LCDC | MDP4_PANEL_DSI_VIDEO)) {
+			/* disable LCDC interrupt */
+			spin_lock(&mdp_spin_lock);
+			mdp_intr_mask &= ~INTR_OVERLAY0_DONE;
+			outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+			dma->waiting = FALSE;
+			spin_unlock(&mdp_spin_lock);
+			if (panel & MDP4_PANEL_LCDC)
+				mdp4_overlay0_done_lcdc();
+#ifdef CONFIG_FB_MSM_MIPI_DSI
+			else if (panel & MDP4_PANEL_DSI_VIDEO)
+				mdp4_overlay0_done_dsi_video();
+#endif
+		} else {        /* MDDI, DSI_CMD  */
+#ifdef CONFIG_FB_MSM_MIPI_DSI
+			if (panel & MDP4_PANEL_DSI_CMD)
+				mdp4_overlay0_done_dsi_cmd(dma);
+#else
+			if (panel & MDP4_PANEL_MDDI)
+				mdp4_overlay0_done_mddi(dma);
+#endif
+		}
+		mdp_hw_cursor_done();
+	}
+	if (isr & INTR_OVERLAY1_DONE) {
+		mdp4_stat.intr_overlay1++;
+		/* disable DTV interrupt */
+		dma = &dma_e_data;
+		spin_lock(&mdp_spin_lock);
+		mdp_intr_mask &= ~INTR_OVERLAY1_DONE;
+		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+		dma->waiting = FALSE;
+		spin_unlock(&mdp_spin_lock);
+#if defined(CONFIG_FB_MSM_DTV)
+		if (panel & MDP4_PANEL_DTV)
+			mdp4_overlay1_done_dtv();
+#endif
+#if defined(CONFIG_FB_MSM_TVOUT)
+		if (panel & MDP4_PANEL_ATV)
+			mdp4_overlay1_done_atv();
+#endif
+	}
+#endif	/* OVERLAY */
+	if (isr & INTR_DMA_P_HISTOGRAM) {
+		isr = inpdw(MDP_DMA_P_HIST_INTR_STATUS);
+		mask = inpdw(MDP_DMA_P_HIST_INTR_ENABLE);
+		outpdw(MDP_DMA_P_HIST_INTR_CLEAR, isr);
+		isr &= mask;
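+		/*
+		 * Each color channel exposes bin_cnt 32-bit bins: R at
+		 * 0x95100, G at 0x95200 and B at 0x95300 (per the copies
+		 * below), hence the bin_cnt * 4 byte memcpy sizes.
+		 */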
+		if (isr & INTR_HIST_DONE) {
+			if (mdp_hist.r)
+				memcpy(mdp_hist.r, MDP_BASE + 0x95100,
+						mdp_hist.bin_cnt*4);
+			if (mdp_hist.g)
+				memcpy(mdp_hist.g, MDP_BASE + 0x95200,
+						mdp_hist.bin_cnt*4);
+			if (mdp_hist.b)
+				memcpy(mdp_hist.b, MDP_BASE + 0x95300,
+					mdp_hist.bin_cnt*4);
+			complete(&mdp_hist_comp);
+			if (mdp_is_hist_start == TRUE) {
+				MDP_OUTP(MDP_BASE + 0x95004,
+						mdp_hist.frame_cnt);
+				MDP_OUTP(MDP_BASE + 0x95000, 1);
+			}
+		}
+	}
+
+out:
+	mdp_is_in_isr = FALSE;
+
+	return IRQ_HANDLED;
+}
+
+
+/*
+ * QSEED tables
+ */
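+/* Written verbatim into each VG pipe's QSEED SRAM by mdp4_vg_qseed_init(). */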
+
+static uint32 vg_qseed_table0[] = {
+	0x5556aaff, 0x00000000, 0x00000000, 0x00000000
+};
+
+static uint32 vg_qseed_table1[] = {
+	0x76543210, 0xfedcba98
+};
+
+static uint32 vg_qseed_table2[] = {
+	0x02000000, 0x00000000, 0x01ff0ff9, 0x00000008,
+	0x01fb0ff2, 0x00000013, 0x01f50fed, 0x0ffe0020,
+	0x01ed0fe8, 0x0ffd002e, 0x01e30fe4, 0x0ffb003e,
+	0x01d80fe1, 0x0ff9004e, 0x01cb0fde, 0x0ff70060,
+	0x01bc0fdc, 0x0ff40074, 0x01ac0fdb, 0x0ff20087,
+	0x019a0fdb, 0x0fef009c, 0x01870fdb, 0x0fed00b1,
+	0x01740fdb, 0x0fea00c7, 0x01600fdc, 0x0fe700dd,
+	0x014b0fdd, 0x0fe500f3, 0x01350fdf, 0x0fe30109,
+	0x01200fe0, 0x0fe00120, 0x01090fe3, 0x0fdf0135,
+	0x00f30fe5, 0x0fdd014b, 0x00dd0fe7, 0x0fdc0160,
+	0x00c70fea, 0x0fdb0174, 0x00b10fed, 0x0fdb0187,
+	0x009c0fef, 0x0fdb019a, 0x00870ff2, 0x0fdb01ac,
+	0x00740ff4, 0x0fdc01bc, 0x00600ff7, 0x0fde01cb,
+	0x004e0ff9, 0x0fe101d8, 0x003e0ffb, 0x0fe401e3,
+	0x002e0ffd, 0x0fe801ed, 0x00200ffe, 0x0fed01f5,
+	0x00130000, 0x0ff201fb, 0x00080000, 0x0ff901ff,
+
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+
+	0x02000000, 0x00000000, 0x01fc0ff9, 0x0ffe000d,
+	0x01f60ff3, 0x0ffb001c, 0x01ef0fed, 0x0ff9002b,
+	0x01e60fe8, 0x0ff6003c, 0x01dc0fe4, 0x0ff3004d,
+	0x01d00fe0, 0x0ff1005f, 0x01c30fde, 0x0fee0071,
+	0x01b50fdb, 0x0feb0085, 0x01a70fd9, 0x0fe80098,
+	0x01960fd8, 0x0fe600ac, 0x01850fd7, 0x0fe300c1,
+	0x01730fd7, 0x0fe100d5, 0x01610fd7, 0x0fdf00e9,
+	0x014e0fd8, 0x0fdd00fd, 0x013b0fd8, 0x0fdb0112,
+	0x01250fda, 0x0fda0127, 0x01120fdb, 0x0fd8013b,
+	0x00fd0fdd, 0x0fd8014e, 0x00e90fdf, 0x0fd70161,
+	0x00d50fe1, 0x0fd70173, 0x00c10fe3, 0x0fd70185,
+	0x00ac0fe6, 0x0fd80196, 0x00980fe8, 0x0fd901a7,
+	0x00850feb, 0x0fdb01b5, 0x00710fee, 0x0fde01c3,
+	0x005f0ff1, 0x0fe001d0, 0x004d0ff3, 0x0fe401dc,
+	0x003c0ff6, 0x0fe801e6, 0x002b0ff9, 0x0fed01ef,
+	0x001c0ffb, 0x0ff301f6, 0x000d0ffe, 0x0ff901fc,
+
+	0x020f0034, 0x0f7a0043, 0x01e80023, 0x0fa8004d,
+	0x01d30016, 0x0fbe0059, 0x01c6000a, 0x0fc90067,
+	0x01bd0000, 0x0fce0075, 0x01b50ff7, 0x0fcf0085,
+	0x01ae0fee, 0x0fcf0095, 0x01a70fe6, 0x0fcd00a6,
+	0x019d0fe0, 0x0fcb00b8, 0x01940fd9, 0x0fc900ca,
+	0x01890fd4, 0x0fc700dc, 0x017d0fcf, 0x0fc600ee,
+	0x01700fcc, 0x0fc40100, 0x01620fc9, 0x0fc40111,
+	0x01540fc6, 0x0fc30123, 0x01430fc5, 0x0fc40134,
+	0x01340fc4, 0x0fc50143, 0x01230fc3, 0x0fc60154,
+	0x01110fc4, 0x0fc90162, 0x01000fc4, 0x0fcc0170,
+	0x00ee0fc6, 0x0fcf017d, 0x00dc0fc7, 0x0fd40189,
+	0x00ca0fc9, 0x0fd90194, 0x00b80fcb, 0x0fe0019d,
+	0x00a60fcd, 0x0fe601a7, 0x00950fcf, 0x0fee01ae,
+	0x00850fcf, 0x0ff701b5, 0x00750fce, 0x000001bd,
+	0x00670fc9, 0x000a01c6, 0x00590fbe, 0x001601d3,
+	0x004d0fa8, 0x002301e8, 0x00430f7a, 0x0034020f,
+
+	0x015c005e, 0x0fde0068, 0x015c0054, 0x0fdd0073,
+	0x015b004b, 0x0fdc007e, 0x015a0042, 0x0fdb0089,
+	0x01590039, 0x0fda0094, 0x01560030, 0x0fda00a0,
+	0x01530028, 0x0fda00ab, 0x014f0020, 0x0fda00b7,
+	0x014a0019, 0x0fdb00c2, 0x01450011, 0x0fdc00ce,
+	0x013e000b, 0x0fde00d9, 0x01390004, 0x0fdf00e4,
+	0x01310ffe, 0x0fe200ef, 0x01290ff9, 0x0fe400fa,
+	0x01200ff4, 0x0fe80104, 0x01180fef, 0x0feb010e,
+	0x010e0feb, 0x0fef0118, 0x01040fe8, 0x0ff40120,
+	0x00fa0fe4, 0x0ff90129, 0x00ef0fe2, 0x0ffe0131,
+	0x00e40fdf, 0x00040139, 0x00d90fde, 0x000b013e,
+	0x00ce0fdc, 0x00110145, 0x00c20fdb, 0x0019014a,
+	0x00b70fda, 0x0020014f, 0x00ab0fda, 0x00280153,
+	0x00a00fda, 0x00300156, 0x00940fda, 0x00390159,
+	0x00890fdb, 0x0042015a, 0x007e0fdc, 0x004b015b,
+	0x00730fdd, 0x0054015c, 0x00680fde, 0x005e015c,
+
+	0x01300068, 0x0ff80070, 0x01300060, 0x0ff80078,
+	0x012f0059, 0x0ff80080, 0x012d0052, 0x0ff80089,
+	0x012b004b, 0x0ff90091, 0x01290044, 0x0ff9009a,
+	0x0126003d, 0x0ffa00a3, 0x01220037, 0x0ffb00ac,
+	0x011f0031, 0x0ffc00b4, 0x011a002b, 0x0ffe00bd,
+	0x01150026, 0x000000c5, 0x010f0021, 0x000200ce,
+	0x010a001c, 0x000400d6, 0x01030018, 0x000600df,
+	0x00fd0014, 0x000900e6, 0x00f60010, 0x000c00ee,
+	0x00ee000c, 0x001000f6, 0x00e60009, 0x001400fd,
+	0x00df0006, 0x00180103, 0x00d60004, 0x001c010a,
+	0x00ce0002, 0x0021010f, 0x00c50000, 0x00260115,
+	0x00bd0ffe, 0x002b011a, 0x00b40ffc, 0x0031011f,
+	0x00ac0ffb, 0x00370122, 0x00a30ffa, 0x003d0126,
+	0x009a0ff9, 0x00440129, 0x00910ff9, 0x004b012b,
+	0x00890ff8, 0x0052012d, 0x00800ff8, 0x0059012f,
+	0x00780ff8, 0x00600130, 0x00700ff8, 0x00680130,
+
+	0x01050079, 0x0003007f, 0x01040073, 0x00030086,
+	0x0103006d, 0x0004008c, 0x01030066, 0x00050092,
+	0x01010060, 0x00060099, 0x0100005a, 0x0007009f,
+	0x00fe0054, 0x000900a5, 0x00fa004f, 0x000b00ac,
+	0x00f80049, 0x000d00b2, 0x00f50044, 0x000f00b8,
+	0x00f2003f, 0x001200bd, 0x00ef0039, 0x001500c3,
+	0x00ea0035, 0x001800c9, 0x00e60030, 0x001c00ce,
+	0x00e3002b, 0x001f00d3, 0x00dd0027, 0x002300d9,
+	0x00d90023, 0x002700dd, 0x00d3001f, 0x002b00e3,
+	0x00ce001c, 0x003000e6, 0x00c90018, 0x003500ea,
+	0x00c30015, 0x003900ef, 0x00bd0012, 0x003f00f2,
+	0x00b8000f, 0x004400f5, 0x00b2000d, 0x004900f8,
+	0x00ac000b, 0x004f00fa, 0x00a50009, 0x005400fe,
+	0x009f0007, 0x005a0100, 0x00990006, 0x00600101,
+	0x00920005, 0x00660103, 0x008c0004, 0x006d0103,
+	0x00860003, 0x00730104, 0x007f0003, 0x00790105,
+
+	0x00cf0088, 0x001d008c, 0x00ce0084, 0x0020008e,
+	0x00cd0080, 0x00210092, 0x00cd007b, 0x00240094,
+	0x00ca0077, 0x00270098, 0x00c90073, 0x0029009b,
+	0x00c8006f, 0x002c009d, 0x00c6006b, 0x002f00a0,
+	0x00c50067, 0x003200a2, 0x00c30062, 0x003600a5,
+	0x00c0005f, 0x003900a8, 0x00c0005b, 0x003b00aa,
+	0x00be0057, 0x003e00ad, 0x00ba0054, 0x004200b0,
+	0x00b90050, 0x004500b2, 0x00b7004c, 0x004900b4,
+	0x00b40049, 0x004c00b7, 0x00b20045, 0x005000b9,
+	0x00b00042, 0x005400ba, 0x00ad003e, 0x005700be,
+	0x00aa003b, 0x005b00c0, 0x00a80039, 0x005f00c0,
+	0x00a50036, 0x006200c3, 0x00a20032, 0x006700c5,
+	0x00a0002f, 0x006b00c6, 0x009d002c, 0x006f00c8,
+	0x009b0029, 0x007300c9, 0x00980027, 0x007700ca,
+	0x00940024, 0x007b00cd, 0x00920021, 0x008000cd,
+	0x008e0020, 0x008400ce, 0x008c001d, 0x008800cf,
+
+	0x008e0083, 0x006b0084, 0x008d0083, 0x006c0084,
+	0x008d0082, 0x006d0084, 0x008d0081, 0x006d0085,
+	0x008d0080, 0x006e0085, 0x008c007f, 0x006f0086,
+	0x008b007f, 0x00700086, 0x008b007e, 0x00710086,
+	0x008b007d, 0x00720086, 0x008a007d, 0x00730086,
+	0x008a007c, 0x00730087, 0x008a007b, 0x00740087,
+	0x0089007b, 0x00750087, 0x008a0079, 0x00750088,
+	0x008a0078, 0x00760088, 0x008a0077, 0x00770088,
+	0x00880077, 0x0077008a, 0x00880076, 0x0078008a,
+	0x00880075, 0x0079008a, 0x00870075, 0x007b0089,
+	0x00870074, 0x007b008a, 0x00870073, 0x007c008a,
+	0x00860073, 0x007d008a, 0x00860072, 0x007d008b,
+	0x00860071, 0x007e008b, 0x00860070, 0x007f008b,
+	0x0086006f, 0x007f008c, 0x0085006e, 0x0080008d,
+	0x0085006d, 0x0081008d, 0x0084006d, 0x0082008d,
+	0x0084006c, 0x0083008d, 0x0084006b, 0x0083008e,
+
+	0x023c0fe2, 0x00000fe2, 0x023a0fdb, 0x00000feb,
+	0x02360fd3, 0x0fff0ff8, 0x022e0fcf, 0x0ffc0007,
+	0x02250fca, 0x0ffa0017, 0x021a0fc6, 0x0ff70029,
+	0x020c0fc4, 0x0ff4003c, 0x01fd0fc1, 0x0ff10051,
+	0x01eb0fc0, 0x0fed0068, 0x01d80fc0, 0x0fe9007f,
+	0x01c30fc1, 0x0fe50097, 0x01ac0fc2, 0x0fe200b0,
+	0x01960fc3, 0x0fdd00ca, 0x017e0fc5, 0x0fd900e4,
+	0x01650fc8, 0x0fd500fe, 0x014b0fcb, 0x0fd20118,
+	0x01330fcd, 0x0fcd0133, 0x01180fd2, 0x0fcb014b,
+	0x00fe0fd5, 0x0fc80165, 0x00e40fd9, 0x0fc5017e,
+	0x00ca0fdd, 0x0fc30196, 0x00b00fe2, 0x0fc201ac,
+	0x00970fe5, 0x0fc101c3, 0x007f0fe9, 0x0fc001d8,
+	0x00680fed, 0x0fc001eb, 0x00510ff1, 0x0fc101fd,
+	0x003c0ff4, 0x0fc4020c, 0x00290ff7, 0x0fc6021a,
+	0x00170ffa, 0x0fca0225, 0x00070ffc, 0x0fcf022e,
+	0x0ff80fff, 0x0fd30236, 0x0feb0000, 0x0fdb023a,
+
+	0x02780fc4, 0x00000fc4, 0x02770fbc, 0x0fff0fce,
+	0x02710fb5, 0x0ffe0fdc, 0x02690fb0, 0x0ffa0fed,
+	0x025f0fab, 0x0ff70fff, 0x02500fa8, 0x0ff30015,
+	0x02410fa6, 0x0fef002a, 0x022f0fa4, 0x0feb0042,
+	0x021a0fa4, 0x0fe5005d, 0x02040fa5, 0x0fe10076,
+	0x01eb0fa7, 0x0fdb0093, 0x01d20fa9, 0x0fd600af,
+	0x01b80fab, 0x0fd000cd, 0x019d0faf, 0x0fca00ea,
+	0x01810fb2, 0x0fc50108, 0x01620fb7, 0x0fc10126,
+	0x01440fbb, 0x0fbb0146, 0x01260fc1, 0x0fb70162,
+	0x01080fc5, 0x0fb20181, 0x00ea0fca, 0x0faf019d,
+	0x00cd0fd0, 0x0fab01b8, 0x00af0fd6, 0x0fa901d2,
+	0x00930fdb, 0x0fa701eb, 0x00760fe1, 0x0fa50204,
+	0x005d0fe5, 0x0fa4021a, 0x00420feb, 0x0fa4022f,
+	0x002a0fef, 0x0fa60241, 0x00150ff3, 0x0fa80250,
+	0x0fff0ff7, 0x0fab025f, 0x0fed0ffa, 0x0fb00269,
+	0x0fdc0ffe, 0x0fb50271, 0x0fce0fff, 0x0fbc0277,
+
+	0x02a00fb0, 0x00000fb0, 0x029e0fa8, 0x0fff0fbb,
+	0x02980fa1, 0x0ffd0fca, 0x028f0f9c, 0x0ff90fdc,
+	0x02840f97, 0x0ff50ff0, 0x02740f94, 0x0ff10007,
+	0x02640f92, 0x0fec001e, 0x02500f91, 0x0fe70038,
+	0x023a0f91, 0x0fe00055, 0x02220f92, 0x0fdb0071,
+	0x02080f95, 0x0fd4008f, 0x01ec0f98, 0x0fce00ae,
+	0x01cf0f9b, 0x0fc700cf, 0x01b10f9f, 0x0fc100ef,
+	0x01920fa4, 0x0fbb010f, 0x01710faa, 0x0fb50130,
+	0x01520fae, 0x0fae0152, 0x01300fb5, 0x0faa0171,
+	0x010f0fbb, 0x0fa40192, 0x00ef0fc1, 0x0f9f01b1,
+	0x00cf0fc7, 0x0f9b01cf, 0x00ae0fce, 0x0f9801ec,
+	0x008f0fd4, 0x0f950208, 0x00710fdb, 0x0f920222,
+	0x00550fe0, 0x0f91023a, 0x00380fe7, 0x0f910250,
+	0x001e0fec, 0x0f920264, 0x00070ff1, 0x0f940274,
+	0x0ff00ff5, 0x0f970284, 0x0fdc0ff9, 0x0f9c028f,
+	0x0fca0ffd, 0x0fa10298, 0x0fbb0fff, 0x0fa8029e,
+
+	0x02c80f9c, 0x00000f9c, 0x02c70f94, 0x0ffe0fa7,
+	0x02c10f8c, 0x0ffc0fb7, 0x02b70f87, 0x0ff70fcb,
+	0x02aa0f83, 0x0ff30fe0, 0x02990f80, 0x0fee0ff9,
+	0x02870f7f, 0x0fe80012, 0x02720f7e, 0x0fe2002e,
+	0x025a0f7e, 0x0fdb004d, 0x02400f80, 0x0fd5006b,
+	0x02230f84, 0x0fcd008c, 0x02050f87, 0x0fc700ad,
+	0x01e60f8b, 0x0fbf00d0, 0x01c60f90, 0x0fb700f3,
+	0x01a30f96, 0x0fb00117, 0x01800f9c, 0x0faa013a,
+	0x015d0fa2, 0x0fa2015f, 0x013a0faa, 0x0f9c0180,
+	0x01170fb0, 0x0f9601a3, 0x00f30fb7, 0x0f9001c6,
+	0x00d00fbf, 0x0f8b01e6, 0x00ad0fc7, 0x0f870205,
+	0x008c0fcd, 0x0f840223, 0x006b0fd5, 0x0f800240,
+	0x004d0fdb, 0x0f7e025a, 0x002e0fe2, 0x0f7e0272,
+	0x00120fe8, 0x0f7f0287, 0x0ff90fee, 0x0f800299,
+	0x0fe00ff3, 0x0f8302aa, 0x0fcb0ff7, 0x0f8702b7,
+	0x0fb70ffc, 0x0f8c02c1, 0x0fa70ffe, 0x0f9402c7,
+
+	0x02f00f88, 0x00000f88, 0x02ee0f80, 0x0ffe0f94,
+	0x02e70f78, 0x0ffc0fa5, 0x02dd0f73, 0x0ff60fba,
+	0x02ce0f6f, 0x0ff20fd1, 0x02be0f6c, 0x0feb0feb,
+	0x02aa0f6b, 0x0fe50006, 0x02940f6a, 0x0fde0024,
+	0x02790f6c, 0x0fd60045, 0x025e0f6e, 0x0fcf0065,
+	0x023f0f72, 0x0fc60089, 0x021d0f77, 0x0fbf00ad,
+	0x01fd0f7b, 0x0fb600d2, 0x01da0f81, 0x0fad00f8,
+	0x01b50f87, 0x0fa6011e, 0x018f0f8f, 0x0f9e0144,
+	0x016b0f95, 0x0f95016b, 0x01440f9e, 0x0f8f018f,
+	0x011e0fa6, 0x0f8701b5, 0x00f80fad, 0x0f8101da,
+	0x00d20fb6, 0x0f7b01fd, 0x00ad0fbf, 0x0f77021d,
+	0x00890fc6, 0x0f72023f, 0x00650fcf, 0x0f6e025e,
+	0x00450fd6, 0x0f6c0279, 0x00240fde, 0x0f6a0294,
+	0x00060fe5, 0x0f6b02aa, 0x0feb0feb, 0x0f6c02be,
+	0x0fd10ff2, 0x0f6f02ce, 0x0fba0ff6, 0x0f7302dd,
+	0x0fa50ffc, 0x0f7802e7, 0x0f940ffe, 0x0f8002ee,
+
+	0x03180f74, 0x00000f74, 0x03160f6b, 0x0ffe0f81,
+	0x030e0f64, 0x0ffb0f93, 0x03030f5f, 0x0ff50fa9,
+	0x02f40f5b, 0x0ff00fc1, 0x02e20f58, 0x0fe90fdd,
+	0x02cd0f57, 0x0fe20ffa, 0x02b60f57, 0x0fda0019,
+	0x02990f59, 0x0fd1003d, 0x027b0f5c, 0x0fc90060,
+	0x02590f61, 0x0fc00086, 0x02370f66, 0x0fb700ac,
+	0x02130f6b, 0x0fae00d4, 0x01ee0f72, 0x0fa400fc,
+	0x01c70f79, 0x0f9b0125, 0x019f0f81, 0x0f93014d,
+	0x01760f89, 0x0f890178, 0x014d0f93, 0x0f81019f,
+	0x01250f9b, 0x0f7901c7, 0x00fc0fa4, 0x0f7201ee,
+	0x00d40fae, 0x0f6b0213, 0x00ac0fb7, 0x0f660237,
+	0x00860fc0, 0x0f610259, 0x00600fc9, 0x0f5c027b,
+	0x003d0fd1, 0x0f590299, 0x00190fda, 0x0f5702b6,
+	0x0ffa0fe2, 0x0f5702cd, 0x0fdd0fe9, 0x0f5802e2,
+	0x0fc10ff0, 0x0f5b02f4, 0x0fa90ff5, 0x0f5f0303,
+	0x0f930ffb, 0x0f64030e, 0x0f810ffe, 0x0f6b0316,
+
+	0x03400f60, 0x00000f60, 0x033e0f57, 0x0ffe0f6d,
+	0x03370f4f, 0x0ffa0f80, 0x032a0f4b, 0x0ff30f98,
+	0x031a0f46, 0x0fee0fb2, 0x03070f44, 0x0fe60fcf,
+	0x02f10f44, 0x0fde0fed, 0x02d70f44, 0x0fd6000f,
+	0x02b80f46, 0x0fcc0036, 0x02990f4a, 0x0fc3005a,
+	0x02750f4f, 0x0fb90083, 0x02500f55, 0x0fb000ab,
+	0x022a0f5b, 0x0fa500d6, 0x02020f63, 0x0f9a0101,
+	0x01d80f6b, 0x0f91012c, 0x01ae0f74, 0x0f870157,
+	0x01840f7c, 0x0f7c0184, 0x01570f87, 0x0f7401ae,
+	0x012c0f91, 0x0f6b01d8, 0x01010f9a, 0x0f630202,
+	0x00d60fa5, 0x0f5b022a, 0x00ab0fb0, 0x0f550250,
+	0x00830fb9, 0x0f4f0275, 0x005a0fc3, 0x0f4a0299,
+	0x00360fcc, 0x0f4602b8, 0x000f0fd6, 0x0f4402d7,
+	0x0fed0fde, 0x0f4402f1, 0x0fcf0fe6, 0x0f440307,
+	0x0fb20fee, 0x0f46031a, 0x0f980ff3, 0x0f4b032a,
+	0x0f800ffa, 0x0f4f0337, 0x0f6d0ffe, 0x0f57033e,
+
+	0x02000000, 0x00000000, 0x01ff0ff9, 0x00000008,
+	0x01fb0ff2, 0x00000013, 0x01f50fed, 0x0ffe0020,
+	0x01ed0fe8, 0x0ffd002e, 0x01e30fe4, 0x0ffb003e,
+	0x01d80fe1, 0x0ff9004e, 0x01cb0fde, 0x0ff70060,
+	0x01bc0fdc, 0x0ff40074, 0x01ac0fdb, 0x0ff20087,
+	0x019a0fdb, 0x0fef009c, 0x01870fdb, 0x0fed00b1,
+	0x01740fdb, 0x0fea00c7, 0x01600fdc, 0x0fe700dd,
+	0x014b0fdd, 0x0fe500f3, 0x01350fdf, 0x0fe30109,
+	0x01200fe0, 0x0fe00120, 0x01090fe3, 0x0fdf0135,
+	0x00f30fe5, 0x0fdd014b, 0x00dd0fe7, 0x0fdc0160,
+	0x00c70fea, 0x0fdb0174, 0x00b10fed, 0x0fdb0187,
+	0x009c0fef, 0x0fdb019a, 0x00870ff2, 0x0fdb01ac,
+	0x00740ff4, 0x0fdc01bc, 0x00600ff7, 0x0fde01cb,
+	0x004e0ff9, 0x0fe101d8, 0x003e0ffb, 0x0fe401e3,
+	0x002e0ffd, 0x0fe801ed, 0x00200ffe, 0x0fed01f5,
+	0x00130000, 0x0ff201fb, 0x00080000, 0x0ff901ff,
+
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+	0x02000000, 0x00000000, 0x02000000, 0x00000000,
+
+	0x02000000, 0x00000000, 0x01fc0ff9, 0x0ffe000d,
+	0x01f60ff3, 0x0ffb001c, 0x01ef0fed, 0x0ff9002b,
+	0x01e60fe8, 0x0ff6003c, 0x01dc0fe4, 0x0ff3004d,
+	0x01d00fe0, 0x0ff1005f, 0x01c30fde, 0x0fee0071,
+	0x01b50fdb, 0x0feb0085, 0x01a70fd9, 0x0fe80098,
+	0x01960fd8, 0x0fe600ac, 0x01850fd7, 0x0fe300c1,
+	0x01730fd7, 0x0fe100d5, 0x01610fd7, 0x0fdf00e9,
+	0x014e0fd8, 0x0fdd00fd, 0x013b0fd8, 0x0fdb0112,
+	0x01250fda, 0x0fda0127, 0x01120fdb, 0x0fd8013b,
+	0x00fd0fdd, 0x0fd8014e, 0x00e90fdf, 0x0fd70161,
+	0x00d50fe1, 0x0fd70173, 0x00c10fe3, 0x0fd70185,
+	0x00ac0fe6, 0x0fd80196, 0x00980fe8, 0x0fd901a7,
+	0x00850feb, 0x0fdb01b5, 0x00710fee, 0x0fde01c3,
+	0x005f0ff1, 0x0fe001d0, 0x004d0ff3, 0x0fe401dc,
+	0x003c0ff6, 0x0fe801e6, 0x002b0ff9, 0x0fed01ef,
+	0x001c0ffb, 0x0ff301f6, 0x000d0ffe, 0x0ff901fc,
+
+	0x020f0034, 0x0f7a0043, 0x01e80023, 0x0fa8004d,
+	0x01d30016, 0x0fbe0059, 0x01c6000a, 0x0fc90067,
+	0x01bd0000, 0x0fce0075, 0x01b50ff7, 0x0fcf0085,
+	0x01ae0fee, 0x0fcf0095, 0x01a70fe6, 0x0fcd00a6,
+	0x019d0fe0, 0x0fcb00b8, 0x01940fd9, 0x0fc900ca,
+	0x01890fd4, 0x0fc700dc, 0x017d0fcf, 0x0fc600ee,
+	0x01700fcc, 0x0fc40100, 0x01620fc9, 0x0fc40111,
+	0x01540fc6, 0x0fc30123, 0x01430fc5, 0x0fc40134,
+	0x01340fc4, 0x0fc50143, 0x01230fc3, 0x0fc60154,
+	0x01110fc4, 0x0fc90162, 0x01000fc4, 0x0fcc0170,
+	0x00ee0fc6, 0x0fcf017d, 0x00dc0fc7, 0x0fd40189,
+	0x00ca0fc9, 0x0fd90194, 0x00b80fcb, 0x0fe0019d,
+	0x00a60fcd, 0x0fe601a7, 0x00950fcf, 0x0fee01ae,
+	0x00850fcf, 0x0ff701b5, 0x00750fce, 0x000001bd,
+	0x00670fc9, 0x000a01c6, 0x00590fbe, 0x001601d3,
+	0x004d0fa8, 0x002301e8, 0x00430f7a, 0x0034020f,
+
+	0x015c005e, 0x0fde0068, 0x015c0054, 0x0fdd0073,
+	0x015b004b, 0x0fdc007e, 0x015a0042, 0x0fdb0089,
+	0x01590039, 0x0fda0094, 0x01560030, 0x0fda00a0,
+	0x01530028, 0x0fda00ab, 0x014f0020, 0x0fda00b7,
+	0x014a0019, 0x0fdb00c2, 0x01450011, 0x0fdc00ce,
+	0x013e000b, 0x0fde00d9, 0x01390004, 0x0fdf00e4,
+	0x01310ffe, 0x0fe200ef, 0x01290ff9, 0x0fe400fa,
+	0x01200ff4, 0x0fe80104, 0x01180fef, 0x0feb010e,
+	0x010e0feb, 0x0fef0118, 0x01040fe8, 0x0ff40120,
+	0x00fa0fe4, 0x0ff90129, 0x00ef0fe2, 0x0ffe0131,
+	0x00e40fdf, 0x00040139, 0x00d90fde, 0x000b013e,
+	0x00ce0fdc, 0x00110145, 0x00c20fdb, 0x0019014a,
+	0x00b70fda, 0x0020014f, 0x00ab0fda, 0x00280153,
+	0x00a00fda, 0x00300156, 0x00940fda, 0x00390159,
+	0x00890fdb, 0x0042015a, 0x007e0fdc, 0x004b015b,
+	0x00730fdd, 0x0054015c, 0x00680fde, 0x005e015c,
+
+	0x01300068, 0x0ff80070, 0x01300060, 0x0ff80078,
+	0x012f0059, 0x0ff80080, 0x012d0052, 0x0ff80089,
+	0x012b004b, 0x0ff90091, 0x01290044, 0x0ff9009a,
+	0x0126003d, 0x0ffa00a3, 0x01220037, 0x0ffb00ac,
+	0x011f0031, 0x0ffc00b4, 0x011a002b, 0x0ffe00bd,
+	0x01150026, 0x000000c5, 0x010f0021, 0x000200ce,
+	0x010a001c, 0x000400d6, 0x01030018, 0x000600df,
+	0x00fd0014, 0x000900e6, 0x00f60010, 0x000c00ee,
+	0x00ee000c, 0x001000f6, 0x00e60009, 0x001400fd,
+	0x00df0006, 0x00180103, 0x00d60004, 0x001c010a,
+	0x00ce0002, 0x0021010f, 0x00c50000, 0x00260115,
+	0x00bd0ffe, 0x002b011a, 0x00b40ffc, 0x0031011f,
+	0x00ac0ffb, 0x00370122, 0x00a30ffa, 0x003d0126,
+	0x009a0ff9, 0x00440129, 0x00910ff9, 0x004b012b,
+	0x00890ff8, 0x0052012d, 0x00800ff8, 0x0059012f,
+	0x00780ff8, 0x00600130, 0x00700ff8, 0x00680130,
+
+	0x01050079, 0x0003007f, 0x01040073, 0x00030086,
+	0x0103006d, 0x0004008c, 0x01030066, 0x00050092,
+	0x01010060, 0x00060099, 0x0100005a, 0x0007009f,
+	0x00fe0054, 0x000900a5, 0x00fa004f, 0x000b00ac,
+	0x00f80049, 0x000d00b2, 0x00f50044, 0x000f00b8,
+	0x00f2003f, 0x001200bd, 0x00ef0039, 0x001500c3,
+	0x00ea0035, 0x001800c9, 0x00e60030, 0x001c00ce,
+	0x00e3002b, 0x001f00d3, 0x00dd0027, 0x002300d9,
+	0x00d90023, 0x002700dd, 0x00d3001f, 0x002b00e3,
+	0x00ce001c, 0x003000e6, 0x00c90018, 0x003500ea,
+	0x00c30015, 0x003900ef, 0x00bd0012, 0x003f00f2,
+	0x00b8000f, 0x004400f5, 0x00b2000d, 0x004900f8,
+	0x00ac000b, 0x004f00fa, 0x00a50009, 0x005400fe,
+	0x009f0007, 0x005a0100, 0x00990006, 0x00600101,
+	0x00920005, 0x00660103, 0x008c0004, 0x006d0103,
+	0x00860003, 0x00730104, 0x007f0003, 0x00790105,
+
+	0x00cf0088, 0x001d008c, 0x00ce0084, 0x0020008e,
+	0x00cd0080, 0x00210092, 0x00cd007b, 0x00240094,
+	0x00ca0077, 0x00270098, 0x00c90073, 0x0029009b,
+	0x00c8006f, 0x002c009d, 0x00c6006b, 0x002f00a0,
+	0x00c50067, 0x003200a2, 0x00c30062, 0x003600a5,
+	0x00c0005f, 0x003900a8, 0x00c0005b, 0x003b00aa,
+	0x00be0057, 0x003e00ad, 0x00ba0054, 0x004200b0,
+	0x00b90050, 0x004500b2, 0x00b7004c, 0x004900b4,
+	0x00b40049, 0x004c00b7, 0x00b20045, 0x005000b9,
+	0x00b00042, 0x005400ba, 0x00ad003e, 0x005700be,
+	0x00aa003b, 0x005b00c0, 0x00a80039, 0x005f00c0,
+	0x00a50036, 0x006200c3, 0x00a20032, 0x006700c5,
+	0x00a0002f, 0x006b00c6, 0x009d002c, 0x006f00c8,
+	0x009b0029, 0x007300c9, 0x00980027, 0x007700ca,
+	0x00940024, 0x007b00cd, 0x00920021, 0x008000cd,
+	0x008e0020, 0x008400ce, 0x008c001d, 0x008800cf,
+
+	0x008e0083, 0x006b0084, 0x008d0083, 0x006c0084,
+	0x008d0082, 0x006d0084, 0x008d0081, 0x006d0085,
+	0x008d0080, 0x006e0085, 0x008c007f, 0x006f0086,
+	0x008b007f, 0x00700086, 0x008b007e, 0x00710086,
+	0x008b007d, 0x00720086, 0x008a007d, 0x00730086,
+	0x008a007c, 0x00730087, 0x008a007b, 0x00740087,
+	0x0089007b, 0x00750087, 0x008a0079, 0x00750088,
+	0x008a0078, 0x00760088, 0x008a0077, 0x00770088,
+	0x00880077, 0x0077008a, 0x00880076, 0x0078008a,
+	0x00880075, 0x0079008a, 0x00870075, 0x007b0089,
+	0x00870074, 0x007b008a, 0x00870073, 0x007c008a,
+	0x00860073, 0x007d008a, 0x00860072, 0x007d008b,
+	0x00860071, 0x007e008b, 0x00860070, 0x007f008b,
+	0x0086006f, 0x007f008c, 0x0085006e, 0x0080008d,
+	0x0085006d, 0x0081008d, 0x0084006d, 0x0082008d,
+	0x0084006c, 0x0083008d, 0x0084006b, 0x0083008e,
+
+	0x023c0fe2, 0x00000fe2, 0x023a0fdb, 0x00000feb,
+	0x02360fd3, 0x0fff0ff8, 0x022e0fcf, 0x0ffc0007,
+	0x02250fca, 0x0ffa0017, 0x021a0fc6, 0x0ff70029,
+	0x020c0fc4, 0x0ff4003c, 0x01fd0fc1, 0x0ff10051,
+	0x01eb0fc0, 0x0fed0068, 0x01d80fc0, 0x0fe9007f,
+	0x01c30fc1, 0x0fe50097, 0x01ac0fc2, 0x0fe200b0,
+	0x01960fc3, 0x0fdd00ca, 0x017e0fc5, 0x0fd900e4,
+	0x01650fc8, 0x0fd500fe, 0x014b0fcb, 0x0fd20118,
+	0x01330fcd, 0x0fcd0133, 0x01180fd2, 0x0fcb014b,
+	0x00fe0fd5, 0x0fc80165, 0x00e40fd9, 0x0fc5017e,
+	0x00ca0fdd, 0x0fc30196, 0x00b00fe2, 0x0fc201ac,
+	0x00970fe5, 0x0fc101c3, 0x007f0fe9, 0x0fc001d8,
+	0x00680fed, 0x0fc001eb, 0x00510ff1, 0x0fc101fd,
+	0x003c0ff4, 0x0fc4020c, 0x00290ff7, 0x0fc6021a,
+	0x00170ffa, 0x0fca0225, 0x00070ffc, 0x0fcf022e,
+	0x0ff80fff, 0x0fd30236, 0x0feb0000, 0x0fdb023a,
+
+	0x02780fc4, 0x00000fc4, 0x02770fbc, 0x0fff0fce,
+	0x02710fb5, 0x0ffe0fdc, 0x02690fb0, 0x0ffa0fed,
+	0x025f0fab, 0x0ff70fff, 0x02500fa8, 0x0ff30015,
+	0x02410fa6, 0x0fef002a, 0x022f0fa4, 0x0feb0042,
+	0x021a0fa4, 0x0fe5005d, 0x02040fa5, 0x0fe10076,
+	0x01eb0fa7, 0x0fdb0093, 0x01d20fa9, 0x0fd600af,
+	0x01b80fab, 0x0fd000cd, 0x019d0faf, 0x0fca00ea,
+	0x01810fb2, 0x0fc50108, 0x01620fb7, 0x0fc10126,
+	0x01440fbb, 0x0fbb0146, 0x01260fc1, 0x0fb70162,
+	0x01080fc5, 0x0fb20181, 0x00ea0fca, 0x0faf019d,
+	0x00cd0fd0, 0x0fab01b8, 0x00af0fd6, 0x0fa901d2,
+	0x00930fdb, 0x0fa701eb, 0x00760fe1, 0x0fa50204,
+	0x005d0fe5, 0x0fa4021a, 0x00420feb, 0x0fa4022f,
+	0x002a0fef, 0x0fa60241, 0x00150ff3, 0x0fa80250,
+	0x0fff0ff7, 0x0fab025f, 0x0fed0ffa, 0x0fb00269,
+	0x0fdc0ffe, 0x0fb50271, 0x0fce0fff, 0x0fbc0277,
+
+	0x02a00fb0, 0x00000fb0, 0x029e0fa8, 0x0fff0fbb,
+	0x02980fa1, 0x0ffd0fca, 0x028f0f9c, 0x0ff90fdc,
+	0x02840f97, 0x0ff50ff0, 0x02740f94, 0x0ff10007,
+	0x02640f92, 0x0fec001e, 0x02500f91, 0x0fe70038,
+	0x023a0f91, 0x0fe00055, 0x02220f92, 0x0fdb0071,
+	0x02080f95, 0x0fd4008f, 0x01ec0f98, 0x0fce00ae,
+	0x01cf0f9b, 0x0fc700cf, 0x01b10f9f, 0x0fc100ef,
+	0x01920fa4, 0x0fbb010f, 0x01710faa, 0x0fb50130,
+	0x01520fae, 0x0fae0152, 0x01300fb5, 0x0faa0171,
+	0x010f0fbb, 0x0fa40192, 0x00ef0fc1, 0x0f9f01b1,
+	0x00cf0fc7, 0x0f9b01cf, 0x00ae0fce, 0x0f9801ec,
+	0x008f0fd4, 0x0f950208, 0x00710fdb, 0x0f920222,
+	0x00550fe0, 0x0f91023a, 0x00380fe7, 0x0f910250,
+	0x001e0fec, 0x0f920264, 0x00070ff1, 0x0f940274,
+	0x0ff00ff5, 0x0f970284, 0x0fdc0ff9, 0x0f9c028f,
+	0x0fca0ffd, 0x0fa10298, 0x0fbb0fff, 0x0fa8029e,
+
+	0x02c80f9c, 0x00000f9c, 0x02c70f94, 0x0ffe0fa7,
+	0x02c10f8c, 0x0ffc0fb7, 0x02b70f87, 0x0ff70fcb,
+	0x02aa0f83, 0x0ff30fe0, 0x02990f80, 0x0fee0ff9,
+	0x02870f7f, 0x0fe80012, 0x02720f7e, 0x0fe2002e,
+	0x025a0f7e, 0x0fdb004d, 0x02400f80, 0x0fd5006b,
+	0x02230f84, 0x0fcd008c, 0x02050f87, 0x0fc700ad,
+	0x01e60f8b, 0x0fbf00d0, 0x01c60f90, 0x0fb700f3,
+	0x01a30f96, 0x0fb00117, 0x01800f9c, 0x0faa013a,
+	0x015d0fa2, 0x0fa2015f, 0x013a0faa, 0x0f9c0180,
+	0x01170fb0, 0x0f9601a3, 0x00f30fb7, 0x0f9001c6,
+	0x00d00fbf, 0x0f8b01e6, 0x00ad0fc7, 0x0f870205,
+	0x008c0fcd, 0x0f840223, 0x006b0fd5, 0x0f800240,
+	0x004d0fdb, 0x0f7e025a, 0x002e0fe2, 0x0f7e0272,
+	0x00120fe8, 0x0f7f0287, 0x0ff90fee, 0x0f800299,
+	0x0fe00ff3, 0x0f8302aa, 0x0fcb0ff7, 0x0f8702b7,
+	0x0fb70ffc, 0x0f8c02c1, 0x0fa70ffe, 0x0f9402c7,
+
+	0x02f00f88, 0x00000f88, 0x02ee0f80, 0x0ffe0f94,
+	0x02e70f78, 0x0ffc0fa5, 0x02dd0f73, 0x0ff60fba,
+	0x02ce0f6f, 0x0ff20fd1, 0x02be0f6c, 0x0feb0feb,
+	0x02aa0f6b, 0x0fe50006, 0x02940f6a, 0x0fde0024,
+	0x02790f6c, 0x0fd60045, 0x025e0f6e, 0x0fcf0065,
+	0x023f0f72, 0x0fc60089, 0x021d0f77, 0x0fbf00ad,
+	0x01fd0f7b, 0x0fb600d2, 0x01da0f81, 0x0fad00f8,
+	0x01b50f87, 0x0fa6011e, 0x018f0f8f, 0x0f9e0144,
+	0x016b0f95, 0x0f95016b, 0x01440f9e, 0x0f8f018f,
+	0x011e0fa6, 0x0f8701b5, 0x00f80fad, 0x0f8101da,
+	0x00d20fb6, 0x0f7b01fd, 0x00ad0fbf, 0x0f77021d,
+	0x00890fc6, 0x0f72023f, 0x00650fcf, 0x0f6e025e,
+	0x00450fd6, 0x0f6c0279, 0x00240fde, 0x0f6a0294,
+	0x00060fe5, 0x0f6b02aa, 0x0feb0feb, 0x0f6c02be,
+	0x0fd10ff2, 0x0f6f02ce, 0x0fba0ff6, 0x0f7302dd,
+	0x0fa50ffc, 0x0f7802e7, 0x0f940ffe, 0x0f8002ee,
+
+	0x03180f74, 0x00000f74, 0x03160f6b, 0x0ffe0f81,
+	0x030e0f64, 0x0ffb0f93, 0x03030f5f, 0x0ff50fa9,
+	0x02f40f5b, 0x0ff00fc1, 0x02e20f58, 0x0fe90fdd,
+	0x02cd0f57, 0x0fe20ffa, 0x02b60f57, 0x0fda0019,
+	0x02990f59, 0x0fd1003d, 0x027b0f5c, 0x0fc90060,
+	0x02590f61, 0x0fc00086, 0x02370f66, 0x0fb700ac,
+	0x02130f6b, 0x0fae00d4, 0x01ee0f72, 0x0fa400fc,
+	0x01c70f79, 0x0f9b0125, 0x019f0f81, 0x0f93014d,
+	0x01760f89, 0x0f890178, 0x014d0f93, 0x0f81019f,
+	0x01250f9b, 0x0f7901c7, 0x00fc0fa4, 0x0f7201ee,
+	0x00d40fae, 0x0f6b0213, 0x00ac0fb7, 0x0f660237,
+	0x00860fc0, 0x0f610259, 0x00600fc9, 0x0f5c027b,
+	0x003d0fd1, 0x0f590299, 0x00190fda, 0x0f5702b6,
+	0x0ffa0fe2, 0x0f5702cd, 0x0fdd0fe9, 0x0f5802e2,
+	0x0fc10ff0, 0x0f5b02f4, 0x0fa90ff5, 0x0f5f0303,
+	0x0f930ffb, 0x0f64030e, 0x0f810ffe, 0x0f6b0316,
+
+	0x03400f60, 0x00000f60, 0x033e0f57, 0x0ffe0f6d,
+	0x03370f4f, 0x0ffa0f80, 0x032a0f4b, 0x0ff30f98,
+	0x031a0f46, 0x0fee0fb2, 0x03070f44, 0x0fe60fcf,
+	0x02f10f44, 0x0fde0fed, 0x02d70f44, 0x0fd6000f,
+	0x02b80f46, 0x0fcc0036, 0x02990f4a, 0x0fc3005a,
+	0x02750f4f, 0x0fb90083, 0x02500f55, 0x0fb000ab,
+	0x022a0f5b, 0x0fa500d6, 0x02020f63, 0x0f9a0101,
+	0x01d80f6b, 0x0f91012c, 0x01ae0f74, 0x0f870157,
+	0x01840f7c, 0x0f7c0184, 0x01570f87, 0x0f7401ae,
+	0x012c0f91, 0x0f6b01d8, 0x01010f9a, 0x0f630202,
+	0x00d60fa5, 0x0f5b022a, 0x00ab0fb0, 0x0f550250,
+	0x00830fb9, 0x0f4f0275, 0x005a0fc3, 0x0f4a0299,
+	0x00360fcc, 0x0f4602b8, 0x000f0fd6, 0x0f4402d7,
+	0x0fed0fde, 0x0f4402f1, 0x0fcf0fe6, 0x0f440307,
+	0x0fb20fee, 0x0f46031a, 0x0f980ff3, 0x0f4b032a,
+	0x0f800ffa, 0x0f4f0337, 0x0f6d0ffe, 0x0f57033e
+};
+
+
+#define MDP4_QSEED_TABLE0_OFF 0x8100
+#define MDP4_QSEED_TABLE1_OFF 0x8200
+#define MDP4_QSEED_TABLE2_OFF 0x9000
+
+void mdp4_vg_qseed_init(int vp_num)
+{
+	uint32 *off;
+	int i, voff;
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	voff = MDP4_VIDEO_OFF * vp_num;
+	off = (uint32 *)(MDP_BASE + MDP4_VIDEO_BASE + voff +
+						MDP4_QSEED_TABLE0_OFF);
+	for (i = 0; i < (sizeof(vg_qseed_table0) / sizeof(uint32)); i++) {
+		outpdw(off, vg_qseed_table0[i]);
+		off++;
+		/*
+		 * Workaround for the 1KB-boundary AXI interleave operations
+		 * from Scorpion that can potentially corrupt the QSEED table.
+		 * The idea is to complete the previous writes to the buffer
+		 * before issuing the next write whenever the address is 1KB
+		 * aligned, so the data has been committed before a later
+		 * write can go out on the secondary AXI port. This matters
+		 * because of the expected write sequence for the QSEED table,
+		 * where the LSP must be written before the MSP to trigger
+		 * both to be written to SRAM; without that ordering
+		 * requirement the corruption would not occur.
+		 */
+
+		if (!((uint32)off & 0x3FF))
+			wmb();
+	}
+
+	off = (uint32 *)(MDP_BASE + MDP4_VIDEO_BASE + voff +
+						MDP4_QSEED_TABLE1_OFF);
+	for (i = 0; i < (sizeof(vg_qseed_table1) / sizeof(uint32)); i++) {
+		outpdw(off, vg_qseed_table1[i]);
+		off++;
+		if (!((uint32)off & 0x3FF))
+			wmb();
+	}
+
+	off = (uint32 *)(MDP_BASE + MDP4_VIDEO_BASE + voff +
+						MDP4_QSEED_TABLE2_OFF);
+	for (i = 0; i < (sizeof(vg_qseed_table2) / sizeof(uint32)); i++) {
+		outpdw(off, vg_qseed_table2[i]);
+		off++;
+		if (!((uint32)off & 0x3FF))
+			wmb();
+	}
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+}
+
+void mdp4_mixer_blend_init(int mixer_num)
+{
+	unsigned char *overlay_base;
+	int off;
+
+	if (mixer_num)	/* mixer number: 0 -> /dev/fb0, 1 -> /dev/fb1 */
+		overlay_base = MDP_BASE + MDP4_OVERLAYPROC1_BASE;/* 0x18000 */
+	else
+		overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	/* stage 0 to stage 2 */
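+	/*
+	 * Each blend stage occupies a 0x20 register stride: 0x104 is the
+	 * blend op (0x010 here is assumed to select constant-alpha blending),
+	 * 0x108 the FG constant alpha (fully opaque) and 0x10c the BG
+	 * constant alpha (fully transparent).
+	 */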
+	off = 0;
+	outpdw(overlay_base + off + 0x104, 0x010);
+	outpdw(overlay_base + off + 0x108, 0xff);/* FG */
+	outpdw(overlay_base + off + 0x10c, 0x00);/* BG */
+
+	off += 0x20;
+	outpdw(overlay_base + off + 0x104, 0x010);
+	outpdw(overlay_base + off + 0x108, 0xff);/* FG */
+	outpdw(overlay_base + off + 0x10c, 0x00);/* BG */
+
+	off += 0x20;
+	outpdw(overlay_base + off + 0x104, 0x010);
+	outpdw(overlay_base + off + 0x108, 0xff);/* FG */
+	outpdw(overlay_base + off + 0x10c, 0x00);/* BG */
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+
+
+static uint32 csc_matrix_tab[9] = {
+	0x0254, 0x0000, 0x0331,
+	0x0254, 0xff37, 0xfe60,
+	0x0254, 0x0409, 0x0000
+};
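+/*
+ * The matrix above appears to hold the standard BT.601 limited-range
+ * YCbCr -> RGB coefficients scaled by 512: e.g. 0x0254 = 596 ~= 1.164 * 512
+ * and 0x0331 = 817 ~= 1.596 * 512, with negative entries stored in two's
+ * complement (0xff37 ~= -0.392 * 512).
+ */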
+
+static uint32 csc_pre_bv_tab[3] = {0xfff0, 0xff80, 0xff80 };
+static uint32 csc_post_bv_tab[3] = {0, 0, 0 };
+
+static  uint32 csc_pre_lv_tab[6] =  {0, 0xff, 0, 0xff, 0, 0xff };
+static  uint32 csc_post_lv_tab[6] = {0, 0xff, 0, 0xff, 0, 0xff };
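+/*
+ * The bias vectors (BV) are applied around the matrix multiply and the
+ * limit vectors (LV) clamp the result. The pre-bias entries are two's
+ * complement: 0xfff0 = -16 and 0xff80 = -128, i.e. they strip the Y offset
+ * (16) and the chroma offsets (128) before conversion, while the limit
+ * vectors clamp each channel to the full 0..255 range.
+ */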
+
+#define MDP4_CSC_MV_OFF 	0x4400
+#define MDP4_CSC_PRE_BV_OFF 	0x4500
+#define MDP4_CSC_POST_BV_OFF 	0x4580
+#define MDP4_CSC_PRE_LV_OFF 	0x4600
+#define MDP4_CSC_POST_LV_OFF 	0x4680
+
+void mdp4_vg_csc_mv_setup(int vp_num)
+{
+	uint32 *off;
+	int i, voff;
+
+	voff = MDP4_VIDEO_OFF * vp_num;
+	off = (uint32 *)(MDP_BASE + MDP4_VIDEO_BASE + voff +
+					MDP4_CSC_MV_OFF);
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	for (i = 0; i < 9; i++) {
+		outpdw(off, csc_matrix_tab[i]);
+		off++;
+	}
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+
+void mdp4_vg_csc_pre_bv_setup(int vp_num)
+{
+	uint32 *off;
+	int i, voff;
+
+	voff = MDP4_VIDEO_OFF * vp_num;
+	off = (uint32 *)(MDP_BASE + MDP4_VIDEO_BASE + voff +
+					MDP4_CSC_PRE_BV_OFF);
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	for (i = 0; i < 3; i++) {
+		outpdw(off, csc_pre_bv_tab[i]);
+		off++;
+	}
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+
+void mdp4_vg_csc_post_bv_setup(int vp_num)
+{
+	uint32 *off;
+	int i, voff;
+
+	voff = MDP4_VIDEO_OFF * vp_num;
+	off = (uint32 *)(MDP_BASE + MDP4_VIDEO_BASE + voff +
+					MDP4_CSC_POST_BV_OFF);
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	for (i = 0; i < 3; i++) {
+		outpdw(off, csc_post_bv_tab[i]);
+		off++;
+	}
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+
+void mdp4_vg_csc_pre_lv_setup(int vp_num)
+{
+	uint32 *off;
+	int i, voff;
+
+	voff = MDP4_VIDEO_OFF * vp_num;
+	off = (uint32 *)(MDP_BASE + MDP4_VIDEO_BASE + voff +
+					MDP4_CSC_PRE_LV_OFF);
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	for (i = 0; i < 6; i++) {
+		outpdw(off, csc_pre_lv_tab[i]);
+		off++;
+	}
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+
+void mdp4_vg_csc_post_lv_setup(int vp_num)
+{
+	uint32 *off;
+	int i, voff;
+
+	voff = MDP4_VIDEO_OFF * vp_num;
+	off = (uint32 *)(MDP_BASE + MDP4_VIDEO_BASE + voff +
+					MDP4_CSC_POST_LV_OFF);
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	for (i = 0; i < 6; i++) {
+		outpdw(off, csc_post_lv_tab[i]);
+		off++;
+	}
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+
+static uint32 csc_rgb2yuv_matrix_tab[9] = {
+	0x0083, 0x0102, 0x0032,
+	0x1fb5, 0x1f6c, 0x00e1,
+	0x00e1, 0x1f45, 0x1fdc
+};
+
+static uint32 csc_rgb2yuv_pre_bv_tab[3] = {0, 0, 0};
+
+static uint32 csc_rgb2yuv_post_bv_tab[3] = {0x0010, 0x0080, 0x0080};
+
+static  uint32 csc_rgb2yuv_pre_lv_tab[6] = {
+	0x00, 0xff, 0x00,
+	0xff, 0x00, 0xff
+};
+
+static  uint32 csc_rgb2yuv_post_lv_tab[6] = {
+	0x0010, 0x00eb, 0x0010,
+	0x00f0, 0x0010, 0x00f0
+};
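+/*
+ * The RGB -> YCbCr path mirrors the tables above: BT.601 coefficients
+ * scaled by 512 (0x0083 = 131 ~= 0.257 * 512, negative entries in two's
+ * complement), a post-bias that re-adds the offsets (0x10 for Y, 0x80 for
+ * chroma) and post-limits that clamp to video range (Y 16..235,
+ * Cb/Cr 16..240).
+ */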
+
+void mdp4_mixer1_csc_mv_setup(void)
+{
+	uint32 *off;
+	int i;
+
+	off = (uint32 *)(MDP_BASE + MDP4_OVERLAYPROC1_BASE + 0x2400);
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	for (i = 0; i < 9; i++) {
+		outpdw(off, csc_rgb2yuv_matrix_tab[i]);
+		off++;
+	}
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+
+void mdp4_mixer1_csc_pre_bv_setup(void)
+{
+	uint32 *off;
+	int i;
+
+	off = (uint32 *)(MDP_BASE + MDP4_OVERLAYPROC1_BASE + 0x2500);
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	for (i = 0; i < 3; i++) {
+		outpdw(off, csc_rgb2yuv_pre_bv_tab[i]);
+		off++;
+	}
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+
+void mdp4_mixer1_csc_post_bv_setup(void)
+{
+	uint32 *off;
+	int i;
+
+	off = (uint32 *)(MDP_BASE + MDP4_OVERLAYPROC1_BASE + 0x2580);
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	for (i = 0; i < 3; i++) {
+		outpdw(off, csc_rgb2yuv_post_bv_tab[i]);
+		off++;
+	}
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+
+void mdp4_mixer1_csc_pre_lv_setup(void)
+{
+	uint32 *off;
+	int i;
+
+	off = (uint32 *)(MDP_BASE + MDP4_OVERLAYPROC1_BASE + 0x2600);
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	for (i = 0; i < 6; i++) {
+		outpdw(off, csc_rgb2yuv_pre_lv_tab[i]);
+		off++;
+	}
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+
+void mdp4_mixer1_csc_post_lv_setup(void)
+{
+	uint32 *off;
+	int i;
+
+	off = (uint32 *)(MDP_BASE + MDP4_OVERLAYPROC1_BASE + 0x2680);
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	for (i = 0; i < 6; i++) {
+		outpdw(off, csc_rgb2yuv_post_lv_tab[i]);
+		off++;
+	}
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+
+
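+/*
+ * Gamma-correction lookup table, presumably consumed by the IGC LUT setup
+ * routines: each entry maps an input level to its gamma-corrected output
+ * level.
+ */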
+char gc_lut[] = {
+	0x0, 0x1, 0x2, 0x2, 0x3, 0x4, 0x5, 0x6,
+	0x6, 0x7, 0x8, 0x9, 0xA, 0xA, 0xB, 0xC,
+	0xD, 0xD, 0xE, 0xF, 0xF, 0x10, 0x10, 0x11,
+	0x12, 0x12, 0x13, 0x13, 0x14, 0x14, 0x15, 0x15,
+	0x16, 0x16, 0x17, 0x17, 0x17, 0x18, 0x18, 0x19,
+	0x19, 0x19, 0x1A, 0x1A, 0x1B, 0x1B, 0x1B, 0x1C,
+	0x1C, 0x1D, 0x1D, 0x1D, 0x1E, 0x1E, 0x1E, 0x1F,
+	0x1F, 0x1F, 0x20, 0x20, 0x20, 0x21, 0x21, 0x21,
+	0x22, 0x22, 0x22, 0x22, 0x23, 0x23, 0x23, 0x24,
+	0x24, 0x24, 0x25, 0x25, 0x25, 0x25, 0x26, 0x26,
+	0x26, 0x26, 0x27, 0x27, 0x27, 0x28, 0x28, 0x28,
+	0x28, 0x29, 0x29, 0x29, 0x29, 0x2A, 0x2A, 0x2A,
+	0x2A, 0x2B, 0x2B, 0x2B, 0x2B, 0x2B, 0x2C, 0x2C,
+	0x2C, 0x2C, 0x2D, 0x2D, 0x2D, 0x2D, 0x2E, 0x2E,
+	0x2E, 0x2E, 0x2E, 0x2F, 0x2F, 0x2F, 0x2F, 0x30,
+	0x30, 0x30, 0x30, 0x30, 0x31, 0x31, 0x31, 0x31,
+	0x31, 0x32, 0x32, 0x32, 0x32, 0x32, 0x33, 0x33,
+	0x33, 0x33, 0x33, 0x34, 0x34, 0x34, 0x34, 0x34,
+	0x35, 0x35, 0x35, 0x35, 0x35, 0x36, 0x36, 0x36,
+	0x36, 0x36, 0x37, 0x37, 0x37, 0x37, 0x37, 0x37,
+	0x38, 0x38, 0x38, 0x38, 0x38, 0x39, 0x39, 0x39,
+	0x39, 0x39, 0x39, 0x3A, 0x3A, 0x3A, 0x3A, 0x3A,
+	0x3A, 0x3B, 0x3B, 0x3B, 0x3B, 0x3B, 0x3B, 0x3C,
+	0x3C, 0x3C, 0x3C, 0x3C, 0x3C, 0x3D, 0x3D, 0x3D,
+	0x3D, 0x3D, 0x3D, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E,
+	0x3E, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x40,
+	0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x41, 0x41,
+	0x41, 0x41, 0x41, 0x41, 0x42, 0x42, 0x42, 0x42,
+	0x42, 0x42, 0x42, 0x43, 0x43, 0x43, 0x43, 0x43,
+	0x43, 0x43, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+	0x44, 0x45, 0x45, 0x45, 0x45, 0x45, 0x45, 0x45,
+	0x46, 0x46, 0x46, 0x46, 0x46, 0x46, 0x46, 0x47,
+	0x47, 0x47, 0x47, 0x47, 0x47, 0x47, 0x48, 0x48,
+	0x48, 0x48, 0x48, 0x48, 0x48, 0x48, 0x49, 0x49,
+	0x49, 0x49, 0x49, 0x49, 0x49, 0x4A, 0x4A, 0x4A,
+	0x4A, 0x4A, 0x4A, 0x4A, 0x4A, 0x4B, 0x4B, 0x4B,
+	0x4B, 0x4B, 0x4B, 0x4B, 0x4B, 0x4C, 0x4C, 0x4C,
+	0x4C, 0x4C, 0x4C, 0x4C, 0x4D, 0x4D, 0x4D, 0x4D,
+	0x4D, 0x4D, 0x4D, 0x4D, 0x4E, 0x4E, 0x4E, 0x4E,
+	0x4E, 0x4E, 0x4E, 0x4E, 0x4E, 0x4F, 0x4F, 0x4F,
+	0x4F, 0x4F, 0x4F, 0x4F, 0x4F, 0x50, 0x50, 0x50,
+	0x50, 0x50, 0x50, 0x50, 0x50, 0x51, 0x51, 0x51,
+	0x51, 0x51, 0x51, 0x51, 0x51, 0x51, 0x52, 0x52,
+	0x52, 0x52, 0x52, 0x52, 0x52, 0x52, 0x53, 0x53,
+	0x53, 0x53, 0x53, 0x53, 0x53, 0x53, 0x53, 0x54,
+	0x54, 0x54, 0x54, 0x54, 0x54, 0x54, 0x54, 0x54,
+	0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55,
+	0x55, 0x56, 0x56, 0x56, 0x56, 0x56, 0x56, 0x56,
+	0x56, 0x56, 0x57, 0x57, 0x57, 0x57, 0x57, 0x57,
+	0x57, 0x57, 0x57, 0x58, 0x58, 0x58, 0x58, 0x58,
+	0x58, 0x58, 0x58, 0x58, 0x58, 0x59, 0x59, 0x59,
+	0x59, 0x59, 0x59, 0x59, 0x59, 0x59, 0x5A, 0x5A,
+	0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A,
+	0x5B, 0x5B, 0x5B, 0x5B, 0x5B, 0x5B, 0x5B, 0x5B,
+	0x5B, 0x5B, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
+	0x5C, 0x5C, 0x5C, 0x5C, 0x5D, 0x5D, 0x5D, 0x5D,
+	0x5D, 0x5D, 0x5D, 0x5D, 0x5D, 0x5D, 0x5E, 0x5E,
+	0x5E, 0x5E, 0x5E, 0x5E, 0x5E, 0x5E, 0x5E, 0x5E,
+	0x5F, 0x5F, 0x5F, 0x5F, 0x5F, 0x5F, 0x5F, 0x5F,
+	0x5F, 0x5F, 0x60, 0x60, 0x60, 0x60, 0x60, 0x60,
+	0x60, 0x60, 0x60, 0x60, 0x60, 0x61, 0x61, 0x61,
+	0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x62,
+	0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62,
+	0x62, 0x62, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+	0x63, 0x63, 0x63, 0x63, 0x63, 0x64, 0x64, 0x64,
+	0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64,
+	0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+	0x65, 0x65, 0x65, 0x66, 0x66, 0x66, 0x66, 0x66,
+	0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x67, 0x67,
+	0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67,
+	0x67, 0x67, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68,
+	0x68, 0x68, 0x68, 0x68, 0x68, 0x69, 0x69, 0x69,
+	0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69,
+	0x69, 0x6A, 0x6A, 0x6A, 0x6A, 0x6A, 0x6A, 0x6A,
+	0x6A, 0x6A, 0x6A, 0x6A, 0x6A, 0x6B, 0x6B, 0x6B,
+	0x6B, 0x6B, 0x6B, 0x6B, 0x6B, 0x6B, 0x6B, 0x6B,
+	0x6B, 0x6C, 0x6C, 0x6C, 0x6C, 0x6C, 0x6C, 0x6C,
+	0x6C, 0x6C, 0x6C, 0x6C, 0x6C, 0x6D, 0x6D, 0x6D,
+	0x6D, 0x6D, 0x6D, 0x6D, 0x6D, 0x6D, 0x6D, 0x6D,
+	0x6D, 0x6E, 0x6E, 0x6E, 0x6E, 0x6E, 0x6E, 0x6E,
+	0x6E, 0x6E, 0x6E, 0x6E, 0x6E, 0x6F, 0x6F, 0x6F,
+	0x6F, 0x6F, 0x6F, 0x6F, 0x6F, 0x6F, 0x6F, 0x6F,
+	0x6F, 0x6F, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70,
+	0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x71, 0x71,
+	0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71,
+	0x71, 0x71, 0x71, 0x72, 0x72, 0x72, 0x72, 0x72,
+	0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72,
+	0x73, 0x73, 0x73, 0x73, 0x73, 0x73, 0x73, 0x73,
+	0x73, 0x73, 0x73, 0x73, 0x73, 0x74, 0x74, 0x74,
+	0x74, 0x74, 0x74, 0x74, 0x74, 0x74, 0x74, 0x74,
+	0x74, 0x74, 0x75, 0x75, 0x75, 0x75, 0x75, 0x75,
+	0x75, 0x75, 0x75, 0x75, 0x75, 0x75, 0x75, 0x75,
+	0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76,
+	0x76, 0x76, 0x76, 0x76, 0x76, 0x77, 0x77, 0x77,
+	0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77,
+	0x77, 0x77, 0x77, 0x78, 0x78, 0x78, 0x78, 0x78,
+	0x78, 0x78, 0x78, 0x78, 0x78, 0x78, 0x78, 0x78,
+	0x78, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79,
+	0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x7A, 0x7A,
+	0x7A, 0x7A, 0x7A, 0x7A, 0x7A, 0x7A, 0x7A, 0x7A,
+	0x7A, 0x7A, 0x7A, 0x7A, 0x7A, 0x7B, 0x7B, 0x7B,
+	0x7B, 0x7B, 0x7B, 0x7B, 0x7B, 0x7B, 0x7B, 0x7B,
+	0x7B, 0x7B, 0x7B, 0x7C, 0x7C, 0x7C, 0x7C, 0x7C,
+	0x7C, 0x7C, 0x7C, 0x7C, 0x7C, 0x7C, 0x7C, 0x7C,
+	0x7C, 0x7D, 0x7D, 0x7D, 0x7D, 0x7D, 0x7D, 0x7D,
+	0x7D, 0x7D, 0x7D, 0x7D, 0x7D, 0x7D, 0x7D, 0x7D,
+	0x7E, 0x7E, 0x7E, 0x7E, 0x7E, 0x7E, 0x7E, 0x7E,
+	0x7E, 0x7E, 0x7E, 0x7E, 0x7E, 0x7E, 0x7F, 0x7F,
+	0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F,
+	0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x80, 0x80, 0x80,
+	0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
+	0x80, 0x80, 0x80, 0x80, 0x81, 0x81, 0x81, 0x81,
+	0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81,
+	0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x82, 0x82,
+	0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82,
+	0x82, 0x82, 0x83, 0x83, 0x83, 0x83, 0x83, 0x83,
+	0x83, 0x83, 0x83, 0x83, 0x83, 0x83, 0x83, 0x83,
+	0x83, 0x83, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84,
+	0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84,
+	0x84, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85,
+	0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85,
+	0x85, 0x86, 0x86, 0x86, 0x86, 0x86, 0x86, 0x86,
+	0x86, 0x86, 0x86, 0x86, 0x86, 0x86, 0x86, 0x86,
+	0x86, 0x87, 0x87, 0x87, 0x87, 0x87, 0x87, 0x87,
+	0x87, 0x87, 0x87, 0x87, 0x87, 0x87, 0x87, 0x87,
+	0x87, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
+	0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
+	0x88, 0x89, 0x89, 0x89, 0x89, 0x89, 0x89, 0x89,
+	0x89, 0x89, 0x89, 0x89, 0x89, 0x89, 0x89, 0x89,
+	0x89, 0x8A, 0x8A, 0x8A, 0x8A, 0x8A, 0x8A, 0x8A,
+	0x8A, 0x8A, 0x8A, 0x8A, 0x8A, 0x8A, 0x8A, 0x8A,
+	0x8A, 0x8B, 0x8B, 0x8B, 0x8B, 0x8B, 0x8B, 0x8B,
+	0x8B, 0x8B, 0x8B, 0x8B, 0x8B, 0x8B, 0x8B, 0x8B,
+	0x8B, 0x8B, 0x8C, 0x8C, 0x8C, 0x8C, 0x8C, 0x8C,
+	0x8C, 0x8C, 0x8C, 0x8C, 0x8C, 0x8C, 0x8C, 0x8C,
+	0x8C, 0x8C, 0x8C, 0x8D, 0x8D, 0x8D, 0x8D, 0x8D,
+	0x8D, 0x8D, 0x8D, 0x8D, 0x8D, 0x8D, 0x8D, 0x8D,
+	0x8D, 0x8D, 0x8D, 0x8D, 0x8E, 0x8E, 0x8E, 0x8E,
+	0x8E, 0x8E, 0x8E, 0x8E, 0x8E, 0x8E, 0x8E, 0x8E,
+	0x8E, 0x8E, 0x8E, 0x8E, 0x8E, 0x8F, 0x8F, 0x8F,
+	0x8F, 0x8F, 0x8F, 0x8F, 0x8F, 0x8F, 0x8F, 0x8F,
+	0x8F, 0x8F, 0x8F, 0x8F, 0x8F, 0x8F, 0x90, 0x90,
+	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
+	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x91,
+	0x91, 0x91, 0x91, 0x91, 0x91, 0x91, 0x91, 0x91,
+	0x91, 0x91, 0x91, 0x91, 0x91, 0x91, 0x91, 0x91,
+	0x91, 0x92, 0x92, 0x92, 0x92, 0x92, 0x92, 0x92,
+	0x92, 0x92, 0x92, 0x92, 0x92, 0x92, 0x92, 0x92,
+	0x92, 0x92, 0x93, 0x93, 0x93, 0x93, 0x93, 0x93,
+	0x93, 0x93, 0x93, 0x93, 0x93, 0x93, 0x93, 0x93,
+	0x93, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x94,
+	0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94,
+	0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x95, 0x95,
+	0x95, 0x95, 0x95, 0x95, 0x95, 0x95, 0x95, 0x95,
+	0x95, 0x95, 0x95, 0x95, 0x95, 0x95, 0x95, 0x95,
+	0x96, 0x96, 0x96, 0x96, 0x96, 0x96, 0x96, 0x96,
+	0x96, 0x96, 0x96, 0x96, 0x96, 0x96, 0x96, 0x96,
+	0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x97, 0x97,
+	0x97, 0x97, 0x97, 0x97, 0x97, 0x97, 0x97, 0x97,
+	0x97, 0x97, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98,
+	0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98,
+	0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98,
+	0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+	0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+	0x99, 0x99, 0x9A, 0x9A, 0x9A, 0x9A, 0x9A, 0x9A,
+	0x9A, 0x9A, 0x9A, 0x9A, 0x9A, 0x9A, 0x9A, 0x9A,
+	0x9A, 0x9A, 0x9A, 0x9A, 0x9A, 0x9B, 0x9B, 0x9B,
+	0x9B, 0x9B, 0x9B, 0x9B, 0x9B, 0x9B, 0x9B, 0x9B,
+	0x9B, 0x9B, 0x9B, 0x9B, 0x9B, 0x9B, 0x9B, 0x9B,
+	0x9C, 0x9C, 0x9C, 0x9C, 0x9C, 0x9C, 0x9C, 0x9C,
+	0x9C, 0x9C, 0x9C, 0x9C, 0x9C, 0x9C, 0x9C, 0x9C,
+	0x9C, 0x9C, 0x9C, 0x9C, 0x9D, 0x9D, 0x9D, 0x9D,
+	0x9D, 0x9D, 0x9D, 0x9D, 0x9D, 0x9D, 0x9D, 0x9D,
+	0x9D, 0x9D, 0x9D, 0x9D, 0x9D, 0x9D, 0x9D, 0x9E,
+	0x9E, 0x9E, 0x9E, 0x9E, 0x9E, 0x9E, 0x9E, 0x9E,
+	0x9E, 0x9E, 0x9E, 0x9E, 0x9E, 0x9E, 0x9E, 0x9E,
+	0x9E, 0x9E, 0x9F, 0x9F, 0x9F, 0x9F, 0x9F, 0x9F,
+	0x9F, 0x9F, 0x9F, 0x9F, 0x9F, 0x9F, 0x9F, 0x9F,
+	0x9F, 0x9F, 0x9F, 0x9F, 0x9F, 0x9F, 0xA0, 0xA0,
+	0xA0, 0xA0, 0xA0, 0xA0, 0xA0, 0xA0, 0xA0, 0xA0,
+	0xA0, 0xA0, 0xA0, 0xA0, 0xA0, 0xA0, 0xA0, 0xA0,
+	0xA0, 0xA0, 0xA1, 0xA1, 0xA1, 0xA1, 0xA1, 0xA1,
+	0xA1, 0xA1, 0xA1, 0xA1, 0xA1, 0xA1, 0xA1, 0xA1,
+	0xA1, 0xA1, 0xA1, 0xA1, 0xA1, 0xA1, 0xA2, 0xA2,
+	0xA2, 0xA2, 0xA2, 0xA2, 0xA2, 0xA2, 0xA2, 0xA2,
+	0xA2, 0xA2, 0xA2, 0xA2, 0xA2, 0xA2, 0xA2, 0xA2,
+	0xA2, 0xA2, 0xA3, 0xA3, 0xA3, 0xA3, 0xA3, 0xA3,
+	0xA3, 0xA3, 0xA3, 0xA3, 0xA3, 0xA3, 0xA3, 0xA3,
+	0xA3, 0xA3, 0xA3, 0xA3, 0xA3, 0xA3, 0xA4, 0xA4,
+	0xA4, 0xA4, 0xA4, 0xA4, 0xA4, 0xA4, 0xA4, 0xA4,
+	0xA4, 0xA4, 0xA4, 0xA4, 0xA4, 0xA4, 0xA4, 0xA4,
+	0xA4, 0xA4, 0xA4, 0xA5, 0xA5, 0xA5, 0xA5, 0xA5,
+	0xA5, 0xA5, 0xA5, 0xA5, 0xA5, 0xA5, 0xA5, 0xA5,
+	0xA5, 0xA5, 0xA5, 0xA5, 0xA5, 0xA5, 0xA5, 0xA5,
+	0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6,
+	0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6,
+	0xA6, 0xA6, 0xA6, 0xA6, 0xA7, 0xA7, 0xA7, 0xA7,
+	0xA7, 0xA7, 0xA7, 0xA7, 0xA7, 0xA7, 0xA7, 0xA7,
+	0xA7, 0xA7, 0xA7, 0xA7, 0xA7, 0xA7, 0xA7, 0xA7,
+	0xA7, 0xA8, 0xA8, 0xA8, 0xA8, 0xA8, 0xA8, 0xA8,
+	0xA8, 0xA8, 0xA8, 0xA8, 0xA8, 0xA8, 0xA8, 0xA8,
+	0xA8, 0xA8, 0xA8, 0xA8, 0xA8, 0xA8, 0xA8, 0xA9,
+	0xA9, 0xA9, 0xA9, 0xA9, 0xA9, 0xA9, 0xA9, 0xA9,
+	0xA9, 0xA9, 0xA9, 0xA9, 0xA9, 0xA9, 0xA9, 0xA9,
+	0xA9, 0xA9, 0xA9, 0xA9, 0xAA, 0xAA, 0xAA, 0xAA,
+	0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+	0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+	0xAA, 0xAB, 0xAB, 0xAB, 0xAB, 0xAB, 0xAB, 0xAB,
+	0xAB, 0xAB, 0xAB, 0xAB, 0xAB, 0xAB, 0xAB, 0xAB,
+	0xAB, 0xAB, 0xAB, 0xAB, 0xAB, 0xAB, 0xAB, 0xAC,
+	0xAC, 0xAC, 0xAC, 0xAC, 0xAC, 0xAC, 0xAC, 0xAC,
+	0xAC, 0xAC, 0xAC, 0xAC, 0xAC, 0xAC, 0xAC, 0xAC,
+	0xAC, 0xAC, 0xAC, 0xAC, 0xAC, 0xAD, 0xAD, 0xAD,
+	0xAD, 0xAD, 0xAD, 0xAD, 0xAD, 0xAD, 0xAD, 0xAD,
+	0xAD, 0xAD, 0xAD, 0xAD, 0xAD, 0xAD, 0xAD, 0xAD,
+	0xAD, 0xAD, 0xAD, 0xAE, 0xAE, 0xAE, 0xAE, 0xAE,
+	0xAE, 0xAE, 0xAE, 0xAE, 0xAE, 0xAE, 0xAE, 0xAE,
+	0xAE, 0xAE, 0xAE, 0xAE, 0xAE, 0xAE, 0xAE, 0xAE,
+	0xAE, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF,
+	0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF,
+	0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xB0,
+	0xB0, 0xB0, 0xB0, 0xB0, 0xB0, 0xB0, 0xB0, 0xB0,
+	0xB0, 0xB0, 0xB0, 0xB0, 0xB0, 0xB0, 0xB0, 0xB0,
+	0xB0, 0xB0, 0xB0, 0xB0, 0xB0, 0xB0, 0xB1, 0xB1,
+	0xB1, 0xB1, 0xB1, 0xB1, 0xB1, 0xB1, 0xB1, 0xB1,
+	0xB1, 0xB1, 0xB1, 0xB1, 0xB1, 0xB1, 0xB1, 0xB1,
+	0xB1, 0xB1, 0xB1, 0xB1, 0xB2, 0xB2, 0xB2, 0xB2,
+	0xB2, 0xB2, 0xB2, 0xB2, 0xB2, 0xB2, 0xB2, 0xB2,
+	0xB2, 0xB2, 0xB2, 0xB2, 0xB2, 0xB2, 0xB2, 0xB2,
+	0xB2, 0xB2, 0xB2, 0xB3, 0xB3, 0xB3, 0xB3, 0xB3,
+	0xB3, 0xB3, 0xB3, 0xB3, 0xB3, 0xB3, 0xB3, 0xB3,
+	0xB3, 0xB3, 0xB3, 0xB3, 0xB3, 0xB3, 0xB3, 0xB3,
+	0xB3, 0xB3, 0xB4, 0xB4, 0xB4, 0xB4, 0xB4, 0xB4,
+	0xB4, 0xB4, 0xB4, 0xB4, 0xB4, 0xB4, 0xB4, 0xB4,
+	0xB4, 0xB4, 0xB4, 0xB4, 0xB4, 0xB4, 0xB4, 0xB4,
+	0xB4, 0xB5, 0xB5, 0xB5, 0xB5, 0xB5, 0xB5, 0xB5,
+	0xB5, 0xB5, 0xB5, 0xB5, 0xB5, 0xB5, 0xB5, 0xB5,
+	0xB5, 0xB5, 0xB5, 0xB5, 0xB5, 0xB5, 0xB5, 0xB5,
+	0xB6, 0xB6, 0xB6, 0xB6, 0xB6, 0xB6, 0xB6, 0xB6,
+	0xB6, 0xB6, 0xB6, 0xB6, 0xB6, 0xB6, 0xB6, 0xB6,
+	0xB6, 0xB6, 0xB6, 0xB6, 0xB6, 0xB6, 0xB6, 0xB6,
+	0xB7, 0xB7, 0xB7, 0xB7, 0xB7, 0xB7, 0xB7, 0xB7,
+	0xB7, 0xB7, 0xB7, 0xB7, 0xB7, 0xB7, 0xB7, 0xB7,
+	0xB7, 0xB7, 0xB7, 0xB7, 0xB7, 0xB7, 0xB7, 0xB8,
+	0xB8, 0xB8, 0xB8, 0xB8, 0xB8, 0xB8, 0xB8, 0xB8,
+	0xB8, 0xB8, 0xB8, 0xB8, 0xB8, 0xB8, 0xB8, 0xB8,
+	0xB8, 0xB8, 0xB8, 0xB8, 0xB8, 0xB8, 0xB8, 0xB9,
+	0xB9, 0xB9, 0xB9, 0xB9, 0xB9, 0xB9, 0xB9, 0xB9,
+	0xB9, 0xB9, 0xB9, 0xB9, 0xB9, 0xB9, 0xB9, 0xB9,
+	0xB9, 0xB9, 0xB9, 0xB9, 0xB9, 0xB9, 0xB9, 0xBA,
+	0xBA, 0xBA, 0xBA, 0xBA, 0xBA, 0xBA, 0xBA, 0xBA,
+	0xBA, 0xBA, 0xBA, 0xBA, 0xBA, 0xBA, 0xBA, 0xBA,
+	0xBA, 0xBA, 0xBA, 0xBA, 0xBA, 0xBA, 0xBA, 0xBB,
+	0xBB, 0xBB, 0xBB, 0xBB, 0xBB, 0xBB, 0xBB, 0xBB,
+	0xBB, 0xBB, 0xBB, 0xBB, 0xBB, 0xBB, 0xBB, 0xBB,
+	0xBB, 0xBB, 0xBB, 0xBB, 0xBB, 0xBB, 0xBB, 0xBB,
+	0xBC, 0xBC, 0xBC, 0xBC, 0xBC, 0xBC, 0xBC, 0xBC,
+	0xBC, 0xBC, 0xBC, 0xBC, 0xBC, 0xBC, 0xBC, 0xBC,
+	0xBC, 0xBC, 0xBC, 0xBC, 0xBC, 0xBC, 0xBC, 0xBC,
+	0xBD, 0xBD, 0xBD, 0xBD, 0xBD, 0xBD, 0xBD, 0xBD,
+	0xBD, 0xBD, 0xBD, 0xBD, 0xBD, 0xBD, 0xBD, 0xBD,
+	0xBD, 0xBD, 0xBD, 0xBD, 0xBD, 0xBD, 0xBD, 0xBD,
+	0xBD, 0xBE, 0xBE, 0xBE, 0xBE, 0xBE, 0xBE, 0xBE,
+	0xBE, 0xBE, 0xBE, 0xBE, 0xBE, 0xBE, 0xBE, 0xBE,
+	0xBE, 0xBE, 0xBE, 0xBE, 0xBE, 0xBE, 0xBE, 0xBE,
+	0xBE, 0xBE, 0xBF, 0xBF, 0xBF, 0xBF, 0xBF, 0xBF,
+	0xBF, 0xBF, 0xBF, 0xBF, 0xBF, 0xBF, 0xBF, 0xBF,
+	0xBF, 0xBF, 0xBF, 0xBF, 0xBF, 0xBF, 0xBF, 0xBF,
+	0xBF, 0xBF, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0,
+	0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0,
+	0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0,
+	0xC0, 0xC0, 0xC0, 0xC0, 0xC1, 0xC1, 0xC1, 0xC1,
+	0xC1, 0xC1, 0xC1, 0xC1, 0xC1, 0xC1, 0xC1, 0xC1,
+	0xC1, 0xC1, 0xC1, 0xC1, 0xC1, 0xC1, 0xC1, 0xC1,
+	0xC1, 0xC1, 0xC1, 0xC1, 0xC1, 0xC2, 0xC2, 0xC2,
+	0xC2, 0xC2, 0xC2, 0xC2, 0xC2, 0xC2, 0xC2, 0xC2,
+	0xC2, 0xC2, 0xC2, 0xC2, 0xC2, 0xC2, 0xC2, 0xC2,
+	0xC2, 0xC2, 0xC2, 0xC2, 0xC2, 0xC2, 0xC3, 0xC3,
+	0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3,
+	0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3,
+	0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3,
+	0xC4, 0xC4, 0xC4, 0xC4, 0xC4, 0xC4, 0xC4, 0xC4,
+	0xC4, 0xC4, 0xC4, 0xC4, 0xC4, 0xC4, 0xC4, 0xC4,
+	0xC4, 0xC4, 0xC4, 0xC4, 0xC4, 0xC4, 0xC4, 0xC4,
+	0xC4, 0xC4, 0xC5, 0xC5, 0xC5, 0xC5, 0xC5, 0xC5,
+	0xC5, 0xC5, 0xC5, 0xC5, 0xC5, 0xC5, 0xC5, 0xC5,
+	0xC5, 0xC5, 0xC5, 0xC5, 0xC5, 0xC5, 0xC5, 0xC5,
+	0xC5, 0xC5, 0xC5, 0xC5, 0xC6, 0xC6, 0xC6, 0xC6,
+	0xC6, 0xC6, 0xC6, 0xC6, 0xC6, 0xC6, 0xC6, 0xC6,
+	0xC6, 0xC6, 0xC6, 0xC6, 0xC6, 0xC6, 0xC6, 0xC6,
+	0xC6, 0xC6, 0xC6, 0xC6, 0xC6, 0xC6, 0xC7, 0xC7,
+	0xC7, 0xC7, 0xC7, 0xC7, 0xC7, 0xC7, 0xC7, 0xC7,
+	0xC7, 0xC7, 0xC7, 0xC7, 0xC7, 0xC7, 0xC7, 0xC7,
+	0xC7, 0xC7, 0xC7, 0xC7, 0xC7, 0xC7, 0xC7, 0xC7,
+	0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8,
+	0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8,
+	0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8,
+	0xC8, 0xC8, 0xC8, 0xC9, 0xC9, 0xC9, 0xC9, 0xC9,
+	0xC9, 0xC9, 0xC9, 0xC9, 0xC9, 0xC9, 0xC9, 0xC9,
+	0xC9, 0xC9, 0xC9, 0xC9, 0xC9, 0xC9, 0xC9, 0xC9,
+	0xC9, 0xC9, 0xC9, 0xC9, 0xC9, 0xC9, 0xCA, 0xCA,
+	0xCA, 0xCA, 0xCA, 0xCA, 0xCA, 0xCA, 0xCA, 0xCA,
+	0xCA, 0xCA, 0xCA, 0xCA, 0xCA, 0xCA, 0xCA, 0xCA,
+	0xCA, 0xCA, 0xCA, 0xCA, 0xCA, 0xCA, 0xCA, 0xCA,
+	0xCA, 0xCB, 0xCB, 0xCB, 0xCB, 0xCB, 0xCB, 0xCB,
+	0xCB, 0xCB, 0xCB, 0xCB, 0xCB, 0xCB, 0xCB, 0xCB,
+	0xCB, 0xCB, 0xCB, 0xCB, 0xCB, 0xCB, 0xCB, 0xCB,
+	0xCB, 0xCB, 0xCB, 0xCB, 0xCC, 0xCC, 0xCC, 0xCC,
+	0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC,
+	0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC,
+	0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCD,
+	0xCD, 0xCD, 0xCD, 0xCD, 0xCD, 0xCD, 0xCD, 0xCD,
+	0xCD, 0xCD, 0xCD, 0xCD, 0xCD, 0xCD, 0xCD, 0xCD,
+	0xCD, 0xCD, 0xCD, 0xCD, 0xCD, 0xCD, 0xCD, 0xCD,
+	0xCD, 0xCD, 0xCE, 0xCE, 0xCE, 0xCE, 0xCE, 0xCE,
+	0xCE, 0xCE, 0xCE, 0xCE, 0xCE, 0xCE, 0xCE, 0xCE,
+	0xCE, 0xCE, 0xCE, 0xCE, 0xCE, 0xCE, 0xCE, 0xCE,
+	0xCE, 0xCE, 0xCE, 0xCE, 0xCE, 0xCE, 0xCF, 0xCF,
+	0xCF, 0xCF, 0xCF, 0xCF, 0xCF, 0xCF, 0xCF, 0xCF,
+	0xCF, 0xCF, 0xCF, 0xCF, 0xCF, 0xCF, 0xCF, 0xCF,
+	0xCF, 0xCF, 0xCF, 0xCF, 0xCF, 0xCF, 0xCF, 0xCF,
+	0xCF, 0xCF, 0xD0, 0xD0, 0xD0, 0xD0, 0xD0, 0xD0,
+	0xD0, 0xD0, 0xD0, 0xD0, 0xD0, 0xD0, 0xD0, 0xD0,
+	0xD0, 0xD0, 0xD0, 0xD0, 0xD0, 0xD0, 0xD0, 0xD0,
+	0xD0, 0xD0, 0xD0, 0xD0, 0xD0, 0xD1, 0xD1, 0xD1,
+	0xD1, 0xD1, 0xD1, 0xD1, 0xD1, 0xD1, 0xD1, 0xD1,
+	0xD1, 0xD1, 0xD1, 0xD1, 0xD1, 0xD1, 0xD1, 0xD1,
+	0xD1, 0xD1, 0xD1, 0xD1, 0xD1, 0xD1, 0xD1, 0xD1,
+	0xD1, 0xD1, 0xD2, 0xD2, 0xD2, 0xD2, 0xD2, 0xD2,
+	0xD2, 0xD2, 0xD2, 0xD2, 0xD2, 0xD2, 0xD2, 0xD2,
+	0xD2, 0xD2, 0xD2, 0xD2, 0xD2, 0xD2, 0xD2, 0xD2,
+	0xD2, 0xD2, 0xD2, 0xD2, 0xD2, 0xD2, 0xD3, 0xD3,
+	0xD3, 0xD3, 0xD3, 0xD3, 0xD3, 0xD3, 0xD3, 0xD3,
+	0xD3, 0xD3, 0xD3, 0xD3, 0xD3, 0xD3, 0xD3, 0xD3,
+	0xD3, 0xD3, 0xD3, 0xD3, 0xD3, 0xD3, 0xD3, 0xD3,
+	0xD3, 0xD3, 0xD4, 0xD4, 0xD4, 0xD4, 0xD4, 0xD4,
+	0xD4, 0xD4, 0xD4, 0xD4, 0xD4, 0xD4, 0xD4, 0xD4,
+	0xD4, 0xD4, 0xD4, 0xD4, 0xD4, 0xD4, 0xD4, 0xD4,
+	0xD4, 0xD4, 0xD4, 0xD4, 0xD4, 0xD4, 0xD4, 0xD5,
+	0xD5, 0xD5, 0xD5, 0xD5, 0xD5, 0xD5, 0xD5, 0xD5,
+	0xD5, 0xD5, 0xD5, 0xD5, 0xD5, 0xD5, 0xD5, 0xD5,
+	0xD5, 0xD5, 0xD5, 0xD5, 0xD5, 0xD5, 0xD5, 0xD5,
+	0xD5, 0xD5, 0xD5, 0xD5, 0xD6, 0xD6, 0xD6, 0xD6,
+	0xD6, 0xD6, 0xD6, 0xD6, 0xD6, 0xD6, 0xD6, 0xD6,
+	0xD6, 0xD6, 0xD6, 0xD6, 0xD6, 0xD6, 0xD6, 0xD6,
+	0xD6, 0xD6, 0xD6, 0xD6, 0xD6, 0xD6, 0xD6, 0xD6,
+	0xD6, 0xD7, 0xD7, 0xD7, 0xD7, 0xD7, 0xD7, 0xD7,
+	0xD7, 0xD7, 0xD7, 0xD7, 0xD7, 0xD7, 0xD7, 0xD7,
+	0xD7, 0xD7, 0xD7, 0xD7, 0xD7, 0xD7, 0xD7, 0xD7,
+	0xD7, 0xD7, 0xD7, 0xD7, 0xD7, 0xD7, 0xD8, 0xD8,
+	0xD8, 0xD8, 0xD8, 0xD8, 0xD8, 0xD8, 0xD8, 0xD8,
+	0xD8, 0xD8, 0xD8, 0xD8, 0xD8, 0xD8, 0xD8, 0xD8,
+	0xD8, 0xD8, 0xD8, 0xD8, 0xD8, 0xD8, 0xD8, 0xD8,
+	0xD8, 0xD8, 0xD8, 0xD9, 0xD9, 0xD9, 0xD9, 0xD9,
+	0xD9, 0xD9, 0xD9, 0xD9, 0xD9, 0xD9, 0xD9, 0xD9,
+	0xD9, 0xD9, 0xD9, 0xD9, 0xD9, 0xD9, 0xD9, 0xD9,
+	0xD9, 0xD9, 0xD9, 0xD9, 0xD9, 0xD9, 0xD9, 0xD9,
+	0xD9, 0xDA, 0xDA, 0xDA, 0xDA, 0xDA, 0xDA, 0xDA,
+	0xDA, 0xDA, 0xDA, 0xDA, 0xDA, 0xDA, 0xDA, 0xDA,
+	0xDA, 0xDA, 0xDA, 0xDA, 0xDA, 0xDA, 0xDA, 0xDA,
+	0xDA, 0xDA, 0xDA, 0xDA, 0xDA, 0xDA, 0xDB, 0xDB,
+	0xDB, 0xDB, 0xDB, 0xDB, 0xDB, 0xDB, 0xDB, 0xDB,
+	0xDB, 0xDB, 0xDB, 0xDB, 0xDB, 0xDB, 0xDB, 0xDB,
+	0xDB, 0xDB, 0xDB, 0xDB, 0xDB, 0xDB, 0xDB, 0xDB,
+	0xDB, 0xDB, 0xDB, 0xDB, 0xDC, 0xDC, 0xDC, 0xDC,
+	0xDC, 0xDC, 0xDC, 0xDC, 0xDC, 0xDC, 0xDC, 0xDC,
+	0xDC, 0xDC, 0xDC, 0xDC, 0xDC, 0xDC, 0xDC, 0xDC,
+	0xDC, 0xDC, 0xDC, 0xDC, 0xDC, 0xDC, 0xDC, 0xDC,
+	0xDC, 0xDC, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD,
+	0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD,
+	0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD,
+	0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD,
+	0xDD, 0xDE, 0xDE, 0xDE, 0xDE, 0xDE, 0xDE, 0xDE,
+	0xDE, 0xDE, 0xDE, 0xDE, 0xDE, 0xDE, 0xDE, 0xDE,
+	0xDE, 0xDE, 0xDE, 0xDE, 0xDE, 0xDE, 0xDE, 0xDE,
+	0xDE, 0xDE, 0xDE, 0xDE, 0xDE, 0xDE, 0xDE, 0xDF,
+	0xDF, 0xDF, 0xDF, 0xDF, 0xDF, 0xDF, 0xDF, 0xDF,
+	0xDF, 0xDF, 0xDF, 0xDF, 0xDF, 0xDF, 0xDF, 0xDF,
+	0xDF, 0xDF, 0xDF, 0xDF, 0xDF, 0xDF, 0xDF, 0xDF,
+	0xDF, 0xDF, 0xDF, 0xDF, 0xDF, 0xDF, 0xE0, 0xE0,
+	0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0,
+	0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0,
+	0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0,
+	0xE0, 0xE0, 0xE0, 0xE0, 0xE1, 0xE1, 0xE1, 0xE1,
+	0xE1, 0xE1, 0xE1, 0xE1, 0xE1, 0xE1, 0xE1, 0xE1,
+	0xE1, 0xE1, 0xE1, 0xE1, 0xE1, 0xE1, 0xE1, 0xE1,
+	0xE1, 0xE1, 0xE1, 0xE1, 0xE1, 0xE1, 0xE1, 0xE1,
+	0xE1, 0xE1, 0xE1, 0xE2, 0xE2, 0xE2, 0xE2, 0xE2,
+	0xE2, 0xE2, 0xE2, 0xE2, 0xE2, 0xE2, 0xE2, 0xE2,
+	0xE2, 0xE2, 0xE2, 0xE2, 0xE2, 0xE2, 0xE2, 0xE2,
+	0xE2, 0xE2, 0xE2, 0xE2, 0xE2, 0xE2, 0xE2, 0xE2,
+	0xE2, 0xE2, 0xE3, 0xE3, 0xE3, 0xE3, 0xE3, 0xE3,
+	0xE3, 0xE3, 0xE3, 0xE3, 0xE3, 0xE3, 0xE3, 0xE3,
+	0xE3, 0xE3, 0xE3, 0xE3, 0xE3, 0xE3, 0xE3, 0xE3,
+	0xE3, 0xE3, 0xE3, 0xE3, 0xE3, 0xE3, 0xE3, 0xE3,
+	0xE3, 0xE3, 0xE4, 0xE4, 0xE4, 0xE4, 0xE4, 0xE4,
+	0xE4, 0xE4, 0xE4, 0xE4, 0xE4, 0xE4, 0xE4, 0xE4,
+	0xE4, 0xE4, 0xE4, 0xE4, 0xE4, 0xE4, 0xE4, 0xE4,
+	0xE4, 0xE4, 0xE4, 0xE4, 0xE4, 0xE4, 0xE4, 0xE4,
+	0xE4, 0xE5, 0xE5, 0xE5, 0xE5, 0xE5, 0xE5, 0xE5,
+	0xE5, 0xE5, 0xE5, 0xE5, 0xE5, 0xE5, 0xE5, 0xE5,
+	0xE5, 0xE5, 0xE5, 0xE5, 0xE5, 0xE5, 0xE5, 0xE5,
+	0xE5, 0xE5, 0xE5, 0xE5, 0xE5, 0xE5, 0xE5, 0xE5,
+	0xE5, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6,
+	0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6,
+	0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6,
+	0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6,
+	0xE6, 0xE7, 0xE7, 0xE7, 0xE7, 0xE7, 0xE7, 0xE7,
+	0xE7, 0xE7, 0xE7, 0xE7, 0xE7, 0xE7, 0xE7, 0xE7,
+	0xE7, 0xE7, 0xE7, 0xE7, 0xE7, 0xE7, 0xE7, 0xE7,
+	0xE7, 0xE7, 0xE7, 0xE7, 0xE7, 0xE7, 0xE7, 0xE7,
+	0xE7, 0xE8, 0xE8, 0xE8, 0xE8, 0xE8, 0xE8, 0xE8,
+	0xE8, 0xE8, 0xE8, 0xE8, 0xE8, 0xE8, 0xE8, 0xE8,
+	0xE8, 0xE8, 0xE8, 0xE8, 0xE8, 0xE8, 0xE8, 0xE8,
+	0xE8, 0xE8, 0xE8, 0xE8, 0xE8, 0xE8, 0xE8, 0xE8,
+	0xE8, 0xE9, 0xE9, 0xE9, 0xE9, 0xE9, 0xE9, 0xE9,
+	0xE9, 0xE9, 0xE9, 0xE9, 0xE9, 0xE9, 0xE9, 0xE9,
+	0xE9, 0xE9, 0xE9, 0xE9, 0xE9, 0xE9, 0xE9, 0xE9,
+	0xE9, 0xE9, 0xE9, 0xE9, 0xE9, 0xE9, 0xE9, 0xE9,
+	0xE9, 0xE9, 0xEA, 0xEA, 0xEA, 0xEA, 0xEA, 0xEA,
+	0xEA, 0xEA, 0xEA, 0xEA, 0xEA, 0xEA, 0xEA, 0xEA,
+	0xEA, 0xEA, 0xEA, 0xEA, 0xEA, 0xEA, 0xEA, 0xEA,
+	0xEA, 0xEA, 0xEA, 0xEA, 0xEA, 0xEA, 0xEA, 0xEA,
+	0xEA, 0xEA, 0xEB, 0xEB, 0xEB, 0xEB, 0xEB, 0xEB,
+	0xEB, 0xEB, 0xEB, 0xEB, 0xEB, 0xEB, 0xEB, 0xEB,
+	0xEB, 0xEB, 0xEB, 0xEB, 0xEB, 0xEB, 0xEB, 0xEB,
+	0xEB, 0xEB, 0xEB, 0xEB, 0xEB, 0xEB, 0xEB, 0xEB,
+	0xEB, 0xEB, 0xEB, 0xEC, 0xEC, 0xEC, 0xEC, 0xEC,
+	0xEC, 0xEC, 0xEC, 0xEC, 0xEC, 0xEC, 0xEC, 0xEC,
+	0xEC, 0xEC, 0xEC, 0xEC, 0xEC, 0xEC, 0xEC, 0xEC,
+	0xEC, 0xEC, 0xEC, 0xEC, 0xEC, 0xEC, 0xEC, 0xEC,
+	0xEC, 0xEC, 0xEC, 0xEC, 0xED, 0xED, 0xED, 0xED,
+	0xED, 0xED, 0xED, 0xED, 0xED, 0xED, 0xED, 0xED,
+	0xED, 0xED, 0xED, 0xED, 0xED, 0xED, 0xED, 0xED,
+	0xED, 0xED, 0xED, 0xED, 0xED, 0xED, 0xED, 0xED,
+	0xED, 0xED, 0xED, 0xED, 0xED, 0xEE, 0xEE, 0xEE,
+	0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
+	0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
+	0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
+	0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEF, 0xEF,
+	0xEF, 0xEF, 0xEF, 0xEF, 0xEF, 0xEF, 0xEF, 0xEF,
+	0xEF, 0xEF, 0xEF, 0xEF, 0xEF, 0xEF, 0xEF, 0xEF,
+	0xEF, 0xEF, 0xEF, 0xEF, 0xEF, 0xEF, 0xEF, 0xEF,
+	0xEF, 0xEF, 0xEF, 0xEF, 0xEF, 0xEF, 0xEF, 0xEF,
+	0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0,
+	0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0,
+	0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0,
+	0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0,
+	0xF0, 0xF0, 0xF1, 0xF1, 0xF1, 0xF1, 0xF1, 0xF1,
+	0xF1, 0xF1, 0xF1, 0xF1, 0xF1, 0xF1, 0xF1, 0xF1,
+	0xF1, 0xF1, 0xF1, 0xF1, 0xF1, 0xF1, 0xF1, 0xF1,
+	0xF1, 0xF1, 0xF1, 0xF1, 0xF1, 0xF1, 0xF1, 0xF1,
+	0xF1, 0xF1, 0xF1, 0xF1, 0xF2, 0xF2, 0xF2, 0xF2,
+	0xF2, 0xF2, 0xF2, 0xF2, 0xF2, 0xF2, 0xF2, 0xF2,
+	0xF2, 0xF2, 0xF2, 0xF2, 0xF2, 0xF2, 0xF2, 0xF2,
+	0xF2, 0xF2, 0xF2, 0xF2, 0xF2, 0xF2, 0xF2, 0xF2,
+	0xF2, 0xF2, 0xF2, 0xF2, 0xF2, 0xF2, 0xF3, 0xF3,
+	0xF3, 0xF3, 0xF3, 0xF3, 0xF3, 0xF3, 0xF3, 0xF3,
+	0xF3, 0xF3, 0xF3, 0xF3, 0xF3, 0xF3, 0xF3, 0xF3,
+	0xF3, 0xF3, 0xF3, 0xF3, 0xF3, 0xF3, 0xF3, 0xF3,
+	0xF3, 0xF3, 0xF3, 0xF3, 0xF3, 0xF3, 0xF3, 0xF3,
+	0xF4, 0xF4, 0xF4, 0xF4, 0xF4, 0xF4, 0xF4, 0xF4,
+	0xF4, 0xF4, 0xF4, 0xF4, 0xF4, 0xF4, 0xF4, 0xF4,
+	0xF4, 0xF4, 0xF4, 0xF4, 0xF4, 0xF4, 0xF4, 0xF4,
+	0xF4, 0xF4, 0xF4, 0xF4, 0xF4, 0xF4, 0xF4, 0xF4,
+	0xF4, 0xF4, 0xF5, 0xF5, 0xF5, 0xF5, 0xF5, 0xF5,
+	0xF5, 0xF5, 0xF5, 0xF5, 0xF5, 0xF5, 0xF5, 0xF5,
+	0xF5, 0xF5, 0xF5, 0xF5, 0xF5, 0xF5, 0xF5, 0xF5,
+	0xF5, 0xF5, 0xF5, 0xF5, 0xF5, 0xF5, 0xF5, 0xF5,
+	0xF5, 0xF5, 0xF5, 0xF5, 0xF5, 0xF6, 0xF6, 0xF6,
+	0xF6, 0xF6, 0xF6, 0xF6, 0xF6, 0xF6, 0xF6, 0xF6,
+	0xF6, 0xF6, 0xF6, 0xF6, 0xF6, 0xF6, 0xF6, 0xF6,
+	0xF6, 0xF6, 0xF6, 0xF6, 0xF6, 0xF6, 0xF6, 0xF6,
+	0xF6, 0xF6, 0xF6, 0xF6, 0xF6, 0xF6, 0xF6, 0xF6,
+	0xF7, 0xF7, 0xF7, 0xF7, 0xF7, 0xF7, 0xF7, 0xF7,
+	0xF7, 0xF7, 0xF7, 0xF7, 0xF7, 0xF7, 0xF7, 0xF7,
+	0xF7, 0xF7, 0xF7, 0xF7, 0xF7, 0xF7, 0xF7, 0xF7,
+	0xF7, 0xF7, 0xF7, 0xF7, 0xF7, 0xF7, 0xF7, 0xF7,
+	0xF7, 0xF7, 0xF7, 0xF8, 0xF8, 0xF8, 0xF8, 0xF8,
+	0xF8, 0xF8, 0xF8, 0xF8, 0xF8, 0xF8, 0xF8, 0xF8,
+	0xF8, 0xF8, 0xF8, 0xF8, 0xF8, 0xF8, 0xF8, 0xF8,
+	0xF8, 0xF8, 0xF8, 0xF8, 0xF8, 0xF8, 0xF8, 0xF8,
+	0xF8, 0xF8, 0xF8, 0xF8, 0xF8, 0xF8, 0xF9, 0xF9,
+	0xF9, 0xF9, 0xF9, 0xF9, 0xF9, 0xF9, 0xF9, 0xF9,
+	0xF9, 0xF9, 0xF9, 0xF9, 0xF9, 0xF9, 0xF9, 0xF9,
+	0xF9, 0xF9, 0xF9, 0xF9, 0xF9, 0xF9, 0xF9, 0xF9,
+	0xF9, 0xF9, 0xF9, 0xF9, 0xF9, 0xF9, 0xF9, 0xF9,
+	0xF9, 0xFA, 0xFA, 0xFA, 0xFA, 0xFA, 0xFA, 0xFA,
+	0xFA, 0xFA, 0xFA, 0xFA, 0xFA, 0xFA, 0xFA, 0xFA,
+	0xFA, 0xFA, 0xFA, 0xFA, 0xFA, 0xFA, 0xFA, 0xFA,
+	0xFA, 0xFA, 0xFA, 0xFA, 0xFA, 0xFA, 0xFA, 0xFA,
+	0xFA, 0xFA, 0xFA, 0xFA, 0xFA, 0xFB, 0xFB, 0xFB,
+	0xFB, 0xFB, 0xFB, 0xFB, 0xFB, 0xFB, 0xFB, 0xFB,
+	0xFB, 0xFB, 0xFB, 0xFB, 0xFB, 0xFB, 0xFB, 0xFB,
+	0xFB, 0xFB, 0xFB, 0xFB, 0xFB, 0xFB, 0xFB, 0xFB,
+	0xFB, 0xFB, 0xFB, 0xFB, 0xFB, 0xFB, 0xFB, 0xFB,
+	0xFB, 0xFC, 0xFC, 0xFC, 0xFC, 0xFC, 0xFC, 0xFC,
+	0xFC, 0xFC, 0xFC, 0xFC, 0xFC, 0xFC, 0xFC, 0xFC,
+	0xFC, 0xFC, 0xFC, 0xFC, 0xFC, 0xFC, 0xFC, 0xFC,
+	0xFC, 0xFC, 0xFC, 0xFC, 0xFC, 0xFC, 0xFC, 0xFC,
+	0xFC, 0xFC, 0xFC, 0xFC, 0xFC, 0xFD, 0xFD, 0xFD,
+	0xFD, 0xFD, 0xFD, 0xFD, 0xFD, 0xFD, 0xFD, 0xFD,
+	0xFD, 0xFD, 0xFD, 0xFD, 0xFD, 0xFD, 0xFD, 0xFD,
+	0xFD, 0xFD, 0xFD, 0xFD, 0xFD, 0xFD, 0xFD, 0xFD,
+	0xFD, 0xFD, 0xFD, 0xFD, 0xFD, 0xFD, 0xFD, 0xFD,
+	0xFD, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE,
+	0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE,
+	0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE,
+	0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE,
+	0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+};
+
+void mdp4_mixer_gc_lut_setup(int mixer_num)
+{
+	unsigned char *base;
+	uint32 data;
+	char val;
+	int i, off;
+
+	if (mixer_num) 	/* mixer number, /dev/fb0, /dev/fb1 */
+		base = MDP_BASE + MDP4_OVERLAYPROC1_BASE;/* 0x18000 */
+	else
+		base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
+
+	base += 0x4000;	/* GC_LUT offset */
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	off = 0;
+	for (i = 0; i < 4096; i++) {
+		val = gc_lut[i];
+		data = (val << 16 | val << 8 | val); /* R, B, and G are same */
+		outpdw(base + off, data);
+		off += 4;
+	}
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+
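+/*
+ * 256-entry, 12-bit inverse gamma correction (IGC) curve; programmed per
+ * color component into the VG (video) pipes by mdp4_vg_igc_lut_setup()
+ * below.
+ */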
+uint32 igc_video_lut[] = {	 /* non linear */
+	0x0, 0x1, 0x2, 0x4, 0x5, 0x6, 0x7, 0x9,
+	0xA, 0xB, 0xC, 0xE, 0xF, 0x10, 0x12, 0x14,
+	0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F, 0x21, 0x23,
+	0x25, 0x28, 0x2A, 0x2D, 0x30, 0x32, 0x35, 0x38,
+	0x3B, 0x3E, 0x42, 0x45, 0x48, 0x4C, 0x4F, 0x53,
+	0x57, 0x5B, 0x5F, 0x63, 0x67, 0x6B, 0x70, 0x74,
+	0x79, 0x7E, 0x83, 0x88, 0x8D, 0x92, 0x97, 0x9C,
+	0xA2, 0xA8, 0xAD, 0xB3, 0xB9, 0xBF, 0xC5, 0xCC,
+	0xD2, 0xD8, 0xDF, 0xE6, 0xED, 0xF4, 0xFB, 0x102,
+	0x109, 0x111, 0x118, 0x120, 0x128, 0x130, 0x138, 0x140,
+	0x149, 0x151, 0x15A, 0x162, 0x16B, 0x174, 0x17D, 0x186,
+	0x190, 0x199, 0x1A3, 0x1AC, 0x1B6, 0x1C0, 0x1CA, 0x1D5,
+	0x1DF, 0x1EA, 0x1F4, 0x1FF, 0x20A, 0x215, 0x220, 0x22B,
+	0x237, 0x242, 0x24E, 0x25A, 0x266, 0x272, 0x27F, 0x28B,
+	0x298, 0x2A4, 0x2B1, 0x2BE, 0x2CB, 0x2D8, 0x2E6, 0x2F3,
+	0x301, 0x30F, 0x31D, 0x32B, 0x339, 0x348, 0x356, 0x365,
+	0x374, 0x383, 0x392, 0x3A1, 0x3B1, 0x3C0, 0x3D0, 0x3E0,
+	0x3F0, 0x400, 0x411, 0x421, 0x432, 0x443, 0x454, 0x465,
+	0x476, 0x487, 0x499, 0x4AB, 0x4BD, 0x4CF, 0x4E1, 0x4F3,
+	0x506, 0x518, 0x52B, 0x53E, 0x551, 0x565, 0x578, 0x58C,
+	0x5A0, 0x5B3, 0x5C8, 0x5DC, 0x5F0, 0x605, 0x61A, 0x62E,
+	0x643, 0x659, 0x66E, 0x684, 0x699, 0x6AF, 0x6C5, 0x6DB,
+	0x6F2, 0x708, 0x71F, 0x736, 0x74D, 0x764, 0x77C, 0x793,
+	0x7AB, 0x7C3, 0x7DB, 0x7F3, 0x80B, 0x824, 0x83D, 0x855,
+	0x86F, 0x888, 0x8A1, 0x8BB, 0x8D4, 0x8EE, 0x908, 0x923,
+	0x93D, 0x958, 0x973, 0x98E, 0x9A9, 0x9C4, 0x9DF, 0x9FB,
+	0xA17, 0xA33, 0xA4F, 0xA6C, 0xA88, 0xAA5, 0xAC2, 0xADF,
+	0xAFC, 0xB19, 0xB37, 0xB55, 0xB73, 0xB91, 0xBAF, 0xBCE,
+	0xBEC, 0xC0B, 0xC2A, 0xC4A, 0xC69, 0xC89, 0xCA8, 0xCC8,
+	0xCE8, 0xD09, 0xD29, 0xD4A, 0xD6B, 0xD8C, 0xDAD, 0xDCF,
+	0xDF0, 0xE12, 0xE34, 0xE56, 0xE79, 0xE9B, 0xEBE, 0xEE1,
+	0xF04, 0xF27, 0xF4B, 0xF6E, 0xF92, 0xFB6, 0xFDB, 0xFFF,
+};
+
+void mdp4_vg_igc_lut_setup(int vp_num)
+{
+	unsigned char *base;
+	int i, voff, off;
+	uint32 data, val;
+
+	voff = MDP4_VIDEO_OFF * vp_num;
+	base = MDP_BASE + MDP4_VIDEO_BASE + voff + 0x5000;
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	off = 0;
+	for (i = 0; i < 256; i++) {
+		val = igc_video_lut[i];
+		data = (val << 16 | val);	/* color 0 and 1 */
+		outpdw(base + off, data);
+		outpdw(base + off + 0x800, val);	/* color 2 */
+		off += 4;
+	}
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+
+uint32 igc_rgb_lut[] = {   /* linear */
+	0x0, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x70,
+	0x80, 0x91, 0xA1, 0xB1, 0xC1, 0xD1, 0xE1, 0xF1,
+	0x101, 0x111, 0x121, 0x131, 0x141, 0x151, 0x161, 0x171,
+	0x181, 0x191, 0x1A2, 0x1B2, 0x1C2, 0x1D2, 0x1E2, 0x1F2,
+	0x202, 0x212, 0x222, 0x232, 0x242, 0x252, 0x262, 0x272,
+	0x282, 0x292, 0x2A2, 0x2B3, 0x2C3, 0x2D3, 0x2E3, 0x2F3,
+	0x303, 0x313, 0x323, 0x333, 0x343, 0x353, 0x363, 0x373,
+	0x383, 0x393, 0x3A3, 0x3B3, 0x3C4, 0x3D4, 0x3E4, 0x3F4,
+	0x404, 0x414, 0x424, 0x434, 0x444, 0x454, 0x464, 0x474,
+	0x484, 0x494, 0x4A4, 0x4B4, 0x4C4, 0x4D5, 0x4E5, 0x4F5,
+	0x505, 0x515, 0x525, 0x535, 0x545, 0x555, 0x565, 0x575,
+	0x585, 0x595, 0x5A5, 0x5B5, 0x5C5, 0x5D5, 0x5E6, 0x5F6,
+	0x606, 0x616, 0x626, 0x636, 0x646, 0x656, 0x666, 0x676,
+	0x686, 0x696, 0x6A6, 0x6B6, 0x6C6, 0x6D6, 0x6E6, 0x6F7,
+	0x707, 0x717, 0x727, 0x737, 0x747, 0x757, 0x767, 0x777,
+	0x787, 0x797, 0x7A7, 0x7B7, 0x7C7, 0x7D7, 0x7E7, 0x7F7,
+	0x808, 0x818, 0x828, 0x838, 0x848, 0x858, 0x868, 0x878,
+	0x888, 0x898, 0x8A8, 0x8B8, 0x8C8, 0x8D8, 0x8E8, 0x8F8,
+	0x908, 0x919, 0x929, 0x939, 0x949, 0x959, 0x969, 0x979,
+	0x989, 0x999, 0x9A9, 0x9B9, 0x9C9, 0x9D9, 0x9E9, 0x9F9,
+	0xA09, 0xA19, 0xA2A, 0xA3A, 0xA4A, 0xA5A, 0xA6A, 0xA7A,
+	0xA8A, 0xA9A, 0xAAA, 0xABA, 0xACA, 0xADA, 0xAEA, 0xAFA,
+	0xB0A, 0xB1A, 0xB2A, 0xB3B, 0xB4B, 0xB5B, 0xB6B, 0xB7B,
+	0xB8B, 0xB9B, 0xBAB, 0xBBB, 0xBCB, 0xBDB, 0xBEB, 0xBFB,
+	0xC0B, 0xC1B, 0xC2B, 0xC3B, 0xC4C, 0xC5C, 0xC6C, 0xC7C,
+	0xC8C, 0xC9C, 0xCAC, 0xCBC, 0xCCC, 0xCDC, 0xCEC, 0xCFC,
+	0xD0C, 0xD1C, 0xD2C, 0xD3C, 0xD4C, 0xD5D, 0xD6D, 0xD7D,
+	0xD8D, 0xD9D, 0xDAD, 0xDBD, 0xDCD, 0xDDD, 0xDED, 0xDFD,
+	0xE0D, 0xE1D, 0xE2D, 0xE3D, 0xE4D, 0xE5D, 0xE6E, 0xE7E,
+	0xE8E, 0xE9E, 0xEAE, 0xEBE, 0xECE, 0xEDE, 0xEEE, 0xEFE,
+	0xF0E, 0xF1E, 0xF2E, 0xF3E, 0xF4E, 0xF5E, 0xF6E, 0xF7F,
+	0xF8F, 0xF9F, 0xFAF, 0xFBF, 0xFCF, 0xFDF, 0xFEF, 0xFFF,
+};
+
+void mdp4_rgb_igc_lut_setup(int num)
+{
+	unsigned char *base;
+	int i, voff, off;
+	uint32 data, val;
+
+	voff = MDP4_RGB_OFF * num;
+	base = MDP_BASE + MDP4_RGB_BASE + voff + 0x5000;
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	off = 0;
+	for (i = 0; i < 256; i++) {
+		val = igc_rgb_lut[i];
+		data = (val << 16 | val);	/* color 0 and 1 */
+		outpdw(base + off, data);
+		outpdw(base + off + 0x800, val);	/* color 2 */
+		off += 4;
+	}
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+
+uint32 mdp4_rgb_igc_lut_cvt(uint32 ndx)
+{
+	return igc_rgb_lut[ndx & 0x0ff];
+}
+
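+/*
+ * Pack a signed strength value into a 32-bit table word of eight 4-bit
+ * entries: negative strengths start from the 0x11111111 pattern,
+ * non-negative ones from 0x88888888.  |value| is quantised to a level of
+ * 0..7 in steps of 16 and the level is added to every nibble.  Level 0
+ * returns 0x0 (index 0) or 0x20000000 (index 1); for non-negative
+ * strengths the top nibble of the index-1 word is cleared.
+ */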
+uint32_t mdp4_ss_table_value(int8_t value, int8_t index)
+{
+	uint32_t out = 0x0;
+	int8_t level = -1;
+	uint32_t mask = 0xffffffff;
+
+	if (value < 0) {
+		if (value == -128)
+			value = 127;
+		else
+			value = -value;
+		out = 0x11111111;
+	} else {
+		out = 0x88888888;
+		mask = 0x0fffffff;
+	}
+
+	if (value == 0)
+		level = 0;
+	else {
+		while (value > 0 && level < 7) {
+			level++;
+			value -= 16;
+		}
+	}
+
+	if (level == 0) {
+		if (index == 0)
+			out = 0x0;
+		else
+			out = 0x20000000;
+	} else {
+		out += (0x11111111 * level);
+		if (index == 1)
+			out &= mask;
+	}
+
+	return out;
+}
+
diff --git a/drivers/video/msm/mdp_csc_table.h b/drivers/video/msm/mdp_csc_table.h
index d1cde30..a0f72c0 100644
--- a/drivers/video/msm/mdp_csc_table.h
+++ b/drivers/video/msm/mdp_csc_table.h
@@ -1,4 +1,4 @@
-/* drivers/video/msm_fb/mdp_csc_table.h
+/* drivers/video/msm/mdp_csc_table.h
  *
  * Copyright (C) 2007 QUALCOMM Incorporated
  * Copyright (C) 2007 Google Incorporated
@@ -16,57 +16,116 @@
 static struct {
 	uint32_t reg;
 	uint32_t val;
-} csc_table[] = {
-	{ 0x40400, 0x83 },
-	{ 0x40404, 0x102 },
-	{ 0x40408, 0x32 },
-	{ 0x4040c, 0xffffffb5 },
-	{ 0x40410, 0xffffff6c },
-	{ 0x40414, 0xe1 },
-	{ 0x40418, 0xe1 },
-	{ 0x4041c, 0xffffff45 },
-	{ 0x40420, 0xffffffdc },
-	{ 0x40440, 0x254 },
-	{ 0x40444, 0x0 },
-	{ 0x40448, 0x331 },
-	{ 0x4044c, 0x254 },
-	{ 0x40450, 0xffffff38 },
-	{ 0x40454, 0xfffffe61 },
-	{ 0x40458, 0x254 },
-	{ 0x4045c, 0x409 },
-	{ 0x40460, 0x0 },
-	{ 0x40480, 0x5d },
-	{ 0x40484, 0x13a },
-	{ 0x40488, 0x20 },
-	{ 0x4048c, 0xffffffcd },
-	{ 0x40490, 0xffffff54 },
-	{ 0x40494, 0xe1 },
-	{ 0x40498, 0xe1 },
-	{ 0x4049c, 0xffffff35 },
-	{ 0x404a0, 0xffffffec },
-	{ 0x404c0, 0x254 },
-	{ 0x404c4, 0x0 },
-	{ 0x404c8, 0x396 },
-	{ 0x404cc, 0x254 },
-	{ 0x404d0, 0xffffff94 },
-	{ 0x404d4, 0xfffffef0 },
-	{ 0x404d8, 0x254 },
-	{ 0x404dc, 0x43a },
-	{ 0x404e0, 0x0 },
-	{ 0x40500, 0x10 },
-	{ 0x40504, 0x80 },
-	{ 0x40508, 0x80 },
-	{ 0x40540, 0x10 },
-	{ 0x40544, 0x80 },
-	{ 0x40548, 0x80 },
-	{ 0x40580, 0x10 },
-	{ 0x40584, 0xeb },
-	{ 0x40588, 0x10 },
-	{ 0x4058c, 0xf0 },
-	{ 0x405c0, 0x10 },
-	{ 0x405c4, 0xeb },
-	{ 0x405c8, 0x10 },
-	{ 0x405cc, 0xf0 },
+} csc_matrix_config_table[] = {
+	/* RGB -> YUV primary forward matrix (set1). */
+	{ MDP_CSC_PFMVn(0), 0x83 },
+	{ MDP_CSC_PFMVn(1), 0x102 },
+	{ MDP_CSC_PFMVn(2), 0x32 },
+	{ MDP_CSC_PFMVn(3), 0xffffffb5 },
+	{ MDP_CSC_PFMVn(4), 0xffffff6c },
+	{ MDP_CSC_PFMVn(5), 0xe1 },
+	{ MDP_CSC_PFMVn(6), 0xe1 },
+	{ MDP_CSC_PFMVn(7), 0xffffff45 },
+	{ MDP_CSC_PFMVn(8), 0xffffffdc },
+
+	/* YUV -> RGB primary reverse matrix (set2) */
+	{ MDP_CSC_PRMVn(0), 0x254 },
+	{ MDP_CSC_PRMVn(1), 0x0 },
+	{ MDP_CSC_PRMVn(2), 0x331 },
+	{ MDP_CSC_PRMVn(3), 0x254 },
+	{ MDP_CSC_PRMVn(4), 0xffffff38 },
+	{ MDP_CSC_PRMVn(5), 0xfffffe61 },
+	{ MDP_CSC_PRMVn(6), 0x254 },
+	{ MDP_CSC_PRMVn(7), 0x409 },
+	{ MDP_CSC_PRMVn(8), 0x0 },
+
+#ifndef CONFIG_MSM_MDP31
+	/* For MDP 2.2/3.0 */
+
+	/* primary limit vector */
+	{ MDP_CSC_PLVn(0), 0x10 },
+	{ MDP_CSC_PLVn(1), 0xeb },
+	{ MDP_CSC_PLVn(2), 0x10 },
+	{ MDP_CSC_PLVn(3), 0xf0 },
+
+	/* primary bias vector */
+	{ MDP_CSC_PBVn(0), 0x10 },
+	{ MDP_CSC_PBVn(1), 0x80 },
+	{ MDP_CSC_PBVn(2), 0x80 },
+
+#else /* CONFIG_MSM_MDP31 */
+
+	/* limit vectors configuration */
+	/* rgb -> yuv (set1) pre-limit vector */
+	{ MDP_PPP_CSC_PRE_LV1n(0), 0x10 },
+	{ MDP_PPP_CSC_PRE_LV1n(1), 0xeb },
+	{ MDP_PPP_CSC_PRE_LV1n(2), 0x10 },
+	{ MDP_PPP_CSC_PRE_LV1n(3), 0xf0 },
+	{ MDP_PPP_CSC_PRE_LV1n(4), 0x10 },
+	{ MDP_PPP_CSC_PRE_LV1n(5), 0xf0 },
+
+	/* rgb -> yuv (set1) post-limit vector */
+	{ MDP_PPP_CSC_POST_LV1n(0), 0x0 },
+	{ MDP_PPP_CSC_POST_LV1n(1), 0xff },
+	{ MDP_PPP_CSC_POST_LV1n(2), 0x0 },
+	{ MDP_PPP_CSC_POST_LV1n(3), 0xff },
+	{ MDP_PPP_CSC_POST_LV1n(4), 0x0 },
+	{ MDP_PPP_CSC_POST_LV1n(5), 0xff },
+
+	/* yuv -> rgb (set2) pre-limit vector */
+	{ MDP_PPP_CSC_PRE_LV2n(0), 0x0 },
+	{ MDP_PPP_CSC_PRE_LV2n(1), 0xff },
+	{ MDP_PPP_CSC_PRE_LV2n(2), 0x0 },
+	{ MDP_PPP_CSC_PRE_LV2n(3), 0xff },
+	{ MDP_PPP_CSC_PRE_LV2n(4), 0x0 },
+	{ MDP_PPP_CSC_PRE_LV2n(5), 0xff },
+
+	/* yuv -> rgb (set2) post-limit vector */
+	{ MDP_PPP_CSC_POST_LV2n(0), 0x10 },
+	{ MDP_PPP_CSC_POST_LV2n(1), 0xeb },
+	{ MDP_PPP_CSC_POST_LV2n(2), 0x10 },
+	{ MDP_PPP_CSC_POST_LV2n(3), 0xf0 },
+	{ MDP_PPP_CSC_POST_LV2n(4), 0x10 },
+	{ MDP_PPP_CSC_POST_LV2n(5), 0xf0 },
+
+	/* bias vectors configuration */
+
+	/* XXX: why is set2 used for rgb -> yuv, but set1 used for
+	 * yuv -> rgb?  This seems to be the reverse of the other
+	 * vectors. */
+
+	/* RGB -> YUV pre-bias vector... */
+	{ MDP_PPP_CSC_PRE_BV2n(0), 0 },
+	{ MDP_PPP_CSC_PRE_BV2n(1), 0 },
+	{ MDP_PPP_CSC_PRE_BV2n(2), 0 },
+
+	/* RGB -> YUV post-bias vector */
+	{ MDP_PPP_CSC_POST_BV2n(0), 0x10 },
+	{ MDP_PPP_CSC_POST_BV2n(1), 0x80 },
+	{ MDP_PPP_CSC_POST_BV2n(2), 0x80 },
+
+	/* YUV -> RGB pre-bias vector... */
+	{ MDP_PPP_CSC_PRE_BV1n(0), 0x1f0 },
+	{ MDP_PPP_CSC_PRE_BV1n(1), 0x180 },
+	{ MDP_PPP_CSC_PRE_BV1n(2), 0x180 },
+
+	/* YUV -> RGB post-bias vector */
+	{ MDP_PPP_CSC_POST_BV1n(0), 0 },
+	{ MDP_PPP_CSC_POST_BV1n(1), 0 },
+	{ MDP_PPP_CSC_POST_BV1n(2), 0 },
+
+	/* luma filter coefficients */
+	{ MDP_PPP_DEINT_COEFFn(0), 0x3e0 },
+	{ MDP_PPP_DEINT_COEFFn(1), 0x360 },
+	{ MDP_PPP_DEINT_COEFFn(2), 0x120 },
+	{ MDP_PPP_DEINT_COEFFn(3), 0x140 },
+#endif
+};
+
+static struct {
+	uint32_t reg;
+	uint32_t val;
+} csc_color_lut[] = {
 	{ 0x40800, 0x0 },
 	{ 0x40804, 0x151515 },
 	{ 0x40808, 0x1d1d1d },
diff --git a/drivers/video/msm/mdp_cursor.c b/drivers/video/msm/mdp_cursor.c
new file mode 100644
index 0000000..f8c08e3
--- /dev/null
+++ b/drivers/video/msm/mdp_cursor.c
@@ -0,0 +1,264 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/hrtimer.h>
+
+#include <mach/hardware.h>
+#include <asm/io.h>
+
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+
+#include <linux/fb.h>
+
+#include "mdp.h"
+#include "msm_fb.h"
+
+static int cursor_enabled;
+
+#include "mdp4.h"
+
+#if	defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDP40)
+static struct workqueue_struct *mdp_cursor_ctrl_wq;
+static struct work_struct mdp_cursor_ctrl_worker;
+
+/* cursor configuration */
+static void *cursor_buf_phys;
+static __u32 width, height, bg_color;
+static int calpha_en, transp_en, alpha;
+static int sync_disabled = -1;
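+
+/*
+ * sync_disabled == 0 means a cursor register update has been staged and is
+ * waiting to be committed by mdp_hw_cursor_done(); any non-zero value means
+ * no update is pending.
+ */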
+
+void mdp_cursor_ctrl_workqueue_handler(struct work_struct *work)
+{
+	unsigned long flag;
+
+	/* disable vsync */
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	mdp_disable_irq(MDP_OVERLAY0_TERM);
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+}
+
+void mdp_hw_cursor_init(void)
+{
+	mdp_cursor_ctrl_wq =
+			create_singlethread_workqueue("mdp_cursor_ctrl_wq");
+	INIT_WORK(&mdp_cursor_ctrl_worker, mdp_cursor_ctrl_workqueue_handler);
+}
+
+void mdp_hw_cursor_done(void)
+{
+	/* Cursor configuration:
+	 *
+	 * This is done in DMA_P_DONE ISR because the following registers are
+	 * not double buffered in hardware:
+	 *
+	 * MDP_DMA_P_CURSOR_SIZE, address = 0x90044
+	 * MDP_DMA_P_CURSOR_BLEND_CONFIG, address = 0x90060
+	 * MDP_DMA_P_CURSOR_BLEND_PARAM, address = 0x90064
+	 * MDP_DMA_P_CURSOR_BLEND_TRANS_LOW, address = 0x90068
+	 * MDP_DMA_P_CURSOR_BLEND_TRANS_HIG, address = 0x9006C
+	 *
+	 * Moving this code out of the ISR will cause the MDP to underrun!
+	 */
+	spin_lock(&mdp_spin_lock);
+	if (sync_disabled) {
+		spin_unlock(&mdp_spin_lock);
+		return;
+	}
+
+	MDP_OUTP(MDP_BASE + 0x90044, (height << 16) | width);
+	MDP_OUTP(MDP_BASE + 0x90048, cursor_buf_phys);
+
+	MDP_OUTP(MDP_BASE + 0x90060,
+		 (transp_en << 3) | (calpha_en << 1) |
+		 (inp32(MDP_BASE + 0x90060) & 0x1));
+
+	MDP_OUTP(MDP_BASE + 0x90064, (alpha << 24));
+	MDP_OUTP(MDP_BASE + 0x90068, (0xffffff & bg_color));
+	MDP_OUTP(MDP_BASE + 0x9006C, (0xffffff & bg_color));
+
+	/* enable/disable the cursor as per the last request */
+	if (cursor_enabled && !(inp32(MDP_BASE + 0x90060) & (0x1)))
+		MDP_OUTP(MDP_BASE + 0x90060, inp32(MDP_BASE + 0x90060) | 0x1);
+	else if (!cursor_enabled && (inp32(MDP_BASE + 0x90060) & (0x1)))
+		MDP_OUTP(MDP_BASE + 0x90060,
+					inp32(MDP_BASE + 0x90060) & (~0x1));
+
+	/* enqueue the task to disable MDP interrupts */
+	queue_work(mdp_cursor_ctrl_wq, &mdp_cursor_ctrl_worker);
+
+	/* update done */
+	sync_disabled = 1;
+	spin_unlock(&mdp_spin_lock);
+}
+
+static void mdp_hw_cursor_enable_vsync(void)
+{
+	/* if the cursor registers were updated (once or more) since the
+	 * last vsync, enable the vsync interrupt (if not already enabled)
+	 * for the next update
+	 */
+	if (sync_disabled) {
+
+		/* cancel pending task to disable MDP interrupts */
+		if (work_pending(&mdp_cursor_ctrl_worker))
+			cancel_work_sync(&mdp_cursor_ctrl_worker);
+		else
+			/* enable irq */
+			mdp_enable_irq(MDP_OVERLAY0_TERM);
+
+		sync_disabled = 0;
+
+		/* enable vsync intr */
+		outp32(MDP_INTR_CLEAR, INTR_OVERLAY0_DONE);
+		mdp_intr_mask |= INTR_OVERLAY0_DONE;
+		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+	}
+}
+
+int mdp_hw_cursor_sync_update(struct fb_info *info, struct fb_cursor *cursor)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	struct fb_image *img = &cursor->image;
+	unsigned long flag;
+	int sync_needed = 0, ret = 0;
+
+	if ((img->width > MDP_CURSOR_WIDTH) ||
+	    (img->height > MDP_CURSOR_HEIGHT) ||
+	    (img->depth != 32))
+		return -EINVAL;
+
+	if (cursor->set & FB_CUR_SETPOS)
+		MDP_OUTP(MDP_BASE + 0x9004c, (img->dy << 16) | img->dx);
+
+	if (cursor->set & FB_CUR_SETIMAGE) {
+		ret = copy_from_user(mfd->cursor_buf, img->data,
+					img->width*img->height*4);
+		if (ret)
+			return ret;
+
+		spin_lock_irqsave(&mdp_spin_lock, flag);
+		if (img->bg_color == 0xffffffff)
+			transp_en = 0;
+		else
+			transp_en = 1;
+
+		alpha = (img->fg_color & 0xff000000) >> 24;
+
+		if (alpha)
+			calpha_en = 0x2; /* xrgb */
+		else
+			calpha_en = 0x1; /* argb */
+
+		/* cursor parameters */
+		height = img->height;
+		width = img->width;
+		bg_color = img->bg_color;
+		cursor_buf_phys = mfd->cursor_buf_phys;
+
+		sync_needed = 1;
+	} else
+		spin_lock_irqsave(&mdp_spin_lock, flag);
+
+	if ((cursor->enable) && (!cursor_enabled)) {
+		cursor_enabled = 1;
+		sync_needed = 1;
+	} else if ((!cursor->enable) && (cursor_enabled)) {
+		cursor_enabled = 0;
+		sync_needed = 1;
+	}
+
+	/* if sync cursor update is needed, enable vsync */
+	if (sync_needed)
+		mdp_hw_cursor_enable_vsync();
+
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+
+	return 0;
+}
+#endif /* CONFIG_FB_MSM_OVERLAY && CONFIG_FB_MSM_MDP40 */
+
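+/*
+ * Immediate (unsynchronized) cursor update: the cursor registers are
+ * programmed directly with the MDP block powered on, rather than being
+ * staged for the display-done interrupt as the sync variant above does.
+ */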
+int mdp_hw_cursor_update(struct fb_info *info, struct fb_cursor *cursor)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	struct fb_image *img = &cursor->image;
+	int calpha_en, transp_en;
+	int alpha;
+	int ret = 0;
+
+	if ((img->width > MDP_CURSOR_WIDTH) ||
+	    (img->height > MDP_CURSOR_HEIGHT) ||
+	    (img->depth != 32))
+		return -EINVAL;
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	if (cursor->set & FB_CUR_SETPOS)
+		MDP_OUTP(MDP_BASE + 0x9004c, (img->dy << 16) | img->dx);
+
+	if (cursor->set & FB_CUR_SETIMAGE) {
+		ret = copy_from_user(mfd->cursor_buf, img->data,
+					img->width*img->height*4);
+		if (ret)
+			return ret;
+
+		if (img->bg_color == 0xffffffff)
+			transp_en = 0;
+		else
+			transp_en = 1;
+
+		alpha = (img->fg_color & 0xff000000) >> 24;
+
+		if (alpha)
+			calpha_en = 0x2; /* xrgb */
+		else
+			calpha_en = 0x1; /* argb */
+
+		MDP_OUTP(MDP_BASE + 0x90044, (img->height << 16) | img->width);
+		MDP_OUTP(MDP_BASE + 0x90048, mfd->cursor_buf_phys);
+		/* order the writes to the cursor_buf before updating
+		 * the hardware */
+		dma_coherent_pre_ops();
+		MDP_OUTP(MDP_BASE + 0x90060,
+			 (transp_en << 3) | (calpha_en << 1) |
+			 (inp32(MDP_BASE + 0x90060) & 0x1));
+#ifdef CONFIG_FB_MSM_MDP40
+		MDP_OUTP(MDP_BASE + 0x90064, (alpha << 24));
+		MDP_OUTP(MDP_BASE + 0x90068, (0xffffff & img->bg_color));
+		MDP_OUTP(MDP_BASE + 0x9006C, (0xffffff & img->bg_color));
+#else
+		MDP_OUTP(MDP_BASE + 0x90064,
+			 (alpha << 24) | (0xffffff & img->bg_color));
+		MDP_OUTP(MDP_BASE + 0x90068, 0);
+#endif
+	}
+
+	if ((cursor->enable) && (!cursor_enabled)) {
+		cursor_enabled = 1;
+		MDP_OUTP(MDP_BASE + 0x90060, inp32(MDP_BASE + 0x90060) | 0x1);
+	} else if ((!cursor->enable) && (cursor_enabled)) {
+		cursor_enabled = 0;
+		MDP_OUTP(MDP_BASE + 0x90060,
+			 inp32(MDP_BASE + 0x90060) & (~0x1));
+	}
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+	return 0;
+}
diff --git a/drivers/video/msm/mdp_debugfs.c b/drivers/video/msm/mdp_debugfs.c
new file mode 100644
index 0000000..b89e8c7
--- /dev/null
+++ b/drivers/video/msm/mdp_debugfs.c
@@ -0,0 +1,1289 @@
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/hrtimer.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <mach/hardware.h>
+
+#include "mdp.h"
+#include "msm_fb.h"
+#ifdef CONFIG_FB_MSM_MDP40
+#include "mdp4.h"
+#endif
+#include "mddihosti.h"
+#include "tvenc.h"
+#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL
+#include "hdmi_msm.h"
+#endif
+
+#define MDP_DEBUG_BUF	2048
+
+static uint32	mdp_offset;
+static uint32	mdp_count;
+
+static char	debug_buf[MDP_DEBUG_BUF];
+
+/*
+ * MDP4
+ *
+ */
+
+static int mdp_offset_open(struct inode *inode, struct file *file)
+{
+	/* non-seekable */
+	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+	return 0;
+}
+
+static int mdp_offset_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static ssize_t mdp_offset_write(
+	struct file *file,
+	const char __user *buff,
+	size_t count,
+	loff_t *ppos)
+{
+	uint32 off, cnt;
+
+	if (count >= sizeof(debug_buf))
+		return -EFAULT;
+
+	if (copy_from_user(debug_buf, buff, count))
+		return -EFAULT;
+
+	debug_buf[count] = 0;	/* end of string */
+
+	sscanf(debug_buf, "%x %d", &off, &cnt);
+
+	if (cnt <= 0)
+		cnt = 1;
+
+	mdp_offset = off;
+	mdp_count = cnt;
+
+	printk(KERN_INFO "%s: offset=%x cnt=%d\n", __func__,
+				mdp_offset, mdp_count);
+
+	return count;
+}
+
+static ssize_t mdp_offset_read(
+	struct file *file,
+	char __user *buff,
+	size_t count,
+	loff_t *ppos)
+{
+	int len = 0;
+
+
+	if (*ppos)
+		return 0;	/* the end */
+
+	len = snprintf(debug_buf, sizeof(debug_buf), "0x%08x %d\n",
+					mdp_offset, mdp_count);
+	if (len < 0)
+		return 0;
+
+	if (copy_to_user(buff, debug_buf, len))
+		return -EFAULT;
+
+	*ppos += len;	/* increase offset */
+
+	return len;
+}
+
+static const struct file_operations mdp_off_fops = {
+	.open = mdp_offset_open,
+	.release = mdp_offset_release,
+	.read = mdp_offset_read,
+	.write = mdp_offset_write,
+};
+
+static int mdp_reg_open(struct inode *inode, struct file *file)
+{
+	/* non-seekable */
+	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+	return 0;
+}
+
+static int mdp_reg_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static ssize_t mdp_reg_write(
+	struct file *file,
+	const char __user *buff,
+	size_t count,
+	loff_t *ppos)
+{
+	uint32 off, data;
+	int cnt;
+
+	if (count >= sizeof(debug_buf))
+		return -EFAULT;
+
+	if (copy_from_user(debug_buf, buff, count))
+		return -EFAULT;
+
+	debug_buf[count] = 0;	/* end of string */
+
+	cnt = sscanf(debug_buf, "%x %x", &off, &data);
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	outpdw(MDP_BASE + off, data);
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+	printk(KERN_INFO "%s: addr=%x data=%x\n", __func__, off, data);
+
+	return count;
+}
+
+static ssize_t mdp_reg_read(
+	struct file *file,
+	char __user *buff,
+	size_t count,
+	loff_t *ppos)
+{
+	int len = 0;
+	uint32 data;
+	int i, j, off, dlen, num;
+	char *bp, *cp;
+	int tot = 0;
+
+
+	if (*ppos)
+		return 0;	/* the end */
+
+	j = 0;
+	num = 0;
+	bp = debug_buf;
+	cp = MDP_BASE + mdp_offset;
+	dlen = sizeof(debug_buf);
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	while (j++ < 8) {
+		len = snprintf(bp, dlen, "0x%08x: ", (int)cp);
+		tot += len;
+		bp += len;
+		dlen -= len;
+		off = 0;
+		i = 0;
+		while (i++ < 4) {
+			data = inpdw(cp + off);
+			len = snprintf(bp, dlen, "%08x ", data);
+			tot += len;
+			bp += len;
+			dlen -= len;
+			off += 4;
+			num++;
+			if (num >= mdp_count)
+				break;
+		}
+		*bp++ = '\n';
+		--dlen;
+		tot++;
+		cp += off;
+		if (num >= mdp_count)
+			break;
+	}
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	*bp = 0;
+	tot++;
+
+	if (copy_to_user(buff, debug_buf, tot))
+		return -EFAULT;
+
+	*ppos += tot;	/* increase offset */
+
+	return tot;
+}
+
+
+static const struct file_operations mdp_reg_fops = {
+	.open = mdp_reg_open,
+	.release = mdp_reg_release,
+	.read = mdp_reg_read,
+	.write = mdp_reg_write,
+};
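+
+/*
+ * Usage sketch for the "off"/"reg" pair created by mdp_debugfs_init()
+ * (assuming debugfs is mounted at /sys/kernel/debug):
+ *
+ *   echo "90000 8" > /sys/kernel/debug/mdp/off   # hex offset, word count
+ *   cat /sys/kernel/debug/mdp/reg                # dump the selected window
+ *   echo "90000 1" > /sys/kernel/debug/mdp/reg   # write 0x1 at MDP_BASE + 0x90000
+ */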
+
+#ifdef CONFIG_FB_MSM_MDP40
+static int mdp_stat_open(struct inode *inode, struct file *file)
+{
+	/* non-seekable */
+	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+	return 0;
+}
+
+static int mdp_stat_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
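+/* A write of any content to the "stat" file simply resets the mdp4_stat counters. */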
+static ssize_t mdp_stat_write(
+	struct file *file,
+	const char __user *buff,
+	size_t count,
+	loff_t *ppos)
+{
+	unsigned long flag;
+
+	if (count > sizeof(debug_buf))
+		return -EFAULT;
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	memset((char *)&mdp4_stat, 0 , sizeof(mdp4_stat));	/* reset */
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+
+	return count;
+}
+
+static ssize_t mdp_stat_read(
+	struct file *file,
+	char __user *buff,
+	size_t count,
+	loff_t *ppos)
+{
+	int len = 0;
+	int tot = 0;
+	int dlen;
+	char *bp;
+	unsigned long flag;
+
+
+	if (*ppos)
+		return 0;	/* the end */
+
+	bp = debug_buf;
+	dlen = sizeof(debug_buf);
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	len = snprintf(bp, dlen, "intr_total:    %08lu\n",
+					mdp4_stat.intr_tot);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "intr_dma_p:    %08lu\n",
+					mdp4_stat.intr_dma_p);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "intr_dma_s:    %08lu\n",
+					mdp4_stat.intr_dma_s);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "intr_dma_e:    %08lu\n",
+					mdp4_stat.intr_dma_e);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "intr_overlay0: %08lu\n",
+					mdp4_stat.intr_overlay0);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "intr_overlay1: %08lu\n",
+					mdp4_stat.intr_overlay1);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "underrun_primary: %08lu\n",
+					mdp4_stat.intr_underrun_p);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "underrun_external: %08lu\n\n",
+					mdp4_stat.intr_underrun_e);
+
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "intr_dsi  :    %08lu\n\n",
+					mdp4_stat.intr_dsi);
+
+	bp += len;
+	dlen -= len;
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+
+	len = snprintf(bp, dlen, "kickoff_mddi:      %08lu\n",
+					mdp4_stat.kickoff_mddi);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "kickoff_lcdc:      %08lu\n",
+					mdp4_stat.kickoff_lcdc);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "kickoff_dtv:       %08lu\n",
+					mdp4_stat.kickoff_dtv);
+
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "kickoff_atv:       %08lu\n",
+					mdp4_stat.kickoff_atv);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "kickoff_dsi:       %08lu\n\n",
+					mdp4_stat.kickoff_dsi);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "writeback:      %08lu\n",
+					mdp4_stat.writeback);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "overlay0_set:   %08lu\n",
+					mdp4_stat.overlay_set[0]);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "overlay0_unset: %08lu\n",
+					mdp4_stat.overlay_unset[0]);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "overlay0_play:  %08lu\n",
+					mdp4_stat.overlay_play[0]);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "overlay1_set:   %08lu\n",
+					mdp4_stat.overlay_set[1]);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "overlay1_unset: %08lu\n",
+					mdp4_stat.overlay_unset[1]);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "overlay1_play:  %08lu\n\n",
+					mdp4_stat.overlay_play[1]);
+
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "pipe_rgb1:  %08lu\n", mdp4_stat.pipe[0]);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "pipe_rgb2:  %08lu\n", mdp4_stat.pipe[1]);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "pipe_vg1:   %08lu\n", mdp4_stat.pipe[2]);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "pipe_vg2:   %08lu\n\n", mdp4_stat.pipe[3]);
+
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "dsi_clkoff: %08lu\n\n", mdp4_stat.dsi_clkoff);
+
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "err_mixer:  %08lu\n", mdp4_stat.err_mixer);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "err_size:   %08lu\n", mdp4_stat.err_size);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "err_scale:  %08lu\n", mdp4_stat.err_scale);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "err_format: %08lu\n", mdp4_stat.err_format);
+	bp += len;
+	dlen -= len;
+
+	tot = (uint32)bp - (uint32)debug_buf;
+	*bp = 0;
+	tot++;
+
+	if (tot < 0)
+		return 0;
+	if (copy_to_user(buff, debug_buf, tot))
+		return -EFAULT;
+
+	*ppos += tot;	/* increase offset */
+
+	return tot;
+}
+
+static const struct file_operations mdp_stat_fops = {
+	.open = mdp_stat_open,
+	.release = mdp_stat_release,
+	.read = mdp_stat_read,
+	.write = mdp_stat_write,
+};
+#endif
+
+/*
+ * MDDI
+ *
+ */
+
+struct mddi_reg {
+	char *name;
+	int off;
+};
+
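+/* Registers dumped, by name and offset, through the mddi and emdh "reg" files. */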
+static struct mddi_reg mddi_regs_list[] = {
+	{"MDDI_CMD", MDDI_CMD},	 	/* 0x0000 */
+	{"MDDI_VERSION", MDDI_VERSION},  /* 0x0004 */
+	{"MDDI_PRI_PTR", MDDI_PRI_PTR},  /* 0x0008 */
+	{"MDDI_BPS",  MDDI_BPS}, 	/* 0x0010 */
+	{"MDDI_SPM", MDDI_SPM}, 	/* 0x0014 */
+	{"MDDI_INT", MDDI_INT}, 	/* 0x0018 */
+	{"MDDI_INTEN", MDDI_INTEN},	/* 0x001c */
+	{"MDDI_REV_PTR", MDDI_REV_PTR},	/* 0x0020 */
+	{"MDDI_REV_SIZE", MDDI_REV_SIZE}, /* 0x0024 */
+	{"MDDI_STAT", MDDI_STAT},	/* 0x0028 */
+	{"MDDI_REV_RATE_DIV", MDDI_REV_RATE_DIV}, /* 0x002c */
+	{"MDDI_REV_CRC_ERR", MDDI_REV_CRC_ERR}, /* 0x0030 */
+	{"MDDI_TA1_LEN", MDDI_TA1_LEN}, /* 0x0034 */
+	{"MDDI_TA2_LEN", MDDI_TA2_LEN}, /* 0x0038 */
+	{"MDDI_TEST", MDDI_TEST}, 	/* 0x0040 */
+	{"MDDI_REV_PKT_CNT", MDDI_REV_PKT_CNT}, /* 0x0044 */
+	{"MDDI_DRIVE_HI", MDDI_DRIVE_HI},/* 0x0048 */
+	{"MDDI_DRIVE_LO", MDDI_DRIVE_LO},	/* 0x004c */
+	{"MDDI_DISP_WAKE", MDDI_DISP_WAKE},/* 0x0050 */
+	{"MDDI_REV_ENCAP_SZ", MDDI_REV_ENCAP_SZ}, /* 0x0054 */
+	{"MDDI_RTD_VAL", MDDI_RTD_VAL}, /* 0x0058 */
+	{"MDDI_PAD_CTL", MDDI_PAD_CTL},	 /* 0x0068 */
+	{"MDDI_DRIVER_START_CNT", MDDI_DRIVER_START_CNT}, /* 0x006c */
+	{"MDDI_CORE_VER", MDDI_CORE_VER}, /* 0x008c */
+	{"MDDI_FIFO_ALLOC", MDDI_FIFO_ALLOC}, /* 0x0090 */
+	{"MDDI_PAD_IO_CTL", MDDI_PAD_IO_CTL}, /* 0x00a0 */
+	{"MDDI_PAD_CAL", MDDI_PAD_CAL},  /* 0x00a4 */
+	{0, 0}
+};
+
+static int mddi_reg_open(struct inode *inode, struct file *file)
+{
+	/* non-seekable */
+	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+	return 0;
+}
+
+static int mddi_reg_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static void mddi_reg_write(int ndx, uint32 off, uint32 data)
+{
+	char *base;
+
+	if (ndx)
+		base = (char *)msm_emdh_base;
+	else
+		base = (char *)msm_pmdh_base;
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	writel(data, base + off);
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+	printk(KERN_INFO "%s: addr=%x data=%x\n",
+			__func__, (int)(base+off), (int)data);
+}
+
+static int mddi_reg_read(int ndx)
+{
+	struct mddi_reg *reg;
+	unsigned char *base;
+	int data;
+	char *bp;
+	int len = 0;
+	int tot = 0;
+	int dlen;
+
+	if (ndx)
+		base = msm_emdh_base;
+	else
+		base = msm_pmdh_base;
+
+	reg = mddi_regs_list;
+	bp = debug_buf;
+	dlen = sizeof(debug_buf);
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	while (reg->name) {
+		data = readl((u32)base + reg->off);
+		len = snprintf(bp, dlen, "%s:0x%08x\t\t= 0x%08x\n",
+					reg->name, reg->off, data);
+		tot += len;
+		bp += len;
+		dlen -= len;
+		reg++;
+	}
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	*bp = 0;
+	tot++;
+
+	return tot;
+}
+
+static ssize_t pmdh_reg_write(
+	struct file *file,
+	const char __user *buff,
+	size_t count,
+	loff_t *ppos)
+{
+	uint32 off, data;
+	int cnt;
+
+	if (count >= sizeof(debug_buf))
+		return -EFAULT;
+
+	if (copy_from_user(debug_buf, buff, count))
+		return -EFAULT;
+
+	debug_buf[count] = 0;	/* end of string */
+
+	cnt = sscanf(debug_buf, "%x %x", &off, &data);
+
+	mddi_reg_write(0, off, data);
+
+	return count;
+}
+
+static ssize_t pmdh_reg_read(
+	struct file *file,
+	char __user *buff,
+	size_t count,
+	loff_t *ppos)
+{
+	int tot = 0;
+
+	if (*ppos)
+		return 0;	/* the end */
+
+	tot = mddi_reg_read(0);	/* pmdh */
+
+	if (tot < 0)
+		return 0;
+	if (copy_to_user(buff, debug_buf, tot))
+		return -EFAULT;
+
+	*ppos += tot;	/* increase offset */
+
+	return tot;
+}
+
+
+static const struct file_operations pmdh_fops = {
+	.open = mddi_reg_open,
+	.release = mddi_reg_release,
+	.read = pmdh_reg_read,
+	.write = pmdh_reg_write,
+};
+
+
+
+#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDDI)
+static int vsync_reg_open(struct inode *inode, struct file *file)
+{
+	/* non-seekable */
+	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+	return 0;
+}
+
+static int vsync_reg_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static ssize_t vsync_reg_write(
+	struct file *file,
+	const char __user *buff,
+	size_t count,
+	loff_t *ppos)
+{
+	uint32 enable;
+	int cnt;
+
+	if (count >= sizeof(debug_buf))
+		return -EFAULT;
+
+	if (copy_from_user(debug_buf, buff, count))
+		return -EFAULT;
+
+	debug_buf[count] = 0;	/* end of string */
+
+	cnt = sscanf(debug_buf, "%x", &enable);
+
+	mdp_dmap_vsync_set(enable);
+
+	return count;
+}
+
+static ssize_t vsync_reg_read(
+	struct file *file,
+	char __user *buff,
+	size_t count,
+	loff_t *ppos)
+{
+	char *bp;
+	int len = 0;
+	int tot = 0;
+	int dlen;
+
+	if (*ppos)
+		return 0;	/* the end */
+
+	bp = debug_buf;
+	dlen = sizeof(debug_buf);
+	len = snprintf(bp, dlen, "%x\n", mdp_dmap_vsync_get());
+	tot += len;
+	bp += len;
+	*bp = 0;
+	tot++;
+
+	if (copy_to_user(buff, debug_buf, tot))
+		return -EFAULT;
+
+	*ppos += tot;	/* increase offset */
+
+	return tot;
+}
+
+
+static const struct file_operations vsync_fops = {
+	.open = vsync_reg_open,
+	.release = vsync_reg_release,
+	.read = vsync_reg_read,
+	.write = vsync_reg_write,
+};
+#endif
+
+static ssize_t emdh_reg_write(
+	struct file *file,
+	const char __user *buff,
+	size_t count,
+	loff_t *ppos)
+{
+	uint32 off, data;
+	int cnt;
+
+	if (count >= sizeof(debug_buf))
+		return -EFAULT;
+
+	if (copy_from_user(debug_buf, buff, count))
+		return -EFAULT;
+
+	debug_buf[count] = 0;	/* end of string */
+
+	cnt = sscanf(debug_buf, "%x %x", &off, &data);
+
+	mddi_reg_write(1, off, data);
+
+	return count;
+}
+
+static ssize_t emdh_reg_read(
+	struct file *file,
+	char __user *buff,
+	size_t count,
+	loff_t *ppos)
+{
+	int tot = 0;
+
+	if (*ppos)
+		return 0;	/* the end */
+
+	tot = mddi_reg_read(1);	/* emdh */
+
+	if (tot < 0)
+		return 0;
+	if (copy_to_user(buff, debug_buf, tot))
+		return -EFAULT;
+
+	*ppos += tot;	/* increase offset */
+
+	return tot;
+}
+
+static const struct file_operations emdh_fops = {
+	.open = mddi_reg_open,
+	.release = mddi_reg_release,
+	.read = emdh_reg_read,
+	.write = emdh_reg_write,
+};
+
+
+uint32 dbg_offset;
+uint32 dbg_count;
+char *dbg_base;
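+
+/*
+ * "mdp-dbg" window: dbg_base is an already-mapped virtual base supplied as
+ * the third field written to the "off" file; dbg_offset/dbg_count select
+ * the words dumped by the "reg" file.  Reads return nothing until dbg_base
+ * has been set.
+ */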
+
+
+static int dbg_open(struct inode *inode, struct file *file)
+{
+	/* non-seekable */
+	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+	return 0;
+}
+
+static int dbg_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static ssize_t dbg_base_write(
+	struct file *file,
+	const char __user *buff,
+	size_t count,
+	loff_t *ppos)
+{
+	return count;
+}
+
+static ssize_t dbg_base_read(
+	struct file *file,
+	char __user *buff,
+	size_t count,
+	loff_t *ppos)
+{
+	int len = 0;
+	int tot = 0;
+	int dlen;
+	char *bp;
+
+
+	if (*ppos)
+		return 0;	/* the end */
+
+
+	bp = debug_buf;
+	dlen = sizeof(debug_buf);
+
+	len = snprintf(bp, dlen, "mdp_base  :    %08x\n",
+				(int)msm_mdp_base);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "mddi_base :    %08x\n",
+				(int)msm_pmdh_base);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "emdh_base :    %08x\n",
+				(int)msm_emdh_base);
+	bp += len;
+	dlen -= len;
+#ifdef CONFIG_FB_MSM_TVOUT
+	len = snprintf(bp, dlen, "tvenc_base:    %08x\n",
+				(int)tvenc_base);
+	bp += len;
+	dlen -= len;
+#endif
+
+#ifdef CONFIG_FB_MSM_MIPI_DSI
+	len = snprintf(bp, dlen, "mipi_dsi_base: %08x\n",
+				(int)mipi_dsi_base);
+	bp += len;
+	dlen -= len;
+#endif
+
+	tot = (uint32)bp - (uint32)debug_buf;
+	*bp = 0;
+	tot++;
+
+	if (tot < 0)
+		return 0;
+	if (copy_to_user(buff, debug_buf, tot))
+		return -EFAULT;
+
+	*ppos += tot;	/* increase offset */
+
+	return tot;
+}
+
+static const struct file_operations dbg_base_fops = {
+	.open = dbg_open,
+	.release = dbg_release,
+	.read = dbg_base_read,
+	.write = dbg_base_write,
+};
+
+static ssize_t dbg_offset_write(
+	struct file *file,
+	const char __user *buff,
+	size_t count,
+	loff_t *ppos)
+{
+	uint32 off, cnt, num, base;
+
+	if (count >= sizeof(debug_buf))
+		return -EFAULT;
+
+	if (copy_from_user(debug_buf, buff, count))
+		return -EFAULT;
+
+	debug_buf[count] = 0;	/* end of string */
+
+	cnt = sscanf(debug_buf, "%x %d %x", &off, &num, &base);
+
+	if (cnt < 0)
+		cnt = 0;
+
+	if (cnt >= 1)
+		dbg_offset = off;
+	if (cnt >= 2)
+		dbg_count = num;
+	if (cnt >= 3)
+		dbg_base = (char *)base;
+
+	printk(KERN_INFO "%s: offset=%x cnt=%d base=%x\n", __func__,
+				dbg_offset, dbg_count, (int)dbg_base);
+
+	return count;
+}
+
+static ssize_t dbg_offset_read(
+	struct file *file,
+	char __user *buff,
+	size_t count,
+	loff_t *ppos)
+{
+	int len = 0;
+
+
+	if (*ppos)
+		return 0;	/* the end */
+
+	len = snprintf(debug_buf, sizeof(debug_buf), "0x%08x %d 0x%08x\n",
+				dbg_offset, dbg_count, (int)dbg_base);
+	if (len < 0)
+		return 0;
+
+	if (copy_to_user(buff, debug_buf, len))
+		return -EFAULT;
+
+	*ppos += len;	/* increase offset */
+
+	return len;
+}
+
+static const struct file_operations dbg_off_fops = {
+	.open = dbg_open,
+	.release = dbg_release,
+	.read = dbg_offset_read,
+	.write = dbg_offset_write,
+};
+
+
+static ssize_t dbg_reg_write(
+	struct file *file,
+	const char __user *buff,
+	size_t count,
+	loff_t *ppos)
+{
+	uint32 off, data;
+	int cnt;
+
+	if (count >= sizeof(debug_buf))
+		return -EFAULT;
+
+	if (copy_from_user(debug_buf, buff, count))
+		return -EFAULT;
+
+	debug_buf[count] = 0;	/* end of string */
+
+	cnt = sscanf(debug_buf, "%x %x", &off, &data);
+
+	writel(data, dbg_base + off);
+
+	printk(KERN_INFO "%s: addr=%x data=%x\n",
+			__func__, (int)(dbg_base+off), (int)data);
+
+	return count;
+}
+
+static ssize_t dbg_reg_read(
+	struct file *file,
+	char __user *buff,
+	size_t count,
+	loff_t *ppos)
+{
+	int len = 0;
+	uint32 data;
+	int i, j, off, dlen, num;
+	char *bp, *cp;
+	int tot = 0;
+
+
+	if (*ppos)
+		return 0;	/* the end */
+
+	if (dbg_base == 0)
+		return 0;	/* nothing to read */
+
+	j = 0;
+	num = 0;
+	bp = debug_buf;
+	cp = (char *)(dbg_base + dbg_offset);
+	dlen = sizeof(debug_buf);
+	while (j++ < 16) {
+		len = snprintf(bp, dlen, "0x%08x: ", (int)cp);
+		tot += len;
+		bp += len;
+		dlen -= len;
+		off = 0;
+		i = 0;
+		while (i++ < 4) {
+			data = readl(cp + off);
+			len = snprintf(bp, dlen, "%08x ", data);
+			tot += len;
+			bp += len;
+			dlen -= len;
+			off += 4;
+			num++;
+			if (num >= dbg_count)
+				break;
+		}
+		data = readl((u32)cp + off);
+		*bp++ = '\n';
+		--dlen;
+		tot++;
+		cp += off;
+		if (num >= dbg_count)
+			break;
+	}
+	*bp = 0;
+	tot++;
+
+	if (copy_to_user(buff, debug_buf, tot))
+		return -EFAULT;
+
+	*ppos += tot;	/* increase offset */
+
+	return tot;
+}
+
+
+static const struct file_operations dbg_reg_fops = {
+	.open = dbg_open,
+	.release = dbg_release,
+	.read = dbg_reg_read,
+	.write = dbg_reg_write,
+};
+
+#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL
+static uint32 hdmi_offset;
+static uint32 hdmi_count;
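+
+/*
+ * The hdmi "off"/"reg" files mirror the mdp pair above, but read and write
+ * registers relative to hdmi_msm_get_io_base() instead of MDP_BASE.
+ */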
+
+static int hdmi_open(struct inode *inode, struct file *file)
+{
+	/* non-seekable */
+	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+	return 0;
+}
+
+static int hdmi_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static ssize_t hdmi_offset_write(
+	struct file *file,
+	const char __user *buff,
+	size_t count,
+	loff_t *ppos)
+{
+	uint32 off, cnt, num;
+
+	if (count >= sizeof(debug_buf))
+		return -EFAULT;
+
+	if (copy_from_user(debug_buf, buff, count))
+		return -EFAULT;
+
+	debug_buf[count] = 0;	/* end of string */
+
+	cnt = sscanf(debug_buf, "%x %d", &off, &num);
+
+	if (cnt < 0)
+		cnt = 0;
+
+	if (cnt >= 1)
+		hdmi_offset = off;
+	if (cnt >= 2)
+		hdmi_count = num;
+
+	printk(KERN_INFO "%s: offset=%x cnt=%d\n", __func__,
+				hdmi_offset, hdmi_count);
+
+	return count;
+}
+
+static ssize_t hdmi_offset_read(
+	struct file *file,
+	char __user *buff,
+	size_t count,
+	loff_t *ppos)
+{
+	int len = 0;
+
+
+	if (*ppos)
+		return 0;	/* the end */
+
+	len = snprintf(debug_buf, sizeof(debug_buf), "0x%08x %d\n",
+				hdmi_offset, hdmi_count);
+	if (len < 0)
+		return 0;
+
+	if (copy_to_user(buff, debug_buf, len))
+		return -EFAULT;
+
+	*ppos += len;	/* increase offset */
+
+	return len;
+}
+
+static const struct file_operations hdmi_off_fops = {
+	.open = hdmi_open,
+	.release = hdmi_release,
+	.read = hdmi_offset_read,
+	.write = hdmi_offset_write,
+};
+
+
+static ssize_t hdmi_reg_write(
+	struct file *file,
+	const char __user *buff,
+	size_t count,
+	loff_t *ppos)
+{
+	uint32 off, data, base;
+	int cnt;
+
+	if (count >= sizeof(debug_buf))
+		return -EFAULT;
+
+	if (copy_from_user(debug_buf, buff, count))
+		return -EFAULT;
+
+	base = hdmi_msm_get_io_base();
+	if (base == 0)
+		return -EFAULT;
+
+	debug_buf[count] = 0;	/* end of string */
+
+	cnt = sscanf(debug_buf, "%x %x", &off, &data);
+
+	writel(data, base + off);
+
+	printk(KERN_INFO "%s: addr=%x data=%x\n",
+			__func__, (int)(base+off), (int)data);
+
+	return count;
+}
+
+static ssize_t hdmi_reg_read(
+	struct file *file,
+	char __user *buff,
+	size_t count,
+	loff_t *ppos)
+{
+	int len = 0;
+	uint32 data;
+	int i, j, off, dlen, num;
+	char *bp, *cp;
+	int tot = 0;
+
+
+	if (*ppos)
+		return 0;	/* the end */
+
+	if (hdmi_msm_get_io_base() == 0)
+		return 0;	/* nothing to read */
+
+	j = 0;
+	num = 0;
+	bp = debug_buf;
+	cp = (char *)(hdmi_msm_get_io_base() + hdmi_offset);
+	dlen = sizeof(debug_buf);
+	while (j++ < 16) {
+		len = snprintf(bp, dlen, "0x%08x: ", (int)cp);
+		tot += len;
+		bp += len;
+		dlen -= len;
+		off = 0;
+		i = 0;
+		while (i++ < 4) {
+			data = readl(cp + off);
+			len = snprintf(bp, dlen, "%08x ", data);
+			tot += len;
+			bp += len;
+			dlen -= len;
+			off += 4;
+			num++;
+			if (num >= hdmi_count)
+				break;
+		}
+		data = readl((u32)cp + off);
+		*bp++ = '\n';
+		--dlen;
+		tot++;
+		cp += off;
+		if (num >= hdmi_count)
+			break;
+	}
+	*bp = 0;
+	tot++;
+
+	if (copy_to_user(buff, debug_buf, tot))
+		return -EFAULT;
+
+	*ppos += tot;	/* increase offset */
+
+	return tot;
+}
+
+
+static const struct file_operations hdmi_reg_fops = {
+	.open = hdmi_open,
+	.release = hdmi_release,
+	.read = hdmi_reg_read,
+	.write = hdmi_reg_write,
+};
+#endif
+
+/*
+ * debugfs
+ *
+ */
+
+int mdp_debugfs_init(void)
+{
+	struct dentry *dent = debugfs_create_dir("mdp", NULL);
+
+	if (IS_ERR(dent)) {
+		printk(KERN_ERR "%s(%d): debugfs_create_dir fail, error %ld\n",
+			__FILE__, __LINE__, PTR_ERR(dent));
+		return -1;
+	}
+
+	if (debugfs_create_file("off", 0644, dent, 0, &mdp_off_fops)
+			== NULL) {
+		printk(KERN_ERR "%s(%d): debugfs_create_file: 'off' fail\n",
+			__FILE__, __LINE__);
+		return -1;
+	}
+
+	if (debugfs_create_file("reg", 0644, dent, 0, &mdp_reg_fops)
+			== NULL) {
+		printk(KERN_ERR "%s(%d): debugfs_create_file: debug fail\n",
+			__FILE__, __LINE__);
+		return -1;
+	}
+
+#ifdef CONFIG_FB_MSM_MDP40
+	if (debugfs_create_file("stat", 0644, dent, 0, &mdp_stat_fops)
+			== NULL) {
+		printk(KERN_ERR "%s(%d): debugfs_create_file: debug fail\n",
+			__FILE__, __LINE__);
+		return -1;
+	}
+#endif
+
+	dent = debugfs_create_dir("mddi", NULL);
+
+	if (IS_ERR(dent)) {
+		printk(KERN_ERR "%s(%d): debugfs_create_dir fail, error %ld\n",
+			__FILE__, __LINE__, PTR_ERR(dent));
+		return -1;
+	}
+
+	if (debugfs_create_file("reg", 0644, dent, 0, &pmdh_fops)
+			== NULL) {
+		printk(KERN_ERR "%s(%d): debugfs_create_file: debug fail\n",
+			__FILE__, __LINE__);
+		return -1;
+	}
+
+#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDDI)
+	if (debugfs_create_file("vsync", 0644, dent, 0, &vsync_fops)
+			== NULL) {
+		printk(KERN_ERR "%s(%d): debugfs_create_file: debug fail\n",
+			__FILE__, __LINE__);
+		return -1;
+	}
+#endif
+
+	dent = debugfs_create_dir("emdh", NULL);
+
+	if (IS_ERR(dent)) {
+		printk(KERN_ERR "%s(%d): debugfs_create_dir fail, error %ld\n",
+			__FILE__, __LINE__, PTR_ERR(dent));
+		return -1;
+	}
+
+	if (debugfs_create_file("reg", 0644, dent, 0, &emdh_fops)
+			== NULL) {
+		printk(KERN_ERR "%s(%d): debugfs_create_file: debug fail\n",
+			__FILE__, __LINE__);
+		return -1;
+	}
+
+	dent = debugfs_create_dir("mdp-dbg", NULL);
+
+	if (IS_ERR(dent)) {
+		printk(KERN_ERR "%s(%d): debugfs_create_dir fail, error %ld\n",
+			__FILE__, __LINE__, PTR_ERR(dent));
+		return -1;
+	}
+
+	if (debugfs_create_file("base", 0644, dent, 0, &dbg_base_fops)
+			== NULL) {
+		printk(KERN_ERR "%s(%d): debugfs_create_file: 'base' fail\n",
+			__FILE__, __LINE__);
+		return -1;
+	}
+
+	if (debugfs_create_file("off", 0644, dent, 0, &dbg_off_fops)
+			== NULL) {
+		printk(KERN_ERR "%s(%d): debugfs_create_file: 'off' fail\n",
+			__FILE__, __LINE__);
+		return -1;
+	}
+
+	if (debugfs_create_file("reg", 0644, dent, 0, &dbg_reg_fops)
+			== NULL) {
+		printk(KERN_ERR "%s(%d): debugfs_create_file: debug fail\n",
+			__FILE__, __LINE__);
+		return -1;
+	}
+
+#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL
+	dent = debugfs_create_dir("hdmi", NULL);
+
+	if (IS_ERR(dent)) {
+		printk(KERN_ERR "%s(%d): debugfs_create_dir fail, error %ld\n",
+			__FILE__, __LINE__, PTR_ERR(dent));
+		return PTR_ERR(dent);
+	}
+
+	if (debugfs_create_file("off", 0644, dent, 0, &hdmi_off_fops)
+			== NULL) {
+		printk(KERN_ERR "%s(%d): debugfs_create_file: 'off' fail\n",
+			__FILE__, __LINE__);
+		return -ENOENT;
+	}
+
+	if (debugfs_create_file("reg", 0644, dent, 0, &hdmi_reg_fops)
+			== NULL) {
+		printk(KERN_ERR "%s(%d): debugfs_create_file: 'reg' fail\n",
+			__FILE__, __LINE__);
+		return -ENOENT;
+	}
+#endif
+
+	return 0;
+}
diff --git a/drivers/video/msm/mdp_dma.c b/drivers/video/msm/mdp_dma.c
new file mode 100644
index 0000000..a78c0db
--- /dev/null
+++ b/drivers/video/msm/mdp_dma.c
@@ -0,0 +1,602 @@
+/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/hrtimer.h>
+
+#include <mach/hardware.h>
+#include <linux/io.h>
+
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+
+#include <linux/fb.h>
+
+#include "mdp.h"
+#include "msm_fb.h"
+#include "mddihost.h"
+
+static uint32 mdp_last_dma2_update_width;
+static uint32 mdp_last_dma2_update_height;
+static uint32 mdp_curr_dma2_update_width;
+static uint32 mdp_curr_dma2_update_height;
+
+ktime_t mdp_dma2_last_update_time = { 0 };
+
+int mdp_lcd_rd_cnt_offset_slow = 20;
+int mdp_lcd_rd_cnt_offset_fast = 20;
+int mdp_vsync_usec_wait_line_too_short = 5;
+uint32 mdp_dma2_update_time_in_usec;
+uint32 mdp_total_vdopkts;
+
+extern u32 msm_fb_debug_enabled;
+extern struct workqueue_struct *mdp_dma_wq;
+
+int vsync_start_y_adjust = 4;
+
+static void mdp_dma2_update_lcd(struct msm_fb_data_type *mfd)
+{
+	MDPIBUF *iBuf = &mfd->ibuf;
+	int mddi_dest = FALSE;
+	int cmd_mode = FALSE;
+	uint32 outBpp = iBuf->bpp;
+	uint32 dma2_cfg_reg;
+	uint8 *src;
+	uint32 mddi_ld_param;
+	uint16 mddi_vdo_packet_reg;
+#ifndef CONFIG_FB_MSM_MDP303
+	struct msm_fb_panel_data *pdata =
+	    (struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;
+#endif
+	uint32 ystride = mfd->fbi->fix.line_length;
+	uint32 mddi_pkt_desc;
+
+	dma2_cfg_reg = DMA_PACK_ALIGN_LSB |
+		    DMA_OUT_SEL_AHB | DMA_IBUF_NONCONTIGUOUS;
+
+#ifdef CONFIG_FB_MSM_MDP22
+	dma2_cfg_reg |= DMA_PACK_TIGHT;
+#endif
+
+#ifdef CONFIG_FB_MSM_MDP30
+	/*
+	 * Software workaround:  On 7x25/7x27, the MDP will not
+	 * respond if dma_w is 1 pixel.  Set the update width to
+	 * 2 pixels and adjust the x offset if needed.
+	 */
+	if (iBuf->dma_w == 1) {
+		iBuf->dma_w = 2;
+		if (iBuf->dma_x == (iBuf->ibuf_width - 2))
+			iBuf->dma_x--;
+	}
+#endif
+
+	if (mfd->fb_imgType == MDP_BGR_565)
+		dma2_cfg_reg |= DMA_PACK_PATTERN_BGR;
+	else if (mfd->fb_imgType == MDP_RGBA_8888)
+		dma2_cfg_reg |= DMA_PACK_PATTERN_BGR;
+	else
+		dma2_cfg_reg |= DMA_PACK_PATTERN_RGB;
+
+	if (outBpp == 4) {
+		dma2_cfg_reg |= DMA_IBUF_C3ALPHA_EN;
+		dma2_cfg_reg |= DMA_IBUF_FORMAT_xRGB8888_OR_ARGB8888;
+	}
+
+	if (outBpp == 2)
+		dma2_cfg_reg |= DMA_IBUF_FORMAT_RGB565;
+
+	mddi_ld_param = 0;
+	mddi_vdo_packet_reg = mfd->panel_info.mddi.vdopkt;
+
+	if ((mfd->panel_info.type == MDDI_PANEL) ||
+	    (mfd->panel_info.type == EXT_MDDI_PANEL)) {
+		dma2_cfg_reg |= DMA_OUT_SEL_MDDI;
+		mddi_dest = TRUE;
+
+		if (mfd->panel_info.type == MDDI_PANEL) {
+			mdp_total_vdopkts++;
+			if (mfd->panel_info.pdest == DISPLAY_1) {
+				dma2_cfg_reg |= DMA_MDDI_DMAOUT_LCD_SEL_PRIMARY;
+				mddi_ld_param = 0;
+#ifdef MDDI_HOST_WINDOW_WORKAROUND
+				mddi_window_adjust(mfd, iBuf->dma_x,
+						   iBuf->dma_w - 1, iBuf->dma_y,
+						   iBuf->dma_h - 1);
+#endif
+			} else {
+				dma2_cfg_reg |=
+				    DMA_MDDI_DMAOUT_LCD_SEL_SECONDARY;
+				mddi_ld_param = 1;
+#ifdef MDDI_HOST_WINDOW_WORKAROUND
+				mddi_window_adjust(mfd, iBuf->dma_x,
+						   iBuf->dma_w - 1, iBuf->dma_y,
+						   iBuf->dma_h - 1);
+#endif
+			}
+		} else {
+			dma2_cfg_reg |= DMA_MDDI_DMAOUT_LCD_SEL_EXTERNAL;
+			mddi_ld_param = 2;
+		}
+#ifdef CONFIG_FB_MSM_MDP303
+	} else if (mfd->panel_info.type == MIPI_CMD_PANEL) {
+		cmd_mode = TRUE;
+		dma2_cfg_reg |= DMA_OUT_SEL_DSI_CMD;
+#endif
+	} else {
+		if (mfd->panel_info.pdest == DISPLAY_1) {
+			dma2_cfg_reg |= DMA_AHBM_LCD_SEL_PRIMARY;
+			outp32(MDP_EBI2_LCD0, mfd->data_port_phys);
+		} else {
+			dma2_cfg_reg |= DMA_AHBM_LCD_SEL_SECONDARY;
+			outp32(MDP_EBI2_LCD1, mfd->data_port_phys);
+		}
+	}
+
+	src = (uint8 *) iBuf->buf;
+	/* starting input address */
+	src += iBuf->dma_x * outBpp + iBuf->dma_y * ystride;
+
+	mdp_curr_dma2_update_width = iBuf->dma_w;
+	mdp_curr_dma2_update_height = iBuf->dma_h;
+
+	/* MDP cmd block enable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+#ifdef CONFIG_FB_MSM_MDP22
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0184,
+			(iBuf->dma_h << 16 | iBuf->dma_w));
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0188, src);
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x018C, ystride);
+#else
+	if (cmd_mode)
+		MDP_OUTP(MDP_BASE + 0x90004,
+			(mfd->panel_info.yres << 16 | mfd->panel_info.xres));
+	else
+		MDP_OUTP(MDP_BASE + 0x90004, (iBuf->dma_h << 16 | iBuf->dma_w));
+
+	MDP_OUTP(MDP_BASE + 0x90008, src);
+	MDP_OUTP(MDP_BASE + 0x9000c, ystride);
+#endif
+
+	if (mfd->panel_info.bpp == 18) {
+		mddi_pkt_desc = MDDI_VDO_PACKET_DESC;
+		dma2_cfg_reg |= DMA_DSTC0G_6BITS |	/* 666 18BPP */
+		    DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;
+	} else if (mfd->panel_info.bpp == 24) {
+		mddi_pkt_desc = MDDI_VDO_PACKET_DESC_24;
+		dma2_cfg_reg |= DMA_DSTC0G_8BITS |      /* 888 24BPP */
+			DMA_DSTC1B_8BITS | DMA_DSTC2R_8BITS;
+	} else {
+		mddi_pkt_desc = MDDI_VDO_PACKET_DESC_16;
+		dma2_cfg_reg |= DMA_DSTC0G_6BITS |	/* 565 16BPP */
+		    DMA_DSTC1B_5BITS | DMA_DSTC2R_5BITS;
+	}
+
+#ifndef CONFIG_FB_MSM_MDP303
+
+	if (mddi_dest) {
+#ifdef CONFIG_FB_MSM_MDP22
+		MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0194,
+			 (iBuf->dma_y << 16) | iBuf->dma_x);
+		MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01a0, mddi_ld_param);
+		MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01a4,
+			 (mddi_pkt_desc << 16) | mddi_vdo_packet_reg);
+#else
+		MDP_OUTP(MDP_BASE + 0x90010, (iBuf->dma_y << 16) | iBuf->dma_x);
+		MDP_OUTP(MDP_BASE + 0x00090, mddi_ld_param);
+		MDP_OUTP(MDP_BASE + 0x00094,
+			 (mddi_pkt_desc << 16) | mddi_vdo_packet_reg);
+#endif
+	} else {
+		/* setting EBI2 LCDC write window */
+		pdata->set_rect(iBuf->dma_x, iBuf->dma_y, iBuf->dma_w,
+				iBuf->dma_h);
+	}
+#else
+	if (mfd->panel_info.type == MIPI_CMD_PANEL) {
+		/* dma_p = 0, dma_s = 1 */
+		MDP_OUTP(MDP_BASE + 0xF1000, 0x10);
+		/* enable dsi trigger on dma_p */
+		MDP_OUTP(MDP_BASE + 0xF1004, 0x01);
+	}
+#endif
+
+	/* dma2 config register */
+#ifdef MDP_HW_VSYNC
+	MDP_OUTP(MDP_BASE + 0x90000, dma2_cfg_reg);
+
+	if ((mfd->use_mdp_vsync) &&
+	    (mfd->ibuf.vsync_enable) && (mfd->panel_info.lcd.vsync_enable)) {
+		uint32 start_y;
+
+		if (vsync_start_y_adjust <= iBuf->dma_y)
+			start_y = iBuf->dma_y - vsync_start_y_adjust;
+		else
+			start_y =
+			    (mfd->total_lcd_lines - 1) - (vsync_start_y_adjust -
+							  iBuf->dma_y);
+
+		/*
+		 * MDP VSYNC clock must be on by now, so we don't have to
+		 * re-enable it
+		 */
+		MDP_OUTP(MDP_BASE + 0x210, start_y);
+		MDP_OUTP(MDP_BASE + 0x20c, 1);	/* enable prim vsync */
+	} else {
+		MDP_OUTP(MDP_BASE + 0x20c, 0);	/* disable prim vsync */
+	}
+#else
+#ifdef CONFIG_FB_MSM_MDP22
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0180, dma2_cfg_reg);
+#else
+	MDP_OUTP(MDP_BASE + 0x90000, dma2_cfg_reg);
+#endif
+#endif /* MDP_HW_VSYNC */
+
+	/* MDP cmd block disable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
+
+static ktime_t vt = { 0 };
+int mdp_usec_diff_threshold = 100;
+int mdp_expected_usec_wait;
+
+enum hrtimer_restart mdp_dma2_vsync_hrtimer_handler(struct hrtimer *ht)
+{
+	struct msm_fb_data_type *mfd = NULL;
+
+	mfd = container_of(ht, struct msm_fb_data_type, dma_hrtimer);
+
+	mdp_pipe_kickoff(MDP_DMA2_TERM, mfd);
+
+	if (msm_fb_debug_enabled) {
+		ktime_t t;
+		int usec_diff;
+		int actual_wait;
+
+		t = ktime_get_real();
+
+		actual_wait = ktime_to_us(ktime_sub(t, vt));
+		usec_diff = actual_wait - mdp_expected_usec_wait;
+
+		if ((mdp_usec_diff_threshold < usec_diff) || (usec_diff < 0))
+			MSM_FB_DEBUG
+			    ("HRT Diff = %d usec Exp=%d usec  Act=%d usec\n",
+			     usec_diff, mdp_expected_usec_wait, actual_wait);
+	}
+
+	return HRTIMER_NORESTART;
+}
+
+
+#ifdef CONFIG_FB_MSM_MDP303
+static int busy_wait_cnt;
+
+void	mdp3_dsi_cmd_dma_busy_wait(struct msm_fb_data_type *mfd)
+{
+	unsigned long flag;
+	int need_wait = 0;
+
+#ifdef DSI_CLK_CTRL
+	mod_timer(&dsi_clock_timer, jiffies + HZ); /* one second */
+#endif
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+#ifdef DSI_CLK_CTRL
+	if (mipi_dsi_clk_on == 0)
+		mipi_dsi_clk_enable();
+#endif
+
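+	/*
+	 * If a DMA transfer is still in flight, queue up behind it below;
+	 * the completion is re-armed only by the first waiter and
+	 * busy_wait_cnt tracks how many callers are waiting.
+	 */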
+	if (mfd->dma->busy == TRUE) {
+		if (busy_wait_cnt == 0)
+			INIT_COMPLETION(mfd->dma->comp);
+		busy_wait_cnt++;
+		need_wait++;
+	}
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+
+	if (need_wait) {
+		/* wait until DMA finishes the current job */
+		wait_for_completion(&mfd->dma->comp);
+	}
+}
+#endif
+
+static void mdp_dma_schedule(struct msm_fb_data_type *mfd, uint32 term)
+{
+	/*
+	 * dma2 configure VSYNC block
+	 * vsync supported on Primary LCD only for now
+	 */
+	int32 mdp_lcd_rd_cnt;
+	uint32 usec_wait_time;
+	uint32 start_y;
+
+	/*
+	 * ToDo: if we can move the HRT timer callback to a workqueue, we
+	 * can move the DMA2 power-on under mdp_pipe_kickoff().
+	 * This would save power during the hrtimer wait.
+	 * However, if the context-switch latency (hrt irq -> workqueue) is
+	 * too large, we will miss the vsync timing.
+	 */
+	if (term == MDP_DMA2_TERM)
+		mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	mdp_dma2_update_time_in_usec = ktime_to_us(mdp_dma2_last_update_time);
+
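+	/*
+	 * No SW vsync needed: either vsync is off for this update or the
+	 * MDP HW vsync block handles it, so kick off the DMA right away.
+	 */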
+	if ((!mfd->ibuf.vsync_enable) || (!mfd->panel_info.lcd.vsync_enable)
+	    || (mfd->use_mdp_vsync)) {
+		mdp_pipe_kickoff(term, mfd);
+		return;
+	}
+	/* SW vsync logic starts here */
+
+	/* get current rd counter */
+	mdp_lcd_rd_cnt = mdp_get_lcd_line_counter(mfd);
+	if (mdp_dma2_update_time_in_usec != 0) {
+		uint32 num, den;
+
+		/*
+		 * ROI width boundary calculation: find the update width at
+		 * which the MDP write pointer keeps pace with the LCD
+		 * read pointer.
+		 */
+
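+		/*
+		 * den works out to refresh_hz * total_lcd_lines *
+		 * update_time_sec, i.e. the number of lines the LCD read
+		 * pointer scanned during the last DMA2 update; num is the
+		 * number of pixels MDP wrote in that time, so num / den is
+		 * the break-even update width.
+		 */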
+		num = mdp_last_dma2_update_width * mdp_last_dma2_update_height;
+		den =
+		    (((mfd->panel_info.lcd.refx100 * mfd->total_lcd_lines) /
+		      1000) * (mdp_dma2_update_time_in_usec / 100)) / 1000;
+
+		if (den == 0)
+			mfd->vsync_width_boundary[mdp_last_dma2_update_width] =
+			    mfd->panel_info.xres + 1;
+		else
+			mfd->vsync_width_boundary[mdp_last_dma2_update_width] =
+			    (int)(num / den);
+	}
+
+	if (mfd->vsync_width_boundary[mdp_last_dma2_update_width] >
+	    mdp_curr_dma2_update_width) {
+		/* MDP write pointer is faster than the LCD read pointer */
+		mdp_lcd_rd_cnt += mdp_lcd_rd_cnt_offset_fast;
+	} else {
+		/* MDP write pointer is slower than the LCD read pointer */
+		mdp_lcd_rd_cnt -= mdp_lcd_rd_cnt_offset_slow;
+	}
+
+	if (mdp_lcd_rd_cnt < 0)
+		mdp_lcd_rd_cnt = mfd->total_lcd_lines + mdp_lcd_rd_cnt;
+	else if (mdp_lcd_rd_cnt > mfd->total_lcd_lines)
+		mdp_lcd_rd_cnt = mdp_lcd_rd_cnt - mfd->total_lcd_lines - 1;
+
+	/* get wrt pointer position */
+	start_y = mfd->ibuf.dma_y;
+
+	/* measure line difference between start_y and rd counter */
+	if (start_y > mdp_lcd_rd_cnt) {
+		/*
+		 * lcd.refx100 is the refresh rate already multiplied by 100,
+		 * hence the /100 below; the *1000000 converts the result
+		 * to usec
+		 */
+
+		if ((start_y - mdp_lcd_rd_cnt) <=
+		    mdp_vsync_usec_wait_line_too_short)
+			usec_wait_time = 0;
+		else
+			usec_wait_time =
+			    ((start_y -
+			      mdp_lcd_rd_cnt) * 1000000) /
+			    ((mfd->total_lcd_lines *
+			      mfd->panel_info.lcd.refx100) / 100);
+	} else {
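+		/*
+		 * The read pointer is at or past start_y, so it must wrap
+		 * around the panel before it reaches start_y again.
+		 */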
+		if ((start_y + (mfd->total_lcd_lines - mdp_lcd_rd_cnt)) <=
+		    mdp_vsync_usec_wait_line_too_short)
+			usec_wait_time = 0;
+		else
+			usec_wait_time =
+			    ((start_y +
+			      (mfd->total_lcd_lines -
+			       mdp_lcd_rd_cnt)) * 1000000) /
+			    ((mfd->total_lcd_lines *
+			      mfd->panel_info.lcd.refx100) / 100);
+	}
+
+	mdp_last_dma2_update_width = mdp_curr_dma2_update_width;
+	mdp_last_dma2_update_height = mdp_curr_dma2_update_height;
+
+	if (usec_wait_time == 0) {
+		mdp_pipe_kickoff(term, mfd);
+	} else {
+		ktime_t wait_time;
+
+		wait_time = ns_to_ktime(usec_wait_time * 1000);
+
+		if (msm_fb_debug_enabled) {
+			vt = ktime_get_real();
+			mdp_expected_usec_wait = usec_wait_time;
+		}
+		hrtimer_start(&mfd->dma_hrtimer, wait_time, HRTIMER_MODE_REL);
+	}
+}
+
+#ifdef MDDI_HOST_WINDOW_WORKAROUND
+static void mdp_dma2_update_sub(struct msm_fb_data_type *mfd);
+void mdp_dma2_update(struct msm_fb_data_type *mfd)
+{
+	MDPIBUF *iBuf;
+	uint32 upper_height;
+
+	if (mfd->panel.type == EXT_MDDI_PANEL) {
+		mdp_dma2_update_sub(mfd);
+		return;
+	}
+
+	iBuf = &mfd->ibuf;
+
+	upper_height =
+	    (uint32) mddi_assign_pkt_height((uint16) iBuf->dma_w,
+					    (uint16) iBuf->dma_h, 18);
+
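+	/*
+	 * If the update is taller than mddi_assign_pkt_height() allows,
+	 * send it as two DMA passes: the upper region first, then the
+	 * remainder with vsync disabled.
+	 */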
+	if (upper_height >= iBuf->dma_h) {
+		mdp_dma2_update_sub(mfd);
+	} else {
+		uint32 lower_height;
+
+		/* sending the upper region first */
+		lower_height = iBuf->dma_h - upper_height;
+		iBuf->dma_h = upper_height;
+		mdp_dma2_update_sub(mfd);
+
+		/* sending the lower region second */
+		iBuf->dma_h = lower_height;
+		iBuf->dma_y += lower_height;
+		iBuf->vsync_enable = FALSE;
+		mdp_dma2_update_sub(mfd);
+	}
+}
+
+static void mdp_dma2_update_sub(struct msm_fb_data_type *mfd)
+#else
+void mdp_dma2_update(struct msm_fb_data_type *mfd)
+#endif
+{
+	down(&mfd->dma->mutex);
+	if ((mfd) && (!mfd->dma->busy) && (mfd->panel_power_on)) {
+		down(&mfd->sem);
+		mfd->ibuf_flushed = TRUE;
+		mdp_dma2_update_lcd(mfd);
+
+		mdp_enable_irq(MDP_DMA2_TERM);
+		mfd->dma->busy = TRUE;
+		INIT_COMPLETION(mfd->dma->comp);
+
+		/* schedule DMA to start */
+		mdp_dma_schedule(mfd, MDP_DMA2_TERM);
+		up(&mfd->sem);
+
+		/* wait until DMA finishes the current job */
+		wait_for_completion_killable(&mfd->dma->comp);
+		mdp_disable_irq(MDP_DMA2_TERM);
+
+		/* signal if the pan function is waiting for this update */
+		if (mfd->pan_waiting) {
+			mfd->pan_waiting = FALSE;
+			complete(&mfd->pan_comp);
+		}
+	}
+	up(&mfd->dma->mutex);
+}
+
+void mdp_lcd_update_workqueue_handler(struct work_struct *work)
+{
+	struct msm_fb_data_type *mfd = NULL;
+
+	mfd = container_of(work, struct msm_fb_data_type, dma_update_worker);
+	if (mfd)
+		mfd->dma_fnc(mfd);
+}
+
+void mdp_set_dma_pan_info(struct fb_info *info, struct mdp_dirty_region *dirty,
+			  boolean sync)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	MDPIBUF *iBuf;
+	int bpp = info->var.bits_per_pixel / 8;
+
+	down(&mfd->sem);
+	iBuf = &mfd->ibuf;
+	iBuf->buf = (uint8 *) info->fix.smem_start;
+	iBuf->buf += info->var.xoffset * bpp +
+			info->var.yoffset * info->fix.line_length;
+
+	iBuf->ibuf_width = info->var.xres_virtual;
+	iBuf->bpp = bpp;
+
+	iBuf->vsync_enable = sync;
+
+	if (dirty) {
+		/*
+		 * ToDo: dirty region check inside var.xoffset+xres
+		 * <-> var.yoffset+yres
+		 */
+		iBuf->dma_x = dirty->xoffset % info->var.xres;
+		iBuf->dma_y = dirty->yoffset % info->var.yres;
+		iBuf->dma_w = dirty->width;
+		iBuf->dma_h = dirty->height;
+	} else {
+		iBuf->dma_x = 0;
+		iBuf->dma_y = 0;
+		iBuf->dma_w = info->var.xres;
+		iBuf->dma_h = info->var.yres;
+	}
+	mfd->ibuf_flushed = FALSE;
+	up(&mfd->sem);
+}
+
+void mdp_dma_pan_update(struct fb_info *info)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	MDPIBUF *iBuf;
+
+	iBuf = &mfd->ibuf;
+
+	if (mfd->sw_currently_refreshing) {
+		/* we need to wait for the pending update */
+		mfd->pan_waiting = TRUE;
+		if (!mfd->ibuf_flushed) {
+			wait_for_completion_killable(&mfd->pan_comp);
+		}
+		/* waiting for this update to complete */
+		mfd->pan_waiting = TRUE;
+		wait_for_completion_killable(&mfd->pan_comp);
+	} else
+		mfd->dma_fnc(mfd);
+}
+
+void mdp_refresh_screen(unsigned long data)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)data;
+
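+	/*
+	 * Software refresh loop: re-arm the timer on every pass.  Poll
+	 * again in about 1 ms while DMA is still busy, otherwise wait one
+	 * refresh period and queue the next frame update.
+	 */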
+	if ((mfd->sw_currently_refreshing) && (mfd->sw_refreshing_enable)) {
+		init_timer(&mfd->refresh_timer);
+		mfd->refresh_timer.function = mdp_refresh_screen;
+		mfd->refresh_timer.data = data;
+
+		if (mfd->dma->busy)
+			/* come back in 1 msec */
+			mfd->refresh_timer.expires = jiffies + (HZ / 1000);
+		else
+			mfd->refresh_timer.expires =
+			    jiffies + mfd->refresh_timer_duration;
+
+		add_timer(&mfd->refresh_timer);
+
+		if (!mfd->dma->busy) {
+			if (!queue_work(mdp_dma_wq, &mfd->dma_update_worker)) {
+				MSM_FB_DEBUG("mdp_dma: can't queue_work! -> "
+					"MDP/MDDI/LCD clock speed needs "
+					"to be increased\n");
+			}
+		}
+	} else {
+		if (!mfd->hw_refresh)
+			complete(&mfd->refresher_comp);
+	}
+}
diff --git a/drivers/video/msm/mdp_dma_dsi_video.c b/drivers/video/msm/mdp_dma_dsi_video.c
new file mode 100644
index 0000000..505eb74
--- /dev/null
+++ b/drivers/video/msm/mdp_dma_dsi_video.c
@@ -0,0 +1,270 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+#include <linux/fb.h>
+#include <asm/system.h>
+#include <mach/hardware.h>
+#include "mdp.h"
+#include "msm_fb.h"
+#include "mdp4.h"
+
+#define DSI_VIDEO_BASE	0xF0000
+#define DMA_P_BASE      0x90000
+
+static int first_pixel_start_x;
+static int first_pixel_start_y;
+
+int mdp_dsi_video_on(struct platform_device *pdev)
+{
+	int dsi_width;
+	int dsi_height;
+	int dsi_bpp;
+	int dsi_border_clr;
+	int dsi_underflow_clr;
+	int dsi_hsync_skew;
+
+	int hsync_period;
+	int hsync_ctrl;
+	int vsync_period;
+	int display_hctl;
+	int display_v_start;
+	int display_v_end;
+	int active_hctl;
+	int active_h_start;
+	int active_h_end;
+	int active_v_start;
+	int active_v_end;
+	int ctrl_polarity;
+	int h_back_porch;
+	int h_front_porch;
+	int v_back_porch;
+	int v_front_porch;
+	int hsync_pulse_width;
+	int vsync_pulse_width;
+	int hsync_polarity;
+	int vsync_polarity;
+	int data_en_polarity;
+	int hsync_start_x;
+	int hsync_end_x;
+	uint8 *buf;
+	uint32 dma2_cfg_reg;
+
+	int bpp;
+	struct fb_info *fbi;
+	struct fb_var_screeninfo *var;
+	struct msm_fb_data_type *mfd;
+	int ret;
+
+	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
+
+	if (!mfd)
+		return -ENODEV;
+
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	fbi = mfd->fbi;
+	var = &fbi->var;
+
+	bpp = fbi->var.bits_per_pixel / 8;
+	buf = (uint8 *) fbi->fix.smem_start;
+	buf += fbi->var.xoffset * bpp +
+		fbi->var.yoffset * fbi->fix.line_length;
+
+	dma2_cfg_reg = DMA_PACK_ALIGN_LSB | DMA_OUT_SEL_DSI_VIDEO;
+
+	if (mfd->fb_imgType == MDP_BGR_565)
+		dma2_cfg_reg |= DMA_PACK_PATTERN_BGR;
+	else if (mfd->fb_imgType == MDP_RGBA_8888)
+		dma2_cfg_reg |= DMA_PACK_PATTERN_BGR;
+	else
+		dma2_cfg_reg |= DMA_PACK_PATTERN_RGB;
+
+	if (bpp == 2)
+		dma2_cfg_reg |= DMA_IBUF_FORMAT_RGB565;
+	else if (bpp == 3)
+		dma2_cfg_reg |= DMA_IBUF_FORMAT_RGB888;
+	else
+		dma2_cfg_reg |= DMA_IBUF_FORMAT_xRGB8888_OR_ARGB8888;
+
+	switch (mfd->panel_info.bpp) {
+	case 24:
+		dma2_cfg_reg |= DMA_DSTC0G_8BITS |
+			DMA_DSTC1B_8BITS | DMA_DSTC2R_8BITS;
+		break;
+	case 18:
+		dma2_cfg_reg |= DMA_DSTC0G_6BITS |
+			DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;
+		break;
+	case 16:
+		dma2_cfg_reg |= DMA_DSTC0G_6BITS |
+			DMA_DSTC1B_5BITS | DMA_DSTC2R_5BITS;
+		break;
+	default:
+		printk(KERN_ERR "mdp dsi video can't support format %d bpp!\n",
+			mfd->panel_info.bpp);
+		return -ENODEV;
+	}
+	/* MDP cmd block enable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	/* starting address */
+	MDP_OUTP(MDP_BASE + DMA_P_BASE + 0x8, (uint32) buf);
+
+	/* active window width and height */
+	MDP_OUTP(MDP_BASE + DMA_P_BASE + 0x4, ((fbi->var.yres) << 16) |
+		(fbi->var.xres));
+
+	/* buffer ystride */
+	MDP_OUTP(MDP_BASE + DMA_P_BASE + 0xc, fbi->fix.line_length);
+
+	/* x/y coordinate = always 0 for lcdc */
+	MDP_OUTP(MDP_BASE + DMA_P_BASE + 0x10, 0);
+
+	/* dma config */
+	MDP_OUTP(MDP_BASE + DMA_P_BASE, dma2_cfg_reg);
+
+	/*
+	 * DSI timing setting
+	 */
+	h_back_porch = var->left_margin;
+	h_front_porch = var->right_margin;
+	v_back_porch = var->upper_margin;
+	v_front_porch = var->lower_margin;
+	hsync_pulse_width = var->hsync_len;
+	vsync_pulse_width = var->vsync_len;
+	dsi_border_clr = mfd->panel_info.lcdc.border_clr;
+	dsi_underflow_clr = mfd->panel_info.lcdc.underflow_clr;
+	dsi_hsync_skew = mfd->panel_info.lcdc.hsync_skew;
+	dsi_width = mfd->panel_info.xres;
+	dsi_height = mfd->panel_info.yres;
+	dsi_bpp = mfd->panel_info.bpp;
+	hsync_period = h_back_porch + dsi_width + h_front_porch + 1;
+	hsync_ctrl = (hsync_period << 16) | hsync_pulse_width;
+	hsync_start_x = h_back_porch;
+	hsync_end_x = dsi_width + h_back_porch - 1;
+	display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
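+	/* vertical timings below are in units of hsync_period (one line) */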
+	vsync_period =
+		(v_back_porch + dsi_height + v_front_porch + 1) * hsync_period;
+	display_v_start = v_back_porch * hsync_period + dsi_hsync_skew;
+	display_v_end = (dsi_height + v_back_porch) * hsync_period;
+
+	active_h_start = hsync_start_x + first_pixel_start_x;
+	active_h_end = active_h_start + var->xres - 1;
+	active_hctl = ACTIVE_START_X_EN |
+			(active_h_end << 16) | active_h_start;
+
+	active_v_start = display_v_start +
+			first_pixel_start_y * hsync_period;
+	active_v_end = active_v_start +	(var->yres) * hsync_period - 1;
+	active_v_start |= ACTIVE_START_Y_EN;
+
+	dsi_underflow_clr |= 0x80000000;	/* enable recovery */
+	hsync_polarity = 0;
+	vsync_polarity = 0;
+	data_en_polarity = 0;
+
+	ctrl_polarity =	(data_en_polarity << 2) |
+		(vsync_polarity << 1) | (hsync_polarity);
+
+	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x4, hsync_ctrl);
+	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x8, vsync_period);
+	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0xc, vsync_pulse_width);
+	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x10, display_hctl);
+	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x14, display_v_start);
+	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x18, display_v_end);
+	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x1c, active_hctl);
+	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x20, active_v_start);
+	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x24, active_v_end);
+	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x28, dsi_border_clr);
+	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x2c, dsi_underflow_clr);
+	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x30, dsi_hsync_skew);
+	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x38, ctrl_polarity);
+
+	ret = panel_next_on(pdev);
+	if (ret == 0) {
+		/* enable DSI block */
+		MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 1);
+		/*Turning on DMA_P block*/
+		mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	}
+
+	/* MDP cmd block disable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+	return ret;
+}
+
+int mdp_dsi_video_off(struct platform_device *pdev)
+{
+	int ret = 0;
+	/* MDP cmd block enable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 0);
+	/* MDP cmd block disable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	/*Turning off DMA_P block*/
+	mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+	ret = panel_next_off(pdev);
+	/* delay to make sure the last frame finishes */
+	msleep(20);
+
+	return ret;
+}
+
+void mdp_dsi_video_update(struct msm_fb_data_type *mfd)
+{
+	struct fb_info *fbi = mfd->fbi;
+	uint8 *buf;
+	int bpp;
+	unsigned long flag;
+	int irq_block = MDP_DMA2_TERM;
+
+	if (!mfd->panel_power_on)
+		return;
+
+	down(&mfd->dma->mutex);
+
+	bpp = fbi->var.bits_per_pixel / 8;
+	buf = (uint8 *) fbi->fix.smem_start;
+	buf += fbi->var.xoffset * bpp +
+		fbi->var.yoffset * fbi->fix.line_length;
+	/* no need to power on cmd block since it's dsi mode */
+	/* starting address */
+	MDP_OUTP(MDP_BASE + DMA_P_BASE + 0x8, (uint32) buf);
+	/* enable irq */
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	mdp_enable_irq(irq_block);
+	INIT_COMPLETION(mfd->dma->comp);
+	mfd->dma->waiting = TRUE;
+
+	outp32(MDP_INTR_CLEAR, LCDC_FRAME_START);
+	mdp_intr_mask |= LCDC_FRAME_START;
+	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+	wait_for_completion_killable(&mfd->dma->comp);
+	mdp_disable_irq(irq_block);
+	up(&mfd->dma->mutex);
+}
diff --git a/drivers/video/msm/mdp_dma_lcdc.c b/drivers/video/msm/mdp_dma_lcdc.c
new file mode 100644
index 0000000..9ce7e13
--- /dev/null
+++ b/drivers/video/msm/mdp_dma_lcdc.c
@@ -0,0 +1,368 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/hrtimer.h>
+#include <linux/delay.h>
+#include <mach/hardware.h>
+#include <linux/io.h>
+
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+
+#include <linux/fb.h>
+
+#include "mdp.h"
+#include "msm_fb.h"
+#include "mdp4.h"
+
+#ifdef CONFIG_FB_MSM_MDP40
+#define LCDC_BASE	0xC0000
+#define DTV_BASE	0xD0000
+#define DMA_E_BASE      0xB0000
+#else
+#define LCDC_BASE	0xE0000
+#endif
+
+#define DMA_P_BASE      0x90000
+
+extern spinlock_t mdp_spin_lock;
+#ifndef CONFIG_FB_MSM_MDP40
+extern uint32 mdp_intr_mask;
+#endif
+
+int first_pixel_start_x;
+int first_pixel_start_y;
+
+int mdp_lcdc_on(struct platform_device *pdev)
+{
+	int lcdc_width;
+	int lcdc_height;
+	int lcdc_bpp;
+	int lcdc_border_clr;
+	int lcdc_underflow_clr;
+	int lcdc_hsync_skew;
+
+	int hsync_period;
+	int hsync_ctrl;
+	int vsync_period;
+	int display_hctl;
+	int display_v_start;
+	int display_v_end;
+	int active_hctl;
+	int active_h_start;
+	int active_h_end;
+	int active_v_start;
+	int active_v_end;
+	int ctrl_polarity;
+	int h_back_porch;
+	int h_front_porch;
+	int v_back_porch;
+	int v_front_porch;
+	int hsync_pulse_width;
+	int vsync_pulse_width;
+	int hsync_polarity;
+	int vsync_polarity;
+	int data_en_polarity;
+	int hsync_start_x;
+	int hsync_end_x;
+	uint8 *buf;
+	int bpp;
+	uint32 dma2_cfg_reg;
+	struct fb_info *fbi;
+	struct fb_var_screeninfo *var;
+	struct msm_fb_data_type *mfd;
+	uint32 dma_base;
+	uint32 timer_base = LCDC_BASE;
+	uint32 block = MDP_DMA2_BLOCK;
+	int ret;
+
+	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
+
+	if (!mfd)
+		return -ENODEV;
+
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	fbi = mfd->fbi;
+	var = &fbi->var;
+
+	/* MDP cmd block enable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	bpp = fbi->var.bits_per_pixel / 8;
+	buf = (uint8 *) fbi->fix.smem_start;
+	buf += fbi->var.xoffset * bpp + fbi->var.yoffset * fbi->fix.line_length;
+
+	dma2_cfg_reg = DMA_PACK_ALIGN_LSB | DMA_OUT_SEL_LCDC;
+
+	if (mfd->fb_imgType == MDP_BGR_565)
+		dma2_cfg_reg |= DMA_PACK_PATTERN_BGR;
+	else if (mfd->fb_imgType == MDP_RGBA_8888)
+		dma2_cfg_reg |= DMA_PACK_PATTERN_BGR;
+	else
+		dma2_cfg_reg |= DMA_PACK_PATTERN_RGB;
+
+	if (bpp == 2)
+		dma2_cfg_reg |= DMA_IBUF_FORMAT_RGB565;
+	else if (bpp == 3)
+		dma2_cfg_reg |= DMA_IBUF_FORMAT_RGB888;
+	else
+		dma2_cfg_reg |= DMA_IBUF_FORMAT_xRGB8888_OR_ARGB8888;
+
+	switch (mfd->panel_info.bpp) {
+	case 24:
+		dma2_cfg_reg |= DMA_DSTC0G_8BITS |
+		    DMA_DSTC1B_8BITS | DMA_DSTC2R_8BITS;
+		break;
+
+	case 18:
+		dma2_cfg_reg |= DMA_DSTC0G_6BITS |
+		    DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;
+		break;
+
+	case 16:
+		dma2_cfg_reg |= DMA_DSTC0G_6BITS |
+		    DMA_DSTC1B_5BITS | DMA_DSTC2R_5BITS;
+		break;
+
+	default:
+		printk(KERN_ERR "mdp lcdc can't support format %d bpp!\n",
+		       mfd->panel_info.bpp);
+		return -ENODEV;
+	}
+
+	/* DMA register config */
+
+	dma_base = DMA_P_BASE;
+
+#ifdef CONFIG_FB_MSM_MDP40
+	if (mfd->panel.type == HDMI_PANEL)
+		dma_base = DMA_E_BASE;
+#endif
+
+	/* starting address */
+	MDP_OUTP(MDP_BASE + dma_base + 0x8, (uint32) buf);
+	/* active window width and height */
+	MDP_OUTP(MDP_BASE + dma_base + 0x4, ((fbi->var.yres) << 16) |
+						(fbi->var.xres));
+	/* buffer ystride */
+	MDP_OUTP(MDP_BASE + dma_base + 0xc, fbi->fix.line_length);
+	/* x/y coordinate = always 0 for lcdc */
+	MDP_OUTP(MDP_BASE + dma_base + 0x10, 0);
+	/* dma config */
+	MDP_OUTP(MDP_BASE + dma_base, dma2_cfg_reg);
+
+	/*
+	 * LCDC timing setting
+	 */
+	h_back_porch = var->left_margin;
+	h_front_porch = var->right_margin;
+	v_back_porch = var->upper_margin;
+	v_front_porch = var->lower_margin;
+	hsync_pulse_width = var->hsync_len;
+	vsync_pulse_width = var->vsync_len;
+	lcdc_border_clr = mfd->panel_info.lcdc.border_clr;
+	lcdc_underflow_clr = mfd->panel_info.lcdc.underflow_clr;
+	lcdc_hsync_skew = mfd->panel_info.lcdc.hsync_skew;
+
+	lcdc_width = mfd->panel_info.xres;
+	lcdc_height = mfd->panel_info.yres;
+	lcdc_bpp = mfd->panel_info.bpp;
+
+	hsync_period =
+	    hsync_pulse_width + h_back_porch + lcdc_width + h_front_porch;
+	hsync_ctrl = (hsync_period << 16) | hsync_pulse_width;
+	hsync_start_x = hsync_pulse_width + h_back_porch;
+	hsync_end_x = hsync_period - h_front_porch - 1;
+	display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
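+	/* vertical timings below are in units of hsync_period (one line) */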
+	vsync_period =
+	    (vsync_pulse_width + v_back_porch + lcdc_height +
+	     v_front_porch) * hsync_period;
+	display_v_start =
+	    (vsync_pulse_width + v_back_porch) * hsync_period + lcdc_hsync_skew;
+	display_v_end =
+	    vsync_period - (v_front_porch * hsync_period) + lcdc_hsync_skew - 1;
+
+	if (lcdc_width != var->xres) {
+		active_h_start = hsync_start_x + first_pixel_start_x;
+		active_h_end = active_h_start + var->xres - 1;
+		active_hctl =
+		    ACTIVE_START_X_EN | (active_h_end << 16) | active_h_start;
+	} else {
+		active_hctl = 0;
+	}
+
+	if (lcdc_height != var->yres) {
+		active_v_start =
+		    display_v_start + first_pixel_start_y * hsync_period;
+		active_v_end = active_v_start + (var->yres) * hsync_period - 1;
+		active_v_start |= ACTIVE_START_Y_EN;
+	} else {
+		active_v_start = 0;
+		active_v_end = 0;
+	}
+
+
+#ifdef CONFIG_FB_MSM_MDP40
+	if (mfd->panel.type == HDMI_PANEL) {
+		block = MDP_DMA_E_BLOCK;
+		timer_base = DTV_BASE;
+		hsync_polarity = 0;
+		vsync_polarity = 0;
+	} else {
+		hsync_polarity = 1;
+		vsync_polarity = 1;
+	}
+
+	lcdc_underflow_clr |= 0x80000000;	/* enable recovery */
+#else
+	hsync_polarity = 0;
+	vsync_polarity = 0;
+#endif
+	data_en_polarity = 0;
+
+	ctrl_polarity =
+	    (data_en_polarity << 2) | (vsync_polarity << 1) | (hsync_polarity);
+
+	MDP_OUTP(MDP_BASE + timer_base + 0x4, hsync_ctrl);
+	MDP_OUTP(MDP_BASE + timer_base + 0x8, vsync_period);
+	MDP_OUTP(MDP_BASE + timer_base + 0xc, vsync_pulse_width * hsync_period);
+	if (timer_base == LCDC_BASE) {
+		MDP_OUTP(MDP_BASE + timer_base + 0x10, display_hctl);
+		MDP_OUTP(MDP_BASE + timer_base + 0x14, display_v_start);
+		MDP_OUTP(MDP_BASE + timer_base + 0x18, display_v_end);
+		MDP_OUTP(MDP_BASE + timer_base + 0x28, lcdc_border_clr);
+		MDP_OUTP(MDP_BASE + timer_base + 0x2c, lcdc_underflow_clr);
+		MDP_OUTP(MDP_BASE + timer_base + 0x30, lcdc_hsync_skew);
+		MDP_OUTP(MDP_BASE + timer_base + 0x38, ctrl_polarity);
+		MDP_OUTP(MDP_BASE + timer_base + 0x1c, active_hctl);
+		MDP_OUTP(MDP_BASE + timer_base + 0x20, active_v_start);
+		MDP_OUTP(MDP_BASE + timer_base + 0x24, active_v_end);
+	} else {
+		MDP_OUTP(MDP_BASE + timer_base + 0x18, display_hctl);
+		MDP_OUTP(MDP_BASE + timer_base + 0x1c, display_v_start);
+		MDP_OUTP(MDP_BASE + timer_base + 0x20, display_v_end);
+		MDP_OUTP(MDP_BASE + timer_base + 0x40, lcdc_border_clr);
+		MDP_OUTP(MDP_BASE + timer_base + 0x44, lcdc_underflow_clr);
+		MDP_OUTP(MDP_BASE + timer_base + 0x48, lcdc_hsync_skew);
+		MDP_OUTP(MDP_BASE + timer_base + 0x50, ctrl_polarity);
+		MDP_OUTP(MDP_BASE + timer_base + 0x2c, active_hctl);
+		MDP_OUTP(MDP_BASE + timer_base + 0x30, active_v_start);
+		MDP_OUTP(MDP_BASE + timer_base + 0x38, active_v_end);
+	}
+
+	ret = panel_next_on(pdev);
+	if (ret == 0) {
+		/* enable LCDC block */
+		MDP_OUTP(MDP_BASE + timer_base, 1);
+		mdp_pipe_ctrl(block, MDP_BLOCK_POWER_ON, FALSE);
+	}
+	/* MDP cmd block disable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+	return ret;
+}
+
+int mdp_lcdc_off(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct msm_fb_data_type *mfd;
+	uint32 timer_base = LCDC_BASE;
+	uint32 block = MDP_DMA2_BLOCK;
+
+	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
+
+#ifdef CONFIG_FB_MSM_MDP40
+	if (mfd->panel.type == HDMI_PANEL) {
+		block = MDP_DMA_E_BLOCK;
+		timer_base = DTV_BASE;
+	}
+#endif
+
+	/* MDP cmd block enable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	MDP_OUTP(MDP_BASE + timer_base, 0);
+	/* MDP cmd block disable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	mdp_pipe_ctrl(block, MDP_BLOCK_POWER_OFF, FALSE);
+
+	ret = panel_next_off(pdev);
+
+	/* delay to make sure the last frame finishes */
+	msleep(16);
+
+	return ret;
+}
+
+void mdp_lcdc_update(struct msm_fb_data_type *mfd)
+{
+	struct fb_info *fbi = mfd->fbi;
+	uint8 *buf;
+	int bpp;
+	unsigned long flag;
+	uint32 dma_base;
+	int irq_block = MDP_DMA2_TERM;
+#ifdef CONFIG_FB_MSM_MDP40
+	int intr = INTR_DMA_P_DONE;
+#endif
+
+	if (!mfd->panel_power_on)
+		return;
+
+	/* no need to power on cmd block since it's lcdc mode */
+	bpp = fbi->var.bits_per_pixel / 8;
+	buf = (uint8 *) fbi->fix.smem_start;
+	buf += fbi->var.xoffset * bpp +
+		fbi->var.yoffset * fbi->fix.line_length;
+
+	dma_base = DMA_P_BASE;
+
+#ifdef CONFIG_FB_MSM_MDP40
+	if (mfd->panel.type == HDMI_PANEL) {
+		intr = INTR_DMA_E_DONE;
+		irq_block = MDP_DMA_E_TERM;
+		dma_base = DMA_E_BASE;
+	}
+#endif
+
+	/* starting address */
+	MDP_OUTP(MDP_BASE + dma_base + 0x8, (uint32) buf);
+
+	/* enable LCDC irq */
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	mdp_enable_irq(irq_block);
+	INIT_COMPLETION(mfd->dma->comp);
+	mfd->dma->waiting = TRUE;
+#ifdef CONFIG_FB_MSM_MDP40
+	outp32(MDP_INTR_CLEAR, intr);
+	mdp_intr_mask |= intr;
+	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+#else
+	outp32(MDP_INTR_CLEAR, LCDC_FRAME_START);
+	mdp_intr_mask |= LCDC_FRAME_START;
+	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+#endif
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+	wait_for_completion_killable(&mfd->dma->comp);
+	mdp_disable_irq(irq_block);
+}
diff --git a/drivers/video/msm/mdp_dma_s.c b/drivers/video/msm/mdp_dma_s.c
new file mode 100644
index 0000000..22d79be
--- /dev/null
+++ b/drivers/video/msm/mdp_dma_s.c
@@ -0,0 +1,166 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/hrtimer.h>
+
+#include <mach/hardware.h>
+#include <linux/io.h>
+
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+
+#include <linux/fb.h>
+
+#include "mdp.h"
+#include "msm_fb.h"
+
+static void mdp_dma_s_update_lcd(struct msm_fb_data_type *mfd)
+{
+	MDPIBUF *iBuf = &mfd->ibuf;
+	int mddi_dest = FALSE;
+	uint32 outBpp = iBuf->bpp;
+	uint32 dma_s_cfg_reg;
+	uint8 *src;
+	struct msm_fb_panel_data *pdata =
+	    (struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;
+
+	dma_s_cfg_reg = DMA_PACK_TIGHT | DMA_PACK_ALIGN_LSB |
+	    DMA_OUT_SEL_AHB | DMA_IBUF_NONCONTIGUOUS;
+
+	if (mfd->fb_imgType == MDP_BGR_565)
+		dma_s_cfg_reg |= DMA_PACK_PATTERN_BGR;
+	else
+		dma_s_cfg_reg |= DMA_PACK_PATTERN_RGB;
+
+	if (outBpp == 4)
+		dma_s_cfg_reg |= DMA_IBUF_C3ALPHA_EN;
+
+	if (outBpp == 2)
+		dma_s_cfg_reg |= DMA_IBUF_FORMAT_RGB565;
+
+	if (mfd->panel_info.pdest != DISPLAY_2) {
+		printk(KERN_ERR "error: non-secondary type through dma_s!\n");
+		return;
+	}
+
+	if (mfd->panel_info.type == MDDI_PANEL ||
+		mfd->panel_info.type == EXT_MDDI_PANEL) {
+		dma_s_cfg_reg |= DMA_OUT_SEL_MDDI;
+		mddi_dest = TRUE;
+	} else {
+		dma_s_cfg_reg |= DMA_AHBM_LCD_SEL_SECONDARY;
+		outp32(MDP_EBI2_LCD1, mfd->data_port_phys);
+	}
+
+	src = (uint8 *) iBuf->buf;
+	/* starting input address */
+	src += (iBuf->dma_x + iBuf->dma_y * iBuf->ibuf_width) * outBpp;
+
+	/* MDP cmd block enable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
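+	/*
+	 * MDDI secondary panels go through the DMA_S block (0xa0000),
+	 * other secondary panels through DMA_E (0xb0000); the kickoff
+	 * terms at the end of this function match this split.
+	 */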
+	/* PIXELSIZE */
+	if (mfd->panel_info.type == MDDI_PANEL) {
+		MDP_OUTP(MDP_BASE + 0xa0004,
+			(iBuf->dma_h << 16 | iBuf->dma_w));
+		MDP_OUTP(MDP_BASE + 0xa0008, src);	/* ibuf address */
+		MDP_OUTP(MDP_BASE + 0xa000c,
+			iBuf->ibuf_width * outBpp);/* ystride */
+	} else {
+		MDP_OUTP(MDP_BASE + 0xb0004,
+			(iBuf->dma_h << 16 | iBuf->dma_w));
+		MDP_OUTP(MDP_BASE + 0xb0008, src);	/* ibuf address */
+		MDP_OUTP(MDP_BASE + 0xb000c,
+			iBuf->ibuf_width * outBpp);/* ystride */
+	}
+
+	if (mfd->panel_info.bpp == 18) {
+		dma_s_cfg_reg |= DMA_DSTC0G_6BITS |	/* 666 18BPP */
+		    DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;
+	} else {
+		dma_s_cfg_reg |= DMA_DSTC0G_6BITS |	/* 565 16BPP */
+		    DMA_DSTC1B_5BITS | DMA_DSTC2R_5BITS;
+	}
+
+	if (mddi_dest) {
+		if (mfd->panel_info.type == MDDI_PANEL) {
+			MDP_OUTP(MDP_BASE + 0xa0010,
+				(iBuf->dma_y << 16) | iBuf->dma_x);
+			MDP_OUTP(MDP_BASE + 0x00090, 1);
+		} else {
+			MDP_OUTP(MDP_BASE + 0xb0010,
+				(iBuf->dma_y << 16) | iBuf->dma_x);
+			MDP_OUTP(MDP_BASE + 0x00090, 2);
+		}
+		MDP_OUTP(MDP_BASE + 0x00094,
+				(MDDI_VDO_PACKET_DESC << 16) |
+				mfd->panel_info.mddi.vdopkt);
+	} else {
+		/* setting LCDC write window */
+		pdata->set_rect(iBuf->dma_x, iBuf->dma_y, iBuf->dma_w,
+				iBuf->dma_h);
+	}
+
+	if (mfd->panel_info.type == MDDI_PANEL)
+		MDP_OUTP(MDP_BASE + 0xa0000, dma_s_cfg_reg);
+	else
+		MDP_OUTP(MDP_BASE + 0xb0000, dma_s_cfg_reg);
+
+
+	/* MDP cmd block disable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	if (mfd->panel_info.type == MDDI_PANEL)
+		mdp_pipe_kickoff(MDP_DMA_S_TERM, mfd);
+	else
+		mdp_pipe_kickoff(MDP_DMA_E_TERM, mfd);
+
+}
+
+void mdp_dma_s_update(struct msm_fb_data_type *mfd)
+{
+	down(&mfd->dma->mutex);
+	if ((mfd) && (!mfd->dma->busy) && (mfd->panel_power_on)) {
+		down(&mfd->sem);
+		if (mfd->panel_info.type == MDDI_PANEL)
+			mdp_enable_irq(MDP_DMA_S_TERM);
+		else
+			mdp_enable_irq(MDP_DMA_E_TERM);
+		mfd->dma->busy = TRUE;
+		INIT_COMPLETION(mfd->dma->comp);
+		mfd->ibuf_flushed = TRUE;
+		mdp_dma_s_update_lcd(mfd);
+		up(&mfd->sem);
+
+		/* wait until DMA finishes the current job */
+		wait_for_completion_killable(&mfd->dma->comp);
+		if (mfd->panel_info.type == MDDI_PANEL)
+			mdp_disable_irq(MDP_DMA_S_TERM);
+		else
+			mdp_disable_irq(MDP_DMA_E_TERM);
+
+		/* signal if the pan function is waiting for this update */
+		if (mfd->pan_waiting) {
+			mfd->pan_waiting = FALSE;
+			complete(&mfd->pan_comp);
+		}
+	}
+	up(&mfd->dma->mutex);
+}
diff --git a/drivers/video/msm/mdp_dma_tv.c b/drivers/video/msm/mdp_dma_tv.c
new file mode 100644
index 0000000..66d9422
--- /dev/null
+++ b/drivers/video/msm/mdp_dma_tv.c
@@ -0,0 +1,138 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/hrtimer.h>
+#include <linux/delay.h>
+
+#include <mach/hardware.h>
+#include <linux/io.h>
+
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+
+#include <linux/fb.h>
+
+#include "mdp.h"
+#include "msm_fb.h"
+
+extern spinlock_t mdp_spin_lock;
+extern uint32 mdp_intr_mask;
+
+int mdp_dma3_on(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+	struct fb_info *fbi;
+	uint8 *buf;
+	int bpp;
+	int ret = 0;
+
+	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
+
+	if (!mfd)
+		return -ENODEV;
+
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	fbi = mfd->fbi;
+	/* MDP cmd block enable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	bpp = fbi->var.bits_per_pixel / 8;
+	buf = (uint8 *) fbi->fix.smem_start;
+	buf += fbi->var.xoffset * bpp +
+		fbi->var.yoffset * fbi->fix.line_length;
+
+	/* starting address[31..8] of Video frame buffer is CS0 */
+	MDP_OUTP(MDP_BASE + 0xC0008, (uint32) buf >> 3);
+
+	mdp_pipe_ctrl(MDP_DMA3_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	MDP_OUTP(MDP_BASE + 0xC0004, 0x4c60674); /* flicker filter enabled */
+	MDP_OUTP(MDP_BASE + 0xC0010, 0x20);	/* sobel threshold */
+
+	MDP_OUTP(MDP_BASE + 0xC0018, 0xeb0010);	/* Y  Max, Y  min */
+	MDP_OUTP(MDP_BASE + 0xC001C, 0xf00010);	/* Cb Max, Cb min */
+	MDP_OUTP(MDP_BASE + 0xC0020, 0xf00010);	/* Cr Max, Cr min */
+
+	MDP_OUTP(MDP_BASE + 0xC000C, 0x67686970); /* add a few chars for CC */
+	MDP_OUTP(MDP_BASE + 0xC0000, 0x1);	/* MDP tv out enable */
+
+	/* MDP cmd block disable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+	ret = panel_next_on(pdev);
+
+	return ret;
+}
+
+int mdp_dma3_off(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	ret = panel_next_off(pdev);
+	if (ret)
+		return ret;
+
+	/* MDP cmd block enable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	MDP_OUTP(MDP_BASE + 0xC0000, 0x0);
+	/* MDP cmd block disable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+	mdp_pipe_ctrl(MDP_DMA3_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+	/* delay to make sure the last frame finishes */
+	msleep(16);
+
+	return ret;
+}
+
+void mdp_dma3_update(struct msm_fb_data_type *mfd)
+{
+	struct fb_info *fbi = mfd->fbi;
+	uint8 *buf;
+	int bpp;
+	unsigned long flag;
+
+	if (!mfd->panel_power_on)
+		return;
+
+	/* no need to power on cmd block since dma3 is running */
+	bpp = fbi->var.bits_per_pixel / 8;
+	buf = (uint8 *) fbi->fix.smem_start;
+	buf += fbi->var.xoffset * bpp +
+		fbi->var.yoffset * fbi->fix.line_length;
+	MDP_OUTP(MDP_BASE + 0xC0008, (uint32) buf >> 3);
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	mdp_enable_irq(MDP_DMA3_TERM);
+	INIT_COMPLETION(mfd->dma->comp);
+	mfd->dma->waiting = TRUE;
+
+	outp32(MDP_INTR_CLEAR, TV_OUT_DMA3_START);
+	mdp_intr_mask |= TV_OUT_DMA3_START;
+	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+
+	wait_for_completion_killable(&mfd->dma->comp);
+	mdp_disable_irq(MDP_DMA3_TERM);
+}
diff --git a/drivers/video/msm/mdp_hw.h b/drivers/video/msm/mdp_hw.h
index d804774..f35a757 100644
--- a/drivers/video/msm/mdp_hw.h
+++ b/drivers/video/msm/mdp_hw.h
@@ -15,20 +15,61 @@
 #ifndef _MDP_HW_H_
 #define _MDP_HW_H_
 
+#include <linux/platform_device.h>
+#include <linux/wait.h>
 #include <mach/msm_iomap.h>
 #include <mach/msm_fb.h>
 
+typedef void (*mdp_dma_start_func_t)(void *private_data, uint32_t addr,
+				     uint32_t stride, uint32_t width,
+				     uint32_t height, uint32_t x, uint32_t y);
+
+struct mdp_out_interface {
+	uint32_t		registered:1;
+	void			*priv;
+
+	/* If the interface client wants to get DMA_DONE events */
+	uint32_t		dma_mask;
+	mdp_dma_start_func_t	dma_start;
+
+	struct msmfb_callback	*dma_cb;
+	wait_queue_head_t	dma_waitqueue;
+
+	/* If the interface client wants to be notified of non-DMA irqs,
+	 * e.g. LCDC/TV-out frame start */
+	uint32_t		irq_mask;
+	struct msmfb_callback	*irq_cb;
+};
+
 struct mdp_info {
+	spinlock_t lock;
 	struct mdp_device mdp_dev;
 	char * __iomem base;
 	int irq;
+	struct clk *clk;
+	struct clk *ebi1_clk;
+	struct mdp_out_interface out_if[MSM_MDP_NUM_INTERFACES];
+	int format;
+	int pack_pattern;
+	bool dma_config_dirty;
 };
+
+extern int mdp_out_if_register(struct mdp_device *mdp_dev, int interface,
+			       void *private_data, uint32_t dma_mask,
+			       mdp_dma_start_func_t dma_start);
+
+extern int mdp_out_if_req_irq(struct mdp_device *mdp_dev, int interface,
+			      uint32_t mask, struct msmfb_callback *cb);
+
 struct mdp_blit_req;
 struct mdp_device;
 int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req,
 		 struct file *src_file, unsigned long src_start,
 		 unsigned long src_len, struct file *dst_file,
 		 unsigned long dst_start, unsigned long dst_len);
+
+void mdp_ppp_dump_debug(const struct mdp_info *mdp);
+
 #define mdp_writel(mdp, value, offset) writel(value, mdp->base + offset)
 #define mdp_readl(mdp, offset) readl(mdp->base + offset)
 
@@ -48,10 +89,18 @@
 #define MDP_DISPLAY_STATUS               (0x00038)
 #define MDP_EBI2_LCD0                    (0x0003c)
 #define MDP_EBI2_LCD1                    (0x00040)
+#define MDP_EBI2_PORTMAP_MODE            (0x0005c)
+
+#ifndef CONFIG_MSM_MDP31
 #define MDP_DISPLAY0_ADDR                (0x00054)
 #define MDP_DISPLAY1_ADDR                (0x00058)
-#define MDP_EBI2_PORTMAP_MODE            (0x0005c)
-#define MDP_MODE                         (0x00060)
+#define MDP_PPP_CMD_MODE                 (0x00060)
+#else
+#define MDP_DISPLAY0_ADDR                (0x10000)
+#define MDP_DISPLAY1_ADDR                (0x10004)
+#define MDP_PPP_CMD_MODE                 (0x10060)
+#endif
+
 #define MDP_TV_OUT_STATUS                (0x00064)
 #define MDP_HW_VERSION                   (0x00070)
 #define MDP_SW_RESET                     (0x00074)
@@ -61,6 +110,8 @@
 #define MDP_SECONDARY_VSYNC_OUT_CTRL     (0x00084)
 #define MDP_EXTERNAL_VSYNC_OUT_CTRL      (0x00088)
 #define MDP_VSYNC_CTRL                   (0x0008c)
+#define MDP_MDDI_PARAM_WR_SEL            (0x00090)
+#define MDP_MDDI_PARAM                   (0x00094)
 #define MDP_CGC_EN                       (0x00100)
 #define MDP_CMD_STATUS                   (0x10008)
 #define MDP_PROFILE_EN                   (0x10010)
@@ -107,6 +158,7 @@
 #define MDP_FULL_BYPASS_WORD35           (0x1018c)
 #define MDP_FULL_BYPASS_WORD37           (0x10194)
 #define MDP_FULL_BYPASS_WORD39           (0x1019c)
+#define MDP_PPP_OUT_XY                   (0x1019c)
 #define MDP_FULL_BYPASS_WORD40           (0x101a0)
 #define MDP_FULL_BYPASS_WORD41           (0x101a4)
 #define MDP_FULL_BYPASS_WORD43           (0x101ac)
@@ -129,11 +181,27 @@
 #define MDP_FULL_BYPASS_WORD61           (0x101f4)
 #define MDP_FULL_BYPASS_WORD62           (0x101f8)
 #define MDP_FULL_BYPASS_WORD63           (0x101fc)
+
+#ifdef CONFIG_MSM_MDP31
+#define MDP_PPP_SRC_XY                   (0x10200)
+#define MDP_PPP_BG_XY                    (0x10204)
+#define MDP_PPP_SRC_IMAGE_SIZE           (0x10208)
+#define MDP_PPP_BG_IMAGE_SIZE            (0x1020c)
+#define MDP_PPP_SCALE_CONFIG             (0x10230)
+#define MDP_PPP_CSC_CONFIG               (0x10240)
+#define MDP_PPP_BLEND_BG_ALPHA_SEL       (0x70010)
+#endif
+
 #define MDP_TFETCH_TEST_MODE             (0x20004)
 #define MDP_TFETCH_STATUS                (0x20008)
 #define MDP_TFETCH_TILE_COUNT            (0x20010)
 #define MDP_TFETCH_FETCH_COUNT           (0x20014)
 #define MDP_TFETCH_CONSTANT_COLOR        (0x20040)
+#define MDP_BGTFETCH_TEST_MODE           (0x28004)
+#define MDP_BGTFETCH_STATUS              (0x28008)
+#define MDP_BGTFETCH_TILE_COUNT          (0x28010)
+#define MDP_BGTFETCH_FETCH_COUNT         (0x28014)
+#define MDP_BGTFETCH_CONSTANT_COLOR      (0x28040)
 #define MDP_CSC_BYPASS                   (0x40004)
 #define MDP_SCALE_COEFF_LSB              (0x5fffc)
 #define MDP_TV_OUT_CTL                   (0xc0000)
@@ -158,55 +226,49 @@
 #define MDP_TEST_MISR_CURR_VAL_DCLK      (0xd020c)
 #define MDP_TEST_CAPTURED_DCLK           (0xd0210)
 #define MDP_TEST_MISR_CAPT_VAL_DCLK      (0xd0214)
-#define MDP_LCDC_CTL                     (0xe0000)
+
+#define MDP_DMA_P_START                  (0x00044)
+#define MDP_DMA_P_CONFIG                 (0x90000)
+#define MDP_DMA_P_SIZE                   (0x90004)
+#define MDP_DMA_P_IBUF_ADDR              (0x90008)
+#define MDP_DMA_P_IBUF_Y_STRIDE          (0x9000c)
+#define MDP_DMA_P_OUT_XY                 (0x90010)
+#define MDP_DMA_P_COLOR_CORRECT_CONFIG   (0x90070)
+
+#define MDP_LCDC_EN                      (0xe0000)
 #define MDP_LCDC_HSYNC_CTL               (0xe0004)
-#define MDP_LCDC_VSYNC_CTL               (0xe0008)
-#define MDP_LCDC_ACTIVE_HCTL             (0xe000c)
-#define MDP_LCDC_ACTIVE_VCTL             (0xe0010)
-#define MDP_LCDC_BORDER_CLR              (0xe0014)
-#define MDP_LCDC_H_BLANK                 (0xe0018)
-#define MDP_LCDC_V_BLANK                 (0xe001c)
-#define MDP_LCDC_UNDERFLOW_CLR           (0xe0020)
-#define MDP_LCDC_HSYNC_SKEW              (0xe0024)
-#define MDP_LCDC_TEST_CTL                (0xe0028)
-#define MDP_LCDC_LINE_IRQ                (0xe002c)
-#define MDP_LCDC_CTL_POLARITY            (0xe0030)
-#define MDP_LCDC_DMA_CONFIG              (0xe1000)
-#define MDP_LCDC_DMA_SIZE                (0xe1004)
-#define MDP_LCDC_DMA_IBUF_ADDR           (0xe1008)
-#define MDP_LCDC_DMA_IBUF_Y_STRIDE       (0xe100c)
+#define MDP_LCDC_VSYNC_PERIOD            (0xe0008)
+#define MDP_LCDC_VSYNC_PULSE_WIDTH       (0xe000c)
+#define MDP_LCDC_DISPLAY_HCTL            (0xe0010)
+#define MDP_LCDC_DISPLAY_V_START         (0xe0014)
+#define MDP_LCDC_DISPLAY_V_END           (0xe0018)
+#define MDP_LCDC_ACTIVE_HCTL             (0xe001c)
+#define MDP_LCDC_ACTIVE_V_START          (0xe0020)
+#define MDP_LCDC_ACTIVE_V_END            (0xe0024)
+#define MDP_LCDC_BORDER_CLR              (0xe0028)
+#define MDP_LCDC_UNDERFLOW_CTL           (0xe002c)
+#define MDP_LCDC_HSYNC_SKEW              (0xe0030)
+#define MDP_LCDC_TEST_CTL                (0xe0034)
+#define MDP_LCDC_CTL_POLARITY            (0xe0038)
 
+#define MDP_PPP_SCALE_STATUS             (0x50000)
+#define MDP_PPP_BLEND_STATUS             (0x70000)
 
-#define MDP_DMA2_TERM 0x1
-#define MDP_DMA3_TERM 0x2
-#define MDP_PPP_TERM 0x3
+/* MDP_SW_RESET */
+#define MDP_PPP_SW_RESET                (1<<4)
 
 /* MDP_INTR_ENABLE */
-#define DL0_ROI_DONE           (1<<0)
-#define DL1_ROI_DONE           (1<<1)
-#define DL0_DMA2_TERM_DONE     (1<<2)
-#define DL1_DMA2_TERM_DONE     (1<<3)
-#define DL0_PPP_TERM_DONE      (1<<4)
-#define DL1_PPP_TERM_DONE      (1<<5)
-#define TV_OUT_DMA3_DONE       (1<<6)
-#define TV_ENC_UNDERRUN        (1<<7)
-#define DL0_FETCH_DONE         (1<<11)
-#define DL1_FETCH_DONE         (1<<12)
+#define DL0_ROI_DONE			(1<<0)
+#define TV_OUT_DMA3_DONE		(1<<6)
+#define TV_ENC_UNDERRUN			(1<<7)
 
-#define MDP_PPP_BUSY_STATUS (DL0_ROI_DONE| \
-			   DL1_ROI_DONE| \
-			   DL0_PPP_TERM_DONE| \
-			   DL1_PPP_TERM_DONE)
-
-#define MDP_ANY_INTR_MASK (DL0_ROI_DONE| \
-			   DL1_ROI_DONE| \
-			   DL0_DMA2_TERM_DONE| \
-			   DL1_DMA2_TERM_DONE| \
-			   DL0_PPP_TERM_DONE| \
-			   DL1_PPP_TERM_DONE| \
-			   DL0_FETCH_DONE| \
-			   DL1_FETCH_DONE| \
-			   TV_ENC_UNDERRUN)
+#ifdef CONFIG_MSM_MDP22
+#define MDP_DMA_P_DONE			(1 << 2)
+#else /* CONFIG_MSM_MDP31 */
+#define MDP_DMA_P_DONE			(1 << 14)
+#define MDP_LCDC_UNDERFLOW		(1 << 16)
+#define MDP_LCDC_FRAME_START		(1 << 15)
+#endif
 
 #define MDP_TOP_LUMA       16
 #define MDP_TOP_CHROMA     0
@@ -316,7 +378,12 @@
 #define PPP_OP_SCALE_X_ON (1<<0)
 #define PPP_OP_SCALE_Y_ON (1<<1)
 
+#ifndef CONFIG_MSM_MDP31
 #define PPP_OP_CONVERT_RGB2YCBCR 0
+#else
+#define PPP_OP_CONVERT_RGB2YCBCR (1<<30)
+#endif
+
 #define PPP_OP_CONVERT_YCBCR2RGB (1<<2)
 #define PPP_OP_CONVERT_ON (1<<3)
 
@@ -372,6 +439,13 @@
 #define PPP_OP_BG_CHROMA_SITE_COSITE 0
 #define PPP_OP_BG_CHROMA_SITE_OFFSITE (1<<27)
 
+#define PPP_BLEND_BG_USE_ALPHA_SEL      (1 << 0)
+#define PPP_BLEND_BG_ALPHA_REVERSE      (1 << 3)
+#define PPP_BLEND_BG_SRCPIXEL_ALPHA     (0 << 1)
+#define PPP_BLEND_BG_DSTPIXEL_ALPHA     (1 << 1)
+#define PPP_BLEND_BG_CONSTANT_ALPHA     (2 << 1)
+#define PPP_BLEND_BG_CONST_ALPHA_VAL(x) ((x) << 24)
+
 /* MDP_PPP_DESTINATION_CONFIG / MDP_FULL_BYPASS_WORD20 */
 #define PPP_DST_C0G_8BIT ((1<<0)|(1<<1))
 #define PPP_DST_C1B_8BIT ((1<<3)|(1<<2))
@@ -589,20 +663,71 @@
 #define PPP_ADDR_BG_CFG			MDP_FULL_BYPASS_WORD53
 #define PPP_ADDR_BG_PACK_PATTERN	MDP_FULL_BYPASS_WORD54
 
+/* color conversion matrix configuration registers */
+/* pfmv is mv1, prmv is mv2 */
+#define MDP_CSC_PFMVn(n)		(0x40400 + (4 * (n)))
+#define MDP_CSC_PRMVn(n)		(0x40440 + (4 * (n)))
+
+#ifdef CONFIG_MSM_MDP31
+#define MDP_PPP_CSC_PRE_BV1n(n)		(0x40500 + (4 * (n)))
+#define MDP_PPP_CSC_PRE_BV2n(n)		(0x40540 + (4 * (n)))
+#define MDP_PPP_CSC_POST_BV1n(n)	(0x40580 + (4 * (n)))
+#define MDP_PPP_CSC_POST_BV2n(n)	(0x405c0 + (4 * (n)))
+
+#define MDP_PPP_CSC_PRE_LV1n(n)		(0x40600 + (4 * (n)))
+#define MDP_PPP_CSC_PRE_LV2n(n)		(0x40640 + (4 * (n)))
+#define MDP_PPP_CSC_POST_LV1n(n)	(0x40680 + (4 * (n)))
+#define MDP_PPP_CSC_POST_LV2n(n)	(0x406c0 + (4 * (n)))
+
+#define MDP_PPP_SCALE_COEFF_D0_SET	(0)
+#define MDP_PPP_SCALE_COEFF_D1_SET	(1)
+#define MDP_PPP_SCALE_COEFF_D2_SET	(2)
+#define MDP_PPP_SCALE_COEFF_U1_SET	(3)
+#define MDP_PPP_SCALE_COEFF_LSBn(n)	(0x50400 + (8 * (n)))
+#define MDP_PPP_SCALE_COEFF_MSBn(n)	(0x50404 + (8 * (n)))
+
+#define MDP_PPP_DEINT_COEFFn(n)		(0x30010 + (4 * (n)))
+
+#define MDP_PPP_SCALER_FIR		(0)
+#define MDP_PPP_SCALER_MN		(1)
+
+#else /* !defined(CONFIG_MSM_MDP31) */
+
+#define MDP_CSC_PBVn(n)			(0x40500 + (4 * (n)))
+#define MDP_CSC_SBVn(n)			(0x40540 + (4 * (n)))
+#define MDP_CSC_PLVn(n)			(0x40580 + (4 * (n)))
+#define MDP_CSC_SLVn(n)			(0x405c0 + (4 * (n)))
+
+#endif
+
+
 /* MDP_DMA_CONFIG / MDP_FULL_BYPASS_WORD32 */
-#define DMA_DSTC0G_6BITS (1<<1)
-#define DMA_DSTC1B_6BITS (1<<3)
-#define DMA_DSTC2R_6BITS (1<<5)
 #define DMA_DSTC0G_5BITS (1<<0)
 #define DMA_DSTC1B_5BITS (1<<2)
 #define DMA_DSTC2R_5BITS (1<<4)
 
+#define DMA_DSTC0G_6BITS (2<<0)
+#define DMA_DSTC1B_6BITS (2<<2)
+#define DMA_DSTC2R_6BITS (2<<4)
+
+#define DMA_DSTC0G_8BITS (3<<0)
+#define DMA_DSTC1B_8BITS (3<<2)
+#define DMA_DSTC2R_8BITS (3<<4)
+
+#define DMA_DST_BITS_MASK 0x3F
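+/* each destination component depth is a 2-bit field (C0G at bits 1:0,
+ * C1B at bits 3:2, C2R at bits 5:4): 1 = 5 bits, 2 = 6 bits, 3 = 8 bits;
+ * DMA_DST_BITS_MASK covers all three fields */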
+
 #define DMA_PACK_TIGHT (1<<6)
 #define DMA_PACK_LOOSE 0
 #define DMA_PACK_ALIGN_LSB 0
 #define DMA_PACK_ALIGN_MSB (1<<7)
+#define DMA_PACK_PATTERN_MASK (0x3f<<8)
 #define DMA_PACK_PATTERN_RGB \
 	(MDP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 2)<<8)
+#define DMA_PACK_PATTERN_BGR \
+	(MDP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 2)<<8)
+
+
+#ifdef CONFIG_MSM_MDP22
 
 #define DMA_OUT_SEL_AHB  0
 #define DMA_OUT_SEL_MDDI (1<<14)
@@ -610,16 +735,32 @@
 #define DMA_AHBM_LCD_SEL_SECONDARY (1<<15)
 #define DMA_IBUF_C3ALPHA_EN (1<<16)
 #define DMA_DITHER_EN (1<<17)
-
 #define DMA_MDDI_DMAOUT_LCD_SEL_PRIMARY 0
 #define DMA_MDDI_DMAOUT_LCD_SEL_SECONDARY (1<<18)
 #define DMA_MDDI_DMAOUT_LCD_SEL_EXTERNAL (1<<19)
-
 #define DMA_IBUF_FORMAT_RGB565 (1<<20)
 #define DMA_IBUF_FORMAT_RGB888_OR_ARGB8888 0
-
+#define DMA_IBUF_FORMAT_MASK (1 << 20)
 #define DMA_IBUF_NONCONTIGUOUS (1<<21)
 
+#else /* CONFIG_MSM_MDP31 */
+
+#define DMA_OUT_SEL_AHB				(0 << 19)
+#define DMA_OUT_SEL_MDDI			(1 << 19)
+#define DMA_OUT_SEL_LCDC			(2 << 19)
+#define DMA_OUT_SEL_LCDC_MDDI			(3 << 19)
+#define DMA_DITHER_EN				(1 << 24)
+#define DMA_IBUF_FORMAT_RGB888			(0 << 25)
+#define DMA_IBUF_FORMAT_RGB565			(1 << 25)
+#define DMA_IBUF_FORMAT_XRGB8888		(2 << 25)
+#define DMA_IBUF_FORMAT_MASK			(3 << 25)
+#define DMA_IBUF_NONCONTIGUOUS			(0)
+
+#define DMA_MDDI_DMAOUT_LCD_SEL_PRIMARY		(0)
+#define DMA_MDDI_DMAOUT_LCD_SEL_SECONDARY	(0)
+#define DMA_MDDI_DMAOUT_LCD_SEL_EXTERNAL	(0)
+#endif
+
 /* MDDI REGISTER ? */
 #define MDDI_VDO_PACKET_DESC  0x5666
 #define MDDI_VDO_PACKET_PRIM  0xC3
diff --git a/drivers/video/msm/mdp_hw40.c b/drivers/video/msm/mdp_hw40.c
new file mode 100644
index 0000000..d36125e
--- /dev/null
+++ b/drivers/video/msm/mdp_hw40.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Dima Zavin <dima@android.com>
+ *
+ * Based on code from Code Aurora Forum.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include "mdp_hw.h"
+
+static void mdp_dma_to_mddi(void *priv, uint32_t addr, uint32_t stride,
+			    uint32_t width, uint32_t height, uint32_t x,
+			    uint32_t y)
+{
+	struct mdp_info *mdp = priv;
+	uint32_t dma2_cfg;
+	uint16_t ld_param = 0; /* 0=PRIM, 1=SECD, 2=EXT */
+
+	dma2_cfg = DMA_PACK_TIGHT |
+		DMA_PACK_ALIGN_LSB;
+
+	dma2_cfg |= mdp->dma_format;
+	dma2_cfg |= mdp->dma_pack_pattern;
+	dma2_cfg |= DMA_DITHER_EN;
+
+	/* 666 18BPP */
+	dma2_cfg |= DMA_DSTC0G_6BITS | DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;
+
+	/* setup size, address, and stride */
+	mdp_writel(mdp, (height << 16) | (width), MDP_DMA_P_SIZE);
+	mdp_writel(mdp, addr, MDP_DMA_P_IBUF_ADDR);
+	mdp_writel(mdp, stride, MDP_DMA_P_IBUF_Y_STRIDE);
+
+	/* set y & x offset and MDDI transaction parameters */
+	mdp_writel(mdp, (y << 16) | (x), MDP_DMA_P_OUT_XY);
+	mdp_writel(mdp, ld_param, MDP_MDDI_PARAM_WR_SEL);
+	mdp_writel(mdp, (MDDI_VDO_PACKET_DESC << 16) | MDDI_VDO_PACKET_PRIM,
+		   MDP_MDDI_PARAM);
+
+	mdp_writel(mdp, 0x1, MDP_MDDI_DATA_XFR);
+	mdp_writel(mdp, dma2_cfg, MDP_DMA_P_CONFIG);
+	mdp_writel(mdp, 0, MDP_DMA_P_START);
+}
+
+int mdp_hw_init(struct mdp_info *mdp)
+{
+	int ret;
+
+	ret = mdp_out_if_register(&mdp->mdp_dev, MSM_MDDI_PMDH_INTERFACE, mdp,
+				  MDP_DMA_P_DONE, mdp_dma_to_mddi);
+	if (ret)
+		return ret;
+
+	mdp_writel(mdp, 0, MDP_INTR_ENABLE);
+	mdp_writel(mdp, 0, MDP_DMA_P_HIST_INTR_ENABLE);
+
+	/* XXX: why set this? QCT says it should be > mdp_pclk,
+	 * but they never set the clkrate of pclk */
+	mdp_set_core_clk(4);
+	pr_info("%s: mdp_clk=%lu\n", __func__, clk_get_rate(mdp->clk));
+
+	/* TODO: Configure the VG/RGB pipes fetch data */
+
+	/* this should work for any mdp_clk freq.
+	 * TODO: use a different value for mdp_clk freqs >= 90 MHz */
+	mdp_writel(mdp, 0x27, MDP_DMA_P_FETCH_CFG); /* 8 bytes-burst x 8 req */
+
+	mdp_writel(mdp, 0x3, MDP_EBI2_PORTMAP_MODE);
+
+	/* 3 pending requests */
+	mdp_writel(mdp, 0x02222, MDP_MAX_RD_PENDING_CMD_CONFIG);
+
+	/* no overlay processing, sw controls everything */
+	mdp_writel(mdp, 0, MDP_LAYERMIXER_IN_CFG);
+	mdp_writel(mdp, 1 << 3, MDP_OVERLAYPROC0_CFG);
+	mdp_writel(mdp, 1 << 3, MDP_OVERLAYPROC1_CFG);
+
+	/* XXX: HACK! hardcode to do mddi on primary */
+	mdp_writel(mdp, 0x2, MDP_DISP_INTF_SEL);
+	return 0;
+}
+
diff --git a/drivers/video/msm/mdp_hw_init.c b/drivers/video/msm/mdp_hw_init.c
new file mode 100644
index 0000000..8f8b4d3
--- /dev/null
+++ b/drivers/video/msm/mdp_hw_init.c
@@ -0,0 +1,716 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "mdp.h"
+
+/* mdp primary csc limit vector */
+uint32 mdp_plv[] = { 0x10, 0xeb, 0x10, 0xf0 };
+
+/* Color Coefficient matrix for YUV -> RGB */
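+/* (the entries appear to be the usual BT.601 limited-range YCbCr->RGB terms
+ * in 9-bit fractional fixed point, e.g. 0x254 = 596 ~= 1.164 * 512) */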
+struct mdp_ccs mdp_ccs_yuv2rgb = {
+	MDP_CCS_YUV2RGB,
+	{
+		0x254,
+		0x000,
+		0x331,
+		0x254,
+		0xff38,
+		0xfe61,
+		0x254,
+		0x409,
+		0x000,
+	},
+	{
+#ifdef CONFIG_FB_MSM_MDP31
+		0x1f0,
+		0x180,
+		0x180
+#else
+		0x10,
+		0x80,
+		0x80
+#endif
+	}
+};
+
+/* Color Coefficient matrix for RGB -> YUV */
+struct mdp_ccs mdp_ccs_rgb2yuv = {
+	MDP_CCS_RGB2YUV,
+	{
+		0x83,
+		0x102,
+		0x32,
+		0xffb5,
+		0xff6c,
+		0xe1,
+		0xe1,
+		0xff45,
+		0xffdc,
+	},
+#ifdef CONFIG_FB_MSM_MDP31
+	{
+		0x10,
+		0x80,
+		0x80
+	}
+#endif
+};
+
+static void mdp_load_lut_param(void)
+{
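+	/* fill the MDP lookup tables starting at MDP_BASE + 0x40800 with the
+	 * fixed curves below (values taken as-is from the vendor init code) */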
+	outpdw(MDP_BASE + 0x40800, 0x0);
+	outpdw(MDP_BASE + 0x40804, 0x151515);
+	outpdw(MDP_BASE + 0x40808, 0x1d1d1d);
+	outpdw(MDP_BASE + 0x4080c, 0x232323);
+	outpdw(MDP_BASE + 0x40810, 0x272727);
+	outpdw(MDP_BASE + 0x40814, 0x2b2b2b);
+	outpdw(MDP_BASE + 0x40818, 0x2f2f2f);
+	outpdw(MDP_BASE + 0x4081c, 0x333333);
+	outpdw(MDP_BASE + 0x40820, 0x363636);
+	outpdw(MDP_BASE + 0x40824, 0x393939);
+	outpdw(MDP_BASE + 0x40828, 0x3b3b3b);
+	outpdw(MDP_BASE + 0x4082c, 0x3e3e3e);
+	outpdw(MDP_BASE + 0x40830, 0x404040);
+	outpdw(MDP_BASE + 0x40834, 0x434343);
+	outpdw(MDP_BASE + 0x40838, 0x454545);
+	outpdw(MDP_BASE + 0x4083c, 0x474747);
+	outpdw(MDP_BASE + 0x40840, 0x494949);
+	outpdw(MDP_BASE + 0x40844, 0x4b4b4b);
+	outpdw(MDP_BASE + 0x40848, 0x4d4d4d);
+	outpdw(MDP_BASE + 0x4084c, 0x4f4f4f);
+	outpdw(MDP_BASE + 0x40850, 0x515151);
+	outpdw(MDP_BASE + 0x40854, 0x535353);
+	outpdw(MDP_BASE + 0x40858, 0x555555);
+	outpdw(MDP_BASE + 0x4085c, 0x565656);
+	outpdw(MDP_BASE + 0x40860, 0x585858);
+	outpdw(MDP_BASE + 0x40864, 0x5a5a5a);
+	outpdw(MDP_BASE + 0x40868, 0x5b5b5b);
+	outpdw(MDP_BASE + 0x4086c, 0x5d5d5d);
+	outpdw(MDP_BASE + 0x40870, 0x5e5e5e);
+	outpdw(MDP_BASE + 0x40874, 0x606060);
+	outpdw(MDP_BASE + 0x40878, 0x616161);
+	outpdw(MDP_BASE + 0x4087c, 0x636363);
+	outpdw(MDP_BASE + 0x40880, 0x646464);
+	outpdw(MDP_BASE + 0x40884, 0x666666);
+	outpdw(MDP_BASE + 0x40888, 0x676767);
+	outpdw(MDP_BASE + 0x4088c, 0x686868);
+	outpdw(MDP_BASE + 0x40890, 0x6a6a6a);
+	outpdw(MDP_BASE + 0x40894, 0x6b6b6b);
+	outpdw(MDP_BASE + 0x40898, 0x6c6c6c);
+	outpdw(MDP_BASE + 0x4089c, 0x6e6e6e);
+	outpdw(MDP_BASE + 0x408a0, 0x6f6f6f);
+	outpdw(MDP_BASE + 0x408a4, 0x707070);
+	outpdw(MDP_BASE + 0x408a8, 0x717171);
+	outpdw(MDP_BASE + 0x408ac, 0x727272);
+	outpdw(MDP_BASE + 0x408b0, 0x747474);
+	outpdw(MDP_BASE + 0x408b4, 0x757575);
+	outpdw(MDP_BASE + 0x408b8, 0x767676);
+	outpdw(MDP_BASE + 0x408bc, 0x777777);
+	outpdw(MDP_BASE + 0x408c0, 0x787878);
+	outpdw(MDP_BASE + 0x408c4, 0x797979);
+	outpdw(MDP_BASE + 0x408c8, 0x7a7a7a);
+	outpdw(MDP_BASE + 0x408cc, 0x7c7c7c);
+	outpdw(MDP_BASE + 0x408d0, 0x7d7d7d);
+	outpdw(MDP_BASE + 0x408d4, 0x7e7e7e);
+	outpdw(MDP_BASE + 0x408d8, 0x7f7f7f);
+	outpdw(MDP_BASE + 0x408dc, 0x808080);
+	outpdw(MDP_BASE + 0x408e0, 0x818181);
+	outpdw(MDP_BASE + 0x408e4, 0x828282);
+	outpdw(MDP_BASE + 0x408e8, 0x838383);
+	outpdw(MDP_BASE + 0x408ec, 0x848484);
+	outpdw(MDP_BASE + 0x408f0, 0x858585);
+	outpdw(MDP_BASE + 0x408f4, 0x868686);
+	outpdw(MDP_BASE + 0x408f8, 0x878787);
+	outpdw(MDP_BASE + 0x408fc, 0x888888);
+	outpdw(MDP_BASE + 0x40900, 0x898989);
+	outpdw(MDP_BASE + 0x40904, 0x8a8a8a);
+	outpdw(MDP_BASE + 0x40908, 0x8b8b8b);
+	outpdw(MDP_BASE + 0x4090c, 0x8c8c8c);
+	outpdw(MDP_BASE + 0x40910, 0x8d8d8d);
+	outpdw(MDP_BASE + 0x40914, 0x8e8e8e);
+	outpdw(MDP_BASE + 0x40918, 0x8f8f8f);
+	outpdw(MDP_BASE + 0x4091c, 0x8f8f8f);
+	outpdw(MDP_BASE + 0x40920, 0x909090);
+	outpdw(MDP_BASE + 0x40924, 0x919191);
+	outpdw(MDP_BASE + 0x40928, 0x929292);
+	outpdw(MDP_BASE + 0x4092c, 0x939393);
+	outpdw(MDP_BASE + 0x40930, 0x949494);
+	outpdw(MDP_BASE + 0x40934, 0x959595);
+	outpdw(MDP_BASE + 0x40938, 0x969696);
+	outpdw(MDP_BASE + 0x4093c, 0x969696);
+	outpdw(MDP_BASE + 0x40940, 0x979797);
+	outpdw(MDP_BASE + 0x40944, 0x989898);
+	outpdw(MDP_BASE + 0x40948, 0x999999);
+	outpdw(MDP_BASE + 0x4094c, 0x9a9a9a);
+	outpdw(MDP_BASE + 0x40950, 0x9b9b9b);
+	outpdw(MDP_BASE + 0x40954, 0x9c9c9c);
+	outpdw(MDP_BASE + 0x40958, 0x9c9c9c);
+	outpdw(MDP_BASE + 0x4095c, 0x9d9d9d);
+	outpdw(MDP_BASE + 0x40960, 0x9e9e9e);
+	outpdw(MDP_BASE + 0x40964, 0x9f9f9f);
+	outpdw(MDP_BASE + 0x40968, 0xa0a0a0);
+	outpdw(MDP_BASE + 0x4096c, 0xa0a0a0);
+	outpdw(MDP_BASE + 0x40970, 0xa1a1a1);
+	outpdw(MDP_BASE + 0x40974, 0xa2a2a2);
+	outpdw(MDP_BASE + 0x40978, 0xa3a3a3);
+	outpdw(MDP_BASE + 0x4097c, 0xa4a4a4);
+	outpdw(MDP_BASE + 0x40980, 0xa4a4a4);
+	outpdw(MDP_BASE + 0x40984, 0xa5a5a5);
+	outpdw(MDP_BASE + 0x40988, 0xa6a6a6);
+	outpdw(MDP_BASE + 0x4098c, 0xa7a7a7);
+	outpdw(MDP_BASE + 0x40990, 0xa7a7a7);
+	outpdw(MDP_BASE + 0x40994, 0xa8a8a8);
+	outpdw(MDP_BASE + 0x40998, 0xa9a9a9);
+	outpdw(MDP_BASE + 0x4099c, 0xaaaaaa);
+	outpdw(MDP_BASE + 0x409a0, 0xaaaaaa);
+	outpdw(MDP_BASE + 0x409a4, 0xababab);
+	outpdw(MDP_BASE + 0x409a8, 0xacacac);
+	outpdw(MDP_BASE + 0x409ac, 0xadadad);
+	outpdw(MDP_BASE + 0x409b0, 0xadadad);
+	outpdw(MDP_BASE + 0x409b4, 0xaeaeae);
+	outpdw(MDP_BASE + 0x409b8, 0xafafaf);
+	outpdw(MDP_BASE + 0x409bc, 0xafafaf);
+	outpdw(MDP_BASE + 0x409c0, 0xb0b0b0);
+	outpdw(MDP_BASE + 0x409c4, 0xb1b1b1);
+	outpdw(MDP_BASE + 0x409c8, 0xb2b2b2);
+	outpdw(MDP_BASE + 0x409cc, 0xb2b2b2);
+	outpdw(MDP_BASE + 0x409d0, 0xb3b3b3);
+	outpdw(MDP_BASE + 0x409d4, 0xb4b4b4);
+	outpdw(MDP_BASE + 0x409d8, 0xb4b4b4);
+	outpdw(MDP_BASE + 0x409dc, 0xb5b5b5);
+	outpdw(MDP_BASE + 0x409e0, 0xb6b6b6);
+	outpdw(MDP_BASE + 0x409e4, 0xb6b6b6);
+	outpdw(MDP_BASE + 0x409e8, 0xb7b7b7);
+	outpdw(MDP_BASE + 0x409ec, 0xb8b8b8);
+	outpdw(MDP_BASE + 0x409f0, 0xb8b8b8);
+	outpdw(MDP_BASE + 0x409f4, 0xb9b9b9);
+	outpdw(MDP_BASE + 0x409f8, 0xbababa);
+	outpdw(MDP_BASE + 0x409fc, 0xbababa);
+	outpdw(MDP_BASE + 0x40a00, 0xbbbbbb);
+	outpdw(MDP_BASE + 0x40a04, 0xbcbcbc);
+	outpdw(MDP_BASE + 0x40a08, 0xbcbcbc);
+	outpdw(MDP_BASE + 0x40a0c, 0xbdbdbd);
+	outpdw(MDP_BASE + 0x40a10, 0xbebebe);
+	outpdw(MDP_BASE + 0x40a14, 0xbebebe);
+	outpdw(MDP_BASE + 0x40a18, 0xbfbfbf);
+	outpdw(MDP_BASE + 0x40a1c, 0xc0c0c0);
+	outpdw(MDP_BASE + 0x40a20, 0xc0c0c0);
+	outpdw(MDP_BASE + 0x40a24, 0xc1c1c1);
+	outpdw(MDP_BASE + 0x40a28, 0xc1c1c1);
+	outpdw(MDP_BASE + 0x40a2c, 0xc2c2c2);
+	outpdw(MDP_BASE + 0x40a30, 0xc3c3c3);
+	outpdw(MDP_BASE + 0x40a34, 0xc3c3c3);
+	outpdw(MDP_BASE + 0x40a38, 0xc4c4c4);
+	outpdw(MDP_BASE + 0x40a3c, 0xc5c5c5);
+	outpdw(MDP_BASE + 0x40a40, 0xc5c5c5);
+	outpdw(MDP_BASE + 0x40a44, 0xc6c6c6);
+	outpdw(MDP_BASE + 0x40a48, 0xc6c6c6);
+	outpdw(MDP_BASE + 0x40a4c, 0xc7c7c7);
+	outpdw(MDP_BASE + 0x40a50, 0xc8c8c8);
+	outpdw(MDP_BASE + 0x40a54, 0xc8c8c8);
+	outpdw(MDP_BASE + 0x40a58, 0xc9c9c9);
+	outpdw(MDP_BASE + 0x40a5c, 0xc9c9c9);
+	outpdw(MDP_BASE + 0x40a60, 0xcacaca);
+	outpdw(MDP_BASE + 0x40a64, 0xcbcbcb);
+	outpdw(MDP_BASE + 0x40a68, 0xcbcbcb);
+	outpdw(MDP_BASE + 0x40a6c, 0xcccccc);
+	outpdw(MDP_BASE + 0x40a70, 0xcccccc);
+	outpdw(MDP_BASE + 0x40a74, 0xcdcdcd);
+	outpdw(MDP_BASE + 0x40a78, 0xcecece);
+	outpdw(MDP_BASE + 0x40a7c, 0xcecece);
+	outpdw(MDP_BASE + 0x40a80, 0xcfcfcf);
+	outpdw(MDP_BASE + 0x40a84, 0xcfcfcf);
+	outpdw(MDP_BASE + 0x40a88, 0xd0d0d0);
+	outpdw(MDP_BASE + 0x40a8c, 0xd0d0d0);
+	outpdw(MDP_BASE + 0x40a90, 0xd1d1d1);
+	outpdw(MDP_BASE + 0x40a94, 0xd2d2d2);
+	outpdw(MDP_BASE + 0x40a98, 0xd2d2d2);
+	outpdw(MDP_BASE + 0x40a9c, 0xd3d3d3);
+	outpdw(MDP_BASE + 0x40aa0, 0xd3d3d3);
+	outpdw(MDP_BASE + 0x40aa4, 0xd4d4d4);
+	outpdw(MDP_BASE + 0x40aa8, 0xd4d4d4);
+	outpdw(MDP_BASE + 0x40aac, 0xd5d5d5);
+	outpdw(MDP_BASE + 0x40ab0, 0xd6d6d6);
+	outpdw(MDP_BASE + 0x40ab4, 0xd6d6d6);
+	outpdw(MDP_BASE + 0x40ab8, 0xd7d7d7);
+	outpdw(MDP_BASE + 0x40abc, 0xd7d7d7);
+	outpdw(MDP_BASE + 0x40ac0, 0xd8d8d8);
+	outpdw(MDP_BASE + 0x40ac4, 0xd8d8d8);
+	outpdw(MDP_BASE + 0x40ac8, 0xd9d9d9);
+	outpdw(MDP_BASE + 0x40acc, 0xd9d9d9);
+	outpdw(MDP_BASE + 0x40ad0, 0xdadada);
+	outpdw(MDP_BASE + 0x40ad4, 0xdbdbdb);
+	outpdw(MDP_BASE + 0x40ad8, 0xdbdbdb);
+	outpdw(MDP_BASE + 0x40adc, 0xdcdcdc);
+	outpdw(MDP_BASE + 0x40ae0, 0xdcdcdc);
+	outpdw(MDP_BASE + 0x40ae4, 0xdddddd);
+	outpdw(MDP_BASE + 0x40ae8, 0xdddddd);
+	outpdw(MDP_BASE + 0x40aec, 0xdedede);
+	outpdw(MDP_BASE + 0x40af0, 0xdedede);
+	outpdw(MDP_BASE + 0x40af4, 0xdfdfdf);
+	outpdw(MDP_BASE + 0x40af8, 0xdfdfdf);
+	outpdw(MDP_BASE + 0x40afc, 0xe0e0e0);
+	outpdw(MDP_BASE + 0x40b00, 0xe0e0e0);
+	outpdw(MDP_BASE + 0x40b04, 0xe1e1e1);
+	outpdw(MDP_BASE + 0x40b08, 0xe1e1e1);
+	outpdw(MDP_BASE + 0x40b0c, 0xe2e2e2);
+	outpdw(MDP_BASE + 0x40b10, 0xe3e3e3);
+	outpdw(MDP_BASE + 0x40b14, 0xe3e3e3);
+	outpdw(MDP_BASE + 0x40b18, 0xe4e4e4);
+	outpdw(MDP_BASE + 0x40b1c, 0xe4e4e4);
+	outpdw(MDP_BASE + 0x40b20, 0xe5e5e5);
+	outpdw(MDP_BASE + 0x40b24, 0xe5e5e5);
+	outpdw(MDP_BASE + 0x40b28, 0xe6e6e6);
+	outpdw(MDP_BASE + 0x40b2c, 0xe6e6e6);
+	outpdw(MDP_BASE + 0x40b30, 0xe7e7e7);
+	outpdw(MDP_BASE + 0x40b34, 0xe7e7e7);
+	outpdw(MDP_BASE + 0x40b38, 0xe8e8e8);
+	outpdw(MDP_BASE + 0x40b3c, 0xe8e8e8);
+	outpdw(MDP_BASE + 0x40b40, 0xe9e9e9);
+	outpdw(MDP_BASE + 0x40b44, 0xe9e9e9);
+	outpdw(MDP_BASE + 0x40b48, 0xeaeaea);
+	outpdw(MDP_BASE + 0x40b4c, 0xeaeaea);
+	outpdw(MDP_BASE + 0x40b50, 0xebebeb);
+	outpdw(MDP_BASE + 0x40b54, 0xebebeb);
+	outpdw(MDP_BASE + 0x40b58, 0xececec);
+	outpdw(MDP_BASE + 0x40b5c, 0xececec);
+	outpdw(MDP_BASE + 0x40b60, 0xededed);
+	outpdw(MDP_BASE + 0x40b64, 0xededed);
+	outpdw(MDP_BASE + 0x40b68, 0xeeeeee);
+	outpdw(MDP_BASE + 0x40b6c, 0xeeeeee);
+	outpdw(MDP_BASE + 0x40b70, 0xefefef);
+	outpdw(MDP_BASE + 0x40b74, 0xefefef);
+	outpdw(MDP_BASE + 0x40b78, 0xf0f0f0);
+	outpdw(MDP_BASE + 0x40b7c, 0xf0f0f0);
+	outpdw(MDP_BASE + 0x40b80, 0xf1f1f1);
+	outpdw(MDP_BASE + 0x40b84, 0xf1f1f1);
+	outpdw(MDP_BASE + 0x40b88, 0xf2f2f2);
+	outpdw(MDP_BASE + 0x40b8c, 0xf2f2f2);
+	outpdw(MDP_BASE + 0x40b90, 0xf2f2f2);
+	outpdw(MDP_BASE + 0x40b94, 0xf3f3f3);
+	outpdw(MDP_BASE + 0x40b98, 0xf3f3f3);
+	outpdw(MDP_BASE + 0x40b9c, 0xf4f4f4);
+	outpdw(MDP_BASE + 0x40ba0, 0xf4f4f4);
+	outpdw(MDP_BASE + 0x40ba4, 0xf5f5f5);
+	outpdw(MDP_BASE + 0x40ba8, 0xf5f5f5);
+	outpdw(MDP_BASE + 0x40bac, 0xf6f6f6);
+	outpdw(MDP_BASE + 0x40bb0, 0xf6f6f6);
+	outpdw(MDP_BASE + 0x40bb4, 0xf7f7f7);
+	outpdw(MDP_BASE + 0x40bb8, 0xf7f7f7);
+	outpdw(MDP_BASE + 0x40bbc, 0xf8f8f8);
+	outpdw(MDP_BASE + 0x40bc0, 0xf8f8f8);
+	outpdw(MDP_BASE + 0x40bc4, 0xf9f9f9);
+	outpdw(MDP_BASE + 0x40bc8, 0xf9f9f9);
+	outpdw(MDP_BASE + 0x40bcc, 0xfafafa);
+	outpdw(MDP_BASE + 0x40bd0, 0xfafafa);
+	outpdw(MDP_BASE + 0x40bd4, 0xfafafa);
+	outpdw(MDP_BASE + 0x40bd8, 0xfbfbfb);
+	outpdw(MDP_BASE + 0x40bdc, 0xfbfbfb);
+	outpdw(MDP_BASE + 0x40be0, 0xfcfcfc);
+	outpdw(MDP_BASE + 0x40be4, 0xfcfcfc);
+	outpdw(MDP_BASE + 0x40be8, 0xfdfdfd);
+	outpdw(MDP_BASE + 0x40bec, 0xfdfdfd);
+	outpdw(MDP_BASE + 0x40bf0, 0xfefefe);
+	outpdw(MDP_BASE + 0x40bf4, 0xfefefe);
+	outpdw(MDP_BASE + 0x40bf8, 0xffffff);
+	outpdw(MDP_BASE + 0x40bfc, 0xffffff);
+	outpdw(MDP_BASE + 0x40c00, 0x0);
+	outpdw(MDP_BASE + 0x40c04, 0x0);
+	outpdw(MDP_BASE + 0x40c08, 0x0);
+	outpdw(MDP_BASE + 0x40c0c, 0x0);
+	outpdw(MDP_BASE + 0x40c10, 0x0);
+	outpdw(MDP_BASE + 0x40c14, 0x0);
+	outpdw(MDP_BASE + 0x40c18, 0x0);
+	outpdw(MDP_BASE + 0x40c1c, 0x0);
+	outpdw(MDP_BASE + 0x40c20, 0x0);
+	outpdw(MDP_BASE + 0x40c24, 0x0);
+	outpdw(MDP_BASE + 0x40c28, 0x0);
+	outpdw(MDP_BASE + 0x40c2c, 0x0);
+	outpdw(MDP_BASE + 0x40c30, 0x0);
+	outpdw(MDP_BASE + 0x40c34, 0x0);
+	outpdw(MDP_BASE + 0x40c38, 0x0);
+	outpdw(MDP_BASE + 0x40c3c, 0x0);
+	outpdw(MDP_BASE + 0x40c40, 0x10101);
+	outpdw(MDP_BASE + 0x40c44, 0x10101);
+	outpdw(MDP_BASE + 0x40c48, 0x10101);
+	outpdw(MDP_BASE + 0x40c4c, 0x10101);
+	outpdw(MDP_BASE + 0x40c50, 0x10101);
+	outpdw(MDP_BASE + 0x40c54, 0x10101);
+	outpdw(MDP_BASE + 0x40c58, 0x10101);
+	outpdw(MDP_BASE + 0x40c5c, 0x10101);
+	outpdw(MDP_BASE + 0x40c60, 0x10101);
+	outpdw(MDP_BASE + 0x40c64, 0x10101);
+	outpdw(MDP_BASE + 0x40c68, 0x20202);
+	outpdw(MDP_BASE + 0x40c6c, 0x20202);
+	outpdw(MDP_BASE + 0x40c70, 0x20202);
+	outpdw(MDP_BASE + 0x40c74, 0x20202);
+	outpdw(MDP_BASE + 0x40c78, 0x20202);
+	outpdw(MDP_BASE + 0x40c7c, 0x20202);
+	outpdw(MDP_BASE + 0x40c80, 0x30303);
+	outpdw(MDP_BASE + 0x40c84, 0x30303);
+	outpdw(MDP_BASE + 0x40c88, 0x30303);
+	outpdw(MDP_BASE + 0x40c8c, 0x30303);
+	outpdw(MDP_BASE + 0x40c90, 0x30303);
+	outpdw(MDP_BASE + 0x40c94, 0x40404);
+	outpdw(MDP_BASE + 0x40c98, 0x40404);
+	outpdw(MDP_BASE + 0x40c9c, 0x40404);
+	outpdw(MDP_BASE + 0x40ca0, 0x40404);
+	outpdw(MDP_BASE + 0x40ca4, 0x40404);
+	outpdw(MDP_BASE + 0x40ca8, 0x50505);
+	outpdw(MDP_BASE + 0x40cac, 0x50505);
+	outpdw(MDP_BASE + 0x40cb0, 0x50505);
+	outpdw(MDP_BASE + 0x40cb4, 0x50505);
+	outpdw(MDP_BASE + 0x40cb8, 0x60606);
+	outpdw(MDP_BASE + 0x40cbc, 0x60606);
+	outpdw(MDP_BASE + 0x40cc0, 0x60606);
+	outpdw(MDP_BASE + 0x40cc4, 0x70707);
+	outpdw(MDP_BASE + 0x40cc8, 0x70707);
+	outpdw(MDP_BASE + 0x40ccc, 0x70707);
+	outpdw(MDP_BASE + 0x40cd0, 0x70707);
+	outpdw(MDP_BASE + 0x40cd4, 0x80808);
+	outpdw(MDP_BASE + 0x40cd8, 0x80808);
+	outpdw(MDP_BASE + 0x40cdc, 0x80808);
+	outpdw(MDP_BASE + 0x40ce0, 0x90909);
+	outpdw(MDP_BASE + 0x40ce4, 0x90909);
+	outpdw(MDP_BASE + 0x40ce8, 0xa0a0a);
+	outpdw(MDP_BASE + 0x40cec, 0xa0a0a);
+	outpdw(MDP_BASE + 0x40cf0, 0xa0a0a);
+	outpdw(MDP_BASE + 0x40cf4, 0xb0b0b);
+	outpdw(MDP_BASE + 0x40cf8, 0xb0b0b);
+	outpdw(MDP_BASE + 0x40cfc, 0xb0b0b);
+	outpdw(MDP_BASE + 0x40d00, 0xc0c0c);
+	outpdw(MDP_BASE + 0x40d04, 0xc0c0c);
+	outpdw(MDP_BASE + 0x40d08, 0xd0d0d);
+	outpdw(MDP_BASE + 0x40d0c, 0xd0d0d);
+	outpdw(MDP_BASE + 0x40d10, 0xe0e0e);
+	outpdw(MDP_BASE + 0x40d14, 0xe0e0e);
+	outpdw(MDP_BASE + 0x40d18, 0xe0e0e);
+	outpdw(MDP_BASE + 0x40d1c, 0xf0f0f);
+	outpdw(MDP_BASE + 0x40d20, 0xf0f0f);
+	outpdw(MDP_BASE + 0x40d24, 0x101010);
+	outpdw(MDP_BASE + 0x40d28, 0x101010);
+	outpdw(MDP_BASE + 0x40d2c, 0x111111);
+	outpdw(MDP_BASE + 0x40d30, 0x111111);
+	outpdw(MDP_BASE + 0x40d34, 0x121212);
+	outpdw(MDP_BASE + 0x40d38, 0x121212);
+	outpdw(MDP_BASE + 0x40d3c, 0x131313);
+	outpdw(MDP_BASE + 0x40d40, 0x131313);
+	outpdw(MDP_BASE + 0x40d44, 0x141414);
+	outpdw(MDP_BASE + 0x40d48, 0x151515);
+	outpdw(MDP_BASE + 0x40d4c, 0x151515);
+	outpdw(MDP_BASE + 0x40d50, 0x161616);
+	outpdw(MDP_BASE + 0x40d54, 0x161616);
+	outpdw(MDP_BASE + 0x40d58, 0x171717);
+	outpdw(MDP_BASE + 0x40d5c, 0x171717);
+	outpdw(MDP_BASE + 0x40d60, 0x181818);
+	outpdw(MDP_BASE + 0x40d64, 0x191919);
+	outpdw(MDP_BASE + 0x40d68, 0x191919);
+	outpdw(MDP_BASE + 0x40d6c, 0x1a1a1a);
+	outpdw(MDP_BASE + 0x40d70, 0x1b1b1b);
+	outpdw(MDP_BASE + 0x40d74, 0x1b1b1b);
+	outpdw(MDP_BASE + 0x40d78, 0x1c1c1c);
+	outpdw(MDP_BASE + 0x40d7c, 0x1c1c1c);
+	outpdw(MDP_BASE + 0x40d80, 0x1d1d1d);
+	outpdw(MDP_BASE + 0x40d84, 0x1e1e1e);
+	outpdw(MDP_BASE + 0x40d88, 0x1f1f1f);
+	outpdw(MDP_BASE + 0x40d8c, 0x1f1f1f);
+	outpdw(MDP_BASE + 0x40d90, 0x202020);
+	outpdw(MDP_BASE + 0x40d94, 0x212121);
+	outpdw(MDP_BASE + 0x40d98, 0x212121);
+	outpdw(MDP_BASE + 0x40d9c, 0x222222);
+	outpdw(MDP_BASE + 0x40da0, 0x232323);
+	outpdw(MDP_BASE + 0x40da4, 0x242424);
+	outpdw(MDP_BASE + 0x40da8, 0x242424);
+	outpdw(MDP_BASE + 0x40dac, 0x252525);
+	outpdw(MDP_BASE + 0x40db0, 0x262626);
+	outpdw(MDP_BASE + 0x40db4, 0x272727);
+	outpdw(MDP_BASE + 0x40db8, 0x272727);
+	outpdw(MDP_BASE + 0x40dbc, 0x282828);
+	outpdw(MDP_BASE + 0x40dc0, 0x292929);
+	outpdw(MDP_BASE + 0x40dc4, 0x2a2a2a);
+	outpdw(MDP_BASE + 0x40dc8, 0x2b2b2b);
+	outpdw(MDP_BASE + 0x40dcc, 0x2c2c2c);
+	outpdw(MDP_BASE + 0x40dd0, 0x2c2c2c);
+	outpdw(MDP_BASE + 0x40dd4, 0x2d2d2d);
+	outpdw(MDP_BASE + 0x40dd8, 0x2e2e2e);
+	outpdw(MDP_BASE + 0x40ddc, 0x2f2f2f);
+	outpdw(MDP_BASE + 0x40de0, 0x303030);
+	outpdw(MDP_BASE + 0x40de4, 0x313131);
+	outpdw(MDP_BASE + 0x40de8, 0x323232);
+	outpdw(MDP_BASE + 0x40dec, 0x333333);
+	outpdw(MDP_BASE + 0x40df0, 0x333333);
+	outpdw(MDP_BASE + 0x40df4, 0x343434);
+	outpdw(MDP_BASE + 0x40df8, 0x353535);
+	outpdw(MDP_BASE + 0x40dfc, 0x363636);
+	outpdw(MDP_BASE + 0x40e00, 0x373737);
+	outpdw(MDP_BASE + 0x40e04, 0x383838);
+	outpdw(MDP_BASE + 0x40e08, 0x393939);
+	outpdw(MDP_BASE + 0x40e0c, 0x3a3a3a);
+	outpdw(MDP_BASE + 0x40e10, 0x3b3b3b);
+	outpdw(MDP_BASE + 0x40e14, 0x3c3c3c);
+	outpdw(MDP_BASE + 0x40e18, 0x3d3d3d);
+	outpdw(MDP_BASE + 0x40e1c, 0x3e3e3e);
+	outpdw(MDP_BASE + 0x40e20, 0x3f3f3f);
+	outpdw(MDP_BASE + 0x40e24, 0x404040);
+	outpdw(MDP_BASE + 0x40e28, 0x414141);
+	outpdw(MDP_BASE + 0x40e2c, 0x424242);
+	outpdw(MDP_BASE + 0x40e30, 0x434343);
+	outpdw(MDP_BASE + 0x40e34, 0x444444);
+	outpdw(MDP_BASE + 0x40e38, 0x464646);
+	outpdw(MDP_BASE + 0x40e3c, 0x474747);
+	outpdw(MDP_BASE + 0x40e40, 0x484848);
+	outpdw(MDP_BASE + 0x40e44, 0x494949);
+	outpdw(MDP_BASE + 0x40e48, 0x4a4a4a);
+	outpdw(MDP_BASE + 0x40e4c, 0x4b4b4b);
+	outpdw(MDP_BASE + 0x40e50, 0x4c4c4c);
+	outpdw(MDP_BASE + 0x40e54, 0x4d4d4d);
+	outpdw(MDP_BASE + 0x40e58, 0x4f4f4f);
+	outpdw(MDP_BASE + 0x40e5c, 0x505050);
+	outpdw(MDP_BASE + 0x40e60, 0x515151);
+	outpdw(MDP_BASE + 0x40e64, 0x525252);
+	outpdw(MDP_BASE + 0x40e68, 0x535353);
+	outpdw(MDP_BASE + 0x40e6c, 0x545454);
+	outpdw(MDP_BASE + 0x40e70, 0x565656);
+	outpdw(MDP_BASE + 0x40e74, 0x575757);
+	outpdw(MDP_BASE + 0x40e78, 0x585858);
+	outpdw(MDP_BASE + 0x40e7c, 0x595959);
+	outpdw(MDP_BASE + 0x40e80, 0x5b5b5b);
+	outpdw(MDP_BASE + 0x40e84, 0x5c5c5c);
+	outpdw(MDP_BASE + 0x40e88, 0x5d5d5d);
+	outpdw(MDP_BASE + 0x40e8c, 0x5e5e5e);
+	outpdw(MDP_BASE + 0x40e90, 0x606060);
+	outpdw(MDP_BASE + 0x40e94, 0x616161);
+	outpdw(MDP_BASE + 0x40e98, 0x626262);
+	outpdw(MDP_BASE + 0x40e9c, 0x646464);
+	outpdw(MDP_BASE + 0x40ea0, 0x656565);
+	outpdw(MDP_BASE + 0x40ea4, 0x666666);
+	outpdw(MDP_BASE + 0x40ea8, 0x686868);
+	outpdw(MDP_BASE + 0x40eac, 0x696969);
+	outpdw(MDP_BASE + 0x40eb0, 0x6a6a6a);
+	outpdw(MDP_BASE + 0x40eb4, 0x6c6c6c);
+	outpdw(MDP_BASE + 0x40eb8, 0x6d6d6d);
+	outpdw(MDP_BASE + 0x40ebc, 0x6f6f6f);
+	outpdw(MDP_BASE + 0x40ec0, 0x707070);
+	outpdw(MDP_BASE + 0x40ec4, 0x717171);
+	outpdw(MDP_BASE + 0x40ec8, 0x737373);
+	outpdw(MDP_BASE + 0x40ecc, 0x747474);
+	outpdw(MDP_BASE + 0x40ed0, 0x767676);
+	outpdw(MDP_BASE + 0x40ed4, 0x777777);
+	outpdw(MDP_BASE + 0x40ed8, 0x797979);
+	outpdw(MDP_BASE + 0x40edc, 0x7a7a7a);
+	outpdw(MDP_BASE + 0x40ee0, 0x7c7c7c);
+	outpdw(MDP_BASE + 0x40ee4, 0x7d7d7d);
+	outpdw(MDP_BASE + 0x40ee8, 0x7f7f7f);
+	outpdw(MDP_BASE + 0x40eec, 0x808080);
+	outpdw(MDP_BASE + 0x40ef0, 0x828282);
+	outpdw(MDP_BASE + 0x40ef4, 0x838383);
+	outpdw(MDP_BASE + 0x40ef8, 0x858585);
+	outpdw(MDP_BASE + 0x40efc, 0x868686);
+	outpdw(MDP_BASE + 0x40f00, 0x888888);
+	outpdw(MDP_BASE + 0x40f04, 0x898989);
+	outpdw(MDP_BASE + 0x40f08, 0x8b8b8b);
+	outpdw(MDP_BASE + 0x40f0c, 0x8d8d8d);
+	outpdw(MDP_BASE + 0x40f10, 0x8e8e8e);
+	outpdw(MDP_BASE + 0x40f14, 0x909090);
+	outpdw(MDP_BASE + 0x40f18, 0x919191);
+	outpdw(MDP_BASE + 0x40f1c, 0x939393);
+	outpdw(MDP_BASE + 0x40f20, 0x959595);
+	outpdw(MDP_BASE + 0x40f24, 0x969696);
+	outpdw(MDP_BASE + 0x40f28, 0x989898);
+	outpdw(MDP_BASE + 0x40f2c, 0x9a9a9a);
+	outpdw(MDP_BASE + 0x40f30, 0x9b9b9b);
+	outpdw(MDP_BASE + 0x40f34, 0x9d9d9d);
+	outpdw(MDP_BASE + 0x40f38, 0x9f9f9f);
+	outpdw(MDP_BASE + 0x40f3c, 0xa1a1a1);
+	outpdw(MDP_BASE + 0x40f40, 0xa2a2a2);
+	outpdw(MDP_BASE + 0x40f44, 0xa4a4a4);
+	outpdw(MDP_BASE + 0x40f48, 0xa6a6a6);
+	outpdw(MDP_BASE + 0x40f4c, 0xa7a7a7);
+	outpdw(MDP_BASE + 0x40f50, 0xa9a9a9);
+	outpdw(MDP_BASE + 0x40f54, 0xababab);
+	outpdw(MDP_BASE + 0x40f58, 0xadadad);
+	outpdw(MDP_BASE + 0x40f5c, 0xafafaf);
+	outpdw(MDP_BASE + 0x40f60, 0xb0b0b0);
+	outpdw(MDP_BASE + 0x40f64, 0xb2b2b2);
+	outpdw(MDP_BASE + 0x40f68, 0xb4b4b4);
+	outpdw(MDP_BASE + 0x40f6c, 0xb6b6b6);
+	outpdw(MDP_BASE + 0x40f70, 0xb8b8b8);
+	outpdw(MDP_BASE + 0x40f74, 0xbababa);
+	outpdw(MDP_BASE + 0x40f78, 0xbbbbbb);
+	outpdw(MDP_BASE + 0x40f7c, 0xbdbdbd);
+	outpdw(MDP_BASE + 0x40f80, 0xbfbfbf);
+	outpdw(MDP_BASE + 0x40f84, 0xc1c1c1);
+	outpdw(MDP_BASE + 0x40f88, 0xc3c3c3);
+	outpdw(MDP_BASE + 0x40f8c, 0xc5c5c5);
+	outpdw(MDP_BASE + 0x40f90, 0xc7c7c7);
+	outpdw(MDP_BASE + 0x40f94, 0xc9c9c9);
+	outpdw(MDP_BASE + 0x40f98, 0xcbcbcb);
+	outpdw(MDP_BASE + 0x40f9c, 0xcdcdcd);
+	outpdw(MDP_BASE + 0x40fa0, 0xcfcfcf);
+	outpdw(MDP_BASE + 0x40fa4, 0xd1d1d1);
+	outpdw(MDP_BASE + 0x40fa8, 0xd3d3d3);
+	outpdw(MDP_BASE + 0x40fac, 0xd5d5d5);
+	outpdw(MDP_BASE + 0x40fb0, 0xd7d7d7);
+	outpdw(MDP_BASE + 0x40fb4, 0xd9d9d9);
+	outpdw(MDP_BASE + 0x40fb8, 0xdbdbdb);
+	outpdw(MDP_BASE + 0x40fbc, 0xdddddd);
+	outpdw(MDP_BASE + 0x40fc0, 0xdfdfdf);
+	outpdw(MDP_BASE + 0x40fc4, 0xe1e1e1);
+	outpdw(MDP_BASE + 0x40fc8, 0xe3e3e3);
+	outpdw(MDP_BASE + 0x40fcc, 0xe5e5e5);
+	outpdw(MDP_BASE + 0x40fd0, 0xe7e7e7);
+	outpdw(MDP_BASE + 0x40fd4, 0xe9e9e9);
+	outpdw(MDP_BASE + 0x40fd8, 0xebebeb);
+	outpdw(MDP_BASE + 0x40fdc, 0xeeeeee);
+	outpdw(MDP_BASE + 0x40fe0, 0xf0f0f0);
+	outpdw(MDP_BASE + 0x40fe4, 0xf2f2f2);
+	outpdw(MDP_BASE + 0x40fe8, 0xf4f4f4);
+	outpdw(MDP_BASE + 0x40fec, 0xf6f6f6);
+	outpdw(MDP_BASE + 0x40ff0, 0xf8f8f8);
+	outpdw(MDP_BASE + 0x40ff4, 0xfbfbfb);
+	outpdw(MDP_BASE + 0x40ff8, 0xfdfdfd);
+	outpdw(MDP_BASE + 0x40ffc, 0xffffff);
+}
+
+#define   IRQ_EN_1__MDP_IRQ___M    0x00000800
+
+void mdp_hw_init(void)
+{
+	int i;
+
+	/* MDP cmd block enable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	/* debug interface write access */
+	outpdw(MDP_BASE + 0x60, 1);
+
+	outp32(MDP_INTR_ENABLE, MDP_ANY_INTR_MASK);
+	outp32(MDP_EBI2_PORTMAP_MODE, 0x3);
+	outpdw(MDP_CMD_DEBUG_ACCESS_BASE + 0x01f8, 0x0);
+	outpdw(MDP_CMD_DEBUG_ACCESS_BASE + 0x01fc, 0x0);
+	outpdw(MDP_BASE + 0x60, 0x1);
+	mdp_load_lut_param();
+
+	/*
+	 * clear up unused fg/main registers
+	 */
+	/* comp.plane 2&3 ystride */
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0120, 0x0);
+	/* unpacked pattern */
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x012c, 0x0);
+	/* unpacked pattern */
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0130, 0x0);
+	/* unpacked pattern */
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0134, 0x0);
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0158, 0x0);
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x15c, 0x0);
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0160, 0x0);
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0170, 0x0);
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0174, 0x0);
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x017c, 0x0);
+
+	/* comp.plane 2 */
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0114, 0x0);
+	/* comp.plane 3 */
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0118, 0x0);
+
+	/* clear up unused bg registers */
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01c8, 0);
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01d0, 0);
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01dc, 0);
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01e0, 0);
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01e4, 0);
+
+#ifndef CONFIG_FB_MSM_MDP22
+	MDP_OUTP(MDP_BASE + 0xE0000, 0);
+	MDP_OUTP(MDP_BASE + 0x100, 0xffffffff);
+	MDP_OUTP(MDP_BASE + 0x90070, 0);
+	MDP_OUTP(MDP_BASE + 0x94010, 1);
+	MDP_OUTP(MDP_BASE + 0x9401c, 2);
+#endif
+
+	/*
+	 * limit vector
+	 * pre gets applied before color matrix conversion
+	 * post is after ccs
+	 */
+	writel(mdp_plv[0], MDP_CSC_PRE_LV1n(0));
+	writel(mdp_plv[1], MDP_CSC_PRE_LV1n(1));
+	writel(mdp_plv[2], MDP_CSC_PRE_LV1n(2));
+	writel(mdp_plv[3], MDP_CSC_PRE_LV1n(3));
+
+#ifdef CONFIG_FB_MSM_MDP31
+	writel(mdp_plv[2], MDP_CSC_PRE_LV1n(4));
+	writel(mdp_plv[3], MDP_CSC_PRE_LV1n(5));
+
+	writel(0, MDP_CSC_POST_LV1n(0));
+	writel(0xff, MDP_CSC_POST_LV1n(1));
+	writel(0, MDP_CSC_POST_LV1n(2));
+	writel(0xff, MDP_CSC_POST_LV1n(3));
+	writel(0, MDP_CSC_POST_LV1n(4));
+	writel(0xff, MDP_CSC_POST_LV1n(5));
+
+	writel(0, MDP_CSC_PRE_LV2n(0));
+	writel(0xff, MDP_CSC_PRE_LV2n(1));
+	writel(0, MDP_CSC_PRE_LV2n(2));
+	writel(0xff, MDP_CSC_PRE_LV2n(3));
+	writel(0, MDP_CSC_PRE_LV2n(4));
+	writel(0xff, MDP_CSC_PRE_LV2n(5));
+
+	writel(mdp_plv[0], MDP_CSC_POST_LV2n(0));
+	writel(mdp_plv[1], MDP_CSC_POST_LV2n(1));
+	writel(mdp_plv[2], MDP_CSC_POST_LV2n(2));
+	writel(mdp_plv[3], MDP_CSC_POST_LV2n(3));
+	writel(mdp_plv[2], MDP_CSC_POST_LV2n(4));
+	writel(mdp_plv[3], MDP_CSC_POST_LV2n(5));
+#endif
+
+	/* primary forward matrix */
+	for (i = 0; i < MDP_CCS_SIZE; i++)
+		writel(mdp_ccs_rgb2yuv.ccs[i], MDP_CSC_PFMVn(i));
+
+#ifdef CONFIG_FB_MSM_MDP31
+	for (i = 0; i < MDP_BV_SIZE; i++)
+		writel(mdp_ccs_rgb2yuv.bv[i], MDP_CSC_POST_BV2n(i));
+
+	writel(0, MDP_CSC_PRE_BV2n(0));
+	writel(0, MDP_CSC_PRE_BV2n(1));
+	writel(0, MDP_CSC_PRE_BV2n(2));
+#endif
+	/* primary reverse matrix */
+	for (i = 0; i < MDP_CCS_SIZE; i++)
+		writel(mdp_ccs_yuv2rgb.ccs[i], MDP_CSC_PRMVn(i));
+
+	for (i = 0; i < MDP_BV_SIZE; i++)
+		writel(mdp_ccs_yuv2rgb.bv[i], MDP_CSC_PRE_BV1n(i));
+
+#ifdef CONFIG_FB_MSM_MDP31
+	writel(0, MDP_CSC_POST_BV1n(0));
+	writel(0, MDP_CSC_POST_BV1n(1));
+	writel(0, MDP_CSC_POST_BV1n(2));
+
+	outpdw(MDP_BASE + 0x30010, 0x03e0);
+	outpdw(MDP_BASE + 0x30014, 0x0360);
+	outpdw(MDP_BASE + 0x30018, 0x0120);
+	outpdw(MDP_BASE + 0x3001c, 0x0140);
+#endif
+	mdp_init_scale_table();
+
+#ifndef CONFIG_FB_MSM_MDP31
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0104,
+		 ((16 << 6) << 16) | (16) << 6);
+#endif
+
+	/* MDP cmd block disable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+}
diff --git a/drivers/video/msm/mdp_lcdc.c b/drivers/video/msm/mdp_lcdc.c
new file mode 100644
index 0000000..be8d39d
--- /dev/null
+++ b/drivers/video/msm/mdp_lcdc.c
@@ -0,0 +1,432 @@
+/* drivers/video/msm/mdp_lcdc.c
+ *
+ * Copyright (c) 2009 Google Inc.
+ * Copyright (c) 2009 Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Author: Dima Zavin <dima@android.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+
+#include <asm/io.h>
+#include <asm/mach-types.h>
+
+#include <mach/msm_fb.h>
+
+#include "mdp_hw.h"
+
+struct mdp_lcdc_info {
+	struct mdp_info			*mdp;
+	struct clk			*mdp_clk;
+	struct clk			*pclk;
+	struct clk			*pad_pclk;
+	struct msm_panel_data		fb_panel_data;
+	struct platform_device		fb_pdev;
+	struct msm_lcdc_platform_data	*pdata;
+	uint32_t fb_start;
+
+	struct msmfb_callback		frame_start_cb;
+	wait_queue_head_t		vsync_waitq;
+	int				got_vsync;
+
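+	/* LCDC register values precomputed from the panel timing info by
+	 * precompute_timing_parms() */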
+	struct {
+		uint32_t	clk_rate;
+		uint32_t	hsync_ctl;
+		uint32_t	vsync_period;
+		uint32_t	vsync_pulse_width;
+		uint32_t	display_hctl;
+		uint32_t	display_vstart;
+		uint32_t	display_vend;
+		uint32_t	hsync_skew;
+		uint32_t	polarity;
+	} parms;
+};
+
+static struct mdp_device *mdp_dev;
+
+#define panel_to_lcdc(p) container_of((p), struct mdp_lcdc_info, fb_panel_data)
+
+static int lcdc_unblank(struct msm_panel_data *fb_panel)
+{
+	struct mdp_lcdc_info *lcdc = panel_to_lcdc(fb_panel);
+	struct msm_lcdc_panel_ops *panel_ops = lcdc->pdata->panel_ops;
+
+	pr_info("%s: ()\n", __func__);
+	panel_ops->unblank(panel_ops);
+
+	return 0;
+}
+
+static int lcdc_blank(struct msm_panel_data *fb_panel)
+{
+	struct mdp_lcdc_info *lcdc = panel_to_lcdc(fb_panel);
+	struct msm_lcdc_panel_ops *panel_ops = lcdc->pdata->panel_ops;
+
+	pr_info("%s: ()\n", __func__);
+	panel_ops->blank(panel_ops);
+
+	return 0;
+}
+
+static int lcdc_suspend(struct msm_panel_data *fb_panel)
+{
+	struct mdp_lcdc_info *lcdc = panel_to_lcdc(fb_panel);
+
+	pr_info("%s: suspending\n", __func__);
+
+	mdp_writel(lcdc->mdp, 0, MDP_LCDC_EN);
+	clk_disable(lcdc->pad_pclk);
+	clk_disable(lcdc->pclk);
+	clk_disable(lcdc->mdp_clk);
+
+	return 0;
+}
+
+static int lcdc_resume(struct msm_panel_data *fb_panel)
+{
+	struct mdp_lcdc_info *lcdc = panel_to_lcdc(fb_panel);
+
+	pr_info("%s: resuming\n", __func__);
+
+	clk_enable(lcdc->mdp_clk);
+	clk_enable(lcdc->pclk);
+	clk_enable(lcdc->pad_pclk);
+	mdp_writel(lcdc->mdp, 1, MDP_LCDC_EN);
+
+	return 0;
+}
+
+static int lcdc_hw_init(struct mdp_lcdc_info *lcdc)
+{
+	struct msm_panel_data *fb_panel = &lcdc->fb_panel_data;
+	uint32_t dma_cfg;
+
+	clk_enable(lcdc->mdp_clk);
+	clk_enable(lcdc->pclk);
+	clk_enable(lcdc->pad_pclk);
+
+	clk_set_rate(lcdc->pclk, lcdc->parms.clk_rate);
+	clk_set_rate(lcdc->pad_pclk, lcdc->parms.clk_rate);
+
+	/* write the lcdc params */
+	mdp_writel(lcdc->mdp, lcdc->parms.hsync_ctl, MDP_LCDC_HSYNC_CTL);
+	mdp_writel(lcdc->mdp, lcdc->parms.vsync_period, MDP_LCDC_VSYNC_PERIOD);
+	mdp_writel(lcdc->mdp, lcdc->parms.vsync_pulse_width,
+		   MDP_LCDC_VSYNC_PULSE_WIDTH);
+	mdp_writel(lcdc->mdp, lcdc->parms.display_hctl, MDP_LCDC_DISPLAY_HCTL);
+	mdp_writel(lcdc->mdp, lcdc->parms.display_vstart,
+		   MDP_LCDC_DISPLAY_V_START);
+	mdp_writel(lcdc->mdp, lcdc->parms.display_vend, MDP_LCDC_DISPLAY_V_END);
+	mdp_writel(lcdc->mdp, lcdc->parms.hsync_skew, MDP_LCDC_HSYNC_SKEW);
+
+	mdp_writel(lcdc->mdp, 0, MDP_LCDC_BORDER_CLR);
+	mdp_writel(lcdc->mdp, 0xff, MDP_LCDC_UNDERFLOW_CTL);
+	mdp_writel(lcdc->mdp, 0, MDP_LCDC_ACTIVE_HCTL);
+	mdp_writel(lcdc->mdp, 0, MDP_LCDC_ACTIVE_V_START);
+	mdp_writel(lcdc->mdp, 0, MDP_LCDC_ACTIVE_V_END);
+	mdp_writel(lcdc->mdp, lcdc->parms.polarity, MDP_LCDC_CTL_POLARITY);
+
+	/* config the dma_p block that drives the lcdc data */
+	mdp_writel(lcdc->mdp, lcdc->fb_start, MDP_DMA_P_IBUF_ADDR);
+	mdp_writel(lcdc->mdp, (((fb_panel->fb_data->yres & 0x7ff) << 16) |
+			       (fb_panel->fb_data->xres & 0x7ff)),
+		   MDP_DMA_P_SIZE);
+
+	mdp_writel(lcdc->mdp, 0, MDP_DMA_P_OUT_XY);
+
+	dma_cfg = mdp_readl(lcdc->mdp, MDP_DMA_P_CONFIG);
+	dma_cfg |= (DMA_PACK_ALIGN_LSB |
+		   DMA_PACK_PATTERN_RGB |
+		   DMA_DITHER_EN);
+	dma_cfg |= DMA_OUT_SEL_LCDC;
+	dma_cfg &= ~DMA_DST_BITS_MASK;
+
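+	/* pick the per-component output depth: 6-6-6 for RGB666 panels,
+	 * otherwise 5 (R) / 6 (G) / 5 (B) for RGB565 */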
+	if (fb_panel->fb_data->output_format == MSM_MDP_OUT_IF_FMT_RGB666)
+		dma_cfg |= DMA_DSTC0G_6BITS | DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;
+	else
+		dma_cfg |= DMA_DSTC0G_6BITS | DMA_DSTC1B_5BITS | DMA_DSTC2R_5BITS;
+
+	mdp_writel(lcdc->mdp, dma_cfg, MDP_DMA_P_CONFIG);
+
+	/* enable the lcdc timing generation */
+	mdp_writel(lcdc->mdp, 1, MDP_LCDC_EN);
+
+	return 0;
+}
+
+static void lcdc_wait_vsync(struct msm_panel_data *panel)
+{
+	struct mdp_lcdc_info *lcdc = panel_to_lcdc(panel);
+	int ret;
+
+	ret = wait_event_timeout(lcdc->vsync_waitq, lcdc->got_vsync, HZ / 2);
+	if (!ret && !lcdc->got_vsync)
+		pr_err("%s: timeout waiting for VSYNC\n", __func__);
+	lcdc->got_vsync = 0;
+}
+
+static void lcdc_request_vsync(struct msm_panel_data *fb_panel,
+			       struct msmfb_callback *vsync_cb)
+{
+	struct mdp_lcdc_info *lcdc = panel_to_lcdc(fb_panel);
+
+	/* the vsync callback will start the dma */
+	vsync_cb->func(vsync_cb);
+	lcdc->got_vsync = 0;
+	mdp_out_if_req_irq(mdp_dev, MSM_LCDC_INTERFACE, MDP_LCDC_FRAME_START,
+			   &lcdc->frame_start_cb);
+	lcdc_wait_vsync(fb_panel);
+}
+
+static void lcdc_clear_vsync(struct msm_panel_data *fb_panel)
+{
+	struct mdp_lcdc_info *lcdc = panel_to_lcdc(fb_panel);
+	lcdc->got_vsync = 0;
+	mdp_out_if_req_irq(mdp_dev, MSM_LCDC_INTERFACE, 0, NULL);
+}
+
+/* called in irq context with mdp lock held, when mdp gets the
+ * MDP_LCDC_FRAME_START interrupt */
+static void lcdc_frame_start(struct msmfb_callback *cb)
+{
+	struct mdp_lcdc_info *lcdc;
+
+	lcdc = container_of(cb, struct mdp_lcdc_info, frame_start_cb);
+
+	lcdc->got_vsync = 1;
+	wake_up(&lcdc->vsync_waitq);
+}
+
+static void lcdc_dma_start(void *priv, uint32_t addr, uint32_t stride,
+			   uint32_t width, uint32_t height, uint32_t x,
+			   uint32_t y)
+{
+	struct mdp_lcdc_info *lcdc = priv;
+
+	struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
+	if (mdp->dma_config_dirty)
+	{
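+		/* stop the LCDC timing generator while the DMA_P config is
+		 * rewritten; the 20ms delay presumably lets the in-flight
+		 * frame finish before reprogramming */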
+		mdp_writel(lcdc->mdp, 0, MDP_LCDC_EN);
+		mdelay(20);
+		mdp_dev->configure_dma(mdp_dev);
+		mdp_writel(lcdc->mdp, 1, MDP_LCDC_EN);
+	}
+	mdp_writel(lcdc->mdp, stride, MDP_DMA_P_IBUF_Y_STRIDE);
+	mdp_writel(lcdc->mdp, addr, MDP_DMA_P_IBUF_ADDR);
+}
+
+static void precompute_timing_parms(struct mdp_lcdc_info *lcdc)
+{
+	struct msm_lcdc_timing *timing = lcdc->pdata->timing;
+	struct msm_fb_data *fb_data = lcdc->pdata->fb_data;
+	unsigned int hsync_period;
+	unsigned int hsync_start_x;
+	unsigned int hsync_end_x;
+	unsigned int vsync_period;
+	unsigned int display_vstart;
+	unsigned int display_vend;
+
+	hsync_period = (timing->hsync_back_porch +
+			fb_data->xres + timing->hsync_front_porch);
+	hsync_start_x = timing->hsync_back_porch;
+	hsync_end_x = hsync_start_x + fb_data->xres - 1;
+
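+	/* the LCDC vertical timing registers are programmed in pixel-clock
+	 * ticks, so the line counts below are scaled by hsync_period (the
+	 * total number of pclk cycles per line) */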
+	vsync_period = (timing->vsync_back_porch +
+			fb_data->yres + timing->vsync_front_porch);
+	vsync_period *= hsync_period;
+
+	display_vstart = timing->vsync_back_porch;
+	display_vstart *= hsync_period;
+	display_vstart += timing->hsync_skew;
+
+	display_vend = (timing->vsync_back_porch + fb_data->yres) *
+		hsync_period;
+	display_vend += timing->hsync_skew - 1;
+
+	/* register values we pre-compute at init time from the timing
+	 * information in the panel info */
+	lcdc->parms.hsync_ctl = (((hsync_period & 0xfff) << 16) |
+				 (timing->hsync_pulse_width & 0xfff));
+	lcdc->parms.vsync_period = vsync_period & 0xffffff;
+	lcdc->parms.vsync_pulse_width = (timing->vsync_pulse_width *
+					 hsync_period) & 0xffffff;
+
+	lcdc->parms.display_hctl = (((hsync_end_x & 0xfff) << 16) |
+				    (hsync_start_x & 0xfff));
+	lcdc->parms.display_vstart = display_vstart & 0xffffff;
+	lcdc->parms.display_vend = display_vend & 0xffffff;
+	lcdc->parms.hsync_skew = timing->hsync_skew & 0xfff;
+	lcdc->parms.polarity = ((timing->hsync_act_low << 0) |
+				(timing->vsync_act_low << 1) |
+				(timing->den_act_low << 2));
+	lcdc->parms.clk_rate = timing->clk_rate;
+}
+
+static int mdp_lcdc_probe(struct platform_device *pdev)
+{
+	struct msm_lcdc_platform_data *pdata = pdev->dev.platform_data;
+	struct mdp_lcdc_info *lcdc;
+	int ret = 0;
+
+	if (!pdata) {
+		pr_err("%s: no LCDC platform data found\n", __func__);
+		return -EINVAL;
+	}
+
+	lcdc = kzalloc(sizeof(struct mdp_lcdc_info), GFP_KERNEL);
+	if (!lcdc)
+		return -ENOMEM;
+
+	/* We don't actually own the clocks, the mdp does. */
+	lcdc->mdp_clk = clk_get(mdp_dev->dev.parent, "mdp_clk");
+	if (IS_ERR(lcdc->mdp_clk)) {
+		pr_err("%s: failed to get mdp_clk\n", __func__);
+		ret = PTR_ERR(lcdc->mdp_clk);
+		goto err_get_mdp_clk;
+	}
+
+	lcdc->pclk = clk_get(mdp_dev->dev.parent, "lcdc_pclk_clk");
+	if (IS_ERR(lcdc->pclk)) {
+		pr_err("%s: failed to get lcdc_pclk\n", __func__);
+		ret = PTR_ERR(lcdc->pclk);
+		goto err_get_pclk;
+	}
+
+	lcdc->pad_pclk = clk_get(mdp_dev->dev.parent, "lcdc_pad_pclk_clk");
+	if (IS_ERR(lcdc->pad_pclk)) {
+		pr_err("%s: failed to get lcdc_pad_pclk\n", __func__);
+		ret = PTR_ERR(lcdc->pad_pclk);
+		goto err_get_pad_pclk;
+	}
+
+	init_waitqueue_head(&lcdc->vsync_waitq);
+	lcdc->pdata = pdata;
+	lcdc->frame_start_cb.func = lcdc_frame_start;
+
+	platform_set_drvdata(pdev, lcdc);
+
+	mdp_out_if_register(mdp_dev, MSM_LCDC_INTERFACE, lcdc, MDP_DMA_P_DONE,
+			    lcdc_dma_start);
+
+	precompute_timing_parms(lcdc);
+
+	lcdc->fb_start = pdata->fb_resource->start;
+	lcdc->mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
+
+	lcdc->fb_panel_data.suspend = lcdc_suspend;
+	lcdc->fb_panel_data.resume = lcdc_resume;
+	lcdc->fb_panel_data.wait_vsync = lcdc_wait_vsync;
+	lcdc->fb_panel_data.request_vsync = lcdc_request_vsync;
+	lcdc->fb_panel_data.clear_vsync = lcdc_clear_vsync;
+	lcdc->fb_panel_data.blank = lcdc_blank;
+	lcdc->fb_panel_data.unblank = lcdc_unblank;
+	lcdc->fb_panel_data.fb_data = pdata->fb_data;
+	lcdc->fb_panel_data.interface_type = MSM_LCDC_INTERFACE;
+
+	ret = lcdc_hw_init(lcdc);
+	if (ret) {
+		pr_err("%s: Cannot initialize the mdp_lcdc\n", __func__);
+		goto err_hw_init;
+	}
+
+	lcdc->fb_pdev.name = "msm_panel";
+	lcdc->fb_pdev.id = pdata->fb_id;
+	lcdc->fb_pdev.resource = pdata->fb_resource;
+	lcdc->fb_pdev.num_resources = 1;
+	lcdc->fb_pdev.dev.platform_data = &lcdc->fb_panel_data;
+
+	if (pdata->panel_ops->init)
+		pdata->panel_ops->init(pdata->panel_ops);
+
+	ret = platform_device_register(&lcdc->fb_pdev);
+	if (ret) {
+		pr_err("%s: Cannot register msm_panel pdev\n", __func__);
+		goto err_plat_dev_reg;
+	}
+
+	pr_info("%s: initialized\n", __func__);
+
+	return 0;
+
+err_plat_dev_reg:
+err_hw_init:
+	platform_set_drvdata(pdev, NULL);
+	clk_put(lcdc->pad_pclk);
+err_get_pad_pclk:
+	clk_put(lcdc->pclk);
+err_get_pclk:
+	clk_put(lcdc->mdp_clk);
+err_get_mdp_clk:
+	kfree(lcdc);
+	return ret;
+}
+
+static int mdp_lcdc_remove(struct platform_device *pdev)
+{
+	struct mdp_lcdc_info *lcdc = platform_get_drvdata(pdev);
+
+	platform_set_drvdata(pdev, NULL);
+
+	/* release all three clock references taken in probe */
+	clk_put(lcdc->pad_pclk);
+	clk_put(lcdc->pclk);
+	clk_put(lcdc->mdp_clk);
+	kfree(lcdc);
+
+	return 0;
+}
+
+static struct platform_driver mdp_lcdc_driver = {
+	.probe = mdp_lcdc_probe,
+	.remove = mdp_lcdc_remove,
+	.driver = {
+		.name	= "msm_mdp_lcdc",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int mdp_lcdc_add_mdp_device(struct device *dev,
+				   struct class_interface *class_intf)
+{
+	/* might need locking if multiple mdp devices */
+	if (mdp_dev)
+		return 0;
+	mdp_dev = container_of(dev, struct mdp_device, dev);
+	return platform_driver_register(&mdp_lcdc_driver);
+}
+
+static void mdp_lcdc_remove_mdp_device(struct device *dev,
+				       struct class_interface *class_intf)
+{
+	/* might need locking if multiple mdp devices */
+	if (dev != &mdp_dev->dev)
+		return;
+	platform_driver_unregister(&mdp_lcdc_driver);
+	mdp_dev = NULL;
+}
+
+static struct class_interface mdp_lcdc_interface = {
+	.add_dev = &mdp_lcdc_add_mdp_device,
+	.remove_dev = &mdp_lcdc_remove_mdp_device,
+};
+
+static int __init mdp_lcdc_init(void)
+{
+	return register_mdp_client(&mdp_lcdc_interface);
+}
+
+module_init(mdp_lcdc_init);
diff --git a/drivers/video/msm/mdp_ppp.c b/drivers/video/msm/mdp_ppp.c
index 2b6564e..19dfe82 100644
--- a/drivers/video/msm/mdp_ppp.c
+++ b/drivers/video/msm/mdp_ppp.c
@@ -1,7 +1,7 @@
-/* drivers/video/msm/mdp_ppp.c
+/* drivers/video/msm/src/drv/mdp/mdp_ppp.c
  *
- * Copyright (C) 2007 QUALCOMM Incorporated
  * Copyright (C) 2007 Google Incorporated
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -12,55 +12,35 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/fb.h>
-#include <linux/file.h>
-#include <linux/delay.h>
 #include <linux/msm_mdp.h>
-#include <mach/msm_fb.h>
+#include <linux/file.h>
+#include <linux/android_pmem.h>
+#include <linux/major.h>
 
-#include "mdp_hw.h"
-#include "mdp_scale_tables.h"
+#include <linux/proc_fs.h>
 
-#define DLOG(x...) do {} while (0)
+#include <mach/hardware.h>
+#include <linux/io.h>
 
-#define MDP_DOWNSCALE_BLUR (MDP_DOWNSCALE_MAX + 1)
-static int downscale_y_table = MDP_DOWNSCALE_MAX;
-static int downscale_x_table = MDP_DOWNSCALE_MAX;
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <linux/semaphore.h>
+#include <linux/msm_kgsl.h>
 
-struct mdp_regs {
-	uint32_t src0;
-	uint32_t src1;
-	uint32_t dst0;
-	uint32_t dst1;
-	uint32_t src_cfg;
-	uint32_t dst_cfg;
-	uint32_t src_pack;
-	uint32_t dst_pack;
-	uint32_t src_rect;
-	uint32_t dst_rect;
-	uint32_t src_ystride;
-	uint32_t dst_ystride;
-	uint32_t op;
-	uint32_t src_bpp;
-	uint32_t dst_bpp;
-	uint32_t edge;
-	uint32_t phasex_init;
-	uint32_t phasey_init;
-	uint32_t phasex_step;
-	uint32_t phasey_step;
-};
+#include "mdp.h"
+#include "msm_fb.h"
 
-static uint32_t pack_pattern[] = {
-	PPP_ARRAY0(PACK_PATTERN)
-};
-
-static uint32_t src_img_cfg[] = {
-	PPP_ARRAY1(CFG, SRC)
-};
-
-static uint32_t dst_img_cfg[] = {
-	PPP_ARRAY1(CFG, DST)
-};
+#define MDP_IS_IMGTYPE_BAD(x) (((x) >= MDP_IMGTYPE_LIMIT) && \
+				(((x) < MDP_IMGTYPE2_START) || \
+				 ((x) >= MDP_IMGTYPE_LIMIT2)))
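+/* a format index is valid only if it falls in [0, MDP_IMGTYPE_LIMIT) or in
+ * [MDP_IMGTYPE2_START, MDP_IMGTYPE_LIMIT2) */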
 
 static uint32_t bytes_per_pixel[] = {
 	[MDP_RGB_565] = 2,
@@ -74,457 +54,499 @@
 	[MDP_Y_CBCR_H2V2] = 1,
 	[MDP_Y_CRCB_H2V1] = 1,
 	[MDP_Y_CRCB_H2V2] = 1,
-	[MDP_YCRYCB_H2V1] = 2
+	[MDP_YCRYCB_H2V1] = 2,
+	[MDP_BGR_565] = 2
 };
 
-static uint32_t dst_op_chroma[] = {
-	PPP_ARRAY1(CHROMA_SAMP, DST)
-};
+extern uint32 mdp_plv[];
+extern struct semaphore mdp_ppp_mutex;
 
-static uint32_t src_op_chroma[] = {
-	PPP_ARRAY1(CHROMA_SAMP, SRC)
-};
-
-static uint32_t bg_op_chroma[] = {
-	PPP_ARRAY1(CHROMA_SAMP, BG)
-};
-
-static void rotate_dst_addr_x(struct mdp_blit_req *req, struct mdp_regs *regs)
+int mdp_get_bytes_per_pixel(uint32_t format,
+				 struct msm_fb_data_type *mfd)
 {
-	regs->dst0 += (req->dst_rect.w -
-		       min((uint32_t)16, req->dst_rect.w)) * regs->dst_bpp;
-	regs->dst1 += (req->dst_rect.w -
-		       min((uint32_t)16, req->dst_rect.w)) * regs->dst_bpp;
+	int bpp = -EINVAL;
+	if (format == MDP_FB_FORMAT)
+		format = mfd->fb_imgType;
+	if (format < ARRAY_SIZE(bytes_per_pixel))
+		bpp = bytes_per_pixel[format];
+
+	if (bpp <= 0)
+		printk(KERN_ERR "%s incorrect format %d\n", __func__, format);
+	return bpp;
 }
 
-static void rotate_dst_addr_y(struct mdp_blit_req *req, struct mdp_regs *regs)
+static uint32 mdp_conv_matx_rgb2yuv(uint32 input_pixel,
+				    uint16 *matrix_and_bias_vector,
+				    uint32 *clamp_vector,
+				    uint32 *look_up_table)
 {
-	regs->dst0 += (req->dst_rect.h -
-		       min((uint32_t)16, req->dst_rect.h)) *
-		       regs->dst_ystride;
-	regs->dst1 += (req->dst_rect.h -
-		       min((uint32_t)16, req->dst_rect.h)) *
-		       regs->dst_ystride;
-}
+	uint8 input_C2, input_C0, input_C1;
+	uint32 output;
+	int32 comp_C2, comp_C1, comp_C0, temp;
+	int32 temp1, temp2, temp3;
+	int32 matrix[9];
+	int32 bias_vector[3];
+	int32 Y_low_limit, Y_high_limit, C_low_limit, C_high_limit;
+	int32 i;
+	uint32 _is_lookup_table_enabled;
 
-static void blit_rotate(struct mdp_blit_req *req,
-			struct mdp_regs *regs)
-{
-	if (req->flags == MDP_ROT_NOP)
-		return;
+	input_C2 = (input_pixel >> 16) & 0xFF;
+	input_C1 = (input_pixel >> 8) & 0xFF;
+	input_C0 = (input_pixel >> 0) & 0xFF;
 
-	regs->op |= PPP_OP_ROT_ON;
-	if ((req->flags & MDP_ROT_90 || req->flags & MDP_FLIP_LR) &&
-	    !(req->flags & MDP_ROT_90 && req->flags & MDP_FLIP_LR))
-		rotate_dst_addr_x(req, regs);
-	if (req->flags & MDP_ROT_90)
-		regs->op |= PPP_OP_ROT_90;
-	if (req->flags & MDP_FLIP_UD) {
-		regs->op |= PPP_OP_FLIP_UD;
-		rotate_dst_addr_y(req, regs);
-	}
-	if (req->flags & MDP_FLIP_LR)
-		regs->op |= PPP_OP_FLIP_LR;
-}
+	comp_C0 = input_C0;
+	comp_C1 = input_C1;
+	comp_C2 = input_C2;
 
-static void blit_convert(struct mdp_blit_req *req, struct mdp_regs *regs)
-{
-	if (req->src.format == req->dst.format)
-		return;
-	if (IS_RGB(req->src.format) && IS_YCRCB(req->dst.format)) {
-		regs->op |= PPP_OP_CONVERT_RGB2YCBCR | PPP_OP_CONVERT_ON;
-	} else if (IS_YCRCB(req->src.format) && IS_RGB(req->dst.format)) {
-		regs->op |= PPP_OP_CONVERT_YCBCR2RGB | PPP_OP_CONVERT_ON;
-		if (req->dst.format == MDP_RGB_565)
-			regs->op |= PPP_OP_CONVERT_MATRIX_SECONDARY;
-	}
-}
+	for (i = 0; i < 9; i++)
+		matrix[i] =
+		    ((int32) (((int32) matrix_and_bias_vector[i]) << 20)) >> 20;
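+	/* the left/right shift pair sign-extends each 12-bit fixed-point
+	 * matrix coefficient into a full int32 */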
 
-#define GET_BIT_RANGE(value, high, low) \
-	(((1 << (high - low + 1)) - 1) & (value >> low))
-static uint32_t transp_convert(struct mdp_blit_req *req)
-{
-	uint32_t transp = 0;
-	if (req->src.format == MDP_RGB_565) {
-		/* pad each value to 8 bits by copying the high bits into the
-		 * low end, convert RGB to RBG by switching low 2 components */
-		transp |= ((GET_BIT_RANGE(req->transp_mask, 15, 11) << 3) |
-			   (GET_BIT_RANGE(req->transp_mask, 15, 13))) << 16;
+	bias_vector[0] = (int32) (matrix_and_bias_vector[9] & 0xFF);
+	bias_vector[1] = (int32) (matrix_and_bias_vector[10] & 0xFF);
+	bias_vector[2] = (int32) (matrix_and_bias_vector[11] & 0xFF);
 
-		transp |= ((GET_BIT_RANGE(req->transp_mask, 4, 0) << 3) |
-			   (GET_BIT_RANGE(req->transp_mask, 4, 2))) << 8;
+	Y_low_limit = (int32) clamp_vector[0];
+	Y_high_limit = (int32) clamp_vector[1];
+	C_low_limit = (int32) clamp_vector[2];
+	C_high_limit = (int32) clamp_vector[3];
 
-		transp |= (GET_BIT_RANGE(req->transp_mask, 10, 5) << 2) |
-			  (GET_BIT_RANGE(req->transp_mask, 10, 9));
-	} else {
-		/* convert RGB to RBG */
-		transp |= (GET_BIT_RANGE(req->transp_mask, 15, 8)) |
-			  (GET_BIT_RANGE(req->transp_mask, 23, 16) << 16) |
-			  (GET_BIT_RANGE(req->transp_mask, 7, 0) << 8);
-	}
-	return transp;
-}
-#undef GET_BIT_RANGE
-
-static void blit_blend(struct mdp_blit_req *req, struct mdp_regs *regs)
-{
-	/* TRANSP BLEND */
-	if (req->transp_mask != MDP_TRANSP_NOP) {
-		req->transp_mask = transp_convert(req);
-		if (req->alpha != MDP_ALPHA_NOP) {
-			/* use blended transparancy mode
-			 * pixel = (src == transp) ? dst : blend
-			 * blend is combo of blend_eq_sel and
-			 * blend_alpha_sel */
-			regs->op |= PPP_OP_ROT_ON | PPP_OP_BLEND_ON |
-				PPP_OP_BLEND_ALPHA_BLEND_NORMAL |
-				PPP_OP_BLEND_CONSTANT_ALPHA |
-				PPP_BLEND_ALPHA_TRANSP;
-		} else {
-			/* simple transparancy mode
-			 * pixel = (src == transp) ? dst : src */
-			regs->op |= PPP_OP_ROT_ON | PPP_OP_BLEND_ON |
-				PPP_OP_BLEND_SRCPIXEL_TRANSP;
-		}
-	}
-
-	req->alpha &= 0xff;
-	/* ALPHA BLEND */
-	if (HAS_ALPHA(req->src.format)) {
-		regs->op |= PPP_OP_ROT_ON | PPP_OP_BLEND_ON |
-			PPP_OP_BLEND_SRCPIXEL_ALPHA;
-	} else if (req->alpha < MDP_ALPHA_NOP) {
-		/* just blend by alpha */
-		regs->op |= PPP_OP_ROT_ON | PPP_OP_BLEND_ON |
-			PPP_OP_BLEND_ALPHA_BLEND_NORMAL |
-			PPP_OP_BLEND_CONSTANT_ALPHA;
-	}
-
-	regs->op |= bg_op_chroma[req->dst.format];
-}
-
-#define ONE_HALF	(1LL << 32)
-#define ONE		(1LL << 33)
-#define TWO		(2LL << 33)
-#define THREE		(3LL << 33)
-#define FRAC_MASK (ONE - 1)
-#define INT_MASK (~FRAC_MASK)
-
-static int scale_params(uint32_t dim_in, uint32_t dim_out, uint32_t origin,
-			uint32_t *phase_init, uint32_t *phase_step)
-{
-	/* to improve precicsion calculations are done in U31.33 and converted
-	 * to U3.29 at the end */
-	int64_t k1, k2, k3, k4, tmp;
-	uint64_t n, d, os, os_p, od, od_p, oreq;
-	unsigned rpa = 0;
-	int64_t ip64, delta;
-
-	if (dim_out % 3 == 0)
-		rpa = !(dim_in % (dim_out / 3));
-
-	n = ((uint64_t)dim_out) << 34;
-	d = dim_in;
-	if (!d)
-		return -1;
-	do_div(n, d);
-	k3 = (n + 1) >> 1;
-	if ((k3 >> 4) < (1LL << 27) || (k3 >> 4) > (1LL << 31)) {
-		DLOG("crap bad scale\n");
-		return -1;
-	}
-	n = ((uint64_t)dim_in) << 34;
-	d = (uint64_t)dim_out;
-	if (!d)
-		return -1;
-	do_div(n, d);
-	k1 = (n + 1) >> 1;
-	k2 = (k1 - ONE) >> 1;
-
-	*phase_init = (int)(k2 >> 4);
-	k4 = (k3 - ONE) >> 1;
-
-	if (rpa) {
-		os = ((uint64_t)origin << 33) - ONE_HALF;
-		tmp = (dim_out * os) + ONE_HALF;
-		if (!dim_in)
-			return -1;
-		do_div(tmp, dim_in);
-		od = tmp - ONE_HALF;
-	} else {
-		os = ((uint64_t)origin << 1) - 1;
-		od = (((k3 * os) >> 1) + k4);
-	}
-
-	od_p = od & INT_MASK;
-	if (od_p != od)
-		od_p += ONE;
-
-	if (rpa) {
-		tmp = (dim_in * od_p) + ONE_HALF;
-		if (!dim_in)
-			return -1;
-		do_div(tmp, dim_in);
-		os_p = tmp - ONE_HALF;
-	} else {
-		os_p = ((k1 * (od_p >> 33)) + k2);
-	}
-
-	oreq = (os_p & INT_MASK) - ONE;
-
-	ip64 = os_p - oreq;
-	delta = ((int64_t)(origin) << 33) - oreq;
-	ip64 -= delta;
-	/* limit to valid range before the left shift */
-	delta = (ip64 & (1LL << 63)) ? 4 : -4;
-	delta <<= 33;
-	while (abs((int)(ip64 >> 33)) > 4)
-		ip64 += delta;
-	*phase_init = (int)(ip64 >> 4);
-	*phase_step = (uint32_t)(k1 >> 4);
-	return 0;
-}
-
-static void load_scale_table(const struct mdp_info *mdp,
-			     struct mdp_table_entry *table, int len)
-{
-	int i;
-	for (i = 0; i < len; i++)
-		mdp_writel(mdp, table[i].val, table[i].reg);
-}
-
-enum {
-IMG_LEFT,
-IMG_RIGHT,
-IMG_TOP,
-IMG_BOTTOM,
-};
-
-static void get_edge_info(uint32_t src, uint32_t src_coord, uint32_t dst,
-			  uint32_t *interp1, uint32_t *interp2,
-			  uint32_t *repeat1, uint32_t *repeat2) {
-	if (src > 3 * dst) {
-		*interp1 = 0;
-		*interp2 = src - 1;
-		*repeat1 = 0;
-		*repeat2 = 0;
-	} else if (src == 3 * dst) {
-		*interp1 = 0;
-		*interp2 = src;
-		*repeat1 = 0;
-		*repeat2 = 1;
-	} else if (src > dst && src < 3 * dst) {
-		*interp1 = -1;
-		*interp2 = src;
-		*repeat1 = 1;
-		*repeat2 = 1;
-	} else if (src == dst) {
-		*interp1 = -1;
-		*interp2 = src + 1;
-		*repeat1 = 1;
-		*repeat2 = 2;
-	} else {
-		*interp1 = -2;
-		*interp2 = src + 1;
-		*repeat1 = 2;
-		*repeat2 = 2;
-	}
-	*interp1 += src_coord;
-	*interp2 += src_coord;
-}
-
-static int get_edge_cond(struct mdp_blit_req *req, struct mdp_regs *regs)
-{
-	int32_t luma_interp[4];
-	int32_t luma_repeat[4];
-	int32_t chroma_interp[4];
-	int32_t chroma_bound[4];
-	int32_t chroma_repeat[4];
-	uint32_t dst_w, dst_h;
-
-	memset(&luma_interp, 0, sizeof(int32_t) * 4);
-	memset(&luma_repeat, 0, sizeof(int32_t) * 4);
-	memset(&chroma_interp, 0, sizeof(int32_t) * 4);
-	memset(&chroma_bound, 0, sizeof(int32_t) * 4);
-	memset(&chroma_repeat, 0, sizeof(int32_t) * 4);
-	regs->edge = 0;
-
-	if (req->flags & MDP_ROT_90) {
-		dst_w = req->dst_rect.h;
-		dst_h = req->dst_rect.w;
-	} else {
-		dst_w = req->dst_rect.w;
-		dst_h = req->dst_rect.h;
-	}
-
-	if (regs->op & (PPP_OP_SCALE_Y_ON | PPP_OP_SCALE_X_ON)) {
-		get_edge_info(req->src_rect.h, req->src_rect.y, dst_h,
-			      &luma_interp[IMG_TOP], &luma_interp[IMG_BOTTOM],
-			      &luma_repeat[IMG_TOP], &luma_repeat[IMG_BOTTOM]);
-		get_edge_info(req->src_rect.w, req->src_rect.x, dst_w,
-			      &luma_interp[IMG_LEFT], &luma_interp[IMG_RIGHT],
-			      &luma_repeat[IMG_LEFT], &luma_repeat[IMG_RIGHT]);
-	} else {
-		luma_interp[IMG_LEFT] = req->src_rect.x;
-		luma_interp[IMG_RIGHT] = req->src_rect.x + req->src_rect.w - 1;
-		luma_interp[IMG_TOP] = req->src_rect.y;
-		luma_interp[IMG_BOTTOM] = req->src_rect.y + req->src_rect.h - 1;
-		luma_repeat[IMG_LEFT] = 0;
-		luma_repeat[IMG_TOP] = 0;
-		luma_repeat[IMG_RIGHT] = 0;
-		luma_repeat[IMG_BOTTOM] = 0;
-	}
-
-	chroma_interp[IMG_LEFT] = luma_interp[IMG_LEFT];
-	chroma_interp[IMG_RIGHT] = luma_interp[IMG_RIGHT];
-	chroma_interp[IMG_TOP] = luma_interp[IMG_TOP];
-	chroma_interp[IMG_BOTTOM] = luma_interp[IMG_BOTTOM];
-
-	chroma_bound[IMG_LEFT] = req->src_rect.x;
-	chroma_bound[IMG_RIGHT] = req->src_rect.x + req->src_rect.w - 1;
-	chroma_bound[IMG_TOP] = req->src_rect.y;
-	chroma_bound[IMG_BOTTOM] = req->src_rect.y + req->src_rect.h - 1;
-
-	if (IS_YCRCB(req->src.format)) {
-		chroma_interp[IMG_LEFT] = chroma_interp[IMG_LEFT] >> 1;
-		chroma_interp[IMG_RIGHT] = (chroma_interp[IMG_RIGHT] + 1) >> 1;
-
-		chroma_bound[IMG_LEFT] = chroma_bound[IMG_LEFT] >> 1;
-		chroma_bound[IMG_RIGHT] = chroma_bound[IMG_RIGHT] >> 1;
-	}
-
-	if (req->src.format == MDP_Y_CBCR_H2V2 ||
-	    req->src.format == MDP_Y_CRCB_H2V2) {
-		chroma_interp[IMG_TOP] = (chroma_interp[IMG_TOP] - 1) >> 1;
-		chroma_interp[IMG_BOTTOM] = (chroma_interp[IMG_BOTTOM] + 1)
-					    >> 1;
-		chroma_bound[IMG_TOP] = (chroma_bound[IMG_TOP] + 1) >> 1;
-		chroma_bound[IMG_BOTTOM] = chroma_bound[IMG_BOTTOM] >> 1;
-	}
-
-	chroma_repeat[IMG_LEFT] = chroma_bound[IMG_LEFT] -
-				  chroma_interp[IMG_LEFT];
-	chroma_repeat[IMG_RIGHT] = chroma_interp[IMG_RIGHT] -
-				  chroma_bound[IMG_RIGHT];
-	chroma_repeat[IMG_TOP] = chroma_bound[IMG_TOP] -
-				  chroma_interp[IMG_TOP];
-	chroma_repeat[IMG_BOTTOM] = chroma_interp[IMG_BOTTOM] -
-				  chroma_bound[IMG_BOTTOM];
-
-	if (chroma_repeat[IMG_LEFT] < 0 || chroma_repeat[IMG_LEFT] > 3 ||
-	    chroma_repeat[IMG_RIGHT] < 0 || chroma_repeat[IMG_RIGHT] > 3 ||
-	    chroma_repeat[IMG_TOP] < 0 || chroma_repeat[IMG_TOP] > 3 ||
-	    chroma_repeat[IMG_BOTTOM] < 0 || chroma_repeat[IMG_BOTTOM] > 3 ||
-	    luma_repeat[IMG_LEFT] < 0 || luma_repeat[IMG_LEFT] > 3 ||
-	    luma_repeat[IMG_RIGHT] < 0 || luma_repeat[IMG_RIGHT] > 3 ||
-	    luma_repeat[IMG_TOP] < 0 || luma_repeat[IMG_TOP] > 3 ||
-	    luma_repeat[IMG_BOTTOM] < 0 || luma_repeat[IMG_BOTTOM] > 3)
-		return -1;
-
-	regs->edge |= (chroma_repeat[IMG_LEFT] & 3) << MDP_LEFT_CHROMA;
-	regs->edge |= (chroma_repeat[IMG_RIGHT] & 3) << MDP_RIGHT_CHROMA;
-	regs->edge |= (chroma_repeat[IMG_TOP] & 3) << MDP_TOP_CHROMA;
-	regs->edge |= (chroma_repeat[IMG_BOTTOM] & 3) << MDP_BOTTOM_CHROMA;
-	regs->edge |= (luma_repeat[IMG_LEFT] & 3) << MDP_LEFT_LUMA;
-	regs->edge |= (luma_repeat[IMG_RIGHT] & 3) << MDP_RIGHT_LUMA;
-	regs->edge |= (luma_repeat[IMG_TOP] & 3) << MDP_TOP_LUMA;
-	regs->edge |= (luma_repeat[IMG_BOTTOM] & 3) << MDP_BOTTOM_LUMA;
-	return 0;
-}
-
-static int blit_scale(const struct mdp_info *mdp, struct mdp_blit_req *req,
-		      struct mdp_regs *regs)
-{
-	uint32_t phase_init_x, phase_init_y, phase_step_x, phase_step_y;
-	uint32_t scale_factor_x, scale_factor_y;
-	uint32_t downscale;
-	uint32_t dst_w, dst_h;
-
-	if (req->flags & MDP_ROT_90) {
-		dst_w = req->dst_rect.h;
-		dst_h = req->dst_rect.w;
-	} else {
-		dst_w = req->dst_rect.w;
-		dst_h = req->dst_rect.h;
-	}
-	if ((req->src_rect.w == dst_w)  && (req->src_rect.h == dst_h) &&
-	    !(req->flags & MDP_BLUR)) {
-		regs->phasex_init = 0;
-		regs->phasey_init = 0;
-		regs->phasex_step = 0;
-		regs->phasey_step = 0;
-		return 0;
-	}
-
-	if (scale_params(req->src_rect.w, dst_w, 1, &phase_init_x,
-			 &phase_step_x) ||
-	    scale_params(req->src_rect.h, dst_h, 1, &phase_init_y,
-			 &phase_step_y))
-		return -1;
-
-	scale_factor_x = (dst_w * 10) / req->src_rect.w;
-	scale_factor_y = (dst_h * 10) / req->src_rect.h;
-
-	if (scale_factor_x > 8)
-		downscale = MDP_DOWNSCALE_PT8TO1;
-	else if (scale_factor_x > 6)
-		downscale = MDP_DOWNSCALE_PT6TOPT8;
-	else if (scale_factor_x > 4)
-		downscale = MDP_DOWNSCALE_PT4TOPT6;
+	if (look_up_table == 0)	/* check for NULL pointer */
+		_is_lookup_table_enabled = 0;
 	else
-		downscale = MDP_DOWNSCALE_PT2TOPT4;
-	if (downscale != downscale_x_table) {
-		load_scale_table(mdp, mdp_downscale_x_table[downscale], 64);
-		downscale_x_table = downscale;
+		_is_lookup_table_enabled = 1;
+
+	if (_is_lookup_table_enabled == 1) {
+		comp_C2 = (look_up_table[comp_C2] >> 16) & 0xFF;
+		comp_C1 = (look_up_table[comp_C1] >> 8) & 0xFF;
+		comp_C0 = (look_up_table[comp_C0] >> 0) & 0xFF;
 	}
+	/*
+	 * Color Conversion
+	 * reorder input colors
+	 */
+	temp = comp_C2;
+	comp_C2 = comp_C1;
+	comp_C1 = comp_C0;
+	comp_C0 = temp;
 
-	if (scale_factor_y > 8)
-		downscale = MDP_DOWNSCALE_PT8TO1;
-	else if (scale_factor_y > 6)
-		downscale = MDP_DOWNSCALE_PT6TOPT8;
-	else if (scale_factor_y > 4)
-		downscale = MDP_DOWNSCALE_PT4TOPT6;
-	else
-		downscale = MDP_DOWNSCALE_PT2TOPT4;
-	if (downscale != downscale_y_table) {
-		load_scale_table(mdp, mdp_downscale_y_table[downscale], 64);
-		downscale_y_table = downscale;
-	}
+	/* matrix multiplication */
+	temp1 = comp_C0 * matrix[0] + comp_C1 * matrix[1] + comp_C2 * matrix[2];
+	temp2 = comp_C0 * matrix[3] + comp_C1 * matrix[4] + comp_C2 * matrix[5];
+	temp3 = comp_C0 * matrix[6] + comp_C1 * matrix[7] + comp_C2 * matrix[8];
 
-	regs->phasex_init = phase_init_x;
-	regs->phasey_init = phase_init_y;
-	regs->phasex_step = phase_step_x;
-	regs->phasey_step = phase_step_y;
-	regs->op |= (PPP_OP_SCALE_Y_ON | PPP_OP_SCALE_X_ON);
-	return 0;
+	comp_C0 = temp1 + 0x100;
+	comp_C1 = temp2 + 0x100;
+	comp_C2 = temp3 + 0x100;
 
+	/* take the integer part */
+	comp_C0 >>= 9;
+	comp_C1 >>= 9;
+	comp_C2 >>= 9;
+
+	/* post bias (+) */
+	comp_C0 += bias_vector[0];
+	comp_C1 += bias_vector[1];
+	comp_C2 += bias_vector[2];
+
+	/* limit pixel to 8-bit */
+	if (comp_C0 < 0)
+		comp_C0 = 0;
+
+	if (comp_C0 > 255)
+		comp_C0 = 255;
+
+	if (comp_C1 < 0)
+		comp_C1 = 0;
+
+	if (comp_C1 > 255)
+		comp_C1 = 255;
+
+	if (comp_C2 < 0)
+		comp_C2 = 0;
+
+	if (comp_C2 > 255)
+		comp_C2 = 255;
+
+	/* clamp */
+	if (comp_C0 < Y_low_limit)
+		comp_C0 = Y_low_limit;
+
+	if (comp_C0 > Y_high_limit)
+		comp_C0 = Y_high_limit;
+
+	if (comp_C1 < C_low_limit)
+		comp_C1 = C_low_limit;
+
+	if (comp_C1 > C_high_limit)
+		comp_C1 = C_high_limit;
+
+	if (comp_C2 < C_low_limit)
+		comp_C2 = C_low_limit;
+
+	if (comp_C2 > C_high_limit)
+		comp_C2 = C_high_limit;
+
+	output = (comp_C2 << 16) | (comp_C1 << 8) | comp_C0;
+	return output;
 }
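
A note on the fixed-point math in the two conversion helpers in this hunk: the matrix coefficients carry nine fractional bits, so after the multiply the code adds 0x100 (one half in that format) to each accumulator and shifts right by nine to round to an integer. A minimal, self-contained sketch of that rounding step; the helper name and sample numbers are illustrative, not part of the patch:

#include <stdint.h>

/* Round a Q9 fixed-point accumulator to an integer the way the
 * conversion code above does: add one half (0x100) and shift right
 * by nine. */
static int32_t q9_round(int32_t acc)
{
	return (acc + 0x100) >> 9;
}

/* Example: a coefficient of ~0.299 is 153 in Q9, so a component value
 * of 200 contributes q9_round(200 * 153) == 60. */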
 
-static void blit_blur(const struct mdp_info *mdp, struct mdp_blit_req *req,
-		      struct mdp_regs *regs)
+uint32 mdp_conv_matx_yuv2rgb(uint32 input_pixel,
+			     uint16 *matrix_and_bias_vector,
+			     uint32 *clamp_vector, uint32 *look_up_table)
 {
-	if (!(req->flags & MDP_BLUR))
-		return;
+	uint8 input_C2, input_C0, input_C1;
+	uint32 output;
+	int32 comp_C2, comp_C1, comp_C0, temp;
+	int32 temp1, temp2, temp3;
+	int32 matrix[9];
+	int32 bias_vector[3];
+	int32 Y_low_limit, Y_high_limit, C_low_limit, C_high_limit;
+	int32 i;
+	uint32 _is_lookup_table_enabled;
 
-	if (!(downscale_x_table == MDP_DOWNSCALE_BLUR &&
-	      downscale_y_table == MDP_DOWNSCALE_BLUR)) {
-		load_scale_table(mdp, mdp_gaussian_blur_table, 128);
-		downscale_x_table = MDP_DOWNSCALE_BLUR;
-		downscale_y_table = MDP_DOWNSCALE_BLUR;
+	input_C2 = (input_pixel >> 16) & 0xFF;
+	input_C1 = (input_pixel >> 8) & 0xFF;
+	input_C0 = (input_pixel >> 0) & 0xFF;
+
+	comp_C0 = input_C0;
+	comp_C1 = input_C1;
+	comp_C2 = input_C2;
+
+	for (i = 0; i < 9; i++)
+		matrix[i] =
+		    ((int32) (((int32) matrix_and_bias_vector[i]) << 20)) >> 20;
+
+	bias_vector[0] = (int32) (matrix_and_bias_vector[9] & 0xFF);
+	bias_vector[1] = (int32) (matrix_and_bias_vector[10] & 0xFF);
+	bias_vector[2] = (int32) (matrix_and_bias_vector[11] & 0xFF);
+
+	Y_low_limit = (int32) clamp_vector[0];
+	Y_high_limit = (int32) clamp_vector[1];
+	C_low_limit = (int32) clamp_vector[2];
+	C_high_limit = (int32) clamp_vector[3];
+
+	if (look_up_table == 0)	/* check for NULL pointer */
+		_is_lookup_table_enabled = 0;
+	else
+		_is_lookup_table_enabled = 1;
+
+	/* clamp */
+	if (comp_C0 < Y_low_limit)
+		comp_C0 = Y_low_limit;
+
+	if (comp_C0 > Y_high_limit)
+		comp_C0 = Y_high_limit;
+
+	if (comp_C1 < C_low_limit)
+		comp_C1 = C_low_limit;
+
+	if (comp_C1 > C_high_limit)
+		comp_C1 = C_high_limit;
+
+	if (comp_C2 < C_low_limit)
+		comp_C2 = C_low_limit;
+
+	if (comp_C2 > C_high_limit)
+		comp_C2 = C_high_limit;
+
+	/*
+	 * Color Conversion
+	 * pre bias (-)
+	 */
+	comp_C0 -= bias_vector[0];
+	comp_C1 -= bias_vector[1];
+	comp_C2 -= bias_vector[2];
+
+	/* matrix multiplication */
+	temp1 = comp_C0 * matrix[0] + comp_C1 * matrix[1] + comp_C2 * matrix[2];
+	temp2 = comp_C0 * matrix[3] + comp_C1 * matrix[4] + comp_C2 * matrix[5];
+	temp3 = comp_C0 * matrix[6] + comp_C1 * matrix[7] + comp_C2 * matrix[8];
+
+	comp_C0 = temp1 + 0x100;
+	comp_C1 = temp2 + 0x100;
+	comp_C2 = temp3 + 0x100;
+
+	/* take the integer part */
+	comp_C0 >>= 9;
+	comp_C1 >>= 9;
+	comp_C2 >>= 9;
+
+	/* reorder output colors */
+	temp = comp_C0;
+	comp_C0 = comp_C1;
+	comp_C1 = comp_C2;
+	comp_C2 = temp;
+
+	/* limit pixel to 8-bit */
+	if (comp_C0 < 0)
+		comp_C0 = 0;
+
+	if (comp_C0 > 255)
+		comp_C0 = 255;
+
+	if (comp_C1 < 0)
+		comp_C1 = 0;
+
+	if (comp_C1 > 255)
+		comp_C1 = 255;
+
+	if (comp_C2 < 0)
+		comp_C2 = 0;
+
+	if (comp_C2 > 255)
+		comp_C2 = 255;
+
+	/* Look-up table */
+	if (_is_lookup_table_enabled == 1) {
+		comp_C2 = (look_up_table[comp_C2] >> 16) & 0xFF;
+		comp_C1 = (look_up_table[comp_C1] >> 8) & 0xFF;
+		comp_C0 = (look_up_table[comp_C0] >> 0) & 0xFF;
 	}
 
-	regs->op |= (PPP_OP_SCALE_Y_ON | PPP_OP_SCALE_X_ON);
+	output = (comp_C2 << 16) | (comp_C1 << 8) | comp_C0;
+	return output;
 }
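
The coefficient unpacking loop in mdp_conv_matx_yuv2rgb() (and its rgb2yuv counterpart) shifts each 16-bit matrix entry left by 20 and arithmetically back right by 20 inside a 32-bit integer, which sign-extends a 12-bit two's-complement coefficient. A hedged standalone equivalent; the helper name is invented for illustration:

#include <stdint.h>

/* Sign-extend a 12-bit two's-complement coefficient stored in the low
 * bits of a 16-bit word, mirroring the (x << 20) >> 20 idiom above. */
static int32_t sext12(uint16_t raw)
{
	return (int32_t)((uint32_t)raw << 20) >> 20;
}

/* sext12(0x0FFF) == -1, sext12(0x0099) == 153 */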
 
+static uint32 mdp_calc_tpval(MDPIMG *mdpImg)
+{
+	uint32 tpVal;
+	uint8 plane_tp;
+
+	tpVal = 0;
+	if ((mdpImg->imgType == MDP_RGB_565)
+	    || (mdpImg->imgType == MDP_BGR_565)) {
+		/*
+		 * transparent color conversion into 24 bpp
+		 *
+		 * C2R_8BIT
+		 * left-shift the whole field and OR it with its uppermost bits
+		 */
+		plane_tp = (uint8) ((mdpImg->tpVal & 0xF800) >> 11);
+		tpVal |= ((plane_tp << 3) | ((plane_tp & 0x1C) >> 2)) << 16;
+
+		/* C1B_8BIT */
+		plane_tp = (uint8) (mdpImg->tpVal & 0x1F);
+		tpVal |= ((plane_tp << 3) | ((plane_tp & 0x1C) >> 2)) << 8;
+
+		/* C0G_8BIT */
+		plane_tp = (uint8) ((mdpImg->tpVal & 0x7E0) >> 5);
+		tpVal |= ((plane_tp << 2) | ((plane_tp & 0x30) >> 4));
+	} else {
+		/* 24bit RGB to RBG conversion */
+
+		tpVal = (mdpImg->tpVal & 0xFF00) >> 8;
+		tpVal |= (mdpImg->tpVal & 0xFF) << 8;
+		tpVal |= (mdpImg->tpVal & 0xFF0000);
+	}
+
+	return tpVal;
+}
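
mdp_calc_tpval() widens the 5- and 6-bit RGB565 channels of the transparent colour to 8 bits by replicating each channel's top bits into the new low bits, so full-scale 0x1F maps to 0xFF rather than 0xF8. A sketch of the same expansion on bare channel values, equivalent to the masked shifts above for in-range inputs; the function names are invented here:

#include <stdint.h>

/* Replicate the top bits of a 5-bit (or 6-bit) channel into the low
 * bits of the 8-bit result, as the C2R/C1B (5-bit) and C0G (6-bit)
 * cases above do. */
static uint8_t expand5(uint8_t v)	/* v in [0, 31] */
{
	return (uint8_t)((v << 3) | (v >> 2));
}

static uint8_t expand6(uint8_t v)	/* v in [0, 63] */
{
	return (uint8_t)((v << 2) | (v >> 4));
}

/* expand5(0x1F) == 0xFF, expand6(0x3F) == 0xFF, expand5(0x10) == 0x84 */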
+
+static uint8 *mdp_get_chroma_addr(MDPIBUF *iBuf)
+{
+	uint8 *dest1;
+
+	dest1 = NULL;
+	switch (iBuf->ibuf_type) {
+	case MDP_Y_CBCR_H2V2:
+	case MDP_Y_CRCB_H2V2:
+	case MDP_Y_CBCR_H2V1:
+	case MDP_Y_CRCB_H2V1:
+		dest1 = (uint8 *) iBuf->buf;
+		dest1 += iBuf->ibuf_width * iBuf->ibuf_height * iBuf->bpp;
+		break;
+
+	default:
+		break;
+	}
+
+	return dest1;
+}
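
For the pseudo-planar YCbCr formats the interleaved chroma plane starts immediately after the luma plane, which is why mdp_get_chroma_addr() simply advances the buffer pointer by width * height * bpp. The same offset as a standalone computation (a sketch; the helper name is not part of the driver):

#include <stddef.h>
#include <stdint.h>

/* Byte offset from the start of a pseudo-planar frame to its CbCr (or
 * CrCb) plane: the whole first plane is laid out before the chroma. */
static size_t chroma_plane_offset(uint32_t width, uint32_t height,
				  uint32_t bpp)
{
	return (size_t)width * height * bpp;
}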
+
+static void mdp_ppp_setbg(MDPIBUF *iBuf)
+{
+	uint8 *bg0_addr;
+	uint8 *bg1_addr;
+	uint32 bg0_ystride, bg1_ystride;
+	uint32 ppp_src_cfg_reg, unpack_pattern;
+	int v_slice, h_slice;
+
+	v_slice = h_slice = 1;
+	bg0_addr = (uint8 *) iBuf->buf;
+	bg1_addr = mdp_get_chroma_addr(iBuf);
+
+	bg0_ystride = iBuf->ibuf_width * iBuf->bpp;
+	bg1_ystride = iBuf->ibuf_width * iBuf->bpp;
+
+	switch (iBuf->ibuf_type) {
+	case MDP_BGR_565:
+	case MDP_RGB_565:
+		/* 565 = 2bytes
+		 * RGB = 3Components
+		 * RGB interleaved
+		 */
+		ppp_src_cfg_reg = PPP_SRC_C2R_5BITS | PPP_SRC_C0G_6BITS |
+			PPP_SRC_C1B_5BITS | PPP_SRC_BPP_INTERLVD_2BYTES |
+			PPP_SRC_INTERLVD_3COMPONENTS | PPP_SRC_UNPACK_TIGHT |
+			PPP_SRC_UNPACK_ALIGN_LSB |
+			PPP_SRC_FETCH_PLANES_INTERLVD;
+
+		if (iBuf->ibuf_type == MDP_RGB_565)
+			unpack_pattern =
+			    MDP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8);
+		else
+			unpack_pattern =
+			    MDP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8);
+		break;
+
+	case MDP_RGB_888:
+		/*
+		 * 888 = 3bytes
+		 * RGB = 3Components
+		 * RGB interleaved
+		 */
+		ppp_src_cfg_reg = PPP_SRC_C2R_8BITS | PPP_SRC_C0G_8BITS |
+		PPP_SRC_C1B_8BITS | PPP_SRC_BPP_INTERLVD_3BYTES |
+		PPP_SRC_INTERLVD_3COMPONENTS | PPP_SRC_UNPACK_TIGHT |
+		PPP_SRC_UNPACK_ALIGN_LSB | PPP_SRC_FETCH_PLANES_INTERLVD;
+
+		unpack_pattern =
+		    MDP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8);
+		break;
+
+	case MDP_BGRA_8888:
+	case MDP_RGBA_8888:
+	case MDP_ARGB_8888:
+	case MDP_XRGB_8888:
+	case MDP_RGBX_8888:
+		/*
+		 * 8888 = 4bytes
+		 * ARGB = 4Components
+		 * ARGB interleaved
+		 */
+		ppp_src_cfg_reg = PPP_SRC_C2R_8BITS | PPP_SRC_C0G_8BITS |
+		PPP_SRC_C1B_8BITS | PPP_SRC_C3A_8BITS | PPP_SRC_C3_ALPHA_EN |
+		PPP_SRC_BPP_INTERLVD_4BYTES | PPP_SRC_INTERLVD_4COMPONENTS |
+		PPP_SRC_UNPACK_TIGHT | PPP_SRC_UNPACK_ALIGN_LSB |
+		PPP_SRC_FETCH_PLANES_INTERLVD;
+
+		if (iBuf->ibuf_type == MDP_BGRA_8888)
+			unpack_pattern =
+			    MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R, CLR_G, CLR_B,
+						 8);
+		else if (iBuf->ibuf_type == MDP_RGBA_8888 ||
+				 iBuf->ibuf_type == MDP_RGBX_8888)
+			unpack_pattern =
+			    MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B, CLR_G, CLR_R,
+						 8);
+		else if (iBuf->ibuf_type == MDP_XRGB_8888)
+			unpack_pattern =
+			    MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R, CLR_G, CLR_B,
+						 8);
+		else
+			unpack_pattern =
+			    MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R, CLR_G, CLR_B,
+						 8);
+		break;
+
+	case MDP_Y_CBCR_H2V2:
+	case MDP_Y_CRCB_H2V2:
+		ppp_src_cfg_reg = PPP_SRC_C2R_8BITS |
+		    PPP_SRC_C0G_8BITS |
+		    PPP_SRC_C1B_8BITS |
+		    PPP_SRC_C3A_8BITS |
+		    PPP_SRC_BPP_INTERLVD_2BYTES |
+		    PPP_SRC_INTERLVD_2COMPONENTS |
+		    PPP_SRC_UNPACK_TIGHT |
+		    PPP_SRC_UNPACK_ALIGN_LSB | PPP_SRC_FETCH_PLANES_PSEUDOPLNR;
+
+		if (iBuf->ibuf_type == MDP_Y_CBCR_H2V2)
+			unpack_pattern =
+			    MDP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8);
+		else
+			unpack_pattern =
+			    MDP_GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8);
+		v_slice = h_slice = 2;
+		break;
+
+	case MDP_YCRYCB_H2V1:
+		ppp_src_cfg_reg = PPP_SRC_C2R_8BITS |
+		    PPP_SRC_C0G_8BITS |
+		    PPP_SRC_C1B_8BITS |
+		    PPP_SRC_C3A_8BITS |
+		    PPP_SRC_BPP_INTERLVD_2BYTES |
+		    PPP_SRC_INTERLVD_4COMPONENTS |
+		    PPP_SRC_UNPACK_TIGHT | PPP_SRC_UNPACK_ALIGN_LSB;
+
+		unpack_pattern =
+		    MDP_GET_PACK_PATTERN(CLR_Y, CLR_CR, CLR_Y, CLR_CB, 8);
+		h_slice = 2;
+		break;
+
+	case MDP_Y_CBCR_H2V1:
+	case MDP_Y_CRCB_H2V1:
+		ppp_src_cfg_reg = PPP_SRC_C2R_8BITS |
+		    PPP_SRC_C0G_8BITS |
+		    PPP_SRC_C1B_8BITS |
+		    PPP_SRC_C3A_8BITS |
+		    PPP_SRC_BPP_INTERLVD_2BYTES |
+		    PPP_SRC_INTERLVD_2COMPONENTS |
+		    PPP_SRC_UNPACK_TIGHT |
+		    PPP_SRC_UNPACK_ALIGN_LSB | PPP_SRC_FETCH_PLANES_PSEUDOPLNR;
+
+		if (iBuf->ibuf_type == MDP_Y_CBCR_H2V1)
+			unpack_pattern =
+			    MDP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8);
+		else
+			unpack_pattern =
+			    MDP_GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8);
+		h_slice = 2;
+		break;
+
+	default:
+		return;
+	}
+
+	/* starting input address adjustment */
+	mdp_adjust_start_addr(&bg0_addr, &bg1_addr, v_slice, h_slice,
+			      iBuf->roi.lcd_x, iBuf->roi.lcd_y,
+			      iBuf->ibuf_width, iBuf->ibuf_height, iBuf->bpp,
+			      iBuf, 1);
+
+	/*
+	 * 0x01c0: background plane 0 addr
+	 * 0x01c4: background plane 1 addr
+	 * 0x01c8: background plane 2 addr
+	 * 0x01cc: bg y stride for plane 0 and 1
+	 * 0x01d0: bg y stride for plane 2
+	 * 0x01d4: bg src PPP config
+	 * 0x01d8: unpack pattern
+	 */
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01c0, bg0_addr);
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01c4, bg1_addr);
+
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01cc,
+		 (bg1_ystride << 16) | bg0_ystride);
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01d4, ppp_src_cfg_reg);
+
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01d8, unpack_pattern);
+}
+
+#define IS_PSEUDOPLNR(img) ((img == MDP_Y_CRCB_H2V2) | \
+				(img == MDP_Y_CBCR_H2V2) | \
+				(img == MDP_Y_CRCB_H2V1) | \
+				(img == MDP_Y_CBCR_H2V1))
 
 #define IMG_LEN(rect_h, w, rect_w, bpp) (((rect_h) * w) * bpp)
 
 #define Y_TO_CRCB_RATIO(format) \
 	((format == MDP_Y_CBCR_H2V2 || format == MDP_Y_CRCB_H2V2) ?  2 :\
-	 (format == MDP_Y_CBCR_H2V1 || format == MDP_Y_CRCB_H2V1) ?  1 : 1)
+	(format == MDP_Y_CBCR_H2V1 || format == MDP_Y_CRCB_H2V1) ?  1 : 1)
 
+#ifdef CONFIG_ANDROID_PMEM
 static void get_len(struct mdp_img *img, struct mdp_rect *rect, uint32_t bpp,
-		    uint32_t *len0, uint32_t *len1)
+			uint32_t *len0, uint32_t *len1)
 {
 	*len0 = IMG_LEN(rect->h, img->width, rect->w, bpp);
 	if (IS_PSEUDOPLNR(img->format))
@@ -533,199 +555,1016 @@
 		*len1 = 0;
 }
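
get_len() sizes the planes that may need cache flushing: the first plane covers rect_h full lines of the surface at the given bytes per pixel, and for the pseudo-planar formats the second plane is that length divided by the Y-to-CrCb ratio (2 for the H2V2 4:2:0 layouts, 1 for H2V1). A rough standalone version of the same arithmetic; the names are illustrative only:

#include <stdint.h>

/* First-plane and chroma-plane byte counts in the spirit of get_len().
 * y_to_crcb_ratio is 2 for the H2V2 (4:2:0) formats and 1 otherwise. */
static void plane_lengths(uint32_t rect_h, uint32_t width, uint32_t bpp,
			  uint32_t y_to_crcb_ratio, int pseudo_planar,
			  uint32_t *len0, uint32_t *len1)
{
	*len0 = rect_h * width * bpp;
	*len1 = pseudo_planar ? *len0 / y_to_crcb_ratio : 0;
}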
 
-static int valid_src_dst(unsigned long src_start, unsigned long src_len,
-			 unsigned long dst_start, unsigned long dst_len,
-			 struct mdp_blit_req *req, struct mdp_regs *regs)
+static void flush_imgs(struct mdp_blit_req *req, int src_bpp, int dst_bpp,
+			struct file *p_src_file, struct file *p_dst_file)
 {
-	unsigned long src_min_ok = src_start;
-	unsigned long src_max_ok = src_start + src_len;
-	unsigned long dst_min_ok = dst_start;
-	unsigned long dst_max_ok = dst_start + dst_len;
-	uint32_t src0_len, src1_len, dst0_len, dst1_len;
-	get_len(&req->src, &req->src_rect, regs->src_bpp, &src0_len,
-		 &src1_len);
-	get_len(&req->dst, &req->dst_rect, regs->dst_bpp, &dst0_len,
-		 &dst1_len);
+	uint32_t src0_len, src1_len;
 
-	if (regs->src0 < src_min_ok || regs->src0 > src_max_ok ||
-	    regs->src0 + src0_len > src_max_ok) {
-		DLOG("invalid_src %x %x %lx %lx\n", regs->src0,
-		      src0_len, src_min_ok, src_max_ok);
-		return 0;
+	if (!(req->flags & MDP_BLIT_NON_CACHED)) {
+		/* flush src images to memory before dma to mdp */
+		get_len(&req->src, &req->src_rect, src_bpp,
+		&src0_len, &src1_len);
+
+		flush_pmem_file(p_src_file,
+		req->src.offset, src0_len);
+
+		if (IS_PSEUDOPLNR(req->src.format))
+			flush_pmem_file(p_src_file,
+				req->src.offset + src0_len, src1_len);
 	}
-	if (regs->src_cfg & PPP_SRC_PLANE_PSEUDOPLNR) {
-		if (regs->src1 < src_min_ok || regs->src1 > src_max_ok ||
-		    regs->src1 + src1_len > src_max_ok) {
-			DLOG("invalid_src1");
-			return 0;
+
+}
+#else
+static void flush_imgs(struct mdp_blit_req *req, int src_bpp, int dst_bpp,
+			struct file *p_src_file, struct file *p_dst_file) { }
+#endif
+
+static void mdp_start_ppp(struct msm_fb_data_type *mfd, MDPIBUF *iBuf,
+struct mdp_blit_req *req, struct file *p_src_file, struct file *p_dst_file)
+{
+	uint8 *src0, *src1;
+	uint8 *dest0, *dest1;
+	uint16 inpBpp;
+	uint32 dest0_ystride;
+	uint32 src_width;
+	uint32 src_height;
+	uint32 src0_ystride;
+	uint32 dst_roi_width;
+	uint32 dst_roi_height;
+	uint32 ppp_src_cfg_reg, ppp_operation_reg, ppp_dst_cfg_reg;
+	uint32 alpha, tpVal;
+	uint32 packPattern;
+	uint32 dst_packPattern;
+	boolean inputRGB, outputRGB, pseudoplanr_output;
+	int sv_slice, sh_slice;
+	int dv_slice, dh_slice;
+	boolean perPixelAlpha = FALSE;
+	boolean ppp_lookUp_enable = FALSE;
+
+	sv_slice = sh_slice = dv_slice = dh_slice = 1;
+	alpha = tpVal = 0;
+	src_width = iBuf->mdpImg.width;
+	src_height = iBuf->roi.y + iBuf->roi.height;
+	src1 = NULL;
+	dest1 = NULL;
+
+	inputRGB = outputRGB = TRUE;
+	pseudoplanr_output = FALSE;
+	ppp_operation_reg = 0;
+	ppp_dst_cfg_reg = 0;
+	ppp_src_cfg_reg = 0;
+
+	/* Wait for the pipe to clear */
+	do { } while (mdp_ppp_pipe_wait() <= 0);
+
+	/*
+	 * destination config
+	 */
+	switch (iBuf->ibuf_type) {
+	case MDP_RGB_888:
+		dst_packPattern =
+		    MDP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8);
+		ppp_dst_cfg_reg =
+		    PPP_DST_C0G_8BIT | PPP_DST_C1B_8BIT | PPP_DST_C2R_8BIT |
+		    PPP_DST_PACKET_CNT_INTERLVD_3ELEM | PPP_DST_PACK_TIGHT |
+		    PPP_DST_PACK_ALIGN_LSB | PPP_DST_OUT_SEL_AXI |
+		    PPP_DST_BPP_3BYTES | PPP_DST_PLANE_INTERLVD;
+		break;
+
+	case MDP_BGRA_8888:
+	case MDP_XRGB_8888:
+	case MDP_ARGB_8888:
+	case MDP_RGBA_8888:
+	case MDP_RGBX_8888:
+		if (iBuf->ibuf_type == MDP_BGRA_8888)
+			dst_packPattern =
+			    MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R, CLR_G, CLR_B,
+						 8);
+		else if (iBuf->ibuf_type == MDP_RGBA_8888 ||
+				 iBuf->ibuf_type == MDP_RGBX_8888)
+			dst_packPattern =
+			    MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B, CLR_G, CLR_R,
+						 8);
+		else if (iBuf->ibuf_type == MDP_XRGB_8888)
+			dst_packPattern =
+			    MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R, CLR_G, CLR_B,
+						 8);
+		else
+			dst_packPattern =
+			    MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R, CLR_G, CLR_B,
+						 8);
+
+		ppp_dst_cfg_reg = PPP_DST_C0G_8BIT |
+		    PPP_DST_C1B_8BIT |
+		    PPP_DST_C2R_8BIT |
+		    PPP_DST_C3A_8BIT |
+		    PPP_DST_C3ALPHA_EN |
+		    PPP_DST_PACKET_CNT_INTERLVD_4ELEM |
+		    PPP_DST_PACK_TIGHT |
+		    PPP_DST_PACK_ALIGN_LSB |
+		    PPP_DST_OUT_SEL_AXI |
+		    PPP_DST_BPP_4BYTES | PPP_DST_PLANE_INTERLVD;
+		break;
+
+	case MDP_Y_CBCR_H2V2:
+	case MDP_Y_CRCB_H2V2:
+		if (iBuf->ibuf_type == MDP_Y_CBCR_H2V2)
+			dst_packPattern =
+			    MDP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8);
+		else
+			dst_packPattern =
+			    MDP_GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8);
+
+		ppp_dst_cfg_reg = PPP_DST_C2R_8BIT |
+		    PPP_DST_C0G_8BIT |
+		    PPP_DST_C1B_8BIT |
+		    PPP_DST_C3A_8BIT |
+		    PPP_DST_PACKET_CNT_INTERLVD_2ELEM |
+		    PPP_DST_PACK_TIGHT |
+		    PPP_DST_PACK_ALIGN_LSB |
+		    PPP_DST_OUT_SEL_AXI | PPP_DST_BPP_2BYTES;
+
+		ppp_operation_reg |= PPP_OP_DST_CHROMA_420;
+		outputRGB = FALSE;
+		pseudoplanr_output = TRUE;
+		/*
+		 * vertically (y direction) and horizontally (x direction)
+		 * sample reduction by 2
+		 */
+
+		/*
+		 * H2V2(YUV420) Cosite
+		 *
+		 * Y    Y    Y    Y
+		 * CbCr      CbCr
+		 * Y    Y    Y    Y
+		 * Y    Y    Y    Y
+		 * CbCr      CbCr
+		 * Y    Y    Y    Y
+		 */
+		dv_slice = dh_slice = 2;
+
+		/* (x,y) and (width,height) must be even numbers */
+		iBuf->roi.lcd_x = (iBuf->roi.lcd_x / 2) * 2;
+		iBuf->roi.dst_width = (iBuf->roi.dst_width / 2) * 2;
+		iBuf->roi.x = (iBuf->roi.x / 2) * 2;
+		iBuf->roi.width = (iBuf->roi.width / 2) * 2;
+
+		iBuf->roi.lcd_y = (iBuf->roi.lcd_y / 2) * 2;
+		iBuf->roi.dst_height = (iBuf->roi.dst_height / 2) * 2;
+		iBuf->roi.y = (iBuf->roi.y / 2) * 2;
+		iBuf->roi.height = (iBuf->roi.height / 2) * 2;
+		break;
+
+	case MDP_YCRYCB_H2V1:
+		dst_packPattern =
+		    MDP_GET_PACK_PATTERN(CLR_Y, CLR_CR, CLR_Y, CLR_CB, 8);
+		ppp_dst_cfg_reg =
+		    PPP_DST_C2R_8BIT | PPP_DST_C0G_8BIT | PPP_DST_C1B_8BIT |
+		    PPP_DST_C3A_8BIT | PPP_DST_PACKET_CNT_INTERLVD_4ELEM |
+		    PPP_DST_PACK_TIGHT | PPP_DST_PACK_ALIGN_LSB |
+		    PPP_DST_OUT_SEL_AXI | PPP_DST_BPP_2BYTES |
+		    PPP_DST_PLANE_INTERLVD;
+
+		ppp_operation_reg |= PPP_OP_DST_CHROMA_H2V1;
+		outputRGB = FALSE;
+		/*
+		 * horizontally (x direction) sample reduction by 2
+		 *
+		 * H2V1(YUV422) Cosite
+		 *
+		 * YCbCr    Y    YCbCr    Y
+		 * YCbCr    Y    YCbCr    Y
+		 * YCbCr    Y    YCbCr    Y
+		 * YCbCr    Y    YCbCr    Y
+		 */
+		dh_slice = 2;
+
+		/*
+		 * if it's TV-Out/MDP_YCRYCB_H2V1, go through the preloaded
+		 * gamma setting of 2.2 when the content is non-linear:
+		 * ppp_lookUp_enable = TRUE;
+		 */
+
+		/* x and width must be even numbers */
+		iBuf->roi.lcd_x = (iBuf->roi.lcd_x / 2) * 2;
+		iBuf->roi.dst_width = (iBuf->roi.dst_width / 2) * 2;
+		iBuf->roi.x = (iBuf->roi.x / 2) * 2;
+		iBuf->roi.width = (iBuf->roi.width / 2) * 2;
+		break;
+
+	case MDP_Y_CBCR_H2V1:
+	case MDP_Y_CRCB_H2V1:
+		if (iBuf->ibuf_type == MDP_Y_CBCR_H2V1)
+			dst_packPattern =
+			    MDP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8);
+		else
+			dst_packPattern =
+			    MDP_GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8);
+
+		ppp_dst_cfg_reg = PPP_DST_C2R_8BIT |
+		    PPP_DST_C0G_8BIT |
+		    PPP_DST_C1B_8BIT |
+		    PPP_DST_C3A_8BIT |
+		    PPP_DST_PACKET_CNT_INTERLVD_2ELEM |
+		    PPP_DST_PACK_TIGHT |
+		    PPP_DST_PACK_ALIGN_LSB |
+		    PPP_DST_OUT_SEL_AXI | PPP_DST_BPP_2BYTES;
+
+		ppp_operation_reg |= PPP_OP_DST_CHROMA_H2V1;
+		outputRGB = FALSE;
+		pseudoplanr_output = TRUE;
+		/* horizontally (x direction) sample reduction by 2 */
+		dh_slice = 2;
+
+		/* x and width must be even numbers */
+		iBuf->roi.lcd_x = (iBuf->roi.lcd_x / 2) * 2;
+		iBuf->roi.dst_width = (iBuf->roi.dst_width / 2) * 2;
+		iBuf->roi.x = (iBuf->roi.x / 2) * 2;
+		iBuf->roi.width = (iBuf->roi.width / 2) * 2;
+		break;
+
+	case MDP_BGR_565:
+	case MDP_RGB_565:
+	default:
+		if (iBuf->ibuf_type == MDP_RGB_565)
+			dst_packPattern =
+			    MDP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8);
+		else
+			dst_packPattern =
+			    MDP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8);
+
+		ppp_dst_cfg_reg = PPP_DST_C0G_6BIT |
+		    PPP_DST_C1B_5BIT |
+		    PPP_DST_C2R_5BIT |
+		    PPP_DST_PACKET_CNT_INTERLVD_3ELEM |
+		    PPP_DST_PACK_TIGHT |
+		    PPP_DST_PACK_ALIGN_LSB |
+		    PPP_DST_OUT_SEL_AXI |
+		    PPP_DST_BPP_2BYTES | PPP_DST_PLANE_INTERLVD;
+		break;
+	}
+
+	/* source config */
+	switch (iBuf->mdpImg.imgType) {
+	case MDP_RGB_888:
+		inpBpp = 3;
+		/*
+		 * 888 = 3bytes
+		 * RGB = 3Components
+		 * RGB interleaved
+		 */
+		ppp_src_cfg_reg = PPP_SRC_C2R_8BITS | PPP_SRC_C0G_8BITS |
+			PPP_SRC_C1B_8BITS | PPP_SRC_BPP_INTERLVD_3BYTES |
+			PPP_SRC_INTERLVD_3COMPONENTS | PPP_SRC_UNPACK_TIGHT |
+			PPP_SRC_UNPACK_ALIGN_LSB |
+			PPP_SRC_FETCH_PLANES_INTERLVD;
+
+		packPattern = MDP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8);
+
+		ppp_operation_reg |= PPP_OP_COLOR_SPACE_RGB |
+		    PPP_OP_SRC_CHROMA_RGB | PPP_OP_DST_CHROMA_RGB;
+		break;
+
+	case MDP_BGRA_8888:
+	case MDP_RGBA_8888:
+	case MDP_ARGB_8888:
+		perPixelAlpha = TRUE;
+	case MDP_XRGB_8888:
+	case MDP_RGBX_8888:
+		inpBpp = 4;
+		/*
+		 * 8888 = 4bytes
+		 * ARGB = 4Components
+		 * ARGB interleaved
+		 */
+		ppp_src_cfg_reg = PPP_SRC_C2R_8BITS | PPP_SRC_C0G_8BITS |
+			PPP_SRC_C1B_8BITS | PPP_SRC_C3A_8BITS |
+			PPP_SRC_C3_ALPHA_EN | PPP_SRC_BPP_INTERLVD_4BYTES |
+			PPP_SRC_INTERLVD_4COMPONENTS | PPP_SRC_UNPACK_TIGHT |
+			PPP_SRC_UNPACK_ALIGN_LSB |
+			PPP_SRC_FETCH_PLANES_INTERLVD;
+
+		if (iBuf->mdpImg.imgType == MDP_BGRA_8888)
+			packPattern =
+			    MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R, CLR_G, CLR_B,
+						 8);
+		else if (iBuf->mdpImg.imgType == MDP_RGBA_8888 ||
+				 iBuf->mdpImg.imgType == MDP_RGBX_8888)
+			packPattern =
+			    MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B, CLR_G, CLR_R,
+						 8);
+		else if (iBuf->mdpImg.imgType == MDP_XRGB_8888)
+			packPattern =
+			    MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R, CLR_G, CLR_B,
+						 8);
+		else
+			packPattern =
+			    MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R, CLR_G, CLR_B,
+						 8);
+
+		ppp_operation_reg |= PPP_OP_COLOR_SPACE_RGB |
+		    PPP_OP_SRC_CHROMA_RGB | PPP_OP_DST_CHROMA_RGB;
+		break;
+
+	case MDP_Y_CBCR_H2V2:
+	case MDP_Y_CRCB_H2V2:
+		inpBpp = 1;
+		src1 = (uint8 *) iBuf->mdpImg.cbcr_addr;
+
+		/*
+		 * CbCr = 2bytes
+		 * CbCr = 2Components
+		 * Y+CbCr
+		 */
+		ppp_src_cfg_reg = PPP_SRC_C2R_8BITS | PPP_SRC_C0G_8BITS |
+			PPP_SRC_C1B_8BITS | PPP_SRC_BPP_INTERLVD_2BYTES |
+			PPP_SRC_INTERLVD_2COMPONENTS | PPP_SRC_UNPACK_TIGHT |
+			PPP_SRC_UNPACK_ALIGN_LSB |
+			PPP_SRC_FETCH_PLANES_PSEUDOPLNR;
+
+		if (iBuf->mdpImg.imgType == MDP_Y_CRCB_H2V2)
+			packPattern =
+			    MDP_GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8);
+		else
+			packPattern =
+			    MDP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8);
+
+		ppp_operation_reg |= PPP_OP_COLOR_SPACE_YCBCR |
+		    PPP_OP_SRC_CHROMA_420 |
+		    PPP_OP_SRC_CHROMA_COSITE |
+		    PPP_OP_DST_CHROMA_RGB | PPP_OP_DST_CHROMA_COSITE;
+
+		inputRGB = FALSE;
+		sh_slice = sv_slice = 2;
+		break;
+
+	case MDP_YCRYCB_H2V1:
+		inpBpp = 2;
+		ppp_src_cfg_reg = PPP_SRC_C2R_8BITS |
+		    PPP_SRC_C0G_8BITS |
+		    PPP_SRC_C1B_8BITS |
+		    PPP_SRC_C3A_8BITS |
+		    PPP_SRC_BPP_INTERLVD_2BYTES |
+		    PPP_SRC_INTERLVD_4COMPONENTS |
+		    PPP_SRC_UNPACK_TIGHT | PPP_SRC_UNPACK_ALIGN_LSB;
+
+		packPattern =
+		    MDP_GET_PACK_PATTERN(CLR_Y, CLR_CR, CLR_Y, CLR_CB, 8);
+
+		ppp_operation_reg |= PPP_OP_SRC_CHROMA_H2V1 |
+		    PPP_OP_SRC_CHROMA_COSITE | PPP_OP_DST_CHROMA_COSITE;
+
+		/*
+		 * if it's TV-Out/MDP_YCRYCB_H2V1, go through the preloaded
+		 * inverse gamma setting of 2.2 (the two curves are symmetric)
+		 * when the content is non-linear:
+		 * ppp_lookUp_enable = TRUE;
+		 */
+
+		/* x and width must be even numbers */
+		iBuf->roi.lcd_x = (iBuf->roi.lcd_x / 2) * 2;
+		iBuf->roi.dst_width = (iBuf->roi.dst_width / 2) * 2;
+		iBuf->roi.x = (iBuf->roi.x / 2) * 2;
+		iBuf->roi.width = (iBuf->roi.width / 2) * 2;
+
+		inputRGB = FALSE;
+		sh_slice = 2;
+		break;
+
+	case MDP_Y_CBCR_H2V1:
+	case MDP_Y_CRCB_H2V1:
+		inpBpp = 1;
+		src1 = (uint8 *) iBuf->mdpImg.cbcr_addr;
+
+		ppp_src_cfg_reg = PPP_SRC_C2R_8BITS |
+		    PPP_SRC_C0G_8BITS |
+		    PPP_SRC_C1B_8BITS |
+		    PPP_SRC_C3A_8BITS |
+		    PPP_SRC_BPP_INTERLVD_2BYTES |
+		    PPP_SRC_INTERLVD_2COMPONENTS |
+		    PPP_SRC_UNPACK_TIGHT |
+		    PPP_SRC_UNPACK_ALIGN_LSB | PPP_SRC_FETCH_PLANES_PSEUDOPLNR;
+
+		if (iBuf->mdpImg.imgType == MDP_Y_CBCR_H2V1)
+			packPattern =
+			    MDP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8);
+		else
+			packPattern =
+			    MDP_GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8);
+
+		ppp_operation_reg |= PPP_OP_SRC_CHROMA_H2V1 |
+		    PPP_OP_SRC_CHROMA_COSITE | PPP_OP_DST_CHROMA_COSITE;
+		inputRGB = FALSE;
+		sh_slice = 2;
+		break;
+
+	case MDP_BGR_565:
+	case MDP_RGB_565:
+	default:
+		inpBpp = 2;
+		/*
+		 * 565 = 2bytes
+		 * RGB = 3Components
+		 * RGB interleaved
+		 */
+		ppp_src_cfg_reg = PPP_SRC_C2R_5BITS | PPP_SRC_C0G_6BITS |
+			PPP_SRC_C1B_5BITS | PPP_SRC_BPP_INTERLVD_2BYTES |
+			PPP_SRC_INTERLVD_3COMPONENTS | PPP_SRC_UNPACK_TIGHT |
+			PPP_SRC_UNPACK_ALIGN_LSB |
+			PPP_SRC_FETCH_PLANES_INTERLVD;
+
+		if (iBuf->mdpImg.imgType == MDP_RGB_565)
+			packPattern =
+			    MDP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8);
+		else
+			packPattern =
+			    MDP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8);
+
+		ppp_operation_reg |= PPP_OP_COLOR_SPACE_RGB |
+		    PPP_OP_SRC_CHROMA_RGB | PPP_OP_DST_CHROMA_RGB;
+		break;
+
+	}
+
+	if (pseudoplanr_output)
+		ppp_dst_cfg_reg |= PPP_DST_PLANE_PSEUDOPLN;
+
+	/* YCbCr to RGB color conversion flag */
+	if ((!inputRGB) && (outputRGB)) {
+		ppp_operation_reg |= PPP_OP_CONVERT_YCBCR2RGB |
+		    PPP_OP_CONVERT_ON;
+
+		/*
+		 * primary/secondary is a somewhat misleading term, but
+		 * in mdp2.2/3.0 we only use primary matrix (forward/rev)
+		 * in mdp3.1 we use set1(prim) and set2(secd)
+		 */
+#ifdef CONFIG_FB_MSM_MDP31
+		ppp_operation_reg |= PPP_OP_CONVERT_MATRIX_SECONDARY |
+					PPP_OP_DST_RGB;
+		MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0240, 0);
+#endif
+
+		if (ppp_lookUp_enable) {
+			ppp_operation_reg |= PPP_OP_LUT_C0_ON |
+			    PPP_OP_LUT_C1_ON | PPP_OP_LUT_C2_ON;
 		}
 	}
-	if (regs->dst0 < dst_min_ok || regs->dst0 > dst_max_ok ||
-	    regs->dst0 + dst0_len > dst_max_ok) {
-		DLOG("invalid_dst");
-		return 0;
-	}
-	if (regs->dst_cfg & PPP_SRC_PLANE_PSEUDOPLNR) {
-		if (regs->dst1 < dst_min_ok || regs->dst1 > dst_max_ok ||
-		    regs->dst1 + dst1_len > dst_max_ok) {
-			DLOG("invalid_dst1");
-			return 0;
+	/* RGB to YCbCr color conversion flag */
+	if ((inputRGB) && (!outputRGB)) {
+		ppp_operation_reg |= PPP_OP_CONVERT_RGB2YCBCR |
+		    PPP_OP_CONVERT_ON;
+
+#ifdef CONFIG_FB_MSM_MDP31
+		ppp_operation_reg |= PPP_OP_CONVERT_MATRIX_PRIMARY |
+					PPP_OP_DST_YCBCR;
+		MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0240, 0x1e);
+#endif
+
+		if (ppp_lookUp_enable) {
+			ppp_operation_reg |= PPP_OP_LUT_C0_ON |
+			    PPP_OP_LUT_C1_ON | PPP_OP_LUT_C2_ON;
 		}
 	}
-	return 1;
+	/* YCbCr to YCbCr color conversion flag */
+	if ((!inputRGB) && (!outputRGB)) {
+		if ((ppp_lookUp_enable) &&
+		    (iBuf->mdpImg.imgType != iBuf->ibuf_type)) {
+			ppp_operation_reg |= PPP_OP_LUT_C0_ON;
+		}
+	}
+
+	ppp_src_cfg_reg |= (iBuf->roi.x % 2) ? PPP_SRC_BPP_ROI_ODD_X : 0;
+	ppp_src_cfg_reg |= (iBuf->roi.y % 2) ? PPP_SRC_BPP_ROI_ODD_Y : 0;
+
+	if (req->flags & MDP_DEINTERLACE)
+		ppp_operation_reg |= PPP_OP_DEINT_EN;
+
+	/* Dither at DMA side only since iBuf format is RGB888 */
+	if (iBuf->mdpImg.mdpOp & MDPOP_DITHER)
+		ppp_operation_reg |= PPP_OP_DITHER_EN;
+
+	if (iBuf->mdpImg.mdpOp & MDPOP_ROTATION) {
+		ppp_operation_reg |= PPP_OP_ROT_ON;
+
+		if (iBuf->mdpImg.mdpOp & MDPOP_ROT90) {
+			ppp_operation_reg |= PPP_OP_ROT_90;
+		}
+		if (iBuf->mdpImg.mdpOp & MDPOP_LR) {
+			ppp_operation_reg |= PPP_OP_FLIP_LR;
+		}
+		if (iBuf->mdpImg.mdpOp & MDPOP_UD) {
+			ppp_operation_reg |= PPP_OP_FLIP_UD;
+		}
+	}
+
+	src0_ystride = src_width * inpBpp;
+	dest0_ystride = iBuf->ibuf_width * iBuf->bpp;
+
+	/* no need to account for rotation here since these are the real XY dimensions */
+	dst_roi_width = iBuf->roi.dst_width;
+	dst_roi_height = iBuf->roi.dst_height;
+
+	src0 = (uint8 *) iBuf->mdpImg.bmy_addr;
+	dest0 = (uint8 *) iBuf->buf;
+
+	/* Jumping from Y-Plane to Chroma Plane */
+	dest1 = mdp_get_chroma_addr(iBuf);
+
+	/* first pixel addr calculation */
+	mdp_adjust_start_addr(&src0, &src1, sv_slice, sh_slice, iBuf->roi.x,
+			      iBuf->roi.y, src_width, src_height, inpBpp, iBuf,
+			      0);
+	mdp_adjust_start_addr(&dest0, &dest1, dv_slice, dh_slice,
+			      iBuf->roi.lcd_x, iBuf->roi.lcd_y,
+			      iBuf->ibuf_width, iBuf->ibuf_height, iBuf->bpp,
+			      iBuf, 2);
+
+	/* set scale operation */
+	mdp_set_scale(iBuf, dst_roi_width, dst_roi_height,
+		      inputRGB, outputRGB, &ppp_operation_reg);
+
+	/*
+	 * setting background source for blending
+	 */
+	mdp_set_blend_attr(iBuf, &alpha, &tpVal, perPixelAlpha,
+			   &ppp_operation_reg);
+
+	if (ppp_operation_reg & PPP_OP_BLEND_ON) {
+		mdp_ppp_setbg(iBuf);
+
+		if (iBuf->ibuf_type == MDP_YCRYCB_H2V1) {
+			ppp_operation_reg |= PPP_OP_BG_CHROMA_H2V1;
+
+			if (iBuf->mdpImg.mdpOp & MDPOP_TRANSP) {
+				tpVal = mdp_conv_matx_rgb2yuv(tpVal,
+						      (uint16 *) &
+						      mdp_ccs_rgb2yuv,
+						      &mdp_plv[0], NULL);
+			}
+		}
+	}
+
+	/*
+	 * 0x0004: enable dbg bus
+	 * 0x0100: "don't care" Edge Condition until scaling is on
+	 * 0x0104: xrc tile x&y size u7.6 format = 7bit.6bit
+	 * 0x0108: src pixel size
+	 * 0x010c: component plane 0 starting address
+	 * 0x011c: component plane 0 ystride
+	 * 0x0124: PPP source config register
+	 * 0x0128: unpacked pattern from lsb to msb (e.g. RGB->BGR)
+	 */
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0108, (iBuf->roi.height << 16 |
+						      iBuf->roi.width));
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x010c, src0); /* comp.plane 0 */
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0110, src1); /* comp.plane 1 */
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x011c,
+		 (src0_ystride << 16 | src0_ystride));
+
+	/* setup for rgb 565 */
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0124, ppp_src_cfg_reg);
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0128, packPattern);
+	/*
+	 * 0x0138: PPP destination operation register
+	 * 0x014c: constant_alpha|transparent_color
+	 * 0x0150: PPP destination config register
+	 * 0x0154: PPP packing pattern
+	 */
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0138, ppp_operation_reg);
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x014c, alpha << 24 | (tpVal &
+								0xffffff));
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0150, ppp_dst_cfg_reg);
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0154, dst_packPattern);
+
+	/*
+	 * 0x0164: ROI height and width
+	 * 0x0168: Component Plane 0 starting addr
+	 * 0x016c: Component Plane 1 starting addr
+	 * 0x0178: Component Plane 1/0 y stride
+	 */
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0164,
+		 (dst_roi_height << 16 | dst_roi_width));
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0168, dest0);
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x016c, dest1);
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0178,
+		 (dest0_ystride << 16 | dest0_ystride));
+
+	flush_imgs(req, inpBpp, iBuf->bpp, p_src_file, p_dst_file);
+#ifdef	CONFIG_FB_MSM_MDP31
+	MDP_OUTP(MDP_BASE + 0x00100, 0xFF00);
+#endif
+	mdp_pipe_kickoff(MDP_PPP_TERM, mfd);
 }
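
Several of the chroma-subsampled cases in mdp_start_ppp() force ROI coordinates and extents to even values with the (v / 2) * 2 pattern before programming the registers. For unsigned values that is the same as clearing the low bit, shown here as a small sketch (the helper is illustrative, not part of the driver):

#include <stdint.h>

/* Round an unsigned coordinate or extent down to an even value;
 * equivalent to the (v / 2) * 2 adjustments used above. */
static uint32_t round_down_even(uint32_t v)
{
	return v & ~1u;
}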
 
-
-static void flush_imgs(struct mdp_blit_req *req, struct mdp_regs *regs,
-		       struct file *src_file, struct file *dst_file)
+static int mdp_ppp_verify_req(struct mdp_blit_req *req)
 {
-}
+	u32 src_width, src_height, dst_width, dst_height;
 
-static void get_chroma_addr(struct mdp_img *img, struct mdp_rect *rect,
-			    uint32_t base, uint32_t bpp, uint32_t cfg,
-			    uint32_t *addr, uint32_t *ystride)
-{
-	uint32_t compress_v = Y_TO_CRCB_RATIO(img->format);
-	uint32_t compress_h = 2;
-	uint32_t  offset;
+	if (req == NULL) {
+		printk(KERN_ERR "\n%s(): Error in Line %u", __func__,
+			__LINE__);
+		return -1;
+	}
 
-	if (IS_PSEUDOPLNR(img->format)) {
-		offset = (rect->x / compress_h) * compress_h;
-		offset += rect->y == 0 ? 0 :
-			  ((rect->y + 1) / compress_v) * img->width;
-		*addr = base + (img->width * img->height * bpp);
-		*addr += offset * bpp;
-		*ystride |= *ystride << 16;
+	if (MDP_IS_IMGTYPE_BAD(req->src.format) ||
+	    MDP_IS_IMGTYPE_BAD(req->dst.format)) {
+		printk(KERN_ERR "\n%s(): Error in Line %u", __func__,
+			__LINE__);
+		return -1;
+	}
+
+	if ((req->src.width == 0) || (req->src.height == 0) ||
+	    (req->src_rect.w == 0) || (req->src_rect.h == 0) ||
+	    (req->dst.width == 0) || (req->dst.height == 0) ||
+	    (req->dst_rect.w == 0) || (req->dst_rect.h == 0)) {
+		printk(KERN_ERR "\n%s(): Error in Line %u", __func__,
+			__LINE__);
+
+		return -1;
+	}
+
+	if (((req->src_rect.x + req->src_rect.w) > req->src.width) ||
+	    ((req->src_rect.y + req->src_rect.h) > req->src.height)) {
+		printk(KERN_ERR "\n%s(): Error in Line %u", __func__,
+			__LINE__);
+		return -1;
+	}
+
+	if (((req->dst_rect.x + req->dst_rect.w) > req->dst.width) ||
+	    ((req->dst_rect.y + req->dst_rect.h) > req->dst.height)) {
+		printk(KERN_ERR "\n%s(): Error in Line %u", __func__,
+			__LINE__);
+		return -1;
+	}
+
+	/*
+	 * scaling range check
+	 */
+	src_width = req->src_rect.w;
+	src_height = req->src_rect.h;
+
+	if (req->flags & MDP_ROT_90) {
+		dst_width = req->dst_rect.h;
+		dst_height = req->dst_rect.w;
 	} else {
-		*addr = 0;
+		dst_width = req->dst_rect.w;
+		dst_height = req->dst_rect.h;
 	}
-}
 
-static int send_blit(const struct mdp_info *mdp, struct mdp_blit_req *req,
-		     struct mdp_regs *regs, struct file *src_file,
-		     struct file *dst_file)
-{
-	mdp_writel(mdp, 1, 0x060);
-	mdp_writel(mdp, regs->src_rect, PPP_ADDR_SRC_ROI);
-	mdp_writel(mdp, regs->src0, PPP_ADDR_SRC0);
-	mdp_writel(mdp, regs->src1, PPP_ADDR_SRC1);
-	mdp_writel(mdp, regs->src_ystride, PPP_ADDR_SRC_YSTRIDE);
-	mdp_writel(mdp, regs->src_cfg, PPP_ADDR_SRC_CFG);
-	mdp_writel(mdp, regs->src_pack, PPP_ADDR_SRC_PACK_PATTERN);
+	switch (req->dst.format) {
+	case MDP_Y_CRCB_H2V2:
+	case MDP_Y_CBCR_H2V2:
+		src_width = (src_width / 2) * 2;
+		src_height = (src_height / 2) * 2;
+		dst_width = (dst_width / 2) * 2;
+		dst_height = (dst_height / 2) * 2;
+		break;
 
-	mdp_writel(mdp, regs->op, PPP_ADDR_OPERATION);
-	mdp_writel(mdp, regs->phasex_init, PPP_ADDR_PHASEX_INIT);
-	mdp_writel(mdp, regs->phasey_init, PPP_ADDR_PHASEY_INIT);
-	mdp_writel(mdp, regs->phasex_step, PPP_ADDR_PHASEX_STEP);
-	mdp_writel(mdp, regs->phasey_step, PPP_ADDR_PHASEY_STEP);
+	case MDP_Y_CRCB_H2V1:
+	case MDP_Y_CBCR_H2V1:
+	case MDP_YCRYCB_H2V1:
+		src_width = (src_width / 2) * 2;
+		dst_width = (dst_width / 2) * 2;
+		break;
 
-	mdp_writel(mdp, (req->alpha << 24) | (req->transp_mask & 0xffffff),
-	       PPP_ADDR_ALPHA_TRANSP);
-
-	mdp_writel(mdp, regs->dst_cfg, PPP_ADDR_DST_CFG);
-	mdp_writel(mdp, regs->dst_pack, PPP_ADDR_DST_PACK_PATTERN);
-	mdp_writel(mdp, regs->dst_rect, PPP_ADDR_DST_ROI);
-	mdp_writel(mdp, regs->dst0, PPP_ADDR_DST0);
-	mdp_writel(mdp, regs->dst1, PPP_ADDR_DST1);
-	mdp_writel(mdp, regs->dst_ystride, PPP_ADDR_DST_YSTRIDE);
-
-	mdp_writel(mdp, regs->edge, PPP_ADDR_EDGE);
-	if (regs->op & PPP_OP_BLEND_ON) {
-		mdp_writel(mdp, regs->dst0, PPP_ADDR_BG0);
-		mdp_writel(mdp, regs->dst1, PPP_ADDR_BG1);
-		mdp_writel(mdp, regs->dst_ystride, PPP_ADDR_BG_YSTRIDE);
-		mdp_writel(mdp, src_img_cfg[req->dst.format], PPP_ADDR_BG_CFG);
-		mdp_writel(mdp, pack_pattern[req->dst.format],
-			   PPP_ADDR_BG_PACK_PATTERN);
+	default:
+		break;
 	}
-	flush_imgs(req, regs, src_file, dst_file);
-	mdp_writel(mdp, 0x1000, MDP_DISPLAY0_START);
+
+	if (((MDP_SCALE_Q_FACTOR * dst_width) / src_width >
+	     MDP_MAX_X_SCALE_FACTOR)
+	    || ((MDP_SCALE_Q_FACTOR * dst_width) / src_width <
+		MDP_MIN_X_SCALE_FACTOR)) {
+		printk(KERN_ERR "\n%s(): Error in Line %u", __func__,
+			__LINE__);
+		return -1;
+	}
+
+	if (((MDP_SCALE_Q_FACTOR * dst_height) / src_height >
+	     MDP_MAX_Y_SCALE_FACTOR)
+	    || ((MDP_SCALE_Q_FACTOR * dst_height) / src_height <
+		MDP_MIN_Y_SCALE_FACTOR)) {
+		printk(KERN_ERR "\n%s(): Error in Line %u", __func__,
+			__LINE__);
+		return -1;
+	}
 	return 0;
 }
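
mdp_ppp_verify_req() rejects a blit whose fixed-point scale ratio, (MDP_SCALE_Q_FACTOR * dst) / src, falls outside the hardware's minimum and maximum scale factors, checking width and height independently. A hedged sketch of that check with the constants passed in as parameters; the names below are placeholders, not the driver's macros:

#include <stdint.h>

/* Return 1 when the dst/src ratio, expressed against the driver's Q
 * factor, lies within [min_f, max_f]; 0 otherwise (including src == 0). */
static int scale_in_range(uint32_t src, uint32_t dst, uint32_t q_factor,
			  uint32_t min_f, uint32_t max_f)
{
	uint32_t ratio;

	if (src == 0)
		return 0;
	ratio = (q_factor * dst) / src;
	return ratio >= min_f && ratio <= max_f;
}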
 
-int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req,
-		 struct file *src_file, unsigned long src_start, unsigned long src_len,
-		 struct file *dst_file, unsigned long dst_start, unsigned long dst_len)
+int get_gem_img(struct mdp_img *img, unsigned long *start, unsigned long *len)
 {
-	struct mdp_regs regs = {0};
+	/* Set len to zero to appropriately error out if
+	   kgsl_gem_obj_addr fails */
 
-	if (unlikely(req->src.format >= MDP_IMGTYPE_LIMIT ||
-		     req->dst.format >= MDP_IMGTYPE_LIMIT)) {
-		printk(KERN_ERR "mpd_ppp: img is of wrong format\n");
-		return -EINVAL;
+	*len = 0;
+	return kgsl_gem_obj_addr(img->memory_id, (int) img->priv, start, len);
+}
+
+int get_img(struct mdp_img *img, struct fb_info *info, unsigned long *start,
+	    unsigned long *len, struct file **pp_file)
+{
+	int put_needed, ret = 0;
+	struct file *file;
+#ifdef CONFIG_ANDROID_PMEM
+	unsigned long vstart;
+#endif
+
+#ifdef CONFIG_ANDROID_PMEM
+	if (!get_pmem_file(img->memory_id, start, &vstart, len, pp_file))
+		return 0;
+#endif
+	file = fget_light(img->memory_id, &put_needed);
+	if (file == NULL)
+		return -1;
+
+	if (MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
+		*start = info->fix.smem_start;
+		*len = info->fix.smem_len;
+		*pp_file = file;
+	} else {
+		ret = -1;
+		fput_light(file, put_needed);
+	}
+	return ret;
+}
+
+
+void put_img(struct file *p_src_file)
+{
+#ifdef CONFIG_ANDROID_PMEM
+	if (p_src_file)
+		put_pmem_file(p_src_file);
+#endif
+}
+
+
+int mdp_ppp_blit(struct fb_info *info, struct mdp_blit_req *req)
+{
+	unsigned long src_start, dst_start;
+	unsigned long src_len = 0;
+	unsigned long dst_len = 0;
+	MDPIBUF iBuf;
+	u32 dst_width, dst_height;
+	struct file *p_src_file = 0, *p_dst_file = 0;
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+
+	if (req->dst.format == MDP_FB_FORMAT)
+		req->dst.format =  mfd->fb_imgType;
+	if (req->src.format == MDP_FB_FORMAT)
+		req->src.format = mfd->fb_imgType;
+	if (req->flags & MDP_BLIT_SRC_GEM)
+		get_gem_img(&req->src, &src_start, &src_len);
+	else
+		get_img(&req->src, info, &src_start, &src_len, &p_src_file);
+	if (src_len == 0) {
+		printk(KERN_ERR "mdp_ppp: could not retrieve image from "
+		       "memory\n");
+		return -1;
+	}
+	if (req->flags & MDP_BLIT_DST_GEM)
+		get_gem_img(&req->dst, &dst_start, &dst_len);
+	else
+		get_img(&req->dst, info, &dst_start, &dst_len, &p_dst_file);
+	if (dst_len == 0) {
+		put_img(p_src_file);
+		printk(KERN_ERR "mdp_ppp: could not retrieve image from "
+		       "memory\n");
+		return -1;
+	}
+	if (mdp_ppp_verify_req(req)) {
+		printk(KERN_ERR "mdp_ppp: invalid image!\n");
+		put_img(p_src_file);
+		put_img(p_dst_file);
+		return -1;
 	}
 
-	if (unlikely(req->src_rect.x > req->src.width ||
-		     req->src_rect.y > req->src.height ||
-		     req->dst_rect.x > req->dst.width ||
-		     req->dst_rect.y > req->dst.height)) {
-		printk(KERN_ERR "mpd_ppp: img rect is outside of img!\n");
-		return -EINVAL;
+	iBuf.ibuf_width = req->dst.width;
+	iBuf.ibuf_height = req->dst.height;
+	iBuf.bpp = bytes_per_pixel[req->dst.format];
+
+	iBuf.ibuf_type = req->dst.format;
+	iBuf.buf = (uint8 *) dst_start;
+	iBuf.buf += req->dst.offset;
+
+	iBuf.roi.lcd_x = req->dst_rect.x;
+	iBuf.roi.lcd_y = req->dst_rect.y;
+	iBuf.roi.dst_width = req->dst_rect.w;
+	iBuf.roi.dst_height = req->dst_rect.h;
+
+	iBuf.roi.x = req->src_rect.x;
+	iBuf.roi.width = req->src_rect.w;
+	iBuf.roi.y = req->src_rect.y;
+	iBuf.roi.height = req->src_rect.h;
+
+	iBuf.mdpImg.width = req->src.width;
+	iBuf.mdpImg.imgType = req->src.format;
+
+	iBuf.mdpImg.bmy_addr = (uint32 *) (src_start + req->src.offset);
+	iBuf.mdpImg.cbcr_addr =
+	    (uint32 *) ((uint32) iBuf.mdpImg.bmy_addr +
+			req->src.width * req->src.height);
+
+	iBuf.mdpImg.mdpOp = MDPOP_NOP;
+
+	/* blending check */
+	if (req->transp_mask != MDP_TRANSP_NOP) {
+		iBuf.mdpImg.mdpOp |= MDPOP_TRANSP;
+		iBuf.mdpImg.tpVal = req->transp_mask;
+		iBuf.mdpImg.tpVal = mdp_calc_tpval(&iBuf.mdpImg);
+	} else {
+		iBuf.mdpImg.tpVal = 0;
 	}
 
-	/* set the src image configuration */
-	regs.src_cfg = src_img_cfg[req->src.format];
-	regs.src_cfg |= (req->src_rect.x & 0x1) ? PPP_SRC_BPP_ROI_ODD_X : 0;
-	regs.src_cfg |= (req->src_rect.y & 0x1) ? PPP_SRC_BPP_ROI_ODD_Y : 0;
-	regs.src_rect = (req->src_rect.h << 16) | req->src_rect.w;
-	regs.src_pack = pack_pattern[req->src.format];
-
-	/* set the dest image configuration */
-	regs.dst_cfg = dst_img_cfg[req->dst.format] | PPP_DST_OUT_SEL_AXI;
-	regs.dst_rect = (req->dst_rect.h << 16) | req->dst_rect.w;
-	regs.dst_pack = pack_pattern[req->dst.format];
-
-	/* set src, bpp, start pixel and ystride */
-	regs.src_bpp = bytes_per_pixel[req->src.format];
-	regs.src0 = src_start + req->src.offset;
-	regs.src_ystride = req->src.width * regs.src_bpp;
-	get_chroma_addr(&req->src, &req->src_rect, regs.src0, regs.src_bpp,
-			regs.src_cfg, &regs.src1, &regs.src_ystride);
-	regs.src0 += (req->src_rect.x + (req->src_rect.y * req->src.width)) *
-		      regs.src_bpp;
-
-	/* set dst, bpp, start pixel and ystride */
-	regs.dst_bpp = bytes_per_pixel[req->dst.format];
-	regs.dst0 = dst_start + req->dst.offset;
-	regs.dst_ystride = req->dst.width * regs.dst_bpp;
-	get_chroma_addr(&req->dst, &req->dst_rect, regs.dst0, regs.dst_bpp,
-			regs.dst_cfg, &regs.dst1, &regs.dst_ystride);
-	regs.dst0 += (req->dst_rect.x + (req->dst_rect.y * req->dst.width)) *
-		      regs.dst_bpp;
-
-	if (!valid_src_dst(src_start, src_len, dst_start, dst_len, req,
-			   &regs)) {
-		printk(KERN_ERR "mpd_ppp: final src or dst location is "
-			"invalid, are you trying to make an image too large "
-			"or to place it outside the screen?\n");
-		return -EINVAL;
+	req->alpha &= 0xff;
+	if (req->alpha < MDP_ALPHA_NOP) {
+		iBuf.mdpImg.mdpOp |= MDPOP_ALPHAB;
+		iBuf.mdpImg.alpha = req->alpha;
+	} else {
+		iBuf.mdpImg.alpha = 0xff;
 	}
 
-	/* set up operation register */
-	regs.op = 0;
-	blit_rotate(req, &regs);
-	blit_convert(req, &regs);
+	/* rotation check */
+	if (req->flags & MDP_FLIP_LR)
+		iBuf.mdpImg.mdpOp |= MDPOP_LR;
+	if (req->flags & MDP_FLIP_UD)
+		iBuf.mdpImg.mdpOp |= MDPOP_UD;
+	if (req->flags & MDP_ROT_90)
+		iBuf.mdpImg.mdpOp |= MDPOP_ROT90;
 	if (req->flags & MDP_DITHER)
-		regs.op |= PPP_OP_DITHER_EN;
-	blit_blend(req, &regs);
-	if (blit_scale(mdp, req, &regs)) {
-		printk(KERN_ERR "mpd_ppp: error computing scale for img.\n");
-		return -EINVAL;
-	}
-	blit_blur(mdp, req, &regs);
-	regs.op |= dst_op_chroma[req->dst.format] |
-		   src_op_chroma[req->src.format];
+		iBuf.mdpImg.mdpOp |= MDPOP_DITHER;
 
-	/* if the image is YCRYCB, the x and w must be even */
-	if (unlikely(req->src.format == MDP_YCRYCB_H2V1)) {
-		req->src_rect.x = req->src_rect.x & (~0x1);
-		req->src_rect.w = req->src_rect.w & (~0x1);
-		req->dst_rect.x = req->dst_rect.x & (~0x1);
-		req->dst_rect.w = req->dst_rect.w & (~0x1);
-	}
-	if (get_edge_cond(req, &regs))
+	if (req->flags & MDP_BLEND_FG_PREMULT) {
+#ifdef CONFIG_FB_MSM_MDP31
+		iBuf.mdpImg.mdpOp |= MDPOP_FG_PM_ALPHA;
+#else
+		put_img(p_src_file);
+		put_img(p_dst_file);
 		return -EINVAL;
+#endif
+	}
 
-	send_blit(mdp, req, &regs, src_file, dst_file);
+	if (req->flags & MDP_DEINTERLACE) {
+#ifdef CONFIG_FB_MSM_MDP31
+		if ((req->src.format != MDP_Y_CBCR_H2V2) &&
+			(req->src.format != MDP_Y_CRCB_H2V2)) {
+#endif
+			put_img(p_src_file);
+			put_img(p_dst_file);
+			return -EINVAL;
+#ifdef CONFIG_FB_MSM_MDP31
+		}
+#endif
+	}
+
+	/* scale check */
+	if (req->flags & MDP_ROT_90) {
+		dst_width = req->dst_rect.h;
+		dst_height = req->dst_rect.w;
+	} else {
+		dst_width = req->dst_rect.w;
+		dst_height = req->dst_rect.h;
+	}
+
+	if ((iBuf.roi.width != dst_width) || (iBuf.roi.height != dst_height))
+		iBuf.mdpImg.mdpOp |= MDPOP_ASCALE;
+
+	if (req->flags & MDP_BLUR) {
+#ifdef CONFIG_FB_MSM_MDP31
+		if (req->flags & MDP_SHARPENING)
+			printk(KERN_WARNING
+				"mdp: MDP_SHARPENING is set with MDP_BLUR!\n");
+		req->flags |= MDP_SHARPENING;
+		req->sharpening_strength = -127;
+#else
+		iBuf.mdpImg.mdpOp |= MDPOP_ASCALE | MDPOP_BLUR;
+
+#endif
+	}
+
+	if (req->flags & MDP_SHARPENING) {
+#ifdef CONFIG_FB_MSM_MDP31
+		if ((req->sharpening_strength > 127) ||
+			(req->sharpening_strength < -127)) {
+			printk(KERN_ERR
+				"%s: sharpening strength out of range\n",
+				__func__);
+			put_img(p_src_file);
+			put_img(p_dst_file);
+			return -EINVAL;
+		}
+
+		iBuf.mdpImg.mdpOp |= MDPOP_ASCALE | MDPOP_SHARPENING;
+		iBuf.mdpImg.sp_value = req->sharpening_strength & 0xff;
+#else
+		put_img(p_src_file);
+		put_img(p_dst_file);
+		return -EINVAL;
+#endif
+	}
+
+	down(&mdp_ppp_mutex);
+	/* MDP cmd block enable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+#ifndef CONFIG_FB_MSM_MDP22
+	mdp_start_ppp(mfd, &iBuf, req, p_src_file, p_dst_file);
+#else
+	/* bg tile fetching HW workaround */
+	if (((iBuf.mdpImg.mdpOp & (MDPOP_TRANSP | MDPOP_ALPHAB)) ||
+	     (req->src.format == MDP_ARGB_8888) ||
+	     (req->src.format == MDP_BGRA_8888) ||
+	     (req->src.format == MDP_RGBA_8888)) &&
+	    (iBuf.mdpImg.mdpOp & MDPOP_ROT90) && (req->dst_rect.w <= 16)) {
+		int dst_h, src_w, i;
+		uint32 mdpOp = iBuf.mdpImg.mdpOp;
+
+		src_w = req->src_rect.w;
+		dst_h = iBuf.roi.dst_height;
+
+		for (i = 0; i < (req->dst_rect.h / 16); i++) {
+			/* this tile size */
+			iBuf.roi.dst_height = 16;
+			iBuf.roi.width =
+			    (16 * req->src_rect.w) / req->dst_rect.h;
+
+			/* if it's out of scale range... */
+			if (((MDP_SCALE_Q_FACTOR * iBuf.roi.dst_height) /
+			     iBuf.roi.width) > MDP_MAX_X_SCALE_FACTOR)
+				iBuf.roi.width =
+				    (MDP_SCALE_Q_FACTOR * iBuf.roi.dst_height) /
+				    MDP_MAX_X_SCALE_FACTOR;
+			else if (((MDP_SCALE_Q_FACTOR * iBuf.roi.dst_height) /
+				  iBuf.roi.width) < MDP_MIN_X_SCALE_FACTOR)
+				iBuf.roi.width =
+				    (MDP_SCALE_Q_FACTOR * iBuf.roi.dst_height) /
+				    MDP_MIN_X_SCALE_FACTOR;
+
+			mdp_start_ppp(mfd, &iBuf, req, p_src_file, p_dst_file);
+
+			/* next tile location */
+			iBuf.roi.lcd_y += 16;
+			iBuf.roi.x += iBuf.roi.width;
+
+			/* this is for a remainder update */
+			dst_h -= 16;
+			src_w -= iBuf.roi.width;
+			/* restore mdpOp since MDPOP_ASCALE has been cleared */
+			iBuf.mdpImg.mdpOp = mdpOp;
+		}
+
+		if ((dst_h < 0) || (src_w < 0))
+			printk(KERN_WARNING
+			       "msm_fb: mdp_blt_ex() unexpected result! line:%d\n",
+			       __LINE__);
+
+		/* remainder update */
+		if ((dst_h > 0) && (src_w > 0)) {
+			u32 tmp_v;
+
+			iBuf.roi.dst_height = dst_h;
+			iBuf.roi.width = src_w;
+
+			if (((MDP_SCALE_Q_FACTOR * iBuf.roi.dst_height) /
+			     iBuf.roi.width) > MDP_MAX_X_SCALE_FACTOR) {
+				tmp_v =
+				    (MDP_SCALE_Q_FACTOR * iBuf.roi.dst_height) /
+				    MDP_MAX_X_SCALE_FACTOR +
+				    (MDP_SCALE_Q_FACTOR * iBuf.roi.dst_height) %
+				    MDP_MAX_X_SCALE_FACTOR ? 1 : 0;
+
+				/* move x location as roi width gets bigger */
+				iBuf.roi.x -= tmp_v - iBuf.roi.width;
+				iBuf.roi.width = tmp_v;
+			} else
+			    if (((MDP_SCALE_Q_FACTOR * iBuf.roi.dst_height) /
+				 iBuf.roi.width) < MDP_MIN_X_SCALE_FACTOR) {
+				tmp_v =
+				    (MDP_SCALE_Q_FACTOR * iBuf.roi.dst_height) /
+				    MDP_MIN_X_SCALE_FACTOR +
+				    (MDP_SCALE_Q_FACTOR * iBuf.roi.dst_height) %
+				    MDP_MIN_X_SCALE_FACTOR ? 1 : 0;
+
+				/*
+				 * we don't move x location for continuity of
+				 * source image
+				 */
+				iBuf.roi.width = tmp_v;
+			}
+
+			mdp_start_ppp(mfd, &iBuf, req, p_src_file, p_dst_file);
+		}
+	} else {
+		mdp_start_ppp(mfd, &iBuf, req, p_src_file, p_dst_file);
+	}
+#endif
+
+	/* MDP cmd block disable */
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	up(&mdp_ppp_mutex);
+
+	put_img(p_src_file);
+	put_img(p_dst_file);
 	return 0;
 }
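
The MDP 2.2 path in mdp_ppp_blit() works around a background-tile fetch issue by splitting a rotated, blended blit into 16-line destination tiles, giving each tile a proportional slice of the source and clamping that slice so the per-tile scale factor stays legal. A standalone sketch of the per-tile source width calculation; the constants are parameters here and the function name is invented:

#include <stdint.h>

/* Source width consumed by one 16-line destination tile, clamped so
 * that (q_factor * 16) / width stays within [min_f, max_f], in the
 * spirit of the workaround loop above. */
static uint32_t tile_src_width(uint32_t src_w, uint32_t dst_h,
			       uint32_t q_factor, uint32_t min_f,
			       uint32_t max_f)
{
	uint32_t w;

	if (dst_h == 0)
		return 0;
	w = (16 * src_w) / dst_h;
	if (w == 0)
		w = 1;
	if ((q_factor * 16) / w > max_f)
		w = (q_factor * 16) / max_f;
	else if ((q_factor * 16) / w < min_f)
		w = (q_factor * 16) / min_f;
	return w;
}
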
diff --git a/drivers/video/msm/mdp_ppp.h b/drivers/video/msm/mdp_ppp.h
new file mode 100644
index 0000000..e045643
--- /dev/null
+++ b/drivers/video/msm/mdp_ppp.h
@@ -0,0 +1,82 @@
+/* drivers/video/msm/mdp_ppp.h
+ *
+ * Copyright (C) 2009 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _VIDEO_MSM_MDP_PPP_H_
+#define _VIDEO_MSM_MDP_PPP_H_
+
+#include <linux/types.h>
+
+struct ppp_regs {
+	uint32_t src0;
+	uint32_t src1;
+	uint32_t dst0;
+	uint32_t dst1;
+	uint32_t src_cfg;
+	uint32_t dst_cfg;
+	uint32_t src_pack;
+	uint32_t dst_pack;
+	uint32_t src_rect;
+	uint32_t dst_rect;
+	uint32_t src_ystride;
+	uint32_t dst_ystride;
+	uint32_t op;
+	uint32_t src_bpp;
+	uint32_t dst_bpp;
+	uint32_t edge;
+	uint32_t phasex_init;
+	uint32_t phasey_init;
+	uint32_t phasex_step;
+	uint32_t phasey_step;
+
+	uint32_t bg0;
+	uint32_t bg1;
+	uint32_t bg_cfg;
+	uint32_t bg_bpp;
+	uint32_t bg_pack;
+	uint32_t bg_ystride;
+
+#ifdef CONFIG_MSM_MDP31
+	uint32_t src_xy;
+	uint32_t src_img_sz;
+	uint32_t dst_xy;
+	uint32_t bg_xy;
+	uint32_t bg_img_sz;
+	uint32_t bg_alpha_sel;
+
+	uint32_t scale_cfg;
+	uint32_t csc_cfg;
+#endif
+};
+
+struct mdp_info;
+struct mdp_rect;
+struct mdp_blit_req;
+
+void mdp_ppp_init_scale(const struct mdp_info *mdp);
+int mdp_ppp_cfg_scale(const struct mdp_info *mdp, struct ppp_regs *regs,
+		      struct mdp_rect *src_rect, struct mdp_rect *dst_rect,
+		      uint32_t src_format, uint32_t dst_format);
+int mdp_ppp_load_blur(const struct mdp_info *mdp);
+
+#ifndef CONFIG_MSM_MDP31
+int mdp_ppp_cfg_edge_cond(struct mdp_blit_req *req, struct ppp_regs *regs);
+#else
+static inline int mdp_ppp_cfg_edge_cond(struct mdp_blit_req *req,
+				 struct ppp_regs *regs)
+{
+	return 0;
+}
+#endif
+
+#endif /* _VIDEO_MSM_MDP_PPP_H_ */
diff --git a/drivers/video/msm/mdp_ppp22.c b/drivers/video/msm/mdp_ppp22.c
new file mode 100644
index 0000000..9016f0a
--- /dev/null
+++ b/drivers/video/msm/mdp_ppp22.c
@@ -0,0 +1,1091 @@
+/* drivers/video/msm/mdp_ppp22.c
+ *
+ * Copyright (C) 2007 Code Aurora Forum. All rights reserved.
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include <linux/msm_mdp.h>
+
+#include "mdp_hw.h"
+#include "mdp_ppp.h"
+
+struct mdp_table_entry {
+	uint32_t reg;
+	uint32_t val;
+};
+
+enum {
+	MDP_DOWNSCALE_PT2TOPT4,
+	MDP_DOWNSCALE_PT4TOPT6,
+	MDP_DOWNSCALE_PT6TOPT8,
+	MDP_DOWNSCALE_PT8TO1,
+	MDP_DOWNSCALE_MAX,
+
+	/* not technically in the downscale table list */
+	MDP_DOWNSCALE_BLUR,
+};
+
+static int downscale_x_table;
+static int downscale_y_table;
+
+static struct mdp_table_entry mdp_upscale_table[] = {
+	{ 0x5fffc, 0x0 },
+	{ 0x50200, 0x7fc00000 },
+	{ 0x5fffc, 0xff80000d },
+	{ 0x50204, 0x7ec003f9 },
+	{ 0x5fffc, 0xfec0001c },
+	{ 0x50208, 0x7d4003f3 },
+	{ 0x5fffc, 0xfe40002b },
+	{ 0x5020c, 0x7b8003ed },
+	{ 0x5fffc, 0xfd80003c },
+	{ 0x50210, 0x794003e8 },
+	{ 0x5fffc, 0xfcc0004d },
+	{ 0x50214, 0x76c003e4 },
+	{ 0x5fffc, 0xfc40005f },
+	{ 0x50218, 0x73c003e0 },
+	{ 0x5fffc, 0xfb800071 },
+	{ 0x5021c, 0x708003de },
+	{ 0x5fffc, 0xfac00085 },
+	{ 0x50220, 0x6d0003db },
+	{ 0x5fffc, 0xfa000098 },
+	{ 0x50224, 0x698003d9 },
+	{ 0x5fffc, 0xf98000ac },
+	{ 0x50228, 0x654003d8 },
+	{ 0x5fffc, 0xf8c000c1 },
+	{ 0x5022c, 0x610003d7 },
+	{ 0x5fffc, 0xf84000d5 },
+	{ 0x50230, 0x5c8003d7 },
+	{ 0x5fffc, 0xf7c000e9 },
+	{ 0x50234, 0x580003d7 },
+	{ 0x5fffc, 0xf74000fd },
+	{ 0x50238, 0x534003d8 },
+	{ 0x5fffc, 0xf6c00112 },
+	{ 0x5023c, 0x4e8003d8 },
+	{ 0x5fffc, 0xf6800126 },
+	{ 0x50240, 0x494003da },
+	{ 0x5fffc, 0xf600013a },
+	{ 0x50244, 0x448003db },
+	{ 0x5fffc, 0xf600014d },
+	{ 0x50248, 0x3f4003dd },
+	{ 0x5fffc, 0xf5c00160 },
+	{ 0x5024c, 0x3a4003df },
+	{ 0x5fffc, 0xf5c00172 },
+	{ 0x50250, 0x354003e1 },
+	{ 0x5fffc, 0xf5c00184 },
+	{ 0x50254, 0x304003e3 },
+	{ 0x5fffc, 0xf6000195 },
+	{ 0x50258, 0x2b0003e6 },
+	{ 0x5fffc, 0xf64001a6 },
+	{ 0x5025c, 0x260003e8 },
+	{ 0x5fffc, 0xf6c001b4 },
+	{ 0x50260, 0x214003eb },
+	{ 0x5fffc, 0xf78001c2 },
+	{ 0x50264, 0x1c4003ee },
+	{ 0x5fffc, 0xf80001cf },
+	{ 0x50268, 0x17c003f1 },
+	{ 0x5fffc, 0xf90001db },
+	{ 0x5026c, 0x134003f3 },
+	{ 0x5fffc, 0xfa0001e5 },
+	{ 0x50270, 0xf0003f6 },
+	{ 0x5fffc, 0xfb4001ee },
+	{ 0x50274, 0xac003f9 },
+	{ 0x5fffc, 0xfcc001f5 },
+	{ 0x50278, 0x70003fb },
+	{ 0x5fffc, 0xfe4001fb },
+	{ 0x5027c, 0x34003fe },
+};
+
+static struct mdp_table_entry mdp_downscale_x_table_PT2TOPT4[] = {
+	{ 0x5fffc, 0x740008c },
+	{ 0x50280, 0x33800088 },
+	{ 0x5fffc, 0x800008e },
+	{ 0x50284, 0x33400084 },
+	{ 0x5fffc, 0x8400092 },
+	{ 0x50288, 0x33000080 },
+	{ 0x5fffc, 0x9000094 },
+	{ 0x5028c, 0x3300007b },
+	{ 0x5fffc, 0x9c00098 },
+	{ 0x50290, 0x32400077 },
+	{ 0x5fffc, 0xa40009b },
+	{ 0x50294, 0x32000073 },
+	{ 0x5fffc, 0xb00009d },
+	{ 0x50298,  0x31c0006f },
+	{ 0x5fffc,  0xbc000a0 },
+	{ 0x5029c,  0x3140006b },
+	{ 0x5fffc,  0xc8000a2 },
+	{ 0x502a0,  0x31000067 },
+	{ 0x5fffc,  0xd8000a5 },
+	{ 0x502a4,  0x30800062 },
+	{ 0x5fffc,  0xe4000a8 },
+	{ 0x502a8,  0x2fc0005f },
+	{ 0x5fffc,  0xec000aa },
+	{ 0x502ac,  0x2fc0005b },
+	{ 0x5fffc,  0xf8000ad },
+	{ 0x502b0,  0x2f400057 },
+	{ 0x5fffc,  0x108000b0 },
+	{ 0x502b4,  0x2e400054 },
+	{ 0x5fffc,  0x114000b2 },
+	{ 0x502b8,  0x2e000050 },
+	{ 0x5fffc,  0x124000b4 },
+	{ 0x502bc,  0x2d80004c },
+	{ 0x5fffc,  0x130000b6 },
+	{ 0x502c0,  0x2d000049 },
+	{ 0x5fffc,  0x140000b8 },
+	{ 0x502c4,  0x2c800045 },
+	{ 0x5fffc,  0x150000b9 },
+	{ 0x502c8,  0x2c000042 },
+	{ 0x5fffc,  0x15c000bd },
+	{ 0x502cc,  0x2b40003e },
+	{ 0x5fffc,  0x16c000bf },
+	{ 0x502d0,  0x2a80003b },
+	{ 0x5fffc,  0x17c000bf },
+	{ 0x502d4,  0x2a000039 },
+	{ 0x5fffc,  0x188000c2 },
+	{ 0x502d8,  0x29400036 },
+	{ 0x5fffc,  0x19c000c4 },
+	{ 0x502dc,  0x28800032 },
+	{ 0x5fffc,  0x1ac000c5 },
+	{ 0x502e0,  0x2800002f },
+	{ 0x5fffc,  0x1bc000c7 },
+	{ 0x502e4,  0x2740002c },
+	{ 0x5fffc,  0x1cc000c8 },
+	{ 0x502e8,  0x26c00029 },
+	{ 0x5fffc,  0x1dc000c9 },
+	{ 0x502ec,  0x26000027 },
+	{ 0x5fffc,  0x1ec000cc },
+	{ 0x502f0,  0x25000024 },
+	{ 0x5fffc,  0x200000cc },
+	{ 0x502f4,  0x24800021 },
+	{ 0x5fffc,  0x210000cd },
+	{ 0x502f8,  0x23800020 },
+	{ 0x5fffc,  0x220000ce },
+	{ 0x502fc,  0x2300001d },
+};
+
+static struct mdp_table_entry mdp_downscale_x_table_PT4TOPT6[] = {
+	{ 0x5fffc,  0x740008c },
+	{ 0x50280,  0x33800088 },
+	{ 0x5fffc,  0x800008e },
+	{ 0x50284,  0x33400084 },
+	{ 0x5fffc,  0x8400092 },
+	{ 0x50288,  0x33000080 },
+	{ 0x5fffc,  0x9000094 },
+	{ 0x5028c,  0x3300007b },
+	{ 0x5fffc,  0x9c00098 },
+	{ 0x50290,  0x32400077 },
+	{ 0x5fffc,  0xa40009b },
+	{ 0x50294,  0x32000073 },
+	{ 0x5fffc,  0xb00009d },
+	{ 0x50298,  0x31c0006f },
+	{ 0x5fffc,  0xbc000a0 },
+	{ 0x5029c,  0x3140006b },
+	{ 0x5fffc,  0xc8000a2 },
+	{ 0x502a0,  0x31000067 },
+	{ 0x5fffc,  0xd8000a5 },
+	{ 0x502a4,  0x30800062 },
+	{ 0x5fffc,  0xe4000a8 },
+	{ 0x502a8,  0x2fc0005f },
+	{ 0x5fffc,  0xec000aa },
+	{ 0x502ac,  0x2fc0005b },
+	{ 0x5fffc,  0xf8000ad },
+	{ 0x502b0,  0x2f400057 },
+	{ 0x5fffc,  0x108000b0 },
+	{ 0x502b4,  0x2e400054 },
+	{ 0x5fffc,  0x114000b2 },
+	{ 0x502b8,  0x2e000050 },
+	{ 0x5fffc,  0x124000b4 },
+	{ 0x502bc,  0x2d80004c },
+	{ 0x5fffc,  0x130000b6 },
+	{ 0x502c0,  0x2d000049 },
+	{ 0x5fffc,  0x140000b8 },
+	{ 0x502c4,  0x2c800045 },
+	{ 0x5fffc,  0x150000b9 },
+	{ 0x502c8,  0x2c000042 },
+	{ 0x5fffc,  0x15c000bd },
+	{ 0x502cc,  0x2b40003e },
+	{ 0x5fffc,  0x16c000bf },
+	{ 0x502d0,  0x2a80003b },
+	{ 0x5fffc,  0x17c000bf },
+	{ 0x502d4,  0x2a000039 },
+	{ 0x5fffc,  0x188000c2 },
+	{ 0x502d8,  0x29400036 },
+	{ 0x5fffc,  0x19c000c4 },
+	{ 0x502dc,  0x28800032 },
+	{ 0x5fffc,  0x1ac000c5 },
+	{ 0x502e0,  0x2800002f },
+	{ 0x5fffc,  0x1bc000c7 },
+	{ 0x502e4,  0x2740002c },
+	{ 0x5fffc,  0x1cc000c8 },
+	{ 0x502e8,  0x26c00029 },
+	{ 0x5fffc,  0x1dc000c9 },
+	{ 0x502ec,  0x26000027 },
+	{ 0x5fffc,  0x1ec000cc },
+	{ 0x502f0,  0x25000024 },
+	{ 0x5fffc,  0x200000cc },
+	{ 0x502f4,  0x24800021 },
+	{ 0x5fffc,  0x210000cd },
+	{ 0x502f8,  0x23800020 },
+	{ 0x5fffc,  0x220000ce },
+	{ 0x502fc,  0x2300001d },
+};
+
+static struct mdp_table_entry mdp_downscale_x_table_PT6TOPT8[] = {
+	{ 0x5fffc,  0xfe000070 },
+	{ 0x50280,  0x4bc00068 },
+	{ 0x5fffc,  0xfe000078 },
+	{ 0x50284,  0x4bc00060 },
+	{ 0x5fffc,  0xfe000080 },
+	{ 0x50288,  0x4b800059 },
+	{ 0x5fffc,  0xfe000089 },
+	{ 0x5028c,  0x4b000052 },
+	{ 0x5fffc,  0xfe400091 },
+	{ 0x50290,  0x4a80004b },
+	{ 0x5fffc,  0xfe40009a },
+	{ 0x50294,  0x4a000044 },
+	{ 0x5fffc,  0xfe8000a3 },
+	{ 0x50298,  0x4940003d },
+	{ 0x5fffc,  0xfec000ac },
+	{ 0x5029c,  0x48400037 },
+	{ 0x5fffc,  0xff0000b4 },
+	{ 0x502a0,  0x47800031 },
+	{ 0x5fffc,  0xff8000bd },
+	{ 0x502a4,  0x4640002b },
+	{ 0x5fffc,  0xc5 },
+	{ 0x502a8,  0x45000026 },
+	{ 0x5fffc,  0x8000ce },
+	{ 0x502ac,  0x43800021 },
+	{ 0x5fffc,  0x10000d6 },
+	{ 0x502b0,  0x4240001c },
+	{ 0x5fffc,  0x18000df },
+	{ 0x502b4,  0x40800018 },
+	{ 0x5fffc,  0x24000e6 },
+	{ 0x502b8,  0x3f000014 },
+	{ 0x5fffc,  0x30000ee },
+	{ 0x502bc,  0x3d400010 },
+	{ 0x5fffc,  0x40000f5 },
+	{ 0x502c0,  0x3b80000c },
+	{ 0x5fffc,  0x50000fc },
+	{ 0x502c4,  0x39800009 },
+	{ 0x5fffc,  0x6000102 },
+	{ 0x502c8,  0x37c00006 },
+	{ 0x5fffc,  0x7000109 },
+	{ 0x502cc,  0x35800004 },
+	{ 0x5fffc,  0x840010e },
+	{ 0x502d0,  0x33800002 },
+	{ 0x5fffc,  0x9800114 },
+	{ 0x502d4,  0x31400000 },
+	{ 0x5fffc,  0xac00119 },
+	{ 0x502d8,  0x2f4003fe },
+	{ 0x5fffc,  0xc40011e },
+	{ 0x502dc,  0x2d0003fc },
+	{ 0x5fffc,  0xdc00121 },
+	{ 0x502e0,  0x2b0003fb },
+	{ 0x5fffc,  0xf400125 },
+	{ 0x502e4,  0x28c003fa },
+	{ 0x5fffc,  0x11000128 },
+	{ 0x502e8,  0x268003f9 },
+	{ 0x5fffc,  0x12c0012a },
+	{ 0x502ec,  0x244003f9 },
+	{ 0x5fffc,  0x1480012c },
+	{ 0x502f0,  0x224003f8 },
+	{ 0x5fffc,  0x1640012e },
+	{ 0x502f4,  0x200003f8 },
+	{ 0x5fffc,  0x1800012f },
+	{ 0x502f8,  0x1e0003f8 },
+	{ 0x5fffc,  0x1a00012f },
+	{ 0x502fc,  0x1c0003f8 },
+};
+
+static struct mdp_table_entry mdp_downscale_x_table_PT8TO1[] = {
+	{ 0x5fffc,  0x0 },
+	{ 0x50280,  0x7fc00000 },
+	{ 0x5fffc,  0xff80000d },
+	{ 0x50284,  0x7ec003f9 },
+	{ 0x5fffc,  0xfec0001c },
+	{ 0x50288,  0x7d4003f3 },
+	{ 0x5fffc,  0xfe40002b },
+	{ 0x5028c,  0x7b8003ed },
+	{ 0x5fffc,  0xfd80003c },
+	{ 0x50290,  0x794003e8 },
+	{ 0x5fffc,  0xfcc0004d },
+	{ 0x50294,  0x76c003e4 },
+	{ 0x5fffc,  0xfc40005f },
+	{ 0x50298,  0x73c003e0 },
+	{ 0x5fffc,  0xfb800071 },
+	{ 0x5029c,  0x708003de },
+	{ 0x5fffc,  0xfac00085 },
+	{ 0x502a0,  0x6d0003db },
+	{ 0x5fffc,  0xfa000098 },
+	{ 0x502a4,  0x698003d9 },
+	{ 0x5fffc,  0xf98000ac },
+	{ 0x502a8,  0x654003d8 },
+	{ 0x5fffc,  0xf8c000c1 },
+	{ 0x502ac,  0x610003d7 },
+	{ 0x5fffc,  0xf84000d5 },
+	{ 0x502b0,  0x5c8003d7 },
+	{ 0x5fffc,  0xf7c000e9 },
+	{ 0x502b4,  0x580003d7 },
+	{ 0x5fffc,  0xf74000fd },
+	{ 0x502b8,  0x534003d8 },
+	{ 0x5fffc,  0xf6c00112 },
+	{ 0x502bc,  0x4e8003d8 },
+	{ 0x5fffc,  0xf6800126 },
+	{ 0x502c0,  0x494003da },
+	{ 0x5fffc,  0xf600013a },
+	{ 0x502c4,  0x448003db },
+	{ 0x5fffc,  0xf600014d },
+	{ 0x502c8,  0x3f4003dd },
+	{ 0x5fffc,  0xf5c00160 },
+	{ 0x502cc,  0x3a4003df },
+	{ 0x5fffc,  0xf5c00172 },
+	{ 0x502d0,  0x354003e1 },
+	{ 0x5fffc,  0xf5c00184 },
+	{ 0x502d4,  0x304003e3 },
+	{ 0x5fffc,  0xf6000195 },
+	{ 0x502d8,  0x2b0003e6 },
+	{ 0x5fffc,  0xf64001a6 },
+	{ 0x502dc,  0x260003e8 },
+	{ 0x5fffc,  0xf6c001b4 },
+	{ 0x502e0,  0x214003eb },
+	{ 0x5fffc,  0xf78001c2 },
+	{ 0x502e4,  0x1c4003ee },
+	{ 0x5fffc,  0xf80001cf },
+	{ 0x502e8,  0x17c003f1 },
+	{ 0x5fffc,  0xf90001db },
+	{ 0x502ec,  0x134003f3 },
+	{ 0x5fffc,  0xfa0001e5 },
+	{ 0x502f0,  0xf0003f6 },
+	{ 0x5fffc,  0xfb4001ee },
+	{ 0x502f4,  0xac003f9 },
+	{ 0x5fffc,  0xfcc001f5 },
+	{ 0x502f8,  0x70003fb },
+	{ 0x5fffc,  0xfe4001fb },
+	{ 0x502fc,  0x34003fe },
+};
+
+struct mdp_table_entry *mdp_downscale_x_table[MDP_DOWNSCALE_MAX] = {
+	[MDP_DOWNSCALE_PT2TOPT4] = mdp_downscale_x_table_PT2TOPT4,
+	[MDP_DOWNSCALE_PT4TOPT6] = mdp_downscale_x_table_PT4TOPT6,
+	[MDP_DOWNSCALE_PT6TOPT8] = mdp_downscale_x_table_PT6TOPT8,
+	[MDP_DOWNSCALE_PT8TO1]  = mdp_downscale_x_table_PT8TO1,
+};
+
+static struct mdp_table_entry mdp_downscale_y_table_PT2TOPT4[] = {
+	{ 0x5fffc,  0x740008c },
+	{ 0x50300,  0x33800088 },
+	{ 0x5fffc,  0x800008e },
+	{ 0x50304,  0x33400084 },
+	{ 0x5fffc,  0x8400092 },
+	{ 0x50308,  0x33000080 },
+	{ 0x5fffc,  0x9000094 },
+	{ 0x5030c,  0x3300007b },
+	{ 0x5fffc,  0x9c00098 },
+	{ 0x50310,  0x32400077 },
+	{ 0x5fffc,  0xa40009b },
+	{ 0x50314,  0x32000073 },
+	{ 0x5fffc,  0xb00009d },
+	{ 0x50318,  0x31c0006f },
+	{ 0x5fffc,  0xbc000a0 },
+	{ 0x5031c,  0x3140006b },
+	{ 0x5fffc,  0xc8000a2 },
+	{ 0x50320,  0x31000067 },
+	{ 0x5fffc,  0xd8000a5 },
+	{ 0x50324,  0x30800062 },
+	{ 0x5fffc,  0xe4000a8 },
+	{ 0x50328,  0x2fc0005f },
+	{ 0x5fffc,  0xec000aa },
+	{ 0x5032c,  0x2fc0005b },
+	{ 0x5fffc,  0xf8000ad },
+	{ 0x50330,  0x2f400057 },
+	{ 0x5fffc,  0x108000b0 },
+	{ 0x50334,  0x2e400054 },
+	{ 0x5fffc,  0x114000b2 },
+	{ 0x50338,  0x2e000050 },
+	{ 0x5fffc,  0x124000b4 },
+	{ 0x5033c,  0x2d80004c },
+	{ 0x5fffc,  0x130000b6 },
+	{ 0x50340,  0x2d000049 },
+	{ 0x5fffc,  0x140000b8 },
+	{ 0x50344,  0x2c800045 },
+	{ 0x5fffc,  0x150000b9 },
+	{ 0x50348,  0x2c000042 },
+	{ 0x5fffc,  0x15c000bd },
+	{ 0x5034c,  0x2b40003e },
+	{ 0x5fffc,  0x16c000bf },
+	{ 0x50350,  0x2a80003b },
+	{ 0x5fffc,  0x17c000bf },
+	{ 0x50354,  0x2a000039 },
+	{ 0x5fffc,  0x188000c2 },
+	{ 0x50358,  0x29400036 },
+	{ 0x5fffc,  0x19c000c4 },
+	{ 0x5035c,  0x28800032 },
+	{ 0x5fffc,  0x1ac000c5 },
+	{ 0x50360,  0x2800002f },
+	{ 0x5fffc,  0x1bc000c7 },
+	{ 0x50364,  0x2740002c },
+	{ 0x5fffc,  0x1cc000c8 },
+	{ 0x50368,  0x26c00029 },
+	{ 0x5fffc,  0x1dc000c9 },
+	{ 0x5036c,  0x26000027 },
+	{ 0x5fffc,  0x1ec000cc },
+	{ 0x50370,  0x25000024 },
+	{ 0x5fffc,  0x200000cc },
+	{ 0x50374,  0x24800021 },
+	{ 0x5fffc,  0x210000cd },
+	{ 0x50378,  0x23800020 },
+	{ 0x5fffc,  0x220000ce },
+	{ 0x5037c,  0x2300001d },
+};
+
+static struct mdp_table_entry mdp_downscale_y_table_PT4TOPT6[] = {
+	{ 0x5fffc,  0x740008c },
+	{ 0x50300,  0x33800088 },
+	{ 0x5fffc,  0x800008e },
+	{ 0x50304,  0x33400084 },
+	{ 0x5fffc,  0x8400092 },
+	{ 0x50308,  0x33000080 },
+	{ 0x5fffc,  0x9000094 },
+	{ 0x5030c,  0x3300007b },
+	{ 0x5fffc,  0x9c00098 },
+	{ 0x50310,  0x32400077 },
+	{ 0x5fffc,  0xa40009b },
+	{ 0x50314,  0x32000073 },
+	{ 0x5fffc,  0xb00009d },
+	{ 0x50318,  0x31c0006f },
+	{ 0x5fffc,  0xbc000a0 },
+	{ 0x5031c,  0x3140006b },
+	{ 0x5fffc,  0xc8000a2 },
+	{ 0x50320,  0x31000067 },
+	{ 0x5fffc,  0xd8000a5 },
+	{ 0x50324,  0x30800062 },
+	{ 0x5fffc,  0xe4000a8 },
+	{ 0x50328,  0x2fc0005f },
+	{ 0x5fffc,  0xec000aa },
+	{ 0x5032c,  0x2fc0005b },
+	{ 0x5fffc,  0xf8000ad },
+	{ 0x50330,  0x2f400057 },
+	{ 0x5fffc,  0x108000b0 },
+	{ 0x50334,  0x2e400054 },
+	{ 0x5fffc,  0x114000b2 },
+	{ 0x50338,  0x2e000050 },
+	{ 0x5fffc,  0x124000b4 },
+	{ 0x5033c,  0x2d80004c },
+	{ 0x5fffc,  0x130000b6 },
+	{ 0x50340,  0x2d000049 },
+	{ 0x5fffc,  0x140000b8 },
+	{ 0x50344,  0x2c800045 },
+	{ 0x5fffc,  0x150000b9 },
+	{ 0x50348,  0x2c000042 },
+	{ 0x5fffc,  0x15c000bd },
+	{ 0x5034c,  0x2b40003e },
+	{ 0x5fffc,  0x16c000bf },
+	{ 0x50350,  0x2a80003b },
+	{ 0x5fffc,  0x17c000bf },
+	{ 0x50354,  0x2a000039 },
+	{ 0x5fffc,  0x188000c2 },
+	{ 0x50358,  0x29400036 },
+	{ 0x5fffc,  0x19c000c4 },
+	{ 0x5035c,  0x28800032 },
+	{ 0x5fffc,  0x1ac000c5 },
+	{ 0x50360,  0x2800002f },
+	{ 0x5fffc,  0x1bc000c7 },
+	{ 0x50364,  0x2740002c },
+	{ 0x5fffc,  0x1cc000c8 },
+	{ 0x50368,  0x26c00029 },
+	{ 0x5fffc,  0x1dc000c9 },
+	{ 0x5036c,  0x26000027 },
+	{ 0x5fffc,  0x1ec000cc },
+	{ 0x50370,  0x25000024 },
+	{ 0x5fffc,  0x200000cc },
+	{ 0x50374,  0x24800021 },
+	{ 0x5fffc,  0x210000cd },
+	{ 0x50378,  0x23800020 },
+	{ 0x5fffc,  0x220000ce },
+	{ 0x5037c,  0x2300001d },
+};
+
+static struct mdp_table_entry mdp_downscale_y_table_PT6TOPT8[] = {
+	{ 0x5fffc,  0xfe000070 },
+	{ 0x50300,  0x4bc00068 },
+	{ 0x5fffc,  0xfe000078 },
+	{ 0x50304,  0x4bc00060 },
+	{ 0x5fffc,  0xfe000080 },
+	{ 0x50308,  0x4b800059 },
+	{ 0x5fffc,  0xfe000089 },
+	{ 0x5030c,  0x4b000052 },
+	{ 0x5fffc,  0xfe400091 },
+	{ 0x50310,  0x4a80004b },
+	{ 0x5fffc,  0xfe40009a },
+	{ 0x50314,  0x4a000044 },
+	{ 0x5fffc,  0xfe8000a3 },
+	{ 0x50318,  0x4940003d },
+	{ 0x5fffc,  0xfec000ac },
+	{ 0x5031c,  0x48400037 },
+	{ 0x5fffc,  0xff0000b4 },
+	{ 0x50320,  0x47800031 },
+	{ 0x5fffc,  0xff8000bd },
+	{ 0x50324,  0x4640002b },
+	{ 0x5fffc,  0xc5 },
+	{ 0x50328,  0x45000026 },
+	{ 0x5fffc,  0x8000ce },
+	{ 0x5032c,  0x43800021 },
+	{ 0x5fffc,  0x10000d6 },
+	{ 0x50330,  0x4240001c },
+	{ 0x5fffc,  0x18000df },
+	{ 0x50334,  0x40800018 },
+	{ 0x5fffc,  0x24000e6 },
+	{ 0x50338,  0x3f000014 },
+	{ 0x5fffc,  0x30000ee },
+	{ 0x5033c,  0x3d400010 },
+	{ 0x5fffc,  0x40000f5 },
+	{ 0x50340,  0x3b80000c },
+	{ 0x5fffc,  0x50000fc },
+	{ 0x50344,  0x39800009 },
+	{ 0x5fffc,  0x6000102 },
+	{ 0x50348,  0x37c00006 },
+	{ 0x5fffc,  0x7000109 },
+	{ 0x5034c,  0x35800004 },
+	{ 0x5fffc,  0x840010e },
+	{ 0x50350,  0x33800002 },
+	{ 0x5fffc,  0x9800114 },
+	{ 0x50354,  0x31400000 },
+	{ 0x5fffc,  0xac00119 },
+	{ 0x50358,  0x2f4003fe },
+	{ 0x5fffc,  0xc40011e },
+	{ 0x5035c,  0x2d0003fc },
+	{ 0x5fffc,  0xdc00121 },
+	{ 0x50360,  0x2b0003fb },
+	{ 0x5fffc,  0xf400125 },
+	{ 0x50364,  0x28c003fa },
+	{ 0x5fffc,  0x11000128 },
+	{ 0x50368,  0x268003f9 },
+	{ 0x5fffc,  0x12c0012a },
+	{ 0x5036c,  0x244003f9 },
+	{ 0x5fffc,  0x1480012c },
+	{ 0x50370,  0x224003f8 },
+	{ 0x5fffc,  0x1640012e },
+	{ 0x50374,  0x200003f8 },
+	{ 0x5fffc,  0x1800012f },
+	{ 0x50378,  0x1e0003f8 },
+	{ 0x5fffc,  0x1a00012f },
+	{ 0x5037c,  0x1c0003f8 },
+};
+
+static struct mdp_table_entry mdp_downscale_y_table_PT8TO1[] = {
+	{ 0x5fffc,  0x0 },
+	{ 0x50300,  0x7fc00000 },
+	{ 0x5fffc,  0xff80000d },
+	{ 0x50304,  0x7ec003f9 },
+	{ 0x5fffc,  0xfec0001c },
+	{ 0x50308,  0x7d4003f3 },
+	{ 0x5fffc,  0xfe40002b },
+	{ 0x5030c,  0x7b8003ed },
+	{ 0x5fffc,  0xfd80003c },
+	{ 0x50310,  0x794003e8 },
+	{ 0x5fffc,  0xfcc0004d },
+	{ 0x50314,  0x76c003e4 },
+	{ 0x5fffc,  0xfc40005f },
+	{ 0x50318,  0x73c003e0 },
+	{ 0x5fffc,  0xfb800071 },
+	{ 0x5031c,  0x708003de },
+	{ 0x5fffc,  0xfac00085 },
+	{ 0x50320,  0x6d0003db },
+	{ 0x5fffc,  0xfa000098 },
+	{ 0x50324,  0x698003d9 },
+	{ 0x5fffc,  0xf98000ac },
+	{ 0x50328,  0x654003d8 },
+	{ 0x5fffc,  0xf8c000c1 },
+	{ 0x5032c,  0x610003d7 },
+	{ 0x5fffc,  0xf84000d5 },
+	{ 0x50330,  0x5c8003d7 },
+	{ 0x5fffc,  0xf7c000e9 },
+	{ 0x50334,  0x580003d7 },
+	{ 0x5fffc,  0xf74000fd },
+	{ 0x50338,  0x534003d8 },
+	{ 0x5fffc,  0xf6c00112 },
+	{ 0x5033c,  0x4e8003d8 },
+	{ 0x5fffc,  0xf6800126 },
+	{ 0x50340,  0x494003da },
+	{ 0x5fffc,  0xf600013a },
+	{ 0x50344,  0x448003db },
+	{ 0x5fffc,  0xf600014d },
+	{ 0x50348,  0x3f4003dd },
+	{ 0x5fffc,  0xf5c00160 },
+	{ 0x5034c,  0x3a4003df },
+	{ 0x5fffc,  0xf5c00172 },
+	{ 0x50350,  0x354003e1 },
+	{ 0x5fffc,  0xf5c00184 },
+	{ 0x50354,  0x304003e3 },
+	{ 0x5fffc,  0xf6000195 },
+	{ 0x50358,  0x2b0003e6 },
+	{ 0x5fffc,  0xf64001a6 },
+	{ 0x5035c,  0x260003e8 },
+	{ 0x5fffc,  0xf6c001b4 },
+	{ 0x50360,  0x214003eb },
+	{ 0x5fffc,  0xf78001c2 },
+	{ 0x50364,  0x1c4003ee },
+	{ 0x5fffc,  0xf80001cf },
+	{ 0x50368,  0x17c003f1 },
+	{ 0x5fffc,  0xf90001db },
+	{ 0x5036c,  0x134003f3 },
+	{ 0x5fffc,  0xfa0001e5 },
+	{ 0x50370,  0xf0003f6 },
+	{ 0x5fffc,  0xfb4001ee },
+	{ 0x50374,  0xac003f9 },
+	{ 0x5fffc,  0xfcc001f5 },
+	{ 0x50378,  0x70003fb },
+	{ 0x5fffc,  0xfe4001fb },
+	{ 0x5037c,  0x34003fe },
+};
+
+struct mdp_table_entry *mdp_downscale_y_table[MDP_DOWNSCALE_MAX] = {
+	[MDP_DOWNSCALE_PT2TOPT4] = mdp_downscale_y_table_PT2TOPT4,
+	[MDP_DOWNSCALE_PT4TOPT6] = mdp_downscale_y_table_PT4TOPT6,
+	[MDP_DOWNSCALE_PT6TOPT8] = mdp_downscale_y_table_PT6TOPT8,
+	[MDP_DOWNSCALE_PT8TO1]  = mdp_downscale_y_table_PT8TO1,
+};
+
+struct mdp_table_entry mdp_gaussian_blur_table[] = {
+	/* max variance */
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50280, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50284, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50288, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x5028c, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50290, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50294, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50298, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x5029c, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502a0, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502a4, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502a8, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502ac, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502b0, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502b4, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502b8, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502bc, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502c0, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502c4, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502c8, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502cc, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502d0, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502d4, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502d8, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502dc, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502e0, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502e4, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502e8, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502ec, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502f0, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502f4, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502f8, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502fc, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50300, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50304, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50308, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x5030c, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50310, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50314, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50318, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x5031c, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50320, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50324, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50328, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x5032c, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50330, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50334, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50338, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x5033c, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50340, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50344, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50348, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x5034c, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50350, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50354, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50358, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x5035c, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50360, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50364, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50368, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x5036c, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50370, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50374, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50378, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x5037c, 0x20000080 },
+};
+
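+/*
+ * Push a table of (register, value) pairs to the MDP.  The coefficient
+ * tables above alternate a write to 0x5fffc with a write to the actual
+ * coefficient register, so each logical entry takes two table slots.
+ */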
+static void load_table(const struct mdp_info *mdp,
+		       struct mdp_table_entry *table, int len)
+{
+	int i;
+	for (i = 0; i < len; i++)
+		mdp_writel(mdp, table[i].val, table[i].reg);
+}
+
+enum {
+	IMG_LEFT,
+	IMG_RIGHT,
+	IMG_TOP,
+	IMG_BOTTOM,
+};
+
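+/*
+ * For one axis, work out the first and last source coordinates the scale
+ * filter will sample (interp1/interp2) and how many times the first/last
+ * source pixel has to be repeated to cover them (repeat1/repeat2).  The
+ * case chosen depends on the src:dst ratio along that axis.
+ */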
+static void get_edge_info(uint32_t src, uint32_t src_coord, uint32_t dst,
+			  uint32_t *interp1, uint32_t *interp2,
+			  uint32_t *repeat1, uint32_t *repeat2)
+{
+	if (src > 3 * dst) {
+		*interp1 = 0;
+		*interp2 = src - 1;
+		*repeat1 = 0;
+		*repeat2 = 0;
+	} else if (src == 3 * dst) {
+		*interp1 = 0;
+		*interp2 = src;
+		*repeat1 = 0;
+		*repeat2 = 1;
+	} else if (src > dst && src < 3 * dst) {
+		*interp1 = -1;
+		*interp2 = src;
+		*repeat1 = 1;
+		*repeat2 = 1;
+	} else if (src == dst) {
+		*interp1 = -1;
+		*interp2 = src + 1;
+		*repeat1 = 1;
+		*repeat2 = 2;
+	} else {
+		*interp1 = -2;
+		*interp2 = src + 1;
+		*repeat1 = 2;
+		*repeat2 = 2;
+	}
+	*interp1 += src_coord;
+	*interp2 += src_coord;
+}
+
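+/*
+ * Fill in regs->edge with the per-edge luma and chroma repeat counts for
+ * this blit.  Chroma positions start from the luma ones and are halved
+ * horizontally for subsampled YCbCr formats (and vertically as well for
+ * H2V2).  Returns -1 if any repeat count does not fit the 2-bit field the
+ * hardware provides.
+ */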
+int mdp_ppp_cfg_edge_cond(struct mdp_blit_req *req, struct ppp_regs *regs)
+{
+	int32_t luma_interp[4];
+	int32_t luma_repeat[4];
+	int32_t chroma_interp[4];
+	int32_t chroma_bound[4];
+	int32_t chroma_repeat[4];
+	uint32_t dst_w, dst_h;
+
+	memset(&luma_interp, 0, sizeof(int32_t) * 4);
+	memset(&luma_repeat, 0, sizeof(int32_t) * 4);
+	memset(&chroma_interp, 0, sizeof(int32_t) * 4);
+	memset(&chroma_bound, 0, sizeof(int32_t) * 4);
+	memset(&chroma_repeat, 0, sizeof(int32_t) * 4);
+	regs->edge = 0;
+
+	if (req->flags & MDP_ROT_90) {
+		dst_w = req->dst_rect.h;
+		dst_h = req->dst_rect.w;
+	} else {
+		dst_w = req->dst_rect.w;
+		dst_h = req->dst_rect.h;
+	}
+
+	if (regs->op & (PPP_OP_SCALE_Y_ON | PPP_OP_SCALE_X_ON)) {
+		get_edge_info(req->src_rect.h, req->src_rect.y, dst_h,
+			      &luma_interp[IMG_TOP], &luma_interp[IMG_BOTTOM],
+			      &luma_repeat[IMG_TOP], &luma_repeat[IMG_BOTTOM]);
+		get_edge_info(req->src_rect.w, req->src_rect.x, dst_w,
+			      &luma_interp[IMG_LEFT], &luma_interp[IMG_RIGHT],
+			      &luma_repeat[IMG_LEFT], &luma_repeat[IMG_RIGHT]);
+	} else {
+		luma_interp[IMG_LEFT] = req->src_rect.x;
+		luma_interp[IMG_RIGHT] = req->src_rect.x + req->src_rect.w - 1;
+		luma_interp[IMG_TOP] = req->src_rect.y;
+		luma_interp[IMG_BOTTOM] = req->src_rect.y + req->src_rect.h - 1;
+		luma_repeat[IMG_LEFT] = 0;
+		luma_repeat[IMG_TOP] = 0;
+		luma_repeat[IMG_RIGHT] = 0;
+		luma_repeat[IMG_BOTTOM] = 0;
+	}
+
+	chroma_interp[IMG_LEFT] = luma_interp[IMG_LEFT];
+	chroma_interp[IMG_RIGHT] = luma_interp[IMG_RIGHT];
+	chroma_interp[IMG_TOP] = luma_interp[IMG_TOP];
+	chroma_interp[IMG_BOTTOM] = luma_interp[IMG_BOTTOM];
+
+	chroma_bound[IMG_LEFT] = req->src_rect.x;
+	chroma_bound[IMG_RIGHT] = req->src_rect.x + req->src_rect.w - 1;
+	chroma_bound[IMG_TOP] = req->src_rect.y;
+	chroma_bound[IMG_BOTTOM] = req->src_rect.y + req->src_rect.h - 1;
+
+	if (IS_YCRCB(req->src.format)) {
+		chroma_interp[IMG_LEFT] = chroma_interp[IMG_LEFT] >> 1;
+		chroma_interp[IMG_RIGHT] = (chroma_interp[IMG_RIGHT] + 1) >> 1;
+
+		chroma_bound[IMG_LEFT] = chroma_bound[IMG_LEFT] >> 1;
+		chroma_bound[IMG_RIGHT] = chroma_bound[IMG_RIGHT] >> 1;
+	}
+
+	if (req->src.format == MDP_Y_CBCR_H2V2 ||
+	    req->src.format == MDP_Y_CRCB_H2V2) {
+		chroma_interp[IMG_TOP] = (chroma_interp[IMG_TOP] - 1) >> 1;
+		chroma_interp[IMG_BOTTOM] = (chroma_interp[IMG_BOTTOM] + 1)
+					    >> 1;
+		chroma_bound[IMG_TOP] = (chroma_bound[IMG_TOP] + 1) >> 1;
+		chroma_bound[IMG_BOTTOM] = chroma_bound[IMG_BOTTOM] >> 1;
+	}
+
+	chroma_repeat[IMG_LEFT] = chroma_bound[IMG_LEFT] -
+				  chroma_interp[IMG_LEFT];
+	chroma_repeat[IMG_RIGHT] = chroma_interp[IMG_RIGHT] -
+				  chroma_bound[IMG_RIGHT];
+	chroma_repeat[IMG_TOP] = chroma_bound[IMG_TOP] -
+				  chroma_interp[IMG_TOP];
+	chroma_repeat[IMG_BOTTOM] = chroma_interp[IMG_BOTTOM] -
+				  chroma_bound[IMG_BOTTOM];
+
+	if (chroma_repeat[IMG_LEFT] < 0 || chroma_repeat[IMG_LEFT] > 3 ||
+	    chroma_repeat[IMG_RIGHT] < 0 || chroma_repeat[IMG_RIGHT] > 3 ||
+	    chroma_repeat[IMG_TOP] < 0 || chroma_repeat[IMG_TOP] > 3 ||
+	    chroma_repeat[IMG_BOTTOM] < 0 || chroma_repeat[IMG_BOTTOM] > 3 ||
+	    luma_repeat[IMG_LEFT] < 0 || luma_repeat[IMG_LEFT] > 3 ||
+	    luma_repeat[IMG_RIGHT] < 0 || luma_repeat[IMG_RIGHT] > 3 ||
+	    luma_repeat[IMG_TOP] < 0 || luma_repeat[IMG_TOP] > 3 ||
+	    luma_repeat[IMG_BOTTOM] < 0 || luma_repeat[IMG_BOTTOM] > 3)
+		return -1;
+
+	regs->edge |= (chroma_repeat[IMG_LEFT] & 3) << MDP_LEFT_CHROMA;
+	regs->edge |= (chroma_repeat[IMG_RIGHT] & 3) << MDP_RIGHT_CHROMA;
+	regs->edge |= (chroma_repeat[IMG_TOP] & 3) << MDP_TOP_CHROMA;
+	regs->edge |= (chroma_repeat[IMG_BOTTOM] & 3) << MDP_BOTTOM_CHROMA;
+	regs->edge |= (luma_repeat[IMG_LEFT] & 3) << MDP_LEFT_LUMA;
+	regs->edge |= (luma_repeat[IMG_RIGHT] & 3) << MDP_RIGHT_LUMA;
+	regs->edge |= (luma_repeat[IMG_TOP] & 3) << MDP_TOP_LUMA;
+	regs->edge |= (luma_repeat[IMG_BOTTOM] & 3) << MDP_BOTTOM_LUMA;
+	return 0;
+}
+
+#define ONE_HALF	(1LL << 32)
+#define ONE		(1LL << 33)
+#define TWO		(2LL << 33)
+#define THREE		(3LL << 33)
+#define FRAC_MASK (ONE - 1)
+#define INT_MASK (~FRAC_MASK)
+
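+/*
+ * Compute the initial phase and phase step for one axis from the input
+ * dimension, output dimension and source origin.  Returns -1 if the scale
+ * ratio is outside the range the hardware supports.
+ */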
+static int scale_params(uint32_t dim_in, uint32_t dim_out, uint32_t origin,
+			uint32_t *phase_init, uint32_t *phase_step)
+{
+	/* to improve precision, calculations are done in U31.33 and converted
+	 * to U3.29 at the end */
+	int64_t k1, k2, k3, k4, tmp;
+	uint64_t n, d, os, os_p, od, od_p, oreq;
+	unsigned rpa = 0;
+	int64_t ip64, delta;
+
+	if (dim_out % 3 == 0)
+		rpa = !(dim_in % (dim_out / 3));
+
+	n = ((uint64_t)dim_out) << 34;
+	d = dim_in;
+	if (!d)
+		return -1;
+	do_div(n, d);
+	k3 = (n + 1) >> 1;
+	if ((k3 >> 4) < (1LL << 27) || (k3 >> 4) > (1LL << 31))
+		return -1;
+
+	n = ((uint64_t)dim_in) << 34;
+	d = (uint64_t)dim_out;
+	if (!d)
+		return -1;
+	do_div(n, d);
+	k1 = (n + 1) >> 1;
+	k2 = (k1 - ONE) >> 1;
+
+	*phase_init = (int)(k2 >> 4);
+	k4 = (k3 - ONE) >> 1;
+
+	if (rpa) {
+		os = ((uint64_t)origin << 33) - ONE_HALF;
+		tmp = (dim_out * os) + ONE_HALF;
+		if (!dim_in)
+			return -1;
+		do_div(tmp, dim_in);
+		od = tmp - ONE_HALF;
+	} else {
+		os = ((uint64_t)origin << 1) - 1;
+		od = (((k3 * os) >> 1) + k4);
+	}
+
+	od_p = od & INT_MASK;
+	if (od_p != od)
+		od_p += ONE;
+
+	if (rpa) {
+		tmp = (dim_in * od_p) + ONE_HALF;
+		if (!dim_in)
+			return -1;
+		do_div(tmp, dim_in);
+		os_p = tmp - ONE_HALF;
+	} else {
+		os_p = ((k1 * (od_p >> 33)) + k2);
+	}
+
+	oreq = (os_p & INT_MASK) - ONE;
+
+	ip64 = os_p - oreq;
+	delta = ((int64_t)(origin) << 33) - oreq;
+	ip64 -= delta;
+	/* limit to valid range before the left shift */
+	delta = (ip64 & (1LL << 63)) ? 4 : -4;
+	delta <<= 33;
+	while (abs((int)(ip64 >> 33)) > 4)
+		ip64 += delta;
+	*phase_init = (int)(ip64 >> 4);
+	*phase_step = (uint32_t)(k1 >> 4);
+	return 0;
+}
+
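+/*
+ * Program the phase registers and pick the x/y downscale coefficient
+ * tables for this blit.  A table is only reloaded when the requested
+ * scale range differs from the one currently programmed.
+ */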
+int mdp_ppp_cfg_scale(const struct mdp_info *mdp, struct ppp_regs *regs,
+		      struct mdp_rect *src_rect, struct mdp_rect *dst_rect,
+		      uint32_t src_format, uint32_t dst_format)
+{
+	int downscale;
+	uint32_t phase_init_x, phase_init_y, phase_step_x, phase_step_y;
+	uint32_t scale_factor_x, scale_factor_y;
+
+	if (scale_params(src_rect->w, dst_rect->w, 1, &phase_init_x,
+			 &phase_step_x) ||
+	    scale_params(src_rect->h, dst_rect->h, 1, &phase_init_y,
+			 &phase_step_y))
+		return -1;
+
+	regs->phasex_init = phase_init_x;
+	regs->phasey_init = phase_init_y;
+	regs->phasex_step = phase_step_x;
+	regs->phasey_step = phase_step_y;
+
+	scale_factor_x = (dst_rect->w * 10) / src_rect->w;
+	scale_factor_y = (dst_rect->h * 10) / src_rect->h;
+
+	if (scale_factor_x > 8)
+		downscale = MDP_DOWNSCALE_PT8TO1;
+	else if (scale_factor_x > 6)
+		downscale = MDP_DOWNSCALE_PT6TOPT8;
+	else if (scale_factor_x > 4)
+		downscale = MDP_DOWNSCALE_PT4TOPT6;
+	else
+		downscale = MDP_DOWNSCALE_PT2TOPT4;
+
+	if (downscale != downscale_x_table) {
+		load_table(mdp, mdp_downscale_x_table[downscale], 64);
+		downscale_x_table = downscale;
+	}
+
+	if (scale_factor_y > 8)
+		downscale = MDP_DOWNSCALE_PT8TO1;
+	else if (scale_factor_y > 6)
+		downscale = MDP_DOWNSCALE_PT6TOPT8;
+	else if (scale_factor_y > 4)
+		downscale = MDP_DOWNSCALE_PT4TOPT6;
+	else
+		downscale = MDP_DOWNSCALE_PT2TOPT4;
+
+	if (downscale != downscale_y_table) {
+		load_table(mdp, mdp_downscale_y_table[downscale], 64);
+		downscale_y_table = downscale;
+	}
+
+	return 0;
+}
+
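+/*
+ * Load the gaussian blur coefficients into both filter banks.  The cached
+ * table selection is set to MDP_DOWNSCALE_BLUR so a later scaled blit will
+ * reload the proper downscale tables.
+ */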
+int mdp_ppp_load_blur(const struct mdp_info *mdp)
+{
+	if (!(downscale_x_table == MDP_DOWNSCALE_BLUR &&
+	      downscale_y_table == MDP_DOWNSCALE_BLUR)) {
+		load_table(mdp, mdp_gaussian_blur_table, 128);
+		downscale_x_table = MDP_DOWNSCALE_BLUR;
+		downscale_y_table = MDP_DOWNSCALE_BLUR;
+	}
+
+	return 0;
+}
+
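+/* Invalidate the cached table selection and load the shared upscale table. */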
+void mdp_ppp_init_scale(const struct mdp_info *mdp)
+{
+	downscale_x_table = MDP_DOWNSCALE_MAX;
+	downscale_y_table = MDP_DOWNSCALE_MAX;
+
+	load_table(mdp, mdp_upscale_table, ARRAY_SIZE(mdp_upscale_table));
+}
diff --git a/drivers/video/msm/mdp_ppp31.c b/drivers/video/msm/mdp_ppp31.c
new file mode 100644
index 0000000..91764fe
--- /dev/null
+++ b/drivers/video/msm/mdp_ppp31.c
@@ -0,0 +1,332 @@
+/* drivers/video/msm/mdp_ppp31.c
+ *
+ * Copyright (C) 2009 Code Aurora Forum. All rights reserved.
+ * Copyright (C) 2009 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include <linux/msm_mdp.h>
+
+#include "mdp_hw.h"
+#include "mdp_ppp.h"
+
+#define NUM_COEFFS			32
+
+struct mdp_scale_coeffs {
+	uint16_t	c[4][NUM_COEFFS];
+};
+
+struct mdp_scale_tbl_info {
+	uint16_t			offset;
+	uint32_t			set:2;
+	int				use_pr;
+	struct mdp_scale_coeffs		coeffs;
+};
+
+enum {
+	MDP_SCALE_PT2TOPT4,
+	MDP_SCALE_PT4TOPT6,
+	MDP_SCALE_PT6TOPT8,
+	MDP_SCALE_PT8TO8,
+	MDP_SCALE_MAX,
+};
+
+static struct mdp_scale_coeffs mdp_scale_pr_coeffs = {
+	.c = {
+		[0] = {
+			0, 0, 0, 0, 0, 0, 0, 0,
+			0, 0, 0, 0, 0, 0, 0, 0,
+			0, 0, 0, 0, 0, 0, 0, 0,
+			0, 0, 0, 0, 0, 0, 0, 0,
+		},
+		[1] = {
+			511, 511, 511, 511, 511, 511, 511, 511,
+			511, 511, 511, 511, 511, 511, 511, 511,
+			0, 0, 0, 0, 0, 0, 0, 0,
+			0, 0, 0, 0, 0, 0, 0, 0,
+		},
+		[2] = {
+			0, 0, 0, 0, 0, 0, 0, 0,
+			0, 0, 0, 0, 0, 0, 0, 0,
+			511, 511, 511, 511, 511, 511, 511, 511,
+			511, 511, 511, 511, 511, 511, 511, 511,
+		},
+		[3] = {
+			0, 0, 0, 0, 0, 0, 0, 0,
+			0, 0, 0, 0, 0, 0, 0, 0,
+			0, 0, 0, 0, 0, 0, 0, 0,
+			0, 0, 0, 0, 0, 0, 0, 0,
+		},
+	},
+};
+
+static struct mdp_scale_tbl_info mdp_scale_tbl[MDP_SCALE_MAX] = {
+	[ MDP_SCALE_PT2TOPT4 ]	= {
+		.offset		= 0,
+		.set		= MDP_PPP_SCALE_COEFF_D0_SET,
+		.use_pr		= -1,
+		.coeffs.c	= {
+			[0] = {
+				131, 131, 130, 129, 128, 127, 127, 126,
+				125, 125, 124, 123, 123, 121, 120, 119,
+				119, 118, 117, 117, 116, 115, 115, 114,
+				113, 112, 111, 110, 109, 109, 108, 107,
+			},
+			[1] = {
+				141, 140, 140, 140, 140, 139, 138, 138,
+				138, 137, 137, 137, 136, 137, 137, 137,
+				136, 136, 136, 135, 135, 135, 134, 134,
+				134, 134, 134, 133, 133, 132, 132, 132,
+			},
+			[2] = {
+				132, 132, 132, 133, 133, 134, 134, 134,
+				134, 134, 135, 135, 135, 136, 136, 136,
+				137, 137, 137, 136, 137, 137, 137, 138,
+				138, 138, 139, 140, 140, 140, 140, 141,
+			},
+			[3] = {
+				107, 108, 109, 109, 110, 111, 112, 113,
+				114, 115, 115, 116, 117, 117, 118, 119,
+				119, 120, 121, 123, 123, 124, 125, 125,
+				126, 127, 127, 128, 129, 130, 131, 131,
+			}
+		},
+	},
+	[ MDP_SCALE_PT4TOPT6 ] = {
+		.offset		= 32,
+		.set		= MDP_PPP_SCALE_COEFF_D1_SET,
+		.use_pr		= -1,
+		.coeffs.c	= {
+			[0] = {
+				136, 132, 128, 123, 119, 115, 111, 107,
+				103, 98, 95, 91, 87, 84, 80, 76,
+				73, 69, 66, 62, 59, 57, 54, 50,
+				47, 44, 41, 39, 36, 33, 32, 29,
+			},
+			[1] = {
+				206, 205, 204, 204, 201, 200, 199, 197,
+				196, 194, 191, 191, 189, 185, 184, 182,
+				180, 178, 176, 173, 170, 168, 165, 162,
+				160, 157, 155, 152, 148, 146, 142, 140,
+			},
+			[2] = {
+				140, 142, 146, 148, 152, 155, 157, 160,
+				162, 165, 168, 170, 173, 176, 178, 180,
+				182, 184, 185, 189, 191, 191, 194, 196,
+				197, 199, 200, 201, 204, 204, 205, 206,
+			},
+			[3] = {
+				29, 32, 33, 36, 39, 41, 44, 47,
+				50, 54, 57, 59, 62, 66, 69, 73,
+				76, 80, 84, 87, 91, 95, 98, 103,
+				107, 111, 115, 119, 123, 128, 132, 136,
+			},
+		},
+	},
+	[ MDP_SCALE_PT6TOPT8 ] = {
+		.offset		= 64,
+		.set		= MDP_PPP_SCALE_COEFF_D2_SET,
+		.use_pr		= -1,
+		.coeffs.c	= {
+			[0] = {
+				104, 96, 89, 82, 75, 68, 61, 55,
+				49, 43, 38, 33, 28, 24, 20, 16,
+				12, 9, 6, 4, 2, 0, -2, -4,
+				-5, -6, -7, -7, -8, -8, -8, -8,
+			},
+			[1] = {
+				303, 303, 302, 300, 298, 296, 293, 289,
+				286, 281, 276, 270, 265, 258, 252, 245,
+				238, 230, 223, 214, 206, 197, 189, 180,
+				172, 163, 154, 145, 137, 128, 120, 112,
+			},
+			[2] = {
+				112, 120, 128, 137, 145, 154, 163, 172,
+				180, 189, 197, 206, 214, 223, 230, 238,
+				245, 252, 258, 265, 270, 276, 281, 286,
+				289, 293, 296, 298, 300, 302, 303, 303,
+			},
+			[3] = {
+				-8, -8, -8, -8, -7, -7, -6, -5,
+				-4, -2, 0, 2, 4, 6, 9, 12,
+				16, 20, 24, 28, 33, 38, 43, 49,
+				55, 61, 68, 75, 82, 89, 96, 104,
+			},
+		},
+	},
+	[ MDP_SCALE_PT8TO8 ] = {
+		.offset		= 96,
+		.set		= MDP_PPP_SCALE_COEFF_U1_SET,
+		.use_pr		= -1,
+		.coeffs.c	= {
+			[0] = {
+				0, -7, -13, -19, -24, -28, -32, -34,
+				-37, -39, -40, -41, -41, -41, -40, -40,
+				-38, -37, -35, -33, -31, -29, -26, -24,
+				-21, -18, -15, -13, -10, -7, -5, -2,
+			},
+			[1] = {
+				511, 507, 501, 494, 485, 475, 463, 450,
+				436, 422, 405, 388, 370, 352, 333, 314,
+				293, 274, 253, 233, 213, 193, 172, 152,
+				133, 113, 95, 77, 60, 43, 28, 13,
+			},
+			[2] = {
+				0, 13, 28, 43, 60, 77, 95, 113,
+				133, 152, 172, 193, 213, 233, 253, 274,
+				294, 314, 333, 352, 370, 388, 405, 422,
+				436, 450, 463, 475, 485, 494, 501, 507,
+			},
+			[3] = {
+				0, -2, -5, -7, -10, -13, -15, -18,
+				-21, -24, -26, -29, -31, -33, -35, -37,
+				-38, -40, -40, -41, -41, -41, -40, -39,
+				-37, -34, -32, -28, -24, -19, -13, -7,
+			},
+		},
+	},
+};
+
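+/*
+ * Load one coefficient set, either the table's own polyphase coefficients
+ * or the pixel-repeat set.  For each of the 32 phases, c0/c1 are packed
+ * into the LSB register and c2/c3 into the MSB register.
+ */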
+static void load_table(const struct mdp_info *mdp, int scale, int use_pr)
+{
+	int i;
+	uint32_t val;
+	struct mdp_scale_coeffs *coeffs;
+	struct mdp_scale_tbl_info *tbl = &mdp_scale_tbl[scale];
+
+	if (use_pr == tbl->use_pr)
+		return;
+
+	tbl->use_pr = use_pr;
+	if (!use_pr)
+		coeffs = &tbl->coeffs;
+	else
+		coeffs = &mdp_scale_pr_coeffs;
+
+	for (i = 0; i < NUM_COEFFS; ++i) {
+		val = ((coeffs->c[1][i] & 0x3ff) << 16) |
+			(coeffs->c[0][i] & 0x3ff);
+		mdp_writel(mdp, val, MDP_PPP_SCALE_COEFF_LSBn(tbl->offset + i));
+
+		val = ((coeffs->c[3][i] & 0x3ff) << 16) |
+			(coeffs->c[2][i] & 0x3ff);
+		mdp_writel(mdp, val, MDP_PPP_SCALE_COEFF_MSBn(tbl->offset + i));
+	}
+}
+
+#define SCALER_PHASE_BITS		29
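+/*
+ * Compute phase_init and phase_step in 3.29 fixed point for one axis.  The
+ * FIR scaler steps by (src - 1)/(dst - 1), rounded up, so the last output
+ * pixel lands on the last input pixel; the M/N scaler simply steps by
+ * src/dst.
+ */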
+static void scale_params(uint32_t dim_in, uint32_t dim_out, uint32_t scaler,
+			 uint32_t *phase_init, uint32_t *phase_step)
+{
+	uint64_t src = dim_in;
+	uint64_t dst = dim_out;
+	uint64_t numer;
+	uint64_t denom;
+
+	*phase_init = 0;
+
+	if (dst == 1) {
+		/* if destination is 1 pixel wide, the value of phase_step
+		 * is unimportant. */
+		*phase_step = (uint32_t) (src << SCALER_PHASE_BITS);
+		if (scaler == MDP_PPP_SCALER_FIR)
+			*phase_init =
+				(uint32_t) ((src - 1) << SCALER_PHASE_BITS);
+		return;
+	}
+
+	if (scaler == MDP_PPP_SCALER_FIR) {
+		numer = (src - 1) << SCALER_PHASE_BITS;
+		denom = dst - 1;
+		/* we want to round up the result */
+		numer += denom - 1;
+	} else {
+		numer = src << SCALER_PHASE_BITS;
+		denom = dst;
+	}
+
+	do_div(numer, denom);
+	*phase_step = (uint32_t) numer;
+}
+
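+/* Map a scale factor (dst/src in percent) to the matching coefficient set. */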
+static int scale_idx(int factor)
+{
+	int idx;
+
+	if (factor > 80)
+		idx = MDP_SCALE_PT8TO8;
+	else if (factor > 60)
+		idx = MDP_SCALE_PT6TOPT8;
+	else if (factor > 40)
+		idx = MDP_SCALE_PT4TOPT6;
+	else
+		idx = MDP_SCALE_PT2TOPT4;
+
+	return idx;
+}
+
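+/*
+ * Pick the scaler type (FIR, or M/N for downscales below 1/4), compute the
+ * phase registers and load the coefficient tables for both axes, then build
+ * scale_cfg: bits 0-1 select the x/y scaler, bits 2-3 and 4-5 the x/y
+ * coefficient sets, and bit 6 enables the SVI path for YUV surfaces.
+ */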
+int mdp_ppp_cfg_scale(const struct mdp_info *mdp, struct ppp_regs *regs,
+		      struct mdp_rect *src_rect, struct mdp_rect *dst_rect,
+		      uint32_t src_format, uint32_t dst_format)
+{
+	uint32_t x_fac;
+	uint32_t y_fac;
+	uint32_t scaler_x = MDP_PPP_SCALER_FIR;
+	uint32_t scaler_y = MDP_PPP_SCALER_FIR;
+	/* Don't use pixel repeat mode, it looks bad */
+	int use_pr = 0;
+	int x_idx;
+	int y_idx;
+
+	if (unlikely(src_rect->w > 2048 || src_rect->h > 2048))
+		return -ENOTSUPP;
+
+	x_fac = (dst_rect->w * 100) / src_rect->w;
+	y_fac = (dst_rect->h * 100) / src_rect->h;
+
+	/* if down-scaling by a factor smaller than 1/4, use M/N */
+	scaler_x = x_fac <= 25 ? MDP_PPP_SCALER_MN : MDP_PPP_SCALER_FIR;
+	scaler_y = y_fac <= 25 ? MDP_PPP_SCALER_MN : MDP_PPP_SCALER_FIR;
+	scale_params(src_rect->w, dst_rect->w, scaler_x, &regs->phasex_init,
+		     &regs->phasex_step);
+	scale_params(src_rect->h, dst_rect->h, scaler_y, &regs->phasey_init,
+		     &regs->phasey_step);
+
+	x_idx = scale_idx(x_fac);
+	y_idx = scale_idx(y_fac);
+	load_table(mdp, x_idx, use_pr);
+	load_table(mdp, y_idx, use_pr);
+
+	regs->scale_cfg = 0;
+	/* Enable SVI when source or destination is YUV */
+	if (!IS_RGB(src_format) && !IS_RGB(dst_format))
+		regs->scale_cfg |= (1 << 6);
+	regs->scale_cfg |= (mdp_scale_tbl[x_idx].set << 2) |
+		(mdp_scale_tbl[y_idx].set << 4);
+	regs->scale_cfg |= (scaler_x << 0) | (scaler_y << 1);
+
+	return 0;
+}
+
+int mdp_ppp_load_blur(const struct mdp_info *mdp)
+{
+	return -ENOTSUPP;
+}
+
+void mdp_ppp_init_scale(const struct mdp_info *mdp)
+{
+	int scale;
+	for (scale = 0; scale < MDP_SCALE_MAX; ++scale)
+		load_table(mdp, scale, 0);
+}
diff --git a/drivers/video/msm/mdp_ppp_v20.c b/drivers/video/msm/mdp_ppp_v20.c
new file mode 100644
index 0000000..8828a8f
--- /dev/null
+++ b/drivers/video/msm/mdp_ppp_v20.c
@@ -0,0 +1,2483 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/fb.h>
+#include "linux/proc_fs.h"
+
+#include <mach/hardware.h>
+#include <linux/io.h>
+
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <linux/semaphore.h>
+#include <asm/div64.h>
+
+#include "mdp.h"
+#include "msm_fb.h"
+
+static MDP_SCALE_MODE mdp_curr_up_scale_xy;
+static MDP_SCALE_MODE mdp_curr_down_scale_x;
+static MDP_SCALE_MODE mdp_curr_down_scale_y;
+
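+/*
+ * do_div() updates its dividend in place; wrap it so the quotient can be
+ * used directly in an expression.
+ */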
+static long long mdp_do_div(long long num, long long den)
+{
+	do_div(num, den);
+	return num;
+}
+
+struct mdp_table_entry mdp_gaussian_blur_table[] = {
+	/* max variance */
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50280, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50284, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50288, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x5028c, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50290, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50294, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50298, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x5029c, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502a0, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502a4, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502a8, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502ac, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502b0, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502b4, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502b8, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502bc, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502c0, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502c4, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502c8, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502cc, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502d0, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502d4, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502d8, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502dc, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502e0, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502e4, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502e8, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502ec, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502f0, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502f4, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502f8, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502fc, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50300, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50304, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50308, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x5030c, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50310, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50314, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50318, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x5031c, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50320, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50324, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50328, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x5032c, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50330, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50334, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50338, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x5033c, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50340, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50344, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50348, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x5034c, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50350, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50354, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50358, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x5035c, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50360, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50364, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50368, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x5036c, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50370, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50374, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50378, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x5037c, 0x20000080 },
+};
+
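+/* Write a (register, value) coefficient table to the MDP via MDP_OUTP. */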
+static void load_scale_table(struct mdp_table_entry *table, int len)
+{
+	int i;
+	for (i = 0; i < len; i++)
+		MDP_OUTP(MDP_BASE + table[i].reg, table[i].val);
+}
+
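+/*
+ * The pixel-repeat tables below appear to program a single full-weight tap
+ * per phase, so the filter simply replicates the nearest source pixel.
+ */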
+static void mdp_load_pr_upscale_table(void)
+{
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50200, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50204, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50208, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5020c, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50210, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50214, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50218, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5021c, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50220, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50224, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50228, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5022c, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50230, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50234, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50238, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5023c, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50240, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50244, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50248, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x5024c, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50250, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50254, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50258, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x5025c, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50260, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50264, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50268, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x5026c, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50270, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50274, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50278, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x5027c, 0x0);
+}
+
+static void mdp_load_pr_downscale_table_x_point2TOpoint4(void)
+{
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50280, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50284, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50288, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5028c, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50290, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50294, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50298, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5029c, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502a0, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502a4, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502a8, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502ac, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502b0, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502b4, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502b8, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502bc, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502c0, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502c4, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502c8, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502cc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502d0, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502d4, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502d8, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502dc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502e0, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502e4, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502e8, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502ec, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502f0, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502f4, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502f8, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502fc, 0x0);
+}
+
+static void mdp_load_pr_downscale_table_y_point2TOpoint4(void)
+{
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50300, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50304, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50308, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5030c, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50310, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50314, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50318, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5031c, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50320, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50324, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50328, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5032c, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50330, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50334, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50338, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5033c, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50340, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50344, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50348, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x5034c, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50350, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50354, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50358, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x5035c, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50360, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50364, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50368, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x5036c, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50370, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50374, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50378, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x5037c, 0x0);
+}
+
+static void mdp_load_pr_downscale_table_x_point4TOpoint6(void)
+{
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50280, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50284, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50288, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5028c, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50290, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50294, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50298, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5029c, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502a0, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502a4, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502a8, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502ac, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502b0, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502b4, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502b8, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502bc, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502c0, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502c4, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502c8, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502cc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502d0, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502d4, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502d8, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502dc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502e0, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502e4, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502e8, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502ec, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502f0, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502f4, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502f8, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502fc, 0x0);
+}
+
+static void mdp_load_pr_downscale_table_y_point4TOpoint6(void)
+{
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50300, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50304, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50308, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5030c, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50310, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50314, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50318, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5031c, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50320, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50324, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50328, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5032c, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50330, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50334, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50338, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5033c, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50340, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50344, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50348, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x5034c, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50350, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50354, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50358, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x5035c, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50360, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50364, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50368, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x5036c, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50370, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50374, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50378, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x5037c, 0x0);
+}
+
+static void mdp_load_pr_downscale_table_x_point6TOpoint8(void)
+{
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50280, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50284, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50288, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5028c, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50290, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50294, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50298, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5029c, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502a0, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502a4, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502a8, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502ac, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502b0, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502b4, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502b8, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502bc, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502c0, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502c4, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502c8, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502cc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502d0, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502d4, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502d8, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502dc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502e0, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502e4, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502e8, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502ec, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502f0, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502f4, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502f8, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502fc, 0x0);
+}
+
+static void mdp_load_pr_downscale_table_y_point6TOpoint8(void)
+{
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50300, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50304, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50308, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5030c, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50310, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50314, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50318, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5031c, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50320, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50324, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50328, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5032c, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50330, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50334, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50338, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5033c, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50340, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50344, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50348, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x5034c, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50350, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50354, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50358, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x5035c, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50360, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50364, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50368, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x5036c, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50370, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50374, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50378, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x5037c, 0x0);
+}
+
+static void mdp_load_pr_downscale_table_x_point8TO1(void)
+{
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50280, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50284, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50288, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5028c, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50290, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50294, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50298, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5029c, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502a0, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502a4, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502a8, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502ac, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502b0, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502b4, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502b8, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x502bc, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502c0, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502c4, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502c8, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502cc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502d0, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502d4, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502d8, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502dc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502e0, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502e4, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502e8, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502ec, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502f0, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502f4, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502f8, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x502fc, 0x0);
+}
+
+static void mdp_load_pr_downscale_table_y_point8TO1(void)
+{
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50300, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50304, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50308, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5030c, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50310, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50314, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50318, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5031c, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50320, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50324, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50328, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5032c, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50330, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50334, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50338, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5033c, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50340, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50344, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50348, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x5034c, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50350, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50354, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50358, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x5035c, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50360, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50364, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50368, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x5036c, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50370, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50374, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x50378, 0x0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
+	MDP_OUTP(MDP_BASE + 0x5037c, 0x0);
+}
+
+static void mdp_load_bc_upscale_table(void)
+{
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50200, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xff80000d);
+	MDP_OUTP(MDP_BASE + 0x50204, 0x7ec003f9);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfec0001c);
+	MDP_OUTP(MDP_BASE + 0x50208, 0x7d4003f3);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe40002b);
+	MDP_OUTP(MDP_BASE + 0x5020c, 0x7b8003ed);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfd80003c);
+	MDP_OUTP(MDP_BASE + 0x50210, 0x794003e8);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfcc0004d);
+	MDP_OUTP(MDP_BASE + 0x50214, 0x76c003e4);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfc40005f);
+	MDP_OUTP(MDP_BASE + 0x50218, 0x73c003e0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfb800071);
+	MDP_OUTP(MDP_BASE + 0x5021c, 0x708003de);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfac00085);
+	MDP_OUTP(MDP_BASE + 0x50220, 0x6d0003db);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfa000098);
+	MDP_OUTP(MDP_BASE + 0x50224, 0x698003d9);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf98000ac);
+	MDP_OUTP(MDP_BASE + 0x50228, 0x654003d8);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf8c000c1);
+	MDP_OUTP(MDP_BASE + 0x5022c, 0x610003d7);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf84000d5);
+	MDP_OUTP(MDP_BASE + 0x50230, 0x5c8003d7);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf7c000e9);
+	MDP_OUTP(MDP_BASE + 0x50234, 0x580003d7);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf74000fd);
+	MDP_OUTP(MDP_BASE + 0x50238, 0x534003d8);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf6c00112);
+	MDP_OUTP(MDP_BASE + 0x5023c, 0x4e8003d8);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf6800126);
+	MDP_OUTP(MDP_BASE + 0x50240, 0x494003da);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf600013a);
+	MDP_OUTP(MDP_BASE + 0x50244, 0x448003db);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf600014d);
+	MDP_OUTP(MDP_BASE + 0x50248, 0x3f4003dd);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf5c00160);
+	MDP_OUTP(MDP_BASE + 0x5024c, 0x3a4003df);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf5c00172);
+	MDP_OUTP(MDP_BASE + 0x50250, 0x354003e1);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf5c00184);
+	MDP_OUTP(MDP_BASE + 0x50254, 0x304003e3);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf6000195);
+	MDP_OUTP(MDP_BASE + 0x50258, 0x2b0003e6);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf64001a6);
+	MDP_OUTP(MDP_BASE + 0x5025c, 0x260003e8);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf6c001b4);
+	MDP_OUTP(MDP_BASE + 0x50260, 0x214003eb);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf78001c2);
+	MDP_OUTP(MDP_BASE + 0x50264, 0x1c4003ee);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf80001cf);
+	MDP_OUTP(MDP_BASE + 0x50268, 0x17c003f1);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf90001db);
+	MDP_OUTP(MDP_BASE + 0x5026c, 0x134003f3);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfa0001e5);
+	MDP_OUTP(MDP_BASE + 0x50270, 0xf0003f6);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfb4001ee);
+	MDP_OUTP(MDP_BASE + 0x50274, 0xac003f9);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfcc001f5);
+	MDP_OUTP(MDP_BASE + 0x50278, 0x70003fb);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe4001fb);
+	MDP_OUTP(MDP_BASE + 0x5027c, 0x34003fe);
+}
+
+static void mdp_load_bc_downscale_table_x_point2TOpoint4(void)
+{
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ac00084);
+	MDP_OUTP(MDP_BASE + 0x50280, 0x23400083);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1b000084);
+	MDP_OUTP(MDP_BASE + 0x50284, 0x23000083);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1b400084);
+	MDP_OUTP(MDP_BASE + 0x50288, 0x23000082);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1b400085);
+	MDP_OUTP(MDP_BASE + 0x5028c, 0x23000081);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1b800085);
+	MDP_OUTP(MDP_BASE + 0x50290, 0x23000080);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1bc00086);
+	MDP_OUTP(MDP_BASE + 0x50294, 0x22c0007f);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1c000086);
+	MDP_OUTP(MDP_BASE + 0x50298, 0x2280007f);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1c400086);
+	MDP_OUTP(MDP_BASE + 0x5029c, 0x2280007e);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1c800086);
+	MDP_OUTP(MDP_BASE + 0x502a0, 0x2280007d);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1cc00086);
+	MDP_OUTP(MDP_BASE + 0x502a4, 0x2240007d);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1cc00087);
+	MDP_OUTP(MDP_BASE + 0x502a8, 0x2240007c);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1d000087);
+	MDP_OUTP(MDP_BASE + 0x502ac, 0x2240007b);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1d400087);
+	MDP_OUTP(MDP_BASE + 0x502b0, 0x2200007b);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1d400088);
+	MDP_OUTP(MDP_BASE + 0x502b4, 0x22400079);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1d800088);
+	MDP_OUTP(MDP_BASE + 0x502b8, 0x22400078);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1dc00088);
+	MDP_OUTP(MDP_BASE + 0x502bc, 0x22400077);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1dc00089);
+	MDP_OUTP(MDP_BASE + 0x502c0, 0x22000077);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1e000089);
+	MDP_OUTP(MDP_BASE + 0x502c4, 0x22000076);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1e400089);
+	MDP_OUTP(MDP_BASE + 0x502c8, 0x22000075);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ec00088);
+	MDP_OUTP(MDP_BASE + 0x502cc, 0x21c00075);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ec00089);
+	MDP_OUTP(MDP_BASE + 0x502d0, 0x21c00074);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1f000089);
+	MDP_OUTP(MDP_BASE + 0x502d4, 0x21c00073);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1f400089);
+	MDP_OUTP(MDP_BASE + 0x502d8, 0x21800073);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1f40008a);
+	MDP_OUTP(MDP_BASE + 0x502dc, 0x21800072);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1f80008a);
+	MDP_OUTP(MDP_BASE + 0x502e0, 0x21800071);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1fc0008a);
+	MDP_OUTP(MDP_BASE + 0x502e4, 0x21800070);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1fc0008b);
+	MDP_OUTP(MDP_BASE + 0x502e8, 0x2180006f);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x2000008c);
+	MDP_OUTP(MDP_BASE + 0x502ec, 0x2140006e);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x2040008c);
+	MDP_OUTP(MDP_BASE + 0x502f0, 0x2140006d);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x2080008c);
+	MDP_OUTP(MDP_BASE + 0x502f4, 0x2100006d);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x20c0008c);
+	MDP_OUTP(MDP_BASE + 0x502f8, 0x2100006c);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x20c0008d);
+	MDP_OUTP(MDP_BASE + 0x502fc, 0x2100006b);
+}
+
+static void mdp_load_bc_downscale_table_y_point2TOpoint4(void)
+{
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ac00084);
+	MDP_OUTP(MDP_BASE + 0x50300, 0x23400083);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1b000084);
+	MDP_OUTP(MDP_BASE + 0x50304, 0x23000083);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1b400084);
+	MDP_OUTP(MDP_BASE + 0x50308, 0x23000082);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1b400085);
+	MDP_OUTP(MDP_BASE + 0x5030c, 0x23000081);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1b800085);
+	MDP_OUTP(MDP_BASE + 0x50310, 0x23000080);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1bc00086);
+	MDP_OUTP(MDP_BASE + 0x50314, 0x22c0007f);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1c000086);
+	MDP_OUTP(MDP_BASE + 0x50318, 0x2280007f);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1c400086);
+	MDP_OUTP(MDP_BASE + 0x5031c, 0x2280007e);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1c800086);
+	MDP_OUTP(MDP_BASE + 0x50320, 0x2280007d);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1cc00086);
+	MDP_OUTP(MDP_BASE + 0x50324, 0x2240007d);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1cc00087);
+	MDP_OUTP(MDP_BASE + 0x50328, 0x2240007c);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1d000087);
+	MDP_OUTP(MDP_BASE + 0x5032c, 0x2240007b);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1d400087);
+	MDP_OUTP(MDP_BASE + 0x50330, 0x2200007b);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1d400088);
+	MDP_OUTP(MDP_BASE + 0x50334, 0x22400079);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1d800088);
+	MDP_OUTP(MDP_BASE + 0x50338, 0x22400078);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1dc00088);
+	MDP_OUTP(MDP_BASE + 0x5033c, 0x22400077);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1dc00089);
+	MDP_OUTP(MDP_BASE + 0x50340, 0x22000077);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1e000089);
+	MDP_OUTP(MDP_BASE + 0x50344, 0x22000076);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1e400089);
+	MDP_OUTP(MDP_BASE + 0x50348, 0x22000075);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ec00088);
+	MDP_OUTP(MDP_BASE + 0x5034c, 0x21c00075);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ec00089);
+	MDP_OUTP(MDP_BASE + 0x50350, 0x21c00074);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1f000089);
+	MDP_OUTP(MDP_BASE + 0x50354, 0x21c00073);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1f400089);
+	MDP_OUTP(MDP_BASE + 0x50358, 0x21800073);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1f40008a);
+	MDP_OUTP(MDP_BASE + 0x5035c, 0x21800072);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1f80008a);
+	MDP_OUTP(MDP_BASE + 0x50360, 0x21800071);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1fc0008a);
+	MDP_OUTP(MDP_BASE + 0x50364, 0x21800070);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1fc0008b);
+	MDP_OUTP(MDP_BASE + 0x50368, 0x2180006f);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x2000008c);
+	MDP_OUTP(MDP_BASE + 0x5036c, 0x2140006e);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x2040008c);
+	MDP_OUTP(MDP_BASE + 0x50370, 0x2140006d);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x2080008c);
+	MDP_OUTP(MDP_BASE + 0x50374, 0x2100006d);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x20c0008c);
+	MDP_OUTP(MDP_BASE + 0x50378, 0x2100006c);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x20c0008d);
+	MDP_OUTP(MDP_BASE + 0x5037c, 0x2100006b);
+}
+
+static void mdp_load_bc_downscale_table_x_point4TOpoint6(void)
+{
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x740008c);
+	MDP_OUTP(MDP_BASE + 0x50280, 0x33800088);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x800008e);
+	MDP_OUTP(MDP_BASE + 0x50284, 0x33400084);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x8400092);
+	MDP_OUTP(MDP_BASE + 0x50288, 0x33000080);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x9000094);
+	MDP_OUTP(MDP_BASE + 0x5028c, 0x3300007b);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x9c00098);
+	MDP_OUTP(MDP_BASE + 0x50290, 0x32400077);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xa40009b);
+	MDP_OUTP(MDP_BASE + 0x50294, 0x32000073);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xb00009d);
+	MDP_OUTP(MDP_BASE + 0x50298, 0x31c0006f);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xbc000a0);
+	MDP_OUTP(MDP_BASE + 0x5029c, 0x3140006b);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xc8000a2);
+	MDP_OUTP(MDP_BASE + 0x502a0, 0x31000067);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xd8000a5);
+	MDP_OUTP(MDP_BASE + 0x502a4, 0x30800062);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xe4000a8);
+	MDP_OUTP(MDP_BASE + 0x502a8, 0x2fc0005f);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xec000aa);
+	MDP_OUTP(MDP_BASE + 0x502ac, 0x2fc0005b);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf8000ad);
+	MDP_OUTP(MDP_BASE + 0x502b0, 0x2f400057);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x108000b0);
+	MDP_OUTP(MDP_BASE + 0x502b4, 0x2e400054);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x114000b2);
+	MDP_OUTP(MDP_BASE + 0x502b8, 0x2e000050);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x124000b4);
+	MDP_OUTP(MDP_BASE + 0x502bc, 0x2d80004c);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x130000b6);
+	MDP_OUTP(MDP_BASE + 0x502c0, 0x2d000049);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x140000b8);
+	MDP_OUTP(MDP_BASE + 0x502c4, 0x2c800045);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x150000b9);
+	MDP_OUTP(MDP_BASE + 0x502c8, 0x2c000042);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x15c000bd);
+	MDP_OUTP(MDP_BASE + 0x502cc, 0x2b40003e);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x16c000bf);
+	MDP_OUTP(MDP_BASE + 0x502d0, 0x2a80003b);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x17c000bf);
+	MDP_OUTP(MDP_BASE + 0x502d4, 0x2a000039);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x188000c2);
+	MDP_OUTP(MDP_BASE + 0x502d8, 0x29400036);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x19c000c4);
+	MDP_OUTP(MDP_BASE + 0x502dc, 0x28800032);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ac000c5);
+	MDP_OUTP(MDP_BASE + 0x502e0, 0x2800002f);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1bc000c7);
+	MDP_OUTP(MDP_BASE + 0x502e4, 0x2740002c);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1cc000c8);
+	MDP_OUTP(MDP_BASE + 0x502e8, 0x26c00029);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1dc000c9);
+	MDP_OUTP(MDP_BASE + 0x502ec, 0x26000027);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ec000cc);
+	MDP_OUTP(MDP_BASE + 0x502f0, 0x25000024);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x200000cc);
+	MDP_OUTP(MDP_BASE + 0x502f4, 0x24800021);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x210000cd);
+	MDP_OUTP(MDP_BASE + 0x502f8, 0x23800020);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x220000ce);
+	MDP_OUTP(MDP_BASE + 0x502fc, 0x2300001d);
+}
+
+static void mdp_load_bc_downscale_table_y_point4TOpoint6(void)
+{
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x740008c);
+	MDP_OUTP(MDP_BASE + 0x50300, 0x33800088);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x800008e);
+	MDP_OUTP(MDP_BASE + 0x50304, 0x33400084);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x8400092);
+	MDP_OUTP(MDP_BASE + 0x50308, 0x33000080);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x9000094);
+	MDP_OUTP(MDP_BASE + 0x5030c, 0x3300007b);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x9c00098);
+	MDP_OUTP(MDP_BASE + 0x50310, 0x32400077);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xa40009b);
+	MDP_OUTP(MDP_BASE + 0x50314, 0x32000073);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xb00009d);
+	MDP_OUTP(MDP_BASE + 0x50318, 0x31c0006f);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xbc000a0);
+	MDP_OUTP(MDP_BASE + 0x5031c, 0x3140006b);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xc8000a2);
+	MDP_OUTP(MDP_BASE + 0x50320, 0x31000067);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xd8000a5);
+	MDP_OUTP(MDP_BASE + 0x50324, 0x30800062);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xe4000a8);
+	MDP_OUTP(MDP_BASE + 0x50328, 0x2fc0005f);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xec000aa);
+	MDP_OUTP(MDP_BASE + 0x5032c, 0x2fc0005b);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf8000ad);
+	MDP_OUTP(MDP_BASE + 0x50330, 0x2f400057);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x108000b0);
+	MDP_OUTP(MDP_BASE + 0x50334, 0x2e400054);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x114000b2);
+	MDP_OUTP(MDP_BASE + 0x50338, 0x2e000050);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x124000b4);
+	MDP_OUTP(MDP_BASE + 0x5033c, 0x2d80004c);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x130000b6);
+	MDP_OUTP(MDP_BASE + 0x50340, 0x2d000049);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x140000b8);
+	MDP_OUTP(MDP_BASE + 0x50344, 0x2c800045);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x150000b9);
+	MDP_OUTP(MDP_BASE + 0x50348, 0x2c000042);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x15c000bd);
+	MDP_OUTP(MDP_BASE + 0x5034c, 0x2b40003e);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x16c000bf);
+	MDP_OUTP(MDP_BASE + 0x50350, 0x2a80003b);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x17c000bf);
+	MDP_OUTP(MDP_BASE + 0x50354, 0x2a000039);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x188000c2);
+	MDP_OUTP(MDP_BASE + 0x50358, 0x29400036);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x19c000c4);
+	MDP_OUTP(MDP_BASE + 0x5035c, 0x28800032);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ac000c5);
+	MDP_OUTP(MDP_BASE + 0x50360, 0x2800002f);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1bc000c7);
+	MDP_OUTP(MDP_BASE + 0x50364, 0x2740002c);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1cc000c8);
+	MDP_OUTP(MDP_BASE + 0x50368, 0x26c00029);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1dc000c9);
+	MDP_OUTP(MDP_BASE + 0x5036c, 0x26000027);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ec000cc);
+	MDP_OUTP(MDP_BASE + 0x50370, 0x25000024);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x200000cc);
+	MDP_OUTP(MDP_BASE + 0x50374, 0x24800021);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x210000cd);
+	MDP_OUTP(MDP_BASE + 0x50378, 0x23800020);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x220000ce);
+	MDP_OUTP(MDP_BASE + 0x5037c, 0x2300001d);
+}
+
+static void mdp_load_bc_downscale_table_x_point6TOpoint8(void)
+{
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe000070);
+	MDP_OUTP(MDP_BASE + 0x50280, 0x4bc00068);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe000078);
+	MDP_OUTP(MDP_BASE + 0x50284, 0x4bc00060);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe000080);
+	MDP_OUTP(MDP_BASE + 0x50288, 0x4b800059);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe000089);
+	MDP_OUTP(MDP_BASE + 0x5028c, 0x4b000052);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe400091);
+	MDP_OUTP(MDP_BASE + 0x50290, 0x4a80004b);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe40009a);
+	MDP_OUTP(MDP_BASE + 0x50294, 0x4a000044);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe8000a3);
+	MDP_OUTP(MDP_BASE + 0x50298, 0x4940003d);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfec000ac);
+	MDP_OUTP(MDP_BASE + 0x5029c, 0x48400037);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xff0000b4);
+	MDP_OUTP(MDP_BASE + 0x502a0, 0x47800031);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xff8000bd);
+	MDP_OUTP(MDP_BASE + 0x502a4, 0x4640002b);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xc5);
+	MDP_OUTP(MDP_BASE + 0x502a8, 0x45000026);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x8000ce);
+	MDP_OUTP(MDP_BASE + 0x502ac, 0x43800021);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x10000d6);
+	MDP_OUTP(MDP_BASE + 0x502b0, 0x4240001c);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x18000df);
+	MDP_OUTP(MDP_BASE + 0x502b4, 0x40800018);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x24000e6);
+	MDP_OUTP(MDP_BASE + 0x502b8, 0x3f000014);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x30000ee);
+	MDP_OUTP(MDP_BASE + 0x502bc, 0x3d400010);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x40000f5);
+	MDP_OUTP(MDP_BASE + 0x502c0, 0x3b80000c);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x50000fc);
+	MDP_OUTP(MDP_BASE + 0x502c4, 0x39800009);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x6000102);
+	MDP_OUTP(MDP_BASE + 0x502c8, 0x37c00006);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x7000109);
+	MDP_OUTP(MDP_BASE + 0x502cc, 0x35800004);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x840010e);
+	MDP_OUTP(MDP_BASE + 0x502d0, 0x33800002);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x9800114);
+	MDP_OUTP(MDP_BASE + 0x502d4, 0x31400000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xac00119);
+	MDP_OUTP(MDP_BASE + 0x502d8, 0x2f4003fe);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xc40011e);
+	MDP_OUTP(MDP_BASE + 0x502dc, 0x2d0003fc);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xdc00121);
+	MDP_OUTP(MDP_BASE + 0x502e0, 0x2b0003fb);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf400125);
+	MDP_OUTP(MDP_BASE + 0x502e4, 0x28c003fa);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x11000128);
+	MDP_OUTP(MDP_BASE + 0x502e8, 0x268003f9);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x12c0012a);
+	MDP_OUTP(MDP_BASE + 0x502ec, 0x244003f9);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1480012c);
+	MDP_OUTP(MDP_BASE + 0x502f0, 0x224003f8);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1640012e);
+	MDP_OUTP(MDP_BASE + 0x502f4, 0x200003f8);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1800012f);
+	MDP_OUTP(MDP_BASE + 0x502f8, 0x1e0003f8);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1a00012f);
+	MDP_OUTP(MDP_BASE + 0x502fc, 0x1c0003f8);
+}
+
+static void mdp_load_bc_downscale_table_y_point6TOpoint8(void)
+{
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe000070);
+	MDP_OUTP(MDP_BASE + 0x50300, 0x4bc00068);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe000078);
+	MDP_OUTP(MDP_BASE + 0x50304, 0x4bc00060);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe000080);
+	MDP_OUTP(MDP_BASE + 0x50308, 0x4b800059);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe000089);
+	MDP_OUTP(MDP_BASE + 0x5030c, 0x4b000052);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe400091);
+	MDP_OUTP(MDP_BASE + 0x50310, 0x4a80004b);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe40009a);
+	MDP_OUTP(MDP_BASE + 0x50314, 0x4a000044);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe8000a3);
+	MDP_OUTP(MDP_BASE + 0x50318, 0x4940003d);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfec000ac);
+	MDP_OUTP(MDP_BASE + 0x5031c, 0x48400037);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xff0000b4);
+	MDP_OUTP(MDP_BASE + 0x50320, 0x47800031);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xff8000bd);
+	MDP_OUTP(MDP_BASE + 0x50324, 0x4640002b);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xc5);
+	MDP_OUTP(MDP_BASE + 0x50328, 0x45000026);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x8000ce);
+	MDP_OUTP(MDP_BASE + 0x5032c, 0x43800021);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x10000d6);
+	MDP_OUTP(MDP_BASE + 0x50330, 0x4240001c);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x18000df);
+	MDP_OUTP(MDP_BASE + 0x50334, 0x40800018);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x24000e6);
+	MDP_OUTP(MDP_BASE + 0x50338, 0x3f000014);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x30000ee);
+	MDP_OUTP(MDP_BASE + 0x5033c, 0x3d400010);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x40000f5);
+	MDP_OUTP(MDP_BASE + 0x50340, 0x3b80000c);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x50000fc);
+	MDP_OUTP(MDP_BASE + 0x50344, 0x39800009);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x6000102);
+	MDP_OUTP(MDP_BASE + 0x50348, 0x37c00006);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x7000109);
+	MDP_OUTP(MDP_BASE + 0x5034c, 0x35800004);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x840010e);
+	MDP_OUTP(MDP_BASE + 0x50350, 0x33800002);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x9800114);
+	MDP_OUTP(MDP_BASE + 0x50354, 0x31400000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xac00119);
+	MDP_OUTP(MDP_BASE + 0x50358, 0x2f4003fe);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xc40011e);
+	MDP_OUTP(MDP_BASE + 0x5035c, 0x2d0003fc);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xdc00121);
+	MDP_OUTP(MDP_BASE + 0x50360, 0x2b0003fb);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf400125);
+	MDP_OUTP(MDP_BASE + 0x50364, 0x28c003fa);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x11000128);
+	MDP_OUTP(MDP_BASE + 0x50368, 0x268003f9);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x12c0012a);
+	MDP_OUTP(MDP_BASE + 0x5036c, 0x244003f9);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1480012c);
+	MDP_OUTP(MDP_BASE + 0x50370, 0x224003f8);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1640012e);
+	MDP_OUTP(MDP_BASE + 0x50374, 0x200003f8);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1800012f);
+	MDP_OUTP(MDP_BASE + 0x50378, 0x1e0003f8);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x1a00012f);
+	MDP_OUTP(MDP_BASE + 0x5037c, 0x1c0003f8);
+}
+
+static void mdp_load_bc_downscale_table_x_point8TO1(void)
+{
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50280, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xff80000d);
+	MDP_OUTP(MDP_BASE + 0x50284, 0x7ec003f9);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfec0001c);
+	MDP_OUTP(MDP_BASE + 0x50288, 0x7d4003f3);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe40002b);
+	MDP_OUTP(MDP_BASE + 0x5028c, 0x7b8003ed);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfd80003c);
+	MDP_OUTP(MDP_BASE + 0x50290, 0x794003e8);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfcc0004d);
+	MDP_OUTP(MDP_BASE + 0x50294, 0x76c003e4);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfc40005f);
+	MDP_OUTP(MDP_BASE + 0x50298, 0x73c003e0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfb800071);
+	MDP_OUTP(MDP_BASE + 0x5029c, 0x708003de);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfac00085);
+	MDP_OUTP(MDP_BASE + 0x502a0, 0x6d0003db);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfa000098);
+	MDP_OUTP(MDP_BASE + 0x502a4, 0x698003d9);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf98000ac);
+	MDP_OUTP(MDP_BASE + 0x502a8, 0x654003d8);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf8c000c1);
+	MDP_OUTP(MDP_BASE + 0x502ac, 0x610003d7);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf84000d5);
+	MDP_OUTP(MDP_BASE + 0x502b0, 0x5c8003d7);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf7c000e9);
+	MDP_OUTP(MDP_BASE + 0x502b4, 0x580003d7);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf74000fd);
+	MDP_OUTP(MDP_BASE + 0x502b8, 0x534003d8);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf6c00112);
+	MDP_OUTP(MDP_BASE + 0x502bc, 0x4e8003d8);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf6800126);
+	MDP_OUTP(MDP_BASE + 0x502c0, 0x494003da);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf600013a);
+	MDP_OUTP(MDP_BASE + 0x502c4, 0x448003db);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf600014d);
+	MDP_OUTP(MDP_BASE + 0x502c8, 0x3f4003dd);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf5c00160);
+	MDP_OUTP(MDP_BASE + 0x502cc, 0x3a4003df);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf5c00172);
+	MDP_OUTP(MDP_BASE + 0x502d0, 0x354003e1);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf5c00184);
+	MDP_OUTP(MDP_BASE + 0x502d4, 0x304003e3);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf6000195);
+	MDP_OUTP(MDP_BASE + 0x502d8, 0x2b0003e6);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf64001a6);
+	MDP_OUTP(MDP_BASE + 0x502dc, 0x260003e8);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf6c001b4);
+	MDP_OUTP(MDP_BASE + 0x502e0, 0x214003eb);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf78001c2);
+	MDP_OUTP(MDP_BASE + 0x502e4, 0x1c4003ee);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf80001cf);
+	MDP_OUTP(MDP_BASE + 0x502e8, 0x17c003f1);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf90001db);
+	MDP_OUTP(MDP_BASE + 0x502ec, 0x134003f3);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfa0001e5);
+	MDP_OUTP(MDP_BASE + 0x502f0, 0xf0003f6);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfb4001ee);
+	MDP_OUTP(MDP_BASE + 0x502f4, 0xac003f9);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfcc001f5);
+	MDP_OUTP(MDP_BASE + 0x502f8, 0x70003fb);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe4001fb);
+	MDP_OUTP(MDP_BASE + 0x502fc, 0x34003fe);
+}
+
+static void mdp_load_bc_downscale_table_y_point8TO1(void)
+{
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
+	MDP_OUTP(MDP_BASE + 0x50300, 0x7fc00000);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xff80000d);
+	MDP_OUTP(MDP_BASE + 0x50304, 0x7ec003f9);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfec0001c);
+	MDP_OUTP(MDP_BASE + 0x50308, 0x7d4003f3);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe40002b);
+	MDP_OUTP(MDP_BASE + 0x5030c, 0x7b8003ed);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfd80003c);
+	MDP_OUTP(MDP_BASE + 0x50310, 0x794003e8);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfcc0004d);
+	MDP_OUTP(MDP_BASE + 0x50314, 0x76c003e4);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfc40005f);
+	MDP_OUTP(MDP_BASE + 0x50318, 0x73c003e0);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfb800071);
+	MDP_OUTP(MDP_BASE + 0x5031c, 0x708003de);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfac00085);
+	MDP_OUTP(MDP_BASE + 0x50320, 0x6d0003db);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfa000098);
+	MDP_OUTP(MDP_BASE + 0x50324, 0x698003d9);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf98000ac);
+	MDP_OUTP(MDP_BASE + 0x50328, 0x654003d8);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf8c000c1);
+	MDP_OUTP(MDP_BASE + 0x5032c, 0x610003d7);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf84000d5);
+	MDP_OUTP(MDP_BASE + 0x50330, 0x5c8003d7);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf7c000e9);
+	MDP_OUTP(MDP_BASE + 0x50334, 0x580003d7);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf74000fd);
+	MDP_OUTP(MDP_BASE + 0x50338, 0x534003d8);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf6c00112);
+	MDP_OUTP(MDP_BASE + 0x5033c, 0x4e8003d8);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf6800126);
+	MDP_OUTP(MDP_BASE + 0x50340, 0x494003da);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf600013a);
+	MDP_OUTP(MDP_BASE + 0x50344, 0x448003db);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf600014d);
+	MDP_OUTP(MDP_BASE + 0x50348, 0x3f4003dd);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf5c00160);
+	MDP_OUTP(MDP_BASE + 0x5034c, 0x3a4003df);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf5c00172);
+	MDP_OUTP(MDP_BASE + 0x50350, 0x354003e1);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf5c00184);
+	MDP_OUTP(MDP_BASE + 0x50354, 0x304003e3);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf6000195);
+	MDP_OUTP(MDP_BASE + 0x50358, 0x2b0003e6);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf64001a6);
+	MDP_OUTP(MDP_BASE + 0x5035c, 0x260003e8);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf6c001b4);
+	MDP_OUTP(MDP_BASE + 0x50360, 0x214003eb);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf78001c2);
+	MDP_OUTP(MDP_BASE + 0x50364, 0x1c4003ee);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf80001cf);
+	MDP_OUTP(MDP_BASE + 0x50368, 0x17c003f1);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xf90001db);
+	MDP_OUTP(MDP_BASE + 0x5036c, 0x134003f3);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfa0001e5);
+	MDP_OUTP(MDP_BASE + 0x50370, 0xf0003f6);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfb4001ee);
+	MDP_OUTP(MDP_BASE + 0x50374, 0xac003f9);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfcc001f5);
+	MDP_OUTP(MDP_BASE + 0x50378, 0x70003fb);
+	MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe4001fb);
+	MDP_OUTP(MDP_BASE + 0x5037c, 0x34003fe);
+}
+
+static int mdp_get_edge_cond(MDPIBUF *iBuf, uint32 *dup, uint32 *dup2)
+{
+	uint32 reg;
+	uint32 dst_roi_width;	/* Dimensions of DST ROI. */
+	uint32 dst_roi_height;	/* Used to calculate scaling ratios. */
+
+	/*
+	 * positions of the luma pixels (relative to the image) required for
+	 * scaling the ROI
+	 */
+	int32 luma_interp_point_left = 0; /* left-most luma pixel needed */
+	int32 luma_interp_point_right = 0; /* right-most luma pixel needed */
+	int32 luma_interp_point_top = 0; /* top-most luma pixel needed */
+	int32 luma_interp_point_bottom = 0; /* bottom-most luma pixel needed */
+
+	/*
+	 * positions of the chroma pixels (relative to the image) required for
+	 * interpolating a chroma value at all required luma positions
+	 */
+	/* left-most chroma pixel needed */
+	int32 chroma_interp_point_left = 0;
+	/* right-most chroma pixel needed */
+	int32 chroma_interp_point_right = 0;
+	/* top-most chroma pixel needed */
+	int32 chroma_interp_point_top = 0;
+	/* bottom-most chroma pixel needed */
+	int32 chroma_interp_point_bottom = 0;
+
+	/*
+	 * a rectangular region within the chroma plane of the "image".
+	 * Chroma pixels falling inside this rectangle belong to the ROI.
+	 */
+	int32 chroma_bound_left = 0;
+	int32 chroma_bound_right = 0;
+	int32 chroma_bound_top = 0;
+	int32 chroma_bound_bottom = 0;
+
+	/*
+	 * number of chroma pixels to replicate on the left, right,
+	 * top and bottom edge of the ROI.
+	 */
+	int32 chroma_repeat_left = 0;
+	int32 chroma_repeat_right = 0;
+	int32 chroma_repeat_top = 0;
+	int32 chroma_repeat_bottom = 0;
+
+	/*
+	 * number of luma pixels to replicate on the left, right,
+	 * top and bottom edge of the ROI.
+	 */
+	int32 luma_repeat_left = 0;
+	int32 luma_repeat_right = 0;
+	int32 luma_repeat_top = 0;
+	int32 luma_repeat_bottom = 0;
+
+	boolean chroma_edge_enable;
+
+	uint32 _is_scale_enabled = 0;
+	uint32 _is_yuv_offsite_vertical = 0;
+
+	/* fg edge duplicate */
+	reg = 0x0;
+
+	if (iBuf->mdpImg.mdpOp & MDPOP_ASCALE) {	/* if scaling enabled */
+
+		_is_scale_enabled = 1;
+
+		/*
+		 * if the rotation mode involves a 90 deg rotation, swap
+		 * dst_roi_width with dst_roi_height.
+		 * Scaling ratios are based on the source ROI dimensions and
+		 * the dst ROI dimensions before rotation.
+		 */
+		if (iBuf->mdpImg.mdpOp & MDPOP_ROT90) {
+			dst_roi_width = iBuf->roi.dst_height;
+			dst_roi_height = iBuf->roi.dst_width;
+		} else {
+			dst_roi_width = iBuf->roi.dst_width;
+			dst_roi_height = iBuf->roi.dst_height;
+		}
+
+		/*
+		 * Find out the luma pixels needed for scaling in the
+		 * x direction (LEFT and RIGHT).  Locations of pixels are
+		 * relative to the ROI. The upper-left corner of the ROI
+		 * corresponds to coordinates (0,0). Also set the number of
+		 * luma pixels to repeat.
+		 */
+		if (iBuf->roi.width > 3 * dst_roi_width) {
+			/* scale factor < 1/3 */
+			luma_interp_point_left = 0;
+			luma_interp_point_right = (iBuf->roi.width - 1);
+			luma_repeat_left = 0;
+			luma_repeat_right = 0;
+		} else if (iBuf->roi.width == 3 * dst_roi_width) {
+			/* scale factor == 1/3 */
+			luma_interp_point_left = 0;
+			luma_interp_point_right = (iBuf->roi.width - 1) + 1;
+			luma_repeat_left = 0;
+			luma_repeat_right = 1;
+		} else if ((iBuf->roi.width > dst_roi_width) &&
+			   (iBuf->roi.width < 3 * dst_roi_width)) {
+			/* 1/3 < scale factor < 1 */
+			luma_interp_point_left = -1;
+			luma_interp_point_right = (iBuf->roi.width - 1) + 1;
+			luma_repeat_left = 1;
+			luma_repeat_right = 1;
+		}
+
+		else if (iBuf->roi.width == dst_roi_width) {
+			/* scale factor == 1 */
+			luma_interp_point_left = -1;
+			luma_interp_point_right = (iBuf->roi.width - 1) + 2;
+			luma_repeat_left = 1;
+			luma_repeat_right = 2;
+		} else {	/* (iBuf->roi.width < dst_roi_width) */
+			  /* scale factor > 1 */
+			luma_interp_point_left = -2;
+			luma_interp_point_right = (iBuf->roi.width - 1) + 2;
+			luma_repeat_left = 2;
+			luma_repeat_right = 2;
+		}
+
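+		/*
+		 * For example, a source ROI width of 100 scaled down to a
+		 * destination width of 60 takes the 1/3 < scale factor < 1
+		 * branch above: luma_interp_point_left = -1,
+		 * luma_interp_point_right = 100, with one luma pixel
+		 * repeated on both the left and the right edge.
+		 */
+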
+		/*
+		 * Find out the number of pixels needed for scaling in the
+		 * y direction (TOP and BOTTOM).  Locations of pixels are
+		 * relative to the ROI. The upper-left corner of the ROI
+		 * corresponds to coordinates (0,0). Also set the number of
+		 * luma pixels to repeat.
+		 */
+		if (iBuf->roi.height > 3 * dst_roi_height) {
+			/* scale factor < 1/3 */
+			luma_interp_point_top = 0;
+			luma_interp_point_bottom = (iBuf->roi.height - 1);
+			luma_repeat_top = 0;
+			luma_repeat_bottom = 0;
+		} else if (iBuf->roi.height == 3 * dst_roi_height) {
+			/* scale factor == 1/3 */
+			luma_interp_point_top = 0;
+			luma_interp_point_bottom = (iBuf->roi.height - 1) + 1;
+			luma_repeat_top = 0;
+			luma_repeat_bottom = 1;
+		} else if ((iBuf->roi.height > dst_roi_height) &&
+			   (iBuf->roi.height < 3 * dst_roi_height)) {
+			/* 1/3 < scale factor < 1 */
+			luma_interp_point_top = -1;
+			luma_interp_point_bottom = (iBuf->roi.height - 1) + 1;
+			luma_repeat_top = 1;
+			luma_repeat_bottom = 1;
+		} else if (iBuf->roi.height == dst_roi_height) {
+			/* scale factor == 1 */
+			luma_interp_point_top = -1;
+			luma_interp_point_bottom = (iBuf->roi.height - 1) + 2;
+			luma_repeat_top = 1;
+			luma_repeat_bottom = 2;
+		} else {	/* (iBuf->roi.height < dst_roi_height) */
+			 /* scale factor > 1 */
+			luma_interp_point_top = -2;
+			luma_interp_point_bottom = (iBuf->roi.height - 1) + 2;
+			luma_repeat_top = 2;
+			luma_repeat_bottom = 2;
+		}
+	}			/* if (iBuf->mdpImg.mdpOp & MDPOP_ASCALE) */
+	else {			/* scaling disabled */
+		/*
+		 * Since no scaling is needed, Tile Fetch does not require
+		 * any more luma pixels than what the ROI contains.
+		 */
+		luma_interp_point_left = (int32) 0;
+		luma_interp_point_right = (int32) (iBuf->roi.width - 1);
+		luma_interp_point_top = (int32) 0;
+		luma_interp_point_bottom = (int32) (iBuf->roi.height - 1);
+
+		luma_repeat_left = 0;
+		luma_repeat_right = 0;
+		luma_repeat_top = 0;
+		luma_repeat_bottom = 0;
+	}
+
+	/*
+	 * After adding the ROI offsets, we have locations of
+	 * luma_interp_points relative to the image.
+	 */
+	luma_interp_point_left += (int32) (iBuf->roi.x);
+	luma_interp_point_right += (int32) (iBuf->roi.x);
+	luma_interp_point_top += (int32) (iBuf->roi.y);
+	luma_interp_point_bottom += (int32) (iBuf->roi.y);
+
+	/*
+	 * After adding the ROI offsets, we have locations of
+	 * chroma_interp_points relative to the image.
+	 */
+	chroma_interp_point_left = luma_interp_point_left;
+	chroma_interp_point_right = luma_interp_point_right;
+	chroma_interp_point_top = luma_interp_point_top;
+	chroma_interp_point_bottom = luma_interp_point_bottom;
+
+	chroma_edge_enable = TRUE;
+	/* find out which chroma pixels are needed for chroma upsampling. */
+	switch (iBuf->mdpImg.imgType) {
+		/*
+		 * cosite in horizontal axis
+		 * fully sampled in vertical axis
+		 */
+	case MDP_Y_CBCR_H2V1:
+	case MDP_Y_CRCB_H2V1:
+	case MDP_YCRYCB_H2V1:
+		/* floor( luma_interp_point_left / 2 ); */
+		chroma_interp_point_left = luma_interp_point_left >> 1;
+		/* floor( ( luma_interp_point_right + 1 ) / 2 ); */
+		chroma_interp_point_right = (luma_interp_point_right + 1) >> 1;
+
+		chroma_interp_point_top = luma_interp_point_top;
+		chroma_interp_point_bottom = luma_interp_point_bottom;
+		break;
+
+		/*
+		 * cosite in horizontal axis
+		 * offsite in vertical axis
+		 */
+	case MDP_Y_CBCR_H2V2:
+	case MDP_Y_CRCB_H2V2:
+		/* floor( luma_interp_point_left / 2) */
+		chroma_interp_point_left = luma_interp_point_left >> 1;
+
+		/* floor( ( luma_interp_point_right + 1 )/ 2 ) */
+		chroma_interp_point_right = (luma_interp_point_right + 1) >> 1;
+
+		/* floor( (luma_interp_point_top - 1 ) / 2 ) */
+		chroma_interp_point_top = (luma_interp_point_top - 1) >> 1;
+
+		/* floor( ( luma_interp_point_bottom + 1 ) / 2 ) */
+		chroma_interp_point_bottom =
+		    (luma_interp_point_bottom + 1) >> 1;
+
+		_is_yuv_offsite_vertical = 1;
+		break;
+
+	default:
+		chroma_edge_enable = FALSE;
+		chroma_interp_point_left = luma_interp_point_left;
+		chroma_interp_point_right = luma_interp_point_right;
+		chroma_interp_point_top = luma_interp_point_top;
+		chroma_interp_point_bottom = luma_interp_point_bottom;
+
+		break;
+	}
+
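+	/*
+	 * For example, for an H2V2 image whose luma interpolation range is
+	 * [9, 58] in both x and y, the expressions above give a chroma
+	 * interpolation range of [4, 29] horizontally (9 >> 1 = 4,
+	 * (58 + 1) >> 1 = 29) and [4, 29] vertically ((9 - 1) >> 1 = 4,
+	 * (58 + 1) >> 1 = 29).
+	 */
+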
+	/* calculate the chroma edge only if the image type is in the YUV domain */
+	if (chroma_edge_enable) {
+		/* Define which chroma pixels belong to the ROI */
+		switch (iBuf->mdpImg.imgType) {
+			/*
+			 * Cosite in horizontal direction, and fully sampled
+			 * in vertical direction.
+			 */
+		case MDP_Y_CBCR_H2V1:
+		case MDP_Y_CRCB_H2V1:
+		case MDP_YCRYCB_H2V1:
+			/*
+			 * the width of the chroma ROI is 1/2 the width of
+			 * the luma ROI; the height of the chroma ROI is the
+			 * same as the height of the luma ROI
+			 */
+			chroma_bound_left = iBuf->roi.x / 2;
+
+			/* there are half as many chroma pixels as luma pixels */
+			chroma_bound_right =
+			    (iBuf->roi.width + iBuf->roi.x - 1) / 2;
+			chroma_bound_top = iBuf->roi.y;
+			chroma_bound_bottom =
+			    (iBuf->roi.height + iBuf->roi.y - 1);
+			break;
+
+		case MDP_Y_CBCR_H2V2:
+		case MDP_Y_CRCB_H2V2:
+			/*
+			 * cosite in the horizontal direction, offsite in the
+			 * vertical direction; the width and height of the
+			 * chroma ROI are each 1/2 of the luma ROI
+			 */
+
+			chroma_bound_left = iBuf->roi.x / 2;
+			chroma_bound_right =
+			    (iBuf->roi.width + iBuf->roi.x - 1) / 2;
+			chroma_bound_top = iBuf->roi.y / 2;
+			chroma_bound_bottom =
+			    (iBuf->roi.height + iBuf->roi.y - 1) / 2;
+			break;
+
+		default:
+			/*
+			 * If no valid chroma sub-sampling format is
+			 * specified, assume 4:4:4 (i.e. fully sampled).  Set
+			 * the chroma ROI boundaries to the same values as
+			 * the luma ROI boundaries.
+			 */
+			chroma_bound_left = iBuf->roi.x;
+			chroma_bound_right = iBuf->roi.width + iBuf->roi.x - 1;
+			chroma_bound_top = iBuf->roi.y;
+			chroma_bound_bottom =
+			    (iBuf->roi.height + iBuf->roi.y - 1);
+			break;
+		}
+
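+		/*
+		 * For example, for an H2V1 image with iBuf->roi.x = 10 and
+		 * iBuf->roi.width = 100, the case above yields
+		 * chroma_bound_left = 10 / 2 = 5 and chroma_bound_right =
+		 * (100 + 10 - 1) / 2 = 54, while the vertical chroma bounds
+		 * match the luma ROI.
+		 */
+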
+		/*
+		 * Knowing which chroma pixels are needed, and which chroma
+		 * pixels belong to the ROI (i.e. are available for fetching),
+		 * calculate how many chroma pixels Tile Fetch needs to
+		 * duplicate.  If any required chroma pixel falls outside
+		 * of the ROI, Tile Fetch must obtain it by replicating
+		 * pixels.
+		 */
+		if (chroma_bound_left > chroma_interp_point_left)
+			chroma_repeat_left =
+			    chroma_bound_left - chroma_interp_point_left;
+		else
+			chroma_repeat_left = 0;
+
+		if (chroma_interp_point_right > chroma_bound_right)
+			chroma_repeat_right =
+			    chroma_interp_point_right - chroma_bound_right;
+		else
+			chroma_repeat_right = 0;
+
+		if (chroma_bound_top > chroma_interp_point_top)
+			chroma_repeat_top =
+			    chroma_bound_top - chroma_interp_point_top;
+		else
+			chroma_repeat_top = 0;
+
+		if (chroma_interp_point_bottom > chroma_bound_bottom)
+			chroma_repeat_bottom =
+			    chroma_interp_point_bottom - chroma_bound_bottom;
+		else
+			chroma_repeat_bottom = 0;
+
+		if (_is_scale_enabled && (iBuf->roi.height == 1)
+		    && _is_yuv_offsite_vertical) {
+			chroma_repeat_bottom = 3;
+			chroma_repeat_top = 0;
+		}
+	}
+	/* make sure chroma repeats are non-negative */
+	if ((chroma_repeat_left < 0) || (chroma_repeat_right < 0) ||
+	    (chroma_repeat_top < 0) || (chroma_repeat_bottom < 0))
+		return -1;
+
+	/* make sure chroma repeats are no larger than 3 pixels */
+	if ((chroma_repeat_left > 3) || (chroma_repeat_right > 3) ||
+	    (chroma_repeat_top > 3) || (chroma_repeat_bottom > 3))
+		return -1;
+
+	/* make sure luma repeats are non-negative */
+	if ((luma_repeat_left < 0) || (luma_repeat_right < 0) ||
+	    (luma_repeat_top < 0) || (luma_repeat_bottom < 0))
+		return -1;
+
+	/* make sure luma repeats are no larger than 3 pixels */
+	if ((luma_repeat_left > 3) || (luma_repeat_right > 3) ||
+	    (luma_repeat_top > 3) || (luma_repeat_bottom > 3))
+		return -1;
+
+	/* write chroma_repeat_left to register */
+	reg |= (chroma_repeat_left & 3) << MDP_LEFT_CHROMA;
+
+	/* write chroma_repeat_right to register */
+	reg |= (chroma_repeat_right & 3) << MDP_RIGHT_CHROMA;
+
+	/* write chroma_repeat_top to register */
+	reg |= (chroma_repeat_top & 3) << MDP_TOP_CHROMA;
+
+	/* write chroma_repeat_bottom to register */
+	reg |= (chroma_repeat_bottom & 3) << MDP_BOTTOM_CHROMA;
+
+	/* write luma_repeat_left to register */
+	reg |= (luma_repeat_left & 3) << MDP_LEFT_LUMA;
+
+	/* write luma_repeat_right to register */
+	reg |= (luma_repeat_right & 3) << MDP_RIGHT_LUMA;
+
+	/* write luma_repeat_top to register */
+	reg |= (luma_repeat_top & 3) << MDP_TOP_LUMA;
+
+	/* write luma_repeat_bottom to register */
+	reg |= (luma_repeat_bottom & 3) << MDP_BOTTOM_LUMA;
+
+	/* done with reg */
+	*dup = reg;
+
+	/* bg edge duplicate */
+	reg = 0x0;
+
+	switch (iBuf->ibuf_type) {
+	case MDP_Y_CBCR_H2V2:
+	case MDP_Y_CRCB_H2V2:
+		/*
+		 * Edge condition for MDP_Y_CRCB/CBCR_H2V2 cosite only.
+		 * For 420 cosite, one chroma pixel is replicated on all
+		 * sides except the left, so reg 101b8 should be 0x0209.
+		 * For 420 offsite, one chroma pixel is replicated on all
+		 * sides.
+		 */
+		if (iBuf->roi.lcd_y == 0) {
+			reg |= BIT(MDP_TOP_CHROMA);
+		}
+
+		if ((iBuf->roi.lcd_y + iBuf->roi.dst_height) ==
+		    iBuf->ibuf_height) {
+			reg |= BIT(MDP_BOTTOM_CHROMA);
+		}
+
+		if (((iBuf->roi.lcd_x + iBuf->roi.dst_width) ==
+		     iBuf->ibuf_width) && ((iBuf->roi.dst_width % 2) == 0)) {
+			reg |= BIT(MDP_RIGHT_CHROMA);
+		}
+
+		break;
+
+	case MDP_Y_CBCR_H2V1:
+	case MDP_Y_CRCB_H2V1:
+	case MDP_YCRYCB_H2V1:
+		if (((iBuf->roi.lcd_x + iBuf->roi.dst_width) ==
+		     iBuf->ibuf_width) && ((iBuf->roi.dst_width % 2) == 0)) {
+			reg |= BIT(MDP_RIGHT_CHROMA);
+		}
+		break;
+	default:
+		break;
+	}
+
+	*dup2 = reg;
+
+	return 0;
+}
+
+#define ADJUST_IP		/* for 1/3 scale factor fix */
+
+static int mdp_calc_scale_params(
+/* ROI origin coordinate for the dimension */
+					uint32 org,
+/* src ROI dimension */
+					uint32 dim_in,
+/* scaled ROI dimension*/
+					uint32 dim_out,
+/* is this ROI width dimension? */
+					boolean is_W,
+/* initial phase location address */
+					int32 *phase_init_ptr,
+/* phase increment location address */
+					uint32 *phase_step_ptr,
+/* ROI start over-fetch location address */
+					uint32 *num_repl_beg_ptr,
+/* ROI end over-fetch location address */
+					uint32 *num_repl_end_ptr)
+{
+	boolean rpa_on = FALSE;
+	int init_phase = 0;
+	uint32 beg_of = 0;
+	uint32 end_of = 0;
+	uint64 numer = 0;
+	uint64 denom = 0;
+	/*uint64 inverter = 1; */
+	int64 point5 = 1;
+	int64 one = 1;
+	int64 k1, k2, k3, k4;	/* linear equation coefficients */
+	uint64 int_mask;
+	uint64 fract_mask;
+	uint64 Os;
+	int64 Osprime;
+	int64 Od;
+	int64 Odprime;
+	int64 Oreq;
+	uint64 Es;
+	uint64 Ed;
+	uint64 Ereq;
+#ifdef ADJUST_IP
+	int64 IP64;
+	int64 delta;
+#endif
+	uint32 mult;
+
+	/*
+	 * The phase accumulator should really be rational for all cases in a
+	 * general purpose polyphase scaler for a tiled architecture with
+	 * non-zero origin capability, because there is no way to represent
+	 * certain scale factors in fixed point regardless of precision.
+	 * The error incurred in attempting to use fixed point is most
+	 * egregious for SF where 1/SF is an integral multiple of 1/3.
+	 *
+	 * However, since the MDP2 has already been committed to HW, we
+	 * only use the rational phase accumulator (RPA) when 1/SF is an
+	 * integral multiple of 1/3.  This will help minimize regressions in
+	 * matching the HW to the C-Sim.
+	 */
+	/*
+	 * Set the RPA flag for this dimension.
+	 *
+	 * In order for 1/SF (dim_in/dim_out) to be an integral multiple of
+	 * 1/3, dim_out must be an integral multiple of 3.
+	 */
+	if (!(dim_out % 3)) {
+		mult = dim_out / 3;
+		rpa_on = (!(dim_in % mult));
+	}
+
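+	/*
+	 * For example, dim_in = 270 and dim_out = 90 give 1/SF = 3, so
+	 * dim_out % 3 == 0, mult = 30 and dim_in % mult == 0, turning the
+	 * RPA on.  With dim_out = 100, dim_out % 3 != 0 and the fixed
+	 * point path below is used instead.
+	 */
+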
+	numer = dim_out;
+	denom = dim_in;
+
+	/*
+	 * convert to U30.34 before division
+	 *
+	 * The K vectors carry 4 extra bits of precision
+	 * and are rounded.
+	 *
+	 * We initially go 5 bits over then round by adding
+	 * 1 and right shifting by 1
+	 * so final result is U31.33
+	 */
+	numer <<= PQF_PLUS_5;
+
+	/* now calculate the scale factor (aka k3) */
+	k3 = ((mdp_do_div(numer, denom) + 1) >> 1);
+
+	/* check scale factor for legal range [0.25 - 4.0] */
+	if (((k3 >> 4) < (1LL << PQF_MINUS_2)) ||
+	    ((k3 >> 4) > (1LL << PQF_PLUS_2))) {
+		return -1;
+	}
+
+	/* calculate inverse scale factor (aka k1) for phase init */
+	numer = dim_in;
+	denom = dim_out;
+	numer <<= PQF_PLUS_5;
+	k1 = ((mdp_do_div(numer, denom) + 1) >> 1);
+
+	/*
+	 * calculate initial phase and ROI overfetch
+	 */
+	/* convert point5 & one to S39.24 (will always be positive) */
+	point5 <<= (PQF_PLUS_4 - 1);
+	one <<= PQF_PLUS_4;
+	k2 = ((k1 - one) >> 1);
+	init_phase = (int)(k2 >> 4);
+	k4 = ((k3 - one) >> 1);
+	if (k3 == one) {
+		/* the simple case; SF = 1.0 */
+		beg_of = 1;
+		end_of = 2;
+	} else {
+		/* calculate the masks */
+		fract_mask = one - 1;
+		int_mask = ~fract_mask;
+
+		if (!rpa_on) {
+			/*
+			 * FIXED POINT IMPLEMENTATION
+			 */
+			if (!org) {
+				/* A fairly simple case; ROI origin = 0 */
+				if (k1 < one) {
+					/* upscaling */
+					beg_of = end_of = 2;
+				}
+				/* 0.33 <= SF < 1.0 */
+				else if (k1 < (3LL << PQF_PLUS_4))
+					beg_of = end_of = 1;
+				/* 0.33 == SF */
+				else if (k1 == (3LL << PQF_PLUS_4)) {
+					beg_of = 0;
+					end_of = 1;
+				}
+				/* 0.25 <= SF < 0.33 */
+				else
+					beg_of = end_of = 0;
+			} else {
+				/*
+				 * The complicated case; ROI origin != 0
+				 * init_phase needs to be adjusted
+				 * OF is also position dependent
+				 */
+
+				/* map (org - .5) into destination space */
+				Os = ((uint64) org << 1) - 1;
+				Od = ((k3 * Os) >> 1) + k4;
+
+				/* take the ceiling */
+				Odprime = (Od & int_mask);
+				if (Odprime != Od)
+					Odprime += one;
+
+				/* now map that back to source space */
+				Osprime = (k1 * (Odprime >> PQF_PLUS_4)) + k2;
+
+				/* then floor & decrement to calculate the required
+				   starting coordinate */
+				Oreq = (Osprime & int_mask) - one;
+
+				/* calculate end coord in destination space then map to
+				   source space */
+				Ed = Odprime +
+				    ((uint64) dim_out << PQF_PLUS_4) - one;
+				Es = (k1 * (Ed >> PQF_PLUS_4)) + k2;
+
+				/* now floor & increment by 2 to calculate the required
+				   ending coordinate */
+				Ereq = (Es & int_mask) + (one << 1);
+
+				/* calculate initial phase */
+#ifdef ADJUST_IP
+
+				IP64 = Osprime - Oreq;
+				delta = ((int64) (org) << PQF_PLUS_4) - Oreq;
+				IP64 -= delta;
+
+				/* limit to valid range before the left shift */
+				delta = (IP64 & (1LL << 63)) ? 4 : -4;
+				delta <<= PQF_PLUS_4;
+				while (abs((int)(IP64 >> PQF_PLUS_4)) > 4)
+					IP64 += delta;
+
+				/* right shift to account for extra bits of precision */
+				init_phase = (int)(IP64 >> 4);
+
+#else /* ADJUST_IP */
+
+				/* just calculate the real initial phase */
+				init_phase = (int)((Osprime - Oreq) >> 4);
+
+#endif /* ADJUST_IP */
+
+				/* calculate the overfetch */
+				beg_of = org - (uint32) (Oreq >> PQF_PLUS_4);
+				end_of =
+				    (uint32) (Ereq >> PQF_PLUS_4) - (org +
+								     dim_in -
+								     1);
+			}
+		} else {
+			/*
+			 * RPA IMPLEMENTATION
+			 *
+			 * init_phase needs to be calculated in all RPA_on cases
+			 * because it's a numerator, not a fixed point value.
+			 */
+
+			/* map (org - .5) into destination space */
+			Os = ((uint64) org << PQF_PLUS_4) - point5;
+			Od = mdp_do_div((dim_out * (Os + point5)),
+					dim_in) - point5;
+
+			/* take the ceiling */
+			Odprime = (Od & int_mask);
+			if (Odprime != Od)
+				Odprime += one;
+
+			/* now map that back to source space */
+			Osprime =
+			    mdp_do_div((dim_in * (Odprime + point5)),
+				       dim_out) - point5;
+
+			/* then floor & decrement to calculate the required
+			   starting coordinate */
+			Oreq = (Osprime & int_mask) - one;
+
+			/* calculate end coord in destination space then map to
+			   source space */
+			Ed = Odprime + ((uint64) dim_out << PQF_PLUS_4) - one;
+			Es = mdp_do_div((dim_in * (Ed + point5)),
+					dim_out) - point5;
+
+			/* now floor & increment by 2 to calculate the required
+			   ending coordinate */
+			Ereq = (Es & int_mask) + (one << 1);
+
+			/* calculate initial phase */
+
+#ifdef ADJUST_IP
+
+			IP64 = Osprime - Oreq;
+			delta = ((int64) (org) << PQF_PLUS_4) - Oreq;
+			IP64 -= delta;
+
+			/* limit to valid range before the left shift */
+			delta = (IP64 & (1LL << 63)) ? 4 : -4;
+			delta <<= PQF_PLUS_4;
+			while (abs((int)(IP64 >> PQF_PLUS_4)) > 4)
+				IP64 += delta;
+
+			/* right shift to account for extra bits of precision */
+			init_phase = (int)(IP64 >> 4);
+
+#else /* ADJUST_IP */
+
+			/* just calculate the real initial phase */
+			init_phase = (int)((Osprime - Oreq) >> 4);
+
+#endif /* ADJUST_IP */
+
+			/* calculate the overfetch */
+			beg_of = org - (uint32) (Oreq >> PQF_PLUS_4);
+			end_of =
+			    (uint32) (Ereq >> PQF_PLUS_4) - (org + dim_in - 1);
+		}
+	}
+
+	/* return the scale parameters */
+	*phase_init_ptr = init_phase;
+	*phase_step_ptr = (uint32) (k1 >> 4);
+	*num_repl_beg_ptr = beg_of;
+	*num_repl_end_ptr = end_of;
+
+	return 0;
+}
+
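+/*
+ * To illustrate the fixed point path above: downscaling a 200-pixel ROI to
+ * 100 pixels with org = 0 gives k3 = 0.5 (within the [0.25 - 4.0] range),
+ * k1 = 2.0, beg_of = end_of = 1, a phase step corresponding to two source
+ * pixels per destination pixel and an initial phase of one half.
+ */
+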
+static uint8 *mdp_adjust_rot_addr(MDPIBUF *iBuf, uint8 *addr, uint32 uv)
+{
+	uint32 dest_ystride = iBuf->ibuf_width * iBuf->bpp;
+	uint32 h_slice = 1;
+
+	if (uv && ((iBuf->ibuf_type == MDP_Y_CBCR_H2V2) ||
+		(iBuf->ibuf_type == MDP_Y_CRCB_H2V2)))
+		h_slice = 2;
+
+	if (MDP_CHKBIT(iBuf->mdpImg.mdpOp, MDPOP_ROT90) ^
+	    MDP_CHKBIT(iBuf->mdpImg.mdpOp, MDPOP_LR)) {
+		addr =
+		    addr + (iBuf->roi.dst_width -
+			    MIN(16, iBuf->roi.dst_width)) * iBuf->bpp;
+	}
+	if (MDP_CHKBIT(iBuf->mdpImg.mdpOp, MDPOP_UD)) {
+		addr =
+		    addr + ((iBuf->roi.dst_height -
+			MIN(16, iBuf->roi.dst_height))/h_slice) * dest_ystride;
+	}
+
+	return addr;
+}
+
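+/*
+ * For example, with an LR flip (and no 90 degree rotation), a destination
+ * ROI width of 64 and bpp = 2, the address above is advanced by
+ * (64 - MIN(16, 64)) * 2 = 96 bytes before the block is written out.
+ */
+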
+void mdp_set_scale(MDPIBUF *iBuf,
+		   uint32 dst_roi_width,
+		   uint32 dst_roi_height,
+		   boolean inputRGB, boolean outputRGB, uint32 *pppop_reg_ptr)
+{
+	uint32 dst_roi_width_scale;
+	uint32 dst_roi_height_scale;
+	boolean use_pr;
+	uint32 phasex_step = 0;
+	uint32 phasey_step = 0;
+	int32 phasex_init = 0;
+	int32 phasey_init = 0;
+	uint32 lines_dup = 0;
+	uint32 lines_dup_bg = 0;
+	uint32 dummy;
+	uint32 mdp_blur = 0;
+
+	if (iBuf->mdpImg.mdpOp & MDPOP_ASCALE) {
+		if (iBuf->mdpImg.mdpOp & MDPOP_ROT90) {
+			dst_roi_width_scale = dst_roi_height;
+			dst_roi_height_scale = dst_roi_width;
+		} else {
+			dst_roi_width_scale = dst_roi_width;
+			dst_roi_height_scale = dst_roi_height;
+		}
+
+		mdp_blur = iBuf->mdpImg.mdpOp & MDPOP_BLUR;
+
+		if ((dst_roi_width_scale != iBuf->roi.width) ||
+		    (dst_roi_height_scale != iBuf->roi.height) ||
+			mdp_blur) {
+			*pppop_reg_ptr |=
+			    (PPP_OP_SCALE_Y_ON | PPP_OP_SCALE_X_ON);
+
+		/* let's use SHIM logic to calculate the partial ROI scaling */
+#if 0
+			phasex_step =
+			    (uint32) mdp_do_div(0x20000000 * iBuf->roi.width,
+						dst_roi_width_scale);
+			phasey_step =
+			    (uint32) mdp_do_div(0x20000000 * iBuf->roi.height,
+						dst_roi_height_scale);
+
+/*
+    phasex_step= ((long long) iBuf->roi.width * 0x20000000)/dst_roi_width_scale;
+    phasey_step= ((long long)iBuf->roi.height * 0x20000000)/dst_roi_height_scale;
+*/
+
+			phasex_init =
+			    (((long long)phasex_step - 0x20000000) >> 1);
+			phasey_init =
+			    (((long long)phasey_step - 0x20000000) >> 1);
+
+#else
+			mdp_calc_scale_params(iBuf->roi.x, iBuf->roi.width,
+					      dst_roi_width_scale, 1,
+					      &phasex_init, &phasex_step,
+					      &dummy, &dummy);
+			mdp_calc_scale_params(iBuf->roi.y, iBuf->roi.height,
+					      dst_roi_height_scale, 0,
+					      &phasey_init, &phasey_step,
+					      &dummy, &dummy);
+#endif
+			MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x013c,
+				 phasex_init);
+			MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0140,
+				 phasey_init);
+			MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0144,
+				 phasex_step);
+			MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0148,
+				 phasey_step);
+
+			/* disable the pixel repeat option for scaling */
+			use_pr = false;
+
+			if ((dst_roi_width_scale > iBuf->roi.width) ||
+			    (dst_roi_height_scale > iBuf->roi.height)) {
+				if ((use_pr)
+				    && (mdp_curr_up_scale_xy !=
+					MDP_PR_SCALE_UP)) {
+					mdp_load_pr_upscale_table();
+					mdp_curr_up_scale_xy = MDP_PR_SCALE_UP;
+				} else if ((!use_pr)
+					   && (mdp_curr_up_scale_xy !=
+					       MDP_BC_SCALE_UP)) {
+					mdp_load_bc_upscale_table();
+					mdp_curr_up_scale_xy = MDP_BC_SCALE_UP;
+				}
+			}
+
+			if (mdp_blur) {
+				load_scale_table(mdp_gaussian_blur_table,
+					ARRAY_SIZE(mdp_gaussian_blur_table));
+				mdp_curr_down_scale_x = MDP_SCALE_BLUR;
+				mdp_curr_down_scale_y = MDP_SCALE_BLUR;
+			}
+
+			/* 0.2 < x <= 1 scaling factor */
+			if ((dst_roi_width_scale <= iBuf->roi.width) &&
+				!mdp_blur) {
+				if (((dst_roi_width_scale * 10) /
+				     iBuf->roi.width) > 8) {
+					if ((use_pr)
+					    && (mdp_curr_down_scale_x !=
+						MDP_PR_SCALE_POINT8_1)) {
+						mdp_load_pr_downscale_table_x_point8TO1
+						    ();
+						mdp_curr_down_scale_x =
+						    MDP_PR_SCALE_POINT8_1;
+					} else if ((!use_pr)
+						   && (mdp_curr_down_scale_x !=
+						       MDP_BC_SCALE_POINT8_1)) {
+						mdp_load_bc_downscale_table_x_point8TO1
+						    ();
+						mdp_curr_down_scale_x =
+						    MDP_BC_SCALE_POINT8_1;
+					}
+				} else
+				    if (((dst_roi_width_scale * 10) /
+					 iBuf->roi.width) > 6) {
+					if ((use_pr)
+					    && (mdp_curr_down_scale_x !=
+						MDP_PR_SCALE_POINT6_POINT8)) {
+						mdp_load_pr_downscale_table_x_point6TOpoint8
+						    ();
+						mdp_curr_down_scale_x =
+						    MDP_PR_SCALE_POINT6_POINT8;
+					} else if ((!use_pr)
+						   && (mdp_curr_down_scale_x !=
+						       MDP_BC_SCALE_POINT6_POINT8))
+					{
+						mdp_load_bc_downscale_table_x_point6TOpoint8
+						    ();
+						mdp_curr_down_scale_x =
+						    MDP_BC_SCALE_POINT6_POINT8;
+					}
+				} else
+				    if (((dst_roi_width_scale * 10) /
+					 iBuf->roi.width) > 4) {
+					if ((use_pr)
+					    && (mdp_curr_down_scale_x !=
+						MDP_PR_SCALE_POINT4_POINT6)) {
+						mdp_load_pr_downscale_table_x_point4TOpoint6
+						    ();
+						mdp_curr_down_scale_x =
+						    MDP_PR_SCALE_POINT4_POINT6;
+					} else if ((!use_pr)
+						   && (mdp_curr_down_scale_x !=
+						       MDP_BC_SCALE_POINT4_POINT6))
+					{
+						mdp_load_bc_downscale_table_x_point4TOpoint6
+						    ();
+						mdp_curr_down_scale_x =
+						    MDP_BC_SCALE_POINT4_POINT6;
+					}
+				} else {
+					if ((use_pr)
+					    && (mdp_curr_down_scale_x !=
+						MDP_PR_SCALE_POINT2_POINT4)) {
+						mdp_load_pr_downscale_table_x_point2TOpoint4
+						    ();
+						mdp_curr_down_scale_x =
+						    MDP_PR_SCALE_POINT2_POINT4;
+					} else if ((!use_pr)
+						   && (mdp_curr_down_scale_x !=
+						       MDP_BC_SCALE_POINT2_POINT4))
+					{
+						mdp_load_bc_downscale_table_x_point2TOpoint4
+						    ();
+						mdp_curr_down_scale_x =
+						    MDP_BC_SCALE_POINT2_POINT4;
+					}
+				}
+			}
+			/* 0.2 < y <= 1 scaling factor */
+			if ((dst_roi_height_scale <= iBuf->roi.height) &&
+				!mdp_blur) {
+				if (((dst_roi_height_scale * 10) /
+				     iBuf->roi.height) > 8) {
+					if ((use_pr)
+					    && (mdp_curr_down_scale_y !=
+						MDP_PR_SCALE_POINT8_1)) {
+						mdp_load_pr_downscale_table_y_point8TO1
+						    ();
+						mdp_curr_down_scale_y =
+						    MDP_PR_SCALE_POINT8_1;
+					} else if ((!use_pr)
+						   && (mdp_curr_down_scale_y !=
+						       MDP_BC_SCALE_POINT8_1)) {
+						mdp_load_bc_downscale_table_y_point8TO1
+						    ();
+						mdp_curr_down_scale_y =
+						    MDP_BC_SCALE_POINT8_1;
+					}
+				} else
+				    if (((dst_roi_height_scale * 10) /
+					 iBuf->roi.height) > 6) {
+					if ((use_pr)
+					    && (mdp_curr_down_scale_y !=
+						MDP_PR_SCALE_POINT6_POINT8)) {
+						mdp_load_pr_downscale_table_y_point6TOpoint8
+						    ();
+						mdp_curr_down_scale_y =
+						    MDP_PR_SCALE_POINT6_POINT8;
+					} else if ((!use_pr)
+						   && (mdp_curr_down_scale_y !=
+						       MDP_BC_SCALE_POINT6_POINT8))
+					{
+						mdp_load_bc_downscale_table_y_point6TOpoint8
+						    ();
+						mdp_curr_down_scale_y =
+						    MDP_BC_SCALE_POINT6_POINT8;
+					}
+				} else
+				    if (((dst_roi_height_scale * 10) /
+					 iBuf->roi.height) > 4) {
+					if ((use_pr)
+					    && (mdp_curr_down_scale_y !=
+						MDP_PR_SCALE_POINT4_POINT6)) {
+						mdp_load_pr_downscale_table_y_point4TOpoint6
+						    ();
+						mdp_curr_down_scale_y =
+						    MDP_PR_SCALE_POINT4_POINT6;
+					} else if ((!use_pr)
+						   && (mdp_curr_down_scale_y !=
+						       MDP_BC_SCALE_POINT4_POINT6))
+					{
+						mdp_load_bc_downscale_table_y_point4TOpoint6
+						    ();
+						mdp_curr_down_scale_y =
+						    MDP_BC_SCALE_POINT4_POINT6;
+					}
+				} else {
+					if ((use_pr)
+					    && (mdp_curr_down_scale_y !=
+						MDP_PR_SCALE_POINT2_POINT4)) {
+						mdp_load_pr_downscale_table_y_point2TOpoint4
+						    ();
+						mdp_curr_down_scale_y =
+						    MDP_PR_SCALE_POINT2_POINT4;
+					} else if ((!use_pr)
+						   && (mdp_curr_down_scale_y !=
+						       MDP_BC_SCALE_POINT2_POINT4))
+					{
+						mdp_load_bc_downscale_table_y_point2TOpoint4
+						    ();
+						mdp_curr_down_scale_y =
+						    MDP_BC_SCALE_POINT2_POINT4;
+					}
+				}
+			}
+		} else {
+			iBuf->mdpImg.mdpOp &= ~(MDPOP_ASCALE);
+		}
+	}
+	/* set the edge condition after the scaling check */
+	if (mdp_get_edge_cond(iBuf, &lines_dup, &lines_dup_bg))
+		printk(KERN_ERR "msm_fb: mdp_get_edge_cond() error!\n");
+
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01b8, lines_dup);
+	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01bc, lines_dup_bg);
+}
+
+void mdp_init_scale_table(void)
+{
+	mdp_curr_up_scale_xy = MDP_INIT_SCALE;
+	mdp_curr_down_scale_x = MDP_INIT_SCALE;
+	mdp_curr_down_scale_y = MDP_INIT_SCALE;
+}
+
+void mdp_adjust_start_addr(uint8 **src0,
+			   uint8 **src1,
+			   int v_slice,
+			   int h_slice,
+			   int x,
+			   int y,
+			   uint32 width,
+			   uint32 height, int bpp, MDPIBUF *iBuf, int layer)
+{
+	*src0 += (x + y * width) * bpp;
+
+	/* if it's dest/bg buffer, we need to adjust it for rotation */
+	if (layer != 0)
+		*src0 = mdp_adjust_rot_addr(iBuf, *src0, 0);
+
+	if (*src1) {
+		/*
+		 * MDP_Y_CBCR_H2V2/MDP_Y_CRCB_H2V2 are treated as co-sited for now;
+		 * for off-site chroma the x direction would need the same shift as y
+		 */
+		*src1 +=
+		    ((x / h_slice) * h_slice +
+		     ((y == 0) ? 0 : ((y + 1) / v_slice - 1) * width)) * bpp;
+
+		/* if it's dest/bg buffer, we need to adjust it for rotation */
+		if (layer != 0)
+			*src1 = mdp_adjust_rot_addr(iBuf, *src1, 1);
+	}
+}
+
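Editor's note: for the H2V2 pseudo-planar formats the chroma pointer above advances one interleaved CbCr row for every two luma rows. A small userspace sketch of the same offset arithmetic follows; it assumes h_slice = v_slice = 2 and treats bpp as one byte per sample purely for illustration (the driver passes the actual bpp).

#include <stdint.h>
#include <stdio.h>

/* Byte offset into the interleaved CbCr plane for a luma ROI starting at
 * (x, y); mirrors the src1 expression above with 2x2 subsampling. */
static uint32_t cbcr_offset(uint32_t x, uint32_t y, uint32_t width)
{
	const uint32_t h_slice = 2, v_slice = 2, bpp = 1;

	return ((x / h_slice) * h_slice +
		((y == 0) ? 0 : ((y + 1) / v_slice - 1) * width)) * bpp;
}

int main(void)
{
	/* with a 640-pixel-wide frame the chroma row advances every 2 luma rows */
	printf("offset(4, 0)  = %u\n", cbcr_offset(4, 0, 640));
	printf("offset(5, 10) = %u\n", cbcr_offset(5, 10, 640));
	return 0;
}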
+void mdp_set_blend_attr(MDPIBUF *iBuf,
+			uint32 *alpha,
+			uint32 *tpVal,
+			uint32 perPixelAlpha, uint32 *pppop_reg_ptr)
+{
+	if (perPixelAlpha) {
+		*pppop_reg_ptr |= PPP_OP_ROT_ON |
+		    PPP_OP_BLEND_ON | PPP_OP_BLEND_SRCPIXEL_ALPHA;
+	} else {
+		if ((iBuf->mdpImg.mdpOp & MDPOP_ALPHAB)
+		    && (iBuf->mdpImg.alpha == 0xff)) {
+			iBuf->mdpImg.mdpOp &= ~(MDPOP_ALPHAB);
+		}
+
+		if ((iBuf->mdpImg.mdpOp & MDPOP_ALPHAB)
+		    && (iBuf->mdpImg.mdpOp & MDPOP_TRANSP)) {
+			*pppop_reg_ptr |=
+			    PPP_OP_ROT_ON | PPP_OP_BLEND_ON |
+			    PPP_OP_BLEND_CONSTANT_ALPHA |
+			    PPP_OP_BLEND_ALPHA_BLEND_NORMAL |
+			    PPP_BLEND_CALPHA_TRNASP;
+
+			*alpha = iBuf->mdpImg.alpha;
+			*tpVal = iBuf->mdpImg.tpVal;
+		} else {
+			if (iBuf->mdpImg.mdpOp & MDPOP_TRANSP) {
+				*pppop_reg_ptr |= PPP_OP_ROT_ON |
+				    PPP_OP_BLEND_ON |
+				    PPP_OP_BLEND_SRCPIXEL_TRANSP;
+				*tpVal = iBuf->mdpImg.tpVal;
+			} else if (iBuf->mdpImg.mdpOp & MDPOP_ALPHAB) {
+				*pppop_reg_ptr |= PPP_OP_ROT_ON |
+				    PPP_OP_BLEND_ON |
+				    PPP_OP_BLEND_ALPHA_BLEND_NORMAL |
+				    PPP_OP_BLEND_CONSTANT_ALPHA;
+				*alpha = iBuf->mdpImg.alpha;
+			}
+		}
+	}
+}
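
Editor's note: mdp_set_blend_attr() builds the PPP operation word by OR-ing flag bits, with per-pixel alpha taking priority over transparency and constant alpha. Here is a simplified sketch of that flag composition; the bit positions are invented for illustration (the real PPP_OP_* masks come from the driver headers) and the combined constant-alpha-plus-transparency case is omitted.

#include <stdint.h>
#include <stdio.h>

/* Invented bit values, for illustration only. */
#define OP_ROT_ON                (1u << 0)
#define OP_BLEND_ON              (1u << 1)
#define OP_BLEND_SRCPIXEL_ALPHA  (1u << 2)
#define OP_BLEND_SRCPIXEL_TRANSP (1u << 3)
#define OP_BLEND_CONSTANT_ALPHA  (1u << 4)

/* Simplified priority ladder: per-pixel alpha, then transparency,
 * then constant alpha. */
static uint32_t blend_op(int per_pixel_alpha, int has_transp, int has_const_alpha)
{
	uint32_t op = 0;

	if (per_pixel_alpha)
		op |= OP_ROT_ON | OP_BLEND_ON | OP_BLEND_SRCPIXEL_ALPHA;
	else if (has_transp)
		op |= OP_ROT_ON | OP_BLEND_ON | OP_BLEND_SRCPIXEL_TRANSP;
	else if (has_const_alpha)
		op |= OP_ROT_ON | OP_BLEND_ON | OP_BLEND_CONSTANT_ALPHA;

	return op;
}

int main(void)
{
	printf("op = 0x%x\n", blend_op(0, 1, 0));
	return 0;
}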
diff --git a/drivers/video/msm/mdp_ppp_v31.c b/drivers/video/msm/mdp_ppp_v31.c
new file mode 100644
index 0000000..ee6af53
--- /dev/null
+++ b/drivers/video/msm/mdp_ppp_v31.c
@@ -0,0 +1,844 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/fb.h>
+#include "linux/proc_fs.h"
+
+#include <mach/hardware.h>
+#include <linux/io.h>
+
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <linux/semaphore.h>
+#include <asm/div64.h>
+
+#include "mdp.h"
+#include "msm_fb.h"
+
+#define MDP_SCALE_COEFF_NUM      32
+#define MDP_SCALE_0P2_TO_0P4_INDEX 0
+#define MDP_SCALE_0P4_TO_0P6_INDEX 32
+#define MDP_SCALE_0P6_TO_0P8_INDEX 64
+#define MDP_SCALE_0P8_TO_8P0_INDEX 96
+#define MDP_SCALE_COEFF_MASK 0x3ff
+
+#define MDP_SCALE_PR  0
+#define MDP_SCALE_FIR 1
+
+static uint32 mdp_scale_0p8_to_8p0_mode;
+static uint32 mdp_scale_0p6_to_0p8_mode;
+static uint32 mdp_scale_0p4_to_0p6_mode;
+static uint32 mdp_scale_0p2_to_0p4_mode;
+
+/* -------- All scaling range, "pixel repeat" -------- */
+static int16 mdp_scale_pixel_repeat_C0[MDP_SCALE_COEFF_NUM] = {
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static int16 mdp_scale_pixel_repeat_C1[MDP_SCALE_COEFF_NUM] = {
+	511, 511, 511, 511, 511, 511, 511, 511,
+	511, 511, 511, 511, 511, 511, 511, 511,
+	511, 511, 511, 511, 511, 511, 511, 511,
+	511, 511, 511, 511, 511, 511, 511, 511
+};
+
+static int16 mdp_scale_pixel_repeat_C2[MDP_SCALE_COEFF_NUM] = {
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static int16 mdp_scale_pixel_repeat_C3[MDP_SCALE_COEFF_NUM] = {
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0
+};
+
+/* --------------------------- FIR ------------------------------------- */
+/* -------- Downscale, ranging from 0.8x to 8.0x of original size -------- */
+
+static int16 mdp_scale_0p8_to_8p0_C0[MDP_SCALE_COEFF_NUM] = {
+	0, -7, -13, -19, -24, -28, -32, -34, -37, -39,
+	-40, -41, -41, -41, -40, -40, -38, -37, -35, -33,
+	-31, -29, -26, -24, -21, -18, -15, -13, -10, -7,
+	-5, -2
+};
+
+static int16 mdp_scale_0p8_to_8p0_C1[MDP_SCALE_COEFF_NUM] = {
+	511, 507, 501, 494, 485, 475, 463, 450, 436, 422,
+	405, 388, 370, 352, 333, 314, 293, 274, 253, 233,
+	213, 193, 172, 152, 133, 113, 95, 77, 60, 43,
+	28, 13
+};
+
+static int16 mdp_scale_0p8_to_8p0_C2[MDP_SCALE_COEFF_NUM] = {
+	0, 13, 28, 43, 60, 77, 95, 113, 133, 152,
+	172, 193, 213, 233, 253, 274, 294, 314, 333, 352,
+	370, 388, 405, 422, 436, 450, 463, 475, 485, 494,
+	501, 507,
+};
+
+static int16 mdp_scale_0p8_to_8p0_C3[MDP_SCALE_COEFF_NUM] = {
+	0, -2, -5, -7, -10, -13, -15, -18, -21, -24,
+	-26, -29, -31, -33, -35, -37, -38, -40, -40, -41,
+	-41, -41, -40, -39, -37, -34, -32, -28, -24, -19,
+	-13, -7
+};
+
+/* -------- Downscale, ranging from 0.6x to 0.8x of original size -------- */
+
+static int16 mdp_scale_0p6_to_0p8_C0[MDP_SCALE_COEFF_NUM] = {
+	104, 96, 89, 82, 75, 68, 61, 55, 49, 43,
+	38, 33, 28, 24, 20, 16, 12, 9, 6, 4,
+	2, 0, -2, -4, -5, -6, -7, -7, -8, -8,
+	-8, -8
+};
+
+static int16 mdp_scale_0p6_to_0p8_C1[MDP_SCALE_COEFF_NUM] = {
+	303, 303, 302, 300, 298, 296, 293, 289, 286, 281,
+	276, 270, 265, 258, 252, 245, 238, 230, 223, 214,
+	206, 197, 189, 180, 172, 163, 154, 145, 137, 128,
+	120, 112
+};
+
+static int16 mdp_scale_0p6_to_0p8_C2[MDP_SCALE_COEFF_NUM] = {
+	112, 120, 128, 137, 145, 154, 163, 172, 180, 189,
+	197, 206, 214, 223, 230, 238, 245, 252, 258, 265,
+	270, 276, 281, 286, 289, 293, 296, 298, 300, 302,
+	303, 303
+};
+
+static int16 mdp_scale_0p6_to_0p8_C3[MDP_SCALE_COEFF_NUM] = {
+	-8, -8, -8, -8, -7, -7, -6, -5, -4, -2,
+	0, 2, 4, 6, 9, 12, 16, 20, 24, 28,
+	33, 38, 43, 49, 55, 61, 68, 75, 82, 89,
+	96, 104
+};
+
+/* -------- Downscale, ranging from 0.4x to 0.6x of original size -------- */
+
+static int16 mdp_scale_0p4_to_0p6_C0[MDP_SCALE_COEFF_NUM] = {
+	136, 132, 128, 123, 119, 115, 111, 107, 103, 98,
+	95, 91, 87, 84, 80, 76, 73, 69, 66, 62,
+	59, 57, 54, 50, 47, 44, 41, 39, 36, 33,
+	32, 29
+};
+
+static int16 mdp_scale_0p4_to_0p6_C1[MDP_SCALE_COEFF_NUM] = {
+	206, 205, 204, 204, 201, 200, 199, 197, 196, 194,
+	191, 191, 189, 185, 184, 182, 180, 178, 176, 173,
+	170, 168, 165, 162, 160, 157, 155, 152, 148, 146,
+	142, 140
+};
+
+static int16 mdp_scale_0p4_to_0p6_C2[MDP_SCALE_COEFF_NUM] = {
+	140, 142, 146, 148, 152, 155, 157, 160, 162, 165,
+	168, 170, 173, 176, 178, 180, 182, 184, 185, 189,
+	191, 191, 194, 196, 197, 199, 200, 201, 204, 204,
+	205, 206
+};
+
+static int16 mdp_scale_0p4_to_0p6_C3[MDP_SCALE_COEFF_NUM] = {
+	29, 32, 33, 36, 39, 41, 44, 47, 50, 54,
+	57, 59, 62, 66, 69, 73, 76, 80, 84, 87,
+	91, 95, 98, 103, 107, 111, 115, 119, 123, 128,
+	132, 136
+};
+
+/* -------- Downscale, ranging from 0.2x to 0.4x of original size -------- */
+
+static int16 mdp_scale_0p2_to_0p4_C0[MDP_SCALE_COEFF_NUM] = {
+	131, 131, 130, 129, 128, 127, 127, 126, 125, 125,
+	124, 123, 123, 121, 120, 119, 119, 118, 117, 117,
+	116, 115, 115, 114, 113, 112, 111, 110, 109, 109,
+	108, 107
+};
+
+static int16 mdp_scale_0p2_to_0p4_C1[MDP_SCALE_COEFF_NUM] = {
+	141, 140, 140, 140, 140, 139, 138, 138, 138, 137,
+	137, 137, 136, 137, 137, 137, 136, 136, 136, 135,
+	135, 135, 134, 134, 134, 134, 134, 133, 133, 132,
+	132, 132
+};
+
+static int16 mdp_scale_0p2_to_0p4_C2[MDP_SCALE_COEFF_NUM] = {
+	132, 132, 132, 133, 133, 134, 134, 134, 134, 134,
+	135, 135, 135, 136, 136, 136, 137, 137, 137, 136,
+	137, 137, 137, 138, 138, 138, 139, 140, 140, 140,
+	140, 141
+};
+
+static int16 mdp_scale_0p2_to_0p4_C3[MDP_SCALE_COEFF_NUM] = {
+	107, 108, 109, 109, 110, 111, 112, 113, 114, 115,
+	115, 116, 117, 117, 118, 119, 119, 120, 121, 123,
+	123, 124, 125, 125, 126, 127, 127, 128, 129, 130,
+	131, 131
+};
+
+static void mdp_update_scale_table(int index, int16 *c0, int16 *c1,
+				   int16 *c2, int16 *c3)
+{
+	int i, val;
+
+	for (i = 0; i < MDP_SCALE_COEFF_NUM; i++) {
+		val =
+		    ((MDP_SCALE_COEFF_MASK & c1[i]) << 16) |
+		    (MDP_SCALE_COEFF_MASK & c0[i]);
+		writel(val, MDP_PPP_SCALE_COEFF_LSBn(index));
+		val =
+		    ((MDP_SCALE_COEFF_MASK & c3[i]) << 16) |
+		    (MDP_SCALE_COEFF_MASK & c2[i]);
+		writel(val, MDP_PPP_SCALE_COEFF_MSBn(index));
+		index++;
+	}
+}
+
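Editor's note: each scale-table entry packs two signed 10-bit coefficients into one 32-bit word, C0/C1 into the LSB register and C2/C3 into the MSB register, masked with MDP_SCALE_COEFF_MASK as in the loop above. A small runnable sketch of that packing:

#include <stdint.h>
#include <stdio.h>

#define COEFF_MASK 0x3ff	/* 10-bit field, matching MDP_SCALE_COEFF_MASK */

/* Pack two signed coefficients into one register word: the low field in
 * bits [9:0] and the high field in bits [25:16]. */
static uint32_t pack_coeff_pair(int16_t lo, int16_t hi)
{
	return ((uint32_t)(hi & COEFF_MASK) << 16) | (uint32_t)(lo & COEFF_MASK);
}

int main(void)
{
	/* first tap of the 0.8x-8.0x table: C0 = 0, C1 = 511 */
	printf("LSB word = 0x%08x\n", pack_coeff_pair(0, 511));
	/* a negative coefficient keeps its 10-bit two's-complement pattern */
	printf("MSB word = 0x%08x\n", pack_coeff_pair(13, -7));
	return 0;
}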
+void mdp_init_scale_table(void)
+{
+	mdp_scale_0p2_to_0p4_mode = MDP_SCALE_FIR;
+	mdp_update_scale_table(MDP_SCALE_0P2_TO_0P4_INDEX,
+			       mdp_scale_0p2_to_0p4_C0,
+			       mdp_scale_0p2_to_0p4_C1,
+			       mdp_scale_0p2_to_0p4_C2,
+			       mdp_scale_0p2_to_0p4_C3);
+
+	mdp_scale_0p4_to_0p6_mode = MDP_SCALE_FIR;
+	mdp_update_scale_table(MDP_SCALE_0P4_TO_0P6_INDEX,
+			       mdp_scale_0p4_to_0p6_C0,
+			       mdp_scale_0p4_to_0p6_C1,
+			       mdp_scale_0p4_to_0p6_C2,
+			       mdp_scale_0p4_to_0p6_C3);
+
+	mdp_scale_0p6_to_0p8_mode = MDP_SCALE_FIR;
+	mdp_update_scale_table(MDP_SCALE_0P6_TO_0P8_INDEX,
+			       mdp_scale_0p6_to_0p8_C0,
+			       mdp_scale_0p6_to_0p8_C1,
+			       mdp_scale_0p6_to_0p8_C2,
+			       mdp_scale_0p6_to_0p8_C3);
+
+	mdp_scale_0p8_to_8p0_mode = MDP_SCALE_FIR;
+	mdp_update_scale_table(MDP_SCALE_0P8_TO_8P0_INDEX,
+			       mdp_scale_0p8_to_8p0_C0,
+			       mdp_scale_0p8_to_8p0_C1,
+			       mdp_scale_0p8_to_8p0_C2,
+			       mdp_scale_0p8_to_8p0_C3);
+}
+
+static long long mdp_do_div(long long num, long long den)
+{
+	do_div(num, den);
+	return num;
+}
+
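Editor's note: the kernel's do_div() macro divides its 64-bit first argument in place by a 32-bit divisor and evaluates to the remainder, which is why the wrapper above returns num afterwards. A userspace stand-in with the same shape, noting that callers rely on den fitting in 32 bits:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for do_div(): quotient is left in *n, the remainder is returned. */
static uint32_t do_div_sketch(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);

	*n /= base;
	return rem;
}

static long long mdp_do_div_sketch(long long num, long long den)
{
	uint64_t n = (uint64_t)num;

	do_div_sketch(&n, (uint32_t)den);
	return (long long)n;
}

int main(void)
{
	/* 5.0 / 3 in u3.29 gives roughly 1.66 (compare HAL_MDP_PHASE_STEP_1P66) */
	printf("0x%llx\n", (unsigned long long)mdp_do_div_sketch(5LL << 29, 3));
	return 0;
}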
+#define SCALER_PHASE_BITS 29
+#define HAL_MDP_PHASE_STEP_2P50    0x50000000
+#define HAL_MDP_PHASE_STEP_1P66    0x35555555
+#define HAL_MDP_PHASE_STEP_1P25    0x28000000
+
+struct phase_val {
+	int phase_init_x;
+	int phase_init_y;
+	int phase_step_x;
+	int phase_step_y;
+};
+
+static void mdp_calc_scaleInitPhase_3p1(uint32 in_w,
+					uint32 in_h,
+					uint32 out_w,
+					uint32 out_h,
+					boolean is_rotate,
+					boolean is_pp_x,
+					boolean is_pp_y, struct phase_val *pval)
+{
+	uint64 dst_ROI_width;
+	uint64 dst_ROI_height;
+	uint64 src_ROI_width;
+	uint64 src_ROI_height;
+
+	/*
+	 * phase_step_x, phase_step_y, phase_init_x and phase_init_y
+	 * are represented in fixed-point, unsigned 3.29 format
+	 */
+	uint32 phase_step_x = 0;
+	uint32 phase_step_y = 0;
+	uint32 phase_init_x = 0;
+	uint32 phase_init_y = 0;
+	uint32 yscale_filter_sel, xscale_filter_sel;
+	uint32 scale_unit_sel_x, scale_unit_sel_y;
+
+	uint64 numerator, denominator;
+	uint64 temp_dim;
+
+	src_ROI_width = in_w;
+	src_ROI_height = in_h;
+	dst_ROI_width = out_w;
+	dst_ROI_height = out_h;
+
+	/* if there is a 90 degree rotation */
+	if (is_rotate) {
+		/* decide whether to use FIR or M/N for scaling */
+
+		/* if down-scaling by a factor smaller than 1/4 */
+		if ((dst_ROI_height == 1 && src_ROI_width < 4) ||
+			(src_ROI_width < 4 * dst_ROI_height - 3))
+			scale_unit_sel_x = 0;/* use FIR scalar */
+		else
+			scale_unit_sel_x = 1;/* use M/N scalar */
+
+		/* if down-scaling by a factor smaller than 1/4 */
+		if ((dst_ROI_width == 1 && src_ROI_height < 4) ||
+			(src_ROI_height < 4 * dst_ROI_width - 3))
+			scale_unit_sel_y = 0;/* use FIR scalar */
+		else
+			scale_unit_sel_y = 1;/* use M/N scalar */
+	} else {
+		/* decide whether to use FIR or M/N for scaling */
+		if ((dst_ROI_width == 1 && src_ROI_width < 4) ||
+			(src_ROI_width < 4 * dst_ROI_width - 3))
+			scale_unit_sel_x = 0;/* use FIR scalar */
+		else
+			scale_unit_sel_x = 1;/* use M/N scalar */
+
+		if ((dst_ROI_height == 1 && src_ROI_height < 4) ||
+			(src_ROI_height < 4 * dst_ROI_height - 3))
+			scale_unit_sel_y = 0;/* use FIR scalar */
+		else
+			scale_unit_sel_y = 1;/* use M/N scalar */
+	}
+
+	/* if there is a 90 degree rotation */
+	if (is_rotate) {
+		/* swap the width and height of dst ROI */
+		temp_dim = dst_ROI_width;
+		dst_ROI_width = dst_ROI_height;
+		dst_ROI_height = temp_dim;
+	}
+
+	/* calculate phase step for the x direction */
+
+	/* if the destination is only 1 pixel wide, the value of phase_step_x
+	   is unimportant; assign the src ROI width as an arbitrary value. */
+	if (dst_ROI_width == 1)
+		phase_step_x = (uint32) ((src_ROI_width) << SCALER_PHASE_BITS);
+
+	/* if using FIR scalar */
+	else if (scale_unit_sel_x == 0) {
+
+		/* Calculate the quotient (src_ROI_width - 1) / (dst_ROI_width - 1)
+		   with u3.29 precision, rounding up in the last fractional
+		   bit. */
+		numerator = (src_ROI_width - 1) << SCALER_PHASE_BITS;
+		/* never zero, thanks to the (dst_ROI_width == 1) case above */
+		denominator = (dst_ROI_width - 1);
+		/* divide, rounding the quotient up */
+		phase_step_x = (uint32) mdp_do_div((numerator + denominator - 1),
+						   denominator);
+
+	}
+
+	/* if M/N scalar */
+	else if (scale_unit_sel_x == 1) {
+		/* Calculate the quotient src_ROI_width / dst_ROI_width
+		   with u3.29 precision. The quotient is truncated
+		   (rounded down) in the last fractional bit. */
+		numerator = (src_ROI_width) << SCALER_PHASE_BITS;
+		denominator = (dst_ROI_width);
+		phase_step_x = (uint32) mdp_do_div(numerator, denominator);
+	}
+	/* calculate phase step for the y direction */
+
+	/* if the destination is only 1 pixel tall, the value of
+	   phase_step_y is unimportant. Assign the src ROI height
+	   as an arbitrary value. */
+	if (dst_ROI_height == 1)
+		phase_step_y = (uint32) ((src_ROI_height) << SCALER_PHASE_BITS);
+
+	/* if FIR scalar */
+	else if (scale_unit_sel_y == 0) {
+		/* Calculate the quotient (src_ROI_height - 1) / (dst_ROI_height - 1)
+		   with u3.29 precision, rounding up in the last fractional
+		   bit. */
+		numerator = (src_ROI_height - 1) << SCALER_PHASE_BITS;
+		/* never zero, thanks to the (dst_ROI_height == 1) case above */
+		denominator = (dst_ROI_height - 1);
+		/* divide, rounding the quotient up */
+		phase_step_y = (uint32) mdp_do_div((numerator + denominator - 1),
+						   denominator);
+
+	}
+
+	/* if M/N scalar */
+	else if (scale_unit_sel_y == 1) {
+		/* Calculate the quotient src_ROI_height / dst_ROI_height
+		   with u3.29 precision. The quotient is truncated
+		   (rounded down) in the last fractional bit. */
+		numerator = (src_ROI_height) << SCALER_PHASE_BITS;
+		denominator = (dst_ROI_height);
+		phase_step_y = (uint32) mdp_do_div(numerator, denominator);
+	}
+
+	/* decide which set of FIR coefficients to use */
+	if (phase_step_x > HAL_MDP_PHASE_STEP_2P50)
+		xscale_filter_sel = 0;
+	else if (phase_step_x > HAL_MDP_PHASE_STEP_1P66)
+		xscale_filter_sel = 1;
+	else if (phase_step_x > HAL_MDP_PHASE_STEP_1P25)
+		xscale_filter_sel = 2;
+	else
+		xscale_filter_sel = 3;
+
+	if (phase_step_y > HAL_MDP_PHASE_STEP_2P50)
+		yscale_filter_sel = 0;
+	else if (phase_step_y > HAL_MDP_PHASE_STEP_1P66)
+		yscale_filter_sel = 1;
+	else if (phase_step_y > HAL_MDP_PHASE_STEP_1P25)
+		yscale_filter_sel = 2;
+	else
+		yscale_filter_sel = 3;
+
+	/* calculate phase init for the x direction */
+
+	/* if using FIR scalar */
+	if (scale_unit_sel_x == 0) {
+		if (dst_ROI_width == 1)
+			phase_init_x =
+			    (uint32) ((src_ROI_width - 1) << SCALER_PHASE_BITS);
+		else
+			phase_init_x = 0;
+
+	}
+	/* M over N scalar  */
+	else if (scale_unit_sel_x == 1)
+		phase_init_x = 0;
+
+	/* calculate phase init for the y direction
+	   if using FIR scalar */
+	if (scale_unit_sel_y == 0) {
+		if (dst_ROI_height == 1)
+			phase_init_y =
+			    (uint32) ((src_ROI_height -
+				       1) << SCALER_PHASE_BITS);
+		else
+			phase_init_y = 0;
+
+	}
+	/* M over N scalar   */
+	else if (scale_unit_sel_y == 1)
+		phase_init_y = 0;
+
+	/* hand the calculated values back to the caller */
+	pval->phase_step_x = (uint32) phase_step_x;
+	pval->phase_step_y = (uint32) phase_step_y;
+	pval->phase_init_x = (uint32) phase_init_x;
+	pval->phase_init_y = (uint32) phase_init_y;
+
+	return;
+}
+
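Editor's note: to make the phase-step arithmetic above concrete, here is a minimal userspace version of the two rounding rules: the FIR path computes (src - 1) / (dst - 1) in u3.29 rounded up, while the M/N path computes src / dst truncated. The sketch assumes dst > 1 on the FIR path so the denominator is nonzero.

#include <stdint.h>
#include <stdio.h>

#define SCALER_PHASE_BITS 29

/* FIR path: (src - 1) / (dst - 1) in u3.29, rounded up in the last bit */
static uint32_t phase_step_fir(uint32_t src, uint32_t dst)
{
	uint64_t num = (uint64_t)(src - 1) << SCALER_PHASE_BITS;
	uint64_t den = dst - 1;

	return (uint32_t)((num + den - 1) / den);
}

/* M/N path: src / dst in u3.29, truncated */
static uint32_t phase_step_mn(uint32_t src, uint32_t dst)
{
	return (uint32_t)(((uint64_t)src << SCALER_PHASE_BITS) / dst);
}

int main(void)
{
	/* 1:1 gives exactly 1.0 (1 << 29); a 2:1 downscale gives 2.0 */
	printf("fir 100->100: 0x%08x\n", phase_step_fir(100, 100));
	printf("m/n 200->100: 0x%08x\n", phase_step_mn(200, 100));
	return 0;
}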
+void mdp_set_scale(MDPIBUF *iBuf,
+		   uint32 dst_roi_width,
+		   uint32 dst_roi_height,
+		   boolean inputRGB, boolean outputRGB, uint32 *pppop_reg_ptr)
+{
+	uint32 dst_roi_width_scale;
+	uint32 dst_roi_height_scale;
+	struct phase_val pval;
+	boolean use_pr;
+	uint32 ppp_scale_config = 0;
+
+	if (!inputRGB)
+		ppp_scale_config |= BIT(6);
+
+	if (iBuf->mdpImg.mdpOp & MDPOP_ASCALE) {
+		if (iBuf->mdpImg.mdpOp & MDPOP_ROT90) {
+			dst_roi_width_scale = dst_roi_height;
+			dst_roi_height_scale = dst_roi_width;
+		} else {
+			dst_roi_width_scale = dst_roi_width;
+			dst_roi_height_scale = dst_roi_height;
+		}
+
+		if ((dst_roi_width_scale != iBuf->roi.width) ||
+		    (dst_roi_height_scale != iBuf->roi.height) ||
+			(iBuf->mdpImg.mdpOp & MDPOP_SHARPENING)) {
+			*pppop_reg_ptr |=
+			    (PPP_OP_SCALE_Y_ON | PPP_OP_SCALE_X_ON);
+
+			mdp_calc_scaleInitPhase_3p1(iBuf->roi.width,
+						    iBuf->roi.height,
+						    dst_roi_width,
+						    dst_roi_height,
+						    iBuf->mdpImg.
+						    mdpOp & MDPOP_ROT90, 1, 1,
+						    &pval);
+
+			MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x013c,
+				 pval.phase_init_x);
+			MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0140,
+				 pval.phase_init_y);
+			MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0144,
+				 pval.phase_step_x);
+			MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0148,
+				 pval.phase_step_y);
+
+			/* disable the pixel repeat option for scaling */
+			use_pr = false;
+
+			/* x-direction */
+			if ((dst_roi_width_scale == iBuf->roi.width) &&
+				!(iBuf->mdpImg.mdpOp & MDPOP_SHARPENING)) {
+				*pppop_reg_ptr &= ~PPP_OP_SCALE_X_ON;
+			} else
+			    if (((dst_roi_width_scale * 10) / iBuf->roi.width) >
+				8) {
+				if ((use_pr)
+				    && (mdp_scale_0p8_to_8p0_mode !=
+					MDP_SCALE_PR)) {
+					mdp_scale_0p8_to_8p0_mode =
+					    MDP_SCALE_PR;
+					mdp_update_scale_table
+					    (MDP_SCALE_0P8_TO_8P0_INDEX,
+					     mdp_scale_pixel_repeat_C0,
+					     mdp_scale_pixel_repeat_C1,
+					     mdp_scale_pixel_repeat_C2,
+					     mdp_scale_pixel_repeat_C3);
+				} else if ((!use_pr)
+					   && (mdp_scale_0p8_to_8p0_mode !=
+					       MDP_SCALE_FIR)) {
+					mdp_scale_0p8_to_8p0_mode =
+					    MDP_SCALE_FIR;
+					mdp_update_scale_table
+					    (MDP_SCALE_0P8_TO_8P0_INDEX,
+					     mdp_scale_0p8_to_8p0_C0,
+					     mdp_scale_0p8_to_8p0_C1,
+					     mdp_scale_0p8_to_8p0_C2,
+					     mdp_scale_0p8_to_8p0_C3);
+				}
+				ppp_scale_config |= (SCALE_U1_SET << 2);
+			} else
+			    if (((dst_roi_width_scale * 10) / iBuf->roi.width) >
+				6) {
+				if ((use_pr)
+				    && (mdp_scale_0p6_to_0p8_mode !=
+					MDP_SCALE_PR)) {
+					mdp_scale_0p6_to_0p8_mode =
+					    MDP_SCALE_PR;
+					mdp_update_scale_table
+					    (MDP_SCALE_0P6_TO_0P8_INDEX,
+					     mdp_scale_pixel_repeat_C0,
+					     mdp_scale_pixel_repeat_C1,
+					     mdp_scale_pixel_repeat_C2,
+					     mdp_scale_pixel_repeat_C3);
+				} else if ((!use_pr)
+					   && (mdp_scale_0p6_to_0p8_mode !=
+					       MDP_SCALE_FIR)) {
+					mdp_scale_0p6_to_0p8_mode =
+					    MDP_SCALE_FIR;
+					mdp_update_scale_table
+					    (MDP_SCALE_0P6_TO_0P8_INDEX,
+					     mdp_scale_0p6_to_0p8_C0,
+					     mdp_scale_0p6_to_0p8_C1,
+					     mdp_scale_0p6_to_0p8_C2,
+					     mdp_scale_0p6_to_0p8_C3);
+				}
+				ppp_scale_config |= (SCALE_D2_SET << 2);
+			} else
+			    if (((dst_roi_width_scale * 10) / iBuf->roi.width) >
+				4) {
+				if ((use_pr)
+				    && (mdp_scale_0p4_to_0p6_mode !=
+					MDP_SCALE_PR)) {
+					mdp_scale_0p4_to_0p6_mode =
+					    MDP_SCALE_PR;
+					mdp_update_scale_table
+					    (MDP_SCALE_0P4_TO_0P6_INDEX,
+					     mdp_scale_pixel_repeat_C0,
+					     mdp_scale_pixel_repeat_C1,
+					     mdp_scale_pixel_repeat_C2,
+					     mdp_scale_pixel_repeat_C3);
+				} else if ((!use_pr)
+					   && (mdp_scale_0p4_to_0p6_mode !=
+					       MDP_SCALE_FIR)) {
+					mdp_scale_0p4_to_0p6_mode =
+					    MDP_SCALE_FIR;
+					mdp_update_scale_table
+					    (MDP_SCALE_0P4_TO_0P6_INDEX,
+					     mdp_scale_0p4_to_0p6_C0,
+					     mdp_scale_0p4_to_0p6_C1,
+					     mdp_scale_0p4_to_0p6_C2,
+					     mdp_scale_0p4_to_0p6_C3);
+				}
+				ppp_scale_config |= (SCALE_D1_SET << 2);
+			} else
+			if ((dst_roi_width_scale == 1 && iBuf->roi.width < 4) ||
+			(iBuf->roi.width < 4 * dst_roi_width_scale - 3)) {
+				if ((use_pr)
+				    && (mdp_scale_0p2_to_0p4_mode !=
+					MDP_SCALE_PR)) {
+					mdp_scale_0p2_to_0p4_mode =
+					    MDP_SCALE_PR;
+					mdp_update_scale_table
+					    (MDP_SCALE_0P2_TO_0P4_INDEX,
+					     mdp_scale_pixel_repeat_C0,
+					     mdp_scale_pixel_repeat_C1,
+					     mdp_scale_pixel_repeat_C2,
+					     mdp_scale_pixel_repeat_C3);
+				} else if ((!use_pr)
+					   && (mdp_scale_0p2_to_0p4_mode !=
+					       MDP_SCALE_FIR)) {
+					mdp_scale_0p2_to_0p4_mode =
+					    MDP_SCALE_FIR;
+					mdp_update_scale_table
+					    (MDP_SCALE_0P2_TO_0P4_INDEX,
+					     mdp_scale_0p2_to_0p4_C0,
+					     mdp_scale_0p2_to_0p4_C1,
+					     mdp_scale_0p2_to_0p4_C2,
+					     mdp_scale_0p2_to_0p4_C3);
+				}
+				ppp_scale_config |= (SCALE_D0_SET << 2);
+			} else
+				ppp_scale_config |= BIT(0);
+
+			/* y-direction */
+			if ((dst_roi_height_scale == iBuf->roi.height) &&
+				!(iBuf->mdpImg.mdpOp & MDPOP_SHARPENING)) {
+				*pppop_reg_ptr &= ~PPP_OP_SCALE_Y_ON;
+			} else if (((dst_roi_height_scale * 10) /
+					iBuf->roi.height) > 8) {
+				if ((use_pr)
+				    && (mdp_scale_0p8_to_8p0_mode !=
+					MDP_SCALE_PR)) {
+					mdp_scale_0p8_to_8p0_mode =
+					    MDP_SCALE_PR;
+					mdp_update_scale_table
+					    (MDP_SCALE_0P8_TO_8P0_INDEX,
+					     mdp_scale_pixel_repeat_C0,
+					     mdp_scale_pixel_repeat_C1,
+					     mdp_scale_pixel_repeat_C2,
+					     mdp_scale_pixel_repeat_C3);
+				} else if ((!use_pr)
+					   && (mdp_scale_0p8_to_8p0_mode !=
+					       MDP_SCALE_FIR)) {
+					mdp_scale_0p8_to_8p0_mode =
+					    MDP_SCALE_FIR;
+					mdp_update_scale_table
+					    (MDP_SCALE_0P8_TO_8P0_INDEX,
+					     mdp_scale_0p8_to_8p0_C0,
+					     mdp_scale_0p8_to_8p0_C1,
+					     mdp_scale_0p8_to_8p0_C2,
+					     mdp_scale_0p8_to_8p0_C3);
+				}
+				ppp_scale_config |= (SCALE_U1_SET << 4);
+			} else
+			    if (((dst_roi_height_scale * 10) /
+				 iBuf->roi.height) > 6) {
+				if ((use_pr)
+				    && (mdp_scale_0p6_to_0p8_mode !=
+					MDP_SCALE_PR)) {
+					mdp_scale_0p6_to_0p8_mode =
+					    MDP_SCALE_PR;
+					mdp_update_scale_table
+					    (MDP_SCALE_0P6_TO_0P8_INDEX,
+					     mdp_scale_pixel_repeat_C0,
+					     mdp_scale_pixel_repeat_C1,
+					     mdp_scale_pixel_repeat_C2,
+					     mdp_scale_pixel_repeat_C3);
+				} else if ((!use_pr)
+					   && (mdp_scale_0p6_to_0p8_mode !=
+					       MDP_SCALE_FIR)) {
+					mdp_scale_0p6_to_0p8_mode =
+					    MDP_SCALE_FIR;
+					mdp_update_scale_table
+					    (MDP_SCALE_0P6_TO_0P8_INDEX,
+					     mdp_scale_0p6_to_0p8_C0,
+					     mdp_scale_0p6_to_0p8_C1,
+					     mdp_scale_0p6_to_0p8_C2,
+					     mdp_scale_0p6_to_0p8_C3);
+				}
+				ppp_scale_config |= (SCALE_D2_SET << 4);
+			} else
+			    if (((dst_roi_height_scale * 10) /
+				 iBuf->roi.height) > 4) {
+				if ((use_pr)
+				    && (mdp_scale_0p4_to_0p6_mode !=
+					MDP_SCALE_PR)) {
+					mdp_scale_0p4_to_0p6_mode =
+					    MDP_SCALE_PR;
+					mdp_update_scale_table
+					    (MDP_SCALE_0P4_TO_0P6_INDEX,
+					     mdp_scale_pixel_repeat_C0,
+					     mdp_scale_pixel_repeat_C1,
+					     mdp_scale_pixel_repeat_C2,
+					     mdp_scale_pixel_repeat_C3);
+				} else if ((!use_pr)
+					   && (mdp_scale_0p4_to_0p6_mode !=
+					       MDP_SCALE_FIR)) {
+					mdp_scale_0p4_to_0p6_mode =
+					    MDP_SCALE_FIR;
+					mdp_update_scale_table
+					    (MDP_SCALE_0P4_TO_0P6_INDEX,
+					     mdp_scale_0p4_to_0p6_C0,
+					     mdp_scale_0p4_to_0p6_C1,
+					     mdp_scale_0p4_to_0p6_C2,
+					     mdp_scale_0p4_to_0p6_C3);
+				}
+				ppp_scale_config |= (SCALE_D1_SET << 4);
+			} else if ((dst_roi_height_scale == 1 &&
+			iBuf->roi.height < 4) ||
+			(iBuf->roi.height < 4 * dst_roi_height_scale - 3)) {
+				if ((use_pr)
+				    && (mdp_scale_0p2_to_0p4_mode !=
+					MDP_SCALE_PR)) {
+					mdp_scale_0p2_to_0p4_mode =
+					    MDP_SCALE_PR;
+					mdp_update_scale_table
+					    (MDP_SCALE_0P2_TO_0P4_INDEX,
+					     mdp_scale_pixel_repeat_C0,
+					     mdp_scale_pixel_repeat_C1,
+					     mdp_scale_pixel_repeat_C2,
+					     mdp_scale_pixel_repeat_C3);
+				} else if ((!use_pr)
+					   && (mdp_scale_0p2_to_0p4_mode !=
+					       MDP_SCALE_FIR)) {
+					mdp_scale_0p2_to_0p4_mode =
+					    MDP_SCALE_FIR;
+					mdp_update_scale_table
+					    (MDP_SCALE_0P2_TO_0P4_INDEX,
+					     mdp_scale_0p2_to_0p4_C0,
+					     mdp_scale_0p2_to_0p4_C1,
+					     mdp_scale_0p2_to_0p4_C2,
+					     mdp_scale_0p2_to_0p4_C3);
+				}
+				ppp_scale_config |= (SCALE_D0_SET << 4);
+			} else
+				ppp_scale_config |= BIT(1);
+
+			if (iBuf->mdpImg.mdpOp & MDPOP_SHARPENING) {
+				ppp_scale_config |= BIT(7);
+				MDP_OUTP(MDP_BASE + 0x50020,
+						iBuf->mdpImg.sp_value);
+			}
+
+			MDP_OUTP(MDP_BASE + 0x10230, ppp_scale_config);
+		} else {
+			iBuf->mdpImg.mdpOp &= ~(MDPOP_ASCALE);
+		}
+	}
+}
+
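Editor's note: the downscale ladder above picks a coefficient bank from the integer ratio (dst * 10) / src, so no floating point is needed. A simplified sketch of that selection; the real code additionally turns the scaler off for an exact 1:1 ratio and has an extra guard for ratios below roughly 1/4.

#include <stdint.h>
#include <stdio.h>

static const char *scale_bucket(uint32_t dst, uint32_t src)
{
	uint32_t r = (dst * 10) / src;

	if (r > 8)
		return "0.8x-8.0x";	/* also selected for any upscale */
	if (r > 6)
		return "0.6x-0.8x";
	if (r > 4)
		return "0.4x-0.6x";
	return "0.2x-0.4x";
}

int main(void)
{
	printf("%s\n", scale_bucket(480, 640));	/* 0.75x -> 0.6x-0.8x bank */
	printf("%s\n", scale_bucket(160, 640));	/* 0.25x -> 0.2x-0.4x bank */
	return 0;
}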
+void mdp_adjust_start_addr(uint8 **src0,
+			   uint8 **src1,
+			   int v_slice,
+			   int h_slice,
+			   int x,
+			   int y,
+			   uint32 width,
+			   uint32 height, int bpp, MDPIBUF *iBuf, int layer)
+{
+	switch (layer) {
+	case 0:
+		MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0200, (y << 16) | (x));
+		MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0208,
+			 (height << 16) | (width));
+		break;
+
+	case 1:
+		/* MDP 3.1 HW bug workaround */
+		if (iBuf->ibuf_type == MDP_YCRYCB_H2V1) {
+			*src0 += (x + y * width) * bpp;
+			x = y = 0;
+			width = iBuf->roi.dst_width;
+			height = iBuf->roi.dst_height;
+		}
+
+		MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0204, (y << 16) | (x));
+		MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x020c,
+			 (height << 16) | (width));
+		break;
+
+	case 2:
+		MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x019c, (y << 16) | (x));
+		break;
+	}
+}
+
+void mdp_set_blend_attr(MDPIBUF *iBuf,
+			uint32 *alpha,
+			uint32 *tpVal,
+			uint32 perPixelAlpha, uint32 *pppop_reg_ptr)
+{
+	int bg_alpha;
+
+	*alpha = iBuf->mdpImg.alpha;
+	*tpVal = iBuf->mdpImg.tpVal;
+
+	if (iBuf->mdpImg.mdpOp & MDPOP_FG_PM_ALPHA) {
+		if (perPixelAlpha) {
+			*pppop_reg_ptr |= PPP_OP_ROT_ON |
+			    PPP_OP_BLEND_ON | PPP_OP_BLEND_CONSTANT_ALPHA;
+		} else {
+			if ((iBuf->mdpImg.mdpOp & MDPOP_ALPHAB)
+			    && (iBuf->mdpImg.alpha == 0xff)) {
+				iBuf->mdpImg.mdpOp &= ~(MDPOP_ALPHAB);
+			}
+
+			if ((iBuf->mdpImg.mdpOp & MDPOP_ALPHAB)
+				|| (iBuf->mdpImg.mdpOp & MDPOP_TRANSP)) {
+				*pppop_reg_ptr |=
+				PPP_OP_ROT_ON | PPP_OP_BLEND_ON |
+				PPP_OP_BLEND_CONSTANT_ALPHA |
+				PPP_OP_BLEND_ALPHA_BLEND_NORMAL;
+			}
+		}
+
+		bg_alpha = PPP_BLEND_BG_USE_ALPHA_SEL |
+				PPP_BLEND_BG_ALPHA_REVERSE;
+
+		if (perPixelAlpha)
+			bg_alpha |= PPP_BLEND_BG_SRCPIXEL_ALPHA;
+		else {
+			bg_alpha |= PPP_BLEND_BG_CONSTANT_ALPHA;
+			bg_alpha |= iBuf->mdpImg.alpha << 24;
+		}
+		outpdw(MDP_BASE + 0x70010, bg_alpha);
+
+		if (iBuf->mdpImg.mdpOp & MDPOP_TRANSP)
+			*pppop_reg_ptr |= PPP_BLEND_CALPHA_TRNASP;
+	} else if (perPixelAlpha) {
+		*pppop_reg_ptr |= PPP_OP_ROT_ON |
+		    PPP_OP_BLEND_ON | PPP_OP_BLEND_SRCPIXEL_ALPHA;
+	} else {
+		if ((iBuf->mdpImg.mdpOp & MDPOP_ALPHAB)
+		    && (iBuf->mdpImg.alpha == 0xff)) {
+			iBuf->mdpImg.mdpOp &= ~(MDPOP_ALPHAB);
+		}
+
+		if ((iBuf->mdpImg.mdpOp & MDPOP_ALPHAB)
+		    || (iBuf->mdpImg.mdpOp & MDPOP_TRANSP)) {
+			*pppop_reg_ptr |=
+			    PPP_OP_ROT_ON | PPP_OP_BLEND_ON |
+			    PPP_OP_BLEND_CONSTANT_ALPHA |
+			    PPP_OP_BLEND_ALPHA_BLEND_NORMAL;
+		}
+
+		if (iBuf->mdpImg.mdpOp & MDPOP_TRANSP)
+			*pppop_reg_ptr |= PPP_BLEND_CALPHA_TRNASP;
+	}
+}
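
Editor's note: in the MDPOP_FG_PM_ALPHA path the constant background alpha is carried in the top byte of the background-blend word written to MDP_BASE + 0x70010. A tiny sketch of that packing; the flag bit positions are invented for illustration, only the alpha-in-bits-[31:24] placement mirrors the code above.

#include <stdint.h>
#include <stdio.h>

/* Invented bit positions; the real PPP_BLEND_BG_* masks come from the headers. */
#define BG_USE_ALPHA_SEL  (1u << 0)
#define BG_ALPHA_REVERSE  (1u << 1)
#define BG_CONSTANT_ALPHA (1u << 2)

static uint32_t bg_blend_word(uint8_t alpha)
{
	uint32_t w = BG_USE_ALPHA_SEL | BG_ALPHA_REVERSE | BG_CONSTANT_ALPHA;

	/* constant alpha lives in bits [31:24] */
	return w | ((uint32_t)alpha << 24);
}

int main(void)
{
	printf("bg_alpha = 0x%08x\n", bg_blend_word(0x80));
	return 0;
}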
diff --git a/drivers/video/msm/mdp_vsync.c b/drivers/video/msm/mdp_vsync.c
new file mode 100644
index 0000000..4108c89
--- /dev/null
+++ b/drivers/video/msm/mdp_vsync.c
@@ -0,0 +1,488 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/hrtimer.h>
+#include <linux/vmalloc.h>
+#include <linux/clk.h>
+
+#include <mach/hardware.h>
+#include <linux/io.h>
+
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <mach/gpio.h>
+
+#include "mdp.h"
+#include "msm_fb.h"
+#include "mddihost.h"
+
+#ifdef CONFIG_FB_MSM_MDP40
+#include "mdp4.h"
+
+#define MDP_SYNC_CFG_0		0x100
+#define MDP_SYNC_STATUS_0	0x10c
+#define MDP_SYNC_CFG_1		0x104
+#define MDP_SYNC_STATUS_1	0x110
+#define MDP_PRIM_VSYNC_OUT_CTRL	0x118
+#define MDP_SEC_VSYNC_OUT_CTRL	0x11C
+#define MDP_VSYNC_SEL		0x124
+#define MDP_PRIM_VSYNC_INIT_VAL	0x128
+#define MDP_SEC_VSYNC_INIT_VAL	0x12C
+#else
+#define MDP_SYNC_CFG_0		0x300
+#define MDP_SYNC_STATUS_0	0x30c
+#define MDP_PRIM_VSYNC_OUT_CTRL	0x318
+#define MDP_PRIM_VSYNC_INIT_VAL	0x328
+#endif
+
+extern mddi_lcd_type mddi_lcd_idx;
+extern spinlock_t mdp_spin_lock;
+extern struct workqueue_struct *mdp_vsync_wq;
+extern int lcdc_mode;
+extern int vsync_mode;
+
+#ifdef MDP_HW_VSYNC
+int vsync_above_th = 4;
+int vsync_start_th = 1;
+int vsync_load_cnt;
+int vsync_clk_status;
+DEFINE_MUTEX(vsync_clk_lock);
+DEFINE_MUTEX(vsync_timer_lock);
+
+static struct clk *mdp_vsync_clk;
+static struct msm_fb_data_type *vsync_mfd;
+static unsigned char timer_shutdown_flag;
+
+void mdp_hw_vsync_clk_enable(struct msm_fb_data_type *mfd)
+{
+	if (vsync_clk_status == 1)
+		return;
+	mutex_lock(&vsync_clk_lock);
+	if (mfd->use_mdp_vsync) {
+		clk_enable(mdp_vsync_clk);
+		vsync_clk_status = 1;
+	}
+	mutex_unlock(&vsync_clk_lock);
+}
+
+void mdp_hw_vsync_clk_disable(struct msm_fb_data_type *mfd)
+{
+	if (vsync_clk_status == 0)
+		return;
+	mutex_lock(&vsync_clk_lock);
+	if (mfd->use_mdp_vsync) {
+		clk_disable(mdp_vsync_clk);
+		vsync_clk_status = 0;
+	}
+	mutex_unlock(&vsync_clk_lock);
+}
+
+static void mdp_set_vsync(unsigned long data);
+void mdp_vsync_clk_enable(void)
+{
+	if (vsync_mfd) {
+		mdp_hw_vsync_clk_enable(vsync_mfd);
+		if (!vsync_mfd->vsync_resync_timer.function) {
+			mdp_set_vsync((unsigned long) vsync_mfd);
+		}
+	}
+}
+
+void mdp_vsync_clk_disable(void)
+{
+	if (vsync_mfd) {
+		if (vsync_mfd->vsync_resync_timer.function) {
+			mutex_lock(&vsync_timer_lock);
+			timer_shutdown_flag = 1;
+			mutex_unlock(&vsync_timer_lock);
+			del_timer_sync(&vsync_mfd->vsync_resync_timer);
+			mutex_lock(&vsync_timer_lock);
+			timer_shutdown_flag = 0;
+			mutex_unlock(&vsync_timer_lock);
+			vsync_mfd->vsync_resync_timer.function = NULL;
+		}
+
+		mdp_hw_vsync_clk_disable(vsync_mfd);
+	}
+}
+#endif
+
+static void mdp_set_vsync(unsigned long data)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)data;
+	struct msm_fb_panel_data *pdata = NULL;
+
+	pdata = (struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;
+
+	vsync_mfd = mfd;
+	init_timer(&mfd->vsync_resync_timer);
+
+	if ((pdata) && (pdata->set_vsync_notifier == NULL))
+		return;
+
+	if ((mfd->panel_info.lcd.vsync_enable) && (mfd->panel_power_on)
+	    && (!mfd->vsync_handler_pending)) {
+		mfd->vsync_handler_pending = TRUE;
+		if (!queue_work(mdp_vsync_wq, &mfd->vsync_resync_worker)) {
+			MSM_FB_INFO
+			    ("mdp_set_vsync: can't queue_work! -> needs to increase vsync_resync_timer_duration\n");
+		}
+	} else {
+		MSM_FB_DEBUG
+		    ("mdp_set_vsync failed!  EN:%d  PWR:%d  PENDING:%d\n",
+		     mfd->panel_info.lcd.vsync_enable, mfd->panel_power_on,
+		     mfd->vsync_handler_pending);
+	}
+
+	mutex_lock(&vsync_timer_lock);
+	if (!timer_shutdown_flag) {
+		mfd->vsync_resync_timer.function = mdp_set_vsync;
+		mfd->vsync_resync_timer.data = data;
+		mfd->vsync_resync_timer.expires =
+			jiffies + mfd->panel_info.lcd.vsync_notifier_period;
+		add_timer(&mfd->vsync_resync_timer);
+	}
+	mutex_unlock(&vsync_timer_lock);
+}
+
+static void mdp_vsync_handler(void *data)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)data;
+
+	if (vsync_clk_status == 0) {
+		pr_debug("Warning: vsync clk is disabled\n");
+		mfd->vsync_handler_pending = FALSE;
+		return;
+	}
+
+	if (mfd->use_mdp_vsync) {
+#ifdef MDP_HW_VSYNC
+		if (mfd->panel_power_on) {
+			MDP_OUTP(MDP_BASE + MDP_SYNC_STATUS_0, vsync_load_cnt);
+
+#ifdef CONFIG_FB_MSM_MDP40
+			if (mdp_hw_revision < MDP4_REVISION_V2_1)
+				MDP_OUTP(MDP_BASE + MDP_SYNC_STATUS_1,
+						vsync_load_cnt);
+#endif
+		}
+
+#endif
+	} else {
+		mfd->last_vsync_timetick = ktime_get_real();
+	}
+
+	mfd->vsync_handler_pending = FALSE;
+}
+
+irqreturn_t mdp_hw_vsync_handler_proxy(int irq, void *data)
+{
+	/*
+	 * ToDo: tried enabling/disabling GPIO MDP HW VSYNC interrupt
+	 * but getting inaccurate timing in mdp_vsync_handler()
+	 * disable_irq(MDP_HW_VSYNC_IRQ);
+	 */
+	mdp_vsync_handler(data);
+
+	return IRQ_HANDLED;
+}
+
+#ifdef MDP_HW_VSYNC
+static void mdp_set_sync_cfg_0(struct msm_fb_data_type *mfd, int vsync_cnt)
+{
+	unsigned long cfg;
+
+	cfg = mfd->total_lcd_lines - 1;
+	cfg <<= MDP_SYNCFG_HGT_LOC;
+	if (mfd->panel_info.lcd.hw_vsync_mode)
+		cfg |= MDP_SYNCFG_VSYNC_EXT_EN;
+	cfg |= (MDP_SYNCFG_VSYNC_INT_EN | vsync_cnt);
+
+	MDP_OUTP(MDP_BASE + MDP_SYNC_CFG_0, cfg);
+}
+
+#ifdef CONFIG_FB_MSM_MDP40
+static void mdp_set_sync_cfg_1(struct msm_fb_data_type *mfd, int vsync_cnt)
+{
+	unsigned long cfg;
+
+	cfg = mfd->total_lcd_lines - 1;
+	cfg <<= MDP_SYNCFG_HGT_LOC;
+	if (mfd->panel_info.lcd.hw_vsync_mode)
+		cfg |= MDP_SYNCFG_VSYNC_EXT_EN;
+	cfg |= (MDP_SYNCFG_VSYNC_INT_EN | vsync_cnt);
+
+	MDP_OUTP(MDP_BASE + MDP_SYNC_CFG_1, cfg);
+}
+#endif
+#endif
+
+void mdp_config_vsync(struct msm_fb_data_type *mfd)
+{
+	/* vsync on primary lcd only for now */
+	if ((mfd->dest != DISPLAY_LCD) || (mfd->panel_info.pdest != DISPLAY_1)
+	    || (!vsync_mode)) {
+		goto err_handle;
+	}
+
+	vsync_clk_status = 0;
+	if (mfd->panel_info.lcd.vsync_enable) {
+		mfd->total_porch_lines = mfd->panel_info.lcd.v_back_porch +
+		    mfd->panel_info.lcd.v_front_porch +
+		    mfd->panel_info.lcd.v_pulse_width;
+		mfd->total_lcd_lines =
+		    mfd->panel_info.yres + mfd->total_porch_lines;
+		mfd->lcd_ref_usec_time =
+		    100000000 / mfd->panel_info.lcd.refx100;
+		mfd->vsync_handler_pending = FALSE;
+
+		mfd->last_vsync_timetick.tv64 = 0;
+
+#ifdef MDP_HW_VSYNC
+		if (mdp_vsync_clk == NULL)
+			mdp_vsync_clk = clk_get(NULL, "mdp_vsync_clk");
+
+		if (IS_ERR(mdp_vsync_clk)) {
+			printk(KERN_ERR "error: can't get mdp_vsync_clk!\n");
+			mfd->use_mdp_vsync = 0;
+		} else
+			mfd->use_mdp_vsync = 1;
+
+		if (mfd->use_mdp_vsync) {
+			uint32 vsync_cnt_cfg, vsync_cnt_cfg_dem;
+			uint32 mdp_vsync_clk_speed_hz;
+
+			mdp_vsync_clk_speed_hz = clk_get_rate(mdp_vsync_clk);
+
+			if (mdp_vsync_clk_speed_hz == 0) {
+				mfd->use_mdp_vsync = 0;
+			} else {
+				/*
+				 * Do this calculation in 2 steps for
+				 * rounding uint32 properly.
+				 */
+				vsync_cnt_cfg_dem =
+				    (mfd->panel_info.lcd.refx100 *
+				     mfd->total_lcd_lines) / 100;
+				vsync_cnt_cfg =
+				    (mdp_vsync_clk_speed_hz) /
+				    vsync_cnt_cfg_dem;
+
+				/* MDP cmd block enable */
+				mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON,
+					      FALSE);
+				mdp_hw_vsync_clk_enable(mfd);
+
+				mdp_set_sync_cfg_0(mfd, vsync_cnt_cfg);
+
+
+#ifdef CONFIG_FB_MSM_MDP40
+				if (mdp_hw_revision < MDP4_REVISION_V2_1)
+					mdp_set_sync_cfg_1(mfd, vsync_cnt_cfg);
+#endif
+
+				/*
+				 * load the last line + 1 to be in the
+				 * safety zone
+				 */
+				vsync_load_cnt = mfd->panel_info.yres;
+
+				/* line counter init value at the next pulse */
+				MDP_OUTP(MDP_BASE + MDP_PRIM_VSYNC_INIT_VAL,
+							vsync_load_cnt);
+#ifdef CONFIG_FB_MSM_MDP40
+				if (mdp_hw_revision < MDP4_REVISION_V2_1) {
+					MDP_OUTP(MDP_BASE +
+					MDP_SEC_VSYNC_INIT_VAL, vsync_load_cnt);
+				}
+#endif
+
+				/*
+				 * external vsync source pulse width and
+				 * polarity flip
+				 */
+				MDP_OUTP(MDP_BASE + MDP_PRIM_VSYNC_OUT_CTRL,
+							BIT(0));
+#ifdef CONFIG_FB_MSM_MDP40
+				if (mdp_hw_revision < MDP4_REVISION_V2_1) {
+					MDP_OUTP(MDP_BASE +
+					MDP_SEC_VSYNC_OUT_CTRL, BIT(0));
+					MDP_OUTP(MDP_BASE +
+						MDP_VSYNC_SEL, 0x20);
+				}
+#endif
+
+				/* threshold */
+				MDP_OUTP(MDP_BASE + 0x200,
+					 (vsync_above_th << 16) |
+					 (vsync_start_th));
+
+				mdp_hw_vsync_clk_disable(mfd);
+				/* MDP cmd block disable */
+				mdp_pipe_ctrl(MDP_CMD_BLOCK,
+					      MDP_BLOCK_POWER_OFF, FALSE);
+			}
+		}
+#else
+		mfd->use_mdp_vsync = 0;
+		hrtimer_init(&mfd->dma_hrtimer, CLOCK_MONOTONIC,
+			     HRTIMER_MODE_REL);
+		mfd->dma_hrtimer.function = mdp_dma2_vsync_hrtimer_handler;
+		mfd->vsync_width_boundary = vmalloc(mfd->panel_info.xres * 4);
+#endif
+
+#ifdef CONFIG_FB_MSM_MDDI
+		mfd->channel_irq = 0;
+		if (mfd->panel_info.lcd.hw_vsync_mode) {
+			u32 vsync_gpio = mfd->vsync_gpio;
+			u32 ret;
+
+			if (vsync_gpio == -1) {
+				MSM_FB_INFO("vsync_gpio not defined!\n");
+				goto err_handle;
+			}
+
+			ret = gpio_tlmm_config(GPIO_CFG
+					(vsync_gpio,
+					(mfd->use_mdp_vsync) ? 1 : 0,
+					GPIO_CFG_INPUT,
+					GPIO_CFG_PULL_DOWN,
+					GPIO_CFG_2MA),
+					GPIO_CFG_ENABLE);
+			if (ret)
+				goto err_handle;
+
+			/*
+			 * if use_mdp_vsync is set, no interrupt is needed:
+			 * the vsync signal is fed directly to the MDP to
+			 * reset the write pointer counter, so no irq handler
+			 * is required to reset it.
+			 */
+			if (!mfd->use_mdp_vsync) {
+				mfd->channel_irq = MSM_GPIO_TO_INT(vsync_gpio);
+				if (request_irq
+				    (mfd->channel_irq,
+				     &mdp_hw_vsync_handler_proxy,
+				     IRQF_TRIGGER_FALLING, "VSYNC_GPIO",
+				     (void *)mfd)) {
+					MSM_FB_INFO
+					("irq=%d failed! vsync_gpio=%d\n",
+						mfd->channel_irq,
+						vsync_gpio);
+					goto err_handle;
+				}
+			}
+		}
+#endif
+		mdp_hw_vsync_clk_enable(mfd);
+		mdp_set_vsync((unsigned long)mfd);
+	}
+
+	return;
+
+err_handle:
+	if (mfd->vsync_width_boundary)
+		vfree(mfd->vsync_width_boundary);
+	mfd->panel_info.lcd.vsync_enable = FALSE;
+	printk(KERN_ERR "%s: failed!\n", __func__);
+}
+
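Editor's note: the SYNC_CFG count programmed above is the number of vsync-clock ticks per LCD line, computed in two integer steps (refx100 is the refresh rate in hundredths of a Hz). A small sketch with hypothetical numbers:

#include <stdint.h>
#include <stdio.h>

static uint32_t vsync_cnt_cfg(uint32_t vsync_clk_hz, uint32_t refx100,
			      uint32_t total_lcd_lines)
{
	/* two steps so the uint32 arithmetic rounds the same way as the driver */
	uint32_t lines_per_second = (refx100 * total_lcd_lines) / 100;

	return vsync_clk_hz / lines_per_second;
}

int main(void)
{
	/* hypothetical: 19.2 MHz vsync clock, 60 Hz panel, 500 total lines */
	printf("cfg = %u\n", vsync_cnt_cfg(19200000, 6000, 500));
	return 0;
}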
+void mdp_vsync_resync_workqueue_handler(struct work_struct *work)
+{
+	struct msm_fb_data_type *mfd = NULL;
+	int vsync_fnc_enabled = FALSE;
+	struct msm_fb_panel_data *pdata = NULL;
+
+	mfd = container_of(work, struct msm_fb_data_type, vsync_resync_worker);
+
+	if (mfd) {
+		if (mfd->panel_power_on) {
+			pdata =
+			    (struct msm_fb_panel_data *)mfd->pdev->dev.
+			    platform_data;
+
+			if (pdata->set_vsync_notifier != NULL) {
+				if (pdata->clk_func && !pdata->clk_func(2)) {
+					mfd->vsync_handler_pending = FALSE;
+					return;
+				}
+
+				pdata->set_vsync_notifier(
+						mdp_vsync_handler,
+						(void *)mfd);
+				vsync_fnc_enabled = TRUE;
+			}
+		}
+	}
+
+	if ((mfd) && (!vsync_fnc_enabled))
+		mfd->vsync_handler_pending = FALSE;
+}
+
+boolean mdp_hw_vsync_set_handler(msm_fb_vsync_handler_type handler, void *data)
+{
+	/*
+	 * ToDo: tried enabling/disabling GPIO MDP HW VSYNC interrupt
+	 * but getting inaccurate timing in mdp_vsync_handler()
+	 * enable_irq(MDP_HW_VSYNC_IRQ);
+	 */
+
+	return TRUE;
+}
+
+uint32 mdp_get_lcd_line_counter(struct msm_fb_data_type *mfd)
+{
+	uint32 elapsed_usec_time;
+	uint32 lcd_line;
+	ktime_t last_vsync_timetick_local;
+	ktime_t curr_time;
+	unsigned long flag;
+
+	if ((!mfd->panel_info.lcd.vsync_enable) || (!vsync_mode))
+		return 0;
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	last_vsync_timetick_local = mfd->last_vsync_timetick;
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+
+	curr_time = ktime_get_real();
+	elapsed_usec_time = ktime_to_us(ktime_sub(curr_time,
+						last_vsync_timetick_local));
+
+	elapsed_usec_time = elapsed_usec_time % mfd->lcd_ref_usec_time;
+
+	/* lcd line calculation referenced to line counter = 0 */
+	lcd_line =
+	    (elapsed_usec_time * mfd->total_lcd_lines) / mfd->lcd_ref_usec_time;
+
+	/* lcd line adjustment referenced to the actual line counter at vsync */
+	lcd_line =
+	    (mfd->total_lcd_lines - mfd->panel_info.lcd.v_back_porch +
+	     lcd_line) % (mfd->total_lcd_lines + 1);
+
+	if (lcd_line > mfd->total_lcd_lines) {
+		MSM_FB_INFO
+		    ("mdp_get_lcd_line_counter: mdp_lcd_rd_cnt >= mfd->total_lcd_lines error!\n");
+	}
+
+	return lcd_line;
+}
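
Editor's note: mdp_get_lcd_line_counter() turns the time elapsed since the last vsync into an estimated line number. A userspace sketch of the same integer arithmetic, with hypothetical panel numbers (a 60 Hz panel refreshes roughly every 16666 us):

#include <stdint.h>
#include <stdio.h>

static uint32_t lcd_line_estimate(uint32_t elapsed_usec, uint32_t ref_usec,
				  uint32_t total_lines, uint32_t v_back_porch)
{
	uint32_t line;

	elapsed_usec %= ref_usec;			/* position within one frame */
	line = (elapsed_usec * total_lines) / ref_usec;	/* line relative to counter 0 */

	/* adjust so the count references the hardware line counter at vsync */
	return (total_lines - v_back_porch + line) % (total_lines + 1);
}

int main(void)
{
	/* hypothetical panel: 16666 us per frame, 500 total lines, vbp of 4 */
	printf("line = %u\n", lcd_line_estimate(8000, 16666, 500, 4));
	return 0;
}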
diff --git a/drivers/video/msm/mipi_dsi.c b/drivers/video/msm/mipi_dsi.c
new file mode 100644
index 0000000..61bb345
--- /dev/null
+++ b/drivers/video/msm/mipi_dsi.c
@@ -0,0 +1,548 @@
+/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <mach/hardware.h>
+#include <mach/gpio.h>
+#include <mach/clk.h>
+
+#include "msm_fb.h"
+#include "mipi_dsi.h"
+#include "mdp.h"
+#include "mdp4.h"
+
+u32 dsi_irq;
+
+static int mipi_dsi_probe(struct platform_device *pdev);
+static int mipi_dsi_remove(struct platform_device *pdev);
+
+static int mipi_dsi_off(struct platform_device *pdev);
+static int mipi_dsi_on(struct platform_device *pdev);
+
+static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
+static int pdev_list_cnt;
+static struct mipi_dsi_platform_data *mipi_dsi_pdata;
+
+static int vsync_gpio = -1;
+
+static struct platform_driver mipi_dsi_driver = {
+	.probe = mipi_dsi_probe,
+	.remove = mipi_dsi_remove,
+	.shutdown = NULL,
+	.driver = {
+		   .name = "mipi_dsi",
+		   },
+};
+
+struct device dsi_dev;
+
+static int mipi_dsi_off(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct msm_fb_data_type *mfd;
+	struct msm_panel_info *pinfo;
+
+	mfd = platform_get_drvdata(pdev);
+	pinfo = &mfd->panel_info;
+
+	if (mdp_rev >= MDP_REV_41)
+		mutex_lock(&mfd->dma->ov_mutex);
+	else
+		down(&mfd->dma->mutex);
+
+	mdp4_overlay_dsi_state_set(ST_DSI_SUSPEND);
+
+	/*
+	 * Description: the DSI clock is needed to perform the shutdown.
+	 * mdp4_dsi_cmd_dma_busy_wait() will enable the DSI clock if disabled.
+	 * Also wait until dma (overlay and dmap) finishes.
+	 */
+	if (mfd->panel_info.type == MIPI_CMD_PANEL) {
+		if (mdp_rev >= MDP_REV_41) {
+			mdp4_dsi_cmd_dma_busy_wait(mfd);
+			mdp4_dsi_blt_dmap_busy_wait(mfd);
+		} else {
+			mdp3_dsi_cmd_dma_busy_wait(mfd);
+		}
+	}
+
+	/*
+	 * Description: switch to DSI_CMD_MODE since we need to
+	 * tx the DCS display-off command to the panel
+	 */
+	mipi_dsi_op_mode_config(DSI_CMD_MODE);
+
+	if (mfd->panel_info.type == MIPI_CMD_PANEL) {
+		if (pinfo->lcd.vsync_enable) {
+			if (pinfo->lcd.hw_vsync_mode && vsync_gpio > 0)
+				gpio_free(vsync_gpio);
+
+			mipi_dsi_set_tear_off(mfd);
+		}
+	}
+
+	ret = panel_next_off(pdev);
+
+#ifdef CONFIG_MSM_BUS_SCALING
+	mdp_bus_scale_update_request(0);
+#endif
+	/* disable the dsi engine */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x0000, 0);
+
+	mipi_dsi_phy_ctrl(0);
+
+	local_bh_disable();
+	mipi_dsi_clk_disable();
+	local_bh_enable();
+
+	if (mipi_dsi_pdata && mipi_dsi_pdata->dsi_power_save)
+		mipi_dsi_pdata->dsi_power_save(0);
+
+	if (mdp_rev >= MDP_REV_41)
+		mutex_unlock(&mfd->dma->ov_mutex);
+	else
+		up(&mfd->dma->mutex);
+
+	pr_debug("%s:\n", __func__);
+
+	return ret;
+}
+
+static int mipi_dsi_on(struct platform_device *pdev)
+{
+	int ret = 0;
+	u32 clk_rate;
+	struct msm_fb_data_type *mfd;
+	struct fb_info *fbi;
+	struct fb_var_screeninfo *var;
+	struct msm_panel_info *pinfo;
+	struct mipi_panel_info *mipi;
+	u32 hbp, hfp, vbp, vfp, hspw, vspw, width, height;
+	u32 ystride, bpp, data;
+	u32 dummy_xres, dummy_yres;
+	int target_type = 0;
+
+	mfd = platform_get_drvdata(pdev);
+	fbi = mfd->fbi;
+	var = &fbi->var;
+	pinfo = &mfd->panel_info;
+
+	if (mipi_dsi_pdata && mipi_dsi_pdata->dsi_power_save)
+		mipi_dsi_pdata->dsi_power_save(1);
+
+	clk_rate = mfd->fbi->var.pixclock;
+	clk_rate = min(clk_rate, mfd->panel_info.clk_max);
+
+	local_bh_disable();
+	mipi_dsi_clk_enable();
+	local_bh_enable();
+
+#ifndef CONFIG_FB_MSM_MDP303
+	mdp4_overlay_dsi_state_set(ST_DSI_RESUME);
+#endif
+
+	MIPI_OUTP(MIPI_DSI_BASE + 0x114, 1);
+	MIPI_OUTP(MIPI_DSI_BASE + 0x114, 0);
+
+	hbp = var->left_margin;
+	hfp = var->right_margin;
+	vbp = var->upper_margin;
+	vfp = var->lower_margin;
+	hspw = var->hsync_len;
+	vspw = var->vsync_len;
+	width = mfd->panel_info.xres;
+	height = mfd->panel_info.yres;
+
+	mipi_dsi_phy_ctrl(1);
+
+	if (mdp_rev == MDP_REV_42 && mipi_dsi_pdata)
+		target_type = mipi_dsi_pdata->target_type;
+
+	mipi_dsi_phy_init(0, &(mfd->panel_info), target_type);
+
+	mipi  = &mfd->panel_info.mipi;
+	if (mfd->panel_info.type == MIPI_VIDEO_PANEL) {
+		dummy_xres = mfd->panel_info.mipi.xres_pad;
+		dummy_yres = mfd->panel_info.mipi.yres_pad;
+
+		if (mdp_rev >= MDP_REV_41) {
+			MIPI_OUTP(MIPI_DSI_BASE + 0x20,
+				((hspw + hbp + width + dummy_xres) << 16 |
+				(hspw + hbp)));
+			MIPI_OUTP(MIPI_DSI_BASE + 0x24,
+				((vspw + vbp + height + dummy_yres) << 16 |
+				(vspw + vbp)));
+			MIPI_OUTP(MIPI_DSI_BASE + 0x28,
+				(vspw + vbp + height + dummy_yres +
+					vfp - 1) << 16 | (hspw + hbp +
+					width + dummy_xres + hfp - 1));
+		} else {
+			/* DSI_LAN_SWAP_CTRL */
+			MIPI_OUTP(MIPI_DSI_BASE + 0x00ac, mipi->dlane_swap);
+
+			MIPI_OUTP(MIPI_DSI_BASE + 0x20,
+				((hbp + width + dummy_xres) << 16 | (hbp)));
+			MIPI_OUTP(MIPI_DSI_BASE + 0x24,
+				((vbp + height + dummy_yres) << 16 | (vbp)));
+			MIPI_OUTP(MIPI_DSI_BASE + 0x28,
+				(vbp + height + dummy_yres + vfp) << 16 |
+					(hbp + width + dummy_xres + hfp));
+		}
+
+		MIPI_OUTP(MIPI_DSI_BASE + 0x2c, (hspw << 16));
+		MIPI_OUTP(MIPI_DSI_BASE + 0x30, 0);
+		MIPI_OUTP(MIPI_DSI_BASE + 0x34, (vspw << 16));
+
+	} else {		/* command mode */
+		if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB888)
+			bpp = 3;
+		else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB666)
+			bpp = 3;
+		else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB565)
+			bpp = 2;
+		else
+			bpp = 3;	/* Default format set to RGB888 */
+
+		ystride = width * bpp + 1;
+
+		/* DSI_COMMAND_MODE_MDP_STREAM_CTRL */
+		data = (ystride << 16) | (mipi->vc << 8) | DTYPE_DCS_LWRITE;
+		MIPI_OUTP(MIPI_DSI_BASE + 0x5c, data);
+		MIPI_OUTP(MIPI_DSI_BASE + 0x54, data);
+
+		/* DSI_COMMAND_MODE_MDP_STREAM_TOTAL */
+		data = height << 16 | width;
+		MIPI_OUTP(MIPI_DSI_BASE + 0x60, data);
+		MIPI_OUTP(MIPI_DSI_BASE + 0x58, data);
+	}
+
+	mipi_dsi_host_init(mipi);
+	mipi_dsi_cmd_bta_sw_trigger(); /* clean up ack_err_status */
+
+	ret = panel_next_on(pdev);
+
+	mipi_dsi_op_mode_config(mipi->mode);
+
+	if (mfd->panel_info.type == MIPI_CMD_PANEL) {
+		if (pinfo->lcd.vsync_enable) {
+			if (pinfo->lcd.hw_vsync_mode && vsync_gpio > 0) {
+				if (gpio_request(vsync_gpio, "MDP_VSYNC") == 0)
+					gpio_direction_input(vsync_gpio);
+				else
+					pr_err("%s: unable to request gpio=%d\n",
+						__func__, vsync_gpio);
+			}
+			mipi_dsi_set_tear_on(mfd);
+		}
+	}
+
+#ifdef CONFIG_MSM_BUS_SCALING
+	mdp_bus_scale_update_request(2);
+#endif
+	return ret;
+}
+
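Editor's note: for the MDP_REV_41+ video-mode path above, each DSI timing register packs an end position in the high half-word and a start position in the low half-word. Below is a sketch of that packing with the dummy_xres/dummy_yres padding left out and hypothetical panel timings.

#include <stdint.h>
#include <stdio.h>

struct dsi_video_timing {
	uint32_t h_active;	/* MIPI_DSI offset 0x20 in the code above */
	uint32_t v_active;	/* offset 0x24 */
	uint32_t total;		/* offset 0x28 */
};

static struct dsi_video_timing pack_timing(uint32_t width, uint32_t height,
					   uint32_t hbp, uint32_t hfp, uint32_t hspw,
					   uint32_t vbp, uint32_t vfp, uint32_t vspw)
{
	struct dsi_video_timing t;

	t.h_active = ((hspw + hbp + width) << 16) | (hspw + hbp);
	t.v_active = ((vspw + vbp + height) << 16) | (vspw + vbp);
	t.total = ((vspw + vbp + height + vfp - 1) << 16) |
		  (hspw + hbp + width + hfp - 1);
	return t;
}

int main(void)
{
	/* hypothetical 480x800 panel timings */
	struct dsi_video_timing t = pack_timing(480, 800, 32, 32, 4, 4, 4, 2);

	printf("0x20=0x%08x 0x24=0x%08x 0x28=0x%08x\n",
	       t.h_active, t.v_active, t.total);
	return 0;
}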
+
+static int mipi_dsi_resource_initialized;
+
+static int mipi_dsi_probe(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+	struct fb_info *fbi;
+	struct msm_panel_info *pinfo;
+	struct mipi_panel_info *mipi;
+	struct platform_device *mdp_dev = NULL;
+	struct msm_fb_panel_data *pdata = NULL;
+	int rc;
+	uint8 lanes = 0, bpp;
+	uint32 h_period, v_period, dsi_pclk_rate;
+
+	resource_size_t size;
+
+	if ((pdev->id == 1) && (pdev->num_resources >= 0)) {
+		mipi_dsi_pdata = pdev->dev.platform_data;
+
+		size =  resource_size(&pdev->resource[0]);
+		mipi_dsi_base =  ioremap(pdev->resource[0].start, size);
+
+		MSM_FB_INFO("mipi_dsi base phy_addr = 0x%x virt = 0x%x\n",
+				pdev->resource[0].start, (int) mipi_dsi_base);
+
+		if (!mipi_dsi_base)
+			return -ENOMEM;
+
+		if (mdp_rev >= MDP_REV_41) {
+			mmss_sfpb_base =  ioremap(MMSS_SFPB_BASE_PHY, 0x100);
+			MSM_FB_INFO("mmss_sfpb base phy_addr = 0x%x, "
+				"virt = 0x%x\n", MMSS_SFPB_BASE_PHY,
+				(int) mmss_sfpb_base);
+
+			if (!mmss_sfpb_base)
+				return -ENOMEM;
+		}
+
+		dsi_irq = platform_get_irq(pdev, 0);
+		if (dsi_irq < 0) {
+			pr_err("mipi_dsi: can not get dsi irq\n");
+			return -ENOMEM;
+		}
+
+		rc = request_irq(dsi_irq, mipi_dsi_isr, IRQF_DISABLED,
+						"MIPI_DSI", 0);
+		if (rc) {
+			pr_err("mipi_dsi_host request_irq() failed!\n");
+			return rc;
+		}
+
+		disable_irq(dsi_irq);
+
+		if (mdp_rev == MDP_REV_42 && mipi_dsi_pdata &&
+			mipi_dsi_pdata->target_type == 1) {
+			/* Target type is 1 for device with (De)serializer
+			 * 0x4f00000 is the base for TV Encoder.
+			 * Unused Offset 0x1000 is used for
+			 * (de)serializer on emulation platform
+			 */
+			periph_base = ioremap(MMSS_SERDES_BASE_PHY, 0x100);
+
+			if (periph_base) {
+				pr_debug("periph_base %p\n", periph_base);
+				writel(0x4, periph_base + 0x28);
+				writel(0xc, periph_base + 0x28);
+			} else {
+				pr_err("periph_base is NULL\n");
+				free_irq(dsi_irq, 0);
+				return -ENOMEM;
+			}
+		}
+
+		if (mipi_dsi_pdata) {
+			vsync_gpio = mipi_dsi_pdata->vsync_gpio;
+			pr_debug("%s: vsync_gpio=%d\n", __func__, vsync_gpio);
+
+			if (mdp_rev == MDP_REV_303 &&
+				mipi_dsi_pdata->dsi_client_reset) {
+				if (mipi_dsi_pdata->dsi_client_reset())
+					pr_err("%s: DSI Client Reset failed!\n",
+						__func__);
+				else
+					pr_debug("%s: DSI Client Reset success\n",
+						__func__);
+			}
+		}
+
+		mipi_dsi_resource_initialized = 1;
+
+		return 0;
+	}
+
+	mipi_dsi_clk_init(&pdev->dev);
+
+	if (!mipi_dsi_resource_initialized)
+		return -EPERM;
+
+	mfd = platform_get_drvdata(pdev);
+
+	if (!mfd)
+		return -ENODEV;
+
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
+		return -ENOMEM;
+
+
+	mdp_dev = platform_device_alloc("mdp", pdev->id);
+	if (!mdp_dev)
+		return -ENOMEM;
+
+	/*
+	 * link to the latest pdev
+	 */
+	mfd->pdev = mdp_dev;
+	mfd->dest = DISPLAY_LCD;
+
+	/*
+	 * alloc panel device data
+	 */
+	if (platform_device_add_data
+	    (mdp_dev, pdev->dev.platform_data,
+	     sizeof(struct msm_fb_panel_data))) {
+		pr_err("mipi_dsi_probe: platform_device_add_data failed!\n");
+		platform_device_put(mdp_dev);
+		return -ENOMEM;
+	}
+	/*
+	 * data chain
+	 */
+	pdata = mdp_dev->dev.platform_data;
+	pdata->on = mipi_dsi_on;
+	pdata->off = mipi_dsi_off;
+	pdata->next = pdev;
+
+	/*
+	 * get/set panel specific fb info
+	 */
+	mfd->panel_info = pdata->panel_info;
+	pinfo = &mfd->panel_info;
+
+	if (mdp_rev == MDP_REV_303 && mipi_dsi_pdata &&
+		mipi_dsi_pdata->get_lane_config) {
+		if (mipi_dsi_pdata->get_lane_config() != 2) {
+			pr_info("Changing to DSI Single Mode Configuration\n");
+#ifdef CONFIG_FB_MSM_MDP303
+			update_lane_config(pinfo);
+#endif
+		}
+	}
+
+	if (mfd->index == 0)
+		mfd->fb_imgType = MSMFB_DEFAULT_TYPE;
+	else
+		mfd->fb_imgType = MDP_RGB_565;
+
+	fbi = mfd->fbi;
+	fbi->var.pixclock = mfd->panel_info.clk_rate;
+	fbi->var.left_margin = mfd->panel_info.lcdc.h_back_porch;
+	fbi->var.right_margin = mfd->panel_info.lcdc.h_front_porch;
+	fbi->var.upper_margin = mfd->panel_info.lcdc.v_back_porch;
+	fbi->var.lower_margin = mfd->panel_info.lcdc.v_front_porch;
+	fbi->var.hsync_len = mfd->panel_info.lcdc.h_pulse_width;
+	fbi->var.vsync_len = mfd->panel_info.lcdc.v_pulse_width;
+
+	h_period = ((mfd->panel_info.lcdc.h_pulse_width)
+			+ (mfd->panel_info.lcdc.h_back_porch)
+			+ (mfd->panel_info.xres)
+			+ (mfd->panel_info.lcdc.h_front_porch));
+
+	v_period = ((mfd->panel_info.lcdc.v_pulse_width)
+			+ (mfd->panel_info.lcdc.v_back_porch)
+			+ (mfd->panel_info.yres)
+			+ (mfd->panel_info.lcdc.v_front_porch));
+
+	mipi  = &mfd->panel_info.mipi;
+
+	if (mipi->data_lane3)
+		lanes += 1;
+	if (mipi->data_lane2)
+		lanes += 1;
+	if (mipi->data_lane1)
+		lanes += 1;
+	if (mipi->data_lane0)
+		lanes += 1;
+
+	if ((mipi->dst_format == DSI_CMD_DST_FORMAT_RGB888)
+	    || (mipi->dst_format == DSI_VIDEO_DST_FORMAT_RGB888)
+	    || (mipi->dst_format == DSI_VIDEO_DST_FORMAT_RGB666_LOOSE))
+		bpp = 3;
+	else if ((mipi->dst_format == DSI_CMD_DST_FORMAT_RGB565)
+		 || (mipi->dst_format == DSI_VIDEO_DST_FORMAT_RGB565))
+		bpp = 2;
+	else
+		bpp = 3;		/* Default format set to RGB888 */
+
+	if (mfd->panel_info.type == MIPI_VIDEO_PANEL &&
+		!mfd->panel_info.clk_rate) {
+		h_period += mfd->panel_info.mipi.xres_pad;
+		v_period += mfd->panel_info.mipi.yres_pad;
+
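+		/*
+		 * derive the DSI bit clock from the panel timing:
+		 * total pixels per frame * frame rate * bits per pixel,
+		 * spread across the active data lanes
+		 */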
+		if (lanes > 0) {
+			mfd->panel_info.clk_rate =
+			((h_period * v_period * (mipi->frame_rate) * bpp * 8)
+			   / lanes);
+		} else {
+			pr_err("%s: forcing mipi_dsi lanes to 1\n", __func__);
+			mfd->panel_info.clk_rate =
+				(h_period * v_period
+					 * (mipi->frame_rate) * bpp * 8);
+		}
+	}
+	pll_divider_config.clk_rate = mfd->panel_info.clk_rate;
+
+	rc = mipi_dsi_clk_div_config(bpp, lanes, &dsi_pclk_rate);
+	if (rc)
+		goto mipi_dsi_probe_err;
+
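+	/* fall back to a safe default if the divider configuration
+	 * produced an out-of-range pixel clock */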
+	if ((dsi_pclk_rate < 3300000) || (dsi_pclk_rate > 103300000))
+		dsi_pclk_rate = 35000000;
+	mipi->dsi_pclk_rate = dsi_pclk_rate;
+
+	/*
+	 * set driver data
+	 */
+	platform_set_drvdata(mdp_dev, mfd);
+
+	/*
+	 * register in mdp driver
+	 */
+	rc = platform_device_add(mdp_dev);
+	if (rc)
+		goto mipi_dsi_probe_err;
+
+	pdev_list[pdev_list_cnt++] = pdev;
+
+	return 0;
+
+mipi_dsi_probe_err:
+	platform_device_put(mdp_dev);
+	return rc;
+}
+
+static int mipi_dsi_remove(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+
+	mfd = platform_get_drvdata(pdev);
+	iounmap(mipi_dsi_base);
+	return 0;
+}
+
+static int mipi_dsi_register_driver(void)
+{
+	return platform_driver_register(&mipi_dsi_driver);
+}
+
+static int __init mipi_dsi_driver_init(void)
+{
+	int ret;
+
+	mipi_dsi_init();
+
+	ret = mipi_dsi_register_driver();
+
+	device_initialize(&dsi_dev);
+
+	if (ret) {
+		pr_err("mipi_dsi_register_driver() failed!\n");
+		return ret;
+	}
+
+	return ret;
+}
+
+module_init(mipi_dsi_driver_init);
diff --git a/drivers/video/msm/mipi_dsi.h b/drivers/video/msm/mipi_dsi.h
new file mode 100644
index 0000000..2ed0596
--- /dev/null
+++ b/drivers/video/msm/mipi_dsi.h
@@ -0,0 +1,285 @@
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MIPI_DSI_H
+#define MIPI_DSI_H
+
+#include <mach/scm-io.h>
+#include <linux/list.h>
+
+#ifdef BIT
+#undef BIT
+#endif
+
+#define BIT(x)  (1<<(x))
+
+#define MMSS_CC_BASE_PHY 0x04000000	/* mmss clock control */
+#define MMSS_SFPB_BASE_PHY 0x05700000	/* mmss SFPB CFG */
+#define MMSS_SERDES_BASE_PHY 0x04f01000 /* mmss (De)Serializer CFG */
+
+#define MIPI_DSI_BASE mipi_dsi_base
+
+#define MIPI_OUTP(addr, data) writel((data), (addr))
+#define MIPI_INP(addr) readl(addr)
+
+#ifdef CONFIG_MSM_SECURE_IO
+#define MIPI_OUTP_SECURE(addr, data) secure_writel((data), (addr))
+#define MIPI_INP_SECURE(addr) secure_readl(addr)
+#else
+#define MIPI_OUTP_SECURE(addr, data) writel((data), (addr))
+#define MIPI_INP_SECURE(addr) readl(addr)
+#endif
+
+#define MIPI_DSI_PRIM 1
+#define MIPI_DSI_SECD 2
+
+#define MIPI_DSI_PANEL_VGA	0
+#define MIPI_DSI_PANEL_WVGA	1
+#define MIPI_DSI_PANEL_WVGA_PT	2
+#define MIPI_DSI_PANEL_FWVGA_PT	3
+#define DSI_PANEL_MAX	3
+
+enum {		/* mipi dsi panel */
+	DSI_VIDEO_MODE,
+	DSI_CMD_MODE,
+};
+
+enum {
+	ST_DSI_CLK_OFF,
+	ST_DSI_SUSPEND,
+	ST_DSI_RESUME,
+	ST_DSI_PLAYING,
+	ST_DSI_NUM
+};
+
+enum {
+	EV_DSI_UPDATE,
+	EV_DSI_DONE,
+	EV_DSI_TOUT,
+	EV_DSI_NUM
+};
+
+enum {
+	LANDSCAPE = 1,
+	PORTRAIT = 2,
+};
+
+#define DSI_NON_BURST_SYNCH_PULSE	0
+#define DSI_NON_BURST_SYNCH_EVENT	1
+#define DSI_BURST_MODE			2
+
+
+#define DSI_RGB_SWAP_RGB	0
+#define DSI_RGB_SWAP_RBG	1
+#define DSI_RGB_SWAP_BGR	2
+#define DSI_RGB_SWAP_BRG	3
+#define DSI_RGB_SWAP_GRB	4
+#define DSI_RGB_SWAP_GBR	5
+
+#define DSI_VIDEO_DST_FORMAT_RGB565		0
+#define DSI_VIDEO_DST_FORMAT_RGB666		1
+#define DSI_VIDEO_DST_FORMAT_RGB666_LOOSE	2
+#define DSI_VIDEO_DST_FORMAT_RGB888		3
+
+#define DSI_CMD_DST_FORMAT_RGB111	0
+#define DSI_CMD_DST_FORMAT_RGB332	3
+#define DSI_CMD_DST_FORMAT_RGB444	4
+#define DSI_CMD_DST_FORMAT_RGB565	6
+#define DSI_CMD_DST_FORMAT_RGB666	7
+#define DSI_CMD_DST_FORMAT_RGB888	8
+
+#define DSI_INTR_ERROR_MASK		BIT(25)
+#define DSI_INTR_ERROR			BIT(24)
+#define DSI_INTR_VIDEO_DONE_MASK	BIT(17)
+#define DSI_INTR_VIDEO_DONE		BIT(16)
+#define DSI_INTR_CMD_MDP_DONE_MASK	BIT(9)
+#define DSI_INTR_CMD_MDP_DONE		BIT(8)
+#define DSI_INTR_CMD_DMA_DONE_MASK	BIT(1)
+#define DSI_INTR_CMD_DMA_DONE		BIT(0)
+
+#define DSI_CMD_TRIGGER_NONE		0x0	/* mdp trigger */
+#define DSI_CMD_TRIGGER_TE		0x02
+#define DSI_CMD_TRIGGER_SW		0x04
+#define DSI_CMD_TRIGGER_SW_SEOF		0x05	/* cmd dma only */
+#define DSI_CMD_TRIGGER_SW_TE		0x06
+
+extern struct device dsi_dev;
+extern int mipi_dsi_clk_on;
+extern u32 dsi_irq;
+
+extern void  __iomem *periph_base;
+extern char *mmss_cc_base;	/* multimedia subsystem clock control */
+extern char *mmss_sfpb_base;	/* multimedia subsystem sfpb */
+
+struct dsiphy_pll_divider_config {
+	u32 clk_rate;
+	u32 fb_divider;
+	u32 ref_divider_ratio;
+	u32 bit_clk_divider;	/* oCLK1 */
+	u32 byte_clk_divider;	/* oCLK2 */
+	u32 dsi_clk_divider;	/* oCLK3 */
+};
+
+extern struct dsiphy_pll_divider_config pll_divider_config;
+
+struct dsi_clk_mnd_table {
+	uint8 lanes;
+	uint8 bpp;
+	uint8 dsiclk_div;
+	uint8 dsiclk_m;
+	uint8 dsiclk_n;
+	uint8 dsiclk_d;
+	uint8 pclk_m;
+	uint8 pclk_n;
+	uint8 pclk_d;
+};
+
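+/* per (lanes, bpp) pair: dsi clock divider and M/N/D values for the
+ * dsi and pixel clocks */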
+static const struct dsi_clk_mnd_table mnd_table[] = {
+	{ 1, 2, 8, 1, 1, 0, 1,  2, 1},
+	{ 1, 3, 8, 1, 1, 0, 1,  3, 2},
+	{ 2, 2, 4, 1, 1, 0, 1,  2, 1},
+	{ 2, 3, 4, 1, 1, 0, 1,  3, 2},
+	{ 3, 2, 1, 3, 8, 4, 3, 16, 8},
+	{ 3, 3, 1, 3, 8, 4, 1,  8, 4},
+	{ 4, 2, 2, 1, 1, 0, 1,  2, 1},
+	{ 4, 3, 2, 1, 1, 0, 1,  3, 2},
+};
+
+struct dsi_clk_desc {
+	uint32 src;
+	uint32 m;
+	uint32 n;
+	uint32 d;
+	uint32 mnd_mode;
+	uint32 pre_div_func;
+};
+
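+/*
+ * Layout of the 32-bit host packet header placed in front of each
+ * command DMA transfer: word count (long packets) or short-write data
+ * in bits [15:0], data type in [21:16], virtual channel in [23:22],
+ * plus the BTA, long-packet and last-packet flags in the top bits.
+ */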
+#define DSI_HOST_HDR_SIZE	4
+#define DSI_HDR_LAST		BIT(31)
+#define DSI_HDR_LONG_PKT	BIT(30)
+#define DSI_HDR_BTA		BIT(29)
+#define DSI_HDR_VC(vc)		(((vc) & 0x03) << 22)
+#define DSI_HDR_DTYPE(dtype)	(((dtype) & 0x03f) << 16)
+#define DSI_HDR_DATA2(data)	(((data) & 0x0ff) << 8)
+#define DSI_HDR_DATA1(data)	((data) & 0x0ff)
+#define DSI_HDR_WC(wc)		((wc) & 0x0ffff)
+
+#define DSI_BUF_SIZE	1024
+#define MIPI_DSI_MRPS	0x04  /* Maximum Return Packet Size */
+
+#define MIPI_DSI_LEN 8 /* 4 x 4 - 6 - 2, bytes dcs header+crc-align  */
+
+struct dsi_buf {
+	uint32 *hdr;	/* dsi host header */
+	char *start;	/* buffer start addr */
+	char *end;	/* buffer end addr */
+	int size;	/* size of buffer */
+	char *data;	/* buffer */
+	int len;	/* data length */
+	dma_addr_t dmap; /* mapped dma addr */
+};
+
+/* dcs read/write */
+#define DTYPE_DCS_WRITE		0x05	/* short write, 0 parameter */
+#define DTYPE_DCS_WRITE1	0x15	/* short write, 1 parameter */
+#define DTYPE_DCS_READ		0x06	/* read */
+#define DTYPE_DCS_LWRITE	0x39	/* long write */
+
+/* generic read/write */
+#define DTYPE_GEN_WRITE		0x03	/* short write, 0 parameter */
+#define DTYPE_GEN_WRITE1	0x13	/* short write, 1 parameter */
+#define DTYPE_GEN_WRITE2	0x23	/* short write, 2 parameter */
+#define DTYPE_GEN_LWRITE	0x29	/* long write */
+#define DTYPE_GEN_READ		0x04	/* long read, 0 parameter */
+#define DTYPE_GEN_READ1		0x14	/* long read, 1 parameter */
+#define DTYPE_GEN_READ2		0x24	/* long read, 2 parameter */
+
+#define DTYPE_TEAR_ON		0x35	/* set tear on */
+#define DTYPE_MAX_PKTSIZE	0x37	/* set max packet size */
+#define DTYPE_NULL_PKT		0x09	/* null packet, no data */
+#define DTYPE_BLANK_PKT		0x19	/* blanking packet, no data */
+
+#define DTYPE_CM_ON		0x02	/* color mode off */
+#define DTYPE_CM_OFF		0x12	/* color mode on */
+#define DTYPE_PERIPHERAL_OFF	0x22
+#define DTYPE_PERIPHERAL_ON	0x32
+
+
+struct dsi_cmd_desc {
+	int dtype;
+	int last;
+	int vc;
+	int ack;	/* ask ACK from peripheral */
+	int wait;
+	int dlen;
+	char *payload;
+};
+
+
+typedef void (*kickoff_act)(void *);
+
+struct dsi_kickoff_action {
+	struct list_head act_entry;
+	kickoff_act	action;
+	void *data;
+};
+
+
+char *mipi_dsi_buf_reserve_hdr(struct dsi_buf *dp, int hlen);
+char *mipi_dsi_buf_init(struct dsi_buf *dp);
+void mipi_dsi_init(void);
+int mipi_dsi_buf_alloc(struct dsi_buf *, int size);
+int mipi_dsi_cmd_dma_add(struct dsi_buf *dp, struct dsi_cmd_desc *cm);
+int mipi_dsi_cmds_tx(struct msm_fb_data_type *mfd,
+		struct dsi_buf *dp, struct dsi_cmd_desc *cmds, int cnt);
+
+int mipi_dsi_cmd_dma_tx(struct dsi_buf *dp);
+int mipi_dsi_cmd_reg_tx(uint32 data);
+int mipi_dsi_cmds_rx(struct msm_fb_data_type *mfd,
+			struct dsi_buf *tp, struct dsi_buf *rp,
+			struct dsi_cmd_desc *cmds, int len);
+int mipi_dsi_cmd_dma_rx(struct dsi_buf *tp, int rlen);
+void mipi_dsi_host_init(struct mipi_panel_info *pinfo);
+void mipi_dsi_op_mode_config(int mode);
+void mipi_dsi_cmd_mode_ctrl(int enable);
+void mdp4_dsi_cmd_trigger(void);
+void mipi_dsi_cmd_mdp_sw_trigger(void);
+void mipi_dsi_cmd_bta_sw_trigger(void);
+void mipi_dsi_ack_err_status(void);
+void mipi_dsi_set_tear_on(struct msm_fb_data_type *mfd);
+void mipi_dsi_set_tear_off(struct msm_fb_data_type *mfd);
+void mipi_dsi_clk_enable(void);
+void mipi_dsi_clk_disable(void);
+void mipi_dsi_pre_kickoff_action(void);
+void mipi_dsi_post_kickoff_action(void);
+void mipi_dsi_pre_kickoff_add(struct dsi_kickoff_action *act);
+void mipi_dsi_post_kickoff_add(struct dsi_kickoff_action *act);
+void mipi_dsi_pre_kickoff_del(struct dsi_kickoff_action *act);
+void mipi_dsi_post_kickoff_del(struct dsi_kickoff_action *act);
+
+irqreturn_t mipi_dsi_isr(int irq, void *ptr);
+
+void mipi_set_tx_power_mode(int mode);
+void mipi_dsi_phy_ctrl(int on);
+void mipi_dsi_phy_init(int panel_ndx, struct msm_panel_info const *panel_info,
+	int target_type);
+int mipi_dsi_clk_div_config(uint8 bpp, uint8 lanes,
+			    uint32 *expected_dsi_pclk);
+void mipi_dsi_clk_init(struct device *dev);
+void mipi_dsi_clk_deinit(struct device *dev);
+
+#ifdef CONFIG_FB_MSM_MDP303
+void update_lane_config(struct msm_panel_info *pinfo);
+#endif
+
+#endif /* MIPI_DSI_H */
diff --git a/drivers/video/msm/mipi_dsi_host.c b/drivers/video/msm/mipi_dsi_host.c
new file mode 100644
index 0000000..6607e4c
--- /dev/null
+++ b/drivers/video/msm/mipi_dsi_host.c
@@ -0,0 +1,1248 @@
+/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+
+#include <asm/system.h>
+#include <asm/mach-types.h>
+
+#include <mach/hardware.h>
+#include <mach/gpio.h>
+#include <mach/clk.h>
+#include <mach/dma.h>
+
+#include "msm_fb.h"
+#include "mipi_dsi.h"
+#include "mdp.h"
+#include "mdp4.h"
+
+static struct completion dsi_dma_comp;
+static struct dsi_buf dsi_tx_buf;
+static int dsi_irq_enabled;
+static spinlock_t dsi_lock;
+
+static struct list_head pre_kickoff_list;
+static struct list_head post_kickoff_list;
+
+void mipi_dsi_init(void)
+{
+	init_completion(&dsi_dma_comp);
+	mipi_dsi_buf_alloc(&dsi_tx_buf, DSI_BUF_SIZE);
+	spin_lock_init(&dsi_lock);
+
+	INIT_LIST_HEAD(&pre_kickoff_list);
+	INIT_LIST_HEAD(&post_kickoff_list);
+}
+
+void mipi_dsi_enable_irq(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dsi_lock, flags);
+	if (dsi_irq_enabled) {
+		pr_debug("%s: IRQ already enabled\n", __func__);
+		spin_unlock_irqrestore(&dsi_lock, flags);
+		return;
+	}
+	dsi_irq_enabled = 1;
+	enable_irq(dsi_irq);
+	spin_unlock_irqrestore(&dsi_lock, flags);
+}
+
+void mipi_dsi_disable_irq(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dsi_lock, flags);
+	if (dsi_irq_enabled == 0) {
+		pr_debug("%s: IRQ already disabled\n", __func__);
+		spin_unlock_irqrestore(&dsi_lock, flags);
+		return;
+	}
+
+	dsi_irq_enabled = 0;
+	disable_irq(dsi_irq);
+	spin_unlock_irqrestore(&dsi_lock, flags);
+}
+
+/*
+ * mipi_dsi_disable_irq_nosync() should be called
+ * from interrupt context
+ */
+void mipi_dsi_disable_irq_nosync(void)
+{
+	spin_lock(&dsi_lock);
+	if (dsi_irq_enabled == 0) {
+		pr_debug("%s: IRQ already disabled\n", __func__);
+		spin_unlock(&dsi_lock);
+		return;
+	}
+
+	dsi_irq_enabled = 0;
+	disable_irq_nosync(dsi_irq);
+	spin_unlock(&dsi_lock);
+}
+
+static void mipi_dsi_action(struct list_head *act_list)
+{
+	struct list_head *lp;
+	struct dsi_kickoff_action *act;
+
+	list_for_each(lp, act_list) {
+		act = list_entry(lp, struct dsi_kickoff_action, act_entry);
+		if (act && act->action)
+			act->action(act->data);
+	}
+}
+
+void mipi_dsi_pre_kickoff_action(void)
+{
+	mipi_dsi_action(&pre_kickoff_list);
+}
+
+void mipi_dsi_post_kickoff_action(void)
+{
+	mipi_dsi_action(&post_kickoff_list);
+}
+
+/*
+ * mipi_dsi_pre_kickoff_add:
+ * ov_mutex needs to be acquired before calling this function.
+ */
+void mipi_dsi_pre_kickoff_add(struct dsi_kickoff_action *act)
+{
+	if (act)
+		list_add_tail(&act->act_entry, &pre_kickoff_list);
+}
+
+/*
+ * mipi_dsi_post_kickoff_add:
+ * ov_mutex needs to be acquired before calling this function.
+ */
+void mipi_dsi_post_kickoff_add(struct dsi_kickoff_action *act)
+{
+	if (act)
+		list_add_tail(&act->act_entry, &post_kickoff_list);
+}
+
+/*
+ * mipi_dsi_pre_kickoff_del:
+ * ov_mutex needs to be acquired before calling this function.
+ */
+void mipi_dsi_pre_kickoff_del(struct dsi_kickoff_action *act)
+{
+	if (!list_empty(&pre_kickoff_list) && act)
+		list_del(&act->act_entry);
+}
+
+/*
+ * mipi_dsi_post_kickoff_del:
+ * ov_mutex needs to be acquired before calling this function.
+ */
+void mipi_dsi_post_kickoff_del(struct dsi_kickoff_action *act)
+{
+	if (!list_empty(&post_kickoff_list) && act)
+		list_del(&act->act_entry);
+}
+
+/*
+ * mipi dsi buf mechanism
+ */
+char *mipi_dsi_buf_reserve(struct dsi_buf *dp, int len)
+{
+	dp->data += len;
+	return dp->data;
+}
+
+char *mipi_dsi_buf_unreserve(struct dsi_buf *dp, int len)
+{
+	dp->data -= len;
+	return dp->data;
+}
+
+char *mipi_dsi_buf_push(struct dsi_buf *dp, int len)
+{
+	dp->data -= len;
+	dp->len += len;
+	return dp->data;
+}
+
+char *mipi_dsi_buf_reserve_hdr(struct dsi_buf *dp, int hlen)
+{
+	dp->hdr = (uint32 *)dp->data;
+	return mipi_dsi_buf_reserve(dp, hlen);
+}
+
+char *mipi_dsi_buf_init(struct dsi_buf *dp)
+{
+	int off;
+
+	dp->data = dp->start;
+	off = (int)dp->data;
+	/* 8 byte align */
+	off &= 0x07;
+	if (off)
+		off = 8 - off;
+	dp->data += off;
+	dp->len = 0;
+	return dp->data;
+}
+
+int mipi_dsi_buf_alloc(struct dsi_buf *dp, int size)
+{
+
+	dp->start = kmalloc(size, GFP_KERNEL);
+	if (dp->start == NULL) {
+		pr_err("%s:%u\n", __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	dp->end = dp->start + size;
+	dp->size = size;
+
+	if ((int)dp->start & 0x07)
+		pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
+
+	dp->data = dp->start;
+	dp->len = 0;
+	return size;
+}
+
+/*
+ * mipi dsi generic long write
+ */
+static int mipi_dsi_generic_lwrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	char *bp;
+	uint32 *hp;
+	int i, len;
+
+	bp = mipi_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+
+	/* fill up payload */
+	if (cm->payload) {
+		len = cm->dlen;
+		len += 3;
+		len &= ~0x03;	/* round up to a multiple of 4 */
+		for (i = 0; i < cm->dlen; i++)
+			*bp++ = cm->payload[i];
+
+		/* append 0xff to the end */
+		for (; i < len; i++)
+			*bp++ = 0xff;
+
+		dp->len += len;
+	}
+
+	/* fill up header */
+	hp = dp->hdr;
+	*hp = 0;
+	*hp = DSI_HDR_WC(cm->dlen);
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_LONG_PKT;
+	*hp |= DSI_HDR_DTYPE(DTYPE_GEN_LWRITE);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	mipi_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;
+}
+
+/*
+ * mipi dsi generic short write with 0, 1 or 2 parameters
+ */
+static int mipi_dsi_generic_swrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	uint32 *hp;
+	int len;
+
+	if (cm->dlen && cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return 0;
+	}
+
+	mipi_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+
+	len = (cm->dlen > 2) ? 2 : cm->dlen;
+
+	if (len == 1) {
+		*hp |= DSI_HDR_DTYPE(DTYPE_GEN_WRITE1);
+		*hp |= DSI_HDR_DATA1(cm->payload[0]);
+		*hp |= DSI_HDR_DATA2(0);
+	} else if (len == 2) {
+		*hp |= DSI_HDR_DTYPE(DTYPE_GEN_WRITE2);
+		*hp |= DSI_HDR_DATA1(cm->payload[0]);
+		*hp |= DSI_HDR_DATA2(cm->payload[1]);
+	} else {
+		*hp |= DSI_HDR_DTYPE(DTYPE_GEN_WRITE);
+		*hp |= DSI_HDR_DATA1(0);
+		*hp |= DSI_HDR_DATA2(0);
+	}
+
+	mipi_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;	/* 4 bytes */
+}
+
+/*
+ * mipi dsi generic read with 0, 1 or 2 parameters
+ */
+static int mipi_dsi_generic_read(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	uint32 *hp;
+	int len;
+
+	if (cm->dlen && cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return 0;
+	}
+
+	mipi_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_BTA;
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	len = (cm->dlen > 2) ? 2 : cm->dlen;
+
+	if (len == 1) {
+		*hp |= DSI_HDR_DTYPE(DTYPE_GEN_READ1);
+		*hp |= DSI_HDR_DATA1(cm->payload[0]);
+		*hp |= DSI_HDR_DATA2(0);
+	} else if (len == 2) {
+		*hp |= DSI_HDR_DTYPE(DTYPE_GEN_READ2);
+		*hp |= DSI_HDR_DATA1(cm->payload[0]);
+		*hp |= DSI_HDR_DATA2(cm->payload[1]);
+	} else {
+		*hp |= DSI_HDR_DTYPE(DTYPE_GEN_READ);
+		*hp |= DSI_HDR_DATA1(0);
+		*hp |= DSI_HDR_DATA2(0);
+	}
+
+	mipi_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+	return dp->len;	/* 4 bytes */
+}
+
+/*
+ * mipi dsi dcs long write
+ */
+static int mipi_dsi_dcs_lwrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	char *bp;
+	uint32 *hp;
+	int i, len;
+
+	bp = mipi_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+
+	/*
+	 * fill up payload
+	 * dcs command byte (first byte) followed by payload
+	 */
+	if (cm->payload) {
+		len = cm->dlen;
+		len += 3;
+		len &= ~0x03;	/* round up to a multiple of 4 */
+		for (i = 0; i < cm->dlen; i++)
+			*bp++ = cm->payload[i];
+
+		/* append 0xff to the end */
+		for (; i < len; i++)
+			*bp++ = 0xff;
+
+		dp->len += len;
+	}
+
+	/* fill up header */
+	hp = dp->hdr;
+	*hp = 0;
+	*hp = DSI_HDR_WC(cm->dlen);
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_LONG_PKT;
+	*hp |= DSI_HDR_DTYPE(DTYPE_DCS_LWRITE);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	mipi_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;
+}
+
+/*
+ * mipi dsi dcs short write with 0 parameters
+ */
+static int mipi_dsi_dcs_swrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	uint32 *hp;
+	int len;
+
+	if (cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return -EINVAL;
+	}
+
+	mipi_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	if (cm->ack)		/* ask ACK trigger msg from peripheral */
+		*hp |= DSI_HDR_BTA;
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	len = (cm->dlen > 1) ? 1 : cm->dlen;
+
+	*hp |= DSI_HDR_DTYPE(DTYPE_DCS_WRITE);
+	*hp |= DSI_HDR_DATA1(cm->payload[0]);	/* dcs command byte */
+	*hp |= DSI_HDR_DATA2(0);
+
+	mipi_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+	return dp->len;
+}
+
+/*
+ * mipi dsi dcs short write with 1 parameter
+ */
+static int mipi_dsi_dcs_swrite1(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	uint32 *hp;
+
+	if (cm->dlen < 2 || cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return -EINVAL;
+	}
+
+	mipi_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	if (cm->ack)		/* ask ACK trigger msg from peripheral */
+		*hp |= DSI_HDR_BTA;
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	*hp |= DSI_HDR_DTYPE(DTYPE_DCS_WRITE1);
+	*hp |= DSI_HDR_DATA1(cm->payload[0]);	/* dcs command byte */
+	*hp |= DSI_HDR_DATA2(cm->payload[1]);	/* parameter */
+
+	mipi_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;
+}
+
+/*
+ * mipi dsi dcs read with 0 parameters
+ */
+static int mipi_dsi_dcs_read(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	uint32 *hp;
+
+	if (cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return -EINVAL;
+	}
+
+	mipi_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_BTA;
+	*hp |= DSI_HDR_DTYPE(DTYPE_DCS_READ);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	*hp |= DSI_HDR_DATA1(cm->payload[0]);	/* dcs command byte */
+	*hp |= DSI_HDR_DATA2(0);
+
+	mipi_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;	/* 4 bytes */
+}
+
+static int mipi_dsi_cm_on(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	uint32 *hp;
+
+	mipi_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_CM_ON);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	mipi_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;	/* 4 bytes */
+}
+
+static int mipi_dsi_cm_off(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	uint32 *hp;
+
+	mipi_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_CM_OFF);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	mipi_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;	/* 4 bytes */
+}
+
+static int mipi_dsi_peripheral_on(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	uint32 *hp;
+
+	mipi_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_PERIPHERAL_ON);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	mipi_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;	/* 4 bytes */
+}
+
+static int mipi_dsi_peripheral_off(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	uint32 *hp;
+
+	mipi_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_PERIPHERAL_OFF);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	mipi_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;	/* 4 bytes */
+}
+
+static int mipi_dsi_set_max_pktsize(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	uint32 *hp;
+
+	if (cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return 0;
+	}
+
+	mipi_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_MAX_PKTSIZE);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	*hp |= DSI_HDR_DATA1(cm->payload[0]);
+	*hp |= DSI_HDR_DATA2(cm->payload[1]);
+
+	mipi_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;	/* 4 bytes */
+}
+
+static int mipi_dsi_null_pkt(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	uint32 *hp;
+
+	mipi_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp = DSI_HDR_WC(cm->dlen);
+	*hp |= DSI_HDR_LONG_PKT;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_NULL_PKT);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	mipi_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;	/* 4 bytes */
+}
+
+static int mipi_dsi_blank_pkt(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	uint32 *hp;
+
+	mipi_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp = DSI_HDR_WC(cm->dlen);
+	*hp |= DSI_HDR_LONG_PKT;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_BLANK_PKT);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	mipi_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;	/* 4 bytes */
+}
+
+/*
+ * prepare cmd buffer to be txed
+ */
+int mipi_dsi_cmd_dma_add(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	int len = 0;
+
+	switch (cm->dtype) {
+	case DTYPE_GEN_WRITE:
+	case DTYPE_GEN_WRITE1:
+	case DTYPE_GEN_WRITE2:
+		len = mipi_dsi_generic_swrite(dp, cm);
+		break;
+	case DTYPE_GEN_LWRITE:
+		len = mipi_dsi_generic_lwrite(dp, cm);
+		break;
+	case DTYPE_GEN_READ:
+	case DTYPE_GEN_READ1:
+	case DTYPE_GEN_READ2:
+		len = mipi_dsi_generic_read(dp, cm);
+		break;
+	case DTYPE_DCS_LWRITE:
+		len = mipi_dsi_dcs_lwrite(dp, cm);
+		break;
+	case DTYPE_DCS_WRITE:
+		len = mipi_dsi_dcs_swrite(dp, cm);
+		break;
+	case DTYPE_DCS_WRITE1:
+		len = mipi_dsi_dcs_swrite1(dp, cm);
+		break;
+	case DTYPE_DCS_READ:
+		len = mipi_dsi_dcs_read(dp, cm);
+		break;
+	case DTYPE_MAX_PKTSIZE:
+		len = mipi_dsi_set_max_pktsize(dp, cm);
+		break;
+	case DTYPE_NULL_PKT:
+		len = mipi_dsi_null_pkt(dp, cm);
+		break;
+	case DTYPE_BLANK_PKT:
+		len = mipi_dsi_blank_pkt(dp, cm);
+		break;
+	case DTYPE_CM_ON:
+		len = mipi_dsi_cm_on(dp, cm);
+		break;
+	case DTYPE_CM_OFF:
+		len = mipi_dsi_cm_off(dp, cm);
+		break;
+	case DTYPE_PERIPHERAL_ON:
+		len = mipi_dsi_peripheral_on(dp, cm);
+		break;
+	case DTYPE_PERIPHERAL_OFF:
+		len = mipi_dsi_peripheral_off(dp, cm);
+		break;
+	default:
+		pr_debug("%s: dtype=%x NOT supported\n",
+					__func__, cm->dtype);
+		break;
+
+	}
+
+	return len;
+}
+
+void mipi_dsi_host_init(struct mipi_panel_info *pinfo)
+{
+	uint32 dsi_ctrl, intr_ctrl;
+	uint32 data;
+
+	if (pinfo->mode == DSI_VIDEO_MODE) {
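+		/* video mode ctrl: HSA/HE pulse mode, low-power (stop state)
+		 * flags for the blanking periods, traffic mode, destination
+		 * format and virtual channel */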
+		data = 0;
+		if (pinfo->pulse_mode_hsa_he)
+			data |= BIT(28);
+		if (pinfo->hfp_power_stop)
+			data |= BIT(24);
+		if (pinfo->hbp_power_stop)
+			data |= BIT(20);
+		if (pinfo->hsa_power_stop)
+			data |= BIT(16);
+		if (pinfo->eof_bllp_power_stop)
+			data |= BIT(15);
+		if (pinfo->bllp_power_stop)
+			data |= BIT(12);
+		data |= ((pinfo->traffic_mode & 0x03) << 8);
+		data |= ((pinfo->dst_format & 0x03) << 4); /* 2 bits */
+		data |= (pinfo->vc & 0x03);
+		MIPI_OUTP(MIPI_DSI_BASE + 0x000c, data);
+
+		data = 0;
+		data |= ((pinfo->rgb_swap & 0x07) << 12);
+		if (pinfo->b_sel)
+			data |= BIT(8);
+		if (pinfo->g_sel)
+			data |= BIT(4);
+		if (pinfo->r_sel)
+			data |= BIT(0);
+		MIPI_OUTP(MIPI_DSI_BASE + 0x001c, data);
+	} else if (pinfo->mode == DSI_CMD_MODE) {
+		data = 0;
+		data |= ((pinfo->interleave_max & 0x0f) << 20);
+		data |= ((pinfo->rgb_swap & 0x07) << 16);
+		if (pinfo->b_sel)
+			data |= BIT(12);
+		if (pinfo->g_sel)
+			data |= BIT(8);
+		if (pinfo->r_sel)
+			data |= BIT(4);
+		data |= (pinfo->dst_format & 0x0f);	/* 4 bits */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x003c, data);
+
+		/* DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL */
+		data = pinfo->wr_mem_continue & 0x0ff;
+		data <<= 8;
+		data |= (pinfo->wr_mem_start & 0x0ff);
+		if (pinfo->insert_dcs_cmd)
+			data |= BIT(16);
+		MIPI_OUTP(MIPI_DSI_BASE + 0x0040, data);
+	} else
+		pr_err("%s: Unknown DSI mode=%d\n", __func__, pinfo->mode);
+
+	dsi_ctrl = BIT(8) | BIT(2);	/* clock enable & cmd mode */
+	intr_ctrl = 0;
+	intr_ctrl = (DSI_INTR_CMD_DMA_DONE_MASK | DSI_INTR_CMD_MDP_DONE_MASK);
+
+	if (pinfo->crc_check)
+		dsi_ctrl |= BIT(24);
+	if (pinfo->ecc_check)
+		dsi_ctrl |= BIT(20);
+	if (pinfo->data_lane3)
+		dsi_ctrl |= BIT(7);
+	if (pinfo->data_lane2)
+		dsi_ctrl |= BIT(6);
+	if (pinfo->data_lane1)
+		dsi_ctrl |= BIT(5);
+	if (pinfo->data_lane0)
+		dsi_ctrl |= BIT(4);
+
+	/* from frame buffer, low power mode */
+	/* DSI_COMMAND_MODE_DMA_CTRL */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x38, 0x14000000);
+
+	data = 0;
+	if (pinfo->te_sel)
+		data |= BIT(31);
+	data |= pinfo->mdp_trigger << 4;/* cmd mdp trigger */
+	data |= pinfo->dma_trigger;	/* cmd dma trigger */
+	data |= (pinfo->stream & 0x01) << 8;
+	MIPI_OUTP(MIPI_DSI_BASE + 0x0080, data); /* DSI_TRIG_CTRL */
+
+	/* DSI_LANE_SWAP_CTRL */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x00ac, pinfo->dlane_swap);
+
+	/* clock out ctrl */
+	data = pinfo->t_clk_post & 0x3f;	/* 6 bits */
+	data <<= 8;
+	data |= pinfo->t_clk_pre & 0x3f;	/*  6 bits */
+	MIPI_OUTP(MIPI_DSI_BASE + 0xc0, data);	/* DSI_CLKOUT_TIMING_CTRL */
+
+	data = 0;
+	if (pinfo->rx_eot_ignore)
+		data |= BIT(4);
+	if (pinfo->tx_eot_append)
+		data |= BIT(0);
+	MIPI_OUTP(MIPI_DSI_BASE + 0x00c8, data); /* DSI_EOT_PACKET_CTRL */
+
+
+	/* allow only ack-err-status to generate interrupt */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x0108, 0x13ff3fe0); /* DSI_ERR_INT_MASK0 */
+
+	intr_ctrl |= DSI_INTR_ERROR_MASK;
+	MIPI_OUTP(MIPI_DSI_BASE + 0x010c, intr_ctrl); /* DSI_INTR_CTRL */
+
+	/* turn esc, byte, dsi, pclk, sclk, hclk on */
+	if (mdp_rev >= MDP_REV_41)
+		MIPI_OUTP(MIPI_DSI_BASE + 0x118, 0x23f); /* DSI_CLK_CTRL */
+	else
+		MIPI_OUTP(MIPI_DSI_BASE + 0x118, 0x33f); /* DSI_CLK_CTRL */
+
+	dsi_ctrl |= BIT(0);	/* enable dsi */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x0000, dsi_ctrl);
+
+	wmb();
+}
+
+void mipi_set_tx_power_mode(int mode)
+{
+	uint32 data = MIPI_INP(MIPI_DSI_BASE + 0x38);
+
+	if (mode == 0)
+		data &= ~BIT(26);
+	else
+		data |= BIT(26);
+
+	MIPI_OUTP(MIPI_DSI_BASE + 0x38, data);
+}
+
+void mipi_dsi_op_mode_config(int mode)
+{
+
+	uint32 dsi_ctrl, intr_ctrl;
+
+	dsi_ctrl = MIPI_INP(MIPI_DSI_BASE + 0x0000);
+	dsi_ctrl &= ~0x07;
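+	/* bit 0 enables the controller, bit 1 the video mode engine,
+	 * bit 2 the command mode engine */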
+	if (mode == DSI_VIDEO_MODE) {
+		dsi_ctrl |= 0x03;
+		intr_ctrl = DSI_INTR_CMD_DMA_DONE_MASK;
+	} else {		/* command mode */
+		dsi_ctrl |= 0x05;
+		intr_ctrl = DSI_INTR_CMD_DMA_DONE_MASK | DSI_INTR_ERROR_MASK |
+				DSI_INTR_CMD_MDP_DONE_MASK;
+	}
+
+	pr_debug("%s: dsi_ctrl=%x intr=%x\n", __func__, dsi_ctrl, intr_ctrl);
+
+	MIPI_OUTP(MIPI_DSI_BASE + 0x010c, intr_ctrl); /* DSI_INTR_CTRL */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x0000, dsi_ctrl);
+	wmb();
+}
+
+void mipi_dsi_cmd_mdp_sw_trigger(void)
+{
+	mipi_dsi_pre_kickoff_action();
+	mipi_dsi_enable_irq();
+	MIPI_OUTP(MIPI_DSI_BASE + 0x090, 0x01);	/* trigger */
+	wmb();
+}
+
+
+void mipi_dsi_cmd_bta_sw_trigger(void)
+{
+	uint32 data;
+	int cnt = 0;
+
+	MIPI_OUTP(MIPI_DSI_BASE + 0x094, 0x01);	/* trigger */
+	wmb();
+
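+	/* poll DSI_STATUS until the BTA busy bit (bit 4) clears */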
+	while (cnt < 10000) {
+		data = MIPI_INP(MIPI_DSI_BASE + 0x0004);/* DSI_STATUS */
+		if ((data & 0x0010) == 0)
+			break;
+		cnt++;
+	}
+
+	mipi_dsi_ack_err_status();
+
+	pr_debug("%s: BTA done, cnt=%d\n", __func__, cnt);
+}
+
+static char set_tear_on[2] = {0x35, 0x00};
+static struct dsi_cmd_desc dsi_tear_on_cmd = {
+	DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(set_tear_on), set_tear_on};
+
+static char set_tear_off[2] = {0x34, 0x00};
+static struct dsi_cmd_desc dsi_tear_off_cmd = {
+	DTYPE_DCS_WRITE, 1, 0, 0, 0, sizeof(set_tear_off), set_tear_off};
+
+void mipi_dsi_set_tear_on(struct msm_fb_data_type *mfd)
+{
+	mipi_dsi_buf_init(&dsi_tx_buf);
+	mipi_dsi_cmds_tx(mfd, &dsi_tx_buf, &dsi_tear_on_cmd, 1);
+}
+
+void mipi_dsi_set_tear_off(struct msm_fb_data_type *mfd)
+{
+	mipi_dsi_buf_init(&dsi_tx_buf);
+	mipi_dsi_cmds_tx(mfd, &dsi_tx_buf, &dsi_tear_off_cmd, 1);
+}
+
+int mipi_dsi_cmd_reg_tx(uint32 data)
+{
+#ifdef DSI_HOST_DEBUG
+	int i;
+	char *bp;
+
+	bp = (char *)&data;
+	pr_debug("%s: ", __func__);
+	for (i = 0; i < 4; i++)
+		pr_debug("%x ", *bp++);
+
+	pr_debug("\n");
+#endif
+
+	MIPI_OUTP(MIPI_DSI_BASE + 0x0080, 0x04);/* sw trigger */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x0, 0x135);
+
+	wmb();
+
+	MIPI_OUTP(MIPI_DSI_BASE + 0x038, data);
+	wmb();
+	MIPI_OUTP(MIPI_DSI_BASE + 0x08c, 0x01);	/* trigger */
+	wmb();
+
+	udelay(300);
+
+	return 4;
+}
+
+/*
+ * mipi_dsi_cmds_tx:
+ * ov_mutex needs to be acquired before calling this function.
+ */
+int mipi_dsi_cmds_tx(struct msm_fb_data_type *mfd,
+		struct dsi_buf *tp, struct dsi_cmd_desc *cmds, int cnt)
+{
+	struct dsi_cmd_desc *cm;
+	uint32 dsi_ctrl, ctrl;
+	int i, video_mode;
+
+	/*
+	 * turn on cmd mode
+	 * for video mode, do not send cmds larger than
+	 * one pixel line, since they are only transmitted
+	 * during the BLLP.
+	 */
+	dsi_ctrl = MIPI_INP(MIPI_DSI_BASE + 0x0000);
+	video_mode = dsi_ctrl & 0x02; /* VIDEO_MODE_EN */
+	if (video_mode) {
+		ctrl = dsi_ctrl | 0x04; /* CMD_MODE_EN */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x0000, ctrl);
+	} else { /* cmd mode */
+		/*
+		 * during boot up, cmd mode is configured
+		 * even if it is a video mode panel.
+		 */
+		/* make sure mdp dma is not txing pixel data */
+		if (mfd->panel_info.type == MIPI_CMD_PANEL) {
+#ifndef CONFIG_FB_MSM_MDP303
+			mdp4_dsi_cmd_dma_busy_wait(mfd);
+#else
+			mdp3_dsi_cmd_dma_busy_wait(mfd);
+#endif
+		}
+	}
+
+	mipi_dsi_enable_irq();
+	cm = cmds;
+	mipi_dsi_buf_init(tp);
+	for (i = 0; i < cnt; i++) {
+		mipi_dsi_buf_init(tp);
+		mipi_dsi_cmd_dma_add(tp, cm);
+		mipi_dsi_cmd_dma_tx(tp);
+		if (cm->wait)
+			msleep(cm->wait);
+		cm++;
+	}
+	mipi_dsi_disable_irq();
+
+	if (video_mode)
+		MIPI_OUTP(MIPI_DSI_BASE + 0x0000, dsi_ctrl); /* restore */
+
+	return cnt;
+}
+
+/* MIPI_DSI_MRPS, Maximum Return Packet Size */
+static char max_pktsize[2] = {0x00, 0x00}; /* LSB tx first, 10 bytes */
+
+static struct dsi_cmd_desc pkt_size_cmd[] = {
+	{DTYPE_MAX_PKTSIZE, 1, 0, 0, 0,
+		sizeof(max_pktsize), max_pktsize}
+};
+
+/*
+ * The DSI panel replies with up to MAX_RETURN_PACKET_SIZE bytes of data
+ * plus the DCS header, ECC and CRC for a DCS long read response.
+ * The mipi_dsi controller only has 4 x 32-bit registers (16 bytes) to
+ * hold data per transaction.
+ * MIPI_DSI_LEN is equal to 8, so len should be either 4 or 8;
+ * any return data longer than MIPI_DSI_LEN needs to be broken down
+ * into multiple transactions.
+ *
+ * ov_mutex needs to be acquired before calling this function.
+ */
+int mipi_dsi_cmds_rx(struct msm_fb_data_type *mfd,
+			struct dsi_buf *tp, struct dsi_buf *rp,
+			struct dsi_cmd_desc *cmds, int len)
+{
+	int cnt, res;
+	static int pkt_size;
+
+	if (len <= 2)
+		cnt = 4;	/* short read */
+	else {
+		if (len > MIPI_DSI_LEN)
+			len = MIPI_DSI_LEN;	/* 8 bytes at most */
+
+		res = len & 0x03;
+		len += (4 - res); /* 4 bytes align */
+		/*
+		 * add an extra 2 bytes to len so the overall
+		 * packet size is a multiple of 4. This also makes
+		 * sure the 4-byte dcs header lands within a
+		 * 32-bit register after shifting in.
+		 * After all this, len should be either 6 or 10.
+		 */
+		len += 2;
+		cnt = len + 6; /* 4 bytes header + 2 bytes crc */
+	}
+
+
+	if (mfd->panel_info.type == MIPI_CMD_PANEL) {
+		/* make sure mdp dma is not txing pixel data */
+#ifndef CONFIG_FB_MSM_MDP303
+			mdp4_dsi_cmd_dma_busy_wait(mfd);
+#else
+			mdp3_dsi_cmd_dma_busy_wait(mfd);
+#endif
+	}
+
+	mipi_dsi_enable_irq();
+	if (pkt_size != len) {
+		/* set new max pkt size */
+		pkt_size = len;
+		max_pktsize[0] = pkt_size;
+		mipi_dsi_buf_init(tp);
+		mipi_dsi_cmd_dma_add(tp, pkt_size_cmd);
+		mipi_dsi_cmd_dma_tx(tp);
+	}
+
+	mipi_dsi_buf_init(tp);
+	mipi_dsi_cmd_dma_add(tp, cmds);
+
+	/* transmit read command to client */
+	mipi_dsi_cmd_dma_tx(tp);
+	/*
+	 * once cmd_dma_done interrupt received,
+	 * return data from client is ready and stored
+	 * at RDBK_DATA register already
+	 */
+	mipi_dsi_cmd_dma_rx(rp, cnt);
+
+	mipi_dsi_disable_irq();
+
+	/* strip off dcs header & crc */
+	if (cnt > 4) { /* long response */
+		rp->data += 4; /* skip dcs header */
+		rp->len -= 6; /* deduct 4 bytes header + 2 bytes crc */
+		rp->len -= 2; /* extra 2 bytes added */
+	} else {
+		rp->data += 1; /* skip dcs short header */
+		rp->len -= 2; /* deduct 1 byte header + 1 byte ecc */
+	}
+
+	return rp->len;
+}
+
+int mipi_dsi_cmd_dma_tx(struct dsi_buf *tp)
+{
+	int len;
+
+#ifdef DSI_HOST_DEBUG
+	int i;
+	char *bp;
+
+	bp = tp->data;
+
+	pr_debug("%s: ", __func__);
+	for (i = 0; i < tp->len; i++)
+		pr_debug("%x ", *bp++);
+
+	pr_debug("\n");
+#endif
+
+	len = tp->len;
+	len += 3;
+	len &= ~0x03;	/* round up to a multiple of 4 */
+
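+	/* map the buffer for the command DMA engine; dsi_dma_comp is
+	 * completed by the ISR once CMD_DMA_DONE fires */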
+	tp->dmap = dma_map_single(&dsi_dev, tp->data, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(&dsi_dev, tp->dmap))
+		pr_err("%s: dma mapping failed\n", __func__);
+
+	INIT_COMPLETION(dsi_dma_comp);
+
+	MIPI_OUTP(MIPI_DSI_BASE + 0x044, tp->dmap);
+	MIPI_OUTP(MIPI_DSI_BASE + 0x048, len);
+	wmb();
+	MIPI_OUTP(MIPI_DSI_BASE + 0x08c, 0x01);	/* trigger */
+	wmb();
+
+	wait_for_completion(&dsi_dma_comp);
+
+	dma_unmap_single(&dsi_dev, tp->dmap, len, DMA_TO_DEVICE);
+	tp->dmap = 0;
+	return tp->len;
+}
+
+int mipi_dsi_cmd_dma_rx(struct dsi_buf *rp, int rlen)
+{
+	uint32 *lp, data;
+	int i, off, cnt;
+
+	lp = (uint32 *)rp->data;
+	cnt = rlen;
+	cnt += 3;
+	cnt >>= 2;
+
+	if (cnt > 4)
+		cnt = 4; /* 4 x 32 bits registers only */
+
+	off = 0x068;	/* DSI_RDBK_DATA0 */
+	off += ((cnt - 1) * 4);
+
+
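+	/* read back from the highest used RDBK_DATA register down to
+	 * RDBK_DATA0, byte-swapping each word so the payload bytes end
+	 * up in the order they were received */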
+	for (i = 0; i < cnt; i++) {
+		data = (uint32)MIPI_INP(MIPI_DSI_BASE + off);
+		*lp++ = ntohl(data);	/* to network byte order */
+		off -= 4;
+		rp->len += sizeof(*lp);
+	}
+
+	return rlen;
+}
+
+void mipi_dsi_irq_set(uint32 mask, uint32 irq)
+{
+	uint32 data;
+
+	data = MIPI_INP(MIPI_DSI_BASE + 0x010c);/* DSI_INTR_CTRL */
+	data &= ~mask;
+	data |= irq;
+	MIPI_OUTP(MIPI_DSI_BASE + 0x010c, data);
+}
+
+
+void mipi_dsi_ack_err_status(void)
+{
+	uint32 status;
+
+	status = MIPI_INP(MIPI_DSI_BASE + 0x0064);/* DSI_ACK_ERR_STATUS */
+
+	if (status) {
+		MIPI_OUTP(MIPI_DSI_BASE + 0x0064, status);
+		pr_debug("%s: status=%x\n", __func__, status);
+	}
+}
+
+void mipi_dsi_timeout_status(void)
+{
+	uint32 status;
+
+	status = MIPI_INP(MIPI_DSI_BASE + 0x00bc);/* DSI_TIMEOUT_STATUS */
+	if (status & 0x0111) {
+		MIPI_OUTP(MIPI_DSI_BASE + 0x00bc, status);
+		pr_debug("%s: status=%x\n", __func__, status);
+	}
+}
+
+void mipi_dsi_dln0_phy_err(void)
+{
+	uint32 status;
+
+	status = MIPI_INP(MIPI_DSI_BASE + 0x00b0);/* DSI_DLN0_PHY_ERR */
+
+	if (status & 0x011111) {
+		MIPI_OUTP(MIPI_DSI_BASE + 0x00b0, status);
+		pr_debug("%s: status=%x\n", __func__, status);
+	}
+}
+
+void mipi_dsi_fifo_status(void)
+{
+	uint32 status;
+
+	status = MIPI_INP(MIPI_DSI_BASE + 0x0008);/* DSI_FIFO_STATUS */
+
+	if (status & 0x44444489) {
+		MIPI_OUTP(MIPI_DSI_BASE + 0x0008, status);
+		pr_debug("%s: status=%x\n", __func__, status);
+	}
+}
+
+void mipi_dsi_status(void)
+{
+	uint32 status;
+
+	status = MIPI_INP(MIPI_DSI_BASE + 0x0004);/* DSI_STATUS */
+
+	if (status & 0x80000000) {
+		MIPI_OUTP(MIPI_DSI_BASE + 0x0004, status);
+		pr_debug("%s: status=%x\n", __func__, status);
+	}
+}
+
+void mipi_dsi_error(void)
+{
+	/* DSI_ERR_INT_MASK0 */
+	mipi_dsi_ack_err_status();	/* mask0, 0x01f */
+	mipi_dsi_timeout_status();	/* mask0, 0x0e0 */
+	mipi_dsi_fifo_status();		/* mask0, 0x133d00 */
+	mipi_dsi_status();		/* mask0, 0xc0100 */
+	mipi_dsi_dln0_phy_err();	/* mask0, 0x3e00000 */
+}
+
+
+irqreturn_t mipi_dsi_isr(int irq, void *ptr)
+{
+	uint32 isr;
+
+	isr = MIPI_INP(MIPI_DSI_BASE + 0x010c);/* DSI_INTR_CTRL */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x010c, isr);
+
+#ifdef CONFIG_FB_MSM_MDP40
+	mdp4_stat.intr_dsi++;
+#endif
+
+	if (isr & DSI_INTR_ERROR) {
+		mipi_dsi_error();
+	}
+
+	if (isr & DSI_INTR_VIDEO_DONE) {
+		/*
+		* do something  here
+		*/
+	}
+
+	if (isr & DSI_INTR_CMD_DMA_DONE) {
+		complete(&dsi_dma_comp);
+	}
+
+	if (isr & DSI_INTR_CMD_MDP_DONE) {
+		mipi_dsi_disable_irq_nosync();
+		mipi_dsi_post_kickoff_action();
+	}
+
+
+	return IRQ_HANDLED;
+}
diff --git a/drivers/video/msm/mipi_novatek.c b/drivers/video/msm/mipi_novatek.c
new file mode 100644
index 0000000..27108f2
--- /dev/null
+++ b/drivers/video/msm/mipi_novatek.c
@@ -0,0 +1,463 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_fb.h"
+#include "mipi_dsi.h"
+#include "mipi_novatek.h"
+#include "mdp4.h"
+
+
+static struct mipi_dsi_novatek_platform_data *mipi_novatek_pdata;
+
+static struct dsi_buf novatek_tx_buf;
+static struct dsi_buf novatek_rx_buf;
+
+
+/* novatek blue panel */
+
+#ifdef NOVATEK_COMMANDS_UNUSED
+static char display_config_cmd_mode1[] = {
+	/* TYPE_DCS_LWRITE */
+	0x2A, 0x00, 0x00, 0x01,
+	0x3F, 0xFF, 0xFF, 0xFF
+};
+
+static char display_config_cmd_mode2[] = {
+	/* DTYPE_DCS_LWRITE */
+	0x2B, 0x00, 0x00, 0x01,
+	0xDF, 0xFF, 0xFF, 0xFF
+};
+
+static char display_config_cmd_mode3_666[] = {
+	/* DTYPE_DCS_WRITE1 */
+	0x3A, 0x66, 0x15, 0x80 /* 666 Packed (18-bits) */
+};
+
+static char display_config_cmd_mode3_565[] = {
+	/* DTYPE_DCS_WRITE1 */
+	0x3A, 0x55, 0x15, 0x80 /* 565 mode */
+};
+
+static char display_config_321[] = {
+	/* DTYPE_DCS_WRITE1 */
+	0x66, 0x2e, 0x15, 0x00 /* Reg 0x66 : 2E */
+};
+
+static char display_config_323[] = {
+	/* DTYPE_DCS_WRITE */
+	0x13, 0x00, 0x05, 0x00 /* Reg 0x13 < Set for Normal Mode> */
+};
+
+static char display_config_2lan[] = {
+	/* DTYPE_DCS_WRITE */
+	0x61, 0x01, 0x02, 0xff /* Reg 0x61 : 01,02 < Set for 2 Data Lane > */
+};
+
+static char display_config_exit_sleep[] = {
+	/* DTYPE_DCS_WRITE */
+	0x11, 0x00, 0x05, 0x80 /* Reg 0x11 < exit sleep mode> */
+};
+
+static char display_config_TE_ON[] = {
+	/* DTYPE_DCS_WRITE1 */
+	0x35, 0x00, 0x15, 0x80
+};
+
+static char display_config_39H[] = {
+	/* DTYPE_DCS_WRITE */
+	0x39, 0x00, 0x05, 0x80
+};
+
+static char display_config_set_tear_scanline[] = {
+	/* DTYPE_DCS_LWRITE */
+	0x44, 0x00, 0x00, 0xff
+};
+
+static char display_config_set_twolane[] = {
+	/* DTYPE_DCS_WRITE1 */
+	0xae, 0x03, 0x15, 0x80
+};
+
+static char display_config_set_threelane[] = {
+	/* DTYPE_DCS_WRITE1 */
+	0xae, 0x05, 0x15, 0x80
+};
+
+#else
+
+static char sw_reset[2] = {0x01, 0x00}; /* DTYPE_DCS_WRITE */
+static char enter_sleep[2] = {0x10, 0x00}; /* DTYPE_DCS_WRITE */
+static char exit_sleep[2] = {0x11, 0x00}; /* DTYPE_DCS_WRITE */
+static char display_off[2] = {0x28, 0x00}; /* DTYPE_DCS_WRITE */
+static char display_on[2] = {0x29, 0x00}; /* DTYPE_DCS_WRITE */
+
+
+
+static char rgb_888[2] = {0x3A, 0x77}; /* DTYPE_DCS_WRITE1 */
+
+#if defined(NOVATEK_TWO_LANE)
+static char set_num_of_lanes[2] = {0xae, 0x03}; /* DTYPE_DCS_WRITE1 */
+#else  /* 1 lane */
+static char set_num_of_lanes[2] = {0xae, 0x01}; /* DTYPE_DCS_WRITE1 */
+#endif
+/* commands by Novatek */
+static char novatek_f4[2] = {0xf4, 0x55}; /* DTYPE_DCS_WRITE1 */
+static char novatek_8c[16] = { /* DTYPE_DCS_LWRITE */
+	0x8C, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x08, 0x00, 0x30, 0xC0, 0xB7, 0x37};
+static char novatek_ff[2] = {0xff, 0x55 }; /* DTYPE_DCS_WRITE1 */
+
+static char set_width[5] = { /* DTYPE_DCS_LWRITE */
+	0x2A, 0x00, 0x00, 0x02, 0x1B}; /* 540 - 1 */
+static char set_height[5] = { /* DTYPE_DCS_LWRITE */
+	0x2B, 0x00, 0x00, 0x03, 0xBF}; /* 960 - 1 */
+#endif
+
+static char led_pwm1[2] = {0x51, 0x0};	/* DTYPE_DCS_WRITE1 */
+static char led_pwm2[2] = {0x53, 0x24}; /* DTYPE_DCS_WRITE1 */
+static char led_pwm3[2] = {0x55, 0x00}; /* DTYPE_DCS_WRITE1 */
+
+static struct dsi_cmd_desc novatek_cmd_backlight_cmds[] = {
+	{DTYPE_DCS_LWRITE, 1, 0, 0, 1, sizeof(led_pwm1), led_pwm1},
+};
+
+static struct dsi_cmd_desc novatek_video_on_cmds[] = {
+	{DTYPE_DCS_WRITE, 1, 0, 0, 50,
+		sizeof(sw_reset), sw_reset},
+	{DTYPE_DCS_WRITE, 1, 0, 0, 10,
+		sizeof(exit_sleep), exit_sleep},
+	{DTYPE_DCS_WRITE, 1, 0, 0, 10,
+		sizeof(display_on), display_on},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, 10,
+		sizeof(set_num_of_lanes), set_num_of_lanes},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, 10,
+		sizeof(rgb_888), rgb_888},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, 10,
+		sizeof(led_pwm2), led_pwm2},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, 10,
+		sizeof(led_pwm3), led_pwm3},
+};
+
+static struct dsi_cmd_desc novatek_cmd_on_cmds[] = {
+	{DTYPE_DCS_WRITE, 1, 0, 0, 50,
+		sizeof(sw_reset), sw_reset},
+	{DTYPE_DCS_WRITE, 1, 0, 0, 10,
+		sizeof(exit_sleep), exit_sleep},
+	{DTYPE_DCS_WRITE, 1, 0, 0, 10,
+		sizeof(display_on), display_on},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, 50,
+		sizeof(novatek_f4), novatek_f4},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, 50,
+		sizeof(novatek_8c), novatek_8c},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, 50,
+		sizeof(novatek_ff), novatek_ff},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, 10,
+		sizeof(set_num_of_lanes), set_num_of_lanes},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, 50,
+		sizeof(set_width), set_width},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, 50,
+		sizeof(set_height), set_height},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, 10,
+		sizeof(rgb_888), rgb_888},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, 1,
+		sizeof(led_pwm2), led_pwm2},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, 1,
+		sizeof(led_pwm3), led_pwm3},
+};
+
+static struct dsi_cmd_desc novatek_display_off_cmds[] = {
+	{DTYPE_DCS_WRITE, 1, 0, 0, 10,
+		sizeof(display_off), display_off},
+	{DTYPE_DCS_WRITE, 1, 0, 0, 120,
+		sizeof(enter_sleep), enter_sleep}
+};
+
+static char manufacture_id[2] = {0x04, 0x00}; /* DTYPE_DCS_READ */
+
+static struct dsi_cmd_desc novatek_manufacture_id_cmd = {
+	DTYPE_DCS_READ, 1, 0, 1, 5, sizeof(manufacture_id), manufacture_id};
+
+static uint32 mipi_novatek_manufacture_id(struct msm_fb_data_type *mfd)
+{
+	struct dsi_buf *rp, *tp;
+	struct dsi_cmd_desc *cmd;
+	uint32 *lp;
+
+	tp = &novatek_tx_buf;
+	rp = &novatek_rx_buf;
+	mipi_dsi_buf_init(rp);
+	mipi_dsi_buf_init(tp);
+
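+	/* DCS read of register 0x04; request three bytes of ID data back */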
+	cmd = &novatek_manufacture_id_cmd;
+	mipi_dsi_cmds_rx(mfd, tp, rp, cmd, 3);
+	lp = (uint32 *)rp->data;
+	pr_info("%s: manufacture_id=%x\n", __func__, *lp);
+	return *lp;
+}
+
+static int fpga_addr;
+static bool support_3d;
+
+static void mipi_novatek_3d_init(int addr)
+{
+	fpga_addr = addr;
+}
+
+static void mipi_dsi_enable_3d_barrier(int mode)
+{
+	void __iomem *fpga_ptr;
+	uint32_t ptr_value = 0;
+
+	if (!fpga_addr && support_3d) {
+		pr_err("%s: fpga_addr not set. Failed to enable 3D barrier\n",
+					__func__);
+		return;
+	}
+
+	fpga_ptr = ioremap_nocache(fpga_addr, sizeof(uint32_t));
+	if (!fpga_ptr) {
+		pr_err("%s: FPGA ioremap failed. Failed to enable 3D barrier\n",
+					__func__);
+		return;
+	}
+
+	ptr_value = readl_relaxed(fpga_ptr);
+	if (mode == LANDSCAPE)
+		writel_relaxed(((0xFFFF0000 & ptr_value) | 1), fpga_ptr);
+	else if (mode == PORTRAIT)
+		writel_relaxed(((0xFFFF0000 & ptr_value) | 3), fpga_ptr);
+	else
+		writel_relaxed((0xFFFF0000 & ptr_value), fpga_ptr);
+
+	mb();
+	iounmap(fpga_ptr);
+}
+
+static int mipi_novatek_lcd_on(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+	struct mipi_panel_info *mipi;
+	struct msm_panel_info *pinfo;
+
+	mfd = platform_get_drvdata(pdev);
+	if (!mfd)
+		return -ENODEV;
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	pinfo = &mfd->panel_info;
+	if (pinfo->is_3d_panel)
+		support_3d = TRUE;
+
+	mipi  = &mfd->panel_info.mipi;
+
+	if (mipi->mode == DSI_VIDEO_MODE) {
+		mipi_dsi_cmds_tx(mfd, &novatek_tx_buf, novatek_video_on_cmds,
+			ARRAY_SIZE(novatek_video_on_cmds));
+	} else {
+		mipi_dsi_cmds_tx(mfd, &novatek_tx_buf, novatek_cmd_on_cmds,
+			ARRAY_SIZE(novatek_cmd_on_cmds));
+
+		mipi_dsi_cmd_bta_sw_trigger(); /* clean up ack_err_status */
+
+		mipi_novatek_manufacture_id(mfd);
+	}
+
+	return 0;
+}
+
+static int mipi_novatek_lcd_off(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+
+	mfd = platform_get_drvdata(pdev);
+
+	if (!mfd)
+		return -ENODEV;
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	mipi_dsi_cmds_tx(mfd, &novatek_tx_buf, novatek_display_off_cmds,
+			ARRAY_SIZE(novatek_display_off_cmds));
+
+	return 0;
+}
+
+
+
+static void mipi_novatek_set_backlight(struct msm_fb_data_type *mfd)
+{
+	struct mipi_panel_info *mipi;
+	static int bl_level_old;
+
+	mipi  = &mfd->panel_info.mipi;
+	if (bl_level_old == mfd->bl_level)
+		return;
+
+	mutex_lock(&mfd->dma->ov_mutex);
+	/* mdp4_dsi_cmd_busy_wait: will turn on dsi clock also */
+	mdp4_dsi_cmd_dma_busy_wait(mfd);
+	mdp4_dsi_blt_dmap_busy_wait(mfd);
+
+	led_pwm1[1] = (unsigned char)(mfd->bl_level);
+	mipi_dsi_cmds_tx(mfd, &novatek_tx_buf, novatek_cmd_backlight_cmds,
+			ARRAY_SIZE(novatek_cmd_backlight_cmds));
+	bl_level_old = mfd->bl_level;
+	mutex_unlock(&mfd->dma->ov_mutex);
+	return;
+}
+
+static int mipi_dsi_3d_barrier_sysfs_register(struct device *dev);
+static int barrier_mode;
+
+static int __devinit mipi_novatek_lcd_probe(struct platform_device *pdev)
+{
+	if (pdev->id == 0) {
+		mipi_novatek_pdata = pdev->dev.platform_data;
+
+		if (mipi_novatek_pdata &&
+		    mipi_novatek_pdata->fpga_3d_config_addr)
+			mipi_novatek_3d_init(
+				mipi_novatek_pdata->fpga_3d_config_addr);
+
+		/* create sysfs to control 3D barrier for the Sharp panel */
+		if (mipi_dsi_3d_barrier_sysfs_register(&pdev->dev)) {
+			pr_err("%s: Failed to register 3d Barrier sysfs\n",
+						__func__);
+			return -ENODEV;
+		}
+		barrier_mode = 0;
+
+		return 0;
+	}
+
+	msm_fb_add_device(pdev);
+
+	return 0;
+}
+
+static struct platform_driver this_driver = {
+	.probe  = mipi_novatek_lcd_probe,
+	.driver = {
+		.name   = "mipi_novatek",
+	},
+};
+
+static struct msm_fb_panel_data novatek_panel_data = {
+	.on		= mipi_novatek_lcd_on,
+	.off		= mipi_novatek_lcd_off,
+	.set_backlight = mipi_novatek_set_backlight,
+};
+
+static ssize_t mipi_dsi_3d_barrier_read(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", barrier_mode);
+}
+
+static ssize_t mipi_dsi_3d_barrier_write(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf,
+				size_t count)
+{
+	u32 data = 0;
+
+	if (sscanf(buf, "%u", &data) != 1) {
+		dev_err(dev, "%s: invalid input\n", __func__);
+		return -EINVAL;
+	} else {
+		barrier_mode = data;
+		if (data == 1)
+			mipi_dsi_enable_3d_barrier(LANDSCAPE);
+		else if (data == 2)
+			mipi_dsi_enable_3d_barrier(PORTRAIT);
+		else
+			mipi_dsi_enable_3d_barrier(0);
+	}
+
+	return count;
+}
+
+static struct device_attribute mipi_dsi_3d_barrier_attributes[] = {
+	__ATTR(enable_3d_barrier, 0666, mipi_dsi_3d_barrier_read,
+					 mipi_dsi_3d_barrier_write),
+};
+
+static int mipi_dsi_3d_barrier_sysfs_register(struct device *dev)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mipi_dsi_3d_barrier_attributes); i++)
+		if (device_create_file(dev, mipi_dsi_3d_barrier_attributes + i))
+			goto error;
+
+	return 0;
+
+error:
+	for (; i >= 0 ; i--)
+		device_remove_file(dev, mipi_dsi_3d_barrier_attributes + i);
+	pr_err("%s: Unable to create interface\n", __func__);
+
+	return -ENODEV;
+}
+
+static int ch_used[3];
+
+int mipi_novatek_device_register(struct msm_panel_info *pinfo,
+					u32 channel, u32 panel)
+{
+	struct platform_device *pdev = NULL;
+	int ret;
+
+	if ((channel >= 3) || ch_used[channel])
+		return -ENODEV;
+
+	ch_used[channel] = TRUE;
+
+	pdev = platform_device_alloc("mipi_novatek", (panel << 8)|channel);
+	if (!pdev)
+		return -ENOMEM;
+
+	novatek_panel_data.panel_info = *pinfo;
+
+	ret = platform_device_add_data(pdev, &novatek_panel_data,
+		sizeof(novatek_panel_data));
+	if (ret) {
+		printk(KERN_ERR
+		  "%s: platform_device_add_data failed!\n", __func__);
+		goto err_device_put;
+	}
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		printk(KERN_ERR
+		  "%s: platform_device_register failed!\n", __func__);
+		goto err_device_put;
+	}
+
+	return 0;
+
+err_device_put:
+	platform_device_put(pdev);
+	return ret;
+}
+
+static int __init mipi_novatek_lcd_init(void)
+{
+	mipi_dsi_buf_alloc(&novatek_tx_buf, DSI_BUF_SIZE);
+	mipi_dsi_buf_alloc(&novatek_rx_buf, DSI_BUF_SIZE);
+
+	return platform_driver_register(&this_driver);
+}
+
+module_init(mipi_novatek_lcd_init);
diff --git a/drivers/video/msm/mipi_novatek.h b/drivers/video/msm/mipi_novatek.h
new file mode 100644
index 0000000..f84de9a
--- /dev/null
+++ b/drivers/video/msm/mipi_novatek.h
@@ -0,0 +1,22 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MIPI_NOVATEK_BLUE_H
+#define MIPI_NOVATEK_BLUE_H
+
+#define NOVATEK_TWO_LANE
+
+int mipi_novatek_device_register(struct msm_panel_info *pinfo,
+					u32 channel, u32 panel);
+
+#endif  /* MIPI_NOVATEK_BLUE_H */
diff --git a/drivers/video/msm/mipi_novatek_cmd_qhd_pt.c b/drivers/video/msm/mipi_novatek_cmd_qhd_pt.c
new file mode 100644
index 0000000..b3bd6c8
--- /dev/null
+++ b/drivers/video/msm/mipi_novatek_cmd_qhd_pt.c
@@ -0,0 +1,99 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_fb.h"
+#include "mipi_dsi.h"
+#include "mipi_novatek.h"
+
+static struct msm_panel_info pinfo;
+
+static struct mipi_dsi_phy_ctrl dsi_cmd_mode_phy_db = {
+/* DSI_BIT_CLK at 500MHz, 2 lane, RGB888 */
+		{0x03, 0x01, 0x01, 0x00},	/* regulator */
+		/* timing   */
+		{0xB4, 0x8D, 0x1D, 0x00, 0x20, 0x94, 0x20,
+		0x8F, 0x20, 0x03, 0x04},
+		{0x7f, 0x00, 0x00, 0x00},	/* phy ctrl */
+		{0xee, 0x02, 0x86, 0x00},	/* strength */
+		/* pll control */
+		{0x40, 0xf9, 0xb0, 0xda, 0x00, 0x50, 0x48, 0x63,
+#if defined(NOVATEK_TWO_LANE)
+		0x30, 0x07, 0x03,
+#else           /* default set to 1 lane */
+		0x30, 0x07, 0x07,
+#endif
+		0x05, 0x14, 0x03, 0x0, 0x0, 0x54, 0x06, 0x10, 0x04, 0x0},
+};
+
+static int __init mipi_cmd_novatek_blue_qhd_pt_init(void)
+{
+	int ret;
+
+#ifdef CONFIG_FB_MSM_MIPI_PANEL_DETECT
+	if (msm_fb_detect_client("mipi_cmd_novatek_qhd"))
+		return 0;
+#endif
+
+	pinfo.xres = 540;
+	pinfo.yres = 960;
+	pinfo.type = MIPI_CMD_PANEL;
+	pinfo.pdest = DISPLAY_1;
+	pinfo.wait_cycle = 0;
+	pinfo.bpp = 24;
+	pinfo.lcdc.h_back_porch = 50;
+	pinfo.lcdc.h_front_porch = 50;
+	pinfo.lcdc.h_pulse_width = 20;
+	pinfo.lcdc.v_back_porch = 11;
+	pinfo.lcdc.v_front_porch = 10;
+	pinfo.lcdc.v_pulse_width = 5;
+	pinfo.lcdc.border_clr = 0;	/* blk */
+	pinfo.lcdc.underflow_clr = 0xff;	/* blue */
+	pinfo.lcdc.hsync_skew = 0;
+	pinfo.bl_max = 255;
+	pinfo.bl_min = 1;
+	pinfo.fb_num = 2;
+	pinfo.clk_rate = 454000000;
+	pinfo.is_3d_panel = FB_TYPE_3D_PANEL;
+	pinfo.lcd.vsync_enable = TRUE;
+	pinfo.lcd.hw_vsync_mode = TRUE;
+	pinfo.lcd.refx100 = 6000; /* adjust refx100 to prevent tearing */
+
+	pinfo.mipi.mode = DSI_CMD_MODE;
+	pinfo.mipi.dst_format = DSI_CMD_DST_FORMAT_RGB888;
+	pinfo.mipi.vc = 0;
+	pinfo.mipi.rgb_swap = DSI_RGB_SWAP_BGR;
+	pinfo.mipi.data_lane0 = TRUE;
+#if defined(NOVATEK_TWO_LANE)
+	pinfo.mipi.data_lane1 = TRUE;
+#endif
+	pinfo.mipi.t_clk_post = 0x22;
+	pinfo.mipi.t_clk_pre = 0x3f;
+	pinfo.mipi.stream = 0;	/* dma_p */
+	pinfo.mipi.mdp_trigger = DSI_CMD_TRIGGER_SW;
+	pinfo.mipi.dma_trigger = DSI_CMD_TRIGGER_SW;
+	pinfo.mipi.te_sel = 1; /* TE from vsync gpio */
+	pinfo.mipi.interleave_max = 1;
+	pinfo.mipi.insert_dcs_cmd = TRUE;
+	pinfo.mipi.wr_mem_continue = 0x3c;
+	pinfo.mipi.wr_mem_start = 0x2c;
+	pinfo.mipi.dsi_phy_db = &dsi_cmd_mode_phy_db;
+
+	ret = mipi_novatek_device_register(&pinfo, MIPI_DSI_PRIM,
+						MIPI_DSI_PANEL_WVGA_PT);
+	if (ret)
+		pr_err("%s: failed to register device!\n", __func__);
+
+	return ret;
+}
+
+module_init(mipi_cmd_novatek_blue_qhd_pt_init);
diff --git a/drivers/video/msm/mipi_novatek_video_qhd_pt.c b/drivers/video/msm/mipi_novatek_video_qhd_pt.c
new file mode 100644
index 0000000..635b66e
--- /dev/null
+++ b/drivers/video/msm/mipi_novatek_video_qhd_pt.c
@@ -0,0 +1,98 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_fb.h"
+#include "mipi_dsi.h"
+#include "mipi_novatek.h"
+
+static struct msm_panel_info pinfo;
+
+static struct mipi_dsi_phy_ctrl dsi_video_mode_phy_db = {
+/* DSI_BIT_CLK at 500MHz, 2 lane, RGB888 */
+		{0x03, 0x01, 0x01, 0x00},	/* regulator */
+		/* timing   */
+		{0x82, 0x31, 0x13, 0x0, 0x42, 0x4D, 0x18,
+		0x35, 0x21, 0x03, 0x04},
+		{0x7f, 0x00, 0x00, 0x00},	/* phy ctrl */
+		{0xee, 0x02, 0x86, 0x00},	/* strength */
+		/* pll control */
+		{0x40, 0xf9, 0xb0, 0xda, 0x00, 0x50, 0x48, 0x63,
+#if defined(NOVATEK_TWO_LANE)
+		0x30, 0x07, 0x03,
+#else           /* default set to 1 lane */
+		0x30, 0x07, 0x07,
+#endif
+		0x05, 0x14, 0x03, 0x0, 0x0, 0x54, 0x06, 0x10, 0x04, 0x0},
+};
+
+static int __init mipi_video_novatek_qhd_pt_init(void)
+{
+	int ret;
+
+#ifdef CONFIG_FB_MSM_MIPI_PANEL_DETECT
+	if (msm_fb_detect_client("mipi_video_novatek_qhd"))
+		return 0;
+#endif
+
+	pinfo.xres = 540;
+	pinfo.yres = 960;
+	pinfo.type = MIPI_VIDEO_PANEL;
+	pinfo.pdest = DISPLAY_1;
+	pinfo.wait_cycle = 0;
+	pinfo.bpp = 24;
+	pinfo.lcdc.h_back_porch = 80;
+	pinfo.lcdc.h_front_porch = 24;
+	pinfo.lcdc.h_pulse_width = 8;
+	pinfo.lcdc.v_back_porch = 16;
+	pinfo.lcdc.v_front_porch = 8;
+	pinfo.lcdc.v_pulse_width = 1;
+	pinfo.lcdc.border_clr = 0;	/* blk */
+	pinfo.lcdc.underflow_clr = 0xff;	/* blue */
+	pinfo.lcdc.hsync_skew = 0;
+	pinfo.bl_max = 15;
+	pinfo.bl_min = 1;
+	pinfo.fb_num = 2;
+
+	pinfo.mipi.mode = DSI_VIDEO_MODE;
+	pinfo.mipi.pulse_mode_hsa_he = TRUE;
+	pinfo.mipi.hfp_power_stop = FALSE;
+	pinfo.mipi.hbp_power_stop = FALSE;
+	pinfo.mipi.hsa_power_stop = FALSE;
+	pinfo.mipi.eof_bllp_power_stop = TRUE;
+	pinfo.mipi.bllp_power_stop = TRUE;
+	pinfo.mipi.traffic_mode = DSI_NON_BURST_SYNCH_PULSE;
+	pinfo.mipi.dst_format = DSI_VIDEO_DST_FORMAT_RGB888;
+	pinfo.mipi.vc = 0;
+	pinfo.mipi.rgb_swap = DSI_RGB_SWAP_BGR;
+	pinfo.mipi.data_lane0 = TRUE;
+#if defined(NOVATEK_TWO_LANE)
+	pinfo.mipi.data_lane1 = TRUE;
+#endif
+	pinfo.mipi.tx_eot_append = TRUE;
+	pinfo.mipi.t_clk_post = 0x04;
+	pinfo.mipi.t_clk_pre = 0x1c;
+	pinfo.mipi.stream = 0; /* dma_p */
+	pinfo.mipi.mdp_trigger = DSI_CMD_TRIGGER_SW;
+	pinfo.mipi.dma_trigger = DSI_CMD_TRIGGER_SW;
+	pinfo.mipi.frame_rate = 60;
+	pinfo.mipi.dsi_phy_db = &dsi_video_mode_phy_db;
+
+	ret = mipi_novatek_device_register(&pinfo, MIPI_DSI_PRIM,
+						MIPI_DSI_PANEL_WVGA_PT);
+	if (ret)
+		pr_err("%s: failed to register device!\n", __func__);
+
+	return ret;
+}
+
+module_init(mipi_video_novatek_qhd_pt_init);
diff --git a/drivers/video/msm/mipi_renesas.c b/drivers/video/msm/mipi_renesas.c
new file mode 100644
index 0000000..652ca29
--- /dev/null
+++ b/drivers/video/msm/mipi_renesas.c
@@ -0,0 +1,1231 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm_fb.h"
+#include "mipi_dsi.h"
+#include "mipi_renesas.h"
+
+#define RENESAS_CMD_DELAY 0 /* 50 */
+#define RENESAS_SLEEP_OFF_DELAY 50
+static struct msm_panel_common_pdata *mipi_renesas_pdata;
+
+static struct dsi_buf renesas_tx_buf;
+static struct dsi_buf renesas_rx_buf;
+
+static char config_sleep_out[2] = {0x11, 0x00};
+static char config_CMD_MODE[2] = {0x40, 0x01};
+static char config_WRTXHT[7] = {0x92, 0x16, 0x08, 0x08, 0x00, 0x01, 0xe0};
+static char config_WRTXVT[7] = {0x8b, 0x02, 0x02, 0x02, 0x00, 0x03, 0x60};
+static char config_PLL2NR[2] = {0xa0, 0x24};
+static char config_PLL2NF1[2] = {0xa2, 0xd0};
+static char config_PLL2NF2[2] = {0xa4, 0x00};
+static char config_PLL2BWADJ1[2] = {0xa6, 0xd0};
+static char config_PLL2BWADJ2[2] = {0xa8, 0x00};
+static char config_PLL2CTL[2] = {0xaa, 0x00};
+static char config_DBICBR[2] = {0x48, 0x03};
+static char config_DBICTYPE[2] = {0x49, 0x00};
+static char config_DBICSET1[2] = {0x4a, 0x1c};
+static char config_DBICADD[2] = {0x4b, 0x00};
+static char config_DBICCTL[2] = {0x4e, 0x01};
+/* static char config_COLMOD_565[2] = {0x3a, 0x05}; */
+/* static char config_COLMOD_666PACK[2] = {0x3a, 0x06}; */
+static char config_COLMOD_888[2] = {0x3a, 0x07};
+static char config_MADCTL[2] = {0x36, 0x00};
+static char config_DBIOC[2] = {0x82, 0x40};
+static char config_CASET[7] = {0x2a, 0x00, 0x00, 0x00, 0x00, 0x01, 0xdf };
+static char config_PASET[7] = {0x2b, 0x00, 0x00, 0x00, 0x00, 0x03, 0x5f };
+static char config_TXON[2] = {0x81, 0x00};
+static char config_BLSET_TM[2] = {0xff, 0x6c};
+
+static char config_AGCPSCTL_TM[2] = {0x56, 0x08};
+
+static char config_DBICADD70[2] = {0x4b, 0x70};
+static char config_DBICSET_15[2] = {0x4a, 0x15};
+static char config_DBICADD72[2] = {0x4b, 0x72};
+
+static char config_Power_Ctrl_2a_cmd[3] = {0x4c, 0x40, 0x10};
+static char config_Auto_Sequencer_Setting_a_cmd[3] = {0x4c, 0x00, 0x00};
+static char Driver_Output_Ctrl_indx[3] = {0x4c, 0x00, 0x01};
+static char Driver_Output_Ctrl_cmd[3] = {0x4c, 0x03, 0x10};
+static char config_LCD_drive_AC_Ctrl_indx[3] = {0x4c, 0x00, 0x02};
+static char config_LCD_drive_AC_Ctrl_cmd[3] = {0x4c, 0x01, 0x00};
+static char config_Entry_Mode_indx[3] = {0x4c, 0x00, 0x03};
+static char config_Entry_Mode_cmd[3] = {0x4c, 0x00, 0x00};
+static char config_Display_Ctrl_1_indx[3] = {0x4c, 0x00, 0x07};
+static char config_Display_Ctrl_1_cmd[3] = {0x4c, 0x00, 0x00};
+static char config_Display_Ctrl_2_indx[3] = {0x4c, 0x00, 0x08};
+static char config_Display_Ctrl_2_cmd[3] = {0x4c, 0x00, 0x04};
+static char config_Display_Ctrl_3_indx[3] = {0x4c, 0x00, 0x09};
+static char config_Display_Ctrl_3_cmd[3] = {0x4c, 0x00, 0x0c};
+static char config_Display_IF_Ctrl_1_indx[3] = {0x4c, 0x00, 0x0c};
+static char config_Display_IF_Ctrl_1_cmd[3] = {0x4c, 0x40, 0x10};
+static char config_Display_IF_Ctrl_2_indx[3] = {0x4c, 0x00, 0x0e};
+static char config_Display_IF_Ctrl_2_cmd[3] = {0x4c, 0x00, 0x00};
+
+static char config_Panel_IF_Ctrl_1_indx[3] = {0x4c, 0x00, 0x20};
+static char config_Panel_IF_Ctrl_1_cmd[3] = {0x4c, 0x01, 0x3f};
+static char config_Panel_IF_Ctrl_3_indx[3] = {0x4c, 0x00, 0x22};
+static char config_Panel_IF_Ctrl_3_cmd[3] = {0x4c, 0x76, 0x00};
+static char config_Panel_IF_Ctrl_4_indx[3] = {0x4c, 0x00, 0x23};
+static char config_Panel_IF_Ctrl_4_cmd[3] = {0x4c, 0x1c, 0x0a};
+static char config_Panel_IF_Ctrl_5_indx[3] = {0x4c, 0x00, 0x24};
+static char config_Panel_IF_Ctrl_5_cmd[3] = {0x4c, 0x1c, 0x2c};
+static char config_Panel_IF_Ctrl_6_indx[3] = {0x4c, 0x00, 0x25};
+static char config_Panel_IF_Ctrl_6_cmd[3] = {0x4c, 0x1c, 0x4e};
+static char config_Panel_IF_Ctrl_8_indx[3] = {0x4c, 0x00, 0x27};
+static char config_Panel_IF_Ctrl_8_cmd[3] = {0x4c, 0x00, 0x00};
+static char config_Panel_IF_Ctrl_9_indx[3] = {0x4c, 0x00, 0x28};
+static char config_Panel_IF_Ctrl_9_cmd[3] = {0x4c, 0x76, 0x0c};
+
+
+static char config_gam_adjust_00_indx[3] = {0x4c, 0x03, 0x00};
+static char config_gam_adjust_00_cmd[3] = {0x4c, 0x00, 0x00};
+static char config_gam_adjust_01_indx[3] = {0x4c, 0x03, 0x01};
+static char config_gam_adjust_01_cmd[3] = {0x4c, 0x05, 0x02};
+static char config_gam_adjust_02_indx[3] = {0x4c, 0x03, 0x02};
+static char config_gam_adjust_02_cmd[3] = {0x4c, 0x07, 0x05};
+static char config_gam_adjust_03_indx[3] = {0x4c, 0x03, 0x03};
+static char config_gam_adjust_03_cmd[3] = {0x4c, 0x00, 0x00};
+static char config_gam_adjust_04_indx[3] = {0x4c, 0x03, 0x04};
+static char config_gam_adjust_04_cmd[3] = {0x4c, 0x02, 0x00};
+static char config_gam_adjust_05_indx[3] = {0x4c, 0x03, 0x05};
+static char config_gam_adjust_05_cmd[3] = {0x4c, 0x07, 0x07};
+static char config_gam_adjust_06_indx[3] = {0x4c, 0x03, 0x06};
+static char config_gam_adjust_06_cmd[3] = {0x4c, 0x10, 0x10};
+static char config_gam_adjust_07_indx[3] = {0x4c, 0x03, 0x07};
+static char config_gam_adjust_07_cmd[3] = {0x4c, 0x02, 0x02};
+static char config_gam_adjust_08_indx[3] = {0x4c, 0x03, 0x08};
+static char config_gam_adjust_08_cmd[3] = {0x4c, 0x07, 0x04};
+static char config_gam_adjust_09_indx[3] = {0x4c, 0x03, 0x09};
+static char config_gam_adjust_09_cmd[3] = {0x4c, 0x07, 0x07};
+static char config_gam_adjust_0A_indx[3] = {0x4c, 0x03, 0x0a};
+static char config_gam_adjust_0A_cmd[3] = {0x4c, 0x00, 0x00};
+static char config_gam_adjust_0B_indx[3] = {0x4c, 0x03, 0x0b};
+static char config_gam_adjust_0B_cmd[3] = {0x4c, 0x00, 0x00};
+static char config_gam_adjust_0C_indx[3] = {0x4c, 0x03, 0x0c};
+static char config_gam_adjust_0C_cmd[3] = {0x4c, 0x07, 0x07};
+static char config_gam_adjust_0D_indx[3] = {0x4c, 0x03, 0x0d};
+static char config_gam_adjust_0D_cmd[3] = {0x4c, 0x10, 0x10};
+static char config_gam_adjust_10_indx[3] = {0x4c, 0x03, 0x10};
+static char config_gam_adjust_10_cmd[3] = {0x4c, 0x01, 0x04};
+static char config_gam_adjust_11_indx[3] = {0x4c, 0x03, 0x11};
+static char config_gam_adjust_11_cmd[3] = {0x4c, 0x05, 0x03};
+static char config_gam_adjust_12_indx[3] = {0x4c, 0x03, 0x12};
+static char config_gam_adjust_12_cmd[3] = {0x4c, 0x03, 0x04};
+static char config_gam_adjust_15_indx[3] = {0x4c, 0x03, 0x15};
+static char config_gam_adjust_15_cmd[3] = {0x4c, 0x03, 0x04};
+static char config_gam_adjust_16_indx[3] = {0x4c, 0x03, 0x16};
+static char config_gam_adjust_16_cmd[3] = {0x4c, 0x03, 0x1c};
+static char config_gam_adjust_17_indx[3] = {0x4c, 0x03, 0x17};
+static char config_gam_adjust_17_cmd[3] = {0x4c, 0x02, 0x04};
+static char config_gam_adjust_18_indx[3] = {0x4c, 0x03, 0x18};
+static char config_gam_adjust_18_cmd[3] = {0x4c, 0x04, 0x02};
+static char config_gam_adjust_19_indx[3] = {0x4c, 0x03, 0x19};
+static char config_gam_adjust_19_cmd[3] = {0x4c, 0x03, 0x05};
+static char config_gam_adjust_1C_indx[3] = {0x4c, 0x03, 0x1c};
+static char config_gam_adjust_1C_cmd[3] = {0x4c, 0x07, 0x07};
+static char config_gam_adjust_1D_indx[3] = {0x4c, 0x03, 0x1D};
+static char config_gam_adjust_1D_cmd[3] = {0x4c, 0x02, 0x1f};
+static char config_gam_adjust_20_indx[3] = {0x4c, 0x03, 0x20};
+static char config_gam_adjust_20_cmd[3] = {0x4c, 0x05, 0x07};
+static char config_gam_adjust_21_indx[3] = {0x4c, 0x03, 0x21};
+static char config_gam_adjust_21_cmd[3] = {0x4c, 0x06, 0x04};
+static char config_gam_adjust_22_indx[3] = {0x4c, 0x03, 0x22};
+static char config_gam_adjust_22_cmd[3] = {0x4c, 0x04, 0x05};
+static char config_gam_adjust_27_indx[3] = {0x4c, 0x03, 0x27};
+static char config_gam_adjust_27_cmd[3] = {0x4c, 0x02, 0x03};
+static char config_gam_adjust_28_indx[3] = {0x4c, 0x03, 0x28};
+static char config_gam_adjust_28_cmd[3] = {0x4c, 0x03, 0x00};
+static char config_gam_adjust_29_indx[3] = {0x4c, 0x03, 0x29};
+static char config_gam_adjust_29_cmd[3] = {0x4c, 0x00, 0x02};
+
+static char config_Power_Ctrl_1_indx[3] = {0x4c, 0x01, 0x00};
+static char config_Power_Ctrl_1b_cmd[3] = {0x4c, 0x36, 0x3c};
+static char config_Power_Ctrl_2_indx[3] = {0x4c, 0x01, 0x01};
+static char config_Power_Ctrl_2b_cmd[3] = {0x4c, 0x40, 0x03};
+static char config_Power_Ctrl_3_indx[3] = {0x4c, 0x01, 0x02};
+static char config_Power_Ctrl_3a_cmd[3] = {0x4c, 0x00, 0x01};
+static char config_Power_Ctrl_4_indx[3] = {0x4c, 0x01, 0x03};
+static char config_Power_Ctrl_4a_cmd[3] = {0x4c, 0x3c, 0x58};
+static char config_Power_Ctrl_6_indx[3] = {0x4c, 0x01, 0x0c};
+static char config_Power_Ctrl_6a_cmd[3] = {0x4c, 0x01, 0x35};
+
+static char config_Auto_Sequencer_Setting_b_cmd[3] = {0x4c, 0x00, 0x02};
+
+static char config_Panel_IF_Ctrl_10_indx[3] = {0x4c, 0x00, 0x29};
+static char config_Panel_IF_Ctrl_10a_cmd[3] = {0x4c, 0x03, 0xbf};
+static char config_Auto_Sequencer_Setting_indx[3] = {0x4c, 0x01, 0x06};
+static char config_Auto_Sequencer_Setting_c_cmd[3] = {0x4c, 0x00, 0x03};
+static char config_Power_Ctrl_2c_cmd[3] = {0x4c, 0x40, 0x10};
+
+static char config_VIDEO[2] = {0x40, 0x00};
+
+static char config_Panel_IF_Ctrl_10_indx_off[3] = {0x4C, 0x00, 0x29};
+
+static char config_Panel_IF_Ctrl_10b_cmd_off[3] = {0x4C, 0x00, 0x02};
+
+static char config_Power_Ctrl_1a_cmd[3] = {0x4C, 0x30, 0x00};
+
+static struct dsi_cmd_desc renesas_sleep_off_cmds[] = {
+	{DTYPE_DCS_WRITE, 1, 0, 0, RENESAS_SLEEP_OFF_DELAY,
+		sizeof(config_sleep_out), config_sleep_out }
+};
+
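+/*
+ * The command tables below program panel registers indirectly: a
+ * DBICADD 0x70 write selects the register index, a DBICADD 0x72 write
+ * supplies the register data, and each transfer is followed by a
+ * DBICSET 0x15 write.
+ */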
+static struct dsi_cmd_desc renesas_display_off_cmds[] = {
+	/* Choosing Command Mode */
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_CMD_MODE), config_CMD_MODE },
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Auto_Sequencer_Setting_indx),
+			config_Auto_Sequencer_Setting_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Auto_Sequencer_Setting_b_cmd),
+			config_Auto_Sequencer_Setting_b_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY * 2,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	/* After waiting >= 5 frames, turn OFF the RGB signals.
+	 * This is done on the DSI/MDP side (depends on Video/Command mode). */
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Auto_Sequencer_Setting_indx),
+			config_Auto_Sequencer_Setting_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Auto_Sequencer_Setting_a_cmd),
+			config_Auto_Sequencer_Setting_a_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Panel_IF_Ctrl_10_indx_off),
+			config_Panel_IF_Ctrl_10_indx_off},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Panel_IF_Ctrl_10b_cmd_off),
+				config_Panel_IF_Ctrl_10b_cmd_off},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Power_Ctrl_1_indx),
+				config_Power_Ctrl_1_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Power_Ctrl_1a_cmd),
+				config_Power_Ctrl_1a_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15}
+};
+
+static struct dsi_cmd_desc renesas_display_on_cmds[] = {
+	/* Choosing Command Mode */
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_CMD_MODE), config_CMD_MODE },
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_WRTXHT), config_WRTXHT },
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_WRTXVT), config_WRTXVT },
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_PLL2NR), config_PLL2NR },
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_PLL2NF1), config_PLL2NF1 },
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_PLL2NF2), config_PLL2NF2 },
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_PLL2BWADJ1), config_PLL2BWADJ1},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_PLL2BWADJ2), config_PLL2BWADJ2},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_PLL2CTL), config_PLL2CTL},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICBR), config_DBICBR},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICTYPE), config_DBICTYPE},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET1), config_DBICSET1},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD), config_DBICADD},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICCTL), config_DBICCTL},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_COLMOD_888), config_COLMOD_888},
+	/* Choose config_COLMOD_565 or config_COLMOD_666PACK for other modes */
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_MADCTL), config_MADCTL},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBIOC), config_DBIOC},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_CASET), config_CASET},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_PASET), config_PASET},
+	{DTYPE_DCS_WRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_TXON), config_TXON},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_BLSET_TM), config_BLSET_TM},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_AGCPSCTL_TM), config_AGCPSCTL_TM},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Power_Ctrl_1_indx), config_Power_Ctrl_1_indx },
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Power_Ctrl_1a_cmd), config_Power_Ctrl_1a_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Power_Ctrl_2_indx), config_Power_Ctrl_2_indx },
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Power_Ctrl_2a_cmd), config_Power_Ctrl_2a_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Auto_Sequencer_Setting_indx),
+			config_Auto_Sequencer_Setting_indx },
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Auto_Sequencer_Setting_a_cmd),
+			config_Auto_Sequencer_Setting_a_cmd },
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(Driver_Output_Ctrl_indx), Driver_Output_Ctrl_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(Driver_Output_Ctrl_cmd),
+			Driver_Output_Ctrl_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_LCD_drive_AC_Ctrl_indx),
+			config_LCD_drive_AC_Ctrl_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_LCD_drive_AC_Ctrl_cmd),
+			config_LCD_drive_AC_Ctrl_cmd },
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Entry_Mode_indx),
+			config_Entry_Mode_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Entry_Mode_cmd),
+			config_Entry_Mode_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Display_Ctrl_1_indx),
+			config_Display_Ctrl_1_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Display_Ctrl_1_cmd),
+			config_Display_Ctrl_1_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Display_Ctrl_2_indx),
+			config_Display_Ctrl_2_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Display_Ctrl_2_cmd),
+			config_Display_Ctrl_2_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Display_Ctrl_3_indx),
+			config_Display_Ctrl_3_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Display_Ctrl_3_cmd),
+			config_Display_Ctrl_3_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Display_IF_Ctrl_1_indx),
+			config_Display_IF_Ctrl_1_indx },
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Display_IF_Ctrl_1_cmd),
+			config_Display_IF_Ctrl_1_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Display_IF_Ctrl_2_indx),
+			config_Display_IF_Ctrl_2_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Display_IF_Ctrl_2_cmd),
+			config_Display_IF_Ctrl_2_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Panel_IF_Ctrl_1_indx),
+			config_Panel_IF_Ctrl_1_indx },
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Panel_IF_Ctrl_1_cmd),
+			config_Panel_IF_Ctrl_1_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Panel_IF_Ctrl_3_indx),
+			config_Panel_IF_Ctrl_3_indx },
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Panel_IF_Ctrl_3_cmd),
+			config_Panel_IF_Ctrl_3_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Panel_IF_Ctrl_4_indx),
+			config_Panel_IF_Ctrl_4_indx },
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Panel_IF_Ctrl_4_cmd),
+			config_Panel_IF_Ctrl_4_cmd },
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Panel_IF_Ctrl_5_indx),
+			config_Panel_IF_Ctrl_5_indx },
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Panel_IF_Ctrl_5_cmd),
+			config_Panel_IF_Ctrl_5_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Panel_IF_Ctrl_6_indx),
+			config_Panel_IF_Ctrl_6_indx },
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Panel_IF_Ctrl_6_cmd),
+			config_Panel_IF_Ctrl_6_cmd },
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Panel_IF_Ctrl_8_indx),
+			config_Panel_IF_Ctrl_8_indx },
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Panel_IF_Ctrl_8_cmd),
+			config_Panel_IF_Ctrl_8_cmd },
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Panel_IF_Ctrl_9_indx),
+			config_Panel_IF_Ctrl_9_indx },
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Panel_IF_Ctrl_9_cmd),
+			config_Panel_IF_Ctrl_9_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_00_indx),
+			config_gam_adjust_00_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_00_cmd),
+			config_gam_adjust_00_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_01_indx),
+			config_gam_adjust_01_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_01_cmd),
+			config_gam_adjust_01_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_02_indx),
+			config_gam_adjust_02_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_02_cmd),
+			config_gam_adjust_02_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_03_indx),
+			config_gam_adjust_03_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_03_cmd),
+			config_gam_adjust_03_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_04_indx), config_gam_adjust_04_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_04_cmd), config_gam_adjust_04_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_05_indx), config_gam_adjust_05_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_05_cmd), config_gam_adjust_05_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_06_indx), config_gam_adjust_06_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_06_cmd), config_gam_adjust_06_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_07_indx), config_gam_adjust_07_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_07_cmd), config_gam_adjust_07_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_08_indx), config_gam_adjust_08_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_08_cmd), config_gam_adjust_08_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_09_indx), config_gam_adjust_09_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_09_cmd), config_gam_adjust_09_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_0A_indx), config_gam_adjust_0A_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_0A_cmd), config_gam_adjust_0A_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_0B_indx), config_gam_adjust_0B_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_0B_cmd), config_gam_adjust_0B_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_0C_indx), config_gam_adjust_0C_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_0C_cmd), config_gam_adjust_0C_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_0D_indx), config_gam_adjust_0D_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_0D_cmd), config_gam_adjust_0D_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_10_indx), config_gam_adjust_10_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_10_cmd), config_gam_adjust_10_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_11_indx), config_gam_adjust_11_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_11_cmd), config_gam_adjust_11_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_12_indx), config_gam_adjust_12_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_12_cmd), config_gam_adjust_12_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_15_indx), config_gam_adjust_15_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_15_cmd), config_gam_adjust_15_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_16_indx), config_gam_adjust_16_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_16_cmd), config_gam_adjust_16_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_17_indx), config_gam_adjust_17_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_17_cmd), config_gam_adjust_17_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_18_indx), config_gam_adjust_18_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_18_cmd), config_gam_adjust_18_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_19_indx), config_gam_adjust_19_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_19_cmd), config_gam_adjust_19_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_1C_indx), config_gam_adjust_1C_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_1C_cmd), config_gam_adjust_1C_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_1D_indx), config_gam_adjust_1D_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_1D_cmd), config_gam_adjust_1D_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_20_indx), config_gam_adjust_20_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_20_cmd), config_gam_adjust_20_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_21_indx), config_gam_adjust_21_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_21_cmd), config_gam_adjust_21_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_22_indx), config_gam_adjust_22_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_22_cmd), config_gam_adjust_22_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_27_indx), config_gam_adjust_27_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_27_cmd), config_gam_adjust_27_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_28_indx), config_gam_adjust_28_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_28_cmd), config_gam_adjust_28_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_29_indx), config_gam_adjust_29_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_gam_adjust_29_cmd), config_gam_adjust_29_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Power_Ctrl_1_indx), config_Power_Ctrl_1_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Power_Ctrl_1b_cmd), config_Power_Ctrl_1b_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Power_Ctrl_2_indx), config_Power_Ctrl_2_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Power_Ctrl_2b_cmd), config_Power_Ctrl_2b_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Power_Ctrl_3_indx), config_Power_Ctrl_3_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Power_Ctrl_3a_cmd), config_Power_Ctrl_3a_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Power_Ctrl_4_indx), config_Power_Ctrl_4_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Power_Ctrl_4a_cmd), config_Power_Ctrl_4a_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Power_Ctrl_6_indx), config_Power_Ctrl_6_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Power_Ctrl_6a_cmd), config_Power_Ctrl_6a_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Auto_Sequencer_Setting_indx),
+			config_Auto_Sequencer_Setting_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Auto_Sequencer_Setting_b_cmd),
+			config_Auto_Sequencer_Setting_b_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Panel_IF_Ctrl_10_indx),
+			config_Panel_IF_Ctrl_10_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Panel_IF_Ctrl_10a_cmd),
+			config_Panel_IF_Ctrl_10a_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Auto_Sequencer_Setting_indx),
+			config_Auto_Sequencer_Setting_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Auto_Sequencer_Setting_c_cmd),
+			config_Auto_Sequencer_Setting_c_cmd},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD70), config_DBICADD70},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Power_Ctrl_2_indx),
+			config_Power_Ctrl_2_indx},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_DBICADD72), config_DBICADD72},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_Power_Ctrl_2c_cmd),
+			config_Power_Ctrl_2c_cmd},
+
+	{DTYPE_DCS_WRITE1, 1, 0, 0, 0 /* RENESAS_CMD_DELAY */,
+		sizeof(config_DBICSET_15), config_DBICSET_15},
+
+};
+
+static struct dsi_cmd_desc renesas_video_on_cmds[] = {
+{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_VIDEO), config_VIDEO}
+};
+
+static struct dsi_cmd_desc renesas_cmd_on_cmds[] = {
+{DTYPE_DCS_WRITE1, 1, 0, 0, RENESAS_CMD_DELAY,
+		sizeof(config_CMD_MODE), config_CMD_MODE},
+};
+
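+/*
+ * Panel-on sequence: send the sleep-off command table, then switch the
+ * DSI transmit power mode while the display-on table and the video/command
+ * mode select command are sent, and restore the power mode afterwards.
+ */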
+static int mipi_renesas_lcd_on(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+	struct mipi_panel_info *mipi;
+
+	mfd = platform_get_drvdata(pdev);
+	if (!mfd)
+		return -ENODEV;
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	mipi = &mfd->panel_info.mipi;
+
+	mipi_dsi_cmds_tx(mfd, &renesas_tx_buf, renesas_sleep_off_cmds,
+			ARRAY_SIZE(renesas_sleep_off_cmds));
+
+	mipi_set_tx_power_mode(1);
+	mipi_dsi_cmds_tx(mfd, &renesas_tx_buf, renesas_display_on_cmds,
+			ARRAY_SIZE(renesas_display_on_cmds));
+
+	if (mipi->mode == DSI_VIDEO_MODE)
+		mipi_dsi_cmds_tx(mfd, &renesas_tx_buf, renesas_video_on_cmds,
+			ARRAY_SIZE(renesas_video_on_cmds));
+	else
+		mipi_dsi_cmds_tx(mfd, &renesas_tx_buf, renesas_cmd_on_cmds,
+			ARRAY_SIZE(renesas_cmd_on_cmds));
+	mipi_set_tx_power_mode(0);
+
+	return 0;
+}
+
+static int mipi_renesas_lcd_off(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+
+	mfd = platform_get_drvdata(pdev);
+
+	if (!mfd)
+		return -ENODEV;
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	mipi_dsi_cmds_tx(mfd, &renesas_tx_buf, renesas_display_off_cmds,
+			ARRAY_SIZE(renesas_display_off_cmds));
+
+	return 0;
+}
+
+static int __devinit mipi_renesas_lcd_probe(struct platform_device *pdev)
+{
+	if (pdev->id == 0) {
+		mipi_renesas_pdata = pdev->dev.platform_data;
+		return 0;
+	}
+
+	msm_fb_add_device(pdev);
+
+	return 0;
+}
+
+static void mipi_renesas_set_backlight(struct msm_fb_data_type *mfd)
+{
+	int ret = -EPERM;
+	int bl_level;
+
+	bl_level = mfd->bl_level;
+
+	if (mipi_renesas_pdata && mipi_renesas_pdata->pmic_backlight)
+		ret = mipi_renesas_pdata->pmic_backlight(bl_level);
+
+	if (ret)
+		pr_err("%s: backlight level set failed\n", __func__);
+}
+
+static struct platform_driver this_driver = {
+	.probe  = mipi_renesas_lcd_probe,
+	.driver = {
+		.name   = "mipi_renesas",
+	},
+};
+
+static struct msm_fb_panel_data renesas_panel_data = {
+	.on		= mipi_renesas_lcd_on,
+	.off	= mipi_renesas_lcd_off,
+	.set_backlight = mipi_renesas_set_backlight,
+};
+
+static int ch_used[3];
+
+int mipi_renesas_device_register(struct msm_panel_info *pinfo,
+					u32 channel, u32 panel)
+{
+	struct platform_device *pdev = NULL;
+	int ret;
+
+	if ((channel >= 3) || ch_used[channel])
+		return -ENODEV;
+
+	ch_used[channel] = TRUE;
+
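+	/*
+	 * The platform device id packs the panel type into the upper byte
+	 * and the DSI channel into the low byte.
+	 */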
+	pdev = platform_device_alloc("mipi_renesas", (panel << 8)|channel);
+	if (!pdev)
+		return -ENOMEM;
+
+	renesas_panel_data.panel_info = *pinfo;
+
+	ret = platform_device_add_data(pdev, &renesas_panel_data,
+		sizeof(renesas_panel_data));
+	if (ret) {
+		pr_err("%s: platform_device_add_data failed!\n", __func__);
+		goto err_device_put;
+	}
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("%s: platform_device_register failed!\n", __func__);
+		goto err_device_put;
+	}
+
+	return 0;
+
+err_device_put:
+	platform_device_put(pdev);
+	return ret;
+}
+
+static int __init mipi_renesas_lcd_init(void)
+{
+	mipi_dsi_buf_alloc(&renesas_tx_buf, DSI_BUF_SIZE);
+	mipi_dsi_buf_alloc(&renesas_rx_buf, DSI_BUF_SIZE);
+
+	return platform_driver_register(&this_driver);
+}
+
+module_init(mipi_renesas_lcd_init);
diff --git a/drivers/video/msm/mipi_renesas.h b/drivers/video/msm/mipi_renesas.h
new file mode 100644
index 0000000..59ccfd0
--- /dev/null
+++ b/drivers/video/msm/mipi_renesas.h
@@ -0,0 +1,21 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MIPI_RENESAS_H
+#define MIPI_RENESAS_H
+
+#define RENESAS_FWVGA_TWO_LANE
+
+int mipi_renesas_device_register(struct msm_panel_info *pinfo,
+					u32 channel, u32 panel);
+
+#endif  /* MIPI_RENESAS_H */
diff --git a/drivers/video/msm/mipi_renesas_cmd_fwvga_pt.c b/drivers/video/msm/mipi_renesas_cmd_fwvga_pt.c
new file mode 100644
index 0000000..ff573a3
--- /dev/null
+++ b/drivers/video/msm/mipi_renesas_cmd_fwvga_pt.c
@@ -0,0 +1,162 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm_fb.h"
+#include "mipi_dsi.h"
+#include "mipi_renesas.h"
+
+static struct msm_panel_info pinfo;
+
+static struct mipi_dsi_phy_ctrl dsi_cmd_mode_phy_db = {
+#ifdef CONFIG_FB_MSM_MDP303
+	/* DSI Bit Clock at 500 MHz, 2 lane, RGB888 */
+	/* regulator */
+	{0x03, 0x01, 0x01, 0x00},
+	/* timing   */
+	{0xb9, 0x8e, 0x1f, 0x00, 0x98, 0x9c, 0x22, 0x90,
+	0x18, 0x03, 0x04},
+	/* phy ctrl */
+	{0x7f, 0x00, 0x00, 0x00},
+	/* strength */
+	{0xbb, 0x02, 0x06, 0x00},
+	/* pll control */
+	{0x01, 0xec, 0x31, 0xd2, 0x00, 0x40, 0x37, 0x62,
+	0x01, 0x0f, 0x07,
+	0x05, 0x14, 0x03, 0x0, 0x0, 0x0, 0x20, 0x0, 0x02, 0x0},
+#else
+	/* DSI_BIT_CLK at 400MHz, 1 lane, RGB888 */
+	{0x03, 0x01, 0x01, 0x00},	/* regulator */
+	/* timing   */
+	{0x22, 0x0c, 0x7, 0x00, 0x10, 0x20, 0x10,
+	0xd, 0x8, 0x2, 0x3},
+	/* phy ctrl */
+	{0x7f, 0x00, 0x00, 0x00},
+	/* strength */
+	{0xee, 0x00, 0x6, 0x00},
+	/* pll control */
+	{0x40, 0x2f, 0xb1, 0xda, 0x00, 0x50, 0x48, 0x63,
+#if defined(RENESAS_FWVGA_TWO_LANE)
+	0x33, 0x1f, 0x07,
+#else	/* default set to 1 lane */
+	0x30, 0x07, 0x07,
+#endif
+	0x05, 0x14, 0x03, 0x0, 0x0, 0x54, 0x06, 0x10, 0x04, 0x0},
+#endif
+};
+
+static int __init mipi_cmd_renesas_fwvga_pt_init(void)
+{
+	int ret;
+
+#ifdef CONFIG_FB_MSM_MIPI_PANEL_DETECT
+	if (msm_fb_detect_client("mipi_cmd_renesas_fwvga"))
+		return 0;
+#endif
+
+	pinfo.xres = 480;
+	pinfo.yres = 864;
+	pinfo.type = MIPI_CMD_PANEL;
+	pinfo.pdest = DISPLAY_1;
+	pinfo.wait_cycle = 0;
+	pinfo.bpp = 24;
+#ifdef CONFIG_FB_MSM_MDP303
+	pinfo.lcdc.h_back_porch = 100;
+	pinfo.lcdc.h_front_porch = 100;
+	pinfo.lcdc.h_pulse_width = 8;
+	pinfo.lcdc.v_back_porch = 20;
+	pinfo.lcdc.v_front_porch = 20;
+	pinfo.lcdc.v_pulse_width = 1;
+#else
+	pinfo.lcdc.h_front_porch = 50;
+#if defined(RENESAS_FWVGA_TWO_LANE)
+	pinfo.lcdc.h_back_porch = 400;
+	pinfo.lcdc.h_pulse_width = 5;
+	pinfo.lcdc.v_back_porch = 75;
+	pinfo.lcdc.v_front_porch = 5;
+	pinfo.lcdc.v_pulse_width = 1;
+#else
+	pinfo.lcdc.h_back_porch = 50;
+	pinfo.lcdc.h_pulse_width = 20;
+	pinfo.lcdc.v_back_porch = 10;
+	pinfo.lcdc.v_front_porch = 10;
+	pinfo.lcdc.v_pulse_width = 5;
+#endif
+
+#endif /* CONFIG_FB_MSM_MDP303 */
+	pinfo.lcdc.border_clr = 0;	/* blk */
+	pinfo.lcdc.underflow_clr = 0xff;	/* blue */
+	pinfo.lcdc.hsync_skew = 0;
+	pinfo.bl_max = 100;
+	pinfo.bl_min = 1;
+	pinfo.fb_num = 2;
+
+#ifdef CONFIG_FB_MSM_MDP303
+	pinfo.clk_rate = 499000000;
+#else
+	pinfo.clk_rate = 152000000;
+#endif
+
+#ifdef USE_HW_VSYNC
+	pinfo.lcd.vsync_enable = TRUE;
+	pinfo.lcd.hw_vsync_mode = TRUE;
+#endif
+	pinfo.lcd.refx100 = 6000; /* adjust refx100 to prevent tearing */
+
+	pinfo.mipi.mode = DSI_CMD_MODE;
+	pinfo.mipi.dst_format = DSI_CMD_DST_FORMAT_RGB888;
+	pinfo.mipi.vc = 0;
+	pinfo.mipi.rgb_swap = DSI_RGB_SWAP_RGB;
+	pinfo.mipi.data_lane0 = TRUE;
+#ifdef CONFIG_FB_MSM_MDP303
+	pinfo.mipi.data_lane1 = TRUE;
+	pinfo.mipi.t_clk_post = 0x20;
+	pinfo.mipi.t_clk_pre = 0x2F;
+	pinfo.mipi.stream = 0; /* dma_p */
+	pinfo.mipi.mdp_trigger = DSI_CMD_TRIGGER_SW;
+	pinfo.mipi.dma_trigger = DSI_CMD_TRIGGER_SW;
+	pinfo.mipi.te_sel = 0; /* TE from vsync gpio */
+	pinfo.mipi.interleave_max = 1;
+	pinfo.mipi.insert_dcs_cmd = TRUE;
+	pinfo.mipi.wr_mem_continue = 0x3c;
+	pinfo.mipi.wr_mem_start = 0x2c;
+	pinfo.mipi.dsi_phy_db = &dsi_cmd_mode_phy_db;
+	pinfo.mipi.tx_eot_append = 0x01;
+	pinfo.mipi.rx_eot_ignore = 0;
+	pinfo.mipi.dlane_swap = 0x01;
+#else
+#if defined(RENESAS_FWVGA_TWO_LANE)
+	pinfo.mipi.data_lane1 = TRUE;
+#else
+	pinfo.mipi.data_lane1 = FALSE;
+#endif
+	pinfo.mipi.t_clk_post = 0x18;
+	pinfo.mipi.t_clk_pre = 0x14;
+	pinfo.mipi.stream = 0;	/* dma_p */
+	pinfo.mipi.mdp_trigger = DSI_CMD_TRIGGER_SW;
+	pinfo.mipi.dma_trigger = DSI_CMD_TRIGGER_SW;
+	pinfo.mipi.te_sel = 1; /* TE from vsync gpio */
+	pinfo.mipi.interleave_max = 1;
+	pinfo.mipi.insert_dcs_cmd = TRUE;
+	pinfo.mipi.wr_mem_continue = 0x3c;
+	pinfo.mipi.wr_mem_start = 0x2c;
+	pinfo.mipi.dsi_phy_db = &dsi_cmd_mode_phy_db;
+#endif /* CONFIG_FB_MSM_MDP303 */
+
+	ret = mipi_renesas_device_register(&pinfo, MIPI_DSI_PRIM,
+						MIPI_DSI_PANEL_FWVGA_PT);
+	if (ret)
+		pr_err("%s: failed to register device!\n", __func__);
+
+	return ret;
+}
+
+module_init(mipi_cmd_renesas_fwvga_pt_init);
diff --git a/drivers/video/msm/mipi_renesas_video_fwvga_pt.c b/drivers/video/msm/mipi_renesas_video_fwvga_pt.c
new file mode 100644
index 0000000..0e49011
--- /dev/null
+++ b/drivers/video/msm/mipi_renesas_video_fwvga_pt.c
@@ -0,0 +1,165 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm_fb.h"
+#include "mipi_dsi.h"
+#include "mipi_renesas.h"
+
+static struct msm_panel_info pinfo;
+
+static struct mipi_dsi_phy_ctrl dsi_video_mode_phy_db = {
+#ifdef CONFIG_FB_MSM_MDP303
+	/* DSI Bit Clock at 500 MHz, 2 lane, RGB888 */
+	/* regulator */
+	{0x03, 0x01, 0x01, 0x00},
+	/* timing   */
+	{0xb9, 0x8e, 0x1f, 0x00, 0x98, 0x9c, 0x22, 0x90,
+	0x18, 0x03, 0x04},
+	/* phy ctrl */
+	{0x7f, 0x00, 0x00, 0x00},
+	/* strength */
+	{0xbb, 0x02, 0x06, 0x00},
+	/* pll control */
+	{0x00, 0xec, 0x31, 0xd2, 0x00, 0x40, 0x37, 0x62,
+	0x01, 0x0f, 0x07,
+	0x05, 0x14, 0x03, 0x0, 0x0, 0x0, 0x20, 0x0, 0x02, 0x0},
+#else
+	/* DSI_BIT_CLK at 400MHz, 1 lane, RGB888 */
+	/* regulator */
+	{0x03, 0x01, 0x01, 0x00},
+	/* timing   */
+	{0xaa, 0x3b, 0x1b, 0x00, 0x52, 0x58, 0x20, 0x3f,
+	0x2e, 0x03, 0x04},
+	/* phy ctrl */
+	{0x7f, 0x00, 0x00, 0x00},
+	/* strength */
+	{0xee, 0x00, 0x86, 0x00},
+	/* pll control */
+	{0x40, 0xc7, 0xb0, 0xda, 0x00, 0x50, 0x48, 0x63,
+#if defined(RENESAS_FWVGA_TWO_LANE)
+	0x30, 0x07, 0x03,
+#else
+	/* default set to 1 lane */
+	0x30, 0x07, 0x07,
+#endif
+	0x05, 0x14, 0x03, 0x0, 0x0, 0x54, 0x06, 0x10, 0x04, 0x0},
+#endif
+};
+
+static int __init mipi_video_renesas_fwvga_pt_init(void)
+{
+	int ret;
+
+#ifdef CONFIG_FB_MSM_MIPI_PANEL_DETECT
+	if (msm_fb_detect_client("mipi_video_renesas_fwvga"))
+		return 0;
+#endif
+
+	pinfo.xres = 480;
+	pinfo.yres = 864;
+	pinfo.type = MIPI_VIDEO_PANEL;
+	pinfo.pdest = DISPLAY_1;
+	pinfo.wait_cycle = 0;
+	pinfo.bpp = 24;
+#ifdef CONFIG_FB_MSM_MDP303
+	pinfo.lcdc.h_back_porch = 100;
+	pinfo.lcdc.h_front_porch = 100;
+	pinfo.lcdc.h_pulse_width = 8;
+	pinfo.lcdc.v_back_porch = 20;
+	pinfo.lcdc.v_front_porch = 20;
+	pinfo.lcdc.v_pulse_width = 1;
+	pinfo.clk_rate = 499000000;
+#else
+
+#if defined(RENESAS_FWVGA_TWO_LANE)
+	pinfo.lcdc.h_back_porch = 400;
+#else
+	pinfo.lcdc.h_back_porch = 50;
+#endif
+	pinfo.lcdc.h_front_porch = 50;
+
+#if defined(RENESAS_FWVGA_TWO_LANE)
+	pinfo.lcdc.h_pulse_width = 5;
+#else
+	pinfo.lcdc.h_pulse_width = 20;
+#endif
+
+#if defined(RENESAS_FWVGA_TWO_LANE)
+	pinfo.lcdc.v_back_porch = 75;
+	pinfo.lcdc.v_front_porch = 5;
+	pinfo.lcdc.v_pulse_width = 1;
+#else
+	pinfo.lcdc.v_back_porch = 10;
+	pinfo.lcdc.v_front_porch = 10;
+	pinfo.lcdc.v_pulse_width = 5;
+#endif
+
+#endif
+	pinfo.lcdc.border_clr = 0;	/* blk */
+	pinfo.lcdc.underflow_clr = 0xff;	/* blue */
+	pinfo.lcdc.hsync_skew = 0;
+	pinfo.bl_max = 100;
+	pinfo.bl_min = 1;
+	pinfo.fb_num = 2;
+
+	pinfo.mipi.mode = DSI_VIDEO_MODE;
+	pinfo.mipi.pulse_mode_hsa_he = TRUE;
+	pinfo.mipi.hfp_power_stop = TRUE;
+	pinfo.mipi.hbp_power_stop = TRUE;
+	pinfo.mipi.hsa_power_stop = TRUE;
+	pinfo.mipi.eof_bllp_power_stop = TRUE;
+	pinfo.mipi.bllp_power_stop = TRUE;
+#ifdef CONFIG_FB_MSM_MDP303
+	pinfo.mipi.traffic_mode = DSI_BURST_MODE;
+	pinfo.mipi.dst_format = DSI_VIDEO_DST_FORMAT_RGB888;
+	pinfo.mipi.vc = 0;
+	pinfo.mipi.rgb_swap = DSI_RGB_SWAP_RGB;
+	pinfo.mipi.data_lane0 = TRUE;
+	pinfo.mipi.data_lane1 = TRUE;
+	pinfo.mipi.t_clk_post = 0x20;
+	pinfo.mipi.t_clk_pre = 0x2F;
+	pinfo.mipi.stream = 0; /* dma_p */
+	pinfo.mipi.mdp_trigger = DSI_CMD_TRIGGER_NONE;
+	pinfo.mipi.dma_trigger = DSI_CMD_TRIGGER_SW;
+	pinfo.mipi.frame_rate = 60;
+	pinfo.mipi.dsi_phy_db = &dsi_video_mode_phy_db;
+	pinfo.mipi.dlane_swap = 0x01;
+	pinfo.mipi.tx_eot_append = 0x01;
+#else
+	pinfo.mipi.traffic_mode = DSI_NON_BURST_SYNCH_PULSE;
+	pinfo.mipi.dst_format = DSI_VIDEO_DST_FORMAT_RGB888;
+	pinfo.mipi.vc = 0;
+	pinfo.mipi.rgb_swap = DSI_RGB_SWAP_BGR;
+	pinfo.mipi.data_lane0 = TRUE;
+#if defined(RENESAS_FWVGA_TWO_LANE)
+	pinfo.mipi.data_lane1 = TRUE;
+#else
+	pinfo.mipi.data_lane1 = FALSE;
+#endif
+	pinfo.mipi.t_clk_post = 0x03;
+	pinfo.mipi.t_clk_pre = 0x24;
+	pinfo.mipi.stream = 0; /* dma_p */
+	pinfo.mipi.mdp_trigger = DSI_CMD_TRIGGER_SW;
+	pinfo.mipi.dma_trigger = DSI_CMD_TRIGGER_SW;
+	pinfo.mipi.frame_rate = 60;
+	pinfo.mipi.dsi_phy_db = &dsi_video_mode_phy_db;
+#endif
+
+	ret = mipi_renesas_device_register(&pinfo, MIPI_DSI_PRIM,
+						MIPI_DSI_PANEL_FWVGA_PT);
+	if (ret)
+		pr_err("%s: failed to register device!\n", __func__);
+
+	return ret;
+}
+
+module_init(mipi_video_renesas_fwvga_pt_init);
diff --git a/drivers/video/msm/mipi_simulator.c b/drivers/video/msm/mipi_simulator.c
new file mode 100644
index 0000000..da697b5
--- /dev/null
+++ b/drivers/video/msm/mipi_simulator.c
@@ -0,0 +1,162 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm_fb.h"
+#include "mipi_dsi.h"
+#include "mipi_simulator.h"
+
+static struct dsi_buf simulator_tx_buf;
+static struct dsi_buf simulator_rx_buf;
+static struct msm_panel_common_pdata *mipi_simulator_pdata;
+
+static char display_on[2]  = {0x00, 0x00};
+static char display_off[2] = {0x00, 0x00};
+
+static struct dsi_cmd_desc display_on_cmds[] = {
+		{DTYPE_PERIPHERAL_ON, 1, 0, 0, 0, sizeof(display_on),
+				display_on}
+};
+static struct dsi_cmd_desc display_off_cmds[] = {
+		{DTYPE_PERIPHERAL_OFF, 1, 0, 0, 0, sizeof(display_off),
+				display_off}
+};
+
+static int mipi_simulator_lcd_on(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+	struct mipi_panel_info *mipi;
+
+	mfd = platform_get_drvdata(pdev);
+	if (!mfd)
+		return -ENODEV;
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	mipi = &mfd->panel_info.mipi;
+
+	pr_debug("%s:%d, debug info (mode) : %d", __func__, __LINE__,
+		 mipi->mode);
+
+	if (mipi->mode == DSI_VIDEO_MODE) {
+		mipi_dsi_cmds_tx(mfd, &simulator_tx_buf, display_on_cmds,
+			ARRAY_SIZE(display_on_cmds));
+	} else {
+		pr_err("%s:%d, CMD MODE NOT SUPPORTED", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int mipi_simulator_lcd_off(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+	struct mipi_panel_info *mipi;
+
+	mfd = platform_get_drvdata(pdev);
+	if (!mfd)
+		return -ENODEV;
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	mipi = &mfd->panel_info.mipi;
+
+	pr_debug("%s:%d, debug info", __func__, __LINE__);
+
+	if (mipi->mode == DSI_VIDEO_MODE) {
+		mipi_dsi_cmds_tx(mfd, &simulator_tx_buf, display_off_cmds,
+			ARRAY_SIZE(display_off_cmds));
+	} else {
+		pr_debug("%s:%d, DONT REACH HERE", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int __devinit mipi_simulator_lcd_probe(struct platform_device *pdev)
+{
+	if (pdev->id == 0) {
+		mipi_simulator_pdata = pdev->dev.platform_data;
+		return 0;
+	}
+	pr_debug("%s:%d, debug info", __func__, __LINE__);
+
+	msm_fb_add_device(pdev);
+
+	return 0;
+}
+
+static struct platform_driver this_driver = {
+	.probe  = mipi_simulator_lcd_probe,
+	.driver = {
+		.name   = "mipi_simulator",
+	},
+};
+
+static struct msm_fb_panel_data simulator_panel_data = {
+	.on		= mipi_simulator_lcd_on,
+	.off		= mipi_simulator_lcd_off,
+};
+
+static int ch_used[3];
+
+int mipi_simulator_device_register(struct msm_panel_info *pinfo,
+					u32 channel, u32 panel)
+{
+	struct platform_device *pdev = NULL;
+	int ret;
+
+	if ((channel >= 3) || ch_used[channel])
+		return -ENODEV;
+
+	ch_used[channel] = TRUE;
+
+	pr_debug("%s:%d, debug info", __func__, __LINE__);
+
+	pdev = platform_device_alloc("mipi_simulator", (panel << 8)|channel);
+	if (!pdev)
+		return -ENOMEM;
+
+	simulator_panel_data.panel_info = *pinfo;
+
+	ret = platform_device_add_data(pdev, &simulator_panel_data,
+		sizeof(simulator_panel_data));
+	if (ret) {
+		pr_err("%s: platform_device_add_data failed!\n",
+			__func__);
+		goto err_device_put;
+	}
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("%s: platform_device_register failed!\n",
+			__func__);
+		goto err_device_put;
+	}
+
+	return 0;
+
+err_device_put:
+	platform_device_put(pdev);
+	return ret;
+}
+
+static int __init mipi_simulator_lcd_init(void)
+{
+	mipi_dsi_buf_alloc(&simulator_tx_buf, DSI_BUF_SIZE);
+	mipi_dsi_buf_alloc(&simulator_rx_buf, DSI_BUF_SIZE);
+
+	return platform_driver_register(&this_driver);
+}
+
+module_init(mipi_simulator_lcd_init);
diff --git a/drivers/video/msm/mipi_simulator.h b/drivers/video/msm/mipi_simulator.h
new file mode 100644
index 0000000..274ce8f
--- /dev/null
+++ b/drivers/video/msm/mipi_simulator.h
@@ -0,0 +1,19 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MIPI_SIMULATOR_H
+#define MIPI_SIMULATOR_H
+
+int mipi_simulator_device_register(struct msm_panel_info *pinfo,
+					u32 channel, u32 panel);
+
+#endif  /* MIPI_SIMULATOR_H */
diff --git a/drivers/video/msm/mipi_simulator_video.c b/drivers/video/msm/mipi_simulator_video.c
new file mode 100644
index 0000000..8795554
--- /dev/null
+++ b/drivers/video/msm/mipi_simulator_video.c
@@ -0,0 +1,88 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm_fb.h"
+#include "mipi_dsi.h"
+#include "mipi_simulator.h"
+
+static struct msm_panel_info pinfo;
+
+static struct mipi_dsi_phy_ctrl dsi_video_mode_phy_db = {
+	{0x03, 0x01, 0x01, 0x00},
+	{0xaa, 0x3b, 0x1b, 0x00, 0x52, 0x58, 0x20, 0x3f,
+		0x2e, 0x03, 0x04},
+	{0x7f, 0x00, 0x00, 0x00},
+	{0xee, 0x00, 0x86, 0x00},
+	{0x40, 0xc7, 0xb0, 0xda, 0x00, 0x50, 0x48, 0x63,
+		0x30, 0x07, 0x03,
+		0x05, 0x14, 0x03, 0x0, 0x0, 0x54, 0x06, 0x10, 0x04, 0x0},
+};
+
+static int __init mipi_video_simulator_init(void)
+{
+	int ret;
+
+#ifdef CONFIG_FB_MSM_MIPI_PANEL_DETECT
+	if (msm_fb_detect_client("mipi_video_simulator"))
+		return 0;
+#endif
+	pinfo.xres = 640;
+	pinfo.yres = 480;
+	pinfo.type = MIPI_VIDEO_PANEL;
+	pinfo.pdest = DISPLAY_1;
+	pinfo.wait_cycle = 0;
+	pinfo.bpp = 24;
+
+	pinfo.lcdc.h_back_porch  = 6;
+	pinfo.lcdc.h_front_porch = 6;
+	pinfo.lcdc.h_pulse_width = 2;
+	pinfo.lcdc.v_back_porch  = 6;
+	pinfo.lcdc.v_front_porch = 6;
+	pinfo.lcdc.v_pulse_width = 2;
+
+	pinfo.lcdc.border_clr = 0;	/* blk */
+	pinfo.lcdc.underflow_clr = 0xff;	/* blue */
+	pinfo.lcdc.hsync_skew = 0;
+	pinfo.bl_max = 15;
+	pinfo.bl_min = 1;
+	pinfo.fb_num = 2;
+
+	pinfo.mipi.mode = DSI_VIDEO_MODE;
+	pinfo.mipi.pulse_mode_hsa_he = TRUE;
+	pinfo.mipi.hfp_power_stop = TRUE;
+	pinfo.mipi.hbp_power_stop = TRUE;
+	pinfo.mipi.hsa_power_stop = TRUE;
+	pinfo.mipi.eof_bllp_power_stop = TRUE;
+	pinfo.mipi.bllp_power_stop = TRUE;
+	pinfo.mipi.traffic_mode = DSI_NON_BURST_SYNCH_PULSE;
+	pinfo.mipi.dst_format = DSI_VIDEO_DST_FORMAT_RGB888;
+	pinfo.mipi.vc = 0;
+	pinfo.mipi.rgb_swap = DSI_RGB_SWAP_RGB;
+	pinfo.mipi.data_lane0 = TRUE;
+	pinfo.mipi.data_lane1 = TRUE;
+	pinfo.mipi.t_clk_post = 0x03;
+	pinfo.mipi.t_clk_pre = 0x24;
+	pinfo.mipi.stream = 0; /* dma_p */
+	pinfo.mipi.mdp_trigger = DSI_CMD_TRIGGER_SW;
+	pinfo.mipi.dma_trigger = DSI_CMD_TRIGGER_SW;
+	pinfo.mipi.frame_rate = 60;
+	pinfo.mipi.dsi_phy_db = &dsi_video_mode_phy_db;
+
+	ret = mipi_simulator_device_register(&pinfo, MIPI_DSI_PRIM,
+		MIPI_DSI_PANEL_FWVGA_PT);
+	if (ret)
+		pr_err("%s: failed to register device!\n", __func__);
+
+	return ret;
+}
+
+module_init(mipi_video_simulator_init);
diff --git a/drivers/video/msm/mipi_toshiba.c b/drivers/video/msm/mipi_toshiba.c
new file mode 100644
index 0000000..09fbf24
--- /dev/null
+++ b/drivers/video/msm/mipi_toshiba.c
@@ -0,0 +1,292 @@
+/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_fb.h"
+#include "mipi_dsi.h"
+#include "mipi_toshiba.h"
+
+static struct msm_panel_common_pdata *mipi_toshiba_pdata;
+
+static struct dsi_buf toshiba_tx_buf;
+static struct dsi_buf toshiba_rx_buf;
+
+#ifdef TOSHIBA_CMDS_UNUSED
+static char one_lane[3] = {0xEF, 0x60, 0x62};
+static char dmode_wqvga[2] = {0xB3, 0x01};
+static char intern_wr_clk1_wqvga[3] = {0xef, 0x2f, 0x22};
+static char intern_wr_clk2_wqvga[3] = {0xef, 0x6e, 0x33};
+static char hor_addr_2A_wqvga[5] = {0x2A, 0x00, 0x00, 0x00, 0xef};
+static char hor_addr_2B_wqvga[5] = {0x2B, 0x00, 0x00, 0x01, 0xaa};
+static char if_sel_cmd[2] = {0x53, 0x00};
+#endif
+
+static char exit_sleep[2] = {0x11, 0x00};
+static char display_on[2] = {0x29, 0x00};
+static char display_off[2] = {0x28, 0x00};
+static char enter_sleep[2] = {0x10, 0x00};
+
+#ifdef CONFIG_FB_MSM_MIPI_TOSHIBA_VIDEO_WVGA_PT_PANEL
+static char mcap_off[2] = {0xb2, 0x00};
+static char ena_test_reg[3] = {0xEF, 0x01, 0x01};
+static char two_lane[3] = {0xEF, 0x60, 0x63};
+static char non_burst_sync_pulse[3] = {0xef, 0x61, 0x09};
+static char dmode_wvga[2] = {0xB3, 0x00};
+static char intern_wr_clk1_wvga[3] = {0xef, 0x2f, 0xcc};
+static char intern_wr_clk2_wvga[3] = {0xef, 0x6e, 0xdd};
+static char hor_addr_2A_wvga[5] = {0x2A, 0x00, 0x00, 0x01, 0xdf};
+static char hor_addr_2B_wvga[5] = {0x2B, 0x00, 0x00, 0x03, 0x55};
+static char if_sel_video[2] = {0x53, 0x01};
+
+static struct dsi_cmd_desc toshiba_display_on_cmds[] = {
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(mcap_off), mcap_off},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(ena_test_reg), ena_test_reg},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(two_lane), two_lane},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(non_burst_sync_pulse),
+					non_burst_sync_pulse},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(dmode_wvga), dmode_wvga},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(intern_wr_clk1_wvga),
+					intern_wr_clk1_wvga},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(intern_wr_clk2_wvga),
+					intern_wr_clk2_wvga},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(hor_addr_2A_wvga),
+					hor_addr_2A_wvga},
+	{DTYPE_DCS_LWRITE, 1, 0, 0, 0, sizeof(hor_addr_2B_wvga),
+					hor_addr_2B_wvga},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(if_sel_video), if_sel_video},
+	{DTYPE_DCS_WRITE, 1, 0, 0, 0, sizeof(exit_sleep), exit_sleep},
+	{DTYPE_DCS_WRITE, 1, 0, 0, 0, sizeof(display_on), display_on}
+};
+
+#endif
+
+#ifdef CONFIG_FB_MSM_MIPI_TOSHIBA_VIDEO_WSVGA_PT_PANEL
+static char mcap_start[2] = {0xb0, 0x04};
+static char num_out_pixelform[3] = {0xb3, 0x00, 0x87};
+static char dsi_ctrl[3] = {0xb6, 0x30, 0x83};
+static char panel_driving[7] = {0xc0, 0x01, 0x00, 0x85, 0x00, 0x00, 0x00};
+static char dispV_timing[5] = {0xc1, 0x00, 0x10, 0x00, 0x01};
+static char dispCtrl[3] = {0xc3, 0x00, 0x19};
+static char test_mode_c4[2] = {0xc4, 0x03};
+static char dispH_timing[15] = {
+	/* TYPE_DCS_LWRITE */
+	0xc5, 0x00, 0x01, 0x05,
+	0x04, 0x5e, 0x00, 0x00,
+	0x00, 0x00, 0x0b, 0x17,
+	0x05, 0x00, 0x00
+};
+static char test_mode_c6[2] = {0xc6, 0x00};
+static char gamma_setA[13] = {
+	0xc8, 0x0a, 0x15, 0x18,
+	0x1b, 0x1c, 0x0d, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00
+};
+static char gamma_setB[13] = {
+	0xc9, 0x0d, 0x1d, 0x1f,
+	0x1f, 0x1f, 0x10, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00
+};
+static char gamma_setC[13] = {
+	0xca, 0x1e, 0x1f, 0x1e,
+	0x1d, 0x1d, 0x10, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00
+};
+static char powerSet_ChrgPmp[5] = {0xd0, 0x02, 0x00, 0xa3, 0xb8};
+static char testMode_d1[6] = {0xd1, 0x10, 0x14, 0x53, 0x64, 0x00};
+static char powerSet_SrcAmp[3] = {0xd2, 0xb3, 0x00};
+static char powerInt_PS[3] = {0xd3, 0x33, 0x03};
+static char vreg[2] = {0xd5, 0x00};
+static char test_mode_d6[2] = {0xd6, 0x01};
+static char timingCtrl_d7[9] = {
+	0xd7, 0x09, 0x00, 0x84,
+	0x81, 0x61, 0xbc, 0xb5,
+	0x05
+};
+static char timingCtrl_d8[7] = {
+	0xd8, 0x04, 0x25, 0x90,
+	0x4c, 0x92, 0x00
+};
+static char timingCtrl_d9[4] = {0xd9, 0x5b, 0x7f, 0x05};
+static char white_balance[6] = {0xcb, 0x00, 0x00, 0x00, 0x1c, 0x00};
+static char vcs_settings[2] = {0xdd, 0x53};
+static char vcom_dc_settings[2] = {0xde, 0x43};
+static char testMode_e3[5] = {0xe3, 0x00, 0x00, 0x00, 0x00};
+static char testMode_e4[6] = {0xe4, 0x00, 0x00, 0x22, 0xaa, 0x00};
+static char testMode_e5[2] = {0xe5, 0x00};
+static char testMode_fa[4] = {0xfa, 0x00, 0x00, 0x00};
+static char testMode_fd[5] = {0xfd, 0x00, 0x00, 0x00, 0x00};
+static char testMode_fe[5] = {0xfe, 0x00, 0x00, 0x00, 0x00};
+static char mcap_end[2] = {0xb0, 0x03};
+static char set_add_mode[2] = {0x36, 0x0};
+static char set_pixel_format[2] = {0x3a, 0x70};
+
+
+static struct dsi_cmd_desc toshiba_display_on_cmds[] = {
+	{DTYPE_GEN_WRITE2, 1, 0, 0, 10, sizeof(mcap_start), mcap_start},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 10, sizeof(num_out_pixelform),
+		num_out_pixelform},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 10, sizeof(dsi_ctrl), dsi_ctrl},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(panel_driving), panel_driving},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(dispV_timing), dispV_timing},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(dispCtrl), dispCtrl},
+	{DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(test_mode_c4), test_mode_c4},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(dispH_timing), dispH_timing},
+	{DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(test_mode_c6), test_mode_c6},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(gamma_setA), gamma_setA},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(gamma_setB), gamma_setB},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(gamma_setC), gamma_setC},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(powerSet_ChrgPmp),
+		powerSet_ChrgPmp},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(testMode_d1), testMode_d1},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(powerSet_SrcAmp),
+		powerSet_SrcAmp},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(powerInt_PS), powerInt_PS},
+	{DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(vreg), vreg},
+	{DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(test_mode_d6), test_mode_d6},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(timingCtrl_d7), timingCtrl_d7},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(timingCtrl_d8), timingCtrl_d8},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(timingCtrl_d9), timingCtrl_d9},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(white_balance), white_balance},
+	{DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(vcs_settings), vcs_settings},
+	{DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(vcom_dc_settings),
+		vcom_dc_settings},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(testMode_e3), testMode_e3},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(testMode_e4), testMode_e4},
+	{DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(testMode_e5), testMode_e5},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(testMode_fa), testMode_fa},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(testMode_fd), testMode_fd},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(testMode_fe), testMode_fe},
+	{DTYPE_GEN_WRITE2, 1, 0, 0, 0, sizeof(mcap_end), mcap_end},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(set_add_mode), set_add_mode},
+	{DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(set_pixel_format),
+		set_pixel_format},
+	{DTYPE_DCS_WRITE, 1, 0, 0, 120, sizeof(exit_sleep), exit_sleep},
+	{DTYPE_DCS_WRITE, 1, 0, 0, 50, sizeof(display_on), display_on}
+};
+#endif
+
+static struct dsi_cmd_desc toshiba_display_off_cmds[] = {
+	{DTYPE_DCS_WRITE, 1, 0, 0, 50, sizeof(display_off), display_off},
+	{DTYPE_DCS_WRITE, 1, 0, 0, 120, sizeof(enter_sleep), enter_sleep}
+};
+
+static int mipi_toshiba_lcd_on(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+
+	mfd = platform_get_drvdata(pdev);
+
+	if (!mfd)
+		return -ENODEV;
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	mipi_dsi_cmds_tx(mfd, &toshiba_tx_buf, toshiba_display_on_cmds,
+			ARRAY_SIZE(toshiba_display_on_cmds));
+
+	return 0;
+}
+
+static int mipi_toshiba_lcd_off(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+
+	mfd = platform_get_drvdata(pdev);
+
+	if (!mfd)
+		return -ENODEV;
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	mipi_dsi_cmds_tx(mfd, &toshiba_tx_buf, toshiba_display_off_cmds,
+			ARRAY_SIZE(toshiba_display_off_cmds));
+
+	return 0;
+}
+
+static int __devinit mipi_toshiba_lcd_probe(struct platform_device *pdev)
+{
+	if (pdev->id == 0) {
+		mipi_toshiba_pdata = pdev->dev.platform_data;
+		return 0;
+	}
+
+	msm_fb_add_device(pdev);
+
+	return 0;
+}
+
+static struct platform_driver this_driver = {
+	.probe  = mipi_toshiba_lcd_probe,
+	.driver = {
+		.name   = "mipi_toshiba",
+	},
+};
+
+static struct msm_fb_panel_data toshiba_panel_data = {
+	.on		= mipi_toshiba_lcd_on,
+	.off		= mipi_toshiba_lcd_off,
+};
+
+static int ch_used[3];
+
+int mipi_toshiba_device_register(struct msm_panel_info *pinfo,
+					u32 channel, u32 panel)
+{
+	struct platform_device *pdev = NULL;
+	int ret;
+
+	if ((channel >= 3) || ch_used[channel])
+		return -ENODEV;
+
+	ch_used[channel] = TRUE;
+
+	pdev = platform_device_alloc("mipi_toshiba", (panel << 8)|channel);
+	if (!pdev)
+		return -ENOMEM;
+
+	toshiba_panel_data.panel_info = *pinfo;
+
+	ret = platform_device_add_data(pdev, &toshiba_panel_data,
+		sizeof(toshiba_panel_data));
+	if (ret) {
+		printk(KERN_ERR
+		  "%s: platform_device_add_data failed!\n", __func__);
+		goto err_device_put;
+	}
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		printk(KERN_ERR
+		  "%s: platform_device_register failed!\n", __func__);
+		goto err_device_put;
+	}
+
+	return 0;
+
+err_device_put:
+	platform_device_put(pdev);
+	return ret;
+}
+
+static int __init mipi_toshiba_lcd_init(void)
+{
+	mipi_dsi_buf_alloc(&toshiba_tx_buf, DSI_BUF_SIZE);
+	mipi_dsi_buf_alloc(&toshiba_rx_buf, DSI_BUF_SIZE);
+
+	return platform_driver_register(&this_driver);
+}
+
+module_init(mipi_toshiba_lcd_init);
diff --git a/drivers/video/msm/mipi_toshiba.h b/drivers/video/msm/mipi_toshiba.h
new file mode 100644
index 0000000..657636a
--- /dev/null
+++ b/drivers/video/msm/mipi_toshiba.h
@@ -0,0 +1,21 @@
+
+/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MIPI_TOSHIBA_H
+#define MIPI_TOSHIBA_H
+
+int mipi_toshiba_device_register(struct msm_panel_info *pinfo,
+					u32 channel, u32 panel);
+
+#endif  /* MIPI_TOSHIBA_H */
diff --git a/drivers/video/msm/mipi_toshiba_video_wsvga_pt.c b/drivers/video/msm/mipi_toshiba_video_wsvga_pt.c
new file mode 100644
index 0000000..0477725
--- /dev/null
+++ b/drivers/video/msm/mipi_toshiba_video_wsvga_pt.c
@@ -0,0 +1,106 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_fb.h"
+#include "mipi_dsi.h"
+#include "mipi_toshiba.h"
+
+static struct msm_panel_info pinfo;
+
+static struct mipi_dsi_phy_ctrl dsi_video_mode_phy_db = {
+	/* 600*1024, RGB888, 3 Lane 55 fps video mode */
+	/* regulator */
+	{0x03, 0x0a, 0x04, 0x00, 0x20},
+	/* timing */
+	{0xab, 0x8a, 0x18, 0x00, 0x92, 0x97, 0x1b, 0x8c,
+	0x0c, 0x03, 0x04, 0xa0},
+	/* phy ctrl */
+	{0x5f, 0x00, 0x00, 0x10},
+	/* strength */
+	{0xff, 0x00, 0x06, 0x00},
+	/* pll control */
+	{0x0, 0x7f, 0x1, 0x1a, 0x00, 0x50, 0x48, 0x63,
+	0x41, 0x0f, 0x01,
+	0x00, 0x14, 0x03, 0x00, 0x02, 0x00, 0x20, 0x00, 0x01 },
+};
+
+static int __init mipi_video_toshiba_wsvga_pt_init(void)
+{
+	int ret;
+
+#ifdef CONFIG_FB_MSM_MIPI_PANEL_DETECT
+	if (msm_fb_detect_client("mipi_video_toshiba_wsvga"))
+		return 0;
+#endif
+
+	pinfo.xres = 600;
+	pinfo.yres = 1024;
+	/*
+	 * The panel's horizontal input timing requirement is to
+	 * include dummy (pad) data of 200 clk in addition to the
+	 * width and porch/sync width values, hence the xres_pad
+	 * setting below.
+	 */
+	pinfo.mipi.xres_pad = 200;
+	pinfo.mipi.yres_pad = 0;
+
+	pinfo.type = MIPI_VIDEO_PANEL;
+	pinfo.pdest = DISPLAY_1;
+	pinfo.wait_cycle = 0;
+	pinfo.bpp = 24;
+	pinfo.lcdc.h_back_porch = 16;
+	pinfo.lcdc.h_front_porch = 23;
+	pinfo.lcdc.h_pulse_width = 8;
+	pinfo.lcdc.v_back_porch = 2;
+	pinfo.lcdc.v_front_porch = 7;
+	pinfo.lcdc.v_pulse_width = 2;
+	pinfo.lcdc.border_clr = 0;	/* blk */
+	pinfo.lcdc.underflow_clr = 0xff;	/* blue */
+	pinfo.lcdc.hsync_skew = 0;
+	pinfo.bl_max = 15;
+	pinfo.bl_min = 1;
+	pinfo.fb_num = 2;
+	pinfo.clk_rate = 384000000;
+
+	pinfo.mipi.mode = DSI_VIDEO_MODE;
+	pinfo.mipi.pulse_mode_hsa_he = FALSE;
+	pinfo.mipi.hfp_power_stop = FALSE;
+	pinfo.mipi.hbp_power_stop = FALSE;
+	pinfo.mipi.hsa_power_stop = FALSE;
+	pinfo.mipi.eof_bllp_power_stop = FALSE;
+	pinfo.mipi.bllp_power_stop = FALSE;
+	pinfo.mipi.traffic_mode = DSI_NON_BURST_SYNCH_EVENT;
+	pinfo.mipi.dst_format = DSI_VIDEO_DST_FORMAT_RGB888;
+	pinfo.mipi.vc = 0;
+	pinfo.mipi.rgb_swap = DSI_RGB_SWAP_RGB;
+	pinfo.mipi.data_lane0 = TRUE;
+	pinfo.mipi.data_lane1 = TRUE;
+	pinfo.mipi.data_lane2 = TRUE;
+	pinfo.mipi.t_clk_post = 0x20;
+	pinfo.mipi.t_clk_pre = 0x2d;
+	pinfo.mipi.stream = 0; /* dma_p */
+	pinfo.mipi.mdp_trigger = 0;
+	pinfo.mipi.dma_trigger = DSI_CMD_TRIGGER_SW;
+	pinfo.mipi.frame_rate = 55;
+	pinfo.mipi.dsi_phy_db = &dsi_video_mode_phy_db;
+	pinfo.mipi.tx_eot_append = TRUE;
+
+	ret = mipi_toshiba_device_register(&pinfo, MIPI_DSI_PRIM,
+						MIPI_DSI_PANEL_WVGA_PT);
+	if (ret)
+		printk(KERN_ERR "%s: failed to register device!\n", __func__);
+
+	return ret;
+}
+
+module_init(mipi_video_toshiba_wsvga_pt_init);
diff --git a/drivers/video/msm/mipi_toshiba_video_wvga_pt.c b/drivers/video/msm/mipi_toshiba_video_wvga_pt.c
new file mode 100644
index 0000000..1913513
--- /dev/null
+++ b/drivers/video/msm/mipi_toshiba_video_wvga_pt.c
@@ -0,0 +1,108 @@
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_fb.h"
+#include "mipi_dsi.h"
+#include "mipi_toshiba.h"
+
+static struct msm_panel_info pinfo;
+
+static struct mipi_dsi_phy_ctrl dsi_video_mode_phy_db = {
+	/* 480*854, RGB888, 2 Lane 60 fps video mode */
+		{0x03, 0x01, 0x01, 0x00},	/* regulator */
+		/* timing   */
+		{0x6a, 0x22, 0x0f, 0x00, 0x30, 0x38, 0x13, 0x26,
+		0x1b, 0x03, 0x04},
+		{0x7f, 0x00, 0x00, 0x00},	/* phy ctrl */
+		{0xee, 0x03, 0x86, 0x03},	/* strength */
+		/* pll control */
+
+#define DSI_BIT_CLK_380MHZ
+
+#if defined(DSI_BIT_CLK_366MHZ)
+		{0x41, 0xdb, 0xb2, 0xf5, 0x00, 0x50, 0x48, 0x63,
+		0x31, 0x0f, 0x07,
+		0x05, 0x14, 0x03, 0x03, 0x03, 0x54, 0x06, 0x10, 0x04, 0x03 },
+#elif defined(DSI_BIT_CLK_380MHZ)
+		{0x41, 0xf7, 0xb2, 0xf5, 0x00, 0x50, 0x48, 0x63,
+		0x31, 0x0f, 0x07,
+		0x05, 0x14, 0x03, 0x03, 0x03, 0x54, 0x06, 0x10, 0x04, 0x03 },
+#elif defined(DSI_BIT_CLK_400MHZ)
+		{0x41, 0x8f, 0xb1, 0xda, 0x00, 0x50, 0x48, 0x63,
+		0x31, 0x0f, 0x07,
+		0x05, 0x14, 0x03, 0x03, 0x03, 0x54, 0x06, 0x10, 0x04, 0x03 },
+#else		/* 200 MHz */
+		{0x41, 0x8f, 0xb1, 0xda, 0x00, 0x50, 0x48, 0x63,
+		0x33, 0x1f, 0x0f,
+		0x05, 0x14, 0x03, 0x03, 0x03, 0x54, 0x06, 0x10, 0x04, 0x03 },
+#endif
+};
+
+static int __init mipi_video_toshiba_wvga_pt_init(void)
+{
+	int ret;
+
+#ifdef CONFIG_FB_MSM_MIPI_PANEL_DETECT
+	if (msm_fb_detect_client("mipi_video_toshiba_wvga"))
+		return 0;
+#endif
+
+	pinfo.xres = 480;
+	pinfo.yres = 864; /* 856 for V1 surf */
+	pinfo.type = MIPI_VIDEO_PANEL;
+	pinfo.pdest = DISPLAY_1;
+	pinfo.wait_cycle = 0;
+	pinfo.bpp = 24;
+	pinfo.lcdc.h_back_porch = 64;
+	pinfo.lcdc.h_front_porch = 64;
+	pinfo.lcdc.h_pulse_width = 16;
+	pinfo.lcdc.v_back_porch = 8;
+	pinfo.lcdc.v_front_porch = 4;
+	pinfo.lcdc.v_pulse_width = 1;
+	pinfo.lcdc.border_clr = 0;	/* blk */
+	pinfo.lcdc.underflow_clr = 0xff;	/* blue */
+	pinfo.lcdc.hsync_skew = 0;
+	pinfo.bl_max = 15;
+	pinfo.bl_min = 1;
+	pinfo.fb_num = 2;
+
+	pinfo.mipi.mode = DSI_VIDEO_MODE;
+	pinfo.mipi.pulse_mode_hsa_he = TRUE;
+	pinfo.mipi.hfp_power_stop = FALSE;
+	pinfo.mipi.hbp_power_stop = FALSE;
+	pinfo.mipi.hsa_power_stop = FALSE;
+	pinfo.mipi.eof_bllp_power_stop = TRUE;
+	pinfo.mipi.bllp_power_stop = TRUE;
+	pinfo.mipi.traffic_mode = DSI_NON_BURST_SYNCH_PULSE;
+	pinfo.mipi.dst_format = DSI_VIDEO_DST_FORMAT_RGB888;
+	pinfo.mipi.vc = 0;
+	pinfo.mipi.rgb_swap = DSI_RGB_SWAP_BGR;
+	pinfo.mipi.data_lane0 = TRUE;
+	pinfo.mipi.data_lane1 = TRUE;
+	pinfo.mipi.t_clk_post = 0x04;
+	pinfo.mipi.t_clk_pre = 0x17;
+	pinfo.mipi.stream = 0; /* dma_p */
+	pinfo.mipi.mdp_trigger = DSI_CMD_TRIGGER_SW;
+	pinfo.mipi.dma_trigger = DSI_CMD_TRIGGER_SW;
+	pinfo.mipi.frame_rate = 60;
+	pinfo.mipi.dsi_phy_db = &dsi_video_mode_phy_db;
+
+	ret = mipi_toshiba_device_register(&pinfo, MIPI_DSI_PRIM,
+						MIPI_DSI_PANEL_WVGA_PT);
+	if (ret)
+		printk(KERN_ERR "%s: failed to register device!\n", __func__);
+
+	return ret;
+}
+
+module_init(mipi_video_toshiba_wvga_pt_init);
diff --git a/drivers/video/msm/msm_dss_io_7x27a.c b/drivers/video/msm/msm_dss_io_7x27a.c
new file mode 100644
index 0000000..8e1959a
--- /dev/null
+++ b/drivers/video/msm/msm_dss_io_7x27a.c
@@ -0,0 +1,390 @@
+/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include "msm_fb.h"
+#include "mipi_dsi.h"
+
+/* multimedia sub system sfpb */
+char *mmss_sfpb_base;
+void  __iomem *periph_base;
+
+int mipi_dsi_clk_on;
+static struct dsi_clk_desc dsicore_clk;
+static struct dsi_clk_desc dsi_pclk;
+
+static struct clk *dsi_byte_div_clk;
+static struct clk *dsi_esc_clk;
+static struct clk *dsi_pixel_clk;
+static struct clk *dsi_clk;
+static struct clk *dsi_ref_clk;
+static struct clk *mdp_dsi_pclk;
+static struct clk *ahb_m_clk;
+static struct clk *ahb_s_clk;
+
+void mipi_dsi_clk_init(struct device *dev)
+{
+	dsi_esc_clk = clk_get(NULL, "dsi_esc_clk");
+	if (IS_ERR(dsi_esc_clk)) {
+		printk(KERN_ERR "can't find dsi_esc_clk\n");
+		goto mipi_dsi_clk_err;
+	}
+
+	dsi_byte_div_clk = clk_get(NULL, "dsi_byte_clk");
+	if (IS_ERR(dsi_byte_div_clk)) {
+		pr_err("can't find dsi_byte_div_clk\n");
+		goto mipi_dsi_clk_err;
+	}
+
+	dsi_pixel_clk = clk_get(NULL, "dsi_pixel_clk");
+	if (IS_ERR(dsi_pixel_clk)) {
+		pr_err("can't find dsi_pixel_clk\n");
+		goto mipi_dsi_clk_err;
+	}
+
+	dsi_clk = clk_get(NULL, "dsi_clk");
+	if (IS_ERR(dsi_clk)) {
+		pr_err("can't find dsi_clk\n");
+		goto mipi_dsi_clk_err;
+	}
+
+	dsi_ref_clk = clk_get(NULL, "dsi_ref_clk");
+	if (IS_ERR(dsi_ref_clk)) {
+		pr_err("can't find dsi_ref_clk\n");
+		goto mipi_dsi_clk_err;
+	}
+
+	mdp_dsi_pclk = clk_get(NULL, "mdp_dsi_pclk");
+	if (IS_ERR(mdp_dsi_pclk)) {
+		pr_err("can't find mdp_dsi_pclk\n");
+		goto mipi_dsi_clk_err;
+	}
+
+	ahb_m_clk = clk_get(NULL, "ahb_m_clk");
+	if (IS_ERR(ahb_m_clk)) {
+		pr_err("can't find ahb_m_clk\n");
+		goto mipi_dsi_clk_err;
+	}
+
+	ahb_s_clk = clk_get(NULL, "ahb_s_clk");
+	if (IS_ERR(ahb_s_clk)) {
+		pr_err("can't find ahb_s_clk\n");
+		goto mipi_dsi_clk_err;
+	}
+
+	return;
+
+mipi_dsi_clk_err:
+	mipi_dsi_clk_deinit(NULL);
+}
+
+void mipi_dsi_clk_deinit(struct device *dev)
+{
+	clk_put(mdp_dsi_pclk);
+	clk_put(ahb_m_clk);
+	clk_put(ahb_s_clk);
+	clk_put(dsi_ref_clk);
+	clk_put(dsi_byte_div_clk);
+	clk_put(dsi_esc_clk);
+}
+
+static void mipi_dsi_clk_ctrl(struct dsi_clk_desc *clk, int clk_en)
+{
+	uint32 data;
+	if (clk_en) {
+		data = (clk->pre_div_func) << 24 |
+			(clk->m) << 16 | (clk->n) << 8 |
+			((clk->d) * 2);
+		clk_set_rate(dsi_clk, data);
+		clk_enable(dsi_clk);
+	} else
+		clk_disable(dsi_clk);
+}
+
+static void mipi_dsi_pclk_ctrl(struct dsi_clk_desc *clk, int clk_en)
+{
+	uint32 data;
+
+	if (clk_en) {
+		data = (clk->pre_div_func) << 24 | (clk->m) << 16
+			| (clk->n) << 8 | ((clk->d) * 2);
+		if ((clk_set_rate(dsi_pixel_clk, data)) < 0)
+			pr_err("%s: pixel clk set rate failed\n", __func__);
+		if (clk_enable(dsi_pixel_clk))
+			pr_err("%s clk enable failed\n", __func__);
+	} else {
+		clk_disable(dsi_pixel_clk);
+	}
+}
+
+static void mipi_dsi_calibration(void)
+{
+	MIPI_OUTP(MIPI_DSI_BASE + 0xf8, 0x00a105a1); /* cal_hw_ctrl */
+}
+
+#define PREF_DIV_RATIO 19
+struct dsiphy_pll_divider_config pll_divider_config;
+
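+/*
+ * Derive the DSI PLL dividers: pick a bit-clock divider that keeps the
+ * PLL VCO frequency in its working range, then look up the M/N/D values
+ * for the requested lane count and bpp from mnd_table.
+ */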
+int mipi_dsi_clk_div_config(uint8 bpp, uint8 lanes,
+			    uint32 *expected_dsi_pclk)
+{
+	u32 fb_divider, rate, vco;
+	u32 div_ratio = 0;
+	struct dsi_clk_mnd_table const *mnd_entry = mnd_table;
+	if (pll_divider_config.clk_rate == 0)
+		pll_divider_config.clk_rate = 454000000;
+
+	rate = pll_divider_config.clk_rate / 1000000; /* In MHz */
+
+	if (rate < 125) {
+		vco = rate * 8;
+		div_ratio = 8;
+	} else if (rate < 250) {
+		vco = rate * 4;
+		div_ratio = 4;
+	} else if (rate < 500) {
+		vco = rate * 2;
+		div_ratio = 2;
+	} else {
+		vco = rate * 1;
+		div_ratio = 1;
+	}
+
+	/* find the mnd settings from mnd_table entry */
+	for (; mnd_entry != mnd_table + ARRAY_SIZE(mnd_table); ++mnd_entry) {
+		if (((mnd_entry->lanes) == lanes) &&
+			((mnd_entry->bpp) == bpp))
+			break;
+	}
+
+	if (mnd_entry == mnd_table + ARRAY_SIZE(mnd_table)) {
+		pr_err("%s: requested Lanes, %u & BPP, %u, not supported\n",
+			__func__, lanes, bpp);
+		return -EINVAL;
+	}
+	fb_divider = ((vco * PREF_DIV_RATIO) / 27);
+	pll_divider_config.fb_divider = fb_divider;
+	pll_divider_config.ref_divider_ratio = PREF_DIV_RATIO;
+	pll_divider_config.bit_clk_divider = div_ratio;
+	pll_divider_config.byte_clk_divider =
+			pll_divider_config.bit_clk_divider * 8;
+	pll_divider_config.dsi_clk_divider =
+			(mnd_entry->dsiclk_div) * div_ratio;
+
+	if ((mnd_entry->dsiclk_d == 0)
+		|| (mnd_entry->dsiclk_m == 1)) {
+		dsicore_clk.mnd_mode = 0;
+		dsicore_clk.src = 0x3;
+		dsicore_clk.pre_div_func = (mnd_entry->dsiclk_n - 1);
+	} else {
+		dsicore_clk.mnd_mode = 2;
+		dsicore_clk.src = 0x3;
+		dsicore_clk.m = mnd_entry->dsiclk_m;
+		dsicore_clk.n = mnd_entry->dsiclk_n;
+		dsicore_clk.d = mnd_entry->dsiclk_d;
+	}
+
+	if ((mnd_entry->pclk_d == 0)
+		|| (mnd_entry->pclk_m == 1)) {
+		dsi_pclk.mnd_mode = 0;
+		dsi_pclk.src = 0x3;
+		dsi_pclk.pre_div_func = (mnd_entry->pclk_n - 1);
+		*expected_dsi_pclk = ((vco * 1000000) /
+					((pll_divider_config.dsi_clk_divider)
+					* (mnd_entry->pclk_n)));
+	} else {
+		dsi_pclk.mnd_mode = 2;
+		dsi_pclk.src = 0x3;
+		dsi_pclk.m = mnd_entry->pclk_m;
+		dsi_pclk.n = mnd_entry->pclk_n;
+		dsi_pclk.d = mnd_entry->pclk_d;
+		*expected_dsi_pclk = ((vco * 1000000 * dsi_pclk.m) /
+					((pll_divider_config.dsi_clk_divider)
+					* (mnd_entry->pclk_n)));
+	}
+	dsicore_clk.m = 1;
+	dsicore_clk.n = 1;
+	dsicore_clk.d = 2;
+	dsicore_clk.pre_div_func = 0;
+
+	dsi_pclk.m = 1;
+	dsi_pclk.n = 3;
+	dsi_pclk.d = 2;
+	dsi_pclk.pre_div_func = 0;
+	return 0;
+}
+
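+/*
+ * Program the DSI PHY from the per-panel mipi_dsi_phy_ctrl table: the
+ * regulator, timing, ctrl and strength blocks are written in sequence,
+ * followed by the PLL registers, with PLL_CTRL_0 written last and the
+ * PLL enabled via its bit 0.
+ */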
+void mipi_dsi_phy_init(int panel_ndx, struct msm_panel_info const *panel_info,
+	int target_type)
+{
+	struct mipi_dsi_phy_ctrl *pd;
+	int i, off;
+
+	MIPI_OUTP(MIPI_DSI_BASE + 0x128, 0x0001); /* start phy sw reset */
+	msleep(100);
+	MIPI_OUTP(MIPI_DSI_BASE + 0x128, 0x0000); /* end phy sw reset */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x2cc, 0x0003);/* regulator_ctrl_0 */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x2d0, 0x0001);/* regulator_ctrl_1 */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x2d4, 0x0001);/* regulator_ctrl_2 */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x2d8, 0x0000);/* regulator_ctrl_3 */
+#ifdef DSI_POWER
+	MIPI_OUTP(MIPI_DSI_BASE + 0x2dc, 0x0100);/* regulator_ctrl_4 */
+#endif
+
+	pd = (panel_info->mipi).dsi_phy_db;
+
+	off = 0x02cc;	/* regulator ctrl 0 */
+	for (i = 0; i < 4; i++) {
+		MIPI_OUTP(MIPI_DSI_BASE + off, pd->regulator[i]);
+		wmb();
+		off += 4;
+	}
+
+	off = 0x0260;	/* phy timing ctrl 0 */
+	for (i = 0; i < 11; i++) {
+		MIPI_OUTP(MIPI_DSI_BASE + off, pd->timing[i]);
+		wmb();
+		off += 4;
+	}
+
+	off = 0x0290;	/* ctrl 0 */
+	for (i = 0; i < 4; i++) {
+		MIPI_OUTP(MIPI_DSI_BASE + off, pd->ctrl[i]);
+		wmb();
+		off += 4;
+	}
+
+	off = 0x02a0;	/* strength 0 */
+	for (i = 0; i < 4; i++) {
+		MIPI_OUTP(MIPI_DSI_BASE + off, pd->strength[i]);
+		wmb();
+		off += 4;
+	}
+
+	mipi_dsi_calibration();
+
+	off = 0x0204;	/* pll ctrl 1, skip 0 */
+	for (i = 1; i < 21; i++) {
+		MIPI_OUTP(MIPI_DSI_BASE + off, pd->pll[i]);
+		wmb();
+		off += 4;
+	}
+
+	MIPI_OUTP(MIPI_DSI_BASE + 0x100, 0x67);
+
+	/* pll ctrl 0 */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x200, pd->pll[0]);
+	wmb();
+	MIPI_OUTP(MIPI_DSI_BASE + 0x200, (pd->pll[0] | 0x01));
+}
+
+void mipi_dsi_clk_enable(void)
+{
+	unsigned data = 0;
+
+	if (mipi_dsi_clk_on) {
+		pr_err("%s: mipi_dsi_clk already ON\n", __func__);
+		return;
+	}
+
+	mipi_dsi_clk_on = 1;
+
+	clk_enable(dsi_ref_clk);
+	clk_set_rate(dsi_byte_div_clk, data);
+	clk_set_rate(dsi_esc_clk, data);
+	clk_enable(mdp_dsi_pclk);
+	clk_enable(ahb_m_clk);
+	clk_enable(ahb_s_clk);
+	clk_enable(dsi_byte_div_clk);
+	clk_enable(dsi_esc_clk);
+	mipi_dsi_pclk_ctrl(&dsi_pclk, 1);
+	mipi_dsi_clk_ctrl(&dsicore_clk, 1);
+}
+
+void mipi_dsi_clk_disable(void)
+{
+	if (mipi_dsi_clk_on == 0) {
+		pr_err("%s: mipi_dsi_clk already OFF\n", __func__);
+		return;
+	}
+
+	mipi_dsi_clk_on = 0;
+
+	MIPI_OUTP(MIPI_DSI_BASE + 0x0118, 0);
+
+	mipi_dsi_pclk_ctrl(&dsi_pclk, 0);
+	mipi_dsi_clk_ctrl(&dsicore_clk, 0);
+	clk_disable(dsi_esc_clk);
+	clk_disable(dsi_byte_div_clk);
+	clk_disable(mdp_dsi_pclk);
+	clk_disable(ahb_m_clk);
+	clk_disable(ahb_s_clk);
+	clk_disable(dsi_ref_clk);
+}
+
+void mipi_dsi_phy_ctrl(int on)
+{
+	if (on) {
+		/* DSIPHY_PLL_CTRL_5 */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x0214, 0x050);
+
+		/* DSIPHY_TPA_CTRL_1 */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x0258, 0x00f);
+
+		/* DSIPHY_TPA_CTRL_2 */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x025c, 0x000);
+	} else {
+		/* DSIPHY_PLL_CTRL_5 */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x0214, 0x05f);
+
+		/* DSIPHY_TPA_CTRL_1 */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x0258, 0x08f);
+
+		/* DSIPHY_TPA_CTRL_2 */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x025c, 0x001);
+
+		/* DSIPHY_REGULATOR_CTRL_0 */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x02cc, 0x02);
+
+		/* DSIPHY_CTRL_0 */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x0290, 0x00);
+
+		/* DSIPHY_CTRL_1 */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x0294, 0x7f);
+
+		/* DSIPHY_PLL_CTRL_0, disable dsi pll */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x0200, 0x40);
+
+		/* disable dsi clk */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x0118, 0);
+	}
+}
+
+#ifdef CONFIG_FB_MSM_MDP303
+void update_lane_config(struct msm_panel_info *pinfo)
+{
+	struct mipi_dsi_phy_ctrl *pd;
+
+	pd = (pinfo->mipi).dsi_phy_db;
+	pinfo->mipi.data_lane1 = FALSE;
+	pd->pll[10] |= 0x08;
+
+	pinfo->yres = 320;
+	pinfo->lcdc.h_back_porch = 15;
+	pinfo->lcdc.h_front_porch = 21;
+	pinfo->lcdc.h_pulse_width = 5;
+	pinfo->lcdc.v_back_porch = 50;
+	pinfo->lcdc.v_front_porch = 101;
+	pinfo->lcdc.v_pulse_width = 50;
+}
+#endif
diff --git a/drivers/video/msm/msm_dss_io_8960.c b/drivers/video/msm/msm_dss_io_8960.c
new file mode 100644
index 0000000..ce9fb28
--- /dev/null
+++ b/drivers/video/msm/msm_dss_io_8960.c
@@ -0,0 +1,715 @@
+/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include "msm_fb.h"
+#include "mipi_dsi.h"
+#include "hdmi_msm.h"
+#include <mach/msm_iomap.h>
+
+/* HDMI PHY macros */
+#define HDMI_PHY_REG_0                   (0x00000400)
+#define HDMI_PHY_REG_1                   (0x00000404)
+#define HDMI_PHY_REG_2                   (0x00000408)
+#define HDMI_PHY_REG_3                   (0x0000040c)
+#define HDMI_PHY_REG_4                   (0x00000410)
+#define HDMI_PHY_REG_5                   (0x00000414)
+#define HDMI_PHY_REG_6                   (0x00000418)
+#define HDMI_PHY_REG_7                   (0x0000041c)
+#define HDMI_PHY_REG_8                   (0x00000420)
+#define HDMI_PHY_REG_9                   (0x00000424)
+#define HDMI_PHY_REG_10                  (0x00000428)
+#define HDMI_PHY_REG_11                  (0x0000042c)
+#define HDMI_PHY_REG_12                  (0x00000430)
+#define HDMI_PHY_REG_BIST_CFG            (0x00000434)
+#define HDMI_PHY_DEBUG_BUS_SEL           (0x00000438)
+#define HDMI_PHY_REG_MISC0               (0x0000043c)
+#define HDMI_PHY_REG_13                  (0x00000440)
+#define HDMI_PHY_REG_14                  (0x00000444)
+#define HDMI_PHY_REG_15                  (0x00000448)
+#define HDMI_PHY_CTRL                    (0x000002D4)
+
+/* HDMI PHY/PLL bit field macros */
+#define HDMI_PHY_PLL_STATUS0             (0x00000598)
+#define SW_RESET BIT(2)
+#define SW_RESET_PLL BIT(0)
+#define PWRDN_B BIT(7)
+
+/* multimedia sub system clock control */
+char *mmss_cc_base = MSM_MMSS_CLK_CTL_BASE;
+/* multimedia sub system sfpb */
+char *mmss_sfpb_base;
+void  __iomem *periph_base;
+
+int mipi_dsi_clk_on;
+static struct dsi_clk_desc dsicore_clk;
+static struct dsi_clk_desc dsi_pclk;
+
+static struct clk *dsi_byte_div_clk;
+static struct clk *dsi_esc_clk;
+static struct clk *dsi_m_pclk;
+static struct clk *dsi_s_pclk;
+
+static struct clk *amp_pclk;
+
+void mipi_dsi_clk_init(struct device *dev)
+{
+	amp_pclk = clk_get(NULL, "amp_pclk");
+	if (IS_ERR(amp_pclk)) {
+		pr_err("can't find amp_pclk\n");
+		goto mipi_dsi_clk_err;
+	}
+
+	dsi_m_pclk = clk_get(dev, "dsi_m_pclk");
+	if (IS_ERR(dsi_m_pclk)) {
+		pr_err("can't find dsi_m_pclk\n");
+		goto mipi_dsi_clk_err;
+	}
+
+	dsi_s_pclk = clk_get(dev, "dsi_s_pclk");
+	if (IS_ERR(dsi_s_pclk)) {
+		pr_err("can't find dsi_s_pclk\n");
+		goto mipi_dsi_clk_err;
+	}
+
+	dsi_byte_div_clk = clk_get(dev, "dsi_byte_div_clk");
+	if (IS_ERR(dsi_byte_div_clk)) {
+		pr_err("can't find dsi_byte_div_clk\n");
+		goto mipi_dsi_clk_err;
+	}
+
+	dsi_esc_clk = clk_get(dev, "dsi_esc_clk");
+	if (IS_ERR(dsi_esc_clk)) {
+		printk(KERN_ERR "can't find dsi_esc_clk\n");
+		goto mipi_dsi_clk_err;
+	}
+
+	return;
+
+mipi_dsi_clk_err:
+	mipi_dsi_clk_deinit(NULL);
+}
+
+void mipi_dsi_clk_deinit(struct device *dev)
+{
+	clk_put(amp_pclk);
+	clk_put(dsi_m_pclk);
+	clk_put(dsi_s_pclk);
+	clk_put(dsi_byte_div_clk);
+	clk_put(dsi_esc_clk);
+}
+
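+/*
+ * Program the DSI core clock MND counter: the M value and the one's
+ * complement of 2*D go into the MD register, the one's complement of
+ * (N - M) plus the source select go into the NS register, and the CC
+ * register turns on the root and MND counters.
+ */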
+static void mipi_dsi_clk_ctrl(struct dsi_clk_desc *clk, int clk_en)
+{
+	char	*cc, *ns, *md;
+	int	pmxo_sel = 0;
+	char	mnd_en = 1, root_en = 1;
+	uint32	data, val;
+
+	cc = mmss_cc_base + 0x004c;
+	md = mmss_cc_base + 0x0050;
+	ns = mmss_cc_base + 0x0054;
+
+	if (clk_en) {
+		if (clk->mnd_mode == 0) {
+			data  = clk->pre_div_func << 14;
+			data |= clk->src;
+			MIPI_OUTP_SECURE(ns, data);
+			MIPI_OUTP_SECURE(cc, ((pmxo_sel << 8)
+						| (clk->mnd_mode << 6)
+						| (root_en << 2) | clk_en));
+		} else {
+			val = clk->d * 2;
+			data = (~val) & 0x0ff;
+			data |= clk->m << 8;
+			MIPI_OUTP_SECURE(md, data);
+
+			val = clk->n - clk->m;
+			data = (~val) & 0x0ff;
+			data <<= 24;
+			data |= clk->src;
+			MIPI_OUTP_SECURE(ns, data);
+
+			MIPI_OUTP_SECURE(cc, ((pmxo_sel << 8)
+					      | (clk->mnd_mode << 6)
+					      | (mnd_en << 5)
+					      | (root_en << 2) | clk_en));
+		}
+	} else
+		MIPI_OUTP_SECURE(cc, 0);
+
+	wmb();
+}
+
+static void mipi_dsi_sfpb_cfg(void)
+{
+	char *sfpb;
+	int data;
+
+	sfpb = mmss_sfpb_base + 0x058;
+
+	data = MIPI_INP(sfpb);
+	data |= 0x01800;
+	MIPI_OUTP(sfpb, data);
+	wmb();
+}
+
+static void mipi_dsi_pclk_ctrl(struct dsi_clk_desc *clk, int clk_en)
+{
+	char	*cc, *ns, *md;
+	char	mnd_en = 1, root_en = 1;
+	uint32	data, val;
+
+	cc = mmss_cc_base + 0x0130;
+	md = mmss_cc_base + 0x0134;
+	ns = mmss_cc_base + 0x0138;
+
+	if (clk_en) {
+		if (clk->mnd_mode == 0) {
+			data  = clk->pre_div_func << 12;
+			data |= clk->src;
+			MIPI_OUTP_SECURE(ns, data);
+			MIPI_OUTP_SECURE(cc, ((clk->mnd_mode << 6)
+					      | (root_en << 2) | clk_en));
+		} else {
+			val = clk->d * 2;
+			data = (~val) & 0x0ff;
+			data |= clk->m << 8;
+			MIPI_OUTP_SECURE(md, data);
+
+			val = clk->n - clk->m;
+			data = (~val) & 0x0ff;
+			data <<= 24;
+			data |= clk->src;
+			MIPI_OUTP_SECURE(ns, data);
+
+			MIPI_OUTP_SECURE(cc, ((clk->mnd_mode << 6)
+					      | (mnd_en << 5)
+					      | (root_en << 2) | clk_en));
+		}
+	} else
+		MIPI_OUTP_SECURE(cc, 0);
+
+	wmb();
+}
+
+static void mipi_dsi_ahb_en(void)
+{
+	char	*ahb;
+
+	ahb = mmss_cc_base + 0x08;
+
+	pr_debug("%s: ahb=%x %x\n",
+		__func__, (int) ahb, MIPI_INP_SECURE(ahb));
+}
+
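+/*
+ * DSI1 PHY regulator calibration: set up the CAL_* configuration
+ * registers, pulse the hardware calibration trigger, then poll the
+ * calibration busy flag until it clears or the loop times out.
+ */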
+static void mipi_dsi_calibration(void)
+{
+	int i = 0;
+	uint32 term_cnt = 5000;
+	int cal_busy = MIPI_INP(MIPI_DSI_BASE + 0x550);
+
+	/* DSI1_DSIPHY_REGULATOR_CAL_PWR_CFG */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x0518, 0x01);
+
+	/* DSI1_DSIPHY_CAL_SW_CFG2 */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x0534, 0x0);
+	/* DSI1_DSIPHY_CAL_HW_CFG1 */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x053c, 0x5a);
+	/* DSI1_DSIPHY_CAL_HW_CFG3 */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x0544, 0x10);
+	/* DSI1_DSIPHY_CAL_HW_CFG4 */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x0548, 0x01);
+	/* DSI1_DSIPHY_CAL_HW_CFG0 */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x0538, 0x01);
+
+	/* DSI1_DSIPHY_CAL_HW_TRIGGER */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x0528, 0x01);
+	usleep_range(5000, 5000);
+	/* DSI1_DSIPHY_CAL_HW_TRIGGER */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x0528, 0x00);
+
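+	/* Poll the calibration status register at 0x550 until bit 4 (busy) clears. */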
+	cal_busy = MIPI_INP(MIPI_DSI_BASE + 0x550);
+	while (cal_busy & 0x10) {
+		i++;
+		if (i > term_cnt) {
+			pr_err("DSI1 PHY REGULATOR NOT READY, "
+				"exceeded polling TIMEOUT!\n");
+			break;
+		}
+		cal_busy = MIPI_INP(MIPI_DSI_BASE + 0x550);
+	}
+}
+
+void mipi_dsi_phy_rdy_poll(void)
+{
+	uint32 phy_pll_busy;
+	uint32 i = 0;
+	uint32 term_cnt = 0xFFFFFF;
+
+	phy_pll_busy = MIPI_INP(MIPI_DSI_BASE + 0x280);
+	while (!(phy_pll_busy & 0x1)) {
+		i++;
+		if (i > term_cnt) {
+			pr_err("DSI1 PHY NOT READY, exceeded polling TIMEOUT!\n");
+			break;
+		}
+		phy_pll_busy = MIPI_INP(MIPI_DSI_BASE + 0x280);
+	}
+}
+
+#define PREF_DIV_RATIO 27
+struct dsiphy_pll_divider_config pll_divider_config;
+
+int mipi_dsi_phy_pll_config(u32 clk_rate)
+{
+	struct dsiphy_pll_divider_config *dividers;
+	u32 fb_divider, tmp;
+	dividers = &pll_divider_config;
+
+	/* DSIPHY_PLL_CTRL_x:    1     2     3     8     9     10 */
+	/* masks               0xff  0x07  0x3f  0x0f  0xff  0xff */
+
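+	/*
+	 * The programmed feedback divider is split across two registers:
+	 * its low 8 bits go into PLL_CTRL_1, the next 3 bits into PLL_CTRL_2.
+	 */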
+	/* DSIPHY_PLL_CTRL_1 */
+	fb_divider = ((dividers->fb_divider) / 2) - 1;
+	MIPI_OUTP(MIPI_DSI_BASE + 0x204, fb_divider & 0xff);
+
+	/* DSIPHY_PLL_CTRL_2 */
+	tmp = MIPI_INP(MIPI_DSI_BASE + 0x208);
+	tmp &= ~0x07;
+	tmp |= (fb_divider >> 8) & 0x07;
+	MIPI_OUTP(MIPI_DSI_BASE + 0x208, tmp);
+
+	/* DSIPHY_PLL_CTRL_3 */
+	tmp = MIPI_INP(MIPI_DSI_BASE + 0x20c);
+	tmp &= ~0x3f;
+	tmp |= (dividers->ref_divider_ratio - 1) & 0x3f;
+	MIPI_OUTP(MIPI_DSI_BASE + 0x20c, tmp);
+
+	/* DSIPHY_PLL_CTRL_8 */
+	tmp = MIPI_INP(MIPI_DSI_BASE + 0x220);
+	tmp &= ~0x0f;
+	tmp |= (dividers->bit_clk_divider - 1) & 0x0f;
+	MIPI_OUTP(MIPI_DSI_BASE + 0x220, tmp);
+
+	/* DSIPHY_PLL_CTRL_9 */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x224, (dividers->byte_clk_divider - 1));
+
+	/* DSIPHY_PLL_CTRL_10 */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x228, (dividers->dsi_clk_divider - 1));
+
+	return 0;
+}
+
+int mipi_dsi_clk_div_config(uint8 bpp, uint8 lanes,
+			    uint32 *expected_dsi_pclk)
+{
+	u32 fb_divider, rate, vco;
+	u32 div_ratio = 0;
+	struct dsi_clk_mnd_table const *mnd_entry = mnd_table;
+	if (pll_divider_config.clk_rate == 0)
+		pll_divider_config.clk_rate = 454000000;
+
+	rate = pll_divider_config.clk_rate / 1000000; /* In MHz */
+
+	if (rate < 125) {
+		vco = rate * 8;
+		div_ratio = 8;
+	} else if (rate < 250) {
+		vco = rate * 4;
+		div_ratio = 4;
+	} else if (rate < 500) {
+		vco = rate * 2;
+		div_ratio = 2;
+	} else {
+		vco = rate * 1;
+		div_ratio = 1;
+	}
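+	/*
+	 * Illustrative example: with the default clk_rate of 454000000,
+	 * rate = 454 MHz; since 454 < 500, vco = 908 MHz and div_ratio = 2.
+	 */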
+
+	/* find the mnd settings from mnd_table entry */
+	for (; mnd_entry != mnd_table + ARRAY_SIZE(mnd_table); ++mnd_entry) {
+		if (((mnd_entry->lanes) == lanes) &&
+			((mnd_entry->bpp) == bpp))
+			break;
+	}
+
+	if (mnd_entry == mnd_table + ARRAY_SIZE(mnd_table)) {
+		pr_err("%s: requested Lanes, %u & BPP, %u, not supported\n",
+			__func__, lanes, bpp);
+		return -EINVAL;
+	}
+	fb_divider = ((vco * PREF_DIV_RATIO) / 27);
+	pll_divider_config.fb_divider = fb_divider;
+	pll_divider_config.ref_divider_ratio = PREF_DIV_RATIO;
+	pll_divider_config.bit_clk_divider = div_ratio;
+	pll_divider_config.byte_clk_divider =
+			pll_divider_config.bit_clk_divider * 8;
+	pll_divider_config.dsi_clk_divider =
+			(mnd_entry->dsiclk_div) * div_ratio;
+
+	if (mnd_entry->dsiclk_d == 0) {
+		dsicore_clk.mnd_mode = 0;
+		dsicore_clk.src = 0x3;
+		dsicore_clk.pre_div_func = (mnd_entry->dsiclk_n - 1);
+	} else {
+		dsicore_clk.mnd_mode = 2;
+		dsicore_clk.src = 0x3;
+		dsicore_clk.m = mnd_entry->dsiclk_m;
+		dsicore_clk.n = mnd_entry->dsiclk_n;
+		dsicore_clk.d = mnd_entry->dsiclk_d;
+	}
+
+	if ((mnd_entry->pclk_d == 0)
+		|| (mnd_entry->pclk_m == 1)) {
+		dsi_pclk.mnd_mode = 0;
+		dsi_pclk.src = 0x3;
+		dsi_pclk.pre_div_func = (mnd_entry->pclk_n - 1);
+		*expected_dsi_pclk = ((vco * 1000000) /
+					((pll_divider_config.dsi_clk_divider)
+					* (mnd_entry->pclk_n)));
+	} else {
+		dsi_pclk.mnd_mode = 2;
+		dsi_pclk.src = 0x3;
+		dsi_pclk.m = mnd_entry->pclk_m;
+		dsi_pclk.n = mnd_entry->pclk_n;
+		dsi_pclk.d = mnd_entry->pclk_d;
+		*expected_dsi_pclk = ((vco * 1000000 * dsi_pclk.m) /
+					((pll_divider_config.dsi_clk_divider)
+					* (mnd_entry->pclk_n)));
+	}
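+	/*
+	 * In both branches above the expected pixel clock works out to
+	 * vco [Hz] * m / (dsi_clk_divider * n), with m = 1 in pre-divider mode.
+	 */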
+	return 0;
+}
+
+static void mipi_dsi_configure_serdes(void)
+{
+	void __iomem *cc;
+
+	/* PHY registers programmed through the S2P interface */
+	if (periph_base) {
+		MIPI_OUTP(periph_base + 0x2c, 0x000000b6);
+		MIPI_OUTP(periph_base + 0x2c, 0x000001b5);
+		MIPI_OUTP(periph_base + 0x2c, 0x000001b4);
+		MIPI_OUTP(periph_base + 0x2c, 0x000003b3);
+		MIPI_OUTP(periph_base + 0x2c, 0x000003a2);
+		MIPI_OUTP(periph_base + 0x2c, 0x000002a1);
+		MIPI_OUTP(periph_base + 0x2c, 0x000008a0);
+		MIPI_OUTP(periph_base + 0x2c, 0x00000d9f);
+		MIPI_OUTP(periph_base + 0x2c, 0x0000109e);
+		MIPI_OUTP(periph_base + 0x2c, 0x0000209d);
+		MIPI_OUTP(periph_base + 0x2c, 0x0000109c);
+		MIPI_OUTP(periph_base + 0x2c, 0x0000079a);
+		MIPI_OUTP(periph_base + 0x2c, 0x00000c99);
+		MIPI_OUTP(periph_base + 0x2c, 0x00002298);
+		MIPI_OUTP(periph_base + 0x2c, 0x000000a7);
+		MIPI_OUTP(periph_base + 0x2c, 0x000000a6);
+		MIPI_OUTP(periph_base + 0x2c, 0x000000a5);
+		MIPI_OUTP(periph_base + 0x2c, 0x00007fa4);
+		MIPI_OUTP(periph_base + 0x2c, 0x0000eea8);
+		MIPI_OUTP(periph_base + 0x2c, 0x000006aa);
+		MIPI_OUTP(periph_base + 0x2c, 0x00002095);
+		MIPI_OUTP(periph_base + 0x2c, 0x00000493);
+		MIPI_OUTP(periph_base + 0x2c, 0x00001092);
+		MIPI_OUTP(periph_base + 0x2c, 0x00000691);
+		MIPI_OUTP(periph_base + 0x2c, 0x00005490);
+		MIPI_OUTP(periph_base + 0x2c, 0x0000038d);
+		MIPI_OUTP(periph_base + 0x2c, 0x0000148c);
+		MIPI_OUTP(periph_base + 0x2c, 0x0000058b);
+		MIPI_OUTP(periph_base + 0x2c, 0x0000078a);
+		MIPI_OUTP(periph_base + 0x2c, 0x00001f89);
+		MIPI_OUTP(periph_base + 0x2c, 0x00003388);
+		MIPI_OUTP(periph_base + 0x2c, 0x00006387);
+		MIPI_OUTP(periph_base + 0x2c, 0x00004886);
+		MIPI_OUTP(periph_base + 0x2c, 0x00005085);
+		MIPI_OUTP(periph_base + 0x2c, 0x00000084);
+		MIPI_OUTP(periph_base + 0x2c, 0x0000da83);
+		MIPI_OUTP(periph_base + 0x2c, 0x0000b182);
+		MIPI_OUTP(periph_base + 0x2c, 0x00002f81);
+		MIPI_OUTP(periph_base + 0x2c, 0x00004080);
+		MIPI_OUTP(periph_base + 0x2c, 0x00004180);
+		MIPI_OUTP(periph_base + 0x2c, 0x000006aa);
+	}
+
+	cc = MIPI_DSI_BASE + 0x0130;
+	MIPI_OUTP(cc, 0x806c11c8);
+	MIPI_OUTP(cc, 0x804c11c8);
+	MIPI_OUTP(cc, 0x806d0080);
+	MIPI_OUTP(cc, 0x804d0080);
+	MIPI_OUTP(cc, 0x00000000);
+	MIPI_OUTP(cc, 0x807b1597);
+	MIPI_OUTP(cc, 0x805b1597);
+	MIPI_OUTP(cc, 0x807c0080);
+	MIPI_OUTP(cc, 0x805c0080);
+	MIPI_OUTP(cc, 0x00000000);
+	MIPI_OUTP(cc, 0x807911c8);
+	MIPI_OUTP(cc, 0x805911c8);
+	MIPI_OUTP(cc, 0x807a0080);
+	MIPI_OUTP(cc, 0x805a0080);
+	MIPI_OUTP(cc, 0x00000000);
+	MIPI_OUTP(cc, 0x80721555);
+	MIPI_OUTP(cc, 0x80521555);
+	MIPI_OUTP(cc, 0x80730000);
+	MIPI_OUTP(cc, 0x80530000);
+	MIPI_OUTP(cc, 0x00000000);
+}
+
+void mipi_dsi_phy_init(int panel_ndx, struct msm_panel_info const *panel_info,
+	int target_type)
+{
+	struct mipi_dsi_phy_ctrl *pd;
+	int i, off;
+
+	MIPI_OUTP(MIPI_DSI_BASE + 0x128, 0x0001);/* start phy sw reset */
+	msleep(100);
+	MIPI_OUTP(MIPI_DSI_BASE + 0x128, 0x0000);/* end phy sw reset */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x500, 0x0003);/* regulator_ctrl_0 */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x504, 0x0001);/* regulator_ctrl_1 */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x508, 0x0001);/* regulator_ctrl_2 */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x50c, 0x0000);/* regulator_ctrl_3 */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x510, 0x0100);/* regulator_ctrl_4 */
+
+	pd = (panel_info->mipi).dsi_phy_db;
+
+	off = 0x0480;	/* strength 0 - 2 */
+	for (i = 0; i < 3; i++) {
+		MIPI_OUTP(MIPI_DSI_BASE + off, pd->strength[i]);
+		wmb();
+		off += 4;
+	}
+
+	off = 0x0470;	/* ctrl 0 - 3 */
+	for (i = 0; i < 4; i++) {
+		MIPI_OUTP(MIPI_DSI_BASE + off, pd->ctrl[i]);
+		wmb();
+		off += 4;
+	}
+
+	off = 0x0500;	/* regulator ctrl 0 - 4 */
+	for (i = 0; i < 5; i++) {
+		MIPI_OUTP(MIPI_DSI_BASE + off, pd->regulator[i]);
+		wmb();
+		off += 4;
+	}
+	mipi_dsi_calibration();
+
+	off = 0x0204;	/* pll ctrl 1 - 19, skip 0 */
+	for (i = 1; i < 20; i++) {
+		MIPI_OUTP(MIPI_DSI_BASE + off, pd->pll[i]);
+		wmb();
+		off += 4;
+	}
+
+	if (panel_info)
+		mipi_dsi_phy_pll_config(panel_info->clk_rate);
+
+	/* pll ctrl 0 */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x200, pd->pll[0]);
+	wmb();
+	MIPI_OUTP(MIPI_DSI_BASE + 0x200, (pd->pll[0] | 0x01));
+
+	mipi_dsi_phy_rdy_poll();
+
+	off = 0x0440;	/* phy timing ctrl 0 - 11 */
+	for (i = 0; i < 12; i++) {
+		MIPI_OUTP(MIPI_DSI_BASE + off, pd->timing[i]);
+		wmb();
+		off += 4;
+	}
+
+	if (target_type == 1)
+		mipi_dsi_configure_serdes();
+}
+
+void mipi_dsi_clk_enable(void)
+{
+	if (mipi_dsi_clk_on) {
+		pr_err("%s: mipi_dsi_clk already ON\n", __func__);
+		return;
+	}
+
+	mipi_dsi_clk_on = 1;
+
+	clk_enable(amp_pclk); /* clock for AHB-master to AXI */
+	clk_enable(dsi_m_pclk);
+	clk_enable(dsi_s_pclk);
+	if (clk_set_rate(dsi_byte_div_clk, 1) < 0)	/* divided by 1 */
+		pr_err("%s: dsi_byte_div_clk - "
+			"clk_set_rate failed\n", __func__);
+	if (clk_set_rate(dsi_esc_clk, 2) < 0) /* divided by 2 */
+		pr_err("%s: dsi_esc_clk - "
+			"clk_set_rate failed\n", __func__);
+	clk_enable(dsi_byte_div_clk);
+	clk_enable(dsi_esc_clk);
+	mipi_dsi_pclk_ctrl(&dsi_pclk, 1);
+	mipi_dsi_clk_ctrl(&dsicore_clk, 1);
+	mipi_dsi_ahb_en();
+	mipi_dsi_sfpb_cfg();
+}
+
+void mipi_dsi_clk_disable(void)
+{
+	if (mipi_dsi_clk_on == 0) {
+		pr_err("%s: mipi_dsi_clk already OFF\n", __func__);
+		return;
+	}
+
+	mipi_dsi_clk_on = 0;
+
+	MIPI_OUTP(MIPI_DSI_BASE + 0x0118, 0);
+
+	mipi_dsi_pclk_ctrl(&dsi_pclk, 0);
+	mipi_dsi_clk_ctrl(&dsicore_clk, 0);
+	clk_disable(dsi_esc_clk);
+	clk_disable(dsi_byte_div_clk);
+	clk_disable(dsi_m_pclk);
+	clk_disable(dsi_s_pclk);
+	clk_disable(amp_pclk); /* clock for AHB-master to AXI */
+}
+
+void mipi_dsi_phy_ctrl(int on)
+{
+	if (on) {
+		/* DSIPHY_PLL_CTRL_5 */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x0214, 0x050);
+	} else {
+		/* DSIPHY_PLL_CTRL_5 */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x0214, 0x05f);
+
+		/* DSIPHY_REGULATOR_CTRL_0 */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x0500, 0x02);
+
+		/* DSIPHY_CTRL_0 */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x0470, 0x00);
+
+		/* DSIPHY_CTRL_1 */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x0474, 0x7f);
+
+		/* DSIPHY_PLL_CTRL_0, disable dsi pll */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x0200, 0x40);
+
+		/* disable dsi clk */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x0118, 0);
+	}
+}
+
+#ifdef CONFIG_FB_MSM_HDMI_COMMON
+void hdmi_phy_reset(void)
+{
+	unsigned int phy_reset_polarity = 0x0;
+	unsigned int pll_reset_polarity = 0x0;
+
+	unsigned int val = HDMI_INP_ND(HDMI_PHY_CTRL);
+
+	phy_reset_polarity = val >> 3 & 0x1;
+	pll_reset_polarity = val >> 1 & 0x1;
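+	/*
+	 * Toggle the PHY and PLL software resets in whichever direction is
+	 * active for the current polarity bits, with a ~100 ms hold time.
+	 */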
+
+	if (phy_reset_polarity == 0)
+		HDMI_OUTP(HDMI_PHY_CTRL, val | SW_RESET);
+	else
+		HDMI_OUTP(HDMI_PHY_CTRL, val & (~SW_RESET));
+
+	if (pll_reset_polarity == 0)
+		HDMI_OUTP(HDMI_PHY_CTRL, val | SW_RESET_PLL);
+	else
+		HDMI_OUTP(HDMI_PHY_CTRL, val & (~SW_RESET_PLL));
+
+	msleep(100);
+
+	if (phy_reset_polarity == 0)
+		HDMI_OUTP(HDMI_PHY_CTRL, val & (~SW_RESET));
+	else
+		HDMI_OUTP(HDMI_PHY_CTRL, val | SW_RESET);
+
+	if (pll_reset_polarity == 0)
+		HDMI_OUTP(HDMI_PHY_CTRL, val & (~SW_RESET_PLL));
+	else
+		HDMI_OUTP(HDMI_PHY_CTRL, val | SW_RESET_PLL);
+}
+
+void hdmi_msm_init_phy(int video_format)
+{
+	uint32 offset;
+	pr_err("Video format is : %u\n", video_format);
+
+	HDMI_OUTP(HDMI_PHY_REG_0, 0x1B);
+	HDMI_OUTP(HDMI_PHY_REG_1, 0xf2);
+	HDMI_OUTP(HDMI_PHY_REG_2, 0x7F);
+	HDMI_OUTP(HDMI_PHY_REG_2, 0x3F);
+	HDMI_OUTP(HDMI_PHY_REG_2, 0x1F);
+
+	offset = HDMI_PHY_REG_4;
+	while (offset <= HDMI_PHY_REG_11) {
+		HDMI_OUTP(offset, 0x0);
+		offset += 0x4;
+	}
+
+	HDMI_OUTP(HDMI_PHY_REG_12, HDMI_INP(HDMI_PHY_REG_12) | PWRDN_B);
+	msleep(100);
+
+	HDMI_OUTP(HDMI_PHY_REG_3, 0x20);
+	HDMI_OUTP(HDMI_PHY_REG_12, 0x81);
+	HDMI_OUTP(HDMI_PHY_REG_2, 0x81);
+}
+
+void hdmi_msm_powerdown_phy(void)
+{
+	/* Power down PHY */
+	HDMI_OUTP_ND(HDMI_PHY_REG_2, 0x7F); /*0b01111111*/
+}
+
+void hdmi_frame_ctrl_cfg(const struct hdmi_disp_mode_timing_type *timing)
+{
+	/*  0x02C8 HDMI_FRAME_CTRL
+	 *  31 INTERLACED_EN   Interlaced or progressive enable bit
+	 *    0: Frame in progressive
+	 *    1: Frame is interlaced
+	 *  29 HSYNC_HDMI_POL  HSYNC polarity fed to HDMI core
+	 *     0: Active Hi Hsync, detect the rising edge of hsync
+	 *     1: Active lo Hsync, Detect the falling edge of Hsync
+	 *  28 VSYNC_HDMI_POL  VSYNC polarity fed to HDMI core
+	 *     0: Active Hi Vsync, detect the rising edge of vsync
+	 *     1: Active Lo Vsync, Detect the falling edge of Vsync
+	 *  12 RGB_MUX_SEL     Selects whether the MDP4 input is treated as RGB or BGR
+	 */
+	HDMI_OUTP(0x02C8,
+		  ((timing->interlaced << 31) & 0x80000000)
+		| ((timing->active_low_h << 29) & 0x20000000)
+		| ((timing->active_low_v << 28) & 0x10000000));
+}
+
+void hdmi_msm_phy_status_poll(void)
+{
+	unsigned int lock_det, phy_ready;
+	lock_det = 0x1 & HDMI_INP_ND(HDMI_PHY_PLL_STATUS0);
+	if (lock_det) {
+		pr_debug("HDMI Phy PLL Lock Detect Bit is set\n");
+	} else {
+		pr_debug("HDMI Phy Lock Detect Bit is not set, "
+			 "waiting for lock detection\n");
+		do {
+			lock_det = 0x1 & \
+				HDMI_INP_ND(HDMI_PHY_PLL_STATUS0);
+		} while (!lock_det);
+	}
+
+	phy_ready = 0x1 & HDMI_INP_ND(HDMI_PHY_REG_15);
+	if (phy_ready) {
+		pr_debug("HDMI Phy Status bit is set and ready\n");
+	} else {
+		pr_debug("HDMI Phy Status bit is not set, "
+			"waiting for ready status\n");
+		do {
+			phy_ready = 0x1 & HDMI_INP_ND(HDMI_PHY_REG_15);
+		} while (!phy_ready);
+	}
+}
+
+#endif
diff --git a/drivers/video/msm/msm_dss_io_8x60.c b/drivers/video/msm/msm_dss_io_8x60.c
new file mode 100644
index 0000000..e38170f
--- /dev/null
+++ b/drivers/video/msm/msm_dss_io_8x60.c
@@ -0,0 +1,613 @@
+/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include "msm_fb.h"
+#include "mipi_dsi.h"
+#include "hdmi_msm.h"
+#include <mach/msm_iomap.h>
+
+/* multimedia sub system clock control */
+char *mmss_cc_base = MSM_MMSS_CLK_CTL_BASE;
+/* multimedia sub system sfpb */
+char *mmss_sfpb_base;
+void  __iomem *periph_base;
+
+int mipi_dsi_clk_on;
+static struct dsi_clk_desc dsicore_clk;
+static struct dsi_clk_desc dsi_pclk;
+
+static struct clk *dsi_byte_div_clk;
+static struct clk *dsi_esc_clk;
+static struct clk *dsi_m_pclk;
+static struct clk *dsi_s_pclk;
+
+static struct clk *amp_pclk;
+
+void mipi_dsi_clk_init(struct device *dev)
+{
+	amp_pclk = clk_get(NULL, "amp_pclk");
+	if (IS_ERR(amp_pclk)) {
+		pr_err("can't find amp_pclk\n");
+		goto mipi_dsi_clk_err;
+	}
+
+	dsi_m_pclk = clk_get(NULL, "dsi_m_pclk");
+	if (IS_ERR(dsi_m_pclk)) {
+		pr_err("can't find dsi_m_pclk\n");
+		goto mipi_dsi_clk_err;
+	}
+
+	dsi_s_pclk = clk_get(NULL, "dsi_s_pclk");
+	if (IS_ERR(dsi_s_pclk)) {
+		pr_err("can't find dsi_s_pclk\n");
+		goto mipi_dsi_clk_err;
+	}
+
+	dsi_byte_div_clk = clk_get(NULL, "dsi_byte_div_clk");
+	if (IS_ERR(dsi_byte_div_clk)) {
+		pr_err("can't find dsi_byte_div_clk\n");
+		goto mipi_dsi_clk_err;
+	}
+
+	dsi_esc_clk = clk_get(NULL, "dsi_esc_clk");
+	if (IS_ERR(dsi_esc_clk)) {
+		printk(KERN_ERR "can't find dsi_esc_clk\n");
+		goto mipi_dsi_clk_err;
+	}
+
+	return;
+
+mipi_dsi_clk_err:
+	mipi_dsi_clk_deinit(NULL);
+}
+
+void mipi_dsi_clk_deinit(struct device *dev)
+{
+	clk_put(amp_pclk);
+	clk_put(dsi_m_pclk);
+	clk_put(dsi_s_pclk);
+	clk_put(dsi_byte_div_clk);
+	clk_put(dsi_esc_clk);
+}
+
+static void mipi_dsi_clk_ctrl(struct dsi_clk_desc *clk, int clk_en)
+{
+	char	*cc, *ns, *md;
+	int	pmxo_sel = 0;
+	char	mnd_en = 1, root_en = 1;
+	uint32	data, val;
+
+	cc = mmss_cc_base + 0x004c;
+	md = mmss_cc_base + 0x0050;
+	ns = mmss_cc_base + 0x0054;
+
+	if (clk_en) {
+		if (clk->mnd_mode == 0) {
+			data  = clk->pre_div_func << 14;
+			data |= clk->src;
+			MIPI_OUTP_SECURE(ns, data);
+			MIPI_OUTP_SECURE(cc, ((pmxo_sel << 8)
+						| (clk->mnd_mode << 6)
+						| (root_en << 2) | clk_en));
+		} else {
+			val = clk->d * 2;
+			data = (~val) & 0x0ff;
+			data |= clk->m << 8;
+			MIPI_OUTP_SECURE(md, data);
+
+			val = clk->n - clk->m;
+			data = (~val) & 0x0ff;
+			data <<= 24;
+			data |= clk->src;
+			MIPI_OUTP_SECURE(ns, data);
+
+			MIPI_OUTP_SECURE(cc, ((pmxo_sel << 8)
+					      | (clk->mnd_mode << 6)
+					      | (mnd_en << 5)
+					      | (root_en << 2) | clk_en));
+		}
+
+	} else
+		MIPI_OUTP_SECURE(cc, 0);
+
+	wmb();
+}
+
+static void mipi_dsi_sfpb_cfg(void)
+{
+	char *sfpb;
+	int data;
+
+	sfpb = mmss_sfpb_base + 0x058;
+
+	data = MIPI_INP(sfpb);
+	data |= 0x01800;
+	MIPI_OUTP(sfpb, data);
+	wmb();
+}
+
+static void mipi_dsi_pclk_ctrl(struct dsi_clk_desc *clk, int clk_en)
+{
+	char	*cc, *ns, *md;
+	char	mnd_en = 1, root_en = 1;
+	uint32	data, val;
+
+	cc = mmss_cc_base + 0x0130;
+	md = mmss_cc_base + 0x0134;
+	ns = mmss_cc_base + 0x0138;
+
+	if (clk_en) {
+		if (clk->mnd_mode == 0) {
+			data  = clk->pre_div_func << 12;
+			data |= clk->src;
+			MIPI_OUTP_SECURE(ns, data);
+			MIPI_OUTP_SECURE(cc, ((clk->mnd_mode << 6)
+					      | (root_en << 2) | clk_en));
+		} else {
+			val = clk->d * 2;
+			data = (~val) & 0x0ff;
+			data |= clk->m << 8;
+			MIPI_OUTP_SECURE(md, data);
+
+			val = clk->n - clk->m;
+			data = (~val) & 0x0ff;
+			data <<= 24;
+			data |= clk->src;
+			MIPI_OUTP_SECURE(ns, data);
+
+			MIPI_OUTP_SECURE(cc, ((clk->mnd_mode << 6)
+					      | (mnd_en << 5)
+					      | (root_en << 2) | clk_en));
+		}
+
+	} else
+		MIPI_OUTP_SECURE(cc, 0);
+
+	wmb();
+}
+
+static void mipi_dsi_ahb_en(void)
+{
+	char	*ahb;
+
+	ahb = mmss_cc_base + 0x08;
+
+	pr_debug("%s: ahb=%x %x\n",
+		__func__, (int) ahb, MIPI_INP_SECURE(ahb));
+}
+
+static void mipi_dsi_calibration(void)
+{
+	uint32 data;
+
+	MIPI_OUTP(MIPI_DSI_BASE + 0xf4, 0x0000ff11); /* cal_ctrl */
+	MIPI_OUTP(MIPI_DSI_BASE + 0xf0, 0x01); /* cal_hw_trigger */
+
+	while (1) {
+		data = MIPI_INP(MIPI_DSI_BASE + 0xfc); /* cal_status */
+		if ((data & 0x10000000) == 0)
+			break;
+
+		udelay(10);
+	}
+}
+
+#define PREF_DIV_RATIO 27
+struct dsiphy_pll_divider_config pll_divider_config;
+
+
+int mipi_dsi_phy_pll_config(u32 clk_rate)
+{
+	struct dsiphy_pll_divider_config *dividers;
+	u32 fb_divider, tmp;
+	dividers = &pll_divider_config;
+
+	/* DSIPHY_PLL_CTRL_x:    1     2     3     8     9     10 */
+	/* masks               0xff  0x07  0x3f  0x0f  0xff  0xff */
+
+	/* DSIPHY_PLL_CTRL_1 */
+	fb_divider = ((dividers->fb_divider) / 2) - 1;
+	MIPI_OUTP(MIPI_DSI_BASE + 0x204, fb_divider & 0xff);
+
+	/* DSIPHY_PLL_CTRL_2 */
+	tmp = MIPI_INP(MIPI_DSI_BASE + 0x208);
+	tmp &= ~0x07;
+	tmp |= (fb_divider >> 8) & 0x07;
+	MIPI_OUTP(MIPI_DSI_BASE + 0x208, tmp);
+
+	/* DSIPHY_PLL_CTRL_3 */
+	tmp = MIPI_INP(MIPI_DSI_BASE + 0x20c);
+	tmp &= ~0x3f;
+	tmp |= (dividers->ref_divider_ratio - 1) & 0x3f;
+	MIPI_OUTP(MIPI_DSI_BASE + 0x20c, tmp);
+
+	/* DSIPHY_PLL_CTRL_8 */
+	tmp = MIPI_INP(MIPI_DSI_BASE + 0x220);
+	tmp &= ~0x0f;
+	tmp |= (dividers->bit_clk_divider - 1) & 0x0f;
+	MIPI_OUTP(MIPI_DSI_BASE + 0x220, tmp);
+
+	/* DSIPHY_PLL_CTRL_9 */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x224, (dividers->byte_clk_divider - 1));
+
+	/* DSIPHY_PLL_CTRL_10 */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x228, (dividers->dsi_clk_divider - 1));
+
+	return 0;
+}
+
+int mipi_dsi_clk_div_config(uint8 bpp, uint8 lanes,
+			    uint32 *expected_dsi_pclk)
+{
+	u32 fb_divider, rate, vco;
+	u32 div_ratio = 0;
+	struct dsi_clk_mnd_table const *mnd_entry = mnd_table;
+	if (pll_divider_config.clk_rate == 0)
+		pll_divider_config.clk_rate = 454000000;
+
+	rate = pll_divider_config.clk_rate / 1000000; /* In MHz */
+
+	if (rate < 125) {
+		vco = rate * 8;
+		div_ratio = 8;
+	} else if (rate < 250) {
+		vco = rate * 4;
+		div_ratio = 4;
+	} else if (rate < 500) {
+		vco = rate * 2;
+		div_ratio = 2;
+	} else {
+		vco = rate * 1;
+		div_ratio = 1;
+	}
+
+	/* find the mnd settings from mnd_table entry */
+	for (; mnd_entry != mnd_table + ARRAY_SIZE(mnd_table); ++mnd_entry) {
+		if (((mnd_entry->lanes) == lanes) &&
+			((mnd_entry->bpp) == bpp))
+			break;
+	}
+
+	if (mnd_entry == mnd_table + ARRAY_SIZE(mnd_table)) {
+		pr_err("%s: requested Lanes, %u & BPP, %u, not supported\n",
+			__func__, lanes, bpp);
+		return -EINVAL;
+	}
+	fb_divider = ((vco * PREF_DIV_RATIO) / 27);
+	pll_divider_config.fb_divider = fb_divider;
+	pll_divider_config.ref_divider_ratio = PREF_DIV_RATIO;
+	pll_divider_config.bit_clk_divider = div_ratio;
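+	/* The byte clock runs at 1/8 of the bit clock, hence the x8 below. */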
+	pll_divider_config.byte_clk_divider =
+			pll_divider_config.bit_clk_divider * 8;
+	pll_divider_config.dsi_clk_divider =
+			(mnd_entry->dsiclk_div) * div_ratio;
+
+	if ((mnd_entry->dsiclk_d == 0)
+		|| (mnd_entry->dsiclk_m == 1)) {
+		dsicore_clk.mnd_mode = 0;
+		dsicore_clk.src = 0x3;
+		dsicore_clk.pre_div_func = (mnd_entry->dsiclk_n - 1);
+	} else {
+		dsicore_clk.mnd_mode = 2;
+		dsicore_clk.src = 0x3;
+		dsicore_clk.m = mnd_entry->dsiclk_m;
+		dsicore_clk.n = mnd_entry->dsiclk_n;
+		dsicore_clk.d = mnd_entry->dsiclk_d;
+	}
+
+	if ((mnd_entry->pclk_d == 0)
+		|| (mnd_entry->pclk_m == 1)) {
+		dsi_pclk.mnd_mode = 0;
+		dsi_pclk.src = 0x3;
+		dsi_pclk.pre_div_func = (mnd_entry->pclk_n - 1);
+		*expected_dsi_pclk = ((vco * 1000000) /
+					((pll_divider_config.dsi_clk_divider)
+					* (mnd_entry->pclk_n)));
+	} else {
+		dsi_pclk.mnd_mode = 2;
+		dsi_pclk.src = 0x3;
+		dsi_pclk.m = mnd_entry->pclk_m;
+		dsi_pclk.n = mnd_entry->pclk_n;
+		dsi_pclk.d = mnd_entry->pclk_d;
+		*expected_dsi_pclk = ((vco * 1000000 * dsi_pclk.m) /
+					((pll_divider_config.dsi_clk_divider)
+					* (mnd_entry->pclk_n)));
+	}
+	return 0;
+}
+
+void mipi_dsi_phy_init(int panel_ndx, struct msm_panel_info const *panel_info,
+	int target_type)
+{
+	struct mipi_dsi_phy_ctrl *pd;
+	int i, off;
+
+	MIPI_OUTP(MIPI_DSI_BASE + 0x128, 0x0001);/* start phy sw reset */
+	msleep(100);
+	MIPI_OUTP(MIPI_DSI_BASE + 0x128, 0x0000);/* end phy sw reset */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x2cc, 0x0003);/* regulator_ctrl_0 */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x2d0, 0x0001);/* regulator_ctrl_1 */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x2d4, 0x0001);/* regulator_ctrl_2 */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x2d8, 0x0000);/* regulator_ctrl_3 */
+#ifdef DSI_POWER
+	MIPI_OUTP(MIPI_DSI_BASE + 0x2dc, 0x0100);/* regulator_ctrl_4 */
+#endif
+
+	pd = (panel_info->mipi).dsi_phy_db;
+
+	off = 0x02cc;	/* regulator ctrl 0 */
+	for (i = 0; i < 4; i++) {
+		MIPI_OUTP(MIPI_DSI_BASE + off, pd->regulator[i]);
+		wmb();
+		off += 4;
+	}
+
+	off = 0x0260;	/* phy timing ctrl 0 */
+	for (i = 0; i < 11; i++) {
+		MIPI_OUTP(MIPI_DSI_BASE + off, pd->timing[i]);
+		wmb();
+		off += 4;
+	}
+
+	off = 0x0290;	/* ctrl 0 */
+	for (i = 0; i < 4; i++) {
+		MIPI_OUTP(MIPI_DSI_BASE + off, pd->ctrl[i]);
+		wmb();
+		off += 4;
+	}
+
+	off = 0x02a0;	/* strength 0 */
+	for (i = 0; i < 4; i++) {
+		MIPI_OUTP(MIPI_DSI_BASE + off, pd->strength[i]);
+		wmb();
+		off += 4;
+	}
+
+	mipi_dsi_calibration();
+
+	off = 0x0204;	/* pll ctrl 1, skip 0 */
+	for (i = 1; i < 21; i++) {
+		MIPI_OUTP(MIPI_DSI_BASE + off, pd->pll[i]);
+		wmb();
+		off += 4;
+	}
+
+	if (panel_info)
+		mipi_dsi_phy_pll_config(panel_info->clk_rate);
+
+	/* pll ctrl 0 */
+	MIPI_OUTP(MIPI_DSI_BASE + 0x200, pd->pll[0]);
+	wmb();
+	MIPI_OUTP(MIPI_DSI_BASE + 0x200, (pd->pll[0] | 0x01));
+}
+
+void mipi_dsi_clk_enable(void)
+{
+	if (mipi_dsi_clk_on) {
+		pr_err("%s: mipi_dsi_clk already ON\n", __func__);
+		return;
+	}
+
+	mipi_dsi_clk_on = 1;
+
+	clk_enable(amp_pclk); /* clock for AHB-master to AXI */
+	clk_enable(dsi_m_pclk);
+	clk_enable(dsi_s_pclk);
+	if (clk_set_rate(dsi_byte_div_clk, 1) < 0)	/* divided by 1 */
+		pr_err("%s: clk_set_rate failed\n",	__func__);
+	clk_enable(dsi_byte_div_clk);
+	clk_enable(dsi_esc_clk);
+	mipi_dsi_pclk_ctrl(&dsi_pclk, 1);
+	mipi_dsi_clk_ctrl(&dsicore_clk, 1);
+	mipi_dsi_ahb_en();
+	mipi_dsi_sfpb_cfg();
+}
+
+void mipi_dsi_clk_disable(void)
+{
+	if (mipi_dsi_clk_on == 0) {
+		pr_err("%s: mipi_dsi_clk already OFF\n", __func__);
+		return;
+	}
+
+	mipi_dsi_clk_on = 0;
+
+	MIPI_OUTP(MIPI_DSI_BASE + 0x0118, 0);
+
+	mipi_dsi_pclk_ctrl(&dsi_pclk, 0);
+	mipi_dsi_clk_ctrl(&dsicore_clk, 0);
+	clk_disable(dsi_esc_clk);
+	clk_disable(dsi_byte_div_clk);
+	clk_disable(dsi_m_pclk);
+	clk_disable(dsi_s_pclk);
+	clk_disable(amp_pclk); /* clock for AHB-master to AXI */
+}
+
+void mipi_dsi_phy_ctrl(int on)
+{
+	if (on) {
+		/* DSIPHY_PLL_CTRL_5 */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x0214, 0x050);
+
+		/* DSIPHY_TPA_CTRL_1 */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x0258, 0x00f);
+
+		/* DSIPHY_TPA_CTRL_2 */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x025c, 0x000);
+	} else {
+		/* DSIPHY_PLL_CTRL_5 */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x0214, 0x05f);
+
+		/* DSIPHY_TPA_CTRL_1 */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x0258, 0x08f);
+
+		/* DSIPHY_TPA_CTRL_2 */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x025c, 0x001);
+
+		/* DSIPHY_REGULATOR_CTRL_0 */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x02cc, 0x02);
+
+		/* DSIPHY_CTRL_0 */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x0290, 0x00);
+
+		/* DSIPHY_CTRL_1 */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x0294, 0x7f);
+
+		/* DSIPHY_PLL_CTRL_0, disable dsi pll */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x0200, 0x40);
+
+		/* disable dsi clk */
+		MIPI_OUTP(MIPI_DSI_BASE + 0x0118, 0);
+	}
+}
+
+#ifdef CONFIG_FB_MSM_HDMI_COMMON
+#define SW_RESET BIT(2)
+void hdmi_phy_reset(void)
+{
+	unsigned int phy_reset_polarity = 0x0;
+	unsigned int val = HDMI_INP_ND(0x2D4);
+
+	phy_reset_polarity = val >> 3 & 0x1;
+
+	if (phy_reset_polarity == 0)
+		HDMI_OUTP(0x2D4, val | SW_RESET);
+	else
+		HDMI_OUTP(0x2D4, val & (~SW_RESET));
+
+	msleep(100);
+
+	if (phy_reset_polarity == 0)
+		HDMI_OUTP(0x2D4, val & (~SW_RESET));
+	else
+		HDMI_OUTP(0x2D4, val | SW_RESET);
+}
+
+void hdmi_msm_init_phy(int video_format)
+{
+	uint32 offset;
+	/* De-serializer delay D/C for non-lbk mode
+	 * PHY REG0 = (DESER_SEL(0) | DESER_DEL_CTRL(3)
+	 * | AMUX_OUT_SEL(0))
+	 */
+	HDMI_OUTP_ND(0x0300, 0x0C); /*0b00001100*/
+
+	if (video_format == HDMI_VFRMT_720x480p60_16_9) {
+		/* PHY REG1 = DTEST_MUX_SEL(5) | PLL_GAIN_SEL(0)
+		 * | OUTVOL_SWING_CTRL(3)
+		 */
+		HDMI_OUTP_ND(0x0304, 0x53); /*0b01010011*/
+	} else {
+		/* If the freq. is less than 120MHz, use low gain 0
+		 * for board with termination
+		 * PHY REG1 = DTEST_MUX_SEL(5) | PLL_GAIN_SEL(0)
+		 * | OUTVOL_SWING_CTRL(4)
+		 */
+		HDMI_OUTP_ND(0x0304, 0x54); /*0b01010100*/
+	}
+
+	/* No matter what, start from the power down mode
+	 * PHY REG2 = PD_PWRGEN | PD_PLL | PD_DRIVE_4 | PD_DRIVE_3
+	 * | PD_DRIVE_2 | PD_DRIVE_1 | PD_DESER
+	 */
+	HDMI_OUTP_ND(0x0308, 0x7F); /*0b01111111*/
+
+	/* Turn PowerGen on
+	 * PHY REG2 = PD_PLL | PD_DRIVE_4 | PD_DRIVE_3
+	 * | PD_DRIVE_2 | PD_DRIVE_1 | PD_DESER
+	 */
+	HDMI_OUTP_ND(0x0308, 0x3F); /*0b00111111*/
+
+	/* Turn PLL power on
+	 * PHY REG2 = PD_DRIVE_4 | PD_DRIVE_3
+	 * | PD_DRIVE_2 | PD_DRIVE_1 | PD_DESER
+	 */
+	HDMI_OUTP_ND(0x0308, 0x1F); /*0b00011111*/
+
+	/* Write to HIGH after PLL power down de-assert
+	 * PHY REG3 = PLL_ENABLE
+	 */
+	HDMI_OUTP_ND(0x030C, 0x01);
+	/* ASIC power on; PHY REG9 = 0 */
+	HDMI_OUTP_ND(0x0324, 0x00);
+	/* Enable PLL lock detect, PLL lock det will go high after lock
+	 * Enable the re-time logic
+	 * PHY REG12 = PLL_LOCK_DETECT_EN | RETIMING_ENABLE
+	 */
+	HDMI_OUTP_ND(0x0330, 0x03); /*0b00000011*/
+
+	/* Drivers are on
+	 * PHY REG2 = PD_DESER
+	 */
+	HDMI_OUTP_ND(0x0308, 0x01); /*0b00000001*/
+	/* If the RX detector is needed
+	 * PHY REG2 = RCV_SENSE_EN | PD_DESER
+	 */
+	HDMI_OUTP_ND(0x0308, 0x81); /*0b10000001*/
+
+	offset = 0x0310;
+	while (offset <= 0x032C) {
+		HDMI_OUTP(offset, 0x0);
+		offset += 0x4;
+	}
+
+	/* If we want to use lock enable based on counting
+	 * PHY REG12 = FORCE_LOCK | PLL_LOCK_DETECT_EN | RETIMING_ENABLE
+	 */
+	HDMI_OUTP_ND(0x0330, 0x13); /*0b00010011*/
+}
+
+void hdmi_msm_powerdown_phy(void)
+{
+	/* Disable PLL */
+	HDMI_OUTP_ND(0x030C, 0x00);
+	/* Power down PHY */
+	HDMI_OUTP_ND(0x0308, 0x7F); /*0b01111111*/
+}
+
+void hdmi_frame_ctrl_cfg(const struct hdmi_disp_mode_timing_type *timing)
+{
+	/*  0x02C8 HDMI_FRAME_CTRL
+	 *  31 INTERLACED_EN   Interlaced or progressive enable bit
+	 *    0: Frame in progressive
+	 *    1: Frame is interlaced
+	 *  29 HSYNC_HDMI_POL  HSYNC polarity fed to HDMI core
+	 *     0: Active Hi Hsync, detect the rising edge of hsync
+	 *     1: Active lo Hsync, Detect the falling edge of Hsync
+	 *  28 VSYNC_HDMI_POL  VSYNC polarity fed to HDMI core
+	 *     0: Active Hi Vsync, detect the rising edge of vsync
+	 *     1: Active Lo Vsync, Detect the falling edge of Vsync
+	 *  12 RGB_MUX_SEL     Selects whether the MDP4 input is treated as RGB or BGR
+	 */
+	HDMI_OUTP(0x02C8,
+		  ((timing->interlaced << 31) & 0x80000000)
+		| ((timing->active_low_h << 29) & 0x20000000)
+		| ((timing->active_low_v << 28) & 0x10000000)
+		| (1 << 12));
+}
+
+void hdmi_msm_phy_status_poll(void)
+{
+	unsigned int phy_ready;
+	phy_ready = 0x1 & HDMI_INP_ND(0x33c);
+	if (phy_ready) {
+		pr_debug("HDMI Phy Status bit is set and ready\n");
+	} else {
+		pr_debug("HDMI Phy Status bit is not set, "
+			"waiting for ready status\n");
+		do {
+			phy_ready = 0x1 & HDMI_INP_ND(0x33c);
+		} while (!phy_ready);
+	}
+}
+#endif
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index ec35130..1c4cc72 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -3,6 +3,7 @@
  * Core MSM framebuffer driver.
  *
  * Copyright (C) 2007 Google Incorporated
+ * Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -14,627 +15,3045 @@
  * GNU General Public License for more details.
  */
 
-#include <linux/platform_device.h>
 #include <linux/module.h>
-#include <linux/fb.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
-
-#include <linux/freezer.h>
-#include <linux/wait.h>
+#include <linux/mm.h>
+#include <linux/fb.h>
 #include <linux/msm_mdp.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-#include <mach/msm_fb.h>
-#include <mach/board.h>
-#include <linux/workqueue.h>
-#include <linux/clk.h>
-#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <mach/board.h>
+#include <linux/uaccess.h>
 
-#define PRINT_FPS 0
-#define PRINT_BLIT_TIME 0
+#include <linux/workqueue.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <linux/proc_fs.h>
+#include <linux/vmalloc.h>
+#include <linux/debugfs.h>
+#include <linux/console.h>
+#include <linux/android_pmem.h>
+#include <linux/leds.h>
+#include <linux/pm_runtime.h>
 
-#define SLEEPING 0x4
-#define UPDATING 0x3
-#define FULL_UPDATE_DONE 0x2
-#define WAKING 0x1
-#define AWAKE 0x0
+#define MSM_FB_C
+#include "msm_fb.h"
+#include "mddihosti.h"
+#include "tvenc.h"
+#include "mdp.h"
+#include "mdp4.h"
 
-#define NONE 0
-#define SUSPEND_RESUME 0x1
-#define FPS 0x2
-#define BLIT_TIME 0x4
-#define SHOW_UPDATES 0x8
+#ifdef CONFIG_FB_MSM_LOGO
+#define INIT_IMAGE_FILE "/initlogo.rle"
+extern int load_565rle_image(char *filename);
+#endif
 
-#define DLOG(mask, fmt, args...) \
-do { \
-	if (msmfb_debug_mask & mask) \
-		printk(KERN_INFO "msmfb: "fmt, ##args); \
-} while (0)
+#ifdef CONFIG_FB_MSM_TRIPLE_BUFFER
+#define MSM_FB_NUM	3
+#endif
 
-static int msmfb_debug_mask;
-module_param_named(msmfb_debug_mask, msmfb_debug_mask, int,
-		   S_IRUGO | S_IWUSR | S_IWGRP);
+static unsigned char *fbram;
+static unsigned char *fbram_phys;
+static int fbram_size;
 
-struct mdp_device *mdp;
+static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
+static int pdev_list_cnt;
 
-struct msmfb_info {
-	struct fb_info *fb;
-	struct msm_panel_data *panel;
-	int xres;
-	int yres;
-	unsigned output_format;
-	unsigned yoffset;
-	unsigned frame_requested;
-	unsigned frame_done;
-	int sleeping;
-	unsigned update_frame;
-	struct {
-		int left;
-		int top;
-		int eright; /* exclusive */
-		int ebottom; /* exclusive */
-	} update_info;
-	char *black;
+int vsync_mode = 1;
 
-	spinlock_t update_lock;
-	struct mutex panel_init_lock;
-	wait_queue_head_t frame_wq;
-	struct work_struct resume_work;
-	struct msmfb_callback dma_callback;
-	struct msmfb_callback vsync_callback;
-	struct hrtimer fake_vsync;
-	ktime_t vsync_request_time;
+#define MAX_BLIT_REQ 256
+
+#define MAX_FBI_LIST 32
+static struct fb_info *fbi_list[MAX_FBI_LIST];
+static int fbi_list_index;
+
+static struct msm_fb_data_type *mfd_list[MAX_FBI_LIST];
+static int mfd_list_index;
+
+static u32 msm_fb_pseudo_palette[16] = {
+	0x00000000, 0xffffffff, 0xffffffff, 0xffffffff,
+	0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+	0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+	0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff
 };
 
-static int msmfb_open(struct fb_info *info, int user)
+u32 msm_fb_debug_enabled;
+/* Setting msm_fb_msg_level to 8 prints out ALL messages */
+u32 msm_fb_msg_level = 7;
+
+/* Setting mddi_msg_level to 8 prints out ALL messages */
+u32 mddi_msg_level = 5;
+
+extern int32 mdp_block_power_cnt[MDP_MAX_BLOCK];
+extern unsigned long mdp_timer_duration;
+
+static int msm_fb_register(struct msm_fb_data_type *mfd);
+static int msm_fb_open(struct fb_info *info, int user);
+static int msm_fb_release(struct fb_info *info, int user);
+static int msm_fb_pan_display(struct fb_var_screeninfo *var,
+			      struct fb_info *info);
+static int msm_fb_stop_sw_refresher(struct msm_fb_data_type *mfd);
+int msm_fb_resume_sw_refresher(struct msm_fb_data_type *mfd);
+static int msm_fb_check_var(struct fb_var_screeninfo *var,
+			    struct fb_info *info);
+static int msm_fb_set_par(struct fb_info *info);
+static int msm_fb_blank_sub(int blank_mode, struct fb_info *info,
+			    boolean op_enable);
+static int msm_fb_suspend_sub(struct msm_fb_data_type *mfd);
+static int msm_fb_ioctl(struct fb_info *info, unsigned int cmd,
+			unsigned long arg);
+static int msm_fb_mmap(struct fb_info *info, struct vm_area_struct * vma);
+
+#ifdef MSM_FB_ENABLE_DBGFS
+
+#define MSM_FB_MAX_DBGFS 1024
+#define MAX_BACKLIGHT_BRIGHTNESS 255
+
+int msm_fb_debugfs_file_index;
+struct dentry *msm_fb_debugfs_root;
+struct dentry *msm_fb_debugfs_file[MSM_FB_MAX_DBGFS];
+
+DEFINE_MUTEX(msm_fb_notify_update_sem);
+void msmfb_no_update_notify_timer_cb(unsigned long data)
 {
-	return 0;
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)data;
+	if (!mfd) {
+		pr_err("%s mfd NULL\n", __func__);
+		return;
+	}
+	complete(&mfd->msmfb_no_update_notify);
 }
 
-static int msmfb_release(struct fb_info *info, int user)
+struct dentry *msm_fb_get_debugfs_root(void)
 {
-	return 0;
+	if (msm_fb_debugfs_root == NULL)
+		msm_fb_debugfs_root = debugfs_create_dir("msm_fb", NULL);
+
+	return msm_fb_debugfs_root;
 }
 
-/* Called from dma interrupt handler, must not sleep */
-static void msmfb_handle_dma_interrupt(struct msmfb_callback *callback)
+void msm_fb_debugfs_file_create(struct dentry *root, const char *name,
+				u32 *var)
 {
-	unsigned long irq_flags;
-	struct msmfb_info *msmfb  = container_of(callback, struct msmfb_info,
-					       dma_callback);
-
-	spin_lock_irqsave(&msmfb->update_lock, irq_flags);
-	msmfb->frame_done = msmfb->frame_requested;
-	if (msmfb->sleeping == UPDATING &&
-	    msmfb->frame_done == msmfb->update_frame) {
-		DLOG(SUSPEND_RESUME, "full update completed\n");
-		schedule_work(&msmfb->resume_work);
-	}
-	spin_unlock_irqrestore(&msmfb->update_lock, irq_flags);
-	wake_up(&msmfb->frame_wq);
-}
-
-static int msmfb_start_dma(struct msmfb_info *msmfb)
-{
-	uint32_t x, y, w, h;
-	unsigned addr;
-	unsigned long irq_flags;
-	uint32_t yoffset;
-	s64 time_since_request;
-	struct msm_panel_data *panel = msmfb->panel;
-
-	spin_lock_irqsave(&msmfb->update_lock, irq_flags);
-	time_since_request = ktime_to_ns(ktime_sub(ktime_get(),
-			     msmfb->vsync_request_time));
-	if (time_since_request > 20 * NSEC_PER_MSEC) {
-		uint32_t us;
-		us = do_div(time_since_request, NSEC_PER_MSEC) / NSEC_PER_USEC;
-		printk(KERN_WARNING "msmfb_start_dma %lld.%03u ms after vsync "
-			"request\n", time_since_request, us);
-	}
-	if (msmfb->frame_done == msmfb->frame_requested) {
-		spin_unlock_irqrestore(&msmfb->update_lock, irq_flags);
-		return -1;
-	}
-	if (msmfb->sleeping == SLEEPING) {
-		DLOG(SUSPEND_RESUME, "tried to start dma while asleep\n");
-		spin_unlock_irqrestore(&msmfb->update_lock, irq_flags);
-		return -1;
-	}
-	x = msmfb->update_info.left;
-	y = msmfb->update_info.top;
-	w = msmfb->update_info.eright - x;
-	h = msmfb->update_info.ebottom - y;
-	yoffset = msmfb->yoffset;
-	msmfb->update_info.left = msmfb->xres + 1;
-	msmfb->update_info.top = msmfb->yres + 1;
-	msmfb->update_info.eright = 0;
-	msmfb->update_info.ebottom = 0;
-	if (unlikely(w > msmfb->xres || h > msmfb->yres ||
-		     w == 0 || h == 0)) {
-		printk(KERN_INFO "invalid update: %d %d %d "
-				"%d\n", x, y, w, h);
-		msmfb->frame_done = msmfb->frame_requested;
-		goto error;
-	}
-	spin_unlock_irqrestore(&msmfb->update_lock, irq_flags);
-
-	addr = ((msmfb->xres * (yoffset + y) + x) * 2);
-	mdp->dma(mdp, addr + msmfb->fb->fix.smem_start,
-		 msmfb->xres * 2, w, h, x, y, &msmfb->dma_callback,
-		 panel->interface_type);
-	return 0;
-error:
-	spin_unlock_irqrestore(&msmfb->update_lock, irq_flags);
-	/* some clients need to clear their vsync interrupt */
-	if (panel->clear_vsync)
-		panel->clear_vsync(panel);
-	wake_up(&msmfb->frame_wq);
-	return 0;
-}
-
-/* Called from esync interrupt handler, must not sleep */
-static void msmfb_handle_vsync_interrupt(struct msmfb_callback *callback)
-{
-	struct msmfb_info *msmfb = container_of(callback, struct msmfb_info,
-					       vsync_callback);
-	msmfb_start_dma(msmfb);
-}
-
-static enum hrtimer_restart msmfb_fake_vsync(struct hrtimer *timer)
-{
-	struct msmfb_info *msmfb  = container_of(timer, struct msmfb_info,
-					       fake_vsync);
-	msmfb_start_dma(msmfb);
-	return HRTIMER_NORESTART;
-}
-
-static void msmfb_pan_update(struct fb_info *info, uint32_t left, uint32_t top,
-			     uint32_t eright, uint32_t ebottom,
-			     uint32_t yoffset, int pan_display)
-{
-	struct msmfb_info *msmfb = info->par;
-	struct msm_panel_data *panel = msmfb->panel;
-	unsigned long irq_flags;
-	int sleeping;
-	int retry = 1;
-
-	DLOG(SHOW_UPDATES, "update %d %d %d %d %d %d\n",
-		left, top, eright, ebottom, yoffset, pan_display);
-restart:
-	spin_lock_irqsave(&msmfb->update_lock, irq_flags);
-
-	/* if we are sleeping, on a pan_display wait 10ms (to throttle back
-	 * drawing otherwise return */
-	if (msmfb->sleeping == SLEEPING) {
-		DLOG(SUSPEND_RESUME, "drawing while asleep\n");
-		spin_unlock_irqrestore(&msmfb->update_lock, irq_flags);
-		if (pan_display)
-			wait_event_interruptible_timeout(msmfb->frame_wq,
-				msmfb->sleeping != SLEEPING, HZ/10);
+	if (msm_fb_debugfs_file_index >= MSM_FB_MAX_DBGFS)
 		return;
-	}
 
-	sleeping = msmfb->sleeping;
-	/* on a full update, if the last frame has not completed, wait for it */
-	if ((pan_display && msmfb->frame_requested != msmfb->frame_done) ||
-			    sleeping == UPDATING) {
-		int ret;
-		spin_unlock_irqrestore(&msmfb->update_lock, irq_flags);
-		ret = wait_event_interruptible_timeout(msmfb->frame_wq,
-			msmfb->frame_done == msmfb->frame_requested &&
-			msmfb->sleeping != UPDATING, 5 * HZ);
-		if (ret <= 0 && (msmfb->frame_requested != msmfb->frame_done ||
-				 msmfb->sleeping == UPDATING)) {
-			if (retry && panel->request_vsync &&
-			    (sleeping == AWAKE)) {
-				panel->request_vsync(panel,
-					&msmfb->vsync_callback);
-				retry = 0;
-				printk(KERN_WARNING "msmfb_pan_display timeout "
-					"rerequest vsync\n");
-			} else {
-				printk(KERN_WARNING "msmfb_pan_display timeout "
-					"waiting for frame start, %d %d\n",
-					msmfb->frame_requested,
-					msmfb->frame_done);
-				return;
-			}
-		}
-		goto restart;
-	}
-
-
-	msmfb->frame_requested++;
-	/* if necessary, update the y offset, if this is the
-	 * first full update on resume, set the sleeping state */
-	if (pan_display) {
-		msmfb->yoffset = yoffset;
-		if (left == 0 && top == 0 && eright == info->var.xres &&
-		    ebottom == info->var.yres) {
-			if (sleeping == WAKING) {
-				msmfb->update_frame = msmfb->frame_requested;
-				DLOG(SUSPEND_RESUME, "full update starting\n");
-				msmfb->sleeping = UPDATING;
-			}
-		}
-	}
-
-	/* set the update request */
-	if (left < msmfb->update_info.left)
-		msmfb->update_info.left = left;
-	if (top < msmfb->update_info.top)
-		msmfb->update_info.top = top;
-	if (eright > msmfb->update_info.eright)
-		msmfb->update_info.eright = eright;
-	if (ebottom > msmfb->update_info.ebottom)
-		msmfb->update_info.ebottom = ebottom;
-	DLOG(SHOW_UPDATES, "update queued %d %d %d %d %d\n",
-		msmfb->update_info.left, msmfb->update_info.top,
-		msmfb->update_info.eright, msmfb->update_info.ebottom,
-		msmfb->yoffset);
-	spin_unlock_irqrestore(&msmfb->update_lock, irq_flags);
-
-	/* if the panel is all the way on wait for vsync, otherwise sleep
-	 * for 16 ms (long enough for the dma to panel) and then begin dma */
-	msmfb->vsync_request_time = ktime_get();
-	if (panel->request_vsync && (sleeping == AWAKE)) {
-		panel->request_vsync(panel, &msmfb->vsync_callback);
-	} else {
-		if (!hrtimer_active(&msmfb->fake_vsync)) {
-			hrtimer_start(&msmfb->fake_vsync,
-				      ktime_set(0, NSEC_PER_SEC/60),
-				      HRTIMER_MODE_REL);
-		}
-	}
+	msm_fb_debugfs_file[msm_fb_debugfs_file_index++] =
+	    debugfs_create_u32(name, S_IRUGO | S_IWUSR, root, var);
 }
+#endif
 
-static void msmfb_update(struct fb_info *info, uint32_t left, uint32_t top,
-			 uint32_t eright, uint32_t ebottom)
+int msm_fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
 {
-	msmfb_pan_update(info, left, top, eright, ebottom, 0, 0);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+
+	if (!mfd->cursor_update)
+		return -ENODEV;
+
+	return mfd->cursor_update(info, cursor);
 }
 
-static void power_on_panel(struct work_struct *work)
+static int msm_fb_resource_initialized;
+
+#ifndef CONFIG_FB_BACKLIGHT
+static int lcd_backlight_registered;
+
+static void msm_fb_set_bl_brightness(struct led_classdev *led_cdev,
+					enum led_brightness value)
 {
-	struct msmfb_info *msmfb =
-		container_of(work, struct msmfb_info, resume_work);
-	struct msm_panel_data *panel = msmfb->panel;
-	unsigned long irq_flags;
+	struct msm_fb_data_type *mfd = dev_get_drvdata(led_cdev->dev->parent);
+	int bl_lvl;
 
-	mutex_lock(&msmfb->panel_init_lock);
-	DLOG(SUSPEND_RESUME, "turning on panel\n");
-	if (msmfb->sleeping == UPDATING) {
-		if (panel->unblank(panel)) {
-			printk(KERN_INFO "msmfb: panel unblank failed,"
-			       "not starting drawing\n");
-			goto error;
-		}
-		spin_lock_irqsave(&msmfb->update_lock, irq_flags);
-		msmfb->sleeping = AWAKE;
-		wake_up(&msmfb->frame_wq);
-		spin_unlock_irqrestore(&msmfb->update_lock, irq_flags);
-	}
-error:
-	mutex_unlock(&msmfb->panel_init_lock);
+	if (value > MAX_BACKLIGHT_BRIGHTNESS)
+		value = MAX_BACKLIGHT_BRIGHTNESS;
+
+	/* This maps android backlight level 0 to 255 into
+	   driver backlight level 0 to bl_max with rounding */
+	bl_lvl = (2 * value * mfd->panel_info.bl_max + MAX_BACKLIGHT_BRIGHTNESS)
+		/(2 * MAX_BACKLIGHT_BRIGHTNESS);
+
+	if (!bl_lvl && value)
+		bl_lvl = 1;
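+	/*
+	 * Illustrative example (bl_max is panel-specific): with a
+	 * hypothetical bl_max of 15, value 255 maps to bl_lvl 15 and
+	 * value 128 maps to (2*128*15 + 255) / (2*255) = 8.
+	 */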
+
+	msm_fb_set_backlight(mfd, bl_lvl);
 }
 
-
-static int msmfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
-{
-	if ((var->xres != info->var.xres) ||
-	    (var->yres != info->var.yres) ||
-	    (var->xres_virtual != info->var.xres_virtual) ||
-	    (var->yres_virtual != info->var.yres_virtual) ||
-	    (var->xoffset != info->var.xoffset) ||
-	    (var->bits_per_pixel != info->var.bits_per_pixel) ||
-	    (var->grayscale != info->var.grayscale))
-		 return -EINVAL;
-	return 0;
-}
-
-int msmfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
-{
-	struct msmfb_info *msmfb = info->par;
-	struct msm_panel_data *panel = msmfb->panel;
-
-	/* "UPDT" */
-	if ((panel->caps & MSMFB_CAP_PARTIAL_UPDATES) &&
-	    (var->reserved[0] == 0x54445055)) {
-		msmfb_pan_update(info, var->reserved[1] & 0xffff,
-				 var->reserved[1] >> 16,
-				 var->reserved[2] & 0xffff,
-				 var->reserved[2] >> 16, var->yoffset, 1);
-	} else {
-		msmfb_pan_update(info, 0, 0, info->var.xres, info->var.yres,
-				 var->yoffset, 1);
-	}
-	return 0;
-}
-
-static void msmfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
-{
-	cfb_fillrect(p, rect);
-	msmfb_update(p, rect->dx, rect->dy, rect->dx + rect->width,
-		     rect->dy + rect->height);
-}
-
-static void msmfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
-{
-	cfb_copyarea(p, area);
-	msmfb_update(p, area->dx, area->dy, area->dx + area->width,
-		     area->dy + area->height);
-}
-
-static void msmfb_imageblit(struct fb_info *p, const struct fb_image *image)
-{
-	cfb_imageblit(p, image);
-	msmfb_update(p, image->dx, image->dy, image->dx + image->width,
-		     image->dy + image->height);
-}
-
-
-static int msmfb_blit(struct fb_info *info,
-		      void __user *p)
-{
-	struct mdp_blit_req req;
-	struct mdp_blit_req_list req_list;
-	int i;
-	int ret;
-
-	if (copy_from_user(&req_list, p, sizeof(req_list)))
-		return -EFAULT;
-
-	for (i = 0; i < req_list.count; i++) {
-		struct mdp_blit_req_list *list =
-			(struct mdp_blit_req_list *)p;
-		if (copy_from_user(&req, &list->req[i], sizeof(req)))
-			return -EFAULT;
-		ret = mdp->blit(mdp, info, &req);
-		if (ret)
-			return ret;
-	}
-	return 0;
-}
-
-
-DEFINE_MUTEX(mdp_ppp_lock);
-
-static int msmfb_ioctl(struct fb_info *p, unsigned int cmd, unsigned long arg)
-{
-	void __user *argp = (void __user *)arg;
-	int ret;
-
-	switch (cmd) {
-	case MSMFB_GRP_DISP:
-		mdp->set_grp_disp(mdp, arg);
-		break;
-	case MSMFB_BLIT:
-		ret = msmfb_blit(p, argp);
-		if (ret)
-			return ret;
-		break;
-	default:
-			printk(KERN_INFO "msmfb unknown ioctl: %d\n", cmd);
-			return -EINVAL;
-	}
-	return 0;
-}
-
-static struct fb_ops msmfb_ops = {
-	.owner = THIS_MODULE,
-	.fb_open = msmfb_open,
-	.fb_release = msmfb_release,
-	.fb_check_var = msmfb_check_var,
-	.fb_pan_display = msmfb_pan_display,
-	.fb_fillrect = msmfb_fillrect,
-	.fb_copyarea = msmfb_copyarea,
-	.fb_imageblit = msmfb_imageblit,
-	.fb_ioctl = msmfb_ioctl,
+static struct led_classdev backlight_led = {
+	.name		= "lcd-backlight",
+	.brightness	= MAX_BACKLIGHT_BRIGHTNESS,
+	.brightness_set	= msm_fb_set_bl_brightness,
 };
+#endif
 
-static unsigned PP[16];
+static struct msm_fb_platform_data *msm_fb_pdata;
+static char panel_name[128];
+module_param_string(panel_name, panel_name, sizeof(panel_name), 0);
 
-
-
-#define BITS_PER_PIXEL 16
-
-static void setup_fb_info(struct msmfb_info *msmfb)
+int msm_fb_detect_client(const char *name)
 {
-	struct fb_info *fb_info = msmfb->fb;
-	int r;
+	int ret = -EPERM;
+#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
+	u32 id;
+#endif
 
-	/* finish setting up the fb_info struct */
-	strncpy(fb_info->fix.id, "msmfb", 16);
-	fb_info->fix.ypanstep = 1;
-
-	fb_info->fbops = &msmfb_ops;
-	fb_info->flags = FBINFO_DEFAULT;
-
-	fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
-	fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
-	fb_info->fix.line_length = msmfb->xres * 2;
-
-	fb_info->var.xres = msmfb->xres;
-	fb_info->var.yres = msmfb->yres;
-	fb_info->var.width = msmfb->panel->fb_data->width;
-	fb_info->var.height = msmfb->panel->fb_data->height;
-	fb_info->var.xres_virtual = msmfb->xres;
-	fb_info->var.yres_virtual = msmfb->yres * 2;
-	fb_info->var.bits_per_pixel = BITS_PER_PIXEL;
-	fb_info->var.accel_flags = 0;
-
-	fb_info->var.yoffset = 0;
-
-	if (msmfb->panel->caps & MSMFB_CAP_PARTIAL_UPDATES) {
-		/*
-		 * Set the param in the fixed screen, so userspace can't
-		 * change it. This will be used to check for the
-		 * capability.
-		 */
-		fb_info->fix.reserved[0] = 0x5444;
-		fb_info->fix.reserved[1] = 0x5055;
-
-		/*
-		 * This preloads the value so that if userspace doesn't
-		 * change it, it will be a full update
-		 */
-		fb_info->var.reserved[0] = 0x54445055;
-		fb_info->var.reserved[1] = 0;
-		fb_info->var.reserved[2] = (uint16_t)msmfb->xres |
-					   ((uint32_t)msmfb->yres << 16);
+	MSM_FB_DEBUG("\n name = %s, panel_name = %s", name, panel_name);
+	if (strlen(panel_name)) {
+		if (!strcmp((char *)panel_name, name))
+			return 0;
+		else
+			return -EPERM;
 	}
 
-	fb_info->var.red.offset = 11;
-	fb_info->var.red.length = 5;
-	fb_info->var.red.msb_right = 0;
-	fb_info->var.green.offset = 5;
-	fb_info->var.green.length = 6;
-	fb_info->var.green.msb_right = 0;
-	fb_info->var.blue.offset = 0;
-	fb_info->var.blue.length = 5;
-	fb_info->var.blue.msb_right = 0;
+	if (msm_fb_pdata && msm_fb_pdata->detect_client) {
+		ret = msm_fb_pdata->detect_client(name);
 
-	r = fb_alloc_cmap(&fb_info->cmap, 16, 0);
-	fb_info->pseudo_palette = PP;
+		/* if it's a non-MDDI panel, we need to pre-scan the
+		   MDDI client to see if we can disable the MDDI host */
 
-	PP[0] = 0;
-	for (r = 1; r < 16; r++)
-		PP[r] = 0xffffffff;
-}
-
-static int setup_fbmem(struct msmfb_info *msmfb, struct platform_device *pdev)
-{
-	struct fb_info *fb = msmfb->fb;
-	struct resource *resource;
-	unsigned long size = msmfb->xres * msmfb->yres *
-			     (BITS_PER_PIXEL >> 3) * 2;
-	unsigned char *fbram;
-
-	/* board file might have attached a resource describing an fb */
-	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!resource)
-		return -EINVAL;
-
-	/* check the resource is large enough to fit the fb */
-	if (resource->end - resource->start < size) {
-		printk(KERN_ERR "allocated resource is too small for "
-				"fb\n");
-		return -ENOMEM;
-	}
-	fb->fix.smem_start = resource->start;
-	fb->fix.smem_len = resource->end - resource->start;
-	fbram = ioremap(resource->start,
-			resource->end - resource->start);
-	if (fbram == 0) {
-		printk(KERN_ERR "msmfb: cannot allocate fbram!\n");
-		return -ENOMEM;
-	}
-	fb->screen_base = fbram;
-	return 0;
-}
-
-static int msmfb_probe(struct platform_device *pdev)
-{
-	struct fb_info *fb;
-	struct msmfb_info *msmfb;
-	struct msm_panel_data *panel = pdev->dev.platform_data;
-	int ret;
-
-	if (!panel) {
-		pr_err("msmfb_probe: no platform data\n");
-		return -EINVAL;
-	}
-	if (!panel->fb_data) {
-		pr_err("msmfb_probe: no fb_data\n");
-		return -EINVAL;
+#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
+		if (!ret && msm_fb_pdata->mddi_prescan)
+			id = mddi_get_client_id();
+#endif
 	}
 
-	fb = framebuffer_alloc(sizeof(struct msmfb_info), &pdev->dev);
-	if (!fb)
-		return -ENOMEM;
-	msmfb = fb->par;
-	msmfb->fb = fb;
-	msmfb->panel = panel;
-	msmfb->xres = panel->fb_data->xres;
-	msmfb->yres = panel->fb_data->yres;
-
-	ret = setup_fbmem(msmfb, pdev);
-	if (ret)
-		goto error_setup_fbmem;
-
-	setup_fb_info(msmfb);
-
-	spin_lock_init(&msmfb->update_lock);
-	mutex_init(&msmfb->panel_init_lock);
-	init_waitqueue_head(&msmfb->frame_wq);
-	INIT_WORK(&msmfb->resume_work, power_on_panel);
-	msmfb->black = kzalloc(msmfb->fb->var.bits_per_pixel*msmfb->xres,
-			       GFP_KERNEL);
-
-	printk(KERN_INFO "msmfb_probe() installing %d x %d panel\n",
-	       msmfb->xres, msmfb->yres);
-
-	msmfb->dma_callback.func = msmfb_handle_dma_interrupt;
-	msmfb->vsync_callback.func = msmfb_handle_vsync_interrupt;
-	hrtimer_init(&msmfb->fake_vsync, CLOCK_MONOTONIC,
-		     HRTIMER_MODE_REL);
-
-
-	msmfb->fake_vsync.function = msmfb_fake_vsync;
-
-	ret = register_framebuffer(fb);
-	if (ret)
-		goto error_register_framebuffer;
-
-	msmfb->sleeping = WAKING;
-
-	return 0;
-
-error_register_framebuffer:
-	iounmap(fb->screen_base);
-error_setup_fbmem:
-	framebuffer_release(msmfb->fb);
 	return ret;
 }
 
-static struct platform_driver msm_panel_driver = {
-	/* need to write remove */
-	.probe = msmfb_probe,
-	.driver = {.name = "msm_panel"},
+static ssize_t msm_fb_msm_fb_type(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = 0;
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct msm_fb_panel_data *pdata =
+		(struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;
+
+	switch (pdata->panel_info.type) {
+	case NO_PANEL:
+		ret = snprintf(buf, PAGE_SIZE, "no panel\n");
+		break;
+	case MDDI_PANEL:
+		ret = snprintf(buf, PAGE_SIZE, "mddi panel\n");
+		break;
+	case EBI2_PANEL:
+		ret = snprintf(buf, PAGE_SIZE, "ebi2 panel\n");
+		break;
+	case LCDC_PANEL:
+		ret = snprintf(buf, PAGE_SIZE, "lcdc panel\n");
+		break;
+	case EXT_MDDI_PANEL:
+		ret = snprintf(buf, PAGE_SIZE, "ext mddi panel\n");
+		break;
+	case TV_PANEL:
+		ret = snprintf(buf, PAGE_SIZE, "tv panel\n");
+		break;
+	case HDMI_PANEL:
+		ret = snprintf(buf, PAGE_SIZE, "hdmi panel\n");
+		break;
+	case DTV_PANEL:
+		ret = snprintf(buf, PAGE_SIZE, "dtv panel\n");
+		break;
+	default:
+		ret = snprintf(buf, PAGE_SIZE, "unknown panel\n");
+		break;
+	}
+
+	return ret;
+}
+
+static DEVICE_ATTR(msm_fb_type, S_IRUGO, msm_fb_msm_fb_type, NULL);
+static struct attribute *msm_fb_attrs[] = {
+	&dev_attr_msm_fb_type.attr,
+	NULL,
+};
+static struct attribute_group msm_fb_attr_group = {
+	.attrs = msm_fb_attrs,
 };
 
-
-static int msmfb_add_mdp_device(struct device *dev,
-				struct class_interface *class_intf)
+static int msm_fb_create_sysfs(struct platform_device *pdev)
 {
-	/* might need locking if mulitple mdp devices */
-	if (mdp)
+	int rc;
+	struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
+
+	rc = sysfs_create_group(&mfd->fbi->dev->kobj, &msm_fb_attr_group);
+	if (rc)
+		MSM_FB_ERR("%s: sysfs group creation failed, rc=%d\n", __func__,
+			rc);
+	return rc;
+}
+static void msm_fb_remove_sysfs(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
+	sysfs_remove_group(&mfd->fbi->dev->kobj, &msm_fb_attr_group);
+}
+
+static int msm_fb_probe(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+	int rc;
+	int err = 0;
+
+	MSM_FB_DEBUG("msm_fb_probe\n");
+
+	if ((pdev->id == 0) && (pdev->num_resources > 0)) {
+		msm_fb_pdata = pdev->dev.platform_data;
+		fbram_size =
+			pdev->resource[0].end - pdev->resource[0].start + 1;
+		fbram_phys = (char *)pdev->resource[0].start;
+		fbram = ioremap((unsigned long)fbram_phys, fbram_size);
+
+		if (!fbram) {
+			printk(KERN_ERR "fbram ioremap failed!\n");
+			return -ENOMEM;
+		}
+		MSM_FB_DEBUG("msm_fb_probe:  phy_Addr = 0x%x virt = 0x%x\n",
+			     (int)fbram_phys, (int)fbram);
+
+		msm_fb_resource_initialized = 1;
 		return 0;
-	mdp = container_of(dev, struct mdp_device, dev);
-	return platform_driver_register(&msm_panel_driver);
+	}
+
+	if (!msm_fb_resource_initialized)
+		return -EPERM;
+
+	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
+
+	if (!mfd)
+		return -ENODEV;
+
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
+		return -ENOMEM;
+
+	mfd->panel_info.frame_count = 0;
+	mfd->bl_level = 0;
+#ifdef CONFIG_FB_MSM_OVERLAY
+	mfd->overlay_play_enable = 1;
+#endif
+	rc = msm_fb_register(mfd);
+	if (rc)
+		return rc;
+	err = pm_runtime_set_active(mfd->fbi->dev);
+	if (err < 0)
+		printk(KERN_ERR "pm_runtime: fail to set active.\n");
+	pm_runtime_enable(mfd->fbi->dev);
+#ifdef CONFIG_FB_BACKLIGHT
+	msm_fb_config_backlight(mfd);
+#else
+	/* android supports only one lcd-backlight/lcd for now */
+	if (!lcd_backlight_registered) {
+		if (led_classdev_register(&pdev->dev, &backlight_led))
+			printk(KERN_ERR "led_classdev_register failed\n");
+		else
+			lcd_backlight_registered = 1;
+	}
+#endif
+
+	pdev_list[pdev_list_cnt++] = pdev;
+	msm_fb_create_sysfs(pdev);
+	return 0;
 }
 
-static void msmfb_remove_mdp_device(struct device *dev,
-				struct class_interface *class_intf)
+static int msm_fb_remove(struct platform_device *pdev)
 {
-	/* might need locking if mulitple mdp devices */
-	if (dev != &mdp->dev)
-		return;
-	platform_driver_unregister(&msm_panel_driver);
-	mdp = NULL;
+	struct msm_fb_data_type *mfd;
+
+	MSM_FB_DEBUG("msm_fb_remove\n");
+
+	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
+
+	if (!mfd)
+		return -ENODEV;
+
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	msm_fb_remove_sysfs(pdev);
+
+	pm_runtime_disable(mfd->fbi->dev);
+
+	if (msm_fb_suspend_sub(mfd))
+		printk(KERN_ERR "msm_fb_remove: can't stop the device %d\n", mfd->index);
+
+	if (mfd->channel_irq != 0)
+		free_irq(mfd->channel_irq, (void *)mfd);
+
+	if (mfd->vsync_width_boundary)
+		vfree(mfd->vsync_width_boundary);
+
+	if (mfd->vsync_resync_timer.function)
+		del_timer(&mfd->vsync_resync_timer);
+
+	if (mfd->refresh_timer.function)
+		del_timer(&mfd->refresh_timer);
+
+	if (mfd->dma_hrtimer.function)
+		hrtimer_cancel(&mfd->dma_hrtimer);
+
+	if (mfd->msmfb_no_update_notify_timer.function)
+		del_timer(&mfd->msmfb_no_update_notify_timer);
+	complete(&mfd->msmfb_no_update_notify);
+	complete(&mfd->msmfb_update_notify);
+
+	/* remove /dev/fb* */
+	unregister_framebuffer(mfd->fbi);
+
+#ifdef CONFIG_FB_BACKLIGHT
+	/* remove /sys/class/backlight */
+	backlight_device_unregister(mfd->fbi->bl_dev);
+#else
+	if (lcd_backlight_registered) {
+		lcd_backlight_registered = 0;
+		led_classdev_unregister(&backlight_led);
+	}
+#endif
+
+#ifdef MSM_FB_ENABLE_DBGFS
+	if (mfd->sub_dir)
+		debugfs_remove(mfd->sub_dir);
+#endif
+
+	return 0;
 }
 
-static struct class_interface msm_fb_interface = {
-	.add_dev = &msmfb_add_mdp_device,
-	.remove_dev = &msmfb_remove_mdp_device,
+#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
+static int msm_fb_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct msm_fb_data_type *mfd;
+	int ret = 0;
+
+	MSM_FB_DEBUG("msm_fb_suspend\n");
+
+	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
+
+	if ((!mfd) || (mfd->key != MFD_KEY))
+		return 0;
+
+	console_lock();
+	fb_set_suspend(mfd->fbi, FBINFO_STATE_SUSPENDED);
+
+	ret = msm_fb_suspend_sub(mfd);
+	if (ret != 0) {
+		printk(KERN_ERR "msm_fb: failed to suspend! %d\n", ret);
+		fb_set_suspend(mfd->fbi, FBINFO_STATE_RUNNING);
+	} else {
+		pdev->dev.power.power_state = state;
+	}
+
+	console_unlock();
+	return ret;
+}
+#else
+#define msm_fb_suspend NULL
+#endif
+
+static int msm_fb_suspend_sub(struct msm_fb_data_type *mfd)
+{
+	int ret = 0;
+
+	if ((!mfd) || (mfd->key != MFD_KEY))
+		return 0;
+
+	if (mfd->msmfb_no_update_notify_timer.function)
+		del_timer(&mfd->msmfb_no_update_notify_timer);
+	complete(&mfd->msmfb_no_update_notify);
+
+	/*
+	 * suspend this channel
+	 */
+	mfd->suspend.sw_refreshing_enable = mfd->sw_refreshing_enable;
+	mfd->suspend.op_enable = mfd->op_enable;
+	mfd->suspend.panel_power_on = mfd->panel_power_on;
+
+	if (mfd->op_enable) {
+		ret =
+		     msm_fb_blank_sub(FB_BLANK_POWERDOWN, mfd->fbi,
+				      mfd->suspend.op_enable);
+		if (ret) {
+			MSM_FB_INFO
+			    ("msm_fb_suspend: can't turn off display!\n");
+			return ret;
+		}
+		mfd->op_enable = FALSE;
+	}
+	/*
+	 * try to power down
+	 */
+	mdp_pipe_ctrl(MDP_MASTER_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+	/*
+	 * detach display channel irq if there's any
+	 * or wait until vsync-resync completes
+	 */
+	if (mfd->dest == DISPLAY_LCD) {
+		if (mfd->panel_info.lcd.vsync_enable) {
+			if (mfd->panel_info.lcd.hw_vsync_mode) {
+				if (mfd->channel_irq != 0)
+					disable_irq(mfd->channel_irq);
+			} else {
+				volatile boolean vh_pending;
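+				/*
+				 * Software vsync: busy-wait until the pending
+				 * vsync handler has run so the channel is
+				 * quiescent before powering down.
+				 */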
+				do {
+					vh_pending = mfd->vsync_handler_pending;
+				} while (vh_pending);
+			}
+		}
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int msm_fb_resume_sub(struct msm_fb_data_type *mfd)
+{
+	int ret = 0;
+
+	if ((!mfd) || (mfd->key != MFD_KEY))
+		return 0;
+
+	/* attach display channel irq if there's any */
+	if (mfd->channel_irq != 0)
+		enable_irq(mfd->channel_irq);
+
+	/* resume state var recover */
+	mfd->sw_refreshing_enable = mfd->suspend.sw_refreshing_enable;
+	mfd->op_enable = mfd->suspend.op_enable;
+
+	if (mfd->suspend.panel_power_on) {
+		ret =
+		     msm_fb_blank_sub(FB_BLANK_UNBLANK, mfd->fbi,
+				      mfd->op_enable);
+		if (ret)
+			MSM_FB_INFO("msm_fb_resume: can't turn on display!\n");
+	}
+
+	return ret;
+}
+#endif
+
+#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
+static int msm_fb_resume(struct platform_device *pdev)
+{
+	/* This resume function is called with interrupts enabled. */
+	int ret = 0;
+	struct msm_fb_data_type *mfd;
+
+	MSM_FB_DEBUG("msm_fb_resume\n");
+
+	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
+
+	if ((!mfd) || (mfd->key != MFD_KEY))
+		return 0;
+
+	console_lock();
+	ret = msm_fb_resume_sub(mfd);
+	pdev->dev.power.power_state = PMSG_ON;
+	fb_set_suspend(mfd->fbi, FBINFO_STATE_RUNNING);
+	console_unlock();
+
+	return ret;
+}
+#else
+#define msm_fb_resume NULL
+#endif
+
+static int msm_fb_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: suspending...\n");
+	return 0;
+}
+
+static int msm_fb_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: resuming...\n");
+	return 0;
+}
+
+static int msm_fb_runtime_idle(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: idling...\n");
+	return 0;
+}
+
+static struct dev_pm_ops msm_fb_dev_pm_ops = {
+	.runtime_suspend = msm_fb_runtime_suspend,
+	.runtime_resume = msm_fb_runtime_resume,
+	.runtime_idle = msm_fb_runtime_idle,
 };
 
-static int __init msmfb_init(void)
+static struct platform_driver msm_fb_driver = {
+	.probe = msm_fb_probe,
+	.remove = msm_fb_remove,
+#ifndef CONFIG_HAS_EARLYSUSPEND
+	.suspend = msm_fb_suspend,
+	.resume = msm_fb_resume,
+#endif
+	.shutdown = NULL,
+	.driver = {
+		   /* Driver name must match the device name added in platform.c. */
+		   .name = "msm_fb",
+		   .pm = &msm_fb_dev_pm_ops,
+		   },
+};
+
+#if defined(CONFIG_HAS_EARLYSUSPEND) && (defined(CONFIG_FB_MSM_OVERLAY) || \
+	 defined(CONFIG_FB_MSM_MDP303))
+static void memset32_io(u32 __iomem *_ptr, u32 val, size_t count)
 {
-	return register_mdp_client(&msm_fb_interface);
+	count >>= 2;
+	while (count--)
+		writel(val, _ptr++);
+}
+#endif
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void msmfb_early_suspend(struct early_suspend *h)
+{
+	struct msm_fb_data_type *mfd = container_of(h, struct msm_fb_data_type,
+						    early_suspend);
+#if defined(CONFIG_FB_MSM_OVERLAY) || defined(CONFIG_FB_MSM_MDP303)
+	/*
+	 * For MDP with overlay, fill the framebuffer with black pixels
+	 * so that HDMI shows a black screen.
+	 */
+	struct fb_info *fbi = mfd->fbi;
+	switch (mfd->fbi->var.bits_per_pixel) {
+	case 32:
+		memset32_io((void *)fbi->screen_base, 0xFF000000,
+							fbi->fix.smem_len);
+		break;
+	default:
+		memset32_io((void *)fbi->screen_base, 0x00, fbi->fix.smem_len);
+		break;
+	}
+#endif
+	msm_fb_suspend_sub(mfd);
 }
 
-module_init(msmfb_init);
+static void msmfb_early_resume(struct early_suspend *h)
+{
+	struct msm_fb_data_type *mfd = container_of(h, struct msm_fb_data_type,
+						    early_suspend);
+	msm_fb_resume_sub(mfd);
+}
+#endif
+
+void msm_fb_set_backlight(struct msm_fb_data_type *mfd, __u32 bkl_lvl)
+{
+	struct msm_fb_panel_data *pdata;
+
+	pdata = (struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;
+
+	if ((pdata) && (pdata->set_backlight)) {
+		down(&mfd->sem);
+		mfd->bl_level = bkl_lvl;
+		pdata->set_backlight(mfd);
+		up(&mfd->sem);
+	}
+}
+
+static int msm_fb_blank_sub(int blank_mode, struct fb_info *info,
+			    boolean op_enable)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	struct msm_fb_panel_data *pdata = NULL;
+	int ret = 0;
+
+	if (!op_enable)
+		return -EPERM;
+
+	pdata = (struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;
+	if ((!pdata) || (!pdata->on) || (!pdata->off)) {
+		printk(KERN_ERR "msm_fb_blank_sub: no panel operation detected!\n");
+		return -ENODEV;
+	}
+
+	switch (blank_mode) {
+	case FB_BLANK_UNBLANK:
+		if (!mfd->panel_power_on) {
+			msleep(16);
+			ret = pdata->on(mfd->pdev);
+			if (ret == 0) {
+				mfd->panel_power_on = TRUE;
+
+/* ToDo: possible conflict with android which doesn't expect sw refresher */
+/*
+	  if (!mfd->hw_refresh)
+	  {
+	    if ((ret = msm_fb_resume_sw_refresher(mfd)) != 0)
+	    {
+	      MSM_FB_INFO("msm_fb_blank_sub: msm_fb_resume_sw_refresher failed = %d!\n",ret);
+	    }
+	  }
+*/
+			}
+		}
+		break;
+
+	case FB_BLANK_VSYNC_SUSPEND:
+	case FB_BLANK_HSYNC_SUSPEND:
+	case FB_BLANK_NORMAL:
+	case FB_BLANK_POWERDOWN:
+	default:
+		if (mfd->panel_power_on) {
+			int curr_pwr_state;
+
+			mfd->op_enable = FALSE;
+			curr_pwr_state = mfd->panel_power_on;
+			mfd->panel_power_on = FALSE;
+
+			msleep(16);
+			ret = pdata->off(mfd->pdev);
+			if (ret)
+				mfd->panel_power_on = curr_pwr_state;
+
+			mfd->op_enable = TRUE;
+		}
+		break;
+	}
+
+	return ret;
+}
+
+static void msm_fb_fillrect(struct fb_info *info,
+			    const struct fb_fillrect *rect)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+
+	cfb_fillrect(info, rect);
+	if (!mfd->hw_refresh && (info->var.yoffset == 0) &&
+		!mfd->sw_currently_refreshing) {
+		struct fb_var_screeninfo var;
+
+		var = info->var;
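+		/*
+		 * Tag a software dirty-rect update: reserved[0] carries the
+		 * "UPDT" magic and reserved[1]/[2] pack the rectangle's
+		 * top-left and bottom-right corners as (y << 16) | x, which
+		 * msm_fb_pan_display() decodes into an mdp_dirty_region.
+		 */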
+		var.reserved[0] = 0x54445055;
+		var.reserved[1] = (rect->dy << 16) | (rect->dx);
+		var.reserved[2] = ((rect->dy + rect->height) << 16) |
+		    (rect->dx + rect->width);
+
+		msm_fb_pan_display(&var, info);
+	}
+}
+
+static void msm_fb_copyarea(struct fb_info *info,
+			    const struct fb_copyarea *area)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+
+	cfb_copyarea(info, area);
+	if (!mfd->hw_refresh && (info->var.yoffset == 0) &&
+		!mfd->sw_currently_refreshing) {
+		struct fb_var_screeninfo var;
+
+		var = info->var;
+		var.reserved[0] = 0x54445055;
+		var.reserved[1] = (area->dy << 16) | (area->dx);
+		var.reserved[2] = ((area->dy + area->height) << 16) |
+		    (area->dx + area->width);
+
+		msm_fb_pan_display(&var, info);
+	}
+}
+
+static void msm_fb_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+
+	cfb_imageblit(info, image);
+	if (!mfd->hw_refresh && (info->var.yoffset == 0) &&
+		!mfd->sw_currently_refreshing) {
+		struct fb_var_screeninfo var;
+
+		var = info->var;
+		var.reserved[0] = 0x54445055;
+		var.reserved[1] = (image->dy << 16) | (image->dx);
+		var.reserved[2] = ((image->dy + image->height) << 16) |
+		    (image->dx + image->width);
+
+		msm_fb_pan_display(&var, info);
+	}
+}
+
+static int msm_fb_blank(int blank_mode, struct fb_info *info)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	return msm_fb_blank_sub(blank_mode, info, mfd->op_enable);
+}
+
+static int msm_fb_set_lut(struct fb_cmap *cmap, struct fb_info *info)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+
+	if (!mfd->lut_update)
+		return -ENODEV;
+
+	mfd->lut_update(info, cmap);
+	return 0;
+}
+
+/*
+ * Custom Framebuffer mmap() function for MSM driver.
+ * Differs from standard mmap() function by allowing for customized
+ * page-protection.
+ */
+static int msm_fb_mmap(struct fb_info *info, struct vm_area_struct * vma)
+{
+	/* Get frame buffer memory range. */
+	unsigned long start = info->fix.smem_start;
+	u32 len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len);
+	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	if (off >= len) {
+		/* memory mapped io */
+		off -= len;
+		if (info->var.accel_flags) {
+			mutex_unlock(&info->lock);
+			return -EINVAL;
+		}
+		start = info->fix.mmio_start;
+		len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len);
+	}
+
+	/* Set VM flags. */
+	start &= PAGE_MASK;
+	if ((vma->vm_end - vma->vm_start + off) > len)
+		return -EINVAL;
+	off += start;
+	vma->vm_pgoff = off >> PAGE_SHIFT;
+	/* This is an IO map - tell maydump to skip this VMA */
+	vma->vm_flags |= VM_IO | VM_RESERVED;
+
+	/* Set VM page protection */
+	if (mfd->mdp_fb_page_protection == MDP_FB_PAGE_PROTECTION_WRITECOMBINE)
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	else if (mfd->mdp_fb_page_protection ==
+			MDP_FB_PAGE_PROTECTION_WRITETHROUGHCACHE)
+		vma->vm_page_prot = pgprot_writethroughcache(vma->vm_page_prot);
+	else if (mfd->mdp_fb_page_protection ==
+			MDP_FB_PAGE_PROTECTION_WRITEBACKCACHE)
+		vma->vm_page_prot = pgprot_writebackcache(vma->vm_page_prot);
+	else if (mfd->mdp_fb_page_protection ==
+			MDP_FB_PAGE_PROTECTION_WRITEBACKWACACHE)
+		vma->vm_page_prot = pgprot_writebackwacache(vma->vm_page_prot);
+	else
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	/* Remap the frame buffer I/O range */
+	if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
+				vma->vm_end - vma->vm_start,
+				vma->vm_page_prot))
+		return -EAGAIN;
+
+	return 0;
+}
+
+static struct fb_ops msm_fb_ops = {
+	.owner = THIS_MODULE,
+	.fb_open = msm_fb_open,
+	.fb_release = msm_fb_release,
+	.fb_read = NULL,
+	.fb_write = NULL,
+	.fb_cursor = NULL,
+	.fb_check_var = msm_fb_check_var,	/* vinfo check */
+	.fb_set_par = msm_fb_set_par,	/* set the video mode according to info->var */
+	.fb_setcolreg = NULL,	/* set color register */
+	.fb_blank = msm_fb_blank,	/* blank display */
+	.fb_pan_display = msm_fb_pan_display,	/* pan display */
+	.fb_fillrect = msm_fb_fillrect,	/* Draws a rectangle */
+	.fb_copyarea = msm_fb_copyarea,	/* Copy data from one area to another */
+	.fb_imageblit = msm_fb_imageblit,	/* Draw an image to the display */
+	.fb_rotate = NULL,
+	.fb_sync = NULL,	/* wait for blit idle, optional */
+	.fb_ioctl = msm_fb_ioctl,	/* perform fb specific ioctl (optional) */
+	.fb_mmap = msm_fb_mmap,
+};
+
+static __u32 msm_fb_line_length(__u32 fb_index, __u32 xres, int bpp)
+{
+	/* The adreno GPU hardware requires that the pitch be aligned to
+	   32 pixels for color buffers, so for the cases where the GPU
+	   is writing directly to fb0, the framebuffer pitch
+	   also needs to be 32 pixel aligned */
+
+	if (fb_index == 0)
+		return ALIGN(xres, 32) * bpp;
+	else
+		return xres * bpp;
+}
+
+static int msm_fb_register(struct msm_fb_data_type *mfd)
+{
+	int ret = -ENODEV;
+	int bpp;
+	struct msm_panel_info *panel_info = &mfd->panel_info;
+	struct fb_info *fbi = mfd->fbi;
+	struct fb_fix_screeninfo *fix;
+	struct fb_var_screeninfo *var;
+	int *id;
+	int fbram_offset;
+
+	/*
+	 * fb info initialization
+	 */
+	fix = &fbi->fix;
+	var = &fbi->var;
+
+	fix->type_aux = 0;	/* if type == FB_TYPE_INTERLEAVED_PLANES */
+	fix->visual = FB_VISUAL_TRUECOLOR;	/* True Color */
+	fix->ywrapstep = 0;	/* No support */
+	fix->mmio_start = 0;	/* No MMIO Address */
+	fix->mmio_len = 0;	/* No MMIO Address */
+	fix->accel = FB_ACCEL_NONE;/* FB_ACCEL_MSM needs to be added in fb.h */
+
+	var->xoffset = 0;	/* Offset from virtual to visible */
+	var->yoffset = 0;	/* resolution */
+	var->grayscale = 0;	/* No graylevels */
+	var->nonstd = 0;	/* standard pixel format */
+	var->activate = FB_ACTIVATE_VBL;	/* activate it at vsync */
+	var->height = -1;	/* height of picture in mm */
+	var->width = -1;	/* width of picture in mm */
+	var->accel_flags = 0;	/* acceleration flags */
+	var->sync = 0;	/* see FB_SYNC_* */
+	var->rotate = 0;	/* angle we rotate counter clockwise */
+	mfd->op_enable = FALSE;
+
+	switch (mfd->fb_imgType) {
+	case MDP_RGB_565:
+		fix->type = FB_TYPE_PACKED_PIXELS;
+		fix->xpanstep = 1;
+		fix->ypanstep = 1;
+		var->vmode = FB_VMODE_NONINTERLACED;
+		var->blue.offset = 0;
+		var->green.offset = 5;
+		var->red.offset = 11;
+		var->blue.length = 5;
+		var->green.length = 6;
+		var->red.length = 5;
+		var->blue.msb_right = 0;
+		var->green.msb_right = 0;
+		var->red.msb_right = 0;
+		var->transp.offset = 0;
+		var->transp.length = 0;
+		bpp = 2;
+		break;
+
+	case MDP_RGB_888:
+		fix->type = FB_TYPE_PACKED_PIXELS;
+		fix->xpanstep = 1;
+		fix->ypanstep = 1;
+		var->vmode = FB_VMODE_NONINTERLACED;
+		var->blue.offset = 0;
+		var->green.offset = 8;
+		var->red.offset = 16;
+		var->blue.length = 8;
+		var->green.length = 8;
+		var->red.length = 8;
+		var->blue.msb_right = 0;
+		var->green.msb_right = 0;
+		var->red.msb_right = 0;
+		var->transp.offset = 0;
+		var->transp.length = 0;
+		bpp = 3;
+		break;
+
+	case MDP_ARGB_8888:
+		fix->type = FB_TYPE_PACKED_PIXELS;
+		fix->xpanstep = 1;
+		fix->ypanstep = 1;
+		var->vmode = FB_VMODE_NONINTERLACED;
+		var->blue.offset = 0;
+		var->green.offset = 8;
+		var->red.offset = 16;
+		var->blue.length = 8;
+		var->green.length = 8;
+		var->red.length = 8;
+		var->blue.msb_right = 0;
+		var->green.msb_right = 0;
+		var->red.msb_right = 0;
+		var->transp.offset = 24;
+		var->transp.length = 8;
+		bpp = 4;
+		break;
+
+	case MDP_RGBA_8888:
+		fix->type = FB_TYPE_PACKED_PIXELS;
+		fix->xpanstep = 1;
+		fix->ypanstep = 1;
+		var->vmode = FB_VMODE_NONINTERLACED;
+		var->blue.offset = 8;
+		var->green.offset = 16;
+		var->red.offset = 24;
+		var->blue.length = 8;
+		var->green.length = 8;
+		var->red.length = 8;
+		var->blue.msb_right = 0;
+		var->green.msb_right = 0;
+		var->red.msb_right = 0;
+		var->transp.offset = 0;
+		var->transp.length = 8;
+		bpp = 4;
+		break;
+
+	case MDP_YCRYCB_H2V1:
+		/* ToDo: need to check TV-Out YUV422i framebuffer format */
+		/*       we might need to create new type define */
+		fix->type = FB_TYPE_INTERLEAVED_PLANES;
+		fix->xpanstep = 2;
+		fix->ypanstep = 1;
+		var->vmode = FB_VMODE_NONINTERLACED;
+
+		/* how about R/G/B offset? */
+		var->blue.offset = 0;
+		var->green.offset = 5;
+		var->red.offset = 11;
+		var->blue.length = 5;
+		var->green.length = 6;
+		var->red.length = 5;
+		var->blue.msb_right = 0;
+		var->green.msb_right = 0;
+		var->red.msb_right = 0;
+		var->transp.offset = 0;
+		var->transp.length = 0;
+		bpp = 2;
+		break;
+
+	default:
+		MSM_FB_ERR("msm_fb_init: fb %d unknown image type!\n",
+			   mfd->index);
+		return ret;
+	}
+
+	fix->type = panel_info->is_3d_panel;
+
+	fix->line_length = msm_fb_line_length(mfd->index, panel_info->xres,
+					      bpp);
+	/* calculate smem_len based on max size of two supplied modes */
+	fix->smem_len = roundup(MAX(msm_fb_line_length(mfd->index,
+					       panel_info->xres,
+					       bpp) *
+			    panel_info->yres * mfd->fb_page,
+			    msm_fb_line_length(mfd->index,
+					       panel_info->mode2_xres,
+					       bpp) *
+			    panel_info->mode2_yres * mfd->fb_page), PAGE_SIZE);
+
+	mfd->var_xres = panel_info->xres;
+	mfd->var_yres = panel_info->yres;
+
+	var->pixclock = mfd->panel_info.clk_rate;
+	mfd->var_pixclock = var->pixclock;
+
+	var->xres = panel_info->xres;
+	var->yres = panel_info->yres;
+	var->xres_virtual = panel_info->xres;
+	var->yres_virtual = panel_info->yres * mfd->fb_page;
+	var->bits_per_pixel = bpp * 8;	/* FrameBuffer color depth */
+	if (mfd->dest == DISPLAY_LCD) {
+		var->reserved[4] = panel_info->lcd.refx100 / 100;
+	} else {
+		var->reserved[4] = panel_info->clk_rate /
+			((panel_info->lcdc.h_back_porch +
+			  panel_info->lcdc.h_front_porch +
+			  panel_info->lcdc.h_pulse_width +
+			  panel_info->xres) *
+			 (panel_info->lcdc.v_back_porch +
+			  panel_info->lcdc.v_front_porch +
+			  panel_info->lcdc.v_pulse_width +
+			  panel_info->yres));
+	}
+	/*
+	 * id field for fb app
+	 */
+	id = (int *)&mfd->panel;
+
+#if defined(CONFIG_FB_MSM_MDP22)
+	snprintf(fix->id, sizeof(fix->id), "msmfb22_%x", (__u32) *id);
+#elif defined(CONFIG_FB_MSM_MDP30)
+	snprintf(fix->id, sizeof(fix->id), "msmfb30_%x", (__u32) *id);
+#elif defined(CONFIG_FB_MSM_MDP31)
+	snprintf(fix->id, sizeof(fix->id), "msmfb31_%x", (__u32) *id);
+#elif defined(CONFIG_FB_MSM_MDP40)
+	snprintf(fix->id, sizeof(fix->id), "msmfb40_%x", (__u32) *id);
+#else
+#error "CONFIG_FB_MSM_MDP undefined!"
+#endif
+	fbi->fbops = &msm_fb_ops;
+	fbi->flags = FBINFO_FLAG_DEFAULT;
+	fbi->pseudo_palette = msm_fb_pseudo_palette;
+
+	mfd->ref_cnt = 0;
+	mfd->sw_currently_refreshing = FALSE;
+	mfd->sw_refreshing_enable = TRUE;
+	mfd->panel_power_on = FALSE;
+
+	mfd->pan_waiting = FALSE;
+	init_completion(&mfd->pan_comp);
+	init_completion(&mfd->refresher_comp);
+	sema_init(&mfd->sem, 1);
+
+	init_timer(&mfd->msmfb_no_update_notify_timer);
+	mfd->msmfb_no_update_notify_timer.function =
+			msmfb_no_update_notify_timer_cb;
+	mfd->msmfb_no_update_notify_timer.data = (unsigned long)mfd;
+	init_completion(&mfd->msmfb_update_notify);
+	init_completion(&mfd->msmfb_no_update_notify);
+
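+	/*
+	 * Carve this framebuffer's backing store out of the shared fbram
+	 * pool, aligning the running allocation pointer to a page boundary
+	 * first.
+	 */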
+	fbram_offset = PAGE_ALIGN((int)fbram)-(int)fbram;
+	fbram += fbram_offset;
+	fbram_phys += fbram_offset;
+	fbram_size -= fbram_offset;
+
+	if (fbram_size < fix->smem_len) {
+		printk(KERN_ERR "error: no more framebuffer memory!\n");
+		return -ENOMEM;
+	}
+
+	fbi->screen_base = fbram;
+	fbi->fix.smem_start = (unsigned long)fbram_phys;
+
+	memset(fbi->screen_base, 0x0, fix->smem_len);
+
+	mfd->op_enable = TRUE;
+	mfd->panel_power_on = FALSE;
+
+	/* cursor memory allocation */
+	if (mfd->cursor_update) {
+		mfd->cursor_buf = dma_alloc_coherent(NULL,
+					MDP_CURSOR_SIZE,
+					(dma_addr_t *) &mfd->cursor_buf_phys,
+					GFP_KERNEL);
+		if (!mfd->cursor_buf)
+			mfd->cursor_update = 0;
+	}
+
+	if (mfd->lut_update) {
+		ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+		if (ret)
+			printk(KERN_ERR "%s: fb_alloc_cmap() failed!\n",
+					__func__);
+	}
+
+	if (register_framebuffer(fbi) < 0) {
+		if (mfd->lut_update)
+			fb_dealloc_cmap(&fbi->cmap);
+
+		if (mfd->cursor_buf)
+			dma_free_coherent(NULL,
+				MDP_CURSOR_SIZE,
+				mfd->cursor_buf,
+				(dma_addr_t) mfd->cursor_buf_phys);
+
+		mfd->op_enable = FALSE;
+		return -EPERM;
+	}
+
+	fbram += fix->smem_len;
+	fbram_phys += fix->smem_len;
+	fbram_size -= fix->smem_len;
+
+	MSM_FB_INFO
+	    ("FrameBuffer[%d] %dx%d size=%d bytes is registered successfully!\n",
+	     mfd->index, fbi->var.xres, fbi->var.yres, fbi->fix.smem_len);
+
+#ifdef CONFIG_FB_MSM_LOGO
+	load_565rle_image(INIT_IMAGE_FILE);	/* Flip buffer */
+#endif
+	ret = 0;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	if (mfd->panel_info.type != DTV_PANEL) {
+		mfd->early_suspend.suspend = msmfb_early_suspend;
+		mfd->early_suspend.resume = msmfb_early_resume;
+		mfd->early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB - 2;
+		register_early_suspend(&mfd->early_suspend);
+	}
+#endif
+
+#ifdef MSM_FB_ENABLE_DBGFS
+	{
+		struct dentry *root;
+		struct dentry *sub_dir;
+		char sub_name[2];
+
+		root = msm_fb_get_debugfs_root();
+		if (root != NULL) {
+			sub_name[0] = (char)(mfd->index + 0x30);
+			sub_name[1] = '\0';
+			sub_dir = debugfs_create_dir(sub_name, root);
+		} else {
+			sub_dir = NULL;
+		}
+
+		mfd->sub_dir = sub_dir;
+
+		if (sub_dir) {
+			msm_fb_debugfs_file_create(sub_dir, "op_enable",
+						   (u32 *) &mfd->op_enable);
+			msm_fb_debugfs_file_create(sub_dir, "panel_power_on",
+						   (u32 *) &mfd->
+						   panel_power_on);
+			msm_fb_debugfs_file_create(sub_dir, "ref_cnt",
+						   (u32 *) &mfd->ref_cnt);
+			msm_fb_debugfs_file_create(sub_dir, "fb_imgType",
+						   (u32 *) &mfd->fb_imgType);
+			msm_fb_debugfs_file_create(sub_dir,
+						   "sw_currently_refreshing",
+						   (u32 *) &mfd->
+						   sw_currently_refreshing);
+			msm_fb_debugfs_file_create(sub_dir,
+						   "sw_refreshing_enable",
+						   (u32 *) &mfd->
+						   sw_refreshing_enable);
+
+			msm_fb_debugfs_file_create(sub_dir, "xres",
+						   (u32 *) &mfd->panel_info.
+						   xres);
+			msm_fb_debugfs_file_create(sub_dir, "yres",
+						   (u32 *) &mfd->panel_info.
+						   yres);
+			msm_fb_debugfs_file_create(sub_dir, "bpp",
+						   (u32 *) &mfd->panel_info.
+						   bpp);
+			msm_fb_debugfs_file_create(sub_dir, "type",
+						   (u32 *) &mfd->panel_info.
+						   type);
+			msm_fb_debugfs_file_create(sub_dir, "wait_cycle",
+						   (u32 *) &mfd->panel_info.
+						   wait_cycle);
+			msm_fb_debugfs_file_create(sub_dir, "pdest",
+						   (u32 *) &mfd->panel_info.
+						   pdest);
+			msm_fb_debugfs_file_create(sub_dir, "backbuff",
+						   (u32 *) &mfd->panel_info.
+						   fb_num);
+			msm_fb_debugfs_file_create(sub_dir, "clk_rate",
+						   (u32 *) &mfd->panel_info.
+						   clk_rate);
+			msm_fb_debugfs_file_create(sub_dir, "frame_count",
+						   (u32 *) &mfd->panel_info.
+						   frame_count);
+
+
+			switch (mfd->dest) {
+			case DISPLAY_LCD:
+				msm_fb_debugfs_file_create(sub_dir,
+				"vsync_enable",
+				(u32 *)&mfd->panel_info.lcd.vsync_enable);
+				msm_fb_debugfs_file_create(sub_dir,
+				"refx100",
+				(u32 *) &mfd->panel_info.lcd.refx100);
+				msm_fb_debugfs_file_create(sub_dir,
+				"v_back_porch",
+				(u32 *) &mfd->panel_info.lcd.v_back_porch);
+				msm_fb_debugfs_file_create(sub_dir,
+				"v_front_porch",
+				(u32 *) &mfd->panel_info.lcd.v_front_porch);
+				msm_fb_debugfs_file_create(sub_dir,
+				"v_pulse_width",
+				(u32 *) &mfd->panel_info.lcd.v_pulse_width);
+				msm_fb_debugfs_file_create(sub_dir,
+				"hw_vsync_mode",
+				(u32 *) &mfd->panel_info.lcd.hw_vsync_mode);
+				msm_fb_debugfs_file_create(sub_dir,
+				"vsync_notifier_period", (u32 *)
+				&mfd->panel_info.lcd.vsync_notifier_period);
+				break;
+
+			case DISPLAY_LCDC:
+				msm_fb_debugfs_file_create(sub_dir,
+				"h_back_porch",
+				(u32 *) &mfd->panel_info.lcdc.h_back_porch);
+				msm_fb_debugfs_file_create(sub_dir,
+				"h_front_porch",
+				(u32 *) &mfd->panel_info.lcdc.h_front_porch);
+				msm_fb_debugfs_file_create(sub_dir,
+				"h_pulse_width",
+				(u32 *) &mfd->panel_info.lcdc.h_pulse_width);
+				msm_fb_debugfs_file_create(sub_dir,
+				"v_back_porch",
+				(u32 *) &mfd->panel_info.lcdc.v_back_porch);
+				msm_fb_debugfs_file_create(sub_dir,
+				"v_front_porch",
+				(u32 *) &mfd->panel_info.lcdc.v_front_porch);
+				msm_fb_debugfs_file_create(sub_dir,
+				"v_pulse_width",
+				(u32 *) &mfd->panel_info.lcdc.v_pulse_width);
+				msm_fb_debugfs_file_create(sub_dir,
+				"border_clr",
+				(u32 *) &mfd->panel_info.lcdc.border_clr);
+				msm_fb_debugfs_file_create(sub_dir,
+				"underflow_clr",
+				(u32 *) &mfd->panel_info.lcdc.underflow_clr);
+				msm_fb_debugfs_file_create(sub_dir,
+				"hsync_skew",
+				(u32 *) &mfd->panel_info.lcdc.hsync_skew);
+				break;
+
+			default:
+				break;
+			}
+		}
+	}
+#endif /* MSM_FB_ENABLE_DBGFS */
+
+	return ret;
+}
+
+static int msm_fb_open(struct fb_info *info, int user)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	int result;
+
+	result = pm_runtime_get_sync(info->dev);
+
+	if (result < 0)
+		printk(KERN_ERR "pm_runtime: failed to wake up\n");
+
+	if (!mfd->ref_cnt) {
+		mdp_set_dma_pan_info(info, NULL, TRUE);
+
+		if (msm_fb_blank_sub(FB_BLANK_UNBLANK, info, mfd->op_enable)) {
+			printk(KERN_ERR "msm_fb_open: can't turn on display!\n");
+			pm_runtime_put(info->dev);
+			return -EPERM;
+		}
+	}
+
+	mfd->ref_cnt++;
+	return 0;
+}
+
+static int msm_fb_release(struct fb_info *info, int user)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	int ret = 0;
+
+	if (!mfd->ref_cnt) {
+		MSM_FB_INFO("msm_fb_release: try to close unopened fb %d!\n",
+			    mfd->index);
+		return -EINVAL;
+	}
+
+	mfd->ref_cnt--;
+
+	if (!mfd->ref_cnt) {
+		ret = msm_fb_blank_sub(FB_BLANK_POWERDOWN, info,
+				       mfd->op_enable);
+		if (ret != 0) {
+			printk(KERN_ERR "msm_fb_release: can't turn off display!\n");
+			return ret;
+		}
+	}
+
+	pm_runtime_put(info->dev);
+	return ret;
+}
+
+DEFINE_SEMAPHORE(msm_fb_pan_sem);
+
+static int msm_fb_pan_display(struct fb_var_screeninfo *var,
+			      struct fb_info *info)
+{
+	struct mdp_dirty_region dirty;
+	struct mdp_dirty_region *dirtyPtr = NULL;
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+
+	if ((!mfd->op_enable) || (!mfd->panel_power_on))
+		return -EPERM;
+
+	if (var->xoffset > (info->var.xres_virtual - info->var.xres))
+		return -EINVAL;
+
+	if (var->yoffset > (info->var.yres_virtual - info->var.yres))
+		return -EINVAL;
+
+	if (info->fix.xpanstep)
+		info->var.xoffset =
+		    (var->xoffset / info->fix.xpanstep) * info->fix.xpanstep;
+
+	if (info->fix.ypanstep)
+		info->var.yoffset =
+		    (var->yoffset / info->fix.ypanstep) * info->fix.ypanstep;
+
+	/* "UPDT" */
+	if (var->reserved[0] == 0x54445055) {
+		dirty.xoffset = var->reserved[1] & 0xffff;
+		dirty.yoffset = (var->reserved[1] >> 16) & 0xffff;
+
+		if ((var->reserved[2] & 0xffff) <= dirty.xoffset)
+			return -EINVAL;
+		if (((var->reserved[2] >> 16) & 0xffff) <= dirty.yoffset)
+			return -EINVAL;
+
+		dirty.width = (var->reserved[2] & 0xffff) - dirty.xoffset;
+		dirty.height =
+		    ((var->reserved[2] >> 16) & 0xffff) - dirty.yoffset;
+		info->var.yoffset = var->yoffset;
+
+		if (dirty.xoffset < 0)
+			return -EINVAL;
+
+		if (dirty.yoffset < 0)
+			return -EINVAL;
+
+		if ((dirty.xoffset + dirty.width) > info->var.xres)
+			return -EINVAL;
+
+		if ((dirty.yoffset + dirty.height) > info->var.yres)
+			return -EINVAL;
+
+		if ((dirty.width <= 0) || (dirty.height <= 0))
+			return -EINVAL;
+
+		dirtyPtr = &dirty;
+	}
+	complete(&mfd->msmfb_update_notify);
+	mutex_lock(&msm_fb_notify_update_sem);
+	if (mfd->msmfb_no_update_notify_timer.function)
+		del_timer(&mfd->msmfb_no_update_notify_timer);
+
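+	/*
+	 * Re-arm the "no update" notifier to fire HZ jiffies (~1 second)
+	 * after this pan if no further updates arrive.
+	 */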
+	mfd->msmfb_no_update_notify_timer.expires =
+				jiffies + ((1000 * HZ) / 1000);
+	add_timer(&mfd->msmfb_no_update_notify_timer);
+	mutex_unlock(&msm_fb_notify_update_sem);
+
+	down(&msm_fb_pan_sem);
+	mdp_set_dma_pan_info(info, dirtyPtr,
+			     (var->activate == FB_ACTIVATE_VBL));
+	mdp_dma_pan_update(info);
+	up(&msm_fb_pan_sem);
+
+	++mfd->panel_info.frame_count;
+	return 0;
+}
+
+static int msm_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+
+	if (var->rotate != FB_ROTATE_UR)
+		return -EINVAL;
+	if (var->grayscale != info->var.grayscale)
+		return -EINVAL;
+
+	switch (var->bits_per_pixel) {
+	case 16:
+		if ((var->green.offset != 5) ||
+			!((var->blue.offset == 11)
+				|| (var->blue.offset == 0)) ||
+			!((var->red.offset == 11)
+				|| (var->red.offset == 0)) ||
+			(var->blue.length != 5) ||
+			(var->green.length != 6) ||
+			(var->red.length != 5) ||
+			(var->blue.msb_right != 0) ||
+			(var->green.msb_right != 0) ||
+			(var->red.msb_right != 0) ||
+			(var->transp.offset != 0) ||
+			(var->transp.length != 0))
+				return -EINVAL;
+		break;
+
+	case 24:
+		if ((var->blue.offset != 0) ||
+			(var->green.offset != 8) ||
+			(var->red.offset != 16) ||
+			(var->blue.length != 8) ||
+			(var->green.length != 8) ||
+			(var->red.length != 8) ||
+			(var->blue.msb_right != 0) ||
+			(var->green.msb_right != 0) ||
+			(var->red.msb_right != 0) ||
+			!(((var->transp.offset == 0) &&
+				(var->transp.length == 0)) ||
+			  ((var->transp.offset == 24) &&
+				(var->transp.length == 8))))
+				return -EINVAL;
+		break;
+
+	case 32:
+		/* Figure out if the user meant RGBA or ARGB
+		   and verify the position of the RGB components */
+
+		if (var->transp.offset == 24) {
+			if ((var->blue.offset != 0) ||
+			    (var->green.offset != 8) ||
+			    (var->red.offset != 16))
+				return -EINVAL;
+		} else if (var->transp.offset == 0) {
+			if ((var->blue.offset != 8) ||
+			    (var->green.offset != 16) ||
+			    (var->red.offset != 24))
+				return -EINVAL;
+		} else
+			return -EINVAL;
+
+		/* Check the common values for both RGBA and ARGB */
+
+		if ((var->blue.length != 8) ||
+		    (var->green.length != 8) ||
+		    (var->red.length != 8) ||
+		    (var->transp.length != 8) ||
+		    (var->blue.msb_right != 0) ||
+		    (var->green.msb_right != 0) ||
+		    (var->red.msb_right != 0))
+			return -EINVAL;
+
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if ((var->xres_virtual <= 0) || (var->yres_virtual <= 0))
+		return -EINVAL;
+
+	if (info->fix.smem_len <
+		(var->xres_virtual*var->yres_virtual*(var->bits_per_pixel/8)))
+		return -EINVAL;
+
+	if ((var->xres == 0) || (var->yres == 0))
+		return -EINVAL;
+
+	if ((var->xres > MAX(mfd->panel_info.xres,
+			     mfd->panel_info.mode2_xres)) ||
+		(var->yres > MAX(mfd->panel_info.yres,
+				 mfd->panel_info.mode2_yres)))
+		return -EINVAL;
+
+	if (var->xoffset > (var->xres_virtual - var->xres))
+		return -EINVAL;
+
+	if (var->yoffset > (var->yres_virtual - var->yres))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int msm_fb_set_par(struct fb_info *info)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	struct fb_var_screeninfo *var = &info->var;
+	int old_imgType;
+	int blank = 0;
+
+	old_imgType = mfd->fb_imgType;
+	switch (var->bits_per_pixel) {
+	case 16:
+		if (var->red.offset == 0)
+			mfd->fb_imgType = MDP_BGR_565;
+		else
+			mfd->fb_imgType = MDP_RGB_565;
+		break;
+
+	case 24:
+		if ((var->transp.offset == 0) && (var->transp.length == 0))
+			mfd->fb_imgType = MDP_RGB_888;
+		else if ((var->transp.offset == 24) &&
+				(var->transp.length == 8)) {
+			mfd->fb_imgType = MDP_ARGB_8888;
+			info->var.bits_per_pixel = 32;
+		}
+		break;
+
+	case 32:
+		if (var->transp.offset == 24)
+			mfd->fb_imgType = MDP_ARGB_8888;
+		else
+			mfd->fb_imgType = MDP_RGBA_8888;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
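+	/*
+	 * Power-cycle the panel below if the pixel clock changed or, for
+	 * hw-refresh panels, if the image type or geometry changed.
+	 */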
+	if ((mfd->var_pixclock != var->pixclock) ||
+		(mfd->hw_refresh && ((mfd->fb_imgType != old_imgType) ||
+				(mfd->var_pixclock != var->pixclock) ||
+				(mfd->var_xres != var->xres) ||
+				(mfd->var_yres != var->yres)))) {
+		mfd->var_xres = var->xres;
+		mfd->var_yres = var->yres;
+		mfd->var_pixclock = var->pixclock;
+		blank = 1;
+	}
+	mfd->fbi->fix.line_length = msm_fb_line_length(mfd->index, var->xres,
+						       var->bits_per_pixel/8);
+
+	if (blank) {
+		msm_fb_blank_sub(FB_BLANK_POWERDOWN, info, mfd->op_enable);
+		msm_fb_blank_sub(FB_BLANK_UNBLANK, info, mfd->op_enable);
+	}
+
+	return 0;
+}
+
+static int msm_fb_stop_sw_refresher(struct msm_fb_data_type *mfd)
+{
+	if (mfd->hw_refresh)
+		return -EPERM;
+
+	if (mfd->sw_currently_refreshing) {
+		down(&mfd->sem);
+		mfd->sw_currently_refreshing = FALSE;
+		up(&mfd->sem);
+
+		/* wait until the refresher finishes the last job */
+		wait_for_completion_killable(&mfd->refresher_comp);
+	}
+
+	return 0;
+}
+
+int msm_fb_resume_sw_refresher(struct msm_fb_data_type *mfd)
+{
+	boolean do_refresh;
+
+	if (mfd->hw_refresh)
+		return -EPERM;
+
+	down(&mfd->sem);
+	if ((!mfd->sw_currently_refreshing) && (mfd->sw_refreshing_enable)) {
+		do_refresh = TRUE;
+		mfd->sw_currently_refreshing = TRUE;
+	} else {
+		do_refresh = FALSE;
+	}
+	up(&mfd->sem);
+
+	if (do_refresh)
+		mdp_refresh_screen((unsigned long)mfd);
+
+	return 0;
+}
+
+#if defined CONFIG_FB_MSM_MDP31
+static int mdp_blit_split_height(struct fb_info *info,
+				struct mdp_blit_req *req)
+{
+	int ret;
+	struct mdp_blit_req splitreq;
+	int s_x_0, s_x_1, s_w_0, s_w_1, s_y_0, s_y_1, s_h_0, s_h_1;
+	int d_x_0, d_x_1, d_w_0, d_w_1, d_y_0, d_y_1, d_h_0, d_h_1;
+
+	splitreq = *req;
+	/* break dest ROI at height */
+	d_x_0 = d_x_1 = req->dst_rect.x;
+	d_w_0 = d_w_1 = req->dst_rect.w;
+	d_y_0 = req->dst_rect.y;
+	if (req->dst_rect.h % 32 == 3)
+		d_h_1 = (req->dst_rect.h - 3) / 2 - 1;
+	else if (req->dst_rect.h % 32 == 2)
+		d_h_1 = (req->dst_rect.h - 2) / 2 - 6;
+	else
+		d_h_1 = (req->dst_rect.h - 1) / 2 - 1;
+	d_h_0 = req->dst_rect.h - d_h_1;
+	d_y_1 = d_y_0 + d_h_0;
+	if (req->dst_rect.h == 3) {
+		d_h_1 = 2;
+		d_h_0 = 2;
+		d_y_1 = d_y_0 + 1;
+	}
+
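+	/*
+	 * The low three flag bits encode rotation/flip. For no transform or a
+	 * pure 90-degree rotation the first source half feeds the first dest
+	 * half; otherwise the source halves map to the dest halves swapped.
+	 */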
+	/* blit first region */
+	if (((splitreq.flags & 0x07) == 0x04) ||
+		((splitreq.flags & 0x07) == 0x0)) {
+
+		if (splitreq.flags & MDP_ROT_90) {
+			s_y_0 = s_y_1 = req->src_rect.y;
+			s_h_0 = s_h_1 = req->src_rect.h;
+			s_x_0 = req->src_rect.x;
+			s_w_1 = (req->src_rect.w * d_h_1) / req->dst_rect.h;
+			s_w_0 = req->src_rect.w - s_w_1;
+			s_x_1 = s_x_0 + s_w_0;
+			if (d_h_1 >= 8 * s_w_1) {
+				s_w_1++;
+				s_x_1--;
+			}
+		} else {
+			s_x_0 = s_x_1 = req->src_rect.x;
+			s_w_0 = s_w_1 = req->src_rect.w;
+			s_y_0 = req->src_rect.y;
+			s_h_1 = (req->src_rect.h * d_h_1) / req->dst_rect.h;
+			s_h_0 = req->src_rect.h - s_h_1;
+			s_y_1 = s_y_0 + s_h_0;
+			if (d_h_1 >= 8 * s_h_1) {
+				s_h_1++;
+				s_y_1--;
+			}
+		}
+
+		splitreq.src_rect.h = s_h_0;
+		splitreq.src_rect.y = s_y_0;
+		splitreq.dst_rect.h = d_h_0;
+		splitreq.dst_rect.y = d_y_0;
+		splitreq.src_rect.x = s_x_0;
+		splitreq.src_rect.w = s_w_0;
+		splitreq.dst_rect.x = d_x_0;
+		splitreq.dst_rect.w = d_w_0;
+	} else {
+
+		if (splitreq.flags & MDP_ROT_90) {
+			s_y_0 = s_y_1 = req->src_rect.y;
+			s_h_0 = s_h_1 = req->src_rect.h;
+			s_x_0 = req->src_rect.x;
+			s_w_1 = (req->src_rect.w * d_h_0) / req->dst_rect.h;
+			s_w_0 = req->src_rect.w - s_w_1;
+			s_x_1 = s_x_0 + s_w_0;
+			if (d_h_0 >= 8 * s_w_1) {
+				s_w_1++;
+				s_x_1--;
+			}
+		} else {
+			s_x_0 = s_x_1 = req->src_rect.x;
+			s_w_0 = s_w_1 = req->src_rect.w;
+			s_y_0 = req->src_rect.y;
+			s_h_1 = (req->src_rect.h * d_h_0) / req->dst_rect.h;
+			s_h_0 = req->src_rect.h - s_h_1;
+			s_y_1 = s_y_0 + s_h_0;
+			if (d_h_0 >= 8 * s_h_1) {
+				s_h_1++;
+				s_y_1--;
+			}
+		}
+		splitreq.src_rect.h = s_h_0;
+		splitreq.src_rect.y = s_y_0;
+		splitreq.dst_rect.h = d_h_1;
+		splitreq.dst_rect.y = d_y_1;
+		splitreq.src_rect.x = s_x_0;
+		splitreq.src_rect.w = s_w_0;
+		splitreq.dst_rect.x = d_x_1;
+		splitreq.dst_rect.w = d_w_1;
+	}
+	ret = mdp_ppp_blit(info, &splitreq);
+	if (ret)
+		return ret;
+
+	/* blit second region */
+	if (((splitreq.flags & 0x07) == 0x04) ||
+		((splitreq.flags & 0x07) == 0x0)) {
+		splitreq.src_rect.h = s_h_1;
+		splitreq.src_rect.y = s_y_1;
+		splitreq.dst_rect.h = d_h_1;
+		splitreq.dst_rect.y = d_y_1;
+		splitreq.src_rect.x = s_x_1;
+		splitreq.src_rect.w = s_w_1;
+		splitreq.dst_rect.x = d_x_1;
+		splitreq.dst_rect.w = d_w_1;
+	} else {
+		splitreq.src_rect.h = s_h_1;
+		splitreq.src_rect.y = s_y_1;
+		splitreq.dst_rect.h = d_h_0;
+		splitreq.dst_rect.y = d_y_0;
+		splitreq.src_rect.x = s_x_1;
+		splitreq.src_rect.w = s_w_1;
+		splitreq.dst_rect.x = d_x_0;
+		splitreq.dst_rect.w = d_w_0;
+	}
+	ret = mdp_ppp_blit(info, &splitreq);
+	return ret;
+}
+#endif
+
+int mdp_blit(struct fb_info *info, struct mdp_blit_req *req)
+{
+	int ret;
+#if defined CONFIG_FB_MSM_MDP31 || defined CONFIG_FB_MSM_MDP30
+	unsigned int remainder = 0, is_bpp_4 = 0;
+	struct mdp_blit_req splitreq;
+	int s_x_0, s_x_1, s_w_0, s_w_1, s_y_0, s_y_1, s_h_0, s_h_1;
+	int d_x_0, d_x_1, d_w_0, d_w_1, d_y_0, d_y_1, d_h_0, d_h_1;
+
+	if (req->flags & MDP_ROT_90) {
+		if (((req->dst_rect.h == 1) && ((req->src_rect.w != 1) ||
+			(req->dst_rect.w != req->src_rect.h))) ||
+			((req->dst_rect.w == 1) && ((req->src_rect.h != 1) ||
+			(req->dst_rect.h != req->src_rect.w)))) {
+			printk(KERN_ERR "mdp_ppp: error scaling when size is 1!\n");
+			return -EINVAL;
+		}
+	} else {
+		if (((req->dst_rect.w == 1) && ((req->src_rect.w != 1) ||
+			(req->dst_rect.h != req->src_rect.h))) ||
+			((req->dst_rect.h == 1) && ((req->src_rect.h != 1) ||
+			(req->dst_rect.w != req->src_rect.w)))) {
+			printk(KERN_ERR "mdp_ppp: error scaling when size is 1!\n");
+			return -EINVAL;
+		}
+	}
+#endif
+	if (unlikely(req->src_rect.h == 0 || req->src_rect.w == 0)) {
+		printk(KERN_ERR "mdp_ppp: src img of zero size!\n");
+		return -EINVAL;
+	}
+	if (unlikely(req->dst_rect.h == 0 || req->dst_rect.w == 0))
+		return 0;
+
+#if defined CONFIG_FB_MSM_MDP31
+	/* MDP width split workaround */
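+	/*
+	 * Destination widths whose remainder mod 32 falls on certain values
+	 * trip an MDP 3.1 limitation, so the blit is split into two
+	 * side-by-side halves whose widths avoid the problem residues; each
+	 * half may additionally be split in height below.
+	 */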
+	remainder = (req->dst_rect.w)%32;
+	ret = mdp_get_bytes_per_pixel(req->dst.format,
+					(struct msm_fb_data_type *)info->par);
+	if (ret <= 0) {
+		printk(KERN_ERR "mdp_ppp: incorrect bpp!\n");
+		return -EINVAL;
+	}
+	is_bpp_4 = (ret == 4) ? 1 : 0;
+
+	if ((is_bpp_4 && (remainder == 6 || remainder == 14 ||
+	remainder == 22 || remainder == 30)) || remainder == 3 ||
+	(remainder == 1 && req->dst_rect.w != 1) ||
+	(remainder == 2 && req->dst_rect.w != 2)) {
+		/* make a new request as provided by the user */
+		splitreq = *req;
+
+		/* break dest ROI at width */
+		d_y_0 = d_y_1 = req->dst_rect.y;
+		d_h_0 = d_h_1 = req->dst_rect.h;
+		d_x_0 = req->dst_rect.x;
+
+		if (remainder == 14)
+			d_w_1 = (req->dst_rect.w - 14) / 2 + 4;
+		else if (remainder == 22)
+			d_w_1 = (req->dst_rect.w - 22) / 2 + 10;
+		else if (remainder == 30)
+			d_w_1 = (req->dst_rect.w - 30) / 2 + 10;
+		else if (remainder == 6)
+			d_w_1 = req->dst_rect.w / 2 - 1;
+		else if (remainder == 3)
+			d_w_1 = (req->dst_rect.w - 3) / 2 - 1;
+		else if (remainder == 2)
+			d_w_1 = (req->dst_rect.w - 2) / 2 - 6;
+		else
+			d_w_1 = (req->dst_rect.w - 1) / 2 - 1;
+		d_w_0 = req->dst_rect.w - d_w_1;
+		d_x_1 = d_x_0 + d_w_0;
+		if (req->dst_rect.w == 3) {
+			d_w_1 = 2;
+			d_w_0 = 2;
+			d_x_1 = d_x_0 + 1;
+		}
+
+		/* blit first region */
+		if (((splitreq.flags & 0x07) == 0x07) ||
+			((splitreq.flags & 0x07) == 0x0)) {
+
+			if (splitreq.flags & MDP_ROT_90) {
+				s_x_0 = s_x_1 = req->src_rect.x;
+				s_w_0 = s_w_1 = req->src_rect.w;
+				s_y_0 = req->src_rect.y;
+				s_h_1 = (req->src_rect.h * d_w_1) /
+					req->dst_rect.w;
+				s_h_0 = req->src_rect.h - s_h_1;
+				s_y_1 = s_y_0 + s_h_0;
+				if (d_w_1 >= 8 * s_h_1) {
+					s_h_1++;
+					s_y_1--;
+				}
+			} else {
+				s_y_0 = s_y_1 = req->src_rect.y;
+				s_h_0 = s_h_1 = req->src_rect.h;
+				s_x_0 = req->src_rect.x;
+				s_w_1 = (req->src_rect.w * d_w_1) /
+					req->dst_rect.w;
+				s_w_0 = req->src_rect.w - s_w_1;
+				s_x_1 = s_x_0 + s_w_0;
+				if (d_w_1 >= 8 * s_w_1) {
+					s_w_1++;
+					s_x_1--;
+				}
+			}
+
+			splitreq.src_rect.h = s_h_0;
+			splitreq.src_rect.y = s_y_0;
+			splitreq.dst_rect.h = d_h_0;
+			splitreq.dst_rect.y = d_y_0;
+			splitreq.src_rect.x = s_x_0;
+			splitreq.src_rect.w = s_w_0;
+			splitreq.dst_rect.x = d_x_0;
+			splitreq.dst_rect.w = d_w_0;
+		} else {
+			if (splitreq.flags & MDP_ROT_90) {
+				s_x_0 = s_x_1 = req->src_rect.x;
+				s_w_0 = s_w_1 = req->src_rect.w;
+				s_y_0 = req->src_rect.y;
+				s_h_1 = (req->src_rect.h * d_w_0) /
+					req->dst_rect.w;
+				s_h_0 = req->src_rect.h - s_h_1;
+				s_y_1 = s_y_0 + s_h_0;
+				if (d_w_0 >= 8 * s_h_1) {
+					s_h_1++;
+					s_y_1--;
+				}
+			} else {
+				s_y_0 = s_y_1 = req->src_rect.y;
+				s_h_0 = s_h_1 = req->src_rect.h;
+				s_x_0 = req->src_rect.x;
+				s_w_1 = (req->src_rect.w * d_w_0) /
+					req->dst_rect.w;
+				s_w_0 = req->src_rect.w - s_w_1;
+				s_x_1 = s_x_0 + s_w_0;
+				if (d_w_0 >= 8 * s_w_1) {
+					s_w_1++;
+					s_x_1--;
+				}
+			}
+			splitreq.src_rect.h = s_h_0;
+			splitreq.src_rect.y = s_y_0;
+			splitreq.dst_rect.h = d_h_1;
+			splitreq.dst_rect.y = d_y_1;
+			splitreq.src_rect.x = s_x_0;
+			splitreq.src_rect.w = s_w_0;
+			splitreq.dst_rect.x = d_x_1;
+			splitreq.dst_rect.w = d_w_1;
+		}
+
+		if ((splitreq.dst_rect.h % 32 == 3) ||
+			((req->dst_rect.h % 32) == 1 && req->dst_rect.h != 1) ||
+			((req->dst_rect.h % 32) == 2 && req->dst_rect.h != 2))
+			ret = mdp_blit_split_height(info, &splitreq);
+		else
+			ret = mdp_ppp_blit(info, &splitreq);
+		if (ret)
+			return ret;
+		/* blit second region */
+		if (((splitreq.flags & 0x07) == 0x07) ||
+			((splitreq.flags & 0x07) == 0x0)) {
+			splitreq.src_rect.h = s_h_1;
+			splitreq.src_rect.y = s_y_1;
+			splitreq.dst_rect.h = d_h_1;
+			splitreq.dst_rect.y = d_y_1;
+			splitreq.src_rect.x = s_x_1;
+			splitreq.src_rect.w = s_w_1;
+			splitreq.dst_rect.x = d_x_1;
+			splitreq.dst_rect.w = d_w_1;
+		} else {
+			splitreq.src_rect.h = s_h_1;
+			splitreq.src_rect.y = s_y_1;
+			splitreq.dst_rect.h = d_h_0;
+			splitreq.dst_rect.y = d_y_0;
+			splitreq.src_rect.x = s_x_1;
+			splitreq.src_rect.w = s_w_1;
+			splitreq.dst_rect.x = d_x_0;
+			splitreq.dst_rect.w = d_w_0;
+		}
+		if (((splitreq.dst_rect.h % 32) == 3) ||
+			((req->dst_rect.h % 32) == 1 && req->dst_rect.h != 1) ||
+			((req->dst_rect.h % 32) == 2 && req->dst_rect.h != 2))
+			ret = mdp_blit_split_height(info, &splitreq);
+		else
+			ret = mdp_ppp_blit(info, &splitreq);
+		if (ret)
+			return ret;
+	} else if ((req->dst_rect.h % 32) == 3 ||
+		((req->dst_rect.h % 32) == 1 && req->dst_rect.h != 1) ||
+		((req->dst_rect.h % 32) == 2 && req->dst_rect.h != 2))
+		ret = mdp_blit_split_height(info, req);
+	else
+		ret = mdp_ppp_blit(info, req);
+	return ret;
+#elif defined CONFIG_FB_MSM_MDP30
+	/* MDP width split workaround */
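+	/*
+	 * Same idea for MDP 3.0: 4-bpp destinations whose width is 6 or 14
+	 * mod 16 are split into two horizontal halves; no height split is
+	 * needed here.
+	 */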
+	remainder = (req->dst_rect.w)%16;
+	ret = mdp_get_bytes_per_pixel(req->dst.format,
+					(struct msm_fb_data_type *)info->par);
+	if (ret <= 0) {
+		printk(KERN_ERR "mdp_ppp: incorrect bpp!\n");
+		return -EINVAL;
+	}
+	is_bpp_4 = (ret == 4) ? 1 : 0;
+
+	if ((is_bpp_4 && (remainder == 6 || remainder == 14))) {
+
+		/* make a new request as provided by the user */
+		splitreq = *req;
+
+		/* break dest ROI at width */
+		d_y_0 = d_y_1 = req->dst_rect.y;
+		d_h_0 = d_h_1 = req->dst_rect.h;
+		d_x_0 = req->dst_rect.x;
+
+		if (remainder == 14 || remainder == 6)
+			d_w_1 = req->dst_rect.w / 2;
+		else
+			d_w_1 = (req->dst_rect.w - 1) / 2 - 1;
+
+		d_w_0 = req->dst_rect.w - d_w_1;
+		d_x_1 = d_x_0 + d_w_0;
+
+		/* blit first region */
+		if (((splitreq.flags & 0x07) == 0x07) ||
+			((splitreq.flags & 0x07) == 0x0)) {
+
+			if (splitreq.flags & MDP_ROT_90) {
+				s_x_0 = s_x_1 = req->src_rect.x;
+				s_w_0 = s_w_1 = req->src_rect.w;
+				s_y_0 = req->src_rect.y;
+				s_h_1 = (req->src_rect.h * d_w_1) /
+					req->dst_rect.w;
+				s_h_0 = req->src_rect.h - s_h_1;
+				s_y_1 = s_y_0 + s_h_0;
+				if (d_w_1 >= 8 * s_h_1) {
+					s_h_1++;
+					s_y_1--;
+				}
+			} else {
+				s_y_0 = s_y_1 = req->src_rect.y;
+				s_h_0 = s_h_1 = req->src_rect.h;
+				s_x_0 = req->src_rect.x;
+				s_w_1 = (req->src_rect.w * d_w_1) /
+					req->dst_rect.w;
+				s_w_0 = req->src_rect.w - s_w_1;
+				s_x_1 = s_x_0 + s_w_0;
+				if (d_w_1 >= 8 * s_w_1) {
+					s_w_1++;
+					s_x_1--;
+				}
+			}
+
+			splitreq.src_rect.h = s_h_0;
+			splitreq.src_rect.y = s_y_0;
+			splitreq.dst_rect.h = d_h_0;
+			splitreq.dst_rect.y = d_y_0;
+			splitreq.src_rect.x = s_x_0;
+			splitreq.src_rect.w = s_w_0;
+			splitreq.dst_rect.x = d_x_0;
+			splitreq.dst_rect.w = d_w_0;
+		} else {
+			if (splitreq.flags & MDP_ROT_90) {
+				s_x_0 = s_x_1 = req->src_rect.x;
+				s_w_0 = s_w_1 = req->src_rect.w;
+				s_y_0 = req->src_rect.y;
+				s_h_1 = (req->src_rect.h * d_w_0) /
+					req->dst_rect.w;
+				s_h_0 = req->src_rect.h - s_h_1;
+				s_y_1 = s_y_0 + s_h_0;
+				if (d_w_0 >= 8 * s_h_1) {
+					s_h_1++;
+					s_y_1--;
+				}
+			} else {
+				s_y_0 = s_y_1 = req->src_rect.y;
+				s_h_0 = s_h_1 = req->src_rect.h;
+				s_x_0 = req->src_rect.x;
+				s_w_1 = (req->src_rect.w * d_w_0) /
+					req->dst_rect.w;
+				s_w_0 = req->src_rect.w - s_w_1;
+				s_x_1 = s_x_0 + s_w_0;
+				if (d_w_0 >= 8 * s_w_1) {
+					s_w_1++;
+					s_x_1--;
+				}
+			}
+			splitreq.src_rect.h = s_h_0;
+			splitreq.src_rect.y = s_y_0;
+			splitreq.dst_rect.h = d_h_1;
+			splitreq.dst_rect.y = d_y_1;
+			splitreq.src_rect.x = s_x_0;
+			splitreq.src_rect.w = s_w_0;
+			splitreq.dst_rect.x = d_x_1;
+			splitreq.dst_rect.w = d_w_1;
+		}
+
+		/* No need to split in height */
+		ret = mdp_ppp_blit(info, &splitreq);
+
+		if (ret)
+			return ret;
+
+		/* blit second region */
+		if (((splitreq.flags & 0x07) == 0x07) ||
+			((splitreq.flags & 0x07) == 0x0)) {
+			splitreq.src_rect.h = s_h_1;
+			splitreq.src_rect.y = s_y_1;
+			splitreq.dst_rect.h = d_h_1;
+			splitreq.dst_rect.y = d_y_1;
+			splitreq.src_rect.x = s_x_1;
+			splitreq.src_rect.w = s_w_1;
+			splitreq.dst_rect.x = d_x_1;
+			splitreq.dst_rect.w = d_w_1;
+		} else {
+			splitreq.src_rect.h = s_h_1;
+			splitreq.src_rect.y = s_y_1;
+			splitreq.dst_rect.h = d_h_0;
+			splitreq.dst_rect.y = d_y_0;
+			splitreq.src_rect.x = s_x_1;
+			splitreq.src_rect.w = s_w_1;
+			splitreq.dst_rect.x = d_x_0;
+			splitreq.dst_rect.w = d_w_0;
+		}
+
+		/* No need to split in height ... just width */
+		ret = mdp_ppp_blit(info, &splitreq);
+
+		if (ret)
+			return ret;
+
+	} else
+		ret = mdp_ppp_blit(info, req);
+	return ret;
+#else
+	ret = mdp_ppp_blit(info, req);
+	return ret;
+#endif
+}
+
+typedef void (*msm_dma_barrier_function_pointer) (void *, size_t);
+
+static inline void msm_fb_dma_barrier_for_rect(struct fb_info *info,
+			struct mdp_img *img, struct mdp_rect *rect,
+			msm_dma_barrier_function_pointer dma_barrier_fp
+			)
+{
+	/*
+	 * Compute the start and end addresses of the rectangles.
+	 * NOTE: As currently implemented, the data between
+	 *       the end of one row and the start of the next is
+	 *       included in the address range rather than
+	 *       doing multiple calls for each row.
+	 */
+	unsigned long start;
+	size_t size;
+	char * const pmem_start = info->screen_base;
+	int bytes_per_pixel = mdp_get_bytes_per_pixel(img->format,
+					(struct msm_fb_data_type *)info->par);
+	if (bytes_per_pixel <= 0) {
+		printk(KERN_ERR "%s incorrect bpp!\n", __func__);
+		return;
+	}
+	start = (unsigned long)pmem_start + img->offset +
+		(img->width * rect->y + rect->x) * bytes_per_pixel;
+	size  = (rect->h * img->width + rect->w) * bytes_per_pixel;
+	(*dma_barrier_fp) ((void *) start, size);
+
+}
+
+static inline void msm_dma_nc_pre(void)
+{
+	dmb();
+}
+static inline void msm_dma_wt_pre(void)
+{
+	dmb();
+}
+static inline void msm_dma_todevice_wb_pre(void *start, size_t size)
+{
+	dma_cache_pre_ops(start, size, DMA_TO_DEVICE);
+}
+
+static inline void msm_dma_fromdevice_wb_pre(void *start, size_t size)
+{
+	dma_cache_pre_ops(start, size, DMA_FROM_DEVICE);
+}
+
+static inline void msm_dma_nc_post(void)
+{
+	dmb();
+}
+
+static inline void msm_dma_fromdevice_wt_post(void *start, size_t size)
+{
+	dma_cache_post_ops(start, size, DMA_FROM_DEVICE);
+}
+
+static inline void msm_dma_todevice_wb_post(void *start, size_t size)
+{
+	dma_cache_post_ops(start, size, DMA_TO_DEVICE);
+}
+
+static inline void msm_dma_fromdevice_wb_post(void *start, size_t size)
+{
+	dma_cache_post_ops(start, size, DMA_FROM_DEVICE);
+}
+
+/*
+ * Do the write barriers required to guarantee data is committed to RAM
+ * (from CPU cache or internal buffers) before a DMA operation starts.
+ * NOTE: As currently implemented, the data between
+ *       the end of one row and the start of the next is
+ *       included in the address range rather than
+ *       doing multiple calls for each row.
+ */
+static void msm_fb_ensure_memory_coherency_before_dma(struct fb_info *info,
+		struct mdp_blit_req *req_list,
+		int req_list_count)
+{
+#ifdef CONFIG_ARCH_QSD8X50
+	int i;
+
+	/*
+	 * Normally, do the requested barriers for each address
+	 * range that corresponds to a rectangle.
+	 *
+	 * But if at least one write barrier is requested for data
+	 * going to or from the device but no address range is
+	 * needed for that barrier, then do the barrier, but do it
+	 * only once, no matter how many requests there are.
+	 */
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	switch (mfd->mdp_fb_page_protection)	{
+	default:
+	case MDP_FB_PAGE_PROTECTION_NONCACHED:
+	case MDP_FB_PAGE_PROTECTION_WRITECOMBINE:
+		/*
+		 * The following barrier is only done at most once,
+		 * since further calls would be redundant.
+		 */
+		for (i = 0; i < req_list_count; i++) {
+			if (!(req_list[i].flags
+				& MDP_NO_DMA_BARRIER_START)) {
+				msm_dma_nc_pre();
+				break;
+			}
+		}
+		break;
+
+	case MDP_FB_PAGE_PROTECTION_WRITETHROUGHCACHE:
+		/*
+		 * The following barrier is only done at most once,
+		 * since further calls would be redundant.
+		 */
+		for (i = 0; i < req_list_count; i++) {
+			if (!(req_list[i].flags
+				& MDP_NO_DMA_BARRIER_START)) {
+				msm_dma_wt_pre();
+				break;
+			}
+		}
+		break;
+
+	case MDP_FB_PAGE_PROTECTION_WRITEBACKCACHE:
+	case MDP_FB_PAGE_PROTECTION_WRITEBACKWACACHE:
+		for (i = 0; i < req_list_count; i++) {
+			if (!(req_list[i].flags &
+					MDP_NO_DMA_BARRIER_START)) {
+
+				msm_fb_dma_barrier_for_rect(info,
+						&(req_list[i].src),
+						&(req_list[i].src_rect),
+						msm_dma_todevice_wb_pre
+						);
+
+				msm_fb_dma_barrier_for_rect(info,
+						&(req_list[i].dst),
+						&(req_list[i].dst_rect),
+						msm_dma_todevice_wb_pre
+						);
+			}
+		}
+		break;
+	}
+#else
+	dmb();
+#endif
+}
+
+/*
+ * Do the write barriers required to guarantee data will be re-read from RAM by
+ * the CPU after a DMA operation ends.
+ * NOTE: As currently implemented, the data between
+ *       the end of one row and the start of the next is
+ *       included in the address range rather than
+ *       doing multiple calls for each row.
+ */
+static void msm_fb_ensure_memory_coherency_after_dma(struct fb_info *info,
+		struct mdp_blit_req *req_list,
+		int req_list_count)
+{
+#ifdef CONFIG_ARCH_QSD8X50
+	int i;
+
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	switch (mfd->mdp_fb_page_protection)	{
+	default:
+	case MDP_FB_PAGE_PROTECTION_NONCACHED:
+	case MDP_FB_PAGE_PROTECTION_WRITECOMBINE:
+		/*
+		 * The following barrier is only done at most once,
+		 * since further calls would be redundant.
+		 */
+		for (i = 0; i < req_list_count; i++) {
+			if (!(req_list[i].flags
+				& MDP_NO_DMA_BARRIER_END)) {
+				msm_dma_nc_post();
+				break;
+			}
+		}
+		break;
+
+	case MDP_FB_PAGE_PROTECTION_WRITETHROUGHCACHE:
+		for (i = 0; i < req_list_count; i++) {
+			if (!(req_list[i].flags &
+					MDP_NO_DMA_BARRIER_END)) {
+
+				msm_fb_dma_barrier_for_rect(info,
+						&(req_list[i].dst),
+						&(req_list[i].dst_rect),
+						msm_dma_fromdevice_wt_post
+						);
+			}
+		}
+		break;
+	case MDP_FB_PAGE_PROTECTION_WRITEBACKCACHE:
+	case MDP_FB_PAGE_PROTECTION_WRITEBACKWACACHE:
+		for (i = 0; i < req_list_count; i++) {
+			if (!(req_list[i].flags &
+					MDP_NO_DMA_BARRIER_END)) {
+
+				msm_fb_dma_barrier_for_rect(info,
+						&(req_list[i].dst),
+						&(req_list[i].dst_rect),
+						msm_dma_fromdevice_wb_post
+						);
+			}
+		}
+		break;
+	}
+#else
+	dmb();
+#endif
+}
+
+/*
+ * NOTE: Userspace issues blit operations in a sequence; the sequence
+ * starts with an operation marked START and ends with an operation marked
+ * END. Userspace guarantees that all blit operations between START and
+ * END stay within the regions designated by the START and END operations
+ * and that it does not modify those areas. Hence it is enough to perform
+ * barrier/cache operations only on the START and END operations.
+ */
+static int msmfb_blit(struct fb_info *info, void __user *p)
+{
+	/*
+	 * CAUTION: The names of the struct types intentionally *DON'T* match
+	 * the names of the variables declared -- they appear to be swapped.
+	 * Read the code carefully and you should see that the variable names
+	 * make sense.
+	 */
+	const int MAX_LIST_WINDOW = 16;
+	struct mdp_blit_req req_list[MAX_LIST_WINDOW];
+	struct mdp_blit_req_list req_list_header;
+
+	int count, i, req_list_count;
+
+	/* Get the count size for the total BLIT request. */
+	if (copy_from_user(&req_list_header, p, sizeof(req_list_header)))
+		return -EFAULT;
+	p += sizeof(req_list_header);
+	count = req_list_header.count;
+	if (count < 0 || count >= MAX_BLIT_REQ)
+		return -EINVAL;
+	while (count > 0) {
+		/*
+		 * Access the requests through a narrow window to decrease copy
+		 * overhead and make larger requests accessible to the
+		 * coherency management code.
+		 * NOTE: The window size is intended to be larger than the
+		 *       typical request size, but not require more than 2
+		 *       kbytes of stack storage.
+		 */
+		req_list_count = count;
+		if (req_list_count > MAX_LIST_WINDOW)
+			req_list_count = MAX_LIST_WINDOW;
+		if (copy_from_user(&req_list, p,
+				sizeof(struct mdp_blit_req)*req_list_count))
+			return -EFAULT;
+
+		/*
+		 * Ensure that any data CPU may have previously written to
+		 * internal state (but not yet committed to memory) is
+		 * guaranteed to be committed to memory now.
+		 */
+		msm_fb_ensure_memory_coherency_before_dma(info,
+				req_list, req_list_count);
+
+		/*
+		 * Do the blit DMA, if required -- returning early only if
+		 * there is a failure.
+		 */
+		for (i = 0; i < req_list_count; i++) {
+			if (!(req_list[i].flags & MDP_NO_BLIT)) {
+				/* Do the actual blit. */
+				int ret = mdp_blit(info, &(req_list[i]));
+
+				/*
+				 * Note that early returns don't guarantee
+				 * memory coherency.
+				 */
+				if (ret)
+					return ret;
+			}
+		}
+
+		/*
+		 * Ensure that CPU cache and other internal CPU state is
+		 * updated to reflect any change in memory modified by MDP blit
+		 * DMA.
+		 */
+		msm_fb_ensure_memory_coherency_after_dma(info,
+				req_list,
+				req_list_count);
+
+		/* Go to next window of requests. */
+		count -= req_list_count;
+		p += sizeof(struct mdp_blit_req)*req_list_count;
+	}
+	return 0;
+}
+
+#ifdef CONFIG_FB_MSM_OVERLAY
+static int msmfb_overlay_get(struct fb_info *info, void __user *p)
+{
+	struct mdp_overlay req;
+	int ret;
+
+	if (copy_from_user(&req, p, sizeof(req)))
+		return -EFAULT;
+
+	ret = mdp4_overlay_get(info, &req);
+	if (ret) {
+		printk(KERN_ERR "%s: ioctl failed\n",
+			__func__);
+		return ret;
+	}
+	if (copy_to_user(p, &req, sizeof(req))) {
+		printk(KERN_ERR "%s: copy2user failed\n",
+			__func__);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int msmfb_overlay_set(struct fb_info *info, void __user *p)
+{
+	struct mdp_overlay req;
+	int ret;
+
+	if (copy_from_user(&req, p, sizeof(req)))
+		return -EFAULT;
+
+	ret = mdp4_overlay_set(info, &req);
+	if (ret) {
+		printk(KERN_ERR "%s: ioctl failed, rc=%d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	if (copy_to_user(p, &req, sizeof(req))) {
+		printk(KERN_ERR "%s: copy2user failed\n",
+			__func__);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int msmfb_overlay_unset(struct fb_info *info, unsigned long *argp)
+{
+	int	ret, ndx;
+
+	ret = copy_from_user(&ndx, argp, sizeof(ndx));
+	if (ret) {
+		printk(KERN_ERR "%s: copy_from_user failed\n",
+			__func__);
+		return ret;
+	}
+
+	return mdp4_overlay_unset(info, ndx);
+}
+
+static int msmfb_overlay_play(struct fb_info *info, unsigned long *argp)
+{
+	int	ret;
+	struct msmfb_overlay_data req;
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	struct file *p_src_file = 0;
+	struct file *p_src_plane1_file = 0, *p_src_plane2_file = 0;
+
+	if (mfd->overlay_play_enable == 0)	/* nothing to do */
+		return 0;
+
+	ret = copy_from_user(&req, argp, sizeof(req));
+	if (ret) {
+		printk(KERN_ERR "%s: copy_from_user failed\n",
+			__func__);
+		return ret;
+	}
+
+	complete(&mfd->msmfb_update_notify);
+	mutex_lock(&msm_fb_notify_update_sem);
+	if (mfd->msmfb_no_update_notify_timer.function)
+		del_timer(&mfd->msmfb_no_update_notify_timer);
+
+	mfd->msmfb_no_update_notify_timer.expires =
+				jiffies + ((1000 * HZ) / 1000);
+	add_timer(&mfd->msmfb_no_update_notify_timer);
+	mutex_unlock(&msm_fb_notify_update_sem);
+
+	ret = mdp4_overlay_play(info, &req, &p_src_file, &p_src_plane1_file,
+				&p_src_plane2_file);
+
+#ifdef CONFIG_ANDROID_PMEM
+	if (p_src_file)
+		put_pmem_file(p_src_file);
+	if (p_src_plane1_file)
+		put_pmem_file(p_src_plane1_file);
+	if (p_src_plane2_file)
+		put_pmem_file(p_src_plane2_file);
+#endif
+
+	return ret;
+}
+
+static int msmfb_overlay_play_enable(struct fb_info *info, unsigned long *argp)
+{
+	int	ret, enable;
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+
+	ret = copy_from_user(&enable, argp, sizeof(enable));
+	if (ret) {
+		printk(KERN_ERR "%s: copy_from_user failed\n",
+			__func__);
+		return ret;
+	}
+
+	mfd->overlay_play_enable = enable;
+
+	return 0;
+}
+
+
+#ifdef CONFIG_FB_MSM_OVERLAY_WRITEBACK
+static int msmfb_overlay_blt(struct fb_info *info, unsigned long *argp)
+{
+	int     ret;
+	struct msmfb_overlay_blt req;
+
+	ret = copy_from_user(&req, argp, sizeof(req));
+	if (ret) {
+		printk(KERN_ERR "%s: copy_from_user failed\n",
+			__func__);
+		return ret;
+	}
+
+	ret = mdp4_overlay_blt(info, &req);
+
+	return ret;
+}
+
+static int msmfb_overlay_blt_off(struct fb_info *info, unsigned long *argp)
+{
+	int	ret;
+	struct msmfb_overlay_blt req;
+
+	ret = mdp4_overlay_blt_offset(info, &req);
+	if (ret) {
+		printk(KERN_ERR "%s: mdp4_overlay_blt_offset failed\n",
+			__func__);
+		return ret;
+	}
+
+	ret = copy_to_user(argp, &req, sizeof(req));
+	if (ret)
+		printk(KERN_ERR "%s: copy_to_user failed\n",
+			__func__);
+
+	return ret;
+}
+#else
+static int msmfb_overlay_blt(struct fb_info *info, unsigned long *argp)
+{
+	return 0;
+}
+static int msmfb_overlay_blt_off(struct fb_info *info, unsigned long *argp)
+{
+	return 0;
+}
+#endif
+
+static int msmfb_overlay_3d(struct fb_info *info, unsigned long *argp)
+{
+	int	ret;
+	struct msmfb_overlay_3d req;
+
+	ret = copy_from_user(&req, argp, sizeof(req));
+	if (ret) {
+		pr_err("%s: copy_from_user failed\n",
+			__func__);
+		return ret;
+	}
+
+	ret = mdp4_overlay_3d(info, &req);
+
+	return ret;
+}
+
+#endif
+
+DEFINE_SEMAPHORE(msm_fb_ioctl_ppp_sem);
+DEFINE_MUTEX(msm_fb_ioctl_lut_sem);
+DEFINE_MUTEX(msm_fb_ioctl_hist_sem);
+
+/* Set color conversion matrix from user space */
+
+#ifndef CONFIG_FB_MSM_MDP40
+static void msmfb_set_color_conv(struct mdp_ccs *p)
+{
+	int i;
+
+	if (p->direction == MDP_CCS_RGB2YUV) {
+		/* MDP cmd block enable */
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+		/* RGB->YUV primary forward matrix */
+		for (i = 0; i < MDP_CCS_SIZE; i++)
+			writel(p->ccs[i], MDP_CSC_PFMVn(i));
+
+		#ifdef CONFIG_FB_MSM_MDP31
+		for (i = 0; i < MDP_BV_SIZE; i++)
+			writel(p->bv[i], MDP_CSC_POST_BV2n(i));
+		#endif
+
+		/* MDP cmd block disable */
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	} else {
+		/* MDP cmd block enable */
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+		/* YUV->RGB primary reverse matrix */
+		for (i = 0; i < MDP_CCS_SIZE; i++)
+			writel(p->ccs[i], MDP_CSC_PRMVn(i));
+		for (i = 0; i < MDP_BV_SIZE; i++)
+			writel(p->bv[i], MDP_CSC_PRE_BV1n(i));
+
+		/* MDP cmd block disable */
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	}
+}
+#endif
+
+static int msmfb_notify_update(struct fb_info *info, unsigned long *argp)
+{
+	int ret, notify;
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+
+	ret = copy_from_user(&notify, argp, sizeof(int));
+	if (ret) {
+		pr_err("%s: copy_from_user failed\n", __func__);
+		return ret;
+	}
+
+	if (notify > NOTIFY_UPDATE_STOP)
+		return -EINVAL;
+
+	if (notify == NOTIFY_UPDATE_START) {
+		INIT_COMPLETION(mfd->msmfb_update_notify);
+		wait_for_completion_interruptible(&mfd->msmfb_update_notify);
+	} else {
+		INIT_COMPLETION(mfd->msmfb_no_update_notify);
+		wait_for_completion_interruptible(&mfd->msmfb_no_update_notify);
+	}
+	return 0;
+}
+
+static int msm_fb_ioctl(struct fb_info *info, unsigned int cmd,
+			unsigned long arg)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	void __user *argp = (void __user *)arg;
+	struct fb_cursor cursor;
+	struct fb_cmap cmap;
+	struct mdp_histogram hist;
+#ifndef CONFIG_FB_MSM_MDP40
+	struct mdp_ccs ccs_matrix;
+#endif
+	struct mdp_page_protection fb_page_protection;
+	int ret = 0;
+
+	switch (cmd) {
+#ifdef CONFIG_FB_MSM_OVERLAY
+	case MSMFB_OVERLAY_GET:
+		down(&msm_fb_ioctl_ppp_sem);
+		ret = msmfb_overlay_get(info, argp);
+		up(&msm_fb_ioctl_ppp_sem);
+		break;
+	case MSMFB_OVERLAY_SET:
+		down(&msm_fb_ioctl_ppp_sem);
+		ret = msmfb_overlay_set(info, argp);
+		up(&msm_fb_ioctl_ppp_sem);
+		break;
+	case MSMFB_OVERLAY_UNSET:
+		down(&msm_fb_ioctl_ppp_sem);
+		ret = msmfb_overlay_unset(info, argp);
+		up(&msm_fb_ioctl_ppp_sem);
+		break;
+	case MSMFB_OVERLAY_PLAY:
+		down(&msm_fb_ioctl_ppp_sem);
+		ret = msmfb_overlay_play(info, argp);
+		up(&msm_fb_ioctl_ppp_sem);
+		break;
+	case MSMFB_OVERLAY_PLAY_ENABLE:
+		down(&msm_fb_ioctl_ppp_sem);
+		ret = msmfb_overlay_play_enable(info, argp);
+		up(&msm_fb_ioctl_ppp_sem);
+		break;
+	case MSMFB_OVERLAY_BLT:
+		down(&msm_fb_ioctl_ppp_sem);
+		ret = msmfb_overlay_blt(info, argp);
+		up(&msm_fb_ioctl_ppp_sem);
+		break;
+	case MSMFB_OVERLAY_BLT_OFFSET:
+		down(&msm_fb_ioctl_ppp_sem);
+		ret = msmfb_overlay_blt_off(info, argp);
+		up(&msm_fb_ioctl_ppp_sem);
+		break;
+	case MSMFB_OVERLAY_3D:
+		down(&msm_fb_ioctl_ppp_sem);
+		ret = msmfb_overlay_3d(info, argp);
+		up(&msm_fb_ioctl_ppp_sem);
+		break;
+#endif
+	case MSMFB_BLIT:
+		down(&msm_fb_ioctl_ppp_sem);
+		ret = msmfb_blit(info, argp);
+		up(&msm_fb_ioctl_ppp_sem);
+
+		break;
+
+	/* Ioctl for setting ccs matrix from user space */
+	case MSMFB_SET_CCS_MATRIX:
+#ifndef CONFIG_FB_MSM_MDP40
+		ret = copy_from_user(&ccs_matrix, argp, sizeof(ccs_matrix));
+		if (ret) {
+			printk(KERN_ERR
+				"%s: MSMFB_SET_CCS_MATRIX copy_from_user failed\n",
+				__func__);
+			return ret;
+		}
+
+		down(&msm_fb_ioctl_ppp_sem);
+		if (ccs_matrix.direction == MDP_CCS_RGB2YUV)
+			mdp_ccs_rgb2yuv = ccs_matrix;
+		else
+			mdp_ccs_yuv2rgb = ccs_matrix;
+
+		msmfb_set_color_conv(&ccs_matrix);
+		up(&msm_fb_ioctl_ppp_sem);
+#else
+		ret = -EINVAL;
+#endif
+
+		break;
+
+	/* Ioctl for getting ccs matrix to user space */
+	case MSMFB_GET_CCS_MATRIX:
+#ifndef CONFIG_FB_MSM_MDP40
+		ret = copy_from_user(&ccs_matrix, argp, sizeof(ccs_matrix));
+		if (ret) {
+			printk(KERN_ERR
+				"%s: MSMFB_GET_CCS_MATRIX copy_from_user failed\n",
+				__func__);
+			return ret;
+		}
+
+		down(&msm_fb_ioctl_ppp_sem);
+		if (ccs_matrix.direction == MDP_CCS_RGB2YUV)
+			ccs_matrix = mdp_ccs_rgb2yuv;
+		else
+			ccs_matrix = mdp_ccs_yuv2rgb;
+		up(&msm_fb_ioctl_ppp_sem);
+
+		ret = copy_to_user(argp, &ccs_matrix, sizeof(ccs_matrix));
+		if (ret) {
+			printk(KERN_ERR
+				"%s: MSMFB_GET_CCS_MATRIX copy_to_user failed\n",
+				__func__);
+			return ret;
+		}
+#else
+		ret = -EINVAL;
+#endif
+
+		break;
+
+	case MSMFB_GRP_DISP:
+#ifdef CONFIG_FB_MSM_MDP22
+		{
+			unsigned long grp_id;
+
+			ret = copy_from_user(&grp_id, argp, sizeof(grp_id));
+			if (ret)
+				return ret;
+
+			mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+			writel(grp_id, MDP_FULL_BYPASS_WORD43);
+			mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF,
+				      FALSE);
+			break;
+		}
+#else
+		return -EFAULT;
+#endif
+	case MSMFB_SUSPEND_SW_REFRESHER:
+		if (!mfd->panel_power_on)
+			return -EPERM;
+
+		mfd->sw_refreshing_enable = FALSE;
+		ret = msm_fb_stop_sw_refresher(mfd);
+		break;
+
+	case MSMFB_RESUME_SW_REFRESHER:
+		if (!mfd->panel_power_on)
+			return -EPERM;
+
+		mfd->sw_refreshing_enable = TRUE;
+		ret = msm_fb_resume_sw_refresher(mfd);
+		break;
+
+	case MSMFB_CURSOR:
+		ret = copy_from_user(&cursor, argp, sizeof(cursor));
+		if (ret)
+			return ret;
+
+		ret = msm_fb_cursor(info, &cursor);
+		break;
+
+	case MSMFB_SET_LUT:
+		ret = copy_from_user(&cmap, argp, sizeof(cmap));
+		if (ret)
+			return ret;
+
+		mutex_lock(&msm_fb_ioctl_lut_sem);
+		ret = msm_fb_set_lut(&cmap, info);
+		mutex_unlock(&msm_fb_ioctl_lut_sem);
+		break;
+
+	case MSMFB_HISTOGRAM:
+		if (!mfd->do_histogram)
+			return -ENODEV;
+
+		ret = copy_from_user(&hist, argp, sizeof(hist));
+		if (ret)
+			return ret;
+
+		mutex_lock(&msm_fb_ioctl_hist_sem);
+		ret = mfd->do_histogram(info, &hist);
+		mutex_unlock(&msm_fb_ioctl_hist_sem);
+		break;
+
+	case MSMFB_HISTOGRAM_START:
+		if (!mfd->do_histogram)
+			return -ENODEV;
+		ret = mdp_start_histogram(info);
+		break;
+
+	case MSMFB_HISTOGRAM_STOP:
+		if (!mfd->do_histogram)
+			return -ENODEV;
+		ret = mdp_stop_histogram(info);
+		break;
+
+
+	case MSMFB_GET_PAGE_PROTECTION:
+		fb_page_protection.page_protection =
+			mfd->mdp_fb_page_protection;
+		ret = copy_to_user(argp, &fb_page_protection,
+				sizeof(fb_page_protection));
+		if (ret)
+			return ret;
+		break;
+
+	case MSMFB_NOTIFY_UPDATE:
+		ret = msmfb_notify_update(info, argp);
+		break;
+
+	case MSMFB_SET_PAGE_PROTECTION:
+#if defined CONFIG_ARCH_QSD8X50 || defined CONFIG_ARCH_MSM8X60
+		ret = copy_from_user(&fb_page_protection, argp,
+				sizeof(fb_page_protection));
+		if (ret)
+			return ret;
+
+		/* Validate the proposed page protection settings. */
+		switch (fb_page_protection.page_protection) {
+		case MDP_FB_PAGE_PROTECTION_NONCACHED:
+		case MDP_FB_PAGE_PROTECTION_WRITECOMBINE:
+		case MDP_FB_PAGE_PROTECTION_WRITETHROUGHCACHE:
+		/* Write-back cache (read allocate)  */
+		case MDP_FB_PAGE_PROTECTION_WRITEBACKCACHE:
+		/* Write-back cache (write allocate) */
+		case MDP_FB_PAGE_PROTECTION_WRITEBACKWACACHE:
+			mfd->mdp_fb_page_protection =
+				fb_page_protection.page_protection;
+			break;
+		default:
+			ret = -EINVAL;
+			break;
+		}
+#else
+		/*
+		 * Don't allow caching until 7k DMA cache operations are
+		 * available.
+		 */
+		ret = -EINVAL;
+#endif
+		break;
+
+	default:
+		MSM_FB_INFO("MDP: unknown ioctl (cmd=%x) received!\n", cmd);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int msm_fb_register_driver(void)
+{
+	return platform_driver_register(&msm_fb_driver);
+}
+
+struct platform_device *msm_fb_add_device(struct platform_device *pdev)
+{
+	struct msm_fb_panel_data *pdata;
+	struct platform_device *this_dev = NULL;
+	struct fb_info *fbi;
+	struct msm_fb_data_type *mfd = NULL;
+	u32 type, id, fb_num;
+
+	if (!pdev)
+		return NULL;
+	id = pdev->id;
+
+	pdata = pdev->dev.platform_data;
+	if (!pdata)
+		return NULL;
+	type = pdata->panel_info.type;
+
+#if defined MSM_FB_NUM
+	/*
+	 * Override the fb_num that was defined in panel_info.
+	 */
+	if (type == HDMI_PANEL || type == DTV_PANEL || type == TV_PANEL)
+		pdata->panel_info.fb_num = 1;
+	else
+		pdata->panel_info.fb_num = MSM_FB_NUM;
+
+	MSM_FB_INFO("setting pdata->panel_info.fb_num to %d. type: %d\n",
+			pdata->panel_info.fb_num, type);
+#endif
+	fb_num = pdata->panel_info.fb_num;
+
+	if (fb_num == 0)
+		return NULL;
+
+	if (fbi_list_index >= MAX_FBI_LIST) {
+		printk(KERN_ERR "msm_fb: no more framebuffer info list!\n");
+		return NULL;
+	}
+	/*
+	 * alloc panel device data
+	 */
+	this_dev = msm_fb_device_alloc(pdata, type, id);
+
+	if (!this_dev) {
+		printk(KERN_ERR
+		"%s: msm_fb_device_alloc failed!\n", __func__);
+		return NULL;
+	}
+
+	/*
+	 * alloc framebuffer info + par data
+	 */
+	fbi = framebuffer_alloc(sizeof(struct msm_fb_data_type), NULL);
+	if (fbi == NULL) {
+		platform_device_put(this_dev);
+		printk(KERN_ERR "msm_fb: can't allocate framebuffer info data!\n");
+		return NULL;
+	}
+
+	mfd = (struct msm_fb_data_type *)fbi->par;
+	mfd->key = MFD_KEY;
+	mfd->fbi = fbi;
+	mfd->panel.type = type;
+	mfd->panel.id = id;
+	mfd->fb_page = fb_num;
+	mfd->index = fbi_list_index;
+	mfd->mdp_fb_page_protection = MDP_FB_PAGE_PROTECTION_WRITECOMBINE;
+
+	/* link to the latest pdev */
+	mfd->pdev = this_dev;
+
+	mfd_list[mfd_list_index++] = mfd;
+	fbi_list[fbi_list_index++] = fbi;
+
+	/*
+	 * set driver data
+	 */
+	platform_set_drvdata(this_dev, mfd);
+
+	if (platform_device_add(this_dev)) {
+		printk(KERN_ERR "msm_fb: platform_device_add failed!\n");
+		platform_device_put(this_dev);
+		framebuffer_release(fbi);
+		fbi_list_index--;
+		mfd_list_index--;
+		return NULL;
+	}
+	return this_dev;
+}
+EXPORT_SYMBOL(msm_fb_add_device);
+
+int get_fb_phys_info(unsigned long *start, unsigned long *len, int fb_num)
+{
+	struct fb_info *info;
+
+	if (fb_num < 0 || fb_num >= MAX_FBI_LIST)
+		return -1;
+
+	info = fbi_list[fb_num];
+	if (!info)
+		return -1;
+
+	*start = info->fix.smem_start;
+	*len = info->fix.smem_len;
+	return 0;
+}
+EXPORT_SYMBOL(get_fb_phys_info);
+
+int __init msm_fb_init(void)
+{
+	int rc = -ENODEV;
+
+	if (msm_fb_register_driver())
+		return rc;
+
+#ifdef MSM_FB_ENABLE_DBGFS
+	{
+		struct dentry *root;
+
+		if ((root = msm_fb_get_debugfs_root()) != NULL) {
+			msm_fb_debugfs_file_create(root,
+						   "msm_fb_msg_printing_level",
+						   (u32 *) &msm_fb_msg_level);
+			msm_fb_debugfs_file_create(root,
+						   "mddi_msg_printing_level",
+						   (u32 *) &mddi_msg_level);
+			msm_fb_debugfs_file_create(root, "msm_fb_debug_enabled",
+						   (u32 *) &msm_fb_debug_enabled);
+		}
+	}
+#endif
+
+	return 0;
+}
+
+module_init(msm_fb_init);
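An illustrative userspace sketch (not part of this patch) of driving the MSMFB_BLIT ioctl implemented above. The device node path, the surface geometry, and the choice of the framebuffer fd as the source memory_id are assumptions made for the example; real clients fill struct mdp_blit_req from their own surfaces.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/msm_mdp.h>

static int example_blit(int fb_fd)
{
	/* List header followed immediately by one request, as msmfb_blit() expects. */
	unsigned char buf[sizeof(struct mdp_blit_req_list) +
			  sizeof(struct mdp_blit_req)];
	struct mdp_blit_req_list *list = (struct mdp_blit_req_list *)buf;
	struct mdp_blit_req *req = &list->req[0];

	memset(buf, 0, sizeof(buf));
	list->count = 1;

	req->src.width = req->dst.width = 320;		/* assumed geometry */
	req->src.height = req->dst.height = 240;
	req->src.format = req->dst.format = MDP_RGB_565;
	req->src.memory_id = req->dst.memory_id = fb_fd;	/* assumed source */
	req->src_rect.w = req->dst_rect.w = 320;
	req->src_rect.h = req->dst_rect.h = 240;
	req->alpha = MDP_ALPHA_NOP;
	req->transp_mask = MDP_TRANSP_NOP;

	return ioctl(fb_fd, MSMFB_BLIT, buf);
}

int main(void)
{
	int fd = open("/dev/graphics/fb0", O_RDWR);	/* Android fb node (assumed path) */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (example_blit(fd))
		perror("MSMFB_BLIT");
	close(fd);
	return 0;
}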
diff --git a/drivers/video/msm/msm_fb.h b/drivers/video/msm/msm_fb.h
new file mode 100644
index 0000000..bdf32eb
--- /dev/null
+++ b/drivers/video/msm/msm_fb.h
@@ -0,0 +1,171 @@
+/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MSM_FB_H
+#define MSM_FB_H
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+
+#include <mach/hardware.h>
+#include <linux/io.h>
+#include <mach/board.h>
+
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <mach/memory.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/hrtimer.h>
+
+#include <linux/fb.h>
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+#include "msm_fb_panel.h"
+#include "mdp.h"
+
+#define MSM_FB_DEFAULT_PAGE_SIZE 2
+#define MFD_KEY  0x11161126
+#define MSM_FB_MAX_DEV_LIST 32
+
+struct disp_info_type_suspend {
+	boolean op_enable;
+	boolean sw_refreshing_enable;
+	boolean panel_power_on;
+};
+
+struct msm_fb_data_type {
+	__u32 key;
+	__u32 index;
+	__u32 ref_cnt;
+	__u32 fb_page;
+
+	panel_id_type panel;
+	struct msm_panel_info panel_info;
+
+	DISP_TARGET dest;
+	struct fb_info *fbi;
+
+	boolean op_enable;
+	uint32 fb_imgType;
+	boolean sw_currently_refreshing;
+	boolean sw_refreshing_enable;
+	boolean hw_refresh;
+#ifdef CONFIG_FB_MSM_OVERLAY
+	int overlay_play_enable;
+#endif
+
+	MDPIBUF ibuf;
+	boolean ibuf_flushed;
+	struct timer_list refresh_timer;
+	struct completion refresher_comp;
+
+	boolean pan_waiting;
+	struct completion pan_comp;
+
+	/* vsync */
+	boolean use_mdp_vsync;
+	__u32 vsync_gpio;
+	__u32 total_lcd_lines;
+	__u32 total_porch_lines;
+	__u32 lcd_ref_usec_time;
+	__u32 refresh_timer_duration;
+
+	struct hrtimer dma_hrtimer;
+
+	boolean panel_power_on;
+	struct work_struct dma_update_worker;
+	struct semaphore sem;
+
+	struct timer_list vsync_resync_timer;
+	boolean vsync_handler_pending;
+	struct work_struct vsync_resync_worker;
+
+	ktime_t last_vsync_timetick;
+
+	__u32 *vsync_width_boundary;
+
+	unsigned int pmem_id;
+	struct disp_info_type_suspend suspend;
+
+	__u32 channel_irq;
+
+	struct mdp_dma_data *dma;
+	void (*dma_fnc) (struct msm_fb_data_type *mfd);
+	int (*cursor_update) (struct fb_info *info,
+			      struct fb_cursor *cursor);
+	int (*lut_update) (struct fb_info *info,
+			      struct fb_cmap *cmap);
+	int (*do_histogram) (struct fb_info *info,
+			      struct mdp_histogram *hist);
+	void *cursor_buf;
+	void *cursor_buf_phys;
+
+	void *cmd_port;
+	void *data_port;
+	void *data_port_phys;
+
+	__u32 bl_level;
+
+	struct platform_device *pdev;
+
+	__u32 var_xres;
+	__u32 var_yres;
+	__u32 var_pixclock;
+
+#ifdef MSM_FB_ENABLE_DBGFS
+	struct dentry *sub_dir;
+#endif
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	struct early_suspend early_suspend;
+#ifdef CONFIG_FB_MSM_MDDI
+	struct early_suspend mddi_early_suspend;
+	struct early_suspend mddi_ext_early_suspend;
+#endif
+#endif
+	u32 mdp_fb_page_protection;
+
+	struct clk *ebi1_clk;
+	boolean dma_update_flag;
+	struct timer_list msmfb_no_update_notify_timer;
+	struct completion msmfb_update_notify;
+	struct completion msmfb_no_update_notify;
+};
+
+struct dentry *msm_fb_get_debugfs_root(void);
+void msm_fb_debugfs_file_create(struct dentry *root, const char *name,
+				u32 *var);
+void msm_fb_set_backlight(struct msm_fb_data_type *mfd, __u32 bkl_lvl);
+
+struct platform_device *msm_fb_add_device(struct platform_device *pdev);
+
+int msm_fb_detect_client(const char *name);
+
+#ifdef CONFIG_FB_BACKLIGHT
+void msm_fb_config_backlight(struct msm_fb_data_type *mfd);
+#endif
+
+void fill_black_screen(void);
+void unfill_black_screen(void);
+
+#endif /* MSM_FB_H */
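A minimal kernel-side sketch (illustrative, not part of this patch) of the convention used throughout this driver: the per-device msm_fb_data_type lives in fb_info::par, and MFD_KEY is checked before the pointer is trusted. The helper name is an assumption for the example; it presumes msm_fb.h is included.

static inline struct msm_fb_data_type *example_mfd_from_fbi(struct fb_info *fbi)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;

	if (!mfd || mfd->key != MFD_KEY)
		return NULL;	/* not an msm_fb framebuffer */
	return mfd;
}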
diff --git a/drivers/video/msm/msm_fb_bl.c b/drivers/video/msm/msm_fb_bl.c
new file mode 100644
index 0000000..9afbbf1
--- /dev/null
+++ b/drivers/video/msm/msm_fb_bl.c
@@ -0,0 +1,75 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <linux/backlight.h>
+
+#include "msm_fb.h"
+
+static int msm_fb_bl_get_brightness(struct backlight_device *pbd)
+{
+	return pbd->props.brightness;
+}
+
+static int msm_fb_bl_update_status(struct backlight_device *pbd)
+{
+	struct msm_fb_data_type *mfd = bl_get_data(pbd);
+	__u32 bl_lvl;
+
+	bl_lvl = pbd->props.brightness;
+	bl_lvl = mfd->fbi->bl_curve[bl_lvl];
+	msm_fb_set_backlight(mfd, bl_lvl);
+	return 0;
+}
+
+static const struct backlight_ops msm_fb_bl_ops = {
+	.get_brightness = msm_fb_bl_get_brightness,
+	.update_status = msm_fb_bl_update_status,
+};
+
+void msm_fb_config_backlight(struct msm_fb_data_type *mfd)
+{
+	struct msm_fb_panel_data *pdata;
+	struct backlight_device *pbd;
+	struct fb_info *fbi;
+	char name[16];
+	struct backlight_properties props;
+
+	fbi = mfd->fbi;
+	pdata = (struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;
+
+	if ((pdata) && (pdata->set_backlight)) {
+		snprintf(name, sizeof(name), "msmfb_bl%d", mfd->index);
+		memset(&props, 0, sizeof(props));
+		props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
+		props.brightness = FB_BACKLIGHT_LEVELS - 1;
+		pbd =
+		    backlight_device_register(name, fbi->dev, mfd,
+					      &msm_fb_bl_ops, &props);
+		if (!IS_ERR(pbd)) {
+			fbi->bl_dev = pbd;
+			fb_bl_default_curve(fbi,
+					    0,
+					    mfd->panel_info.bl_min,
+					    mfd->panel_info.bl_max);
+		} else {
+			fbi->bl_dev = NULL;
+			printk(KERN_ERR "msm_fb: backlight_device_register failed!\n");
+		}
+	}
+}
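An illustrative sketch (not part of this patch) of the panel-side set_backlight() hook that the registration above ends up driving: sysfs brightness writes reach msm_fb_bl_update_status(), which maps the value through fbi->bl_curve[] and calls msm_fb_set_backlight(). The function name and the pr_debug() stand-in are assumptions; a real panel would program its PWM/PMIC/MDDI interface here. It presumes msm_fb.h is included.

static void example_panel_set_backlight(struct msm_fb_data_type *mfd)
{
	/* bl_level has already been mapped through the backlight curve. */
	u32 level = mfd->bl_level;

	/* Hypothetical panel interface; replace with the real register write. */
	pr_debug("example panel: backlight level %u\n", level);
}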
diff --git a/drivers/video/msm/msm_fb_def.h b/drivers/video/msm/msm_fb_def.h
new file mode 100644
index 0000000..1c1f392
--- /dev/null
+++ b/drivers/video/msm/msm_fb_def.h
@@ -0,0 +1,204 @@
+/* Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MSM_FB_DEF_H
+#define MSM_FB_DEF_H
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/fb.h>
+#include <linux/msm_mdp.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <linux/proc_fs.h>
+#include <linux/vmalloc.h>
+#include <linux/debugfs.h>
+#include <linux/console.h>
+#include <linux/android_pmem.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#include <asm/system.h>
+#include <asm/mach-types.h>
+
+#include <mach/hardware.h>
+
+
+typedef s64 int64;
+typedef s32 int32;
+typedef s16 int16;
+typedef s8 int8;
+
+typedef u64 uint64;
+typedef u32 uint32;
+typedef u16 uint16;
+typedef u8 uint8;
+
+typedef s32 int4;
+typedef s16 int2;
+typedef s8 int1;
+
+typedef u32 uint4;
+typedef u16 uint2;
+typedef u8 uint1;
+
+typedef u32 dword;
+typedef u16 word;
+typedef u8 byte;
+
+typedef unsigned int boolean;
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#define MSM_FB_ENABLE_DBGFS
+#define FEATURE_MDDI
+
+#if defined(CONFIG_FB_MSM_DEFAULT_DEPTH_RGB565)
+#define MSMFB_DEFAULT_TYPE MDP_RGB_565
+#elif defined(CONFIG_FB_MSM_DEFAULT_DEPTH_ARGB8888)
+#define MSMFB_DEFAULT_TYPE MDP_ARGB_8888
+#elif defined(CONFIG_FB_MSM_DEFAULT_DEPTH_RGBA8888)
+#define MSMFB_DEFAULT_TYPE MDP_RGBA_8888
+#else
+#define MSMFB_DEFAULT_TYPE MDP_RGB_565
+#endif
+
+#define outp32(addr, val) writel(val, addr)
+#define outp16(addr, val) writew(val, addr)
+#define outp8(addr, val) writeb(val, addr)
+#define outp(addr, val) outp32(addr, val)
+
+#ifndef MAX
+#define MAX(x, y) (((x) > (y)) ? (x) : (y))
+#endif
+
+#ifndef MIN
+#define MIN(x, y) (((x) < (y)) ? (x) : (y))
+#endif
+
+/*--------------------------------------------------------------------------*/
+
+#define inp32(addr) readl(addr)
+#define inp16(addr) readw(addr)
+#define inp8(addr) readb(addr)
+#define inp(addr) inp32(addr)
+
+#define inpw(port)             readw(port)
+#define outpw(port, val)       writew(val, port)
+#define inpdw(port)            readl(port)
+#define outpdw(port, val)      writel(val, port)
+
+
+#define clk_busy_wait(x) msleep_interruptible((x)/1000)
+
+#define memory_barrier()
+
+#define assert(expr) \
+	do { \
+		if (!(expr)) \
+			printk(KERN_ERR \
+				"msm_fb: assertion failed! %s,%s,%s,line=%d\n", \
+				#expr, __FILE__, __func__, __LINE__); \
+	} while (0)
+
+#define ASSERT(x)   assert(x)
+
+#define DISP_EBI2_LOCAL_DEFINE
+#ifdef DISP_EBI2_LOCAL_DEFINE
+#define LCD_PRIM_BASE_PHYS 0x98000000
+#define LCD_SECD_BASE_PHYS 0x9c000000
+#define EBI2_PRIM_LCD_RS_PIN 0x20000
+#define EBI2_SECD_LCD_RS_PIN 0x20000
+
+#define EBI2_PRIM_LCD_CLR 0xC0
+#define EBI2_PRIM_LCD_SEL 0x40
+
+#define EBI2_SECD_LCD_CLR 0x300
+#define EBI2_SECD_LCD_SEL 0x100
+#endif
+
+extern u32 msm_fb_msg_level;
+
+/*
+ * Message printing priorities:
+ * LEVEL 0 KERN_EMERG (highest priority)
+ * LEVEL 1 KERN_ALERT
+ * LEVEL 2 KERN_CRIT
+ * LEVEL 3 KERN_ERR
+ * LEVEL 4 KERN_WARNING
+ * LEVEL 5 KERN_NOTICE
+ * LEVEL 6 KERN_INFO
+ * LEVEL 7 KERN_DEBUG (Lowest priority)
+ */
+#define MSM_FB_EMERG(msg, ...)    \
+	if (msm_fb_msg_level > 0)  \
+		printk(KERN_EMERG msg, ## __VA_ARGS__);
+#define MSM_FB_ALERT(msg, ...)    \
+	if (msm_fb_msg_level > 1)  \
+		printk(KERN_ALERT msg, ## __VA_ARGS__);
+#define MSM_FB_CRIT(msg, ...)    \
+	if (msm_fb_msg_level > 2)  \
+		printk(KERN_CRIT msg, ## __VA_ARGS__);
+#define MSM_FB_ERR(msg, ...)    \
+	if (msm_fb_msg_level > 3)  \
+		printk(KERN_ERR msg, ## __VA_ARGS__);
+#define MSM_FB_WARNING(msg, ...)    \
+	if (msm_fb_msg_level > 4)  \
+		printk(KERN_WARNING msg, ## __VA_ARGS__);
+#define MSM_FB_NOTICE(msg, ...)    \
+	if (msm_fb_msg_level > 5)  \
+		printk(KERN_NOTICE msg, ## __VA_ARGS__);
+#define MSM_FB_INFO(msg, ...)    \
+	if (msm_fb_msg_level > 6)  \
+		printk(KERN_INFO msg, ## __VA_ARGS__);
+#define MSM_FB_DEBUG(msg, ...)    \
+	if (msm_fb_msg_level > 7)  \
+		printk(KERN_DEBUG msg, ## __VA_ARGS__);
+
+#ifdef MSM_FB_C
+unsigned char *msm_mdp_base;
+unsigned char *msm_pmdh_base;
+unsigned char *msm_emdh_base;
+unsigned char *mipi_dsi_base;
+#else
+extern unsigned char *msm_mdp_base;
+extern unsigned char *msm_pmdh_base;
+extern unsigned char *msm_emdh_base;
+extern unsigned char *mipi_dsi_base;
+#endif
+
+#undef ENABLE_MDDI_MULTI_READ_WRITE
+#undef ENABLE_FWD_LINK_SKEW_CALIBRATION
+
+#endif /* MSM_FB_DEF_H */
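An illustrative sketch (not part of this patch) of how the MSM_FB_* logging macros above behave: a message is emitted only when msm_fb_msg_level is strictly greater than that level's index, so raising the value makes the driver more verbose. The function and the level value of 5 are assumptions for the example; it presumes msm_fb_def.h is included.

static void example_log_levels(void)
{
	/*
	 * With msm_fb_msg_level set to 5, ERR and WARNING are printed,
	 * while INFO and DEBUG are compiled in but skipped at run time.
	 */
	MSM_FB_ERR("example: something failed\n");
	MSM_FB_WARNING("example: something looks off\n");
	MSM_FB_INFO("example: suppressed unless msm_fb_msg_level > 6\n");
	MSM_FB_DEBUG("example: suppressed unless msm_fb_msg_level > 7\n");
}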
diff --git a/drivers/video/msm/msm_fb_panel.c b/drivers/video/msm/msm_fb_panel.c
new file mode 100644
index 0000000..84de095
--- /dev/null
+++ b/drivers/video/msm/msm_fb_panel.c
@@ -0,0 +1,141 @@
+/* Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <linux/proc_fs.h>
+#include <linux/vmalloc.h>
+#include <linux/debugfs.h>
+
+#include "msm_fb_panel.h"
+
+int panel_next_on(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct msm_fb_panel_data *pdata;
+	struct msm_fb_panel_data *next_pdata;
+	struct platform_device *next_pdev;
+
+	pdata = (struct msm_fb_panel_data *)pdev->dev.platform_data;
+
+	if (pdata) {
+		next_pdev = pdata->next;
+		if (next_pdev) {
+			next_pdata =
+			    (struct msm_fb_panel_data *)next_pdev->dev.
+			    platform_data;
+			if ((next_pdata) && (next_pdata->on))
+				ret = next_pdata->on(next_pdev);
+		}
+	}
+
+	return ret;
+}
+
+int panel_next_off(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct msm_fb_panel_data *pdata;
+	struct msm_fb_panel_data *next_pdata;
+	struct platform_device *next_pdev;
+
+	pdata = (struct msm_fb_panel_data *)pdev->dev.platform_data;
+
+	if (pdata) {
+		next_pdev = pdata->next;
+		if (next_pdev) {
+			next_pdata =
+			    (struct msm_fb_panel_data *)next_pdev->dev.
+			    platform_data;
+			if ((next_pdata) && (next_pdata->off))
+				ret = next_pdata->off(next_pdev);
+		}
+	}
+
+	return ret;
+}
+
+struct platform_device *msm_fb_device_alloc(struct msm_fb_panel_data *pdata,
+						u32 type, u32 id)
+{
+	struct platform_device *this_dev = NULL;
+	char dev_name[16];
+
+	switch (type) {
+	case EBI2_PANEL:
+		snprintf(dev_name, sizeof(dev_name), "ebi2_lcd");
+		break;
+
+	case MDDI_PANEL:
+		snprintf(dev_name, sizeof(dev_name), "mddi");
+		break;
+
+	case EXT_MDDI_PANEL:
+		snprintf(dev_name, sizeof(dev_name), "mddi_ext");
+		break;
+
+	case TV_PANEL:
+		snprintf(dev_name, sizeof(dev_name), "tvenc");
+		break;
+
+	case HDMI_PANEL:
+	case LCDC_PANEL:
+		snprintf(dev_name, sizeof(dev_name), "lcdc");
+		break;
+
+	case DTV_PANEL:
+		snprintf(dev_name, sizeof(dev_name), "dtv");
+		break;
+
+	case MIPI_VIDEO_PANEL:
+	case MIPI_CMD_PANEL:
+		snprintf(dev_name, sizeof(dev_name), "mipi_dsi");
+		break;
+
+	default:
+		return NULL;
+	}
+
+	if (pdata != NULL)
+		pdata->next = NULL;
+	else
+		return NULL;
+
+	this_dev =
+	    platform_device_alloc(dev_name, ((u32) type << 16) | (u32) id);
+
+	if (this_dev) {
+		if (platform_device_add_data
+		    (this_dev, pdata, sizeof(struct msm_fb_panel_data))) {
+			printk(KERN_ERR
+				"msm_fb_device_alloc: platform_device_add_data failed!\n");
+			platform_device_put(this_dev);
+			return NULL;
+		}
+	}
+
+	return this_dev;
+}
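An illustrative sketch (not part of this patch) of the device chaining that panel_next_on()/panel_next_off() walk: a controller driver points its own msm_fb_panel_data at the downstream panel's platform device, so turning the controller on or off cascades to the panel (the tvenc probe later in this patch wires itself up the same way). The function and parameter names are assumptions.

static int example_chain_panel(struct platform_device *ctrl_dev,
			       struct platform_device *panel_pdev,
			       int (*ctrl_on)(struct platform_device *),
			       int (*ctrl_off)(struct platform_device *))
{
	struct msm_fb_panel_data *pdata = ctrl_dev->dev.platform_data;

	if (!pdata)
		return -EINVAL;

	pdata->on = ctrl_on;		/* controller handler runs first...      */
	pdata->off = ctrl_off;
	pdata->next = panel_pdev;	/* ...then panel_next_on(ctrl_dev) calls
					 * the panel's own msm_fb_panel_data->on() */
	return 0;
}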
diff --git a/drivers/video/msm/msm_fb_panel.h b/drivers/video/msm/msm_fb_panel.h
new file mode 100644
index 0000000..bbf5a38
--- /dev/null
+++ b/drivers/video/msm/msm_fb_panel.h
@@ -0,0 +1,199 @@
+/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MSM_FB_PANEL_H
+#define MSM_FB_PANEL_H
+
+#include "msm_fb_def.h"
+
+struct msm_fb_data_type;
+
+typedef void (*msm_fb_vsync_handler_type) (void *arg);
+
+/* panel id type */
+typedef struct panel_id_s {
+	uint16 id;
+	uint16 type;
+} panel_id_type;
+
+/* panel type list */
+#define NO_PANEL		0xffff	/* No Panel */
+#define MDDI_PANEL		1	/* MDDI */
+#define EBI2_PANEL		2	/* EBI2 */
+#define LCDC_PANEL		3	/* internal LCDC type */
+#define EXT_MDDI_PANEL		4	/* Ext.MDDI */
+#define TV_PANEL		5	/* TV */
+#define HDMI_PANEL		6	/* HDMI TV */
+#define DTV_PANEL		7	/* DTV */
+#define MIPI_VIDEO_PANEL	8	/* MIPI */
+#define MIPI_CMD_PANEL		9	/* MIPI */
+
+/* panel class */
+typedef enum {
+	DISPLAY_LCD = 0,	/* lcd = ebi2/mddi */
+	DISPLAY_LCDC,		/* lcdc */
+	DISPLAY_TV,		/* TV Out */
+	DISPLAY_EXT_MDDI,	/* External MDDI */
+} DISP_TARGET;
+
+/* panel device location */
+typedef enum {
+	DISPLAY_1 = 0,		/* attached as first device */
+	DISPLAY_2,		/* attached on second device */
+	MAX_PHYS_TARGET_NUM,
+} DISP_TARGET_PHYS;
+
+/* panel info type */
+struct lcd_panel_info {
+	__u32 vsync_enable;
+	__u32 refx100;
+	__u32 v_back_porch;
+	__u32 v_front_porch;
+	__u32 v_pulse_width;
+	__u32 hw_vsync_mode;
+	__u32 vsync_notifier_period;
+	__u32 rev;
+};
+
+struct lcdc_panel_info {
+	__u32 h_back_porch;
+	__u32 h_front_porch;
+	__u32 h_pulse_width;
+	__u32 v_back_porch;
+	__u32 v_front_porch;
+	__u32 v_pulse_width;
+	__u32 border_clr;
+	__u32 underflow_clr;
+	__u32 hsync_skew;
+};
+
+struct mddi_panel_info {
+	__u32 vdopkt;
+};
+
+/* DSI PHY configuration */
+struct mipi_dsi_phy_ctrl {
+	uint32 regulator[5];
+	uint32 timing[12];
+	uint32 ctrl[4];
+	uint32 strength[4];
+	uint32 pll[21];
+};
+
+struct mipi_panel_info {
+	char mode;		/* video/cmd */
+	char interleave_mode;
+	char crc_check;
+	char ecc_check;
+	char dst_format;	/* shared by video and command */
+	char data_lane0;
+	char data_lane1;
+	char data_lane2;
+	char data_lane3;
+	char dlane_swap;	/* data lane swap */
+	char rgb_swap;
+	char b_sel;
+	char g_sel;
+	char r_sel;
+	char rx_eot_ignore;
+	char tx_eot_append;
+	char t_clk_post; /* 0xc0, DSI_CLKOUT_TIMING_CTRL */
+	char t_clk_pre;  /* 0xc0, DSI_CLKOUT_TIMING_CTRL */
+	char vc;	/* virtual channel */
+	struct mipi_dsi_phy_ctrl *dsi_phy_db;
+	/* video mode */
+	char pulse_mode_hsa_he;
+	char hfp_power_stop;
+	char hbp_power_stop;
+	char hsa_power_stop;
+	char eof_bllp_power_stop;
+	char bllp_power_stop;
+	char traffic_mode;
+	char frame_rate;
+	/* command mode */
+	char interleave_max;
+	char insert_dcs_cmd;
+	char wr_mem_continue;
+	char wr_mem_start;
+	char te_sel;
+	char stream;	/* 0 or 1 */
+	char mdp_trigger;
+	char dma_trigger;
+	uint32 dsi_pclk_rate;
+	/* Pad width */
+	uint32 xres_pad;
+	/* Pad height */
+	uint32 yres_pad;
+};
+
+struct msm_panel_info {
+	__u32 xres;
+	__u32 yres;
+	__u32 bpp;
+	__u32 mode2_xres;
+	__u32 mode2_yres;
+	__u32 mode2_bpp;
+	__u32 type;
+	__u32 wait_cycle;
+	DISP_TARGET_PHYS pdest;
+	__u32 bl_max;
+	__u32 bl_min;
+	__u32 fb_num;
+	__u32 clk_rate;
+	__u32 clk_min;
+	__u32 clk_max;
+	__u32 frame_count;
+	__u32 is_3d_panel;
+
+
+	struct mddi_panel_info mddi;
+	struct lcd_panel_info lcd;
+	struct lcdc_panel_info lcdc;
+
+	struct mipi_panel_info mipi;
+};
+
+#define MSM_FB_SINGLE_MODE_PANEL(pinfo)		\
+	do {					\
+		(pinfo)->mode2_xres = 0;	\
+		(pinfo)->mode2_yres = 0;	\
+		(pinfo)->mode2_bpp = 0;		\
+	} while (0)
+
+struct msm_fb_panel_data {
+	struct msm_panel_info panel_info;
+	void (*set_rect) (int x, int y, int xres, int yres);
+	void (*set_vsync_notifier) (msm_fb_vsync_handler_type, void *arg);
+	void (*set_backlight) (struct msm_fb_data_type *);
+
+	/* function entry chain */
+	int (*on) (struct platform_device *pdev);
+	int (*off) (struct platform_device *pdev);
+	struct platform_device *next;
+	int (*clk_func) (int enable);
+};
+
+/*===========================================================================
+  FUNCTION PROTOTYPES
+============================================================================*/
+struct platform_device *msm_fb_device_alloc(struct msm_fb_panel_data *pdata,
+						u32 type, u32 id);
+int panel_next_on(struct platform_device *pdev);
+int panel_next_off(struct platform_device *pdev);
+
+int lcdc_device_register(struct msm_panel_info *pinfo);
+
+int mddi_toshiba_device_register(struct msm_panel_info *pinfo,
+					u32 channel, u32 panel);
+
+#endif /* MSM_FB_PANEL_H */
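An illustrative sketch (not part of this patch) of a board/panel file filling in msm_panel_info for a simple LCDC panel and handing it to msm_fb_device_alloc(). The timing and clock numbers are placeholders, not taken from any real panel, and real boards typically go through helpers such as lcdc_device_register(); it presumes msm_fb_panel.h is included.

static int __init example_lcdc_panel_init(void)
{
	static struct msm_fb_panel_data panel_data;
	struct msm_panel_info *pinfo = &panel_data.panel_info;
	struct platform_device *pdev;

	pinfo->xres = 480;			/* placeholder geometry */
	pinfo->yres = 800;
	pinfo->bpp = 18;
	pinfo->type = LCDC_PANEL;
	pinfo->pdest = DISPLAY_1;
	pinfo->fb_num = 2;
	pinfo->clk_rate = 24576000;		/* placeholder pixel clock */
	MSM_FB_SINGLE_MODE_PANEL(pinfo);

	pinfo->lcdc.h_back_porch = 20;		/* placeholder timings */
	pinfo->lcdc.h_front_porch = 20;
	pinfo->lcdc.h_pulse_width = 10;
	pinfo->lcdc.v_back_porch = 4;
	pinfo->lcdc.v_front_porch = 4;
	pinfo->lcdc.v_pulse_width = 2;

	pdev = msm_fb_device_alloc(&panel_data, pinfo->type, 0);
	return pdev ? platform_device_add(pdev) : -ENOMEM;
}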
diff --git a/drivers/video/msm/tvenc.c b/drivers/video/msm/tvenc.c
new file mode 100644
index 0000000..73e2428
--- /dev/null
+++ b/drivers/video/msm/tvenc.c
@@ -0,0 +1,521 @@
+/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <mach/hardware.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <mach/msm_reqs.h>
+
+#define TVENC_C
+#include "tvenc.h"
+#include "msm_fb.h"
+#include "mdp4.h"
+#ifdef CONFIG_MSM_NPA_SYSTEM_BUS
+/* NPA Flow ID */
+#define MSM_SYSTEM_BUS_RATE	MSM_AXI_FLOW_MDP_DTV_720P_2BPP
+#else
+/* AXI rate in Hz */
+#define MSM_SYSTEM_BUS_RATE	128000000
+#endif
+
+static int tvenc_probe(struct platform_device *pdev);
+static int tvenc_remove(struct platform_device *pdev);
+
+static int tvenc_off(struct platform_device *pdev);
+static int tvenc_on(struct platform_device *pdev);
+
+static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
+static int pdev_list_cnt;
+
+static struct clk *tvenc_clk;
+static struct clk *tvdac_clk;
+static struct clk *tvenc_pclk;
+static struct clk *mdp_tv_clk;
+#ifdef CONFIG_FB_MSM_MDP40
+static struct clk *tv_src_clk;
+#endif
+
+#ifdef CONFIG_MSM_BUS_SCALING
+static uint32_t tvenc_bus_scale_handle;
+#endif
+
+static int tvenc_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: suspending...\n");
+	return 0;
+}
+
+static int tvenc_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: resuming...\n");
+	return 0;
+}
+
+static struct dev_pm_ops tvenc_dev_pm_ops = {
+	.runtime_suspend = tvenc_runtime_suspend,
+	.runtime_resume = tvenc_runtime_resume,
+};
+
+static struct platform_driver tvenc_driver = {
+	.probe = tvenc_probe,
+	.remove = tvenc_remove,
+	.suspend = NULL,
+	.resume = NULL,
+	.shutdown = NULL,
+	.driver = {
+		   .name = "tvenc",
+		   .pm = &tvenc_dev_pm_ops
+		   },
+};
+
+int tvenc_set_encoder_clock(boolean clock_on)
+{
+	int ret = 0;
+	if (clock_on) {
+#ifdef CONFIG_FB_MSM_MDP40
+		/*
+		 * Consolidated clock used by both the HDMI and TV encoders.
+		 * This clock exists only on MDP4, not on older versions.
+		 */
+		ret = clk_set_rate(tv_src_clk, 27000000);
+		if (ret) {
+			pr_err("%s: tvsrc_clk set rate failed! %d\n",
+				__func__, ret);
+			goto tvsrc_err;
+		}
+#endif
+		ret = clk_enable(tvenc_clk);
+		if (ret) {
+			pr_err("%s: tvenc_clk enable failed! %d\n",
+				__func__, ret);
+			goto tvsrc_err;
+		}
+
+		if (!IS_ERR(tvenc_pclk)) {
+			ret = clk_enable(tvenc_pclk);
+			if (ret) {
+				pr_err("%s: tvenc_pclk enable failed! %d\n",
+					__func__, ret);
+				goto tvencp_err;
+			}
+		}
+		return ret;
+	} else {
+		if (!IS_ERR(tvenc_pclk))
+			clk_disable(tvenc_pclk);
+		clk_disable(tvenc_clk);
+		return ret;
+	}
+tvencp_err:
+	clk_disable(tvenc_clk);
+tvsrc_err:
+	return ret;
+}
+
+int tvenc_set_clock(boolean clock_on)
+{
+	int ret = 0;
+	if (clock_on) {
+		if (tvenc_pdata->poll) {
+			ret = tvenc_set_encoder_clock(CLOCK_ON);
+			if (ret) {
+				pr_err("%s: TVenc clock(s) enable failed! %d\n",
+					__func__, ret);
+				goto tvenc_err;
+			}
+		}
+		ret = clk_enable(tvdac_clk);
+		if (ret) {
+			pr_err("%s: tvdac_clk enable failed! %d\n",
+				__func__, ret);
+			goto tvdac_err;
+		}
+		if (!IS_ERR(mdp_tv_clk)) {
+			ret = clk_enable(mdp_tv_clk);
+			if (ret) {
+				pr_err("%s: mdp_tv_clk enable failed! %d\n",
+					__func__, ret);
+				goto mdptv_err;
+			}
+		}
+		return ret;
+	} else {
+		if (!IS_ERR(mdp_tv_clk))
+			clk_disable(mdp_tv_clk);
+		clk_disable(tvdac_clk);
+		if (tvenc_pdata->poll)
+			tvenc_set_encoder_clock(CLOCK_OFF);
+		return ret;
+	}
+
+mdptv_err:
+	clk_disable(tvdac_clk);
+tvdac_err:
+	tvenc_set_encoder_clock(CLOCK_OFF);
+tvenc_err:
+	return ret;
+}
+
+static int tvenc_off(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	struct msm_fb_data_type *mfd;
+
+	mfd = platform_get_drvdata(pdev);
+
+	ret = panel_next_off(pdev);
+	if (ret)
+		pr_err("%s: tvout_off failed! %d\n",
+		__func__, ret);
+
+	tvenc_set_clock(CLOCK_OFF);
+
+	if (tvenc_pdata && tvenc_pdata->pm_vid_en)
+		ret = tvenc_pdata->pm_vid_en(0);
+#ifdef CONFIG_MSM_BUS_SCALING
+	if (tvenc_bus_scale_handle > 0)
+		msm_bus_scale_client_update_request(tvenc_bus_scale_handle,
+							0);
+#else
+	if (mfd->ebi1_clk)
+		clk_disable(mfd->ebi1_clk);
+#endif
+
+	if (ret)
+		pr_err("%s: pm_vid_en(off) failed! %d\n",
+		__func__, ret);
+	mdp4_extn_disp = 0;
+	return ret;
+}
+
+static int tvenc_on(struct platform_device *pdev)
+{
+	int ret = 0;
+
+#ifndef CONFIG_MSM_BUS_SCALING
+	struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
+#endif
+
+#ifdef CONFIG_MSM_BUS_SCALING
+	if (tvenc_bus_scale_handle > 0)
+		msm_bus_scale_client_update_request(tvenc_bus_scale_handle,
+							1);
+#else
+	if (mfd->ebi1_clk)
+		clk_enable(mfd->ebi1_clk);
+#endif
+	mdp_set_core_clk(1);
+	mdp4_extn_disp = 1;
+	if (tvenc_pdata && tvenc_pdata->pm_vid_en)
+		ret = tvenc_pdata->pm_vid_en(1);
+	if (ret) {
+		pr_err("%s: pm_vid_en(on) failed! %d\n",
+		__func__, ret);
+		return ret;
+	}
+
+	ret = tvenc_set_clock(CLOCK_ON);
+	if (ret) {
+		pr_err("%s: tvenc_set_clock(CLOCK_ON) failed! %d\n",
+		__func__, ret);
+		tvenc_pdata->pm_vid_en(0);
+		goto error;
+	}
+
+	ret = panel_next_on(pdev);
+	if (ret) {
+		pr_err("%s: tvout_on failed! %d\n",
+		__func__, ret);
+		tvenc_set_clock(CLOCK_OFF);
+		tvenc_pdata->pm_vid_en(0);
+	}
+
+error:
+	return ret;
+
+}
+
+void tvenc_gen_test_pattern(struct msm_fb_data_type *mfd)
+{
+	uint32 reg = 0, i;
+
+	reg = readl(MSM_TV_ENC_CTL);
+	reg |= TVENC_CTL_TEST_PATT_EN;
+
+	for (i = 0; i < 3; i++) {
+		TV_OUT(TV_ENC_CTL, 0);	/* disable TV encoder */
+
+		switch (i) {
+			/*
+			 * TV Encoder - Color Bar Test Pattern
+			 */
+		case 0:
+			reg |= TVENC_CTL_TPG_CLRBAR;
+			break;
+			/*
+			 * TV Encoder - Red Frame Test Pattern
+			 */
+		case 1:
+			reg |= TVENC_CTL_TPG_REDCLR;
+			break;
+			/*
+			 * TV Encoder - Modulated Ramp Test Pattern
+			 */
+		default:
+			reg |= TVENC_CTL_TPG_MODRAMP;
+			break;
+		}
+
+		TV_OUT(TV_ENC_CTL, reg);
+		msleep(5000);	/* show each test pattern for 5 seconds */
+
+		switch (i) {
+			/*
+			 * TV Encoder - Color Bar Test Pattern
+			 */
+		case 0:
+			reg &= ~TVENC_CTL_TPG_CLRBAR;
+			break;
+			/*
+			 * TV Encoder - Red Frame Test Pattern
+			 */
+		case 1:
+			reg &= ~TVENC_CTL_TPG_REDCLR;
+			break;
+			/*
+			 * TV Encoder - Modulated Ramp Test Pattern
+			 */
+		default:
+			reg &= ~TVENC_CTL_TPG_MODRAMP;
+			break;
+		}
+	}
+}
+
+static int tvenc_resource_initialized;
+
+static int tvenc_probe(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+	struct platform_device *mdp_dev = NULL;
+	struct msm_fb_panel_data *pdata = NULL;
+	int rc;
+
+	if (pdev->id == 0) {
+		tvenc_base = ioremap(pdev->resource[0].start,
+					pdev->resource[0].end -
+					pdev->resource[0].start + 1);
+		if (!tvenc_base) {
+			pr_err("tvenc_base ioremap failed!\n");
+			return -ENOMEM;
+		}
+		tvenc_pdata = pdev->dev.platform_data;
+		tvenc_resource_initialized = 1;
+		return 0;
+	}
+
+	if (!tvenc_resource_initialized)
+		return -EPERM;
+
+	mfd = platform_get_drvdata(pdev);
+
+	if (!mfd)
+		return -ENODEV;
+
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
+		return -ENOMEM;
+
+	if (tvenc_base == NULL)
+		return -ENOMEM;
+
+	mdp_dev = platform_device_alloc("mdp", pdev->id);
+	if (!mdp_dev)
+		return -ENOMEM;
+
+	/*
+	 * link to the latest pdev
+	 */
+	mfd->pdev = mdp_dev;
+	mfd->dest = DISPLAY_TV;
+
+	/*
+	 * alloc panel device data
+	 */
+	if (platform_device_add_data
+	    (mdp_dev, pdev->dev.platform_data,
+	     sizeof(struct msm_fb_panel_data))) {
+		pr_err("tvenc_probe: platform_device_add_data failed!\n");
+		platform_device_put(mdp_dev);
+		return -ENOMEM;
+	}
+	/*
+	 * data chain
+	 */
+	pdata = mdp_dev->dev.platform_data;
+	pdata->on = tvenc_on;
+	pdata->off = tvenc_off;
+	pdata->next = pdev;
+
+	/*
+	 * get/set panel specific fb info
+	 */
+	mfd->panel_info = pdata->panel_info;
+#ifdef CONFIG_FB_MSM_MDP40
+	mfd->fb_imgType = MDP_RGB_565;  /* base layer */
+#else
+	mfd->fb_imgType = MDP_YCRYCB_H2V1;
+#endif
+
+#ifdef CONFIG_MSM_BUS_SCALING
+	if (!tvenc_bus_scale_handle && tvenc_pdata &&
+		tvenc_pdata->bus_scale_table) {
+		tvenc_bus_scale_handle =
+			msm_bus_scale_register_client(
+				tvenc_pdata->bus_scale_table);
+		if (!tvenc_bus_scale_handle) {
+			printk(KERN_ERR "%s not able to get bus scale\n",
+				__func__);
+		}
+	}
+#else
+	mfd->ebi1_clk = clk_get(NULL, "ebi1_tv_clk");
+	if (IS_ERR(mfd->ebi1_clk)) {
+		rc = PTR_ERR(mfd->ebi1_clk);
+		goto tvenc_probe_err;
+	}
+	clk_set_rate(mfd->ebi1_clk, MSM_SYSTEM_BUS_RATE);
+#endif
+
+	/*
+	 * set driver data
+	 */
+	platform_set_drvdata(mdp_dev, mfd);
+
+	/*
+	 * register in mdp driver
+	 */
+	rc = platform_device_add(mdp_dev);
+	if (rc)
+		goto tvenc_probe_err;
+
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+
+
+	pdev_list[pdev_list_cnt++] = pdev;
+
+	return 0;
+
+tvenc_probe_err:
+#ifdef CONFIG_MSM_BUS_SCALING
+	if (tvenc_pdata && tvenc_pdata->bus_scale_table &&
+		tvenc_bus_scale_handle > 0) {
+		msm_bus_scale_unregister_client(tvenc_bus_scale_handle);
+		tvenc_bus_scale_handle = 0;
+	}
+#endif
+	platform_device_put(mdp_dev);
+	return rc;
+}
+
+static int tvenc_remove(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+
+	mfd = platform_get_drvdata(pdev);
+
+#ifdef CONFIG_MSM_BUS_SCALING
+	if (tvenc_pdata && tvenc_pdata->bus_scale_table &&
+		tvenc_bus_scale_handle > 0) {
+		msm_bus_scale_unregister_client(tvenc_bus_scale_handle);
+		tvenc_bus_scale_handle = 0;
+	}
+#else
+	clk_put(mfd->ebi1_clk);
+#endif
+
+	pm_runtime_disable(&pdev->dev);
+	return 0;
+}
+
+static int tvenc_register_driver(void)
+{
+	return platform_driver_register(&tvenc_driver);
+}
+
+static int __init tvenc_driver_init(void)
+{
+	int ret;
+	tvenc_clk = clk_get(NULL, "tv_enc_clk");
+	tvdac_clk = clk_get(NULL, "tv_dac_clk");
+	tvenc_pclk = clk_get(NULL, "tv_enc_pclk");
+	mdp_tv_clk = clk_get(NULL, "mdp_tv_clk");
+
+#ifdef CONFIG_FB_MSM_MDP40
+	tv_src_clk = clk_get(NULL, "tv_src_clk");
+	if (IS_ERR(tv_src_clk))
+		tv_src_clk = tvenc_clk; /* Fallback to slave */
+#endif
+
+	if (IS_ERR(tvenc_clk)) {
+		pr_err("%s: error: can't get tvenc_clk!\n", __func__);
+		return PTR_ERR(tvenc_clk);
+	}
+
+	if (IS_ERR(tvdac_clk)) {
+		pr_err("%s: error: can't get tvdac_clk!\n", __func__);
+		return PTR_ERR(tvdac_clk);
+	}
+
+	if (IS_ERR(tvenc_pclk)) {
+		ret = PTR_ERR(tvenc_pclk);
+		if (-ENOENT == ret)
+			pr_info("%s: tvenc_pclk does not exist!\n", __func__);
+		else {
+			pr_err("%s: error: can't get tvenc_pclk!\n", __func__);
+			return ret;
+		}
+	}
+
+	if (IS_ERR(mdp_tv_clk)) {
+		ret = PTR_ERR(mdp_tv_clk);
+		if (-ENOENT == ret)
+			pr_info("%s: mdp_tv_clk does not exist!\n", __func__);
+		else {
+			pr_err("%s: error: can't get mdp_tv_clk!\n", __func__);
+			return ret;
+		}
+	}
+
+	return tvenc_register_driver();
+}
+
+module_init(tvenc_driver_init);
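An illustrative sketch (not part of this patch) of the board-side platform data consumed by tvenc_probe() above. Only fields actually referenced in this file (poll, pm_vid_en) are shown; the struct layout and the regulator handling inside the callback are assumptions for the example.

static int example_tv_power(int on)
{
	/* A real board would switch the TV-out regulators/GPIOs here. */
	pr_info("tvout power %s\n", on ? "on" : "off");
	return 0;
}

static struct tvenc_platform_data example_tvenc_pdata = {
	.poll = 1,			/* cable detect by polling */
	.pm_vid_en = example_tv_power,
};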
diff --git a/drivers/video/msm/tvenc.h b/drivers/video/msm/tvenc.h
new file mode 100644
index 0000000..c64c160
--- /dev/null
+++ b/drivers/video/msm/tvenc.h
@@ -0,0 +1,129 @@
+/* Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef TVENC_H
+#define TVENC_H
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/fb.h>
+
+#include <mach/hardware.h>
+#include <linux/io.h>
+
+#include <asm/system.h>
+#include <asm/mach-types.h>
+
+#include "msm_fb_panel.h"
+
+#define NTSC_M		0 /* North America, Korea */
+#define NTSC_J		1 /* Japan */
+#define PAL_BDGHIN	2 /* PAL-B/D/G/H/I and non-Argentina PAL-N */
+#define PAL_M		3 /* PAL-M */
+#define PAL_N		4 /* Argentina PAL-N */
+
+#define CLOCK_OFF	0
+#define CLOCK_ON	1
+
+/* 3.57954545 MHz */
+#define TVENC_CTL_TV_MODE_NTSC_M_PAL60  0
+/* 3.57961149 MHz */
+#define TVENC_CTL_TV_MODE_PAL_M			BIT(0)
+/* non-Argentina = 4.3361875 MHz */
+#define TVENC_CTL_TV_MODE_PAL_BDGHIN		BIT(1)
+/* Argentina = 3.582055625 MHz */
+#define TVENC_CTL_TV_MODE_PAL_N			(BIT(1)|BIT(0))
+
+#define TVENC_CTL_ENC_EN			BIT(2)
+#define TVENC_CTL_CC_EN				BIT(3)
+#define TVENC_CTL_CGMS_EN			BIT(4)
+#define TVENC_CTL_MACRO_EN			BIT(5)
+#define TVENC_CTL_Y_FILTER_W_NOTCH		BIT(6)
+#define TVENC_CTL_Y_FILTER_WO_NOTCH		0
+#define TVENC_CTL_Y_FILTER_EN			BIT(7)
+#define TVENC_CTL_CR_FILTER_EN			BIT(8)
+#define TVENC_CTL_CB_FILTER_EN			BIT(9)
+#define TVENC_CTL_SINX_FILTER_EN		BIT(10)
+#define TVENC_CTL_TEST_PATT_EN			BIT(11)
+#define TVENC_CTL_OUTPUT_INV			BIT(12)
+#define TVENC_CTL_PAL60_MODE			BIT(13)
+#define TVENC_CTL_NTSCJ_MODE			BIT(14)
+#define TVENC_CTL_S_VIDEO_EN			BIT(19)
+
+
+#define TVENC_CTL_TPG_CLRBAR			0
+#define TVENC_CTL_TPG_MODRAMP			BIT(15)
+#define TVENC_CTL_TPG_REDCLR			BIT(16)
+#define TVENC_CTL_TPG_NTSC_CBAR			(BIT(16)|BIT(15))
+#define TVENC_CTL_TPG_BLACK			BIT(17)
+#define TVENC_CTL_TPG_WHITE100			(BIT(17)|BIT(15))
+#define TVENC_CTL_TPG_YELLOW75			(BIT(17)|BIT(16))
+#define TVENC_CTL_TPG_CYAN75			(BIT(17)|BIT(16)|BIT(15))
+#define TVENC_CTL_TPG_GREEN75			BIT(18)
+#define TVENC_CTL_TPG_MAGENTA75			(BIT(18)|BIT(15))
+#define TVENC_CTL_TPG_RED75			(BIT(18)|BIT(16))
+#define TVENC_CTL_TPG_BLUE75			(BIT(18)|BIT(16)|BIT(15))
+#define TVENC_CTL_TPG_WHITE75			(BIT(18)|BIT(17))
+#define TVENC_CTL_TPG_WHITE_TRSTN		(BIT(18)|BIT(17)|BIT(15))
+
+#define TVENC_LOAD_DETECT_EN			BIT(8)
+
+#ifdef TVENC_C
+void *tvenc_base;
+struct tvenc_platform_data *tvenc_pdata;
+#else
+extern void *tvenc_base;
+extern struct tvenc_platform_data *tvenc_pdata;
+#endif
+
+#define TV_OUT(reg, v)		writel(v, tvenc_base + MSM_##reg)
+#define TV_IN(reg)		readl(tvenc_base + MSM_##reg)
+
+#define MSM_TV_ENC_CTL				0x00
+#define MSM_TV_LEVEL				0x04
+#define MSM_TV_GAIN				0x08
+#define MSM_TV_OFFSET				0x0c
+#define MSM_TV_CGMS				0x10
+#define MSM_TV_SYNC_1				0x14
+#define MSM_TV_SYNC_2				0x18
+#define MSM_TV_SYNC_3				0x1c
+#define MSM_TV_SYNC_4				0x20
+#define MSM_TV_SYNC_5				0x24
+#define MSM_TV_SYNC_6				0x28
+#define MSM_TV_SYNC_7				0x2c
+#define MSM_TV_BURST_V1				0x30
+#define MSM_TV_BURST_V2				0x34
+#define MSM_TV_BURST_V3				0x38
+#define MSM_TV_BURST_V4				0x3c
+#define MSM_TV_BURST_H				0x40
+#define MSM_TV_SOL_REQ_ODD			0x44
+#define MSM_TV_SOL_REQ_EVEN			0x48
+#define MSM_TV_DAC_CTL				0x4c
+#define MSM_TV_TEST_MUX				0x50
+#define MSM_TV_TEST_MODE			0x54
+#define MSM_TV_TEST_MISR_RESET			0x58
+#define MSM_TV_TEST_EXPORT_MISR			0x5c
+#define MSM_TV_TEST_MISR_CURR_VAL		0x60
+#define MSM_TV_TEST_SOF_CFG			0x64
+#define MSM_TV_DAC_INTF				0x100
+
+#define MSM_TV_INTR_ENABLE			0x200
+#define MSM_TV_INTR_STATUS			0x204
+#define MSM_TV_INTR_CLEAR			0x208
+
+int tvenc_set_encoder_clock(boolean clock_on);
+int tvenc_set_clock(boolean clock_on);
+#endif /* TVENC_H */
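An illustrative sketch (not part of this patch) of the TV_IN()/TV_OUT() accessors defined above: they expand to readl()/writel() on tvenc_base plus the MSM_TV_* offset, so register names are passed without the MSM_ prefix. The helper below is a hypothetical example of the read-modify-write pattern used by the driver.

static void example_enable_load_detect(void)
{
	uint32 reg = TV_IN(TV_DAC_INTF);	/* read current DAC interface state */

	TV_OUT(TV_DAC_INTF, reg | TVENC_LOAD_DETECT_EN);
	TV_OUT(TV_INTR_CLEAR, 0xf);		/* clear any stale status bits */
}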
diff --git a/drivers/video/msm/tvout_msm.c b/drivers/video/msm/tvout_msm.c
new file mode 100644
index 0000000..6912961
--- /dev/null
+++ b/drivers/video/msm/tvout_msm.c
@@ -0,0 +1,648 @@
+/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+
+#include "msm_fb.h"
+#include "tvenc.h"
+#include "external_common.h"
+
+#define TVOUT_HPD_DUTY_CYCLE 3000
+
+#define TV_DIMENSION_MAX_WIDTH		720
+#define TV_DIMENSION_MAX_HEIGHT		576
+
+struct tvout_msm_state_type {
+	struct external_common_state_type common;
+	struct platform_device *pdev;
+	struct timer_list hpd_state_timer;
+	struct timer_list hpd_work_timer;
+	struct work_struct hpd_work;
+	uint32 hpd_int_status;
+	uint32 prev_hpd_int_status;
+	uint32 five_retry;
+	int irq;
+	uint16 y_res;
+	boolean hpd_initialized;
+	boolean disp_powered_up;
+#ifdef CONFIG_SUSPEND
+	boolean pm_suspended;
+#endif
+
+};
+
+static struct tvout_msm_state_type *tvout_msm_state;
+static DEFINE_MUTEX(tvout_msm_state_mutex);
+
+static int tvout_off(struct platform_device *pdev);
+static int tvout_on(struct platform_device *pdev);
+static void tvout_check_status(void);
+
+static void tvout_msm_turn_on(boolean power_on)
+{
+	uint32 reg_val = 0;
+	reg_val = TV_IN(TV_ENC_CTL);
+	if (power_on) {
+		DEV_DBG("%s: TV Encoder turned on\n", __func__);
+		reg_val |= TVENC_CTL_ENC_EN;
+	} else {
+		DEV_DBG("%s: TV Encoder turned off\n", __func__);
+		reg_val = 0;
+	}
+	/* Apply the new TV encoder state */
+	TV_OUT(TV_ENC_CTL, reg_val);
+}
+
+static void tvout_check_status(void)
+{
+	tvout_msm_state->hpd_int_status &= 0x05;
+	/*
+	 * hpd_int_status could be either 0x05 or 0x04 for a cable
+	 * plug-out event when cable detect is driven by polling.
+	 */
+	if ((((tvout_msm_state->hpd_int_status == 0x05) ||
+		(tvout_msm_state->hpd_int_status == 0x04)) &&
+		(tvout_msm_state->prev_hpd_int_status == BIT(2))) ||
+		((tvout_msm_state->hpd_int_status == 0x01) &&
+		(tvout_msm_state->prev_hpd_int_status == BIT(0)))) {
+		DEV_DBG("%s: cable event sent already!\n", __func__);
+		return;
+	}
+
+	if (tvout_msm_state->hpd_int_status & BIT(2)) {
+		DEV_DBG("%s: cable plug-out\n", __func__);
+		mutex_lock(&external_common_state_hpd_mutex);
+		external_common_state->hpd_state = FALSE;
+		mutex_unlock(&external_common_state_hpd_mutex);
+		kobject_uevent(external_common_state->uevent_kobj,
+				KOBJ_OFFLINE);
+		tvout_msm_state->prev_hpd_int_status = BIT(2);
+	} else if (tvout_msm_state->hpd_int_status & BIT(0)) {
+		DEV_DBG("%s: cable plug-in\n", __func__);
+		mutex_lock(&external_common_state_hpd_mutex);
+		external_common_state->hpd_state = TRUE;
+		mutex_unlock(&external_common_state_hpd_mutex);
+		kobject_uevent(external_common_state->uevent_kobj,
+				KOBJ_ONLINE);
+		tvout_msm_state->prev_hpd_int_status = BIT(0);
+	}
+}
+
+/* ISR for TV out cable detect */
+static irqreturn_t tvout_msm_isr(int irq, void *dev_id)
+{
+	tvout_msm_state->hpd_int_status = TV_IN(TV_INTR_STATUS);
+	TV_OUT(TV_INTR_CLEAR, tvout_msm_state->hpd_int_status);
+	DEV_DBG("%s: ISR: 0x%02x\n", __func__,
+		tvout_msm_state->hpd_int_status & 0x05);
+
+	if (tvenc_pdata->poll)
+		if (!tvout_msm_state || !tvout_msm_state->disp_powered_up) {
+			DEV_DBG("%s: ISR ignored, display not yet powered on\n",
+				__func__);
+			return IRQ_HANDLED;
+		}
+	if (tvout_msm_state->hpd_int_status & BIT(0) ||
+		tvout_msm_state->hpd_int_status & BIT(2)) {
+		/* Use .75sec to debounce the interrupt */
+		mod_timer(&tvout_msm_state->hpd_state_timer, jiffies
+			+ msecs_to_jiffies(750));
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* Interrupt debounce timer */
+static void tvout_msm_hpd_state_timer(unsigned long data)
+{
+#ifdef CONFIG_SUSPEND
+	mutex_lock(&tvout_msm_state_mutex);
+	if (tvout_msm_state->pm_suspended) {
+		mutex_unlock(&tvout_msm_state_mutex);
+		DEV_WARN("%s: ignored, pm_suspended\n", __func__);
+		return;
+	}
+	mutex_unlock(&tvout_msm_state_mutex);
+#endif
+
+	if (tvenc_pdata->poll)
+		if (!tvout_msm_state || !tvout_msm_state->disp_powered_up) {
+			DEV_DBG("%s: ignored, display powered off\n", __func__);
+			return;
+		}
+
+	/*
+	 * TV_INTR_STATUS[0x204]
+	 * When a TV_ENC interrupt occurs, reading this register indicates
+	 * what caused it, since each bit identifies one interrupt source.
+	 * If multiple interrupt sources fired, multiple bits will be set.
+	 *	Bit 0 : Load present on Video1
+	 *	Bit 1 : Load present on Video2
+	 *	Bit 2 : Load removed on Video1
+	 *	Bit 3 : Load removed on Video2
+	 */
+
+	/*
+	 * Locking the interrupt status is not required because the last
+	 * status read after debouncing is used.
+	 */
+	if ((tvout_msm_state->hpd_int_status & 0x05) == 0x05) {
+		/*
+		 * SW workaround: if the status read after debouncing is
+		 * 0x05 (indicating both load present and load removed, which
+		 * can't happen in reality), force an update. If the status
+		 * remains 0x05 after the retry, it's a cable unplug event.
+		 */
+		if (++tvout_msm_state->five_retry < 2) {
+			uint32 reg;
+			DEV_DBG("tvout: Timer: 0x05\n");
+			TV_OUT(TV_INTR_CLEAR, 0xf);
+			reg = TV_IN(TV_DAC_INTF);
+			TV_OUT(TV_DAC_INTF, reg & ~TVENC_LOAD_DETECT_EN);
+			TV_OUT(TV_INTR_CLEAR, 0xf);
+			reg = TV_IN(TV_DAC_INTF);
+			TV_OUT(TV_DAC_INTF, reg | TVENC_LOAD_DETECT_EN);
+			return;
+		}
+	}
+	tvout_msm_state->five_retry = 0;
+	tvout_check_status();
+}
+
+static void tvout_msm_hpd_work(struct work_struct *work)
+{
+	uint32 reg;
+
+#ifdef CONFIG_SUSPEND
+	mutex_lock(&tvout_msm_state_mutex);
+	if (tvout_msm_state->pm_suspended) {
+		mutex_unlock(&tvout_msm_state_mutex);
+		DEV_WARN("%s: ignored, pm_suspended\n", __func__);
+		return;
+	}
+	mutex_unlock(&tvout_msm_state_mutex);
+#endif
+
+	/* Enable power lines & clocks */
+	tvenc_pdata->pm_vid_en(1);
+	tvenc_set_clock(CLOCK_ON);
+
+	/* Enable encoder to get a stable interrupt */
+	reg = TV_IN(TV_ENC_CTL);
+	TV_OUT(TV_ENC_CTL, reg | TVENC_CTL_ENC_EN);
+
+	/* SW workaround to refresh the status register */
+	reg = TV_IN(TV_DAC_INTF);
+	TV_OUT(TV_DAC_INTF, reg & ~TVENC_LOAD_DETECT_EN);
+	TV_OUT(TV_INTR_CLEAR, 0xf);
+	reg = TV_IN(TV_DAC_INTF);
+	TV_OUT(TV_DAC_INTF, reg | TVENC_LOAD_DETECT_EN);
+
+	tvout_msm_state->hpd_int_status = TV_IN(TV_INTR_STATUS);
+
+	/* Disable TV encoder */
+	reg = TV_IN(TV_ENC_CTL);
+	TV_OUT(TV_ENC_CTL, reg & ~TVENC_CTL_ENC_EN);
+
+	/* Disable power lines & clocks */
+	tvenc_set_clock(CLOCK_OFF);
+	tvenc_pdata->pm_vid_en(0);
+
+	DEV_DBG("%s: ISR: 0x%02x\n", __func__,
+		tvout_msm_state->hpd_int_status & 0x05);
+
+	mod_timer(&tvout_msm_state->hpd_work_timer, jiffies
+		+ msecs_to_jiffies(TVOUT_HPD_DUTY_CYCLE));
+
+	tvout_check_status();
+}
+
+static void tvout_msm_hpd_work_timer(unsigned long data)
+{
+	schedule_work(&tvout_msm_state->hpd_work);
+}
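+
+/*
+ * Cable detect flow (polling mode): hpd_work_timer fires every
+ * TVOUT_HPD_DUTY_CYCLE ms and schedules hpd_work, which briefly powers up
+ * the encoder, samples TV_INTR_STATUS with load detect enabled, re-arms
+ * the timer and reports any change through tvout_check_status().
+ */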
+
+static int tvout_on(struct platform_device *pdev)
+{
+	uint32 reg = 0;
+	struct fb_var_screeninfo *var;
+	struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
+
+	if (!mfd)
+		return -ENODEV;
+
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+#ifdef CONFIG_SUSPEND
+	mutex_lock(&tvout_msm_state_mutex);
+	if (tvout_msm_state->pm_suspended) {
+		mutex_unlock(&tvout_msm_state_mutex);
+		DEV_WARN("%s: ignored, pm_suspended\n", __func__);
+		return -ENODEV;
+	}
+	mutex_unlock(&tvout_msm_state_mutex);
+#endif
+
+	var = &mfd->fbi->var;
+	if (var->reserved[3] >= NTSC_M && var->reserved[3] <= PAL_N)
+		external_common_state->video_resolution = var->reserved[3];
+
+	tvout_msm_state->pdev = pdev;
+	if (del_timer(&tvout_msm_state->hpd_work_timer))
+		DEV_DBG("%s: work timer stopped\n", __func__);
+
+	TV_OUT(TV_ENC_CTL, 0);	/* disable TV encoder */
+
+	switch (external_common_state->video_resolution) {
+	case NTSC_M:
+	case NTSC_J:
+		TV_OUT(TV_CGMS, 0x0);
+		/*  NTSC Timing */
+		TV_OUT(TV_SYNC_1, 0x0020009e);
+		TV_OUT(TV_SYNC_2, 0x011306B4);
+		TV_OUT(TV_SYNC_3, 0x0006000C);
+		TV_OUT(TV_SYNC_4, 0x0028020D);
+		TV_OUT(TV_SYNC_5, 0x005E02FB);
+		TV_OUT(TV_SYNC_6, 0x0006000C);
+		TV_OUT(TV_SYNC_7, 0x00000012);
+		TV_OUT(TV_BURST_V1, 0x0013020D);
+		TV_OUT(TV_BURST_V2, 0x0014020C);
+		TV_OUT(TV_BURST_V3, 0x0013020D);
+		TV_OUT(TV_BURST_V4, 0x0014020C);
+		TV_OUT(TV_BURST_H, 0x00AE00F2);
+		TV_OUT(TV_SOL_REQ_ODD, 0x00280208);
+		TV_OUT(TV_SOL_REQ_EVEN, 0x00290209);
+
+		reg |= TVENC_CTL_TV_MODE_NTSC_M_PAL60;
+
+		if (external_common_state->video_resolution == NTSC_M) {
+			/* Cr gain 11, Cb gain C6, y_gain 97 */
+			TV_OUT(TV_GAIN, 0x0081B697);
+		} else {
+			/* Cr gain 11, Cb gain C6, y_gain 97 */
+			TV_OUT(TV_GAIN, 0x008bc4a3);
+			reg |= TVENC_CTL_NTSCJ_MODE;
+		}
+
+		var->yres = 480;
+		break;
+	case PAL_BDGHIN:
+	case PAL_N:
+		/*  PAL Timing */
+		TV_OUT(TV_SYNC_1, 0x00180097);
+		TV_OUT(TV_SYNC_3, 0x0005000a);
+		TV_OUT(TV_SYNC_4, 0x00320271);
+		TV_OUT(TV_SYNC_5, 0x005602f9);
+		TV_OUT(TV_SYNC_6, 0x0005000a);
+		TV_OUT(TV_SYNC_7, 0x0000000f);
+		TV_OUT(TV_BURST_V1, 0x0012026e);
+		TV_OUT(TV_BURST_V2, 0x0011026d);
+		TV_OUT(TV_BURST_V3, 0x00100270);
+		TV_OUT(TV_BURST_V4, 0x0013026f);
+		TV_OUT(TV_SOL_REQ_ODD, 0x0030026e);
+		TV_OUT(TV_SOL_REQ_EVEN, 0x0031026f);
+
+		if (external_common_state->video_resolution == PAL_BDGHIN) {
+			/* Cr gain 11, Cb gain C6, y_gain 97 */
+			TV_OUT(TV_GAIN, 0x0088c1a0);
+			TV_OUT(TV_CGMS, 0x00012345);
+			TV_OUT(TV_SYNC_2, 0x011f06c0);
+			TV_OUT(TV_BURST_H, 0x00af00ea);
+			reg |= TVENC_CTL_TV_MODE_PAL_BDGHIN;
+		} else {
+			/* Cr gain 11, Cb gain C6, y_gain 97 */
+			TV_OUT(TV_GAIN, 0x0081b697);
+			TV_OUT(TV_CGMS, 0x000af317);
+			TV_OUT(TV_SYNC_2, 0x12006c0);
+			TV_OUT(TV_BURST_H, 0x00af00fa);
+			reg |= TVENC_CTL_TV_MODE_PAL_N;
+		}
+		var->yres = 576;
+		break;
+	case PAL_M:
+		/* Cr gain 11, Cb gain C6, y_gain 97 */
+		TV_OUT(TV_GAIN, 0x0081b697);
+		TV_OUT(TV_CGMS, 0x000af317);
+		TV_OUT(TV_TEST_MUX, 0x000001c3);
+		TV_OUT(TV_TEST_MODE, 0x00000002);
+		/*  PAL Timing */
+		TV_OUT(TV_SYNC_1, 0x0020009e);
+		TV_OUT(TV_SYNC_2, 0x011306b4);
+		TV_OUT(TV_SYNC_3, 0x0006000c);
+		TV_OUT(TV_SYNC_4, 0x0028020D);
+		TV_OUT(TV_SYNC_5, 0x005e02fb);
+		TV_OUT(TV_SYNC_6, 0x0006000c);
+		TV_OUT(TV_SYNC_7, 0x00000012);
+		TV_OUT(TV_BURST_V1, 0x0012020b);
+		TV_OUT(TV_BURST_V2, 0x0016020c);
+		TV_OUT(TV_BURST_V3, 0x00150209);
+		TV_OUT(TV_BURST_V4, 0x0013020c);
+		TV_OUT(TV_BURST_H, 0x00bf010b);
+		TV_OUT(TV_SOL_REQ_ODD, 0x00280208);
+		TV_OUT(TV_SOL_REQ_EVEN, 0x00290209);
+
+		reg |= TVENC_CTL_TV_MODE_PAL_M;
+		var->yres = 480;
+		break;
+	default:
+		return -ENODEV;
+	}
+
+	reg |= TVENC_CTL_Y_FILTER_EN | TVENC_CTL_CR_FILTER_EN |
+		TVENC_CTL_CB_FILTER_EN | TVENC_CTL_SINX_FILTER_EN;
+
+	/* DC offset to 0. */
+	TV_OUT(TV_LEVEL, 0x00000000);
+	TV_OUT(TV_OFFSET, 0x008080f0);
+
+#ifdef CONFIG_FB_MSM_TVOUT_SVIDEO
+	reg |= TVENC_CTL_S_VIDEO_EN;
+#endif
+#if defined(CONFIG_FB_MSM_MDP31)
+	TV_OUT(TV_DAC_INTF, 0x29);
+#endif
+	TV_OUT(TV_ENC_CTL, reg);
+
+	if (!tvout_msm_state->hpd_initialized) {
+		tvout_msm_state->hpd_initialized = TRUE;
+		/* Load detect enable */
+		reg = TV_IN(TV_DAC_INTF);
+		reg |= TVENC_LOAD_DETECT_EN;
+		TV_OUT(TV_DAC_INTF, reg);
+	}
+
+	tvout_msm_state->disp_powered_up = TRUE;
+	tvout_msm_turn_on(TRUE);
+
+	if (tvenc_pdata->poll) {
+		/* Enable Load present & removal interrupts for Video1 */
+		TV_OUT(TV_INTR_ENABLE, 0x5);
+
+		/* Enable interrupts when display is on */
+		enable_irq(tvout_msm_state->irq);
+	}
+	return 0;
+}
+
+static int tvout_off(struct platform_device *pdev)
+{
+	/* Disable TV encoder irqs when display is off */
+	if (tvenc_pdata->poll)
+		disable_irq(tvout_msm_state->irq);
+	tvout_msm_turn_on(FALSE);
+	tvout_msm_state->hpd_initialized = FALSE;
+	tvout_msm_state->disp_powered_up = FALSE;
+	if (tvenc_pdata->poll) {
+		mod_timer(&tvout_msm_state->hpd_work_timer, jiffies
+			+ msecs_to_jiffies(TVOUT_HPD_DUTY_CYCLE));
+	}
+	return 0;
+}
+
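+/*
+ * Two platform devices share this probe: the id 0 instance (presumably
+ * registered by the board code with the TV-out IRQ resource) only supplies
+ * the interrupt number, while the id 1 instance registered below by
+ * tvout_init() sets up the IRQ handler, timers and the framebuffer device.
+ */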
+static int __devinit tvout_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	uint32 reg;
+	struct platform_device *fb_dev;
+
+#ifdef CONFIG_FB_MSM_TVOUT_NTSC_M
+	external_common_state->video_resolution = NTSC_M;
+#elif defined CONFIG_FB_MSM_TVOUT_NTSC_J
+	external_common_state->video_resolution = NTSC_J;
+#elif defined CONFIG_FB_MSM_TVOUT_PAL_M
+	external_common_state->video_resolution = PAL_M;
+#elif defined CONFIG_FB_MSM_TVOUT_PAL_N
+	external_common_state->video_resolution = PAL_N;
+#elif defined CONFIG_FB_MSM_TVOUT_PAL_BDGHIN
+	external_common_state->video_resolution = PAL_BDGHIN;
+#endif
+	external_common_state->dev = &pdev->dev;
+	if (pdev->id == 0) {
+		struct resource *res;
+
+		#define GET_RES(name, mode) do {			\
+			res = platform_get_resource_byname(pdev, mode, name); \
+			if (!res) {					\
+				DEV_DBG("'" name "' resource not found\n"); \
+				rc = -ENODEV;				\
+				goto error;				\
+			}						\
+		} while (0)
+
+		#define GET_IRQ(var, name) do {				\
+			GET_RES(name, IORESOURCE_IRQ);			\
+			var = res->start;				\
+		} while (0)
+
+		GET_IRQ(tvout_msm_state->irq, "tvout_device_irq");
+		#undef GET_IRQ
+		#undef GET_RES
+		return 0;
+	}
+
+	DEV_DBG("%s: tvout_msm_state->irq : %d",
+			__func__, tvout_msm_state->irq);
+
+	rc = request_irq(tvout_msm_state->irq, &tvout_msm_isr,
+		IRQF_TRIGGER_HIGH, "tvout_msm_isr", NULL);
+
+	if (rc) {
+		DEV_DBG("Init FAILED: IRQ request, rc=%d\n", rc);
+		goto error;
+	}
+	disable_irq(tvout_msm_state->irq);
+
+	init_timer(&tvout_msm_state->hpd_state_timer);
+	tvout_msm_state->hpd_state_timer.function =
+		tvout_msm_hpd_state_timer;
+	tvout_msm_state->hpd_state_timer.data = (uint32)NULL;
+	tvout_msm_state->hpd_state_timer.expires = jiffies
+						+ msecs_to_jiffies(1000);
+
+	if (tvenc_pdata->poll) {
+		init_timer(&tvout_msm_state->hpd_work_timer);
+		tvout_msm_state->hpd_work_timer.function =
+			tvout_msm_hpd_work_timer;
+		tvout_msm_state->hpd_work_timer.data = (uint32)NULL;
+		tvout_msm_state->hpd_work_timer.expires = jiffies
+						+ msecs_to_jiffies(1000);
+	}
+	fb_dev = msm_fb_add_device(pdev);
+	if (fb_dev) {
+		rc = external_common_state_create(fb_dev);
+		if (rc) {
+			DEV_ERR("Init FAILED: tvout_msm_state_create, rc=%d\n",
+				rc);
+			goto error;
+		}
+		if (tvenc_pdata->poll) {
+			/* Start polling timer to detect load */
+			mod_timer(&tvout_msm_state->hpd_work_timer, jiffies
+				+ msecs_to_jiffies(TVOUT_HPD_DUTY_CYCLE));
+		} else {
+			/* Enable interrupt to detect load */
+			tvenc_set_encoder_clock(CLOCK_ON);
+			reg = TV_IN(TV_DAC_INTF);
+			reg |= TVENC_LOAD_DETECT_EN;
+			TV_OUT(TV_DAC_INTF, reg);
+			TV_OUT(TV_INTR_ENABLE, 0x5);
+			enable_irq(tvout_msm_state->irq);
+		}
+	} else
+		DEV_ERR("Init FAILED: failed to add fb device\n");
+error:
+	return rc;
+}
+
+static int __devexit tvout_remove(struct platform_device *pdev)
+{
+	external_common_state_remove();
+	kfree(tvout_msm_state);
+	tvout_msm_state = NULL;
+	return 0;
+}
+
+#ifdef CONFIG_SUSPEND
+static int tvout_device_pm_suspend(struct device *dev)
+{
+	mutex_lock(&tvout_msm_state_mutex);
+	if (tvout_msm_state->pm_suspended) {
+		mutex_unlock(&tvout_msm_state_mutex);
+		return 0;
+	}
+	if (tvenc_pdata->poll) {
+		if (del_timer(&tvout_msm_state->hpd_work_timer))
+			DEV_DBG("%s: suspending cable detect timer\n",
+				__func__);
+	} else {
+		disable_irq(tvout_msm_state->irq);
+		tvenc_set_encoder_clock(CLOCK_OFF);
+	}
+	tvout_msm_state->pm_suspended = TRUE;
+	mutex_unlock(&tvout_msm_state_mutex);
+	return 0;
+}
+
+static int tvout_device_pm_resume(struct device *dev)
+{
+	mutex_lock(&tvout_msm_state_mutex);
+	if (!tvout_msm_state->pm_suspended) {
+		mutex_unlock(&tvout_msm_state_mutex);
+		return 0;
+	}
+
+	if (tvenc_pdata->poll) {
+		tvout_msm_state->pm_suspended = FALSE;
+		mod_timer(&tvout_msm_state->hpd_work_timer, jiffies
+				+ msecs_to_jiffies(TVOUT_HPD_DUTY_CYCLE));
+		mutex_unlock(&tvout_msm_state_mutex);
+		DEV_DBG("%s: resuming cable detect timer\n", __func__);
+	} else {
+		tvenc_set_encoder_clock(CLOCK_ON);
+		tvout_msm_state->pm_suspended = FALSE;
+		mutex_unlock(&tvout_msm_state_mutex);
+		enable_irq(tvout_msm_state->irq);
+		DEV_DBG("%s: enable cable detect interrupt\n", __func__);
+	}
+	return 0;
+}
+#else
+#define tvout_device_pm_suspend	NULL
+#define tvout_device_pm_resume		NULL
+#endif
+
+
+static const struct dev_pm_ops tvout_device_pm_ops = {
+	.suspend = tvout_device_pm_suspend,
+	.resume = tvout_device_pm_resume,
+};
+
+static struct platform_driver this_driver = {
+	.probe  = tvout_probe,
+	.remove = tvout_remove,
+	.driver = {
+		.name	= "tvout_device",
+		.pm	= &tvout_device_pm_ops,
+	},
+};
+
+static struct msm_fb_panel_data tvout_panel_data = {
+	.panel_info.xres = TV_DIMENSION_MAX_WIDTH,
+	.panel_info.yres = TV_DIMENSION_MAX_HEIGHT,
+	.panel_info.type = TV_PANEL,
+	.panel_info.pdest = DISPLAY_2,
+	.panel_info.wait_cycle = 0,
+#ifdef CONFIG_FB_MSM_MDP40
+	.panel_info.bpp = 24,
+#else
+	.panel_info.bpp = 16,
+#endif
+	.panel_info.fb_num = 2,
+	.on = tvout_on,
+	.off = tvout_off,
+};
+
+static struct platform_device this_device = {
+	.name   = "tvout_device",
+	.id = 1,
+	.dev	= {
+		.platform_data = &tvout_panel_data,
+	}
+};
+
+static int __init tvout_init(void)
+{
+	int ret;
+	tvout_msm_state = kzalloc(sizeof(*tvout_msm_state), GFP_KERNEL);
+	if (!tvout_msm_state) {
+		DEV_ERR("tvout_msm_init FAILED: out of memory\n");
+		ret = -ENOMEM;
+		goto init_exit;
+	}
+
+	external_common_state = &tvout_msm_state->common;
+	ret = platform_driver_register(&this_driver);
+	if (ret) {
+		DEV_ERR("tvout_device_init FAILED: platform_driver_register\
+			rc=%d\n", ret);
+		goto init_exit;
+	}
+
+	ret = platform_device_register(&this_device);
+	if (ret) {
+		DEV_ERR("tvout_device_init FAILED: platform_driver_register\
+			rc=%d\n", ret);
+		platform_driver_unregister(&this_driver);
+		goto init_exit;
+	}
+
+	INIT_WORK(&tvout_msm_state->hpd_work, tvout_msm_hpd_work);
+	return 0;
+
+init_exit:
+	kfree(tvout_msm_state);
+	tvout_msm_state = NULL;
+	return ret;
+}
+
+static void __exit tvout_exit(void)
+{
+	platform_device_unregister(&this_device);
+	platform_driver_unregister(&this_driver);
+}
+
+module_init(tvout_init);
+module_exit(tvout_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("1.0");
+MODULE_AUTHOR("Qualcomm Innovation Center, Inc.");
+MODULE_DESCRIPTION("TV out driver");
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.c
new file mode 100644
index 0000000..b55c884
--- /dev/null
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.c
@@ -0,0 +1,634 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <mach/msm_memtypes.h>
+#include "vcd_ddl.h"
+#include "vcd_ddl_metadata.h"
+#include "vcd_res_tracker_api.h"
+
+static unsigned int first_time;
+
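+/*
+ * Rough flow of ddl_device_init(): validate the init config, claim the
+ * single DDL context, pick the firmware memory either from a kernel SMI
+ * allocation or from the reserved region reported by the resource tracker,
+ * allocate the shared metadata input buffer, load the firmware and finally
+ * issue the core DMA init command.
+ */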
+u32 ddl_device_init(struct ddl_init_config *ddl_init_config,
+	void *client_data)
+{
+	struct ddl_context *ddl_context;
+	struct res_trk_firmware_addr firmware_addr;
+	u32 status = VCD_S_SUCCESS;
+	void *ptr = NULL;
+	DDL_MSG_HIGH("ddl_device_init");
+
+	if ((!ddl_init_config) || (!ddl_init_config->ddl_callback) ||
+		(!ddl_init_config->core_virtual_base_addr)) {
+		DDL_MSG_ERROR("ddl_dev_init:Bad_argument");
+		return VCD_ERR_ILLEGAL_PARM;
+	}
+	ddl_context = ddl_get_context();
+	if (DDL_IS_INITIALIZED(ddl_context)) {
+		DDL_MSG_ERROR("ddl_dev_init:Multiple_init");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+	if (!DDL_IS_IDLE(ddl_context)) {
+		DDL_MSG_ERROR("ddl_dev_init:Ddl_busy");
+		return VCD_ERR_BUSY;
+	}
+	memset(ddl_context, 0, sizeof(struct ddl_context));
+	DDL_BUSY(ddl_context);
+	ddl_context->memtype = res_trk_get_mem_type();
+	if (ddl_context->memtype == -1) {
+		DDL_MSG_ERROR("ddl_dev_init:Illegal memtype");
+		return VCD_ERR_ILLEGAL_PARM;
+	}
+	ddl_context->ddl_callback = ddl_init_config->ddl_callback;
+	if (ddl_init_config->interrupt_clr)
+		ddl_context->interrupt_clr =
+			ddl_init_config->interrupt_clr;
+	ddl_context->core_virtual_base_addr =
+		ddl_init_config->core_virtual_base_addr;
+	ddl_context->client_data = client_data;
+	ddl_context->ddl_hw_response.arg1 = DDL_INVALID_INTR_STATUS;
+
+	ddl_context->frame_channel_depth = VCD_FRAME_COMMAND_DEPTH;
+
+	DDL_MSG_LOW("%s() : virtual address of core(%x)\n", __func__,
+		(u32) ddl_init_config->core_virtual_base_addr);
+	vidc_1080p_set_device_base_addr(
+		ddl_context->core_virtual_base_addr);
+	ddl_context->cmd_state = DDL_CMD_INVALID;
+	ddl_client_transact(DDL_INIT_CLIENTS, NULL);
+	ddl_context->fw_memory_size =
+		DDL_FW_INST_GLOBAL_CONTEXT_SPACE_SIZE;
+	if (ddl_context->memtype == MEMTYPE_SMI_KERNEL) {
+		ptr = ddl_pmem_alloc(&ddl_context->dram_base_a,
+			ddl_context->fw_memory_size, DDL_KILO_BYTE(128));
+	} else {
+		if (!res_trk_get_firmware_addr(&firmware_addr) &&
+		   firmware_addr.buf_size >= ddl_context->fw_memory_size) {
+			if (DDL_ADDR_IS_ALIGNED(firmware_addr.device_addr,
+				DDL_KILO_BYTE(128))) {
+				ptr = (void *) firmware_addr.base_addr;
+				ddl_context->dram_base_a.physical_base_addr =
+				ddl_context->dram_base_a.align_physical_addr =
+					(u8 *)firmware_addr.device_addr;
+				ddl_context->dram_base_a.align_virtual_addr  =
+				ddl_context->dram_base_a.virtual_base_addr =
+					firmware_addr.base_addr;
+				ddl_context->dram_base_a.buffer_size =
+					ddl_context->fw_memory_size;
+			} else {
+				DDL_MSG_ERROR("firmware base not aligned %p",
+					(void *)firmware_addr.device_addr);
+			}
+		}
+	}
+	if (!ptr) {
+		DDL_MSG_ERROR("Memory Aocation Failed for FW Base");
+		status = VCD_ERR_ALLOC_FAIL;
+	} else {
+		DDL_MSG_LOW("%s() : physical address of base(%x)\n",
+			 __func__, (u32) ddl_context->dram_base_a.\
+			align_physical_addr);
+		ddl_context->dram_base_b.align_physical_addr =
+			ddl_context->dram_base_a.align_physical_addr;
+		ddl_context->dram_base_b.align_virtual_addr  =
+			ddl_context->dram_base_a.align_virtual_addr;
+	}
+	if (!status) {
+		ptr = ddl_pmem_alloc(&ddl_context->metadata_shared_input,
+			DDL_METADATA_TOTAL_INPUTBUFSIZE,
+			DDL_LINEAR_BUFFER_ALIGN_BYTES);
+		if (!ptr) {
+			DDL_MSG_ERROR("ddl_device_init: metadata alloc fail");
+			status = VCD_ERR_ALLOC_FAIL;
+		}
+	}
+	if (!status && !ddl_fw_init(&ddl_context->dram_base_a)) {
+		DDL_MSG_ERROR("ddl_dev_init:fw_init_failed");
+		status = VCD_ERR_ALLOC_FAIL;
+	}
+	if (!status && ddl_context->memtype == MEMTYPE_EBI1)
+		clean_caches((unsigned long)firmware_addr.base_addr,
+		firmware_addr.buf_size,	firmware_addr.device_addr);
+
+	if (!status) {
+		ddl_context->cmd_state = DDL_CMD_DMA_INIT;
+		ddl_vidc_core_init(ddl_context);
+	} else {
+		ddl_release_context_buffers(ddl_context);
+		DDL_IDLE(ddl_context);
+	}
+	return status;
+}
+
+u32 ddl_device_release(void *client_data)
+{
+	struct ddl_context *ddl_context;
+
+	DDL_MSG_HIGH("ddl_device_release");
+	ddl_context = ddl_get_context();
+	if (!DDL_IS_IDLE(ddl_context)) {
+		DDL_MSG_ERROR("ddl_dev_rel:Ddl_busy");
+		return VCD_ERR_BUSY;
+	}
+	if (!DDL_IS_INITIALIZED(ddl_context)) {
+		DDL_MSG_ERROR("ddl_dev_rel:Not_inited");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+	if (!ddl_client_transact(DDL_ACTIVE_CLIENT, NULL)) {
+		DDL_MSG_ERROR("ddl_dev_rel:Client_present_err");
+		return VCD_ERR_CLIENT_PRESENT;
+	}
+	DDL_BUSY(ddl_context);
+	ddl_context->device_state = DDL_DEVICE_NOTINIT;
+	ddl_context->client_data = client_data;
+	ddl_context->cmd_state = DDL_CMD_INVALID;
+	ddl_vidc_core_term(ddl_context);
+	DDL_MSG_LOW("FW_ENDDONE");
+	ddl_context->core_virtual_base_addr = NULL;
+	ddl_release_context_buffers(ddl_context);
+	DDL_IDLE(ddl_context);
+	return VCD_S_SUCCESS;
+}
+
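+/*
+ * ddl_open() hands out a per-client context: it reserves a free client
+ * slot, allocates the shared host-command buffer(s) for the configured
+ * number of frame command channels, moves the client to DDL_CLIENT_OPEN
+ * and returns the context pointer as an opaque handle.
+ */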
+u32 ddl_open(u32 **ddl_handle, u32 decoding)
+{
+	struct ddl_context *ddl_context;
+	struct ddl_client_context *ddl;
+	void *ptr;
+	u32 status;
+
+	DDL_MSG_HIGH("ddl_open");
+	if (!ddl_handle) {
+		DDL_MSG_ERROR("ddl_open:Bad_handle");
+		return VCD_ERR_BAD_HANDLE;
+	}
+	ddl_context = ddl_get_context();
+	if (!DDL_IS_INITIALIZED(ddl_context)) {
+		DDL_MSG_ERROR("ddl_open:Not_inited");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+	status = ddl_client_transact(DDL_GET_CLIENT, &ddl);
+	if (status) {
+		DDL_MSG_ERROR("ddl_open:Client_trasac_failed");
+		return status;
+	}
+	ptr = ddl_pmem_alloc(&ddl->shared_mem[0],
+			DDL_FW_AUX_HOST_CMD_SPACE_SIZE, sizeof(u32));
+	if (!ptr)
+		status = VCD_ERR_ALLOC_FAIL;
+	if (!status && ddl_context->frame_channel_depth
+		== VCD_DUAL_FRAME_COMMAND_CHANNEL) {
+		ptr = ddl_pmem_alloc(&ddl->shared_mem[1],
+				DDL_FW_AUX_HOST_CMD_SPACE_SIZE, sizeof(u32));
+		if (!ptr) {
+			ddl_pmem_free(&ddl->shared_mem[0]);
+			status = VCD_ERR_ALLOC_FAIL;
+		}
+	}
+	if (!status) {
+		memset(ddl->shared_mem[0].align_virtual_addr, 0,
+			DDL_FW_AUX_HOST_CMD_SPACE_SIZE);
+		if (ddl_context->frame_channel_depth ==
+			VCD_DUAL_FRAME_COMMAND_CHANNEL) {
+			memset(ddl->shared_mem[1].align_virtual_addr, 0,
+				DDL_FW_AUX_HOST_CMD_SPACE_SIZE);
+		}
+		DDL_MSG_LOW("ddl_state_transition: %s ~~> DDL_CLIENT_OPEN",
+		ddl_get_state_string(ddl->client_state));
+		ddl->client_state = DDL_CLIENT_OPEN;
+		ddl->codec_data.hdr.decoding = decoding;
+		ddl->decoding = decoding;
+		ddl_set_default_meta_data_hdr(ddl);
+		ddl_set_initial_default_values(ddl);
+		*ddl_handle	= (u32 *) ddl;
+	} else {
+		ddl_pmem_free(&ddl->shared_mem[0]);
+		if (ddl_context->frame_channel_depth
+			== VCD_DUAL_FRAME_COMMAND_CHANNEL)
+			ddl_pmem_free(&ddl->shared_mem[1]);
+		ddl_client_transact(DDL_FREE_CLIENT, &ddl);
+	}
+	return status;
+}
+
+u32 ddl_close(u32 **ddl_handle)
+{
+	struct ddl_context *ddl_context;
+	struct ddl_client_context **pp_ddl =
+		(struct ddl_client_context **)ddl_handle;
+
+	DDL_MSG_HIGH("ddl_close");
+	if (!pp_ddl || !*pp_ddl) {
+		DDL_MSG_ERROR("ddl_close:Bad_handle");
+		return VCD_ERR_BAD_HANDLE;
+	}
+	ddl_context = ddl_get_context();
+	if (!DDL_IS_INITIALIZED(ddl_context)) {
+		DDL_MSG_ERROR("ddl_close:Not_inited");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+	if (!DDLCLIENT_STATE_IS(*pp_ddl, DDL_CLIENT_OPEN)) {
+		DDL_MSG_ERROR("ddl_close:Not_in_open_state");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+	ddl_pmem_free(&(*pp_ddl)->shared_mem[0]);
+	if (ddl_context->frame_channel_depth ==
+		VCD_DUAL_FRAME_COMMAND_CHANNEL)
+		ddl_pmem_free(&(*pp_ddl)->shared_mem[1]);
+	DDL_MSG_LOW("ddl_state_transition: %s ~~> DDL_CLIENT_INVALID",
+	ddl_get_state_string((*pp_ddl)->client_state));
+	(*pp_ddl)->client_state = DDL_CLIENT_INVALID;
+	ddl_codec_type_transact(*pp_ddl, true, (enum vcd_codec)0);
+	ddl_client_transact(DDL_FREE_CLIENT, pp_ddl);
+	return VCD_S_SUCCESS;
+}
+
+u32 ddl_encode_start(u32 *ddl_handle, void *client_data)
+{
+	struct ddl_client_context *ddl =
+		(struct ddl_client_context *) ddl_handle;
+	struct ddl_context *ddl_context;
+	struct ddl_encoder_data *encoder;
+	void *ptr;
+	u32 status = VCD_S_SUCCESS;
+	DDL_MSG_HIGH("ddl_encode_start");
+	if (vidc_msg_timing) {
+		if (first_time < 2) {
+			ddl_reset_core_time_variables(ENC_OP_TIME);
+			first_time++;
+		}
+		ddl_set_core_start_time(__func__, ENC_OP_TIME);
+	}
+	ddl_context = ddl_get_context();
+	if (!DDL_IS_INITIALIZED(ddl_context)) {
+		DDL_MSG_ERROR("ddl_enc_start:Not_inited");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+	if (DDL_IS_BUSY(ddl_context)) {
+		DDL_MSG_ERROR("ddl_enc_start:Ddl_busy");
+		return VCD_ERR_BUSY;
+	}
+	if (!ddl || ddl->decoding) {
+		DDL_MSG_ERROR("ddl_enc_start:Bad_handle");
+		return VCD_ERR_BAD_HANDLE;
+	}
+	if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) {
+		DDL_MSG_ERROR("ddl_enc_start:Not_opened");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+	if (!ddl_encoder_ready_to_start(ddl)) {
+		DDL_MSG_ERROR("ddl_enc_start:Err_param_settings");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+	encoder = &ddl->codec_data.encoder;
+	status = ddl_allocate_enc_hw_buffers(ddl);
+	if (status)
+		return status;
+#ifdef DDL_BUF_LOG
+	ddl_list_buffers(ddl);
+#endif
+	if ((encoder->codec.codec == VCD_CODEC_MPEG4 &&
+		!encoder->short_header.short_header) ||
+		encoder->codec.codec == VCD_CODEC_H264) {
+		ptr = ddl_pmem_alloc(&encoder->seq_header,
+			DDL_ENC_SEQHEADER_SIZE, DDL_LINEAR_BUFFER_ALIGN_BYTES);
+		if (!ptr) {
+			ddl_free_enc_hw_buffers(ddl);
+			DDL_MSG_ERROR("ddl_enc_start:Seq_hdr_alloc_failed");
+			return VCD_ERR_ALLOC_FAIL;
+		}
+	} else {
+		encoder->seq_header.buffer_size = 0;
+		encoder->seq_header.virtual_base_addr = 0;
+		encoder->seq_header.align_physical_addr = 0;
+		encoder->seq_header.align_virtual_addr = 0;
+	}
+	if (!ddl_take_command_channel(ddl_context, ddl, client_data))
+		return VCD_ERR_BUSY;
+	ddl_vidc_channel_set(ddl);
+	return status;
+}
+
+u32 ddl_decode_start(u32 *ddl_handle, struct vcd_sequence_hdr *header,
+	void *client_data)
+{
+	struct ddl_client_context  *ddl =
+		(struct ddl_client_context *) ddl_handle;
+	struct ddl_context *ddl_context;
+	struct ddl_decoder_data *decoder;
+	u32 status = VCD_S_SUCCESS;
+
+	DDL_MSG_HIGH("ddl_decode_start");
+	if (vidc_msg_timing) {
+		ddl_reset_core_time_variables(DEC_OP_TIME);
+		ddl_reset_core_time_variables(DEC_IP_TIME);
+	}
+	ddl_context = ddl_get_context();
+	if (!DDL_IS_INITIALIZED(ddl_context)) {
+		DDL_MSG_ERROR("ddl_dec_start:Not_inited");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+	if (DDL_IS_BUSY(ddl_context)) {
+		DDL_MSG_ERROR("ddl_dec_start:Ddl_busy");
+		return VCD_ERR_BUSY;
+	}
+	if (!ddl || !ddl->decoding) {
+		DDL_MSG_ERROR("ddl_dec_start:Bad_handle");
+		return VCD_ERR_BAD_HANDLE;
+	}
+	if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) {
+		DDL_MSG_ERROR("ddl_dec_start:Not_in_opened_state");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+
+	if ((header) && ((!header->sequence_header_len) ||
+		(!header->sequence_header))) {
+		DDL_MSG_ERROR("ddl_dec_start:Bad_param_seq_header");
+		return VCD_ERR_ILLEGAL_PARM;
+	}
+	if (!ddl_decoder_ready_to_start(ddl, header)) {
+		DDL_MSG_ERROR("ddl_dec_start:Err_param_settings");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+	decoder = &ddl->codec_data.decoder;
+	status = ddl_allocate_dec_hw_buffers(ddl);
+	if (status)
+		return status;
+#ifdef DDL_BUF_LOG
+	ddl_list_buffers(ddl);
+#endif
+	if (!ddl_take_command_channel(ddl_context, ddl, client_data))
+		return VCD_ERR_BUSY;
+	if (header) {
+		decoder->header_in_start = true;
+		decoder->decode_config = *header;
+	} else {
+		decoder->header_in_start = false;
+		decoder->decode_config.sequence_header_len = 0;
+	}
+	ddl_vidc_channel_set(ddl);
+	return status;
+}
+
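+/*
+ * ddl_decode_frame() is state driven: in WAIT_FOR_FRAME the input buffer
+ * is decoded right away, in WAIT_FOR_DPB it programs the output (DPB)
+ * buffers, and in WAIT_FOR_INITCODEC the input is treated as a sequence
+ * header used to initialize the codec.
+ */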
+u32 ddl_decode_frame(u32 *ddl_handle,
+	struct ddl_frame_data_tag *input_bits, void *client_data)
+{
+	u32 vcd_status = VCD_S_SUCCESS;
+	struct ddl_client_context *ddl =
+		(struct ddl_client_context *) ddl_handle;
+	struct ddl_context *ddl_context;
+	struct ddl_decoder_data *decoder;
+	DDL_MSG_HIGH("ddl_decode_frame");
+	ddl_context = ddl_get_context();
+	if (!DDL_IS_INITIALIZED(ddl_context)) {
+		DDL_MSG_ERROR("ddl_dec_frame:Not_inited");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+	if (DDL_IS_BUSY(ddl_context)) {
+		DDL_MSG_ERROR("ddl_dec_frame:Ddl_busy");
+		return VCD_ERR_BUSY;
+	}
+	if (!ddl || !ddl->decoding) {
+		DDL_MSG_ERROR("ddl_dec_frame:Bad_handle");
+		return VCD_ERR_BAD_HANDLE;
+	}
+	if (!input_bits || ((!input_bits->vcd_frm.physical ||
+		!input_bits->vcd_frm.data_len) &&
+		(!(VCD_FRAME_FLAG_EOS &	input_bits->vcd_frm.flags)))) {
+		DDL_MSG_ERROR("ddl_dec_frame:Bad_input_param");
+		return VCD_ERR_ILLEGAL_PARM;
+	}
+	if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME) &&
+		!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_INITCODEC) &&
+		!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPB)) {
+		DDL_MSG_ERROR("Dec_frame:Wrong_state");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+	decoder = &(ddl->codec_data.decoder);
+	if (DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_INITCODEC)	&&
+		!ddl->codec_data.decoder.dp_buf.no_of_dec_pic_buf) {
+		DDL_MSG_ERROR("ddl_dec_frame:Dpbs_requied");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+	if (!ddl_take_command_channel(ddl_context, ddl, client_data))
+		return VCD_ERR_BUSY;
+
+	ddl->input_frame = *input_bits;
+	if (DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME))
+		ddl_vidc_decode_frame_run(ddl);
+	else {
+		if (!ddl->codec_data.decoder.dp_buf.no_of_dec_pic_buf) {
+			DDL_MSG_ERROR("ddl_dec_frame:Dpbs_required");
+			vcd_status = VCD_ERR_ILLEGAL_OP;
+		} else if (DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPB)) {
+			vcd_status = ddl_vidc_decode_set_buffers(ddl);
+			if (vcd_status)
+				ddl_release_command_channel(ddl_context,
+					ddl->command_channel);
+		} else if (DDLCLIENT_STATE_IS(ddl,
+			DDL_CLIENT_WAIT_FOR_INITCODEC)) {
+			if (decoder->codec.codec == VCD_CODEC_DIVX_3) {
+				if ((!decoder->client_frame_size.width) ||
+					(!decoder->client_frame_size.height))
+					return VCD_ERR_ILLEGAL_OP;
+			}
+			decoder->decode_config.sequence_header =
+				ddl->input_frame.vcd_frm.physical;
+			decoder->decode_config.sequence_header_len =
+				ddl->input_frame.vcd_frm.data_len;
+			ddl_vidc_decode_init_codec(ddl);
+		} else {
+			DDL_MSG_ERROR("Dec_frame:Wrong_state");
+			vcd_status = VCD_ERR_ILLEGAL_OP;
+		}
+		if (vcd_status)
+			DDL_IDLE(ddl_context);
+	}
+	return vcd_status;
+}
+
+u32 ddl_encode_frame(u32 *ddl_handle,
+	struct ddl_frame_data_tag *input_frame,
+	struct ddl_frame_data_tag *output_bit, void *client_data)
+{
+	struct ddl_client_context *ddl =
+		(struct ddl_client_context *) ddl_handle;
+	struct ddl_context *ddl_context;
+	u32 vcd_status = VCD_S_SUCCESS;
+
+	DDL_MSG_HIGH("ddl_encode_frame");
+	if (vidc_msg_timing)
+		ddl_set_core_start_time(__func__, ENC_OP_TIME);
+	ddl_context = ddl_get_context();
+	if (!DDL_IS_INITIALIZED(ddl_context)) {
+		DDL_MSG_ERROR("ddl_enc_frame:Not_inited");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+	if (DDL_IS_BUSY(ddl_context)) {
+		DDL_MSG_ERROR("ddl_enc_frame:Ddl_busy");
+		return VCD_ERR_BUSY;
+	}
+	if (!ddl || ddl->decoding) {
+		DDL_MSG_ERROR("ddl_enc_frame:Bad_handle");
+		return VCD_ERR_BAD_HANDLE;
+	}
+	if (!input_frame || !input_frame->vcd_frm.physical	||
+		!input_frame->vcd_frm.data_len) {
+		DDL_MSG_ERROR("ddl_enc_frame:Bad_input_params");
+		return VCD_ERR_ILLEGAL_PARM;
+	}
+	if ((((u32) input_frame->vcd_frm.physical +
+		input_frame->vcd_frm.offset) &
+		(DDL_STREAMBUF_ALIGN_GUARD_BYTES))) {
+		DDL_MSG_ERROR("ddl_enc_frame:Un_aligned_yuv_start_address");
+		return VCD_ERR_ILLEGAL_PARM;
+	}
+	if (!output_bit || !output_bit->vcd_frm.physical ||
+		!output_bit->vcd_frm.alloc_len) {
+		DDL_MSG_ERROR("ddl_enc_frame:Bad_output_params");
+		return VCD_ERR_ILLEGAL_PARM;
+	}
+	if ((ddl->codec_data.encoder.output_buf_req.sz +
+		output_bit->vcd_frm.offset) >
+		output_bit->vcd_frm.alloc_len)
+		DDL_MSG_ERROR("ddl_enc_frame:offset_large,"
+			"Exceeds_min_buf_size");
+	if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME)) {
+		DDL_MSG_ERROR("ddl_enc_frame:Wrong_state");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+	if (!ddl_take_command_channel(ddl_context, ddl, client_data))
+		return VCD_ERR_BUSY;
+
+	ddl->input_frame = *input_frame;
+	ddl->output_frame = *output_bit;
+	if (ddl->codec_data.encoder.i_period.b_frames > 0) {
+		if (!ddl->b_count) {
+			ddl->first_output_frame = *output_bit;
+			ddl->b_count++;
+		} else if (ddl->codec_data.encoder.i_period.b_frames >=
+			ddl->b_count) {
+			ddl->extra_output_frame[ddl->b_count-1] =
+				*output_bit;
+			ddl->output_frame = ddl->first_output_frame;
+			ddl->b_count++;
+		}
+	}
+	vcd_status = ddl_insert_input_frame_to_pool(ddl, input_frame);
+	if (!vcd_status)
+		ddl_vidc_encode_frame_run(ddl);
+	else
+		DDL_MSG_ERROR("insert to frame pool failed %u", vcd_status);
+	return vcd_status;
+}
+
+u32 ddl_decode_end(u32 *ddl_handle, void *client_data)
+{
+	struct ddl_client_context *ddl =
+		(struct ddl_client_context *) ddl_handle;
+	struct ddl_context *ddl_context;
+
+	DDL_MSG_HIGH("ddl_decode_end");
+	if (vidc_msg_timing) {
+		ddl_reset_core_time_variables(DEC_OP_TIME);
+		ddl_reset_core_time_variables(DEC_IP_TIME);
+	}
+	ddl_context = ddl_get_context();
+	if (!DDL_IS_INITIALIZED(ddl_context)) {
+		DDL_MSG_ERROR("ddl_dec_end:Not_inited");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+	if (DDL_IS_BUSY(ddl_context)) {
+		DDL_MSG_ERROR("ddl_dec_end:Ddl_busy");
+		return VCD_ERR_BUSY;
+	}
+	if (!ddl || !ddl->decoding) {
+		DDL_MSG_ERROR("ddl_dec_end:Bad_handle");
+		return VCD_ERR_BAD_HANDLE;
+	}
+	if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME) &&
+		!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_INITCODEC) &&
+		!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPB) &&
+		!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_FAVIDC_ERROR)) {
+		DDL_MSG_ERROR("ddl_dec_end:Wrong_state");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+	if (!ddl_take_command_channel(ddl_context, ddl, client_data))
+		return VCD_ERR_BUSY;
+	ddl_vidc_channel_end(ddl);
+	return VCD_S_SUCCESS;
+}
+
+u32 ddl_encode_end(u32 *ddl_handle, void *client_data)
+{
+	struct ddl_client_context  *ddl =
+		(struct ddl_client_context *) ddl_handle;
+	struct ddl_context *ddl_context;
+
+	DDL_MSG_HIGH("ddl_encode_end");
+	if (vidc_msg_timing)
+		ddl_reset_core_time_variables(ENC_OP_TIME);
+	ddl_context = ddl_get_context();
+	if (!DDL_IS_INITIALIZED(ddl_context)) {
+		DDL_MSG_ERROR("ddl_enc_end:Not_inited");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+	if (DDL_IS_BUSY(ddl_context)) {
+		DDL_MSG_ERROR("ddl_enc_end:Ddl_busy");
+		return VCD_ERR_BUSY;
+	}
+	if (!ddl || ddl->decoding) {
+		DDL_MSG_ERROR("ddl_enc_end:Bad_handle");
+		return VCD_ERR_BAD_HANDLE;
+	}
+	if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME) &&
+		!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_INITCODEC) &&
+		!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_FAVIDC_ERROR)) {
+		DDL_MSG_ERROR("ddl_enc_end:Wrong_state");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+	if (!ddl_take_command_channel(ddl_context, ddl, client_data))
+		return VCD_ERR_BUSY;
+	ddl_vidc_channel_end(ddl);
+	return VCD_S_SUCCESS;
+}
+
+u32 ddl_reset_hw(u32 mode)
+{
+	struct ddl_context *ddl_context;
+	struct ddl_client_context *ddl;
+	u32 i;
+
+	DDL_MSG_HIGH("ddl_reset_hw");
+	DDL_MSG_LOW("ddl_reset_hw:called");
+	ddl_context = ddl_get_context();
+	ddl_context->cmd_state = DDL_CMD_INVALID;
+	DDL_BUSY(ddl_context);
+	if (ddl_context->core_virtual_base_addr) {
+		vidc_1080p_do_sw_reset(VIDC_1080P_RESET_IN_SEQ_FIRST_STAGE);
+		msleep(DDL_SW_RESET_SLEEP);
+		vidc_1080p_do_sw_reset(VIDC_1080P_RESET_IN_SEQ_SECOND_STAGE);
+		msleep(DDL_SW_RESET_SLEEP);
+		ddl_context->core_virtual_base_addr = NULL;
+	}
+	ddl_context->device_state = DDL_DEVICE_NOTINIT;
+	for (i = 0; i < VCD_MAX_NO_CLIENT; i++) {
+		ddl = ddl_context->ddl_clients[i];
+		ddl_context->ddl_clients[i] = NULL;
+		if (ddl) {
+			ddl_release_client_internal_buffers(ddl);
+			ddl_client_transact(DDL_FREE_CLIENT, &ddl);
+		}
+	}
+	ddl_release_context_buffers(ddl_context);
+	memset(ddl_context, 0, sizeof(struct ddl_context));
+	return true;
+}
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h
new file mode 100644
index 0000000..9084ea8
--- /dev/null
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h
@@ -0,0 +1,446 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VCD_DDL_H_
+#define _VCD_DDL_H_
+
+#include "vcd_ddl_api.h"
+#include "vcd_ddl_core.h"
+#include "vcd_ddl_utils.h"
+#include "vidc.h"
+#include "vidc_hwio.h"
+#include "vidc_pix_cache.h"
+#include "vidc.h"
+
+#define DDL_IDLE_STATE  0
+#define DDL_BUSY_STATE  1
+#define DDL_ERROR_STATE 2
+#define DDL_RUN_STATE   3
+
+#define DDL_IS_BUSY(ddl_context) \
+		((ddl_context)->ddl_busy == DDL_BUSY_STATE)
+#define DDL_IS_IDLE(ddl_context) \
+		((ddl_context)->ddl_busy == DDL_IDLE_STATE)
+#define DDL_BUSY(ddl_context) \
+		((ddl_context)->ddl_busy = DDL_BUSY_STATE)
+#define DDL_IDLE(ddl_context) \
+		((ddl_context)->ddl_busy = DDL_IDLE_STATE)
+#define DDL_ERROR(ddl_context) \
+		((ddl_context)->ddl_busy = DDL_ERROR_STATE)
+#define DDL_RUN(ddl_context) \
+	((ddl_context)->ddl_busy = DDL_RUN_STATE)
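+
+/*
+ * Typical usage: DDL_BUSY() marks the context while a command is
+ * outstanding (see ddl_device_init()) and DDL_IDLE() releases it when the
+ * command completes or fails; DDL_IS_BUSY()/DDL_IS_IDLE() guard new
+ * requests against that state.
+ */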
+
+#define DDL_DEVICE_NOTINIT  0
+#define DDL_DEVICE_INITED   1
+#define DDL_DEVICE_HWFATAL  2
+
+#define DDL_IS_INITIALIZED(ddl_context) \
+	(ddl_context->device_state == DDL_DEVICE_INITED)
+#define DDLCOMMAND_STATE_IS(ddl_context, command_state) \
+	(command_state == (ddl_context)->cmd_state)
+#define DDLCLIENT_STATE_IS(ddl, state) \
+	(state == (ddl)->client_state)
+
+#define DDL_DPB_OP_INIT       1
+#define DDL_DPB_OP_MARK_FREE  2
+#define DDL_DPB_OP_MARK_BUSY  3
+#define DDL_DPB_OP_SET_MASK   4
+#define DDL_DPB_OP_RETRIEVE   5
+
+#define DDL_INIT_CLIENTS     0
+#define DDL_GET_CLIENT       1
+#define DDL_FREE_CLIENT      2
+#define DDL_ACTIVE_CLIENT    3
+
+#define DDL_INVALID_CHANNEL_ID  ((u32)~0)
+#define DDL_INVALID_CODEC_TYPE  ((u32)~0)
+#define DDL_INVALID_INTR_STATUS ((u32)~0)
+
+#define DDL_ENC_REQ_IFRAME        0x01
+#define DDL_ENC_CHANGE_IPERIOD    0x02
+#define DDL_ENC_CHANGE_BITRATE    0x04
+#define DDL_ENC_CHANGE_FRAMERATE  0x08
+#define DDL_ENC_CHANGE_CIR        0x10
+
+#define DDL_DEC_REQ_OUTPUT_FLUSH  0x1
+
+#define DDL_MIN_NUM_OF_B_FRAME  0
+#define DDL_MAX_NUM_OF_B_FRAME  1
+#define DDL_DEFAULT_NUM_OF_B_FRAME  DDL_MIN_NUM_OF_B_FRAME
+
+#define DDL_MIN_NUM_REF_FOR_P_FRAME             1
+#define DDL_MAX_NUM_REF_FOR_P_FRAME             2
+
+#define DDL_MAX_NUM_IN_INPUTFRAME_POOL          (DDL_MAX_NUM_OF_B_FRAME + 1)
+
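+/*
+ * Generic allocation descriptor used throughout the DDL: it holds the raw
+ * virtual/physical base addresses returned by the allocator together with
+ * their aligned counterparts that are actually programmed into hardware,
+ * plus the usable buffer size (filled in by ddl_pmem_alloc() or, for the
+ * firmware region, directly by ddl_device_init()).
+ */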
+struct ddl_buf_addr{
+	u8  *virtual_base_addr;
+	u8  *physical_base_addr;
+	u8  *align_physical_addr;
+	u8  *align_virtual_addr;
+	u32 buffer_size;
+};
+enum ddl_cmd_state{
+	DDL_CMD_INVALID         = 0x0,
+	DDL_CMD_DMA_INIT        = 0x1,
+	DDL_CMD_CPU_RESET       = 0x2,
+	DDL_CMD_CHANNEL_SET     = 0x3,
+	DDL_CMD_INIT_CODEC      = 0x4,
+	DDL_CMD_HEADER_PARSE    = 0x5,
+	DDL_CMD_DECODE_SET_DPB  = 0x6,
+	DDL_CMD_DECODE_FRAME    = 0x7,
+	DDL_CMD_ENCODE_FRAME    = 0x8,
+	DDL_CMD_EOS             = 0x9,
+	DDL_CMD_CHANNEL_END     = 0xA,
+	DDL_CMD_32BIT           = 0x7FFFFFFF
+};
+enum ddl_client_state{
+	DDL_CLIENT_INVALID                 = 0x0,
+	DDL_CLIENT_OPEN                    = 0x1,
+	DDL_CLIENT_WAIT_FOR_CHDONE         = 0x2,
+	DDL_CLIENT_WAIT_FOR_INITCODEC      = 0x3,
+	DDL_CLIENT_WAIT_FOR_INITCODECDONE  = 0x4,
+	DDL_CLIENT_WAIT_FOR_DPB            = 0x5,
+	DDL_CLIENT_WAIT_FOR_DPBDONE        = 0x6,
+	DDL_CLIENT_WAIT_FOR_FRAME          = 0x7,
+	DDL_CLIENT_WAIT_FOR_FRAME_DONE     = 0x8,
+	DDL_CLIENT_WAIT_FOR_EOS_DONE       = 0x9,
+	DDL_CLIENT_WAIT_FOR_CHEND          = 0xA,
+	DDL_CLIENT_FATAL_ERROR             = 0xB,
+	DDL_CLIENT_FAVIDC_ERROR            = 0xC,
+	DDL_CLIENT_32BIT                   = 0x7FFFFFFF
+};
+struct ddl_hw_interface{
+	u32 cmd;
+	u32 arg1;
+	u32 arg2;
+	u32 arg3;
+	u32 arg4;
+};
+struct ddl_mask{
+	u32  client_mask;
+	u32  hw_mask;
+};
+struct ddl_yuv_buffer_size{
+	u32  size_yuv;
+	u32  size_y;
+	u32  size_c;
+};
+struct ddl_dec_buffer_size{
+	u32  sz_dpb0;
+	u32  sz_dpb1;
+	u32  sz_mv;
+	u32  sz_vert_nb_mv;
+	u32  sz_nb_ip;
+	u32  sz_luma;
+	u32  sz_chroma;
+	u32  sz_nb_dcac;
+	u32  sz_upnb_mv;
+	u32  sz_sub_anchor_mv;
+	u32  sz_overlap_xform;
+	u32  sz_bit_plane3;
+	u32  sz_bit_plane2;
+	u32  sz_bit_plane1;
+	u32  sz_stx_parser;
+	u32  sz_desc;
+	u32  sz_cpb;
+	u32  sz_context;
+};
+struct ddl_dec_buffers{
+	struct ddl_buf_addr desc;
+	struct ddl_buf_addr nb_dcac;
+	struct ddl_buf_addr upnb_mv;
+	struct ddl_buf_addr sub_anchor_mv;
+	struct ddl_buf_addr overlay_xform;
+	struct ddl_buf_addr bit_plane3;
+	struct ddl_buf_addr bit_plane2;
+	struct ddl_buf_addr bit_plane1;
+	struct ddl_buf_addr stx_parser;
+	struct ddl_buf_addr h264_mv[DDL_MAX_BUFFER_COUNT];
+	struct ddl_buf_addr h264_vert_nb_mv;
+	struct ddl_buf_addr h264_nb_ip;
+	struct ddl_buf_addr context;
+};
+struct ddl_enc_buffer_size{
+	u32  sz_cur_y;
+	u32  sz_cur_c;
+	u32  sz_dpb_y;
+	u32  sz_dpb_c;
+	u32  sz_strm;
+	u32  sz_mv;
+	u32  sz_col_zero;
+	u32  sz_md;
+	u32  sz_pred;
+	u32  sz_nbor_info;
+	u32  sz_acdc_coef;
+	u32  sz_mb_info;
+	u32  sz_context;
+};
+struct ddl_enc_buffers{
+	struct ddl_buf_addr dpb_y[4];
+	struct ddl_buf_addr dpb_c[4];
+	struct ddl_buf_addr mv;
+	struct ddl_buf_addr col_zero;
+	struct ddl_buf_addr md;
+	struct ddl_buf_addr pred;
+	struct ddl_buf_addr nbor_info;
+	struct ddl_buf_addr acdc_coef;
+	struct ddl_buf_addr mb_info;
+	struct ddl_buf_addr context;
+	u32  dpb_count;
+	u32  sz_dpb_y;
+	u32  sz_dpb_c;
+};
+struct ddl_codec_data_hdr{
+	u32  decoding;
+};
+struct ddl_encoder_data{
+	struct ddl_codec_data_hdr   hdr;
+	struct vcd_property_codec   codec;
+	struct vcd_property_frame_size  frame_size;
+	struct vcd_property_frame_rate  frame_rate;
+	struct vcd_property_target_bitrate  target_bit_rate;
+	struct vcd_property_profile  profile;
+	struct vcd_property_level  level;
+	struct vcd_property_rate_control  rc;
+	struct vcd_property_multi_slice  multi_slice;
+	struct ddl_buf_addr  meta_data_input;
+	struct vcd_property_short_header  short_header;
+	struct vcd_property_vop_timing  vop_timing;
+	struct vcd_property_db_config  db_control;
+	struct vcd_property_entropy_control  entropy_control;
+	struct vcd_property_i_period  i_period;
+	struct vcd_property_session_qp  session_qp;
+	struct vcd_property_qp_range  qp_range;
+	struct vcd_property_rc_level  rc_level;
+	struct vcd_property_frame_level_rc_params  frame_level_rc;
+	struct vcd_property_adaptive_rc_params  adaptive_rc;
+	struct vcd_property_intra_refresh_mb_number  intra_refresh;
+	struct vcd_property_buffer_format  buf_format;
+	struct vcd_property_buffer_format  recon_buf_format;
+	struct ddl_buf_addr  seq_header;
+	struct vcd_buffer_requirement  input_buf_req;
+	struct vcd_buffer_requirement  output_buf_req;
+	struct vcd_buffer_requirement  client_input_buf_req;
+	struct vcd_buffer_requirement  client_output_buf_req;
+	struct ddl_enc_buffers  hw_bufs;
+	struct ddl_yuv_buffer_size  input_buf_size;
+	struct vidc_1080p_enc_frame_info enc_frame_info;
+	u32  meta_data_enable_flag;
+	u32  suffix;
+	u32  meta_data_offset;
+	u32  hdr_ext_control;
+	u32  r_cframe_skip;
+	u32  vb_vbuffer_size;
+	u32  dynamic_prop_change;
+	u32  dynmic_prop_change_req;
+	u32  seq_header_length;
+	u32  intra_frame_insertion;
+	u32  mb_info_enable;
+	u32  ext_enc_control_val;
+	u32  num_references_for_p_frame;
+};
+struct ddl_decoder_data {
+	struct ddl_codec_data_hdr  hdr;
+	struct vcd_property_codec  codec;
+	struct vcd_property_buffer_format  buf_format;
+	struct vcd_property_frame_size  frame_size;
+	struct vcd_property_frame_size  client_frame_size;
+	struct vcd_property_profile  profile;
+	struct vcd_property_level  level;
+	struct ddl_buf_addr  meta_data_input;
+	struct vcd_property_post_filter  post_filter;
+	struct vcd_sequence_hdr  decode_config;
+	struct ddl_property_dec_pic_buffers  dp_buf;
+	struct ddl_mask  dpb_mask;
+	struct vcd_buffer_requirement  actual_input_buf_req;
+	struct vcd_buffer_requirement  min_input_buf_req;
+	struct vcd_buffer_requirement  client_input_buf_req;
+	struct vcd_buffer_requirement  actual_output_buf_req;
+	struct vcd_buffer_requirement  min_output_buf_req;
+	struct vcd_buffer_requirement  client_output_buf_req;
+	struct ddl_dec_buffers  hw_bufs;
+	struct ddl_yuv_buffer_size  dpb_buf_size;
+	struct vidc_1080p_dec_disp_info dec_disp_info;
+	u32  progressive_only;
+	u32  output_order;
+	u32  meta_data_enable_flag;
+	u32  suffix;
+	u32  meta_data_offset;
+	u32  header_in_start;
+	u32  min_dpb_num;
+	u32  y_cb_cr_size;
+	u32  dynamic_prop_change;
+	u32  dynmic_prop_change_req;
+	u32  flush_pending;
+	u32  meta_data_exists;
+	u32  idr_only_decoding;
+	u32  field_needed_for_prev_ip;
+	u32  prev_ip_frm_tag;
+	u32  cont_mode;
+};
+union ddl_codec_data{
+	struct ddl_codec_data_hdr  hdr;
+	struct ddl_decoder_data   decoder;
+	struct ddl_encoder_data   encoder;
+};
+struct ddl_context{
+	int memtype;
+	u8 *core_virtual_base_addr;
+	void *client_data;
+	u32 device_state;
+	u32 ddl_busy;
+	u32 cmd_err_status;
+	u32 disp_pic_err_status;
+	u32 pix_cache_enable;
+	u32 fw_version;
+	u32 fw_memory_size;
+	u32 cmd_seq_num;
+	u32 response_cmd_ch_id;
+	enum ddl_cmd_state cmd_state;
+	struct ddl_client_context *current_ddl[2];
+	struct ddl_buf_addr metadata_shared_input;
+	struct ddl_client_context *ddl_clients[VCD_MAX_NO_CLIENT];
+	struct ddl_buf_addr dram_base_a;
+	struct ddl_buf_addr dram_base_b;
+	struct ddl_hw_interface ddl_hw_response;
+	void (*ddl_callback) (u32 event, u32 status, void *payload,
+		size_t sz, u32 *ddl_handle, void *const client_data);
+	void (*interrupt_clr) (void);
+	void (*vidc_decode_seq_start[2])
+		(struct vidc_1080p_dec_seq_start_param *param);
+	void (*vidc_set_dec_resolution[2])
+		(u32 width, u32 height);
+	void(*vidc_decode_init_buffers[2])
+		(struct vidc_1080p_dec_init_buffers_param *param);
+	void(*vidc_decode_frame_start[2])
+		(struct vidc_1080p_dec_frame_start_param *param);
+	void(*vidc_encode_seq_start[2])
+		(struct vidc_1080p_enc_seq_start_param *param);
+	void(*vidc_encode_frame_start[2])
+		(struct vidc_1080p_enc_frame_start_param *param);
+	u32 frame_channel_depth;
+};
+struct ddl_client_context{
+	struct ddl_context  *ddl_context;
+	enum ddl_client_state  client_state;
+	struct ddl_frame_data_tag  first_output_frame;
+	struct ddl_frame_data_tag
+		extra_output_frame[DDL_MAX_NUM_OF_B_FRAME];
+	struct ddl_frame_data_tag  input_frame;
+	struct ddl_frame_data_tag  output_frame;
+	struct ddl_frame_data_tag
+		input_frame_pool[DDL_MAX_NUM_IN_INPUTFRAME_POOL];
+	union ddl_codec_data  codec_data;
+	enum ddl_cmd_state  cmd_state;
+	struct ddl_buf_addr  shared_mem[2];
+	void *client_data;
+	u32  decoding;
+	u32  channel_id;
+	u32  command_channel;
+	u32  b_count;
+	s32  extra_output_buf_count;
+	u32  instance_id;
+};
+
+struct ddl_context *ddl_get_context(void);
+void ddl_vidc_core_init(struct ddl_context *);
+void ddl_vidc_core_term(struct ddl_context *);
+void ddl_vidc_channel_set(struct ddl_client_context *);
+void ddl_vidc_channel_end(struct ddl_client_context *);
+void ddl_vidc_encode_init_codec(struct ddl_client_context *);
+void ddl_vidc_decode_init_codec(struct ddl_client_context *);
+void ddl_vidc_encode_frame_run(struct ddl_client_context *);
+void ddl_vidc_decode_frame_run(struct ddl_client_context *);
+void ddl_vidc_decode_eos_run(struct ddl_client_context *ddl);
+void ddl_release_context_buffers(struct ddl_context *);
+void ddl_release_client_internal_buffers(struct ddl_client_context *ddl);
+u32  ddl_vidc_decode_set_buffers(struct ddl_client_context *);
+u32  ddl_decoder_dpb_transact(struct ddl_decoder_data *decoder,
+	struct ddl_frame_data_tag *in_out_frame, u32 operation);
+u32  ddl_decoder_dpb_init(struct ddl_client_context *ddl);
+u32  ddl_client_transact(u32 , struct ddl_client_context **);
+u32  ddl_set_default_decoder_buffer_req(struct ddl_decoder_data *decoder,
+	u32 estimate);
+void ddl_set_default_encoder_buffer_req(struct ddl_encoder_data
+	*encoder);
+void ddl_set_default_dec_property(struct ddl_client_context *);
+u32  ddl_encoder_ready_to_start(struct ddl_client_context *);
+u32  ddl_decoder_ready_to_start(struct ddl_client_context *,
+	struct vcd_sequence_hdr *);
+u32  ddl_get_yuv_buffer_size(struct vcd_property_frame_size *frame_size,
+	struct vcd_property_buffer_format *buf_format, u32 interlace,
+	u32 decoding, u32 *pn_c_offset);
+void ddl_calculate_stride(struct vcd_property_frame_size *frame_size,
+	u32 interlace);
+u32  ddl_codec_type_transact(struct ddl_client_context *ddl,
+	u32 remove, enum vcd_codec requested_codec);
+void ddl_vidc_encode_dynamic_property(struct ddl_client_context *ddl,
+	u32 enable);
+void ddl_vidc_decode_dynamic_property(struct ddl_client_context *ddl,
+	u32 enable);
+void ddl_set_initial_default_values(struct ddl_client_context *ddl);
+
+u32  ddl_take_command_channel(struct ddl_context *ddl_context,
+	struct ddl_client_context *ddl, void *client_data);
+void ddl_release_command_channel(struct ddl_context  *ddl_context,
+	u32 command_channel);
+struct ddl_client_context *ddl_get_current_ddl_client_for_channel_id(
+	struct ddl_context *ddl_context, u32 channel_id);
+struct ddl_client_context *ddl_get_current_ddl_client_for_command(
+	struct ddl_context *ddl_context,
+	enum ddl_cmd_state cmd_state);
+
+u32  ddl_get_yuv_buf_size(u32 width, u32 height, u32 format);
+void ddl_free_dec_hw_buffers(struct ddl_client_context *ddl);
+void ddl_free_enc_hw_buffers(struct ddl_client_context *ddl);
+void ddl_calc_dec_hw_buffers_size(enum vcd_codec codec, u32 width,
+	u32 height, u32 h264_dpb,
+	struct ddl_dec_buffer_size *buf_size);
+u32  ddl_allocate_dec_hw_buffers(struct ddl_client_context *ddl);
+u32  ddl_calc_enc_hw_buffers_size(enum vcd_codec codec, u32 width,
+	u32 height, enum vcd_yuv_buffer_format  input_format,
+	struct ddl_client_context *ddl,
+	struct ddl_enc_buffer_size *buf_size);
+u32  ddl_allocate_enc_hw_buffers(struct ddl_client_context *ddl);
+
+u32  ddl_handle_core_errors(struct ddl_context *ddl_context);
+void ddl_client_fatal_cb(struct ddl_client_context *ddl);
+void ddl_hw_fatal_cb(struct ddl_client_context *ddl);
+
+void *ddl_pmem_alloc(struct ddl_buf_addr *addr, size_t sz, u32 alignment);
+void ddl_pmem_free(struct ddl_buf_addr *addr);
+
+u32 ddl_get_input_frame_from_pool(struct ddl_client_context *ddl,
+	u8 *input_buffer_address);
+u32 ddl_insert_input_frame_to_pool(struct ddl_client_context *ddl,
+	struct ddl_frame_data_tag *ddl_input_frame);
+
+void ddl_decoder_chroma_dpb_change(struct ddl_client_context *ddl);
+u32  ddl_check_reconfig(struct ddl_client_context *ddl);
+void ddl_handle_reconfig(u32 res_change, struct ddl_client_context *ddl);
+
+#ifdef DDL_BUF_LOG
+void ddl_list_buffers(struct ddl_client_context *ddl);
+#endif
+#ifdef DDL_MSG_LOG
+s8 *ddl_get_state_string(enum ddl_client_state client_state);
+#endif
+extern unsigned char *vidc_video_codec_fw;
+extern u32 vidc_video_codec_fw_size;
+
+u32 ddl_fw_init(struct ddl_buf_addr *dram_base);
+void ddl_get_fw_info(const unsigned char **fw_array_addr,
+	unsigned int *fw_size);
+void ddl_fw_release(void);
+#endif
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_api.h b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_api.h
new file mode 100644
index 0000000..51a0d13
--- /dev/null
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_api.h
@@ -0,0 +1,110 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VCD_DDL_API_H_
+#define _VCD_DDL_API_H_
+
+#include "vidc.h"
+#include "vcd_api.h"
+
+#define VCD_EVT_RESP_DDL_BASE             0x3000
+#define VCD_EVT_RESP_DEVICE_INIT          (VCD_EVT_RESP_DDL_BASE + 0x1)
+#define VCD_EVT_RESP_OUTPUT_REQ           (VCD_EVT_RESP_DDL_BASE + 0x2)
+#define VCD_EVT_RESP_EOS_DONE             (VCD_EVT_RESP_DDL_BASE + 0x3)
+#define VCD_EVT_RESP_TRANSACTION_PENDING  (VCD_EVT_RESP_DDL_BASE + 0x4)
+
+#define VCD_S_DDL_ERR_BASE       0x90000000
+#define VCD_ERR_MAX_NO_CODEC     (VCD_S_DDL_ERR_BASE + 0x1)
+#define VCD_ERR_CLIENT_PRESENT   (VCD_S_DDL_ERR_BASE + 0x2)
+#define VCD_ERR_CLIENT_FATAL     (VCD_S_DDL_ERR_BASE + 0x3)
+#define VCD_ERR_NO_SEQ_HDR       (VCD_S_DDL_ERR_BASE + 0x4)
+
+#define VCD_I_CUSTOM_BASE        (VCD_I_RESERVED_BASE)
+#define VCD_I_RC_LEVEL_CONFIG    (VCD_I_CUSTOM_BASE + 0x1)
+#define VCD_I_FRAME_LEVEL_RC     (VCD_I_CUSTOM_BASE + 0x2)
+#define VCD_I_ADAPTIVE_RC        (VCD_I_CUSTOM_BASE + 0x3)
+#define VCD_I_CUSTOM_DDL_BASE    (VCD_I_RESERVED_BASE + 0x100)
+#define DDL_I_INPUT_BUF_REQ      (VCD_I_CUSTOM_DDL_BASE + 0x1)
+#define DDL_I_OUTPUT_BUF_REQ     (VCD_I_CUSTOM_DDL_BASE + 0x2)
+#define DDL_I_DPB                (VCD_I_CUSTOM_DDL_BASE + 0x3)
+#define DDL_I_DPB_RELEASE        (VCD_I_CUSTOM_DDL_BASE + 0x4)
+#define DDL_I_DPB_RETRIEVE       (VCD_I_CUSTOM_DDL_BASE + 0x5)
+#define DDL_I_REQ_OUTPUT_FLUSH   (VCD_I_CUSTOM_DDL_BASE + 0x6)
+#define DDL_I_SEQHDR_ALIGN_BYTES (VCD_I_CUSTOM_DDL_BASE + 0x7)
+#define DDL_I_CAPABILITY         (VCD_I_CUSTOM_DDL_BASE + 0x8)
+#define DDL_I_FRAME_PROC_UNITS   (VCD_I_CUSTOM_DDL_BASE + 0x9)
+#define DDL_I_SEQHDR_PRESENT     (VCD_I_CUSTOM_DDL_BASE + 0xA)
+
+#define DDL_FRAME_VGA_SIZE     (640*480)
+#define DDL_FRAME_720P_WIDTH   1280
+#define DDL_FRAME_720P_HEIGHT  720
+
+struct vcd_property_rc_level{
+	u32 frame_level_rc;
+	u32 mb_level_rc;
+};
+struct vcd_property_frame_level_rc_params{
+	u32 reaction_coeff;
+};
+struct vcd_property_adaptive_rc_params{
+	u32 disable_dark_region_as_flag;
+	u32 disable_smooth_region_as_flag;
+	u32 disable_static_region_as_flag;
+	u32 disable_activity_region_flag;
+};
+struct ddl_property_dec_pic_buffers{
+	struct ddl_frame_data_tag *dec_pic_buffers;
+	u32 no_of_dec_pic_buf;
+};
+struct ddl_property_capability{
+	u32 max_num_client;
+	u32 general_command_depth;
+	u32 exclusive;
+	u32 frame_command_depth;
+	u32 ddl_time_out_in_ms;
+};
+struct ddl_init_config{
+	int memtype;
+	u8 *core_virtual_base_addr;
+	void (*interrupt_clr) (void);
+	void (*ddl_callback) (u32 event, u32 status, void *payload, size_t sz,
+		u32 *ddl_handle, void *const client_data);
+};
+struct ddl_frame_data_tag{
+	struct vcd_frame_data vcd_frm;
+	u32 frm_trans_end;
+	u32 frm_delta;
+};
+u32 ddl_device_init(struct ddl_init_config *ddl_init_config,
+	void *client_data);
+u32 ddl_device_release(void *client_data);
+u32 ddl_open(u32 **ddl_handle, u32 decoding);
+u32 ddl_close(u32 **ddl_handle);
+u32 ddl_encode_start(u32 *ddl_handle, void *client_data);
+u32 ddl_encode_frame(u32 *ddl_handle,
+	struct ddl_frame_data_tag *input_frame,
+	struct ddl_frame_data_tag *output_bit, void *client_data);
+u32 ddl_encode_end(u32 *ddl_handle, void *client_data);
+u32 ddl_decode_start(u32 *ddl_handle, struct vcd_sequence_hdr *header,
+	void *client_data);
+u32 ddl_decode_frame(u32 *ddl_handle,
+	struct ddl_frame_data_tag *input_bits, void *client_data);
+u32 ddl_decode_end(u32 *ddl_handle, void *client_data);
+u32 ddl_set_property(u32 *ddl_handle,
+	struct vcd_property_hdr *property_hdr, void *property_value);
+u32 ddl_get_property(u32 *ddl_handle,
+	struct vcd_property_hdr *property_hdr, void *property_value);
+u32 ddl_process_core_response(void);
+u32 ddl_reset_hw(u32 mode);
+void ddl_read_and_clear_interrupt(void);
+#endif
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_core.h b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_core.h
new file mode 100644
index 0000000..86ecec3
--- /dev/null
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_core.h
@@ -0,0 +1,134 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VCD_DDL_CORE_H_
+#define _VCD_DDL_CORE_H_
+
+#define DDL_LINEAR_BUF_ALIGN_MASK         0xFFFFF800U
+#define DDL_LINEAR_BUF_ALIGN_GUARD_BYTES  0x7FF
+#define DDL_LINEAR_BUFFER_ALIGN_BYTES     2048
+#define DDL_TILE_BUF_ALIGN_MASK           0xFFFFE000U
+#define DDL_TILE_BUF_ALIGN_GUARD_BYTES    0x1FFF
+#define DDL_TILE_BUFFER_ALIGN_BYTES       8192
+
+#define DDL_YUV_BUF_TYPE_LINEAR 0
+#define DDL_YUV_BUF_TYPE_TILE   1
+
+#define DDL_NO_OF_MB(nWidth, nHeight) \
+	((((nWidth) + 15) >> 4) * (((nHeight) + 15) >> 4))
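+/*
+ * Example: a 1920x1088 frame gives ((1920 + 15) >> 4) * ((1088 + 15) >> 4)
+ * = 120 * 68 = 8160 macroblocks.
+ */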
+
+#define DDL_MAX_FRAME_WIDTH   1920
+#define DDL_MAX_FRAME_HEIGHT  1088
+
+#define MAX_DPB_SIZE_L4PT0_MBS    DDL_KILO_BYTE(32)
+#define MAX_FRAME_SIZE_L4PT0_MBS  DDL_KILO_BYTE(8)
+
+#define DDL_MAX_MB_PER_FRAME (DDL_NO_OF_MB(DDL_MAX_FRAME_WIDTH,\
+	DDL_MAX_FRAME_HEIGHT))
+
+#define DDL_DB_LINE_BUF_SIZE\
+	(((((DDL_MAX_FRAME_WIDTH * 4) - 1) / 256) + 1) * 8 * 1024)
+
+#define DDL_MAX_FRAME_RATE               120
+#define DDL_INITIAL_FRAME_RATE            30
+
+#define DDL_MAX_BIT_RATE    (20*1024*1024)
+#define DDL_MAX_MB_PER_SEC  (DDL_MAX_MB_PER_FRAME * DDL_INITIAL_FRAME_RATE)
+
+#define DDL_SW_RESET_SLEEP               1
+#define VCD_MAX_NO_CLIENT                4
+#define VCD_SINGLE_FRAME_COMMAND_CHANNEL 1
+#define VCD_DUAL_FRAME_COMMAND_CHANNEL   2
+#define VCD_FRAME_COMMAND_DEPTH          VCD_SINGLE_FRAME_COMMAND_CHANNEL
+#define VCD_GENEVIDC_COMMAND_DEPTH        1
+#define VCD_COMMAND_EXCLUSIVE            true
+#define DDL_HW_TIMEOUT_IN_MS             1000
+#define DDL_STREAMBUF_ALIGN_GUARD_BYTES  0x7FF
+
+#define DDL_CONTEXT_MEMORY (1024 * 15 * (VCD_MAX_NO_CLIENT + 1))
+
+#define DDL_ENC_MIN_DPB_BUFFERS           2
+#define DDL_ENC_MAX_DPB_BUFFERS           4
+
+#define DDL_FW_AUX_HOST_CMD_SPACE_SIZE         (DDL_KILO_BYTE(10))
+#define DDL_FW_INST_GLOBAL_CONTEXT_SPACE_SIZE  (DDL_KILO_BYTE(500))
+#define DDL_FW_H264DEC_CONTEXT_SPACE_SIZE      (DDL_KILO_BYTE(800))
+#define DDL_FW_OTHER_CONTEXT_SPACE_SIZE        (DDL_KILO_BYTE(10))
+
+#define VCD_DEC_CPB_SIZE         (DDL_KILO_BYTE(512))
+#define DDL_DBG_CORE_DUMP_SIZE   (DDL_KILO_BYTE(10))
+
+#define DDL_BUFEND_PAD                    256
+#define DDL_ENC_SEQHEADER_SIZE            (512+DDL_BUFEND_PAD)
+#define DDL_MAX_BUFFER_COUNT              32
+#define DDL_MIN_BUFFER_COUNT              1
+
+#define DDL_MPEG_REFBUF_COUNT             2
+#define DDL_MPEG_COMV_BUF_NO              2
+#define DDL_H263_COMV_BUF_NO              0
+#define DDL_COMV_BUFLINE_NO               128
+#define DDL_VC1_COMV_BUFLINE_NO           32
+
+#define DDL_MAX_H264_QP            51
+#define DDL_MAX_MPEG4_QP           31
+
+#define DDL_CONCEALMENT_Y_COLOR                 16
+#define DDL_CONCEALMENT_C_COLOR                 128
+
+#define DDL_ALLOW_DEC_FRAMESIZE(width, height) \
+	((DDL_NO_OF_MB(width, height) <= \
+	MAX_FRAME_SIZE_L4PT0_MBS) && \
+	(width <= DDL_MAX_FRAME_WIDTH) && \
+	(height <= DDL_MAX_FRAME_WIDTH) && \
+	((width >= 32 && height >= 16) || \
+	(width >= 16 && height >= 32)))
+
+#define DDL_ALLOW_ENC_FRAMESIZE(width, height) \
+	((DDL_NO_OF_MB(width, height) <= \
+	MAX_FRAME_SIZE_L4PT0_MBS) && \
+	(width <= DDL_MAX_FRAME_WIDTH) && \
+	(height <= DDL_MAX_FRAME_WIDTH) && \
+	((width >= 32 && height >= 32)))
+
+#define DDL_LINEAR_ALIGN_WIDTH      16
+#define DDL_LINEAR_ALIGN_HEIGHT     16
+#define DDL_LINEAR_MULTIPLY_FACTOR  2048
+#define DDL_TILE_ALIGN_WIDTH        128
+#define DDL_TILE_ALIGN_HEIGHT       32
+#define DDL_TILE_MULTIPLY_FACTOR    8192
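+/* Round val up to the next multiple of grid (grid assumed non-zero). */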
+#define DDL_TILE_ALIGN(val, grid) \
+	(((val) + (grid) - 1) / (grid) * (grid))
+
+#define VCD_DDL_720P_YUV_BUF_SIZE     ((1280*720*3) >> 1)
+#define VCD_DDL_WVGA_BUF_SIZE         (800*480)
+
+#define VCD_DDL_TEST_MAX_WIDTH        (DDL_MAX_FRAME_WIDTH)
+#define VCD_DDL_TEST_MAX_HEIGHT       (DDL_MAX_FRAME_HEIGHT)
+
+#define VCD_DDL_TEST_MAX_NUM_H264_DPB  8
+
+#define VCD_DDL_TEST_NUM_ENC_INPUT_BUFS   6
+#define VCD_DDL_TEST_NUM_ENC_OUTPUT_BUFS  4
+
+#define VCD_DDL_TEST_DEFAULT_WIDTH       176
+#define VCD_DDL_TEST_DEFAULT_HEIGHT      144
+
+#define DDL_PIXEL_CACHE_NOT_IDLE          0x4000
+#define DDL_PIXEL_CACHE_STATUS_READ_RETRY 10
+#define DDL_PIXEL_CACHE_STATUS_READ_SLEEP 200
+
+#define DDL_RESL_CHANGE_NO_CHANGE               0
+#define DDL_RESL_CHANGE_INCREASED               1
+#define DDL_RESL_CHANGE_DECREASED               2
+
+#endif
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_errors.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_errors.c
new file mode 100644
index 0000000..d658647
--- /dev/null
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_errors.c
@@ -0,0 +1,755 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "vcd_ddl.h"
+#include "vcd_ddl_shared_mem.h"
+#include "vidc.h"
+
+static u32 ddl_handle_hw_fatal_errors(struct ddl_client_context *ddl);
+static u32 ddl_handle_client_fatal_errors(
+	struct ddl_client_context *ddl);
+static void ddl_input_failed_cb(struct ddl_client_context *ddl,
+	u32 vcd_event, u32 vcd_status);
+static u32 ddl_handle_core_recoverable_errors(
+	struct ddl_client_context *ddl);
+static u32 ddl_handle_core_warnings(u32 error_code);
+static void ddl_release_prev_field(
+	struct ddl_client_context *ddl);
+static u32 ddl_handle_dec_seq_hdr_fail_error(struct ddl_client_context *ddl);
+static void print_core_errors(u32 error_code);
+static void print_core_recoverable_errors(u32 error_code);
+
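+/* Report an unrecoverable hardware error to the client, mark the whole
+ * device as HWFATAL and give the command channel back.
+ */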
+void ddl_hw_fatal_cb(struct ddl_client_context *ddl)
+{
+	struct ddl_context *ddl_context = ddl->ddl_context;
+	u32 error_code = ddl_context->cmd_err_status;
+
+	DDL_MSG_FATAL("VIDC_HW_FATAL");
+	ddl->cmd_state = DDL_CMD_INVALID;
+	ddl_context->device_state = DDL_DEVICE_HWFATAL;
+
+	ddl_context->ddl_callback(VCD_EVT_IND_HWERRFATAL, VCD_ERR_HW_FATAL,
+		&error_code, sizeof(error_code),
+		(u32 *)ddl, ddl->client_data);
+
+	ddl_release_command_channel(ddl_context, ddl->command_channel);
+}
+
+static u32 ddl_handle_hw_fatal_errors(struct ddl_client_context *ddl)
+{
+	struct ddl_context *ddl_context = ddl->ddl_context;
+	u32 status = false, error_code = ddl_context->cmd_err_status;
+
+	switch (error_code) {
+	case VIDC_1080P_ERROR_INVALID_CHANNEL_NUMBER:
+	case VIDC_1080P_ERROR_INVALID_COMMAND_ID:
+	case VIDC_1080P_ERROR_CHANNEL_ALREADY_IN_USE:
+	case VIDC_1080P_ERROR_CHANNEL_NOT_OPEN_BEFORE_CHANNEL_CLOSE:
+	case VIDC_1080P_ERROR_OPEN_CH_ERROR_SEQ_START:
+	case VIDC_1080P_ERROR_SEQ_START_ALREADY_CALLED:
+	case VIDC_1080P_ERROR_OPEN_CH_ERROR_INIT_BUFFERS:
+	case VIDC_1080P_ERROR_SEQ_START_ERROR_INIT_BUFFERS:
+	case VIDC_1080P_ERROR_INIT_BUFFER_ALREADY_CALLED:
+	case VIDC_1080P_ERROR_OPEN_CH_ERROR_FRAME_START:
+	case VIDC_1080P_ERROR_SEQ_START_ERROR_FRAME_START:
+	case VIDC_1080P_ERROR_INIT_BUFFERS_ERROR_FRAME_START:
+	case VIDC_1080P_ERROR_RESOLUTION_CHANGED:
+	case VIDC_1080P_ERROR_INVALID_COMMAND_LAST_FRAME:
+	case VIDC_1080P_ERROR_INVALID_COMMAND:
+	case VIDC_1080P_ERROR_INVALID_CODEC_TYPE:
+	case VIDC_1080P_ERROR_MEM_ALLOCATION_FAILED:
+	case VIDC_1080P_ERROR_INSUFFICIENT_CONTEXT_SIZE:
+	case VIDC_1080P_ERROR_DIVIDE_BY_ZERO:
+	case VIDC_1080P_ERROR_DESCRIPTOR_BUFFER_EMPTY:
+	case VIDC_1080P_ERROR_DMA_TX_NOT_COMPLETE:
+	case VIDC_1080P_ERROR_VSP_NOT_READY:
+	case VIDC_1080P_ERROR_BUFFER_FULL_STATE:
+		ddl_hw_fatal_cb(ddl);
+		status = true;
+	break;
+	default:
+	break;
+	}
+	return status;
+}
+
+void ddl_client_fatal_cb(struct ddl_client_context *ddl)
+{
+	struct ddl_context *ddl_context = ddl->ddl_context;
+
+	if (ddl->cmd_state == DDL_CMD_DECODE_FRAME)
+		ddl_vidc_decode_dynamic_property(ddl, false);
+	else if (ddl->cmd_state == DDL_CMD_ENCODE_FRAME)
+		ddl_vidc_encode_dynamic_property(ddl, false);
+	ddl->cmd_state = DDL_CMD_INVALID;
+	DDL_MSG_LOW("ddl_state_transition: %s ~~> DDL_CLIENT_FAVIDC_ERROR",
+		ddl_get_state_string(ddl->client_state));
+	ddl->client_state = DDL_CLIENT_FAVIDC_ERROR;
+	ddl_context->ddl_callback(VCD_EVT_IND_HWERRFATAL,
+		VCD_ERR_CLIENT_FATAL, NULL, 0, (u32 *)ddl,
+		ddl->client_data);
+	ddl_release_command_channel(ddl_context, ddl->command_channel);
+}
+
+static u32 ddl_handle_client_fatal_errors(
+	struct ddl_client_context *ddl)
+{
+	struct ddl_context *ddl_context = ddl->ddl_context;
+	u32 status = false;
+
+	switch (ddl_context->cmd_err_status) {
+	case VIDC_1080P_ERROR_UNSUPPORTED_FEATURE_IN_PROFILE:
+	case VIDC_1080P_ERROR_RESOLUTION_NOT_SUPPORTED:
+	case VIDC_1080P_ERROR_VOS_END_CODE_RECEIVED:
+	case VIDC_1080P_ERROR_FRAME_RATE_NOT_SUPPORTED:
+	case VIDC_1080P_ERROR_INVALID_QP_VALUE:
+	case VIDC_1080P_ERROR_INVALID_RC_REACTION_COEFFICIENT:
+	case VIDC_1080P_ERROR_INVALID_CPB_SIZE_AT_GIVEN_LEVEL:
+	case VIDC_1080P_ERROR_B_FRAME_NOT_SUPPORTED:
+	case VIDC_1080P_ERROR_ALLOC_DPB_SIZE_NOT_SUFFICIENT:
+	case VIDC_1080P_ERROR_NUM_DPB_OUT_OF_RANGE:
+	case VIDC_1080P_ERROR_NULL_METADATA_INPUT_POINTER:
+	case VIDC_1080P_ERROR_NULL_DPB_POINTER:
+	case VIDC_1080P_ERROR_NULL_OTH_EXT_BUFADDR:
+	case VIDC_1080P_ERROR_NULL_MV_POINTER:
+		status = true;
+		DDL_MSG_ERROR("VIDC_CLIENT_FATAL!!");
+	break;
+	default:
+	break;
+	}
+	if (!status)
+		DDL_MSG_ERROR("VIDC_UNKNOWN_OP_FAILED %d",
+				ddl_context->cmd_err_status);
+	ddl_client_fatal_cb(ddl);
+	return true;
+}
+
+static void ddl_input_failed_cb(struct ddl_client_context *ddl,
+	u32 vcd_event, u32 vcd_status)
+{
+	struct ddl_context *ddl_context = ddl->ddl_context;
+	u32 payload_size = sizeof(struct ddl_frame_data_tag);
+
+	ddl->cmd_state = DDL_CMD_INVALID;
+	if (ddl->decoding)
+		ddl_vidc_decode_dynamic_property(ddl, false);
+	else
+		ddl_vidc_encode_dynamic_property(ddl, false);
+	if (ddl->client_state == DDL_CLIENT_WAIT_FOR_INITCODECDONE) {
+		payload_size = 0;
+		DDL_MSG_LOW("ddl_state_transition: %s ~~> "
+			"DDL_CLIENT_WAIT_FOR_INITCODEC",
+			ddl_get_state_string(ddl->client_state));
+		ddl->client_state = DDL_CLIENT_WAIT_FOR_INITCODEC;
+	} else {
+		DDL_MSG_LOW("ddl_state_transition: %s ~~> "
+			"DDL_CLIENT_WAIT_FOR_FRAME",
+			ddl_get_state_string(ddl->client_state));
+		ddl->client_state = DDL_CLIENT_WAIT_FOR_FRAME;
+	}
+	if (vcd_status == VCD_ERR_IFRAME_EXPECTED)
+		vcd_status = VCD_S_SUCCESS;
+	ddl_context->ddl_callback(vcd_event, vcd_status, &ddl->input_frame,
+		payload_size, (u32 *)ddl, ddl->client_data);
+}
+
+static u32 ddl_handle_core_recoverable_errors(
+	struct ddl_client_context *ddl)
+{
+	struct ddl_context *ddl_context = ddl->ddl_context;
+	u32 vcd_status = VCD_S_SUCCESS;
+	u32 vcd_event = VCD_EVT_RESP_INPUT_DONE;
+	u32 eos = false, status = false;
+
+	if (ddl->decoding) {
+		if (ddl_handle_dec_seq_hdr_fail_error(ddl))
+			return true;
+	}
+
+	if ((ddl->cmd_state != DDL_CMD_DECODE_FRAME) &&
+		(ddl->cmd_state != DDL_CMD_ENCODE_FRAME))
+		return false;
+
+	if (ddl->decoding &&
+		(ddl->codec_data.decoder.field_needed_for_prev_ip == 1)) {
+		ddl->codec_data.decoder.field_needed_for_prev_ip = 0;
+		ddl_release_prev_field(ddl);
+		if (ddl_context->cmd_err_status ==
+		 VIDC_1080P_ERROR_NON_PAIRED_FIELD_NOT_SUPPORTED) {
+			ddl_vidc_decode_frame_run(ddl);
+			return true;
+		}
+	}
+
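+	/* Translate the firmware error code into the status/event reported
+	 * back to the client; codes not listed here are left unhandled so
+	 * that the caller can escalate them.
+	 */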
+	switch (ddl_context->cmd_err_status) {
+	case VIDC_1080P_ERROR_SYNC_POINT_NOT_RECEIVED:
+		vcd_status = VCD_ERR_IFRAME_EXPECTED;
+		break;
+	case VIDC_1080P_ERROR_NO_BUFFER_RELEASED_FROM_HOST:
+		{
+			u32 pending_display = 0, release_mask;
+
+			release_mask =
+				ddl->codec_data.decoder.\
+				dpb_mask.hw_mask;
+			while (release_mask > 0) {
+				if (release_mask & 0x1)
+					pending_display++;
+				release_mask >>= 1;
+			}
+			if (pending_display >= ddl->codec_data.\
+				decoder.min_dpb_num) {
+				DDL_MSG_ERROR("VIDC_FW_ISSUE_REQ_BUF");
+				ddl_client_fatal_cb(ddl);
+				status = true;
+			} else {
+				vcd_event = VCD_EVT_RESP_OUTPUT_REQ;
+				DDL_MSG_LOW("VIDC_OUTPUT_BUF_REQ!!");
+			}
+			break;
+		}
+	case VIDC_1080P_ERROR_BIT_STREAM_BUF_EXHAUST:
+	case VIDC_1080P_ERROR_DESCRIPTOR_TABLE_ENTRY_INVALID:
+	case VIDC_1080P_ERROR_MB_COEFF_NOT_DONE:
+	case VIDC_1080P_ERROR_CODEC_SLICE_NOT_DONE:
+	case VIDC_1080P_ERROR_VIDC_CORE_TIME_OUT:
+	case VIDC_1080P_ERROR_VC1_BITPLANE_DECODE_ERR:
+	case VIDC_1080P_ERROR_RESOLUTION_MISMATCH:
+	case VIDC_1080P_ERROR_NV_QUANT_ERR:
+	case VIDC_1080P_ERROR_SYNC_MARKER_ERR:
+	case VIDC_1080P_ERROR_FEATURE_NOT_SUPPORTED:
+	case VIDC_1080P_ERROR_MEM_CORRUPTION:
+	case VIDC_1080P_ERROR_INVALID_REFERENCE_FRAME:
+	case VIDC_1080P_ERROR_PICTURE_CODING_TYPE_ERR:
+	case VIDC_1080P_ERROR_MV_RANGE_ERR:
+	case VIDC_1080P_ERROR_PICTURE_STRUCTURE_ERR:
+	case VIDC_1080P_ERROR_SLICE_ADDR_INVALID:
+	case VIDC_1080P_ERROR_NON_FRAME_DATA_RECEIVED:
+	case VIDC_1080P_ERROR_INCOMPLETE_FRAME:
+	case VIDC_1080P_ERROR_NALU_HEADER_ERROR:
+	case VIDC_1080P_ERROR_SPS_PARSE_ERROR:
+	case VIDC_1080P_ERROR_PPS_PARSE_ERROR:
+	case VIDC_1080P_ERROR_HEADER_NOT_FOUND:
+	case VIDC_1080P_ERROR_SLICE_PARSE_ERROR:
+	case VIDC_1080P_ERROR_NON_PAIRED_FIELD_NOT_SUPPORTED:
+		vcd_status = VCD_ERR_BITSTREAM_ERR;
+		DDL_MSG_ERROR("VIDC_BIT_STREAM_ERR");
+		break;
+	case VIDC_1080P_ERROR_B_FRAME_NOT_SUPPORTED:
+	case VIDC_1080P_ERROR_UNSUPPORTED_FEATURE_IN_PROFILE:
+	case VIDC_1080P_ERROR_RESOLUTION_NOT_SUPPORTED:
+		if (ddl->decoding) {
+			vcd_status = VCD_ERR_BITSTREAM_ERR;
+			DDL_MSG_ERROR("VIDC_BIT_STREAM_ERR");
+		}
+		break;
+	default:
+		break;
+	}
+
+	if (((vcd_status) || (vcd_event != VCD_EVT_RESP_INPUT_DONE)) &&
+		!status) {
+		ddl->input_frame.frm_trans_end = true;
+		eos = ((vcd_event == VCD_EVT_RESP_INPUT_DONE) &&
+			(ddl->input_frame.vcd_frm.flags & VCD_FRAME_FLAG_EOS));
+		if (((ddl->decoding) && (eos)) || !ddl->decoding)
+			ddl->input_frame.frm_trans_end = false;
+		ddl_input_failed_cb(ddl, vcd_event, vcd_status);
+		if (!ddl->decoding) {
+			ddl->output_frame.frm_trans_end = !eos;
+			ddl->output_frame.vcd_frm.data_len = 0;
+			ddl_context->ddl_callback(VCD_EVT_RESP_OUTPUT_DONE,
+				VCD_ERR_FAIL, &ddl->output_frame,
+				sizeof(struct ddl_frame_data_tag), (u32 *)ddl,
+				ddl->client_data);
+			if (eos) {
+				DDL_MSG_LOW("VIDC_ENC_EOS_DONE");
+				ddl_context->ddl_callback(VCD_EVT_RESP_EOS_DONE,
+					VCD_S_SUCCESS, NULL, 0, (u32 *)ddl,
+					ddl->client_data);
+			}
+		}
+		if ((ddl->decoding) && (eos))
+			ddl_vidc_decode_eos_run(ddl);
+		else
+			ddl_release_command_channel(ddl_context,
+				ddl->command_channel);
+		status = true;
+	}
+	return status;
+}
+
+static u32 ddl_handle_core_warnings(u32 err_status)
+{
+	u32 status = false;
+
+	switch (err_status) {
+	case VIDC_1080P_WARN_COMMAND_FLUSHED:
+	case VIDC_1080P_WARN_FRAME_RATE_UNKNOWN:
+	case VIDC_1080P_WARN_ASPECT_RATIO_UNKNOWN:
+	case VIDC_1080P_WARN_COLOR_PRIMARIES_UNKNOWN:
+	case VIDC_1080P_WARN_TRANSFER_CHAR_UNKNOWN:
+	case VIDC_1080P_WARN_MATRIX_COEFF_UNKNOWN:
+	case VIDC_1080P_WARN_NON_SEQ_SLICE_ADDR:
+	case VIDC_1080P_WARN_BROKEN_LINK:
+	case VIDC_1080P_WARN_FRAME_CONCEALED:
+	case VIDC_1080P_WARN_PROFILE_UNKNOWN:
+	case VIDC_1080P_WARN_LEVEL_UNKNOWN:
+	case VIDC_1080P_WARN_BIT_RATE_NOT_SUPPORTED:
+	case VIDC_1080P_WARN_COLOR_DIFF_FORMAT_NOT_SUPPORTED:
+	case VIDC_1080P_WARN_NULL_EXTRA_METADATA_POINTER:
+	case VIDC_1080P_ERROR_NULL_FW_DEBUG_INFO_POINTER:
+	case VIDC_1080P_ERROR_ALLOC_DEBUG_INFO_SIZE_INSUFFICIENT:
+	case VIDC_1080P_WARN_METADATA_NO_SPACE_NUM_CONCEAL_MB:
+	case VIDC_1080P_WARN_METADATA_NO_SPACE_QP:
+	case VIDC_1080P_WARN_METADATA_NO_SPACE_CONCEAL_MB:
+	case VIDC_1080P_WARN_METADATA_NO_SPACE_VC1_PARAM:
+	case VIDC_1080P_WARN_METADATA_NO_SPACE_SEI:
+	case VIDC_1080P_WARN_METADATA_NO_SPACE_VUI:
+	case VIDC_1080P_WARN_METADATA_NO_SPACE_EXTRA:
+	case VIDC_1080P_WARN_METADATA_NO_SPACE_DATA_NONE:
+	case VIDC_1080P_WARN_METADATA_NO_SPACE_MB_INFO:
+	case VIDC_1080P_WARN_METADATA_NO_SPACE_SLICE_SIZE:
+	case VIDC_1080P_WARN_RESOLUTION_WARNING:
+		status = true;
+		DDL_MSG_ERROR("VIDC_WARNING_IGNORED");
+	break;
+	default:
+	break;
+	}
+	return status;
+}
+
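+/* Top-level error triage for a command/display interrupt: warnings are only
+ * logged, then errors are checked in the order hardware-fatal -> recoverable
+ * -> client-fatal.
+ */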
+u32 ddl_handle_core_errors(struct ddl_context *ddl_context)
+{
+	struct ddl_client_context *ddl;
+	u32 channel_inst_id, status = false;
+	u32 disp_status;
+
+	if (!ddl_context->cmd_err_status &&
+		!ddl_context->disp_pic_err_status) {
+		DDL_MSG_ERROR("VIDC_NO_ERROR");
+		return false;
+	}
+	vidc_1080p_get_returned_channel_inst_id(&channel_inst_id);
+	vidc_1080p_clear_returned_channel_inst_id();
+	ddl = ddl_get_current_ddl_client_for_channel_id(ddl_context,
+		ddl_context->response_cmd_ch_id);
+	if (!ddl) {
+		DDL_MSG_ERROR("VIDC_SPURIOUS_INTERRUPT_ERROR");
+		return true;
+	}
+	if (ddl_context->cmd_err_status) {
+		print_core_errors(ddl_context->cmd_err_status);
+		print_core_recoverable_errors(ddl_context->cmd_err_status);
+	}
+	if (ddl_context->disp_pic_err_status)
+		print_core_errors(ddl_context->disp_pic_err_status);
+	status = ddl_handle_core_warnings(ddl_context->cmd_err_status);
+	disp_status = ddl_handle_core_warnings(
+		ddl_context->disp_pic_err_status);
+	if (!status && !disp_status) {
+		DDL_MSG_ERROR("ddl_warning:Unknown");
+		status = ddl_handle_hw_fatal_errors(ddl);
+		if (!status)
+			status = ddl_handle_core_recoverable_errors(ddl);
+		if (!status)
+			status = ddl_handle_client_fatal_errors(ddl);
+	}
+	return status;
+}
+
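+/* Give the previously queued field back to the client as an OUTPUT_DONE
+ * carrying VCD_ERR_INTRLCD_FIELD_DROP.
+ */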
+static void ddl_release_prev_field(struct ddl_client_context *ddl)
+{
+	ddl->output_frame.vcd_frm.ip_frm_tag =
+		ddl->codec_data.decoder.prev_ip_frm_tag;
+	ddl->output_frame.vcd_frm.physical = NULL;
+	ddl->output_frame.vcd_frm.virtual = NULL;
+	ddl->output_frame.frm_trans_end = false;
+	ddl->ddl_context->ddl_callback(VCD_EVT_RESP_OUTPUT_DONE,
+		VCD_ERR_INTRLCD_FIELD_DROP, &(ddl->output_frame),
+		sizeof(struct ddl_frame_data_tag),
+		(u32 *) ddl, ddl->client_data);
+}
+
+static u32 ddl_handle_dec_seq_hdr_fail_error(struct ddl_client_context *ddl)
+{
+	struct ddl_context *ddl_context = ddl->ddl_context;
+	u32 status = false;
+
+	if ((ddl->cmd_state != DDL_CMD_HEADER_PARSE) ||
+		(ddl->client_state != DDL_CLIENT_WAIT_FOR_INITCODECDONE)) {
+		DDL_MSG_ERROR("STATE-CRITICAL-HDDONE");
+		return false;
+	}
+
+	switch (ddl_context->cmd_err_status) {
+	case VIDC_1080P_ERROR_UNSUPPORTED_FEATURE_IN_PROFILE:
+	case VIDC_1080P_ERROR_RESOLUTION_NOT_SUPPORTED:
+	case VIDC_1080P_ERROR_HEADER_NOT_FOUND:
+	case VIDC_1080P_ERROR_SPS_PARSE_ERROR:
+	case VIDC_1080P_ERROR_PPS_PARSE_ERROR:
+	{
+		struct ddl_decoder_data *decoder = &ddl->codec_data.decoder;
+		if (ddl_context->cmd_err_status ==
+			VIDC_1080P_ERROR_UNSUPPORTED_FEATURE_IN_PROFILE
+			&& decoder->codec.codec == VCD_CODEC_H264) {
+			DDL_MSG_ERROR("Unsupported Feature for H264");
+			ddl_client_fatal_cb(ddl);
+			return true;
+		}
+		if ((ddl_context->cmd_err_status ==
+			VIDC_1080P_ERROR_RESOLUTION_NOT_SUPPORTED)
+			&& (decoder->codec.codec == VCD_CODEC_H263
+			|| decoder->codec.codec == VCD_CODEC_H264
+			|| decoder->codec.codec == VCD_CODEC_MPEG4
+			|| decoder->codec.codec == VCD_CODEC_VC1
+			|| decoder->codec.codec == VCD_CODEC_VC1_RCV)) {
+			DDL_MSG_ERROR("Unsupported resolution");
+			ddl_client_fatal_cb(ddl);
+			return true;
+		}
+
+		DDL_MSG_ERROR("SEQHDR-FAILED");
+		if (decoder->header_in_start) {
+			decoder->header_in_start = false;
+			ddl_context->ddl_callback(VCD_EVT_RESP_START,
+				VCD_ERR_SEQHDR_PARSE_FAIL, NULL, 0,
+				(u32 *) ddl, ddl->client_data);
+		} else {
+			ddl->input_frame.frm_trans_end = true;
+			if ((ddl->input_frame.vcd_frm.flags &
+				VCD_FRAME_FLAG_EOS)) {
+				ddl->input_frame.frm_trans_end = false;
+			}
+			ddl_vidc_decode_dynamic_property(ddl, false);
+			ddl_context->ddl_callback(
+				VCD_EVT_RESP_INPUT_DONE,
+				VCD_ERR_SEQHDR_PARSE_FAIL, &ddl->input_frame,
+				sizeof(struct ddl_frame_data_tag), (u32 *)ddl,
+				ddl->client_data);
+			if ((ddl->input_frame.vcd_frm.flags &
+				VCD_FRAME_FLAG_EOS)) {
+				DDL_MSG_HIGH("EOS_DONE-fromDDL");
+				ddl_context->ddl_callback(VCD_EVT_RESP_EOS_DONE,
+				VCD_S_SUCCESS, NULL, 0, (u32 *) ddl,
+				ddl->client_data);
+			}
+		}
+		DDL_MSG_LOW("ddl_state_transition: %s ~~> "
+			"DDL_CLIENT_WAIT_FOR_INITCODEC",
+			ddl_get_state_string(ddl->client_state));
+		ddl->client_state = DDL_CLIENT_WAIT_FOR_INITCODEC;
+		ddl_release_command_channel(ddl_context, ddl->command_channel);
+		status = true;
+		break;
+	}
+	default:
+		break;
+	}
+	return status;
+}
+
+static void print_core_errors(u32 error_code)
+{
+	s8 *string = NULL;
+
+	switch (error_code) {
+	case VIDC_1080P_ERROR_INVALID_CHANNEL_NUMBER:
+		string = "VIDC_1080P_ERROR_INVALID_CHANNEL_NUMBER";
+	break;
+	case VIDC_1080P_ERROR_INVALID_COMMAND_ID:
+		string = "VIDC_1080P_ERROR_INVALID_COMMAND_ID";
+	break;
+	case VIDC_1080P_ERROR_CHANNEL_ALREADY_IN_USE:
+		string = "VIDC_1080P_ERROR_CHANNEL_ALREADY_IN_USE";
+	break;
+	case VIDC_1080P_ERROR_CHANNEL_NOT_OPEN_BEFORE_CHANNEL_CLOSE:
+		string =
+		"VIDC_1080P_ERROR_CHANNEL_NOT_OPEN_BEFORE_CHANNEL_CLOSE";
+	break;
+	case VIDC_1080P_ERROR_OPEN_CH_ERROR_SEQ_START:
+		string = "VIDC_1080P_ERROR_OPEN_CH_ERROR_SEQ_START";
+	break;
+	case VIDC_1080P_ERROR_SEQ_START_ALREADY_CALLED:
+		string = "VIDC_1080P_ERROR_SEQ_START_ALREADY_CALLED";
+	break;
+	case VIDC_1080P_ERROR_OPEN_CH_ERROR_INIT_BUFFERS:
+		string = "VIDC_1080P_ERROR_OPEN_CH_ERROR_INIT_BUFFERS";
+	break;
+	case VIDC_1080P_ERROR_SEQ_START_ERROR_INIT_BUFFERS:
+		string = "VIDC_1080P_ERROR_SEQ_START_ERROR_INIT_BUFFERS";
+	break;
+	case VIDC_1080P_ERROR_INIT_BUFFER_ALREADY_CALLED:
+		string = "VIDC_1080P_ERROR_INIT_BUFFER_ALREADY_CALLED";
+	break;
+	case VIDC_1080P_ERROR_OPEN_CH_ERROR_FRAME_START:
+		string = "VIDC_1080P_ERROR_OPEN_CH_ERROR_FRAME_START";
+	break;
+	case VIDC_1080P_ERROR_SEQ_START_ERROR_FRAME_START:
+		string = "VIDC_1080P_ERROR_SEQ_START_ERROR_FRAME_START";
+	break;
+	case VIDC_1080P_ERROR_INIT_BUFFERS_ERROR_FRAME_START:
+		string = "VIDC_1080P_ERROR_INIT_BUFFERS_ERROR_FRAME_START";
+	break;
+	case VIDC_1080P_ERROR_RESOLUTION_CHANGED:
+		string = "VIDC_1080P_ERROR_RESOLUTION_CHANGED";
+	break;
+	case VIDC_1080P_ERROR_INVALID_COMMAND_LAST_FRAME:
+		string = "VIDC_1080P_ERROR_INVALID_COMMAND_LAST_FRAME";
+	break;
+	case VIDC_1080P_ERROR_INVALID_COMMAND:
+		string = "VIDC_1080P_ERROR_INVALID_COMMAND";
+	break;
+	case VIDC_1080P_ERROR_INVALID_CODEC_TYPE:
+		string = "VIDC_1080P_ERROR_INVALID_CODEC_TYPE";
+	break;
+	case VIDC_1080P_ERROR_MEM_ALLOCATION_FAILED:
+		string = "VIDC_1080P_ERROR_MEM_ALLOCATION_FAILED";
+	break;
+	case VIDC_1080P_ERROR_INSUFFICIENT_CONTEXT_SIZE:
+		string = "VIDC_1080P_ERROR_INSUFFICIENT_CONTEXT_SIZE";
+	break;
+	case VIDC_1080P_ERROR_DIVIDE_BY_ZERO:
+		string = "VIDC_1080P_ERROR_DIVIDE_BY_ZERO";
+	break;
+	case VIDC_1080P_ERROR_DESCRIPTOR_BUFFER_EMPTY:
+		string = "VIDC_1080P_ERROR_DESCRIPTOR_BUFFER_EMPTY";
+	break;
+	case VIDC_1080P_ERROR_DMA_TX_NOT_COMPLETE:
+		string = "VIDC_1080P_ERROR_DMA_TX_NOT_COMPLETE";
+	break;
+	case VIDC_1080P_ERROR_VSP_NOT_READY:
+		string = "VIDC_1080P_ERROR_VSP_NOT_READY";
+	break;
+	case VIDC_1080P_ERROR_BUFFER_FULL_STATE:
+		string = "VIDC_1080P_ERROR_BUFFER_FULL_STATE";
+	break;
+	case VIDC_1080P_ERROR_UNSUPPORTED_FEATURE_IN_PROFILE:
+		string = "VIDC_1080P_ERROR_UNSUPPORTED_FEATURE_IN_PROFILE";
+	break;
+	case VIDC_1080P_ERROR_HEADER_NOT_FOUND:
+		string = "VIDC_1080P_ERROR_HEADER_NOT_FOUND";
+	break;
+	case VIDC_1080P_ERROR_VOS_END_CODE_RECEIVED:
+		string = "VIDC_1080P_ERROR_VOS_END_CODE_RECEIVED";
+	break;
+	case VIDC_1080P_ERROR_RESOLUTION_NOT_SUPPORTED:
+		string = "VIDC_1080P_ERROR_RESOLUTION_NOT_SUPPORTED";
+	break;
+	case VIDC_1080P_ERROR_FRAME_RATE_NOT_SUPPORTED:
+		string = "VIDC_1080P_ERROR_FRAME_RATE_NOT_SUPPORTED";
+	break;
+	case VIDC_1080P_ERROR_INVALID_QP_VALUE:
+		string = "VIDC_1080P_ERROR_INVALID_QP_VALUE";
+	break;
+	case VIDC_1080P_ERROR_INVALID_RC_REACTION_COEFFICIENT:
+		string = "VIDC_1080P_ERROR_INVALID_RC_REACTION_COEFFICIENT";
+	break;
+	case VIDC_1080P_ERROR_INVALID_CPB_SIZE_AT_GIVEN_LEVEL:
+		string = "VIDC_1080P_ERROR_INVALID_CPB_SIZE_AT_GIVEN_LEVEL";
+	break;
+	case VIDC_1080P_ERROR_B_FRAME_NOT_SUPPORTED:
+		string = "VIDC_1080P_ERROR_B_FRAME_NOT_SUPPORTED";
+	break;
+	case VIDC_1080P_ERROR_ALLOC_DPB_SIZE_NOT_SUFFICIENT:
+		string = "VIDC_1080P_ERROR_ALLOC_DPB_SIZE_NOT_SUFFICIENT";
+	break;
+	case VIDC_1080P_ERROR_NUM_DPB_OUT_OF_RANGE:
+		string = "VIDC_1080P_ERROR_NUM_DPB_OUT_OF_RANGE";
+	break;
+	case VIDC_1080P_ERROR_NULL_METADATA_INPUT_POINTER:
+		string = "VIDC_1080P_ERROR_NULL_METADATA_INPUT_POINTER";
+	break;
+	case VIDC_1080P_ERROR_NULL_DPB_POINTER:
+		string = "VIDC_1080P_ERROR_NULL_DPB_POINTER";
+	break;
+	case VIDC_1080P_ERROR_NULL_OTH_EXT_BUFADDR:
+		string = "VIDC_1080P_ERROR_NULL_OTH_EXT_BUFADDR";
+	break;
+	case VIDC_1080P_ERROR_NULL_MV_POINTER:
+		string = "VIDC_1080P_ERROR_NULL_MV_POINTER";
+	break;
+	case VIDC_1080P_ERROR_NON_PAIRED_FIELD_NOT_SUPPORTED:
+		string = "VIDC_1080P_ERROR_NON_PAIRED_FIELD_NOT_SUPPORTED";
+	break;
+	case VIDC_1080P_WARN_COMMAND_FLUSHED:
+		string = "VIDC_1080P_WARN_COMMAND_FLUSHED";
+	break;
+	case VIDC_1080P_WARN_FRAME_RATE_UNKNOWN:
+		string = "VIDC_1080P_WARN_FRAME_RATE_UNKNOWN";
+	break;
+	case VIDC_1080P_WARN_ASPECT_RATIO_UNKNOWN:
+		string = "VIDC_1080P_WARN_ASPECT_RATIO_UNKNOWN";
+	break;
+	case VIDC_1080P_WARN_COLOR_PRIMARIES_UNKNOWN:
+		string = "VIDC_1080P_WARN_COLOR_PRIMARIES_UNKNOWN";
+	break;
+	case VIDC_1080P_WARN_TRANSFER_CHAR_UNKNOWN:
+		string = "VIDC_1080P_WARN_TRANSFER_CHAR_UNKNOWN";
+	break;
+	case VIDC_1080P_WARN_MATRIX_COEFF_UNKNOWN:
+		string = "VIDC_1080P_WARN_MATRIX_COEFF_UNKNOWN";
+	break;
+	case VIDC_1080P_WARN_NON_SEQ_SLICE_ADDR:
+		string = "VIDC_1080P_WARN_NON_SEQ_SLICE_ADDR";
+	break;
+	case VIDC_1080P_WARN_BROKEN_LINK:
+		string = "VIDC_1080P_WARN_BROKEN_LINK";
+	break;
+	case VIDC_1080P_WARN_FRAME_CONCEALED:
+		string = "VIDC_1080P_WARN_FRAME_CONCEALED";
+	break;
+	case VIDC_1080P_WARN_PROFILE_UNKNOWN:
+		string = "VIDC_1080P_WARN_PROFILE_UNKNOWN";
+	break;
+	case VIDC_1080P_WARN_LEVEL_UNKNOWN:
+		string = "VIDC_1080P_WARN_LEVEL_UNKNOWN";
+	break;
+	case VIDC_1080P_WARN_BIT_RATE_NOT_SUPPORTED:
+		string = "VIDC_1080P_WARN_BIT_RATE_NOT_SUPPORTED";
+	break;
+	case VIDC_1080P_WARN_COLOR_DIFF_FORMAT_NOT_SUPPORTED:
+		string = "VIDC_1080P_WARN_COLOR_DIFF_FORMAT_NOT_SUPPORTED";
+	break;
+	case VIDC_1080P_WARN_NULL_EXTRA_METADATA_POINTER:
+		string = "VIDC_1080P_WARN_NULL_EXTRA_METADATA_POINTER";
+	break;
+	case VIDC_1080P_ERROR_NULL_FW_DEBUG_INFO_POINTER:
+		string = "VIDC_1080P_ERROR_NULL_FW_DEBUG_INFO_POINTER";
+	break;
+	case VIDC_1080P_ERROR_ALLOC_DEBUG_INFO_SIZE_INSUFFICIENT:
+		string =
+		"VIDC_1080P_ERROR_ALLOC_DEBUG_INFO_SIZE_INSUFFICIENT";
+	break;
+	case VIDC_1080P_WARN_METADATA_NO_SPACE_NUM_CONCEAL_MB:
+		string = "VIDC_1080P_WARN_METADATA_NO_SPACE_NUM_CONCEAL_MB";
+	break;
+	case VIDC_1080P_WARN_METADATA_NO_SPACE_QP:
+		string = "VIDC_1080P_WARN_METADATA_NO_SPACE_QP";
+	break;
+	case VIDC_1080P_WARN_METADATA_NO_SPACE_CONCEAL_MB:
+		string = "VIDC_1080P_WARN_METADATA_NO_SPACE_CONCEAL_MB";
+	break;
+	case VIDC_1080P_WARN_METADATA_NO_SPACE_VC1_PARAM:
+		string = "VIDC_1080P_WARN_METADATA_NO_SPACE_VC1_PARAM";
+	break;
+	case VIDC_1080P_WARN_METADATA_NO_SPACE_SEI:
+		string = "VIDC_1080P_WARN_METADATA_NO_SPACE_SEI";
+	break;
+	case VIDC_1080P_WARN_METADATA_NO_SPACE_VUI:
+		string = "VIDC_1080P_WARN_METADATA_NO_SPACE_VUI";
+	break;
+	case VIDC_1080P_WARN_METADATA_NO_SPACE_EXTRA:
+		string = "VIDC_1080P_WARN_METADATA_NO_SPACE_EXTRA";
+	break;
+	case VIDC_1080P_WARN_METADATA_NO_SPACE_DATA_NONE:
+		string = "VIDC_1080P_WARN_METADATA_NO_SPACE_DATA_NONE";
+	break;
+	case VIDC_1080P_WARN_METADATA_NO_SPACE_MB_INFO:
+		string = "VIDC_1080P_WARN_METADATA_NO_SPACE_MB_INFO";
+	break;
+	case VIDC_1080P_WARN_METADATA_NO_SPACE_SLICE_SIZE:
+		string = "VIDC_1080P_WARN_METADATA_NO_SPACE_SLICE_SIZE";
+	break;
+	case VIDC_1080P_WARN_RESOLUTION_WARNING:
+		string = "VIDC_1080P_WARN_RESOLUTION_WARNING";
+	break;
+	}
+	if (string)
+		DDL_MSG_ERROR("Error code = 0x%x : %s", error_code, string);
+}
+
+static void print_core_recoverable_errors(u32 error_code)
+{
+	s8 *string = NULL;
+
+	switch (error_code) {
+	case VIDC_1080P_ERROR_SYNC_POINT_NOT_RECEIVED:
+		string = "VIDC_1080P_ERROR_SYNC_POINT_NOT_RECEIVED";
+	break;
+	case VIDC_1080P_ERROR_NO_BUFFER_RELEASED_FROM_HOST:
+		string = "VIDC_1080P_ERROR_NO_BUFFER_RELEASED_FROM_HOST";
+	break;
+	case VIDC_1080P_ERROR_BIT_STREAM_BUF_EXHAUST:
+		string = "VIDC_1080P_ERROR_BIT_STREAM_BUF_EXHAUST";
+	break;
+	case VIDC_1080P_ERROR_DESCRIPTOR_TABLE_ENTRY_INVALID:
+		string = "VIDC_1080P_ERROR_DESCRIPTOR_TABLE_ENTRY_INVALID";
+	break;
+	case VIDC_1080P_ERROR_MB_COEFF_NOT_DONE:
+		string = "VIDC_1080P_ERROR_MB_COEFF_NOT_DONE";
+	break;
+	case VIDC_1080P_ERROR_CODEC_SLICE_NOT_DONE:
+		string = "VIDC_1080P_ERROR_CODEC_SLICE_NOT_DONE";
+	break;
+	case VIDC_1080P_ERROR_VIDC_CORE_TIME_OUT:
+		string = "VIDC_1080P_ERROR_VIDC_CORE_TIME_OUT";
+	break;
+	case VIDC_1080P_ERROR_VC1_BITPLANE_DECODE_ERR:
+		string = "VIDC_1080P_ERROR_VC1_BITPLANE_DECODE_ERR";
+	break;
+	case VIDC_1080P_ERROR_RESOLUTION_MISMATCH:
+		string = "VIDC_1080P_ERROR_RESOLUTION_MISMATCH";
+	break;
+	case VIDC_1080P_ERROR_NV_QUANT_ERR:
+		string = "VIDC_1080P_ERROR_NV_QUANT_ERR";
+	break;
+	case VIDC_1080P_ERROR_SYNC_MARKER_ERR:
+		string = "VIDC_1080P_ERROR_SYNC_MARKER_ERR";
+	break;
+	case VIDC_1080P_ERROR_FEATURE_NOT_SUPPORTED:
+		string = "VIDC_1080P_ERROR_FEATURE_NOT_SUPPORTED";
+	break;
+	case VIDC_1080P_ERROR_MEM_CORRUPTION:
+		string = "VIDC_1080P_ERROR_MEM_CORRUPTION";
+	break;
+	case VIDC_1080P_ERROR_INVALID_REFERENCE_FRAME:
+		string = "VIDC_1080P_ERROR_INVALID_REFERENCE_FRAME";
+	break;
+	case VIDC_1080P_ERROR_PICTURE_CODING_TYPE_ERR:
+		string = "VIDC_1080P_ERROR_PICTURE_CODING_TYPE_ERR";
+	break;
+	case VIDC_1080P_ERROR_MV_RANGE_ERR:
+		string = "VIDC_1080P_ERROR_MV_RANGE_ERR";
+	break;
+	case VIDC_1080P_ERROR_PICTURE_STRUCTURE_ERR:
+		string = "VIDC_1080P_ERROR_PICTURE_STRUCTURE_ERR";
+	break;
+	case VIDC_1080P_ERROR_SLICE_ADDR_INVALID:
+		string = "VIDC_1080P_ERROR_SLICE_ADDR_INVALID";
+	break;
+	case VIDC_1080P_ERROR_NON_FRAME_DATA_RECEIVED:
+		string = "VIDC_1080P_ERROR_NON_FRAME_DATA_RECEIVED";
+	break;
+	case VIDC_1080P_ERROR_INCOMPLETE_FRAME:
+		string = "VIDC_1080P_ERROR_INCOMPLETE_FRAME";
+	break;
+	case VIDC_1080P_ERROR_NALU_HEADER_ERROR:
+		string = "VIDC_1080P_ERROR_NALU_HEADER_ERROR";
+	break;
+	case VIDC_1080P_ERROR_SPS_PARSE_ERROR:
+		string = "VIDC_1080P_ERROR_SPS_PARSE_ERROR";
+	break;
+	case VIDC_1080P_ERROR_PPS_PARSE_ERROR:
+		string = "VIDC_1080P_ERROR_PPS_PARSE_ERROR";
+	break;
+	case VIDC_1080P_ERROR_SLICE_PARSE_ERROR:
+		string = "VIDC_1080P_ERROR_SLICE_PARSE_ERROR";
+	break;
+	}
+	if (string)
+		DDL_MSG_ERROR("Recoverable Error code = 0x%x : %s",
+					  error_code, string);
+}
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
new file mode 100644
index 0000000..1b700bd
--- /dev/null
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
@@ -0,0 +1,959 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <mach/msm_memtypes.h>
+#include "vcd_ddl.h"
+#include "vcd_ddl_shared_mem.h"
+
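+/* The DDL keeps a single statically allocated context shared by all clients. */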
+struct ddl_context *ddl_get_context(void)
+{
+	static struct ddl_context ddl_context;
+	return &ddl_context;
+}
+
+#ifdef DDL_MSG_LOG
+s8 *ddl_get_state_string(enum ddl_client_state client_state)
+{
+	s8 *ptr;
+
+	switch (client_state) {
+	case DDL_CLIENT_INVALID:
+		ptr = "INVALID        ";
+	break;
+	case DDL_CLIENT_OPEN:
+		ptr = "OPEN   ";
+	break;
+	case DDL_CLIENT_WAIT_FOR_CHDONE:
+		ptr = "WAIT_FOR_CHDONE       ";
+	break;
+	case DDL_CLIENT_WAIT_FOR_INITCODEC:
+		ptr = "WAIT_FOR_INITCODEC    ";
+	break;
+	case DDL_CLIENT_WAIT_FOR_INITCODECDONE:
+		ptr = "WAIT_FOR_INITCODECDONE";
+	break;
+	case DDL_CLIENT_WAIT_FOR_DPB:
+		ptr = "WAIT_FOR_DPB   ";
+	break;
+	case DDL_CLIENT_WAIT_FOR_DPBDONE:
+		ptr = "WAIT_FOR_DPBDONE";
+	break;
+	case DDL_CLIENT_WAIT_FOR_FRAME:
+		ptr = "WAIT_FOR_FRAME ";
+	break;
+	case DDL_CLIENT_WAIT_FOR_FRAME_DONE:
+		ptr = "WAIT_FOR_FRAME_DONE   ";
+	break;
+	case DDL_CLIENT_WAIT_FOR_EOS_DONE:
+		ptr = "WAIT_FOR_EOS_DONE     ";
+	break;
+	case DDL_CLIENT_WAIT_FOR_CHEND:
+		ptr = "WAIT_FOR_CHEND ";
+	break;
+	case DDL_CLIENT_FATAL_ERROR:
+		ptr = "FATAL_ERROR";
+	break;
+	default:
+		ptr = "UNKNOWN        ";
+	break;
+	}
+	return ptr;
+}
+#endif
+
+u32 ddl_client_transact(u32 operation,
+	struct ddl_client_context **pddl_client)
+{
+	struct ddl_context *ddl_context;
+	u32 ret_status = VCD_ERR_FAIL;
+	s32 counter;
+
+	ddl_context = ddl_get_context();
+	switch (operation) {
+	case DDL_FREE_CLIENT:
+		ret_status = VCD_ERR_MAX_CLIENT;
+		for (counter = 0; (counter < VCD_MAX_NO_CLIENT) &&
+			(ret_status == VCD_ERR_MAX_CLIENT); ++counter) {
+			if (*pddl_client == ddl_context->ddl_clients
+				[counter]) {
+					kfree(*pddl_client);
+					*pddl_client = NULL;
+					ddl_context->ddl_clients[counter]
+						= NULL;
+				ret_status = VCD_S_SUCCESS;
+			}
+		}
+	break;
+	case DDL_GET_CLIENT:
+		ret_status = VCD_ERR_MAX_CLIENT;
+		for (counter = (VCD_MAX_NO_CLIENT - 1); (counter >= 0) &&
+			(ret_status == VCD_ERR_MAX_CLIENT); --counter) {
+			if (!ddl_context->ddl_clients[counter]) {
+				*pddl_client =
+					(struct ddl_client_context *)
+					kmalloc(sizeof(struct
+					ddl_client_context), GFP_KERNEL);
+				if (!*pddl_client)
+					ret_status = VCD_ERR_ALLOC_FAIL;
+				else {
+					memset(*pddl_client, 0,
+						sizeof(struct
+						ddl_client_context));
+					ddl_context->ddl_clients
+						[counter] = *pddl_client;
+					(*pddl_client)->ddl_context =
+						ddl_context;
+					ret_status = VCD_S_SUCCESS;
+				}
+			}
+		}
+	break;
+	case DDL_INIT_CLIENTS:
+		for (counter = 0; counter < VCD_MAX_NO_CLIENT; ++counter)
+			ddl_context->ddl_clients[counter] = NULL;
+		ret_status = VCD_S_SUCCESS;
+	break;
+	case DDL_ACTIVE_CLIENT:
+		for (counter = 0; counter < VCD_MAX_NO_CLIENT;
+			++counter) {
+			if (ddl_context->ddl_clients[counter]) {
+				ret_status = VCD_S_SUCCESS;
+				break;
+			}
+		}
+	break;
+	default:
+		ret_status = VCD_ERR_ILLEGAL_PARM;
+	break;
+	}
+	return ret_status;
+}
+
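+/* DPB bookkeeping: bit n of the masks corresponds to dec_pic_buffers[n].
+ * MARK_BUSY clears the bit in hw_mask, MARK_FREE sets it in client_mask,
+ * SET_MASK hands all client-released buffers back to the hardware, and
+ * RETRIEVE pops one buffer (client-released first, then hardware-owned).
+ */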
+u32 ddl_decoder_dpb_transact(struct ddl_decoder_data *decoder,
+	struct ddl_frame_data_tag  *in_out_frame, u32 operation)
+{
+	struct ddl_frame_data_tag *found_frame = NULL;
+	struct ddl_mask *dpb_mask = &decoder->dpb_mask;
+	u32 vcd_status = VCD_S_SUCCESS, loopc;
+
+	switch (operation) {
+	case DDL_DPB_OP_MARK_BUSY:
+	case DDL_DPB_OP_MARK_FREE:
+		for (loopc = 0; !found_frame && loopc <
+			decoder->dp_buf.no_of_dec_pic_buf; ++loopc) {
+			if (in_out_frame->vcd_frm.physical ==
+				decoder->dp_buf.dec_pic_buffers[loopc].
+				vcd_frm.physical) {
+				found_frame = &(decoder->dp_buf.
+					dec_pic_buffers[loopc]);
+			break;
+			}
+		}
+		if (found_frame) {
+			if (operation == DDL_DPB_OP_MARK_BUSY) {
+				dpb_mask->hw_mask &=
+					(~(u32)(0x1 << loopc));
+				*in_out_frame = *found_frame;
+			} else if (operation == DDL_DPB_OP_MARK_FREE) {
+				dpb_mask->client_mask |= (0x1 << loopc);
+				*found_frame = *in_out_frame;
+			}
+		} else {
+			in_out_frame->vcd_frm.physical = NULL;
+			in_out_frame->vcd_frm.virtual = NULL;
+			vcd_status = VCD_ERR_BAD_POINTER;
+			DDL_MSG_ERROR("BUF_NOT_FOUND");
+		}
+	break;
+	case DDL_DPB_OP_SET_MASK:
+		dpb_mask->hw_mask |= dpb_mask->client_mask;
+		dpb_mask->client_mask = 0;
+	break;
+	case DDL_DPB_OP_INIT:
+	{
+		u32 dpb_size;
+		dpb_size = (!decoder->meta_data_offset) ?
+		decoder->dp_buf.dec_pic_buffers[0].vcd_frm.alloc_len :
+			decoder->meta_data_offset;
+	}
+	break;
+	case DDL_DPB_OP_RETRIEVE:
+	{
+		u32 position;
+		if (dpb_mask->client_mask) {
+			position = 0x1;
+			for (loopc = 0; loopc <
+				decoder->dp_buf.no_of_dec_pic_buf &&
+				!found_frame; ++loopc) {
+				if (dpb_mask->client_mask & position) {
+					found_frame = &decoder->dp_buf.
+						dec_pic_buffers[loopc];
+					dpb_mask->client_mask &=
+						~(position);
+				}
+				position <<= 1;
+			}
+		} else if (dpb_mask->hw_mask) {
+			position = 0x1;
+			for (loopc = 0; loopc <
+				decoder->dp_buf.no_of_dec_pic_buf &&
+				!found_frame; ++loopc) {
+				if (dpb_mask->hw_mask & position) {
+					found_frame = &decoder->dp_buf.
+						dec_pic_buffers[loopc];
+					dpb_mask->hw_mask &= ~(position);
+				}
+				position <<= 1;
+			}
+		}
+		if (found_frame)
+			*in_out_frame = *found_frame;
+		else {
+			in_out_frame->vcd_frm.physical = NULL;
+			in_out_frame->vcd_frm.virtual = NULL;
+		}
+	}
+	break;
+	default:
+	break;
+	}
+	return vcd_status;
+}
+
+u32 ddl_decoder_dpb_init(struct ddl_client_context *ddl)
+{
+	struct ddl_context *ddl_context = ddl->ddl_context;
+	struct ddl_decoder_data *decoder = &ddl->codec_data.decoder;
+	struct ddl_dec_buffers *dec_buffers = &decoder->hw_bufs;
+	struct ddl_frame_data_tag *frame;
+	u32 luma[DDL_MAX_BUFFER_COUNT], chroma[DDL_MAX_BUFFER_COUNT];
+	u32 mv[DDL_MAX_BUFFER_COUNT], luma_size, i, dpb;
+
+	frame = &decoder->dp_buf.dec_pic_buffers[0];
+	luma_size = ddl_get_yuv_buf_size(decoder->frame_size.width,
+			decoder->frame_size.height, DDL_YUV_BUF_TYPE_TILE);
+	dpb = decoder->dp_buf.no_of_dec_pic_buf;
+	DDL_MSG_LOW("%s Decoder num DPB buffers = %u Luma Size = %u",
+				 __func__, dpb, luma_size);
+	if (dpb > DDL_MAX_BUFFER_COUNT)
+		dpb = DDL_MAX_BUFFER_COUNT;
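+	/* Pre-fill each DPB with Y = 0x10, CbCr = 0x80 (black); memset()
+	 * only uses the low byte of the fill value passed here.
+	 */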
+	for (i = 0; i < dpb; i++) {
+		if (frame[i].vcd_frm.virtual) {
+			memset(frame[i].vcd_frm.virtual, 0x10101010, luma_size);
+			memset(frame[i].vcd_frm.virtual + luma_size, 0x80808080,
+					frame[i].vcd_frm.alloc_len - luma_size);
+		}
+
+		luma[i] = DDL_OFFSET(ddl_context->dram_base_a.
+			align_physical_addr, frame[i].vcd_frm.physical);
+		chroma[i] = luma[i] + luma_size;
+		DDL_MSG_LOW("%s Decoder Luma address = %x Chroma address = %x",
+					__func__, luma[i], chroma[i]);
+	}
+	switch (decoder->codec.codec) {
+	case VCD_CODEC_MPEG1:
+	case VCD_CODEC_MPEG2:
+		vidc_1080p_set_decode_recon_buffers(dpb, luma, chroma);
+	break;
+	case VCD_CODEC_DIVX_3:
+	case VCD_CODEC_DIVX_4:
+	case VCD_CODEC_DIVX_5:
+	case VCD_CODEC_DIVX_6:
+	case VCD_CODEC_XVID:
+	case VCD_CODEC_MPEG4:
+		vidc_1080p_set_decode_recon_buffers(dpb, luma, chroma);
+		vidc_1080p_set_mpeg4_divx_decode_work_buffers(
+		DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+			dec_buffers->nb_dcac),
+		DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+			dec_buffers->upnb_mv),
+		DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+			dec_buffers->sub_anchor_mv),
+		DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+			dec_buffers->overlay_xform),
+		DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+			dec_buffers->stx_parser));
+	break;
+	case VCD_CODEC_H263:
+		vidc_1080p_set_decode_recon_buffers(dpb, luma, chroma);
+		vidc_1080p_set_h263_decode_work_buffers(
+			DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+				dec_buffers->nb_dcac),
+			DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+				dec_buffers->upnb_mv),
+			DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+				dec_buffers->sub_anchor_mv),
+			DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+				dec_buffers->overlay_xform));
+	break;
+	case VCD_CODEC_VC1:
+	case VCD_CODEC_VC1_RCV:
+		vidc_1080p_set_decode_recon_buffers(dpb, luma, chroma);
+		vidc_1080p_set_vc1_decode_work_buffers(
+			DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+				dec_buffers->nb_dcac),
+			DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+				dec_buffers->upnb_mv),
+			DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+				dec_buffers->sub_anchor_mv),
+			DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+				dec_buffers->overlay_xform),
+			DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+				dec_buffers->bit_plane1),
+			DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+				dec_buffers->bit_plane2),
+			DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+				dec_buffers->bit_plane3));
+	break;
+	case VCD_CODEC_H264:
+		for (i = 0; i < dpb; i++)
+			mv[i] = DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+					dec_buffers->h264_mv[i]);
+		vidc_1080p_set_h264_decode_buffers(dpb,
+			DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+				dec_buffers->h264_vert_nb_mv),
+			DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+				dec_buffers->h264_nb_ip),
+			luma, chroma, mv);
+	break;
+	default:
+	break;
+	}
+	return VCD_S_SUCCESS;
+}
+
+void ddl_release_context_buffers(struct ddl_context *ddl_context)
+{
+	if (ddl_context->memtype == MEMTYPE_SMI_KERNEL) {
+		ddl_pmem_free(&ddl_context->dram_base_a);
+		ddl_pmem_free(&ddl_context->dram_base_b);
+	}
+	ddl_pmem_free(&ddl_context->metadata_shared_input);
+	ddl_fw_release();
+}
+
+void ddl_release_client_internal_buffers(struct ddl_client_context *ddl)
+{
+	if (ddl->decoding) {
+		struct ddl_decoder_data *decoder =
+			&(ddl->codec_data.decoder);
+		kfree(decoder->dp_buf.dec_pic_buffers);
+		decoder->dp_buf.dec_pic_buffers = NULL;
+		ddl_vidc_decode_dynamic_property(ddl, false);
+		decoder->decode_config.sequence_header_len = 0;
+		decoder->decode_config.sequence_header = NULL;
+		decoder->dpb_mask.client_mask = 0;
+		decoder->dpb_mask.hw_mask = 0;
+		decoder->dp_buf.no_of_dec_pic_buf = 0;
+		decoder->dynamic_prop_change = 0;
+		ddl_free_dec_hw_buffers(ddl);
+	} else {
+		struct ddl_encoder_data *encoder =
+			&(ddl->codec_data.encoder);
+		ddl_pmem_free(&encoder->seq_header);
+		ddl_vidc_encode_dynamic_property(ddl, false);
+		encoder->dynamic_prop_change = 0;
+		ddl_free_enc_hw_buffers(ddl);
+	}
+}
+
+u32 ddl_codec_type_transact(struct ddl_client_context *ddl,
+	u32 remove, enum vcd_codec requested_codec)
+{
+	if (requested_codec > VCD_CODEC_VC1_RCV ||
+		requested_codec < VCD_CODEC_H264)
+		return false;
+	if (!ddl->decoding && requested_codec != VCD_CODEC_MPEG4 &&
+		requested_codec != VCD_CODEC_H264 &&
+		requested_codec != VCD_CODEC_H263)
+		return false;
+
+	return true;
+}
+
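+/* Claim one of the two firmware command channels for this client; the
+ * device is marked BUSY only once both channels are occupied.
+ */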
+u32 ddl_take_command_channel(struct ddl_context *ddl_context,
+	struct ddl_client_context *ddl, void *client_data)
+{
+	u32  status = true;
+
+	if (!ddl_context->current_ddl[0]) {
+		ddl_context->current_ddl[0] = ddl;
+		ddl->client_data = client_data;
+		ddl->command_channel = 0;
+	} else if (!ddl_context->current_ddl[1]) {
+		ddl_context->current_ddl[1] = ddl;
+		ddl->client_data = client_data;
+		ddl->command_channel = 1;
+	} else
+		status = false;
+	if (status) {
+		if (ddl_context->current_ddl[0] &&
+			ddl_context->current_ddl[1])
+			DDL_BUSY(ddl_context);
+		else
+			DDL_RUN(ddl_context);
+	}
+	return status;
+}
+
+void ddl_release_command_channel(struct ddl_context *ddl_context,
+	u32 command_channel)
+{
+	ddl_context->current_ddl[command_channel]->client_data = NULL;
+	ddl_context->current_ddl[command_channel] = NULL;
+	if (!ddl_context->current_ddl[0] &&
+		!ddl_context->current_ddl[1])
+		DDL_IDLE(ddl_context);
+	else
+		DDL_RUN(ddl_context);
+}
+
+struct ddl_client_context *ddl_get_current_ddl_client_for_channel_id(
+	struct ddl_context *ddl_context, u32 channel_id)
+{
+	struct ddl_client_context *ddl;
+
+	if (ddl_context->current_ddl[0] && channel_id ==
+		ddl_context->current_ddl[0]->command_channel)
+		ddl = ddl_context->current_ddl[0];
+	else if (ddl_context->current_ddl[1] && channel_id ==
+		ddl_context->current_ddl[1]->command_channel)
+		ddl = ddl_context->current_ddl[1];
+	else {
+		DDL_MSG_LOW("STATE-CRITICAL-FRMRUN");
+		DDL_MSG_ERROR("Unexpected channel ID = %d", channel_id);
+		ddl = NULL;
+	}
+	return ddl;
+}
+
+struct ddl_client_context *ddl_get_current_ddl_client_for_command(
+	struct ddl_context *ddl_context,
+	enum ddl_cmd_state cmd_state)
+{
+	struct ddl_client_context *ddl;
+
+	if (ddl_context->current_ddl[0] &&
+		cmd_state == ddl_context->current_ddl[0]->cmd_state)
+		ddl = ddl_context->current_ddl[0];
+	else if (ddl_context->current_ddl[1] &&
+		cmd_state == ddl_context->current_ddl[1]->cmd_state)
+		ddl = ddl_context->current_ddl[1];
+	else {
+		DDL_MSG_LOW("STATE-CRITICAL-FRMRUN");
+		DDL_MSG_ERROR("Error: Unexpected cmd_state = %d",
+			cmd_state);
+		ddl = NULL;
+	}
+	return ddl;
+}
+
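+/* Size in bytes of one luma or chroma plane: the dimensions are rounded up
+ * to the tile/linear grid and the product is aligned to the matching
+ * multiply factor.
+ */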
+u32 ddl_get_yuv_buf_size(u32 width, u32 height, u32 format)
+{
+	u32 mem_size, width_round_up, height_round_up, align;
+
+	width_round_up  = width;
+	height_round_up = height;
+	if (format == DDL_YUV_BUF_TYPE_TILE) {
+		width_round_up  = DDL_ALIGN(width, DDL_TILE_ALIGN_WIDTH);
+		height_round_up = DDL_ALIGN(height, DDL_TILE_ALIGN_HEIGHT);
+		align = DDL_TILE_MULTIPLY_FACTOR;
+	}
+	if (format == DDL_YUV_BUF_TYPE_LINEAR) {
+		width_round_up = DDL_ALIGN(width, DDL_LINEAR_ALIGN_WIDTH);
+		align = DDL_LINEAR_MULTIPLY_FACTOR;
+	}
+	mem_size = (width_round_up * height_round_up);
+	mem_size = DDL_ALIGN(mem_size, align);
+	return mem_size;
+}
+
+void ddl_free_dec_hw_buffers(struct ddl_client_context *ddl)
+{
+	struct ddl_dec_buffers *dec_bufs =
+		&ddl->codec_data.decoder.hw_bufs;
+	ddl_pmem_free(&dec_bufs->h264_nb_ip);
+	ddl_pmem_free(&dec_bufs->h264_vert_nb_mv);
+	ddl_pmem_free(&dec_bufs->nb_dcac);
+	ddl_pmem_free(&dec_bufs->upnb_mv);
+	ddl_pmem_free(&dec_bufs->sub_anchor_mv);
+	ddl_pmem_free(&dec_bufs->overlay_xform);
+	ddl_pmem_free(&dec_bufs->bit_plane3);
+	ddl_pmem_free(&dec_bufs->bit_plane2);
+	ddl_pmem_free(&dec_bufs->bit_plane1);
+	ddl_pmem_free(&dec_bufs->stx_parser);
+	ddl_pmem_free(&dec_bufs->desc);
+	ddl_pmem_free(&dec_bufs->context);
+	memset(dec_bufs, 0, sizeof(struct ddl_dec_buffers));
+}
+
+void ddl_free_enc_hw_buffers(struct ddl_client_context *ddl)
+{
+	struct ddl_enc_buffers *enc_bufs =
+		&ddl->codec_data.encoder.hw_bufs;
+	u32 i;
+
+	for (i = 0; i < enc_bufs->dpb_count; i++) {
+		ddl_pmem_free(&enc_bufs->dpb_y[i]);
+		ddl_pmem_free(&enc_bufs->dpb_c[i]);
+	}
+	ddl_pmem_free(&enc_bufs->mv);
+	ddl_pmem_free(&enc_bufs->col_zero);
+	ddl_pmem_free(&enc_bufs->md);
+	ddl_pmem_free(&enc_bufs->pred);
+	ddl_pmem_free(&enc_bufs->nbor_info);
+	ddl_pmem_free(&enc_bufs->acdc_coef);
+	ddl_pmem_free(&enc_bufs->context);
+	memset(enc_bufs, 0, sizeof(struct ddl_enc_buffers));
+}
+
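+/* Find the pooled input frame whose physical address matches, copy it to
+ * ddl->input_frame and clear that pool slot.
+ */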
+u32 ddl_get_input_frame_from_pool(struct ddl_client_context *ddl,
+	u8 *input_buffer_address)
+{
+	u32 vcd_status = VCD_S_SUCCESS, i, found = false;
+
+	for (i = 0; i < DDL_MAX_NUM_IN_INPUTFRAME_POOL && !found; i++) {
+		if (input_buffer_address ==
+			ddl->input_frame_pool[i].vcd_frm.physical) {
+			found = true;
+			ddl->input_frame = ddl->input_frame_pool[i];
+			memset(&ddl->input_frame_pool[i], 0,
+				sizeof(struct ddl_frame_data_tag));
+		}
+	}
+	if (!found)
+		vcd_status = VCD_ERR_FAIL;
+
+	return vcd_status;
+}
+
+u32 ddl_insert_input_frame_to_pool(struct ddl_client_context *ddl,
+	struct ddl_frame_data_tag *ddl_input_frame)
+{
+	u32 vcd_status = VCD_S_SUCCESS, i, found = false;
+
+	for (i = 0; i < DDL_MAX_NUM_IN_INPUTFRAME_POOL && !found; i++) {
+		if (!ddl->input_frame_pool[i].vcd_frm.physical) {
+			found = true;
+			ddl->input_frame_pool[i] = *ddl_input_frame;
+		}
+	}
+	if (!found)
+		vcd_status = VCD_ERR_FAIL;
+
+	return vcd_status;
+}
+
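+/* Decoder scratch buffer sizes are fixed per codec family; only the H.264
+ * motion-vector buffer scales with the frame dimensions.
+ */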
+void ddl_calc_dec_hw_buffers_size(enum vcd_codec codec, u32 width,
+	u32 height, u32 dpb, struct ddl_dec_buffer_size *buf_size)
+{
+	u32 sz_dpb0 = 0, sz_dpb1 = 0, sz_mv = 0;
+	u32 sz_luma = 0, sz_chroma = 0, sz_nb_dcac = 0, sz_upnb_mv = 0;
+	u32 sz_sub_anchor_mv = 0, sz_overlap_xform = 0, sz_bit_plane3 = 0;
+	u32 sz_bit_plane2 = 0, sz_bit_plane1 = 0, sz_stx_parser = 0;
+	u32 sz_desc, sz_cpb, sz_context, sz_vert_nb_mv = 0, sz_nb_ip = 0;
+
+	if (codec == VCD_CODEC_H264) {
+		sz_mv = ddl_get_yuv_buf_size(width,
+			height>>2, DDL_YUV_BUF_TYPE_TILE);
+		sz_nb_ip = DDL_KILO_BYTE(32);
+		sz_vert_nb_mv = DDL_KILO_BYTE(16);
+	} else {
+		if ((codec == VCD_CODEC_MPEG4) ||
+			(codec == VCD_CODEC_DIVX_3) ||
+			(codec == VCD_CODEC_DIVX_4) ||
+			(codec == VCD_CODEC_DIVX_5) ||
+			(codec == VCD_CODEC_DIVX_6) ||
+			(codec == VCD_CODEC_XVID) ||
+			(codec == VCD_CODEC_H263)) {
+			sz_nb_dcac = DDL_KILO_BYTE(16);
+			sz_upnb_mv = DDL_KILO_BYTE(68);
+			sz_sub_anchor_mv = DDL_KILO_BYTE(136);
+			sz_overlap_xform = DDL_KILO_BYTE(32);
+			if (codec != VCD_CODEC_H263)
+				sz_stx_parser = DDL_KILO_BYTE(68);
+		} else if ((codec == VCD_CODEC_VC1) ||
+			(codec == VCD_CODEC_VC1_RCV)) {
+			sz_nb_dcac = DDL_KILO_BYTE(16);
+			sz_upnb_mv = DDL_KILO_BYTE(68);
+			sz_sub_anchor_mv = DDL_KILO_BYTE(136);
+			sz_overlap_xform = DDL_KILO_BYTE(32);
+			sz_bit_plane3 = DDL_KILO_BYTE(2);
+			sz_bit_plane2 = DDL_KILO_BYTE(2);
+			sz_bit_plane1 = DDL_KILO_BYTE(2);
+		}
+	}
+	sz_desc = DDL_KILO_BYTE(128);
+	sz_cpb = VCD_DEC_CPB_SIZE;
+	if (codec == VCD_CODEC_H264)
+		sz_context = DDL_FW_H264DEC_CONTEXT_SPACE_SIZE;
+	else
+		sz_context = DDL_FW_OTHER_CONTEXT_SPACE_SIZE;
+	if (buf_size) {
+		buf_size->sz_dpb0           = sz_dpb0;
+		buf_size->sz_dpb1           = sz_dpb1;
+		buf_size->sz_mv             = sz_mv;
+		buf_size->sz_vert_nb_mv     = sz_vert_nb_mv;
+		buf_size->sz_nb_ip          = sz_nb_ip;
+		buf_size->sz_luma           = sz_luma;
+		buf_size->sz_chroma         = sz_chroma;
+		buf_size->sz_nb_dcac        = sz_nb_dcac;
+		buf_size->sz_upnb_mv        = sz_upnb_mv;
+		buf_size->sz_sub_anchor_mv  = sz_sub_anchor_mv;
+		buf_size->sz_overlap_xform  = sz_overlap_xform;
+		buf_size->sz_bit_plane3     = sz_bit_plane3;
+		buf_size->sz_bit_plane2     = sz_bit_plane2;
+		buf_size->sz_bit_plane1     = sz_bit_plane1;
+		buf_size->sz_stx_parser     = sz_stx_parser;
+		buf_size->sz_desc           = sz_desc;
+		buf_size->sz_cpb            = sz_cpb;
+		buf_size->sz_context        = sz_context;
+	}
+}
+
+u32 ddl_allocate_dec_hw_buffers(struct ddl_client_context *ddl)
+{
+	struct ddl_dec_buffers *dec_bufs;
+	struct ddl_dec_buffer_size buf_size;
+	u32 status = VCD_S_SUCCESS, dpb = 0;
+	u32 width = 0, height = 0;
+	u8 *ptr;
+
+	dec_bufs = &ddl->codec_data.decoder.hw_bufs;
+	ddl_calc_dec_hw_buffers_size(ddl->codec_data.decoder.
+		codec.codec, width, height, dpb, &buf_size);
+	if (buf_size.sz_context > 0) {
+		ptr = ddl_pmem_alloc(&dec_bufs->context, buf_size.sz_context,
+			DDL_KILO_BYTE(2));
+		if (!ptr)
+			status = VCD_ERR_ALLOC_FAIL;
+	}
+	if (buf_size.sz_nb_ip > 0) {
+		ptr = ddl_pmem_alloc(&dec_bufs->h264_nb_ip, buf_size.sz_nb_ip,
+			DDL_KILO_BYTE(2));
+		if (!ptr)
+			status = VCD_ERR_ALLOC_FAIL;
+	}
+	if (buf_size.sz_vert_nb_mv > 0) {
+		ptr = ddl_pmem_alloc(&dec_bufs->h264_vert_nb_mv,
+			buf_size.sz_vert_nb_mv, DDL_KILO_BYTE(2));
+		if (!ptr)
+			status = VCD_ERR_ALLOC_FAIL;
+	}
+	if (buf_size.sz_nb_dcac > 0) {
+		ptr = ddl_pmem_alloc(&dec_bufs->nb_dcac, buf_size.sz_nb_dcac,
+			DDL_KILO_BYTE(2));
+		if (!ptr)
+			status = VCD_ERR_ALLOC_FAIL;
+	}
+	if (buf_size.sz_upnb_mv > 0) {
+		ptr = ddl_pmem_alloc(&dec_bufs->upnb_mv, buf_size.sz_upnb_mv,
+			DDL_KILO_BYTE(2));
+		if (!ptr)
+			status = VCD_ERR_ALLOC_FAIL;
+	}
+	if (buf_size.sz_sub_anchor_mv > 0) {
+		ptr = ddl_pmem_alloc(&dec_bufs->sub_anchor_mv,
+			buf_size.sz_sub_anchor_mv, DDL_KILO_BYTE(2));
+		if (!ptr)
+			status = VCD_ERR_ALLOC_FAIL;
+	}
+	if (buf_size.sz_overlap_xform > 0) {
+		ptr = ddl_pmem_alloc(&dec_bufs->overlay_xform,
+			buf_size.sz_overlap_xform, DDL_KILO_BYTE(2));
+		if (!ptr)
+			status = VCD_ERR_ALLOC_FAIL;
+	}
+	if (buf_size.sz_bit_plane3 > 0) {
+		ptr = ddl_pmem_alloc(&dec_bufs->bit_plane3,
+			buf_size.sz_bit_plane3, DDL_KILO_BYTE(2));
+		if (!ptr)
+			status = VCD_ERR_ALLOC_FAIL;
+	}
+	if (buf_size.sz_bit_plane2 > 0) {
+		ptr = ddl_pmem_alloc(&dec_bufs->bit_plane2,
+			buf_size.sz_bit_plane2, DDL_KILO_BYTE(2));
+		if (!ptr)
+			status = VCD_ERR_ALLOC_FAIL;
+	}
+	if (buf_size.sz_bit_plane1 > 0) {
+		ptr = ddl_pmem_alloc(&dec_bufs->bit_plane1,
+			buf_size.sz_bit_plane1, DDL_KILO_BYTE(2));
+		if (!ptr)
+			status = VCD_ERR_ALLOC_FAIL;
+	}
+	if (buf_size.sz_stx_parser > 0) {
+		ptr = ddl_pmem_alloc(&dec_bufs->stx_parser,
+			buf_size.sz_stx_parser, DDL_KILO_BYTE(2));
+		if (!ptr)
+			status = VCD_ERR_ALLOC_FAIL;
+	}
+	if (buf_size.sz_desc > 0) {
+		ptr = ddl_pmem_alloc(&dec_bufs->desc, buf_size.sz_desc,
+			DDL_KILO_BYTE(2));
+		if (!ptr)
+			status = VCD_ERR_ALLOC_FAIL;
+	}
+	if (status)
+		ddl_free_dec_hw_buffers(ddl);
+	return status;
+}
+
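+/* Encoder scratch buffer sizes; mb_x and mb_y are the frame dimensions in
+ * macroblocks (rounded up).
+ */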
+u32 ddl_calc_enc_hw_buffers_size(enum vcd_codec codec, u32 width,
+	u32 height, enum vcd_yuv_buffer_format input_format,
+	struct ddl_client_context *ddl,
+	struct ddl_enc_buffer_size *buf_size)
+{
+	u32 status = VCD_S_SUCCESS, mb_x, mb_y;
+	u32 sz_cur_y, sz_cur_c, sz_dpb_y, sz_dpb_c, sz_strm = 0, sz_mv;
+	u32 sz_md = 0, sz_pred = 0, sz_nbor_info = 0 , sz_acdc_coef = 0;
+	u32 sz_mb_info = 0, sz_context, sz_col_zero = 0;
+
+	mb_x = (width + 15) / 16;
+	mb_y = (height + 15) / 16;
+	sz_dpb_y = ddl_get_yuv_buf_size(width,
+		height, DDL_YUV_BUF_TYPE_TILE);
+	sz_dpb_c = ddl_get_yuv_buf_size(width, height>>1,
+		DDL_YUV_BUF_TYPE_TILE);
+	if (input_format ==
+		VCD_BUFFER_FORMAT_NV12_16M2KA) {
+		sz_cur_y = ddl_get_yuv_buf_size(width, height,
+			DDL_YUV_BUF_TYPE_LINEAR);
+		sz_cur_c = ddl_get_yuv_buf_size(width, height>>1,
+			DDL_YUV_BUF_TYPE_LINEAR);
+	} else if (VCD_BUFFER_FORMAT_TILE_4x2 == input_format) {
+		sz_cur_y = sz_dpb_y;
+		sz_cur_c = sz_dpb_c;
+	} else
+		status = VCD_ERR_NOT_SUPPORTED;
+	if (!status) {
+		sz_strm = DDL_ALIGN(ddl_get_yuv_buf_size(width, height,
+			DDL_YUV_BUF_TYPE_LINEAR) + ddl_get_yuv_buf_size(width,
+			height/2, DDL_YUV_BUF_TYPE_LINEAR), DDL_KILO_BYTE(4));
+		sz_mv = DDL_ALIGN(2 * mb_x * 8, DDL_KILO_BYTE(2));
+		if ((codec == VCD_CODEC_MPEG4) ||
+			(codec == VCD_CODEC_H264)) {
+			sz_col_zero = DDL_ALIGN(((mb_x * mb_y + 7) / 8) *
+					8, DDL_KILO_BYTE(2));
+		}
+		if ((codec == VCD_CODEC_MPEG4) ||
+			(codec == VCD_CODEC_H263)) {
+			sz_acdc_coef = DDL_ALIGN((width / 2) * 8,
+						DDL_KILO_BYTE(2));
+		} else if (codec == VCD_CODEC_H264) {
+			sz_md = DDL_ALIGN(mb_x * 48, DDL_KILO_BYTE(2));
+			sz_pred = DDL_ALIGN(2 * 8 * 1024, DDL_KILO_BYTE(2));
+			if (ddl) {
+				if (ddl->codec_data.encoder.
+					entropy_control.entropy_sel ==
+					VCD_ENTROPY_SEL_CAVLC)
+					sz_nbor_info = DDL_ALIGN(8 * 8 * mb_x,
+						DDL_KILO_BYTE(2));
+				else if (ddl->codec_data.encoder.
+					entropy_control.entropy_sel ==
+					VCD_ENTROPY_SEL_CABAC)
+					sz_nbor_info = DDL_ALIGN(8 * 24 *
+						mb_x, DDL_KILO_BYTE(2));
+				if ((ddl->codec_data.encoder.
+					mb_info_enable) &&
+					(codec == VCD_CODEC_H264)) {
+					sz_mb_info = DDL_ALIGN(mb_x * mb_y *
+						6 * 8, DDL_KILO_BYTE(2));
+				}
+			}
+		} else {
+			sz_nbor_info = DDL_ALIGN(8 * 24 * mb_x,
+						DDL_KILO_BYTE(2));
+			sz_mb_info = DDL_ALIGN(mb_x * mb_y * 6 * 8,
+					DDL_KILO_BYTE(2));
+		}
+		sz_context = DDL_FW_OTHER_CONTEXT_SPACE_SIZE;
+		if (buf_size) {
+			buf_size->sz_cur_y      = sz_cur_y;
+			buf_size->sz_cur_c      = sz_cur_c;
+			buf_size->sz_dpb_y      = sz_dpb_y;
+			buf_size->sz_dpb_c      = sz_dpb_c;
+			buf_size->sz_strm       = sz_strm;
+			buf_size->sz_mv         = sz_mv;
+			buf_size->sz_col_zero   = sz_col_zero;
+			buf_size->sz_md         = sz_md;
+			buf_size->sz_pred       = sz_pred;
+			buf_size->sz_nbor_info  = sz_nbor_info;
+			buf_size->sz_acdc_coef  = sz_acdc_coef;
+			buf_size->sz_mb_info    = sz_mb_info;
+			buf_size->sz_context    = sz_context;
+		}
+	}
+	return status;
+}
+
+u32 ddl_allocate_enc_hw_buffers(struct ddl_client_context *ddl)
+{
+	struct ddl_enc_buffers *enc_bufs;
+	struct ddl_enc_buffer_size buf_size;
+	void *ptr;
+	u32 status = VCD_S_SUCCESS;
+
+	enc_bufs = &ddl->codec_data.encoder.hw_bufs;
+	enc_bufs->dpb_count = DDL_ENC_MIN_DPB_BUFFERS;
+
+	if ((ddl->codec_data.encoder.i_period.b_frames >
+		DDL_MIN_NUM_OF_B_FRAME) ||
+		(ddl->codec_data.encoder.num_references_for_p_frame
+		> DDL_MIN_NUM_REF_FOR_P_FRAME))
+		enc_bufs->dpb_count = DDL_ENC_MAX_DPB_BUFFERS;
+	DDL_MSG_HIGH("Encoder num DPB buffers allocated = %d",
+		enc_bufs->dpb_count);
+
+	status = ddl_calc_enc_hw_buffers_size(
+		ddl->codec_data.encoder.codec.codec,
+		ddl->codec_data.encoder.frame_size.width,
+		ddl->codec_data.encoder.frame_size.height,
+		ddl->codec_data.encoder.buf_format.buffer_format,
+		ddl, &buf_size);
+	buf_size.sz_strm = ddl->codec_data.encoder.
+		client_output_buf_req.sz;
+	if (!status) {
+		enc_bufs->sz_dpb_y = buf_size.sz_dpb_y;
+		enc_bufs->sz_dpb_c = buf_size.sz_dpb_c;
+		if (buf_size.sz_mv > 0) {
+			ptr = ddl_pmem_alloc(&enc_bufs->mv, buf_size.sz_mv,
+				DDL_KILO_BYTE(2));
+			if (!ptr)
+				status = VCD_ERR_ALLOC_FAIL;
+		}
+		if (buf_size.sz_col_zero > 0) {
+			ptr = ddl_pmem_alloc(&enc_bufs->col_zero,
+				buf_size.sz_col_zero, DDL_KILO_BYTE(2));
+			if (!ptr)
+				status = VCD_ERR_ALLOC_FAIL;
+		}
+		if (buf_size.sz_md > 0) {
+			ptr = ddl_pmem_alloc(&enc_bufs->md, buf_size.sz_md,
+				DDL_KILO_BYTE(2));
+			if (!ptr)
+				status = VCD_ERR_ALLOC_FAIL;
+		}
+		if (buf_size.sz_pred > 0) {
+			ptr = ddl_pmem_alloc(&enc_bufs->pred,
+				buf_size.sz_pred, DDL_KILO_BYTE(2));
+			if (!ptr)
+				status = VCD_ERR_ALLOC_FAIL;
+		}
+		if (buf_size.sz_nbor_info > 0) {
+			ptr = ddl_pmem_alloc(&enc_bufs->nbor_info,
+				buf_size.sz_nbor_info, DDL_KILO_BYTE(2));
+			if (!ptr)
+				status = VCD_ERR_ALLOC_FAIL;
+		}
+		if (buf_size.sz_acdc_coef > 0) {
+			ptr = ddl_pmem_alloc(&enc_bufs->acdc_coef,
+				buf_size.sz_acdc_coef, DDL_KILO_BYTE(2));
+			if (!ptr)
+				status = VCD_ERR_ALLOC_FAIL;
+		}
+		if (buf_size.sz_mb_info > 0) {
+			ptr = ddl_pmem_alloc(&enc_bufs->mb_info,
+				buf_size.sz_mb_info, DDL_KILO_BYTE(2));
+			if (!ptr)
+				status = VCD_ERR_ALLOC_FAIL;
+		}
+		if (buf_size.sz_context > 0) {
+			ptr = ddl_pmem_alloc(&enc_bufs->context,
+				buf_size.sz_context, DDL_KILO_BYTE(2));
+			if (!ptr)
+				status = VCD_ERR_ALLOC_FAIL;
+		}
+		if (status)
+			ddl_free_enc_hw_buffers(ddl);
+	}
+	return status;
+}
+
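+/* Reprogram the decode reconstruction buffers with the current luma plane
+ * size used as the chroma offset within each DPB.
+ */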
+void ddl_decoder_chroma_dpb_change(struct ddl_client_context *ddl)
+{
+	struct ddl_context *ddl_context = ddl->ddl_context;
+	struct ddl_decoder_data *decoder = &ddl->codec_data.decoder;
+	struct ddl_frame_data_tag *frame =
+			&(decoder->dp_buf.dec_pic_buffers[0]);
+	u32 luma[DDL_MAX_BUFFER_COUNT];
+	u32 chroma[DDL_MAX_BUFFER_COUNT];
+	u32 luma_size, i, dpb;
+	luma_size = decoder->dpb_buf_size.size_y;
+	dpb = decoder->dp_buf.no_of_dec_pic_buf;
+	DDL_MSG_HIGH("%s Decoder num DPB buffers = %u Luma Size = %u",
+			 __func__, dpb, luma_size);
+	if (dpb > DDL_MAX_BUFFER_COUNT)
+		dpb = DDL_MAX_BUFFER_COUNT;
+	for (i = 0; i < dpb; i++) {
+		luma[i] = DDL_OFFSET(
+			ddl_context->dram_base_a.align_physical_addr,
+			frame[i].vcd_frm.physical);
+		chroma[i] = luma[i] + luma_size;
+		DDL_MSG_LOW("%s Decoder Luma address = %x"
+			" Chroma address = %x", __func__, luma[i], chroma[i]);
+	}
+	vidc_1080p_set_decode_recon_buffers(dpb, luma, chroma);
+}
+
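+/* Decide whether a port reconfiguration is required: in continuous mode
+ * only the buffer size/count requirements are compared, otherwise the
+ * resolution, stride, scan lines and buffer counts must all match the
+ * client's current settings.
+ */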
+u32 ddl_check_reconfig(struct ddl_client_context *ddl)
+{
+	u32 need_reconfig = true;
+	struct ddl_decoder_data *decoder = &ddl->codec_data.decoder;
+	if (decoder->cont_mode) {
+		if ((decoder->actual_output_buf_req.sz <=
+			 decoder->client_output_buf_req.sz) &&
+			(decoder->actual_output_buf_req.actual_count <=
+			 decoder->client_output_buf_req.actual_count)) {
+			need_reconfig = false;
+			if (decoder->min_dpb_num >
+				decoder->min_output_buf_req.min_count) {
+				decoder->min_output_buf_req =
+					decoder->actual_output_buf_req;
+			}
+			DDL_MSG_LOW("%s Decoder width = %u height = %u "
+				"Client width = %u height = %u\n",
+				__func__, decoder->frame_size.width,
+				 decoder->frame_size.height,
+				 decoder->client_frame_size.width,
+				 decoder->client_frame_size.height);
+		}
+	} else {
+		if ((decoder->frame_size.width ==
+			decoder->client_frame_size.width) &&
+			(decoder->frame_size.height ==
+			decoder->client_frame_size.height) &&
+			(decoder->actual_output_buf_req.sz <=
+			decoder->client_output_buf_req.sz) &&
+			(decoder->actual_output_buf_req.min_count ==
+			decoder->client_output_buf_req.min_count) &&
+			(decoder->actual_output_buf_req.actual_count ==
+			decoder->client_output_buf_req.actual_count) &&
+			(decoder->frame_size.scan_lines ==
+			decoder->client_frame_size.scan_lines) &&
+			(decoder->frame_size.stride ==
+			decoder->client_frame_size.stride))
+				need_reconfig = false;
+	}
+	return need_reconfig;
+}
+
+void ddl_handle_reconfig(u32 res_change, struct ddl_client_context *ddl)
+{
+	if (res_change) {
+		DDL_MSG_LOW("%s Resolution change, start realloc\n",
+				 __func__);
+		ddl->client_state = DDL_CLIENT_WAIT_FOR_EOS_DONE;
+		ddl->cmd_state = DDL_CMD_EOS;
+		vidc_1080p_frame_start_realloc(ddl->instance_id);
+	}
+}
+
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c
new file mode 100644
index 0000000..be46e97
--- /dev/null
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c
@@ -0,0 +1,1638 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+
+#include "vcd_ddl.h"
+#include "vcd_ddl_shared_mem.h"
+#include "vcd_ddl_metadata.h"
+#include <linux/delay.h>
+
+static void ddl_decoder_input_done_callback(
+	struct ddl_client_context *ddl, u32 frame_transact_end);
+static u32 ddl_decoder_output_done_callback(
+	struct ddl_client_context *ddl, u32 frame_transact_end);
+static u32 ddl_get_decoded_frame(struct vcd_frame_data  *frame,
+	enum vidc_1080p_decode_frame frame_type);
+static u32 ddl_get_encoded_frame(struct vcd_frame_data *frame,
+	enum vcd_codec codec,
+	enum vidc_1080p_encode_frame frame_type);
+static void ddl_get_dec_profile_level(struct ddl_decoder_data *decoder,
+	u32 profile, u32 level);
+static void ddl_handle_enc_frame_done(struct ddl_client_context *ddl);
+
+static void ddl_fw_status_done_callback(struct ddl_context *ddl_context)
+{
+	DDL_MSG_MED("ddl_fw_status_done_callback");
+	if (!DDLCOMMAND_STATE_IS(ddl_context, DDL_CMD_DMA_INIT)) {
+		DDL_MSG_ERROR("UNKWN_DMADONE");
+	} else {
+		DDL_MSG_LOW("FW_STATUS_DONE");
+		vidc_1080p_set_host2risc_cmd(VIDC_1080P_HOST2RISC_CMD_SYS_INIT,
+			ddl_context->fw_memory_size, 0, 0, 0);
+	}
+}
+
+static void ddl_sys_init_done_callback(struct ddl_context *ddl_context,
+	u32 fw_size)
+{
+	u32 vcd_status = VCD_S_SUCCESS;
+
+	DDL_MSG_MED("ddl_sys_init_done_callback");
+	if (!DDLCOMMAND_STATE_IS(ddl_context, DDL_CMD_DMA_INIT)) {
+		DDL_MSG_ERROR("UNKNOWN_SYS_INIT_DONE");
+	} else {
+		ddl_context->cmd_state = DDL_CMD_INVALID;
+		DDL_MSG_LOW("SYS_INIT_DONE");
+		vidc_1080p_get_fw_version(&ddl_context->fw_version);
+		if (ddl_context->fw_memory_size >= fw_size) {
+			ddl_context->device_state = DDL_DEVICE_INITED;
+			vcd_status = VCD_S_SUCCESS;
+		} else
+			vcd_status = VCD_ERR_FAIL;
+		ddl_context->ddl_callback(VCD_EVT_RESP_DEVICE_INIT,
+			vcd_status, NULL, 0, NULL,
+			ddl_context->client_data);
+		DDL_IDLE(ddl_context);
+	}
+}
+
+static void ddl_decoder_eos_done_callback(
+	struct ddl_client_context *ddl)
+{
+	struct ddl_context *ddl_context = ddl->ddl_context;
+
+	if (!ddl->decoding) {
+		DDL_MSG_ERROR("STATE-CRITICAL-EOSDONE");
+		ddl_client_fatal_cb(ddl);
+	} else {
+		ddl->client_state = DDL_CLIENT_WAIT_FOR_FRAME;
+		DDL_MSG_LOW("EOS_DONE");
+		ddl_context->ddl_callback(VCD_EVT_RESP_EOS_DONE,
+			VCD_S_SUCCESS, NULL, 0, (u32 *)ddl,
+			ddl->client_data);
+		ddl_release_command_channel(ddl_context,
+			ddl->command_channel);
+	}
+}
+
+static u32 ddl_channel_set_callback(struct ddl_context *ddl_context,
+	u32 instance_id)
+{
+	struct ddl_client_context *ddl;
+	u32 ret = false;
+
+	DDL_MSG_MED("ddl_channel_open_callback");
+	ddl = ddl_get_current_ddl_client_for_command(ddl_context,
+			DDL_CMD_CHANNEL_SET);
+	if (ddl) {
+		ddl->cmd_state = DDL_CMD_INVALID;
+		if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_CHDONE)) {
+			DDL_MSG_ERROR("STATE-CRITICAL-CHSET");
+			ddl_release_command_channel(ddl_context,
+			ddl->command_channel);
+		} else {
+			DDL_MSG_LOW("CH_SET_DONE");
+			DDL_MSG_LOW("ddl_state_transition: %s ~~>"
+				"DDL_CLIENT_WAIT_FOR_INITCODEC",
+				ddl_get_state_string(ddl->client_state));
+			ddl->client_state = DDL_CLIENT_WAIT_FOR_INITCODEC;
+			ddl->instance_id = instance_id;
+			if (ddl->decoding) {
+				if (vidc_msg_timing)
+					ddl_calc_core_proc_time(__func__,
+						DEC_OP_TIME);
+				if (ddl->codec_data.decoder.header_in_start)
+					ddl_vidc_decode_init_codec(ddl);
+				else {
+					ddl_context->ddl_callback(
+						VCD_EVT_RESP_START,
+						VCD_S_SUCCESS, NULL, 0,
+						(u32 *)ddl,
+						ddl->client_data);
+					ddl_release_command_channel(
+						ddl_context,
+						ddl->command_channel);
+					ret = true;
+				}
+			} else
+				ddl_vidc_encode_init_codec(ddl);
+		}
+	}
+	return ret;
+}
+
+static u32 ddl_encoder_seq_done_callback(struct ddl_context *ddl_context,
+	struct ddl_client_context *ddl)
+{
+	struct ddl_encoder_data *encoder;
+
+	DDL_MSG_MED("ddl_encoder_seq_done_callback");
+	if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_INITCODECDONE)) {
+		DDL_MSG_ERROR("STATE-CRITICAL-INITCODEC");
+		ddl_client_fatal_cb(ddl);
+		return true;
+	}
+	if (vidc_msg_timing)
+		ddl_calc_core_proc_time(__func__, ENC_OP_TIME);
+	ddl->cmd_state = DDL_CMD_INVALID;
+	DDL_MSG_LOW("ddl_state_transition: %s ~~> DDL_CLIENT_WAIT_FOR_FRAME",
+	ddl_get_state_string(ddl->client_state));
+	ddl->client_state = DDL_CLIENT_WAIT_FOR_FRAME;
+	DDL_MSG_LOW("INIT_CODEC_DONE");
+	encoder = &ddl->codec_data.encoder;
+	vidc_1080p_get_encoder_sequence_header_size(
+		&encoder->seq_header_length);
+	if ((encoder->codec.codec == VCD_CODEC_H264) &&
+		(encoder->profile.profile == VCD_PROFILE_H264_BASELINE))
+		if ((encoder->seq_header.align_virtual_addr) &&
+			(encoder->seq_header_length > 6))
+			encoder->seq_header.align_virtual_addr[6] = 0xC0;
+	ddl_context->ddl_callback(VCD_EVT_RESP_START, VCD_S_SUCCESS,
+		NULL, 0, (u32 *) ddl, ddl->client_data);
+	ddl_release_command_channel(ddl_context,
+		ddl->command_channel);
+	return true;
+}
+
+static void parse_hdr_size_data(struct ddl_client_context *ddl,
+	struct vidc_1080p_seq_hdr_info *seq_hdr_info)
+{
+	u32 progressive;
+	struct ddl_decoder_data *decoder = &ddl->codec_data.decoder;
+	if (decoder->output_order == VCD_DEC_ORDER_DISPLAY) {
+		decoder->frame_size.width = seq_hdr_info->img_size_x;
+		decoder->frame_size.height = seq_hdr_info->img_size_y;
+		progressive = seq_hdr_info->disp_progressive;
+	} else {
+		vidc_sm_get_dec_order_resl(
+			&ddl->shared_mem[ddl->command_channel],
+			&decoder->frame_size.width,
+			&decoder->frame_size.height);
+		progressive = seq_hdr_info->dec_progressive;
+	}
+	decoder->min_dpb_num = seq_hdr_info->min_num_dpb;
+	vidc_sm_get_min_yc_dpb_sizes(
+		&ddl->shared_mem[ddl->command_channel],
+		&seq_hdr_info->min_luma_dpb_size,
+		&seq_hdr_info->min_chroma_dpb_size);
+	decoder->y_cb_cr_size = seq_hdr_info->min_luma_dpb_size +
+		seq_hdr_info->min_chroma_dpb_size;
+	decoder->dpb_buf_size.size_yuv = decoder->y_cb_cr_size;
+	decoder->dpb_buf_size.size_y =
+		seq_hdr_info->min_luma_dpb_size;
+	decoder->dpb_buf_size.size_c =
+		seq_hdr_info->min_chroma_dpb_size;
+	decoder->progressive_only = progressive ? false : true;
+}
+
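+/*
+ * Read the crop rectangle reported with the sequence header (from the
+ * display- or decode-order registers, depending on the output order) and
+ * shrink the reported frame size by the crop offsets.
+ */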
+static void parse_hdr_crop_data(struct ddl_client_context *ddl,
+	struct vidc_1080p_seq_hdr_info *seq_hdr_info)
+{
+	struct ddl_decoder_data *decoder = &ddl->codec_data.decoder;
+	u32 crop_exists = (decoder->output_order == VCD_DEC_ORDER_DISPLAY) ?
+		seq_hdr_info->disp_crop_exists : seq_hdr_info->dec_crop_exists;
+	if (crop_exists) {
+		if (decoder->output_order ==
+			VCD_DEC_ORDER_DISPLAY)
+			vidc_sm_get_crop_info(
+				&ddl->shared_mem[ddl->command_channel],
+				&seq_hdr_info->crop_left_offset,
+				&seq_hdr_info->crop_right_offset,
+				&seq_hdr_info->crop_top_offset,
+				&seq_hdr_info->crop_bottom_offset);
+		else
+			vidc_sm_get_dec_order_crop_info(
+				&ddl->shared_mem[ddl->command_channel],
+				&seq_hdr_info->crop_left_offset,
+				&seq_hdr_info->crop_right_offset,
+				&seq_hdr_info->crop_top_offset,
+				&seq_hdr_info->crop_bottom_offset);
+		decoder->frame_size.width -=
+			seq_hdr_info->crop_right_offset +
+			seq_hdr_info->crop_left_offset;
+		decoder->frame_size.height -=
+			seq_hdr_info->crop_top_offset +
+			seq_hdr_info->crop_bottom_offset;
+	}
+}
+
+static u32 ddl_decoder_seq_done_callback(struct ddl_context *ddl_context,
+	struct ddl_client_context *ddl)
+{
+	struct ddl_decoder_data *decoder = &ddl->codec_data.decoder;
+	struct vidc_1080p_seq_hdr_info seq_hdr_info;
+	u32 process_further = true;
+	struct ddl_profile_info_type disp_profile_info;
+
+	DDL_MSG_MED("ddl_decoder_seq_done_callback");
+	if (!ddl->decoding ||
+		!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_INITCODECDONE)) {
+		DDL_MSG_ERROR("STATE-CRITICAL-HDDONE");
+		ddl_client_fatal_cb(ddl);
+	} else {
+		if (vidc_msg_timing)
+			ddl_calc_core_proc_time(__func__, DEC_OP_TIME);
+		ddl->cmd_state = DDL_CMD_INVALID;
+		DDL_MSG_LOW("ddl_state_transition: %s ~~>"
+			"DDL_CLIENT_WAIT_FOR_DPB",
+			ddl_get_state_string(ddl->client_state));
+		ddl->client_state = DDL_CLIENT_WAIT_FOR_DPB;
+		DDL_MSG_LOW("HEADER_DONE");
+		vidc_1080p_get_decode_seq_start_result(&seq_hdr_info);
+		parse_hdr_size_data(ddl, &seq_hdr_info);
+		if (!seq_hdr_info.img_size_x || !seq_hdr_info.img_size_y) {
+			DDL_MSG_ERROR("FATAL:ZeroImageSize");
+			ddl_client_fatal_cb(ddl);
+			return process_further;
+		}
+		vidc_sm_get_profile_info(&ddl->shared_mem
+			[ddl->command_channel], &disp_profile_info);
+		disp_profile_info.pic_profile = seq_hdr_info.profile;
+		disp_profile_info.pic_level = seq_hdr_info.level;
+		ddl_get_dec_profile_level(decoder, seq_hdr_info.profile,
+			seq_hdr_info.level);
+		switch (decoder->codec.codec) {
+		case VCD_CODEC_H264:
+			if (decoder->profile.profile == VCD_PROFILE_H264_HIGH ||
+				decoder->profile.profile ==
+				VCD_PROFILE_UNKNOWN) {
+				if ((disp_profile_info.chroma_format_idc >
+					VIDC_1080P_IDCFORMAT_420) ||
+					(disp_profile_info.bit_depth_luma_minus8
+					 || disp_profile_info.
+					bit_depth_chroma_minus8)) {
+					DDL_MSG_ERROR("Unsupported H.264 "
+						"feature: IDC format : %d, Bitdepth: %d",
+						disp_profile_info.
+						chroma_format_idc,
+						(disp_profile_info.
+						 bit_depth_luma_minus8
+						 ||	disp_profile_info.
+					bit_depth_chroma_minus8));
+					ddl_client_fatal_cb(ddl);
+					return process_further;
+				}
+			}
+			break;
+		case VCD_CODEC_MPEG4:
+		case VCD_CODEC_DIVX_4:
+		case VCD_CODEC_DIVX_5:
+		case VCD_CODEC_DIVX_6:
+		case VCD_CODEC_XVID:
+			if (seq_hdr_info.data_partition)
+				if ((seq_hdr_info.img_size_x *
+				seq_hdr_info.img_size_y) > (720 * 576)) {
+					DDL_MSG_ERROR("Unsupported DP clip");
+					ddl_client_fatal_cb(ddl);
+					return process_further;
+				}
+			break;
+		default:
+			break;
+		}
+		ddl_calculate_stride(&decoder->frame_size,
+			!decoder->progressive_only);
+		decoder->frame_size.scan_lines =
+		DDL_ALIGN(decoder->frame_size.height, DDL_TILE_ALIGN_HEIGHT);
+		decoder->frame_size.stride =
+		DDL_ALIGN(decoder->frame_size.width, DDL_TILE_ALIGN_WIDTH);
+		parse_hdr_crop_data(ddl, &seq_hdr_info);
+		if (decoder->codec.codec == VCD_CODEC_H264 &&
+			seq_hdr_info.level > VIDC_1080P_H264_LEVEL4) {
+			DDL_MSG_ERROR("WARNING: H264MaxLevelExceeded : %d",
+				seq_hdr_info.level);
+		}
+		ddl_set_default_decoder_buffer_req(decoder, false);
+		if (decoder->header_in_start) {
+			if (!(decoder->cont_mode) ||
+				(decoder->min_dpb_num >
+				 decoder->client_output_buf_req.min_count) ||
+				(decoder->actual_output_buf_req.sz >
+				 decoder->client_output_buf_req.sz)) {
+				decoder->client_frame_size =
+					 decoder->frame_size;
+				decoder->client_output_buf_req =
+					decoder->actual_output_buf_req;
+				decoder->client_input_buf_req =
+					decoder->actual_input_buf_req;
+			}
+			ddl_context->ddl_callback(VCD_EVT_RESP_START,
+				VCD_S_SUCCESS, NULL, 0, (u32 *) ddl,
+				ddl->client_data);
+			ddl_release_command_channel(ddl_context,
+				ddl->command_channel);
+		} else {
+			u32 seq_hdr_only_frame = false;
+			u32 need_reconfig = false;
+			struct vcd_frame_data *input_vcd_frm =
+				&ddl->input_frame.vcd_frm;
+			need_reconfig = ddl_check_reconfig(ddl);
+			DDL_MSG_HIGH("%s : need_reconfig = %u\n", __func__,
+				 need_reconfig);
+			if (input_vcd_frm->flags &
+				  VCD_FRAME_FLAG_EOS) {
+				need_reconfig = false;
+			}
+			if (((input_vcd_frm->flags &
+				VCD_FRAME_FLAG_CODECCONFIG) &&
+				(!(input_vcd_frm->flags &
+				VCD_FRAME_FLAG_SYNCFRAME))) ||
+				input_vcd_frm->data_len <=
+				seq_hdr_info.dec_frm_size) {
+				seq_hdr_only_frame = true;
+				input_vcd_frm->offset +=
+					seq_hdr_info.dec_frm_size;
+				input_vcd_frm->data_len = 0;
+				input_vcd_frm->flags |=
+					VCD_FRAME_FLAG_CODECCONFIG;
+				ddl->input_frame.frm_trans_end =
+					!need_reconfig;
+				ddl_context->ddl_callback(
+					VCD_EVT_RESP_INPUT_DONE,
+					VCD_S_SUCCESS, &ddl->input_frame,
+					sizeof(struct ddl_frame_data_tag),
+					(u32 *) ddl, ddl->client_data);
+			} else {
+				if (decoder->codec.codec ==
+					VCD_CODEC_VC1_RCV) {
+					vidc_sm_set_start_byte_number(
+						&ddl->shared_mem
+						[ddl->command_channel],
+						seq_hdr_info.dec_frm_size);
+				}
+			}
+			if (need_reconfig) {
+				struct ddl_frame_data_tag *payload =
+					&ddl->input_frame;
+				u32 payload_size =
+					sizeof(struct ddl_frame_data_tag);
+				decoder->client_frame_size =
+					decoder->frame_size;
+				decoder->client_output_buf_req =
+					decoder->actual_output_buf_req;
+				decoder->client_input_buf_req =
+					decoder->actual_input_buf_req;
+				if (seq_hdr_only_frame) {
+					payload = NULL;
+					payload_size = 0;
+				}
+				DDL_MSG_HIGH("%s : sending port reconfig\n",
+					 __func__);
+				ddl_context->ddl_callback(
+					VCD_EVT_IND_OUTPUT_RECONFIG,
+					VCD_S_SUCCESS, payload,
+					payload_size, (u32 *) ddl,
+					ddl->client_data);
+			}
+			if (!need_reconfig && !seq_hdr_only_frame) {
+				if (!ddl_vidc_decode_set_buffers(ddl))
+					process_further = false;
+				else {
+					DDL_MSG_ERROR("ddl_vidc_decode_set_"
+						"buffers failed");
+					ddl_client_fatal_cb(ddl);
+				}
+			} else
+				ddl_release_command_channel(ddl_context,
+					ddl->command_channel);
+		}
+	}
+	return process_further;
+}
+
+static u32 ddl_sequence_done_callback(struct ddl_context *ddl_context)
+{
+	struct ddl_client_context *ddl;
+	u32 channel_inst_id, ret;
+
+	vidc_1080p_get_returned_channel_inst_id(&channel_inst_id);
+	vidc_1080p_clear_returned_channel_inst_id();
+	ddl = ddl_get_current_ddl_client_for_channel_id(ddl_context,
+			ddl_context->response_cmd_ch_id);
+	if (!ddl) {
+		DDL_MSG_ERROR("UNKWN_SEQ_DONE");
+		ret = true;
+	} else {
+		if (ddl->decoding)
+			ret = ddl_decoder_seq_done_callback(ddl_context,
+					ddl);
+		else
+			ret = ddl_encoder_seq_done_callback(ddl_context,
+					ddl);
+	}
+	return ret;
+}
+
+static u32 ddl_dpb_buffers_set_done_callback(
+	struct ddl_context *ddl_context)
+{
+	struct ddl_client_context *ddl;
+	u32 channel_inst_id, ret_status = true;
+
+	DDL_MSG_MED("ddl_dpb_buffers_set_done_callback");
+	vidc_1080p_get_returned_channel_inst_id(&channel_inst_id);
+	vidc_1080p_clear_returned_channel_inst_id();
+	ddl = ddl_get_current_ddl_client_for_command(ddl_context,
+			DDL_CMD_DECODE_SET_DPB);
+	if (ddl) {
+		ddl->cmd_state = DDL_CMD_INVALID;
+		if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPBDONE)) {
+			DDL_MSG_ERROR("STATE-CRITICAL-DPBDONE");
+			ddl_client_fatal_cb(ddl);
+		} else {
+			DDL_MSG_LOW("INTR_DPBDONE");
+			DDL_MSG_LOW("ddl_state_transition: %s ~~>"
+				"DDL_CLIENT_WAIT_FOR_FRAME",
+				ddl_get_state_string(ddl->client_state));
+			if (vidc_msg_timing) {
+				ddl_calc_core_proc_time(__func__, DEC_OP_TIME);
+				ddl_reset_core_time_variables(DEC_OP_TIME);
+			}
+			ddl->client_state = DDL_CLIENT_WAIT_FOR_FRAME;
+			ddl_vidc_decode_frame_run(ddl);
+			ret_status = false;
+		}
+	}
+	return ret_status;
+}
+
+static void ddl_encoder_frame_run_callback(
+	struct ddl_client_context *ddl)
+{
+	struct ddl_context *ddl_context = ddl->ddl_context;
+	struct ddl_encoder_data *encoder =
+		&(ddl->codec_data.encoder);
+	struct vcd_frame_data *output_frame =
+		&(ddl->output_frame.vcd_frm);
+	u32 bottom_frame_tag;
+
+	DDL_MSG_MED("ddl_encoder_frame_run_callback");
+	if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME_DONE) &&
+		!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_EOS_DONE)) {
+		DDL_MSG_ERROR("STATE-CRITICAL-ENCFRMRUN");
+		ddl_client_fatal_cb(ddl);
+	} else {
+		if (vidc_msg_timing)
+			ddl_calc_core_proc_time(__func__, ENC_OP_TIME);
+		DDL_MSG_LOW("ENC_FRM_RUN_DONE");
+		ddl->cmd_state = DDL_CMD_INVALID;
+		vidc_1080p_get_encode_frame_info(&encoder->enc_frame_info);
+		vidc_sm_get_frame_tags(&ddl->shared_mem
+			[ddl->command_channel],
+			&output_frame->ip_frm_tag, &bottom_frame_tag);
+
+		if (encoder->meta_data_enable_flag)
+			vidc_sm_get_metadata_status(&ddl->shared_mem
+				[ddl->command_channel],
+				&encoder->enc_frame_info.meta_data_exists);
+
+		if (encoder->enc_frame_info.enc_frame_size ||
+			(encoder->enc_frame_info.enc_frame ==
+			VIDC_1080P_ENCODE_FRAMETYPE_SKIPPED) ||
+			DDLCLIENT_STATE_IS(ddl,
+			DDL_CLIENT_WAIT_FOR_EOS_DONE)) {
+			u8 *input_buffer_address = NULL;
+			output_frame->data_len =
+				encoder->enc_frame_info.enc_frame_size;
+			output_frame->flags |= VCD_FRAME_FLAG_ENDOFFRAME;
+			ddl_get_encoded_frame(output_frame,
+				encoder->codec.codec,
+				encoder->enc_frame_info.enc_frame);
+			ddl_process_encoder_metadata(ddl);
+			ddl_vidc_encode_dynamic_property(ddl, false);
+			ddl->input_frame.frm_trans_end = false;
+			input_buffer_address =
+				ddl_context->dram_base_a.align_physical_addr +
+				encoder->enc_frame_info.enc_luma_address;
+			ddl_get_input_frame_from_pool(ddl,
+				input_buffer_address);
+			ddl_context->ddl_callback(VCD_EVT_RESP_INPUT_DONE,
+				VCD_S_SUCCESS, &(ddl->input_frame),
+				sizeof(struct ddl_frame_data_tag),
+				(u32 *)ddl, ddl->client_data);
+			ddl->output_frame.frm_trans_end =
+				DDLCLIENT_STATE_IS(ddl,
+				DDL_CLIENT_WAIT_FOR_EOS_DONE) ? false : true;
+			ddl_context->ddl_callback(VCD_EVT_RESP_OUTPUT_DONE,
+				VCD_S_SUCCESS, &(ddl->output_frame),
+				sizeof(struct ddl_frame_data_tag),
+				(u32 *)ddl, ddl->client_data);
+
+			if (DDLCLIENT_STATE_IS(ddl,
+				DDL_CLIENT_WAIT_FOR_EOS_DONE) &&
+				encoder->i_period.b_frames) {
+				if ((ddl->extra_output_buf_count < 0) ||
+					(ddl->extra_output_buf_count >
+					encoder->i_period.b_frames)) {
+					DDL_MSG_ERROR("Invalid B frame output"
+								"buffer index");
+				} else {
+					struct vidc_1080p_enc_frame_start_param
+						enc_param;
+					ddl->output_frame =
+						ddl->extra_output_frame[
+						ddl->extra_output_buf_count];
+					ddl->extra_output_buf_count--;
+					output_frame =
+						&ddl->output_frame.vcd_frm;
+					memset(&enc_param, 0,
+						sizeof(enc_param));
+					enc_param.cmd_seq_num =
+						++ddl_context->cmd_seq_num;
+					enc_param.inst_id = ddl->instance_id;
+					enc_param.shared_mem_addr_offset =
+					   DDL_ADDR_OFFSET(ddl_context->\
+						dram_base_a, ddl->shared_mem
+						[ddl->command_channel]);
+					enc_param.stream_buffer_addr_offset =
+						DDL_OFFSET(ddl_context->\
+						dram_base_a.\
+						align_physical_addr,
+						output_frame->physical);
+					enc_param.stream_buffer_size =
+					encoder->client_output_buf_req.sz;
+					enc_param.encode =
+					VIDC_1080P_ENC_TYPE_LAST_FRAME_DATA;
+					ddl->cmd_state = DDL_CMD_ENCODE_FRAME;
+					ddl_context->vidc_encode_frame_start
+						[ddl->command_channel]
+						(&enc_param);
+				}
+			} else {
+				DDL_MSG_LOW("ddl_state_transition: %s ~~>"
+					"DDL_CLIENT_WAIT_FOR_FRAME",
+					ddl_get_state_string(
+					ddl->client_state));
+				ddl->client_state =
+					DDL_CLIENT_WAIT_FOR_FRAME;
+				ddl_release_command_channel(ddl_context,
+				ddl->command_channel);
+			}
+		} else {
+			ddl_context->ddl_callback(
+				VCD_EVT_RESP_TRANSACTION_PENDING,
+				VCD_S_SUCCESS, NULL, 0, (u32 *)ddl,
+				ddl->client_data);
+			DDL_MSG_LOW("ddl_state_transition: %s ~~>"
+				"DDL_CLIENT_WAIT_FOR_FRAME",
+			ddl_get_state_string(ddl->client_state));
+			ddl->client_state = DDL_CLIENT_WAIT_FOR_FRAME;
+			ddl_release_command_channel(ddl_context,
+			ddl->command_channel);
+		}
+	}
+}
+
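+/*
+ * Fetch the frame-done status and resolution-change flag from either the
+ * display-order or the decode-order result registers, depending on the
+ * client's configured output order.
+ */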
+static void get_dec_status(struct ddl_client_context *ddl,
+	 struct vidc_1080p_dec_disp_info *dec_disp_info,
+	 u32 output_order, u32 *status, u32 *rsl_chg)
+{
+	if (output_order == VCD_DEC_ORDER_DISPLAY) {
+		vidc_1080p_get_display_frame_result(dec_disp_info);
+		*status = dec_disp_info->display_status;
+		*rsl_chg = dec_disp_info->disp_resl_change;
+	} else {
+		vidc_1080p_get_decode_frame_result(dec_disp_info);
+		vidc_sm_get_dec_order_resl(
+			&ddl->shared_mem[ddl->command_channel],
+			&dec_disp_info->img_size_x,
+			&dec_disp_info->img_size_y);
+		*status = dec_disp_info->decode_status;
+		*rsl_chg = dec_disp_info->dec_resl_change;
+	}
+}
+
+static u32 ddl_decoder_frame_run_callback(struct ddl_client_context *ddl)
+{
+	struct ddl_context *ddl_context = ddl->ddl_context;
+	struct ddl_decoder_data *decoder = &ddl->codec_data.decoder;
+	u32 callback_end = false, ret_status = false;
+	u32 eos_present = false, rsl_chg;
+	u32 more_field_needed, extended_rsl_chg;
+	enum vidc_1080p_display_status disp_status;
+	DDL_MSG_MED("ddl_decoder_frame_run_callback");
+	if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME_DONE)) {
+		DDL_MSG_ERROR("STATE-CRITICAL-DECFRMRUN");
+		ddl_client_fatal_cb(ddl);
+		ret_status = true;
+	} else {
+		DDL_MSG_LOW("DEC_FRM_RUN_DONE");
+		ddl->cmd_state = DDL_CMD_INVALID;
+		get_dec_status(ddl, &ddl->codec_data.decoder.dec_disp_info,
+			ddl->codec_data.decoder.output_order,
+			&disp_status, &rsl_chg);
+
+		vidc_sm_get_extended_decode_status(
+			&ddl->shared_mem[ddl->command_channel],
+			&more_field_needed,
+			&extended_rsl_chg);
+		decoder->field_needed_for_prev_ip =
+			more_field_needed;
+		decoder->prev_ip_frm_tag =
+			ddl->input_frame.vcd_frm.ip_frm_tag;
+
+		ddl_vidc_decode_dynamic_property(ddl, false);
+		if (rsl_chg != DDL_RESL_CHANGE_NO_CHANGE) {
+			ddl_handle_reconfig(rsl_chg, ddl);
+			ret_status = false;
+		} else {
+			if ((VCD_FRAME_FLAG_EOS &
+				ddl->input_frame.vcd_frm.flags)) {
+				callback_end = false;
+				eos_present = true;
+			}
+			if (disp_status ==
+				VIDC_1080P_DISPLAY_STATUS_DECODE_ONLY ||
+				disp_status ==
+				VIDC_1080P_DISPLAY_STATUS_DECODE_AND_DISPLAY) {
+				if (!eos_present)
+					callback_end =
+					(disp_status ==
+					VIDC_1080P_DISPLAY_STATUS_DECODE_ONLY);
+				ddl_decoder_input_done_callback(ddl,
+					callback_end);
+			}
+			if (disp_status ==
+				VIDC_1080P_DISPLAY_STATUS_DECODE_AND_DISPLAY ||
+				disp_status ==
+				VIDC_1080P_DISPLAY_STATUS_DISPLAY_ONLY) {
+				if (!eos_present)
+					callback_end = (disp_status ==
+				VIDC_1080P_DISPLAY_STATUS_DECODE_AND_DISPLAY);
+				if (ddl_decoder_output_done_callback(
+					ddl, callback_end))
+					ret_status = true;
+			}
+			if (!ret_status) {
+				if (disp_status ==
+					VIDC_1080P_DISPLAY_STATUS_DISPLAY_ONLY
+					|| disp_status ==
+					VIDC_1080P_DISPLAY_STATUS_DPB_EMPTY ||
+					disp_status ==
+					VIDC_1080P_DISPLAY_STATUS_NOOP) {
+					ddl_vidc_decode_frame_run(ddl);
+				} else if (eos_present)
+					ddl_vidc_decode_eos_run(ddl);
+				else {
+					ddl->client_state =
+						DDL_CLIENT_WAIT_FOR_FRAME;
+					ddl_release_command_channel(ddl_context,
+						ddl->command_channel);
+					ret_status = true;
+				}
+			}
+		}
+	}
+	return ret_status;
+}
+
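+/*
+ * Frame-done handling while flushing at EOS: keep draining the DPB
+ * (DISPLAY_ONLY/NOOP status) with further LAST_FRAME_DATA commands until
+ * the firmware reports DPB_EMPTY, then either restart header parsing on
+ * a pending resolution change or report EOS_DONE to the client.
+ */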
+static u32 ddl_eos_frame_done_callback(
+	struct ddl_client_context *ddl)
+{
+	struct ddl_context *ddl_context = ddl->ddl_context;
+	struct ddl_decoder_data *decoder = &ddl->codec_data.decoder;
+	struct ddl_mask *dpb_mask = &decoder->dpb_mask;
+	u32 ret_status = true, rsl_chg, more_field_needed;
+	enum vidc_1080p_display_status disp_status;
+
+	if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_EOS_DONE)) {
+		DDL_MSG_ERROR("STATE-CRITICAL-EOSFRMRUN");
+		ddl_client_fatal_cb(ddl);
+	} else {
+		DDL_MSG_LOW("EOS_FRM_RUN_DONE");
+		ddl->cmd_state = DDL_CMD_INVALID;
+		get_dec_status(ddl, &ddl->codec_data.decoder.dec_disp_info,
+			ddl->codec_data.decoder.output_order,
+			&disp_status, &rsl_chg);
+		vidc_sm_get_extended_decode_status(
+			&ddl->shared_mem[ddl->command_channel],
+			&more_field_needed, &rsl_chg);
+
+		decoder->field_needed_for_prev_ip =
+			more_field_needed;
+		decoder->prev_ip_frm_tag =
+			ddl->input_frame.vcd_frm.ip_frm_tag;
+		ddl_vidc_decode_dynamic_property(ddl, false);
+		if (disp_status ==
+			VIDC_1080P_DISPLAY_STATUS_DPB_EMPTY) {
+			if (rsl_chg) {
+				decoder->header_in_start = false;
+				decoder->decode_config.sequence_header =
+					ddl->input_frame.vcd_frm.physical;
+				decoder->decode_config.sequence_header_len =
+					ddl->input_frame.vcd_frm.data_len;
+				ddl_vidc_decode_init_codec(ddl);
+				ret_status = false;
+			} else
+				ddl_decoder_eos_done_callback(ddl);
+		} else {
+			struct vidc_1080p_dec_frame_start_param dec_param;
+			ret_status = false;
+			if (disp_status ==
+				VIDC_1080P_DISPLAY_STATUS_DISPLAY_ONLY) {
+				if (ddl_decoder_output_done_callback(
+					ddl, false))
+					ret_status = true;
+			} else if (disp_status !=
+				VIDC_1080P_DISPLAY_STATUS_NOOP)
+				DDL_MSG_ERROR("EOS-STATE-CRITICAL-"
+					"WRONG-DISP-STATUS");
+			if (!ret_status) {
+				ddl_decoder_dpb_transact(decoder, NULL,
+					DDL_DPB_OP_SET_MASK);
+				ddl->cmd_state = DDL_CMD_EOS;
+
+				memset(&dec_param, 0, sizeof(dec_param));
+
+				dec_param.cmd_seq_num =
+					++ddl_context->cmd_seq_num;
+				dec_param.inst_id = ddl->instance_id;
+				dec_param.shared_mem_addr_offset =
+					DDL_ADDR_OFFSET(
+					ddl_context->dram_base_a,
+					ddl->shared_mem[ddl->command_channel]);
+				dec_param.release_dpb_bit_mask =
+					dpb_mask->hw_mask;
+				dec_param.decode =
+					VIDC_1080P_DEC_TYPE_LAST_FRAME_DATA;
+
+				ddl_context->vidc_decode_frame_start[ddl->\
+					command_channel](&dec_param);
+			}
+		}
+	}
+	return ret_status;
+}
+
+static u32 ddl_frame_run_callback(struct ddl_context *ddl_context)
+{
+	struct ddl_client_context *ddl;
+	u32 channel_inst_id;
+	u32 return_status = true;
+
+	vidc_1080p_get_returned_channel_inst_id(&channel_inst_id);
+	vidc_1080p_clear_returned_channel_inst_id();
+	ddl = ddl_get_current_ddl_client_for_channel_id(ddl_context,
+			ddl_context->response_cmd_ch_id);
+	if (ddl) {
+		if (ddl_context->pix_cache_enable) {
+			struct vidc_1080P_pix_cache_statistics
+			pixel_cache_stats;
+			vidc_pix_cache_get_statistics(&pixel_cache_stats);
+
+			DDL_MSG_HIGH(" pixel cache hits = %d, "
+				"miss = %d", pixel_cache_stats.access_hit,
+				pixel_cache_stats.access_miss);
+			DDL_MSG_HIGH(" pixel cache core reqs = %d, "
+				"axi reqs = %d", pixel_cache_stats.core_req,
+				pixel_cache_stats.axi_req);
+			DDL_MSG_HIGH(" pixel cache core bus stats = %d, "
+				"axi bus stats = %d", pixel_cache_stats.core_bus,
+				pixel_cache_stats.axi_bus);
+		}
+
+		if (ddl->cmd_state == DDL_CMD_DECODE_FRAME)
+			return_status = ddl_decoder_frame_run_callback(ddl);
+		else if (ddl->cmd_state == DDL_CMD_ENCODE_FRAME)
+			ddl_encoder_frame_run_callback(ddl);
+		else if (ddl->cmd_state == DDL_CMD_EOS)
+			return_status = ddl_eos_frame_done_callback(ddl);
+		else {
+			DDL_MSG_ERROR("UNKWN_FRAME_DONE");
+			return_status = false;
+		}
+	} else
+		return_status = false;
+
+	return return_status;
+}
+
+static void ddl_channel_end_callback(struct ddl_context *ddl_context)
+{
+	struct ddl_client_context *ddl;
+
+	DDL_MSG_MED("ddl_channel_end_callback");
+	ddl = ddl_get_current_ddl_client_for_command(ddl_context,
+			DDL_CMD_CHANNEL_END);
+	if (ddl) {
+		ddl->cmd_state = DDL_CMD_INVALID;
+		if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_CHEND)) {
+			DDL_MSG_LOW("STATE-CRITICAL-CHEND");
+		} else {
+			DDL_MSG_LOW("CH_END_DONE");
+			ddl_release_client_internal_buffers(ddl);
+			ddl_context->ddl_callback(VCD_EVT_RESP_STOP,
+				VCD_S_SUCCESS, NULL, 0, (u32 *)ddl,
+				ddl->client_data);
+			DDL_MSG_LOW("ddl_state_transition: %s ~~>"
+				"DDL_CLIENT_OPEN",
+				ddl_get_state_string(ddl->client_state));
+			ddl->client_state = DDL_CLIENT_OPEN;
+		}
+		ddl_release_command_channel(ddl_context,
+			ddl->command_channel);
+	}
+}
+
+static void ddl_edfu_callback(struct ddl_context *ddl_context)
+{
+	struct ddl_client_context *ddl;
+	u32 channel_inst_id;
+
+	DDL_MSG_MED("ddl_edfu_callback");
+	vidc_1080p_get_returned_channel_inst_id(&channel_inst_id);
+	vidc_1080p_clear_returned_channel_inst_id();
+	ddl = ddl_get_current_ddl_client_for_channel_id(ddl_context,
+			ddl_context->response_cmd_ch_id);
+	if (ddl) {
+		if (ddl->cmd_state != DDL_CMD_ENCODE_FRAME)
+			DDL_MSG_LOW("UNKWN_EDFU");
+	}
+}
+
+static void ddl_encoder_eos_done(struct ddl_context *ddl_context)
+{
+	struct ddl_client_context *ddl;
+	u32 channel_inst_id;
+
+	vidc_1080p_get_returned_channel_inst_id(&channel_inst_id);
+	vidc_1080p_clear_returned_channel_inst_id();
+	ddl = ddl_get_current_ddl_client_for_channel_id(ddl_context,
+			ddl_context->response_cmd_ch_id);
+	if (!ddl || (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_EOS_DONE))) {
+		DDL_MSG_ERROR("STATE-CRITICAL-EOSFRMDONE");
+		ddl_client_fatal_cb(ddl);
+	} else {
+		struct ddl_encoder_data *encoder = &(ddl->codec_data.encoder);
+		vidc_1080p_get_encode_frame_info(&encoder->enc_frame_info);
+		ddl_handle_enc_frame_done(ddl);
+		DDL_MSG_LOW("encoder_eos_done");
+		ddl->cmd_state = DDL_CMD_INVALID;
+		DDL_MSG_LOW("ddl_state_transition: %s ~~>"
+				"DDL_CLIENT_WAIT_FOR_FRAME",
+				ddl_get_state_string(ddl->client_state));
+		ddl->client_state = DDL_CLIENT_WAIT_FOR_FRAME;
+		DDL_MSG_LOW("eos_done");
+		ddl_context->ddl_callback(VCD_EVT_RESP_EOS_DONE,
+				VCD_S_SUCCESS, NULL, 0,
+				(u32 *)ddl, ddl->client_data);
+		ddl_release_command_channel(ddl_context,
+			ddl->command_channel);
+	}
+}
+
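+/*
+ * Dispatch a RISC-to-host command code to the matching done-callback;
+ * the boolean result is passed back to the caller through
+ * ddl_process_core_response().
+ */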
+static u32 ddl_process_intr_status(struct ddl_context *ddl_context,
+	u32 intr_status)
+{
+	u32 return_status = true;
+	switch (intr_status) {
+	case VIDC_1080P_RISC2HOST_CMD_OPEN_CH_RET:
+		return_status = ddl_channel_set_callback(ddl_context,
+			ddl_context->response_cmd_ch_id);
+	break;
+	case VIDC_1080P_RISC2HOST_CMD_CLOSE_CH_RET:
+		ddl_channel_end_callback(ddl_context);
+	break;
+	case VIDC_1080P_RISC2HOST_CMD_SEQ_DONE_RET:
+		return_status = ddl_sequence_done_callback(ddl_context);
+	break;
+	case VIDC_1080P_RISC2HOST_CMD_FRAME_DONE_RET:
+		return_status = ddl_frame_run_callback(ddl_context);
+	break;
+	case VIDC_1080P_RISC2HOST_CMD_SYS_INIT_RET:
+		ddl_sys_init_done_callback(ddl_context,
+			ddl_context->response_cmd_ch_id);
+	break;
+	case VIDC_1080P_RISC2HOST_CMD_FW_STATUS_RET:
+		ddl_fw_status_done_callback(ddl_context);
+	break;
+	case VIDC_1080P_RISC2HOST_CMD_EDFU_INT_RET:
+		ddl_edfu_callback(ddl_context);
+	break;
+	case VIDC_1080P_RISC2HOST_CMD_ENC_COMPLETE_RET:
+		ddl_encoder_eos_done(ddl_context);
+	break;
+	case VIDC_1080P_RISC2HOST_CMD_ERROR_RET:
+		DDL_MSG_ERROR("CMD_ERROR_INTR");
+		return_status = ddl_handle_core_errors(ddl_context);
+	break;
+	case VIDC_1080P_RISC2HOST_CMD_INIT_BUFFERS_RET:
+		return_status =
+			ddl_dpb_buffers_set_done_callback(ddl_context);
+	break;
+	default:
+		DDL_MSG_LOW("UNKWN_INTR");
+	break;
+	}
+	return return_status;
+}
+
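+/*
+ * Latch the pending RISC-to-host command and its arguments into
+ * ddl_hw_response, then clear the command register and the interrupt so
+ * the response can be handled later by ddl_process_core_response().
+ */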
+void ddl_read_and_clear_interrupt(void)
+{
+	struct ddl_context *ddl_context;
+	struct ddl_hw_interface  *ddl_hw_response;
+
+	ddl_context = ddl_get_context();
+	if (!ddl_context->core_virtual_base_addr) {
+		DDL_MSG_LOW("SPURIOUS_INTERRUPT");
+	} else {
+		ddl_hw_response = &ddl_context->ddl_hw_response;
+		vidc_1080p_get_risc2host_cmd(&ddl_hw_response->cmd,
+			&ddl_hw_response->arg1, &ddl_hw_response->arg2,
+			&ddl_hw_response->arg3, &ddl_hw_response->arg4);
+		vidc_1080p_clear_risc2host_cmd();
+		vidc_1080p_clear_interrupt();
+		vidc_1080p_get_risc2host_cmd_status(ddl_hw_response->arg2,
+			&ddl_context->cmd_err_status,
+			&ddl_context->disp_pic_err_status);
+		ddl_context->response_cmd_ch_id = ddl_hw_response->arg1;
+	}
+}
+
+u32 ddl_process_core_response(void)
+{
+	struct ddl_context *ddl_context;
+	struct ddl_hw_interface *ddl_hw_response;
+	u32 status = false;
+
+	ddl_context = ddl_get_context();
+	if (!ddl_context->core_virtual_base_addr) {
+		DDL_MSG_LOW("SPURIOUS_INTERRUPT");
+		return status;
+	}
+	ddl_hw_response = &ddl_context->ddl_hw_response;
+	status = ddl_process_intr_status(ddl_context, ddl_hw_response->cmd);
+	if (ddl_context->interrupt_clr)
+		(*ddl_context->interrupt_clr)();
+	return status;
+}
+
+static void ddl_decoder_input_done_callback(
+	struct ddl_client_context *ddl, u32 frame_transact_end)
+{
+	struct ddl_context *ddl_context = ddl->ddl_context;
+	struct ddl_decoder_data *decoder = &(ddl->codec_data.decoder);
+	struct vidc_1080p_dec_disp_info *dec_disp_info =
+		&decoder->dec_disp_info;
+	struct vcd_frame_data *input_vcd_frm = &(ddl->input_frame.vcd_frm);
+	u32 is_interlaced;
+	vidc_1080p_get_decoded_frame_size(
+		&dec_disp_info->input_bytes_consumed);
+	vidc_sm_set_start_byte_number(&ddl->shared_mem
+		[ddl->command_channel], 0);
+	vidc_1080p_get_decode_frame(&dec_disp_info->input_frame);
+	ddl_get_decoded_frame(input_vcd_frm,
+		dec_disp_info->input_frame);
+	vidc_1080p_get_decode_frame_result(dec_disp_info);
+	is_interlaced = (dec_disp_info->decode_coding ==
+		VIDC_1080P_DISPLAY_CODING_INTERLACED);
+	if (decoder->output_order == VCD_DEC_ORDER_DECODE) {
+		dec_disp_info->tag_bottom = is_interlaced ?
+			dec_disp_info->tag_top :
+			VCD_FRAMETAG_INVALID;
+		dec_disp_info->tag_top = input_vcd_frm->ip_frm_tag;
+	}
+	input_vcd_frm->interlaced = is_interlaced;
+	input_vcd_frm->offset += dec_disp_info->input_bytes_consumed;
+	input_vcd_frm->data_len -= dec_disp_info->input_bytes_consumed;
+	ddl->input_frame.frm_trans_end = frame_transact_end;
+	if (vidc_msg_timing)
+		ddl_calc_core_proc_time(__func__, DEC_IP_TIME);
+	ddl_context->ddl_callback(VCD_EVT_RESP_INPUT_DONE, VCD_S_SUCCESS,
+		&ddl->input_frame, sizeof(struct ddl_frame_data_tag),
+		(u32 *)ddl, ddl->client_data);
+}
+
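+/*
+ * The firmware returns decode/display luma addresses in 2 KB units,
+ * hence the << 11 shift to rebuild the physical byte address of the
+ * output frame.
+ */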
+static void get_dec_op_done_data(struct vidc_1080p_dec_disp_info *dec_disp_info,
+	u32 output_order, u8 **physical, u32 *is_interlaced)
+{
+	enum vidc_1080p_display_coding disp_coding;
+	if (output_order == VCD_DEC_ORDER_DECODE) {
+		*physical = (u8 *)(dec_disp_info->decode_y_addr << 11);
+		disp_coding = dec_disp_info->decode_coding;
+	} else {
+		*physical = (u8 *)(dec_disp_info->display_y_addr << 11);
+		disp_coding = dec_disp_info->display_coding;
+	}
+	*is_interlaced = (disp_coding ==
+			VIDC_1080P_DISPLAY_CODING_INTERLACED);
+}
+
+static void get_dec_op_done_crop(u32 output_order,
+	struct vidc_1080p_dec_disp_info *dec_disp_info,
+	struct vcd_frame_rect *crop_data,
+	struct vcd_property_frame_size *op_frame_sz,
+	struct vcd_property_frame_size *frame_sz,
+	struct ddl_buf_addr *shared_mem)
+{
+	u32 crop_exists =
+		(output_order == VCD_DEC_ORDER_DECODE) ?
+		dec_disp_info->dec_crop_exists :
+		dec_disp_info->disp_crop_exists;
+	crop_data->left = 0;
+	crop_data->top = 0;
+	crop_data->right = dec_disp_info->img_size_x;
+	crop_data->bottom = dec_disp_info->img_size_y;
+	op_frame_sz->width = dec_disp_info->img_size_x;
+	op_frame_sz->height = dec_disp_info->img_size_y;
+	ddl_calculate_stride(op_frame_sz, false);
+	op_frame_sz->stride = DDL_ALIGN(op_frame_sz->width,
+				DDL_TILE_ALIGN_WIDTH);
+	op_frame_sz->scan_lines = DDL_ALIGN(op_frame_sz->height,
+					DDL_TILE_ALIGN_HEIGHT);
+	DDL_MSG_LOW("%s img_size_x = %u img_size_y = %u\n",
+				__func__, dec_disp_info->img_size_x,
+				dec_disp_info->img_size_y);
+	if (crop_exists) {
+		if (output_order == VCD_DEC_ORDER_DECODE)
+			vidc_sm_get_dec_order_crop_info(shared_mem,
+				&dec_disp_info->crop_left_offset,
+				&dec_disp_info->crop_right_offset,
+				&dec_disp_info->crop_top_offset,
+				&dec_disp_info->crop_bottom_offset);
+		else
+			vidc_sm_get_crop_info(shared_mem,
+				&dec_disp_info->crop_left_offset,
+				&dec_disp_info->crop_right_offset,
+				&dec_disp_info->crop_top_offset,
+				&dec_disp_info->crop_bottom_offset);
+		crop_data->left = dec_disp_info->crop_left_offset;
+		crop_data->top = dec_disp_info->crop_top_offset;
+		crop_data->right -= dec_disp_info->crop_right_offset;
+		crop_data->bottom -= dec_disp_info->crop_bottom_offset;
+		op_frame_sz->width = crop_data->right - crop_data->left;
+		op_frame_sz->height = crop_data->bottom - crop_data->top;
+	}
+}
+
+static u32 ddl_decoder_output_done_callback(
+	struct ddl_client_context *ddl, u32 frame_transact_end)
+{
+	struct ddl_context *ddl_context = ddl->ddl_context;
+	struct ddl_decoder_data *decoder = &(ddl->codec_data.decoder);
+	struct vidc_1080p_dec_disp_info *dec_disp_info =
+		&(decoder->dec_disp_info);
+	struct ddl_frame_data_tag *output_frame = &(ddl->output_frame);
+	struct vcd_frame_data *output_vcd_frm = &(output_frame->vcd_frm);
+	u32 vcd_status, free_luma_dpb = 0, disp_pict = 0, is_interlaced;
+	get_dec_op_done_data(dec_disp_info, decoder->output_order,
+		&output_vcd_frm->physical, &is_interlaced);
+	decoder->progressive_only = !(is_interlaced);
+	output_vcd_frm->frame = VCD_FRAME_YUV;
+	if (decoder->codec.codec == VCD_CODEC_MPEG4 ||
+		decoder->codec.codec == VCD_CODEC_VC1 ||
+		decoder->codec.codec == VCD_CODEC_VC1_RCV ||
+		(decoder->codec.codec >= VCD_CODEC_DIVX_3 &&
+		decoder->codec.codec <= VCD_CODEC_XVID)) {
+		vidc_sm_get_displayed_picture_frame(&ddl->shared_mem
+		[ddl->command_channel], &disp_pict);
+		if (decoder->output_order == VCD_DEC_ORDER_DISPLAY) {
+			if (!disp_pict) {
+				output_vcd_frm->frame = VCD_FRAME_NOTCODED;
+				vidc_sm_get_available_luma_dpb_address(
+					&ddl->shared_mem[ddl->command_channel],
+					&free_luma_dpb);
+			}
+		} else {
+			if (dec_disp_info->input_frame ==
+				VIDC_1080P_DECODE_FRAMETYPE_NOT_CODED) {
+				output_vcd_frm->frame = VCD_FRAME_NOTCODED;
+			vidc_sm_get_available_luma_dpb_dec_order_address(
+					&ddl->shared_mem[ddl->command_channel],
+					&free_luma_dpb);
+			}
+		}
+		if (free_luma_dpb)
+			output_vcd_frm->physical =
+				(u8 *)(free_luma_dpb << 11);
+	}
+	vcd_status = ddl_decoder_dpb_transact(decoder, output_frame,
+			DDL_DPB_OP_MARK_BUSY);
+	if (vcd_status) {
+		DDL_MSG_ERROR("CORRUPTED_OUTPUT_BUFFER_ADDRESS");
+		ddl_hw_fatal_cb(ddl);
+	} else {
+		vidc_sm_get_metadata_status(&ddl->shared_mem
+			[ddl->command_channel],
+			&decoder->meta_data_exists);
+		if (decoder->output_order == VCD_DEC_ORDER_DISPLAY)
+			vidc_sm_get_frame_tags(&ddl->shared_mem
+				[ddl->command_channel],
+				&dec_disp_info->tag_top,
+				&dec_disp_info->tag_bottom);
+		output_vcd_frm->ip_frm_tag = dec_disp_info->tag_top;
+		vidc_sm_get_picture_times(&ddl->shared_mem
+			[ddl->command_channel],
+			&dec_disp_info->pic_time_top,
+			&dec_disp_info->pic_time_bottom);
+		get_dec_op_done_crop(decoder->output_order, dec_disp_info,
+			&output_vcd_frm->dec_op_prop.disp_frm,
+			&output_vcd_frm->dec_op_prop.frm_size,
+			&decoder->frame_size,
+			&ddl->shared_mem[ddl_context->response_cmd_ch_id]);
+		if ((decoder->cont_mode) &&
+			((output_vcd_frm->dec_op_prop.frm_size.width !=
+			decoder->frame_size.width) ||
+			(output_vcd_frm->dec_op_prop.frm_size.height !=
+			decoder->frame_size.height) ||
+			(decoder->frame_size.width !=
+			decoder->client_frame_size.width) ||
+			(decoder->frame_size.height !=
+			decoder->client_frame_size.height))) {
+			DDL_MSG_LOW("%s o/p width = %u o/p height = %u "
+				"decoder width = %u decoder height = %u ",
+				__func__,
+				output_vcd_frm->dec_op_prop.frm_size.width,
+				output_vcd_frm->dec_op_prop.frm_size.height,
+				decoder->frame_size.width,
+				decoder->frame_size.height);
+			DDL_MSG_HIGH("%s Sending INFO_OP_RECONFIG event\n",
+				 __func__);
+			ddl_context->ddl_callback(
+				VCD_EVT_IND_INFO_OUTPUT_RECONFIG,
+				VCD_S_SUCCESS, NULL, 0,
+				(u32 *)ddl,
+				ddl->client_data);
+			decoder->frame_size =
+				 output_vcd_frm->dec_op_prop.frm_size;
+			decoder->client_frame_size = decoder->frame_size;
+			decoder->y_cb_cr_size =
+				ddl_get_yuv_buffer_size(&decoder->frame_size,
+					&decoder->buf_format,
+					(!decoder->progressive_only),
+					decoder->codec.codec, NULL);
+			decoder->actual_output_buf_req.sz =
+				decoder->y_cb_cr_size + decoder->suffix;
+			decoder->min_output_buf_req =
+				decoder->actual_output_buf_req;
+			DDL_MSG_LOW("%s y_cb_cr_size = %u "
+				"actual_output_buf_req.sz = %u "
+				"min_output_buf_req.sz = %u\n",
+				__func__, decoder->y_cb_cr_size,
+				decoder->actual_output_buf_req.sz,
+				decoder->min_output_buf_req.sz);
+			vidc_sm_set_chroma_addr_change(
+			&ddl->shared_mem[ddl->command_channel],
+			false);
+		}
+		output_vcd_frm->interlaced = is_interlaced;
+		output_vcd_frm->intrlcd_ip_frm_tag =
+			(!is_interlaced || !dec_disp_info->tag_bottom) ?
+			VCD_FRAMETAG_INVALID : dec_disp_info->tag_bottom;
+		output_vcd_frm->offset = 0;
+		output_vcd_frm->data_len = decoder->y_cb_cr_size;
+		if (free_luma_dpb) {
+			output_vcd_frm->data_len = 0;
+			output_vcd_frm->flags |= VCD_FRAME_FLAG_DECODEONLY;
+		}
+		output_vcd_frm->flags |= VCD_FRAME_FLAG_ENDOFFRAME;
+		output_frame->frm_trans_end = frame_transact_end;
+		if (vidc_msg_timing)
+			ddl_calc_core_proc_time(__func__, DEC_OP_TIME);
+		ddl_process_decoder_metadata(ddl);
+		ddl_context->ddl_callback(VCD_EVT_RESP_OUTPUT_DONE,
+			vcd_status, output_frame,
+			sizeof(struct ddl_frame_data_tag),
+			(u32 *)ddl, ddl->client_data);
+	}
+	return vcd_status;
+}
+
+static u32 ddl_get_decoded_frame(struct vcd_frame_data  *frame,
+	enum vidc_1080p_decode_frame frame_type)
+{
+	u32 status = true;
+
+	switch (frame_type) {
+	case VIDC_1080P_DECODE_FRAMETYPE_I:
+		frame->flags |= VCD_FRAME_FLAG_SYNCFRAME;
+		frame->frame = VCD_FRAME_I;
+	break;
+	case VIDC_1080P_DECODE_FRAMETYPE_P:
+		frame->frame = VCD_FRAME_P;
+	break;
+	case VIDC_1080P_DECODE_FRAMETYPE_B:
+		frame->frame = VCD_FRAME_B;
+	break;
+	case VIDC_1080P_DECODE_FRAMETYPE_NOT_CODED:
+		frame->frame = VCD_FRAME_NOTCODED;
+		frame->data_len = 0;
+		DDL_MSG_HIGH("DDL_INFO:Decoder:NotCodedFrame>");
+	break;
+	case VIDC_1080P_DECODE_FRAMETYPE_OTHERS:
+		frame->frame = VCD_FRAME_YUV;
+	break;
+	case VIDC_1080P_DECODE_FRAMETYPE_32BIT:
+	default:
+		DDL_MSG_ERROR("UNKNOWN-FRAMETYPE");
+		status = false;
+	break;
+	}
+	return status;
+}
+
+static u32 ddl_get_encoded_frame(struct vcd_frame_data *frame,
+	enum vcd_codec codec,
+	enum vidc_1080p_encode_frame frame_type)
+{
+	u32 status = true;
+
+	if (codec == VCD_CODEC_H264) {
+		switch (frame_type) {
+		case VIDC_1080P_ENCODE_FRAMETYPE_NOT_CODED:
+			frame->frame = VCD_FRAME_P;
+		break;
+		case VIDC_1080P_ENCODE_FRAMETYPE_I:
+			frame->flags |= VCD_FRAME_FLAG_SYNCFRAME;
+			frame->frame = VCD_FRAME_I;
+		break;
+		case VIDC_1080P_ENCODE_FRAMETYPE_P:
+			frame->frame = VCD_FRAME_P;
+		break;
+		case VIDC_1080P_ENCODE_FRAMETYPE_B:
+			frame->frame = VCD_FRAME_B;
+			frame->flags |= VCD_FRAME_FLAG_BFRAME;
+		break;
+		case VIDC_1080P_ENCODE_FRAMETYPE_SKIPPED:
+			frame->frame = VCD_FRAME_NOTCODED;
+			frame->data_len = 0;
+		break;
+		case VIDC_1080P_ENCODE_FRAMETYPE_OTHERS:
+			DDL_MSG_LOW("FRAMETYPE-OTHERS");
+		break;
+		case VIDC_1080P_ENCODE_FRAMETYPE_32BIT:
+		default:
+			DDL_MSG_LOW("UNKNOWN-FRAMETYPE");
+			status = false;
+		break;
+		}
+	} else if (codec == VCD_CODEC_MPEG4) {
+		switch (frame_type) {
+		case VIDC_1080P_ENCODE_FRAMETYPE_NOT_CODED:
+			frame->frame = VCD_FRAME_P;
+		break;
+		case VIDC_1080P_ENCODE_FRAMETYPE_I:
+			frame->flags |= VCD_FRAME_FLAG_SYNCFRAME;
+			frame->frame = VCD_FRAME_I;
+		break;
+		case VIDC_1080P_ENCODE_FRAMETYPE_P:
+			frame->frame = VCD_FRAME_P;
+		break;
+		case VIDC_1080P_ENCODE_FRAMETYPE_B:
+			frame->frame = VCD_FRAME_B;
+			frame->flags |= VCD_FRAME_FLAG_BFRAME;
+		break;
+		case VIDC_1080P_ENCODE_FRAMETYPE_SKIPPED:
+			frame->frame = VCD_FRAME_NOTCODED;
+			frame->data_len = 0;
+		break;
+		case VIDC_1080P_ENCODE_FRAMETYPE_OTHERS:
+			DDL_MSG_LOW("FRAMETYPE-OTHERS");
+		break;
+		case VIDC_1080P_ENCODE_FRAMETYPE_32BIT:
+		default:
+			DDL_MSG_LOW("UNKNOWN-FRAMETYPE");
+			status = false;
+		break;
+		}
+	} else if (codec == VCD_CODEC_H263) {
+		switch (frame_type) {
+		case VIDC_1080P_ENCODE_FRAMETYPE_NOT_CODED:
+			frame->frame = VCD_FRAME_P;
+		break;
+		case VIDC_1080P_ENCODE_FRAMETYPE_I:
+			frame->flags |= VCD_FRAME_FLAG_SYNCFRAME;
+			frame->frame = VCD_FRAME_I;
+		break;
+		case VIDC_1080P_ENCODE_FRAMETYPE_P:
+			frame->frame = VCD_FRAME_P;
+		break;
+		case VIDC_1080P_ENCODE_FRAMETYPE_SKIPPED:
+			frame->frame = VCD_FRAME_NOTCODED;
+			frame->data_len = 0;
+		break;
+		case VIDC_1080P_ENCODE_FRAMETYPE_OTHERS:
+			DDL_MSG_LOW("FRAMETYPE-OTHERS");
+		break;
+		case VIDC_1080P_ENCODE_FRAMETYPE_32BIT:
+		default:
+			DDL_MSG_LOW("UNKNOWN-FRAMETYPE");
+			status = false;
+		break;
+		}
+	} else
+		status = false;
+	DDL_MSG_HIGH("Enc Frame Type %u", (u32)frame->frame);
+	return status;
+}
+
+static void ddl_get_mpeg4_dec_level(enum vcd_codec_level *level,
+	u32 level_codec, enum vcd_codec_profile mpeg4_profile)
+{
+	switch (level_codec) {
+	case VIDC_1080P_MPEG4_LEVEL0:
+		*level = VCD_LEVEL_MPEG4_0;
+	break;
+	case VIDC_1080P_MPEG4_LEVEL0b:
+		*level = VCD_LEVEL_MPEG4_0b;
+	break;
+	case VIDC_1080P_MPEG4_LEVEL1:
+		*level = VCD_LEVEL_MPEG4_1;
+	break;
+	case VIDC_1080P_MPEG4_LEVEL2:
+		*level = VCD_LEVEL_MPEG4_2;
+	break;
+	case VIDC_1080P_MPEG4_LEVEL3:
+		*level = VCD_LEVEL_MPEG4_3;
+	break;
+	case VIDC_1080P_MPEG4_LEVEL3b:
+		if (mpeg4_profile == VCD_PROFILE_MPEG4_SP)
+			*level = VCD_LEVEL_MPEG4_7;
+		else
+			*level = VCD_LEVEL_MPEG4_3b;
+	break;
+	case VIDC_1080P_MPEG4_LEVEL4a:
+		*level = VCD_LEVEL_MPEG4_4a;
+	break;
+	case VIDC_1080P_MPEG4_LEVEL5:
+		*level = VCD_LEVEL_MPEG4_5;
+	break;
+	case VIDC_1080P_MPEG4_LEVEL6:
+		*level = VCD_LEVEL_MPEG4_6;
+	break;
+	default:
+		*level = VCD_LEVEL_UNKNOWN;
+	break;
+	}
+}
+
+static void ddl_get_h264_dec_level(enum vcd_codec_level *level,
+	u32 level_codec)
+{
+	switch (level_codec) {
+	case VIDC_1080P_H264_LEVEL1:
+		*level = VCD_LEVEL_H264_1;
+	break;
+	case VIDC_1080P_H264_LEVEL1b:
+		*level = VCD_LEVEL_H264_1b;
+	break;
+	case VIDC_1080P_H264_LEVEL1p1:
+		*level = VCD_LEVEL_H264_1p1;
+	break;
+	case VIDC_1080P_H264_LEVEL1p2:
+		*level = VCD_LEVEL_H264_1p2;
+	break;
+	case VIDC_1080P_H264_LEVEL1p3:
+		*level = VCD_LEVEL_H264_1p3;
+	break;
+	case VIDC_1080P_H264_LEVEL2:
+		*level = VCD_LEVEL_H264_2;
+	break;
+	case VIDC_1080P_H264_LEVEL2p1:
+		*level = VCD_LEVEL_H264_2p1;
+	break;
+	case VIDC_1080P_H264_LEVEL2p2:
+		*level = VCD_LEVEL_H264_2p2;
+	break;
+	case VIDC_1080P_H264_LEVEL3:
+		*level = VCD_LEVEL_H264_3;
+	break;
+	case VIDC_1080P_H264_LEVEL3p1:
+		*level = VCD_LEVEL_H264_3p1;
+	break;
+	case VIDC_1080P_H264_LEVEL3p2:
+		*level = VCD_LEVEL_H264_3p2;
+	break;
+	case VIDC_1080P_H264_LEVEL4:
+		*level = VCD_LEVEL_H264_4;
+	break;
+	default:
+		*level = VCD_LEVEL_UNKNOWN;
+	break;
+	}
+}
+
+static void ddl_get_h263_dec_level(enum vcd_codec_level *level,
+	u32 level_codec)
+{
+	switch (level_codec) {
+	case VIDC_1080P_H263_LEVEL10:
+		*level = VCD_LEVEL_H263_10;
+	break;
+	case VIDC_1080P_H263_LEVEL20:
+		*level = VCD_LEVEL_H263_20;
+	break;
+	case VIDC_1080P_H263_LEVEL30:
+		*level = VCD_LEVEL_H263_30;
+	break;
+	case VIDC_1080P_H263_LEVEL40:
+		*level = VCD_LEVEL_H263_40;
+	break;
+	case VIDC_1080P_H263_LEVEL45:
+		*level = VCD_LEVEL_H263_45;
+	break;
+	case VIDC_1080P_H263_LEVEL50:
+		*level = VCD_LEVEL_H263_50;
+	break;
+	case VIDC_1080P_H263_LEVEL60:
+		*level = VCD_LEVEL_H263_60;
+	break;
+	case VIDC_1080P_H263_LEVEL70:
+		*level = VCD_LEVEL_H263_70;
+	break;
+	default:
+		*level = VCD_LEVEL_UNKNOWN;
+	break;
+	}
+}
+
+static void ddl_get_vc1_dec_level(enum vcd_codec_level *level,
+	u32 level_codec, enum vcd_codec_profile vc1_profile)
+{
+	if (vc1_profile == VCD_PROFILE_VC1_ADVANCE) {
+		switch (level_codec) {
+		case VIDC_SM_LEVEL_VC1_ADV_0:
+			*level = VCD_LEVEL_VC1_A_0;
+		break;
+		case VIDC_SM_LEVEL_VC1_ADV_1:
+			*level = VCD_LEVEL_VC1_A_1;
+		break;
+		case VIDC_SM_LEVEL_VC1_ADV_2:
+			*level = VCD_LEVEL_VC1_A_2;
+		break;
+		case VIDC_SM_LEVEL_VC1_ADV_3:
+			*level = VCD_LEVEL_VC1_A_3;
+		break;
+		case VIDC_SM_LEVEL_VC1_ADV_4:
+			*level = VCD_LEVEL_VC1_A_4;
+		break;
+		default:
+			*level = VCD_LEVEL_UNKNOWN;
+		break;
+		}
+	} else if (vc1_profile == VCD_PROFILE_VC1_MAIN) {
+		switch (level_codec) {
+		case VIDC_SM_LEVEL_VC1_LOW:
+			*level = VCD_LEVEL_VC1_M_LOW;
+		break;
+		case VIDC_SM_LEVEL_VC1_MEDIUM:
+			*level = VCD_LEVEL_VC1_M_MEDIUM;
+		break;
+		case VIDC_SM_LEVEL_VC1_HIGH:
+			*level = VCD_LEVEL_VC1_M_HIGH;
+		break;
+		default:
+			*level = VCD_LEVEL_UNKNOWN;
+		break;
+		}
+	} else if (vc1_profile == VCD_PROFILE_VC1_SIMPLE) {
+		switch (level_codec) {
+		case VIDC_SM_LEVEL_VC1_LOW:
+			*level = VCD_LEVEL_VC1_S_LOW;
+		break;
+		case VIDC_SM_LEVEL_VC1_MEDIUM:
+			*level = VCD_LEVEL_VC1_S_MEDIUM;
+		break;
+		default:
+			*level = VCD_LEVEL_UNKNOWN;
+		break;
+		}
+	}
+}
+
+static void ddl_get_mpeg2_dec_level(enum vcd_codec_level *level,
+	u32 level_codec)
+{
+	switch (level_codec) {
+	case VIDC_SM_LEVEL_MPEG2_LOW:
+		*level = VCD_LEVEL_MPEG2_LOW;
+	break;
+	case VIDC_SM_LEVEL_MPEG2_MAIN:
+		*level = VCD_LEVEL_MPEG2_MAIN;
+	break;
+	case VIDC_SM_LEVEL_MPEG2_HIGH_1440:
+		*level = VCD_LEVEL_MPEG2_HIGH_14;
+	break;
+	case VIDC_SM_LEVEL_MPEG2_HIGH:
+		*level = VCD_LEVEL_MPEG2_HIGH;
+	break;
+	default:
+		*level = VCD_LEVEL_UNKNOWN;
+	break;
+	}
+}
+
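+/*
+ * Translate the profile and level codes reported by the firmware into
+ * the VCD enumerations for the codec configured on this client.
+ */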
+static void ddl_get_dec_profile_level(struct ddl_decoder_data *decoder,
+	u32 profile_codec, u32 level_codec)
+{
+	enum vcd_codec_profile profile = VCD_PROFILE_UNKNOWN;
+	enum vcd_codec_level level = VCD_LEVEL_UNKNOWN;
+
+	switch (decoder->codec.codec) {
+	case VCD_CODEC_MPEG4:
+	case VCD_CODEC_XVID:
+		if (profile_codec == VIDC_SM_PROFILE_MPEG4_SIMPLE)
+			profile = VCD_PROFILE_MPEG4_SP;
+		else if (profile_codec == VIDC_SM_PROFILE_MPEG4_ADV_SIMPLE)
+			profile = VCD_PROFILE_MPEG4_ASP;
+		else
+			profile = VCD_PROFILE_UNKNOWN;
+		ddl_get_mpeg4_dec_level(&level, level_codec, profile);
+	break;
+	case VCD_CODEC_H264:
+		if (profile_codec == VIDC_SM_PROFILE_H264_BASELINE)
+			profile = VCD_PROFILE_H264_BASELINE;
+		else if (profile_codec == VIDC_SM_PROFILE_H264_MAIN)
+			profile = VCD_PROFILE_H264_MAIN;
+		else if (profile_codec == VIDC_SM_PROFILE_H264_HIGH)
+			profile = VCD_PROFILE_H264_HIGH;
+		else
+			profile = VCD_PROFILE_UNKNOWN;
+		ddl_get_h264_dec_level(&level, level_codec);
+	break;
+	case VCD_CODEC_H263:
+		if (profile_codec == VIDC_SM_PROFILE_H263_BASELINE)
+			profile = VCD_PROFILE_H263_BASELINE;
+		else
+			profile = VCD_PROFILE_UNKNOWN;
+		ddl_get_h263_dec_level(&level, level_codec);
+	break;
+	case VCD_CODEC_MPEG2:
+		if (profile_codec == VIDC_SM_PROFILE_MPEG2_MAIN)
+			profile = VCD_PROFILE_MPEG2_MAIN;
+		else if (profile_codec == VIDC_SM_PROFILE_MPEG2_SIMPLE)
+			profile = VCD_PROFILE_MPEG2_SIMPLE;
+		else
+			profile = VCD_PROFILE_UNKNOWN;
+		ddl_get_mpeg2_dec_level(&level, level_codec);
+	break;
+	case VCD_CODEC_VC1:
+	case VCD_CODEC_VC1_RCV:
+		if (profile_codec == VIDC_SM_PROFILE_VC1_SIMPLE)
+			profile = VCD_PROFILE_VC1_SIMPLE;
+		else if (profile_codec == VIDC_SM_PROFILE_VC1_MAIN)
+			profile = VCD_PROFILE_VC1_MAIN;
+		else if (profile_codec == VIDC_SM_PROFILE_VC1_ADVANCED)
+			profile = VCD_PROFILE_VC1_ADVANCE;
+		else
+			profile = VCD_PROFILE_UNKNOWN;
+		ddl_get_vc1_dec_level(&level, level_codec, profile);
+	break;
+	default:
+		if (!profile_codec)
+			profile = VCD_PROFILE_UNKNOWN;
+		if (!level)
+			level = VCD_LEVEL_UNKNOWN;
+	break;
+	}
+	decoder->profile.profile = profile;
+	decoder->level.level = level;
+}
+
+static void ddl_handle_enc_frame_done(struct ddl_client_context *ddl)
+{
+	struct ddl_context       *ddl_context = ddl->ddl_context;
+	struct ddl_encoder_data  *encoder = &(ddl->codec_data.encoder);
+	struct vcd_frame_data    *output_frame = &(ddl->output_frame.vcd_frm);
+	u32 bottom_frame_tag;
+	u8  *input_buffer_address = NULL;
+
+	vidc_sm_get_frame_tags(&ddl->shared_mem[ddl->command_channel],
+		&output_frame->ip_frm_tag, &bottom_frame_tag);
+	output_frame->data_len = encoder->enc_frame_info.enc_frame_size;
+	output_frame->flags |= VCD_FRAME_FLAG_ENDOFFRAME;
+	(void)ddl_get_encoded_frame(output_frame,
+		encoder->codec.codec, encoder->enc_frame_info.enc_frame);
+	ddl_process_encoder_metadata(ddl);
+	ddl_vidc_encode_dynamic_property(ddl, false);
+	ddl->input_frame.frm_trans_end = false;
+	input_buffer_address = ddl_context->dram_base_a.align_physical_addr +
+			encoder->enc_frame_info.enc_luma_address;
+	ddl_get_input_frame_from_pool(ddl, input_buffer_address);
+
+	ddl_context->ddl_callback(VCD_EVT_RESP_INPUT_DONE,
+		VCD_S_SUCCESS, &(ddl->input_frame),
+		sizeof(struct ddl_frame_data_tag),
+		(u32 *) ddl, ddl->client_data);
+
+	ddl->output_frame.frm_trans_end =
+		DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_EOS_DONE)
+			? false : true;
+
+	ddl_context->ddl_callback(VCD_EVT_RESP_OUTPUT_DONE,
+		VCD_S_SUCCESS, &(ddl->output_frame),
+		sizeof(struct ddl_frame_data_tag),
+		(u32 *) ddl, ddl->client_data);
+
+}
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_metadata.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_metadata.c
new file mode 100644
index 0000000..3f54756
--- /dev/null
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_metadata.c
@@ -0,0 +1,505 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "vcd_ddl.h"
+#include "vcd_ddl_shared_mem.h"
+#include "vcd_ddl_metadata.h"
+
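+/*
+ * Return a pointer to the three-word header template (version, port,
+ * type) for the given metadata type inside the client's metadata input
+ * buffer.  Decoder templates follow a 33-word preamble, encoder
+ * templates a 2-word preamble.
+ */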
+static u32 *ddl_metadata_hdr_entry(struct ddl_client_context *ddl,
+	u32 meta_data)
+{
+	u32 skip_words = 0;
+	u32 *buffer;
+
+	if (ddl->decoding) {
+		buffer = (u32 *) ddl->codec_data.decoder.meta_data_input.
+			align_virtual_addr;
+		skip_words = 32 + 1;
+		buffer += skip_words;
+		switch (meta_data) {
+		default:
+		case VCD_METADATA_DATANONE:
+			skip_words = 0;
+		break;
+		case VCD_METADATA_QPARRAY:
+			skip_words = 3;
+		break;
+		case VCD_METADATA_CONCEALMB:
+			skip_words = 6;
+		break;
+		case VCD_METADATA_VC1:
+			skip_words = 9;
+		break;
+		case VCD_METADATA_SEI:
+			skip_words = 12;
+		break;
+		case VCD_METADATA_VUI:
+			skip_words = 15;
+		break;
+		case VCD_METADATA_PASSTHROUGH:
+			skip_words = 18;
+		break;
+		case VCD_METADATA_QCOMFILLER:
+			skip_words = 21;
+		break;
+		}
+	} else {
+		buffer = (u32 *) ddl->codec_data.encoder.meta_data_input.
+				align_virtual_addr;
+		skip_words = 2;
+		buffer += skip_words;
+		switch (meta_data) {
+		default:
+		case VCD_METADATA_DATANONE:
+			skip_words = 0;
+		break;
+		case VCD_METADATA_ENC_SLICE:
+			skip_words = 3;
+		break;
+		case VCD_METADATA_QCOMFILLER:
+			skip_words = 6;
+		break;
+		}
+	}
+	buffer += skip_words;
+	return buffer;
+}
+
+void ddl_set_default_meta_data_hdr(struct ddl_client_context *ddl)
+{
+	struct ddl_buf_addr *main_buffer =
+		&ddl->ddl_context->metadata_shared_input;
+	struct ddl_buf_addr *client_buffer;
+	u32 *hdr_entry;
+
+	if (ddl->decoding)
+		client_buffer = &(ddl->codec_data.decoder.meta_data_input);
+	else
+		client_buffer = &(ddl->codec_data.encoder.meta_data_input);
+	DDL_METADATA_CLIENT_INPUTBUF(main_buffer, client_buffer,
+		ddl->instance_id);
+	hdr_entry = ddl_metadata_hdr_entry(ddl, VCD_METADATA_QCOMFILLER);
+	hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101;
+	hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1;
+	hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_QCOMFILLER;
+	hdr_entry = ddl_metadata_hdr_entry(ddl, VCD_METADATA_DATANONE);
+	hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101;
+	hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1;
+	hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_DATANONE;
+	if (ddl->decoding) {
+		hdr_entry = ddl_metadata_hdr_entry(ddl, VCD_METADATA_QPARRAY);
+		hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101;
+		hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1;
+		hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_QPARRAY;
+		hdr_entry = ddl_metadata_hdr_entry(ddl,	VCD_METADATA_CONCEALMB);
+		hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101;
+		hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1;
+		hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_CONCEALMB;
+		hdr_entry = ddl_metadata_hdr_entry(ddl, VCD_METADATA_SEI);
+		hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101;
+		hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1;
+		hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_SEI;
+		hdr_entry = ddl_metadata_hdr_entry(ddl, VCD_METADATA_VUI);
+		hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101;
+		hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1;
+		hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_VUI;
+		hdr_entry = ddl_metadata_hdr_entry(ddl, VCD_METADATA_VC1);
+		hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101;
+		hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1;
+		hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_VC1;
+		hdr_entry = ddl_metadata_hdr_entry(ddl,
+			VCD_METADATA_PASSTHROUGH);
+		hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101;
+		hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1;
+		hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] =
+			VCD_METADATA_PASSTHROUGH;
+	} else {
+		hdr_entry = ddl_metadata_hdr_entry(ddl, VCD_METADATA_ENC_SLICE);
+		hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101;
+		hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1;
+		hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_ENC_SLICE;
+	}
+}
+
+static u32 ddl_supported_metadata_flag(struct ddl_client_context *ddl)
+{
+	u32 flag = 0;
+
+	if (ddl->decoding) {
+		enum vcd_codec codec =
+			ddl->codec_data.decoder.codec.codec;
+
+		flag |= (VCD_METADATA_CONCEALMB | VCD_METADATA_PASSTHROUGH |
+				VCD_METADATA_QPARRAY);
+		if (codec == VCD_CODEC_H264)
+			flag |= (VCD_METADATA_SEI | VCD_METADATA_VUI);
+		else if (codec == VCD_CODEC_VC1 ||
+			codec == VCD_CODEC_VC1_RCV)
+			flag |= VCD_METADATA_VC1;
+	} else
+		flag |= VCD_METADATA_ENC_SLICE;
+	return flag;
+}
+
+void ddl_set_default_metadata_flag(struct ddl_client_context *ddl)
+{
+	if (ddl->decoding)
+		ddl->codec_data.decoder.meta_data_enable_flag = 0;
+	else
+		ddl->codec_data.encoder.meta_data_enable_flag = 0;
+}
+
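+/*
+ * The decoder output buffer requirement is grown by a "suffix" large enough
+ * to hold every enabled metadata type: each type contributes a header plus
+ * its payload, individually 8-byte aligned via DDL_METADATA_ALIGNSIZE, and
+ * the total is terminated by an EXTRADATANONE record plus extra padding
+ * before the whole suffix is aligned once more.
+ */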
+void ddl_set_default_decoder_metadata_buffer_size(struct ddl_decoder_data
+	*decoder, struct vcd_property_frame_size *frame_size,
+	struct vcd_buffer_requirement *output_buf_req)
+{
+	u32 flag = decoder->meta_data_enable_flag;
+	u32 suffix = 0, size = 0;
+
+	if (!flag) {
+		decoder->suffix = 0;
+		return;
+	}
+	if (flag & VCD_METADATA_QPARRAY) {
+		u32 num_of_mb = DDL_NO_OF_MB(frame_size->width,
+			frame_size->height);
+
+		size = DDL_METADATA_HDR_SIZE;
+		size += num_of_mb;
+		DDL_METADATA_ALIGNSIZE(size);
+		suffix += size;
+	}
+	if (flag & VCD_METADATA_CONCEALMB) {
+		u32 num_of_mb = DDL_NO_OF_MB(frame_size->width,
+			frame_size->height);
+		size = DDL_METADATA_HDR_SIZE + (num_of_mb >> 3);
+		DDL_METADATA_ALIGNSIZE(size);
+		suffix += size;
+	}
+	if (flag & VCD_METADATA_VC1) {
+		size = DDL_METADATA_HDR_SIZE;
+		size += DDL_METADATA_VC1_PAYLOAD_SIZE;
+		DDL_METADATA_ALIGNSIZE(size);
+		suffix += size;
+	}
+	if (flag & VCD_METADATA_SEI) {
+		size = DDL_METADATA_HDR_SIZE;
+		size += DDL_METADATA_SEI_PAYLOAD_SIZE;
+		DDL_METADATA_ALIGNSIZE(size);
+		suffix += (size * DDL_METADATA_SEI_MAX);
+	}
+	if (flag & VCD_METADATA_VUI) {
+		size = DDL_METADATA_HDR_SIZE;
+		size += DDL_METADATA_VUI_PAYLOAD_SIZE;
+		DDL_METADATA_ALIGNSIZE(size);
+		suffix += (size);
+	}
+	if (flag & VCD_METADATA_PASSTHROUGH) {
+		size = DDL_METADATA_HDR_SIZE;
+		size += DDL_METADATA_PASSTHROUGH_PAYLOAD_SIZE;
+		DDL_METADATA_ALIGNSIZE(size);
+		suffix += (size);
+	}
+	size = DDL_METADATA_EXTRADATANONE_SIZE;
+	DDL_METADATA_ALIGNSIZE(size);
+	suffix += (size);
+	suffix += DDL_METADATA_EXTRAPAD_SIZE;
+	DDL_METADATA_ALIGNSIZE(suffix);
+	decoder->suffix = suffix;
+	output_buf_req->sz += suffix;
+	DDL_MSG_LOW("metadata output buf size : %d", suffix);
+}
+
+void ddl_set_default_encoder_metadata_buffer_size(struct ddl_encoder_data
+	*encoder)
+{
+	u32 flag = encoder->meta_data_enable_flag;
+	u32 suffix = 0, size = 0;
+
+	if (!flag) {
+		encoder->suffix = 0;
+		return;
+	}
+	if (flag & VCD_METADATA_ENC_SLICE) {
+		u32 num_of_mb = DDL_NO_OF_MB(encoder->frame_size.width,
+			encoder->frame_size.height);
+		size = DDL_METADATA_HDR_SIZE;
+		size += 4;
+		size += (num_of_mb << 3);
+		DDL_METADATA_ALIGNSIZE(size);
+		suffix += size;
+	}
+	size = DDL_METADATA_EXTRADATANONE_SIZE;
+	DDL_METADATA_ALIGNSIZE(size);
+	suffix += (size);
+	suffix += DDL_METADATA_EXTRAPAD_SIZE;
+	DDL_METADATA_ALIGNSIZE(suffix);
+	encoder->suffix = suffix;
+	encoder->output_buf_req.sz += suffix;
+}
+
+u32 ddl_set_metadata_params(struct ddl_client_context *ddl,
+	struct vcd_property_hdr *property_hdr, void *property_value)
+{
+	u32  vcd_status = VCD_ERR_ILLEGAL_PARM;
+	if (property_hdr->prop_id == VCD_I_METADATA_ENABLE) {
+		struct vcd_property_meta_data_enable *meta_data_enable =
+			(struct vcd_property_meta_data_enable *) property_value;
+		u32 *meta_data_enable_flag;
+		enum vcd_codec codec;
+
+		if (ddl->decoding) {
+			meta_data_enable_flag =
+			&(ddl->codec_data.decoder.meta_data_enable_flag);
+			codec = ddl->codec_data.decoder.codec.codec;
+		} else {
+			meta_data_enable_flag =
+				&ddl->codec_data.encoder.meta_data_enable_flag;
+			codec = ddl->codec_data.encoder.codec.codec;
+		}
+		if (sizeof(struct vcd_property_meta_data_enable) ==
+			property_hdr->sz &&
+			DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN) && codec) {
+			u32 flag = ddl_supported_metadata_flag(ddl);
+			flag &= (meta_data_enable->meta_data_enable_flag);
+			if (flag)
+				flag |= DDL_METADATA_MANDATORY;
+			if (*meta_data_enable_flag != flag) {
+				*meta_data_enable_flag = flag;
+				if (ddl->decoding)
+					ddl_set_default_decoder_buffer_req(
+						&ddl->codec_data.decoder, true);
+				else
+					ddl_set_default_encoder_buffer_req(
+						&ddl->codec_data.encoder);
+			}
+			vcd_status = VCD_S_SUCCESS;
+		}
+	} else if (property_hdr->prop_id == VCD_I_METADATA_HEADER) {
+		struct vcd_property_metadata_hdr *hdr =
+			(struct vcd_property_metadata_hdr *) property_value;
+
+		if (sizeof(struct vcd_property_metadata_hdr) ==
+			property_hdr->sz) {
+			u32 flag = ddl_supported_metadata_flag(ddl);
+
+			flag |= DDL_METADATA_MANDATORY;
+			flag &= hdr->meta_data_id;
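+			/*
+			 * At most one metadata type may be addressed per
+			 * call: (flag & (flag - 1)) is non-zero only when
+			 * more than one bit survives the mask above.
+			 */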
+			if (!(flag & (flag - 1))) {
+				u32 *hdr_entry = ddl_metadata_hdr_entry(ddl,
+					flag);
+				hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] =
+					hdr->version;
+				hdr_entry[DDL_METADATA_HDR_PORT_INDEX] =
+					hdr->port_index;
+				hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] =
+					hdr->type;
+				vcd_status = VCD_S_SUCCESS;
+			}
+		}
+	}
+	return vcd_status;
+}
+
+u32 ddl_get_metadata_params(struct ddl_client_context *ddl,
+	struct vcd_property_hdr *property_hdr, void *property_value)
+{
+	u32 vcd_status = VCD_ERR_ILLEGAL_PARM;
+	if (property_hdr->prop_id == VCD_I_METADATA_ENABLE &&
+		sizeof(struct vcd_property_meta_data_enable) ==
+		property_hdr->sz) {
+		struct vcd_property_meta_data_enable *meta_data_enable =
+			(struct vcd_property_meta_data_enable *) property_value;
+
+		meta_data_enable->meta_data_enable_flag =
+			((ddl->decoding) ?
+			(ddl->codec_data.decoder.meta_data_enable_flag) :
+			(ddl->codec_data.encoder.meta_data_enable_flag));
+		vcd_status = VCD_S_SUCCESS;
+	} else if (property_hdr->prop_id == VCD_I_METADATA_HEADER &&
+		sizeof(struct vcd_property_metadata_hdr) ==
+		property_hdr->sz) {
+		struct vcd_property_metadata_hdr *hdr =
+			(struct vcd_property_metadata_hdr *) property_value;
+		u32 flag = ddl_supported_metadata_flag(ddl);
+
+		flag |= DDL_METADATA_MANDATORY;
+		flag &= hdr->meta_data_id;
+		if (!(flag & (flag - 1))) {
+			u32 *hdr_entry = ddl_metadata_hdr_entry(ddl, flag);
+			hdr->version =
+				hdr_entry[DDL_METADATA_HDR_VERSION_INDEX];
+			hdr->port_index =
+				hdr_entry[DDL_METADATA_HDR_PORT_INDEX];
+			hdr->type = hdr_entry[DDL_METADATA_HDR_TYPE_INDEX];
+			vcd_status = VCD_S_SUCCESS;
+		}
+	}
+	return vcd_status;
+}
+
+void ddl_vidc_metadata_enable(struct ddl_client_context *ddl)
+{
+	u32 flag, extradata_enable = false;
+	u32 qp_enable = false, concealed_mb_enable = false;
+	u32 vc1_param_enable = false, sei_nal_enable = false;
+	u32 vui_enable = false, enc_slice_size_enable = false;
+
+	if (ddl->decoding)
+		flag = ddl->codec_data.decoder.meta_data_enable_flag;
+	else
+		flag = ddl->codec_data.encoder.meta_data_enable_flag;
+	if (flag) {
+		if (flag & VCD_METADATA_QPARRAY)
+			qp_enable = true;
+		if (flag & VCD_METADATA_CONCEALMB)
+			concealed_mb_enable = true;
+		if (flag & VCD_METADATA_VC1)
+			vc1_param_enable = true;
+		if (flag & VCD_METADATA_SEI)
+			sei_nal_enable = true;
+		if (flag & VCD_METADATA_VUI)
+			vui_enable = true;
+		if (flag & VCD_METADATA_ENC_SLICE)
+			enc_slice_size_enable = true;
+		if (flag & VCD_METADATA_PASSTHROUGH)
+			extradata_enable = true;
+	}
+
+	DDL_MSG_LOW("sei_nal_enable : %d", sei_nal_enable);
+	vidc_sm_set_metadata_enable(&ddl->shared_mem
+		[ddl->command_channel], extradata_enable, qp_enable,
+		concealed_mb_enable, vc1_param_enable, sei_nal_enable,
+		vui_enable, enc_slice_size_enable);
+}
+
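+/*
+ * For the encoder, metadata is written by the hardware into the top of the
+ * stream buffer: a suffix-sized region is carved out from the end of the
+ * buffer (aligned down), and the shared metadata input buffer is loaded
+ * with the suffix size followed by the DDR offset of that region.  The
+ * function returns the new, reduced end of the usable bitstream area.
+ */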
+u32 ddl_vidc_encode_set_metadata_output_buf(struct ddl_client_context *ddl)
+{
+	struct ddl_encoder_data *encoder = &ddl->codec_data.encoder;
+	struct vcd_frame_data *stream = &ddl->output_frame.vcd_frm;
+	struct ddl_context *ddl_context;
+	u32 ext_buffer_end, hw_metadata_start;
+	u32 *buffer;
+
+	ddl_context = ddl_get_context();
+	ext_buffer_end = (u32) stream->physical + stream->alloc_len;
+	if (!encoder->meta_data_enable_flag) {
+		ext_buffer_end &= ~(DDL_STREAMBUF_ALIGN_GUARD_BYTES);
+		return ext_buffer_end;
+	}
+	hw_metadata_start = (ext_buffer_end - encoder->suffix) &
+		~(DDL_STREAMBUF_ALIGN_GUARD_BYTES);
+	ext_buffer_end = (hw_metadata_start - 1) &
+		~(DDL_STREAMBUF_ALIGN_GUARD_BYTES);
+	buffer = (u32 *) encoder->meta_data_input.align_virtual_addr;
+	*buffer++ = encoder->suffix;
+	*buffer  = DDL_OFFSET(ddl_context->dram_base_a.align_physical_addr,
+		hw_metadata_start);
+	encoder->meta_data_offset = hw_metadata_start - (u32) stream->physical;
+	return ext_buffer_end;
+}
+
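+/*
+ * For the decoder, metadata lands after the (aligned) YUV data in each
+ * output buffer.  The shared metadata input buffer is programmed with the
+ * suffix size followed by, for every DPB entry, the DDR offset at which
+ * that buffer's metadata region starts.
+ */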
+void ddl_vidc_decode_set_metadata_output(struct ddl_decoder_data *decoder)
+{
+	struct ddl_context *ddl_context;
+	u32 loopc, yuv_size;
+	u32 *buffer;
+
+	if (!decoder->meta_data_enable_flag) {
+		decoder->meta_data_offset = 0;
+		return;
+	}
+	ddl_context = ddl_get_context();
+	yuv_size = ddl_get_yuv_buffer_size(&decoder->client_frame_size,
+		&decoder->buf_format, !decoder->progressive_only,
+		decoder->hdr.decoding, NULL);
+	decoder->meta_data_offset = DDL_ALIGN_SIZE(yuv_size,
+		DDL_LINEAR_BUF_ALIGN_GUARD_BYTES, DDL_LINEAR_BUF_ALIGN_MASK);
+	buffer = (u32 *) decoder->meta_data_input.align_virtual_addr;
+	*buffer++ = decoder->suffix;
+	DDL_MSG_LOW("Metadata offset & size : %d/%d",
+		decoder->meta_data_offset, decoder->suffix);
+	for (loopc = 0; loopc < decoder->dp_buf.no_of_dec_pic_buf;
+		++loopc) {
+		*buffer++ = (u32)(decoder->meta_data_offset + (u8 *)
+			DDL_OFFSET(ddl_context->dram_base_a.
+			align_physical_addr, decoder->dp_buf.
+			dec_pic_buffers[loopc].vcd_frm.physical));
+	}
+}
+
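+/*
+ * After the hardware finishes a frame, the gap between the end of the
+ * frame data (rounded up to a 4-byte boundary) and the start of the
+ * metadata region is turned into a QCOMFILLER record, so the client sees
+ * one contiguous, well-formed extradata chain.
+ */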
+void ddl_process_encoder_metadata(struct ddl_client_context *ddl)
+{
+	struct ddl_encoder_data *encoder = &(ddl->codec_data.encoder);
+	struct vcd_frame_data *out_frame =
+		&(ddl->output_frame.vcd_frm);
+	u32 *qfiller_hdr, *qfiller, start_addr;
+	u32 qfiller_size;
+	if (!encoder->meta_data_enable_flag) {
+		out_frame->flags &= ~(VCD_FRAME_FLAG_EXTRADATA);
+		return;
+	}
+	if (!encoder->enc_frame_info.meta_data_exists) {
+		out_frame->flags &= ~(VCD_FRAME_FLAG_EXTRADATA);
+		return;
+	}
+	out_frame->flags |= VCD_FRAME_FLAG_EXTRADATA;
+	start_addr = (u32) ((u8 *)out_frame->virtual + out_frame->offset);
+	qfiller = (u32 *)((out_frame->data_len +
+				start_addr + 3) & ~3);
+	qfiller_size = (u32)((encoder->meta_data_offset +
+		(u8 *) out_frame->virtual) - (u8 *) qfiller);
+	qfiller_hdr = ddl_metadata_hdr_entry(ddl, VCD_METADATA_QCOMFILLER);
+	*qfiller++ = qfiller_size;
+	*qfiller++ = qfiller_hdr[DDL_METADATA_HDR_VERSION_INDEX];
+	*qfiller++ = qfiller_hdr[DDL_METADATA_HDR_PORT_INDEX];
+	*qfiller++ = qfiller_hdr[DDL_METADATA_HDR_TYPE_INDEX];
+	*qfiller = (u32)(qfiller_size - DDL_METADATA_HDR_SIZE);
+}
+
+void ddl_process_decoder_metadata(struct ddl_client_context *ddl)
+{
+	struct ddl_decoder_data *decoder = &(ddl->codec_data.decoder);
+	struct vcd_frame_data *output_frame =
+		&(ddl->output_frame.vcd_frm);
+	u32 *qfiller_hdr, *qfiller;
+	u32 qfiller_size;
+
+	if (!decoder->meta_data_enable_flag) {
+		output_frame->flags &= ~(VCD_FRAME_FLAG_EXTRADATA);
+		return;
+	}
+	if (!decoder->meta_data_exists) {
+		output_frame->flags &= ~(VCD_FRAME_FLAG_EXTRADATA);
+		return;
+	}
+	DDL_MSG_LOW("processing metadata for decoder");
+	DDL_MSG_LOW("data_len/metadata_offset : %d/%d",
+		output_frame->data_len, decoder->meta_data_offset);
+	output_frame->flags |= VCD_FRAME_FLAG_EXTRADATA;
+	if (output_frame->data_len != decoder->meta_data_offset) {
+		qfiller = (u32 *)((u32)((output_frame->data_len +
+			output_frame->offset  +
+				(u8 *) output_frame->virtual) + 3) & ~3);
+		qfiller_size = (u32)((decoder->meta_data_offset +
+				(u8 *) output_frame->virtual) -
+				(u8 *) qfiller);
+		qfiller_hdr = ddl_metadata_hdr_entry(ddl,
+				VCD_METADATA_QCOMFILLER);
+		*qfiller++ = qfiller_size;
+		*qfiller++ = qfiller_hdr[DDL_METADATA_HDR_VERSION_INDEX];
+		*qfiller++ = qfiller_hdr[DDL_METADATA_HDR_PORT_INDEX];
+		*qfiller++ = qfiller_hdr[DDL_METADATA_HDR_TYPE_INDEX];
+		*qfiller = (u32)(qfiller_size - DDL_METADATA_HDR_SIZE);
+	}
+}
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_metadata.h b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_metadata.h
new file mode 100644
index 0000000..c63b6a9
--- /dev/null
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_metadata.h
@@ -0,0 +1,66 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VCD_DDL_METADATA_H_
+#define _VCD_DDL_METADATA_H_
+
+#define DDL_MAX_DEC_METADATATYPE          8
+#define DDL_MAX_ENC_METADATATYPE          3
+#define DDL_METADATA_EXTRAPAD_SIZE      256
+#define DDL_METADATA_HDR_SIZE            20
+#define DDL_METADATA_EXTRADATANONE_SIZE  24
+#define DDL_METADATA_ALIGNSIZE(x) ((x) = (((x) + 0x7) & ~0x7))
+#define DDL_METADATA_MANDATORY \
+	(VCD_METADATA_DATANONE | VCD_METADATA_QCOMFILLER)
+#define DDL_METADATA_VC1_PAYLOAD_SIZE         (38*4)
+#define DDL_METADATA_SEI_PAYLOAD_SIZE          100
+#define DDL_METADATA_SEI_MAX                     5
+#define DDL_METADATA_VUI_PAYLOAD_SIZE          256
+#define DDL_METADATA_PASSTHROUGH_PAYLOAD_SIZE   68
+#define DDL_METADATA_CLIENT_INPUTBUFSIZE       256
+#define DDL_METADATA_TOTAL_INPUTBUFSIZE \
+	(DDL_METADATA_CLIENT_INPUTBUFSIZE * VCD_MAX_NO_CLIENT)
+
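+/*
+ * Carve a per-client 256-byte slice out of the shared metadata input
+ * buffer, indexed by the channel/instance id passed in.
+ */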
+#define DDL_METADATA_CLIENT_INPUTBUF(main_buffer, client_buffer,\
+	channel_id) { \
+	(client_buffer)->align_physical_addr = (u8 *) \
+	((u8 *)(main_buffer)->align_physical_addr + \
+	(DDL_METADATA_CLIENT_INPUTBUFSIZE * channel_id)); \
+	(client_buffer)->align_virtual_addr = (u8 *) \
+	((u8 *)(main_buffer)->align_virtual_addr + \
+	(DDL_METADATA_CLIENT_INPUTBUFSIZE * channel_id)); \
+	(client_buffer)->virtual_base_addr = 0;	\
+	}
+
+#define DDL_METADATA_HDR_VERSION_INDEX 0
+#define DDL_METADATA_HDR_PORT_INDEX    1
+#define DDL_METADATA_HDR_TYPE_INDEX    2
+
+void ddl_set_default_meta_data_hdr(struct ddl_client_context *ddl);
+u32 ddl_get_metadata_params(struct ddl_client_context *ddl,
+	struct vcd_property_hdr *property_hdr, void *property_value);
+u32 ddl_set_metadata_params(struct ddl_client_context *ddl,
+	struct vcd_property_hdr *property_hdr, void *property_value);
+void ddl_set_default_metadata_flag(struct ddl_client_context *ddl);
+void ddl_set_default_decoder_metadata_buffer_size(struct ddl_decoder_data
+	*decoder, struct vcd_property_frame_size *frame_size,
+	struct vcd_buffer_requirement *output_buf_req);
+void ddl_set_default_encoder_metadata_buffer_size(
+	struct ddl_encoder_data *encoder);
+void ddl_vidc_metadata_enable(struct ddl_client_context *ddl);
+u32 ddl_vidc_encode_set_metadata_output_buf(struct ddl_client_context *ddl);
+void ddl_vidc_decode_set_metadata_output(struct ddl_decoder_data *decoder);
+void ddl_process_encoder_metadata(struct ddl_client_context *ddl);
+void ddl_process_decoder_metadata(struct ddl_client_context *ddl);
+
+#endif
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_properties.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_properties.c
new file mode 100644
index 0000000..b3656cd
--- /dev/null
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_properties.c
@@ -0,0 +1,1904 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "vcd_ddl.h"
+#include "vcd_ddl_metadata.h"
+
+static u32 ddl_set_dec_property(struct ddl_client_context *pddl,
+	struct vcd_property_hdr *property_hdr, void *property_value);
+static u32 ddl_set_enc_property(struct ddl_client_context *pddl,
+	struct vcd_property_hdr *property_hdr, void *property_value);
+static u32 ddl_get_dec_property(struct ddl_client_context *pddl,
+	struct vcd_property_hdr *property_hdr, void *property_value);
+static u32 ddl_get_enc_property(struct ddl_client_context *pddl,
+	struct vcd_property_hdr *property_hdr, void *property_value);
+static u32 ddl_set_enc_dynamic_property(struct ddl_client_context *ddl,
+	struct vcd_property_hdr *property_hdr, void *property_value);
+static void ddl_set_default_enc_property(struct ddl_client_context *ddl);
+static void ddl_set_default_enc_profile(
+	struct ddl_encoder_data *encoder);
+static void ddl_set_default_enc_level(struct ddl_encoder_data *encoder);
+static void ddl_set_default_enc_vop_timing(
+	struct ddl_encoder_data *encoder);
+static void ddl_set_default_enc_intra_period(
+	struct ddl_encoder_data *encoder);
+static void ddl_set_default_enc_rc_params(
+	struct ddl_encoder_data *encoder);
+static u32 ddl_valid_buffer_requirement(
+	struct vcd_buffer_requirement *original_buf_req,
+	struct vcd_buffer_requirement *req_buf_req);
+static u32 ddl_decoder_min_num_dpb(struct ddl_decoder_data *decoder);
+static u32 ddl_set_dec_buffers(struct ddl_decoder_data *decoder,
+	struct ddl_property_dec_pic_buffers *dpb);
+
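+/*
+ * Illustrative sketch only (not part of this change; caller names are
+ * hypothetical): a client that wants SEI metadata from an H.264 decode
+ * session, while still in the DDL_CLIENT_OPEN state, would do roughly:
+ *
+ *	struct vcd_property_hdr hdr;
+ *	struct vcd_property_meta_data_enable enable;
+ *
+ *	hdr.prop_id = VCD_I_METADATA_ENABLE;
+ *	hdr.sz = sizeof(enable);
+ *	enable.meta_data_enable_flag = VCD_METADATA_SEI;
+ *	ddl_set_property(ddl_handle, &hdr, &enable);
+ *
+ * ddl_set_metadata_params() (vcd_ddl_metadata.c) masks the request against
+ * ddl_supported_metadata_flag() and ORs in the mandatory
+ * DATANONE/QCOMFILLER records.
+ */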
+u32 ddl_set_property(u32 *ddl_handle,
+	struct vcd_property_hdr *property_hdr, void *property_value)
+{
+	struct ddl_context *ddl_context;
+	struct ddl_client_context *ddl =
+		(struct ddl_client_context *) ddl_handle;
+	u32 vcd_status;
+
+	DDL_MSG_HIGH("ddl_set_property");
+	if (!property_hdr || !property_value) {
+		DDL_MSG_ERROR("ddl_set_prop:Bad_argument");
+		return VCD_ERR_ILLEGAL_PARM;
+	}
+	ddl_context = ddl_get_context();
+	if (!DDL_IS_INITIALIZED(ddl_context)) {
+		DDL_MSG_ERROR("ddl_set_prop:Not_inited");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+	if (!ddl) {
+		DDL_MSG_ERROR("ddl_set_prop:Bad_handle");
+		return VCD_ERR_BAD_HANDLE;
+	}
+	if (ddl->decoding)
+		vcd_status = ddl_set_dec_property(ddl, property_hdr,
+				property_value);
+	else
+		vcd_status = ddl_set_enc_property(ddl, property_hdr,
+				property_value);
+	if (vcd_status)
+		DDL_MSG_ERROR("ddl_set_prop:FAILED");
+	return vcd_status;
+}
+
+u32 ddl_get_property(u32 *ddl_handle,
+	struct vcd_property_hdr *property_hdr, void *property_value)
+{
+	struct ddl_context *ddl_context;
+	struct ddl_client_context *ddl =
+		(struct ddl_client_context *) ddl_handle;
+	u32 vcd_status = VCD_ERR_ILLEGAL_PARM;
+
+	DDL_MSG_HIGH("ddl_get_property");
+	if (!property_hdr || !property_value)
+		return VCD_ERR_ILLEGAL_PARM;
+	if (property_hdr->prop_id == DDL_I_CAPABILITY) {
+		if (sizeof(struct ddl_property_capability) ==
+			property_hdr->sz) {
+			struct ddl_property_capability *ddl_capability =
+				(struct ddl_property_capability *)
+				property_value;
+
+			ddl_capability->max_num_client = VCD_MAX_NO_CLIENT;
+			ddl_capability->exclusive = VCD_COMMAND_EXCLUSIVE;
+			ddl_capability->frame_command_depth =
+				VCD_FRAME_COMMAND_DEPTH;
+			ddl_capability->general_command_depth =
+				VCD_GENEVIDC_COMMAND_DEPTH;
+			ddl_capability->ddl_time_out_in_ms =
+				DDL_HW_TIMEOUT_IN_MS;
+			vcd_status = VCD_S_SUCCESS;
+		}
+		return vcd_status;
+	}
+	ddl_context = ddl_get_context();
+	if (!DDL_IS_INITIALIZED(ddl_context))
+		return VCD_ERR_ILLEGAL_OP;
+	if (!ddl)
+		return VCD_ERR_BAD_HANDLE;
+	if (ddl->decoding)
+		vcd_status = ddl_get_dec_property(ddl, property_hdr,
+				property_value);
+	else
+		vcd_status = ddl_get_enc_property(ddl, property_hdr,
+				property_value);
+	if (vcd_status)
+		DDL_MSG_ERROR("ddl_get_prop:FAILED");
+	else
+		DDL_MSG_MED("ddl_get_prop:SUCCESS");
+	return vcd_status;
+}
+
+u32 ddl_decoder_ready_to_start(struct ddl_client_context *ddl,
+	struct vcd_sequence_hdr  *header)
+{
+	struct ddl_decoder_data *decoder =
+		&(ddl->codec_data.decoder);
+
+	if (!decoder->codec.codec) {
+		DDL_MSG_ERROR("ddl_dec_start_check:Codec_not_set");
+		return false;
+	}
+	if ((!header) && (!decoder->client_frame_size.height ||
+		!decoder->client_frame_size.width)) {
+		DDL_MSG_ERROR("ddl_dec_start_check:"
+			"Client_height_width_default");
+		return false;
+	}
+	return true;
+}
+
+u32 ddl_encoder_ready_to_start(struct ddl_client_context *ddl)
+{
+	struct ddl_encoder_data *encoder = &(ddl->codec_data.encoder);
+
+	if (!encoder->codec.codec || !encoder->frame_size.height ||
+		!encoder->frame_size.width ||
+		!encoder->frame_rate.fps_denominator ||
+		!encoder->frame_rate.fps_numerator ||
+		!encoder->target_bit_rate.target_bitrate)
+		return false;
+	if (encoder->frame_rate.fps_numerator >
+		(encoder->frame_rate.fps_denominator *
+		encoder->vop_timing.vop_time_resolution)) {
+		DDL_MSG_ERROR("ResVsFrameRateFailed!");
+		return false;
+	}
+	if (encoder->profile.profile == VCD_PROFILE_H264_BASELINE &&
+		encoder->entropy_control.entropy_sel == VCD_ENTROPY_SEL_CABAC) {
+		DDL_MSG_ERROR("H264BaseLineCABAC!!");
+		return false;
+	}
+	return true;
+}
+
+static u32 ddl_set_dec_property(struct ddl_client_context *ddl,
+	struct vcd_property_hdr *property_hdr, void *property_value)
+{
+	struct ddl_decoder_data *decoder = &(ddl->codec_data.decoder);
+	u32  vcd_status = VCD_ERR_ILLEGAL_PARM ;
+
+	switch (property_hdr->prop_id) {
+	case DDL_I_DPB_RELEASE:
+		if ((sizeof(struct ddl_frame_data_tag) ==
+			property_hdr->sz) &&
+			(decoder->dp_buf.no_of_dec_pic_buf))
+			vcd_status = ddl_decoder_dpb_transact(decoder,
+				(struct ddl_frame_data_tag *)
+				property_value, DDL_DPB_OP_MARK_FREE);
+	break;
+	case DDL_I_DPB:
+	{
+		struct ddl_property_dec_pic_buffers *dpb =
+		(struct ddl_property_dec_pic_buffers *) property_value;
+
+		if ((sizeof(struct ddl_property_dec_pic_buffers) ==
+			property_hdr->sz) &&
+			(DDLCLIENT_STATE_IS(ddl,
+			DDL_CLIENT_WAIT_FOR_INITCODEC) ||
+			DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPB)) &&
+			(dpb->no_of_dec_pic_buf ==
+			decoder->client_output_buf_req.actual_count))
+			vcd_status = ddl_set_dec_buffers(decoder, dpb);
+	}
+	break;
+	case DDL_I_REQ_OUTPUT_FLUSH:
+		if (sizeof(u32) == property_hdr->sz) {
+			decoder->dynamic_prop_change |=
+				DDL_DEC_REQ_OUTPUT_FLUSH;
+			decoder->dpb_mask.client_mask = 0;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case DDL_I_INPUT_BUF_REQ:
+	{
+		struct vcd_buffer_requirement *buffer_req =
+			(struct vcd_buffer_requirement *)property_value;
+
+		if (sizeof(struct vcd_buffer_requirement) ==
+			property_hdr->sz &&
+			(DDLCLIENT_STATE_IS(ddl,
+			DDL_CLIENT_WAIT_FOR_INITCODEC) ||
+			DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPB) ||
+			DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) &&
+			(ddl_valid_buffer_requirement(
+			&decoder->min_input_buf_req, buffer_req))) {
+			decoder->client_input_buf_req = *buffer_req;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	}
+	break;
+	case DDL_I_OUTPUT_BUF_REQ:
+	{
+		struct vcd_buffer_requirement *buffer_req =
+			(struct vcd_buffer_requirement *)property_value;
+
+		if (sizeof(struct vcd_buffer_requirement) ==
+			property_hdr->sz &&
+			(DDLCLIENT_STATE_IS(ddl,
+			DDL_CLIENT_WAIT_FOR_INITCODEC) ||
+			DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPB) ||
+			DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) &&
+			(ddl_valid_buffer_requirement(
+			&decoder->min_output_buf_req, buffer_req))) {
+				decoder->client_output_buf_req =
+					*buffer_req;
+				vcd_status = VCD_S_SUCCESS;
+		}
+	}
+	break;
+	case VCD_I_CODEC:
+	{
+		struct vcd_property_codec *codec =
+			(struct vcd_property_codec *)property_value;
+		if (sizeof(struct vcd_property_codec) ==
+			property_hdr->sz &&
+			DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN) &&
+			ddl_codec_type_transact(ddl, false,
+			codec->codec)) {
+			if (decoder->codec.codec != codec->codec) {
+				decoder->codec = *codec;
+				ddl_set_default_dec_property(ddl);
+			}
+			vcd_status = VCD_S_SUCCESS;
+		}
+	}
+	break;
+	case VCD_I_POST_FILTER:
+		if (sizeof(struct vcd_property_post_filter) ==
+			property_hdr->sz &&
+			DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN) && (
+			decoder->codec.codec == VCD_CODEC_MPEG4 ||
+			decoder->codec.codec == VCD_CODEC_MPEG2)) {
+			decoder->post_filter =
+				*(struct vcd_property_post_filter *)
+				property_value;
+			vcd_status = VCD_S_SUCCESS;
+	}
+	break;
+	case VCD_I_FRAME_SIZE:
+	{
+		struct vcd_property_frame_size *frame_size =
+		(struct vcd_property_frame_size *) property_value;
+		if ((sizeof(struct vcd_property_frame_size) ==
+			property_hdr->sz) &&
+			(DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) &&
+			(DDL_ALLOW_DEC_FRAMESIZE(frame_size->width,
+			frame_size->height))) {
+			if (decoder->client_frame_size.height !=
+				frame_size->height ||
+				decoder->client_frame_size.width !=
+				frame_size->width) {
+				decoder->client_frame_size = *frame_size;
+				ddl_set_default_decoder_buffer_req(decoder,
+					true);
+			}
+			vcd_status = VCD_S_SUCCESS;
+		}
+	}
+	break;
+	case VCD_I_BUFFER_FORMAT:
+	{
+		struct vcd_property_buffer_format *tile =
+			(struct vcd_property_buffer_format *)
+			property_value;
+		if (sizeof(struct vcd_property_buffer_format) ==
+			property_hdr->sz &&
+			DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN) &&
+			tile->buffer_format == VCD_BUFFER_FORMAT_TILE_4x2) {
+			if (tile->buffer_format !=
+				decoder->buf_format.buffer_format) {
+				decoder->buf_format = *tile;
+				ddl_set_default_decoder_buffer_req(
+					decoder, true);
+			}
+			vcd_status = VCD_S_SUCCESS;
+		}
+	}
+	break;
+	case VCD_I_H264_MV_BUFFER:
+	{
+		int index, buffer_size;
+		u8 *phys_addr;
+		u8 *virt_addr;
+		struct vcd_property_h264_mv_buffer *mv_buff =
+			(struct vcd_property_h264_mv_buffer *)
+		property_value;
+		DDL_MSG_LOW("Entered VCD_I_H264_MV_BUFFER Virt: %p, Phys %p,"
+		DDL_MSG_LOW("Entered VCD_I_H264_MV_BUFFER Virt: %p, Phys %p, "
+					mv_buff->kernel_virtual_addr,
+					mv_buff->physical_addr,
+					mv_buff->pmem_fd,
+					mv_buff->size, mv_buff->count);
+		if ((property_hdr->sz == sizeof(struct
+			vcd_property_h264_mv_buffer)) &&
+			(DDLCLIENT_STATE_IS(ddl,
+			DDL_CLIENT_WAIT_FOR_INITCODEC) ||
+			DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPB) ||
+			DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN))) {
+			phys_addr = mv_buff->physical_addr;
+			virt_addr = mv_buff->kernel_virtual_addr;
+			buffer_size = mv_buff->size/mv_buff->count;
+
+			for (index = 0; index < mv_buff->count; index++) {
+				ddl->codec_data.decoder.hw_bufs.
+					h264_mv[index].align_physical_addr
+					= phys_addr;
+				ddl->codec_data.decoder.hw_bufs.
+					h264_mv[index].align_virtual_addr
+					= virt_addr;
+				ddl->codec_data.decoder.hw_bufs.
+					h264_mv[index].buffer_size
+					= buffer_size;
+				ddl->codec_data.decoder.hw_bufs.
+					h264_mv[index].physical_base_addr
+					= phys_addr;
+				ddl->codec_data.decoder.hw_bufs.
+					h264_mv[index].virtual_base_addr
+					= virt_addr;
+				DDL_MSG_LOW("Assigned %d buffer for "
+							"virt: %p, phys %p for "
+							"h264_mv_buffers "
+							"of size: %d\n",
+							index, virt_addr,
+							phys_addr, buffer_size);
+				phys_addr += buffer_size;
+				virt_addr += buffer_size;
+			}
+			vcd_status = VCD_S_SUCCESS;
+		}
+	}
+	break;
+	case VCD_I_FREE_H264_MV_BUFFER:
+		{
+			memset(&decoder->hw_bufs.h264_mv, 0, sizeof(struct
+					ddl_buf_addr) * DDL_MAX_BUFFER_COUNT);
+			vcd_status = VCD_S_SUCCESS;
+		}
+		break;
+	case VCD_I_OUTPUT_ORDER:
+		{
+			if (sizeof(u32) == property_hdr->sz &&
+				DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) {
+					decoder->output_order =
+						*(u32 *)property_value;
+					vcd_status = VCD_S_SUCCESS;
+			}
+		}
+		break;
+	case VCD_I_DEC_PICTYPE:
+		{
+			if ((sizeof(u32) == property_hdr->sz) &&
+				DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) {
+				decoder->idr_only_decoding =
+					*(u32 *)property_value;
+				ddl_set_default_decoder_buffer_req(
+						decoder, true);
+				vcd_status = VCD_S_SUCCESS;
+			}
+		}
+		break;
+	case VCD_I_METADATA_ENABLE:
+	case VCD_I_METADATA_HEADER:
+		DDL_MSG_MED("Meta Data Interface is Requested");
+		vcd_status = ddl_set_metadata_params(ddl, property_hdr,
+			property_value);
+		vcd_status = VCD_S_SUCCESS;
+		break;
+	case VCD_I_FRAME_RATE:
+		vcd_status = VCD_S_SUCCESS;
+		break;
+	case VCD_I_CONT_ON_RECONFIG:
+	{
+		DDL_MSG_LOW("Set property VCD_I_CONT_ON_RECONFIG\n");
+		if (sizeof(u32) == property_hdr->sz &&
+			DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) {
+				decoder->cont_mode = *(u32 *)property_value;
+				vcd_status = VCD_S_SUCCESS;
+		}
+	}
+	break;
+	default:
+		vcd_status = VCD_ERR_ILLEGAL_OP;
+		break;
+	}
+	return vcd_status;
+}
+
+static u32 ddl_check_valid_enc_level(struct vcd_property_codec *codec,
+	struct vcd_property_profile *profile,
+	struct vcd_property_level *level)
+{
+	u32 status = false;
+
+	if (codec && profile && level) {
+		switch (codec->codec) {
+		case VCD_CODEC_MPEG4:
+			status = (profile->profile ==
+				VCD_PROFILE_MPEG4_SP) &&
+				(level->level >= VCD_LEVEL_MPEG4_0) &&
+				(level->level <= VCD_LEVEL_MPEG4_6) &&
+				(VCD_LEVEL_MPEG4_3b != level->level);
+			status = status ||
+				((profile->profile ==
+				VCD_PROFILE_MPEG4_ASP) &&
+				(level->level >= VCD_LEVEL_MPEG4_0) &&
+				(level->level <= VCD_LEVEL_MPEG4_5));
+		break;
+		case VCD_CODEC_H264:
+		status = (level->level >= VCD_LEVEL_H264_1) &&
+				(level->level <= VCD_LEVEL_H264_4);
+		break;
+		case VCD_CODEC_H263:
+		status = (level->level >= VCD_LEVEL_H263_10) &&
+			(level->level <= VCD_LEVEL_H263_70);
+		break;
+		default:
+		break;
+		}
+	}
+	return status;
+}
+
+static u32 ddl_set_enc_property(struct ddl_client_context *ddl,
+	struct vcd_property_hdr *property_hdr,
+	void *property_value)
+{
+	struct ddl_encoder_data *encoder =
+		&(ddl->codec_data.encoder);
+	u32 vcd_status = VCD_ERR_ILLEGAL_PARM;
+
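+	/*
+	 * Dynamic properties are tried first.  If the property is not a
+	 * dynamic one (the dynamic setter returns VCD_ERR_ILLEGAL_OP) and
+	 * the client is still in the OPEN state, fall through to the static
+	 * property switch below; any other failure is rejected here.
+	 */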
+	if (DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME) ||
+		DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME_DONE) ||
+		DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) {
+		vcd_status = ddl_set_enc_dynamic_property(ddl,
+				property_hdr, property_value);
+	}
+	if (vcd_status) {
+		if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN) ||
+			vcd_status != VCD_ERR_ILLEGAL_OP) {
+			DDL_MSG_ERROR("ddl_set_enc_property:"
+				"Fails_as_not_in_open_state");
+			return VCD_ERR_ILLEGAL_OP;
+		}
+	} else
+		return vcd_status;
+
+	switch (property_hdr->prop_id) {
+	case VCD_I_FRAME_SIZE:
+	{
+		struct vcd_property_frame_size *frame_size =
+		(struct vcd_property_frame_size *) property_value;
+		if ((sizeof(struct vcd_property_frame_size) ==
+			property_hdr->sz) &&
+			(DDL_ALLOW_ENC_FRAMESIZE(frame_size->width,
+			frame_size->height))) {
+			if (encoder->frame_size.height != frame_size->height ||
+				encoder->frame_size.width !=
+				frame_size->width) {
+				ddl_calculate_stride(frame_size, false);
+				encoder->frame_size = *frame_size;
+				ddl_set_default_encoder_buffer_req(encoder);
+			}
+			vcd_status = VCD_S_SUCCESS;
+		}
+	}
+	break;
+	case VCD_I_CODEC:
+	{
+		struct vcd_property_codec *codec =
+		(struct vcd_property_codec *) property_value;
+		if ((sizeof(struct vcd_property_codec) ==
+		property_hdr->sz) &&
+		(ddl_codec_type_transact(ddl, false, codec->codec))) {
+			if (codec->codec != encoder->codec.codec) {
+				encoder->codec = *codec;
+				ddl_set_default_enc_property(ddl);
+			}
+			vcd_status = VCD_S_SUCCESS;
+		}
+	}
+	break;
+	case VCD_I_REQ_IFRAME:
+		vcd_status = VCD_S_SUCCESS;
+		break;
+	case VCD_I_INTRA_PERIOD:
+	{
+		struct vcd_property_i_period *i_period =
+			(struct vcd_property_i_period *)property_value;
+		if (sizeof(struct vcd_property_i_period) ==
+			property_hdr->sz &&
+			i_period->b_frames <= DDL_MAX_NUM_OF_B_FRAME) {
+			encoder->i_period = *i_period;
+			encoder->client_input_buf_req.min_count =
+				i_period->b_frames + 1;
+			encoder->client_input_buf_req.actual_count =
+				DDL_MAX(encoder->client_input_buf_req.\
+				actual_count, encoder->\
+				client_input_buf_req.min_count);
+			encoder->client_output_buf_req.min_count =
+				i_period->b_frames + 2;
+			encoder->client_output_buf_req.actual_count =
+				DDL_MAX(encoder->client_output_buf_req.\
+				actual_count, encoder->\
+				client_output_buf_req.min_count);
+			ddl->extra_output_buf_count =
+				i_period->b_frames - 1;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	}
+	break;
+	case VCD_I_PROFILE:
+	{
+		struct vcd_property_profile *profile =
+			(struct vcd_property_profile *)property_value;
+
+		if ((sizeof(struct vcd_property_profile) ==
+			property_hdr->sz) && ((
+			(encoder->codec.codec == VCD_CODEC_MPEG4) && (
+			profile->profile == VCD_PROFILE_MPEG4_SP ||
+			profile->profile == VCD_PROFILE_MPEG4_ASP)) ||
+			((encoder->codec.codec == VCD_CODEC_H264) &&
+			(profile->profile >= VCD_PROFILE_H264_BASELINE) &&
+			(profile->profile <= VCD_PROFILE_H264_HIGH)) ||
+			((encoder->codec.codec == VCD_CODEC_H263) &&
+			(profile->profile == VCD_PROFILE_H263_BASELINE)))) {
+			encoder->profile = *profile;
+			vcd_status = VCD_S_SUCCESS;
+			if (profile->profile == VCD_PROFILE_H264_BASELINE)
+				encoder->entropy_control.entropy_sel =
+					VCD_ENTROPY_SEL_CAVLC;
+			else
+				encoder->entropy_control.entropy_sel =
+					VCD_ENTROPY_SEL_CABAC;
+		}
+	}
+	break;
+	case VCD_I_LEVEL:
+	{
+		struct vcd_property_level *level =
+			(struct vcd_property_level *) property_value;
+
+		if ((sizeof(struct vcd_property_level) ==
+			property_hdr->sz) && (ddl_check_valid_enc_level
+			(&encoder->codec,
+			&encoder->profile, level))) {
+			encoder->level = *level;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	}
+	break;
+	case VCD_I_MULTI_SLICE:
+	{
+		struct vcd_property_multi_slice *multi_slice =
+			(struct vcd_property_multi_slice *)
+				property_value;
+
+		switch (multi_slice->m_slice_sel) {
+		case VCD_MSLICE_OFF:
+			vcd_status = VCD_S_SUCCESS;
+		break;
+		case VCD_MSLICE_BY_GOB:
+			if (encoder->codec.codec == VCD_CODEC_H263)
+				vcd_status = VCD_S_SUCCESS;
+		break;
+		case VCD_MSLICE_BY_MB_COUNT:
+			if (multi_slice->m_slice_size >= 1 &&
+				(multi_slice->m_slice_size <=
+				DDL_NO_OF_MB(encoder->frame_size.width,
+				encoder->frame_size.height)))
+				vcd_status = VCD_S_SUCCESS;
+		break;
+		case VCD_MSLICE_BY_BYTE_COUNT:
+			if (multi_slice->m_slice_size > 0)
+				vcd_status = VCD_S_SUCCESS;
+		break;
+		default:
+		break;
+		}
+		if (sizeof(struct vcd_property_multi_slice) ==
+			property_hdr->sz && !vcd_status) {
+			encoder->multi_slice = *multi_slice;
+			if (multi_slice->m_slice_sel == VCD_MSLICE_OFF)
+				encoder->multi_slice.m_slice_size = 0;
+		}
+	}
+	break;
+	case VCD_I_RATE_CONTROL:
+	{
+		struct vcd_property_rate_control *rate_control =
+			(struct vcd_property_rate_control *)
+			property_value;
+		if (sizeof(struct vcd_property_rate_control) ==
+			property_hdr->sz &&
+			rate_control->rate_control >=
+			VCD_RATE_CONTROL_OFF &&
+			rate_control->rate_control <=
+			VCD_RATE_CONTROL_CBR_CFR) {
+			encoder->rc = *rate_control;
+			ddl_set_default_enc_rc_params(encoder);
+			vcd_status = VCD_S_SUCCESS;
+		}
+
+	}
+	break;
+	case VCD_I_SHORT_HEADER:
+		if (sizeof(struct vcd_property_short_header) ==
+			property_hdr->sz &&
+			encoder->codec.codec ==
+			VCD_CODEC_MPEG4) {
+			encoder->short_header =
+			*(struct vcd_property_short_header *)
+				property_value;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_VOP_TIMING:
+	{
+		struct vcd_property_vop_timing *vop_time =
+			(struct vcd_property_vop_timing *)
+				property_value;
+
+		if ((sizeof(struct vcd_property_vop_timing) ==
+			property_hdr->sz) &&
+			(encoder->frame_rate.fps_numerator <=
+			vop_time->vop_time_resolution) &&
+			(encoder->codec.codec == VCD_CODEC_MPEG4)) {
+			encoder->vop_timing = *vop_time;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	}
+	break;
+	case VCD_I_HEADER_EXTENSION:
+		if (sizeof(u32) == property_hdr->sz &&
+			encoder->codec.codec == VCD_CODEC_MPEG4) {
+			encoder->hdr_ext_control = *(u32 *)property_value;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_ENTROPY_CTRL:
+	{
+		struct vcd_property_entropy_control *entropy_control =
+			(struct vcd_property_entropy_control *)
+			property_value;
+		if (sizeof(struct vcd_property_entropy_control) ==
+			property_hdr->sz &&
+			encoder->codec.codec == VCD_CODEC_H264 &&
+			entropy_control->entropy_sel >=
+			VCD_ENTROPY_SEL_CAVLC &&
+			entropy_control->entropy_sel <=
+			VCD_ENTROPY_SEL_CABAC) {
+			if ((entropy_control->entropy_sel ==
+			     VCD_ENTROPY_SEL_CABAC) &&
+			     (encoder->entropy_control.cabac_model ==
+			     VCD_CABAC_MODEL_NUMBER_1 ||
+			     encoder->entropy_control.cabac_model ==
+			     VCD_CABAC_MODEL_NUMBER_2)) {
+				vcd_status = VCD_ERR_ILLEGAL_PARM;
+			} else {
+				encoder->entropy_control = *entropy_control;
+				vcd_status = VCD_S_SUCCESS;
+			}
+		}
+	}
+	break;
+	case VCD_I_DEBLOCKING:
+	{
+		struct vcd_property_db_config *db_config =
+			(struct vcd_property_db_config *) property_value;
+		if (sizeof(struct vcd_property_db_config) ==
+			property_hdr->sz  &&
+			encoder->codec.codec == VCD_CODEC_H264 &&
+			db_config->db_config >=
+			VCD_DB_ALL_BLOCKING_BOUNDARY &&
+			db_config->db_config <=
+			VCD_DB_SKIP_SLICE_BOUNDARY) {
+			encoder->db_control = *db_config;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	}
+	break;
+	case VCD_I_QP_RANGE:
+	{
+		struct vcd_property_qp_range *qp =
+			(struct vcd_property_qp_range *)property_value;
+
+		if ((sizeof(struct vcd_property_qp_range) ==
+			property_hdr->sz) && (qp->min_qp <=
+			qp->max_qp) && ((encoder->codec.codec ==
+			VCD_CODEC_H264 && qp->max_qp <= DDL_MAX_H264_QP) ||
+			(qp->max_qp <= DDL_MAX_MPEG4_QP))) {
+			encoder->qp_range = *qp;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	}
+	break;
+	case VCD_I_SESSION_QP:
+	{
+		struct vcd_property_session_qp *qp =
+			(struct vcd_property_session_qp *)property_value;
+		if ((sizeof(struct vcd_property_session_qp) ==
+			property_hdr->sz) &&
+			(qp->i_frame_qp >= encoder->qp_range.min_qp) &&
+			(qp->i_frame_qp <= encoder->qp_range.max_qp) &&
+			(qp->p_frame_qp >= encoder->qp_range.min_qp) &&
+			(qp->p_frame_qp <= encoder->qp_range.max_qp)) {
+			encoder->session_qp = *qp;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	}
+	break;
+	case VCD_I_RC_LEVEL_CONFIG:
+	{
+		struct vcd_property_rc_level *rc_level =
+			(struct vcd_property_rc_level *) property_value;
+		if (sizeof(struct vcd_property_rc_level) ==
+			property_hdr->sz &&
+			(encoder->rc.rate_control >=
+			VCD_RATE_CONTROL_VBR_VFR ||
+			encoder->rc.rate_control <=
+			VCD_RATE_CONTROL_CBR_VFR) &&
+			(!rc_level->mb_level_rc ||
+			encoder->codec.codec == VCD_CODEC_H264)) {
+			encoder->rc_level = *rc_level;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	}
+	break;
+	case VCD_I_FRAME_LEVEL_RC:
+	{
+		struct vcd_property_frame_level_rc_params
+			*frame_level_rc =
+			(struct vcd_property_frame_level_rc_params *)
+			property_value;
+		if ((sizeof(struct vcd_property_frame_level_rc_params) ==
+			property_hdr->sz) &&
+			(frame_level_rc->reaction_coeff) &&
+			(encoder->rc_level.frame_level_rc)) {
+			encoder->frame_level_rc = *frame_level_rc;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	}
+	break;
+	case VCD_I_ADAPTIVE_RC:
+		if ((sizeof(struct vcd_property_adaptive_rc_params) ==
+			property_hdr->sz) &&
+			(encoder->codec.codec == VCD_CODEC_H264) &&
+			(encoder->rc_level.mb_level_rc)) {
+			encoder->adaptive_rc =
+				*(struct vcd_property_adaptive_rc_params *)
+				property_value;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_BUFFER_FORMAT:
+	{
+		struct vcd_property_buffer_format *buffer_format =
+			(struct vcd_property_buffer_format *)
+			property_value;
+
+		if (sizeof(struct vcd_property_buffer_format) ==
+			property_hdr->sz &&
+			((buffer_format->buffer_format ==
+			VCD_BUFFER_FORMAT_NV12_16M2KA) ||
+			(VCD_BUFFER_FORMAT_TILE_4x2 ==
+			buffer_format->buffer_format))) {
+			if (buffer_format->buffer_format !=
+				encoder->buf_format.buffer_format) {
+				encoder->buf_format = *buffer_format;
+				ddl_set_default_encoder_buffer_req(encoder);
+			}
+			vcd_status = VCD_S_SUCCESS;
+		}
+	}
+	break;
+	case DDL_I_INPUT_BUF_REQ:
+	{
+		struct vcd_buffer_requirement *buffer_req =
+			(struct vcd_buffer_requirement *)property_value;
+		if (sizeof(struct vcd_buffer_requirement) ==
+			property_hdr->sz && (ddl_valid_buffer_requirement(
+			&encoder->input_buf_req, buffer_req))) {
+			encoder->client_input_buf_req = *buffer_req;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	}
+	break;
+	case DDL_I_OUTPUT_BUF_REQ:
+	{
+		struct vcd_buffer_requirement *buffer_req =
+			(struct vcd_buffer_requirement *)property_value;
+		if (sizeof(struct vcd_buffer_requirement) ==
+			property_hdr->sz && (ddl_valid_buffer_requirement(
+			&encoder->output_buf_req, buffer_req))) {
+			encoder->client_output_buf_req = *buffer_req;
+			encoder->client_output_buf_req.sz =
+				DDL_ALIGN(buffer_req->sz,
+				DDL_KILO_BYTE(4));
+			vcd_status = VCD_S_SUCCESS;
+		}
+	}
+	break;
+	case VCD_I_RECON_BUFFERS:
+	{
+		int index;
+		struct vcd_property_enc_recon_buffer *recon_buffers =
+			(struct vcd_property_enc_recon_buffer *)property_value;
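+		/* Pick the first unused recon (DPB) slot for this buffer. */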
+		for (index = 0; index < 4; index++) {
+			if (!encoder->hw_bufs.dpb_y[index].align_physical_addr)
+				break;
+		}
+		if (property_hdr->sz == sizeof(struct
+			vcd_property_enc_recon_buffer)) {
+			encoder->hw_bufs.dpb_y[index].align_physical_addr =
+				recon_buffers->physical_addr;
+			encoder->hw_bufs.dpb_y[index].align_virtual_addr =
+				recon_buffers->kernel_virtual_addr;
+			encoder->hw_bufs.dpb_y[index].buffer_size =
+				recon_buffers->buffer_size;
+			encoder->hw_bufs.dpb_c[index].align_physical_addr =
+			recon_buffers->physical_addr + ddl_get_yuv_buf_size(
+				encoder->frame_size.width, encoder->frame_size.
+				height, DDL_YUV_BUF_TYPE_TILE);
+			encoder->hw_bufs.dpb_c[index].align_virtual_addr =
+				recon_buffers->kernel_virtual_addr +
+				recon_buffers->ysize;
+			DDL_MSG_LOW("Y::KVirt: %p, KPhys: %p "
+						"UV::KVirt: %p, KPhys: %p\n",
+			encoder->hw_bufs.dpb_y[index].align_virtual_addr,
+			encoder->hw_bufs.dpb_y[index].align_physical_addr,
+			encoder->hw_bufs.dpb_c[index].align_virtual_addr,
+			encoder->hw_bufs.dpb_c[index].align_physical_addr);
+			vcd_status = VCD_S_SUCCESS;
+			}
+	}
+	break;
+	case VCD_I_FREE_RECON_BUFFERS:
+	{
+		memset(&encoder->hw_bufs.dpb_y, 0,
+			sizeof(struct ddl_buf_addr) * 4);
+		memset(&encoder->hw_bufs.dpb_c, 0,
+			sizeof(struct ddl_buf_addr) * 4);
+		vcd_status = VCD_S_SUCCESS;
+		break;
+	}
+	case VCD_I_METADATA_ENABLE:
+	case VCD_I_METADATA_HEADER:
+		DDL_MSG_ERROR("Meta Data Interface is Requested");
+		vcd_status = ddl_set_metadata_params(ddl, property_hdr,
+			property_value);
+		vcd_status = VCD_S_SUCCESS;
+	break;
+	default:
+		DDL_MSG_ERROR("INVALID ID %d\n", (int)property_hdr->prop_id);
+		vcd_status = VCD_ERR_ILLEGAL_OP;
+	break;
+	}
+	return vcd_status;
+}
+
+static u32 ddl_get_dec_property(struct ddl_client_context *ddl,
+	struct vcd_property_hdr *property_hdr, void *property_value)
+{
+	struct ddl_decoder_data *decoder = &ddl->codec_data.decoder;
+	struct vcd_property_frame_size *fz_size;
+	u32 vcd_status = VCD_ERR_ILLEGAL_PARM;
+	DDL_MSG_HIGH("property_hdr->prop_id:%x\n", property_hdr->prop_id);
+	switch (property_hdr->prop_id) {
+	case VCD_I_FRAME_SIZE:
+		if (sizeof(struct vcd_property_frame_size) ==
+			property_hdr->sz) {
+			ddl_calculate_stride(&decoder->client_frame_size,
+				!decoder->progressive_only);
+			fz_size =
+			&decoder->client_frame_size;
+			fz_size->stride =
+			DDL_TILE_ALIGN(fz_size->width,
+				DDL_TILE_ALIGN_WIDTH);
+			fz_size->scan_lines =
+			DDL_TILE_ALIGN(fz_size->height,
+				DDL_TILE_ALIGN_HEIGHT);
+			*(struct vcd_property_frame_size *)
+				property_value =
+					decoder->client_frame_size;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_PROFILE:
+		if (sizeof(struct vcd_property_profile) ==
+			property_hdr->sz) {
+			*(struct vcd_property_profile *)property_value =
+				decoder->profile;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_LEVEL:
+		if (sizeof(struct vcd_property_level) ==
+			property_hdr->sz) {
+			*(struct vcd_property_level *)property_value =
+				decoder->level;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_PROGRESSIVE_ONLY:
+		if (sizeof(u32) == property_hdr->sz) {
+			*(u32 *)property_value =
+				decoder->progressive_only;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case DDL_I_INPUT_BUF_REQ:
+		if (sizeof(struct vcd_buffer_requirement) ==
+			property_hdr->sz) {
+			*(struct vcd_buffer_requirement *)
+				property_value =
+					decoder->client_input_buf_req;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case DDL_I_OUTPUT_BUF_REQ:
+		if (sizeof(struct vcd_buffer_requirement) ==
+			property_hdr->sz) {
+			*(struct vcd_buffer_requirement *)property_value =
+				decoder->client_output_buf_req;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_CODEC:
+	if (sizeof(struct vcd_property_codec) ==
+		property_hdr->sz) {
+			*(struct vcd_property_codec *) property_value =
+				decoder->codec;
+			vcd_status = VCD_S_SUCCESS;
+	}
+	break;
+	case VCD_I_BUFFER_FORMAT:
+		if (sizeof(struct vcd_property_buffer_format) ==
+			property_hdr->sz) {
+			*(struct vcd_property_buffer_format *)
+				property_value = decoder->buf_format;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_POST_FILTER:
+		if (sizeof(struct vcd_property_post_filter) ==
+			property_hdr->sz) {
+			*(struct vcd_property_post_filter *)
+				property_value =
+					decoder->post_filter;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case DDL_I_SEQHDR_ALIGN_BYTES:
+		if (sizeof(u32) == property_hdr->sz) {
+			*(u32 *)property_value =
+				DDL_LINEAR_BUFFER_ALIGN_BYTES;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case DDL_I_FRAME_PROC_UNITS:
+		if (sizeof(u32) == property_hdr->sz) {
+			if (!decoder->progressive_only &&
+				(decoder->client_frame_size.width *
+				 decoder->client_frame_size.height) <=
+				DDL_FRAME_VGA_SIZE) {
+				*(u32 *) property_value = DDL_NO_OF_MB(
+					DDL_FRAME_720P_WIDTH,
+					DDL_FRAME_720P_HEIGHT);
+			} else {
+				*(u32 *) property_value = DDL_NO_OF_MB(
+					decoder->client_frame_size.width,
+					decoder->client_frame_size.height);
+			}
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case DDL_I_DPB_RETRIEVE:
+		if (sizeof(struct ddl_frame_data_tag) ==
+			property_hdr->sz) {
+			vcd_status = ddl_decoder_dpb_transact(decoder,
+				(struct ddl_frame_data_tag *)
+				property_value, DDL_DPB_OP_RETRIEVE);
+		}
+	break;
+	case VCD_I_GET_H264_MV_SIZE:
+		if (property_hdr->sz == sizeof(struct
+			vcd_property_buffer_size)) {
+			struct vcd_property_buffer_size *mv_size =
+			(struct vcd_property_buffer_size *) property_value;
+			mv_size->size = ddl_get_yuv_buf_size(mv_size->width,
+				mv_size->height, DDL_YUV_BUF_TYPE_TILE);
+			mv_size->alignment = DDL_TILE_BUFFER_ALIGN_BYTES;
+			DDL_MSG_LOW("w: %d, h: %d, S: %d, "
+						"A: %d", mv_size->width,
+						mv_size->height, mv_size->size,
+						mv_size->alignment);
+			vcd_status = VCD_S_SUCCESS;
+		}
+		break;
+	case VCD_I_OUTPUT_ORDER:
+		{
+			if (sizeof(u32) == property_hdr->sz) {
+				*(u32 *)property_value = decoder->output_order;
+				vcd_status = VCD_S_SUCCESS;
+			}
+		}
+		break;
+	case VCD_I_METADATA_ENABLE:
+	case VCD_I_METADATA_HEADER:
+		DDL_MSG_ERROR("Meta Data Interface is Requested");
+		vcd_status = ddl_get_metadata_params(ddl, property_hdr,
+			property_value);
+		vcd_status = VCD_S_SUCCESS;
+	break;
+	case VCD_I_CONT_ON_RECONFIG:
+		if (sizeof(u32) == property_hdr->sz) {
+			*(u32 *)property_value = decoder->cont_mode;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	default:
+		vcd_status = VCD_ERR_ILLEGAL_OP;
+	break;
+	}
+	return vcd_status;
+}
+
+static u32 ddl_get_enc_property(struct ddl_client_context *ddl,
+	struct vcd_property_hdr *property_hdr, void *property_value)
+{
+	struct ddl_encoder_data *encoder = &ddl->codec_data.encoder;
+	u32 vcd_status = VCD_ERR_ILLEGAL_PARM;
+
+	switch (property_hdr->prop_id) {
+	case VCD_I_CODEC:
+		if (sizeof(struct vcd_property_codec) ==
+			property_hdr->sz) {
+			*(struct vcd_property_codec *) property_value =
+				encoder->codec;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_FRAME_SIZE:
+		if (sizeof(struct vcd_property_frame_size) ==
+			property_hdr->sz) {
+			*(struct vcd_property_frame_size *)
+				property_value = encoder->frame_size;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_FRAME_RATE:
+		if (sizeof(struct vcd_property_frame_rate) ==
+			property_hdr->sz) {
+			*(struct vcd_property_frame_rate *)
+				property_value = encoder->frame_rate;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_TARGET_BITRATE:
+		if (sizeof(struct vcd_property_target_bitrate) ==
+			property_hdr->sz) {
+			*(struct vcd_property_target_bitrate *)
+				property_value = encoder->target_bit_rate;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_RATE_CONTROL:
+		if (sizeof(struct vcd_property_rate_control) ==
+			property_hdr->sz) {
+			*(struct vcd_property_rate_control *)
+				property_value = encoder->rc;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_PROFILE:
+		if (sizeof(struct vcd_property_profile) ==
+			property_hdr->sz) {
+			*(struct vcd_property_profile *) property_value =
+				encoder->profile;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_LEVEL:
+		if (sizeof(struct vcd_property_level) ==
+			property_hdr->sz) {
+			*(struct vcd_property_level *) property_value =
+				encoder->level;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_MULTI_SLICE:
+		if (sizeof(struct vcd_property_multi_slice) ==
+			property_hdr->sz) {
+			*(struct vcd_property_multi_slice *)
+				property_value = encoder->multi_slice;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_SEQ_HEADER:
+	{
+		struct vcd_sequence_hdr *seq_hdr =
+			(struct vcd_sequence_hdr *) property_value;
+
+		if (!encoder->seq_header_length) {
+			seq_hdr->sequence_header_len =
+				encoder->seq_header_length;
+			vcd_status = VCD_ERR_NO_SEQ_HDR;
+		} else if (sizeof(struct vcd_sequence_hdr) ==
+			property_hdr->sz &&
+			encoder->seq_header_length <=
+			seq_hdr->sequence_header_len) {
+			memcpy(seq_hdr->sequence_header,
+				encoder->seq_header.align_virtual_addr,
+				encoder->seq_header_length);
+			seq_hdr->sequence_header_len =
+				encoder->seq_header_length;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	}
+	break;
+	case DDL_I_SEQHDR_PRESENT:
+		if (sizeof(u32) == property_hdr->sz) {
+			if ((encoder->codec.codec ==
+				VCD_CODEC_MPEG4 &&
+				!encoder->short_header.short_header) ||
+				encoder->codec.codec == VCD_CODEC_H264)
+				*(u32 *) property_value = 0x1;
+			else
+				*(u32 *) property_value = 0x0;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_VOP_TIMING:
+		if (sizeof(struct vcd_property_vop_timing) ==
+			property_hdr->sz) {
+			*(struct vcd_property_vop_timing *)
+				property_value = encoder->vop_timing;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_SHORT_HEADER:
+		if (sizeof(struct vcd_property_short_header) ==
+			property_hdr->sz) {
+			if (encoder->codec.codec == VCD_CODEC_MPEG4) {
+				*(struct vcd_property_short_header *)
+					property_value =
+						encoder->short_header;
+				vcd_status = VCD_S_SUCCESS;
+			} else
+				vcd_status = VCD_ERR_ILLEGAL_OP;
+		}
+	break;
+	case VCD_I_ENTROPY_CTRL:
+		if (sizeof(struct vcd_property_entropy_control) ==
+			property_hdr->sz) {
+			if (encoder->codec.codec == VCD_CODEC_H264) {
+				*(struct vcd_property_entropy_control *)
+					property_value =
+						encoder->entropy_control;
+				vcd_status = VCD_S_SUCCESS;
+			} else
+				vcd_status = VCD_ERR_ILLEGAL_OP;
+		}
+	break;
+	case VCD_I_DEBLOCKING:
+		if (sizeof(struct vcd_property_db_config) ==
+			property_hdr->sz) {
+			if (encoder->codec.codec == VCD_CODEC_H264) {
+				*(struct vcd_property_db_config *)
+					property_value =
+						encoder->db_control;
+				vcd_status = VCD_S_SUCCESS;
+			} else
+				vcd_status = VCD_ERR_ILLEGAL_OP;
+		}
+	break;
+	case VCD_I_INTRA_PERIOD:
+		if (sizeof(struct vcd_property_i_period) ==
+			property_hdr->sz) {
+			*(struct vcd_property_i_period *)
+				property_value = encoder->i_period;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_QP_RANGE:
+		if (sizeof(struct vcd_property_qp_range) ==
+			property_hdr->sz) {
+			*(struct vcd_property_qp_range *)
+				property_value = encoder->qp_range;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_SESSION_QP:
+		if (sizeof(struct vcd_property_session_qp) ==
+			property_hdr->sz) {
+			*(struct vcd_property_session_qp *)
+				property_value = encoder->session_qp;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_RC_LEVEL_CONFIG:
+		if (sizeof(struct vcd_property_rc_level) ==
+			property_hdr->sz) {
+			*(struct vcd_property_rc_level *)
+				property_value = encoder->rc_level;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_FRAME_LEVEL_RC:
+		if (sizeof(struct vcd_property_frame_level_rc_params) ==
+			property_hdr->sz) {
+			*(struct vcd_property_frame_level_rc_params *)
+			property_value = encoder->frame_level_rc;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_ADAPTIVE_RC:
+		if (sizeof(struct vcd_property_adaptive_rc_params) ==
+			property_hdr->sz) {
+			*(struct vcd_property_adaptive_rc_params *)
+				property_value = encoder->adaptive_rc;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_INTRA_REFRESH:
+		if (sizeof(struct vcd_property_intra_refresh_mb_number) ==
+			property_hdr->sz) {
+			*(struct vcd_property_intra_refresh_mb_number *)
+				property_value = encoder->intra_refresh;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case DDL_I_INPUT_BUF_REQ:
+		if (sizeof(struct vcd_buffer_requirement) ==
+			property_hdr->sz) {
+			*(struct vcd_buffer_requirement *)property_value =
+				encoder->client_input_buf_req;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case DDL_I_OUTPUT_BUF_REQ:
+		if (sizeof(struct vcd_buffer_requirement) ==
+			property_hdr->sz) {
+			*(struct vcd_buffer_requirement *)property_value =
+				encoder->client_output_buf_req;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_BUFFER_FORMAT:
+		if (sizeof(struct vcd_property_buffer_format) ==
+			property_hdr->sz) {
+			*(struct vcd_property_buffer_format *)
+			property_value = encoder->buf_format;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case DDL_I_FRAME_PROC_UNITS:
+		if (sizeof(u32) == property_hdr->sz &&
+			encoder->frame_size.width &&
+			encoder->frame_size.height) {
+			*(u32 *)property_value = DDL_NO_OF_MB(
+				encoder->frame_size.width,
+				encoder->frame_size.height);
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_HEADER_EXTENSION:
+		if (sizeof(u32) == property_hdr->sz &&
+			encoder->codec.codec == VCD_CODEC_MPEG4) {
+			*(u32 *) property_value =
+				encoder->hdr_ext_control;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_GET_RECON_BUFFER_SIZE:
+	{
+		u32 ysize, uvsize;
+		if (property_hdr->sz ==
+			sizeof(struct vcd_property_buffer_size)) {
+			struct vcd_property_buffer_size *recon_buff_size =
+			(struct vcd_property_buffer_size *) property_value;
+
+			ysize = ddl_get_yuv_buf_size(recon_buff_size->width,
+				recon_buff_size->height, DDL_YUV_BUF_TYPE_TILE);
+			uvsize = ddl_get_yuv_buf_size(recon_buff_size->width,
+				recon_buff_size->height/2,
+				DDL_YUV_BUF_TYPE_TILE);
+			recon_buff_size->size = ysize + uvsize;
+			recon_buff_size->alignment =
+				DDL_TILE_BUFFER_ALIGN_BYTES;
+			DDL_MSG_LOW("w: %d, h: %d, S: %d, A: %d",
+			recon_buff_size->width, recon_buff_size->height,
+			recon_buff_size->size, recon_buff_size->alignment);
+			vcd_status = VCD_S_SUCCESS;
+		}
+	}
+	break;
+	case VCD_I_METADATA_ENABLE:
+	case VCD_I_METADATA_HEADER:
+		DDL_MSG_ERROR("Meta Data Interface is Requested");
+		vcd_status = ddl_get_metadata_params(ddl, property_hdr,
+			property_value);
+		vcd_status = VCD_S_SUCCESS;
+	break;
+	default:
+		vcd_status = VCD_ERR_ILLEGAL_OP;
+		break;
+	}
+	return vcd_status;
+}
+
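+/*
+ * Dynamic (mid-session) encoder properties: I-frame request, target
+ * bitrate, I-period, frame rate and intra refresh.  Accepted changes are
+ * recorded as DDL_ENC_* flags; while the client is waiting for a frame or
+ * for frame-done they are accumulated in dynamic_prop_change so they can
+ * be applied on a subsequent frame.
+ */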
+static u32 ddl_set_enc_dynamic_property(struct ddl_client_context *ddl,
+	struct vcd_property_hdr *property_hdr, void *property_value)
+{
+	struct ddl_encoder_data *encoder = &ddl->codec_data.encoder;
+	u32  vcd_status = VCD_ERR_ILLEGAL_PARM;
+	u32  dynamic_prop_change = 0x0;
+
+	switch (property_hdr->prop_id) {
+	case VCD_I_REQ_IFRAME:
+		if (sizeof(struct vcd_property_req_i_frame) ==
+			property_hdr->sz) {
+			dynamic_prop_change |= DDL_ENC_REQ_IFRAME;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_TARGET_BITRATE:
+	{
+		struct vcd_property_target_bitrate *bitrate =
+			(struct vcd_property_target_bitrate *)property_value;
+
+		if (sizeof(struct vcd_property_target_bitrate) ==
+			property_hdr->sz && bitrate->target_bitrate &&
+			bitrate->target_bitrate <= DDL_MAX_BIT_RATE) {
+			encoder->target_bit_rate = *bitrate;
+			dynamic_prop_change = DDL_ENC_CHANGE_BITRATE;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	}
+	break;
+	case VCD_I_INTRA_PERIOD:
+	{
+		struct vcd_property_i_period *i_period =
+			(struct vcd_property_i_period *)property_value;
+
+		if (sizeof(struct vcd_property_i_period) ==
+			property_hdr->sz) {
+			encoder->i_period = *i_period;
+			dynamic_prop_change = DDL_ENC_CHANGE_IPERIOD;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	}
+	break;
+	case VCD_I_FRAME_RATE:
+	{
+		struct vcd_property_frame_rate *frame_rate =
+			(struct vcd_property_frame_rate *)
+			property_value;
+		if (sizeof(struct vcd_property_frame_rate) ==
+			property_hdr->sz &&
+			frame_rate->fps_denominator &&
+			frame_rate->fps_numerator &&
+			frame_rate->fps_denominator <=
+			frame_rate->fps_numerator) {
+			encoder->frame_rate = *frame_rate;
+			dynamic_prop_change = DDL_ENC_CHANGE_FRAMERATE;
+			if (DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN) &&
+				(encoder->codec.codec != VCD_CODEC_MPEG4 ||
+				encoder->short_header.short_header)) {
+				ddl_set_default_enc_vop_timing(encoder);
+			}
+			vcd_status = VCD_S_SUCCESS;
+		}
+	}
+	break;
+	case VCD_I_INTRA_REFRESH:
+	{
+		struct vcd_property_intra_refresh_mb_number
+			*intra_refresh_mb_num =
+			(struct vcd_property_intra_refresh_mb_number *)
+			property_value;
+		u32 frame_mb_num = DDL_NO_OF_MB(encoder->frame_size.width,
+			encoder->frame_size.height);
+
+		if ((sizeof(struct vcd_property_intra_refresh_mb_number) ==
+			property_hdr->sz) &&
+			(intra_refresh_mb_num->cir_mb_number <= frame_mb_num)) {
+			encoder->intra_refresh = *intra_refresh_mb_num;
+			dynamic_prop_change = DDL_ENC_CHANGE_CIR;
+			vcd_status = VCD_S_SUCCESS;
+		}
+	}
+	break;
+	default:
+		vcd_status = VCD_ERR_ILLEGAL_OP;
+		break;
+	}
+
+	if (!vcd_status && (DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME)
+		|| DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME_DONE)))
+		encoder->dynamic_prop_change |= dynamic_prop_change;
+
+	return vcd_status;
+}
+
+void ddl_set_default_dec_property(struct ddl_client_context *ddl)
+{
+	struct ddl_decoder_data *decoder =
+		&(ddl->codec_data.decoder);
+
+	/* The deblocking post filter is disabled by default for all codecs. */
+	decoder->post_filter.post_filter = false;
+	decoder->buf_format.buffer_format = VCD_BUFFER_FORMAT_TILE_4x2;
+	decoder->client_frame_size.height = VCD_DDL_TEST_DEFAULT_HEIGHT;
+	decoder->client_frame_size.width  = VCD_DDL_TEST_DEFAULT_WIDTH;
+	decoder->client_frame_size.stride = VCD_DDL_TEST_DEFAULT_WIDTH;
+	decoder->client_frame_size.scan_lines = VCD_DDL_TEST_DEFAULT_HEIGHT;
+	decoder->progressive_only = 1;
+	decoder->idr_only_decoding = false;
+	decoder->output_order = VCD_DEC_ORDER_DISPLAY;
+	decoder->field_needed_for_prev_ip = 0;
+	decoder->cont_mode = 0;
+	ddl_set_default_metadata_flag(ddl);
+	ddl_set_default_decoder_buffer_req(decoder, true);
+}
+
+static void ddl_set_default_enc_property(struct ddl_client_context *ddl)
+{
+	struct ddl_encoder_data *encoder = &(ddl->codec_data.encoder);
+
+	ddl_set_default_enc_profile(encoder);
+	ddl_set_default_enc_level(encoder);
+	encoder->rc.rate_control = VCD_RATE_CONTROL_VBR_VFR;
+	ddl_set_default_enc_rc_params(encoder);
+	ddl_set_default_enc_intra_period(encoder);
+	encoder->intra_refresh.cir_mb_number = 0;
+	ddl_set_default_enc_vop_timing(encoder);
+	encoder->multi_slice.m_slice_sel = VCD_MSLICE_OFF;
+	encoder->multi_slice.m_slice_size = 0;
+	ddl->b_count = 0;
+	encoder->short_header.short_header    = false;
+	encoder->entropy_control.entropy_sel  = VCD_ENTROPY_SEL_CAVLC;
+	encoder->entropy_control.cabac_model  = VCD_CABAC_MODEL_NUMBER_0;
+	encoder->db_control.db_config         =
+		VCD_DB_ALL_BLOCKING_BOUNDARY;
+	encoder->db_control.slice_alpha_offset = 0;
+	encoder->db_control.slice_beta_offset = 0;
+	encoder->recon_buf_format.buffer_format =
+		VCD_BUFFER_FORMAT_TILE_1x1;
+	encoder->buf_format.buffer_format = VCD_BUFFER_FORMAT_NV12_16M2KA;
+	encoder->hdr_ext_control = 0;
+	encoder->mb_info_enable  = false;
+	encoder->num_references_for_p_frame = DDL_MIN_NUM_REF_FOR_P_FRAME;
+	ddl_set_default_metadata_flag(ddl);
+	ddl_set_default_encoder_buffer_req(encoder);
+}
+
+static void ddl_set_default_enc_profile(struct ddl_encoder_data *encoder)
+{
+	enum vcd_codec codec = encoder->codec.codec;
+
+	if (codec == VCD_CODEC_MPEG4)
+		encoder->profile.profile = VCD_PROFILE_MPEG4_SP;
+	else if (codec == VCD_CODEC_H264)
+		encoder->profile.profile = VCD_PROFILE_H264_BASELINE;
+	else
+		encoder->profile.profile = VCD_PROFILE_H263_BASELINE;
+}
+
+static void ddl_set_default_enc_level(struct ddl_encoder_data *encoder)
+{
+	enum vcd_codec codec = encoder->codec.codec;
+
+	if (codec == VCD_CODEC_MPEG4)
+		encoder->level.level = VCD_LEVEL_MPEG4_1;
+	else if (codec == VCD_CODEC_H264)
+		encoder->level.level = VCD_LEVEL_H264_1;
+	else
+		encoder->level.level = VCD_LEVEL_H263_10;
+}
+
+static void ddl_set_default_enc_vop_timing(
+	struct ddl_encoder_data *encoder)
+{
+	if (encoder->codec.codec == VCD_CODEC_MPEG4) {
+		encoder->vop_timing.vop_time_resolution =
+			(encoder->frame_rate.fps_numerator << 1) /
+			encoder->frame_rate.fps_denominator;
+	} else
+		encoder->vop_timing.vop_time_resolution =
+			DDL_FRAMERATE_SCALE(DDL_INITIAL_FRAME_RATE);
+}
+
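+/*
+ * Default I-frame interval, derived from the frame rate: (2 * fps) - 1
+ * P-frames for VBR, CBR_VFR and rate-control-off modes, (fps / 2) - 1 for
+ * CBR_CFR, where fps = fps_numerator / fps_denominator.
+ */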
+static void ddl_set_default_enc_intra_period(
+	struct ddl_encoder_data *encoder)
+{
+	switch (encoder->rc.rate_control) {
+	default:
+	case VCD_RATE_CONTROL_VBR_VFR:
+	case VCD_RATE_CONTROL_VBR_CFR:
+	case VCD_RATE_CONTROL_CBR_VFR:
+	case VCD_RATE_CONTROL_OFF:
+		encoder->i_period.p_frames =
+			((encoder->frame_rate.fps_numerator << 1) /
+			encoder->frame_rate.fps_denominator) - 1;
+	break;
+	case VCD_RATE_CONTROL_CBR_CFR:
+		encoder->i_period.p_frames =
+			((encoder->frame_rate.fps_numerator >> 1) /
+			encoder->frame_rate.fps_denominator) - 1;
+	break;
+	}
+	encoder->i_period.b_frames = DDL_DEFAULT_NUM_OF_B_FRAME;
+}
+
+static void ddl_set_default_enc_rc_params(
+	struct ddl_encoder_data *encoder)
+{
+	enum vcd_codec codec = encoder->codec.codec;
+	encoder->rc_level.frame_level_rc = true;
+	encoder->qp_range.min_qp = 0x1;
+	if (codec == VCD_CODEC_H264) {
+		encoder->qp_range.max_qp = 0x33;
+		encoder->session_qp.i_frame_qp = 0x14;
+		encoder->session_qp.p_frame_qp = 0x14;
+		encoder->session_qp.b_frame_qp = 0x14;
+		encoder->rc_level.mb_level_rc  = true;
+		encoder->adaptive_rc.disable_activity_region_flag = true;
+		encoder->adaptive_rc.disable_dark_region_as_flag = true;
+		encoder->adaptive_rc.disable_smooth_region_as_flag = true;
+		encoder->adaptive_rc.disable_static_region_as_flag = true;
+	} else {
+		encoder->qp_range.max_qp       = 0x1f;
+		encoder->session_qp.i_frame_qp = 0xd;
+		encoder->session_qp.p_frame_qp = 0xd;
+		encoder->session_qp.b_frame_qp = 0xd;
+		encoder->rc_level.frame_level_rc = true;
+		encoder->rc_level.mb_level_rc  = false;
+	}
+	switch (encoder->rc.rate_control) {
+	case VCD_RATE_CONTROL_VBR_CFR:
+		encoder->r_cframe_skip = 0;
+		encoder->frame_level_rc.reaction_coeff = 0x1f4;
+	break;
+	case VCD_RATE_CONTROL_CBR_VFR:
+		encoder->r_cframe_skip = 1;
+		if (codec != VCD_CODEC_H264) {
+			encoder->session_qp.i_frame_qp = 0xf;
+			encoder->session_qp.p_frame_qp = 0xf;
+			encoder->session_qp.b_frame_qp = 0xf;
+		}
+		encoder->frame_level_rc.reaction_coeff = 0x14;
+	break;
+	case VCD_RATE_CONTROL_CBR_CFR:
+		encoder->r_cframe_skip = 0;
+		encoder->frame_level_rc.reaction_coeff = 0x6;
+	break;
+	case VCD_RATE_CONTROL_OFF:
+		encoder->r_cframe_skip = 0;
+		encoder->rc_level.frame_level_rc = false;
+		encoder->rc_level.mb_level_rc = false;
+	break;
+	case VCD_RATE_CONTROL_VBR_VFR:
+	default:
+		encoder->r_cframe_skip = 1;
+		encoder->frame_level_rc.reaction_coeff = 0x1f4;
+	break;
+	}
+}
+
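+/*
+ * Default encoder buffer requirements: input (YUV) buffers hold one frame
+ * in the selected buffer format; output (bitstream) buffers need at least
+ * i_period.b_frames + 2 entries, sized to one YUV frame (halved at or
+ * above the 720p YUV size) and rounded up to 4 KB.
+ */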
+void ddl_set_default_encoder_buffer_req(struct ddl_encoder_data *encoder)
+{
+	u32 y_cb_cr_size, y_size;
+	memset(&encoder->hw_bufs.dpb_y, 0, sizeof(struct ddl_buf_addr) * 4);
+	memset(&encoder->hw_bufs.dpb_c, 0, sizeof(struct ddl_buf_addr) * 4);
+
+	y_cb_cr_size = ddl_get_yuv_buffer_size(&encoder->frame_size,
+				&encoder->buf_format, false,
+				encoder->hdr.decoding, &y_size);
+	encoder->input_buf_size.size_yuv = y_cb_cr_size;
+	encoder->input_buf_size.size_y   = y_size;
+	encoder->input_buf_size.size_c   = y_cb_cr_size - y_size;
+	memset(&encoder->input_buf_req, 0,
+		sizeof(struct vcd_buffer_requirement));
+	encoder->input_buf_req.min_count    = 3;
+	encoder->input_buf_req.actual_count =
+		encoder->input_buf_req.min_count;
+	encoder->input_buf_req.max_count    = DDL_MAX_BUFFER_COUNT;
+	encoder->input_buf_req.sz = y_cb_cr_size;
+	if (encoder->buf_format.buffer_format ==
+		VCD_BUFFER_FORMAT_NV12_16M2KA)
+		encoder->input_buf_req.align =
+			DDL_LINEAR_BUFFER_ALIGN_BYTES;
+	else if (VCD_BUFFER_FORMAT_TILE_4x2 ==
+		encoder->buf_format.buffer_format)
+		encoder->input_buf_req.align = DDL_TILE_BUFFER_ALIGN_BYTES;
+	encoder->client_input_buf_req = encoder->input_buf_req;
+	memset(&encoder->output_buf_req, 0,
+		sizeof(struct vcd_buffer_requirement));
+	encoder->output_buf_req.min_count    =
+		encoder->i_period.b_frames + 2;
+	encoder->output_buf_req.actual_count =
+		encoder->output_buf_req.min_count + 3;
+	encoder->output_buf_req.max_count    = DDL_MAX_BUFFER_COUNT;
+	encoder->output_buf_req.align	= DDL_LINEAR_BUFFER_ALIGN_BYTES;
+	if (y_cb_cr_size >= VCD_DDL_720P_YUV_BUF_SIZE)
+		y_cb_cr_size = y_cb_cr_size>>1;
+	encoder->output_buf_req.sz =
+		DDL_ALIGN(y_cb_cr_size, DDL_KILO_BYTE(4));
+	ddl_set_default_encoder_metadata_buffer_size(encoder);
+	encoder->client_output_buf_req = encoder->output_buf_req;
+}
+
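+/*
+ * Fills either the estimated (client_*) or the actual decoder buffer
+ * requirements.  The output pool holds at least the codec's minimum DPB
+ * count (padded by four extra buffers for normal decode), and the request
+ * is rejected when an H.264 stream needs more DPBs than are available.
+ */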
+u32 ddl_set_default_decoder_buffer_req(struct ddl_decoder_data *decoder,
+	u32 estimate)
+{
+	struct vcd_property_frame_size *frame_size;
+	struct vcd_buffer_requirement *input_buf_req;
+	struct vcd_buffer_requirement *output_buf_req;
+	u32  min_dpb, y_cb_cr_size;
+
+	if (!decoder->codec.codec)
+		return false;
+	min_dpb = ddl_decoder_min_num_dpb(decoder);
+	if (estimate) {
+		frame_size = &decoder->client_frame_size;
+		output_buf_req = &decoder->client_output_buf_req;
+		input_buf_req = &decoder->client_input_buf_req;
+		y_cb_cr_size = ddl_get_yuv_buffer_size(frame_size,
+					&decoder->buf_format,
+					(!decoder->progressive_only),
+					decoder->hdr.decoding, NULL);
+	} else {
+		if (min_dpb >= decoder->min_dpb_num ||
+			decoder->idr_only_decoding) {
+			frame_size = &decoder->frame_size;
+			output_buf_req = &decoder->actual_output_buf_req;
+			input_buf_req = &decoder->actual_input_buf_req;
+			min_dpb = decoder->min_dpb_num;
+			y_cb_cr_size = decoder->y_cb_cr_size;
+		} else {
+			u32 max_dpb_size;
+
+			max_dpb_size = DDL_NO_OF_MB(
+				decoder->client_frame_size.stride,
+				decoder->client_frame_size.scan_lines);
+			max_dpb_size *= (decoder->min_dpb_num - 2);
+			DDL_MSG_ERROR("Error: H264MaxDpbSizeExceeded: %d > %d",
+				max_dpb_size, MAX_DPB_SIZE_L4PT0_MBS);
+			return false;
+		}
+	}
+	memset(output_buf_req, 0,
+		sizeof(struct vcd_buffer_requirement));
+	if ((!estimate && !decoder->idr_only_decoding) || (decoder->cont_mode))
+		output_buf_req->actual_count = min_dpb + 4;
+	else
+		output_buf_req->actual_count = min_dpb;
+	output_buf_req->min_count = min_dpb;
+	output_buf_req->max_count = DDL_MAX_BUFFER_COUNT;
+	output_buf_req->sz = y_cb_cr_size;
+	DDL_MSG_LOW("output_buf_req->sz : %d", output_buf_req->sz);
+	if (decoder->buf_format.buffer_format != VCD_BUFFER_FORMAT_NV12)
+		output_buf_req->align = DDL_TILE_BUFFER_ALIGN_BYTES;
+	else
+		output_buf_req->align = DDL_LINEAR_BUFFER_ALIGN_BYTES;
+	ddl_set_default_decoder_metadata_buffer_size(decoder, frame_size,
+		output_buf_req);
+
+	decoder->min_output_buf_req = *output_buf_req;
+	memset(input_buf_req, 0,
+		sizeof(struct vcd_buffer_requirement));
+	input_buf_req->min_count = 1;
+	input_buf_req->actual_count = input_buf_req->min_count + 1;
+	input_buf_req->max_count = DDL_MAX_BUFFER_COUNT;
+	input_buf_req->sz = (1024 * 1024);
+	input_buf_req->align = DDL_LINEAR_BUFFER_ALIGN_BYTES;
+	decoder->min_input_buf_req = *input_buf_req;
+	return true;
+}
+
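+/*
+ * Size in bytes of one 4:2:0 luma + chroma frame for the given geometry
+ * and buffer format.  Tiled (4x2) buffers round each plane up to the tile
+ * alignment; linear buffers place the chroma plane after luma at a
+ * DDL_LINEAR_MULTIPLY_FACTOR boundary.  The chroma offset is optionally
+ * returned through pn_c_offset.
+ */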
+u32 ddl_get_yuv_buffer_size(struct vcd_property_frame_size *frame_size,
+	struct vcd_property_buffer_format *buf_format,
+	u32 interlace, u32 decoding, u32 *pn_c_offset)
+{
+	struct vcd_property_frame_size frame_sz = *frame_size;
+	u32 total_memory_size = 0, c_offset = 0;
+	ddl_calculate_stride(&frame_sz, interlace);
+	if (buf_format->buffer_format == VCD_BUFFER_FORMAT_TILE_4x2) {
+		u32 component_mem_size, width_round_up;
+		u32 height_round_up, height_chroma = (frame_sz.scan_lines >> 1);
+
+		width_round_up =
+			DDL_ALIGN(frame_sz.stride, DDL_TILE_ALIGN_WIDTH);
+		height_round_up =
+			DDL_ALIGN(frame_sz.scan_lines,
+						   DDL_TILE_ALIGN_HEIGHT);
+		component_mem_size = width_round_up * height_round_up;
+		component_mem_size = DDL_ALIGN(component_mem_size,
+			DDL_TILE_MULTIPLY_FACTOR);
+		c_offset = component_mem_size;
+		total_memory_size = ((component_mem_size +
+					DDL_TILE_BUF_ALIGN_GUARD_BYTES) &
+					DDL_TILE_BUF_ALIGN_MASK);
+		height_round_up = DDL_ALIGN(height_chroma,
+					DDL_TILE_ALIGN_HEIGHT);
+		component_mem_size = width_round_up * height_round_up;
+		component_mem_size = DDL_ALIGN(component_mem_size,
+					DDL_TILE_MULTIPLY_FACTOR);
+		total_memory_size += component_mem_size;
+	} else {
+		if (decoding)
+			total_memory_size = frame_sz.scan_lines *
+						frame_sz.stride;
+		else
+			total_memory_size = frame_sz.height * frame_sz.stride;
+		c_offset = DDL_ALIGN(total_memory_size,
+			DDL_LINEAR_MULTIPLY_FACTOR);
+		total_memory_size = c_offset + DDL_ALIGN(
+			total_memory_size >> 1, DDL_LINEAR_MULTIPLY_FACTOR);
+	}
+	if (pn_c_offset)
+		*pn_c_offset = c_offset;
+	return total_memory_size;
+}
+
+
+void ddl_calculate_stride(struct vcd_property_frame_size *frame_size,
+	u32 interlace)
+{
+	frame_size->stride = DDL_ALIGN(frame_size->width,
+					DDL_LINEAR_ALIGN_WIDTH);
+	if (interlace)
+		frame_size->scan_lines = DDL_ALIGN(frame_size->height,
+						DDL_TILE_ALIGN_HEIGHT);
+	else
+		frame_size->scan_lines = DDL_ALIGN(frame_size->height,
+						DDL_LINEAR_ALIGN_HEIGHT);
+}
+
+
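+/*
+ * A requested buffer requirement is accepted only if its actual_count lies
+ * within [min_count, max_count], its alignment is a multiple of the
+ * original (power-of-two) alignment, and its size is at least the original
+ * size.
+ */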
+static u32 ddl_valid_buffer_requirement(struct vcd_buffer_requirement
+	*original_buf_req, struct vcd_buffer_requirement *req_buf_req)
+{
+	u32 status = false;
+
+	if (original_buf_req->max_count >= req_buf_req->actual_count &&
+		original_buf_req->min_count <=
+		req_buf_req->actual_count &&
+		!((original_buf_req->align - (u32)0x1) &
+		req_buf_req->align) &&
+		/*original_buf_req->align <= req_buf_req->align,*/
+		original_buf_req->sz <= req_buf_req->sz)
+		status = true;
+	else
+		DDL_MSG_ERROR("ddl_valid_buf_req:Failed");
+	return status;
+}
+
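+/*
+ * Minimum decoded picture buffer count per codec.  For H.264 the count is
+ * derived from the level 4.0 DPB budget (MAX_DPB_SIZE_L4PT0_MBS) divided
+ * by the frame size in macroblocks, capped at 16 and padded by two; the
+ * remaining codecs use small fixed counts, doubled when the deblocking
+ * post filter is enabled.
+ */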
+static u32 ddl_decoder_min_num_dpb(struct ddl_decoder_data *decoder)
+{
+	u32 min_dpb = 0;
+
+	if (decoder->idr_only_decoding) {
+		min_dpb = DDL_MIN_BUFFER_COUNT;
+		if (decoder->post_filter.post_filter)
+			min_dpb *= 2;
+		return min_dpb;
+	}
+
+	switch (decoder->codec.codec) {
+	case VCD_CODEC_H264:
+	{
+		u32 yuv_size_in_mb = DDL_MIN(DDL_NO_OF_MB(
+			decoder->client_frame_size.stride,
+			decoder->client_frame_size.scan_lines),
+			MAX_FRAME_SIZE_L4PT0_MBS);
+		min_dpb = DDL_MIN((MAX_DPB_SIZE_L4PT0_MBS /
+				yuv_size_in_mb), 16);
+		min_dpb += 2;
+	}
+	break;
+	case VCD_CODEC_H263:
+		min_dpb = 3;
+	break;
+	default:
+	case VCD_CODEC_MPEG1:
+	case VCD_CODEC_MPEG2:
+	case VCD_CODEC_MPEG4:
+	case VCD_CODEC_DIVX_3:
+	case VCD_CODEC_DIVX_4:
+	case VCD_CODEC_DIVX_5:
+	case VCD_CODEC_DIVX_6:
+	case VCD_CODEC_XVID:
+	case VCD_CODEC_VC1:
+	case VCD_CODEC_VC1_RCV:
+		min_dpb = 4;
+		if (decoder->post_filter.post_filter)
+			min_dpb *= 2;
+	break;
+	}
+	return min_dpb;
+}
+
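+/*
+ * Registers the client-supplied decoded picture buffers.  Every buffer
+ * must meet the advertised alignment and size; the descriptors are then
+ * copied into decoder->dp_buf and the DPB ownership masks are cleared.
+ */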
+static u32 ddl_set_dec_buffers(struct ddl_decoder_data *decoder,
+	struct ddl_property_dec_pic_buffers *dpb)
+{
+	u32 vcd_status  = VCD_S_SUCCESS, loopc;
+
+
+	for (loopc = 0; !vcd_status &&
+		loopc < dpb->no_of_dec_pic_buf; ++loopc) {
+		if ((!DDL_ADDR_IS_ALIGNED(dpb->dec_pic_buffers[loopc].
+			vcd_frm.physical,
+			decoder->client_output_buf_req.align)) ||
+			(dpb->dec_pic_buffers[loopc].vcd_frm.alloc_len <
+			decoder->client_output_buf_req.sz))
+			vcd_status = VCD_ERR_ILLEGAL_PARM;
+	}
+	if (vcd_status) {
+		DDL_MSG_ERROR("ddl_set_prop:"
+			"Dpb_align_fail_or_alloc_size_small");
+		return vcd_status;
+	}
+	if (decoder->dp_buf.no_of_dec_pic_buf) {
+		kfree(decoder->dp_buf.dec_pic_buffers);
+		decoder->dp_buf.no_of_dec_pic_buf = 0;
+	}
+	decoder->dp_buf.dec_pic_buffers =
+		kmalloc(dpb->no_of_dec_pic_buf *
+			sizeof(struct ddl_frame_data_tag), GFP_KERNEL);
+	if (!decoder->dp_buf.dec_pic_buffers) {
+		DDL_MSG_ERROR("ddl_dec_set_prop:Dpb_container_alloc_failed");
+		return VCD_ERR_ALLOC_FAIL;
+	}
+	decoder->dp_buf.no_of_dec_pic_buf = dpb->no_of_dec_pic_buf;
+	for (loopc = 0; loopc < dpb->no_of_dec_pic_buf; ++loopc)
+		decoder->dp_buf.dec_pic_buffers[loopc] =
+			dpb->dec_pic_buffers[loopc];
+	decoder->dpb_mask.client_mask = 0;
+	decoder->dpb_mask.hw_mask = 0;
+	decoder->dynamic_prop_change = 0;
+	return VCD_S_SUCCESS;
+}
+
+void ddl_set_initial_default_values(struct ddl_client_context *ddl)
+{
+
+	if (ddl->decoding) {
+		ddl->codec_data.decoder.codec.codec = VCD_CODEC_MPEG4;
+		ddl_set_default_dec_property(ddl);
+	} else {
+		struct ddl_encoder_data *encoder =
+			&(ddl->codec_data.encoder);
+		encoder->codec.codec = VCD_CODEC_MPEG4;
+		encoder->target_bit_rate.target_bitrate = 64000;
+		encoder->frame_size.width = VCD_DDL_TEST_DEFAULT_WIDTH;
+		encoder->frame_size.height = VCD_DDL_TEST_DEFAULT_HEIGHT;
+		encoder->frame_size.scan_lines =
+			VCD_DDL_TEST_DEFAULT_HEIGHT;
+		encoder->frame_size.stride = VCD_DDL_TEST_DEFAULT_WIDTH;
+		encoder->frame_rate.fps_numerator = DDL_INITIAL_FRAME_RATE;
+		encoder->frame_rate.fps_denominator = 1;
+		ddl_set_default_enc_property(ddl);
+	}
+}
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.c
new file mode 100644
index 0000000..70e1de1
--- /dev/null
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.c
@@ -0,0 +1,678 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "vcd_ddl_shared_mem.h"
+
+#define VIDC_SM_EXTENDED_DECODE_STATUS_ADDR    0x0000
+#define VIDC_SM_EXT_DEC_STATUS_RESOLUTION_CHANGE_BMSK 0x1
+#define VIDC_SM_EXT_DEC_STATUS_RESOLUTION_CHANGE_SHFT 0x0
+#define VIDC_SM_EXT_DEC_STATUS_MORE_FIELD_NEEDED_BMSK 0x4
+#define VIDC_SM_EXT_DEC_STATUS_MORE_FIELD_NEEDED_SHFT 0x2
+
+#define VIDC_SM_SET_FRAME_TAG_ADDR             0x0004
+#define VIDC_SM_GET_FRAME_TAG_TOP_ADDR         0x0008
+#define VIDC_SM_GET_FRAME_TAG_BOTTOM_ADDR      0x000c
+#define VIDC_SM_PIC_TIME_TOP_ADDR              0x0010
+#define VIDC_SM_PIC_TIME_BOTTOM_ADDR           0x0014
+#define VIDC_SM_START_BYTE_NUM_ADDR            0x0018
+
+#define VIDC_SM_CROP_INFO1_ADDR                0x0020
+#define VIDC_SM_CROP_INFO1_RIGHT_OFFSET_BMSK   0xffff0000
+#define VIDC_SM_CROP_INFO1_RIGHT_OFFSET_SHFT   16
+#define VIDC_SM_CROP_INFO1_LEFT_OFFSET_BMSK    0x0000ffff
+#define VIDC_SM_CROP_INFO1_LEFT_OFFSET_SHFT    0
+
+#define VIDC_SM_CROP_INFO2_ADDR                0x0024
+#define VIDC_SM_CROP_INFO2_BOTTOM_OFFSET_BMSK  0xffff0000
+#define VIDC_SM_CROP_INFO2_BOTTOM_OFFSET_SHFT  16
+#define VIDC_SM_CROP_INFO2_TOP_OFFSET_BMSK     0x0000ffff
+#define VIDC_SM_CROP_INFO2_TOP_OFFSET_SHFT     0
+
+#define VIDC_SM_DISP_PIC_PROFILE_ADDR                       0x007c
+#define VIDC_SM_DISP_PIC_PROFILE_DISP_PIC_LEVEL_BMASK       0x0000ff00
+#define VIDC_SM_DISP_PIC_PROFILE_DISP_PIC_LEVEL_SHFT        8
+#define VIDC_SM_DISP_PIC_PROFILE_DISP_PIC_PROFILE_BMASK     0x0000001f
+#define VIDC_SM_DISP_PIC_PROFILE_DISP_PIC_PROFILE_SHFT      0
+
+#define VIDC_SM_DISP_PIC_FRAME_TYPE_ADDR                    0x00c0
+#define VIDC_SM_DISP_PIC_FRAME_TYPE_BMSK                    0x00000003
+#define VIDC_SM_DISP_PIC_FRAME_TYPE_SHFT                    0
+
+#define VIDC_SM_FREE_LUMA_DPB_ADDR                          0x00c4
+#define VIDC_SM_FREE_LUMA_DPB_BMSK                          0xffffffff
+#define VIDC_SM_FREE_LUMA_DPB_SHFT                          0
+
+#define VIDC_SM_FREE_LUMA_DPB_DEC_ORDER_ADDR                0x00fc
+#define VIDC_SM_FREE_LUMA_DPB_DEC_ORDER_BMSK                0xffffffff
+#define VIDC_SM_FREE_LUMA_DPB_DEC_ORDER_SHFT                0
+
+#define VIDC_SM_DEC_ORDER_WIDTH_ADDR                        0x00e8
+#define VIDC_SM_DEC_ORDER_WIDTH_BMSK                        0xffffffff
+#define VIDC_SM_DEC_ORDER_WIDTH_SHFT                        0
+
+#define VIDC_SM_DEC_ORDER_HEIGHT_ADDR                       0x00ec
+#define VIDC_SM_DEC_ORDER_HEIGHT_BMSK                       0xffffffff
+#define VIDC_SM_DEC_ORDER_HEIGHT_SHFT                       0
+
+#define VIDC_SM_DEC_CROP_INFO1_ADDR                         0x00f4
+#define VIDC_SM_DEC_CROP_INFO1_RIGHT_OFFSET_BMSK            0xffff0000
+#define VIDC_SM_DEC_CROP_INFO1_RIGHT_OFFSET_SHFT            16
+#define VIDC_SM_DEC_CROP_INFO1_LEFT_OFFSET_BMSK             0x0000ffff
+#define VIDC_SM_DEC_CROP_INFO1_LEFT_OFFSET_SHFT             0
+
+#define VIDC_SM_DEC_CROP_INFO2_ADDR                         0x00f8
+#define VIDC_SM_DEC_CROP_INFO2_BOTTOM_OFFSET_BMSK           0xffff0000
+#define VIDC_SM_DEC_CROP_INFO2_BOTTOM_OFFSET_SHFT           16
+#define VIDC_SM_DEC_CROP_INFO2_TOP_OFFSET_BMSK              0x0000ffff
+#define VIDC_SM_DEC_CROP_INFO2_TOP_OFFSET_SHFT              0
+
+#define VIDC_SM_IDR_DECODING_ONLY_ADDR                      0x0108
+#define VIDC_SM_IDR_DECODING_ONLY_BMSK                      0x00000001
+#define VIDC_SM_IDR_DECODING_ONLY_SHIFT                     0
+
+#define VIDC_SM_ENC_EXT_CTRL_ADDR                    0x0028
+#define VIDC_SM_ENC_EXT_CTRL_VBV_BUFFER_SIZE_BMSK    0xffff0000
+#define VIDC_SM_ENC_EXT_CTRL_VBV_BUFFER_SIZE_SHFT    16
+#define VIDC_SM_ENC_EXT_CTRL_SEQ_HDR_CTRL_BMSK       0x8
+#define VIDC_SM_ENC_EXT_CTRL_SEQ_HDR_CTRL_SHFT       3
+#define VIDC_SM_ENC_EXT_CTRL_FRAME_SKIP_ENABLE_BMSK  0x6
+#define VIDC_SM_ENC_EXT_CTRL_FRAME_SKIP_ENABLE_SHFT  1
+#define VIDC_SM_ENC_EXT_CTRL_HEC_ENABLE_BMSK         0x1
+#define VIDC_SM_ENC_EXT_CTRL_HEC_ENABLE_SHFT         0
+
+#define VIDC_SM_ENC_PARAM_CHANGE_ADDR                0x002c
+#define VIDC_SM_ENC_PARAM_CHANGE_RC_BIT_RATE_BMSK    0x4
+#define VIDC_SM_ENC_PARAM_CHANGE_RC_BIT_RATE_SHFT    2
+#define VIDC_SM_ENC_PARAM_CHANGE_RC_FRAME_RATE_BMSK  0x2
+#define VIDC_SM_ENC_PARAM_CHANGE_RC_FRAME_RATE_SHFT  1
+#define VIDC_SM_ENC_PARAM_CHANGE_I_PERIOD_BMSK       0x1
+#define VIDC_SM_ENC_PARAM_CHANGE_I_PERIOD_SHFT       0
+
+#define VIDC_SM_ENC_VOP_TIMING_ADDR                  0x0030
+#define VIDC_SM_ENC_VOP_TIMING_ENABLE_BMSK           0x80000000
+#define VIDC_SM_ENC_VOP_TIMING_ENABLE_SHFT           31
+#define VIDC_SM_ENC_VOP_TIMING_TIME_RESOLUTION_BMSK  0x7fff0000
+#define VIDC_SM_ENC_VOP_TIMING_TIME_RESOLUTION_SHFT  16
+#define VIDC_SM_ENC_VOP_TIMING_FRAME_DELTA_BMSK      0x0000ffff
+#define VIDC_SM_ENC_VOP_TIMING_FRAME_DELTA_SHFT      0
+
+#define VIDC_SM_ENC_HEC_PERIOD_ADDR                  0x0034
+
+#define VIDC_SM_H264_REF_L0_ADDR                    0x005c
+#define VIDC_SM_H264_REF_L0_CHRO_BTM_FLG_1_BMSK     0x80000000
+#define VIDC_SM_H264_REF_L0_CHRO_BTM_FLG_1_SHFT     31
+#define VIDC_SM_H264_REF_L0_CHRO_REF_1_BMSK         0x7f000000
+#define VIDC_SM_H264_REF_L0_CHRO_REF_1_SHFT         24
+#define VIDC_SM_H264_REF_L0_CHRO_BTM_FLG_0_BMSK     0x00800000
+#define VIDC_SM_H264_REF_L0_CHRO_BTM_FLG_0_SHFT     23
+#define VIDC_SM_H264_REF_L0_CHRO_REF_0_BMSK         0x007f0000
+#define VIDC_SM_H264_REF_L0_CHRO_REF_0_SHFT         16
+#define VIDC_SM_H264_REF_L0_LUMA_BTM_FLG_1_BMSK     0x00008000
+#define VIDC_SM_H264_REF_L0_LUMA_BTM_FLG_1_SHFT     15
+#define VIDC_SM_H264_REF_L0_LUMA_REF_1_BMSK         0x00007f00
+#define VIDC_SM_H264_REF_L0_LUMA_REF_1_SHFT         8
+#define VIDC_SM_H264_REF_L0_LUMA_BTM_FLG_0_BMSK     0x00000080
+#define VIDC_SM_H264_REF_L0_LUMA_BTM_FLG_0_SHFT     7
+#define VIDC_SM_H264_REF_L0_LUMA_REF_0_BMSK         0x0000007f
+#define VIDC_SM_H264_REF_L0_LUMA_REF_0_SHFT         0
+
+#define VIDC_SM_H264_REF_L1_ADDR                  0x0060
+#define VIDC_SM_H264_REF_L1_CHRO_BTM_FLG_0_BMSK   0x00800000
+#define VIDC_SM_H264_REF_L1_CHRO_BTM_FLG_0_SHFT   23
+#define VIDC_SM_H264_REF_L1_CHRO_REF_0_BMSK       0x007f0000
+#define VIDC_SM_H264_REF_L1_CHRO_REF_0_SHFT       16
+#define VIDC_SM_H264_REF_L1_LUMA_BTM_FLG_0_BMSK   0x00000080
+#define VIDC_SM_H264_REF_L1_LUMA_BTM_FLG_0_SHFT   7
+#define VIDC_SM_H264_REF_L1_LUMA_REF_0_BMSK       0x0000007f
+#define VIDC_SM_H264_REF_L1_LUMA_REF_0_SHFT       0
+
+#define VIDC_SM_P_B_FRAME_QP_ADDR               0x0070
+#define VIDC_SM_P_B_FRAME_QP_B_FRAME_QP_BMASK   0x00000fc0
+#define VIDC_SM_P_B_FRAME_QP_B_FRAME_QP_SHFT    6
+#define VIDC_SM_P_B_FRAME_QP_P_FRAME_QP_BMASK   0x0000003f
+#define VIDC_SM_P_B_FRAME_QP_P_FRAME_QP_SHFT    0
+
+#define VIDC_SM_NEW_RC_BIT_RATE_ADDR           0x0090
+#define VIDC_SM_NEW_RC_BIT_RATE_VALUE_BMASK    0xffffffff
+#define VIDC_SM_NEW_RC_BIT_RATE_VALUE_SHFT     0
+#define VIDC_SM_NEW_RC_FRAME_RATE_ADDR         0x0094
+#define VIDC_SM_NEW_RC_FRAME_RATE_VALUE_BMASK  0xffffffff
+#define VIDC_SM_NEW_RC_FRAME_RATE_VALUE_SHFT   0
+#define VIDC_SM_NEW_I_PERIOD_ADDR              0x0098
+#define VIDC_SM_NEW_I_PERIOD_VALUE_BMASK       0xffffffff
+#define VIDC_SM_NEW_I_PERIOD_VALUE_SHFT        0
+
+
+#define VIDC_SM_ALLOCATED_LUMA_DPB_SIZE_ADDR               0x0064
+#define VIDC_SM_ALLOCATED_CHROMA_DPB_SIZE_ADDR             0x0068
+#define VIDC_SM_ALLOCATED_MV_SIZE_ADDR                     0x006c
+#define VIDC_SM_FLUSH_CMD_TYPE_ADDR                        0x0080
+#define VIDC_SM_FLUSH_CMD_INBUF1_ADDR                      0x0084
+#define VIDC_SM_FLUSH_CMD_INBUF2_ADDR                      0x0088
+#define VIDC_SM_FLUSH_CMD_OUTBUF_ADDR                      0x008c
+#define VIDC_SM_MIN_LUMA_DPB_SIZE_ADDR                     0x00b0
+#define VIDC_SM_MIN_CHROMA_DPB_SIZE_ADDR                   0x00bc
+
+
+#define VIDC_SM_METADATA_ENABLE_ADDR                 0x0038
+#define VIDC_SM_METADATA_ENABLE_EXTRADATA_BMSK       0x40
+#define VIDC_SM_METADATA_ENABLE_EXTRADATA_SHFT       6
+#define VIDC_SM_METADATA_ENABLE_ENC_SLICE_SIZE_BMSK  0x20
+#define VIDC_SM_METADATA_ENABLE_ENC_SLICE_SIZE_SHFT  5
+#define VIDC_SM_METADATA_ENABLE_VUI_BMSK             0x10
+#define VIDC_SM_METADATA_ENABLE_VUI_SHFT             4
+#define VIDC_SM_METADATA_ENABLE_SEI_VIDC_BMSK         0x8
+#define VIDC_SM_METADATA_ENABLE_SEI_VIDC_SHFT         3
+#define VIDC_SM_METADATA_ENABLE_VC1_PARAM_BMSK       0x4
+#define VIDC_SM_METADATA_ENABLE_VC1_PARAM_SHFT       2
+#define VIDC_SM_METADATA_ENABLE_CONCEALED_MB_BMSK    0x2
+#define VIDC_SM_METADATA_ENABLE_CONCEALED_MB_SHFT    1
+#define VIDC_SM_METADATA_ENABLE_QP_BMSK              0x1
+#define VIDC_SM_METADATA_ENABLE_QP_SHFT              0
+
+
+#define VIDC_SM_METADATA_STATUS_ADDR         0x003c
+#define VIDC_SM_METADATA_STATUS_STATUS_BMSK  0x1
+#define VIDC_SM_METADATA_STATUS_STATUS_SHFT  0
+
+#define VIDC_SM_METADATA_DISPLAY_INDEX_ADDR   0x0040
+#define VIDC_SM_EXT_METADATA_START_ADDR_ADDR  0x0044
+
+#define VIDC_SM_PUT_EXTRADATA_ADDR      0x0048
+#define VIDC_SM_PUT_EXTRADATA_PUT_BMSK  0x1
+#define VIDC_SM_PUT_EXTRADATA_PUT_SHFT  0
+
+#define VIDC_SM_EXTRADATA_ADDR_ADDR     0x004c
+
+#define VIDC_SM_CHROMA_ADDR_CHANGE_ADDR   0x0148
+#define VIDC_SM_CHROMA_ADDR_CHANGE_BMASK  0x00000001
+#define VIDC_SM_CHROMA_ADDR_CHANGE_SHFT   0
+
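+/*
+ * The offsets above index 32-bit fields in the shared memory block that
+ * carries parameters between the driver and the video core firmware; the
+ * helpers below read and write one word at a byte offset from the
+ * buffer's aligned virtual address.
+ */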
+#define DDL_MEM_WRITE_32(base, offset, val) ddl_mem_write_32(\
+	(u32 *) ((u8 *) (base)->align_virtual_addr + (offset)), (val))
+#define DDL_MEM_READ_32(base, offset) ddl_mem_read_32(\
+	(u32 *) ((u8 *) (base)->align_virtual_addr + (offset)))
+
+#define DDL_SHARED_MEM_11BIT_RIGHT_SHIFT  11
+
+static void ddl_mem_write_32(u32 *addr, u32 data)
+{
+	*addr = data;
+}
+
+static u32 ddl_mem_read_32(u32 *addr)
+{
+	return *addr;
+}
+
+void vidc_sm_get_extended_decode_status(struct ddl_buf_addr *shared_mem,
+	u32 *more_field_needed,
+	u32 *resl_change)
+{
+	u32 decode_status = DDL_MEM_READ_32(shared_mem,
+					VIDC_SM_EXTENDED_DECODE_STATUS_ADDR);
+	if (more_field_needed)
+		*more_field_needed =
+				VIDC_GETFIELD(decode_status,
+				VIDC_SM_EXT_DEC_STATUS_MORE_FIELD_NEEDED_BMSK,
+				VIDC_SM_EXT_DEC_STATUS_MORE_FIELD_NEEDED_SHFT);
+	if (resl_change)
+		*resl_change =
+				VIDC_GETFIELD(decode_status,
+				VIDC_SM_EXT_DEC_STATUS_RESOLUTION_CHANGE_BMSK,
+				VIDC_SM_EXT_DEC_STATUS_RESOLUTION_CHANGE_SHFT);
+}
+
+void vidc_sm_set_frame_tag(struct ddl_buf_addr *shared_mem,
+	u32 frame_tag)
+{
+	DDL_MEM_WRITE_32(shared_mem, VIDC_SM_SET_FRAME_TAG_ADDR, frame_tag);
+}
+
+void vidc_sm_get_frame_tags(struct ddl_buf_addr *shared_mem,
+	u32  *pn_frame_tag_top, u32 *pn_frame_tag_bottom)
+{
+	*pn_frame_tag_top = DDL_MEM_READ_32(shared_mem,
+				VIDC_SM_GET_FRAME_TAG_TOP_ADDR);
+	*pn_frame_tag_bottom = DDL_MEM_READ_32(shared_mem,
+					VIDC_SM_GET_FRAME_TAG_BOTTOM_ADDR);
+}
+
+void vidc_sm_get_picture_times(struct ddl_buf_addr *shared_mem,
+	u32 *pn_time_top, u32 *pn_time_bottom)
+{
+	*pn_time_top = DDL_MEM_READ_32(shared_mem, VIDC_SM_PIC_TIME_TOP_ADDR);
+	*pn_time_bottom = DDL_MEM_READ_32(shared_mem,
+						VIDC_SM_PIC_TIME_BOTTOM_ADDR);
+}
+
+void vidc_sm_set_start_byte_number(struct ddl_buf_addr *shared_mem,
+	u32 byte_num)
+{
+	DDL_MEM_WRITE_32(shared_mem, VIDC_SM_START_BYTE_NUM_ADDR, byte_num);
+}
+
+void vidc_sm_get_crop_info(struct ddl_buf_addr *shared_mem,
+	u32 *pn_left, u32 *pn_right, u32 *pn_top, u32 *pn_bottom)
+{
+	u32 info1, info2;
+
+	info1 = DDL_MEM_READ_32(shared_mem, VIDC_SM_CROP_INFO1_ADDR);
+
+	*pn_left = VIDC_GETFIELD(info1, VIDC_SM_CROP_INFO1_LEFT_OFFSET_BMSK,
+					VIDC_SM_CROP_INFO1_LEFT_OFFSET_SHFT);
+	*pn_right = VIDC_GETFIELD(info1, VIDC_SM_CROP_INFO1_RIGHT_OFFSET_BMSK,
+					VIDC_SM_CROP_INFO1_RIGHT_OFFSET_SHFT);
+	info2 = DDL_MEM_READ_32(shared_mem, VIDC_SM_CROP_INFO2_ADDR);
+	*pn_top = VIDC_GETFIELD(info2, VIDC_SM_CROP_INFO2_TOP_OFFSET_BMSK,
+					VIDC_SM_CROP_INFO2_TOP_OFFSET_SHFT);
+	*pn_bottom = VIDC_GETFIELD(info2,
+					VIDC_SM_CROP_INFO2_BOTTOM_OFFSET_BMSK,
+					VIDC_SM_CROP_INFO2_BOTTOM_OFFSET_SHFT);
+}
+
+void vidc_sm_get_displayed_picture_frame(struct ddl_buf_addr
+	*shared_mem, u32  *n_disp_picture_frame)
+{
+	u32 disp_pict_frame;
+
+	disp_pict_frame = DDL_MEM_READ_32(shared_mem,
+					VIDC_SM_DISP_PIC_FRAME_TYPE_ADDR);
+	*n_disp_picture_frame = VIDC_GETFIELD(disp_pict_frame,
+			VIDC_SM_DISP_PIC_FRAME_TYPE_BMSK,
+			VIDC_SM_DISP_PIC_FRAME_TYPE_SHFT);
+}
+void vidc_sm_get_available_luma_dpb_address(struct ddl_buf_addr
+	*shared_mem, u32 *pn_free_luma_dpb_address)
+{
+	*pn_free_luma_dpb_address = DDL_MEM_READ_32(shared_mem,
+		VIDC_SM_FREE_LUMA_DPB_ADDR);
+}
+
+void vidc_sm_get_available_luma_dpb_dec_order_address(
+	struct ddl_buf_addr	*shared_mem,
+	u32 *pn_free_luma_dpb_address)
+{
+	*pn_free_luma_dpb_address = DDL_MEM_READ_32(shared_mem,
+		VIDC_SM_FREE_LUMA_DPB_DEC_ORDER_ADDR);
+}
+
+void vidc_sm_get_dec_order_resl(
+	struct ddl_buf_addr *shared_mem, u32 *width, u32 *height)
+{
+	*width = DDL_MEM_READ_32(shared_mem,
+			VIDC_SM_DEC_ORDER_WIDTH_ADDR);
+	*height = DDL_MEM_READ_32(shared_mem,
+			VIDC_SM_DEC_ORDER_HEIGHT_ADDR);
+}
+
+void vidc_sm_get_dec_order_crop_info(
+	struct ddl_buf_addr *shared_mem, u32 *left,
+	u32 *right, u32 *top, u32 *bottom)
+{
+	u32 crop_data;
+	crop_data = DDL_MEM_READ_32(shared_mem,
+		VIDC_SM_DEC_CROP_INFO1_ADDR);
+	*left = VIDC_GETFIELD(crop_data,
+		VIDC_SM_DEC_CROP_INFO1_LEFT_OFFSET_BMSK,
+		VIDC_SM_DEC_CROP_INFO1_LEFT_OFFSET_SHFT);
+	*right = VIDC_GETFIELD(crop_data,
+		VIDC_SM_DEC_CROP_INFO1_RIGHT_OFFSET_BMSK,
+		VIDC_SM_DEC_CROP_INFO1_RIGHT_OFFSET_SHFT);
+	crop_data = DDL_MEM_READ_32(shared_mem,
+		VIDC_SM_DEC_CROP_INFO2_ADDR);
+	*top = VIDC_GETFIELD(crop_data,
+		VIDC_SM_DEC_CROP_INFO2_TOP_OFFSET_BMSK,
+		VIDC_SM_DEC_CROP_INFO2_TOP_OFFSET_SHFT);
+	*bottom = VIDC_GETFIELD(crop_data,
+		VIDC_SM_DEC_CROP_INFO2_BOTTOM_OFFSET_BMSK,
+		VIDC_SM_DEC_CROP_INFO2_BOTTOM_OFFSET_SHFT);
+}
+
+void vidc_sm_set_extended_encoder_control(struct ddl_buf_addr
+	*shared_mem, u32 hec_enable,
+	enum VIDC_SM_frame_skip frame_skip_mode,
+	u32 seq_hdr_in_band, u32 vbv_buffer_size)
+{
+	u32 enc_ctrl;
+
+	enc_ctrl = VIDC_SETFIELD((hec_enable) ? 1 : 0,
+			VIDC_SM_ENC_EXT_CTRL_HEC_ENABLE_SHFT,
+			VIDC_SM_ENC_EXT_CTRL_HEC_ENABLE_BMSK) |
+			VIDC_SETFIELD((u32) frame_skip_mode,
+			VIDC_SM_ENC_EXT_CTRL_FRAME_SKIP_ENABLE_SHFT,
+			VIDC_SM_ENC_EXT_CTRL_FRAME_SKIP_ENABLE_BMSK) |
+			VIDC_SETFIELD((seq_hdr_in_band) ? 1 : 0,
+			VIDC_SM_ENC_EXT_CTRL_SEQ_HDR_CTRL_SHFT,
+			VIDC_SM_ENC_EXT_CTRL_SEQ_HDR_CTRL_BMSK) |
+			VIDC_SETFIELD(vbv_buffer_size,
+			VIDC_SM_ENC_EXT_CTRL_VBV_BUFFER_SIZE_SHFT,
+			VIDC_SM_ENC_EXT_CTRL_VBV_BUFFER_SIZE_BMSK);
+	DDL_MEM_WRITE_32(shared_mem, VIDC_SM_ENC_EXT_CTRL_ADDR, enc_ctrl);
+}
+
+void vidc_sm_set_encoder_param_change(struct ddl_buf_addr *shared_mem,
+	u32 bit_rate_chg, u32 frame_rate_chg, u32 i_period_chg)
+{
+	u32 enc_param_chg;
+
+	enc_param_chg = VIDC_SETFIELD((bit_rate_chg) ? 1 : 0,
+				VIDC_SM_ENC_PARAM_CHANGE_RC_BIT_RATE_SHFT,
+				VIDC_SM_ENC_PARAM_CHANGE_RC_BIT_RATE_BMSK) |
+				VIDC_SETFIELD((frame_rate_chg) ? 1 : 0,
+				VIDC_SM_ENC_PARAM_CHANGE_RC_FRAME_RATE_SHFT,
+				VIDC_SM_ENC_PARAM_CHANGE_RC_FRAME_RATE_BMSK) |
+				VIDC_SETFIELD((i_period_chg) ? 1 : 0,
+				VIDC_SM_ENC_PARAM_CHANGE_I_PERIOD_SHFT,
+				VIDC_SM_ENC_PARAM_CHANGE_I_PERIOD_BMSK);
+	DDL_MEM_WRITE_32(shared_mem, VIDC_SM_ENC_PARAM_CHANGE_ADDR,
+		enc_param_chg);
+}
+
+void vidc_sm_set_encoder_vop_time(struct ddl_buf_addr *shared_mem,
+	u32 vop_time_enable, u32 time_resolution, u32 frame_delta)
+{
+	u32 vop_time;
+
+	vop_time = VIDC_SETFIELD((vop_time_enable) ? 1 : 0,
+			VIDC_SM_ENC_VOP_TIMING_ENABLE_SHFT,
+			VIDC_SM_ENC_VOP_TIMING_ENABLE_BMSK) |
+			VIDC_SETFIELD(time_resolution,
+			VIDC_SM_ENC_VOP_TIMING_TIME_RESOLUTION_SHFT,
+			VIDC_SM_ENC_VOP_TIMING_TIME_RESOLUTION_BMSK) |
+			VIDC_SETFIELD(frame_delta,
+			VIDC_SM_ENC_VOP_TIMING_FRAME_DELTA_SHFT,
+			VIDC_SM_ENC_VOP_TIMING_FRAME_DELTA_BMSK);
+	DDL_MEM_WRITE_32(shared_mem, VIDC_SM_ENC_VOP_TIMING_ADDR, vop_time);
+}
+
+void vidc_sm_set_encoder_hec_period(struct ddl_buf_addr *shared_mem,
+	u32 hec_period)
+{
+	DDL_MEM_WRITE_32(shared_mem, VIDC_SM_ENC_HEC_PERIOD_ADDR,
+		hec_period);
+}
+
+void vidc_sm_get_h264_encoder_reference_list0(struct ddl_buf_addr
+	*shared_mem, enum VIDC_SM_ref_picture *pe_luma_picture0,
+	u32 *pn_luma_picture_index0, enum VIDC_SM_ref_picture
+		*pe_luma_picture1, u32 *pn_luma_picture_index1,
+	enum VIDC_SM_ref_picture *pe_chroma_picture0,
+	u32 *pn_chroma_picture_index0,
+	enum VIDC_SM_ref_picture *pe_chroma_picture1,
+	u32 *pn_chroma_picture_index1)
+{
+	u32 ref_list;
+
+	ref_list = DDL_MEM_READ_32(shared_mem, VIDC_SM_H264_REF_L0_ADDR);
+
+	*pe_luma_picture0 = (enum VIDC_SM_ref_picture)
+				VIDC_GETFIELD(ref_list,
+				VIDC_SM_H264_REF_L0_LUMA_BTM_FLG_0_BMSK,
+				VIDC_SM_H264_REF_L0_LUMA_BTM_FLG_0_SHFT);
+	*pn_luma_picture_index0 =
+				VIDC_GETFIELD(ref_list,
+				VIDC_SM_H264_REF_L0_LUMA_REF_0_BMSK,
+				VIDC_SM_H264_REF_L0_LUMA_REF_0_SHFT);
+	*pe_luma_picture1 = (enum VIDC_SM_ref_picture)
+				VIDC_GETFIELD(ref_list,
+				VIDC_SM_H264_REF_L0_LUMA_BTM_FLG_1_BMSK,
+				VIDC_SM_H264_REF_L0_LUMA_BTM_FLG_1_SHFT);
+	*pn_luma_picture_index1 = VIDC_GETFIELD(ref_list,
+				VIDC_SM_H264_REF_L0_LUMA_REF_1_BMSK,
+				VIDC_SM_H264_REF_L0_LUMA_REF_1_SHFT);
+	*pe_chroma_picture0 = (enum VIDC_SM_ref_picture)
+				VIDC_GETFIELD(ref_list,
+				VIDC_SM_H264_REF_L0_CHRO_BTM_FLG_0_BMSK,
+				VIDC_SM_H264_REF_L0_CHRO_BTM_FLG_0_SHFT);
+	*pn_chroma_picture_index0 = VIDC_GETFIELD(ref_list,
+				VIDC_SM_H264_REF_L0_CHRO_REF_0_BMSK,
+				VIDC_SM_H264_REF_L0_CHRO_REF_0_SHFT);
+	*pe_chroma_picture1 = (enum VIDC_SM_ref_picture)
+				VIDC_GETFIELD(ref_list,
+				VIDC_SM_H264_REF_L0_CHRO_BTM_FLG_1_BMSK,
+				VIDC_SM_H264_REF_L0_CHRO_BTM_FLG_1_SHFT);
+	*pn_chroma_picture_index1 =
+				VIDC_GETFIELD(ref_list,
+				VIDC_SM_H264_REF_L0_CHRO_REF_1_BMSK,
+				VIDC_SM_H264_REF_L0_CHRO_REF_1_SHFT);
+}
+
+void vidc_sm_get_h264_encoder_reference_list1(struct ddl_buf_addr
+	*shared_mem, enum VIDC_SM_ref_picture *pe_luma_picture,
+	u32 *pn_luma_picture_index,
+	enum VIDC_SM_ref_picture *pe_chroma_picture,
+	u32 *pn_chroma_picture_index)
+{
+	u32 ref_list;
+
+	ref_list = DDL_MEM_READ_32(shared_mem, VIDC_SM_H264_REF_L1_ADDR);
+
+	*pe_luma_picture = (enum VIDC_SM_ref_picture)
+				VIDC_GETFIELD(ref_list,
+				VIDC_SM_H264_REF_L1_LUMA_BTM_FLG_0_BMSK,
+				VIDC_SM_H264_REF_L1_LUMA_BTM_FLG_0_SHFT);
+	*pn_luma_picture_index =
+				VIDC_GETFIELD(ref_list,
+				VIDC_SM_H264_REF_L1_LUMA_REF_0_BMSK,
+				VIDC_SM_H264_REF_L1_LUMA_REF_0_SHFT);
+	*pe_chroma_picture = (enum VIDC_SM_ref_picture)
+				VIDC_GETFIELD(ref_list,
+				VIDC_SM_H264_REF_L1_CHRO_BTM_FLG_0_BMSK,
+				VIDC_SM_H264_REF_L1_CHRO_BTM_FLG_0_SHFT);
+	*pn_chroma_picture_index = VIDC_GETFIELD(ref_list,
+				VIDC_SM_H264_REF_L1_CHRO_REF_0_BMSK,
+				VIDC_SM_H264_REF_L1_CHRO_REF_0_SHFT);
+}
+
+void vidc_sm_set_allocated_dpb_size(struct ddl_buf_addr *shared_mem,
+		u32 y_size, u32 c_size)
+{
+	DDL_MEM_WRITE_32(shared_mem, VIDC_SM_ALLOCATED_LUMA_DPB_SIZE_ADDR,
+		y_size);
+	DDL_MEM_WRITE_32(shared_mem, VIDC_SM_ALLOCATED_CHROMA_DPB_SIZE_ADDR,
+		c_size);
+}
+
+void vidc_sm_set_allocated_h264_mv_size(struct ddl_buf_addr *shared_mem,
+	u32 mv_size)
+{
+	DDL_MEM_WRITE_32(shared_mem, VIDC_SM_ALLOCATED_MV_SIZE_ADDR,
+		mv_size);
+}
+
+void vidc_sm_get_min_yc_dpb_sizes(struct ddl_buf_addr *shared_mem,
+	u32 *pn_min_luma_dpb_size, u32 *pn_min_chroma_dpb_size)
+{
+	*pn_min_luma_dpb_size = DDL_MEM_READ_32(shared_mem,
+		VIDC_SM_MIN_LUMA_DPB_SIZE_ADDR);
+	*pn_min_chroma_dpb_size = DDL_MEM_READ_32(shared_mem,
+		VIDC_SM_MIN_CHROMA_DPB_SIZE_ADDR);
+}
+
+void vidc_sm_set_concealment_color(struct ddl_buf_addr *shared_mem,
+	u32 conceal_ycolor, u32 conceal_ccolor)
+{
+	u32 conceal_color;
+
+	conceal_color = (((conceal_ycolor << 8) & 0xff00) |
+		(conceal_ccolor & 0xff));
+	DDL_MEM_WRITE_32(shared_mem, 0x00f0, conceal_color);
+}
+
+void vidc_sm_set_metadata_enable(struct ddl_buf_addr *shared_mem,
+	u32 extradata_enable, u32 qp_enable, u32 concealed_mb_enable,
+	u32 vc1Param_enable, u32 sei_nal_enable, u32 vui_enable,
+	u32 enc_slice_size_enable)
+{
+	u32 metadata_enable;
+
+	metadata_enable = VIDC_SETFIELD((extradata_enable) ? 1 : 0,
+				VIDC_SM_METADATA_ENABLE_EXTRADATA_SHFT,
+				VIDC_SM_METADATA_ENABLE_EXTRADATA_BMSK) |
+				VIDC_SETFIELD((enc_slice_size_enable) ? 1 : 0,
+				VIDC_SM_METADATA_ENABLE_ENC_SLICE_SIZE_SHFT,
+				VIDC_SM_METADATA_ENABLE_ENC_SLICE_SIZE_BMSK) |
+				VIDC_SETFIELD((vui_enable) ? 1 : 0,
+				VIDC_SM_METADATA_ENABLE_VUI_SHFT,
+				VIDC_SM_METADATA_ENABLE_VUI_BMSK) |
+				VIDC_SETFIELD((sei_nal_enable) ? 1 : 0,
+				VIDC_SM_METADATA_ENABLE_SEI_VIDC_SHFT,
+				VIDC_SM_METADATA_ENABLE_SEI_VIDC_BMSK) |
+				VIDC_SETFIELD((vc1Param_enable) ? 1 : 0,
+				VIDC_SM_METADATA_ENABLE_VC1_PARAM_SHFT,
+				VIDC_SM_METADATA_ENABLE_VC1_PARAM_BMSK) |
+				VIDC_SETFIELD((concealed_mb_enable) ? 1 : 0,
+				VIDC_SM_METADATA_ENABLE_CONCEALED_MB_SHFT,
+				VIDC_SM_METADATA_ENABLE_CONCEALED_MB_BMSK) |
+				VIDC_SETFIELD((qp_enable) ? 1 : 0,
+				VIDC_SM_METADATA_ENABLE_QP_SHFT,
+				VIDC_SM_METADATA_ENABLE_QP_BMSK);
+	DDL_MEM_WRITE_32(shared_mem, VIDC_SM_METADATA_ENABLE_ADDR,
+		metadata_enable);
+}
+
+void vidc_sm_get_metadata_status(struct ddl_buf_addr
+		*shared_mem, u32 *pb_metadata_present)
+{
+	u32 status;
+
+	status = DDL_MEM_READ_32(shared_mem, VIDC_SM_METADATA_STATUS_ADDR);
+	*pb_metadata_present = (u32) VIDC_GETFIELD(status,
+				VIDC_SM_METADATA_STATUS_STATUS_BMSK,
+				VIDC_SM_METADATA_STATUS_STATUS_SHFT);
+}
+
+void vidc_sm_get_metadata_display_index(struct ddl_buf_addr *shared_mem,
+	u32 *pn_display_index)
+{
+	*pn_display_index = DDL_MEM_READ_32(shared_mem,
+					VIDC_SM_METADATA_DISPLAY_INDEX_ADDR);
+}
+
+void vidc_sm_set_metadata_start_address(struct ddl_buf_addr *shared_mem,
+	u32 address)
+{
+	u32 address_shift = address;
+
+	DDL_MEM_WRITE_32(shared_mem, VIDC_SM_EXT_METADATA_START_ADDR_ADDR,
+		address_shift);
+}
+
+void vidc_sm_set_extradata_presence(struct ddl_buf_addr *shared_mem,
+	u32 extradata_present)
+{
+	u32 put_extradata;
+
+	put_extradata = VIDC_SETFIELD((extradata_present) ? 1 : 0,
+				VIDC_SM_PUT_EXTRADATA_PUT_SHFT,
+				VIDC_SM_PUT_EXTRADATA_PUT_BMSK);
+	DDL_MEM_WRITE_32(shared_mem, VIDC_SM_PUT_EXTRADATA_ADDR,
+			put_extradata);
+}
+
+void vidc_sm_set_extradata_addr(struct ddl_buf_addr *shared_mem,
+	u32 extradata_addr)
+{
+	u32 address_shift = extradata_addr;
+
+	DDL_MEM_WRITE_32(shared_mem, VIDC_SM_EXTRADATA_ADDR_ADDR,
+		address_shift);
+}
+
+void vidc_sm_set_pand_b_frame_qp(struct ddl_buf_addr *shared_mem,
+	u32 b_frame_qp, u32 p_frame_qp)
+{
+	u32 nP_B_frame_qp;
+
+	nP_B_frame_qp = VIDC_SETFIELD(b_frame_qp,
+				VIDC_SM_P_B_FRAME_QP_B_FRAME_QP_SHFT,
+				VIDC_SM_P_B_FRAME_QP_B_FRAME_QP_BMASK);
+	nP_B_frame_qp |= VIDC_SETFIELD(p_frame_qp,
+				VIDC_SM_P_B_FRAME_QP_P_FRAME_QP_SHFT,
+				VIDC_SM_P_B_FRAME_QP_P_FRAME_QP_BMASK);
+	DDL_MEM_WRITE_32(shared_mem, VIDC_SM_P_B_FRAME_QP_ADDR,
+		nP_B_frame_qp);
+}
+
+
+void vidc_sm_get_profile_info(struct ddl_buf_addr *shared_mem,
+	struct ddl_profile_info_type *ddl_profile_info)
+{
+	u32 disp_pic_profile;
+
+	disp_pic_profile = DDL_MEM_READ_32(shared_mem,
+		VIDC_SM_DISP_PIC_PROFILE_ADDR);
+	ddl_profile_info->bit_depth_chroma_minus8 =
+		(disp_pic_profile  & 0x00380000) >> 19;
+	ddl_profile_info->bit_depth_luma_minus8 =
+		(disp_pic_profile & 0x00070000) >> 16;
+	ddl_profile_info->pic_profile = VIDC_GETFIELD(
+		disp_pic_profile,
+		VIDC_SM_DISP_PIC_PROFILE_DISP_PIC_PROFILE_BMASK,
+		VIDC_SM_DISP_PIC_PROFILE_DISP_PIC_PROFILE_SHFT);
+	ddl_profile_info->pic_level = VIDC_GETFIELD(
+		disp_pic_profile,
+		VIDC_SM_DISP_PIC_PROFILE_DISP_PIC_LEVEL_BMASK,
+		VIDC_SM_DISP_PIC_PROFILE_DISP_PIC_LEVEL_SHFT);
+	ddl_profile_info->chroma_format_idc =
+		(disp_pic_profile & 0x60) >> 5;
+}
+
+void vidc_sm_set_encoder_new_bit_rate(struct ddl_buf_addr *shared_mem,
+	u32 new_bit_rate)
+{
+	DDL_MEM_WRITE_32(shared_mem, VIDC_SM_NEW_RC_BIT_RATE_ADDR,
+		new_bit_rate);
+}
+
+void vidc_sm_set_encoder_new_frame_rate(struct ddl_buf_addr *shared_mem,
+	u32 new_frame_rate)
+{
+	DDL_MEM_WRITE_32(shared_mem, VIDC_SM_NEW_RC_FRAME_RATE_ADDR,
+		new_frame_rate);
+}
+
+void vidc_sm_set_encoder_new_i_period(struct ddl_buf_addr *shared_mem,
+	u32 new_i_period)
+{
+	DDL_MEM_WRITE_32(shared_mem, VIDC_SM_NEW_I_PERIOD_ADDR,
+		new_i_period);
+}
+void vidc_sm_set_encoder_init_rc_value(struct ddl_buf_addr *shared_mem,
+	u32 new_rc_value)
+{
+	DDL_MEM_WRITE_32(shared_mem, 0x011C, new_rc_value);
+
+}
+void vidc_sm_set_idr_decode_only(struct ddl_buf_addr *shared_mem,
+	u32 enable)
+{
+	u32 idr_decode_only = VIDC_SETFIELD((enable) ? 1 : 0,
+			VIDC_SM_IDR_DECODING_ONLY_SHIFT,
+			VIDC_SM_IDR_DECODING_ONLY_BMSK
+			);
+	DDL_MEM_WRITE_32(shared_mem, VIDC_SM_IDR_DECODING_ONLY_ADDR,
+			idr_decode_only);
+}
+
+void vidc_sm_set_chroma_addr_change(struct ddl_buf_addr *shared_mem,
+	u32 addr_change)
+{
+	u32 chroma_addr_change = VIDC_SETFIELD((addr_change) ? 1 : 0,
+					VIDC_SM_CHROMA_ADDR_CHANGE_SHFT,
+					VIDC_SM_CHROMA_ADDR_CHANGE_BMASK);
+	DDL_MEM_WRITE_32(shared_mem, VIDC_SM_CHROMA_ADDR_CHANGE_ADDR,
+					 chroma_addr_change);
+
+}
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.h b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.h
new file mode 100644
index 0000000..99d9651
--- /dev/null
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.h
@@ -0,0 +1,157 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VCD_DDL_SHARED_MEM_H_
+#define _VCD_DDL_SHARED_MEM_H_
+
+#include "vcd_ddl.h"
+
+#define VIDC_SM_PROFILE_MPEG4_SIMPLE      (0)
+#define VIDC_SM_PROFILE_MPEG4_ADV_SIMPLE  (1)
+
+#define VIDC_SM_PROFILE_H264_BASELINE     (0)
+#define VIDC_SM_PROFILE_H264_MAIN         (1)
+#define VIDC_SM_PROFILE_H264_HIGH         (2)
+
+#define VIDC_SM_PROFILE_H263_BASELINE     (0)
+
+#define VIDC_SM_PROFILE_VC1_SIMPLE        (0)
+#define VIDC_SM_PROFILE_VC1_MAIN          (1)
+#define VIDC_SM_PROFILE_VC1_ADVANCED      (2)
+
+#define VIDC_SM_PROFILE_MPEG2_MAIN        (4)
+#define VIDC_SM_PROFILE_MPEG2_SIMPLE      (5)
+
+#define VIDC_SM_LEVEL_MPEG2_LOW        (10)
+#define VIDC_SM_LEVEL_MPEG2_MAIN        (8)
+#define VIDC_SM_LEVEL_MPEG2_HIGH_1440   (6)
+#define VIDC_SM_LEVEL_MPEG2_HIGH        (4)
+
+#define VIDC_SM_LEVEL_VC1_LOW     (0)
+#define VIDC_SM_LEVEL_VC1_MEDIUM  (2)
+#define VIDC_SM_LEVEL_VC1_HIGH    (4)
+
+#define VIDC_SM_LEVEL_VC1_ADV_0  (0)
+#define VIDC_SM_LEVEL_VC1_ADV_1  (1)
+#define VIDC_SM_LEVEL_VC1_ADV_2  (2)
+#define VIDC_SM_LEVEL_VC1_ADV_3  (3)
+#define VIDC_SM_LEVEL_VC1_ADV_4  (4)
+
+enum VIDC_SM_frame_skip {
+	VIDC_SM_FRAME_SKIP_DISABLE      = 0,
+	VIDC_SM_FRAME_SKIP_ENABLE_LEVEL = 1,
+	VIDC_SM_FRAME_SKIP_ENABLE_VBV   = 2
+};
+enum VIDC_SM_ref_picture {
+	VIDC_SM_REF_PICT_FRAME_OR_TOP_FIELD   = 0,
+	VIDC_SM_REF_PICT_BOTTOM_FIELD         = 1
+};
+
+struct ddl_profile_info_type {
+	u32 bit_depth_chroma_minus8;
+	u32 bit_depth_luma_minus8;
+	u32 pic_level;
+	u32 chroma_format_idc;
+	u32 pic_profile;
+};
+
+void vidc_sm_get_extended_decode_status(struct ddl_buf_addr *shared_mem,
+	u32 *more_field_needed,
+	u32 *resl_change);
+void vidc_sm_set_frame_tag(struct ddl_buf_addr *shared_mem,
+	u32 frame_tag);
+void vidc_sm_get_frame_tags(struct ddl_buf_addr *shared_mem,
+	u32 *pn_frame_tag_top, u32 *pn_frame_tag_bottom);
+void vidc_sm_get_picture_times(struct ddl_buf_addr *shared_mem,
+	u32 *pn_time_top, u32 *pn_time_bottom);
+void vidc_sm_set_start_byte_number(struct ddl_buf_addr *shared_mem,
+	u32 byte_num);
+void vidc_sm_get_crop_info(struct ddl_buf_addr *shared_mem, u32 *pn_left,
+	u32 *pn_right, u32 *pn_top, u32 *pn_bottom);
+void vidc_sm_get_displayed_picture_frame(struct ddl_buf_addr
+	*shared_mem, u32 *n_disp_picture_frame);
+void vidc_sm_get_available_luma_dpb_address(
+	struct ddl_buf_addr *shared_mem, u32 *pn_free_luma_dpb_address);
+void vidc_sm_get_available_luma_dpb_dec_order_address(
+	struct ddl_buf_addr *shared_mem, u32 *pn_free_luma_dpb_address);
+void vidc_sm_get_dec_order_resl(
+	struct ddl_buf_addr *shared_mem, u32 *width, u32 *height);
+void vidc_sm_get_dec_order_crop_info(
+	struct ddl_buf_addr *shared_mem, u32 *left,
+	u32 *right, u32 *top, u32 *bottom);
+void vidc_sm_set_extended_encoder_control(
+	struct ddl_buf_addr *shared_mem, u32 hec_enable,
+	enum VIDC_SM_frame_skip  frame_skip_mode, u32 seq_hdr_in_band,
+	u32 vbv_buffer_size);
+void vidc_sm_set_encoder_param_change(struct ddl_buf_addr *shared_mem,
+	u32 bit_rate_chg, u32 frame_rate_chg, u32 i_period_chg);
+void vidc_sm_set_encoder_vop_time(struct ddl_buf_addr *shared_mem,
+	u32 vop_time_enable, u32 time_resolution, u32 frame_delta);
+void vidc_sm_set_encoder_hec_period(struct ddl_buf_addr *shared_mem,
+	u32 hec_period);
+void vidc_sm_get_h264_encoder_reference_list0(
+	struct ddl_buf_addr *shared_mem,
+	enum VIDC_SM_ref_picture *pe_luma_picture0,
+	u32 *pn_luma_picture_index0,
+	enum VIDC_SM_ref_picture *pe_luma_picture1,
+	u32 *pn_luma_picture_index1,
+	enum VIDC_SM_ref_picture *pe_chroma_picture0,
+	u32 *pn_chroma_picture_index0,
+	enum VIDC_SM_ref_picture *pe_chroma_picture1,
+	u32 *pn_chroma_picture_index1);
+
+void vidc_sm_get_h264_encoder_reference_list1(
+	struct ddl_buf_addr *shared_mem,
+	enum VIDC_SM_ref_picture *pe_luma_picture,
+	u32 *pn_luma_picture_index,
+	enum VIDC_SM_ref_picture *pe_chroma_picture,
+	u32 *pn_chroma_picture_index);
+void vidc_sm_set_allocated_dpb_size(struct ddl_buf_addr *shared_mem,
+	u32 y_size, u32 c_size);
+void vidc_sm_set_allocated_h264_mv_size(struct ddl_buf_addr *shared_mem,
+	u32 mv_size);
+void vidc_sm_get_min_yc_dpb_sizes(struct ddl_buf_addr *shared_mem,
+	u32 *pn_min_luma_dpb_size, u32 *pn_min_chroma_dpb_size);
+void vidc_sm_set_metadata_enable(struct ddl_buf_addr *shared_mem,
+	u32 extradata_enable, u32 qp_enable, u32 concealed_mb_enable,
+	u32 vc1Param_enable, u32 sei_nal_enable, u32 vui_enable,
+	u32 enc_slice_size_enable);
+void vidc_sm_get_metadata_status(struct ddl_buf_addr *shared_mem,
+	u32 *pb_metadata_present);
+void vidc_sm_get_metadata_display_index(struct ddl_buf_addr *shared_mem,
+	u32 *pn_display_index);
+void vidc_sm_set_metadata_start_address(struct ddl_buf_addr *shared_mem,
+	u32 address);
+void vidc_sm_set_extradata_presence(struct ddl_buf_addr *shared_mem,
+	u32 extradata_present);
+void vidc_sm_set_extradata_addr(struct ddl_buf_addr *shared_mem,
+	u32 extradata_addr);
+void vidc_sm_set_pand_b_frame_qp(struct ddl_buf_addr *shared_mem,
+	u32 b_frame_qp, u32 p_frame_qp);
+void vidc_sm_get_profile_info(struct ddl_buf_addr *shared_mem,
+	struct ddl_profile_info_type *ddl_profile_info);
+void vidc_sm_set_encoder_new_bit_rate(struct ddl_buf_addr *shared_mem,
+	u32 new_bit_rate);
+void vidc_sm_set_encoder_new_frame_rate(struct ddl_buf_addr *shared_mem,
+	u32 new_frame_rate);
+void vidc_sm_set_encoder_new_i_period(struct ddl_buf_addr *shared_mem,
+	u32 new_i_period);
+void vidc_sm_set_encoder_init_rc_value(struct ddl_buf_addr *shared_mem,
+	u32 new_rc_value);
+void vidc_sm_set_idr_decode_only(struct ddl_buf_addr *shared_mem,
+	u32 enable);
+void vidc_sm_set_concealment_color(struct ddl_buf_addr *shared_mem,
+	u32 conceal_ycolor, u32 conceal_ccolor);
+void vidc_sm_set_chroma_addr_change(struct ddl_buf_addr *shared_mem,
+	u32 addr_change);
+#endif
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.c
new file mode 100644
index 0000000..46337af
--- /dev/null
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.c
@@ -0,0 +1,310 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/memory_alloc.h>
+#include "vcd_ddl_utils.h"
+#include "vcd_ddl.h"
+
+struct time_data {
+	unsigned int ddl_t1;
+	unsigned int ddl_ttotal;
+	unsigned int ddl_count;
+};
+static struct time_data proc_time[MAX_TIME_DATA];
+#define DDL_MSG_TIME(x...) printk(KERN_DEBUG x)
+
+#define DDL_FW_CHANGE_ENDIAN
+
+#ifdef DDL_BUF_LOG
+static void ddl_print_buffer(struct ddl_context *ddl_context,
+	struct ddl_buf_addr *buf, u32 idx, u8 *str);
+static void ddl_print_port(struct ddl_context *ddl_context,
+	struct ddl_buf_addr *buf);
+static void ddl_print_buffer_port(struct ddl_context *ddl_context,
+	struct ddl_buf_addr *buf, u32 idx, u8 *str);
+#endif
+
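+/*
+ * Allocates a physically contiguous, unmapped region of sz + alignment
+ * bytes, maps it with ioremap() and records the raw and alignment-adjusted
+ * physical/virtual addresses in *addr.  Returns the virtual base address,
+ * or NULL on failure.
+ */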
+void *ddl_pmem_alloc(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
+{
+	u32 alloc_size, offset = 0;
+	struct ddl_context *ddl_context;
+	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
+	if (!addr) {
+		DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
+		return NULL;
+	}
+	ddl_context = ddl_get_context();
+	alloc_size = (sz + alignment);
+	addr->physical_base_addr = (u8 *) allocate_contiguous_memory_nomap(
+				alloc_size, ddl_context->memtype, SZ_4K);
+	if (!addr->physical_base_addr) {
+		DDL_MSG_ERROR("%s() : pmem alloc failed (%d)\n", __func__,
+			alloc_size);
+		return NULL;
+	}
+	DDL_MSG_LOW("%s() : pmem alloc physical base addr/sz 0x%x / %d\n",\
+		__func__, (u32)addr->physical_base_addr, alloc_size);
+	addr->virtual_base_addr = (u8 *)ioremap((unsigned long)
+		addr->physical_base_addr, alloc_size);
+	if (!addr->virtual_base_addr) {
+		DDL_MSG_ERROR("%s() : ioremap failed, virtual(%x)\n", __func__,
+			(u32)addr->virtual_base_addr);
+		free_contiguous_memory_by_paddr(
+			(unsigned long) addr->physical_base_addr);
+		addr->physical_base_addr = NULL;
+		return NULL;
+	}
+	DDL_MSG_LOW("%s() : pmem alloc virtual base addr/sz 0x%x / %d\n",\
+		__func__, (u32)addr->virtual_base_addr, alloc_size);
+	addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
+		addr->physical_base_addr, alignment);
+	offset = (u32)(addr->align_physical_addr -
+			addr->physical_base_addr);
+	addr->align_virtual_addr = addr->virtual_base_addr + offset;
+	addr->buffer_size = sz;
+	DDL_MSG_LOW("\n%s() : alig_phy_addr(%p) alig_vir_addr(%p)",
+		__func__, addr->align_physical_addr, addr->align_virtual_addr);
+	DBG_PMEM("\n%s() OUT: phy_addr(%p) vir_addr(%p) size(%u)",
+		__func__, addr->physical_base_addr, addr->virtual_base_addr,
+		addr->buffer_size);
+	return addr->virtual_base_addr;
+}
+
+void ddl_pmem_free(struct ddl_buf_addr *addr)
+{
+	DBG_PMEM("\n%s() IN: phy_addr(%p) vir_addr(%p) size(%u)",
+		__func__, addr->physical_base_addr, addr->virtual_base_addr,
+		addr->buffer_size);
+	if (addr->virtual_base_addr)
+		iounmap((void *)addr->virtual_base_addr);
+	if (addr->physical_base_addr)
+		free_contiguous_memory_by_paddr(
+			(unsigned long) addr->physical_base_addr);
+	DBG_PMEM("\n%s() OUT: phy_addr(%p) vir_addr(%p) size(%u)",
+		__func__, addr->physical_base_addr, addr->virtual_base_addr,
+		addr->buffer_size);
+	addr->physical_base_addr   = NULL;
+	addr->virtual_base_addr    = NULL;
+	addr->align_virtual_addr   = NULL;
+	addr->align_physical_addr  = NULL;
+	addr->buffer_size = 0;
+}
+
+#ifdef DDL_BUF_LOG
+
+static void ddl_print_buffer(struct ddl_context *ddl_context,
+	struct ddl_buf_addr *buf, u32 idx, u8 *str)
+{
+	struct ddl_buf_addr *base_ram;
+	s32  offset;
+	size_t sz, KB = 0;
+
+	base_ram = &ddl_context->dram_base_a;
+	offset = (s32) DDL_ADDR_OFFSET(*base_ram, *buf);
+	sz = buf->buffer_size;
+	if (sz > 0) {
+		if (!(sz % 1024)) {
+			sz /= 1024;
+			KB++;
+			if (!(sz % 1024)) {
+				sz /= 1024;
+				KB++;
+			}
+		}
+	}
+	DDL_MSG_LOW("\n%12s [%2d]:  0x%08x [0x%04x],  0x%08x(%d%s),  %s",
+		str, idx, (u32) buf->align_physical_addr,
+		(offset > 0) ? offset : 0, buf->buffer_size, sz,
+		((2 == KB) ? "MB" : (1 == KB) ? "KB" : ""),
+		(((u32) buf->virtual_base_addr) ? "Alloc" : ""));
+}
+
+static void ddl_print_port(struct ddl_context *ddl_context,
+	struct ddl_buf_addr *buf)
+{
+	struct ddl_buf_addr *a = &ddl_context->dram_base_a;
+	struct ddl_buf_addr *b = &ddl_context->dram_base_b;
+
+	if (!buf->align_physical_addr || !buf->buffer_size)
+		return;
+	if (buf->align_physical_addr >= a->align_physical_addr &&
+		buf->align_physical_addr + buf->buffer_size <=
+		a->align_physical_addr + a->buffer_size)
+		DDL_MSG_LOW(" -A [0x%x]-", DDL_ADDR_OFFSET(*a, *buf));
+	else if (buf->align_physical_addr >= b->align_physical_addr &&
+		buf->align_physical_addr + buf->buffer_size <=
+		b->align_physical_addr + b->buffer_size)
+		DDL_MSG_LOW(" -B [0x%x]-", DDL_ADDR_OFFSET(*b, *buf));
+	else
+		DDL_MSG_LOW(" -?-");
+}
+
+static void ddl_print_buffer_port(struct ddl_context *ddl_context,
+	struct ddl_buf_addr *buf, u32 idx, u8 *str)
+{
+	DDL_MSG_LOW("\n");
+	ddl_print_buffer(ddl_context, buf, idx, str);
+	ddl_print_port(ddl_context, buf);
+}
+
+void ddl_list_buffers(struct ddl_client_context *ddl)
+{
+	struct ddl_context *ddl_context;
+	u32 i;
+
+	ddl_context = ddl->ddl_context;
+	DDL_MSG_LOW("\n\n");
+	DDL_MSG_LOW("\n      Buffer     :     Start    [offs],      Size    "
+		"(Size),     Alloc/Port");
+	DDL_MSG_LOW("\n------------------------------------------------------"
+		"--------------------------");
+	ddl_print_buffer(ddl_context, &ddl_context->dram_base_a, 0,
+		"dram_base_a");
+	ddl_print_buffer(ddl_context, &ddl_context->dram_base_b, 0,
+		"dram_base_b");
+	if (ddl->codec_data.hdr.decoding) {
+		struct ddl_dec_buffers  *dec_bufs =
+			&ddl->codec_data.decoder.hw_bufs;
+		for (i = 0; i < 32; i++)
+			ddl_print_buffer_port(ddl_context,
+				&dec_bufs->h264Mv[i], i, "h264Mv");
+		ddl_print_buffer_port(ddl_context,
+			&dec_bufs->h264Vert_nb_mv, 0, "h264Vert_nb_mv");
+		ddl_print_buffer_port(ddl_context,
+			&dec_bufs->h264Nb_ip, 0, "h264Nb_ip");
+		ddl_print_buffer_port(ddl_context,
+			&dec_bufs->nb_dcac, 0, "nb_dcac");
+		ddl_print_buffer_port(ddl_context,
+			&dec_bufs->upnb_mv, 0, "upnb_mv");
+		ddl_print_buffer_port(ddl_context,
+			&dec_bufs->sub_anchor_mv, 0, "sub_anchor_mv");
+		ddl_print_buffer_port(ddl_context,
+			&dec_bufs->overlay_xform, 0, "overlay_xform");
+		ddl_print_buffer_port(ddl_context,
+			&dec_bufs->bit_plane3, 0, "bit_plane3");
+		ddl_print_buffer_port(ddl_context,
+			&dec_bufs->bit_plane2, 0, "bit_plane2");
+		ddl_print_buffer_port(ddl_context,
+			&dec_bufs->bit_plane1, 0, "bit_plane1");
+		ddl_print_buffer_port(ddl_context,
+			&dec_bufs->stx_parser, 0, "stx_parser");
+		ddl_print_buffer_port(ddl_context,
+			&dec_bufs->desc, 0, "desc");
+		ddl_print_buffer_port(ddl_context,
+			&dec_bufs->context, 0, "context");
+	} else {
+		struct ddl_enc_buffers  *enc_bufs =
+			&ddl->codec_data.encoder.hw_bufs;
+
+		for (i = 0; i < 4; i++)
+			ddl_print_buffer_port(ddl_context,
+				&enc_bufs->dpb_y[i], i, "dpb_y");
+		for (i = 0; i < 4; i++)
+			ddl_print_buffer_port(ddl_context,
+				&enc_bufs->dpb_c[i], i, "dpb_c");
+		ddl_print_buffer_port(ddl_context, &enc_bufs->mv, 0, "mv");
+		ddl_print_buffer_port(ddl_context,
+			&enc_bufs->col_zero, 0, "col_zero");
+		ddl_print_buffer_port(ddl_context, &enc_bufs->md, 0, "md");
+		ddl_print_buffer_port(ddl_context,
+			&enc_bufs->pred, 0, "pred");
+		ddl_print_buffer_port(ddl_context,
+			&enc_bufs->nbor_info, 0, "nbor_info");
+		ddl_print_buffer_port(ddl_context,
+			&enc_bufs->acdc_coef, 0, "acdc_coef");
+		ddl_print_buffer_port(ddl_context,
+			&enc_bufs->context, 0, "context");
+	}
+}
+#endif
+
+#ifdef DDL_FW_CHANGE_ENDIAN
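+/*
+ * Swap the byte order of every 32-bit word of the firmware image in place
+ * (bytes 0..3 become 3..0).
+ */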
+static void ddl_fw_change_endian(u8 *fw, u32 fw_size)
+{
+	u32 i = 0;
+	u8  temp;
+	for (i = 0; i < fw_size; i = i + 4) {
+		temp = fw[i];
+		fw[i] = fw[i+3];
+		fw[i+3] = temp;
+		temp = fw[i+1];
+		fw[i+1] = fw[i+2];
+		fw[i+2] = temp;
+	}
+	return;
+}
+#endif
+
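+/*
+ * Copy the firmware blob into the aligned region of @dram_base; fails if
+ * the blob is missing or larger than the reserved buffer.
+ */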
+u32 ddl_fw_init(struct ddl_buf_addr *dram_base)
+{
+
+	u8 *dest_addr;
+
+	dest_addr = DDL_GET_ALIGNED_VITUAL(*dram_base);
+	if (vidc_video_codec_fw_size > dram_base->buffer_size ||
+		!vidc_video_codec_fw)
+		return false;
+	DDL_MSG_LOW("FW Addr / FW Size : %x/%d", (u32)vidc_video_codec_fw,
+		vidc_video_codec_fw_size);
+	memcpy(dest_addr, vidc_video_codec_fw,
+		vidc_video_codec_fw_size);
+#ifdef DDL_FW_CHANGE_ENDIAN
+	ddl_fw_change_endian(dest_addr, vidc_video_codec_fw_size);
+#endif
+	return true;
+}
+
+void ddl_fw_release(void)
+{
+
+}
+
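+/*
+ * Lightweight profiling helpers: ddl_set_core_start_time() latches a
+ * millisecond timestamp for the given index, ddl_calc_core_proc_time()
+ * adds the elapsed time to a running total and logs the average, and
+ * ddl_reset_core_time_variables() clears the counters.
+ */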
+void ddl_set_core_start_time(const char *func_name, u32 index)
+{
+	u32 act_time;
+	struct timeval ddl_tv;
+	struct time_data *time_data = &proc_time[index];
+	do_gettimeofday(&ddl_tv);
+	act_time = (ddl_tv.tv_sec * 1000) + (ddl_tv.tv_usec / 1000);
+	if (!time_data->ddl_t1) {
+		time_data->ddl_t1 = act_time;
+		DDL_MSG_LOW("\n%s(): Start Time (%u)", func_name, act_time);
+	} else {
+		DDL_MSG_TIME("\n%s(): Timer already started! St(%u) Act(%u)",
+			func_name, time_data->ddl_t1, act_time);
+	}
+}
+
+void ddl_calc_core_proc_time(const char *func_name, u32 index)
+{
+	struct time_data *time_data = &proc_time[index];
+	if (time_data->ddl_t1) {
+		int ddl_t2;
+		struct timeval ddl_tv;
+		do_gettimeofday(&ddl_tv);
+		ddl_t2 = (ddl_tv.tv_sec * 1000) + (ddl_tv.tv_usec / 1000);
+		time_data->ddl_ttotal += (ddl_t2 - time_data->ddl_t1);
+		time_data->ddl_count++;
+		DDL_MSG_TIME("\n%s(): cnt(%u) End Time (%u) Diff(%u) Avg(%u)",
+			func_name, time_data->ddl_count, ddl_t2,
+			ddl_t2 - time_data->ddl_t1,
+			time_data->ddl_ttotal/time_data->ddl_count);
+		time_data->ddl_t1 = 0;
+	}
+}
+
+void ddl_reset_core_time_variables(u32 index)
+{
+	proc_time[index].ddl_t1 = 0;
+	proc_time[index].ddl_ttotal = 0;
+	proc_time[index].ddl_count = 0;
+}
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.h b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.h
new file mode 100644
index 0000000..42a991c
--- /dev/null
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.h
@@ -0,0 +1,73 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VCD_DDL_UTILS_H_
+#define _VCD_DDL_UTILS_H_
+
+#include <linux/delay.h>
+#include "vidc_type.h"
+
+extern u32 vidc_msg_pmem;
+extern u32 vidc_msg_timing;
+
+enum timing_data {
+	DEC_OP_TIME,
+	DEC_IP_TIME,
+	ENC_OP_TIME,
+	MAX_TIME_DATA
+};
+
+#define DBG_PMEM(x...) \
+do { \
+	if (vidc_msg_pmem) \
+		printk(KERN_DEBUG x); \
+} while (0)
+
+#ifdef DDL_MSG_LOG
+#define DDL_MSG_LOW(x...)    printk(KERN_INFO x)
+#define DDL_MSG_MED(x...)    printk(KERN_INFO x)
+#define DDL_MSG_HIGH(x...)   printk(KERN_INFO x)
+#else
+#define DDL_MSG_LOW(x...)
+#define DDL_MSG_MED(x...)
+#define DDL_MSG_HIGH(x...)
+#endif
+
+#define DDL_MSG_ERROR(x...)  printk(KERN_INFO x)
+#define DDL_MSG_FATAL(x...)  printk(KERN_INFO x)
+
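+/*
+ * Address/size helpers: DDL_ALIGN() rounds val up to the next multiple of
+ * grid, DDL_ALIGN_FLOOR() rounds down, and DDL_ADDR_OFFSET() returns the
+ * byte offset of a buffer from a DDL base region.
+ */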
+#define DDL_ALIGN_SIZE(sz, guard_bytes, align_mask) \
+	(((u32)(sz) + guard_bytes) & align_mask)
+#define DDL_ADDR_IS_ALIGNED(addr, align_bytes) \
+	(!((u32)(addr) & ((align_bytes) - 1)))
+#define  DDL_ALIGN(val, grid) ((!(grid)) ? (val) : \
+		((((val) + (grid) - 1) / (grid)) * (grid)))
+#define  DDL_ALIGN_FLOOR(val, grid) ((!(grid)) ? (val) : \
+		(((val) / (grid)) * (grid)))
+#define DDL_OFFSET(base, addr) ((!(addr)) ? 0 : (u32)((u8 *) \
+		(addr) - (u8 *) (base)))
+#define DDL_ADDR_OFFSET(base, addr) DDL_OFFSET((base).align_physical_addr, \
+		(addr).align_physical_addr)
+#define DDL_GET_ALIGNED_VITUAL(x)   ((x).align_virtual_addr)
+#define DDL_KILO_BYTE(x)   ((x)*1024)
+#define DDL_MEGA_BYTE(x)   ((x)*1024*1024)
+#define DDL_FRAMERATE_SCALE(x)            ((x) * 1000)
+
+#define DDL_MIN(x, y)  (((x) < (y)) ? (x) : (y))
+#define DDL_MAX(x, y)  (((x) > (y)) ? (x) : (y))
+
+void ddl_set_core_start_time(const char *func_name, u32 index);
+void ddl_calc_core_proc_time(const char *func_name, u32 index);
+void ddl_reset_core_time_variables(u32 index);
+
+#endif
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c
new file mode 100644
index 0000000..2efd211
--- /dev/null
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c
@@ -0,0 +1,925 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "vcd_ddl.h"
+#include "vcd_ddl_metadata.h"
+#include "vcd_ddl_shared_mem.h"
+#include "vcd_core.h"
+
+#if defined(PIX_CACHE_DISABLE)
+#define DDL_PIX_CACHE_ENABLE  false
+#else
+#define DDL_PIX_CACHE_ENABLE  true
+#endif
+
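+/*
+ * Bring the 1080p core out of reset: run the two-stage software reset,
+ * program the DRAM base addresses into the memory controller, fill in the
+ * per-channel command dispatch table and, when enabled, configure the
+ * pixel cache.
+ */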
+void ddl_vidc_core_init(struct ddl_context *ddl_context)
+{
+	struct vidc_1080P_pix_cache_config pixel_cache_config;
+
+	vidc_1080p_do_sw_reset(VIDC_1080P_RESET_IN_SEQ_FIRST_STAGE);
+	msleep(DDL_SW_RESET_SLEEP);
+	vidc_1080p_do_sw_reset(VIDC_1080P_RESET_IN_SEQ_SECOND_STAGE);
+	vidc_1080p_init_memory_controller(
+		(u32) ddl_context->dram_base_a.align_physical_addr,
+		(u32) ddl_context->dram_base_b.align_physical_addr);
+	vidc_1080p_clear_returned_channel_inst_id();
+	ddl_context->vidc_decode_seq_start[0] =
+		vidc_1080p_decode_seq_start_ch0;
+	ddl_context->vidc_decode_seq_start[1] =
+		vidc_1080p_decode_seq_start_ch1;
+	ddl_context->vidc_decode_init_buffers[0] =
+		vidc_1080p_decode_init_buffers_ch0;
+	ddl_context->vidc_decode_init_buffers[1] =
+		vidc_1080p_decode_init_buffers_ch1;
+	ddl_context->vidc_decode_frame_start[0] =
+		vidc_1080p_decode_frame_start_ch0;
+	ddl_context->vidc_decode_frame_start[1] =
+		vidc_1080p_decode_frame_start_ch1;
+	ddl_context->vidc_set_dec_resolution[0] =
+		vidc_1080p_set_dec_resolution_ch0;
+	ddl_context->vidc_set_dec_resolution[1] =
+		vidc_1080p_set_dec_resolution_ch1;
+	ddl_context->vidc_encode_seq_start[0] =
+		vidc_1080p_encode_seq_start_ch0;
+	ddl_context->vidc_encode_seq_start[1] =
+		vidc_1080p_encode_seq_start_ch1;
+	ddl_context->vidc_encode_frame_start[0] =
+		vidc_1080p_encode_frame_start_ch0;
+	ddl_context->vidc_encode_frame_start[1] =
+		vidc_1080p_encode_frame_start_ch1;
+	vidc_1080p_release_sw_reset();
+	ddl_context->pix_cache_enable = DDL_PIX_CACHE_ENABLE;
+	if (ddl_context->pix_cache_enable) {
+		vidc_pix_cache_sw_reset();
+		pixel_cache_config.cache_enable = true;
+		pixel_cache_config.prefetch_en = true;
+		pixel_cache_config.port_select = VIDC_1080P_PIX_CACHE_PORT_B;
+		pixel_cache_config.statistics_off = true;
+		pixel_cache_config.page_size =
+			VIDC_1080P_PIX_CACHE_PAGE_SIZE_1K;
+		vidc_pix_cache_init_config(&pixel_cache_config);
+	}
+}
+
+void ddl_vidc_core_term(struct ddl_context *ddl_context)
+{
+	if (ddl_context->pix_cache_enable) {
+		u32 pix_cache_idle = false;
+		u32 counter = 0;
+
+		vidc_pix_cache_set_halt(true);
+
+		do {
+			msleep(DDL_SW_RESET_SLEEP);
+			vidc_pix_cache_get_status_idle(&pix_cache_idle);
+			counter++;
+		} while (!pix_cache_idle &&
+			counter < DDL_PIXEL_CACHE_STATUS_READ_RETRY);
+
+		if (!pix_cache_idle) {
+			ddl_context->cmd_err_status =
+				DDL_PIXEL_CACHE_NOT_IDLE;
+			ddl_handle_core_errors(ddl_context);
+		}
+	}
+}
+
+void ddl_vidc_channel_set(struct ddl_client_context *ddl)
+{
+	struct ddl_context *ddl_context = ddl->ddl_context;
+	enum vcd_codec *vcd_codec;
+	enum vidc_1080p_codec codec = VIDC_1080P_H264_DECODE;
+	const enum vidc_1080p_decode_p_cache_enable
+		dec_pix_cache = VIDC_1080P_DECODE_PCACHE_DISABLE;
+	const enum vidc_1080p_encode_p_cache_enable
+		enc_pix_cache = VIDC_1080P_ENCODE_PCACHE_ENABLE;
+	u32 pix_cache_ctrl, ctxt_mem_offset, ctxt_mem_size;
+
+	if (ddl->decoding) {
+		if (vidc_msg_timing)
+			ddl_set_core_start_time(__func__, DEC_OP_TIME);
+		vcd_codec = &(ddl->codec_data.decoder.codec.codec);
+		pix_cache_ctrl = (u32)dec_pix_cache;
+		ctxt_mem_offset = DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+			ddl->codec_data.decoder.hw_bufs.context) >> 11;
+		ctxt_mem_size =
+			ddl->codec_data.decoder.hw_bufs.context.buffer_size;
+	} else {
+		vcd_codec = &(ddl->codec_data.encoder.codec.codec);
+		pix_cache_ctrl = (u32)enc_pix_cache;
+		ctxt_mem_offset = DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+			ddl->codec_data.encoder.hw_bufs.context) >> 11;
+		ctxt_mem_size =
+			ddl->codec_data.encoder.hw_bufs.context.buffer_size;
+	}
+	switch (*vcd_codec) {
+	default:
+	case VCD_CODEC_MPEG4:
+		if (ddl->decoding)
+			codec = VIDC_1080P_MPEG4_DECODE;
+		else
+			codec = VIDC_1080P_MPEG4_ENCODE;
+	break;
+	case VCD_CODEC_H264:
+		if (ddl->decoding)
+			codec = VIDC_1080P_H264_DECODE;
+		else
+			codec = VIDC_1080P_H264_ENCODE;
+	break;
+	case VCD_CODEC_DIVX_3:
+		if (ddl->decoding)
+			codec = VIDC_1080P_DIVX311_DECODE;
+	break;
+	case VCD_CODEC_DIVX_4:
+		if (ddl->decoding)
+			codec = VIDC_1080P_DIVX412_DECODE;
+	break;
+	case VCD_CODEC_DIVX_5:
+		if (ddl->decoding)
+			codec = VIDC_1080P_DIVX502_DECODE;
+	break;
+	case VCD_CODEC_DIVX_6:
+		if (ddl->decoding)
+			codec = VIDC_1080P_DIVX503_DECODE;
+	break;
+	case VCD_CODEC_XVID:
+		if (ddl->decoding)
+			codec = VIDC_1080P_MPEG4_DECODE;
+	break;
+	case VCD_CODEC_H263:
+		if (ddl->decoding)
+			codec = VIDC_1080P_H263_DECODE;
+		else
+			codec = VIDC_1080P_H263_ENCODE;
+	break;
+	case VCD_CODEC_MPEG1:
+	case VCD_CODEC_MPEG2:
+		if (ddl->decoding)
+			codec = VIDC_1080P_MPEG2_DECODE;
+	break;
+	case VCD_CODEC_VC1:
+		if (ddl->decoding)
+			codec = VIDC_1080P_VC1_DECODE;
+	break;
+	case VCD_CODEC_VC1_RCV:
+		if (ddl->decoding)
+			codec = VIDC_1080P_VC1_RCV_DECODE;
+	break;
+	}
+	ddl->cmd_state = DDL_CMD_CHANNEL_SET;
+	DDL_MSG_LOW("ddl_state_transition: %s ~~> DDL_CLIENT_WAIT_FOR_CHDONE",
+	ddl_get_state_string(ddl->client_state));
+	ddl->client_state = DDL_CLIENT_WAIT_FOR_CHDONE;
+	vidc_1080p_set_host2risc_cmd(VIDC_1080P_HOST2RISC_CMD_OPEN_CH,
+		(u32)codec, pix_cache_ctrl, ctxt_mem_offset,
+		ctxt_mem_size);
+}
+
+void ddl_vidc_decode_init_codec(struct ddl_client_context *ddl)
+{
+	struct ddl_context  *ddl_context = ddl->ddl_context;
+	struct ddl_decoder_data *decoder = &(ddl->codec_data.decoder);
+	struct vidc_1080p_dec_seq_start_param seq_start_param;
+	u32 seq_size;
+
+	if (vidc_msg_timing)
+		ddl_set_core_start_time(__func__, DEC_OP_TIME);
+	vidc_1080p_set_decode_mpeg4_pp_filter(decoder->post_filter.post_filter);
+	vidc_sm_set_concealment_color(&ddl->shared_mem[ddl->command_channel],
+		DDL_CONCEALMENT_Y_COLOR, DDL_CONCEALMENT_C_COLOR);
+	ddl_vidc_metadata_enable(ddl);
+	vidc_sm_set_metadata_start_address(&ddl->shared_mem
+		[ddl->command_channel],
+		DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+		ddl->codec_data.decoder.meta_data_input));
+
+	vidc_sm_set_idr_decode_only(&ddl->shared_mem[ddl->command_channel],
+			decoder->idr_only_decoding);
+
+	if ((decoder->codec.codec == VCD_CODEC_DIVX_3) ||
+	    (decoder->codec.codec == VCD_CODEC_VC1_RCV ||
+	     decoder->codec.codec == VCD_CODEC_VC1))
+		ddl_context->vidc_set_dec_resolution[ddl->command_channel](
+			decoder->client_frame_size.width,
+			decoder->client_frame_size.height);
+	else
+		ddl_context->vidc_set_dec_resolution[ddl->command_channel](
+			0x0, 0x0);
+	DDL_MSG_LOW("HEADER-PARSE-START");
+	DDL_MSG_LOW("ddl_state_transition: %s ~~>"
+	"DDL_CLIENT_WAIT_FOR_INITCODECDONE",
+	ddl_get_state_string(ddl->client_state));
+	ddl->client_state = DDL_CLIENT_WAIT_FOR_INITCODECDONE;
+	ddl->cmd_state = DDL_CMD_HEADER_PARSE;
+	seq_start_param.cmd_seq_num = ++ddl_context->cmd_seq_num;
+	seq_start_param.inst_id = ddl->instance_id;
+	seq_start_param.shared_mem_addr_offset = DDL_ADDR_OFFSET(
+	ddl_context->dram_base_a, ddl->shared_mem
+		[ddl->command_channel]);
+	seq_start_param.stream_buffer_addr_offset =
+	DDL_OFFSET(ddl_context->dram_base_a.align_physical_addr,
+	decoder->decode_config.sequence_header);
+	seq_start_param.stream_buffersize =
+		decoder->client_input_buf_req.sz;
+	seq_size = decoder->decode_config.sequence_header_len +
+		DDL_LINEAR_BUFFER_ALIGN_BYTES + VCD_SEQ_HDR_PADDING_BYTES;
+	if (seq_start_param.stream_buffersize < seq_size)
+		seq_start_param.stream_buffersize = seq_size;
+	seq_start_param.stream_frame_size =
+		decoder->decode_config.sequence_header_len;
+	seq_start_param.descriptor_buffer_addr_offset =
+		DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+		decoder->hw_bufs.desc),
+	seq_start_param.descriptor_buffer_size =
+		decoder->hw_bufs.desc.buffer_size;
+	ddl_context->vidc_decode_seq_start[ddl->command_channel](
+		&seq_start_param);
+}
+
+void ddl_vidc_decode_dynamic_property(struct ddl_client_context *ddl,
+	u32 enable)
+{
+	struct ddl_decoder_data *decoder = &(ddl->codec_data.decoder);
+	struct vcd_frame_data *bit_stream =
+		&(ddl->input_frame.vcd_frm);
+	struct ddl_context *ddl_context = ddl->ddl_context;
+
+	if (!enable) {
+		if (decoder->dynmic_prop_change_req)
+			decoder->dynmic_prop_change_req = false;
+		return;
+	}
+	if ((decoder->dynamic_prop_change & DDL_DEC_REQ_OUTPUT_FLUSH)) {
+		decoder->dynmic_prop_change_req = true;
+		decoder->dynamic_prop_change &= ~(DDL_DEC_REQ_OUTPUT_FLUSH);
+		decoder->dpb_mask.hw_mask = 0;
+		decoder->flush_pending = true;
+	}
+	if (((decoder->meta_data_enable_flag & VCD_METADATA_PASSTHROUGH)) &&
+		((VCD_FRAME_FLAG_EXTRADATA & bit_stream->flags))) {
+		u32 extradata_presence = true;
+		u8* tmp = ((u8 *) bit_stream->physical +
+				bit_stream->offset +
+				bit_stream->data_len + 3);
+		u32 extra_data_start = (u32) ((u32)tmp & ~3);
+
+		extra_data_start = extra_data_start -
+			(u32)ddl_context->dram_base_a.align_physical_addr;
+		decoder->dynmic_prop_change_req = true;
+		vidc_sm_set_extradata_addr(&ddl->shared_mem
+			[ddl->command_channel], extra_data_start);
+		vidc_sm_set_extradata_presence(&ddl->shared_mem
+			[ddl->command_channel], extradata_presence);
+	}
+}
+
+void ddl_vidc_encode_dynamic_property(struct ddl_client_context *ddl,
+	u32 enable)
+{
+	struct ddl_encoder_data *encoder = &(ddl->codec_data.encoder);
+	u32 frame_rate_change = false, bit_rate_change = false;
+	u32 i_period_change = false, reset_req = false;
+
+	if (!enable) {
+		if (encoder->dynmic_prop_change_req) {
+			reset_req = true;
+			encoder->dynmic_prop_change_req = false;
+		}
+	} else {
+		if ((encoder->dynamic_prop_change & DDL_ENC_REQ_IFRAME)) {
+			encoder->intra_frame_insertion = true;
+			encoder->dynamic_prop_change &=
+				~(DDL_ENC_REQ_IFRAME);
+		}
+		if ((encoder->dynamic_prop_change &
+			DDL_ENC_CHANGE_BITRATE)) {
+			bit_rate_change = true;
+			vidc_sm_set_encoder_new_bit_rate(
+				&ddl->shared_mem[ddl->command_channel],
+				encoder->target_bit_rate.target_bitrate);
+			encoder->dynamic_prop_change &=
+				~(DDL_ENC_CHANGE_BITRATE);
+		}
+		if ((encoder->dynamic_prop_change
+			& DDL_ENC_CHANGE_IPERIOD)) {
+			i_period_change = true;
+			vidc_sm_set_encoder_new_i_period(
+				&ddl->shared_mem[ddl->command_channel],
+				encoder->i_period.p_frames);
+			encoder->dynamic_prop_change &=
+				~(DDL_ENC_CHANGE_IPERIOD);
+		}
+		if ((encoder->dynamic_prop_change
+			& DDL_ENC_CHANGE_FRAMERATE)) {
+			frame_rate_change = true;
+			vidc_sm_set_encoder_new_frame_rate(
+				&ddl->shared_mem[ddl->command_channel],
+				(u32)(DDL_FRAMERATE_SCALE(encoder->\
+				frame_rate.fps_numerator) /
+				encoder->frame_rate.fps_denominator));
+			encoder->dynamic_prop_change &=
+				~(DDL_ENC_CHANGE_FRAMERATE);
+		}
+	}
+	if ((enable) || (reset_req)) {
+		vidc_sm_set_encoder_param_change(
+			&ddl->shared_mem[ddl->command_channel],
+			bit_rate_change, frame_rate_change,
+			i_period_change);
+	}
+}
+
+static void ddl_vidc_encode_set_profile_level(
+	struct ddl_client_context *ddl)
+{
+	struct ddl_encoder_data *encoder = &(ddl->codec_data.encoder);
+	u32  encode_profile, level = 0;
+
+	switch (encoder->profile.profile) {
+	default:
+	case VCD_PROFILE_MPEG4_SP:
+		encode_profile = VIDC_1080P_PROFILE_MPEG4_SIMPLE;
+	break;
+	case VCD_PROFILE_MPEG4_ASP:
+		encode_profile = VIDC_1080P_PROFILE_MPEG4_ADV_SIMPLE;
+	break;
+	case VCD_PROFILE_H264_BASELINE:
+		encode_profile = VIDC_1080P_PROFILE_H264_BASELINE;
+	break;
+	case VCD_PROFILE_H264_MAIN:
+		encode_profile = VIDC_1080P_PROFILE_H264_MAIN;
+	break;
+	case VCD_PROFILE_H264_HIGH:
+		encode_profile = VIDC_1080P_PROFILE_H264_HIGH;
+	break;
+	}
+	switch (encoder->level.level) {
+	default:
+	case VCD_LEVEL_MPEG4_0:
+		level = VIDC_1080P_MPEG4_LEVEL0;
+	break;
+	case VCD_LEVEL_MPEG4_0b:
+		level = VIDC_1080P_MPEG4_LEVEL0b;
+	break;
+	case VCD_LEVEL_MPEG4_1:
+		level = VIDC_1080P_MPEG4_LEVEL1;
+	break;
+	case VCD_LEVEL_MPEG4_2:
+		level = VIDC_1080P_MPEG4_LEVEL2;
+	break;
+	case VCD_LEVEL_MPEG4_3:
+		level = VIDC_1080P_MPEG4_LEVEL3;
+	break;
+	case VCD_LEVEL_MPEG4_3b:
+		level = VIDC_1080P_MPEG4_LEVEL3b;
+	break;
+	case VCD_LEVEL_MPEG4_4:
+		level = VIDC_1080P_MPEG4_LEVEL4;
+	break;
+	case VCD_LEVEL_MPEG4_4a:
+		level = VIDC_1080P_MPEG4_LEVEL4a;
+	break;
+	case VCD_LEVEL_MPEG4_5:
+		level = VIDC_1080P_MPEG4_LEVEL5;
+	break;
+	case VCD_LEVEL_MPEG4_6:
+		level = VIDC_1080P_MPEG4_LEVEL6;
+	break;
+	case VCD_LEVEL_MPEG4_7:
+		level = VIDC_1080P_MPEG4_LEVEL7;
+	break;
+	case VCD_LEVEL_H264_1:
+		level = VIDC_1080P_H264_LEVEL1;
+	break;
+	case VCD_LEVEL_H264_1b:
+		level = VIDC_1080P_H264_LEVEL1b;
+	break;
+	case VCD_LEVEL_H264_1p1:
+		level = VIDC_1080P_H264_LEVEL1p1;
+	break;
+	case VCD_LEVEL_H264_1p2:
+		level = VIDC_1080P_H264_LEVEL1p2;
+	break;
+	case VCD_LEVEL_H264_1p3:
+		level = VIDC_1080P_H264_LEVEL1p3;
+	break;
+	case VCD_LEVEL_H264_2:
+		level = VIDC_1080P_H264_LEVEL2;
+	break;
+	case VCD_LEVEL_H264_2p1:
+		level = VIDC_1080P_H264_LEVEL2p1;
+	break;
+	case VCD_LEVEL_H264_2p2:
+		level = VIDC_1080P_H264_LEVEL2p2;
+	break;
+	case VCD_LEVEL_H264_3:
+		level = VIDC_1080P_H264_LEVEL3;
+	break;
+	case VCD_LEVEL_H264_3p1:
+		level = VIDC_1080P_H264_LEVEL3p1;
+	break;
+	case VCD_LEVEL_H264_3p2:
+		level = VIDC_1080P_H264_LEVEL3p2;
+	break;
+	case VCD_LEVEL_H264_4:
+		level = VIDC_1080P_H264_LEVEL4;
+	break;
+	case VCD_LEVEL_H263_10:
+		level = VIDC_1080P_H263_LEVEL10;
+	break;
+	case VCD_LEVEL_H263_20:
+		level = VIDC_1080P_H263_LEVEL20;
+	break;
+	case VCD_LEVEL_H263_30:
+		level = VIDC_1080P_H263_LEVEL30;
+	break;
+	case VCD_LEVEL_H263_40:
+		level = VIDC_1080P_H263_LEVEL40;
+	break;
+	case VCD_LEVEL_H263_45:
+		level = VIDC_1080P_H263_LEVEL45;
+	break;
+	case VCD_LEVEL_H263_50:
+		level = VIDC_1080P_H263_LEVEL50;
+	break;
+	case VCD_LEVEL_H263_60:
+		level = VIDC_1080P_H263_LEVEL60;
+	break;
+	case VCD_LEVEL_H263_70:
+		level = VIDC_1080P_H263_LEVEL70;
+	break;
+	}
+	vidc_1080p_set_encode_profile_level(encode_profile, level);
+}
+
+static void ddl_vidc_encode_set_multi_slice_info(
+	struct ddl_encoder_data *encoder)
+{
+	enum vidc_1080p_MSlice_selection m_slice_sel;
+	u32 i_multi_slice_size = 0, i_multi_slice_byte = 0;
+
+	if (!encoder) {
+		DDL_MSG_ERROR("Invalid Parameter");
+		return;
+	}
+
+	switch (encoder->multi_slice.m_slice_sel) {
+	default:
+	case VCD_MSLICE_OFF:
+		m_slice_sel = VIDC_1080P_MSLICE_DISABLE;
+	break;
+	case VCD_MSLICE_BY_MB_COUNT:
+		m_slice_sel = VIDC_1080P_MSLICE_BY_MB_COUNT;
+		i_multi_slice_size = encoder->multi_slice.m_slice_size;
+	break;
+	case VCD_MSLICE_BY_BYTE_COUNT:
+		m_slice_sel = VIDC_1080P_MSLICE_BY_BYTE_COUNT;
+		i_multi_slice_byte = encoder->multi_slice.m_slice_size;
+	break;
+	}
+	vidc_1080p_set_encode_multi_slice_control(m_slice_sel,
+		i_multi_slice_size, i_multi_slice_byte);
+}
+
+void ddl_vidc_encode_init_codec(struct ddl_client_context *ddl)
+{
+	struct ddl_context *ddl_context = ddl->ddl_context;
+	struct ddl_encoder_data *encoder = &(ddl->codec_data.encoder);
+	struct ddl_enc_buffers *enc_buffers = &encoder->hw_bufs;
+	struct vidc_1080p_enc_seq_start_param seq_start_param;
+	enum vidc_1080p_memory_access_method mem_access_method;
+	enum vidc_1080p_DBConfig db_config;
+	enum VIDC_SM_frame_skip r_cframe_skip =
+		VIDC_SM_FRAME_SKIP_DISABLE;
+	u32 index, luma[4], chroma[4], hdr_ext_control = false;
+	const u32 recon_bufs = 4;
+
+	ddl_vidc_encode_set_profile_level(ddl);
+	vidc_1080p_set_encode_frame_size(encoder->frame_size.width,
+		encoder->frame_size.height);
+	vidc_1080p_encode_set_qp_params(encoder->qp_range.max_qp,
+		encoder->qp_range.min_qp);
+	vidc_1080p_encode_set_rc_config(encoder->rc_level.frame_level_rc,
+		encoder->rc_level.mb_level_rc,
+		encoder->session_qp.i_frame_qp);
+	if (encoder->hdr_ext_control > 0)
+		hdr_ext_control = true;
+	if (encoder->r_cframe_skip > 0)
+		r_cframe_skip = VIDC_SM_FRAME_SKIP_ENABLE_LEVEL;
+	vidc_sm_set_extended_encoder_control(&ddl->shared_mem
+		[ddl->command_channel], hdr_ext_control,
+		r_cframe_skip, false, 0);
+	vidc_sm_set_encoder_init_rc_value(&ddl->shared_mem
+		[ddl->command_channel],
+		encoder->target_bit_rate.target_bitrate);
+	vidc_sm_set_encoder_hec_period(&ddl->shared_mem
+		[ddl->command_channel], encoder->hdr_ext_control);
+	vidc_sm_set_encoder_vop_time(&ddl->shared_mem
+		[ddl->command_channel], true,
+		encoder->vop_timing.vop_time_resolution, 0);
+	if (encoder->rc_level.frame_level_rc)
+		vidc_1080p_encode_set_frame_level_rc_params((
+			DDL_FRAMERATE_SCALE(encoder->\
+			frame_rate.fps_numerator) /
+			encoder->frame_rate.fps_denominator),
+			encoder->target_bit_rate.target_bitrate,
+			encoder->frame_level_rc.reaction_coeff);
+	if (encoder->rc_level.mb_level_rc)
+		vidc_1080p_encode_set_mb_level_rc_params(
+			encoder->adaptive_rc.disable_dark_region_as_flag,
+			encoder->adaptive_rc.disable_smooth_region_as_flag,
+			encoder->adaptive_rc.disable_static_region_as_flag,
+			encoder->adaptive_rc.disable_activity_region_flag);
+	if ((!encoder->rc_level.frame_level_rc) &&
+		(!encoder->rc_level.mb_level_rc))
+		vidc_sm_set_pand_b_frame_qp(
+			&ddl->shared_mem[ddl->command_channel],
+			encoder->session_qp.b_frame_qp,
+			encoder->session_qp.p_frame_qp);
+	if (encoder->codec.codec == VCD_CODEC_MPEG4) {
+		vidc_1080p_set_mpeg4_encode_quarter_pel_control(false);
+		vidc_1080p_set_encode_field_picture_structure(false);
+	}
+	if (encoder->codec.codec == VCD_CODEC_H264) {
+		enum vidc_1080p_entropy_sel entropy_sel;
+		switch (encoder->entropy_control.entropy_sel) {
+		default:
+		case VCD_ENTROPY_SEL_CAVLC:
+			entropy_sel = VIDC_1080P_ENTROPY_SEL_CAVLC;
+		break;
+		case VCD_ENTROPY_SEL_CABAC:
+			entropy_sel = VIDC_1080P_ENTROPY_SEL_CABAC;
+		break;
+		}
+		vidc_1080p_set_h264_encode_entropy(entropy_sel);
+		switch (encoder->db_control.db_config) {
+		default:
+		case VCD_DB_ALL_BLOCKING_BOUNDARY:
+			db_config = VIDC_1080P_DB_ALL_BLOCKING_BOUNDARY;
+		break;
+		case VCD_DB_DISABLE:
+			db_config = VIDC_1080P_DB_DISABLE;
+		break;
+		case VCD_DB_SKIP_SLICE_BOUNDARY:
+			db_config = VIDC_1080P_DB_SKIP_SLICE_BOUNDARY;
+		break;
+		}
+		vidc_1080p_set_h264_encode_loop_filter(db_config,
+			encoder->db_control.slice_alpha_offset,
+			encoder->db_control.slice_beta_offset);
+		vidc_1080p_set_h264_encoder_p_frame_ref_count(
+			encoder->num_references_for_p_frame);
+		if (encoder->profile.profile == VCD_PROFILE_H264_HIGH)
+			vidc_1080p_set_h264_encode_8x8transform_control(true);
+	}
+	vidc_1080p_set_encode_picture(encoder->i_period.p_frames,
+		encoder->i_period.b_frames);
+	vidc_1080p_set_encode_circular_intra_refresh(
+		encoder->intra_refresh.cir_mb_number);
+	ddl_vidc_encode_set_multi_slice_info(encoder);
+	ddl_vidc_metadata_enable(ddl);
+	if (encoder->meta_data_enable_flag)
+		vidc_sm_set_metadata_start_address(&ddl->shared_mem
+			[ddl->command_channel], DDL_ADDR_OFFSET(
+			ddl_context->dram_base_a,
+			ddl->codec_data.encoder.meta_data_input));
+	luma[0] = DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+			enc_buffers->dpb_y[0]);
+	luma[1] = DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+			enc_buffers->dpb_y[1]);
+	if (encoder->hw_bufs.dpb_count == DDL_ENC_MAX_DPB_BUFFERS) {
+		luma[2] = DDL_ADDR_OFFSET(ddl_context->dram_base_b,
+			enc_buffers->dpb_y[2]);
+		luma[3] = DDL_ADDR_OFFSET(ddl_context->dram_base_b,
+			enc_buffers->dpb_y[3]);
+	}
+	for (index = 0; index < recon_bufs; index++)
+		chroma[index] = DDL_ADDR_OFFSET(ddl_context->dram_base_b,
+					enc_buffers->dpb_c[index]);
+	vidc_1080p_set_encode_recon_buffers(recon_bufs, luma, chroma);
+	switch (encoder->codec.codec) {
+	case VCD_CODEC_MPEG4:
+		vidc_1080p_set_mpeg4_encode_work_buffers(
+			DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+				enc_buffers->col_zero),
+				DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+				enc_buffers->acdc_coef),
+				DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+				enc_buffers->mv));
+	break;
+	case VCD_CODEC_H263:
+		vidc_1080p_set_h263_encode_work_buffers(
+			DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+				enc_buffers->mv),
+				DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+				enc_buffers->acdc_coef));
+	break;
+	case VCD_CODEC_H264:
+		vidc_1080p_set_h264_encode_work_buffers(
+			DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+				enc_buffers->mv),
+			DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+				enc_buffers->col_zero),
+			DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+				enc_buffers->md),
+			DDL_ADDR_OFFSET(ddl_context->dram_base_b,
+				enc_buffers->pred),
+			DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+				enc_buffers->nbor_info),
+			DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+				enc_buffers->mb_info));
+	break;
+	default:
+	break;
+	}
+	if (encoder->buf_format.buffer_format ==
+		VCD_BUFFER_FORMAT_NV12_16M2KA)
+		mem_access_method = VIDC_1080P_TILE_LINEAR;
+	else
+		mem_access_method = VIDC_1080P_TILE_64x32;
+	vidc_1080p_set_encode_input_frame_format(mem_access_method);
+	vidc_1080p_set_encode_padding_control(0, 0, 0, 0);
+	DDL_MSG_LOW("ddl_state_transition: %s ~~>"
+		"DDL_CLIENT_WAIT_FOR_INITCODECDONE",
+		ddl_get_state_string(ddl->client_state));
+	ddl->client_state = DDL_CLIENT_WAIT_FOR_INITCODECDONE;
+	ddl->cmd_state = DDL_CMD_INIT_CODEC;
+	vidc_1080p_set_encode_field_picture_structure(false);
+	seq_start_param.cmd_seq_num = ++ddl_context->cmd_seq_num;
+	seq_start_param.inst_id = ddl->instance_id;
+	seq_start_param.shared_mem_addr_offset = DDL_ADDR_OFFSET(
+		ddl_context->dram_base_a, ddl->shared_mem
+		[ddl->command_channel]);
+	seq_start_param.stream_buffer_addr_offset = DDL_ADDR_OFFSET(
+		ddl_context->dram_base_a, encoder->seq_header);
+	seq_start_param.stream_buffer_size =
+		encoder->seq_header.buffer_size;
+	encoder->seq_header_length = 0;
+	ddl_context->vidc_encode_seq_start[ddl->command_channel](
+		&seq_start_param);
+}
+
+void ddl_vidc_channel_end(struct ddl_client_context *ddl)
+{
+	DDL_MSG_LOW("ddl_state_transition: %s ~~> DDL_CLIENT_WAIT_FOR_CHEND",
+	ddl_get_state_string(ddl->client_state));
+	ddl->client_state = DDL_CLIENT_WAIT_FOR_CHEND;
+	ddl->cmd_state = DDL_CMD_CHANNEL_END;
+	vidc_1080p_set_host2risc_cmd(VIDC_1080P_HOST2RISC_CMD_CLOSE_CH,
+		ddl->instance_id, 0, 0, 0);
+}
+
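+/*
+ * Queue one frame for encoding: compute the input luma/chroma offsets,
+ * apply any pending dynamic property changes, program the per-frame
+ * parameters and, if the pixel cache is enabled, reload its base
+ * addresses before issuing the frame-start command.
+ */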
+void ddl_vidc_encode_frame_run(struct ddl_client_context *ddl)
+{
+	struct vidc_1080p_enc_frame_start_param enc_param;
+	struct ddl_context *ddl_context = ddl->ddl_context;
+	struct ddl_encoder_data  *encoder = &(ddl->codec_data.encoder);
+	struct ddl_enc_buffers *enc_buffers = &(encoder->hw_bufs);
+	struct vcd_frame_data *stream = &(ddl->output_frame.vcd_frm);
+	struct vcd_frame_data *input_vcd_frm =
+		&(ddl->input_frame.vcd_frm);
+	/* one slot per reconstruction DPB plus one for the current input */
+	u32 dpb_addr_y[DDL_ENC_MAX_DPB_BUFFERS + 1];
+	u32 dpb_addr_c[DDL_ENC_MAX_DPB_BUFFERS + 1];
+	u32 index, y_addr, c_addr;
+
+	ddl_vidc_encode_set_metadata_output_buf(ddl);
+
+	encoder->enc_frame_info.meta_data_exists = false;
+
+	y_addr = DDL_OFFSET(ddl_context->dram_base_b.align_physical_addr,
+			input_vcd_frm->physical);
+	c_addr = (y_addr + encoder->input_buf_size.size_y);
+	if (input_vcd_frm->flags & VCD_FRAME_FLAG_EOS) {
+		enc_param.encode = VIDC_1080P_ENC_TYPE_LAST_FRAME_DATA;
+		DDL_MSG_LOW("ddl_state_transition: %s ~~>"
+			"DDL_CLIENT_WAIT_FOR_EOS_DONE",
+			ddl_get_state_string(ddl->client_state));
+		ddl->client_state = DDL_CLIENT_WAIT_FOR_EOS_DONE;
+	} else {
+		enc_param.encode = VIDC_1080P_ENC_TYPE_FRAME_DATA;
+		DDL_MSG_LOW("ddl_state_transition: %s ~~>"
+			"DDL_CLIENT_WAIT_FOR_FRAME_DONE",
+			ddl_get_state_string(ddl->client_state));
+		ddl->client_state = DDL_CLIENT_WAIT_FOR_FRAME_DONE;
+	}
+	ddl->cmd_state = DDL_CMD_ENCODE_FRAME;
+	if (encoder->dynamic_prop_change) {
+		encoder->dynmic_prop_change_req = true;
+		ddl_vidc_encode_dynamic_property(ddl, true);
+	}
+
+	vidc_1080p_set_encode_circular_intra_refresh(
+		encoder->intra_refresh.cir_mb_number);
+	ddl_vidc_encode_set_multi_slice_info(encoder);
+	enc_param.cmd_seq_num = ++ddl_context->cmd_seq_num;
+	enc_param.inst_id = ddl->instance_id;
+	enc_param.shared_mem_addr_offset = DDL_ADDR_OFFSET(
+			ddl_context->dram_base_a,
+			ddl->shared_mem[ddl->command_channel]);
+	enc_param.current_y_addr_offset = y_addr;
+	enc_param.current_c_addr_offset = c_addr;
+	enc_param.stream_buffer_addr_offset = DDL_OFFSET(
+	ddl_context->dram_base_a.align_physical_addr, stream->physical);
+	enc_param.stream_buffer_size =
+		encoder->client_output_buf_req.sz;
+
+	enc_param.intra_frame = encoder->intra_frame_insertion;
+	if (encoder->intra_frame_insertion)
+		encoder->intra_frame_insertion = false;
+	enc_param.input_flush = false;
+	vidc_sm_set_encoder_vop_time(
+		&ddl->shared_mem[ddl->command_channel], true,
+		encoder->vop_timing.vop_time_resolution,
+		ddl->input_frame.frm_delta);
+	vidc_sm_set_frame_tag(&ddl->shared_mem[ddl->command_channel],
+		ddl->input_frame.vcd_frm.ip_frm_tag);
+	if (ddl_context->pix_cache_enable) {
+		for (index = 0; index < enc_buffers->dpb_count;
+			index++) {
+			dpb_addr_y[index] =
+				(u32) VIDC_1080P_DEC_DPB_RESET_VALUE;
+			dpb_addr_c[index] = (u32) enc_buffers->dpb_c
+				[index].align_physical_addr;
+		}
+
+		dpb_addr_y[index] = (u32) input_vcd_frm->physical;
+		dpb_addr_c[index] = (u32) input_vcd_frm->physical +
+			encoder->input_buf_size.size_y;
+
+		vidc_pix_cache_init_luma_chroma_base_addr(
+			enc_buffers->dpb_count + 1, dpb_addr_y, dpb_addr_c);
+		vidc_pix_cache_set_frame_size(encoder->frame_size.width,
+			encoder->frame_size.height);
+		vidc_pix_cache_set_frame_range(enc_buffers->sz_dpb_y,
+			enc_buffers->sz_dpb_c);
+		vidc_pix_cache_clear_cache_tags();
+	}
+	ddl_context->vidc_encode_frame_start[ddl->command_channel] (
+		&enc_param);
+}
+
+u32 ddl_vidc_decode_set_buffers(struct ddl_client_context *ddl)
+{
+	struct ddl_context *ddl_context = ddl->ddl_context;
+	struct ddl_decoder_data *decoder = &(ddl->codec_data.decoder);
+	u32 vcd_status = VCD_S_SUCCESS;
+	struct vidc_1080p_dec_init_buffers_param init_buf_param;
+
+	if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPB)) {
+		DDL_MSG_ERROR("STATE-CRITICAL");
+		return VCD_ERR_FAIL;
+	}
+	ddl_vidc_decode_set_metadata_output(decoder);
+	if (decoder->dp_buf.no_of_dec_pic_buf <
+		decoder->client_output_buf_req.actual_count)
+		return VCD_ERR_BAD_STATE;
+	if (decoder->codec.codec == VCD_CODEC_H264) {
+		vidc_sm_set_allocated_h264_mv_size(
+			&ddl->shared_mem[ddl->command_channel],
+			decoder->hw_bufs.h264_mv[0].buffer_size);
+	}
+	if (vcd_status)
+		return vcd_status;
+#ifdef DDL_BUF_LOG
+	ddl_list_buffers(ddl);
+#endif
+	if (vidc_msg_timing)
+		ddl_set_core_start_time(__func__, DEC_OP_TIME);
+	ddl_decoder_dpb_transact(decoder, NULL, DDL_DPB_OP_INIT);
+	ddl_decoder_dpb_init(ddl);
+	DDL_MSG_LOW("ddl_state_transition: %s ~~> DDL_CLIENT_WAIT_FOR_DPBDONE",
+	ddl_get_state_string(ddl->client_state));
+	ddl->client_state = DDL_CLIENT_WAIT_FOR_DPBDONE;
+	ddl->cmd_state = DDL_CMD_DECODE_SET_DPB;
+	vidc_sm_set_allocated_dpb_size(
+		&ddl->shared_mem[ddl->command_channel],
+		decoder->dpb_buf_size.size_y,
+		decoder->dpb_buf_size.size_c);
+	init_buf_param.cmd_seq_num = ++ddl_context->cmd_seq_num;
+	init_buf_param.inst_id = ddl->instance_id;
+	init_buf_param.shared_mem_addr_offset = DDL_ADDR_OFFSET(
+				ddl_context->dram_base_a, ddl->shared_mem
+				[ddl->command_channel]);
+	init_buf_param.dpb_count = decoder->dp_buf.no_of_dec_pic_buf;
+	ddl_context->vidc_decode_init_buffers[ddl->command_channel] (
+		&init_buf_param);
+	return VCD_S_SUCCESS;
+}
+
+void ddl_vidc_decode_frame_run(struct ddl_client_context *ddl)
+{
+	struct ddl_context *ddl_context = ddl->ddl_context;
+	struct ddl_decoder_data *decoder = &(ddl->codec_data.decoder);
+	struct vcd_frame_data *bit_stream =
+		&(ddl->input_frame.vcd_frm);
+	struct ddl_dec_buffers *dec_buffers = &decoder->hw_bufs;
+	struct ddl_mask *dpb_mask = &ddl->codec_data.decoder.dpb_mask;
+	struct vidc_1080p_dec_frame_start_param dec_param;
+	u32 dpb_addr_y[32], index;
+	if (vidc_msg_timing) {
+		ddl_set_core_start_time(__func__, DEC_OP_TIME);
+		ddl_set_core_start_time(__func__, DEC_IP_TIME);
+	}
+	if ((!bit_stream->data_len) || (!bit_stream->physical)) {
+		ddl_vidc_decode_eos_run(ddl);
+		return;
+	}
+	DDL_MSG_LOW("ddl_state_transition: %s ~~> "
+		"DDL_CLIENT_WAIT_FOR_FRAME_DONE",
+		ddl_get_state_string(ddl->client_state));
+	ddl->client_state = DDL_CLIENT_WAIT_FOR_FRAME_DONE;
+	ddl_vidc_decode_dynamic_property(ddl, true);
+	ddl_decoder_dpb_transact(decoder, NULL, DDL_DPB_OP_SET_MASK);
+	ddl->cmd_state = DDL_CMD_DECODE_FRAME;
+	dec_param.cmd_seq_num = ++ddl_context->cmd_seq_num;
+	dec_param.inst_id = ddl->instance_id;
+	dec_param.shared_mem_addr_offset = DDL_ADDR_OFFSET(
+				ddl_context->dram_base_a, ddl->shared_mem
+				[ddl->command_channel]);
+	dec_param.stream_buffer_addr_offset = DDL_OFFSET(
+			ddl_context->dram_base_a.align_physical_addr,
+			bit_stream->physical);
+	dec_param.stream_frame_size = bit_stream->data_len;
+	dec_param.stream_buffersize = decoder->client_input_buf_req.sz;
+	dec_param.descriptor_buffer_addr_offset = DDL_ADDR_OFFSET(
+	ddl_context->dram_base_a, dec_buffers->desc);
+	dec_param.descriptor_buffer_size = dec_buffers->desc.buffer_size;
+	dec_param.release_dpb_bit_mask = dpb_mask->hw_mask;
+	dec_param.decode = VIDC_1080P_DEC_TYPE_FRAME_DATA;
+	dec_param.dpb_count = decoder->dp_buf.no_of_dec_pic_buf;
+	if (decoder->flush_pending) {
+		dec_param.dpb_flush = true;
+		decoder->flush_pending = false;
+	} else
+		dec_param.dpb_flush = false;
+	vidc_sm_set_frame_tag(&ddl->shared_mem[ddl->command_channel],
+		bit_stream->ip_frm_tag);
+	if (ddl_context->pix_cache_enable) {
+		for (index = 0; index <
+			decoder->dp_buf.no_of_dec_pic_buf; index++) {
+			dpb_addr_y[index] = (u32)
+			decoder->dp_buf.dec_pic_buffers
+				[index].vcd_frm.physical;
+		}
+		vidc_pix_cache_init_luma_chroma_base_addr(
+			decoder->dp_buf.no_of_dec_pic_buf,
+			dpb_addr_y, NULL);
+		vidc_pix_cache_set_frame_range(decoder->dpb_buf_size.size_y,
+			decoder->dpb_buf_size.size_c);
+		vidc_pix_cache_clear_cache_tags();
+	}
+	ddl_context->vidc_decode_frame_start[ddl->command_channel] (
+		&dec_param);
+}
+
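+/*
+ * End-of-stream handling: issue a last-frame decode command with no new
+ * bitstream so the core can return the frames still held in the DPB.
+ */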
+void ddl_vidc_decode_eos_run(struct ddl_client_context *ddl)
+{
+	struct ddl_context *ddl_context = ddl->ddl_context;
+	struct ddl_decoder_data *decoder = &(ddl->codec_data.decoder);
+	struct vcd_frame_data *bit_stream =
+		&(ddl->input_frame.vcd_frm);
+	struct ddl_dec_buffers *dec_buffers = &(decoder->hw_bufs);
+	struct ddl_mask *dpb_mask =
+		&(ddl->codec_data.decoder.dpb_mask);
+	struct vidc_1080p_dec_frame_start_param dec_param;
+
+	DDL_MSG_LOW("ddl_state_transition: %s ~~> DDL_CLIENT_WAIT_FOR_EOS_DONE",
+	ddl_get_state_string(ddl->client_state));
+	ddl->client_state = DDL_CLIENT_WAIT_FOR_EOS_DONE;
+	if (decoder->output_order == VCD_DEC_ORDER_DECODE)
+		decoder->dynamic_prop_change |= DDL_DEC_REQ_OUTPUT_FLUSH;
+	ddl_vidc_decode_dynamic_property(ddl, true);
+	ddl_decoder_dpb_transact(decoder, NULL, DDL_DPB_OP_SET_MASK);
+	decoder->dynmic_prop_change_req = true;
+	ddl->cmd_state = DDL_CMD_EOS;
+	memset(&dec_param, 0, sizeof(dec_param));
+	dec_param.cmd_seq_num = ++ddl_context->cmd_seq_num;
+	dec_param.inst_id = ddl->instance_id;
+	dec_param.shared_mem_addr_offset = DDL_ADDR_OFFSET(
+			ddl_context->dram_base_a,
+			ddl->shared_mem[ddl->command_channel]);
+	dec_param.descriptor_buffer_addr_offset = DDL_ADDR_OFFSET(
+	ddl_context->dram_base_a, dec_buffers->desc);
+	dec_param.descriptor_buffer_size = dec_buffers->desc.buffer_size;
+	dec_param.release_dpb_bit_mask = dpb_mask->hw_mask;
+	dec_param.decode = VIDC_1080P_DEC_TYPE_LAST_FRAME_DATA;
+	dec_param.dpb_count = decoder->dp_buf.no_of_dec_pic_buf;
+	if (decoder->flush_pending) {
+		dec_param.dpb_flush = true;
+		decoder->flush_pending = false;
+	} else
+		dec_param.dpb_flush = false;
+	vidc_sm_set_frame_tag(&ddl->shared_mem[ddl->command_channel],
+	bit_stream->ip_frm_tag);
+	ddl_context->vidc_decode_frame_start[ddl->command_channel] (
+		&dec_param);
+}
diff --git a/drivers/video/msm/vidc/1080p/ddl/vidc.c b/drivers/video/msm/vidc/1080p/ddl/vidc.c
new file mode 100644
index 0000000..ae918f0
--- /dev/null
+++ b/drivers/video/msm/vidc/1080p/ddl/vidc.c
@@ -0,0 +1,1007 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "vidc.h"
+#include "vidc_hwio.h"
+
+
+#define VIDC_1080P_INIT_CH_INST_ID      0x0000ffff
+#define VIDC_1080P_RESET_VI             0x3f7
+#define VIDC_1080P_RESET_VI_RISC        0x3f6
+#define VIDC_1080P_RESET_VI_VIDC_RISC    0x3f2
+#define VIDC_1080P_RESET_ALL            0
+#define VIDC_1080P_RESET_RISC           0x3fe
+#define VIDC_1080P_RESET_NONE           0x3ff
+#define VIDC_1080P_INTERRUPT_CLEAR      0
+#define VIDC_1080P_MAX_H264DECODER_DPB  32
+#define VIDC_1080P_MAX_DEC_RECON_BUF    32
+
+#define VIDC_1080P_SI_RG7_DISPLAY_STATUS_MASK    0x00000007
+#define VIDC_1080P_SI_RG7_DISPLAY_STATUS_SHIFT   0
+#define VIDC_1080P_SI_RG7_DISPLAY_CODING_MASK    0x00000008
+#define VIDC_1080P_SI_RG7_DISPLAY_CODING_SHIFT   3
+#define VIDC_1080P_SI_RG7_DISPLAY_RES_MASK       0x00000030
+#define VIDC_1080P_SI_RG7_DISPLAY_RES_SHIFT      4
+
+#define VIDC_1080P_SI_RG7_DISPLAY_CROP_MASK      0x00000040
+#define VIDC_1080P_SI_RG7_DISPLAY_CROP_SHIFT     6
+
+#define VIDC_1080P_SI_RG8_DECODE_FRAMETYPE_MASK  0x00000007
+
+#define VIDC_1080P_SI_RG10_NUM_DPB_BMSK      0x00003fff
+#define VIDC_1080P_SI_RG10_NUM_DPB_SHFT      0
+#define VIDC_1080P_SI_RG10_DPB_FLUSH_BMSK    0x00004000
+#define VIDC_1080P_SI_RG10_DPB_FLUSH_SHFT    14
+#define VIDC_1080P_SI_RG10_DMX_DISABLE_BMSK  0x00008000
+#define VIDC_1080P_SI_RG10_DMX_DISABLE_SHFT  15
+
+#define VIDC_1080P_SI_RG11_DECODE_STATUS_MASK    0x00000007
+#define VIDC_1080P_SI_RG11_DECODE_STATUS_SHIFT   0
+#define VIDC_1080P_SI_RG11_DECODE_CODING_MASK    0x00000008
+#define VIDC_1080P_SI_RG11_DECODE_CODING_SHIFT   3
+#define VIDC_1080P_SI_RG11_DECODE_RES_MASK       0x000000C0
+#define VIDC_1080P_SI_RG11_DECODE_RES_SHIFT      6
+#define VIDC_1080P_SI_RG11_DECODE_CROPP_MASK     0x00000100
+#define VIDC_1080P_SI_RG11_DECODE_CROPP_SHIFT    8
+
+#define VIDC_1080P_BASE_OFFSET_SHIFT         11
+
+
+#define VIDC_1080P_H264DEC_LUMA_ADDR      HWIO_REG_759068_ADDR
+#define VIDC_1080P_H264DEC_CHROMA_ADDR    HWIO_REG_515200_ADDR
+#define VIDC_1080P_H264DEC_MV_PLANE_ADDR  HWIO_REG_466192_ADDR
+
+#define VIDC_1080P_DEC_LUMA_ADDR        HWIO_REG_759068_ADDR
+#define VIDC_1080P_DEC_CHROMA_ADDR      HWIO_REG_515200_ADDR
+
+#define VIDC_1080P_DEC_TYPE_SEQ_HEADER         0x00010000
+#define VIDC_1080P_DEC_TYPE_FRAME_DATA         0x00020000
+#define VIDC_1080P_DEC_TYPE_LAST_FRAME_DATA    0x00030000
+#define VIDC_1080P_DEC_TYPE_INIT_BUFFERS       0x00040000
+
+#define VIDC_1080P_ENC_TYPE_SEQ_HEADER       0x00010000
+#define VIDC_1080P_ENC_TYPE_FRAME_DATA       0x00020000
+#define VIDC_1080P_ENC_TYPE_LAST_FRAME_DATA  0x00030000
+
+#define VIDC_1080P_MAX_INTRA_PERIOD 0xffff
+
+u8 *VIDC_BASE_PTR;
+
+void vidc_1080p_do_sw_reset(enum vidc_1080p_reset init_flag)
+{
+	if (init_flag == VIDC_1080P_RESET_IN_SEQ_FIRST_STAGE) {
+		u32 sw_reset_value = 0;
+
+		VIDC_HWIO_IN(REG_557899, &sw_reset_value);
+		sw_reset_value &= (~HWIO_REG_557899_RSTN_VI_BMSK);
+		VIDC_HWIO_OUT(REG_557899, sw_reset_value);
+		sw_reset_value &= (~HWIO_REG_557899_RSTN_RISC_BMSK);
+		VIDC_HWIO_OUT(REG_557899, sw_reset_value);
+		sw_reset_value &= (~(HWIO_REG_557899_RSTN_VIDCCORE_BMSK |
+					HWIO_REG_557899_RSTN_DMX_BMSK));
+
+		VIDC_HWIO_OUT(REG_557899, sw_reset_value);
+	} else if (init_flag == VIDC_1080P_RESET_IN_SEQ_SECOND_STAGE) {
+		VIDC_HWIO_OUT(REG_557899, VIDC_1080P_RESET_ALL);
+		VIDC_HWIO_OUT(REG_557899, VIDC_1080P_RESET_RISC);
+	}
+}
+
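+/*
+ * Release the core from software reset: allow out-of-order AXI reads and
+ * writes, halt the AXI bridge and poll for the acknowledge, pulse the AXI
+ * reset, program the crossbar burst sizes and clear the host/RISC command
+ * mailboxes.
+ */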
+void vidc_1080p_release_sw_reset(void)
+{
+	u32 nAxiCtl;
+	u32 nAxiStatus;
+	u32 nRdWrBurst;
+	u32 nOut_Order;
+
+	nOut_Order = VIDC_SETFIELD(1, HWIO_REG_5519_AXI_AOOORD_SHFT,
+					HWIO_REG_5519_AXI_AOOORD_BMSK);
+	VIDC_HWIO_OUT(REG_5519, nOut_Order);
+
+	nOut_Order = VIDC_SETFIELD(1, HWIO_REG_606364_AXI_AOOOWR_SHFT,
+					HWIO_REG_606364_AXI_AOOOWR_BMSK);
+	VIDC_HWIO_OUT(REG_606364, nOut_Order);
+
+	nAxiCtl = VIDC_SETFIELD(1, HWIO_REG_471159_AXI_HALT_REQ_SHFT,
+				HWIO_REG_471159_AXI_HALT_REQ_BMSK);
+
+	VIDC_HWIO_OUT(REG_471159, nAxiCtl);
+
+	do {
+		VIDC_HWIO_IN(REG_437878, &nAxiStatus);
+		nAxiStatus = VIDC_GETFIELD(nAxiStatus,
+					 HWIO_REG_437878_AXI_HALT_ACK_BMSK,
+					 HWIO_REG_437878_AXI_HALT_ACK_SHFT);
+	} while (0x3 != nAxiStatus);
+
+	nAxiCtl  =  VIDC_SETFIELD(1,
+				HWIO_REG_471159_AXI_RESET_SHFT,
+				HWIO_REG_471159_AXI_RESET_BMSK);
+
+	VIDC_HWIO_OUT(REG_471159, nAxiCtl);
+	VIDC_HWIO_OUT(REG_471159, 0);
+
+	nRdWrBurst = VIDC_SETFIELD(8,
+				HWIO_REG_922106_XBAR_OUT_MAX_RD_BURST_SHFT,
+				HWIO_REG_922106_XBAR_OUT_MAX_RD_BURST_BMSK) |
+	VIDC_SETFIELD(8, HWIO_REG_922106_XBAR_OUT_MAX_WR_BURST_SHFT,
+				HWIO_REG_922106_XBAR_OUT_MAX_WR_BURST_BMSK);
+
+	VIDC_HWIO_OUT(REG_922106, nRdWrBurst);
+
+	VIDC_HWIO_OUT(REG_666957, VIDC_1080P_INIT_CH_INST_ID);
+	VIDC_HWIO_OUT(REG_313350, VIDC_1080P_INIT_CH_INST_ID);
+	VIDC_HWIO_OUT(REG_695082, VIDC_1080P_RISC2HOST_CMD_EMPTY);
+	VIDC_HWIO_OUT(REG_611794, VIDC_1080P_HOST2RISC_CMD_EMPTY);
+	VIDC_HWIO_OUT(REG_557899, VIDC_1080P_RESET_NONE);
+}
+
+void vidc_1080p_clear_interrupt(void)
+{
+	VIDC_HWIO_OUT(REG_575377, VIDC_1080P_INTERRUPT_CLEAR);
+}
+
+void vidc_1080p_set_host2risc_cmd(enum vidc_1080p_host2risc_cmd
+	host2risc_command, u32 host2risc_arg1, u32 host2risc_arg2,
+	u32 host2risc_arg3, u32 host2risc_arg4)
+{
+	VIDC_HWIO_OUT(REG_611794, VIDC_1080P_HOST2RISC_CMD_EMPTY);
+	VIDC_HWIO_OUT(REG_356340, host2risc_arg1);
+	VIDC_HWIO_OUT(REG_899023, host2risc_arg2);
+	VIDC_HWIO_OUT(REG_987762, host2risc_arg3);
+	VIDC_HWIO_OUT(REG_544000, host2risc_arg4);
+	VIDC_HWIO_OUT(REG_611794, host2risc_command);
+}
+
+void vidc_1080p_get_risc2host_cmd(u32 *pn_risc2host_command,
+	u32 *pn_risc2host_arg1, u32 *pn_risc2host_arg2,
+	u32 *pn_risc2host_arg3, u32 *pn_risc2host_arg4)
+{
+	VIDC_HWIO_IN(REG_695082, pn_risc2host_command);
+	VIDC_HWIO_IN(REG_156596, pn_risc2host_arg1);
+	VIDC_HWIO_IN(REG_222292, pn_risc2host_arg2);
+	VIDC_HWIO_IN(REG_790962, pn_risc2host_arg3);
+	VIDC_HWIO_IN(REG_679882, pn_risc2host_arg4);
+}
+
+void vidc_1080p_get_risc2host_cmd_status(u32 err_status,
+	u32 *dec_err_status, u32 *disp_err_status)
+{
+	*dec_err_status = VIDC_GETFIELD(err_status,
+		VIDC_RISC2HOST_ARG2_VIDC_DEC_ERROR_STATUS_BMSK,
+		VIDC_RISC2HOST_ARG2_VIDC_DEC_ERROR_STATUS_SHFT);
+	*disp_err_status = VIDC_GETFIELD(err_status,
+		VIDC_RISC2HOST_ARG2_VIDC_DISP_ERROR_STATUS_BMSK,
+		VIDC_RISC2HOST_ARG2_VIDC_DISP_ERROR_STATUS_SHFT);
+
+}
+
+void vidc_1080p_clear_risc2host_cmd(void)
+{
+	VIDC_HWIO_OUT(REG_695082, VIDC_1080P_RISC2HOST_CMD_EMPTY);
+}
+
+void vidc_1080p_get_fw_version(u32 *pn_fw_version)
+{
+	VIDC_HWIO_IN(REG_653206, pn_fw_version);
+}
+
+void vidc_1080p_get_fw_status(u32 *pn_fw_status)
+{
+	VIDC_HWIO_IN(REG_350619, pn_fw_status);
+}
+
+void vidc_1080p_init_memory_controller(u32 dram_base_addr_a,
+	u32 dram_base_addr_b)
+{
+	VIDC_HWIO_OUT(REG_64440, dram_base_addr_a);
+	VIDC_HWIO_OUT(REG_675915, dram_base_addr_b);
+}
+
+void vidc_1080p_get_memory_controller_status(u32 *pb_mc_abusy,
+	u32 *pb_mc_bbusy)
+{
+	u32 mc_status = 0;
+
+	VIDC_HWIO_IN(REG_399911, &mc_status);
+	*pb_mc_abusy = (u32) ((mc_status &
+			HWIO_REG_399911_MC_BUSY_A_BMSK) >>
+			HWIO_REG_399911_MC_BUSY_A_SHFT);
+	*pb_mc_bbusy = (u32) ((mc_status &
+			HWIO_REG_399911_MC_BUSY_B_BMSK) >>
+			HWIO_REG_399911_MC_BUSY_B_SHFT);
+}
+
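+/*
+ * Program the per-DPB luma, chroma and MV base registers for H264 decode.
+ * All offsets are written in 2 KB units (shifted right by
+ * VIDC_1080P_BASE_OFFSET_SHIFT); at most VIDC_1080P_MAX_H264DECODER_DPB
+ * entries are programmed.
+ */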
+void vidc_1080p_set_h264_decode_buffers(u32 dpb, u32 dec_vert_nb_mv_offset,
+	u32 dec_nb_ip_offset, u32 *pn_dpb_luma_offset,
+	u32 *pn_dpb_chroma_offset, u32 *pn_mv_buffer_offset)
+{
+	u32 count = 0, num_dpb_used = dpb;
+	u8 *vidc_dpb_luma_reg = (u8 *) VIDC_1080P_H264DEC_LUMA_ADDR;
+	u8 *vidc_dpb_chroma_reg = (u8 *) VIDC_1080P_H264DEC_CHROMA_ADDR;
+	u8 *vidc_mv_buffer_reg = (u8 *) VIDC_1080P_H264DEC_MV_PLANE_ADDR;
+
+	VIDC_HWIO_OUT(REG_931311, (dec_vert_nb_mv_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT));
+	VIDC_HWIO_OUT(REG_16277, (dec_nb_ip_offset >>
+	VIDC_1080P_BASE_OFFSET_SHIFT));
+	if (num_dpb_used > VIDC_1080P_MAX_H264DECODER_DPB)
+		num_dpb_used = VIDC_1080P_MAX_H264DECODER_DPB;
+	for (count = 0; count < num_dpb_used; count++) {
+		VIDC_OUT_DWORD(vidc_dpb_luma_reg,
+			(pn_dpb_luma_offset[count] >>
+			VIDC_1080P_BASE_OFFSET_SHIFT));
+		VIDC_OUT_DWORD(vidc_dpb_chroma_reg,
+			(pn_dpb_chroma_offset[count] >>
+			VIDC_1080P_BASE_OFFSET_SHIFT));
+		VIDC_OUT_DWORD(vidc_mv_buffer_reg,
+			(pn_mv_buffer_offset[count] >>
+			VIDC_1080P_BASE_OFFSET_SHIFT));
+		vidc_dpb_luma_reg += 4;
+		vidc_dpb_chroma_reg += 4;
+		vidc_mv_buffer_reg += 4;
+	}
+}
+
+void vidc_1080p_set_decode_recon_buffers(u32 recon_buffer,
+	u32 *pn_dec_luma, u32 *pn_dec_chroma)
+{
+	u32 count = 0, recon_buf_to_program = recon_buffer;
+	u8 *dec_recon_luma_reg = (u8 *) VIDC_1080P_DEC_LUMA_ADDR;
+	u8 *dec_recon_chroma_reg = (u8 *) VIDC_1080P_DEC_CHROMA_ADDR;
+
+	if (recon_buf_to_program > VIDC_1080P_MAX_DEC_RECON_BUF)
+		recon_buf_to_program = VIDC_1080P_MAX_DEC_RECON_BUF;
+	for (count = 0; count < recon_buf_to_program; count++) {
+		VIDC_OUT_DWORD(dec_recon_luma_reg, (pn_dec_luma[count] >>
+			VIDC_1080P_BASE_OFFSET_SHIFT));
+		VIDC_OUT_DWORD(dec_recon_chroma_reg,
+			(pn_dec_chroma[count] >>
+			VIDC_1080P_BASE_OFFSET_SHIFT));
+		dec_recon_luma_reg += 4;
+		dec_recon_chroma_reg += 4;
+	}
+}
+
+void vidc_1080p_set_mpeg4_divx_decode_work_buffers(u32 nb_dcac_buffer_offset,
+	u32 upnb_mv_buffer_offset, u32 sub_anchor_buffer_offset,
+	u32 overlay_transform_buffer_offset, u32 stx_parser_buffer_offset)
+{
+	VIDC_HWIO_OUT(REG_931311, (nb_dcac_buffer_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT));
+	VIDC_HWIO_OUT(REG_16277, (upnb_mv_buffer_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT));
+	VIDC_HWIO_OUT(REG_654169, (sub_anchor_buffer_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT));
+	VIDC_HWIO_OUT(REG_802794,
+		(overlay_transform_buffer_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT));
+	VIDC_HWIO_OUT(REG_252167, (stx_parser_buffer_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT));
+}
+
+void vidc_1080p_set_h263_decode_work_buffers(u32 nb_dcac_buffer_offset,
+	u32 upnb_mv_buffer_offset, u32 sub_anchor_buffer_offset,
+	u32 overlay_transform_buffer_offset)
+{
+	VIDC_HWIO_OUT(REG_931311, (nb_dcac_buffer_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT));
+	VIDC_HWIO_OUT(REG_16277, (upnb_mv_buffer_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT));
+	VIDC_HWIO_OUT(REG_654169, (sub_anchor_buffer_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT));
+	VIDC_HWIO_OUT(REG_802794,
+		(overlay_transform_buffer_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT));
+}
+
+void vidc_1080p_set_vc1_decode_work_buffers(u32 nb_dcac_buffer_offset,
+	u32 upnb_mv_buffer_offset, u32 sub_anchor_buffer_offset,
+	u32 overlay_transform_buffer_offset, u32 bitplain1Buffer_offset,
+	u32 bitplain2Buffer_offset, u32 bitplain3Buffer_offset)
+{
+	VIDC_HWIO_OUT(REG_931311, (nb_dcac_buffer_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT));
+	VIDC_HWIO_OUT(REG_16277, (upnb_mv_buffer_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT));
+	VIDC_HWIO_OUT(REG_654169, (sub_anchor_buffer_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT));
+	VIDC_HWIO_OUT(REG_802794,
+		(overlay_transform_buffer_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT));
+	VIDC_HWIO_OUT(REG_724376, (bitplain3Buffer_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT));
+	VIDC_HWIO_OUT(REG_551674, (bitplain2Buffer_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT));
+	VIDC_HWIO_OUT(REG_115991, (bitplain1Buffer_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT));
+}
+
+void vidc_1080p_set_encode_recon_buffers(u32 recon_buffer,
+	u32 *pn_enc_luma, u32 *pn_enc_chroma)
+{
+	if (recon_buffer > 0) {
+		VIDC_HWIO_OUT(REG_294579, (pn_enc_luma[0] >>
+			VIDC_1080P_BASE_OFFSET_SHIFT));
+		VIDC_HWIO_OUT(REG_759068, (pn_enc_chroma[0] >>
+			VIDC_1080P_BASE_OFFSET_SHIFT));
+	}
+	if (recon_buffer > 1) {
+		VIDC_HWIO_OUT(REG_616802, (pn_enc_luma[1] >>
+			VIDC_1080P_BASE_OFFSET_SHIFT));
+		VIDC_HWIO_OUT(REG_833502, (pn_enc_chroma[1] >>
+			VIDC_1080P_BASE_OFFSET_SHIFT));
+	}
+	if (recon_buffer > 2) {
+		VIDC_HWIO_OUT(REG_61427, (pn_enc_luma[2] >>
+			VIDC_1080P_BASE_OFFSET_SHIFT));
+		VIDC_HWIO_OUT(REG_68356, (pn_enc_chroma[2] >>
+			VIDC_1080P_BASE_OFFSET_SHIFT));
+	}
+	if (recon_buffer > 3) {
+		VIDC_HWIO_OUT(REG_23318, (pn_enc_luma[3] >>
+			VIDC_1080P_BASE_OFFSET_SHIFT));
+		VIDC_HWIO_OUT(REG_127855, (pn_enc_chroma[3] >>
+			VIDC_1080P_BASE_OFFSET_SHIFT));
+	}
+}
+
+void vidc_1080p_set_h264_encode_work_buffers(u32 up_row_mv_buffer_offset,
+	u32 direct_colzero_flag_buffer_offset,
+	u32 upper_intra_md_buffer_offset,
+	u32 upper_intra_pred_buffer_offset, u32 nbor_infor_buffer_offset,
+	u32 mb_info_offset)
+{
+	VIDC_HWIO_OUT(REG_515200, (up_row_mv_buffer_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT));
+	VIDC_HWIO_OUT(REG_69832,
+		(direct_colzero_flag_buffer_offset>>
+		VIDC_1080P_BASE_OFFSET_SHIFT));
+	VIDC_HWIO_OUT(REG_256132,
+		(upper_intra_md_buffer_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT));
+	VIDC_HWIO_OUT(REG_475648,
+		(upper_intra_pred_buffer_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT));
+	VIDC_HWIO_OUT(REG_29510, (nbor_infor_buffer_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT));
+	VIDC_HWIO_OUT(REG_175929, (mb_info_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT));
+}
+
+void vidc_1080p_set_h263_encode_work_buffers(u32 up_row_mv_buffer_offset,
+	u32 up_row_inv_quanti_coeff_buffer_offset)
+{
+	VIDC_HWIO_OUT(REG_515200, (up_row_mv_buffer_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT));
+	VIDC_HWIO_OUT(REG_29510, (
+		up_row_inv_quanti_coeff_buffer_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT));
+}
+
+void vidc_1080p_set_mpeg4_encode_work_buffers(u32 skip_flag_buffer_offset,
+	u32 up_row_inv_quanti_coeff_buffer_offset, u32 upper_mv_offset)
+{
+	VIDC_HWIO_OUT(REG_69832, (skip_flag_buffer_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT));
+	VIDC_HWIO_OUT(REG_29510, (
+		up_row_inv_quanti_coeff_buffer_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT));
+	VIDC_HWIO_OUT(REG_515200, (upper_mv_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT));
+}
+
+void vidc_1080p_set_encode_frame_size(u32 hori_size, u32 vert_size)
+{
+	VIDC_HWIO_OUT(REG_934655, hori_size);
+	VIDC_HWIO_OUT(REG_179070, vert_size);
+}
+
+void vidc_1080p_set_encode_profile_level(u32 encode_profile, u32 enc_level)
+{
+	u32 profile_level = 0;
+
+	profile_level = VIDC_SETFIELD(enc_level,
+				HWIO_REG_63643_LEVEL_SHFT,
+				HWIO_REG_63643_LEVEL_BMSK) |
+				VIDC_SETFIELD(encode_profile,
+				HWIO_REG_63643_PROFILE_SHFT,
+				HWIO_REG_63643_PROFILE_BMSK);
+	VIDC_HWIO_OUT(REG_63643, profile_level);
+}
+
+void vidc_1080p_set_encode_field_picture_structure(u32 enc_field_picture)
+{
+	VIDC_HWIO_OUT(REG_786024, enc_field_picture);
+}
+
+void vidc_1080p_set_decode_mpeg4_pp_filter(u32 lf_enables)
+{
+	VIDC_HWIO_OUT(REG_152500, lf_enables);
+}
+
+void vidc_1080p_set_decode_qp_save_control(u32 enable_q_pout)
+{
+	VIDC_HWIO_OUT(REG_143629, enable_q_pout);
+}
+
+void vidc_1080p_get_returned_channel_inst_id(u32 *pn_rtn_chid)
+{
+	VIDC_HWIO_IN(REG_607589, pn_rtn_chid);
+}
+
+void vidc_1080p_clear_returned_channel_inst_id(void)
+{
+	VIDC_HWIO_OUT(REG_607589, VIDC_1080P_INIT_CH_INST_ID);
+}
+
+void vidc_1080p_get_decode_seq_start_result(
+	struct vidc_1080p_seq_hdr_info *seq_hdr_info)
+{
+	u32 dec_disp_result;
+	u32 frame = 0;
+	VIDC_HWIO_IN(REG_845544, &seq_hdr_info->img_size_y);
+	VIDC_HWIO_IN(REG_859906, &seq_hdr_info->img_size_x);
+	VIDC_HWIO_IN(REG_490078, &seq_hdr_info->min_num_dpb);
+	VIDC_HWIO_IN(REG_489688, &seq_hdr_info->dec_frm_size);
+	VIDC_HWIO_IN(REG_853667, &dec_disp_result);
+	seq_hdr_info->disp_progressive = VIDC_GETFIELD(dec_disp_result,
+					VIDC_1080P_SI_RG7_DISPLAY_CODING_MASK,
+					VIDC_1080P_SI_RG7_DISPLAY_CODING_SHIFT);
+	seq_hdr_info->disp_crop_exists  = VIDC_GETFIELD(dec_disp_result,
+		VIDC_1080P_SI_RG7_DISPLAY_CROP_MASK,
+		VIDC_1080P_SI_RG7_DISPLAY_CROP_SHIFT);
+	VIDC_HWIO_IN(REG_692991, &dec_disp_result);
+	seq_hdr_info->dec_progressive = VIDC_GETFIELD(dec_disp_result,
+					VIDC_1080P_SI_RG11_DECODE_CODING_MASK,
+					VIDC_1080P_SI_RG11_DECODE_CODING_SHIFT);
+	seq_hdr_info->dec_crop_exists  = VIDC_GETFIELD(dec_disp_result,
+		VIDC_1080P_SI_RG11_DECODE_CROPP_MASK,
+		VIDC_1080P_SI_RG11_DECODE_CROPP_SHIFT);
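+	/* Bit 3 of REG_760102 carries the data-partitioning flag. */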
+	VIDC_HWIO_IN(REG_760102, &frame);
+	seq_hdr_info->data_partition = ((frame & 0x8) >> 3);
+}
+
+void vidc_1080p_get_decoded_frame_size(u32 *pn_decoded_size)
+{
+	VIDC_HWIO_IN(REG_489688, pn_decoded_size);
+}
+
+void vidc_1080p_get_display_frame_result(
+	struct vidc_1080p_dec_disp_info *dec_disp_info)
+{
+	u32 display_result;
+	VIDC_HWIO_IN(REG_640904, &dec_disp_info->display_y_addr);
+	VIDC_HWIO_IN(REG_60114, &dec_disp_info->display_c_addr);
+	VIDC_HWIO_IN(REG_853667, &display_result);
+	VIDC_HWIO_IN(REG_845544, &dec_disp_info->img_size_y);
+	VIDC_HWIO_IN(REG_859906, &dec_disp_info->img_size_x);
+	dec_disp_info->display_status =
+		(enum vidc_1080p_display_status)
+		VIDC_GETFIELD(display_result,
+		VIDC_1080P_SI_RG7_DISPLAY_STATUS_MASK,
+		VIDC_1080P_SI_RG7_DISPLAY_STATUS_SHIFT);
+	dec_disp_info->display_coding =
+		(enum vidc_1080p_display_coding)
+	VIDC_GETFIELD(display_result, VIDC_1080P_SI_RG7_DISPLAY_CODING_MASK,
+		VIDC_1080P_SI_RG7_DISPLAY_CODING_SHIFT);
+	dec_disp_info->disp_resl_change = VIDC_GETFIELD(display_result,
+		VIDC_1080P_SI_RG7_DISPLAY_RES_MASK,
+		VIDC_1080P_SI_RG7_DISPLAY_RES_SHIFT);
+	dec_disp_info->disp_crop_exists = VIDC_GETFIELD(display_result,
+		VIDC_1080P_SI_RG7_DISPLAY_CROP_MASK,
+		VIDC_1080P_SI_RG7_DISPLAY_CROP_SHIFT);
+}
+
+void vidc_1080p_get_decode_frame(
+	enum vidc_1080p_decode_frame *pe_frame)
+{
+	u32 frame = 0;
+
+	VIDC_HWIO_IN(REG_760102, &frame);
+	*pe_frame = (enum vidc_1080p_decode_frame)
+		(frame & VIDC_1080P_SI_RG8_DECODE_FRAMETYPE_MASK);
+}
+
+void vidc_1080p_get_decode_frame_result(
+	struct vidc_1080p_dec_disp_info *dec_disp_info)
+{
+	u32 decode_result;
+
+	VIDC_HWIO_IN(REG_378318, &dec_disp_info->decode_y_addr);
+	VIDC_HWIO_IN(REG_203487, &dec_disp_info->decode_c_addr);
+	VIDC_HWIO_IN(REG_692991, &decode_result);
+	dec_disp_info->decode_status = (enum vidc_1080p_display_status)
+				VIDC_GETFIELD(decode_result,
+				VIDC_1080P_SI_RG11_DECODE_STATUS_MASK,
+				VIDC_1080P_SI_RG11_DECODE_STATUS_SHIFT);
+	dec_disp_info->decode_coding = (enum vidc_1080p_display_coding)
+				VIDC_GETFIELD(decode_result,
+				VIDC_1080P_SI_RG11_DECODE_CODING_MASK,
+				VIDC_1080P_SI_RG11_DECODE_CODING_SHIFT);
+	dec_disp_info->dec_resl_change = VIDC_GETFIELD(decode_result,
+		VIDC_1080P_SI_RG11_DECODE_RES_MASK,
+		VIDC_1080P_SI_RG11_DECODE_RES_SHIFT);
+	dec_disp_info->dec_crop_exists = VIDC_GETFIELD(decode_result,
+		VIDC_1080P_SI_RG11_DECODE_CROPP_MASK,
+		VIDC_1080P_SI_RG11_DECODE_CROPP_SHIFT);
+}
+
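+/*
+ * Command-issue pattern shared by the channel helpers below: clear the
+ * RISC2HOST mailbox, reset the channel command register to
+ * VIDC_1080P_INIT_CH_INST_ID, program the command parameters, then write
+ * the command type OR'ed with the instance id to hand the request to the
+ * firmware.
+ */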
+void vidc_1080p_decode_seq_start_ch0(
+	struct vidc_1080p_dec_seq_start_param *param)
+{
+	VIDC_HWIO_OUT(REG_695082, VIDC_1080P_RISC2HOST_CMD_EMPTY);
+	VIDC_HWIO_OUT(REG_666957, VIDC_1080P_INIT_CH_INST_ID);
+	VIDC_HWIO_OUT(REG_117192,
+		param->stream_buffer_addr_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT);
+	VIDC_HWIO_OUT(REG_145068, param->stream_frame_size);
+	VIDC_HWIO_OUT(REG_921356,
+		param->descriptor_buffer_addr_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT);
+	VIDC_HWIO_OUT(REG_190381, param->stream_buffersize);
+	VIDC_HWIO_OUT(REG_85655, param->descriptor_buffer_size);
+	VIDC_HWIO_OUT(REG_889944, param->shared_mem_addr_offset);
+	VIDC_HWIO_OUT(REG_397087, param->cmd_seq_num);
+	VIDC_HWIO_OUT(REG_666957, VIDC_1080P_DEC_TYPE_SEQ_HEADER |
+		param->inst_id);
+}
+
+void vidc_1080p_decode_seq_start_ch1(
+	struct vidc_1080p_dec_seq_start_param *param)
+{
+	VIDC_HWIO_OUT(REG_695082, VIDC_1080P_RISC2HOST_CMD_EMPTY);
+	VIDC_HWIO_OUT(REG_313350, VIDC_1080P_INIT_CH_INST_ID);
+	VIDC_HWIO_OUT(REG_980194,
+		param->stream_buffer_addr_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT);
+	VIDC_HWIO_OUT(REG_936704, param->stream_frame_size);
+	VIDC_HWIO_OUT(REG_821977,
+		param->descriptor_buffer_addr_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT);
+	VIDC_HWIO_OUT(REG_887095, param->stream_buffersize);
+	VIDC_HWIO_OUT(REG_576987, param->descriptor_buffer_size);
+	VIDC_HWIO_OUT(REG_652528, param->shared_mem_addr_offset);
+	VIDC_HWIO_OUT(REG_254093, param->cmd_seq_num);
+	VIDC_HWIO_OUT(REG_313350, VIDC_1080P_DEC_TYPE_SEQ_HEADER |
+		param->inst_id);
+}
+
+void vidc_1080p_decode_frame_start_ch0(
+	struct vidc_1080p_dec_frame_start_param *param)
+{
+	u32 dpb_config;
+
+	VIDC_HWIO_OUT(REG_695082, VIDC_1080P_RISC2HOST_CMD_EMPTY);
+	VIDC_HWIO_OUT(REG_666957, VIDC_1080P_INIT_CH_INST_ID);
+	if ((param->decode == VIDC_1080P_DEC_TYPE_LAST_FRAME_DATA) &&
+		((!param->stream_buffer_addr_offset) ||
+		(!param->stream_frame_size))) {
+		VIDC_HWIO_OUT(REG_117192, 0);
+		VIDC_HWIO_OUT(REG_145068, 0);
+		VIDC_HWIO_OUT(REG_190381, 0);
+	} else {
+		VIDC_HWIO_OUT(REG_117192,
+			param->stream_buffer_addr_offset >>
+			VIDC_1080P_BASE_OFFSET_SHIFT);
+		VIDC_HWIO_OUT(REG_145068,
+			param->stream_frame_size);
+		VIDC_HWIO_OUT(REG_190381,
+			param->stream_buffersize);
+	}
+	dpb_config = VIDC_SETFIELD(param->dpb_flush,
+					VIDC_1080P_SI_RG10_DPB_FLUSH_SHFT,
+					VIDC_1080P_SI_RG10_DPB_FLUSH_BMSK) |
+				VIDC_SETFIELD(param->dpb_count,
+					VIDC_1080P_SI_RG10_NUM_DPB_SHFT,
+					VIDC_1080P_SI_RG10_NUM_DPB_BMSK);
+	VIDC_HWIO_OUT(REG_921356,
+		param->descriptor_buffer_addr_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT);
+	VIDC_HWIO_OUT(REG_85655, param->descriptor_buffer_size);
+	VIDC_HWIO_OUT(REG_86830, param->release_dpb_bit_mask);
+	VIDC_HWIO_OUT(REG_889944, param->shared_mem_addr_offset);
+	VIDC_HWIO_OUT(REG_404623, dpb_config);
+	VIDC_HWIO_OUT(REG_397087, param->cmd_seq_num);
+	VIDC_HWIO_OUT(REG_666957, (u32)param->decode |
+		param->inst_id);
+}
+
+void vidc_1080p_decode_frame_start_ch1(
+	struct vidc_1080p_dec_frame_start_param *param)
+{
+	u32 dpb_config;
+
+	VIDC_HWIO_OUT(REG_695082, VIDC_1080P_RISC2HOST_CMD_EMPTY);
+	VIDC_HWIO_OUT(REG_313350, VIDC_1080P_INIT_CH_INST_ID);
+	if ((param->decode == VIDC_1080P_DEC_TYPE_LAST_FRAME_DATA) &&
+		((!param->stream_buffer_addr_offset) ||
+		(!param->stream_frame_size))) {
+		VIDC_HWIO_OUT(REG_980194, 0);
+		VIDC_HWIO_OUT(REG_936704, 0);
+		VIDC_HWIO_OUT(REG_887095, 0);
+	} else {
+		VIDC_HWIO_OUT(REG_980194,
+			param->stream_buffer_addr_offset >>
+			VIDC_1080P_BASE_OFFSET_SHIFT);
+		VIDC_HWIO_OUT(REG_936704,
+			param->stream_frame_size);
+		VIDC_HWIO_OUT(REG_887095,
+			param->stream_buffersize);
+	}
+	dpb_config = VIDC_SETFIELD(param->dpb_flush,
+					VIDC_1080P_SI_RG10_DPB_FLUSH_SHFT,
+					VIDC_1080P_SI_RG10_DPB_FLUSH_BMSK) |
+				VIDC_SETFIELD(param->dpb_count,
+					VIDC_1080P_SI_RG10_NUM_DPB_SHFT,
+					VIDC_1080P_SI_RG10_NUM_DPB_BMSK);
+	VIDC_HWIO_OUT(REG_821977,
+		param->descriptor_buffer_addr_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT);
+	VIDC_HWIO_OUT(REG_576987, param->descriptor_buffer_size);
+	VIDC_HWIO_OUT(REG_70448, param->release_dpb_bit_mask);
+	VIDC_HWIO_OUT(REG_652528, param->shared_mem_addr_offset);
+	VIDC_HWIO_OUT(REG_220637, dpb_config);
+	VIDC_HWIO_OUT(REG_254093, param->cmd_seq_num);
+	VIDC_HWIO_OUT(REG_313350, (u32)param->decode |
+		param->inst_id);
+}
+
+void vidc_1080p_decode_init_buffers_ch0(
+	struct vidc_1080p_dec_init_buffers_param *param)
+{
+	VIDC_HWIO_OUT(REG_695082, VIDC_1080P_RISC2HOST_CMD_EMPTY);
+	VIDC_HWIO_OUT(REG_666957, VIDC_1080P_INIT_CH_INST_ID);
+	VIDC_HWIO_OUT(REG_889944, param->shared_mem_addr_offset);
+	VIDC_HWIO_OUT(REG_404623, param->dpb_count);
+	VIDC_HWIO_OUT(REG_397087, param->cmd_seq_num);
+	VIDC_HWIO_OUT(REG_666957, VIDC_1080P_DEC_TYPE_INIT_BUFFERS |
+		param->inst_id);
+}
+
+void vidc_1080p_decode_init_buffers_ch1(
+	struct vidc_1080p_dec_init_buffers_param *param)
+{
+	VIDC_HWIO_OUT(REG_695082, VIDC_1080P_RISC2HOST_CMD_EMPTY);
+	VIDC_HWIO_OUT(REG_313350, VIDC_1080P_INIT_CH_INST_ID);
+	VIDC_HWIO_OUT(REG_652528, param->shared_mem_addr_offset);
+	VIDC_HWIO_OUT(REG_220637, param->dpb_count);
+	VIDC_HWIO_OUT(REG_254093, param->cmd_seq_num);
+	VIDC_HWIO_OUT(REG_313350, VIDC_1080P_DEC_TYPE_INIT_BUFFERS |
+		param->inst_id);
+}
+
+void vidc_1080p_set_dec_resolution_ch0(u32 width, u32 height)
+{
+	VIDC_HWIO_OUT(REG_612810, height);
+	VIDC_HWIO_OUT(REG_175608, width);
+}
+
+void vidc_1080p_set_dec_resolution_ch1(u32 width, u32 height)
+{
+	VIDC_HWIO_OUT(REG_655721, height);
+	VIDC_HWIO_OUT(REG_548308, width);
+}
+
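+/*
+ * The read-back below returns the reconstructed luma/chroma addresses as
+ * device offsets; they are shifted left by VIDC_1080P_BASE_OFFSET_SHIFT to
+ * undo the right shift applied when the buffers were programmed.
+ */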
+void vidc_1080p_get_encode_frame_info(
+	struct vidc_1080p_enc_frame_info *frame_info)
+{
+	VIDC_HWIO_IN(REG_845544, &(frame_info->enc_frame_size));
+	VIDC_HWIO_IN(REG_859906,
+		&(frame_info->enc_picture_count));
+	VIDC_HWIO_IN(REG_490078,
+		&(frame_info->enc_write_pointer));
+	VIDC_HWIO_IN(REG_640904,
+		(u32 *)(&(frame_info->enc_frame)));
+	VIDC_HWIO_IN(REG_60114,
+		&(frame_info->enc_luma_address));
+	frame_info->enc_luma_address = frame_info->enc_luma_address <<
+		VIDC_1080P_BASE_OFFSET_SHIFT;
+	VIDC_HWIO_IN(REG_489688,
+		&(frame_info->enc_chroma_address));
+	frame_info->enc_chroma_address = frame_info->enc_chroma_address <<
+		VIDC_1080P_BASE_OFFSET_SHIFT;
+}
+
+void vidc_1080p_encode_seq_start_ch0(
+	struct vidc_1080p_enc_seq_start_param *param)
+{
+	VIDC_HWIO_OUT(REG_695082, VIDC_1080P_RISC2HOST_CMD_EMPTY);
+	VIDC_HWIO_OUT(REG_666957, VIDC_1080P_INIT_CH_INST_ID);
+	VIDC_HWIO_OUT(REG_117192,
+		param->stream_buffer_addr_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT);
+	VIDC_HWIO_OUT(REG_921356, param->stream_buffer_size);
+	VIDC_HWIO_OUT(REG_889944, param->shared_mem_addr_offset);
+	VIDC_HWIO_OUT(REG_397087, param->cmd_seq_num);
+	VIDC_HWIO_OUT(REG_666957, VIDC_1080P_ENC_TYPE_SEQ_HEADER |
+		param->inst_id);
+}
+
+void vidc_1080p_encode_seq_start_ch1(
+	struct vidc_1080p_enc_seq_start_param *param)
+{
+	VIDC_HWIO_OUT(REG_695082, VIDC_1080P_RISC2HOST_CMD_EMPTY);
+	VIDC_HWIO_OUT(REG_313350, VIDC_1080P_INIT_CH_INST_ID);
+	VIDC_HWIO_OUT(REG_980194,
+		param->stream_buffer_addr_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT);
+	VIDC_HWIO_OUT(REG_821977, param->stream_buffer_size);
+	VIDC_HWIO_OUT(REG_652528, param->shared_mem_addr_offset);
+	VIDC_HWIO_OUT(REG_254093, param->cmd_seq_num);
+	VIDC_HWIO_OUT(REG_313350, VIDC_1080P_ENC_TYPE_SEQ_HEADER |
+		param->inst_id);
+}
+
+void vidc_1080p_encode_frame_start_ch0(
+	struct vidc_1080p_enc_frame_start_param *param)
+{
+	VIDC_HWIO_OUT(REG_695082, VIDC_1080P_RISC2HOST_CMD_EMPTY);
+	VIDC_HWIO_OUT(REG_666957, VIDC_1080P_INIT_CH_INST_ID);
+	VIDC_HWIO_OUT(REG_117192,
+		param->stream_buffer_addr_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT);
+	VIDC_HWIO_OUT(REG_921356, param->stream_buffer_size);
+	VIDC_HWIO_OUT(REG_612810, param->current_y_addr_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT);
+	VIDC_HWIO_OUT(REG_175608, param->current_c_addr_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT);
+	VIDC_HWIO_OUT(REG_190381, param->intra_frame);
+	VIDC_HWIO_OUT(REG_889944, param->shared_mem_addr_offset);
+	VIDC_HWIO_OUT(REG_404623, param->input_flush);
+	VIDC_HWIO_OUT(REG_397087, param->cmd_seq_num);
+	VIDC_HWIO_OUT(REG_666957, (u32)param->encode |
+		param->inst_id);
+}
+
+void vidc_1080p_encode_frame_start_ch1(
+	struct vidc_1080p_enc_frame_start_param *param)
+{
+	VIDC_HWIO_OUT(REG_695082, VIDC_1080P_RISC2HOST_CMD_EMPTY);
+	VIDC_HWIO_OUT(REG_313350, VIDC_1080P_INIT_CH_INST_ID);
+	VIDC_HWIO_OUT(REG_980194,
+		param->stream_buffer_addr_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT);
+	VIDC_HWIO_OUT(REG_821977, param->stream_buffer_size);
+	VIDC_HWIO_OUT(REG_655721, param->current_y_addr_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT);
+	VIDC_HWIO_OUT(REG_548308, param->current_c_addr_offset >>
+		VIDC_1080P_BASE_OFFSET_SHIFT);
+	VIDC_HWIO_OUT(REG_887095, param->intra_frame);
+	VIDC_HWIO_OUT(REG_652528, param->shared_mem_addr_offset);
+	VIDC_HWIO_OUT(REG_404623, param->input_flush);
+	VIDC_HWIO_OUT(REG_254093, param->cmd_seq_num);
+	VIDC_HWIO_OUT(REG_313350, (u32)param->encode |
+		param->inst_id);
+}
+
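+/*
+ * Picture-type control: ifrm_ctrl encodes the I-frame period as
+ * number_p + 1, with 0 (period at or above VIDC_1080P_MAX_INTRA_PERIOD)
+ * used as the "no periodic I frame" value; this is a reading of the code
+ * below, not of hardware documentation.
+ */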
+void vidc_1080p_set_encode_picture(u32 number_p, u32 number_b)
+{
+	u32 picture, ifrm_ctrl;
+	if (number_p >= VIDC_1080P_MAX_INTRA_PERIOD)
+		ifrm_ctrl = 0;
+	else
+		ifrm_ctrl = number_p + 1;
+	picture = VIDC_SETFIELD(1,
+				HWIO_REG_783891_ENC_PIC_TYPE_USE_SHFT,
+				HWIO_REG_783891_ENC_PIC_TYPE_USE_BMSK) |
+			VIDC_SETFIELD(ifrm_ctrl,
+				HWIO_REG_783891_I_FRM_CTRL_SHFT,
+				HWIO_REG_783891_I_FRM_CTRL_BMSK) |
+			VIDC_SETFIELD(number_b,
+				HWIO_REG_783891_B_FRM_CTRL_SHFT,
+				HWIO_REG_783891_B_FRM_CTRL_BMSK);
+	VIDC_HWIO_OUT(REG_783891, picture);
+}
+
+void vidc_1080p_set_encode_multi_slice_control(
+	enum vidc_1080p_MSlice_selection multiple_slice_selection,
+	u32 mslice_mb, u32 mslice_byte)
+{
+	VIDC_HWIO_OUT(REG_226332, multiple_slice_selection);
+	VIDC_HWIO_OUT(REG_696136, mslice_mb);
+	VIDC_HWIO_OUT(REG_515564, mslice_byte);
+}
+
+void vidc_1080p_set_encode_circular_intra_refresh(u32 cir_num)
+{
+	VIDC_HWIO_OUT(REG_886210, cir_num);
+}
+
+void vidc_1080p_set_encode_input_frame_format(
+	enum vidc_1080p_memory_access_method memory_format)
+{
+	VIDC_HWIO_OUT(REG_645603, memory_format);
+}
+
+void vidc_1080p_set_encode_padding_control(u32 pad_ctrl_on,
+	u32 cr_pad_val, u32 cb_pad_val, u32 luma_pad_val)
+{
+	u32 padding = VIDC_SETFIELD(pad_ctrl_on,
+				HWIO_REG_811733_PAD_CTRL_ON_SHFT,
+				HWIO_REG_811733_PAD_CTRL_ON_BMSK) |
+			VIDC_SETFIELD(cr_pad_val,
+				HWIO_REG_811733_CR_PAD_VIDC_SHFT,
+				HWIO_REG_811733_CR_PAD_VIDC_BMSK) |
+			VIDC_SETFIELD(cb_pad_val,
+				HWIO_REG_811733_CB_PAD_VIDC_SHFT,
+				HWIO_REG_811733_CB_PAD_VIDC_BMSK) |
+			VIDC_SETFIELD(luma_pad_val,
+				HWIO_REG_811733_LUMA_PAD_VIDC_SHFT,
+				HWIO_REG_811733_LUMA_PAD_VIDC_BMSK);
+	VIDC_HWIO_OUT(REG_811733, padding);
+}
+
+void vidc_1080p_encode_set_rc_config(u32 enable_frame_level_rc,
+	u32 enable_mb_level_rc_flag, u32 frame_qp)
+{
+	u32 rc_config = VIDC_SETFIELD(enable_frame_level_rc,
+					HWIO_REG_559908_FR_RC_EN_SHFT,
+					HWIO_REG_559908_FR_RC_EN_BMSK) |
+			VIDC_SETFIELD(enable_mb_level_rc_flag,
+					HWIO_REG_559908_MB_RC_EN_SHFT,
+					HWIO_REG_559908_MB_RC_EN_BMSK) |
+			VIDC_SETFIELD(frame_qp,
+					HWIO_REG_559908_FRAME_QP_SHFT,
+					HWIO_REG_559908_FRAME_QP_BMSK);
+	VIDC_HWIO_OUT(REG_559908, rc_config);
+}
+
+void vidc_1080p_encode_set_frame_level_rc_params(u32 rc_frame_rate,
+	u32 target_bitrate, u32 reaction_coeff)
+{
+	VIDC_HWIO_OUT(REG_977937, rc_frame_rate);
+	VIDC_HWIO_OUT(REG_166135, target_bitrate);
+	VIDC_HWIO_OUT(REG_550322, reaction_coeff);
+}
+
+void vidc_1080p_encode_set_qp_params(u32 max_qp, u32 min_qp)
+{
+	u32 qbound = VIDC_SETFIELD(max_qp, HWIO_REG_109072_MAX_QP_SHFT,
+					HWIO_REG_109072_MAX_QP_BMSK) |
+			VIDC_SETFIELD(min_qp,
+					HWIO_REG_109072_MIN_QP_SHFT,
+					HWIO_REG_109072_MIN_QP_BMSK);
+	VIDC_HWIO_OUT(REG_109072, qbound);
+}
+
+void vidc_1080p_encode_set_mb_level_rc_params(u32 disable_dark_region_as_flag,
+	u32 disable_smooth_region_as_flag, u32 disable_static_region_as_flag,
+	u32 disable_activity_region_flag)
+{
+	u32 rc_active_feature = VIDC_SETFIELD(
+					disable_dark_region_as_flag,
+					HWIO_REG_949086_DARK_DISABLE_SHFT,
+					HWIO_REG_949086_DARK_DISABLE_BMSK) |
+					VIDC_SETFIELD(
+					disable_smooth_region_as_flag,
+					HWIO_REG_949086_SMOOTH_DISABLE_SHFT,
+					HWIO_REG_949086_SMOOTH_DISABLE_BMSK) |
+					VIDC_SETFIELD(
+					disable_static_region_as_flag,
+					HWIO_REG_949086_STATIC_DISABLE_SHFT,
+					HWIO_REG_949086_STATIC_DISABLE_BMSK) |
+					VIDC_SETFIELD(
+					disable_activity_region_flag,
+					HWIO_REG_949086_ACT_DISABLE_SHFT,
+					HWIO_REG_949086_ACT_DISABLE_BMSK);
+	VIDC_HWIO_OUT(REG_949086, rc_active_feature);
+}
+
+void vidc_1080p_set_h264_encode_entropy(
+	enum vidc_1080p_entropy_sel entropy_sel)
+{
+	VIDC_HWIO_OUT(REG_447796, entropy_sel);
+}
+
+void vidc_1080p_set_h264_encode_loop_filter(
+	enum vidc_1080p_DBConfig db_config, u32 slice_alpha_offset,
+	u32 slice_beta_offset)
+{
+	VIDC_HWIO_OUT(REG_152500, db_config);
+	VIDC_HWIO_OUT(REG_266285, slice_alpha_offset);
+	VIDC_HWIO_OUT(REG_964731, slice_beta_offset);
+}
+
+void vidc_1080p_set_h264_encoder_p_frame_ref_count(u32 max_reference)
+{
+	u32 ref_frames;
+	ref_frames = VIDC_SETFIELD(max_reference,
+		HWIO_REG_744348_P_SHFT,
+		HWIO_REG_744348_P_BMSK);
+	VIDC_HWIO_OUT(REG_744348, ref_frames);
+}
+
+void vidc_1080p_set_h264_encode_8x8transform_control(u32 enable_8x8transform)
+{
+	VIDC_HWIO_OUT(REG_672163, enable_8x8transform);
+}
+
+void vidc_1080p_set_mpeg4_encode_quarter_pel_control(
+	u32 enable_mpeg4_quarter_pel)
+{
+	VIDC_HWIO_OUT(REG_330132, enable_mpeg4_quarter_pel);
+}
+
+void vidc_1080p_set_device_base_addr(u8 *mapped_va)
+{
+	VIDC_BASE_PTR = mapped_va;
+}
+
+void vidc_1080p_get_intra_bias(u32 *bias)
+{
+	u32 intra_bias;
+
+	VIDC_HWIO_IN(REG_676866, &intra_bias);
+	*bias = VIDC_GETFIELD(intra_bias,
+					HWIO_REG_676866_RMSK,
+					HWIO_REG_676866_SHFT);
+}
+
+void vidc_1080p_set_intra_bias(u32 bias)
+{
+	u32 intra_bias;
+
+	intra_bias = VIDC_SETFIELD(bias,
+					HWIO_REG_676866_SHFT,
+					HWIO_REG_676866_RMSK);
+	VIDC_HWIO_OUT(REG_676866, intra_bias);
+}
+
+void vidc_1080p_get_bi_directional_bias(u32 *bi_directional_bias)
+{
+	u32 nbi_direct_bias;
+
+	VIDC_HWIO_IN(REG_54267, &nbi_direct_bias);
+	*bi_directional_bias = VIDC_GETFIELD(nbi_direct_bias,
+					HWIO_REG_54267_RMSK,
+					HWIO_REG_54267_SHFT);
+}
+
+void vidc_1080p_set_bi_directional_bias(u32 bi_directional_bias)
+{
+	u32 nbi_direct_bias;
+
+	nbi_direct_bias = VIDC_SETFIELD(bi_directional_bias,
+					HWIO_REG_54267_SHFT,
+					HWIO_REG_54267_RMSK);
+	VIDC_HWIO_OUT(REG_54267, nbi_direct_bias);
+}
+
+void vidc_1080p_get_encoder_sequence_header_size(u32 *seq_header_size)
+{
+	VIDC_HWIO_IN(REG_845544, seq_header_size);
+}
+
+void vidc_1080p_get_intermedia_stage_debug_counter(
+	u32 *intermediate_stage_counter)
+{
+	VIDC_HWIO_IN(REG_805993, intermediate_stage_counter);
+}
+
+void vidc_1080p_get_exception_status(u32 *exception_status)
+{
+	VIDC_HWIO_IN(REG_493355, exception_status);
+}
+
+void vidc_1080p_frame_start_realloc(u32 instance_id)
+{
+	VIDC_HWIO_OUT(REG_695082, VIDC_1080P_RISC2HOST_CMD_EMPTY);
+	VIDC_HWIO_OUT(REG_666957, VIDC_1080P_INIT_CH_INST_ID);
+	VIDC_HWIO_OUT(REG_666957,
+		VIDC_1080P_DEC_TYPE_FRAME_START_REALLOC | instance_id);
+}
diff --git a/drivers/video/msm/vidc/1080p/ddl/vidc.h b/drivers/video/msm/vidc/1080p/ddl/vidc.h
new file mode 100644
index 0000000..f871509
--- /dev/null
+++ b/drivers/video/msm/vidc/1080p/ddl/vidc.h
@@ -0,0 +1,544 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VIDC_H_
+#define _VIDC_H_
+
+#include "vidc_hwio_reg.h"
+
+#define VIDC_1080P_RISC2HOST_CMD_EMPTY               0
+#define VIDC_1080P_RISC2HOST_CMD_OPEN_CH_RET         1
+#define VIDC_1080P_RISC2HOST_CMD_CLOSE_CH_RET        2
+#define VIDC_1080P_RISC2HOST_CMD_SEQ_DONE_RET        4
+#define VIDC_1080P_RISC2HOST_CMD_FRAME_DONE_RET      5
+#define VIDC_1080P_RISC2HOST_CMD_ENC_COMPLETE_RET    7
+#define VIDC_1080P_RISC2HOST_CMD_SYS_INIT_RET        8
+#define VIDC_1080P_RISC2HOST_CMD_FW_STATUS_RET       9
+#define VIDC_1080P_RISC2HOST_CMD_FLUSH_COMMAND_RET  12
+#define VIDC_1080P_RISC2HOST_CMD_ABORT_RET          13
+#define VIDC_1080P_RISC2HOST_CMD_INIT_BUFFERS_RET   15
+#define VIDC_1080P_RISC2HOST_CMD_EDFU_INT_RET       16
+#define VIDC_1080P_RISC2HOST_CMD_ERROR_RET          32
+
+#define VIDC_RISC2HOST_ARG2_VIDC_DISP_ERROR_STATUS_BMSK  0xffff0000
+#define VIDC_RISC2HOST_ARG2_VIDC_DISP_ERROR_STATUS_SHFT  16
+#define VIDC_RISC2HOST_ARG2_VIDC_DEC_ERROR_STATUS_BMSK   0x0000ffff
+#define VIDC_RISC2HOST_ARG2_VIDC_DEC_ERROR_STATUS_SHFT   0
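+/*
+ * The RISC2HOST second argument packs the display error status in the
+ * upper halfword and the decode error status in the lower halfword.  A
+ * minimal sketch of how these masks apply (the split is done by
+ * vidc_1080p_get_risc2host_cmd_status(), declared below):
+ *
+ *	disp_err = (arg2 & VIDC_RISC2HOST_ARG2_VIDC_DISP_ERROR_STATUS_BMSK)
+ *			>> VIDC_RISC2HOST_ARG2_VIDC_DISP_ERROR_STATUS_SHFT;
+ *	dec_err = (arg2 & VIDC_RISC2HOST_ARG2_VIDC_DEC_ERROR_STATUS_BMSK)
+ *			>> VIDC_RISC2HOST_ARG2_VIDC_DEC_ERROR_STATUS_SHFT;
+ */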
+
+#define VIDC_1080P_ERROR_INVALID_CHANNEL_NUMBER                  1
+#define VIDC_1080P_ERROR_INVALID_COMMAND_ID                      2
+#define VIDC_1080P_ERROR_CHANNEL_ALREADY_IN_USE                  3
+#define VIDC_1080P_ERROR_CHANNEL_NOT_OPEN_BEFORE_CHANNEL_CLOSE   4
+#define VIDC_1080P_ERROR_OPEN_CH_ERROR_SEQ_START                 5
+#define VIDC_1080P_ERROR_SEQ_START_ALREADY_CALLED                6
+#define VIDC_1080P_ERROR_OPEN_CH_ERROR_INIT_BUFFERS              7
+#define VIDC_1080P_ERROR_SEQ_START_ERROR_INIT_BUFFERS            8
+#define VIDC_1080P_ERROR_INIT_BUFFER_ALREADY_CALLED              9
+#define VIDC_1080P_ERROR_OPEN_CH_ERROR_FRAME_START              10
+#define VIDC_1080P_ERROR_SEQ_START_ERROR_FRAME_START            11
+#define VIDC_1080P_ERROR_INIT_BUFFERS_ERROR_FRAME_START         12
+#define VIDC_1080P_ERROR_RESOLUTION_CHANGED                     13
+#define VIDC_1080P_ERROR_INVALID_COMMAND_LAST_FRAME             14
+#define VIDC_1080P_ERROR_INVALID_COMMAND                        15
+#define VIDC_1080P_ERROR_INVALID_CODEC_TYPE                     16
+
+#define VIDC_1080P_ERROR_MEM_ALLOCATION_FAILED                  20
+#define VIDC_1080P_ERROR_INSUFFICIENT_CONTEXT_SIZE              25
+#define VIDC_1080P_ERROR_UNSUPPORTED_FEATURE_IN_PROFILE         27
+#define VIDC_1080P_ERROR_RESOLUTION_NOT_SUPPORTED               28
+
+#define VIDC_1080P_ERROR_HEADER_NOT_FOUND                 52
+#define VIDC_1080P_ERROR_VOS_END_CODE_RECEIVED            53
+#define VIDC_1080P_ERROR_FRAME_RATE_NOT_SUPPORTED         62
+#define VIDC_1080P_ERROR_INVALID_QP_VALUE                 63
+#define VIDC_1080P_ERROR_INVALID_RC_REACTION_COEFFICIENT  64
+#define VIDC_1080P_ERROR_INVALID_CPB_SIZE_AT_GIVEN_LEVEL  65
+#define VIDC_1080P_ERROR_B_FRAME_NOT_SUPPORTED            66
+#define VIDC_1080P_ERROR_ALLOC_DPB_SIZE_NOT_SUFFICIENT    71
+#define VIDC_1080P_ERROR_NUM_DPB_OUT_OF_RANGE             74
+#define VIDC_1080P_ERROR_NULL_METADATA_INPUT_POINTER      77
+#define VIDC_1080P_ERROR_NULL_DPB_POINTER                 78
+#define VIDC_1080P_ERROR_NULL_OTH_EXT_BUFADDR             79
+#define VIDC_1080P_ERROR_NULL_MV_POINTER                  80
+#define VIDC_1080P_ERROR_DIVIDE_BY_ZERO                   81
+#define VIDC_1080P_ERROR_BIT_STREAM_BUF_EXHAUST           82
+#define VIDC_1080P_ERROR_DESCRIPTOR_BUFFER_EMPTY          83
+#define VIDC_1080P_ERROR_DMA_TX_NOT_COMPLETE              84
+#define VIDC_1080P_ERROR_DESCRIPTOR_TABLE_ENTRY_INVALID   85
+#define VIDC_1080P_ERROR_MB_COEFF_NOT_DONE        86
+#define VIDC_1080P_ERROR_CODEC_SLICE_NOT_DONE     87
+#define VIDC_1080P_ERROR_VIDC_CORE_TIME_OUT       88
+#define VIDC_1080P_ERROR_VC1_BITPLANE_DECODE_ERR  89
+#define VIDC_1080P_ERROR_VSP_NOT_READY            90
+#define VIDC_1080P_ERROR_BUFFER_FULL_STATE        91
+
+#define VIDC_1080P_ERROR_RESOLUTION_MISMATCH      112
+#define VIDC_1080P_ERROR_NV_QUANT_ERR             113
+#define VIDC_1080P_ERROR_SYNC_MARKER_ERR          114
+#define VIDC_1080P_ERROR_FEATURE_NOT_SUPPORTED    115
+#define VIDC_1080P_ERROR_MEM_CORRUPTION           116
+#define VIDC_1080P_ERROR_INVALID_REFERENCE_FRAME  117
+#define VIDC_1080P_ERROR_PICTURE_CODING_TYPE_ERR  118
+#define VIDC_1080P_ERROR_MV_RANGE_ERR             119
+#define VIDC_1080P_ERROR_PICTURE_STRUCTURE_ERR    120
+#define VIDC_1080P_ERROR_SLICE_ADDR_INVALID       121
+#define VIDC_1080P_ERROR_NON_PAIRED_FIELD_NOT_SUPPORTED         122
+#define VIDC_1080P_ERROR_NON_FRAME_DATA_RECEIVED                123
+#define VIDC_1080P_ERROR_INCOMPLETE_FRAME                       124
+#define VIDC_1080P_ERROR_NO_BUFFER_RELEASED_FROM_HOST           125
+#define VIDC_1080P_ERROR_NULL_FW_DEBUG_INFO_POINTER             126
+#define VIDC_1080P_ERROR_ALLOC_DEBUG_INFO_SIZE_INSUFFICIENT     127
+#define VIDC_1080P_ERROR_NALU_HEADER_ERROR       128
+#define VIDC_1080P_ERROR_SPS_PARSE_ERROR         129
+#define VIDC_1080P_ERROR_PPS_PARSE_ERROR         130
+#define VIDC_1080P_ERROR_SLICE_PARSE_ERROR       131
+#define VIDC_1080P_ERROR_SYNC_POINT_NOT_RECEIVED  171
+
+#define VIDC_1080P_WARN_COMMAND_FLUSHED                  145
+#define VIDC_1080P_WARN_METADATA_NO_SPACE_NUM_CONCEAL_MB 150
+#define VIDC_1080P_WARN_METADATA_NO_SPACE_QP             151
+#define VIDC_1080P_WARN_METADATA_NO_SPACE_CONCEAL_MB     152
+#define VIDC_1080P_WARN_METADATA_NO_SPACE_VC1_PARAM      153
+#define VIDC_1080P_WARN_METADATA_NO_SPACE_SEI            154
+#define VIDC_1080P_WARN_METADATA_NO_SPACE_VUI            155
+#define VIDC_1080P_WARN_METADATA_NO_SPACE_EXTRA          156
+#define VIDC_1080P_WARN_METADATA_NO_SPACE_DATA_NONE      157
+#define VIDC_1080P_WARN_FRAME_RATE_UNKNOWN               158
+#define VIDC_1080P_WARN_ASPECT_RATIO_UNKNOWN             159
+#define VIDC_1080P_WARN_COLOR_PRIMARIES_UNKNOWN          160
+#define VIDC_1080P_WARN_TRANSFER_CHAR_UNKNOWN            161
+#define VIDC_1080P_WARN_MATRIX_COEFF_UNKNOWN             162
+#define VIDC_1080P_WARN_NON_SEQ_SLICE_ADDR               163
+#define VIDC_1080P_WARN_BROKEN_LINK                      164
+#define VIDC_1080P_WARN_FRAME_CONCEALED                  165
+#define VIDC_1080P_WARN_PROFILE_UNKNOWN                  166
+#define VIDC_1080P_WARN_LEVEL_UNKNOWN                    167
+#define VIDC_1080P_WARN_BIT_RATE_NOT_SUPPORTED           168
+#define VIDC_1080P_WARN_COLOR_DIFF_FORMAT_NOT_SUPPORTED  169
+#define VIDC_1080P_WARN_NULL_EXTRA_METADATA_POINTER      170
+#define VIDC_1080P_WARN_METADATA_NO_SPACE_MB_INFO        180
+#define VIDC_1080P_WARN_METADATA_NO_SPACE_SLICE_SIZE     181
+#define VIDC_1080P_WARN_RESOLUTION_WARNING               182
+
+#define VIDC_1080P_H264_ENC_TYPE_P       0
+#define VIDC_1080P_H264_ENC_TYPE_B       1
+#define VIDC_1080P_H264_ENC_TYPE_IDR     2
+#define VIDC_1080P_MP4_H263_ENC_TYPE_I   0
+#define VIDC_1080P_MP4_H263_ENC_TYPE_P   1
+#define VIDC_1080P_MP4_H263_ENC_TYPE_B   2
+
+#define VIDC_1080P_MPEG4_LEVEL0    0
+#define VIDC_1080P_MPEG4_LEVEL0b   9
+#define VIDC_1080P_MPEG4_LEVEL1    1
+#define VIDC_1080P_MPEG4_LEVEL2    2
+#define VIDC_1080P_MPEG4_LEVEL3    3
+#define VIDC_1080P_MPEG4_LEVEL3b   7
+#define VIDC_1080P_MPEG4_LEVEL4    4
+#define VIDC_1080P_MPEG4_LEVEL4a   4
+#define VIDC_1080P_MPEG4_LEVEL5    5
+#define VIDC_1080P_MPEG4_LEVEL6    6
+#define VIDC_1080P_MPEG4_LEVEL7    7
+
+#define VIDC_1080P_H264_LEVEL1     10
+#define VIDC_1080P_H264_LEVEL1b    9
+#define VIDC_1080P_H264_LEVEL1p1   11
+#define VIDC_1080P_H264_LEVEL1p2   12
+#define VIDC_1080P_H264_LEVEL1p3   13
+#define VIDC_1080P_H264_LEVEL2     20
+#define VIDC_1080P_H264_LEVEL2p1   21
+#define VIDC_1080P_H264_LEVEL2p2   22
+#define VIDC_1080P_H264_LEVEL3     30
+#define VIDC_1080P_H264_LEVEL3p1   31
+#define VIDC_1080P_H264_LEVEL3p2   32
+#define VIDC_1080P_H264_LEVEL4     40
+#define VIDC_1080P_H264_LEVEL5p1   51
+#define VIDC_1080P_H264_LEVEL_MAX  VIDC_1080P_H264_LEVEL5p1
+
+#define VIDC_1080P_H263_LEVEL10    10
+#define VIDC_1080P_H263_LEVEL20    20
+#define VIDC_1080P_H263_LEVEL30    30
+#define VIDC_1080P_H263_LEVEL40    40
+#define VIDC_1080P_H263_LEVEL45    45
+#define VIDC_1080P_H263_LEVEL50    50
+#define VIDC_1080P_H263_LEVEL60    60
+#define VIDC_1080P_H263_LEVEL70    70
+
+#define VIDC_1080P_BUS_ERROR_HANDLER                   0x01
+#define VIDC_1080P_ILLEVIDC_INSTRUCTION_HANDLER         0x02
+#define VIDC_1080P_TICK_HANDLER                        0x04
+#define VIDC_1080P_TRAP_HANDLER                        0x10
+#define VIDC_1080P_ALIGN_HANDLER                       0x20
+#define VIDC_1080P_RANGE_HANDLER                       0x40
+#define VIDC_1080P_DTLB_MISS_EXCEPTION_HANDLER         0x80
+#define VIDC_1080P_ITLB_MISS_EXCEPTION_HANDLER        0x100
+#define VIDC_1080P_DATA_PAGE_FAULT_EXCEPTION_HANDLER  0x200
+#define VIDC_1080P_INST_PAGE_FAULT_EXCEPTION_HANDLER  0x400
+
+enum vidc_1080p_reset{
+	VIDC_1080P_RESET_IN_SEQ_FIRST_STAGE   = 0x0,
+	VIDC_1080P_RESET_IN_SEQ_SECOND_STAGE  = 0x1,
+};
+enum vidc_1080p_memory_access_method{
+	VIDC_1080P_TILE_LINEAR = 0,
+	VIDC_1080P_TILE_16x16  = 2,
+	VIDC_1080P_TILE_64x32  = 3,
+	VIDC_1080P_TILE_32BIT  = 0x7FFFFFFF
+};
+enum vidc_1080p_host2risc_cmd{
+	VIDC_1080P_HOST2RISC_CMD_EMPTY          = 0,
+	VIDC_1080P_HOST2RISC_CMD_OPEN_CH        = 1,
+	VIDC_1080P_HOST2RISC_CMD_CLOSE_CH       = 2,
+	VIDC_1080P_HOST2RISC_CMD_SYS_INIT       = 3,
+	VIDC_1080P_HOST2RISC_CMD_FLUSH_COMMMAND = 4,
+	VIDC_1080P_HOST2RISC_CMD_CONTINUE_ENC   = 7,
+	VIDC_1080P_HOST2RISC_CMD_ABORT_ENC      = 8,
+	VIDC_1080P_HOST2RISC_CMD_32BIT          = 0x7FFFFFFF
+};
+enum vidc_1080p_decode_p_cache_enable{
+	VIDC_1080P_DECODE_PCACHE_ENABLE_P   = 0,
+	VIDC_1080P_DECODE_PCACHE_ENABLE_B   = 1,
+	VIDC_1080P_DECODE_PCACHE_ENABLE_PB  = 2,
+	VIDC_1080P_DECODE_PCACHE_DISABLE    = 3,
+	VIDC_1080P_DECODE_PCACHE_32BIT      = 0x7FFFFFFF
+};
+enum vidc_1080p_encode_p_cache_enable{
+	VIDC_1080P_ENCODE_PCACHE_ENABLE  = 0,
+	VIDC_1080P_ENCODE_PCACHE_DISABLE = 3,
+	VIDC_1080P_ENCODE_PCACHE_32BIT   = 0x7FFFFFFF
+};
+enum vidc_1080p_codec{
+	VIDC_1080P_H264_DECODE     = 0,
+	VIDC_1080P_VC1_DECODE      = 1,
+	VIDC_1080P_MPEG4_DECODE    = 2,
+	VIDC_1080P_MPEG2_DECODE    = 3,
+	VIDC_1080P_H263_DECODE     = 4,
+	VIDC_1080P_VC1_RCV_DECODE  = 5,
+	VIDC_1080P_DIVX311_DECODE  = 6,
+	VIDC_1080P_DIVX412_DECODE  = 7,
+	VIDC_1080P_DIVX502_DECODE  = 8,
+	VIDC_1080P_DIVX503_DECODE  = 9,
+	VIDC_1080P_H264_ENCODE    = 16,
+	VIDC_1080P_MPEG4_ENCODE   = 17,
+	VIDC_1080P_H263_ENCODE    = 18,
+	VIDC_1080P_CODEC_32BIT    = 0x7FFFFFFF
+};
+enum vidc_1080p_entropy_sel{
+	VIDC_1080P_ENTROPY_SEL_CAVLC = 0,
+	VIDC_1080P_ENTROPY_SEL_CABAC = 1,
+	VIDC_1080P_ENTROPY_32BIT     = 0x7FFFFFFF
+};
+enum vidc_1080p_DBConfig{
+	VIDC_1080P_DB_ALL_BLOCKING_BOUNDARY  = 0,
+	VIDC_1080P_DB_DISABLE                = 1,
+	VIDC_1080P_DB_SKIP_SLICE_BOUNDARY    = 2,
+	VIDC_1080P_DB_32BIT                  = 0x7FFFFFFF
+};
+enum vidc_1080p_MSlice_selection{
+	VIDC_1080P_MSLICE_DISABLE        = 0,
+	VIDC_1080P_MSLICE_BY_MB_COUNT    = 1,
+	VIDC_1080P_MSLICE_BY_BYTE_COUNT  = 3,
+	VIDC_1080P_MSLICE_32BIT          = 0x7FFFFFFF
+};
+enum vidc_1080p_display_status{
+	VIDC_1080P_DISPLAY_STATUS_DECODE_ONLY        = 0,
+	VIDC_1080P_DISPLAY_STATUS_DECODE_AND_DISPLAY = 1,
+	VIDC_1080P_DISPLAY_STATUS_DISPLAY_ONLY       = 2,
+	VIDC_1080P_DISPLAY_STATUS_DPB_EMPTY          = 3,
+	VIDC_1080P_DISPLAY_STATUS_NOOP               = 4,
+	VIDC_1080P_DISPLAY_STATUS_32BIT              = 0x7FFFFFFF
+};
+enum vidc_1080p_display_coding{
+	VIDC_1080P_DISPLAY_CODING_PROGRESSIVE_SCAN = 0,
+	VIDC_1080P_DISPLAY_CODING_INTERLACED       = 1,
+	VIDC_1080P_DISPLAY_CODING_32BIT            = 0x7FFFFFFF
+};
+enum vidc_1080p_decode_frame{
+	VIDC_1080P_DECODE_FRAMETYPE_NOT_CODED  = 0,
+	VIDC_1080P_DECODE_FRAMETYPE_I          = 1,
+	VIDC_1080P_DECODE_FRAMETYPE_P          = 2,
+	VIDC_1080P_DECODE_FRAMETYPE_B          = 3,
+	VIDC_1080P_DECODE_FRAMETYPE_OTHERS     = 4,
+	VIDC_1080P_DECODE_FRAMETYPE_32BIT      = 0x7FFFFFFF
+};
+enum vidc_1080p_encode_frame{
+	VIDC_1080P_ENCODE_FRAMETYPE_NOT_CODED  = 0,
+	VIDC_1080P_ENCODE_FRAMETYPE_I          = 1,
+	VIDC_1080P_ENCODE_FRAMETYPE_P          = 2,
+	VIDC_1080P_ENCODE_FRAMETYPE_B          = 3,
+	VIDC_1080P_ENCODE_FRAMETYPE_SKIPPED    = 4,
+	VIDC_1080P_ENCODE_FRAMETYPE_OTHERS     = 5,
+	VIDC_1080P_ENCODE_FRAMETYPE_32BIT      = 0x7FFFFFFF
+};
+
+enum vidc_1080p_decode_idc_format {
+	VIDC_1080P_IDCFORMAT_MONOCHROME = 0,
+	VIDC_1080P_IDCFORMAT_420 = 1,
+	VIDC_1080P_IDCFORMAT_422 = 2,
+	VIDC_1080P_IDCFORMAT_444 = 3,
+	VIDC_1080P_IDCFORMAT_OTHERS = 4,
+	VIDC_1080P_IDCFORMAT_32BIT = 0x7FFFFFFF
+};
+
+#define VIDC_1080P_PROFILE_MPEG4_SIMPLE      0x00000000
+#define VIDC_1080P_PROFILE_MPEG4_ADV_SIMPLE  0x00000001
+
+#define VIDC_1080P_PROFILE_H264_MAIN         0x00000000
+#define VIDC_1080P_PROFILE_H264_HIGH         0x00000001
+#define VIDC_1080P_PROFILE_H264_BASELINE     0x00000002
+
+enum vidc_1080p_decode{
+	VIDC_1080P_DEC_TYPE_SEQ_HEADER       = 0x00010000,
+	VIDC_1080P_DEC_TYPE_FRAME_DATA       = 0x00020000,
+	VIDC_1080P_DEC_TYPE_LAST_FRAME_DATA  = 0x00030000,
+	VIDC_1080P_DEC_TYPE_INIT_BUFFERS     = 0x00040000,
+	VIDC_1080P_DEC_TYPE_FRAME_START_REALLOC = 0x00050000,
+	VIDC_1080P_DEC_TYPE_32BIT            = 0x7FFFFFFF
+};
+enum vidc_1080p_encode{
+	VIDC_1080P_ENC_TYPE_SEQ_HEADER       = 0x00010000,
+	VIDC_1080P_ENC_TYPE_FRAME_DATA       = 0x00020000,
+	VIDC_1080P_ENC_TYPE_LAST_FRAME_DATA  = 0x00030000,
+	VIDC_1080P_ENC_TYPE_32BIT            = 0x7FFFFFFF
+};
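+/*
+ * The decode/encode command types above occupy the upper halfword so the
+ * channel command registers can carry the command OR'ed with the instance
+ * id in the lower bits (e.g. VIDC_1080P_DEC_TYPE_SEQ_HEADER | inst_id),
+ * as done by the channel start helpers.
+ */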
+struct vidc_1080p_dec_seq_start_param{
+	u32 cmd_seq_num;
+	u32 inst_id;
+	u32 shared_mem_addr_offset;
+	u32 stream_buffer_addr_offset;
+	u32 stream_buffersize;
+	u32 stream_frame_size;
+	u32 descriptor_buffer_addr_offset;
+	u32 descriptor_buffer_size;
+};
+struct vidc_1080p_dec_frame_start_param{
+	u32 cmd_seq_num;
+	u32 inst_id;
+	u32 shared_mem_addr_offset;
+	u32 stream_buffer_addr_offset;
+	u32 stream_buffersize;
+	u32 stream_frame_size;
+	u32 descriptor_buffer_addr_offset;
+	u32 descriptor_buffer_size;
+	u32 release_dpb_bit_mask;
+	u32 dpb_count;
+	u32 dpb_flush;
+	enum vidc_1080p_decode decode;
+};
+struct vidc_1080p_dec_init_buffers_param{
+	u32 cmd_seq_num;
+	u32 inst_id;
+	u32 shared_mem_addr_offset;
+	u32 dpb_count;
+};
+struct vidc_1080p_seq_hdr_info{
+	u32 img_size_x;
+	u32 img_size_y;
+	u32 dec_frm_size;
+	u32 min_num_dpb;
+	u32 min_luma_dpb_size;
+	u32 min_chroma_dpb_size;
+	u32 profile;
+	u32 level;
+	u32 disp_progressive;
+	u32 disp_crop_exists;
+	u32 dec_progressive;
+	u32 dec_crop_exists;
+	u32 crop_right_offset;
+	u32 crop_left_offset;
+	u32 crop_bottom_offset;
+	u32 crop_top_offset;
+	u32 data_partition;
+};
+struct vidc_1080p_enc_seq_start_param{
+	u32 cmd_seq_num;
+	u32 inst_id;
+	u32 shared_mem_addr_offset;
+	u32 stream_buffer_addr_offset;
+	u32 stream_buffer_size;
+};
+struct vidc_1080p_enc_frame_start_param{
+	u32 cmd_seq_num;
+	u32 inst_id;
+	u32 shared_mem_addr_offset;
+	u32 current_y_addr_offset;
+	u32 current_c_addr_offset;
+	u32 stream_buffer_addr_offset;
+	u32 stream_buffer_size;
+	u32 intra_frame;
+	u32 input_flush;
+	enum vidc_1080p_encode encode;
+};
+struct vidc_1080p_enc_frame_info{
+	u32 enc_frame_size;
+	u32 enc_picture_count;
+	u32 enc_write_pointer;
+	u32 enc_luma_address;
+	u32 enc_chroma_address;
+	enum vidc_1080p_encode_frame enc_frame;
+	u32 meta_data_exists;
+};
+struct vidc_1080p_dec_disp_info{
+	u32 disp_resl_change;
+	u32 dec_resl_change;
+	u32 reconfig_flush_done;
+	u32 img_size_x;
+	u32 img_size_y;
+	u32 display_y_addr;
+	u32 display_c_addr;
+	u32 decode_y_addr;
+	u32 decode_c_addr;
+	u32 tag_top;
+	u32 pic_time_top;
+	u32 tag_bottom;
+	u32 pic_time_bottom;
+	u32 metadata_exists;
+	u32 disp_crop_exists;
+	u32 dec_crop_exists;
+	u32 crop_right_offset;
+	u32 crop_left_offset;
+	u32 crop_bottom_offset;
+	u32 crop_top_offset;
+	u32 input_bytes_consumed;
+	u32 input_is_interlace;
+	u32 input_frame_num;
+	enum vidc_1080p_display_status display_status;
+	enum vidc_1080p_display_status decode_status;
+	enum vidc_1080p_display_coding display_coding;
+	enum vidc_1080p_display_coding decode_coding;
+	enum vidc_1080p_decode_frame input_frame;
+};
+void vidc_1080p_do_sw_reset(enum vidc_1080p_reset init_flag);
+void vidc_1080p_release_sw_reset(void);
+void vidc_1080p_clear_interrupt(void);
+void vidc_1080p_set_host2risc_cmd(
+	enum vidc_1080p_host2risc_cmd host2risc_command,
+	u32 host2risc_arg1, u32 host2risc_arg2,
+	u32 host2risc_arg3, u32 host2risc_arg4);
+void vidc_1080p_get_risc2host_cmd(u32 *pn_risc2host_command,
+	u32 *pn_risc2host_arg1, u32 *pn_risc2host_arg2,
+	u32 *pn_risc2host_arg3, u32 *pn_risc2host_arg4);
+void vidc_1080p_get_risc2host_cmd_status(u32 err_status,
+	u32 *dec_err_status, u32 *disp_err_status);
+void vidc_1080p_clear_risc2host_cmd(void);
+void vidc_1080p_get_fw_version(u32 *pn_fw_version);
+void vidc_1080p_get_fw_status(u32 *pn_fw_status);
+void vidc_1080p_init_memory_controller(u32 dram_base_addr_a,
+	u32 dram_base_addr_b);
+void vidc_1080p_get_memory_controller_status(u32 *pb_mc_abusy,
+	u32 *pb_mc_bbusy);
+void vidc_1080p_set_h264_decode_buffers(u32 dpb, u32 dec_vert_nb_mv_offset,
+	u32 dec_nb_ip_offset, u32 *pn_dpb_luma_offset,
+	u32 *pn_dpb_chroma_offset, u32 *pn_mv_buffer_offset);
+void vidc_1080p_set_decode_recon_buffers(u32 recon_buffer, u32 *pn_dec_luma,
+	u32 *pn_dec_chroma);
+void vidc_1080p_set_mpeg4_divx_decode_work_buffers(u32 nb_dcac_buffer_offset,
+	u32 upnb_mv_buffer_offset, u32 sub_anchor_buffer_offset,
+	u32 overlay_transform_buffer_offset, u32 stx_parser_buffer_offset);
+void vidc_1080p_set_h263_decode_work_buffers(u32 nb_dcac_buffer_offset,
+	u32 upnb_mv_buffer_offset, u32 sub_anchor_buffer_offset,
+	u32 overlay_transform_buffer_offset);
+void vidc_1080p_set_vc1_decode_work_buffers(u32 nb_dcac_buffer_offset,
+	u32 upnb_mv_buffer_offset, u32 sub_anchor_buffer_offset,
+	u32 overlay_transform_buffer_offset, u32 bitplain1Buffer_offset,
+	u32 bitplain2Buffer_offset, u32 bitplain3Buffer_offset);
+void vidc_1080p_set_encode_recon_buffers(u32 recon_buffer, u32 *pn_enc_luma,
+	u32 *pn_enc_chroma);
+void vidc_1080p_set_h264_encode_work_buffers(u32 up_row_mv_buffer_offset,
+	u32 direct_colzero_flag_buffer_offset,
+	u32 upper_intra_md_buffer_offset,
+	u32 upper_intra_pred_buffer_offset, u32 nbor_infor_buffer_offset,
+	u32 mb_info_offset);
+void vidc_1080p_set_h263_encode_work_buffers(u32 up_row_mv_buffer_offset,
+	u32 up_row_inv_quanti_coeff_buffer_offset);
+void vidc_1080p_set_mpeg4_encode_work_buffers(u32 skip_flag_buffer_offset,
+	u32 up_row_inv_quanti_coeff_buffer_offset, u32 upper_mv_offset);
+void vidc_1080p_set_encode_frame_size(u32 hori_size, u32 vert_size);
+void vidc_1080p_set_encode_profile_level(u32 encode_profile, u32 enc_level);
+void vidc_1080p_set_encode_field_picture_structure(u32 enc_field_picture);
+void vidc_1080p_set_decode_mpeg4_pp_filter(u32 lf_enables);
+void vidc_1080p_set_decode_qp_save_control(u32 enable_q_pout);
+void vidc_1080p_get_returned_channel_inst_id(u32 *pn_rtn_chid);
+void vidc_1080p_clear_returned_channel_inst_id(void);
+void vidc_1080p_get_decode_seq_start_result(
+	struct vidc_1080p_seq_hdr_info *seq_hdr_info);
+void vidc_1080p_get_decoded_frame_size(u32 *pn_decoded_size);
+void vidc_1080p_get_display_frame_result(
+	struct vidc_1080p_dec_disp_info *dec_disp_info);
+void vidc_1080p_get_decode_frame(
+	enum vidc_1080p_decode_frame *pe_frame);
+void vidc_1080p_get_decode_frame_result(
+	struct vidc_1080p_dec_disp_info *dec_disp_info);
+void vidc_1080p_decode_seq_start_ch0(
+	struct vidc_1080p_dec_seq_start_param *param);
+void vidc_1080p_decode_seq_start_ch1(
+	struct vidc_1080p_dec_seq_start_param *param);
+void vidc_1080p_decode_init_buffers_ch0(
+	struct vidc_1080p_dec_init_buffers_param *param);
+void vidc_1080p_decode_init_buffers_ch1(
+	struct vidc_1080p_dec_init_buffers_param *param);
+void vidc_1080p_decode_frame_start_ch0(
+	struct vidc_1080p_dec_frame_start_param *param);
+void vidc_1080p_decode_frame_start_ch1(
+	struct vidc_1080p_dec_frame_start_param *param);
+void vidc_1080p_set_dec_resolution_ch0(u32 width, u32 height);
+void vidc_1080p_set_dec_resolution_ch1(u32 width, u32 height);
+void vidc_1080p_get_encode_frame_info(
+	struct vidc_1080p_enc_frame_info *frame_info);
+void vidc_1080p_encode_seq_start_ch0(
+	struct vidc_1080p_enc_seq_start_param *param);
+void vidc_1080p_encode_seq_start_ch1(
+	struct vidc_1080p_enc_seq_start_param *param);
+void vidc_1080p_encode_frame_start_ch0(
+	struct vidc_1080p_enc_frame_start_param *param);
+void vidc_1080p_encode_frame_start_ch1(
+	struct vidc_1080p_enc_frame_start_param *param);
+void vidc_1080p_set_encode_picture(u32 number_p, u32 number_b);
+void vidc_1080p_set_encode_multi_slice_control(
+	enum vidc_1080p_MSlice_selection multiple_slice_selection,
+	u32 mslice_mb, u32 mslice_byte);
+void vidc_1080p_set_encode_circular_intra_refresh(u32 cir_num);
+void vidc_1080p_set_encode_input_frame_format(
+	enum vidc_1080p_memory_access_method memory_format);
+void vidc_1080p_set_encode_padding_control(u32 pad_ctrl_on,
+	u32 cr_pad_val, u32 cb_pad_val, u32 luma_pad_val);
+void vidc_1080p_encode_set_rc_config(u32 enable_frame_level_rc,
+	u32 enable_mb_level_rc_flag, u32 frame_qp);
+void vidc_1080p_encode_set_frame_level_rc_params(u32 rc_frame_rate,
+	u32 target_bitrate, u32 reaction_coeff);
+void vidc_1080p_encode_set_qp_params(u32 max_qp, u32 min_qp);
+void vidc_1080p_encode_set_mb_level_rc_params(u32 disable_dark_region_as_flag,
+	u32 disable_smooth_region_as_flag, u32 disable_static_region_as_flag,
+	u32 disable_activity_region_flag);
+void vidc_1080p_get_qp(u32 *pn_frame_qp);
+void vidc_1080p_set_h264_encode_entropy(
+	enum vidc_1080p_entropy_sel entropy_sel);
+void vidc_1080p_set_h264_encode_loop_filter(
+	enum vidc_1080p_DBConfig db_config, u32 slice_alpha_offset,
+	u32 slice_beta_offset);
+void vidc_1080p_set_h264_encoder_p_frame_ref_count(u32 max_reference);
+void vidc_1080p_set_h264_encode_8x8transform_control(u32 enable_8x8transform);
+void vidc_1080p_set_mpeg4_encode_quarter_pel_control(
+	u32 enable_mpeg4_quarter_pel);
+void vidc_1080p_set_device_base_addr(u8 *mapped_va);
+void vidc_1080p_get_intra_bias(u32 *intra_bias);
+void vidc_1080p_set_intra_bias(u32 intra_bias);
+void vidc_1080p_get_bi_directional_bias(u32 *bi_directional_bias);
+void vidc_1080p_set_bi_directional_bias(u32 bi_directional_bias);
+void vidc_1080p_get_encoder_sequence_header_size(u32 *seq_header_size);
+void vidc_1080p_get_intermedia_stage_debug_counter(
+	u32 *intermediate_stage_counter);
+void vidc_1080p_get_exception_status(u32 *exception_status);
+void vidc_1080p_frame_start_realloc(u32 instance_id);
+#endif
diff --git a/drivers/video/msm/vidc/1080p/ddl/vidc_hwio.h b/drivers/video/msm/vidc/1080p/ddl/vidc_hwio.h
new file mode 100644
index 0000000..f63ebcd
--- /dev/null
+++ b/drivers/video/msm/vidc/1080p/ddl/vidc_hwio.h
@@ -0,0 +1,115 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VIDC_HWIO_H_
+#define _VIDC_HWIO_H_
+
+#include "vidc_hwio_reg.h"
+
+#ifdef VIDC_REGISTER_LOG
+#define VIDC_REG_OUT(x...)  printk(KERN_DEBUG x)
+#define VIDC_REG_IN(x...)   printk(KERN_DEBUG x)
+#else
+#define VIDC_REG_OUT(x...)
+#define VIDC_REG_IN(x...)
+#endif
+
+#define __inpdw(port) (*((u32 *) (port)))
+#define __outpdw(port, val) (*((u32 *) (port)) = ((u32) (val)))
+
+#define in_dword(addr) (__inpdw(addr))
+#define in_dword_masked(addr, mask) (__inpdw(addr) & (mask))
+#define out_dword(addr, val) __outpdw(addr, val)
+
+#define out_dword_masked(io, mask, val, shadow) \
+do { \
+	shadow = (shadow & (u32)(~(mask))) | ((u32)((val) & (mask))); \
+	out_dword(io, shadow); \
+} while (0)
+#define out_dword_masked_ns(io, mask, val, current_reg_content) \
+	out_dword(io, ((current_reg_content & (u32)(~(mask))) | \
+	((u32)((val) & (mask)))))
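+/*
+ * out_dword_masked() performs a read-modify-write through a caller-owned
+ * shadow value (and updates the shadow); out_dword_masked_ns() takes the
+ * current register contents directly and keeps no shadow.  The per-register
+ * OUTM macros in vidc_hwio_reg.h use the _ns variant with a fresh readback.
+ */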
+
+#define HWIO_IN(hwiosym)  HWIO_##hwiosym##_IN
+#define HWIO_INI(hwiosym, index)  HWIO_##hwiosym##_INI(index)
+#define HWIO_INM(hwiosym, mask)   HWIO_##hwiosym##_INM(mask)
+#define HWIO_INF(hwiosym, field)  (HWIO_INM(hwiosym, \
+	HWIO_FMSK(hwiosym, field)) >> HWIO_SHFT(hwiosym, field))
+
+#define HWIO_OUT(hwiosym, val)  HWIO_##hwiosym##_OUT(val)
+#define HWIO_OUTI(hwiosym, index, val)  HWIO_##hwiosym##_OUTI(index, val)
+#define HWIO_OUTM(hwiosym, mask, val)  HWIO_##hwiosym##_OUTM(mask, val)
+#define HWIO_OUTF(hwiosym, field, val)  HWIO_OUTM(hwiosym, \
+	HWIO_FMSK(hwiosym, field), (u32)(val) << HWIO_SHFT(hwiosym, field))
+
+#define HWIO_SHFT(hwio_regsym, hwio_fldsym) \
+	HWIO_##hwio_regsym##_##hwio_fldsym##_SHFT
+#define HWIO_FMSK(hwio_regsym, hwio_fldsym) \
+	HWIO_##hwio_regsym##_##hwio_fldsym##_BMSK
+
+#define VIDC_SETFIELD(val, shift, mask) \
+	(((val) << (shift)) & (mask))
+#define VIDC_GETFIELD(val, mask, shift) \
+	(((val) & (mask)) >> (shift))
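+/*
+ * VIDC_SETFIELD() shifts a value into position and masks it;
+ * VIDC_GETFIELD() masks a register value and shifts the field back down.
+ * For example, the encoder profile/level word is built as:
+ *
+ *	VIDC_SETFIELD(level, HWIO_REG_63643_LEVEL_SHFT,
+ *			HWIO_REG_63643_LEVEL_BMSK) |
+ *	VIDC_SETFIELD(profile, HWIO_REG_63643_PROFILE_SHFT,
+ *			HWIO_REG_63643_PROFILE_BMSK)
+ */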
+
+#define VIDC_HWIO_OUT(hwiosym, val) \
+do { \
+	VIDC_REG_OUT("\n(0x%x:"#hwiosym"=0x%x)", \
+	HWIO_##hwiosym##_ADDR - VIDC_BASE_PTR, val); \
+	mb(); \
+	HWIO_OUT(hwiosym, val); \
+} while (0)
+#define VIDC_HWIO_OUTI(hwiosym, index, val) \
+do { \
+	VIDC_REG_OUT("\n(0x%x:"#hwiosym"(%d)=0x%x)", \
+	HWIO_##hwiosym##_ADDR(index) - VIDC_BASE_PTR, index, val); \
+	mb(); \
+	HWIO_OUTI(hwiosym, index, val); \
+} while (0)
+#define VIDC_HWIO_OUTF(hwiosym, field, val) \
+do { \
+	VIDC_REG_OUT("\n(0x%x:"#hwiosym":0x%x:=0x%x)", \
+	HWIO_##hwiosym##_ADDR - VIDC_BASE_PTR, \
+	HWIO_##hwiosym##_##field##_BMSK, val); \
+	mb(); \
+	HWIO_OUTF(hwiosym, field, val); \
+} while (0)
+#define VIDC_OUT_DWORD(addr, val) \
+do { \
+	VIDC_REG_OUT("\n(0x%x:"#addr"=0x%x)", \
+	addr - VIDC_BASE_PTR, val); \
+	mb(); \
+	out_dword(addr, val); \
+} while (0)
+#define VIDC_HWIO_IN(hwiosym, pval) \
+do { \
+	mb(); \
+	*pval = (u32) HWIO_IN(hwiosym); \
+	VIDC_REG_IN("\n(0x%x:"#hwiosym"=0x%x)", \
+	HWIO_##hwiosym##_ADDR - VIDC_BASE_PTR, *pval);\
+} while (0)
+#define VIDC_HWIO_INI(hwiosym, index, pval) \
+do { \
+	mb(); \
+	*pval = (u32) HWIO_INI(hwiosym, index); \
+	VIDC_REG_IN("(0x%x:"#hwiosym"(%d)==0x%x)", \
+	HWIO_##hwiosym##_ADDR(index) - VIDC_BASE_PTR, index, *pval); \
+} while (0)
+#define VIDC_HWIO_INF(hwiosym, mask, pval) \
+do { \
+	mb(); \
+	*pval = HWIO_INF(hwiosym, mask); \
+	VIDC_REG_IN("\n(0x%x:"#hwiosym"=0x%x)", \
+	HWIO_##hwiosym##_ADDR - VIDC_BASE_PTR, *pval); \
+} while (0)
+#endif
diff --git a/drivers/video/msm/vidc/1080p/ddl/vidc_hwio_reg.h b/drivers/video/msm/vidc/1080p/ddl/vidc_hwio_reg.h
new file mode 100644
index 0000000..819cd6c
--- /dev/null
+++ b/drivers/video/msm/vidc/1080p/ddl/vidc_hwio_reg.h
@@ -0,0 +1,4544 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VIDC_HWIO_REG_H_
+#define _VIDC_HWIO_REG_H_
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <asm/system.h>
+#include "vidc.h"
+
+extern u8 *VIDC_BASE_PTR;
+
+#define VIDC_BASE  VIDC_BASE_PTR
+
+#define VIDC_BLACKBIRD_REG_BASE  (VIDC_BASE + 0x00000000)
+#define VIDC_BLACKBIRD_REG_BASE_PHYS  0x04400000
+
+#define HWIO_REG_557899_ADDR  (VIDC_BLACKBIRD_REG_BASE + 00000000)
+#define HWIO_REG_557899_PHYS  (VIDC_BLACKBIRD_REG_BASE_PHYS + 00000000)
+#define HWIO_REG_557899_RMSK  0x3ff
+#define HWIO_REG_557899_SHFT  0
+#define HWIO_REG_557899_IN  in_dword_masked(HWIO_REG_557899_ADDR,\
+	HWIO_REG_557899_RMSK)
+#define HWIO_REG_557899_INM(m)  in_dword_masked(HWIO_REG_557899_ADDR, m)
+#define HWIO_REG_557899_OUT(v)  out_dword(HWIO_REG_557899_ADDR, v)
+#define HWIO_REG_557899_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_557899_ADDR, m, v, HWIO_REG_557899_IN);
+#define HWIO_REG_557899_RSTN_RG_MPEG2_BMSK     0x200
+#define HWIO_REG_557899_RSTN_RG_MPEG2_SHFT     0x9
+#define HWIO_REG_557899_RSTN_RG_MPEG4_BMSK     0x100
+#define HWIO_REG_557899_RSTN_RG_MPEG4_SHFT     0x8
+#define HWIO_REG_557899_RSTN_RG_VC1_BMSK       0x80
+#define HWIO_REG_557899_RSTN_RG_VC1_SHFT       0x7
+#define HWIO_REG_557899_RSTN_RG_H264_BMSK      0x40
+#define HWIO_REG_557899_RSTN_RG_H264_SHFT      0x6
+#define HWIO_REG_557899_RSTN_RG_COMMON_BMSK    0x20
+#define HWIO_REG_557899_RSTN_RG_COMMON_SHFT    0x5
+#define HWIO_REG_557899_RSTN_DMX_BMSK          0x10
+#define HWIO_REG_557899_RSTN_DMX_SHFT          0x4
+#define HWIO_REG_557899_RSTN_VI_BMSK           0x8
+#define HWIO_REG_557899_RSTN_VI_SHFT           0x3
+#define HWIO_REG_557899_RSTN_VIDCCORE_BMSK     0x4
+#define HWIO_REG_557899_RSTN_VIDCCORE_SHFT     0x2
+#define HWIO_REG_557899_RSTN_MC_BMSK           0x2
+#define HWIO_REG_557899_RSTN_MC_SHFT           0x1
+#define HWIO_REG_557899_RSTN_RISC_BMSK         0x1
+#define HWIO_REG_557899_RSTN_RISC_SHFT         0
+
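+/*
+ * Every register below follows the same pattern: _ADDR/_PHYS give the
+ * virtual and physical locations, _RMSK/_SHFT describe the register as a
+ * whole, _IN/_INM read it (optionally masked), _OUT/_OUTM write it, and
+ * per-field _BMSK/_SHFT pairs describe the individual bit fields.
+ */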
+#define HWIO_REG_575377_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00000008)
+#define HWIO_REG_575377_PHYS (VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000008)
+#define HWIO_REG_575377_RMSK 0x1
+#define HWIO_REG_575377_SHFT 0
+#define HWIO_REG_575377_IN  in_dword_masked(\
+	HWIO_REG_575377_ADDR, HWIO_REG_575377_RMSK)
+#define HWIO_REG_575377_INM(m) \
+	in_dword_masked(HWIO_REG_575377_ADDR, m)
+#define HWIO_REG_575377_OUT(v) \
+	out_dword(HWIO_REG_575377_ADDR, v)
+#define HWIO_REG_575377_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_575377_ADDR, m, v, HWIO_REG_575377_IN);
+#define HWIO_REG_575377_INTERRUPT_BMSK  0x1
+#define HWIO_REG_575377_INTERRUPT_SHFT  0
+
+#define HWIO_REG_611794_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00000030)
+#define HWIO_REG_611794_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000030)
+#define HWIO_REG_611794_RMSK  0xffffffff
+#define HWIO_REG_611794_SHFT  0
+#define HWIO_REG_611794_IN  in_dword_masked(\
+	HWIO_REG_611794_ADDR, HWIO_REG_611794_RMSK)
+#define HWIO_REG_611794_INM(m) \
+	in_dword_masked(HWIO_REG_611794_ADDR, m)
+#define HWIO_REG_611794_OUT(v) \
+	out_dword(HWIO_REG_611794_ADDR, v)
+#define HWIO_REG_611794_OUTM(m, v) \
+	out_dword_masked_ns(HWIO_REG_611794_ADDR, m, v,\
+	HWIO_REG_611794_IN);
+#define HWIO_REG_611794_HOST2RISC_COMMAND_BMSK 0xffffffff
+#define HWIO_REG_611794_HOST2RISC_COMMAND_SHFT 0
+
+#define HWIO_REG_356340_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00000034)
+#define HWIO_REG_356340_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000034)
+#define HWIO_REG_356340_RMSK  0xffffffff
+#define HWIO_REG_356340_SHFT  0
+#define HWIO_REG_356340_IN  in_dword_masked(\
+	HWIO_REG_356340_ADDR, HWIO_REG_356340_RMSK)
+#define HWIO_REG_356340_INM(m) \
+	in_dword_masked(HWIO_REG_356340_ADDR, m)
+#define HWIO_REG_356340_OUT(v) \
+	out_dword(HWIO_REG_356340_ADDR, v)
+#define HWIO_REG_356340_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_356340_ADDR, m, v, HWIO_REG_356340_IN);
+#define HWIO_REG_356340_HOST2RISC_ARG1_BMSK  0xffffffff
+#define HWIO_REG_356340_HOST2RISC_ARG1_SHFT  0
+
+#define HWIO_REG_899023_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00000038)
+#define HWIO_REG_899023_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000038)
+#define HWIO_REG_899023_RMSK 0xffffffff
+#define HWIO_REG_899023_SHFT 0
+#define HWIO_REG_899023_IN  in_dword_masked(\
+	HWIO_REG_899023_ADDR, HWIO_REG_899023_RMSK)
+#define HWIO_REG_899023_INM(m) \
+	in_dword_masked(HWIO_REG_899023_ADDR, m)
+#define HWIO_REG_899023_OUT(v) \
+	out_dword(HWIO_REG_899023_ADDR, v)
+#define HWIO_REG_899023_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_899023_ADDR, m, v, HWIO_REG_899023_IN);
+#define HWIO_REG_899023_HOST2RISC_ARG2_BMSK  0xffffffff
+#define HWIO_REG_899023_HOST2RISC_ARG2_SHFT  0
+
+#define HWIO_REG_987762_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000003c)
+#define HWIO_REG_987762_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000003c)
+#define HWIO_REG_987762_RMSK  0xffffffff
+#define HWIO_REG_987762_SHFT  0
+#define HWIO_REG_987762_IN  in_dword_masked(\
+	HWIO_REG_987762_ADDR, HWIO_REG_987762_RMSK)
+#define HWIO_REG_987762_INM(m) \
+	in_dword_masked(HWIO_REG_987762_ADDR, m)
+#define HWIO_REG_987762_OUT(v) \
+	out_dword(HWIO_REG_987762_ADDR, v)
+#define HWIO_REG_987762_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_987762_ADDR, m, v, HWIO_REG_987762_IN);
+#define HWIO_REG_987762_HOST2RISC_ARG3_BMSK  0xffffffff
+#define HWIO_REG_987762_HOST2RISC_ARG3_SHFT  0
+
+#define HWIO_REG_544000_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00000040)
+#define HWIO_REG_544000_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000040)
+#define HWIO_REG_544000_RMSK  0xffffffff
+#define HWIO_REG_544000_SHFT  0
+#define HWIO_REG_544000_IN  in_dword_masked(\
+	HWIO_REG_544000_ADDR, HWIO_REG_544000_RMSK)
+#define HWIO_REG_544000_INM(m)  \
+	in_dword_masked(HWIO_REG_544000_ADDR, m)
+#define HWIO_REG_544000_OUT(v)  \
+	out_dword(HWIO_REG_544000_ADDR, v)
+#define HWIO_REG_544000_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_544000_ADDR, m, v, HWIO_REG_544000_IN);
+#define HWIO_REG_544000_HOST2RISC_ARG4_BMSK  0xffffffff
+#define HWIO_REG_544000_HOST2RISC_ARG4_SHFT  0
+
+#define HWIO_REG_695082_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00000044)
+#define HWIO_REG_695082_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000044)
+#define HWIO_REG_695082_RMSK  0xffffffff
+#define HWIO_REG_695082_SHFT  0
+#define HWIO_REG_695082_IN  in_dword_masked(\
+	HWIO_REG_695082_ADDR, HWIO_REG_695082_RMSK)
+#define HWIO_REG_695082_INM(m) \
+	in_dword_masked(HWIO_REG_695082_ADDR, m)
+#define HWIO_REG_695082_OUT(v) \
+	out_dword(HWIO_REG_695082_ADDR, v)
+#define HWIO_REG_695082_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_695082_ADDR, m, v, HWIO_REG_695082_IN);
+#define HWIO_REG_695082_RISC2HOST_COMMAND_BMSK  0xffffffff
+#define HWIO_REG_695082_RISC2HOST_COMMAND_SHFT  0
+
+#define HWIO_REG_156596_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00000048)
+#define HWIO_REG_156596_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000048)
+#define HWIO_REG_156596_RMSK  0xffffffff
+#define HWIO_REG_156596_SHFT  0
+#define HWIO_REG_156596_IN  in_dword_masked(\
+	HWIO_REG_156596_ADDR, HWIO_REG_156596_RMSK)
+#define HWIO_REG_156596_INM(m) \
+	in_dword_masked(HWIO_REG_156596_ADDR, m)
+#define HWIO_REG_156596_OUT(v) \
+	out_dword(HWIO_REG_156596_ADDR, v)
+#define HWIO_REG_156596_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_156596_ADDR, m, v, HWIO_REG_156596_IN);
+#define HWIO_REG_156596_REG_156596_BMSK  0xffffffff
+#define HWIO_REG_156596_REG_156596_SHFT  0
+
+#define HWIO_REG_222292_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000004c)
+#define HWIO_REG_222292_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000004c)
+#define HWIO_REG_222292_RMSK  0xffffffff
+#define HWIO_REG_222292_SHFT  0
+#define HWIO_REG_222292_IN  in_dword_masked(\
+	HWIO_REG_222292_ADDR, HWIO_REG_222292_RMSK)
+#define HWIO_REG_222292_INM(m) \
+	in_dword_masked(HWIO_REG_222292_ADDR, m)
+#define HWIO_REG_222292_OUT(v) \
+	out_dword(HWIO_REG_222292_ADDR, v)
+#define HWIO_REG_222292_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_222292_ADDR, m, v, HWIO_REG_222292_IN);
+#define HWIO_REG_222292_REG_222292_BMSK  0xffffffff
+#define HWIO_REG_222292_REG_222292_SHFT  0
+
+#define HWIO_REG_790962_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00000050)
+#define HWIO_REG_790962_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000050)
+#define HWIO_REG_790962_RMSK  0xffffffff
+#define HWIO_REG_790962_SHFT  0
+#define HWIO_REG_790962_IN  in_dword_masked(\
+	HWIO_REG_790962_ADDR, HWIO_REG_790962_RMSK)
+#define HWIO_REG_790962_INM(m) \
+	in_dword_masked(HWIO_REG_790962_ADDR, m)
+#define HWIO_REG_790962_OUT(v) \
+	out_dword(HWIO_REG_790962_ADDR, v)
+#define HWIO_REG_790962_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_790962_ADDR, m, v, HWIO_REG_790962_IN);
+#define HWIO_REG_790962_REG_790962_BMSK  0xffffffff
+#define HWIO_REG_790962_REG_790962_SHFT  0
+
+#define HWIO_REG_679882_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00000054)
+#define HWIO_REG_679882_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000054)
+#define HWIO_REG_679882_RMSK  0xffffffff
+#define HWIO_REG_679882_SHFT  0
+#define HWIO_REG_679882_IN  in_dword_masked(\
+	HWIO_REG_679882_ADDR, HWIO_REG_679882_RMSK)
+#define HWIO_REG_679882_INM(m) \
+	in_dword_masked(HWIO_REG_679882_ADDR, m)
+#define HWIO_REG_679882_OUT(v) \
+	out_dword(HWIO_REG_679882_ADDR, v)
+#define HWIO_REG_679882_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_679882_ADDR, m, v, HWIO_REG_679882_IN);
+#define HWIO_REG_679882_REG_679882_BMSK  0xffffffff
+#define HWIO_REG_679882_REG_679882_SHFT  0
+
+#define HWIO_REG_653206_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00000058)
+#define HWIO_REG_653206_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000058)
+#define HWIO_REG_653206_RMSK  0xffffff
+#define HWIO_REG_653206_SHFT  0
+#define HWIO_REG_653206_IN  in_dword_masked(\
+	HWIO_REG_653206_ADDR, HWIO_REG_653206_RMSK)
+#define HWIO_REG_653206_INM(m) \
+	in_dword_masked(HWIO_REG_653206_ADDR, m)
+#define HWIO_REG_653206_YEAR_BMSK   0xff0000
+#define HWIO_REG_653206_YEAR_SHFT   0x10
+#define HWIO_REG_653206_MONTH_BMSK  0xff00
+#define HWIO_REG_653206_MONTH_SHFT  0x8
+#define HWIO_REG_653206_DAY_BMSK    0xff
+#define HWIO_REG_653206_DAY_SHFT    0
+
+#define HWIO_REG_805993_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00000064)
+#define HWIO_REG_805993_PHYS (VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000064)
+#define HWIO_REG_805993_RMSK  0xffffffff
+#define HWIO_REG_805993_SHFT  0
+#define HWIO_REG_805993_IN  in_dword_masked(\
+	HWIO_REG_805993_ADDR, HWIO_REG_805993_RMSK)
+#define HWIO_REG_805993_INM(m) \
+	in_dword_masked(HWIO_REG_805993_ADDR, m)
+#define HWIO_REG_805993_INTERMEDIATE_STAGE_COUNTER_BMSK  0xffffffff
+#define HWIO_REG_805993_INTERMEDIATE_STAGE_COUNTER_SHFT  0
+
+#define HWIO_REG_493355_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00000068)
+#define HWIO_REG_493355_PHYS (VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000068)
+#define HWIO_REG_493355_RMSK  0xffffffff
+#define HWIO_REG_493355_SHFT  0
+#define HWIO_REG_493355_IN  in_dword_masked(\
+	HWIO_REG_493355_ADDR, HWIO_REG_493355_RMSK)
+#define HWIO_REG_493355_INM(m) \
+	in_dword_masked(HWIO_REG_493355_ADDR, m)
+#define HWIO_REG_493355_EXCEPTION_STATUS_BMSK  0xffffffff
+#define HWIO_REG_493355_EXCEPTION_STATUS_SHFT  0
+
+#define HWIO_REG_350619_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00000080)
+#define HWIO_REG_350619_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000080)
+#define HWIO_REG_350619_RMSK  0x1
+#define HWIO_REG_350619_SHFT  0
+#define HWIO_REG_350619_IN  in_dword_masked(\
+	HWIO_REG_350619_ADDR, HWIO_REG_350619_RMSK)
+#define HWIO_REG_350619_INM(m) \
+	in_dword_masked(HWIO_REG_350619_ADDR, m)
+#define HWIO_REG_350619_FIRMWARE_STATUS_BMSK  0x1
+#define HWIO_REG_350619_FIRMWARE_STATUS_SHFT  0
+
+#define HWIO_REG_64440_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00000508)
+#define HWIO_REG_64440_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000508)
+#define HWIO_REG_64440_RMSK  0xfffe0000
+#define HWIO_REG_64440_SHFT  0
+#define HWIO_REG_64440_IN  in_dword_masked(\
+	HWIO_REG_64440_ADDR, HWIO_REG_64440_RMSK)
+#define HWIO_REG_64440_INM(m) \
+	in_dword_masked(HWIO_REG_64440_ADDR, m)
+#define HWIO_REG_64440_OUT(v) \
+	out_dword(HWIO_REG_64440_ADDR, v)
+#define HWIO_REG_64440_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_64440_ADDR, m, v,\
+	HWIO_REG_64440_IN);
+#define HWIO_REG_64440_MC_DRAMBASE_ADDR_BMSK  0xfffe0000
+#define HWIO_REG_64440_MC_DRAMBASE_ADDR_SHFT  0x11
+
+#define HWIO_REG_675915_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000050c)
+#define HWIO_REG_675915_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000050c)
+#define HWIO_REG_675915_RMSK  0xfffe0000
+#define HWIO_REG_675915_SHFT  0
+#define HWIO_REG_675915_IN  in_dword_masked(\
+	HWIO_REG_675915_ADDR, HWIO_REG_675915_RMSK)
+#define HWIO_REG_675915_INM(m) \
+	in_dword_masked(HWIO_REG_675915_ADDR, m)
+#define HWIO_REG_675915_OUT(v) \
+	out_dword(HWIO_REG_675915_ADDR, v)
+#define HWIO_REG_675915_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_675915_ADDR, m, v,\
+	HWIO_REG_675915_IN);
+#define HWIO_REG_675915_MC_DRAMBASE_ADDR_BMSK  0xfffe0000
+#define HWIO_REG_675915_MC_DRAMBASE_ADDR_SHFT  0x11
+
+#define HWIO_REG_399911_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00000510)
+#define HWIO_REG_399911_PHYS (VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000510)
+#define HWIO_REG_399911_RMSK  0x3
+#define HWIO_REG_399911_SHFT  0
+#define HWIO_REG_399911_IN  in_dword_masked(\
+	HWIO_REG_399911_ADDR, HWIO_REG_399911_RMSK)
+#define HWIO_REG_399911_INM(m)  in_dword_masked(HWIO_REG_399911_ADDR, m)
+#define HWIO_REG_399911_MC_BUSY_B_BMSK  0x2
+#define HWIO_REG_399911_MC_BUSY_B_SHFT  0x1
+#define HWIO_REG_399911_MC_BUSY_A_BMSK  0x1
+#define HWIO_REG_399911_MC_BUSY_A_SHFT  0
+
+#define HWIO_REG_515200_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00000600)
+#define HWIO_REG_515200_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000600)
+#define HWIO_REG_515200_RMSK  0x1ffff
+#define HWIO_REG_515200_SHFT  0
+#define HWIO_REG_515200_IN  in_dword_masked(\
+	HWIO_REG_515200_ADDR, HWIO_REG_515200_RMSK)
+#define HWIO_REG_515200_INM(m) \
+	in_dword_masked(HWIO_REG_515200_ADDR, m)
+#define HWIO_REG_515200_OUT(v) \
+	out_dword(HWIO_REG_515200_ADDR, v)
+#define HWIO_REG_515200_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_515200_ADDR, m, v,\
+	HWIO_REG_515200_IN);
+#define HWIO_REG_515200_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_515200_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_29510_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00000604)
+#define HWIO_REG_29510_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000604)
+#define HWIO_REG_29510_RMSK  0x1ffff
+#define HWIO_REG_29510_SHFT  0
+#define HWIO_REG_29510_IN  in_dword_masked(\
+	HWIO_REG_29510_ADDR, HWIO_REG_29510_RMSK)
+#define HWIO_REG_29510_INM(m) \
+	in_dword_masked(HWIO_REG_29510_ADDR, m)
+#define HWIO_REG_29510_OUT(v) \
+	out_dword(HWIO_REG_29510_ADDR, v)
+#define HWIO_REG_29510_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_29510_ADDR, m, v,\
+	HWIO_REG_29510_IN);
+#define HWIO_REG_29510_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_29510_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_256132_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00000608)
+#define HWIO_REG_256132_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000608)
+#define HWIO_REG_256132_RMSK  0x1ffff
+#define HWIO_REG_256132_SHFT  0
+#define HWIO_REG_256132_IN  in_dword_masked(\
+	HWIO_REG_256132_ADDR, HWIO_REG_256132_RMSK)
+#define HWIO_REG_256132_INM(m) \
+	in_dword_masked(HWIO_REG_256132_ADDR, m)
+#define HWIO_REG_256132_OUT(v) \
+	out_dword(HWIO_REG_256132_ADDR, v)
+#define HWIO_REG_256132_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_256132_ADDR, m, v,\
+	HWIO_REG_256132_IN);
+#define HWIO_REG_256132_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_256132_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_885152_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000060c)
+#define HWIO_REG_885152_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000060c)
+#define HWIO_REG_885152_RMSK  0x1ffff
+#define HWIO_REG_885152_SHFT  0
+#define HWIO_REG_885152_IN  in_dword_masked(\
+	HWIO_REG_885152_ADDR, HWIO_REG_885152_RMSK)
+#define HWIO_REG_885152_INM(m) \
+	in_dword_masked(HWIO_REG_885152_ADDR, m)
+#define HWIO_REG_885152_OUT(v) \
+	out_dword(HWIO_REG_885152_ADDR, v)
+#define HWIO_REG_885152_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_885152_ADDR, m, v,\
+	HWIO_REG_885152_IN);
+#define HWIO_REG_885152_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_885152_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_69832_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00000610)
+#define HWIO_REG_69832_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000610)
+#define HWIO_REG_69832_RMSK  0x1ffff
+#define HWIO_REG_69832_SHFT  0
+#define HWIO_REG_69832_IN  in_dword_masked(\
+	HWIO_REG_69832_ADDR, HWIO_REG_69832_RMSK)
+#define HWIO_REG_69832_INM(m) \
+	in_dword_masked(HWIO_REG_69832_ADDR, m)
+#define HWIO_REG_69832_OUT(v) \
+	out_dword(HWIO_REG_69832_ADDR, v)
+#define HWIO_REG_69832_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_69832_ADDR, m, v,\
+	HWIO_REG_69832_IN);
+#define HWIO_REG_69832_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_69832_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_686205_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00000614)
+#define HWIO_REG_686205_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000614)
+#define HWIO_REG_686205_RMSK  0x1ffff
+#define HWIO_REG_686205_SHFT  0
+#define HWIO_REG_686205_IN  in_dword_masked(\
+	HWIO_REG_686205_ADDR, HWIO_REG_686205_RMSK)
+#define HWIO_REG_686205_INM(m) \
+	in_dword_masked(HWIO_REG_686205_ADDR, m)
+#define HWIO_REG_686205_OUT(v) \
+	out_dword(HWIO_REG_686205_ADDR, v)
+#define HWIO_REG_686205_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_686205_ADDR, m, v,\
+	HWIO_REG_686205_IN);
+#define HWIO_REG_686205_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_686205_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_728036_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00000618)
+#define HWIO_REG_728036_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000618)
+#define HWIO_REG_728036_RMSK  0x1ffff
+#define HWIO_REG_728036_SHFT  0
+#define HWIO_REG_728036_IN  in_dword_masked(\
+	HWIO_REG_728036_ADDR, HWIO_REG_728036_RMSK)
+#define HWIO_REG_728036_INM(m) \
+	in_dword_masked(HWIO_REG_728036_ADDR, m)
+#define HWIO_REG_728036_OUT(v) \
+	out_dword(HWIO_REG_728036_ADDR, v)
+#define HWIO_REG_728036_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_728036_ADDR, m, v,\
+	HWIO_REG_728036_IN);
+#define HWIO_REG_728036_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_728036_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_294579_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000061c)
+#define HWIO_REG_294579_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000061c)
+#define HWIO_REG_294579_RMSK  0x1ffff
+#define HWIO_REG_294579_SHFT  0
+#define HWIO_REG_294579_IN  in_dword_masked(\
+	HWIO_REG_294579_ADDR, HWIO_REG_294579_RMSK)
+#define HWIO_REG_294579_INM(m) \
+	in_dword_masked(HWIO_REG_294579_ADDR, m)
+#define HWIO_REG_294579_OUT(v) \
+	out_dword(HWIO_REG_294579_ADDR, v)
+#define HWIO_REG_294579_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_294579_ADDR, m, v,\
+	HWIO_REG_294579_IN);
+#define HWIO_REG_294579_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_294579_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_61427_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00000620)
+#define HWIO_REG_61427_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000620)
+#define HWIO_REG_61427_RMSK  0x1ffff
+#define HWIO_REG_61427_SHFT  0
+#define HWIO_REG_61427_IN  in_dword_masked(\
+	HWIO_REG_61427_ADDR, HWIO_REG_61427_RMSK)
+#define HWIO_REG_61427_INM(m) \
+	in_dword_masked(HWIO_REG_61427_ADDR, m)
+#define HWIO_REG_61427_OUT(v) \
+	out_dword(HWIO_REG_61427_ADDR, v)
+#define HWIO_REG_61427_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_61427_ADDR, m, v,\
+	HWIO_REG_61427_IN);
+#define HWIO_REG_61427_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_61427_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_578196_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00000624)
+#define HWIO_REG_578196_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000624)
+#define HWIO_REG_578196_RMSK  0x1ffff
+#define HWIO_REG_578196_SHFT  0
+#define HWIO_REG_578196_IN  in_dword_masked(\
+	HWIO_REG_578196_ADDR, HWIO_REG_578196_RMSK)
+#define HWIO_REG_578196_INM(m) \
+	in_dword_masked(HWIO_REG_578196_ADDR, m)
+#define HWIO_REG_578196_OUT(v) \
+	out_dword(HWIO_REG_578196_ADDR, v)
+#define HWIO_REG_578196_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_578196_ADDR, m, v,\
+	HWIO_REG_578196_IN);
+#define HWIO_REG_578196_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_578196_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_408588_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000628)
+#define HWIO_REG_408588_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000628)
+#define HWIO_REG_408588_RMSK  0x1ffff
+#define HWIO_REG_408588_SHFT  0
+#define HWIO_REG_408588_IN  in_dword_masked(\
+	HWIO_REG_408588_ADDR, HWIO_REG_408588_RMSK)
+#define HWIO_REG_408588_INM(m) \
+	in_dword_masked(HWIO_REG_408588_ADDR, m)
+#define HWIO_REG_408588_OUT(v) \
+	out_dword(HWIO_REG_408588_ADDR, v)
+#define HWIO_REG_408588_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_408588_ADDR, m, v,\
+	HWIO_REG_408588_IN);
+#define HWIO_REG_408588_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_408588_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_55617_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x0000062c)
+#define HWIO_REG_55617_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000062c)
+#define HWIO_REG_55617_RMSK  0x1ffff
+#define HWIO_REG_55617_SHFT  0
+#define HWIO_REG_55617_IN  in_dword_masked(\
+	HWIO_REG_55617_ADDR, HWIO_REG_55617_RMSK)
+#define HWIO_REG_55617_INM(m) \
+	in_dword_masked(HWIO_REG_55617_ADDR, m)
+#define HWIO_REG_55617_OUT(v) \
+	out_dword(HWIO_REG_55617_ADDR, v)
+#define HWIO_REG_55617_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_55617_ADDR, m, v,\
+	HWIO_REG_55617_IN);
+#define HWIO_REG_55617_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_55617_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_555239_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000630)
+#define HWIO_REG_555239_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000630)
+#define HWIO_REG_555239_RMSK  0x1ffff
+#define HWIO_REG_555239_SHFT  0
+#define HWIO_REG_555239_IN  in_dword_masked(\
+	HWIO_REG_555239_ADDR, HWIO_REG_555239_RMSK)
+#define HWIO_REG_555239_INM(m) \
+	in_dword_masked(HWIO_REG_555239_ADDR, m)
+#define HWIO_REG_555239_OUT(v) \
+	out_dword(HWIO_REG_555239_ADDR, v)
+#define HWIO_REG_555239_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_555239_ADDR, m, v,\
+	HWIO_REG_555239_IN);
+#define HWIO_REG_555239_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_555239_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_515333_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000634)
+#define HWIO_REG_515333_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000634)
+#define HWIO_REG_515333_RMSK  0x1ffff
+#define HWIO_REG_515333_SHFT  0
+#define HWIO_REG_515333_IN  in_dword_masked(\
+	HWIO_REG_515333_ADDR, HWIO_REG_515333_RMSK)
+#define HWIO_REG_515333_INM(m) \
+	in_dword_masked(HWIO_REG_515333_ADDR, m)
+#define HWIO_REG_515333_OUT(v) \
+	out_dword(HWIO_REG_515333_ADDR, v)
+#define HWIO_REG_515333_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_515333_ADDR, m, v,\
+	HWIO_REG_515333_IN);
+#define HWIO_REG_515333_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_515333_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_951675_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000638)
+#define HWIO_REG_951675_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000638)
+#define HWIO_REG_951675_RMSK  0x1ffff
+#define HWIO_REG_951675_SHFT  0
+#define HWIO_REG_951675_IN  in_dword_masked(\
+	HWIO_REG_951675_ADDR, HWIO_REG_951675_RMSK)
+#define HWIO_REG_951675_INM(m) \
+	in_dword_masked(HWIO_REG_951675_ADDR, m)
+#define HWIO_REG_951675_OUT(v) \
+	out_dword(HWIO_REG_951675_ADDR, v)
+#define HWIO_REG_951675_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_951675_ADDR, m, v,\
+	HWIO_REG_951675_IN);
+#define HWIO_REG_951675_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_951675_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_500775_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x0000063c)
+#define HWIO_REG_500775_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000063c)
+#define HWIO_REG_500775_RMSK  0x1ffff
+#define HWIO_REG_500775_SHFT  0
+#define HWIO_REG_500775_IN  in_dword_masked(\
+	HWIO_REG_500775_ADDR, HWIO_REG_500775_RMSK)
+#define HWIO_REG_500775_INM(m) \
+	in_dword_masked(HWIO_REG_500775_ADDR, m)
+#define HWIO_REG_500775_OUT(v) \
+	out_dword(HWIO_REG_500775_ADDR, v)
+#define HWIO_REG_500775_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_500775_ADDR, m, v,\
+	HWIO_REG_500775_IN);
+#define HWIO_REG_500775_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_500775_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_649786_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000640)
+#define HWIO_REG_649786_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000640)
+#define HWIO_REG_649786_RMSK  0x1ffff
+#define HWIO_REG_649786_SHFT  0
+#define HWIO_REG_649786_IN  in_dword_masked(\
+	HWIO_REG_649786_ADDR, HWIO_REG_649786_RMSK)
+#define HWIO_REG_649786_INM(m) \
+	in_dword_masked(HWIO_REG_649786_ADDR, m)
+#define HWIO_REG_649786_OUT(v) \
+	out_dword(HWIO_REG_649786_ADDR, v)
+#define HWIO_REG_649786_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_649786_ADDR, m, v,\
+	HWIO_REG_649786_IN);
+#define HWIO_REG_649786_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_649786_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_233366_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000644)
+#define HWIO_REG_233366_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000644)
+#define HWIO_REG_233366_RMSK  0x1ffff
+#define HWIO_REG_233366_SHFT  0
+#define HWIO_REG_233366_IN  in_dword_masked(\
+	HWIO_REG_233366_ADDR, HWIO_REG_233366_RMSK)
+#define HWIO_REG_233366_INM(m) \
+	in_dword_masked(HWIO_REG_233366_ADDR, m)
+#define HWIO_REG_233366_OUT(v) \
+	out_dword(HWIO_REG_233366_ADDR, v)
+#define HWIO_REG_233366_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_233366_ADDR, m, v,\
+	HWIO_REG_233366_IN);
+#define HWIO_REG_233366_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_233366_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_366750_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000648)
+#define HWIO_REG_366750_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000648)
+#define HWIO_REG_366750_RMSK  0x1ffff
+#define HWIO_REG_366750_SHFT  0
+#define HWIO_REG_366750_IN  in_dword_masked(\
+	HWIO_REG_366750_ADDR, HWIO_REG_366750_RMSK)
+#define HWIO_REG_366750_INM(m) \
+	in_dword_masked(HWIO_REG_366750_ADDR, m)
+#define HWIO_REG_366750_OUT(v) \
+	out_dword(HWIO_REG_366750_ADDR, v)
+#define HWIO_REG_366750_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_366750_ADDR, m, v,\
+	HWIO_REG_366750_IN);
+#define HWIO_REG_366750_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_366750_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_616292_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x0000064c)
+#define HWIO_REG_616292_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000064c)
+#define HWIO_REG_616292_RMSK  0x1ffff
+#define HWIO_REG_616292_SHFT  0
+#define HWIO_REG_616292_IN  in_dword_masked(\
+	HWIO_REG_616292_ADDR, HWIO_REG_616292_RMSK)
+#define HWIO_REG_616292_INM(m) \
+	in_dword_masked(HWIO_REG_616292_ADDR, m)
+#define HWIO_REG_616292_OUT(v) \
+	out_dword(HWIO_REG_616292_ADDR, v)
+#define HWIO_REG_616292_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_616292_ADDR, m, v,\
+	HWIO_REG_616292_IN);
+#define HWIO_REG_616292_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_616292_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_666754_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000650)
+#define HWIO_REG_666754_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000650)
+#define HWIO_REG_666754_RMSK  0x1ffff
+#define HWIO_REG_666754_SHFT  0
+#define HWIO_REG_666754_IN  in_dword_masked(\
+	HWIO_REG_666754_ADDR, HWIO_REG_666754_RMSK)
+#define HWIO_REG_666754_INM(m) \
+	in_dword_masked(HWIO_REG_666754_ADDR, m)
+#define HWIO_REG_666754_OUT(v) \
+	out_dword(HWIO_REG_666754_ADDR, v)
+#define HWIO_REG_666754_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_666754_ADDR, m, v,\
+	HWIO_REG_666754_IN);
+#define HWIO_REG_666754_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_666754_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_650155_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000654)
+#define HWIO_REG_650155_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000654)
+#define HWIO_REG_650155_RMSK  0x1ffff
+#define HWIO_REG_650155_SHFT  0
+#define HWIO_REG_650155_IN  in_dword_masked(\
+	HWIO_REG_650155_ADDR, HWIO_REG_650155_RMSK)
+#define HWIO_REG_650155_INM(m) \
+	in_dword_masked(HWIO_REG_650155_ADDR, m)
+#define HWIO_REG_650155_OUT(v) \
+	out_dword(HWIO_REG_650155_ADDR, v)
+#define HWIO_REG_650155_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_650155_ADDR, m, v,\
+	HWIO_REG_650155_IN);
+#define HWIO_REG_650155_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_650155_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_248198_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000658)
+#define HWIO_REG_248198_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000658)
+#define HWIO_REG_248198_RMSK  0x1ffff
+#define HWIO_REG_248198_SHFT  0
+#define HWIO_REG_248198_IN  in_dword_masked(\
+	HWIO_REG_248198_ADDR, HWIO_REG_248198_RMSK)
+#define HWIO_REG_248198_INM(m) \
+	in_dword_masked(HWIO_REG_248198_ADDR, m)
+#define HWIO_REG_248198_OUT(v) \
+	out_dword(HWIO_REG_248198_ADDR, v)
+#define HWIO_REG_248198_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_248198_ADDR, m, v,\
+	HWIO_REG_248198_IN);
+#define HWIO_REG_248198_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_248198_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_389428_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x0000065c)
+#define HWIO_REG_389428_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000065c)
+#define HWIO_REG_389428_RMSK  0x1ffff
+#define HWIO_REG_389428_SHFT  0
+#define HWIO_REG_389428_IN  in_dword_masked(\
+	HWIO_REG_389428_ADDR, HWIO_REG_389428_RMSK)
+#define HWIO_REG_389428_INM(m) \
+	in_dword_masked(HWIO_REG_389428_ADDR, m)
+#define HWIO_REG_389428_OUT(v) \
+	out_dword(HWIO_REG_389428_ADDR, v)
+#define HWIO_REG_389428_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_389428_ADDR, m, v,\
+	HWIO_REG_389428_IN);
+#define HWIO_REG_389428_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_389428_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_504308_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000660)
+#define HWIO_REG_504308_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000660)
+#define HWIO_REG_504308_RMSK  0x1ffff
+#define HWIO_REG_504308_SHFT  0
+#define HWIO_REG_504308_IN  in_dword_masked(\
+	HWIO_REG_504308_ADDR, HWIO_REG_504308_RMSK)
+#define HWIO_REG_504308_INM(m) \
+	in_dword_masked(HWIO_REG_504308_ADDR, m)
+#define HWIO_REG_504308_OUT(v) \
+	out_dword(HWIO_REG_504308_ADDR, v)
+#define HWIO_REG_504308_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_504308_ADDR, m, v,\
+	HWIO_REG_504308_IN);
+#define HWIO_REG_504308_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_504308_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_280814_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000664)
+#define HWIO_REG_280814_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000664)
+#define HWIO_REG_280814_RMSK  0x1ffff
+#define HWIO_REG_280814_SHFT  0
+#define HWIO_REG_280814_IN  in_dword_masked(\
+	HWIO_REG_280814_ADDR, HWIO_REG_280814_RMSK)
+#define HWIO_REG_280814_INM(m) \
+	in_dword_masked(HWIO_REG_280814_ADDR, m)
+#define HWIO_REG_280814_OUT(v) \
+	out_dword(HWIO_REG_280814_ADDR, v)
+#define HWIO_REG_280814_OUTM(m, v) \
+	out_dword_masked_ns(HWIO_REG_280814_ADDR, m, v,\
+	HWIO_REG_280814_IN);
+#define HWIO_REG_280814_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_280814_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_785484_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000668)
+#define HWIO_REG_785484_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000668)
+#define HWIO_REG_785484_RMSK  0x1ffff
+#define HWIO_REG_785484_SHFT  0
+#define HWIO_REG_785484_IN  in_dword_masked(\
+	HWIO_REG_785484_ADDR, HWIO_REG_785484_RMSK)
+#define HWIO_REG_785484_INM(m) \
+	in_dword_masked(HWIO_REG_785484_ADDR, m)
+#define HWIO_REG_785484_OUT(v) \
+	out_dword(HWIO_REG_785484_ADDR, v)
+#define HWIO_REG_785484_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_785484_ADDR, m, v,\
+	HWIO_REG_785484_IN);
+#define HWIO_REG_785484_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_785484_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_218455_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x0000066c)
+#define HWIO_REG_218455_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000066c)
+#define HWIO_REG_218455_RMSK  0x1ffff
+#define HWIO_REG_218455_SHFT  0
+#define HWIO_REG_218455_IN  in_dword_masked(\
+	HWIO_REG_218455_ADDR, HWIO_REG_218455_RMSK)
+#define HWIO_REG_218455_INM(m) \
+	in_dword_masked(HWIO_REG_218455_ADDR, m)
+#define HWIO_REG_218455_OUT(v) \
+	out_dword(HWIO_REG_218455_ADDR, v)
+#define HWIO_REG_218455_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_218455_ADDR, m, v,\
+	HWIO_REG_218455_IN);
+#define HWIO_REG_218455_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_218455_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_886591_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000670)
+#define HWIO_REG_886591_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000670)
+#define HWIO_REG_886591_RMSK  0x1ffff
+#define HWIO_REG_886591_SHFT  0
+#define HWIO_REG_886591_IN  in_dword_masked(\
+	HWIO_REG_886591_ADDR, HWIO_REG_886591_RMSK)
+#define HWIO_REG_886591_INM(m) \
+	in_dword_masked(HWIO_REG_886591_ADDR, m)
+#define HWIO_REG_886591_OUT(v) \
+	out_dword(HWIO_REG_886591_ADDR, v)
+#define HWIO_REG_886591_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_886591_ADDR, m, v,\
+	HWIO_REG_886591_IN);
+#define HWIO_REG_886591_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_886591_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_912449_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000674)
+#define HWIO_REG_912449_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000674)
+#define HWIO_REG_912449_RMSK  0x1ffff
+#define HWIO_REG_912449_SHFT  0
+#define HWIO_REG_912449_IN  in_dword_masked(\
+	HWIO_REG_912449_ADDR, HWIO_REG_912449_RMSK)
+#define HWIO_REG_912449_INM(m) \
+	in_dword_masked(HWIO_REG_912449_ADDR, m)
+#define HWIO_REG_912449_OUT(v) \
+	out_dword(HWIO_REG_912449_ADDR, v)
+#define HWIO_REG_912449_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_912449_ADDR, m, v,\
+	HWIO_REG_912449_IN);
+#define HWIO_REG_912449_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_912449_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_1065_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000678)
+#define HWIO_REG_1065_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000678)
+#define HWIO_REG_1065_RMSK  0x1ffff
+#define HWIO_REG_1065_SHFT  0
+#define HWIO_REG_1065_IN  in_dword_masked(\
+	HWIO_REG_1065_ADDR, HWIO_REG_1065_RMSK)
+#define HWIO_REG_1065_INM(m) \
+	in_dword_masked(HWIO_REG_1065_ADDR, m)
+#define HWIO_REG_1065_OUT(v) \
+	out_dword(HWIO_REG_1065_ADDR, v)
+#define HWIO_REG_1065_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_1065_ADDR, m, v,\
+	HWIO_REG_1065_IN);
+#define HWIO_REG_1065_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_1065_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_61838_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x0000067c)
+#define HWIO_REG_61838_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000067c)
+#define HWIO_REG_61838_RMSK  0x1ffff
+#define HWIO_REG_61838_SHFT  0
+#define HWIO_REG_61838_IN  in_dword_masked(\
+	HWIO_REG_61838_ADDR, HWIO_REG_61838_RMSK)
+#define HWIO_REG_61838_INM(m) \
+	in_dword_masked(HWIO_REG_61838_ADDR, m)
+#define HWIO_REG_61838_OUT(v) \
+	out_dword(HWIO_REG_61838_ADDR, v)
+#define HWIO_REG_61838_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_61838_ADDR, m, v,\
+	HWIO_REG_61838_IN);
+#define HWIO_REG_61838_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_61838_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_169838_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000680)
+#define HWIO_REG_169838_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000680)
+#define HWIO_REG_169838_RMSK  0x1ffff
+#define HWIO_REG_169838_SHFT  0
+#define HWIO_REG_169838_IN  in_dword_masked(\
+	HWIO_REG_169838_ADDR, HWIO_REG_169838_RMSK)
+#define HWIO_REG_169838_INM(m) \
+	in_dword_masked(HWIO_REG_169838_ADDR, m)
+#define HWIO_REG_169838_OUT(v) \
+	out_dword(HWIO_REG_169838_ADDR, v)
+#define HWIO_REG_169838_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_169838_ADDR, m, v,\
+	HWIO_REG_169838_IN);
+#define HWIO_REG_169838_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_169838_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_986147_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000684)
+#define HWIO_REG_986147_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000684)
+#define HWIO_REG_986147_RMSK  0x1ffff
+#define HWIO_REG_986147_SHFT  0
+#define HWIO_REG_986147_IN  in_dword_masked(\
+	HWIO_REG_986147_ADDR, HWIO_REG_986147_RMSK)
+#define HWIO_REG_986147_INM(m) \
+	in_dword_masked(HWIO_REG_986147_ADDR, m)
+#define HWIO_REG_986147_OUT(v) \
+	out_dword(HWIO_REG_986147_ADDR, v)
+#define HWIO_REG_986147_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_986147_ADDR, m, v,\
+	HWIO_REG_986147_IN);
+#define HWIO_REG_986147_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_986147_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_678637_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000688)
+#define HWIO_REG_678637_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000688)
+#define HWIO_REG_678637_RMSK  0x1ffff
+#define HWIO_REG_678637_SHFT  0
+#define HWIO_REG_678637_IN  in_dword_masked(\
+	HWIO_REG_678637_ADDR, HWIO_REG_678637_RMSK)
+#define HWIO_REG_678637_INM(m) \
+	in_dword_masked(HWIO_REG_678637_ADDR, m)
+#define HWIO_REG_678637_OUT(v) \
+	out_dword(HWIO_REG_678637_ADDR, v)
+#define HWIO_REG_678637_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_678637_ADDR, m, v,\
+	HWIO_REG_678637_IN);
+#define HWIO_REG_678637_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_678637_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_931311_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x0000068c)
+#define HWIO_REG_931311_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000068c)
+#define HWIO_REG_931311_RMSK  0x1ffff
+#define HWIO_REG_931311_SHFT  0
+#define HWIO_REG_931311_IN  in_dword_masked(\
+	HWIO_REG_931311_ADDR, HWIO_REG_931311_RMSK)
+#define HWIO_REG_931311_INM(m) \
+	in_dword_masked(HWIO_REG_931311_ADDR, m)
+#define HWIO_REG_931311_OUT(v) \
+	out_dword(HWIO_REG_931311_ADDR, v)
+#define HWIO_REG_931311_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_931311_ADDR, m, v,\
+	HWIO_REG_931311_IN);
+#define HWIO_REG_931311_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_931311_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_16277_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000690)
+#define HWIO_REG_16277_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000690)
+#define HWIO_REG_16277_RMSK  0x1ffff
+#define HWIO_REG_16277_SHFT  0
+#define HWIO_REG_16277_IN  in_dword_masked(\
+	HWIO_REG_16277_ADDR, HWIO_REG_16277_RMSK)
+#define HWIO_REG_16277_INM(m) \
+	in_dword_masked(HWIO_REG_16277_ADDR, m)
+#define HWIO_REG_16277_OUT(v) \
+	out_dword(HWIO_REG_16277_ADDR, v)
+#define HWIO_REG_16277_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_16277_ADDR, m, v,\
+	HWIO_REG_16277_IN);
+#define HWIO_REG_16277_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_16277_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_654169_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000694)
+#define HWIO_REG_654169_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000694)
+#define HWIO_REG_654169_RMSK  0x1ffff
+#define HWIO_REG_654169_SHFT  0
+#define HWIO_REG_654169_IN  in_dword_masked(\
+	HWIO_REG_654169_ADDR, HWIO_REG_654169_RMSK)
+#define HWIO_REG_654169_INM(m) \
+	in_dword_masked(HWIO_REG_654169_ADDR, m)
+#define HWIO_REG_654169_OUT(v) \
+	out_dword(HWIO_REG_654169_ADDR, v)
+#define HWIO_REG_654169_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_654169_ADDR, m, v,\
+	HWIO_REG_654169_IN);
+#define HWIO_REG_654169_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_654169_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_802794_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000698)
+#define HWIO_REG_802794_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000698)
+#define HWIO_REG_802794_RMSK  0x1ffff
+#define HWIO_REG_802794_SHFT  0
+#define HWIO_REG_802794_IN  in_dword_masked(\
+	HWIO_REG_802794_ADDR, HWIO_REG_802794_RMSK)
+#define HWIO_REG_802794_INM(m) \
+	in_dword_masked(HWIO_REG_802794_ADDR, m)
+#define HWIO_REG_802794_OUT(v) \
+	out_dword(HWIO_REG_802794_ADDR, v)
+#define HWIO_REG_802794_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_802794_ADDR, m, v,\
+	HWIO_REG_802794_IN);
+#define HWIO_REG_802794_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_802794_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_724376_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x0000069c)
+#define HWIO_REG_724376_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000069c)
+#define HWIO_REG_724376_RMSK  0x1ffff
+#define HWIO_REG_724376_SHFT  0
+#define HWIO_REG_724376_IN  in_dword_masked(\
+	HWIO_REG_724376_ADDR, HWIO_REG_724376_RMSK)
+#define HWIO_REG_724376_INM(m) \
+	in_dword_masked(HWIO_REG_724376_ADDR, m)
+#define HWIO_REG_724376_OUT(v) \
+	out_dword(HWIO_REG_724376_ADDR, v)
+#define HWIO_REG_724376_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_724376_ADDR, m, v,\
+	HWIO_REG_724376_IN);
+#define HWIO_REG_724376_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_724376_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_551674_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000006a0)
+#define HWIO_REG_551674_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000006a0)
+#define HWIO_REG_551674_RMSK  0x1ffff
+#define HWIO_REG_551674_SHFT  0
+#define HWIO_REG_551674_IN  in_dword_masked(\
+	HWIO_REG_551674_ADDR, HWIO_REG_551674_RMSK)
+#define HWIO_REG_551674_INM(m) \
+	in_dword_masked(HWIO_REG_551674_ADDR, m)
+#define HWIO_REG_551674_OUT(v) \
+	out_dword(HWIO_REG_551674_ADDR, v)
+#define HWIO_REG_551674_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_551674_ADDR, m, v,\
+	HWIO_REG_551674_IN);
+#define HWIO_REG_551674_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_551674_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_115991_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000006a4)
+#define HWIO_REG_115991_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000006a4)
+#define HWIO_REG_115991_RMSK  0x1ffff
+#define HWIO_REG_115991_SHFT  0
+#define HWIO_REG_115991_IN  in_dword_masked(\
+	HWIO_REG_115991_ADDR, HWIO_REG_115991_RMSK)
+#define HWIO_REG_115991_INM(m) \
+	in_dword_masked(HWIO_REG_115991_ADDR, m)
+#define HWIO_REG_115991_OUT(v) \
+	out_dword(HWIO_REG_115991_ADDR, v)
+#define HWIO_REG_115991_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_115991_ADDR, m, v,\
+	HWIO_REG_115991_IN);
+#define HWIO_REG_115991_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_115991_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_252167_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000006a8)
+#define HWIO_REG_252167_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000006a8)
+#define HWIO_REG_252167_RMSK  0x1ffff
+#define HWIO_REG_252167_SHFT  0
+#define HWIO_REG_252167_IN  in_dword_masked(\
+	HWIO_REG_252167_ADDR, HWIO_REG_252167_RMSK)
+#define HWIO_REG_252167_INM(m) \
+	in_dword_masked(HWIO_REG_252167_ADDR, m)
+#define HWIO_REG_252167_OUT(v) \
+	out_dword(HWIO_REG_252167_ADDR, v)
+#define HWIO_REG_252167_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_252167_ADDR, m, v,\
+	HWIO_REG_252167_IN);
+#define HWIO_REG_252167_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_252167_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_695516_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000006ac)
+#define HWIO_REG_695516_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000006ac)
+#define HWIO_REG_695516_RMSK  0x1ffff
+#define HWIO_REG_695516_SHFT  0
+#define HWIO_REG_695516_IN  in_dword_masked(\
+	HWIO_REG_695516_ADDR, HWIO_REG_695516_RMSK)
+#define HWIO_REG_695516_INM(m) \
+	in_dword_masked(HWIO_REG_695516_ADDR, m)
+#define HWIO_REG_695516_OUT(v) \
+	out_dword(HWIO_REG_695516_ADDR, v)
+#define HWIO_REG_695516_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_695516_ADDR, m, v,\
+	HWIO_REG_695516_IN);
+#define HWIO_REG_695516_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_695516_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_152193_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000006b0)
+#define HWIO_REG_152193_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000006b0)
+#define HWIO_REG_152193_RMSK  0x1ffff
+#define HWIO_REG_152193_SHFT  0
+#define HWIO_REG_152193_IN  in_dword_masked(\
+	HWIO_REG_152193_ADDR, HWIO_REG_152193_RMSK)
+#define HWIO_REG_152193_INM(m) \
+	in_dword_masked(HWIO_REG_152193_ADDR, m)
+#define HWIO_REG_152193_OUT(v) \
+	out_dword(HWIO_REG_152193_ADDR, v)
+#define HWIO_REG_152193_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_152193_ADDR, m, v,\
+	HWIO_REG_152193_IN);
+#define HWIO_REG_152193_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_152193_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_358705_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000006b4)
+#define HWIO_REG_358705_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000006b4)
+#define HWIO_REG_358705_RMSK  0x1ffff
+#define HWIO_REG_358705_SHFT  0
+#define HWIO_REG_358705_IN  in_dword_masked(\
+	HWIO_REG_358705_ADDR, HWIO_REG_358705_RMSK)
+#define HWIO_REG_358705_INM(m) \
+	in_dword_masked(HWIO_REG_358705_ADDR, m)
+#define HWIO_REG_358705_OUT(v) \
+	out_dword(HWIO_REG_358705_ADDR, v)
+#define HWIO_REG_358705_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_358705_ADDR, m, v,\
+	HWIO_REG_358705_IN);
+#define HWIO_REG_358705_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_358705_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_457068_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000006b8)
+#define HWIO_REG_457068_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000006b8)
+#define HWIO_REG_457068_RMSK  0x1ffff
+#define HWIO_REG_457068_SHFT  0
+#define HWIO_REG_457068_IN  in_dword_masked(\
+	HWIO_REG_457068_ADDR, HWIO_REG_457068_RMSK)
+#define HWIO_REG_457068_INM(m) \
+	in_dword_masked(HWIO_REG_457068_ADDR, m)
+#define HWIO_REG_457068_OUT(v) \
+	out_dword(HWIO_REG_457068_ADDR, v)
+#define HWIO_REG_457068_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_457068_ADDR, m, v,\
+	HWIO_REG_457068_IN);
+#define HWIO_REG_457068_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_457068_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_485412_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000006bc)
+#define HWIO_REG_485412_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000006bc)
+#define HWIO_REG_485412_RMSK  0x1ffff
+#define HWIO_REG_485412_SHFT  0
+#define HWIO_REG_485412_IN  in_dword_masked(\
+	HWIO_REG_485412_ADDR, HWIO_REG_485412_RMSK)
+#define HWIO_REG_485412_INM(m) \
+	in_dword_masked(HWIO_REG_485412_ADDR, m)
+#define HWIO_REG_485412_OUT(v) \
+	out_dword(HWIO_REG_485412_ADDR, v)
+#define HWIO_REG_485412_OUTM(m, v) \
+	out_dword_masked_ns(HWIO_REG_485412_ADDR, m, v,\
+	HWIO_REG_485412_IN);
+#define HWIO_REG_485412_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_485412_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_223131_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000006c0)
+#define HWIO_REG_223131_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000006c0)
+#define HWIO_REG_223131_RMSK  0x1ffff
+#define HWIO_REG_223131_SHFT  0
+#define HWIO_REG_223131_IN  in_dword_masked(\
+	HWIO_REG_223131_ADDR, HWIO_REG_223131_RMSK)
+#define HWIO_REG_223131_INM(m) \
+	in_dword_masked(HWIO_REG_223131_ADDR, m)
+#define HWIO_REG_223131_OUT(v) \
+	out_dword(HWIO_REG_223131_ADDR, v)
+#define HWIO_REG_223131_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_223131_ADDR, m, v,\
+	HWIO_REG_223131_IN);
+#define HWIO_REG_223131_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_223131_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_683737_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000006c4)
+#define HWIO_REG_683737_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000006c4)
+#define HWIO_REG_683737_RMSK  0x1ffff
+#define HWIO_REG_683737_SHFT  0
+#define HWIO_REG_683737_IN  in_dword_masked(\
+	HWIO_REG_683737_ADDR, HWIO_REG_683737_RMSK)
+#define HWIO_REG_683737_INM(m) \
+	in_dword_masked(HWIO_REG_683737_ADDR, m)
+#define HWIO_REG_683737_OUT(v) \
+	out_dword(HWIO_REG_683737_ADDR, v)
+#define HWIO_REG_683737_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_683737_ADDR, m, v,\
+	HWIO_REG_683737_IN);
+#define HWIO_REG_683737_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_683737_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_750474_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000006c8)
+#define HWIO_REG_750474_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000006c8)
+#define HWIO_REG_750474_RMSK  0x1ffff
+#define HWIO_REG_750474_SHFT  0
+#define HWIO_REG_750474_IN  in_dword_masked(\
+	HWIO_REG_750474_ADDR, HWIO_REG_750474_RMSK)
+#define HWIO_REG_750474_INM(m) \
+	in_dword_masked(HWIO_REG_750474_ADDR, m)
+#define HWIO_REG_750474_OUT(v) \
+	out_dword(HWIO_REG_750474_ADDR, v)
+#define HWIO_REG_750474_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_750474_ADDR, m, v,\
+	HWIO_REG_750474_IN);
+#define HWIO_REG_750474_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_750474_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_170086_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000006cc)
+#define HWIO_REG_170086_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000006cc)
+#define HWIO_REG_170086_RMSK  0x1ffff
+#define HWIO_REG_170086_SHFT  0
+#define HWIO_REG_170086_IN  in_dword_masked(\
+	HWIO_REG_170086_ADDR, HWIO_REG_170086_RMSK)
+#define HWIO_REG_170086_INM(m) \
+	in_dword_masked(HWIO_REG_170086_ADDR, m)
+#define HWIO_REG_170086_OUT(v) \
+	out_dword(HWIO_REG_170086_ADDR, v)
+#define HWIO_REG_170086_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_170086_ADDR, m, v,\
+	HWIO_REG_170086_IN);
+#define HWIO_REG_170086_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_170086_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_838595_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000006d0)
+#define HWIO_REG_838595_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000006d0)
+#define HWIO_REG_838595_RMSK  0x1ffff
+#define HWIO_REG_838595_SHFT  0
+#define HWIO_REG_838595_IN  in_dword_masked(\
+	HWIO_REG_838595_ADDR, HWIO_REG_838595_RMSK)
+#define HWIO_REG_838595_INM(m) \
+	in_dword_masked(HWIO_REG_838595_ADDR, m)
+#define HWIO_REG_838595_OUT(v) \
+	out_dword(HWIO_REG_838595_ADDR, v)
+#define HWIO_REG_838595_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_838595_ADDR, m, v,\
+	HWIO_REG_838595_IN);
+#define HWIO_REG_838595_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_838595_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_569788_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000006d4)
+#define HWIO_REG_569788_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000006d4)
+#define HWIO_REG_569788_RMSK  0x1ffff
+#define HWIO_REG_569788_SHFT  0
+#define HWIO_REG_569788_IN  in_dword_masked(\
+	HWIO_REG_569788_ADDR, HWIO_REG_569788_RMSK)
+#define HWIO_REG_569788_INM(m) \
+	in_dword_masked(HWIO_REG_569788_ADDR, m)
+#define HWIO_REG_569788_OUT(v) \
+	out_dword(HWIO_REG_569788_ADDR, v)
+#define HWIO_REG_569788_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_569788_ADDR, m, v,\
+	HWIO_REG_569788_IN);
+#define HWIO_REG_569788_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_569788_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_974527_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000006d8)
+#define HWIO_REG_974527_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000006d8)
+#define HWIO_REG_974527_RMSK  0x1ffff
+#define HWIO_REG_974527_SHFT  0
+#define HWIO_REG_974527_IN  in_dword_masked(\
+	HWIO_REG_974527_ADDR, HWIO_REG_974527_RMSK)
+#define HWIO_REG_974527_INM(m) \
+	in_dword_masked(HWIO_REG_974527_ADDR, m)
+#define HWIO_REG_974527_OUT(v) \
+	out_dword(HWIO_REG_974527_ADDR, v)
+#define HWIO_REG_974527_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_974527_ADDR, m, v,\
+	HWIO_REG_974527_IN);
+#define HWIO_REG_974527_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_974527_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_316806_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000006dc)
+#define HWIO_REG_316806_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000006dc)
+#define HWIO_REG_316806_RMSK  0x1ffff
+#define HWIO_REG_316806_SHFT  0
+#define HWIO_REG_316806_IN  in_dword_masked(\
+	HWIO_REG_316806_ADDR, HWIO_REG_316806_RMSK)
+#define HWIO_REG_316806_INM(m) \
+	in_dword_masked(HWIO_REG_316806_ADDR, m)
+#define HWIO_REG_316806_OUT(v) \
+	out_dword(HWIO_REG_316806_ADDR, v)
+#define HWIO_REG_316806_OUTM(m, v) \
+	out_dword_masked_ns(HWIO_REG_316806_ADDR, m, v,\
+	HWIO_REG_316806_IN);
+#define HWIO_REG_316806_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_316806_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_900472_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000006e0)
+#define HWIO_REG_900472_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000006e0)
+#define HWIO_REG_900472_RMSK  0x1ffff
+#define HWIO_REG_900472_SHFT  0
+#define HWIO_REG_900472_IN  in_dword_masked(\
+	HWIO_REG_900472_ADDR, HWIO_REG_900472_RMSK)
+#define HWIO_REG_900472_INM(m) \
+	in_dword_masked(HWIO_REG_900472_ADDR, m)
+#define HWIO_REG_900472_OUT(v) \
+	out_dword(HWIO_REG_900472_ADDR, v)
+#define HWIO_REG_900472_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_900472_ADDR, m, v,\
+	HWIO_REG_900472_IN);
+#define HWIO_REG_900472_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_900472_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_256156_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000006e4)
+#define HWIO_REG_256156_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000006e4)
+#define HWIO_REG_256156_RMSK  0x1ffff
+#define HWIO_REG_256156_SHFT  0
+#define HWIO_REG_256156_IN  in_dword_masked(\
+	HWIO_REG_256156_ADDR, HWIO_REG_256156_RMSK)
+#define HWIO_REG_256156_INM(m) \
+	in_dword_masked(HWIO_REG_256156_ADDR, m)
+#define HWIO_REG_256156_OUT(v) \
+	out_dword(HWIO_REG_256156_ADDR, v)
+#define HWIO_REG_256156_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_256156_ADDR, m, v,\
+	HWIO_REG_256156_IN);
+#define HWIO_REG_256156_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_256156_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_335729_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000006e8)
+#define HWIO_REG_335729_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000006e8)
+#define HWIO_REG_335729_RMSK  0x1ffff
+#define HWIO_REG_335729_SHFT  0
+#define HWIO_REG_335729_IN  in_dword_masked(\
+	HWIO_REG_335729_ADDR, HWIO_REG_335729_RMSK)
+#define HWIO_REG_335729_INM(m) \
+	in_dword_masked(HWIO_REG_335729_ADDR, m)
+#define HWIO_REG_335729_OUT(v) \
+	out_dword(HWIO_REG_335729_ADDR, v)
+#define HWIO_REG_335729_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_335729_ADDR, m, v,\
+	HWIO_REG_335729_IN);
+#define HWIO_REG_335729_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_335729_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_303383_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000006ec)
+#define HWIO_REG_303383_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000006ec)
+#define HWIO_REG_303383_RMSK  0x1ffff
+#define HWIO_REG_303383_SHFT  0
+#define HWIO_REG_303383_IN  in_dword_masked(\
+	HWIO_REG_303383_ADDR, HWIO_REG_303383_RMSK)
+#define HWIO_REG_303383_INM(m) \
+	in_dword_masked(HWIO_REG_303383_ADDR, m)
+#define HWIO_REG_303383_OUT(v) \
+	out_dword(HWIO_REG_303383_ADDR, v)
+#define HWIO_REG_303383_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_303383_ADDR, m, v,\
+	HWIO_REG_303383_IN);
+#define HWIO_REG_303383_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_303383_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_180871_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000006f0)
+#define HWIO_REG_180871_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000006f0)
+#define HWIO_REG_180871_RMSK  0x1ffff
+#define HWIO_REG_180871_SHFT  0
+#define HWIO_REG_180871_IN  in_dword_masked(\
+	HWIO_REG_180871_ADDR, HWIO_REG_180871_RMSK)
+#define HWIO_REG_180871_INM(m) \
+	in_dword_masked(HWIO_REG_180871_ADDR, m)
+#define HWIO_REG_180871_OUT(v) \
+	out_dword(HWIO_REG_180871_ADDR, v)
+#define HWIO_REG_180871_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_180871_ADDR, m, v,\
+	HWIO_REG_180871_IN);
+#define HWIO_REG_180871_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_180871_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_514148_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000006f4)
+#define HWIO_REG_514148_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000006f4)
+#define HWIO_REG_514148_RMSK  0x1ffff
+#define HWIO_REG_514148_SHFT  0
+#define HWIO_REG_514148_IN  in_dword_masked(\
+	HWIO_REG_514148_ADDR, HWIO_REG_514148_RMSK)
+#define HWIO_REG_514148_INM(m) \
+	in_dword_masked(HWIO_REG_514148_ADDR, m)
+#define HWIO_REG_514148_OUT(v) \
+	out_dword(HWIO_REG_514148_ADDR, v)
+#define HWIO_REG_514148_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_514148_ADDR, m, v,\
+	HWIO_REG_514148_IN);
+#define HWIO_REG_514148_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_514148_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_578636_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000006f8)
+#define HWIO_REG_578636_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000006f8)
+#define HWIO_REG_578636_RMSK  0x1ffff
+#define HWIO_REG_578636_SHFT  0
+#define HWIO_REG_578636_IN  in_dword_masked(\
+	HWIO_REG_578636_ADDR, HWIO_REG_578636_RMSK)
+#define HWIO_REG_578636_INM(m) \
+	in_dword_masked(HWIO_REG_578636_ADDR, m)
+#define HWIO_REG_578636_OUT(v) \
+	out_dword(HWIO_REG_578636_ADDR, v)
+#define HWIO_REG_578636_OUTM(m, v) \
+	out_dword_masked_ns(HWIO_REG_578636_ADDR, m, v,\
+	HWIO_REG_578636_IN);
+#define HWIO_REG_578636_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_578636_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_888116_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000006fc)
+#define HWIO_REG_888116_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000006fc)
+#define HWIO_REG_888116_RMSK  0x1ffff
+#define HWIO_REG_888116_SHFT  0
+#define HWIO_REG_888116_IN  in_dword_masked(\
+	HWIO_REG_888116_ADDR, HWIO_REG_888116_RMSK)
+#define HWIO_REG_888116_INM(m) \
+	in_dword_masked(HWIO_REG_888116_ADDR, m)
+#define HWIO_REG_888116_OUT(v) \
+	out_dword(HWIO_REG_888116_ADDR, v)
+#define HWIO_REG_888116_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_888116_ADDR, m, v,\
+	HWIO_REG_888116_IN);
+#define HWIO_REG_888116_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_888116_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_759068_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000700)
+#define HWIO_REG_759068_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000700)
+#define HWIO_REG_759068_RMSK  0x1ffff
+#define HWIO_REG_759068_SHFT  0
+#define HWIO_REG_759068_IN  in_dword_masked(\
+	HWIO_REG_759068_ADDR, HWIO_REG_759068_RMSK)
+#define HWIO_REG_759068_INM(m) \
+	in_dword_masked(HWIO_REG_759068_ADDR, m)
+#define HWIO_REG_759068_OUT(v) \
+	out_dword(HWIO_REG_759068_ADDR, v)
+#define HWIO_REG_759068_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_759068_ADDR, m, v,\
+	HWIO_REG_759068_IN);
+#define HWIO_REG_759068_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_759068_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_68356_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000704)
+#define HWIO_REG_68356_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000704)
+#define HWIO_REG_68356_RMSK  0x1ffff
+#define HWIO_REG_68356_SHFT  0
+#define HWIO_REG_68356_IN  in_dword_masked(\
+	HWIO_REG_68356_ADDR, HWIO_REG_68356_RMSK)
+#define HWIO_REG_68356_INM(m) \
+	in_dword_masked(HWIO_REG_68356_ADDR, m)
+#define HWIO_REG_68356_OUT(v) \
+	out_dword(HWIO_REG_68356_ADDR, v)
+#define HWIO_REG_68356_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_68356_ADDR, m, v,\
+	HWIO_REG_68356_IN);
+#define HWIO_REG_68356_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_68356_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_833502_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000708)
+#define HWIO_REG_833502_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000708)
+#define HWIO_REG_833502_RMSK  0x1ffff
+#define HWIO_REG_833502_SHFT  0
+#define HWIO_REG_833502_IN  in_dword_masked(\
+	HWIO_REG_833502_ADDR, HWIO_REG_833502_RMSK)
+#define HWIO_REG_833502_INM(m) \
+	in_dword_masked(HWIO_REG_833502_ADDR, m)
+#define HWIO_REG_833502_OUT(v) \
+	out_dword(HWIO_REG_833502_ADDR, v)
+#define HWIO_REG_833502_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_833502_ADDR, m, v,\
+	HWIO_REG_833502_IN);
+#define HWIO_REG_833502_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_833502_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_127855_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x0000070c)
+#define HWIO_REG_127855_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000070c)
+#define HWIO_REG_127855_RMSK  0x1ffff
+#define HWIO_REG_127855_SHFT  0
+#define HWIO_REG_127855_IN  in_dword_masked(\
+	HWIO_REG_127855_ADDR, HWIO_REG_127855_RMSK)
+#define HWIO_REG_127855_INM(m) \
+	in_dword_masked(HWIO_REG_127855_ADDR, m)
+#define HWIO_REG_127855_OUT(v) \
+	out_dword(HWIO_REG_127855_ADDR, v)
+#define HWIO_REG_127855_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_127855_ADDR, m, v,\
+	HWIO_REG_127855_IN);
+#define HWIO_REG_127855_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_127855_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_616802_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000710)
+#define HWIO_REG_616802_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000710)
+#define HWIO_REG_616802_RMSK  0x1ffff
+#define HWIO_REG_616802_SHFT  0
+#define HWIO_REG_616802_IN  in_dword_masked(\
+	HWIO_REG_616802_ADDR, HWIO_REG_616802_RMSK)
+#define HWIO_REG_616802_INM(m) \
+	in_dword_masked(HWIO_REG_616802_ADDR, m)
+#define HWIO_REG_616802_OUT(v) \
+	out_dword(HWIO_REG_616802_ADDR, v)
+#define HWIO_REG_616802_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_616802_ADDR, m, v,\
+	HWIO_REG_616802_IN);
+#define HWIO_REG_616802_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_616802_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_23318_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000714)
+#define HWIO_REG_23318_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000714)
+#define HWIO_REG_23318_RMSK  0x1ffff
+#define HWIO_REG_23318_SHFT  0
+#define HWIO_REG_23318_IN  in_dword_masked(\
+	HWIO_REG_23318_ADDR, HWIO_REG_23318_RMSK)
+#define HWIO_REG_23318_INM(m) \
+	in_dword_masked(HWIO_REG_23318_ADDR, m)
+#define HWIO_REG_23318_OUT(v) \
+	out_dword(HWIO_REG_23318_ADDR, v)
+#define HWIO_REG_23318_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_23318_ADDR, m, v,\
+	HWIO_REG_23318_IN);
+#define HWIO_REG_23318_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_23318_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_317106_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000718)
+#define HWIO_REG_317106_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000718)
+#define HWIO_REG_317106_RMSK  0x1ffff
+#define HWIO_REG_317106_SHFT  0
+#define HWIO_REG_317106_IN  in_dword_masked(\
+	HWIO_REG_317106_ADDR, HWIO_REG_317106_RMSK)
+#define HWIO_REG_317106_INM(m) \
+	in_dword_masked(HWIO_REG_317106_ADDR, m)
+#define HWIO_REG_317106_OUT(v) \
+	out_dword(HWIO_REG_317106_ADDR, v)
+#define HWIO_REG_317106_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_317106_ADDR, m, v,\
+	HWIO_REG_317106_IN);
+#define HWIO_REG_317106_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_317106_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_603772_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x0000071c)
+#define HWIO_REG_603772_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000071c)
+#define HWIO_REG_603772_RMSK  0x1ffff
+#define HWIO_REG_603772_SHFT  0
+#define HWIO_REG_603772_IN  in_dword_masked(\
+	HWIO_REG_603772_ADDR, HWIO_REG_603772_RMSK)
+#define HWIO_REG_603772_INM(m) \
+	in_dword_masked(HWIO_REG_603772_ADDR, m)
+#define HWIO_REG_603772_OUT(v) \
+	out_dword(HWIO_REG_603772_ADDR, v)
+#define HWIO_REG_603772_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_603772_ADDR, m, v,\
+	HWIO_REG_603772_IN);
+#define HWIO_REG_603772_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_603772_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_175929_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000720)
+#define HWIO_REG_175929_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000720)
+#define HWIO_REG_175929_RMSK  0x1ffff
+#define HWIO_REG_175929_SHFT  0
+#define HWIO_REG_175929_IN  in_dword_masked(\
+	HWIO_REG_175929_ADDR, HWIO_REG_175929_RMSK)
+#define HWIO_REG_175929_INM(m) \
+	in_dword_masked(HWIO_REG_175929_ADDR, m)
+#define HWIO_REG_175929_OUT(v) \
+	out_dword(HWIO_REG_175929_ADDR, v)
+#define HWIO_REG_175929_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_175929_ADDR, m, v,\
+	HWIO_REG_175929_IN);
+#define HWIO_REG_175929_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_175929_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_11928_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000724)
+#define HWIO_REG_11928_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000724)
+#define HWIO_REG_11928_RMSK  0x1ffff
+#define HWIO_REG_11928_SHFT  0
+#define HWIO_REG_11928_IN  in_dword_masked(\
+	HWIO_REG_11928_ADDR, HWIO_REG_11928_RMSK)
+#define HWIO_REG_11928_INM(m) \
+	in_dword_masked(HWIO_REG_11928_ADDR, m)
+#define HWIO_REG_11928_OUT(v) \
+	out_dword(HWIO_REG_11928_ADDR, v)
+#define HWIO_REG_11928_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_11928_ADDR, m, v,\
+	HWIO_REG_11928_IN);
+#define HWIO_REG_11928_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_11928_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_772678_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000728)
+#define HWIO_REG_772678_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000728)
+#define HWIO_REG_772678_RMSK  0x1ffff
+#define HWIO_REG_772678_SHFT  0
+#define HWIO_REG_772678_IN  in_dword_masked(\
+	HWIO_REG_772678_ADDR, HWIO_REG_772678_RMSK)
+#define HWIO_REG_772678_INM(m) \
+	in_dword_masked(HWIO_REG_772678_ADDR, m)
+#define HWIO_REG_772678_OUT(v) \
+	out_dword(HWIO_REG_772678_ADDR, v)
+#define HWIO_REG_772678_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_772678_ADDR, m, v,\
+	HWIO_REG_772678_IN);
+#define HWIO_REG_772678_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_772678_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_603389_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x0000072c)
+#define HWIO_REG_603389_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000072c)
+#define HWIO_REG_603389_RMSK  0x1ffff
+#define HWIO_REG_603389_SHFT  0
+#define HWIO_REG_603389_IN  in_dword_masked(\
+	HWIO_REG_603389_ADDR, HWIO_REG_603389_RMSK)
+#define HWIO_REG_603389_INM(m) \
+	in_dword_masked(HWIO_REG_603389_ADDR, m)
+#define HWIO_REG_603389_OUT(v) \
+	out_dword(HWIO_REG_603389_ADDR, v)
+#define HWIO_REG_603389_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_603389_ADDR, m, v,\
+	HWIO_REG_603389_IN);
+#define HWIO_REG_603389_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_603389_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_989918_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000730)
+#define HWIO_REG_989918_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000730)
+#define HWIO_REG_989918_RMSK  0x1ffff
+#define HWIO_REG_989918_SHFT  0
+#define HWIO_REG_989918_IN  in_dword_masked(\
+	HWIO_REG_989918_ADDR, HWIO_REG_989918_RMSK)
+#define HWIO_REG_989918_INM(m) \
+	in_dword_masked(HWIO_REG_989918_ADDR, m)
+#define HWIO_REG_989918_OUT(v) \
+	out_dword(HWIO_REG_989918_ADDR, v)
+#define HWIO_REG_989918_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_989918_ADDR, m, v,\
+	HWIO_REG_989918_IN);
+#define HWIO_REG_989918_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_989918_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_5460_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000734)
+#define HWIO_REG_5460_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000734)
+#define HWIO_REG_5460_RMSK  0x1ffff
+#define HWIO_REG_5460_SHFT  0
+#define HWIO_REG_5460_IN  in_dword_masked(\
+	HWIO_REG_5460_ADDR, HWIO_REG_5460_RMSK)
+#define HWIO_REG_5460_INM(m) \
+	in_dword_masked(HWIO_REG_5460_ADDR, m)
+#define HWIO_REG_5460_OUT(v) \
+	out_dword(HWIO_REG_5460_ADDR, v)
+#define HWIO_REG_5460_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_5460_ADDR, m, v,\
+	HWIO_REG_5460_IN);
+#define HWIO_REG_5460_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_5460_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_734724_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000738)
+#define HWIO_REG_734724_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000738)
+#define HWIO_REG_734724_RMSK  0x1ffff
+#define HWIO_REG_734724_SHFT  0
+#define HWIO_REG_734724_IN  in_dword_masked(\
+	HWIO_REG_734724_ADDR, HWIO_REG_734724_RMSK)
+#define HWIO_REG_734724_INM(m) \
+	in_dword_masked(HWIO_REG_734724_ADDR, m)
+#define HWIO_REG_734724_OUT(v) \
+	out_dword(HWIO_REG_734724_ADDR, v)
+#define HWIO_REG_734724_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_734724_ADDR, m, v,\
+	HWIO_REG_734724_IN);
+#define HWIO_REG_734724_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_734724_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_451742_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x0000073c)
+#define HWIO_REG_451742_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000073c)
+#define HWIO_REG_451742_RMSK  0x1ffff
+#define HWIO_REG_451742_SHFT  0
+#define HWIO_REG_451742_IN  in_dword_masked(\
+	HWIO_REG_451742_ADDR, HWIO_REG_451742_RMSK)
+#define HWIO_REG_451742_INM(m) \
+	in_dword_masked(HWIO_REG_451742_ADDR, m)
+#define HWIO_REG_451742_OUT(v) \
+	out_dword(HWIO_REG_451742_ADDR, v)
+#define HWIO_REG_451742_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_451742_ADDR, m, v,\
+	HWIO_REG_451742_IN);
+#define HWIO_REG_451742_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_451742_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_475648_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000740)
+#define HWIO_REG_475648_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000740)
+#define HWIO_REG_475648_RMSK  0x1ffff
+#define HWIO_REG_475648_SHFT  0
+#define HWIO_REG_475648_IN  in_dword_masked(\
+	HWIO_REG_475648_ADDR, HWIO_REG_475648_RMSK)
+#define HWIO_REG_475648_INM(m) \
+	in_dword_masked(HWIO_REG_475648_ADDR, m)
+#define HWIO_REG_475648_OUT(v) \
+	out_dword(HWIO_REG_475648_ADDR, v)
+#define HWIO_REG_475648_OUTM(m, v) \
+	out_dword_masked_ns(HWIO_REG_475648_ADDR, m, v,\
+	HWIO_REG_475648_IN);
+#define HWIO_REG_475648_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_475648_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_284758_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000744)
+#define HWIO_REG_284758_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000744)
+#define HWIO_REG_284758_RMSK  0x1ffff
+#define HWIO_REG_284758_SHFT  0
+#define HWIO_REG_284758_IN  in_dword_masked(\
+	HWIO_REG_284758_ADDR, HWIO_REG_284758_RMSK)
+#define HWIO_REG_284758_INM(m) \
+	in_dword_masked(HWIO_REG_284758_ADDR, m)
+#define HWIO_REG_284758_OUT(v) \
+	out_dword(HWIO_REG_284758_ADDR, v)
+#define HWIO_REG_284758_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_284758_ADDR, m, v,\
+	HWIO_REG_284758_IN);
+#define HWIO_REG_284758_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_284758_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_523659_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000748)
+#define HWIO_REG_523659_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000748)
+#define HWIO_REG_523659_RMSK  0x1ffff
+#define HWIO_REG_523659_SHFT  0
+#define HWIO_REG_523659_IN  in_dword_masked(\
+	HWIO_REG_523659_ADDR, HWIO_REG_523659_RMSK)
+#define HWIO_REG_523659_INM(m) \
+	in_dword_masked(HWIO_REG_523659_ADDR, m)
+#define HWIO_REG_523659_OUT(v) \
+	out_dword(HWIO_REG_523659_ADDR, v)
+#define HWIO_REG_523659_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_523659_ADDR, m, v,\
+	HWIO_REG_523659_IN);
+#define HWIO_REG_523659_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_523659_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_815580_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x0000074c)
+#define HWIO_REG_815580_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000074c)
+#define HWIO_REG_815580_RMSK  0x1ffff
+#define HWIO_REG_815580_SHFT  0
+#define HWIO_REG_815580_IN  in_dword_masked(\
+	HWIO_REG_815580_ADDR, HWIO_REG_815580_RMSK)
+#define HWIO_REG_815580_INM(m) \
+	in_dword_masked(HWIO_REG_815580_ADDR, m)
+#define HWIO_REG_815580_OUT(v) \
+	out_dword(HWIO_REG_815580_ADDR, v)
+#define HWIO_REG_815580_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_815580_ADDR, m, v,\
+	HWIO_REG_815580_IN);
+#define HWIO_REG_815580_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_815580_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_546551_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000750)
+#define HWIO_REG_546551_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000750)
+#define HWIO_REG_546551_RMSK  0x1ffff
+#define HWIO_REG_546551_SHFT  0
+#define HWIO_REG_546551_IN  in_dword_masked(\
+	HWIO_REG_546551_ADDR, HWIO_REG_546551_RMSK)
+#define HWIO_REG_546551_INM(m) \
+	in_dword_masked(HWIO_REG_546551_ADDR, m)
+#define HWIO_REG_546551_OUT(v) \
+	out_dword(HWIO_REG_546551_ADDR, v)
+#define HWIO_REG_546551_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_546551_ADDR, m, v,\
+	HWIO_REG_546551_IN);
+#define HWIO_REG_546551_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_546551_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_769851_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000754)
+#define HWIO_REG_769851_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000754)
+#define HWIO_REG_769851_RMSK  0x1ffff
+#define HWIO_REG_769851_SHFT  0
+#define HWIO_REG_769851_IN  in_dword_masked(\
+	HWIO_REG_769851_ADDR, HWIO_REG_769851_RMSK)
+#define HWIO_REG_769851_INM(m) \
+	in_dword_masked(HWIO_REG_769851_ADDR, m)
+#define HWIO_REG_769851_OUT(v) \
+	out_dword(HWIO_REG_769851_ADDR, v)
+#define HWIO_REG_769851_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_769851_ADDR, m, v,\
+	HWIO_REG_769851_IN);
+#define HWIO_REG_769851_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_769851_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_205028_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000758)
+#define HWIO_REG_205028_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000758)
+#define HWIO_REG_205028_RMSK  0x1ffff
+#define HWIO_REG_205028_SHFT  0
+#define HWIO_REG_205028_IN  in_dword_masked(\
+	HWIO_REG_205028_ADDR, HWIO_REG_205028_RMSK)
+#define HWIO_REG_205028_INM(m) \
+	in_dword_masked(HWIO_REG_205028_ADDR, m)
+#define HWIO_REG_205028_OUT(v) \
+	out_dword(HWIO_REG_205028_ADDR, v)
+#define HWIO_REG_205028_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_205028_ADDR, m, v,\
+	HWIO_REG_205028_IN);
+#define HWIO_REG_205028_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_205028_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_206835_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x0000075c)
+#define HWIO_REG_206835_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000075c)
+#define HWIO_REG_206835_RMSK  0x1ffff
+#define HWIO_REG_206835_SHFT  0
+#define HWIO_REG_206835_IN  in_dword_masked(\
+	HWIO_REG_206835_ADDR, HWIO_REG_206835_RMSK)
+#define HWIO_REG_206835_INM(m) \
+	in_dword_masked(HWIO_REG_206835_ADDR, m)
+#define HWIO_REG_206835_OUT(v) \
+	out_dword(HWIO_REG_206835_ADDR, v)
+#define HWIO_REG_206835_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_206835_ADDR, m, v,\
+	HWIO_REG_206835_IN);
+#define HWIO_REG_206835_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_206835_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_582575_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000760)
+#define HWIO_REG_582575_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000760)
+#define HWIO_REG_582575_RMSK  0x1ffff
+#define HWIO_REG_582575_SHFT  0
+#define HWIO_REG_582575_IN  in_dword_masked(\
+	HWIO_REG_582575_ADDR, HWIO_REG_582575_RMSK)
+#define HWIO_REG_582575_INM(m) \
+	in_dword_masked(HWIO_REG_582575_ADDR, m)
+#define HWIO_REG_582575_OUT(v) \
+	out_dword(HWIO_REG_582575_ADDR, v)
+#define HWIO_REG_582575_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_582575_ADDR, m, v,\
+	HWIO_REG_582575_IN);
+#define HWIO_REG_582575_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_582575_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_120885_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000764)
+#define HWIO_REG_120885_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000764)
+#define HWIO_REG_120885_RMSK  0x1ffff
+#define HWIO_REG_120885_SHFT  0
+#define HWIO_REG_120885_IN  in_dword_masked(\
+	HWIO_REG_120885_ADDR, HWIO_REG_120885_RMSK)
+#define HWIO_REG_120885_INM(m) \
+	in_dword_masked(HWIO_REG_120885_ADDR, m)
+#define HWIO_REG_120885_OUT(v) \
+	out_dword(HWIO_REG_120885_ADDR, v)
+#define HWIO_REG_120885_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_120885_ADDR, m, v,\
+	HWIO_REG_120885_IN);
+#define HWIO_REG_120885_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_120885_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_496067_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000768)
+#define HWIO_REG_496067_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000768)
+#define HWIO_REG_496067_RMSK  0x1ffff
+#define HWIO_REG_496067_SHFT  0
+#define HWIO_REG_496067_IN  in_dword_masked(\
+	HWIO_REG_496067_ADDR, HWIO_REG_496067_RMSK)
+#define HWIO_REG_496067_INM(m) \
+	in_dword_masked(HWIO_REG_496067_ADDR, m)
+#define HWIO_REG_496067_OUT(v) \
+	out_dword(HWIO_REG_496067_ADDR, v)
+#define HWIO_REG_496067_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_496067_ADDR, m, v,\
+	HWIO_REG_496067_IN);
+#define HWIO_REG_496067_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_496067_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_472919_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x0000076c)
+#define HWIO_REG_472919_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000076c)
+#define HWIO_REG_472919_RMSK  0x1ffff
+#define HWIO_REG_472919_SHFT  0
+#define HWIO_REG_472919_IN  in_dword_masked(\
+	HWIO_REG_472919_ADDR, HWIO_REG_472919_RMSK)
+#define HWIO_REG_472919_INM(m) \
+	in_dword_masked(HWIO_REG_472919_ADDR, m)
+#define HWIO_REG_472919_OUT(v) \
+	out_dword(HWIO_REG_472919_ADDR, v)
+#define HWIO_REG_472919_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_472919_ADDR, m, v,\
+	HWIO_REG_472919_IN);
+#define HWIO_REG_472919_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_472919_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_486985_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000770)
+#define HWIO_REG_486985_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000770)
+#define HWIO_REG_486985_RMSK  0x1ffff
+#define HWIO_REG_486985_SHFT  0
+#define HWIO_REG_486985_IN  in_dword_masked(\
+	HWIO_REG_486985_ADDR, HWIO_REG_486985_RMSK)
+#define HWIO_REG_486985_INM(m) \
+	in_dword_masked(HWIO_REG_486985_ADDR, m)
+#define HWIO_REG_486985_OUT(v) \
+	out_dword(HWIO_REG_486985_ADDR, v)
+#define HWIO_REG_486985_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_486985_ADDR, m, v,\
+	HWIO_REG_486985_IN);
+#define HWIO_REG_486985_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_486985_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_964692_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000774)
+#define HWIO_REG_964692_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000774)
+#define HWIO_REG_964692_RMSK  0x1ffff
+#define HWIO_REG_964692_SHFT  0
+#define HWIO_REG_964692_IN  in_dword_masked(\
+	HWIO_REG_964692_ADDR, HWIO_REG_964692_RMSK)
+#define HWIO_REG_964692_INM(m) \
+	in_dword_masked(HWIO_REG_964692_ADDR, m)
+#define HWIO_REG_964692_OUT(v) \
+	out_dword(HWIO_REG_964692_ADDR, v)
+#define HWIO_REG_964692_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_964692_ADDR, m, v,\
+	HWIO_REG_964692_IN);
+#define HWIO_REG_964692_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_964692_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_941116_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000778)
+#define HWIO_REG_941116_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000778)
+#define HWIO_REG_941116_RMSK  0x1ffff
+#define HWIO_REG_941116_SHFT  0
+#define HWIO_REG_941116_IN  in_dword_masked(\
+	HWIO_REG_941116_ADDR, HWIO_REG_941116_RMSK)
+#define HWIO_REG_941116_INM(m) \
+	in_dword_masked(HWIO_REG_941116_ADDR, m)
+#define HWIO_REG_941116_OUT(v) \
+	out_dword(HWIO_REG_941116_ADDR, v)
+#define HWIO_REG_941116_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_941116_ADDR, m, v,\
+	HWIO_REG_941116_IN);
+#define HWIO_REG_941116_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_941116_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_122567_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x0000077c)
+#define HWIO_REG_122567_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000077c)
+#define HWIO_REG_122567_RMSK  0x1ffff
+#define HWIO_REG_122567_SHFT  0
+#define HWIO_REG_122567_IN  in_dword_masked(\
+	HWIO_REG_122567_ADDR, HWIO_REG_122567_RMSK)
+#define HWIO_REG_122567_INM(m) \
+	in_dword_masked(HWIO_REG_122567_ADDR, m)
+#define HWIO_REG_122567_OUT(v) \
+	out_dword(HWIO_REG_122567_ADDR, v)
+#define HWIO_REG_122567_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_122567_ADDR, m, v,\
+	HWIO_REG_122567_IN);
+#define HWIO_REG_122567_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_122567_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_466192_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000780)
+#define HWIO_REG_466192_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000780)
+#define HWIO_REG_466192_RMSK  0x1ffff
+#define HWIO_REG_466192_SHFT  0
+#define HWIO_REG_466192_IN  in_dword_masked(\
+	HWIO_REG_466192_ADDR, HWIO_REG_466192_RMSK)
+#define HWIO_REG_466192_INM(m) \
+	in_dword_masked(HWIO_REG_466192_ADDR, m)
+#define HWIO_REG_466192_OUT(v) \
+	out_dword(HWIO_REG_466192_ADDR, v)
+#define HWIO_REG_466192_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_466192_ADDR, m, v,\
+	HWIO_REG_466192_IN);
+#define HWIO_REG_466192_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_466192_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_554890_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000784)
+#define HWIO_REG_554890_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000784)
+#define HWIO_REG_554890_RMSK  0x1ffff
+#define HWIO_REG_554890_SHFT  0
+#define HWIO_REG_554890_IN  in_dword_masked(\
+	HWIO_REG_554890_ADDR, HWIO_REG_554890_RMSK)
+#define HWIO_REG_554890_INM(m) \
+	in_dword_masked(HWIO_REG_554890_ADDR, m)
+#define HWIO_REG_554890_OUT(v) \
+	out_dword(HWIO_REG_554890_ADDR, v)
+#define HWIO_REG_554890_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_554890_ADDR, m, v,\
+	HWIO_REG_554890_IN);
+#define HWIO_REG_554890_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_554890_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_295616_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000788)
+#define HWIO_REG_295616_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000788)
+#define HWIO_REG_295616_RMSK  0x1ffff
+#define HWIO_REG_295616_SHFT  0
+#define HWIO_REG_295616_IN  in_dword_masked(\
+	HWIO_REG_295616_ADDR, HWIO_REG_295616_RMSK)
+#define HWIO_REG_295616_INM(m) \
+	in_dword_masked(HWIO_REG_295616_ADDR, m)
+#define HWIO_REG_295616_OUT(v) \
+	out_dword(HWIO_REG_295616_ADDR, v)
+#define HWIO_REG_295616_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_295616_ADDR, m, v,\
+	HWIO_REG_295616_IN);
+#define HWIO_REG_295616_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_295616_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_440836_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x0000078c)
+#define HWIO_REG_440836_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000078c)
+#define HWIO_REG_440836_RMSK  0x1ffff
+#define HWIO_REG_440836_SHFT  0
+#define HWIO_REG_440836_IN  in_dword_masked(\
+	HWIO_REG_440836_ADDR, HWIO_REG_440836_RMSK)
+#define HWIO_REG_440836_INM(m) \
+	in_dword_masked(HWIO_REG_440836_ADDR, m)
+#define HWIO_REG_440836_OUT(v) \
+	out_dword(HWIO_REG_440836_ADDR, v)
+#define HWIO_REG_440836_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_440836_ADDR, m, v,\
+	HWIO_REG_440836_IN);
+#define HWIO_REG_440836_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_440836_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_741154_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000790)
+#define HWIO_REG_741154_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000790)
+#define HWIO_REG_741154_RMSK  0x1ffff
+#define HWIO_REG_741154_SHFT  0
+#define HWIO_REG_741154_IN  in_dword_masked(\
+	HWIO_REG_741154_ADDR,\
+	HWIO_REG_741154_RMSK)
+#define HWIO_REG_741154_INM(m) \
+	in_dword_masked(HWIO_REG_741154_ADDR, m)
+#define HWIO_REG_741154_OUT(v) \
+	out_dword(HWIO_REG_741154_ADDR, v)
+#define HWIO_REG_741154_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_741154_ADDR, m, v,\
+	HWIO_REG_741154_IN);
+#define HWIO_REG_741154_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_741154_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_753139_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000794)
+#define HWIO_REG_753139_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000794)
+#define HWIO_REG_753139_RMSK  0x1ffff
+#define HWIO_REG_753139_SHFT  0
+#define HWIO_REG_753139_IN  in_dword_masked(\
+	HWIO_REG_753139_ADDR,\
+	HWIO_REG_753139_RMSK)
+#define HWIO_REG_753139_INM(m) \
+	in_dword_masked(HWIO_REG_753139_ADDR, m)
+#define HWIO_REG_753139_OUT(v) \
+	out_dword(HWIO_REG_753139_ADDR, v)
+#define HWIO_REG_753139_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_753139_ADDR, m, v,\
+	HWIO_REG_753139_IN);
+#define HWIO_REG_753139_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_753139_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_409994_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x00000798)
+#define HWIO_REG_409994_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000798)
+#define HWIO_REG_409994_RMSK  0x1ffff
+#define HWIO_REG_409994_SHFT  0
+#define HWIO_REG_409994_IN  in_dword_masked(\
+	HWIO_REG_409994_ADDR,\
+	HWIO_REG_409994_RMSK)
+#define HWIO_REG_409994_INM(m) \
+	in_dword_masked(HWIO_REG_409994_ADDR, m)
+#define HWIO_REG_409994_OUT(v) \
+	out_dword(HWIO_REG_409994_ADDR, v)
+#define HWIO_REG_409994_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_409994_ADDR, m, v,\
+	HWIO_REG_409994_IN);
+#define HWIO_REG_409994_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_409994_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_492611_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x0000079c)
+#define HWIO_REG_492611_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000079c)
+#define HWIO_REG_492611_RMSK  0x1ffff
+#define HWIO_REG_492611_SHFT  0
+#define HWIO_REG_492611_IN  in_dword_masked(\
+	HWIO_REG_492611_ADDR,\
+	HWIO_REG_492611_RMSK)
+#define HWIO_REG_492611_INM(m) \
+	in_dword_masked(HWIO_REG_492611_ADDR, m)
+#define HWIO_REG_492611_OUT(v) \
+	out_dword(HWIO_REG_492611_ADDR, v)
+#define HWIO_REG_492611_OUTM(m, v) \
+	out_dword_masked_ns(HWIO_REG_492611_ADDR, m, v,\
+	HWIO_REG_492611_IN);
+#define HWIO_REG_492611_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_492611_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_91427_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000007a0)
+#define HWIO_REG_91427_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000007a0)
+#define HWIO_REG_91427_RMSK  0x1ffff
+#define HWIO_REG_91427_SHFT  0
+#define HWIO_REG_91427_IN  in_dword_masked(\
+	HWIO_REG_91427_ADDR,\
+	HWIO_REG_91427_RMSK)
+#define HWIO_REG_91427_INM(m) \
+	in_dword_masked(HWIO_REG_91427_ADDR, m)
+#define HWIO_REG_91427_OUT(v) \
+	out_dword(HWIO_REG_91427_ADDR, v)
+#define HWIO_REG_91427_OUTM(m, v) \
+	out_dword_masked_ns(HWIO_REG_91427_ADDR, m, v,\
+	HWIO_REG_91427_IN);
+#define HWIO_REG_91427_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_91427_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_617696_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000007a4)
+#define HWIO_REG_617696_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000007a4)
+#define HWIO_REG_617696_RMSK  0x1ffff
+#define HWIO_REG_617696_SHFT  0
+#define HWIO_REG_617696_IN  in_dword_masked(\
+	HWIO_REG_617696_ADDR,\
+	HWIO_REG_617696_RMSK)
+#define HWIO_REG_617696_INM(m) \
+	in_dword_masked(HWIO_REG_617696_ADDR, m)
+#define HWIO_REG_617696_OUT(v) \
+	out_dword(HWIO_REG_617696_ADDR, v)
+#define HWIO_REG_617696_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_617696_ADDR, m, v,\
+	HWIO_REG_617696_IN);
+#define HWIO_REG_617696_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_617696_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_459602_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000007a8)
+#define HWIO_REG_459602_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000007a8)
+#define HWIO_REG_459602_RMSK  0x1ffff
+#define HWIO_REG_459602_SHFT  0
+#define HWIO_REG_459602_IN  in_dword_masked(\
+	HWIO_REG_459602_ADDR,\
+	HWIO_REG_459602_RMSK)
+#define HWIO_REG_459602_INM(m) \
+	in_dword_masked(HWIO_REG_459602_ADDR, m)
+#define HWIO_REG_459602_OUT(v) \
+	out_dword(HWIO_REG_459602_ADDR, v)
+#define HWIO_REG_459602_OUTM(m, v) \
+	out_dword_masked_ns(HWIO_REG_459602_ADDR, m, v,\
+	HWIO_REG_459602_IN);
+#define HWIO_REG_459602_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_459602_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_758_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000007ac)
+#define HWIO_REG_758_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000007ac)
+#define HWIO_REG_758_RMSK  0x1ffff
+#define HWIO_REG_758_SHFT  0
+#define HWIO_REG_758_IN  in_dword_masked(\
+	HWIO_REG_758_ADDR,\
+	HWIO_REG_758_RMSK)
+#define HWIO_REG_758_INM(m) \
+	in_dword_masked(HWIO_REG_758_ADDR, m)
+#define HWIO_REG_758_OUT(v) \
+	out_dword(HWIO_REG_758_ADDR, v)
+#define HWIO_REG_758_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_758_ADDR, m, v,\
+	HWIO_REG_758_IN);
+#define HWIO_REG_758_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_758_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_710606_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000007b0)
+#define HWIO_REG_710606_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000007b0)
+#define HWIO_REG_710606_RMSK  0x1ffff
+#define HWIO_REG_710606_SHFT  0
+#define HWIO_REG_710606_IN  in_dword_masked(\
+	HWIO_REG_710606_ADDR,\
+	HWIO_REG_710606_RMSK)
+#define HWIO_REG_710606_INM(m) \
+	in_dword_masked(HWIO_REG_710606_ADDR, m)
+#define HWIO_REG_710606_OUT(v) \
+	out_dword(HWIO_REG_710606_ADDR, v)
+#define HWIO_REG_710606_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_710606_ADDR, m, v,\
+	HWIO_REG_710606_IN);
+#define HWIO_REG_710606_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_710606_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_122975_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000007b4)
+#define HWIO_REG_122975_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000007b4)
+#define HWIO_REG_122975_RMSK  0x1ffff
+#define HWIO_REG_122975_SHFT  0
+#define HWIO_REG_122975_IN  in_dword_masked(\
+	HWIO_REG_122975_ADDR,\
+	HWIO_REG_122975_RMSK)
+#define HWIO_REG_122975_INM(m) \
+	in_dword_masked(HWIO_REG_122975_ADDR, m)
+#define HWIO_REG_122975_OUT(v) \
+	out_dword(HWIO_REG_122975_ADDR, v)
+#define HWIO_REG_122975_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_122975_ADDR, m, v,\
+	HWIO_REG_122975_IN);
+#define HWIO_REG_122975_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_122975_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_860205_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000007b8)
+#define HWIO_REG_860205_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000007b8)
+#define HWIO_REG_860205_RMSK  0x1ffff
+#define HWIO_REG_860205_SHFT  0
+#define HWIO_REG_860205_IN  in_dword_masked(\
+	HWIO_REG_860205_ADDR,\
+	HWIO_REG_860205_RMSK)
+#define HWIO_REG_860205_INM(m) \
+	in_dword_masked(HWIO_REG_860205_ADDR, m)
+#define HWIO_REG_860205_OUT(v) \
+	out_dword(HWIO_REG_860205_ADDR, v)
+#define HWIO_REG_860205_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_860205_ADDR, m, v,\
+	HWIO_REG_860205_IN);
+#define HWIO_REG_860205_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_860205_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_366154_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000007bc)
+#define HWIO_REG_366154_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000007bc)
+#define HWIO_REG_366154_RMSK  0x1ffff
+#define HWIO_REG_366154_SHFT  0
+#define HWIO_REG_366154_IN  in_dword_masked(\
+	HWIO_REG_366154_ADDR,\
+	HWIO_REG_366154_RMSK)
+#define HWIO_REG_366154_INM(m) \
+	in_dword_masked(HWIO_REG_366154_ADDR, m)
+#define HWIO_REG_366154_OUT(v) \
+	out_dword(HWIO_REG_366154_ADDR, v)
+#define HWIO_REG_366154_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_366154_ADDR, m, v,\
+	HWIO_REG_366154_IN);
+#define HWIO_REG_366154_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_366154_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_632247_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000007c0)
+#define HWIO_REG_632247_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000007c0)
+#define HWIO_REG_632247_RMSK  0x1ffff
+#define HWIO_REG_632247_SHFT  0
+#define HWIO_REG_632247_IN  in_dword_masked(\
+	HWIO_REG_632247_ADDR,\
+	HWIO_REG_632247_RMSK)
+#define HWIO_REG_632247_INM(m) \
+	in_dword_masked(HWIO_REG_632247_ADDR, m)
+#define HWIO_REG_632247_OUT(v) \
+	out_dword(HWIO_REG_632247_ADDR, v)
+#define HWIO_REG_632247_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_632247_ADDR, m, v,\
+	HWIO_REG_632247_IN);
+#define HWIO_REG_632247_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_632247_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_709312_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000007c4)
+#define HWIO_REG_709312_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000007c4)
+#define HWIO_REG_709312_RMSK  0x1ffff
+#define HWIO_REG_709312_SHFT  0
+#define HWIO_REG_709312_IN  in_dword_masked(\
+	HWIO_REG_709312_ADDR,\
+	HWIO_REG_709312_RMSK)
+#define HWIO_REG_709312_INM(m) \
+	in_dword_masked(HWIO_REG_709312_ADDR, m)
+#define HWIO_REG_709312_OUT(v) \
+	out_dword(HWIO_REG_709312_ADDR, v)
+#define HWIO_REG_709312_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_709312_ADDR, m, v,\
+	HWIO_REG_709312_IN);
+#define HWIO_REG_709312_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_709312_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_891367_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000007c8)
+#define HWIO_REG_891367_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000007c8)
+#define HWIO_REG_891367_RMSK  0x1ffff
+#define HWIO_REG_891367_SHFT  0
+#define HWIO_REG_891367_IN  in_dword_masked(\
+	HWIO_REG_891367_ADDR,\
+	HWIO_REG_891367_RMSK)
+#define HWIO_REG_891367_INM(m) \
+	in_dword_masked(HWIO_REG_891367_ADDR, m)
+#define HWIO_REG_891367_OUT(v) \
+	out_dword(HWIO_REG_891367_ADDR, v)
+#define HWIO_REG_891367_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_891367_ADDR, m, v,\
+	HWIO_REG_891367_IN);
+#define HWIO_REG_891367_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_891367_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_628746_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000007cc)
+#define HWIO_REG_628746_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000007cc)
+#define HWIO_REG_628746_RMSK  0x1ffff
+#define HWIO_REG_628746_SHFT  0
+#define HWIO_REG_628746_IN  in_dword_masked(\
+	HWIO_REG_628746_ADDR,\
+	HWIO_REG_628746_RMSK)
+#define HWIO_REG_628746_INM(m) \
+	in_dword_masked(HWIO_REG_628746_ADDR, m)
+#define HWIO_REG_628746_OUT(v) \
+	out_dword(HWIO_REG_628746_ADDR, v)
+#define HWIO_REG_628746_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_628746_ADDR, m, v,\
+	HWIO_REG_628746_IN);
+#define HWIO_REG_628746_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_628746_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_821010_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000007d0)
+#define HWIO_REG_821010_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000007d0)
+#define HWIO_REG_821010_RMSK  0x1ffff
+#define HWIO_REG_821010_SHFT  0
+#define HWIO_REG_821010_IN  in_dword_masked(\
+	HWIO_REG_821010_ADDR,\
+	HWIO_REG_821010_RMSK)
+#define HWIO_REG_821010_INM(m) \
+	in_dword_masked(HWIO_REG_821010_ADDR, m)
+#define HWIO_REG_821010_OUT(v) \
+	out_dword(HWIO_REG_821010_ADDR, v)
+#define HWIO_REG_821010_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_821010_ADDR, m, v,\
+	HWIO_REG_821010_IN);
+#define HWIO_REG_821010_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_821010_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_902098_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000007d4)
+#define HWIO_REG_902098_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000007d4)
+#define HWIO_REG_902098_RMSK  0x1ffff
+#define HWIO_REG_902098_SHFT  0
+#define HWIO_REG_902098_IN  in_dword_masked(\
+	HWIO_REG_902098_ADDR,\
+	HWIO_REG_902098_RMSK)
+#define HWIO_REG_902098_INM(m) \
+	in_dword_masked(HWIO_REG_902098_ADDR, m)
+#define HWIO_REG_902098_OUT(v) \
+	out_dword(HWIO_REG_902098_ADDR, v)
+#define HWIO_REG_902098_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_902098_ADDR, m, v,\
+	HWIO_REG_902098_IN);
+#define HWIO_REG_902098_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_902098_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_939091_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000007d8)
+#define HWIO_REG_939091_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000007d8)
+#define HWIO_REG_939091_RMSK  0x1ffff
+#define HWIO_REG_939091_SHFT  0
+#define HWIO_REG_939091_IN  in_dword_masked(\
+	HWIO_REG_939091_ADDR,\
+	HWIO_REG_939091_RMSK)
+#define HWIO_REG_939091_INM(m) \
+	in_dword_masked(HWIO_REG_939091_ADDR, m)
+#define HWIO_REG_939091_OUT(v) \
+	out_dword(HWIO_REG_939091_ADDR, v)
+#define HWIO_REG_939091_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_939091_ADDR, m, v,\
+	HWIO_REG_939091_IN);
+#define HWIO_REG_939091_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_939091_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_261074_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000007dc)
+#define HWIO_REG_261074_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000007dc)
+#define HWIO_REG_261074_RMSK  0x1ffff
+#define HWIO_REG_261074_SHFT  0
+#define HWIO_REG_261074_IN  in_dword_masked(\
+	HWIO_REG_261074_ADDR,\
+	HWIO_REG_261074_RMSK)
+#define HWIO_REG_261074_INM(m) \
+	in_dword_masked(HWIO_REG_261074_ADDR, m)
+#define HWIO_REG_261074_OUT(v) \
+	out_dword(HWIO_REG_261074_ADDR, v)
+#define HWIO_REG_261074_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_261074_ADDR, m, v,\
+	HWIO_REG_261074_IN);
+#define HWIO_REG_261074_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_261074_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_157718_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000007e0)
+#define HWIO_REG_157718_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000007e0)
+#define HWIO_REG_157718_RMSK  0x1ffff
+#define HWIO_REG_157718_SHFT  0
+#define HWIO_REG_157718_IN  in_dword_masked(\
+	HWIO_REG_157718_ADDR,\
+	HWIO_REG_157718_RMSK)
+#define HWIO_REG_157718_INM(m) \
+	in_dword_masked(HWIO_REG_157718_ADDR, m)
+#define HWIO_REG_157718_OUT(v) \
+	out_dword(HWIO_REG_157718_ADDR, v)
+#define HWIO_REG_157718_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_157718_ADDR, m, v,\
+	HWIO_REG_157718_IN);
+#define HWIO_REG_5552391_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_5552391_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_148889_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000007e8)
+#define HWIO_REG_148889_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000007e8)
+#define HWIO_REG_148889_RMSK  0x1ffff
+#define HWIO_REG_148889_SHFT  0
+#define HWIO_REG_148889_IN  in_dword_masked(\
+	HWIO_REG_148889_ADDR,\
+	HWIO_REG_148889_RMSK)
+#define HWIO_REG_148889_INM(m) \
+	in_dword_masked(HWIO_REG_148889_ADDR, m)
+#define HWIO_REG_148889_OUT(v) \
+	out_dword(HWIO_REG_148889_ADDR, v)
+#define HWIO_REG_148889_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_148889_ADDR, m, v,\
+	HWIO_REG_148889_IN);
+#define HWIO_REG_148889_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_148889_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_396380_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000007ec)
+#define HWIO_REG_396380_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000007ec)
+#define HWIO_REG_396380_RMSK  0x1ffff
+#define HWIO_REG_396380_SHFT  0
+#define HWIO_REG_396380_IN  in_dword_masked(\
+	HWIO_REG_396380_ADDR,\
+	HWIO_REG_396380_RMSK)
+#define HWIO_REG_396380_INM(m) \
+	in_dword_masked(HWIO_REG_396380_ADDR, m)
+#define HWIO_REG_396380_OUT(v) \
+	out_dword(HWIO_REG_396380_ADDR, v)
+#define HWIO_REG_396380_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_396380_ADDR, m, v,\
+	HWIO_REG_396380_IN);
+#define HWIO_REG_396380_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_396380_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_351005_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000007f0)
+#define HWIO_REG_351005_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000007f0)
+#define HWIO_REG_351005_RMSK  0x1ffff
+#define HWIO_REG_351005_SHFT  0
+#define HWIO_REG_351005_IN  in_dword_masked(\
+	HWIO_REG_351005_ADDR,\
+	HWIO_REG_351005_RMSK)
+#define HWIO_REG_351005_INM(m) \
+	in_dword_masked(HWIO_REG_351005_ADDR, m)
+#define HWIO_REG_351005_OUT(v) \
+	out_dword(HWIO_REG_351005_ADDR, v)
+#define HWIO_REG_351005_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_351005_ADDR, m, v,\
+	HWIO_REG_351005_IN);
+#define HWIO_REG_351005_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_351005_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_863263_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000007f4)
+#define HWIO_REG_863263_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000007f4)
+#define HWIO_REG_863263_RMSK  0x1ffff
+#define HWIO_REG_863263_SHFT  0
+#define HWIO_REG_863263_IN  in_dword_masked(\
+	HWIO_REG_863263_ADDR,\
+	HWIO_REG_863263_RMSK)
+#define HWIO_REG_863263_INM(m) \
+	in_dword_masked(HWIO_REG_863263_ADDR, m)
+#define HWIO_REG_863263_OUT(v) \
+	out_dword(HWIO_REG_863263_ADDR, v)
+#define HWIO_REG_863263_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_863263_ADDR, m, v,\
+	HWIO_REG_863263_IN);
+#define HWIO_REG_863263_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_863263_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_135009_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000007f8)
+#define HWIO_REG_135009_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000007f8)
+#define HWIO_REG_135009_RMSK  0x1ffff
+#define HWIO_REG_135009_SHFT  0
+#define HWIO_REG_135009_IN  in_dword_masked(\
+	HWIO_REG_135009_ADDR,\
+	HWIO_REG_135009_RMSK)
+#define HWIO_REG_135009_INM(m) \
+	in_dword_masked(HWIO_REG_135009_ADDR, m)
+#define HWIO_REG_135009_OUT(v) \
+	out_dword(HWIO_REG_135009_ADDR, v)
+#define HWIO_REG_135009_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_135009_ADDR, m, v,\
+	HWIO_REG_135009_IN);
+#define HWIO_REG_135009_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_135009_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_923883_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x000007fc)
+#define HWIO_REG_923883_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000007fc)
+#define HWIO_REG_923883_RMSK  0x1ffff
+#define HWIO_REG_923883_SHFT  0
+#define HWIO_REG_923883_IN  in_dword_masked(\
+	HWIO_REG_923883_ADDR,\
+	HWIO_REG_923883_RMSK)
+#define HWIO_REG_923883_INM(m) \
+	in_dword_masked(HWIO_REG_923883_ADDR, m)
+#define HWIO_REG_923883_OUT(v) \
+	out_dword(HWIO_REG_923883_ADDR, v)
+#define HWIO_REG_923883_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_923883_ADDR, m, v,\
+	HWIO_REG_923883_IN);
+#define HWIO_REG_923883_BASE_ADDR_BMSK  0x1ffff
+#define HWIO_REG_923883_BASE_ADDR_SHFT  0
+
+#define HWIO_REG_934655_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00000818)
+#define HWIO_REG_934655_PHYS (VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000818)
+#define HWIO_REG_934655_RMSK  0x1fff
+#define HWIO_REG_934655_SHFT  0
+#define HWIO_REG_934655_IN \
+	in_dword_masked(HWIO_REG_934655_ADDR, HWIO_REG_934655_RMSK)
+#define HWIO_REG_934655_INM(m) \
+	in_dword_masked(HWIO_REG_934655_ADDR, m)
+#define HWIO_REG_934655_OUT(v) \
+	out_dword(HWIO_REG_934655_ADDR, v)
+#define HWIO_REG_934655_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_934655_ADDR, m, v, HWIO_REG_934655_IN);
+#define HWIO_REG_934655_FRAME_WIDTH_BMSK  0x1fff
+#define HWIO_REG_934655_FRAME_WIDTH_SHFT  0
+
+#define HWIO_REG_179070_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000081c)
+#define HWIO_REG_179070_PHYS (VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000081c)
+#define HWIO_REG_179070_RMSK  0x1fff
+#define HWIO_REG_179070_SHFT  0
+#define HWIO_REG_179070_IN  in_dword_masked(\
+	HWIO_REG_179070_ADDR, HWIO_REG_179070_RMSK)
+#define HWIO_REG_179070_INM(m) \
+	in_dword_masked(HWIO_REG_179070_ADDR, m)
+#define HWIO_REG_179070_OUT(v) \
+	out_dword(HWIO_REG_179070_ADDR, v)
+#define HWIO_REG_179070_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_179070_ADDR, m, v, HWIO_REG_179070_IN);
+#define HWIO_REG_179070_FRAME_HEIGHT_BMSK  0x1fff
+#define HWIO_REG_179070_FRAME_HEIGHT_SHFT  0
+
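+/*
+ * Illustrative note (not part of the generated register map): the two
+ * registers above carry the coded frame dimensions as 13-bit fields
+ * (RMSK 0x1fff), so a session's resolution would be programmed with
+ * plain _OUT() writes.  A minimal sketch; "width" and "height" are
+ * hypothetical locals:
+ *
+ *	HWIO_REG_934655_OUT(width & HWIO_REG_934655_FRAME_WIDTH_BMSK);
+ *	HWIO_REG_179070_OUT(height & HWIO_REG_179070_FRAME_HEIGHT_BMSK);
+ */
+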
+#define HWIO_REG_63643_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00000830)
+#define HWIO_REG_63643_PHYS (VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000830)
+#define HWIO_REG_63643_RMSK  0xff3f
+#define HWIO_REG_63643_SHFT  0
+#define HWIO_REG_63643_IN  in_dword_masked(\
+	HWIO_REG_63643_ADDR, HWIO_REG_63643_RMSK)
+#define HWIO_REG_63643_INM(m) \
+	in_dword_masked(HWIO_REG_63643_ADDR, m)
+#define HWIO_REG_63643_OUT(v) \
+	out_dword(HWIO_REG_63643_ADDR, v)
+#define HWIO_REG_63643_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_63643_ADDR, m, v, HWIO_REG_63643_IN);
+#define HWIO_REG_63643_LEVEL_BMSK    0xff00
+#define HWIO_REG_63643_LEVEL_SHFT    0x8
+#define HWIO_REG_63643_PROFILE_BMSK  0x3f
+#define HWIO_REG_63643_PROFILE_SHFT  0
+
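+/*
+ * Illustrative note (not part of the generated register map): unlike the
+ * single-field registers above, HWIO_REG_63643 packs two fields, the
+ * codec level in bits [15:8] and the profile in bits [5:0].  A minimal
+ * sketch of composing the write value; "level" and "profile" are
+ * hypothetical locals:
+ *
+ *	HWIO_REG_63643_OUT(((level << HWIO_REG_63643_LEVEL_SHFT) &
+ *			    HWIO_REG_63643_LEVEL_BMSK) |
+ *			   (profile & HWIO_REG_63643_PROFILE_BMSK));
+ */
+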
+#define HWIO_REG_786024_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000083c)
+#define HWIO_REG_786024_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000083c)
+#define HWIO_REG_786024_RMSK  0x1
+#define HWIO_REG_786024_SHFT  0
+#define HWIO_REG_786024_IN  in_dword_masked(\
+	HWIO_REG_786024_ADDR, HWIO_REG_786024_RMSK)
+#define HWIO_REG_786024_INM(m) \
+	in_dword_masked(HWIO_REG_786024_ADDR, m)
+#define HWIO_REG_786024_OUT(v) \
+	out_dword(HWIO_REG_786024_ADDR, v)
+#define HWIO_REG_786024_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_786024_ADDR, m, v, HWIO_REG_786024_IN);
+#define HWIO_REG_786024_FIELD_BMSK  0x1
+#define HWIO_REG_786024_FIELD_SHFT  0
+
+#define HWIO_REG_152500_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00000848)
+#define HWIO_REG_152500_PHYS (VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000848)
+#define HWIO_REG_152500_RMSK  0x3
+#define HWIO_REG_152500_SHFT  0
+#define HWIO_REG_152500_IN  in_dword_masked(\
+	HWIO_REG_152500_ADDR, HWIO_REG_152500_RMSK)
+#define HWIO_REG_152500_INM(m) \
+	in_dword_masked(HWIO_REG_152500_ADDR, m)
+#define HWIO_REG_152500_OUT(v) \
+	out_dword(HWIO_REG_152500_ADDR, v)
+#define HWIO_REG_152500_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_152500_ADDR, m, v, HWIO_REG_152500_IN);
+#define HWIO_REG_152500_LF_CONTROL_BMSK  0x3
+#define HWIO_REG_152500_LF_CONTROL_SHFT  0
+
+#define HWIO_REG_266285_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000084c)
+#define HWIO_REG_266285_PHYS (VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000084c)
+#define HWIO_REG_266285_RMSK  0x1f
+#define HWIO_REG_266285_SHFT  0
+#define HWIO_REG_266285_IN  in_dword_masked(\
+	HWIO_REG_266285_ADDR, HWIO_REG_266285_RMSK)
+#define HWIO_REG_266285_INM(m) \
+	in_dword_masked(HWIO_REG_266285_ADDR, m)
+#define HWIO_REG_266285_OUT(v) \
+	out_dword(HWIO_REG_266285_ADDR, v)
+#define HWIO_REG_266285_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_266285_ADDR, m, v, HWIO_REG_266285_IN);
+#define HWIO_REG_266285_LF_ALPHAS_OFF_BMSK  0x1f
+#define HWIO_REG_266285_LF_ALPHAS_OFF_SHFT  0
+
+#define HWIO_REG_964731_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00000850)
+#define HWIO_REG_964731_PHYS (VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000850)
+#define HWIO_REG_964731_RMSK  0x1f
+#define HWIO_REG_964731_SHFT  0
+#define HWIO_REG_964731_IN  in_dword_masked(\
+	HWIO_REG_964731_ADDR, HWIO_REG_964731_RMSK)
+#define HWIO_REG_964731_INM(m) \
+	in_dword_masked(HWIO_REG_964731_ADDR, m)
+#define HWIO_REG_964731_OUT(v) \
+	out_dword(HWIO_REG_964731_ADDR, v)
+#define HWIO_REG_964731_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_964731_ADDR, m, v, HWIO_REG_964731_IN);
+#define HWIO_REG_964731_LF_BETA_OFF_BMSK  0x1f
+#define HWIO_REG_964731_LF_BETA_OFF_SHFT  0
+
+#define HWIO_REG_919924_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00000c30)
+#define HWIO_REG_919924_PHYS (VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000c30)
+#define HWIO_REG_919924_RMSK  0xffffffff
+#define HWIO_REG_919924_SHFT  0
+#define HWIO_REG_919924_IN  in_dword_masked(\
+	HWIO_REG_919924_ADDR, HWIO_REG_919924_RMSK)
+#define HWIO_REG_919924_INM(m) \
+	in_dword_masked(HWIO_REG_919924_ADDR, m)
+#define HWIO_REG_919924_OUT(v) \
+	out_dword(HWIO_REG_919924_ADDR, v)
+#define HWIO_REG_919924_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_919924_ADDR, m, v, HWIO_REG_919924_IN);
+#define HWIO_REG_919924_VIDC_QP_OFFSET_BMSK  0xffffffff
+#define HWIO_REG_919924_VIDC_QP_OFFSET_SHFT  0
+
+#define HWIO_REG_143629_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00000c34)
+#define HWIO_REG_143629_PHYS (VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00000c34)
+#define HWIO_REG_143629_RMSK  0x1
+#define HWIO_REG_143629_SHFT  0
+#define HWIO_REG_143629_IN  in_dword_masked(\
+	HWIO_REG_143629_ADDR, HWIO_REG_143629_RMSK)
+#define HWIO_REG_143629_INM(m) \
+	in_dword_masked(HWIO_REG_143629_ADDR, m)
+#define HWIO_REG_143629_OUT(v) \
+	out_dword(HWIO_REG_143629_ADDR, v)
+#define HWIO_REG_143629_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_143629_ADDR, m, v, HWIO_REG_143629_IN);
+#define HWIO_REG_143629_REG_143629_BMSK  0x1
+#define HWIO_REG_143629_REG_143629_SHFT  0
+
+#define HWIO_REG_607589_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00002000)
+#define HWIO_REG_607589_PHYS (VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00002000)
+#define HWIO_REG_607589_RMSK  0xffffffff
+#define HWIO_REG_607589_SHFT  0
+#define HWIO_REG_607589_IN  in_dword_masked(\
+	HWIO_REG_607589_ADDR, HWIO_REG_607589_RMSK)
+#define HWIO_REG_607589_INM(m) \
+	in_dword_masked(HWIO_REG_607589_ADDR, m)
+#define HWIO_REG_607589_OUT(v) \
+	out_dword(HWIO_REG_607589_ADDR, v)
+#define HWIO_REG_607589_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_607589_ADDR, m, v, HWIO_REG_607589_IN);
+#define HWIO_REG_607589_RTN_CHID_BMSK  0xffffffff
+#define HWIO_REG_607589_RTN_CHID_SHFT  0
+
+#define HWIO_REG_845544_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00002004)
+#define HWIO_REG_845544_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00002004)
+#define HWIO_REG_845544_RMSK  0xffffffff
+#define HWIO_REG_845544_SHFT  0
+#define HWIO_REG_845544_IN  in_dword_masked(\
+	HWIO_REG_845544_ADDR, HWIO_REG_845544_RMSK)
+#define HWIO_REG_845544_INM(m) \
+	in_dword_masked(HWIO_REG_845544_ADDR, m)
+#define HWIO_REG_845544_OUT(v) \
+	out_dword(HWIO_REG_845544_ADDR, v)
+#define HWIO_REG_845544_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_845544_ADDR, m, v, HWIO_REG_845544_IN);
+#define HWIO_REG_845544_REG_845544_BMSK  0xffffffff
+#define HWIO_REG_845544_REG_845544_SHFT  0
+
+#define HWIO_REG_859906_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00002008)
+#define HWIO_REG_859906_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00002008)
+#define HWIO_REG_859906_RMSK  0xffffffff
+#define HWIO_REG_859906_SHFT  0
+#define HWIO_REG_859906_IN  in_dword_masked(\
+	HWIO_REG_859906_ADDR, HWIO_REG_859906_RMSK)
+#define HWIO_REG_859906_INM(m) \
+	in_dword_masked(HWIO_REG_859906_ADDR, m)
+#define HWIO_REG_859906_OUT(v) \
+	out_dword(HWIO_REG_859906_ADDR, v)
+#define HWIO_REG_859906_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_859906_ADDR, m, v, HWIO_REG_859906_IN);
+#define HWIO_REG_859906_REG_859906_BMSK  0xffffffff
+#define HWIO_REG_859906_REG_859906_SHFT  0
+
+#define HWIO_REG_490078_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000200c)
+#define HWIO_REG_490078_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000200c)
+#define HWIO_REG_490078_RMSK  0xffffffff
+#define HWIO_REG_490078_SHFT  0
+#define HWIO_REG_490078_IN  in_dword_masked(\
+	HWIO_REG_490078_ADDR, HWIO_REG_490078_RMSK)
+#define HWIO_REG_490078_INM(m) \
+	in_dword_masked(HWIO_REG_490078_ADDR, m)
+#define HWIO_REG_490078_OUT(v) \
+	out_dword(HWIO_REG_490078_ADDR, v)
+#define HWIO_REG_490078_OUTM(m, v) \
+	out_dword_masked_ns(HWIO_REG_490078_ADDR, m, v,\
+	HWIO_REG_490078_IN);
+#define HWIO_REG_490078_REG_490078_BMSK  0xffffffff
+#define HWIO_REG_490078_REG_490078_SHFT  0
+
+#define HWIO_REG_640904_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00002010)
+#define HWIO_REG_640904_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00002010)
+#define HWIO_REG_640904_RMSK  0xffffffff
+#define HWIO_REG_640904_SHFT  0
+#define HWIO_REG_640904_IN  in_dword_masked(\
+	HWIO_REG_640904_ADDR, HWIO_REG_640904_RMSK)
+#define HWIO_REG_640904_INM(m) \
+	in_dword_masked(HWIO_REG_640904_ADDR, m)
+#define HWIO_REG_640904_OUT(v) \
+	out_dword(HWIO_REG_640904_ADDR, v)
+#define HWIO_REG_640904_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_640904_ADDR, m, v, HWIO_REG_640904_IN);
+#define HWIO_REG_640904_REG_640904_BMSK  0xffffffff
+#define HWIO_REG_640904_REG_640904_SHFT  0
+
+#define HWIO_REG_60114_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00002014)
+#define HWIO_REG_60114_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00002014)
+#define HWIO_REG_60114_RMSK  0xffffffff
+#define HWIO_REG_60114_SHFT  0
+#define HWIO_REG_60114_IN  in_dword_masked(\
+	HWIO_REG_60114_ADDR, HWIO_REG_60114_RMSK)
+#define HWIO_REG_60114_INM(m) \
+	in_dword_masked(HWIO_REG_60114_ADDR, m)
+#define HWIO_REG_60114_OUT(v) \
+	out_dword(HWIO_REG_60114_ADDR, v)
+#define HWIO_REG_60114_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_60114_ADDR, m, v, HWIO_REG_60114_IN);
+#define HWIO_REG_60114_REG_60114_BMSK  0xffffffff
+#define HWIO_REG_60114_REG_60114_SHFT  0
+
+#define HWIO_REG_489688_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00002018)
+#define HWIO_REG_489688_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00002018)
+#define HWIO_REG_489688_RMSK  0xffffffff
+#define HWIO_REG_489688_SHFT  0
+#define HWIO_REG_489688_IN  in_dword_masked(\
+	HWIO_REG_489688_ADDR, HWIO_REG_489688_RMSK)
+#define HWIO_REG_489688_INM(m) \
+	in_dword_masked(HWIO_REG_489688_ADDR, m)
+#define HWIO_REG_489688_OUT(v) \
+	out_dword(HWIO_REG_489688_ADDR, v)
+#define HWIO_REG_489688_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_489688_ADDR, m, v, HWIO_REG_489688_IN);
+#define HWIO_REG_489688_REG_489688_BMSK  0xffffffff
+#define HWIO_REG_489688_REG_489688_SHFT  0
+
+#define HWIO_REG_853667_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000201c)
+#define HWIO_REG_853667_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000201c)
+#define HWIO_REG_853667_RMSK  0xffffffff
+#define HWIO_REG_853667_SHFT  0
+#define HWIO_REG_853667_IN  in_dword_masked(\
+	HWIO_REG_853667_ADDR, HWIO_REG_853667_RMSK)
+#define HWIO_REG_853667_INM(m) \
+	in_dword_masked(HWIO_REG_853667_ADDR, m)
+#define HWIO_REG_853667_OUT(v) \
+	out_dword(HWIO_REG_853667_ADDR, v)
+#define HWIO_REG_853667_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_853667_ADDR, m, v, HWIO_REG_853667_IN);
+#define HWIO_REG_853667_REG_853667_BMSK  0xffffffff
+#define HWIO_REG_853667_REG_853667_SHFT  0
+
+#define HWIO_REG_760102_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00002020)
+#define HWIO_REG_760102_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00002020)
+#define HWIO_REG_760102_RMSK  0xffffffff
+#define HWIO_REG_760102_SHFT  0
+#define HWIO_REG_760102_IN  in_dword_masked(\
+	HWIO_REG_760102_ADDR, HWIO_REG_760102_RMSK)
+#define HWIO_REG_760102_INM(m) \
+	in_dword_masked(HWIO_REG_760102_ADDR, m)
+#define HWIO_REG_760102_OUT(v) \
+	out_dword(HWIO_REG_760102_ADDR, v)
+#define HWIO_REG_760102_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_760102_ADDR, m, v, HWIO_REG_760102_IN);
+#define HWIO_REG_760102_REG_760102_BMSK  0xffffffff
+#define HWIO_REG_760102_REG_760102_SHFT  0
+
+#define HWIO_REG_378318_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00002024)
+#define HWIO_REG_378318_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00002024)
+#define HWIO_REG_378318_RMSK  0xffffffff
+#define HWIO_REG_378318_SHFT  0
+#define HWIO_REG_378318_IN  in_dword_masked(\
+	HWIO_REG_378318_ADDR, HWIO_REG_378318_RMSK)
+#define HWIO_REG_378318_INM(m) \
+	in_dword_masked(HWIO_REG_378318_ADDR, m)
+#define HWIO_REG_378318_OUT(v) \
+	out_dword(HWIO_REG_378318_ADDR, v)
+#define HWIO_REG_378318_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_378318_ADDR, m, v, HWIO_REG_378318_IN);
+#define HWIO_REG_378318_REG_378318_BMSK  0xffffffff
+#define HWIO_REG_378318_REG_378318_SHFT  0
+
+#define HWIO_REG_203487_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00002028)
+#define HWIO_REG_203487_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00002028)
+#define HWIO_REG_203487_RMSK  0xffffffff
+#define HWIO_REG_203487_SHFT  0
+#define HWIO_REG_203487_IN  in_dword_masked(\
+	HWIO_REG_203487_ADDR, HWIO_REG_203487_RMSK)
+#define HWIO_REG_203487_INM(m) \
+	in_dword_masked(HWIO_REG_203487_ADDR, m)
+#define HWIO_REG_203487_OUT(v) \
+	out_dword(HWIO_REG_203487_ADDR, v)
+#define HWIO_REG_203487_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_203487_ADDR, m, v, HWIO_REG_203487_IN);
+#define HWIO_REG_203487_REG_203487_BMSK  0xffffffff
+#define HWIO_REG_203487_REG_203487_SHFT  0
+
+#define HWIO_REG_692991_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000202c)
+#define HWIO_REG_692991_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000202c)
+#define HWIO_REG_692991_RMSK  0xffffffff
+#define HWIO_REG_692991_SHFT  0
+#define HWIO_REG_692991_IN  in_dword_masked(\
+	HWIO_REG_692991_ADDR, HWIO_REG_692991_RMSK)
+#define HWIO_REG_692991_INM(m) \
+	in_dword_masked(HWIO_REG_692991_ADDR, m)
+#define HWIO_REG_692991_OUT(v) \
+	out_dword(HWIO_REG_692991_ADDR, v)
+#define HWIO_REG_692991_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_692991_ADDR, m, v, HWIO_REG_692991_IN);
+#define HWIO_REG_692991_REG_692991_BMSK  0xffffffff
+#define HWIO_REG_692991_REG_692991_SHFT  0
+
+#define HWIO_REG_161740_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00002030)
+#define HWIO_REG_161740_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00002030)
+#define HWIO_REG_161740_RMSK  0xffffffff
+#define HWIO_REG_161740_SHFT  0
+#define HWIO_REG_161740_IN  in_dword_masked(\
+	HWIO_REG_161740_ADDR, HWIO_REG_161740_RMSK)
+#define HWIO_REG_161740_INM(m) \
+	in_dword_masked(HWIO_REG_161740_ADDR, m)
+#define HWIO_REG_161740_OUT(v) \
+	out_dword(HWIO_REG_161740_ADDR, v)
+#define HWIO_REG_161740_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_161740_ADDR, m, v, HWIO_REG_161740_IN);
+#define HWIO_REG_161740_REG_161740_BMSK  0xffffffff
+#define HWIO_REG_161740_REG_161740_SHFT  0
+
+#define HWIO_REG_930239_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00002034)
+#define HWIO_REG_930239_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00002034)
+#define HWIO_REG_930239_RMSK  0xffffffff
+#define HWIO_REG_930239_SHFT  0
+#define HWIO_REG_930239_IN  in_dword_masked(\
+	HWIO_REG_930239_ADDR, HWIO_REG_930239_RMSK)
+#define HWIO_REG_930239_INM(m) \
+	in_dword_masked(HWIO_REG_930239_ADDR, m)
+#define HWIO_REG_930239_OUT(v) \
+	out_dword(HWIO_REG_930239_ADDR, v)
+#define HWIO_REG_930239_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_930239_ADDR, m, v, HWIO_REG_930239_IN);
+#define HWIO_REG_930239_REG_930239_BMSK  0xffffffff
+#define HWIO_REG_930239_REG_930239_SHFT  0
+
+#define HWIO_REG_567827_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00002038)
+#define HWIO_REG_567827_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00002038)
+#define HWIO_REG_567827_RMSK  0xffffffff
+#define HWIO_REG_567827_SHFT  0
+#define HWIO_REG_567827_IN  in_dword_masked(\
+	HWIO_REG_567827_ADDR, HWIO_REG_567827_RMSK)
+#define HWIO_REG_567827_INM(m) \
+	in_dword_masked(HWIO_REG_567827_ADDR, m)
+#define HWIO_REG_567827_OUT(v) \
+	out_dword(HWIO_REG_567827_ADDR, v)
+#define HWIO_REG_567827_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_567827_ADDR, m, v, HWIO_REG_567827_IN);
+#define HWIO_REG_567827_REG_567827_BMSK  0xffffffff
+#define HWIO_REG_567827_REG_567827_SHFT  0
+
+#define HWIO_REG_542997_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000203c)
+#define HWIO_REG_542997_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000203c)
+#define HWIO_REG_542997_RMSK  0xffffffff
+#define HWIO_REG_542997_SHFT  0
+#define HWIO_REG_542997_IN  in_dword_masked(\
+	HWIO_REG_542997_ADDR, HWIO_REG_542997_RMSK)
+#define HWIO_REG_542997_INM(m) \
+	in_dword_masked(HWIO_REG_542997_ADDR, m)
+#define HWIO_REG_542997_OUT(v) \
+	out_dword(HWIO_REG_542997_ADDR, v)
+#define HWIO_REG_542997_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_542997_ADDR, m, v, HWIO_REG_542997_IN);
+#define HWIO_REG_542997_REG_542997_BMSK  0xffffffff
+#define HWIO_REG_542997_REG_542997_SHFT  0
+
+#define HWIO_REG_666957_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00002040)
+#define HWIO_REG_666957_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00002040)
+#define HWIO_REG_666957_RMSK  0x7ffff
+#define HWIO_REG_666957_SHFT  0
+#define HWIO_REG_666957_IN  in_dword_masked(\
+	HWIO_REG_666957_ADDR, HWIO_REG_666957_RMSK)
+#define HWIO_REG_666957_INM(m) \
+	in_dword_masked(HWIO_REG_666957_ADDR, m)
+#define HWIO_REG_666957_OUT(v) \
+	out_dword(HWIO_REG_666957_ADDR, v)
+#define HWIO_REG_666957_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_666957_ADDR, m, v, HWIO_REG_666957_IN);
+#define HWIO_REG_666957_CH_DEC_TYPE_BMSK  0x70000
+#define HWIO_REG_666957_CH_DEC_TYPE_SHFT  0x10
+#define HWIO_REG_666957_CH_INST_ID_BMSK   0xffff
+#define HWIO_REG_666957_CH_INST_ID_SHFT   0
+
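+/*
+ * Illustrative note (not part of the generated register map): the
+ * register above packs the decoder type in bits [18:16] and the channel
+ * instance id in bits [15:0].  A minimal sketch of building the write
+ * value; "dec_type" and "inst_id" are hypothetical locals:
+ *
+ *	HWIO_REG_666957_OUT(((dec_type << HWIO_REG_666957_CH_DEC_TYPE_SHFT) &
+ *			     HWIO_REG_666957_CH_DEC_TYPE_BMSK) |
+ *			    (inst_id & HWIO_REG_666957_CH_INST_ID_BMSK));
+ */
+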
+#define HWIO_REG_117192_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00002044)
+#define HWIO_REG_117192_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00002044)
+#define HWIO_REG_117192_RMSK  0xffffffff
+#define HWIO_REG_117192_SHFT  0
+#define HWIO_REG_117192_IN  in_dword_masked(\
+	HWIO_REG_117192_ADDR, HWIO_REG_117192_RMSK)
+#define HWIO_REG_117192_INM(m) \
+	in_dword_masked(HWIO_REG_117192_ADDR, m)
+#define HWIO_REG_117192_OUT(v) \
+	out_dword(HWIO_REG_117192_ADDR, v)
+#define HWIO_REG_117192_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_117192_ADDR, m, v, HWIO_REG_117192_IN);
+#define HWIO_REG_117192_REG_117192_BMSK  0xffffffff
+#define HWIO_REG_117192_REG_117192_SHFT  0
+
+#define HWIO_REG_145068_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00002048)
+#define HWIO_REG_145068_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00002048)
+#define HWIO_REG_145068_RMSK  0xffffffff
+#define HWIO_REG_145068_SHFT  0
+#define HWIO_REG_145068_IN  in_dword_masked(\
+	HWIO_REG_145068_ADDR, HWIO_REG_145068_RMSK)
+#define HWIO_REG_145068_INM(m) \
+	in_dword_masked(HWIO_REG_145068_ADDR, m)
+#define HWIO_REG_145068_OUT(v) \
+	out_dword(HWIO_REG_145068_ADDR, v)
+#define HWIO_REG_145068_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_145068_ADDR, m, v, HWIO_REG_145068_IN);
+#define HWIO_REG_145068_REG_145068_BMSK  0xffffffff
+#define HWIO_REG_145068_REG_145068_SHFT  0
+
+#define HWIO_REG_921356_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000204c)
+#define HWIO_REG_921356_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000204c)
+#define HWIO_REG_921356_RMSK  0xffffffff
+#define HWIO_REG_921356_SHFT  0
+#define HWIO_REG_921356_IN  in_dword_masked(\
+	HWIO_REG_921356_ADDR, HWIO_REG_921356_RMSK)
+#define HWIO_REG_921356_INM(m) \
+	in_dword_masked(HWIO_REG_921356_ADDR, m)
+#define HWIO_REG_921356_OUT(v) \
+	out_dword(HWIO_REG_921356_ADDR, v)
+#define HWIO_REG_921356_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_921356_ADDR, m, v, HWIO_REG_921356_IN);
+#define HWIO_REG_921356_REG_921356_BMSK  0xffffffff
+#define HWIO_REG_921356_REG_921356_SHFT  0
+
+#define HWIO_REG_612810_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00002050)
+#define HWIO_REG_612810_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00002050)
+#define HWIO_REG_612810_RMSK  0xffffffff
+#define HWIO_REG_612810_SHFT  0
+#define HWIO_REG_612810_IN  in_dword_masked(\
+	HWIO_REG_612810_ADDR, HWIO_REG_612810_RMSK)
+#define HWIO_REG_612810_INM(m) \
+	in_dword_masked(HWIO_REG_612810_ADDR, m)
+#define HWIO_REG_612810_OUT(v) \
+	out_dword(HWIO_REG_612810_ADDR, v)
+#define HWIO_REG_612810_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_612810_ADDR, m, v, HWIO_REG_612810_IN);
+#define HWIO_REG_612810_REG_612810_BMSK  0xffffffff
+#define HWIO_REG_612810_REG_612810_SHFT  0
+
+#define HWIO_REG_175608_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00002054)
+#define HWIO_REG_175608_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00002054)
+#define HWIO_REG_175608_RMSK  0xffffffff
+#define HWIO_REG_175608_SHFT  0
+#define HWIO_REG_175608_IN  in_dword_masked(\
+	HWIO_REG_175608_ADDR, HWIO_REG_175608_RMSK)
+#define HWIO_REG_175608_INM(m) \
+	in_dword_masked(HWIO_REG_175608_ADDR, m)
+#define HWIO_REG_175608_OUT(v) \
+	out_dword(HWIO_REG_175608_ADDR, v)
+#define HWIO_REG_175608_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_175608_ADDR, m, v, HWIO_REG_175608_IN);
+#define HWIO_REG_175608_REG_175608_BMSK  0xffffffff
+#define HWIO_REG_175608_REG_175608_SHFT  0
+
+#define HWIO_REG_190381_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00002058)
+#define HWIO_REG_190381_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00002058)
+#define HWIO_REG_190381_RMSK  0xffffffff
+#define HWIO_REG_190381_SHFT  0
+#define HWIO_REG_190381_IN  in_dword_masked(\
+	HWIO_REG_190381_ADDR, HWIO_REG_190381_RMSK)
+#define HWIO_REG_190381_INM(m) \
+	in_dword_masked(HWIO_REG_190381_ADDR, m)
+#define HWIO_REG_190381_OUT(v) \
+	out_dword(HWIO_REG_190381_ADDR, v)
+#define HWIO_REG_190381_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_190381_ADDR, m, v, HWIO_REG_190381_IN);
+#define HWIO_REG_190381_REG_190381_BMSK  0xffffffff
+#define HWIO_REG_190381_REG_190381_SHFT  0
+
+#define HWIO_REG_85655_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000205c)
+#define HWIO_REG_85655_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000205c)
+#define HWIO_REG_85655_RMSK  0xffffffff
+#define HWIO_REG_85655_SHFT  0
+#define HWIO_REG_85655_IN  in_dword_masked(\
+	HWIO_REG_85655_ADDR, HWIO_REG_85655_RMSK)
+#define HWIO_REG_85655_INM(m) \
+	in_dword_masked(HWIO_REG_85655_ADDR, m)
+#define HWIO_REG_85655_OUT(v) \
+	out_dword(HWIO_REG_85655_ADDR, v)
+#define HWIO_REG_85655_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_85655_ADDR, m, v, HWIO_REG_85655_IN);
+#define HWIO_REG_85655_REG_85655_BMSK  0xffffffff
+#define HWIO_REG_85655_REG_85655_SHFT  0
+
+#define HWIO_REG_86830_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00002060)
+#define HWIO_REG_86830_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00002060)
+#define HWIO_REG_86830_RMSK  0xffffffff
+#define HWIO_REG_86830_SHFT  0
+#define HWIO_REG_86830_IN  in_dword_masked(\
+	HWIO_REG_86830_ADDR, HWIO_REG_86830_RMSK)
+#define HWIO_REG_86830_INM(m) \
+	in_dword_masked(HWIO_REG_86830_ADDR, m)
+#define HWIO_REG_86830_OUT(v) \
+	out_dword(HWIO_REG_86830_ADDR, v)
+#define HWIO_REG_86830_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_86830_ADDR, m, v, HWIO_REG_86830_IN);
+#define HWIO_REG_86830_REG_86830_BMSK  0xffffffff
+#define HWIO_REG_86830_REG_86830_SHFT  0
+
+#define HWIO_REG_889944_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00002064)
+#define HWIO_REG_889944_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00002064)
+#define HWIO_REG_889944_RMSK  0xffffffff
+#define HWIO_REG_889944_SHFT  0
+#define HWIO_REG_889944_IN  in_dword_masked(\
+	HWIO_REG_889944_ADDR, HWIO_REG_889944_RMSK)
+#define HWIO_REG_889944_INM(m) \
+	in_dword_masked(HWIO_REG_889944_ADDR, m)
+#define HWIO_REG_889944_OUT(v) \
+	out_dword(HWIO_REG_889944_ADDR, v)
+#define HWIO_REG_889944_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_889944_ADDR, m, v, HWIO_REG_889944_IN);
+#define HWIO_REG_889944_HOST_WR_ADDR_BMSK  0xffffffff
+#define HWIO_REG_889944_HOST_WR_ADSR_SHFT  0
+
+#define HWIO_REG_404623_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00002068)
+#define HWIO_REG_404623_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00002068)
+#define HWIO_REG_404623_RMSK  0xffffffff
+#define HWIO_REG_404623_SHFT  0
+#define HWIO_REG_404623_IN  in_dword_masked(\
+	HWIO_REG_404623_ADDR, HWIO_REG_404623_RMSK)
+#define HWIO_REG_404623_INM(m) \
+	in_dword_masked(HWIO_REG_404623_ADDR, m)
+#define HWIO_REG_404623_OUT(v) \
+	out_dword(HWIO_REG_404623_ADDR, v)
+#define HWIO_REG_404623_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_404623_ADDR, m, v, HWIO_REG_404623_IN);
+#define HWIO_REG_404623_REG_404623_BMSK  0xffffffff
+#define HWIO_REG_404623_REG_404623_SHFT  0
+
+#define HWIO_REG_397087_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000206c)
+#define HWIO_REG_397087_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000206c)
+#define HWIO_REG_397087_RMSK  0xffffffff
+#define HWIO_REG_397087_SHFT  0
+#define HWIO_REG_397087_IN  in_dword_masked(\
+	HWIO_REG_397087_ADDR, HWIO_REG_397087_RMSK)
+#define HWIO_REG_397087_INM(m) \
+	in_dword_masked(HWIO_REG_397087_ADDR, m)
+#define HWIO_REG_397087_OUT(v) \
+	out_dword(HWIO_REG_397087_ADDR, v)
+#define HWIO_REG_397087_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_397087_ADDR, m, v, HWIO_REG_397087_IN);
+#define HWIO_REG_397087_CMD_SEQ_NUM_BMSK  0xffffffff
+#define HWIO_REG_397087_CMD_SEQ_NUM_SHFT  0
+
+#define HWIO_REG_212613_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00002070)
+#define HWIO_REG_212613_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00002070)
+#define HWIO_REG_212613_RMSK  0xffffffff
+#define HWIO_REG_212613_SHFT  0
+#define HWIO_REG_212613_IN  in_dword_masked(\
+	HWIO_REG_212613_ADDR, HWIO_REG_212613_RMSK)
+#define HWIO_REG_212613_INM(m) \
+	in_dword_masked(HWIO_REG_212613_ADDR, m)
+#define HWIO_REG_212613_OUT(v) \
+	out_dword(HWIO_REG_212613_ADDR, v)
+#define HWIO_REG_212613_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_212613_ADDR, m, v, HWIO_REG_212613_IN);
+#define HWIO_REG_212613_REG_212613_BMSK  0xffffffff
+#define HWIO_REG_212613_REG_212613_SHFT  0
+
+#define HWIO_REG_840123_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00002074)
+#define HWIO_REG_840123_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00002074)
+#define HWIO_REG_840123_RMSK  0xffffffff
+#define HWIO_REG_840123_SHFT  0
+#define HWIO_REG_840123_IN  in_dword_masked(\
+	HWIO_REG_840123_ADDR, HWIO_REG_840123_RMSK)
+#define HWIO_REG_840123_INM(m) \
+	in_dword_masked(HWIO_REG_840123_ADDR, m)
+#define HWIO_REG_840123_OUT(v) \
+	out_dword(HWIO_REG_840123_ADDR, v)
+#define HWIO_REG_840123_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_840123_ADDR, m, v, HWIO_REG_840123_IN);
+#define HWIO_REG_840123_REG_840123_BMSK  0xffffffff
+#define HWIO_REG_840123_REG_840123_SHFT  0
+
+#define HWIO_REG_520335_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00002078)
+#define HWIO_REG_520335_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00002078)
+#define HWIO_REG_520335_RMSK  0xffffffff
+#define HWIO_REG_520335_SHFT  0
+#define HWIO_REG_520335_IN  in_dword_masked(\
+	HWIO_REG_520335_ADDR, HWIO_REG_520335_RMSK)
+#define HWIO_REG_520335_INM(m) \
+	in_dword_masked(HWIO_REG_520335_ADDR, m)
+#define HWIO_REG_520335_OUT(v) \
+	out_dword(HWIO_REG_520335_ADDR, v)
+#define HWIO_REG_520335_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_520335_ADDR, m, v, HWIO_REG_520335_IN);
+#define HWIO_REG_520335_REG_196943_BMSK  0xffffffff
+#define HWIO_REG_520335_REG_196943_SHFT  0
+
+#define HWIO_REG_196943_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000207c)
+#define HWIO_REG_196943_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000207c)
+#define HWIO_REG_196943_RMSK  0xffffffff
+#define HWIO_REG_196943_SHFT  0
+#define HWIO_REG_196943_IN  in_dword_masked(\
+	HWIO_REG_196943_ADDR, HWIO_REG_196943_RMSK)
+#define HWIO_REG_196943_INM(m) \
+	in_dword_masked(HWIO_REG_196943_ADDR, m)
+#define HWIO_REG_196943_OUT(v) \
+	out_dword(HWIO_REG_196943_ADDR, v)
+#define HWIO_REG_196943_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_196943_ADDR, m, v, HWIO_REG_196943_IN);
+#define HWIO_REG_196943_REG_196943_BMSK  0xffffffff
+#define HWIO_REG_196943_REG_196943_SHFT  0
+
+#define HWIO_REG_313350_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00002080)
+#define HWIO_REG_313350_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00002080)
+#define HWIO_REG_313350_RMSK  0x7ffff
+#define HWIO_REG_313350_SHFT  0
+#define HWIO_REG_313350_IN  in_dword_masked(\
+	HWIO_REG_313350_ADDR, HWIO_REG_313350_RMSK)
+#define HWIO_REG_313350_INM(m) \
+	in_dword_masked(HWIO_REG_313350_ADDR, m)
+#define HWIO_REG_313350_OUT(v) \
+	out_dword(HWIO_REG_313350_ADDR, v)
+#define HWIO_REG_313350_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_313350_ADDR, m, v, HWIO_REG_313350_IN);
+#define HWIO_REG_313350_CH_DEC_TYPE_BMSK  0x70000
+#define HWIO_REG_313350_CH_DEC_TYPE_SHFT  0x10
+#define HWIO_REG_313350_CH_INST_ID_BMSK   0xffff
+#define HWIO_REG_313350_CH_INST_ID_SHFT   0
+
+#define HWIO_REG_980194_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00002084)
+#define HWIO_REG_980194_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00002084)
+#define HWIO_REG_980194_RMSK  0xffffffff
+#define HWIO_REG_980194_SHFT  0
+#define HWIO_REG_980194_IN  in_dword_masked(\
+	HWIO_REG_980194_ADDR, HWIO_REG_980194_RMSK)
+#define HWIO_REG_980194_INM(m) \
+	in_dword_masked(HWIO_REG_980194_ADDR, m)
+#define HWIO_REG_980194_OUT(v) \
+	out_dword(HWIO_REG_980194_ADDR, v)
+#define HWIO_REG_980194_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_980194_ADDR, m, v, HWIO_REG_980194_IN);
+#define HWIO_REG_980194_REG_980194_BMSK  0xffffffff
+#define HWIO_REG_980194_REG_980194_SHFT  0
+
+#define HWIO_REG_936704_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00002088)
+#define HWIO_REG_936704_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00002088)
+#define HWIO_REG_936704_RMSK  0xffffffff
+#define HWIO_REG_936704_SHFT  0
+#define HWIO_REG_936704_IN  in_dword_masked(\
+	HWIO_REG_936704_ADDR, HWIO_REG_936704_RMSK)
+#define HWIO_REG_936704_INM(m) \
+	in_dword_masked(HWIO_REG_936704_ADDR, m)
+#define HWIO_REG_936704_OUT(v) \
+	out_dword(HWIO_REG_936704_ADDR, v)
+#define HWIO_REG_936704_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_936704_ADDR, m, v, HWIO_REG_936704_IN);
+#define HWIO_REG_936704_REG_936704_BMSK  0xffffffff
+#define HWIO_REG_936704_REG_936704_SHFT  0
+
+#define HWIO_REG_821977_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000208c)
+#define HWIO_REG_821977_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000208c)
+#define HWIO_REG_821977_RMSK  0xffffffff
+#define HWIO_REG_821977_SHFT  0
+#define HWIO_REG_821977_IN  in_dword_masked(\
+	HWIO_REG_821977_ADDR, HWIO_REG_821977_RMSK)
+#define HWIO_REG_821977_INM(m) \
+	in_dword_masked(HWIO_REG_821977_ADDR, m)
+#define HWIO_REG_821977_OUT(v) \
+	out_dword(HWIO_REG_821977_ADDR, v)
+#define HWIO_REG_821977_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_821977_ADDR, m, v, HWIO_REG_821977_IN);
+#define HWIO_REG_821977_REG_821977_BMSK  0xffffffff
+#define HWIO_REG_821977_REG_821977_SHFT  0
+
+#define HWIO_REG_655721_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00002090)
+#define HWIO_REG_655721_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00002090)
+#define HWIO_REG_655721_RMSK  0xffffffff
+#define HWIO_REG_655721_SHFT  0
+#define HWIO_REG_655721_IN  in_dword_masked(\
+	HWIO_REG_655721_ADDR, HWIO_REG_655721_RMSK)
+#define HWIO_REG_655721_INM(m) \
+	in_dword_masked(HWIO_REG_655721_ADDR, m)
+#define HWIO_REG_655721_OUT(v) \
+	out_dword(HWIO_REG_655721_ADDR, v)
+#define HWIO_REG_655721_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_655721_ADDR, m, v, HWIO_REG_655721_IN);
+#define HWIO_REG_655721_REG_655721_BMSK  0xffffffff
+#define HWIO_REG_655721_REG_655721_SHFT  0
+
+#define HWIO_REG_548308_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00002094)
+#define HWIO_REG_548308_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00002094)
+#define HWIO_REG_548308_RMSK  0xffffffff
+#define HWIO_REG_548308_SHFT  0
+#define HWIO_REG_548308_IN  in_dword_masked(\
+	HWIO_REG_548308_ADDR, HWIO_REG_548308_RMSK)
+#define HWIO_REG_548308_INM(m) \
+	in_dword_masked(HWIO_REG_548308_ADDR, m)
+#define HWIO_REG_548308_OUT(v) \
+	out_dword(HWIO_REG_548308_ADDR, v)
+#define HWIO_REG_548308_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_548308_ADDR, m, v, HWIO_REG_548308_IN);
+#define HWIO_REG_548308_REG_548308_BMSK  0xffffffff
+#define HWIO_REG_548308_REG_548308_SHFT  0
+
+#define HWIO_REG_887095_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x00002098)
+#define HWIO_REG_887095_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x00002098)
+#define HWIO_REG_887095_RMSK  0xffffffff
+#define HWIO_REG_887095_SHFT  0
+#define HWIO_REG_887095_IN  in_dword_masked(\
+	HWIO_REG_887095_ADDR, HWIO_REG_887095_RMSK)
+#define HWIO_REG_887095_INM(m) \
+	in_dword_masked(HWIO_REG_887095_ADDR, m)
+#define HWIO_REG_887095_OUT(v) \
+	out_dword(HWIO_REG_887095_ADDR, v)
+#define HWIO_REG_887095_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_887095_ADDR, m, v, HWIO_REG_887095_IN);
+#define HWIO_REG_887095_REG_887095_BMSK  0xffffffff
+#define HWIO_REG_887095_REG_887095_SHFT  0
+
+#define HWIO_REG_576987_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000209c)
+#define HWIO_REG_576987_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000209c)
+#define HWIO_REG_576987_RMSK  0xffffffff
+#define HWIO_REG_576987_SHFT  0
+#define HWIO_REG_576987_IN  in_dword_masked(\
+	HWIO_REG_576987_ADDR, HWIO_REG_576987_RMSK)
+#define HWIO_REG_576987_INM(m) \
+	in_dword_masked(HWIO_REG_576987_ADDR, m)
+#define HWIO_REG_576987_OUT(v) \
+	out_dword(HWIO_REG_576987_ADDR, v)
+#define HWIO_REG_576987_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_576987_ADDR, m, v, HWIO_REG_576987_IN);
+#define HWIO_REG_576987_REG_576987_BMSK  0xffffffff
+#define HWIO_REG_576987_REG_576987_SHFT  0
+
+#define HWIO_REG_70448_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x000020a0)
+#define HWIO_REG_70448_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000020a0)
+#define HWIO_REG_70448_RMSK  0xffffffff
+#define HWIO_REG_70448_SHFT  0
+#define HWIO_REG_70448_IN  in_dword_masked(\
+	HWIO_REG_70448_ADDR, HWIO_REG_70448_RMSK)
+#define HWIO_REG_70448_INM(m) \
+	in_dword_masked(HWIO_REG_70448_ADDR, m)
+#define HWIO_REG_70448_OUT(v) \
+	out_dword(HWIO_REG_70448_ADDR, v)
+#define HWIO_REG_70448_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_70448_ADDR, m, v, HWIO_REG_70448_IN);
+#define HWIO_REG_70448_REG_70448_BMSK  0xffffffff
+#define HWIO_REG_70448_REG_70448_SHFT  0
+
+#define HWIO_REG_652528_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x000020a4)
+#define HWIO_REG_652528_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000020a4)
+#define HWIO_REG_652528_RMSK  0xffffffff
+#define HWIO_REG_652528_SHFT  0
+#define HWIO_REG_652528_IN  in_dword_masked(\
+	HWIO_REG_652528_ADDR, HWIO_REG_652528_RMSK)
+#define HWIO_REG_652528_INM(m) \
+	in_dword_masked(HWIO_REG_652528_ADDR, m)
+#define HWIO_REG_652528_OUT(v) \
+	out_dword(HWIO_REG_652528_ADDR, v)
+#define HWIO_REG_652528_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_652528_ADDR, m, v, HWIO_REG_652528_IN);
+#define HWIO_REG_652528_REG_652528_BMSK  0xffffffff
+#define HWIO_REG_652528_REG_652528_SHFT  0
+
+#define HWIO_REG_220637_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x000020a8)
+#define HWIO_REG_220637_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000020a8)
+#define HWIO_REG_220637_RMSK  0xffffffff
+#define HWIO_REG_220637_SHFT  0
+#define HWIO_REG_220637_IN  in_dword_masked(\
+	HWIO_REG_220637_ADDR, HWIO_REG_220637_RMSK)
+#define HWIO_REG_220637_INM(m) \
+	in_dword_masked(HWIO_REG_220637_ADDR, m)
+#define HWIO_REG_220637_OUT(v) \
+	out_dword(HWIO_REG_220637_ADDR, v)
+#define HWIO_REG_220637_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_220637_ADDR, m, v, HWIO_REG_220637_IN);
+#define HWIO_REG_220637_REG_220637_BMSK  0xffffffff
+#define HWIO_REG_220637_REG_220637_SHFT  0
+
+#define HWIO_REG_254093_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x000020ac)
+#define HWIO_REG_254093_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000020ac)
+#define HWIO_REG_254093_RMSK  0xffffffff
+#define HWIO_REG_254093_SHFT  0
+#define HWIO_REG_254093_IN  in_dword_masked(\
+	HWIO_REG_254093_ADDR, HWIO_REG_254093_RMSK)
+#define HWIO_REG_254093_INM(m) \
+	in_dword_masked(HWIO_REG_254093_ADDR, m)
+#define HWIO_REG_254093_OUT(v) \
+	out_dword(HWIO_REG_254093_ADDR, v)
+#define HWIO_REG_254093_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_254093_ADDR, m, v, HWIO_REG_254093_IN);
+#define HWIO_REG_254093_REG_254093_BMSK  0xffffffff
+#define HWIO_REG_254093_REG_254093_SHFT  0
+
+#define HWIO_REG_160474_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x000020b0)
+#define HWIO_REG_160474_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000020b0)
+#define HWIO_REG_160474_RMSK  0xffffffff
+#define HWIO_REG_160474_SHFT  0
+#define HWIO_REG_160474_IN  in_dword_masked(\
+	HWIO_REG_160474_ADDR, HWIO_REG_160474_RMSK)
+#define HWIO_REG_160474_INM(m) \
+	in_dword_masked(HWIO_REG_160474_ADDR, m)
+#define HWIO_REG_160474_OUT(v) \
+	out_dword(HWIO_REG_160474_ADDR, v)
+#define HWIO_REG_160474_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_160474_ADDR, m, v, HWIO_REG_160474_IN);
+#define HWIO_REG_160474_REG_160474_BMSK  0xffffffff
+#define HWIO_REG_160474_REG_160474_SHFT  0
+
+#define HWIO_REG_39027_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x000020b4)
+#define HWIO_REG_39027_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000020b4)
+#define HWIO_REG_39027_RMSK  0xffffffff
+#define HWIO_REG_39027_SHFT  0
+#define HWIO_REG_39027_IN  in_dword_masked(\
+	HWIO_REG_39027_ADDR, HWIO_REG_39027_RMSK)
+#define HWIO_REG_39027_INM(m) \
+	in_dword_masked(HWIO_REG_39027_ADDR, m)
+#define HWIO_REG_39027_OUT(v) \
+	out_dword(HWIO_REG_39027_ADDR, v)
+#define HWIO_REG_39027_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_39027_ADDR, m, v, HWIO_REG_39027_IN);
+#define HWIO_REG_39027_REG_39027_BMSK  0xffffffff
+#define HWIO_REG_39027_REG_39027_SHFT  0
+
+#define HWIO_REG_74049_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x000020b8)
+#define HWIO_REG_74049_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000020b8)
+#define HWIO_REG_74049_RMSK  0xffffffff
+#define HWIO_REG_74049_SHFT  0
+#define HWIO_REG_74049_IN  in_dword_masked(\
+	HWIO_REG_74049_ADDR, HWIO_REG_74049_RMSK)
+#define HWIO_REG_74049_INM(m) \
+	in_dword_masked(HWIO_REG_74049_ADDR, m)
+#define HWIO_REG_74049_OUT(v) \
+	out_dword(HWIO_REG_74049_ADDR, v)
+#define HWIO_REG_74049_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_74049_ADDR, m, v, HWIO_REG_74049_IN);
+#define HWIO_REG_74049_REG_74049_BMSK  0xffffffff
+#define HWIO_REG_74049_REG_74049_SHFT  0
+
+#define HWIO_REG_697870_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x000020bc)
+#define HWIO_REG_697870_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x000020bc)
+#define HWIO_REG_697870_RMSK  0xffffffff
+#define HWIO_REG_697870_SHFT  0
+#define HWIO_REG_697870_IN  in_dword_masked(\
+	HWIO_REG_697870_ADDR, HWIO_REG_697870_RMSK)
+#define HWIO_REG_697870_INM(m) \
+	in_dword_masked(HWIO_REG_697870_ADDR, m)
+#define HWIO_REG_697870_OUT(v) \
+	out_dword(HWIO_REG_697870_ADDR, v)
+#define HWIO_REG_697870_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_697870_ADDR, m, v, HWIO_REG_697870_IN);
+#define HWIO_REG_697870_REG_697870_BMSK  0xffffffff
+#define HWIO_REG_697870_REG_697870_SHFT  0
+
+#define HWIO_REG_783891_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000c504)
+#define HWIO_REG_783891_PHYS (VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000c504)
+#define HWIO_REG_783891_RMSK  0x7ffff
+#define HWIO_REG_783891_SHFT  0
+#define HWIO_REG_783891_IN  in_dword_masked(\
+	HWIO_REG_783891_ADDR, HWIO_REG_783891_RMSK)
+#define HWIO_REG_783891_INM(m) \
+	in_dword_masked(HWIO_REG_783891_ADDR, m)
+#define HWIO_REG_783891_OUT(v) \
+	out_dword(HWIO_REG_783891_ADDR, v)
+#define HWIO_REG_783891_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_783891_ADDR, m, v, HWIO_REG_783891_IN);
+#define HWIO_REG_783891_ENC_PIC_TYPE_USE_BMSK  0x40000
+#define HWIO_REG_783891_ENC_PIC_TYPE_USE_SHFT  0x12
+#define HWIO_REG_783891_B_FRM_CTRL_BMSK        0x30000
+#define HWIO_REG_783891_B_FRM_CTRL_SHFT        0x10
+#define HWIO_REG_783891_I_FRM_CTRL_BMSK        0xffff
+#define HWIO_REG_783891_I_FRM_CTRL_SHFT        0
+
+#define HWIO_REG_226332_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000c50c)
+#define HWIO_REG_226332_PHYS (VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000c50c)
+#define HWIO_REG_226332_RMSK  0x7
+#define HWIO_REG_226332_SHFT  0
+#define HWIO_REG_226332_IN  in_dword_masked(\
+	HWIO_REG_226332_ADDR, HWIO_REG_226332_RMSK)
+#define HWIO_REG_226332_INM(m)  in_dword_masked(HWIO_REG_226332_ADDR, m)
+#define HWIO_REG_226332_OUT(v)  out_dword(HWIO_REG_226332_ADDR, v)
+#define HWIO_REG_226332_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_226332_ADDR, m, v, HWIO_REG_226332_IN);
+#define HWIO_REG_226332_MSLICE_MODE_BMSK  0x6
+#define HWIO_REG_226332_MSLICE_MODE_SHFT  0x1
+#define HWIO_REG_226332_MSLICE_ENA_BMSK   0x1
+#define HWIO_REG_226332_MSLICE_ENA_SHFT   0
+
+#define HWIO_REG_696136_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000c510)
+#define HWIO_REG_696136_PHYS (VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000c510)
+#define HWIO_REG_696136_RMSK  0xffff
+#define HWIO_REG_696136_SHFT  0
+#define HWIO_REG_696136_IN  in_dword_masked(\
+	HWIO_REG_696136_ADDR, HWIO_REG_696136_RMSK)
+#define HWIO_REG_696136_INM(m)  in_dword_masked(HWIO_REG_696136_ADDR, m)
+#define HWIO_REG_696136_OUT(v)  out_dword(HWIO_REG_696136_ADDR, v)
+#define HWIO_REG_696136_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_696136_ADDR, m, v, HWIO_REG_696136_IN);
+#define HWIO_REG_696136_MSLICE_MB_BMSK  0xffff
+#define HWIO_REG_696136_MSLICE_MB_SHFT  0
+
+#define HWIO_REG_515564_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000c514)
+#define HWIO_REG_515564_PHYS (VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000c514)
+#define HWIO_REG_515564_RMSK  0xffffffff
+#define HWIO_REG_515564_SHFT  0
+#define HWIO_REG_515564_IN  in_dword_masked(\
+	HWIO_REG_515564_ADDR, HWIO_REG_515564_RMSK)
+#define HWIO_REG_515564_INM(m) \
+	in_dword_masked(HWIO_REG_515564_ADDR, m)
+#define HWIO_REG_515564_OUT(v)  out_dword(HWIO_REG_515564_ADDR, v)
+#define HWIO_REG_515564_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_515564_ADDR, m, v, HWIO_REG_515564_IN);
+#define HWIO_REG_515564_MSLICE_BIT_BMSK  0xffffffff
+#define HWIO_REG_515564_MSLICE_BIT_SHFT  0
+
+#define HWIO_REG_886210_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000c518)
+#define HWIO_REG_886210_PHYS (VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000c518)
+#define HWIO_REG_886210_RMSK  0xffff
+#define HWIO_REG_886210_SHFT  0
+#define HWIO_REG_886210_IN  in_dword_masked(\
+	HWIO_REG_886210_ADDR, HWIO_REG_886210_RMSK)
+#define HWIO_REG_886210_INM(m)  in_dword_masked(HWIO_REG_886210_ADDR, m)
+#define HWIO_REG_886210_OUT(v)  out_dword(HWIO_REG_886210_ADDR, v)
+#define HWIO_REG_886210_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_886210_ADDR, m, v, HWIO_REG_886210_IN);
+#define HWIO_REG_886210_CIR_NUM_BMSK  0xffff
+#define HWIO_REG_886210_CIR_NUM_SHFT  0
+
+#define HWIO_REG_645603_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000c51c)
+#define HWIO_REG_645603_PHYS (VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000c51c)
+#define HWIO_REG_645603_RMSK  0x3
+#define HWIO_REG_645603_SHFT  0
+#define HWIO_REG_645603_IN  in_dword_masked(\
+	HWIO_REG_645603_ADDR, HWIO_REG_645603_RMSK)
+#define HWIO_REG_645603_INM(m) \
+	in_dword_masked(HWIO_REG_645603_ADDR, m)
+#define HWIO_REG_645603_OUT(v)  out_dword(HWIO_REG_645603_ADDR, v)
+#define HWIO_REG_645603_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_645603_ADDR, m, v, HWIO_REG_645603_IN);
+#define HWIO_REG_645603_REG_645603_BMSK  0x3
+#define HWIO_REG_645603_REG_645603_SHFT  0
+
+#define HWIO_REG_811733_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000c520)
+#define HWIO_REG_811733_PHYS (VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000c520)
+#define HWIO_REG_811733_RMSK  0x80ffffff
+#define HWIO_REG_811733_SHFT  0
+#define HWIO_REG_811733_IN  in_dword_masked(\
+	HWIO_REG_811733_ADDR, HWIO_REG_811733_RMSK)
+#define HWIO_REG_811733_INM(m) \
+	in_dword_masked(HWIO_REG_811733_ADDR, m)
+#define HWIO_REG_811733_OUT(v)  out_dword(HWIO_REG_811733_ADDR, v)
+#define HWIO_REG_811733_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_811733_ADDR, m, v, HWIO_REG_811733_IN);
+#define HWIO_REG_811733_PAD_CTRL_ON_BMSK    0x80000000
+#define HWIO_REG_811733_PAD_CTRL_ON_SHFT    0x1f
+#define HWIO_REG_811733_CR_PAD_VIDC_BMSK    0xff0000
+#define HWIO_REG_811733_CR_PAD_VIDC_SHFT    0x10
+#define HWIO_REG_811733_CB_PAD_VIDC_BMSK    0xff00
+#define HWIO_REG_811733_CB_PAD_VIDC_SHFT    0x8
+#define HWIO_REG_811733_LUMA_PAD_VIDC_BMSK  0xff
+#define HWIO_REG_811733_LUMA_PAD_VIDC_SHFT  0
+
+#define HWIO_REG_676866_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000c588)
+#define HWIO_REG_676866_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000c588)
+#define HWIO_REG_676866_RMSK  0xffff
+#define HWIO_REG_676866_SHFT  0
+#define HWIO_REG_676866_IN  in_dword_masked(\
+	HWIO_REG_676866_ADDR, HWIO_REG_676866_RMSK)
+#define HWIO_REG_676866_INM(m) \
+	in_dword_masked(HWIO_REG_676866_ADDR, m)
+#define HWIO_REG_676866_OUT(v) \
+	out_dword(HWIO_REG_676866_ADDR, v)
+#define HWIO_REG_676866_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_676866_ADDR, m, v, HWIO_REG_676866_IN);
+#define HWIO_REG_676866_REG_676866_BMSK  0xffff
+#define HWIO_REG_676866_REG_676866_SHFT  0
+
+#define HWIO_REG_54267_ADDR \
+	(VIDC_BLACKBIRD_REG_BASE + 0x0000c58c)
+#define HWIO_REG_54267_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000c58c)
+#define HWIO_REG_54267_RMSK  0xffff
+#define HWIO_REG_54267_SHFT  0
+#define HWIO_REG_54267_IN  in_dword_masked(\
+	HWIO_REG_54267_ADDR,\
+	HWIO_REG_54267_RMSK)
+#define HWIO_REG_54267_INM(m) \
+	in_dword_masked(HWIO_REG_54267_ADDR, m)
+#define HWIO_REG_54267_OUT(v) \
+	out_dword(HWIO_REG_54267_ADDR, v)
+#define HWIO_REG_54267_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_54267_ADDR, m, v,\
+	HWIO_REG_54267_IN);
+#define HWIO_REG_54267_REG_54267_BMSK  0xffff
+#define HWIO_REG_54267_REG_54267_SHFT  0
+
+#define HWIO_REG_559908_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000c5a0)
+#define HWIO_REG_559908_PHYS (VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000c5a0)
+#define HWIO_REG_559908_RMSK  0x33f
+#define HWIO_REG_559908_SHFT  0
+#define HWIO_REG_559908_IN  in_dword_masked(\
+	HWIO_REG_559908_ADDR, HWIO_REG_559908_RMSK)
+#define HWIO_REG_559908_INM(m)  in_dword_masked(HWIO_REG_559908_ADDR, m)
+#define HWIO_REG_559908_OUT(v)  out_dword(HWIO_REG_559908_ADDR, v)
+#define HWIO_REG_559908_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_559908_ADDR, m, v, HWIO_REG_559908_IN);
+#define HWIO_REG_559908_FR_RC_EN_BMSK  0x200
+#define HWIO_REG_559908_FR_RC_EN_SHFT  0x9
+#define HWIO_REG_559908_MB_RC_EN_BMSK  0x100
+#define HWIO_REG_559908_MB_RC_EN_SHFT  0x8
+#define HWIO_REG_559908_FRAME_QP_BMSK  0x3f
+#define HWIO_REG_559908_FRAME_QP_SHFT  0
+
+#define HWIO_REG_977937_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000d0d0)
+#define HWIO_REG_977937_PHYS (VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000d0d0)
+#define HWIO_REG_977937_RMSK  0xff
+#define HWIO_REG_977937_SHFT  0
+#define HWIO_REG_977937_IN  in_dword_masked(\
+	HWIO_REG_977937_ADDR, HWIO_REG_977937_RMSK)
+#define HWIO_REG_977937_INM(m)  in_dword_masked(HWIO_REG_977937_ADDR, m)
+#define HWIO_REG_977937_OUT(v)  out_dword(HWIO_REG_977937_ADDR, v)
+#define HWIO_REG_977937_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_977937_ADDR, m, v, HWIO_REG_977937_IN);
+#define HWIO_REG_977937_FRAME_RATE_BMSK  0xff
+#define HWIO_REG_977937_FRAME_RATE_SHFT  0
+
+#define HWIO_REG_166135_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000c5a8)
+#define HWIO_REG_166135_PHYS (VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000c5a8)
+#define HWIO_REG_166135_RMSK  0xffffffff
+#define HWIO_REG_166135_SHFT  0
+#define HWIO_REG_166135_IN  in_dword_masked(\
+	HWIO_REG_166135_ADDR, HWIO_REG_166135_RMSK)
+#define HWIO_REG_166135_INM(m)  in_dword_masked(HWIO_REG_166135_ADDR, m)
+#define HWIO_REG_166135_OUT(v)  out_dword(HWIO_REG_166135_ADDR, v)
+#define HWIO_REG_166135_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_166135_ADDR, m, v, HWIO_REG_166135_IN);
+#define HWIO_REG_166135_BIT_RATE_BMSK  0xffffffff
+#define HWIO_REG_166135_BIT_RATE_SHFT  0
+
+#define HWIO_REG_109072_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000c5ac)
+#define HWIO_REG_109072_PHYS (VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000c5ac)
+#define HWIO_REG_109072_RMSK  0x3fff
+#define HWIO_REG_109072_SHFT  0
+#define HWIO_REG_109072_IN  in_dword_masked(\
+	HWIO_REG_109072_ADDR, HWIO_REG_109072_RMSK)
+#define HWIO_REG_109072_INM(m)  in_dword_masked(HWIO_REG_109072_ADDR, m)
+#define HWIO_REG_109072_OUT(v)  out_dword(HWIO_REG_109072_ADDR, v)
+#define HWIO_REG_109072_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_109072_ADDR, m, v, HWIO_REG_109072_IN);
+#define HWIO_REG_109072_MAX_QP_BMSK  0x3f00
+#define HWIO_REG_109072_MAX_QP_SHFT  0x8
+#define HWIO_REG_109072_MIN_QP_BMSK  0x3f
+#define HWIO_REG_109072_MIN_QP_SHFT  0
+
+#define HWIO_REG_550322_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000c5b0)
+#define HWIO_REG_550322_PHYS (VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000c5b0)
+#define HWIO_REG_550322_RMSK  0xffff
+#define HWIO_REG_550322_SHFT  0
+#define HWIO_REG_550322_IN  in_dword_masked(\
+	HWIO_REG_550322_ADDR, HWIO_REG_550322_RMSK)
+#define HWIO_REG_550322_INM(m)  in_dword_masked(HWIO_REG_550322_ADDR, m)
+#define HWIO_REG_550322_OUT(v)  out_dword(HWIO_REG_550322_ADDR, v)
+#define HWIO_REG_550322_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_550322_ADDR, m, v, HWIO_REG_550322_IN);
+#define HWIO_REG_550322_REACT_PARA_BMSK  0xffff
+#define HWIO_REG_550322_REACT_PARA_SHFT  0
+
+#define HWIO_REG_949086_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000c5b4)
+#define HWIO_REG_949086_PHYS (VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000c5b4)
+#define HWIO_REG_949086_RMSK  0xf
+#define HWIO_REG_949086_SHFT  0
+#define HWIO_REG_949086_IN  in_dword_masked(\
+	HWIO_REG_949086_ADDR, HWIO_REG_949086_RMSK)
+#define HWIO_REG_949086_INM(m)  in_dword_masked(HWIO_REG_949086_ADDR, m)
+#define HWIO_REG_949086_OUT(v)  out_dword(HWIO_REG_949086_ADDR, v)
+#define HWIO_REG_949086_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_949086_ADDR, m, v, HWIO_REG_949086_IN);
+#define HWIO_REG_949086_DARK_DISABLE_BMSK    0x8
+#define HWIO_REG_949086_DARK_DISABLE_SHFT    0x3
+#define HWIO_REG_949086_SMOOTH_DISABLE_BMSK  0x4
+#define HWIO_REG_949086_SMOOTH_DISABLE_SHFT  0x2
+#define HWIO_REG_949086_STATIC_DISABLE_BMSK  0x2
+#define HWIO_REG_949086_STATIC_DISABLE_SHFT  0x1
+#define HWIO_REG_949086_ACT_DISABLE_BMSK     0x1
+#define HWIO_REG_949086_ACT_DISABLE_SHFT     0
+
+#define HWIO_REG_447796_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000d004)
+#define HWIO_REG_447796_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000d004)
+#define HWIO_REG_447796_RMSK  0x1
+#define HWIO_REG_447796_SHFT  0
+#define HWIO_REG_447796_IN  in_dword_masked(\
+	HWIO_REG_447796_ADDR, HWIO_REG_447796_RMSK)
+#define HWIO_REG_447796_INM(m) \
+	in_dword_masked(HWIO_REG_447796_ADDR, m)
+#define HWIO_REG_447796_OUT(v) \
+	out_dword(HWIO_REG_447796_ADDR, v)
+#define HWIO_REG_447796_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_447796_ADDR, m, v, HWIO_REG_447796_IN);
+#define HWIO_REG_447796_REG_447796_BMSK  0x1
+#define HWIO_REG_447796_REG_447796_SHFT  0
+
+#define HWIO_REG_744348_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000d010)
+#define HWIO_REG_744348_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000d010)
+#define HWIO_REG_744348_RMSK  0x7f
+#define HWIO_REG_744348_SHFT  0
+#define HWIO_REG_744348_IN  in_dword_masked(\
+	HWIO_REG_744348_ADDR, HWIO_REG_744348_RMSK)
+#define HWIO_REG_744348_INM(m) \
+	in_dword_masked(HWIO_REG_744348_ADDR, m)
+#define HWIO_REG_744348_OUT(v)  \
+	out_dword(HWIO_REG_744348_ADDR, v)
+#define HWIO_REG_744348_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_744348_ADDR, m, v, HWIO_REG_744348_IN);
+#define HWIO_REG_744348_P_BMSK  0x60
+#define HWIO_REG_744348_P_SHFT  0x5
+#define HWIO_REG_744348_BMSK  0x1f
+#define HWIO_REG_744348_SHFT  0
+
+#define HWIO_REG_672163_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000d034)
+#define HWIO_REG_672163_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000d034)
+#define HWIO_REG_672163_RMSK  0x1
+#define HWIO_REG_672163_SHFT  0
+#define HWIO_REG_672163_IN  in_dword_masked(\
+	HWIO_REG_672163_ADDR, HWIO_REG_672163_RMSK)
+#define HWIO_REG_672163_INM(m) \
+	in_dword_masked(HWIO_REG_672163_ADDR, m)
+#define HWIO_REG_672163_OUT(v) \
+	out_dword(HWIO_REG_672163_ADDR, v)
+#define HWIO_REG_672163_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_672163_ADDR, m, v,\
+	HWIO_REG_672163_IN);
+#define HWIO_REG_672163_ENC_TRANS_8X8_FLAG_BMSK  0x1
+#define HWIO_REG_672163_ENC_TRANS_8X8_FLAG_SHFT  0
+
+#define HWIO_REG_780908_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000d140)
+#define HWIO_REG_780908_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000d140)
+#define HWIO_REG_780908_RMSK  0x1
+#define HWIO_REG_780908_SHFT  0
+#define HWIO_REG_780908_IN  in_dword_masked(\
+	HWIO_REG_780908_ADDR, HWIO_REG_780908_RMSK)
+#define HWIO_REG_780908_INM(m)  in_dword_masked(\
+	HWIO_REG_780908_ADDR, m)
+#define HWIO_REG_780908_OUT(v)  out_dword(\
+	HWIO_REG_780908_ADDR, v)
+#define HWIO_REG_780908_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_780908_ADDR, m, v,\
+	HWIO_REG_780908_IN)
+#define HWIO_REG_780908_REG_780908_BMSK  0x1
+#define HWIO_REG_780908_REG_780908_SHFT  0
+
+#define HWIO_REG_330132_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000e008)
+#define HWIO_REG_330132_PHYS \
+	(VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000e008)
+#define HWIO_REG_330132_RMSK  0x1
+#define HWIO_REG_330132_SHFT  0
+#define HWIO_REG_330132_IN  in_dword_masked(\
+	HWIO_REG_330132_ADDR, HWIO_REG_330132_RMSK)
+#define HWIO_REG_330132_INM(m) \
+	in_dword_masked(HWIO_REG_330132_ADDR, m)
+#define HWIO_REG_330132_OUT(v) \
+	out_dword(HWIO_REG_330132_ADDR, v)
+#define HWIO_REG_330132_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_330132_ADDR, m, v, HWIO_REG_330132_IN);
+#define HWIO_REG_330132_MPEG4_QUART_PXL_BMSK  0x1
+#define HWIO_REG_330132_MPEG4_QUART_PXL_SHFT  0
+
+
+#define VIDC_MGEN2MAXI_REG_BASE (VIDC_BASE + 0x00080000)
+#define VIDC_MGEN2MAXI_REG_BASE_PHYS 0x04480000
+
+#define HWIO_REG_916352_ADDR (VIDC_MGEN2MAXI_REG_BASE + 00000000)
+#define HWIO_REG_916352_PHYS (VIDC_MGEN2MAXI_REG_BASE_PHYS + 00000000)
+#define HWIO_REG_916352_RMSK  0xff
+#define HWIO_REG_916352_SHFT  0
+#define HWIO_REG_916352_IN  in_dword_masked(\
+	HWIO_REG_916352_ADDR, HWIO_REG_916352_RMSK)
+#define HWIO_REG_916352_INM(m) \
+	in_dword_masked(HWIO_REG_916352_ADDR, m)
+#define HWIO_REG_916352_VERSION_BMSK  0xff
+#define HWIO_REG_916352_VERSION_SHFT  0
+
+#define HWIO_REG_5519_ADDR (VIDC_MGEN2MAXI_REG_BASE + 0x00000004)
+#define HWIO_REG_5519_PHYS \
+	(VIDC_MGEN2MAXI_REG_BASE_PHYS + 0x00000004)
+#define HWIO_REG_5519_RMSK  0x1
+#define HWIO_REG_5519_SHFT  0
+#define HWIO_REG_5519_IN  in_dword_masked(\
+	HWIO_REG_5519_ADDR, HWIO_REG_5519_RMSK)
+#define HWIO_REG_5519_INM(m) \
+	in_dword_masked(HWIO_REG_5519_ADDR, m)
+#define HWIO_REG_5519_OUT(v) \
+	out_dword(HWIO_REG_5519_ADDR, v)
+#define HWIO_REG_5519_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_5519_ADDR, m, v, HWIO_REG_5519_IN);
+#define HWIO_REG_5519_AXI_AOOORD_BMSK  0x1
+#define HWIO_REG_5519_AXI_AOOORD_SHFT  0
+
+#define HWIO_REG_606364_ADDR (VIDC_MGEN2MAXI_REG_BASE + 0x00000008)
+#define HWIO_REG_606364_PHYS \
+	(VIDC_MGEN2MAXI_REG_BASE_PHYS + 0x00000008)
+#define HWIO_REG_606364_RMSK  0x1
+#define HWIO_REG_606364_SHFT  0
+#define HWIO_REG_606364_IN  in_dword_masked(\
+	HWIO_REG_606364_ADDR, HWIO_REG_606364_RMSK)
+#define HWIO_REG_606364_INM(m) \
+	in_dword_masked(HWIO_REG_606364_ADDR, m)
+#define HWIO_REG_606364_OUT(v) \
+	out_dword(HWIO_REG_606364_ADDR, v)
+#define HWIO_REG_606364_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_606364_ADDR, m, v, HWIO_REG_606364_IN);
+#define HWIO_REG_606364_AXI_AOOOWR_BMSK  0x1
+#define HWIO_REG_606364_AXI_AOOOWR_SHFT  0
+
+#define HWIO_REG_821472_ADDR (VIDC_MGEN2MAXI_REG_BASE + 0x0000000c)
+#define HWIO_REG_821472_PHYS \
+	(VIDC_MGEN2MAXI_REG_BASE_PHYS + 0x0000000c)
+#define HWIO_REG_821472_RMSK  0xf
+#define HWIO_REG_821472_SHFT  0
+#define HWIO_REG_821472_IN  in_dword_masked(\
+	HWIO_REG_821472_ADDR, HWIO_REG_821472_RMSK)
+#define HWIO_REG_821472_INM(m) \
+	in_dword_masked(HWIO_REG_821472_ADDR, m)
+#define HWIO_REG_821472_OUT(v) \
+	out_dword(HWIO_REG_821472_ADDR, v)
+#define HWIO_REG_821472_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_821472_ADDR, m, v, HWIO_REG_821472_IN);
+#define HWIO_REG_821472_AXI_TYPE_BMSK  0xf
+#define HWIO_REG_821472_AXI_TYPE_SHFT  0
+
+#define HWIO_REG_988424_ADDR \
+	(VIDC_MGEN2MAXI_REG_BASE + 0x00000010)
+#define HWIO_REG_988424_PHYS \
+	(VIDC_MGEN2MAXI_REG_BASE_PHYS + 0x00000010)
+#define HWIO_REG_988424_RMSK  0x3
+#define HWIO_REG_988424_SHFT  0
+#define HWIO_REG_988424_IN  in_dword_masked(\
+	HWIO_REG_988424_ADDR,\
+	HWIO_REG_988424_RMSK)
+#define HWIO_REG_988424_INM(m) \
+	in_dword_masked(HWIO_REG_988424_ADDR, m)
+#define HWIO_REG_988424_OUT(v) \
+	out_dword(HWIO_REG_988424_ADDR, v)
+#define HWIO_REG_988424_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_988424_ADDR, m, v,\
+	HWIO_REG_988424_IN);
+#define HWIO_REG_988424_AXI_AREQPRIORITY_BMSK  0x3
+#define HWIO_REG_988424_AXI_AREQPRIORITY_SHFT  0
+
+#define HWIO_REG_471159_ADDR (VIDC_MGEN2MAXI_REG_BASE + 0x00000014)
+#define HWIO_REG_471159_PHYS (VIDC_MGEN2MAXI_REG_BASE_PHYS + 0x00000014)
+#define HWIO_REG_471159_RMSK  0x801f1111
+#define HWIO_REG_471159_SHFT  0
+#define HWIO_REG_471159_IN  in_dword_masked(\
+	HWIO_REG_471159_ADDR, HWIO_REG_471159_RMSK)
+#define HWIO_REG_471159_INM(m) \
+	in_dword_masked(HWIO_REG_471159_ADDR, m)
+#define HWIO_REG_471159_OUT(v) \
+	out_dword(HWIO_REG_471159_ADDR, v)
+#define HWIO_REG_471159_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_471159_ADDR, m, v, HWIO_REG_471159_IN);
+#define HWIO_REG_471159_AXI_INTR_CLR_BMSK           0x80000000
+#define HWIO_REG_471159_AXI_INTR_CLR_SHFT           0x1f
+#define HWIO_REG_471159_AXI_WDTIMEOUT_LOG2_BMSK     0x1e0000
+#define HWIO_REG_471159_AXI_WDTIMEOUT_LOG2_SHFT     0x11
+#define HWIO_REG_471159_AXI_HALT_ON_WDTIMEOUT_BMSK  0x10000
+#define HWIO_REG_471159_AXI_HALT_ON_WDTIMEOUT_SHFT  0x10
+#define HWIO_REG_471159_AXI_HALT_ON_WR_ERR_BMSK     0x1000
+#define HWIO_REG_471159_AXI_HALT_ON_WR_ERR_SHFT     0xc
+#define HWIO_REG_471159_AXI_HALT_ON_RD_ERR_BMSK     0x100
+#define HWIO_REG_471159_AXI_HALT_ON_RD_ERR_SHFT     0x8
+#define HWIO_REG_471159_AXI_RESET_BMSK              0x10
+#define HWIO_REG_471159_AXI_RESET_SHFT              0x4
+#define HWIO_REG_471159_AXI_HALT_REQ_BMSK           0x1
+#define HWIO_REG_471159_AXI_HALT_REQ_SHFT           0
+
+#define HWIO_REG_437878_ADDR (VIDC_MGEN2MAXI_REG_BASE + 0x00000018)
+#define HWIO_REG_437878_PHYS \
+	(VIDC_MGEN2MAXI_REG_BASE_PHYS + 0x00000018)
+#define HWIO_REG_437878_RMSK  0x3333
+#define HWIO_REG_437878_SHFT  0
+#define HWIO_REG_437878_IN  in_dword_masked(\
+	HWIO_REG_437878_ADDR, HWIO_REG_437878_RMSK)
+#define HWIO_REG_437878_INM(m) \
+	in_dword_masked(HWIO_REG_437878_ADDR, m)
+#define HWIO_REG_437878_AXI_WDTIMEOUT_INTR_BMSK  0x3000
+#define HWIO_REG_437878_AXI_WDTIMEOUT_INTR_SHFT  0xc
+#define HWIO_REG_437878_AXI_ERR_INTR_BMSK        0x300
+#define HWIO_REG_437878_AXI_ERR_INTR_SHFT        0x8
+#define HWIO_REG_437878_AXI_IDLE_BMSK            0x30
+#define HWIO_REG_437878_AXI_IDLE_SHFT            0x4
+#define HWIO_REG_437878_AXI_HALT_ACK_BMSK        0x3
+#define HWIO_REG_437878_AXI_HALT_ACK_SHFT        0
+
+#define HWIO_REG_736158_ADDR \
+	(VIDC_MGEN2MAXI_REG_BASE + 0x0000001c)
+#define HWIO_REG_736158_PHYS \
+	(VIDC_MGEN2MAXI_REG_BASE_PHYS + 0x0000001c)
+#define HWIO_REG_736158_RMSK  0x10fff
+#define HWIO_REG_736158_SHFT  0
+#define HWIO_REG_736158_IN  in_dword_masked(\
+	HWIO_REG_736158_ADDR,\
+	HWIO_REG_736158_RMSK)
+#define HWIO_REG_736158_INM(m) \
+	in_dword_masked(HWIO_REG_736158_ADDR, m)
+#define HWIO_REG_736158_AXI_WDTIMEOUT_BMSK  0x10000
+#define HWIO_REG_736158_AXI_WDTIMEOUT_SHFT  0x10
+#define HWIO_REG_736158_AXI_ERR_BMSK        0x800
+#define HWIO_REG_736158_AXI_ERR_SHFT        0xb
+#define HWIO_REG_736158_AXI_ERR_TYPE_BMSK   0x400
+#define HWIO_REG_736158_AXI_ERR_TYPE_SHFT   0xa
+#define HWIO_REG_736158_AXI_RESP_BMSK       0x300
+#define HWIO_REG_736158_AXI_RESP_SHFT       0x8
+#define HWIO_REG_736158_AXI_MID_BMSK        0xf0
+#define HWIO_REG_736158_AXI_MID_SHFT        0x4
+#define HWIO_REG_736158_AXI_TID_BMSK        0xf
+#define HWIO_REG_736158_AXI_TID_SHFT        0
+
+#define HWIO_REG_598415_ADDR \
+	(VIDC_MGEN2MAXI_REG_BASE + 0x00000020)
+#define HWIO_REG_598415_PHYS \
+	(VIDC_MGEN2MAXI_REG_BASE_PHYS + 0x00000020)
+#define HWIO_REG_598415_RMSK  0x10fff
+#define HWIO_REG_598415_SHFT  0
+#define HWIO_REG_598415_IN  in_dword_masked(\
+	HWIO_REG_598415_ADDR,\
+	HWIO_REG_598415_RMSK)
+#define HWIO_REG_598415_INM(m) \
+	in_dword_masked(HWIO_REG_598415_ADDR, m)
+#define HWIO_REG_598415_AXI_WDTIMEOUT_BMSK  0x10000
+#define HWIO_REG_598415_AXI_WDTIMEOUT_SHFT  0x10
+#define HWIO_REG_598415_AXI_ERR_BMSK        0x800
+#define HWIO_REG_598415_AXI_ERR_SHFT        0xb
+#define HWIO_REG_598415_AXI_ERR_TYPE_BMSK   0x400
+#define HWIO_REG_598415_AXI_ERR_TYPE_SHFT   0xa
+#define HWIO_REG_598415_AXI_RESP_BMSK       0x300
+#define HWIO_REG_598415_AXI_RESP_SHFT       0x8
+#define HWIO_REG_598415_AXI_MID_BMSK        0xf0
+#define HWIO_REG_598415_AXI_MID_SHFT        0x4
+#define HWIO_REG_598415_AXI_TID_BMSK        0xf
+#define HWIO_REG_598415_AXI_TID_SHFT        0
+
+#define HWIO_REG_439061_ADDR (VIDC_MGEN2MAXI_REG_BASE + 0x00000024)
+#define HWIO_REG_439061_PHYS \
+	(VIDC_MGEN2MAXI_REG_BASE_PHYS + 0x00000024)
+#define HWIO_REG_439061_RMSK  0x11111ff
+#define HWIO_REG_439061_SHFT  0
+#define HWIO_REG_439061_IN  in_dword_masked(\
+	HWIO_REG_439061_ADDR, HWIO_REG_439061_RMSK)
+#define HWIO_REG_439061_INM(m) \
+	in_dword_masked(HWIO_REG_439061_ADDR, m)
+#define HWIO_REG_439061_OUT(v) \
+	out_dword(HWIO_REG_439061_ADDR, v)
+#define HWIO_REG_439061_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_439061_ADDR, m, v, HWIO_REG_439061_IN);
+#define HWIO_REG_439061_AXI_RD_LAT_REP_EN_BMSK  0x1000000
+#define HWIO_REG_439061_AXI_RD_LAT_REP_EN_SHFT  0x18
+#define HWIO_REG_439061_AXI_LSFR_EN_BMSK        0x100000
+#define HWIO_REG_439061_AXI_LSFR_EN_SHFT        0x14
+#define HWIO_REG_439061_AXI_MISR_RES_BMSK       0x10000
+#define HWIO_REG_439061_AXI_MISR_RES_SHFT       0x10
+#define HWIO_REG_439061_AXI_MISR_EN_BMSK        0x1000
+#define HWIO_REG_439061_AXI_MISR_EN_SHFT        0xc
+#define HWIO_REG_439061_AXI_MISR_WD_BMSK        0x100
+#define HWIO_REG_439061_AXI_MISR_WD_SHFT        0x8
+#define HWIO_REG_439061_AXI_CTR_EN_BMSK         0x80
+#define HWIO_REG_439061_AXI_CTR_EN_SHFT         0x7
+#define HWIO_REG_439061_AXI_CTR_RES_BMSK        0x40
+#define HWIO_REG_439061_AXI_CTR_RES_SHFT        0x6
+#define HWIO_REG_439061_AXI_TEST_ARB_SEL_BMSK   0x30
+#define HWIO_REG_439061_AXI_TEST_ARB_SEL_SHFT   0x4
+#define HWIO_REG_439061_AXI_TEST_OUT_SEL_BMSK   0xf
+#define HWIO_REG_439061_AXI_TEST_OUT_SEL_SHFT   0
+
+#define HWIO_REG_573121_ADDR \
+	(VIDC_MGEN2MAXI_REG_BASE + 0x00000028)
+#define HWIO_REG_573121_PHYS \
+	(VIDC_MGEN2MAXI_REG_BASE_PHYS + 0x00000028)
+#define HWIO_REG_573121_RMSK  0xffffffff
+#define HWIO_REG_573121_SHFT  0
+#define HWIO_REG_573121_IN  in_dword_masked(\
+	HWIO_REG_573121_ADDR,\
+	HWIO_REG_573121_RMSK)
+#define HWIO_REG_573121_INM(m) \
+	in_dword_masked(HWIO_REG_573121_ADDR, m)
+#define HWIO_REG_573121_AXI_TEST_OUT_BMSK  0xffffffff
+#define HWIO_REG_573121_AXI_TEST_OUT_SHFT  0
+
+#define HWIO_REG_806413_ADDR \
+	(VIDC_MGEN2MAXI_REG_BASE + 0x0000002c)
+#define HWIO_REG_806413_PHYS \
+	(VIDC_MGEN2MAXI_REG_BASE_PHYS + 0x0000002c)
+#define HWIO_REG_806413_RMSK  0xffffffff
+#define HWIO_REG_806413_SHFT  0
+#define HWIO_REG_806413_IN  in_dword_masked(\
+	HWIO_REG_806413_ADDR,\
+	HWIO_REG_806413_RMSK)
+#define HWIO_REG_806413_INM(m) \
+	in_dword_masked(HWIO_REG_806413_ADDR, m)
+#define HWIO_REG_806413_AXI_TEST_OUT_BMSK  0xffffffff
+#define HWIO_REG_806413_AXI_TEST_OUT_SHFT  0
+
+#define HWIO_REG_804110_ADDR (VIDC_MGEN2MAXI_REG_BASE + 0x00000030)
+#define HWIO_REG_804110_PHYS (VIDC_MGEN2MAXI_REG_BASE_PHYS + 0x00000030)
+#define HWIO_REG_804110_RMSK  0xc00fffff
+#define HWIO_REG_804110_SHFT  0
+#define HWIO_REG_804110_IN  in_dword_masked(\
+	HWIO_REG_804110_ADDR, HWIO_REG_804110_RMSK)
+#define HWIO_REG_804110_INM(m) \
+	in_dword_masked(HWIO_REG_804110_ADDR, m)
+#define HWIO_REG_804110_OUT(v)  out_dword(HWIO_REG_804110_ADDR, v)
+#define HWIO_REG_804110_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_804110_ADDR, m, v, HWIO_REG_804110_IN);
+#define HWIO_REG_804110_ENABLE_BMSK                 0x80000000
+#define HWIO_REG_804110_ENABLE_SHFT                 0x1f
+#define HWIO_REG_804110_CONST_VIDC_BMSK             0x40000000
+#define HWIO_REG_804110_CONST_VIDC_SHFT             0x1e
+#define HWIO_REG_804110_VIDCV_1080P_VERSION_BMSK    0xff000
+#define HWIO_REG_804110_VIDCV_1080P_VERSION_SHFT    0xc
+#define HWIO_REG_804110_MGEN2MAXI_DATA_SEL_BMSK     0xf00
+#define HWIO_REG_804110_MGEN2MAXI_DATA_SEL_SHFT     0x8
+#define HWIO_REG_804110_MGEN2MAXI_XIN_SEL_BMSK      0x80
+#define HWIO_REG_804110_MGEN2MAXI_XIN_SEL_SHFT      0x7
+#define HWIO_REG_804110_MGEN2MAXI_ARB_SEL_BMSK      0x40
+#define HWIO_REG_804110_MGEN2MAXI_ARB_SEL_SHFT      0x6
+#define HWIO_REG_804110_MGEN2MAXI_TESTBUS_SEL_BMSK  0x30
+#define HWIO_REG_804110_MGEN2MAXI_TESTBUS_SEL_SHFT  0x4
+#define HWIO_REG_804110_AHB2AHB_TESTBUS_SEL_BMSK    0x8
+#define HWIO_REG_804110_AHB2AHB_TESTBUS_SEL_SHFT    0x3
+#define HWIO_REG_804110_MGEN2MAXI_AXI_SEL_BMSK      0x4
+#define HWIO_REG_804110_MGEN2MAXI_AXI_SEL_SHFT      0x2
+#define HWIO_REG_804110_SELECT_BMSK                 0x3
+#define HWIO_REG_804110_SELECT_SHFT                 0
+
+#define HWIO_REG_616440_ADDR \
+	(VIDC_MGEN2MAXI_REG_BASE + 0x00000034)
+#define HWIO_REG_616440_PHYS \
+	(VIDC_MGEN2MAXI_REG_BASE_PHYS + 0x00000034)
+#define HWIO_REG_616440_RMSK  0xffff
+#define HWIO_REG_616440_SHFT  0
+#define HWIO_REG_616440_IN  in_dword_masked(\
+	HWIO_REG_616440_ADDR,\
+	HWIO_REG_616440_RMSK)
+#define HWIO_REG_616440_INM(m) \
+	in_dword_masked(HWIO_REG_616440_ADDR, m)
+#define HWIO_REG_616440_OUT(v) \
+	out_dword(HWIO_REG_616440_ADDR, v)
+#define HWIO_REG_616440_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_616440_ADDR, m, v,\
+	HWIO_REG_616440_IN);
+#define HWIO_REG_616440_XBAR_IN_RD_LIM_BMSK  0xff00
+#define HWIO_REG_616440_XBAR_IN_RD_LIM_SHFT  0x8
+#define HWIO_REG_616440_XBAR_IN_WR_LIM_BMSK  0xff
+#define HWIO_REG_616440_XBAR_IN_WR_LIM_SHFT  0
+
+#define HWIO_REG_527219_ADDR \
+	(VIDC_MGEN2MAXI_REG_BASE + 0x00000038)
+#define HWIO_REG_527219_PHYS \
+	(VIDC_MGEN2MAXI_REG_BASE_PHYS + 0x00000038)
+#define HWIO_REG_527219_RMSK  0xffff
+#define HWIO_REG_527219_SHFT  0
+#define HWIO_REG_527219_IN  in_dword_masked(\
+	HWIO_REG_527219_ADDR,\
+	HWIO_REG_527219_RMSK)
+#define HWIO_REG_527219_INM(m) \
+	in_dword_masked(HWIO_REG_527219_ADDR, m)
+#define HWIO_REG_527219_OUT(v) \
+	out_dword(HWIO_REG_527219_ADDR, v)
+#define HWIO_REG_527219_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_527219_ADDR, m, v,\
+	HWIO_REG_527219_IN);
+#define HWIO_REG_527219_XBAR_OUT_RD_LIM_BMSK  0xff00
+#define HWIO_REG_527219_XBAR_OUT_RD_LIM_SHFT  0x8
+#define HWIO_REG_527219_XBAR_OUT_WR_LIM_BMSK  0xff
+#define HWIO_REG_527219_XBAR_OUT_WR_LIM_SHFT  0
+
+#define HWIO_REG_922106_ADDR \
+	(VIDC_MGEN2MAXI_REG_BASE + 0x0000003c)
+#define HWIO_REG_922106_PHYS \
+	(VIDC_MGEN2MAXI_REG_BASE_PHYS + 0x0000003c)
+#define HWIO_REG_922106_RMSK  0xffff
+#define HWIO_REG_922106_SHFT  0
+#define HWIO_REG_922106_IN  in_dword_masked(\
+	HWIO_REG_922106_ADDR,\
+	HWIO_REG_922106_RMSK)
+#define HWIO_REG_922106_INM(m) \
+	in_dword_masked(HWIO_REG_922106_ADDR, m)
+#define HWIO_REG_922106_OUT(v) \
+	out_dword(HWIO_REG_922106_ADDR, v)
+#define HWIO_REG_922106_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_922106_ADDR, m, v,\
+	HWIO_REG_922106_IN);
+#define HWIO_REG_922106_XBAR_OUT_MAX_RD_BURST_BMSK  0xff00
+#define HWIO_REG_922106_XBAR_OUT_MAX_RD_BURST_SHFT  0x8
+#define HWIO_REG_922106_XBAR_OUT_MAX_WR_BURST_BMSK  0xff
+#define HWIO_REG_922106_XBAR_OUT_MAX_WR_BURST_SHFT  0
+
+#define VIDC_ENHANCE_REG_BASE (VIDC_BASE + 0x000c0000)
+#define VIDC_ENHANCE_REG_BASE_PHYS  0x044c0000
+
+#define HWIO_REG_261029_ADDR (VIDC_ENHANCE_REG_BASE + 00000000)
+#define HWIO_REG_261029_PHYS (VIDC_ENHANCE_REG_BASE_PHYS + 00000000)
+#define HWIO_REG_261029_RMSK  0x10f
+#define HWIO_REG_261029_SHFT  0
+#define HWIO_REG_261029_IN  in_dword_masked(\
+	HWIO_REG_261029_ADDR, HWIO_REG_261029_RMSK)
+#define HWIO_REG_261029_INM(m) \
+	in_dword_masked(HWIO_REG_261029_ADDR, m)
+#define HWIO_REG_261029_OUT(v) \
+	out_dword(HWIO_REG_261029_ADDR, v)
+#define HWIO_REG_261029_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_261029_ADDR, m, v, HWIO_REG_261029_IN);
+#define HWIO_REG_261029_AUTO_INC_EN_BMSK  0x100
+#define HWIO_REG_261029_AUTO_INC_EN_SHFT  0x8
+#define HWIO_REG_261029_DMI_RAM_SEL_BMSK  0xf
+#define HWIO_REG_261029_DMI_RAM_SEL_SHFT  0
+
+#define HWIO_REG_576200_ADDR (VIDC_ENHANCE_REG_BASE + 0x00000004)
+#define HWIO_REG_576200_PHYS (VIDC_ENHANCE_REG_BASE_PHYS + 0x00000004)
+#define HWIO_REG_576200_RMSK  0x7ff
+#define HWIO_REG_576200_SHFT  0
+#define HWIO_REG_576200_IN  in_dword_masked(\
+	HWIO_REG_576200_ADDR, HWIO_REG_576200_RMSK)
+#define HWIO_REG_576200_INM(m) \
+	in_dword_masked(HWIO_REG_576200_ADDR, m)
+#define HWIO_REG_576200_OUT(v) \
+	out_dword(HWIO_REG_576200_ADDR, v)
+#define HWIO_REG_576200_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_576200_ADDR, m, v, HWIO_REG_576200_IN);
+#define HWIO_REG_576200_DMI_ADDR_BMSK  0x7ff
+#define HWIO_REG_576200_DMI_ADDR_SHFT  0
+
+#define HWIO_REG_917583_ADDR (VIDC_ENHANCE_REG_BASE + 0x00000008)
+#define HWIO_REG_917583_PHYS (VIDC_ENHANCE_REG_BASE_PHYS + 0x00000008)
+#define HWIO_REG_917583_RMSK  0xffffffff
+#define HWIO_REG_917583_SHFT  0
+#define HWIO_REG_917583_IN  in_dword_masked(\
+	HWIO_REG_917583_ADDR, HWIO_REG_917583_RMSK)
+#define HWIO_REG_917583_INM(m) \
+	in_dword_masked(HWIO_REG_917583_ADDR, m)
+#define HWIO_REG_917583_OUT(v) \
+	out_dword(HWIO_REG_917583_ADDR, v)
+#define HWIO_REG_917583_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_917583_ADDR, m, v, HWIO_REG_917583_IN);
+#define HWIO_REG_917583_DMI_DATA_HI_BMSK  0xffffffff
+#define HWIO_REG_917583_DMI_DATA_HI_SHFT  0
+
+#define HWIO_REG_556274_ADDR (VIDC_ENHANCE_REG_BASE + 0x0000000c)
+#define HWIO_REG_556274_PHYS (VIDC_ENHANCE_REG_BASE_PHYS + 0x0000000c)
+#define HWIO_REG_556274_RMSK  0xffffffff
+#define HWIO_REG_556274_SHFT  0
+#define HWIO_REG_556274_IN  in_dword_masked(\
+	HWIO_REG_556274_ADDR, HWIO_REG_556274_RMSK)
+#define HWIO_REG_556274_INM(m) \
+	in_dword_masked(HWIO_REG_556274_ADDR, m)
+#define HWIO_REG_556274_OUT(v) \
+	out_dword(HWIO_REG_556274_ADDR, v)
+#define HWIO_REG_556274_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_556274_ADDR, m, v, HWIO_REG_556274_IN);
+#define HWIO_REG_556274_DMI_DATA_LO_BMSK  0xffffffff
+#define HWIO_REG_556274_DMI_DATA_LO_SHFT  0
+
+#define HWIO_REG_39703_ADDR (VIDC_ENHANCE_REG_BASE + 0x00000010)
+#define HWIO_REG_39703_PHYS \
+	(VIDC_ENHANCE_REG_BASE_PHYS + 0x00000010)
+#define HWIO_REG_39703_RMSK  0x1f
+#define HWIO_REG_39703_SHFT  0
+#define HWIO_REG_39703_IN  in_dword_masked(\
+	HWIO_REG_39703_ADDR, HWIO_REG_39703_RMSK)
+#define HWIO_REG_39703_INM(m) \
+	in_dword_masked(HWIO_REG_39703_ADDR, m)
+#define HWIO_REG_39703_OUT(v) \
+	out_dword(HWIO_REG_39703_ADDR, v)
+#define HWIO_REG_39703_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_39703_ADDR, m, v, HWIO_REG_39703_IN);
+#define HWIO_REG_39703_PIX_CACHE_TB_SEL_BMSK  0x1f
+#define HWIO_REG_39703_PIX_CACHE_TB_SEL_SHFT  0
+
+#define HWIO_REG_169013_ADDR (VIDC_ENHANCE_REG_BASE + 0x00000014)
+#define HWIO_REG_169013_PHYS (VIDC_ENHANCE_REG_BASE_PHYS + 0x00000014)
+#define HWIO_REG_169013_RMSK  0x3
+#define HWIO_REG_169013_SHFT  0
+#define HWIO_REG_169013_IN  in_dword_masked(\
+	HWIO_REG_169013_ADDR, HWIO_REG_169013_RMSK)
+#define HWIO_REG_169013_INM(m) \
+	in_dword_masked(HWIO_REG_169013_ADDR, m)
+#define HWIO_REG_169013_OUT(v) \
+	out_dword(HWIO_REG_169013_ADDR, v)
+#define HWIO_REG_169013_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_169013_ADDR, m, v, HWIO_REG_169013_IN);
+#define HWIO_REG_169013_PIX_CACHE_SW_RESET_BMSK  0x2
+#define HWIO_REG_169013_PIX_CACHE_SW_RESET_SHFT  0x1
+#define HWIO_REG_169013_CRIF_RESET_BMSK          0x1
+#define HWIO_REG_169013_CRIF_RESET_SHFT          0
+
+#define HWIO_REG_22756_ADDR (VIDC_ENHANCE_REG_BASE + 0x00000018)
+#define HWIO_REG_22756_PHYS \
+	(VIDC_ENHANCE_REG_BASE_PHYS + 0x00000018)
+#define HWIO_REG_22756_RMSK  0x133f
+#define HWIO_REG_22756_SHFT  0
+#define HWIO_REG_22756_IN  in_dword_masked(\
+	HWIO_REG_22756_ADDR, HWIO_REG_22756_RMSK)
+#define HWIO_REG_22756_INM(m) \
+	in_dword_masked(HWIO_REG_22756_ADDR, m)
+#define HWIO_REG_22756_OUT(v) \
+	out_dword(HWIO_REG_22756_ADDR, v)
+#define HWIO_REG_22756_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_22756_ADDR, m, v, HWIO_REG_22756_IN);
+#define HWIO_REG_22756_CACHE_HALT_BMSK         0x1000
+#define HWIO_REG_22756_CACHE_HALT_SHFT         0xc
+#define HWIO_REG_22756_PAGE_SIZE_BMSK          0x300
+#define HWIO_REG_22756_PAGE_SIZE_SHFT          0x8
+#define HWIO_REG_22756_STATISTICS_OFF_BMSK     0x20
+#define HWIO_REG_22756_STATISTICS_OFF_SHFT     0x5
+#define HWIO_REG_22756_CACHE_PORT_SELECT_BMSK  0x10
+#define HWIO_REG_22756_CACHE_PORT_SELECT_SHFT  0x4
+#define HWIO_REG_22756_PREFETCH_EN_BMSK        0x8
+#define HWIO_REG_22756_PREFETCH_EN_SHFT        0x3
+#define HWIO_REG_22756_SS_TILE_FORMAT_BMSK     0x4
+#define HWIO_REG_22756_SS_TILE_FORMAT_SHFT     0x2
+#define HWIO_REG_22756_CACHE_EN_BMSK           0x2
+#define HWIO_REG_22756_CACHE_EN_SHFT           0x1
+#define HWIO_REG_22756_CACHE_TAG_CLEAR_BMSK    0x1
+#define HWIO_REG_22756_CACHE_TAG_CLEAR_SHFT    0
+
+#define HWIO_REG_951731_ADDR \
+	(VIDC_ENHANCE_REG_BASE + 0x0000001c)
+#define HWIO_REG_951731_PHYS \
+	(VIDC_ENHANCE_REG_BASE_PHYS + 0x0000001c)
+#define HWIO_REG_951731_RMSK  0x7ff07ff
+#define HWIO_REG_951731_SHFT  0
+#define HWIO_REG_951731_IN  in_dword_masked(\
+	HWIO_REG_951731_ADDR,\
+	HWIO_REG_951731_RMSK)
+#define HWIO_REG_951731_INM(m) \
+	in_dword_masked(HWIO_REG_951731_ADDR, m)
+#define HWIO_REG_951731_OUT(v) \
+	out_dword(HWIO_REG_951731_ADDR, v)
+#define HWIO_REG_951731_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_951731_ADDR, m, v,\
+	HWIO_REG_951731_IN);
+#define HWIO_REG_951731_FRAME_HEIGHT_BMSK  0x7ff0000
+#define HWIO_REG_951731_FRAME_HEIGHT_SHFT  0x10
+#define HWIO_REG_951731_FRAME_WIDTH_BMSK   0x7ff
+#define HWIO_REG_951731_FRAME_WIDTH_SHFT   0
+
+#define HWIO_REG_905239_ADDR \
+	(VIDC_ENHANCE_REG_BASE + 0x00000020)
+#define HWIO_REG_905239_PHYS \
+	(VIDC_ENHANCE_REG_BASE_PHYS + 0x00000020)
+#define HWIO_REG_905239_RMSK  0x3ffff
+#define HWIO_REG_905239_SHFT  0
+#define HWIO_REG_905239_IN  in_dword_masked(\
+	HWIO_REG_905239_ADDR,\
+	HWIO_REG_905239_RMSK)
+#define HWIO_REG_905239_INM(m) \
+	in_dword_masked(HWIO_REG_905239_ADDR, m)
+#define HWIO_REG_905239_OUT(v) \
+	out_dword(HWIO_REG_905239_ADDR, v)
+#define HWIO_REG_905239_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_905239_ADDR, m, v,\
+	HWIO_REG_905239_IN);
+#define HWIO_REG_905239_LINEAR_LUMA_BMSK  0x3ffff
+#define HWIO_REG_905239_LINEAR_LUMA_SHFT  0
+#define HWIO_REG_905239_TILE_LUMA_BMSK    0xff00
+#define HWIO_REG_905239_TILE_LUMA_SHFT    0x8
+#define HWIO_REG_905239_TILE_CHROMA_BMSK  0xff
+#define HWIO_REG_905239_TILE_CHROMA_SHFT  0
+
+#define HWIO_REG_804925_ADDR(n) \
+	(VIDC_ENHANCE_REG_BASE + 0x00000024 + 4 * (n))
+#define HWIO_REG_804925_PHYS(n) \
+	(VIDC_ENHANCE_REG_BASE_PHYS + 0x00000024 + 4 * (n))
+#define HWIO_REG_804925_RMSK  0xfffffff8
+#define HWIO_REG_804925_SHFT  0
+#define HWIO_REG_804925_MAXn  0x12
+#define HWIO_REG_804925_INI(n) \
+	in_dword(HWIO_REG_804925_ADDR(n))
+#define HWIO_REG_804925_INMI(n, mask) \
+	in_dword_masked(HWIO_REG_804925_ADDR(n), mask)
+#define HWIO_REG_804925_OUTI(n, val) \
+	out_dword(HWIO_REG_804925_ADDR(n), val)
+#define HWIO_REG_804925_OUTMI(n, mask, val) \
+	out_dword_masked_ns(HWIO_REG_804925_ADDR(n),\
+	mask, val, HWIO_REG_804925_INI(n));
+#define HWIO_REG_804925_ADDR_BMSK  0xfffffff8
+#define HWIO_REG_804925_ADDR_SHFT  0x3
+
+#define HWIO_REG_41909_ADDR(n) \
+	(VIDC_ENHANCE_REG_BASE + 0x00000070 + 4 * (n))
+#define HWIO_REG_41909_PHYS(n) \
+	(VIDC_ENHANCE_REG_BASE_PHYS + 0x00000070 + 4 * (n))
+#define HWIO_REG_41909_RMSK  0xfffffff8
+#define HWIO_REG_41909_SHFT  0
+#define HWIO_REG_41909_MAXn  0x12
+#define HWIO_REG_41909_INI(n) \
+	in_dword(HWIO_REG_41909_ADDR(n))
+#define HWIO_REG_41909_INMI(n, mask) \
+	in_dword_masked(HWIO_REG_41909_ADDR(n), mask)
+#define HWIO_REG_41909_OUTI(n, val) \
+	out_dword(HWIO_REG_41909_ADDR(n), val)
+#define HWIO_REG_41909_OUTMI(n, mask, val) \
+	out_dword_masked_ns(HWIO_REG_41909_ADDR(n),\
+	mask, val, HWIO_REG_41909_INI(n));
+#define HWIO_REG_41909_ADDR_BMSK  0xfffffff8
+#define HWIO_REG_41909_ADDR_SHFT  0x3
+
+#define HWIO_REG_919904_ADDR (VIDC_ENHANCE_REG_BASE + 0x000000bc)
+#define HWIO_REG_919904_PHYS \
+	(VIDC_ENHANCE_REG_BASE_PHYS + 0x000000bc)
+#define HWIO_REG_919904_RMSK  0x1
+#define HWIO_REG_919904_SHFT  0
+#define HWIO_REG_919904_IN  in_dword_masked(\
+	HWIO_REG_919904_ADDR,\
+	HWIO_REG_919904_RMSK)
+#define HWIO_REG_919904_INM(m) \
+	in_dword_masked(HWIO_REG_919904_ADDR, m)
+#define HWIO_REG_919904_IDLE_BMSK  0x1
+#define HWIO_REG_919904_IDLE_SHFT  0
+
+#define HWIO_REG_278310_ADDR \
+	(VIDC_ENHANCE_REG_BASE + 0x000000c0)
+#define HWIO_REG_278310_PHYS \
+	(VIDC_ENHANCE_REG_BASE_PHYS + 0x000000c0)
+#define HWIO_REG_278310_RMSK  0xffffffff
+#define HWIO_REG_278310_SHFT  0
+#define HWIO_REG_278310_IN  in_dword_masked(\
+	HWIO_REG_278310_ADDR,\
+	HWIO_REG_278310_RMSK)
+#define HWIO_REG_278310_INM(m) \
+	in_dword_masked(HWIO_REG_278310_ADDR, m)
+#define HWIO_REG_278310_MISS_COUNT_BMSK  0xffffffff
+#define HWIO_REG_278310_MISS_COUNT_SHFT  0
+
+#define HWIO_REG_421222_ADDR \
+	(VIDC_ENHANCE_REG_BASE + 0x000000c4)
+#define HWIO_REG_421222_PHYS \
+	(VIDC_ENHANCE_REG_BASE_PHYS + 0x000000c4)
+#define HWIO_REG_421222_RMSK  0xffffffff
+#define HWIO_REG_421222_SHFT  0
+#define HWIO_REG_421222_IN  in_dword_masked(\
+	HWIO_REG_421222_ADDR,\
+	HWIO_REG_421222_RMSK)
+#define HWIO_REG_421222_INM(m) \
+	in_dword_masked(HWIO_REG_421222_ADDR, m)
+#define HWIO_REG_421222_HIT_COUNT_BMSK  0xffffffff
+#define HWIO_REG_421222_HIT_COUNT_SHFT  0
+
+#define HWIO_REG_609607_ADDR \
+	(VIDC_ENHANCE_REG_BASE + 0x000000c8)
+#define HWIO_REG_609607_PHYS \
+	(VIDC_ENHANCE_REG_BASE_PHYS + 0x000000c8)
+#define HWIO_REG_609607_RMSK  0xffffffff
+#define HWIO_REG_609607_SHFT  0
+#define HWIO_REG_609607_IN  in_dword_masked(\
+	HWIO_REG_609607_ADDR,\
+	HWIO_REG_609607_RMSK)
+#define HWIO_REG_609607_INM(m) \
+	in_dword_masked(HWIO_REG_609607_ADDR, m)
+#define HWIO_REG_609607_AXI_REQUEST_COUNT_BMSK  0xffffffff
+#define HWIO_REG_609607_AXI_REQUEST_COUNT_SHFT  0
+
+#define HWIO_REG_395232_ADDR \
+	(VIDC_ENHANCE_REG_BASE + 0x000000cc)
+#define HWIO_REG_395232_PHYS \
+	(VIDC_ENHANCE_REG_BASE_PHYS + 0x000000cc)
+#define HWIO_REG_395232_RMSK  0xffffffff
+#define HWIO_REG_395232_SHFT  0
+#define HWIO_REG_395232_IN  in_dword_masked(\
+	HWIO_REG_395232_ADDR,\
+	HWIO_REG_395232_RMSK)
+#define HWIO_REG_395232_INM(m) \
+	in_dword_masked(HWIO_REG_395232_ADDR, m)
+#define HWIO_REG_395232_CORE_REQUEST_COUNT_BMSK \
+	0xffffffff
+#define HWIO_REG_395232_CORE_REQUEST_COUNT_SHFT  0
+
+#define HWIO_REG_450146_ADDR \
+	(VIDC_ENHANCE_REG_BASE + 0x000000d0)
+#define HWIO_REG_450146_PHYS \
+	(VIDC_ENHANCE_REG_BASE_PHYS + 0x000000d0)
+#define HWIO_REG_450146_RMSK  0xffffffff
+#define HWIO_REG_450146_SHFT  0
+#define HWIO_REG_450146_IN  in_dword_masked(\
+	HWIO_REG_450146_ADDR,\
+	HWIO_REG_450146_RMSK)
+#define HWIO_REG_450146_INM(m) \
+	in_dword_masked(HWIO_REG_450146_ADDR, m)
+#define HWIO_REG_450146_AXI_BEAT_COUNT_BMSK  0xffffffff
+#define HWIO_REG_450146_AXI_BEAT_COUNT_SHFT  0
+
+#define HWIO_REG_610651_ADDR \
+	(VIDC_ENHANCE_REG_BASE + 0x000000d4)
+#define HWIO_REG_610651_PHYS \
+	(VIDC_ENHANCE_REG_BASE_PHYS + 0x000000d4)
+#define HWIO_REG_610651_RMSK  0xffffffff
+#define HWIO_REG_610651_SHFT  0
+#define HWIO_REG_610651_IN  in_dword_masked(\
+	HWIO_REG_610651_ADDR,\
+	HWIO_REG_610651_RMSK)
+#define HWIO_REG_610651_INM(m) \
+	in_dword_masked(HWIO_REG_610651_ADDR, m)
+#define HWIO_REG_610651_CORE_BEAT_COUNT_BMSK  0xffffffff
+#define HWIO_REG_610651_CORE_BEAT_COUNT_SHFT  0
+
+#define HWIO_REG_883784_ADDR \
+	(VIDC_ENHANCE_REG_BASE + 0x000000d8)
+#define HWIO_REG_883784_PHYS \
+	(VIDC_ENHANCE_REG_BASE_PHYS + 0x000000d8)
+#define HWIO_REG_883784_RMSK  0xffffffff
+#define HWIO_REG_883784_SHFT  0
+#define HWIO_REG_883784_IN  in_dword_masked(\
+	HWIO_REG_883784_ADDR,\
+	HWIO_REG_883784_RMSK)
+#define HWIO_REG_883784_INM(m) \
+	in_dword_masked(HWIO_REG_883784_ADDR, m)
+#define HWIO_REG_883784_OUT(v) \
+	out_dword(HWIO_REG_883784_ADDR, v)
+#define HWIO_REG_883784_OUTM(m, v)  out_dword_masked_ns(\
+	HWIO_REG_883784_ADDR, m, v,\
+	HWIO_REG_883784_IN);
+#define HWIO_REG_883784_COUNTER_BMSK    0xffffff00
+#define HWIO_REG_883784_COUNTER_SHFT    0x8
+#define HWIO_REG_883784_ID_BMSK         0xf0
+#define HWIO_REG_883784_ID_SHFT         0x4
+#define HWIO_REG_883784_IGNORE_ID_BMSK  0x8
+#define HWIO_REG_883784_IGNORE_ID_SHFT  0x3
+#define HWIO_REG_883784_INPUT_SEL_BMSK  0x6
+#define HWIO_REG_883784_INPUT_SEL_SHFT  0x1
+#define HWIO_REG_883784_MISR_EN_BMSK    0x1
+#define HWIO_REG_883784_MISR_EN_SHFT    0
+
+#define HWIO_REG_651391_ADDR(n) \
+	(VIDC_ENHANCE_REG_BASE + 0x000000dc + 4 * (n))
+#define HWIO_REG_651391_PHYS(n) \
+	(VIDC_ENHANCE_REG_BASE_PHYS + 0x000000dc + 4 * (n))
+#define HWIO_REG_651391_RMSK  0xffffffff
+#define HWIO_REG_651391_SHFT  0
+#define HWIO_REG_651391_MAXn  0x1
+#define HWIO_REG_651391_INI(n) \
+	in_dword(HWIO_REG_651391_ADDR(n))
+#define HWIO_REG_651391_INMI(n, mask) \
+	in_dword_masked(HWIO_REG_651391_ADDR(n), mask)
+#define HWIO_REG_651391_OUTI(n, val) \
+	out_dword(HWIO_REG_651391_ADDR(n), val)
+#define HWIO_REG_651391_SIGNATURE_BMSK  0xffffffff
+#define HWIO_REG_651391_SIGNATURE_SHFT  0
+
+#endif
+
diff --git a/drivers/video/msm/vidc/1080p/ddl/vidc_pix_cache.c b/drivers/video/msm/vidc/1080p/ddl/vidc_pix_cache.c
new file mode 100644
index 0000000..6870525
--- /dev/null
+++ b/drivers/video/msm/vidc/1080p/ddl/vidc_pix_cache.c
@@ -0,0 +1,349 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "vidc_hwio_reg.h"
+#include "vidc_hwio.h"
+#include "vidc_pix_cache.h"
+
+
+#define VIDC_1080P_MAX_DEC_DPB 19
+#define VIDC_TILE_MULTIPLY_FACTOR 8192
+
+void vidc_pix_cache_sw_reset(void)
+{
+	u32 sw_reset_value = 0;
+
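+	/* Pulse the pixel-cache software reset bit: assert it, then de-assert it. */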
+	VIDC_HWIO_IN(REG_169013, &sw_reset_value);
+	sw_reset_value |= HWIO_REG_169013_PIX_CACHE_SW_RESET_BMSK;
+	VIDC_HWIO_OUT(REG_169013, sw_reset_value);
+	VIDC_HWIO_IN(REG_169013, &sw_reset_value);
+	sw_reset_value &= (~HWIO_REG_169013_PIX_CACHE_SW_RESET_BMSK);
+	VIDC_HWIO_OUT(REG_169013, sw_reset_value);
+}
+
+void vidc_pix_cache_init_luma_chroma_base_addr(u32 dpb,
+	u32 *pn_dpb_luma_offset, u32 *pn_dpb_chroma_offset)
+{
+	u32 count, num_dpb_used = dpb;
+	u32 dpb_reset_value = VIDC_1080P_DEC_DPB_RESET_VALUE;
+
+	if (num_dpb_used > VIDC_1080P_MAX_DEC_DPB)
+		num_dpb_used = VIDC_1080P_MAX_DEC_DPB;
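+	/* Program luma/chroma base offsets for active DPB entries; unused slots get the reset value. */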
+	for (count = 0; count < VIDC_1080P_MAX_DEC_DPB; count++) {
+		if (count < num_dpb_used) {
+			if (pn_dpb_luma_offset) {
+				VIDC_HWIO_OUTI(
+					REG_804925,
+					count, pn_dpb_luma_offset[count]);
+			} else {
+				VIDC_HWIO_OUTI(
+					REG_804925,
+					count, dpb_reset_value);
+			}
+			if (pn_dpb_chroma_offset) {
+				VIDC_HWIO_OUTI(
+					REG_41909,
+					count, pn_dpb_chroma_offset[count]);
+			} else {
+				VIDC_HWIO_OUTI(
+					REG_41909,
+					count, dpb_reset_value);
+			}
+		} else {
+			VIDC_HWIO_OUTI(REG_804925,
+				count, dpb_reset_value);
+			VIDC_HWIO_OUTI(REG_41909,
+				count, dpb_reset_value);
+		}
+	}
+}
+
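+/*
+ * Pack the luma and chroma frame sizes, in units of
+ * VIDC_TILE_MULTIPLY_FACTOR (8192) bytes, into a single register:
+ * luma in bits 15:8, chroma in bits 7:0.
+ */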
+void vidc_pix_cache_set_frame_range(u32 luma_size, u32 chroma_size)
+{
+	u32 frame_range;
+
+	frame_range =
+		(((luma_size / VIDC_TILE_MULTIPLY_FACTOR) & 0xFF) << 8)|
+		((chroma_size / VIDC_TILE_MULTIPLY_FACTOR) & 0xFF);
+	VIDC_HWIO_OUT(REG_905239, frame_range);
+}
+
+void vidc_pix_cache_set_frame_size(u32 frame_width, u32 frame_height)
+{
+	u32 frame_size;
+
+	frame_size =
+		(((u32) (frame_height << HWIO_REG_951731_FRAME_HEIGHT_SHFT) &
+		HWIO_REG_951731_FRAME_HEIGHT_BMSK) |
+		((u32) (frame_width << HWIO_REG_951731_FRAME_WIDTH_SHFT) &
+		HWIO_REG_951731_FRAME_WIDTH_BMSK));
+	VIDC_HWIO_OUT(REG_951731, frame_size);
+}
+
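+/*
+ * Build the pixel cache configuration register from scratch (cache
+ * enable, port A/B select, statistics control, prefetch enable and
+ * page size) and write it in one shot.  The helpers below update the
+ * same register with individual read-modify-write accesses.
+ */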
+void vidc_pix_cache_init_config(
+	struct vidc_1080P_pix_cache_config *config)
+{
+	u32 cfg_reg = 0;
+
+	if (config->cache_enable)
+		cfg_reg |= HWIO_REG_22756_CACHE_EN_BMSK;
+	else
+		cfg_reg &= (~HWIO_REG_22756_CACHE_EN_BMSK);
+	if (config->port_select == VIDC_1080P_PIX_CACHE_PORT_A)
+		cfg_reg &=
+			(~HWIO_REG_22756_CACHE_PORT_SELECT_BMSK);
+	else
+		cfg_reg |= HWIO_REG_22756_CACHE_PORT_SELECT_BMSK;
+	if (!config->statistics_off)
+		cfg_reg |= HWIO_REG_22756_STATISTICS_OFF_BMSK;
+	else
+		cfg_reg &= (~HWIO_REG_22756_STATISTICS_OFF_BMSK);
+	if (config->prefetch_en)
+		cfg_reg |= HWIO_REG_22756_PREFETCH_EN_BMSK;
+	else
+		cfg_reg &= (~HWIO_REG_22756_PREFETCH_EN_BMSK);
+	cfg_reg &= (~HWIO_REG_22756_PAGE_SIZE_BMSK);
+	cfg_reg |= VIDC_SETFIELD(config->page_size,
+			HWIO_REG_22756_PAGE_SIZE_SHFT,
+			HWIO_REG_22756_PAGE_SIZE_BMSK);
+	VIDC_HWIO_OUT(REG_22756, cfg_reg);
+}
+
+void vidc_pix_cache_set_prefetch_page_limit(u32 page_size_limit)
+{
+	u32 cfg_reg = 0;
+
+	VIDC_HWIO_IN(REG_22756, &cfg_reg);
+	cfg_reg &= (~HWIO_REG_22756_PAGE_SIZE_BMSK);
+	cfg_reg |= VIDC_SETFIELD(page_size_limit,
+			HWIO_REG_22756_PAGE_SIZE_SHFT,
+			HWIO_REG_22756_PAGE_SIZE_BMSK);
+	VIDC_HWIO_OUT(REG_22756, cfg_reg);
+}
+
+void vidc_pix_cache_enable_prefetch(u32 prefetch_enable)
+{
+	u32 cfg_reg = 0;
+
+	VIDC_HWIO_IN(REG_22756, &cfg_reg);
+	if (prefetch_enable)
+		cfg_reg |= HWIO_REG_22756_PREFETCH_EN_BMSK;
+	else
+		cfg_reg &= (~HWIO_REG_22756_PREFETCH_EN_BMSK);
+	VIDC_HWIO_OUT(REG_22756, cfg_reg);
+}
+
+void vidc_pix_cache_disable_statistics(u32 statistics_off)
+{
+	u32 cfg_reg = 0;
+
+	VIDC_HWIO_IN(REG_22756, &cfg_reg);
+	if (!statistics_off)
+		cfg_reg |= HWIO_REG_22756_STATISTICS_OFF_BMSK;
+	else
+		cfg_reg &= (~HWIO_REG_22756_STATISTICS_OFF_BMSK);
+	VIDC_HWIO_OUT(REG_22756, cfg_reg);
+}
+
+void vidc_pix_cache_set_port(
+	enum vidc_1080P_pix_cache_port_sel port_select)
+{
+	u32 cfg_reg = 0;
+
+	VIDC_HWIO_IN(REG_22756, &cfg_reg);
+	if (port_select == VIDC_1080P_PIX_CACHE_PORT_A)
+		cfg_reg &=
+			(~HWIO_REG_22756_CACHE_PORT_SELECT_BMSK);
+	else
+		cfg_reg |= HWIO_REG_22756_CACHE_PORT_SELECT_BMSK;
+	VIDC_HWIO_OUT(REG_22756, cfg_reg);
+}
+
+void vidc_pix_cache_enable_cache(u32 cache_enable)
+{
+	u32 cfg_reg = 0;
+
+	VIDC_HWIO_IN(REG_22756, &cfg_reg);
+	if (cache_enable)
+		cfg_reg |= HWIO_REG_22756_CACHE_EN_BMSK;
+	else
+		cfg_reg &= (~HWIO_REG_22756_CACHE_EN_BMSK);
+	VIDC_HWIO_OUT(REG_22756, cfg_reg);
+}
+
+void vidc_pix_cache_clear_cache_tags(void)
+{
+	u32 cfg_reg = 0;
+
+	VIDC_HWIO_IN(REG_22756, &cfg_reg);
+	cfg_reg |= HWIO_REG_22756_CACHE_TAG_CLEAR_BMSK;
+	VIDC_HWIO_OUT(REG_22756, cfg_reg);
+	VIDC_HWIO_IN(REG_22756, &cfg_reg);
+	cfg_reg &= (~HWIO_REG_22756_CACHE_TAG_CLEAR_BMSK);
+	VIDC_HWIO_OUT(REG_22756, cfg_reg);
+}
+
+void vidc_pix_cache_set_halt(u32 halt_enable)
+{
+	u32 cfg_reg = 0;
+
+	VIDC_HWIO_IN(REG_22756, &cfg_reg);
+	if (halt_enable)
+		cfg_reg |= HWIO_REG_22756_CACHE_HALT_BMSK;
+	else
+		cfg_reg &= (~HWIO_REG_22756_CACHE_HALT_BMSK);
+	VIDC_HWIO_OUT(REG_22756, cfg_reg);
+}
+
+void vidc_pix_cache_get_status_idle(u32 *idle_status)
+{
+	VIDC_HWIO_IN(REG_919904, idle_status);
+}
+
+void vidc_pix_cache_set_ram(u32 ram_select)
+{
+	u32 dmi_cfg_reg = 0;
+
+	VIDC_HWIO_IN(REG_261029, &dmi_cfg_reg);
+	dmi_cfg_reg &= (~HWIO_REG_261029_DMI_RAM_SEL_BMSK);
+	dmi_cfg_reg |= VIDC_SETFIELD(ram_select,
+			HWIO_REG_261029_AUTO_INC_EN_SHFT,
+			HWIO_REG_261029_DMI_RAM_SEL_BMSK);
+	VIDC_HWIO_OUT(REG_261029, dmi_cfg_reg);
+}
+
+void vidc_pix_cache_set_auto_inc_ram_addr(u32 auto_inc_enable)
+{
+	u32 dmi_cfg_reg = 0;
+
+	VIDC_HWIO_IN(REG_261029, &dmi_cfg_reg);
+	if (auto_inc_enable)
+		dmi_cfg_reg |= HWIO_REG_261029_AUTO_INC_EN_BMSK;
+	else
+		dmi_cfg_reg &= (~HWIO_REG_261029_AUTO_INC_EN_BMSK);
+	VIDC_HWIO_OUT(REG_261029, dmi_cfg_reg);
+}
+
+
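+/*
+ * Dump pixel cache RAM through the DMI port: save the DMI config,
+ * program the source RAM address, enable address auto-increment, read
+ * two 32-bit data registers per entry, then restore the saved DMI
+ * config.
+ */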
+void vidc_pix_cache_read_ram_data(u32 src_ram_address,
+	u32 ram_size, u32 *dest_address)
+{
+	u32 count, dmi_cfg_reg = 0;
+
+	VIDC_HWIO_IN(REG_261029, &dmi_cfg_reg);
+	VIDC_HWIO_OUT(REG_576200, src_ram_address);
+	vidc_pix_cache_set_auto_inc_ram_addr(1);
+	for (count = 0; count < ram_size; count++) {
+		VIDC_HWIO_IN(REG_556274, dest_address);
+		dest_address++;
+		VIDC_HWIO_IN(REG_917583, dest_address);
+		dest_address++;
+	}
+	VIDC_HWIO_OUT(REG_261029, dmi_cfg_reg);
+}
+
+void vidc_pix_cache_write_ram_data(u32 *src_address,
+	u32 ram_size, u32 dest_ram_address)
+{
+	u32 count, dmi_cfg_reg = 0;
+
+	VIDC_HWIO_IN(REG_261029, &dmi_cfg_reg);
+	VIDC_HWIO_OUT(REG_576200, dest_ram_address);
+	vidc_pix_cache_set_auto_inc_ram_addr(1);
+	for (count = 0; count < ram_size; count++) {
+		VIDC_HWIO_OUT(REG_917583, *src_address);
+		src_address++;
+		VIDC_HWIO_OUT(REG_556274, *src_address);
+		src_address++;
+	}
+	VIDC_HWIO_OUT(REG_261029, dmi_cfg_reg);
+}
+
+void vidc_pix_cache_get_statistics(
+	struct vidc_1080P_pix_cache_statistics *statistics)
+{
+	VIDC_HWIO_IN(REG_278310,
+		&statistics->access_miss);
+	VIDC_HWIO_IN(REG_421222,
+		&statistics->access_hit);
+	VIDC_HWIO_IN(REG_609607,
+		&statistics->axi_req);
+	VIDC_HWIO_IN(REG_395232,
+		&statistics->core_req);
+	VIDC_HWIO_IN(REG_450146,
+		&statistics->axi_bus);
+	VIDC_HWIO_IN(REG_610651,
+		&statistics->core_bus);
+}
+
+void vidc_pix_cache_enable_misr(u32 misr_enable)
+{
+	u32 misr_cfg_reg = 0;
+
+	VIDC_HWIO_IN(REG_883784, &misr_cfg_reg);
+	if (misr_enable)
+		misr_cfg_reg |= HWIO_REG_883784_MISR_EN_BMSK;
+	else
+		misr_cfg_reg &=
+			(~HWIO_REG_883784_MISR_EN_BMSK);
+	VIDC_HWIO_OUT(REG_883784, misr_cfg_reg);
+}
+
+void vidc_pix_cache_set_misr_interface(u32 input_select)
+{
+	u32 misr_cfg_reg = 0;
+
+	VIDC_HWIO_IN(REG_883784, &misr_cfg_reg);
+	misr_cfg_reg &= (~HWIO_REG_883784_INPUT_SEL_BMSK);
+	misr_cfg_reg |= VIDC_SETFIELD(input_select,
+			HWIO_REG_883784_INPUT_SEL_SHFT,
+			HWIO_REG_883784_INPUT_SEL_BMSK);
+	VIDC_HWIO_OUT(REG_883784, misr_cfg_reg);
+}
+
+void vidc_pix_cache_set_misr_id_filtering(
+	struct vidc_1080P_pix_cache_misr_id_filtering *filter_id)
+{
+	u32 misr_cfg_reg = 0;
+
+	VIDC_HWIO_IN(REG_883784, &misr_cfg_reg);
+	if (filter_id->ignore_id)
+		misr_cfg_reg |=
+			HWIO_REG_883784_IGNORE_ID_BMSK;
+	else
+		misr_cfg_reg &=
+			(~HWIO_REG_883784_IGNORE_ID_BMSK);
+	misr_cfg_reg &= (~HWIO_REG_883784_ID_BMSK);
+	misr_cfg_reg |= VIDC_SETFIELD(filter_id->id,
+			HWIO_REG_883784_ID_SHFT,
+			HWIO_REG_883784_ID_BMSK);
+	VIDC_HWIO_OUT(REG_883784, misr_cfg_reg);
+}
+
+void vidc_pix_cache_set_misr_filter_trans(u32 no_of_trans)
+{
+	u32 misr_cfg_reg = 0;
+
+	VIDC_HWIO_IN(REG_883784, &misr_cfg_reg);
+	misr_cfg_reg &= (~HWIO_REG_883784_COUNTER_BMSK);
+	misr_cfg_reg |= VIDC_SETFIELD(no_of_trans,
+			HWIO_REG_883784_COUNTER_SHFT,
+			HWIO_REG_883784_COUNTER_BMSK);
+	VIDC_HWIO_OUT(REG_883784, misr_cfg_reg);
+}
+
+void vidc_pix_cache_get_misr_signatures(
+	struct vidc_1080P_pix_cache_misr_signature *signatures)
+{
+	VIDC_HWIO_INI(REG_651391, 0,
+		&signatures->signature0);
+	VIDC_HWIO_INI(REG_651391, 1,
+		&signatures->signature1);
+}
diff --git a/drivers/video/msm/vidc/1080p/ddl/vidc_pix_cache.h b/drivers/video/msm/vidc/1080p/ddl/vidc_pix_cache.h
new file mode 100644
index 0000000..e8a93a1
--- /dev/null
+++ b/drivers/video/msm/vidc/1080p/ddl/vidc_pix_cache.h
@@ -0,0 +1,87 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VIDEO_CORE_PIXCACHE_
+#define _VIDEO_CORE_PIXCACHE_
+
+
+#include "vidc.h"
+
+#define VIDC_1080P_DEC_DPB_RESET_VALUE 0xFFFFFFF8
+
+enum vidc_1080P_pix_cache_port_sel {
+	VIDC_1080P_PIX_CACHE_PORT_A = 0,
+	VIDC_1080P_PIX_CACHE_PORT_B = 1,
+	VIDC_1080P_PIX_CACHE_PORT_32BIT = 0x7FFFFFFF
+};
+enum vidc_1080P_pix_cache_page_size {
+	VIDC_1080P_PIX_CACHE_PAGE_SIZE_1K = 0,
+	VIDC_1080P_PIX_CACHE_PAGE_SIZE_2K = 1,
+	VIDC_1080P_PIX_CACHE_PAGE_SIZE_4K = 2
+};
+struct vidc_1080P_pix_cache_config {
+	u32 cache_enable;
+	u32 prefetch_en;
+	enum vidc_1080P_pix_cache_port_sel port_select;
+	u32 statistics_off;
+	enum vidc_1080P_pix_cache_page_size page_size;
+};
+struct vidc_1080P_pix_cache_statistics {
+	u32 access_miss;
+	u32 access_hit;
+	u32 axi_req;
+	u32 core_req;
+	u32 axi_bus;
+	u32 core_bus;
+};
+struct vidc_1080P_pix_cache_misr_id_filtering {
+	u32 ignore_id;
+	u32 id;
+};
+struct vidc_1080P_pix_cache_misr_signature {
+	u32 signature0;
+	u32 signature1;
+};
+
+void vidc_pix_cache_sw_reset(void);
+void vidc_pix_cache_init_luma_chroma_base_addr(u32 dpb,
+	u32 *pn_dpb_luma_offset, u32 *pn_dpb_chroma_offset);
+void vidc_pix_cache_set_frame_range(u32 luma_size, u32 chroma_size);
+void vidc_pix_cache_set_frame_size(u32 frame_width, u32 frame_height);
+void vidc_pix_cache_init_config(
+	struct vidc_1080P_pix_cache_config *config);
+void vidc_pix_cache_set_prefetch_page_limit(u32 page_size_limit);
+void vidc_pix_cache_enable_prefetch(u32 prefetch_enable);
+void vidc_pix_cache_disable_statistics(u32 statistics_off);
+void vidc_pix_cache_set_port(
+	enum vidc_1080P_pix_cache_port_sel port_select);
+void vidc_pix_cache_enable_cache(u32 cache_enable);
+void vidc_pix_cache_clear_cache_tags(void);
+void vidc_pix_cache_set_halt(u32 halt_enable);
+void vidc_pix_cache_get_status_idle(u32 *idle_status);
+void vidc_pix_cache_set_ram(u32 ram_select);
+void vidc_pix_cache_set_auto_inc_ram_addr(u32 auto_inc_enable);
+void vidc_pix_cache_read_ram_data(u32 src_ram_address, u32 ram_size,
+	u32 *dest_address);
+void vidc_pix_cache_write_ram_data(u32 *src_address, u32 ram_size,
+	u32 dest_ram_address);
+void vidc_pix_cache_get_statistics(
+	struct vidc_1080P_pix_cache_statistics *statistics);
+void vidc_pix_cache_enable_misr(u32 misr_enable);
+void vidc_pix_cache_set_misr_interface(u32 input_select);
+void vidc_pix_cache_set_misr_id_filtering(
+	struct vidc_1080P_pix_cache_misr_id_filtering *filter_id);
+void vidc_pix_cache_set_misr_filter_trans(u32 no_of_trans);
+void vidc_pix_cache_get_misr_signatures(
+	struct vidc_1080P_pix_cache_misr_signature *signatures);
+#endif
diff --git a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
new file mode 100644
index 0000000..ec6c39a
--- /dev/null
+++ b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
@@ -0,0 +1,508 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/firmware.h>
+#include <linux/pm_qos_params.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
+#include <mach/clk.h>
+#include <mach/msm_reqs.h>
+#include <mach/msm_memtypes.h>
+#include <linux/interrupt.h>
+#include <linux/memory_alloc.h>
+#include <asm/sizes.h>
+#include "vidc.h"
+#include "vcd_res_tracker.h"
+#include "vidc_init.h"
+
+static unsigned int vidc_clk_table[3] = {
+	48000000, 133330000, 200000000
+};
+static struct res_trk_context resource_context;
+
+#define VIDC_FW	"vidc_1080p.fw"
+#define VIDC_FW_SIZE SZ_1M
+
+unsigned char *vidc_video_codec_fw;
+u32 vidc_video_codec_fw_size;
+static u32 res_trk_get_clk(void);
+static void res_trk_put_clk(void);
+
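+/*
+ * Acquire the vcodec core and pclk clock handles (mandatory) and the
+ * two AXI clocks (optional, a failure only logs an error), then set
+ * the core clock to the lowest table rate.  Returns true on success;
+ * on failure every handle acquired so far is released.
+ */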
+static u32 res_trk_get_clk(void)
+{
+	if (resource_context.vcodec_clk ||
+		resource_context.vcodec_pclk) {
+		VCDRES_MSG_ERROR("%s() Clock reference exists\n",
+						__func__);
+		goto bail_out;
+	}
+	resource_context.vcodec_clk = clk_get(resource_context.device,
+		"vcodec_clk");
+	if (IS_ERR(resource_context.vcodec_clk)) {
+		VCDRES_MSG_ERROR("%s(): vcodec_clk get failed\n",
+						__func__);
+		goto bail_out;
+	}
+	resource_context.vcodec_pclk = clk_get(resource_context.device,
+			"vcodec_pclk");
+	if (IS_ERR(resource_context.vcodec_pclk)) {
+		VCDRES_MSG_ERROR("%s(): vcodec_pclk get failed\n",
+						__func__);
+		goto release_vcodec_clk;
+	}
+	resource_context.vcodec_axi_a_clk = clk_get(resource_context.device,
+			"vcodec_axi_a_clk");
+	if (IS_ERR(resource_context.vcodec_axi_a_clk)) {
+		VCDRES_MSG_ERROR("%s(): vcodec_axi_a_clk get failed\n",
+						__func__);
+		resource_context.vcodec_axi_a_clk = NULL;
+	}
+	resource_context.vcodec_axi_b_clk = clk_get(resource_context.device,
+			"vcodec_axi_b_clk");
+	if (IS_ERR(resource_context.vcodec_axi_b_clk)) {
+		VCDRES_MSG_ERROR("%s(): vcodec_axi_b_clk get failed\n",
+						__func__);
+		resource_context.vcodec_axi_b_clk = NULL;
+	}
+	if (clk_set_rate(resource_context.vcodec_clk,
+		vidc_clk_table[0])) {
+		VCDRES_MSG_ERROR("%s(): set rate failed in power up\n",
+						__func__);
+		goto release_vcodec_pclk;
+	}
+	return true;
+release_vcodec_pclk:
+	if (resource_context.vcodec_axi_a_clk)
+		clk_put(resource_context.vcodec_axi_a_clk);
+	if (resource_context.vcodec_axi_b_clk)
+		clk_put(resource_context.vcodec_axi_b_clk);
+	clk_put(resource_context.vcodec_pclk);
+	resource_context.vcodec_pclk = NULL;
+	resource_context.vcodec_axi_a_clk = NULL;
+	resource_context.vcodec_axi_b_clk = NULL;
+release_vcodec_clk:
+	clk_put(resource_context.vcodec_clk);
+	resource_context.vcodec_clk = NULL;
+bail_out:
+	return false;
+}
+
+static void res_trk_put_clk(void)
+{
+	if (resource_context.vcodec_clk)
+		clk_put(resource_context.vcodec_clk);
+	if (resource_context.vcodec_pclk)
+		clk_put(resource_context.vcodec_pclk);
+	if (resource_context.vcodec_axi_a_clk)
+		clk_put(resource_context.vcodec_axi_a_clk);
+	if (resource_context.vcodec_axi_b_clk)
+		clk_put(resource_context.vcodec_axi_b_clk);
+	resource_context.vcodec_axi_b_clk = NULL;
+	resource_context.vcodec_axi_a_clk = NULL;
+	resource_context.vcodec_clk = NULL;
+	resource_context.vcodec_pclk = NULL;
+}
+
+static u32 res_trk_shutdown_vidc(void)
+{
+	mutex_lock(&resource_context.lock);
+	if (resource_context.clock_enabled) {
+		mutex_unlock(&resource_context.lock);
+		VCDRES_MSG_LOW("\n Calling CLK disable in Power Down\n");
+		res_trk_disable_clocks();
+		mutex_lock(&resource_context.lock);
+	}
+	res_trk_put_clk();
+	if (resource_context.footswitch) {
+		if (regulator_disable(resource_context.footswitch))
+			VCDRES_MSG_ERROR("Regulator disable failed\n");
+		regulator_put(resource_context.footswitch);
+		resource_context.footswitch = NULL;
+	}
+	if (pm_runtime_put(resource_context.device) < 0)
+		VCDRES_MSG_ERROR("Error : pm_runtime_put failed");
+	mutex_unlock(&resource_context.lock);
+	return true;
+}
+
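+/*
+ * Enable the video IRQ and the vcodec core, pclk and (when present)
+ * AXI clocks.  The clock_enabled flag makes the call idempotent:
+ * clocks are only touched on the 0 -> 1 transition.
+ */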
+u32 res_trk_enable_clocks(void)
+{
+	VCDRES_MSG_LOW("\n in res_trk_enable_clocks()");
+	mutex_lock(&resource_context.lock);
+	if (!resource_context.clock_enabled) {
+		VCDRES_MSG_LOW("Enabling IRQ in %s()\n", __func__);
+		enable_irq(resource_context.irq_num);
+		VCDRES_MSG_LOW("%s(): Enabling the clocks\n", __func__);
+		if (resource_context.vcodec_clk &&
+			resource_context.vcodec_pclk) {
+			if (clk_enable(resource_context.vcodec_pclk)) {
+				VCDRES_MSG_ERROR("vidc pclk Enable fail\n");
+				goto bail_out;
+			}
+			if (clk_enable(resource_context.vcodec_clk)) {
+				VCDRES_MSG_ERROR("vidc core clk Enable fail\n");
+				goto vidc_disable_pclk;
+			}
+			if (resource_context.vcodec_axi_a_clk &&
+				resource_context.vcodec_axi_b_clk) {
+				if (clk_enable(resource_context.
+					vcodec_axi_a_clk))
+					VCDRES_MSG_ERROR("a_clk Enable fail\n");
+				if (clk_enable(resource_context.
+					vcodec_axi_b_clk))
+					VCDRES_MSG_ERROR("b_clk Enable fail\n");
+			}
+
+			VCDRES_MSG_LOW("%s(): Clocks enabled!\n", __func__);
+		} else {
+			VCDRES_MSG_ERROR("%s(): Clocks enable failed!\n",
+				__func__);
+			goto bail_out;
+		}
+	}
+	resource_context.clock_enabled = 1;
+	mutex_unlock(&resource_context.lock);
+	return true;
+vidc_disable_pclk:
+	clk_disable(resource_context.vcodec_pclk);
+bail_out:
+	mutex_unlock(&resource_context.lock);
+	return false;
+}
+
+static u32 res_trk_sel_clk_rate(unsigned long hclk_rate)
+{
+	u32 status = true;
+	mutex_lock(&resource_context.lock);
+	if (clk_set_rate(resource_context.vcodec_clk,
+		hclk_rate)) {
+		VCDRES_MSG_ERROR("vidc hclk set rate failed\n");
+		status = false;
+	} else
+		resource_context.vcodec_clk_rate = hclk_rate;
+	mutex_unlock(&resource_context.lock);
+	return status;
+}
+
+static u32 res_trk_get_clk_rate(unsigned long *phclk_rate)
+{
+	u32 status = true;
+	mutex_lock(&resource_context.lock);
+	if (phclk_rate) {
+		*phclk_rate = clk_get_rate(resource_context.vcodec_clk);
+		if (!(*phclk_rate)) {
+			VCDRES_MSG_ERROR("vidc hclk get rate failed\n");
+			status = false;
+		}
+	} else
+		status = false;
+	mutex_unlock(&resource_context.lock);
+	return status;
+}
+
+u32 res_trk_disable_clocks(void)
+{
+	u32 status = false;
+	VCDRES_MSG_LOW("in res_trk_disable_clocks()\n");
+	mutex_lock(&resource_context.lock);
+	if (resource_context.clock_enabled) {
+		VCDRES_MSG_LOW("Disabling IRQ in %s()\n", __func__);
+		disable_irq_nosync(resource_context.irq_num);
+		VCDRES_MSG_LOW("%s(): Disabling the clocks ...\n", __func__);
+		resource_context.clock_enabled = 0;
+		if (resource_context.vcodec_clk)
+			clk_disable(resource_context.vcodec_clk);
+		if (resource_context.vcodec_pclk)
+			clk_disable(resource_context.vcodec_pclk);
+		if (resource_context.vcodec_axi_a_clk)
+			clk_disable(resource_context.vcodec_axi_a_clk);
+		if (resource_context.vcodec_axi_b_clk)
+			clk_disable(resource_context.vcodec_axi_b_clk);
+		status = true;
+	}
+	mutex_unlock(&resource_context.lock);
+	return status;
+}
+
+static u32 res_trk_vidc_pwr_up(void)
+{
+	mutex_lock(&resource_context.lock);
+
+	if (pm_runtime_get(resource_context.device) < 0) {
+		VCDRES_MSG_ERROR("Error : pm_runtime_get failed\n");
+		goto bail_out;
+	}
+	resource_context.footswitch = regulator_get(NULL, "fs_ved");
+	if (IS_ERR(resource_context.footswitch)) {
+		VCDRES_MSG_ERROR("foot switch get failed\n");
+		resource_context.footswitch = NULL;
+	} else
+		regulator_enable(resource_context.footswitch);
+	if (!res_trk_get_clk())
+		goto rel_vidc_pm_runtime;
+	mutex_unlock(&resource_context.lock);
+	return true;
+
+rel_vidc_pm_runtime:
+	if (pm_runtime_put(resource_context.device) < 0)
+		VCDRES_MSG_ERROR("Error : pm_runtime_put failed");
+bail_out:
+	mutex_unlock(&resource_context.lock);
+	return false;
+}
+
+u32 res_trk_power_up(void)
+{
+	VCDRES_MSG_LOW("clk_regime_rail_enable");
+	VCDRES_MSG_LOW("clk_regime_sel_rail_control");
+#ifdef CONFIG_MSM_BUS_SCALING
+	resource_context.pcl = 0;
+	if (resource_context.vidc_bus_client_pdata) {
+		resource_context.pcl = msm_bus_scale_register_client(
+			resource_context.vidc_bus_client_pdata);
+		VCDRES_MSG_LOW("%s(), resource_context.pcl = %x", __func__,
+			 resource_context.pcl);
+	}
+	if (resource_context.pcl == 0) {
+		dev_err(resource_context.device,
+			"register bus client returned NULL\n");
+		return false;
+	}
+#endif
+	return res_trk_vidc_pwr_up();
+}
+
+u32 res_trk_power_down(void)
+{
+	VCDRES_MSG_LOW("clk_regime_rail_disable");
+#ifdef CONFIG_MSM_BUS_SCALING
+	msm_bus_scale_client_update_request(resource_context.pcl, 0);
+	msm_bus_scale_unregister_client(resource_context.pcl);
+#endif
+	VCDRES_MSG_MED("res_trk_power_down():: Calling "
+		"res_trk_shutdown_vidc()\n");
+	return res_trk_shutdown_vidc();
+}
+
+u32 res_trk_get_max_perf_level(u32 *pn_max_perf_lvl)
+{
+	if (!pn_max_perf_lvl) {
+		VCDRES_MSG_ERROR("%s(): pn_max_perf_lvl is NULL\n",
+			__func__);
+		return false;
+	}
+	*pn_max_perf_lvl = RESTRK_1080P_MAX_PERF_LEVEL;
+	return true;
+}
+
+#ifdef CONFIG_MSM_BUS_SCALING
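+/*
+ * Vote for bus bandwidth based on the aggregate load: sum the
+ * requested performance levels of all clients, map the requested
+ * level to one of three bus clock indices (VGA/720p/above), fold in
+ * whether any encoder is active, and update the bus-scale client.
+ */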
+int res_trk_update_bus_perf_level(struct vcd_dev_ctxt *dev_ctxt, u32 perf_level)
+{
+	struct vcd_clnt_ctxt *cctxt_itr = NULL;
+	u32 enc_perf_level = 0, dec_perf_level = 0;
+	u32 bus_clk_index, client_type = 0;
+	int rc = 0;
+
+	cctxt_itr = dev_ctxt->cctxt_list_head;
+	while (cctxt_itr) {
+		if (cctxt_itr->decoding)
+			dec_perf_level += cctxt_itr->reqd_perf_lvl;
+		else
+			enc_perf_level += cctxt_itr->reqd_perf_lvl;
+		cctxt_itr = cctxt_itr->next;
+	}
+	if (!enc_perf_level)
+		client_type = 1;
+	if (perf_level <= RESTRK_1080P_VGA_PERF_LEVEL)
+		bus_clk_index = 0;
+	else if (perf_level <= RESTRK_1080P_720P_PERF_LEVEL)
+		bus_clk_index = 1;
+	else
+		bus_clk_index = 2;
+
+	if (dev_ctxt->reqd_perf_lvl + dev_ctxt->curr_perf_lvl == 0)
+		bus_clk_index = 2;
+
+	bus_clk_index = (bus_clk_index << 1) + (client_type + 1);
+	VCDRES_MSG_LOW("%s(), bus_clk_index = %d", __func__, bus_clk_index);
+	VCDRES_MSG_LOW("%s(),context.pcl = %x", __func__, resource_context.pcl);
+	VCDRES_MSG_LOW("%s(), bus_perf_level = %x", __func__, perf_level);
+	rc = msm_bus_scale_client_update_request(resource_context.pcl,
+		bus_clk_index);
+	return rc;
+}
+#endif
+
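+/*
+ * Translate a requested performance level into one of the three
+ * vidc_clk_table frequencies (VGA, 720p, max) and, when
+ * USE_RES_TRACKER is defined, program the core clock for any level
+ * above the minimum.  The chosen level is returned through
+ * pn_set_perf_lvl.
+ */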
+u32 res_trk_set_perf_level(u32 req_perf_lvl, u32 *pn_set_perf_lvl,
+	struct vcd_dev_ctxt *dev_ctxt)
+{
+	u32 vidc_freq = 0;
+	if (!pn_set_perf_lvl || !dev_ctxt) {
+		VCDRES_MSG_ERROR("%s(): NULL pointer! dev_ctxt(%p)\n",
+			__func__, dev_ctxt);
+		return false;
+	}
+	VCDRES_MSG_LOW("%s(), req_perf_lvl = %d", __func__, req_perf_lvl);
+#ifdef CONFIG_MSM_BUS_SCALING
+	if (res_trk_update_bus_perf_level(dev_ctxt, req_perf_lvl) < 0) {
+		VCDRES_MSG_ERROR("%s(): update bus perf level failed\n",
+			__func__);
+		return false;
+	}
+
+#endif
+	if (dev_ctxt->reqd_perf_lvl + dev_ctxt->curr_perf_lvl == 0)
+		req_perf_lvl = RESTRK_1080P_MAX_PERF_LEVEL;
+
+	if (req_perf_lvl <= RESTRK_1080P_VGA_PERF_LEVEL) {
+		vidc_freq = vidc_clk_table[0];
+		*pn_set_perf_lvl = RESTRK_1080P_VGA_PERF_LEVEL;
+	} else if (req_perf_lvl <= RESTRK_1080P_720P_PERF_LEVEL) {
+		vidc_freq = vidc_clk_table[1];
+		*pn_set_perf_lvl = RESTRK_1080P_720P_PERF_LEVEL;
+	} else {
+		vidc_freq = vidc_clk_table[2];
+		*pn_set_perf_lvl = RESTRK_1080P_MAX_PERF_LEVEL;
+	}
+	resource_context.perf_level = *pn_set_perf_lvl;
+	VCDRES_MSG_MED("VIDC: vidc_freq = %u, req_perf_lvl = %u\n",
+		vidc_freq, req_perf_lvl);
+#ifdef USE_RES_TRACKER
+	if (req_perf_lvl != RESTRK_1080P_MIN_PERF_LEVEL) {
+		VCDRES_MSG_MED("%s(): Setting vidc freq to %u\n",
+			__func__, vidc_freq);
+		if (!res_trk_sel_clk_rate(vidc_freq)) {
+			VCDRES_MSG_ERROR("%s(): res_trk_sel_clk_rate FAILED\n",
+				__func__);
+			*pn_set_perf_lvl = 0;
+			return false;
+		}
+	}
+#endif
+	VCDRES_MSG_MED("%s() set perf level : %d", __func__, *pn_set_perf_lvl);
+	return true;
+}
+
+u32 res_trk_get_curr_perf_level(u32 *pn_perf_lvl)
+{
+	unsigned long freq;
+
+	if (!pn_perf_lvl) {
+		VCDRES_MSG_ERROR("%s(): pn_perf_lvl is NULL\n",
+			__func__);
+		return false;
+	}
+	VCDRES_MSG_LOW("clk_regime_msm_get_clk_freq_hz");
+	if (!res_trk_get_clk_rate(&freq)) {
+		VCDRES_MSG_ERROR("%s(): res_trk_get_clk_rate FAILED\n",
+			__func__);
+		*pn_perf_lvl = 0;
+		return false;
+	}
+	*pn_perf_lvl = resource_context.perf_level;
+	VCDRES_MSG_MED("%s(): freq = %lu, *pn_perf_lvl = %u", __func__,
+		freq, *pn_perf_lvl);
+	return true;
+}
+
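+/*
+ * Fetch the 1080p firmware image (vidc_1080p.fw) via
+ * request_firmware() and publish its data pointer and size through
+ * the global vidc_video_codec_fw variables.
+ */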
+u32 res_trk_download_firmware(void)
+{
+	const struct firmware *fw_video = NULL;
+	int rc = 0;
+	u32 status = true;
+
+	VCDRES_MSG_HIGH("%s(): Request firmware download\n",
+		__func__);
+	mutex_lock(&resource_context.lock);
+	rc = request_firmware(&fw_video, VIDC_FW,
+		resource_context.device);
+	if (rc) {
+		VCDRES_MSG_ERROR("request_firmware for %s error %d\n",
+				VIDC_FW, rc);
+		status = false;
+		goto bail_out;
+	}
+	vidc_video_codec_fw = (unsigned char *)fw_video->data;
+	vidc_video_codec_fw_size = (u32) fw_video->size;
+bail_out:
+	mutex_unlock(&resource_context.lock);
+	return status;
+}
+
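+/*
+ * One-time resource tracker setup: record the device and IRQ, pick up
+ * the platform data (memory type and, with bus scaling, the bus
+ * client pdata) and, for EBI1 memory, reserve and map a 1MB
+ * contiguous region for the firmware image.
+ */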
+void res_trk_init(struct device *device, u32 irq)
+{
+	if (resource_context.device || resource_context.irq_num ||
+		!device) {
+		VCDRES_MSG_ERROR("%s() Resource Tracker Init error\n",
+			__func__);
+	} else {
+		memset(&resource_context, 0, sizeof(resource_context));
+		mutex_init(&resource_context.lock);
+		resource_context.device = device;
+		resource_context.irq_num = irq;
+		resource_context.vidc_platform_data =
+			(struct msm_vidc_platform_data *) device->platform_data;
+		if (resource_context.vidc_platform_data) {
+			resource_context.memtype =
+			resource_context.vidc_platform_data->memtype;
+#ifdef CONFIG_MSM_BUS_SCALING
+			resource_context.vidc_bus_client_pdata =
+			resource_context.vidc_platform_data->
+				vidc_bus_client_pdata;
+#endif
+		} else {
+			resource_context.memtype = -1;
+		}
+		resource_context.core_type = VCD_CORE_1080P;
+		if (resource_context.memtype == MEMTYPE_EBI1) {
+			resource_context.device_addr =
+			(phys_addr_t)
+			allocate_contiguous_memory_nomap(VIDC_FW_SIZE,
+					resource_context.memtype, SZ_4K);
+			if (resource_context.device_addr) {
+				resource_context.base_addr = (u8 *)
+				ioremap((unsigned long)
+				resource_context.device_addr, VIDC_FW_SIZE);
+				if (!resource_context.base_addr) {
+					free_contiguous_memory_by_paddr(
+					(unsigned long)
+					resource_context.device_addr);
+					resource_context.device_addr =
+						(phys_addr_t)NULL;
+				}
+			}
+		}
+	}
+}
+
+u32 res_trk_get_core_type(void)
+{
+	return resource_context.core_type;
+}
+
+u32 res_trk_get_firmware_addr(struct res_trk_firmware_addr *firm_addr)
+{
+	int status = -1;
+	if (firm_addr && resource_context.base_addr &&
+		resource_context.device_addr) {
+		firm_addr->base_addr = resource_context.base_addr;
+		firm_addr->device_addr = resource_context.device_addr;
+		firm_addr->buf_size = VIDC_FW_SIZE;
+		status = 0;
+	}
+	return status;
+}
+
+u32 res_trk_get_mem_type(void)
+{
+	return resource_context.memtype;
+}
diff --git a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.h b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.h
new file mode 100644
index 0000000..16680ad
--- /dev/null
+++ b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.h
@@ -0,0 +1,69 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _VIDEO_720P_RESOURCE_TRACKER_H_
+#define _VIDEO_720P_RESOURCE_TRACKER_H_
+
+#include <linux/regulator/consumer.h>
+#include "vcd_res_tracker_api.h"
+#ifdef CONFIG_MSM_BUS_SCALING
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
+#endif
+#include <mach/board.h>
+
+#define RESTRK_1080P_VGA_PERF_LEVEL    VCD_MIN_PERF_LEVEL
+#define RESTRK_1080P_720P_PERF_LEVEL   108000
+#define RESTRK_1080P_1080P_PERF_LEVEL  244800
+
+#define RESTRK_1080P_MIN_PERF_LEVEL RESTRK_1080P_VGA_PERF_LEVEL
+#define RESTRK_1080P_MAX_PERF_LEVEL RESTRK_1080P_1080P_PERF_LEVEL
+struct res_trk_context {
+	struct device *device;
+	u32 irq_num;
+	struct mutex lock;
+	struct clk *vcodec_clk;
+	struct clk *vcodec_pclk;
+	struct clk *vcodec_axi_a_clk;
+	struct clk *vcodec_axi_b_clk;
+	unsigned long vcodec_clk_rate;
+	unsigned int clock_enabled;
+	unsigned int perf_level;
+	struct regulator *footswitch;
+	struct msm_vidc_platform_data *vidc_platform_data;
+	int memtype;
+#ifdef CONFIG_MSM_BUS_SCALING
+	struct msm_bus_scale_pdata *vidc_bus_client_pdata;
+	uint32_t     pcl;
+#endif
+	u32 core_type;
+	u8 *base_addr;
+	phys_addr_t device_addr;
+};
+
+#if DEBUG
+
+#define VCDRES_MSG_LOW(xx_fmt...)	printk(KERN_INFO "\n\t* " xx_fmt)
+#define VCDRES_MSG_MED(xx_fmt...)	printk(KERN_INFO "\n  * " xx_fmt)
+
+#else
+
+#define VCDRES_MSG_LOW(xx_fmt...)
+#define VCDRES_MSG_MED(xx_fmt...)
+
+#endif
+
+#define VCDRES_MSG_HIGH(xx_fmt...)	printk(KERN_WARNING "\n" xx_fmt)
+#define VCDRES_MSG_ERROR(xx_fmt...)	printk(KERN_ERR "\n err: " xx_fmt)
+#define VCDRES_MSG_FATAL(xx_fmt...)	printk(KERN_ERR "\n<FATAL> " xx_fmt)
+
+#endif
diff --git a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker_api.h b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker_api.h
new file mode 100644
index 0000000..95cddd9
--- /dev/null
+++ b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker_api.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _VIDEO_720P_RESOURCE_TRACKER_API_H_
+#define _VIDEO_720P_RESOURCE_TRACKER_API_H_
+
+#include "vcd_core.h"
+
+struct res_trk_firmware_addr {
+	u8 *base_addr;
+	phys_addr_t device_addr;
+	u32 buf_size;
+};
+void res_trk_init(struct device *device, u32 irq);
+u32 res_trk_power_up(void);
+u32 res_trk_power_down(void);
+u32 res_trk_enable_clocks(void);
+u32 res_trk_disable_clocks(void);
+u32 res_trk_get_max_perf_level(u32 *pn_max_perf_lvl);
+u32 res_trk_set_perf_level(u32 req_perf_lvl, u32 *pn_set_perf_lvl,
+	struct vcd_dev_ctxt *dev_ctxt);
+u32 res_trk_get_curr_perf_level(u32 *pn_perf_lvl);
+u32 res_trk_download_firmware(void);
+u32 res_trk_get_core_type(void);
+u32 res_trk_get_firmware_addr(struct res_trk_firmware_addr *firm_addr);
+u32 res_trk_get_mem_type(void);
+
+#endif
diff --git a/drivers/video/msm/vidc/720p/ddl/vcd_ddl.c b/drivers/video/msm/vidc/720p/ddl/vcd_ddl.c
new file mode 100644
index 0000000..d27b354
--- /dev/null
+++ b/drivers/video/msm/vidc/720p/ddl/vcd_ddl.c
@@ -0,0 +1,629 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "vidc_type.h"
+#include "vcd_ddl_utils.h"
+#include "vcd_ddl_metadata.h"
+#include "vcd_res_tracker_api.h"
+
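+/*
+ * Bring up the device driver layer: validate the init config,
+ * allocate the context, line buffer, data partition, metadata and
+ * debug dump buffers, initialise the firmware and start core init.
+ * Any allocation failure releases the context buffers and returns
+ * VCD_ERR_ALLOC_FAIL.
+ */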
+u32 ddl_device_init(struct ddl_init_config *ddl_init_config,
+		    void *client_data)
+{
+	struct ddl_context *ddl_context;
+	u32 status = VCD_S_SUCCESS;
+
+	if ((!ddl_init_config) ||
+	    (!ddl_init_config->ddl_callback) ||
+	    (!ddl_init_config->core_virtual_base_addr)
+	    ) {
+		VIDC_LOGERR_STRING("ddl_dev_init:Bad_argument");
+		return VCD_ERR_ILLEGAL_PARM;
+	}
+
+	ddl_context = ddl_get_context();
+
+	if (DDL_IS_INITIALIZED(ddl_context)) {
+		VIDC_LOGERR_STRING("ddl_dev_init:Multiple_init");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+	if (DDL_IS_BUSY(ddl_context)) {
+		VIDC_LOGERR_STRING("ddl_dev_init:Ddl_busy");
+		return VCD_ERR_BUSY;
+	}
+
+	DDL_MEMSET(ddl_context, 0, sizeof(struct ddl_context));
+
+	DDL_BUSY(ddl_context);
+	ddl_context->memtype = res_trk_get_mem_type();
+	if (ddl_context->memtype == -1) {
+		VIDC_LOGERR_STRING("ddl_dev_init:Invalid Memtype");
+		return VCD_ERR_ILLEGAL_PARM;
+	}
+	ddl_context->ddl_callback = ddl_init_config->ddl_callback;
+	ddl_context->interrupt_clr = ddl_init_config->interrupt_clr;
+	ddl_context->core_virtual_base_addr =
+	    ddl_init_config->core_virtual_base_addr;
+	ddl_context->client_data = client_data;
+
+	vidc_720p_set_device_virtual_base(ddl_context->
+					   core_virtual_base_addr);
+
+	ddl_context->current_ddl = NULL;
+	ddl_move_command_state(ddl_context, DDL_CMD_INVALID);
+
+	ddl_client_transact(DDL_INIT_CLIENTS, NULL);
+
+	ddl_pmem_alloc(&ddl_context->context_buf_addr,
+		       DDL_CONTEXT_MEMORY, DDL_LINEAR_BUFFER_ALIGN_BYTES);
+	if (!ddl_context->context_buf_addr.virtual_base_addr) {
+		VIDC_LOGERR_STRING("ddl_dev_init:Context_alloc_fail");
+		status = VCD_ERR_ALLOC_FAIL;
+	}
+	if (!status) {
+		ddl_pmem_alloc(&ddl_context->db_line_buffer,
+			       DDL_DB_LINE_BUF_SIZE,
+			       DDL_TILE_BUFFER_ALIGN_BYTES);
+		if (!ddl_context->db_line_buffer.virtual_base_addr) {
+			VIDC_LOGERR_STRING("ddl_dev_init:Line_buf_alloc_fail");
+			status = VCD_ERR_ALLOC_FAIL;
+		}
+	}
+
+	if (!status) {
+		ddl_pmem_alloc(&ddl_context->data_partition_tempbuf,
+					   DDL_MPEG4_DATA_PARTITION_BUF_SIZE,
+					   DDL_TILE_BUFFER_ALIGN_BYTES);
+		if (!ddl_context->data_partition_tempbuf.virtual_base_addr) {
+			VIDC_LOGERR_STRING
+				("ddl_dev_init:Data_partition_buf_alloc_fail");
+			status = VCD_ERR_ALLOC_FAIL;
+		}
+	}
+
+	if (!status) {
+		ddl_pmem_alloc(&ddl_context->metadata_shared_input,
+					   DDL_METADATA_TOTAL_INPUTBUFSIZE,
+					   DDL_LINEAR_BUFFER_ALIGN_BYTES);
+		if (!ddl_context->metadata_shared_input.virtual_base_addr) {
+			VIDC_LOGERR_STRING
+			("ddl_dev_init:metadata_shared_input_alloc_fail");
+			status = VCD_ERR_ALLOC_FAIL;
+		}
+	}
+
+	if (!status) {
+		ddl_pmem_alloc(&ddl_context->dbg_core_dump,
+			       DDL_DBG_CORE_DUMP_SIZE,
+			       DDL_LINEAR_BUFFER_ALIGN_BYTES);
+		if (!ddl_context->dbg_core_dump.virtual_base_addr) {
+			VIDC_LOGERR_STRING
+				("ddl_dev_init:dbg_core_dump_alloc_failed");
+			status = VCD_ERR_ALLOC_FAIL;
+		}
+		ddl_context->enable_dbg_core_dump = 0;
+	}
+
+	if (!status && !vcd_fw_init()) {
+		VIDC_LOGERR_STRING("ddl_dev_init:fw_init_failed");
+		status = VCD_ERR_ALLOC_FAIL;
+	}
+	if (status) {
+		ddl_release_context_buffers(ddl_context);
+		DDL_IDLE(ddl_context);
+		return status;
+	}
+
+	ddl_move_command_state(ddl_context, DDL_CMD_DMA_INIT);
+
+	ddl_core_init(ddl_context);
+
+	return status;
+}
+
+u32 ddl_device_release(void *client_data)
+{
+	struct ddl_context *ddl_context;
+
+	ddl_context = ddl_get_context();
+
+	if (DDL_IS_BUSY(ddl_context)) {
+		VIDC_LOGERR_STRING("ddl_dev_rel:Ddl_busy");
+		return VCD_ERR_BUSY;
+	}
+
+	if (!DDL_IS_INITIALIZED(ddl_context)) {
+		VIDC_LOGERR_STRING("ddl_dev_rel:Not_inited");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+
+	if (!ddl_client_transact(DDL_ACTIVE_CLIENT, NULL)) {
+		VIDC_LOGERR_STRING("ddl_dev_rel:Client_present_err");
+		return VCD_ERR_CLIENT_PRESENT;
+	}
+	DDL_BUSY(ddl_context);
+
+	ddl_context->device_state = DDL_DEVICE_NOTINIT;
+	ddl_context->client_data = client_data;
+	ddl_move_command_state(ddl_context, DDL_CMD_INVALID);
+	vidc_720p_stop_fw();
+
+	VIDC_LOG_STRING("FW_ENDDONE");
+	ddl_release_context_buffers(ddl_context);
+
+	DDL_IDLE(ddl_context);
+
+	return VCD_S_SUCCESS;
+}
+
+u32 ddl_open(u32 **ddl_handle, u32 decoding)
+{
+	struct ddl_context *ddl_context;
+	struct ddl_client_context *ddl;
+	u32 status;
+
+	if (!ddl_handle) {
+		VIDC_LOGERR_STRING("ddl_open:Bad_handle");
+		return VCD_ERR_BAD_HANDLE;
+	}
+
+	ddl_context = ddl_get_context();
+
+	if (!DDL_IS_INITIALIZED(ddl_context)) {
+		VIDC_LOGERR_STRING("ddl_open:Not_inited");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+
+	status = ddl_client_transact(DDL_GET_CLIENT, &ddl);
+
+	if (status) {
+		VIDC_LOGERR_STRING("ddl_open:Client_transact_failed");
+		return status;
+	}
+
+	ddl_move_client_state(ddl, DDL_CLIENT_OPEN);
+
+	ddl->codec_data.hdr.decoding = decoding;
+	ddl->decoding = decoding;
+
+	ddl_set_default_meta_data_hdr(ddl);
+
+	ddl_set_initial_default_values(ddl);
+
+	*ddl_handle = (u32 *) ddl;
+	return VCD_S_SUCCESS;
+}
+
+u32 ddl_close(u32 **ddl_handle)
+{
+	struct ddl_context *ddl_context;
+	struct ddl_client_context **ddl =
+	    (struct ddl_client_context **)ddl_handle;
+
+	if (!ddl || !*ddl) {
+		VIDC_LOGERR_STRING("ddl_close:Bad_handle");
+		return VCD_ERR_BAD_HANDLE;
+	}
+
+	ddl_context = ddl_get_context();
+
+	if (!DDL_IS_INITIALIZED(ddl_context)) {
+		VIDC_LOGERR_STRING("ddl_close:Not_inited");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+
+	if (!DDLCLIENT_STATE_IS(*ddl, DDL_CLIENT_OPEN)) {
+		VIDC_LOGERR_STRING("ddl_close:Not_in_open_state");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+
+	ddl_move_client_state(*ddl, DDL_CLIENT_INVALID);
+	if ((*ddl)->decoding) {
+		vcd_fw_transact(false, true,
+			(*ddl)->codec_data.decoder.codec.codec);
+	} else {
+		vcd_fw_transact(false, false,
+			(*ddl)->codec_data.encoder.codec.codec);
+	}
+	ddl_client_transact(DDL_FREE_CLIENT, ddl);
+
+	return VCD_S_SUCCESS;
+}
+
+u32 ddl_encode_start(u32 *ddl_handle, void *client_data)
+{
+	struct ddl_client_context *ddl =
+	    (struct ddl_client_context *)ddl_handle;
+	struct ddl_context *ddl_context;
+	struct ddl_encoder_data *encoder;
+	u32 dpb_size;
+
+	ddl_context = ddl_get_context();
+
+	if (!DDL_IS_INITIALIZED(ddl_context)) {
+		VIDC_LOGERR_STRING("ddl_enc_start:Not_inited");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+	if (DDL_IS_BUSY(ddl_context)) {
+		VIDC_LOGERR_STRING("ddl_enc_start:Ddl_busy");
+		return VCD_ERR_BUSY;
+	}
+	if (!ddl || ddl->decoding) {
+		VIDC_LOGERR_STRING("ddl_enc_start:Bad_handle");
+		return VCD_ERR_BAD_HANDLE;
+	}
+
+	if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) {
+		VIDC_LOGERR_STRING("ddl_enc_start:Not_opened");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+
+	if (!ddl_encoder_ready_to_start(ddl)) {
+		VIDC_LOGERR_STRING("ddl_enc_start:Err_param_settings");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+
+	encoder = &ddl->codec_data.encoder;
+
+	dpb_size = ddl_get_yuv_buffer_size(&encoder->frame_size,
+					&encoder->re_con_buf_format, false,
+					encoder->codec.codec);
+
+	dpb_size *= DDL_ENC_NUM_DPB_BUFFERS;
+	ddl_pmem_alloc(&encoder->enc_dpb_addr,
+		       dpb_size, DDL_TILE_BUFFER_ALIGN_BYTES);
+	if (!encoder->enc_dpb_addr.virtual_base_addr) {
+		VIDC_LOGERR_STRING("ddl_enc_start:Dpb_alloc_failed");
+		return VCD_ERR_ALLOC_FAIL;
+	}
+
+	if ((encoder->codec.codec == VCD_CODEC_MPEG4 &&
+	     !encoder->short_header.short_header) ||
+	    encoder->codec.codec == VCD_CODEC_H264) {
+		ddl_pmem_alloc(&encoder->seq_header,
+			       DDL_ENC_SEQHEADER_SIZE,
+			       DDL_LINEAR_BUFFER_ALIGN_BYTES);
+		if (!encoder->seq_header.virtual_base_addr) {
+			ddl_pmem_free(&encoder->enc_dpb_addr);
+			VIDC_LOGERR_STRING
+			    ("ddl_enc_start:Seq_hdr_alloc_failed");
+			return VCD_ERR_ALLOC_FAIL;
+		}
+	} else {
+		encoder->seq_header.buffer_size = 0;
+		encoder->seq_header.virtual_base_addr = 0;
+	}
+
+	DDL_BUSY(ddl_context);
+
+	ddl_context->current_ddl = ddl;
+	ddl_context->client_data = client_data;
+	ddl_channel_set(ddl);
+	return VCD_S_SUCCESS;
+}
+
+u32 ddl_decode_start(u32 *ddl_handle,
+     struct vcd_sequence_hdr *header, void *client_data)
+{
+	struct ddl_client_context *ddl =
+	    (struct ddl_client_context *)ddl_handle;
+	struct ddl_context *ddl_context;
+	struct ddl_decoder_data *decoder;
+
+	ddl_context = ddl_get_context();
+
+	if (!DDL_IS_INITIALIZED(ddl_context)) {
+		VIDC_LOGERR_STRING("ddl_dec_start:Not_inited");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+	if (DDL_IS_BUSY(ddl_context)) {
+		VIDC_LOGERR_STRING("ddl_dec_start:Ddl_busy");
+		return VCD_ERR_BUSY;
+	}
+	if (!ddl || !ddl->decoding) {
+		VIDC_LOGERR_STRING("ddl_dec_start:Bad_handle");
+		return VCD_ERR_BAD_HANDLE;
+	}
+	if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) {
+		VIDC_LOGERR_STRING("ddl_dec_start:Not_in_opened_state");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+
+	if ((header) &&
+	    ((!header->sequence_header_len) ||
+	     (!header->sequence_header)
+	    )
+	    ) {
+		VIDC_LOGERR_STRING("ddl_dec_start:Bad_param_seq_header");
+		return VCD_ERR_ILLEGAL_PARM;
+	}
+
+	if (!ddl_decoder_ready_to_start(ddl, header)) {
+		VIDC_LOGERR_STRING("ddl_dec_start:Err_param_settings");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+
+	DDL_BUSY(ddl_context);
+
+	decoder = &ddl->codec_data.decoder;
+	if (header) {
+		decoder->header_in_start = true;
+		decoder->decode_config = *header;
+	} else {
+		decoder->header_in_start = false;
+		decoder->decode_config.sequence_header_len = 0;
+	}
+
+	if (decoder->codec.codec == VCD_CODEC_H264) {
+		ddl_pmem_alloc(&decoder->h264Vsp_temp_buffer,
+			       DDL_DECODE_H264_VSPTEMP_BUFSIZE,
+			       DDL_LINEAR_BUFFER_ALIGN_BYTES);
+		if (!decoder->h264Vsp_temp_buffer.virtual_base_addr) {
+			DDL_IDLE(ddl_context);
+			VIDC_LOGERR_STRING
+			    ("ddl_dec_start:H264Sps_alloc_failed");
+			return VCD_ERR_ALLOC_FAIL;
+		}
+	}
+
+	ddl_context->current_ddl = ddl;
+	ddl_context->client_data = client_data;
+
+	ddl_channel_set(ddl);
+	return VCD_S_SUCCESS;
+}
+
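+/*
+ * Queue one bitstream buffer for decode.  Depending on the client
+ * state this either runs the frame directly, programs the decoded
+ * picture buffers, or treats the buffer as sequence header data for
+ * codec initialisation; any other state is rejected as an illegal
+ * operation.
+ */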
+u32 ddl_decode_frame(u32 *ddl_handle,
+     struct ddl_frame_data_tag *input_bits, void *client_data)
+{
+	u32 vcd_status = VCD_S_SUCCESS;
+	struct ddl_client_context *ddl =
+	    (struct ddl_client_context *)ddl_handle;
+	struct ddl_context *ddl_context = ddl_get_context();
+
+	if (!DDL_IS_INITIALIZED(ddl_context)) {
+		VIDC_LOGERR_STRING("ddl_dec_frame:Not_inited");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+	if (DDL_IS_BUSY(ddl_context)) {
+		VIDC_LOGERR_STRING("ddl_dec_frame:Ddl_busy");
+		return VCD_ERR_BUSY;
+	}
+	if (!ddl || !ddl->decoding) {
+		VIDC_LOGERR_STRING("ddl_dec_frame:Bad_handle");
+		return VCD_ERR_BAD_HANDLE;
+	}
+	if (!input_bits ||
+	    ((!input_bits->vcd_frm.physical ||
+	      !input_bits->vcd_frm.data_len) &&
+	     (!(VCD_FRAME_FLAG_EOS & input_bits->vcd_frm.flags))
+	    )
+	    ) {
+		VIDC_LOGERR_STRING("ddl_dec_frame:Bad_input_param");
+		return VCD_ERR_ILLEGAL_PARM;
+	}
+
+	DDL_BUSY(ddl_context);
+
+	ddl_context->current_ddl = ddl;
+	ddl_context->client_data = client_data;
+
+	ddl->input_frame = *input_bits;
+
+	if (DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME)) {
+		ddl_decode_frame_run(ddl);
+	} else {
+		if (!ddl->codec_data.decoder.dp_buf.no_of_dec_pic_buf) {
+			VIDC_LOGERR_STRING("ddl_dec_frame:Dpbs_required");
+			vcd_status = VCD_ERR_ILLEGAL_OP;
+		} else if (DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPB)) {
+			vcd_status = ddl_decode_set_buffers(ddl);
+		} else if (DDLCLIENT_STATE_IS(ddl,
+			DDL_CLIENT_WAIT_FOR_INITCODEC)) {
+			ddl->codec_data.decoder.decode_config.
+			    sequence_header =
+			    ddl->input_frame.vcd_frm.physical;
+			ddl->codec_data.decoder.decode_config.
+			    sequence_header_len =
+			    ddl->input_frame.vcd_frm.data_len;
+			ddl_decode_init_codec(ddl);
+		} else {
+			VIDC_LOGERR_STRING("Dec_frame:Wrong_state");
+			vcd_status = VCD_ERR_ILLEGAL_OP;
+		}
+		if (vcd_status)
+			DDL_IDLE(ddl_context);
+	}
+	return vcd_status;
+}
+
+u32 ddl_encode_frame(u32 *ddl_handle,
+     struct ddl_frame_data_tag *input_frame,
+     struct ddl_frame_data_tag *output_bit, void *client_data)
+{
+	struct ddl_client_context *ddl =
+	    (struct ddl_client_context *)ddl_handle;
+	struct ddl_context *ddl_context = ddl_get_context();
+
+	if (vidc_msg_timing)
+		ddl_set_core_start_time(__func__, ENC_OP_TIME);
+
+	if (!DDL_IS_INITIALIZED(ddl_context)) {
+		VIDC_LOGERR_STRING("ddl_enc_frame:Not_inited");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+	if (DDL_IS_BUSY(ddl_context)) {
+		VIDC_LOGERR_STRING("ddl_enc_frame:Ddl_busy");
+		return VCD_ERR_BUSY;
+	}
+	if (!ddl || ddl->decoding) {
+		VIDC_LOGERR_STRING("ddl_enc_frame:Bad_handle");
+		return VCD_ERR_BAD_HANDLE;
+	}
+	if (!input_frame ||
+	    !input_frame->vcd_frm.physical ||
+	    ddl->codec_data.encoder.input_buf_req.sz !=
+	    input_frame->vcd_frm.data_len) {
+		VIDC_LOGERR_STRING("ddl_enc_frame:Bad_input_params");
+		return VCD_ERR_ILLEGAL_PARM;
+	}
+	if ((((u32) input_frame->vcd_frm.physical +
+		   input_frame->vcd_frm.offset) &
+		  (DDL_STREAMBUF_ALIGN_GUARD_BYTES)
+	    )
+	    ) {
+		VIDC_LOGERR_STRING
+		    ("ddl_enc_frame:Un_aligned_yuv_start_address");
+		return VCD_ERR_ILLEGAL_PARM;
+	}
+	if (!output_bit ||
+	    !output_bit->vcd_frm.physical ||
+	    !output_bit->vcd_frm.alloc_len) {
+		VIDC_LOGERR_STRING("ddl_enc_frame:Bad_output_params");
+		return VCD_ERR_ILLEGAL_PARM;
+	}
+	if ((ddl->codec_data.encoder.output_buf_req.sz +
+	     output_bit->vcd_frm.offset) >
+	    output_bit->vcd_frm.alloc_len) {
+		VIDC_LOGERR_STRING
+		    ("ddl_enc_frame:offset_large, Exceeds_min_buf_size");
+	}
+	if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME)) {
+		VIDC_LOGERR_STRING("ddl_enc_frame:Wrong_state");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+
+	DDL_BUSY(ddl_context);
+
+	ddl_context->current_ddl = ddl;
+	ddl_context->client_data = client_data;
+
+	ddl->input_frame = *input_frame;
+	ddl->output_frame = *output_bit;
+
+	ddl_encode_frame_run(ddl);
+	return VCD_S_SUCCESS;
+}
+
+u32 ddl_decode_end(u32 *ddl_handle, void *client_data)
+{
+	struct ddl_client_context *ddl =
+	    (struct ddl_client_context *)ddl_handle;
+	struct ddl_context *ddl_context;
+
+	ddl_context = ddl_get_context();
+
+	if (vidc_msg_timing) {
+		ddl_reset_core_time_variables(DEC_OP_TIME);
+		ddl_reset_core_time_variables(DEC_IP_TIME);
+	}
+
+	if (!DDL_IS_INITIALIZED(ddl_context)) {
+		VIDC_LOGERR_STRING("ddl_dec_end:Not_inited");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+	if (DDL_IS_BUSY(ddl_context)) {
+		VIDC_LOGERR_STRING("ddl_dec_end:Ddl_busy");
+		return VCD_ERR_BUSY;
+	}
+	if (!ddl || !ddl->decoding) {
+		VIDC_LOGERR_STRING("ddl_dec_end:Bad_handle");
+		return VCD_ERR_BAD_HANDLE;
+	}
+	if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME) &&
+		!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_INITCODEC) &&
+		!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPB) &&
+		!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_FATAL_ERROR)
+	    ) {
+		VIDC_LOGERR_STRING("ddl_dec_end:Wrong_state");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+	DDL_BUSY(ddl_context);
+
+	ddl_context->current_ddl = ddl;
+	ddl_context->client_data = client_data;
+
+	ddl_channel_end(ddl);
+	return VCD_S_SUCCESS;
+}
+
+u32 ddl_encode_end(u32 *ddl_handle, void *client_data)
+{
+	struct ddl_client_context *ddl =
+	    (struct ddl_client_context *)ddl_handle;
+	struct ddl_context *ddl_context;
+
+	ddl_context = ddl_get_context();
+
+	if (vidc_msg_timing)
+		ddl_reset_core_time_variables(ENC_OP_TIME);
+
+	if (!DDL_IS_INITIALIZED(ddl_context)) {
+		VIDC_LOGERR_STRING("ddl_enc_end:Not_inited");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+	if (DDL_IS_BUSY(ddl_context)) {
+		VIDC_LOGERR_STRING("ddl_enc_end:Ddl_busy");
+		return VCD_ERR_BUSY;
+	}
+	if (!ddl || ddl->decoding) {
+		VIDC_LOGERR_STRING("ddl_enc_end:Bad_handle");
+		return VCD_ERR_BAD_HANDLE;
+	}
+	if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME) &&
+		!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_INITCODEC) &&
+		!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_FATAL_ERROR)) {
+		VIDC_LOGERR_STRING("ddl_enc_end:Wrong_state");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+	DDL_BUSY(ddl_context);
+
+	ddl_context->current_ddl = ddl;
+	ddl_context->client_data = client_data;
+
+	ddl_channel_end(ddl);
+	return VCD_S_SUCCESS;
+}
+
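+/*
+ * Hard recovery path: software-reset the 720p core, drop every
+ * registered client (freeing its internal buffers), release the
+ * context buffers and wipe the DDL context back to its power-on
+ * state.
+ */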
+u32 ddl_reset_hw(u32 mode)
+{
+	struct ddl_context *ddl_context;
+	struct ddl_client_context *ddl;
+	int i_client_num;
+
+	VIDC_LOG_STRING("ddl_reset_hw:called");
+	ddl_context = ddl_get_context();
+	ddl_move_command_state(ddl_context, DDL_CMD_INVALID);
+	DDL_BUSY(ddl_context);
+
+	if (ddl_context->core_virtual_base_addr)
+		vidc_720p_do_sw_reset();
+
+	ddl_context->device_state = DDL_DEVICE_NOTINIT;
+	for (i_client_num = 0; i_client_num < VCD_MAX_NO_CLIENT;
+			++i_client_num) {
+		ddl = ddl_context->ddl_clients[i_client_num];
+		ddl_context->ddl_clients[i_client_num] = NULL;
+		if (ddl) {
+			ddl_release_client_internal_buffers(ddl);
+			ddl_client_transact(DDL_FREE_CLIENT, &ddl);
+		}
+	}
+
+	ddl_release_context_buffers(ddl_context);
+	DDL_MEMSET(ddl_context, 0, sizeof(struct ddl_context));
+
+	return true;
+}
diff --git a/drivers/video/msm/vidc/720p/ddl/vcd_ddl.h b/drivers/video/msm/vidc/720p/ddl/vcd_ddl.h
new file mode 100644
index 0000000..157b556
--- /dev/null
+++ b/drivers/video/msm/vidc/720p/ddl/vcd_ddl.h
@@ -0,0 +1,282 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _VCD_DDL_H_
+#define _VCD_DDL_H_
+#include "vcd_ddl_api.h"
+#include "vcd_ddl_utils.h"
+#include "vcd_ddl_firmware.h"
+#include "vidc.h"
+
+#undef DDL_INLINE
+#define DDL_INLINE
+
+#define DDL_BUSY_STATE 1
+#define DDL_IDLE_STATE 0
+#define DDL_ERROR_STATE 2
+#define DDL_IS_BUSY(ddl_context) \
+	(((ddl_context)->ddl_busy != DDL_IDLE_STATE))
+#define DDL_BUSY(ddl_context) \
+	((ddl_context)->ddl_busy = DDL_BUSY_STATE)
+#define DDL_IDLE(ddl_context) \
+	((ddl_context)->ddl_busy = DDL_IDLE_STATE)
+#define DDL_ERROR(ddl_context) \
+	((ddl_context)->ddl_busy = DDL_ERROR_STATE)
+
+#define DDL_DEVICE_NOTINIT  0
+#define DDL_DEVICE_INITED   1
+#define DDL_DEVICE_HWFATAL  2
+#define DDL_IS_INITIALIZED(ddl_context)  \
+	((ddl_context)->device_state == DDL_DEVICE_INITED)
+
+#define DDLCOMMAND_STATE_IS(ddl_context, command_state) \
+	((command_state) == (ddl_context)->cmd_state)
+
+#define DDLCLIENT_STATE_IS(ddl, current_state) \
+	((current_state) == (ddl)->client_state)
+
+#define DDL_DPB_OP_INIT       1
+#define DDL_DPB_OP_MARK_FREE  2
+#define DDL_DPB_OP_MARK_BUSY  3
+#define DDL_DPB_OP_SET_MASK   4
+#define DDL_DPB_OP_RETRIEVE   5
+
+#define DDL_INIT_CLIENTS     0
+#define DDL_GET_CLIENT       1
+#define DDL_FREE_CLIENT      2
+#define DDL_ACTIVE_CLIENT    3
+
+#define DDL_INVALID_CHANNEL_ID  ((u32)~0)
+#define DDL_INVALID_CODEC_TYPE ((u32)~0)
+
+#define DDL_ENC_REQ_IFRAME                      0x01
+#define DDL_ENC_CHANGE_IPERIOD                  0x02
+#define DDL_ENC_CHANGE_BITRATE                  0x04
+#define DDL_ENC_CHANGE_FRAMERATE                0x08
+#define DDL_ENC_CHANGE_CIR                      0x10
+
+#define DDL_DEC_REQ_OUTPUT_FLUSH                0x1
+
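+/*
+ * A single DDL allocation: the physical/virtual base pointers as
+ * returned by the allocator plus their aligned counterparts and the
+ * usable buffer size.
+ */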
+struct ddl_buf_addr {
+	u32 *physical_base_addr;
+	u32 *virtual_base_addr;
+	u32 *align_physical_addr;
+	u32 *align_virtual_addr;
+	u32 buffer_size;
+};
+
+enum ddl_cmd_state {
+	DDL_CMD_INVALID = 0x0,
+	DDL_CMD_DMA_INIT = 0x1,
+	DDL_CMD_CPU_RESET = 0x2,
+	DDL_CMD_CHANNEL_SET = 0x3,
+	DDL_CMD_INIT_CODEC = 0x4,
+	DDL_CMD_HEADER_PARSE = 0x5,
+	DDL_CMD_DECODE_SET_DPB = 0x6,
+	DDL_CMD_DECODE_FRAME = 0x7,
+	DDL_CMD_ENCODE_FRAME = 0x8,
+	DDL_CMD_EOS = 0x9,
+	DDL_CMD_CHANNEL_END = 0xA,
+	DDL_CMD_32BIT = 0x7FFFFFFF
+};
+
+enum ddl_client_state {
+	DDL_CLIENT_INVALID = 0x0,
+	DDL_CLIENT_OPEN = 0x1,
+	DDL_CLIENT_WAIT_FOR_CHDONE = 0x2,
+	DDL_CLIENT_WAIT_FOR_INITCODEC = 0x3,
+	DDL_CLIENT_WAIT_FOR_INITCODECDONE = 0x4,
+	DDL_CLIENT_WAIT_FOR_DPB = 0x5,
+	DDL_CLIENT_WAIT_FOR_DPBDONE = 0x6,
+	DDL_CLIENT_WAIT_FOR_FRAME = 0x7,
+	DDL_CLIENT_WAIT_FOR_FRAME_DONE = 0x8,
+	DDL_CLIENT_WAIT_FOR_EOS_DONE = 0x9,
+	DDL_CLIENT_WAIT_FOR_CHEND = 0xA,
+	DDL_CLIENT_FATAL_ERROR = 0xB,
+	DDL_CLIENT_32BIT = 0x7FFFFFFF
+};
+
+struct ddl_mask {
+	u32 client_mask;
+	u32 hw_mask;
+};
+
+struct ddl_context;
+
+struct ddl_client_context;
+
+struct ddl_codec_data_hdr {
+	u32 decoding;
+};
+
+struct ddl_encoder_data {
+	struct ddl_codec_data_hdr hdr;
+	struct vcd_property_codec codec;
+	struct vcd_property_frame_size frame_size;
+	struct vcd_property_frame_rate frame_rate;
+	struct vcd_property_target_bitrate target_bit_rate;
+	struct vcd_property_profile profile;
+	struct vcd_property_level level;
+	struct vcd_property_rate_control rc;
+	struct vcd_property_multi_slice multi_slice;
+	u32 meta_data_enable_flag;
+	u32 suffix;
+	struct ddl_buf_addr meta_data_input;
+	u32 meta_data_offset;
+	struct vcd_property_short_header short_header;
+	struct vcd_property_vop_timing vop_timing;
+	u32 hdr_ext_control;
+	struct vcd_property_db_config db_control;
+	struct vcd_property_entropy_control entropy_control;
+	struct vcd_property_i_period i_period;
+	struct vcd_property_session_qp session_qp;
+	struct vcd_property_qp_range qp_range;
+	struct vcd_property_rc_level rc_level;
+	u32 r_cframe_skip;
+	u32 vb_vbuffer_size;
+	struct vcd_property_frame_level_rc_params frame_level_rc;
+	struct vcd_property_adaptive_rc_params adaptive_rc;
+	struct vcd_property_intra_refresh_mb_number intra_refresh;
+	struct vcd_property_buffer_format buf_format;
+	struct vcd_property_buffer_format re_con_buf_format;
+	u32 dynamic_prop_change;
+	u32 dynmic_prop_change_req;
+	u32 ext_enc_control_val;
+	struct vidc_720p_enc_frame_info enc_frame_info;
+	struct ddl_buf_addr enc_dpb_addr;
+	struct ddl_buf_addr seq_header;
+	struct vcd_buffer_requirement input_buf_req;
+	struct vcd_buffer_requirement output_buf_req;
+	struct vcd_buffer_requirement client_input_buf_req;
+	struct vcd_buffer_requirement client_output_buf_req;
+};
+
+struct ddl_decoder_data {
+	struct ddl_codec_data_hdr hdr;
+	struct vcd_property_codec codec;
+	struct vcd_property_buffer_format buf_format;
+	struct vcd_property_frame_size frame_size;
+	struct vcd_property_frame_size client_frame_size;
+	struct vcd_property_profile profile;
+	struct vcd_property_level level;
+	u32 progressive_only;
+	u32 output_order;
+	u32 meta_data_enable_flag;
+	u32 suffix;
+	struct ddl_buf_addr meta_data_input;
+	struct ddl_buf_addr ref_buffer;
+	u32 meta_data_offset;
+	struct vcd_property_post_filter post_filter;
+	struct vcd_sequence_hdr decode_config;
+	u32 header_in_start;
+	u32 min_dpb_num;
+	u32 y_cb_cr_size;
+	struct ddl_property_dec_pic_buffers dp_buf;
+	struct ddl_mask dpb_mask;
+	u32 dynamic_prop_change;
+	u32 dynmic_prop_change_req;
+	struct vidc_720p_dec_disp_info dec_disp_info;
+	struct ddl_buf_addr dpb_comv_buffer;
+	struct ddl_buf_addr h264Vsp_temp_buffer;
+	struct vcd_buffer_requirement actual_input_buf_req;
+	struct vcd_buffer_requirement min_input_buf_req;
+	struct vcd_buffer_requirement client_input_buf_req;
+	struct vcd_buffer_requirement actual_output_buf_req;
+	struct vcd_buffer_requirement min_output_buf_req;
+	struct vcd_buffer_requirement client_output_buf_req;
+};
+
+union ddl_codec_data {
+	struct ddl_codec_data_hdr hdr;
+	struct ddl_decoder_data decoder;
+	struct ddl_encoder_data encoder;
+};
+
+struct ddl_context {
+	int memtype;
+	u8 *core_virtual_base_addr;
+	void (*ddl_callback) (u32 event, u32 status, void *payload, size_t sz,
+			      u32 *ddl_handle, void *const client_data);
+	void *client_data;
+	void (*interrupt_clr) (void);
+	enum ddl_cmd_state cmd_state;
+	struct ddl_client_context *current_ddl;
+	struct ddl_buf_addr context_buf_addr;
+	struct ddl_buf_addr db_line_buffer;
+	struct ddl_buf_addr data_partition_tempbuf;
+	struct ddl_buf_addr metadata_shared_input;
+	struct ddl_buf_addr dbg_core_dump;
+	u32 enable_dbg_core_dump;
+	struct ddl_client_context *ddl_clients[VCD_MAX_NO_CLIENT];
+	u32 device_state;
+	u32 ddl_busy;
+	u32 intr_status;
+	u32 cmd_err_status;
+	u32 disp_pic_err_status;
+	u32 op_failed;
+};
+
+struct ddl_client_context {
+	struct ddl_context *ddl_context;
+	enum ddl_client_state client_state;
+	u32 decoding;
+	u32 channel_id;
+	struct ddl_frame_data_tag input_frame;
+	struct ddl_frame_data_tag output_frame;
+	union ddl_codec_data codec_data;
+};
+
+DDL_INLINE struct ddl_context *ddl_get_context(void);
+DDL_INLINE void ddl_move_command_state(struct ddl_context *ddl_context,
+				       enum ddl_cmd_state command_state);
+DDL_INLINE void ddl_move_client_state(struct ddl_client_context *ddl,
+				      enum ddl_client_state client_state);
+void ddl_core_init(struct ddl_context *);
+void ddl_core_start_cpu(struct ddl_context *);
+void ddl_channel_set(struct ddl_client_context *);
+void ddl_channel_end(struct ddl_client_context *);
+void ddl_encode_init_codec(struct ddl_client_context *);
+void ddl_decode_init_codec(struct ddl_client_context *);
+void ddl_encode_frame_run(struct ddl_client_context *);
+void ddl_decode_frame_run(struct ddl_client_context *);
+void  ddl_decode_eos_run(struct ddl_client_context *);
+void ddl_release_context_buffers(struct ddl_context *);
+void ddl_release_client_internal_buffers(struct ddl_client_context *ddl);
+u32 ddl_decode_set_buffers(struct ddl_client_context *);
+u32 ddl_decoder_dpb_transact(struct ddl_decoder_data *decoder,
+			     struct ddl_frame_data_tag *in_out_frame,
+			     u32 operation);
+u32 ddl_client_transact(u32, struct ddl_client_context **);
+void ddl_set_default_decoder_buffer_req
+    (struct ddl_decoder_data *decoder, u32 estimate);
+void ddl_set_default_encoder_buffer_req
+    (struct ddl_encoder_data *encoder);
+void ddl_set_default_dec_property(struct ddl_client_context *);
+u32 ddl_encoder_ready_to_start(struct ddl_client_context *);
+u32 ddl_decoder_ready_to_start(struct ddl_client_context *,
+			       struct vcd_sequence_hdr *);
+u32 ddl_get_yuv_buffer_size
+    (struct vcd_property_frame_size *frame_size,
+     struct vcd_property_buffer_format *buf_format, u32 inter_lace,
+     enum vcd_codec codec);
+void ddl_calculate_stride(struct vcd_property_frame_size *frame_size,
+	u32 inter_lace, enum vcd_codec codec);
+void ddl_encode_dynamic_property(struct ddl_client_context *ddl,
+				 u32 enable);
+void ddl_decode_dynamic_property(struct ddl_client_context *ddl,
+				 u32 enable);
+void ddl_set_initial_default_values(struct ddl_client_context *ddl);
+u32 ddl_handle_core_errors(struct ddl_context *ddl_context);
+void ddl_client_fatal_cb(struct ddl_context *ddl_context);
+void ddl_hw_fatal_cb(struct ddl_context *ddl_context);
+u32 ddl_hal_engine_reset(struct ddl_context *ddl_context);
+#endif
diff --git a/drivers/video/msm/vidc/720p/ddl/vcd_ddl_api.h b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_api.h
new file mode 100644
index 0000000..53cc93e
--- /dev/null
+++ b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_api.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _VCD_DDL_API_H_
+#define _VCD_DDL_API_H_
+#include "vcd_ddl_internal_property.h"
+
+struct ddl_init_config {
+	int memtype;
+	u8 *core_virtual_base_addr;
+	void (*interrupt_clr) (void);
+	void (*ddl_callback) (u32 event, u32 status, void *payload, size_t sz,
+		u32 *ddl_handle, void *const client_data);
+};
+
+struct ddl_frame_data_tag {
+	struct vcd_frame_data vcd_frm;
+	u32 frm_trans_end;
+	u32 frm_delta;
+};
+
+u32 ddl_device_init(struct ddl_init_config *ddl_init_config,
+					void *client_data);
+u32 ddl_device_release(void *client_data);
+u32 ddl_open(u32 **ddl_handle, u32 decoding);
+u32 ddl_close(u32 **ddl_handle);
+u32 ddl_encode_start(u32 *ddl_handle, void *client_data);
+u32 ddl_encode_frame(u32 *ddl_handle,
+	struct ddl_frame_data_tag *input_frame,
+	struct ddl_frame_data_tag *output_bit, void *client_data);
+u32 ddl_encode_end(u32 *ddl_handle, void *client_data);
+u32 ddl_decode_start(u32 *ddl_handle, struct vcd_sequence_hdr *header,
+					void *client_data);
+u32 ddl_decode_frame(u32 *ddl_handle,
+	struct ddl_frame_data_tag *input_bits, void *client_data);
+u32 ddl_decode_end(u32 *ddl_handle, void *client_data);
+u32 ddl_set_property(u32 *ddl_handle,
+	struct vcd_property_hdr *property_hdr, void *property_value);
+u32 ddl_get_property(u32 *ddl_handle,
+	struct vcd_property_hdr *property_hdr, void *property_value);
+void ddl_read_and_clear_interrupt(void);
+u32 ddl_process_core_response(void);
+u32 ddl_reset_hw(u32 mode);
+#endif
diff --git a/drivers/video/msm/vidc/720p/ddl/vcd_ddl_core.h b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_core.h
new file mode 100644
index 0000000..9fdb668
--- /dev/null
+++ b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_core.h
@@ -0,0 +1,99 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _VCD_DDL_CORE_H_
+#define _VCD_DDL_CORE_H_
+
+#define DDL_LINEAR_BUF_ALIGN_MASK   0xFFFFFFF8U
+#define DDL_LINEAR_BUF_ALIGN_GUARD_BYTES 0x7
+#define DDL_LINEAR_BUFFER_ALIGN_BYTES  8
+
+#define DDL_TILE_BUF_ALIGN_MASK   0xFFFFE000U
+#define DDL_TILE_BUF_ALIGN_GUARD_BYTES 0x1FFF
+#define DDL_TILE_BUFFER_ALIGN_BYTES  8192
+
+#define DDL_MAX_FRAME_WIDTH   (1280)
+#define DDL_MAX_FRAME_HEIGHT  (720)
+
+#define DDL_MAX_DP_FRAME_WIDTH  352
+#define DDL_MAX_DP_FRAME_HEIGHT 288
+
+#define DDL_MAX_BIT_RATE (14*1000*1000)
+
+#define DDL_SW_RESET_SLEEP 10
+
+#define VCD_MAX_NO_CLIENT  4
+#define VCD_FRAME_COMMAND_DEPTH 1
+#define VCD_GENERAL_COMMAND_DEPTH 1
+#define VCD_COMMAND_EXCLUSIVE true
+
+#define DDL_HW_TIMEOUT_IN_MS  1000
+
+#define DDL_STREAMBUF_ALIGN_GUARD_BYTES 0x7
+
+#define DDL_CONTEXT_MEMORY (1024 * 15 * (VCD_MAX_NO_CLIENT + 1))
+#define DDL_DB_LINE_BUF_SIZE \
+(((((DDL_MAX_FRAME_WIDTH * 4) - 1) / 256) + 1) * 8 * 1024)
+#define DDL_MPEG4_DATA_PARTITION_BUF_SIZE (64 * 1024)
+#define DDL_DECODE_H264_VSPTEMP_BUFSIZE 0x59c00
+#define DDL_ENC_NUM_DPB_BUFFERS 2
+
+#define DDL_DBG_CORE_DUMP_SIZE (10 * 1024)
+
+#define DDL_BUFEND_PAD    256
+#define DDL_ENC_SEQHEADER_SIZE (256+DDL_BUFEND_PAD)
+#define DDL_MAX_BUFFER_COUNT  32
+
+#define DDL_MPEG_REFBUF_COUNT  2
+
+#define DDL_MPEG_COMV_BUF_NO 2
+#define DDL_H263_COMV_BUF_NO 2
+#define DDL_COMV_BUFLINE_NO  128
+#define DDL_VC1_COMV_BUFLINE_NO  32
+#define DDL_MINIMUM_BYTE_PER_SLICE  1920
+
+#define DDL_MAX_H264_QP   51
+#define DDL_MAX_MPEG4_QP  31
+
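+/*
+ * Rounds 'addr' up to the next 8-byte boundary and then adds
+ * DDL_BUFEND_PAD guard bytes; used on stream-buffer end addresses,
+ * presumably so the hardware has slack past the real payload.
+ */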
+#define DDL_PADDING_HACK(addr) \
+ (addr) = (u32)((((u32)(addr) + DDL_STREAMBUF_ALIGN_GUARD_BYTES) & \
+			 ~(DDL_STREAMBUF_ALIGN_GUARD_BYTES)) + DDL_BUFEND_PAD)
+
+#define DDL_QCIF_MBS 99
+#define DDL_CIF_MBS  396
+#define DDL_QVGA_MBS 300
+#define DDL_VGA_MBS  1200
+#define DDL_WVGA_MBS 1500
+#define DDL_720P_MBS 3600
+
+#define DDL_FRAMESIZE_DIV_FACTOR   (0xF)
+
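+/*
+ * DDL_NO_OF_MB rounds each dimension up to a multiple of 16 and returns
+ * the macroblock count.  DDL_ALLOW_ENC_FRAMESIZE caps the total at the
+ * 720p macroblock budget and requires each dimension to be at least 32
+ * pixels (the height is also checked against DDL_MAX_FRAME_WIDTH,
+ * presumably to permit rotated/portrait frames within the same budget).
+ * DDL_VALIDATE_ENC_FRAMESIZE additionally requires both dimensions to be
+ * 16-pixel aligned.
+ */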
+#define DDL_NO_OF_MB(width, height) \
+	((((width) + 15) >> 4) * (((height) + 15) >> 4))
+
+#define DDL_ALLOW_ENC_FRAMESIZE(width, height) \
+((DDL_NO_OF_MB(width, height) <= DDL_720P_MBS) \
+ && (((width) <= DDL_MAX_FRAME_WIDTH) &&            \
+     ((height) <= DDL_MAX_FRAME_WIDTH))            \
+ && ((width) >= 32 && (height) >= 32))
+
+#define DDL_VALIDATE_ENC_FRAMESIZE(width, height) \
+	(!((width) & DDL_FRAMESIZE_DIV_FACTOR) &&     \
+     !((height) & DDL_FRAMESIZE_DIV_FACTOR))
+
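+/* DDL_TILE_ALIGN rounds 'val' up to the next multiple of 'grid'. */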
+#define DDL_TILE_ALIGN_WIDTH     128
+#define DDL_TILE_ALIGN_HEIGHT    32
+#define DDL_TILE_MULTIPLY_FACTOR 8192
+#define DDL_TILE_ALIGN(val, grid) \
+   (((val) + (grid) - 1) / (grid) * (grid))
+
+#endif
diff --git a/drivers/video/msm/vidc/720p/ddl/vcd_ddl_errors.c b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_errors.c
new file mode 100644
index 0000000..1b62553
--- /dev/null
+++ b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_errors.c
@@ -0,0 +1,595 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "vidc_type.h"
+#include "vcd_ddl_utils.h"
+#include "vcd_ddl.h"
+
+#if DEBUG
+#define DBG(x...) printk(KERN_DEBUG x)
+#else
+#define DBG(x...)
+#endif
+
+#define ERR(x...) printk(KERN_ERR x)
+
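+/*
+ * The values below appear to mirror the numeric error codes the video
+ * firmware reports in cmd_err_status / disp_pic_err_status; they are
+ * grouped further down into HW-fatal, client-fatal, recoverable and
+ * warning classes.
+ */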
+#define INVALID_CHANNEL_NUMBER  1
+#define INVALID_COMMAND_ID 2
+#define CHANNEL_ALREADY_IN_USE 3
+#define CHANNEL_NOT_SET_BEFORE_CHANNEL_CLOSE 4
+#define CHANNEL_SET_ERROR_INIT_CODEC 5
+#define INIT_CODEC_ALREADY_CALLED 6
+#define CHANNEL_SET_ERROR_INIT_BUFFERS 7
+#define INIT_CODEC_ERROR_INIT_BUFFERS 8
+#define INIT_BUFFER_ALREADY_CALLED  9
+#define CHANNEL_SET_ERROR_FRAME_RUN 10
+#define INIT_CODEC_ERROR_FRAME_RUN 11
+#define INIT_BUFFERS_ERROR_FRAME_RUN 12
+#define CODEC_LIMIT_EXCEEDED 13
+#define FIRMWARE_SIZE_ZERO 14
+#define FIRMWARE_ADDRESS_EXT_ZERO 15
+#define CONTEXT_DMA_IN_ERROR 16
+#define CONTEXT_DMA_OUT_ERROR 17
+#define PROGRAM_DMA_ERROR 18
+#define CONTEXT_STORE_EXT_ADD_ZERO 19
+#define MEM_ALLOCATION_FAILED 20
+
+
+#define UNSUPPORTED_FEATURE_IN_PROFILE 27
+#define RESOLUTION_NOT_SUPPORTED 28
+#define HEADER_NOT_FOUND 52
+#define MB_NUM_INVALID 61
+#define FRAME_RATE_NOT_SUPPORTED 62
+#define INVALID_QP_VALUE 63
+#define INVALID_RC_REACTION_COEFFICIENT 64
+#define INVALID_CPB_SIZE_AT_GIVEN_LEVEL 65
+
+#define ALLOC_DPB_SIZE_NOT_SUFFICIENT 71
+#define ALLOC_DB_SIZE_NOT_SUFFICIENT 72
+#define ALLOC_COMV_SIZE_NOT_SUFFICIENT 73
+#define NUM_BUF_OUT_OF_RANGE 74
+#define NULL_CONTEXT_POINTER 75
+#define NULL_COMAMND_CONTROL_COMM_POINTER 76
+#define NULL_METADATA_INPUT_POINTER 77
+#define NULL_DPB_POINTER 78
+#define NULL_DB_POINTER 79
+#define NULL_COMV_POINTER 80
+
+#define DIVIDE_BY_ZERO 81
+#define BIT_STREAM_BUF_EXHAUST 82
+#define DMA_NOT_STOPPED 83
+#define DMA_TX_NOT_COMPLETE 84
+
+#define MB_HEADER_NOT_DONE  85
+#define MB_COEFF_NOT_DONE 86
+#define CODEC_SLICE_NOT_DONE 87
+#define VME_NOT_READY 88
+#define VC1_BITPLANE_DECODE_ERR 89
+
+
+#define VSP_NOT_READY 90
+#define BUFFER_FULL_STATE 91
+
+#define RESOLUTION_MISMATCH 112
+#define NV_QUANT_ERR 113
+#define SYNC_MARKER_ERR 114
+#define FEATURE_NOT_SUPPORTED 115
+#define MEM_CORRUPTION  116
+#define INVALID_REFERENCE_FRAME  117
+#define PICTURE_CODING_TYPE_ERR  118
+#define MV_RANGE_ERR  119
+#define PICTURE_STRUCTURE_ERR 120
+#define SLICE_ADDR_INVALID  121
+#define NON_PAIRED_FIELD_NOT_SUPPORTED  122
+#define NON_FRAME_DATA_RECEIVED 123
+#define INCOMPLETE_FRAME  124
+#define NO_BUFFER_RELEASED_FROM_HOST  125
+#define PICTURE_MANAGEMENT_ERROR  128
+#define INVALID_MMCO  129
+#define INVALID_PIC_REORDERING 130
+#define INVALID_POC_TYPE 131
+#define ACTIVE_SPS_NOT_PRESENT 132
+#define ACTIVE_PPS_NOT_PRESENT 133
+#define INVALID_SPS_ID 134
+#define INVALID_PPS_ID 135
+
+
+#define METADATA_NO_SPACE_QP 151
+#define METADATA_NO_SAPCE_CONCEAL_MB 152
+#define METADATA_NO_SPACE_VC1_PARAM 153
+#define METADATA_NO_SPACE_SEI 154
+#define METADATA_NO_SPACE_VUI 155
+#define METADATA_NO_SPACE_EXTRA 156
+#define METADATA_NO_SPACE_DATA_NONE 157
+#define FRAME_RATE_UNKNOWN 158
+#define ASPECT_RATIO_UNKOWN 159
+#define COLOR_PRIMARIES_UNKNOWN 160
+#define TRANSFER_CHAR_UNKWON 161
+#define MATRIX_COEFF_UNKNOWN 162
+#define NON_SEQ_SLICE_ADDR 163
+#define BROKEN_LINK 164
+#define FRAME_CONCEALED 165
+#define PROFILE_UNKOWN 166
+#define LEVEL_UNKOWN 167
+#define BIT_RATE_NOT_SUPPORTED 168
+#define COLOR_DIFF_FORMAT_NOT_SUPPORTED 169
+#define NULL_EXTRA_METADATA_POINTER  170
+#define SYNC_POINT_NOT_RECEIVED_STARTED_DECODING  171
+#define NULL_FW_DEBUG_INFO_POINTER  172
+#define ALLOC_DEBUG_INFO_SIZE_INSUFFICIENT  173
+#define MAX_STAGE_COUNTER_EXCEEDED 174
+
+#define METADATA_NO_SPACE_MB_INFO 180
+#define METADATA_NO_SPACE_SLICE_SIZE 181
+#define RESOLUTION_WARNING 182
+
+static void ddl_handle_npf_decoding_error(
+	struct ddl_context *ddl_context);
+
+static u32 ddl_handle_seqhdr_fail_error(
+	struct ddl_context *ddl_context);
+
+void ddl_hw_fatal_cb(struct ddl_context *ddl_context)
+{
+	/* Invalidate the command state */
+	ddl_move_command_state(ddl_context, DDL_CMD_INVALID);
+	ddl_context->device_state = DDL_DEVICE_HWFATAL;
+
+	/* callback to the client to indicate hw fatal error */
+	ddl_context->ddl_callback(VCD_EVT_IND_HWERRFATAL,
+					VCD_ERR_HW_FATAL, NULL, 0,
+					(void *)ddl_context->current_ddl,
+					ddl_context->client_data);
+
+	DDL_IDLE(ddl_context);
+}
+
+static u32 ddl_handle_hw_fatal_errors(struct ddl_context
+			*ddl_context)
+{
+	u32 status = false;
+
+	switch (ddl_context->cmd_err_status) {
+
+	case INVALID_CHANNEL_NUMBER:
+	case INVALID_COMMAND_ID:
+	case CHANNEL_ALREADY_IN_USE:
+	case CHANNEL_NOT_SET_BEFORE_CHANNEL_CLOSE:
+	case CHANNEL_SET_ERROR_INIT_CODEC:
+	case INIT_CODEC_ALREADY_CALLED:
+	case CHANNEL_SET_ERROR_INIT_BUFFERS:
+	case INIT_CODEC_ERROR_INIT_BUFFERS:
+	case INIT_BUFFER_ALREADY_CALLED:
+	case CHANNEL_SET_ERROR_FRAME_RUN:
+	case INIT_CODEC_ERROR_FRAME_RUN:
+	case INIT_BUFFERS_ERROR_FRAME_RUN:
+	case CODEC_LIMIT_EXCEEDED:
+	case FIRMWARE_SIZE_ZERO:
+	case FIRMWARE_ADDRESS_EXT_ZERO:
+
+	case CONTEXT_DMA_IN_ERROR:
+	case CONTEXT_DMA_OUT_ERROR:
+	case PROGRAM_DMA_ERROR:
+	case CONTEXT_STORE_EXT_ADD_ZERO:
+	case MEM_ALLOCATION_FAILED:
+
+	case DIVIDE_BY_ZERO:
+	case DMA_NOT_STOPPED:
+	case DMA_TX_NOT_COMPLETE:
+
+	case VSP_NOT_READY:
+	case BUFFER_FULL_STATE:
+	case NULL_DB_POINTER:
+		ERR("HW FATAL ERROR");
+		ddl_hw_fatal_cb(ddl_context);
+		status = true;
+		break;
+	}
+	return status;
+}
+
+void ddl_client_fatal_cb(struct ddl_context *ddl_context)
+{
+	struct ddl_client_context  *ddl =
+		ddl_context->current_ddl;
+
+	if (ddl_context->cmd_state == DDL_CMD_DECODE_FRAME)
+		ddl_decode_dynamic_property(ddl, false);
+	else if (ddl_context->cmd_state == DDL_CMD_ENCODE_FRAME)
+		ddl_encode_dynamic_property(ddl, false);
+
+	ddl_move_command_state(ddl_context, DDL_CMD_INVALID);
+
+	ddl_move_client_state(ddl, DDL_CLIENT_FATAL_ERROR);
+
+	ddl_context->ddl_callback(VCD_EVT_IND_HWERRFATAL,
+		VCD_ERR_CLIENT_FATAL, NULL, 0,
+		(void *)ddl, ddl_context->client_data);
+
+	DDL_IDLE(ddl_context);
+}
+
+static u32 ddl_handle_client_fatal_errors(struct ddl_context
+			*ddl_context)
+{
+	u32 status = false;
+
+	switch (ddl_context->cmd_err_status) {
+	case MB_NUM_INVALID:
+	case FRAME_RATE_NOT_SUPPORTED:
+	case INVALID_QP_VALUE:
+	case INVALID_RC_REACTION_COEFFICIENT:
+	case INVALID_CPB_SIZE_AT_GIVEN_LEVEL:
+
+	case ALLOC_DPB_SIZE_NOT_SUFFICIENT:
+	case ALLOC_DB_SIZE_NOT_SUFFICIENT:
+	case ALLOC_COMV_SIZE_NOT_SUFFICIENT:
+	case NUM_BUF_OUT_OF_RANGE:
+	case NULL_CONTEXT_POINTER:
+	case NULL_COMAMND_CONTROL_COMM_POINTER:
+	case NULL_METADATA_INPUT_POINTER:
+	case NULL_DPB_POINTER:
+	case NULL_COMV_POINTER:
+		{
+			status = true;
+			break;
+		}
+	}
+
+	if (!status)
+		ERR("UNKNOWN-OP-FAILED");
+
+	ddl_client_fatal_cb(ddl_context);
+
+	return true;
+}
+
+static void ddl_input_failed_cb(struct ddl_context *ddl_context,
+			u32 vcd_event, u32 vcd_status)
+{
+	struct ddl_client_context  *ddl = ddl_context->current_ddl;
+
+	ddl_move_command_state(ddl_context, DDL_CMD_INVALID);
+
+	if (ddl->decoding)
+		ddl_decode_dynamic_property(ddl, false);
+	else
+		ddl_encode_dynamic_property(ddl, false);
+
+	ddl_context->ddl_callback(vcd_event,
+		vcd_status, &ddl->input_frame,
+		sizeof(struct ddl_frame_data_tag),
+		(void *)ddl, ddl_context->client_data);
+
+	ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_FRAME);
+}
+
+static u32 ddl_handle_core_recoverable_errors(struct ddl_context
+			*ddl_context)
+{
+	struct ddl_client_context  *ddl = ddl_context->current_ddl;
+	u32   vcd_status = VCD_S_SUCCESS;
+	u32   vcd_event = VCD_EVT_RESP_INPUT_DONE;
+	u32   eos = false, pending_display = 0, release_mask = 0;
+
+	if (ddl->decoding)
+		if (ddl_handle_seqhdr_fail_error(ddl_context))
+			return true;
+
+	if (ddl_context->cmd_state != DDL_CMD_DECODE_FRAME &&
+		ddl_context->cmd_state != DDL_CMD_ENCODE_FRAME) {
+		return false;
+	}
+	switch (ddl_context->cmd_err_status) {
+	case NON_PAIRED_FIELD_NOT_SUPPORTED:
+		{
+			ddl_handle_npf_decoding_error(ddl_context);
+			return true;
+		}
+	case NO_BUFFER_RELEASED_FROM_HOST:
+		{
+			/* let's check the sanity of this error */
+			release_mask =
+				ddl->codec_data.decoder.dpb_mask.hw_mask;
+			while (release_mask > 0) {
+				if ((release_mask & 0x1))
+					pending_display += 1;
+				release_mask >>= 1;
+			}
+
+			if (pending_display >=
+				ddl->codec_data.decoder.min_dpb_num) {
+				DBG("FWISSUE-REQBUF!!");
+				/* callback to client for client fatal error */
+				ddl_client_fatal_cb(ddl_context);
+				return true ;
+			}
+		vcd_event = VCD_EVT_RESP_OUTPUT_REQ;
+		break;
+		}
+	case BIT_STREAM_BUF_EXHAUST:
+	case MB_HEADER_NOT_DONE:
+	case MB_COEFF_NOT_DONE:
+	case CODEC_SLICE_NOT_DONE:
+	case VME_NOT_READY:
+	case VC1_BITPLANE_DECODE_ERR:
+		{
+			u32 reset_core;
+			/* need to reset the internal core hw engine */
+			reset_core = ddl_hal_engine_reset(ddl_context);
+			if (!reset_core)
+				return true;
+			/* fall through to process bitstream error handling */
+		}
+	case RESOLUTION_MISMATCH:
+	case NV_QUANT_ERR:
+	case SYNC_MARKER_ERR:
+	case FEATURE_NOT_SUPPORTED:
+	case MEM_CORRUPTION:
+	case INVALID_REFERENCE_FRAME:
+	case PICTURE_CODING_TYPE_ERR:
+	case MV_RANGE_ERR:
+	case PICTURE_STRUCTURE_ERR:
+	case SLICE_ADDR_INVALID:
+	case NON_FRAME_DATA_RECEIVED:
+	case INCOMPLETE_FRAME:
+	case PICTURE_MANAGEMENT_ERROR:
+	case INVALID_MMCO:
+	case INVALID_PIC_REORDERING:
+	case INVALID_POC_TYPE:
+	case ACTIVE_SPS_NOT_PRESENT:
+	case ACTIVE_PPS_NOT_PRESENT:
+		{
+			vcd_status = VCD_ERR_BITSTREAM_ERR;
+			break;
+		}
+	}
+
+	if (!vcd_status && vcd_event == VCD_EVT_RESP_INPUT_DONE)
+		return false;
+
+	ddl->input_frame.frm_trans_end = true;
+
+	eos = ((vcd_event == VCD_EVT_RESP_INPUT_DONE) &&
+		((VCD_FRAME_FLAG_EOS & ddl->input_frame.
+				vcd_frm.flags)));
+
+	if ((ddl->decoding && eos) ||
+		(!ddl->decoding))
+		ddl->input_frame.frm_trans_end = false;
+
+	if (vcd_event == VCD_EVT_RESP_INPUT_DONE &&
+		ddl->decoding &&
+		!ddl->codec_data.decoder.header_in_start &&
+		!ddl->codec_data.decoder.dec_disp_info.img_size_x &&
+		!ddl->codec_data.decoder.dec_disp_info.img_size_y
+		) {
+		/* this is the first-frame, sequence-header-only case */
+		vcd_status = VCD_S_SUCCESS;
+		ddl->input_frame.vcd_frm.flags |=
+			VCD_FRAME_FLAG_CODECCONFIG;
+		ddl->input_frame.frm_trans_end = !eos;
+		/* put in just some non-zero value */
+		ddl->codec_data.decoder.dec_disp_info.img_size_x = 0xff;
+	}
+	/* inform the client that the input failed */
+	ddl_input_failed_cb(ddl_context, vcd_event, vcd_status);
+
+	/* for the encoder case, we also need to send an output-done */
+	if (!ddl->decoding) {
+		/* transaction is complete after this callback */
+		ddl->output_frame.frm_trans_end = !eos;
+		/* error case: NO data present */
+		ddl->output_frame.vcd_frm.data_len = 0;
+		/* call back to client for output frame done */
+		ddl_context->ddl_callback(VCD_EVT_RESP_OUTPUT_DONE,
+		VCD_ERR_FAIL, &(ddl->output_frame),
+			sizeof(struct ddl_frame_data_tag),
+			(void *)ddl, ddl_context->client_data);
+
+		if (eos) {
+			DBG("ENC-EOS_DONE");
+			/* send client EOS DONE callback */
+			ddl_context->ddl_callback(VCD_EVT_RESP_EOS_DONE,
+				VCD_S_SUCCESS, NULL, 0, (void *)ddl,
+				ddl_context->client_data);
+		}
+	}
+
+	/* if it is the decoder EOS case */
+	if (ddl->decoding && eos)
+		ddl_decode_eos_run(ddl);
+	else
+		DDL_IDLE(ddl_context);
+
+	return true;
+}
+
+static u32 ddl_handle_core_warnings(u32 err_status)
+{
+	u32 status = false;
+
+	switch (err_status) {
+	case FRAME_RATE_UNKNOWN:
+	case ASPECT_RATIO_UNKOWN:
+	case COLOR_PRIMARIES_UNKNOWN:
+	case TRANSFER_CHAR_UNKWON:
+	case MATRIX_COEFF_UNKNOWN:
+	case NON_SEQ_SLICE_ADDR:
+	case BROKEN_LINK:
+	case FRAME_CONCEALED:
+	case PROFILE_UNKOWN:
+	case LEVEL_UNKOWN:
+	case BIT_RATE_NOT_SUPPORTED:
+	case COLOR_DIFF_FORMAT_NOT_SUPPORTED:
+	case NULL_EXTRA_METADATA_POINTER:
+	case SYNC_POINT_NOT_RECEIVED_STARTED_DECODING:
+
+	case NULL_FW_DEBUG_INFO_POINTER:
+	case ALLOC_DEBUG_INFO_SIZE_INSUFFICIENT:
+	case MAX_STAGE_COUNTER_EXCEEDED:
+
+	case METADATA_NO_SPACE_MB_INFO:
+	case METADATA_NO_SPACE_SLICE_SIZE:
+	case RESOLUTION_WARNING:
+
+	/* decoder warnings */
+	case METADATA_NO_SPACE_QP:
+	case METADATA_NO_SAPCE_CONCEAL_MB:
+	case METADATA_NO_SPACE_VC1_PARAM:
+	case METADATA_NO_SPACE_SEI:
+	case METADATA_NO_SPACE_VUI:
+	case METADATA_NO_SPACE_EXTRA:
+	case METADATA_NO_SPACE_DATA_NONE:
+		{
+			status = true;
+			DBG("CMD-WARNING-IGNORED!!");
+			break;
+		}
+	}
+	return status;
+}
+
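+/*
+ * Top-level error dispatch: if the operation did not actually fail,
+ * only warning codes are expected and they are logged and ignored;
+ * otherwise the error is classified, in order, as HW fatal, core
+ * recoverable, or client fatal.
+ */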
+u32 ddl_handle_core_errors(struct ddl_context *ddl_context)
+{
+	u32 status = false;
+
+	if (!ddl_context->cmd_err_status &&
+		!ddl_context->disp_pic_err_status &&
+		!ddl_context->op_failed)
+		return false;
+
+	if (ddl_context->cmd_state == DDL_CMD_INVALID) {
+		DBG("SPURIOUS_INTERRUPT_ERROR");
+		return true;
+	}
+
+	if (!ddl_context->op_failed) {
+		u32 disp_status;
+		status = ddl_handle_core_warnings(ddl_context->
+			cmd_err_status);
+		disp_status = ddl_handle_core_warnings(
+			ddl_context->disp_pic_err_status);
+		if (!status && !disp_status)
+			DBG("ddl_warning:Unknown");
+
+		return false;
+	}
+
+	ERR("\n %s(): OPFAILED!!", __func__);
+	ERR("\n CMD_ERROR_STATUS = %u, DISP_ERR_STATUS = %u",
+		ddl_context->cmd_err_status,
+		ddl_context->disp_pic_err_status);
+
+	status = ddl_handle_hw_fatal_errors(ddl_context);
+
+	if (!status)
+		status = ddl_handle_core_recoverable_errors(ddl_context);
+
+	if (!status)
+		status = ddl_handle_client_fatal_errors(ddl_context);
+
+	return status;
+}
+
+static void ddl_handle_npf_decoding_error(struct ddl_context *ddl_context)
+{
+	struct ddl_client_context *ddl = ddl_context->current_ddl;
+	struct ddl_decoder_data *decoder = &ddl->codec_data.decoder;
+	if (!ddl->decoding) {
+		ERR("FWISSUE-ENC-NPF!!!");
+		ddl_client_fatal_cb(ddl_context);
+		return;
+	}
+	vidc_720p_decode_display_info(&decoder->dec_disp_info);
+	ddl_decode_dynamic_property(ddl, false);
+	ddl->output_frame.vcd_frm.ip_frm_tag =
+		decoder->dec_disp_info.tag_top;
+	ddl->output_frame.vcd_frm.physical = NULL;
+	ddl->output_frame.frm_trans_end = false;
+	ddl->ddl_context->ddl_callback(
+		VCD_EVT_RESP_OUTPUT_DONE,
+		VCD_ERR_INTRLCD_FIELD_DROP,
+		&ddl->output_frame,
+		sizeof(struct ddl_frame_data_tag),
+		(void *)ddl,
+		ddl->ddl_context->client_data);
+	ddl_decode_frame_run(ddl);
+}
+
+static u32 ddl_handle_seqhdr_fail_error(struct ddl_context *ddl_context)
+{
+	struct ddl_client_context *ddl = ddl_context->current_ddl;
+	struct ddl_decoder_data *decoder = &ddl->codec_data.decoder;
+	u32 status = false;
+	if (ddl_context->cmd_state == DDL_CMD_HEADER_PARSE &&
+		DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_INITCODECDONE)) {
+		switch (ddl_context->cmd_err_status) {
+		case UNSUPPORTED_FEATURE_IN_PROFILE:
+		case HEADER_NOT_FOUND:
+		case INVALID_SPS_ID:
+		case INVALID_PPS_ID:
+		case RESOLUTION_NOT_SUPPORTED:
+			ERR("SEQ-HDR-FAILED!!!");
+			if ((ddl_context->cmd_err_status ==
+				 RESOLUTION_NOT_SUPPORTED) &&
+				(decoder->codec.codec == VCD_CODEC_H264 ||
+				decoder->codec.codec == VCD_CODEC_H263 ||
+				decoder->codec.codec == VCD_CODEC_MPEG4 ||
+				decoder->codec.codec == VCD_CODEC_VC1_RCV ||
+				decoder->codec.codec == VCD_CODEC_VC1)) {
+				ddl_client_fatal_cb(ddl_context);
+				status = true;
+				break;
+			}
+			if (decoder->header_in_start) {
+				decoder->header_in_start = false;
+				ddl_context->ddl_callback(VCD_EVT_RESP_START,
+					VCD_ERR_SEQHDR_PARSE_FAIL,
+					NULL, 0, (void *)ddl,
+					ddl_context->client_data);
+			} else {
+				if (ddl->input_frame.vcd_frm.flags &
+					VCD_FRAME_FLAG_EOS)
+					ddl->input_frame.frm_trans_end = false;
+				else
+					ddl->input_frame.frm_trans_end = true;
+				ddl_decode_dynamic_property(ddl, false);
+				ddl_context->ddl_callback(
+					VCD_EVT_RESP_INPUT_DONE,
+					VCD_ERR_SEQHDR_PARSE_FAIL,
+					&ddl->input_frame,
+					sizeof(struct ddl_frame_data_tag),
+					(void *)ddl, ddl_context->client_data);
+				if (ddl->input_frame.vcd_frm.flags &
+					VCD_FRAME_FLAG_EOS)
+					ddl_context->ddl_callback(
+						VCD_EVT_RESP_EOS_DONE,
+						VCD_S_SUCCESS, NULL,
+						0, (void *)ddl,
+						ddl_context->client_data);
+			}
+			ddl_move_client_state(ddl,
+				DDL_CLIENT_WAIT_FOR_INITCODEC);
+			DDL_IDLE(ddl_context);
+			status = true;
+		}
+	}
+	return status;
+}
diff --git a/drivers/video/msm/vidc/720p/ddl/vcd_ddl_firmware.c b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_firmware.c
new file mode 100644
index 0000000..25aa6bc
--- /dev/null
+++ b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_firmware.c
@@ -0,0 +1,352 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "vidc_type.h"
+#include "vcd_ddl_firmware.h"
+#include "vcd_ddl_utils.h"
+
+#define VCDFW_TOTALNUM_IMAGE  7
+#define VCDFW_MAX_NO_IMAGE    2
+
+struct vcd_firmware {
+	u32 active_fw_img[VCDFW_TOTALNUM_IMAGE];
+	struct ddl_buf_addr boot_code;
+
+	struct ddl_buf_addr enc_mpeg4;
+	struct ddl_buf_addr encH264;
+
+	struct ddl_buf_addr dec_mpeg4;
+	struct ddl_buf_addr decH264;
+	struct ddl_buf_addr decH263;
+	struct ddl_buf_addr dec_mpeg2;
+	struct ddl_buf_addr dec_vc1;
+};
+
+static struct vcd_firmware vcd_firmware;
+
+
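+/*
+ * Byte-swaps every 32-bit word of the firmware image in place; fw_size
+ * is assumed to be a multiple of 4.
+ */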
+static void vcd_fw_change_endian(unsigned char *fw, u32 fw_size)
+{
+	u32 i = 0;
+	unsigned char temp;
+	for (i = 0; i < fw_size; i = i + 4) {
+		temp = fw[i];
+		fw[i] = fw[i + 3];
+		fw[i + 3] = temp;
+
+		temp = fw[i + 1];
+		fw[i + 1] = fw[i + 2];
+		fw[i + 2] = temp;
+	}
+	return;
+}
+
+static u32 vcd_fw_prepare(struct ddl_buf_addr *fw_details,
+			 const unsigned char fw_array[],
+			 const unsigned int fw_array_size, u32 change_endian)
+{
+	u32 *buffer;
+
+	ddl_pmem_alloc(fw_details, fw_array_size,
+		       DDL_LINEAR_BUFFER_ALIGN_BYTES);
+	if (!fw_details->virtual_base_addr)
+		return false;
+
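+	/*
+	 * Note: buffer_size is recorded in 32-bit words (fw_array_size is
+	 * in bytes), presumably the unit the boot/download code expects.
+	 */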
+	fw_details->buffer_size = fw_array_size / 4;
+
+	buffer = fw_details->align_virtual_addr;
+
+	memcpy(buffer, fw_array, fw_array_size);
+	if (change_endian)
+		vcd_fw_change_endian((unsigned char *)buffer, fw_array_size);
+	return true;
+}
+
+u32 vcd_fw_init(void)
+{
+	u32 status = false;
+
+	status = vcd_fw_prepare(&vcd_firmware.boot_code,
+				vidc_command_control_fw,
+				vidc_command_control_fw_size, false);
+
+	if (status) {
+		status = vcd_fw_prepare(&vcd_firmware.dec_mpeg4,
+					vidc_mpg4_dec_fw,
+					vidc_mpg4_dec_fw_size, true);
+	}
+
+	if (status) {
+		status = vcd_fw_prepare(&vcd_firmware.decH264,
+					vidc_h264_dec_fw,
+					vidc_h264_dec_fw_size, true);
+	}
+
+	if (status) {
+		status = vcd_fw_prepare(&vcd_firmware.decH263,
+					vidc_h263_dec_fw,
+					vidc_h263_dec_fw_size, true);
+	}
+
+	if (status) {
+		status = vcd_fw_prepare(&vcd_firmware.enc_mpeg4,
+					vidc_mpg4_enc_fw,
+					vidc_mpg4_enc_fw_size, true);
+	}
+
+	if (status) {
+		status = vcd_fw_prepare(&vcd_firmware.encH264,
+					vidc_h264_enc_fw,
+					vidc_h264_enc_fw_size, true);
+	}
+
+	if (status) {
+		status = vcd_fw_prepare(&vcd_firmware.dec_vc1,
+					vidc_vc1_dec_fw,
+					vidc_vc1_dec_fw_size, true);
+	}
+	return status;
+}
+
+
+static u32 get_dec_fw_image(struct vcd_fw_details *fw_details)
+{
+	u32 status = true;
+	switch (fw_details->codec) {
+	case VCD_CODEC_DIVX_4:
+	case VCD_CODEC_DIVX_5:
+	case VCD_CODEC_DIVX_6:
+	case VCD_CODEC_XVID:
+	case VCD_CODEC_MPEG4:
+		{
+			fw_details->fw_buffer_addr =
+			    vcd_firmware.dec_mpeg4.align_physical_addr;
+			fw_details->fw_size =
+			    vcd_firmware.dec_mpeg4.buffer_size;
+			break;
+		}
+	case VCD_CODEC_H264:
+		{
+			fw_details->fw_buffer_addr =
+			    vcd_firmware.decH264.align_physical_addr;
+			fw_details->fw_size =
+			    vcd_firmware.decH264.buffer_size;
+			break;
+		}
+	case VCD_CODEC_VC1:
+	case VCD_CODEC_VC1_RCV:
+		{
+			fw_details->fw_buffer_addr =
+			    vcd_firmware.dec_vc1.align_physical_addr;
+			fw_details->fw_size =
+			    vcd_firmware.dec_vc1.buffer_size;
+			break;
+		}
+	case VCD_CODEC_MPEG2:
+		{
+			fw_details->fw_buffer_addr =
+			    vcd_firmware.dec_mpeg2.align_physical_addr;
+			fw_details->fw_size =
+			    vcd_firmware.dec_mpeg2.buffer_size;
+			break;
+		}
+	case VCD_CODEC_H263:
+		{
+			fw_details->fw_buffer_addr =
+			    vcd_firmware.decH263.align_physical_addr;
+			fw_details->fw_size =
+			    vcd_firmware.decH263.buffer_size;
+			break;
+		}
+	default:
+		{
+			status = false;
+			break;
+		}
+	}
+	return status;
+}
+
+static u32 get_enc_fw_image(struct vcd_fw_details *fw_details)
+{
+	u32 status = true;
+	switch (fw_details->codec) {
+	case VCD_CODEC_H263:
+	case VCD_CODEC_MPEG4:
+		{
+			fw_details->fw_buffer_addr =
+			    vcd_firmware.enc_mpeg4.align_physical_addr;
+			fw_details->fw_size =
+			    vcd_firmware.enc_mpeg4.buffer_size;
+			break;
+		}
+	case VCD_CODEC_H264:
+		{
+			fw_details->fw_buffer_addr =
+			    vcd_firmware.encH264.align_physical_addr;
+			fw_details->fw_size =
+			    vcd_firmware.encH264.buffer_size;
+			break;
+		}
+	default:
+		{
+			status = false;
+			break;
+		}
+	}
+	return status;
+}
+
+u32 vcd_get_fw_property(u32 prop_id, void *prop_details)
+{
+	u32 status = true;
+	struct vcd_fw_details *fw_details;
+	switch (prop_id) {
+	case VCD_FW_ENDIAN:
+		{
+			*(u32 *) prop_details = VCD_FW_BIG_ENDIAN;
+			break;
+		}
+	case VCD_FW_BOOTCODE:
+		{
+			fw_details =
+			    (struct vcd_fw_details *)prop_details;
+			fw_details->fw_buffer_addr =
+			    vcd_firmware.boot_code.align_physical_addr;
+			fw_details->fw_size =
+			    vcd_firmware.boot_code.buffer_size;
+			break;
+		}
+	case VCD_FW_DECODE:
+		{
+			fw_details =
+			    (struct vcd_fw_details *)prop_details;
+			status = get_dec_fw_image(fw_details);
+			break;
+		}
+	case VCD_FW_ENCODE:
+		{
+			fw_details =
+			    (struct vcd_fw_details *)prop_details;
+			status = get_enc_fw_image(fw_details);
+			break;
+		}
+	default:
+		{
+			status = false;
+			break;
+		}
+	}
+	return status;
+}
+
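+/*
+ * Reference-counts the firmware images in use.  (add, decoding, codec)
+ * is mapped to one of the VCDFW_TOTALNUM_IMAGE slots; a previously
+ * unused image is admitted only while fewer than VCDFW_MAX_NO_IMAGE
+ * distinct images are active, otherwise the transaction fails.
+ */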
+u32 vcd_fw_transact(u32 add, u32 decoding, enum vcd_codec codec)
+{
+	u32 status = true;
+	u32 index = 0, active_fw = 0, loop_count;
+
+	if (decoding) {
+		switch (codec) {
+		case VCD_CODEC_DIVX_4:
+		case VCD_CODEC_DIVX_5:
+		case VCD_CODEC_DIVX_6:
+		case VCD_CODEC_XVID:
+		case VCD_CODEC_MPEG4:
+			{
+				index = 0;
+				break;
+			}
+		case VCD_CODEC_H264:
+			{
+				index = 1;
+				break;
+			}
+		case VCD_CODEC_H263:
+			{
+				index = 2;
+				break;
+			}
+		case VCD_CODEC_MPEG2:
+			{
+				index = 3;
+				break;
+			}
+		case VCD_CODEC_VC1:
+		case VCD_CODEC_VC1_RCV:
+			{
+				index = 4;
+				break;
+			}
+		default:
+			{
+				status = false;
+				break;
+			}
+		}
+	} else {
+		switch (codec) {
+		case VCD_CODEC_H263:
+		case VCD_CODEC_MPEG4:
+			{
+				index = 5;
+				break;
+			}
+		case VCD_CODEC_H264:
+			{
+				index = 6;
+				break;
+			}
+		default:
+			{
+				status = false;
+				break;
+			}
+		}
+	}
+
+	if (!status)
+		return status;
+
+	if (!add && vcd_firmware.active_fw_img[index]) {
+		--vcd_firmware.active_fw_img[index];
+		return status;
+	}
+
+	for (loop_count = 0; loop_count < VCDFW_TOTALNUM_IMAGE;
+	     ++loop_count) {
+		if (vcd_firmware.active_fw_img[loop_count])
+			++active_fw;
+	}
+
+	if (active_fw < VCDFW_MAX_NO_IMAGE ||
+	    vcd_firmware.active_fw_img[index] > 0) {
+		++vcd_firmware.active_fw_img[index];
+	} else {
+		status = false;
+	}
+	return status;
+}
+
+void vcd_fw_release(void)
+{
+	ddl_pmem_free(&vcd_firmware.boot_code);
+	ddl_pmem_free(&vcd_firmware.enc_mpeg4);
+	ddl_pmem_free(&vcd_firmware.encH264);
+	ddl_pmem_free(&vcd_firmware.dec_mpeg4);
+	ddl_pmem_free(&vcd_firmware.decH264);
+	ddl_pmem_free(&vcd_firmware.decH263);
+	ddl_pmem_free(&vcd_firmware.dec_mpeg2);
+	ddl_pmem_free(&vcd_firmware.dec_vc1);
+}
diff --git a/drivers/video/msm/vidc/720p/ddl/vcd_ddl_firmware.h b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_firmware.h
new file mode 100644
index 0000000..7952dfb
--- /dev/null
+++ b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_firmware.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _VCD_DDL_FIRMWARE_H_
+#define _VCD_DDL_FIRMWARE_H_
+#include "vcd_property.h"
+
+#define VCD_FW_BIG_ENDIAN     0x0
+#define VCD_FW_LITTLE_ENDIAN  0x1
+
+struct vcd_fw_details {
+	enum vcd_codec codec;
+	u32 *fw_buffer_addr;
+	u32 fw_size;
+};
+
+#define VCD_FW_PROP_BASE         0x0
+
+#define VCD_FW_ENDIAN       (VCD_FW_PROP_BASE + 0x1)
+#define VCD_FW_BOOTCODE     (VCD_FW_PROP_BASE + 0x2)
+#define VCD_FW_DECODE     (VCD_FW_PROP_BASE + 0x3)
+#define VCD_FW_ENCODE     (VCD_FW_PROP_BASE + 0x4)
+
+extern unsigned char *vidc_command_control_fw;
+extern u32 vidc_command_control_fw_size;
+extern unsigned char *vidc_mpg4_dec_fw;
+extern u32 vidc_mpg4_dec_fw_size;
+extern unsigned char *vidc_h263_dec_fw;
+extern u32 vidc_h263_dec_fw_size;
+extern unsigned char *vidc_h264_dec_fw;
+extern u32 vidc_h264_dec_fw_size;
+extern unsigned char *vidc_mpg4_enc_fw;
+extern u32 vidc_mpg4_enc_fw_size;
+extern unsigned char *vidc_h264_enc_fw;
+extern u32 vidc_h264_enc_fw_size;
+extern unsigned char *vidc_vc1_dec_fw;
+extern u32 vidc_vc1_dec_fw_size;
+
+u32 vcd_fw_init(void);
+u32 vcd_get_fw_property(u32 prop_id, void *prop_details);
+u32 vcd_fw_transact(u32 add, u32 decoding, enum vcd_codec codec);
+void vcd_fw_release(void);
+
+#endif
diff --git a/drivers/video/msm/vidc/720p/ddl/vcd_ddl_hal.c b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_hal.c
new file mode 100644
index 0000000..a81dd84
--- /dev/null
+++ b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_hal.c
@@ -0,0 +1,944 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "vidc_type.h"
+
+#include "vcd_ddl_utils.h"
+#include "vcd_ddl_metadata.h"
+
+#if DEBUG
+#define DBG(x...) printk(KERN_DEBUG x)
+#else
+#define DBG(x...)
+#endif
+
+void ddl_core_init(struct ddl_context *ddl_context)
+{
+	char *psz_version;
+	struct vcd_fw_details fw_details;
+	u32 fw_endianness;
+	enum vidc_720p_endian dma_endian;
+	u32 interrupt_off;
+	enum vidc_720p_interrupt_level_selection interrupt_sel;
+	u32 intr_mask = 0x0;
+
+	vcd_get_fw_property(VCD_FW_BOOTCODE, &fw_details);
+	vcd_get_fw_property(VCD_FW_ENDIAN, &fw_endianness);
+	if (fw_endianness == VCD_FW_BIG_ENDIAN)
+		dma_endian = VIDC_720P_BIG_ENDIAN;
+	else
+		dma_endian = VIDC_720P_LITTLE_ENDIAN;
+
+	interrupt_off = false;
+	interrupt_sel = VIDC_720P_INTERRUPT_LEVEL_SEL;
+
+	intr_mask |= VIDC_720P_INTR_BUFFER_FULL;
+	intr_mask |= VIDC_720P_INTR_FW_DONE;
+	intr_mask |= VIDC_720P_INTR_DMA_DONE;
+	intr_mask |= VIDC_720P_INTR_FRAME_DONE;
+
+	vidc_720p_do_sw_reset();
+
+	vidc_720p_init(&psz_version,
+			fw_details.fw_size,
+			fw_details.fw_buffer_addr,
+			dma_endian,
+			interrupt_off, interrupt_sel, intr_mask);
+	return;
+}
+
+void ddl_core_start_cpu(struct ddl_context *ddl_context)
+{
+	u32 fw_endianness;
+	enum vidc_720p_endian dma_endian;
+	u32 dbg_core_dump_buf_size = 0;
+
+	vcd_get_fw_property(VCD_FW_ENDIAN, &fw_endianness);
+	if (fw_endianness == VCD_FW_BIG_ENDIAN)
+		dma_endian = VIDC_720P_LITTLE_ENDIAN;
+	else
+		dma_endian = VIDC_720P_BIG_ENDIAN;
+
+	ddl_move_command_state(ddl_context, DDL_CMD_CPU_RESET);
+
+	DBG("VSP_BUF_ADDR_SIZE %d",
+		ddl_context->context_buf_addr.buffer_size);
+	if (ddl_context->enable_dbg_core_dump) {
+		dbg_core_dump_buf_size = ddl_context->dbg_core_dump.
+			buffer_size;
+	}
+
+	vidc_720p_start_cpu(dma_endian,
+		ddl_context->context_buf_addr.align_physical_addr,
+		ddl_context->dbg_core_dump.align_physical_addr,
+		dbg_core_dump_buf_size);
+
+	VIDC_DEBUG_REGISTER_LOG;
+}
+
+void ddl_channel_set(struct ddl_client_context *ddl)
+{
+	enum vidc_720p_enc_dec_selection enc_dec_sel;
+	enum vidc_720p_codec codec;
+	enum vcd_codec *vcd_codec;
+	u32 fw_property_id;
+	struct vcd_fw_details fw_details;
+
+	if (ddl->decoding) {
+		if (vidc_msg_timing)
+			ddl_set_core_start_time(__func__, DEC_OP_TIME);
+		enc_dec_sel = VIDC_720P_DECODER;
+		fw_property_id = VCD_FW_DECODE;
+		vcd_codec = &(ddl->codec_data.decoder.codec.codec);
+	} else {
+		enc_dec_sel = VIDC_720P_ENCODER;
+		fw_property_id = VCD_FW_ENCODE;
+		vcd_codec = &(ddl->codec_data.encoder.codec.codec);
+	}
+	switch (*vcd_codec) {
+	default:
+	case VCD_CODEC_MPEG4:
+		{
+			codec = VIDC_720P_MPEG4;
+
+			if (ddl->decoding) {
+				vidc_720p_decode_set_mpeg4_data_partitionbuffer
+					(ddl->ddl_context->
+					 data_partition_tempbuf.
+					 align_physical_addr);
+			}
+
+			break;
+		}
+	case VCD_CODEC_H264:
+		{
+			codec = VIDC_720P_H264;
+			break;
+		}
+	case VCD_CODEC_DIVX_4:
+	case VCD_CODEC_DIVX_5:
+	case VCD_CODEC_DIVX_6:
+		{
+			codec = VIDC_720P_DIVX;
+			break;
+		}
+	case VCD_CODEC_XVID:
+		{
+			codec = VIDC_720P_XVID;
+			break;
+		}
+	case VCD_CODEC_H263:
+		{
+			codec = VIDC_720P_H263;
+			break;
+		}
+	case VCD_CODEC_MPEG2:
+		{
+			codec = VIDC_720P_MPEG2;
+			break;
+		}
+	case VCD_CODEC_VC1:
+	case VCD_CODEC_VC1_RCV:
+		{
+			codec = VIDC_720P_VC1;
+			break;
+		}
+	}
+
+	fw_details.codec = *vcd_codec;
+	vcd_get_fw_property(fw_property_id, &fw_details);
+	VIDC_DEBUG_REGISTER_LOG;
+
+	ddl_move_command_state(ddl->ddl_context, DDL_CMD_CHANNEL_SET);
+	ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_CHDONE);
+
+	vidc_720p_set_channel(ddl->channel_id,
+			       enc_dec_sel,
+			       codec,
+			       fw_details.fw_buffer_addr,
+			       fw_details.fw_size);
+}
+
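+/*
+ * Programs the header-parse command.  The sequence-header address is
+ * aligned down to an 8-byte boundary, start_byte_num appears to encode
+ * where the real data begins relative to that aligned address, and the
+ * end address is padded via DDL_PADDING_HACK.
+ */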
+void ddl_decode_init_codec(struct ddl_client_context *ddl)
+{
+	u32 seq_h = 0, seq_e = 0, start_byte_num = 0;
+	struct ddl_decoder_data *decoder = &(ddl->codec_data.decoder);
+	struct vcd_sequence_hdr *seq_hdr = &decoder->decode_config;
+	enum vidc_720p_memory_access_method mem_access_method;
+	if (vidc_msg_timing)
+		ddl_set_core_start_time(__func__, DEC_OP_TIME);
+	ddl_metadata_enable(ddl);
+
+	vidc_720p_decode_set_error_control(true);
+
+	vidc_720p_decode_set_mpeg4Post_filter(decoder->post_filter.
+					       post_filter);
+
+	if (decoder->codec.codec == VCD_CODEC_H264) {
+		vidc_720p_decode_setH264VSPBuffer(decoder->
+						   h264Vsp_temp_buffer.
+						   align_physical_addr);
+		VIDC_LOG1("VSP_BUF_ADDR_SIZE",
+			   decoder->h264Vsp_temp_buffer.buffer_size);
+	}
+
+	if (decoder->codec.codec == VCD_CODEC_VC1_RCV ||
+		decoder->codec.codec == VCD_CODEC_VC1) {
+		vidc_720p_set_frame_size(decoder->client_frame_size.width,
+			decoder->client_frame_size.height);
+	} else {
+		vidc_720p_set_frame_size(0x0, 0x0);
+	}
+
+	switch (decoder->buf_format.buffer_format) {
+	default:
+	case VCD_BUFFER_FORMAT_NV12:
+		{
+			mem_access_method = VIDC_720P_TILE_LINEAR;
+			break;
+		}
+	case VCD_BUFFER_FORMAT_TILE_4x2:
+		{
+			mem_access_method = VIDC_720P_TILE_64x32;
+			break;
+		}
+	}
+	VIDC_LOG_STRING("HEADER-PARSE-START");
+	VIDC_DEBUG_REGISTER_LOG;
+	seq_h = (u32) seq_hdr->sequence_header;
+	start_byte_num = 8 - (seq_h & DDL_STREAMBUF_ALIGN_GUARD_BYTES);
+	seq_e = seq_h + seq_hdr->sequence_header_len;
+	seq_h &= ~(DDL_STREAMBUF_ALIGN_GUARD_BYTES);
+	DDL_PADDING_HACK(seq_e);
+
+	ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_INITCODECDONE);
+	ddl_move_command_state(ddl->ddl_context, DDL_CMD_HEADER_PARSE);
+
+	vidc_720p_decode_bitstream_header(ddl->channel_id,
+		   seq_hdr->sequence_header_len,
+		   start_byte_num,
+		   seq_h,
+		   seq_e,
+		   mem_access_method,
+		   decoder->output_order);
+}
+
+void ddl_decode_dynamic_property(struct ddl_client_context *ddl,
+				 u32 enable)
+{
+	uint8_t *temp = NULL;
+	u32 extra_datastart = 0;
+	struct ddl_decoder_data *decoder = &(ddl->codec_data.decoder);
+	struct vcd_frame_data *bit_stream =
+	    &(ddl->input_frame.vcd_frm);
+
+	if (!enable) {
+		if (decoder->dynmic_prop_change_req) {
+			decoder->dynmic_prop_change_req = false;
+			vidc_720p_decode_dynamic_req_reset();
+		}
+		return;
+	}
+	if ((decoder->dynamic_prop_change &
+				DDL_DEC_REQ_OUTPUT_FLUSH)) {
+		decoder->dynmic_prop_change_req = true;
+		decoder->dynamic_prop_change &= ~(DDL_DEC_REQ_OUTPUT_FLUSH);
+		decoder->dpb_mask.hw_mask = 0;
+		vidc_720p_decode_dynamic_req_set(VIDC_720P_FLUSH_REQ);
+	}
+	if (((decoder->meta_data_enable_flag & VCD_METADATA_PASSTHROUGH))
+	    && ((VCD_FRAME_FLAG_EXTRADATA & bit_stream->flags))
+	    ) {
+
+		temp = ((uint8_t *)bit_stream->physical +
+					bit_stream->offset +
+					bit_stream->data_len + 3);
+
+		extra_datastart = (u32) ((u32)temp & ~3);
+		decoder->dynmic_prop_change_req = true;
+
+		vidc_720p_decode_setpassthrough_start(extra_datastart);
+
+		vidc_720p_decode_dynamic_req_set(VIDC_720P_EXTRADATA);
+	}
+}
+
+void ddl_encode_dynamic_property(struct ddl_client_context *ddl,
+				 u32 enable)
+{
+	struct ddl_encoder_data *encoder = &(ddl->codec_data.encoder);
+	u32 enc_param_change = 0;
+
+	if (!enable) {
+		if (encoder->dynmic_prop_change_req) {
+			encoder->dynmic_prop_change_req = false;
+			encoder->ext_enc_control_val &=
+				~(VIDC_720P_ENC_IFRAME_REQ);
+			vidc_720p_encode_set_control_param
+			(encoder->ext_enc_control_val);
+			vidc_720p_encoder_set_param_change(enc_param_change);
+		}
+		return;
+	}
+	if ((encoder->dynamic_prop_change & DDL_ENC_REQ_IFRAME)) {
+		encoder->dynamic_prop_change &= ~(DDL_ENC_REQ_IFRAME);
+		encoder->ext_enc_control_val |= VIDC_720P_ENC_IFRAME_REQ;
+		vidc_720p_encode_set_control_param
+		(encoder->ext_enc_control_val);
+	}
+	if ((encoder->dynamic_prop_change & DDL_ENC_CHANGE_BITRATE)) {
+		vidc_720p_encode_set_bit_rate(
+		encoder->target_bit_rate.target_bitrate);
+		enc_param_change |= VIDC_720P_ENC_BITRATE_CHANGE;
+		encoder->dynamic_prop_change &= ~(DDL_ENC_CHANGE_BITRATE);
+	}
+	if ((encoder->dynamic_prop_change & DDL_ENC_CHANGE_CIR)) {
+		vidc_720p_encode_set_intra_refresh_mb_number(
+			encoder->intra_refresh.cir_mb_number);
+		encoder->dynamic_prop_change &= ~(DDL_ENC_CHANGE_CIR);
+	}
+	if ((encoder->dynamic_prop_change & DDL_ENC_CHANGE_IPERIOD)) {
+		vidc_720p_encode_set_i_period
+			(encoder->i_period.p_frames);
+		enc_param_change |= VIDC_720P_ENC_IPERIOD_CHANGE;
+		encoder->dynamic_prop_change &= ~(DDL_ENC_CHANGE_IPERIOD);
+	}
+	if ((encoder->dynamic_prop_change &
+				DDL_ENC_CHANGE_FRAMERATE)) {
+		vidc_720p_encode_set_fps
+		    ((encoder->frame_rate.fps_numerator * 1000) /
+		     encoder->frame_rate.fps_denominator);
+		enc_param_change |= VIDC_720P_ENC_FRAMERATE_CHANGE;
+		encoder->dynamic_prop_change &= ~(DDL_ENC_CHANGE_FRAMERATE);
+	}
+	if (enc_param_change)
+		vidc_720p_encoder_set_param_change(enc_param_change);
+}
+
+static void ddl_encode_set_profile_level(struct ddl_client_context *ddl)
+{
+	struct ddl_encoder_data *encoder = &(ddl->codec_data.encoder);
+	u32 profile;
+	u32 level;
+
+	switch (encoder->profile.profile) {
+	default:
+	case VCD_PROFILE_MPEG4_SP:
+		{
+			profile = VIDC_720P_PROFILE_MPEG4_SP;
+			break;
+		}
+	case VCD_PROFILE_MPEG4_ASP:
+		{
+			profile = VIDC_720P_PROFILE_MPEG4_ASP;
+			break;
+		}
+	case VCD_PROFILE_H264_BASELINE:
+		{
+			profile = VIDC_720P_PROFILE_H264_CPB;
+			break;
+		}
+	case VCD_PROFILE_H264_MAIN:
+		{
+			profile = VIDC_720P_PROFILE_H264_MAIN;
+			break;
+		}
+	case VCD_PROFILE_H264_HIGH:
+		{
+			profile = VIDC_720P_PROFILE_H264_HIGH;
+			break;
+		}
+	case VCD_PROFILE_H263_BASELINE:
+		{
+			profile = VIDC_720P_PROFILE_H263_BASELINE;
+			break;
+		}
+	}
+	switch (encoder->level.level) {
+	default:
+	case VCD_LEVEL_MPEG4_0:
+		{
+			level = VIDC_720P_MPEG4_LEVEL0;
+			break;
+		}
+	case VCD_LEVEL_MPEG4_0b:
+		{
+			level = VIDC_720P_MPEG4_LEVEL0b;
+			break;
+		}
+	case VCD_LEVEL_MPEG4_1:
+		{
+			level = VIDC_720P_MPEG4_LEVEL1;
+			break;
+		}
+	case VCD_LEVEL_MPEG4_2:
+		{
+			level = VIDC_720P_MPEG4_LEVEL2;
+			break;
+		}
+	case VCD_LEVEL_MPEG4_3:
+		{
+			level = VIDC_720P_MPEG4_LEVEL3;
+			break;
+		}
+	case VCD_LEVEL_MPEG4_3b:
+		{
+			level = VIDC_720P_MPEG4_LEVEL3b;
+			break;
+		}
+
+	case VCD_LEVEL_MPEG4_4:
+	case VCD_LEVEL_MPEG4_4a:
+		{
+			level = VIDC_720P_MPEG4_LEVEL4a;
+			break;
+		}
+	case VCD_LEVEL_MPEG4_5:
+		{
+			level = VIDC_720P_MPEG4_LEVEL5;
+			break;
+		}
+	case VCD_LEVEL_MPEG4_6:
+		{
+			level = VIDC_720P_MPEG4_LEVEL6;
+			break;
+		}
+	case VCD_LEVEL_H264_1:
+		{
+			level = VIDC_720P_H264_LEVEL1;
+			break;
+		}
+	case VCD_LEVEL_H264_1b:
+		{
+			level = VIDC_720P_H264_LEVEL1b;
+			break;
+		}
+	case VCD_LEVEL_H264_1p1:
+		{
+			level = VIDC_720P_H264_LEVEL1p1;
+			break;
+		}
+	case VCD_LEVEL_H264_1p2:
+		{
+			level = VIDC_720P_H264_LEVEL1p2;
+			break;
+		}
+	case VCD_LEVEL_H264_1p3:
+		{
+			level = VIDC_720P_H264_LEVEL1p3;
+			break;
+		}
+	case VCD_LEVEL_H264_2:
+		{
+			level = VIDC_720P_H264_LEVEL2;
+			break;
+		}
+	case VCD_LEVEL_H264_2p1:
+		{
+			level = VIDC_720P_H264_LEVEL2p1;
+			break;
+		}
+	case VCD_LEVEL_H264_2p2:
+		{
+			level = VIDC_720P_H264_LEVEL2p2;
+			break;
+		}
+	case VCD_LEVEL_H264_3:
+		{
+			level = VIDC_720P_H264_LEVEL3;
+			break;
+		}
+	case VCD_LEVEL_H264_3p1:
+		{
+			level = VIDC_720P_H264_LEVEL3p1;
+			break;
+		}
+	case VCD_LEVEL_H263_10:
+		{
+			level = VIDC_720P_H263_LEVEL10;
+			break;
+		}
+	case VCD_LEVEL_H263_20:
+		{
+			level = VIDC_720P_H263_LEVEL20;
+			break;
+		}
+	case VCD_LEVEL_H263_30:
+		{
+			level = VIDC_720P_H263_LEVEL30;
+			break;
+		}
+	case VCD_LEVEL_H263_40:
+		{
+			level = VIDC_720P_H263_LEVEL40;
+			break;
+		}
+	case VCD_LEVEL_H263_45:
+		{
+			level = VIDC_720P_H263_LEVEL45;
+			break;
+		}
+	case VCD_LEVEL_H263_50:
+		{
+			level = VIDC_720P_H263_LEVEL50;
+			break;
+		}
+	case VCD_LEVEL_H263_60:
+		{
+			level = VIDC_720P_H263_LEVEL60;
+			break;
+		}
+	case VCD_LEVEL_H263_70:
+		{
+			level = VIDC_720P_H263_LEVEL70;
+			break;
+		}
+	}
+	vidc_720p_encode_set_profile(profile, level);
+}
+
+void ddl_encode_init_codec(struct ddl_client_context *ddl)
+{
+	struct ddl_encoder_data *encoder = &(ddl->codec_data.encoder);
+	enum vidc_720p_memory_access_method mem_access_method;
+	enum vidc_720p_DBConfig db_config;
+	enum vidc_720p_MSlice_selection m_slice_sel;
+
+	ddl_encode_set_profile_level(ddl);
+
+	vidc_720p_set_frame_size
+	    (encoder->frame_size.width, encoder->frame_size.height);
+	vidc_720p_encode_set_qp_params
+	    (encoder->qp_range.max_qp, encoder->qp_range.min_qp);
+	vidc_720p_encode_set_rc_config
+	    (encoder->rc_level.frame_level_rc,
+	     encoder->rc_level.mb_level_rc,
+	     encoder->session_qp.i_frame_qp,
+	     encoder->session_qp.p_frame_qp);
+
+	if (encoder->r_cframe_skip) {
+		if (encoder->vb_vbuffer_size) {
+			encoder->ext_enc_control_val = (0x2 << 0x2) |
+			(encoder->vb_vbuffer_size << 0x10);
+		} else
+			encoder->ext_enc_control_val = (0x1 << 2);
+	} else
+		encoder->ext_enc_control_val = 0;
+
+	vidc_720p_encode_set_fps
+	    ((encoder->frame_rate.fps_numerator * 1000) /
+	     encoder->frame_rate.fps_denominator);
+
+	vidc_720p_encode_set_vop_time(
+			encoder->vop_timing.vop_time_resolution, 0);
+
+	if (encoder->rc_level.frame_level_rc) {
+		vidc_720p_encode_set_bit_rate
+		    (encoder->target_bit_rate.target_bitrate);
+
+		vidc_720p_encode_set_frame_level_rc_params
+		    (encoder->frame_level_rc.reaction_coeff);
+	}
+	if (encoder->rc_level.mb_level_rc) {
+		vidc_720p_encode_set_mb_level_rc_params
+		    (encoder->adaptive_rc.dark_region_as_flag,
+		     encoder->adaptive_rc.smooth_region_as_flag,
+		     encoder->adaptive_rc.static_region_as_flag,
+		     encoder->adaptive_rc.activity_region_flag);
+	}
+	if (encoder->codec.codec == VCD_CODEC_MPEG4) {
+		vidc_720p_encode_set_short_header
+		    (encoder->short_header.short_header);
+
+		if (encoder->hdr_ext_control) {
+			vidc_720p_encode_set_hec_period
+			(encoder->hdr_ext_control);
+			encoder->ext_enc_control_val |= (0x1 << 0x1);
+		}
+	}
+	/* set extended encoder control settings */
+	vidc_720p_encode_set_control_param
+	(encoder->ext_enc_control_val);
+
+	if (encoder->codec.codec == VCD_CODEC_H264) {
+		enum vidc_720p_entropy_sel entropy_sel;
+		enum vidc_720p_cabac_model cabac_model_number;
+		switch (encoder->entropy_control.entropy_sel) {
+		default:
+		case VCD_ENTROPY_SEL_CAVLC:
+			{
+				entropy_sel = VIDC_720P_ENTROPY_SEL_CAVLC;
+				break;
+			}
+		case VCD_ENTROPY_SEL_CABAC:
+			{
+				entropy_sel = VIDC_720P_ENTROPY_SEL_CABAC;
+				break;
+			}
+		}
+		switch (encoder->entropy_control.cabac_model) {
+		default:
+		case VCD_CABAC_MODEL_NUMBER_0:
+			{
+				cabac_model_number =
+				    VIDC_720P_CABAC_MODEL_NUMBER_0;
+				break;
+			}
+		case VCD_CABAC_MODEL_NUMBER_1:
+			{
+				cabac_model_number =
+				    VIDC_720P_CABAC_MODEL_NUMBER_1;
+				break;
+			}
+		case VCD_CABAC_MODEL_NUMBER_2:
+			{
+				cabac_model_number =
+				    VIDC_720P_CABAC_MODEL_NUMBER_2;
+				break;
+			}
+		}
+		vidc_720p_encode_set_entropy_control
+		    (entropy_sel, cabac_model_number);
+		switch (encoder->db_control.db_config) {
+		default:
+		case VCD_DB_ALL_BLOCKING_BOUNDARY:
+			{
+				db_config =
+				    VIDC_720P_DB_ALL_BLOCKING_BOUNDARY;
+				break;
+			}
+		case VCD_DB_DISABLE:
+			{
+				db_config =
+				    VIDC_720P_DB_DISABLE;
+				break;
+			}
+		case VCD_DB_SKIP_SLICE_BOUNDARY:
+			{
+				db_config =
+				    VIDC_720P_DB_SKIP_SLICE_BOUNDARY;
+				break;
+			}
+		}
+		vidc_720p_encode_set_db_filter_control
+		    (db_config,
+		     encoder->db_control.slice_alpha_offset,
+		     encoder->db_control.slice_beta_offset);
+	}
+
+	vidc_720p_encode_set_intra_refresh_mb_number
+	    (encoder->intra_refresh.cir_mb_number);
+
+	switch (encoder->multi_slice.m_slice_sel) {
+	default:
+	case VCD_MSLICE_OFF:
+		m_slice_sel = VIDC_720P_MSLICE_OFF;
+		break;
+	case VCD_MSLICE_BY_MB_COUNT:
+		{
+			m_slice_sel = VIDC_720P_MSLICE_BY_MB_COUNT;
+			break;
+		}
+	case VCD_MSLICE_BY_BYTE_COUNT:
+		{
+			m_slice_sel = VIDC_720P_MSLICE_BY_BYTE_COUNT;
+			break;
+		}
+	case VCD_MSLICE_BY_GOB:
+		{
+			m_slice_sel = VIDC_720P_MSLICE_BY_GOB;
+			break;
+		}
+	}
+	vidc_720p_encode_set_multi_slice_info
+	    (m_slice_sel, encoder->multi_slice.m_slice_size);
+
+	vidc_720p_encode_set_dpb_buffer
+	    (encoder->enc_dpb_addr.align_physical_addr,
+			 encoder->enc_dpb_addr.buffer_size);
+
+	VIDC_LOG1("ENC_DPB_ADDR_SIZE", encoder->enc_dpb_addr.buffer_size);
+
+	vidc_720p_encode_set_i_period(encoder->i_period.p_frames);
+
+	ddl_metadata_enable(ddl);
+
+	if (encoder->seq_header.virtual_base_addr) {
+		u32 ext_buffer_start, ext_buffer_end, start_byte_num;
+		ext_buffer_start =
+		    (u32) encoder->seq_header.align_physical_addr;
+		ext_buffer_end =
+		    ext_buffer_start + encoder->seq_header.buffer_size;
+		start_byte_num =
+		    (ext_buffer_start & DDL_STREAMBUF_ALIGN_GUARD_BYTES);
+		ext_buffer_start &= ~(DDL_STREAMBUF_ALIGN_GUARD_BYTES);
+		ext_buffer_end &= ~(DDL_STREAMBUF_ALIGN_GUARD_BYTES);
+		VIDC_LOG1("ENC_SEQHDR_ALLOC_SIZE",
+			   encoder->seq_header.buffer_size);
+		vidc_720p_encode_set_seq_header_buffer(ext_buffer_start,
+							ext_buffer_end,
+							start_byte_num);
+	}
+
+	if (encoder->re_con_buf_format.buffer_format ==
+		VCD_BUFFER_FORMAT_NV12)
+		mem_access_method = VIDC_720P_TILE_LINEAR;
+	else
+		mem_access_method = VIDC_720P_TILE_16x16;
+
+	ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_INITCODECDONE);
+	ddl_move_command_state(ddl->ddl_context, DDL_CMD_INIT_CODEC);
+
+	vidc_720p_encode_init_codec(ddl->channel_id, mem_access_method);
+}
+
+void ddl_channel_end(struct ddl_client_context *ddl)
+{
+	VIDC_DEBUG_REGISTER_LOG;
+
+	ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_CHEND);
+	ddl_move_command_state(ddl->ddl_context, DDL_CMD_CHANNEL_END);
+
+	vidc_720p_submit_command(ddl->channel_id, VIDC_720P_CMD_CHEND);
+}
+
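+/*
+ * Submits one encode command.  The output stream buffer start is
+ * aligned down to an 8-byte boundary; when that moves the start, the
+ * two words already present at the aligned address are handed to the
+ * hardware (vidc_720p_encode_unalign_bitstream), presumably so the
+ * bytes preceding the real start are preserved in the output.
+ */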
+void ddl_encode_frame_run(struct ddl_client_context *ddl)
+{
+	u32 ext_buffer_start, ext_buffer_end;
+	u32 y_addr, c_addr;
+	u32 start_byte_number = 0;
+	struct ddl_encoder_data *encoder = &(ddl->codec_data.encoder);
+	struct vcd_frame_data *stream = &(ddl->output_frame.vcd_frm);
+
+	ext_buffer_start = (u32) stream->physical + stream->offset;
+	ext_buffer_end = ddl_encode_set_metadata_output_buf(ddl);
+	start_byte_number =
+	    (ext_buffer_start & DDL_STREAMBUF_ALIGN_GUARD_BYTES);
+	if (start_byte_number) {
+		u32 upper_data, lower_data;
+		u32 *align_virtual_addr;
+		ext_buffer_start &= ~(DDL_STREAMBUF_ALIGN_GUARD_BYTES);
+		align_virtual_addr = (u32 *) (((u32) stream->virtual +
+						 stream->offset) -
+						start_byte_number);
+		upper_data = *align_virtual_addr;
+		align_virtual_addr++;
+		lower_data = *align_virtual_addr;
+		vidc_720p_encode_unalign_bitstream(upper_data, lower_data);
+	}
+
+	y_addr = (u32) ddl->input_frame.vcd_frm.physical +
+	    ddl->input_frame.vcd_frm.offset;
+	c_addr = (y_addr + (encoder->frame_size.scan_lines *
+				encoder->frame_size.stride));
+	ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_FRAME_DONE);
+	ddl_move_command_state(ddl->ddl_context, DDL_CMD_ENCODE_FRAME);
+
+	if (encoder->dynamic_prop_change) {
+		encoder->dynmic_prop_change_req = true;
+		ddl_encode_dynamic_property(ddl, true);
+	}
+	vidc_720p_encode_set_vop_time(
+			encoder->vop_timing.vop_time_resolution,
+			ddl->input_frame.frm_delta);
+
+	vidc_720p_encode_frame(ddl->channel_id,
+			ext_buffer_start, ext_buffer_end,
+			start_byte_number, y_addr, c_addr);
+}
+
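+/*
+ * Allocates the codec-internal buffers once the client DPBs are known:
+ * a co-located motion-vector (comv) buffer sized per codec (VC-1 uses
+ * one extra buffer with a smaller per-buffer line count) and, for the
+ * MPEG-class codecs, a reference-frame scratch area; it then issues the
+ * INITBUFFERS command.
+ */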
+u32 ddl_decode_set_buffers(struct ddl_client_context *ddl)
+{
+	struct ddl_decoder_data *decoder = &(ddl->codec_data.decoder);
+	u32 comv_buf_size = DDL_COMV_BUFLINE_NO, comv_buf_no = 0;
+	u32 ref_buf_no = 0;
+
+	if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPB)) {
+		VIDC_LOG_STRING("STATE-CRITICAL");
+		return VCD_ERR_FAIL;
+	}
+	if (vidc_msg_timing)
+		ddl_set_core_start_time(__func__, DEC_OP_TIME);
+	switch (decoder->codec.codec) {
+	default:
+	case VCD_CODEC_DIVX_4:
+	case VCD_CODEC_DIVX_5:
+	case VCD_CODEC_DIVX_6:
+	case VCD_CODEC_XVID:
+	case VCD_CODEC_MPEG2:
+	case VCD_CODEC_MPEG4:
+		{
+			comv_buf_no = DDL_MPEG_COMV_BUF_NO;
+			ref_buf_no = DDL_MPEG_REFBUF_COUNT;
+			break;
+		}
+	case VCD_CODEC_H263:
+		{
+			comv_buf_no = DDL_H263_COMV_BUF_NO;
+			break;
+		}
+	case VCD_CODEC_VC1:
+	case VCD_CODEC_VC1_RCV:
+		{
+			comv_buf_no =
+			    decoder->client_output_buf_req.actual_count + 1;
+			comv_buf_size = DDL_VC1_COMV_BUFLINE_NO;
+			break;
+		}
+	case VCD_CODEC_H264:
+		{
+			comv_buf_no =
+			    decoder->client_output_buf_req.actual_count;
+			break;
+		}
+	}
+
+	if (comv_buf_no) {
+		comv_buf_size *= (comv_buf_no *
+			(decoder->client_frame_size.stride >> 4) *
+			((decoder->client_frame_size.scan_lines >> 4) + 1));
+		if (decoder->dpb_comv_buffer.virtual_base_addr)
+			ddl_pmem_free(&decoder->dpb_comv_buffer);
+		ddl_pmem_alloc(&decoder->dpb_comv_buffer, comv_buf_size,
+			       DDL_LINEAR_BUFFER_ALIGN_BYTES);
+		if (!decoder->dpb_comv_buffer.virtual_base_addr) {
+			VIDC_LOGERR_STRING
+			    ("Dec_set_buf:Comv_buf_alloc_failed");
+			return VCD_ERR_ALLOC_FAIL;
+		}
+		vidc_720p_decode_set_comv_buffer(decoder->dpb_comv_buffer.
+						  align_physical_addr,
+						  decoder->dpb_comv_buffer.
+						  buffer_size);
+	}
+	decoder->ref_buffer.align_physical_addr = NULL;
+	if (ref_buf_no) {
+		size_t sz, align_bytes;
+		sz = decoder->dp_buf.dec_pic_buffers[0].vcd_frm.alloc_len;
+		sz *= ref_buf_no;
+		align_bytes = decoder->client_output_buf_req.align;
+		if (decoder->ref_buffer.virtual_base_addr)
+			ddl_pmem_free(&decoder->ref_buffer);
+		ddl_pmem_alloc(&decoder->ref_buffer, sz, align_bytes);
+		if (!decoder->ref_buffer.virtual_base_addr) {
+			ddl_pmem_free(&decoder->dpb_comv_buffer);
+			VIDC_LOGERR_STRING
+			    ("Dec_set_buf:mpeg_ref_buf_alloc_failed");
+			return VCD_ERR_ALLOC_FAIL;
+		}
+	}
+	ddl_decode_set_metadata_output(decoder);
+	ddl_decoder_dpb_transact(decoder, NULL, DDL_DPB_OP_INIT);
+	ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_DPBDONE);
+	ddl_move_command_state(ddl->ddl_context, DDL_CMD_DECODE_SET_DPB);
+
+	vidc_720p_submit_command(ddl->channel_id,
+		VIDC_720P_CMD_INITBUFFERS);
+	return VCD_S_SUCCESS;
+}
+
+void ddl_decode_frame_run(struct ddl_client_context *ddl)
+{
+	u32 ext_buffer_start = 0, ext_buffer_end = 0;
+	u32 start_byte_num = 8;
+	struct ddl_decoder_data *decoder = &ddl->codec_data.decoder;
+	struct vcd_frame_data *bit_stream =
+	    &(ddl->input_frame.vcd_frm);
+	if (vidc_msg_timing) {
+		ddl_set_core_start_time(__func__, DEC_OP_TIME);
+		ddl_set_core_start_time(__func__, DEC_IP_TIME);
+	}
+	if (!bit_stream->data_len ||
+		!bit_stream->physical) {
+		ddl_decode_eos_run(ddl);
+		return;
+	}
+
+	ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_FRAME_DONE);
+
+	ddl_decode_dynamic_property(ddl, true);
+
+	ddl_decoder_dpb_transact(decoder, NULL, DDL_DPB_OP_SET_MASK);
+
+	ext_buffer_start = (u32)bit_stream->physical +
+		bit_stream->offset;
+	start_byte_num = 8 - (ext_buffer_start &
+		DDL_STREAMBUF_ALIGN_GUARD_BYTES);
+	ext_buffer_end = ext_buffer_start + bit_stream->data_len;
+	ext_buffer_start &= ~(DDL_STREAMBUF_ALIGN_GUARD_BYTES);
+	DDL_PADDING_HACK(ext_buffer_end);
+
+	ddl_move_command_state(ddl->ddl_context, DDL_CMD_DECODE_FRAME);
+
+	vidc_720p_decode_frame(ddl->channel_id,
+			ext_buffer_start,
+			ext_buffer_end,
+			bit_stream->data_len,
+			start_byte_num, bit_stream->ip_frm_tag);
+}
+
+void  ddl_decode_eos_run(struct ddl_client_context *ddl)
+{
+	struct ddl_decoder_data *decoder = &ddl->codec_data.decoder;
+
+	ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_EOS_DONE);
+
+	ddl_decode_dynamic_property(ddl, true);
+
+	ddl_decoder_dpb_transact(decoder, NULL, DDL_DPB_OP_SET_MASK);
+
+	decoder->dynmic_prop_change_req = true;
+
+	ddl_move_command_state(ddl->ddl_context, DDL_CMD_EOS);
+
+	vidc_720p_issue_eos(ddl->channel_id);
+}
+
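+/*
+ * Resets the video core for the current channel: all supported interrupts
+ * are re-enabled, and the DMA endianness is programmed as the inverse of
+ * the firmware endianness reported by vcd_get_fw_property(). A failed
+ * reset is escalated through the hardware-fatal callback.
+ */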
+u32 ddl_hal_engine_reset(struct ddl_context *ddl_context)
+{
+	u32 eng_reset;
+	u32 channel_id = 0;
+	u32 fw_endianness;
+	enum vidc_720p_endian dma_endian;
+	enum vidc_720p_interrupt_level_selection interrupt_sel;
+	u32 intr_mask = 0x0;
+
+	if (ddl_context->current_ddl)
+		channel_id = ddl_context->current_ddl->channel_id;
+
+	interrupt_sel = VIDC_720P_INTERRUPT_LEVEL_SEL;
+	/* Enable all the supported interrupts */
+	intr_mask |= VIDC_720P_INTR_BUFFER_FULL;
+	intr_mask |= VIDC_720P_INTR_FW_DONE;
+	intr_mask |= VIDC_720P_INTR_DMA_DONE;
+	intr_mask |= VIDC_720P_INTR_FRAME_DONE;
+
+	vcd_get_fw_property(VCD_FW_ENDIAN, &fw_endianness);
+	/* Reverse the endianness settings after boot code download */
+	if (fw_endianness == VCD_FW_BIG_ENDIAN)
+		dma_endian = VIDC_720P_LITTLE_ENDIAN;
+	else
+		dma_endian = VIDC_720P_BIG_ENDIAN;
+
+	/* Need to reset MFC silently */
+	eng_reset = vidc_720p_engine_reset(
+		channel_id,
+		dma_endian, interrupt_sel,
+		intr_mask);
+	if (!eng_reset) {
+		/* call the hw fatal callback if engine reset fails */
+		ddl_hw_fatal_cb(ddl_context);
+	}
+	return eng_reset;
+}
diff --git a/drivers/video/msm/vidc/720p/ddl/vcd_ddl_helper.c b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_helper.c
new file mode 100644
index 0000000..2899df6
--- /dev/null
+++ b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_helper.c
@@ -0,0 +1,286 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "vidc_type.h"
+#include "vcd_ddl_utils.h"
+
+DDL_INLINE struct ddl_context *ddl_get_context(void)
+{
+	static struct ddl_context ddl_context;
+	return &ddl_context;
+}
+
+DDL_INLINE void ddl_move_client_state(struct ddl_client_context *ddl,
+				      enum ddl_client_state client_state)
+{
+	ddl->client_state = client_state;
+}
+
+DDL_INLINE void ddl_move_command_state(struct ddl_context *ddl_context,
+				       enum ddl_cmd_state command_state)
+{
+	ddl_context->cmd_state = command_state;
+}
+
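+/*
+ * Central bookkeeping for client channels: DDL_GET_CLIENT allocates and
+ * zero-initializes the first free slot, DDL_FREE_CLIENT releases a slot,
+ * DDL_INIT_CLIENTS clears the whole table and DDL_ACTIVE_CLIENT reports
+ * whether any client is currently allocated.
+ */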
+u32 ddl_client_transact(u32 operation,
+			struct ddl_client_context **pddl_client)
+{
+	u32 ret_status = VCD_ERR_FAIL;
+	u32 counter;
+	struct ddl_context *ddl_context;
+
+	ddl_context = ddl_get_context();
+	switch (operation) {
+	case DDL_FREE_CLIENT:
+		{
+			if (pddl_client && *pddl_client) {
+				u32 channel_id;
+				channel_id = (*pddl_client)->channel_id;
+				if (channel_id < VCD_MAX_NO_CLIENT) {
+					ddl_context->
+					    ddl_clients[channel_id] = NULL;
+				} else {
+					VIDC_LOG_STRING("CHID_CORRUPTION");
+				}
+				DDL_FREE(*pddl_client);
+				ret_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case DDL_GET_CLIENT:
+		{
+			ret_status = VCD_ERR_MAX_CLIENT;
+			for (counter = 0; counter < VCD_MAX_NO_CLIENT &&
+			     ret_status == VCD_ERR_MAX_CLIENT; ++counter) {
+				if (!ddl_context->ddl_clients[counter]) {
+					*pddl_client =
+					    (struct ddl_client_context *)
+					    DDL_MALLOC(sizeof
+					       (struct ddl_client_context)
+					       );
+					if (!*pddl_client) {
+						ret_status = VCD_ERR_ALLOC_FAIL;
+					} else {
+						DDL_MEMSET(*pddl_client, 0,
+						   sizeof(struct
+						   ddl_client_context));
+						ddl_context->
+						    ddl_clients[counter] =
+						    *pddl_client;
+						(*pddl_client)->channel_id =
+						    counter;
+						(*pddl_client)->ddl_context =
+						    ddl_context;
+						ret_status = VCD_S_SUCCESS;
+					}
+				}
+			}
+			break;
+		}
+	case DDL_INIT_CLIENTS:
+		{
+			for (counter = 0; counter < VCD_MAX_NO_CLIENT;
+			     ++counter) {
+				ddl_context->ddl_clients[counter] = NULL;
+			}
+			ret_status = VCD_S_SUCCESS;
+			break;
+		}
+	case DDL_ACTIVE_CLIENT:
+		{
+			for (counter = 0; counter < VCD_MAX_NO_CLIENT;
+			     ++counter) {
+				if (ddl_context->ddl_clients[counter]) {
+					ret_status = VCD_S_SUCCESS;
+					break;
+				}
+			}
+			break;
+		}
+	default:
+		{
+			ret_status = VCD_ERR_ILLEGAL_PARM;
+			break;
+		}
+	}
+	return ret_status;
+}
+
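+/*
+ * Tracks decoded picture buffer ownership with two bit masks: MARK_BUSY
+ * takes a buffer out of the hardware mask when it is handed out, MARK_FREE
+ * records a buffer the client has returned, SET_MASK merges the returned
+ * buffers back into the hardware mask and writes it to the core, INIT
+ * programs the DPB addresses, and RETRIEVE pulls back any buffer still
+ * marked in either mask.
+ */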
+u32 ddl_decoder_dpb_transact(struct ddl_decoder_data *decoder,
+			     struct ddl_frame_data_tag *in_out_frame,
+			     u32 operation)
+{
+	u32 vcd_status = VCD_S_SUCCESS;
+	u32 loopc;
+	struct ddl_frame_data_tag *found_frame = NULL;
+	struct ddl_mask *dpb_mask = &decoder->dpb_mask;
+
+	switch (operation) {
+	case DDL_DPB_OP_MARK_BUSY:
+	case DDL_DPB_OP_MARK_FREE:
+		{
+			for (loopc = 0; !found_frame &&
+			     loopc < decoder->dp_buf.no_of_dec_pic_buf;
+			     ++loopc) {
+				if (in_out_frame->vcd_frm.physical ==
+				    decoder->dp_buf.
+				    dec_pic_buffers[loopc].vcd_frm.
+				    physical) {
+					found_frame =
+					    &(decoder->dp_buf.
+					      dec_pic_buffers[loopc]);
+					break;
+				}
+			}
+
+			if (found_frame) {
+				if (operation == DDL_DPB_OP_MARK_BUSY) {
+					dpb_mask->hw_mask &=
+					    (~(0x1 << loopc));
+					*in_out_frame = *found_frame;
+				} else if (operation ==
+					DDL_DPB_OP_MARK_FREE) {
+					dpb_mask->client_mask |=
+					    (0x1 << loopc);
+					*found_frame = *in_out_frame;
+				}
+			} else {
+				in_out_frame->vcd_frm.physical = NULL;
+				in_out_frame->vcd_frm.virtual = NULL;
+				vcd_status = VCD_ERR_BAD_POINTER;
+				VIDC_LOG_STRING("BUF_NOT_FOUND");
+			}
+			break;
+		}
+	case DDL_DPB_OP_SET_MASK:
+		{
+			dpb_mask->hw_mask |= dpb_mask->client_mask;
+			dpb_mask->client_mask = 0;
+			vidc_720p_decode_set_dpb_release_buffer_mask
+			    (dpb_mask->hw_mask);
+			break;
+		}
+	case DDL_DPB_OP_INIT:
+		{
+			u32 dpb_size;
+			dpb_size = (!decoder->meta_data_offset) ?
+			    decoder->dp_buf.dec_pic_buffers[0].vcd_frm.
+			    alloc_len : decoder->meta_data_offset;
+			vidc_720p_decode_set_dpb_details(decoder->dp_buf.
+						  no_of_dec_pic_buf,
+						  dpb_size,
+						  decoder->ref_buffer.
+						  align_physical_addr);
+			for (loopc = 0;
+			     loopc < decoder->dp_buf.no_of_dec_pic_buf;
+			     ++loopc) {
+				vidc_720p_decode_set_dpb_buffers(loopc,
+							  (u32 *)
+							  decoder->
+							  dp_buf.
+							  dec_pic_buffers
+							  [loopc].
+							  vcd_frm.
+							  physical);
+				VIDC_LOG1("DEC_DPB_BUFn_SIZE",
+					   decoder->dp_buf.
+					   dec_pic_buffers[loopc].vcd_frm.
+					   alloc_len);
+			}
+			break;
+		}
+	case DDL_DPB_OP_RETRIEVE:
+		{
+			u32 position;
+			if (dpb_mask->client_mask) {
+				position = 0x1;
+				for (loopc = 0;
+				     loopc <
+				     decoder->dp_buf.no_of_dec_pic_buf
+				     && !found_frame; ++loopc) {
+					if (dpb_mask->
+					    client_mask & position) {
+						found_frame =
+						    &decoder->dp_buf.
+						    dec_pic_buffers[loopc];
+						dpb_mask->client_mask &=
+						    ~(position);
+					}
+					position <<= 1;
+				}
+			} else if (dpb_mask->hw_mask) {
+				position = 0x1;
+				for (loopc = 0;
+				     loopc <
+				     decoder->dp_buf.no_of_dec_pic_buf
+				     && !found_frame; ++loopc) {
+					if (dpb_mask->hw_mask
+							& position) {
+						found_frame =
+						    &decoder->dp_buf.
+						    dec_pic_buffers[loopc];
+						dpb_mask->hw_mask &=
+						    ~(position);
+					}
+					position <<= 1;
+				}
+			}
+			if (found_frame)
+				*in_out_frame = *found_frame;
+			else {
+				in_out_frame->vcd_frm.physical = NULL;
+				in_out_frame->vcd_frm.virtual = NULL;
+			}
+			break;
+		}
+	}
+	return vcd_status;
+}
+
+void ddl_release_context_buffers(struct ddl_context *ddl_context)
+{
+	ddl_pmem_free(&ddl_context->context_buf_addr);
+	ddl_pmem_free(&ddl_context->db_line_buffer);
+	ddl_pmem_free(&ddl_context->data_partition_tempbuf);
+	ddl_pmem_free(&ddl_context->metadata_shared_input);
+	ddl_pmem_free(&ddl_context->dbg_core_dump);
+
+	vcd_fw_release();
+}
+
+void ddl_release_client_internal_buffers(struct ddl_client_context *ddl)
+{
+	if (ddl->decoding) {
+		struct ddl_decoder_data *decoder =
+		    &(ddl->codec_data.decoder);
+		ddl_pmem_free(&decoder->h264Vsp_temp_buffer);
+		ddl_pmem_free(&decoder->dpb_comv_buffer);
+		ddl_pmem_free(&decoder->ref_buffer);
+		DDL_FREE(decoder->dp_buf.dec_pic_buffers);
+		ddl_decode_dynamic_property(ddl, false);
+		decoder->decode_config.sequence_header_len = 0;
+		decoder->decode_config.sequence_header = NULL;
+		decoder->dpb_mask.client_mask = 0;
+		decoder->dpb_mask.hw_mask = 0;
+		decoder->dp_buf.no_of_dec_pic_buf = 0;
+		decoder->dynamic_prop_change = 0;
+
+	} else {
+		struct ddl_encoder_data *encoder =
+		    &(ddl->codec_data.encoder);
+		ddl_pmem_free(&encoder->enc_dpb_addr);
+		ddl_pmem_free(&encoder->seq_header);
+		ddl_encode_dynamic_property(ddl, false);
+		encoder->dynamic_prop_change = 0;
+	}
+}
diff --git a/drivers/video/msm/vidc/720p/ddl/vcd_ddl_internal_property.h b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_internal_property.h
new file mode 100644
index 0000000..00c00cd
--- /dev/null
+++ b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_internal_property.h
@@ -0,0 +1,75 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _VCD_DDL_INTERNAL_PROPERTY_H_
+#define _VCD_DDL_INTERNAL_PROPERTY_H_
+#include "vcd_api.h"
+
+#define VCD_EVT_RESP_DDL_BASE          0x3000
+#define VCD_EVT_RESP_DEVICE_INIT       (VCD_EVT_RESP_DDL_BASE + 0x1)
+#define VCD_EVT_RESP_OUTPUT_REQ        (VCD_EVT_RESP_DDL_BASE + 0x2)
+#define VCD_EVT_RESP_EOS_DONE          (VCD_EVT_RESP_DDL_BASE + 0x3)
+#define VCD_EVT_RESP_TRANSACTION_PENDING (VCD_EVT_RESP_DDL_BASE + 0x4)
+
+#define VCD_S_DDL_ERR_BASE     0x90000000
+#define VCD_ERR_MAX_NO_CODEC   (VCD_S_DDL_ERR_BASE + 0x1)
+#define VCD_ERR_CLIENT_PRESENT (VCD_S_DDL_ERR_BASE + 0x2)
+#define VCD_ERR_CLIENT_FATAL   (VCD_S_DDL_ERR_BASE + 0x3)
+
+#define VCD_I_CUSTOM_BASE  (VCD_I_RESERVED_BASE)
+#define VCD_I_RC_LEVEL_CONFIG (VCD_I_CUSTOM_BASE + 0x1)
+#define VCD_I_FRAME_LEVEL_RC (VCD_I_CUSTOM_BASE + 0x2)
+#define VCD_I_ADAPTIVE_RC    (VCD_I_CUSTOM_BASE + 0x3)
+#define VCD_I_CUSTOM_DDL_BASE  (VCD_I_RESERVED_BASE + 0x100)
+#define DDL_I_INPUT_BUF_REQ  (VCD_I_CUSTOM_DDL_BASE + 0x1)
+#define DDL_I_OUTPUT_BUF_REQ (VCD_I_CUSTOM_DDL_BASE + 0x2)
+#define DDL_I_DPB       (VCD_I_CUSTOM_DDL_BASE + 0x3)
+#define DDL_I_DPB_RELEASE    (VCD_I_CUSTOM_DDL_BASE + 0x4)
+#define DDL_I_DPB_RETRIEVE  (VCD_I_CUSTOM_DDL_BASE + 0x5)
+#define DDL_I_REQ_OUTPUT_FLUSH   (VCD_I_CUSTOM_DDL_BASE + 0x6)
+#define DDL_I_SEQHDR_ALIGN_BYTES (VCD_I_CUSTOM_DDL_BASE + 0x7)
+#define DDL_I_SEQHDR_PRESENT (VCD_I_CUSTOM_DDL_BASE + 0xb)
+#define DDL_I_CAPABILITY    (VCD_I_CUSTOM_DDL_BASE + 0x8)
+#define DDL_I_FRAME_PROC_UNITS    (VCD_I_CUSTOM_DDL_BASE + 0x9)
+
+struct vcd_property_rc_level {
+	u32 frame_level_rc;
+	u32 mb_level_rc;
+};
+
+struct vcd_property_frame_level_rc_params {
+	u32 reaction_coeff;
+};
+
+struct vcd_property_adaptive_rc_params {
+	u32 dark_region_as_flag;
+	u32 smooth_region_as_flag;
+	u32 static_region_as_flag;
+	u32 activity_region_flag;
+};
+
+struct ddl_frame_data_tag;
+
+struct ddl_property_dec_pic_buffers {
+	struct ddl_frame_data_tag *dec_pic_buffers;
+	u32 no_of_dec_pic_buf;
+};
+
+struct ddl_property_capability {
+	u32 max_num_client;
+	u32 general_command_depth;
+	u32 frame_command_depth;
+	u32 exclusive;
+	u32 ddl_time_out_in_ms;
+};
+
+#endif
diff --git a/drivers/video/msm/vidc/720p/ddl/vcd_ddl_interrupt_handler.c b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_interrupt_handler.c
new file mode 100644
index 0000000..ef5b717
--- /dev/null
+++ b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_interrupt_handler.c
@@ -0,0 +1,1122 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "vidc_type.h"
+#include "vidc.h"
+#include "vcd_ddl_utils.h"
+#include "vcd_ddl_metadata.h"
+
+#if DEBUG
+#define DBG(x...) printk(KERN_DEBUG x)
+#else
+#define DBG(x...)
+#endif
+
+static void ddl_decoder_input_done_callback(
+	struct	ddl_client_context *ddl, u32 frame_transact_end);
+static u32 ddl_decoder_output_done_callback(
+	struct ddl_client_context *ddl, u32 frame_transact_end);
+
+static u32 ddl_get_frame(struct vcd_frame_data *frame, u32 frame_type);
+
+static void ddl_getdec_profilelevel(struct ddl_decoder_data *decoder,
+		u32 profile, u32 level);
+
+static void ddl_dma_done_callback(struct ddl_context *ddl_context)
+{
+	if (!DDLCOMMAND_STATE_IS(ddl_context, DDL_CMD_DMA_INIT)) {
+		VIDC_LOGERR_STRING("UNKWN_DMADONE");
+		return;
+	}
+	ddl_move_command_state(ddl_context, DDL_CMD_INVALID);
+	VIDC_LOG_STRING("DMA_DONE");
+	ddl_core_start_cpu(ddl_context);
+}
+
+static void ddl_cpu_started_callback(struct ddl_context *ddl_context)
+{
+	ddl_move_command_state(ddl_context, DDL_CMD_INVALID);
+	VIDC_LOG_STRING("CPU-STARTED");
+
+	if (!vidc_720p_cpu_start()) {
+		ddl_hw_fatal_cb(ddl_context);
+		return;
+	}
+
+	vidc_720p_set_deblock_line_buffer(
+			ddl_context->db_line_buffer.align_physical_addr,
+			ddl_context->db_line_buffer.buffer_size);
+	ddl_context->device_state = DDL_DEVICE_INITED;
+	ddl_context->ddl_callback(VCD_EVT_RESP_DEVICE_INIT, VCD_S_SUCCESS,
+			NULL, 0, NULL, ddl_context->client_data);
+	DDL_IDLE(ddl_context);
+}
+
+
+static u32 ddl_eos_done_callback(struct ddl_context *ddl_context)
+{
+	struct ddl_client_context *ddl = ddl_context->current_ddl;
+	u32 displaystatus, resl_change;
+
+	if (!DDLCOMMAND_STATE_IS(ddl_context, DDL_CMD_EOS)) {
+		VIDC_LOGERR_STRING("UNKWN_EOSDONE");
+		ddl_client_fatal_cb(ddl_context);
+		return true;
+	}
+
+	if (!ddl ||
+		!ddl->decoding ||
+		!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_EOS_DONE)
+		) {
+		VIDC_LOG_STRING("STATE-CRITICAL-EOSDONE");
+		ddl_client_fatal_cb(ddl_context);
+		return true;
+	}
+	ddl_move_command_state(ddl_context, DDL_CMD_INVALID);
+
+	vidc_720p_eos_info(&displaystatus, &resl_change);
+	if ((enum vidc_720p_display_status)displaystatus
+		!= VIDC_720P_EMPTY_BUFFER) {
+		VIDC_LOG_STRING("EOSDONE-EMPTYBUF-ISSUE");
+	}
+
+	ddl_decode_dynamic_property(ddl, false);
+	if (resl_change == 0x1) {
+		ddl->codec_data.decoder.header_in_start = false;
+		ddl->codec_data.decoder.decode_config.sequence_header =
+			ddl->input_frame.vcd_frm.physical;
+		ddl->codec_data.decoder.decode_config.sequence_header_len =
+			ddl->input_frame.vcd_frm.data_len;
+		ddl_decode_init_codec(ddl);
+		return false;
+	}
+	ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_FRAME);
+	VIDC_LOG_STRING("EOS_DONE");
+	ddl_context->ddl_callback(VCD_EVT_RESP_EOS_DONE, VCD_S_SUCCESS,
+		NULL, 0, (u32 *) ddl, ddl_context->client_data);
+	DDL_IDLE(ddl_context);
+
+	return true;
+}
+
+static u32 ddl_channel_set_callback(struct ddl_context *ddl_context)
+{
+	struct ddl_client_context *ddl = ddl_context->current_ddl;
+	u32 return_status = false;
+
+	ddl_move_command_state(ddl_context, DDL_CMD_INVALID);
+	VIDC_DEBUG_REGISTER_LOG;
+
+	if (!ddl ||
+		!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_CHDONE)
+		) {
+		VIDC_LOG_STRING("STATE-CRITICAL-CHSET");
+		DDL_IDLE(ddl_context);
+		return return_status;
+	}
+	VIDC_LOG_STRING("Channel-set");
+	ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_INITCODEC);
+
+	if (ddl->decoding) {
+		if (vidc_msg_timing)
+			ddl_calc_core_proc_time(__func__, DEC_OP_TIME);
+		if (ddl->codec_data.decoder.header_in_start) {
+			ddl_decode_init_codec(ddl);
+		} else {
+			ddl_context->ddl_callback(VCD_EVT_RESP_START,
+				VCD_S_SUCCESS, NULL,
+				0, (u32 *) ddl,
+				ddl_context->client_data);
+
+			DDL_IDLE(ddl_context);
+			return_status = true;
+		}
+	} else {
+		ddl_encode_init_codec(ddl);
+	}
+	return return_status;
+}
+
+static void ddl_init_codec_done_callback(struct ddl_context *ddl_context)
+{
+	struct ddl_client_context *ddl = ddl_context->current_ddl;
+	struct ddl_encoder_data *encoder;
+
+	if (!ddl ||
+		ddl->decoding ||
+		!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_INITCODECDONE)
+		) {
+		VIDC_LOG_STRING("STATE-CRITICAL-INITCODEC");
+		ddl_client_fatal_cb(ddl_context);
+		return;
+	}
+	ddl_move_command_state(ddl_context, DDL_CMD_INVALID);
+	ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_FRAME);
+	VIDC_LOG_STRING("INIT_CODEC_DONE");
+
+	encoder = &ddl->codec_data.encoder;
+	if (encoder->seq_header.virtual_base_addr) {
+		vidc_720p_encode_get_header(&encoder->seq_header.buffer_size);
+	}
+
+	ddl_context->ddl_callback(VCD_EVT_RESP_START, VCD_S_SUCCESS, NULL,
+		0, (u32 *) ddl, ddl_context->client_data);
+
+	DDL_IDLE(ddl_context);
+}
+
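+/*
+ * Runs when the core finishes parsing the sequence header: picks up the
+ * frame size, minimum DPB count and profile/level, recomputes the buffer
+ * requirements and then either completes a header-only input frame,
+ * signals VCD_EVT_IND_OUTPUT_RECONFIG when the client's buffers no longer
+ * fit, or programs the DPBs directly via ddl_decode_set_buffers().
+ */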
+static u32 ddl_header_done_callback(struct ddl_context *ddl_context)
+{
+	struct ddl_client_context *ddl = ddl_context->current_ddl;
+	struct ddl_decoder_data *decoder;
+	struct vidc_720p_seq_hdr_info seq_hdr_info;
+
+	u32 process_further = true;
+	u32 seq_hdr_only_frame = false;
+	u32 need_reconfig = true;
+	struct vcd_frame_data *input_vcd_frm;
+	struct ddl_frame_data_tag *reconfig_payload = NULL;
+	u32 reconfig_payload_size = 0;
+
+	if (!ddl ||
+		!ddl->decoding ||
+		!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_INITCODECDONE)
+		) {
+		VIDC_LOG_STRING("STATE-CRITICAL-HDDONE");
+		ddl_client_fatal_cb(ddl_context);
+		return true;
+	}
+	if (vidc_msg_timing)
+		ddl_calc_core_proc_time(__func__, DEC_OP_TIME);
+	ddl_move_command_state(ddl_context, DDL_CMD_INVALID);
+	ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_DPB);
+	VIDC_LOG_STRING("HEADER_DONE");
+	VIDC_DEBUG_REGISTER_LOG;
+
+	vidc_720p_decode_get_seq_hdr_info(&seq_hdr_info);
+
+	decoder = &(ddl->codec_data.decoder);
+	decoder->frame_size.width = seq_hdr_info.img_size_x;
+	decoder->frame_size.height = seq_hdr_info.img_size_y;
+	decoder->min_dpb_num = seq_hdr_info.min_num_dpb;
+	decoder->y_cb_cr_size = seq_hdr_info.min_dpb_size;
+	decoder->progressive_only = 1 - seq_hdr_info.progressive;
+	if (!seq_hdr_info.img_size_x || !seq_hdr_info.img_size_y) {
+		VIDC_LOGERR_STRING("FATAL: ZeroImageSize");
+		ddl_client_fatal_cb(ddl_context);
+		return process_further;
+	}
+	if (seq_hdr_info.data_partitioned == 0x1 &&
+		decoder->codec.codec == VCD_CODEC_MPEG4 &&
+		seq_hdr_info.img_size_x > DDL_MAX_DP_FRAME_WIDTH &&
+		seq_hdr_info.img_size_y > DDL_MAX_DP_FRAME_HEIGHT) {
+		ddl_client_fatal_cb(ddl_context);
+		return process_further;
+	}
+	ddl_getdec_profilelevel(decoder, seq_hdr_info.profile,
+		seq_hdr_info.level);
+	ddl_calculate_stride(&decoder->frame_size,
+			!decoder->progressive_only,
+			decoder->codec.codec);
+	if (decoder->buf_format.buffer_format == VCD_BUFFER_FORMAT_TILE_4x2) {
+		decoder->frame_size.stride =
+			DDL_TILE_ALIGN(decoder->frame_size.width,
+				DDL_TILE_ALIGN_WIDTH);
+		decoder->frame_size.scan_lines =
+			DDL_TILE_ALIGN(decoder->frame_size.height,
+				DDL_TILE_ALIGN_HEIGHT);
+	}
+	if (seq_hdr_info.crop_exists) {
+		decoder->frame_size.width -=
+			(seq_hdr_info.crop_right_offset +
+			 seq_hdr_info.crop_left_offset);
+		decoder->frame_size.height -=
+			(seq_hdr_info.crop_top_offset +
+			 seq_hdr_info.crop_bottom_offset);
+	}
+	ddl_set_default_decoder_buffer_req(decoder, false);
+
+	if (decoder->header_in_start) {
+		decoder->client_frame_size = decoder->frame_size;
+		decoder->client_output_buf_req =
+			decoder->actual_output_buf_req;
+		decoder->client_input_buf_req =
+			decoder->actual_input_buf_req;
+		ddl_context->ddl_callback(VCD_EVT_RESP_START, VCD_S_SUCCESS,
+			NULL, 0, (u32 *) ddl,	ddl_context->client_data);
+		DDL_IDLE(ddl_context);
+	} else {
+		DBG("%s(): Client data: WxH(%u x %u) SxSL(%u x %u) Sz(%u)\n",
+			__func__, decoder->client_frame_size.width,
+			decoder->client_frame_size.height,
+			decoder->client_frame_size.stride,
+			decoder->client_frame_size.scan_lines,
+			decoder->client_output_buf_req.sz);
+		DBG("%s(): DDL data: WxH(%u x %u) SxSL(%u x %u) Sz(%u)\n",
+			__func__, decoder->frame_size.width,
+			decoder->frame_size.height,
+			decoder->frame_size.stride,
+			decoder->frame_size.scan_lines,
+			decoder->actual_output_buf_req.sz);
+		DBG("%s(): min_dpb_num = %d actual_count = %d\n", __func__,
+			decoder->min_dpb_num,
+			decoder->client_output_buf_req.actual_count);
+
+		input_vcd_frm = &(ddl->input_frame.vcd_frm);
+
+		if (decoder->frame_size.width ==
+			decoder->client_frame_size.width
+			&& decoder->frame_size.height ==
+			decoder->client_frame_size.height
+			&& decoder->frame_size.stride ==
+			decoder->client_frame_size.stride
+			&& decoder->frame_size.scan_lines ==
+			decoder->client_frame_size.scan_lines
+			&& decoder->actual_output_buf_req.sz <=
+			decoder->client_output_buf_req.sz
+			&& decoder->actual_output_buf_req.actual_count <=
+			decoder->client_output_buf_req.actual_count
+			&& decoder->progressive_only)
+			need_reconfig = false;
+		if ((input_vcd_frm->data_len <= seq_hdr_info.dec_frm_size ||
+			 (input_vcd_frm->flags & VCD_FRAME_FLAG_CODECCONFIG)) &&
+			(!need_reconfig ||
+			 !(input_vcd_frm->flags & VCD_FRAME_FLAG_EOS))) {
+			input_vcd_frm->flags |=
+				VCD_FRAME_FLAG_CODECCONFIG;
+			seq_hdr_only_frame = true;
+			input_vcd_frm->data_len = 0;
+			ddl->input_frame.frm_trans_end = !need_reconfig;
+			ddl_context->ddl_callback(
+				VCD_EVT_RESP_INPUT_DONE,
+				VCD_S_SUCCESS, &ddl->input_frame,
+				sizeof(struct ddl_frame_data_tag),
+				(u32 *) ddl,
+				ddl->ddl_context->client_data);
+		} else if (decoder->codec.codec != VCD_CODEC_H263) {
+			input_vcd_frm->offset += seq_hdr_info.dec_frm_size;
+			input_vcd_frm->data_len -= seq_hdr_info.dec_frm_size;
+		}
+		if (need_reconfig) {
+			decoder->client_frame_size = decoder->frame_size;
+			decoder->client_output_buf_req =
+				decoder->actual_output_buf_req;
+			decoder->client_input_buf_req =
+				decoder->actual_input_buf_req;
+			if (!seq_hdr_only_frame) {
+				reconfig_payload = &ddl->input_frame;
+				reconfig_payload_size =
+					sizeof(struct ddl_frame_data_tag);
+			}
+			ddl_context->ddl_callback(VCD_EVT_IND_OUTPUT_RECONFIG,
+					VCD_S_SUCCESS, reconfig_payload,
+					reconfig_payload_size,
+					(u32 *) ddl,
+					ddl_context->client_data);
+		}
+		if (!need_reconfig && !seq_hdr_only_frame) {
+			if (ddl_decode_set_buffers(ddl) == VCD_S_SUCCESS)
+				process_further = false;
+			else
+				ddl_client_fatal_cb(ddl_context);
+		} else
+			DDL_IDLE(ddl_context);
+	}
+	return process_further;
+}
+
+static u32 ddl_dpb_buffers_set_done_callback(struct ddl_context
+						  *ddl_context)
+{
+	struct ddl_client_context *ddl = ddl_context->current_ddl;
+
+	ddl_move_command_state(ddl_context, DDL_CMD_INVALID);
+	if (!ddl ||
+		!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPBDONE)
+		) {
+		VIDC_LOG_STRING("STATE-CRITICAL-DPBDONE");
+		ddl_client_fatal_cb(ddl_context);
+		return true;
+	}
+	if (vidc_msg_timing) {
+		ddl_calc_core_proc_time(__func__, DEC_OP_TIME);
+		ddl_reset_core_time_variables(DEC_OP_TIME);
+	}
+	VIDC_LOG_STRING("INTR_DPBDONE");
+	ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_FRAME);
+	ddl->codec_data.decoder.dec_disp_info.img_size_x = 0;
+	ddl->codec_data.decoder.dec_disp_info.img_size_y = 0;
+	ddl_decode_frame_run(ddl);
+	return false;
+}
+
+static void ddl_encoder_frame_run_callback(struct ddl_context
+					   *ddl_context)
+{
+	struct ddl_client_context *ddl = ddl_context->current_ddl;
+	struct ddl_encoder_data *encoder = &(ddl->codec_data.encoder);
+	u32 eos_present = false;
+
+	if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME_DONE)
+		) {
+		VIDC_LOG_STRING("STATE-CRITICAL-ENCFRMRUN");
+		ddl_client_fatal_cb(ddl_context);
+		return;
+	}
+
+	VIDC_LOG_STRING("ENC_FRM_RUN_DONE");
+
+	ddl_move_command_state(ddl_context, DDL_CMD_INVALID);
+	vidc_720p_enc_frame_info(&encoder->enc_frame_info);
+
+	ddl->output_frame.vcd_frm.ip_frm_tag =
+		ddl->input_frame.vcd_frm.ip_frm_tag;
+	ddl->output_frame.vcd_frm.data_len =
+		encoder->enc_frame_info.enc_size;
+	ddl->output_frame.vcd_frm.flags |= VCD_FRAME_FLAG_ENDOFFRAME;
+	ddl_get_frame
+		(&(ddl->output_frame.vcd_frm),
+		 encoder->enc_frame_info.frame);
+	ddl_process_encoder_metadata(ddl);
+
+	ddl_encode_dynamic_property(ddl, false);
+
+	ddl->input_frame.frm_trans_end = false;
+	ddl_context->ddl_callback(VCD_EVT_RESP_INPUT_DONE, VCD_S_SUCCESS,
+		&(ddl->input_frame), sizeof(struct ddl_frame_data_tag),
+		(u32 *) ddl, ddl_context->client_data);
+
+	if (vidc_msg_timing)
+		ddl_calc_core_proc_time(__func__, ENC_OP_TIME);
+
+	/* check the presence of EOS */
+	eos_present =
+		(VCD_FRAME_FLAG_EOS & ddl->input_frame.vcd_frm.flags);
+
+	ddl->output_frame.frm_trans_end = !eos_present;
+	ddl_context->ddl_callback(VCD_EVT_RESP_OUTPUT_DONE, VCD_S_SUCCESS,
+		&(ddl->output_frame),	sizeof(struct ddl_frame_data_tag),
+		(u32 *) ddl, ddl_context->client_data);
+
+	if (eos_present) {
+		VIDC_LOG_STRING("ENC-EOS_DONE");
+		ddl_context->ddl_callback(VCD_EVT_RESP_EOS_DONE,
+				VCD_S_SUCCESS, NULL, 0,	(u32 *)ddl,
+				ddl_context->client_data);
+	}
+
+	ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_FRAME);
+	DDL_IDLE(ddl_context);
+}
+
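+/*
+ * Completion handler for a decode frame run. Depending on the display
+ * status it reports input and/or output done to the client, resubmits the
+ * same input when the core only displayed or skipped a frame, switches to
+ * EOS handling when the input carried the EOS flag, or returns to idle.
+ */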
+static u32 ddl_decoder_frame_run_callback(struct ddl_context
+					   *ddl_context)
+{
+	struct ddl_client_context *ddl = ddl_context->current_ddl;
+	struct vidc_720p_dec_disp_info *dec_disp_info =
+	    &(ddl->codec_data.decoder.dec_disp_info);
+	u32 callback_end = false;
+	u32 status = true, eos_present = false;
+
+	if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME_DONE)) {
+		VIDC_LOG_STRING("STATE-CRITICAL-DECFRMRUN");
+		ddl_client_fatal_cb(ddl_context);
+		return true;
+	}
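+/*
+ * Allocates the internal comv buffer (and, for the MPEG-family codecs, a
+ * separate reference buffer), sizes them from the codec-specific counts
+ * chosen in the switch below, then programs the DPB details and submits
+ * the INITBUFFERS command to the core.
+ */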
+
+	VIDC_LOG_STRING("DEC_FRM_RUN_DONE");
+
+	ddl_move_command_state(ddl_context, DDL_CMD_INVALID);
+
+	vidc_720p_decode_display_info(dec_disp_info);
+
+	ddl_decode_dynamic_property(ddl, false);
+
+	if (dec_disp_info->resl_change) {
+		VIDC_LOG_STRING
+			("DEC_FRM_RUN_DONE: RECONFIG");
+		ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_EOS_DONE);
+		ddl_move_command_state(ddl_context, DDL_CMD_EOS);
+		vidc_720p_submit_command(ddl->channel_id,
+			VIDC_720P_CMD_FRAMERUN_REALLOCATE);
+		return false;
+	}
+
+	if ((VCD_FRAME_FLAG_EOS & ddl->input_frame.vcd_frm.flags)) {
+		callback_end = false;
+		eos_present = true;
+	}
+
+	if (dec_disp_info->disp_status == VIDC_720P_DECODE_ONLY ||
+		dec_disp_info->disp_status
+			== VIDC_720P_DECODE_AND_DISPLAY) {
+		if (!eos_present)
+			callback_end = (dec_disp_info->disp_status
+					== VIDC_720P_DECODE_ONLY);
+
+		ddl_decoder_input_done_callback(ddl, callback_end);
+	}
+
+	if (dec_disp_info->disp_status == VIDC_720P_DECODE_AND_DISPLAY
+		|| dec_disp_info->disp_status == VIDC_720P_DISPLAY_ONLY) {
+		if (!eos_present)
+			callback_end =
+			(dec_disp_info->disp_status
+				== VIDC_720P_DECODE_AND_DISPLAY);
+
+		if (ddl_decoder_output_done_callback(ddl, callback_end)
+			!= VCD_S_SUCCESS)
+			return true;
+	}
+
+	if (dec_disp_info->disp_status == VIDC_720P_DISPLAY_ONLY ||
+		dec_disp_info->disp_status == VIDC_720P_EMPTY_BUFFER) {
+		/* send the same input once again for decoding */
+		ddl_decode_frame_run(ddl);
+		/* client need to ignore the interrupt */
+		/* client needs to ignore the interrupt */
+	} else if (eos_present) {
+		/* send EOS command to HW */
+		ddl_decode_eos_run(ddl);
+		/* client needs to ignore the interrupt */
+		status = false;
+	} else {
+		ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_FRAME);
+		/* move to Idle */
+		DDL_IDLE(ddl_context);
+	}
+	return status;
+}
+
+static u32 ddl_eos_frame_done_callback(struct ddl_context *ddl_context)
+{
+	struct ddl_client_context *ddl = ddl_context->current_ddl;
+	struct ddl_decoder_data *decoder = &(ddl->codec_data.decoder);
+	struct vidc_720p_dec_disp_info *dec_disp_info =
+		&(decoder->dec_disp_info);
+
+	if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_EOS_DONE)) {
+		VIDC_LOGERR_STRING("STATE-CRITICAL-EOSFRMRUN");
+		ddl_client_fatal_cb(ddl_context);
+		return true;
+	}
+	VIDC_LOG_STRING("EOS_FRM_RUN_DONE");
+
+	ddl_move_command_state(ddl_context, DDL_CMD_INVALID);
+
+	vidc_720p_decode_display_info(dec_disp_info);
+
+	ddl_decode_dynamic_property(ddl, false);
+
+	if (dec_disp_info->disp_status == VIDC_720P_DISPLAY_ONLY) {
+		if (ddl_decoder_output_done_callback(ddl, false)
+			!= VCD_S_SUCCESS)
+			return true;
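+/*
+ * Submits one bitstream buffer for decoding. The stream address is rounded
+ * down to the alignment guard boundary and the start byte number passed to
+ * the core is derived from the offset within that alignment; an input
+ * frame with no data or no physical address is treated as an EOS request.
+ */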
+	} else
+		VIDC_LOG_STRING("STATE-CRITICAL-WRONG-DISP-STATUS");
+
+	ddl_decoder_dpb_transact(decoder, NULL, DDL_DPB_OP_SET_MASK);
+	ddl_move_command_state(ddl_context, DDL_CMD_EOS);
+	vidc_720p_submit_command(ddl->channel_id,
+		VIDC_720P_CMD_FRAMERUN);
+	return false;
+}
+
+static void ddl_channel_end_callback(struct ddl_context *ddl_context)
+{
+	struct ddl_client_context *ddl;
+
+	ddl_move_command_state(ddl_context, DDL_CMD_INVALID);
+	VIDC_LOG_STRING("CH_END_DONE");
+
+	ddl = ddl_context->current_ddl;
+	if (!ddl ||
+		!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_CHEND)
+		) {
+		VIDC_LOG_STRING("STATE-CRITICAL-CHEND");
+		DDL_IDLE(ddl_context);
+		return;
+	}
+
+	ddl_release_client_internal_buffers(ddl);
+	ddl_context->ddl_callback(VCD_EVT_RESP_STOP, VCD_S_SUCCESS,
+		NULL, 0, (u32 *) ddl,	ddl_context->client_data);
+	ddl_move_client_state(ddl, DDL_CLIENT_OPEN);
+	DDL_IDLE(ddl_context);
+}
+
+static u32 ddl_operation_done_callback(struct ddl_context *ddl_context)
+{
+	u32 return_status = true;
+
+	switch (ddl_context->cmd_state) {
+	case DDL_CMD_DECODE_FRAME:
+		{
+			return_status = ddl_decoder_frame_run_callback(
+				ddl_context);
+			break;
+		}
+	case DDL_CMD_ENCODE_FRAME:
+		{
+			ddl_encoder_frame_run_callback(ddl_context);
+			break;
+		}
+	case DDL_CMD_CHANNEL_SET:
+		{
+			return_status = ddl_channel_set_callback(
+				ddl_context);
+			break;
+		}
+	case DDL_CMD_INIT_CODEC:
+		{
+			ddl_init_codec_done_callback(ddl_context);
+			break;
+		}
+	case DDL_CMD_HEADER_PARSE:
+		{
+			return_status = ddl_header_done_callback(
+				ddl_context);
+			break;
+		}
+	case DDL_CMD_DECODE_SET_DPB:
+		{
+			return_status = ddl_dpb_buffers_set_done_callback(
+				ddl_context);
+			break;
+		}
+	case DDL_CMD_CHANNEL_END:
+		{
+			ddl_channel_end_callback(ddl_context);
+			break;
+		}
+	case DDL_CMD_EOS:
+		{
+			return_status = ddl_eos_frame_done_callback(
+				ddl_context);
+			break;
+		}
+	case DDL_CMD_CPU_RESET:
+		{
+			ddl_cpu_started_callback(ddl_context);
+			break;
+		}
+	default:
+		{
+			VIDC_LOG_STRING("UNKWN_OPDONE");
+			return_status = false;
+			break;
+		}
+	}
+	return return_status;
+}
+
+static u32 ddl_process_intr_status(struct ddl_context *ddl_context,
+			u32 int_status)
+{
+	u32 status = true;
+	switch (int_status) {
+	case VIDC_720P_INTR_FRAME_DONE:
+		{
+			status = ddl_operation_done_callback(ddl_context);
+			break;
+		}
+	case VIDC_720P_INTR_DMA_DONE:
+		{
+			ddl_dma_done_callback(ddl_context);
+			status = false;
+			break;
+		}
+	case VIDC_720P_INTR_FW_DONE:
+		{
+			status = ddl_eos_done_callback(ddl_context);
+			break;
+		}
+	case VIDC_720P_INTR_BUFFER_FULL:
+		{
+			VIDC_LOGERR_STRING("BUF_FULL_INTR");
+			ddl_hw_fatal_cb(ddl_context);
+			break;
+		}
+	default:
+		{
+			VIDC_LOGERR_STRING("UNKWN_INTR");
+			break;
+		}
+	}
+	return status;
+}
+
+void ddl_read_and_clear_interrupt(void)
+{
+	struct ddl_context *ddl_context;
+
+	ddl_context = ddl_get_context();
+	if (!ddl_context->core_virtual_base_addr) {
+		VIDC_LOGERR_STRING("SPURIOUS_INTERRUPT");
+		return;
+	}
+	vidc_720p_get_interrupt_status(&ddl_context->intr_status,
+		&ddl_context->cmd_err_status,
+		&ddl_context->disp_pic_err_status,
+		&ddl_context->op_failed);
+
+	vidc_720p_interrupt_done_clear();
+}
+
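+/*
+ * Processes the status latched by ddl_read_and_clear_interrupt(): core
+ * errors are handled first, and only when ddl_handle_core_errors() does
+ * not consume the event is the interrupt status dispatched to the
+ * matching completion callback above.
+ */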
+u32 ddl_process_core_response(void)
+{
+	struct ddl_context *ddl_context;
+	u32 return_status = true;
+
+	ddl_context = ddl_get_context();
+	if (!ddl_context->core_virtual_base_addr) {
+		VIDC_LOGERR_STRING("UNKWN_INTR");
+		return false;
+	}
+
+	if (!ddl_handle_core_errors(ddl_context)) {
+		return_status = ddl_process_intr_status(ddl_context,
+			ddl_context->intr_status);
+	}
+
+	if (ddl_context->interrupt_clr)
+		(*ddl_context->interrupt_clr)();
+
+	return return_status;
+}
+
+static void ddl_decoder_input_done_callback(
+	struct	ddl_client_context *ddl, u32 frame_transact_end)
+{
+	struct vidc_720p_dec_disp_info *dec_disp_info =
+		&(ddl->codec_data.decoder.dec_disp_info);
+	struct vcd_frame_data *input_vcd_frm =
+		&(ddl->input_frame.vcd_frm);
+	ddl_get_frame(input_vcd_frm, dec_disp_info->input_frame);
+
+	input_vcd_frm->interlaced = dec_disp_info->input_is_interlace;
+
+	input_vcd_frm->offset += dec_disp_info->input_bytes_consumed;
+	input_vcd_frm->data_len -= dec_disp_info->input_bytes_consumed;
+
+	ddl->input_frame.frm_trans_end = frame_transact_end;
+	if (vidc_msg_timing)
+		ddl_calc_core_proc_time(__func__, DEC_IP_TIME);
+	ddl->ddl_context->ddl_callback(
+		VCD_EVT_RESP_INPUT_DONE,
+		VCD_S_SUCCESS,
+		&ddl->input_frame,
+		sizeof(struct ddl_frame_data_tag),
+		(void *)ddl,
+		ddl->ddl_context->client_data);
+}
+
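+/*
+ * Builds the output frame from the hardware display information: marks
+ * the matching DPB entry busy, fills in crop, interlace and frame-tag
+ * fields, appends any decoder metadata and reports
+ * VCD_EVT_RESP_OUTPUT_DONE to the client.
+ */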
+static u32 ddl_decoder_output_done_callback(
+	struct ddl_client_context *ddl,
+	u32 frame_transact_end)
+{
+	struct ddl_decoder_data *decoder = &(ddl->codec_data.decoder);
+	struct vidc_720p_dec_disp_info *dec_disp_info =
+		&(decoder->dec_disp_info);
+	struct ddl_frame_data_tag *output_frame =
+		&ddl->output_frame;
+	struct vcd_frame_data *output_vcd_frm =
+		&(output_frame->vcd_frm);
+	u32 vcd_status;
+	u32 free_luma_dpb = 0;
+
+	output_vcd_frm->physical = (u8 *)dec_disp_info->y_addr;
+
+	if (decoder->codec.codec == VCD_CODEC_MPEG4 ||
+		decoder->codec.codec == VCD_CODEC_VC1 ||
+		decoder->codec.codec == VCD_CODEC_VC1_RCV ||
+		(decoder->codec.codec >= VCD_CODEC_DIVX_3 &&
+		 decoder->codec.codec <= VCD_CODEC_XVID)){
+		vidc_720p_decode_skip_frm_details(&free_luma_dpb);
+		if (free_luma_dpb)
+			output_vcd_frm->physical = (u8 *) free_luma_dpb;
+	}
+
+	vcd_status = ddl_decoder_dpb_transact(
+			decoder,
+			output_frame,
+			DDL_DPB_OP_MARK_BUSY);
+
+	if (vcd_status != VCD_S_SUCCESS) {
+		VIDC_LOGERR_STRING("CorruptedOutputBufferAddress");
+		ddl_hw_fatal_cb(ddl->ddl_context);
+		return vcd_status;
+	}
+
+	output_vcd_frm->ip_frm_tag = dec_disp_info->tag_top;
+	if (dec_disp_info->crop_exists == 0x1) {
+		output_vcd_frm->dec_op_prop.disp_frm.left =
+			dec_disp_info->crop_left_offset;
+		output_vcd_frm->dec_op_prop.disp_frm.top =
+			dec_disp_info->crop_top_offset;
+		output_vcd_frm->dec_op_prop.disp_frm.right =
+			dec_disp_info->img_size_x -
+			dec_disp_info->crop_right_offset;
+		output_vcd_frm->dec_op_prop.disp_frm.bottom =
+			dec_disp_info->img_size_y -
+			dec_disp_info->crop_bottom_offset;
+	} else {
+		output_vcd_frm->dec_op_prop.disp_frm.left = 0;
+		output_vcd_frm->dec_op_prop.disp_frm.top = 0;
+		output_vcd_frm->dec_op_prop.disp_frm.right =
+			dec_disp_info->img_size_x;
+		output_vcd_frm->dec_op_prop.disp_frm.bottom =
+			dec_disp_info->img_size_y;
+	}
+	if (!dec_disp_info->disp_is_interlace) {
+		output_vcd_frm->interlaced = false;
+		output_vcd_frm->intrlcd_ip_frm_tag = VCD_FRAMETAG_INVALID;
+	} else {
+		output_vcd_frm->interlaced = true;
+		output_vcd_frm->intrlcd_ip_frm_tag =
+			dec_disp_info->tag_bottom;
+	}
+
+	output_vcd_frm->offset = 0;
+	output_vcd_frm->data_len = decoder->y_cb_cr_size;
+	if (free_luma_dpb) {
+		output_vcd_frm->data_len = 0;
+		output_vcd_frm->flags |= VCD_FRAME_FLAG_DECODEONLY;
+	}
+	output_vcd_frm->flags |= VCD_FRAME_FLAG_ENDOFFRAME;
+	ddl_process_decoder_metadata(ddl);
+	output_frame->frm_trans_end = frame_transact_end;
+
+	if (vidc_msg_timing)
+		ddl_calc_core_proc_time(__func__, DEC_OP_TIME);
+
+	ddl->ddl_context->ddl_callback(
+		VCD_EVT_RESP_OUTPUT_DONE,
+		vcd_status,
+		output_frame,
+		sizeof(struct ddl_frame_data_tag),
+		(void *)ddl,
+		ddl->ddl_context->client_data);
+	return vcd_status;
+}
+
+static u32 ddl_get_frame(struct vcd_frame_data *frame, u32 frametype)
+{
+	enum vidc_720p_frame vidc_frame =
+		(enum vidc_720p_frame)frametype;
+	u32 status = true;
+
+	switch (vidc_frame) {
+	case VIDC_720P_IFRAME:
+		{
+			frame->flags |= VCD_FRAME_FLAG_SYNCFRAME;
+			frame->frame = VCD_FRAME_I;
+			break;
+		}
+	case VIDC_720P_PFRAME:
+		{
+			frame->frame = VCD_FRAME_P;
+			break;
+		}
+	case VIDC_720P_BFRAME:
+		{
+			frame->frame = VCD_FRAME_B;
+			break;
+		}
+	case VIDC_720P_NOTCODED:
+		{
+			frame->frame = VCD_FRAME_NOTCODED;
+			frame->data_len = 0;
+			break;
+		}
+	default:
+		{
+			VIDC_LOG_STRING("CRITICAL-FRAMETYPE");
+			status = false;
+			break;
+		}
+	}
+	return status;
+}
+
+static void ddl_getmpeg4_declevel(enum vcd_codec_level *codec_level,
+	u32 level)
+{
+	switch (level) {
+	case VIDC_720P_MPEG4_LEVEL0:
+		{
+			*codec_level = VCD_LEVEL_MPEG4_0;
+			break;
+		}
+	case VIDC_720P_MPEG4_LEVEL0b:
+		{
+			*codec_level = VCD_LEVEL_MPEG4_0b;
+			break;
+		}
+	case VIDC_720P_MPEG4_LEVEL1:
+		{
+			*codec_level = VCD_LEVEL_MPEG4_1;
+			break;
+		}
+	case VIDC_720P_MPEG4_LEVEL2:
+		{
+			*codec_level = VCD_LEVEL_MPEG4_2;
+			break;
+		}
+	case VIDC_720P_MPEG4_LEVEL3:
+		{
+			*codec_level = VCD_LEVEL_MPEG4_3;
+			break;
+		}
+	case VIDC_720P_MPEG4_LEVEL3b:
+		{
+			*codec_level = VCD_LEVEL_MPEG4_3b;
+			break;
+		}
+	case VIDC_720P_MPEG4_LEVEL4a:
+		{
+			*codec_level = VCD_LEVEL_MPEG4_4a;
+			break;
+		}
+	case VIDC_720P_MPEG4_LEVEL5:
+		{
+			*codec_level = VCD_LEVEL_MPEG4_5;
+			break;
+		}
+	case VIDC_720P_MPEG4_LEVEL6:
+		{
+			*codec_level = VCD_LEVEL_MPEG4_6;
+			break;
+		}
+	}
+}
+
+static void ddl_geth264_declevel(enum vcd_codec_level *codec_level,
+	u32 level)
+{
+	switch (level) {
+	case VIDC_720P_H264_LEVEL1:
+		{
+			*codec_level = VCD_LEVEL_H264_1;
+			break;
+		}
+	case VIDC_720P_H264_LEVEL1b:
+		{
+			*codec_level = VCD_LEVEL_H264_1b;
+			break;
+		}
+	case VIDC_720P_H264_LEVEL1p1:
+		{
+			*codec_level = VCD_LEVEL_H264_1p1;
+			break;
+		}
+	case VIDC_720P_H264_LEVEL1p2:
+		{
+			*codec_level = VCD_LEVEL_H264_1p2;
+			break;
+		}
+	case VIDC_720P_H264_LEVEL1p3:
+		{
+			*codec_level = VCD_LEVEL_H264_1p3;
+			break;
+		}
+	case VIDC_720P_H264_LEVEL2:
+		{
+			*codec_level = VCD_LEVEL_H264_2;
+			break;
+		}
+	case VIDC_720P_H264_LEVEL2p1:
+		{
+			*codec_level = VCD_LEVEL_H264_2p1;
+			break;
+		}
+	case VIDC_720P_H264_LEVEL2p2:
+		{
+			*codec_level = VCD_LEVEL_H264_2p2;
+			break;
+		}
+	case VIDC_720P_H264_LEVEL3:
+		{
+			*codec_level = VCD_LEVEL_H264_3;
+			break;
+		}
+	case VIDC_720P_H264_LEVEL3p1:
+		{
+			*codec_level = VCD_LEVEL_H264_3p1;
+			break;
+		}
+	case VIDC_720P_H264_LEVEL3p2:
+		{
+			*codec_level = VCD_LEVEL_H264_3p2;
+			break;
+		}
+	}
+}
+
+static void ddl_get_vc1_dec_level(
+	enum vcd_codec_level *codec_level, u32 level,
+	enum vcd_codec_profile vc1_profile)
+{
+	if (vc1_profile == VCD_PROFILE_VC1_ADVANCE)	{
+		switch (level) {
+		case VIDC_720P_VC1_LEVEL0:
+			{
+				*codec_level = VCD_LEVEL_VC1_A_0;
+				break;
+			}
+		case VIDC_720P_VC1_LEVEL1:
+			{
+				*codec_level = VCD_LEVEL_VC1_A_1;
+				break;
+			}
+		case VIDC_720P_VC1_LEVEL2:
+			{
+				*codec_level = VCD_LEVEL_VC1_A_2;
+				break;
+			}
+		case VIDC_720P_VC1_LEVEL3:
+			{
+				*codec_level = VCD_LEVEL_VC1_A_3;
+				break;
+			}
+		case VIDC_720P_VC1_LEVEL4:
+			{
+				*codec_level = VCD_LEVEL_VC1_A_4;
+				break;
+			}
+		}
+		return;
+	} else if (vc1_profile == VCD_PROFILE_VC1_MAIN) {
+		switch (level) {
+		case VIDC_720P_VC1_LEVEL_LOW:
+			{
+				*codec_level = VCD_LEVEL_VC1_M_LOW;
+				break;
+			}
+		case VIDC_720P_VC1_LEVEL_MED:
+			{
+				*codec_level = VCD_LEVEL_VC1_M_MEDIUM;
+				break;
+			}
+		case VIDC_720P_VC1_LEVEL_HIGH:
+			{
+				*codec_level = VCD_LEVEL_VC1_M_HIGH;
+				break;
+			}
+		}
+	} else if (vc1_profile == VCD_PROFILE_VC1_SIMPLE) {
+		switch (level) {
+		case VIDC_720P_VC1_LEVEL_LOW:
+			{
+				*codec_level = VCD_LEVEL_VC1_S_LOW;
+				break;
+			}
+		case VIDC_720P_VC1_LEVEL_MED:
+			{
+				*codec_level = VCD_LEVEL_VC1_S_MEDIUM;
+				break;
+			}
+		}
+	}
+}
+
+static void ddl_get_mpeg2_dec_level(enum vcd_codec_level *codec_level,
+								 u32 level)
+{
+	switch (level) {
+	case VIDCL_720P_MPEG2_LEVEL_LOW:
+		{
+			*codec_level = VCD_LEVEL_MPEG2_LOW;
+			break;
+		}
+	case VIDCL_720P_MPEG2_LEVEL_MAIN:
+		{
+			*codec_level = VCD_LEVEL_MPEG2_MAIN;
+			break;
+		}
+	case VIDCL_720P_MPEG2_LEVEL_HIGH14:
+		{
+			*codec_level = VCD_LEVEL_MPEG2_HIGH_14;
+			break;
+		}
+	}
+}
+
+static void ddl_getdec_profilelevel(struct ddl_decoder_data *decoder,
+		u32 profile, u32 level)
+{
+	enum vcd_codec_profile codec_profile = VCD_PROFILE_UNKNOWN;
+	enum vcd_codec_level codec_level = VCD_LEVEL_UNKNOWN;
+
+	switch (decoder->codec.codec) {
+	case VCD_CODEC_MPEG4:
+		{
+			if (profile == VIDC_720P_PROFILE_MPEG4_SP)
+				codec_profile = VCD_PROFILE_MPEG4_SP;
+			else if (profile == VIDC_720P_PROFILE_MPEG4_ASP)
+				codec_profile = VCD_PROFILE_MPEG4_ASP;
+
+			ddl_getmpeg4_declevel(&codec_level, level);
+			break;
+		}
+	case VCD_CODEC_H264:
+		{
+			if (profile == VIDC_720P_PROFILE_H264_BASELINE)
+				codec_profile = VCD_PROFILE_H264_BASELINE;
+			else if (profile == VIDC_720P_PROFILE_H264_MAIN)
+				codec_profile = VCD_PROFILE_H264_MAIN;
+			else if (profile == VIDC_720P_PROFILE_H264_HIGH)
+				codec_profile = VCD_PROFILE_H264_HIGH;
+			ddl_geth264_declevel(&codec_level, level);
+			break;
+		}
+	default:
+	case VCD_CODEC_H263:
+		{
+			break;
+		}
+	case VCD_CODEC_VC1:
+	case VCD_CODEC_VC1_RCV:
+		{
+			if (profile == VIDC_720P_PROFILE_VC1_SP)
+				codec_profile = VCD_PROFILE_VC1_SIMPLE;
+			else if (profile == VIDC_720P_PROFILE_VC1_MAIN)
+				codec_profile = VCD_PROFILE_VC1_MAIN;
+			else if (profile == VIDC_720P_PROFILE_VC1_ADV)
+				codec_profile = VCD_PROFILE_VC1_ADVANCE;
+			ddl_get_vc1_dec_level(&codec_level, level, profile);
+			break;
+		}
+	case VCD_CODEC_MPEG2:
+		{
+			if (profile == VIDC_720P_PROFILE_MPEG2_MAIN)
+				codec_profile = VCD_PROFILE_MPEG2_MAIN;
+			else if (profile == VIDC_720P_PROFILE_MPEG2_SP)
+				codec_profile = VCD_PROFILE_MPEG2_SIMPLE;
+			ddl_get_mpeg2_dec_level(&codec_level, level);
+			break;
+		}
+	}
+
+	decoder->profile.profile = codec_profile;
+	decoder->level.level = codec_level;
+}
diff --git a/drivers/video/msm/vidc/720p/ddl/vcd_ddl_metadata.c b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_metadata.c
new file mode 100644
index 0000000..376ea6d
--- /dev/null
+++ b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_metadata.c
@@ -0,0 +1,580 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "vidc_type.h"
+#include "vcd_ddl_utils.h"
+#include "vcd_ddl_metadata.h"
+
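+/*
+ * Returns a pointer to the three-word header entry (version, port, type)
+ * kept for the given metadata type inside the client's shared metadata
+ * input buffer; decoder and encoder use different base offsets and entry
+ * orderings, as reflected in the skip_words values below.
+ */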
+static u32 *ddl_metadata_hdr_entry(struct ddl_client_context *ddl,
+				   u32 meta_data)
+{
+	u32 skip_words = 0;
+	u32 *buffer;
+
+	if (ddl->decoding) {
+		buffer = (u32 *)
+		    ddl->codec_data.decoder.meta_data_input.
+		    align_virtual_addr;
+		skip_words = 32 + 1;
+		buffer += skip_words;
+
+		switch (meta_data) {
+		default:
+		case VCD_METADATA_DATANONE:
+			{
+				skip_words = 0;
+				break;
+			}
+		case VCD_METADATA_QPARRAY:
+			{
+				skip_words = 3;
+				break;
+			}
+		case VCD_METADATA_CONCEALMB:
+			{
+				skip_words = 6;
+				break;
+			}
+		case VCD_METADATA_VC1:
+			{
+				skip_words = 9;
+				break;
+			}
+		case VCD_METADATA_SEI:
+			{
+				skip_words = 12;
+				break;
+			}
+		case VCD_METADATA_VUI:
+			{
+				skip_words = 15;
+				break;
+			}
+		case VCD_METADATA_PASSTHROUGH:
+			{
+				skip_words = 18;
+				break;
+			}
+		case VCD_METADATA_QCOMFILLER:
+			{
+				skip_words = 21;
+				break;
+			}
+		}
+	} else {
+		buffer = (u32 *)
+		    ddl->codec_data.encoder.meta_data_input.
+		    align_virtual_addr;
+		skip_words = 2;
+		buffer += skip_words;
+
+		switch (meta_data) {
+		default:
+		case VCD_METADATA_DATANONE:
+			{
+				skip_words = 0;
+				break;
+			}
+		case VCD_METADATA_ENC_SLICE:
+			{
+				skip_words = 3;
+				break;
+			}
+		case VCD_METADATA_QCOMFILLER:
+			{
+				skip_words = 6;
+				break;
+			}
+		}
+
+	}
+
+	buffer += skip_words;
+	return buffer;
+}
+
+void ddl_set_default_meta_data_hdr(struct ddl_client_context *ddl)
+{
+	struct ddl_buf_addr *main_buffer =
+	    &ddl->ddl_context->metadata_shared_input;
+	struct ddl_buf_addr *client_buffer;
+	u32 *hdr_entry;
+
+	if (ddl->decoding)
+		client_buffer = &(ddl->codec_data.decoder.meta_data_input);
+	else
+		client_buffer = &(ddl->codec_data.encoder.meta_data_input);
+
+	DDL_METADATA_CLIENT_INPUTBUF(main_buffer, client_buffer,
+				     ddl->channel_id);
+
+	hdr_entry = ddl_metadata_hdr_entry(ddl, VCD_METADATA_QCOMFILLER);
+	hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101;
+	hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1;
+	hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_QCOMFILLER;
+
+	hdr_entry = ddl_metadata_hdr_entry(ddl, VCD_METADATA_DATANONE);
+	hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101;
+	hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1;
+	hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_DATANONE;
+
+	if (ddl->decoding) {
+		hdr_entry =
+		    ddl_metadata_hdr_entry(ddl, VCD_METADATA_QPARRAY);
+		hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101;
+		hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1;
+		hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_QPARRAY;
+
+		hdr_entry =
+		    ddl_metadata_hdr_entry(ddl, VCD_METADATA_CONCEALMB);
+		hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101;
+		hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1;
+		hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_CONCEALMB;
+
+		hdr_entry = ddl_metadata_hdr_entry(ddl, VCD_METADATA_SEI);
+		hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101;
+		hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1;
+		hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_SEI;
+
+		hdr_entry = ddl_metadata_hdr_entry(ddl, VCD_METADATA_VUI);
+		hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101;
+		hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1;
+		hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_VUI;
+
+		hdr_entry = ddl_metadata_hdr_entry(ddl, VCD_METADATA_VC1);
+		hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101;
+		hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1;
+		hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_VC1;
+
+		hdr_entry =
+		    ddl_metadata_hdr_entry(ddl, VCD_METADATA_PASSTHROUGH);
+		hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101;
+		hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1;
+		hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] =
+		    VCD_METADATA_PASSTHROUGH;
+
+	} else {
+		hdr_entry =
+		    ddl_metadata_hdr_entry(ddl, VCD_METADATA_ENC_SLICE);
+		hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101;
+		hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1;
+		hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] =
+		    VCD_METADATA_ENC_SLICE;
+	}
+}
+
+static u32 ddl_supported_metadata_flag(struct ddl_client_context *ddl)
+{
+	u32 flag = 0;
+
+	if (ddl->decoding) {
+		enum vcd_codec codec =
+		    ddl->codec_data.decoder.codec.codec;
+
+		flag |= (VCD_METADATA_CONCEALMB |
+			   VCD_METADATA_PASSTHROUGH | VCD_METADATA_QPARRAY);
+		if (codec == VCD_CODEC_H264) {
+			flag |= (VCD_METADATA_SEI | VCD_METADATA_VUI);
+		} else if (codec == VCD_CODEC_VC1 ||
+			   codec == VCD_CODEC_VC1_RCV) {
+			flag |= VCD_METADATA_VC1;
+		}
+	} else {
+		flag |= VCD_METADATA_ENC_SLICE;
+	}
+
+	return flag;
+}
+
+void ddl_set_default_metadata_flag(struct ddl_client_context *ddl)
+{
+	if (ddl->decoding)
+		ddl->codec_data.decoder.meta_data_enable_flag = 0;
+	else
+		ddl->codec_data.encoder.meta_data_enable_flag = 0;
+}
+
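+/*
+ * Computes the decoder's per-frame metadata suffix: every enabled metadata
+ * type contributes an aligned header-plus-payload block, a terminating
+ * DATANONE entry and extra padding are added, and the resulting suffix is
+ * appended to the client's output buffer size requirement.
+ */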
+void ddl_set_default_decoder_metadata_buffer_size(
+	struct ddl_decoder_data *decoder,
+	struct vcd_property_frame_size *frame_size,
+	struct vcd_buffer_requirement *output_buf_req)
+{
+	u32 flag = decoder->meta_data_enable_flag;
+	u32 suffix = 0;
+	size_t sz = 0;
+
+	if (!flag) {
+		decoder->suffix = 0;
+		return;
+	}
+
+	if (flag & VCD_METADATA_QPARRAY) {
+		u32 num_of_mb =
+		    ((frame_size->width * frame_size->height) >> 8);
+		sz = DDL_METADATA_HDR_SIZE;
+		sz += num_of_mb;
+		DDL_METADATA_ALIGNSIZE(sz);
+		suffix += sz;
+	}
+	if (flag & VCD_METADATA_CONCEALMB) {
+		u32 num_of_mb =
+		    ((frame_size->width * frame_size->height) >> 8);
+		sz = DDL_METADATA_HDR_SIZE + (num_of_mb >> 3);
+		DDL_METADATA_ALIGNSIZE(sz);
+		suffix += sz;
+	}
+	if (flag & VCD_METADATA_VC1) {
+		sz = DDL_METADATA_HDR_SIZE;
+		sz += DDL_METADATA_VC1_PAYLOAD_SIZE;
+		DDL_METADATA_ALIGNSIZE(sz);
+		suffix += sz;
+	}
+	if (flag & VCD_METADATA_SEI) {
+		sz = DDL_METADATA_HDR_SIZE;
+		sz += DDL_METADATA_SEI_PAYLOAD_SIZE;
+		DDL_METADATA_ALIGNSIZE(sz);
+		suffix += (sz * DDL_METADATA_SEI_MAX);
+	}
+	if (flag & VCD_METADATA_VUI) {
+		sz = DDL_METADATA_HDR_SIZE;
+		sz += DDL_METADATA_VUI_PAYLOAD_SIZE;
+		DDL_METADATA_ALIGNSIZE(sz);
+		suffix += (sz);
+	}
+	if (flag & VCD_METADATA_PASSTHROUGH) {
+		sz = DDL_METADATA_HDR_SIZE;
+		sz += DDL_METADATA_PASSTHROUGH_PAYLOAD_SIZE;
+		DDL_METADATA_ALIGNSIZE(sz);
+		suffix += (sz);
+	}
+	sz = DDL_METADATA_EXTRADATANONE_SIZE;
+	DDL_METADATA_ALIGNSIZE(sz);
+	suffix += (sz);
+
+	suffix += DDL_METADATA_EXTRAPAD_SIZE;
+	DDL_METADATA_ALIGNSIZE(suffix);
+
+	decoder->suffix = suffix;
+	output_buf_req->sz += suffix;
+	return;
+}
+
+void ddl_set_default_encoder_metadata_buffer_size(struct ddl_encoder_data
+						  *encoder)
+{
+	u32 flag = encoder->meta_data_enable_flag;
+	u32 suffix = 0;
+	size_t sz = 0;
+
+	if (!flag) {
+		encoder->suffix = 0;
+		return;
+	}
+
+	if (flag & VCD_METADATA_ENC_SLICE) {
+		u32 num_of_mb = (encoder->frame_size.width *
+				   encoder->frame_size.height / 16 / 16);
+		sz = DDL_METADATA_HDR_SIZE;
+
+		sz += 4;
+
+		sz += (8 * num_of_mb);
+		DDL_METADATA_ALIGNSIZE(sz);
+		suffix += sz;
+	}
+
+	sz = DDL_METADATA_EXTRADATANONE_SIZE;
+	DDL_METADATA_ALIGNSIZE(sz);
+	suffix += (sz);
+
+	suffix += DDL_METADATA_EXTRAPAD_SIZE;
+	DDL_METADATA_ALIGNSIZE(suffix);
+
+	encoder->suffix = suffix;
+	encoder->output_buf_req.sz += suffix;
+}
+
+u32 ddl_set_metadata_params(struct ddl_client_context *ddl,
+			    struct vcd_property_hdr *property_hdr,
+			    void *property_value)
+{
+	u32 vcd_status = VCD_ERR_ILLEGAL_PARM;
+	if (property_hdr->prop_id == VCD_I_METADATA_ENABLE) {
+		struct vcd_property_meta_data_enable *meta_data_enable =
+		    (struct vcd_property_meta_data_enable *)
+		    property_value;
+		u32 *meta_data_enable_flag;
+		enum vcd_codec codec;
+		if (ddl->decoding) {
+			meta_data_enable_flag =
+			    &(ddl->codec_data.decoder.
+			      meta_data_enable_flag);
+			codec = ddl->codec_data.decoder.codec.codec;
+		} else {
+			meta_data_enable_flag =
+			    &(ddl->codec_data.encoder.
+			      meta_data_enable_flag);
+			codec = ddl->codec_data.encoder.codec.codec;
+		}
+		if (sizeof(struct vcd_property_meta_data_enable) ==
+		    property_hdr->sz &&
+		    DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN) &&
+					codec) {
+			u32 flag = ddl_supported_metadata_flag(ddl);
+			flag &= (meta_data_enable->meta_data_enable_flag);
+			if (flag)
+				flag |= DDL_METADATA_MANDATORY;
+			if (flag != *meta_data_enable_flag) {
+				*meta_data_enable_flag = flag;
+				if (ddl->decoding) {
+					ddl_set_default_decoder_buffer_req
+						(&ddl->codec_data.decoder,
+						 true);
+				} else {
+					ddl_set_default_encoder_buffer_req
+						(&ddl->codec_data.encoder);
+				}
+			}
+			vcd_status = VCD_S_SUCCESS;
+		}
+	} else if (property_hdr->prop_id == VCD_I_METADATA_HEADER) {
+		struct vcd_property_metadata_hdr *hdr =
+		    (struct vcd_property_metadata_hdr *)property_value;
+		if (sizeof(struct vcd_property_metadata_hdr) ==
+		    property_hdr->sz) {
+			u32 flag = ddl_supported_metadata_flag(ddl);
+			flag |= DDL_METADATA_MANDATORY;
+			flag &= hdr->meta_data_id;
+			if (!(flag & (flag - 1))) {
+				u32 *hdr_entry =
+				    ddl_metadata_hdr_entry(ddl, flag);
+				hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] =
+				    hdr->version;
+				hdr_entry[DDL_METADATA_HDR_PORT_INDEX] =
+				    hdr->port_index;
+				hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] =
+				    hdr->type;
+				vcd_status = VCD_S_SUCCESS;
+			}
+		}
+	}
+	return vcd_status;
+}
+
+u32 ddl_get_metadata_params(struct ddl_client_context *ddl,
+	struct vcd_property_hdr *property_hdr,
+	void	*property_value)
+{
+	u32 vcd_status = VCD_ERR_ILLEGAL_PARM;
+	if (property_hdr->prop_id == VCD_I_METADATA_ENABLE &&
+		sizeof(struct vcd_property_meta_data_enable)
+		== property_hdr->sz) {
+		struct vcd_property_meta_data_enable *meta_data_enable =
+			(struct vcd_property_meta_data_enable *)
+			property_value;
+		meta_data_enable->meta_data_enable_flag =
+			((ddl->decoding) ?
+			(ddl->codec_data.decoder.meta_data_enable_flag)
+			: (ddl->codec_data.encoder.meta_data_enable_flag));
+		vcd_status = VCD_S_SUCCESS;
+	} else if (property_hdr->prop_id == VCD_I_METADATA_HEADER &&
+		sizeof(struct vcd_property_metadata_hdr) ==
+		property_hdr->sz) {
+		struct vcd_property_metadata_hdr *hdr =
+			(struct vcd_property_metadata_hdr *)
+			property_value;
+		u32 flag = ddl_supported_metadata_flag(ddl);
+		flag |= DDL_METADATA_MANDATORY;
+		flag &= hdr->meta_data_id;
+		if (!(flag & (flag - 1))) {
+			u32 *hdr_entry = ddl_metadata_hdr_entry(ddl,
+				flag);
+			hdr->version =
+			hdr_entry[DDL_METADATA_HDR_VERSION_INDEX];
+			hdr->port_index =
+			hdr_entry[DDL_METADATA_HDR_PORT_INDEX];
+			hdr->type =
+				hdr_entry[DDL_METADATA_HDR_TYPE_INDEX];
+			vcd_status = VCD_S_SUCCESS;
+		}
+	}
+	return vcd_status;
+}
+
+void ddl_metadata_enable(struct ddl_client_context *ddl)
+{
+	u32 flag, hal_flag = 0;
+	u32 *metadata_input;
+	if (ddl->decoding) {
+		flag = ddl->codec_data.decoder.meta_data_enable_flag;
+		metadata_input =
+		    ddl->codec_data.decoder.meta_data_input.
+		    align_physical_addr;
+	} else {
+		flag = ddl->codec_data.encoder.meta_data_enable_flag;
+		metadata_input =
+		    ddl->codec_data.encoder.meta_data_input.
+		    align_physical_addr;
+	}
+	if (flag) {
+		if (flag & VCD_METADATA_QPARRAY)
+			hal_flag |= VIDC_720P_METADATA_ENABLE_QP;
+		if (flag & VCD_METADATA_CONCEALMB)
+			hal_flag |= VIDC_720P_METADATA_ENABLE_CONCEALMB;
+		if (flag & VCD_METADATA_VC1)
+			hal_flag |= VIDC_720P_METADATA_ENABLE_VC1;
+		if (flag & VCD_METADATA_SEI)
+			hal_flag |= VIDC_720P_METADATA_ENABLE_SEI;
+		if (flag & VCD_METADATA_VUI)
+			hal_flag |= VIDC_720P_METADATA_ENABLE_VUI;
+		if (flag & VCD_METADATA_ENC_SLICE)
+			hal_flag |= VIDC_720P_METADATA_ENABLE_ENCSLICE;
+		if (flag & VCD_METADATA_PASSTHROUGH)
+			hal_flag |= VIDC_720P_METADATA_ENABLE_PASSTHROUGH;
+	} else {
+		metadata_input = 0;
+	}
+	vidc_720p_metadata_enable(hal_flag, metadata_input);
+}
+
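+/*
+ * Reserve the hardware metadata area at the tail of the encoder output
+ * buffer: record the suffix size and metadata start address in the
+ * metadata input buffer and return the aligned end of the space left
+ * for the encoded bitstream.
+ */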
+u32 ddl_encode_set_metadata_output_buf(struct ddl_client_context *ddl)
+{
+	struct ddl_encoder_data *encoder = &ddl->codec_data.encoder;
+	u32 *buffer;
+	struct vcd_frame_data *stream = &(ddl->output_frame.vcd_frm);
+	u32 ext_buffer_end, hw_metadata_start;
+
+	ext_buffer_end = (u32) stream->physical + stream->alloc_len;
+	if (!encoder->meta_data_enable_flag) {
+		ext_buffer_end &= ~(DDL_STREAMBUF_ALIGN_GUARD_BYTES);
+		return ext_buffer_end;
+	}
+	hw_metadata_start = (ext_buffer_end - encoder->suffix) &
+	    ~(DDL_STREAMBUF_ALIGN_GUARD_BYTES);
+
+	ext_buffer_end = (hw_metadata_start - 1) &
+	    ~(DDL_STREAMBUF_ALIGN_GUARD_BYTES);
+
+	buffer = encoder->meta_data_input.align_virtual_addr;
+
+	*buffer++ = encoder->suffix;
+
+	*buffer = hw_metadata_start;
+
+	encoder->meta_data_offset =
+	    hw_metadata_start - (u32) stream->physical;
+
+	return ext_buffer_end;
+}
+
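+/*
+ * Place decoder metadata after the YUV data of each decoded picture buffer
+ * and write the per-DPB metadata addresses into the metadata input buffer.
+ */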
+void ddl_decode_set_metadata_output(struct ddl_decoder_data *decoder)
+{
+	u32 *buffer;
+	u32 loopc;
+
+	if (!decoder->meta_data_enable_flag) {
+		decoder->meta_data_offset = 0;
+		return;
+	}
+
+	decoder->meta_data_offset = ddl_get_yuv_buffer_size(
+		&decoder->client_frame_size, &decoder->buf_format,
+		(!decoder->progressive_only), decoder->codec.codec);
+
+	buffer = decoder->meta_data_input.align_virtual_addr;
+
+	*buffer++ = decoder->suffix;
+
+	for (loopc = 0; loopc < decoder->dp_buf.no_of_dec_pic_buf;
+	     ++loopc) {
+		*buffer++ = (u32) (decoder->meta_data_offset + (u8 *)
+				     decoder->dp_buf.
+				     dec_pic_buffers[loopc].vcd_frm.
+				     physical);
+	}
+}
+
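+/*
+ * After an encode, set or clear VCD_FRAME_FLAG_EXTRADATA and, when metadata
+ * was produced, write a QCOMFILLER header that pads the gap between the end
+ * of the encoded bitstream and the hardware metadata area.
+ */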
+void ddl_process_encoder_metadata(struct ddl_client_context *ddl)
+{
+	struct ddl_encoder_data *encoder = &(ddl->codec_data.encoder);
+	struct vcd_frame_data *out_frame =
+	    &(ddl->output_frame.vcd_frm);
+	u32 *qfiller_hdr, *qfiller, start_addr;
+	u32 qfiller_size;
+
+	if (!encoder->meta_data_enable_flag) {
+		out_frame->flags &= ~(VCD_FRAME_FLAG_EXTRADATA);
+		return;
+	}
+
+	if (!encoder->enc_frame_info.metadata_exists) {
+		out_frame->flags &= ~(VCD_FRAME_FLAG_EXTRADATA);
+		return;
+	}
+	out_frame->flags |= VCD_FRAME_FLAG_EXTRADATA;
+
+	start_addr = (u32) ((u8 *) out_frame->virtual +
+			      out_frame->offset);
+	qfiller = (u32 *) ((out_frame->data_len + start_addr + 3) & ~3);
+
+	qfiller_size = (u32) ((encoder->meta_data_offset +
+				 (u8 *) out_frame->virtual) -
+				(u8 *) qfiller);
+
+	qfiller_hdr = ddl_metadata_hdr_entry(ddl, VCD_METADATA_QCOMFILLER);
+
+	*qfiller++ = qfiller_size;
+	*qfiller++ = qfiller_hdr[DDL_METADATA_HDR_VERSION_INDEX];
+	*qfiller++ = qfiller_hdr[DDL_METADATA_HDR_PORT_INDEX];
+	*qfiller++ = qfiller_hdr[DDL_METADATA_HDR_TYPE_INDEX];
+	*qfiller = (u32) (qfiller_size - DDL_METADATA_HDR_SIZE);
+}
+
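+/*
+ * After a decode, set or clear VCD_FRAME_FLAG_EXTRADATA and, when the frame
+ * data does not reach the metadata offset, write a QCOMFILLER header that
+ * pads the gap up to the hardware metadata area.
+ */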
+void ddl_process_decoder_metadata(struct ddl_client_context *ddl)
+{
+	struct ddl_decoder_data *decoder = &(ddl->codec_data.decoder);
+	struct vcd_frame_data *output_frame =
+	    &(ddl->output_frame.vcd_frm);
+	u32 *qfiller_hdr, *qfiller;
+	u32 qfiller_size;
+
+	if (!decoder->meta_data_enable_flag) {
+		output_frame->flags &= ~(VCD_FRAME_FLAG_EXTRADATA);
+		return;
+	}
+
+	if (!decoder->dec_disp_info.metadata_exists) {
+		output_frame->flags &= ~(VCD_FRAME_FLAG_EXTRADATA);
+		return;
+	}
+	output_frame->flags |= VCD_FRAME_FLAG_EXTRADATA;
+
+	if (output_frame->data_len != decoder->meta_data_offset) {
+		qfiller = (u32 *) ((u32) ((output_frame->data_len +
+					     output_frame->offset +
+					     (u8 *) output_frame->virtual) +
+					    3) & ~3);
+
+		qfiller_size = (u32) ((decoder->meta_data_offset +
+					 (u8 *) output_frame->virtual) -
+					(u8 *) qfiller);
+
+		qfiller_hdr =
+		    ddl_metadata_hdr_entry(ddl, VCD_METADATA_QCOMFILLER);
+		*qfiller++ = qfiller_size;
+		*qfiller++ = qfiller_hdr[DDL_METADATA_HDR_VERSION_INDEX];
+		*qfiller++ = qfiller_hdr[DDL_METADATA_HDR_PORT_INDEX];
+		*qfiller++ = qfiller_hdr[DDL_METADATA_HDR_TYPE_INDEX];
+		*qfiller = (u32) (qfiller_size - DDL_METADATA_HDR_SIZE);
+	}
+}
diff --git a/drivers/video/msm/vidc/720p/ddl/vcd_ddl_metadata.h b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_metadata.h
new file mode 100644
index 0000000..ed43861
--- /dev/null
+++ b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_metadata.h
@@ -0,0 +1,79 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _VCD_DDL_METADATA_H_
+#define _VCD_DDL_METADATA_H_
+
+#define DDL_MAX_DEC_METADATATYPE  (8)
+#define DDL_MAX_ENC_METADATATYPE  (3)
+
+#define DDL_METADATA_EXTRAPAD_SIZE (256)
+#define DDL_METADATA_HDR_SIZE (20)
+
+#define DDL_METADATA_EXTRADATANONE_SIZE (24)
+
+#define DDL_METADATA_ALIGNSIZE(x) ((x) = (((x) + 0x7) & ~0x7))
+
+#define DDL_METADATA_MANDATORY (VCD_METADATA_DATANONE | \
+				VCD_METADATA_QCOMFILLER)
+
+#define DDL_METADATA_VC1_PAYLOAD_SIZE (38*4)
+
+#define DDL_METADATA_SEI_PAYLOAD_SIZE (100)
+#define DDL_METADATA_SEI_MAX (5)
+
+#define DDL_METADATA_VUI_PAYLOAD_SIZE (256)
+
+#define DDL_METADATA_PASSTHROUGH_PAYLOAD_SIZE  (68)
+
+#define DDL_METADATA_CLIENT_INPUTBUFSIZE  (256)
+#define DDL_METADATA_TOTAL_INPUTBUFSIZE \
+	(DDL_METADATA_CLIENT_INPUTBUFSIZE * VCD_MAX_NO_CLIENT)
+
+#define DDL_METADATA_CLIENT_INPUTBUF(main_buffer, client_buffer, \
+		channel_id) \
+{ \
+  (client_buffer)->align_physical_addr = (u32 *)\
+	((u8 *)(main_buffer)->align_physical_addr + \
+	(DDL_METADATA_CLIENT_INPUTBUFSIZE * (channel_id)) \
+	); \
+  (client_buffer)->align_virtual_addr = (u32 *)\
+	((u8 *)(main_buffer)->align_virtual_addr + \
+	(DDL_METADATA_CLIENT_INPUTBUFSIZE * (channel_id)) \
+	); \
+  (client_buffer)->virtual_base_addr = 0; \
+}
+
+#define DDL_METADATA_HDR_VERSION_INDEX 0
+#define DDL_METADATA_HDR_PORT_INDEX    1
+#define DDL_METADATA_HDR_TYPE_INDEX    2
+
+
+void ddl_set_default_meta_data_hdr(struct ddl_client_context *ddl);
+u32 ddl_get_metadata_params(struct ddl_client_context	*ddl,
+	struct vcd_property_hdr *property_hdr, void *property_value);
+u32 ddl_set_metadata_params(struct ddl_client_context *ddl,
+			    struct vcd_property_hdr *property_hdr,
+			    void *property_value);
+void ddl_set_default_metadata_flag(struct ddl_client_context *ddl);
+void ddl_set_default_decoder_metadata_buffer_size
+    (struct ddl_decoder_data *decoder,
+	struct vcd_property_frame_size *frame_size,
+	struct vcd_buffer_requirement *output_buf_req);
+void ddl_set_default_encoder_metadata_buffer_size(struct ddl_encoder_data
+						  *encoder);
+void ddl_metadata_enable(struct ddl_client_context *ddl);
+u32 ddl_encode_set_metadata_output_buf(struct ddl_client_context *ddl);
+void ddl_decode_set_metadata_output(struct ddl_decoder_data *decoder);
+void ddl_process_encoder_metadata(struct ddl_client_context *ddl);
+void ddl_process_decoder_metadata(struct ddl_client_context *ddl);
+#endif
diff --git a/drivers/video/msm/vidc/720p/ddl/vcd_ddl_properties.c b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_properties.c
new file mode 100644
index 0000000..73dba03
--- /dev/null
+++ b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_properties.c
@@ -0,0 +1,1919 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "vidc_type.h"
+#include "vcd_ddl_utils.h"
+#include "vcd_ddl_metadata.h"
+
+static u32 ddl_set_dec_property(struct ddl_client_context *pddl,
+				struct vcd_property_hdr *property_hdr,
+				void *property_value);
+static u32 ddl_set_enc_property(struct ddl_client_context *pddl,
+				struct vcd_property_hdr *property_hdr,
+				void *property_value);
+static u32 ddl_get_dec_property(struct ddl_client_context *pddl,
+				struct vcd_property_hdr *property_hdr,
+				void *property_value);
+static u32 ddl_get_enc_property(struct ddl_client_context *pddl,
+				struct vcd_property_hdr *property_hdr,
+				void *property_value);
+static u32 ddl_set_enc_dynamic_property(struct ddl_client_context *ddl,
+				struct vcd_property_hdr *property_hdr,
+				void *property_value);
+static void ddl_set_default_enc_property(struct ddl_client_context *ddl);
+static void ddl_set_default_enc_profile(struct ddl_encoder_data
+					*encoder);
+static void ddl_set_default_enc_level(struct ddl_encoder_data *encoder);
+static void ddl_set_default_enc_vop_timing(struct ddl_encoder_data
+					   *encoder);
+static void ddl_set_default_enc_intra_period(struct ddl_encoder_data
+					     *encoder);
+static void ddl_set_default_enc_rc_params(struct ddl_encoder_data
+					  *encoder);
+static u32 ddl_valid_buffer_requirement(struct vcd_buffer_requirement
+					*original_buf_req,
+					struct vcd_buffer_requirement
+					*req_buf_req);
+static u32 ddl_decoder_min_num_dpb(struct ddl_decoder_data *decoder);
+static u32 ddl_set_dec_buffers
+    (struct ddl_decoder_data *decoder,
+     struct ddl_property_dec_pic_buffers *dpb);
+
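+/*
+ * Entry point for setting a client property: validates the arguments, the
+ * DDL context and the client handle, then dispatches to the decoder or
+ * encoder specific handler.
+ */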
+u32 ddl_set_property(u32 *ddl_handle,
+     struct vcd_property_hdr *property_hdr, void *property_value)
+{
+	u32 vcd_status;
+	struct ddl_context *ddl_context;
+	struct ddl_client_context *ddl =
+	    (struct ddl_client_context *)ddl_handle;
+
+	if (!property_hdr || !property_value) {
+		VIDC_LOGERR_STRING("ddl_set_prop:Bad_argument");
+		return VCD_ERR_ILLEGAL_PARM;
+	}
+	ddl_context = ddl_get_context();
+
+	if (!DDL_IS_INITIALIZED(ddl_context)) {
+		VIDC_LOGERR_STRING("ddl_set_prop:Not_inited");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+
+	if (!ddl) {
+		VIDC_LOGERR_STRING("ddl_set_prop:Bad_handle");
+		return VCD_ERR_BAD_HANDLE;
+	}
+	if (ddl->decoding) {
+		vcd_status =
+		    ddl_set_dec_property(ddl, property_hdr,
+					 property_value);
+	} else {
+		vcd_status =
+		    ddl_set_enc_property(ddl, property_hdr,
+					 property_value);
+	}
+	if (vcd_status)
+		VIDC_LOGERR_STRING("ddl_set_prop:FAILED");
+
+	return vcd_status;
+}
+
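+/*
+ * Entry point for querying a client property. DDL_I_CAPABILITY is answered
+ * without a client handle; everything else is dispatched to the decoder or
+ * encoder specific handler.
+ */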
+u32 ddl_get_property(u32 *ddl_handle,
+     struct vcd_property_hdr *property_hdr, void *property_value)
+{
+
+	u32 vcd_status = VCD_ERR_ILLEGAL_PARM;
+	struct ddl_context *ddl_context;
+	struct ddl_client_context *ddl =
+	    (struct ddl_client_context *)ddl_handle;
+
+	if (!property_hdr || !property_value)
+		return VCD_ERR_ILLEGAL_PARM;
+
+	if (property_hdr->prop_id == DDL_I_CAPABILITY) {
+		if (sizeof(struct ddl_property_capability) ==
+		    property_hdr->sz) {
+			struct ddl_property_capability *ddl_capability =
+			    (struct ddl_property_capability *)
+			    property_value;
+			ddl_capability->max_num_client = VCD_MAX_NO_CLIENT;
+			ddl_capability->exclusive =
+				VCD_COMMAND_EXCLUSIVE;
+			ddl_capability->frame_command_depth =
+				VCD_FRAME_COMMAND_DEPTH;
+			ddl_capability->general_command_depth =
+				VCD_GENERAL_COMMAND_DEPTH;
+			ddl_capability->ddl_time_out_in_ms =
+				DDL_HW_TIMEOUT_IN_MS;
+			vcd_status = VCD_S_SUCCESS;
+		}
+		return vcd_status;
+	}
+	ddl_context = ddl_get_context();
+	if (!DDL_IS_INITIALIZED(ddl_context))
+		return VCD_ERR_ILLEGAL_OP;
+
+	if (!ddl)
+		return VCD_ERR_BAD_HANDLE;
+
+	if (ddl->decoding) {
+		vcd_status =
+		    ddl_get_dec_property(ddl, property_hdr,
+					 property_value);
+	} else {
+		vcd_status =
+		    ddl_get_enc_property(ddl, property_hdr,
+					 property_value);
+	}
+	if (vcd_status)
+		VIDC_LOGERR_STRING("ddl_get_prop:FAILED");
+
+	return vcd_status;
+}
+
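+/*
+ * A decoder may start only when a codec is selected and either a sequence
+ * header is supplied or the client has configured the frame dimensions.
+ */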
+u32 ddl_decoder_ready_to_start(struct ddl_client_context *ddl,
+     struct vcd_sequence_hdr *header)
+{
+	struct ddl_decoder_data *decoder = &(ddl->codec_data.decoder);
+	if (!decoder->codec.codec) {
+		VIDC_LOGERR_STRING("ddl_dec_start_check:Codec_not_set");
+		return false;
+	}
+	if ((!header) &&
+	    (!decoder->client_frame_size.height ||
+	     !decoder->client_frame_size.width)
+	    ) {
+		VIDC_LOGERR_STRING
+		    ("ddl_dec_start_check:Client_height_width_default");
+		return false;
+	}
+	return true;
+}
+
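+/*
+ * An encoder may start only when the codec, frame size, frame rate and
+ * target bit rate have all been configured.
+ */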
+u32 ddl_encoder_ready_to_start(struct ddl_client_context *ddl)
+{
+	struct ddl_encoder_data *encoder = &(ddl->codec_data.encoder);
+
+	if (!encoder->codec.codec ||
+	    !encoder->frame_size.height ||
+	    !encoder->frame_size.width ||
+	    !encoder->frame_rate.fps_denominator ||
+	    !encoder->frame_rate.fps_numerator ||
+	    !encoder->target_bit_rate.target_bitrate) {
+		return false;
+	}
+	return true;
+}
+
+static u32 ddl_set_dec_property
+    (struct ddl_client_context *ddl,
+     struct vcd_property_hdr *property_hdr, void *property_value) {
+	u32 vcd_status = VCD_ERR_ILLEGAL_PARM;
+	struct ddl_decoder_data *decoder = &(ddl->codec_data.decoder);
+	switch (property_hdr->prop_id) {
+	case DDL_I_DPB_RELEASE:
+		{
+			if (sizeof(struct ddl_frame_data_tag) ==
+			    property_hdr->sz
+			    && decoder->dp_buf.no_of_dec_pic_buf) {
+				vcd_status =
+				    ddl_decoder_dpb_transact(decoder,
+					     (struct ddl_frame_data_tag *)
+					     property_value,
+					     DDL_DPB_OP_MARK_FREE);
+			}
+			break;
+		}
+	case DDL_I_DPB:
+		{
+			struct ddl_property_dec_pic_buffers *dpb =
+			    (struct ddl_property_dec_pic_buffers *)
+			    property_value;
+
+			if (sizeof(struct ddl_property_dec_pic_buffers) ==
+			    property_hdr->sz &&
+			    (DDLCLIENT_STATE_IS
+			     (ddl, DDL_CLIENT_WAIT_FOR_INITCODEC)
+			     || DDLCLIENT_STATE_IS(ddl,
+						   DDL_CLIENT_WAIT_FOR_DPB)
+			    ) &&
+			    dpb->no_of_dec_pic_buf >=
+			    decoder->client_output_buf_req.actual_count) {
+				vcd_status =
+				    ddl_set_dec_buffers(decoder, dpb);
+			}
+			break;
+		}
+	case DDL_I_REQ_OUTPUT_FLUSH:
+		{
+			if (sizeof(u32) == property_hdr->sz) {
+				decoder->dynamic_prop_change |=
+				    DDL_DEC_REQ_OUTPUT_FLUSH;
+				decoder->dpb_mask.client_mask = 0;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case DDL_I_INPUT_BUF_REQ:
+		{
+			struct vcd_buffer_requirement *buffer_req =
+			    (struct vcd_buffer_requirement *)
+			    property_value;
+			if (sizeof(struct vcd_buffer_requirement) ==
+			    property_hdr->sz &&
+			    (ddl_valid_buffer_requirement(
+						&decoder->min_input_buf_req,
+						buffer_req))) {
+				decoder->client_input_buf_req = *buffer_req;
+				decoder->client_input_buf_req.min_count =
+					decoder->min_input_buf_req.min_count;
+				decoder->client_input_buf_req.max_count =
+					decoder->min_input_buf_req.max_count;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case DDL_I_OUTPUT_BUF_REQ:
+		{
+			struct vcd_buffer_requirement *buffer_req =
+			    (struct vcd_buffer_requirement *)
+			    property_value;
+			if (sizeof(struct vcd_buffer_requirement) ==
+			    property_hdr->sz &&
+			    (ddl_valid_buffer_requirement(
+						&decoder->min_output_buf_req,
+						buffer_req))) {
+				decoder->client_output_buf_req =
+				    *buffer_req;
+				decoder->client_output_buf_req.min_count =
+					decoder->min_output_buf_req.min_count;
+				decoder->client_output_buf_req.max_count =
+					decoder->min_output_buf_req.max_count;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+
+	case VCD_I_CODEC:
+		{
+			struct vcd_property_codec *codec =
+			    (struct vcd_property_codec *)property_value;
+			if (sizeof(struct vcd_property_codec) ==
+			    property_hdr->sz
+			    && DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)
+			    ) {
+				u32 status;
+				vcd_fw_transact(false, true,
+					decoder->codec.codec);
+				status = vcd_fw_transact(true, true,
+					codec->codec);
+				if (status) {
+					decoder->codec = *codec;
+					ddl_set_default_dec_property(ddl);
+					vcd_status = VCD_S_SUCCESS;
+				} else {
+					status = vcd_fw_transact(true, true,
+						decoder->codec.codec);
+					vcd_status = VCD_ERR_NOT_SUPPORTED;
+				}
+			}
+			break;
+		}
+	case VCD_I_POST_FILTER:
+		{
+			if (sizeof(struct vcd_property_post_filter) ==
+			    property_hdr->sz
+			    && DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN) &&
+			    (decoder->codec.codec == VCD_CODEC_MPEG4 ||
+			     decoder->codec.codec == VCD_CODEC_MPEG2)
+			    ) {
+				decoder->post_filter =
+				    *(struct vcd_property_post_filter *)
+				    property_value;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_FRAME_SIZE:
+		{
+			struct vcd_property_frame_size *frame_size =
+			    (struct vcd_property_frame_size *)
+			    property_value;
+
+			if ((sizeof(struct vcd_property_frame_size) ==
+					property_hdr->sz) &&
+				(DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN))) {
+				if (decoder->client_frame_size.height !=
+				    frame_size->height
+				    || decoder->client_frame_size.width !=
+				    frame_size->width) {
+					decoder->client_frame_size =
+					    *frame_size;
+					ddl_set_default_decoder_buffer_req
+					    (decoder, true);
+				}
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_BUFFER_FORMAT:
+		{
+			struct vcd_property_buffer_format *tile =
+			    (struct vcd_property_buffer_format *)
+			    property_value;
+			if (sizeof(struct vcd_property_buffer_format) ==
+			    property_hdr->sz &&
+			    DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN) &&
+			    (tile->buffer_format == VCD_BUFFER_FORMAT_NV12
+			     || tile->buffer_format ==
+			     VCD_BUFFER_FORMAT_TILE_4x2)
+			    ) {
+				if (tile->buffer_format !=
+				    decoder->buf_format.buffer_format) {
+					decoder->buf_format = *tile;
+					ddl_set_default_decoder_buffer_req
+					    (decoder, true);
+				}
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_METADATA_ENABLE:
+	case VCD_I_METADATA_HEADER:
+		{
+			vcd_status = ddl_set_metadata_params(ddl,
+							     property_hdr,
+							     property_value);
+			break;
+		}
+	case VCD_I_OUTPUT_ORDER:
+		{
+			if (sizeof(u32) == property_hdr->sz &&
+				DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) {
+					decoder->output_order =
+						*(u32 *)property_value;
+					vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_FRAME_RATE:
+		{
+			vcd_status = VCD_S_SUCCESS;
+			break;
+		}
+	default:
+		{
+			vcd_status = VCD_ERR_ILLEGAL_OP;
+			break;
+		}
+	}
+	return vcd_status;
+}
+
+static u32 ddl_set_enc_property(struct ddl_client_context *ddl,
+	struct vcd_property_hdr *property_hdr, void *property_value)
+{
+	u32 vcd_status = VCD_ERR_ILLEGAL_PARM;
+	struct ddl_encoder_data *encoder = &(ddl->codec_data.encoder);
+
+	if (DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME) ||
+	   (DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME_DONE) ||
+		DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)))
+		vcd_status = ddl_set_enc_dynamic_property(ddl,
+			property_hdr, property_value);
+	if (vcd_status == VCD_S_SUCCESS)
+		return vcd_status;
+
+	if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN) ||
+		vcd_status != VCD_ERR_ILLEGAL_OP) {
+		VIDC_LOGERR_STRING
+			("ddl_set_enc_property:Fails_as_not_in_open_state");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+
+	switch (property_hdr->prop_id) {
+	case VCD_I_FRAME_SIZE:
+		{
+			struct vcd_property_frame_size *framesize =
+				(struct vcd_property_frame_size *)
+				property_value;
+
+			if (sizeof(struct vcd_property_frame_size)
+				== property_hdr->sz &&
+				DDL_ALLOW_ENC_FRAMESIZE(framesize->width,
+				framesize->height) &&
+				(encoder->codec.codec == VCD_CODEC_H264 ||
+				 DDL_VALIDATE_ENC_FRAMESIZE(framesize->width,
+				 framesize->height))
+				) {
+				encoder->frame_size = *framesize;
+				ddl_calculate_stride(&encoder->frame_size,
+					false, encoder->codec.codec);
+				ddl_set_default_encoder_buffer_req(encoder);
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_CODEC:
+		{
+			struct vcd_property_codec *codec =
+				(struct vcd_property_codec *)
+				property_value;
+			if (sizeof(struct vcd_property_codec) ==
+				property_hdr->sz) {
+				u32 status;
+
+				vcd_fw_transact(false, false,
+					encoder->codec.codec);
+
+				status = vcd_fw_transact(true, false,
+					codec->codec);
+				if (status) {
+					encoder->codec = *codec;
+					ddl_set_default_enc_property(ddl);
+					vcd_status = VCD_S_SUCCESS;
+				} else {
+					status = vcd_fw_transact(true, false,
+						encoder->codec.codec);
+					vcd_status = VCD_ERR_NOT_SUPPORTED;
+				}
+			}
+			break;
+		}
+	case VCD_I_PROFILE:
+		{
+			struct vcd_property_profile *profile =
+				(struct vcd_property_profile *)
+				property_value;
+			if ((sizeof(struct vcd_property_profile) ==
+				property_hdr->sz) &&
+				((encoder->codec.codec ==
+					VCD_CODEC_MPEG4 &&
+				  (profile->profile ==
+					VCD_PROFILE_MPEG4_SP ||
+					profile->profile ==
+					VCD_PROFILE_MPEG4_ASP)) ||
+				 (encoder->codec.codec ==
+					VCD_CODEC_H264 &&
+				 (profile->profile >=
+					VCD_PROFILE_H264_BASELINE &&
+				  profile->profile <=
+					VCD_PROFILE_H264_HIGH)) ||
+				 (encoder->codec.codec ==
+					VCD_CODEC_H263 &&
+				  profile->profile ==
+					VCD_PROFILE_H263_BASELINE))
+				) {
+				encoder->profile = *profile;
+				vcd_status = VCD_S_SUCCESS;
+
+				if (profile->profile ==
+					VCD_PROFILE_H264_BASELINE)
+					encoder->entropy_control.entropy_sel
+						= VCD_ENTROPY_SEL_CAVLC;
+				else
+					encoder->entropy_control.entropy_sel
+						= VCD_ENTROPY_SEL_CABAC;
+			}
+			break;
+		}
+	case VCD_I_LEVEL:
+		{
+			struct vcd_property_level *level =
+				(struct vcd_property_level *)
+				property_value;
+			if ((sizeof(struct vcd_property_level) ==
+				property_hdr->sz) &&
+				(((encoder->codec.codec == VCD_CODEC_MPEG4) &&
+				(level->level >= VCD_LEVEL_MPEG4_0) &&
+				(level->level <= VCD_LEVEL_MPEG4_6)) ||
+				((encoder->codec.codec == VCD_CODEC_H264) &&
+				(level->level >= VCD_LEVEL_H264_1) &&
+				(level->level <= VCD_LEVEL_H264_3p1)) ||
+				((encoder->codec.codec == VCD_CODEC_H263) &&
+				(level->level >= VCD_LEVEL_H263_10) &&
+				(level->level <= VCD_LEVEL_H263_70)))) {
+				encoder->level = *level;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_MULTI_SLICE:
+		{
+			struct vcd_property_multi_slice *multislice =
+				(struct vcd_property_multi_slice *)
+				property_value;
+			switch (multislice->m_slice_sel) {
+			case VCD_MSLICE_OFF:
+				{
+					vcd_status = VCD_S_SUCCESS;
+					break;
+				}
+			case VCD_MSLICE_BY_GOB:
+				{
+					if (encoder->codec.codec ==
+						VCD_CODEC_H263)
+						vcd_status = VCD_S_SUCCESS;
+					break;
+				}
+			case VCD_MSLICE_BY_MB_COUNT:
+				{
+					if (multislice->m_slice_size >= 1 &&
+						multislice->m_slice_size <=
+						(encoder->frame_size.height *
+						encoder->frame_size.width /
+						16 / 16)) {
+						vcd_status = VCD_S_SUCCESS;
+					}
+					break;
+				}
+			case VCD_MSLICE_BY_BYTE_COUNT:
+				{
+					if (multislice->m_slice_size > 0)
+						vcd_status = VCD_S_SUCCESS;
+					break;
+				}
+			default:
+				{
+					break;
+				}
+			}
+			if (sizeof(struct vcd_property_multi_slice) ==
+				property_hdr->sz &&
+				!vcd_status) {
+				encoder->multi_slice = *multislice;
+				if (multislice->m_slice_sel ==
+						VCD_MSLICE_OFF)
+					encoder->multi_slice.m_slice_size = 0;
+			}
+			break;
+		}
+	case VCD_I_RATE_CONTROL:
+		{
+			struct vcd_property_rate_control
+				*ratecontrol =
+				(struct vcd_property_rate_control *)
+				property_value;
+			if (sizeof(struct vcd_property_rate_control) ==
+				property_hdr->sz &&
+				ratecontrol->
+				rate_control >= VCD_RATE_CONTROL_OFF &&
+				ratecontrol->
+				rate_control <= VCD_RATE_CONTROL_CBR_CFR
+				) {
+				encoder->rc = *ratecontrol;
+				ddl_set_default_enc_rc_params(encoder);
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_SHORT_HEADER:
+		{
+
+		if (sizeof(struct vcd_property_short_header) ==
+			property_hdr->sz &&
+			encoder->codec.codec == VCD_CODEC_MPEG4) {
+			encoder->short_header =
+				*(struct vcd_property_short_header *)
+				property_value;
+			vcd_status = VCD_S_SUCCESS;
+		}
+
+			break;
+		}
+	case VCD_I_VOP_TIMING:
+		{
+			struct vcd_property_vop_timing *voptime =
+				(struct vcd_property_vop_timing *)
+				property_value;
+			if (
+				(sizeof(struct vcd_property_vop_timing) ==
+					  property_hdr->sz
+				) &&
+				(encoder->frame_rate.fps_numerator <=
+					voptime->vop_time_resolution)
+				) {
+				encoder->vop_timing = *voptime;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_HEADER_EXTENSION:
+		{
+			if (sizeof(u32) == property_hdr->sz &&
+				encoder->codec.codec == VCD_CODEC_MPEG4
+				) {
+				encoder->hdr_ext_control = *(u32 *)
+					property_value;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_ENTROPY_CTRL:
+		{
+			struct vcd_property_entropy_control
+				*entropy_control =
+				(struct vcd_property_entropy_control *)
+				property_value;
+			if (sizeof(struct vcd_property_entropy_control) ==
+				property_hdr->sz &&
+				encoder->codec.codec == VCD_CODEC_H264
+				&& entropy_control->
+				entropy_sel >= VCD_ENTROPY_SEL_CAVLC &&
+				entropy_control->entropy_sel <=
+				VCD_ENTROPY_SEL_CABAC) {
+				encoder->entropy_control = *entropy_control;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_DEBLOCKING:
+		{
+			struct vcd_property_db_config *dbconfig =
+				(struct vcd_property_db_config *)
+				property_value;
+			if (sizeof(struct vcd_property_db_config) ==
+				property_hdr->sz &&
+				encoder->codec.codec == VCD_CODEC_H264
+				&& dbconfig->db_config >=
+				VCD_DB_ALL_BLOCKING_BOUNDARY
+				&& dbconfig->db_config <=
+				VCD_DB_SKIP_SLICE_BOUNDARY
+				) {
+				encoder->db_control = *dbconfig;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_QP_RANGE:
+		{
+			struct vcd_property_qp_range *qp =
+				(struct vcd_property_qp_range *)
+				property_value;
+			if ((sizeof(struct vcd_property_qp_range) ==
+				property_hdr->sz) &&
+				(qp->min_qp <= qp->max_qp) &&
+				(
+				(encoder->codec.codec == VCD_CODEC_H264
+				&& qp->max_qp <= DDL_MAX_H264_QP) ||
+				(qp->max_qp <= DDL_MAX_MPEG4_QP)
+				)
+				) {
+				encoder->qp_range = *qp;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_SESSION_QP:
+		{
+			struct vcd_property_session_qp *qp =
+				(struct vcd_property_session_qp *)
+				property_value;
+
+		if ((sizeof(struct vcd_property_session_qp) ==
+			property_hdr->sz) &&
+			(qp->i_frame_qp >= encoder->qp_range.min_qp) &&
+			(qp->i_frame_qp <= encoder->qp_range.max_qp) &&
+			(qp->p_frame_qp >= encoder->qp_range.min_qp) &&
+			(qp->p_frame_qp <= encoder->qp_range.max_qp)
+			) {
+			encoder->session_qp = *qp;
+			vcd_status = VCD_S_SUCCESS;
+		}
+
+			break;
+		}
+	case VCD_I_RC_LEVEL_CONFIG:
+		{
+			struct vcd_property_rc_level *rc_level =
+				(struct vcd_property_rc_level *)
+				property_value;
+			if (sizeof(struct vcd_property_rc_level) ==
+				property_hdr->sz &&
+				(encoder->rc.rate_control >=
+				VCD_RATE_CONTROL_VBR_VFR &&
+				encoder->rc.rate_control <=
+				VCD_RATE_CONTROL_CBR_VFR) &&
+				(!rc_level->mb_level_rc ||
+				encoder->codec.codec == VCD_CODEC_H264)) {
+				encoder->rc_level = *rc_level;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_FRAME_LEVEL_RC:
+		{
+
+		struct vcd_property_frame_level_rc_params
+			*frame_levelrc =
+			(struct vcd_property_frame_level_rc_params *)
+			property_value;
+
+			if ((sizeof(struct
+				vcd_property_frame_level_rc_params)
+				== property_hdr->sz) &&
+				(frame_levelrc->reaction_coeff) &&
+				(encoder->rc_level.frame_level_rc)
+				) {
+				encoder->frame_level_rc = *frame_levelrc;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_ADAPTIVE_RC:
+		{
+
+		if ((sizeof(struct
+			vcd_property_adaptive_rc_params)
+			== property_hdr->sz) &&
+			(encoder->codec.
+			codec == VCD_CODEC_H264) &&
+			(encoder->rc_level.mb_level_rc)) {
+
+			encoder->adaptive_rc =
+				*(struct vcd_property_adaptive_rc_params *)
+				property_value;
+
+			vcd_status = VCD_S_SUCCESS;
+		}
+
+			break;
+		}
+	case VCD_I_BUFFER_FORMAT:
+		{
+			struct vcd_property_buffer_format *tile =
+				(struct vcd_property_buffer_format *)
+				property_value;
+			if (sizeof(struct vcd_property_buffer_format) ==
+				property_hdr->sz &&
+				tile->buffer_format ==
+				VCD_BUFFER_FORMAT_NV12) {
+				encoder->buf_format = *tile;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case DDL_I_INPUT_BUF_REQ:
+		{
+			struct vcd_buffer_requirement *buffer_req =
+				(struct vcd_buffer_requirement *)
+				property_value;
+			if (sizeof(struct vcd_buffer_requirement) ==
+				property_hdr->sz &&
+				(ddl_valid_buffer_requirement(
+				&encoder->input_buf_req, buffer_req))
+				) {
+				encoder->client_input_buf_req = *buffer_req;
+				encoder->client_input_buf_req.min_count =
+					encoder->input_buf_req.min_count;
+				encoder->client_input_buf_req.max_count =
+					encoder->input_buf_req.max_count;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case DDL_I_OUTPUT_BUF_REQ:
+		{
+			struct vcd_buffer_requirement *buffer_req =
+				(struct vcd_buffer_requirement *)
+				property_value;
+			if (sizeof(struct vcd_buffer_requirement) ==
+				property_hdr->sz &&
+				(ddl_valid_buffer_requirement(
+				&encoder->output_buf_req, buffer_req))
+				) {
+				encoder->client_output_buf_req =
+					*buffer_req;
+				encoder->client_output_buf_req.min_count =
+					encoder->output_buf_req.min_count;
+				encoder->client_output_buf_req.max_count =
+					encoder->output_buf_req.max_count;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_METADATA_ENABLE:
+	case VCD_I_METADATA_HEADER:
+		{
+			vcd_status = ddl_set_metadata_params(
+				ddl, property_hdr, property_value);
+			break;
+		}
+	default:
+		{
+			vcd_status = VCD_ERR_ILLEGAL_OP;
+			break;
+		}
+	}
+	return vcd_status;
+}
+
+static u32 ddl_get_dec_property
+    (struct ddl_client_context *ddl,
+     struct vcd_property_hdr *property_hdr, void *property_value) {
+	u32 vcd_status = VCD_ERR_ILLEGAL_PARM;
+	struct ddl_decoder_data *decoder = &ddl->codec_data.decoder;
+
+	switch (property_hdr->prop_id) {
+	case VCD_I_FRAME_SIZE:
+		{
+			struct vcd_property_frame_size *fz_size;
+			if (sizeof(struct vcd_property_frame_size) ==
+			    property_hdr->sz) {
+					ddl_calculate_stride(
+					&decoder->client_frame_size,
+					!decoder->progressive_only,
+					decoder->codec.codec);
+					if (decoder->buf_format.buffer_format
+						== VCD_BUFFER_FORMAT_TILE_4x2) {
+						fz_size =
+						&decoder->client_frame_size;
+						fz_size->stride =
+						DDL_TILE_ALIGN(fz_size->width,
+							DDL_TILE_ALIGN_WIDTH);
+						fz_size->scan_lines =
+						DDL_TILE_ALIGN(fz_size->height,
+							DDL_TILE_ALIGN_HEIGHT);
+					}
+					*(struct vcd_property_frame_size *)
+						property_value =
+						decoder->client_frame_size;
+					vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_PROFILE:
+		{
+			if (sizeof(struct vcd_property_profile) ==
+			    property_hdr->sz) {
+				*(struct vcd_property_profile *)
+				    property_value = decoder->profile;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_LEVEL:
+		{
+			if (sizeof(struct vcd_property_level) ==
+			    property_hdr->sz) {
+				*(struct vcd_property_level *)
+				    property_value = decoder->level;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_PROGRESSIVE_ONLY:
+		{
+			if (sizeof(u32) == property_hdr->sz) {
+				*(u32 *) property_value =
+				    decoder->progressive_only;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case DDL_I_INPUT_BUF_REQ:
+		{
+			if (sizeof(struct vcd_buffer_requirement) ==
+			    property_hdr->sz) {
+				*(struct vcd_buffer_requirement *)
+				    property_value =
+						decoder->client_input_buf_req;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case DDL_I_OUTPUT_BUF_REQ:
+		{
+			if (sizeof(struct vcd_buffer_requirement) ==
+			    property_hdr->sz) {
+				*(struct vcd_buffer_requirement *)
+				    property_value =
+						decoder->client_output_buf_req;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_CODEC:
+		{
+			if (sizeof(struct vcd_property_codec) ==
+			    property_hdr->sz) {
+				*(struct vcd_property_codec *)
+				    property_value = decoder->codec;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_BUFFER_FORMAT:
+		{
+			if (sizeof(struct vcd_property_buffer_format) ==
+			    property_hdr->sz) {
+				*(struct vcd_property_buffer_format *)
+				    property_value = decoder->buf_format;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_POST_FILTER:
+		{
+			if (sizeof(struct vcd_property_post_filter) ==
+			    property_hdr->sz) {
+				*(struct vcd_property_post_filter *)
+				    property_value = decoder->post_filter;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case DDL_I_SEQHDR_ALIGN_BYTES:
+		{
+			if (sizeof(u32) == property_hdr->sz) {
+				*(u32 *) property_value =
+				    DDL_LINEAR_BUFFER_ALIGN_BYTES;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case DDL_I_FRAME_PROC_UNITS:
+		{
+			if (sizeof(u32) == property_hdr->sz) {
+				struct vcd_property_frame_size frame_sz =
+					decoder->client_frame_size;
+				ddl_calculate_stride(&frame_sz,
+					!decoder->progressive_only,
+					decoder->codec.codec);
+				*(u32 *) property_value =
+				    ((frame_sz.stride >> 4) *
+				     (frame_sz.scan_lines >> 4));
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case DDL_I_DPB_RETRIEVE:
+		{
+			if (sizeof(struct ddl_frame_data_tag) ==
+			    property_hdr->sz) {
+				vcd_status =
+				    ddl_decoder_dpb_transact(decoder,
+					 (struct ddl_frame_data_tag *)
+					     property_value,
+					     DDL_DPB_OP_RETRIEVE);
+			}
+			break;
+		}
+	case VCD_I_OUTPUT_ORDER:
+		{
+			if (sizeof(u32) == property_hdr->sz) {
+				*(u32 *)property_value = decoder->output_order;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_METADATA_ENABLE:
+	case VCD_I_METADATA_HEADER:
+		{
+			vcd_status = ddl_get_metadata_params(
+						   ddl,
+						   property_hdr,
+						   property_value);
+			break;
+		}
+	default:
+		{
+			vcd_status = VCD_ERR_ILLEGAL_OP;
+			break;
+		}
+	}
+	return vcd_status;
+}
+
+static u32 ddl_get_enc_property
+    (struct ddl_client_context *ddl,
+     struct vcd_property_hdr *property_hdr, void *property_value) {
+	u32 vcd_status = VCD_ERR_ILLEGAL_PARM;
+	struct ddl_encoder_data *encoder = &ddl->codec_data.encoder;
+
+	struct vcd_property_entropy_control *entropy_control;
+	struct vcd_property_intra_refresh_mb_number *intra_refresh;
+
+	switch (property_hdr->prop_id) {
+	case VCD_I_CODEC:
+		{
+			if (sizeof(struct vcd_property_codec) ==
+			    property_hdr->sz) {
+				*(struct vcd_property_codec *)
+					property_value =
+					encoder->codec;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_FRAME_SIZE:
+		{
+			if (sizeof(struct vcd_property_frame_size) ==
+			    property_hdr->sz) {
+				*(struct vcd_property_frame_size *)
+					property_value =
+					encoder->frame_size;
+
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_FRAME_RATE:
+		{
+			if (sizeof(struct vcd_property_frame_rate) ==
+				property_hdr->sz) {
+
+				*(struct vcd_property_frame_rate *)
+					property_value =
+					encoder->frame_rate;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_TARGET_BITRATE:
+		{
+
+			if (sizeof(struct vcd_property_target_bitrate) ==
+			    property_hdr->sz) {
+				*(struct vcd_property_target_bitrate *)
+					property_value =
+					encoder->target_bit_rate;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_RATE_CONTROL:
+		{
+			if (sizeof(struct vcd_property_rate_control) ==
+			    property_hdr->sz) {
+				*(struct vcd_property_rate_control *)
+				    property_value = encoder->rc;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_PROFILE:
+		{
+			if (sizeof(struct vcd_property_profile) ==
+			    property_hdr->sz) {
+				*(struct vcd_property_profile *)
+				    property_value = encoder->profile;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_LEVEL:
+		{
+			if (sizeof(struct vcd_property_level) ==
+			    property_hdr->sz) {
+				*(struct vcd_property_level *)
+				    property_value = encoder->level;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_MULTI_SLICE:
+		{
+			if (sizeof(struct vcd_property_multi_slice) ==
+			    property_hdr->sz) {
+				*(struct vcd_property_multi_slice *)
+				    property_value = encoder->multi_slice;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_SEQ_HEADER:
+		{
+			struct vcd_sequence_hdr *seq_hdr =
+			    (struct vcd_sequence_hdr *)property_value;
+			if (encoder->seq_header.buffer_size &&
+			    sizeof(struct vcd_sequence_hdr) ==
+			    property_hdr->sz
+			    && encoder->seq_header.buffer_size <=
+			    seq_hdr->sequence_header_len) {
+				DDL_MEMCPY(seq_hdr->sequence_header,
+					   encoder->seq_header.
+					   align_virtual_addr,
+					   encoder->seq_header.buffer_size);
+				seq_hdr->sequence_header_len =
+				    encoder->seq_header.buffer_size;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case DDL_I_SEQHDR_PRESENT:
+		{
+			if (sizeof(u32) == property_hdr->sz) {
+				if ((encoder->codec.
+					codec == VCD_CODEC_MPEG4 &&
+					!encoder->short_header.short_header)
+					|| encoder->codec.codec ==
+					VCD_CODEC_H264) {
+					*(u32 *)property_value = 0x1;
+				} else {
+					*(u32 *)property_value = 0x0;
+				}
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_VOP_TIMING:
+		{
+			if (sizeof(struct vcd_property_vop_timing) ==
+			    property_hdr->sz) {
+				*(struct vcd_property_vop_timing *)
+				    property_value = encoder->vop_timing;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_SHORT_HEADER:
+		{
+			if (sizeof(struct vcd_property_short_header) ==
+			    property_hdr->sz) {
+				if (encoder->codec.codec ==
+					VCD_CODEC_MPEG4) {
+					*(struct vcd_property_short_header
+					  *)property_value =
+						encoder->short_header;
+					vcd_status = VCD_S_SUCCESS;
+				} else {
+					vcd_status = VCD_ERR_ILLEGAL_OP;
+				}
+			}
+			break;
+		}
+	case VCD_I_ENTROPY_CTRL:
+		{
+			entropy_control = property_value;
+			if (sizeof(struct vcd_property_entropy_control) ==
+			    property_hdr->sz) {
+				if (encoder->codec.codec ==
+					VCD_CODEC_H264) {
+					*entropy_control =
+				     encoder->entropy_control;
+					vcd_status = VCD_S_SUCCESS;
+				} else {
+					vcd_status = VCD_ERR_ILLEGAL_OP;
+				}
+			}
+			break;
+		}
+	case VCD_I_DEBLOCKING:
+		{
+			if (sizeof(struct vcd_property_db_config) ==
+			    property_hdr->sz) {
+				if (encoder->codec.codec ==
+					VCD_CODEC_H264) {
+					*(struct vcd_property_db_config *)
+					    property_value =
+					    encoder->db_control;
+					vcd_status = VCD_S_SUCCESS;
+				} else {
+					vcd_status = VCD_ERR_ILLEGAL_OP;
+				}
+			}
+			break;
+		}
+	case VCD_I_INTRA_PERIOD:
+		{
+			if (sizeof(struct vcd_property_i_period) ==
+			    property_hdr->sz) {
+				*(struct vcd_property_i_period *)
+				    property_value = encoder->i_period;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_QP_RANGE:
+		{
+			if (sizeof(struct vcd_property_qp_range) ==
+			    property_hdr->sz) {
+				*(struct vcd_property_qp_range *)
+				    property_value = encoder->qp_range;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_SESSION_QP:
+		{
+			if (sizeof(struct vcd_property_session_qp) ==
+			    property_hdr->sz) {
+				*(struct vcd_property_session_qp *)
+				    property_value = encoder->session_qp;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_RC_LEVEL_CONFIG:
+		{
+			if (sizeof(struct vcd_property_rc_level) ==
+			    property_hdr->sz) {
+				*(struct vcd_property_rc_level *)
+				    property_value = encoder->rc_level;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_FRAME_LEVEL_RC:
+		{
+			if (sizeof
+			    (struct vcd_property_frame_level_rc_params) ==
+			    property_hdr->sz) {
+				*(struct vcd_property_frame_level_rc_params
+				 *)property_value =
+				 encoder->frame_level_rc;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_ADAPTIVE_RC:
+		{
+			if (sizeof(struct vcd_property_adaptive_rc_params)
+			    == property_hdr->sz) {
+				*(struct vcd_property_adaptive_rc_params *)
+				    property_value = encoder->adaptive_rc;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_INTRA_REFRESH:
+		{
+			intra_refresh = property_value;
+			if (sizeof
+			    (struct vcd_property_intra_refresh_mb_number)
+			    == property_hdr->sz) {
+				*intra_refresh = encoder->intra_refresh;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case DDL_I_INPUT_BUF_REQ:
+		{
+			if (sizeof(struct vcd_buffer_requirement) ==
+			    property_hdr->sz) {
+				*(struct vcd_buffer_requirement *)
+				    property_value =
+						encoder->client_input_buf_req;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case DDL_I_OUTPUT_BUF_REQ:
+		{
+			if (sizeof(struct vcd_buffer_requirement) ==
+			    property_hdr->sz) {
+				*(struct vcd_buffer_requirement *)
+				    property_value =
+						encoder->client_output_buf_req;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_BUFFER_FORMAT:
+		{
+			if (sizeof(struct vcd_property_buffer_format) ==
+			    property_hdr->sz) {
+				*(struct vcd_property_buffer_format *)
+				    property_value = encoder->buf_format;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case DDL_I_FRAME_PROC_UNITS:
+		{
+			if (sizeof(u32) == property_hdr->sz) {
+				*(u32 *) property_value =
+				    ((encoder->frame_size.width >> 4) *
+				     (encoder->frame_size.height >> 4)
+				    );
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_HEADER_EXTENSION:
+		{
+			if (sizeof(u32) == property_hdr->sz &&
+			    encoder->codec.codec == VCD_CODEC_MPEG4) {
+				*(u32 *) property_value =
+				    encoder->hdr_ext_control;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_METADATA_ENABLE:
+	case VCD_I_METADATA_HEADER:
+		{
+			vcd_status = ddl_get_metadata_params(
+						   ddl,
+						   property_hdr,
+						   property_value);
+			break;
+		}
+	default:
+		{
+			vcd_status = VCD_ERR_ILLEGAL_OP;
+			break;
+		}
+	}
+	return vcd_status;
+}
+
+static u32 ddl_set_enc_dynamic_property
+    (struct ddl_client_context *ddl,
+     struct vcd_property_hdr *property_hdr, void *property_value) {
+	struct ddl_encoder_data *encoder = &(ddl->codec_data.encoder);
+	u32 vcd_status = VCD_ERR_ILLEGAL_PARM, dynamic_prop_change = 0x0;
+	switch (property_hdr->prop_id) {
+	case VCD_I_REQ_IFRAME:
+		{
+			if (sizeof(struct vcd_property_req_i_frame) ==
+			    property_hdr->sz) {
+				dynamic_prop_change = DDL_ENC_REQ_IFRAME;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_TARGET_BITRATE:
+		{
+			struct vcd_property_target_bitrate *bitrate =
+				(struct vcd_property_target_bitrate *)
+				property_value;
+			if (sizeof(struct vcd_property_target_bitrate) ==
+			 property_hdr->sz && bitrate->target_bitrate > 0
+			 && bitrate->target_bitrate <= DDL_MAX_BIT_RATE) {
+				encoder->target_bit_rate = *bitrate;
+				dynamic_prop_change = DDL_ENC_CHANGE_BITRATE;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_INTRA_PERIOD:
+		{
+			struct vcd_property_i_period *iperiod =
+				(struct vcd_property_i_period *)
+				property_value;
+			if (sizeof(struct vcd_property_i_period) ==
+				property_hdr->sz &&
+				!iperiod->b_frames) {
+				encoder->i_period = *iperiod;
+				dynamic_prop_change = DDL_ENC_CHANGE_IPERIOD;
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_FRAME_RATE:
+		{
+			struct vcd_property_frame_rate *frame_rate =
+			    (struct vcd_property_frame_rate *)
+			    property_value;
+			if (sizeof(struct vcd_property_frame_rate)
+			    == property_hdr->sz &&
+			    frame_rate->fps_denominator &&
+			    frame_rate->fps_numerator &&
+			    frame_rate->fps_denominator <=
+			    frame_rate->fps_numerator) {
+				encoder->frame_rate = *frame_rate;
+				dynamic_prop_change = DDL_ENC_CHANGE_FRAMERATE;
+				if (DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN) &&
+					(encoder->codec.codec != VCD_CODEC_MPEG4
+					 || encoder->short_header.short_header))
+					ddl_set_default_enc_vop_timing(encoder);
+				vcd_status = VCD_S_SUCCESS;
+			}
+			break;
+		}
+	case VCD_I_INTRA_REFRESH:
+		{
+			struct vcd_property_intra_refresh_mb_number
+				*intra_refresh_mbnum = (
+				struct vcd_property_intra_refresh_mb_number *)
+					property_value;
+			u32 frame_mbnum =
+				(encoder->frame_size.width >> 4) *
+				(encoder->frame_size.height >> 4);
+			if (sizeof(struct
+				vcd_property_intra_refresh_mb_number)
+				== property_hdr->sz &&
+				intra_refresh_mbnum->cir_mb_number <=
+				frame_mbnum) {
+				encoder->intra_refresh =
+					*intra_refresh_mbnum;
+				dynamic_prop_change = DDL_ENC_CHANGE_CIR;
+				vcd_status = VCD_S_SUCCESS;
+			}
+
+			break;
+		}
+	default:
+		{
+			vcd_status = VCD_ERR_ILLEGAL_OP;
+			break;
+		}
+	}
+	if (vcd_status == VCD_S_SUCCESS &&
+	(DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME) ||
+	DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME_DONE)))
+		encoder->dynamic_prop_change |= dynamic_prop_change;
+	return vcd_status;
+}
+
+void ddl_set_default_dec_property(struct ddl_client_context *ddl)
+{
+	struct ddl_decoder_data *decoder = &(ddl->codec_data.decoder);
+
+	if (decoder->codec.codec == VCD_CODEC_MPEG4 ||
+	    decoder->codec.codec == VCD_CODEC_MPEG2)
+		decoder->post_filter.post_filter = true;
+	else
+		decoder->post_filter.post_filter = false;
+	decoder->buf_format.buffer_format = VCD_BUFFER_FORMAT_NV12;
+	decoder->client_frame_size.height = 144;
+	decoder->client_frame_size.width = 176;
+	decoder->client_frame_size.stride = 176;
+	decoder->client_frame_size.scan_lines = 144;
+	decoder->progressive_only = 1;
+	decoder->profile.profile = VCD_PROFILE_UNKNOWN;
+	decoder->level.level = VCD_LEVEL_UNKNOWN;
+	decoder->output_order = VCD_DEC_ORDER_DISPLAY;
+	ddl_set_default_metadata_flag(ddl);
+	ddl_set_default_decoder_buffer_req(decoder, true);
+}
+
+static void ddl_set_default_enc_property(struct ddl_client_context *ddl)
+{
+	struct ddl_encoder_data *encoder = &(ddl->codec_data.encoder);
+
+	ddl_set_default_enc_profile(encoder);
+	ddl_set_default_enc_level(encoder);
+
+	encoder->rc.rate_control = VCD_RATE_CONTROL_VBR_VFR;
+	ddl_set_default_enc_rc_params(encoder);
+
+	ddl_set_default_enc_intra_period(encoder);
+
+	encoder->intra_refresh.cir_mb_number = 0;
+	ddl_set_default_enc_vop_timing(encoder);
+
+	encoder->multi_slice.m_slice_sel = VCD_MSLICE_OFF;
+	encoder->multi_slice.m_slice_size = 0;
+	encoder->short_header.short_header = false;
+
+	encoder->entropy_control.entropy_sel = VCD_ENTROPY_SEL_CAVLC;
+	encoder->entropy_control.cabac_model = VCD_CABAC_MODEL_NUMBER_0;
+	encoder->db_control.db_config = VCD_DB_ALL_BLOCKING_BOUNDARY;
+	encoder->db_control.slice_alpha_offset = 0;
+	encoder->db_control.slice_beta_offset = 0;
+
+	encoder->re_con_buf_format.buffer_format =
+		VCD_BUFFER_FORMAT_TILE_4x2;
+
+	encoder->buf_format.buffer_format = VCD_BUFFER_FORMAT_NV12;
+
+	encoder->hdr_ext_control = 0;
+
+	ddl_set_default_metadata_flag(ddl);
+
+	ddl_set_default_encoder_buffer_req(encoder);
+}
+
+static void ddl_set_default_enc_profile(struct ddl_encoder_data *encoder)
+{
+	enum vcd_codec codec = encoder->codec.codec;
+	if (codec == VCD_CODEC_MPEG4)
+		encoder->profile.profile = VCD_PROFILE_MPEG4_SP;
+	else if (codec == VCD_CODEC_H264)
+		encoder->profile.profile = VCD_PROFILE_H264_BASELINE;
+	else
+		encoder->profile.profile = VCD_PROFILE_H263_BASELINE;
+}
+
+static void ddl_set_default_enc_level(struct ddl_encoder_data *encoder)
+{
+	enum vcd_codec codec = encoder->codec.codec;
+	if (codec == VCD_CODEC_MPEG4)
+		encoder->level.level = VCD_LEVEL_MPEG4_1;
+	else if (codec == VCD_CODEC_H264)
+		encoder->level.level = VCD_LEVEL_H264_1;
+	else
+		encoder->level.level = VCD_LEVEL_H263_10;
+}
+
+static void ddl_set_default_enc_vop_timing
+    (struct ddl_encoder_data *encoder)
+{
+	if (encoder->codec.codec == VCD_CODEC_MPEG4)
+		encoder->vop_timing.vop_time_resolution =
+		    (2 * encoder->frame_rate.fps_numerator) /
+		    encoder->frame_rate.fps_denominator;
+	else
+		encoder->vop_timing.vop_time_resolution = 0x7530;
+}
+
+static void ddl_set_default_enc_intra_period(
+		struct ddl_encoder_data *encoder)
+{
+	switch (encoder->rc.rate_control) {
+	default:
+	case VCD_RATE_CONTROL_VBR_VFR:
+	case VCD_RATE_CONTROL_VBR_CFR:
+	case VCD_RATE_CONTROL_CBR_VFR:
+	case VCD_RATE_CONTROL_OFF:
+		{
+			encoder->i_period.p_frames =
+			    ((encoder->frame_rate.fps_numerator << 1) /
+			     encoder->frame_rate.fps_denominator) - 1;
+			break;
+		}
+	case VCD_RATE_CONTROL_CBR_CFR:
+		{
+			encoder->i_period.p_frames =
+			    ((encoder->frame_rate.fps_numerator >> 1) /
+			     encoder->frame_rate.fps_denominator) - 1;
+			break;
+		}
+	}
+	encoder->i_period.b_frames = 0;
+}
+
+static void ddl_set_default_enc_rc_params(
+		struct ddl_encoder_data *encoder)
+{
+	enum vcd_codec codec = encoder->codec.codec;
+
+	encoder->rc_level.frame_level_rc = true;
+	encoder->qp_range.min_qp = 0x1;
+
+	if (codec == VCD_CODEC_H264) {
+		encoder->qp_range.max_qp = 0x33;
+		encoder->session_qp.i_frame_qp = 0x14;
+		encoder->session_qp.p_frame_qp = 0x14;
+
+		encoder->rc_level.mb_level_rc = true;
+		encoder->adaptive_rc.activity_region_flag = true;
+		encoder->adaptive_rc.dark_region_as_flag = true;
+		encoder->adaptive_rc.smooth_region_as_flag = true;
+		encoder->adaptive_rc.static_region_as_flag = true;
+	} else {
+		encoder->qp_range.max_qp = 0x1f;
+		encoder->session_qp.i_frame_qp = 0xd;
+		encoder->session_qp.p_frame_qp = 0xd;
+		encoder->rc_level.mb_level_rc = false;
+	}
+
+	switch (encoder->rc.rate_control) {
+	default:
+	case VCD_RATE_CONTROL_VBR_VFR:
+		{
+			encoder->r_cframe_skip = 1;
+			encoder->frame_level_rc.reaction_coeff = 0x1f4;
+			break;
+		}
+	case VCD_RATE_CONTROL_VBR_CFR:
+		{
+			encoder->r_cframe_skip = 0;
+			encoder->frame_level_rc.reaction_coeff = 0x1f4;
+			break;
+		}
+	case VCD_RATE_CONTROL_CBR_VFR:
+		{
+			encoder->r_cframe_skip = 1;
+			if (codec != VCD_CODEC_H264) {
+				encoder->session_qp.i_frame_qp = 0xf;
+				encoder->session_qp.p_frame_qp = 0xf;
+			}
+
+			encoder->frame_level_rc.reaction_coeff = 0x14;
+			break;
+		}
+	case VCD_RATE_CONTROL_CBR_CFR:
+		{
+			encoder->r_cframe_skip = 0;
+			encoder->frame_level_rc.reaction_coeff = 0x6;
+			break;
+		}
+	case VCD_RATE_CONTROL_OFF:
+		{
+			encoder->r_cframe_skip = 0;
+			encoder->rc_level.frame_level_rc = false;
+			encoder->rc_level.mb_level_rc = false;
+			break;
+		}
+	}
+}
+
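+/*
+ * Derive the encoder's default input (source YUV) and output (bitstream)
+ * buffer requirements from the current frame size and buffer format,
+ * including the metadata suffix for the output buffers.
+ */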
+void ddl_set_default_encoder_buffer_req(struct ddl_encoder_data *encoder)
+{
+	u32 y_cb_cr_size;
+
+	y_cb_cr_size = ddl_get_yuv_buffer_size(&encoder->frame_size,
+		&encoder->buf_format, false, encoder->codec.codec);
+
+	memset(&encoder->input_buf_req, 0,
+	       sizeof(struct vcd_buffer_requirement));
+
+	encoder->input_buf_req.min_count = 1;
+	encoder->input_buf_req.actual_count =
+	    encoder->input_buf_req.min_count + 8;
+	encoder->input_buf_req.max_count = DDL_MAX_BUFFER_COUNT;
+	encoder->input_buf_req.sz = y_cb_cr_size;
+	encoder->input_buf_req.align = DDL_LINEAR_BUFFER_ALIGN_BYTES;
+
+	encoder->client_input_buf_req = encoder->input_buf_req;
+
+	memset(&encoder->output_buf_req, 0,
+	       sizeof(struct vcd_buffer_requirement));
+
+	encoder->output_buf_req.min_count = 2;
+	encoder->output_buf_req.actual_count =
+	    encoder->output_buf_req.min_count + 3;
+	encoder->output_buf_req.max_count = DDL_MAX_BUFFER_COUNT;
+	encoder->output_buf_req.align = DDL_LINEAR_BUFFER_ALIGN_BYTES;
+	encoder->output_buf_req.sz = y_cb_cr_size;
+	ddl_set_default_encoder_metadata_buffer_size(encoder);
+	encoder->client_output_buf_req = encoder->output_buf_req;
+}
+
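+/*
+ * Fill the decoder buffer requirements, either estimated from the client's
+ * frame size (estimate true) or from the values parsed out of the stream;
+ * the output count depends on the codec-specific minimum DPB count and on
+ * the frame size.
+ */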
+void ddl_set_default_decoder_buffer_req(struct ddl_decoder_data *decoder,
+		u32 estimate)
+{
+	u32 y_cb_cr_size, min_dpb, num_mb;
+	struct vcd_property_frame_size  *frame_size;
+	struct vcd_buffer_requirement *output_buf_req, *input_buf_req;
+
+	if (!decoder->codec.codec)
+		return;
+
+	if (estimate) {
+		frame_size = &decoder->client_frame_size;
+		output_buf_req = &decoder->client_output_buf_req;
+		input_buf_req = &decoder->client_input_buf_req;
+		min_dpb = ddl_decoder_min_num_dpb(decoder);
+		y_cb_cr_size = ddl_get_yuv_buffer_size(frame_size,
+			&decoder->buf_format, (!decoder->progressive_only),
+			decoder->codec.codec);
+	} else {
+		frame_size = &decoder->frame_size;
+		output_buf_req = &decoder->actual_output_buf_req;
+		input_buf_req = &decoder->actual_input_buf_req;
+		y_cb_cr_size = decoder->y_cb_cr_size;
+		min_dpb = decoder->min_dpb_num;
+	}
+
+	memset(output_buf_req, 0, sizeof(struct vcd_buffer_requirement));
+
+	output_buf_req->min_count = min_dpb;
+
+	num_mb = DDL_NO_OF_MB(frame_size->width, frame_size->height);
+	if (num_mb >= DDL_WVGA_MBS) {
+		output_buf_req->actual_count = min_dpb + 2;
+		if (output_buf_req->actual_count < 10)
+			output_buf_req->actual_count = 10;
+	} else
+		output_buf_req->actual_count = min_dpb + 5;
+
+	output_buf_req->max_count = DDL_MAX_BUFFER_COUNT;
+	output_buf_req->sz = y_cb_cr_size;
+	if (decoder->buf_format.buffer_format != VCD_BUFFER_FORMAT_NV12)
+		output_buf_req->align = DDL_TILE_BUFFER_ALIGN_BYTES;
+	else
+		output_buf_req->align = DDL_LINEAR_BUFFER_ALIGN_BYTES;
+
+	ddl_set_default_decoder_metadata_buffer_size(decoder,
+		frame_size, output_buf_req);
+
+	decoder->min_output_buf_req = *output_buf_req;
+
+	memset(input_buf_req, 0, sizeof(struct vcd_buffer_requirement));
+
+	input_buf_req->min_count = 1;
+	input_buf_req->actual_count = input_buf_req->min_count + 3;
+	input_buf_req->max_count = DDL_MAX_BUFFER_COUNT;
+	input_buf_req->sz = (1280*720*3*3) >> 3;
+	input_buf_req->align = DDL_LINEAR_BUFFER_ALIGN_BYTES;
+
+	decoder->min_input_buf_req = *input_buf_req;
+}
+
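+/*
+ * Size in bytes of one YUV frame for the given frame size and buffer
+ * format: 1.5 * stride * scan lines for linear NV12, or tile-aligned luma
+ * plus chroma planes for the 4x2 tiled format.
+ */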
+u32 ddl_get_yuv_buffer_size(struct vcd_property_frame_size *frame_size,
+     struct vcd_property_buffer_format *buf_format, u32 inter_lace,
+     enum vcd_codec codec)
+{
+	struct vcd_property_frame_size frame_sz = *frame_size;
+	u32 total_memory_size;
+	ddl_calculate_stride(&frame_sz, inter_lace, codec);
+
+	if (buf_format->buffer_format != VCD_BUFFER_FORMAT_NV12) {
+		u32 component_mem_size;
+		u32 width_round_up;
+		u32 height_round_up;
+		u32 height_chroma = (frame_sz.scan_lines >> 1);
+
+		width_round_up =
+		    DDL_TILE_ALIGN(frame_sz.stride, DDL_TILE_ALIGN_WIDTH);
+		height_round_up =
+		    DDL_TILE_ALIGN(frame_sz.scan_lines, DDL_TILE_ALIGN_HEIGHT);
+
+		component_mem_size = width_round_up * height_round_up;
+		component_mem_size = DDL_TILE_ALIGN(component_mem_size,
+						      DDL_TILE_MULTIPLY_FACTOR);
+
+		total_memory_size = ((component_mem_size +
+					 DDL_TILE_BUF_ALIGN_GUARD_BYTES) &
+					DDL_TILE_BUF_ALIGN_MASK);
+
+		height_round_up =
+		    DDL_TILE_ALIGN(height_chroma, DDL_TILE_ALIGN_HEIGHT);
+		component_mem_size = width_round_up * height_round_up;
+		component_mem_size = DDL_TILE_ALIGN(component_mem_size,
+						      DDL_TILE_MULTIPLY_FACTOR);
+		total_memory_size += component_mem_size;
+	} else {
+		total_memory_size = frame_sz.scan_lines * frame_sz.stride;
+		total_memory_size += (total_memory_size >> 1);
+	}
+	return total_memory_size;
+}
+
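+/*
+ * Round the stride up to a multiple of 16; scan lines are rounded to 16 for
+ * progressive content and the MPEG-4/DivX/Xvid family, and to 32 otherwise.
+ */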
+void ddl_calculate_stride(struct vcd_property_frame_size *frame_size,
+	u32 interlace, enum vcd_codec codec)
+{
+	frame_size->stride = ((frame_size->width + 15) >> 4) << 4;
+	if (!interlace || codec == VCD_CODEC_MPEG4 ||
+		codec == VCD_CODEC_DIVX_4 ||
+		codec == VCD_CODEC_DIVX_5 ||
+		codec == VCD_CODEC_DIVX_6 ||
+		codec == VCD_CODEC_XVID) {
+		frame_size->scan_lines =
+			((frame_size->height + 15) >> 4) << 4;
+	} else {
+		frame_size->scan_lines =
+			((frame_size->height + 31) >> 5) << 5;
+	}
+}
+
+static u32 ddl_valid_buffer_requirement
+	(struct vcd_buffer_requirement *original_buf_req,
+	struct vcd_buffer_requirement *req_buf_req)
+{
+	u32 status = false;
+	if (original_buf_req->max_count >= req_buf_req->actual_count &&
+		original_buf_req->min_count <= req_buf_req->actual_count &&
+		original_buf_req->align <= req_buf_req->align &&
+		original_buf_req->sz <= req_buf_req->sz) {
+		status = true;
+	} else {
+		VIDC_LOGERR_STRING("ddl_valid_buf_req:Failed");
+	}
+	return status;
+}
+
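+/*
+ * Codec-specific minimum number of decoded picture buffers; for H.264 it is
+ * derived from a 6912000-byte DPB budget divided by the YUV frame size,
+ * capped at 16, plus two extra buffers.
+ */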
+static u32 ddl_decoder_min_num_dpb(struct ddl_decoder_data *decoder)
+{
+	u32 min_dpb = 0, yuv_size = 0;
+	struct vcd_property_frame_size frame_sz = decoder->client_frame_size;
+	switch (decoder->codec.codec) {
+	default:
+	case VCD_CODEC_MPEG4:
+	case VCD_CODEC_MPEG2:
+	case VCD_CODEC_DIVX_4:
+	case VCD_CODEC_DIVX_5:
+	case VCD_CODEC_DIVX_6:
+	case VCD_CODEC_XVID:
+		{
+			min_dpb = 3;
+			break;
+		}
+	case VCD_CODEC_H263:
+		{
+			min_dpb = 2;
+			break;
+		}
+	case VCD_CODEC_VC1:
+	case VCD_CODEC_VC1_RCV:
+		{
+			min_dpb = 4;
+			break;
+		}
+	case VCD_CODEC_H264:
+		{
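+			/* H.264: derive the DPB count from 6912000 bytes
+			 * (6750 KB, the level 3.1 MaxDPB), cap at 16, add 2 */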
+			ddl_calculate_stride(&frame_sz,
+				!decoder->progressive_only,
+				decoder->codec.codec);
+			yuv_size =
+			    ((frame_sz.scan_lines *
+			      frame_sz.stride * 3) >> 1);
+			min_dpb = 6912000 / yuv_size;
+			if (min_dpb > 16)
+				min_dpb = 16;
+
+			min_dpb += 2;
+			break;
+		}
+	}
+	return min_dpb;
+}
+
+static u32 ddl_set_dec_buffers(struct ddl_decoder_data *decoder,
+	struct ddl_property_dec_pic_buffers *dpb)
+{
+	u32 vcd_status = VCD_S_SUCCESS;
+	u32 loopc;
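+	/* Reject the set if any DPB is misaligned or too small */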
+	for (loopc = 0; !vcd_status &&
+	     loopc < dpb->no_of_dec_pic_buf; ++loopc) {
+		if ((!DDL_ADDR_IS_ALIGNED
+		     (dpb->dec_pic_buffers[loopc].vcd_frm.physical,
+		      decoder->client_output_buf_req.align)
+		    )
+		    || (dpb->dec_pic_buffers[loopc].vcd_frm.alloc_len <
+			decoder->client_output_buf_req.sz)
+		    ) {
+			vcd_status = VCD_ERR_ILLEGAL_PARM;
+		}
+	}
+	if (vcd_status) {
+		VIDC_LOGERR_STRING
+		    ("ddl_set_prop:Dpb_align_fail_or_alloc_size_small");
+		return vcd_status;
+	}
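+	/* Drop any previously registered DPB container */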
+	if (decoder->dp_buf.no_of_dec_pic_buf) {
+		DDL_FREE(decoder->dp_buf.dec_pic_buffers);
+		decoder->dp_buf.no_of_dec_pic_buf = 0;
+	}
+	decoder->dp_buf.dec_pic_buffers =
+	    DDL_MALLOC(dpb->no_of_dec_pic_buf *
+		       sizeof(struct ddl_frame_data_tag));
+
+	if (!decoder->dp_buf.dec_pic_buffers) {
+		VIDC_LOGERR_STRING
+		    ("ddl_dec_set_prop:Dpb_container_alloc_failed");
+		return VCD_ERR_ALLOC_FAIL;
+	}
+	decoder->dp_buf.no_of_dec_pic_buf = dpb->no_of_dec_pic_buf;
+	for (loopc = 0; loopc < dpb->no_of_dec_pic_buf; ++loopc) {
+		decoder->dp_buf.dec_pic_buffers[loopc] =
+		    dpb->dec_pic_buffers[loopc];
+	}
+	decoder->dpb_mask.client_mask = 0;
+	decoder->dpb_mask.hw_mask = 0;
+	decoder->dynamic_prop_change = 0;
+	return VCD_S_SUCCESS;
+}
+
+void ddl_set_initial_default_values(struct ddl_client_context *ddl)
+{
+	if (ddl->decoding) {
+		ddl->codec_data.decoder.codec.codec = VCD_CODEC_MPEG4;
+		vcd_fw_transact(true, true,
+			ddl->codec_data.decoder.codec.codec);
+		ddl_set_default_dec_property(ddl);
+	} else {
+		struct ddl_encoder_data *encoder =
+		    &(ddl->codec_data.encoder);
+		encoder->codec.codec = VCD_CODEC_MPEG4;
+		vcd_fw_transact(true, false,
+			encoder->codec.codec);
+
+		encoder->target_bit_rate.target_bitrate = 64000;
+		encoder->frame_size.width = 176;
+		encoder->frame_size.height = 144;
+		encoder->frame_size.stride = 176;
+		encoder->frame_size.scan_lines = 144;
+		encoder->frame_rate.fps_numerator = 30;
+		encoder->frame_rate.fps_denominator = 1;
+		ddl_set_default_enc_property(ddl);
+	}
+
+	return;
+}
diff --git a/drivers/video/msm/vidc/720p/ddl/vcd_ddl_utils.c b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_utils.c
new file mode 100644
index 0000000..3b4528f
--- /dev/null
+++ b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_utils.c
@@ -0,0 +1,223 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/memory_alloc.h>
+#include "vidc_type.h"
+#include "vcd_ddl_utils.h"
+
+#if DEBUG
+#define DBG(x...) printk(KERN_DEBUG x)
+#else
+#define DBG(x...)
+#endif
+
+#define DBG_TIME(x...) printk(KERN_DEBUG x)
+#define ERR(x...) printk(KERN_ERR x)
+
+struct time_data {
+	unsigned int ddl_t1;
+	unsigned int ddl_ttotal;
+	unsigned int ddl_count;
+};
+
+static struct time_data proc_time[MAX_TIME_DATA];
+
+#ifdef NO_IN_KERNEL_PMEM
+
+void ddl_pmem_alloc(struct ddl_buf_addr *buff_addr, size_t sz, u32 align)
+{
+	u32 guard_bytes, align_mask;
+	u32 physical_addr, align_offset;
+	dma_addr_t phy_addr;
+
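+	/* Over-allocate so the buffer can be rounded up to the requested alignment */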
+	if (align == DDL_LINEAR_BUFFER_ALIGN_BYTES) {
+
+		guard_bytes = 31;
+		align_mask = 0xFFFFFFE0U;
+
+	} else {
+
+		guard_bytes = DDL_TILE_BUF_ALIGN_GUARD_BYTES;
+		align_mask = DDL_TILE_BUF_ALIGN_MASK;
+	}
+
+	buff_addr->virtual_base_addr =
+		kmalloc((sz + guard_bytes), GFP_KERNEL);
+
+	if (!buff_addr->virtual_base_addr) {
+		ERR("\n ERROR %s:%u kamlloc fails to allocate"
+			" sz + guard_bytes = %u\n", __func__, __LINE__,
+			(sz + guard_bytes));
+		return;
+	}
+
+	phy_addr = dma_map_single(NULL, buff_addr->virtual_base_addr,
+				  sz + guard_bytes, DMA_TO_DEVICE);
+
+	buff_addr->buffer_size = sz;
+	physical_addr = (u32) phy_addr;
+	buff_addr->align_physical_addr =
+	    (u32 *) ((physical_addr + guard_bytes) & align_mask);
+	align_offset =
+	    (u32) (buff_addr->align_physical_addr) - physical_addr;
+	buff_addr->align_virtual_addr =
+	    (u32 *) ((u32) (buff_addr->virtual_base_addr)
+		     + align_offset);
+}
+
+void ddl_pmem_free(struct ddl_buf_addr *buff_addr)
+{
+	kfree(buff_addr->virtual_base_addr);
+	buff_addr->buffer_size = 0;
+	buff_addr->virtual_base_addr = NULL;
+}
+
+#else
+
+void ddl_pmem_alloc(struct ddl_buf_addr *buff_addr, size_t sz, u32 align)
+{
+	u32 guard_bytes, align_mask;
+	u32 physical_addr;
+	u32 align_offset;
+	u32 alloc_size;
+	struct ddl_context *ddl_context;
+
+	if (!buff_addr) {
+		ERR("\n%s() Invalid Parameters", __func__);
+		return;
+	}
+
+	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
+
+	if (align == DDL_LINEAR_BUFFER_ALIGN_BYTES) {
+
+		guard_bytes = 31;
+		align_mask = 0xFFFFFFE0U;
+
+	} else {
+
+		guard_bytes = DDL_TILE_BUF_ALIGN_GUARD_BYTES;
+		align_mask = DDL_TILE_BUF_ALIGN_MASK;
+	}
+	ddl_context = ddl_get_context();
+	alloc_size = sz + guard_bytes;
+
+	physical_addr = (u32)
+		allocate_contiguous_memory_nomap(alloc_size,
+					ddl_context->memtype, SZ_4K);
+
+	if (!physical_addr) {
+		pr_err("%s(): could not allocate kernel pmem buffers\n",
+		       __func__);
+		goto bailout;
+	}
+	buff_addr->physical_base_addr = (u32 *) physical_addr;
+
+	buff_addr->virtual_base_addr =
+	    (u32 *) ioremap((unsigned long)physical_addr,
+			    sz + guard_bytes);
+	if (!buff_addr->virtual_base_addr) {
+
+		pr_err("%s: could not ioremap in kernel pmem buffers\n",
+		       __func__);
+		free_contiguous_memory_by_paddr(
+			(unsigned long) physical_addr);
+		goto bailout;
+	}
+	memset(buff_addr->virtual_base_addr, 0, sz + guard_bytes);
+	buff_addr->buffer_size = sz;
+
+	buff_addr->align_physical_addr =
+	    (u32 *) ((physical_addr + guard_bytes) & align_mask);
+
+	align_offset =
+	    (u32) (buff_addr->align_physical_addr) - physical_addr;
+
+	buff_addr->align_virtual_addr =
+	    (u32 *) ((u32) (buff_addr->virtual_base_addr)
+		     + align_offset);
+
+	DBG_PMEM("\n%s() OUT: phy_addr(%p) ker_addr(%p) size(%u)", __func__,
+		buff_addr->physical_base_addr, buff_addr->virtual_base_addr,
+		buff_addr->buffer_size);
+
+	return;
+bailout:
+	buff_addr->physical_base_addr = NULL;
+	buff_addr->virtual_base_addr = NULL;
+	buff_addr->buffer_size = 0;
+}
+
+void ddl_pmem_free(struct ddl_buf_addr *buff_addr)
+{
+	if (!buff_addr) {
+		ERR("\n %s() invalid arguments %p", __func__, buff_addr);
+		return;
+	}
+	DBG_PMEM("\n%s() IN: phy_addr(%p) ker_addr(%p) size(%u)", __func__,
+		buff_addr->physical_base_addr, buff_addr->virtual_base_addr,
+		buff_addr->buffer_size);
+
+	if (buff_addr->virtual_base_addr)
+		iounmap((void *)buff_addr->virtual_base_addr);
+	if (buff_addr->physical_base_addr)
+		free_contiguous_memory_by_paddr(
+			(unsigned long) buff_addr->physical_base_addr);
+	DBG_PMEM("\n%s() OUT: phy_addr(%p) ker_addr(%p) size(%u)", __func__,
+		buff_addr->physical_base_addr, buff_addr->virtual_base_addr,
+		buff_addr->buffer_size);
+	buff_addr->buffer_size = 0;
+	buff_addr->physical_base_addr = NULL;
+	buff_addr->virtual_base_addr = NULL;
+}
+#endif
+
+void ddl_set_core_start_time(const char *func_name, u32 index)
+{
+	u32 act_time;
+	struct timeval ddl_tv;
+	struct time_data *time_data = &proc_time[index];
+	do_gettimeofday(&ddl_tv);
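+	/* Convert to milliseconds */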
+	act_time = (ddl_tv.tv_sec * 1000) + (ddl_tv.tv_usec / 1000);
+	if (!time_data->ddl_t1) {
+		time_data->ddl_t1 = act_time;
+		DBG("\n%s(): Start Time (%u)", func_name, act_time);
+	} else {
+		DBG_TIME("\n%s(): Timer already started! St(%u) Act(%u)",
+			func_name, time_data->ddl_t1, act_time);
+	}
+}
+
+void ddl_calc_core_proc_time(const char *func_name, u32 index)
+{
+	struct time_data *time_data = &proc_time[index];
+	if (time_data->ddl_t1) {
+		int ddl_t2;
+		struct timeval ddl_tv;
+		do_gettimeofday(&ddl_tv);
+		ddl_t2 = (ddl_tv.tv_sec * 1000) + (ddl_tv.tv_usec / 1000);
+		time_data->ddl_ttotal += (ddl_t2 - time_data->ddl_t1);
+		time_data->ddl_count++;
+		DBG_TIME("\n%s(): cnt(%u) Diff(%u) Avg(%u)",
+			func_name, time_data->ddl_count,
+			ddl_t2 - time_data->ddl_t1,
+			time_data->ddl_ttotal/time_data->ddl_count);
+		time_data->ddl_t1 = 0;
+	}
+}
+
+void ddl_reset_core_time_variables(u32 index)
+{
+	proc_time[index].ddl_t1 = 0;
+	proc_time[index].ddl_ttotal = 0;
+	proc_time[index].ddl_count = 0;
+}
diff --git a/drivers/video/msm/vidc/720p/ddl/vcd_ddl_utils.h b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_utils.h
new file mode 100644
index 0000000..4d39ef0
--- /dev/null
+++ b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_utils.h
@@ -0,0 +1,60 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _VCD_DDL_UTILS_H_
+#define _VCD_DDL_UTILS_H_
+
+#include "vcd_ddl_core.h"
+#include "vcd_ddl.h"
+
+extern u32 vidc_msg_pmem;
+extern u32 vidc_msg_timing;
+
+enum timing_data {
+	DEC_OP_TIME,
+	DEC_IP_TIME,
+	ENC_OP_TIME,
+	MAX_TIME_DATA
+};
+
+#define DDL_INLINE
+
+#define DDL_ALIGN_SIZE(sz, guard_bytes, align_mask) \
+  (((u32)(sz) + guard_bytes) & align_mask)
+
+#define DDL_MALLOC(x)  kmalloc(x, GFP_KERNEL)
+#define DDL_FREE(x)   { if ((x)) kfree((x)); (x) = NULL; }
+
+#define DBG_PMEM(x...) \
+do { \
+	if (vidc_msg_pmem) \
+		printk(KERN_DEBUG x); \
+} while (0)
+
+void ddl_pmem_alloc(struct ddl_buf_addr *, size_t, u32);
+
+void ddl_pmem_free(struct ddl_buf_addr *);
+
+void ddl_set_core_start_time(const char *func_name, u32 index);
+
+void ddl_calc_core_proc_time(const char *func_name, u32 index);
+
+void ddl_reset_core_time_variables(u32 index);
+
+#define DDL_ASSERT(x)
+#define DDL_MEMSET(src, value, len) memset((src), (value), (len))
+#define DDL_MEMCPY(dest, src, len)  memcpy((dest), (src), (len))
+
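+/* Valid only when align_bytes is a power of two */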
+#define DDL_ADDR_IS_ALIGNED(addr, align_bytes) \
+(!((u32)(addr) & ((align_bytes) - 1)))
+
+#endif
diff --git a/drivers/video/msm/vidc/720p/ddl/vidc.c b/drivers/video/msm/vidc/720p/ddl/vidc.c
new file mode 100644
index 0000000..8e7abc4
--- /dev/null
+++ b/drivers/video/msm/vidc/720p/ddl/vidc.c
@@ -0,0 +1,801 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/unistd.h>
+#include "vidc.h"
+#include "vidc_type.h"
+
+#if DEBUG
+#define DBG(x...) printk(KERN_DEBUG x)
+#else
+#define DBG(x...)
+#endif
+
+#define VIDC_720P_VERSION_STRING "VIDC_V1.0"
+u8 *vidc_base_addr;
+
+#ifdef VIDC_REGISTER_LOG_INTO_BUFFER
+char vidclog[VIDC_REGLOG_BUFSIZE];
+unsigned int vidclog_index;
+#endif
+
+void vidc_720p_set_device_virtual_base(u8 *core_virtual_base_addr)
+{
+	vidc_base_addr = core_virtual_base_addr;
+}
+
+void vidc_720p_init(char **ppsz_version, u32 i_firmware_size,
+		     u32 *pi_firmware_address,
+		     enum vidc_720p_endian dma_endian,
+		     u32 interrupt_off,
+		     enum vidc_720p_interrupt_level_selection
+		     interrupt_sel, u32 interrupt_mask)
+{
+	if (ppsz_version)
+		*ppsz_version = VIDC_720P_VERSION_STRING;
+
+	if (interrupt_sel == VIDC_720P_INTERRUPT_LEVEL_SEL)
+		VIDC_IO_OUT(REG_491082, 0);
+	else
+		VIDC_IO_OUT(REG_491082, 1);
+
+	if (interrupt_off)
+		VIDC_IO_OUT(REG_609676, 1);
+	else
+		VIDC_IO_OUT(REG_609676, 0);
+
+	VIDC_IO_OUT(REG_614776, 1);
+
+	VIDC_IO_OUT(REG_418173, 0);
+
+	VIDC_IO_OUT(REG_418173, interrupt_mask);
+
+	VIDC_IO_OUT(REG_736316, dma_endian);
+
+	VIDC_IO_OUT(REG_215724, 0);
+
+	VIDC_IO_OUT(REG_361582, 1);
+
+	VIDC_IO_OUT(REG_591577, i_firmware_size);
+
+	VIDC_IO_OUT(REG_203921, pi_firmware_address);
+
+	VIDC_IO_OUT(REG_531515_ADDR, 0);
+
+	VIDC_IO_OUT(REG_614413, 1);
+}
+
+u32 vidc_720p_do_sw_reset(void)
+{
+
+	u32 fw_start = 0;
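+	/* Clear CPU_RESET and FW_START, pulse SW_RESET, then
+	 * read FW_START back as the success indicator */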
+	VIDC_BUSY_WAIT(5);
+	VIDC_IO_OUT(REG_224135, 0);
+	VIDC_BUSY_WAIT(5);
+	VIDC_IO_OUT(REG_193553, 0);
+	VIDC_BUSY_WAIT(5);
+	VIDC_IO_OUT(REG_141269, 1);
+	VIDC_BUSY_WAIT(15);
+	VIDC_IO_OUT(REG_141269, 0);
+	VIDC_BUSY_WAIT(5);
+	VIDC_IO_IN(REG_193553, &fw_start);
+
+	if (!fw_start) {
+		DBG("\n VIDC-SW-RESET-FAILS!");
+		return false;
+	}
+	return true;
+}
+
+u32 vidc_720p_reset_is_success(void)
+{
+	u32 stagecounter = 0;
+	VIDC_IO_IN(REG_352831, &stagecounter);
+	stagecounter &= 0xff;
+	if (stagecounter != 0xe5) {
+		DBG("\n VIDC-CPU_RESET-FAILS!");
+		VIDC_IO_OUT(REG_224135, 0);
+		msleep(10);
+		return false;
+	}
+	return true;
+}
+
+void vidc_720p_start_cpu(enum vidc_720p_endian dma_endian,
+						  u32 *icontext_bufferstart,
+						  u32 *debug_core_dump_addr,
+						  u32  debug_buffer_size)
+{
+	u32 dbg_info_input0_reg = 0x1;
+	VIDC_IO_OUT(REG_361582, 0);
+	VIDC_IO_OUT(REG_958768, icontext_bufferstart);
+	VIDC_IO_OUT(REG_736316, dma_endian);
+	if (debug_buffer_size) {
+		dbg_info_input0_reg = (debug_buffer_size << 0x10)
+			| (0x2 << 1) | 0x1;
+		VIDC_IO_OUT(REG_166247, debug_core_dump_addr);
+	}
+	VIDC_IO_OUT(REG_699747, dbg_info_input0_reg);
+	VIDC_IO_OUT(REG_224135, 1);
+}
+
+u32 vidc_720p_cpu_start(void)
+{
+	u32 fw_status = 0x0;
+	VIDC_IO_IN(REG_381535, &fw_status);
+	if (fw_status != 0x02)
+		return false;
+	return true;
+}
+
+void vidc_720p_stop_fw(void)
+{
+	VIDC_IO_OUT(REG_193553, 0);
+	VIDC_IO_OUT(REG_224135, 0);
+}
+
+void vidc_720p_get_interrupt_status(u32 *interrupt_status,
+	u32 *cmd_err_status, u32 *disp_pic_err_status, u32 *op_failed)
+{
+	u32 err_status;
+	VIDC_IO_IN(REG_512143, interrupt_status);
+	VIDC_IO_IN(REG_300310, &err_status);
+	*cmd_err_status = err_status & 0xffff;
+	*disp_pic_err_status = (err_status & 0xffff0000) >> 16;
+	VIDC_IO_INF(REG_724381, OPERATION_FAILED, op_failed);
+}
+
+void vidc_720p_interrupt_done_clear(void)
+{
+	VIDC_IO_OUT(REG_614776, 1);
+	VIDC_IO_OUT(REG_97293, 4);
+}
+
+void vidc_720p_submit_command(u32 ch_id, u32 cmd_id)
+{
+	u32 fw_status;
+	VIDC_IO_OUT(REG_97293, ch_id);
+	VIDC_IO_OUT(REG_62325, cmd_id);
+	VIDC_DEBUG_REGISTER_LOG;
+	VIDC_IO_IN(REG_381535, &fw_status);
+	VIDC_IO_OUT(REG_926519, fw_status);
+}
+
+u32 vidc_720p_engine_reset(u32 ch_id,
+	enum vidc_720p_endian dma_endian,
+	enum vidc_720p_interrupt_level_selection interrupt_sel,
+	u32 interrupt_mask
+)
+{
+	u32 op_done = 0;
+	u32 counter = 0;
+
+	VIDC_LOGERR_STRING("ENG-RESET!!");
+	/* issue the engine reset command */
+	vidc_720p_submit_command(ch_id, VIDC_720P_CMD_MFC_ENGINE_RESET);
+
+	do {
+		VIDC_BUSY_WAIT(20);
+		VIDC_IO_IN(REG_982553, &op_done);
+		counter++;
+	} while (!op_done && counter < 10);
+
+	if (!op_done) {
+		/* Reset failed */
+		return false;
+	}
+
+	/* write invalid channel id */
+	VIDC_IO_OUT(REG_97293, 4);
+
+	/* Set INT_PULSE_SEL */
+	if (interrupt_sel == VIDC_720P_INTERRUPT_LEVEL_SEL)
+		VIDC_IO_OUT(REG_491082, 0);
+	else
+		VIDC_IO_OUT(REG_491082, 1);
+
+	if (!interrupt_mask) {
+		/* Disable interrupt */
+		VIDC_IO_OUT(REG_609676, 1);
+	} else {
+		/* Enable interrupt */
+		VIDC_IO_OUT(REG_609676, 0);
+	}
+
+	/* Clear any pending interrupt */
+	VIDC_IO_OUT(REG_614776, 1);
+
+	/* Set INT_ENABLE_REG */
+	VIDC_IO_OUT(REG_418173, interrupt_mask);
+
+	/* Set the DMA endianness */
+	VIDC_IO_OUT(REG_736316, dma_endian);
+
+	/* Restore ARM endianness */
+	VIDC_IO_OUT(REG_215724, 0);
+
+	/* return engine reset success */
+	return true;
+}
+
+void vidc_720p_set_channel(u32 i_ch_id,
+			    enum vidc_720p_enc_dec_selection
+			    enc_dec_sel, enum vidc_720p_codec codec,
+			    u32 *pi_fw, u32 i_firmware_size)
+{
+	u32 std_sel = 0;
+	VIDC_IO_OUT(REG_661565, 0);
+
+	if (enc_dec_sel)
+		std_sel = VIDC_REG_713080_ENC_ON_BMSK;
+
+	std_sel |= (u32) codec;
+
+	VIDC_IO_OUT(REG_713080, std_sel);
+
+	switch (codec) {
+	default:
+	case VIDC_720P_DIVX:
+	case VIDC_720P_XVID:
+	case VIDC_720P_MPEG4:
+		{
+			if (enc_dec_sel == VIDC_720P_ENCODER)
+				VIDC_IO_OUT(REG_765787, pi_fw);
+			else
+				VIDC_IO_OUT(REG_225040, pi_fw);
+			break;
+		}
+	case VIDC_720P_H264:
+		{
+			if (enc_dec_sel == VIDC_720P_ENCODER)
+				VIDC_IO_OUT(REG_942456, pi_fw);
+			else
+				VIDC_IO_OUT(REG_942170_ADDR_3, pi_fw);
+			break;
+		}
+	case VIDC_720P_H263:
+		{
+			if (enc_dec_sel == VIDC_720P_ENCODER)
+				VIDC_IO_OUT(REG_765787, pi_fw);
+			else
+				VIDC_IO_OUT(REG_942170_ADDR_6, pi_fw);
+			break;
+		}
+	case VIDC_720P_VC1:
+		{
+			VIDC_IO_OUT(REG_880188, pi_fw);
+			break;
+		}
+	case VIDC_720P_MPEG2:
+		{
+			VIDC_IO_OUT(REG_40293, pi_fw);
+			break;
+		}
+	}
+	VIDC_IO_OUT(REG_591577, i_firmware_size);
+
+	vidc_720p_submit_command(i_ch_id, VIDC_720P_CMD_CHSET);
+}
+
+void vidc_720p_encode_set_profile(u32 i_profile, u32 i_level)
+{
+	u32 profile_level = i_profile|(i_level << 0x8);
+	VIDC_IO_OUT(REG_839021, profile_level);
+}
+
+void vidc_720p_set_frame_size(u32 i_size_x, u32 i_size_y)
+{
+	VIDC_IO_OUT(REG_999267, i_size_x);
+
+	VIDC_IO_OUT(REG_345712, i_size_y);
+}
+
+void vidc_720p_encode_set_fps(u32 i_rc_frame_rate)
+{
+	VIDC_IO_OUT(REG_625444, i_rc_frame_rate);
+}
+
+void vidc_720p_encode_set_short_header(u32 i_short_header)
+{
+	VIDC_IO_OUT(REG_314290, i_short_header);
+}
+
+void vidc_720p_encode_set_vop_time(u32 vop_time_resolution,
+				    u32 vop_time_increment)
+{
+	u32 enable_vop, vop_timing_reg;
+	if (!vop_time_resolution) {
+		VIDC_IO_OUT(REG_64895, 0x0);
+	} else {
+		enable_vop = 0x1;
+		vop_timing_reg = (enable_vop << 0x1f) |
+		(vop_time_resolution << 0x10) | vop_time_increment;
+		VIDC_IO_OUT(REG_64895, vop_timing_reg);
+	}
+}
+
+void vidc_720p_encode_set_hec_period(u32 hec_period)
+{
+	VIDC_IO_OUT(REG_407718, hec_period);
+}
+
+void vidc_720p_encode_set_qp_params(u32 i_max_qp, u32 i_min_qp)
+{
+	u32 qp = i_min_qp | (i_max_qp << 0x8);
+	VIDC_IO_OUT(REG_734318, qp);
+}
+
+void vidc_720p_encode_set_rc_config(u32 enable_frame_level_rc,
+				     u32 enable_mb_level_rc_flag,
+				     u32 i_frame_qp, u32 pframe_qp)
+{
+	u32 rc_config = i_frame_qp;
+
+	if (enable_frame_level_rc)
+		rc_config |= (0x1 << 0x9);
+
+	if (enable_mb_level_rc_flag)
+		rc_config |= (0x1 << 0x8);
+
+	VIDC_IO_OUT(REG_58211, rc_config);
+	VIDC_IO_OUT(REG_548359, pframe_qp);
+}
+
+void vidc_720p_encode_set_bit_rate(u32 i_target_bitrate)
+{
+	VIDC_IO_OUT(REG_174150, i_target_bitrate);
+}
+
+void vidc_720p_encoder_set_param_change(u32 enc_param_change)
+{
+	VIDC_IO_OUT(REG_804959, enc_param_change);
+}
+
+void vidc_720p_encode_set_control_param(u32 param_val)
+{
+	VIDC_IO_OUT(REG_128234, param_val);
+}
+
+void vidc_720p_encode_set_frame_level_rc_params(u32 i_reaction_coeff)
+{
+	VIDC_IO_OUT(REG_677784, i_reaction_coeff);
+}
+
+void vidc_720p_encode_set_mb_level_rc_params(u32 dark_region_as_flag,
+					      u32 smooth_region_as_flag,
+					      u32 static_region_as_flag,
+					      u32 activity_region_flag)
+{
+	u32 mb_level_rc = 0x0;
+	if (activity_region_flag)
+		mb_level_rc |= 0x1;
+	if (static_region_as_flag)
+		mb_level_rc |= (0x1 << 0x1);
+	if (smooth_region_as_flag)
+		mb_level_rc |= (0x1 << 0x2);
+	if (dark_region_as_flag)
+		mb_level_rc |= (0x1 << 0x3);
+	/* Write MB level rate control */
+	VIDC_IO_OUT(REG_995041, mb_level_rc);
+}
+
+void vidc_720p_encode_set_entropy_control(enum vidc_720p_entropy_sel
+					   entropy_sel,
+					   enum vidc_720p_cabac_model
+					   cabac_model_number)
+{
+	u32 num;
+	u32 entropy_params = (u32)entropy_sel;
+	/* Set Model Number */
+	if (entropy_sel == VIDC_720P_ENTROPY_SEL_CABAC) {
+		num = (u32)cabac_model_number;
+		entropy_params |= (num << 0x2);
+	}
+	/* Set Entropy parameters */
+	VIDC_IO_OUT(REG_504878, entropy_params);
+}
+
+void vidc_720p_encode_set_db_filter_control(enum vidc_720p_DBConfig
+					     db_config,
+					     u32 i_slice_alpha_offset,
+					     u32 i_slice_beta_offset)
+{
+	u32 deblock_params;
+	deblock_params = (u32)db_config;
+	deblock_params |=
+		((i_slice_beta_offset << 0x2) | (i_slice_alpha_offset << 0x7));
+
+	/* Write deblocking control settings */
+	VIDC_IO_OUT(REG_458130, deblock_params);
+}
+
+void vidc_720p_encode_set_intra_refresh_mb_number(u32 i_cir_mb_number)
+{
+	VIDC_IO_OUT(REG_857491, i_cir_mb_number);
+}
+
+void vidc_720p_encode_set_multi_slice_info(enum
+					    vidc_720p_MSlice_selection
+					    m_slice_sel,
+					    u32 multi_slice_size)
+{
+	switch (m_slice_sel) {
+	case VIDC_720P_MSLICE_BY_MB_COUNT:
+		{
+			VIDC_IO_OUT(REG_588301, 0x1);
+			VIDC_IO_OUT(REG_1517, m_slice_sel);
+			VIDC_IO_OUT(REG_105335, multi_slice_size);
+			break;
+		}
+	case VIDC_720P_MSLICE_BY_BYTE_COUNT:
+		{
+			VIDC_IO_OUT(REG_588301, 0x1);
+			VIDC_IO_OUT(REG_1517, m_slice_sel);
+			VIDC_IO_OUT(REG_561679, multi_slice_size);
+			break;
+		}
+	case VIDC_720P_MSLICE_BY_GOB:
+		{
+			VIDC_IO_OUT(REG_588301, 0x1);
+			break;
+		}
+	default:
+	case VIDC_720P_MSLICE_OFF:
+		{
+			VIDC_IO_OUT(REG_588301, 0x0);
+			break;
+		}
+	}
+}
+
+void vidc_720p_encode_set_dpb_buffer(u32 *pi_enc_dpb_addr, u32 alloc_len)
+{
+	VIDC_IO_OUT(REG_341928_ADDR, pi_enc_dpb_addr);
+	VIDC_IO_OUT(REG_319934, alloc_len);
+}
+
+void vidc_720p_encode_set_i_period(u32 i_i_period)
+{
+	VIDC_IO_OUT(REG_950374, i_i_period);
+}
+
+void vidc_720p_encode_init_codec(u32 i_ch_id,
+				  enum vidc_720p_memory_access_method
+				  memory_access_model)
+{
+
+	VIDC_IO_OUT(REG_841539, memory_access_model);
+	vidc_720p_submit_command(i_ch_id, VIDC_720P_CMD_INITCODEC);
+}
+
+void vidc_720p_encode_unalign_bitstream(u32 upper_unalign_word,
+					 u32 lower_unalign_word)
+{
+	VIDC_IO_OUT(REG_792026, upper_unalign_word);
+	VIDC_IO_OUT(REG_844152, lower_unalign_word);
+}
+
+void vidc_720p_encode_set_seq_header_buffer(u32 ext_buffer_start,
+					     u32 ext_buffer_end,
+					     u32 start_byte_num)
+{
+	VIDC_IO_OUT(REG_275113_ADDR, ext_buffer_start);
+
+	VIDC_IO_OUT(REG_87912, ext_buffer_start);
+
+	VIDC_IO_OUT(REG_988007_ADDR, ext_buffer_end);
+
+	VIDC_IO_OUT(REG_66693, start_byte_num);
+}
+
+void vidc_720p_encode_frame(u32 ch_id,
+			     u32 ext_buffer_start,
+			     u32 ext_buffer_end,
+			     u32 start_byte_number, u32 y_addr,
+			     u32 c_addr)
+{
+	VIDC_IO_OUT(REG_275113_ADDR, ext_buffer_start);
+
+	VIDC_IO_OUT(REG_988007_ADDR, ext_buffer_end);
+
+	VIDC_IO_OUT(REG_87912, ext_buffer_start);
+
+	VIDC_IO_OUT(REG_66693, start_byte_number);
+
+	VIDC_IO_OUT(REG_99105, y_addr);
+
+	VIDC_IO_OUT(REG_777113_ADDR, c_addr);
+
+	vidc_720p_submit_command(ch_id, VIDC_720P_CMD_FRAMERUN);
+}
+
+void vidc_720p_encode_get_header(u32 *pi_enc_header_size)
+{
+	VIDC_IO_IN(REG_114286, pi_enc_header_size);
+}
+
+void vidc_720p_enc_frame_info(struct vidc_720p_enc_frame_info
+			       *enc_frame_info)
+{
+	VIDC_IO_IN(REG_782249, &enc_frame_info->enc_size);
+
+	VIDC_IO_IN(REG_441270, &enc_frame_info->frame);
+
+	enc_frame_info->frame &= 0x03;
+
+	VIDC_IO_IN(REG_613254,
+		    &enc_frame_info->metadata_exists);
+}
+
+void vidc_720p_decode_bitstream_header(u32 ch_id,
+					u32 dec_unit_size,
+					u32 start_byte_num,
+					u32 ext_buffer_start,
+					u32 ext_buffer_end,
+					enum
+					vidc_720p_memory_access_method
+					memory_access_model,
+					u32 decode_order)
+{
+	VIDC_IO_OUT(REG_965480, decode_order);
+
+	VIDC_IO_OUT(REG_639999, 0x8080);
+
+	VIDC_IO_OUT(REG_275113_ADDR, ext_buffer_start);
+
+	VIDC_IO_OUT(REG_988007_ADDR, ext_buffer_end);
+
+	VIDC_IO_OUT(REG_87912, ext_buffer_end);
+
+	VIDC_IO_OUT(REG_761892, dec_unit_size);
+
+	VIDC_IO_OUT(REG_66693, start_byte_num);
+
+	VIDC_IO_OUT(REG_841539, memory_access_model);
+
+	vidc_720p_submit_command(ch_id, VIDC_720P_CMD_INITCODEC);
+}
+
+void vidc_720p_decode_get_seq_hdr_info(struct vidc_720p_seq_hdr_info
+					*seq_hdr_info)
+{
+	u32 display_status;
+	VIDC_IO_IN(REG_999267, &seq_hdr_info->img_size_x);
+
+	VIDC_IO_IN(REG_345712, &seq_hdr_info->img_size_y);
+
+	VIDC_IO_IN(REG_257463, &seq_hdr_info->min_num_dpb);
+
+	VIDC_IO_IN(REG_854281, &seq_hdr_info->min_dpb_size);
+
+	VIDC_IO_IN(REG_580603, &seq_hdr_info->dec_frm_size);
+
+	VIDC_IO_INF(REG_606447, DISP_PIC_PROFILE,
+				 &seq_hdr_info->profile);
+
+	VIDC_IO_INF(REG_606447, DIS_PIC_LEVEL,
+				 &seq_hdr_info->level);
+
+	VIDC_IO_INF(REG_612715, DISPLAY_STATUS,
+				&display_status);
+	seq_hdr_info->progressive =
+			((display_status & 0x4) >> 2);
+	/* bit 3 is for crop existence */
+	seq_hdr_info->crop_exists = ((display_status & 0x8) >> 3);
+
+	if (seq_hdr_info->crop_exists) {
+		/* read the cropping information */
+		VIDC_IO_INF(REG_881638, CROP_RIGHT_OFFSET,
+			&seq_hdr_info->crop_right_offset);
+		VIDC_IO_INF(REG_881638, CROP_LEFT_OFFSET,
+			&seq_hdr_info->crop_left_offset);
+		VIDC_IO_INF(REG_161486, CROP_BOTTOM_OFFSET,
+			&seq_hdr_info->crop_bottom_offset);
+		VIDC_IO_INF(REG_161486, CROP_TOP_OFFSET,
+			&seq_hdr_info->crop_top_offset);
+	}
+	/* Read the MPEG4 data partitioning indication */
+	VIDC_IO_INF(REG_441270, DATA_PARTITIONED,
+				&seq_hdr_info->data_partitioned);
+
+}
+
+void vidc_720p_decode_set_dpb_release_buffer_mask(u32
+						   i_dpb_release_buffer_mask)
+{
+	VIDC_IO_OUT(REG_603032, i_dpb_release_buffer_mask);
+}
+
+void vidc_720p_decode_set_dpb_buffers(u32 i_buf_index, u32 *pi_dpb_buffer)
+{
+	VIDC_IO_OUTI(REG_615716, i_buf_index, pi_dpb_buffer);
+}
+
+void vidc_720p_decode_set_comv_buffer(u32 *pi_dpb_comv_buffer,
+				       u32 alloc_len)
+{
+	VIDC_IO_OUT(REG_456376_ADDR, pi_dpb_comv_buffer);
+
+	VIDC_IO_OUT(REG_490443, alloc_len);
+}
+
+void vidc_720p_decode_set_dpb_details(u32 num_dpb, u32 alloc_len,
+				       u32 *ref_buffer)
+{
+	VIDC_IO_OUT(REG_518133, ref_buffer);
+
+	VIDC_IO_OUT(REG_267567, 0);
+
+	VIDC_IO_OUT(REG_883500, num_dpb);
+
+	VIDC_IO_OUT(REG_319934, alloc_len);
+}
+
+void vidc_720p_decode_set_mpeg4Post_filter(u32 enable_post_filter)
+{
+	if (enable_post_filter)
+		VIDC_IO_OUT(REG_443811, 0x1);
+	else
+		VIDC_IO_OUT(REG_443811, 0x0);
+}
+
+void vidc_720p_decode_set_error_control(u32 enable_error_control)
+{
+	if (enable_error_control)
+		VIDC_IO_OUT(REG_846346, 0);
+	else
+		VIDC_IO_OUT(REG_846346, 1);
+}
+
+void vidc_720p_set_deblock_line_buffer(u32 *pi_deblock_line_buffer_start,
+					u32 alloc_len)
+{
+	VIDC_IO_OUT(REG_979942, pi_deblock_line_buffer_start);
+
+	VIDC_IO_OUT(REG_101184, alloc_len);
+}
+
+void vidc_720p_decode_set_mpeg4_data_partitionbuffer(u32 *vsp_buf_start)
+{
+	VIDC_IO_OUT(REG_958768, vsp_buf_start);
+}
+
+void vidc_720p_decode_setH264VSPBuffer(u32 *pi_vsp_temp_buffer_start)
+{
+	VIDC_IO_OUT(REG_958768, pi_vsp_temp_buffer_start);
+}
+
+void vidc_720p_decode_frame(u32 ch_id, u32 ext_buffer_start,
+			     u32 ext_buffer_end, u32 dec_unit_size,
+			     u32 start_byte_num, u32 input_frame_tag)
+{
+	VIDC_IO_OUT(REG_275113_ADDR, ext_buffer_start);
+
+	VIDC_IO_OUT(REG_988007_ADDR, ext_buffer_end);
+
+	VIDC_IO_OUT(REG_87912, ext_buffer_end);
+
+	VIDC_IO_OUT(REG_66693, start_byte_num);
+
+	VIDC_IO_OUT(REG_94750, input_frame_tag);
+
+	VIDC_IO_OUT(REG_761892, dec_unit_size);
+
+	vidc_720p_submit_command(ch_id, VIDC_720P_CMD_FRAMERUN);
+}
+
+void vidc_720p_issue_eos(u32 i_ch_id)
+{
+	VIDC_IO_OUT(REG_896825, 0x1);
+
+	VIDC_IO_OUT(REG_761892, 0);
+
+	vidc_720p_submit_command(i_ch_id, VIDC_720P_CMD_FRAMERUN);
+}
+
+void vidc_720p_eos_info(u32 *disp_status, u32 *resl_change)
+{
+	VIDC_IO_INF(REG_612715, DISPLAY_STATUS, disp_status);
+	(*disp_status) = (*disp_status) & 0x3;
+	VIDC_IO_INF(REG_724381, RESOLUTION_CHANGE, resl_change);
+}
+
+void vidc_720p_decode_display_info(struct vidc_720p_dec_disp_info
+				    *disp_info)
+{
+	u32 display_status = 0;
+	VIDC_IO_INF(REG_612715, DISPLAY_STATUS, &display_status);
+
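+	/* DISPLAY_STATUS bits decoded below: [1:0] status,
+	 * [2] interlaced, [3] crop present, [5:4] resolution change */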
+	disp_info->disp_status =
+	    (enum vidc_720p_display_status)((display_status & 0x3));
+
+	disp_info->disp_is_interlace = ((display_status & 0x4) >> 2);
+	disp_info->crop_exists = ((display_status & 0x8) >> 3);
+
+	disp_info->resl_change = ((display_status & 0x30) >> 4);
+
+	VIDC_IO_INF(REG_724381, RESOLUTION_CHANGE,
+		     &disp_info->reconfig_flush_done);
+
+	VIDC_IO_IN(REG_999267, &disp_info->img_size_x);
+
+	VIDC_IO_IN(REG_345712, &disp_info->img_size_y);
+	VIDC_IO_IN(REG_151345, &disp_info->y_addr);
+	VIDC_IO_IN(REG_293983, &disp_info->c_addr);
+	VIDC_IO_IN(REG_370409, &disp_info->tag_top);
+	VIDC_IO_IN(REG_438677, &disp_info->tag_bottom);
+	VIDC_IO_IN(REG_679165, &disp_info->pic_time_top);
+	VIDC_IO_IN(REG_374150, &disp_info->pic_time_bottom);
+
+	if (disp_info->crop_exists) {
+		VIDC_IO_INF(REG_881638, CROP_RIGHT_OFFSET,
+			&disp_info->crop_right_offset);
+		VIDC_IO_INF(REG_881638, CROP_LEFT_OFFSET,
+			&disp_info->crop_left_offset);
+		VIDC_IO_INF(REG_161486, CROP_BOTTOM_OFFSET,
+			&disp_info->crop_bottom_offset);
+		VIDC_IO_INF(REG_161486, CROP_TOP_OFFSET,
+			&disp_info->crop_top_offset);
+	}
+	VIDC_IO_IN(REG_613254, &disp_info->metadata_exists);
+
+	VIDC_IO_IN(REG_580603,
+		    &disp_info->input_bytes_consumed);
+
+	VIDC_IO_IN(REG_757835, &disp_info->input_frame_num);
+
+	VIDC_IO_INF(REG_441270, FRAME_TYPE,
+			   &disp_info->input_frame);
+
+	disp_info->input_is_interlace =
+	    ((disp_info->input_frame & 0x4) >> 2);
+
+	disp_info->input_frame &= 0x3;
+}
+
+void vidc_720p_decode_skip_frm_details(u32 *free_luma_dpb)
+{
+	u32 disp_frm;
+	VIDC_IO_IN(REG_697961, &disp_frm);
+
+	if (disp_frm == VIDC_720P_NOTCODED)
+		VIDC_IO_IN(REG_347105, free_luma_dpb);
+}
+
+void vidc_720p_metadata_enable(u32 flag, u32 *input_buffer)
+{
+	VIDC_IO_OUT(REG_854681, flag);
+	VIDC_IO_OUT(REG_988552, input_buffer);
+}
+
+void vidc_720p_decode_dynamic_req_reset(void)
+{
+	VIDC_IO_OUT(REG_76706, 0x0);
+	VIDC_IO_OUT(REG_147682, 0x0);
+	VIDC_IO_OUT(REG_896825, 0x0);
+}
+
+void vidc_720p_decode_dynamic_req_set(u32 property)
+{
+	if (property == VIDC_720P_FLUSH_REQ)
+		VIDC_IO_OUT(REG_76706, 0x1);
+	else if (property == VIDC_720P_EXTRADATA)
+		VIDC_IO_OUT(REG_147682, 0x1);
+}
+
+void vidc_720p_decode_setpassthrough_start(u32 pass_startaddr)
+{
+	VIDC_IO_OUT(REG_486169, pass_startaddr);
+}
diff --git a/drivers/video/msm/vidc/720p/ddl/vidc.h b/drivers/video/msm/vidc/720p/ddl/vidc.h
new file mode 100644
index 0000000..685b7cc
--- /dev/null
+++ b/drivers/video/msm/vidc/720p/ddl/vidc.h
@@ -0,0 +1,2704 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef VIDC_H
+#define VIDC_H
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <asm/system.h>
+
+#define VIDC_720P_IN(reg)                       VIDC_##reg##_IN
+#define VIDC_720P_INM(reg,  mask)                VIDC_##reg##_INM(mask)
+#define VIDC_720P_OUT(reg,  val)                 VIDC_##reg##_OUT(val)
+#define VIDC_720P_OUTI(reg,  index,  val)         VIDC_##reg##_OUTI(index, val)
+#define VIDC_720P_OUTM(reg,  mask,  val)          VIDC_##reg##_OUTM(mask,  val)
+#define VIDC_720P_SHFT(reg,  field)              VIDC_##reg##_##field##_SHFT
+#define VIDC_720P_FMSK(reg,  field)              VIDC_##reg##_##field##_BMSK
+
+#define VIDC_720P_INF(io, field) (VIDC_720P_INM(io, VIDC_720P_FMSK(io, field)) \
+		>> VIDC_720P_SHFT(io,  field))
+#define VIDC_720P_OUTF(io, field, val) \
+		VIDC_720P_OUTM(io, VIDC_720P_FMSK(io, field), \
+		val << VIDC_720P_SHFT(io,  field))
+
+#define __inpdw(port)	ioread32(port)
+#define __outpdw(port,  val) iowrite32(val, port)
+
+#define in_dword_masked(addr,  mask) (__inpdw(addr) & (mask))
+
+#define out_dword(addr,  val)        __outpdw(addr, val)
+
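+/* Read-modify-write helpers: the first form updates a caller-maintained
+ * shadow value, the _ns form takes the register's current contents */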
+#define out_dword_masked(io,  mask,  val,  shadow)  \
+do { \
+	shadow = (shadow & (u32)(~(mask))) | ((u32)((val) & (mask))); \
+	(void) out_dword(io,  shadow); \
+} while (0)
+
+#define out_dword_masked_ns(io,  mask,  val,  current_reg_content) \
+	(void) out_dword(io,  ((current_reg_content & (u32)(~(mask))) | \
+				((u32)((val) & (mask)))))
+
+extern u8 *vidc_base_addr;
+
+#define VIDC720P_BASE  vidc_base_addr
+#define VIDC_720P_WRAPPER_REG_BASE               (VIDC720P_BASE + \
+		0x00000000)
+#define VIDC_720P_WRAPPER_REG_BASE_PHYS          VIDC_720P_BASE_PHYS
+
+#define VIDC_REG_614413_ADDR                     \
+	(VIDC_720P_WRAPPER_REG_BASE      + 00000000)
+#define VIDC_REG_614413_PHYS                     \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 00000000)
+#define VIDC_REG_614413_RMSK                            0x1
+#define VIDC_REG_614413_SHFT                              0
+#define VIDC_REG_614413_IN                       \
+	in_dword_masked(VIDC_REG_614413_ADDR,        \
+		VIDC_REG_614413_RMSK)
+#define VIDC_REG_614413_INM(m)                   \
+	in_dword_masked(VIDC_REG_614413_ADDR,  m)
+#define VIDC_REG_614413_OUT(v)                   \
+	out_dword(VIDC_REG_614413_ADDR, v)
+#define VIDC_REG_614413_OUTM(m, v)                \
+do { \
+	out_dword_masked_ns(VIDC_REG_614413_ADDR, m, v, \
+			VIDC_REG_614413_IN); \
+} while (0)
+#define VIDC_REG_614413_DMA_START_BMSK                  0x1
+#define VIDC_REG_614413_DMA_START_SHFT                    0
+
+#define VIDC_REG_591577_ADDR                 \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x0000000c)
+#define VIDC_REG_591577_PHYS                 \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x0000000c)
+#define VIDC_REG_591577_RMSK                 0xffffffff
+#define VIDC_REG_591577_SHFT                          0
+#define VIDC_REG_591577_IN                   \
+	in_dword_masked(VIDC_REG_591577_ADDR,  \
+			VIDC_REG_591577_RMSK)
+#define VIDC_REG_591577_INM(m)               \
+	in_dword_masked(VIDC_REG_591577_ADDR,  m)
+#define VIDC_REG_591577_OUT(v)               \
+	out_dword(VIDC_REG_591577_ADDR, v)
+#define VIDC_REG_591577_OUTM(m, v)            \
+do { \
+	out_dword_masked_ns(VIDC_REG_591577_ADDR, m, v, \
+			VIDC_REG_591577_IN); \
+} while (0)
+#define VIDC_REG_591577_BOOTCODE_SIZE_BMSK   0xffffffff
+#define VIDC_REG_591577_BOOTCODE_SIZE_SHFT            0
+
+#define VIDC_REG_203921_ADDR                   \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000014)
+#define VIDC_REG_203921_PHYS                   \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000014)
+#define VIDC_REG_203921_RMSK                   0xffffffff
+#define VIDC_REG_203921_SHFT                            0
+#define VIDC_REG_203921_IN                     \
+	in_dword_masked(VIDC_REG_203921_ADDR,  \
+			VIDC_REG_203921_RMSK)
+#define VIDC_REG_203921_INM(m)                 \
+	in_dword_masked(VIDC_REG_203921_ADDR,  m)
+#define VIDC_REG_203921_OUT(v)                 \
+	out_dword(VIDC_REG_203921_ADDR, v)
+#define VIDC_REG_203921_OUTM(m, v)              \
+do { \
+	out_dword_masked_ns(VIDC_REG_203921_ADDR, m, v, \
+			VIDC_REG_203921_IN); \
+} while (0)
+#define VIDC_REG_203921_DMA_EXTADDR_BMSK       0xffffffff
+#define VIDC_REG_203921_DMA_EXTADDR_SHFT                0
+
+#define VIDC_REG_275113_ADDR_ADDR            \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000018)
+#define VIDC_REG_275113_ADDR_PHYS            \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000018)
+#define VIDC_REG_275113_ADDR_RMSK            0xffffffff
+#define VIDC_REG_275113_ADDR_SHFT                     0
+#define VIDC_REG_275113_ADDR_IN              \
+	in_dword_masked(VIDC_REG_275113_ADDR_ADDR,  \
+			VIDC_REG_275113_ADDR_RMSK)
+#define VIDC_REG_275113_ADDR_INM(m)          \
+	in_dword_masked(VIDC_REG_275113_ADDR_ADDR,  m)
+#define VIDC_REG_275113_ADDR_OUT(v)          \
+	out_dword(VIDC_REG_275113_ADDR_ADDR, v)
+#define VIDC_REG_275113_ADDR_OUTM(m, v)       \
+do { \
+	out_dword_masked_ns(VIDC_REG_275113_ADDR_ADDR, m, v, \
+			VIDC_REG_275113_ADDR_IN); \
+} while (0)
+#define VIDC_REG_742076_ADDR_BMSK 0xffffffff
+#define VIDC_REG_742076_ADDR_SHFT          0
+
+#define VIDC_REG_988007_ADDR_ADDR              \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x0000001c)
+#define VIDC_REG_988007_ADDR_PHYS              \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x0000001c)
+#define VIDC_REG_988007_ADDR_RMSK              0xffffffff
+#define VIDC_REG_988007_ADDR_SHFT                       0
+#define VIDC_REG_988007_ADDR_IN                \
+	in_dword_masked(VIDC_REG_988007_ADDR_ADDR,  \
+			VIDC_REG_988007_ADDR_RMSK)
+#define VIDC_REG_988007_ADDR_INM(m)            \
+	in_dword_masked(VIDC_REG_988007_ADDR_ADDR,  m)
+#define VIDC_REG_988007_ADDR_OUT(v)            \
+	out_dword(VIDC_REG_988007_ADDR_ADDR, v)
+#define VIDC_REG_988007_ADDR_OUTM(m, v)         \
+do { \
+	out_dword_masked_ns(VIDC_REG_988007_ADDR_ADDR, m, v, \
+			VIDC_REG_988007_ADDR_IN); \
+} while (0)
+#define VIDC_REG_988007_ADDR_EXT_BUF_END_ADDR_BMSK 0xffffffff
+#define VIDC_REG_988007_ADDR_EXT_BUF_END_ADDR_SHFT          0
+
+#define VIDC_REG_531515_ADDR_ADDR                  \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000020)
+#define VIDC_REG_531515_ADDR_PHYS                  \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000020)
+#define VIDC_REG_531515_ADDR_RMSK                  0xffffffff
+#define VIDC_REG_531515_ADDR_SHFT                           0
+#define VIDC_REG_531515_ADDR_IN                    \
+	in_dword_masked(VIDC_REG_531515_ADDR_ADDR,  \
+			VIDC_REG_531515_ADDR_RMSK)
+#define VIDC_REG_531515_ADDR_INM(m)                \
+	in_dword_masked(VIDC_REG_531515_ADDR_ADDR,  m)
+#define VIDC_REG_531515_ADDR_OUT(v)                \
+	out_dword(VIDC_REG_531515_ADDR_ADDR, v)
+#define VIDC_REG_531515_ADDR_OUTM(m, v)             \
+do { \
+	out_dword_masked_ns(VIDC_REG_531515_ADDR_ADDR, m, v, \
+			VIDC_REG_531515_ADDR_IN); \
+} while (0)
+#define VIDC_REG_531515_ADDR_DMA_INT_ADDR_BMSK     0xffffffff
+#define VIDC_REG_531515_ADDR_DMA_INT_ADDR_SHFT              0
+
+#define VIDC_REG_87912_ADDR                 \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000024)
+#define VIDC_REG_87912_PHYS                 \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000024)
+#define VIDC_REG_87912_RMSK                 0xffffffff
+#define VIDC_REG_87912_SHFT                          0
+#define VIDC_REG_87912_IN                   \
+	in_dword_masked(VIDC_REG_87912_ADDR,  \
+			VIDC_REG_87912_RMSK)
+#define VIDC_REG_87912_INM(m)               \
+	in_dword_masked(VIDC_REG_87912_ADDR,  m)
+#define VIDC_REG_87912_OUT(v)               \
+	out_dword(VIDC_REG_87912_ADDR, v)
+#define VIDC_REG_87912_OUTM(m, v)            \
+do { \
+	out_dword_masked_ns(VIDC_REG_87912_ADDR, m, v, \
+			VIDC_REG_87912_IN); \
+} while (0)
+#define VIDC_REG_87912_HOST_PTR_ADDR_BMSK   0xffffffff
+#define VIDC_REG_87912_HOST_PTR_ADDR_SHFT            0
+
+#define VIDC_REG_896825_ADDR                      \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000028)
+#define VIDC_REG_896825_PHYS                      \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000028)
+#define VIDC_REG_896825_RMSK                             0x1
+#define VIDC_REG_896825_SHFT                               0
+#define VIDC_REG_896825_IN                        \
+	in_dword_masked(VIDC_REG_896825_ADDR,         \
+	VIDC_REG_896825_RMSK)
+#define VIDC_REG_896825_INM(m)                    \
+	in_dword_masked(VIDC_REG_896825_ADDR,  m)
+#define VIDC_REG_896825_OUT(v)                    \
+	out_dword(VIDC_REG_896825_ADDR, v)
+#define VIDC_REG_896825_OUTM(m, v)                 \
+do { \
+	out_dword_masked_ns(VIDC_REG_896825_ADDR, m, v, \
+			VIDC_REG_896825_IN); \
+} while (0)
+#define VIDC_REG_896825_LAST_DEC_BMSK                    0x1
+#define VIDC_REG_896825_LAST_DEC_SHFT                      0
+
+#define VIDC_REG_174526_ADDR                        \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x0000002c)
+#define VIDC_REG_174526_PHYS                        \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x0000002c)
+#define VIDC_REG_174526_RMSK                               0x1
+#define VIDC_REG_174526_SHFT                                 0
+#define VIDC_REG_174526_IN                          \
+	in_dword_masked(VIDC_REG_174526_ADDR,  VIDC_REG_174526_RMSK)
+#define VIDC_REG_174526_INM(m)                      \
+	in_dword_masked(VIDC_REG_174526_ADDR,  m)
+#define VIDC_REG_174526_DONE_M_BMSK                        0x1
+#define VIDC_REG_174526_DONE_M_SHFT                          0
+
+#define VIDC_REG_736316_ADDR                   \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000044)
+#define VIDC_REG_736316_PHYS                   \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000044)
+#define VIDC_REG_736316_RMSK                          0x1
+#define VIDC_REG_736316_SHFT                            0
+#define VIDC_REG_736316_IN                     \
+	in_dword_masked(VIDC_REG_736316_ADDR,  \
+			VIDC_REG_736316_RMSK)
+#define VIDC_REG_736316_INM(m)                 \
+	in_dword_masked(VIDC_REG_736316_ADDR,  m)
+#define VIDC_REG_736316_OUT(v)                 \
+	out_dword(VIDC_REG_736316_ADDR, v)
+#define VIDC_REG_736316_OUTM(m, v)              \
+do { \
+	out_dword_masked_ns(VIDC_REG_736316_ADDR, m, v, \
+			VIDC_REG_736316_IN); \
+} while (0)
+#define VIDC_REG_736316_BITS_ENDIAN_BMSK              0x1
+#define VIDC_REG_736316_BITS_ENDIAN_SHFT                0
+
+#define VIDC_REG_761892_ADDR                 \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000054)
+#define VIDC_REG_761892_PHYS                 \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000054)
+#define VIDC_REG_761892_RMSK                 0xffffffff
+#define VIDC_REG_761892_SHFT                          0
+#define VIDC_REG_761892_IN                   \
+	in_dword_masked(VIDC_REG_761892_ADDR,  \
+			VIDC_REG_761892_RMSK)
+#define VIDC_REG_761892_INM(m)               \
+	in_dword_masked(VIDC_REG_761892_ADDR,  m)
+#define VIDC_REG_761892_OUT(v)               \
+	out_dword(VIDC_REG_761892_ADDR, v)
+#define VIDC_REG_761892_OUTM(m, v)            \
+do { \
+	out_dword_masked_ns(VIDC_REG_761892_ADDR, m, v, \
+			VIDC_REG_761892_IN); \
+} while (0)
+#define VIDC_REG_761892_DEC_UNIT_SIZE_BMSK   0xffffffff
+#define VIDC_REG_761892_DEC_UNIT_SIZE_SHFT            0
+
+#define VIDC_REG_782249_ADDR                 \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000058)
+#define VIDC_REG_782249_PHYS                 \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000058)
+#define VIDC_REG_782249_RMSK                 0xffffffff
+#define VIDC_REG_782249_SHFT                          0
+#define VIDC_REG_782249_IN                   \
+	in_dword_masked(VIDC_REG_782249_ADDR,  \
+			VIDC_REG_782249_RMSK)
+#define VIDC_REG_782249_INM(m)               \
+	in_dword_masked(VIDC_REG_782249_ADDR,  m)
+#define VIDC_REG_782249_ENC_UNIT_SIZE_BMSK   0xffffffff
+#define VIDC_REG_782249_ENC_UNIT_SIZE_SHFT            0
+
+#define VIDC_REG_66693_ADDR                \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x0000005c)
+#define VIDC_REG_66693_PHYS                \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x0000005c)
+#define VIDC_REG_66693_RMSK                       0xf
+#define VIDC_REG_66693_SHFT                         0
+#define VIDC_REG_66693_IN                  \
+	in_dword_masked(VIDC_REG_66693_ADDR,  \
+			VIDC_REG_66693_RMSK)
+#define VIDC_REG_66693_INM(m)              \
+	in_dword_masked(VIDC_REG_66693_ADDR,  m)
+#define VIDC_REG_66693_OUT(v)              \
+	out_dword(VIDC_REG_66693_ADDR, v)
+#define VIDC_REG_66693_OUTM(m, v)           \
+do { \
+	out_dword_masked_ns(VIDC_REG_66693_ADDR, m, v, \
+			VIDC_REG_66693_IN); \
+} while (0)
+#define VIDC_REG_66693_START_BYTE_NUM_BMSK        0xf
+#define VIDC_REG_66693_START_BYTE_NUM_SHFT          0
+
+#define VIDC_REG_114286_ADDR               \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000060)
+#define VIDC_REG_114286_PHYS               \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000060)
+#define VIDC_REG_114286_RMSK               0xffffffff
+#define VIDC_REG_114286_SHFT                        0
+#define VIDC_REG_114286_IN                 \
+	in_dword_masked(VIDC_REG_114286_ADDR,  \
+			VIDC_REG_114286_RMSK)
+#define VIDC_REG_114286_INM(m)             \
+	in_dword_masked(VIDC_REG_114286_ADDR,  m)
+#define VIDC_REG_114286_ENC_HEADER_SIZE_BMSK 0xffffffff
+#define VIDC_REG_114286_ENC_HEADER_SIZE_SHFT          0
+
+#define VIDC_REG_713080_ADDR                   \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000100)
+#define VIDC_REG_713080_PHYS                   \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000100)
+#define VIDC_REG_713080_RMSK                         0x1f
+#define VIDC_REG_713080_SHFT                            0
+#define VIDC_REG_713080_IN                     \
+	in_dword_masked(VIDC_REG_713080_ADDR,  \
+			VIDC_REG_713080_RMSK)
+#define VIDC_REG_713080_INM(m)                 \
+	in_dword_masked(VIDC_REG_713080_ADDR,  m)
+#define VIDC_REG_713080_OUT(v)                 \
+	out_dword(VIDC_REG_713080_ADDR, v)
+#define VIDC_REG_713080_OUTM(m, v)              \
+do { \
+	out_dword_masked_ns(VIDC_REG_713080_ADDR, m, v, \
+			VIDC_REG_713080_IN); \
+} while (0)
+#define VIDC_REG_713080_ENC_ON_BMSK                  0x10
+#define VIDC_REG_713080_ENC_ON_SHFT                   0x4
+#define VIDC_REG_713080_STANDARD_SEL_BMSK             0xf
+#define VIDC_REG_713080_STANDARD_SEL_SHFT               0
+
+#define VIDC_REG_97293_ADDR                         \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000104)
+#define VIDC_REG_97293_PHYS                         \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000104)
+#define VIDC_REG_97293_RMSK                               0x1f
+#define VIDC_REG_97293_SHFT                                  0
+#define VIDC_REG_97293_IN                           \
+	in_dword_masked(VIDC_REG_97293_ADDR,  VIDC_REG_97293_RMSK)
+#define VIDC_REG_97293_INM(m)                       \
+	in_dword_masked(VIDC_REG_97293_ADDR,  m)
+#define VIDC_REG_97293_OUT(v)                       \
+	out_dword(VIDC_REG_97293_ADDR, v)
+#define VIDC_REG_97293_OUTM(m, v)                    \
+do { \
+	out_dword_masked_ns(VIDC_REG_97293_ADDR, m, v, \
+			VIDC_REG_97293_IN); \
+} while (0)
+#define VIDC_REG_97293_CH_ID_BMSK                         0x1f
+#define VIDC_REG_97293_CH_ID_SHFT                            0
+
+#define VIDC_REG_224135_ADDR                     \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000108)
+#define VIDC_REG_224135_PHYS                     \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000108)
+#define VIDC_REG_224135_RMSK                            0x1
+#define VIDC_REG_224135_SHFT                              0
+#define VIDC_REG_224135_IN                       \
+	in_dword_masked(VIDC_REG_224135_ADDR,        \
+	VIDC_REG_224135_RMSK)
+#define VIDC_REG_224135_INM(m)                   \
+	in_dword_masked(VIDC_REG_224135_ADDR,  m)
+#define VIDC_REG_224135_OUT(v)                   \
+	out_dword(VIDC_REG_224135_ADDR, v)
+#define VIDC_REG_224135_OUTM(m, v)                \
+do { \
+	out_dword_masked_ns(VIDC_REG_224135_ADDR, m, v, \
+			VIDC_REG_224135_IN); \
+} while (0)
+#define VIDC_REG_224135_CPU_RESET_BMSK                  0x1
+#define VIDC_REG_224135_CPU_RESET_SHFT                    0
+
+#define VIDC_REG_832522_ADDR                        \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x0000010c)
+#define VIDC_REG_832522_PHYS                        \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x0000010c)
+#define VIDC_REG_832522_RMSK                               0x1
+#define VIDC_REG_832522_SHFT                                 0
+#define VIDC_REG_832522_IN                          \
+	in_dword_masked(VIDC_REG_832522_ADDR,  VIDC_REG_832522_RMSK)
+#define VIDC_REG_832522_INM(m)                      \
+	in_dword_masked(VIDC_REG_832522_ADDR,  m)
+#define VIDC_REG_832522_OUT(v)                      \
+	out_dword(VIDC_REG_832522_ADDR, v)
+#define VIDC_REG_832522_OUTM(m, v)                   \
+do { \
+	out_dword_masked_ns(VIDC_REG_832522_ADDR, m, v, \
+			VIDC_REG_832522_IN); \
+} while (0)
+#define VIDC_REG_832522_FW_END_BMSK                        0x1
+#define VIDC_REG_832522_FW_END_SHFT                          0
+
+#define VIDC_REG_361582_ADDR                    \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000110)
+#define VIDC_REG_361582_PHYS                    \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000110)
+#define VIDC_REG_361582_RMSK                           0x1
+#define VIDC_REG_361582_SHFT                             0
+#define VIDC_REG_361582_IN                      \
+	in_dword_masked(VIDC_REG_361582_ADDR,  \
+			VIDC_REG_361582_RMSK)
+#define VIDC_REG_361582_INM(m)                  \
+	in_dword_masked(VIDC_REG_361582_ADDR,  m)
+#define VIDC_REG_361582_OUT(v)                  \
+	out_dword(VIDC_REG_361582_ADDR, v)
+#define VIDC_REG_361582_OUTM(m, v)               \
+do { \
+	out_dword_masked_ns(VIDC_REG_361582_ADDR, m, v, \
+			VIDC_REG_361582_IN); \
+} while (0)
+#define VIDC_REG_361582_BUS_MASTER_BMSK                0x1
+#define VIDC_REG_361582_BUS_MASTER_SHFT                  0
+
+#define VIDC_REG_314435_ADDR                   \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000114)
+#define VIDC_REG_314435_PHYS                   \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000114)
+#define VIDC_REG_314435_RMSK                          0x1
+#define VIDC_REG_314435_SHFT                            0
+#define VIDC_REG_314435_IN                     \
+	in_dword_masked(VIDC_REG_314435_ADDR,  \
+			VIDC_REG_314435_RMSK)
+#define VIDC_REG_314435_INM(m)                 \
+	in_dword_masked(VIDC_REG_314435_ADDR,  m)
+#define VIDC_REG_314435_OUT(v)                 \
+	out_dword(VIDC_REG_314435_ADDR, v)
+#define VIDC_REG_314435_OUTM(m, v)              \
+do { \
+	out_dword_masked_ns(VIDC_REG_314435_ADDR, m, v, \
+			VIDC_REG_314435_IN); \
+} while (0)
+#define VIDC_REG_314435_FRAME_START_BMSK              0x1
+#define VIDC_REG_314435_FRAME_START_SHFT                0
+
+#define VIDC_REG_999267_ADDR                    \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000118)
+#define VIDC_REG_999267_PHYS                    \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000118)
+#define VIDC_REG_999267_RMSK                        0xffff
+#define VIDC_REG_999267_SHFT                             0
+#define VIDC_REG_999267_IN                      \
+	in_dword_masked(VIDC_REG_999267_ADDR,  \
+			VIDC_REG_999267_RMSK)
+#define VIDC_REG_999267_INM(m)                  \
+	in_dword_masked(VIDC_REG_999267_ADDR,  m)
+#define VIDC_REG_999267_OUT(v)                  \
+	out_dword(VIDC_REG_999267_ADDR, v)
+#define VIDC_REG_999267_OUTM(m, v)               \
+do { \
+	out_dword_masked_ns(VIDC_REG_999267_ADDR, m, v, \
+			VIDC_REG_999267_IN); \
+} while (0)
+#define VIDC_REG_999267_IMG_SIZE_X_BMSK             0xffff
+#define VIDC_REG_999267_IMG_SIZE_X_SHFT                  0
+
+#define VIDC_REG_345712_ADDR                    \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x0000011c)
+#define VIDC_REG_345712_PHYS                    \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x0000011c)
+#define VIDC_REG_345712_RMSK                        0xffff
+#define VIDC_REG_345712_SHFT                             0
+#define VIDC_REG_345712_IN                      \
+	in_dword_masked(VIDC_REG_345712_ADDR,  \
+			VIDC_REG_345712_RMSK)
+#define VIDC_REG_345712_INM(m)                  \
+	in_dword_masked(VIDC_REG_345712_ADDR,  m)
+#define VIDC_REG_345712_OUT(v)                  \
+	out_dword(VIDC_REG_345712_ADDR, v)
+#define VIDC_REG_345712_OUTM(m, v)               \
+do { \
+	out_dword_masked_ns(VIDC_REG_345712_ADDR, m, v, \
+			VIDC_REG_345712_IN); \
+} while (0)
+#define VIDC_REG_345712_IMG_SIZE_Y_BMSK             0xffff
+#define VIDC_REG_345712_IMG_SIZE_Y_SHFT                  0
+
+#define VIDC_REG_443811_ADDR                       \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000124)
+#define VIDC_REG_443811_PHYS                       \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000124)
+#define VIDC_REG_443811_RMSK                              0x1
+#define VIDC_REG_443811_SHFT                                0
+#define VIDC_REG_443811_IN                         \
+	in_dword_masked(VIDC_REG_443811_ADDR,  VIDC_REG_443811_RMSK)
+#define VIDC_REG_443811_INM(m)                     \
+	in_dword_masked(VIDC_REG_443811_ADDR,  m)
+#define VIDC_REG_443811_OUT(v)                     \
+	out_dword(VIDC_REG_443811_ADDR, v)
+#define VIDC_REG_443811_OUTM(m, v)                  \
+do { \
+	out_dword_masked_ns(VIDC_REG_443811_ADDR, m, v, \
+			VIDC_REG_443811_IN); \
+} while (0)
+#define VIDC_REG_443811_POST_ON_BMSK                      0x1
+#define VIDC_REG_443811_POST_ON_SHFT                        0
+
+#define VIDC_REG_538267_ADDR                    \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000128)
+#define VIDC_REG_538267_PHYS                    \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000128)
+#define VIDC_REG_538267_RMSK                    0xffffffff
+#define VIDC_REG_538267_SHFT                             0
+#define VIDC_REG_538267_IN                      \
+	in_dword_masked(VIDC_REG_538267_ADDR,  \
+			VIDC_REG_538267_RMSK)
+#define VIDC_REG_538267_INM(m)                  \
+	in_dword_masked(VIDC_REG_538267_ADDR,  m)
+#define VIDC_REG_538267_OUT(v)                  \
+	out_dword(VIDC_REG_538267_ADDR, v)
+#define VIDC_REG_538267_OUTM(m, v)               \
+do { \
+	out_dword_masked_ns(VIDC_REG_538267_ADDR, m, v, \
+			VIDC_REG_538267_IN); \
+} while (0)
+#define VIDC_REG_538267_QUOTIENT_VAL_BMSK       0xffff0000
+#define VIDC_REG_538267_QUOTIENT_VAL_SHFT             0x10
+#define VIDC_REG_538267_REMAINDER_VAL_BMSK          0xffff
+#define VIDC_REG_538267_REMAINDER_VAL_SHFT               0
+
+#define VIDC_REG_661565_ADDR                \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x0000012c)
+#define VIDC_REG_661565_PHYS                \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x0000012c)
+#define VIDC_REG_661565_RMSK                       0x1
+#define VIDC_REG_661565_SHFT                         0
+#define VIDC_REG_661565_IN                  \
+	in_dword_masked(VIDC_REG_661565_ADDR,  \
+			VIDC_REG_661565_RMSK)
+#define VIDC_REG_661565_INM(m)              \
+	in_dword_masked(VIDC_REG_661565_ADDR,  m)
+#define VIDC_REG_661565_OUT(v)              \
+	out_dword(VIDC_REG_661565_ADDR, v)
+#define VIDC_REG_661565_OUTM(m, v)           \
+do { \
+	out_dword_masked_ns(VIDC_REG_661565_ADDR, m, v, \
+			VIDC_REG_661565_IN); \
+} while (0)
+#define VIDC_REG_661565_SEQUENCE_START_BMSK        0x1
+#define VIDC_REG_661565_SEQUENCE_START_SHFT          0
+
+#define VIDC_REG_141269_ADDR                      \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000130)
+#define VIDC_REG_141269_PHYS                      \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000130)
+#define VIDC_REG_141269_RMSK                             0x1
+#define VIDC_REG_141269_SHFT                               0
+#define VIDC_REG_141269_IN                        \
+	in_dword_masked(VIDC_REG_141269_ADDR,         \
+	VIDC_REG_141269_RMSK)
+#define VIDC_REG_141269_INM(m)                    \
+	in_dword_masked(VIDC_REG_141269_ADDR,  m)
+#define VIDC_REG_141269_OUT(v)                    \
+	out_dword(VIDC_REG_141269_ADDR, v)
+#define VIDC_REG_141269_OUTM(m, v)                 \
+do { \
+	out_dword_masked_ns(VIDC_REG_141269_ADDR, m, v, \
+			VIDC_REG_141269_IN); \
+} while (0)
+#define VIDC_REG_141269_SW_RESET_BMSK                    0x1
+#define VIDC_REG_141269_SW_RESET_SHFT                      0
+
+#define VIDC_REG_193553_ADDR                      \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000134)
+#define VIDC_REG_193553_PHYS                      \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000134)
+#define VIDC_REG_193553_RMSK                             0x1
+#define VIDC_REG_193553_SHFT                               0
+#define VIDC_REG_193553_IN                        \
+	in_dword_masked(VIDC_REG_193553_ADDR,         \
+	VIDC_REG_193553_RMSK)
+#define VIDC_REG_193553_INM(m)                    \
+	in_dword_masked(VIDC_REG_193553_ADDR,  m)
+#define VIDC_REG_193553_OUT(v)                    \
+	out_dword(VIDC_REG_193553_ADDR, v)
+#define VIDC_REG_193553_OUTM(m, v)                 \
+do { \
+	out_dword_masked_ns(VIDC_REG_193553_ADDR, m, v, \
+			VIDC_REG_193553_IN); \
+} while (0)
+#define VIDC_REG_193553_FW_START_BMSK                    0x1
+#define VIDC_REG_193553_FW_START_SHFT                      0
+
+#define VIDC_REG_215724_ADDR                    \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000138)
+#define VIDC_REG_215724_PHYS                    \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000138)
+#define VIDC_REG_215724_RMSK                           0x1
+#define VIDC_REG_215724_SHFT                             0
+#define VIDC_REG_215724_IN                      \
+	in_dword_masked(VIDC_REG_215724_ADDR,  \
+			VIDC_REG_215724_RMSK)
+#define VIDC_REG_215724_INM(m)                  \
+	in_dword_masked(VIDC_REG_215724_ADDR,  m)
+#define VIDC_REG_215724_OUT(v)                  \
+	out_dword(VIDC_REG_215724_ADDR, v)
+#define VIDC_REG_215724_OUTM(m, v)               \
+do { \
+	out_dword_masked_ns(VIDC_REG_215724_ADDR, m, v, \
+			VIDC_REG_215724_IN); \
+} while (0)
+#define VIDC_REG_215724_ARM_ENDIAN_BMSK                0x1
+#define VIDC_REG_215724_ARM_ENDIAN_SHFT                  0
+
+#define VIDC_REG_846346_ADDR                      \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x0000013c)
+#define VIDC_REG_846346_PHYS                      \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x0000013c)
+#define VIDC_REG_846346_RMSK                             0x1
+#define VIDC_REG_846346_SHFT                               0
+#define VIDC_REG_846346_IN                        \
+	in_dword_masked(VIDC_REG_846346_ADDR,         \
+	VIDC_REG_846346_RMSK)
+#define VIDC_REG_846346_INM(m)                    \
+	in_dword_masked(VIDC_REG_846346_ADDR,  m)
+#define VIDC_REG_846346_OUT(v)                    \
+	out_dword(VIDC_REG_846346_ADDR, v)
+#define VIDC_REG_846346_OUTM(m, v)                 \
+do { \
+	out_dword_masked_ns(VIDC_REG_846346_ADDR, m, v, \
+			VIDC_REG_846346_IN); \
+} while (0)
+#define VIDC_REG_846346_ERR_CTRL_BMSK                    0x1
+#define VIDC_REG_846346_ERR_CTRL_SHFT                      0
+
+#define VIDC_REG_765787_ADDR                 \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000200)
+#define VIDC_REG_765787_PHYS                 \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000200)
+#define VIDC_REG_765787_RMSK                 0xffffffff
+#define VIDC_REG_765787_SHFT                          0
+#define VIDC_REG_765787_IN                   \
+	in_dword_masked(VIDC_REG_765787_ADDR,  \
+			VIDC_REG_765787_RMSK)
+#define VIDC_REG_765787_INM(m)               \
+	in_dword_masked(VIDC_REG_765787_ADDR,  m)
+#define VIDC_REG_765787_OUT(v)               \
+	out_dword(VIDC_REG_765787_ADDR, v)
+#define VIDC_REG_765787_OUTM(m, v)            \
+do { \
+	out_dword_masked_ns(VIDC_REG_765787_ADDR, m, v, \
+			VIDC_REG_765787_IN); \
+} while (0)
+#define VIDC_REG_765787_FW_STT_ADDR_0_BMSK   0xffffffff
+#define VIDC_REG_765787_FW_STT_ADDR_0_SHFT            0
+
+#define VIDC_REG_225040_ADDR                 \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000204)
+#define VIDC_REG_225040_PHYS                 \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000204)
+#define VIDC_REG_225040_RMSK                 0xffffffff
+#define VIDC_REG_225040_SHFT                          0
+#define VIDC_REG_225040_IN                   \
+	in_dword_masked(VIDC_REG_225040_ADDR,  \
+			VIDC_REG_225040_RMSK)
+#define VIDC_REG_225040_INM(m)               \
+	in_dword_masked(VIDC_REG_225040_ADDR,  m)
+#define VIDC_REG_225040_OUT(v)               \
+	out_dword(VIDC_REG_225040_ADDR, v)
+#define VIDC_REG_225040_OUTM(m, v)            \
+do { \
+	out_dword_masked_ns(VIDC_REG_225040_ADDR, m, v, \
+			VIDC_REG_225040_IN); \
+} while (0)
+#define VIDC_REG_225040_FW_STT_ADDR_1_BMSK   0xffffffff
+#define VIDC_REG_225040_FW_STT_ADDR_1_SHFT            0
+
+#define VIDC_REG_942456_ADDR                 \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000208)
+#define VIDC_REG_942456_PHYS                 \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000208)
+#define VIDC_REG_942456_RMSK                 0xffffffff
+#define VIDC_REG_942456_SHFT                          0
+#define VIDC_REG_942456_IN                   \
+	in_dword_masked(VIDC_REG_942456_ADDR,  \
+			VIDC_REG_942456_RMSK)
+#define VIDC_REG_942456_INM(m)               \
+	in_dword_masked(VIDC_REG_942456_ADDR,  m)
+#define VIDC_REG_942456_OUT(v)               \
+	out_dword(VIDC_REG_942456_ADDR, v)
+#define VIDC_REG_942456_OUTM(m, v)            \
+do { \
+	out_dword_masked_ns(VIDC_REG_942456_ADDR, m, v, \
+			VIDC_REG_942456_IN); \
+} while (0)
+#define VIDC_REG_942456_FW_STT_ADDR_2_BMSK   0xffffffff
+#define VIDC_REG_942456_FW_STT_ADDR_2_SHFT            0
+
+#define VIDC_REG_942170_ADDR_3_ADDR                 \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x0000020c)
+#define VIDC_REG_942170_ADDR_3_PHYS                 \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x0000020c)
+#define VIDC_REG_942170_ADDR_3_RMSK                 0xffffffff
+#define VIDC_REG_942170_ADDR_3_SHFT                          0
+#define VIDC_REG_942170_ADDR_3_IN                   \
+	in_dword_masked(VIDC_REG_942170_ADDR_3_ADDR,  \
+			VIDC_REG_942170_ADDR_3_RMSK)
+#define VIDC_REG_942170_ADDR_3_INM(m)               \
+	in_dword_masked(VIDC_REG_942170_ADDR_3_ADDR,  m)
+#define VIDC_REG_942170_ADDR_3_OUT(v)               \
+	out_dword(VIDC_REG_942170_ADDR_3_ADDR, v)
+#define VIDC_REG_942170_ADDR_3_OUTM(m, v)            \
+do { \
+	out_dword_masked_ns(VIDC_REG_942170_ADDR_3_ADDR, m, v, \
+			VIDC_REG_942170_ADDR_3_IN); \
+} while (0)
+#define VIDC_REG_942170_ADDR_3_FW_STT_ADDR_3_BMSK   0xffffffff
+#define VIDC_REG_942170_ADDR_3_FW_STT_ADDR_3_SHFT            0
+
+#define VIDC_REG_880188_ADDR                 \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000210)
+#define VIDC_REG_880188_PHYS                 \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000210)
+#define VIDC_REG_880188_RMSK                 0xffffffff
+#define VIDC_REG_880188_SHFT                          0
+#define VIDC_REG_880188_IN                   \
+	in_dword_masked(VIDC_REG_880188_ADDR,  \
+			VIDC_REG_880188_RMSK)
+#define VIDC_REG_880188_INM(m)               \
+	in_dword_masked(VIDC_REG_880188_ADDR,  m)
+#define VIDC_REG_880188_OUT(v)               \
+	out_dword(VIDC_REG_880188_ADDR, v)
+#define VIDC_REG_880188_OUTM(m, v)            \
+do { \
+	out_dword_masked_ns(VIDC_REG_880188_ADDR, m, v, \
+			VIDC_REG_880188_IN); \
+} while (0)
+#define VIDC_REG_880188_FW_STT_ADDR_4_BMSK   0xffffffff
+#define VIDC_REG_880188_FW_STT_ADDR_4_SHFT            0
+
+#define VIDC_REG_40293_ADDR                 \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000214)
+#define VIDC_REG_40293_PHYS                 \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000214)
+#define VIDC_REG_40293_RMSK                 0xffffffff
+#define VIDC_REG_40293_SHFT                          0
+#define VIDC_REG_40293_IN                   \
+	in_dword_masked(VIDC_REG_40293_ADDR,  \
+			VIDC_REG_40293_RMSK)
+#define VIDC_REG_40293_INM(m)               \
+	in_dword_masked(VIDC_REG_40293_ADDR,  m)
+#define VIDC_REG_40293_OUT(v)               \
+	out_dword(VIDC_REG_40293_ADDR, v)
+#define VIDC_REG_40293_OUTM(m, v)            \
+do { \
+	out_dword_masked_ns(VIDC_REG_40293_ADDR, m, v, \
+			VIDC_REG_40293_IN); \
+} while (0)
+#define VIDC_REG_40293_FW_STT_ADDR_5_BMSK   0xffffffff
+#define VIDC_REG_40293_FW_STT_ADDR_5_SHFT            0
+
+#define VIDC_REG_942170_ADDR_6_ADDR                 \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000218)
+#define VIDC_REG_942170_ADDR_6_PHYS                 \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000218)
+#define VIDC_REG_942170_ADDR_6_RMSK                 0xffffffff
+#define VIDC_REG_942170_ADDR_6_SHFT                          0
+#define VIDC_REG_942170_ADDR_6_IN                   \
+	in_dword_masked(VIDC_REG_942170_ADDR_6_ADDR,  \
+			VIDC_REG_942170_ADDR_6_RMSK)
+#define VIDC_REG_942170_ADDR_6_INM(m)               \
+	in_dword_masked(VIDC_REG_942170_ADDR_6_ADDR,  m)
+#define VIDC_REG_942170_ADDR_6_OUT(v)               \
+	out_dword(VIDC_REG_942170_ADDR_6_ADDR, v)
+#define VIDC_REG_942170_ADDR_6_OUTM(m, v)            \
+do { \
+	out_dword_masked_ns(VIDC_REG_942170_ADDR_6_ADDR, m, v, \
+			VIDC_REG_942170_ADDR_6_IN); \
+} while (0)
+#define VIDC_REG_942170_ADDR_6_FW_STT_ADDR_6_BMSK   0xffffffff
+#define VIDC_REG_942170_ADDR_6_FW_STT_ADDR_6_SHFT            0
+
+#define VIDC_REG_958768_ADDR                  \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000230)
+#define VIDC_REG_958768_PHYS                  \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000230)
+#define VIDC_REG_958768_RMSK                  0xffffffff
+#define VIDC_REG_958768_SHFT                           0
+#define VIDC_REG_958768_IN                    \
+	in_dword_masked(VIDC_REG_958768_ADDR,  \
+			VIDC_REG_958768_RMSK)
+#define VIDC_REG_958768_INM(m)                \
+	in_dword_masked(VIDC_REG_958768_ADDR,  m)
+#define VIDC_REG_958768_OUT(v)                \
+	out_dword(VIDC_REG_958768_ADDR, v)
+#define VIDC_REG_958768_OUTM(m, v)             \
+do { \
+	out_dword_masked_ns(VIDC_REG_958768_ADDR, m, v, \
+			VIDC_REG_958768_IN); \
+} while (0)
+#define VIDC_REG_699384_ADDR_BMSK     0xffffffff
+#define VIDC_REG_699384_ADDR_SHFT              0
+
+#define VIDC_REG_979942_ADDR                   \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000234)
+#define VIDC_REG_979942_PHYS                   \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000234)
+#define VIDC_REG_979942_RMSK                   0xffffffff
+#define VIDC_REG_979942_SHFT                            0
+#define VIDC_REG_979942_IN                     \
+	in_dword_masked(VIDC_REG_979942_ADDR,  \
+			VIDC_REG_979942_RMSK)
+#define VIDC_REG_979942_INM(m)                 \
+	in_dword_masked(VIDC_REG_979942_ADDR,  m)
+#define VIDC_REG_979942_OUT(v)                 \
+	out_dword(VIDC_REG_979942_ADDR, v)
+#define VIDC_REG_979942_OUTM(m, v)              \
+do { \
+	out_dword_masked_ns(VIDC_REG_979942_ADDR, m, v, \
+			VIDC_REG_979942_IN); \
+} while (0)
+#define VIDC_REG_979942_DB_STT_ADDR_BMSK       0xffffffff
+#define VIDC_REG_979942_DB_STT_ADDR_SHFT                0
+
+#define VIDC_REG_839021_ADDR                       \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000300)
+#define VIDC_REG_839021_PHYS                       \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000300)
+#define VIDC_REG_839021_RMSK                           0xff1f
+#define VIDC_REG_839021_SHFT                                0
+#define VIDC_REG_839021_IN                         \
+	in_dword_masked(VIDC_REG_839021_ADDR,  VIDC_REG_839021_RMSK)
+#define VIDC_REG_839021_INM(m)                     \
+	in_dword_masked(VIDC_REG_839021_ADDR,  m)
+#define VIDC_REG_839021_OUT(v)                     \
+	out_dword(VIDC_REG_839021_ADDR, v)
+#define VIDC_REG_839021_OUTM(m, v)                  \
+do { \
+	out_dword_masked_ns(VIDC_REG_839021_ADDR, m, v, \
+			VIDC_REG_839021_IN); \
+} while (0)
+#define VIDC_REG_839021_LEVEL_BMSK                     0xff00
+#define VIDC_REG_839021_LEVEL_SHFT                        0x8
+#define VIDC_REG_839021_PROFILE_BMSK                     0x1f
+#define VIDC_REG_839021_PROFILE_SHFT                        0
+
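+/*
+ * Illustrative sketch (not part of the original register map): every
+ * block above follows the same generated pattern, so a field such as
+ * LEVEL in VIDC_REG_839021 can be read or updated through the INM()
+ * and OUTM() helpers together with its BMSK/SHFT pair, e.g.:
+ *
+ *	u32 level = VIDC_REG_839021_INM(VIDC_REG_839021_LEVEL_BMSK)
+ *			>> VIDC_REG_839021_LEVEL_SHFT;
+ *	VIDC_REG_839021_OUTM(VIDC_REG_839021_LEVEL_BMSK,
+ *			level << VIDC_REG_839021_LEVEL_SHFT);
+ */
+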
+#define VIDC_REG_950374_ADDR                      \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000308)
+#define VIDC_REG_950374_PHYS                      \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000308)
+#define VIDC_REG_950374_RMSK                          0xffff
+#define VIDC_REG_950374_SHFT                               0
+#define VIDC_REG_950374_IN                        \
+	in_dword_masked(VIDC_REG_950374_ADDR,         \
+	VIDC_REG_950374_RMSK)
+#define VIDC_REG_950374_INM(m)                    \
+	in_dword_masked(VIDC_REG_950374_ADDR,  m)
+#define VIDC_REG_950374_OUT(v)                    \
+	out_dword(VIDC_REG_950374_ADDR, v)
+#define VIDC_REG_950374_OUTM(m, v)                 \
+do { \
+	out_dword_masked_ns(VIDC_REG_950374_ADDR, m, v, \
+			VIDC_REG_950374_IN); \
+} while (0)
+#define VIDC_REG_950374_I_PERIOD_BMSK                 0xffff
+#define VIDC_REG_950374_I_PERIOD_SHFT                      0
+
+#define VIDC_REG_504878_ADDR               \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000310)
+#define VIDC_REG_504878_PHYS               \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000310)
+#define VIDC_REG_504878_RMSK                      0xd
+#define VIDC_REG_504878_SHFT                        0
+#define VIDC_REG_504878_IN                 \
+	in_dword_masked(VIDC_REG_504878_ADDR,  \
+			VIDC_REG_504878_RMSK)
+#define VIDC_REG_504878_INM(m)             \
+	in_dword_masked(VIDC_REG_504878_ADDR,  m)
+#define VIDC_REG_504878_OUT(v)             \
+	out_dword(VIDC_REG_504878_ADDR, v)
+#define VIDC_REG_504878_OUTM(m, v)          \
+do { \
+	out_dword_masked_ns(VIDC_REG_504878_ADDR, m, v, \
+			VIDC_REG_504878_IN); \
+} while (0)
+#define VIDC_REG_504878_FIXED_NUMBER_BMSK         0xc
+#define VIDC_REG_504878_FIXED_NUMBER_SHFT         0x2
+#define VIDC_REG_504878_ENTROPY_SEL_BMSK          0x1
+#define VIDC_REG_504878_ENTROPY_SEL_SHFT            0
+
+#define VIDC_REG_458130_ADDR            \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000314)
+#define VIDC_REG_458130_PHYS            \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000314)
+#define VIDC_REG_458130_RMSK                 0xfff
+#define VIDC_REG_458130_SHFT                     0
+#define VIDC_REG_458130_IN              \
+	in_dword_masked(VIDC_REG_458130_ADDR,  \
+			VIDC_REG_458130_RMSK)
+#define VIDC_REG_458130_INM(m)          \
+	in_dword_masked(VIDC_REG_458130_ADDR,  m)
+#define VIDC_REG_458130_OUT(v)          \
+	out_dword(VIDC_REG_458130_ADDR, v)
+#define VIDC_REG_458130_OUTM(m, v)       \
+do { \
+	out_dword_masked_ns(VIDC_REG_458130_ADDR, m, v, \
+			VIDC_REG_458130_IN); \
+} while (0)
+#define VIDC_REG_458130_SLICE_ALPHA_C0_OFFSET_DIV2_BMSK      \
+	0xf80
+#define VIDC_REG_458130_SLICE_ALPHA_C0_OFFSET_DIV2_SHFT      \
+	0x7
+#define VIDC_REG_458130_SLICE_BETA_OFFSET_DIV2_BMSK       0x7c
+#define VIDC_REG_458130_SLICE_BETA_OFFSET_DIV2_SHFT        0x2
+#define VIDC_REG_458130_DISABLE_DEBLOCKING_FILTER_IDC_BMSK        0x3
+#define VIDC_REG_458130_DISABLE_DEBLOCKING_FILTER_IDC_SHFT          0
+
+#define VIDC_REG_314290_ADDR                   \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000318)
+#define VIDC_REG_314290_PHYS                   \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000318)
+#define VIDC_REG_314290_RMSK                          0x1
+#define VIDC_REG_314290_SHFT                            0
+#define VIDC_REG_314290_IN                     \
+	in_dword_masked(VIDC_REG_314290_ADDR,  \
+			VIDC_REG_314290_RMSK)
+#define VIDC_REG_314290_INM(m)                 \
+	in_dword_masked(VIDC_REG_314290_ADDR,  m)
+#define VIDC_REG_314290_OUT(v)                 \
+	out_dword(VIDC_REG_314290_ADDR, v)
+#define VIDC_REG_314290_OUTM(m, v)              \
+do { \
+	out_dword_masked_ns(VIDC_REG_314290_ADDR, m, v, \
+			VIDC_REG_314290_IN); \
+} while (0)
+#define VIDC_REG_314290_SHORT_HD_ON_BMSK              0x1
+#define VIDC_REG_314290_SHORT_HD_ON_SHFT                0
+
+#define VIDC_REG_588301_ADDR                    \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x0000031c)
+#define VIDC_REG_588301_PHYS                    \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x0000031c)
+#define VIDC_REG_588301_RMSK                           0x1
+#define VIDC_REG_588301_SHFT                             0
+#define VIDC_REG_588301_IN                      \
+	in_dword_masked(VIDC_REG_588301_ADDR,  \
+			VIDC_REG_588301_RMSK)
+#define VIDC_REG_588301_INM(m)                  \
+	in_dword_masked(VIDC_REG_588301_ADDR,  m)
+#define VIDC_REG_588301_OUT(v)                  \
+	out_dword(VIDC_REG_588301_ADDR, v)
+#define VIDC_REG_588301_OUTM(m, v)               \
+do { \
+	out_dword_masked_ns(VIDC_REG_588301_ADDR, m, v, \
+			VIDC_REG_588301_IN); \
+} while (0)
+#define VIDC_REG_588301_MSLICE_ENA_BMSK                0x1
+#define VIDC_REG_588301_MSLICE_ENA_SHFT                  0
+
+#define VIDC_REG_1517_ADDR                    \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000320)
+#define VIDC_REG_1517_PHYS                    \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000320)
+#define VIDC_REG_1517_RMSK                           0x3
+#define VIDC_REG_1517_SHFT                             0
+#define VIDC_REG_1517_IN                      \
+	in_dword_masked(VIDC_REG_1517_ADDR,  \
+			VIDC_REG_1517_RMSK)
+#define VIDC_REG_1517_INM(m)                  \
+	in_dword_masked(VIDC_REG_1517_ADDR,  m)
+#define VIDC_REG_1517_OUT(v)                  \
+	out_dword(VIDC_REG_1517_ADDR, v)
+#define VIDC_REG_1517_OUTM(m, v)               \
+do { \
+	out_dword_masked_ns(VIDC_REG_1517_ADDR, m, v, \
+			VIDC_REG_1517_IN); \
+} while (0)
+#define VIDC_REG_1517_MSLICE_SEL_BMSK                0x3
+#define VIDC_REG_1517_MSLICE_SEL_SHFT                  0
+
+#define VIDC_REG_105335_ADDR                     \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000324)
+#define VIDC_REG_105335_PHYS                     \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000324)
+#define VIDC_REG_105335_RMSK                     0xffffffff
+#define VIDC_REG_105335_SHFT                              0
+#define VIDC_REG_105335_IN                       \
+	in_dword_masked(VIDC_REG_105335_ADDR,        \
+	VIDC_REG_105335_RMSK)
+#define VIDC_REG_105335_INM(m)                   \
+	in_dword_masked(VIDC_REG_105335_ADDR,  m)
+#define VIDC_REG_105335_OUT(v)                   \
+	out_dword(VIDC_REG_105335_ADDR, v)
+#define VIDC_REG_105335_OUTM(m, v)                \
+do { \
+	out_dword_masked_ns(VIDC_REG_105335_ADDR, m, v, \
+			VIDC_REG_105335_IN); \
+} while (0)
+#define VIDC_REG_105335_MSLICE_MB_BMSK           0xffffffff
+#define VIDC_REG_105335_MSLICE_MB_SHFT                    0
+
+#define VIDC_REG_561679_ADDR                   \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000328)
+#define VIDC_REG_561679_PHYS                   \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000328)
+#define VIDC_REG_561679_RMSK                   0xffffffff
+#define VIDC_REG_561679_SHFT                            0
+#define VIDC_REG_561679_IN                     \
+	in_dword_masked(VIDC_REG_561679_ADDR,  \
+			VIDC_REG_561679_RMSK)
+#define VIDC_REG_561679_INM(m)                 \
+	in_dword_masked(VIDC_REG_561679_ADDR,  m)
+#define VIDC_REG_561679_OUT(v)                 \
+	out_dword(VIDC_REG_561679_ADDR, v)
+#define VIDC_REG_561679_OUTM(m, v)              \
+do { \
+	out_dword_masked_ns(VIDC_REG_561679_ADDR, m, v, \
+			VIDC_REG_561679_IN); \
+} while (0)
+#define VIDC_REG_561679_MSLICE_BYTE_BMSK       0xffffffff
+#define VIDC_REG_561679_MSLICE_BYTE_SHFT                0
+
+#define VIDC_REG_151345_ADDR                 \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000400)
+#define VIDC_REG_151345_PHYS                 \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000400)
+#define VIDC_REG_151345_RMSK                 0xffffffff
+#define VIDC_REG_151345_SHFT                          0
+#define VIDC_REG_151345_IN                   \
+	in_dword_masked(VIDC_REG_151345_ADDR,  \
+			VIDC_REG_151345_RMSK)
+#define VIDC_REG_151345_INM(m)               \
+	in_dword_masked(VIDC_REG_151345_ADDR,  m)
+#define VIDC_REG_151345_DISPLAY_Y_ADR_BMSK   0xffffffff
+#define VIDC_REG_151345_DISPLAY_Y_ADR_SHFT            0
+
+#define VIDC_REG_293983_ADDR                 \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000404)
+#define VIDC_REG_293983_PHYS                 \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000404)
+#define VIDC_REG_293983_RMSK                 0xffffffff
+#define VIDC_REG_293983_SHFT                          0
+#define VIDC_REG_293983_IN                   \
+	in_dword_masked(VIDC_REG_293983_ADDR,  \
+			VIDC_REG_293983_RMSK)
+#define VIDC_REG_293983_INM(m)               \
+	in_dword_masked(VIDC_REG_293983_ADDR,  m)
+#define VIDC_REG_293983_DISPLAY_C_ADR_BMSK   0xffffffff
+#define VIDC_REG_293983_DISPLAY_C_ADR_SHFT            0
+
+#define VIDC_REG_612715_ADDR                \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000408)
+#define VIDC_REG_612715_PHYS                \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000408)
+#define VIDC_REG_612715_RMSK                      0x3f
+#define VIDC_REG_612715_SHFT                         0
+#define VIDC_REG_612715_IN                  \
+	in_dword_masked(VIDC_REG_612715_ADDR,  \
+			VIDC_REG_612715_RMSK)
+#define VIDC_REG_612715_INM(m)              \
+	in_dword_masked(VIDC_REG_612715_ADDR,  m)
+#define VIDC_REG_612715_DISPLAY_STATUS_BMSK       0x3f
+#define VIDC_REG_612715_DISPLAY_STATUS_SHFT          0
+
+#define VIDC_REG_209364_ADDR                   \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x0000040c)
+#define VIDC_REG_209364_PHYS                   \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x0000040c)
+#define VIDC_REG_209364_RMSK                          0x1
+#define VIDC_REG_209364_SHFT                            0
+#define VIDC_REG_209364_IN                     \
+	in_dword_masked(VIDC_REG_209364_ADDR,  \
+			VIDC_REG_209364_RMSK)
+#define VIDC_REG_209364_INM(m)                 \
+	in_dword_masked(VIDC_REG_209364_ADDR,  m)
+#define VIDC_REG_209364_HEADER_DONE_BMSK              0x1
+#define VIDC_REG_209364_HEADER_DONE_SHFT                0
+
+#define VIDC_REG_757835_ADDR                     \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000410)
+#define VIDC_REG_757835_PHYS                     \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000410)
+#define VIDC_REG_757835_RMSK                     0xffffffff
+#define VIDC_REG_757835_SHFT                              0
+#define VIDC_REG_757835_IN                       \
+	in_dword_masked(VIDC_REG_757835_ADDR,        \
+	VIDC_REG_757835_RMSK)
+#define VIDC_REG_757835_INM(m)                   \
+	in_dword_masked(VIDC_REG_757835_ADDR,  m)
+#define VIDC_REG_757835_FRAME_NUM_BMSK           0xffffffff
+#define VIDC_REG_757835_FRAME_NUM_SHFT                    0
+
+#define VIDC_REG_352831_ADDR              \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000414)
+#define VIDC_REG_352831_PHYS              \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000414)
+#define VIDC_REG_352831_RMSK              0xffffffff
+#define VIDC_REG_352831_SHFT                       0
+#define VIDC_REG_352831_IN                \
+	in_dword_masked(VIDC_REG_352831_ADDR,  \
+			VIDC_REG_352831_RMSK)
+#define VIDC_REG_352831_INM(m)            \
+	in_dword_masked(VIDC_REG_352831_ADDR,  m)
+#define VIDC_REG_352831_DBG_INFO_OUTPUT0_BMSK 0xffffffff
+#define VIDC_REG_352831_DBG_INFO_OUTPUT0_SHFT          0
+
+#define VIDC_REG_668634_ADDR              \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000418)
+#define VIDC_REG_668634_PHYS              \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000418)
+#define VIDC_REG_668634_RMSK              0xffffffff
+#define VIDC_REG_668634_SHFT                       0
+#define VIDC_REG_668634_IN                \
+	in_dword_masked(VIDC_REG_668634_ADDR,  \
+			VIDC_REG_668634_RMSK)
+#define VIDC_REG_668634_INM(m)            \
+	in_dword_masked(VIDC_REG_668634_ADDR,  m)
+#define VIDC_REG_668634_DBG_INFO_OUTPUT1_BMSK 0xffffffff
+#define VIDC_REG_668634_DBG_INFO_OUTPUT1_SHFT          0
+
+#define VIDC_REG_609676_ADDR                       \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000500)
+#define VIDC_REG_609676_PHYS                       \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000500)
+#define VIDC_REG_609676_RMSK                              0x1
+#define VIDC_REG_609676_SHFT                                0
+#define VIDC_REG_609676_IN                         \
+	in_dword_masked(VIDC_REG_609676_ADDR,  VIDC_REG_609676_RMSK)
+#define VIDC_REG_609676_INM(m)                     \
+	in_dword_masked(VIDC_REG_609676_ADDR,  m)
+#define VIDC_REG_609676_OUT(v)                     \
+	out_dword(VIDC_REG_609676_ADDR, v)
+#define VIDC_REG_609676_OUTM(m, v)                  \
+do { \
+	out_dword_masked_ns(VIDC_REG_609676_ADDR, m, v, \
+			VIDC_REG_609676_IN); \
+} while (0)
+#define VIDC_REG_609676_INT_OFF_BMSK                      0x1
+#define VIDC_REG_609676_INT_OFF_SHFT                        0
+
+#define VIDC_REG_491082_ADDR                 \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000504)
+#define VIDC_REG_491082_PHYS                 \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000504)
+#define VIDC_REG_491082_RMSK                        0x1
+#define VIDC_REG_491082_SHFT                          0
+#define VIDC_REG_491082_IN                   \
+	in_dword_masked(VIDC_REG_491082_ADDR,  \
+			VIDC_REG_491082_RMSK)
+#define VIDC_REG_491082_INM(m)               \
+	in_dword_masked(VIDC_REG_491082_ADDR,  m)
+#define VIDC_REG_491082_OUT(v)               \
+	out_dword(VIDC_REG_491082_ADDR, v)
+#define VIDC_REG_491082_OUTM(m, v)            \
+do { \
+	out_dword_masked_ns(VIDC_REG_491082_ADDR, m, v, \
+			VIDC_REG_491082_IN); \
+} while (0)
+#define VIDC_REG_491082_INT_PULSE_SEL_BMSK          0x1
+#define VIDC_REG_491082_INT_PULSE_SEL_SHFT            0
+
+#define VIDC_REG_614776_ADDR                \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000508)
+#define VIDC_REG_614776_PHYS                \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000508)
+#define VIDC_REG_614776_RMSK                       0x1
+#define VIDC_REG_614776_SHFT                         0
+#define VIDC_REG_614776_IN                  \
+	in_dword_masked(VIDC_REG_614776_ADDR,  \
+			VIDC_REG_614776_RMSK)
+#define VIDC_REG_614776_INM(m)              \
+	in_dword_masked(VIDC_REG_614776_ADDR,  m)
+#define VIDC_REG_614776_OUT(v)              \
+	out_dword(VIDC_REG_614776_ADDR, v)
+#define VIDC_REG_614776_OUTM(m, v)           \
+do { \
+	out_dword_masked_ns(VIDC_REG_614776_ADDR, m, v, \
+			VIDC_REG_614776_IN); \
+} while (0)
+#define VIDC_REG_614776_INT_DONE_CLEAR_BMSK        0x1
+#define VIDC_REG_614776_INT_DONE_CLEAR_SHFT          0
+
+#define VIDC_REG_982553_ADDR                \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x0000050c)
+#define VIDC_REG_982553_PHYS                \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x0000050c)
+#define VIDC_REG_982553_RMSK                       0x1
+#define VIDC_REG_982553_SHFT                         0
+#define VIDC_REG_982553_IN                  \
+	in_dword_masked(VIDC_REG_982553_ADDR,  \
+			VIDC_REG_982553_RMSK)
+#define VIDC_REG_982553_INM(m)              \
+	in_dword_masked(VIDC_REG_982553_ADDR,  m)
+#define VIDC_REG_982553_OPERATION_DONE_BMSK        0x1
+#define VIDC_REG_982553_OPERATION_DONE_SHFT          0
+
+#define VIDC_REG_259967_ADDR                       \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000510)
+#define VIDC_REG_259967_PHYS                       \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000510)
+#define VIDC_REG_259967_RMSK                              0x1
+#define VIDC_REG_259967_SHFT                                0
+#define VIDC_REG_259967_IN                         \
+	in_dword_masked(VIDC_REG_259967_ADDR,  VIDC_REG_259967_RMSK)
+#define VIDC_REG_259967_INM(m)                     \
+	in_dword_masked(VIDC_REG_259967_ADDR,  m)
+#define VIDC_REG_259967_FW_DONE_BMSK                      0x1
+#define VIDC_REG_259967_FW_DONE_SHFT                        0
+
+#define VIDC_REG_512143_ADDR                    \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000514)
+#define VIDC_REG_512143_PHYS                    \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000514)
+#define VIDC_REG_512143_RMSK                         0x1f8
+#define VIDC_REG_512143_SHFT                             0
+#define VIDC_REG_512143_IN                      \
+	in_dword_masked(VIDC_REG_512143_ADDR,  \
+			VIDC_REG_512143_RMSK)
+#define VIDC_REG_512143_INM(m)                  \
+	in_dword_masked(VIDC_REG_512143_ADDR,  m)
+#define VIDC_REG_512143_FRAME_DONE_STAT_BMSK         0x100
+#define VIDC_REG_512143_FRAME_DONE_STAT_SHFT           0x8
+#define VIDC_REG_512143_DMA_DONE_STAT_BMSK            0x80
+#define VIDC_REG_512143_DMA_DONE_STAT_SHFT             0x7
+#define VIDC_REG_512143_HEADER_DONE_STAT_BMSK         0x40
+#define VIDC_REG_512143_HEADER_DONE_STAT_SHFT          0x6
+#define VIDC_REG_512143_FW_DONE_STAT_BMSK             0x20
+#define VIDC_REG_512143_FW_DONE_STAT_SHFT              0x5
+#define VIDC_REG_512143_OPERATION_FAILED_BMSK         0x10
+#define VIDC_REG_512143_OPERATION_FAILED_SHFT          0x4
+#define VIDC_REG_512143_STREAM_HDR_CHANGED_BMSK        0x8
+#define VIDC_REG_512143_STREAM_HDR_CHANGED_SHFT        0x3
+
+#define VIDC_REG_418173_ADDR                \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000518)
+#define VIDC_REG_418173_PHYS                \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000518)
+#define VIDC_REG_418173_RMSK                     0x1fa
+#define VIDC_REG_418173_SHFT                         0
+#define VIDC_REG_418173_IN                  \
+	in_dword_masked(VIDC_REG_418173_ADDR,  \
+			VIDC_REG_418173_RMSK)
+#define VIDC_REG_418173_INM(m)              \
+	in_dword_masked(VIDC_REG_418173_ADDR,  m)
+#define VIDC_REG_418173_OUT(v)              \
+	out_dword(VIDC_REG_418173_ADDR, v)
+#define VIDC_REG_418173_OUTM(m, v)           \
+do { \
+	out_dword_masked_ns(VIDC_REG_418173_ADDR, m, v, \
+			VIDC_REG_418173_IN); \
+} while (0)
+#define VIDC_REG_418173_FRAME_DONE_ENABLE_BMSK      0x100
+#define VIDC_REG_418173_FRAME_DONE_ENABLE_SHFT        0x8
+#define VIDC_REG_418173_DMA_DONE_ENABLE_BMSK       0x80
+#define VIDC_REG_418173_DMA_DONE_ENABLE_SHFT        0x7
+#define VIDC_REG_418173_HEADER_DONE_ENABLE_BMSK       0x40
+#define VIDC_REG_418173_HEADER_DONE_ENABLE_SHFT        0x6
+#define VIDC_REG_418173_FW_DONE_ENABLE_BMSK       0x20
+#define VIDC_REG_418173_FW_DONE_ENABLE_SHFT        0x5
+#define VIDC_REG_418173_OPERATION_FAILED_ENABLE_BMSK       0x10
+#define VIDC_REG_418173_OPERATION_FAILED_ENABLE_SHFT        0x4
+#define VIDC_REG_418173_STREAM_HDR_CHANGED_ENABLE_BMSK        0x8
+#define VIDC_REG_418173_STREAM_HDR_CHANGED_ENABLE_SHFT        0x3
+#define VIDC_REG_418173_BUFFER_FULL_ENABLE_BMSK        0x2
+#define VIDC_REG_418173_BUFFER_FULL_ENABLE_SHFT        0x1
+
+#define VIDC_REG_841539_ADDR                \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000600)
+#define VIDC_REG_841539_PHYS                \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000600)
+#define VIDC_REG_841539_RMSK                       0x3
+#define VIDC_REG_841539_SHFT                         0
+#define VIDC_REG_841539_IN                  \
+	in_dword_masked(VIDC_REG_841539_ADDR,  \
+			VIDC_REG_841539_RMSK)
+#define VIDC_REG_841539_INM(m)              \
+	in_dword_masked(VIDC_REG_841539_ADDR,  m)
+#define VIDC_REG_841539_OUT(v)              \
+	out_dword(VIDC_REG_841539_ADDR, v)
+#define VIDC_REG_841539_OUTM(m, v)           \
+do { \
+	out_dword_masked_ns(VIDC_REG_841539_ADDR, m, v, \
+			VIDC_REG_841539_IN); \
+} while (0)
+#define VIDC_REG_841539_TILE_MODE_BMSK             0x3
+#define VIDC_REG_841539_TILE_MODE_SHFT               0
+
+#define VIDC_REG_99105_ADDR                \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000800)
+#define VIDC_REG_99105_PHYS                \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000800)
+#define VIDC_REG_99105_RMSK                0xffffffff
+#define VIDC_REG_99105_SHFT                         0
+#define VIDC_REG_99105_IN                  \
+	in_dword_masked(VIDC_REG_99105_ADDR,  \
+			VIDC_REG_99105_RMSK)
+#define VIDC_REG_99105_INM(m)              \
+	in_dword_masked(VIDC_REG_99105_ADDR,  m)
+#define VIDC_REG_99105_OUT(v)              \
+	out_dword(VIDC_REG_99105_ADDR, v)
+#define VIDC_REG_99105_OUTM(m, v)           \
+do { \
+	out_dword_masked_ns(VIDC_REG_99105_ADDR, m, v, \
+			VIDC_REG_99105_IN); \
+} while (0)
+#define VIDC_REG_99105_ENC_CUR_Y_ADDR_BMSK 0xffffffff
+#define VIDC_REG_99105_ENC_CUR_Y_ADDR_SHFT          0
+
+#define VIDC_REG_777113_ADDR_ADDR                \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000804)
+#define VIDC_REG_777113_ADDR_PHYS                \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000804)
+#define VIDC_REG_777113_ADDR_RMSK                0xffffffff
+#define VIDC_REG_777113_ADDR_SHFT                         0
+#define VIDC_REG_777113_ADDR_IN                  \
+	in_dword_masked(VIDC_REG_777113_ADDR_ADDR,  \
+			VIDC_REG_777113_ADDR_RMSK)
+#define VIDC_REG_777113_ADDR_INM(m)              \
+	in_dword_masked(VIDC_REG_777113_ADDR_ADDR,  m)
+#define VIDC_REG_777113_ADDR_OUT(v)              \
+	out_dword(VIDC_REG_777113_ADDR_ADDR, v)
+#define VIDC_REG_777113_ADDR_OUTM(m, v)           \
+do { \
+	out_dword_masked_ns(VIDC_REG_777113_ADDR_ADDR, m, v, \
+			VIDC_REG_777113_ADDR_IN); \
+} while (0)
+#define VIDC_REG_777113_ADDR_ENC_CUR_C_ADDR_BMSK 0xffffffff
+#define VIDC_REG_777113_ADDR_ENC_CUR_C_ADDR_SHFT          0
+
+#define VIDC_REG_341928_ADDR_ADDR                  \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x0000080c)
+#define VIDC_REG_341928_ADDR_PHYS                  \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x0000080c)
+#define VIDC_REG_341928_ADDR_RMSK                  0xffffffff
+#define VIDC_REG_341928_ADDR_SHFT                           0
+#define VIDC_REG_341928_ADDR_IN                    \
+	in_dword_masked(VIDC_REG_341928_ADDR_ADDR,  \
+			VIDC_REG_341928_ADDR_RMSK)
+#define VIDC_REG_341928_ADDR_INM(m)                \
+	in_dword_masked(VIDC_REG_341928_ADDR_ADDR,  m)
+#define VIDC_REG_341928_ADDR_OUT(v)                \
+	out_dword(VIDC_REG_341928_ADDR_ADDR, v)
+#define VIDC_REG_341928_ADDR_OUTM(m, v)             \
+do { \
+	out_dword_masked_ns(VIDC_REG_341928_ADDR_ADDR, m, v, \
+			VIDC_REG_341928_ADDR_IN); \
+} while (0)
+#define VIDC_REG_341928_ADDR_ENC_DPB_ADR_BMSK      0xffffffff
+#define VIDC_REG_341928_ADDR_ENC_DPB_ADR_SHFT               0
+
+#define VIDC_REG_857491_ADDR                    \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000810)
+#define VIDC_REG_857491_PHYS                    \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000810)
+#define VIDC_REG_857491_RMSK                         0xfff
+#define VIDC_REG_857491_SHFT                             0
+#define VIDC_REG_857491_IN                      \
+	in_dword_masked(VIDC_REG_857491_ADDR,  \
+			VIDC_REG_857491_RMSK)
+#define VIDC_REG_857491_INM(m)                  \
+	in_dword_masked(VIDC_REG_857491_ADDR,  m)
+#define VIDC_REG_857491_OUT(v)                  \
+	out_dword(VIDC_REG_857491_ADDR, v)
+#define VIDC_REG_857491_OUTM(m, v)               \
+do { \
+	out_dword_masked_ns(VIDC_REG_857491_ADDR, m, v, \
+			VIDC_REG_857491_IN); \
+} while (0)
+#define VIDC_REG_857491_CIR_MB_NUM_BMSK              0xfff
+#define VIDC_REG_857491_CIR_MB_NUM_SHFT                  0
+
+#define VIDC_REG_518133_ADDR                  \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000900)
+#define VIDC_REG_518133_PHYS                  \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000900)
+#define VIDC_REG_518133_RMSK                  0xffffffff
+#define VIDC_REG_518133_SHFT                           0
+#define VIDC_REG_518133_IN                    \
+	in_dword_masked(VIDC_REG_518133_ADDR,  \
+			VIDC_REG_518133_RMSK)
+#define VIDC_REG_518133_INM(m)                \
+	in_dword_masked(VIDC_REG_518133_ADDR,  m)
+#define VIDC_REG_518133_OUT(v)                \
+	out_dword(VIDC_REG_518133_ADDR, v)
+#define VIDC_REG_518133_OUTM(m, v)             \
+do { \
+	out_dword_masked_ns(VIDC_REG_518133_ADDR, m, v, \
+			VIDC_REG_518133_IN); \
+} while (0)
+#define VIDC_REG_518133_DEC_DPB_ADDR_BMSK     0xffffffff
+#define VIDC_REG_518133_DEC_DPB_ADDR_SHFT              0
+
+#define VIDC_REG_456376_ADDR_ADDR                 \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000904)
+#define VIDC_REG_456376_ADDR_PHYS                 \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000904)
+#define VIDC_REG_456376_ADDR_RMSK                 0xffffffff
+#define VIDC_REG_456376_ADDR_SHFT                          0
+#define VIDC_REG_456376_ADDR_IN                   \
+	in_dword_masked(VIDC_REG_456376_ADDR_ADDR,  \
+			VIDC_REG_456376_ADDR_RMSK)
+#define VIDC_REG_456376_ADDR_INM(m)               \
+	in_dword_masked(VIDC_REG_456376_ADDR_ADDR,  m)
+#define VIDC_REG_456376_ADDR_OUT(v)               \
+	out_dword(VIDC_REG_456376_ADDR_ADDR, v)
+#define VIDC_REG_456376_ADDR_OUTM(m, v)            \
+do { \
+	out_dword_masked_ns(VIDC_REG_456376_ADDR_ADDR, m, v, \
+			VIDC_REG_456376_ADDR_IN); \
+} while (0)
+#define VIDC_REG_456376_ADDR_DPB_COMV_ADDR_BMSK   0xffffffff
+#define VIDC_REG_456376_ADDR_DPB_COMV_ADDR_SHFT            0
+
+#define VIDC_REG_267567_ADDR                 \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000908)
+#define VIDC_REG_267567_PHYS                 \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000908)
+#define VIDC_REG_267567_RMSK                 0xffffffff
+#define VIDC_REG_267567_SHFT                          0
+#define VIDC_REG_267567_IN                   \
+	in_dword_masked(VIDC_REG_267567_ADDR,  \
+			VIDC_REG_267567_RMSK)
+#define VIDC_REG_267567_INM(m)               \
+	in_dword_masked(VIDC_REG_267567_ADDR,  m)
+#define VIDC_REG_267567_OUT(v)               \
+	out_dword(VIDC_REG_267567_ADDR, v)
+#define VIDC_REG_267567_OUTM(m, v)            \
+do { \
+	out_dword_masked_ns(VIDC_REG_267567_ADDR, m, v, \
+			VIDC_REG_267567_IN); \
+} while (0)
+#define VIDC_REG_798486_ADDR_BMSK   0xffffffff
+#define VIDC_REG_798486_ADDR_SHFT            0
+
+#define VIDC_REG_105770_ADDR                      \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x0000090c)
+#define VIDC_REG_105770_PHYS                      \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x0000090c)
+#define VIDC_REG_105770_RMSK                            0xff
+#define VIDC_REG_105770_SHFT                               0
+#define VIDC_REG_105770_IN                        \
+	in_dword_masked(VIDC_REG_105770_ADDR,         \
+	VIDC_REG_105770_RMSK)
+#define VIDC_REG_105770_INM(m)                    \
+	in_dword_masked(VIDC_REG_105770_ADDR,  m)
+#define VIDC_REG_105770_DPB_SIZE_BMSK                   0xff
+#define VIDC_REG_105770_DPB_SIZE_SHFT                      0
+
+#define VIDC_REG_58211_ADDR                 \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000a00)
+#define VIDC_REG_58211_PHYS                 \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000a00)
+#define VIDC_REG_58211_RMSK                      0x33f
+#define VIDC_REG_58211_SHFT                          0
+#define VIDC_REG_58211_IN                   \
+	in_dword_masked(VIDC_REG_58211_ADDR,  \
+			VIDC_REG_58211_RMSK)
+#define VIDC_REG_58211_INM(m)               \
+	in_dword_masked(VIDC_REG_58211_ADDR,  m)
+#define VIDC_REG_58211_OUT(v)               \
+	out_dword(VIDC_REG_58211_ADDR, v)
+#define VIDC_REG_58211_OUTM(m, v)            \
+do { \
+	out_dword_masked_ns(VIDC_REG_58211_ADDR, m, v, \
+			VIDC_REG_58211_IN); \
+} while (0)
+#define VIDC_REG_58211_FR_RC_EN_BMSK             0x200
+#define VIDC_REG_58211_FR_RC_EN_SHFT               0x9
+#define VIDC_REG_58211_MB_RC_EN_BMSK             0x100
+#define VIDC_REG_58211_MB_RC_EN_SHFT               0x8
+#define VIDC_REG_58211_FRAME_QP_BMSK              0x3f
+#define VIDC_REG_58211_FRAME_QP_SHFT                 0
+
+#define VIDC_REG_548359_ADDR                    \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000a04)
+#define VIDC_REG_548359_PHYS                    \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000a04)
+#define VIDC_REG_548359_RMSK                          0x3f
+#define VIDC_REG_548359_SHFT                             0
+#define VIDC_REG_548359_IN                      \
+	in_dword_masked(VIDC_REG_548359_ADDR,  \
+			VIDC_REG_548359_RMSK)
+#define VIDC_REG_548359_INM(m)                  \
+	in_dword_masked(VIDC_REG_548359_ADDR,  m)
+#define VIDC_REG_548359_OUT(v)                  \
+	out_dword(VIDC_REG_548359_ADDR, v)
+#define VIDC_REG_548359_OUTM(m, v)               \
+do { \
+	out_dword_masked_ns(VIDC_REG_548359_ADDR, m, v, \
+			VIDC_REG_548359_IN); \
+} while (0)
+#define VIDC_REG_548359_P_FRAME_QP_BMSK               0x3f
+#define VIDC_REG_548359_P_FRAME_QP_SHFT                  0
+
+#define VIDC_REG_174150_ADDR                   \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000a08)
+#define VIDC_REG_174150_PHYS                   \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000a08)
+#define VIDC_REG_174150_RMSK                   0xffffffff
+#define VIDC_REG_174150_SHFT                            0
+#define VIDC_REG_174150_IN                     \
+	in_dword_masked(VIDC_REG_174150_ADDR,  \
+			VIDC_REG_174150_RMSK)
+#define VIDC_REG_174150_INM(m)                 \
+	in_dword_masked(VIDC_REG_174150_ADDR,  m)
+#define VIDC_REG_174150_OUT(v)                 \
+	out_dword(VIDC_REG_174150_ADDR, v)
+#define VIDC_REG_174150_OUTM(m, v)              \
+do { \
+	out_dword_masked_ns(VIDC_REG_174150_ADDR, m, v, \
+			VIDC_REG_174150_IN); \
+} while (0)
+#define VIDC_REG_174150_BIT_RATE_BMSK          0xffffffff
+#define VIDC_REG_174150_BIT_RATE_SHFT                   0
+
+#define VIDC_REG_734318_ADDR                     \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000a0c)
+#define VIDC_REG_734318_PHYS                     \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000a0c)
+#define VIDC_REG_734318_RMSK                         0x3f3f
+#define VIDC_REG_734318_SHFT                              0
+#define VIDC_REG_734318_IN                       \
+	in_dword_masked(VIDC_REG_734318_ADDR,        \
+	VIDC_REG_734318_RMSK)
+#define VIDC_REG_734318_INM(m)                   \
+	in_dword_masked(VIDC_REG_734318_ADDR,  m)
+#define VIDC_REG_734318_OUT(v)                   \
+	out_dword(VIDC_REG_734318_ADDR, v)
+#define VIDC_REG_734318_OUTM(m, v)                \
+do { \
+	out_dword_masked_ns(VIDC_REG_734318_ADDR, m, v, \
+			VIDC_REG_734318_IN); \
+} while (0)
+#define VIDC_REG_734318_MAX_QP_BMSK                  0x3f00
+#define VIDC_REG_734318_MAX_QP_SHFT                     0x8
+#define VIDC_REG_734318_MIN_QP_BMSK                    0x3f
+#define VIDC_REG_734318_MIN_QP_SHFT                       0
+
+#define VIDC_REG_677784_ADDR                      \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000a10)
+#define VIDC_REG_677784_PHYS                      \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000a10)
+#define VIDC_REG_677784_RMSK                          0xffff
+#define VIDC_REG_677784_SHFT                               0
+#define VIDC_REG_677784_IN                        \
+	in_dword_masked(VIDC_REG_677784_ADDR,         \
+	VIDC_REG_677784_RMSK)
+#define VIDC_REG_677784_INM(m)                    \
+	in_dword_masked(VIDC_REG_677784_ADDR,  m)
+#define VIDC_REG_677784_OUT(v)                    \
+	out_dword(VIDC_REG_677784_ADDR, v)
+#define VIDC_REG_677784_OUTM(m, v)                 \
+do { \
+	out_dword_masked_ns(VIDC_REG_677784_ADDR, m, v, \
+			VIDC_REG_677784_IN); \
+} while (0)
+#define VIDC_REG_677784_REACT_PARA_BMSK               0xffff
+#define VIDC_REG_677784_REACT_PARA_SHFT                    0
+
+#define VIDC_REG_995041_ADDR                    \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000a14)
+#define VIDC_REG_995041_PHYS                    \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000a14)
+#define VIDC_REG_995041_RMSK                           0xf
+#define VIDC_REG_995041_SHFT                             0
+#define VIDC_REG_995041_IN                      \
+	in_dword_masked(VIDC_REG_995041_ADDR,  \
+			VIDC_REG_995041_RMSK)
+#define VIDC_REG_995041_INM(m)                  \
+	in_dword_masked(VIDC_REG_995041_ADDR,  m)
+#define VIDC_REG_995041_OUT(v)                  \
+	out_dword(VIDC_REG_995041_ADDR, v)
+#define VIDC_REG_995041_OUTM(m, v)               \
+do { \
+	out_dword_masked_ns(VIDC_REG_995041_ADDR, m, v, \
+			VIDC_REG_995041_IN); \
+} while (0)
+#define VIDC_REG_995041_DARK_DISABLE_BMSK              0x8
+#define VIDC_REG_995041_DARK_DISABLE_SHFT              0x3
+#define VIDC_REG_995041_SMOOTH_DISABLE_BMSK            0x4
+#define VIDC_REG_995041_SMOOTH_DISABLE_SHFT            0x2
+#define VIDC_REG_995041_STATIC_DISABLE_BMSK            0x2
+#define VIDC_REG_995041_STATIC_DISABLE_SHFT            0x1
+#define VIDC_REG_995041_ACT_DISABLE_BMSK               0x1
+#define VIDC_REG_995041_ACT_DISABLE_SHFT                 0
+
+#define VIDC_REG_273649_ADDR                       \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000a18)
+#define VIDC_REG_273649_PHYS                       \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000a18)
+#define VIDC_REG_273649_RMSK                             0x3f
+#define VIDC_REG_273649_SHFT                                0
+#define VIDC_REG_273649_IN                         \
+	in_dword_masked(VIDC_REG_273649_ADDR,  VIDC_REG_273649_RMSK)
+#define VIDC_REG_273649_INM(m)                     \
+	in_dword_masked(VIDC_REG_273649_ADDR,  m)
+#define VIDC_REG_273649_QP_OUT_BMSK                      0x3f
+#define VIDC_REG_273649_QP_OUT_SHFT                         0
+
+#define VIDC_REG_548823_ADDR                   \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000b00)
+#define VIDC_REG_548823_PHYS                   \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000b00)
+#define VIDC_REG_548823_RMSK                   0xffffffff
+#define VIDC_REG_548823_SHFT                            0
+#define VIDC_REG_548823_IN                     \
+	in_dword_masked(VIDC_REG_548823_ADDR,  \
+			VIDC_REG_548823_RMSK)
+#define VIDC_REG_548823_INM(m)                 \
+	in_dword_masked(VIDC_REG_548823_ADDR,  m)
+#define VIDC_REG_548823_720P_VERSION_BMSK       0xffffffff
+#define VIDC_REG_548823_720P_VERSION_SHFT                0
+
+#define VIDC_REG_881638_ADDR                     \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000c00)
+#define VIDC_REG_881638_PHYS                     \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000c00)
+#define VIDC_REG_881638_RMSK                     0xffffffff
+#define VIDC_REG_881638_SHFT                              0
+#define VIDC_REG_881638_IN                       \
+	in_dword_masked(VIDC_REG_881638_ADDR,        \
+	VIDC_REG_881638_RMSK)
+#define VIDC_REG_881638_INM(m)                   \
+	in_dword_masked(VIDC_REG_881638_ADDR,  m)
+#define VIDC_REG_881638_CROP_RIGHT_OFFSET_BMSK   0xffff0000
+#define VIDC_REG_881638_CROP_RIGHT_OFFSET_SHFT         0x10
+#define VIDC_REG_881638_CROP_LEFT_OFFSET_BMSK        0xffff
+#define VIDC_REG_881638_CROP_LEFT_OFFSET_SHFT             0
+
+#define VIDC_REG_161486_ADDR                     \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000c04)
+#define VIDC_REG_161486_PHYS                     \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000c04)
+#define VIDC_REG_161486_RMSK                     0xffffffff
+#define VIDC_REG_161486_SHFT                              0
+#define VIDC_REG_161486_IN                       \
+	in_dword_masked(VIDC_REG_161486_ADDR,        \
+	VIDC_REG_161486_RMSK)
+#define VIDC_REG_161486_INM(m)                   \
+	in_dword_masked(VIDC_REG_161486_ADDR,  m)
+#define VIDC_REG_161486_CROP_BOTTOM_OFFSET_BMSK  0xffff0000
+#define VIDC_REG_161486_CROP_BOTTOM_OFFSET_SHFT        0x10
+#define VIDC_REG_161486_CROP_TOP_OFFSET_BMSK         0xffff
+#define VIDC_REG_161486_CROP_TOP_OFFSET_SHFT              0
+
+#define VIDC_REG_580603_ADDR              \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000c08)
+#define VIDC_REG_580603_PHYS              \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000c08)
+#define VIDC_REG_580603_RMSK              0xffffffff
+#define VIDC_REG_580603_SHFT                       0
+#define VIDC_REG_580603_IN                \
+	in_dword_masked(VIDC_REG_580603_ADDR,  \
+			VIDC_REG_580603_RMSK)
+#define VIDC_REG_580603_INM(m)            \
+	in_dword_masked(VIDC_REG_580603_ADDR,  m)
+#define VIDC_REG_580603_720P_DEC_FRM_SIZE_BMSK 0xffffffff
+#define VIDC_REG_580603_720P_DEC_FRM_SIZE_SHFT          0
+
+
+#define VIDC_REG_606447_ADDR \
+		(VIDC_720P_WRAPPER_REG_BASE + 0x00000c0c)
+#define VIDC_REG_606447_PHYS \
+		(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000c0c)
+#define VIDC_REG_606447_RMSK  0xff1f
+#define VIDC_REG_606447_SHFT  0
+#define VIDC_REG_606447_IN                         \
+		in_dword_masked(VIDC_REG_606447_ADDR, \
+		VIDC_REG_606447_RMSK)
+#define VIDC_REG_606447_INM(m)                     \
+		in_dword_masked(VIDC_REG_606447_ADDR, m)
+#define VIDC_REG_606447_OUT(v)                     \
+		out_dword(VIDC_REG_606447_ADDR, v)
+#define VIDC_REG_606447_OUTM(m, v)                  \
+do { \
+	out_dword_masked_ns(VIDC_REG_606447_ADDR, m, v, \
+			VIDC_REG_606447_IN); \
+} while (0)
+
+#define VIDC_REG_606447_DIS_PIC_LEVEL_BMSK 0xff00
+#define VIDC_REG_606447_DIS_PIC_LEVEL_SHFT 0x8
+#define VIDC_REG_606447_DISP_PIC_PROFILE_BMSK 0x1f
+#define VIDC_REG_606447_DISP_PIC_PROFILE_SHFT 0
+
+#define VIDC_REG_854281_ADDR \
+		(VIDC_720P_WRAPPER_REG_BASE      + 0x00000c10)
+#define VIDC_REG_854281_PHYS \
+		(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000c10)
+#define VIDC_REG_854281_RMSK 0xffffffff
+#define VIDC_REG_854281_SHFT 0
+#define VIDC_REG_854281_IN \
+		in_dword_masked(VIDC_REG_854281_ADDR, \
+		VIDC_REG_854281_RMSK)
+#define VIDC_REG_854281_INM(m) \
+		in_dword_masked(VIDC_REG_854281_ADDR, m)
+#define VIDC_REG_854281_MIN_DPB_SIZE_BMSK 0xffffffff
+#define VIDC_REG_854281_MIN_DPB_SIZE_SHFT 0
+
+
+#define VIDC_REG_381535_ADDR \
+		(VIDC_720P_WRAPPER_REG_BASE + 0x00000c14)
+#define VIDC_REG_381535_PHYS \
+		(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000c14)
+#define VIDC_REG_381535_RMSK 0xffffffff
+#define VIDC_REG_381535_SHFT 0
+#define VIDC_REG_381535_IN \
+		in_dword_masked(VIDC_REG_381535_ADDR, \
+		VIDC_REG_381535_RMSK)
+#define VIDC_REG_381535_INM(m) \
+		in_dword_masked(VIDC_REG_381535_ADDR, m)
+#define VIDC_REG_381535_720P_FW_STATUS_BMSK 0xffffffff
+#define VIDC_REG_381535_720P_FW_STATUS_SHFT 0
+
+
+#define VIDC_REG_347105_ADDR \
+		(VIDC_720P_WRAPPER_REG_BASE + 0x00000c18)
+#define VIDC_REG_347105_PHYS \
+		(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000c18)
+#define VIDC_REG_347105_RMSK 0xffffffff
+#define VIDC_REG_347105_SHFT 0
+#define VIDC_REG_347105_IN \
+		in_dword_masked(VIDC_REG_347105_ADDR, \
+		VIDC_REG_347105_RMSK)
+#define VIDC_REG_347105_INM(m) \
+		in_dword_masked(VIDC_REG_347105_ADDR, m)
+#define VIDC_REG_347105_FREE_LUMA_DPB_BMSK 0xffffffff
+#define VIDC_REG_347105_FREE_LUMA_DPB_SHFT 0
+
+
+#define VIDC_REG_62325_ADDR              \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000d00)
+#define VIDC_REG_62325_PHYS              \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000d00)
+#define VIDC_REG_62325_RMSK                     0xf
+#define VIDC_REG_62325_SHFT                       0
+#define VIDC_REG_62325_IN                \
+		in_dword_masked(VIDC_REG_62325_ADDR,  \
+		VIDC_REG_62325_RMSK)
+#define VIDC_REG_62325_INM(m)            \
+	in_dword_masked(VIDC_REG_62325_ADDR,  m)
+#define VIDC_REG_62325_OUT(v)            \
+	out_dword(VIDC_REG_62325_ADDR, v)
+#define VIDC_REG_62325_OUTM(m, v)         \
+do { \
+	out_dword_masked_ns(VIDC_REG_62325_ADDR, m, v, \
+			VIDC_REG_62325_IN); \
+} while (0)
+#define VIDC_REG_62325_COMMAND_TYPE_BMSK        0xf
+#define VIDC_REG_62325_COMMAND_TYPE_SHFT          0
+
+#define VIDC_REG_101184_ADDR  \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000d04)
+#define VIDC_REG_101184_PHYS  \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000d04)
+#define VIDC_REG_101184_RMSK                0xffffffff
+#define VIDC_REG_101184_SHFT                0
+#define VIDC_REG_101184_OUT(v)                     \
+	out_dword(VIDC_REG_101184_ADDR, v)
+
+#define VIDC_REG_490443_ADDR  \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000d08)
+#define VIDC_REG_490443_PHYS  \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000d08)
+#define VIDC_REG_490443_RMSK                       \
+	0xffffffff
+#define VIDC_REG_490443_SHFT                                0
+#define VIDC_REG_490443_OUT(v)                     \
+	out_dword(VIDC_REG_490443_ADDR, v)
+
+#define VIDC_REG_625444_ADDR                 \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000d14)
+#define VIDC_REG_625444_PHYS                 \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000d14)
+#define VIDC_REG_625444_RMSK                 0xffffffff
+#define VIDC_REG_625444_SHFT                          0
+#define VIDC_REG_625444_IN                   \
+	in_dword_masked(VIDC_REG_625444_ADDR,  \
+			VIDC_REG_625444_RMSK)
+#define VIDC_REG_625444_INM(m)               \
+	in_dword_masked(VIDC_REG_625444_ADDR,  m)
+#define VIDC_REG_625444_OUT(v)               \
+	out_dword(VIDC_REG_625444_ADDR, v)
+#define VIDC_REG_625444_OUTM(m, v)            \
+do { \
+	out_dword_masked_ns(VIDC_REG_625444_ADDR, m, v, \
+			VIDC_REG_625444_IN); \
+} while (0)
+#define VIDC_REG_625444_FRAME_RATE_BMSK      0xffffffff
+#define VIDC_REG_625444_FRAME_RATE_SHFT               0
+
+#define VIDC_REG_639999_ADDR                    \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000d20)
+#define VIDC_REG_639999_PHYS                    \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000d20)
+#define VIDC_REG_639999_RMSK                    0xffff
+#define VIDC_REG_639999_SHFT                         0
+#define VIDC_REG_639999_OUT(v)                  \
+	out_dword(VIDC_REG_639999_ADDR, v)
+
+#define VIDC_REG_64895_ADDR                    \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000e00)
+#define VIDC_REG_64895_PHYS                    \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000e00)
+#define VIDC_REG_64895_RMSK                    0xffffffff
+#define VIDC_REG_64895_SHFT                             0
+#define VIDC_REG_64895_OUT(v)                  \
+	out_dword(VIDC_REG_64895_ADDR, v)
+
+#define VIDC_REG_965480_ADDR \
+		(VIDC_720P_WRAPPER_REG_BASE + 0x00000e04)
+#define VIDC_REG_965480_PHYS \
+		(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000e04)
+#define VIDC_REG_965480_RMSK 0x1
+#define VIDC_REG_965480_SHFT 0
+#define VIDC_REG_965480_OUT(v) \
+		out_dword(VIDC_REG_965480_ADDR, v)
+
+#define VIDC_REG_804959_ADDR              \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000e08)
+#define VIDC_REG_804959_PHYS              \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000e08)
+#define VIDC_REG_804959_RMSK                     0x7
+#define VIDC_REG_804959_SHFT                       0
+#define VIDC_REG_804959_OUT(v)            \
+	out_dword(VIDC_REG_804959_ADDR, v)
+
+#define VIDC_REG_257463_ADDR                   \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000e10)
+#define VIDC_REG_257463_PHYS                   \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000e10)
+#define VIDC_REG_257463_RMSK                   0xffffffff
+#define VIDC_REG_257463_SHFT                            0
+#define VIDC_REG_257463_IN                     \
+	in_dword_masked(VIDC_REG_257463_ADDR,  \
+			VIDC_REG_257463_RMSK)
+#define VIDC_REG_257463_INM(m)                 \
+	in_dword_masked(VIDC_REG_257463_ADDR,  m)
+#define VIDC_REG_257463_MIN_NUM_DPB_BMSK       0xffffffff
+#define VIDC_REG_257463_MIN_NUM_DPB_SHFT                0
+
+#define VIDC_REG_883500_ADDR                       \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000e14)
+#define VIDC_REG_883500_PHYS                       \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000e14)
+#define VIDC_REG_883500_RMSK                       0xffffffff
+#define VIDC_REG_883500_SHFT                                0
+#define VIDC_REG_883500_OUT(v)                     \
+	out_dword(VIDC_REG_883500_ADDR, v)
+
+#define VIDC_REG_615716_ADDR(n)               \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000e18 + 4 * (n))
+#define VIDC_REG_615716_PHYS(n)               \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000e18 + 4 * (n))
+#define VIDC_REG_615716_RMSK                  0xffffffff
+#define VIDC_REG_615716_SHFT                           0
+#define VIDC_REG_615716_OUTI(n, v) \
+	out_dword(VIDC_REG_615716_ADDR(n), v)
+
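+/*
+ * Illustrative sketch (not part of the original register map): the
+ * indexed variant above maps an array of 32-bit slots starting at
+ * offset 0x0e18, so slot n (at 0x0e18 + 4 * n) is written with
+ *
+ *	VIDC_REG_615716_OUTI(n, value);
+ */
+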
+#define VIDC_REG_603032_ADDR                \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000e98)
+#define VIDC_REG_603032_PHYS                \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000e98)
+#define VIDC_REG_603032_RMSK                0xffffffff
+#define VIDC_REG_603032_SHFT                         0
+#define VIDC_REG_603032_OUT(v)              \
+	out_dword(VIDC_REG_603032_ADDR, v)
+
+#define VIDC_REG_300310_ADDR                  \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000e9c)
+#define VIDC_REG_300310_PHYS                  \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000e9c)
+#define VIDC_REG_300310_RMSK                  0xffffffff
+#define VIDC_REG_300310_SHFT                           0
+#define VIDC_REG_300310_IN                    \
+	in_dword_masked(VIDC_REG_300310_ADDR,  \
+			VIDC_REG_300310_RMSK)
+#define VIDC_REG_300310_INM(m)                \
+	in_dword_masked(VIDC_REG_300310_ADDR,  m)
+#define VIDC_REG_300310_ERROR_STATUS_BMSK     0xffffffff
+#define VIDC_REG_300310_ERROR_STATUS_SHFT              0
+
+#define VIDC_REG_792026_ADDR        \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000ea0)
+#define VIDC_REG_792026_PHYS        \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000ea0)
+#define VIDC_REG_792026_RMSK        0xffffffff
+#define VIDC_REG_792026_SHFT                 0
+#define VIDC_REG_792026_OUT(v)      \
+	out_dword(VIDC_REG_792026_ADDR, v)
+
+#define VIDC_REG_844152_ADDR        \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000ea4)
+#define VIDC_REG_844152_PHYS        \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000ea4)
+#define VIDC_REG_844152_RMSK        0xffffffff
+#define VIDC_REG_844152_SHFT                 0
+#define VIDC_REG_844152_OUT(v)      \
+	out_dword(VIDC_REG_844152_ADDR, v)
+
+#define VIDC_REG_370409_ADDR            \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000ea8)
+#define VIDC_REG_370409_PHYS                \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000ea8)
+#define VIDC_REG_370409_RMSK                0xffffffff
+#define VIDC_REG_370409_SHFT                         0
+#define VIDC_REG_370409_IN                  \
+	in_dword_masked(VIDC_REG_370409_ADDR,  \
+			VIDC_REG_370409_RMSK)
+#define VIDC_REG_370409_INM(m)              \
+	in_dword_masked(VIDC_REG_370409_ADDR,  m)
+#define VIDC_REG_370409_GET_FRAME_TAG_TOP_BMSK 0xffffffff
+#define VIDC_REG_370409_GET_FRAME_TAG_TOP_SHFT          0
+
+#define VIDC_REG_147682_ADDR               \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000eac)
+#define VIDC_REG_147682_PHYS               \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000eac)
+#define VIDC_REG_147682_RMSK                        0x1
+#define VIDC_REG_147682_SHFT                        0
+#define VIDC_REG_147682_OUT(v)             \
+	out_dword(VIDC_REG_147682_ADDR, v)
+
+#define VIDC_REG_407718_ADDR                    \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000eb0)
+#define VIDC_REG_407718_PHYS                    \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000eb0)
+#define VIDC_REG_407718_RMSK                    0xffffffff
+#define VIDC_REG_407718_SHFT                             0
+#define VIDC_REG_407718_OUT(v)                  \
+	out_dword(VIDC_REG_407718_ADDR, v)
+
+#define VIDC_REG_697961_ADDR \
+		(VIDC_720P_WRAPPER_REG_BASE + 0x00000eb4)
+#define VIDC_REG_697961_PHYS \
+		(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000eb4)
+#define VIDC_REG_697961_RMSK 0x3
+#define VIDC_REG_697961_SHFT 0
+#define VIDC_REG_697961_IN \
+		in_dword_masked(VIDC_REG_697961_ADDR, \
+		VIDC_REG_697961_RMSK)
+#define VIDC_REG_697961_INM(m) \
+		in_dword_masked(VIDC_REG_697961_ADDR, m)
+#define VIDC_REG_697961_FRAME_TYPE_BMSK 0x3
+#define VIDC_REG_697961_FRAME_TYPE_SHFT 0
+
+
+#define VIDC_REG_613254_ADDR               \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000eb8)
+#define VIDC_REG_613254_PHYS               \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000eb8)
+#define VIDC_REG_613254_RMSK                      0x1
+#define VIDC_REG_613254_SHFT                        0
+#define VIDC_REG_613254_IN                 \
+	in_dword_masked(VIDC_REG_613254_ADDR,  \
+			VIDC_REG_613254_RMSK)
+#define VIDC_REG_613254_INM(m)             \
+	in_dword_masked(VIDC_REG_613254_ADDR,  m)
+#define VIDC_REG_613254_METADATA_STATUS_BMSK        0x1
+#define VIDC_REG_613254_METADATA_STATUS_SHFT          0
+
+#define VIDC_REG_441270_ADDR                    \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000ebc)
+#define VIDC_REG_441270_PHYS                    \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000ebc)
+#define VIDC_REG_441270_RMSK                           0xf
+#define VIDC_REG_441270_SHFT                             0
+#define VIDC_REG_441270_IN                      \
+	in_dword_masked(VIDC_REG_441270_ADDR,  \
+			VIDC_REG_441270_RMSK)
+#define VIDC_REG_441270_INM(m)                  \
+	in_dword_masked(VIDC_REG_441270_ADDR,  m)
+#define VIDC_REG_441270_DATA_PARTITIONED_BMSK 0x8
+#define VIDC_REG_441270_DATA_PARTITIONED_SHFT 0x3
+
+#define VIDC_REG_441270_FRAME_TYPE_BMSK                0x7
+#define VIDC_REG_441270_FRAME_TYPE_SHFT                  0
+
+#define VIDC_REG_724381_ADDR        \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000ec0)
+#define VIDC_REG_724381_PHYS        \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000ec0)
+#define VIDC_REG_724381_RMSK               0x3
+#define VIDC_REG_724381_SHFT                 0
+#define VIDC_REG_724381_IN          \
+	in_dword_masked(VIDC_REG_724381_ADDR,  \
+			VIDC_REG_724381_RMSK)
+#define VIDC_REG_724381_INM(m)      \
+	in_dword_masked(VIDC_REG_724381_ADDR,  m)
+#define VIDC_REG_724381_MORE_FIELD_NEEDED_BMSK       0x4
+#define VIDC_REG_724381_MORE_FIELD_NEEDED_SHFT       0x2
+#define VIDC_REG_724381_OPERATION_FAILED_BMSK        0x2
+#define VIDC_REG_724381_OPERATION_FAILED_SHFT        0x1
+#define VIDC_REG_724381_RESOLUTION_CHANGE_BMSK       0x1
+#define VIDC_REG_724381_RESOLUTION_CHANGE_SHFT         0
+
+#define VIDC_REG_854681_ADDR               \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000ec4)
+#define VIDC_REG_854681_PHYS               \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000ec4)
+#define VIDC_REG_854681_RMSK                     0x7f
+#define VIDC_REG_854681_SHFT                        0
+#define VIDC_REG_854681_OUT(v)             \
+	out_dword(VIDC_REG_854681_ADDR, v)
+
+#define VIDC_REG_128234_ADDR               \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000ec8)
+#define VIDC_REG_128234_PHYS               \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000ec8)
+#define VIDC_REG_128234_RMSK               0xffff000f
+#define VIDC_REG_128234_SHFT                        0
+#define VIDC_REG_128234_OUT(v)             \
+	out_dword(VIDC_REG_128234_ADDR, v)
+
+#define VIDC_REG_1137_ADDR        \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000ecc)
+#define VIDC_REG_1137_PHYS        \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000ecc)
+#define VIDC_REG_1137_RMSK        0xffffffff
+#define VIDC_REG_1137_SHFT                 0
+#define VIDC_REG_1137_IN          \
+	in_dword_masked(VIDC_REG_1137_ADDR,  \
+			VIDC_REG_1137_RMSK)
+#define VIDC_REG_1137_INM(m)      \
+	in_dword_masked(VIDC_REG_1137_ADDR,  m)
+#define VIDC_REG_1137_METADATA_DISPLAY_INDEX_BMSK \
+	0xffffffff
+#define VIDC_REG_1137_METADATA_DISPLAY_INDEX_SHFT \
+	0
+
+#define VIDC_REG_988552_ADDR       \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000ed0)
+#define VIDC_REG_988552_PHYS       \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000ed0)
+#define VIDC_REG_988552_RMSK       0xffffffff
+#define VIDC_REG_988552_SHFT                0
+#define VIDC_REG_988552_OUT(v)     \
+	out_dword(VIDC_REG_988552_ADDR, v)
+
+#define VIDC_REG_319934_ADDR  \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000ed4)
+#define VIDC_REG_319934_PHYS  \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000ed4)
+#define VIDC_REG_319934_RMSK                       0xffffffff
+#define VIDC_REG_319934_SHFT                   0
+#define VIDC_REG_319934_OUT(v)                     \
+	out_dword(VIDC_REG_319934_ADDR, v)
+
+#define VIDC_REG_679165_ADDR                   \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000ed8)
+#define VIDC_REG_679165_PHYS                   \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000ed8)
+#define VIDC_REG_679165_RMSK                   0xffffffff
+#define VIDC_REG_679165_SHFT                            0
+#define VIDC_REG_679165_IN                     \
+	in_dword_masked(VIDC_REG_679165_ADDR,  \
+			VIDC_REG_679165_RMSK)
+#define VIDC_REG_679165_INM(m)                 \
+	in_dword_masked(VIDC_REG_679165_ADDR,  m)
+#define VIDC_REG_679165_PIC_TIME_TOP_BMSK       0xffffffff
+#define VIDC_REG_679165_PIC_TIME_TOP_SHFT                0
+
+#define VIDC_REG_374150_ADDR                     \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000edc)
+#define VIDC_REG_374150_PHYS                     \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000edc)
+#define VIDC_REG_374150_RMSK                     0xffffffff
+#define VIDC_REG_374150_SHFT                              0
+#define VIDC_REG_374150_IN                       \
+	in_dword_masked(VIDC_REG_374150_ADDR,  \
+			VIDC_REG_374150_RMSK)
+#define VIDC_REG_374150_INM(m)                   \
+	in_dword_masked(VIDC_REG_374150_ADDR,  m)
+#define VIDC_REG_374150_PIC_TIME_BOTTOM_BMSK           0xffffffff
+#define VIDC_REG_374150_PIC_TIME_BOTTOM_SHFT                    0
+
+#define VIDC_REG_94750_ADDR                 \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000ee0)
+#define VIDC_REG_94750_PHYS                 \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000ee0)
+#define VIDC_REG_94750_RMSK                 0xffffffff
+#define VIDC_REG_94750_SHFT                          0
+#define VIDC_REG_94750_OUT(v)               \
+	out_dword(VIDC_REG_94750_ADDR, v)
+
+#define VIDC_REG_438677_ADDR          \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000ee4)
+#define VIDC_REG_438677_PHYS                \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000ee4)
+#define VIDC_REG_438677_RMSK                0xffffffff
+#define VIDC_REG_438677_SHFT                         0
+#define VIDC_REG_438677_IN                  \
+	in_dword_masked(VIDC_REG_438677_ADDR,  \
+			VIDC_REG_438677_RMSK)
+#define VIDC_REG_438677_INM(m)              \
+	in_dword_masked(VIDC_REG_438677_ADDR,  m)
+#define VIDC_REG_438677_GET_FRAME_TAG_BOTTOM_BMSK 0xffffffff
+#define VIDC_REG_438677_GET_FRAME_TAG_BOTTOM_SHFT          0
+
+#define VIDC_REG_76706_ADDR               \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00000ee8)
+#define VIDC_REG_76706_PHYS               \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000ee8)
+#define VIDC_REG_76706_RMSK                      0x1
+#define VIDC_REG_76706_SHFT                        0
+#define VIDC_REG_76706_OUT(v)             \
+	out_dword(VIDC_REG_76706_ADDR, v)
+
+#define VIDC_REG_809984_ADDR                       \
+	(VIDC_720P_WRAPPER_REG_BASE      + 0x00001000)
+#define VIDC_REG_809984_PHYS                       \
+	(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00001000)
+#define VIDC_REG_809984_RMSK                       0xffff0007
+#define VIDC_REG_809984_SHFT                                0
+#define VIDC_REG_809984_IN                         \
+	in_dword_masked(VIDC_REG_809984_ADDR,  VIDC_REG_809984_RMSK)
+#define VIDC_REG_809984_INM(m)                     \
+	in_dword_masked(VIDC_REG_809984_ADDR,  m)
+#define VIDC_REG_809984_720PV_720P_WRAPPER_VERSION_BMSK 0xffff0000
+#define VIDC_REG_809984_720PV_720P_WRAPPER_VERSION_SHFT       0x10
+#define VIDC_REG_809984_TEST_MUX_SEL_BMSK                 0x7
+#define VIDC_REG_809984_TEST_MUX_SEL_SHFT                   0
+
+
+#define VIDC_REG_699747_ADDR \
+       (VIDC_720P_WRAPPER_REG_BASE + 0x00000d0c)
+#define VIDC_REG_699747_PHYS \
+       (VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000d0c)
+#define VIDC_REG_699747_RMSK 0xffffffff
+#define VIDC_REG_699747_SHFT 0
+#define VIDC_REG_699747_OUT(v)                  \
+		out_dword(VIDC_REG_699747_ADDR, v)
+
+#define VIDC_REG_166247_ADDR \
+       (VIDC_720P_WRAPPER_REG_BASE + 0x00000d10)
+#define VIDC_REG_166247_PHYS \
+       (VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000d10)
+#define VIDC_REG_166247_RMSK 0xffffffff
+#define VIDC_REG_166247_SHFT 0
+#define VIDC_REG_166247_OUT(v)               \
+		out_dword(VIDC_REG_166247_ADDR, v)
+
+#define VIDC_REG_486169_ADDR \
+		(VIDC_720P_WRAPPER_REG_BASE + 0x00000d18)
+#define VIDC_REG_486169_PHYS \
+		(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000d18)
+#define VIDC_REG_486169_RMSK 0xffffffff
+#define VIDC_REG_486169_SHFT 0
+#define VIDC_REG_486169_OUT(v) \
+		out_dword(VIDC_REG_486169_ADDR, v)
+
+#define VIDC_REG_926519_ADDR \
+		(VIDC_720P_WRAPPER_REG_BASE + 0x00000d1c)
+#define VIDC_REG_926519_PHYS \
+		(VIDC_720P_WRAPPER_REG_BASE_PHYS + 0x00000d1c)
+#define VIDC_REG_926519_RMSK 0xffffffff
+#define VIDC_REG_926519_SHFT 0
+#define VIDC_REG_926519_OUT(v) \
+		out_dword(VIDC_REG_926519_ADDR, v)
+
+/* List all the profiles and levels and their register values */
+
+#define VIDC_720P_PROFILE_MPEG4_SP      0
+#define VIDC_720P_PROFILE_MPEG4_ASP     1
+#define VIDC_720P_PROFILE_H264_BASELINE 0
+#define VIDC_720P_PROFILE_H264_MAIN     1
+#define VIDC_720P_PROFILE_H264_HIGH     2
+#define VIDC_720P_PROFILE_H264_CPB      3
+#define VIDC_720P_PROFILE_H263_BASELINE 0
+
+#define VIDC_720P_PROFILE_VC1_SP        0
+#define VIDC_720P_PROFILE_VC1_MAIN      1
+#define VIDC_720P_PROFILE_VC1_ADV       2
+#define VIDC_720P_PROFILE_MPEG2_MAIN    4
+#define VIDC_720P_PROFILE_MPEG2_SP      5
+
+#define VIDC_720P_MPEG4_LEVEL0  0
+#define VIDC_720P_MPEG4_LEVEL0b 9
+#define VIDC_720P_MPEG4_LEVEL1  1
+#define VIDC_720P_MPEG4_LEVEL2  2
+#define VIDC_720P_MPEG4_LEVEL3  3
+#define VIDC_720P_MPEG4_LEVEL3b 7
+#define VIDC_720P_MPEG4_LEVEL4a 4
+#define VIDC_720P_MPEG4_LEVEL5  5
+#define VIDC_720P_MPEG4_LEVEL6  6
+
+#define VIDC_720P_H264_LEVEL1     10
+#define VIDC_720P_H264_LEVEL1b    9
+#define VIDC_720P_H264_LEVEL1p1   11
+#define VIDC_720P_H264_LEVEL1p2   12
+#define VIDC_720P_H264_LEVEL1p3   13
+#define VIDC_720P_H264_LEVEL2     20
+#define VIDC_720P_H264_LEVEL2p1   21
+#define VIDC_720P_H264_LEVEL2p2   22
+#define VIDC_720P_H264_LEVEL3     30
+#define VIDC_720P_H264_LEVEL3p1   31
+#define VIDC_720P_H264_LEVEL3p2   32
+
+#define VIDC_720P_H263_LEVEL10    10
+#define VIDC_720P_H263_LEVEL20    20
+#define VIDC_720P_H263_LEVEL30    30
+#define VIDC_720P_H263_LEVEL40    40
+#define VIDC_720P_H263_LEVEL45    45
+#define VIDC_720P_H263_LEVEL50    50
+#define VIDC_720P_H263_LEVEL60    60
+#define VIDC_720P_H263_LEVEL70    70
+
+#define VIDC_720P_VC1_LEVEL_LOW    0
+#define VIDC_720P_VC1_LEVEL_MED    2
+#define VIDC_720P_VC1_LEVEL_HIGH   4
+#define VIDC_720P_VC1_LEVEL0       0
+#define VIDC_720P_VC1_LEVEL1       1
+#define VIDC_720P_VC1_LEVEL2       2
+#define VIDC_720P_VC1_LEVEL3       3
+#define VIDC_720P_VC1_LEVEL4       4
+
+#define VIDCL_720P_MPEG2_LEVEL_LOW 10
+#define VIDCL_720P_MPEG2_LEVEL_MAIN 8
+#define VIDCL_720P_MPEG2_LEVEL_HIGH14 6
+
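+/* Command codes passed to vidc_720p_submit_command(). */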
+#define VIDC_720P_CMD_CHSET               0x0
+#define VIDC_720P_CMD_CHEND               0x2
+#define VIDC_720P_CMD_INITCODEC           0x3
+#define VIDC_720P_CMD_FRAMERUN            0x4
+#define VIDC_720P_CMD_INITBUFFERS         0x5
+#define VIDC_720P_CMD_FRAMERUN_REALLOCATE 0x6
+#define VIDC_720P_CMD_MFC_ENGINE_RESET 0x7
+
+enum vidc_720p_endian {
+	VIDC_720P_BIG_ENDIAN = 0x0,
+	VIDC_720P_LITTLE_ENDIAN = 0x1
+};
+
+enum vidc_720p_memory_access_method {
+	VIDC_720P_TILE_LINEAR = 0,
+	VIDC_720P_TILE_16x16 = 2,
+	VIDC_720P_TILE_64x32 = 3
+};
+
+enum vidc_720p_interrupt_control_mode {
+	VIDC_720P_INTERRUPT_MODE = 0,
+	VIDC_720P_POLL_MODE = 1
+};
+
+enum vidc_720p_interrupt_level_selection {
+	VIDC_720P_INTERRUPT_LEVEL_SEL = 0,
+	VIDC_720P_INTERRUPT_PULSE_SEL = 1
+};
+
+#define VIDC_720P_INTR_BUFFER_FULL             0x002
+#define VIDC_720P_INTR_FW_DONE                 0x020
+#define VIDC_720P_INTR_HEADER_DONE             0x040
+#define VIDC_720P_INTR_DMA_DONE                0x080
+#define VIDC_720P_INTR_FRAME_DONE              0x100
+
+enum vidc_720p_enc_dec_selection {
+	VIDC_720P_DECODER = 0,
+	VIDC_720P_ENCODER = 1
+};
+
+enum vidc_720p_codec {
+	VIDC_720P_MPEG4 = 0,
+	VIDC_720P_H264 = 1,
+	VIDC_720P_DIVX = 2,
+	VIDC_720P_XVID = 3,
+	VIDC_720P_H263 = 4,
+	VIDC_720P_MPEG2 = 5,
+	VIDC_720P_VC1 = 6
+};
+
+enum vidc_720p_frame {
+	VIDC_720P_NOTCODED = 0,
+	VIDC_720P_IFRAME = 1,
+	VIDC_720P_PFRAME = 2,
+	VIDC_720P_BFRAME = 3
+};
+
+enum vidc_720p_entropy_sel {
+	VIDC_720P_ENTROPY_SEL_CAVLC = 0,
+	VIDC_720P_ENTROPY_SEL_CABAC = 1
+};
+
+enum vidc_720p_cabac_model {
+	VIDC_720P_CABAC_MODEL_NUMBER_0 = 0,
+	VIDC_720P_CABAC_MODEL_NUMBER_1 = 1,
+	VIDC_720P_CABAC_MODEL_NUMBER_2 = 2
+};
+
+enum vidc_720p_DBConfig {
+	VIDC_720P_DB_ALL_BLOCKING_BOUNDARY = 0,
+	VIDC_720P_DB_DISABLE = 1,
+	VIDC_720P_DB_SKIP_SLICE_BOUNDARY = 2
+};
+
+enum vidc_720p_MSlice_selection {
+	VIDC_720P_MSLICE_BY_MB_COUNT = 0,
+	VIDC_720P_MSLICE_BY_BYTE_COUNT = 1,
+	VIDC_720P_MSLICE_BY_GOB = 2,
+	VIDC_720P_MSLICE_OFF = 3
+};
+
+enum vidc_720p_display_status {
+	VIDC_720P_DECODE_ONLY = 0,
+	VIDC_720P_DECODE_AND_DISPLAY = 1,
+	VIDC_720P_DISPLAY_ONLY = 2,
+	VIDC_720P_EMPTY_BUFFER = 3
+};
+
+#define VIDC_720P_ENC_IFRAME_REQ       0x1
+#define VIDC_720P_ENC_IPERIOD_CHANGE   0x1
+#define VIDC_720P_ENC_FRAMERATE_CHANGE 0x2
+#define VIDC_720P_ENC_BITRATE_CHANGE   0x4
+
+#define VIDC_720P_FLUSH_REQ     0x1
+#define VIDC_720P_EXTRADATA     0x2
+
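+/* Bit flags accepted by vidc_720p_metadata_enable(). */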
+#define VIDC_720P_METADATA_ENABLE_QP           0x01
+#define VIDC_720P_METADATA_ENABLE_CONCEALMB    0x02
+#define VIDC_720P_METADATA_ENABLE_VC1          0x04
+#define VIDC_720P_METADATA_ENABLE_SEI          0x08
+#define VIDC_720P_METADATA_ENABLE_VUI          0x10
+#define VIDC_720P_METADATA_ENABLE_ENCSLICE     0x20
+#define VIDC_720P_METADATA_ENABLE_PASSTHROUGH  0x40
+
+struct vidc_720p_dec_disp_info {
+	enum vidc_720p_display_status disp_status;
+	u32 resl_change;
+	u32 reconfig_flush_done;
+	u32 img_size_x;
+	u32 img_size_y;
+	u32 y_addr;
+	u32 c_addr;
+	u32 tag_top;
+	u32 pic_time_top;
+	u32 disp_is_interlace;
+	u32 tag_bottom;
+	u32 pic_time_bottom;
+	u32 metadata_exists;
+	u32 crop_exists;
+	u32 crop_right_offset;
+	u32 crop_left_offset;
+	u32 crop_bottom_offset;
+	u32 crop_top_offset;
+	u32 input_frame;
+	u32 input_bytes_consumed;
+	u32 input_is_interlace;
+	u32 input_frame_num;
+};
+
+struct vidc_720p_seq_hdr_info {
+	u32 img_size_x;
+	u32 img_size_y;
+	u32 dec_frm_size;
+	u32 min_num_dpb;
+	u32 min_dpb_size;
+	u32 profile;
+	u32 level;
+	u32 progressive;
+	u32 data_partitioned;
+	u32  crop_exists;
+	u32  crop_right_offset;
+	u32  crop_left_offset;
+	u32  crop_bottom_offset;
+	u32  crop_top_offset;
+};
+
+struct vidc_720p_enc_frame_info {
+	u32 enc_size;
+	u32 frame;
+	u32 metadata_exists;
+};
+
+void vidc_720p_set_device_virtual_base(u8 *core_virtual_base_addr);
+
+void vidc_720p_init(char **ppsz_version, u32 i_firmware_size,
+	u32 *pi_firmware_address, enum vidc_720p_endian dma_endian,
+	u32 interrupt_off,
+	enum vidc_720p_interrupt_level_selection	interrupt_sel,
+	u32 interrupt_mask);
+
+u32 vidc_720p_do_sw_reset(void);
+
+u32 vidc_720p_reset_is_success(void);
+
+void vidc_720p_start_cpu(enum vidc_720p_endian dma_endian,
+		u32 *icontext_bufferstart, u32 *debug_core_dump_addr,
+		u32  debug_buffer_size);
+
+u32 vidc_720p_cpu_start(void);
+
+void vidc_720p_stop_fw(void);
+
+void vidc_720p_get_interrupt_status(u32 *interrupt_status,
+		u32 *cmd_err_status, u32 *disp_pic_err_status,
+		u32 *op_failed);
+
+void vidc_720p_interrupt_done_clear(void);
+
+void vidc_720p_submit_command(u32 ch_id, u32 cmd_id);
+
+
+void vidc_720p_set_channel(u32 i_ch_id,
+	enum vidc_720p_enc_dec_selection enc_dec_sel,
+	enum vidc_720p_codec codec, u32 *pi_fw, u32 i_firmware_size);
+
+u32 vidc_720p_engine_reset(u32 ch_id,
+	enum vidc_720p_endian dma_endian,
+	enum vidc_720p_interrupt_level_selection interrupt_sel,
+	u32 interrupt_mask);
+
+void vidc_720p_encode_set_profile(u32 i_profile, u32 i_level);
+
+void vidc_720p_set_frame_size(u32 i_size_x, u32 i_size_y);
+
+void vidc_720p_encode_set_fps(u32 i_rc_frame_rate);
+
+void vidc_720p_encode_set_vop_time(u32 vop_time_resolution,
+		u32 vop_time_increment);
+
+void vidc_720p_encode_set_hec_period(u32 hec_period);
+
+void vidc_720p_encode_set_short_header(u32 i_short_header);
+
+void vidc_720p_encode_set_qp_params(u32 i_max_qp, u32 i_min_qp);
+
+void vidc_720p_encode_set_rc_config(u32 enable_frame_level_rc,
+		u32 enable_mb_level_rc_flag, u32 i_frame_qp, u32 pframe_qp);
+
+void vidc_720p_encode_set_bit_rate(u32 i_target_bitrate);
+
+void vidc_720p_encoder_set_param_change(u32 enc_param_change);
+
+void vidc_720p_encode_set_control_param(u32 param_val);
+
+void vidc_720p_encode_set_frame_level_rc_params(u32 i_reaction_coeff);
+
+void vidc_720p_encode_set_mb_level_rc_params(u32 dark_region_as_flag,
+	u32 smooth_region_as_flag, u32 static_region_as_flag,
+	u32 activity_region_flag);
+
+void vidc_720p_encode_set_entropy_control(
+		enum vidc_720p_entropy_sel entropy_sel,
+		enum vidc_720p_cabac_model cabac_model_number);
+
+void vidc_720p_encode_set_db_filter_control(enum vidc_720p_DBConfig
+		db_config, u32 i_slice_alpha_offset, u32 i_slice_beta_offset);
+
+void vidc_720p_encode_set_intra_refresh_mb_number(u32 i_cir_mb_number);
+
+void vidc_720p_encode_set_multi_slice_info(
+		enum vidc_720p_MSlice_selection m_slice_sel,
+		u32 multi_slice_size);
+
+void vidc_720p_encode_set_dpb_buffer(u32 *pi_enc_dpb_addr, u32 alloc_len);
+
+void vidc_720p_set_deblock_line_buffer(u32 *pi_deblock_line_buffer_start,
+		u32 alloc_len);
+
+void vidc_720p_encode_set_i_period(u32 i_i_period);
+
+void vidc_720p_encode_init_codec(u32 i_ch_id,
+	enum vidc_720p_memory_access_method memory_access_model);
+
+void vidc_720p_encode_unalign_bitstream(u32 upper_unalign_word,
+	u32 lower_unalign_word);
+
+void vidc_720p_encode_set_seq_header_buffer(u32 ext_buffer_start,
+	u32 ext_buffer_end, u32 start_byte_num);
+
+void vidc_720p_encode_frame(u32 ch_id, u32 ext_buffer_start,
+	u32 ext_buffer_end, u32 start_byte_number,
+	u32 y_addr, u32 c_addr);
+
+void vidc_720p_encode_get_header(u32 *pi_enc_header_size);
+
+void vidc_720p_enc_frame_info
+	(struct vidc_720p_enc_frame_info *enc_frame_info);
+
+void vidc_720p_decode_bitstream_header(u32 ch_id, u32 dec_unit_size,
+	u32 start_byte_num, u32 ext_buffer_start, u32 ext_buffer_end,
+	enum vidc_720p_memory_access_method memory_access_model,
+	u32 decode_order);
+
+void vidc_720p_decode_get_seq_hdr_info
+    (struct vidc_720p_seq_hdr_info *seq_hdr_info);
+
+void vidc_720p_decode_set_dpb_release_buffer_mask
+    (u32 i_dpb_release_buffer_mask);
+
+void vidc_720p_decode_set_dpb_buffers(u32 i_buf_index, u32 *pi_dpb_buffer);
+
+void vidc_720p_decode_set_comv_buffer
+    (u32 *pi_dpb_comv_buffer, u32 alloc_len);
+
+void vidc_720p_decode_set_dpb_details
+    (u32 num_dpb, u32 alloc_len, u32 *ref_buffer);
+
+void vidc_720p_decode_set_mpeg4Post_filter(u32 enable_post_filter);
+
+void vidc_720p_decode_set_error_control(u32 enable_error_control);
+
+void vidc_720p_decode_set_mpeg4_data_partitionbuffer(u32 *vsp_buf_start);
+
+void vidc_720p_decode_setH264VSPBuffer(u32 *pi_vsp_temp_buffer_start);
+
+void vidc_720p_decode_frame(u32 ch_id, u32 ext_buffer_start,
+		u32 ext_buffer_end, u32 dec_unit_size,
+		u32 start_byte_num, u32 input_frame_tag);
+
+void vidc_720p_issue_eos(u32 i_ch_id);
+void vidc_720p_eos_info(u32 *disp_status, u32 *resl_change);
+
+void vidc_720p_decode_display_info
+    (struct vidc_720p_dec_disp_info *disp_info);
+
+void vidc_720p_decode_skip_frm_details(u32 *free_luma_dpb);
+
+void vidc_720p_metadata_enable(u32 flag, u32 *input_buffer);
+
+void vidc_720p_decode_dynamic_req_reset(void);
+
+void vidc_720p_decode_dynamic_req_set(u32 property);
+
+void vidc_720p_decode_setpassthrough_start(u32 pass_startaddr);
+
+#define DDL_720P_REG_BASE VIDC_720P_WRAPPER_REG_BASE
+#define VIDC_BUSY_WAIT(n) udelay(n)
+
+#undef VIDC_REGISTER_LOG_MSG
+#undef VIDC_REGISTER_LOG_INTO_BUFFER
+
+#ifdef VIDC_REGISTER_LOG_MSG
+#define VIDC_MSG1(msg_format, a) printk(KERN_INFO msg_format, a)
+#define VIDC_MSG2(msg_format, a, b) printk(KERN_INFO msg_format, a, b)
+#define VIDC_MSG3(msg_format, a, b, c) printk(KERN_INFO msg_format, a, b, c)
+#else
+#define VIDC_MSG1(msg_format, a)
+#define VIDC_MSG2(msg_format, a, b)
+#define VIDC_MSG3(msg_format, a, b, c)
+#endif
+
+#ifdef VIDC_REGISTER_LOG_INTO_BUFFER
+
+#define VIDC_REGLOG_BUFSIZE 200000
+#define VIDC_REGLOG_MAX_PRINT_SIZE 100
+extern char vidclog[VIDC_REGLOG_BUFSIZE];
+extern unsigned int vidclog_index;
+
+#define VIDC_LOG_BUFFER_INIT \
+{if (vidclog_index) \
+  memset(vidclog, 0, vidclog_index+1); \
+  vidclog_index = 0; }
+
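+/* Wrap the log index to zero when the next entry would overflow. */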
+#define VIDC_REGLOG_CHECK_BUFINDEX(req_size) \
+  vidclog_index = \
+  (vidclog_index+(req_size) < VIDC_REGLOG_BUFSIZE) ? vidclog_index : 0;
+
+#define VIDC_LOG_WRITE(reg, val) \
+{unsigned int len; \
+	VIDC_REGLOG_CHECK_BUFINDEX(VIDC_REGLOG_MAX_PRINT_SIZE); \
+	len = snprintf(&vidclog[vidclog_index], VIDC_REGLOG_MAX_PRINT_SIZE, \
+	"(0x%x:"#reg"=0x%x)" , VIDC_##reg##_ADDR - DDL_720P_REG_BASE, val);\
+	vidclog_index += len; }
+
+#define VIDC_LOG_WRITEI(reg, index, val) \
+{unsigned int len; \
+	VIDC_REGLOG_CHECK_BUFINDEX(VIDC_REGLOG_MAX_PRINT_SIZE); \
+	len = snprintf(&vidclog[vidclog_index], VIDC_REGLOG_MAX_PRINT_SIZE, \
+	"(0x%x:"#reg"=0x%x)" , VIDC_##reg##_ADDR(index)-DDL_720P_REG_BASE,  \
+	val); vidclog_index += len; }
+
+#define VIDC_LOG_WRITEF(reg, field, val) \
+{unsigned int len; \
+	VIDC_REGLOG_CHECK_BUFINDEX(VIDC_REGLOG_MAX_PRINT_SIZE); \
+	len = snprintf(&vidclog[vidclog_index], VIDC_REGLOG_MAX_PRINT_SIZE, \
+	"(0x%x:"#reg":0x%x:=0x%x)" , VIDC_##reg##_ADDR - DDL_720P_REG_BASE,  \
+	VIDC_##reg##_##field##_BMSK,  val);\
+	vidclog_index += len; }
+
+#define VIDC_LOG_READ(reg, pval) \
+{ unsigned int len; \
+	VIDC_REGLOG_CHECK_BUFINDEX(VIDC_REGLOG_MAX_PRINT_SIZE); \
+	len = snprintf(&vidclog[vidclog_index], VIDC_REGLOG_MAX_PRINT_SIZE, \
+	"(0x%x:"#reg"==0x%x)" , VIDC_##reg##_ADDR - DDL_720P_REG_BASE,  \
+	(u32)*pval); \
+	vidclog_index += len; }
+
+#define VIDC_STR_LOGBUFFER(str) \
+{ unsigned int len; \
+	VIDC_REGLOG_CHECK_BUFINDEX(VIDC_REGLOG_MAX_PRINT_SIZE); \
+	len = snprintf(&vidclog[vidclog_index], VIDC_REGLOG_MAX_PRINT_SIZE, \
+	"<%s>" , str); vidclog_index += len; }
+
+#define VIDC_LONG_LOGBUFFER(str, arg1) \
+{ unsigned int len; \
+	VIDC_REGLOG_CHECK_BUFINDEX(VIDC_REGLOG_MAX_PRINT_SIZE); \
+	len = snprintf(&vidclog[vidclog_index], VIDC_REGLOG_MAX_PRINT_SIZE, \
+	"<%s=0x%x>" , str, arg1); vidclog_index += len; }
+
+#define VIDC_DEBUG_REGISTER_LOG \
+{ u32 val; unsigned int len; \
+	val = VIDC_720P_IN(REG_881638); \
+	VIDC_REGLOG_CHECK_BUFINDEX(VIDC_REGLOG_MAX_PRINT_SIZE); \
+	len = snprintf(&vidclog[vidclog_index], 50,  "[dbg1=%x]" , val); \
+	vidclog_index += len; \
+	val = VIDC_720P_IN(REG_161486); \
+	VIDC_REGLOG_CHECK_BUFINDEX(VIDC_REGLOG_MAX_PRINT_SIZE); \
+	len = snprintf(&vidclog[vidclog_index], 50,  "[dbg2=%x]" , val); \
+	vidclog_index += len; }
+
+#else
+#define VIDC_LOG_WRITE(reg, val)
+#define VIDC_LOG_WRITEI(reg, index, val)
+#define VIDC_LOG_WRITEF(reg, field, val)
+#define VIDC_LOG_READ(reg, pval)
+#define VIDC_LOG_BUFFER_INIT
+#define VIDC_STR_LOGBUFFER(str)
+#define VIDC_LONG_LOGBUFFER(str, arg1)
+#define VIDC_DEBUG_REGISTER_LOG
+#endif
+
+void vidcputlog(char *str);
+void vidcput_debug_reglog(void);
+
+#define VIDC_LOGERR_STRING(str) \
+do { \
+	VIDC_STR_LOGBUFFER(str); \
+	VIDC_MSG1("\n<%s>", str); \
+} while (0)
+
+#define VIDC_LOG_STRING(str) \
+do { \
+	VIDC_STR_LOGBUFFER(str); \
+	VIDC_MSG1("\n<%s>", str); \
+} while (0)
+
+#define VIDC_LOG1(str, arg1) \
+do { \
+	VIDC_LONG_LOGBUFFER(str, arg1); \
+	VIDC_MSG2("\n<%s=0x%08x>", str, arg1); \
+} while (0)
+
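+/*
+ * Register I/O wrappers: each access is traced (log buffer and/or printk,
+ * when enabled) and preceded by a memory barrier before the hardware access.
+ */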
+#define VIDC_IO_OUT(reg,  val) \
+do { \
+	VIDC_LOG_WRITE(reg, (u32)val);  \
+	VIDC_MSG2("\n(0x%08x:"#reg"=0x%08x)",  \
+	(u32)(VIDC_##reg##_ADDR - DDL_720P_REG_BASE),  (u32)val); \
+	mb(); \
+	VIDC_720P_OUT(reg, val);  \
+} while (0)
+
+#define VIDC_IO_OUTI(reg,  index,  val) \
+do { \
+	VIDC_LOG_WRITEI(reg, index, (u32)val); \
+	VIDC_MSG2("\n(0x%08x:"#reg"=0x%08x)",  \
+	(u32)(VIDC_##reg##_ADDR(index)-DDL_720P_REG_BASE),  (u32)val); \
+	mb(); \
+	VIDC_720P_OUTI(reg, index, val);  \
+} while (0)
+
+#define VIDC_IO_OUTF(reg,  field,  val) \
+do { \
+	VIDC_LOG_WRITEF(reg, field, val); \
+	VIDC_MSG3("\n(0x%08x:"#reg":0x%x:=0x%08x)",  \
+	(u32)(VIDC_##reg##_ADDR - DDL_720P_REG_BASE),  \
+	VIDC_##reg##_##field##_BMSK,  (u32)val); \
+	mb(); \
+	VIDC_720P_OUTF(reg, field, val);  \
+} while (0)
+
+#define VIDC_IO_IN(reg, pval) \
+do { \
+	mb(); \
+	*pval = (u32) VIDC_720P_IN(reg); \
+	VIDC_LOG_READ(reg, pval); \
+	VIDC_MSG2("\n(0x%08x:"#reg"==0x%08x)",  \
+	(u32)(VIDC_##reg##_ADDR - DDL_720P_REG_BASE), (u32) *pval);  \
+} while (0)
+
+#define VIDC_IO_INF(reg, mask, pval) \
+do { \
+	mb(); \
+	*pval = VIDC_720P_INF(reg, mask); \
+	VIDC_LOG_READ(reg, pval); \
+	VIDC_MSG2("\n(0x%08x:"#reg"==0x%08x)",  \
+	(u32)(VIDC_##reg##_ADDR - DDL_720P_REG_BASE),  *pval); \
+} while (0)
+
+#endif
diff --git a/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker.c b/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker.c
new file mode 100644
index 0000000..12dcbf3
--- /dev/null
+++ b/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker.c
@@ -0,0 +1,722 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/firmware.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+#include <mach/clk.h>
+#include <mach/msm_reqs.h>
+#include <linux/interrupt.h>
+#include "vidc_type.h"
+#include "vcd_res_tracker.h"
+#include "vidc_init.h"
+
+#define MSM_AXI_QOS_NAME "msm_vidc_reg"
+#define AXI_CLK_SCALING
+
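+/*
+ * Performance level thresholds; the values correspond to macroblocks per
+ * frame times a 30 fps frame rate (QVGA ~ 300 MBs, VGA ~ 1200 MBs,
+ * WVGA ~ 1500 MBs).
+ */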
+#define QVGA_PERF_LEVEL (300 * 30)
+#define VGA_PERF_LEVEL (1200 * 30)
+#define WVGA_PERF_LEVEL (1500 * 30)
+
+static unsigned int mfc_clk_freq_table[3] = {
+	61440000, 122880000, 170667000
+};
+
+#ifndef CONFIG_MSM_NPA_SYSTEM_BUS
+static unsigned int axi_clk_freq_table_enc[2] = {
+	122880, 192000
+};
+static unsigned int axi_clk_freq_table_dec[2] = {
+	122880, 192000
+};
+#else
+static unsigned int axi_clk_freq_table_enc[2] = {
+	MSM_AXI_FLOW_VIDEO_RECORDING_720P,
+	MSM_AXI_FLOW_VIDEO_RECORDING_720P
+};
+static unsigned int axi_clk_freq_table_dec[2] = {
+	MSM_AXI_FLOW_VIDEO_PLAYBACK_720P,
+	MSM_AXI_FLOW_VIDEO_PLAYBACK_720P
+};
+#endif
+
+static struct res_trk_context resource_context;
+
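+/*
+ * Firmware image names fetched with request_firmware() by
+ * res_trk_download_firmware().
+ */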
+#define VIDC_BOOT_FW			"vidc_720p_command_control.fw"
+#define VIDC_MPG4_DEC_FW		"vidc_720p_mp4_dec_mc.fw"
+#define VIDC_H263_DEC_FW		"vidc_720p_h263_dec_mc.fw"
+#define VIDC_H264_DEC_FW		"vidc_720p_h264_dec_mc.fw"
+#define VIDC_MPG4_ENC_FW		"vidc_720p_mp4_enc_mc.fw"
+#define VIDC_H264_ENC_FW		"vidc_720p_h264_enc_mc.fw"
+#define VIDC_VC1_DEC_FW		"vidc_720p_vc1_dec_mc.fw"
+
+unsigned char *vidc_command_control_fw;
+u32 vidc_command_control_fw_size;
+
+unsigned char *vidc_mpg4_dec_fw;
+u32 vidc_mpg4_dec_fw_size;
+
+unsigned char *vidc_h263_dec_fw;
+u32 vidc_h263_dec_fw_size;
+
+unsigned char *vidc_h264_dec_fw;
+u32 vidc_h264_dec_fw_size;
+
+unsigned char *vidc_mpg4_enc_fw;
+u32 vidc_mpg4_enc_fw_size;
+
+unsigned char *vidc_h264_enc_fw;
+u32 vidc_h264_enc_fw_size;
+
+unsigned char *vidc_vc1_dec_fw;
+u32 vidc_vc1_dec_fw_size;
+
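+/*
+ * Power down the video core: briefly re-enable the clocks so the core can
+ * be held in reset, then drop the clocks, clock handles and power rail.
+ */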
+static u32 res_trk_disable_videocore(void)
+{
+	int rc = -1;
+	mutex_lock(&resource_context.lock);
+
+	if (!resource_context.rail_enabled) {
+		mutex_unlock(&resource_context.lock);
+		return false;
+	}
+
+	if (!resource_context.clock_enabled &&
+		resource_context.pclk &&
+		resource_context.hclk &&
+		resource_context.hclk_div2) {
+
+		VCDRES_MSG_LOW("\nEnabling clk before disabling pwr rail\n");
+		if (clk_set_rate(resource_context.hclk,
+			mfc_clk_freq_table[0])) {
+			VCDRES_MSG_ERROR("\n pwr_rail_disable:"
+				 " set clk rate failed\n");
+			goto bail_out;
+		}
+
+		if (clk_enable(resource_context.pclk)) {
+			VCDRES_MSG_ERROR("vidc pclk Enable failed\n");
+			goto bail_out;
+		}
+
+		if (clk_enable(resource_context.hclk)) {
+			VCDRES_MSG_ERROR("vidc hclk Enable failed\n");
+			goto disable_pclk;
+		}
+
+		if (clk_enable(resource_context.hclk_div2)) {
+			VCDRES_MSG_ERROR("vidc hclk_div2 Enable failed\n");
+			goto disable_hclk;
+		}
+	} else {
+		VCDRES_MSG_ERROR("\ndisabling pwr rail: clocks busy or unavailable\n");
+		goto bail_out;
+	}
+
+	resource_context.rail_enabled = 0;
+	rc = clk_reset(resource_context.pclk, CLK_RESET_ASSERT);
+	if (rc) {
+		VCDRES_MSG_ERROR("\n clk_reset failed %d\n", rc);
+		mutex_unlock(&resource_context.lock);
+		return false;
+	}
+	msleep(20);
+
+	clk_disable(resource_context.pclk);
+	clk_disable(resource_context.hclk);
+	clk_disable(resource_context.hclk_div2);
+
+	clk_put(resource_context.hclk_div2);
+	clk_put(resource_context.hclk);
+	clk_put(resource_context.pclk);
+
+	rc = regulator_disable(resource_context.regulator);
+	if (rc) {
+		VCDRES_MSG_ERROR("\n regulator disable failed %d\n", rc);
+		mutex_unlock(&resource_context.lock);
+		return false;
+	}
+
+	resource_context.hclk_div2 = NULL;
+	resource_context.hclk = NULL;
+	resource_context.pclk = NULL;
+
+	mutex_unlock(&resource_context.lock);
+
+	return true;
+
+disable_hclk:
+	clk_disable(resource_context.hclk);
+disable_pclk:
+	clk_disable(resource_context.pclk);
+bail_out:
+	if (resource_context.pclk) {
+		clk_put(resource_context.pclk);
+		resource_context.pclk = NULL;
+	}
+	if (resource_context.hclk) {
+		clk_put(resource_context.hclk);
+		resource_context.hclk = NULL;
+	}
+	if (resource_context.hclk_div2) {
+		clk_put(resource_context.hclk_div2);
+		resource_context.hclk_div2 = NULL;
+	}
+	mutex_unlock(&resource_context.lock);
+	return false;
+}
+
+u32 res_trk_enable_clocks(void)
+{
+	VCDRES_MSG_LOW("\n in res_trk_enable_clocks()");
+
+	mutex_lock(&resource_context.lock);
+	if (!resource_context.clock_enabled) {
+		VCDRES_MSG_LOW("Enabling IRQ in %s()\n", __func__);
+		enable_irq(resource_context.irq_num);
+
+		VCDRES_MSG_LOW("%s(): Enabling the clocks ...\n", __func__);
+
+		if (clk_enable(resource_context.pclk)) {
+			VCDRES_MSG_ERROR("vidc pclk Enable failed\n");
+
+			clk_put(resource_context.hclk);
+			clk_put(resource_context.hclk_div2);
+			mutex_unlock(&resource_context.lock);
+			return false;
+		}
+
+		if (clk_enable(resource_context.hclk)) {
+			VCDRES_MSG_ERROR("vidc  hclk Enable failed\n");
+			clk_put(resource_context.pclk);
+			clk_put(resource_context.hclk_div2);
+			mutex_unlock(&resource_context.lock);
+			return false;
+		}
+
+		if (clk_enable(resource_context.hclk_div2)) {
+			VCDRES_MSG_ERROR("vidc hclk_div2 Enable failed\n");
+			clk_put(resource_context.hclk);
+			clk_put(resource_context.pclk);
+			mutex_unlock(&resource_context.lock);
+			return false;
+		}
+	}
+
+	resource_context.clock_enabled = 1;
+	mutex_unlock(&resource_context.lock);
+	return true;
+}
+
+static u32 res_trk_sel_clk_rate(unsigned long hclk_rate)
+{
+	mutex_lock(&resource_context.lock);
+	if (clk_set_rate(resource_context.hclk,
+		hclk_rate)) {
+		VCDRES_MSG_ERROR("vidc hclk set rate failed\n");
+		mutex_unlock(&resource_context.lock);
+		return false;
+	}
+	resource_context.hclk_rate = hclk_rate;
+	mutex_unlock(&resource_context.lock);
+	return true;
+}
+
+static u32 res_trk_get_clk_rate(unsigned long *phclk_rate)
+{
+	if (!phclk_rate) {
+		VCDRES_MSG_ERROR("%s(): phclk_rate is NULL\n", __func__);
+		return false;
+	}
+	mutex_lock(&resource_context.lock);
+	*phclk_rate = clk_get_rate(resource_context.hclk);
+	if (!(*phclk_rate)) {
+		VCDRES_MSG_ERROR("vidc hclk get rate failed\n");
+		mutex_unlock(&resource_context.lock);
+		return false;
+	}
+	mutex_unlock(&resource_context.lock);
+	return true;
+}
+
+u32 res_trk_disable_clocks(void)
+{
+	VCDRES_MSG_LOW("in res_trk_disable_clocks()\n");
+
+	mutex_lock(&resource_context.lock);
+
+	if (!resource_context.clock_enabled) {
+		mutex_unlock(&resource_context.lock);
+		return false;
+	}
+
+	VCDRES_MSG_LOW("Disabling IRQ in %s()\n", __func__);
+	disable_irq_nosync(resource_context.irq_num);
+	VCDRES_MSG_LOW("%s(): Disabling the clocks ...\n", __func__);
+
+	resource_context.clock_enabled = 0;
+	clk_disable(resource_context.hclk);
+	clk_disable(resource_context.hclk_div2);
+	clk_disable(resource_context.pclk);
+	mutex_unlock(&resource_context.lock);
+
+	return true;
+}
+
+static u32 res_trk_enable_videocore(void)
+{
+	mutex_lock(&resource_context.lock);
+	if (!resource_context.rail_enabled) {
+		int rc = -1;
+
+		rc = regulator_enable(resource_context.regulator);
+		if (rc) {
+			VCDRES_MSG_ERROR("%s(): regulator_enable failed %d\n",
+							 __func__, rc);
+			goto bail_out;
+		}
+		VCDRES_MSG_LOW("%s(): regulator enable Success %d\n",
+							__func__, rc);
+
+		resource_context.pclk = clk_get(resource_context.device,
+			"mfc_pclk");
+
+		if (IS_ERR(resource_context.pclk)) {
+			VCDRES_MSG_ERROR("%s(): mfc_pclk get failed\n"
+							 , __func__);
+			goto disable_regulator;
+		}
+
+		resource_context.hclk = clk_get(resource_context.device,
+			"mfc_clk");
+
+		if (IS_ERR(resource_context.hclk)) {
+			VCDRES_MSG_ERROR("%s(): mfc_clk get failed\n"
+							 , __func__);
+
+			goto release_pclk;
+		}
+
+		resource_context.hclk_div2 =
+			clk_get(resource_context.device, "mfc_div2_clk");
+
+		if (IS_ERR(resource_context.hclk_div2)) {
+			VCDRES_MSG_ERROR("%s(): mfc_div2_clk get failed\n"
+							 , __func__);
+			goto release_hclk_pclk;
+		}
+
+		if (clk_set_rate(resource_context.hclk,
+			mfc_clk_freq_table[0])) {
+			VCDRES_MSG_ERROR("\n pwr_rail_enable:"
+				 " set clk rate failed\n");
+			goto release_all_clks;
+		}
+
+		if (clk_enable(resource_context.pclk)) {
+			VCDRES_MSG_ERROR("vidc pclk Enable failed\n");
+			goto release_all_clks;
+		}
+
+		if (clk_enable(resource_context.hclk)) {
+			VCDRES_MSG_ERROR("vidc hclk Enable failed\n");
+			goto disable_pclk;
+		}
+
+		if (clk_enable(resource_context.hclk_div2)) {
+			VCDRES_MSG_ERROR("vidc hclk_div2 Enable failed\n");
+			goto disable_hclk_pclk;
+		}
+
+		rc = clk_reset(resource_context.pclk, CLK_RESET_DEASSERT);
+		if (rc) {
+			VCDRES_MSG_ERROR("\n clk_reset failed %d\n", rc);
+			goto disable_and_release_all_clks;
+		}
+		msleep(20);
+
+		clk_disable(resource_context.pclk);
+		clk_disable(resource_context.hclk);
+		clk_disable(resource_context.hclk_div2);
+
+	}
+	resource_context.rail_enabled = 1;
+	mutex_unlock(&resource_context.lock);
+	return true;
+
+disable_and_release_all_clks:
+	clk_disable(resource_context.hclk_div2);
+disable_hclk_pclk:
+	clk_disable(resource_context.hclk);
+disable_pclk:
+	clk_disable(resource_context.pclk);
+release_all_clks:
+	clk_put(resource_context.hclk_div2);
+	resource_context.hclk_div2 = NULL;
+release_hclk_pclk:
+	clk_put(resource_context.hclk);
+	resource_context.hclk = NULL;
+release_pclk:
+	clk_put(resource_context.pclk);
+	resource_context.pclk = NULL;
+disable_regulator:
+	regulator_disable(resource_context.regulator);
+bail_out:
+	mutex_unlock(&resource_context.lock);
+	return false;
+}
+
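+/*
+ * Convert an MFC clock frequency (Hz) into an abstract performance level:
+ * perf_lvl = freq * 1000 / VCD_RESTRK_HZ_PER_1000_PERFLVL.
+ */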
+static u32 res_trk_convert_freq_to_perf_lvl(u64 freq)
+{
+	u64 perf_lvl;
+	u64 temp;
+
+	VCDRES_MSG_MED("\n %s():: freq = %u\n", __func__, (u32)freq);
+
+	if (!freq)
+		return 0;
+
+	temp = freq * 1000;
+	do_div(temp, VCD_RESTRK_HZ_PER_1000_PERFLVL);
+	perf_lvl = (u32)temp;
+	VCDRES_MSG_MED("\n %s(): perf_lvl = %u\n", __func__,
+		(u32)perf_lvl);
+
+	return (u32)perf_lvl;
+}
+
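+/*
+ * Inverse conversion: performance level back to a clock frequency in Hz,
+ * rounded up to the next whole Hz.
+ */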
+static u32 res_trk_convert_perf_lvl_to_freq(u64 perf_lvl)
+{
+	u64 freq, temp;
+
+	VCDRES_MSG_MED("\n %s():: perf_lvl = %u\n", __func__,
+		(u32)perf_lvl);
+	temp = (perf_lvl * VCD_RESTRK_HZ_PER_1000_PERFLVL) + 999;
+	do_div(temp, 1000);
+	freq = (u32)temp;
+	VCDRES_MSG_MED("\n %s(): freq = %u\n", __func__, (u32)freq);
+
+	return (u32)freq;
+}
+
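+/*
+ * EBI1/AXI bus clock, used for bus bandwidth scaling when AXI_CLK_SCALING
+ * is defined.
+ */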
+static struct clk *ebi1_clk;
+
+u32 res_trk_power_up(void)
+{
+	VCDRES_MSG_LOW("clk_regime_rail_enable");
+	VCDRES_MSG_LOW("clk_regime_sel_rail_control");
+#ifdef AXI_CLK_SCALING
+{
+	VCDRES_MSG_MED("\n res_trk_power_up():: "
+		"Calling AXI add requirement\n");
+	ebi1_clk = clk_get(NULL, "ebi1_vcd_clk");
+	if (IS_ERR(ebi1_clk)) {
+		VCDRES_MSG_ERROR("Request AXI bus QOS fails.");
+		return false;
+	}
+	clk_enable(ebi1_clk);
+}
+#endif
+
+	VCDRES_MSG_MED("\n res_trk_power_up():: Calling "
+		"vidc_enable_pwr_rail()\n");
+	return res_trk_enable_videocore();
+}
+
+u32 res_trk_power_down(void)
+{
+	VCDRES_MSG_LOW("clk_regime_rail_disable");
+#ifdef AXI_CLK_SCALING
+	VCDRES_MSG_MED("\n res_trk_power_down()::"
+		"Calling AXI remove requirement\n");
+	clk_disable(ebi1_clk);
+	clk_put(ebi1_clk);
+#endif
+	VCDRES_MSG_MED("\n res_trk_power_down():: Calling "
+		"res_trk_disable_videocore()\n");
+	return res_trk_disable_videocore();
+}
+
+u32 res_trk_get_max_perf_level(u32 *pn_max_perf_lvl)
+{
+	if (!pn_max_perf_lvl) {
+		VCDRES_MSG_ERROR("%s(): pn_max_perf_lvl is NULL\n",
+			__func__);
+		return false;
+	}
+
+	*pn_max_perf_lvl = VCD_RESTRK_MAX_PERF_LEVEL;
+	return true;
+}
+
+u32 res_trk_set_perf_level(u32 req_perf_lvl, u32 *pn_set_perf_lvl,
+	struct vcd_dev_ctxt *dev_ctxt)
+{
+	struct vcd_clnt_ctxt *cctxt_itr = NULL;
+	u32 axi_freq = 0, mfc_freq = 0, calc_mfc_freq = 0;
+	u8 enc_clnt_present = false;
+
+	if (!pn_set_perf_lvl || !dev_ctxt) {
+		VCDRES_MSG_ERROR("%s(): NULL pointer! dev_ctxt(%p)\n",
+			__func__, dev_ctxt);
+		return false;
+	}
+
+	VCDRES_MSG_LOW("%s(), req_perf_lvl = %d", __func__, req_perf_lvl);
+	calc_mfc_freq = res_trk_convert_perf_lvl_to_freq(
+		(u64)req_perf_lvl);
+
+	if (calc_mfc_freq < VCD_RESTRK_MIN_FREQ_POINT)
+		calc_mfc_freq = VCD_RESTRK_MIN_FREQ_POINT;
+	else if (calc_mfc_freq > VCD_RESTRK_MAX_FREQ_POINT)
+		calc_mfc_freq = VCD_RESTRK_MAX_FREQ_POINT;
+
+	cctxt_itr = dev_ctxt->cctxt_list_head;
+	while (cctxt_itr) {
+		VCDRES_MSG_LOW("\n cctxt_itr = %p", cctxt_itr);
+		if (!cctxt_itr->decoding) {
+			VCDRES_MSG_LOW("\n Encoder client");
+			enc_clnt_present = true;
+			break;
+		} else {
+			VCDRES_MSG_LOW("\n Decoder client");
+		}
+		cctxt_itr = cctxt_itr->next;
+	}
+
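+	/*
+	 * With at least one encoder client, pick MFC/AXI rates from the
+	 * encoder tables; otherwise scale the decoder rates with the
+	 * requested performance level.
+	 */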
+	if (enc_clnt_present) {
+		if (req_perf_lvl >= VGA_PERF_LEVEL) {
+			mfc_freq = mfc_clk_freq_table[2];
+			axi_freq = axi_clk_freq_table_enc[1];
+		} else {
+			mfc_freq = mfc_clk_freq_table[0];
+			axi_freq = axi_clk_freq_table_enc[0];
+		}
+		VCDRES_MSG_MED("\n ENCODER: axi_freq = %u"
+			", mfc_freq = %u, calc_mfc_freq = %u,"
+			" req_perf_lvl = %u", axi_freq,
+			mfc_freq, calc_mfc_freq,
+			req_perf_lvl);
+	} else {
+		if (req_perf_lvl <= QVGA_PERF_LEVEL) {
+			mfc_freq = mfc_clk_freq_table[0];
+			axi_freq = axi_clk_freq_table_dec[0];
+		} else {
+			axi_freq = axi_clk_freq_table_dec[0];
+			if (req_perf_lvl <= VGA_PERF_LEVEL)
+				mfc_freq = mfc_clk_freq_table[0];
+			else if (req_perf_lvl <= WVGA_PERF_LEVEL)
+				mfc_freq = mfc_clk_freq_table[1];
+			else {
+				mfc_freq = mfc_clk_freq_table[2];
+				axi_freq = axi_clk_freq_table_dec[1];
+			}
+		}
+		VCDRES_MSG_MED("\n DECODER: axi_freq = %u"
+			", mfc_freq = %u, calc_mfc_freq = %u,"
+			" req_perf_lvl = %u", axi_freq,
+			mfc_freq, calc_mfc_freq,
+			req_perf_lvl);
+	}
+
+#ifdef AXI_CLK_SCALING
+    if (req_perf_lvl != VCD_RESTRK_MIN_PERF_LEVEL) {
+		VCDRES_MSG_MED("\n %s(): Setting AXI freq to %u",
+			__func__, axi_freq);
+		clk_set_rate(ebi1_clk, axi_freq * 1000);
+	}
+#endif
+
+#ifdef USE_RES_TRACKER
+    if (req_perf_lvl != VCD_RESTRK_MIN_PERF_LEVEL) {
+		VCDRES_MSG_MED("\n %s(): Setting MFC freq to %u",
+			__func__, mfc_freq);
+		if (!res_trk_sel_clk_rate(mfc_freq)) {
+			VCDRES_MSG_ERROR("%s(): res_trk_sel_clk_rate FAILED\n",
+				__func__);
+			*pn_set_perf_lvl = 0;
+			return false;
+		}
+	}
+#endif
+
+	*pn_set_perf_lvl =
+	    res_trk_convert_freq_to_perf_lvl((u64) mfc_freq);
+	return true;
+}
+
+u32 res_trk_get_curr_perf_level(u32 *pn_perf_lvl)
+{
+	unsigned long freq;
+
+	if (!pn_perf_lvl) {
+		VCDRES_MSG_ERROR("%s(): pn_perf_lvl is NULL\n",
+			__func__);
+		return false;
+	}
+	VCDRES_MSG_LOW("clk_regime_msm_get_clk_freq_hz");
+	if (!res_trk_get_clk_rate(&freq)) {
+		VCDRES_MSG_ERROR("%s(): res_trk_get_clk_rate FAILED\n",
+			__func__);
+		*pn_perf_lvl = 0;
+		return false;
+	}
+
+	*pn_perf_lvl = res_trk_convert_freq_to_perf_lvl((u64) freq);
+	VCDRES_MSG_MED("%s(): freq = %lu, *pn_perf_lvl = %u", __func__,
+		freq, *pn_perf_lvl);
+	return true;
+}
+
+u32 res_trk_download_firmware(void)
+{
+	const struct firmware *fw_boot = NULL;
+	const struct firmware *fw_mpg4_dec = NULL;
+	const struct firmware *fw_h263_dec = NULL;
+	const struct firmware *fw_h264_dec = NULL;
+	const struct firmware *fw_mpg4_enc = NULL;
+	const struct firmware *fw_h264_enc = NULL;
+	const struct firmware *fw_vc1_dec = NULL;
+	int rc = 0;
+	u32 status = true;
+
+	VCDRES_MSG_HIGH("%s(): Request firmware download\n",
+		__func__);
+	mutex_lock(&resource_context.lock);
+	rc = request_firmware(&fw_boot, VIDC_BOOT_FW,
+						  resource_context.device);
+	if (rc) {
+		VCDRES_MSG_ERROR("request_firmware for %s error %d\n",
+				VIDC_BOOT_FW, rc);
+		mutex_unlock(&resource_context.lock);
+		return false;
+	}
+	vidc_command_control_fw = (unsigned char *)fw_boot->data;
+	vidc_command_control_fw_size = (u32) fw_boot->size;
+
+	rc = request_firmware(&fw_mpg4_dec, VIDC_MPG4_DEC_FW,
+						  resource_context.device);
+	if (rc) {
+		VCDRES_MSG_ERROR("request_firmware for %s error %d\n",
+				VIDC_MPG4_DEC_FW, rc);
+		status = false;
+		goto boot_fw_free;
+	}
+	vidc_mpg4_dec_fw = (unsigned char *)fw_mpg4_dec->data;
+	vidc_mpg4_dec_fw_size = (u32) fw_mpg4_dec->size;
+
+
+	rc = request_firmware(&fw_h263_dec, VIDC_H263_DEC_FW,
+						  resource_context.device);
+	if (rc) {
+		VCDRES_MSG_ERROR("request_firmware for %s error %d\n",
+				VIDC_H263_DEC_FW, rc);
+		status = false;
+		goto mp4dec_fw_free;
+	}
+	vidc_h263_dec_fw = (unsigned char *)fw_h263_dec->data;
+	vidc_h263_dec_fw_size = (u32) fw_h263_dec->size;
+
+	rc = request_firmware(&fw_h264_dec, VIDC_H264_DEC_FW,
+						  resource_context.device);
+	if (rc) {
+		VCDRES_MSG_ERROR("request_firmware for %s error %d\n",
+				VIDC_H264_DEC_FW, rc);
+		status = false;
+		goto h263dec_fw_free;
+	}
+	vidc_h264_dec_fw = (unsigned char *)fw_h264_dec->data;
+	vidc_h264_dec_fw_size = (u32) fw_h264_dec->size;
+
+	rc = request_firmware(&fw_mpg4_enc, VIDC_MPG4_ENC_FW,
+						  resource_context.device);
+	if (rc) {
+		VCDRES_MSG_ERROR("request_firmware for %s error %d\n",
+				VIDC_MPG4_ENC_FW, rc);
+		status = false;
+		goto h264dec_fw_free;
+	}
+	vidc_mpg4_enc_fw = (unsigned char *)fw_mpg4_enc->data;
+	vidc_mpg4_enc_fw_size = (u32) fw_mpg4_enc->size;
+
+	rc = request_firmware(&fw_h264_enc, VIDC_H264_ENC_FW,
+						  resource_context.device);
+	if (rc) {
+		VCDRES_MSG_ERROR("request_firmware for %s error %d\n",
+				VIDC_H264_ENC_FW, rc);
+		status = false;
+		goto mp4enc_fw_free;
+	}
+	vidc_h264_enc_fw = (unsigned char *)fw_h264_enc->data;
+	vidc_h264_enc_fw_size = (u32) fw_h264_enc->size;
+
+	rc = request_firmware(&fw_vc1_dec, VIDC_VC1_DEC_FW,
+						  resource_context.device);
+	if (rc) {
+		VCDRES_MSG_ERROR("request_firmware for %s error %d\n",
+				VIDC_VC1_DEC_FW, rc);
+		status = false;
+		goto h264enc_fw_free;
+	}
+	vidc_vc1_dec_fw = (unsigned char *)fw_vc1_dec->data;
+	vidc_vc1_dec_fw_size = (u32) fw_vc1_dec->size;
+	mutex_unlock(&resource_context.lock);
+	return status;
+
+h264enc_fw_free:
+	release_firmware(fw_h264_enc);
+mp4enc_fw_free:
+	release_firmware(fw_mpg4_enc);
+h264dec_fw_free:
+	release_firmware(fw_h264_dec);
+h263dec_fw_free:
+	release_firmware(fw_h263_dec);
+mp4dec_fw_free:
+	release_firmware(fw_mpg4_dec);
+boot_fw_free:
+	release_firmware(fw_boot);
+	mutex_unlock(&resource_context.lock);
+	return false;
+}
+
+void res_trk_init(struct device *device, u32 irq)
+{
+	if (resource_context.device || resource_context.irq_num ||
+		!device) {
+		VCDRES_MSG_ERROR("%s() Resource Tracker Init error\n",
+				__func__);
+		return;
+	}
+	memset(&resource_context, 0, sizeof(resource_context));
+	mutex_init(&resource_context.lock);
+	resource_context.device = device;
+	resource_context.irq_num = irq;
+	resource_context.core_type = VCD_CORE_720P;
+	resource_context.regulator = regulator_get(NULL, "fs_mfc");
+	resource_context.vidc_platform_data =
+		(struct msm_vidc_platform_data *) device->platform_data;
+	if (resource_context.vidc_platform_data) {
+		resource_context.memtype =
+		resource_context.vidc_platform_data->memtype;
+	} else {
+		resource_context.memtype = -1;
+	}
+}
+
+u32 res_trk_get_core_type(void)
+{
+	return resource_context.core_type;
+}
+
+u32 res_trk_get_mem_type(void)
+{
+	return resource_context.memtype;
+}
diff --git a/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker.h b/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker.h
new file mode 100644
index 0000000..3012858
--- /dev/null
+++ b/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker.h
@@ -0,0 +1,56 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _VIDEO_720P_RESOURCE_TRACKER_H_
+#define _VIDEO_720P_RESOURCE_TRACKER_H_
+#include <mach/board.h>
+#include "vcd_res_tracker_api.h"
+
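+/*
+ * Performance-level bounds and the scale factor used by the perf-level
+ * conversion helpers: 1000 perf-level units correspond to
+ * VCD_RESTRK_HZ_PER_1000_PERFLVL Hz of MFC clock.
+ */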
+#define VCD_RESTRK_MIN_PERF_LEVEL 37900
+#define VCD_RESTRK_MAX_PERF_LEVEL 108000
+#define VCD_RESTRK_MIN_FREQ_POINT 61440000
+#define VCD_RESTRK_MAX_FREQ_POINT 170667000
+#define VCD_RESTRK_HZ_PER_1000_PERFLVL 1580250
+
+struct res_trk_context {
+	struct device *device;
+	u32 irq_num;
+	struct mutex lock;
+	struct clk *hclk;
+	struct clk *hclk_div2;
+	struct clk *pclk;
+	unsigned long hclk_rate;
+	unsigned int clock_enabled;
+	unsigned int rail_enabled;
+	struct regulator *regulator;
+	struct msm_vidc_platform_data *vidc_platform_data;
+	u32 core_type;
+	int memtype;
+};
+
+#if DEBUG
+
+#define VCDRES_MSG_LOW(xx_fmt...)	printk(KERN_INFO "\n\t* " xx_fmt)
+#define VCDRES_MSG_MED(xx_fmt...)	printk(KERN_INFO "\n  * " xx_fmt)
+
+#else
+
+#define VCDRES_MSG_LOW(xx_fmt...)
+#define VCDRES_MSG_MED(xx_fmt...)
+
+#endif
+
+#define VCDRES_MSG_HIGH(xx_fmt...)	printk(KERN_WARNING "\n" xx_fmt)
+#define VCDRES_MSG_ERROR(xx_fmt...)	printk(KERN_ERR "\n err: " xx_fmt)
+#define VCDRES_MSG_FATAL(xx_fmt...)	printk(KERN_ERR "\n<FATAL> " xx_fmt)
+
+#endif
diff --git a/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker_api.h b/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker_api.h
new file mode 100644
index 0000000..db84743
--- /dev/null
+++ b/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker_api.h
@@ -0,0 +1,30 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _VIDEO_720P_RESOURCE_TRACKER_API_H_
+#define _VIDEO_720P_RESOURCE_TRACKER_API_H_
+
+#include "vcd_core.h"
+
+void res_trk_init(struct device *device, u32 irq);
+u32 res_trk_power_up(void);
+u32 res_trk_power_down(void);
+u32 res_trk_enable_clocks(void);
+u32 res_trk_disable_clocks(void);
+u32 res_trk_get_max_perf_level(u32 *pn_max_perf_lvl);
+u32 res_trk_set_perf_level(u32 req_perf_lvl, u32 *pn_set_perf_lvl,
+	struct vcd_dev_ctxt *dev_ctxt);
+u32 res_trk_get_curr_perf_level(u32 *pn_perf_lvl);
+u32 res_trk_download_firmware(void);
+u32 res_trk_get_core_type(void);
+u32 res_trk_get_mem_type(void);
+#endif
diff --git a/drivers/video/msm/vidc/Kconfig b/drivers/video/msm/vidc/Kconfig
new file mode 100644
index 0000000..9ffcb15
--- /dev/null
+++ b/drivers/video/msm/vidc/Kconfig
@@ -0,0 +1,39 @@
+#
+# VIDEO CORE
+#
+menuconfig MSM_VIDC
+	bool "Video Core Driver"
+	depends on ARCH_MSM8X60 || ARCH_MSM7X30 || ARCH_MSM8960
+	default y
+	---help---
+	Say Y here to see options for video device drivers.
+	If you say N, all options in this submenu will be skipped and disabled.
+
+config MSM_VIDC_720P
+	bool "720P Video Core"
+	depends on MSM_VIDC && ARCH_MSM7X30
+	default y
+	help
+	This option enables support for the 720p video core.
+
+config MSM_VIDC_1080P
+	bool "1080P Video Core"
+	depends on MSM_VIDC && (ARCH_MSM8X60 || ARCH_MSM8960)
+	default y
+	help
+	This option enables support for the 1080p video core.
+
+config MSM_VIDC_VENC
+	tristate "Video encoder"
+	depends on MSM_VIDC
+	default y
+	help
+	This option enables support for Video encoder.
+
+config MSM_VIDC_VDEC
+	tristate "Video decoder"
+	depends on MSM_VIDC
+	default y
+	help
+	This option enables support for Video decoder.
+
diff --git a/drivers/video/msm/vidc/Makefile b/drivers/video/msm/vidc/Makefile
new file mode 100644
index 0000000..af41f18
--- /dev/null
+++ b/drivers/video/msm/vidc/Makefile
@@ -0,0 +1,62 @@
+ifdef CONFIG_MSM_VIDC_720P
+EXTRA_CFLAGS += -Idrivers/video/msm/vidc/720p/ddl
+EXTRA_CFLAGS += -Idrivers/video/msm/vidc/720p/resource_tracker
+endif
+
+ifdef CONFIG_MSM_VIDC_1080P
+EXTRA_CFLAGS  += -Idrivers/video/msm/vidc/1080p/ddl
+EXTRA_CFLAGS  += -Idrivers/video/msm/vidc/1080p/resource_tracker
+endif
+
+EXTRA_CFLAGS  += -Idrivers/video/msm/vidc/common/dec
+EXTRA_CFLAGS  += -Idrivers/video/msm/vidc/common/enc
+EXTRA_CFLAGS  += -Idrivers/video/msm/vidc/common/vcd
+EXTRA_CFLAGS  += -Idrivers/video/msm/vidc/common/init
+
+obj-$(CONFIG_MSM_VIDC) += vidc.o
+
+vidc-objs :=	common/init/vidc_init.o \
+		common/vcd/vcd_api.o \
+		common/vcd/vcd_power_sm.o \
+		common/vcd/vcd_client_sm.o \
+		common/vcd/vcd_device_sm.o \
+		common/vcd/vcd_scheduler.o \
+		common/vcd/vcd_sub.o
+
+ifdef CONFIG_MSM_VIDC_720P
+vidc-objs +=	720p/ddl/vcd_ddl_firmware.o \
+		720p/ddl/vcd_ddl_metadata.o \
+		720p/ddl/vidc.o \
+		720p/ddl/vcd_ddl_utils.o \
+		720p/ddl/vcd_ddl.o \
+		720p/ddl/vcd_ddl_helper.o \
+		720p/ddl/vcd_ddl_interrupt_handler.o \
+		720p/ddl/vcd_ddl_hal.o \
+		720p/ddl/vcd_ddl_properties.o \
+		720p/resource_tracker/vcd_res_tracker.o \
+		720p/ddl/vcd_ddl_errors.o
+endif
+
+ifdef CONFIG_MSM_VIDC_1080P
+vidc-objs +=	1080p/ddl/vcd_ddl_helper.o \
+		1080p/ddl/vcd_ddl_utils.o \
+		1080p/ddl/vcd_ddl_interrupt_handler.o \
+		1080p/ddl/vcd_ddl_properties.o \
+		1080p/ddl/vcd_ddl_errors.o \
+		1080p/ddl/vcd_ddl_shared_mem.o \
+		1080p/ddl/vidc.o \
+		1080p/ddl/vidc_pix_cache.o \
+		1080p/ddl/vcd_ddl_vidc.o \
+		1080p/ddl/vcd_ddl.o \
+		1080p/ddl/vcd_ddl_metadata.o \
+		1080p/resource_tracker/vcd_res_tracker.o
+endif
+
+obj-$(CONFIG_MSM_VIDC_VDEC) += vidc_vdec.o
+
+vidc_vdec-objs :=	common/dec/vdec.o
+
+obj-$(CONFIG_MSM_VIDC_VENC) += vidc_venc.o
+
+vidc_venc-objs :=	common/enc/venc.o \
+			common/enc/venc_internal.o
diff --git a/drivers/video/msm/vidc/common/dec/vdec.c b/drivers/video/msm/vidc/common/dec/vdec.c
new file mode 100644
index 0000000..48d5119
--- /dev/null
+++ b/drivers/video/msm/vidc/common/dec/vdec.c
@@ -0,0 +1,1904 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/android_pmem.h>
+#include <linux/clk.h>
+#include <linux/timer.h>
+
+#include "vidc_type.h"
+#include "vcd_api.h"
+#include "vdec_internal.h"
+#include "vidc_init.h"
+
+#if DEBUG
+#define DBG(x...) printk(KERN_DEBUG x)
+#else
+#define DBG(x...)
+#endif
+
+#define INFO(x...) printk(KERN_INFO x)
+#define ERR(x...) printk(KERN_ERR x)
+
+#define VID_DEC_NAME		"msm_vidc_dec"
+
+static struct vid_dec_dev *vid_dec_device_p;
+static dev_t vid_dec_dev_num;
+static struct class *vid_dec_class;
+
+static s32 vid_dec_get_empty_client_index(void)
+{
+	u32 i, found = false;
+
+	for (i = 0; i < VIDC_MAX_NUM_CLIENTS; i++) {
+		if (!vid_dec_device_p->vdec_clients[i].vcd_handle) {
+			found = true;
+			break;
+		}
+	}
+	if (!found) {
+		ERR("%s():ERROR No space for new client\n", __func__);
+		return -ENOMEM;
+	} else {
+		DBG("%s(): available client index = %u\n", __func__, i);
+		return i;
+	}
+}
+
+u32 vid_dec_get_status(u32 status)
+{
+	u32 vdec_status;
+
+	switch (status) {
+	case VCD_ERR_SEQHDR_PARSE_FAIL:
+	case VCD_ERR_BITSTREAM_ERR:
+	case VCD_S_SUCCESS:
+		vdec_status = VDEC_S_SUCCESS;
+		break;
+	case VCD_ERR_FAIL:
+		vdec_status = VDEC_S_EFAIL;
+		break;
+	case VCD_ERR_ALLOC_FAIL:
+		vdec_status = VDEC_S_ENOSWRES;
+		break;
+	case VCD_ERR_ILLEGAL_OP:
+		vdec_status = VDEC_S_EINVALCMD;
+		break;
+	case VCD_ERR_ILLEGAL_PARM:
+		vdec_status = VDEC_S_EBADPARAM;
+		break;
+	case VCD_ERR_BAD_POINTER:
+	case VCD_ERR_BAD_HANDLE:
+		vdec_status = VDEC_S_EFATAL;
+		break;
+	case VCD_ERR_NOT_SUPPORTED:
+		vdec_status = VDEC_S_ENOTSUPP;
+		break;
+	case VCD_ERR_BAD_STATE:
+		vdec_status = VDEC_S_EINVALSTATE;
+		break;
+	case VCD_ERR_BUSY:
+		vdec_status = VDEC_S_BUSY;
+		break;
+	case VCD_ERR_MAX_CLIENT:
+		vdec_status = VDEC_S_ENOHWRES;
+		break;
+	default:
+		vdec_status = VDEC_S_EFAIL;
+		break;
+	}
+
+	return vdec_status;
+}
+
+static void vid_dec_notify_client(struct video_client_ctx *client_ctx)
+{
+	if (client_ctx)
+		complete(&client_ctx->event);
+}
+
+void vid_dec_vcd_open_done(struct video_client_ctx *client_ctx,
+			   struct vcd_handle_container *handle_container)
+{
+	DBG("vid_dec_vcd_open_done\n");
+
+	if (client_ctx) {
+		if (handle_container)
+			client_ctx->vcd_handle = handle_container->handle;
+		else
+			ERR("%s(): ERROR. handle_container is NULL\n",
+			    __func__);
+
+		vid_dec_notify_client(client_ctx);
+	} else
+		ERR("%s(): ERROR. client_ctx is NULL\n", __func__);
+}
+
+static void vid_dec_input_frame_done(struct video_client_ctx *client_ctx,
+				     u32 event, u32 status,
+				     struct vcd_frame_data *vcd_frame_data)
+{
+	struct vid_dec_msg *vdec_msg;
+
+	if (!client_ctx || !vcd_frame_data) {
+		ERR("vid_dec_input_frame_done() NULL pointer\n");
+		return;
+	}
+
+	vdec_msg = kzalloc(sizeof(struct vid_dec_msg), GFP_KERNEL);
+	if (!vdec_msg) {
+		ERR("vid_dec_input_frame_done(): cannot allocate vid_dec_msg "
+		    " buffer\n");
+		return;
+	}
+
+	vdec_msg->vdec_msg_info.status_code = vid_dec_get_status(status);
+
+	if (event == VCD_EVT_RESP_INPUT_DONE) {
+		vdec_msg->vdec_msg_info.msgcode =
+		    VDEC_MSG_RESP_INPUT_BUFFER_DONE;
+		DBG("Send INPUT_DON message to client = %p\n", client_ctx);
+
+	} else if (event == VCD_EVT_RESP_INPUT_FLUSHED) {
+		vdec_msg->vdec_msg_info.msgcode = VDEC_MSG_RESP_INPUT_FLUSHED;
+		DBG("Send INPUT_FLUSHED message to client = %p\n", client_ctx);
+	} else {
+		ERR("vid_dec_input_frame_done(): invalid event type: "
+			"%d\n", event);
+		vdec_msg->vdec_msg_info.msgcode = VDEC_MSG_INVALID;
+	}
+
+	vdec_msg->vdec_msg_info.msgdata.input_frame_clientdata =
+	    (void *)vcd_frame_data->frm_clnt_data;
+	vdec_msg->vdec_msg_info.msgdatasize = sizeof(void *);
+
+	mutex_lock(&client_ctx->msg_queue_lock);
+	list_add_tail(&vdec_msg->list, &client_ctx->msg_queue);
+	mutex_unlock(&client_ctx->msg_queue_lock);
+	wake_up(&client_ctx->msg_wait);
+}
+
+static void vid_dec_output_frame_done(struct video_client_ctx *client_ctx,
+			u32 event, u32 status,
+			struct vcd_frame_data *vcd_frame_data)
+{
+	struct vid_dec_msg *vdec_msg;
+
+	unsigned long kernel_vaddr = 0, phy_addr = 0, user_vaddr = 0;
+	int pmem_fd;
+	struct file *file;
+	s32 buffer_index = -1;
+	enum vdec_picture pic_type;
+
+	if (!client_ctx || !vcd_frame_data) {
+		ERR("vid_dec_input_frame_done() NULL pointer\n");
+		return;
+	}
+
+	vdec_msg = kzalloc(sizeof(struct vid_dec_msg), GFP_KERNEL);
+	if (!vdec_msg) {
+		ERR("vid_dec_input_frame_done(): cannot allocate vid_dec_msg "
+		    " buffer\n");
+		return;
+	}
+
+	vdec_msg->vdec_msg_info.status_code = vid_dec_get_status(status);
+
+	if (event == VCD_EVT_RESP_OUTPUT_DONE)
+		vdec_msg->vdec_msg_info.msgcode =
+		    VDEC_MSG_RESP_OUTPUT_BUFFER_DONE;
+	else if (event == VCD_EVT_RESP_OUTPUT_FLUSHED)
+		vdec_msg->vdec_msg_info.msgcode = VDEC_MSG_RESP_OUTPUT_FLUSHED;
+	else {
+		ERR("QVD: vid_dec_output_frame_done invalid cmd type: "
+			"%d\n", event);
+		vdec_msg->vdec_msg_info.msgcode = VDEC_MSG_INVALID;
+	}
+
+	kernel_vaddr = (unsigned long)vcd_frame_data->virtual;
+
+	if (vidc_lookup_addr_table(client_ctx, BUFFER_TYPE_OUTPUT,
+				      false, &user_vaddr, &kernel_vaddr,
+				      &phy_addr, &pmem_fd, &file,
+				      &buffer_index) ||
+		(vcd_frame_data->flags & VCD_FRAME_FLAG_EOS)) {
+
+		/* Buffer address in user space */
+		vdec_msg->vdec_msg_info.msgdata.output_frame.bufferaddr =
+		    (u8 *) user_vaddr;
+		/* Data length */
+		vdec_msg->vdec_msg_info.msgdata.output_frame.len =
+		    vcd_frame_data->data_len;
+		vdec_msg->vdec_msg_info.msgdata.output_frame.flags =
+		    vcd_frame_data->flags;
+		/* Timestamp pass-through from input frame */
+		vdec_msg->vdec_msg_info.msgdata.output_frame.time_stamp =
+		    vcd_frame_data->time_stamp;
+		/* Output frame client data */
+		vdec_msg->vdec_msg_info.msgdata.output_frame.client_data =
+		    (void *)vcd_frame_data->frm_clnt_data;
+		/* Associated input frame client data */
+		vdec_msg->vdec_msg_info.msgdata.output_frame.
+		    input_frame_clientdata =
+		    (void *)vcd_frame_data->ip_frm_tag;
+		/* Decoded picture display rectangle */
+		vdec_msg->vdec_msg_info.msgdata.output_frame.framesize.
+			bottom = vcd_frame_data->dec_op_prop.disp_frm.bottom;
+		vdec_msg->vdec_msg_info.msgdata.output_frame.framesize.left =
+		    vcd_frame_data->dec_op_prop.disp_frm.left;
+		vdec_msg->vdec_msg_info.msgdata.output_frame.framesize.right =
+			vcd_frame_data->dec_op_prop.disp_frm.right;
+		vdec_msg->vdec_msg_info.msgdata.output_frame.framesize.top =
+			vcd_frame_data->dec_op_prop.disp_frm.top;
+		if (vcd_frame_data->interlaced) {
+			vdec_msg->vdec_msg_info.msgdata.
+				output_frame.interlaced_format =
+				VDEC_InterlaceInterleaveFrameTopFieldFirst;
+		} else {
+			vdec_msg->vdec_msg_info.msgdata.
+				output_frame.interlaced_format =
+				VDEC_InterlaceFrameProgressive;
+		}
+		/* Decoded picture type */
+		switch (vcd_frame_data->frame) {
+		case VCD_FRAME_I:
+			pic_type = PICTURE_TYPE_I;
+			break;
+		case VCD_FRAME_P:
+			pic_type = PICTURE_TYPE_P;
+			break;
+		case VCD_FRAME_B:
+			pic_type = PICTURE_TYPE_B;
+			break;
+		case VCD_FRAME_NOTCODED:
+			pic_type = PICTURE_TYPE_SKIP;
+			break;
+		default:
+			pic_type = PICTURE_TYPE_UNKNOWN;
+		}
+		vdec_msg->vdec_msg_info.msgdata.output_frame.pic_type =
+			pic_type;
+		vdec_msg->vdec_msg_info.msgdatasize =
+		    sizeof(struct vdec_output_frameinfo);
+	} else {
+		ERR("vid_dec_output_frame_done UVA can not be found\n");
+		vdec_msg->vdec_msg_info.status_code = VDEC_S_EFATAL;
+	}
+
+	mutex_lock(&client_ctx->msg_queue_lock);
+	list_add_tail(&vdec_msg->list, &client_ctx->msg_queue);
+	mutex_unlock(&client_ctx->msg_queue_lock);
+	wake_up(&client_ctx->msg_wait);
+}
+
+static void vid_dec_lean_event(struct video_client_ctx *client_ctx,
+			       u32 event, u32 status)
+{
+	struct vid_dec_msg *vdec_msg;
+
+	if (!client_ctx) {
+		ERR("%s(): !client_ctx pointer\n", __func__);
+		return;
+	}
+
+	vdec_msg = kzalloc(sizeof(struct vid_dec_msg), GFP_KERNEL);
+	if (!vdec_msg) {
+		ERR("%s(): cannot allocate vid_dec_msg buffer\n", __func__);
+		return;
+	}
+
+	vdec_msg->vdec_msg_info.status_code = vid_dec_get_status(status);
+
+	switch (event) {
+	case VCD_EVT_IND_OUTPUT_RECONFIG:
+		INFO("msm_vidc_dec: Sending VDEC_MSG_EVT_CONFIG_CHANGED"
+			 " to client");
+		vdec_msg->vdec_msg_info.msgcode = VDEC_MSG_EVT_CONFIG_CHANGED;
+		break;
+	case VCD_EVT_IND_RESOURCES_LOST:
+		INFO("msm_vidc_dec: Sending VDEC_EVT_RESOURCES_LOST"
+			 " to client");
+		vdec_msg->vdec_msg_info.msgcode = VDEC_EVT_RESOURCES_LOST;
+		break;
+	case VCD_EVT_RESP_FLUSH_INPUT_DONE:
+		INFO("msm_vidc_dec: Sending VDEC_MSG_RESP_FLUSH_INPUT_DONE"
+			 " to client");
+		vdec_msg->vdec_msg_info.msgcode =
+		    VDEC_MSG_RESP_FLUSH_INPUT_DONE;
+		break;
+	case VCD_EVT_RESP_FLUSH_OUTPUT_DONE:
+		INFO("msm_vidc_dec: Sending VDEC_MSG_RESP_FLUSH_OUTPUT_DONE"
+			 " to client");
+		vdec_msg->vdec_msg_info.msgcode =
+		    VDEC_MSG_RESP_FLUSH_OUTPUT_DONE;
+		break;
+	case VCD_EVT_IND_HWERRFATAL:
+		INFO("msm_vidc_dec: Sending VDEC_MSG_EVT_HW_ERROR"
+			 " to client");
+		vdec_msg->vdec_msg_info.msgcode = VDEC_MSG_EVT_HW_ERROR;
+		break;
+	case VCD_EVT_RESP_START:
+		INFO("msm_vidc_dec: Sending VDEC_MSG_RESP_START_DONE"
+			 " to client");
+		vdec_msg->vdec_msg_info.msgcode = VDEC_MSG_RESP_START_DONE;
+		break;
+	case VCD_EVT_RESP_STOP:
+		INFO("msm_vidc_dec: Sending VDEC_MSG_RESP_STOP_DONE"
+			 " to client");
+		vdec_msg->vdec_msg_info.msgcode = VDEC_MSG_RESP_STOP_DONE;
+		break;
+	case VCD_EVT_RESP_PAUSE:
+		INFO("msm_vidc_dec: Sending VDEC_MSG_RESP_PAUSE_DONE"
+			 " to client");
+		vdec_msg->vdec_msg_info.msgcode = VDEC_MSG_RESP_PAUSE_DONE;
+		break;
+	case VCD_EVT_IND_INFO_OUTPUT_RECONFIG:
+		INFO("msm_vidc_dec: Sending VDEC_MSG_EVT_INFO_CONFIG_CHANGED"
+			 " to client");
+		vdec_msg->vdec_msg_info.msgcode =
+			 VDEC_MSG_EVT_INFO_CONFIG_CHANGED;
+		break;
+	default:
+		ERR("%s() : unknown event type\n", __func__);
+		break;
+	}
+
+	vdec_msg->vdec_msg_info.msgdatasize = 0;
+	if (client_ctx->stop_sync_cb &&
+	   (event == VCD_EVT_RESP_STOP || event == VCD_EVT_IND_HWERRFATAL)) {
+		client_ctx->stop_sync_cb = false;
+		complete(&client_ctx->event);
+		kfree(vdec_msg);
+		return;
+	}
+	mutex_lock(&client_ctx->msg_queue_lock);
+	list_add_tail(&vdec_msg->list, &client_ctx->msg_queue);
+	mutex_unlock(&client_ctx->msg_queue_lock);
+	wake_up(&client_ctx->msg_wait);
+}
+
+
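+/*
+ * Event callback registered with vcd_open(). Dispatches VCD events to
+ * the per-client handlers above; events that carry no frame payload
+ * are queued through vid_dec_lean_event().
+ */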
+void vid_dec_vcd_cb(u32 event, u32 status,
+		   void *info, size_t sz, void *handle, void *const client_data)
+{
+	struct video_client_ctx *client_ctx =
+	    (struct video_client_ctx *)client_data;
+
+	DBG("Entering %s()\n", __func__);
+
+	if (!client_ctx) {
+		ERR("%s(): client_ctx is NULL\n", __func__);
+		return;
+	}
+
+	client_ctx->event_status = status;
+
+	switch (event) {
+	case VCD_EVT_RESP_OPEN:
+		vid_dec_vcd_open_done(client_ctx,
+				      (struct vcd_handle_container *)
+				      info);
+		break;
+	case VCD_EVT_RESP_INPUT_DONE:
+	case VCD_EVT_RESP_INPUT_FLUSHED:
+		vid_dec_input_frame_done(client_ctx, event, status,
+					 (struct vcd_frame_data *)info);
+		break;
+	case VCD_EVT_RESP_OUTPUT_DONE:
+	case VCD_EVT_RESP_OUTPUT_FLUSHED:
+		vid_dec_output_frame_done(client_ctx, event, status,
+					  (struct vcd_frame_data *)info);
+		break;
+	case VCD_EVT_RESP_PAUSE:
+	case VCD_EVT_RESP_STOP:
+	case VCD_EVT_RESP_FLUSH_INPUT_DONE:
+	case VCD_EVT_RESP_FLUSH_OUTPUT_DONE:
+	case VCD_EVT_IND_OUTPUT_RECONFIG:
+	case VCD_EVT_IND_HWERRFATAL:
+	case VCD_EVT_IND_RESOURCES_LOST:
+	case VCD_EVT_IND_INFO_OUTPUT_RECONFIG:
+		vid_dec_lean_event(client_ctx, event, status);
+		break;
+	case VCD_EVT_RESP_START:
+		if (!client_ctx->seq_header_set)
+			vid_dec_lean_event(client_ctx, event, status);
+		else
+			vid_dec_notify_client(client_ctx);
+		break;
+	default:
+		ERR("%s() :  Error - Invalid event type =%u\n", __func__,
+		    event);
+		break;
+	}
+}
+
+static u32 vid_dec_set_codec(struct video_client_ctx *client_ctx,
+			     enum vdec_codec *vdec_codec)
+{
+	u32 result = true;
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_codec codec;
+	u32 vcd_status = VCD_ERR_FAIL;
+
+	if (!client_ctx || !vdec_codec)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_CODEC;
+	vcd_property_hdr.sz = sizeof(struct vcd_property_codec);
+
+	switch (*vdec_codec) {
+	case VDEC_CODECTYPE_MPEG4:
+		codec.codec = VCD_CODEC_MPEG4;
+		break;
+	case VDEC_CODECTYPE_H264:
+		codec.codec = VCD_CODEC_H264;
+		break;
+	case VDEC_CODECTYPE_DIVX_3:
+		codec.codec = VCD_CODEC_DIVX_3;
+		break;
+	case VDEC_CODECTYPE_DIVX_5:
+		codec.codec = VCD_CODEC_DIVX_5;
+		break;
+	case VDEC_CODECTYPE_XVID:
+		codec.codec = VCD_CODEC_XVID;
+		break;
+	case VDEC_CODECTYPE_H263:
+		codec.codec = VCD_CODEC_H263;
+		break;
+	case VDEC_CODECTYPE_MPEG2:
+		codec.codec = VCD_CODEC_MPEG2;
+		break;
+	case VDEC_CODECTYPE_VC1:
+		codec.codec = VCD_CODEC_VC1;
+		break;
+	case VDEC_CODECTYPE_VC1_RCV:
+		codec.codec = VCD_CODEC_VC1_RCV;
+		break;
+	default:
+		result = false;
+		break;
+	}
+
+	if (result) {
+		vcd_status = vcd_set_property(client_ctx->vcd_handle,
+					      &vcd_property_hdr, &codec);
+		if (vcd_status)
+			result = false;
+	}
+	return result;
+}
+
+static u32 vid_dec_set_output_format(struct video_client_ctx *client_ctx,
+				     enum vdec_output_fromat *output_format)
+{
+	u32 result = true;
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_buffer_format vcd_prop_buffer_format;
+	u32 vcd_status = VCD_ERR_FAIL;
+
+	if (!client_ctx || !output_format)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_BUFFER_FORMAT;
+	vcd_property_hdr.sz =
+	    sizeof(struct vcd_property_buffer_format);
+
+	switch (*output_format) {
+	case VDEC_YUV_FORMAT_NV12:
+		vcd_prop_buffer_format.buffer_format = VCD_BUFFER_FORMAT_NV12;
+		break;
+	case VDEC_YUV_FORMAT_TILE_4x2:
+		vcd_prop_buffer_format.buffer_format =
+		    VCD_BUFFER_FORMAT_TILE_4x2;
+		break;
+	default:
+		result = false;
+		break;
+	}
+
+	if (result)
+		vcd_status = vcd_set_property(client_ctx->vcd_handle,
+					      &vcd_property_hdr,
+					      &vcd_prop_buffer_format);
+
+	if (vcd_status)
+		return false;
+	else
+		return true;
+}
+
+static u32 vid_dec_set_frame_resolution(struct video_client_ctx *client_ctx,
+					struct vdec_picsize *video_resoultion)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_frame_size frame_resolution;
+	u32 vcd_status = VCD_ERR_FAIL;
+
+	if (!client_ctx || !video_resoultion)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_FRAME_SIZE;
+	vcd_property_hdr.sz = sizeof(struct vcd_property_frame_size);
+	frame_resolution.width = video_resoultion->frame_width;
+	frame_resolution.height = video_resoultion->frame_height;
+	frame_resolution.stride = video_resoultion->stride;
+	frame_resolution.scan_lines = video_resoultion->scan_lines;
+
+	vcd_status = vcd_set_property(client_ctx->vcd_handle,
+				      &vcd_property_hdr, &frame_resolution);
+
+	if (vcd_status)
+		return false;
+	else
+		return true;
+}
+
+static u32 vid_dec_get_frame_resolution(struct video_client_ctx *client_ctx,
+					struct vdec_picsize *video_resoultion)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_frame_size frame_resolution;
+	u32 vcd_status = VCD_ERR_FAIL;
+
+	if (!client_ctx || !video_resoultion)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_FRAME_SIZE;
+	vcd_property_hdr.sz = sizeof(struct vcd_property_frame_size);
+
+	vcd_status = vcd_get_property(client_ctx->vcd_handle, &vcd_property_hdr,
+					  &frame_resolution);
+
+	video_resoultion->frame_width = frame_resolution.width;
+	video_resoultion->frame_height = frame_resolution.height;
+	video_resoultion->scan_lines = frame_resolution.scan_lines;
+	video_resoultion->stride = frame_resolution.stride;
+
+	if (vcd_status)
+		return false;
+	else
+		return true;
+}
+
+static u32 vid_dec_get_progressive_only(struct video_client_ctx *client_ctx,
+					u32 *progressive_only)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	if (!client_ctx || !progressive_only)
+		return false;
+	vcd_property_hdr.prop_id = VCD_I_PROGRESSIVE_ONLY;
+	vcd_property_hdr.sz = sizeof(u32);
+	if (vcd_get_property(client_ctx->vcd_handle, &vcd_property_hdr,
+						 progressive_only))
+		return false;
+	else
+		return true;
+}
+
+static u32 vid_dec_set_picture_order(struct video_client_ctx *client_ctx,
+					u32 *picture_order)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	u32 vcd_status = VCD_ERR_FAIL, vcd_picture_order, ret = true;
+	if (!client_ctx || !picture_order)
+		return false;
+	vcd_property_hdr.prop_id = VCD_I_OUTPUT_ORDER;
+	vcd_property_hdr.sz = sizeof(u32);
+	if (*picture_order == VDEC_ORDER_DISPLAY)
+		vcd_picture_order = VCD_DEC_ORDER_DISPLAY;
+	else if (*picture_order == VDEC_ORDER_DECODE)
+		vcd_picture_order = VCD_DEC_ORDER_DECODE;
+	else
+		ret = false;
+	if (ret) {
+		DBG("%s() : Setting output picture order: %d\n",
+		    __func__, vcd_picture_order);
+		vcd_status = vcd_set_property(client_ctx->vcd_handle,
+				      &vcd_property_hdr, &vcd_picture_order);
+		if (vcd_status != VCD_S_SUCCESS)
+			ret = false;
+	}
+	return ret;
+}
+
+static u32 vid_dec_set_frame_rate(struct video_client_ctx *client_ctx,
+					struct vdec_framerate *frame_rate)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_frame_rate vcd_frame_rate;
+	u32 vcd_status = VCD_ERR_FAIL;
+
+	if (!client_ctx || !frame_rate)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_FRAME_RATE;
+	vcd_property_hdr.sz = sizeof(struct vcd_property_frame_rate);
+	vcd_frame_rate.fps_numerator = frame_rate->fps_numerator;
+	vcd_frame_rate.fps_denominator = frame_rate->fps_denominator;
+
+	vcd_status = vcd_set_property(client_ctx->vcd_handle,
+				      &vcd_property_hdr, &vcd_frame_rate);
+
+	if (vcd_status)
+		return false;
+	else
+		return true;
+}
+
+static u32 vid_dec_set_extradata(struct video_client_ctx *client_ctx,
+					u32 *extradata_flag)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_meta_data_enable vcd_meta_data;
+	u32 vcd_status = VCD_ERR_FAIL;
+	if (!client_ctx || !extradata_flag)
+		return false;
+	vcd_property_hdr.prop_id = VCD_I_METADATA_ENABLE;
+	vcd_property_hdr.sz = sizeof(struct vcd_property_meta_data_enable);
+	vcd_meta_data.meta_data_enable_flag = *extradata_flag;
+	vcd_status = vcd_set_property(client_ctx->vcd_handle,
+				      &vcd_property_hdr, &vcd_meta_data);
+	if (vcd_status)
+		return false;
+	else
+		return true;
+}
+
+static u32 vid_dec_set_idr_only_decoding(struct video_client_ctx *client_ctx)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	u32 vcd_status = VCD_ERR_FAIL;
+	u32 enable = true;
+	if (!client_ctx)
+		return false;
+	vcd_property_hdr.prop_id = VCD_I_DEC_PICTYPE;
+	vcd_property_hdr.sz = sizeof(u32);
+	vcd_status = vcd_set_property(client_ctx->vcd_handle,
+			&vcd_property_hdr, &enable);
+	if (vcd_status)
+		return false;
+	return true;
+}
+
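+/*
+ * Hand a client-allocated pmem region to VCD for H.264 motion-vector
+ * data. get_pmem_file() is used only to resolve the physical and
+ * kernel addresses; the file reference is dropped again immediately.
+ */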
+static u32 vid_dec_set_h264_mv_buffers(struct video_client_ctx *client_ctx,
+					struct vdec_h264_mv *mv_data)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_h264_mv_buffer vcd_h264_mv_buffer;
+	u32 vcd_status = VCD_ERR_FAIL;
+	u32 len;
+	struct file *file;
+
+	if (!client_ctx || !mv_data)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_H264_MV_BUFFER;
+	vcd_property_hdr.sz = sizeof(struct vcd_property_h264_mv_buffer);
+
+	memset(&vcd_h264_mv_buffer, 0,
+		   sizeof(struct vcd_property_h264_mv_buffer));
+	vcd_h264_mv_buffer.size = mv_data->size;
+	vcd_h264_mv_buffer.count = mv_data->count;
+	vcd_h264_mv_buffer.pmem_fd = mv_data->pmem_fd;
+	vcd_h264_mv_buffer.offset = mv_data->offset;
+
+	if (get_pmem_file(vcd_h264_mv_buffer.pmem_fd,
+		(unsigned long *) (&(vcd_h264_mv_buffer.physical_addr)),
+		(unsigned long *) (&vcd_h264_mv_buffer.kernel_virtual_addr),
+		(unsigned long *) (&len), &file)) {
+		ERR("%s(): get_pmem_file failed\n", __func__);
+		return false;
+	}
+	put_pmem_file(file);
+
+	DBG("Virt: %p, Phys %p, fd: %d\n", vcd_h264_mv_buffer.
+		kernel_virtual_addr, vcd_h264_mv_buffer.physical_addr,
+		vcd_h264_mv_buffer.pmem_fd);
+
+	vcd_status = vcd_set_property(client_ctx->vcd_handle,
+				      &vcd_property_hdr, &vcd_h264_mv_buffer);
+
+	if (vcd_status)
+		return false;
+	else
+		return true;
+}
+
+static u32 vid_dec_set_cont_on_reconfig(struct video_client_ctx *client_ctx)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	u32 vcd_status = VCD_ERR_FAIL;
+	u32 enable = true;
+	if (!client_ctx)
+		return false;
+	vcd_property_hdr.prop_id = VCD_I_CONT_ON_RECONFIG;
+	vcd_property_hdr.sz = sizeof(u32);
+	vcd_status = vcd_set_property(client_ctx->vcd_handle,
+		&vcd_property_hdr, &enable);
+	if (vcd_status)
+		return false;
+	return true;
+}
+
+static u32 vid_dec_get_h264_mv_buffer_size(struct video_client_ctx *client_ctx,
+					struct vdec_mv_buff_size *mv_buff)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_buffer_size h264_mv_buffer_size;
+	u32 vcd_status = VCD_ERR_FAIL;
+
+	if (!client_ctx || !mv_buff)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_GET_H264_MV_SIZE;
+	vcd_property_hdr.sz = sizeof(struct vcd_property_buffer_size);
+
+	h264_mv_buffer_size.width = mv_buff->width;
+	h264_mv_buffer_size.height = mv_buff->height;
+
+	vcd_status = vcd_get_property(client_ctx->vcd_handle,
+				      &vcd_property_hdr, &h264_mv_buffer_size);
+
+	mv_buff->width = h264_mv_buffer_size.width;
+	mv_buff->height = h264_mv_buffer_size.height;
+	mv_buff->size = h264_mv_buffer_size.size;
+	mv_buff->alignment = h264_mv_buffer_size.alignment;
+
+	if (vcd_status)
+		return false;
+	else
+		return true;
+}
+
+static u32 vid_dec_free_h264_mv_buffers(struct video_client_ctx *client_ctx)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_buffer_size h264_mv_buffer_size;
+	u32 vcd_status = VCD_ERR_FAIL;
+
+	if (!client_ctx)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_FREE_H264_MV_BUFFER;
+	vcd_property_hdr.sz = sizeof(struct vcd_property_buffer_size);
+
+	vcd_status = vcd_set_property(client_ctx->vcd_handle,
+				      &vcd_property_hdr, &h264_mv_buffer_size);
+	if (vcd_status)
+		return false;
+	else
+		return true;
+}
+
+static u32 vid_dec_get_buffer_req(struct video_client_ctx *client_ctx,
+				  struct vdec_allocatorproperty *vdec_buf_req)
+{
+	u32 vcd_status = VCD_ERR_FAIL;
+	struct vcd_buffer_requirement vcd_buf_req;
+
+	if (!client_ctx || !vdec_buf_req)
+		return false;
+
+	if (vdec_buf_req->buffer_type == VDEC_BUFFER_TYPE_INPUT) {
+		vcd_status = vcd_get_buffer_requirements(client_ctx->vcd_handle,
+							 VCD_BUFFER_INPUT,
+							 &vcd_buf_req);
+	} else {
+		vcd_status = vcd_get_buffer_requirements(client_ctx->vcd_handle,
+							 VCD_BUFFER_OUTPUT,
+							 &vcd_buf_req);
+	}
+
+	if (vcd_status) {
+		return false;
+	} else {
+		vdec_buf_req->mincount = vcd_buf_req.min_count;
+		vdec_buf_req->maxcount = vcd_buf_req.max_count;
+		vdec_buf_req->actualcount = vcd_buf_req.actual_count;
+		vdec_buf_req->buffer_size = vcd_buf_req.sz;
+		vdec_buf_req->alignment = vcd_buf_req.align;
+		vdec_buf_req->buf_poolid = vcd_buf_req.buf_pool_id;
+
+		return true;
+	}
+}
+
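+/*
+ * Register an input or output buffer: record the user-space address in
+ * the client's address table and pass the resulting kernel mapping on
+ * to VCD with vcd_set_buffer().
+ */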
+static u32 vid_dec_set_buffer(struct video_client_ctx *client_ctx,
+			      struct vdec_setbuffer_cmd *buffer_info)
+{
+	enum vcd_buffer_type buffer = VCD_BUFFER_INPUT;
+	enum buffer_dir dir_buffer = BUFFER_TYPE_INPUT;
+	u32 vcd_status = VCD_ERR_FAIL;
+	unsigned long kernel_vaddr, buf_adr_offset = 0;
+
+	if (!client_ctx || !buffer_info)
+		return false;
+
+	if (buffer_info->buffer_type == VDEC_BUFFER_TYPE_OUTPUT) {
+		dir_buffer = BUFFER_TYPE_OUTPUT;
+		buffer = VCD_BUFFER_OUTPUT;
+		buf_adr_offset = (unsigned long)buffer_info->buffer.offset;
+	}
+
+	/*If buffer cannot be set, ignore */
+	if (!vidc_insert_addr_table(client_ctx, dir_buffer,
+		(unsigned long)buffer_info->buffer.bufferaddr,
+		&kernel_vaddr, buffer_info->buffer.pmem_fd,
+		buf_adr_offset, MAX_VIDEO_NUM_OF_BUFF)) {
+		DBG("%s() : user_virt_addr = %p cannot be set.",
+		    __func__, buffer_info->buffer.bufferaddr);
+		return false;
+	}
+
+	vcd_status = vcd_set_buffer(client_ctx->vcd_handle,
+		buffer, (u8 *) kernel_vaddr,
+		buffer_info->buffer.buffer_len);
+
+	if (!vcd_status)
+		return true;
+	else
+		return false;
+}
+
+
+static u32 vid_dec_free_buffer(struct video_client_ctx *client_ctx,
+			      struct vdec_setbuffer_cmd *buffer_info)
+{
+	enum vcd_buffer_type buffer = VCD_BUFFER_INPUT;
+	enum buffer_dir dir_buffer = BUFFER_TYPE_INPUT;
+	u32 vcd_status = VCD_ERR_FAIL;
+	unsigned long kernel_vaddr;
+
+	if (!client_ctx || !buffer_info)
+		return false;
+
+	if (buffer_info->buffer_type == VDEC_BUFFER_TYPE_OUTPUT) {
+		dir_buffer = BUFFER_TYPE_OUTPUT;
+		buffer = VCD_BUFFER_OUTPUT;
+	}
+	/*If buffer NOT set, ignore */
+	if (!vidc_delete_addr_table(client_ctx, dir_buffer,
+				(unsigned long)buffer_info->buffer.bufferaddr,
+				&kernel_vaddr)) {
+		DBG("%s() : user_virt_addr = %p has not been set.",
+		    __func__, buffer_info->buffer.bufferaddr);
+		return true;
+	}
+
+	vcd_status = vcd_free_buffer(client_ctx->vcd_handle, buffer,
+					 (u8 *)kernel_vaddr);
+
+	if (!vcd_status)
+		return true;
+	else
+		return false;
+}
+
+static u32 vid_dec_pause_resume(struct video_client_ctx *client_ctx, u32 pause)
+{
+	u32 vcd_status;
+
+	if (!client_ctx) {
+		ERR("\n %s(): Invalid client_ctx", __func__);
+		return false;
+	}
+
+	if (pause) {
+		INFO("msm_vidc_dec: PAUSE command from client = %p\n",
+			 client_ctx);
+		vcd_status = vcd_pause(client_ctx->vcd_handle);
+	} else{
+		INFO("msm_vidc_dec: RESUME command from client = %p\n",
+			 client_ctx);
+		vcd_status = vcd_resume(client_ctx->vcd_handle);
+	}
+
+	if (vcd_status)
+		return false;
+
+	return true;
+
+}
+
+static u32 vid_dec_start_stop(struct video_client_ctx *client_ctx, u32 start)
+{
+	struct vid_dec_msg *vdec_msg = NULL;
+	u32 vcd_status;
+
+	INFO("msm_vidc_dec: Inside %s()", __func__);
+	if (!client_ctx) {
+		ERR("\n Invalid client_ctx");
+		return false;
+	}
+
+	if (start) {
+		if (client_ctx->seq_header_set) {
+			INFO("%s(): Seq Hdr set: Send START_DONE to client",
+				 __func__);
+			vdec_msg = kzalloc(sizeof(*vdec_msg), GFP_KERNEL);
+			if (!vdec_msg) {
+				ERR("vid_dec_start_stop: cannot allocate"
+				    "buffer\n");
+				return false;
+			}
+			vdec_msg->vdec_msg_info.msgcode =
+			    VDEC_MSG_RESP_START_DONE;
+			vdec_msg->vdec_msg_info.status_code = VDEC_S_SUCCESS;
+			vdec_msg->vdec_msg_info.msgdatasize = 0;
+			mutex_lock(&client_ctx->msg_queue_lock);
+			list_add_tail(&vdec_msg->list, &client_ctx->msg_queue);
+			mutex_unlock(&client_ctx->msg_queue_lock);
+
+			wake_up(&client_ctx->msg_wait);
+
+			DBG("Send START_DONE message to client = %p\n",
+			    client_ctx);
+
+		} else {
+			INFO("%s(): Calling decode_start()", __func__);
+			vcd_status =
+			    vcd_decode_start(client_ctx->vcd_handle, NULL);
+
+			if (vcd_status) {
+				ERR("%s(): vcd_decode_start failed."
+				    " vcd_status = %u\n", __func__, vcd_status);
+				return false;
+			}
+		}
+	} else {
+		INFO("%s(): Calling vcd_stop()", __func__);
+		mutex_lock(&vid_dec_device_p->lock);
+		vcd_status = VCD_ERR_FAIL;
+		if (!client_ctx->stop_called) {
+			client_ctx->stop_called = true;
+			vcd_status = vcd_stop(client_ctx->vcd_handle);
+		}
+		if (vcd_status) {
+			ERR("%s(): vcd_stop failed.  vcd_status = %u\n",
+				__func__, vcd_status);
+			mutex_unlock(&vid_dec_device_p->lock);
+			return false;
+		}
+		DBG("Send STOP_DONE message to client = %p\n", client_ctx);
+		mutex_unlock(&vid_dec_device_p->lock);
+	}
+	return true;
+}
+
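+/*
+ * Queue one input bitstream buffer for decode. The user-space address
+ * must already be present in the client's input address table (set up
+ * via VDEC_IOCTL_SET_BUFFER) so it can be translated to its kernel
+ * mapping before being handed to vcd_decode_frame().
+ */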
+static u32 vid_dec_decode_frame(struct video_client_ctx *client_ctx,
+				struct vdec_input_frameinfo *input_frame_info)
+{
+	struct vcd_frame_data vcd_input_buffer;
+	unsigned long kernel_vaddr, phy_addr, user_vaddr;
+	int pmem_fd;
+	struct file *file;
+	s32 buffer_index = -1;
+	u32 vcd_status = VCD_ERR_FAIL;
+
+	if (!client_ctx || !input_frame_info)
+		return false;
+
+	user_vaddr = (unsigned long)input_frame_info->bufferaddr;
+
+	if (vidc_lookup_addr_table(client_ctx, BUFFER_TYPE_INPUT,
+				      true, &user_vaddr, &kernel_vaddr,
+				      &phy_addr, &pmem_fd, &file,
+				      &buffer_index)) {
+
+		/* kernel_vaddr  is found. send the frame to VCD */
+		memset((void *)&vcd_input_buffer, 0,
+		       sizeof(struct vcd_frame_data));
+		vcd_input_buffer.virtual =
+		    (u8 *) (kernel_vaddr + input_frame_info->pmem_offset);
+		vcd_input_buffer.offset = input_frame_info->offset;
+		vcd_input_buffer.frm_clnt_data =
+		    (u32) input_frame_info->client_data;
+		vcd_input_buffer.ip_frm_tag =
+		    (u32) input_frame_info->client_data;
+		vcd_input_buffer.data_len = input_frame_info->datalen;
+		vcd_input_buffer.time_stamp = input_frame_info->timestamp;
+		/* Rely on VCD using the same flags as OMX */
+		vcd_input_buffer.flags = input_frame_info->flags;
+
+		vcd_status = vcd_decode_frame(client_ctx->vcd_handle,
+					      &vcd_input_buffer);
+		if (!vcd_status)
+			return true;
+		else {
+			ERR("%s(): vcd_decode_frame failed = %u\n", __func__,
+			    vcd_status);
+			return false;
+		}
+
+	} else {
+		ERR("%s(): kernel_vaddr not found\n", __func__);
+		return false;
+	}
+}
+
+static u32 vid_dec_fill_output_buffer(struct video_client_ctx *client_ctx,
+		struct vdec_fillbuffer_cmd *fill_buffer_cmd)
+{
+	unsigned long kernel_vaddr, phy_addr, user_vaddr;
+	int pmem_fd;
+	struct file *file;
+	s32 buffer_index = -1;
+	u32 vcd_status = VCD_ERR_FAIL;
+
+	struct vcd_frame_data vcd_frame;
+
+	if (!client_ctx || !fill_buffer_cmd)
+		return false;
+
+	user_vaddr = (unsigned long)fill_buffer_cmd->buffer.bufferaddr;
+
+	if (vidc_lookup_addr_table(client_ctx, BUFFER_TYPE_OUTPUT,
+				      true, &user_vaddr, &kernel_vaddr,
+				      &phy_addr, &pmem_fd, &file,
+				      &buffer_index)) {
+
+		memset((void *)&vcd_frame, 0,
+		       sizeof(struct vcd_frame_data));
+		vcd_frame.virtual = (u8 *) kernel_vaddr;
+		vcd_frame.frm_clnt_data = (u32) fill_buffer_cmd->client_data;
+		vcd_frame.alloc_len = fill_buffer_cmd->buffer.buffer_len;
+
+		vcd_status = vcd_fill_output_buffer(client_ctx->vcd_handle,
+						    &vcd_frame);
+		if (!vcd_status)
+			return true;
+		else {
+			ERR("%s(): vcd_fill_output_buffer failed = %u\n",
+			    __func__, vcd_status);
+			return false;
+		}
+	} else {
+		ERR("%s(): kernel_vaddr not found\n", __func__);
+		return false;
+	}
+}
+
+
+static u32 vid_dec_flush(struct video_client_ctx *client_ctx,
+			 enum vdec_bufferflush flush_dir)
+{
+	u32 vcd_status = VCD_ERR_FAIL;
+
+	INFO("msm_vidc_dec: %s() called with dir = %u", __func__,
+		 flush_dir);
+	if (!client_ctx) {
+		ERR("\n Invalid client_ctx");
+		return false;
+	}
+
+	switch (flush_dir) {
+	case VDEC_FLUSH_TYPE_INPUT:
+		vcd_status = vcd_flush(client_ctx->vcd_handle, VCD_FLUSH_INPUT);
+		break;
+	case VDEC_FLUSH_TYPE_OUTPUT:
+		vcd_status = vcd_flush(client_ctx->vcd_handle,
+				       VCD_FLUSH_OUTPUT);
+		break;
+	case VDEC_FLUSH_TYPE_ALL:
+		vcd_status = vcd_flush(client_ctx->vcd_handle, VCD_FLUSH_ALL);
+		break;
+	default:
+		ERR("%s(): Inavlid flush cmd. flush_dir = %u\n", __func__,
+		    flush_dir);
+		return false;
+		break;
+	}
+
+	if (!vcd_status)
+		return true;
+	else {
+		ERR("%s(): vcd_flush failed. vcd_status = %u "
+		    " flush_dir = %u\n", __func__, vcd_status, flush_dir);
+		return false;
+	}
+}
+
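+/*
+ * Wake-up condition for the message wait queue: true when a message is
+ * queued, or when the client has asked for the next read to be aborted
+ * via stop_msg.
+ */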
+static u32 vid_dec_msg_pending(struct video_client_ctx *client_ctx)
+{
+	u32 islist_empty = 0;
+	mutex_lock(&client_ctx->msg_queue_lock);
+	islist_empty = list_empty(&client_ctx->msg_queue);
+	mutex_unlock(&client_ctx->msg_queue_lock);
+
+	if (islist_empty) {
+		DBG("%s(): vid_dec msg queue empty\n", __func__);
+		if (client_ctx->stop_msg) {
+			DBG("%s(): List empty and Stop Msg set\n",
+				__func__);
+			return client_ctx->stop_msg;
+		}
+	} else
+		DBG("%s(): vid_dec msg queue Not empty\n", __func__);
+
+	return !islist_empty;
+}
+
+static int vid_dec_get_next_msg(struct video_client_ctx *client_ctx,
+				struct vdec_msginfo *vdec_msg_info)
+{
+	int rc;
+	struct vid_dec_msg *vid_dec_msg = NULL;
+
+	if (!client_ctx)
+		return false;
+
+	rc = wait_event_interruptible(client_ctx->msg_wait,
+				      vid_dec_msg_pending(client_ctx));
+	if (rc < 0) {
+		DBG("rc = %d, stop_msg = %u\n", rc, client_ctx->stop_msg);
+		return rc;
+	} else if (client_ctx->stop_msg) {
+		DBG("rc = %d, stop_msg = %u\n", rc, client_ctx->stop_msg);
+		return -EIO;
+	}
+
+	mutex_lock(&client_ctx->msg_queue_lock);
+	if (!list_empty(&client_ctx->msg_queue)) {
+		DBG("%s(): After Wait\n", __func__);
+		vid_dec_msg = list_first_entry(&client_ctx->msg_queue,
+					       struct vid_dec_msg, list);
+		list_del(&vid_dec_msg->list);
+		memcpy(vdec_msg_info, &vid_dec_msg->vdec_msg_info,
+		       sizeof(struct vdec_msginfo));
+		kfree(vid_dec_msg);
+	}
+	mutex_unlock(&client_ctx->msg_queue_lock);
+	return 0;
+}
+
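+/*
+ * ioctl entry point. Every command first copies the vdec_ioctl_msg
+ * envelope from user space and then moves the command-specific payload
+ * through vdec_msg.in / vdec_msg.out.
+ */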
+static long vid_dec_ioctl(struct file *file,
+			 unsigned cmd, unsigned long u_arg)
+{
+	struct video_client_ctx *client_ctx = NULL;
+	struct vdec_ioctl_msg vdec_msg;
+	u32 vcd_status;
+	unsigned long kernel_vaddr, phy_addr, len;
+	struct file *pmem_file;
+	u32 result = true;
+	void __user *arg = (void __user *)u_arg;
+
+	DBG("%s\n", __func__);
+	if (_IOC_TYPE(cmd) != VDEC_IOCTL_MAGIC)
+		return -ENOTTY;
+
+	client_ctx = (struct video_client_ctx *)file->private_data;
+	if (!client_ctx) {
+		ERR("!client_ctx. Cannot attach to device handle\n");
+		return -ENODEV;
+	}
+
+	switch (cmd) {
+	case VDEC_IOCTL_SET_CODEC:
+	{
+		enum vdec_codec vdec_codec;
+		DBG("VDEC_IOCTL_SET_CODEC\n");
+		if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg)))
+			return -EFAULT;
+		if (copy_from_user(&vdec_codec,	vdec_msg.in,
+						   sizeof(vdec_codec)))
+			return -EFAULT;
+		DBG("setting code type = %u\n", vdec_codec);
+		result = vid_dec_set_codec(client_ctx, &vdec_codec);
+		if (!result)
+			return -EIO;
+		break;
+	}
+	case VDEC_IOCTL_SET_OUTPUT_FORMAT:
+	{
+		enum vdec_output_fromat output_format;
+		DBG("VDEC_IOCTL_SET_OUTPUT_FORMAT\n");
+		if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg)))
+			return -EFAULT;
+		if (copy_from_user(&output_format, vdec_msg.in,
+						   sizeof(output_format)))
+			return -EFAULT;
+
+		result = vid_dec_set_output_format(client_ctx, &output_format);
+
+		if (!result)
+			return -EIO;
+		break;
+	}
+	case VDEC_IOCTL_SET_PICRES:
+	{
+		struct vdec_picsize video_resoultion;
+		DBG("VDEC_IOCTL_SET_PICRES\n");
+		if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg)))
+			return -EFAULT;
+		if (copy_from_user(&video_resoultion, vdec_msg.in,
+						   sizeof(video_resoultion)))
+			return -EFAULT;
+		result =
+		vid_dec_set_frame_resolution(client_ctx, &video_resoultion);
+		if (!result)
+			return -EIO;
+		break;
+	}
+	case VDEC_IOCTL_GET_PICRES:
+	{
+		struct vdec_picsize video_resoultion;
+		DBG("VDEC_IOCTL_GET_PICRES\n");
+		if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg)))
+			return -EFAULT;
+		if (copy_from_user(&video_resoultion, vdec_msg.out,
+						   sizeof(video_resoultion)))
+			return -EFAULT;
+
+		result = vid_dec_get_frame_resolution(client_ctx,
+					&video_resoultion);
+
+		if (result) {
+			if (copy_to_user(vdec_msg.out, &video_resoultion,
+					sizeof(video_resoultion)))
+				return -EFAULT;
+		} else
+			return -EIO;
+		break;
+	}
+	case VDEC_IOCTL_SET_BUFFER_REQ:
+	{
+		struct vdec_allocatorproperty vdec_buf_req;
+		struct vcd_buffer_requirement buffer_req;
+		DBG("VDEC_IOCTL_SET_BUFFER_REQ\n");
+		if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg)))
+			return -EFAULT;
+
+		if (copy_from_user(&vdec_buf_req, vdec_msg.in,
+				   sizeof(vdec_buf_req)))
+			return -EFAULT;
+
+		buffer_req.actual_count = vdec_buf_req.actualcount;
+		buffer_req.align = vdec_buf_req.alignment;
+		buffer_req.max_count = vdec_buf_req.maxcount;
+		buffer_req.min_count = vdec_buf_req.mincount;
+		buffer_req.sz = vdec_buf_req.buffer_size;
+
+		switch (vdec_buf_req.buffer_type) {
+		case VDEC_BUFFER_TYPE_INPUT:
+			vcd_status =
+			vcd_set_buffer_requirements(client_ctx->vcd_handle,
+				VCD_BUFFER_INPUT, &buffer_req);
+			break;
+		case VDEC_BUFFER_TYPE_OUTPUT:
+			vcd_status =
+			vcd_set_buffer_requirements(client_ctx->vcd_handle,
+				VCD_BUFFER_OUTPUT, &buffer_req);
+			break;
+		default:
+			vcd_status = VCD_ERR_BAD_POINTER;
+			break;
+		}
+
+		if (vcd_status)
+			return -EFAULT;
+		break;
+	}
+	case VDEC_IOCTL_GET_BUFFER_REQ:
+	{
+		struct vdec_allocatorproperty vdec_buf_req;
+		DBG("VDEC_IOCTL_GET_BUFFER_REQ\n");
+		if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg)))
+			return -EFAULT;
+		if (copy_from_user(&vdec_buf_req, vdec_msg.out,
+				   sizeof(vdec_buf_req)))
+			return -EFAULT;
+
+		result = vid_dec_get_buffer_req(client_ctx, &vdec_buf_req);
+
+		if (result) {
+			if (copy_to_user(vdec_msg.out, &vdec_buf_req,
+					sizeof(vdec_buf_req)))
+				return -EFAULT;
+		} else
+			return -EIO;
+		break;
+	}
+	case VDEC_IOCTL_SET_BUFFER:
+	{
+		struct vdec_setbuffer_cmd setbuffer;
+		DBG("VDEC_IOCTL_SET_BUFFER\n");
+		if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg)))
+			return -EFAULT;
+		if (copy_from_user(&setbuffer, vdec_msg.in,
+				sizeof(setbuffer)))
+			return -EFAULT;
+		result = vid_dec_set_buffer(client_ctx, &setbuffer);
+		if (!result)
+			return -EIO;
+		break;
+	}
+	case VDEC_IOCTL_FREE_BUFFER:
+	{
+		struct vdec_setbuffer_cmd setbuffer;
+		DBG("VDEC_IOCTL_FREE_BUFFER\n");
+		if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg)))
+			return -EFAULT;
+		if (copy_from_user(&setbuffer, vdec_msg.in,
+				sizeof(setbuffer)))
+			return -EFAULT;
+		result = vid_dec_free_buffer(client_ctx, &setbuffer);
+		if (!result)
+			return -EIO;
+		break;
+	}
+	case VDEC_IOCTL_CMD_START:
+	{
+		DBG(" VDEC_IOCTL_CMD_START\n");
+		result = vid_dec_start_stop(client_ctx, true);
+		if (!result)
+			return -EIO;
+		break;
+	}
+	case VDEC_IOCTL_CMD_STOP:
+	{
+		DBG("VDEC_IOCTL_CMD_STOP\n");
+		result = vid_dec_start_stop(client_ctx, false);
+		if (!result)
+			return -EIO;
+		break;
+	}
+	case VDEC_IOCTL_CMD_PAUSE:
+	{
+		result = vid_dec_pause_resume(client_ctx, true);
+		if (!result)
+			return -EIO;
+		break;
+	}
+	case VDEC_IOCTL_CMD_RESUME:
+	{
+		DBG("VDEC_IOCTL_CMD_PAUSE\n");
+		result = vid_dec_pause_resume(client_ctx, false);
+
+		if (!result)
+			return -EIO;
+		break;
+	}
+	case VDEC_IOCTL_DECODE_FRAME:
+	{
+		struct vdec_input_frameinfo input_frame_info;
+		DBG("VDEC_IOCTL_DECODE_FRAME\n");
+		if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg)))
+			return -EFAULT;
+		if (copy_from_user(&input_frame_info, vdec_msg.in,
+				   sizeof(input_frame_info)))
+			return -EFAULT;
+
+		result = vid_dec_decode_frame(client_ctx, &input_frame_info);
+
+		if (!result)
+			return -EIO;
+		break;
+	}
+	case VDEC_IOCTL_FILL_OUTPUT_BUFFER:
+	{
+		struct vdec_fillbuffer_cmd fill_buffer_cmd;
+		DBG("VDEC_IOCTL_FILL_OUTPUT_BUFFER\n");
+		if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg)))
+			return -EFAULT;
+		if (copy_from_user(&fill_buffer_cmd, vdec_msg.in,
+				   sizeof(fill_buffer_cmd)))
+			return -EFAULT;
+		result = vid_dec_fill_output_buffer(client_ctx,
+							&fill_buffer_cmd);
+		if (!result)
+			return -EIO;
+		break;
+	}
+	case VDEC_IOCTL_CMD_FLUSH:
+	{
+		enum vdec_bufferflush flush_dir;
+		DBG("VDEC_IOCTL_CMD_FLUSH\n");
+		if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg)))
+			return -EFAULT;
+		if (copy_from_user(&flush_dir, vdec_msg.in,
+				   sizeof(flush_dir)))
+			return -EFAULT;
+		result = vid_dec_flush(client_ctx, flush_dir);
+		if (!result)
+			return -EIO;
+		break;
+	}
+	case VDEC_IOCTL_GET_NEXT_MSG:
+	{
+		struct vdec_msginfo vdec_msg_info;
+		DBG("VDEC_IOCTL_GET_NEXT_MSG\n");
+		if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg)))
+			return -EFAULT;
+		result = vid_dec_get_next_msg(client_ctx, &vdec_msg_info);
+		if (result)
+			return result;
+		if (copy_to_user(vdec_msg.out, &vdec_msg_info,
+					sizeof(vdec_msg_info)))
+			return -EFAULT;
+		break;
+	}
+	case VDEC_IOCTL_STOP_NEXT_MSG:
+	{
+		DBG("VDEC_IOCTL_STOP_NEXT_MSG\n");
+		client_ctx->stop_msg = 1;
+		wake_up(&client_ctx->msg_wait);
+		break;
+	}
+	case VDEC_IOCTL_SET_SEQUENCE_HEADER:
+	{
+		struct vdec_seqheader seq_header;
+		struct vcd_sequence_hdr vcd_seq_hdr;
+		DBG("VDEC_IOCTL_SET_SEQUENCE_HEADER\n");
+		if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg))) {
+			ERR("Copy from user vdec_msg failed\n");
+			return -EFAULT;
+		}
+		if (copy_from_user(&seq_header,	vdec_msg.in,
+				   sizeof(seq_header))) {
+			ERR("Copy from user seq_header failed\n");
+			return -EFAULT;
+		}
+		if (!seq_header.seq_header_len) {
+			ERR("Seq Len is Zero\n");
+			return -EFAULT;
+		}
+
+		if (get_pmem_file(seq_header.pmem_fd,
+				  &phy_addr, &kernel_vaddr, &len, &pmem_file)) {
+			ERR("%s(): get_pmem_file failed\n", __func__);
+			return -EFAULT;
+		}
+		put_pmem_file(pmem_file);
+
+		vcd_seq_hdr.sequence_header_len = seq_header.seq_header_len;
+		kernel_vaddr += (unsigned long)seq_header.pmem_offset;
+		vcd_seq_hdr.sequence_header = (u8 *)kernel_vaddr;
+		if (!vcd_seq_hdr.sequence_header) {
+			ERR("Sequence Header pointer failed\n");
+			return -EFAULT;
+		}
+		client_ctx->seq_header_set = true;
+		if (vcd_decode_start(client_ctx->vcd_handle, &vcd_seq_hdr)) {
+			ERR("Decode start Failed\n");
+			client_ctx->seq_header_set = false;
+			return -EFAULT;
+		}
+		DBG("Wait Client completion Sequence Header\n");
+		wait_for_completion(&client_ctx->event);
+		vcd_seq_hdr.sequence_header = NULL;
+		if (client_ctx->event_status) {
+			ERR("Set Seq Header status is failed");
+			return -EFAULT;
+		}
+		break;
+	}
+	case VDEC_IOCTL_GET_NUMBER_INSTANCES:
+	{
+		DBG("VDEC_IOCTL_GET_NUMBER_INSTANCES\n");
+		if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg)))
+			return -EFAULT;
+		if (copy_to_user(vdec_msg.out,
+			&vid_dec_device_p->num_clients, sizeof(u32)))
+			return -EFAULT;
+		break;
+	}
+	case VDEC_IOCTL_GET_INTERLACE_FORMAT:
+	{
+		u32 progressive_only, interlace_format;
+		DBG("VDEC_IOCTL_GET_INTERLACE_FORMAT\n");
+		if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg)))
+			return -EFAULT;
+		result = vid_dec_get_progressive_only(client_ctx,
+					&progressive_only);
+		if (result) {
+			interlace_format = progressive_only ?
+				VDEC_InterlaceFrameProgressive :
+				VDEC_InterlaceInterleaveFrameTopFieldFirst;
+			if (copy_to_user(vdec_msg.out, &interlace_format,
+					sizeof(u32)))
+				return -EFAULT;
+		} else
+			return -EIO;
+		break;
+	}
+	case VDEC_IOCTL_SET_PICTURE_ORDER:
+	{
+		u32 picture_order;
+		DBG("VDEC_IOCTL_SET_PICTURE_ORDER\n");
+		if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg)))
+			return -EFAULT;
+		if (copy_from_user(&picture_order, vdec_msg.in,
+						   sizeof(u32)))
+			return -EFAULT;
+		result = vid_dec_set_picture_order(client_ctx, &picture_order);
+		if (!result)
+			return -EIO;
+		break;
+	}
+	case VDEC_IOCTL_SET_FRAME_RATE:
+	{
+		struct vdec_framerate frame_rate;
+		DBG("VDEC_IOCTL_SET_FRAME_RATE\n");
+		if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg)))
+			return -EFAULT;
+		if (copy_from_user(&frame_rate, vdec_msg.in,
+						   sizeof(frame_rate)))
+			return -EFAULT;
+		result = vid_dec_set_frame_rate(client_ctx, &frame_rate);
+		if (!result)
+			return -EIO;
+		break;
+	}
+	case VDEC_IOCTL_SET_EXTRADATA:
+	{
+		u32 extradata_flag;
+		DBG("VDEC_IOCTL_SET_EXTRADATA\n");
+		if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg)))
+			return -EFAULT;
+		if (copy_from_user(&extradata_flag, vdec_msg.in,
+						   sizeof(u32)))
+			return -EFAULT;
+		result = vid_dec_set_extradata(client_ctx, &extradata_flag);
+		if (!result)
+			return -EIO;
+		break;
+	}
+	case VDEC_IOCTL_SET_H264_MV_BUFFER:
+	{
+		struct vdec_h264_mv mv_data;
+		DBG("VDEC_IOCTL_SET_H264_MV_BUFFER\n");
+		if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg)))
+			return -EFAULT;
+		if (copy_from_user(&mv_data, vdec_msg.in,
+						   sizeof(mv_data)))
+			return -EFAULT;
+		result = vid_dec_set_h264_mv_buffers(client_ctx, &mv_data);
+
+		if (!result)
+			return -EIO;
+		break;
+	}
+	case VDEC_IOCTL_FREE_H264_MV_BUFFER:
+	{
+		DBG("VDEC_IOCTL_FREE_H264_MV_BUFFER\n");
+		result = vid_dec_free_h264_mv_buffers(client_ctx);
+		if (!result)
+			return -EIO;
+		break;
+	}
+	case VDEC_IOCTL_GET_MV_BUFFER_SIZE:
+	{
+		struct vdec_mv_buff_size mv_buff;
+		DBG("VDEC_IOCTL_GET_MV_BUFFER_SIZE\n");
+		if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg)))
+			return -EFAULT;
+		if (copy_from_user(&mv_buff, vdec_msg.out,
+						   sizeof(mv_buff)))
+			return -EFAULT;
+		result = vid_dec_get_h264_mv_buffer_size(client_ctx, &mv_buff);
+		if (result) {
+			DBG(" Returning W: %d, H: %d, S: %d, A: %d",
+				mv_buff.width, mv_buff.height,
+				mv_buff.size, mv_buff.alignment);
+			if (copy_to_user(vdec_msg.out, &mv_buff,
+					sizeof(mv_buff)))
+				return -EFAULT;
+		} else
+			return -EIO;
+		break;
+	}
+	case VDEC_IOCTL_SET_IDR_ONLY_DECODING:
+	{
+		result = vid_dec_set_idr_only_decoding(client_ctx);
+		if (!result)
+			return -EIO;
+		break;
+	}
+	case VDEC_IOCTL_SET_CONT_ON_RECONFIG:
+	{
+		result = vid_dec_set_cont_on_reconfig(client_ctx);
+		if (!result)
+			return -EIO;
+		break;
+	}
+	default:
+		ERR("%s(): Unsupported ioctl\n", __func__);
+		return -ENOTTY;
+		break;
+	}
+
+	return 0;
+}
+
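+/*
+ * Tear down one client: issue a synchronous stop if none was requested
+ * yet, drain any queued messages, close the VCD session and release
+ * the client slot.
+ */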
+static u32 vid_dec_close_client(struct video_client_ctx *client_ctx)
+{
+	struct vid_dec_msg *vdec_msg;
+	u32 vcd_status;
+
+	INFO("msm_vidc_dec: Inside %s()", __func__);
+	if (!client_ctx || (!client_ctx->vcd_handle)) {
+		ERR("\n Invalid client_ctx");
+		return false;
+	}
+
+	mutex_lock(&vid_dec_device_p->lock);
+	if (!client_ctx->stop_called) {
+		client_ctx->stop_called = true;
+		client_ctx->stop_sync_cb = true;
+		vcd_status = vcd_stop(client_ctx->vcd_handle);
+		DBG("\n Stuck at the stop call");
+		if (!vcd_status)
+			wait_for_completion(&client_ctx->event);
+		DBG("\n Came out of wait event");
+	}
+	mutex_lock(&client_ctx->msg_queue_lock);
+	while (!list_empty(&client_ctx->msg_queue)) {
+		DBG("%s(): Delete remaining entries\n", __func__);
+		vdec_msg = list_first_entry(&client_ctx->msg_queue,
+						   struct vid_dec_msg, list);
+		if (vdec_msg) {
+			list_del(&vdec_msg->list);
+			kfree(vdec_msg);
+		}
+	}
+	mutex_unlock(&client_ctx->msg_queue_lock);
+	vcd_status = vcd_close(client_ctx->vcd_handle);
+
+	if (vcd_status) {
+		mutex_unlock(&vid_dec_device_p->lock);
+		return false;
+	}
+	memset((void *)client_ctx, 0, sizeof(struct video_client_ctx));
+	vid_dec_device_p->num_clients--;
+	mutex_unlock(&vid_dec_device_p->lock);
+	return true;
+}
+
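+/*
+ * Device open: load firmware for the first client, claim a free client
+ * slot, initialise the per-client message queue and wait for the
+ * VCD_EVT_RESP_OPEN callback before returning.
+ */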
+static int vid_dec_open(struct inode *inode, struct file *file)
+{
+	s32 client_index;
+	struct video_client_ctx *client_ctx;
+	u32 vcd_status = VCD_ERR_FAIL;
+	u8 client_count = 0;
+
+	INFO("msm_vidc_dec: Inside %s()", __func__);
+	mutex_lock(&vid_dec_device_p->lock);
+
+	client_count = vcd_get_num_of_clients();
+	if (client_count == VIDC_MAX_NUM_CLIENTS) {
+		ERR("ERROR : vid_dec_open() max number of clients"
+		    "limit reached\n");
+		mutex_unlock(&vid_dec_device_p->lock);
+		return -ENODEV;
+	}
+
+	DBG(" Virtual Address of ioremap is %p\n", vid_dec_device_p->virt_base);
+	if (!vid_dec_device_p->num_clients) {
+		if (!vidc_load_firmware()) {
+			mutex_unlock(&vid_dec_device_p->lock);
+			return -ENODEV;
+		}
+	}
+
+	client_index = vid_dec_get_empty_client_index();
+	if (client_index < 0) {
+		ERR("%s() : No free clients\n", __func__);
+		mutex_unlock(&vid_dec_device_p->lock);
+		return -ENODEV;
+	}
+	client_ctx = &vid_dec_device_p->vdec_clients[client_index];
+	vid_dec_device_p->num_clients++;
+	init_completion(&client_ctx->event);
+	mutex_init(&client_ctx->msg_queue_lock);
+	INIT_LIST_HEAD(&client_ctx->msg_queue);
+	init_waitqueue_head(&client_ctx->msg_wait);
+	client_ctx->stop_msg = 0;
+	client_ctx->stop_called = false;
+	client_ctx->stop_sync_cb = false;
+	vcd_status = vcd_open(vid_dec_device_p->device_handle, true,
+			      vid_dec_vcd_cb, client_ctx);
+	if (!vcd_status) {
+		wait_for_completion(&client_ctx->event);
+		if (client_ctx->event_status) {
+			ERR("callback for vcd_open returned error: %u",
+				client_ctx->event_status);
+			mutex_unlock(&vid_dec_device_p->lock);
+			return -EFAULT;
+		}
+	} else {
+		ERR("vcd_open returned error: %u", vcd_status);
+		mutex_unlock(&vid_dec_device_p->lock);
+		return -EFAULT;
+	}
+
+	client_ctx->seq_header_set = false;
+	file->private_data = client_ctx;
+	mutex_unlock(&vid_dec_device_p->lock);
+	return 0;
+}
+
+static int vid_dec_release(struct inode *inode, struct file *file)
+{
+	struct video_client_ctx *client_ctx = file->private_data;
+
+	INFO("msm_vidc_dec: Inside %s()", __func__);
+	vid_dec_close_client(client_ctx);
+	vidc_release_firmware();
+#ifndef USE_RES_TRACKER
+	vidc_disable_clk();
+#endif
+	INFO("msm_vidc_dec: Return from %s()", __func__);
+	return 0;
+}
+
+static const struct file_operations vid_dec_fops = {
+	.owner = THIS_MODULE,
+	.open = vid_dec_open,
+	.release = vid_dec_release,
+	.unlocked_ioctl = vid_dec_ioctl,
+};
+
+void vid_dec_interrupt_deregister(void)
+{
+}
+
+void vid_dec_interrupt_register(void *device_name)
+{
+}
+
+void vid_dec_interrupt_clear(void)
+{
+}
+
+void *vid_dec_map_dev_base_addr(void *device_name)
+{
+	return vid_dec_device_p->virt_base;
+}
+
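+/*
+ * One-time VCD core setup: fetch the ioremapped VIDC register base and
+ * register the address-map, interrupt and timer callbacks with
+ * vcd_init().
+ */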
+static int vid_dec_vcd_init(void)
+{
+	int rc;
+	struct vcd_init_config vcd_init_config;
+	u32 i;
+
+	/* init_timer(&hw_timer); */
+	INFO("msm_vidc_dec: Inside %s()", __func__);
+	vid_dec_device_p->num_clients = 0;
+
+	for (i = 0; i < VIDC_MAX_NUM_CLIENTS; i++) {
+		memset((void *)&vid_dec_device_p->vdec_clients[i], 0,
+		       sizeof(vid_dec_device_p->vdec_clients[i]));
+	}
+
+	mutex_init(&vid_dec_device_p->lock);
+	vid_dec_device_p->virt_base = vidc_get_ioaddr();
+	DBG("%s() : base address for VIDC core %u\n", __func__, \
+		(int)vid_dec_device_p->virt_base);
+
+	if (!vid_dec_device_p->virt_base) {
+		ERR("%s() : ioremap failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	vcd_init_config.device_name = "VIDC";
+	vcd_init_config.map_dev_base_addr = vid_dec_map_dev_base_addr;
+	vcd_init_config.interrupt_clr = vid_dec_interrupt_clear;
+	vcd_init_config.register_isr = vid_dec_interrupt_register;
+	vcd_init_config.deregister_isr = vid_dec_interrupt_deregister;
+	vcd_init_config.timer_create = vidc_timer_create;
+	vcd_init_config.timer_release = vidc_timer_release;
+	vcd_init_config.timer_start = vidc_timer_start;
+	vcd_init_config.timer_stop = vidc_timer_stop;
+
+	rc = vcd_init(&vcd_init_config, &vid_dec_device_p->device_handle);
+
+	if (rc) {
+		ERR("%s() : vcd_init failed\n", __func__);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static int __init vid_dec_init(void)
+{
+	int rc = 0;
+	struct device *class_devp;
+
+	INFO("msm_vidc_dec: Inside %s()", __func__);
+	vid_dec_device_p = kzalloc(sizeof(struct vid_dec_dev), GFP_KERNEL);
+	if (!vid_dec_device_p) {
+		ERR("%s Unable to allocate memory for vid_dec_dev\n",
+		       __func__);
+		return -ENOMEM;
+	}
+
+	rc = alloc_chrdev_region(&vid_dec_dev_num, 0, 1, VID_DEC_NAME);
+	if (rc < 0) {
+		ERR("%s: alloc_chrdev_region Failed rc = %d\n",
+		       __func__, rc);
+		goto error_vid_dec_alloc_chrdev_region;
+	}
+
+	vid_dec_class = class_create(THIS_MODULE, VID_DEC_NAME);
+	if (IS_ERR(vid_dec_class)) {
+		rc = PTR_ERR(vid_dec_class);
+		ERR("%s: couldn't create vid_dec_class rc = %d\n",
+		       __func__, rc);
+
+		goto error_vid_dec_class_create;
+	}
+
+	class_devp = device_create(vid_dec_class, NULL, vid_dec_dev_num, NULL,
+				   VID_DEC_NAME);
+
+	if (IS_ERR(class_devp)) {
+		rc = PTR_ERR(class_devp);
+		ERR("%s: class device_create failed %d\n",
+		       __func__, rc);
+		goto error_vid_dec_class_device_create;
+	}
+
+	vid_dec_device_p->device = class_devp;
+
+	cdev_init(&vid_dec_device_p->cdev, &vid_dec_fops);
+	vid_dec_device_p->cdev.owner = THIS_MODULE;
+	rc = cdev_add(&(vid_dec_device_p->cdev), vid_dec_dev_num, 1);
+
+	if (rc < 0) {
+		ERR("%s: cdev_add failed %d\n", __func__, rc);
+		goto error_vid_dec_cdev_add;
+	}
+	vid_dec_vcd_init();
+	return 0;
+
+error_vid_dec_cdev_add:
+	device_destroy(vid_dec_class, vid_dec_dev_num);
+error_vid_dec_class_device_create:
+	class_destroy(vid_dec_class);
+error_vid_dec_class_create:
+	unregister_chrdev_region(vid_dec_dev_num, 1);
+error_vid_dec_alloc_chrdev_region:
+	kfree(vid_dec_device_p);
+
+	return rc;
+}
+
+static void __exit vid_dec_exit(void)
+{
+	INFO("msm_vidc_dec: Inside %s()", __func__);
+	cdev_del(&(vid_dec_device_p->cdev));
+	device_destroy(vid_dec_class, vid_dec_dev_num);
+	class_destroy(vid_dec_class);
+	unregister_chrdev_region(vid_dec_dev_num, 1);
+	kfree(vid_dec_device_p);
+	INFO("msm_vidc_dec: Return from %s()", __func__);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Video decoder driver");
+MODULE_VERSION("1.0");
+
+module_init(vid_dec_init);
+module_exit(vid_dec_exit);
diff --git a/drivers/video/msm/vidc/common/dec/vdec_internal.h b/drivers/video/msm/vidc/common/dec/vdec_internal.h
new file mode 100644
index 0000000..867c3b3
--- /dev/null
+++ b/drivers/video/msm/vidc/common/dec/vdec_internal.h
@@ -0,0 +1,43 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef VDEC_INTERNAL_H
+#define VDEC_INTERNAL_H
+
+#include <linux/msm_vidc_dec.h>
+#include <linux/cdev.h>
+#include "vidc_init.h"
+
+struct vid_dec_msg {
+	struct list_head list;
+	struct vdec_msginfo vdec_msg_info;
+};
+
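+/* Per-device state shared by all decoder clients. */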
+struct vid_dec_dev {
+	struct cdev cdev;
+	struct device *device;
+	resource_size_t phys_base;
+	void __iomem *virt_base;
+	unsigned int irq;
+	struct clk *hclk;
+	struct clk *hclk_div2;
+	struct clk *pclk;
+	unsigned long hclk_rate;
+	struct mutex lock;
+	s32 device_handle;
+	struct video_client_ctx vdec_clients[VIDC_MAX_NUM_CLIENTS];
+	u32 num_clients;
+	void(*timer_handler)(void *);
+};
+
+#endif
diff --git a/drivers/video/msm/vidc/common/enc/venc.c b/drivers/video/msm/vidc/common/enc/venc.c
new file mode 100644
index 0000000..a69b810
--- /dev/null
+++ b/drivers/video/msm/vidc/common/enc/venc.c
@@ -0,0 +1,1549 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/android_pmem.h>
+#include <linux/clk.h>
+
+#include "vidc_type.h"
+#include "vcd_api.h"
+#include "venc_internal.h"
+#include "vidc_init.h"
+
+#define VID_ENC_NAME	"msm_vidc_enc"
+
+#if DEBUG
+#define DBG(x...) printk(KERN_DEBUG x)
+#else
+#define DBG(x...)
+#endif
+
+#define INFO(x...) printk(KERN_INFO x)
+#define ERR(x...) printk(KERN_ERR x)
+
+static struct vid_enc_dev *vid_enc_device_p;
+static dev_t vid_enc_dev_num;
+static struct class *vid_enc_class;
+static long vid_enc_ioctl(struct file *file,
+	unsigned cmd, unsigned long arg);
+static int stop_cmd;
+
+static s32 vid_enc_get_empty_client_index(void)
+{
+	u32 i;
+	u32 found = false;
+
+	for (i = 0; i < VIDC_MAX_NUM_CLIENTS; i++) {
+		if (!vid_enc_device_p->venc_clients[i].vcd_handle) {
+			found = true;
+			break;
+		}
+	}
+	if (!found) {
+		ERR("%s():ERROR No space for new client\n",
+			__func__);
+		return -ENOMEM;
+	} else {
+		DBG("%s(): available client index = %u\n",
+			__func__, i);
+		return i;
+	}
+}
+
+
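+/* Map a VCD status code onto the corresponding VEN_S_* value. */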
+u32 vid_enc_get_status(u32 status)
+{
+	u32 venc_status;
+
+	switch (status) {
+	case VCD_S_SUCCESS:
+		venc_status = VEN_S_SUCCESS;
+		break;
+	case VCD_ERR_FAIL:
+		venc_status = VEN_S_EFAIL;
+		break;
+	case VCD_ERR_ALLOC_FAIL:
+		venc_status = VEN_S_ENOSWRES;
+		break;
+	case VCD_ERR_ILLEGAL_OP:
+		venc_status = VEN_S_EINVALCMD;
+		break;
+	case VCD_ERR_ILLEGAL_PARM:
+		venc_status = VEN_S_EBADPARAM;
+		break;
+	case VCD_ERR_BAD_POINTER:
+	case VCD_ERR_BAD_HANDLE:
+		venc_status = VEN_S_EFATAL;
+		break;
+	case VCD_ERR_NOT_SUPPORTED:
+		venc_status = VEN_S_ENOTSUPP;
+		break;
+	case VCD_ERR_BAD_STATE:
+		venc_status = VEN_S_EINVALSTATE;
+		break;
+	case VCD_ERR_MAX_CLIENT:
+		venc_status = VEN_S_ENOHWRES;
+		break;
+	default:
+		venc_status = VEN_S_EFAIL;
+		break;
+	}
+	return venc_status;
+}
+
+static void vid_enc_notify_client(struct video_client_ctx *client_ctx)
+{
+	if (client_ctx)
+		complete(&client_ctx->event);
+}
+
+void vid_enc_vcd_open_done(struct video_client_ctx *client_ctx,
+	struct vcd_handle_container *handle_container)
+{
+	DBG("vid_enc_vcd_open_done\n");
+
+	if (client_ctx) {
+		if (handle_container)
+			client_ctx->vcd_handle = handle_container->handle;
+		else
+			ERR("%s(): ERROR. handle_container is NULL\n",
+				__func__);
+		vid_enc_notify_client(client_ctx);
+	} else
+		ERR("%s(): ERROR. client_ctx is NULL\n",
+			__func__);
+}
+
+static void vid_enc_input_frame_done(struct video_client_ctx *client_ctx,
+		u32 event, u32 status,
+		struct vcd_frame_data *vcd_frame_data)
+{
+	struct vid_enc_msg *venc_msg;
+
+	if (!client_ctx || !vcd_frame_data) {
+		ERR("vid_enc_input_frame_done() NULL pointer\n");
+		return;
+	}
+
+	venc_msg = kzalloc(sizeof(struct vid_enc_msg),
+					    GFP_KERNEL);
+	if (!venc_msg) {
+		ERR("vid_enc_input_frame_done(): cannot allocate vid_enc_msg "
+		" buffer\n");
+		return;
+	}
+
+	venc_msg->venc_msg_info.statuscode = vid_enc_get_status(status);
+
+	venc_msg->venc_msg_info.msgcode = VEN_MSG_INPUT_BUFFER_DONE;
+
+	switch (event) {
+	case VCD_EVT_RESP_INPUT_DONE:
+	   DBG("Send INPUT_DON message to client = %p\n",
+			client_ctx);
+	   break;
+	case VCD_EVT_RESP_INPUT_FLUSHED:
+		DBG("Send INPUT_FLUSHED message to client = %p\n",
+			client_ctx);
+	   break;
+	default:
+		ERR("vid_enc_input_frame_done(): invalid event type: "
+			"%d\n", event);
+		venc_msg->venc_msg_info.statuscode = VEN_S_EFATAL;
+	   break;
+	}
+
+	venc_msg->venc_msg_info.buf.clientdata =
+		(void *)vcd_frame_data->frm_clnt_data;
+	venc_msg->venc_msg_info.msgdata_size =
+		sizeof(struct vid_enc_msg);
+
+	mutex_lock(&client_ctx->msg_queue_lock);
+	list_add_tail(&venc_msg->list, &client_ctx->msg_queue);
+	mutex_unlock(&client_ctx->msg_queue_lock);
+	wake_up(&client_ctx->msg_wait);
+}
+
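+/*
+ * Queue an OUTPUT_BUFFER_DONE message for the client.  The kernel-side
+ * buffer address reported by VCD is translated back to the user-space
+ * address the client registered before the message is queued.
+ */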
+static void vid_enc_output_frame_done(struct video_client_ctx *client_ctx,
+		u32 event, u32 status,
+		struct vcd_frame_data *vcd_frame_data)
+{
+	struct vid_enc_msg *venc_msg;
+	unsigned long kernel_vaddr, phy_addr, user_vaddr;
+	int pmem_fd;
+	struct file *file;
+	s32 buffer_index = -1;
+
+	if (!client_ctx || !vcd_frame_data) {
+		ERR("vid_enc_input_frame_done() NULL pointer\n");
+		return;
+	}
+
+	venc_msg = kzalloc(sizeof(struct vid_enc_msg),
+					   GFP_KERNEL);
+	if (!venc_msg) {
+		ERR("vid_enc_input_frame_done(): cannot allocate vid_enc_msg "
+		" buffer\n");
+		return;
+	}
+
+	venc_msg->venc_msg_info.statuscode = vid_enc_get_status(status);
+	venc_msg->venc_msg_info.msgcode = VEN_MSG_OUTPUT_BUFFER_DONE;
+
+	switch (event) {
+	case VCD_EVT_RESP_OUTPUT_DONE:
+		DBG("Send OUTPUT_DONE message to client = %p\n",
+			client_ctx);
+		break;
+	case VCD_EVT_RESP_OUTPUT_FLUSHED:
+		DBG("Send OUTPUT_FLUSHED message to client = %p\n",
+			client_ctx);
+		break;
+	default:
+		ERR("QVD: vid_enc_output_frame_done invalid cmd type: %d\n",
+			event);
+		venc_msg->venc_msg_info.statuscode = VEN_S_EFATAL;
+		break;
+	}
+
+	kernel_vaddr =
+		(unsigned long)vcd_frame_data->virtual;
+
+	if (vidc_lookup_addr_table(client_ctx, BUFFER_TYPE_OUTPUT,
+		false, &user_vaddr, &kernel_vaddr,
+		&phy_addr, &pmem_fd, &file,
+		&buffer_index)) {
+
+		/* Buffer address in user space */
+		venc_msg->venc_msg_info.buf.ptrbuffer =	(u8 *) user_vaddr;
+		/* Client data passed through unchanged */
+		venc_msg->venc_msg_info.buf.clientdata = (void *)
+		vcd_frame_data->frm_clnt_data;
+		/* Data length */
+		venc_msg->venc_msg_info.buf.len =
+			vcd_frame_data->data_len;
+		venc_msg->venc_msg_info.buf.flags =
+			vcd_frame_data->flags;
+		/* Timestamp pass-through from input frame */
+		venc_msg->venc_msg_info.buf.timestamp =
+			vcd_frame_data->time_stamp;
+
+		/* Size of the message payload */
+		venc_msg->venc_msg_info.msgdata_size =
+			sizeof(struct venc_buffer);
+	} else {
+		ERR("vid_enc_output_frame_done UVA can not be found\n");
+		venc_msg->venc_msg_info.statuscode =
+			VEN_S_EFATAL;
+	}
+
+	mutex_lock(&client_ctx->msg_queue_lock);
+	list_add_tail(&venc_msg->list, &client_ctx->msg_queue);
+	mutex_unlock(&client_ctx->msg_queue_lock);
+	wake_up(&client_ctx->msg_wait);
+}
+
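+/*
+ * Queue a payload-less ("lean") event such as START/STOP/PAUSE/FLUSH
+ * completion; only the message code and status are passed to the client.
+ */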
+static void vid_enc_lean_event(struct video_client_ctx *client_ctx,
+	u32 event, u32 status)
+{
+	struct vid_enc_msg *venc_msg;
+	if (!client_ctx) {
+		ERR("%s(): !client_ctx pointer\n",
+			__func__);
+		return;
+	}
+
+	venc_msg = kzalloc(sizeof(struct vid_enc_msg),
+			GFP_KERNEL);
+	if (!venc_msg) {
+		ERR("%s(): cannot allocate vid_enc_msg buffer\n",
+			__func__);
+		return;
+	}
+
+	venc_msg->venc_msg_info.statuscode =
+		vid_enc_get_status(status);
+
+	switch (event) {
+	case VCD_EVT_RESP_FLUSH_INPUT_DONE:
+		INFO("\n msm_vidc_enc: Sending VCD_EVT_RESP_FLUSH_INPUT_DONE"
+			 " to client");
+		venc_msg->venc_msg_info.msgcode =
+			VEN_MSG_FLUSH_INPUT_DONE;
+		break;
+	case VCD_EVT_RESP_FLUSH_OUTPUT_DONE:
+		INFO("\n msm_vidc_enc: Sending VCD_EVT_RESP_FLUSH_OUTPUT_DONE"
+			 " to client");
+		venc_msg->venc_msg_info.msgcode =
+			VEN_MSG_FLUSH_OUPUT_DONE;
+		break;
+
+	case VCD_EVT_RESP_START:
+		INFO("\n msm_vidc_enc: Sending VCD_EVT_RESP_START"
+			 " to client");
+		venc_msg->venc_msg_info.msgcode =
+			VEN_MSG_START;
+		break;
+
+	case VCD_EVT_RESP_STOP:
+		INFO("\n msm_vidc_enc: Sending VCD_EVT_RESP_STOP"
+			 " to client");
+		venc_msg->venc_msg_info.msgcode =
+			VEN_MSG_STOP;
+		break;
+
+	case VCD_EVT_RESP_PAUSE:
+		INFO("\n msm_vidc_enc: Sending VCD_EVT_RESP_PAUSE"
+			 " to client");
+		venc_msg->venc_msg_info.msgcode =
+			VEN_MSG_PAUSE;
+		break;
+
+	default:
+		ERR("%s() : unknown event type %u\n",
+			__func__, event);
+		break;
+	}
+
+	venc_msg->venc_msg_info.msgdata_size = 0;
+
+	mutex_lock(&client_ctx->msg_queue_lock);
+	list_add_tail(&venc_msg->list, &client_ctx->msg_queue);
+	mutex_unlock(&client_ctx->msg_queue_lock);
+	wake_up(&client_ctx->msg_wait);
+}
+
+
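+/*
+ * Central VCD callback: dispatches VCD events to the handlers above and
+ * records the status for callers waiting on client_ctx->event.
+ */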
+void vid_enc_vcd_cb(u32 event, u32 status,
+	void *info, size_t sz, void *handle,
+	void *const client_data)
+{
+	struct video_client_ctx *client_ctx =
+		(struct video_client_ctx *)client_data;
+
+	DBG("Entering %s()\n", __func__);
+
+	if (!client_ctx) {
+		ERR("%s(): client_ctx is NULL\n", __func__);
+		return;
+	}
+
+	client_ctx->event_status = status;
+
+	switch (event) {
+	case VCD_EVT_RESP_OPEN:
+		vid_enc_vcd_open_done(client_ctx,
+		(struct vcd_handle_container *)info);
+		break;
+
+	case VCD_EVT_RESP_INPUT_DONE:
+	case VCD_EVT_RESP_INPUT_FLUSHED:
+		vid_enc_input_frame_done(client_ctx, event,
+		status, (struct vcd_frame_data *)info);
+		break;
+
+	case VCD_EVT_RESP_OUTPUT_DONE:
+	case VCD_EVT_RESP_OUTPUT_FLUSHED:
+		vid_enc_output_frame_done(client_ctx, event, status,
+		(struct vcd_frame_data *)info);
+		break;
+
+	case VCD_EVT_RESP_PAUSE:
+	case VCD_EVT_RESP_START:
+	case VCD_EVT_RESP_STOP:
+	case VCD_EVT_RESP_FLUSH_INPUT_DONE:
+	case VCD_EVT_RESP_FLUSH_OUTPUT_DONE:
+	case VCD_EVT_IND_OUTPUT_RECONFIG:
+	case VCD_EVT_IND_HWERRFATAL:
+	case VCD_EVT_IND_RESOURCES_LOST:
+		vid_enc_lean_event(client_ctx, event, status);
+		break;
+
+	default:
+		ERR("%s() :  Error - Invalid event type =%u\n",
+		__func__, event);
+		break;
+	}
+}
+
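+/* Wait condition for the message queue: true if a message is queued or the
+ * client has requested that message reads stop.
+ */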
+static u32 vid_enc_msg_pending(struct video_client_ctx *client_ctx)
+{
+	u32 islist_empty = 0;
+
+	mutex_lock(&client_ctx->msg_queue_lock);
+	islist_empty = list_empty(&client_ctx->msg_queue);
+	mutex_unlock(&client_ctx->msg_queue_lock);
+
+	if (islist_empty) {
+		DBG("%s(): vid_enc msg queue empty\n",
+			__func__);
+		if (client_ctx->stop_msg) {
+			DBG("%s(): List empty and Stop Msg set\n",
+				__func__);
+			return client_ctx->stop_msg;
+		}
+	} else
+		DBG("%s(): vid_enc msg queue Not empty\n",
+			__func__);
+
+	return !islist_empty;
+}
+
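+/*
+ * Block until a message is available (or the read is interrupted or
+ * stopped), then copy the oldest queued message to the caller.
+ */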
+static u32 vid_enc_get_next_msg(struct video_client_ctx *client_ctx,
+		struct venc_msg *venc_msg_info)
+{
+	int rc;
+	struct vid_enc_msg *vid_enc_msg = NULL;
+
+	if (!client_ctx)
+		return false;
+
+	rc = wait_event_interruptible(client_ctx->msg_wait,
+		vid_enc_msg_pending(client_ctx));
+
+	if (rc < 0 || client_ctx->stop_msg) {
+		DBG("rc = %d, stop_msg = %u\n", rc, client_ctx->stop_msg);
+		return false;
+	}
+
+	mutex_lock(&client_ctx->msg_queue_lock);
+
+	if (!list_empty(&client_ctx->msg_queue)) {
+		DBG("%s(): After Wait\n", __func__);
+		vid_enc_msg = list_first_entry(&client_ctx->msg_queue,
+					struct vid_enc_msg, list);
+		list_del(&vid_enc_msg->list);
+		memcpy(venc_msg_info, &vid_enc_msg->venc_msg_info,
+		sizeof(struct venc_msg));
+		kfree(vid_enc_msg);
+	}
+	mutex_unlock(&client_ctx->msg_queue_lock);
+	return true;
+}
+
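+/*
+ * Tear down one encoder client: stop the core (bounded by a 5 second
+ * timeout), drain any queued messages, close the VCD session and free
+ * the client slot.
+ */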
+static u32 vid_enc_close_client(struct video_client_ctx *client_ctx)
+{
+	struct vid_enc_msg *vid_enc_msg = NULL;
+	u32 vcd_status;
+	int rc;
+
+	INFO("\n msm_vidc_enc: Inside %s()", __func__);
+	if (!client_ctx || (!client_ctx->vcd_handle)) {
+		ERR("\n %s(): Invalid client_ctx", __func__);
+		return false;
+	}
+
+	mutex_lock(&vid_enc_device_p->lock);
+
+	if (!stop_cmd) {
+		vcd_status = vcd_stop(client_ctx->vcd_handle);
+		DBG("Waiting for VCD_STOP: Before Timeout\n");
+		if (!vcd_status) {
+			rc = wait_for_completion_timeout(&client_ctx->event,
+				5 * HZ);
+			if (!rc) {
+				ERR("%s: ERROR vcd_stop timed out, "
+					"rc = %d\n", __func__, rc);
+			}
+
+			if (client_ctx->event_status) {
+				ERR("%s: ERROR vcd_stop not successful\n",
+					__func__);
+			}
+		}
+	}
+	DBG("VCD_STOPPED: After Timeout, calling VCD_CLOSE\n");
+	mutex_lock(&client_ctx->msg_queue_lock);
+	while (!list_empty(&client_ctx->msg_queue)) {
+		DBG("%s(): Delete remaining entries\n", __func__);
+		vid_enc_msg = list_first_entry(&client_ctx->msg_queue,
+					struct vid_enc_msg, list);
+		list_del(&vid_enc_msg->list);
+		kfree(vid_enc_msg);
+	}
+	mutex_unlock(&client_ctx->msg_queue_lock);
+	vcd_status = vcd_close(client_ctx->vcd_handle);
+
+	if (vcd_status) {
+		mutex_unlock(&vid_enc_device_p->lock);
+		return false;
+	}
+
+	memset((void *)client_ctx, 0,
+		sizeof(struct video_client_ctx));
+
+	vid_enc_device_p->num_clients--;
+	stop_cmd = 0;
+	mutex_unlock(&vid_enc_device_p->lock);
+	return true;
+}
+
+
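+/*
+ * Per-open initialisation: reserve a client slot, load the firmware on the
+ * first open, and open a VCD session, waiting for the asynchronous
+ * VCD_EVT_RESP_OPEN callback before returning.
+ */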
+static int vid_enc_open(struct inode *inode, struct file *file)
+{
+	s32 client_index;
+	struct video_client_ctx *client_ctx;
+	u32 vcd_status = VCD_ERR_FAIL;
+	u8 client_count = 0;
+
+	INFO("\n msm_vidc_enc: Inside %s()", __func__);
+
+	mutex_lock(&vid_enc_device_p->lock);
+
+	stop_cmd = 0;
+	client_count = vcd_get_num_of_clients();
+	if (client_count == VIDC_MAX_NUM_CLIENTS) {
+		ERR("ERROR : vid_enc_open() max number of clients"
+		    "limit reached\n");
+		mutex_unlock(&vid_enc_device_p->lock);
+		return -ENODEV;
+	}
+
+	DBG(" Virtual Address of ioremap is %p\n", vid_enc_device_p->virt_base);
+	if (!vid_enc_device_p->num_clients) {
+		if (!vidc_load_firmware()) {
+			mutex_unlock(&vid_enc_device_p->lock);
+			return -ENODEV;
+		}
+	}
+
+	client_index = vid_enc_get_empty_client_index();
+
+	if (client_index < 0) {
+		ERR("%s() : No free client slots available\n",
+			__func__);
+		mutex_unlock(&vid_enc_device_p->lock);
+		return -ENODEV;
+	}
+
+	client_ctx =
+		&vid_enc_device_p->venc_clients[client_index];
+	vid_enc_device_p->num_clients++;
+
+	init_completion(&client_ctx->event);
+	mutex_init(&client_ctx->msg_queue_lock);
+	INIT_LIST_HEAD(&client_ctx->msg_queue);
+	init_waitqueue_head(&client_ctx->msg_wait);
+	vcd_status = vcd_open(vid_enc_device_p->device_handle, false,
+		vid_enc_vcd_cb, client_ctx);
+	client_ctx->stop_msg = 0;
+
+	if (!vcd_status) {
+		wait_for_completion(&client_ctx->event);
+		if (client_ctx->event_status) {
+			ERR("callback for vcd_open returned error: %u",
+				client_ctx->event_status);
+			mutex_unlock(&vid_enc_device_p->lock);
+			return -EFAULT;
+		}
+	} else {
+		ERR("vcd_open returned error: %u", vcd_status);
+		mutex_unlock(&vid_enc_device_p->lock);
+		return -EFAULT;
+	}
+	file->private_data = client_ctx;
+	mutex_unlock(&vid_enc_device_p->lock);
+	return 0;
+}
+
+static int vid_enc_release(struct inode *inode, struct file *file)
+{
+	struct video_client_ctx *client_ctx = file->private_data;
+	INFO("\n msm_vidc_enc: Inside %s()", __func__);
+	vid_enc_close_client(client_ctx);
+	vidc_release_firmware();
+#ifndef USE_RES_TRACKER
+	vidc_disable_clk();
+#endif
+	INFO("\n msm_vidc_enc: Return from %s()", __func__);
+	return 0;
+}
+
+static const struct file_operations vid_enc_fops = {
+	.owner = THIS_MODULE,
+	.open = vid_enc_open,
+	.release = vid_enc_release,
+	.unlocked_ioctl = vid_enc_ioctl,
+};
+
+void vid_enc_interrupt_deregister(void)
+{
+}
+
+void vid_enc_interrupt_register(void *device_name)
+{
+}
+
+void vid_enc_interrupt_clear(void)
+{
+}
+
+void *vid_enc_map_dev_base_addr(void *device_name)
+{
+	return vid_enc_device_p->virt_base;
+}
+
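+/*
+ * One-time VCD registration for the encoder: reset the client table,
+ * resolve the mapped register base via vidc_get_ioaddr() and hand the
+ * ISR/address callbacks above to vcd_init().
+ */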
+static int vid_enc_vcd_init(void)
+{
+	int rc;
+	struct vcd_init_config vcd_init_config;
+	u32 i;
+
+	INFO("\n msm_vidc_enc: Inside %s()", __func__);
+	vid_enc_device_p->num_clients = 0;
+
+	for (i = 0; i < VIDC_MAX_NUM_CLIENTS; i++) {
+		memset((void *)&vid_enc_device_p->venc_clients[i], 0,
+		sizeof(vid_enc_device_p->venc_clients[i]));
+	}
+
+	mutex_init(&vid_enc_device_p->lock);
+	vid_enc_device_p->virt_base = vidc_get_ioaddr();
+
+	if (!vid_enc_device_p->virt_base) {
+		ERR("%s() : ioremap failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	vcd_init_config.device_name = "VIDC";
+	vcd_init_config.map_dev_base_addr =
+		vid_enc_map_dev_base_addr;
+	vcd_init_config.interrupt_clr =
+		vid_enc_interrupt_clear;
+	vcd_init_config.register_isr =
+		vid_enc_interrupt_register;
+	vcd_init_config.deregister_isr =
+		vid_enc_interrupt_deregister;
+
+	rc = vcd_init(&vcd_init_config,
+		&vid_enc_device_p->device_handle);
+
+	if (rc) {
+		ERR("%s() : vcd_init failed\n",
+			__func__);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static int __init vid_enc_init(void)
+{
+	int rc = 0;
+	struct device *class_devp;
+
+	INFO("\n msm_vidc_enc: Inside %s()", __func__);
+	vid_enc_device_p = kzalloc(sizeof(struct vid_enc_dev),
+					 GFP_KERNEL);
+	if (!vid_enc_device_p) {
+		ERR("%s Unable to allocate memory for vid_enc_dev\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	rc = alloc_chrdev_region(&vid_enc_dev_num, 0, 1, VID_ENC_NAME);
+	if (rc < 0) {
+		ERR("%s: alloc_chrdev_region Failed rc = %d\n",
+			__func__, rc);
+		goto error_vid_enc_alloc_chrdev_region;
+	}
+
+	vid_enc_class = class_create(THIS_MODULE, VID_ENC_NAME);
+	if (IS_ERR(vid_enc_class)) {
+		rc = PTR_ERR(vid_enc_class);
+		ERR("%s: couldn't create vid_enc_class rc = %d\n",
+			__func__, rc);
+		goto error_vid_enc_class_create;
+	}
+
+	class_devp = device_create(vid_enc_class, NULL,
+				vid_enc_dev_num, NULL, VID_ENC_NAME);
+
+	if (IS_ERR(class_devp)) {
+		rc = PTR_ERR(class_devp);
+		ERR("%s: class device_create failed %d\n",
+		__func__, rc);
+		goto error_vid_enc_class_device_create;
+	}
+
+	vid_enc_device_p->device = class_devp;
+
+	cdev_init(&vid_enc_device_p->cdev, &vid_enc_fops);
+	vid_enc_device_p->cdev.owner = THIS_MODULE;
+	rc = cdev_add(&(vid_enc_device_p->cdev), vid_enc_dev_num, 1);
+
+	if (rc < 0) {
+		ERR("%s: cdev_add failed %d\n",
+		__func__, rc);
+		goto error_vid_enc_cdev_add;
+	}
+	rc = vid_enc_vcd_init();
+	if (rc < 0) {
+		ERR("%s: vid_enc_vcd_init failed %d\n", __func__, rc);
+		cdev_del(&vid_enc_device_p->cdev);
+		goto error_vid_enc_cdev_add;
+	}
+	return 0;
+
+error_vid_enc_cdev_add:
+	device_destroy(vid_enc_class, vid_enc_dev_num);
+error_vid_enc_class_device_create:
+	class_destroy(vid_enc_class);
+error_vid_enc_class_create:
+	unregister_chrdev_region(vid_enc_dev_num, 1);
+error_vid_enc_alloc_chrdev_region:
+	kfree(vid_enc_device_p);
+
+	return rc;
+}
+
+static void __exit vid_enc_exit(void)
+{
+	INFO("\n msm_vidc_enc: Inside %s()", __func__);
+	cdev_del(&(vid_enc_device_p->cdev));
+	device_destroy(vid_enc_class, vid_enc_dev_num);
+	class_destroy(vid_enc_class);
+	unregister_chrdev_region(vid_enc_dev_num, 1);
+	kfree(vid_enc_device_p);
+	INFO("\n msm_vidc_enc: Return from %s()", __func__);
+}
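+
+/*
+ * Unlocked ioctl handler.  Every command receives a struct venc_ioctl_msg
+ * from user space whose "in" and "out" members point at command-specific
+ * buffers that are copied in and out here with copy_from_user/copy_to_user.
+ */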
+static long vid_enc_ioctl(struct file *file,
+		unsigned cmd, unsigned long u_arg)
+{
+	struct video_client_ctx *client_ctx = NULL;
+	struct venc_ioctl_msg venc_msg;
+	void __user *arg = (void __user *)u_arg;
+	u32 result = true;
+
+	DBG("%s\n", __func__);
+
+	client_ctx = (struct video_client_ctx *)file->private_data;
+	if (!client_ctx) {
+		ERR("!client_ctx. Cannot attach to device handle\n");
+		return -ENODEV;
+	}
+
+	switch (cmd) {
+	case VEN_IOCTL_CMD_READ_NEXT_MSG:
+	{
+		struct venc_msg cb_msg;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+		DBG("VEN_IOCTL_CMD_READ_NEXT_MSG\n");
+		result = vid_enc_get_next_msg(client_ctx, &cb_msg);
+		if (!result)
+			return -EIO;
+		if (copy_to_user(venc_msg.out, &cb_msg, sizeof(cb_msg)))
+			return -EFAULT;
+		break;
+	}
+	case VEN_IOCTL_CMD_STOP_READ_MSG:
+	{
+		DBG("VEN_IOCTL_CMD_STOP_READ_MSG\n");
+		client_ctx->stop_msg = 1;
+		wake_up(&client_ctx->msg_wait);
+		break;
+	}
+	case VEN_IOCTL_CMD_ENCODE_FRAME:
+	case VEN_IOCTL_CMD_FILL_OUTPUT_BUFFER:
+	{
+		struct venc_buffer enc_buffer;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+		DBG("VEN_IOCTL_CMD_ENCODE_FRAME"
+			"/VEN_IOCTL_CMD_FILL_OUTPUT_BUFFER\n");
+		if (copy_from_user(&enc_buffer, venc_msg.in,
+						   sizeof(enc_buffer)))
+			return -EFAULT;
+		if (cmd == VEN_IOCTL_CMD_ENCODE_FRAME)
+			result = vid_enc_encode_frame(client_ctx,
+					&enc_buffer);
+		else
+			result = vid_enc_fill_output_buffer(client_ctx,
+					&enc_buffer);
+		if (!result) {
+			DBG("\n VEN_IOCTL_CMD_ENCODE_FRAME/"
+				"VEN_IOCTL_CMD_FILL_OUTPUT_BUFFER failed");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_SET_INPUT_BUFFER:
+	case VEN_IOCTL_SET_OUTPUT_BUFFER:
+	{
+		enum venc_buffer_dir buffer_dir;
+		struct venc_bufferpayload buffer_info;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+		DBG("VEN_IOCTL_SET_INPUT_BUFFER/VEN_IOCTL_SET_OUTPUT_BUFFER\n");
+		if (copy_from_user(&buffer_info, venc_msg.in,
+			sizeof(buffer_info)))
+			return -EFAULT;
+		buffer_dir = VEN_BUFFER_TYPE_INPUT;
+		if (cmd == VEN_IOCTL_SET_OUTPUT_BUFFER)
+			buffer_dir = VEN_BUFFER_TYPE_OUTPUT;
+		result = vid_enc_set_buffer(client_ctx, &buffer_info,
+				buffer_dir);
+		if (!result) {
+			DBG("\n VEN_IOCTL_SET_INPUT_BUFFER"
+				"/VEN_IOCTL_SET_OUTPUT_BUFFER failed");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_CMD_FREE_INPUT_BUFFER:
+	case VEN_IOCTL_CMD_FREE_OUTPUT_BUFFER:
+	{
+		enum venc_buffer_dir buffer_dir;
+		struct venc_bufferpayload buffer_info;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+
+		DBG("VEN_IOCTL_CMD_FREE_INPUT_BUFFER/"
+			"VEN_IOCTL_CMD_FREE_OUTPUT_BUFFER\n");
+
+		if (copy_from_user(&buffer_info, venc_msg.in,
+			sizeof(buffer_info)))
+			return -EFAULT;
+
+		buffer_dir = VEN_BUFFER_TYPE_INPUT;
+		if (cmd == VEN_IOCTL_CMD_FREE_OUTPUT_BUFFER)
+			buffer_dir = VEN_BUFFER_TYPE_OUTPUT;
+
+		result = vid_enc_free_buffer(client_ctx, &buffer_info,
+				buffer_dir);
+		if (!result) {
+			DBG("\n VEN_IOCTL_CMD_FREE_OUTPUT_BUFFER"
+				"/VEN_IOCTL_CMD_FREE_OUTPUT_BUFFER failed");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_SET_INPUT_BUFFER_REQ:
+	case VEN_IOCTL_SET_OUTPUT_BUFFER_REQ:
+	{
+		struct venc_allocatorproperty allocatorproperty;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+
+		DBG("VEN_IOCTL_SET_INPUT_BUFFER_REQ"
+			"/VEN_IOCTL_SET_OUTPUT_BUFFER_REQ\n");
+
+		if (copy_from_user(&allocatorproperty, venc_msg.in,
+			sizeof(allocatorproperty)))
+			return -EFAULT;
+
+		if (cmd == VEN_IOCTL_SET_OUTPUT_BUFFER_REQ)
+			result = vid_enc_set_buffer_req(client_ctx,
+					&allocatorproperty, false);
+		else
+			result = vid_enc_set_buffer_req(client_ctx,
+					&allocatorproperty, true);
+		if (!result) {
+			DBG("setting VEN_IOCTL_SET_OUTPUT_BUFFER_REQ/"
+			"VEN_IOCTL_SET_INPUT_BUFFER_REQ failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_GET_INPUT_BUFFER_REQ:
+	case VEN_IOCTL_GET_OUTPUT_BUFFER_REQ:
+	{
+		struct venc_allocatorproperty allocatorproperty;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+
+		DBG("VEN_IOCTL_GET_INPUT_BUFFER_REQ/"
+			"VEN_IOCTL_GET_OUTPUT_BUFFER_REQ\n");
+
+		if (cmd == VEN_IOCTL_GET_OUTPUT_BUFFER_REQ)
+			result = vid_enc_get_buffer_req(client_ctx,
+					&allocatorproperty, false);
+		else
+			result = vid_enc_get_buffer_req(client_ctx,
+					&allocatorproperty, true);
+		if (!result)
+			return -EIO;
+		if (copy_to_user(venc_msg.out, &allocatorproperty,
+				sizeof(allocatorproperty)))
+			return -EFAULT;
+		break;
+	}
+	case VEN_IOCTL_CMD_FLUSH:
+	{
+		struct venc_bufferflush bufferflush;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+
+		DBG("VEN_IOCTL_CMD_FLUSH\n");
+		if (copy_from_user(&bufferflush, venc_msg.in,
+			sizeof(bufferflush)))
+			return -EFAULT;
+		INFO("\n %s(): Calling vid_enc_flush with mode = %lu",
+			 __func__, bufferflush.flush_mode);
+		result = vid_enc_flush(client_ctx, &bufferflush);
+
+		if (!result) {
+			ERR("setting VEN_IOCTL_CMD_FLUSH failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_CMD_START:
+	{
+		INFO("\n %s(): Executing VEN_IOCTL_CMD_START", __func__);
+		result = vid_enc_start_stop(client_ctx, true);
+		if (!result) {
+			ERR("setting VEN_IOCTL_CMD_START failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_CMD_STOP:
+	{
+		INFO("\n %s(): Executing VEN_IOCTL_CMD_STOP", __func__);
+		result = vid_enc_start_stop(client_ctx, false);
+		if (!result) {
+			ERR("setting VEN_IOCTL_CMD_STOP failed\n");
+			return -EIO;
+		}
+		stop_cmd = 1;
+		break;
+	}
+	case VEN_IOCTL_CMD_PAUSE:
+	{
+		INFO("\n %s(): Executing VEN_IOCTL_CMD_PAUSE", __func__);
+		result = vid_enc_pause_resume(client_ctx, true);
+		if (!result) {
+			ERR("setting VEN_IOCTL_CMD_PAUSE failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_CMD_RESUME:
+	{
+		INFO("\n %s(): Executing VEN_IOCTL_CMD_RESUME", __func__);
+		result = vid_enc_pause_resume(client_ctx, false);
+		if (!result) {
+			ERR("setting VEN_IOCTL_CMD_RESUME failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_SET_RECON_BUFFER:
+	{
+		struct venc_recon_addr venc_recon;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+		DBG("VEN_IOCTL_SET_RECON_BUFFER\n");
+		if (copy_from_user(&venc_recon, venc_msg.in,
+				sizeof(venc_recon)))
+			return -EFAULT;
+		result = vid_enc_set_recon_buffers(client_ctx,
+					&venc_recon);
+		if (!result) {
+			ERR("setting VEN_IOCTL_SET_RECON_BUFFER failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_FREE_RECON_BUFFER:
+	{
+		DBG("VEN_IOCTL_FREE_RECON_BUFFER\n");
+		result = vid_enc_free_recon_buffers(client_ctx);
+		if (!result) {
+			ERR("VEN_IOCTL_FREE_RECON_BUFFER failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_GET_RECON_BUFFER_SIZE:
+	{
+		struct venc_recon_buff_size venc_recon_size;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+		DBG("VEN_IOCTL_GET_RECON_BUFFER_SIZE\n");
+		if (copy_from_user(&venc_recon_size, venc_msg.out,
+				sizeof(venc_recon_size)))
+			return -EFAULT;
+		result = vid_enc_get_recon_buffer_size(client_ctx,
+					&venc_recon_size);
+		if (result) {
+			if (copy_to_user(venc_msg.out, &venc_recon_size,
+				sizeof(venc_recon_size)))
+				return -EFAULT;
+		} else {
+			ERR("VEN_IOCTL_GET_RECON_BUFFER_SIZE failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_SET_QP_RANGE:
+	case VEN_IOCTL_GET_QP_RANGE:
+	{
+		struct venc_qprange qprange;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+		DBG("VEN_IOCTL_G(S)ET_QP_RANGE\n");
+		if (cmd == VEN_IOCTL_SET_QP_RANGE) {
+			if (copy_from_user(&qprange, venc_msg.in,
+				sizeof(qprange)))
+				return -EFAULT;
+			result = vid_enc_set_get_qprange(client_ctx,
+					&qprange, true);
+		} else {
+			result = vid_enc_set_get_qprange(client_ctx,
+					&qprange, false);
+			if (result) {
+				if (copy_to_user(venc_msg.out, &qprange,
+					sizeof(qprange)))
+					return -EFAULT;
+			}
+		}
+		if (!result) {
+			ERR("setting VEN_IOCTL_G(S)ET_QP_RANGE failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_SET_HEC:
+	case VEN_IOCTL_GET_HEC:
+	{
+		struct venc_headerextension headerextension;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+		DBG("VEN_IOCTL_(G)SET_HEC\n");
+		if (cmd == VEN_IOCTL_SET_HEC) {
+			if (copy_from_user(&headerextension, venc_msg.in,
+				sizeof(headerextension)))
+				return -EFAULT;
+
+			result = vid_enc_set_get_headerextension(client_ctx,
+					&headerextension, true);
+		} else {
+			result = vid_enc_set_get_headerextension(client_ctx,
+					&headerextension, false);
+			if (result) {
+				if (copy_to_user(venc_msg.out, &headerextension,
+				sizeof(headerextension)))
+					return -EFAULT;
+			}
+		}
+
+		if (!result) {
+			ERR("setting VEN_IOCTL_(G)SET_HEC failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_SET_TARGET_BITRATE:
+	case VEN_IOCTL_GET_TARGET_BITRATE:
+	{
+		struct venc_targetbitrate targetbitrate;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+		DBG("VEN_IOCTL_(G)SET_TARGET_BITRATE\n");
+		if (cmd == VEN_IOCTL_SET_TARGET_BITRATE) {
+			if (copy_from_user(&targetbitrate, venc_msg.in,
+				sizeof(targetbitrate)))
+				return -EFAULT;
+
+			result = vid_enc_set_get_bitrate(client_ctx,
+					&targetbitrate, true);
+		} else {
+			result = vid_enc_set_get_bitrate(client_ctx,
+					&targetbitrate, false);
+			if (result) {
+				if (copy_to_user(venc_msg.out, &targetbitrate,
+					sizeof(targetbitrate)))
+					return -EFAULT;
+			}
+		}
+		if (!result) {
+			ERR("setting VEN_IOCTL_(G)SET_TARGET_BITRATE failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_SET_FRAME_RATE:
+	case VEN_IOCTL_GET_FRAME_RATE:
+	{
+		struct venc_framerate framerate;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+		DBG("VEN_IOCTL_(G)SET_FRAME_RATE\n");
+		if (cmd == VEN_IOCTL_SET_FRAME_RATE) {
+			if (copy_from_user(&framerate, venc_msg.in,
+				sizeof(framerate)))
+				return -EFAULT;
+			result = vid_enc_set_get_framerate(client_ctx,
+					&framerate, true);
+		} else {
+			result = vid_enc_set_get_framerate(client_ctx,
+					&framerate,	false);
+			if (result) {
+				if (copy_to_user(venc_msg.out, &framerate,
+					sizeof(framerate)))
+					return -EFAULT;
+			}
+		}
+
+		if (!result) {
+			ERR("VEN_IOCTL_(G)SET_FRAME_RATE failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_SET_VOP_TIMING_CFG:
+	case VEN_IOCTL_GET_VOP_TIMING_CFG:
+	{
+		struct venc_voptimingcfg voptimingcfg;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+
+		DBG("VEN_IOCTL_(G)SET_VOP_TIMING_CFG\n");
+		if (cmd == VEN_IOCTL_SET_VOP_TIMING_CFG) {
+			if (copy_from_user(&voptimingcfg, venc_msg.in,
+				sizeof(voptimingcfg)))
+				return -EFAULT;
+			result = vid_enc_set_get_voptimingcfg(client_ctx,
+					&voptimingcfg, true);
+		} else {
+			result = vid_enc_set_get_voptimingcfg(client_ctx,
+					&voptimingcfg, false);
+			if (result) {
+				if (copy_to_user(venc_msg.out, &voptimingcfg,
+					sizeof(voptimingcfg)))
+					return -EFAULT;
+			}
+		}
+		if (!result) {
+			ERR("VEN_IOCTL_(G)SET_VOP_TIMING_CFG failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_SET_RATE_CTRL_CFG:
+	case VEN_IOCTL_GET_RATE_CTRL_CFG:
+	{
+		struct venc_ratectrlcfg ratectrlcfg;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+		DBG("VEN_IOCTL_(G)SET_RATE_CTRL_CFG\n");
+		if (cmd == VEN_IOCTL_SET_RATE_CTRL_CFG) {
+			if (copy_from_user(&ratectrlcfg, venc_msg.in,
+				sizeof(ratectrlcfg)))
+				return -EFAULT;
+
+			result = vid_enc_set_get_ratectrlcfg(client_ctx,
+					&ratectrlcfg, true);
+		} else {
+			result = vid_enc_set_get_ratectrlcfg(client_ctx,
+					&ratectrlcfg, false);
+			if (result) {
+				if (copy_to_user(venc_msg.out, &ratectrlcfg,
+					sizeof(ratectrlcfg)))
+					return -EFAULT;
+			}
+		}
+		if (!result) {
+			ERR("setting VEN_IOCTL_(G)SET_RATE_CTRL_CFG failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_SET_MULTI_SLICE_CFG:
+	case VEN_IOCTL_GET_MULTI_SLICE_CFG:
+	{
+		struct venc_multiclicecfg multiclicecfg;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+		DBG("VEN_IOCTL_(G)SET_MULTI_SLICE_CFG\n");
+		if (cmd == VEN_IOCTL_SET_MULTI_SLICE_CFG) {
+			if (copy_from_user(&multiclicecfg, venc_msg.in,
+				sizeof(multiclicecfg)))
+				return -EFAULT;
+
+			result = vid_enc_set_get_multiclicecfg(client_ctx,
+					&multiclicecfg, true);
+		} else {
+			result = vid_enc_set_get_multiclicecfg(client_ctx,
+					&multiclicecfg, false);
+			if (result) {
+				if (copy_to_user(venc_msg.out, &multiclicecfg,
+					sizeof(multiclicecfg)))
+					return -EFAULT;
+			}
+		}
+		if (!result) {
+			ERR("VEN_IOCTL_(G)SET_MULTI_SLICE_CFG failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_SET_INTRA_REFRESH:
+	case VEN_IOCTL_GET_INTRA_REFRESH:
+	{
+		struct venc_intrarefresh intrarefresh;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+		DBG("VEN_IOCTL_(G)SET_INTRA_REFRESH\n");
+		if (cmd == VEN_IOCTL_SET_INTRA_REFRESH) {
+			if (copy_from_user(&intrarefresh, venc_msg.in,
+				sizeof(intrarefresh)))
+				return -EFAULT;
+			result = vid_enc_set_get_intrarefresh(client_ctx,
+					&intrarefresh, true);
+		} else {
+			result = vid_enc_set_get_intrarefresh(client_ctx,
+					&intrarefresh, false);
+			if (result) {
+				if (copy_to_user(venc_msg.out, &intrarefresh,
+					sizeof(intrarefresh)))
+					return -EFAULT;
+			}
+		}
+		if (!result) {
+			ERR("setting VEN_IOCTL_SET_INTRA_REFRESH failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_SET_DEBLOCKING_CFG:
+	case VEN_IOCTL_GET_DEBLOCKING_CFG:
+	{
+		struct venc_dbcfg dbcfg;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+
+		DBG("VEN_IOCTL_(G)SET_DEBLOCKING_CFG\n");
+		if (cmd == VEN_IOCTL_SET_DEBLOCKING_CFG) {
+			if (copy_from_user(&dbcfg, venc_msg.in,
+				sizeof(dbcfg)))
+				return -EFAULT;
+			result = vid_enc_set_get_dbcfg(client_ctx,
+					&dbcfg, true);
+		} else {
+			result = vid_enc_set_get_dbcfg(client_ctx,
+					&dbcfg, false);
+			if (result) {
+				if (copy_to_user(venc_msg.out, &dbcfg,
+				sizeof(dbcfg)))
+					return -EFAULT;
+			}
+		}
+		if (!result) {
+			ERR("setting VEN_IOCTL_SET_DEBLOCKING_CFG failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_SET_ENTROPY_CFG:
+	case VEN_IOCTL_GET_ENTROPY_CFG:
+	{
+		struct venc_entropycfg entropy_cfg;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+		DBG("VEN_IOCTL_(G)SET_ENTROPY_CFG\n");
+		if (cmd == VEN_IOCTL_SET_ENTROPY_CFG) {
+			if (copy_from_user(&entropy_cfg, venc_msg.in,
+				sizeof(entropy_cfg)))
+				return -EFAULT;
+			result = vid_enc_set_get_entropy_cfg(client_ctx,
+					&entropy_cfg, true);
+		} else {
+			result = vid_enc_set_get_entropy_cfg(client_ctx,
+					&entropy_cfg, false);
+			if (result) {
+				if (copy_to_user(venc_msg.out, &entropy_cfg,
+				sizeof(entropy_cfg)))
+					return -EFAULT;
+			}
+		}
+		if (!result) {
+			ERR("setting VEN_IOCTL_(G)SET_ENTROPY_CFG failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_GET_SEQUENCE_HDR:
+	{
+		struct venc_seqheader seq_header, seq_header_user;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+
+		DBG("VEN_IOCTL_GET_SEQUENCE_HDR\n");
+		if (copy_from_user(&seq_header_user, venc_msg.in,
+			sizeof(seq_header_user)))
+			return -EFAULT;
+		seq_header.hdrbufptr = NULL;
+		seq_header.bufsize = seq_header_user.bufsize;
+		result = vid_enc_get_sequence_header(client_ctx,
+				&seq_header);
+		if (result && ((copy_to_user(seq_header_user.hdrbufptr,
+			seq_header.hdrbufptr, seq_header.hdrlen)) ||
+			(copy_to_user(&seq_header_user.hdrlen,
+			&seq_header.hdrlen,
+			sizeof(seq_header.hdrlen)))))
+				result = false;
+		kfree(seq_header.hdrbufptr);
+		if (!result)
+			return -EIO;
+		break;
+	}
+	case VEN_IOCTL_CMD_REQUEST_IFRAME:
+	{
+		result = vid_enc_request_iframe(client_ctx);
+		if (!result) {
+			ERR("setting VEN_IOCTL_CMD_REQUEST_IFRAME failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_SET_INTRA_PERIOD:
+	case VEN_IOCTL_GET_INTRA_PERIOD:
+	{
+		struct venc_intraperiod intraperiod;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+		DBG("VEN_IOCTL_(G)SET_INTRA_PERIOD\n");
+		if (cmd == VEN_IOCTL_SET_INTRA_PERIOD) {
+			if (copy_from_user(&intraperiod, venc_msg.in,
+				sizeof(intraperiod)))
+				return -EFAULT;
+			result = vid_enc_set_get_intraperiod(client_ctx,
+					&intraperiod, true);
+		} else {
+			result = vid_enc_set_get_intraperiod(client_ctx,
+					&intraperiod, false);
+			if (result) {
+				if (copy_to_user(venc_msg.out, &intraperiod,
+					sizeof(intraperiod)))
+					return -EFAULT;
+			}
+		}
+		if (!result) {
+			ERR("setting VEN_IOCTL_(G)SET_INTRA_PERIOD failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_SET_SESSION_QP:
+	case VEN_IOCTL_GET_SESSION_QP:
+	{
+		struct venc_sessionqp session_qp;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+		DBG("VEN_IOCTL_(G)SET_SESSION_QP\n");
+		if (cmd == VEN_IOCTL_SET_SESSION_QP) {
+			if (copy_from_user(&session_qp,	venc_msg.in,
+				sizeof(session_qp)))
+				return -EFAULT;
+			result = vid_enc_set_get_session_qp(client_ctx,
+					&session_qp, true);
+		} else {
+			result = vid_enc_set_get_session_qp(client_ctx,
+					&session_qp, false);
+			if (result) {
+				if (copy_to_user(venc_msg.out, &session_qp,
+					sizeof(session_qp)))
+					return -EFAULT;
+			}
+		}
+		if (!result) {
+			ERR("setting VEN_IOCTL_(G)SET_SESSION_QP failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_SET_PROFILE_LEVEL:
+	case VEN_IOCTL_GET_PROFILE_LEVEL:
+	{
+		struct ven_profilelevel profile_level;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+
+		DBG("VEN_IOCTL_(G)SET_PROFILE_LEVEL\n");
+		if (cmd == VEN_IOCTL_SET_PROFILE_LEVEL) {
+			if (copy_from_user(&profile_level, venc_msg.in,
+				sizeof(profile_level)))
+				return -EFAULT;
+			result = vid_enc_set_get_profile_level(client_ctx,
+					&profile_level, true);
+		} else {
+			result = vid_enc_set_get_profile_level(client_ctx,
+					&profile_level, false);
+			if (result) {
+				if (copy_to_user(venc_msg.out,
+				&profile_level,	sizeof(profile_level)))
+					return -EFAULT;
+			}
+		}
+		if (!result) {
+			ERR("setting VEN_IOCTL_SET_PROFILE_LEVEL failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_SET_CODEC_PROFILE:
+	case VEN_IOCTL_GET_CODEC_PROFILE:
+	{
+		struct venc_profile profile;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+
+		DBG("VEN_IOCTL_(G)SET_CODEC_PROFILE\n");
+		if (cmd == VEN_IOCTL_SET_CODEC_PROFILE) {
+			if (copy_from_user(&profile, venc_msg.in,
+					sizeof(profile)))
+				return -EFAULT;
+			result = vid_enc_set_get_profile(client_ctx,
+					&profile, true);
+		} else {
+			result = vid_enc_set_get_profile(client_ctx,
+					&profile, false);
+			if (result) {
+				if (copy_to_user(venc_msg.out, &profile,
+						sizeof(profile)))
+					return -EFAULT;
+			}
+		}
+		if (!result) {
+			ERR("setting VEN_IOCTL_SET_CODEC_PROFILE failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_SET_SHORT_HDR:
+	case VEN_IOCTL_GET_SHORT_HDR:
+	{
+		struct venc_switch encoder_switch;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+		DBG("Getting VEN_IOCTL_(G)SET_SHORT_HDR\n");
+		if (cmd == VEN_IOCTL_SET_SHORT_HDR) {
+			if (copy_from_user(&encoder_switch,	venc_msg.in,
+				sizeof(encoder_switch)))
+				return -EFAULT;
+
+			result = vid_enc_set_get_short_header(client_ctx,
+					&encoder_switch, true);
+		} else {
+			result = vid_enc_set_get_short_header(client_ctx,
+					&encoder_switch, false);
+			if (result) {
+				if (copy_to_user(venc_msg.out, &encoder_switch,
+					sizeof(encoder_switch)))
+					return -EFAULT;
+			}
+		}
+		if (!result) {
+			ERR("setting VEN_IOCTL_(G)SET_SHORT_HDR failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_SET_BASE_CFG:
+	case VEN_IOCTL_GET_BASE_CFG:
+	{
+		struct venc_basecfg base_config;
+		DBG("VEN_IOCTL_SET_BASE_CFG\n");
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+		if (cmd == VEN_IOCTL_SET_BASE_CFG) {
+			if (copy_from_user(&base_config, venc_msg.in,
+				sizeof(base_config)))
+				return -EFAULT;
+			result = vid_enc_set_get_base_cfg(client_ctx,
+					&base_config, true);
+		} else {
+			result = vid_enc_set_get_base_cfg(client_ctx,
+					&base_config, false);
+			if (result) {
+				if (copy_to_user(venc_msg.out, &base_config,
+					sizeof(base_config)))
+					return -EFAULT;
+			}
+		}
+		if (!result) {
+			ERR("setting VEN_IOCTL_SET_BASE_CFG failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_SET_LIVE_MODE:
+	case VEN_IOCTL_GET_LIVE_MODE:
+	{
+		struct venc_switch encoder_switch;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+
+		DBG("Getting VEN_IOCTL_(G)SET_LIVE_MODE\n");
+		if (cmd == VEN_IOCTL_SET_LIVE_MODE) {
+			if (copy_from_user(&encoder_switch,	venc_msg.in,
+				sizeof(encoder_switch)))
+				return -EFAULT;
+			result = vid_enc_set_get_live_mode(client_ctx,
+					&encoder_switch, true);
+		} else {
+			result = vid_enc_set_get_live_mode(client_ctx,
+					&encoder_switch, false);
+			if (result) {
+				if (copy_to_user(venc_msg.out, &encoder_switch,
+					sizeof(encoder_switch)))
+					return -EFAULT;
+			}
+		}
+		if (!result) {
+			ERR("setting VEN_IOCTL_(G)SET_LIVE_MODE failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_GET_NUMBER_INSTANCES:
+	{
+		DBG("VEN_IOCTL_GET_NUMBER_INSTANCES\n");
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+		if (copy_to_user(venc_msg.out,
+			&vid_enc_device_p->num_clients, sizeof(u32)))
+			return -EFAULT;
+		break;
+	}
+	case VEN_IOCTL_SET_AC_PREDICTION:
+	case VEN_IOCTL_GET_AC_PREDICTION:
+	case VEN_IOCTL_SET_RVLC:
+	case VEN_IOCTL_GET_RVLC:
+	case VEN_IOCTL_SET_ROTATION:
+	case VEN_IOCTL_GET_ROTATION:
+	case VEN_IOCTL_SET_DATA_PARTITION:
+	case VEN_IOCTL_GET_DATA_PARTITION:
+	case VEN_IOCTL_GET_CAPABILITY:
+	default:
+		ERR("%s(): Unsupported ioctl %d\n", __func__, cmd);
+		return -ENOTTY;
+	}
+	return 0;
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Video encoder driver");
+MODULE_VERSION("1.0");
+
+module_init(vid_enc_init);
+module_exit(vid_enc_exit);
diff --git a/drivers/video/msm/vidc/common/enc/venc_internal.c b/drivers/video/msm/vidc/common/enc/venc_internal.c
new file mode 100644
index 0000000..d202d81
--- /dev/null
+++ b/drivers/video/msm/vidc/common/enc/venc_internal.c
@@ -0,0 +1,1784 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/android_pmem.h>
+#include <linux/clk.h>
+
+#include "vidc_type.h"
+#include "vcd_api.h"
+#include "venc_internal.h"
+#include "vidc_init.h"
+
+#if DEBUG
+#define DBG(x...) printk(KERN_DEBUG x)
+#else
+#define DBG(x...)
+#endif
+
+#define ERR(x...) printk(KERN_ERR x)
+
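+/*
+ * The vid_enc_set_get_*() helpers below share one convention: set_flag
+ * true writes the caller-supplied value to the VCD core with
+ * vcd_set_property(), set_flag false reads it back with vcd_get_property()
+ * and converts between the VEN_* and VCD_* encodings.
+ */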
+u32 vid_enc_set_get_base_cfg(struct video_client_ctx *client_ctx,
+		struct venc_basecfg *base_config, u32 set_flag)
+{
+	struct venc_targetbitrate venc_bitrate;
+	struct venc_framerate frame_rate;
+	u32 current_codec;
+
+	if (!client_ctx || !base_config)
+		return false;
+
+	if (!vid_enc_set_get_codec(client_ctx, &current_codec, false))
+		return false;
+
+	DBG("%s(): Current Codec Type = %u\n", __func__, current_codec);
+	if (current_codec != base_config->codectype) {
+		if (!vid_enc_set_get_codec(client_ctx,
+				(u32 *)&base_config->codectype, set_flag))
+			return false;
+	}
+
+	if (!vid_enc_set_get_inputformat(client_ctx,
+			(u32 *)&base_config->inputformat, set_flag))
+		return false;
+
+	if (!vid_enc_set_get_framesize(client_ctx,
+			(u32 *)&base_config->input_height,
+			(u32 *)&base_config->input_width, set_flag))
+		return false;
+
+	if (set_flag)
+		venc_bitrate.target_bitrate = base_config->targetbitrate;
+
+	if (!vid_enc_set_get_bitrate(client_ctx, &venc_bitrate, set_flag))
+		return false;
+
+	if (!set_flag)
+		base_config->targetbitrate = venc_bitrate.target_bitrate;
+
+	if (set_flag) {
+		frame_rate.fps_denominator = base_config->fps_den;
+		frame_rate.fps_numerator = base_config->fps_num;
+	}
+
+	if (!vid_enc_set_get_framerate(client_ctx, &frame_rate, set_flag))
+		return false;
+
+	if (!set_flag) {
+		base_config->fps_den = frame_rate.fps_denominator;
+		base_config->fps_num = frame_rate.fps_numerator;
+	}
+
+	return true;
+}
+
+u32 vid_enc_set_get_inputformat(struct video_client_ctx *client_ctx,
+		u32 *input_format, u32 set_flag)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_buffer_format format;
+	u32 vcd_status = VCD_ERR_FAIL;
+	u32 status = true;
+
+	if (!client_ctx || !input_format)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_BUFFER_FORMAT;
+	vcd_property_hdr.sz =
+		sizeof(struct vcd_property_buffer_format);
+
+	if (set_flag) {
+		switch (*input_format) {
+		case VEN_INPUTFMT_NV12:
+			format.buffer_format = VCD_BUFFER_FORMAT_NV12;
+			break;
+		case VEN_INPUTFMT_NV12_16M2KA:
+			format.buffer_format =
+				VCD_BUFFER_FORMAT_NV12_16M2KA;
+			break;
+		default:
+			status = false;
+			break;
+		}
+
+		if (status) {
+			vcd_status = vcd_set_property(client_ctx->vcd_handle,
+				&vcd_property_hdr, &format);
+			if (vcd_status) {
+				status = false;
+				ERR("%s(): Set VCD_I_BUFFER_FORMAT Failed\n",
+						 __func__);
+			}
+		}
+	} else {
+		vcd_status = vcd_get_property(client_ctx->vcd_handle,
+				&vcd_property_hdr, &format);
+
+		if (vcd_status) {
+			status = false;
+			ERR("%s(): Get VCD_I_BUFFER_FORMAT Failed\n", __func__);
+		} else {
+			switch (format.buffer_format) {
+			case VCD_BUFFER_FORMAT_NV12:
+				*input_format = VEN_INPUTFMT_NV12;
+				break;
+			case VCD_BUFFER_FORMAT_TILE_4x2:
+				*input_format = VEN_INPUTFMT_NV21;
+				break;
+			default:
+				status = false;
+				break;
+			}
+		}
+	}
+	return status;
+}
+
+u32 vid_enc_set_get_codec(struct video_client_ctx *client_ctx, u32 *codec,
+		u32 set_flag)
+{
+	struct vcd_property_codec vcd_property_codec;
+	struct vcd_property_hdr vcd_property_hdr;
+	u32 vcd_status = VCD_ERR_FAIL;
+	u32 status = true;
+
+	if (!client_ctx || !codec)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_CODEC;
+	vcd_property_hdr.sz = sizeof(struct vcd_property_codec);
+
+	if (set_flag) {
+		switch (*codec) {
+		case VEN_CODEC_MPEG4:
+			vcd_property_codec.codec = VCD_CODEC_MPEG4;
+			break;
+		case VEN_CODEC_H263:
+			vcd_property_codec.codec = VCD_CODEC_H263;
+			break;
+		case VEN_CODEC_H264:
+			vcd_property_codec.codec = VCD_CODEC_H264;
+			break;
+		default:
+			status = false;
+			break;
+		}
+
+		if (status) {
+			vcd_status = vcd_set_property(client_ctx->vcd_handle,
+				&vcd_property_hdr, &vcd_property_codec);
+			if (vcd_status) {
+				status = false;
+				ERR("%s(): Set VCD_I_CODEC Failed\n", __func__);
+			}
+		}
+	} else {
+		vcd_status = vcd_get_property(client_ctx->vcd_handle,
+				&vcd_property_hdr, &vcd_property_codec);
+
+		if (vcd_status) {
+			status = false;
+			ERR("%s(): Get VCD_I_CODEC Failed\n",
+					 __func__);
+		} else {
+			switch (vcd_property_codec.codec) {
+			case VCD_CODEC_H263:
+				*codec = VEN_CODEC_H263;
+				break;
+			case VCD_CODEC_H264:
+				*codec = VEN_CODEC_H264;
+				break;
+			case VCD_CODEC_MPEG4:
+				*codec = VEN_CODEC_MPEG4;
+				break;
+			case VCD_CODEC_DIVX_3:
+			case VCD_CODEC_DIVX_4:
+			case VCD_CODEC_DIVX_5:
+			case VCD_CODEC_DIVX_6:
+			case VCD_CODEC_MPEG1:
+			case VCD_CODEC_MPEG2:
+			case VCD_CODEC_VC1:
+			case VCD_CODEC_VC1_RCV:
+			case VCD_CODEC_XVID:
+			default:
+				status = false;
+				break;
+			}
+		}
+	}
+	return status;
+}
+
+u32 vid_enc_set_get_framesize(struct video_client_ctx *client_ctx,
+		u32 *height, u32 *width, u32 set_flag)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_frame_size frame_size;
+	u32 vcd_status = VCD_ERR_FAIL;
+
+	if (!client_ctx || !height || !width)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_FRAME_SIZE;
+	vcd_property_hdr.sz =
+		sizeof(struct vcd_property_frame_size);
+
+	vcd_status = vcd_get_property(client_ctx->vcd_handle,
+					&vcd_property_hdr, &frame_size);
+
+	if (vcd_status) {
+		ERR("%s(): Get VCD_I_FRAME_SIZE Failed\n",
+				__func__);
+		return false;
+	}
+	if (set_flag) {
+		if (frame_size.height != *height ||
+			frame_size.width != *width) {
+			DBG("%s(): ENC Set Size (%d x %d)\n",
+				__func__, *height, *width);
+			frame_size.height = *height;
+			frame_size.width = *width;
+			vcd_status = vcd_set_property(client_ctx->vcd_handle,
+					&vcd_property_hdr, &frame_size);
+			if (vcd_status) {
+				ERR("%s(): Set VCD_I_FRAME_SIZE Failed\n",
+						__func__);
+				return false;
+			}
+		}
+	} else {
+		*height = frame_size.height;
+		*width = frame_size.width;
+	}
+	return true;
+}
+
+u32 vid_enc_set_get_bitrate(struct video_client_ctx *client_ctx,
+		struct venc_targetbitrate *venc_bitrate, u32 set_flag)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_target_bitrate bit_rate;
+	u32 vcd_status = VCD_ERR_FAIL;
+
+	if (!client_ctx || !venc_bitrate)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_TARGET_BITRATE;
+	vcd_property_hdr.sz =
+		sizeof(struct vcd_property_target_bitrate);
+	if (set_flag) {
+		bit_rate.target_bitrate = venc_bitrate->target_bitrate;
+		vcd_status = vcd_set_property(client_ctx->vcd_handle,
+					&vcd_property_hdr, &bit_rate);
+
+		if (vcd_status) {
+			ERR("%s(): Set VCD_I_TARGET_BITRATE Failed\n",
+					__func__);
+			return false;
+		}
+	} else {
+		vcd_status = vcd_get_property(client_ctx->vcd_handle,
+					&vcd_property_hdr, &bit_rate);
+
+		if (vcd_status) {
+			ERR("%s(): Get VCD_I_TARGET_BITRATE Failed\n",
+					__func__);
+			return false;
+		}
+		venc_bitrate->target_bitrate = bit_rate.target_bitrate;
+	}
+	return true;
+}
+
+u32 vid_enc_set_get_framerate(struct video_client_ctx *client_ctx,
+		struct venc_framerate *frame_rate, u32 set_flag)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_frame_rate vcd_frame_rate;
+	u32 vcd_status = VCD_ERR_FAIL;
+
+	if (!client_ctx || !frame_rate)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_FRAME_RATE;
+	vcd_property_hdr.sz =
+				sizeof(struct vcd_property_frame_rate);
+
+	if (set_flag) {
+		vcd_frame_rate.fps_denominator = frame_rate->fps_denominator;
+		vcd_frame_rate.fps_numerator = frame_rate->fps_numerator;
+		vcd_status = vcd_set_property(client_ctx->vcd_handle,
+					&vcd_property_hdr, &vcd_frame_rate);
+
+		if (vcd_status) {
+			ERR("%s(): Set VCD_I_FRAME_RATE Failed\n",
+					__func__);
+			return false;
+		}
+	} else {
+		vcd_status = vcd_get_property(client_ctx->vcd_handle,
+				&vcd_property_hdr, &vcd_frame_rate);
+
+		if (vcd_status) {
+			ERR("%s(): Get VCD_I_FRAME_RATE Failed\n",
+					__func__);
+			return false;
+		}
+		frame_rate->fps_denominator = vcd_frame_rate.fps_denominator;
+		frame_rate->fps_numerator = vcd_frame_rate.fps_numerator;
+	}
+	return true;
+}
+
+u32 vid_enc_set_get_live_mode(struct video_client_ctx *client_ctx,
+		struct venc_switch *encoder_switch, u32 set_flag)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_live live_mode;
+	u32 vcd_status = VCD_ERR_FAIL;
+
+	if (!client_ctx)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_LIVE;
+	vcd_property_hdr.sz =
+				sizeof(struct vcd_property_live);
+
+	if (set_flag) {
+		live_mode.live = 1;
+		if (!encoder_switch->status)
+			live_mode.live = 0;
+
+		vcd_status = vcd_set_property(client_ctx->vcd_handle,
+					&vcd_property_hdr, &live_mode);
+		if (vcd_status) {
+			ERR("%s(): Set VCD_I_LIVE Failed\n",
+					__func__);
+			return false;
+		}
+	} else {
+		vcd_status = vcd_get_property(client_ctx->vcd_handle,
+				&vcd_property_hdr, &live_mode);
+
+		if (vcd_status) {
+			ERR("%s(): Get VCD_I_LIVE Failed\n",
+					__func__);
+			return false;
+		} else {
+			encoder_switch->status = 1;
+			if (!live_mode.live)
+				encoder_switch->status = 0;
+		}
+	}
+	return true;
+}
+
+u32 vid_enc_set_get_short_header(struct video_client_ctx *client_ctx,
+		struct venc_switch *encoder_switch,	u32 set_flag)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_short_header short_header;
+	u32 vcd_status = VCD_ERR_FAIL;
+
+	if (!client_ctx || !encoder_switch)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_SHORT_HEADER;
+	vcd_property_hdr.sz =
+				sizeof(struct vcd_property_short_header);
+
+	if (set_flag) {
+		short_header.short_header = (u32) encoder_switch->status;
+		vcd_status = vcd_set_property(client_ctx->vcd_handle,
+			&vcd_property_hdr, &short_header);
+
+		if (vcd_status) {
+			ERR("%s(): Set VCD_I_SHORT_HEADER Failed\n",
+					__func__);
+			return false;
+		}
+	} else {
+		vcd_status = vcd_get_property(client_ctx->vcd_handle,
+					&vcd_property_hdr, &short_header);
+
+		if (vcd_status) {
+			ERR("%s(): Get VCD_I_SHORT_HEADER Failed\n",
+					__func__);
+			return false;
+		} else {
+			encoder_switch->status =
+				(u8) short_header.short_header;
+		}
+	}
+	return true;
+}
+
+u32 vid_enc_set_get_profile(struct video_client_ctx *client_ctx,
+		struct venc_profile *profile, u32 set_flag)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_profile profile_type;
+	u32 vcd_status = VCD_ERR_FAIL;
+	u32 status = true;
+
+	if (!client_ctx || !profile)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_PROFILE;
+	vcd_property_hdr.sz =
+		sizeof(struct vcd_property_profile);
+
+	if (set_flag) {
+		switch (profile->profile) {
+		case VEN_PROFILE_MPEG4_SP:
+			profile_type.profile = VCD_PROFILE_MPEG4_SP;
+			break;
+		case VEN_PROFILE_MPEG4_ASP:
+			profile_type.profile = VCD_PROFILE_MPEG4_ASP;
+			break;
+		case VEN_PROFILE_H264_BASELINE:
+			profile_type.profile = VCD_PROFILE_H264_BASELINE;
+			break;
+		case VEN_PROFILE_H264_MAIN:
+			profile_type.profile = VCD_PROFILE_H264_MAIN;
+			break;
+		case VEN_PROFILE_H264_HIGH:
+			profile_type.profile = VCD_PROFILE_H264_HIGH;
+			break;
+		case VEN_PROFILE_H263_BASELINE:
+			profile_type.profile = VCD_PROFILE_H263_BASELINE;
+			break;
+		default:
+			status = false;
+			break;
+		}
+
+		if (status) {
+			vcd_status = vcd_set_property(client_ctx->vcd_handle,
+			&vcd_property_hdr, &profile_type);
+
+			if (vcd_status) {
+				ERR("%s(): Set VCD_I_PROFILE Failed\n",
+						__func__);
+				return false;
+			}
+		}
+	} else {
+		vcd_status = vcd_get_property(client_ctx->vcd_handle,
+					&vcd_property_hdr, &profile_type);
+
+		if (vcd_status) {
+			ERR("%s(): Get VCD_I_PROFILE Failed\n",
+					__func__);
+			return false;
+		} else {
+			switch (profile_type.profile) {
+			case VCD_PROFILE_H263_BASELINE:
+				profile->profile = VEN_PROFILE_H263_BASELINE;
+				break;
+			case VCD_PROFILE_H264_BASELINE:
+				profile->profile = VEN_PROFILE_H264_BASELINE;
+				break;
+			case VCD_PROFILE_H264_HIGH:
+				profile->profile = VEN_PROFILE_H264_HIGH;
+				break;
+			case VCD_PROFILE_H264_MAIN:
+				profile->profile = VEN_PROFILE_H264_MAIN;
+				break;
+			case VCD_PROFILE_MPEG4_ASP:
+				profile->profile = VEN_PROFILE_MPEG4_ASP;
+				break;
+			case VCD_PROFILE_MPEG4_SP:
+				profile->profile = VEN_PROFILE_MPEG4_SP;
+				break;
+			default:
+				status = false;
+				break;
+			}
+		}
+	}
+	return status;
+}
+
+u32 vid_enc_set_get_profile_level(struct video_client_ctx *client_ctx,
+		struct ven_profilelevel *profile_level,	u32 set_flag)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_level level;
+	u32 vcd_status = VCD_ERR_FAIL;
+	u32 status = true;
+
+	if (!client_ctx || !profile_level)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_LEVEL;
+	vcd_property_hdr.sz =
+			sizeof(struct vcd_property_level);
+
+	if (set_flag) {
+		switch (profile_level->level) {
+		case VEN_LEVEL_MPEG4_0:
+			level.level = VCD_LEVEL_MPEG4_0;
+			break;
+		case VEN_LEVEL_MPEG4_1:
+			level.level = VCD_LEVEL_MPEG4_1;
+			break;
+		case VEN_LEVEL_MPEG4_2:
+			level.level = VCD_LEVEL_MPEG4_2;
+			break;
+		case VEN_LEVEL_MPEG4_3:
+			level.level = VCD_LEVEL_MPEG4_3;
+			break;
+		case VEN_LEVEL_MPEG4_4:
+			level.level = VCD_LEVEL_MPEG4_4;
+			break;
+		case VEN_LEVEL_MPEG4_5:
+			level.level = VCD_LEVEL_MPEG4_5;
+			break;
+		case VEN_LEVEL_MPEG4_3b:
+			level.level = VCD_LEVEL_MPEG4_3b;
+			break;
+		case VEN_LEVEL_MPEG4_6:
+			level.level = VCD_LEVEL_MPEG4_6;
+			break;
+		case VEN_LEVEL_H264_1:
+			level.level = VCD_LEVEL_H264_1;
+			break;
+		case VEN_LEVEL_H264_1b:
+			level.level = VCD_LEVEL_H264_1b;
+			break;
+		case VEN_LEVEL_H264_1p1:
+			level.level = VCD_LEVEL_H264_1p1;
+			break;
+		case VEN_LEVEL_H264_1p2:
+			level.level = VCD_LEVEL_H264_1p2;
+			break;
+		case VEN_LEVEL_H264_1p3:
+			level.level = VCD_LEVEL_H264_1p3;
+			break;
+		case VEN_LEVEL_H264_2:
+			level.level = VCD_LEVEL_H264_2;
+			break;
+		case VEN_LEVEL_H264_2p1:
+			level.level = VCD_LEVEL_H264_2p1;
+			break;
+		case VEN_LEVEL_H264_2p2:
+			level.level = VCD_LEVEL_H264_2p2;
+			break;
+		case VEN_LEVEL_H264_3:
+			level.level = VCD_LEVEL_H264_3;
+			break;
+		case VEN_LEVEL_H264_3p1:
+			level.level = VCD_LEVEL_H264_3p1;
+			break;
+		case VEN_LEVEL_H264_4:
+			level.level = VCD_LEVEL_H264_4;
+			break;
+		case VEN_LEVEL_H263_10:
+			level.level = VCD_LEVEL_H263_10;
+			break;
+		case VEN_LEVEL_H263_20:
+			level.level = VCD_LEVEL_H263_20;
+			break;
+		case VEN_LEVEL_H263_30:
+			level.level = VCD_LEVEL_H263_30;
+			break;
+		case VEN_LEVEL_H263_40:
+			level.level = VCD_LEVEL_H263_40;
+			break;
+		case VEN_LEVEL_H263_45:
+			level.level = VCD_LEVEL_H263_45;
+			break;
+		case VEN_LEVEL_H263_50:
+			level.level = VCD_LEVEL_H263_50;
+			break;
+		case VEN_LEVEL_H263_60:
+			level.level = VCD_LEVEL_H263_60;
+			break;
+		case VEN_LEVEL_H263_70:
+			level.level = VCD_LEVEL_H263_70;
+			break;
+		default:
+			status = false;
+			break;
+		}
+		if (status) {
+			vcd_status = vcd_set_property(client_ctx->vcd_handle,
+						&vcd_property_hdr, &level);
+
+			if (vcd_status) {
+				ERR("%s(): Set VCD_I_LEVEL Failed\n",
+						__func__);
+				return false;
+			}
+		}
+	} else {
+		vcd_status = vcd_get_property(client_ctx->vcd_handle,
+					&vcd_property_hdr, &level);
+
+		if (vcd_status) {
+			ERR("%s(): Get VCD_I_LEVEL Failed\n",
+					__func__);
+			return false;
+		} else {
+			switch (level.level) {
+			case VCD_LEVEL_MPEG4_0:
+				profile_level->level = VEN_LEVEL_MPEG4_0;
+				break;
+			case VCD_LEVEL_MPEG4_1:
+				profile_level->level = VEN_LEVEL_MPEG4_1;
+				break;
+			case VCD_LEVEL_MPEG4_2:
+				profile_level->level = VEN_LEVEL_MPEG4_2;
+				break;
+			case VCD_LEVEL_MPEG4_3:
+				profile_level->level = VEN_LEVEL_MPEG4_3;
+				break;
+			case VCD_LEVEL_MPEG4_4:
+				profile_level->level = VEN_LEVEL_MPEG4_4;
+				break;
+			case VCD_LEVEL_MPEG4_5:
+				profile_level->level = VEN_LEVEL_MPEG4_5;
+				break;
+			case VCD_LEVEL_MPEG4_3b:
+				profile_level->level = VEN_LEVEL_MPEG4_3b;
+				break;
+			case VCD_LEVEL_H264_1:
+				profile_level->level = VEN_LEVEL_H264_1;
+				break;
+			case VCD_LEVEL_H264_1b:
+				profile_level->level = VEN_LEVEL_H264_1b;
+				break;
+			case VCD_LEVEL_H264_1p1:
+				profile_level->level = VEN_LEVEL_H264_1p1;
+				break;
+			case VCD_LEVEL_H264_1p2:
+				profile_level->level = VEN_LEVEL_H264_1p2;
+				break;
+			case VCD_LEVEL_H264_1p3:
+				profile_level->level = VEN_LEVEL_H264_1p3;
+				break;
+			case VCD_LEVEL_H264_2:
+				profile_level->level = VEN_LEVEL_H264_2;
+				break;
+			case VCD_LEVEL_H264_2p1:
+				profile_level->level = VEN_LEVEL_H264_2p1;
+				break;
+			case VCD_LEVEL_H264_2p2:
+				profile_level->level = VEN_LEVEL_H264_2p2;
+				break;
+			case VCD_LEVEL_H264_3:
+				profile_level->level = VEN_LEVEL_H264_3;
+				break;
+			case VCD_LEVEL_H264_3p1:
+				profile_level->level = VEN_LEVEL_H264_3p1;
+				break;
+			case VCD_LEVEL_H264_3p2:
+				status = false;
+				break;
+			case VCD_LEVEL_H264_4:
+				profile_level->level = VEN_LEVEL_H264_4;
+				break;
+			case VCD_LEVEL_H263_10:
+				profile_level->level = VEN_LEVEL_H263_10;
+				break;
+			case VCD_LEVEL_H263_20:
+				profile_level->level = VEN_LEVEL_H263_20;
+				break;
+			case VCD_LEVEL_H263_30:
+				profile_level->level = VEN_LEVEL_H263_30;
+				break;
+			case VCD_LEVEL_H263_40:
+				profile_level->level = VEN_LEVEL_H263_40;
+				break;
+			case VCD_LEVEL_H263_45:
+				profile_level->level = VEN_LEVEL_H263_45;
+				break;
+			case VCD_LEVEL_H263_50:
+				profile_level->level = VEN_LEVEL_H263_50;
+				break;
+			case VCD_LEVEL_H263_60:
+				profile_level->level = VEN_LEVEL_H263_60;
+				break;
+			case VCD_LEVEL_H263_70:
+				status = false;
+				break;
+			default:
+				status = false;
+				break;
+			}
+		}
+	}
+	return status;
+}
+
+u32 vid_enc_set_get_session_qp(struct video_client_ctx *client_ctx,
+		struct venc_sessionqp *session_qp, u32 set_flag)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_session_qp qp;
+	u32 vcd_status = VCD_ERR_FAIL;
+
+	if (!client_ctx || !session_qp)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_SESSION_QP;
+	vcd_property_hdr.sz =
+			sizeof(struct vcd_property_session_qp);
+
+	if (set_flag) {
+		qp.i_frame_qp = session_qp->iframeqp;
+		qp.p_frame_qp = session_qp->pframqp;
+
+		vcd_status = vcd_set_property(client_ctx->vcd_handle,
+				&vcd_property_hdr, &qp);
+
+		if (vcd_status) {
+			ERR("%s(): Set VCD_I_SESSION_QP Failed\n",
+					__func__);
+			return false;
+		}
+	} else {
+		vcd_status = vcd_get_property(client_ctx->vcd_handle,
+				&vcd_property_hdr, &qp);
+
+		if (vcd_status) {
+			ERR("%s(): Set VCD_I_SESSION_QP Failed\n",
+					__func__);
+			return false;
+		} else {
+			session_qp->iframeqp = qp.i_frame_qp;
+			session_qp->pframqp = qp.p_frame_qp;
+		}
+	}
+	return true;
+}
+
+u32 vid_enc_set_get_intraperiod(struct video_client_ctx *client_ctx,
+		struct venc_intraperiod *intraperiod,	u32 set_flag)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_i_period period;
+	u32 vcd_status = VCD_ERR_FAIL;
+
+	if (!client_ctx || !intraperiod)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_INTRA_PERIOD;
+	vcd_property_hdr.sz =
+		sizeof(struct vcd_property_i_period);
+
+	if (set_flag) {
+		period.p_frames = intraperiod->num_pframes;
+		period.b_frames = intraperiod->num_bframes;
+		vcd_status = vcd_set_property(client_ctx->vcd_handle,
+				&vcd_property_hdr, &period);
+
+		if (vcd_status) {
+			ERR("%s(): Set VCD_I_INTRA_PERIOD Failed\n",
+					__func__);
+			return false;
+		}
+	} else {
+		vcd_status = vcd_get_property(client_ctx->vcd_handle,
+				&vcd_property_hdr, &period);
+
+		if (vcd_status) {
+			ERR("%s(): Get VCD_I_INTRA_PERIOD Failed\n",
+					__func__);
+			return false;
+		} else {
+			intraperiod->num_pframes = period.p_frames;
+			intraperiod->num_bframes = period.b_frames;
+		}
+	}
+	return true;
+}
+
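+/*
+ * Ask the encoder to generate an I frame for the next input buffer by
+ * setting the VCD_I_REQ_IFRAME property.
+ */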
+u32 vid_enc_request_iframe(struct video_client_ctx *client_ctx)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_req_i_frame request;
+	u32 vcd_status = VCD_ERR_FAIL;
+	u32 status = true;
+
+	if (!client_ctx)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_REQ_IFRAME;
+	vcd_property_hdr.sz =
+				sizeof(struct vcd_property_req_i_frame);
+	request.req_i_frame = 1;
+
+	vcd_status = vcd_set_property(client_ctx->vcd_handle,
+				&vcd_property_hdr, &request);
+
+	if (vcd_status) {
+		ERR("%s(): Set VCD_I_REQ_IFRAME Failed\n",
+				__func__);
+		return false;
+	}
+	return status;
+}
+
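+/*
+ * Read the stream sequence header into a kernel buffer of
+ * seq_header->bufsize bytes; the allocated buffer is exposed to the
+ * caller through seq_header->hdrbufptr.
+ */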
+u32 vid_enc_get_sequence_header(struct video_client_ctx *client_ctx,
+		struct venc_seqheader	*seq_header)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_sequence_hdr hdr;
+	u32 vcd_status = VCD_ERR_FAIL;
+	u32 status = true;
+
+	if (!client_ctx ||
+			!seq_header || !seq_header->bufsize)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_SEQ_HEADER;
+	vcd_property_hdr.sz =
+		sizeof(struct vcd_sequence_hdr);
+
+	hdr.sequence_header =
+		kzalloc(seq_header->bufsize, GFP_KERNEL);
+	seq_header->hdrbufptr = hdr.sequence_header;
+
+	if (!hdr.sequence_header)
+		return false;
+	hdr.sequence_header_len = seq_header->bufsize;
+	vcd_status = vcd_get_property(client_ctx->vcd_handle,
+			&vcd_property_hdr, &hdr);
+
+	if (vcd_status) {
+		ERR("%s(): Get VCD_I_SEQ_HEADER Failed\n",
+				__func__);
+		status = false;
+	}
+	return status;
+}
+
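+/*
+ * Translate between the VEN_* entropy coding settings (CAVLC/CABAC and
+ * CABAC model number) and the corresponding VCD_I_ENTROPY_CTRL property.
+ */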
+u32 vid_enc_set_get_entropy_cfg(struct video_client_ctx *client_ctx,
+		struct venc_entropycfg *entropy_cfg, u32 set_flag)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_entropy_control control;
+	u32 vcd_status = VCD_ERR_FAIL;
+	u32 status = true;
+
+	if (!client_ctx || !entropy_cfg)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_ENTROPY_CTRL;
+	vcd_property_hdr.sz =
+		sizeof(struct vcd_property_entropy_control);
+	if (set_flag) {
+		switch (entropy_cfg->longentropysel) {
+		case VEN_ENTROPY_MODEL_CAVLC:
+			control.entropy_sel = VCD_ENTROPY_SEL_CAVLC;
+			break;
+		case VEN_ENTROPY_MODEL_CABAC:
+			control.entropy_sel = VCD_ENTROPY_SEL_CABAC;
+			break;
+		default:
+			status = false;
+			break;
+		}
+
+		if (status && entropy_cfg->longentropysel ==
+				VEN_ENTROPY_MODEL_CABAC) {
+			switch (entropy_cfg->cabacmodel) {
+			case VEN_CABAC_MODEL_0:
+				control.cabac_model =
+					VCD_CABAC_MODEL_NUMBER_0;
+				break;
+			case VEN_CABAC_MODEL_1:
+				control.cabac_model =
+					VCD_CABAC_MODEL_NUMBER_1;
+				break;
+			case VEN_CABAC_MODEL_2:
+				control.cabac_model =
+					VCD_CABAC_MODEL_NUMBER_2;
+				break;
+			default:
+				status = false;
+				break;
+			}
+		}
+		if (status) {
+			vcd_status = vcd_set_property(client_ctx->vcd_handle,
+					&vcd_property_hdr, &control);
+
+			if (vcd_status) {
+				ERR("%s(): Set VCD_I_ENTROPY_CTRL Failed\n",
+						__func__);
+				status = false;
+			}
+		}
+	} else {
+		vcd_status = vcd_get_property(client_ctx->vcd_handle,
+					&vcd_property_hdr, &control);
+
+		if (vcd_status) {
+			ERR("%s(): Get VCD_I_ENTROPY_CTRL Failed\n",
+					__func__);
+			status = false;
+		} else {
+			switch (control.entropy_sel) {
+			case VCD_ENTROPY_SEL_CABAC:
+				entropy_cfg->longentropysel =
+					VEN_ENTROPY_MODEL_CABAC;
+				break;
+			case VCD_ENTROPY_SEL_CAVLC:
+				entropy_cfg->longentropysel =
+					VEN_ENTROPY_MODEL_CAVLC;
+				break;
+			default:
+				status = false;
+				break;
+			}
+
+			if (status && control.entropy_sel ==
+					VCD_ENTROPY_SEL_CABAC) {
+				switch (control.cabac_model) {
+				case VCD_CABAC_MODEL_NUMBER_0:
+					entropy_cfg->cabacmodel =
+						VEN_CABAC_MODEL_0;
+					break;
+				case VCD_CABAC_MODEL_NUMBER_1:
+					entropy_cfg->cabacmodel =
+						VEN_CABAC_MODEL_1;
+					break;
+				case VCD_CABAC_MODEL_NUMBER_2:
+					entropy_cfg->cabacmodel =
+						VEN_CABAC_MODEL_2;
+					break;
+				default:
+					status = false;
+					break;
+				}
+			}
+		}
+	}
+	return status;
+}
+
+u32 vid_enc_set_get_dbcfg(struct video_client_ctx *client_ctx,
+		struct venc_dbcfg *dbcfg, u32 set_flag)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_db_config control;
+	u32 vcd_status = VCD_ERR_FAIL;
+	u32 status = true;
+
+	if (!client_ctx || !dbcfg)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_DEBLOCKING;
+	vcd_property_hdr.sz =
+		sizeof(struct vcd_property_db_config);
+
+	if (set_flag) {
+		switch (dbcfg->db_mode) {
+		case VEN_DB_DISABLE:
+			control.db_config = VCD_DB_DISABLE;
+			break;
+		case VEN_DB_ALL_BLKG_BNDRY:
+			control.db_config = VCD_DB_ALL_BLOCKING_BOUNDARY;
+			break;
+		case VEN_DB_SKIP_SLICE_BNDRY:
+			control.db_config = VCD_DB_SKIP_SLICE_BOUNDARY;
+			break;
+		default:
+			status = false;
+			break;
+		}
+
+		if (status) {
+			control.slice_alpha_offset =
+				dbcfg->slicealpha_offset;
+			control.slice_beta_offset =
+				dbcfg->slicebeta_offset;
+			vcd_status = vcd_set_property(client_ctx->vcd_handle,
+			&vcd_property_hdr, &control);
+			if (vcd_status) {
+				ERR("%s(): Set VCD_I_DEBLOCKING Failed\n",
+						__func__);
+				status = false;
+			}
+		}
+	} else {
+		vcd_status = vcd_get_property(client_ctx->vcd_handle,
+					&vcd_property_hdr, &control);
+		if (vcd_status) {
+			ERR("%s(): Get VCD_I_DEBLOCKING Failed\n",
+					__func__);
+			status = false;
+		} else {
+			switch (control.db_config) {
+			case VCD_DB_ALL_BLOCKING_BOUNDARY:
+				dbcfg->db_mode = VEN_DB_ALL_BLKG_BNDRY;
+				break;
+			case VCD_DB_DISABLE:
+				dbcfg->db_mode = VEN_DB_DISABLE;
+				break;
+			case VCD_DB_SKIP_SLICE_BOUNDARY:
+				dbcfg->db_mode = VEN_DB_SKIP_SLICE_BNDRY;
+				break;
+			default:
+				status = false;
+				break;
+			}
+			dbcfg->slicealpha_offset =
+				control.slice_alpha_offset;
+			dbcfg->slicebeta_offset =
+				control.slice_beta_offset;
+		}
+	}
+	return status;
+}
+
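+/*
+ * Set or get the cyclic intra refresh macroblock count via the
+ * VCD_I_INTRA_REFRESH property.
+ */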
+u32 vid_enc_set_get_intrarefresh(struct video_client_ctx *client_ctx,
+		struct venc_intrarefresh *intrarefresh, u32 set_flag)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_intra_refresh_mb_number control;
+	u32 vcd_status = VCD_ERR_FAIL;
+
+	if (!client_ctx || !intrarefresh)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_INTRA_REFRESH;
+	vcd_property_hdr.sz =
+		sizeof(struct vcd_property_intra_refresh_mb_number);
+
+	if (set_flag) {
+		control.cir_mb_number = intrarefresh->mbcount;
+		vcd_status = vcd_set_property(client_ctx->vcd_handle,
+				&vcd_property_hdr, &control);
+
+		if (vcd_status) {
+			ERR("%s(): Set VCD_I_INTRA_REFRESH Failed\n",
+					__func__);
+			return false;
+		}
+	} else {
+		vcd_status = vcd_get_property(client_ctx->vcd_handle,
+					&vcd_property_hdr, &control);
+
+		if (vcd_status) {
+			ERR("%s(): Get VCD_I_INTRA_REFRESH Failed\n",
+					__func__);
+			return false;
+		} else
+			intrarefresh->mbcount = control.cir_mb_number;
+	}
+	return true;
+}
+
+u32 vid_enc_set_get_multiclicecfg(struct video_client_ctx *client_ctx,
+		struct venc_multiclicecfg *multiclicecfg,	u32 set_flag)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_multi_slice control;
+	u32 vcd_status = VCD_ERR_FAIL;
+	u32 status = true;
+
+	if (!client_ctx || !multiclicecfg)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_MULTI_SLICE;
+	vcd_property_hdr.sz =
+		sizeof(struct vcd_property_multi_slice);
+
+	if (set_flag) {
+		switch (multiclicecfg->mslice_mode) {
+		case VEN_MSLICE_OFF:
+			control.m_slice_sel =
+				VCD_MSLICE_OFF;
+			break;
+		case VEN_MSLICE_CNT_MB:
+			control.m_slice_sel =
+				VCD_MSLICE_BY_MB_COUNT;
+			break;
+		case VEN_MSLICE_CNT_BYTE:
+			control.m_slice_sel =
+				VCD_MSLICE_BY_BYTE_COUNT;
+			break;
+		case VEN_MSLICE_GOB:
+			control.m_slice_sel =
+				VCD_MSLICE_BY_GOB;
+			break;
+		default:
+			status = false;
+			break;
+		}
+
+		if (status) {
+			control.m_slice_size =
+				multiclicecfg->mslice_size;
+			vcd_status = vcd_set_property(client_ctx->vcd_handle,
+			&vcd_property_hdr, &control);
+
+			if (vcd_status) {
+				ERR("%s(): Set VCD_I_MULTI_SLICE Failed\n",
+						__func__);
+				status = false;
+			}
+		}
+	} else {
+		vcd_status = vcd_get_property(client_ctx->vcd_handle,
+			&vcd_property_hdr, &control);
+
+		if (vcd_status) {
+			ERR("%s(): Get VCD_I_MULTI_SLICE Failed\n",
+					__func__);
+			status = false;
+		} else {
+			multiclicecfg->mslice_size =
+				control.m_slice_size;
+			switch (control.m_slice_sel) {
+			case VCD_MSLICE_OFF:
+				multiclicecfg->mslice_mode = VEN_MSLICE_OFF;
+				break;
+			case VCD_MSLICE_BY_MB_COUNT:
+				multiclicecfg->mslice_mode = VEN_MSLICE_CNT_MB;
+				break;
+			case VCD_MSLICE_BY_BYTE_COUNT:
+				multiclicecfg->mslice_mode =
+					VEN_MSLICE_CNT_BYTE;
+				break;
+			case VCD_MSLICE_BY_GOB:
+				multiclicecfg->mslice_mode =
+					VEN_MSLICE_GOB;
+				break;
+			default:
+				status = false;
+				break;
+			}
+		}
+	}
+	return status;
+}
+
+u32 vid_enc_set_get_ratectrlcfg(struct video_client_ctx *client_ctx,
+		struct venc_ratectrlcfg *ratectrlcfg,	u32 set_flag)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_rate_control control;
+	u32 vcd_status = VCD_ERR_FAIL;
+	u32 status = true;
+
+	if (!client_ctx || !ratectrlcfg)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_RATE_CONTROL;
+	vcd_property_hdr.sz =
+		sizeof(struct vcd_property_rate_control);
+
+	if (set_flag) {
+		switch (ratectrlcfg->rcmode) {
+		case VEN_RC_OFF:
+			control.rate_control = VCD_RATE_CONTROL_OFF;
+			break;
+		case VEN_RC_CBR_VFR:
+			control.rate_control = VCD_RATE_CONTROL_CBR_VFR;
+			break;
+		case VEN_RC_VBR_CFR:
+			control.rate_control = VCD_RATE_CONTROL_VBR_CFR;
+			break;
+		case VEN_RC_VBR_VFR:
+			control.rate_control = VCD_RATE_CONTROL_VBR_VFR;
+			break;
+		case VEN_RC_CBR_CFR:
+			control.rate_control = VCD_RATE_CONTROL_CBR_CFR;
+			break;
+		default:
+			status = false;
+			break;
+		}
+
+		if (status) {
+			vcd_status = vcd_set_property(client_ctx->vcd_handle,
+			&vcd_property_hdr, &control);
+			if (vcd_status) {
+				ERR("%s(): Set VCD_I_RATE_CONTROL Failed\n",
+						__func__);
+				status = false;
+			}
+		}
+	} else {
+		vcd_status = vcd_get_property(client_ctx->vcd_handle,
+		&vcd_property_hdr, &control);
+
+		if (vcd_status) {
+			ERR("%s(): Get VCD_I_RATE_CONTROL Failed\n",
+					__func__);
+			status = false;
+		} else {
+			switch (control.rate_control) {
+			case VCD_RATE_CONTROL_OFF:
+				ratectrlcfg->rcmode = VEN_RC_OFF;
+				break;
+			case VCD_RATE_CONTROL_CBR_VFR:
+				ratectrlcfg->rcmode = VEN_RC_CBR_VFR;
+				break;
+			case VCD_RATE_CONTROL_VBR_CFR:
+				ratectrlcfg->rcmode = VEN_RC_VBR_CFR;
+				break;
+			case VCD_RATE_CONTROL_VBR_VFR:
+				ratectrlcfg->rcmode = VEN_RC_VBR_VFR;
+				break;
+			case VCD_RATE_CONTROL_CBR_CFR:
+				ratectrlcfg->rcmode = VEN_RC_CBR_CFR;
+				break;
+			default:
+				status = false;
+				break;
+			}
+		}
+	}
+	return status;
+}
+
+u32 vid_enc_set_get_voptimingcfg(struct video_client_ctx *client_ctx,
+		struct	venc_voptimingcfg *voptimingcfg, u32 set_flag)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_vop_timing control;
+	u32 vcd_status = VCD_ERR_FAIL;
+	u32 status = true;
+
+	if (!client_ctx || !voptimingcfg)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_VOP_TIMING;
+	vcd_property_hdr.sz =
+		sizeof(struct vcd_property_vop_timing);
+
+	if (set_flag) {
+		control.vop_time_resolution =
+		voptimingcfg->voptime_resolution;
+		vcd_status = vcd_set_property(client_ctx->vcd_handle,
+		&vcd_property_hdr, &control);
+
+		if (vcd_status) {
+			ERR("%s(): Set VCD_I_VOP_TIMING Failed\n",
+					__func__);
+			status = false;
+		}
+	} else {
+		vcd_status = vcd_get_property(client_ctx->vcd_handle,
+		&vcd_property_hdr, &control);
+		if (vcd_status) {
+			ERR("%s(): Get VCD_I_VOP_TIMING Failed\n",
+					__func__);
+			status = false;
+		} else
+			voptimingcfg->voptime_resolution =
+			control.vop_time_resolution;
+	}
+	return status;
+}
+
+u32 vid_enc_set_get_headerextension(struct video_client_ctx *client_ctx,
+		struct venc_headerextension *headerextension, u32 set_flag)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	u32 control;
+	u32 vcd_status = VCD_ERR_FAIL;
+	u32 status = true;
+
+	if (!client_ctx || !headerextension)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_HEADER_EXTENSION;
+	vcd_property_hdr.sz = sizeof(u32);
+
+	if (set_flag) {
+		control = headerextension->header_extension;
+		vcd_status = vcd_set_property(client_ctx->vcd_handle,
+		&vcd_property_hdr, &control);
+		if (vcd_status) {
+			ERR("%s(): Set VCD_I_HEADER_EXTENSION Failed\n",
+					__func__);
+			status = false;
+		}
+	} else {
+		vcd_status = vcd_get_property(client_ctx->vcd_handle,
+		&vcd_property_hdr, &control);
+		if (vcd_status) {
+			ERR("%s(): Get VCD_I_HEADER_EXTENSION Failed\n",
+					__func__);
+			status = false;
+		} else {
+			headerextension->header_extension = control;
+		}
+	}
+	return status;
+}
+
+u32 vid_enc_set_get_qprange(struct video_client_ctx *client_ctx,
+		struct venc_qprange *qprange, u32 set_flag)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_qp_range control;
+	u32 vcd_status = VCD_ERR_FAIL;
+	u32 status = true;
+
+	if (!client_ctx || !qprange)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_QP_RANGE;
+	vcd_property_hdr.sz =
+		sizeof(struct vcd_property_qp_range);
+
+	if (set_flag) {
+		control.max_qp = qprange->maxqp;
+		control.min_qp = qprange->minqp;
+		vcd_status = vcd_set_property(client_ctx->vcd_handle,
+		&vcd_property_hdr, &control);
+
+		if (vcd_status) {
+			ERR("%s(): Set VCD_I_QP_RANGE Failed\n",
+					__func__);
+			status = false;
+		}
+	} else {
+		vcd_status = vcd_get_property(client_ctx->vcd_handle,
+					&vcd_property_hdr, &control);
+		if (vcd_status) {
+			ERR("%s(): Get VCD_I_QP_RANGE Failed\n",
+					__func__);
+			status = false;
+		} else {
+			qprange->maxqp = control.max_qp;
+			qprange->minqp = control.min_qp;
+		}
+	}
+	return status;
+}
+
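+/*
+ * Start or stop the encode session by calling vcd_encode_start() or
+ * vcd_stop() on the client's VCD handle.
+ */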
+u32 vid_enc_start_stop(struct video_client_ctx *client_ctx, u32 start)
+{
+	u32 vcd_status;
+
+	if (!client_ctx)
+		return false;
+
+	if (start) {
+		vcd_status = vcd_encode_start(client_ctx->vcd_handle);
+
+		if (vcd_status) {
+			ERR("%s(): vcd_encode_start failed."
+				" vcd_status = %u\n", __func__, vcd_status);
+			return false;
+		}
+	} else {
+		vcd_status = vcd_stop(client_ctx->vcd_handle);
+		if (vcd_status) {
+			ERR("%s(): vcd_stop failed.  vcd_status = %u\n",
+				__func__, vcd_status);
+			return false;
+		}
+		DBG("Send STOP_DONE message to client = %p\n",
+				client_ctx);
+	}
+	return true;
+}
+
+u32 vid_enc_pause_resume(struct video_client_ctx *client_ctx, u32 pause)
+{
+	u32 vcd_status;
+
+	if (!client_ctx)
+		return false;
+
+	if (pause) {
+		DBG("PAUSE command from client = %p\n",
+				client_ctx);
+		vcd_status = vcd_pause(client_ctx->vcd_handle);
+	} else {
+		DBG("Resume command from client = %p\n",
+				client_ctx);
+		vcd_status = vcd_resume(client_ctx->vcd_handle);
+	}
+
+	if (vcd_status)
+		return false;
+
+	return true;
+}
+
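+/*
+ * Flush the encoder's input, output or both buffer queues by mapping
+ * the VEN_FLUSH_* mode onto the corresponding VCD_FLUSH_* mode.
+ */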
+u32 vid_enc_flush(struct video_client_ctx *client_ctx,
+		struct venc_bufferflush *bufferflush)
+{
+	u32 status = true, mode, vcd_status;
+
+	if (!client_ctx || !bufferflush)
+		return false;
+
+	switch (bufferflush->flush_mode) {
+	case VEN_FLUSH_INPUT:
+		mode = VCD_FLUSH_INPUT;
+		break;
+	case VEN_FLUSH_OUTPUT:
+		mode = VCD_FLUSH_OUTPUT;
+		break;
+	case VEN_FLUSH_ALL:
+		mode = VCD_FLUSH_ALL;
+		break;
+	default:
+		status = false;
+		break;
+	}
+	if (status) {
+		vcd_status = vcd_flush(client_ctx->vcd_handle, mode);
+		if (vcd_status)
+			status = false;
+	}
+	return status;
+}
+
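+/*
+ * Query the encoder's buffer requirements (count, size, alignment) for
+ * the input or output port and copy them into venc_buf_req.
+ */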
+u32 vid_enc_get_buffer_req(struct video_client_ctx *client_ctx,
+		struct venc_allocatorproperty *venc_buf_req, u32 input_dir)
+{
+	enum vcd_buffer_type buffer;
+	struct vcd_buffer_requirement buffer_req;
+	u32 status = true;
+	u32 vcd_status;
+
+	if (!client_ctx || !venc_buf_req)
+		return false;
+
+	buffer = VCD_BUFFER_OUTPUT;
+	if (input_dir)
+		buffer = VCD_BUFFER_INPUT;
+
+	vcd_status = vcd_get_buffer_requirements(client_ctx->vcd_handle,
+							buffer, &buffer_req);
+
+	if (vcd_status)
+		status = false;
+
+	if (status) {
+		venc_buf_req->actualcount = buffer_req.actual_count;
+		venc_buf_req->alignment = buffer_req.align;
+		venc_buf_req->datasize = buffer_req.sz;
+		venc_buf_req->mincount = buffer_req.min_count;
+		venc_buf_req->maxcount = buffer_req.max_count;
+		venc_buf_req->bufpoolid = buffer_req.buf_pool_id;
+		venc_buf_req->suffixsize = 0;
+	}
+	return status;
+}
+
+u32 vid_enc_set_buffer_req(struct video_client_ctx *client_ctx,
+		struct venc_allocatorproperty *venc_buf_req, u32 input_dir)
+{
+	enum vcd_buffer_type buffer;
+	struct vcd_buffer_requirement buffer_req;
+	u32 status = true;
+	u32 vcd_status;
+
+	if (!client_ctx || !venc_buf_req)
+		return false;
+
+	buffer = VCD_BUFFER_OUTPUT;
+	if (input_dir)
+		buffer = VCD_BUFFER_INPUT;
+
+	buffer_req.actual_count = venc_buf_req->actualcount;
+	buffer_req.align = venc_buf_req->alignment;
+	buffer_req.sz = venc_buf_req->datasize;
+	buffer_req.min_count = venc_buf_req->mincount;
+	buffer_req.max_count = venc_buf_req->maxcount;
+	buffer_req.buf_pool_id = 0;
+
+	vcd_status = vcd_set_buffer_requirements(client_ctx->vcd_handle,
+				buffer, &buffer_req);
+
+	if (vcd_status)
+		status = false;
+	return status;
+}
+
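+/*
+ * Register a client supplied pmem buffer with the encoder: the user
+ * address is recorded in the client's address table and the resulting
+ * kernel mapping is handed to the core through vcd_set_buffer().
+ */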
+u32 vid_enc_set_buffer(struct video_client_ctx *client_ctx,
+		struct venc_bufferpayload *buffer_info,
+		enum venc_buffer_dir buffer)
+{
+	enum vcd_buffer_type vcd_buffer_t = VCD_BUFFER_INPUT;
+	enum buffer_dir dir_buffer = BUFFER_TYPE_INPUT;
+	u32 vcd_status = VCD_ERR_FAIL;
+	unsigned long kernel_vaddr;
+
+	if (!client_ctx || !buffer_info)
+		return false;
+
+	if (buffer == VEN_BUFFER_TYPE_OUTPUT) {
+		dir_buffer = BUFFER_TYPE_OUTPUT;
+		vcd_buffer_t = VCD_BUFFER_OUTPUT;
+	}
+
+	/*If buffer cannot be set, ignore */
+	if (!vidc_insert_addr_table(client_ctx, dir_buffer,
+					(unsigned long)buffer_info->pbuffer,
+					&kernel_vaddr,
+					buffer_info->fd,
+					(unsigned long)buffer_info->offset,
+					VID_ENC_MAX_NUM_OF_BUFF)) {
+		DBG("%s() : user_virt_addr = %p cannot be set.",
+		    __func__, buffer_info->pbuffer);
+		return false;
+	}
+
+	vcd_status = vcd_set_buffer(client_ctx->vcd_handle,
+				    vcd_buffer_t, (u8 *) kernel_vaddr,
+				    buffer_info->sz);
+
+	if (!vcd_status)
+		return true;
+	else
+		return false;
+}
+
+u32 vid_enc_free_buffer(struct video_client_ctx *client_ctx,
+		struct venc_bufferpayload *buffer_info,
+		enum venc_buffer_dir buffer)
+{
+	enum vcd_buffer_type buffer_vcd = VCD_BUFFER_INPUT;
+	enum buffer_dir dir_buffer = BUFFER_TYPE_INPUT;
+	u32 vcd_status = VCD_ERR_FAIL;
+	unsigned long kernel_vaddr;
+
+	if (!client_ctx || !buffer_info)
+		return false;
+
+	if (buffer == VEN_BUFFER_TYPE_OUTPUT) {
+		dir_buffer = BUFFER_TYPE_OUTPUT;
+		buffer_vcd = VCD_BUFFER_OUTPUT;
+	}
+	/*If buffer NOT set, ignore */
+	if (!vidc_delete_addr_table(client_ctx, dir_buffer,
+				(unsigned long)buffer_info->pbuffer,
+				&kernel_vaddr)) {
+		DBG("%s() : user_virt_addr = %p has not been set.",
+		    __func__, buffer_info->pbuffer);
+		return true;
+	}
+
+	vcd_status = vcd_free_buffer(client_ctx->vcd_handle, buffer_vcd,
+					 (u8 *)kernel_vaddr);
+
+	if (!vcd_status)
+		return true;
+	else
+		return false;
+}
+
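+/*
+ * Queue one input frame for encoding: translate the user buffer address
+ * through the address table, build a vcd_frame_data descriptor (offset,
+ * length, timestamp, flags) and submit it with vcd_encode_frame().
+ */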
+u32 vid_enc_encode_frame(struct video_client_ctx *client_ctx,
+		struct venc_buffer *input_frame_info)
+{
+	struct vcd_frame_data vcd_input_buffer;
+	unsigned long kernel_vaddr, phy_addr, user_vaddr;
+	int pmem_fd;
+	struct file *file;
+	s32 buffer_index = -1;
+
+	u32 vcd_status = VCD_ERR_FAIL;
+
+	if (!client_ctx || !input_frame_info)
+		return false;
+
+	user_vaddr = (unsigned long)input_frame_info->ptrbuffer;
+
+	if (vidc_lookup_addr_table(client_ctx, BUFFER_TYPE_INPUT,
+			true, &user_vaddr, &kernel_vaddr,
+			&phy_addr, &pmem_fd, &file,
+			&buffer_index)) {
+
+		/* kernel_vaddr  is found. send the frame to VCD */
+		memset((void *)&vcd_input_buffer, 0,
+					sizeof(struct vcd_frame_data));
+
+		vcd_input_buffer.virtual =
+		(u8 *) (kernel_vaddr + input_frame_info->offset);
+
+		vcd_input_buffer.offset = input_frame_info->offset;
+		vcd_input_buffer.frm_clnt_data =
+				(u32) input_frame_info->clientdata;
+		vcd_input_buffer.ip_frm_tag =
+				(u32) input_frame_info->clientdata;
+		vcd_input_buffer.data_len = input_frame_info->len;
+		vcd_input_buffer.time_stamp = input_frame_info->timestamp;
+
+		/* Rely on VCD using the same flags as OMX */
+		vcd_input_buffer.flags = input_frame_info->flags;
+
+		vcd_status = vcd_encode_frame(client_ctx->vcd_handle,
+		&vcd_input_buffer);
+		if (!vcd_status)
+			return true;
+		else {
+			ERR("%s(): vcd_encode_frame failed = %u\n",
+			__func__, vcd_status);
+			return false;
+		}
+
+	} else {
+		ERR("%s(): kernel_vaddr not found\n",
+				__func__);
+		return false;
+	}
+}
+
+u32 vid_enc_fill_output_buffer(struct video_client_ctx *client_ctx,
+		struct venc_buffer *output_frame_info)
+{
+	unsigned long kernel_vaddr, phy_addr, user_vaddr;
+	int pmem_fd;
+	struct file *file;
+	s32 buffer_index = -1;
+	u32 vcd_status = VCD_ERR_FAIL;
+
+	struct vcd_frame_data vcd_frame;
+
+	if (!client_ctx || !output_frame_info)
+		return false;
+
+	user_vaddr = (unsigned long)output_frame_info->ptrbuffer;
+
+	if (vidc_lookup_addr_table(client_ctx, BUFFER_TYPE_OUTPUT,
+			true, &user_vaddr, &kernel_vaddr,
+			&phy_addr, &pmem_fd, &file,
+			&buffer_index)) {
+
+		memset((void *)&vcd_frame, 0,
+					 sizeof(struct vcd_frame_data));
+		vcd_frame.virtual = (u8 *) kernel_vaddr;
+		vcd_frame.frm_clnt_data = (u32) output_frame_info->clientdata;
+		vcd_frame.alloc_len = output_frame_info->sz;
+
+		vcd_status = vcd_fill_output_buffer(client_ctx->vcd_handle,
+								&vcd_frame);
+		if (!vcd_status)
+			return true;
+		else {
+			ERR("%s(): vcd_fill_output_buffer failed = %u\n",
+					__func__, vcd_status);
+			return false;
+		}
+	} else {
+		ERR("%s(): kernel_vaddr not found\n", __func__);
+		return false;
+	}
+}
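+
+/*
+ * Map the client's reconstruction buffer (pmem fd plus offset) into the
+ * kernel and hand it to the core via the VCD_I_RECON_BUFFERS property.
+ */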
+u32 vid_enc_set_recon_buffers(struct video_client_ctx *client_ctx,
+		struct venc_recon_addr *venc_recon)
+{
+	u32 vcd_status = VCD_ERR_FAIL;
+	u32 len;
+	struct file *file;
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_enc_recon_buffer control;
+
+	control.buffer_size = venc_recon->buffer_size;
+	control.kernel_virtual_addr = NULL;
+	control.physical_addr = NULL;
+	control.pmem_fd = venc_recon->pmem_fd;
+	control.offset = venc_recon->offset;
+
+	if (get_pmem_file(control.pmem_fd, (unsigned long *)
+		(&(control.physical_addr)), (unsigned long *)
+		(&control.kernel_virtual_addr),
+		(unsigned long *) (&len), &file)) {
+		ERR("%s(): get_pmem_file failed\n", __func__);
+		return false;
+	}
+	put_pmem_file(file);
+	DBG("Virt: %p, Phys %p, fd: %d", control.kernel_virtual_addr,
+		control.physical_addr, control.pmem_fd);
+
+	vcd_property_hdr.prop_id = VCD_I_RECON_BUFFERS;
+	vcd_property_hdr.sz =
+		sizeof(struct vcd_property_enc_recon_buffer);
+
+	vcd_status = vcd_set_property(client_ctx->vcd_handle,
+					&vcd_property_hdr, &control);
+	if (!vcd_status) {
+		DBG("vcd_set_property returned success\n");
+		return true;
+	} else {
+		ERR("%s(): vid_enc_set_recon_buffers failed = %u\n",
+				__func__, vcd_status);
+		return false;
+	}
+}
+
+u32 vid_enc_free_recon_buffers(struct video_client_ctx *client_ctx)
+{
+	u32 vcd_status = VCD_ERR_FAIL;
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_enc_recon_buffer control;
+
+	vcd_property_hdr.prop_id = VCD_I_FREE_RECON_BUFFERS;
+	vcd_property_hdr.sz = sizeof(struct vcd_property_enc_recon_buffer);
+
+	vcd_status = vcd_set_property(client_ctx->vcd_handle,
+					&vcd_property_hdr, &control);
+	if (vcd_status) {
+		ERR("%s(): free recon buffers failed = %u\n",
+				__func__, vcd_status);
+		return false;
+	}
+}
+
+u32 vid_enc_get_recon_buffer_size(struct video_client_ctx *client_ctx,
+		struct venc_recon_buff_size *venc_recon_size)
+{
+	u32 vcd_status = VCD_ERR_FAIL;
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_buffer_size control;
+
+	control.width = venc_recon_size->width;
+	control.height = venc_recon_size->height;
+
+	vcd_property_hdr.prop_id = VCD_I_GET_RECON_BUFFER_SIZE;
+	vcd_property_hdr.sz = sizeof(struct vcd_property_buffer_size);
+
+	vcd_status = vcd_get_property(client_ctx->vcd_handle,
+					&vcd_property_hdr, &control);
+
+	venc_recon_size->width = control.width;
+	venc_recon_size->height = control.height;
+	venc_recon_size->size = control.size;
+	venc_recon_size->alignment = control.alignment;
+	DBG("W: %d, H: %d, S: %d, A: %d", venc_recon_size->width,
+			venc_recon_size->height, venc_recon_size->size,
+			venc_recon_size->alignment);
+
+	if (!vcd_status) {
+		DBG("vcd_get_property returned success\n");
+		return true;
+	} else {
+		ERR("%s(): vid_enc_get_recon_buffer_size failed = %u\n",
+				__func__, vcd_status);
+		return false;
+	}
+}
diff --git a/drivers/video/msm/vidc/common/enc/venc_internal.h b/drivers/video/msm/vidc/common/enc/venc_internal.h
new file mode 100644
index 0000000..7d4ebca
--- /dev/null
+++ b/drivers/video/msm/vidc/common/enc/venc_internal.h
@@ -0,0 +1,151 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef VENC_INTERNAL_H
+#define VENC_INTERNAL_H
+
+#include <linux/msm_vidc_enc.h>
+#include <linux/cdev.h>
+
+#include "vidc_init.h"
+
+#define VID_ENC_MAX_NUM_OF_BUFF 100
+
+enum venc_buffer_dir {
+	VEN_BUFFER_TYPE_INPUT,
+	VEN_BUFFER_TYPE_OUTPUT
+};
+
+struct vid_enc_msg {
+	struct list_head list;
+	struct venc_msg venc_msg_info;
+};
+
+struct vid_enc_dev {
+
+	struct cdev cdev;
+	struct device *device;
+	resource_size_t phys_base;
+	void __iomem *virt_base;
+	unsigned int irq;
+	struct clk *hclk;
+	struct clk *hclk_div2;
+	struct clk *pclk;
+	unsigned long hclk_rate;
+	struct mutex lock;
+	s32 device_handle;
+	struct video_client_ctx venc_clients[VIDC_MAX_NUM_CLIENTS];
+	u32 num_clients;
+};
+
+u32 vid_enc_set_get_base_cfg(struct video_client_ctx *client_ctx,
+		struct venc_basecfg *base_config, u32 set_flag);
+
+u32 vid_enc_set_get_inputformat(struct video_client_ctx *client_ctx,
+		u32 *input_format, u32 set_flag);
+
+u32 vid_enc_set_get_codec(struct video_client_ctx *client_ctx, u32 *codec,
+		u32 set_flag);
+
+u32 vid_enc_set_get_framesize(struct video_client_ctx *client_ctx,
+		u32 *height, u32 *width, u32 set_flag);
+
+u32 vid_enc_set_get_bitrate(struct video_client_ctx *client_ctx,
+		struct venc_targetbitrate *venc_bitrate, u32 set_flag);
+
+u32 vid_enc_set_get_framerate(struct video_client_ctx *client_ctx,
+		struct venc_framerate *frame_rate, u32 set_flag);
+
+u32 vid_enc_set_get_live_mode(struct video_client_ctx *client_ctx,
+		struct venc_switch *encoder_switch, u32 set_flag);
+
+u32 vid_enc_set_get_short_header(struct video_client_ctx *client_ctx,
+		struct venc_switch *encoder_switch, u32 set_flag);
+
+u32 vid_enc_set_get_profile(struct video_client_ctx *client_ctx,
+		struct venc_profile *profile, u32 set_flag);
+
+u32 vid_enc_set_get_profile_level(struct video_client_ctx *client_ctx,
+		struct ven_profilelevel *profile_level, u32 set_flag);
+
+u32 vid_enc_set_get_session_qp(struct video_client_ctx *client_ctx,
+		struct venc_sessionqp *session_qp, u32 set_flag);
+
+u32 vid_enc_set_get_intraperiod(struct video_client_ctx *client_ctx,
+		struct venc_intraperiod *intraperiod, u32 set_flag);
+
+u32 vid_enc_request_iframe(struct video_client_ctx *client_ctx);
+
+u32 vid_enc_get_sequence_header(struct video_client_ctx *client_ctx,
+		struct venc_seqheader *seq_header);
+
+u32 vid_enc_set_get_entropy_cfg(struct video_client_ctx *client_ctx,
+		struct venc_entropycfg *entropy_cfg, u32 set_flag);
+
+u32 vid_enc_set_get_dbcfg(struct video_client_ctx *client_ctx,
+		struct venc_dbcfg *dbcfg, u32 set_flag);
+
+u32 vid_enc_set_get_intrarefresh(struct video_client_ctx *client_ctx,
+		struct venc_intrarefresh *intrarefresh,	u32 set_flag);
+
+u32 vid_enc_set_get_multiclicecfg(struct video_client_ctx *client_ctx,
+		struct venc_multiclicecfg *multiclicecfg, u32 set_flag);
+
+u32 vid_enc_set_get_ratectrlcfg(struct video_client_ctx *client_ctx,
+		struct venc_ratectrlcfg *ratectrlcfg, u32 set_flag);
+
+u32 vid_enc_set_get_voptimingcfg(struct video_client_ctx *client_ctx,
+		struct  venc_voptimingcfg *voptimingcfg, u32 set_flag);
+
+u32 vid_enc_set_get_headerextension(struct video_client_ctx *client_ctx,
+		struct venc_headerextension *headerextension, u32 set_flag);
+
+u32 vid_enc_set_get_qprange(struct video_client_ctx *client_ctx,
+		struct venc_qprange *qprange, u32 set_flag);
+
+u32 vid_enc_start_stop(struct video_client_ctx *client_ctx, u32 start);
+
+u32 vid_enc_pause_resume(struct video_client_ctx *client_ctx, u32 pause);
+
+u32 vid_enc_flush(struct video_client_ctx *client_ctx,
+		struct venc_bufferflush *bufferflush);
+
+u32 vid_enc_get_buffer_req(struct video_client_ctx *client_ctx,
+		struct venc_allocatorproperty *venc_buf_req, u32 input_dir);
+
+u32 vid_enc_set_buffer_req(struct video_client_ctx *client_ctx,
+		struct venc_allocatorproperty *venc_buf_req, u32 input_dir);
+
+u32 vid_enc_set_buffer(struct video_client_ctx *client_ctx,
+		struct venc_bufferpayload *buffer_info,
+		enum venc_buffer_dir buffer);
+
+u32 vid_enc_free_buffer(struct video_client_ctx *client_ctx,
+		struct venc_bufferpayload *buffer_info,
+		enum venc_buffer_dir buffer);
+
+u32 vid_enc_encode_frame(struct video_client_ctx *client_ctx,
+		struct venc_buffer *input_frame_info);
+
+u32 vid_enc_fill_output_buffer(struct video_client_ctx *client_ctx,
+		struct venc_buffer *output_frame_info);
+
+u32 vid_enc_set_recon_buffers(struct video_client_ctx *client_ctx,
+		struct venc_recon_addr *venc_recon);
+
+u32 vid_enc_free_recon_buffers(struct video_client_ctx *client_ctx);
+
+u32 vid_enc_get_recon_buffer_size(struct video_client_ctx *client_ctx,
+		struct venc_recon_buff_size *venc_recon_size);
+
+#endif
diff --git a/drivers/video/msm/vidc/common/init/vidc_init.c b/drivers/video/msm/vidc/common/init/vidc_init.c
new file mode 100644
index 0000000..cda3a91
--- /dev/null
+++ b/drivers/video/msm/vidc/common/init/vidc_init.c
@@ -0,0 +1,620 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/android_pmem.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <mach/clk.h>
+#include <linux/pm_runtime.h>
+
+#include "vcd_api.h"
+#include "vidc_init_internal.h"
+#include "vidc_init.h"
+#include "vcd_res_tracker_api.h"
+
+#if DEBUG
+#define DBG(x...) printk(KERN_DEBUG x)
+#else
+#define DBG(x...)
+#endif
+
+#define VIDC_NAME "msm_vidc_reg"
+
+#define ERR(x...) printk(KERN_ERR x)
+
+static struct vidc_dev *vidc_device_p;
+static dev_t vidc_dev_num;
+static struct class *vidc_class;
+
+static const struct file_operations vidc_fops = {
+	.owner = THIS_MODULE,
+	.open = NULL,
+	.release = NULL,
+	.unlocked_ioctl = NULL,
+};
+
+struct workqueue_struct *vidc_wq;
+struct workqueue_struct *vidc_timer_wq;
+static irqreturn_t vidc_isr(int irq, void *dev);
+static spinlock_t vidc_spin_lock;
+
+u32 vidc_msg_timing, vidc_msg_pmem;
+
+#ifdef VIDC_ENABLE_DBGFS
+struct dentry *vidc_debugfs_root;
+
+struct dentry *vidc_get_debugfs_root(void)
+{
+	if (vidc_debugfs_root == NULL)
+		vidc_debugfs_root = debugfs_create_dir("vidc", NULL);
+	return vidc_debugfs_root;
+}
+
+void vidc_debugfs_file_create(struct dentry *root, const char *name,
+				u32 *var)
+{
+	struct dentry *vidc_debugfs_file =
+	    debugfs_create_u32(name, S_IRUGO | S_IWUSR, root, var);
+	if (!vidc_debugfs_file)
+		ERR("%s(): Error creating/opening file %s\n", __func__, name);
+}
+#endif
+
+static void vidc_timer_fn(unsigned long data)
+{
+	unsigned long flag;
+	struct vidc_timer *hw_timer = NULL;
+	ERR("%s() Timer expired\n", __func__);
+	spin_lock_irqsave(&vidc_spin_lock, flag);
+	hw_timer = (struct vidc_timer *)data;
+	list_add_tail(&hw_timer->list, &vidc_device_p->vidc_timer_queue);
+	spin_unlock_irqrestore(&vidc_spin_lock, flag);
+	DBG("Queue the work for timer\n");
+	queue_work(vidc_timer_wq, &vidc_device_p->vidc_timer_worker);
+}
+
+static void vidc_timer_handler(struct work_struct *work)
+{
+	unsigned long flag = 0;
+	u32 islist_empty = 0;
+	struct vidc_timer *hw_timer = NULL;
+
+	ERR("%s() Timer expired\n", __func__);
+	do {
+		spin_lock_irqsave(&vidc_spin_lock, flag);
+		islist_empty = list_empty(&vidc_device_p->vidc_timer_queue);
+		if (!islist_empty) {
+			hw_timer = list_first_entry(
+				&vidc_device_p->vidc_timer_queue,
+				struct vidc_timer, list);
+			list_del(&hw_timer->list);
+		}
+		spin_unlock_irqrestore(&vidc_spin_lock, flag);
+		if (!islist_empty && hw_timer && hw_timer->cb_func)
+			hw_timer->cb_func(hw_timer->userdata);
+	} while (!islist_empty);
+}
+
+static void vidc_work_handler(struct work_struct *work)
+{
+	DBG("vidc_work_handler()");
+	vcd_read_and_clear_interrupt();
+	vcd_response_handler();
+	enable_irq(vidc_device_p->irq);
+	DBG("vidc_work_handler() done");
+}
+
+static DECLARE_WORK(vidc_work, vidc_work_handler);
+
+static int __devinit vidc_720p_probe(struct platform_device *pdev)
+{
+	struct resource *resource;
+	DBG("Enter %s()\n", __func__);
+
+	if (pdev->id) {
+		ERR("Invalid platform device ID = %d\n", pdev->id);
+		return -EINVAL;
+	}
+	vidc_device_p->irq = platform_get_irq(pdev, 0);
+	if (unlikely(vidc_device_p->irq < 0)) {
+		ERR("%s(): Invalid irq = %d\n", __func__,
+					 vidc_device_p->irq);
+		return -ENXIO;
+	}
+
+	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (unlikely(!resource)) {
+		ERR("%s(): Invalid resource\n", __func__);
+		return -ENXIO;
+	}
+
+	vidc_device_p->phys_base = resource->start;
+	vidc_device_p->virt_base = ioremap(resource->start,
+	resource->end - resource->start + 1);
+
+	if (!vidc_device_p->virt_base) {
+		ERR("%s() : ioremap failed\n", __func__);
+		return -ENOMEM;
+	}
+	vidc_device_p->device = &pdev->dev;
+	mutex_init(&vidc_device_p->lock);
+
+	vidc_wq = create_singlethread_workqueue("vidc_worker_queue");
+	if (!vidc_wq) {
+		ERR("%s: create workqueue failed\n", __func__);
+		return -ENOMEM;
+	}
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	return 0;
+}
+
+static int __devexit vidc_720p_remove(struct platform_device *pdev)
+{
+	if (pdev->id) {
+		ERR("Invalid platform device ID = %d\n", pdev->id);
+		return -EINVAL;
+	}
+	pm_runtime_disable(&pdev->dev);
+
+	return 0;
+}
+
+static int vidc_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: suspending...\n");
+	return 0;
+}
+
+static int vidc_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: resuming...\n");
+	return 0;
+}
+
+static const struct dev_pm_ops vidc_dev_pm_ops = {
+	.runtime_suspend = vidc_runtime_suspend,
+	.runtime_resume = vidc_runtime_resume,
+};
+
+static struct platform_driver msm_vidc_720p_platform_driver = {
+	.probe = vidc_720p_probe,
+	.remove = vidc_720p_remove,
+	.driver = {
+		.name = "msm_vidc",
+		.pm   = &vidc_dev_pm_ops,
+	},
+};
+
+static void __exit vidc_exit(void)
+{
+	platform_driver_unregister(&msm_vidc_720p_platform_driver);
+}
+
+static irqreturn_t vidc_isr(int irq, void *dev)
+{
+	DBG("\n vidc_isr() %d ", irq);
+	disable_irq_nosync(irq);
+	queue_work(vidc_wq, &vidc_work);
+	return IRQ_HANDLED;
+}
+
+static int __init vidc_init(void)
+{
+	int rc = 0;
+	struct device *class_devp;
+#ifdef VIDC_ENABLE_DBGFS
+	struct dentry *root = NULL;
+#endif
+
+	vidc_device_p = kzalloc(sizeof(struct vidc_dev), GFP_KERNEL);
+	if (!vidc_device_p) {
+		ERR("%s Unable to allocate memory for vidc_dev\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	rc = alloc_chrdev_region(&vidc_dev_num, 0, 1, VIDC_NAME);
+	if (rc < 0) {
+		ERR("%s: alloc_chrdev_region Failed rc = %d\n",
+			__func__, rc);
+		goto error_vidc_alloc_chrdev_region;
+	}
+
+	vidc_class = class_create(THIS_MODULE, VIDC_NAME);
+	if (IS_ERR(vidc_class)) {
+		rc = PTR_ERR(vidc_class);
+		ERR("%s: couldn't create vidc_class rc = %d\n",
+		__func__, rc);
+
+		goto error_vidc_class_create;
+	}
+
+	class_devp = device_create(vidc_class, NULL, vidc_dev_num, NULL,
+					VIDC_NAME);
+
+	if (IS_ERR(class_devp)) {
+		rc = PTR_ERR(class_devp);
+		ERR("%s: class device_create failed %d\n",
+			__func__, rc);
+		goto error_vidc_class_device_create;
+	}
+
+	cdev_init(&vidc_device_p->cdev, &vidc_fops);
+	vidc_device_p->cdev.owner = THIS_MODULE;
+	rc = cdev_add(&(vidc_device_p->cdev), vidc_dev_num, 1);
+
+	if (rc < 0) {
+		ERR("%s: cdev_add failed %d\n", __func__, rc);
+		goto error_vidc_cdev_add;
+	}
+
+	rc = platform_driver_register(&msm_vidc_720p_platform_driver);
+	if (rc) {
+		ERR("%s failed to load\n", __func__);
+		goto error_vidc_platform_register;
+	}
+
+	rc = request_irq(vidc_device_p->irq, vidc_isr, IRQF_TRIGGER_HIGH,
+			 "vidc", vidc_device_p->device);
+
+	if (unlikely(rc)) {
+		ERR("%s() :request_irq failed\n", __func__);
+		goto error_vidc_platform_register;
+	}
+	res_trk_init(vidc_device_p->device, vidc_device_p->irq);
+	vidc_timer_wq = create_singlethread_workqueue("vidc_timer_wq");
+	if (!vidc_timer_wq) {
+		ERR("%s: create workqueue failed\n", __func__);
+		rc = -ENOMEM;
+		goto error_vidc_platform_register;
+	}
+	DBG("Disabling IRQ in %s()\n", __func__);
+	disable_irq_nosync(vidc_device_p->irq);
+	INIT_WORK(&vidc_device_p->vidc_timer_worker,
+			  vidc_timer_handler);
+	spin_lock_init(&vidc_spin_lock);
+	INIT_LIST_HEAD(&vidc_device_p->vidc_timer_queue);
+
+	vidc_device_p->ref_count = 0;
+	vidc_device_p->firmware_refcount = 0;
+	vidc_device_p->get_firmware = 0;
+#ifdef VIDC_ENABLE_DBGFS
+	root = vidc_get_debugfs_root();
+	if (root) {
+		vidc_debugfs_file_create(root, "vidc_msg_timing",
+				(u32 *) &vidc_msg_timing);
+		vidc_debugfs_file_create(root, "vidc_msg_pmem",
+				(u32 *) &vidc_msg_pmem);
+	}
+#endif
+	return 0;
+
+error_vidc_platform_register:
+	cdev_del(&(vidc_device_p->cdev));
+error_vidc_cdev_add:
+	device_destroy(vidc_class, vidc_dev_num);
+error_vidc_class_device_create:
+	class_destroy(vidc_class);
+error_vidc_class_create:
+	unregister_chrdev_region(vidc_dev_num, 1);
+error_vidc_alloc_chrdev_region:
+	kfree(vidc_device_p);
+
+	return rc;
+}
+
+void __iomem *vidc_get_ioaddr(void)
+{
+	return (u8 *)vidc_device_p->virt_base;
+}
+EXPORT_SYMBOL(vidc_get_ioaddr);
+
+int vidc_load_firmware(void)
+{
+	u32 status = true;
+
+	mutex_lock(&vidc_device_p->lock);
+	if (!vidc_device_p->get_firmware) {
+		status = res_trk_download_firmware();
+		if (!status)
+			goto error;
+		vidc_device_p->get_firmware = 1;
+	}
+	vidc_device_p->firmware_refcount++;
+error:
+	mutex_unlock(&vidc_device_p->lock);
+	return status;
+}
+EXPORT_SYMBOL(vidc_load_firmware);
+
+void vidc_release_firmware(void)
+{
+	mutex_lock(&vidc_device_p->lock);
+	if (vidc_device_p->firmware_refcount > 0)
+		vidc_device_p->firmware_refcount--;
+	else
+		vidc_device_p->firmware_refcount = 0;
+	mutex_unlock(&vidc_device_p->lock);
+}
+EXPORT_SYMBOL(vidc_release_firmware);
+
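+/*
+ * Look up a buffer in the client's input or output address table,
+ * either by user virtual address or by kernel virtual address, and
+ * return the remaining mapping details for the entry found.
+ */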
+u32 vidc_lookup_addr_table(struct video_client_ctx *client_ctx,
+	enum buffer_dir buffer,
+	u32 search_with_user_vaddr,
+	unsigned long *user_vaddr,
+	unsigned long *kernel_vaddr,
+	unsigned long *phy_addr, int *pmem_fd,
+	struct file **file, s32 *buffer_index)
+{
+	u32 num_of_buffers;
+	u32 i;
+	struct buf_addr_table *buf_addr_table;
+	u32 found = false;
+
+	if (!client_ctx)
+		return false;
+
+	if (buffer == BUFFER_TYPE_INPUT) {
+		buf_addr_table = client_ctx->input_buf_addr_table;
+		num_of_buffers = client_ctx->num_of_input_buffers;
+		DBG("%s(): buffer = INPUT\n", __func__);
+
+	} else {
+		buf_addr_table = client_ctx->output_buf_addr_table;
+		num_of_buffers = client_ctx->num_of_output_buffers;
+		DBG("%s(): buffer = OUTPUT\n", __func__);
+	}
+
+	for (i = 0; i < num_of_buffers; ++i) {
+		if (search_with_user_vaddr) {
+			if (*user_vaddr == buf_addr_table[i].user_vaddr) {
+				*kernel_vaddr = buf_addr_table[i].kernel_vaddr;
+				found = true;
+				DBG("%s() : client_ctx = %p."
+				" user_virt_addr = 0x%08lx is found",
+				__func__, client_ctx, *user_vaddr);
+				break;
+			}
+		} else {
+			if (*kernel_vaddr == buf_addr_table[i].kernel_vaddr) {
+				*user_vaddr = buf_addr_table[i].user_vaddr;
+				found = true;
+				DBG("%s() : client_ctx = %p."
+				" kernel_virt_addr = 0x%08lx is found",
+				__func__, client_ctx, *kernel_vaddr);
+				break;
+			}
+		}
+	}
+
+	if (found) {
+		*phy_addr = buf_addr_table[i].phy_addr;
+		*pmem_fd = buf_addr_table[i].pmem_fd;
+		*file = buf_addr_table[i].file;
+		*buffer_index = i;
+
+		if (search_with_user_vaddr)
+			DBG("kernel_vaddr = 0x%08lx, phy_addr = 0x%08lx "
+			" pmem_fd = %d, struct *file	= %p "
+			"buffer_index = %d\n", *kernel_vaddr,
+			*phy_addr, *pmem_fd, *file, *buffer_index);
+		else
+			DBG("user_vaddr = 0x%08lx, phy_addr = 0x%08lx "
+			" pmem_fd = %d, struct *file	= %p "
+			"buffer_index = %d\n", *user_vaddr, *phy_addr,
+			*pmem_fd, *file, *buffer_index);
+		return true;
+	} else {
+		if (search_with_user_vaddr)
+			DBG("%s() : client_ctx = %p user_virt_addr = 0x%08lx"
+			" Not Found.\n", __func__, client_ctx, *user_vaddr);
+		else
+			DBG("%s() : client_ctx = %p kernel_virt_addr = 0x%08lx"
+			" Not Found.\n", __func__, client_ctx,
+			*kernel_vaddr);
+		return false;
+	}
+}
+EXPORT_SYMBOL(vidc_lookup_addr_table);
+
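+/*
+ * Add a new user buffer to the client's address table: resolve the pmem
+ * fd to physical and kernel addresses, apply the buffer offset and
+ * record the mapping, failing if the table is full or the user address
+ * is already registered.
+ */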
+u32 vidc_insert_addr_table(struct video_client_ctx *client_ctx,
+	enum buffer_dir buffer, unsigned long user_vaddr,
+	unsigned long *kernel_vaddr, int pmem_fd,
+	unsigned long buffer_addr_offset, unsigned int max_num_buffers)
+{
+	unsigned long len, phys_addr;
+	struct file *file;
+	u32 *num_of_buffers = NULL;
+	u32 i;
+	struct buf_addr_table *buf_addr_table;
+
+	if (!client_ctx)
+		return false;
+
+	if (buffer == BUFFER_TYPE_INPUT) {
+		buf_addr_table = client_ctx->input_buf_addr_table;
+		num_of_buffers = &client_ctx->num_of_input_buffers;
+		DBG("%s(): buffer = INPUT #Buf = %d\n",
+			__func__, *num_of_buffers);
+
+	} else {
+		buf_addr_table = client_ctx->output_buf_addr_table;
+		num_of_buffers = &client_ctx->num_of_output_buffers;
+		DBG("%s(): buffer = OUTPUT #Buf = %d\n",
+			__func__, *num_of_buffers);
+	}
+
+	if (*num_of_buffers == max_num_buffers) {
+		ERR("%s(): Num of buffers reached max value : %d",
+			__func__, max_num_buffers);
+		return false;
+	}
+
+	i = 0;
+	while (i < *num_of_buffers &&
+		user_vaddr != buf_addr_table[i].user_vaddr)
+		i++;
+	if (i < *num_of_buffers) {
+		DBG("%s() : client_ctx = %p."
+			" user_virt_addr = 0x%08lx already set",
+			__func__, client_ctx, user_vaddr);
+		return false;
+	} else {
+		if (get_pmem_file(pmem_fd, &phys_addr,
+				kernel_vaddr, &len, &file)) {
+			ERR("%s(): get_pmem_file failed\n", __func__);
+			return false;
+		}
+		put_pmem_file(file);
+		phys_addr += buffer_addr_offset;
+		(*kernel_vaddr) += buffer_addr_offset;
+		buf_addr_table[*num_of_buffers].user_vaddr = user_vaddr;
+		buf_addr_table[*num_of_buffers].kernel_vaddr = *kernel_vaddr;
+		buf_addr_table[*num_of_buffers].pmem_fd = pmem_fd;
+		buf_addr_table[*num_of_buffers].file = file;
+		buf_addr_table[*num_of_buffers].phy_addr = phys_addr;
+		*num_of_buffers = *num_of_buffers + 1;
+		DBG("%s() : client_ctx = %p, user_virt_addr = 0x%08lx, "
+			"kernel_vaddr = 0x%08lx inserted!",	__func__,
+			client_ctx, user_vaddr, *kernel_vaddr);
+	}
+	return true;
+}
+EXPORT_SYMBOL(vidc_insert_addr_table);
+
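+/*
+ * Remove a user buffer from the client's address table, returning its
+ * kernel mapping; the last entry is moved into the freed slot to keep
+ * the table compact.
+ */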
+u32 vidc_delete_addr_table(struct video_client_ctx *client_ctx,
+	enum buffer_dir buffer,
+	unsigned long user_vaddr,
+	unsigned long *kernel_vaddr)
+{
+	u32 *num_of_buffers = NULL;
+	u32 i;
+	struct buf_addr_table *buf_addr_table;
+
+	if (!client_ctx)
+		return false;
+
+	if (buffer == BUFFER_TYPE_INPUT) {
+		buf_addr_table = client_ctx->input_buf_addr_table;
+		num_of_buffers = &client_ctx->num_of_input_buffers;
+		DBG("%s(): buffer = INPUT\n", __func__);
+
+	} else {
+		buf_addr_table = client_ctx->output_buf_addr_table;
+		num_of_buffers = &client_ctx->num_of_output_buffers;
+		DBG("%s(): buffer = OUTPUT\n", __func__);
+	}
+
+	if (!*num_of_buffers)
+		return false;
+
+	i = 0;
+	while (i < *num_of_buffers &&
+		user_vaddr != buf_addr_table[i].user_vaddr)
+		i++;
+	if (i == *num_of_buffers) {
+		DBG("%s() : client_ctx = %p."
+			" user_virt_addr = 0x%08lx NOT found",
+			__func__, client_ctx, user_vaddr);
+		return false;
+	}
+	*kernel_vaddr = buf_addr_table[i].kernel_vaddr;
+	if (i < (*num_of_buffers - 1)) {
+		buf_addr_table[i].user_vaddr =
+			buf_addr_table[*num_of_buffers - 1].user_vaddr;
+		buf_addr_table[i].kernel_vaddr =
+			buf_addr_table[*num_of_buffers - 1].kernel_vaddr;
+		buf_addr_table[i].phy_addr =
+			buf_addr_table[*num_of_buffers - 1].phy_addr;
+		buf_addr_table[i].pmem_fd =
+			buf_addr_table[*num_of_buffers - 1].pmem_fd;
+		buf_addr_table[i].file =
+			buf_addr_table[*num_of_buffers - 1].file;
+	}
+	*num_of_buffers = *num_of_buffers - 1;
+	DBG("%s() : client_ctx = %p."
+		" user_virt_addr = 0x%08lx is found and deleted",
+		__func__, client_ctx, user_vaddr);
+	return true;
+}
+EXPORT_SYMBOL(vidc_delete_addr_table);
+
+u32 vidc_timer_create(void (*timer_handler)(void *),
+	void *user_data, void **timer_handle)
+{
+	struct vidc_timer *hw_timer = NULL;
+	if (!timer_handler || !timer_handle) {
+		DBG("%s(): timer creation failed\n ", __func__);
+		return false;
+	}
+	hw_timer = kzalloc(sizeof(struct vidc_timer), GFP_KERNEL);
+	if (!hw_timer) {
+		DBG("%s(): timer creation failed in allocation\n ", __func__);
+		return false;
+	}
+	init_timer(&hw_timer->hw_timeout);
+	hw_timer->hw_timeout.data = (unsigned long)hw_timer;
+	hw_timer->hw_timeout.function = vidc_timer_fn;
+	hw_timer->cb_func = timer_handler;
+	hw_timer->userdata = user_data;
+	*timer_handle = hw_timer;
+	return true;
+}
+EXPORT_SYMBOL(vidc_timer_create);
+
+void  vidc_timer_release(void *timer_handle)
+{
+	kfree(timer_handle);
+}
+EXPORT_SYMBOL(vidc_timer_release);
+
+void  vidc_timer_start(void *timer_handle, u32 time_out)
+{
+	struct vidc_timer *hw_timer = (struct vidc_timer *)timer_handle;
+	DBG("%s(): start timer\n ", __func__);
+	if (hw_timer) {
+		hw_timer->hw_timeout.expires = jiffies + 1*HZ;
+		add_timer(&hw_timer->hw_timeout);
+	}
+}
+EXPORT_SYMBOL(vidc_timer_start);
+
+void  vidc_timer_stop(void *timer_handle)
+{
+	struct vidc_timer *hw_timer = (struct vidc_timer *)timer_handle;
+	DBG("%s(): stop timer\n ", __func__);
+	if (hw_timer)
+		del_timer(&hw_timer->hw_timeout);
+}
+EXPORT_SYMBOL(vidc_timer_stop);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Video decoder/encoder driver Init Module");
+MODULE_VERSION("1.0");
+module_init(vidc_init);
+module_exit(vidc_exit);
diff --git a/drivers/video/msm/vidc/common/init/vidc_init.h b/drivers/video/msm/vidc/common/init/vidc_init.h
new file mode 100644
index 0000000..c472718
--- /dev/null
+++ b/drivers/video/msm/vidc/common/init/vidc_init.h
@@ -0,0 +1,76 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef VIDC_INIT_H
+#define VIDC_INIT_H
+
+#include "vidc_type.h"
+
+#define VIDC_MAX_NUM_CLIENTS 4
+#define MAX_VIDEO_NUM_OF_BUFF 100
+
+enum buffer_dir {
+	BUFFER_TYPE_INPUT,
+	BUFFER_TYPE_OUTPUT
+};
+
+struct buf_addr_table {
+	unsigned long user_vaddr;
+	unsigned long kernel_vaddr;
+	unsigned long phy_addr;
+	int pmem_fd;
+	struct file *file;
+};
+
+struct video_client_ctx {
+	void *vcd_handle;
+	u32 num_of_input_buffers;
+	u32 num_of_output_buffers;
+	struct buf_addr_table input_buf_addr_table[MAX_VIDEO_NUM_OF_BUFF];
+	struct buf_addr_table output_buf_addr_table[MAX_VIDEO_NUM_OF_BUFF];
+	struct list_head msg_queue;
+	struct mutex msg_queue_lock;
+	wait_queue_head_t msg_wait;
+	struct completion event;
+	u32 event_status;
+	u32 seq_header_set;
+	u32 stop_msg;
+	u32 stop_called;
+	u32 stop_sync_cb;
+};
+
+void __iomem *vidc_get_ioaddr(void);
+int vidc_load_firmware(void);
+void vidc_release_firmware(void);
+u32 vidc_lookup_addr_table(struct video_client_ctx *client_ctx,
+	enum buffer_dir buffer, u32 search_with_user_vaddr,
+	unsigned long *user_vaddr, unsigned long *kernel_vaddr,
+	unsigned long *phy_addr, int *pmem_fd, struct file **file,
+	s32 *buffer_index);
+u32 vidc_insert_addr_table(struct video_client_ctx *client_ctx,
+	enum buffer_dir buffer, unsigned long user_vaddr,
+	unsigned long *kernel_vaddr, int pmem_fd,
+	unsigned long buffer_addr_offset,
+	unsigned int max_num_buffers);
+u32 vidc_delete_addr_table(struct video_client_ctx *client_ctx,
+	enum buffer_dir buffer, unsigned long user_vaddr,
+	unsigned long *kernel_vaddr);
+
+u32 vidc_timer_create(void (*timer_handler)(void *),
+	void *user_data, void **timer_handle);
+void  vidc_timer_release(void *timer_handle);
+void  vidc_timer_start(void *timer_handle, u32 time_out);
+void  vidc_timer_stop(void *timer_handle);
+
+
+#endif
diff --git a/drivers/video/msm/vidc/common/init/vidc_init_internal.h b/drivers/video/msm/vidc/common/init/vidc_init_internal.h
new file mode 100644
index 0000000..1d903ad
--- /dev/null
+++ b/drivers/video/msm/vidc/common/init/vidc_init_internal.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef VIDC_INIT_INTERNAL_H
+#define VIDC_INIT_INTERNAL_H
+
+#include <linux/cdev.h>
+
+struct vidc_timer {
+	struct list_head list;
+	struct timer_list hw_timeout;
+	void (*cb_func)(void *);
+	void *userdata;
+};
+
+struct vidc_dev {
+	struct cdev cdev;
+	struct device *device;
+	resource_size_t phys_base;
+	void __iomem *virt_base;
+	unsigned int irq;
+	unsigned int ref_count;
+	unsigned int firmware_refcount;
+	unsigned int get_firmware;
+	struct mutex lock;
+	s32 device_handle;
+	struct list_head vidc_timer_queue;
+	struct work_struct vidc_timer_worker;
+};
+
+#endif
diff --git a/drivers/video/msm/vidc/common/vcd/vcd.h b/drivers/video/msm/vidc/common/vcd/vcd.h
new file mode 100644
index 0000000..b557752
--- /dev/null
+++ b/drivers/video/msm/vidc/common/vcd/vcd.h
@@ -0,0 +1,393 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _VCD_H_
+#define _VCD_H_
+
+#include "vcd_api.h"
+#include "vcd_ddl_api.h"
+#include "vcd_res_tracker_api.h"
+#include "vcd_util.h"
+#include "vcd_client_sm.h"
+#include "vcd_core.h"
+#include "vcd_device_sm.h"
+
+void vcd_reset_device_channels(struct vcd_dev_ctxt *dev_ctxt);
+
+u32 vcd_get_command_channel
+    (struct vcd_dev_ctxt *dev_ctxt, struct vcd_transc **transc);
+
+u32 vcd_get_command_channel_in_loop
+    (struct vcd_dev_ctxt *dev_ctxt, struct vcd_transc **transc);
+
+void vcd_mark_command_channel
+    (struct vcd_dev_ctxt *dev_ctxt, struct vcd_transc *transc);
+
+void vcd_release_command_channel
+    (struct vcd_dev_ctxt *dev_ctxt, struct vcd_transc *transc);
+
+void vcd_release_multiple_command_channels(struct vcd_dev_ctxt *dev_ctxt,
+		u32 channels);
+
+void vcd_release_interim_command_channels(struct vcd_dev_ctxt *dev_ctxt);
+
+u32 vcd_get_frame_channel
+    (struct vcd_dev_ctxt *dev_ctxt, struct vcd_transc **transc);
+
+u32 vcd_get_frame_channel_in_loop
+    (struct vcd_dev_ctxt *dev_ctxt, struct vcd_transc **transc);
+
+void vcd_mark_frame_channel(struct vcd_dev_ctxt *dev_ctxt);
+
+void vcd_release_frame_channel
+    (struct vcd_dev_ctxt *dev_ctxt, struct vcd_transc *transc);
+
+void vcd_release_multiple_frame_channels(struct vcd_dev_ctxt *dev_ctxt,
+		u32 channels);
+
+void vcd_release_interim_frame_channels(struct vcd_dev_ctxt *dev_ctxt);
+u32 vcd_core_is_busy(struct vcd_dev_ctxt *dev_ctxt);
+
+void vcd_device_timer_start(struct vcd_dev_ctxt *dev_ctxt);
+void vcd_device_timer_stop(struct vcd_dev_ctxt *dev_ctxt);
+
+
+u32 vcd_init_device_context
+    (struct vcd_drv_ctxt *drv_ctxt, u32 ev_code);
+
+u32 vcd_deinit_device_context
+    (struct vcd_drv_ctxt *drv_ctxt, u32 ev_code);
+
+u32 vcd_init_client_context(struct vcd_clnt_ctxt *cctxt);
+
+void vcd_destroy_client_context(struct vcd_clnt_ctxt *cctxt);
+
+u32 vcd_check_for_client_context
+    (struct vcd_dev_ctxt *dev_ctxt, s32 driver_id);
+
+u32 vcd_validate_driver_handle
+    (struct vcd_dev_ctxt *dev_ctxt, s32 driver_handle);
+
+void vcd_handle_for_last_clnt_close
+	(struct vcd_dev_ctxt *dev_ctxt, u32 send_deinit);
+
+u32 vcd_common_allocate_set_buffer
+    (struct vcd_clnt_ctxt *cctxt,
+     enum vcd_buffer_type buffer,
+     u32 buf_size, struct vcd_buffer_pool **buf_pool);
+
+u32 vcd_set_buffer_internal
+    (struct vcd_clnt_ctxt *cctxt,
+     struct vcd_buffer_pool *buf_pool, u8 *buffer, u32 buf_size);
+
+u32 vcd_allocate_buffer_internal
+    (struct vcd_clnt_ctxt *cctxt,
+     struct vcd_buffer_pool *buf_pool,
+     u32 buf_size, u8 **vir_buf_addr, u8 **phy_buf_addr);
+
+u32 vcd_free_one_buffer_internal
+    (struct vcd_clnt_ctxt *cctxt,
+     enum vcd_buffer_type buffer_type, u8 *buffer);
+
+u32 vcd_free_buffers_internal
+    (struct vcd_clnt_ctxt *cctxt,
+     struct vcd_buffer_pool *buf_pool);
+
+u32 vcd_alloc_buffer_pool_entries
+    (struct vcd_buffer_pool *buf_pool,
+     struct vcd_buffer_requirement *buf_req);
+
+void vcd_free_buffer_pool_entries(struct vcd_buffer_pool *buf_pool);
+
+void vcd_flush_in_use_buffer_pool_entries(struct vcd_clnt_ctxt *cctxt,
+	struct vcd_buffer_pool *buf_pool, u32 event);
+
+void vcd_reset_buffer_pool_for_reuse(struct vcd_buffer_pool *buf_pool);
+
+struct vcd_buffer_entry *vcd_get_free_buffer_pool_entry
+    (struct vcd_buffer_pool *pool);
+
+struct vcd_buffer_entry *vcd_find_buffer_pool_entry
+    (struct vcd_buffer_pool *pool, u8 *v_addr);
+
+struct vcd_buffer_entry *vcd_buffer_pool_entry_de_q
+    (struct vcd_buffer_pool *pool);
+
+u32 vcd_buffer_pool_entry_en_q
+    (struct vcd_buffer_pool *pool,
+     struct vcd_buffer_entry *entry);
+
+u32 vcd_check_if_buffer_req_met(struct vcd_clnt_ctxt *cctxt,
+	enum vcd_buffer_type buffer_type);
+
+u32 vcd_client_cmd_en_q
+    (struct vcd_clnt_ctxt *cctxt, enum vcd_command command);
+
+void vcd_client_cmd_flush_and_en_q
+    (struct vcd_clnt_ctxt *cctxt, enum vcd_command command);
+
+u32 vcd_client_cmd_de_q
+    (struct vcd_clnt_ctxt *cctxt, enum vcd_command *command);
+
+u32 vcd_handle_recvd_eos
+    (struct vcd_clnt_ctxt *cctxt,
+     struct vcd_frame_data *input_frame, u32 *pb_eos_handled);
+
+u32 vcd_handle_first_decode_frame(struct vcd_clnt_ctxt *cctxt);
+
+u32 vcd_handle_input_frame
+    (struct vcd_clnt_ctxt *cctxt,
+     struct vcd_frame_data *input_frame);
+
+u32 vcd_store_seq_hdr
+    (struct vcd_clnt_ctxt *cctxt,
+     struct vcd_sequence_hdr *seq_hdr);
+
+u32 vcd_set_frame_size
+    (struct vcd_clnt_ctxt *cctxt,
+     struct vcd_property_frame_size *frm_size);
+
+u32 vcd_set_frame_rate
+    (struct vcd_clnt_ctxt *cctxt,
+     struct vcd_property_frame_rate *fps);
+
+u32 vcd_calculate_frame_delta
+    (struct vcd_clnt_ctxt *cctxt, struct vcd_frame_data *frame);
+
+struct vcd_buffer_entry *vcd_check_fill_output_buffer
+    (struct vcd_clnt_ctxt *cctxt,
+     struct vcd_frame_data *buffer);
+
+u32 vcd_handle_first_fill_output_buffer
+    (struct vcd_clnt_ctxt *cctxt,
+     struct vcd_frame_data *buffer, u32 *b_handled);
+
+u32 vcd_handle_first_fill_output_buffer_for_enc
+    (struct vcd_clnt_ctxt *cctxt,
+     struct vcd_frame_data *frm_entry, u32 *b_handled);
+
+u32 vcd_handle_first_fill_output_buffer_for_dec
+    (struct vcd_clnt_ctxt *cctxt,
+     struct vcd_frame_data *frm_entry, u32 *b_handled);
+
+u32 vcd_schedule_frame(struct vcd_dev_ctxt *dev_ctxt,
+	struct vcd_clnt_ctxt **cctxt, struct vcd_buffer_entry
+	**ip_buf_entry);
+
+u32 vcd_submit_command_in_continue
+    (struct vcd_dev_ctxt *dev_ctxt, struct vcd_transc *transc);
+
+u32 vcd_submit_cmd_sess_start(struct vcd_transc *transc);
+
+u32 vcd_submit_cmd_sess_end(struct vcd_transc *transc);
+
+void vcd_submit_cmd_client_close(struct vcd_clnt_ctxt *cctxt);
+
+u32 vcd_submit_frame
+    (struct vcd_dev_ctxt *dev_ctxt, struct vcd_transc *transc);
+
+u32 vcd_try_submit_frame_in_continue(struct vcd_dev_ctxt *dev_ctxt,
+	struct vcd_transc *transc);
+
+u32 vcd_process_cmd_sess_start(struct vcd_clnt_ctxt *cctxt);
+
+void vcd_try_submit_frame(struct vcd_dev_ctxt *dev_ctxt);
+
+u32 vcd_setup_with_ddl_capabilities(struct vcd_dev_ctxt *dev_ctxt);
+void vcd_handle_submit_frame_failed(struct vcd_dev_ctxt *dev_ctxt,
+	struct vcd_transc *transc);
+
+struct vcd_transc *vcd_get_free_trans_tbl_entry
+    (struct vcd_dev_ctxt *dev_ctxt);
+
+void vcd_release_trans_tbl_entry(struct vcd_transc *trans_entry);
+
+void vcd_release_all_clnt_frm_transc(struct vcd_clnt_ctxt *cctxt);
+void vcd_release_all_clnt_transc(struct vcd_clnt_ctxt *cctxt);
+
+u32 vcd_handle_input_done
+    (struct vcd_clnt_ctxt *cctxt,
+     void *payload, u32 event, u32 status);
+
+u32 vcd_handle_input_done_in_eos
+    (struct vcd_clnt_ctxt *cctxt, void *payload, u32 status);
+
+void vcd_handle_input_done_failed
+    (struct vcd_clnt_ctxt *cctxt, struct vcd_transc *transc);
+
+void vcd_handle_input_done_with_codec_config
+	(struct vcd_clnt_ctxt *cctxt,
+	struct vcd_transc *transc,
+	struct ddl_frame_data_tag *frm);
+
+void vcd_handle_input_done_for_interlacing
+    (struct vcd_clnt_ctxt *cctxt);
+
+void vcd_handle_input_done_with_trans_end
+    (struct vcd_clnt_ctxt *cctxt);
+
+u32 vcd_handle_frame_done
+    (struct vcd_clnt_ctxt *cctxt,
+     void *payload, u32 event, u32 status);
+
+void vcd_handle_frame_done_for_interlacing
+    (struct vcd_clnt_ctxt *cctxt,
+     struct vcd_transc *transc_ip1,
+     struct ddl_frame_data_tag *op_frm, u32 status);
+
+
+u32 vcd_handle_frame_done_in_eos
+    (struct vcd_clnt_ctxt *cctxt, void *payload, u32 status);
+
+u32 vcd_handle_output_required(struct vcd_clnt_ctxt *cctxt,
+	void *payload, u32 status);
+
+u32 vcd_handle_output_required_in_flushing(struct vcd_clnt_ctxt *cctxt,
+	void *payload);
+
+u32 vcd_handle_output_req_tran_end_in_eos(struct vcd_clnt_ctxt *cctxt);
+
+u32 vcd_validate_io_done_pyld
+	(struct vcd_clnt_ctxt *cctxt, void *payload, u32 status);
+
+void vcd_handle_eos_trans_end(struct vcd_clnt_ctxt *cctxt);
+
+
+void vcd_handle_eos_done
+    (struct vcd_clnt_ctxt *cctxt,
+     struct vcd_transc *transc, u32 status);
+
+void vcd_send_frame_done_in_eos
+    (struct vcd_clnt_ctxt *cctxt,
+     struct vcd_frame_data *input_frame, u32 valid_opbuf);
+
+void vcd_send_frame_done_in_eos_for_dec
+    (struct vcd_clnt_ctxt *cctxt,
+     struct vcd_frame_data *input_frame);
+
+void vcd_send_frame_done_in_eos_for_enc
+    (struct vcd_clnt_ctxt *cctxt,
+     struct vcd_frame_data *input_frame);
+
+void vcd_handle_start_done(struct vcd_clnt_ctxt *cctxt,
+	struct vcd_transc *transc, u32 status);
+
+void vcd_handle_stop_done(struct vcd_clnt_ctxt *cctxt,
+	struct vcd_transc *transc, u32 status);
+
+void vcd_handle_stop_done_in_starting(struct vcd_clnt_ctxt *cctxt,
+	struct vcd_transc *transc, u32 status);
+
+void vcd_handle_stop_done_in_invalid(struct vcd_clnt_ctxt *cctxt,
+	struct vcd_transc *transc, u32 status);
+
+void vcd_send_flush_done(struct vcd_clnt_ctxt *cctxt, u32 status);
+
+void vcd_process_pending_flush_in_eos(struct vcd_clnt_ctxt *cctxt);
+
+void vcd_process_pending_stop_in_eos(struct vcd_clnt_ctxt *cctxt);
+
+void vcd_handle_trans_pending(struct vcd_clnt_ctxt *cctxt);
+
+u32 vcd_handle_ind_output_reconfig
+    (struct vcd_clnt_ctxt *cctxt, void *payload, u32 status);
+
+u32 vcd_handle_ind_output_reconfig_in_flushing
+    (struct vcd_clnt_ctxt *cctxt, void *payload, u32 status);
+
+void vcd_flush_output_buffers(struct vcd_clnt_ctxt *cctxt);
+
+void vcd_flush_bframe_buffers(struct vcd_clnt_ctxt *cctxt, u32 mode);
+
+u32 vcd_flush_buffers(struct vcd_clnt_ctxt *cctxt, u32 mode);
+void vcd_flush_buffers_in_err_fatal(struct vcd_clnt_ctxt *cctxt);
+
+u32 vcd_power_event
+    (struct vcd_dev_ctxt *dev_ctxt,
+     struct vcd_clnt_ctxt *cctxt, u32 event);
+
+u32 vcd_device_power_event(struct vcd_dev_ctxt *dev_ctxt, u32 event,
+	struct vcd_clnt_ctxt *cctxt);
+
+u32 vcd_client_power_event
+    (struct vcd_dev_ctxt *dev_ctxt,
+     struct vcd_clnt_ctxt *cctxt, u32 event);
+
+u32 vcd_enable_clock(struct vcd_dev_ctxt *dev_ctxt,
+	struct vcd_clnt_ctxt *cctxt);
+
+u32 vcd_disable_clock(struct vcd_dev_ctxt *dev_ctxt);
+
+u32 vcd_set_perf_level(struct vcd_dev_ctxt *dev_ctxt, u32 perf_lvl);
+
+u32 vcd_update_clnt_perf_lvl
+    (struct vcd_clnt_ctxt *cctxt,
+     struct vcd_property_frame_rate *fps, u32 frm_p_units);
+
+u32 vcd_gate_clock(struct vcd_dev_ctxt *dev_ctxt);
+
+u32 vcd_un_gate_clock(struct vcd_dev_ctxt *dev_ctxt);
+
+void vcd_handle_err_fatal(struct vcd_clnt_ctxt *cctxt,
+		u32 event, u32 status);
+
+void vcd_handle_device_err_fatal(struct vcd_dev_ctxt *dev_ctxt,
+		struct vcd_clnt_ctxt *cctxt);
+
+void vcd_clnt_handle_device_err_fatal(struct vcd_clnt_ctxt *cctxt,
+		u32 event);
+
+void vcd_handle_err_in_starting(struct vcd_clnt_ctxt *cctxt,
+		u32 status);
+
+void vcd_handle_ind_hw_err_fatal(struct vcd_clnt_ctxt *cctxt,
+		u32 event, u32 status);
+
+u32 vcd_return_op_buffer_to_hw(struct vcd_clnt_ctxt *cctxt,
+	struct vcd_buffer_entry *buf_entry);
+
+u32 vcd_sched_create(struct list_head *sched_list);
+
+void vcd_sched_destroy(struct list_head *sched_clnt_list);
+
+u32 vcd_sched_add_client(struct vcd_clnt_ctxt *cctxt);
+
+u32 vcd_sched_remove_client(struct vcd_sched_clnt_ctx *sched_cctxt);
+
+u32 vcd_sched_update_config(struct vcd_clnt_ctxt *cctxt);
+
+u32 vcd_sched_queue_buffer(
+	struct vcd_sched_clnt_ctx *sched_cctxt,
+	struct vcd_buffer_entry *buffer, u32 b_tail);
+
+u32 vcd_sched_dequeue_buffer(
+	struct vcd_sched_clnt_ctx *sched_cctxt,
+	struct vcd_buffer_entry **buffer);
+
+u32 vcd_sched_mark_client_eof(struct vcd_sched_clnt_ctx *sched_cctxt);
+
+u32 vcd_sched_suspend_resume_clnt(
+	struct vcd_clnt_ctxt *cctxt, u32 b_state);
+
+u32 vcd_sched_get_client_frame(struct list_head *sched_clnt_list,
+	struct vcd_clnt_ctxt **cctxt,
+	struct vcd_buffer_entry **buffer);
+
+void vcd_handle_clnt_fatal(struct vcd_clnt_ctxt *cctxt, u32 trans_end);
+
+void vcd_handle_clnt_fatal_input_done(struct vcd_clnt_ctxt *cctxt,
+	u32 trans_end);
+
+void vcd_handle_ind_info_output_reconfig
+	(struct vcd_clnt_ctxt *cctxt, u32 status);
+
+#endif
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_api.c b/drivers/video/msm/vidc/common/vcd/vcd_api.c
new file mode 100644
index 0000000..a57675e
--- /dev/null
+++ b/drivers/video/msm/vidc/common/vcd/vcd_api.c
@@ -0,0 +1,881 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "vidc_type.h"
+#include "vcd.h"
+
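+/*
+ * Each public entry point below follows the same pattern: validate the
+ * handle (and VCD_SIGNATURE for client handles), take the single driver
+ * context mutex, then dispatch through the event-handler table of the
+ * current device or client state.  A state with no handler for the call
+ * returns VCD_ERR_BAD_STATE.
+ */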
+u32 vcd_init(struct vcd_init_config *config, s32 *driver_handle)
+{
+	u32 rc = VCD_S_SUCCESS;
+	struct vcd_drv_ctxt *drv_ctxt;
+
+	VCD_MSG_MED("vcd_init:");
+
+	if (!config ||
+	    !driver_handle || !config->map_dev_base_addr) {
+		VCD_MSG_ERROR("Bad parameters");
+
+		return VCD_ERR_ILLEGAL_PARM;
+	}
+
+	drv_ctxt = vcd_get_drv_context();
+	mutex_init(&drv_ctxt->dev_mutex);
+	mutex_lock(&drv_ctxt->dev_mutex);
+
+	if (drv_ctxt->dev_state.state_table->ev_hdlr.init) {
+		rc = drv_ctxt->dev_state.state_table->ev_hdlr.
+		    init(drv_ctxt, config, driver_handle);
+	} else {
+		VCD_MSG_ERROR("Unsupported API in device state %d",
+			      drv_ctxt->dev_state.state);
+
+		rc = VCD_ERR_BAD_STATE;
+	}
+
+	mutex_unlock(&drv_ctxt->dev_mutex);
+
+	return rc;
+
+}
+EXPORT_SYMBOL(vcd_init);
+
+u32 vcd_term(s32 driver_handle)
+{
+	u32 rc = VCD_S_SUCCESS;
+	struct vcd_drv_ctxt *drv_ctxt;
+
+	VCD_MSG_MED("vcd_term:");
+
+	drv_ctxt = vcd_get_drv_context();
+	mutex_lock(&drv_ctxt->dev_mutex);
+
+	if (drv_ctxt->dev_state.state_table->ev_hdlr.term) {
+		rc = drv_ctxt->dev_state.state_table->ev_hdlr.
+		    term(drv_ctxt, driver_handle);
+	} else {
+		VCD_MSG_ERROR("Unsupported API in device state %d",
+			      drv_ctxt->dev_state.state);
+
+		rc = VCD_ERR_BAD_STATE;
+	}
+	mutex_unlock(&drv_ctxt->dev_mutex);
+	return rc;
+
+}
+EXPORT_SYMBOL(vcd_term);
+
+u32 vcd_open(s32 driver_handle, u32 decoding,
+	void (*callback) (u32 event, u32 status, void *info, size_t sz,
+		       void *handle, void *const client_data),
+	void *client_data)
+{
+	u32 rc = VCD_S_SUCCESS;
+	struct vcd_drv_ctxt *drv_ctxt;
+
+	VCD_MSG_MED("vcd_open:");
+
+	if (!callback) {
+		VCD_MSG_ERROR("Bad parameters");
+
+		return VCD_ERR_ILLEGAL_PARM;
+	}
+
+	drv_ctxt = vcd_get_drv_context();
+	mutex_lock(&drv_ctxt->dev_mutex);
+
+	if (drv_ctxt->dev_state.state_table->ev_hdlr.open) {
+		rc = drv_ctxt->dev_state.state_table->ev_hdlr.
+		    open(drv_ctxt, driver_handle, decoding, callback,
+			    client_data);
+	} else {
+		VCD_MSG_ERROR("Unsupported API in device state %d",
+			      drv_ctxt->dev_state.state);
+
+		rc = VCD_ERR_BAD_STATE;
+	}
+
+	mutex_unlock(&drv_ctxt->dev_mutex);
+
+	return rc;
+
+}
+EXPORT_SYMBOL(vcd_open);
+
+u32 vcd_close(void *handle)
+{
+	struct vcd_clnt_ctxt *cctxt =
+	    (struct vcd_clnt_ctxt *)handle;
+	struct vcd_drv_ctxt *drv_ctxt;
+	u32 rc;
+
+	VCD_MSG_MED("vcd_close:");
+
+	if (!cctxt || cctxt->signature != VCD_SIGNATURE) {
+		VCD_MSG_ERROR("Bad client handle");
+
+		return VCD_ERR_BAD_HANDLE;
+	}
+
+	drv_ctxt = vcd_get_drv_context();
+	mutex_lock(&drv_ctxt->dev_mutex);
+	if (drv_ctxt->dev_state.state_table->ev_hdlr.close) {
+		rc = drv_ctxt->dev_state.state_table->ev_hdlr.
+		    close(drv_ctxt, cctxt);
+	} else {
+		VCD_MSG_ERROR("Unsupported API in device state %d",
+			      drv_ctxt->dev_state.state);
+
+		rc = VCD_ERR_BAD_STATE;
+	}
+	mutex_unlock(&drv_ctxt->dev_mutex);
+	return rc;
+
+}
+EXPORT_SYMBOL(vcd_close);
+
+u32 vcd_encode_start(void *handle)
+{
+	struct vcd_clnt_ctxt *cctxt =
+	    (struct vcd_clnt_ctxt *)handle;
+	struct vcd_drv_ctxt *drv_ctxt;
+	u32 rc;
+
+	VCD_MSG_MED("vcd_encode_start:");
+
+	if (!cctxt || cctxt->signature != VCD_SIGNATURE) {
+		VCD_MSG_ERROR("Bad client handle");
+
+		return VCD_ERR_BAD_HANDLE;
+	}
+
+	drv_ctxt = vcd_get_drv_context();
+
+	mutex_lock(&drv_ctxt->dev_mutex);
+
+	if (cctxt->clnt_state.state_table->ev_hdlr.encode_start &&
+	    drv_ctxt->dev_ctxt.pwr_state != VCD_PWR_STATE_SLEEP) {
+		rc = cctxt->clnt_state.state_table->ev_hdlr.
+		    encode_start(cctxt);
+	} else {
+		VCD_MSG_ERROR
+		    ("Unsupported API in dev power state %d OR client state %d",
+		     drv_ctxt->dev_ctxt.pwr_state,
+		     cctxt->clnt_state.state);
+
+		rc = VCD_ERR_BAD_STATE;
+	}
+
+	mutex_unlock(&drv_ctxt->dev_mutex);
+
+	return rc;
+
+}
+EXPORT_SYMBOL(vcd_encode_start);
+
+u32 vcd_encode_frame(void *handle, struct vcd_frame_data *input_frame)
+{
+	struct vcd_clnt_ctxt *cctxt =
+	    (struct vcd_clnt_ctxt *)handle;
+	struct vcd_drv_ctxt *drv_ctxt;
+	u32 rc;
+
+	VCD_MSG_MED("vcd_encode_frame:");
+
+	if (!cctxt || cctxt->signature != VCD_SIGNATURE) {
+		VCD_MSG_ERROR("Bad client handle");
+
+		return VCD_ERR_BAD_HANDLE;
+	}
+
+	if (!input_frame) {
+		VCD_MSG_ERROR("Bad parameters");
+
+		return VCD_ERR_BAD_POINTER;
+	}
+
+	drv_ctxt = vcd_get_drv_context();
+
+	mutex_lock(&drv_ctxt->dev_mutex);
+
+	if (cctxt->clnt_state.state_table->ev_hdlr.encode_frame) {
+		rc = cctxt->clnt_state.state_table->ev_hdlr.
+		    encode_frame(cctxt, input_frame);
+	} else {
+		VCD_MSG_ERROR("Unsupported API in client state %d",
+			      cctxt->clnt_state.state);
+
+		rc = VCD_ERR_BAD_STATE;
+	}
+
+	mutex_unlock(&drv_ctxt->dev_mutex);
+
+	return rc;
+
+}
+EXPORT_SYMBOL(vcd_encode_frame);
+
+u32 vcd_decode_start(void *handle, struct vcd_sequence_hdr *seq_hdr)
+{
+	struct vcd_clnt_ctxt *cctxt =
+	    (struct vcd_clnt_ctxt *)handle;
+	struct vcd_drv_ctxt *drv_ctxt;
+	u32 rc;
+
+	VCD_MSG_MED("vcd_decode_start:");
+
+	if (!cctxt || cctxt->signature != VCD_SIGNATURE) {
+		VCD_MSG_ERROR("Bad client handle");
+
+		return VCD_ERR_BAD_HANDLE;
+	}
+
+	drv_ctxt = vcd_get_drv_context();
+
+	mutex_lock(&drv_ctxt->dev_mutex);
+
+	if (cctxt->clnt_state.state_table->ev_hdlr.decode_start &&
+	    drv_ctxt->dev_ctxt.pwr_state != VCD_PWR_STATE_SLEEP) {
+		rc = cctxt->clnt_state.state_table->ev_hdlr.
+		    decode_start(cctxt, seq_hdr);
+	} else {
+		VCD_MSG_ERROR
+		    ("Unsupported API in dev power state %d OR client state %d",
+		     drv_ctxt->dev_ctxt.pwr_state,
+		     cctxt->clnt_state.state);
+
+		rc = VCD_ERR_BAD_STATE;
+	}
+
+	mutex_unlock(&drv_ctxt->dev_mutex);
+
+	return rc;
+
+}
+EXPORT_SYMBOL(vcd_decode_start);
+
+u32 vcd_decode_frame(void *handle, struct vcd_frame_data *input_frame)
+{
+	struct vcd_clnt_ctxt *cctxt =
+	    (struct vcd_clnt_ctxt *)handle;
+	struct vcd_drv_ctxt *drv_ctxt;
+	u32 rc;
+
+	VCD_MSG_MED("vcd_decode_frame:");
+
+	if (!cctxt || cctxt->signature != VCD_SIGNATURE) {
+		VCD_MSG_ERROR("Bad client handle");
+
+		return VCD_ERR_BAD_HANDLE;
+	}
+
+	if (!input_frame) {
+		VCD_MSG_ERROR("Bad parameters");
+
+		return VCD_ERR_BAD_POINTER;
+	}
+
+	drv_ctxt = vcd_get_drv_context();
+
+	mutex_lock(&drv_ctxt->dev_mutex);
+
+	if (cctxt->clnt_state.state_table->ev_hdlr.decode_frame) {
+		rc = cctxt->clnt_state.state_table->ev_hdlr.
+		    decode_frame(cctxt, input_frame);
+	} else {
+		VCD_MSG_ERROR("Unsupported API in client state %d",
+			      cctxt->clnt_state.state);
+
+		rc = VCD_ERR_BAD_STATE;
+	}
+
+	mutex_unlock(&drv_ctxt->dev_mutex);
+
+	return rc;
+
+}
+EXPORT_SYMBOL(vcd_decode_frame);
+
+u32 vcd_pause(void *handle)
+{
+	struct vcd_drv_ctxt *drv_ctxt;
+	struct vcd_clnt_ctxt *cctxt =
+	    (struct vcd_clnt_ctxt *)handle;
+	u32 rc;
+
+	VCD_MSG_MED("vcd_pause:");
+
+	if (!cctxt || cctxt->signature != VCD_SIGNATURE) {
+		VCD_MSG_ERROR("Bad client handle");
+
+		return VCD_ERR_BAD_HANDLE;
+	}
+
+	drv_ctxt = vcd_get_drv_context();
+
+	mutex_lock(&drv_ctxt->dev_mutex);
+
+	if (cctxt->clnt_state.state_table->ev_hdlr.pause) {
+		rc = cctxt->clnt_state.state_table->ev_hdlr.
+		    pause(cctxt);
+	} else {
+		VCD_MSG_ERROR("Unsupported API in client state %d",
+			      cctxt->clnt_state.state);
+
+		rc = VCD_ERR_BAD_STATE;
+	}
+
+	mutex_unlock(&drv_ctxt->dev_mutex);
+
+	return rc;
+
+}
+EXPORT_SYMBOL(vcd_pause);
+
+u32 vcd_resume(void *handle)
+{
+	struct vcd_drv_ctxt *drv_ctxt;
+	struct vcd_clnt_ctxt *cctxt =
+	    (struct vcd_clnt_ctxt *)handle;
+	u32 rc;
+
+	VCD_MSG_MED("vcd_resume:");
+
+	if (!cctxt || cctxt->signature != VCD_SIGNATURE) {
+		VCD_MSG_ERROR("Bad client handle");
+
+		return VCD_ERR_BAD_HANDLE;
+	}
+
+	drv_ctxt = vcd_get_drv_context();
+
+	mutex_lock(&drv_ctxt->dev_mutex);
+
+	if (drv_ctxt->dev_state.state_table->ev_hdlr.resume &&
+	    drv_ctxt->dev_ctxt.pwr_state != VCD_PWR_STATE_SLEEP) {
+		rc = drv_ctxt->dev_state.state_table->ev_hdlr.
+		    resume(drv_ctxt, cctxt);
+	} else {
+		VCD_MSG_ERROR
+		    ("Unsupported API in dev power state %d OR client state %d",
+		     drv_ctxt->dev_ctxt.pwr_state,
+		     cctxt->clnt_state.state);
+
+		rc = VCD_ERR_BAD_STATE;
+	}
+
+	mutex_unlock(&drv_ctxt->dev_mutex);
+
+	return rc;
+
+}
+EXPORT_SYMBOL(vcd_resume);
+
+u32 vcd_flush(void *handle, u32 mode)
+{
+	struct vcd_clnt_ctxt *cctxt =
+	    (struct vcd_clnt_ctxt *)handle;
+	struct vcd_drv_ctxt *drv_ctxt;
+	u32 rc;
+
+	VCD_MSG_MED("vcd_flush:");
+
+	if (!cctxt || cctxt->signature != VCD_SIGNATURE) {
+		VCD_MSG_ERROR("Bad client handle");
+
+		return VCD_ERR_BAD_HANDLE;
+	}
+
+	drv_ctxt = vcd_get_drv_context();
+
+	mutex_lock(&drv_ctxt->dev_mutex);
+
+	if (cctxt->clnt_state.state_table->ev_hdlr.flush) {
+		rc = cctxt->clnt_state.state_table->ev_hdlr.
+		    flush(cctxt, mode);
+	} else {
+		VCD_MSG_ERROR("Unsupported API in client state %d",
+			      cctxt->clnt_state.state);
+
+		rc = VCD_ERR_BAD_STATE;
+	}
+
+	mutex_unlock(&drv_ctxt->dev_mutex);
+
+	return rc;
+
+}
+EXPORT_SYMBOL(vcd_flush);
+
+u32 vcd_stop(void *handle)
+{
+	struct vcd_clnt_ctxt *cctxt =
+	    (struct vcd_clnt_ctxt *)handle;
+	struct vcd_drv_ctxt *drv_ctxt;
+	u32 rc;
+
+	VCD_MSG_MED("vcd_stop:");
+
+	if (!cctxt || cctxt->signature != VCD_SIGNATURE) {
+		VCD_MSG_ERROR("Bad client handle");
+
+		return VCD_ERR_BAD_HANDLE;
+	}
+
+	drv_ctxt = vcd_get_drv_context();
+
+	mutex_lock(&drv_ctxt->dev_mutex);
+
+	if (cctxt->clnt_state.state_table->ev_hdlr.stop &&
+	    drv_ctxt->dev_ctxt.pwr_state != VCD_PWR_STATE_SLEEP) {
+		rc = cctxt->clnt_state.state_table->ev_hdlr.
+		    stop(cctxt);
+	} else {
+		VCD_MSG_ERROR
+		    ("Unsupported API in dev power state %d OR client state %d",
+		     drv_ctxt->dev_ctxt.pwr_state,
+		     cctxt->clnt_state.state);
+
+		rc = VCD_ERR_BAD_STATE;
+	}
+
+	mutex_unlock(&drv_ctxt->dev_mutex);
+
+	return rc;
+
+}
+EXPORT_SYMBOL(vcd_stop);
+
+u32 vcd_set_property(void *handle,
+     struct vcd_property_hdr *prop_hdr, void *prop_val)
+{
+	struct vcd_clnt_ctxt *cctxt =
+	    (struct vcd_clnt_ctxt *)handle;
+	struct vcd_drv_ctxt *drv_ctxt;
+	u32 rc;
+
+	VCD_MSG_MED("vcd_set_property:");
+
+	if (!cctxt || cctxt->signature != VCD_SIGNATURE) {
+		VCD_MSG_ERROR("Bad client handle");
+
+		return VCD_ERR_BAD_HANDLE;
+	}
+
+	if (!prop_hdr || !prop_val) {
+		VCD_MSG_ERROR("Bad parameters");
+
+		return VCD_ERR_BAD_POINTER;
+	}
+
+	drv_ctxt = vcd_get_drv_context();
+
+	mutex_lock(&drv_ctxt->dev_mutex);
+
+	if (cctxt->clnt_state.state_table->ev_hdlr.set_property) {
+		rc = cctxt->clnt_state.state_table->ev_hdlr.
+		    set_property(cctxt, prop_hdr, prop_val);
+	} else {
+		VCD_MSG_ERROR("Unsupported API in client state %d",
+			      cctxt->clnt_state.state);
+
+		rc = VCD_ERR_BAD_STATE;
+	}
+
+	mutex_unlock(&drv_ctxt->dev_mutex);
+
+	return rc;
+
+}
+EXPORT_SYMBOL(vcd_set_property);
+
+u32 vcd_get_property(void *handle,
+     struct vcd_property_hdr *prop_hdr, void *prop_val)
+{
+	struct vcd_clnt_ctxt *cctxt =
+	    (struct vcd_clnt_ctxt *)handle;
+	struct vcd_drv_ctxt *drv_ctxt;
+	u32 rc;
+
+	VCD_MSG_MED("vcd_get_property:");
+
+	if (!cctxt || cctxt->signature != VCD_SIGNATURE) {
+		VCD_MSG_ERROR("Bad client handle");
+
+		return VCD_ERR_BAD_HANDLE;
+	}
+
+	if (!prop_hdr || !prop_val) {
+		VCD_MSG_ERROR("Bad parameters");
+
+		return VCD_ERR_BAD_POINTER;
+	}
+
+	drv_ctxt = vcd_get_drv_context();
+
+	mutex_lock(&drv_ctxt->dev_mutex);
+
+	if (cctxt->clnt_state.state_table->ev_hdlr.get_property) {
+		rc = cctxt->clnt_state.state_table->ev_hdlr.
+		    get_property(cctxt, prop_hdr, prop_val);
+	} else {
+		VCD_MSG_ERROR("Unsupported API in client state %d",
+			      cctxt->clnt_state.state);
+
+		rc = VCD_ERR_BAD_STATE;
+	}
+
+	mutex_unlock(&drv_ctxt->dev_mutex);
+
+	return rc;
+
+}
+EXPORT_SYMBOL(vcd_get_property);
+
+u32 vcd_set_buffer_requirements(void *handle,
+     enum vcd_buffer_type buffer,
+     struct vcd_buffer_requirement *buffer_req)
+{
+	struct vcd_clnt_ctxt *cctxt =
+	    (struct vcd_clnt_ctxt *)handle;
+	struct vcd_drv_ctxt *drv_ctxt;
+	u32 rc;
+
+	VCD_MSG_MED("vcd_set_buffer_requirements:");
+
+	if (!cctxt || cctxt->signature != VCD_SIGNATURE) {
+		VCD_MSG_ERROR("Bad client handle");
+
+		return VCD_ERR_BAD_HANDLE;
+	}
+
+	if (!buffer_req) {
+		VCD_MSG_ERROR("Bad parameters");
+
+		return VCD_ERR_BAD_POINTER;
+	}
+
+	drv_ctxt = vcd_get_drv_context();
+
+	mutex_lock(&drv_ctxt->dev_mutex);
+
+	if (cctxt->clnt_state.state_table->ev_hdlr.
+	    set_buffer_requirements) {
+		rc = cctxt->clnt_state.state_table->ev_hdlr.
+		    set_buffer_requirements(cctxt, buffer, buffer_req);
+	} else {
+		VCD_MSG_ERROR("Unsupported API in client state %d",
+			      cctxt->clnt_state.state);
+
+		rc = VCD_ERR_BAD_STATE;
+	}
+
+	mutex_unlock(&drv_ctxt->dev_mutex);
+
+	return rc;
+
+}
+EXPORT_SYMBOL(vcd_set_buffer_requirements);
+
+u32 vcd_get_buffer_requirements(void *handle,
+     enum vcd_buffer_type buffer,
+     struct vcd_buffer_requirement *buffer_req)
+{
+	struct vcd_clnt_ctxt *cctxt =
+	    (struct vcd_clnt_ctxt *)handle;
+	struct vcd_drv_ctxt *drv_ctxt;
+	u32 rc;
+
+	VCD_MSG_MED("vcd_get_buffer_requirements:");
+
+	if (!cctxt || cctxt->signature != VCD_SIGNATURE) {
+		VCD_MSG_ERROR("Bad client handle");
+
+		return VCD_ERR_BAD_HANDLE;
+	}
+
+	if (!buffer_req) {
+		VCD_MSG_ERROR("Bad parameters");
+
+		return VCD_ERR_BAD_POINTER;
+	}
+
+	drv_ctxt = vcd_get_drv_context();
+
+	mutex_lock(&drv_ctxt->dev_mutex);
+
+	if (cctxt->clnt_state.state_table->ev_hdlr.
+	    get_buffer_requirements) {
+		rc = cctxt->clnt_state.state_table->ev_hdlr.
+		    get_buffer_requirements(cctxt, buffer, buffer_req);
+	} else {
+		VCD_MSG_ERROR("Unsupported API in client state %d",
+			      cctxt->clnt_state.state);
+
+		rc = VCD_ERR_BAD_STATE;
+	}
+
+	mutex_unlock(&drv_ctxt->dev_mutex);
+
+	return rc;
+
+}
+EXPORT_SYMBOL(vcd_get_buffer_requirements);
+
+u32 vcd_set_buffer(void *handle,
+     enum vcd_buffer_type buffer_type, u8 *buffer, u32 buf_size)
+{
+	struct vcd_clnt_ctxt *cctxt =
+	    (struct vcd_clnt_ctxt *)handle;
+	struct vcd_drv_ctxt *drv_ctxt;
+	u32 rc;
+
+	VCD_MSG_MED("vcd_set_buffer:");
+
+	if (!cctxt || cctxt->signature != VCD_SIGNATURE) {
+		VCD_MSG_ERROR("Bad client handle");
+
+		return VCD_ERR_BAD_HANDLE;
+	}
+
+	if (!buffer || !buf_size) {
+		VCD_MSG_ERROR("Bad parameters");
+
+		return VCD_ERR_BAD_POINTER;
+	}
+
+	drv_ctxt = vcd_get_drv_context();
+
+	mutex_lock(&drv_ctxt->dev_mutex);
+
+	if (cctxt->clnt_state.state_table->ev_hdlr.set_buffer) {
+		rc = cctxt->clnt_state.state_table->ev_hdlr.
+		    set_buffer(cctxt, buffer_type, buffer, buf_size);
+	} else {
+		VCD_MSG_ERROR("Unsupported API in client state %d",
+			      cctxt->clnt_state.state);
+
+		rc = VCD_ERR_BAD_STATE;
+	}
+
+	mutex_unlock(&drv_ctxt->dev_mutex);
+
+	return rc;
+
+}
+EXPORT_SYMBOL(vcd_set_buffer);
+
+u32 vcd_allocate_buffer(void *handle,
+     enum vcd_buffer_type buffer,
+     u32 buf_size, u8 **vir_buf_addr, u8 **phy_buf_addr)
+{
+	struct vcd_clnt_ctxt *cctxt =
+	    (struct vcd_clnt_ctxt *)handle;
+	struct vcd_drv_ctxt *drv_ctxt;
+	u32 rc;
+
+	VCD_MSG_MED("vcd_allocate_buffer:");
+
+	if (!cctxt || cctxt->signature != VCD_SIGNATURE) {
+		VCD_MSG_ERROR("Bad client handle");
+
+		return VCD_ERR_BAD_HANDLE;
+	}
+
+	if (!vir_buf_addr || !phy_buf_addr
+	    || !buf_size) {
+		VCD_MSG_ERROR("Bad parameters");
+
+		return VCD_ERR_BAD_POINTER;
+	}
+
+	drv_ctxt = vcd_get_drv_context();
+
+	mutex_lock(&drv_ctxt->dev_mutex);
+
+	if (cctxt->clnt_state.state_table->ev_hdlr.allocate_buffer) {
+		rc = cctxt->clnt_state.state_table->ev_hdlr.
+		    allocate_buffer(cctxt, buffer, buf_size,
+				       vir_buf_addr, phy_buf_addr);
+	} else {
+		VCD_MSG_ERROR("Unsupported API in client state %d",
+			      cctxt->clnt_state.state);
+
+		rc = VCD_ERR_BAD_STATE;
+	}
+
+	mutex_unlock(&drv_ctxt->dev_mutex);
+
+	return rc;
+
+}
+EXPORT_SYMBOL(vcd_allocate_buffer);
+
+u32 vcd_free_buffer(void *handle, enum vcd_buffer_type buffer_type, u8 *buffer)
+{
+	struct vcd_clnt_ctxt *cctxt =
+	    (struct vcd_clnt_ctxt *)handle;
+	struct vcd_drv_ctxt *drv_ctxt;
+	u32 rc;
+
+	VCD_MSG_MED("vcd_free_buffer:");
+
+	if (!cctxt || cctxt->signature != VCD_SIGNATURE) {
+		VCD_MSG_ERROR("Bad client handle");
+
+		return VCD_ERR_BAD_HANDLE;
+	}
+
+	drv_ctxt = vcd_get_drv_context();
+
+	mutex_lock(&drv_ctxt->dev_mutex);
+
+	if (cctxt->clnt_state.state_table->ev_hdlr.free_buffer) {
+		rc = cctxt->clnt_state.state_table->ev_hdlr.
+		    free_buffer(cctxt, buffer_type, buffer);
+	} else {
+		VCD_MSG_ERROR("Unsupported API in client state %d",
+			      cctxt->clnt_state.state);
+
+		rc = VCD_ERR_BAD_STATE;
+	}
+
+	mutex_unlock(&drv_ctxt->dev_mutex);
+
+	return rc;
+
+}
+EXPORT_SYMBOL(vcd_free_buffer);
+
+u32 vcd_fill_output_buffer(void *handle, struct vcd_frame_data *buffer)
+{
+	struct vcd_clnt_ctxt *cctxt =
+	    (struct vcd_clnt_ctxt *)handle;
+	struct vcd_drv_ctxt *drv_ctxt;
+	u32 rc;
+
+	VCD_MSG_MED("vcd_fill_output_buffer:");
+
+	if (!cctxt || cctxt->signature != VCD_SIGNATURE) {
+		VCD_MSG_ERROR("Bad client handle");
+
+		return VCD_ERR_BAD_HANDLE;
+	}
+
+	if (!buffer) {
+		VCD_MSG_ERROR("Bad parameters");
+
+		return VCD_ERR_BAD_POINTER;
+	}
+
+	drv_ctxt = vcd_get_drv_context();
+
+	mutex_lock(&drv_ctxt->dev_mutex);
+
+	if (cctxt->clnt_state.state_table->ev_hdlr.fill_output_buffer) {
+		rc = cctxt->clnt_state.state_table->ev_hdlr.
+		    fill_output_buffer(cctxt, buffer);
+	} else {
+		VCD_MSG_ERROR("Unsupported API in client state %d",
+			      cctxt->clnt_state.state);
+
+		rc = VCD_ERR_BAD_STATE;
+	}
+
+	mutex_unlock(&drv_ctxt->dev_mutex);
+
+	return rc;
+
+}
+EXPORT_SYMBOL(vcd_fill_output_buffer);
+
+u32 vcd_set_device_power(s32 driver_handle,
+		enum vcd_power_state pwr_state)
+{
+	u32 rc = VCD_S_SUCCESS;
+	struct vcd_drv_ctxt *drv_ctxt;
+
+	VCD_MSG_MED("vcd_set_device_power:");
+
+	drv_ctxt = vcd_get_drv_context();
+	mutex_lock(&drv_ctxt->dev_mutex);
+
+	if (drv_ctxt->dev_state.state_table->ev_hdlr.set_dev_pwr) {
+		rc = drv_ctxt->dev_state.state_table->ev_hdlr.
+		    set_dev_pwr(drv_ctxt, pwr_state);
+	} else {
+		VCD_MSG_ERROR("Unsupported API in device state %d",
+			      drv_ctxt->dev_state.state);
+
+		rc = VCD_ERR_BAD_STATE;
+	}
+
+	mutex_unlock(&drv_ctxt->dev_mutex);
+
+	return rc;
+
+}
+EXPORT_SYMBOL(vcd_set_device_power);
+
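+/*
+ * Interrupt handling is split in two: vcd_read_and_clear_interrupt() only
+ * acknowledges the interrupt through the DDL layer, while
+ * vcd_response_handler() later processes the DDL response under dev_mutex
+ * and, if dev_ctxt.command_continue is set, resumes pending work via
+ * vcd_continue().
+ */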
+void vcd_read_and_clear_interrupt(void)
+{
+	VCD_MSG_LOW("vcd_read_and_clear_interrupt:");
+	ddl_read_and_clear_interrupt();
+}
+
+
+void vcd_response_handler(void)
+{
+	struct vcd_drv_ctxt *drv_ctxt;
+
+	VCD_MSG_LOW("vcd_response_handler:");
+	drv_ctxt = vcd_get_drv_context();
+
+	mutex_lock(&drv_ctxt->dev_mutex);
+
+	if (!ddl_process_core_response()) {
+		VCD_MSG_HIGH
+		    ("ddl_process_core_response indicated no further "
+		     "processing");
+		mutex_unlock(&drv_ctxt->dev_mutex);
+		return;
+	}
+
+	if (drv_ctxt->dev_ctxt.command_continue)
+		vcd_continue();
+	mutex_unlock(&drv_ctxt->dev_mutex);
+}
+EXPORT_SYMBOL(vcd_response_handler);
+
+u8 vcd_get_num_of_clients(void)
+{
+	struct vcd_drv_ctxt *drv_ctxt;
+	struct vcd_clnt_ctxt *cctxt;
+	u8 count = 0;
+
+	VCD_MSG_LOW("vcd_get_num_of_clients:");
+	drv_ctxt = vcd_get_drv_context();
+
+	mutex_lock(&drv_ctxt->dev_mutex);
+	cctxt = drv_ctxt->dev_ctxt.cctxt_list_head;
+	while (cctxt) {
+		count++;
+		cctxt = cctxt->next;
+	}
+	mutex_unlock(&drv_ctxt->dev_mutex);
+	return count;
+}
+EXPORT_SYMBOL(vcd_get_num_of_clients);
+
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_api.h b/drivers/video/msm/vidc/common/vcd/vcd_api.h
new file mode 100644
index 0000000..38ea202
--- /dev/null
+++ b/drivers/video/msm/vidc/common/vcd/vcd_api.h
@@ -0,0 +1,140 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _VCD_API_H_
+#define _VCD_API_H_
+#include "vcd_property.h"
+#include "vcd_status.h"
+
+#define VCD_FRAME_FLAG_EOS 0x00000001
+#define VCD_FRAME_FLAG_DECODEONLY   0x00000004
+#define VCD_FRAME_FLAG_ENDOFFRAME 0x00000010
+#define VCD_FRAME_FLAG_SYNCFRAME 0x00000020
+#define VCD_FRAME_FLAG_EXTRADATA 0x00000040
+#define VCD_FRAME_FLAG_CODECCONFIG  0x00000080
+#define VCD_FRAME_FLAG_BFRAME 0x00100000
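+/* Frame flags are OR-ed into the flags field of struct vcd_frame_data. */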
+
+#define VCD_FLUSH_INPUT   0x0001
+#define VCD_FLUSH_OUTPUT  0x0002
+#define VCD_FLUSH_ALL     0x0003
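+/* VCD_FLUSH_ALL is (VCD_FLUSH_INPUT | VCD_FLUSH_OUTPUT); the flush
+ * handlers mask the requested mode against it. */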
+
+#define VCD_FRAMETAG_INVALID  0xffffffff
+
+struct vcd_handle_container {
+	void *handle;
+};
+struct vcd_flush_cmd {
+	u32 mode;
+};
+
+enum vcd_frame {
+	VCD_FRAME_YUV = 1,
+	VCD_FRAME_I,
+	VCD_FRAME_P,
+	VCD_FRAME_B,
+	VCD_FRAME_NOTCODED,
+	VCD_FRAME_32BIT = 0x7fffffff
+};
+
+enum vcd_power_state {
+	VCD_PWR_STATE_ON = 1,
+	VCD_PWR_STATE_SLEEP,
+};
+
+struct vcd_frame_data {
+	u8 *virtual;
+	u8 *physical;
+	u32 alloc_len;
+	u32 data_len;
+	u32 offset;
+	s64 time_stamp;
+	u32 flags;
+	u32 frm_clnt_data;
+	struct vcd_property_dec_output_buffer dec_op_prop;
+	u32 interlaced;
+	enum vcd_frame frame;
+	u32 ip_frm_tag;
+	u32 intrlcd_ip_frm_tag;
+};
+
+struct vcd_sequence_hdr {
+	u8 *sequence_header;
+	u32 sequence_header_len;
+
+};
+
+enum vcd_buffer_type {
+	VCD_BUFFER_INPUT = 0x1,
+	VCD_BUFFER_OUTPUT = 0x2,
+	VCD_BUFFER_INVALID = 0x3,
+	VCD_BUFFER_32BIT = 0x7FFFFFFF
+};
+
+struct vcd_buffer_requirement {
+	u32 min_count;
+	u32 actual_count;
+	u32 max_count;
+	size_t sz;
+	u32 align;
+	u32 buf_pool_id;
+};
+
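+/*
+ * Platform hooks supplied by the caller of vcd_init(): device base address
+ * mapping, ISR (de)registration and a simple timer.  Of these, only
+ * map_dev_base_addr is explicitly checked by vcd_init().
+ */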
+struct vcd_init_config {
+	void *device_name;
+	void *(*map_dev_base_addr) (void *device_name);
+	void (*un_map_dev_base_addr) (void);
+	void (*interrupt_clr) (void);
+	void (*register_isr) (void *device_name);
+	void (*deregister_isr) (void);
+	u32  (*timer_create) (void (*timer_handler)(void *),
+		void *user_data, void **timer_handle);
+	void (*timer_release) (void *timer_handle);
+	void (*timer_start) (void *timer_handle, u32 time_out);
+	void (*timer_stop) (void *timer_handle);
+};
+
+u32 vcd_init(struct vcd_init_config *config, s32 *driver_handle);
+u32 vcd_term(s32 driver_handle);
+u32 vcd_open(s32 driver_handle, u32 decoding,
+	void (*callback) (u32 event, u32 status, void *info, size_t sz,
+	void *handle, void *const client_data), void *client_data);
+u32 vcd_close(void *handle);
+u32 vcd_encode_start(void *handle);
+u32 vcd_encode_frame(void *handle, struct vcd_frame_data *input_frame);
+u32 vcd_decode_start(void *handle, struct vcd_sequence_hdr *seq_hdr);
+u32 vcd_decode_frame(void *handle, struct vcd_frame_data *input_frame);
+u32 vcd_pause(void *handle);
+u32 vcd_resume(void *handle);
+u32 vcd_flush(void *handle, u32 mode);
+u32 vcd_stop(void *handle);
+u32 vcd_set_property(void *handle, struct vcd_property_hdr *prop_hdr,
+					void *prop_val);
+u32 vcd_get_property(void *handle, struct vcd_property_hdr *prop_hdr,
+					 void *prop_val);
+u32 vcd_set_buffer_requirements(void *handle, enum vcd_buffer_type buffer,
+		struct vcd_buffer_requirement *buffer_req);
+u32 vcd_get_buffer_requirements(void *handle, enum vcd_buffer_type buffer,
+		struct vcd_buffer_requirement *buffer_req);
+u32 vcd_set_buffer(void *handle, enum vcd_buffer_type buffer_type,
+		u8 *buffer, u32 buf_size);
+u32 vcd_allocate_buffer(void *handle, enum vcd_buffer_type buffer,
+		u32 buf_size, u8 **vir_buf_addr, u8 **phy_buf_addr);
+
+u32 vcd_free_buffer(void *handle, enum vcd_buffer_type buffer_type, u8 *buffer);
+u32 vcd_fill_output_buffer(void *handle, struct vcd_frame_data *buffer);
+u32 vcd_set_device_power(s32 driver_handle,
+		enum vcd_power_state pwr_state);
+void vcd_read_and_clear_interrupt(void);
+void vcd_response_handler(void);
+u8 vcd_get_num_of_clients(void);
+
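+/*
+ * Illustrative decoder call flow (sketch only; buffer management, the
+ * client callback and error handling are up to the caller, and the client
+ * handle arrives through the callback, see struct vcd_handle_container):
+ *
+ *	vcd_init(&config, &drv_handle);
+ *	vcd_open(drv_handle, 1, client_cb, client_data);
+ *	vcd_get_buffer_requirements(handle, VCD_BUFFER_INPUT, &buf_req);
+ *	vcd_set_buffer(handle, VCD_BUFFER_INPUT, buf, buf_req.sz);
+ *	vcd_decode_start(handle, &seq_hdr);
+ *	vcd_decode_frame(handle, &input_frame);
+ *	vcd_fill_output_buffer(handle, &output_frame);
+ *	vcd_stop(handle);
+ *	vcd_close(handle);
+ *	vcd_term(drv_handle);
+ */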
+#endif
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_client_sm.c b/drivers/video/msm/vidc/common/vcd/vcd_client_sm.c
new file mode 100644
index 0000000..973ed48
--- /dev/null
+++ b/drivers/video/msm/vidc/common/vcd/vcd_client_sm.c
@@ -0,0 +1,1827 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "vidc_type.h"
+#include "vcd.h"
+
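+/*
+ * Per-client state machine.  The handlers in this file implement the
+ * state-specific behaviour (open, starting, run, pausing/paused, flushing,
+ * eos, stopping, invalid); transitions go through
+ * vcd_do_client_state_transition() and events with no handler in the
+ * current state's table are rejected by the dispatch code in vcd_api.c.
+ */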
+static const struct vcd_clnt_state_table *vcd_clnt_state_table[];
+
+void vcd_clnt_handle_device_err_fatal(struct vcd_clnt_ctxt *cctxt,
+								  u32 event)
+{
+	if (cctxt->clnt_state.state == VCD_CLIENT_STATE_NULL) {
+		cctxt->callback(VCD_EVT_RESP_OPEN, VCD_ERR_HW_FATAL, NULL, 0,
+			cctxt, cctxt->client_data);
+		vcd_destroy_client_context(cctxt);
+		return;
+	}
+	if (event == VCD_EVT_RESP_BASE)
+		event = VCD_EVT_IND_HWERRFATAL;
+	if (cctxt->clnt_state.state != VCD_CLIENT_STATE_INVALID) {
+		cctxt->callback(event, VCD_ERR_HW_FATAL, NULL, 0,
+			cctxt, cctxt->client_data);
+		vcd_flush_buffers_in_err_fatal(cctxt);
+		vcd_do_client_state_transition(cctxt,
+			VCD_CLIENT_STATE_INVALID,
+			CLIENT_STATE_EVENT_NUMBER(clnt_cb));
+	}
+}
+
+static u32 vcd_close_in_open(struct vcd_clnt_ctxt *cctxt)
+{
+	u32 rc = VCD_S_SUCCESS;
+
+	VCD_MSG_LOW("vcd_close_in_open:");
+	if (cctxt->in_buf_pool.allocated ||
+		 cctxt->out_buf_pool.allocated) {
+		VCD_MSG_ERROR("Allocated buffers are not freed yet");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+	vcd_destroy_client_context(cctxt);
+	return rc;
+}
+
+static u32  vcd_close_in_invalid(struct vcd_clnt_ctxt *cctxt)
+{
+	VCD_MSG_LOW("vcd_close_in_invalid:");
+	if (cctxt->in_buf_pool.allocated ||
+		cctxt->out_buf_pool.allocated){
+		VCD_MSG_ERROR("Allocated buffers are not freed yet");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+
+	if (cctxt->status.mask & VCD_CLEANING_UP)
+		cctxt->status.mask |= VCD_CLOSE_PENDING;
+	else
+		vcd_destroy_client_context(cctxt);
+	return VCD_S_SUCCESS;
+}
+
+static u32 vcd_start_in_run_cmn(struct vcd_clnt_ctxt *cctxt)
+{
+	VCD_MSG_LOW("vcd_start_in_run_cmn:");
+	cctxt->callback(VCD_EVT_RESP_START, VCD_S_SUCCESS, NULL, 0,
+					  cctxt, cctxt->client_data);
+	return VCD_S_SUCCESS;
+
+}
+
+static u32 vcd_encode_start_in_open(struct vcd_clnt_ctxt *cctxt)
+{
+	u32 rc = VCD_S_SUCCESS;
+	struct vcd_property_hdr prop_hdr;
+	struct vcd_property_vop_timing timing;
+
+	VCD_MSG_LOW("vcd_encode_start_in_open:");
+
+	if (cctxt->decoding) {
+		VCD_MSG_ERROR("vcd_encode_init for decoder client");
+
+		return VCD_ERR_ILLEGAL_OP;
+	}
+
+	if (!cctxt->in_buf_pool.entries ||
+	    !cctxt->out_buf_pool.entries ||
+	    cctxt->in_buf_pool.validated != cctxt->in_buf_pool.count ||
+	    cctxt->out_buf_pool.validated !=
+	    cctxt->out_buf_pool.count) {
+		VCD_MSG_ERROR("Buffer pool is not completely setup yet");
+
+		return VCD_ERR_BAD_STATE;
+	}
+
+	rc = vcd_sched_add_client(cctxt);
+	VCD_FAILED_RETURN(rc, "Failed: vcd_sched_add_client");
+
+	prop_hdr.prop_id = VCD_I_VOP_TIMING;
+	prop_hdr.sz = sizeof(struct vcd_property_vop_timing);
+	rc = ddl_get_property(cctxt->ddl_handle, &prop_hdr, &timing);
+
+	VCD_FAILED_RETURN(rc, "Failed: Get VCD_I_VOP_TIMING");
+	if (!timing.vop_time_resolution) {
+		VCD_MSG_ERROR("Vop_time_resolution value is zero");
+		return VCD_ERR_FAIL;
+	}
+	cctxt->time_resoln = timing.vop_time_resolution;
+
+	rc = vcd_process_cmd_sess_start(cctxt);
+
+	if (!VCD_FAILED(rc)) {
+		vcd_do_client_state_transition(cctxt,
+					       VCD_CLIENT_STATE_STARTING,
+					       CLIENT_STATE_EVENT_NUMBER
+					       (encode_start));
+	}
+
+	return rc;
+}
+
+static u32 vcd_encode_start_in_run(struct vcd_clnt_ctxt *cctxt)
+{
+	VCD_MSG_LOW("vcd_encode_start_in_run:");
+	(void) vcd_start_in_run_cmn(cctxt);
+	return VCD_S_SUCCESS;
+}
+
+
+static u32 vcd_encode_frame_cmn(struct vcd_clnt_ctxt *cctxt,
+     struct vcd_frame_data *input_frame)
+{
+	VCD_MSG_LOW("vcd_encode_frame_cmn in %d:", cctxt->clnt_state.state);
+
+	if (cctxt->decoding) {
+		VCD_MSG_ERROR("vcd_encode_frame for decoder client");
+
+		return VCD_ERR_ILLEGAL_OP;
+	}
+
+	return vcd_handle_input_frame(cctxt, input_frame);
+}
+
+static u32 vcd_decode_start_in_open
+    (struct vcd_clnt_ctxt *cctxt,
+     struct vcd_sequence_hdr *seq_hdr)
+{
+	u32 rc = VCD_S_SUCCESS;
+
+	VCD_MSG_LOW("vcd_decode_start_in_open:");
+
+	if (!cctxt->decoding) {
+		VCD_MSG_ERROR("vcd_decode_init for encoder client");
+
+		return VCD_ERR_ILLEGAL_OP;
+	}
+
+	if (seq_hdr) {
+		VCD_MSG_HIGH("Seq hdr supplied. len = %d",
+			     seq_hdr->sequence_header_len);
+
+		rc = vcd_store_seq_hdr(cctxt, seq_hdr);
+
+	} else {
+		VCD_MSG_HIGH("Seq hdr not supplied");
+
+		cctxt->seq_hdr.sequence_header_len = 0;
+		cctxt->seq_hdr.sequence_header = NULL;
+	}
+
+	VCD_FAILED_RETURN(rc, "Err processing seq hdr");
+
+	rc = vcd_process_cmd_sess_start(cctxt);
+
+	if (!VCD_FAILED(rc)) {
+		vcd_do_client_state_transition(cctxt,
+					       VCD_CLIENT_STATE_STARTING,
+					       CLIENT_STATE_EVENT_NUMBER
+					       (decode_start));
+	}
+
+	return rc;
+}
+
+static u32 vcd_decode_start_in_run(struct vcd_clnt_ctxt *cctxt,
+	struct vcd_sequence_hdr *seqhdr)
+{
+	VCD_MSG_LOW("vcd_decode_start_in_run:");
+	(void) vcd_start_in_run_cmn(cctxt);
+	return VCD_S_SUCCESS;
+}
+
+static u32 vcd_decode_frame_cmn
+    (struct vcd_clnt_ctxt *cctxt,
+     struct vcd_frame_data *input_frame)
+{
+	VCD_MSG_LOW("vcd_decode_frame_cmn in %d:", cctxt->clnt_state.state);
+
+	if (!cctxt->decoding) {
+		VCD_MSG_ERROR("Decode_frame api called for Encoder client");
+
+		return VCD_ERR_ILLEGAL_OP;
+	}
+
+	return vcd_handle_input_frame(cctxt, input_frame);
+}
+
+static u32 vcd_pause_in_run(struct vcd_clnt_ctxt *cctxt)
+{
+	u32 rc = VCD_S_SUCCESS;
+
+	VCD_MSG_LOW("vcd_pause_in_run:");
+
+	if (cctxt->sched_clnt_hdl) {
+		rc = vcd_sched_suspend_resume_clnt(cctxt, false);
+		VCD_FAILED_RETURN(rc, "Failed: vcd_sched_suspend_resume_clnt");
+	}
+
+	if (cctxt->status.frame_submitted > 0) {
+		vcd_do_client_state_transition(cctxt,
+					       VCD_CLIENT_STATE_PAUSING,
+					       CLIENT_STATE_EVENT_NUMBER
+					       (pause));
+
+	} else {
+		VCD_MSG_HIGH("No client frames are currently being processed");
+
+		vcd_do_client_state_transition(cctxt,
+					       VCD_CLIENT_STATE_PAUSED,
+					       CLIENT_STATE_EVENT_NUMBER
+					       (pause));
+
+		cctxt->callback(VCD_EVT_RESP_PAUSE,
+				  VCD_S_SUCCESS,
+				  NULL, 0, cctxt, cctxt->client_data);
+
+		rc = vcd_power_event(cctxt->dev_ctxt, cctxt,
+				     VCD_EVT_PWR_CLNT_PAUSE);
+
+		if (VCD_FAILED(rc))
+			VCD_MSG_ERROR("VCD_EVT_PWR_CLNT_PAUSE failed");
+
+	}
+
+	return VCD_S_SUCCESS;
+}
+
+static u32 vcd_resume_in_paused(struct vcd_clnt_ctxt *cctxt)
+{
+	struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
+	u32 rc = VCD_S_SUCCESS;
+
+	VCD_MSG_LOW("vcd_resume_in_paused:");
+
+
+	if (cctxt->sched_clnt_hdl) {
+		rc = vcd_power_event(cctxt->dev_ctxt,
+				     cctxt, VCD_EVT_PWR_CLNT_RESUME);
+
+		if (VCD_FAILED(rc)) {
+			VCD_MSG_ERROR("VCD_EVT_PWR_CLNT_RESUME failed");
+		} else {
+			rc = vcd_sched_suspend_resume_clnt(cctxt, true);
+			if (VCD_FAILED(rc)) {
+				VCD_MSG_ERROR
+				    ("rc = 0x%x. Failed: "
+				     "vcd_sched_suspend_resume_clnt",
+				     rc);
+			}
+
+		}
+		if (!VCD_FAILED(rc)) {
+			vcd_do_client_state_transition(cctxt,
+						       VCD_CLIENT_STATE_RUN,
+						       CLIENT_STATE_EVENT_NUMBER
+						       (resume));
+			vcd_try_submit_frame(dev_ctxt);
+		}
+	} else {
+		vcd_do_client_state_transition(cctxt,
+					       VCD_CLIENT_STATE_RUN,
+					       CLIENT_STATE_EVENT_NUMBER
+					       (resume));
+	}
+
+	return rc;
+}
+
+static u32 vcd_flush_cmn(struct vcd_clnt_ctxt *cctxt, u32 mode)
+{
+	u32 rc = VCD_S_SUCCESS;
+
+	VCD_MSG_LOW("vcd_flush_cmn in %d:", cctxt->clnt_state.state);
+
+	rc = vcd_flush_buffers(cctxt, mode);
+
+	VCD_FAILED_RETURN(rc, "Failed: vcd_flush_buffers");
+
+	if (cctxt->status.frame_submitted > 0) {
+		vcd_do_client_state_transition(cctxt,
+					       VCD_CLIENT_STATE_FLUSHING,
+					       CLIENT_STATE_EVENT_NUMBER
+					       (flush));
+	} else {
+		VCD_MSG_HIGH("All buffers are flushed");
+		cctxt->status.mask |= (mode & VCD_FLUSH_ALL);
+		vcd_send_flush_done(cctxt, VCD_S_SUCCESS);
+	}
+
+	return rc;
+}
+
+static u32 vcd_flush_inopen(struct vcd_clnt_ctxt *cctxt, u32 mode)
+{
+	VCD_MSG_LOW("vcd_flush_inopen:");
+	cctxt->status.mask |= (mode & VCD_FLUSH_ALL);
+	vcd_send_flush_done(cctxt, VCD_S_SUCCESS);
+	return VCD_S_SUCCESS;
+}
+
+static u32 vcd_flush_in_flushing
+    (struct vcd_clnt_ctxt *cctxt, u32 mode)
+{
+	u32 rc = VCD_S_SUCCESS;
+
+	VCD_MSG_LOW("vcd_flush_in_flushing:");
+
+	rc = vcd_flush_buffers(cctxt, mode);
+
+	return rc;
+}
+
+static u32 vcd_flush_in_eos(struct vcd_clnt_ctxt *cctxt,
+	u32 mode)
+{
+	VCD_MSG_LOW("vcd_flush_in_eos:");
+
+	if (mode > VCD_FLUSH_ALL || !mode) {
+		VCD_MSG_ERROR("Invalid flush mode %d", mode);
+
+		return VCD_ERR_ILLEGAL_PARM;
+	}
+
+	VCD_MSG_MED("Flush mode requested %d", mode);
+
+	cctxt->status.mask |= (mode & VCD_FLUSH_ALL);
+
+	return VCD_S_SUCCESS;
+}
+
+static u32 vcd_flush_in_invalid(struct vcd_clnt_ctxt *cctxt,
+	u32 mode)
+{
+	u32 rc = VCD_S_SUCCESS;
+	VCD_MSG_LOW("vcd_flush_in_invalid:");
+	if (!(cctxt->status.mask & VCD_CLEANING_UP)) {
+		rc = vcd_flush_buffers(cctxt, mode);
+		if (!VCD_FAILED(rc)) {
+			VCD_MSG_HIGH("All buffers are flushed");
+			cctxt->status.mask |= (mode & VCD_FLUSH_ALL);
+			vcd_send_flush_done(cctxt, VCD_S_SUCCESS);
+		}
+	} else {
+		cctxt->status.mask |= (mode & VCD_FLUSH_ALL);
+	}
+	return rc;
+}
+
+static u32 vcd_stop_cmn(struct vcd_clnt_ctxt *cctxt)
+{
+	struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
+	u32 rc = VCD_S_SUCCESS;
+	struct vcd_transc *transc;
+
+	VCD_MSG_LOW("vcd_stop_cmn in %d:", cctxt->clnt_state.state);
+
+	rc = vcd_flush_buffers(cctxt, VCD_FLUSH_ALL);
+
+	VCD_FAILED_RETURN(rc, "Failed: vcd_flush_buffers");
+
+	if (!cctxt->status.frame_submitted) {
+
+		if (vcd_get_command_channel(dev_ctxt, &transc)) {
+			rc = vcd_power_event(dev_ctxt, cctxt,
+				VCD_EVT_PWR_CLNT_CMD_BEGIN);
+
+			if (!VCD_FAILED(rc)) {
+				transc->type = VCD_CMD_CODEC_STOP;
+				transc->cctxt = cctxt;
+
+				rc = vcd_submit_cmd_sess_end(transc);
+			} else {
+				VCD_MSG_ERROR("Failed:"
+					" VCD_EVT_PWR_CLNT_CMD_BEGIN");
+			}
+
+			if (VCD_FAILED(rc)) {
+				vcd_release_command_channel(dev_ctxt,
+							    transc);
+			}
+
+		} else {
+			vcd_client_cmd_flush_and_en_q(cctxt,
+						      VCD_CMD_CODEC_STOP);
+		}
+	}
+
+	if (VCD_FAILED(rc)) {
+		(void)vcd_power_event(dev_ctxt, cctxt,
+				      VCD_EVT_PWR_CLNT_CMD_FAIL);
+	} else {
+		vcd_do_client_state_transition(cctxt,
+					       VCD_CLIENT_STATE_STOPPING,
+					       CLIENT_STATE_EVENT_NUMBER
+					       (stop));
+	}
+
+	return rc;
+}
+
+
+static u32 vcd_stop_inopen(struct vcd_clnt_ctxt *cctxt)
+{
+	VCD_MSG_LOW("vcd_stop_inopen:");
+
+	cctxt->callback(VCD_EVT_RESP_STOP, VCD_S_SUCCESS,
+					  NULL, 0, cctxt,
+					  cctxt->client_data);
+
+	return VCD_S_SUCCESS;
+}
+
+static u32 vcd_stop_in_run(struct vcd_clnt_ctxt *cctxt)
+{
+	u32 rc = VCD_S_SUCCESS;
+	VCD_MSG_LOW("vcd_stop_in_run:");
+	rc = vcd_stop_cmn(cctxt);
+	if (!VCD_FAILED(rc) &&
+		(cctxt->status.mask & VCD_FIRST_IP_RCVD)) {
+		rc = vcd_power_event(cctxt->dev_ctxt,
+				     cctxt, VCD_EVT_PWR_CLNT_LAST_FRAME);
+	}
+	return rc;
+}
+
+static u32 vcd_stop_in_eos(struct vcd_clnt_ctxt *cctxt)
+{
+	u32 rc = VCD_S_SUCCESS;
+	VCD_MSG_LOW("vcd_stop_in_eos:");
+	if (cctxt->status.mask & VCD_EOS_WAIT_OP_BUF) {
+		rc = vcd_stop_cmn(cctxt);
+		if (!VCD_FAILED(rc)) {
+			rc = vcd_power_event(cctxt->dev_ctxt,
+				cctxt, VCD_EVT_PWR_CLNT_LAST_FRAME);
+			cctxt->status.mask &= ~VCD_EOS_WAIT_OP_BUF;
+		}
+	} else {
+		cctxt->status.mask |= VCD_STOP_PENDING;
+	}
+	return rc;
+}
+
+static u32 vcd_stop_in_invalid(struct vcd_clnt_ctxt *cctxt)
+{
+	VCD_MSG_LOW("vcd_stop_in_invalid:");
+	if (cctxt->status.mask & VCD_CLEANING_UP) {
+		cctxt->status.mask |= VCD_STOP_PENDING;
+	} else {
+		(void) vcd_flush_buffers(cctxt, VCD_FLUSH_ALL);
+		cctxt->callback(VCD_EVT_RESP_STOP, VCD_S_SUCCESS, NULL,
+			0, cctxt, cctxt->client_data);
+	}
+	return VCD_S_SUCCESS;
+}
+
+static u32 vcd_set_property_cmn
+    (struct vcd_clnt_ctxt *cctxt,
+     struct vcd_property_hdr *prop_hdr, void *prop_val)
+{
+	u32 rc;
+	VCD_MSG_LOW("vcd_set_property_cmn in %d:", cctxt->clnt_state.state);
+	VCD_MSG_LOW("property Id = %d", prop_hdr->prop_id);
+	if (!prop_hdr->sz || !prop_hdr->prop_id) {
+		VCD_MSG_MED("Bad parameters");
+		return VCD_ERR_ILLEGAL_PARM;
+	}
+
+	rc = ddl_set_property(cctxt->ddl_handle, prop_hdr, prop_val);
+	VCD_FAILED_RETURN(rc, "Failed: ddl_set_property");
+	switch (prop_hdr->prop_id) {
+	case VCD_I_LIVE:
+		{
+			struct vcd_property_live *live =
+			    (struct vcd_property_live *)prop_val;
+			cctxt->live = live->live;
+			break;
+		}
+	case VCD_I_FRAME_RATE:
+		{
+			if (cctxt->sched_clnt_hdl) {
+				rc = vcd_set_frame_rate(cctxt,
+					(struct vcd_property_frame_rate *)
+					prop_val);
+			}
+			break;
+		}
+	case VCD_I_FRAME_SIZE:
+		{
+			if (cctxt->sched_clnt_hdl) {
+				rc = vcd_set_frame_size(cctxt,
+					(struct vcd_property_frame_size *)
+					prop_val);
+			}
+			break;
+		}
+	case VCD_I_INTRA_PERIOD:
+		{
+			struct vcd_property_i_period *iperiod =
+			    (struct vcd_property_i_period *)prop_val;
+			cctxt->bframe = iperiod->b_frames;
+			break;
+		}
+	default:
+		{
+			break;
+		}
+	}
+	return rc;
+}
+
+static u32 vcd_get_property_cmn
+    (struct vcd_clnt_ctxt *cctxt,
+     struct vcd_property_hdr *prop_hdr, void *prop_val)
+{
+	VCD_MSG_LOW("vcd_get_property_cmn in %d:", cctxt->clnt_state.state);
+	VCD_MSG_LOW("property Id = %d", prop_hdr->prop_id);
+	if (!prop_hdr->sz || !prop_hdr->prop_id) {
+		VCD_MSG_MED("Bad parameters");
+
+		return VCD_ERR_ILLEGAL_PARM;
+	}
+	return ddl_get_property(cctxt->ddl_handle, prop_hdr, prop_val);
+}
+
+static u32 vcd_set_buffer_requirements_cmn
+    (struct vcd_clnt_ctxt *cctxt,
+     enum vcd_buffer_type buffer,
+     struct vcd_buffer_requirement *buffer_req)
+{
+	struct vcd_property_hdr Prop_hdr;
+	u32 rc = VCD_S_SUCCESS;
+	struct vcd_buffer_pool *buf_pool;
+	u32 first_frm_recvd = 0;
+
+	VCD_MSG_LOW("vcd_set_buffer_requirements_cmn in %d:",
+		    cctxt->clnt_state.state);
+
+	if (!cctxt->decoding &&
+	    cctxt->clnt_state.state != VCD_CLIENT_STATE_OPEN) {
+		VCD_MSG_ERROR("Bad state (%d) for encoder",
+					cctxt->clnt_state.state);
+
+		return VCD_ERR_BAD_STATE;
+	}
+
+	VCD_MSG_MED("Buffer type = %d", buffer);
+
+	if (buffer == VCD_BUFFER_INPUT) {
+		Prop_hdr.prop_id = DDL_I_INPUT_BUF_REQ;
+		buf_pool = &cctxt->in_buf_pool;
+		first_frm_recvd = VCD_FIRST_IP_RCVD;
+	} else if (buffer == VCD_BUFFER_OUTPUT) {
+		Prop_hdr.prop_id = DDL_I_OUTPUT_BUF_REQ;
+		buf_pool = &cctxt->out_buf_pool;
+		first_frm_recvd = VCD_FIRST_OP_RCVD;
+	} else {
+		rc = VCD_ERR_ILLEGAL_PARM;
+	}
+
+	VCD_FAILED_RETURN(rc, "Invalid buffer type provided");
+
+	if (buf_pool->validated > 0) {
+		VCD_MSG_ERROR("Need to free allocated buffers");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+
+	first_frm_recvd &= cctxt->status.mask;
+	if (first_frm_recvd) {
+		VCD_MSG_ERROR("VCD SetBufReq called when data path is active");
+		return VCD_ERR_BAD_STATE;
+	}
+	Prop_hdr.sz = sizeof(*buffer_req);
+	rc = ddl_set_property(cctxt->ddl_handle, &Prop_hdr, buffer_req);
+	VCD_FAILED_RETURN(rc, "Failed: ddl_set_property");
+	if (buf_pool->entries) {
+		VCD_MSG_MED("Resetting buffer requirements");
+		vcd_free_buffer_pool_entries(buf_pool);
+	}
+	return rc;
+}
+
+static u32 vcd_get_buffer_requirements_cmn
+    (struct vcd_clnt_ctxt *cctxt,
+     enum vcd_buffer_type buffer,
+     struct vcd_buffer_requirement *buffer_req)
+{
+	struct vcd_property_hdr Prop_hdr;
+	u32 rc = VCD_S_SUCCESS;
+
+	VCD_MSG_LOW("vcd_get_buffer_requirements_cmn in %d:",
+		    cctxt->clnt_state.state);
+
+	VCD_MSG_MED("Buffer type = %d", buffer);
+
+	if (buffer == VCD_BUFFER_INPUT)
+		Prop_hdr.prop_id = DDL_I_INPUT_BUF_REQ;
+	else if (buffer == VCD_BUFFER_OUTPUT)
+		Prop_hdr.prop_id = DDL_I_OUTPUT_BUF_REQ;
+	else
+		rc = VCD_ERR_ILLEGAL_PARM;
+
+	VCD_FAILED_RETURN(rc, "Invalid buffer type provided");
+
+	Prop_hdr.sz = sizeof(*buffer_req);
+
+	return ddl_get_property(cctxt->ddl_handle, &Prop_hdr, buffer_req);
+
+}
+
+static u32 vcd_set_buffer_cmn
+    (struct vcd_clnt_ctxt *cctxt,
+     enum vcd_buffer_type buffer_type, u8 *buffer, u32 buf_size)
+{
+	u32 rc;
+	struct vcd_buffer_pool *buf_pool;
+
+	VCD_MSG_LOW("vcd_set_buffer_cmn in %d:", cctxt->clnt_state.state);
+
+	rc = vcd_common_allocate_set_buffer(cctxt, buffer_type, buf_size,
+					    &buf_pool);
+
+	if (!VCD_FAILED(rc)) {
+		rc = vcd_set_buffer_internal(cctxt, buf_pool, buffer,
+					     buf_size);
+	}
+
+	return rc;
+}
+
+static u32 vcd_allocate_buffer_cmn
+    (struct vcd_clnt_ctxt *cctxt,
+     enum vcd_buffer_type buffer,
+     u32 buf_size, u8 **vir_buf_addr, u8 **phy_buf_addr)
+{
+	u32 rc;
+	struct vcd_buffer_pool *buf_pool;
+
+	VCD_MSG_LOW("vcd_allocate_buffer_cmn in %d:",
+		    cctxt->clnt_state.state);
+
+	rc = vcd_common_allocate_set_buffer(cctxt, buffer, buf_size,
+					    &buf_pool);
+
+	if (!VCD_FAILED(rc)) {
+		rc = vcd_allocate_buffer_internal(cctxt,
+						  buf_pool,
+						  buf_size,
+						  vir_buf_addr,
+						  phy_buf_addr);
+	}
+
+	return rc;
+}
+
+static u32 vcd_free_buffer_cmn
+    (struct vcd_clnt_ctxt *cctxt,
+     enum vcd_buffer_type buffer_type, u8 *buffer)
+{
+
+	VCD_MSG_LOW("vcd_free_buffer_cmn in %d:", cctxt->clnt_state.state);
+
+	return vcd_free_one_buffer_internal(cctxt, buffer_type, buffer);
+}
+
+static u32 vcd_fill_output_buffer_cmn
+    (struct vcd_clnt_ctxt *cctxt,
+     struct vcd_frame_data *buffer)
+{
+	u32 rc = VCD_S_SUCCESS;
+	struct vcd_buffer_entry *buf_entry;
+	u32 result = true;
+	u32 handled = true;
+	if (!cctxt || !buffer) {
+		VCD_MSG_ERROR("%s(): Invalid params cctxt %p buffer %p",
+					__func__, cctxt, buffer);
+		return VCD_ERR_BAD_POINTER;
+	}
+	VCD_MSG_LOW("vcd_fill_output_buffer_cmn in %d:",
+		    cctxt->clnt_state.state);
+	if (cctxt->status.mask & VCD_IN_RECONFIG) {
+		buffer->time_stamp = 0;
+		buffer->data_len = 0;
+		VCD_MSG_LOW("In reconfig: Return output buffer");
+		cctxt->callback(VCD_EVT_RESP_OUTPUT_DONE,
+			VCD_S_SUCCESS,
+			buffer,
+			sizeof(struct vcd_frame_data),
+			cctxt, cctxt->client_data);
+		return rc;
+	}
+	buf_entry = vcd_check_fill_output_buffer(cctxt, buffer);
+	if (!buf_entry)
+		return VCD_ERR_BAD_POINTER;
+
+	if (!(cctxt->status.mask & VCD_FIRST_OP_RCVD)) {
+		rc = vcd_handle_first_fill_output_buffer(cctxt, buffer,
+			&handled);
+		VCD_FAILED_RETURN(rc,
+			"Failed: vcd_handle_first_fill_output_buffer");
+		if (handled)
+			return rc;
+	}
+
+	result =
+	    vcd_buffer_pool_entry_en_q(&cctxt->out_buf_pool, buf_entry);
+
+	if (!result && !cctxt->decoding) {
+		VCD_MSG_ERROR("Failed: vcd_buffer_pool_entry_en_q");
+
+		return VCD_ERR_FAIL;
+	}
+
+	buf_entry->frame = *buffer;
+	rc = vcd_return_op_buffer_to_hw(cctxt, buf_entry);
+	if (!VCD_FAILED(rc) && cctxt->sched_clnt_hdl) {
+		cctxt->sched_clnt_hdl->tkns++;
+		vcd_try_submit_frame(cctxt->dev_ctxt);
+	}
+	return rc;
+}
+
+static u32 vcd_fill_output_buffer_in_eos
+    (struct vcd_clnt_ctxt *cctxt,
+     struct vcd_frame_data *buffer)
+{
+	u32 rc = VCD_S_SUCCESS;
+	struct vcd_buffer_entry *buf_entry;
+
+	VCD_MSG_LOW("vcd_fill_output_buffer_in_eos:");
+
+	buf_entry = vcd_check_fill_output_buffer(cctxt, buffer);
+	if (!buf_entry)
+		return VCD_ERR_BAD_POINTER;
+
+	if (cctxt->status.mask & VCD_EOS_WAIT_OP_BUF) {
+		VCD_MSG_HIGH("Got an output buffer we were waiting for");
+
+		buf_entry->frame = *buffer;
+
+		buf_entry->frame.data_len = 0;
+		buf_entry->frame.flags |= VCD_FRAME_FLAG_EOS;
+		buf_entry->frame.ip_frm_tag =
+		    cctxt->status.eos_trig_ip_frm.ip_frm_tag;
+		buf_entry->frame.time_stamp =
+		    cctxt->status.eos_trig_ip_frm.time_stamp;
+
+		cctxt->callback(VCD_EVT_RESP_OUTPUT_DONE,
+				  VCD_S_SUCCESS,
+				  &buf_entry->frame,
+				  sizeof(struct vcd_frame_data),
+				  cctxt, cctxt->client_data);
+
+		cctxt->status.mask &= ~VCD_EOS_WAIT_OP_BUF;
+
+		vcd_do_client_state_transition(cctxt,
+					       VCD_CLIENT_STATE_RUN,
+					       CLIENT_STATE_EVENT_NUMBER
+					       (fill_output_buffer));
+
+	} else {
+		rc = vcd_fill_output_buffer_cmn(cctxt, buffer);
+	}
+
+	return rc;
+}
+
+static void vcd_clnt_cb_in_starting
+    (struct vcd_clnt_ctxt *cctxt,
+     u32 event, u32 status, void *payload, size_t sz,
+	 u32 *ddl_handle, void *const client_data)
+{
+	struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
+	struct vcd_transc *transc =
+		(struct vcd_transc *)client_data;
+	VCD_MSG_LOW("vcd_clnt_cb_in_starting:");
+	if (cctxt->ddl_handle != ddl_handle) {
+		VCD_MSG_ERROR("vcd_clnt_cb_in_starting: Wrong DDL handle %p",
+			ddl_handle);
+		return;
+	}
+
+	switch (event) {
+	case VCD_EVT_RESP_START:
+		{
+			vcd_handle_start_done(cctxt,
+				(struct vcd_transc *)client_data,
+				status);
+			break;
+		}
+	case VCD_EVT_RESP_STOP:
+		{
+			vcd_handle_stop_done_in_starting(cctxt,
+				(struct vcd_transc *)client_data,
+				status);
+			break;
+		}
+	case VCD_EVT_IND_HWERRFATAL:
+		{
+			cctxt->status.cmd_submitted--;
+			vcd_mark_command_channel(cctxt->dev_ctxt, transc);
+			vcd_handle_err_fatal(cctxt, VCD_EVT_RESP_START,
+				status);
+			break;
+		}
+	default:
+		{
+			VCD_MSG_ERROR("Unexpected callback event=%d status=%d "
+				"from DDL",	event, status);
+			dev_ctxt->command_continue = false;
+			break;
+		}
+	}
+}
+
+static void vcd_clnt_cb_in_run
+    (struct vcd_clnt_ctxt *cctxt,
+     u32 event,
+     u32 status,
+     void *payload, size_t sz, u32 *ddl_handle, void *const client_data)
+{
+	struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
+	u32 rc = VCD_S_SUCCESS;
+
+	if (cctxt->ddl_handle != ddl_handle) {
+		VCD_MSG_ERROR("ddl_handle mismatch");
+
+		return;
+	}
+
+	switch (event) {
+	case VCD_EVT_RESP_INPUT_DONE:
+		{
+			rc = vcd_handle_input_done(cctxt, payload, event,
+						   status);
+
+			break;
+		}
+
+	case VCD_EVT_RESP_OUTPUT_DONE:
+		{
+
+			rc = vcd_handle_frame_done(cctxt, payload, event,
+						   status);
+
+			break;
+		}
+	case VCD_EVT_RESP_OUTPUT_REQ:
+		{
+			rc = vcd_handle_output_required(cctxt, payload,
+				status);
+			break;
+		}
+
+	case VCD_EVT_IND_OUTPUT_RECONFIG:
+		{
+			rc = vcd_handle_ind_output_reconfig(cctxt, payload,
+				status);
+			break;
+		}
+	case VCD_EVT_RESP_TRANSACTION_PENDING:
+		{
+			 vcd_handle_trans_pending(cctxt);
+			 break;
+		}
+
+	case VCD_EVT_IND_HWERRFATAL:
+		{
+			 vcd_handle_ind_hw_err_fatal(cctxt,
+				VCD_EVT_IND_HWERRFATAL, status);
+			 break;
+		}
+	case VCD_EVT_IND_INFO_OUTPUT_RECONFIG:
+		{
+			vcd_handle_ind_info_output_reconfig(cctxt, status);
+			break;
+		}
+	default:
+		{
+			VCD_MSG_ERROR
+			    ("Unexpected callback event=%d status=%d from DDL",
+			     event, status);
+			dev_ctxt->command_continue = false;
+
+			break;
+		}
+	}
+
+	if (!VCD_FAILED(rc) &&
+	    (event == VCD_EVT_RESP_INPUT_DONE ||
+	     event == VCD_EVT_RESP_OUTPUT_DONE ||
+	     event == VCD_EVT_RESP_OUTPUT_REQ)) {
+
+		if (((struct ddl_frame_data_tag *)
+					payload)->frm_trans_end)
+			vcd_mark_frame_channel(cctxt->dev_ctxt);
+	}
+}
+
+static void vcd_clnt_cb_in_eos
+    (struct vcd_clnt_ctxt *cctxt,
+     u32 event,
+     u32 status,
+     void *payload, size_t sz, u32 *ddl_handle, void *const client_data) {
+	struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
+	struct vcd_transc *transc = NULL;
+	u32 frm_trans_end = false, rc = VCD_S_SUCCESS;
+
+	if (cctxt->ddl_handle != ddl_handle) {
+		VCD_MSG_ERROR("ddl_handle mismatch");
+
+		return;
+	}
+
+	switch (event) {
+	case VCD_EVT_RESP_INPUT_DONE:
+		{
+			rc = vcd_handle_input_done_in_eos(cctxt, payload,
+						     status);
+
+			break;
+		}
+
+	case VCD_EVT_RESP_OUTPUT_DONE:
+		{
+			rc = vcd_handle_frame_done_in_eos(cctxt, payload,
+						     status);
+
+			break;
+		}
+	case VCD_EVT_RESP_OUTPUT_REQ:
+		{
+			rc = vcd_handle_output_required(cctxt, payload,
+					status);
+			break;
+		}
+	case VCD_EVT_RESP_EOS_DONE:
+		{
+			transc = (struct vcd_transc *)client_data;
+			vcd_handle_eos_done(cctxt, transc, status);
+			vcd_mark_frame_channel(cctxt->dev_ctxt);
+			break;
+		}
+	case VCD_EVT_IND_OUTPUT_RECONFIG:
+		{
+			rc = vcd_handle_ind_output_reconfig(cctxt,
+				payload, status);
+			if (!VCD_FAILED(rc)) {
+				frm_trans_end = true;
+				payload = NULL;
+				vcd_do_client_state_transition(cctxt,
+					VCD_CLIENT_STATE_RUN,
+					CLIENT_STATE_EVENT_NUMBER
+					(clnt_cb));
+				VCD_MSG_LOW
+					("Reconfig in EOS: Suspending client");
+				rc = vcd_sched_suspend_resume_clnt(cctxt,
+						false);
+				if (VCD_FAILED(rc)) {
+					VCD_MSG_ERROR
+					("Failed: suspend_resume_clnt. rc=0x%x",
+						rc);
+				}
+			}
+			break;
+		}
+	case VCD_EVT_IND_HWERRFATAL:
+		{
+			vcd_handle_ind_hw_err_fatal(cctxt,
+				VCD_EVT_IND_HWERRFATAL, status);
+			break;
+		}
+	case VCD_EVT_IND_INFO_OUTPUT_RECONFIG:
+		{
+			vcd_handle_ind_info_output_reconfig(cctxt, status);
+			break;
+		}
+	default:
+		{
+			VCD_MSG_ERROR
+			    ("Unexpected callback event=%d status=%d from DDL",
+			     event, status);
+
+			dev_ctxt->command_continue = false;
+
+			break;
+		}
+
+	}
+	if (!VCD_FAILED(rc) &&
+		(event == VCD_EVT_RESP_INPUT_DONE ||
+		event == VCD_EVT_RESP_OUTPUT_DONE ||
+		event == VCD_EVT_RESP_OUTPUT_REQ ||
+		event == VCD_EVT_IND_OUTPUT_RECONFIG)) {
+		if (payload && ((struct ddl_frame_data_tag *)
+			payload)->frm_trans_end) {
+			vcd_mark_frame_channel(cctxt->dev_ctxt);
+			frm_trans_end = true;
+		}
+		if (frm_trans_end && !cctxt->status.frame_submitted)
+			vcd_handle_eos_trans_end(cctxt);
+	}
+}
+
+static void vcd_clnt_cb_in_flushing
+    (struct vcd_clnt_ctxt *cctxt,
+     u32 event,
+     u32 status,
+     void *payload, size_t sz, u32 *ddl_handle, void *const client_data) {
+	struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
+	u32 rc = VCD_S_SUCCESS;
+	u32 frm_trans_end = false;
+
+	VCD_MSG_LOW("vcd_clnt_cb_in_flushing:");
+
+	if (cctxt->ddl_handle != ddl_handle) {
+		VCD_MSG_ERROR("ddl_handle mismatch");
+
+		return;
+	}
+
+	switch (event) {
+	case VCD_EVT_RESP_INPUT_DONE:
+		{
+			rc = vcd_handle_input_done(cctxt,
+						   payload,
+						   VCD_EVT_RESP_INPUT_FLUSHED,
+						   status);
+
+			break;
+		}
+
+	case VCD_EVT_RESP_OUTPUT_DONE:
+		{
+
+			rc = vcd_handle_frame_done(cctxt,
+						   payload,
+						   VCD_EVT_RESP_OUTPUT_FLUSHED,
+						   status);
+
+			break;
+		}
+	case VCD_EVT_RESP_OUTPUT_REQ:
+		{
+			rc = vcd_handle_output_required_in_flushing(cctxt,
+				payload);
+			break;
+		}
+	case VCD_EVT_IND_OUTPUT_RECONFIG:
+		{
+			rc = vcd_handle_ind_output_reconfig(cctxt,
+				payload, status);
+			if (!VCD_FAILED(rc)) {
+				frm_trans_end = true;
+				payload = NULL;
+			}
+			break;
+		}
+	case VCD_EVT_IND_HWERRFATAL:
+		{
+			vcd_handle_ind_hw_err_fatal(cctxt,
+				VCD_EVT_IND_HWERRFATAL, status);
+			break;
+		}
+	default:
+		{
+			VCD_MSG_ERROR
+			    ("Unexpected callback event=%d status=%d from DDL",
+			     event, status);
+
+			dev_ctxt->command_continue = false;
+
+			break;
+		}
+	}
+	if (!VCD_FAILED(rc) && ((event == VCD_EVT_RESP_INPUT_DONE ||
+		event == VCD_EVT_RESP_OUTPUT_DONE ||
+		event == VCD_EVT_RESP_OUTPUT_REQ ||
+		event == VCD_EVT_IND_OUTPUT_RECONFIG))) {
+		if (payload &&
+			((struct ddl_frame_data_tag *)
+			payload)->frm_trans_end) {
+
+			vcd_mark_frame_channel(cctxt->dev_ctxt);
+			frm_trans_end = true;
+		}
+		if (frm_trans_end && !cctxt->status.frame_submitted) {
+			VCD_MSG_HIGH
+			    ("All pending frames recvd from DDL");
+			if (cctxt->status.mask & VCD_FLUSH_INPUT)
+				vcd_flush_bframe_buffers(cctxt,
+							VCD_FLUSH_INPUT);
+			if (cctxt->status.mask & VCD_FLUSH_OUTPUT)
+				vcd_flush_output_buffers(cctxt);
+			vcd_send_flush_done(cctxt, VCD_S_SUCCESS);
+			vcd_release_interim_frame_channels(dev_ctxt);
+			VCD_MSG_HIGH("Flush complete");
+			vcd_do_client_state_transition(cctxt,
+				VCD_CLIENT_STATE_RUN,
+				CLIENT_STATE_EVENT_NUMBER
+				(clnt_cb));
+		}
+	}
+}
+
+static void vcd_clnt_cb_in_stopping
+    (struct vcd_clnt_ctxt *cctxt,
+     u32 event,
+     u32 status,
+     void *payload, size_t sz, u32 *ddl_handle, void *const client_data) {
+	struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
+	u32 rc = VCD_S_SUCCESS;
+	u32 frm_trans_end = false;
+
+	VCD_MSG_LOW("vcd_clnt_cb_in_stopping:");
+
+	if (cctxt->ddl_handle != ddl_handle) {
+		VCD_MSG_ERROR("ddl_handle mismatch");
+
+		return;
+	}
+
+	switch (event) {
+
+	case VCD_EVT_RESP_INPUT_DONE:
+		{
+			rc = vcd_handle_input_done(cctxt,
+						   payload,
+						   VCD_EVT_RESP_INPUT_FLUSHED,
+						   status);
+
+			break;
+		}
+
+	case VCD_EVT_RESP_OUTPUT_DONE:
+		{
+
+			rc = vcd_handle_frame_done(cctxt,
+						   payload,
+						   VCD_EVT_RESP_OUTPUT_FLUSHED,
+						   status);
+
+			break;
+		}
+	case VCD_EVT_RESP_OUTPUT_REQ:
+		{
+			rc = vcd_handle_output_required_in_flushing(cctxt,
+				payload);
+			break;
+		}
+	case VCD_EVT_RESP_STOP:
+		{
+			vcd_handle_stop_done(cctxt,
+					     (struct vcd_transc *)
+					     client_data, status);
+
+			break;
+		}
+	case VCD_EVT_IND_OUTPUT_RECONFIG:
+		{
+			(void) vcd_handle_ind_output_reconfig(cctxt,
+				payload, status);
+
+			frm_trans_end = true;
+			payload = NULL;
+
+			break;
+		}
+	case VCD_EVT_IND_HWERRFATAL:
+		{
+			vcd_handle_ind_hw_err_fatal(cctxt, VCD_EVT_RESP_STOP,
+				status);
+			break;
+		}
+
+	default:
+		{
+			VCD_MSG_ERROR
+			    ("Unexpected callback event=%d status=%d from DDL",
+			     event, status);
+
+			dev_ctxt->command_continue = false;
+
+			break;
+		}
+	}
+
+	if (!VCD_FAILED(rc) && ((event == VCD_EVT_RESP_INPUT_DONE ||
+		event == VCD_EVT_RESP_OUTPUT_DONE) ||
+		event == VCD_EVT_RESP_OUTPUT_REQ ||
+		event == VCD_EVT_IND_OUTPUT_RECONFIG)) {
+
+		if (payload &&
+			((struct ddl_frame_data_tag *)
+			payload)->frm_trans_end) {
+
+			vcd_mark_frame_channel(cctxt->dev_ctxt);
+			frm_trans_end = true;
+		}
+		if (frm_trans_end && !cctxt->status.frame_submitted) {
+				VCD_MSG_HIGH
+					("All pending frames recvd from DDL");
+				vcd_flush_bframe_buffers(cctxt,
+							VCD_FLUSH_INPUT);
+				vcd_flush_output_buffers(cctxt);
+				cctxt->status.mask &= ~VCD_FLUSH_ALL;
+				vcd_release_all_clnt_frm_transc(cctxt);
+				VCD_MSG_HIGH
+				("All buffers flushed. Enqueuing stop cmd");
+				vcd_client_cmd_flush_and_en_q(cctxt,
+						VCD_CMD_CODEC_STOP);
+		}
+	}
+}
+
+static void vcd_clnt_cb_in_pausing
+    (struct vcd_clnt_ctxt *cctxt,
+     u32 event,
+     u32 status,
+     void *payload, size_t sz, u32 *ddl_handle, void *const client_data)
+{
+	struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
+	u32 rc = VCD_S_SUCCESS;
+	u32 frm_trans_end = false;
+
+	VCD_MSG_LOW("vcd_clnt_cb_in_pausing:");
+
+	if (cctxt->ddl_handle != ddl_handle) {
+		VCD_MSG_ERROR("ddl_handle mismatch");
+
+		return;
+	}
+
+	switch (event) {
+	case VCD_EVT_RESP_INPUT_DONE:
+		{
+			rc = vcd_handle_input_done(cctxt, payload, event,
+						   status);
+
+			break;
+		}
+
+	case VCD_EVT_RESP_OUTPUT_DONE:
+		{
+			rc = vcd_handle_frame_done(cctxt, payload, event,
+						   status);
+			break;
+		}
+	case VCD_EVT_RESP_OUTPUT_REQ:
+		{
+			rc = vcd_handle_output_required(cctxt, payload,
+				status);
+			break;
+		}
+	case VCD_EVT_IND_OUTPUT_RECONFIG:
+		{
+			rc = vcd_handle_ind_output_reconfig(cctxt,
+				payload, status);
+			if (!VCD_FAILED(rc)) {
+				frm_trans_end = true;
+				payload = NULL;
+			}
+			break;
+		}
+	case VCD_EVT_IND_HWERRFATAL:
+		{
+			vcd_handle_ind_hw_err_fatal(cctxt,
+				VCD_EVT_RESP_PAUSE, status);
+			rc = VCD_ERR_FAIL;
+			break;
+		}
+	default:
+		{
+			VCD_MSG_ERROR
+			    ("Unexpected callback event=%d status=%d from DDL",
+			     event, status);
+
+			dev_ctxt->command_continue = false;
+
+			break;
+		}
+
+	}
+
+	if (!VCD_FAILED(rc)) {
+
+		if (payload &&
+			((struct ddl_frame_data_tag *)
+			payload)->frm_trans_end) {
+
+			vcd_mark_frame_channel(cctxt->dev_ctxt);
+			frm_trans_end = true;
+		}
+		if (frm_trans_end && !cctxt->status.frame_submitted) {
+			VCD_MSG_HIGH
+			    ("All pending frames recvd from DDL");
+
+			cctxt->callback(VCD_EVT_RESP_PAUSE,
+					  VCD_S_SUCCESS,
+					  NULL,
+					  0,
+					  cctxt,
+					  cctxt->client_data);
+
+			vcd_do_client_state_transition(cctxt,
+					VCD_CLIENT_STATE_PAUSED,
+					CLIENT_STATE_EVENT_NUMBER
+						       (clnt_cb));
+
+			rc = vcd_power_event(cctxt->dev_ctxt,
+					     cctxt,
+					     VCD_EVT_PWR_CLNT_PAUSE);
+
+			if (VCD_FAILED(rc)) {
+				VCD_MSG_ERROR
+				    ("VCD_EVT_PWR_CLNT_PAUSE failed");
+			}
+		}
+	}
+}
+
+static void  vcd_clnt_cb_in_invalid(
+   struct vcd_clnt_ctxt *cctxt, u32 event, u32 status,
+   void *payload, size_t sz, u32 *ddl_handle,
+   void *const client_data
+)
+{
+	struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
+	VCD_MSG_LOW("vcd_clnt_cb_in_invalid:");
+	if (cctxt->ddl_handle != ddl_handle) {
+		VCD_MSG_ERROR("ddl_handle mismatch");
+		return;
+	}
+	switch (event) {
+	case VCD_EVT_RESP_STOP:
+		{
+			vcd_handle_stop_done_in_invalid(cctxt,
+				(struct vcd_transc *)client_data,
+				status);
+			break;
+		}
+	case VCD_EVT_RESP_INPUT_DONE:
+	case VCD_EVT_RESP_OUTPUT_REQ:
+		{
+			if (cctxt->status.frame_submitted)
+				cctxt->status.frame_submitted--;
+			if (payload && ((struct ddl_frame_data_tag *)
+							payload)->frm_trans_end)
+				vcd_mark_frame_channel(cctxt->dev_ctxt);
+			break;
+		}
+	case VCD_EVT_RESP_OUTPUT_DONE:
+		{
+			if (payload && ((struct ddl_frame_data_tag *)
+							payload)->frm_trans_end)
+				vcd_mark_frame_channel(cctxt->dev_ctxt);
+			break;
+		}
+	case VCD_EVT_RESP_TRANSACTION_PENDING:
+		{
+			if (cctxt->status.frame_submitted)
+				cctxt->status.frame_submitted--;
+			vcd_mark_frame_channel(cctxt->dev_ctxt);
+			break;
+		}
+	case VCD_EVT_IND_HWERRFATAL:
+		{
+			if (status == VCD_ERR_HW_FATAL)
+				vcd_handle_stop_done_in_invalid(cctxt,
+					(struct vcd_transc *)client_data,
+					status);
+
+			break;
+		}
+	case VCD_EVT_RESP_EOS_DONE:
+		{
+			vcd_mark_frame_channel(cctxt->dev_ctxt);
+			break;
+		}
+	case VCD_EVT_IND_OUTPUT_RECONFIG:
+		{
+			if (cctxt->status.frame_submitted > 0)
+				cctxt->status.frame_submitted--;
+			else
+				cctxt->status.frame_delayed--;
+			vcd_mark_frame_channel(cctxt->dev_ctxt);
+			break;
+		}
+	default:
+		{
+			VCD_MSG_ERROR("Unexpected callback event=%d status=%d "
+				"from DDL", event, status);
+			dev_ctxt->command_continue = false;
+			break;
+		}
+	}
+}
+
+static void vcd_clnt_enter_open
+    (struct vcd_clnt_ctxt *cctxt, s32 state_event) {
+	VCD_MSG_MED("Entering CLIENT_STATE_OPEN on api %d", state_event);
+}
+
+static void vcd_clnt_enter_starting
+    (struct vcd_clnt_ctxt *cctxt, s32 state_event) {
+	VCD_MSG_MED("Entering CLIENT_STATE_STARTING on api %d",
+		    state_event);
+	cctxt->status.last_evt = VCD_EVT_RESP_START;
+}
+
+static void vcd_clnt_enter_run
+    (struct vcd_clnt_ctxt *cctxt, s32 state_event) {
+	VCD_MSG_MED("Entering CLIENT_STATE_RUN on api %d", state_event);
+}
+
+static void vcd_clnt_enter_flushing
+    (struct vcd_clnt_ctxt *cctxt, s32 state_event) {
+	VCD_MSG_MED("Entering CLIENT_STATE_FLUSHING on api %d",
+		    state_event);
+}
+
+static void vcd_clnt_enter_stopping
+    (struct vcd_clnt_ctxt *cctxt, s32 state_event) {
+	VCD_MSG_MED("Entering CLIENT_STATE_STOPPING on api %d",
+		    state_event);
+	cctxt->status.last_evt = VCD_EVT_RESP_STOP;
+}
+
+static void vcd_clnt_enter_eos(struct vcd_clnt_ctxt *cctxt,
+	s32 state_event)
+{
+	u32 rc;
+	VCD_MSG_MED("Entering CLIENT_STATE_EOS on api %d", state_event);
+	rc = vcd_sched_suspend_resume_clnt(cctxt, false);
+	if (VCD_FAILED(rc))
+		VCD_MSG_ERROR("Failed: vcd_sched_suspend_resume_clnt. "
+			"rc=0x%x", rc);
+}
+
+static void vcd_clnt_enter_pausing
+    (struct vcd_clnt_ctxt *cctxt, s32 state_event) {
+	VCD_MSG_MED("Entering CLIENT_STATE_PAUSING on api %d",
+		    state_event);
+	cctxt->status.last_evt = VCD_EVT_RESP_PAUSE;
+}
+
+static void vcd_clnt_enter_paused
+    (struct vcd_clnt_ctxt *cctxt, s32 state_event)
+{
+	VCD_MSG_MED("Entering CLIENT_STATE_PAUSED on api %d",
+		state_event);
+}
+
+static void  vcd_clnt_enter_invalid(struct vcd_clnt_ctxt *cctxt,
+	s32 state_event)
+{
+	VCD_MSG_MED("Entering CLIENT_STATE_INVALID on api %d",
+		state_event);
+	cctxt->ddl_hdl_valid = false;
+	cctxt->status.mask &= ~(VCD_FIRST_IP_RCVD | VCD_FIRST_OP_RCVD);
+	if (cctxt->sched_clnt_hdl)
+		vcd_sched_suspend_resume_clnt(cctxt, false);
+}
+
+static void vcd_clnt_exit_open
+    (struct vcd_clnt_ctxt *cctxt, s32 state_event)
+{
+	VCD_MSG_MED("Exiting CLIENT_STATE_OPEN on api %d", state_event);
+}
+
+static void vcd_clnt_exit_starting
+    (struct vcd_clnt_ctxt *cctxt, s32 state_event) {
+	VCD_MSG_MED("Exiting CLIENT_STATE_STARTING on api %d",
+		    state_event);
+	cctxt->status.last_evt = VCD_EVT_RESP_BASE;
+}
+
+static void vcd_clnt_exit_run
+    (struct vcd_clnt_ctxt *cctxt, s32 state_event) {
+	VCD_MSG_MED("Exiting CLIENT_STATE_RUN on api %d", state_event);
+}
+
+static void vcd_clnt_exit_flushing
+    (struct vcd_clnt_ctxt *cctxt, s32 state_event) {
+	VCD_MSG_MED("Exiting CLIENT_STATE_FLUSHING on api %d",
+		    state_event);
+}
+
+static void vcd_clnt_exit_stopping
+    (struct vcd_clnt_ctxt *cctxt, s32 state_event) {
+	VCD_MSG_MED("Exiting CLIENT_STATE_STOPPING on api %d",
+		    state_event);
+	cctxt->status.last_evt = VCD_EVT_RESP_BASE;
+}
+
+static void vcd_clnt_exit_eos
+    (struct vcd_clnt_ctxt *cctxt, s32 state_event)
+{
+	u32 rc;
+	VCD_MSG_MED("Exiting CLIENT_STATE_EOS on api %d", state_event);
+	rc = vcd_sched_suspend_resume_clnt(cctxt, true);
+	if (VCD_FAILED(rc))
+		VCD_MSG_ERROR("Failed: vcd_sched_suspend_resume_clnt. rc=0x%x",
+			rc);
+}
+
+static void vcd_clnt_exit_pausing
+    (struct vcd_clnt_ctxt *cctxt, s32 state_event) {
+	VCD_MSG_MED("Exiting CLIENT_STATE_PAUSING on api %d",
+		    state_event);
+	cctxt->status.last_evt = VCD_EVT_RESP_BASE;
+}
+
+static void vcd_clnt_exit_paused
+    (struct vcd_clnt_ctxt *cctxt, s32 state_event) {
+	VCD_MSG_MED("Exiting CLIENT_STATE_PAUSED on api %d",
+		    state_event);
+}
+
+static void  vcd_clnt_exit_invalid(struct vcd_clnt_ctxt *cctxt,
+	s32 state_event)
+{
+	VCD_MSG_MED("Exiting CLIENT_STATE_INVALID on api %d",
+		state_event);
+}
+
+void vcd_do_client_state_transition(struct vcd_clnt_ctxt *cctxt,
+     enum vcd_clnt_state_enum to_state, u32 ev_code)
+{
+	struct vcd_clnt_state_ctxt *state_ctxt;
+
+	if (!cctxt || to_state >= VCD_CLIENT_STATE_MAX) {
+		VCD_MSG_ERROR("Bad parameters. cctxt=%p, to_state=%d",
+			      cctxt, to_state);
+		return;
+	}
+
+	state_ctxt = &cctxt->clnt_state;
+
+	if (state_ctxt->state == to_state) {
+		VCD_MSG_HIGH("Client already in requested to_state=%d",
+			     to_state);
+
+		return;
+	}
+
+	VCD_MSG_MED("vcd_do_client_state_transition: C%d -> C%d, for api %d",
+		    (int)state_ctxt->state, (int)to_state, ev_code);
+
+	if (state_ctxt->state_table->exit)
+		state_ctxt->state_table->exit(cctxt, ev_code);
+
+
+	state_ctxt->state = to_state;
+	state_ctxt->state_table = vcd_clnt_state_table[to_state];
+
+	if (state_ctxt->state_table->entry)
+		state_ctxt->state_table->entry(cctxt, ev_code);
+}
+
+const struct vcd_clnt_state_table *vcd_get_client_state_table
+    (enum vcd_clnt_state_enum state) {
+	return vcd_clnt_state_table[state];
+}
+
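+/*
+ * Client state tables: handler entries are positional and must follow the
+ * ev_hdlr field order in struct vcd_clnt_state_table (close, encode_start,
+ * encode_frame, decode_start, decode_frame, pause, resume, flush, stop,
+ * set_property, get_property, set_buffer_requirements,
+ * get_buffer_requirements, set_buffer, allocate_buffer, free_buffer,
+ * fill_output_buffer, clnt_cb). A NULL entry means the API is not
+ * supported in that state.
+ */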
+static const struct vcd_clnt_state_table vcd_clnt_table_open = {
+	{
+	 vcd_close_in_open,
+	 vcd_encode_start_in_open,
+	 NULL,
+	 vcd_decode_start_in_open,
+	 NULL,
+	 NULL,
+	 NULL,
+	 vcd_flush_inopen,
+	 vcd_stop_inopen,
+	 vcd_set_property_cmn,
+	 vcd_get_property_cmn,
+	 vcd_set_buffer_requirements_cmn,
+	 vcd_get_buffer_requirements_cmn,
+	 vcd_set_buffer_cmn,
+	 vcd_allocate_buffer_cmn,
+	 vcd_free_buffer_cmn,
+	 vcd_fill_output_buffer_cmn,
+	 NULL,
+	 },
+	vcd_clnt_enter_open,
+	vcd_clnt_exit_open
+};
+
+static const struct vcd_clnt_state_table vcd_clnt_table_starting = {
+	{
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 vcd_get_property_cmn,
+	 NULL,
+	 vcd_get_buffer_requirements_cmn,
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 vcd_clnt_cb_in_starting,
+	 },
+	vcd_clnt_enter_starting,
+	vcd_clnt_exit_starting
+};
+
+static const struct vcd_clnt_state_table vcd_clnt_table_run = {
+	{
+	 NULL,
+	 vcd_encode_start_in_run,
+	 vcd_encode_frame_cmn,
+	 vcd_decode_start_in_run,
+	 vcd_decode_frame_cmn,
+	 vcd_pause_in_run,
+	 NULL,
+	 vcd_flush_cmn,
+	 vcd_stop_in_run,
+	 vcd_set_property_cmn,
+	 vcd_get_property_cmn,
+	 vcd_set_buffer_requirements_cmn,
+	 vcd_get_buffer_requirements_cmn,
+	 vcd_set_buffer_cmn,
+	 vcd_allocate_buffer_cmn,
+	 vcd_free_buffer_cmn,
+	 vcd_fill_output_buffer_cmn,
+	 vcd_clnt_cb_in_run,
+	 },
+	vcd_clnt_enter_run,
+	vcd_clnt_exit_run
+};
+
+static const struct vcd_clnt_state_table vcd_clnt_table_flushing = {
+	{
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 vcd_flush_in_flushing,
+	 NULL,
+	 vcd_set_property_cmn,
+	 vcd_get_property_cmn,
+	 NULL,
+	 vcd_get_buffer_requirements_cmn,
+	 NULL,
+	 NULL,
+	 NULL,
+	 vcd_fill_output_buffer_cmn,
+	 vcd_clnt_cb_in_flushing,
+	 },
+	vcd_clnt_enter_flushing,
+	vcd_clnt_exit_flushing
+};
+
+static const struct vcd_clnt_state_table vcd_clnt_table_stopping = {
+	{
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 vcd_get_property_cmn,
+	 NULL,
+	 vcd_get_buffer_requirements_cmn,
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 vcd_clnt_cb_in_stopping,
+	 },
+	vcd_clnt_enter_stopping,
+	vcd_clnt_exit_stopping
+};
+
+static const struct vcd_clnt_state_table vcd_clnt_table_eos = {
+	{
+	 NULL,
+	 NULL,
+	 vcd_encode_frame_cmn,
+	 NULL,
+	 vcd_decode_frame_cmn,
+	 NULL,
+	 NULL,
+	 vcd_flush_in_eos,
+	 vcd_stop_in_eos,
+	 NULL,
+	 vcd_get_property_cmn,
+	 NULL,
+	 vcd_get_buffer_requirements_cmn,
+	 NULL,
+	 NULL,
+	 NULL,
+	 vcd_fill_output_buffer_in_eos,
+	 vcd_clnt_cb_in_eos,
+	 },
+	vcd_clnt_enter_eos,
+	vcd_clnt_exit_eos
+};
+
+static const struct vcd_clnt_state_table vcd_clnt_table_pausing = {
+	{
+	 NULL,
+	 NULL,
+	 vcd_encode_frame_cmn,
+	 NULL,
+	 vcd_decode_frame_cmn,
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 vcd_set_property_cmn,
+	 vcd_get_property_cmn,
+	 NULL,
+	 vcd_get_buffer_requirements_cmn,
+	 NULL,
+	 NULL,
+	 NULL,
+	 vcd_fill_output_buffer_cmn,
+	 vcd_clnt_cb_in_pausing,
+	 },
+	vcd_clnt_enter_pausing,
+	vcd_clnt_exit_pausing
+};
+
+static const struct vcd_clnt_state_table vcd_clnt_table_paused = {
+	{
+	 NULL,
+	 NULL,
+	 vcd_encode_frame_cmn,
+	 NULL,
+	 vcd_decode_frame_cmn,
+	 NULL,
+	 vcd_resume_in_paused,
+	 vcd_flush_cmn,
+	 vcd_stop_cmn,
+	 vcd_set_property_cmn,
+	 vcd_get_property_cmn,
+	 vcd_set_buffer_requirements_cmn,
+	 vcd_get_buffer_requirements_cmn,
+	 vcd_set_buffer_cmn,
+	 vcd_allocate_buffer_cmn,
+	 NULL,
+	 vcd_fill_output_buffer_cmn,
+	 NULL,
+	 },
+	vcd_clnt_enter_paused,
+	vcd_clnt_exit_paused
+};
+
+static const struct vcd_clnt_state_table vcd_clnt_table_invalid = {
+	{
+	 vcd_close_in_invalid,
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 vcd_flush_in_invalid,
+	 vcd_stop_in_invalid,
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 vcd_free_buffer_cmn,
+	 NULL,
+	 vcd_clnt_cb_in_invalid,
+	 },
+	vcd_clnt_enter_invalid,
+	vcd_clnt_exit_invalid
+};
+
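+/* Indexed by enum vcd_clnt_state_enum; VCD_CLIENT_STATE_NULL has no table. */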
+static const struct vcd_clnt_state_table *vcd_clnt_state_table[] = {
+	NULL,
+	&vcd_clnt_table_open,
+	&vcd_clnt_table_starting,
+	&vcd_clnt_table_run,
+	&vcd_clnt_table_flushing,
+	&vcd_clnt_table_pausing,
+	&vcd_clnt_table_paused,
+	&vcd_clnt_table_stopping,
+	&vcd_clnt_table_eos,
+	&vcd_clnt_table_invalid
+};
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_client_sm.h b/drivers/video/msm/vidc/common/vcd/vcd_client_sm.h
new file mode 100644
index 0000000..e9ab41c
--- /dev/null
+++ b/drivers/video/msm/vidc/common/vcd/vcd_client_sm.h
@@ -0,0 +1,110 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _VCD_CLIENT_SM_H_
+#define _VCD_CLIENT_SM_H_
+#include "vcd_api.h"
+#include "vcd_ddl_api.h"
+
+struct vcd_clnt_state_table;
+struct vcd_clnt_state_ctxt;
+struct vcd_clnt_ctxt;
+
+enum vcd_clnt_state_enum {
+	VCD_CLIENT_STATE_NULL = 0,
+	VCD_CLIENT_STATE_OPEN,
+	VCD_CLIENT_STATE_STARTING,
+	VCD_CLIENT_STATE_RUN,
+	VCD_CLIENT_STATE_FLUSHING,
+	VCD_CLIENT_STATE_PAUSING,
+	VCD_CLIENT_STATE_PAUSED,
+	VCD_CLIENT_STATE_STOPPING,
+	VCD_CLIENT_STATE_EOS,
+	VCD_CLIENT_STATE_INVALID,
+	VCD_CLIENT_STATE_MAX,
+	VCD_CLIENT_STATE_32BIT = 0x7FFFFFFF
+};
+
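+/*
+ * 1-based ordinal of a handler field within ev_hdlr, computed from its
+ * offset relative to the first field (close). Used to tag state
+ * transitions with the API/event that triggered them.
+ */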
+#define   CLIENT_STATE_EVENT_NUMBER(ppf) \
+    ((u32 *) (&(((struct vcd_clnt_state_table*)0)->ev_hdlr.ppf)) -  \
+    (u32 *) (&(((struct vcd_clnt_state_table*)0)->ev_hdlr.close)) \
+	+ 1)
+
+struct vcd_clnt_state_table {
+	struct {
+		u32(*close) (struct vcd_clnt_ctxt *cctxt);
+		u32(*encode_start) (struct vcd_clnt_ctxt *cctxt);
+		u32(*encode_frame) (struct vcd_clnt_ctxt *cctxt,
+				struct vcd_frame_data *input_frame);
+		u32(*decode_start) (struct vcd_clnt_ctxt *cctxt,
+				struct vcd_sequence_hdr *seq_hdr);
+		u32(*decode_frame) (struct vcd_clnt_ctxt *cctxt,
+				struct vcd_frame_data *input_frame);
+		u32(*pause) (struct vcd_clnt_ctxt *cctxt);
+		u32(*resume) (struct vcd_clnt_ctxt *cctxt);
+		u32(*flush) (struct vcd_clnt_ctxt *cctxt,
+				u32 mode);
+		u32(*stop) (struct vcd_clnt_ctxt *cctxt);
+		u32(*set_property) (struct vcd_clnt_ctxt *cctxt,
+				struct vcd_property_hdr *prop_hdr,
+				void *prop);
+		u32(*get_property) (struct vcd_clnt_ctxt *cctxt,
+				struct vcd_property_hdr *prop_hdr,
+				void *prop);
+		u32(*set_buffer_requirements) (struct vcd_clnt_ctxt *
+						  cctxt,
+						  enum vcd_buffer_type buffer,
+						  struct
+						  vcd_buffer_requirement *
+						  buffer_req);
+		u32(*get_buffer_requirements) (struct vcd_clnt_ctxt *
+						  cctxt,
+						  enum vcd_buffer_type buffer,
+						  struct
+						  vcd_buffer_requirement *
+						  buffer_req);
+		u32(*set_buffer) (struct vcd_clnt_ctxt *cctxt,
+				enum vcd_buffer_type buffer_type, u8 *buffer,
+				u32 buf_size);
+		u32(*allocate_buffer) (struct vcd_clnt_ctxt *cctxt,
+				enum vcd_buffer_type buffer, u32 buf_size,
+				u8 **vir_buf_addr, u8 **phy_buf_addr);
+		u32(*free_buffer) (struct vcd_clnt_ctxt *cctxt,
+				enum vcd_buffer_type buffer_type, u8 *buffer);
+		u32(*fill_output_buffer) (
+				struct vcd_clnt_ctxt *cctxt,
+				struct vcd_frame_data *buffer);
+		void (*clnt_cb) (struct vcd_clnt_ctxt *cctxt,
+				u32 event, u32 status, void *payload,
+				size_t sz, u32 *ddl_handle,
+				void *const client_data);
+	} ev_hdlr;
+
+	void (*entry) (struct vcd_clnt_ctxt *cctxt,
+			s32 state_event);
+	void (*exit) (struct vcd_clnt_ctxt *cctxt,
+			s32 state_event);
+};
+
+struct vcd_clnt_state_ctxt {
+	const struct vcd_clnt_state_table *state_table;
+	enum vcd_clnt_state_enum state;
+};
+
+extern void vcd_do_client_state_transition
+    (struct vcd_clnt_ctxt *cctxt,
+     enum vcd_clnt_state_enum to_state, u32 ev_code);
+
+extern const struct vcd_clnt_state_table *vcd_get_client_state_table(
+		enum vcd_clnt_state_enum state);
+
+#endif
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_core.h b/drivers/video/msm/vidc/common/vcd/vcd_core.h
new file mode 100644
index 0000000..e681feb
--- /dev/null
+++ b/drivers/video/msm/vidc/common/vcd/vcd_core.h
@@ -0,0 +1,220 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _VCD_CORE_H_
+#define _VCD_CORE_H_
+
+#include "vcd_api.h"
+#include "vcd_ddl_api.h"
+
+#include "vcd_util.h"
+#include "vcd_client_sm.h"
+#include "vcd_power_sm.h"
+
+#define VCD_SIGNATURE                        0x75017591U
+
+#define VCD_MIN_PERF_LEVEL                   37900
+
+#define VCD_DRIVER_INSTANCE_MAX              4
+
+#define VCD_MAX_CLIENT_TRANSACTIONS          32
+
+#define VCD_MAX_BUFFER_ENTRIES               32
+
+#define VCD_SEQ_HDR_PADDING_BYTES            256
+
+#define VCD_DEC_NUM_INTERLACED_FIELDS        2
+
+#define VCD_TIMESTAMP_RESOLUTION             1000000
+#define VCD_DEC_INITIAL_FRAME_RATE           30
+
+#define VCD_FIRST_IP_RCVD                    0x00000004
+#define VCD_FIRST_OP_RCVD                    0x00000008
+#define VCD_EOS_PREV_VALID                   0x00000010
+#define VCD_EOS_WAIT_OP_BUF                  0x00000020
+#define VCD_CLEANING_UP                      0x00000040
+#define VCD_STOP_PENDING                     0x00000080
+#define VCD_CLOSE_PENDING                    0x00000100
+#define VCD_IN_RECONFIG                      0x00000200
+#define VCD_FIRST_IP_DONE                    0x00000400
+
+enum vcd_command {
+	VCD_CMD_NONE,
+	VCD_CMD_DEVICE_INIT,
+	VCD_CMD_DEVICE_TERM,
+	VCD_CMD_DEVICE_RESET,
+	VCD_CMD_CODEC_START,
+	VCD_CMD_CODEC_STOP,
+	VCD_CMD_CODE_FRAME,
+	VCD_CMD_OUTPUT_FLUSH,
+	VCD_CMD_CLIENT_CLOSE
+};
+
+enum vcd_core_type {
+	VCD_CORE_1080P,
+	VCD_CORE_720P
+};
+
+struct vcd_cmd_q_element {
+	enum vcd_command pending_cmd;
+};
+
+struct vcd_buffer_entry {
+	struct list_head sched_list;
+	struct list_head list;
+	u32 valid;
+	u8 *alloc;
+	u8 *virtual;
+	u8 *physical;
+	size_t sz;
+	u32 allocated;
+	u32 in_use;
+	struct vcd_frame_data frame;
+
+};
+
+struct vcd_buffer_pool {
+	struct vcd_buffer_entry *entries;
+	u32 count;
+	struct vcd_buffer_requirement buf_req;
+	u32 validated;
+	u32 allocated;
+	u32 in_use;
+	struct list_head queue;
+	u16 q_len;
+};
+
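+/* Book-keeping for one command or frame transaction outstanding with DDL. */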
+struct vcd_transc {
+	u32 in_use;
+	enum vcd_command type;
+	struct vcd_clnt_ctxt *cctxt;
+
+	struct vcd_buffer_entry *ip_buf_entry;
+
+	s64 time_stamp;
+	u32 ip_frm_tag;
+	enum vcd_frame frame;
+
+	struct vcd_buffer_entry *op_buf_entry;
+
+	u32 input_done;
+	u32 frame_done;
+};
+
+struct vcd_dev_ctxt {
+	u32 ddl_cmd_concurrency;
+	u32 ddl_frame_ch_depth;
+	u32 ddl_cmd_ch_depth;
+	u32 ddl_frame_ch_interim;
+	u32 ddl_cmd_ch_interim;
+	u32 ddl_frame_ch_free;
+	u32 ddl_cmd_ch_free;
+
+	struct list_head sched_clnt_list;
+
+	struct vcd_init_config config;
+
+	u32 driver_ids[VCD_DRIVER_INSTANCE_MAX];
+	u32 refs;
+	u8 *device_base_addr;
+	void *hw_timer_handle;
+	u32               hw_time_out;
+	struct vcd_clnt_ctxt *cctxt_list_head;
+
+	enum vcd_command pending_cmd;
+
+	u32 command_continue;
+
+	struct vcd_transc *trans_tbl;
+	u32 trans_tbl_size;
+
+	enum vcd_power_state pwr_state;
+	enum vcd_pwr_clk_state pwr_clk_state;
+	u32 active_clnts;
+	u32 max_perf_lvl;
+	u32 reqd_perf_lvl;
+	u32 curr_perf_lvl;
+	u32 set_perf_lvl_pending;
+
+};
+
+struct vcd_clnt_status {
+	u32 req_perf_lvl;
+	u32 frame_submitted;
+	u32 frame_delayed;
+	u32 cmd_submitted;
+	u32 int_field_cnt;
+	s64 first_ts;
+	s64 prev_ts;
+	u64 time_elapsed;
+	struct vcd_frame_data eos_trig_ip_frm;
+	struct ddl_frame_data_tag eos_prev_op_frm;
+	u32 eos_prev_op_frm_status;
+	u32	last_err;
+	u32	last_evt;
+	u32 mask;
+};
+
+struct vcd_sched_clnt_ctx {
+	struct list_head list;
+	u32 clnt_active;
+	void *clnt_data;
+	u32 tkns;
+	u32 round_perfrm;
+	u32 rounds;
+	struct list_head ip_frm_list;
+};
+
+struct vcd_clnt_ctxt {
+	u32 signature;
+	struct vcd_clnt_state_ctxt clnt_state;
+
+	s32 driver_id;
+
+	u32 live;
+	u32 decoding;
+	u32 bframe;
+
+	struct vcd_property_frame_rate frm_rate;
+	u32 frm_p_units;
+	u32 reqd_perf_lvl;
+	u32 time_resoln;
+
+	struct vcd_buffer_pool in_buf_pool;
+	struct vcd_buffer_pool out_buf_pool;
+
+	void (*callback) (u32 event, u32 status, void *info, size_t sz,
+			  void *handle, void *const client_data);
+	void *client_data;
+	struct vcd_sched_clnt_ctx *sched_clnt_hdl;
+	u32	ddl_hdl_valid;
+	u32 *ddl_handle;
+	struct vcd_dev_ctxt *dev_ctxt;
+	struct vcd_cmd_q_element cmd_q;
+	struct vcd_sequence_hdr seq_hdr;
+	u8 *seq_hdr_phy_addr;
+	struct vcd_clnt_status status;
+
+	struct vcd_clnt_ctxt *next;
+};
+
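+/* Decrement a buffer pool in-use counter, logging instead of underflowing. */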
+#define VCD_BUFFERPOOL_INUSE_DECREMENT(val) \
+do { \
+	if ((val) > 0) \
+		val--; \
+	else { \
+		VCD_MSG_ERROR("%s(): Inconsistent val given in " \
+			"VCD_BUFFERPOOL_INUSE_DECREMENT\n", __func__); \
+	} \
+} while (0)
+
+#endif
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c b/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c
new file mode 100644
index 0000000..f8fb0fa
--- /dev/null
+++ b/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c
@@ -0,0 +1,1203 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "vidc_type.h"
+#include "vcd.h"
+
+static const struct vcd_dev_state_table *vcd_dev_state_table[];
+static const struct vcd_dev_state_table vcd_dev_table_null;
+
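+/* The driver context is a single, statically allocated instance. */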
+struct vcd_drv_ctxt *vcd_get_drv_context(void)
+{
+	static struct vcd_drv_ctxt drv_context = {
+		{&vcd_dev_table_null, VCD_DEVICE_STATE_NULL},
+		{0},
+	};
+
+	return &drv_context;
+
+}
+
+void vcd_do_device_state_transition(struct vcd_drv_ctxt *drv_ctxt,
+	 enum vcd_dev_state_enum to_state, u32 ev_code)
+{
+	struct vcd_dev_state_ctxt *state_ctxt;
+
+	if (!drv_ctxt || to_state >= VCD_DEVICE_STATE_MAX) {
+		VCD_MSG_ERROR("Bad parameters. drv_ctxt=%p, to_state=%d",
+				  drv_ctxt, to_state);
+		return;
+	}
+
+	state_ctxt = &drv_ctxt->dev_state;
+
+	if (state_ctxt->state == to_state) {
+		VCD_MSG_HIGH("Device already in requested to_state=%d",
+				 to_state);
+
+		return;
+	}
+
+	VCD_MSG_MED("vcd_do_device_state_transition: D%d -> D%d, for api %d",
+			(int)state_ctxt->state, (int)to_state, ev_code);
+
+	if (state_ctxt->state_table->exit)
+		state_ctxt->state_table->exit(drv_ctxt, ev_code);
+
+
+	state_ctxt->state = to_state;
+	state_ctxt->state_table = vcd_dev_state_table[to_state];
+
+	if (state_ctxt->state_table->entry)
+		state_ctxt->state_table->entry(drv_ctxt, ev_code);
+}
+
+void vcd_hw_timeout_handler(void *user_data)
+{
+	struct vcd_drv_ctxt *drv_ctxt;
+
+	VCD_MSG_HIGH("vcd_hw_timeout_handler:");
+	user_data = NULL;
+	drv_ctxt = vcd_get_drv_context();
+	mutex_lock(&drv_ctxt->dev_mutex);
+	if (drv_ctxt->dev_state.state_table->ev_hdlr.timeout)
+		drv_ctxt->dev_state.state_table->ev_hdlr.
+			timeout(drv_ctxt, user_data);
+	else
+		VCD_MSG_ERROR("hw_timeout unsupported in device state %d",
+			drv_ctxt->dev_state.state);
+	mutex_unlock(&drv_ctxt->dev_mutex);
+}
+
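+/*
+ * Single callback entry point registered with DDL; dispatches the event to
+ * the device state handler or, once the device is READY, to the client
+ * state handler recovered from the transaction passed in client_data.
+ */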
+void vcd_ddl_callback(u32 event, u32 status, void *payload,
+	size_t sz, u32 *ddl_handle, void *const client_data)
+{
+	struct vcd_drv_ctxt *drv_ctxt;
+	struct vcd_dev_ctxt *dev_ctxt;
+	struct vcd_dev_state_ctxt *dev_state;
+	struct vcd_clnt_ctxt *cctxt;
+	struct vcd_transc *transc;
+
+	VCD_MSG_LOW("vcd_ddl_callback:");
+
+	VCD_MSG_LOW("event=0x%x status=0x%x", event, status);
+
+	drv_ctxt = vcd_get_drv_context();
+	dev_ctxt = &drv_ctxt->dev_ctxt;
+	dev_state = &drv_ctxt->dev_state;
+
+	dev_ctxt->command_continue = true;
+	vcd_device_timer_stop(dev_ctxt);
+
+	switch (dev_state->state) {
+	case VCD_DEVICE_STATE_NULL:
+		{
+			VCD_MSG_HIGH("Callback unexpected in NULL state");
+			break;
+		}
+
+	case VCD_DEVICE_STATE_NOT_INIT:
+		{
+			VCD_MSG_HIGH("Callback unexpected in NOT_INIT state");
+			break;
+		}
+
+	case VCD_DEVICE_STATE_INITING:
+		{
+			if (dev_state->state_table->ev_hdlr.dev_cb) {
+				dev_state->state_table->ev_hdlr.
+					dev_cb(drv_ctxt, event, status,
+						  payload, sz, ddl_handle,
+						  client_data);
+			} else {
+				VCD_MSG_HIGH("No device handler in %d state",
+						 dev_state->state);
+			}
+			break;
+		}
+
+	case VCD_DEVICE_STATE_READY:
+		{
+			transc = (struct vcd_transc *)client_data;
+
+			if (!transc || !transc->in_use
+				|| !transc->cctxt) {
+				VCD_MSG_ERROR("Invalid client data "
+					"received from DDL");
+			} else {
+				cctxt = transc->cctxt;
+
+				if (cctxt->clnt_state.state_table->ev_hdlr.
+					clnt_cb) {
+					cctxt->clnt_state.state_table->
+						ev_hdlr.clnt_cb(cctxt,
+						event, status, payload,
+						sz, ddl_handle,
+						client_data);
+				} else {
+					VCD_MSG_HIGH
+					("No client handler in"
+					" (dsm:READY, csm:%d) state",
+					(int)cctxt->clnt_state.state);
+
+					if (VCD_FAILED(status)) {
+						VCD_MSG_FATAL("DDL callback"
+						" returned failure 0x%x",
+						status);
+					}
+				}
+			}
+			break;
+		}
+
+	default:
+		{
+			VCD_MSG_ERROR("Unknown state");
+			break;
+		}
+
+	}
+
+}
+
+u32 vcd_init_device_context(struct vcd_drv_ctxt *drv_ctxt,
+		u32 ev_code)
+{
+	struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt;
+	u32 rc;
+	struct ddl_init_config ddl_init;
+
+	VCD_MSG_LOW("vcd_init_device_context:");
+
+	dev_ctxt->pending_cmd = VCD_CMD_NONE;
+
+	rc = vcd_power_event(dev_ctxt, NULL, VCD_EVT_PWR_DEV_INIT_BEGIN);
+	VCD_FAILED_RETURN(rc, "VCD_EVT_PWR_DEV_INIT_BEGIN failed");
+
+	VCD_MSG_HIGH("Device powered ON and clocked");
+	rc = vcd_sched_create(&dev_ctxt->sched_clnt_list);
+	if (VCD_FAILED(rc)) {
+		VCD_MSG_ERROR("rc = 0x%x. Failed: vcd_sched_create", rc);
+
+		(void)vcd_power_event(dev_ctxt, NULL,
+					  VCD_EVT_PWR_DEV_INIT_FAIL);
+
+		return rc;
+	}
+
+	VCD_MSG_HIGH("Created scheduler instance.");
+
+	ddl_init.core_virtual_base_addr = dev_ctxt->device_base_addr;
+	ddl_init.interrupt_clr = dev_ctxt->config.interrupt_clr;
+	ddl_init.ddl_callback = vcd_ddl_callback;
+
+	rc = ddl_device_init(&ddl_init, NULL);
+
+	if (VCD_FAILED(rc)) {
+		VCD_MSG_ERROR("rc = 0x%x. Failed: ddl_device_init", rc);
+		vcd_sched_destroy(&dev_ctxt->sched_clnt_list);
+		(void)vcd_power_event(dev_ctxt, NULL,
+					  VCD_EVT_PWR_DEV_INIT_FAIL);
+	} else {
+		vcd_device_timer_start(dev_ctxt);
+		vcd_do_device_state_transition(drv_ctxt,
+						   VCD_DEVICE_STATE_INITING,
+						   ev_code);
+	}
+
+	return rc;
+}
+
+void vcd_handle_device_init_failed(struct vcd_drv_ctxt *drv_ctxt,
+		u32 status)
+{
+	struct vcd_clnt_ctxt *client;
+	struct vcd_clnt_ctxt *tmp_client;
+
+	VCD_MSG_ERROR("Device init failed. status = %d", status);
+
+	client = drv_ctxt->dev_ctxt.cctxt_list_head;
+	while (client) {
+		client->callback(VCD_EVT_RESP_OPEN,
+				   status, NULL, 0, 0, client->client_data);
+
+		tmp_client = client;
+		client = client->next;
+
+		vcd_destroy_client_context(tmp_client);
+	}
+	if (ddl_device_release(NULL))
+		VCD_MSG_ERROR("Failed: ddl_device_release");
+
+	vcd_sched_destroy(&drv_ctxt->dev_ctxt.sched_clnt_list);
+	if (vcd_power_event(&drv_ctxt->dev_ctxt,
+		NULL, VCD_EVT_PWR_DEV_INIT_FAIL))
+		VCD_MSG_ERROR("VCD_EVT_PWR_DEV_INIT_FAIL failed");
+
+	vcd_do_device_state_transition(drv_ctxt,
+		VCD_DEVICE_STATE_NOT_INIT,
+		DEVICE_STATE_EVENT_NUMBER(dev_cb));
+}
+
+u32 vcd_deinit_device_context(struct vcd_drv_ctxt *drv_ctxt,
+		u32 ev_code)
+{
+	struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt;
+	u32 rc = VCD_S_SUCCESS;
+
+	VCD_MSG_LOW("vcd_deinit_device_context:");
+
+	rc = vcd_power_event(&drv_ctxt->dev_ctxt, NULL,
+				 VCD_EVT_PWR_DEV_TERM_BEGIN);
+
+	VCD_FAILED_RETURN(rc, "VCD_EVT_PWR_DEV_TERM_BEGIN failed");
+
+	rc = ddl_device_release(NULL);
+
+	if (VCD_FAILED(rc)) {
+		VCD_MSG_ERROR("rc = 0x%x. Failed: ddl_device_release", rc);
+
+		(void)vcd_power_event(dev_ctxt, NULL,
+					  VCD_EVT_PWR_DEV_TERM_FAIL);
+	} else {
+		vcd_sched_destroy(&dev_ctxt->sched_clnt_list);
+		(void) vcd_power_event(dev_ctxt, NULL,
+			VCD_EVT_PWR_DEV_TERM_END);
+
+		vcd_do_device_state_transition(drv_ctxt,
+			VCD_DEVICE_STATE_NOT_INIT, ev_code);
+	}
+	return rc;
+}
+
+void vcd_term_driver_context(struct vcd_drv_ctxt *drv_ctxt)
+{
+	struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt;
+
+	VCD_MSG_HIGH("All driver instances terminated");
+
+	if (dev_ctxt->config.deregister_isr)
+		dev_ctxt->config.deregister_isr();
+
+	if (dev_ctxt->config.un_map_dev_base_addr)
+		dev_ctxt->config.un_map_dev_base_addr();
+
+	if (dev_ctxt->config.timer_release)
+		dev_ctxt->config.timer_release(
+			dev_ctxt->hw_timer_handle);
+
+	kfree(dev_ctxt->trans_tbl);
+
+	memset(dev_ctxt, 0, sizeof(struct vcd_dev_ctxt));
+
+	vcd_do_device_state_transition(drv_ctxt,
+					   VCD_DEVICE_STATE_NULL,
+					   DEVICE_STATE_EVENT_NUMBER(term));
+
+}
+
+u32 vcd_reset_device_context(struct vcd_drv_ctxt *drv_ctxt,
+	u32 ev_code)
+{
+	struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt;
+	u32 rc = VCD_S_SUCCESS;
+
+	VCD_MSG_LOW("vcd_reset_device_context:");
+	vcd_reset_device_channels(dev_ctxt);
+	rc = vcd_power_event(&drv_ctxt->dev_ctxt, NULL,
+						 VCD_EVT_PWR_DEV_TERM_BEGIN);
+	VCD_FAILED_RETURN(rc, "VCD_EVT_PWR_DEV_TERM_BEGIN failed");
+	if (ddl_reset_hw(0))
+		VCD_MSG_HIGH("HW Reset done");
+	else
+		VCD_MSG_FATAL("HW Reset failed");
+
+	(void)vcd_power_event(dev_ctxt, NULL, VCD_EVT_PWR_DEV_TERM_END);
+
+	return VCD_S_SUCCESS;
+}
+
+void vcd_handle_device_err_fatal(struct vcd_dev_ctxt *dev_ctxt,
+	struct vcd_clnt_ctxt *trig_clnt)
+{
+	struct vcd_clnt_ctxt *cctxt = dev_ctxt->cctxt_list_head;
+	struct vcd_clnt_ctxt *tmp_clnt = NULL;
+	VCD_MSG_LOW("vcd_handle_device_err_fatal:");
+	while (cctxt) {
+		tmp_clnt = cctxt;
+		cctxt = cctxt->next;
+		if (tmp_clnt != trig_clnt)
+			vcd_clnt_handle_device_err_fatal(tmp_clnt,
+				tmp_clnt->status.last_evt);
+	}
+	dev_ctxt->pending_cmd = VCD_CMD_DEVICE_RESET;
+	if (!dev_ctxt->cctxt_list_head)
+		vcd_do_device_state_transition(vcd_get_drv_context(),
+			VCD_DEVICE_STATE_NOT_INIT,
+			DEVICE_STATE_EVENT_NUMBER(timeout));
+	else
+		vcd_do_device_state_transition(vcd_get_drv_context(),
+			VCD_DEVICE_STATE_INVALID,
+			DEVICE_STATE_EVENT_NUMBER(dev_cb));
+}
+
+void vcd_handle_for_last_clnt_close(
+	struct vcd_dev_ctxt *dev_ctxt, u32 send_deinit)
+{
+	if (!dev_ctxt->cctxt_list_head) {
+		VCD_MSG_HIGH("All clients are closed");
+		if (send_deinit)
+			(void) vcd_deinit_device_context(
+				vcd_get_drv_context(),
+				DEVICE_STATE_EVENT_NUMBER(close));
+		else
+			dev_ctxt->pending_cmd =
+			VCD_CMD_DEVICE_TERM;
+	}
+}
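+
+/*
+ * Run deferred work: execute any pending device command, then keep
+ * submitting queued commands and frames to DDL until no channel is free.
+ */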
+void vcd_continue(void)
+{
+	struct vcd_drv_ctxt *drv_ctxt;
+	struct vcd_dev_ctxt *dev_ctxt;
+	u32 command_continue;
+	struct vcd_transc *transc;
+	u32 rc;
+	VCD_MSG_LOW("vcd_continue:");
+
+	drv_ctxt = vcd_get_drv_context();
+	dev_ctxt = &drv_ctxt->dev_ctxt;
+
+	dev_ctxt->command_continue = false;
+
+	if (dev_ctxt->pending_cmd == VCD_CMD_DEVICE_INIT) {
+		VCD_MSG_HIGH("VCD_CMD_DEVICE_INIT is pending");
+
+		dev_ctxt->pending_cmd = VCD_CMD_NONE;
+
+		(void)vcd_init_device_context(drv_ctxt,
+			DEVICE_STATE_EVENT_NUMBER(open));
+	} else if (dev_ctxt->pending_cmd == VCD_CMD_DEVICE_TERM) {
+		VCD_MSG_HIGH("VCD_CMD_DEVICE_TERM is pending");
+
+		dev_ctxt->pending_cmd = VCD_CMD_NONE;
+
+		(void)vcd_deinit_device_context(drv_ctxt,
+			DEVICE_STATE_EVENT_NUMBER(close));
+	} else if (dev_ctxt->pending_cmd == VCD_CMD_DEVICE_RESET) {
+		VCD_MSG_HIGH("VCD_CMD_DEVICE_RESET is pending");
+		dev_ctxt->pending_cmd = VCD_CMD_NONE;
+		(void)vcd_reset_device_context(drv_ctxt,
+			DEVICE_STATE_EVENT_NUMBER(dev_cb));
+	} else {
+		if (dev_ctxt->set_perf_lvl_pending) {
+			rc = vcd_power_event(dev_ctxt, NULL,
+						 VCD_EVT_PWR_DEV_SET_PERFLVL);
+
+			if (VCD_FAILED(rc)) {
+				VCD_MSG_ERROR
+					("VCD_EVT_PWR_DEV_SET_PERFLVL failed");
+				VCD_MSG_HIGH
+					("Not running at desired perf level. "
+					 "curr=%d, reqd=%d",
+					 dev_ctxt->curr_perf_lvl,
+					 dev_ctxt->reqd_perf_lvl);
+			} else {
+				dev_ctxt->set_perf_lvl_pending = false;
+			}
+		}
+
+		do {
+			command_continue = false;
+
+			if (vcd_get_command_channel_in_loop
+				(dev_ctxt, &transc)) {
+				if (vcd_submit_command_in_continue(dev_ctxt,
+					transc))
+					command_continue = true;
+				else {
+					VCD_MSG_MED
+						("No more commands to submit");
+
+					vcd_release_command_channel(dev_ctxt,
+						transc);
+
+					vcd_release_interim_command_channels
+						(dev_ctxt);
+				}
+			}
+		} while (command_continue);
+
+		do {
+			command_continue = false;
+
+			if (vcd_get_frame_channel_in_loop
+				(dev_ctxt, &transc)) {
+				if (vcd_try_submit_frame_in_continue(dev_ctxt,
+					transc)) {
+					command_continue = true;
+				} else {
+					VCD_MSG_MED("No more frames to submit");
+
+					vcd_release_frame_channel(dev_ctxt,
+								  transc);
+
+					vcd_release_interim_frame_channels
+						(dev_ctxt);
+				}
+			}
+
+		} while (command_continue);
+
+		if (!vcd_core_is_busy(dev_ctxt)) {
+			rc = vcd_power_event(dev_ctxt, NULL,
+				VCD_EVT_PWR_CLNT_CMD_END);
+
+			if (VCD_FAILED(rc))
+				VCD_MSG_ERROR("Failed: "
+					"VCD_EVT_PWR_CLNT_CMD_END");
+		}
+	}
+}
+
+static void vcd_pause_all_sessions(struct vcd_dev_ctxt *dev_ctxt)
+{
+	struct vcd_clnt_ctxt *cctxt = dev_ctxt->cctxt_list_head;
+	u32 rc;
+
+	while (cctxt) {
+		if (cctxt->clnt_state.state_table->ev_hdlr.pause) {
+			rc = cctxt->clnt_state.state_table->ev_hdlr.
+				pause(cctxt);
+
+			if (VCD_FAILED(rc))
+				VCD_MSG_ERROR("Client pause failed");
+
+		}
+
+		cctxt = cctxt->next;
+	}
+}
+
+static void vcd_resume_all_sessions(struct vcd_dev_ctxt *dev_ctxt)
+{
+	struct vcd_clnt_ctxt *cctxt = dev_ctxt->cctxt_list_head;
+	u32 rc;
+
+	while (cctxt) {
+		if (cctxt->clnt_state.state_table->ev_hdlr.resume) {
+			rc = cctxt->clnt_state.state_table->ev_hdlr.
+				resume(cctxt);
+
+			if (VCD_FAILED(rc))
+				VCD_MSG_ERROR("Client resume failed");
+
+		}
+
+		cctxt = cctxt->next;
+	}
+}
+
+static u32 vcd_init_cmn
+	(struct vcd_drv_ctxt *drv_ctxt,
+	 struct vcd_init_config *config, s32 *driver_handle)
+{
+	struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt;
+	s32 driver_id;
+
+	if (dev_ctxt->config.interrupt_clr !=
+		config->interrupt_clr
+		|| dev_ctxt->config.register_isr !=
+		config->register_isr
+		|| dev_ctxt->config.deregister_isr !=
+		config->deregister_isr
+		|| dev_ctxt->config.map_dev_base_addr !=
+		config->map_dev_base_addr
+		|| dev_ctxt->config.un_map_dev_base_addr !=
+		config->un_map_dev_base_addr) {
+		VCD_MSG_ERROR("Device config mismatch");
+		VCD_MSG_HIGH("VCD will be using config from 1st vcd_init");
+	}
+
+	*driver_handle = 0;
+
+	driver_id = 0;
+	while (driver_id < VCD_DRIVER_INSTANCE_MAX &&
+		   dev_ctxt->driver_ids[driver_id]) {
+		++driver_id;
+	}
+
+	if (driver_id == VCD_DRIVER_INSTANCE_MAX) {
+		VCD_MSG_ERROR("Max driver instances reached");
+
+		return VCD_ERR_FAIL;
+	}
+
+	++dev_ctxt->refs;
+	dev_ctxt->driver_ids[driver_id] = true;
+	*driver_handle = driver_id + 1;
+
+	VCD_MSG_HIGH("Driver_id = %d. No of driver instances = %d",
+			 driver_id, dev_ctxt->refs);
+
+	return VCD_S_SUCCESS;
+
+}
+
+static u32 vcd_init_in_null
+	(struct vcd_drv_ctxt *drv_ctxt,
+	 struct vcd_init_config *config, s32 *driver_handle) {
+	u32 rc = VCD_S_SUCCESS;
+	struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt;
+	u32 done_create_timer = false;
+	VCD_MSG_LOW("vcd_init_in_dev_null:");
+
+
+	dev_ctxt->config = *config;
+
+	dev_ctxt->device_base_addr =
+		(u8 *)config->map_dev_base_addr(
+			dev_ctxt->config.device_name);
+
+	if (!dev_ctxt->device_base_addr) {
+		VCD_MSG_ERROR("NULL Device_base_addr");
+
+		return VCD_ERR_FAIL;
+	}
+
+	if (config->register_isr) {
+		config->register_isr(dev_ctxt->config.
+			device_name);
+	}
+
+	if (config->timer_create) {
+		if (config->timer_create(vcd_hw_timeout_handler,
+			NULL, &dev_ctxt->hw_timer_handle))
+			done_create_timer = true;
+		else {
+			VCD_MSG_ERROR("timer_create failed");
+			return VCD_ERR_FAIL;
+		}
+	}
+
+
+	rc = vcd_init_cmn(drv_ctxt, config, driver_handle);
+
+	if (!VCD_FAILED(rc)) {
+		vcd_do_device_state_transition(drv_ctxt,
+						   VCD_DEVICE_STATE_NOT_INIT,
+						   DEVICE_STATE_EVENT_NUMBER
+						   (init));
+	} else {
+		if (dev_ctxt->config.un_map_dev_base_addr)
+			dev_ctxt->config.un_map_dev_base_addr();
+
+		if (dev_ctxt->config.deregister_isr)
+			dev_ctxt->config.deregister_isr();
+
+		if (done_create_timer && dev_ctxt->config.timer_release)
+			dev_ctxt->config.timer_release(dev_ctxt->
+				hw_timer_handle);
+
+	}
+
+	return rc;
+
+}
+
+static u32 vcd_init_in_not_init
+	(struct vcd_drv_ctxt *drv_ctxt,
+	 struct vcd_init_config *config, s32 *driver_handle)
+{
+
+	VCD_MSG_LOW("vcd_init_in_dev_not_init:");
+
+	return vcd_init_cmn(drv_ctxt, config, driver_handle);
+
+}
+
+static u32 vcd_init_in_initing
+	(struct vcd_drv_ctxt *drv_ctxt,
+	 struct vcd_init_config *config, s32 *driver_handle) {
+
+	VCD_MSG_LOW("vcd_init_in_dev_initing:");
+
+	return vcd_init_cmn(drv_ctxt, config, driver_handle);
+
+}
+
+static u32 vcd_init_in_ready
+	(struct vcd_drv_ctxt *drv_ctxt,
+	 struct vcd_init_config *config, s32 *driver_handle)
+{
+	VCD_MSG_LOW("vcd_init_in_dev_ready:");
+
+	return vcd_init_cmn(drv_ctxt, config, driver_handle);
+}
+
+static u32 vcd_term_cmn
+	(struct vcd_drv_ctxt *drv_ctxt, s32 driver_handle)
+{
+	struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt;
+
+	if (!vcd_validate_driver_handle(dev_ctxt, driver_handle)) {
+		VCD_MSG_ERROR("Invalid driver handle = %d", driver_handle);
+
+		return VCD_ERR_BAD_HANDLE;
+	}
+
+	if (vcd_check_for_client_context(dev_ctxt,
+				driver_handle - 1)) {
+		VCD_MSG_ERROR("Driver has active client");
+
+		return VCD_ERR_BAD_STATE;
+	}
+
+	--dev_ctxt->refs;
+	dev_ctxt->driver_ids[driver_handle - 1] = false;
+
+	VCD_MSG_HIGH("Driver_id %d terminated. No of driver instances = %d",
+			 driver_handle - 1, dev_ctxt->refs);
+
+	return VCD_S_SUCCESS;
+}
+
+static u32 vcd_term_in_not_init
+	(struct vcd_drv_ctxt *drv_ctxt, s32 driver_handle)
+{
+	struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt;
+	u32 rc;
+
+	VCD_MSG_LOW("vcd_term_in_dev_not_init:");
+
+	rc = vcd_term_cmn(drv_ctxt, driver_handle);
+
+	if (!VCD_FAILED(rc) && !dev_ctxt->refs)
+		vcd_term_driver_context(drv_ctxt);
+
+	return rc;
+}
+
+static u32 vcd_term_in_initing
+	(struct vcd_drv_ctxt *drv_ctxt, s32 driver_handle)
+{
+	VCD_MSG_LOW("vcd_term_in_dev_initing:");
+
+	return vcd_term_cmn(drv_ctxt, driver_handle);
+}
+
+static u32 vcd_term_in_ready
+	(struct vcd_drv_ctxt *drv_ctxt, s32 driver_handle)
+{
+	VCD_MSG_LOW("vcd_term_in_dev_ready:");
+
+	return vcd_term_cmn(drv_ctxt, driver_handle);
+}
+
+static u32  vcd_term_in_invalid(struct vcd_drv_ctxt *drv_ctxt,
+							 s32  driver_handle)
+{
+	u32 rc;
+	VCD_MSG_LOW("vcd_term_in_invalid:");
+	rc = vcd_term_cmn(drv_ctxt, driver_handle);
+	if (!VCD_FAILED(rc) && !drv_ctxt->dev_ctxt.refs)
+		vcd_term_driver_context(drv_ctxt);
+
+	return rc;
+}
+
+static u32 vcd_open_cmn
+	(struct vcd_drv_ctxt *drv_ctxt,
+	 s32 driver_handle,
+	 u32 decoding,
+	 void (*callback) (u32 event, u32 status, void *info, size_t sz,
+			   void *handle, void *const client_data),
+	 void *client_data, struct vcd_clnt_ctxt ** clnt_cctxt)
+{
+	struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt;
+	struct vcd_clnt_ctxt *cctxt;
+	struct vcd_clnt_ctxt *client;
+
+	if (!vcd_validate_driver_handle(dev_ctxt, driver_handle)) {
+		VCD_MSG_ERROR("Invalid driver handle = %d", driver_handle);
+
+		return VCD_ERR_BAD_HANDLE;
+	}
+
+	cctxt =	(struct vcd_clnt_ctxt *)
+		kmalloc(sizeof(struct vcd_clnt_ctxt), GFP_KERNEL);
+	if (!cctxt) {
+		VCD_MSG_ERROR("No memory for client ctxt");
+
+		return VCD_ERR_ALLOC_FAIL;
+	}
+
+	memset(cctxt, 0, sizeof(struct vcd_clnt_ctxt));
+	cctxt->dev_ctxt = dev_ctxt;
+	cctxt->driver_id = driver_handle - 1;
+	cctxt->decoding = decoding;
+	cctxt->callback = callback;
+	cctxt->client_data = client_data;
+	cctxt->status.last_evt = VCD_EVT_RESP_OPEN;
+	INIT_LIST_HEAD(&cctxt->in_buf_pool.queue);
+	INIT_LIST_HEAD(&cctxt->out_buf_pool.queue);
+	client = dev_ctxt->cctxt_list_head;
+	dev_ctxt->cctxt_list_head = cctxt;
+	cctxt->next = client;
+
+	*clnt_cctxt = cctxt;
+
+	return VCD_S_SUCCESS;
+
+}
+
+static u32 vcd_open_in_not_init
+	(struct vcd_drv_ctxt *drv_ctxt,
+	 s32 driver_handle,
+	 u32 decoding,
+	 void (*callback) (u32 event, u32 status, void *info, size_t sz,
+			   void *handle, void *const client_data),
+	 void *client_data)
+{
+	struct vcd_clnt_ctxt *cctxt;
+	u32 rc;
+
+	VCD_MSG_LOW("vcd_open_in_dev_not_init:");
+
+	rc = vcd_open_cmn(drv_ctxt, driver_handle, decoding, callback,
+			  client_data, &cctxt);
+
+	VCD_FAILED_RETURN(rc, "Failed: vcd_open_cmn");
+
+	rc = vcd_init_device_context(drv_ctxt,
+					 DEVICE_STATE_EVENT_NUMBER(open));
+
+	if (VCD_FAILED(rc))
+		vcd_destroy_client_context(cctxt);
+
+	return rc;
+}
+
+static u32 vcd_open_in_initing(struct vcd_drv_ctxt *drv_ctxt,
+	 s32 driver_handle, u32 decoding,
+	 void (*callback) (u32 event, u32 status, void *info, size_t sz,
+			   void *handle, void *const client_data),
+	 void *client_data)
+{
+	struct vcd_clnt_ctxt *cctxt;
+
+	VCD_MSG_LOW("vcd_open_in_dev_initing:");
+
+	return vcd_open_cmn(drv_ctxt, driver_handle, decoding, callback,
+				 client_data, &cctxt);
+}
+
+static u32 vcd_open_in_ready
+	(struct vcd_drv_ctxt *drv_ctxt,
+	 s32 driver_handle,
+	 u32 decoding,
+	 void (*callback) (u32 event, u32 status, void *info, size_t sz,
+			   void *handle, void *const client_data),
+	 void *client_data)
+{
+	struct vcd_clnt_ctxt *cctxt;
+	struct vcd_handle_container container;
+	u32 rc;
+
+	VCD_MSG_LOW("vcd_open_in_dev_ready:");
+
+	rc = vcd_open_cmn(drv_ctxt, driver_handle, decoding, callback,
+			  client_data, &cctxt);
+
+	VCD_FAILED_RETURN(rc, "Failed: vcd_open_cmn");
+
+	rc = vcd_init_client_context(cctxt);
+
+	if (!VCD_FAILED(rc)) {
+		container.handle = (void *)cctxt;
+
+		callback(VCD_EVT_RESP_OPEN,
+			 VCD_S_SUCCESS,
+			 &container,
+			 sizeof(container), container.handle, client_data);
+	} else {
+		VCD_MSG_ERROR("rc = 0x%x. Failed: vcd_init_client_context", rc);
+
+		vcd_destroy_client_context(cctxt);
+	}
+
+	return rc;
+}
+
+static u32 vcd_close_in_ready
+	(struct vcd_drv_ctxt *drv_ctxt,
+	 struct vcd_clnt_ctxt *cctxt) {
+	u32 rc;
+
+	VCD_MSG_LOW("vcd_close_in_dev_ready:");
+
+	if (cctxt->clnt_state.state_table->ev_hdlr.close) {
+		rc = cctxt->clnt_state.state_table->ev_hdlr.
+			close(cctxt);
+	} else {
+		VCD_MSG_ERROR("Unsupported API in client state %d",
+				  cctxt->clnt_state.state);
+
+		rc = VCD_ERR_BAD_STATE;
+	}
+
+	if (!VCD_FAILED(rc))
+		vcd_handle_for_last_clnt_close(&drv_ctxt->dev_ctxt, true);
+
+	return rc;
+}
+
+static u32  vcd_close_in_dev_invalid(struct vcd_drv_ctxt *drv_ctxt,
+	struct vcd_clnt_ctxt *cctxt)
+{
+	u32 rc;
+	VCD_MSG_LOW("vcd_close_in_dev_invalid:");
+	if (cctxt->clnt_state.state_table->ev_hdlr.close) {
+		rc = cctxt->clnt_state.state_table->
+			ev_hdlr.close(cctxt);
+	} else {
+		VCD_MSG_ERROR("Unsupported API in client state %d",
+					  cctxt->clnt_state.state);
+		rc = VCD_ERR_BAD_STATE;
+	}
+	if (!VCD_FAILED(rc) && !drv_ctxt->dev_ctxt.
+		cctxt_list_head) {
+		VCD_MSG_HIGH("All INVALID clients are closed");
+		vcd_do_device_state_transition(drv_ctxt,
+			VCD_DEVICE_STATE_NOT_INIT,
+			DEVICE_STATE_EVENT_NUMBER(close));
+	}
+	return rc;
+}
+
+static u32 vcd_resume_in_ready
+	(struct vcd_drv_ctxt *drv_ctxt,
+	 struct vcd_clnt_ctxt *cctxt) {
+	u32 rc = VCD_S_SUCCESS;
+
+	VCD_MSG_LOW("vcd_resume_in_ready:");
+
+	if (cctxt->clnt_state.state_table->ev_hdlr.resume) {
+		rc = cctxt->clnt_state.state_table->ev_hdlr.
+			resume(cctxt);
+	} else {
+		VCD_MSG_ERROR("Unsupported API in client state %d",
+				  cctxt->clnt_state.state);
+
+		rc = VCD_ERR_BAD_STATE;
+	}
+
+	return rc;
+}
+
+static u32 vcd_set_dev_pwr_in_ready
+	(struct vcd_drv_ctxt *drv_ctxt,
+	 enum vcd_power_state pwr_state)
+{
+	u32 rc = VCD_S_SUCCESS;
+	struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt;
+
+	VCD_MSG_LOW("vcd_set_dev_pwr_in_ready:");
+
+	switch (pwr_state) {
+	case VCD_PWR_STATE_SLEEP:
+		{
+			if (dev_ctxt->pwr_state == VCD_PWR_STATE_ON)
+				vcd_pause_all_sessions(dev_ctxt);
+			dev_ctxt->pwr_state = VCD_PWR_STATE_SLEEP;
+			break;
+		}
+
+	case VCD_PWR_STATE_ON:
+		{
+			if (dev_ctxt->pwr_state == VCD_PWR_STATE_SLEEP)
+				vcd_resume_all_sessions(dev_ctxt);
+			dev_ctxt->pwr_state = VCD_PWR_STATE_ON;
+			break;
+		}
+
+	default:
+		{
+			VCD_MSG_ERROR("Invalid power state requested %d",
+					  pwr_state);
+			break;
+		}
+
+	}
+
+	return rc;
+}
+
+static void vcd_dev_cb_in_initing
+	(struct vcd_drv_ctxt *drv_ctxt,
+	 u32 event,
+	 u32 status,
+	 void *payload, size_t sz, u32 *ddl_handle, void *const client_data)
+{
+	struct vcd_dev_ctxt *dev_ctxt;
+	struct vcd_clnt_ctxt *client;
+	struct vcd_clnt_ctxt *tmp_client;
+	struct vcd_handle_container container;
+	u32 rc = VCD_S_SUCCESS;
+	u32 client_inited = false;
+	u32 fail_all_open = false;
+
+	VCD_MSG_LOW("vcd_dev_cb_in_initing:");
+
+	if (event != VCD_EVT_RESP_DEVICE_INIT) {
+		VCD_MSG_ERROR("vcd_dev_cb_in_initing: Unexpected event %d",
+				  (int)event);
+		return;
+	}
+
+	dev_ctxt = &drv_ctxt->dev_ctxt;
+
+	dev_ctxt->command_continue = false;
+
+	if (VCD_FAILED(status)) {
+		vcd_handle_device_init_failed(drv_ctxt, status);
+
+		return;
+	}
+
+	vcd_do_device_state_transition(drv_ctxt,
+					   VCD_DEVICE_STATE_READY,
+					   DEVICE_STATE_EVENT_NUMBER(open));
+
+	if (!dev_ctxt->cctxt_list_head) {
+		VCD_MSG_HIGH("All clients are closed");
+
+		dev_ctxt->pending_cmd = VCD_CMD_DEVICE_TERM;
+
+		return;
+	}
+
+	if (!dev_ctxt->ddl_cmd_ch_depth
+		|| !dev_ctxt->trans_tbl)
+		rc = vcd_setup_with_ddl_capabilities(dev_ctxt);
+
+
+	if (VCD_FAILED(rc)) {
+		VCD_MSG_ERROR
+			("rc = 0x%x: Failed vcd_setup_with_ddl_capabilities",
+			 rc);
+
+		fail_all_open = true;
+	}
+
+	client = dev_ctxt->cctxt_list_head;
+	while (client) {
+		if (!fail_all_open)
+			rc = vcd_init_client_context(client);
+
+
+		if (!VCD_FAILED(rc)) {
+			container.handle = (void *)client;
+			client->callback(VCD_EVT_RESP_OPEN,
+					   VCD_S_SUCCESS,
+					   &container,
+					   sizeof(container),
+					   container.handle,
+					   client->client_data);
+
+			client = client->next;
+
+			client_inited = true;
+		} else {
+			VCD_MSG_ERROR
+				("rc = 0x%x, Failed: vcd_init_client_context",
+				 rc);
+
+			client->callback(VCD_EVT_RESP_OPEN,
+					   rc,
+					   NULL, 0, 0, client->client_data);
+
+			tmp_client = client;
+			client = client->next;
+
+			vcd_destroy_client_context(tmp_client);
+		}
+	}
+
+	if (!client_inited || fail_all_open) {
+		VCD_MSG_ERROR("All client open requests failed");
+
+		dev_ctxt->pending_cmd = VCD_CMD_DEVICE_TERM;
+	} else {
+		if (vcd_power_event(dev_ctxt, NULL,
+					 VCD_EVT_PWR_DEV_INIT_END)) {
+			VCD_MSG_ERROR("VCD_EVT_PWR_DEV_INIT_END failed");
+		}
+	}
+}
+
+static void  vcd_hw_timeout_cmn(struct vcd_drv_ctxt *drv_ctxt,
+							  void *user_data)
+{
+	struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt;
+	VCD_MSG_LOW("vcd_hw_timeout_cmn:");
+	vcd_device_timer_stop(dev_ctxt);
+
+	vcd_handle_device_err_fatal(dev_ctxt, NULL);
+
+	/* Reset HW. */
+	(void) vcd_reset_device_context(drv_ctxt,
+		DEVICE_STATE_EVENT_NUMBER(timeout));
+}
+
+static void vcd_dev_enter_null
+	(struct vcd_drv_ctxt *drv_ctxt, s32 state_event) {
+	VCD_MSG_MED("Entering DEVICE_STATE_NULL on api %d", state_event);
+
+}
+
+static void vcd_dev_enter_not_init
+	(struct vcd_drv_ctxt *drv_ctxt, s32 state_event) {
+	VCD_MSG_MED("Entering DEVICE_STATE_NOT_INIT on api %d",
+			state_event);
+
+}
+
+static void vcd_dev_enter_initing
+	(struct vcd_drv_ctxt *drv_ctxt, s32 state_event) {
+	VCD_MSG_MED("Entering DEVICE_STATE_INITING on api %d",
+			state_event);
+
+}
+
+static void vcd_dev_enter_ready
+	(struct vcd_drv_ctxt *drv_ctxt, s32 state_event) {
+	VCD_MSG_MED("Entering DEVICE_STATE_READY on api %d",
+			state_event);
+}
+
+static void vcd_dev_enter_invalid(struct vcd_drv_ctxt *drv_ctxt,
+							   s32 state_event)
+{
+	VCD_MSG_MED("Entering DEVICE_STATE_INVALID on api %d", state_event);
+}
+
+static void vcd_dev_exit_null
+	(struct vcd_drv_ctxt *drv_ctxt, s32 state_event) {
+	VCD_MSG_MED("Exiting DEVICE_STATE_NULL on api %d", state_event);
+}
+
+static void vcd_dev_exit_not_init
+	(struct vcd_drv_ctxt *drv_ctxt, s32 state_event) {
+	VCD_MSG_MED("Exiting DEVICE_STATE_NOT_INIT on api %d",
+			state_event);
+
+}
+
+static void vcd_dev_exit_initing
+	(struct vcd_drv_ctxt *drv_ctxt, s32 state_event) {
+	VCD_MSG_MED("Exiting DEVICE_STATE_INITING on api %d",
+			state_event);
+}
+
+static void vcd_dev_exit_ready
+	(struct vcd_drv_ctxt *drv_ctxt, s32 state_event) {
+	VCD_MSG_MED("Exiting DEVICE_STATE_READY on api %d", state_event);
+}
+
+static void vcd_dev_exit_invalid(struct vcd_drv_ctxt *drv_ctxt,
+							  s32 state_event)
+{
+	VCD_MSG_MED("Exiting DEVICE_STATE_INVALID on api %d", state_event);
+}
+
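+/*
+ * Per-state dispatch tables. Each table maps the eight device events
+ * (init, term, open, close, resume, set_dev_pwr, dev_cb, timeout) to the
+ * handler that is valid in that state; a NULL slot means the event is not
+ * expected there. The entry/exit hooks only trace the state transition.
+ */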
+static const struct vcd_dev_state_table vcd_dev_table_null = {
+	{
+	 vcd_init_in_null,
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 },
+	vcd_dev_enter_null,
+	vcd_dev_exit_null
+};
+
+static const struct vcd_dev_state_table vcd_dev_table_not_init = {
+	{
+	 vcd_init_in_not_init,
+	 vcd_term_in_not_init,
+	 vcd_open_in_not_init,
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 NULL,
+	 },
+	vcd_dev_enter_not_init,
+	vcd_dev_exit_not_init
+};
+
+static const struct vcd_dev_state_table vcd_dev_table_initing = {
+	{
+	 vcd_init_in_initing,
+	 vcd_term_in_initing,
+	 vcd_open_in_initing,
+	 NULL,
+	 NULL,
+	 NULL,
+	 vcd_dev_cb_in_initing,
+	 vcd_hw_timeout_cmn,
+	 },
+	vcd_dev_enter_initing,
+	vcd_dev_exit_initing
+};
+
+static const struct vcd_dev_state_table vcd_dev_table_ready = {
+	{
+	 vcd_init_in_ready,
+	 vcd_term_in_ready,
+	 vcd_open_in_ready,
+	 vcd_close_in_ready,
+	 vcd_resume_in_ready,
+	 vcd_set_dev_pwr_in_ready,
+	 NULL,
+	 vcd_hw_timeout_cmn,
+	 },
+	vcd_dev_enter_ready,
+	vcd_dev_exit_ready
+};
+
+static const struct vcd_dev_state_table vcd_dev_table_in_invalid = {
+	{
+		NULL,
+		vcd_term_in_invalid,
+		NULL,
+		vcd_close_in_dev_invalid,
+		NULL,
+		NULL,
+		NULL,
+		NULL,
+	},
+	vcd_dev_enter_invalid,
+	vcd_dev_exit_invalid
+};
+
+static const struct vcd_dev_state_table *vcd_dev_state_table[] = {
+	&vcd_dev_table_null,
+	&vcd_dev_table_not_init,
+	&vcd_dev_table_initing,
+	&vcd_dev_table_ready,
+	&vcd_dev_table_in_invalid
+};
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_device_sm.h b/drivers/video/msm/vidc/common/vcd/vcd_device_sm.h
new file mode 100644
index 0000000..8245966
--- /dev/null
+++ b/drivers/video/msm/vidc/common/vcd/vcd_device_sm.h
@@ -0,0 +1,96 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _VCD_DEVICE_SM_H_
+#define _VCD_DEVICE_SM_H_
+
+#include "vcd_api.h"
+#include "vcd_ddl_api.h"
+#include "vcd_core.h"
+
+struct vcd_dev_state_table;
+struct vcd_dev_state_ctxt;
+struct vcd_drv_ctxt;
+
+enum vcd_dev_state_enum {
+	VCD_DEVICE_STATE_NULL = 0,
+	VCD_DEVICE_STATE_NOT_INIT,
+	VCD_DEVICE_STATE_INITING,
+	VCD_DEVICE_STATE_READY,
+	VCD_DEVICE_STATE_INVALID,
+	VCD_DEVICE_STATE_MAX,
+	VCD_DEVICE_STATE_32BIT = 0x7FFFFFFF
+};
+
+struct vcd_dev_state_table {
+	struct {
+		u32(*init) (struct vcd_drv_ctxt *drv_ctxt,
+				struct vcd_init_config *config,
+				s32 *driver_handle);
+
+		u32(*term) (struct vcd_drv_ctxt *drv_ctxt,
+				s32 driver_handle);
+
+		u32(*open) (struct vcd_drv_ctxt *drv_ctxt,
+				s32 driver_handle, u32 decoding,
+				void (*callback) (u32 event, u32 status,
+					void *info, size_t sz, void *handle,
+					void *const client_data),
+				void *client_data);
+
+		u32(*close) (struct vcd_drv_ctxt *drv_ctxt,
+				struct vcd_clnt_ctxt *cctxt);
+
+		u32(*resume) (struct vcd_drv_ctxt *drv_ctxt,
+				struct vcd_clnt_ctxt *cctxt);
+
+		u32(*set_dev_pwr) (struct vcd_drv_ctxt *drv_ctxt,
+				enum vcd_power_state pwr_state);
+
+		void (*dev_cb) (struct vcd_drv_ctxt *drv_ctxt,
+				u32 event, u32 status, void *payload,
+				size_t sz, u32 *ddl_handle,
+				void *const client_data);
+
+		void (*timeout) (struct vcd_drv_ctxt *drv_ctxt,
+							void *user_data);
+	} ev_hdlr;
+
+	void (*entry) (struct vcd_drv_ctxt *drv_ctxt,
+			s32 state_event);
+	void (*exit) (struct vcd_drv_ctxt *drv_ctxt,
+			s32 state_event);
+};
+
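+/*
+ * Derives a 1-based event number from an ev_hdlr member name by taking the
+ * member's offset from the first handler on a NULL struct pointer. This
+ * appears to rely on every handler pointer being sizeof(u32) wide, which
+ * holds for the 32-bit targets this driver is built for.
+ */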
+#define   DEVICE_STATE_EVENT_NUMBER(ppf) \
+	((u32 *) (&(((struct vcd_dev_state_table*)0)->ev_hdlr.ppf)) - \
+	(u32 *) (&(((struct vcd_dev_state_table*)0)->ev_hdlr.init)) \
+	+ 1)
+
+struct vcd_dev_state_ctxt {
+	const struct vcd_dev_state_table *state_table;
+
+	enum vcd_dev_state_enum state;
+};
+
+struct vcd_drv_ctxt {
+	struct vcd_dev_state_ctxt dev_state;
+	struct vcd_dev_ctxt dev_ctxt;
+	struct mutex dev_mutex;
+};
+
+
+extern struct vcd_drv_ctxt *vcd_get_drv_context(void);
+
+void vcd_continue(void);
+
+#endif
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_power_sm.c b/drivers/video/msm/vidc/common/vcd/vcd_power_sm.c
new file mode 100644
index 0000000..fff6a3b
--- /dev/null
+++ b/drivers/video/msm/vidc/common/vcd/vcd_power_sm.c
@@ -0,0 +1,351 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "vidc_type.h"
+#include "vcd_power_sm.h"
+#include "vcd_core.h"
+#include "vcd.h"
+
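+/*
+ * Single entry point for power-management events: VCD_EVT_PWR_DEV_* events
+ * are routed to vcd_device_power_event() and VCD_EVT_PWR_CLNT_* events to
+ * vcd_client_power_event().
+ */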
+u32 vcd_power_event(
+	struct vcd_dev_ctxt *dev_ctxt,
+     struct vcd_clnt_ctxt *cctxt, u32 event)
+{
+	u32 rc = VCD_S_SUCCESS;
+
+	VCD_MSG_MED("Device power state = %d", dev_ctxt->pwr_clk_state);
+	VCD_MSG_MED("event = 0x%x", event);
+	switch (event) {
+
+	case VCD_EVT_PWR_DEV_INIT_BEGIN:
+	case VCD_EVT_PWR_DEV_INIT_END:
+	case VCD_EVT_PWR_DEV_INIT_FAIL:
+	case VCD_EVT_PWR_DEV_TERM_BEGIN:
+	case VCD_EVT_PWR_DEV_TERM_END:
+	case VCD_EVT_PWR_DEV_TERM_FAIL:
+	case VCD_EVT_PWR_DEV_SLEEP_BEGIN:
+	case VCD_EVT_PWR_DEV_SLEEP_END:
+	case VCD_EVT_PWR_DEV_SET_PERFLVL:
+	case VCD_EVT_PWR_DEV_HWTIMEOUT:
+		{
+			rc = vcd_device_power_event(dev_ctxt, event,
+				cctxt);
+			break;
+		}
+
+	case VCD_EVT_PWR_CLNT_CMD_BEGIN:
+	case VCD_EVT_PWR_CLNT_CMD_END:
+	case VCD_EVT_PWR_CLNT_CMD_FAIL:
+	case VCD_EVT_PWR_CLNT_PAUSE:
+	case VCD_EVT_PWR_CLNT_RESUME:
+	case VCD_EVT_PWR_CLNT_FIRST_FRAME:
+	case VCD_EVT_PWR_CLNT_LAST_FRAME:
+	case VCD_EVT_PWR_CLNT_ERRFATAL:
+		{
+			rc = vcd_client_power_event(dev_ctxt, cctxt, event);
+			break;
+		}
+
+	}
+
+	if (VCD_FAILED(rc))
+		VCD_MSG_ERROR("vcd_power_event: event 0x%x failed", event);
+
+
+	return rc;
+
+}
+
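+/*
+ * Device-level power handling: powers the core up and clocks it on
+ * DEV_INIT_BEGIN, gates or un-gates the clock around init/term/sleep
+ * boundaries, powers fully down on DEV_INIT_FAIL/DEV_TERM_END and
+ * reprograms the performance level on DEV_SET_PERFLVL.
+ */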
+u32 vcd_device_power_event(struct vcd_dev_ctxt *dev_ctxt, u32 event,
+	struct vcd_clnt_ctxt *cctxt)
+{
+	u32 rc = VCD_ERR_FAIL;
+	u32 set_perf_lvl;
+
+	switch (event) {
+
+	case VCD_EVT_PWR_DEV_INIT_BEGIN:
+	{
+		if (dev_ctxt->pwr_clk_state ==
+			VCD_PWRCLK_STATE_OFF) {
+			if (res_trk_get_max_perf_level(
+				&dev_ctxt->max_perf_lvl)) {
+				if (res_trk_power_up()) {
+					dev_ctxt->pwr_clk_state =
+					VCD_PWRCLK_STATE_ON_NOTCLOCKED;
+					dev_ctxt->curr_perf_lvl = 0;
+					dev_ctxt->reqd_perf_lvl = 0;
+					dev_ctxt->active_clnts = 0;
+					dev_ctxt->set_perf_lvl_pending =
+						false;
+					rc = vcd_enable_clock(dev_ctxt,
+						cctxt);
+					if (VCD_FAILED(rc)) {
+						(void)res_trk_power_down();
+						dev_ctxt->pwr_clk_state =
+							VCD_PWRCLK_STATE_OFF;
+					}
+				}
+			}
+		}
+
+		break;
+	}
+
+	case VCD_EVT_PWR_DEV_INIT_END:
+	case VCD_EVT_PWR_DEV_TERM_FAIL:
+	case VCD_EVT_PWR_DEV_SLEEP_BEGIN:
+	case VCD_EVT_PWR_DEV_HWTIMEOUT:
+		{
+			rc = vcd_gate_clock(dev_ctxt);
+
+			break;
+		}
+
+	case VCD_EVT_PWR_DEV_INIT_FAIL:
+	case VCD_EVT_PWR_DEV_TERM_END:
+		{
+			if (dev_ctxt->pwr_clk_state !=
+				VCD_PWRCLK_STATE_OFF) {
+				(void)vcd_disable_clock(dev_ctxt);
+				(void)res_trk_power_down();
+
+				dev_ctxt->pwr_clk_state =
+				    VCD_PWRCLK_STATE_OFF;
+				dev_ctxt->curr_perf_lvl = 0;
+				dev_ctxt->reqd_perf_lvl = 0;
+				dev_ctxt->active_clnts = 0;
+				dev_ctxt->set_perf_lvl_pending = false;
+				rc = VCD_S_SUCCESS;
+			}
+
+			break;
+		}
+
+	case VCD_EVT_PWR_DEV_TERM_BEGIN:
+	case VCD_EVT_PWR_DEV_SLEEP_END:
+		{
+			rc = vcd_un_gate_clock(dev_ctxt);
+
+			break;
+		}
+
+	case VCD_EVT_PWR_DEV_SET_PERFLVL:
+		{
+			set_perf_lvl = dev_ctxt->reqd_perf_lvl > 0 ?
+				dev_ctxt->reqd_perf_lvl : VCD_MIN_PERF_LEVEL;
+			rc = vcd_set_perf_level(dev_ctxt, set_perf_lvl);
+			break;
+		}
+	}
+	return rc;
+}
+
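+/*
+ * Client-level power handling: the clock is un-gated while a client command
+ * is in flight, and the client's required performance level is added to or
+ * removed from the aggregate device requirement on resume/first-frame and
+ * pause/last-frame/error-fatal respectively.
+ */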
+u32 vcd_client_power_event(
+	struct vcd_dev_ctxt *dev_ctxt,
+    struct vcd_clnt_ctxt *cctxt, u32 event)
+{
+	u32 rc = VCD_ERR_FAIL;
+
+	switch (event) {
+
+	case VCD_EVT_PWR_CLNT_CMD_BEGIN:
+		{
+			rc = vcd_un_gate_clock(dev_ctxt);
+			break;
+		}
+
+	case VCD_EVT_PWR_CLNT_CMD_END:
+		{
+			rc = vcd_gate_clock(dev_ctxt);
+			break;
+		}
+
+	case VCD_EVT_PWR_CLNT_CMD_FAIL:
+		{
+			if (!vcd_core_is_busy(dev_ctxt))
+				rc = vcd_gate_clock(dev_ctxt);
+
+			break;
+		}
+
+	case VCD_EVT_PWR_CLNT_PAUSE:
+	case VCD_EVT_PWR_CLNT_LAST_FRAME:
+	case VCD_EVT_PWR_CLNT_ERRFATAL:
+		{
+			if (cctxt) {
+				rc = VCD_S_SUCCESS;
+				if (cctxt->status.req_perf_lvl) {
+					dev_ctxt->reqd_perf_lvl -=
+						cctxt->reqd_perf_lvl;
+					cctxt->status.req_perf_lvl = false;
+					rc = vcd_set_perf_level(dev_ctxt,
+						dev_ctxt->reqd_perf_lvl);
+				}
+			}
+
+			break;
+		}
+
+	case VCD_EVT_PWR_CLNT_RESUME:
+	case VCD_EVT_PWR_CLNT_FIRST_FRAME:
+		{
+			if (cctxt) {
+				rc = VCD_S_SUCCESS;
+				if (!cctxt->status.req_perf_lvl) {
+					dev_ctxt->reqd_perf_lvl +=
+						cctxt->reqd_perf_lvl;
+					cctxt->status.req_perf_lvl = true;
+
+					rc = vcd_set_perf_level(dev_ctxt,
+						dev_ctxt->reqd_perf_lvl);
+				}
+			}
+			break;
+		}
+	}
+
+	return rc;
+}
+
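+/*
+ * Turns the core clock on for one more client: fails if the device is
+ * powered off, programs the performance level and enables the clocks when
+ * the device is ON_NOTCLOCKED, and counts the client in active_clnts on
+ * success.
+ */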
+u32 vcd_enable_clock(struct vcd_dev_ctxt *dev_ctxt,
+	struct vcd_clnt_ctxt *cctxt)
+{
+	u32 rc = VCD_S_SUCCESS;
+	u32 set_perf_lvl;
+
+	if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_OFF) {
+		VCD_MSG_ERROR("vcd_enable_clock(): Already in state "
+			"VCD_PWRCLK_STATE_OFF\n");
+		rc = VCD_ERR_FAIL;
+	} else if (dev_ctxt->pwr_clk_state ==
+		VCD_PWRCLK_STATE_ON_NOTCLOCKED) {
+		set_perf_lvl = dev_ctxt->reqd_perf_lvl > 0 ?
+			dev_ctxt->reqd_perf_lvl : VCD_MIN_PERF_LEVEL;
+		rc = vcd_set_perf_level(dev_ctxt, set_perf_lvl);
+		if (!VCD_FAILED(rc)) {
+			if (res_trk_enable_clocks()) {
+				dev_ctxt->pwr_clk_state =
+					VCD_PWRCLK_STATE_ON_CLOCKED;
+			}
+		} else {
+			rc = VCD_ERR_FAIL;
+		}
+
+	}
+
+	if (!VCD_FAILED(rc))
+		dev_ctxt->active_clnts++;
+
+	return rc;
+}
+
+u32 vcd_disable_clock(struct vcd_dev_ctxt *dev_ctxt)
+{
+	u32 rc = VCD_S_SUCCESS;
+
+	if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_OFF) {
+		VCD_MSG_ERROR("vcd_disable_clock(): Already in state "
+			"VCD_PWRCLK_STATE_OFF\n");
+		rc = VCD_ERR_FAIL;
+	} else if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_CLOCKED ||
+		dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_CLOCKGATED) {
+		dev_ctxt->active_clnts--;
+
+		if (!dev_ctxt->active_clnts) {
+			if (!res_trk_disable_clocks())
+				rc = VCD_ERR_FAIL;
+
+			dev_ctxt->pwr_clk_state =
+			    VCD_PWRCLK_STATE_ON_NOTCLOCKED;
+			dev_ctxt->curr_perf_lvl = 0;
+		}
+	}
+
+	return rc;
+}
+
+u32 vcd_set_perf_level(struct vcd_dev_ctxt *dev_ctxt, u32 perf_lvl)
+{
+	u32 rc = VCD_S_SUCCESS;
+	if (!vcd_core_is_busy(dev_ctxt)) {
+		if (res_trk_set_perf_level(perf_lvl,
+			&dev_ctxt->curr_perf_lvl, dev_ctxt)) {
+			dev_ctxt->set_perf_lvl_pending = false;
+		} else {
+			rc = VCD_ERR_FAIL;
+			dev_ctxt->set_perf_lvl_pending = true;
+		}
+
+	} else {
+		dev_ctxt->set_perf_lvl_pending = true;
+	}
+
+	return rc;
+}
+
+u32 vcd_update_clnt_perf_lvl(
+	struct vcd_clnt_ctxt *cctxt,
+     struct vcd_property_frame_rate *fps, u32 frm_p_units)
+{
+	u32 rc = VCD_S_SUCCESS;
+	struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
+	u32 new_perf_lvl;
+	new_perf_lvl =
+	    frm_p_units * fps->fps_numerator / fps->fps_denominator;
+	if (cctxt->status.req_perf_lvl) {
+		dev_ctxt->reqd_perf_lvl =
+		    dev_ctxt->reqd_perf_lvl - cctxt->reqd_perf_lvl +
+		    new_perf_lvl;
+		rc = vcd_set_perf_level(cctxt->dev_ctxt,
+			dev_ctxt->reqd_perf_lvl);
+	}
+	cctxt->reqd_perf_lvl = new_perf_lvl;
+	return rc;
+}
+
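+/*
+ * Gates the core clock: an error if the device is off or not yet clocked,
+ * a no-op if the clock is already gated, otherwise the clocks are disabled
+ * and the state moves to ON_CLOCKGATED.
+ */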
+u32 vcd_gate_clock(struct vcd_dev_ctxt *dev_ctxt)
+{
+	u32 rc = VCD_S_SUCCESS;
+	if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_OFF ||
+		dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_NOTCLOCKED) {
+		VCD_MSG_ERROR("%s(): Clock is off or not yet clocked\n",
+			__func__);
+		rc = VCD_ERR_FAIL;
+	} else if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_CLOCKGATED)
+		rc = VCD_S_SUCCESS;
+	else if (res_trk_disable_clocks())
+		dev_ctxt->pwr_clk_state = VCD_PWRCLK_STATE_ON_CLOCKGATED;
+	else
+		rc = VCD_ERR_FAIL;
+	return rc;
+}
+
+u32 vcd_un_gate_clock(struct vcd_dev_ctxt *dev_ctxt)
+{
+	u32 rc = VCD_S_SUCCESS;
+	if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_OFF ||
+		dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_NOTCLOCKED) {
+		VCD_MSG_ERROR("%s(): Clock is off or not yet clocked\n",
+			__func__);
+		rc = VCD_ERR_FAIL;
+	} else if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_CLOCKED)
+		rc = VCD_S_SUCCESS;
+	else if (res_trk_enable_clocks())
+		dev_ctxt->pwr_clk_state = VCD_PWRCLK_STATE_ON_CLOCKED;
+	else
+		rc = VCD_ERR_FAIL;
+	return rc;
+}
+
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_power_sm.h b/drivers/video/msm/vidc/common/vcd/vcd_power_sm.h
new file mode 100644
index 0000000..26ce019
--- /dev/null
+++ b/drivers/video/msm/vidc/common/vcd/vcd_power_sm.h
@@ -0,0 +1,43 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _VCD_POWERSM_H_
+#define _VCD_POWERSM_H_
+
+#define VCD_EVT_PWR_BASE                0x5000
+#define VCD_EVT_PWR_DEV_INIT_BEGIN      (VCD_EVT_PWR_BASE + 0x1)
+#define VCD_EVT_PWR_DEV_INIT_END        (VCD_EVT_PWR_BASE + 0x2)
+#define VCD_EVT_PWR_DEV_INIT_FAIL       (VCD_EVT_PWR_BASE + 0x3)
+#define VCD_EVT_PWR_DEV_TERM_BEGIN      (VCD_EVT_PWR_BASE + 0x4)
+#define VCD_EVT_PWR_DEV_TERM_END        (VCD_EVT_PWR_BASE + 0x5)
+#define VCD_EVT_PWR_DEV_TERM_FAIL       (VCD_EVT_PWR_BASE + 0x6)
+#define VCD_EVT_PWR_DEV_SLEEP_BEGIN     (VCD_EVT_PWR_BASE + 0x7)
+#define VCD_EVT_PWR_DEV_SLEEP_END       (VCD_EVT_PWR_BASE + 0x8)
+#define VCD_EVT_PWR_DEV_SET_PERFLVL     (VCD_EVT_PWR_BASE + 0x9)
+#define VCD_EVT_PWR_DEV_HWTIMEOUT       (VCD_EVT_PWR_BASE + 0xa)
+#define VCD_EVT_PWR_CLNT_CMD_BEGIN      (VCD_EVT_PWR_BASE + 0xb)
+#define VCD_EVT_PWR_CLNT_CMD_END        (VCD_EVT_PWR_BASE + 0xc)
+#define VCD_EVT_PWR_CLNT_CMD_FAIL       (VCD_EVT_PWR_BASE + 0xd)
+#define VCD_EVT_PWR_CLNT_PAUSE          (VCD_EVT_PWR_BASE + 0xe)
+#define VCD_EVT_PWR_CLNT_RESUME         (VCD_EVT_PWR_BASE + 0xf)
+#define VCD_EVT_PWR_CLNT_FIRST_FRAME    (VCD_EVT_PWR_BASE + 0x10)
+#define VCD_EVT_PWR_CLNT_LAST_FRAME     (VCD_EVT_PWR_BASE + 0x11)
+#define VCD_EVT_PWR_CLNT_ERRFATAL       (VCD_EVT_PWR_BASE + 0x12)
+
+enum vcd_pwr_clk_state {
+	VCD_PWRCLK_STATE_OFF = 0,
+	VCD_PWRCLK_STATE_ON_NOTCLOCKED,
+	VCD_PWRCLK_STATE_ON_CLOCKED,
+	VCD_PWRCLK_STATE_ON_CLOCKGATED
+};
+
+#endif
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_property.h b/drivers/video/msm/vidc/common/vcd/vcd_property.h
new file mode 100644
index 0000000..2cb894e
--- /dev/null
+++ b/drivers/video/msm/vidc/common/vcd/vcd_property.h
@@ -0,0 +1,342 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _VCD_DRIVER_PROPERTY_H_
+#define _VCD_DRIVER_PROPERTY_H_
+
+#define VCD_START_BASE       0x0
+#define VCD_I_LIVE           (VCD_START_BASE + 0x1)
+#define VCD_I_CODEC          (VCD_START_BASE + 0x2)
+#define VCD_I_FRAME_SIZE     (VCD_START_BASE + 0x3)
+#define VCD_I_METADATA_ENABLE  (VCD_START_BASE + 0x4)
+#define VCD_I_METADATA_HEADER  (VCD_START_BASE + 0x5)
+#define VCD_I_PROFILE        (VCD_START_BASE + 0x6)
+#define VCD_I_LEVEL          (VCD_START_BASE + 0x7)
+#define VCD_I_BUFFER_FORMAT  (VCD_START_BASE + 0x8)
+#define VCD_I_FRAME_RATE  (VCD_START_BASE + 0x9)
+#define VCD_I_TARGET_BITRATE (VCD_START_BASE + 0xA)
+#define VCD_I_MULTI_SLICE    (VCD_START_BASE + 0xB)
+#define VCD_I_ENTROPY_CTRL   (VCD_START_BASE + 0xC)
+#define VCD_I_DEBLOCKING     (VCD_START_BASE + 0xD)
+#define VCD_I_RATE_CONTROL   (VCD_START_BASE + 0xE)
+#define VCD_I_QP_RANGE      (VCD_START_BASE + 0xF)
+#define VCD_I_SESSION_QP    (VCD_START_BASE + 0x10)
+#define VCD_I_INTRA_PERIOD   (VCD_START_BASE + 0x11)
+#define VCD_I_VOP_TIMING     (VCD_START_BASE + 0x12)
+#define VCD_I_SHORT_HEADER   (VCD_START_BASE + 0x13)
+#define VCD_I_SEQ_HEADER    (VCD_START_BASE + 0x14)
+#define VCD_I_HEADER_EXTENSION   (VCD_START_BASE + 0x15)
+#define VCD_I_INTRA_REFRESH  (VCD_START_BASE + 0x16)
+#define VCD_I_POST_FILTER    (VCD_START_BASE + 0x17)
+#define VCD_I_PROGRESSIVE_ONLY (VCD_START_BASE + 0x18)
+#define VCD_I_OUTPUT_ORDER (VCD_START_BASE + 0x19)
+#define VCD_I_RECON_BUFFERS   (VCD_START_BASE + 0x1A)
+#define VCD_I_FREE_RECON_BUFFERS   (VCD_START_BASE + 0x1B)
+#define VCD_I_GET_RECON_BUFFER_SIZE   (VCD_START_BASE + 0x1C)
+#define VCD_I_H264_MV_BUFFER   (VCD_START_BASE + 0x1D)
+#define VCD_I_FREE_H264_MV_BUFFER (VCD_START_BASE + 0x1E)
+#define VCD_I_GET_H264_MV_SIZE (VCD_START_BASE + 0x1F)
+#define VCD_I_DEC_PICTYPE (VCD_START_BASE + 0x20)
+#define VCD_I_CONT_ON_RECONFIG (VCD_START_BASE + 0x21)
+
+#define VCD_START_REQ      (VCD_START_BASE + 0x1000)
+#define VCD_I_REQ_IFRAME   (VCD_START_REQ + 0x1)
+
+#define VCD_I_RESERVED_BASE  (VCD_START_BASE + 0x10000)
+
+struct vcd_property_hdr {
+	u32    prop_id;
+	size_t sz;
+};
+
+struct vcd_property_live {
+	u32             live;
+};
+
+enum vcd_codec {
+	VCD_CODEC_H264      = 0x1,
+	VCD_CODEC_H263      = 0x2,
+	VCD_CODEC_MPEG1     = 0x3,
+	VCD_CODEC_MPEG2     = 0x4,
+	VCD_CODEC_MPEG4     = 0x5,
+	VCD_CODEC_DIVX_3    = 0x6,
+	VCD_CODEC_DIVX_4    = 0x7,
+	VCD_CODEC_DIVX_5    = 0x8,
+	VCD_CODEC_DIVX_6    = 0x9,
+	VCD_CODEC_XVID      = 0xA,
+	VCD_CODEC_VC1       = 0xB,
+	VCD_CODEC_VC1_RCV   = 0xC
+};
+
+struct vcd_property_codec {
+	enum vcd_codec       codec;
+};
+
+struct vcd_property_frame_size {
+	u32              width;
+	u32              height;
+	u32              stride;
+	u32              scan_lines;
+};
+
+
+#define VCD_METADATA_DATANONE       0x001
+#define VCD_METADATA_QCOMFILLER     0x002
+#define VCD_METADATA_QPARRAY        0x004
+#define VCD_METADATA_CONCEALMB      0x008
+#define VCD_METADATA_SEI            0x010
+#define VCD_METADATA_VUI            0x020
+#define VCD_METADATA_VC1            0x040
+#define VCD_METADATA_PASSTHROUGH    0x080
+#define VCD_METADATA_ENC_SLICE      0x100
+
+struct vcd_property_meta_data_enable {
+	u32 meta_data_enable_flag;
+};
+
+struct vcd_property_metadata_hdr {
+	u32 meta_data_id;
+	u32 version;
+	u32 port_index;
+	u32 type;
+};
+
+struct vcd_property_frame_rate {
+	u32              fps_denominator;
+	u32              fps_numerator;
+};
+
+struct vcd_property_target_bitrate {
+	u32             target_bitrate;
+};
+
+enum vcd_yuv_buffer_format {
+	VCD_BUFFER_FORMAT_NV12      = 0x1,
+	VCD_BUFFER_FORMAT_TILE_4x2    = 0x2,
+	VCD_BUFFER_FORMAT_NV12_16M2KA = 0x3,
+	VCD_BUFFER_FORMAT_TILE_1x1    = 0x4
+};
+
+struct vcd_property_buffer_format {
+	enum vcd_yuv_buffer_format  buffer_format;
+};
+
+struct vcd_property_post_filter {
+	u32           post_filter;
+};
+
+enum vcd_codec_profile {
+	VCD_PROFILE_UNKNOWN       = 0x0,
+	VCD_PROFILE_MPEG4_SP      = 0x1,
+	VCD_PROFILE_MPEG4_ASP     = 0x2,
+	VCD_PROFILE_H264_BASELINE = 0x3,
+	VCD_PROFILE_H264_MAIN     = 0x4,
+	VCD_PROFILE_H264_HIGH     = 0x5,
+	VCD_PROFILE_H263_BASELINE = 0x6,
+	VCD_PROFILE_VC1_SIMPLE    = 0x7,
+	VCD_PROFILE_VC1_MAIN      = 0x8,
+	VCD_PROFILE_VC1_ADVANCE   = 0x9,
+	VCD_PROFILE_MPEG2_MAIN    = 0xA,
+	VCD_PROFILE_MPEG2_SIMPLE  = 0xB
+};
+
+struct vcd_property_profile {
+	enum vcd_codec_profile       profile;
+};
+
+enum vcd_codec_level {
+	VCD_LEVEL_UNKNOWN       = 0x0,
+	VCD_LEVEL_MPEG4_0       = 0x1,
+	VCD_LEVEL_MPEG4_0b      = 0x2,
+	VCD_LEVEL_MPEG4_1       = 0x3,
+	VCD_LEVEL_MPEG4_2       = 0x4,
+	VCD_LEVEL_MPEG4_3       = 0x5,
+	VCD_LEVEL_MPEG4_3b      = 0x6,
+	VCD_LEVEL_MPEG4_4       = 0x7,
+	VCD_LEVEL_MPEG4_4a      = 0x8,
+	VCD_LEVEL_MPEG4_5       = 0x9,
+	VCD_LEVEL_MPEG4_6       = 0xA,
+	VCD_LEVEL_MPEG4_7       = 0xB,
+	VCD_LEVEL_MPEG4_X       = 0xC,
+	VCD_LEVEL_H264_1        = 0x10,
+	VCD_LEVEL_H264_1b       = 0x11,
+	VCD_LEVEL_H264_1p1      = 0x12,
+	VCD_LEVEL_H264_1p2      = 0x13,
+	VCD_LEVEL_H264_1p3      = 0x14,
+	VCD_LEVEL_H264_2        = 0x15,
+	VCD_LEVEL_H264_2p1      = 0x16,
+	VCD_LEVEL_H264_2p2      = 0x17,
+	VCD_LEVEL_H264_3        = 0x18,
+	VCD_LEVEL_H264_3p1      = 0x19,
+	VCD_LEVEL_H264_3p2      = 0x1A,
+	VCD_LEVEL_H264_4        = 0x1B,
+	VCD_LEVEL_H264_4p1      = 0x1C,
+	VCD_LEVEL_H264_4p2      = 0x1D,
+	VCD_LEVEL_H264_5        = 0x1E,
+	VCD_LEVEL_H264_5p1      = 0x1F,
+	VCD_LEVEL_H263_10       = 0x20,
+	VCD_LEVEL_H263_20       = 0x21,
+	VCD_LEVEL_H263_30       = 0x22,
+	VCD_LEVEL_H263_40       = 0x23,
+	VCD_LEVEL_H263_45       = 0x24,
+	VCD_LEVEL_H263_50       = 0x25,
+	VCD_LEVEL_H263_60       = 0x26,
+	VCD_LEVEL_H263_70       = 0x27,
+	VCD_LEVEL_H263_X        = 0x28,
+	VCD_LEVEL_MPEG2_LOW     = 0x30,
+	VCD_LEVEL_MPEG2_MAIN    = 0x31,
+	VCD_LEVEL_MPEG2_HIGH_14 = 0x32,
+	VCD_LEVEL_MPEG2_HIGH    = 0x33,
+	VCD_LEVEL_MPEG2_X       = 0x34,
+	VCD_LEVEL_VC1_S_LOW     = 0x40,
+	VCD_LEVEL_VC1_S_MEDIUM  = 0x41,
+	VCD_LEVEL_VC1_M_LOW     = 0x42,
+	VCD_LEVEL_VC1_M_MEDIUM  = 0x43,
+	VCD_LEVEL_VC1_M_HIGH    = 0x44,
+	VCD_LEVEL_VC1_A_0       = 0x45,
+	VCD_LEVEL_VC1_A_1       = 0x46,
+	VCD_LEVEL_VC1_A_2       = 0x47,
+	VCD_LEVEL_VC1_A_3       = 0x48,
+	VCD_LEVEL_VC1_A_4       = 0x49,
+	VCD_LEVEL_VC1_X         = 0x4A
+};
+
+struct vcd_property_level {
+	enum vcd_codec_level   level;
+};
+
+enum vcd_m_slice_sel {
+	VCD_MSLICE_OFF             = 0x1,
+	VCD_MSLICE_BY_MB_COUNT     = 0x2,
+	VCD_MSLICE_BY_BYTE_COUNT   = 0x3,
+	VCD_MSLICE_BY_GOB          = 0x4
+};
+
+struct vcd_property_multi_slice {
+	enum vcd_m_slice_sel   m_slice_sel;
+	u32             m_slice_size;
+};
+
+enum vcd_entropy_sel {
+	VCD_ENTROPY_SEL_CAVLC = 0x1,
+	VCD_ENTROPY_SEL_CABAC = 0x2
+};
+
+enum vcd_cabac_model {
+	VCD_CABAC_MODEL_NUMBER_0 = 0x1,
+	VCD_CABAC_MODEL_NUMBER_1 = 0x2,
+	VCD_CABAC_MODEL_NUMBER_2 = 0x3
+};
+
+struct vcd_property_entropy_control {
+	enum vcd_entropy_sel  entropy_sel;
+	enum vcd_cabac_model  cabac_model;
+};
+
+enum vcd_db_config {
+	VCD_DB_ALL_BLOCKING_BOUNDARY = 0x1,
+	VCD_DB_DISABLE               = 0x2,
+	VCD_DB_SKIP_SLICE_BOUNDARY   = 0x3
+};
+struct vcd_property_db_config {
+	enum vcd_db_config    db_config;
+	u32             slice_alpha_offset;
+	u32             slice_beta_offset;
+};
+
+enum vcd_rate_control {
+	VCD_RATE_CONTROL_OFF      = 0x1,
+	VCD_RATE_CONTROL_VBR_VFR  = 0x2,
+	VCD_RATE_CONTROL_VBR_CFR  = 0x3,
+	VCD_RATE_CONTROL_CBR_VFR  = 0x4,
+	VCD_RATE_CONTROL_CBR_CFR  = 0x5
+};
+
+struct vcd_property_rate_control {
+	enum vcd_rate_control     rate_control;
+};
+
+struct vcd_property_qp_range {
+	u32              max_qp;
+	u32              min_qp;
+};
+
+struct vcd_property_session_qp {
+	u32 i_frame_qp;
+	u32 p_frame_qp;
+	u32	b_frame_qp;
+};
+
+struct vcd_property_i_period {
+	u32 p_frames;
+	u32 b_frames;
+};
+
+struct vcd_property_vop_timing {
+	u32   vop_time_resolution;
+};
+
+struct vcd_property_short_header {
+	u32             short_header;
+};
+
+struct vcd_property_intra_refresh_mb_number {
+	u32            cir_mb_number;
+};
+
+struct vcd_property_req_i_frame {
+	u32        req_i_frame;
+};
+
+struct vcd_frame_rect {
+	u32   left;
+	u32   top;
+	u32   right;
+	u32   bottom;
+};
+
+struct vcd_property_dec_output_buffer {
+	struct vcd_frame_rect   disp_frm;
+	struct vcd_property_frame_size frm_size;
+};
+
+enum vcd_output_order {
+	VCD_DEC_ORDER_DISPLAY  = 0x0,
+	VCD_DEC_ORDER_DECODE   = 0x1
+};
+
+struct vcd_property_enc_recon_buffer {
+	u8 *kernel_virtual_addr;
+	u8 *physical_addr;
+	u32 buffer_size;
+	u32 ysize;
+	int pmem_fd;
+	u32 offset;
+};
+
+struct vcd_property_h264_mv_buffer {
+	u8 *kernel_virtual_addr;
+	u8 *physical_addr;
+	u32 size;
+	u32 count;
+	int pmem_fd;
+	u32 offset;
+};
+
+struct vcd_property_buffer_size {
+	int width;
+	int height;
+	int size;
+	int alignment;
+};
+
+#endif
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_scheduler.c b/drivers/video/msm/vidc/common/vcd/vcd_scheduler.c
new file mode 100644
index 0000000..34a3445
--- /dev/null
+++ b/drivers/video/msm/vidc/common/vcd/vcd_scheduler.c
@@ -0,0 +1,286 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "vidc_type.h"
+#include "vcd.h"
+
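+/*
+ * The scheduler is round based: every client pays
+ * NORMALIZATION_FACTOR * fps_denominator / fps_numerator "rounds" per frame,
+ * so higher frame-rate clients advance in smaller steps and are picked more
+ * often. The factor of 3600 is presumably chosen because it divides evenly
+ * by the common frame rates. ADJUST_CLIENT_ROUNDS rebases a client's round
+ * count against the list head so the counters do not grow without bound.
+ */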
+#define NORMALIZATION_FACTOR 3600
+#define ADJUST_CLIENT_ROUNDS(client, round_adjustment) \
+do {\
+	if ((client)->rounds < round_adjustment) {\
+		(client)->rounds = 0;\
+		VCD_MSG_HIGH("%s(): WARNING: Scheduler list unsorted",\
+			__func__);\
+	} else\
+		(client)->rounds -= round_adjustment;\
+} while (0)
+
+u32 vcd_sched_create(struct list_head *sched_list)
+{
+	u32 rc = VCD_S_SUCCESS;
+	if (!sched_list) {
+		VCD_MSG_ERROR("%s(): Invalid parameter", __func__);
+		rc = VCD_ERR_ILLEGAL_PARM;
+	} else
+		INIT_LIST_HEAD(sched_list);
+	return rc;
+}
+
+void vcd_sched_destroy(struct list_head *sched_clnt_list)
+{
+	struct vcd_sched_clnt_ctx *sched_clnt, *sched_clnt_next;
+	if (sched_clnt_list)
+		list_for_each_entry_safe(sched_clnt,
+			sched_clnt_next, sched_clnt_list, list) {
+			list_del_init(&sched_clnt->list);
+			sched_clnt->clnt_active = false;
+		}
+}
+
+void insert_client_in_list(struct list_head *sched_clnt_list,
+	struct vcd_sched_clnt_ctx *sched_new_clnt, bool tail)
+{
+	struct vcd_sched_clnt_ctx *sched_clnt;
+	if (!list_empty(sched_clnt_list)) {
+		if (tail)
+			sched_clnt = list_entry(sched_clnt_list->prev,
+				struct vcd_sched_clnt_ctx, list);
+		else
+			sched_clnt = list_first_entry(sched_clnt_list,
+				struct vcd_sched_clnt_ctx, list);
+		sched_new_clnt->rounds = sched_clnt->rounds;
+	} else
+		sched_new_clnt->rounds = 0;
+	if (tail)
+		list_add_tail(&sched_new_clnt->list, sched_clnt_list);
+	else
+		list_add(&sched_new_clnt->list, sched_clnt_list);
+}
+
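+/*
+ * Creates the scheduler context for a client: queries the DDL for the
+ * client's per-frame processing units (and, for encoders, its frame rate),
+ * derives the required performance level, and inserts the client at the
+ * head of the device's scheduler list. Note that the newly allocated
+ * context is not freed if a DDL property query fails.
+ */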
+u32 vcd_sched_add_client(struct vcd_clnt_ctxt *cctxt)
+{
+	struct vcd_property_hdr prop_hdr;
+	struct vcd_sched_clnt_ctx *sched_cctxt;
+	u32 rc = VCD_S_SUCCESS;
+	if (!cctxt) {
+		VCD_MSG_ERROR("%s(): Invalid parameter", __func__);
+		rc = VCD_ERR_ILLEGAL_PARM;
+	} else if (cctxt->sched_clnt_hdl)
+		VCD_MSG_HIGH(
+			"%s(): Scheduler client already exists!", __func__);
+	else {
+		sched_cctxt = (struct vcd_sched_clnt_ctx *)
+			kmalloc(sizeof(struct vcd_sched_clnt_ctx),
+					GFP_KERNEL);
+		if (sched_cctxt) {
+
+			prop_hdr.prop_id = DDL_I_FRAME_PROC_UNITS;
+			prop_hdr.sz = sizeof(cctxt->frm_p_units);
+			rc = ddl_get_property(cctxt->ddl_handle, &prop_hdr,
+						  &cctxt->frm_p_units);
+			VCD_FAILED_RETURN(rc,
+				"Failed: Get DDL_I_FRAME_PROC_UNITS");
+			if (cctxt->decoding) {
+				cctxt->frm_rate.fps_numerator =
+					VCD_DEC_INITIAL_FRAME_RATE;
+				cctxt->frm_rate.fps_denominator = 1;
+			} else {
+				prop_hdr.prop_id = VCD_I_FRAME_RATE;
+				prop_hdr.sz = sizeof(cctxt->frm_rate);
+				rc = ddl_get_property(cctxt->ddl_handle,
+						&prop_hdr, &cctxt->frm_rate);
+				VCD_FAILED_RETURN(rc,
+					"Failed: Get VCD_I_FRAME_RATE");
+			}
+			cctxt->reqd_perf_lvl = cctxt->frm_p_units *
+				cctxt->frm_rate.fps_numerator /
+				cctxt->frm_rate.fps_denominator;
+
+			cctxt->sched_clnt_hdl = sched_cctxt;
+			memset(sched_cctxt, 0,
+				sizeof(struct vcd_sched_clnt_ctx));
+			sched_cctxt->tkns = 0;
+			sched_cctxt->round_perfrm = NORMALIZATION_FACTOR *
+				cctxt->frm_rate.fps_denominator /
+				cctxt->frm_rate.fps_numerator;
+			sched_cctxt->clnt_active = true;
+			sched_cctxt->clnt_data = cctxt;
+			INIT_LIST_HEAD(&sched_cctxt->ip_frm_list);
+
+			insert_client_in_list(
+				&cctxt->dev_ctxt->sched_clnt_list,
+				sched_cctxt, false);
+		}
+	}
+	return rc;
+}
+
+u32 vcd_sched_remove_client(struct vcd_sched_clnt_ctx *sched_cctxt)
+{
+	u32 rc = VCD_S_SUCCESS;
+	struct vcd_clnt_ctxt *cctxt;
+	if (!sched_cctxt) {
+		VCD_MSG_ERROR("%s(): Invalid handle ptr", __func__);
+		rc = VCD_ERR_ILLEGAL_PARM;
+	} else if (!list_empty(&sched_cctxt->ip_frm_list)) {
+		VCD_MSG_ERROR(
+			"%s(): Cannot remove client, queue not empty", __func__);
+		rc = VCD_ERR_ILLEGAL_OP;
+	} else {
+		cctxt = sched_cctxt->clnt_data;
+		list_del(&sched_cctxt->list);
+		memset(sched_cctxt, 0,
+			sizeof(struct vcd_sched_clnt_ctx));
+		kfree(sched_cctxt);
+	}
+	return rc;
+}
+
+u32 vcd_sched_update_config(struct vcd_clnt_ctxt *cctxt)
+{
+	u32 rc = VCD_S_SUCCESS;
+	if (!cctxt || !cctxt->sched_clnt_hdl) {
+		VCD_MSG_ERROR("%s(): Invalid parameter", __func__);
+		rc = VCD_ERR_ILLEGAL_PARM;
+	} else {
+		cctxt->sched_clnt_hdl->rounds /=
+			cctxt->sched_clnt_hdl->round_perfrm;
+		cctxt->sched_clnt_hdl->round_perfrm =
+			NORMALIZATION_FACTOR *
+			cctxt->frm_rate.fps_denominator /
+			cctxt->frm_rate.fps_numerator;
+		cctxt->sched_clnt_hdl->rounds *=
+			cctxt->sched_clnt_hdl->round_perfrm;
+	}
+	return rc;
+}
+
+u32 vcd_sched_queue_buffer(
+	struct vcd_sched_clnt_ctx *sched_cctxt,
+	struct vcd_buffer_entry *buffer, u32 tail)
+{
+	u32 rc = VCD_S_SUCCESS;
+	if (!sched_cctxt || !buffer) {
+		VCD_MSG_ERROR("%s(): Invalid parameter", __func__);
+		rc = VCD_ERR_ILLEGAL_PARM;
+	} else if (tail)
+		list_add_tail(&buffer->sched_list,
+				&sched_cctxt->ip_frm_list);
+	else
+		list_add(&buffer->sched_list, &sched_cctxt->ip_frm_list);
+	return rc;
+}
+
+u32 vcd_sched_dequeue_buffer(
+	struct vcd_sched_clnt_ctx *sched_cctxt,
+	struct vcd_buffer_entry **buffer)
+{
+	u32 rc = VCD_ERR_QEMPTY;
+	if (!sched_cctxt || !buffer) {
+		VCD_MSG_ERROR("%s(): Invalid parameter", __func__);
+		rc = VCD_ERR_ILLEGAL_PARM;
+	} else {
+		*buffer = NULL;
+		if (!list_empty(&sched_cctxt->ip_frm_list)) {
+			*buffer = list_first_entry(
+					&sched_cctxt->ip_frm_list,
+					struct vcd_buffer_entry,
+					sched_list);
+			list_del(&(*buffer)->sched_list);
+			rc = VCD_S_SUCCESS;
+		}
+	}
+	return rc;
+}
+
+u32 vcd_sched_mark_client_eof(struct vcd_sched_clnt_ctx *sched_cctxt)
+{
+	u32 rc = VCD_S_SUCCESS;
+	struct vcd_buffer_entry *buffer = NULL;
+	if (!sched_cctxt) {
+		VCD_MSG_ERROR("%s(): Invalid parameter", __func__);
+		rc = VCD_ERR_ILLEGAL_PARM;
+	} else if (!list_empty(&sched_cctxt->ip_frm_list)) {
+		buffer = list_entry(sched_cctxt->ip_frm_list.prev,
+			struct vcd_buffer_entry, sched_list);
+		buffer->frame.flags |= VCD_FRAME_FLAG_EOS;
+	} else
+		rc = VCD_ERR_QEMPTY;
+	return rc;
+}
+
+u32 vcd_sched_suspend_resume_clnt(
+	struct vcd_clnt_ctxt *cctxt, u32 state)
+{
+	u32 rc = VCD_S_SUCCESS;
+	struct vcd_sched_clnt_ctx *sched_cctxt;
+	if (!cctxt || !cctxt->sched_clnt_hdl) {
+		VCD_MSG_ERROR("%s(): Invalid parameter", __func__);
+		rc = VCD_ERR_ILLEGAL_PARM;
+	} else {
+		sched_cctxt = cctxt->sched_clnt_hdl;
+		if (state != sched_cctxt->clnt_active) {
+			sched_cctxt->clnt_active = state;
+			if (state)
+				insert_client_in_list(
+					&cctxt->dev_ctxt->sched_clnt_list,
+					sched_cctxt, false);
+			else
+				list_del_init(&sched_cctxt->list);
+		}
+	}
+	return rc;
+}
+
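+/*
+ * Picks the next input frame to submit: walks the round-sorted client list,
+ * selects the first active client that still has tokens and queued input,
+ * charges it one round per frame, then dequeues its oldest buffer and
+ * decrements its token count. While walking, the list is kept sorted and
+ * all round counters are rebased against the previous head's count.
+ */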
+u32 vcd_sched_get_client_frame(struct list_head *sched_clnt_list,
+	struct vcd_clnt_ctxt **cctxt,
+	struct vcd_buffer_entry **buffer)
+{
+	u32 rc = VCD_ERR_QEMPTY, round_adjustment = 0;
+	struct vcd_sched_clnt_ctx *sched_clnt, *clnt_nxt;
+	if (!sched_clnt_list || !cctxt || !buffer) {
+		VCD_MSG_ERROR("%s(): Invalid parameter", __func__);
+		rc = VCD_ERR_ILLEGAL_PARM;
+	} else if (!list_empty(sched_clnt_list)) {
+		*cctxt = NULL;
+		*buffer = NULL;
+		list_for_each_entry_safe(sched_clnt,
+			clnt_nxt, sched_clnt_list, list) {
+			if (&sched_clnt->list == sched_clnt_list->next)
+				round_adjustment = sched_clnt->rounds;
+			if (*cctxt) {
+				if ((*cctxt)->sched_clnt_hdl->rounds >=
+					sched_clnt->rounds)
+					list_move(
+						&(*cctxt)->sched_clnt_hdl->list,
+						&sched_clnt->list);
+				ADJUST_CLIENT_ROUNDS(sched_clnt,
+					round_adjustment);
+			} else if (sched_clnt->tkns &&
+				!list_empty(&sched_clnt->ip_frm_list)) {
+				*cctxt = sched_clnt->clnt_data;
+				sched_clnt->rounds += sched_clnt->round_perfrm;
+			} else
+				ADJUST_CLIENT_ROUNDS(sched_clnt,
+						round_adjustment);
+		}
+		if (*cctxt) {
+			rc = vcd_sched_dequeue_buffer(
+				(*cctxt)->sched_clnt_hdl, buffer);
+			if (rc == VCD_S_SUCCESS) {
+				(*cctxt)->sched_clnt_hdl->tkns--;
+				ADJUST_CLIENT_ROUNDS(
+					(*cctxt)->sched_clnt_hdl,
+					round_adjustment);
+			}
+		}
+	}
+	return rc;
+}
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_status.h b/drivers/video/msm/vidc/common/vcd/vcd_status.h
new file mode 100644
index 0000000..807718f
--- /dev/null
+++ b/drivers/video/msm/vidc/common/vcd/vcd_status.h
@@ -0,0 +1,60 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VCD_ERR_STATUS_H_
+#define _VCD_ERR_STATUS_H_
+
+#define VCD_EVT_RESP_BASE                 0x1000
+#define VCD_EVT_RESP_OPEN                 (VCD_EVT_RESP_BASE + 0x1)
+#define VCD_EVT_RESP_START                (VCD_EVT_RESP_BASE + 0x2)
+#define VCD_EVT_RESP_STOP                 (VCD_EVT_RESP_BASE + 0x3)
+#define VCD_EVT_RESP_PAUSE                (VCD_EVT_RESP_BASE + 0x4)
+#define VCD_EVT_RESP_FLUSH_INPUT_DONE     (VCD_EVT_RESP_BASE + 0x5)
+#define VCD_EVT_RESP_FLUSH_OUTPUT_DONE    (VCD_EVT_RESP_BASE + 0x6)
+#define VCD_EVT_RESP_INPUT_FLUSHED        (VCD_EVT_RESP_BASE + 0x7)
+#define VCD_EVT_RESP_OUTPUT_FLUSHED       (VCD_EVT_RESP_BASE + 0x8)
+#define VCD_EVT_RESP_INPUT_DONE           (VCD_EVT_RESP_BASE + 0x9)
+#define VCD_EVT_RESP_OUTPUT_DONE          (VCD_EVT_RESP_BASE + 0xa)
+
+#define VCD_EVT_IND_BASE                  0x2000
+#define VCD_EVT_IND_INPUT_RECONFIG        (VCD_EVT_IND_BASE + 0x1)
+#define VCD_EVT_IND_OUTPUT_RECONFIG       (VCD_EVT_IND_BASE + 0x2)
+#define VCD_EVT_IND_HWERRFATAL            (VCD_EVT_IND_BASE + 0x3)
+#define VCD_EVT_IND_RESOURCES_LOST        (VCD_EVT_IND_BASE + 0x4)
+#define VCD_EVT_IND_INFO_OUTPUT_RECONFIG  (VCD_EVT_IND_BASE + 0x5)
+
+#define VCD_S_SUCCESS           0x0
+
+#define VCD_S_ERR_BASE                    0x80000000
+#define VCD_ERR_FAIL                      (VCD_S_ERR_BASE + 0x01)
+#define VCD_ERR_ALLOC_FAIL                (VCD_S_ERR_BASE + 0x02)
+#define VCD_ERR_ILLEGAL_OP                (VCD_S_ERR_BASE + 0x03)
+#define VCD_ERR_ILLEGAL_PARM              (VCD_S_ERR_BASE + 0x04)
+#define VCD_ERR_BAD_POINTER               (VCD_S_ERR_BASE + 0x05)
+#define VCD_ERR_BAD_HANDLE                (VCD_S_ERR_BASE + 0x06)
+#define VCD_ERR_NOT_SUPPORTED             (VCD_S_ERR_BASE + 0x07)
+#define VCD_ERR_BAD_STATE                 (VCD_S_ERR_BASE + 0x08)
+#define VCD_ERR_BUSY                      (VCD_S_ERR_BASE + 0x09)
+#define VCD_ERR_MAX_CLIENT                (VCD_S_ERR_BASE + 0x0a)
+#define VCD_ERR_IFRAME_EXPECTED           (VCD_S_ERR_BASE + 0x0b)
+#define VCD_ERR_INTRLCD_FIELD_DROP        (VCD_S_ERR_BASE + 0x0c)
+#define VCD_ERR_HW_FATAL                  (VCD_S_ERR_BASE + 0x0d)
+#define VCD_ERR_BITSTREAM_ERR             (VCD_S_ERR_BASE + 0x0e)
+#define VCD_ERR_QEMPTY                    (VCD_S_ERR_BASE + 0x0f)
+#define VCD_ERR_SEQHDR_PARSE_FAIL         (VCD_S_ERR_BASE + 0x10)
+#define VCD_ERR_INPUT_NOT_PROCESSED       (VCD_S_ERR_BASE + 0x11)
+#define VCD_ERR_INDEX_NOMORE              (VCD_S_ERR_BASE + 0x12)
+
+#define VCD_FAILED(rc)   (((rc) > VCD_S_ERR_BASE) ? true : false)
+
+#endif
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_sub.c b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
new file mode 100644
index 0000000..6bc591b
--- /dev/null
+++ b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
@@ -0,0 +1,3057 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/div64.h>
+
+#include "vidc_type.h"
+#include "vcd.h"
+#include "vdec_internal.h"
+#include <linux/memory_alloc.h>
+
+#define MAX(x, y) (((x) > (y)) ? (x) : (y))
+
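+/*
+ * Carves a physically contiguous buffer out of the reserved memory pool
+ * selected by res_trk_get_mem_type() and maps it into the kernel with
+ * ioremap(); both the kernel virtual and the physical address are returned
+ * through the out parameters.
+ */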
+static int vcd_pmem_alloc(size_t sz, u8 **kernel_vaddr, u8 **phy_addr)
+{
+	u32 memtype;
+
+	if (!kernel_vaddr || !phy_addr) {
+		pr_err("\n%s: Invalid parameters", __func__);
+		return -ENOMEM;
+	}
+	memtype = res_trk_get_mem_type();
+	*phy_addr = (u8 *) allocate_contiguous_memory_nomap(sz,
+					memtype, SZ_4K);
+	if (*phy_addr) {
+		*kernel_vaddr = ioremap((unsigned long)*phy_addr, sz);
+
+		if (!*kernel_vaddr) {
+			pr_err("%s: could not ioremap in kernel pmem buffers\n",
+			       __func__);
+			free_contiguous_memory_by_paddr(
+				(unsigned long) *phy_addr);
+			*phy_addr = NULL;
+			return -ENOMEM;
+		}
+		pr_debug("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
+			 (u32) *phy_addr, (u32) *kernel_vaddr);
+		return 0;
+	} else {
+		pr_err("%s: could not allocate in kernel pmem buffers\n",
+		       __func__);
+		return -ENOMEM;
+	}
+
+}
+
+static int vcd_pmem_free(u8 *kernel_vaddr, u8 *phy_addr)
+{
+	if (kernel_vaddr)
+		iounmap((void *)kernel_vaddr);
+	if (phy_addr)
+		free_contiguous_memory_by_paddr((unsigned long)phy_addr);
+	kernel_vaddr = NULL;
+	phy_addr = NULL;
+	return 0;
+}
+
+u8 *vcd_pmem_get_physical(struct video_client_ctx *client_ctx,
+			  unsigned long kernel_vaddr)
+{
+	unsigned long phy_addr, user_vaddr;
+	int pmem_fd;
+	struct file *file;
+	s32 buffer_index = -1;
+
+	if (vidc_lookup_addr_table(client_ctx, BUFFER_TYPE_INPUT,
+					  false, &user_vaddr, &kernel_vaddr,
+					  &phy_addr, &pmem_fd, &file,
+					  &buffer_index)) {
+
+		return (u8 *) phy_addr;
+	} else if (vidc_lookup_addr_table(client_ctx, BUFFER_TYPE_OUTPUT,
+		false, &user_vaddr, &kernel_vaddr, &phy_addr, &pmem_fd, &file,
+		&buffer_index)) {
+		return (u8 *) phy_addr;
+	} else {
+		VCD_MSG_ERROR("Couldn't get physical address");
+
+		return NULL;
+	}
+
+}
+
+void vcd_reset_device_channels(struct vcd_dev_ctxt *dev_ctxt)
+{
+	dev_ctxt->ddl_frame_ch_free = dev_ctxt->ddl_frame_ch_depth;
+	dev_ctxt->ddl_cmd_ch_free   = dev_ctxt->ddl_cmd_ch_depth;
+	dev_ctxt->ddl_frame_ch_interim = 0;
+	dev_ctxt->ddl_cmd_ch_interim = 0;
+}
+
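+/*
+ * DDL channel accounting. A command or frame channel is handed out together
+ * with a free transaction-table entry. When ddl_cmd_concurrency is not set,
+ * command and frame submissions are mutually exclusive: a command channel is
+ * granted only while every frame channel is idle, and vice versa. The
+ * *_interim counters hold channels released inside the current scheduling
+ * loop so they can be reused before being folded back into the free counts
+ * by vcd_release_interim_*_channels().
+ */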
+u32 vcd_get_command_channel(
+	struct vcd_dev_ctxt *dev_ctxt,
+	 struct vcd_transc **transc)
+{
+	u32 result = false;
+
+	*transc = NULL;
+
+	if (dev_ctxt->ddl_cmd_ch_free > 0) {
+		if (dev_ctxt->ddl_cmd_concurrency) {
+			--dev_ctxt->ddl_cmd_ch_free;
+			result = true;
+		} else if ((dev_ctxt->ddl_frame_ch_free +
+			 dev_ctxt->ddl_frame_ch_interim)
+			== dev_ctxt->ddl_frame_ch_depth) {
+				--dev_ctxt->ddl_cmd_ch_free;
+				result = true;
+		}
+	}
+
+	if (result) {
+		*transc = vcd_get_free_trans_tbl_entry(dev_ctxt);
+
+		if (!*transc) {
+			result = false;
+
+			vcd_release_command_channel(dev_ctxt, *transc);
+		}
+
+	}
+	return result;
+}
+
+u32 vcd_get_command_channel_in_loop(
+	struct vcd_dev_ctxt *dev_ctxt,
+	 struct vcd_transc **transc)
+{
+	u32 result = false;
+
+	*transc = NULL;
+
+	if (dev_ctxt->ddl_cmd_ch_interim > 0) {
+		if (dev_ctxt->ddl_cmd_concurrency) {
+			--dev_ctxt->ddl_cmd_ch_interim;
+			result = true;
+		} else if ((dev_ctxt->ddl_frame_ch_free +
+				dev_ctxt->ddl_frame_ch_interim)
+				== dev_ctxt->ddl_frame_ch_depth) {
+				--dev_ctxt->ddl_cmd_ch_interim;
+				result = true;
+		}
+	} else {
+		result = vcd_get_command_channel(dev_ctxt, transc);
+	}
+
+	if (result && !*transc) {
+		*transc = vcd_get_free_trans_tbl_entry(dev_ctxt);
+
+		if (!*transc) {
+			result = false;
+
+			++dev_ctxt->ddl_cmd_ch_interim;
+		}
+
+	}
+
+	return result;
+}
+
+void vcd_mark_command_channel(struct vcd_dev_ctxt *dev_ctxt,
+	struct vcd_transc *transc)
+{
+	++dev_ctxt->ddl_cmd_ch_interim;
+
+	vcd_release_trans_tbl_entry(transc);
+	if (dev_ctxt->ddl_cmd_ch_interim +
+		dev_ctxt->ddl_cmd_ch_free >
+		dev_ctxt->ddl_cmd_ch_depth) {
+		VCD_MSG_ERROR("\n Command channel access counters messed up");
+	}
+}
+
+void vcd_release_command_channel(
+	struct vcd_dev_ctxt *dev_ctxt, struct vcd_transc *transc)
+{
+	++dev_ctxt->ddl_cmd_ch_free;
+
+	vcd_release_trans_tbl_entry(transc);
+	if (dev_ctxt->ddl_cmd_ch_interim + dev_ctxt->ddl_cmd_ch_free >
+		dev_ctxt->ddl_cmd_ch_depth) {
+		VCD_MSG_ERROR("\n Command channel access counters messed up");
+	}
+}
+
+void vcd_release_multiple_command_channels(struct vcd_dev_ctxt
+	*dev_ctxt, u32 channels)
+{
+	dev_ctxt->ddl_cmd_ch_free += channels;
+
+	if (dev_ctxt->ddl_cmd_ch_interim +
+		dev_ctxt->ddl_cmd_ch_free >
+		dev_ctxt->ddl_cmd_ch_depth) {
+		VCD_MSG_ERROR("\n Command channel access counters messed up");
+	}
+}
+
+void vcd_release_interim_command_channels(struct vcd_dev_ctxt *dev_ctxt)
+{
+	dev_ctxt->ddl_cmd_ch_free += dev_ctxt->ddl_cmd_ch_interim;
+	dev_ctxt->ddl_cmd_ch_interim = 0;
+
+	if (dev_ctxt->ddl_cmd_ch_interim + dev_ctxt->ddl_cmd_ch_free >
+		dev_ctxt->ddl_cmd_ch_depth) {
+		VCD_MSG_ERROR("\n Command channel access counters messed up");
+	}
+}
+
+u32 vcd_get_frame_channel(struct vcd_dev_ctxt *dev_ctxt,
+	struct vcd_transc **transc)
+{
+	u32 result = false;
+
+	if (dev_ctxt->ddl_frame_ch_free > 0) {
+		if (dev_ctxt->ddl_cmd_concurrency) {
+			--dev_ctxt->ddl_frame_ch_free;
+			result = true;
+		} else if ((dev_ctxt->ddl_cmd_ch_free +
+			 dev_ctxt->ddl_cmd_ch_interim)
+			== dev_ctxt->ddl_cmd_ch_depth) {
+			--dev_ctxt->ddl_frame_ch_free;
+			result = true;
+		}
+	}
+
+	if (result) {
+		*transc = vcd_get_free_trans_tbl_entry(dev_ctxt);
+
+		if (!*transc) {
+			result = false;
+
+			vcd_release_frame_channel(dev_ctxt, *transc);
+		} else {
+			(*transc)->type = VCD_CMD_CODE_FRAME;
+		}
+
+	}
+
+	return result;
+}
+
+u32 vcd_get_frame_channel_in_loop(
+	struct vcd_dev_ctxt *dev_ctxt,
+	 struct vcd_transc **transc)
+{
+	u32 result = false;
+
+	*transc = NULL;
+
+	if (dev_ctxt->ddl_frame_ch_interim > 0) {
+		if (dev_ctxt->ddl_cmd_concurrency) {
+			--dev_ctxt->ddl_frame_ch_interim;
+			result = true;
+		} else if ((dev_ctxt->ddl_cmd_ch_free +
+			 dev_ctxt->ddl_cmd_ch_interim)
+			== dev_ctxt->ddl_cmd_ch_depth) {
+			--dev_ctxt->ddl_frame_ch_interim;
+			result = true;
+		}
+	} else {
+		result = vcd_get_frame_channel(dev_ctxt, transc);
+	}
+
+	if (result && !*transc) {
+		*transc = vcd_get_free_trans_tbl_entry(dev_ctxt);
+
+		if (!*transc) {
+			result = false;
+			VCD_MSG_FATAL("\n%s: All transactions are busy; "
+				"couldn't find a free one\n", __func__);
+			++dev_ctxt->ddl_frame_ch_interim;
+		} else
+			(*transc)->type = VCD_CMD_CODE_FRAME;
+	}
+
+	return result;
+}
+
+void vcd_mark_frame_channel(struct vcd_dev_ctxt *dev_ctxt)
+{
+	++dev_ctxt->ddl_frame_ch_interim;
+
+	if (dev_ctxt->ddl_frame_ch_interim +
+		dev_ctxt->ddl_frame_ch_free >
+		dev_ctxt->ddl_cmd_ch_depth) {
+		VCD_MSG_FATAL("Frame channel access counters messed up");
+	}
+}
+
+void vcd_release_frame_channel(struct vcd_dev_ctxt *dev_ctxt,
+	struct vcd_transc *transc)
+{
+	++dev_ctxt->ddl_frame_ch_free;
+
+	vcd_release_trans_tbl_entry(transc);
+
+	if (dev_ctxt->ddl_frame_ch_interim +
+		dev_ctxt->ddl_frame_ch_free >
+		dev_ctxt->ddl_cmd_ch_depth) {
+		VCD_MSG_FATAL("Frame channel access counters messed up");
+	}
+}
+
+void vcd_release_multiple_frame_channels(struct vcd_dev_ctxt
+	*dev_ctxt, u32 channels)
+{
+	dev_ctxt->ddl_frame_ch_free += channels;
+
+	if (dev_ctxt->ddl_frame_ch_interim +
+		dev_ctxt->ddl_frame_ch_free >
+		dev_ctxt->ddl_frame_ch_depth) {
+		VCD_MSG_FATAL("Frame channel access counters messed up");
+	}
+}
+
+void vcd_release_interim_frame_channels(struct vcd_dev_ctxt
+	*dev_ctxt)
+{
+	dev_ctxt->ddl_frame_ch_free +=
+		dev_ctxt->ddl_frame_ch_interim;
+	dev_ctxt->ddl_frame_ch_interim = 0;
+
+	if (dev_ctxt->ddl_frame_ch_free >
+		dev_ctxt->ddl_cmd_ch_depth) {
+		VCD_MSG_FATAL("Frame channel access counters messed up");
+	}
+}
+
+u32 vcd_core_is_busy(struct vcd_dev_ctxt *dev_ctxt)
+{
+	if (((dev_ctxt->ddl_cmd_ch_free +
+		  dev_ctxt->ddl_cmd_ch_interim) !=
+		 dev_ctxt->ddl_cmd_ch_depth)
+		||
+		((dev_ctxt->ddl_frame_ch_free +
+		  dev_ctxt->ddl_frame_ch_interim) !=
+		 dev_ctxt->ddl_frame_ch_depth)
+	  ) {
+		return true;
+	} else {
+		return false;
+	}
+}
+
+void vcd_device_timer_start(struct vcd_dev_ctxt *dev_ctxt)
+{
+	if (dev_ctxt->config.timer_start)
+		dev_ctxt->config.timer_start(dev_ctxt->hw_timer_handle,
+			dev_ctxt->hw_time_out);
+}
+
+void vcd_device_timer_stop(struct vcd_dev_ctxt *dev_ctxt)
+{
+	if (dev_ctxt->config.timer_stop)
+		dev_ctxt->config.timer_stop(dev_ctxt->hw_timer_handle);
+}
+
+
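+/*
+ * Common validation for set-buffer and allocate-buffer requests: selects the
+ * input or output pool, lazily sizes the pool from the DDL buffer
+ * requirement, and rejects the request if the pool is already full or the
+ * supplied size is smaller than the DDL requires.
+ */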
+u32 vcd_common_allocate_set_buffer(
+	struct vcd_clnt_ctxt *cctxt,
+	 enum vcd_buffer_type buffer,
+	 u32 buf_size, struct vcd_buffer_pool **buffer_pool)
+{
+	u32 rc = VCD_S_SUCCESS;
+	struct vcd_buffer_requirement Buf_req;
+	struct vcd_property_hdr Prop_hdr;
+	struct vcd_buffer_pool *buf_pool;
+
+	if (buffer == VCD_BUFFER_INPUT) {
+		Prop_hdr.prop_id = DDL_I_INPUT_BUF_REQ;
+		buf_pool = &cctxt->in_buf_pool;
+	} else if (buffer == VCD_BUFFER_OUTPUT) {
+		Prop_hdr.prop_id = DDL_I_OUTPUT_BUF_REQ;
+		buf_pool = &cctxt->out_buf_pool;
+	} else {
+		rc = VCD_ERR_ILLEGAL_PARM;
+	}
+	VCD_FAILED_RETURN(rc, "Invalid buffer type provided");
+
+	*buffer_pool = buf_pool;
+
+	if (buf_pool->count > 0 &&
+		buf_pool->validated == buf_pool->count) {
+		VCD_MSG_ERROR("Buffer pool is full");
+		return VCD_ERR_FAIL;
+	}
+
+	if (!buf_pool->entries) {
+		Prop_hdr.sz = sizeof(Buf_req);
+		rc = ddl_get_property(cctxt->ddl_handle, &Prop_hdr, &Buf_req);
+		if (!VCD_FAILED(rc)) {
+			rc = vcd_alloc_buffer_pool_entries(buf_pool,
+							   &Buf_req);
+		} else {
+			VCD_MSG_ERROR("rc = 0x%x. Failed: ddl_get_property",
+					  rc);
+		}
+	}
+
+	if (!VCD_FAILED(rc)) {
+		if (buf_pool->buf_req.sz > buf_size) {
+			VCD_MSG_ERROR("\n required buffer sz %u "
+				"allocated sz %u",
+				buf_pool->buf_req.sz, buf_size);
+
+			rc = VCD_ERR_ILLEGAL_PARM;
+		}
+	}
+
+	return rc;
+}
+
+u32 vcd_set_buffer_internal(
+	struct vcd_clnt_ctxt *cctxt,
+	 struct vcd_buffer_pool *buf_pool, u8 *buffer, u32 buf_size)
+{
+	struct vcd_buffer_entry *buf_entry;
+	u8 *physical;
+
+	buf_entry = vcd_find_buffer_pool_entry(buf_pool, buffer);
+	if (buf_entry) {
+		VCD_MSG_ERROR("This buffer address already exists");
+
+		return VCD_ERR_ILLEGAL_OP;
+	}
+
+	physical = (u8 *) vcd_pmem_get_physical(
+		cctxt->client_data, (unsigned long)buffer);
+
+	if (!physical) {
+		VCD_MSG_ERROR("Couldn't get physical address");
+		return VCD_ERR_BAD_POINTER;
+	}
+	if (((u32) physical % buf_pool->buf_req.align)) {
+		VCD_MSG_ERROR("Physical addr is not aligned");
+		return VCD_ERR_BAD_POINTER;
+	}
+
+	buf_entry = vcd_get_free_buffer_pool_entry(buf_pool);
+	if (!buf_entry) {
+		VCD_MSG_ERROR("Can't allocate: buffer pool is full");
+		return VCD_ERR_FAIL;
+	}
+
+	buf_entry->virtual = buffer;
+	buf_entry->physical = physical;
+	buf_entry->sz = buf_size;
+	buf_entry->frame.alloc_len = buf_size;
+	buf_entry->allocated = false;
+
+	buf_entry->frame.virtual = buf_entry->virtual;
+	buf_entry->frame.physical = buf_entry->physical;
+
+	buf_pool->validated++;
+
+	return VCD_S_SUCCESS;
+
+}
+
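+/*
+ * Allocates one buffer on behalf of the client: the contiguous allocation is
+ * padded by the alignment requirement and the returned physical/virtual
+ * addresses are rounded up to the required alignment.
+ */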
+u32 vcd_allocate_buffer_internal(
+	struct vcd_clnt_ctxt *cctxt,
+	 struct vcd_buffer_pool *buf_pool,
+	 u32 buf_size, u8 **vir_buf_addr, u8 **phy_buf_addr)
+{
+	struct vcd_buffer_entry *buf_entry;
+	struct vcd_buffer_requirement *buf_req;
+	u32 addr;
+	int rc = 0;
+
+	buf_entry = vcd_get_free_buffer_pool_entry(buf_pool);
+	if (!buf_entry) {
+		VCD_MSG_ERROR("Can't allocate: buffer pool is full");
+
+		return VCD_ERR_FAIL;
+	}
+
+	buf_req = &buf_pool->buf_req;
+
+	buf_size += buf_req->align;
+
+	rc = vcd_pmem_alloc(buf_size, &buf_entry->alloc,
+				&buf_entry->physical);
+
+	if (rc < 0) {
+		VCD_MSG_ERROR("Buffer allocation failed");
+
+		return VCD_ERR_ALLOC_FAIL;
+	}
+
+	buf_entry->sz = buf_size;
+	buf_entry->frame.alloc_len = buf_size;
+
+	if (!buf_entry->physical) {
+		VCD_MSG_ERROR("Couldn't get physical address");
+
+		return VCD_ERR_BAD_POINTER;
+	}
+
+	buf_entry->allocated = true;
+
+	if (buf_req->align > 0) {
+
+		addr = (u32) buf_entry->physical;
+		addr += buf_req->align;
+		addr -= (addr % buf_req->align);
+		buf_entry->virtual = buf_entry->alloc;
+		buf_entry->virtual += (u32) (addr - (u32)
+			buf_entry->physical);
+		buf_entry->physical = (u8 *) addr;
+	} else {
+		VCD_MSG_LOW("No buffer alignment required");
+
+		buf_entry->virtual = buf_entry->alloc;
+
+	}
+
+	buf_entry->frame.virtual = buf_entry->virtual;
+	buf_entry->frame.physical = buf_entry->physical;
+
+	*vir_buf_addr = buf_entry->virtual;
+	*phy_buf_addr = buf_entry->physical;
+
+	buf_pool->allocated++;
+	buf_pool->validated++;
+
+	return VCD_S_SUCCESS;
+}
+
+u32 vcd_free_one_buffer_internal(
+	struct vcd_clnt_ctxt *cctxt,
+	 enum vcd_buffer_type buffer_type, u8 *buffer)
+{
+	struct vcd_buffer_pool *buf_pool;
+	u32 rc = VCD_S_SUCCESS;
+	struct vcd_buffer_entry *buf_entry;
+	u32 first_frm_recvd = 0;
+
+	if (buffer_type == VCD_BUFFER_INPUT) {
+		buf_pool = &cctxt->in_buf_pool;
+		first_frm_recvd = VCD_FIRST_IP_RCVD;
+	} else if (buffer_type == VCD_BUFFER_OUTPUT) {
+		buf_pool = &cctxt->out_buf_pool;
+		first_frm_recvd = VCD_FIRST_OP_RCVD;
+	} else
+		rc = VCD_ERR_ILLEGAL_PARM;
+
+	VCD_FAILED_RETURN(rc, "Invalid buffer type provided");
+
+	first_frm_recvd &= cctxt->status.mask;
+	if (first_frm_recvd) {
+		VCD_MSG_ERROR(
+			"VCD free buffer called when data path is active");
+		return VCD_ERR_BAD_STATE;
+	}
+
+	buf_entry = vcd_find_buffer_pool_entry(buf_pool, buffer);
+	if (!buf_entry) {
+		VCD_MSG_ERROR("Buffer addr %p not found. Can't free buffer",
+				  buffer);
+
+		return VCD_ERR_ILLEGAL_PARM;
+	}
+	if (buf_entry->in_use) {
+		VCD_MSG_ERROR("\n Buffer is in use and is not flushed");
+		return VCD_ERR_ILLEGAL_OP;
+	}
+
+	VCD_MSG_LOW("Freeing buffer %p. Allocated %d",
+			buf_entry->virtual, buf_entry->allocated);
+
+	if (buf_entry->allocated) {
+		vcd_pmem_free(buf_entry->alloc, buf_entry->physical);
+		buf_pool->allocated--;
+	}
+
+	memset(buf_entry, 0, sizeof(struct vcd_buffer_entry));
+	buf_pool->validated--;
+	if (buf_pool->validated == 0)
+		vcd_free_buffer_pool_entries(buf_pool);
+
+	return VCD_S_SUCCESS;
+}
+
+u32 vcd_free_buffers_internal(
+	struct vcd_clnt_ctxt *cctxt,
+	 struct vcd_buffer_pool *buf_pool)
+{
+	u32 rc = VCD_S_SUCCESS;
+	u32 i;
+
+	VCD_MSG_LOW("vcd_free_buffers_internal:");
+
+	if (buf_pool->entries) {
+		for (i = 1; i <= buf_pool->count; i++) {
+			if (buf_pool->entries[i].valid &&
+				buf_pool->entries[i].allocated) {
+				vcd_pmem_free(buf_pool->entries[i].alloc,
+						  buf_pool->entries[i].
+						  physical);
+			}
+		}
+
+	}
+
+	vcd_reset_buffer_pool_for_reuse(buf_pool);
+
+	return rc;
+}
+
+u32 vcd_alloc_buffer_pool_entries(
+	struct vcd_buffer_pool *buf_pool,
+	 struct vcd_buffer_requirement *buf_req)
+{
+
+	VCD_MSG_LOW("vcd_alloc_buffer_pool_entries:");
+
+	buf_pool->buf_req = *buf_req;
+
+	buf_pool->count = buf_req->actual_count;
+	buf_pool->entries = (struct vcd_buffer_entry *)
+		kzalloc((sizeof(struct vcd_buffer_entry) *
+			   (VCD_MAX_BUFFER_ENTRIES + 1)), GFP_KERNEL);
+
+	if (!buf_pool->entries) {
+		VCD_MSG_ERROR("Buf_pool entries alloc failed");
+		return VCD_ERR_ALLOC_FAIL;
+	}
+
+	INIT_LIST_HEAD(&buf_pool->queue);
+	buf_pool->entries[0].valid = true;
+	buf_pool->q_len = 0;
+
+	buf_pool->validated = 0;
+	buf_pool->allocated = 0;
+	buf_pool->in_use = 0;
+
+	return VCD_S_SUCCESS;
+}
+
+void vcd_free_buffer_pool_entries(struct vcd_buffer_pool *buf_pool)
+{
+	VCD_MSG_LOW("vcd_free_buffer_pool_entries:");
+	kfree(buf_pool->entries);
+	memset(buf_pool, 0, sizeof(struct vcd_buffer_pool));
+	INIT_LIST_HEAD(&buf_pool->queue);
+}
+
+void vcd_flush_in_use_buffer_pool_entries(struct vcd_clnt_ctxt *cctxt,
+	struct vcd_buffer_pool *buf_pool, u32 event)
+{
+	u32 i;
+	VCD_MSG_LOW("vcd_flush_in_use_buffer_pool_entries: event=0x%x", event);
+
+	if (buf_pool->entries) {
+		for (i = 0; i <= buf_pool->count; i++) {
+			if (buf_pool->entries[i].virtual &&
+				buf_pool->entries[i].in_use) {
+				cctxt->callback(event, VCD_S_SUCCESS,
+					&buf_pool->entries[i].frame,
+					sizeof(struct vcd_frame_data),
+					cctxt, cctxt->client_data);
+				buf_pool->entries[i].in_use = false;
+				VCD_BUFFERPOOL_INUSE_DECREMENT(
+					buf_pool->in_use);
+			}
+		}
+	}
+}
+
+
+void vcd_reset_buffer_pool_for_reuse(struct vcd_buffer_pool *buf_pool)
+{
+	VCD_MSG_LOW("vcd_reset_buffer_pool_for_reuse:");
+
+	if (buf_pool->entries) {
+		memset(&buf_pool->entries[1], 0,
+			sizeof(struct vcd_buffer_entry) *
+			VCD_MAX_BUFFER_ENTRIES);
+	}
+	buf_pool->q_len = 0;
+
+	buf_pool->validated = 0;
+	buf_pool->allocated = 0;
+	buf_pool->in_use = 0;
+	INIT_LIST_HEAD(&buf_pool->queue);
+}
+
+struct vcd_buffer_entry *vcd_get_free_buffer_pool_entry
+	(struct vcd_buffer_pool *pool) {
+	u32 i;
+
+	i = 1;
+	while (i <= pool->count && pool->entries[i].valid)
+		i++;
+
+
+	if (i <= pool->count) {
+		pool->entries[i].valid = true;
+
+		return &pool->entries[i];
+	} else {
+		return NULL;
+	}
+}
+
+struct vcd_buffer_entry *vcd_find_buffer_pool_entry
+	(struct vcd_buffer_pool *pool, u8 *addr)
+{
+	u32 i;
+	u32 found = false;
+
+	for (i = 0; i <= pool->count && !found; i++) {
+		if (pool->entries[i].virtual == addr)
+			found = true;
+
+	}
+
+	if (found)
+		return &pool->entries[i - 1];
+	else
+		return NULL;
+
+}
+
+u32 vcd_buffer_pool_entry_en_q(
+	struct vcd_buffer_pool *pool,
+	 struct vcd_buffer_entry *entry)
+{
+	struct vcd_buffer_entry *list_itr;
+
+	if (pool->q_len == pool->count)
+		return false;
+
+	list_for_each_entry(list_itr, &pool->queue, list)
+	if (list_itr == entry) {
+		VCD_MSG_HIGH("\n this output buffer is already present"
+			" in queue");
+		VCD_MSG_HIGH("\n Vir Addr %p Phys Addr %p",
+			entry->virtual, entry->physical);
+		return false;
+	}
+
+	list_add_tail(&entry->list, &pool->queue);
+	pool->q_len++;
+
+	return true;
+}
+
+struct vcd_buffer_entry *vcd_buffer_pool_entry_de_q
+	(struct vcd_buffer_pool *pool) {
+	struct vcd_buffer_entry *entry;
+
+	if (!pool || !pool->q_len)
+		return NULL;
+
+	entry = list_first_entry(&pool->queue,
+		struct vcd_buffer_entry, list);
+
+	if (entry) {
+		list_del(&entry->list);
+		pool->q_len--;
+	}
+
+	return entry;
+}
+
+void vcd_flush_bframe_buffers(struct vcd_clnt_ctxt *cctxt, u32 mode)
+{
+	int i;
+	struct vcd_buffer_pool *buf_pool;
+
+	if (!cctxt->decoding && cctxt->bframe) {
+		buf_pool = (mode == VCD_FLUSH_INPUT) ?
+			&cctxt->in_buf_pool : &cctxt->out_buf_pool;
+		if (buf_pool->entries != NULL) {
+			for (i = 1; i <= buf_pool->count; i++) {
+				if ((buf_pool->entries[i].in_use) &&
+					(buf_pool->entries[i].frame.virtual
+					 != NULL)) {
+					if (mode == VCD_FLUSH_INPUT) {
+						cctxt->callback(
+						VCD_EVT_RESP_INPUT_FLUSHED,
+						VCD_S_SUCCESS,
+						&(buf_pool->entries[i].frame),
+						sizeof(struct vcd_frame_data),
+						cctxt, cctxt->client_data);
+					} else {
+						buf_pool->entries[i].
+							frame.data_len = 0;
+						cctxt->callback(
+						VCD_EVT_RESP_OUTPUT_FLUSHED,
+						VCD_S_SUCCESS,
+						&(buf_pool->entries[i].frame),
+						sizeof(struct vcd_frame_data),
+						cctxt,
+						cctxt->client_data);
+					}
+				VCD_BUFFERPOOL_INUSE_DECREMENT(
+					buf_pool->in_use);
+				buf_pool->entries[i].in_use = false;
+				}
+			}
+		}
+	}
+}
+
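+/*
+ * Drain the output buffer queue, reporting each buffer to the client as
+ * flushed, rebalance the scheduler token count by the number of buffers
+ * returned and, for decoder sessions, ask DDL to flush its internal
+ * output queue as well.
+ */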
+void vcd_flush_output_buffers(struct vcd_clnt_ctxt *cctxt)
+{
+	struct vcd_buffer_pool *buf_pool;
+	struct vcd_buffer_entry *buf_entry;
+	u32 count = 0;
+	struct vcd_property_hdr prop_hdr;
+
+	VCD_MSG_LOW("vcd_flush_output_buffers:");
+	buf_pool = &cctxt->out_buf_pool;
+	buf_entry = vcd_buffer_pool_entry_de_q(buf_pool);
+	while (buf_entry) {
+		if (!cctxt->decoding || buf_entry->in_use) {
+			buf_entry->frame.data_len = 0;
+			cctxt->callback(VCD_EVT_RESP_OUTPUT_FLUSHED,
+					VCD_S_SUCCESS,
+					&buf_entry->frame,
+					sizeof(struct vcd_frame_data),
+					cctxt, cctxt->client_data);
+			if (buf_entry->in_use) {
+				VCD_BUFFERPOOL_INUSE_DECREMENT(
+					buf_pool->in_use);
+				buf_entry->in_use = false;
+			}
+			count++;
+		}
+		buf_entry = vcd_buffer_pool_entry_de_q(buf_pool);
+	}
+	vcd_flush_bframe_buffers(cctxt, VCD_FLUSH_OUTPUT);
+	if (buf_pool->in_use || buf_pool->q_len) {
+		VCD_MSG_ERROR("%s(): WARNING in_use(%u) or q_len(%u) not zero!",
+			__func__, buf_pool->in_use, buf_pool->q_len);
+		buf_pool->in_use = buf_pool->q_len = 0;
+	}
+	if (cctxt->sched_clnt_hdl) {
+		if (count > cctxt->sched_clnt_hdl->tkns)
+			cctxt->sched_clnt_hdl->tkns = 0;
+		else
+			cctxt->sched_clnt_hdl->tkns -= count;
+	}
+
+	if (cctxt->ddl_hdl_valid && cctxt->decoding) {
+		prop_hdr.prop_id = DDL_I_REQ_OUTPUT_FLUSH;
+		prop_hdr.sz = sizeof(u32);
+		count = 0x1;
+
+		(void)ddl_set_property(cctxt->ddl_handle, &prop_hdr,
+					&count);
+	}
+	vcd_release_all_clnt_frm_transc(cctxt);
+	cctxt->status.mask &= ~VCD_IN_RECONFIG;
+}
+
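+/*
+ * Client flush entry point. Queued input frames are pulled back from the
+ * scheduler and returned right away; if frames are still submitted to
+ * the hardware the requested mode is latched in status.mask and the rest
+ * of the flush completes when they return, otherwise it completes here.
+ */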
+u32 vcd_flush_buffers(struct vcd_clnt_ctxt *cctxt, u32 mode)
+{
+	u32 rc = VCD_S_SUCCESS;
+	struct vcd_buffer_entry *buf_entry;
+
+	VCD_MSG_LOW("vcd_flush_buffers:");
+
+	if (mode > VCD_FLUSH_ALL || !(mode & VCD_FLUSH_ALL)) {
+		VCD_MSG_ERROR("Invalid flush mode %d", mode);
+		return VCD_ERR_ILLEGAL_PARM;
+	}
+
+	VCD_MSG_MED("Flush mode %d requested", mode);
+	if ((mode & VCD_FLUSH_INPUT) &&
+		cctxt->sched_clnt_hdl) {
+
+		rc = vcd_sched_dequeue_buffer(
+			cctxt->sched_clnt_hdl, &buf_entry);
+		while (!VCD_FAILED(rc) && buf_entry) {
+			if (buf_entry->virtual) {
+				cctxt->callback(VCD_EVT_RESP_INPUT_FLUSHED,
+						VCD_S_SUCCESS,
+						&buf_entry->frame,
+						sizeof(struct
+							 vcd_frame_data),
+						cctxt,
+						cctxt->client_data);
+				}
+
+			buf_entry->in_use = false;
+			VCD_BUFFERPOOL_INUSE_DECREMENT(
+				cctxt->in_buf_pool.in_use);
+			buf_entry = NULL;
+			rc = vcd_sched_dequeue_buffer(
+				cctxt->sched_clnt_hdl, &buf_entry);
+		}
+	}
+	if (rc != VCD_ERR_QEMPTY)
+		VCD_FAILED_RETURN(rc, "Failed: vcd_sched_dequeue_buffer");
+	if (cctxt->status.frame_submitted > 0)
+		cctxt->status.mask |= mode;
+	else {
+		if (mode & VCD_FLUSH_INPUT)
+			vcd_flush_bframe_buffers(cctxt, VCD_FLUSH_INPUT);
+		if (mode & VCD_FLUSH_OUTPUT)
+			vcd_flush_output_buffers(cctxt);
+	}
+	return VCD_S_SUCCESS;
+}
+
+void vcd_flush_buffers_in_err_fatal(struct vcd_clnt_ctxt *cctxt)
+{
+	VCD_MSG_LOW("\n vcd_flush_buffers_in_err_fatal:");
+	(void) vcd_flush_buffers(cctxt, VCD_FLUSH_ALL);
+	vcd_flush_in_use_buffer_pool_entries(cctxt,
+		&cctxt->in_buf_pool, VCD_EVT_RESP_INPUT_FLUSHED);
+	vcd_flush_in_use_buffer_pool_entries(cctxt,
+		&cctxt->out_buf_pool, VCD_EVT_RESP_OUTPUT_FLUSHED);
+	vcd_send_flush_done(cctxt, VCD_S_SUCCESS);
+}
+
+u32 vcd_init_client_context(struct vcd_clnt_ctxt *cctxt)
+{
+	u32 rc;
+	VCD_MSG_LOW("vcd_init_client_context:");
+	rc = ddl_open(&cctxt->ddl_handle, cctxt->decoding);
+	VCD_FAILED_RETURN(rc, "Failed: ddl_open");
+	cctxt->ddl_hdl_valid = true;
+	cctxt->clnt_state.state = VCD_CLIENT_STATE_OPEN;
+	cctxt->clnt_state.state_table =
+		vcd_get_client_state_table(VCD_CLIENT_STATE_OPEN);
+	cctxt->signature = VCD_SIGNATURE;
+	cctxt->live = true;
+	cctxt->bframe = 0;
+	cctxt->cmd_q.pending_cmd = VCD_CMD_NONE;
+	cctxt->status.last_evt = VCD_EVT_RESP_BASE;
+	return rc;
+}
+
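+/*
+ * Tear down a client: unlink it from the device's client list, drain and
+ * remove its scheduler client, free the cached sequence header and both
+ * buffer pools, release its transactions, close the DDL session and
+ * finally free the context itself.
+ */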
+void vcd_destroy_client_context(struct vcd_clnt_ctxt *cctxt)
+{
+	struct vcd_dev_ctxt *dev_ctxt;
+	struct vcd_clnt_ctxt *client;
+	struct vcd_buffer_entry *buf_entry;
+	u32 rc = VCD_S_SUCCESS;
+	VCD_MSG_LOW("vcd_destroy_client_context:");
+
+	dev_ctxt = cctxt->dev_ctxt;
+
+	if (cctxt == dev_ctxt->cctxt_list_head) {
+		VCD_MSG_MED("Clnt list head clnt being removed");
+
+		dev_ctxt->cctxt_list_head = cctxt->next;
+	} else {
+		client = dev_ctxt->cctxt_list_head;
+		while (client && cctxt != client->next)
+			client = client->next;
+		if (client)
+			client->next = cctxt->next;
+		if (!client) {
+			rc = VCD_ERR_FAIL;
+			VCD_MSG_ERROR("Client not found in client list");
+		}
+	}
+
+	if (VCD_FAILED(rc))
+		return;
+
+	if (cctxt->sched_clnt_hdl) {
+		rc = VCD_S_SUCCESS;
+		while (!VCD_FAILED(rc)) {
+			rc = vcd_sched_dequeue_buffer(
+				cctxt->sched_clnt_hdl, &buf_entry);
+			if (rc != VCD_ERR_QEMPTY && VCD_FAILED(rc))
+				VCD_MSG_ERROR("\n Failed: "
+					"vcd_sched_de_queue_buffer");
+		}
+		rc = vcd_sched_remove_client(cctxt->sched_clnt_hdl);
+		if (VCD_FAILED(rc))
+			VCD_MSG_ERROR("\n Failed: sched_remove_client");
+		cctxt->sched_clnt_hdl = NULL;
+	}
+
+	if (cctxt->seq_hdr.sequence_header) {
+		vcd_pmem_free(cctxt->seq_hdr.sequence_header,
+				  cctxt->seq_hdr_phy_addr);
+		cctxt->seq_hdr.sequence_header = NULL;
+	}
+
+	vcd_free_buffers_internal(cctxt, &cctxt->in_buf_pool);
+	vcd_free_buffers_internal(cctxt, &cctxt->out_buf_pool);
+	vcd_free_buffer_pool_entries(&cctxt->in_buf_pool);
+	vcd_free_buffer_pool_entries(&cctxt->out_buf_pool);
+	vcd_release_all_clnt_transc(cctxt);
+
+	if (cctxt->ddl_hdl_valid) {
+		(void)ddl_close(&cctxt->ddl_handle);
+		cctxt->ddl_hdl_valid = false;
+	}
+
+	cctxt->signature = 0;
+	cctxt->clnt_state.state = VCD_CLIENT_STATE_NULL;
+	cctxt->clnt_state.state_table = NULL;
+
+	kfree(cctxt);
+}
+
+u32 vcd_check_for_client_context(
+	struct vcd_dev_ctxt *dev_ctxt, s32 driver_id)
+{
+	struct vcd_clnt_ctxt *client;
+
+	client = dev_ctxt->cctxt_list_head;
+	while (client && client->driver_id != driver_id)
+		client = client->next;
+
+	if (!client)
+		return false;
+	else
+		return true;
+}
+
+u32 vcd_validate_driver_handle(
+	struct vcd_dev_ctxt *dev_ctxt, s32 driver_handle)
+{
+	driver_handle--;
+
+	if (driver_handle < 0 ||
+		driver_handle >= VCD_DRIVER_INSTANCE_MAX ||
+		!dev_ctxt->driver_ids[driver_handle]) {
+		return false;
+	} else {
+		return true;
+	}
+}
+
+u32 vcd_client_cmd_en_q(
+	struct vcd_clnt_ctxt *cctxt, enum vcd_command command)
+{
+	u32 result;
+
+	if (cctxt->cmd_q.pending_cmd == VCD_CMD_NONE) {
+		cctxt->cmd_q.pending_cmd = command;
+		result = true;
+	} else {
+		result = false;
+	}
+
+	return result;
+}
+
+void vcd_client_cmd_flush_and_en_q(
+	struct vcd_clnt_ctxt *cctxt, enum vcd_command command)
+{
+	cctxt->cmd_q.pending_cmd = command;
+}
+
+u32 vcd_client_cmd_de_q(struct vcd_clnt_ctxt *cctxt,
+	enum vcd_command *command)
+{
+	if (cctxt->cmd_q.pending_cmd == VCD_CMD_NONE)
+		return false;
+
+	*command = cctxt->cmd_q.pending_cmd;
+	cctxt->cmd_q.pending_cmd = VCD_CMD_NONE;
+
+	return true;
+}
+
+u32 vcd_get_next_queued_client_cmd(struct vcd_dev_ctxt *dev_ctxt,
+	struct vcd_clnt_ctxt **cctxt, enum vcd_command *command)
+{
+	struct vcd_clnt_ctxt *client = dev_ctxt->cctxt_list_head;
+	u32 result = false;
+
+	while (client && !result) {
+		*cctxt = client;
+		result = vcd_client_cmd_de_q(client, command);
+		client = client->next;
+	}
+	return result;
+}
+
+u32 vcd_submit_cmd_sess_start(struct vcd_transc *transc)
+{
+	u32 rc;
+	struct vcd_sequence_hdr Seq_hdr;
+
+	VCD_MSG_LOW("vcd_submit_cmd_sess_start:");
+
+	if (transc->cctxt->decoding) {
+
+		if (transc->cctxt->seq_hdr.sequence_header) {
+			Seq_hdr.sequence_header_len =
+				transc->cctxt->seq_hdr.
+				sequence_header_len;
+			Seq_hdr.sequence_header =
+				transc->cctxt->seq_hdr_phy_addr;
+
+			rc = ddl_decode_start(transc->cctxt->ddl_handle,
+						  &Seq_hdr, (void *)transc);
+		} else {
+			rc = ddl_decode_start(transc->cctxt->ddl_handle,
+						  NULL, (void *)transc);
+		}
+
+	} else {
+		rc = ddl_encode_start(transc->cctxt->ddl_handle,
+					  (void *)transc);
+	}
+	if (!VCD_FAILED(rc)) {
+		transc->cctxt->status.cmd_submitted++;
+		vcd_device_timer_start(transc->cctxt->dev_ctxt);
+	} else
+		VCD_MSG_ERROR("rc = 0x%x. Failed: ddl start", rc);
+
+	return rc;
+}
+
+u32 vcd_submit_cmd_sess_end(struct vcd_transc *transc)
+{
+	u32 rc;
+
+	VCD_MSG_LOW("vcd_submit_cmd_sess_end:");
+
+	if (transc->cctxt->decoding) {
+		rc = ddl_decode_end(transc->cctxt->ddl_handle,
+					(void *)transc);
+	} else {
+		rc = ddl_encode_end(transc->cctxt->ddl_handle,
+					(void *)transc);
+	}
+	if (!VCD_FAILED(rc)) {
+		transc->cctxt->status.cmd_submitted++;
+		vcd_device_timer_start(transc->cctxt->dev_ctxt);
+	} else
+		VCD_MSG_ERROR("rc = 0x%x. Failed: ddl end", rc);
+
+	return rc;
+}
+
+void vcd_submit_cmd_client_close(struct vcd_clnt_ctxt *cctxt)
+{
+	(void) ddl_close(&cctxt->ddl_handle);
+	cctxt->ddl_hdl_valid = false;
+	cctxt->status.mask &= ~VCD_CLEANING_UP;
+	if (cctxt->status.mask & VCD_CLOSE_PENDING) {
+		struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
+
+		vcd_destroy_client_context(cctxt);
+		vcd_handle_for_last_clnt_close(dev_ctxt, true);
+	}
+}
+
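+/*
+ * Pop queued client commands and submit them on the given command
+ * transaction until one is accepted; a failed submission is reported to
+ * that client's callback and the next queued command is tried.
+ */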
+u32 vcd_submit_command_in_continue(struct vcd_dev_ctxt
+	*dev_ctxt, struct vcd_transc *transc)
+{
+	struct vcd_property_hdr   prop_hdr;
+	struct vcd_clnt_ctxt *client = NULL;
+	enum vcd_command cmd = VCD_CMD_NONE;
+	u32 rc = VCD_ERR_FAIL;
+	u32 result = false, flush = 0, event = 0;
+	u32 command_break = false;
+
+	VCD_MSG_LOW("\n vcd_submit_command_in_continue:");
+
+	while (!command_break) {
+		result = vcd_get_next_queued_client_cmd(dev_ctxt,
+			&client, &cmd);
+
+		if (!result)
+			command_break = true;
+		else {
+			transc->type = cmd;
+			transc->cctxt = client;
+
+			switch (cmd) {
+			case VCD_CMD_CODEC_START:
+				rc = vcd_submit_cmd_sess_start(transc);
+				event = VCD_EVT_RESP_START;
+				break;
+			case VCD_CMD_CODEC_STOP:
+				rc = vcd_submit_cmd_sess_end(transc);
+				event = VCD_EVT_RESP_STOP;
+				break;
+			case VCD_CMD_OUTPUT_FLUSH:
+				prop_hdr.prop_id = DDL_I_REQ_OUTPUT_FLUSH;
+				prop_hdr.sz = sizeof(u32);
+				flush = 0x1;
+				(void) ddl_set_property(client->ddl_handle,
+						&prop_hdr, &flush);
+				vcd_release_command_channel(dev_ctxt,
+					transc);
+				rc = VCD_S_SUCCESS;
+				break;
+			case VCD_CMD_CLIENT_CLOSE:
+				vcd_submit_cmd_client_close(client);
+				vcd_release_command_channel(dev_ctxt,
+					transc);
+				rc = VCD_S_SUCCESS;
+				break;
+			default:
+				VCD_MSG_ERROR("\n vcd_submit_command: Unknown"
+					" command %d", (int)cmd);
+				break;
+			}
+
+			if (!VCD_FAILED(rc)) {
+				command_break = true;
+			} else {
+				VCD_MSG_ERROR("vcd_submit_command %d: "
+					"failed 0x%x", cmd, rc);
+				client->callback(event, rc, NULL, 0, client,
+					client->client_data);
+			}
+		}
+	}
+	return result;
+}
+
+u32 vcd_schedule_frame(struct vcd_dev_ctxt *dev_ctxt,
+	struct vcd_clnt_ctxt **cctxt, struct vcd_buffer_entry
+	**ip_buf_entry)
+{
+	u32 rc = VCD_S_SUCCESS;
+	VCD_MSG_LOW("vcd_schedule_frame:");
+
+	if (!dev_ctxt->cctxt_list_head) {
+		VCD_MSG_HIGH("Client list empty");
+		return false;
+	}
+	rc = vcd_sched_get_client_frame(&dev_ctxt->sched_clnt_list,
+		cctxt, ip_buf_entry);
+	if (rc == VCD_ERR_QEMPTY) {
+		VCD_MSG_HIGH("No frame available. Sched queues are empty");
+		return false;
+	}
+	if (VCD_FAILED(rc)) {
+		VCD_MSG_FATAL("vcd_submit_frame: sched_de_queue_frame"
+			"failed 0x%x", rc);
+	  return false;
+	}
+	if (!*cctxt || !*ip_buf_entry) {
+		VCD_MSG_FATAL("Sched returned invalid values. ctxt=%p,"
+			"ipbuf=%p",	*cctxt, *ip_buf_entry);
+		return false;
+	}
+	return true;
+}
+
+void vcd_try_submit_frame(struct vcd_dev_ctxt *dev_ctxt)
+{
+	struct vcd_transc *transc;
+	u32 rc = VCD_S_SUCCESS;
+	struct vcd_clnt_ctxt *cctxt = NULL;
+	struct vcd_buffer_entry *ip_buf_entry = NULL;
+	u32 result = false;
+
+	VCD_MSG_LOW("vcd_try_submit_frame:");
+
+	if (!vcd_get_frame_channel(dev_ctxt, &transc))
+		return;
+
+	if (!vcd_schedule_frame(dev_ctxt, &cctxt, &ip_buf_entry)) {
+		vcd_release_frame_channel(dev_ctxt, transc);
+		return;
+	}
+
+	rc = vcd_power_event(dev_ctxt, cctxt, VCD_EVT_PWR_CLNT_CMD_BEGIN);
+
+	if (!VCD_FAILED(rc)) {
+		transc->cctxt = cctxt;
+		transc->ip_buf_entry = ip_buf_entry;
+
+		result = vcd_submit_frame(dev_ctxt, transc);
+	} else {
+		VCD_MSG_ERROR("Failed: VCD_EVT_PWR_CLNT_CMD_BEGIN");
+		(void) vcd_sched_queue_buffer(
+			cctxt->sched_clnt_hdl, ip_buf_entry, false);
+		cctxt->sched_clnt_hdl->tkns++;
+	}
+
+	if (!result) {
+		vcd_release_frame_channel(dev_ctxt, transc);
+		(void) vcd_power_event(dev_ctxt, cctxt,
+				VCD_EVT_PWR_CLNT_CMD_FAIL);
+	}
+}
+
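+/*
+ * Hand one scheduled input frame to DDL. The client's ip_frm_tag and
+ * timestamp are saved in the transaction and ip_frm_tag is temporarily
+ * replaced with the transaction pointer so completion callbacks can find
+ * it; encode sessions also dequeue an output buffer to pair with the
+ * input frame.
+ */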
+u32 vcd_submit_frame(struct vcd_dev_ctxt *dev_ctxt,
+					 struct vcd_transc *transc)
+{
+	struct vcd_clnt_ctxt *cctxt = NULL;
+	struct vcd_frame_data *ip_frm_entry;
+	struct vcd_buffer_entry *op_buf_entry = NULL;
+	u32 rc = VCD_S_SUCCESS;
+	u32 evcode = 0;
+	struct ddl_frame_data_tag ddl_ip_frm;
+	struct ddl_frame_data_tag ddl_op_frm;
+
+	VCD_MSG_LOW("vcd_submit_frame:");
+	cctxt = transc->cctxt;
+	ip_frm_entry = &transc->ip_buf_entry->frame;
+
+	transc->op_buf_entry = op_buf_entry;
+	transc->ip_frm_tag = ip_frm_entry->ip_frm_tag;
+	transc->time_stamp = ip_frm_entry->time_stamp;
+	ip_frm_entry->ip_frm_tag = (u32) transc;
+	memset(&ddl_ip_frm, 0, sizeof(ddl_ip_frm));
+	memset(&ddl_op_frm, 0, sizeof(ddl_op_frm));
+	if (cctxt->decoding) {
+		evcode = CLIENT_STATE_EVENT_NUMBER(decode_frame);
+		ddl_ip_frm.vcd_frm = *ip_frm_entry;
+		rc = ddl_decode_frame(cctxt->ddl_handle, &ddl_ip_frm,
+							   (void *) transc);
+	} else {
+		op_buf_entry = vcd_buffer_pool_entry_de_q(
+			&cctxt->out_buf_pool);
+		if (!op_buf_entry) {
+			VCD_MSG_ERROR("Sched provided frame when no"
+				"op buffer was present");
+			rc = VCD_ERR_FAIL;
+		} else {
+			op_buf_entry->in_use = true;
+			cctxt->out_buf_pool.in_use++;
+			ddl_ip_frm.vcd_frm = *ip_frm_entry;
+			ddl_ip_frm.frm_delta =
+				vcd_calculate_frame_delta(cctxt,
+					ip_frm_entry);
+
+			ddl_op_frm.vcd_frm = op_buf_entry->frame;
+
+			evcode = CLIENT_STATE_EVENT_NUMBER(encode_frame);
+
+			rc = ddl_encode_frame(cctxt->ddl_handle,
+				&ddl_ip_frm, &ddl_op_frm, (void *) transc);
+		}
+	}
+	ip_frm_entry->ip_frm_tag = transc->ip_frm_tag;
+	if (!VCD_FAILED(rc)) {
+		vcd_device_timer_start(dev_ctxt);
+		cctxt->status.frame_submitted++;
+		if (ip_frm_entry->flags & VCD_FRAME_FLAG_EOS)
+			vcd_do_client_state_transition(cctxt,
+				VCD_CLIENT_STATE_EOS, evcode);
+	} else {
+		VCD_MSG_ERROR("Frame submission failed. rc = 0x%x", rc);
+		vcd_handle_submit_frame_failed(dev_ctxt, transc);
+	}
+	return true;
+}
+
+u32 vcd_try_submit_frame_in_continue(struct vcd_dev_ctxt *dev_ctxt,
+	struct vcd_transc *transc)
+{
+	struct vcd_clnt_ctxt *cctxt = NULL;
+	struct vcd_buffer_entry *ip_buf_entry = NULL;
+
+	VCD_MSG_LOW("vcd_try_submit_frame_in_continue:");
+
+	if (!vcd_schedule_frame(dev_ctxt, &cctxt, &ip_buf_entry))
+		return false;
+
+	transc->cctxt = cctxt;
+	transc->ip_buf_entry = ip_buf_entry;
+
+	return vcd_submit_frame(dev_ctxt, transc);
+}
+
+u32 vcd_process_cmd_sess_start(struct vcd_clnt_ctxt *cctxt)
+{
+	struct vcd_transc *transc;
+	u32 rc = VCD_S_SUCCESS;
+
+	VCD_MSG_LOW("vcd_process_cmd_sess_start:");
+	if (vcd_get_command_channel(cctxt->dev_ctxt, &transc)) {
+		rc = vcd_power_event(cctxt->dev_ctxt,
+					 cctxt, VCD_EVT_PWR_CLNT_CMD_BEGIN);
+
+		if (!VCD_FAILED(rc)) {
+			transc->type = VCD_CMD_CODEC_START;
+			transc->cctxt = cctxt;
+			rc = vcd_submit_cmd_sess_start(transc);
+		} else {
+			VCD_MSG_ERROR("Failed: VCD_EVT_PWR_CLNT_CMD_BEGIN");
+		}
+
+		if (VCD_FAILED(rc)) {
+			vcd_release_command_channel(cctxt->dev_ctxt,
+							transc);
+		}
+	} else {
+		u32 result;
+
+		result = vcd_client_cmd_en_q(cctxt, VCD_CMD_CODEC_START);
+		if (!result) {
+			rc = VCD_ERR_BUSY;
+			VCD_MSG_ERROR("%s(): vcd_client_cmd_en_q() "
+				"failed\n", __func__);
+		}
+	}
+
+	if (VCD_FAILED(rc)) {
+		(void)vcd_power_event(cctxt->dev_ctxt,
+					  cctxt, VCD_EVT_PWR_CLNT_CMD_FAIL);
+	}
+
+	return rc;
+}
+
+void vcd_send_frame_done_in_eos(struct vcd_clnt_ctxt *cctxt,
+	 struct vcd_frame_data *input_frame, u32 valid_opbuf)
+{
+	VCD_MSG_LOW("vcd_send_frame_done_in_eos:");
+
+	if (!input_frame->virtual && !valid_opbuf) {
+		VCD_MSG_MED("Sending NULL output with EOS");
+
+		cctxt->out_buf_pool.entries[0].frame.flags =
+			VCD_FRAME_FLAG_EOS;
+		cctxt->out_buf_pool.entries[0].frame.data_len = 0;
+		cctxt->out_buf_pool.entries[0].frame.time_stamp =
+			input_frame->time_stamp;
+		cctxt->out_buf_pool.entries[0].frame.ip_frm_tag =
+			input_frame->ip_frm_tag;
+
+		cctxt->callback(VCD_EVT_RESP_OUTPUT_DONE,
+				  VCD_S_SUCCESS,
+				  &cctxt->out_buf_pool.entries[0].frame,
+				  sizeof(struct vcd_frame_data),
+				  cctxt, cctxt->client_data);
+
+		memset(&cctxt->out_buf_pool.entries[0].frame,
+			   0, sizeof(struct vcd_frame_data));
+	} else if (!input_frame->data_len) {
+		if (cctxt->decoding) {
+			vcd_send_frame_done_in_eos_for_dec(cctxt,
+							   input_frame);
+		} else {
+			vcd_send_frame_done_in_eos_for_enc(cctxt,
+							   input_frame);
+		}
+
+	}
+}
+
+void vcd_send_frame_done_in_eos_for_dec(
+	struct vcd_clnt_ctxt *cctxt,
+	 struct vcd_frame_data *input_frame)
+{
+	struct vcd_buffer_entry *buf_entry;
+	struct vcd_property_hdr prop_hdr;
+	u32 rc;
+	struct ddl_frame_data_tag ddl_frm;
+
+	prop_hdr.prop_id = DDL_I_DPB_RETRIEVE;
+	prop_hdr.sz = sizeof(struct ddl_frame_data_tag);
+	memset(&ddl_frm, 0, sizeof(ddl_frm));
+	rc = ddl_get_property(cctxt->ddl_handle, &prop_hdr, &ddl_frm);
+
+	if (VCD_FAILED(rc) || !ddl_frm.vcd_frm.virtual) {
+		cctxt->status.eos_trig_ip_frm = *input_frame;
+		cctxt->status.mask |= VCD_EOS_WAIT_OP_BUF;
+		return;
+	}
+
+	buf_entry = vcd_find_buffer_pool_entry(&cctxt->out_buf_pool,
+		ddl_frm.vcd_frm.virtual);
+	if (!buf_entry) {
+		VCD_MSG_ERROR("Unrecognized buffer address provided = %p",
+				  ddl_frm.vcd_frm.virtual);
+		return;
+	} else {
+		if (cctxt->sched_clnt_hdl->tkns)
+			cctxt->sched_clnt_hdl->tkns--;
+
+		VCD_MSG_MED("Sending non-NULL output with EOS");
+
+		buf_entry->frame.data_len = 0;
+		buf_entry->frame.offset = 0;
+		buf_entry->frame.flags |= VCD_FRAME_FLAG_EOS;
+		buf_entry->frame.ip_frm_tag = input_frame->ip_frm_tag;
+		buf_entry->frame.time_stamp = input_frame->time_stamp;
+
+		cctxt->callback(VCD_EVT_RESP_OUTPUT_DONE,
+				  VCD_S_SUCCESS,
+				  &buf_entry->frame,
+				  sizeof(struct vcd_frame_data),
+				  cctxt, cctxt->client_data);
+
+		buf_entry->in_use = false;
+		VCD_BUFFERPOOL_INUSE_DECREMENT(cctxt->out_buf_pool.in_use);
+	}
+}
+
+void vcd_send_frame_done_in_eos_for_enc(
+	struct vcd_clnt_ctxt *cctxt,
+	 struct vcd_frame_data *input_frame)
+{
+	struct vcd_buffer_entry *op_buf_entry;
+
+	if (!cctxt->out_buf_pool.q_len) {
+		cctxt->status.eos_trig_ip_frm = *input_frame;
+
+		cctxt->status.mask |= VCD_EOS_WAIT_OP_BUF;
+
+		return;
+	}
+
+	op_buf_entry = vcd_buffer_pool_entry_de_q(&cctxt->out_buf_pool);
+	if (!op_buf_entry) {
+		VCD_MSG_ERROR("%s(): vcd_buffer_pool_entry_de_q() "
+			"failed\n", __func__);
+	} else {
+		if (cctxt->sched_clnt_hdl->tkns)
+			cctxt->sched_clnt_hdl->tkns--;
+
+		VCD_MSG_MED("Sending non-NULL output with EOS");
+
+		op_buf_entry->frame.data_len = 0;
+		op_buf_entry->frame.flags |= VCD_FRAME_FLAG_EOS;
+		op_buf_entry->frame.ip_frm_tag =
+			input_frame->ip_frm_tag;
+		op_buf_entry->frame.time_stamp = input_frame->time_stamp;
+
+		cctxt->callback(VCD_EVT_RESP_OUTPUT_DONE,
+				  VCD_S_SUCCESS,
+				  &op_buf_entry->frame,
+				  sizeof(struct vcd_frame_data),
+				  cctxt, cctxt->client_data);
+	}
+}
+
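+/*
+ * Handle an input frame carrying the EOS flag. If the scheduler still
+ * holds a queued frame the EOS is attached to it; otherwise EOS is
+ * completed here (encoders emit an EOS output immediately) and an empty
+ * input buffer, if one was provided, is returned via INPUT_DONE.
+ */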
+u32 vcd_handle_recvd_eos(
+	struct vcd_clnt_ctxt *cctxt,
+	 struct vcd_frame_data *input_frame, u32 *pb_eos_handled)
+{
+	u32 rc;
+
+	VCD_MSG_LOW("vcd_handle_recvd_eos:");
+
+	*pb_eos_handled = false;
+
+	if (input_frame->virtual &&
+			input_frame->data_len)
+		return VCD_S_SUCCESS;
+
+	input_frame->data_len = 0;
+	rc = vcd_sched_mark_client_eof(cctxt->sched_clnt_hdl);
+	if (VCD_FAILED(rc) && rc != VCD_ERR_QEMPTY)
+		return rc;
+
+	if (rc == VCD_S_SUCCESS)
+		*pb_eos_handled = true;
+	else if (cctxt->decoding && !input_frame->virtual)
+		cctxt->sched_clnt_hdl->tkns++;
+	else if (!cctxt->decoding) {
+		vcd_send_frame_done_in_eos(cctxt, input_frame, false);
+		if (cctxt->status.mask & VCD_EOS_WAIT_OP_BUF) {
+			vcd_do_client_state_transition(cctxt,
+				VCD_CLIENT_STATE_EOS,
+				CLIENT_STATE_EVENT_NUMBER
+				(encode_frame));
+		}
+		*pb_eos_handled = true;
+	}
+
+	if (*pb_eos_handled &&
+		input_frame->virtual &&
+		!input_frame->data_len) {
+		cctxt->callback(VCD_EVT_RESP_INPUT_DONE,
+				  VCD_S_SUCCESS,
+				  input_frame,
+				  sizeof(struct vcd_frame_data),
+				  cctxt, cctxt->client_data);
+	}
+	return VCD_S_SUCCESS;
+}
+
+u32 vcd_handle_first_decode_frame(struct vcd_clnt_ctxt *cctxt)
+{
+	u32 rc =  VCD_ERR_BAD_STATE;
+
+	VCD_MSG_LOW("vcd_handle_first_decode_frame:");
+	if (!cctxt->in_buf_pool.entries ||
+		!cctxt->out_buf_pool.entries ||
+		cctxt->in_buf_pool.validated !=
+		cctxt->in_buf_pool.count ||
+		cctxt->out_buf_pool.validated !=
+		cctxt->out_buf_pool.count)
+		VCD_MSG_ERROR("Buffer pool is not completely setup yet");
+	else if (!cctxt->sched_clnt_hdl) {
+		rc = vcd_sched_add_client(cctxt);
+		VCD_FAILED_RETURN(rc, "Failed: vcd_add_client_to_sched");
+		cctxt->sched_clnt_hdl->tkns =
+			cctxt->out_buf_pool.q_len;
+	} else
+		rc = vcd_sched_suspend_resume_clnt(cctxt, true);
+	return rc;
+}
+
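+/*
+ * One-time device setup from DDL capabilities: size the transaction
+ * table as (max clients x per-client transactions) + general command
+ * depth, and record the command/frame channel depths and HW timeout.
+ */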
+u32 vcd_setup_with_ddl_capabilities(struct vcd_dev_ctxt *dev_ctxt)
+{
+	struct vcd_property_hdr Prop_hdr;
+	struct ddl_property_capability capability;
+	u32 rc = VCD_S_SUCCESS;
+
+	VCD_MSG_LOW("vcd_setup_with_ddl_capabilities:");
+
+	if (!dev_ctxt->ddl_cmd_ch_depth) {
+		Prop_hdr.prop_id = DDL_I_CAPABILITY;
+		Prop_hdr.sz = sizeof(capability);
+
+		/*
+		** Since this is underlying core's property we don't need a
+		** ddl client handle.
+		*/
+		rc = ddl_get_property(NULL, &Prop_hdr, &capability);
+
+		if (!VCD_FAILED(rc)) {
+			/*
+			** Allocate the transaction table.
+			*/
+			dev_ctxt->trans_tbl_size =
+				(VCD_MAX_CLIENT_TRANSACTIONS *
+				capability.max_num_client) +
+				capability.general_command_depth;
+
+			dev_ctxt->trans_tbl = (struct vcd_transc *)
+				kzalloc((sizeof(struct vcd_transc) *
+				dev_ctxt->trans_tbl_size), GFP_KERNEL);
+
+			if (!dev_ctxt->trans_tbl) {
+				VCD_MSG_ERROR("Transaction table alloc failed");
+				rc = VCD_ERR_ALLOC_FAIL;
+			} else	{
+				dev_ctxt->ddl_cmd_concurrency =
+					!capability.exclusive;
+				dev_ctxt->ddl_frame_ch_depth =
+					capability.frame_command_depth;
+				dev_ctxt->ddl_cmd_ch_depth =
+					capability.general_command_depth;
+
+				vcd_reset_device_channels(dev_ctxt);
+
+				dev_ctxt->hw_time_out =
+					capability.ddl_time_out_in_ms;
+
+			}
+		}
+	}
+	return rc;
+}
+
+struct vcd_transc *vcd_get_free_trans_tbl_entry(
+	struct vcd_dev_ctxt *dev_ctxt)
+{
+	u32 i;
+
+	if (!dev_ctxt->trans_tbl)
+		return NULL;
+
+	i = 0;
+	while (i < dev_ctxt->trans_tbl_size &&
+		   dev_ctxt->trans_tbl[i].in_use)
+		i++;
+
+	if (i == dev_ctxt->trans_tbl_size) {
+		return NULL;
+	} else {
+		memset(&dev_ctxt->trans_tbl[i], 0,
+			   sizeof(struct vcd_transc));
+
+		dev_ctxt->trans_tbl[i].in_use = true;
+
+		return &dev_ctxt->trans_tbl[i];
+	}
+}
+
+void vcd_release_trans_tbl_entry(struct vcd_transc *trans_entry)
+{
+	if (trans_entry)
+		trans_entry->in_use = false;
+}
+
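+/*
+ * INPUT_DONE from DDL: validate the payload, restore the client's
+ * original ip_frm_tag, deliver the event, release the input buffer and
+ * update the submitted/delayed frame counters. Decoder-specific cases
+ * (codec-config frames, interlaced input, end of transaction) are
+ * handled at the end.
+ */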
+u32 vcd_handle_input_done(
+	struct vcd_clnt_ctxt *cctxt,
+	 void *payload, u32 event, u32 status)
+{
+	struct vcd_transc *transc;
+	struct ddl_frame_data_tag *frame =
+		(struct ddl_frame_data_tag *) payload;
+	u32 rc;
+
+	if (!cctxt->status.frame_submitted &&
+		!cctxt->status.frame_delayed) {
+		VCD_MSG_ERROR("Input done was not expected");
+		return VCD_ERR_BAD_STATE;
+	}
+
+	rc = vcd_validate_io_done_pyld(cctxt, payload, status);
+	if (rc == VCD_ERR_CLIENT_FATAL)
+		vcd_handle_clnt_fatal_input_done(cctxt, frame->frm_trans_end);
+	VCD_FAILED_RETURN(rc, "Bad input done payload");
+
+	transc = (struct vcd_transc *)frame->vcd_frm.ip_frm_tag;
+
+	if ((transc->ip_buf_entry->frame.virtual !=
+		 frame->vcd_frm.virtual)
+		|| !transc->ip_buf_entry->in_use) {
+		VCD_MSG_ERROR("Bad frm transaction state");
+		vcd_handle_clnt_fatal_input_done(cctxt, frame->frm_trans_end);
+		return VCD_ERR_BAD_POINTER;
+	}
+
+	frame->vcd_frm.ip_frm_tag = transc->ip_frm_tag;
+	transc->frame = frame->vcd_frm.frame;
+
+	cctxt->callback(event,
+			status,
+			&frame->vcd_frm,
+			sizeof(struct vcd_frame_data),
+			cctxt, cctxt->client_data);
+
+	transc->ip_buf_entry->in_use = false;
+	VCD_BUFFERPOOL_INUSE_DECREMENT(cctxt->in_buf_pool.in_use);
+	transc->ip_buf_entry = NULL;
+	transc->input_done = true;
+
+	if (transc->input_done && transc->frame_done)
+		vcd_release_trans_tbl_entry(transc);
+
+	if (VCD_FAILED(status)) {
+		VCD_MSG_ERROR("INPUT_DONE returned err = 0x%x", status);
+		vcd_handle_input_done_failed(cctxt, transc);
+	} else
+		cctxt->status.mask |= VCD_FIRST_IP_DONE;
+
+	if (cctxt->status.frame_submitted > 0)
+		cctxt->status.frame_submitted--;
+	else
+		cctxt->status.frame_delayed--;
+
+	if (!VCD_FAILED(status) &&
+		cctxt->decoding) {
+		if (frame->vcd_frm.flags & VCD_FRAME_FLAG_CODECCONFIG) {
+			VCD_MSG_HIGH(
+				"INPUT_DONE with VCD_FRAME_FLAG_CODECCONFIG");
+			vcd_handle_input_done_with_codec_config(cctxt,
+				transc, frame);
+			frame->vcd_frm.flags &= ~VCD_FRAME_FLAG_CODECCONFIG;
+		}
+		if (frame->vcd_frm.interlaced)
+			vcd_handle_input_done_for_interlacing(cctxt);
+		if (frame->frm_trans_end)
+			vcd_handle_input_done_with_trans_end(cctxt);
+	}
+
+	return VCD_S_SUCCESS;
+}
+
+u32 vcd_handle_input_done_in_eos(
+	struct vcd_clnt_ctxt *cctxt, void *payload, u32 status)
+{
+	struct vcd_transc *transc;
+	struct ddl_frame_data_tag *frame =
+		(struct ddl_frame_data_tag *) payload;
+	u32 rc = VCD_ERR_FAIL, codec_config = false;
+	u32 core_type = res_trk_get_core_type();
+	rc = vcd_validate_io_done_pyld(cctxt, payload, status);
+	if (rc == VCD_ERR_CLIENT_FATAL)
+		vcd_handle_clnt_fatal_input_done(cctxt, frame->frm_trans_end);
+	VCD_FAILED_RETURN(rc, "Failed: vcd_validate_io_done_pyld");
+	transc = (struct vcd_transc *)frame->vcd_frm.ip_frm_tag;
+	codec_config = frame->vcd_frm.flags & VCD_FRAME_FLAG_CODECCONFIG;
+	rc = vcd_handle_input_done(cctxt,
+		payload, VCD_EVT_RESP_INPUT_DONE, status);
+	VCD_FAILED_RETURN(rc, "Failed: vcd_handle_input_done");
+	if (frame->vcd_frm.flags & VCD_FRAME_FLAG_EOS) {
+		VCD_MSG_HIGH("Got input done for EOS initiator");
+		transc->input_done = false;
+		transc->in_use = true;
+		if (codec_config ||
+			((status == VCD_ERR_BITSTREAM_ERR) &&
+			 !(cctxt->status.mask & VCD_FIRST_IP_DONE) &&
+			 (core_type == VCD_CORE_720P)))
+			vcd_handle_eos_done(cctxt, transc, VCD_S_SUCCESS);
+	}
+	return rc;
+}
+
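+/*
+ * The ip_frm_tag in a DDL payload carries the transaction pointer; make
+ * sure it refers to an in-use entry of the transaction table before it
+ * is dereferenced, and that a frame address is present unless the status
+ * indicates an interlaced field drop.
+ */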
+u32 vcd_validate_io_done_pyld(
+	struct vcd_clnt_ctxt *cctxt, void *payload, u32 status)
+{
+	struct ddl_frame_data_tag *frame =
+		(struct ddl_frame_data_tag *) payload;
+	struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
+	struct vcd_transc *transc = NULL;
+	u32 rc = VCD_S_SUCCESS, i = 0;
+
+	if (!frame) {
+		VCD_MSG_ERROR("Bad payload from DDL");
+		return VCD_ERR_BAD_POINTER;
+	}
+
+	transc = (struct vcd_transc *)frame->vcd_frm.ip_frm_tag;
+	if (dev_ctxt->trans_tbl) {
+		while (i < dev_ctxt->trans_tbl_size &&
+			transc != &dev_ctxt->trans_tbl[i])
+			i++;
+		if (i == dev_ctxt->trans_tbl_size ||
+			!dev_ctxt->trans_tbl[i].in_use)
+			rc = VCD_ERR_CLIENT_FATAL;
+	} else
+		rc = VCD_ERR_CLIENT_FATAL;
+
+	if (VCD_FAILED(rc)) {
+		VCD_MSG_FATAL(
+			"vcd_validate_io_done_pyld: invalid transaction");
+	} else if (!frame->vcd_frm.virtual &&
+		status != VCD_ERR_INTRLCD_FIELD_DROP)
+		rc = VCD_ERR_BAD_POINTER;
+
+	return rc;
+}
+
+void vcd_handle_input_done_failed(
+	struct vcd_clnt_ctxt *cctxt, struct vcd_transc *transc)
+{
+	if (cctxt->decoding) {
+		cctxt->sched_clnt_hdl->tkns++;
+		vcd_release_trans_tbl_entry(transc);
+	}
+}
+
+void vcd_handle_input_done_with_codec_config(
+	struct vcd_clnt_ctxt *cctxt, struct vcd_transc *transc,
+	struct ddl_frame_data_tag *frm)
+{
+	cctxt->sched_clnt_hdl->tkns++;
+	if (frm->frm_trans_end)
+		vcd_release_trans_tbl_entry(transc);
+}
+
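+/*
+ * An interlaced input produces one INPUT_DONE per field; only the first
+ * field returns a scheduler token, and the field counter resets once the
+ * last expected field has been seen.
+ */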
+void vcd_handle_input_done_for_interlacing(struct vcd_clnt_ctxt *cctxt)
+{
+	cctxt->status.int_field_cnt++;
+	if (cctxt->status.int_field_cnt == 1)
+		cctxt->sched_clnt_hdl->tkns++;
+	else if (cctxt->status.int_field_cnt ==
+		VCD_DEC_NUM_INTERLACED_FIELDS)
+		cctxt->status.int_field_cnt = 0;
+}
+
+void vcd_handle_input_done_with_trans_end(
+	struct vcd_clnt_ctxt *cctxt)
+{
+	if (!cctxt->decoding)
+		return;
+	if (cctxt->out_buf_pool.in_use <
+		cctxt->out_buf_pool.buf_req.min_count)
+		return;
+	if (!cctxt->sched_clnt_hdl->tkns)
+		cctxt->sched_clnt_hdl->tkns++;
+}
+
+u32 vcd_handle_output_required(struct vcd_clnt_ctxt
+	*cctxt, void *payload, u32 status)
+{
+	struct vcd_transc *transc;
+	struct ddl_frame_data_tag *frame =
+		(struct ddl_frame_data_tag *)payload;
+	u32 rc = VCD_S_SUCCESS;
+
+	if (!cctxt->status.frame_submitted &&
+		!cctxt->status.frame_delayed) {
+		VCD_MSG_ERROR("\n Input done was not expected");
+		return VCD_ERR_BAD_STATE;
+	}
+
+	rc = vcd_validate_io_done_pyld(cctxt, payload, status);
+	if (rc == VCD_ERR_CLIENT_FATAL)
+		vcd_handle_clnt_fatal_input_done(cctxt, frame->frm_trans_end);
+	VCD_FAILED_RETURN(rc, "\n Bad input done payload");
+
+	transc = (struct vcd_transc *)frame->vcd_frm.ip_frm_tag;
+
+	if ((transc->ip_buf_entry->frame.virtual !=
+		 frame->vcd_frm.virtual) ||
+		!transc->ip_buf_entry->in_use) {
+		VCD_MSG_ERROR("\n Bad frm transaction state");
+		vcd_handle_clnt_fatal_input_done(cctxt, frame->frm_trans_end);
+		return VCD_ERR_BAD_STATE;
+	}
+	rc = vcd_sched_queue_buffer(cctxt->sched_clnt_hdl,
+			transc->ip_buf_entry, false);
+	VCD_FAILED_RETURN(rc, "Failed: vcd_sched_queue_buffer");
+
+	transc->ip_buf_entry = NULL;
+	vcd_release_trans_tbl_entry(transc);
+	frame->frm_trans_end = true;
+
+	if (VCD_FAILED(status))
+		VCD_MSG_ERROR("\n OUTPUT_REQ returned err = 0x%x",
+			status);
+
+	if (cctxt->status.frame_submitted > 0)
+		cctxt->status.frame_submitted--;
+	else
+		cctxt->status.frame_delayed--;
+
+	if (!VCD_FAILED(status) &&
+		cctxt->decoding &&
+		frame->vcd_frm.interlaced) {
+		if (cctxt->status.int_field_cnt > 0) {
+			VCD_MSG_ERROR("\n Not expected: OUTPUT_REQ"
+				"for 2nd interlace field");
+			rc = VCD_ERR_FAIL;
+		}
+	}
+
+	return rc;
+}
+
+u32 vcd_handle_output_required_in_flushing(
+struct vcd_clnt_ctxt *cctxt, void *payload)
+{
+	u32 rc;
+	struct vcd_transc *transc;
+	struct ddl_frame_data_tag *frame =
+		(struct ddl_frame_data_tag *)payload;
+
+	rc = vcd_validate_io_done_pyld(cctxt, payload, VCD_S_SUCCESS);
+	if (rc == VCD_ERR_CLIENT_FATAL)
+		vcd_handle_clnt_fatal_input_done(cctxt, frame->frm_trans_end);
+	VCD_FAILED_RETURN(rc, "Bad input done payload");
+
+	transc = (struct vcd_transc *)frame->vcd_frm.ip_frm_tag;
+
+	frame->vcd_frm.interlaced = false;
+
+	rc = vcd_handle_input_done(cctxt, payload,
+			VCD_EVT_RESP_INPUT_FLUSHED, VCD_S_SUCCESS);
+	VCD_FAILED_RETURN(rc, "Failed: vcd_handle_input_done");
+
+	vcd_release_trans_tbl_entry(transc);
+	frame->frm_trans_end = true;
+
+	return rc;
+}
+
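+/*
+ * OUTPUT/FRAME_DONE from DDL: map the returned output address back to
+ * its pool entry, restore the timestamp and ip_frm_tag saved in the
+ * transaction, handle interlaced-field pairing and forward the frame to
+ * the client unless the field was dropped.
+ */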
+u32 vcd_handle_frame_done(
+	struct vcd_clnt_ctxt *cctxt,
+	 void *payload, u32 event, u32 status)
+{
+	struct vcd_buffer_entry *op_buf_entry = NULL;
+	struct ddl_frame_data_tag *op_frm =
+		(struct ddl_frame_data_tag *) payload;
+	struct vcd_transc *transc;
+	u32 rc;
+
+	rc = vcd_validate_io_done_pyld(cctxt, payload, status);
+	if (rc == VCD_ERR_CLIENT_FATAL)
+		vcd_handle_clnt_fatal(cctxt, op_frm->frm_trans_end);
+	VCD_FAILED_RETURN(rc, "Bad payload recvd");
+
+	transc = (struct vcd_transc *)op_frm->vcd_frm.ip_frm_tag;
+
+	if (op_frm->vcd_frm.virtual) {
+
+		if (!transc->op_buf_entry) {
+			op_buf_entry =
+				vcd_find_buffer_pool_entry(
+					&cctxt->out_buf_pool,
+					op_frm->vcd_frm.virtual);
+		} else {
+			op_buf_entry = transc->op_buf_entry;
+		}
+
+		if (!op_buf_entry) {
+			VCD_MSG_ERROR("Invalid output buffer returned"
+				"from DDL");
+			vcd_handle_clnt_fatal(cctxt, op_frm->frm_trans_end);
+			rc = VCD_ERR_BAD_POINTER;
+		} else if (!op_buf_entry->in_use) {
+			VCD_MSG_ERROR("Bad output buffer 0x%p recvd from DDL",
+					  op_buf_entry->frame.virtual);
+			vcd_handle_clnt_fatal(cctxt, op_frm->frm_trans_end);
+			rc = VCD_ERR_BAD_POINTER;
+		} else {
+			op_buf_entry->in_use = false;
+			VCD_BUFFERPOOL_INUSE_DECREMENT(
+				cctxt->out_buf_pool.in_use);
+			VCD_MSG_LOW("outBufPool.InUse = %d",
+						cctxt->out_buf_pool.in_use);
+		}
+	}
+	VCD_FAILED_RETURN(rc, "Bad output buffer pointer");
+	op_frm->vcd_frm.time_stamp = transc->time_stamp;
+	op_frm->vcd_frm.ip_frm_tag = transc->ip_frm_tag;
+	if (cctxt->decoding)
+		op_frm->vcd_frm.frame = transc->frame;
+	else
+		transc->frame = op_frm->vcd_frm.frame;
+	transc->frame_done = true;
+
+	if (transc->input_done && transc->frame_done)
+		vcd_release_trans_tbl_entry(transc);
+
+	if (status == VCD_ERR_INTRLCD_FIELD_DROP ||
+		(op_frm->vcd_frm.intrlcd_ip_frm_tag !=
+		VCD_FRAMETAG_INVALID &&
+		op_frm->vcd_frm.intrlcd_ip_frm_tag)) {
+		vcd_handle_frame_done_for_interlacing(cctxt, transc,
+							  op_frm, status);
+	}
+
+	if (status != VCD_ERR_INTRLCD_FIELD_DROP) {
+		cctxt->callback(event,
+			status,
+			&op_frm->vcd_frm,
+			sizeof(struct vcd_frame_data),
+			cctxt, cctxt->client_data);
+	}
+	return rc;
+}
+
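+/*
+ * In EOS state output frames are released to the client one behind: the
+ * previously cached frame is delivered and the current one is cached so
+ * the last frame can be tagged with the EOS flag when EOS_DONE arrives.
+ */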
+u32 vcd_handle_frame_done_in_eos(
+	struct vcd_clnt_ctxt *cctxt, void *payload, u32 status)
+{
+	struct ddl_frame_data_tag *frame =
+		(struct ddl_frame_data_tag *) payload;
+	u32 rc = VCD_S_SUCCESS;
+	VCD_MSG_LOW("vcd_handle_frame_done_in_eos:");
+	rc = vcd_validate_io_done_pyld(cctxt, payload, status);
+	if (rc == VCD_ERR_CLIENT_FATAL)
+		vcd_handle_clnt_fatal(cctxt, frame->frm_trans_end);
+	VCD_FAILED_RETURN(rc, "Bad payload received");
+
+	if (cctxt->status.mask & VCD_EOS_PREV_VALID) {
+		rc = vcd_handle_frame_done(cctxt,
+			(void *)&cctxt->status.eos_prev_op_frm,
+			VCD_EVT_RESP_OUTPUT_DONE,
+			cctxt->status.eos_prev_op_frm_status);
+		VCD_FAILED_RETURN(rc, "Failed: vcd_handle_frame_done");
+	}
+
+	cctxt->status.eos_prev_op_frm = *frame;
+	cctxt->status.eos_prev_op_frm_status = status;
+	cctxt->status.mask |= VCD_EOS_PREV_VALID;
+	return rc;
+}
+
+void vcd_handle_frame_done_for_interlacing(
+	struct vcd_clnt_ctxt *cctxt,
+	 struct vcd_transc *transc_ip1,
+	 struct ddl_frame_data_tag *op_frm, u32 status)
+{
+	struct vcd_transc *transc_ip2 = (struct vcd_transc *)
+		op_frm->vcd_frm.intrlcd_ip_frm_tag;
+
+	if (status == VCD_ERR_INTRLCD_FIELD_DROP) {
+		cctxt->status.int_field_cnt = 0;
+		return;
+	}
+
+	op_frm->vcd_frm.intrlcd_ip_frm_tag = transc_ip2->ip_frm_tag;
+
+	transc_ip2->frame_done = true;
+
+	if (transc_ip2->input_done && transc_ip2->frame_done)
+		vcd_release_trans_tbl_entry(transc_ip2);
+
+	if (!transc_ip1->frame || !transc_ip2->frame) {
+		VCD_MSG_ERROR("DDL didn't provided frame type");
+		return;
+	}
+}
+
+u32 vcd_handle_first_fill_output_buffer(
+	struct vcd_clnt_ctxt *cctxt,
+	struct vcd_frame_data *buffer,
+	u32 *handled)
+{
+	u32 rc = VCD_S_SUCCESS;
+	rc = vcd_check_if_buffer_req_met(cctxt, VCD_BUFFER_OUTPUT);
+	VCD_FAILED_RETURN(rc, "Output buffer requirements not met");
+	if (cctxt->out_buf_pool.q_len > 0) {
+		VCD_MSG_ERROR("Old output buffers were not flushed out");
+		return VCD_ERR_BAD_STATE;
+	}
+	cctxt->status.mask |= VCD_FIRST_OP_RCVD;
+	if (cctxt->sched_clnt_hdl)
+		rc = vcd_sched_suspend_resume_clnt(cctxt, true);
+	VCD_FAILED_RETURN(rc, "Failed: vcd_sched_suspend_resume_clnt");
+	if (cctxt->decoding)
+		rc = vcd_handle_first_fill_output_buffer_for_dec(
+			cctxt, buffer, handled);
+	else
+		rc = vcd_handle_first_fill_output_buffer_for_enc(
+			cctxt, buffer, handled);
+	return rc;
+}
+
+u32 vcd_handle_first_fill_output_buffer_for_enc(
+	struct vcd_clnt_ctxt *cctxt,
+	struct vcd_frame_data *frm_entry,
+	u32 *handled)
+{
+	u32 rc, seqhdr_present = 0;
+	struct vcd_property_hdr prop_hdr;
+	struct vcd_sequence_hdr seq_hdr;
+	struct vcd_property_codec codec;
+	*handled = true;
+	prop_hdr.prop_id = DDL_I_SEQHDR_PRESENT;
+	prop_hdr.sz = sizeof(seqhdr_present);
+	rc = ddl_get_property(cctxt->ddl_handle, &prop_hdr, &seqhdr_present);
+	VCD_FAILED_RETURN(rc, "Failed: DDL_I_SEQHDR_PRESENT");
+	if (!seqhdr_present) {
+		*handled = false;
+		return VCD_S_SUCCESS;
+	}
+
+	prop_hdr.prop_id = VCD_I_CODEC;
+	prop_hdr.sz = sizeof(struct vcd_property_codec);
+	rc = ddl_get_property(cctxt->ddl_handle, &prop_hdr, &codec);
+	if (!VCD_FAILED(rc)) {
+		if (codec.codec != VCD_CODEC_H263) {
+			prop_hdr.prop_id = VCD_I_SEQ_HEADER;
+			prop_hdr.sz = sizeof(struct vcd_sequence_hdr);
+			seq_hdr.sequence_header = frm_entry->virtual;
+			seq_hdr.sequence_header_len =
+				frm_entry->alloc_len;
+			rc = ddl_get_property(cctxt->ddl_handle,
+				&prop_hdr, &seq_hdr);
+			if (!VCD_FAILED(rc)) {
+				frm_entry->data_len =
+					seq_hdr.sequence_header_len;
+				frm_entry->time_stamp = 0;
+				frm_entry->flags |=
+					VCD_FRAME_FLAG_CODECCONFIG;
+				cctxt->callback(VCD_EVT_RESP_OUTPUT_DONE,
+					VCD_S_SUCCESS, frm_entry,
+					sizeof(struct vcd_frame_data),
+					cctxt,
+					cctxt->client_data);
+			} else
+				VCD_MSG_ERROR(
+					"rc = 0x%x. Failed: "
+					"ddl_get_property: VCD_I_SEQ_HEADER",
+					rc);
+		} else
+			VCD_MSG_LOW("Codec Type is H.263\n");
+	} else
+		VCD_MSG_ERROR(
+			"rc = 0x%x. Failed: ddl_get_property:VCD_I_CODEC",
+			rc);
+	return rc;
+}
+
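+/*
+ * First output buffer for a decoder session: build a DPB list from pool
+ * entries 1..count and hand it to DDL via DDL_I_DPB. *handled is cleared
+ * before returning so the buffer is still queued through the normal
+ * path.
+ */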
+u32 vcd_handle_first_fill_output_buffer_for_dec(
+	struct vcd_clnt_ctxt *cctxt,
+	struct vcd_frame_data *frm_entry,
+	u32 *handled)
+{
+	u32 rc;
+	struct vcd_property_hdr prop_hdr;
+	struct vcd_buffer_pool *out_buf_pool;
+	struct ddl_property_dec_pic_buffers dpb;
+	struct ddl_frame_data_tag *dpb_list;
+	u8 i;
+
+	(void)frm_entry;
+	*handled = true;
+	prop_hdr.prop_id = DDL_I_DPB;
+	prop_hdr.sz = sizeof(dpb);
+	out_buf_pool = &cctxt->out_buf_pool;
+
+	dpb_list = (struct ddl_frame_data_tag *)
+		kmalloc((sizeof(struct ddl_frame_data_tag) *
+		out_buf_pool->count), GFP_KERNEL);
+
+	if (!dpb_list) {
+		VCD_MSG_ERROR("Memory allocation failure");
+		return VCD_ERR_ALLOC_FAIL;
+	}
+
+	for (i = 1; i <= out_buf_pool->count; i++)
+		dpb_list[i - 1].vcd_frm = out_buf_pool->entries[i].frame;
+
+	dpb.dec_pic_buffers = dpb_list;
+	dpb.no_of_dec_pic_buf = out_buf_pool->count;
+	rc = ddl_set_property(cctxt->ddl_handle, &prop_hdr, &dpb);
+
+	kfree(dpb_list);
+	*handled = false;
+
+	return VCD_S_SUCCESS;
+}
+
+void vcd_handle_eos_trans_end(struct vcd_clnt_ctxt *cctxt)
+{
+	u32 rc = VCD_S_SUCCESS;
+	if (cctxt->status.mask & VCD_EOS_PREV_VALID) {
+		rc = vcd_handle_frame_done(cctxt,
+			(void *)&cctxt->status.eos_prev_op_frm,
+			VCD_EVT_RESP_OUTPUT_DONE,
+			cctxt->status.eos_prev_op_frm_status);
+		cctxt->status.mask &= ~VCD_EOS_PREV_VALID;
+	}
+	if (VCD_FAILED(rc))
+		return;
+
+	if (cctxt->status.mask & VCD_FLUSH_ALL)
+		vcd_process_pending_flush_in_eos(cctxt);
+
+	if (cctxt->status.mask & VCD_STOP_PENDING)
+		vcd_process_pending_stop_in_eos(cctxt);
+	else {
+		vcd_do_client_state_transition(cctxt,
+			VCD_CLIENT_STATE_RUN,
+			CLIENT_STATE_EVENT_NUMBER(clnt_cb));
+	}
+}
+
+void vcd_handle_eos_done(struct vcd_clnt_ctxt *cctxt,
+	 struct vcd_transc *transc, u32 status)
+{
+	struct vcd_frame_data  vcd_frm;
+	u32 rc = VCD_S_SUCCESS, sent_eos_frm = false;
+	VCD_MSG_LOW("vcd_handle_eos_done:");
+
+	if (VCD_FAILED(status))
+		VCD_MSG_ERROR("EOS DONE returned error = 0x%x", status);
+
+	if (cctxt->status.mask & VCD_EOS_PREV_VALID) {
+		cctxt->status.eos_prev_op_frm.vcd_frm.flags |=
+			VCD_FRAME_FLAG_EOS;
+
+		rc = vcd_handle_frame_done(cctxt,
+			(void *)&cctxt->status.eos_prev_op_frm,
+			VCD_EVT_RESP_OUTPUT_DONE,
+			cctxt->status.eos_prev_op_frm_status);
+		cctxt->status.mask &= ~VCD_EOS_PREV_VALID;
+		if (!VCD_FAILED(rc) &&
+			cctxt->status.eos_prev_op_frm_status !=
+				VCD_ERR_INTRLCD_FIELD_DROP)
+			sent_eos_frm = true;
+	}
+	if (!sent_eos_frm) {
+		if (transc->ip_buf_entry) {
+			transc->ip_buf_entry->frame.ip_frm_tag =
+				transc->ip_frm_tag;
+
+			vcd_send_frame_done_in_eos(cctxt,
+				&transc->ip_buf_entry->frame, false);
+		} else {
+			memset(&vcd_frm, 0, sizeof(struct vcd_frame_data));
+			vcd_frm.ip_frm_tag = transc->ip_frm_tag;
+			vcd_frm.time_stamp = transc->time_stamp;
+			vcd_frm.flags = VCD_FRAME_FLAG_EOS;
+			vcd_send_frame_done_in_eos(cctxt, &vcd_frm, true);
+		}
+	}
+	if (VCD_FAILED(rc))
+		return;
+	if (transc->ip_buf_entry) {
+		if (transc->ip_buf_entry->frame.virtual) {
+			transc->ip_buf_entry->frame.ip_frm_tag =
+				transc->ip_frm_tag;
+			cctxt->callback(VCD_EVT_RESP_INPUT_DONE,
+					  VCD_S_SUCCESS,
+					  &transc->ip_buf_entry->frame,
+					  sizeof(struct vcd_frame_data),
+					  cctxt, cctxt->client_data);
+		}
+		transc->ip_buf_entry->in_use = false;
+		VCD_BUFFERPOOL_INUSE_DECREMENT(cctxt->in_buf_pool.in_use);
+		transc->ip_buf_entry = NULL;
+		if (cctxt->status.frame_submitted)
+			cctxt->status.frame_submitted--;
+		else
+			cctxt->status.frame_delayed--;
+	}
+
+	vcd_release_trans_tbl_entry(transc);
+	if (cctxt->status.mask & VCD_FLUSH_ALL)
+		vcd_process_pending_flush_in_eos(cctxt);
+
+	if (cctxt->status.mask & VCD_STOP_PENDING) {
+		vcd_process_pending_stop_in_eos(cctxt);
+	} else if (!(cctxt->status.mask & VCD_EOS_WAIT_OP_BUF)) {
+		vcd_do_client_state_transition(cctxt,
+						   VCD_CLIENT_STATE_RUN,
+						   CLIENT_STATE_EVENT_NUMBER
+						   (clnt_cb));
+	}
+}
+
+void vcd_handle_start_done(struct vcd_clnt_ctxt *cctxt,
+	struct vcd_transc *transc, u32 status)
+{
+	cctxt->status.cmd_submitted--;
+	vcd_mark_command_channel(cctxt->dev_ctxt, transc);
+
+	if (!VCD_FAILED(status)) {
+		cctxt->callback(VCD_EVT_RESP_START, status, NULL,
+			0, cctxt,	cctxt->client_data);
+
+		vcd_do_client_state_transition(cctxt,
+			VCD_CLIENT_STATE_RUN,
+			CLIENT_STATE_EVENT_NUMBER(clnt_cb));
+	} else {
+		VCD_MSG_ERROR("ddl callback returned failure."
+			"status = 0x%x", status);
+		vcd_handle_err_in_starting(cctxt, status);
+	}
+}
+
+void vcd_handle_stop_done(struct vcd_clnt_ctxt *cctxt,
+	struct vcd_transc *transc, u32 status)
+{
+
+	VCD_MSG_LOW("vcd_handle_stop_done:");
+	cctxt->status.cmd_submitted--;
+	vcd_mark_command_channel(cctxt->dev_ctxt, transc);
+
+	if (!VCD_FAILED(status)) {
+		vcd_do_client_state_transition(cctxt,
+			VCD_CLIENT_STATE_OPEN,
+			CLIENT_STATE_EVENT_NUMBER(clnt_cb));
+	} else {
+		VCD_MSG_FATAL("STOP_DONE returned error = 0x%x", status);
+		status = VCD_ERR_HW_FATAL;
+		vcd_handle_device_err_fatal(cctxt->dev_ctxt, cctxt);
+		vcd_do_client_state_transition(cctxt,
+			VCD_CLIENT_STATE_INVALID,
+			CLIENT_STATE_EVENT_NUMBER(clnt_cb));
+	}
+
+	cctxt->callback(VCD_EVT_RESP_STOP, status, NULL, 0, cctxt,
+					  cctxt->client_data);
+
+	memset(&cctxt->status, 0, sizeof(struct vcd_clnt_status));
+}
+
+void vcd_handle_stop_done_in_starting(struct vcd_clnt_ctxt
+	*cctxt, struct vcd_transc *transc, u32 status)
+{
+	VCD_MSG_LOW("vcd_handle_stop_done_in_starting:");
+	cctxt->status.cmd_submitted--;
+	vcd_mark_command_channel(cctxt->dev_ctxt, transc);
+	if (!VCD_FAILED(status)) {
+		cctxt->callback(VCD_EVT_RESP_START, cctxt->status.last_err,
+			NULL, 0, cctxt, cctxt->client_data);
+		vcd_do_client_state_transition(cctxt, VCD_CLIENT_STATE_OPEN,
+			   CLIENT_STATE_EVENT_NUMBER(clnt_cb));
+	} else {
+		VCD_MSG_FATAL("VCD Cleanup: STOP_DONE returned error "
+			"= 0x%x", status);
+		vcd_handle_err_fatal(cctxt, VCD_EVT_RESP_START,
+			VCD_ERR_HW_FATAL);
+	}
+}
+
+void vcd_handle_stop_done_in_invalid(struct vcd_clnt_ctxt *cctxt,
+	struct vcd_transc *transc, u32 status)
+{
+	u32 rc;
+	VCD_MSG_LOW("vcd_handle_stop_done_in_invalid:");
+
+	cctxt->status.cmd_submitted--;
+	vcd_mark_command_channel(cctxt->dev_ctxt, transc);
+
+	if (!VCD_FAILED(status)) {
+		vcd_client_cmd_flush_and_en_q(cctxt, VCD_CMD_CLIENT_CLOSE);
+		if (cctxt->status.frame_submitted) {
+			vcd_release_multiple_frame_channels(cctxt->dev_ctxt,
+			cctxt->status.frame_submitted);
+
+			cctxt->status.frame_submitted = 0;
+			cctxt->status.frame_delayed = 0;
+		}
+		if (cctxt->status.cmd_submitted) {
+			vcd_release_multiple_command_channels(
+				cctxt->dev_ctxt,
+				cctxt->status.cmd_submitted);
+			cctxt->status.cmd_submitted = 0;
+		}
+	} else {
+		VCD_MSG_FATAL("VCD Cleanup: STOP_DONE returned error "
+			"= 0x%x", status);
+		vcd_handle_device_err_fatal(cctxt->dev_ctxt, cctxt);
+		cctxt->status.mask &= ~VCD_CLEANING_UP;
+	}
+	vcd_flush_buffers_in_err_fatal(cctxt);
+	VCD_MSG_HIGH("VCD cleanup: All buffers are returned");
+	if (cctxt->status.mask & VCD_STOP_PENDING) {
+		cctxt->callback(VCD_EVT_RESP_STOP, VCD_S_SUCCESS, NULL, 0,
+			cctxt, cctxt->client_data);
+		cctxt->status.mask &= ~VCD_STOP_PENDING;
+	}
+	rc = vcd_power_event(cctxt->dev_ctxt, cctxt,
+						  VCD_EVT_PWR_CLNT_ERRFATAL);
+	if (VCD_FAILED(rc))
+		VCD_MSG_ERROR("VCD_EVT_PWR_CLNT_ERRFATAL failed");
+	if (!(cctxt->status.mask & VCD_CLEANING_UP) &&
+		cctxt->status.mask & VCD_CLOSE_PENDING) {
+		struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
+
+		vcd_destroy_client_context(cctxt);
+		vcd_handle_for_last_clnt_close(dev_ctxt, false);
+	}
+}
+
+u32 vcd_handle_input_frame(
+	struct vcd_clnt_ctxt *cctxt,
+	 struct vcd_frame_data *input_frame)
+{
+	struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
+	struct vcd_buffer_entry *buf_entry;
+	struct vcd_frame_data *frm_entry;
+	u32 rc = VCD_S_SUCCESS;
+	u32 eos_handled = false;
+
+	VCD_MSG_LOW("vcd_handle_input_frame:");
+
+	VCD_MSG_LOW("input buffer: addr=(0x%p), sz=(%d), len=(%d)",
+			input_frame->virtual, input_frame->alloc_len,
+			input_frame->data_len);
+
+	if (!input_frame->virtual &&
+		!(input_frame->flags & VCD_FRAME_FLAG_EOS)) {
+		VCD_MSG_ERROR("Bad frame ptr/len/EOS combination");
+		return VCD_ERR_ILLEGAL_PARM;
+	}
+
+	if (!input_frame->data_len &&
+		!(input_frame->flags & VCD_FRAME_FLAG_EOS)) {
+		VCD_MSG_MED("data_len = 0, returning INPUT DONE");
+		cctxt->callback(VCD_EVT_RESP_INPUT_DONE,
+				  VCD_ERR_INPUT_NOT_PROCESSED,
+				  input_frame,
+				  sizeof(struct vcd_frame_data),
+				  cctxt, cctxt->client_data);
+		return VCD_S_SUCCESS;
+	}
+
+	if (!(cctxt->status.mask & VCD_FIRST_IP_RCVD)) {
+		if (cctxt->decoding)
+			rc = vcd_handle_first_decode_frame(cctxt);
+
+		if (!VCD_FAILED(rc)) {
+			cctxt->status.first_ts = input_frame->time_stamp;
+			cctxt->status.prev_ts = cctxt->status.first_ts;
+
+			cctxt->status.mask |= VCD_FIRST_IP_RCVD;
+
+			(void)vcd_power_event(cctxt->dev_ctxt,
+						  cctxt,
+						  VCD_EVT_PWR_CLNT_FIRST_FRAME);
+		}
+	}
+	VCD_FAILED_RETURN(rc, "Failed: First frame handling");
+
+	buf_entry = vcd_find_buffer_pool_entry(&cctxt->in_buf_pool,
+						 input_frame->virtual);
+	if (!buf_entry) {
+		VCD_MSG_ERROR("Bad buffer addr: %p", input_frame->virtual);
+		return VCD_ERR_FAIL;
+	}
+
+	if (buf_entry->in_use) {
+		VCD_MSG_ERROR("An inuse input frame is being"
+			"re-queued to scheduler");
+		return VCD_ERR_FAIL;
+	}
+
+	if (input_frame->alloc_len > buf_entry->sz) {
+		VCD_MSG_ERROR("Bad buffer Alloc_len %d, Actual sz=%d",
+			input_frame->alloc_len, buf_entry->sz);
+
+		return VCD_ERR_ILLEGAL_PARM;
+	}
+
+	frm_entry = &buf_entry->frame;
+
+	*frm_entry = *input_frame;
+	frm_entry->physical = buf_entry->physical;
+
+	if (input_frame->flags & VCD_FRAME_FLAG_EOS) {
+		rc = vcd_handle_recvd_eos(cctxt, input_frame,
+					  &eos_handled);
+	}
+
+	if (VCD_FAILED(rc) || eos_handled) {
+		VCD_MSG_HIGH("rc = 0x%x, eos_handled = %d", rc,
+				 eos_handled);
+
+		return rc;
+	}
+	rc = vcd_sched_queue_buffer(
+		cctxt->sched_clnt_hdl, buf_entry, true);
+	VCD_FAILED_RETURN(rc, "Failed: vcd_sched_queue_buffer");
+
+	buf_entry->in_use = true;
+	cctxt->in_buf_pool.in_use++;
+	vcd_try_submit_frame(dev_ctxt);
+	return rc;
+}
+
+void vcd_release_all_clnt_frm_transc(struct vcd_clnt_ctxt *cctxt)
+{
+	struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
+	u32 i, cntr = 0;
+	VCD_MSG_LOW("vcd_release_all_clnt_frm_transc:");
+	for (i = 0; i < dev_ctxt->trans_tbl_size; i++) {
+		if (dev_ctxt->trans_tbl[i].in_use &&
+			cctxt == dev_ctxt->trans_tbl[i].cctxt) {
+			if (dev_ctxt->trans_tbl[i].
+				type == VCD_CMD_CODE_FRAME ||
+				dev_ctxt->trans_tbl[i].
+				type == VCD_CMD_NONE) {
+				vcd_release_trans_tbl_entry(&dev_ctxt->
+								trans_tbl[i]);
+			} else {
+				VCD_MSG_LOW("vcd_transaction in use type(%u)",
+					dev_ctxt->trans_tbl[i].type);
+				cntr++;
+			}
+		}
+	}
+	if (cntr)
+		VCD_MSG_ERROR("vcd_transactions still in use: (%d)", cntr);
+}
+
+void vcd_release_all_clnt_transc(struct vcd_clnt_ctxt *cctxt)
+{
+	struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
+	u32 i;
+
+	VCD_MSG_LOW("vcd_release_all_clnt_transc:");
+
+	for (i = 0; i < dev_ctxt->trans_tbl_size; i++) {
+		if (dev_ctxt->trans_tbl[i].in_use &&
+			cctxt == dev_ctxt->trans_tbl[i].cctxt) {
+				vcd_release_trans_tbl_entry(
+					&dev_ctxt->trans_tbl[i]);
+		}
+	}
+}
+
+void vcd_send_flush_done(struct vcd_clnt_ctxt *cctxt, u32 status)
+{
+	VCD_MSG_LOW("vcd_send_flush_done:");
+
+	if (cctxt->status.mask & VCD_FLUSH_INPUT) {
+		cctxt->callback(VCD_EVT_RESP_FLUSH_INPUT_DONE,
+			status, NULL, 0, cctxt, cctxt->client_data);
+		cctxt->status.mask &= ~VCD_FLUSH_INPUT;
+	}
+
+	if (cctxt->status.mask & VCD_FLUSH_OUTPUT) {
+		cctxt->callback(VCD_EVT_RESP_FLUSH_OUTPUT_DONE,
+			status, NULL, 0, cctxt, cctxt->client_data);
+		cctxt->status.mask &= ~VCD_FLUSH_OUTPUT;
+	}
+}
+
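+/*
+ * Cache the client-provided sequence header in contiguous memory. Extra
+ * room is allocated for DDL's alignment requirement plus padding, and
+ * the header is copied in at the aligned physical address.
+ */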
+u32 vcd_store_seq_hdr(
+	struct vcd_clnt_ctxt *cctxt,
+	 struct vcd_sequence_hdr *seq_hdr)
+{
+	u32 rc;
+	struct vcd_property_hdr prop_hdr;
+	u32 align;
+	u8 *virtual_aligned;
+	u32 addr;
+	int ret = 0;
+
+	if (!seq_hdr->sequence_header_len
+		|| !seq_hdr->sequence_header) {
+		VCD_MSG_ERROR("Bad seq hdr");
+
+		return VCD_ERR_BAD_POINTER;
+	}
+
+	if (cctxt->seq_hdr.sequence_header) {
+		VCD_MSG_HIGH("Old seq hdr detected");
+
+		vcd_pmem_free(cctxt->seq_hdr.sequence_header,
+				  cctxt->seq_hdr_phy_addr);
+		cctxt->seq_hdr.sequence_header = NULL;
+	}
+
+	cctxt->seq_hdr.sequence_header_len =
+		seq_hdr->sequence_header_len;
+
+	prop_hdr.prop_id = DDL_I_SEQHDR_ALIGN_BYTES;
+	prop_hdr.sz = sizeof(u32);
+
+	rc = ddl_get_property(cctxt->ddl_handle, &prop_hdr, &align);
+
+	VCD_FAILED_RETURN(rc,
+			  "Failed: ddl_get_property DDL_I_SEQHDR_ALIGN_BYTES");
+
+	VCD_MSG_MED("Seq hdr alignment bytes = %d", align);
+
+	ret = vcd_pmem_alloc(cctxt->seq_hdr.sequence_header_len + align +
+				 VCD_SEQ_HDR_PADDING_BYTES,
+				 &(cctxt->seq_hdr.sequence_header),
+				 &(cctxt->seq_hdr_phy_addr));
+
+	if (ret < 0) {
+		VCD_MSG_ERROR("Seq hdr allocation failed");
+
+		return VCD_ERR_ALLOC_FAIL;
+	}
+
+	if (!cctxt->seq_hdr_phy_addr) {
+		VCD_MSG_ERROR("Couldn't get physical address");
+
+		return VCD_ERR_BAD_POINTER;
+	}
+
+	if (align > 0) {
+		addr = (u32) cctxt->seq_hdr_phy_addr;
+		addr += align;
+		addr -= (addr % align);
+		virtual_aligned = cctxt->seq_hdr.sequence_header;
+		virtual_aligned += (u32) (addr -
+			(u32) cctxt->seq_hdr_phy_addr);
+		cctxt->seq_hdr_phy_addr = (u8 *) addr;
+	} else {
+		virtual_aligned = cctxt->seq_hdr.sequence_header;
+	}
+
+	memcpy(virtual_aligned, seq_hdr->sequence_header,
+		seq_hdr->sequence_header_len);
+
+	return VCD_S_SUCCESS;
+}
+
+u32 vcd_set_frame_rate(
+	struct vcd_clnt_ctxt *cctxt,
+	 struct vcd_property_frame_rate *fps)
+{
+	u32 rc;
+	cctxt->frm_rate = *fps;
+	rc = vcd_update_clnt_perf_lvl(cctxt, &cctxt->frm_rate,
+					  cctxt->frm_p_units);
+	if (VCD_FAILED(rc)) {
+		VCD_MSG_ERROR("rc = 0x%x. Failed: vcd_update_clnt_perf_lvl",
+				  rc);
+	}
+	rc = vcd_sched_update_config(cctxt);
+	return rc;
+}
+
+u32 vcd_set_frame_size(
+	struct vcd_clnt_ctxt *cctxt,
+	 struct vcd_property_frame_size *frm_size)
+{
+	struct vcd_property_hdr prop_hdr;
+	u32 rc;
+	u32 frm_p_units;
+	(void)frm_size;
+
+	prop_hdr.prop_id = DDL_I_FRAME_PROC_UNITS;
+	prop_hdr.sz = sizeof(frm_p_units);
+	rc = ddl_get_property(cctxt->ddl_handle, &prop_hdr, &frm_p_units);
+	VCD_FAILED_RETURN(rc, "Failed: Get DDL_I_FRAME_PROC_UNITS");
+
+	cctxt->frm_p_units = frm_p_units;
+
+	rc = vcd_update_clnt_perf_lvl(cctxt, &cctxt->frm_rate,
+					  frm_p_units);
+	if (VCD_FAILED(rc)) {
+		VCD_MSG_ERROR("rc = 0x%x. Failed: vcd_update_clnt_perf_lvl",
+				  rc);
+	}
+	return rc;
+}
+
+void vcd_process_pending_flush_in_eos(struct vcd_clnt_ctxt *cctxt)
+{
+	u32 rc = VCD_S_SUCCESS;
+	VCD_MSG_HIGH("Buffer flush is pending");
+	rc = vcd_flush_buffers(cctxt, cctxt->status.mask & VCD_FLUSH_ALL);
+	if (VCD_FAILED(rc))
+		VCD_MSG_ERROR("rc = 0x%x. Failed: vcd_flush_buffers", rc);
+	cctxt->status.mask &= ~VCD_EOS_WAIT_OP_BUF;
+	vcd_send_flush_done(cctxt, VCD_S_SUCCESS);
+}
+
+void vcd_process_pending_stop_in_eos(struct vcd_clnt_ctxt *cctxt)
+{
+	u32 rc = VCD_S_SUCCESS;
+	rc = vcd_flush_buffers(cctxt, VCD_FLUSH_ALL);
+	if (VCD_FAILED(rc))
+		VCD_MSG_ERROR("rc = 0x%x. Failed: vcd_flush_buffers", rc);
+	VCD_MSG_HIGH("All buffers are returned. Enqueuing stop cmd");
+	vcd_client_cmd_flush_and_en_q(cctxt, VCD_CMD_CODEC_STOP);
+	cctxt->status.mask &= ~VCD_STOP_PENDING;
+	vcd_do_client_state_transition(cctxt,
+					   VCD_CLIENT_STATE_STOPPING,
+					   CLIENT_STATE_EVENT_NUMBER(stop));
+}
+
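+/*
+ * Convert the gap between the current and previous timestamps into
+ * time_resoln units (handling timestamp wrap-around), and recompute
+ * prev_ts from the accumulated elapsed time so that rounding errors do
+ * not drift over a long session.
+ */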
+u32 vcd_calculate_frame_delta(
+	struct vcd_clnt_ctxt *cctxt,
+	 struct vcd_frame_data *frame)
+{
+	u32 frm_delta;
+	u64 temp, max = ~((u64)0);
+
+	if (frame->time_stamp >= cctxt->status.prev_ts)
+		temp = frame->time_stamp - cctxt->status.prev_ts;
+	else
+		temp = (max - cctxt->status.prev_ts) +
+			frame->time_stamp;
+
+	VCD_MSG_LOW("Curr_ts=%lld  Prev_ts=%lld Diff=%llu",
+			frame->time_stamp, cctxt->status.prev_ts, temp);
+
+	temp *= cctxt->time_resoln;
+	(void)do_div(temp, VCD_TIMESTAMP_RESOLUTION);
+	frm_delta = temp;
+	cctxt->status.time_elapsed += frm_delta;
+
+	temp = (cctxt->status.time_elapsed * VCD_TIMESTAMP_RESOLUTION);
+	(void)do_div(temp, cctxt->time_resoln);
+	cctxt->status.prev_ts = cctxt->status.first_ts + temp;
+
+	VCD_MSG_LOW("Time_elapsed=%llu, Drift=%llu, new Prev_ts=%lld",
+			cctxt->status.time_elapsed, temp,
+			cctxt->status.prev_ts);
+
+	return frm_delta;
+}
+
+struct vcd_buffer_entry *vcd_check_fill_output_buffer(
+	struct vcd_clnt_ctxt *cctxt,
+	struct vcd_frame_data *buffer)
+{
+	struct vcd_buffer_pool *buf_pool = &cctxt->out_buf_pool;
+	struct vcd_buffer_entry *buf_entry;
+
+	if (!buf_pool->entries) {
+		VCD_MSG_ERROR("Buffers not set or allocated yet");
+
+		return NULL;
+	}
+
+	if (!buffer->virtual) {
+		VCD_MSG_ERROR("NULL buffer address provided");
+		return NULL;
+	}
+
+	buf_entry =
+		vcd_find_buffer_pool_entry(buf_pool, buffer->virtual);
+	if (!buf_entry) {
+		VCD_MSG_ERROR("Unrecognized buffer address provided = %p",
+				  buffer->virtual);
+		return NULL;
+	}
+
+	if (buf_entry->in_use) {
+		VCD_MSG_ERROR
+			("An inuse output frame is being provided for reuse");
+		return NULL;
+	}
+
+	if ((buffer->alloc_len < buf_pool->buf_req.sz ||
+		 buffer->alloc_len > buf_entry->sz) &&
+		 !(cctxt->status.mask & VCD_IN_RECONFIG)) {
+		VCD_MSG_ERROR
+			("Bad buffer Alloc_len = %d, Actual sz = %d, "
+			 " Min sz = %u",
+			 buffer->alloc_len, buf_entry->sz,
+			 buf_pool->buf_req.sz);
+		return NULL;
+	}
+
+	return buf_entry;
+}
+
+void vcd_handle_ind_hw_err_fatal(struct vcd_clnt_ctxt *cctxt,
+	u32 event, u32 status)
+{
+	if (cctxt->status.frame_submitted) {
+		cctxt->status.frame_submitted--;
+		vcd_mark_frame_channel(cctxt->dev_ctxt);
+	}
+	vcd_handle_err_fatal(cctxt, event, status);
+}
+
+void vcd_handle_err_fatal(struct vcd_clnt_ctxt *cctxt, u32 event,
+						  u32 status)
+{
+	VCD_MSG_LOW("vcd_handle_err_fatal: event=%x, err=%x", event, status);
+	if (!VCD_FAILED_FATAL(status))
+		return;
+
+	if (VCD_FAILED_DEVICE_FATAL(status)) {
+		vcd_clnt_handle_device_err_fatal(cctxt, event);
+		vcd_handle_device_err_fatal(cctxt->dev_ctxt, cctxt);
+	} else if (VCD_FAILED_CLIENT_FATAL(status)) {
+		cctxt->status.last_evt = event;
+		cctxt->callback(event, VCD_ERR_HW_FATAL, NULL, 0, cctxt,
+						   cctxt->client_data);
+		cctxt->status.mask |= VCD_CLEANING_UP;
+		vcd_client_cmd_flush_and_en_q(cctxt, VCD_CMD_CODEC_STOP);
+		vcd_do_client_state_transition(cctxt,
+			VCD_CLIENT_STATE_INVALID,
+			CLIENT_STATE_EVENT_NUMBER(clnt_cb));
+	}
+}
+
+void vcd_handle_err_in_starting(struct vcd_clnt_ctxt *cctxt,
+								u32 status)
+{
+	VCD_MSG_LOW("\n vcd_handle_err_in_starting:");
+	if (VCD_FAILED_FATAL(status)) {
+		vcd_handle_err_fatal(cctxt, VCD_EVT_RESP_START, status);
+	} else {
+		cctxt->status.last_err = status;
+		VCD_MSG_HIGH("\n VCD cleanup: Enqueuing stop cmd");
+		vcd_client_cmd_flush_and_en_q(cctxt, VCD_CMD_CODEC_STOP);
+	}
+}
+
+void vcd_handle_trans_pending(struct vcd_clnt_ctxt *cctxt)
+{
+	if (!cctxt->status.frame_submitted) {
+		VCD_MSG_ERROR("Transaction pending response was not expected");
+		return;
+	}
+	cctxt->status.frame_submitted--;
+	cctxt->status.frame_delayed++;
+	vcd_mark_frame_channel(cctxt->dev_ctxt);
+}
+
+void vcd_handle_submit_frame_failed(struct vcd_dev_ctxt *dev_ctxt,
+	struct vcd_transc *transc)
+{
+	struct vcd_clnt_ctxt *cctxt = transc->cctxt;
+	u32 rc;
+
+	vcd_mark_frame_channel(dev_ctxt);
+	vcd_release_trans_tbl_entry(transc);
+
+	vcd_handle_err_fatal(cctxt, VCD_EVT_IND_HWERRFATAL,
+		VCD_ERR_CLIENT_FATAL);
+
+	if (vcd_get_command_channel(dev_ctxt, &transc)) {
+		transc->type = VCD_CMD_CODEC_STOP;
+		transc->cctxt = cctxt;
+		rc = vcd_submit_cmd_sess_end(transc);
+		if (VCD_FAILED(rc)) {
+			vcd_release_command_channel(dev_ctxt, transc);
+			VCD_MSG_ERROR("rc = 0x%x. Failed: VCD_SubmitCmdSessEnd",
+				rc);
+		}
+	}
+}
+
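+/*
+ * Re-read the input or output buffer requirement from DDL and verify that
+ * the corresponding buffer pool satisfies it: enough buffers must be set
+ * and validated, and every pool entry must be at least the required size.
+ */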
+u32 vcd_check_if_buffer_req_met(struct vcd_clnt_ctxt *cctxt,
+	enum vcd_buffer_type buffer)
+{
+	struct vcd_property_hdr prop_hdr;
+	struct vcd_buffer_pool *buf_pool;
+	struct vcd_buffer_requirement buf_req;
+	u32 rc;
+	u8 i;
+
+	if (buffer == VCD_BUFFER_INPUT) {
+		prop_hdr.prop_id = DDL_I_INPUT_BUF_REQ;
+		buf_pool = &cctxt->in_buf_pool;
+	} else {
+		prop_hdr.prop_id = DDL_I_OUTPUT_BUF_REQ;
+		buf_pool = &cctxt->out_buf_pool;
+	}
+
+	prop_hdr.sz = sizeof(buf_req);
+	rc = ddl_get_property(cctxt->ddl_handle, &prop_hdr, &buf_req);
+	VCD_FAILED_RETURN(rc, "Failed: ddl_GetProperty");
+
+	buf_pool->buf_req = buf_req;
+	if (buf_pool->count < buf_req.actual_count) {
+		VCD_MSG_ERROR("Buf requirement count not met");
+		return VCD_ERR_FAIL;
+	}
+
+	if (buf_pool->count > buf_req.actual_count)
+		buf_pool->count = buf_req.actual_count;
+
+	if (!buf_pool->entries ||
+	    buf_pool->validated != buf_pool->count) {
+		VCD_MSG_ERROR("Buffer pool is not completely setup yet");
+		return VCD_ERR_BAD_STATE;
+	}
+	for (i = 1; (rc == VCD_S_SUCCESS && i <= buf_pool->count); i++) {
+		if (buf_pool->entries[i].sz <
+			buf_pool->buf_req.sz) {
+			VCD_MSG_ERROR(
+				"BufReq sz not met: "
+				"addr=(0x%p) sz=%d ReqSize=%d",
+				buf_pool->entries[i].virtual,
+				buf_pool->entries[i].sz,
+				buf_pool->buf_req.sz);
+			rc = VCD_ERR_FAIL;
+		}
+	}
+	return rc;
+}
+
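+/*
+ * Handle the output-reconfig indication from DDL: finish any pending
+ * output-required handling, refresh the output buffer requirement, update
+ * the frame size, flag the client as reconfiguring and forward
+ * VCD_EVT_IND_OUTPUT_RECONFIG to the client callback.
+ */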
+u32 vcd_handle_ind_output_reconfig(
+	struct vcd_clnt_ctxt *cctxt, void *payload, u32 status)
+{
+	struct ddl_frame_data_tag *frame =
+		(struct ddl_frame_data_tag *)payload;
+	struct vcd_property_hdr prop_hdr;
+	u32 rc = VCD_S_SUCCESS;
+	struct vcd_buffer_pool *out_buf_pool;
+	struct vcd_buffer_requirement buf_req;
+
+	if (frame)
+		rc = vcd_handle_output_required(cctxt, payload, status);
+	VCD_FAILED_RETURN(rc, "Failed: vcd_handle_output_required in reconfig");
+	vcd_mark_frame_channel(cctxt->dev_ctxt);
+
+	rc = vcd_sched_suspend_resume_clnt(cctxt, false);
+	VCD_FAILED_RETURN(rc, "Failed: vcd_sched_suspend_resume_clnt");
+	out_buf_pool = &cctxt->out_buf_pool;
+	prop_hdr.prop_id = DDL_I_OUTPUT_BUF_REQ;
+	prop_hdr.sz = sizeof(buf_req);
+	rc = ddl_get_property(cctxt->ddl_handle, &prop_hdr, &buf_req);
+	VCD_FAILED_RETURN(rc, "Failed: ddl_GetProperty");
+
+	out_buf_pool->buf_req = buf_req;
+
+	if (out_buf_pool->count < buf_req.actual_count) {
+		VCD_MSG_HIGH("Output buf requirement count increased");
+		out_buf_pool->count = buf_req.actual_count;
+	}
+
+	if (buf_req.actual_count > VCD_MAX_BUFFER_ENTRIES) {
+		VCD_MSG_ERROR("\n New act count exceeds Max count(32)");
+		return VCD_ERR_FAIL;
+	}
+
+	if (!VCD_FAILED(rc)) {
+		rc = vcd_set_frame_size(cctxt, NULL);
+		VCD_FAILED_RETURN(rc, "Failed: set_frame_size in reconfig");
+		cctxt->status.mask &= ~VCD_FIRST_OP_RCVD;
+		cctxt->status.mask |= VCD_IN_RECONFIG;
+		cctxt->callback(VCD_EVT_IND_OUTPUT_RECONFIG,
+			status, NULL, 0, cctxt,
+			cctxt->client_data);
+	}
+	return rc;
+}
+
+u32 vcd_handle_ind_output_reconfig_in_flushing(
+	struct vcd_clnt_ctxt *cctxt, void *payload, u32 status)
+{
+	u32 rc = VCD_S_SUCCESS;
+	if ((cctxt->status.mask & VCD_FLUSH_INPUT) && payload) {
+		(void)vcd_handle_input_done(cctxt, payload,
+			VCD_EVT_RESP_INPUT_FLUSHED, status);
+		payload = NULL;
+	}
+	rc = vcd_handle_ind_output_reconfig(cctxt, payload, status);
+	return rc;
+}
+
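+/*
+ * Hand an output buffer back to the hardware.  For decode sessions this is
+ * done through the DDL_I_DPB_RELEASE property; on success the entry is
+ * marked in use and the pool's in-use count is incremented.
+ */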
+u32 vcd_return_op_buffer_to_hw(struct vcd_clnt_ctxt *cctxt,
+	struct vcd_buffer_entry *buf_entry)
+{
+	u32 rc = VCD_S_SUCCESS;
+	struct vcd_frame_data *frm_entry = &buf_entry->frame;
+
+	VCD_MSG_LOW("vcd_return_op_buffer_to_hw in %d:",
+		    cctxt->clnt_state.state);
+	frm_entry->physical = buf_entry->physical;
+	frm_entry->ip_frm_tag = VCD_FRAMETAG_INVALID;
+	frm_entry->intrlcd_ip_frm_tag = VCD_FRAMETAG_INVALID;
+	frm_entry->data_len = 0;
+
+	if (cctxt->decoding) {
+		struct vcd_property_hdr prop_hdr;
+		struct ddl_frame_data_tag ddl_frm;
+
+		prop_hdr.prop_id = DDL_I_DPB_RELEASE;
+		prop_hdr.sz = sizeof(struct ddl_frame_data_tag);
+		memset(&ddl_frm, 0, sizeof(ddl_frm));
+		ddl_frm.vcd_frm = *frm_entry;
+		rc = ddl_set_property(cctxt->ddl_handle, &prop_hdr,
+				      &ddl_frm);
+		if (VCD_FAILED(rc)) {
+			VCD_MSG_ERROR("Error returning output buffer to"
+					" HW. rc = 0x%x", rc);
+			buf_entry->in_use = false;
+		} else {
+			cctxt->out_buf_pool.in_use++;
+			buf_entry->in_use = true;
+		}
+	}
+	return rc;
+}
+
+void vcd_handle_clnt_fatal(struct vcd_clnt_ctxt *cctxt, u32 trans_end)
+{
+	if (trans_end)
+		vcd_mark_frame_channel(cctxt->dev_ctxt);
+	vcd_handle_err_fatal(cctxt,
+		VCD_EVT_IND_HWERRFATAL, VCD_ERR_CLIENT_FATAL);
+}
+
+void vcd_handle_clnt_fatal_input_done(struct vcd_clnt_ctxt *cctxt,
+	u32 trans_end)
+{
+	if (cctxt->status.frame_submitted > 0)
+		cctxt->status.frame_submitted--;
+	vcd_handle_clnt_fatal(cctxt, trans_end);
+}
+
+void vcd_handle_ind_info_output_reconfig(
+	struct vcd_clnt_ctxt *cctxt, u32 status)
+{
+	if (cctxt)
+		cctxt->callback(VCD_EVT_IND_INFO_OUTPUT_RECONFIG, status, NULL,
+				0, cctxt, cctxt->client_data);
+}
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_util.c b/drivers/video/msm/vidc/common/vcd/vcd_util.c
new file mode 100644
index 0000000..ba991f1
--- /dev/null
+++ b/drivers/video/msm/vidc/common/vcd/vcd_util.c
@@ -0,0 +1,106 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "vidc_type.h"
+#include "vcd_util.h"
+
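+/*
+ * The "critical section" helpers below wrap a dynamically allocated kernel
+ * mutex behind an opaque u32 * handle so callers do not depend on the
+ * locking primitive directly.
+ */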
+u32 vcd_critical_section_create(u32 **p_cs)
+{
+	struct mutex *lock;
+
+	if (!p_cs) {
+		VCD_MSG_ERROR("Bad critical section ptr");
+		return VCD_ERR_BAD_POINTER;
+	}
+
+	lock = kmalloc(sizeof(struct mutex), GFP_KERNEL);
+	if (!lock) {
+		VCD_MSG_ERROR("Failed: vcd_critical_section_create");
+		return VCD_ERR_ALLOC_FAIL;
+	}
+	mutex_init(lock);
+	*p_cs = (u32 *) lock;
+	return VCD_S_SUCCESS;
+}
+
+u32 vcd_critical_section_release(u32 *cs)
+{
+	struct mutex *lock = (struct mutex *)cs;
+
+	if (!lock) {
+		VCD_MSG_ERROR("Bad critical section object");
+		return VCD_ERR_BAD_POINTER;
+	}
+
+	mutex_destroy(lock);
+	kfree(cs);
+	return VCD_S_SUCCESS;
+}
+
+u32 vcd_critical_section_enter(u32 *cs)
+{
+	struct mutex *lock = (struct mutex *)cs;
+
+	if (!lock) {
+		VCD_MSG_ERROR("Bad critical section object");
+		return VCD_ERR_BAD_POINTER;
+	}
+	mutex_lock(lock);
+
+	return VCD_S_SUCCESS;
+}
+
+u32 vcd_critical_section_leave(u32 *cs)
+{
+	struct mutex *lock = (struct mutex *)cs;
+
+	if (!lock) {
+		VCD_MSG_ERROR("Bad critical section object");
+		return VCD_ERR_BAD_POINTER;
+	}
+	mutex_unlock(lock);
+
+	return VCD_S_SUCCESS;
+}
+
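+/*
+ * Allocate a physically contiguous, 4 KB aligned buffer from pmem and map
+ * it into the kernel with ioremap.  vcd_pmem_free() below undoes both the
+ * mapping and the allocation.
+ */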
+int vcd_pmem_alloc(u32 size, u8 **kernel_vaddr, u8 **phy_addr)
+{
+	*phy_addr =
+	    (u8 *) pmem_kalloc(size, PMEM_MEMTYPE | PMEM_ALIGNMENT_4K);
+
+	if (!IS_ERR((void *)*phy_addr)) {
+		*kernel_vaddr = ioremap((unsigned long)*phy_addr, size);
+
+		if (!*kernel_vaddr) {
+			pr_err("%s: could not ioremap in kernel pmem buffers\n",
+			       __func__);
+			pmem_kfree((s32) *phy_addr);
+			return -ENOMEM;
+		}
+		pr_debug("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
+			 (u32) *phy_addr, (u32) *kernel_vaddr);
+		return 0;
+	} else {
+		pr_err("%s: could not allocate in kernel pmem buffers\n",
+		       __func__);
+		return -ENOMEM;
+	}
+}
+
+int vcd_pmem_free(u8 *kernel_vaddr, u8 *phy_addr)
+{
+	iounmap((void *)kernel_vaddr);
+	pmem_kfree((s32) phy_addr);
+
+	return 0;
+}
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_util.h b/drivers/video/msm/vidc/common/vcd/vcd_util.h
new file mode 100644
index 0000000..07ad651
--- /dev/null
+++ b/drivers/video/msm/vidc/common/vcd/vcd_util.h
@@ -0,0 +1,52 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _VCD_UTIL_H_
+#define _VCD_UTIL_H_
+#include "vidc_type.h"
+#include "vcd_api.h"
+
+#if DEBUG
+
+#define VCD_MSG_LOW(xx_fmt...)		printk(KERN_INFO "\n\t* " xx_fmt)
+#define VCD_MSG_MED(xx_fmt...)		printk(KERN_INFO "\n  * " xx_fmt)
+#define VCD_MSG_HIGH(xx_fmt...)		printk(KERN_WARNING "\n" xx_fmt)
+
+#else
+
+#define VCD_MSG_LOW(xx_fmt...)
+#define VCD_MSG_MED(xx_fmt...)
+#define VCD_MSG_HIGH(xx_fmt...)
+
+#endif
+
+#define VCD_MSG_ERROR(xx_fmt...)	printk(KERN_ERR "\n err: " xx_fmt)
+#define VCD_MSG_FATAL(xx_fmt...)	printk(KERN_ERR "\n<FATAL> " xx_fmt)
+
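+/*
+ * Log an error and return the failing status code from the enclosing
+ * function when rc indicates failure.
+ */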
+#define VCD_FAILED_RETURN(rc, xx_fmt...)		\
+	do {						\
+		if (VCD_FAILED(rc)) {			\
+			printk(KERN_ERR  xx_fmt);	\
+			return rc;			\
+		}					\
+	} while (0)
+
+#define VCD_FAILED_DEVICE_FATAL(rc) \
+	(rc == VCD_ERR_HW_FATAL ? true : false)
+#define VCD_FAILED_CLIENT_FATAL(rc) \
+	(rc == VCD_ERR_CLIENT_FATAL ? true : false)
+
+#define VCD_FAILED_FATAL(rc)  \
+	((VCD_FAILED_DEVICE_FATAL(rc) || VCD_FAILED_CLIENT_FATAL(rc)) \
+	? true : false)
+
+#endif
diff --git a/drivers/video/msm/vidc/common/vcd/vidc_type.h b/drivers/video/msm/vidc/common/vcd/vidc_type.h
new file mode 100644
index 0000000..bd87c0d
--- /dev/null
+++ b/drivers/video/msm/vidc/common/vcd/vidc_type.h
@@ -0,0 +1,30 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef VIDC_TYPE_H
+#define VIDC_TYPE_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/time.h>
+#include <linux/dma-mapping.h>
+#include <linux/android_pmem.h>
+
+#define DEBUG   0
+#define VIDC_ENABLE_DBGFS
+
+#define USE_RES_TRACKER
+#endif